max_stars_repo_path
stringlengths 4
286
| max_stars_repo_name
stringlengths 5
119
| max_stars_count
int64 0
191k
| id
stringlengths 1
7
| content
stringlengths 6
1.03M
| content_cleaned
stringlengths 6
1.03M
| language
stringclasses 111
values | language_score
float64 0.03
1
| comments
stringlengths 0
556k
| edu_score
float64 0.32
5.03
| edu_int_score
int64 0
5
|
|---|---|---|---|---|---|---|---|---|---|---|
inference/online_inference/src/entities/dataclasses.py
|
made-ml-in-prod-2021/marina-zav
| 0
|
6628351
|
<filename>inference/online_inference/src/entities/dataclasses.py
from pydantic import BaseModel, conlist, validator
from typing import List, Union
from src.entities import read_features_params
DEFAULT_FEATURES_CONFIG_PATH = "configs/features_config.yaml"
MODEL_FEATURES = read_features_params(DEFAULT_FEATURES_CONFIG_PATH).features
class HeartDiseaseModelRequest(BaseModel):
    """Request payload for the heart-disease prediction endpoint.

    Attributes:
        data: One row per sample; each row is a list of numeric feature
            values (floats or ints).
        features: Feature names sent with the request; must match the
            model's expected features exactly (order included).
    """

    data: List[conlist(Union[float, int])]
    features: List[str]

    @validator("features")
    def validate_model_features(cls, features):
        """Reject requests whose feature list differs from the model's."""
        # List equality is element-wise and order-sensitive, so this also
        # enforces the column order the model was trained with.
        if features != MODEL_FEATURES:
            raise ValueError("Wrong setup of features to predict")
        return features
class HeartDiseaseModelResponse(BaseModel):
    """Response payload carrying the model's predicted class id."""
    # NOTE(review): the meaning of class_id values (e.g. 1 = disease) is not
    # visible here — confirm against the training pipeline's label encoding.
    class_id: int
|
<filename>inference/online_inference/src/entities/dataclasses.py
from pydantic import BaseModel, conlist, validator
from typing import List, Union
from src.entities import read_features_params
DEFAULT_FEATURES_CONFIG_PATH = "configs/features_config.yaml"
MODEL_FEATURES = read_features_params(DEFAULT_FEATURES_CONFIG_PATH).features
class HeartDiseaseModelRequest(BaseModel):
data: List[conlist(Union[float, int])]
features: List[str]
@validator("features")
def validate_model_features(cls, features):
if not features == MODEL_FEATURES:
raise ValueError("Wrong setup of features to predict")
return features
class HeartDiseaseModelResponse(BaseModel):
class_id: int
|
none
| 1
| 2.549798
| 3
|
|
2019_3_Cooper_Type/RoboFont/simple_interp.py
|
benkiel/python_workshops
| 6
|
6628352
|
# Build ten interpolated glyphs between 'a' and 'a.2' in the current RoboFont
# font, and register each one under a stylistic-set (ss01..ss10) substitution.
font = CurrentFont()

# OpenType feature template: ssNN { sub <source> by <target>; } ssNN;
ss = """
feature ss%s{
sub %s by %s;
} ss%s;
"""

master_a = font['a']
master_b = font['a.2']
# Interpolation only works if the two masters are point-compatible.
print(master_a.isCompatible(master_b))

features = ""
for idx in range(10):
    step = idx + 1
    glyph_name = "test." + str(step)
    new_glyph = font.newGlyph(glyph_name)
    ratio = .1 * step
    print(ratio)
    # NOTE(review): the factor used is step/5 (up to 2.0, i.e. extrapolation
    # past the second master), not the printed ratio — presumably deliberate.
    new_glyph.interpolate(step / 5, master_a, master_b)
    tag = step
    if tag < 10:
        tag = "0" + str(tag)  # zero-pad so feature tags read ss01..ss09
    features += ss % (tag, 'a', glyph_name, tag)

font.features.text = features
|
font = CurrentFont()
ss = """
feature ss%s{
sub %s by %s;
} ss%s;
"""
one = font['a']
two = font['a.2']
print(one.isCompatible(two))
features = ""
for f in range(10):
f = f+1
name = "test."+str(f)
result = font.newGlyph(name)
r = .1*f
print(r)
result.interpolate(f/5,one,two)
if f < 10:
f = "0"+str(f)
features += ss % (f, 'a', name, f)
font.features.text = features
|
en
| 0.780662
|
feature ss%s{ sub %s by %s; } ss%s;
| 3.483123
| 3
|
tests/test_checker.py
|
TestowanieAutomatyczneUG/laboratorium-9-wgulan
| 0
|
6628353
|
<reponame>TestowanieAutomatyczneUG/laboratorium-9-wgulan
from src.sample.Checker import Checker
from unittest.mock import *
from unittest import TestCase, main
class TestCar(TestCase):
    """Tests for Checker.reminder: the wav file plays only after 17:00."""

    def setUp(self):
        self.test_checker = Checker()

    def test_checker_play_file_after_17(self):
        wav_file = 'file.wav'
        # Stub the environment clock to a time past 17.
        # Fix: use the `name=` keyword — the original passed 'getTime'
        # positionally, which set it as the mock's `spec` (restricting the
        # mock to str's attributes) rather than naming it.
        self.test_checker.env.getTime = Mock(name='getTime')
        self.test_checker.env.getTime.return_value = 18
        self.test_checker.reminder(wav_file)
        # testing
        self.assertEqual(self.test_checker.env.played, True)

    def test_checker_do_not_play_file_before_17(self):
        wav_file = 'file.wav'
        # Stub the environment clock to a time before 17.
        self.test_checker.env.getTime = Mock(name='getTime')
        self.test_checker.env.getTime.return_value = 16
        self.test_checker.reminder(wav_file)
        # testing
        self.assertEqual(self.test_checker.env.played, False)

    def tearDown(self):
        self.test_checker = None
if __name__ == '__main__':
    # Allow running this test module directly: `python test_checker.py`.
    main()
|
from src.sample.Checker import Checker
from unittest.mock import *
from unittest import TestCase, main
class TestCar(TestCase):
def setUp(self):
self.test_checker = Checker()
def test_checker_play_file_after_17(self):
wav_file = 'file.wav'
# prepare mock
self.test_checker.env.getTime = Mock('getTime')
self.test_checker.env.getTime.return_value = 18
self.test_checker.reminder(wav_file)
# testing
self.assertEqual(self.test_checker.env.played, True)
def test_checker_do_not_play_file_before_17(self):
wav_file = 'file.wav'
# prepare mock
self.test_checker.env.getTime = Mock('getTime')
self.test_checker.env.getTime.return_value = 16
self.test_checker.reminder(wav_file)
# testing
self.assertEqual(self.test_checker.env.played, False)
def tearDown(self):
self.test_checker = None
if __name__ == '__main__':
main()
|
en
| 0.742083
|
# prepare mock # testing # prepare mock # testing
| 2.891853
| 3
|
examples/loading-img.py
|
m0rphed/comp-vis-notes
| 0
|
6628354
|
import cv2
import matplotlib.pyplot as plt

# Load the photo as a single-channel (grayscale) array.
img = cv2.imread('./images/watch.jpg', cv2.IMREAD_GRAYSCALE)

# Display it in a native OpenCV window; block until any key is pressed.
cv2.imshow('picture', img)
cv2.waitKey(0)
cv2.destroyAllWindows()

# Re-display with matplotlib, overlaying a thick cyan line segment.
plt.imshow(img, cmap='gray', interpolation='bicubic')
plt.plot([50, 100], [80, 100], 'c', linewidth=5)
plt.show()

# Persist the grayscale copy as a PNG next to the source image.
cv2.imwrite('./images/watch-gray.png', img)
|
import cv2
import matplotlib.pyplot as plt
image = cv2.imread('./images/watch.jpg', cv2.IMREAD_GRAYSCALE)
cv2.imshow('picture', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
plt.imshow(image, cmap='gray', interpolation='bicubic')
plt.plot([50, 100], [80, 100], 'c', linewidth=5)
plt.show()
cv2.imwrite('./images/watch-gray.png', image)
|
none
| 1
| 3.059597
| 3
|
|
oops_fhir/r4/code_system/v3_substitution_condition.py
|
Mikuana/oops_fhir
| 0
|
6628355
|
from pathlib import Path
from fhir.resources.codesystem import CodeSystem
from oops_fhir.utils import CodeSystemConcept
__all__ = ["v3SubstitutionCondition"]
_resource = CodeSystem.parse_file(Path(__file__).with_suffix(".json"))
class v3SubstitutionCondition:
    """
    v3 Code System SubstitutionCondition

    Identifies what sort of change is permitted or has occurred between the
    item that was ordered/requested and the one that was/will be provided.

    Status: active - Version: 2018-08-12

    Copyright None

    http://terminology.hl7.org/CodeSystem/v3-SubstitutionCondition
    """

    # Abstract grouper concept (marked notSelectable) for substitutions that
    # are allowed only under a condition; its child concepts are CONFIRM
    # (ask first) and NOTIFY (tell first).
    underscore_conditional = CodeSystemConcept(
        {
            "code": "_Conditional",
            "concept": [
                {
                    "code": "CONFIRM",
                    "definition": "Confirmation with Contact Person prior to making any substitutions has or will occur.",
                    "display": "Confirm first",
                },
                {
                    "code": "NOTIFY",
                    "definition": "Notification to the Contact Person, prior to substitution and through normal institutional procedures, has or will be made.",
                    "display": "Notify first",
                },
            ],
            "definition": "Some conditions may be attached to an allowable substitution. An allowable substitution is based on a match to any other attributes that may be specified.",
            "display": "Conditional",
            "property": [{"code": "notSelectable", "valueBoolean": True}],
        }
    )
    """
    Conditional

    Some conditions may be attached to an allowable substitution. An allowable substitution is based on a match to any other attributes that may be specified.
    """

    # Leaf concept: substitution is forbidden outright.
    nosub = CodeSystemConcept(
        {
            "code": "NOSUB",
            "definition": "Substitution is not permitted.",
            "display": "No substitution",
        }
    )
    """
    No substitution

    Substitution is not permitted.
    """

    # Leaf concept: substitution is allowed without any precondition.
    uncond = CodeSystemConcept(
        {
            "code": "UNCOND",
            "definition": "No conditions are required.",
            "display": "Unconditional",
        }
    )
    """
    Unconditional

    No conditions are required.
    """

    class Meta:
        # Underlying FHIR CodeSystem resource, parsed from the sibling .json
        # file at import time.
        resource = _resource
|
from pathlib import Path
from fhir.resources.codesystem import CodeSystem
from oops_fhir.utils import CodeSystemConcept
__all__ = ["v3SubstitutionCondition"]
_resource = CodeSystem.parse_file(Path(__file__).with_suffix(".json"))
class v3SubstitutionCondition:
"""
v3 Code System SubstitutionCondition
Identifies what sort of change is permitted or has occurred between the
item that was ordered/requested and the one that was/will be provided.
Status: active - Version: 2018-08-12
Copyright None
http://terminology.hl7.org/CodeSystem/v3-SubstitutionCondition
"""
underscore_conditional = CodeSystemConcept(
{
"code": "_Conditional",
"concept": [
{
"code": "CONFIRM",
"definition": "Confirmation with Contact Person prior to making any substitutions has or will occur.",
"display": "Confirm first",
},
{
"code": "NOTIFY",
"definition": "Notification to the Contact Person, prior to substitution and through normal institutional procedures, has or will be made.",
"display": "Notify first",
},
],
"definition": "Some conditions may be attached to an allowable substitution. An allowable substitution is based on a match to any other attributes that may be specified.",
"display": "Conditional",
"property": [{"code": "notSelectable", "valueBoolean": True}],
}
)
"""
Conditional
Some conditions may be attached to an allowable substitution. An allowable substitution is based on a match to any other attributes that may be specified.
"""
nosub = CodeSystemConcept(
{
"code": "NOSUB",
"definition": "Substitution is not permitted.",
"display": "No substitution",
}
)
"""
No substitution
Substitution is not permitted.
"""
uncond = CodeSystemConcept(
{
"code": "UNCOND",
"definition": "No conditions are required.",
"display": "Unconditional",
}
)
"""
Unconditional
No conditions are required.
"""
class Meta:
resource = _resource
|
en
| 0.868351
|
v3 Code System SubstitutionCondition Identifies what sort of change is permitted or has occurred between the item that was ordered/requested and the one that was/will be provided. Status: active - Version: 2018-08-12 Copyright None http://terminology.hl7.org/CodeSystem/v3-SubstitutionCondition Conditional Some conditions may be attached to an allowable substitution. An allowable substitution is based on a match to any other attributes that may be specified. No substitution Substitution is not permitted. Unconditional No conditions are required.
| 2.265389
| 2
|
silabel/sample.py
|
wahyubram82/indonesian_syllabelizer
| 0
|
6628356
|
<filename>silabel/sample.py
# Words used to exercise the syllabifier: acronyms ('BSD', 'SMP') plus
# presumably-Indonesian words of increasing length and complexity, including
# consonant digraphs ('makhluk', 'masyhur', 'musyawarah') and long derived
# forms ('mempertanggungjawabkan').
test_sample = [
    'BSD', 'SMP', 'main', 'april', 'swasta', 'instan', 'dengan', 'pandai', 'makhluk', 'saudara', 'menyapu',
    'etiopia', 'masyhur', 'biografi', 'instrumen', 'pengarang', 'reboisasi', 'musyawarah', 'dramatisasi',
    'memproklamasikan', 'berkesinambungan', 'mempertanggungjawabkan'
]
|
<filename>silabel/sample.py
test_sample = [
'BSD', 'SMP', 'main', 'april', 'swasta', 'instan', 'dengan', 'pandai', 'makhluk', 'saudara', 'menyapu',
'etiopia', 'masyhur', 'biografi', 'instrumen', 'pengarang', 'reboisasi', 'musyawarah', 'dramatisasi',
'memproklamasikan', 'berkesinambungan', 'mempertanggungjawabkan'
]
|
none
| 1
| 1.331142
| 1
|
|
run_route_scripts/results/diff_route_stats.py
|
eric-erki/valhalla
| 0
|
6628357
|
<reponame>eric-erki/valhalla
#!/usr/bin/env python3
import csv
import argparse
# Statistic columns (by header name) whose old/new values are compared.
STATS_TO_DIFF = ['#Passes', 'runtime', 'trip time', 'length', '#Manuevers']


def main(old_stats_file, new_stats_file, output_file):
    """Diff two route-statistics CSVs and write a combined comparison CSV.

    For every stat in STATS_TO_DIFF the output contains four columns —
    <stat>_old, <stat>_new, <stat>_diff and <stat>_%diff — preceded by a
    1-based routeID column.

    Args:
        old_stats_file: Path to the baseline statistics CSV.
        new_stats_file: Path to the new statistics CSV (same rows, same order).
        output_file: Path of the comparison CSV to write.
    """
    with open(old_stats_file, 'r') as old_file, \
            open(new_stats_file, 'r') as new_file, \
            open(output_file, 'w', newline='') as output_csv:
        old_csv_reader = csv.reader(old_file)
        new_csv_reader = csv.reader(new_file)
        # Store header, stripping any whitespace that might be present.
        headers = [h.strip() for h in next(old_csv_reader)]
        # Skip header row in the second csv.
        next(new_csv_reader)
        # Indexes of the columns to diff, and the output header row: each
        # stat expands to _old/_new/_diff/_%diff columns.
        cols_to_diff = [headers.index(col) for col in STATS_TO_DIFF]
        stats_diff_fieldnames = ['routeID']
        for col in STATS_TO_DIFF:
            stats_diff_fieldnames.extend(
                '{}_{}'.format(col, suffix)
                for suffix in ('old', 'new', 'diff', '%diff'))
        csv_writer = csv.writer(output_csv)
        csv_writer.writerow(stats_diff_fieldnames)
        # Assume the same number of rows in both csvs; zip() silently drops
        # any surplus rows in the longer file.
        for route_num, (old_row, new_row) in enumerate(
                zip(old_csv_reader, new_csv_reader), start=1):
            diff_row = [route_num]
            for col_index in cols_to_diff:
                # Treat everything as float.
                old_stat = float(old_row[col_index])
                new_stat = float(new_row[col_index])
                diff = new_stat - old_stat
                # Fix: guard against ZeroDivisionError when the old stat is
                # 0 (e.g. a zero-length or zero-runtime route).
                if old_stat:
                    pct_diff = diff / old_stat * 100
                elif diff:
                    pct_diff = float('inf') if diff > 0 else float('-inf')
                else:
                    pct_diff = 0.0
                diff_row.extend([old_stat, new_stat,
                                 '{}'.format(diff),
                                 '{:.2f}'.format(pct_diff)])
            csv_writer.writerow(diff_row)
    print('Combined statistics generated: {}'.format(output_file))
if __name__ == '__main__':
    # CLI entry point: diff two statistics CSVs into a combined report.
    arg_parser = argparse.ArgumentParser(
        description='Compare 2 RAD statistics and write output as a csv')
    arg_parser.add_argument('old_stats_file', help='Old statistics.csv')
    arg_parser.add_argument('new_stats_file', help='New statistics.csv')
    arg_parser.add_argument('output_file', help='Output CSV filename')
    cli_args = arg_parser.parse_args()
    main(cli_args.old_stats_file, cli_args.new_stats_file,
         cli_args.output_file)
|
#!/usr/bin/env python3
import csv
import argparse
STATS_TO_DIFF = ['#Passes', 'runtime', 'trip time', 'length', '#Manuevers']
def main(old_stats_file, new_stats_file, output_file):
with open(old_stats_file, 'r') as old_file, \
open(new_stats_file, 'r') as new_file, \
open(output_file, 'w', newline='') as output_csv:
old_csv_reader = csv.reader(old_file)
new_csv_reader = csv.reader(new_file)
# Store header, stripping any whitespace that might be present
headers = list(map(str.strip, next(old_csv_reader)))
# Skip header row in the second csv
next(new_csv_reader)
cols_to_diff = []
stats_diff_fieldnames = ['routeID']
# Collect indexes of cols we're going to generate diff stats of and
# generate fieldnames for stats diff
for col in STATS_TO_DIFF:
cols_to_diff.append(headers.index(col))
# each field generates the following field names in the diff:
# - <field name>_old
# - <field name>_new
# - <field name>_diff
# - <field name>_%diff_
stats_diff_fieldnames.append('{}_old'.format(col))
stats_diff_fieldnames.append('{}_new'.format(col))
stats_diff_fieldnames.append('{}_diff'.format(col))
stats_diff_fieldnames.append('{}_%diff'.format(col))
csv_writer = csv.writer(output_csv)
csv_writer.writerow(stats_diff_fieldnames)
route_num = 1
# Assume same number of rows in both csv
for old_row, new_row in zip(old_csv_reader, new_csv_reader):
diff_row = []
diff_row.append(route_num)
for col_index in cols_to_diff:
# Treat everything as float
old_stat, new_stat = (float(old_row[col_index]),
float(new_row[col_index]))
diff = new_stat - old_stat
pct_diff = diff/old_stat * 100
diff_row.append(old_stat)
diff_row.append(new_stat)
diff_row.append('{}'.format(diff))
diff_row.append('{:.2f}'.format(pct_diff))
csv_writer.writerow(diff_row)
route_num += 1
print('Combined statistics generated: {}'.format(output_file))
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Compare 2 RAD statistics and '
'write output as a csv')
parser.add_argument('old_stats_file', help='Old statistics.csv')
parser.add_argument('new_stats_file', help='New statistics.csv')
parser.add_argument('output_file', help='Output CSV filename')
args = parser.parse_args()
main(args.old_stats_file, args.new_stats_file, args.output_file)
|
en
| 0.865417
|
#!/usr/bin/env python3 # Store header, stripping any whitespace that might be present # Skip header row in the second csv # Collect indexes of cols we're going to generate diff stats of and # generate fieldnames for stats diff # each field generates the following field names in the diff: # - <field name>_old # - <field name>_new # - <field name>_diff # - <field name>_%diff_ # Assume same number of rows in both csv # Treat everything as float
| 3.36944
| 3
|
py/moma/models/end_effectors/wrist_sensors/robotiq_fts300.py
|
wx-b/dm_robotics
| 128
|
6628358
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing Robotiq FTS300 Sensor."""
import collections
from dm_control import composer
from dm_control import mjcf
from dm_robotics.moma.models import types
from dm_robotics.moma.models import utils as models_utils
from dm_robotics.moma.models.end_effectors.wrist_sensors import robotiq_fts300_constants as consts
import numpy as np
# Location of the Robotiq assets within the robots tree.
# NOTE(review): this constant is not referenced elsewhere in this module —
# presumably used by the XML or kept for callers; confirm before removing.
_ROBOTIQ_ASSETS_PATH = 'robots/robotiq/assets'
# MJCF site and sensor names looked up in the model XML by _build().
_ATTACHMENT_SITE = 'ft_sensor_attachment_site'
_FRAME_SITE = 'ft_sensor_frame_site'
_FORCE_SENSOR_NAME = 'ft_sensor_force'
_TORQUE_SENSOR_NAME = 'ft_sensor_torque'
# Noise/clipping parameter bundle returned by RobotiqFTS300.sensor_params.
_SensorParams = collections.namedtuple(
    'SensorParams',
    ['force_std', 'torque_std', 'max_abs_force', 'max_abs_torque'])
# Geom kwargs for the single collision sphere attached to the sensor base.
_COLLISION_KWARGS = [{
    'name': 'base_mount_CollisionGeom',
    'type': 'sphere',
    'pos': '0 0.0 0.015',
    'size': '0.05'
}]
# Dictionary mapping body names to a list of their collision geoms
_COLLISION_GEOMS_DICT = {
    'base_mount': _COLLISION_KWARGS,
}
class RobotiqFTS300(composer.Entity):
  """Composer entity wrapping the Robotiq FTS300 force/torque sensor model."""

  _mjcf_root: mjcf.RootElement

  def _build(
      self,
      name: str = 'robotiq_fts300',
  ) -> None:
    """Loads the MJCF model and caches the sites/sensors it exposes.

    Args:
      name: The name of this sensor. Used as a prefix in the MJCF name
        attributes.
    """
    root = mjcf.from_path(consts.XML_PATH)
    root.model = name
    self._mjcf_root = root
    # Cache the named elements callers access through the properties below.
    self._attachment_site = root.find('site', _ATTACHMENT_SITE)
    self._sensor_frame_site = root.find('site', _FRAME_SITE)
    self._force_sensor = root.find('sensor', _FORCE_SENSOR_NAME)
    self._torque_sensor = root.find('sensor', _TORQUE_SENSOR_NAME)
    self._add_collision_geoms()

  def _add_collision_geoms(self):
    """Attaches the primitive collision geoms declared for this entity."""
    self._collision_geoms = models_utils.attach_collision_geoms(
        self.mjcf_model, _COLLISION_GEOMS_DICT)

  def initialize_episode(self, physics: mjcf.Physics,
                         random_state: np.random.RandomState):
    """Applies gravity compensation to all sensor bodies for the episode."""
    del random_state  # Unused.
    bodies = self.mjcf_model.find_all('body')
    bound_bodies = physics.bind(bodies)
    if bound_bodies is None:
      raise ValueError('Calling physics.bind with bodies returns None.')
    # Cancel gravity with an equal-and-opposite applied wrench per body
    # (force part only; the torque half of the 6-vector is zero).
    gravity_wrench = np.hstack([physics.model.opt.gravity, [0, 0, 0]])
    bound_bodies.xfrc_applied[:] = (
        -gravity_wrench * bound_bodies.mass[..., None])

  @property
  def force_sensor(self) -> types.MjcfElement:
    """MJCF force sensor element."""
    return self._force_sensor

  @property
  def torque_sensor(self) -> types.MjcfElement:
    """MJCF torque sensor element."""
    return self._torque_sensor

  @property
  def mjcf_model(self) -> mjcf.RootElement:
    """Root of this entity's MJCF model."""
    return self._mjcf_root

  @property
  def attachment_site(self) -> types.MjcfElement:
    """Site where the sensor attaches to the arm."""
    return self._attachment_site

  @property
  def frame_site(self) -> types.MjcfElement:
    """Site marking the sensor's measurement frame."""
    return self._sensor_frame_site

  @property
  def sensor_params(self):
    """`_SensorParams` namedtuple specifying noise and clipping parameters."""
    # Noise values (zero-mean standard deviation) and absolute force/torque
    # ranges come from the manufacturer's datasheet. Whilst torque drift is
    # non-significant as per the manual, force drift (+/-3N over 24h) is not
    # currently modelled.
    return _SensorParams(
        force_std=(1.2, 1.2, 0.5),
        torque_std=(0.02, 0.02, 0.12),
        max_abs_force=300.,
        max_abs_torque=30.)

  @property
  def collision_geom_group(self):
    """Full identifiers of this entity's collision geoms."""
    return [geom.full_identifier for geom in self._collision_geoms]
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing Robotiq FTS300 Sensor."""
import collections
from dm_control import composer
from dm_control import mjcf
from dm_robotics.moma.models import types
from dm_robotics.moma.models import utils as models_utils
from dm_robotics.moma.models.end_effectors.wrist_sensors import robotiq_fts300_constants as consts
import numpy as np
_ROBOTIQ_ASSETS_PATH = 'robots/robotiq/assets'
_ATTACHMENT_SITE = 'ft_sensor_attachment_site'
_FRAME_SITE = 'ft_sensor_frame_site'
_FORCE_SENSOR_NAME = 'ft_sensor_force'
_TORQUE_SENSOR_NAME = 'ft_sensor_torque'
_SensorParams = collections.namedtuple(
'SensorParams',
['force_std', 'torque_std', 'max_abs_force', 'max_abs_torque'])
_COLLISION_KWARGS = [{
'name': 'base_mount_CollisionGeom',
'type': 'sphere',
'pos': '0 0.0 0.015',
'size': '0.05'
}]
# Dictionary mapping body names to a list of their collision geoms
_COLLISION_GEOMS_DICT = {
'base_mount': _COLLISION_KWARGS,
}
class RobotiqFTS300(composer.Entity):
"""A class representing Robotiq FTS300 force/torque sensor."""
_mjcf_root: mjcf.RootElement
def _build(
self,
name: str = 'robotiq_fts300',
) -> None:
"""Initializes RobotiqFTS300.
Args:
name: The name of this sensor. Used as a prefix in the MJCF name
attributes.
"""
self._mjcf_root = mjcf.from_path(consts.XML_PATH)
self._mjcf_root.model = name
self._attachment_site = self._mjcf_root.find('site', _ATTACHMENT_SITE)
self._sensor_frame_site = self._mjcf_root.find('site', _FRAME_SITE)
self._force_sensor = self._mjcf_root.find('sensor', _FORCE_SENSOR_NAME)
self._torque_sensor = self._mjcf_root.find('sensor', _TORQUE_SENSOR_NAME)
self._add_collision_geoms()
def _add_collision_geoms(self):
"""Add collision geoms."""
self._collision_geoms = models_utils.attach_collision_geoms(
self.mjcf_model, _COLLISION_GEOMS_DICT)
def initialize_episode(self, physics: mjcf.Physics,
random_state: np.random.RandomState):
"""Function called at the beginning of every episode."""
del random_state # Unused.
# Apply gravity compensation
body_elements = self.mjcf_model.find_all('body')
gravity = np.hstack([physics.model.opt.gravity, [0, 0, 0]])
physics_bodies = physics.bind(body_elements)
if physics_bodies is None:
raise ValueError('Calling physics.bind with bodies returns None.')
physics_bodies.xfrc_applied[:] = -gravity * physics_bodies.mass[..., None]
@property
def force_sensor(self) -> types.MjcfElement:
return self._force_sensor
@property
def torque_sensor(self) -> types.MjcfElement:
return self._torque_sensor
@property
def mjcf_model(self) -> mjcf.RootElement:
return self._mjcf_root
@property
def attachment_site(self) -> types.MjcfElement:
return self._attachment_site
@property
def frame_site(self) -> types.MjcfElement:
return self._sensor_frame_site
@property
def sensor_params(self):
"""`_SensorParams` namedtuple specifying noise and clipping parameters."""
return _SensorParams(
# The noise values (zero-mean standard deviation) below were extracted
# from the manufacturer's datasheet. Whilst torque drift is non-
# significant as per the manual, force drift (+/-3N over 24h) is not
# currently modelled.
force_std=(1.2, 1.2, 0.5),
torque_std=(0.02, 0.02, 0.12),
# The absolute force/torque range values below were also extracted from
# the manufacturer's datasheet.
max_abs_force=300.,
max_abs_torque=30.)
@property
def collision_geom_group(self):
collision_geom_group = [
geom.full_identifier for geom in self._collision_geoms
]
return collision_geom_group
|
en
| 0.848661
|
# Copyright 2020 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Module containing Robotiq FTS300 Sensor. # Dictionary mapping body names to a list of their collision geoms A class representing Robotiq FTS300 force/torque sensor. Initializes RobotiqFTS300. Args: name: The name of this sensor. Used as a prefix in the MJCF name attributes. Add collision geoms. Function called at the beginning of every episode. # Unused. # Apply gravity compensation `_SensorParams` namedtuple specifying noise and clipping parameters. # The noise values (zero-mean standard deviation) below were extracted # from the manufacturer's datasheet. Whilst torque drift is non- # significant as per the manual, force drift (+/-3N over 24h) is not # currently modelled. # The absolute force/torque range values below were also extracted from # the manufacturer's datasheet.
| 1.934058
| 2
|
pipeline/contrib/external_plugins/tests/utils/importer/test_base.py
|
ZhuoZhuoCrayon/bk-nodeman
| 31
|
6628359
|
<gh_stars>10-100
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import imp
import sys
from django.test import TestCase
from pipeline.contrib.external_plugins.tests.mock import * # noqa
from pipeline.contrib.external_plugins.tests.mock_settings import * # noqa
from pipeline.contrib.external_plugins.utils.importer.base import NonstandardModuleImporter
class DummyImporter(NonstandardModuleImporter):
    """Concrete importer for tests.

    Abstract accessors return canned values supplied as keyword arguments,
    and every lifecycle hook is a MagicMock spy the tests can assert on.
    """

    def __init__(self, **kwargs):
        super(DummyImporter, self).__init__(modules=kwargs.get("modules", []))
        # Canned results for the accessor methods below (default None).
        for attr in ("is_package", "get_code", "get_source", "get_file",
                     "get_path"):
            setattr(self, "_" + attr, kwargs.get(attr))
        # Spies recording lifecycle callback invocations.
        for hook in ("_accept_find_module_request_hook",
                     "_pre_load_module_hook",
                     "_post_load_module_hook",
                     "_import_error_hook"):
            setattr(self, hook, MagicMock())

    def is_package(self, fullname):
        return self._is_package

    def get_code(self, fullname):
        return self._get_code

    def get_source(self, fullname):
        return self._get_source

    def get_file(self, fullname):
        return self._get_file

    def get_path(self, fullname):
        return self._get_path

    def accept_find_module_request_hook(self, fullname, path):
        self._accept_find_module_request_hook(fullname=fullname, path=path)

    def pre_load_module_hook(self, fullname, module):
        self._pre_load_module_hook(fullname=fullname, module=module)

    def post_load_module_hook(self, fullname, module):
        self._post_load_module_hook(fullname=fullname, module=module)

    def import_error_hook(self, fullname):
        self._import_error_hook(fullname=fullname)
class NonstandardModuleImporterTestCase(TestCase):
    """Unit tests for NonstandardModuleImporter.find_module/load_module."""

    def setUp(self):
        # Patch imp's global import lock and the importer's source-execution
        # helper so no real module code runs during these tests.
        # IMP_ACQUIRE_LOCK etc. are dotted-path constants star-imported from
        # mock_settings.
        self.imp_acquire_lock_patcher = patch(IMP_ACQUIRE_LOCK, MagicMock())
        self.imp_release_lock_patcher = patch(IMP_RELEASE_LOCK, MagicMock())
        self.importer_exec_src_code_patcher = patch(UTILS_IMPORTER_BASE_EXECUTE_SRC_CODE, MagicMock())
        self.imp_acquire_lock_patcher.start()
        self.imp_release_lock_patcher.start()
        self.importer_exec_src_code_patcher.start()

    def tearDown(self):
        # Stop every patcher started in setUp (reverse order not required).
        self.imp_acquire_lock_patcher.stop()
        self.imp_release_lock_patcher.stop()
        self.importer_exec_src_code_patcher.stop()

    def test_find_module__module_not_in_self_modules(self):
        # An importer with an empty whitelist must decline every request
        # without ever invoking the accept hook.
        importer = DummyImporter()
        self.assertIsNone(importer.find_module("django"))
        importer._accept_find_module_request_hook.assert_not_called()
        self.assertIsNone(importer.find_module("django.test"))
        importer._accept_find_module_request_hook.assert_not_called()
        self.assertIsNone(importer.find_module("django.test.utils"))
        importer._accept_find_module_request_hook.assert_not_called()

    def test_find_module__module_in_built_in(self):
        # Built-in modules are never handled by the importer.
        importer = DummyImporter()
        self.assertIsNone(importer.find_module("math"))
        importer._accept_find_module_request_hook.assert_not_called()

    def test_find_module__module_has_name_repetition(self):
        # A whitelisted name appearing again deeper in the dotted path must
        # not be treated as a match.
        importer = DummyImporter(modules=["magic_module"])
        self.assertIsNone(importer.find_module("magic_module.magic_sub_module.magic_module"))
        importer._accept_find_module_request_hook.assert_not_called()

    def test_find_module__accept(self):
        # The whitelisted root and any of its submodules are accepted, and
        # the accept hook fires once per request with path=None.
        importer = DummyImporter(modules=["magic_module"])
        fullname = "magic_module"
        self.assertIs(importer, importer.find_module(fullname))
        importer._accept_find_module_request_hook.assert_called_once_with(fullname=fullname, path=None)
        importer._accept_find_module_request_hook.reset_mock()
        fullname = "magic_module.magic_sub_module_1"
        self.assertIs(importer, importer.find_module(fullname))
        importer._accept_find_module_request_hook.assert_called_once_with(fullname=fullname, path=None)
        importer._accept_find_module_request_hook.reset_mock()
        fullname = "magic_module.magic_sub_module_1.magic_sub_module_2"
        self.assertIs(importer, importer.find_module(fullname))
        importer._accept_find_module_request_hook.assert_called_once_with(fullname=fullname, path=None)
        importer._accept_find_module_request_hook.reset_mock()

    def test_load_module__module_already_in_sys_modules(self):
        # Cached modules are returned as-is; the import lock is still
        # acquired and released exactly once.
        fullname = "exist_module"
        mod = Object()
        importer = DummyImporter()
        with patch(SYS_MODULES, {fullname: mod}):
            self.assertEqual(importer.load_module(fullname=fullname), mod)
            imp.acquire_lock.assert_called_once()
            imp.release_lock.assert_called_once()

    def test_load_module__get_source_raise_import_error(self):
        # An ImportError from get_source makes load_module return None
        # (deferring to other importers) while still releasing the lock.
        sub_module = "sub_module"
        fullname = "exist_module.sub_module"
        mod = Object()
        importer = DummyImporter()
        importer.get_source = MagicMock(side_effect=ImportError)
        with patch(SYS_MODULES, {sub_module: mod}):
            self.assertIsNone(importer.load_module(fullname=fullname))
            imp.acquire_lock.assert_called_once()
            imp.release_lock.assert_called_once()

    def test_load_module__is_package(self):
        # A package load must set __file__, __loader__, __path__ and
        # __package__ (== fullname for packages) and run the hook sequence
        # pre_load -> execute source -> post_load.
        src_code = "src_code"
        fullname = "magic_module"
        _file = "file"
        path = "path"
        importer = DummyImporter(is_package=True, get_source=src_code, get_file=_file, get_path=path)
        with patch(SYS_MODULES, {}):
            mod = importer.load_module(fullname=fullname)
            self.assertIs(sys.modules[fullname], mod)
            self.assertEqual(mod.__file__, _file)
            self.assertIs(mod.__loader__, importer)
            self.assertEqual(mod.__path__, path)
            self.assertEqual(mod.__package__, fullname)
            imp.acquire_lock.assert_called_once()
            importer._pre_load_module_hook.assert_called_once_with(fullname=fullname, module=mod)
            importer._execute_src_code.assert_called_once_with(src_code=src_code, module=mod)
            importer._post_load_module_hook.assert_called_once_with(fullname=fullname, module=mod)
            imp.release_lock.assert_called_once()

    def test_load_module__is_not_package(self):
        # A plain module gets __package__ set to its parent package name and
        # no __path__; the same hook sequence applies.
        src_code = "src_code"
        fullname = "magic_module.sub_module"
        _file = "file"
        importer = DummyImporter(is_package=False, get_source=src_code, get_file=_file)
        with patch(SYS_MODULES, {}):
            mod = importer.load_module(fullname=fullname)
            self.assertIs(sys.modules[fullname], mod)
            self.assertEqual(mod.__file__, _file)
            self.assertIs(mod.__loader__, importer)
            self.assertEqual(mod.__package__, fullname.rpartition(".")[0])
            imp.acquire_lock.assert_called_once()
            importer._pre_load_module_hook.assert_called_once_with(fullname=fullname, module=mod)
            importer._execute_src_code.assert_called_once_with(src_code=src_code, module=mod)
            importer._post_load_module_hook.assert_called_once_with(fullname=fullname, module=mod)
            imp.release_lock.assert_called_once()

    def test_load_module__raise_exception_before_add_module(self):
        # A failure before the module is registered: ImportError is raised,
        # nothing is left in sys.modules, error hook fires, lock released.
        fullname = "magic_module.sub_module"
        importer = DummyImporter(is_package=False)
        importer.get_source = MagicMock(side_effect=Exception())
        importer._import_error_hook = MagicMock(side_effect=Exception())
        with patch(SYS_MODULES, {}):
            self.assertRaises(ImportError, importer.load_module, fullname)
            self.assertNotIn(fullname, sys.modules)
            importer._import_error_hook.assert_called_once()
            imp.release_lock.assert_called_once()

    def test_load_module__raise_exception_after_add_module(self):
        # A failure after registration must roll the module back out of
        # sys.modules before raising ImportError.
        fullname = "magic_module.sub_module"
        importer = DummyImporter(is_package=False)
        importer.get_file = MagicMock(side_effect=Exception())
        with patch(SYS_MODULES, {}):
            self.assertRaises(ImportError, importer.load_module, fullname)
            self.assertNotIn(fullname, sys.modules)
            importer._import_error_hook.assert_called_once()
            imp.release_lock.assert_called_once()
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import imp
import sys
from django.test import TestCase
from pipeline.contrib.external_plugins.tests.mock import * # noqa
from pipeline.contrib.external_plugins.tests.mock_settings import * # noqa
from pipeline.contrib.external_plugins.utils.importer.base import NonstandardModuleImporter
class DummyImporter(NonstandardModuleImporter):
def __init__(self, **kwargs):
super(DummyImporter, self).__init__(modules=kwargs.get("modules", []))
self._is_package = kwargs.get("is_package")
self._get_code = kwargs.get("get_code")
self._get_source = kwargs.get("get_source")
self._get_file = kwargs.get("get_file")
self._get_path = kwargs.get("get_path")
self._accept_find_module_request_hook = MagicMock()
self._pre_load_module_hook = MagicMock()
self._post_load_module_hook = MagicMock()
self._import_error_hook = MagicMock()
def is_package(self, fullname):
return self._is_package
def get_code(self, fullname):
return self._get_code
def get_source(self, fullname):
return self._get_source
def get_file(self, fullname):
return self._get_file
def get_path(self, fullname):
return self._get_path
def accept_find_module_request_hook(self, fullname, path):
self._accept_find_module_request_hook(fullname=fullname, path=path)
def pre_load_module_hook(self, fullname, module):
self._pre_load_module_hook(fullname=fullname, module=module)
def post_load_module_hook(self, fullname, module):
self._post_load_module_hook(fullname=fullname, module=module)
def import_error_hook(self, fullname):
self._import_error_hook(fullname=fullname)
class NonstandardModuleImporterTestCase(TestCase):
def setUp(self):
self.imp_acquire_lock_patcher = patch(IMP_ACQUIRE_LOCK, MagicMock())
self.imp_release_lock_patcher = patch(IMP_RELEASE_LOCK, MagicMock())
self.importer_exec_src_code_patcher = patch(UTILS_IMPORTER_BASE_EXECUTE_SRC_CODE, MagicMock())
self.imp_acquire_lock_patcher.start()
self.imp_release_lock_patcher.start()
self.importer_exec_src_code_patcher.start()
def tearDown(self):
self.imp_acquire_lock_patcher.stop()
self.imp_release_lock_patcher.stop()
self.importer_exec_src_code_patcher.stop()
def test_find_module__module_not_in_self_modules(self):
importer = DummyImporter()
self.assertIsNone(importer.find_module("django"))
importer._accept_find_module_request_hook.assert_not_called()
self.assertIsNone(importer.find_module("django.test"))
importer._accept_find_module_request_hook.assert_not_called()
self.assertIsNone(importer.find_module("django.test.utils"))
importer._accept_find_module_request_hook.assert_not_called()
def test_find_module__module_in_built_in(self):
importer = DummyImporter()
self.assertIsNone(importer.find_module("math"))
importer._accept_find_module_request_hook.assert_not_called()
def test_find_module__module_has_name_repetition(self):
importer = DummyImporter(modules=["magic_module"])
self.assertIsNone(importer.find_module("magic_module.magic_sub_module.magic_module"))
importer._accept_find_module_request_hook.assert_not_called()
def test_find_module__accept(self):
importer = DummyImporter(modules=["magic_module"])
fullname = "magic_module"
self.assertIs(importer, importer.find_module(fullname))
importer._accept_find_module_request_hook.assert_called_once_with(fullname=fullname, path=None)
importer._accept_find_module_request_hook.reset_mock()
fullname = "magic_module.magic_sub_module_1"
self.assertIs(importer, importer.find_module(fullname))
importer._accept_find_module_request_hook.assert_called_once_with(fullname=fullname, path=None)
importer._accept_find_module_request_hook.reset_mock()
fullname = "magic_module.magic_sub_module_1.magic_sub_module_2"
self.assertIs(importer, importer.find_module(fullname))
importer._accept_find_module_request_hook.assert_called_once_with(fullname=fullname, path=None)
importer._accept_find_module_request_hook.reset_mock()
def test_load_module__module_already_in_sys_modules(self):
fullname = "exist_module"
mod = Object()
importer = DummyImporter()
with patch(SYS_MODULES, {fullname: mod}):
self.assertEqual(importer.load_module(fullname=fullname), mod)
imp.acquire_lock.assert_called_once()
imp.release_lock.assert_called_once()
def test_load_module__get_source_raise_import_error(self):
sub_module = "sub_module"
fullname = "exist_module.sub_module"
mod = Object()
importer = DummyImporter()
importer.get_source = MagicMock(side_effect=ImportError)
with patch(SYS_MODULES, {sub_module: mod}):
self.assertIsNone(importer.load_module(fullname=fullname))
imp.acquire_lock.assert_called_once()
imp.release_lock.assert_called_once()
def test_load_module__is_package(self):
src_code = "src_code"
fullname = "magic_module"
_file = "file"
path = "path"
importer = DummyImporter(is_package=True, get_source=src_code, get_file=_file, get_path=path)
with patch(SYS_MODULES, {}):
mod = importer.load_module(fullname=fullname)
self.assertIs(sys.modules[fullname], mod)
self.assertEqual(mod.__file__, _file)
self.assertIs(mod.__loader__, importer)
self.assertEqual(mod.__path__, path)
self.assertEqual(mod.__package__, fullname)
imp.acquire_lock.assert_called_once()
importer._pre_load_module_hook.assert_called_once_with(fullname=fullname, module=mod)
importer._execute_src_code.assert_called_once_with(src_code=src_code, module=mod)
importer._post_load_module_hook.assert_called_once_with(fullname=fullname, module=mod)
imp.release_lock.assert_called_once()
def test_load_module__is_not_package(self):
src_code = "src_code"
fullname = "magic_module.sub_module"
_file = "file"
importer = DummyImporter(is_package=False, get_source=src_code, get_file=_file)
with patch(SYS_MODULES, {}):
mod = importer.load_module(fullname=fullname)
self.assertIs(sys.modules[fullname], mod)
self.assertEqual(mod.__file__, _file)
self.assertIs(mod.__loader__, importer)
self.assertEqual(mod.__package__, fullname.rpartition(".")[0])
imp.acquire_lock.assert_called_once()
importer._pre_load_module_hook.assert_called_once_with(fullname=fullname, module=mod)
importer._execute_src_code.assert_called_once_with(src_code=src_code, module=mod)
importer._post_load_module_hook.assert_called_once_with(fullname=fullname, module=mod)
imp.release_lock.assert_called_once()
def test_load_module__raise_exception_before_add_module(self):
fullname = "magic_module.sub_module"
importer = DummyImporter(is_package=False)
importer.get_source = MagicMock(side_effect=Exception())
importer._import_error_hook = MagicMock(side_effect=Exception())
with patch(SYS_MODULES, {}):
self.assertRaises(ImportError, importer.load_module, fullname)
self.assertNotIn(fullname, sys.modules)
importer._import_error_hook.assert_called_once()
imp.release_lock.assert_called_once()
def test_load_module__raise_exception_after_add_module(self):
fullname = "magic_module.sub_module"
importer = DummyImporter(is_package=False)
importer.get_file = MagicMock(side_effect=Exception())
with patch(SYS_MODULES, {}):
self.assertRaises(ImportError, importer.load_module, fullname)
self.assertNotIn(fullname, sys.modules)
importer._import_error_hook.assert_called_once()
imp.release_lock.assert_called_once()
|
en
| 0.861578
|
# -*- coding: utf-8 -*- Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available. Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved. Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://opensource.org/licenses/MIT Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. # noqa # noqa
| 1.908101
| 2
|
scp/plugins/user/paste.py
|
ALiwoto/SCP-5170
| 2
|
6628360
|
from scp import user
import aiofiles
import os
__PLUGIN__ = 'paste'
__DOC__ = str(
user.md.KanTeXDocument(
user.md.Section(
'Paste Utility',
user.md.SubSection(
'paste',
user.md.Code('(*prefix)paste {content}'),
),
),
),
)
@user.on_message(user.sudo & user.command('paste'))
async def _(_, message: user.types.Message):
text = message.text.split(None, 1)[1] if len(
message.command,
) != 1 else None
if message.reply_to_message:
if message.reply_to_message.text:
text = message.reply_to_message.text
elif (
message.reply_to_message.document
and message.reply_to_message.document.file_size < 2 ** 20 * 10
):
path = await message.reply_to_message.download()
async with aiofiles.open(path, 'r', encoding='UTF-8') as doc:
text = await doc.read()
await doc.close()
os.remove(path)
if not text:
return await message.reply(
user.md.KanTeXDocument(
user.md.Section(
'Error',
user.md.Italic('Paste Failed'),
),
),
quote=True,
)
await message.reply(
user.md.KanTeXDocument(
user.md.Section(
'Paste',
user.md.KeyValueItem(
user.md.Bold('Link'),
await user.netcat('termbin.com', 9999, text),
),
),
),
quote=True,
)
|
from scp import user
import aiofiles
import os
__PLUGIN__ = 'paste'
__DOC__ = str(
user.md.KanTeXDocument(
user.md.Section(
'Paste Utility',
user.md.SubSection(
'paste',
user.md.Code('(*prefix)paste {content}'),
),
),
),
)
@user.on_message(user.sudo & user.command('paste'))
async def _(_, message: user.types.Message):
text = message.text.split(None, 1)[1] if len(
message.command,
) != 1 else None
if message.reply_to_message:
if message.reply_to_message.text:
text = message.reply_to_message.text
elif (
message.reply_to_message.document
and message.reply_to_message.document.file_size < 2 ** 20 * 10
):
path = await message.reply_to_message.download()
async with aiofiles.open(path, 'r', encoding='UTF-8') as doc:
text = await doc.read()
await doc.close()
os.remove(path)
if not text:
return await message.reply(
user.md.KanTeXDocument(
user.md.Section(
'Error',
user.md.Italic('Paste Failed'),
),
),
quote=True,
)
await message.reply(
user.md.KanTeXDocument(
user.md.Section(
'Paste',
user.md.KeyValueItem(
user.md.Bold('Link'),
await user.netcat('termbin.com', 9999, text),
),
),
),
quote=True,
)
|
none
| 1
| 2.455572
| 2
|
|
lista3/Q42.py
|
AlexandrePeBrito/Python
| 0
|
6628361
|
<reponame>AlexandrePeBrito/Python
#Faça um programa que leia uma quantidade indeterminada de números positivos e conte
#quantos deles estão nos seguintes intervalos: [0-25], [26-50], [51-75] e [76-100]. A entrada de
#dados deverá terminar quando for lido um número negativo.
num=int(input("Informe um numero: "))
numeros=[]
cl1=0
cl2=0
cl3=0
cl4=0
#clas[0]=[0-25]
#clas[1]=[26-50]
#clas[2]=[51-75]
#clas[3]=[76-100]
while(num>=0):
numeros.append(num)
num=int(input("\nInforme um numero: "))
for c in range(0,len(numeros)):
if(numeros[c]>=0 and numeros[c]<=25):
cl1+=1
elif(numeros[c]>=26 and numeros[c]<=50):
cl2+=1
elif(numeros[c]>=51 and numeros[c]<=75):
cl3+=1
elif(numeros[c]>=76 and numeros[c]<=100):
cl4+=1
print(f"\nDentre os numeros digitados tem {cl1} numeros entre [0-25], {cl2} numeros entre [26-50], {cl3} numeros entre [51-75], {cl4} numeros entre [76-100]")
|
#Faça um programa que leia uma quantidade indeterminada de números positivos e conte
#quantos deles estão nos seguintes intervalos: [0-25], [26-50], [51-75] e [76-100]. A entrada de
#dados deverá terminar quando for lido um número negativo.
num=int(input("Informe um numero: "))
numeros=[]
cl1=0
cl2=0
cl3=0
cl4=0
#clas[0]=[0-25]
#clas[1]=[26-50]
#clas[2]=[51-75]
#clas[3]=[76-100]
while(num>=0):
numeros.append(num)
num=int(input("\nInforme um numero: "))
for c in range(0,len(numeros)):
if(numeros[c]>=0 and numeros[c]<=25):
cl1+=1
elif(numeros[c]>=26 and numeros[c]<=50):
cl2+=1
elif(numeros[c]>=51 and numeros[c]<=75):
cl3+=1
elif(numeros[c]>=76 and numeros[c]<=100):
cl4+=1
print(f"\nDentre os numeros digitados tem {cl1} numeros entre [0-25], {cl2} numeros entre [26-50], {cl3} numeros entre [51-75], {cl4} numeros entre [76-100]")
|
pt
| 0.869158
|
#Faça um programa que leia uma quantidade indeterminada de números positivos e conte #quantos deles estão nos seguintes intervalos: [0-25], [26-50], [51-75] e [76-100]. A entrada de #dados deverá terminar quando for lido um número negativo. #clas[0]=[0-25] #clas[1]=[26-50] #clas[2]=[51-75] #clas[3]=[76-100]
| 3.819633
| 4
|
tests/utils/test_concurrent.py
|
fpacifici/snuba
| 0
|
6628362
|
import threading
import time
import pytest
from concurrent.futures import TimeoutError
from snuba.utils.concurrent import Synchronized, execute
def test_execute() -> None:
assert execute(threading.current_thread).result() != threading.current_thread()
with pytest.raises(ZeroDivisionError):
assert execute(lambda: 1 / 0).result()
with pytest.raises(TimeoutError):
assert execute(lambda: time.sleep(10), daemon=True).result(timeout=0)
def test_synchronized() -> None:
value = object()
wrapper = Synchronized(value)
with wrapper.get() as wrapped:
assert wrapped is value
wrapper.set(object())
with wrapper.get() as wrapped:
assert wrapped is not value
wrapper.set(value)
with wrapper.get() as wrapped:
assert wrapped is value
|
import threading
import time
import pytest
from concurrent.futures import TimeoutError
from snuba.utils.concurrent import Synchronized, execute
def test_execute() -> None:
assert execute(threading.current_thread).result() != threading.current_thread()
with pytest.raises(ZeroDivisionError):
assert execute(lambda: 1 / 0).result()
with pytest.raises(TimeoutError):
assert execute(lambda: time.sleep(10), daemon=True).result(timeout=0)
def test_synchronized() -> None:
value = object()
wrapper = Synchronized(value)
with wrapper.get() as wrapped:
assert wrapped is value
wrapper.set(object())
with wrapper.get() as wrapped:
assert wrapped is not value
wrapper.set(value)
with wrapper.get() as wrapped:
assert wrapped is value
|
none
| 1
| 2.321411
| 2
|
|
ambari-common/src/main/python/resource_management/core/providers/system.py
|
nexr/ambari
| 1
|
6628363
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
from __future__ import with_statement
import re
import grp
import os
import pwd
import time
from resource_management.core import shell
from resource_management.core import sudo
from resource_management.core.base import Fail
from resource_management.core import ExecuteTimeoutException
from resource_management.core.providers import Provider
from resource_management.core.logger import Logger
def _coerce_uid(user):
try:
uid = int(user)
except ValueError:
try:
uid = pwd.getpwnam(user).pw_uid
except KeyError:
raise Fail("User %s doesn't exist." % user)
return uid
def _coerce_gid(group):
try:
gid = int(group)
except ValueError:
try:
gid = grp.getgrnam(group).gr_gid
except KeyError:
raise Fail("Group %s doesn't exist." % group)
return gid
def _ensure_metadata(path, user, group, mode=None, cd_access=None):
stat = sudo.stat(path)
if user:
uid = _coerce_uid(user)
if stat.st_uid != uid:
Logger.info(
"Changing owner for %s from %d to %s" % (path, stat.st_uid, user))
sudo.chown(path, user, None)
if group:
gid = _coerce_gid(group)
if stat.st_gid != gid:
Logger.info(
"Changing group for %s from %d to %s" % (path, stat.st_gid, group))
sudo.chown(path, None, group)
if mode:
if stat.st_mode != mode:
Logger.info("Changing permission for %s from %o to %o" % (
path, stat.st_mode, mode))
sudo.chmod(path, mode)
if cd_access:
if not re.match("^[ugoa]+$", cd_access):
raise Fail("'cd_acess' value '%s' is not valid" % (cd_access))
dir_path = path
while dir_path != os.sep:
if sudo.path_isdir(dir_path):
sudo.chmod_extended(dir_path, cd_access+"+x")
dir_path = os.path.split(dir_path)[0]
class FileProvider(Provider):
def action_create(self):
path = self.resource.path
if sudo.path_isdir(path):
raise Fail("Applying %s failed, directory with name %s exists" % (self.resource, path))
dirname = os.path.dirname(path)
if not sudo.path_isdir(dirname):
raise Fail("Applying %s failed, parent directory %s doesn't exist" % (self.resource, dirname))
write = False
content = self._get_content()
if not sudo.path_exists(path):
write = True
reason = "it doesn't exist"
elif self.resource.replace:
if content is not None:
old_content = sudo.read_file(path, encoding=self.resource.encoding)
if content != old_content:
write = True
reason = "contents don't match"
if self.resource.backup:
self.resource.env.backup_file(path)
if write:
Logger.info("Writing %s because %s" % (self.resource, reason))
sudo.create_file(path, content, encoding=self.resource.encoding)
_ensure_metadata(self.resource.path, self.resource.owner,
self.resource.group, mode=self.resource.mode, cd_access=self.resource.cd_access)
def action_delete(self):
path = self.resource.path
if sudo.path_isdir(path):
raise Fail("Applying %s failed, %s is directory not file!" % (self.resource, path))
if sudo.path_exists(path):
Logger.info("Deleting %s" % self.resource)
sudo.unlink(path)
def _get_content(self):
content = self.resource.content
if content is None:
return None
elif isinstance(content, basestring):
return content
elif hasattr(content, "__call__"):
return content()
raise Fail("Unknown source type for %s: %r" % (self, content))
class DirectoryProvider(Provider):
def action_create(self):
path = self.resource.path
if not sudo.path_exists(path):
Logger.info("Creating directory %s" % self.resource)
if self.resource.recursive:
if self.resource.recursive_permission:
DirectoryProvider.makedirs_and_set_permission_recursively(path, self.resource.owner,
self.resource.group, self.resource.mode)
else:
sudo.makedirs(path, self.resource.mode or 0755)
else:
dirname = os.path.dirname(path)
if not sudo.path_isdir(dirname):
raise Fail("Applying %s failed, parent directory %s doesn't exist" % (self.resource, dirname))
sudo.makedir(path, self.resource.mode or 0755)
if not sudo.path_isdir(path):
raise Fail("Applying %s failed, file %s already exists" % (self.resource, path))
_ensure_metadata(path, self.resource.owner, self.resource.group,
mode=self.resource.mode, cd_access=self.resource.cd_access)
@staticmethod
def makedirs_and_set_permission_recursively(path, owner, group, mode):
folders=[]
path,folder=os.path.split(path)
while folder!="":
folders.append(folder)
path,folder=os.path.split(path)
if path!="":
folders.append(path)
folders.reverse()
dir_prefix=""
for folder in folders:
dir_prefix=os.path.join(dir_prefix, folder)
if not sudo.path_exists(dir_prefix):
sudo.makedir(dir_prefix, mode or 0755)
_ensure_metadata(dir_prefix, None, None, mode)
def action_delete(self):
path = self.resource.path
if sudo.path_exists(path):
if not sudo.path_isdir(path):
raise Fail("Applying %s failed, %s is not a directory" % (self.resource, path))
Logger.info("Removing directory %s and all its content" % self.resource)
sudo.rmtree(path)
class LinkProvider(Provider):
def action_create(self):
path = self.resource.path
if sudo.path_lexists(path):
oldpath = os.path.realpath(path)
if oldpath == self.resource.to:
return
if not sudo.path_lexists(path):
raise Fail(
"%s trying to create a symlink with the same name as an existing file or directory" % self.resource)
Logger.info("%s replacing old symlink to %s" % (self.resource, oldpath))
sudo.unlink(path)
if self.resource.hard:
if not sudo.path_exists(self.resource.to):
raise Fail("Failed to apply %s, linking to nonexistent location %s" % (self.resource, self.resource.to))
if sudo.path_isdir(self.resource.to):
raise Fail("Failed to apply %s, cannot create hard link to a directory (%s)" % (self.resource, self.resource.to))
Logger.info("Creating hard %s" % self.resource)
sudo.link(self.resource.to, path)
else:
if not sudo.path_exists(self.resource.to):
Logger.info("Warning: linking to nonexistent location %s" % self.resource.to)
Logger.info("Creating symbolic %s to %s" % (self.resource, self.resource.to))
sudo.symlink(self.resource.to, path)
def action_delete(self):
path = self.resource.path
if sudo.path_exists(path):
Logger.info("Deleting %s" % self.resource)
sudo.unlink(path)
def _preexec_fn(resource):
def preexec():
if resource.group:
gid = _coerce_gid(resource.group)
os.setgid(gid)
os.setegid(gid)
return preexec
class ExecuteProvider(Provider):
def action_run(self):
if self.resource.creates:
if sudo.path_exists(self.resource.creates):
Logger.info("Skipping %s due to creates" % self.resource)
return
env = self.resource.environment
for i in range (0, self.resource.tries):
try:
shell.checked_call(self.resource.command, logoutput=self.resource.logoutput,
cwd=self.resource.cwd, env=env,
preexec_fn=_preexec_fn(self.resource), user=self.resource.user,
wait_for_finish=self.resource.wait_for_finish,
timeout=self.resource.timeout,
path=self.resource.path,
sudo=self.resource.sudo,
on_new_line=self.resource.on_new_line)
break
except Fail as ex:
if i == self.resource.tries-1: # last try
raise ex
else:
Logger.info("Retrying after %d seconds. Reason: %s" % (self.resource.try_sleep, str(ex)))
time.sleep(self.resource.try_sleep)
except ExecuteTimeoutException:
err_msg = ("Execution of '%s' was killed due timeout after %d seconds") % (self.resource.command, self.resource.timeout)
if self.resource.on_timeout:
Logger.info("Executing '%s'. Reason: %s" % (self.resource.on_timeout, err_msg))
shell.checked_call(self.resource.on_timeout)
else:
raise Fail(err_msg)
class ExecuteScriptProvider(Provider):
def action_run(self):
from tempfile import NamedTemporaryFile
Logger.info("Running script %s" % self.resource)
with NamedTemporaryFile(prefix="resource_management-script", bufsize=0) as tf:
tf.write(self.resource.code)
tf.flush()
_ensure_metadata(tf.name, self.resource.user, self.resource.group)
shell.call([self.resource.interpreter, tf.name],
cwd=self.resource.cwd, env=self.resource.environment,
preexec_fn=_preexec_fn(self.resource))
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
from __future__ import with_statement
import re
import grp
import os
import pwd
import time
from resource_management.core import shell
from resource_management.core import sudo
from resource_management.core.base import Fail
from resource_management.core import ExecuteTimeoutException
from resource_management.core.providers import Provider
from resource_management.core.logger import Logger
def _coerce_uid(user):
try:
uid = int(user)
except ValueError:
try:
uid = pwd.getpwnam(user).pw_uid
except KeyError:
raise Fail("User %s doesn't exist." % user)
return uid
def _coerce_gid(group):
try:
gid = int(group)
except ValueError:
try:
gid = grp.getgrnam(group).gr_gid
except KeyError:
raise Fail("Group %s doesn't exist." % group)
return gid
def _ensure_metadata(path, user, group, mode=None, cd_access=None):
stat = sudo.stat(path)
if user:
uid = _coerce_uid(user)
if stat.st_uid != uid:
Logger.info(
"Changing owner for %s from %d to %s" % (path, stat.st_uid, user))
sudo.chown(path, user, None)
if group:
gid = _coerce_gid(group)
if stat.st_gid != gid:
Logger.info(
"Changing group for %s from %d to %s" % (path, stat.st_gid, group))
sudo.chown(path, None, group)
if mode:
if stat.st_mode != mode:
Logger.info("Changing permission for %s from %o to %o" % (
path, stat.st_mode, mode))
sudo.chmod(path, mode)
if cd_access:
if not re.match("^[ugoa]+$", cd_access):
raise Fail("'cd_acess' value '%s' is not valid" % (cd_access))
dir_path = path
while dir_path != os.sep:
if sudo.path_isdir(dir_path):
sudo.chmod_extended(dir_path, cd_access+"+x")
dir_path = os.path.split(dir_path)[0]
class FileProvider(Provider):
def action_create(self):
path = self.resource.path
if sudo.path_isdir(path):
raise Fail("Applying %s failed, directory with name %s exists" % (self.resource, path))
dirname = os.path.dirname(path)
if not sudo.path_isdir(dirname):
raise Fail("Applying %s failed, parent directory %s doesn't exist" % (self.resource, dirname))
write = False
content = self._get_content()
if not sudo.path_exists(path):
write = True
reason = "it doesn't exist"
elif self.resource.replace:
if content is not None:
old_content = sudo.read_file(path, encoding=self.resource.encoding)
if content != old_content:
write = True
reason = "contents don't match"
if self.resource.backup:
self.resource.env.backup_file(path)
if write:
Logger.info("Writing %s because %s" % (self.resource, reason))
sudo.create_file(path, content, encoding=self.resource.encoding)
_ensure_metadata(self.resource.path, self.resource.owner,
self.resource.group, mode=self.resource.mode, cd_access=self.resource.cd_access)
def action_delete(self):
path = self.resource.path
if sudo.path_isdir(path):
raise Fail("Applying %s failed, %s is directory not file!" % (self.resource, path))
if sudo.path_exists(path):
Logger.info("Deleting %s" % self.resource)
sudo.unlink(path)
def _get_content(self):
content = self.resource.content
if content is None:
return None
elif isinstance(content, basestring):
return content
elif hasattr(content, "__call__"):
return content()
raise Fail("Unknown source type for %s: %r" % (self, content))
class DirectoryProvider(Provider):
def action_create(self):
path = self.resource.path
if not sudo.path_exists(path):
Logger.info("Creating directory %s" % self.resource)
if self.resource.recursive:
if self.resource.recursive_permission:
DirectoryProvider.makedirs_and_set_permission_recursively(path, self.resource.owner,
self.resource.group, self.resource.mode)
else:
sudo.makedirs(path, self.resource.mode or 0755)
else:
dirname = os.path.dirname(path)
if not sudo.path_isdir(dirname):
raise Fail("Applying %s failed, parent directory %s doesn't exist" % (self.resource, dirname))
sudo.makedir(path, self.resource.mode or 0755)
if not sudo.path_isdir(path):
raise Fail("Applying %s failed, file %s already exists" % (self.resource, path))
_ensure_metadata(path, self.resource.owner, self.resource.group,
mode=self.resource.mode, cd_access=self.resource.cd_access)
@staticmethod
def makedirs_and_set_permission_recursively(path, owner, group, mode):
folders=[]
path,folder=os.path.split(path)
while folder!="":
folders.append(folder)
path,folder=os.path.split(path)
if path!="":
folders.append(path)
folders.reverse()
dir_prefix=""
for folder in folders:
dir_prefix=os.path.join(dir_prefix, folder)
if not sudo.path_exists(dir_prefix):
sudo.makedir(dir_prefix, mode or 0755)
_ensure_metadata(dir_prefix, None, None, mode)
def action_delete(self):
path = self.resource.path
if sudo.path_exists(path):
if not sudo.path_isdir(path):
raise Fail("Applying %s failed, %s is not a directory" % (self.resource, path))
Logger.info("Removing directory %s and all its content" % self.resource)
sudo.rmtree(path)
class LinkProvider(Provider):
def action_create(self):
path = self.resource.path
if sudo.path_lexists(path):
oldpath = os.path.realpath(path)
if oldpath == self.resource.to:
return
if not sudo.path_lexists(path):
raise Fail(
"%s trying to create a symlink with the same name as an existing file or directory" % self.resource)
Logger.info("%s replacing old symlink to %s" % (self.resource, oldpath))
sudo.unlink(path)
if self.resource.hard:
if not sudo.path_exists(self.resource.to):
raise Fail("Failed to apply %s, linking to nonexistent location %s" % (self.resource, self.resource.to))
if sudo.path_isdir(self.resource.to):
raise Fail("Failed to apply %s, cannot create hard link to a directory (%s)" % (self.resource, self.resource.to))
Logger.info("Creating hard %s" % self.resource)
sudo.link(self.resource.to, path)
else:
if not sudo.path_exists(self.resource.to):
Logger.info("Warning: linking to nonexistent location %s" % self.resource.to)
Logger.info("Creating symbolic %s to %s" % (self.resource, self.resource.to))
sudo.symlink(self.resource.to, path)
def action_delete(self):
path = self.resource.path
if sudo.path_exists(path):
Logger.info("Deleting %s" % self.resource)
sudo.unlink(path)
def _preexec_fn(resource):
def preexec():
if resource.group:
gid = _coerce_gid(resource.group)
os.setgid(gid)
os.setegid(gid)
return preexec
class ExecuteProvider(Provider):
def action_run(self):
if self.resource.creates:
if sudo.path_exists(self.resource.creates):
Logger.info("Skipping %s due to creates" % self.resource)
return
env = self.resource.environment
for i in range (0, self.resource.tries):
try:
shell.checked_call(self.resource.command, logoutput=self.resource.logoutput,
cwd=self.resource.cwd, env=env,
preexec_fn=_preexec_fn(self.resource), user=self.resource.user,
wait_for_finish=self.resource.wait_for_finish,
timeout=self.resource.timeout,
path=self.resource.path,
sudo=self.resource.sudo,
on_new_line=self.resource.on_new_line)
break
except Fail as ex:
if i == self.resource.tries-1: # last try
raise ex
else:
Logger.info("Retrying after %d seconds. Reason: %s" % (self.resource.try_sleep, str(ex)))
time.sleep(self.resource.try_sleep)
except ExecuteTimeoutException:
err_msg = ("Execution of '%s' was killed due timeout after %d seconds") % (self.resource.command, self.resource.timeout)
if self.resource.on_timeout:
Logger.info("Executing '%s'. Reason: %s" % (self.resource.on_timeout, err_msg))
shell.checked_call(self.resource.on_timeout)
else:
raise Fail(err_msg)
class ExecuteScriptProvider(Provider):
  """
  Provider that writes a script resource's code to a temporary file, fixes
  its ownership, and executes it with the configured interpreter.
  """

  def action_run(self):
    from tempfile import NamedTemporaryFile

    Logger.info("Running script %s" % self.resource)
    # Unbuffered temp file so the script body is on disk before execution.
    with NamedTemporaryFile(prefix="resource_management-script", bufsize=0) as script_file:
      script_file.write(self.resource.code)
      script_file.flush()

      _ensure_metadata(script_file.name, self.resource.user, self.resource.group)
      command = [self.resource.interpreter, script_file.name]
      shell.call(command,
                 cwd=self.resource.cwd, env=self.resource.environment,
                 preexec_fn=_preexec_fn(self.resource))
|
en
| 0.853581
|
#!/usr/bin/env python Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Ambari Agent # last try
| 1.801901
| 2
|
twisted/names/test/test_dns.py
|
linxuping/twisted
| 3
|
6628364
|
<reponame>linxuping/twisted
# test-case-name: twisted.names.test.test_dns
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for twisted.names.dns.
"""
from __future__ import division, absolute_import
from io import BytesIO
import struct
from zope.interface.verify import verifyClass
from twisted.python.failure import Failure
from twisted.python.util import FancyEqMixin, FancyStrMixin
from twisted.internet import address, task
from twisted.internet.error import CannotListenError, ConnectionDone
from twisted.trial import unittest
from twisted.names import dns
from twisted.test import proto_helpers
from twisted.test.testutils import ComparisonTestsMixin
# Every concrete resource-record class exercised by the generic tests below
# (e.g. the hashability test); UnknownRecord is the catch-all used for
# unrecognized RR types.
RECORD_TYPES = [
    dns.Record_NS, dns.Record_MD, dns.Record_MF, dns.Record_CNAME,
    dns.Record_MB, dns.Record_MG, dns.Record_MR, dns.Record_PTR,
    dns.Record_DNAME, dns.Record_A, dns.Record_SOA, dns.Record_NULL,
    dns.Record_WKS, dns.Record_SRV, dns.Record_AFSDB, dns.Record_RP,
    dns.Record_HINFO, dns.Record_MINFO, dns.Record_MX, dns.Record_TXT,
    dns.Record_AAAA, dns.Record_A6, dns.Record_NAPTR, dns.UnknownRecord,
    ]
class Ord2ByteTests(unittest.TestCase):
    """
    Tests for L{dns._ord2bytes}.
    """
    def test_ord2byte(self):
        """
        Given an integer, L{dns._ord2bytes} produces a length-one byte string
        whose single byte has that ordinal value.
        """
        result = dns._ord2bytes(0x10)
        self.assertEqual(result, b'\x10')
class Str2TimeTests(unittest.TestCase):
    """
    Tests for L{dns.str2time}, which converts suffixed duration strings into
    a number of seconds.
    """
    def test_nonString(self):
        """
        A non-string argument is handed back by L{dns.str2time} unchanged.
        """
        marker = object()
        self.assertIs(dns.str2time(marker), marker)


    def test_seconds(self):
        """
        The C{"S"} suffix denotes plain seconds, so C{"10S"} means C{10}
        seconds.
        """
        self.assertEqual(dns.str2time("10S"), 10)


    def test_minutes(self):
        """
        The C{"M"} suffix scales the value by C{60}, the number of seconds in
        a minute.
        """
        self.assertEqual(dns.str2time("2M"), 2 * 60)


    def test_hours(self):
        """
        The C{"H"} suffix scales the value by C{3600}, the number of seconds
        in an hour.
        """
        self.assertEqual(dns.str2time("3H"), 3 * 3600)


    def test_days(self):
        """
        The C{"D"} suffix scales the value by C{86400}, the number of seconds
        in a day.
        """
        self.assertEqual(dns.str2time("4D"), 4 * 86400)


    def test_weeks(self):
        """
        The C{"W"} suffix scales the value by C{604800}, the number of
        seconds in a week.
        """
        self.assertEqual(dns.str2time("5W"), 5 * 604800)


    def test_years(self):
        """
        The C{"Y"} suffix scales the value by C{31536000}, the number of
        seconds in a (365-day) year.
        """
        self.assertEqual(dns.str2time("6Y"), 6 * 31536000)


    def test_invalidPrefix(self):
        """
        A value whose prefix is not an integer makes L{dns.str2time} raise
        L{ValueError}.
        """
        self.assertRaises(ValueError, dns.str2time, "fooS")
class NameTests(unittest.TestCase):
    """
    Tests for L{Name}, the representation of a single domain name with support
    for encoding into and decoding from DNS message format.
    """
    def test_nonStringName(self):
        """
        When constructed with a name which is neither C{bytes} nor C{str},
        L{Name} raises L{TypeError}.
        """
        self.assertRaises(TypeError, dns.Name, 123)
        self.assertRaises(TypeError, dns.Name, object())
        self.assertRaises(TypeError, dns.Name, [])


    def test_unicodeName(self):
        """
        L{dns.Name} automatically encodes unicode domain name using C{idna}
        encoding.
        """
        name = dns.Name(u'\u00e9chec.example.org')
        self.assertIsInstance(name.name, bytes)
        self.assertEqual(b'xn--chec-9oa.example.org', name.name)


    def test_decode(self):
        """
        L{Name.decode} populates the L{Name} instance with name information read
        from the file-like object passed to it.
        """
        n = dns.Name()
        n.decode(BytesIO(b"\x07example\x03com\x00"))
        self.assertEqual(n.name, b"example.com")


    def test_encode(self):
        """
        L{Name.encode} encodes its name information and writes it to the
        file-like object passed to it.
        """
        name = dns.Name(b"foo.example.com")
        stream = BytesIO()
        name.encode(stream)
        self.assertEqual(stream.getvalue(), b"\x03foo\x07example\x03com\x00")


    def test_encodeWithCompression(self):
        """
        If a compression dictionary is passed to it, L{Name.encode} uses offset
        information from it to encode its name with references to existing
        labels in the stream instead of including another copy of them in the
        output.  It also updates the compression dictionary with the location of
        the name it writes to the stream.
        """
        name = dns.Name(b"foo.example.com")
        compression = {b"example.com": 0x17}

        # Some bytes already encoded into the stream for this message
        previous = b"some prefix to change .tell()"
        stream = BytesIO()
        stream.write(previous)

        # The position at which the encoded form of this new name will appear in
        # the stream.
        expected = len(previous) + dns.Message.headerSize
        name.encode(stream, compression)
        self.assertEqual(
            b"\x03foo\xc0\x17",
            stream.getvalue()[len(previous):])
        self.assertEqual(
            {b"example.com": 0x17, b"foo.example.com": expected},
            compression)


    def test_unknown(self):
        """
        A resource record of unknown type and class is parsed into an
        L{UnknownRecord} instance with its data preserved, and an
        L{UnknownRecord} instance is serialized to a string equal to the one it
        was parsed from.
        """
        wire = (
            b'\x01\x00' # Message ID
            b'\x00' # answer bit, opCode nibble, auth bit, trunc bit, recursive
                    # bit
            b'\x00' # recursion bit, empty bit, authenticData bit,
                    # checkingDisabled bit, response code nibble
            b'\x00\x01' # number of queries
            b'\x00\x01' # number of answers
            b'\x00\x00' # number of authorities
            b'\x00\x01' # number of additionals

            # query
            b'\x03foo\x03bar\x00' # foo.bar
            b'\xde\xad' # type=0xdead
            b'\xbe\xef' # cls=0xbeef

            # 1st answer
            b'\xc0\x0c' # foo.bar - compressed
            b'\xde\xad' # type=0xdead
            b'\xbe\xef' # cls=0xbeef
            b'\x00\x00\x01\x01' # ttl=257
            b'\x00\x08somedata' # some payload data

            # 1st additional
            b'\x03baz\x03ban\x00' # baz.ban
            b'\x00\x01' # type=A
            b'\x00\x01' # cls=IN
            b'\x00\x00\x01\x01' # ttl=257
            b'\x00\x04' # len=4
            b'\x01\x02\x03\x04' # 1.2.3.4
            )

        msg = dns.Message()
        msg.fromStr(wire)

        self.assertEqual(msg.queries, [
                dns.Query(b'foo.bar', type=0xdead, cls=0xbeef),
                ])
        self.assertEqual(msg.answers, [
                dns.RRHeader(b'foo.bar', type=0xdead, cls=0xbeef, ttl=257,
                             payload=dns.UnknownRecord(b'somedata', ttl=257)),
                ])
        # The additional record's four RDATA bytes (\x01\x02\x03\x04) decode
        # to the IPv4 address 1.2.3.4, so the expected Record_A must use that
        # address; otherwise both this comparison and the exact re-encoding
        # check below would fail.
        self.assertEqual(msg.additional, [
                dns.RRHeader(b'baz.ban', type=dns.A, cls=dns.IN, ttl=257,
                             payload=dns.Record_A('1.2.3.4', ttl=257)),
                ])

        enc = msg.toStr()

        self.assertEqual(enc, wire)


    def test_decodeWithCompression(self):
        """
        If the leading byte of an encoded label (in bytes read from a stream
        passed to L{Name.decode}) has its two high bits set, the next byte is
        treated as a pointer to another label in the stream and that label is
        included in the name being decoded.
        """
        # Slightly modified version of the example from RFC 1035, section 4.1.4.
        stream = BytesIO(
            b"x" * 20 +
            b"\x01f\x03isi\x04arpa\x00"
            b"\x03foo\xc0\x14"
            b"\x03bar\xc0\x20")
        stream.seek(20)
        name = dns.Name()
        name.decode(stream)
        # Verify we found the first name in the stream and that the stream
        # position is left at the first byte after the decoded name.
        self.assertEqual(b"f.isi.arpa", name.name)
        self.assertEqual(32, stream.tell())

        # Get the second name from the stream and make the same assertions.
        name.decode(stream)
        self.assertEqual(name.name, b"foo.f.isi.arpa")
        self.assertEqual(38, stream.tell())

        # Get the third and final name
        name.decode(stream)
        self.assertEqual(name.name, b"bar.foo.f.isi.arpa")
        self.assertEqual(44, stream.tell())


    def test_rejectCompressionLoop(self):
        """
        L{Name.decode} raises L{ValueError} if the stream passed to it includes
        a compression pointer which forms a loop, causing the name to be
        undecodable.
        """
        name = dns.Name()
        stream = BytesIO(b"\xc0\x00")
        self.assertRaises(ValueError, name.decode, stream)
class RoundtripDNSTestCase(unittest.TestCase):
    """
    Encoding and then decoding various objects.

    Each test serializes an instance to a byte stream and deserializes it
    into a fresh instance, asserting the round trip preserves all fields.
    """
    # Domain-name fixtures reused across the round-trip tests below.
    names = [b"example.org", b"go-away.fish.tv", b"23strikesback.net"]

    def testName(self):
        # A Name must survive an encode/decode round trip unchanged.
        for n in self.names:
            # encode the name
            f = BytesIO()
            dns.Name(n).encode(f)

            # decode the name
            f.seek(0, 0)
            result = dns.Name()
            result.decode(f)
            self.assertEqual(result.name, n)


    def test_query(self):
        """
        L{dns.Query.encode} returns a byte string representing the fields of the
        query which can be decoded into a new L{dns.Query} instance using
        L{dns.Query.decode}.
        """
        # Exercise the cross product of names, types 1-16 and classes 1-4.
        for n in self.names:
            for dnstype in range(1, 17):
                for dnscls in range(1, 5):
                    # encode the query
                    f = BytesIO()
                    dns.Query(n, dnstype, dnscls).encode(f)

                    # decode the result
                    f.seek(0, 0)
                    result = dns.Query()
                    result.decode(f)
                    self.assertEqual(result.name.name, n)
                    self.assertEqual(result.type, dnstype)
                    self.assertEqual(result.cls, dnscls)


    def test_resourceRecordHeader(self):
        """
        L{dns.RRHeader.encode} encodes the record header's information and
        writes it to the file-like object passed to it and
        L{dns.RRHeader.decode} reads from a file-like object to re-construct a
        L{dns.RRHeader} instance.
        """
        # encode the RR
        f = BytesIO()
        dns.RRHeader(b"test.org", 3, 4, 17).encode(f)

        # decode the result
        f.seek(0, 0)
        result = dns.RRHeader()
        result.decode(f)
        self.assertEqual(result.name, dns.Name(b"test.org"))
        self.assertEqual(result.type, 3)
        self.assertEqual(result.cls, 4)
        self.assertEqual(result.ttl, 17)


    def test_resources(self):
        """
        L{dns.SimpleRecord.encode} encodes the record's name information and
        writes it to the file-like object passed to it and
        L{dns.SimpleRecord.decode} reads from a file-like object to re-construct
        a L{dns.SimpleRecord} instance.
        """
        # Names chosen to exercise compression, case preservation and the
        # single-label edge case.
        names = (
            b"this.are.test.name",
            b"will.compress.will.this.will.name.will.hopefully",
            b"test.CASE.preSErVatIOn.YeAH",
            b"a.s.h.o.r.t.c.a.s.e.t.o.t.e.s.t",
            b"singleton"
        )
        for s in names:
            f = BytesIO()
            dns.SimpleRecord(s).encode(f)
            f.seek(0, 0)
            result = dns.SimpleRecord()
            result.decode(f)
            self.assertEqual(result.name, dns.Name(s))


    def test_hashable(self):
        """
        Instances of all record types are hashable.
        """
        # Two default-constructed instances of each type must hash equally.
        for k in RECORD_TYPES:
            k1, k2 = k(), k()
            hk1 = hash(k1)
            hk2 = hash(k2)
            self.assertEqual(hk1, hk2, "%s != %s (for %s)" % (hk1,hk2,k))


    def test_Charstr(self):
        """
        Test L{dns.Charstr} encode and decode.
        """
        for n in self.names:
            # encode the name
            f = BytesIO()
            dns.Charstr(n).encode(f)

            # decode the name
            f.seek(0, 0)
            result = dns.Charstr()
            result.decode(f)
            self.assertEqual(result.string, n)


    def _recordRoundtripTest(self, record):
        """
        Assert that encoding C{record} and then decoding the resulting bytes
        creates a record which compares equal to C{record}.
        """
        stream = BytesIO()
        record.encode(stream)

        # Decode needs the encoded length, so capture it before rewinding.
        length = stream.tell()
        stream.seek(0, 0)
        replica = record.__class__()
        replica.decode(stream, length)
        self.assertEqual(record, replica)


    def test_SOA(self):
        """
        The byte stream written by L{dns.Record_SOA.encode} can be used by
        L{dns.Record_SOA.decode} to reconstruct the state of the original
        L{dns.Record_SOA} instance.
        """
        self._recordRoundtripTest(
            dns.Record_SOA(
                mname=b'foo', rname=b'bar', serial=12, refresh=34,
                retry=56, expire=78, minimum=90))


    def test_A(self):
        """
        The byte stream written by L{dns.Record_A.encode} can be used by
        L{dns.Record_A.decode} to reconstruct the state of the original
        L{dns.Record_A} instance.
        """
        self._recordRoundtripTest(dns.Record_A('172.16.31.10'))


    def test_NULL(self):
        """
        The byte stream written by L{dns.Record_NULL.encode} can be used by
        L{dns.Record_NULL.decode} to reconstruct the state of the original
        L{dns.Record_NULL} instance.
        """
        self._recordRoundtripTest(dns.Record_NULL(b'foo bar'))


    def test_WKS(self):
        """
        The byte stream written by L{dns.Record_WKS.encode} can be used by
        L{dns.Record_WKS.decode} to reconstruct the state of the original
        L{dns.Record_WKS} instance.
        """
        self._recordRoundtripTest(dns.Record_WKS('172.16.31.10', 3, b'xyz'))


    def test_AAAA(self):
        """
        The byte stream written by L{dns.Record_AAAA.encode} can be used by
        L{dns.Record_AAAA.decode} to reconstruct the state of the original
        L{dns.Record_AAAA} instance.
        """
        self._recordRoundtripTest(dns.Record_AAAA('::1'))


    def test_A6(self):
        """
        The byte stream written by L{dns.Record_A6.encode} can be used by
        L{dns.Record_A6.decode} to reconstruct the state of the original
        L{dns.Record_A6} instance.
        """
        self._recordRoundtripTest(dns.Record_A6(8, '::1:2', b'foo'))


    def test_SRV(self):
        """
        The byte stream written by L{dns.Record_SRV.encode} can be used by
        L{dns.Record_SRV.decode} to reconstruct the state of the original
        L{dns.Record_SRV} instance.
        """
        self._recordRoundtripTest(dns.Record_SRV(
                priority=1, weight=2, port=3, target=b'example.com'))


    def test_NAPTR(self):
        """
        Test L{dns.Record_NAPTR} encode and decode.
        """
        # (order, preference, flags, service, regexp, replacement) tuples.
        naptrs = [
            (100, 10, b"u", b"sip+E2U",
             b"!^.*$!sip:<EMAIL>!", b""),
            (100, 50, b"s", b"http+I2L+I2C+I2R",
             b"", b"_http._tcp.gatech.edu")]

        for (order, preference, flags, service, regexp, replacement) in naptrs:
            rin = dns.Record_NAPTR(order, preference, flags, service, regexp,
                                   replacement)
            e = BytesIO()
            rin.encode(e)
            e.seek(0, 0)
            rout = dns.Record_NAPTR()
            rout.decode(e)
            self.assertEqual(rin.order, rout.order)
            self.assertEqual(rin.preference, rout.preference)
            self.assertEqual(rin.flags, rout.flags)
            self.assertEqual(rin.service, rout.service)
            self.assertEqual(rin.regexp, rout.regexp)
            self.assertEqual(rin.replacement.name, rout.replacement.name)
            self.assertEqual(rin.ttl, rout.ttl)


    def test_AFSDB(self):
        """
        The byte stream written by L{dns.Record_AFSDB.encode} can be used by
        L{dns.Record_AFSDB.decode} to reconstruct the state of the original
        L{dns.Record_AFSDB} instance.
        """
        self._recordRoundtripTest(dns.Record_AFSDB(
                subtype=3, hostname=b'example.com'))


    def test_RP(self):
        """
        The byte stream written by L{dns.Record_RP.encode} can be used by
        L{dns.Record_RP.decode} to reconstruct the state of the original
        L{dns.Record_RP} instance.
        """
        self._recordRoundtripTest(dns.Record_RP(
                mbox=b'alice.example.com', txt=b'example.com'))


    def test_HINFO(self):
        """
        The byte stream written by L{dns.Record_HINFO.encode} can be used by
        L{dns.Record_HINFO.decode} to reconstruct the state of the original
        L{dns.Record_HINFO} instance.
        """
        self._recordRoundtripTest(dns.Record_HINFO(cpu=b'fast', os=b'great'))


    def test_MINFO(self):
        """
        The byte stream written by L{dns.Record_MINFO.encode} can be used by
        L{dns.Record_MINFO.decode} to reconstruct the state of the original
        L{dns.Record_MINFO} instance.
        """
        self._recordRoundtripTest(dns.Record_MINFO(
                rmailbx=b'foo', emailbx=b'bar'))


    def test_MX(self):
        """
        The byte stream written by L{dns.Record_MX.encode} can be used by
        L{dns.Record_MX.decode} to reconstruct the state of the original
        L{dns.Record_MX} instance.
        """
        self._recordRoundtripTest(dns.Record_MX(
                preference=1, name=b'example.com'))


    def test_TXT(self):
        """
        The byte stream written by L{dns.Record_TXT.encode} can be used by
        L{dns.Record_TXT.decode} to reconstruct the state of the original
        L{dns.Record_TXT} instance.
        """
        self._recordRoundtripTest(dns.Record_TXT(b'foo', b'bar'))
# Reference wire images of otherwise-empty DNS messages with exactly one of
# the AD (authentic data) / CD (checking disabled) header bits set.

MESSAGE_AUTHENTIC_DATA_BYTES = (
    b'\x00\x00' # ID
    b'\x00' # QR, opCode, AA, TC, RD (all clear)
    b'\x20' # RA, Z, AD=1, CD, RCODE
    b'\x00\x00' # Query count
    b'\x00\x00' # Answer count
    b'\x00\x00' # Authority count
    b'\x00\x00' # Additional count
    )



MESSAGE_CHECKING_DISABLED_BYTES = (
    b'\x00\x00' # ID
    b'\x00' # QR, opCode, AA, TC, RD (all clear)
    b'\x10' # RA, Z, AD, CD=1, RCODE
    b'\x00\x00' # Query count
    b'\x00\x00' # Answer count
    b'\x00\x00' # Authority count
    b'\x00\x00' # Additional count
    )
class MessageTestCase(unittest.SynchronousTestCase):
    """
    Tests for L{twisted.names.dns.Message}: header-flag handling, repr, and
    whole-message encoding/decoding.
    """

    def test_authenticDataDefault(self):
        """
        L{dns.Message.authenticData} has default value 0.
        """
        self.assertEqual(dns.Message().authenticData, 0)


    def test_authenticDataOverride(self):
        """
        L{dns.Message.__init__} accepts a C{authenticData} argument which
        is assigned to L{dns.Message.authenticData}.
        """
        self.assertEqual(dns.Message(authenticData=1).authenticData, 1)


    def test_authenticDataEncode(self):
        """
        L{dns.Message.toStr} encodes L{dns.Message.authenticData} into
        byte4 of the byte string.
        """
        self.assertEqual(
            dns.Message(authenticData=1).toStr(),
            MESSAGE_AUTHENTIC_DATA_BYTES
        )


    def test_authenticDataDecode(self):
        """
        L{dns.Message.fromStr} decodes byte4 and assigns bit3 to
        L{dns.Message.authenticData}.
        """
        m = dns.Message()
        m.fromStr(MESSAGE_AUTHENTIC_DATA_BYTES)

        self.assertEqual(m.authenticData, 1)


    def test_checkingDisabledDefault(self):
        """
        L{dns.Message.checkingDisabled} has default value 0.
        """
        self.assertEqual(dns.Message().checkingDisabled, 0)


    def test_checkingDisabledOverride(self):
        """
        L{dns.Message.__init__} accepts a C{checkingDisabled} argument which
        is assigned to L{dns.Message.checkingDisabled}.
        """
        self.assertEqual(
            dns.Message(checkingDisabled=1).checkingDisabled, 1)


    def test_checkingDisabledEncode(self):
        """
        L{dns.Message.toStr} encodes L{dns.Message.checkingDisabled} into
        byte4 of the byte string.
        """
        self.assertEqual(
            dns.Message(checkingDisabled=1).toStr(),
            MESSAGE_CHECKING_DISABLED_BYTES
        )


    def test_checkingDisabledDecode(self):
        """
        L{dns.Message.fromStr} decodes byte4 and assigns bit4 to
        L{dns.Message.checkingDisabled}.
        """
        m = dns.Message()
        m.fromStr(MESSAGE_CHECKING_DISABLED_BYTES)

        self.assertEqual(m.checkingDisabled, 1)


    def test_reprDefaults(self):
        """
        L{dns.Message.__repr__} omits field values and sections which are
        identical to their defaults. The id field value is always shown.
        """
        self.assertEqual(
            '<Message id=0>',
            repr(dns.Message())
        )


    def test_reprFlagsIfSet(self):
        """
        L{dns.Message.__repr__} displays flags if they are L{True}.
        """
        m = dns.Message(answer=True, auth=True, trunc=True, recDes=True,
                        recAv=True, authenticData=True, checkingDisabled=True)
        self.assertEqual(
            '<Message '
            'id=0 '
            'flags=answer,auth,trunc,recDes,recAv,authenticData,'
            'checkingDisabled'
            '>',
            repr(m),
        )


    def test_reprNonDefautFields(self):
        """
        L{dns.Message.__repr__} displays field values if they differ from their
        defaults.
        """
        m = dns.Message(id=10, opCode=20, rCode=30, maxSize=40)
        self.assertEqual(
            '<Message '
            'id=10 '
            'opCode=20 '
            'rCode=30 '
            'maxSize=40'
            '>',
            repr(m),
        )


    def test_reprNonDefaultSections(self):
        """
        L{dns.Message.__repr__} displays sections which differ from their
        defaults.
        """
        m = dns.Message()
        m.queries = [1, 2, 3]
        m.answers = [4, 5, 6]
        m.authority = [7, 8, 9]
        m.additional = [10, 11, 12]
        self.assertEqual(
            '<Message '
            'id=0 '
            'queries=[1, 2, 3] '
            'answers=[4, 5, 6] '
            'authority=[7, 8, 9] '
            'additional=[10, 11, 12]'
            '>',
            repr(m),
        )


    def testEmptyMessage(self):
        """
        Test that a message which has been truncated causes an EOFError to
        be raised when it is parsed.
        """
        msg = dns.Message()
        self.assertRaises(EOFError, msg.fromStr, b'')


    def test_emptyQuery(self):
        """
        Test that bytes representing an empty query message can be decoded
        as such.
        """
        msg = dns.Message()
        msg.fromStr(
            b'\x01\x00' # Message ID
            b'\x00' # answer bit, opCode nibble, auth bit, trunc bit, recursive bit
            b'\x00' # recursion bit, empty bit, authenticData bit,
                    # checkingDisabled bit, response code nibble
            b'\x00\x00' # number of queries
            b'\x00\x00' # number of answers
            b'\x00\x00' # number of authorities
            b'\x00\x00' # number of additionals
            )
        self.assertEqual(msg.id, 256)
        self.assertFalse(
            msg.answer, "Message was not supposed to be an answer.")
        self.assertEqual(msg.opCode, dns.OP_QUERY)
        self.assertFalse(
            msg.auth, "Message was not supposed to be authoritative.")
        self.assertFalse(
            msg.trunc, "Message was not supposed to be truncated.")
        self.assertEqual(msg.queries, [])
        self.assertEqual(msg.answers, [])
        self.assertEqual(msg.authority, [])
        self.assertEqual(msg.additional, [])


    def test_NULL(self):
        """
        A I{NULL} record with an arbitrary payload can be encoded and decoded as
        part of a L{dns.Message}.
        """
        # NOTE: local name `bytes` shadows the builtin within this method.
        bytes = b''.join([dns._ord2bytes(i) for i in range(256)])
        rec = dns.Record_NULL(bytes)
        rr = dns.RRHeader(b'testname', dns.NULL, payload=rec)
        msg1 = dns.Message()
        msg1.answers.append(rr)
        s = BytesIO()
        msg1.encode(s)

        s.seek(0, 0)
        msg2 = dns.Message()
        msg2.decode(s)

        self.assertIsInstance(msg2.answers[0].payload, dns.Record_NULL)
        self.assertEqual(msg2.answers[0].payload.payload, bytes)


    def test_lookupRecordTypeDefault(self):
        """
        L{Message.lookupRecordType} returns C{dns.UnknownRecord} if it is
        called with an integer which doesn't correspond to any known record
        type.
        """
        # 65280 is the first value in the range reserved for private
        # use, so it shouldn't ever conflict with an officially
        # allocated value.
        self.assertIs(dns.Message().lookupRecordType(65280), dns.UnknownRecord)


    def test_nonAuthoritativeMessage(self):
        """
        The L{RRHeader} instances created by L{Message} from a non-authoritative
        message are marked as not authoritative.
        """
        buf = BytesIO()
        answer = dns.RRHeader(payload=dns.Record_A('172.16.31.10', ttl=0))
        answer.encode(buf)
        message = dns.Message()
        message.fromStr(
            b'\x01\x00' # Message ID
            # answer bit, opCode nibble, auth bit, trunc bit, recursive bit
            b'\x00'
            # recursion bit, empty bit, authenticData bit,
            # checkingDisabled bit, response code nibble
            b'\x00'
            b'\x00\x00' # number of queries
            b'\x00\x01' # number of answers
            b'\x00\x00' # number of authorities
            b'\x00\x00' # number of additionals
            + buf.getvalue()
            )
        self.assertEqual(message.answers, [answer])
        self.assertFalse(message.answers[0].auth)


    def test_authoritativeMessage(self):
        """
        The L{RRHeader} instances created by L{Message} from an authoritative
        message are marked as authoritative.
        """
        buf = BytesIO()
        answer = dns.RRHeader(payload=dns.Record_A('172.16.31.10', ttl=0))
        answer.encode(buf)
        message = dns.Message()
        message.fromStr(
            b'\x01\x00' # Message ID
            # answer bit, opCode nibble, auth bit, trunc bit, recursive bit
            b'\x04'
            # recursion bit, empty bit, authenticData bit,
            # checkingDisabled bit, response code nibble
            b'\x00'
            b'\x00\x00' # number of queries
            b'\x00\x01' # number of answers
            b'\x00\x00' # number of authorities
            b'\x00\x00' # number of additionals
            + buf.getvalue()
            )
        # The parsed answer should carry the auth flag from the message header.
        answer.auth = True
        self.assertEqual(message.answers, [answer])
        self.assertTrue(message.answers[0].auth)
class MessageComparisonTests(ComparisonTestsMixin,
                             unittest.SynchronousTestCase):
    """
    Tests for the rich comparison of L{dns.Message} instances.

    Each test varies exactly one field/flag/section and checks that equality
    holds for matching values and fails for differing ones.
    """
    def messageFactory(self, *args, **kwargs):
        """
        Create a L{dns.Message}.

        The L{dns.Message} constructor doesn't accept C{queries}, C{answers},
        C{authority}, C{additional} arguments, so we extract them from the
        kwargs supplied to this factory function and assign them to the message.

        @param args: Positional arguments.

        @param kwargs: Keyword arguments.

        @return: A L{dns.Message} instance.
        """
        queries = kwargs.pop('queries', [])
        answers = kwargs.pop('answers', [])
        authority = kwargs.pop('authority', [])
        additional = kwargs.pop('additional', [])

        m = dns.Message(**kwargs)

        if queries:
            m.queries = queries
        if answers:
            m.answers = answers
        if authority:
            m.authority = authority
        if additional:
            m.additional = additional

        return m


    def test_id(self):
        """
        Two L{dns.Message} instances compare equal if they have the same id
        value.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(id=10),
            self.messageFactory(id=10),
            self.messageFactory(id=20),
        )


    def test_answer(self):
        """
        Two L{dns.Message} instances compare equal if they have the same answer
        flag.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(answer=1),
            self.messageFactory(answer=1),
            self.messageFactory(answer=0),
        )


    def test_opCode(self):
        """
        Two L{dns.Message} instances compare equal if they have the same opCode
        value.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(opCode=10),
            self.messageFactory(opCode=10),
            self.messageFactory(opCode=20),
        )


    def test_recDes(self):
        """
        Two L{dns.Message} instances compare equal if they have the same recDes
        flag.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(recDes=1),
            self.messageFactory(recDes=1),
            self.messageFactory(recDes=0),
        )


    def test_recAv(self):
        """
        Two L{dns.Message} instances compare equal if they have the same recAv
        flag.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(recAv=1),
            self.messageFactory(recAv=1),
            self.messageFactory(recAv=0),
        )


    def test_auth(self):
        """
        Two L{dns.Message} instances compare equal if they have the same auth
        flag.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(auth=1),
            self.messageFactory(auth=1),
            self.messageFactory(auth=0),
        )


    def test_rCode(self):
        """
        Two L{dns.Message} instances compare equal if they have the same rCode
        value.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(rCode=10),
            self.messageFactory(rCode=10),
            self.messageFactory(rCode=20),
        )


    def test_trunc(self):
        """
        Two L{dns.Message} instances compare equal if they have the same trunc
        flag.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(trunc=1),
            self.messageFactory(trunc=1),
            self.messageFactory(trunc=0),
        )


    def test_maxSize(self):
        """
        Two L{dns.Message} instances compare equal if they have the same
        maxSize value.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(maxSize=10),
            self.messageFactory(maxSize=10),
            self.messageFactory(maxSize=20),
        )


    def test_authenticData(self):
        """
        Two L{dns.Message} instances compare equal if they have the same
        authenticData flag.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(authenticData=1),
            self.messageFactory(authenticData=1),
            self.messageFactory(authenticData=0),
        )


    def test_checkingDisabled(self):
        """
        Two L{dns.Message} instances compare equal if they have the same
        checkingDisabled flag.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(checkingDisabled=1),
            self.messageFactory(checkingDisabled=1),
            self.messageFactory(checkingDisabled=0),
        )


    def test_queries(self):
        """
        Two L{dns.Message} instances compare equal if they have the same
        queries.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(queries=[dns.Query(b'example.com')]),
            self.messageFactory(queries=[dns.Query(b'example.com')]),
            self.messageFactory(queries=[dns.Query(b'example.org')]),
        )


    def test_answers(self):
        """
        Two L{dns.Message} instances compare equal if they have the same
        answers.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(answers=[dns.RRHeader(
                b'example.com', payload=dns.Record_A('172.16.31.10'))]),
            self.messageFactory(answers=[dns.RRHeader(
                b'example.com', payload=dns.Record_A('172.16.31.10'))]),
            self.messageFactory(answers=[dns.RRHeader(
                b'example.org', payload=dns.Record_A('172.16.58.3'))]),
        )


    def test_authority(self):
        """
        Two L{dns.Message} instances compare equal if they have the same
        authority records.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(authority=[dns.RRHeader(
                b'example.com',
                type=dns.SOA, payload=dns.Record_SOA())]),
            self.messageFactory(authority=[dns.RRHeader(
                b'example.com',
                type=dns.SOA, payload=dns.Record_SOA())]),
            self.messageFactory(authority=[dns.RRHeader(
                b'example.org',
                type=dns.SOA, payload=dns.Record_SOA())]),
        )


    def test_additional(self):
        """
        Two L{dns.Message} instances compare equal if they have the same
        additional records.
        """
        # The unequal instance differs only by record name.
        self.assertNormalEqualityImplementation(
            self.messageFactory(additional=[dns.RRHeader(
                b'example.com', payload=dns.Record_A('172.16.31.10'))]),
            self.messageFactory(additional=[dns.RRHeader(
                b'example.com', payload=dns.Record_A('172.16.31.10'))]),
            self.messageFactory(additional=[dns.RRHeader(
                b'example.org', payload=dns.Record_A('172.16.31.10'))]),
        )
class TestController(object):
    """
    Stand-in DNS query processor for a DNSDatagramProtocol.

    @ivar messages: every message handed to L{messageReceived}, recorded as
        C{(msg, protocol, address)} tuples in arrival order.
    @type messages: C{list} of (msg, protocol, address)
    """

    def __init__(self):
        """
        Start with an empty record of received messages.
        """
        self.messages = []


    def messageReceived(self, msg, proto, addr=None):
        """
        Record the received message, along with its protocol and origin
        address, so tests can inspect it later.
        """
        entry = (msg, proto, addr)
        self.messages.append(entry)
class DatagramProtocolTestCase(unittest.TestCase):
    """
    Test various aspects of L{dns.DNSDatagramProtocol}.
    """

    def setUp(self):
        """
        Create a L{dns.DNSDatagramProtocol} with a deterministic clock.
        """
        self.clock = task.Clock()
        self.controller = TestController()
        self.proto = dns.DNSDatagramProtocol(self.controller)
        transport = proto_helpers.FakeDatagramTransport()
        self.proto.makeConnection(transport)
        # Route the protocol's delayed calls through the fake clock so
        # timeouts can be driven deterministically via clock.advance().
        self.proto.callLater = self.clock.callLater


    def test_truncatedPacket(self):
        """
        Test that when a short datagram is received, datagramReceived does
        not raise an exception while processing it.
        """
        self.proto.datagramReceived(
            b'', address.IPv4Address('UDP', '127.0.0.1', 12345))
        # The malformed datagram must not reach the controller either.
        self.assertEqual(self.controller.messages, [])


    def test_simpleQuery(self):
        """
        Test content received after a query.
        """
        d = self.proto.query(('127.0.0.1', 21345), [dns.Query(b'foo')])
        self.assertEqual(len(self.proto.liveMessages.keys()), 1)
        # Build a response re-using the id of the outstanding query so the
        # protocol correlates it with the pending Deferred.
        m = dns.Message()
        m.id = next(iter(self.proto.liveMessages.keys()))
        m.answers = [dns.RRHeader(payload=dns.Record_A(address='172.16.31.10'))]
        def cb(result):
            self.assertEqual(result.answers[0].payload.dottedQuad(), '172.16.31.10')
        d.addCallback(cb)
        self.proto.datagramReceived(m.toStr(), ('127.0.0.1', 21345))
        return d


    def test_queryTimeout(self):
        """
        Test that query timeouts after some seconds.
        """
        d = self.proto.query(('127.0.0.1', 21345), [dns.Query(b'foo')])
        self.assertEqual(len(self.proto.liveMessages), 1)
        # Advancing the deterministic clock past the timeout fires the errback.
        self.clock.advance(10)
        self.assertFailure(d, dns.DNSQueryTimeoutError)
        self.assertEqual(len(self.proto.liveMessages), 0)
        return d


    def test_writeError(self):
        """
        Exceptions raised by the transport's write method should be turned into
        C{Failure}s passed to errbacks of the C{Deferred} returned by
        L{DNSDatagramProtocol.query}.
        """
        def writeError(message, addr):
            raise RuntimeError("bar")
        self.proto.transport.write = writeError

        d = self.proto.query(('127.0.0.1', 21345), [dns.Query(b'foo')])
        return self.assertFailure(d, RuntimeError)


    def test_listenError(self):
        """
        Exception L{CannotListenError} raised by C{listenUDP} should be turned
        into a C{Failure} passed to errback of the C{Deferred} returned by
        L{DNSDatagramProtocol.query}.
        """
        def startListeningError():
            raise CannotListenError(None, None, None)
        self.proto.startListening = startListeningError
        # Clean up transport so that the protocol calls startListening again
        self.proto.transport = None

        d = self.proto.query(('127.0.0.1', 21345), [dns.Query(b'foo')])
        return self.assertFailure(d, CannotListenError)


    def test_receiveMessageNotInLiveMessages(self):
        """
        When receiving a message whose id is not in
        L{DNSDatagramProtocol.liveMessages} or L{DNSDatagramProtocol.resends},
        the message will be received by L{DNSDatagramProtocol.controller}.
        """
        message = dns.Message()
        message.id = 1
        message.answers = [dns.RRHeader(
            payload=dns.Record_A(address='172.16.31.10'))]
        self.proto.datagramReceived(message.toStr(), ('127.0.0.1', 21345))
        # Compare wire images: RRHeader parsing details may differ in-memory.
        self.assertEqual(self.controller.messages[-1][0].toStr(),
                         message.toStr())
class TestTCPController(TestController):
    """
    Pretend to be a DNS query processor for a DNSProtocol.

    @ivar connections: A list of L{DNSProtocol} instances which have
        notified this controller that they are connected and have not
        yet notified it that their connection has been lost.
    """
    def __init__(self):
        TestController.__init__(self)
        # Currently-connected protocols, maintained by the two
        # notification callbacks below.
        self.connections = []

    def connectionMade(self, proto):
        """
        Record C{proto} as a connected protocol.
        """
        self.connections.append(proto)

    def connectionLost(self, proto):
        """
        Forget C{proto}, whose connection has been lost.
        """
        self.connections.remove(proto)
class DNSProtocolTestCase(unittest.TestCase):
    """
    Test various aspects of L{dns.DNSProtocol}.
    """
    def setUp(self):
        """
        Create a L{dns.DNSProtocol} with a deterministic clock.
        """
        self.clock = task.Clock()
        self.controller = TestTCPController()
        self.proto = dns.DNSProtocol(self.controller)
        self.proto.makeConnection(proto_helpers.StringTransport())
        # Route the protocol's delayed calls through the deterministic
        # clock so timeouts can be triggered with clock.advance().
        self.proto.callLater = self.clock.callLater

    def test_connectionTracking(self):
        """
        L{dns.DNSProtocol} calls its controller's C{connectionMade}
        method with itself when it is connected to a transport and its
        controller's C{connectionLost} method when it is disconnected.
        """
        self.assertEqual(self.controller.connections, [self.proto])
        self.proto.connectionLost(
            Failure(ConnectionDone("Fake Connection Done")))
        self.assertEqual(self.controller.connections, [])

    def test_queryTimeout(self):
        """
        A TCP query that never receives a response errbacks with
        L{dns.DNSQueryTimeoutError} after the timeout elapses.
        """
        d = self.proto.query([dns.Query(b'foo')])
        self.assertEqual(len(self.proto.liveMessages), 1)
        self.clock.advance(60)
        self.assertFailure(d, dns.DNSQueryTimeoutError)
        self.assertEqual(len(self.proto.liveMessages), 0)
        return d

    def test_simpleQuery(self):
        """
        A response delivered over the TCP transport fires the query's
        C{Deferred} with the decoded message content.
        """
        d = self.proto.query([dns.Query(b'foo')])
        self.assertEqual(len(self.proto.liveMessages.keys()), 1)
        m = dns.Message()
        m.id = next(iter(self.proto.liveMessages.keys()))
        m.answers = [dns.RRHeader(payload=dns.Record_A(address='172.16.31.10'))]
        def cb(result):
            # Compare against the address the fake response was
            # constructed with above; it must round-trip unchanged.
            self.assertEqual(result.answers[0].payload.dottedQuad(),
                             '172.16.31.10')
        d.addCallback(cb)
        s = m.toStr()
        # TCP DNS messages are prefixed with a 16-bit big-endian length
        # field (RFC 1035 section 4.2.2).
        s = struct.pack('!H', len(s)) + s
        self.proto.dataReceived(s)
        return d

    def test_writeError(self):
        """
        Exceptions raised by the transport's write method should be turned into
        C{Failure}s passed to errbacks of the C{Deferred} returned by
        L{DNSProtocol.query}.
        """
        def writeError(message):
            raise RuntimeError("bar")
        self.proto.transport.write = writeError

        d = self.proto.query([dns.Query(b'foo')])
        return self.assertFailure(d, RuntimeError)

    def test_receiveMessageNotInLiveMessages(self):
        """
        When receiving a message whose id is not in L{DNSProtocol.liveMessages}
        the message will be received by L{DNSProtocol.controller}.
        """
        message = dns.Message()
        message.id = 1
        message.answers = [dns.RRHeader(
            payload=dns.Record_A(address='172.16.31.10'))]
        string = message.toStr()
        string = struct.pack('!H', len(string)) + string
        self.proto.dataReceived(string)
        self.assertEqual(self.controller.messages[-1][0].toStr(),
                         message.toStr())
class ReprTests(unittest.TestCase):
    """
    Tests for the C{__repr__} implementation of record classes.

    Each test constructs a record and asserts that its repr reflects
    exactly the values the record was constructed with.
    """
    def test_ns(self):
        """
        The repr of a L{dns.Record_NS} instance includes the name of the
        nameserver and the TTL of the record.
        """
        self.assertEqual(
            repr(dns.Record_NS(b'example.com', 4321)),
            "<NS name=example.com ttl=4321>")

    def test_md(self):
        """
        The repr of a L{dns.Record_MD} instance includes the name of the
        mail destination and the TTL of the record.
        """
        self.assertEqual(
            repr(dns.Record_MD(b'example.com', 4321)),
            "<MD name=example.com ttl=4321>")

    def test_mf(self):
        """
        The repr of a L{dns.Record_MF} instance includes the name of the
        mail forwarder and the TTL of the record.
        """
        self.assertEqual(
            repr(dns.Record_MF(b'example.com', 4321)),
            "<MF name=example.com ttl=4321>")

    def test_cname(self):
        """
        The repr of a L{dns.Record_CNAME} instance includes the name of the
        mail forwarder and the TTL of the record.
        """
        self.assertEqual(
            repr(dns.Record_CNAME(b'example.com', 4321)),
            "<CNAME name=example.com ttl=4321>")

    def test_mb(self):
        """
        The repr of a L{dns.Record_MB} instance includes the name of the
        mailbox and the TTL of the record.
        """
        self.assertEqual(
            repr(dns.Record_MB(b'example.com', 4321)),
            "<MB name=example.com ttl=4321>")

    def test_mg(self):
        """
        The repr of a L{dns.Record_MG} instance includes the name of the
        mail group member and the TTL of the record.
        """
        self.assertEqual(
            repr(dns.Record_MG(b'example.com', 4321)),
            "<MG name=example.com ttl=4321>")

    def test_mr(self):
        """
        The repr of a L{dns.Record_MR} instance includes the name of the
        mail rename domain and the TTL of the record.
        """
        self.assertEqual(
            repr(dns.Record_MR(b'example.com', 4321)),
            "<MR name=example.com ttl=4321>")

    def test_ptr(self):
        """
        The repr of a L{dns.Record_PTR} instance includes the name of the
        pointer and the TTL of the record.
        """
        self.assertEqual(
            repr(dns.Record_PTR(b'example.com', 4321)),
            "<PTR name=example.com ttl=4321>")

    def test_dname(self):
        """
        The repr of a L{dns.Record_DNAME} instance includes the name of the
        non-terminal DNS name redirection and the TTL of the record.
        """
        self.assertEqual(
            repr(dns.Record_DNAME(b'example.com', 4321)),
            "<DNAME name=example.com ttl=4321>")

    def test_a(self):
        """
        The repr of a L{dns.Record_A} instance includes the dotted-quad
        string representation of the address it is for and the TTL of the
        record.
        """
        # The expected address matches the one the record is built with.
        self.assertEqual(
            repr(dns.Record_A('172.16.31.10', 567)),
            '<A address=172.16.31.10 ttl=567>')

    def test_soa(self):
        """
        The repr of a L{dns.Record_SOA} instance includes all of the
        authority fields.
        """
        self.assertEqual(
            repr(dns.Record_SOA(mname=b'mName', rname=b'rName', serial=123,
                                refresh=456, retry=789, expire=10,
                                minimum=11, ttl=12)),
            "<SOA mname=mName rname=rName serial=123 refresh=456 "
            "retry=789 expire=10 minimum=11 ttl=12>")

    def test_null(self):
        """
        The repr of a L{dns.Record_NULL} instance includes the repr of its
        payload and the TTL of the record.
        """
        self.assertEqual(
            repr(dns.Record_NULL(b'abcd', 123)),
            "<NULL payload='abcd' ttl=123>")

    def test_wks(self):
        """
        The repr of a L{dns.Record_WKS} instance includes the dotted-quad
        string representation of the address it is for, the IP protocol
        number it is for, and the TTL of the record.
        """
        # The expected address matches the one the record is built with.
        self.assertEqual(
            repr(dns.Record_WKS('192.168.3.11', 7, ttl=8)),
            "<WKS address=192.168.3.11 protocol=7 ttl=8>")

    def test_aaaa(self):
        """
        The repr of a L{dns.Record_AAAA} instance includes the colon-separated
        hex string representation of the address it is for and the TTL of the
        record.
        """
        self.assertEqual(
            repr(dns.Record_AAAA('fdf8:f53e:61e4::18', ttl=10)),
            "<AAAA address=fdf8:f53e:61e4::18 ttl=10>")

    def test_a6(self):
        """
        The repr of a L{dns.Record_A6} instance includes the colon-separated
        hex string representation of the address it is for and the TTL of the
        record.
        """
        self.assertEqual(
            repr(dns.Record_A6(0, 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b', b'foo.bar', ttl=10)),
            "<A6 suffix=fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b prefix=foo.bar ttl=10>")

    def test_srv(self):
        """
        The repr of a L{dns.Record_SRV} instance includes the name and port of
        the target and the priority, weight, and TTL of the record.
        """
        self.assertEqual(
            repr(dns.Record_SRV(1, 2, 3, b'example.org', 4)),
            "<SRV priority=1 weight=2 target=example.org port=3 ttl=4>")

    def test_naptr(self):
        """
        The repr of a L{dns.Record_NAPTR} instance includes the order,
        preference, flags, service, regular expression, replacement, and TTL of
        the record.
        """
        record = dns.Record_NAPTR(
            5, 9, b"S", b"http", b"/foo/bar/i", b"baz", 3)
        self.assertEqual(
            repr(record),
            "<NAPTR order=5 preference=9 flags=S service=http "
            "regexp=/foo/bar/i replacement=baz ttl=3>")

    def test_afsdb(self):
        """
        The repr of a L{dns.Record_AFSDB} instance includes the subtype,
        hostname, and TTL of the record.
        """
        self.assertEqual(
            repr(dns.Record_AFSDB(3, b'example.org', 5)),
            "<AFSDB subtype=3 hostname=example.org ttl=5>")

    def test_rp(self):
        """
        The repr of a L{dns.Record_RP} instance includes the mbox, txt, and TTL
        fields of the record.
        """
        self.assertEqual(
            repr(dns.Record_RP(b'alice.example.com', b'admin.example.com', 3)),
            "<RP mbox=alice.example.com txt=admin.example.com ttl=3>")

    def test_hinfo(self):
        """
        The repr of a L{dns.Record_HINFO} instance includes the cpu, os, and
        TTL fields of the record.
        """
        self.assertEqual(
            repr(dns.Record_HINFO(b'sparc', b'minix', 12)),
            "<HINFO cpu='sparc' os='minix' ttl=12>")

    def test_minfo(self):
        """
        The repr of a L{dns.Record_MINFO} instance includes the rmailbx,
        emailbx, and TTL fields of the record.
        """
        record = dns.Record_MINFO(
            b'alice.example.com', b'bob.example.com', 15)
        self.assertEqual(
            repr(record),
            "<MINFO responsibility=alice.example.com "
            "errors=bob.example.com ttl=15>")

    def test_mx(self):
        """
        The repr of a L{dns.Record_MX} instance includes the preference, name,
        and TTL fields of the record.
        """
        self.assertEqual(
            repr(dns.Record_MX(13, b'mx.example.com', 2)),
            "<MX preference=13 name=mx.example.com ttl=2>")

    def test_txt(self):
        """
        The repr of a L{dns.Record_TXT} instance includes the data and ttl
        fields of the record.
        """
        self.assertEqual(
            repr(dns.Record_TXT(b"foo", b"bar", ttl=15)),
            "<TXT data=['foo', 'bar'] ttl=15>")

    def test_spf(self):
        """
        The repr of a L{dns.Record_SPF} instance includes the data and ttl
        fields of the record.
        """
        self.assertEqual(
            repr(dns.Record_SPF(b"foo", b"bar", ttl=15)),
            "<SPF data=['foo', 'bar'] ttl=15>")

    def test_unknown(self):
        """
        The repr of a L{dns.UnknownRecord} instance includes the data and ttl
        fields of the record.
        """
        self.assertEqual(
            repr(dns.UnknownRecord(b"foo\x1fbar", 12)),
            "<UNKNOWN data='foo\\x1fbar' ttl=12>")
class EqualityTests(ComparisonTestsMixin, unittest.TestCase):
    """
    Tests for the equality and non-equality behavior of record classes.

    Each test varies exactly one attribute at a time so that a failure
    isolates the attribute which is not participating in comparison.
    """
    def _equalityTest(self, firstValueOne, secondValueOne, valueTwo):
        return self.assertNormalEqualityImplementation(
            firstValueOne, secondValueOne, valueTwo)

    def test_charstr(self):
        """
        Two L{dns.Charstr} instances compare equal if and only if they have the
        same string value.
        """
        self._equalityTest(
            dns.Charstr(b'abc'), dns.Charstr(b'abc'), dns.Charstr(b'def'))

    def test_name(self):
        """
        Two L{dns.Name} instances compare equal if and only if they have the
        same name value.
        """
        self._equalityTest(
            dns.Name(b'abc'), dns.Name(b'abc'), dns.Name(b'def'))

    def _simpleEqualityTest(self, cls):
        """
        Assert that instances of C{cls} with the same attributes compare equal
        to each other and instances with different attributes compare as not
        equal.

        @param cls: A L{dns.SimpleRecord} subclass.
        """
        # Vary the TTL
        self._equalityTest(
            cls(b'example.com', 123),
            cls(b'example.com', 123),
            cls(b'example.com', 321))
        # Vary the name
        self._equalityTest(
            cls(b'example.com', 123),
            cls(b'example.com', 123),
            cls(b'example.org', 123))

    def test_rrheader(self):
        """
        Two L{dns.RRHeader} instances compare equal if and only if they have
        the same name, type, class, time to live, payload, and authoritative
        bit.
        """
        # Vary the name
        self._equalityTest(
            dns.RRHeader(b'example.com', payload=dns.Record_A('172.16.31.10')),
            dns.RRHeader(b'example.com', payload=dns.Record_A('172.16.31.10')),
            dns.RRHeader(b'example.org', payload=dns.Record_A('172.16.31.10')))
        # Vary the payload
        self._equalityTest(
            dns.RRHeader(b'example.com', payload=dns.Record_A('172.16.31.10')),
            dns.RRHeader(b'example.com', payload=dns.Record_A('172.16.31.10')),
            dns.RRHeader(b'example.com', payload=dns.Record_A('192.168.127.12')))
        # Vary the type.  Leave the payload as None so that we don't have to
        # provide non-equal values.
        self._equalityTest(
            dns.RRHeader(b'example.com', dns.A),
            dns.RRHeader(b'example.com', dns.A),
            dns.RRHeader(b'example.com', dns.MX))
        # Probably not likely to come up.  Most people use the internet.
        self._equalityTest(
            dns.RRHeader(b'example.com', cls=dns.IN, payload=dns.Record_A('172.16.31.10')),
            dns.RRHeader(b'example.com', cls=dns.IN, payload=dns.Record_A('172.16.31.10')),
            dns.RRHeader(b'example.com', cls=dns.CS, payload=dns.Record_A('172.16.31.10')))
        # Vary the ttl
        self._equalityTest(
            dns.RRHeader(b'example.com', ttl=60, payload=dns.Record_A('172.16.31.10')),
            dns.RRHeader(b'example.com', ttl=60, payload=dns.Record_A('172.16.31.10')),
            dns.RRHeader(b'example.com', ttl=120, payload=dns.Record_A('172.16.31.10')))
        # Vary the auth bit
        self._equalityTest(
            dns.RRHeader(b'example.com', auth=1, payload=dns.Record_A('172.16.31.10')),
            dns.RRHeader(b'example.com', auth=1, payload=dns.Record_A('172.16.31.10')),
            dns.RRHeader(b'example.com', auth=0, payload=dns.Record_A('172.16.31.10')))

    def test_ns(self):
        """
        Two L{dns.Record_NS} instances compare equal if and only if they have
        the same name and TTL.
        """
        self._simpleEqualityTest(dns.Record_NS)

    def test_md(self):
        """
        Two L{dns.Record_MD} instances compare equal if and only if they have
        the same name and TTL.
        """
        self._simpleEqualityTest(dns.Record_MD)

    def test_mf(self):
        """
        Two L{dns.Record_MF} instances compare equal if and only if they have
        the same name and TTL.
        """
        self._simpleEqualityTest(dns.Record_MF)

    def test_cname(self):
        """
        Two L{dns.Record_CNAME} instances compare equal if and only if they
        have the same name and TTL.
        """
        self._simpleEqualityTest(dns.Record_CNAME)

    def test_mb(self):
        """
        Two L{dns.Record_MB} instances compare equal if and only if they have
        the same name and TTL.
        """
        self._simpleEqualityTest(dns.Record_MB)

    def test_mg(self):
        """
        Two L{dns.Record_MG} instances compare equal if and only if they have
        the same name and TTL.
        """
        self._simpleEqualityTest(dns.Record_MG)

    def test_mr(self):
        """
        Two L{dns.Record_MR} instances compare equal if and only if they have
        the same name and TTL.
        """
        self._simpleEqualityTest(dns.Record_MR)

    def test_ptr(self):
        """
        Two L{dns.Record_PTR} instances compare equal if and only if they have
        the same name and TTL.
        """
        self._simpleEqualityTest(dns.Record_PTR)

    def test_dname(self):
        """
        Two L{dns.Record_MD} instances compare equal if and only if they have
        the same name and TTL.
        """
        self._simpleEqualityTest(dns.Record_DNAME)

    def test_a(self):
        """
        Two L{dns.Record_A} instances compare equal if and only if they have
        the same address and TTL.
        """
        # Vary the TTL
        self._equalityTest(
            dns.Record_A('172.16.31.10', 5),
            dns.Record_A('172.16.31.10', 5),
            dns.Record_A('172.16.31.10', 6))
        # Vary the address
        self._equalityTest(
            dns.Record_A('172.16.31.10', 5),
            dns.Record_A('172.16.31.10', 5),
            dns.Record_A('192.168.127.12', 5))

    def test_soa(self):
        """
        Two L{dns.Record_SOA} instances compare equal if and only if they have
        the same mname, rname, serial, refresh, minimum, expire, retry, and
        ttl.
        """
        # Vary the mname
        self._equalityTest(
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'xname', b'rname', 123, 456, 789, 10, 20, 30))
        # Vary the rname
        self._equalityTest(
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'xname', 123, 456, 789, 10, 20, 30))
        # Vary the serial
        self._equalityTest(
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 1, 456, 789, 10, 20, 30))
        # Vary the refresh
        self._equalityTest(
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 123, 1, 789, 10, 20, 30))
        # Vary the minimum
        self._equalityTest(
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 123, 456, 1, 10, 20, 30))
        # Vary the expire
        self._equalityTest(
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 1, 20, 30))
        # Vary the retry
        self._equalityTest(
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 1, 30))
        # Vary the ttl (and only the ttl, so this comparison isolates it)
        self._equalityTest(
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 1))

    def test_null(self):
        """
        Two L{dns.Record_NULL} instances compare equal if and only if they have
        the same payload and ttl.
        """
        # Vary the payload
        self._equalityTest(
            dns.Record_NULL('foo bar', 10),
            dns.Record_NULL('foo bar', 10),
            dns.Record_NULL('bar foo', 10))
        # Vary the ttl
        self._equalityTest(
            dns.Record_NULL('foo bar', 10),
            dns.Record_NULL('foo bar', 10),
            dns.Record_NULL('foo bar', 100))

    def test_wks(self):
        """
        Two L{dns.Record_WKS} instances compare equal if and only if they have
        the same address, protocol, map, and ttl.
        """
        # Vary the address
        self._equalityTest(
            dns.Record_WKS('172.16.31.10', 1, 'foo', 2),
            dns.Record_WKS('172.16.31.10', 1, 'foo', 2),
            dns.Record_WKS('172.16.58.3', 1, 'foo', 2))
        # Vary the protocol
        self._equalityTest(
            dns.Record_WKS('172.16.31.10', 1, 'foo', 2),
            dns.Record_WKS('172.16.31.10', 1, 'foo', 2),
            dns.Record_WKS('172.16.31.10', 100, 'foo', 2))
        # Vary the map
        self._equalityTest(
            dns.Record_WKS('172.16.31.10', 1, 'foo', 2),
            dns.Record_WKS('172.16.31.10', 1, 'foo', 2),
            dns.Record_WKS('172.16.31.10', 1, 'bar', 2))
        # Vary the ttl
        self._equalityTest(
            dns.Record_WKS('172.16.31.10', 1, 'foo', 2),
            dns.Record_WKS('172.16.31.10', 1, 'foo', 2),
            dns.Record_WKS('172.16.31.10', 1, 'foo', 200))

    def test_aaaa(self):
        """
        Two L{dns.Record_AAAA} instances compare equal if and only if they have
        the same address and ttl.
        """
        # Vary the address
        self._equalityTest(
            dns.Record_AAAA('fc00:db20:35b:7399::5', 1),
            dns.Record_AAAA('fc00:db20:35b:7399::5', 1),
            dns.Record_AAAA('fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b', 1))
        # Vary the ttl
        self._equalityTest(
            dns.Record_AAAA('fc00:db20:35b:7399::5', 1),
            dns.Record_AAAA('fc00:db20:35b:7399::5', 1),
            dns.Record_AAAA('fc00:db20:35b:7399::5', 10))

    def test_a6(self):
        """
        Two L{dns.Record_A6} instances compare equal if and only if they have
        the same prefix, prefix length, suffix, and ttl.
        """
        # Note, A6 is crazy, I'm not sure these values are actually legal.
        # Hopefully that doesn't matter for this test. -exarkun

        # Vary the prefix length
        self._equalityTest(
            dns.Record_A6(16, '::abcd', b'example.com', 10),
            dns.Record_A6(16, '::abcd', b'example.com', 10),
            dns.Record_A6(32, '::abcd', b'example.com', 10))
        # Vary the suffix
        self._equalityTest(
            dns.Record_A6(16, '::abcd', b'example.com', 10),
            dns.Record_A6(16, '::abcd', b'example.com', 10),
            dns.Record_A6(16, '::abcd:0', b'example.com', 10))
        # Vary the prefix
        self._equalityTest(
            dns.Record_A6(16, '::abcd', b'example.com', 10),
            dns.Record_A6(16, '::abcd', b'example.com', 10),
            dns.Record_A6(16, '::abcd', b'example.org', 10))
        # Vary the ttl
        self._equalityTest(
            dns.Record_A6(16, '::abcd', b'example.com', 10),
            dns.Record_A6(16, '::abcd', b'example.com', 10),
            dns.Record_A6(16, '::abcd', b'example.com', 100))

    def test_srv(self):
        """
        Two L{dns.Record_SRV} instances compare equal if and only if they have
        the same priority, weight, port, target, and ttl.
        """
        # Vary the priority
        self._equalityTest(
            dns.Record_SRV(10, 20, 30, b'example.com', 40),
            dns.Record_SRV(10, 20, 30, b'example.com', 40),
            dns.Record_SRV(100, 20, 30, b'example.com', 40))
        # Vary the weight
        self._equalityTest(
            dns.Record_SRV(10, 20, 30, b'example.com', 40),
            dns.Record_SRV(10, 20, 30, b'example.com', 40),
            dns.Record_SRV(10, 200, 30, b'example.com', 40))
        # Vary the port
        self._equalityTest(
            dns.Record_SRV(10, 20, 30, b'example.com', 40),
            dns.Record_SRV(10, 20, 30, b'example.com', 40),
            dns.Record_SRV(10, 20, 300, b'example.com', 40))
        # Vary the target
        self._equalityTest(
            dns.Record_SRV(10, 20, 30, b'example.com', 40),
            dns.Record_SRV(10, 20, 30, b'example.com', 40),
            dns.Record_SRV(10, 20, 30, b'example.org', 40))
        # Vary the ttl
        self._equalityTest(
            dns.Record_SRV(10, 20, 30, b'example.com', 40),
            dns.Record_SRV(10, 20, 30, b'example.com', 40),
            dns.Record_SRV(10, 20, 30, b'example.com', 400))

    def test_naptr(self):
        """
        Two L{dns.Record_NAPTR} instances compare equal if and only if they
        have the same order, preference, flags, service, regexp, replacement,
        and ttl.
        """
        # Vary the order
        self._equalityTest(
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(2, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12))
        # Vary the preference
        self._equalityTest(
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(1, 3, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12))
        # Vary the flags
        self._equalityTest(
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(1, 2, b"p", b"sip+E2U", b"/foo/bar/", b"baz", 12))
        # Vary the service
        self._equalityTest(
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(1, 2, b"u", b"http", b"/foo/bar/", b"baz", 12))
        # Vary the regexp
        self._equalityTest(
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/bar/foo/", b"baz", 12))
        # Vary the replacement (and only the replacement)
        self._equalityTest(
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"quux", 12))
        # Vary the ttl (and only the ttl)
        self._equalityTest(
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 5))

    def test_afsdb(self):
        """
        Two L{dns.Record_AFSDB} instances compare equal if and only if they
        have the same subtype, hostname, and ttl.
        """
        # Vary the subtype
        self._equalityTest(
            dns.Record_AFSDB(1, b'example.com', 2),
            dns.Record_AFSDB(1, b'example.com', 2),
            dns.Record_AFSDB(2, b'example.com', 2))
        # Vary the hostname
        self._equalityTest(
            dns.Record_AFSDB(1, b'example.com', 2),
            dns.Record_AFSDB(1, b'example.com', 2),
            dns.Record_AFSDB(1, b'example.org', 2))
        # Vary the ttl
        self._equalityTest(
            dns.Record_AFSDB(1, b'example.com', 2),
            dns.Record_AFSDB(1, b'example.com', 2),
            dns.Record_AFSDB(1, b'example.com', 3))

    def test_rp(self):
        """
        Two L{Record_RP} instances compare equal if and only if they have the
        same mbox, txt, and ttl.
        """
        # Vary the mbox
        self._equalityTest(
            dns.Record_RP(b'alice.example.com', b'alice is nice', 10),
            dns.Record_RP(b'alice.example.com', b'alice is nice', 10),
            dns.Record_RP(b'bob.example.com', b'alice is nice', 10))
        # Vary the txt
        self._equalityTest(
            dns.Record_RP(b'alice.example.com', b'alice is nice', 10),
            dns.Record_RP(b'alice.example.com', b'alice is nice', 10),
            dns.Record_RP(b'alice.example.com', b'alice is not nice', 10))
        # Vary the ttl
        self._equalityTest(
            dns.Record_RP(b'alice.example.com', b'alice is nice', 10),
            dns.Record_RP(b'alice.example.com', b'alice is nice', 10),
            dns.Record_RP(b'alice.example.com', b'alice is nice', 100))

    def test_hinfo(self):
        """
        Two L{dns.Record_HINFO} instances compare equal if and only if they
        have the same cpu, os, and ttl.
        """
        # Vary the cpu
        self._equalityTest(
            dns.Record_HINFO('x86-64', 'plan9', 10),
            dns.Record_HINFO('x86-64', 'plan9', 10),
            dns.Record_HINFO('i386', 'plan9', 10))
        # Vary the os
        self._equalityTest(
            dns.Record_HINFO('x86-64', 'plan9', 10),
            dns.Record_HINFO('x86-64', 'plan9', 10),
            dns.Record_HINFO('x86-64', 'plan11', 10))
        # Vary the ttl
        self._equalityTest(
            dns.Record_HINFO('x86-64', 'plan9', 10),
            dns.Record_HINFO('x86-64', 'plan9', 10),
            dns.Record_HINFO('x86-64', 'plan9', 100))

    def test_minfo(self):
        """
        Two L{dns.Record_MINFO} instances compare equal if and only if they
        have the same rmailbx, emailbx, and ttl.
        """
        # Vary the rmailbx
        self._equalityTest(
            dns.Record_MINFO(b'rmailbox', b'emailbox', 10),
            dns.Record_MINFO(b'rmailbox', b'emailbox', 10),
            dns.Record_MINFO(b'someplace', b'emailbox', 10))
        # Vary the emailbx
        self._equalityTest(
            dns.Record_MINFO(b'rmailbox', b'emailbox', 10),
            dns.Record_MINFO(b'rmailbox', b'emailbox', 10),
            dns.Record_MINFO(b'rmailbox', b'something', 10))
        # Vary the ttl
        self._equalityTest(
            dns.Record_MINFO(b'rmailbox', b'emailbox', 10),
            dns.Record_MINFO(b'rmailbox', b'emailbox', 10),
            dns.Record_MINFO(b'rmailbox', b'emailbox', 100))

    def test_mx(self):
        """
        Two L{dns.Record_MX} instances compare equal if and only if they have
        the same preference, name, and ttl.
        """
        # Vary the preference
        self._equalityTest(
            dns.Record_MX(10, b'example.org', 20),
            dns.Record_MX(10, b'example.org', 20),
            dns.Record_MX(100, b'example.org', 20))
        # Vary the name
        self._equalityTest(
            dns.Record_MX(10, b'example.org', 20),
            dns.Record_MX(10, b'example.org', 20),
            dns.Record_MX(10, b'example.net', 20))
        # Vary the ttl
        self._equalityTest(
            dns.Record_MX(10, b'example.org', 20),
            dns.Record_MX(10, b'example.org', 20),
            dns.Record_MX(10, b'example.org', 200))

    def test_txt(self):
        """
        Two L{dns.Record_TXT} instances compare equal if and only if they have
        the same data and ttl.
        """
        # Vary the length of the data
        self._equalityTest(
            dns.Record_TXT('foo', 'bar', ttl=10),
            dns.Record_TXT('foo', 'bar', ttl=10),
            dns.Record_TXT('foo', 'bar', 'baz', ttl=10))
        # Vary the value of the data
        self._equalityTest(
            dns.Record_TXT('foo', 'bar', ttl=10),
            dns.Record_TXT('foo', 'bar', ttl=10),
            dns.Record_TXT('bar', 'foo', ttl=10))
        # Vary the ttl
        self._equalityTest(
            dns.Record_TXT('foo', 'bar', ttl=10),
            dns.Record_TXT('foo', 'bar', ttl=10),
            dns.Record_TXT('foo', 'bar', ttl=100))

    def test_spf(self):
        """
        L{dns.Record_SPF} instances compare equal if and only if they have the
        same data and ttl.
        """
        # Vary the length of the data
        self._equalityTest(
            dns.Record_SPF('foo', 'bar', ttl=10),
            dns.Record_SPF('foo', 'bar', ttl=10),
            dns.Record_SPF('foo', 'bar', 'baz', ttl=10))
        # Vary the value of the data
        self._equalityTest(
            dns.Record_SPF('foo', 'bar', ttl=10),
            dns.Record_SPF('foo', 'bar', ttl=10),
            dns.Record_SPF('bar', 'foo', ttl=10))
        # Vary the ttl
        self._equalityTest(
            dns.Record_SPF('foo', 'bar', ttl=10),
            dns.Record_SPF('foo', 'bar', ttl=10),
            dns.Record_SPF('foo', 'bar', ttl=100))

    def test_unknown(self):
        """
        L{dns.UnknownRecord} instances compare equal if and only if they have
        the same data and ttl.
        """
        # Vary the length of the data
        self._equalityTest(
            dns.UnknownRecord('foo', ttl=10),
            dns.UnknownRecord('foo', ttl=10),
            dns.UnknownRecord('foobar', ttl=10))
        # Vary the value of the data
        self._equalityTest(
            dns.UnknownRecord('foo', ttl=10),
            dns.UnknownRecord('foo', ttl=10),
            dns.UnknownRecord('bar', ttl=10))
        # Vary the ttl
        self._equalityTest(
            dns.UnknownRecord('foo', ttl=10),
            dns.UnknownRecord('foo', ttl=10),
            dns.UnknownRecord('foo', ttl=100))
class RRHeaderTests(unittest.TestCase):
    """
    Tests for L{twisted.names.dns.RRHeader}.
    """
    def test_negativeTTL(self):
        """
        L{dns.RRHeader} rejects construction with a time-to-live below
        zero by raising L{ValueError}.
        """
        self.assertRaises(
            ValueError,
            dns.RRHeader, "example.com", dns.A, dns.IN, -1,
            dns.Record_A("127.0.0.1"))
class NameToLabelsTests(unittest.SynchronousTestCase):
    """
    Tests for L{twisted.names.dns._nameToLabels}.
    """
    def test_empty(self):
        """
        An empty name is split into a single empty label.
        """
        self.assertEqual([b''], dns._nameToLabels(b''))

    def test_onlyDot(self):
        """
        A name consisting only of a dot is split into a single empty
        label.
        """
        self.assertEqual([b''], dns._nameToLabels(b'.'))

    def test_withoutTrailingDot(self):
        """
        A name with no trailing dot still produces a label list that
        ends with an empty (root) label.
        """
        self.assertEqual([b'com', b''], dns._nameToLabels(b'com'))

    def test_withTrailingDot(self):
        """
        A name with a trailing dot produces a label list ending with an
        empty (root) label.
        """
        self.assertEqual([b'com', b''], dns._nameToLabels(b'com.'))

    def test_subdomain(self):
        """
        Every label of a deeply nested subdomain name appears in the
        resulting list, in order.
        """
        labels = dns._nameToLabels(b'foo.bar.baz.example.com.')
        self.assertEqual(
            [b'foo', b'bar', b'baz', b'example', b'com', b''], labels)

    def test_casePreservation(self):
        """
        The case of ascii characters within each label is preserved.
        """
        self.assertEqual(
            [b'EXAMPLE', b'COM', b''], dns._nameToLabels(b'EXAMPLE.COM'))
def assertIsSubdomainOf(testCase, descendant, ancestor):
    """
    Assert that C{descendant} *is* a subdomain of C{ancestor}.

    @type testCase: L{unittest.SynchronousTestCase}
    @param testCase: The test case on which to run the assertions.

    @type descendant: C{str}
    @param descendant: The subdomain name to test.

    @type ancestor: C{str}
    @param ancestor: The superdomain name to test.
    """
    isDescendant = dns._isSubdomainOf(descendant, ancestor)
    testCase.assertTrue(
        isDescendant,
        '%r is not a subdomain of %r' % (descendant, ancestor))
def assertIsNotSubdomainOf(testCase, descendant, ancestor):
    """
    Assert that C{descendant} *is not* a subdomain of C{ancestor}.

    @type testCase: L{unittest.SynchronousTestCase}
    @param testCase: The test case on which to run the assertions.

    @type descendant: C{str}
    @param descendant: The subdomain name to test.

    @type ancestor: C{str}
    @param ancestor: The superdomain name to test.
    """
    isDescendant = dns._isSubdomainOf(descendant, ancestor)
    testCase.assertFalse(
        isDescendant,
        '%r is a subdomain of %r' % (descendant, ancestor))
class IsSubdomainOfTests(unittest.SynchronousTestCase):
"""
Tests for L{twisted.names.dns._isSubdomainOf}.
"""
def test_identical(self):
"""
L{dns._isSubdomainOf} returns C{True} for identical
domain names.
"""
assertIsSubdomainOf(self, b'example.com', b'example.com')
def test_parent(self):
"""
L{dns._isSubdomainOf} returns C{True} when the first
name is an immediate descendant of the second name.
"""
assertIsSubdomainOf(self, b'foo.example.com', b'example.com')
def test_distantAncestor(self):
"""
L{dns._isSubdomainOf} returns C{True} when the first
name is a distant descendant of the second name.
"""
assertIsSubdomainOf(self, b'foo.bar.baz.example.com', b'com')
def test_superdomain(self):
"""
L{dns._isSubdomainOf} returns C{False} when the first
name is an ancestor of the second name.
"""
assertIsNotSubdomainOf(self, b'example.com', b'foo.example.com')
def test_sibling(self):
"""
L{dns._isSubdomainOf} returns C{False} if the first name
is a sibling of the second name.
"""
assertIsNotSubdomainOf(self, b'foo.example.com', b'bar.example.com')
def test_unrelatedCommonSuffix(self):
"""
L{dns._isSubdomainOf} returns C{False} even when domain
names happen to share a common suffix.
"""
assertIsNotSubdomainOf(self, b'foo.myexample.com', b'example.com')
def test_subdomainWithTrailingDot(self):
"""
L{dns._isSubdomainOf} returns C{True} if the first name
is a subdomain of the second name but the first name has a
trailing ".".
"""
assertIsSubdomainOf(self, b'foo.example.com.', b'example.com')
def test_superdomainWithTrailingDot(self):
"""
L{dns._isSubdomainOf} returns C{True} if the first name
is a subdomain of the second name but the second name has a
trailing ".".
"""
assertIsSubdomainOf(self, b'foo.example.com', b'example.com.')
def test_bothWithTrailingDot(self):
"""
L{dns._isSubdomainOf} returns C{True} if the first name
is a subdomain of the second name and both names have a
trailing ".".
"""
assertIsSubdomainOf(self, b'foo.example.com.', b'example.com.')
def test_emptySubdomain(self):
"""
L{dns._isSubdomainOf} returns C{False} if the first name
is empty and the second name is not.
"""
assertIsNotSubdomainOf(self, b'', b'example.com')
def test_emptySuperdomain(self):
"""
L{dns._isSubdomainOf} returns C{True} if the second name
is empty and the first name is not.
"""
assertIsSubdomainOf(self, b'foo.example.com', b'')
    def test_caseInsensitiveComparison(self):
        """
        L{dns._isSubdomainOf} does case-insensitive comparison
        of name labels.
        """
        # Case folding applies regardless of which operand is upper-cased.
        assertIsSubdomainOf(self, b'foo.example.com', b'EXAMPLE.COM')
        assertIsSubdomainOf(self, b'FOO.EXAMPLE.COM', b'example.com')
class OPTNonStandardAttributes(object):
    """
    Generate byte and instance representations of an L{dns._OPTHeader}
    where all attributes are set to non-default values.

    For testing whether attributes have really been read from the byte
    string during decoding.
    """

    @classmethod
    def bytes(cls, excludeName=False, excludeOptions=False):
        """
        Return L{bytes} representing an encoded OPT record.

        @param excludeName: A flag that controls whether to exclude
            the name field. This allows a non-standard name to be
            prepended during the test.
        @type excludeName: L{bool}

        @param excludeOptions: A flag that controls whether to exclude
            the RDLEN field. This allows encoded variable options to be
            appended during the test.
        @type excludeOptions: L{bool}

        @return: L{bytes} representing the encoded OPT record returned
            by L{object}.
        """
        fixedFields = (
            b'\x00'      # 0 root zone
            b'\x00\x29'  # type 41
            b'\x02\x00'  # udpPayloadsize 512
            b'\x03'      # extendedRCODE 3
            b'\x04'      # version 4
            b'\x80\x00'  # DNSSEC OK 1 + Z
        )
        # RDLEN 0, unless the caller intends to append its own encoded
        # variable options.
        rdlen = b'' if excludeOptions else b'\x00\x00'
        return fixedFields + rdlen

    @classmethod
    def object(cls):
        """
        Return a new L{dns._OPTHeader} instance.

        @return: A L{dns._OPTHeader} instance with attributes that
            match the encoded record returned by L{bytes}.
        """
        return dns._OPTHeader(
            udpPayloadSize=512,
            extendedRCODE=3,
            version=4,
            dnssecOK=True)
class OPTHeaderTests(ComparisonTestsMixin, unittest.TestCase):
    """
    Tests for L{twisted.names.dns._OPTHeader}.
    """
    def test_interface(self):
        """
        L{dns._OPTHeader} implements L{dns.IEncodable}.
        """
        verifyClass(dns.IEncodable, dns._OPTHeader)

    def test_name(self):
        """
        L{dns._OPTHeader.name} is a instance attribute whose value is
        fixed as the root domain
        """
        self.assertEqual(dns._OPTHeader().name, dns.Name(b''))

    def test_nameReadonly(self):
        """
        L{dns._OPTHeader.name} is readonly.
        """
        h = dns._OPTHeader()
        self.assertRaises(
            AttributeError, setattr, h, 'name', dns.Name(b'example.com'))

    def test_type(self):
        """
        L{dns._OPTHeader.type} is an instance attribute with fixed value
        41.
        """
        self.assertEqual(dns._OPTHeader().type, 41)

    def test_typeReadonly(self):
        """
        L{dns._OPTHeader.type} is readonly.
        """
        h = dns._OPTHeader()
        self.assertRaises(
            AttributeError, setattr, h, 'type', dns.A)

    def test_udpPayloadSize(self):
        """
        L{dns._OPTHeader.udpPayloadSize} defaults to 4096 as
        recommended in rfc6891 section-6.2.5.
        """
        self.assertEqual(dns._OPTHeader().udpPayloadSize, 4096)

    def test_udpPayloadSizeOverride(self):
        """
        L{dns._OPTHeader.udpPayloadSize} can be overridden in the
        constructor.
        """
        self.assertEqual(dns._OPTHeader(udpPayloadSize=512).udpPayloadSize, 512)

    def test_extendedRCODE(self):
        """
        L{dns._OPTHeader.extendedRCODE} defaults to 0.
        """
        self.assertEqual(dns._OPTHeader().extendedRCODE, 0)

    def test_extendedRCODEOverride(self):
        """
        L{dns._OPTHeader.extendedRCODE} can be overridden in the
        constructor.
        """
        self.assertEqual(dns._OPTHeader(extendedRCODE=1).extendedRCODE, 1)

    def test_version(self):
        """
        L{dns._OPTHeader.version} defaults to 0.
        """
        self.assertEqual(dns._OPTHeader().version, 0)

    def test_versionOverride(self):
        """
        L{dns._OPTHeader.version} can be overridden in the
        constructor.
        """
        self.assertEqual(dns._OPTHeader(version=1).version, 1)

    def test_dnssecOK(self):
        """
        L{dns._OPTHeader.dnssecOK} defaults to False.
        """
        self.assertEqual(dns._OPTHeader().dnssecOK, False)

    def test_dnssecOKOverride(self):
        """
        L{dns._OPTHeader.dnssecOK} can be overridden in the
        constructor.
        """
        self.assertEqual(dns._OPTHeader(dnssecOK=True).dnssecOK, True)

    def test_options(self):
        """
        L{dns._OPTHeader.options} defaults to empty list.
        """
        self.assertEqual(dns._OPTHeader().options, [])

    def test_optionsOverride(self):
        """
        L{dns._OPTHeader.options} can be overridden in the
        constructor.
        """
        h = dns._OPTHeader(options=[(1, 1, b'\x00')])
        self.assertEqual(h.options, [(1, 1, b'\x00')])

    def test_encode(self):
        """
        L{dns._OPTHeader.encode} packs the header fields and writes
        them to a file like object passed in as an argument.
        """
        b = BytesIO()

        OPTNonStandardAttributes.object().encode(b)
        self.assertEqual(
            b.getvalue(),
            OPTNonStandardAttributes.bytes()
        )

    def test_encodeWithOptions(self):
        """
        L{dns._OPTHeader.options} is a list of L{dns._OPTVariableOption}
        instances which are packed into the rdata area of the header.
        """
        h = OPTNonStandardAttributes.object()
        h.options = [
            dns._OPTVariableOption(1, b'foobarbaz'),
            dns._OPTVariableOption(2, b'qux'),
            ]
        b = BytesIO()

        h.encode(b)
        self.assertEqual(
            b.getvalue(),

            OPTNonStandardAttributes.bytes(excludeOptions=True) + (
                # RDLEN 20 = (2+2+9) for the first option + (2+2+3) for the
                # second: OPTION-CODE and OPTION-LENGTH are 2 bytes each.
                b'\x00\x14' # RDLEN 20

                b'\x00\x01' # OPTION-CODE
                b'\x00\x09' # OPTION-LENGTH
                b'foobarbaz' # OPTION-DATA

                b'\x00\x02' # OPTION-CODE
                b'\x00\x03' # OPTION-LENGTH
                b'qux' # OPTION-DATA
                ))

    def test_decode(self):
        """
        L{dns._OPTHeader.decode} unpacks the header fields from a file
        like object and populates the attributes of an existing
        L{dns._OPTHeader} instance.
        """
        decodedHeader = dns._OPTHeader()
        decodedHeader.decode(BytesIO(OPTNonStandardAttributes.bytes()))

        self.assertEqual(
            decodedHeader,
            OPTNonStandardAttributes.object())

    def test_decodeAllExpectedBytes(self):
        """
        L{dns._OPTHeader.decode} reads all the bytes of the record
        that is being decoded.
        """
        # Check that all the input data has been consumed.
        b = BytesIO(OPTNonStandardAttributes.bytes())

        decodedHeader = dns._OPTHeader()
        decodedHeader.decode(b)

        self.assertEqual(b.tell(), len(b.getvalue()))

    def test_decodeOnlyExpectedBytes(self):
        """
        L{dns._OPTHeader.decode} reads only the bytes from the current
        file position to the end of the record that is being
        decoded. Trailing bytes are not consumed.
        """
        b = BytesIO(OPTNonStandardAttributes.bytes()
                    + b'xxxx') # Trailing bytes

        decodedHeader = dns._OPTHeader()
        decodedHeader.decode(b)

        self.assertEqual(b.tell(), len(b.getvalue())-len(b'xxxx'))

    def test_decodeDiscardsName(self):
        """
        L{dns._OPTHeader.decode} discards the name which is encoded in
        the supplied bytes. The name attribute of the resulting
        L{dns._OPTHeader} instance will always be L{dns.Name(b'')}.
        """
        # NOTE(review): OPTNonStandardAttributes.bytes() does not act on
        # excludeName, so this buffer still begins with the root-name byte
        # and the appended example.com name lies past the end of the record
        # (RDLEN 0) where decode never reads it — confirm this still
        # exercises the intended name-discarding path.
        b = BytesIO(OPTNonStandardAttributes.bytes(excludeName=True)
                    + b'\x07example\x03com\x00')

        h = dns._OPTHeader()
        h.decode(b)
        self.assertEqual(h.name, dns.Name(b''))

    def test_decodeRdlengthTooShort(self):
        """
        L{dns._OPTHeader.decode} raises an exception if the supplied
        RDLEN is too short.
        """
        b = BytesIO(
            OPTNonStandardAttributes.bytes(excludeOptions=True) + (
                b'\x00\x05' # RDLEN 5 Too short - should be 6

                b'\x00\x01' # OPTION-CODE
                b'\x00\x02' # OPTION-LENGTH
                b'\x00\x00' # OPTION-DATA
                ))
        h = dns._OPTHeader()
        self.assertRaises(EOFError, h.decode, b)

    def test_decodeRdlengthTooLong(self):
        """
        L{dns._OPTHeader.decode} raises an exception if the supplied
        RDLEN is too long.
        """
        b = BytesIO(
            OPTNonStandardAttributes.bytes(excludeOptions=True) + (
                b'\x00\x07' # RDLEN 7 Too long - should be 6

                b'\x00\x01' # OPTION-CODE
                b'\x00\x02' # OPTION-LENGTH
                b'\x00\x00' # OPTION-DATA
                ))
        h = dns._OPTHeader()
        self.assertRaises(EOFError, h.decode, b)

    def test_decodeWithOptions(self):
        """
        If the OPT bytes contain variable options,
        L{dns._OPTHeader.decode} will populate a list
        L{dns._OPTHeader.options} with L{dns._OPTVariableOption}
        instances.
        """
        b = BytesIO(
            OPTNonStandardAttributes.bytes(excludeOptions=True) + (
                b'\x00\x14' # RDLEN 20

                b'\x00\x01' # OPTION-CODE
                b'\x00\x09' # OPTION-LENGTH
                b'foobarbaz' # OPTION-DATA

                b'\x00\x02' # OPTION-CODE
                b'\x00\x03' # OPTION-LENGTH
                b'qux' # OPTION-DATA
                ))

        h = dns._OPTHeader()
        h.decode(b)
        self.assertEqual(
            h.options,
            [dns._OPTVariableOption(1, b'foobarbaz'),
             dns._OPTVariableOption(2, b'qux'),]
            )

    def test_fromRRHeader(self):
        """
        L{_OPTHeader.fromRRHeader} accepts an L{RRHeader} instance and
        returns an L{_OPTHeader} instance whose attribute values have
        been derived from the C{cls}, C{ttl} and C{payload} attributes
        of the original header.
        """
        # For an OPT pseudo-record the generic TTL field packs the
        # extended RCODE (high byte), the EDNS version, and the DO bit;
        # the class field carries the UDP payload size.
        genericHeader = dns.RRHeader(
            b'example.com',
            type=dns.OPT,
            cls=0xffff,
            ttl=(0xfe << 24
                 | 0xfd << 16
                 | True << 15),
            payload=dns.UnknownRecord(b'\xff\xff\x00\x03abc'))

        decodedOptHeader = dns._OPTHeader.fromRRHeader(genericHeader)

        expectedOptHeader = dns._OPTHeader(
            udpPayloadSize=0xffff,
            extendedRCODE=0xfe,
            version=0xfd,
            dnssecOK=True,
            options=[dns._OPTVariableOption(code=0xffff, data=b'abc')])

        self.assertEqual(decodedOptHeader, expectedOptHeader)

    def test_repr(self):
        """
        L{dns._OPTHeader.__repr__} displays the name and type and all
        the fixed and extended header values of the OPT record.
        """
        self.assertEqual(
            repr(dns._OPTHeader()),
            '<_OPTHeader '
            'name= '
            'type=41 '
            'udpPayloadSize=4096 '
            'extendedRCODE=0 '
            'version=0 '
            'dnssecOK=False '
            'options=[]>')

    def test_equalityUdpPayloadSize(self):
        """
        Two L{OPTHeader} instances compare equal if they have the same
        udpPayloadSize.
        """
        self.assertNormalEqualityImplementation(
            dns._OPTHeader(udpPayloadSize=512),
            dns._OPTHeader(udpPayloadSize=512),
            dns._OPTHeader(udpPayloadSize=4096))

    def test_equalityExtendedRCODE(self):
        """
        Two L{OPTHeader} instances compare equal if they have the same
        extendedRCODE.
        """
        self.assertNormalEqualityImplementation(
            dns._OPTHeader(extendedRCODE=1),
            dns._OPTHeader(extendedRCODE=1),
            dns._OPTHeader(extendedRCODE=2))

    def test_equalityVersion(self):
        """
        Two L{OPTHeader} instances compare equal if they have the same
        version.
        """
        self.assertNormalEqualityImplementation(
            dns._OPTHeader(version=1),
            dns._OPTHeader(version=1),
            dns._OPTHeader(version=2))

    def test_equalityDnssecOK(self):
        """
        Two L{OPTHeader} instances compare equal if they have the same
        dnssecOK flags.
        """
        self.assertNormalEqualityImplementation(
            dns._OPTHeader(dnssecOK=True),
            dns._OPTHeader(dnssecOK=True),
            dns._OPTHeader(dnssecOK=False))

    def test_equalityOptions(self):
        """
        Two L{OPTHeader} instances compare equal if they have the same
        options.
        """
        self.assertNormalEqualityImplementation(
            dns._OPTHeader(options=[dns._OPTVariableOption(1, b'x')]),
            dns._OPTHeader(options=[dns._OPTVariableOption(1, b'x')]),
            dns._OPTHeader(options=[dns._OPTVariableOption(2, b'y')]))
class OPTVariableOptionTests(ComparisonTestsMixin, unittest.TestCase):
    """
    Tests for L{dns._OPTVariableOption}.
    """
    def test_interface(self):
        """
        L{dns._OPTVariableOption} implements L{dns.IEncodable}.
        """
        verifyClass(dns.IEncodable, dns._OPTVariableOption)

    def test_constructorArguments(self):
        """
        L{dns._OPTVariableOption.__init__} requires code and data
        arguments which are saved as public instance attributes.
        """
        h = dns._OPTVariableOption(1, b'x')
        self.assertEqual(h.code, 1)
        self.assertEqual(h.data, b'x')

    def test_repr(self):
        """
        L{dns._OPTVariableOption.__repr__} displays the code and data
        of the option.
        """
        self.assertEqual(
            repr(dns._OPTVariableOption(1, b'x')),
            '<_OPTVariableOption '
            'code=1 '
            "data=x"
            '>')

    def test_equality(self):
        """
        Two OPTVariableOption instances compare equal if they have the same
        code and data values.
        """
        # Inequality in either attribute alone must break equality.
        self.assertNormalEqualityImplementation(
            dns._OPTVariableOption(1, b'x'),
            dns._OPTVariableOption(1, b'x'),
            dns._OPTVariableOption(2, b'x'))

        self.assertNormalEqualityImplementation(
            dns._OPTVariableOption(1, b'x'),
            dns._OPTVariableOption(1, b'x'),
            dns._OPTVariableOption(1, b'y'))

    def test_encode(self):
        """
        L{dns._OPTVariableOption.encode} encodes the code and data
        instance attributes to a byte string which also includes the
        data length.
        """
        o = dns._OPTVariableOption(1, b'foobar')
        b = BytesIO()
        o.encode(b)
        self.assertEqual(
            b.getvalue(),
            b'\x00\x01' # OPTION-CODE 1
            b'\x00\x06' # OPTION-LENGTH 6 (big-endian length of OPTION-DATA)
            b'foobar' # OPTION-DATA
            )

    def test_decode(self):
        """
        L{dns._OPTVariableOption.decode} is a classmethod that decodes
        a byte string and returns a L{dns._OPTVariableOption} instance.
        """
        b = BytesIO(
            b'\x00\x01' # OPTION-CODE 1
            b'\x00\x06' # OPTION-LENGTH 6
            b'foobar' # OPTION-DATA
            )
        o = dns._OPTVariableOption()
        o.decode(b)
        self.assertEqual(o.code, 1)
        self.assertEqual(o.data, b'foobar')
class RaisedArgs(Exception):
    """
    An exception which can be raised by fakes to test that the fake is called
    with expected arguments.
    """
    def __init__(self, args, kwargs):
        """
        Record the call which triggered this exception.

        @param args: The positional args.
        @param kwargs: The keyword args.
        """
        # Exception.__init__ is deliberately not called; the recorded call
        # is carried directly on the instance attributes (self.args shadows
        # BaseException.args with the supplied tuple).
        self.kwargs = kwargs
        self.args = args
class MessageEmpty(object):
    """
    Generate byte string and constructor arguments for an empty
    L{dns._EDNSMessage}.
    """
    @classmethod
    def bytes(cls):
        """
        Bytes which are expected when encoding an instance constructed using
        C{kwargs} and which are expected to result in an identical instance when
        decoded.

        @return: The L{bytes} of a wire encoded message.
        """
        header = (
            b'\x01\x00'  # id: 256
            b'\x97'      # QR: 1, OPCODE: 2, AA: 0, TC: 0, RD: 1
            b'\x8f'      # RA: 1, Z, RCODE: 15
        )
        # The four 16-bit section counts (queries, answers, authorities,
        # additionals) are all zero.
        return header + b'\x00\x00' * 4

    @classmethod
    def kwargs(cls):
        """
        Keyword constructor arguments which are expected to result in an
        instance which returns C{bytes} when encoded.

        @return: A L{dict} of keyword arguments.
        """
        return {
            'id': 256,
            'answer': True,
            'opCode': dns.OP_STATUS,
            'auth': True,
            'trunc': True,
            'recDes': True,
            'recAv': True,
            'rCode': 15,
            'ednsVersion': None,
        }
class MessageTruncated(object):
    """
    An empty response message whose TR bit is set to 1.
    """
    @classmethod
    def bytes(cls):
        """
        Bytes which are expected when encoding an instance constructed using
        C{kwargs} and which are expected to result in an identical instance when
        decoded.

        @return: The L{bytes} of a wire encoded message.
        """
        header = (
            b'\x01\x00'  # ID: 256
            b'\x82'      # QR: 1, OPCODE: 0, AA: 0, TC: 1, RD: 0
            b'\x00'      # RA: 0, Z, RCODE: 0
        )
        # All four section counts are zero.
        return header + b'\x00\x00' * 4

    @classmethod
    def kwargs(cls):
        """
        Keyword constructor arguments which are expected to result in an
        instance which returns C{bytes} when encoded.

        @return: A L{dict} of keyword arguments.
        """
        return {
            'id': 256,
            'answer': 1,
            'opCode': 0,
            'auth': 0,
            'trunc': 1,
            'recDes': 0,
            'recAv': 0,
            'rCode': 0,
            'ednsVersion': None,
        }
class MessageNonAuthoritative(object):
    """
    A minimal non-authoritative message.

    The address in C{kwargs} must agree with the RDATA in C{bytes} so that
    encoding the kwargs-constructed instance yields exactly these bytes.
    """
    @classmethod
    def bytes(cls):
        """
        Bytes which are expected when encoding an instance constructed using
        C{kwargs} and which are expected to result in an identical instance when
        decoded.

        @return: The L{bytes} of a wire encoded message.
        """
        return (
            b'\x01\x00' # ID 256
            b'\x00' # QR: 0, OPCODE: 0, AA: 0, TC: 0, RD: 0
            b'\x00' # RA: 0, Z, RCODE: 0
            b'\x00\x00' # Query count
            b'\x00\x01' # Answer count
            b'\x00\x00' # Authorities count
            b'\x00\x00' # Additionals count

            # Answer
            b'\x00' # RR NAME (root)
            b'\x00\x01' # RR TYPE 1 (A)
            b'\x00\x01' # RR CLASS 1 (IN)
            b'\x00\x00\x00\x00' # RR TTL
            b'\x00\x04' # RDLENGTH 4
            b'\x01\x02\x03\x04' # IPv4 1.2.3.4
        )

    @classmethod
    def kwargs(cls):
        """
        Keyword constructor arguments which are expected to result in an
        instance which returns C{bytes} when encoded.

        @return: A L{dict} of keyword arguments.
        """
        return dict(
            id=256,
            auth=0,
            ednsVersion=None,
            answers=[
                dns.RRHeader(
                    b'',
                    # 1.2.3.4 matches the RDATA b'\x01\x02\x03\x04' in bytes().
                    payload=dns.Record_A('1.2.3.4', ttl=0),
                    auth=False)])
class MessageAuthoritative(object):
    """
    A minimal authoritative message.

    The address in C{kwargs} must agree with the RDATA in C{bytes} so that
    encoding the kwargs-constructed instance yields exactly these bytes.
    """
    @classmethod
    def bytes(cls):
        """
        Bytes which are expected when encoding an instance constructed using
        C{kwargs} and which are expected to result in an identical instance when
        decoded.

        @return: The L{bytes} of a wire encoded message.
        """
        return (
            b'\x01\x00' # ID: 256
            b'\x04' # QR: 0, OPCODE: 0, AA: 1, TC: 0, RD: 0
            b'\x00' # RA: 0, Z, RCODE: 0
            b'\x00\x00' # Query count
            b'\x00\x01' # Answer count
            b'\x00\x00' # Authorities count
            b'\x00\x00' # Additionals count

            # Answer
            b'\x00' # RR NAME (root)
            b'\x00\x01' # RR TYPE 1 (A)
            b'\x00\x01' # RR CLASS 1 (IN)
            b'\x00\x00\x00\x00' # RR TTL
            b'\x00\x04' # RDLENGTH 4
            b'\x01\x02\x03\x04' # IPv4 1.2.3.4
        )

    @classmethod
    def kwargs(cls):
        """
        Keyword constructor arguments which are expected to result in an
        instance which returns C{bytes} when encoded.

        @return: A L{dict} of keyword arguments.
        """
        return dict(
            id=256,
            auth=1,
            ednsVersion=None,
            answers=[
                dns.RRHeader(
                    b'',
                    # 1.2.3.4 matches the RDATA b'\x01\x02\x03\x04' in bytes().
                    payload=dns.Record_A('1.2.3.4', ttl=0),
                    auth=True)])
class MessageComplete:
    """
    An example of a fully populated non-edns response message.

    Contains name compression, answers, authority, and additional records.
    The record data in C{kwargs} must agree with the RDATA in C{bytes} so
    that encoding the kwargs-constructed instance yields exactly these bytes.
    """
    @classmethod
    def bytes(cls):
        """
        Bytes which are expected when encoding an instance constructed using
        C{kwargs} and which are expected to result in an identical instance when
        decoded.

        @return: The L{bytes} of a wire encoded message.
        """
        return (
            b'\x01\x00' # ID: 256
            b'\x95' # QR: 1, OPCODE: 2, AA: 1, TC: 0, RD: 1
            b'\x8f' # RA: 1, Z, RCODE: 15
            b'\x00\x01' # Query count
            b'\x00\x01' # Answer count
            b'\x00\x01' # Authorities count
            b'\x00\x01' # Additionals count

            # Query begins at Byte 12
            b'\x07example\x03com\x00' # QNAME
            b'\x00\x06' # QTYPE 6 (SOA)
            b'\x00\x01' # QCLASS 1 (IN)

            # Answers
            b'\xc0\x0c' # RR NAME (compression ref b12)
            b'\x00\x06' # RR TYPE 6 (SOA)
            b'\x00\x01' # RR CLASS 1 (IN)
            b'\xff\xff\xff\xff' # RR TTL
            b'\x00\x27' # RDLENGTH 39
            b'\x03ns1\xc0\x0c' # Mname (ns1.example.com (compression ref b15)
            b'\x0ahostmaster\xc0\x0c' # rname (hostmaster.example.com)
            b'\xff\xff\xff\xfe' # Serial
            b'\x7f\xff\xff\xfd' # Refresh
            b'\x7f\xff\xff\xfc' # Retry
            b'\x7f\xff\xff\xfb' # Expire
            b'\xff\xff\xff\xfa' # Minimum

            # Authority
            b'\xc0\x0c' # RR NAME (example.com compression ref b12)
            b'\x00\x02' # RR TYPE 2 (NS)
            b'\x00\x01' # RR CLASS 1 (IN)
            b'\xff\xff\xff\xff' # RR TTL
            b'\x00\x02' # RDLENGTH
            b'\xc0\x29' # RDATA (ns1.example.com (compression ref b41)

            # Additional
            b'\xc0\x29' # RR NAME (ns1.example.com compression ref b41)
            b'\x00\x01' # RR TYPE 1 (A)
            b'\x00\x01' # RR CLASS 1 (IN)
            b'\xff\xff\xff\xff' # RR TTL
            b'\x00\x04' # RDLENGTH
            b'\x05\x06\x07\x08' # RDATA 5.6.7.8
        )

    @classmethod
    def kwargs(cls):
        """
        Keyword constructor arguments which are expected to result in an
        instance which returns C{bytes} when encoded.

        @return: A L{dict} of keyword arguments.
        """
        return dict(
            id=256,
            answer=1,
            opCode=dns.OP_STATUS,
            auth=1,
            recDes=1,
            recAv=1,
            rCode=15,
            ednsVersion=None,
            queries=[dns.Query(b'example.com', dns.SOA)],
            answers=[
                dns.RRHeader(
                    b'example.com',
                    type=dns.SOA,
                    ttl=0xffffffff,
                    auth=True,
                    payload=dns.Record_SOA(
                        ttl=0xffffffff,
                        mname=b'ns1.example.com',
                        rname=b'hostmaster.example.com',
                        serial=0xfffffffe,
                        refresh=0x7ffffffd,
                        retry=0x7ffffffc,
                        expire=0x7ffffffb,
                        minimum=0xfffffffa,
                        ))],
            authority=[
                dns.RRHeader(
                    b'example.com',
                    type=dns.NS,
                    ttl=0xffffffff,
                    auth=True,
                    payload=dns.Record_NS(
                        'ns1.example.com', ttl=0xffffffff))],
            additional=[
                dns.RRHeader(
                    b'ns1.example.com',
                    type=dns.A,
                    ttl=0xffffffff,
                    auth=True,
                    # 5.6.7.8 matches the additional-section RDATA
                    # b'\x05\x06\x07\x08' in bytes().
                    payload=dns.Record_A(
                        '5.6.7.8', ttl=0xffffffff))])
class MessageEDNSQuery(object):
    """
    A minimal EDNS query message.
    """
    @classmethod
    def bytes(cls):
        """
        Bytes which are expected when encoding an instance constructed using
        C{kwargs} and which are expected to result in an identical instance when
        decoded.

        @return: The L{bytes} of a wire encoded message.
        """
        header = (
            b'\x00\x00'  # ID: 0
            b'\x00'      # QR: 0, OPCODE: 0, AA: 0, TC: 0, RD: 0
            b'\x00'      # RA: 0, Z, RCODE: 0
            b'\x00\x01'  # Queries count
            b'\x00\x00'  # Answers count
            b'\x00\x00'  # Authority count
            b'\x00\x01'  # Additionals count
        )
        query = (
            b'\x03www\x07example\x03com\x00'  # QNAME
            b'\x00\x01'  # QTYPE (A)
            b'\x00\x01'  # QCLASS (IN)
        )
        # EDNS information is carried as an OPT pseudo-record in the
        # additional section.
        opt = (
            b'\x00'      # NAME (.)
            b'\x00\x29'  # TYPE (OPT 41)
            b'\x10\x00'  # UDP Payload Size (4096)
            b'\x00'      # Extended RCODE
            b'\x03'      # EDNS version
            b'\x00\x00'  # DO: False + Z
            b'\x00\x00'  # RDLENGTH
        )
        return header + query + opt

    @classmethod
    def kwargs(cls):
        """
        Keyword constructor arguments which are expected to result in an
        instance which returns C{bytes} when encoded.

        @return: A L{dict} of keyword arguments.
        """
        return {
            'id': 0,
            'answer': 0,
            'opCode': dns.OP_QUERY,
            'auth': 0,
            'recDes': 0,
            'recAv': 0,
            'rCode': 0,
            'ednsVersion': 3,
            'dnssecOK': False,
            'queries': [dns.Query(b'www.example.com', dns.A)],
            'additional': [],
        }
class MessageEDNSComplete(object):
    """
    An example of a fully populated edns response message.

    Contains name compression, answers, authority, and additional records.
    The record data in C{kwargs} must agree with the RDATA in C{bytes} so
    that encoding the kwargs-constructed instance yields exactly these bytes.
    """
    @classmethod
    def bytes(cls):
        """
        Bytes which are expected when encoding an instance constructed using
        C{kwargs} and which are expected to result in an identical instance when
        decoded.

        @return: The L{bytes} of a wire encoded message.
        """
        return (
            b'\x01\x00' # ID: 256
            b'\x95' # QR: 1, OPCODE: 2, AA: 1, TC: 0, RD: 1
            b'\xbf' # RA: 1, Z: 0, AD: 1, CD: 1, RCODE: 15
            b'\x00\x01' # Query count
            b'\x00\x01' # Answer count
            b'\x00\x01' # Authorities count
            b'\x00\x02' # Additionals count

            # Query begins at Byte 12
            b'\x07example\x03com\x00' # QNAME
            b'\x00\x06' # QTYPE 6 (SOA)
            b'\x00\x01' # QCLASS 1 (IN)

            # Answers
            b'\xc0\x0c' # RR NAME (compression ref b12)
            b'\x00\x06' # RR TYPE 6 (SOA)
            b'\x00\x01' # RR CLASS 1 (IN)
            b'\xff\xff\xff\xff' # RR TTL
            b'\x00\x27' # RDLENGTH 39
            b'\x03ns1\xc0\x0c' # mname (ns1.example.com (compression ref b15)
            b'\x0ahostmaster\xc0\x0c' # rname (hostmaster.example.com)
            b'\xff\xff\xff\xfe' # Serial
            b'\x7f\xff\xff\xfd' # Refresh
            b'\x7f\xff\xff\xfc' # Retry
            b'\x7f\xff\xff\xfb' # Expire
            b'\xff\xff\xff\xfa' # Minimum

            # Authority
            b'\xc0\x0c' # RR NAME (example.com compression ref b12)
            b'\x00\x02' # RR TYPE 2 (NS)
            b'\x00\x01' # RR CLASS 1 (IN)
            b'\xff\xff\xff\xff' # RR TTL
            b'\x00\x02' # RDLENGTH
            b'\xc0\x29' # RDATA (ns1.example.com (compression ref b41)

            # Additional
            b'\xc0\x29' # RR NAME (ns1.example.com compression ref b41)
            b'\x00\x01' # RR TYPE 1 (A)
            b'\x00\x01' # RR CLASS 1 (IN)
            b'\xff\xff\xff\xff' # RR TTL
            b'\x00\x04' # RDLENGTH
            b'\x05\x06\x07\x08' # RDATA 5.6.7.8

            # Additional OPT record
            b'\x00' # NAME (.)
            b'\x00\x29' # TYPE (OPT 41)
            b'\x04\x00' # UDP Payload Size (1024)
            b'\x00' # Extended RCODE
            b'\x03' # EDNS version
            b'\x80\x00' # DO: True + Z
            b'\x00\x00' # RDLENGTH
        )

    @classmethod
    def kwargs(cls):
        """
        Keyword constructor arguments which are expected to result in an
        instance which returns C{bytes} when encoded.

        @return: A L{dict} of keyword arguments.
        """
        return dict(
            id=256,
            answer=1,
            opCode=dns.OP_STATUS,
            auth=1,
            trunc=0,
            recDes=1,
            recAv=1,
            rCode=15,
            ednsVersion=3,
            dnssecOK=True,
            authenticData=True,
            checkingDisabled=True,
            maxSize=1024,
            queries=[dns.Query(b'example.com', dns.SOA)],
            answers=[
                dns.RRHeader(
                    b'example.com',
                    type=dns.SOA,
                    ttl=0xffffffff,
                    auth=True,
                    payload=dns.Record_SOA(
                        ttl=0xffffffff,
                        mname=b'ns1.example.com',
                        rname=b'hostmaster.example.com',
                        serial=0xfffffffe,
                        refresh=0x7ffffffd,
                        retry=0x7ffffffc,
                        expire=0x7ffffffb,
                        minimum=0xfffffffa,
                        ))],
            authority=[
                dns.RRHeader(
                    b'example.com',
                    type=dns.NS,
                    ttl=0xffffffff,
                    auth=True,
                    payload=dns.Record_NS(
                        'ns1.example.com', ttl=0xffffffff))],
            additional=[
                dns.RRHeader(
                    b'ns1.example.com',
                    type=dns.A,
                    ttl=0xffffffff,
                    auth=True,
                    # 5.6.7.8 matches the additional-section RDATA
                    # b'\x05\x06\x07\x08' in bytes().
                    payload=dns.Record_A(
                        '5.6.7.8', ttl=0xffffffff))])
class MessageEDNSExtendedRCODE(object):
    """
    An example of an EDNS message with an extended RCODE.
    """
    @classmethod
    def bytes(cls):
        """
        Bytes which are expected when encoding an instance constructed using
        C{kwargs} and which are expected to result in an identical instance when
        decoded.

        @return: The L{bytes} of a wire encoded message.
        """
        header = (
            b'\x00\x00'  # ID: 0
            b'\x00'      # QR: 0, OPCODE: 0, AA: 0, TC: 0, RD: 0
            b'\x0c'      # RA: 0, Z, RCODE: 12 (low 4 bits of 0xabc)
            b'\x00\x00'  # Queries count
            b'\x00\x00'  # Answers count
            b'\x00\x00'  # Authority count
            b'\x00\x01'  # 1 additionals
        )
        opt = (
            b'\x00'      # NAME (.)
            b'\x00\x29'  # TYPE (OPT 41)
            b'\x10\x00'  # UDP Payload Size (4096)
            b'\xab'      # Extended RCODE: 171 (high 8 bits of 0xabc)
            b'\x00'      # EDNS version
            b'\x00\x00'  # DO: False + Z
            b'\x00\x00'  # RDLENGTH
        )
        return header + opt

    @classmethod
    def kwargs(cls):
        """
        Keyword constructor arguments which are expected to result in an
        instance which returns C{bytes} when encoded.

        @return: A L{dict} of keyword arguments.
        """
        return {
            'id': 0,
            'answer': False,
            'opCode': dns.OP_QUERY,
            'auth': False,
            'trunc': False,
            'recDes': False,
            'recAv': False,
            # Combined OPT extended RCODE (high 8 bits) + Message RCODE
            # (low 4 bits).
            'rCode': 0xabc,
            'ednsVersion': 0,
            'dnssecOK': False,
            'maxSize': 4096,
            'queries': [],
            'answers': [],
            'authority': [],
            'additional': [],
        }
class MessageComparable(FancyEqMixin, FancyStrMixin, object):
    """
    A wrapper around L{dns.Message} which is comparable so that it can be tested
    using some of the L{dns._EDNSMessage} tests.
    """
    # Attributes used both for equality comparison (FancyEqMixin) and for
    # the string representation (FancyStrMixin).
    showAttributes = compareAttributes = (
        'id', 'answer', 'opCode', 'auth', 'trunc',
        'recDes', 'recAv', 'rCode',
        'queries', 'answers', 'authority', 'additional')

    def __init__(self, original):
        # The wrapped dns.Message instance.
        self.original = original

    def __getattr__(self, key):
        # Delegate all other attribute access to the wrapped message; only
        # invoked for names not found on the wrapper itself.
        return getattr(self.original, key)
def verifyConstructorArgument(testCase, cls, argName, defaultVal, altVal,
                              attrName=None):
    """
    Verify that an attribute has the expected default value and that a
    corresponding argument passed to a constructor is assigned to that
    attribute.

    @param testCase: The L{TestCase} whose assert methods will be
        called.
    @type testCase: L{unittest.TestCase}

    @param cls: The constructor under test.
    @type cls: L{type}

    @param argName: The name of the constructor argument under test.
    @type argName: L{str}

    @param defaultVal: The expected default value of C{attrName} /
        C{argName}
    @type defaultVal: L{object}

    @param altVal: A value which is different from the default. Used to
        test that supplied constructor arguments are actually assigned to the
        correct attribute.
    @type altVal: L{object}

    @param attrName: The name of the attribute under test if different
        from C{argName}. Defaults to C{argName}
    @type attrName: L{str}
    """
    if attrName is None:
        attrName = argName

    # Observe the attribute value both without the argument (default) and
    # with the alternative value supplied, then compare both at once so a
    # failure reports the complete picture.
    expected = {'defaultVal': defaultVal, 'altVal': altVal}
    actual = {
        'defaultVal': getattr(cls(), attrName),
        'altVal': getattr(cls(**{argName: altVal}), attrName),
    }
    testCase.assertEqual(expected, actual)
class ConstructorTestsMixin(object):
    """
    Helper methods for verifying default attribute values and corresponding
    constructor arguments.
    """
    # Requires the mixing-in TestCase to define C{messageFactory}.
    def _verifyConstructorArgument(self, argName, defaultVal, altVal):
        """
        Wrap L{verifyConstructorArgument} to provide simpler interface for
        testing Message and _EDNSMessage constructor arguments.

        @param argName: The name of the constructor argument.
        @param defaultVal: The expected default value.
        @param altVal: An alternative value which is expected to be assigned to
            a correspondingly named attribute.
        """
        verifyConstructorArgument(testCase=self, cls=self.messageFactory,
                                  argName=argName, defaultVal=defaultVal,
                                  altVal=altVal)

    def _verifyConstructorFlag(self, argName, defaultVal):
        """
        Wrap L{verifyConstructorArgument} to provide simpler interface for
        testing _EDNSMessage constructor flags.

        @param argName: The name of the constructor flag argument
        @param defaultVal: The expected default value of the flag
        """
        assert defaultVal in (True, False)
        # For a boolean flag, the only meaningful alternative value is the
        # negation of the default.
        verifyConstructorArgument(testCase=self, cls=self.messageFactory,
                                  argName=argName, defaultVal=defaultVal,
                                  altVal=not defaultVal,)
class CommonConstructorTestsMixin(object):
    """
    Tests for constructor arguments and their associated attributes that are
    common to both L{twisted.names.dns._EDNSMessage} and L{dns.Message}.

    TestCase classes that use this mixin must provide a C{messageFactory} method
    which accepts any argment supported by L{dns.Message.__init__}.

    TestCases must also mixin ConstructorTestsMixin which provides some custom
    assertions for testing constructor arguments.
    """
    def test_id(self):
        """
        L{dns._EDNSMessage.id} defaults to C{0} and can be overridden in
        the constructor.
        """
        self._verifyConstructorArgument('id', defaultVal=0, altVal=1)

    def test_answer(self):
        """
        L{dns._EDNSMessage.answer} defaults to C{False} and can be overridden in
        the constructor.
        """
        self._verifyConstructorFlag('answer', defaultVal=False)

    def test_opCode(self):
        """
        L{dns._EDNSMessage.opCode} defaults to L{dns.OP_QUERY} and can be
        overridden in the constructor.
        """
        self._verifyConstructorArgument(
            'opCode', defaultVal=dns.OP_QUERY, altVal=dns.OP_STATUS)

    def test_auth(self):
        """
        L{dns._EDNSMessage.auth} defaults to C{False} and can be overridden in
        the constructor.
        """
        self._verifyConstructorFlag('auth', defaultVal=False)

    def test_trunc(self):
        """
        L{dns._EDNSMessage.trunc} defaults to C{False} and can be overridden in
        the constructor.
        """
        self._verifyConstructorFlag('trunc', defaultVal=False)

    def test_recDes(self):
        """
        L{dns._EDNSMessage.recDes} defaults to C{False} and can be overridden in
        the constructor.
        """
        self._verifyConstructorFlag('recDes', defaultVal=False)

    def test_recAv(self):
        """
        L{dns._EDNSMessage.recAv} defaults to C{False} and can be overridden in
        the constructor.
        """
        self._verifyConstructorFlag('recAv', defaultVal=False)

    def test_rCode(self):
        """
        L{dns._EDNSMessage.rCode} defaults to C{0} and can be overridden in the
        constructor.
        """
        self._verifyConstructorArgument('rCode', defaultVal=0, altVal=123)

    def test_maxSize(self):
        """
        L{dns._EDNSMessage.maxSize} defaults to C{512} and can be overridden in
        the constructor.
        """
        self._verifyConstructorArgument('maxSize', defaultVal=512, altVal=1024)

    def test_queries(self):
        """
        L{dns._EDNSMessage.queries} defaults to C{[]}.
        """
        self.assertEqual(self.messageFactory().queries, [])

    def test_answers(self):
        """
        L{dns._EDNSMessage.answers} defaults to C{[]}.
        """
        self.assertEqual(self.messageFactory().answers, [])

    def test_authority(self):
        """
        L{dns._EDNSMessage.authority} defaults to C{[]}.
        """
        self.assertEqual(self.messageFactory().authority, [])

    def test_additional(self):
        """
        L{dns._EDNSMessage.additional} defaults to C{[]}.
        """
        self.assertEqual(self.messageFactory().additional, [])
class EDNSMessageConstructorTests(ConstructorTestsMixin,
                                  CommonConstructorTestsMixin,
                                  unittest.SynchronousTestCase):
    """
    Tests for L{twisted.names.dns._EDNSMessage} constructor arguments that are
    shared with L{dns.Message}.
    """
    # All test methods come from CommonConstructorTestsMixin; only the
    # factory under test differs from MessageConstructorTests.
    messageFactory = dns._EDNSMessage
class MessageConstructorTests(ConstructorTestsMixin,
                              CommonConstructorTestsMixin,
                              unittest.SynchronousTestCase):
    """
    Tests for L{twisted.names.dns.Message} constructor arguments that are shared
    with L{dns._EDNSMessage}.
    """
    # All test methods come from CommonConstructorTestsMixin; only the
    # factory under test differs from EDNSMessageConstructorTests.
    messageFactory = dns.Message
class EDNSMessageSpecificsTestCase(ConstructorTestsMixin,
unittest.SynchronousTestCase):
"""
Tests for L{dns._EDNSMessage}.
These tests are for L{dns._EDNSMessage} APIs which are not shared with
L{dns.Message}.
"""
messageFactory = dns._EDNSMessage
    def test_ednsVersion(self):
        """
        L{dns._EDNSMessage.ednsVersion} defaults to C{0} and can be overridden
        in the constructor.
        """
        # None means "no EDNS", so it is a meaningful alternative value.
        self._verifyConstructorArgument(
            'ednsVersion', defaultVal=0, altVal=None)
    def test_dnssecOK(self):
        """
        L{dns._EDNSMessage.dnssecOK} defaults to C{False} and can be overridden
        in the constructor.
        """
        # The DO bit from the OPT record flags.
        self._verifyConstructorFlag('dnssecOK', defaultVal=False)
    def test_authenticData(self):
        """
        L{dns._EDNSMessage.authenticData} defaults to C{False} and can be
        overridden in the constructor.
        """
        # The AD header bit.
        self._verifyConstructorFlag('authenticData', defaultVal=False)
    def test_checkingDisabled(self):
        """
        L{dns._EDNSMessage.checkingDisabled} defaults to C{False} and can be
        overridden in the constructor.
        """
        # The CD header bit.
        self._verifyConstructorFlag('checkingDisabled', defaultVal=False)
    def test_queriesOverride(self):
        """
        L{dns._EDNSMessage.queries} can be overridden in the constructor.
        """
        msg = self.messageFactory(queries=[dns.Query(b'example.com')])

        self.assertEqual(
            msg.queries,
            [dns.Query(b'example.com')])
    def test_answersOverride(self):
        """
        L{dns._EDNSMessage.answers} can be overridden in the constructor.
        """
        msg = self.messageFactory(
            answers=[
                dns.RRHeader(
                    b'example.com',
                    payload=dns.Record_A('172.16.31.10'))])

        self.assertEqual(
            msg.answers,
            [dns.RRHeader(b'example.com', payload=dns.Record_A('172.16.31.10'))])
    def test_authorityOverride(self):
        """
        L{dns._EDNSMessage.authority} can be overridden in the constructor.
        """
        msg = self.messageFactory(
            authority=[
                dns.RRHeader(
                    b'example.com',
                    type=dns.SOA,
                    payload=dns.Record_SOA())])

        self.assertEqual(
            msg.authority,
            [dns.RRHeader(b'example.com', type=dns.SOA,
                          payload=dns.Record_SOA())])
    def test_additionalOverride(self):
        """
        L{dns._EDNSMessage.additional} can be overridden in the constructor.
        """
        msg = self.messageFactory(
            additional=[
                dns.RRHeader(
                    b'example.com',
                    payload=dns.Record_A('172.16.31.10'))])

        self.assertEqual(
            msg.additional,
            [dns.RRHeader(b'example.com', payload=dns.Record_A('172.16.31.10'))])
def test_reprDefaults(self):
"""
L{dns._EDNSMessage.__repr__} omits field values and sections which are
identical to their defaults. The id field value is always shown.
"""
self.assertEqual(
'<_EDNSMessage id=0>',
repr(self.messageFactory())
)
def test_reprFlagsIfSet(self):
"""
L{dns._EDNSMessage.__repr__} displays flags if they are L{True}.
"""
m = self.messageFactory(answer=True, auth=True, trunc=True, recDes=True,
recAv=True, authenticData=True,
checkingDisabled=True, dnssecOK=True)
self.assertEqual(
'<_EDNSMessage '
'id=0 '
'flags=answer,auth,trunc,recDes,recAv,authenticData,'
'checkingDisabled,dnssecOK'
'>',
repr(m),
)
def test_reprNonDefautFields(self):
"""
L{dns._EDNSMessage.__repr__} displays field values if they differ from
their defaults.
"""
m = self.messageFactory(id=10, opCode=20, rCode=30, maxSize=40,
ednsVersion=50)
self.assertEqual(
'<_EDNSMessage '
'id=10 '
'opCode=20 '
'rCode=30 '
'maxSize=40 '
'ednsVersion=50'
'>',
repr(m),
)
def test_reprNonDefaultSections(self):
"""
L{dns.Message.__repr__} displays sections which differ from their
defaults.
"""
m = self.messageFactory()
m.queries = [1, 2, 3]
m.answers = [4, 5, 6]
m.authority = [7, 8, 9]
m.additional = [10, 11, 12]
self.assertEqual(
'<_EDNSMessage '
'id=0 '
'queries=[1, 2, 3] '
'answers=[4, 5, 6] '
'authority=[7, 8, 9] '
'additional=[10, 11, 12]'
'>',
repr(m),
)
def test_fromStrCallsMessageFactory(self):
"""
L{dns._EDNSMessage.fromString} calls L{dns._EDNSMessage._messageFactory}
to create a new L{dns.Message} instance which is used to decode the
supplied bytes.
"""
class FakeMessageFactory(object):
"""
Fake message factory.
"""
def fromStr(self, *args, **kwargs):
"""
Fake fromStr method which raises the arguments it was passed.
@param args: positional arguments
@param kwargs: keyword arguments
"""
raise RaisedArgs(args, kwargs)
m = dns._EDNSMessage()
m._messageFactory = FakeMessageFactory
dummyBytes = object()
e = self.assertRaises(RaisedArgs, m.fromStr, dummyBytes)
self.assertEqual(
((dummyBytes,), {}),
(e.args, e.kwargs)
)
def test_fromStrCallsFromMessage(self):
"""
L{dns._EDNSMessage.fromString} calls L{dns._EDNSMessage._fromMessage}
with a L{dns.Message} instance
"""
m = dns._EDNSMessage()
class FakeMessageFactory():
"""
Fake message factory.
"""
def fromStr(self, bytes):
"""
A noop fake version of fromStr
@param bytes: the bytes to be decoded
"""
fakeMessage = FakeMessageFactory()
m._messageFactory = lambda: fakeMessage
def fakeFromMessage(*args, **kwargs):
raise RaisedArgs(args, kwargs)
m._fromMessage = fakeFromMessage
e = self.assertRaises(RaisedArgs, m.fromStr, b'')
self.assertEqual(
((fakeMessage,), {}),
(e.args, e.kwargs)
)
def test_toStrCallsToMessage(self):
"""
L{dns._EDNSMessage.toStr} calls L{dns._EDNSMessage._toMessage}
"""
m = dns._EDNSMessage()
def fakeToMessage(*args, **kwargs):
raise RaisedArgs(args, kwargs)
m._toMessage = fakeToMessage
e = self.assertRaises(RaisedArgs, m.toStr)
self.assertEqual(
((), {}),
(e.args, e.kwargs)
)
def test_toStrCallsToMessageToStr(self):
"""
L{dns._EDNSMessage.toStr} calls C{toStr} on the message returned by
L{dns._EDNSMessage._toMessage}.
"""
m = dns._EDNSMessage()
dummyBytes = object()
class FakeMessage(object):
"""
Fake Message
"""
def toStr(self):
"""
Fake toStr which returns dummyBytes.
@return: dummyBytes
"""
return dummyBytes
def fakeToMessage(*args, **kwargs):
return FakeMessage()
m._toMessage = fakeToMessage
self.assertEqual(
dummyBytes,
m.toStr()
)
class EDNSMessageEqualityTests(ComparisonTestsMixin, unittest.SynchronousTestCase):
    """
    Tests for equality between L{dns._EDNSMessage} instances.

    These tests will not work with L{dns.Message} because it does not use
    L{twisted.python.util.FancyEqMixin}.
    """
    # The message class under test.
    messageFactory = dns._EDNSMessage

    def test_id(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        id.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(id=1),
            self.messageFactory(id=1),
            self.messageFactory(id=2),
            )

    def test_answer(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        answer flag.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(answer=True),
            self.messageFactory(answer=True),
            self.messageFactory(answer=False),
            )

    def test_opCode(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        opCode.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(opCode=dns.OP_STATUS),
            self.messageFactory(opCode=dns.OP_STATUS),
            self.messageFactory(opCode=dns.OP_INVERSE),
            )

    def test_auth(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        auth flag.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(auth=True),
            self.messageFactory(auth=True),
            self.messageFactory(auth=False),
            )

    def test_trunc(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        trunc flag.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(trunc=True),
            self.messageFactory(trunc=True),
            self.messageFactory(trunc=False),
            )

    def test_recDes(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        recDes flag.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(recDes=True),
            self.messageFactory(recDes=True),
            self.messageFactory(recDes=False),
            )

    def test_recAv(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        recAv flag.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(recAv=True),
            self.messageFactory(recAv=True),
            self.messageFactory(recAv=False),
            )

    def test_rCode(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        rCode.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(rCode=16),
            self.messageFactory(rCode=16),
            self.messageFactory(rCode=15),
            )

    def test_ednsVersion(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        ednsVersion.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(ednsVersion=1),
            self.messageFactory(ednsVersion=1),
            self.messageFactory(ednsVersion=None),
            )

    def test_dnssecOK(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        dnssecOK.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(dnssecOK=True),
            self.messageFactory(dnssecOK=True),
            self.messageFactory(dnssecOK=False),
            )

    def test_authenticData(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        authenticData flags.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(authenticData=True),
            self.messageFactory(authenticData=True),
            self.messageFactory(authenticData=False),
            )

    def test_checkingDisabled(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        checkingDisabled flags.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(checkingDisabled=True),
            self.messageFactory(checkingDisabled=True),
            self.messageFactory(checkingDisabled=False),
            )

    def test_maxSize(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        maxSize.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(maxSize=2048),
            self.messageFactory(maxSize=2048),
            self.messageFactory(maxSize=1024),
            )

    def test_queries(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        queries.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(queries=[dns.Query(b'example.com')]),
            self.messageFactory(queries=[dns.Query(b'example.com')]),
            self.messageFactory(queries=[dns.Query(b'example.org')]),
            )

    def test_answers(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        answers.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(answers=[dns.RRHeader(
                b'example.com', payload=dns.Record_A('172.16.31.10'))]),
            self.messageFactory(answers=[dns.RRHeader(
                b'example.com', payload=dns.Record_A('172.16.31.10'))]),
            self.messageFactory(answers=[dns.RRHeader(
                b'example.org', payload=dns.Record_A('172.16.58.3'))]),
            )

    def test_authority(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        authority records.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(authority=[dns.RRHeader(
                b'example.com',
                type=dns.SOA, payload=dns.Record_SOA())]),
            self.messageFactory(authority=[dns.RRHeader(
                b'example.com',
                type=dns.SOA, payload=dns.Record_SOA())]),
            self.messageFactory(authority=[dns.RRHeader(
                b'example.org',
                type=dns.SOA, payload=dns.Record_SOA())]),
            )

    def test_additional(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        additional records.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(additional=[dns.RRHeader(
                b'example.com', payload=dns.Record_A('172.16.31.10'))]),
            self.messageFactory(additional=[dns.RRHeader(
                b'example.com', payload=dns.Record_A('172.16.31.10'))]),
            self.messageFactory(additional=[dns.RRHeader(
                b'example.org', payload=dns.Record_A('172.16.31.10'))]),
            )
class StandardEncodingTestsMixin(object):
    """
    Tests for the encoding and decoding of various standard (not EDNS) messages.

    These tests should work with both L{dns._EDNSMessage} and L{dns.Message}.

    TestCase classes that use this mixin must provide a C{messageFactory} method
    which accepts any argument supported by L{dns._EDNSMessage.__init__}.

    EDNS specific arguments may be discarded if not supported by the message
    class under construction.
    """
    def test_emptyMessageEncode(self):
        """
        An empty message can be encoded.
        """
        self.assertEqual(
            self.messageFactory(**MessageEmpty.kwargs()).toStr(),
            MessageEmpty.bytes())

    def test_emptyMessageDecode(self):
        """
        An empty message byte sequence can be decoded.
        """
        m = self.messageFactory()
        m.fromStr(MessageEmpty.bytes())
        self.assertEqual(m, self.messageFactory(**MessageEmpty.kwargs()))

    def test_completeQueryEncode(self):
        """
        A fully populated query message can be encoded.
        """
        self.assertEqual(
            self.messageFactory(**MessageComplete.kwargs()).toStr(),
            MessageComplete.bytes())

    def test_completeQueryDecode(self):
        """
        A fully populated message byte string can be decoded.
        """
        m = self.messageFactory()
        m.fromStr(MessageComplete.bytes())
        self.assertEqual(m, self.messageFactory(**MessageComplete.kwargs()))

    def test_NULL(self):
        """
        A I{NULL} record with an arbitrary payload can be encoded and decoded as
        part of a message.
        """
        bytes = b''.join([dns._ord2bytes(i) for i in range(256)])
        rec = dns.Record_NULL(bytes)
        rr = dns.RRHeader(b'testname', dns.NULL, payload=rec)
        msg1 = self.messageFactory()
        msg1.answers.append(rr)
        s = msg1.toStr()
        msg2 = self.messageFactory()
        msg2.fromStr(s)
        self.assertIsInstance(msg2.answers[0].payload, dns.Record_NULL)
        self.assertEqual(msg2.answers[0].payload.payload, bytes)

    def test_nonAuthoritativeMessageEncode(self):
        """
        If the message C{authoritative} attribute is set to 0, the encoded bytes
        will have AA bit 0.
        """
        self.assertEqual(
            self.messageFactory(**MessageNonAuthoritative.kwargs()).toStr(),
            MessageNonAuthoritative.bytes())

    def test_nonAuthoritativeMessageDecode(self):
        """
        The L{dns.RRHeader} instances created by a message from a
        non-authoritative message byte string are marked as not authoritative.
        """
        m = self.messageFactory()
        m.fromStr(MessageNonAuthoritative.bytes())
        self.assertEqual(
            m, self.messageFactory(**MessageNonAuthoritative.kwargs()))

    def test_authoritativeMessageEncode(self):
        """
        If the message C{authoritative} attribute is set to 1, the encoded bytes
        will have AA bit 1.
        """
        self.assertEqual(
            self.messageFactory(**MessageAuthoritative.kwargs()).toStr(),
            MessageAuthoritative.bytes())

    def test_authoritativeMessageDecode(self):
        """
        The message and its L{dns.RRHeader} instances created by C{decode} from
        an authoritative message byte string, are marked as authoritative.
        """
        m = self.messageFactory()
        m.fromStr(MessageAuthoritative.bytes())
        self.assertEqual(
            m, self.messageFactory(**MessageAuthoritative.kwargs()))

    def test_truncatedMessageEncode(self):
        """
        If the message C{trunc} attribute is set to 1 the encoded bytes will
        have TR bit 1.
        """
        self.assertEqual(
            self.messageFactory(**MessageTruncated.kwargs()).toStr(),
            MessageTruncated.bytes())

    def test_truncatedMessageDecode(self):
        """
        The message instance created by decoding a truncated message is marked
        as truncated.
        """
        m = self.messageFactory()
        m.fromStr(MessageTruncated.bytes())
        self.assertEqual(m, self.messageFactory(**MessageTruncated.kwargs()))
class EDNSMessageStandardEncodingTests(StandardEncodingTestsMixin,
                                       unittest.SynchronousTestCase):
    """
    Tests for the encoding and decoding of various standard (non-EDNS) messages
    by L{dns._EDNSMessage}.
    """
    # The mixin drives every test through this factory.
    messageFactory = dns._EDNSMessage
class MessageStandardEncodingTests(StandardEncodingTestsMixin,
                                   unittest.SynchronousTestCase):
    """
    Tests for the encoding and decoding of various standard (non-EDNS) messages
    by L{dns.Message}.
    """
    @staticmethod
    def messageFactory(**kwargs):
        """
        This function adapts constructor arguments expected by
        _EDNSMessage.__init__ to arguments suitable for use with the
        Message.__init__.

        Also handles the fact that unlike L{dns._EDNSMessage},
        L{dns.Message.__init__} does not accept queries, answers etc as
        arguments.

        Also removes any L{dns._EDNSMessage} specific arguments.

        @param kwargs: The keyword arguments which will be stripped of EDNS
            specific arguments before being passed to L{dns.Message.__init__}.

        @return: An L{dns.Message} instance.
        """
        queries = kwargs.pop('queries', [])
        answers = kwargs.pop('answers', [])
        authority = kwargs.pop('authority', [])
        additional = kwargs.pop('additional', [])
        kwargs.pop('ednsVersion', None)
        m = dns.Message(**kwargs)
        m.queries = queries
        m.answers = answers
        m.authority = authority
        m.additional = additional
        return MessageComparable(m)
class EDNSMessageEDNSEncodingTests(unittest.SynchronousTestCase):
    """
    Tests for the encoding and decoding of various EDNS messages.

    These tests will not work with L{dns.Message}.
    """
    messageFactory = dns._EDNSMessage

    def test_ednsMessageDecodeStripsOptRecords(self):
        """
        The L{_EDNSMessage} instance created by L{dns._EDNSMessage.fromStr}
        from an EDNS query never includes OPT records in the additional section.
        """
        m = self.messageFactory()
        m.fromStr(MessageEDNSQuery.bytes())
        self.assertEqual(m.additional, [])

    def test_ednsMessageDecodeMultipleOptRecords(self):
        """
        An L{_EDNSMessage} instance created from a byte string containing
        multiple I{OPT} records will discard all the C{OPT} records.

        C{ednsVersion} will be set to C{None}.

        @see: U{https://tools.ietf.org/html/rfc6891#section-6.1.1}
        """
        m = dns.Message()
        m.additional = [
            dns._OPTHeader(version=2),
            dns._OPTHeader(version=3)]
        ednsMessage = dns._EDNSMessage()
        ednsMessage.fromStr(m.toStr())
        self.assertEqual(ednsMessage.ednsVersion, None)

    def test_fromMessageCopiesSections(self):
        """
        L{dns._EDNSMessage._fromMessage} returns an L{_EDNSMessage} instance
        whose queries, answers, authority and additional lists are copies (not
        references to) the original message lists.
        """
        standardMessage = dns.Message()
        standardMessage.fromStr(MessageEDNSQuery.bytes())
        ednsMessage = dns._EDNSMessage._fromMessage(standardMessage)
        duplicates = []
        for attrName in ('queries', 'answers', 'authority', 'additional'):
            if (getattr(standardMessage, attrName)
                is getattr(ednsMessage, attrName)):
                duplicates.append(attrName)
        if duplicates:
            self.fail(
                'Message and _EDNSMessage shared references to the following '
                'section lists after decoding: %s' % (duplicates,))

    def test_toMessageCopiesSections(self):
        """
        L{dns._EDNSMessage.toStr} makes no in place changes to the message
        instance.
        """
        ednsMessage = dns._EDNSMessage(ednsVersion=1)
        ednsMessage.toStr()
        self.assertEqual(ednsMessage.additional, [])

    def test_optHeaderPosition(self):
        """
        L{dns._EDNSMessage} can decode OPT records, regardless of their position
        in the additional records section.

        "The OPT RR MAY be placed anywhere within the additional data section."

        @see: U{https://tools.ietf.org/html/rfc6891#section-6.1.1}
        """
        # XXX: We need an _OPTHeader.toRRHeader method. See #6779.
        b = BytesIO()
        optRecord = dns._OPTHeader(version=1)
        optRecord.encode(b)
        optRRHeader = dns.RRHeader()
        b.seek(0)
        optRRHeader.decode(b)
        m = dns.Message()
        m.additional = [optRRHeader]
        actualMessages = []
        actualMessages.append(dns._EDNSMessage._fromMessage(m).ednsVersion)
        m.additional.append(dns.RRHeader(type=dns.A))
        actualMessages.append(
            dns._EDNSMessage._fromMessage(m).ednsVersion)
        m.additional.insert(0, dns.RRHeader(type=dns.A))
        actualMessages.append(
            dns._EDNSMessage._fromMessage(m).ednsVersion)
        self.assertEqual(
            [1] * 3,
            actualMessages
        )

    def test_ednsDecode(self):
        """
        The L{_EDNSMessage} instance created by L{dns._EDNSMessage.fromStr}
        derives its edns specific values (C{ednsVersion}, etc) from the supplied
        OPT record.
        """
        m = self.messageFactory()
        m.fromStr(MessageEDNSComplete.bytes())
        self.assertEqual(m, self.messageFactory(**MessageEDNSComplete.kwargs()))

    def test_ednsEncode(self):
        """
        The byte string created by L{dns._EDNSMessage.toStr}
        encodes its edns specific values (C{ednsVersion}, etc) into an OPT
        record added to the additional section.
        """
        self.assertEqual(
            self.messageFactory(**MessageEDNSComplete.kwargs()).toStr(),
            MessageEDNSComplete.bytes())

    def test_extendedRcodeEncode(self):
        """
        The L{_EDNSMessage.toStr} encodes the extended I{RCODE} (>=16) by
        assigning the lower 4bits to the message RCODE field and the upper 4bits
        to the OPT pseudo record.
        """
        self.assertEqual(
            self.messageFactory(**MessageEDNSExtendedRCODE.kwargs()).toStr(),
            MessageEDNSExtendedRCODE.bytes())

    def test_extendedRcodeDecode(self):
        """
        The L{_EDNSMessage} instance created by L{dns._EDNSMessage.fromStr}
        derives RCODE from the supplied OPT record.
        """
        m = self.messageFactory()
        m.fromStr(MessageEDNSExtendedRCODE.bytes())
        self.assertEqual(
            m, self.messageFactory(**MessageEDNSExtendedRCODE.kwargs()))

    def test_extendedRcodeZero(self):
        """
        Note that EXTENDED-RCODE value 0 indicates that an unextended RCODE is
        in use (values 0 through 15).

        https://tools.ietf.org/html/rfc6891#section-6.1.3
        """
        ednsMessage = self.messageFactory(rCode=15, ednsVersion=0)
        standardMessage = ednsMessage._toMessage()
        self.assertEqual(
            (15, 0),
            (standardMessage.rCode, standardMessage.additional[0].extendedRCODE)
        )
class ResponseFromMessageTests(unittest.SynchronousTestCase):
    """
    Tests for L{dns._responseFromMessage}.
    """
    def test_responseFromMessageResponseType(self):
        """
        L{dns._responseFromMessage} is a constructor function which
        generates a new I{answer} message from an existing L{dns.Message} like
        instance.
        """
        request = dns.Message()
        response = dns._responseFromMessage(responseConstructor=dns.Message,
                                            message=request)
        self.assertIsNot(request, response)

    def test_responseType(self):
        """
        L{dns._responseFromMessage} returns a new instance of
        C{responseConstructor}.
        """
        class SuppliedClass(object):
            id = 1
            queries = []
        expectedClass = dns.Message
        self.assertIsInstance(
            dns._responseFromMessage(responseConstructor=expectedClass,
                                     message=SuppliedClass()),
            expectedClass
        )

    def test_responseId(self):
        """
        L{dns._responseFromMessage} copies the C{id} attribute of the original
        message.
        """
        self.assertEqual(
            1234,
            dns._responseFromMessage(responseConstructor=dns.Message,
                                     message=dns.Message(id=1234)).id
        )

    def test_responseAnswer(self):
        """
        L{dns._responseFromMessage} sets the C{answer} flag to L{True}
        """
        request = dns.Message()
        response = dns._responseFromMessage(responseConstructor=dns.Message,
                                            message=request)
        self.assertEqual(
            (False, True),
            (request.answer, response.answer)
        )

    def test_responseQueries(self):
        """
        L{dns._responseFromMessage} copies the C{queries} attribute of the
        original message.
        """
        request = dns.Message()
        expectedQueries = [object(), object(), object()]
        request.queries = expectedQueries[:]
        self.assertEqual(
            expectedQueries,
            dns._responseFromMessage(responseConstructor=dns.Message,
                                     message=request).queries
        )

    def test_responseKwargs(self):
        """
        L{dns._responseFromMessage} accepts other C{kwargs} which are assigned
        to the new message before it is returned.
        """
        self.assertEqual(
            123,
            dns._responseFromMessage(
                responseConstructor=dns.Message, message=dns.Message(),
                rCode=123).rCode
        )
class Foo(object):
    """
    An example class for use in L{dns._compactRepr} tests.

    It follows the pattern of initialiser settable flags, fields and sections
    found in L{dns.Message} and L{dns._EDNSMessage}.
    """
    def __init__(self,
                 field1=1, field2=2, alwaysShowField='AS',
                 flagTrue=True, flagFalse=False, section1=None):
        """
        Store the supplied flags, fields and sections as public attributes.
        """
        self.field1 = field1
        self.field2 = field2
        self.alwaysShowField = alwaysShowField
        self.flagTrue = flagTrue
        self.flagFalse = flagFalse
        # Guard against the shared-mutable-default pitfall: each instance
        # gets its own fresh list when section1 is not supplied.
        self.section1 = [] if section1 is None else section1

    def __repr__(self):
        """
        Generate a string representation using L{dns._compactRepr}.
        """
        return dns._compactRepr(
            self,
            alwaysShow=['alwaysShowField'],
            fieldNames=['field1', 'field2', 'alwaysShowField'],
            flagNames=['flagTrue', 'flagFalse'],
            sectionNames=['section1', 'section2'],
        )
class CompactReprTests(unittest.SynchronousTestCase):
    """
    Tests for L{dns._compactRepr}.
    """
    # Foo exposes the fields/flags/sections layout that _compactRepr expects.
    messageFactory = Foo

    def test_defaults(self):
        """
        L{dns._compactRepr} omits field values and sections which have the
        default value. Flags which are True are always shown.
        """
        self.assertEqual(
            "<Foo alwaysShowField='AS' flags=flagTrue>",
            repr(self.messageFactory())
        )

    def test_flagsIfSet(self):
        """
        L{dns._compactRepr} displays flags if they have a non-default value.
        """
        m = self.messageFactory(flagTrue=True, flagFalse=True)
        self.assertEqual(
            '<Foo '
            "alwaysShowField='AS' "
            'flags=flagTrue,flagFalse'
            '>',
            repr(m),
        )

    def test_nonDefautFields(self):
        """
        L{dns._compactRepr} displays field values if they differ from their
        defaults.
        """
        m = self.messageFactory(field1=10, field2=20)
        self.assertEqual(
            '<Foo '
            'field1=10 '
            'field2=20 '
            "alwaysShowField='AS' "
            'flags=flagTrue'
            '>',
            repr(m),
        )

    def test_nonDefaultSections(self):
        """
        L{dns._compactRepr} displays sections which differ from their defaults.
        """
        m = self.messageFactory()
        m.section1 = [1, 1, 1]
        m.section2 = [2, 2, 2]
        self.assertEqual(
            '<Foo '
            "alwaysShowField='AS' "
            'flags=flagTrue '
            'section1=[1, 1, 1] '
            'section2=[2, 2, 2]'
            '>',
            repr(m),
        )
# NOTE(review): stray '|' removed — it was a file-concatenation artifact and a
# syntax error. Everything below appears to be a second, duplicated copy of
# this test module's header and early test classes; confirm and deduplicate.
# test-case-name: twisted.names.test.test_dns
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for twisted.names.dns.
"""
from __future__ import division, absolute_import
from io import BytesIO
import struct
from zope.interface.verify import verifyClass
from twisted.python.failure import Failure
from twisted.python.util import FancyEqMixin, FancyStrMixin
from twisted.internet import address, task
from twisted.internet.error import CannotListenError, ConnectionDone
from twisted.trial import unittest
from twisted.names import dns
from twisted.test import proto_helpers
from twisted.test.testutils import ComparisonTestsMixin
# Every record class whose instances should be hashable and round-trippable;
# exercised collectively by the tests below.
RECORD_TYPES = [
    dns.Record_NS, dns.Record_MD, dns.Record_MF, dns.Record_CNAME,
    dns.Record_MB, dns.Record_MG, dns.Record_MR, dns.Record_PTR,
    dns.Record_DNAME, dns.Record_A, dns.Record_SOA, dns.Record_NULL,
    dns.Record_WKS, dns.Record_SRV, dns.Record_AFSDB, dns.Record_RP,
    dns.Record_HINFO, dns.Record_MINFO, dns.Record_MX, dns.Record_TXT,
    dns.Record_AAAA, dns.Record_A6, dns.Record_NAPTR, dns.UnknownRecord,
    ]
class Ord2ByteTests(unittest.TestCase):
    """
    Tests for L{dns._ord2bytes}.
    """
    def test_ord2byte(self):
        """
        L{dns._ord2bytes} accepts an integer and returns a byte string of length
        one with an ordinal value equal to the given integer.
        """
        self.assertEqual(b'\x10', dns._ord2bytes(0x10))
class Str2TimeTests(unittest.TestCase):
    """
    Tests for L{dns.str2time}.
    """
    def test_nonString(self):
        """
        When passed a non-string object, L{dns.str2time} returns it unmodified.
        """
        time = object()
        self.assertIs(time, dns.str2time(time))

    def test_seconds(self):
        """
        Passed a string giving a number of seconds, L{dns.str2time} returns the
        number of seconds represented.  For example, C{"10S"} represents C{10}
        seconds.
        """
        self.assertEqual(10, dns.str2time("10S"))

    def test_minutes(self):
        """
        Like C{test_seconds}, but for the C{"M"} suffix which multiplies the
        time value by C{60} (the number of seconds in a minute!).
        """
        self.assertEqual(2 * 60, dns.str2time("2M"))

    def test_hours(self):
        """
        Like C{test_seconds}, but for the C{"H"} suffix which multiplies the
        time value by C{3600}, the number of seconds in an hour.
        """
        self.assertEqual(3 * 3600, dns.str2time("3H"))

    def test_days(self):
        """
        Like L{test_seconds}, but for the C{"D"} suffix which multiplies the
        time value by C{86400}, the number of seconds in a day.
        """
        self.assertEqual(4 * 86400, dns.str2time("4D"))

    def test_weeks(self):
        """
        Like L{test_seconds}, but for the C{"W"} suffix which multiplies the
        time value by C{604800}, the number of seconds in a week.
        """
        self.assertEqual(5 * 604800, dns.str2time("5W"))

    def test_years(self):
        """
        Like L{test_seconds}, but for the C{"Y"} suffix which multiplies the
        time value by C{31536000}, the number of seconds in a year.
        """
        self.assertEqual(6 * 31536000, dns.str2time("6Y"))

    def test_invalidPrefix(self):
        """
        If a non-integer prefix is given, L{dns.str2time} raises L{ValueError}.
        """
        self.assertRaises(ValueError, dns.str2time, "fooS")
class NameTests(unittest.TestCase):
"""
Tests for L{Name}, the representation of a single domain name with support
for encoding into and decoding from DNS message format.
"""
def test_nonStringName(self):
"""
When constructed with a name which is neither C{bytes} nor C{str},
L{Name} raises L{TypeError}.
"""
self.assertRaises(TypeError, dns.Name, 123)
self.assertRaises(TypeError, dns.Name, object())
self.assertRaises(TypeError, dns.Name, [])
def test_unicodeName(self):
"""
L{dns.Name} automatically encodes unicode domain name using C{idna}
encoding.
"""
name = dns.Name(u'\u00e9chec.example.org')
self.assertIsInstance(name.name, bytes)
self.assertEqual(b'xn--chec-9oa.example.org', name.name)
def test_decode(self):
"""
L{Name.decode} populates the L{Name} instance with name information read
from the file-like object passed to it.
"""
n = dns.Name()
n.decode(BytesIO(b"\x07example\x03com\x00"))
self.assertEqual(n.name, b"example.com")
def test_encode(self):
"""
L{Name.encode} encodes its name information and writes it to the
file-like object passed to it.
"""
name = dns.Name(b"foo.example.com")
stream = BytesIO()
name.encode(stream)
self.assertEqual(stream.getvalue(), b"\x03foo\x07example\x03com\x00")
def test_encodeWithCompression(self):
"""
If a compression dictionary is passed to it, L{Name.encode} uses offset
information from it to encode its name with references to existing
labels in the stream instead of including another copy of them in the
output. It also updates the compression dictionary with the location of
the name it writes to the stream.
"""
name = dns.Name(b"foo.example.com")
compression = {b"example.com": 0x17}
# Some bytes already encoded into the stream for this message
previous = b"some prefix to change .tell()"
stream = BytesIO()
stream.write(previous)
# The position at which the encoded form of this new name will appear in
# the stream.
expected = len(previous) + dns.Message.headerSize
name.encode(stream, compression)
self.assertEqual(
b"\x03foo\xc0\x17",
stream.getvalue()[len(previous):])
self.assertEqual(
{b"example.com": 0x17, b"foo.example.com": expected},
compression)
def test_unknown(self):
"""
A resource record of unknown type and class is parsed into an
L{UnknownRecord} instance with its data preserved, and an
L{UnknownRecord} instance is serialized to a string equal to the one it
was parsed from.
"""
wire = (
b'\x01\x00' # Message ID
b'\x00' # answer bit, opCode nibble, auth bit, trunc bit, recursive
# bit
b'\x00' # recursion bit, empty bit, authenticData bit,
# checkingDisabled bit, response code nibble
b'\x00\x01' # number of queries
b'\x00\x01' # number of answers
b'\x00\x00' # number of authorities
b'\x00\x01' # number of additionals
# query
b'\x03foo\x03bar\x00' # foo.bar
b'\xde\xad' # type=0xdead
b'\xbe\xef' # cls=0xbeef
# 1st answer
b'\xc0\x0c' # foo.bar - compressed
b'\xde\xad' # type=0xdead
b'\xbe\xef' # cls=0xbeef
b'\x00\x00\x01\x01' # ttl=257
b'\x00\x08somedata' # some payload data
# 1st additional
b'\x03baz\x03ban\x00' # baz.ban
b'\x00\x01' # type=A
b'\x00\x01' # cls=IN
b'\x00\x00\x01\x01' # ttl=257
b'\x00\x04' # len=4
b'\x01\x02\x03\x04' # 172.16.31.10
)
msg = dns.Message()
msg.fromStr(wire)
self.assertEqual(msg.queries, [
dns.Query(b'foo.bar', type=0xdead, cls=0xbeef),
])
self.assertEqual(msg.answers, [
dns.RRHeader(b'foo.bar', type=0xdead, cls=0xbeef, ttl=257,
payload=dns.UnknownRecord(b'somedata', ttl=257)),
])
self.assertEqual(msg.additional, [
dns.RRHeader(b'baz.ban', type=dns.A, cls=dns.IN, ttl=257,
payload=dns.Record_A('172.16.31.10', ttl=257)),
])
enc = msg.toStr()
self.assertEqual(enc, wire)
def test_decodeWithCompression(self):
"""
If the leading byte of an encoded label (in bytes read from a stream
passed to L{Name.decode}) has its two high bits set, the next byte is
treated as a pointer to another label in the stream and that label is
included in the name being decoded.
"""
# Slightly modified version of the example from RFC 1035, section 4.1.4.
stream = BytesIO(
b"x" * 20 +
b"\x01f\x03isi\x04arpa\x00"
b"\x03foo\xc0\x14"
b"\x03bar\xc0\x20")
stream.seek(20)
name = dns.Name()
name.decode(stream)
# Verify we found the first name in the stream and that the stream
# position is left at the first byte after the decoded name.
self.assertEqual(b"f.isi.arpa", name.name)
self.assertEqual(32, stream.tell())
# Get the second name from the stream and make the same assertions.
name.decode(stream)
self.assertEqual(name.name, b"foo.f.isi.arpa")
self.assertEqual(38, stream.tell())
# Get the third and final name
name.decode(stream)
self.assertEqual(name.name, b"bar.foo.f.isi.arpa")
self.assertEqual(44, stream.tell())
def test_rejectCompressionLoop(self):
"""
L{Name.decode} raises L{ValueError} if the stream passed to it includes
a compression pointer which forms a loop, causing the name to be
undecodable.
"""
name = dns.Name()
stream = BytesIO(b"\xc0\x00")
self.assertRaises(ValueError, name.decode, stream)
class RoundtripDNSTestCase(unittest.TestCase):
    """
    Encoding and then decoding various objects.
    """
    # Sample domain names used as encode/decode fixtures by several tests.
    names = [b"example.org", b"go-away.fish.tv", b"23strikesback.net"]
    def testName(self):
        """
        A L{dns.Name} written with C{encode} can be read back with C{decode}
        yielding an equal name.
        """
        for n in self.names:
            # encode the name
            f = BytesIO()
            dns.Name(n).encode(f)
            # decode the name
            f.seek(0, 0)
            result = dns.Name()
            result.decode(f)
            self.assertEqual(result.name, n)
    def test_query(self):
        """
        L{dns.Query.encode} returns a byte string representing the fields of the
        query which can be decoded into a new L{dns.Query} instance using
        L{dns.Query.decode}.
        """
        for n in self.names:
            # Exercise every sample name against a range of record types and
            # classes to cover the (name, type, class) wire triple.
            for dnstype in range(1, 17):
                for dnscls in range(1, 5):
                    # encode the query
                    f = BytesIO()
                    dns.Query(n, dnstype, dnscls).encode(f)
                    # decode the result
                    f.seek(0, 0)
                    result = dns.Query()
                    result.decode(f)
                    self.assertEqual(result.name.name, n)
                    self.assertEqual(result.type, dnstype)
                    self.assertEqual(result.cls, dnscls)
    def test_resourceRecordHeader(self):
        """
        L{dns.RRHeader.encode} encodes the record header's information and
        writes it to the file-like object passed to it and
        L{dns.RRHeader.decode} reads from a file-like object to re-construct a
        L{dns.RRHeader} instance.
        """
        # encode the RR
        f = BytesIO()
        dns.RRHeader(b"test.org", 3, 4, 17).encode(f)
        # decode the result
        f.seek(0, 0)
        result = dns.RRHeader()
        result.decode(f)
        self.assertEqual(result.name, dns.Name(b"test.org"))
        self.assertEqual(result.type, 3)
        self.assertEqual(result.cls, 4)
        self.assertEqual(result.ttl, 17)
    def test_resources(self):
        """
        L{dns.SimpleRecord.encode} encodes the record's name information and
        writes it to the file-like object passed to it and
        L{dns.SimpleRecord.decode} reads from a file-like object to re-construct
        a L{dns.SimpleRecord} instance.
        """
        # A mix of names chosen to exercise compression, case preservation,
        # many short labels, and a single-label name.
        names = (
            b"this.are.test.name",
            b"will.compress.will.this.will.name.will.hopefully",
            b"test.CASE.preSErVatIOn.YeAH",
            b"a.s.h.o.r.t.c.a.s.e.t.o.t.e.s.t",
            b"singleton"
        )
        for s in names:
            f = BytesIO()
            dns.SimpleRecord(s).encode(f)
            f.seek(0, 0)
            result = dns.SimpleRecord()
            result.decode(f)
            self.assertEqual(result.name, dns.Name(s))
    def test_hashable(self):
        """
        Instances of all record types are hashable.
        """
        for k in RECORD_TYPES:
            # Two default-constructed instances compare equal, so they must
            # also hash equal.
            k1, k2 = k(), k()
            hk1 = hash(k1)
            hk2 = hash(k2)
            self.assertEqual(hk1, hk2, "%s != %s (for %s)" % (hk1,hk2,k))
    def test_Charstr(self):
        """
        Test L{dns.Charstr} encode and decode.
        """
        for n in self.names:
            # encode the name
            f = BytesIO()
            dns.Charstr(n).encode(f)
            # decode the name
            f.seek(0, 0)
            result = dns.Charstr()
            result.decode(f)
            self.assertEqual(result.string, n)
    def _recordRoundtripTest(self, record):
        """
        Assert that encoding C{record} and then decoding the resulting bytes
        creates a record which compares equal to C{record}.
        """
        stream = BytesIO()
        record.encode(stream)
        # Decode from the same buffer, telling the decoder how many bytes the
        # encoded form occupies.
        length = stream.tell()
        stream.seek(0, 0)
        replica = record.__class__()
        replica.decode(stream, length)
        self.assertEqual(record, replica)
    def test_SOA(self):
        """
        The byte stream written by L{dns.Record_SOA.encode} can be used by
        L{dns.Record_SOA.decode} to reconstruct the state of the original
        L{dns.Record_SOA} instance.
        """
        self._recordRoundtripTest(
            dns.Record_SOA(
                mname=b'foo', rname=b'bar', serial=12, refresh=34,
                retry=56, expire=78, minimum=90))
    def test_A(self):
        """
        The byte stream written by L{dns.Record_A.encode} can be used by
        L{dns.Record_A.decode} to reconstruct the state of the original
        L{dns.Record_A} instance.
        """
        self._recordRoundtripTest(dns.Record_A('172.16.31.10'))
    def test_NULL(self):
        """
        The byte stream written by L{dns.Record_NULL.encode} can be used by
        L{dns.Record_NULL.decode} to reconstruct the state of the original
        L{dns.Record_NULL} instance.
        """
        self._recordRoundtripTest(dns.Record_NULL(b'foo bar'))
    def test_WKS(self):
        """
        The byte stream written by L{dns.Record_WKS.encode} can be used by
        L{dns.Record_WKS.decode} to reconstruct the state of the original
        L{dns.Record_WKS} instance.
        """
        self._recordRoundtripTest(dns.Record_WKS('172.16.31.10', 3, b'xyz'))
    def test_AAAA(self):
        """
        The byte stream written by L{dns.Record_AAAA.encode} can be used by
        L{dns.Record_AAAA.decode} to reconstruct the state of the original
        L{dns.Record_AAAA} instance.
        """
        self._recordRoundtripTest(dns.Record_AAAA('::1'))
    def test_A6(self):
        """
        The byte stream written by L{dns.Record_A6.encode} can be used by
        L{dns.Record_A6.decode} to reconstruct the state of the original
        L{dns.Record_A6} instance.
        """
        self._recordRoundtripTest(dns.Record_A6(8, '::1:2', b'foo'))
    def test_SRV(self):
        """
        The byte stream written by L{dns.Record_SRV.encode} can be used by
        L{dns.Record_SRV.decode} to reconstruct the state of the original
        L{dns.Record_SRV} instance.
        """
        self._recordRoundtripTest(dns.Record_SRV(
            priority=1, weight=2, port=3, target=b'example.com'))
    def test_NAPTR(self):
        """
        Test L{dns.Record_NAPTR} encode and decode.
        """
        # Each tuple is (order, preference, flags, service, regexp,
        # replacement); one entry uses a regexp, the other a replacement name.
        naptrs = [
            (100, 10, b"u", b"sip+E2U",
             b"!^.*$!sip:<EMAIL>!", b""),
            (100, 50, b"s", b"http+I2L+I2C+I2R",
             b"", b"_http._tcp.gatech.edu")]
        for (order, preference, flags, service, regexp, replacement) in naptrs:
            rin = dns.Record_NAPTR(order, preference, flags, service, regexp,
                                   replacement)
            e = BytesIO()
            rin.encode(e)
            e.seek(0, 0)
            rout = dns.Record_NAPTR()
            rout.decode(e)
            self.assertEqual(rin.order, rout.order)
            self.assertEqual(rin.preference, rout.preference)
            self.assertEqual(rin.flags, rout.flags)
            self.assertEqual(rin.service, rout.service)
            self.assertEqual(rin.regexp, rout.regexp)
            self.assertEqual(rin.replacement.name, rout.replacement.name)
            self.assertEqual(rin.ttl, rout.ttl)
    def test_AFSDB(self):
        """
        The byte stream written by L{dns.Record_AFSDB.encode} can be used by
        L{dns.Record_AFSDB.decode} to reconstruct the state of the original
        L{dns.Record_AFSDB} instance.
        """
        self._recordRoundtripTest(dns.Record_AFSDB(
            subtype=3, hostname=b'example.com'))
    def test_RP(self):
        """
        The byte stream written by L{dns.Record_RP.encode} can be used by
        L{dns.Record_RP.decode} to reconstruct the state of the original
        L{dns.Record_RP} instance.
        """
        self._recordRoundtripTest(dns.Record_RP(
            mbox=b'alice.example.com', txt=b'example.com'))
    def test_HINFO(self):
        """
        The byte stream written by L{dns.Record_HINFO.encode} can be used by
        L{dns.Record_HINFO.decode} to reconstruct the state of the original
        L{dns.Record_HINFO} instance.
        """
        self._recordRoundtripTest(dns.Record_HINFO(cpu=b'fast', os=b'great'))
    def test_MINFO(self):
        """
        The byte stream written by L{dns.Record_MINFO.encode} can be used by
        L{dns.Record_MINFO.decode} to reconstruct the state of the original
        L{dns.Record_MINFO} instance.
        """
        self._recordRoundtripTest(dns.Record_MINFO(
            rmailbx=b'foo', emailbx=b'bar'))
    def test_MX(self):
        """
        The byte stream written by L{dns.Record_MX.encode} can be used by
        L{dns.Record_MX.decode} to reconstruct the state of the original
        L{dns.Record_MX} instance.
        """
        self._recordRoundtripTest(dns.Record_MX(
            preference=1, name=b'example.com'))
    def test_TXT(self):
        """
        The byte stream written by L{dns.Record_TXT.encode} can be used by
        L{dns.Record_TXT.decode} to reconstruct the state of the original
        L{dns.Record_TXT} instance.
        """
        self._recordRoundtripTest(dns.Record_TXT(b'foo', b'bar'))
# Pre-encoded 12-byte DNS message headers (all counts zero) used by
# MessageTestCase to verify that the AD (authentic data) and CD (checking
# disabled) header bits survive encoding and decoding.
MESSAGE_AUTHENTIC_DATA_BYTES = (
    b'\x00\x00' # ID
    b'\x00' #
    b'\x20' # RA, Z, AD=1, CD, RCODE
    b'\x00\x00' # Query count
    b'\x00\x00' # Answer count
    b'\x00\x00' # Authority count
    b'\x00\x00' # Additional count
    )
MESSAGE_CHECKING_DISABLED_BYTES = (
    b'\x00\x00' # ID
    b'\x00' #
    b'\x10' # RA, Z, AD, CD=1, RCODE
    b'\x00\x00' # Query count
    b'\x00\x00' # Answer count
    b'\x00\x00' # Authority count
    b'\x00\x00' # Additional count
    )
class MessageTestCase(unittest.SynchronousTestCase):
    """
    Tests for L{twisted.names.dns.Message}.
    """
    def test_authenticDataDefault(self):
        """
        L{dns.Message.authenticData} has default value 0.
        """
        self.assertEqual(dns.Message().authenticData, 0)
    def test_authenticDataOverride(self):
        """
        L{dns.Message.__init__} accepts a C{authenticData} argument which
        is assigned to L{dns.Message.authenticData}.
        """
        self.assertEqual(dns.Message(authenticData=1).authenticData, 1)
    def test_authenticDataEncode(self):
        """
        L{dns.Message.toStr} encodes L{dns.Message.authenticData} into
        byte4 of the byte string.
        """
        self.assertEqual(
            dns.Message(authenticData=1).toStr(),
            MESSAGE_AUTHENTIC_DATA_BYTES
        )
    def test_authenticDataDecode(self):
        """
        L{dns.Message.fromStr} decodes byte4 and assigns bit3 to
        L{dns.Message.authenticData}.
        """
        m = dns.Message()
        m.fromStr(MESSAGE_AUTHENTIC_DATA_BYTES)
        self.assertEqual(m.authenticData, 1)
    def test_checkingDisabledDefault(self):
        """
        L{dns.Message.checkingDisabled} has default value 0.
        """
        self.assertEqual(dns.Message().checkingDisabled, 0)
    def test_checkingDisabledOverride(self):
        """
        L{dns.Message.__init__} accepts a C{checkingDisabled} argument which
        is assigned to L{dns.Message.checkingDisabled}.
        """
        self.assertEqual(
            dns.Message(checkingDisabled=1).checkingDisabled, 1)
    def test_checkingDisabledEncode(self):
        """
        L{dns.Message.toStr} encodes L{dns.Message.checkingDisabled} into
        byte4 of the byte string.
        """
        self.assertEqual(
            dns.Message(checkingDisabled=1).toStr(),
            MESSAGE_CHECKING_DISABLED_BYTES
        )
    def test_checkingDisabledDecode(self):
        """
        L{dns.Message.fromStr} decodes byte4 and assigns bit4 to
        L{dns.Message.checkingDisabled}.
        """
        m = dns.Message()
        m.fromStr(MESSAGE_CHECKING_DISABLED_BYTES)
        self.assertEqual(m.checkingDisabled, 1)
    def test_reprDefaults(self):
        """
        L{dns.Message.__repr__} omits field values and sections which are
        identical to their defaults. The id field value is always shown.
        """
        self.assertEqual(
            '<Message id=0>',
            repr(dns.Message())
        )
    def test_reprFlagsIfSet(self):
        """
        L{dns.Message.__repr__} displays flags if they are L{True}.
        """
        m = dns.Message(answer=True, auth=True, trunc=True, recDes=True,
                        recAv=True, authenticData=True, checkingDisabled=True)
        self.assertEqual(
            '<Message '
            'id=0 '
            'flags=answer,auth,trunc,recDes,recAv,authenticData,'
            'checkingDisabled'
            '>',
            repr(m),
        )
    def test_reprNonDefautFields(self):
        """
        L{dns.Message.__repr__} displays field values if they differ from their
        defaults.
        """
        m = dns.Message(id=10, opCode=20, rCode=30, maxSize=40)
        self.assertEqual(
            '<Message '
            'id=10 '
            'opCode=20 '
            'rCode=30 '
            'maxSize=40'
            '>',
            repr(m),
        )
    def test_reprNonDefaultSections(self):
        """
        L{dns.Message.__repr__} displays sections which differ from their
        defaults.
        """
        m = dns.Message()
        m.queries = [1, 2, 3]
        m.answers = [4, 5, 6]
        m.authority = [7, 8, 9]
        m.additional = [10, 11, 12]
        self.assertEqual(
            '<Message '
            'id=0 '
            'queries=[1, 2, 3] '
            'answers=[4, 5, 6] '
            'authority=[7, 8, 9] '
            'additional=[10, 11, 12]'
            '>',
            repr(m),
        )
    def testEmptyMessage(self):
        """
        Test that a message which has been truncated causes an EOFError to
        be raised when it is parsed.
        """
        msg = dns.Message()
        self.assertRaises(EOFError, msg.fromStr, b'')
    def test_emptyQuery(self):
        """
        Test that bytes representing an empty query message can be decoded
        as such.
        """
        msg = dns.Message()
        msg.fromStr(
            b'\x01\x00' # Message ID
            b'\x00' # answer bit, opCode nibble, auth bit, trunc bit, recursive bit
            b'\x00' # recursion bit, empty bit, authenticData bit,
                    # checkingDisabled bit, response code nibble
            b'\x00\x00' # number of queries
            b'\x00\x00' # number of answers
            b'\x00\x00' # number of authorities
            b'\x00\x00' # number of additionals
            )
        self.assertEqual(msg.id, 256)
        self.assertFalse(
            msg.answer, "Message was not supposed to be an answer.")
        self.assertEqual(msg.opCode, dns.OP_QUERY)
        self.assertFalse(
            msg.auth, "Message was not supposed to be authoritative.")
        self.assertFalse(
            msg.trunc, "Message was not supposed to be truncated.")
        self.assertEqual(msg.queries, [])
        self.assertEqual(msg.answers, [])
        self.assertEqual(msg.authority, [])
        self.assertEqual(msg.additional, [])
    def test_NULL(self):
        """
        A I{NULL} record with an arbitrary payload can be encoded and decoded as
        part of a L{dns.Message}.
        """
        # Use every possible byte value in the payload to prove the NULL
        # record body is carried opaquely.
        bytes = b''.join([dns._ord2bytes(i) for i in range(256)])
        rec = dns.Record_NULL(bytes)
        rr = dns.RRHeader(b'testname', dns.NULL, payload=rec)
        msg1 = dns.Message()
        msg1.answers.append(rr)
        s = BytesIO()
        msg1.encode(s)
        s.seek(0, 0)
        msg2 = dns.Message()
        msg2.decode(s)
        self.assertIsInstance(msg2.answers[0].payload, dns.Record_NULL)
        self.assertEqual(msg2.answers[0].payload.payload, bytes)
    def test_lookupRecordTypeDefault(self):
        """
        L{Message.lookupRecordType} returns C{dns.UnknownRecord} if it is
        called with an integer which doesn't correspond to any known record
        type.
        """
        # 65280 is the first value in the range reserved for private
        # use, so it shouldn't ever conflict with an officially
        # allocated value.
        self.assertIs(dns.Message().lookupRecordType(65280), dns.UnknownRecord)
    def test_nonAuthoritativeMessage(self):
        """
        The L{RRHeader} instances created by L{Message} from a non-authoritative
        message are marked as not authoritative.
        """
        buf = BytesIO()
        answer = dns.RRHeader(payload=dns.Record_A('172.16.31.10', ttl=0))
        answer.encode(buf)
        message = dns.Message()
        message.fromStr(
            b'\x01\x00' # Message ID
            # answer bit, opCode nibble, auth bit, trunc bit, recursive bit
            b'\x00'
            # recursion bit, empty bit, authenticData bit,
            # checkingDisabled bit, response code nibble
            b'\x00'
            b'\x00\x00' # number of queries
            b'\x00\x01' # number of answers
            b'\x00\x00' # number of authorities
            b'\x00\x00' # number of additionals
            + buf.getvalue()
            )
        self.assertEqual(message.answers, [answer])
        self.assertFalse(message.answers[0].auth)
    def test_authoritativeMessage(self):
        """
        The L{RRHeader} instances created by L{Message} from an authoritative
        message are marked as authoritative.
        """
        buf = BytesIO()
        answer = dns.RRHeader(payload=dns.Record_A('172.16.31.10', ttl=0))
        answer.encode(buf)
        message = dns.Message()
        message.fromStr(
            b'\x01\x00' # Message ID
            # answer bit, opCode nibble, auth bit, trunc bit, recursive bit
            b'\x04'
            # recursion bit, empty bit, authenticData bit,
            # checkingDisabled bit, response code nibble
            b'\x00'
            b'\x00\x00' # number of queries
            b'\x00\x01' # number of answers
            b'\x00\x00' # number of authorities
            b'\x00\x00' # number of additionals
            + buf.getvalue()
            )
        # Mark the expected answer as authoritative before comparing, since
        # the decoded RRHeader should carry the auth bit from the message.
        answer.auth = True
        self.assertEqual(message.answers, [answer])
        self.assertTrue(message.answers[0].auth)
class MessageComparisonTests(ComparisonTestsMixin,
                             unittest.SynchronousTestCase):
    """
    Tests for the rich comparison of L{dns.Message} instances.
    """
    def messageFactory(self, *args, **kwargs):
        """
        Create a L{dns.Message}.
        The L{dns.Message} constructor doesn't accept C{queries}, C{answers},
        C{authority}, C{additional} arguments, so we extract them from the
        kwargs supplied to this factory function and assign them to the message.
        @param args: Positional arguments.
        @param kwargs: Keyword arguments.
        @return: A L{dns.Message} instance.
        """
        queries = kwargs.pop('queries', [])
        answers = kwargs.pop('answers', [])
        authority = kwargs.pop('authority', [])
        additional = kwargs.pop('additional', [])
        m = dns.Message(**kwargs)
        if queries:
            m.queries = queries
        if answers:
            m.answers = answers
        if authority:
            m.authority = authority
        if additional:
            m.additional = additional
        return m
    def test_id(self):
        """
        Two L{dns.Message} instances compare equal if they have the same id
        value.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(id=10),
            self.messageFactory(id=10),
            self.messageFactory(id=20),
        )
    def test_answer(self):
        """
        Two L{dns.Message} instances compare equal if they have the same answer
        flag.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(answer=1),
            self.messageFactory(answer=1),
            self.messageFactory(answer=0),
        )
    def test_opCode(self):
        """
        Two L{dns.Message} instances compare equal if they have the same opCode
        value.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(opCode=10),
            self.messageFactory(opCode=10),
            self.messageFactory(opCode=20),
        )
    def test_recDes(self):
        """
        Two L{dns.Message} instances compare equal if they have the same recDes
        flag.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(recDes=1),
            self.messageFactory(recDes=1),
            self.messageFactory(recDes=0),
        )
    def test_recAv(self):
        """
        Two L{dns.Message} instances compare equal if they have the same recAv
        flag.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(recAv=1),
            self.messageFactory(recAv=1),
            self.messageFactory(recAv=0),
        )
    def test_auth(self):
        """
        Two L{dns.Message} instances compare equal if they have the same auth
        flag.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(auth=1),
            self.messageFactory(auth=1),
            self.messageFactory(auth=0),
        )
    def test_rCode(self):
        """
        Two L{dns.Message} instances compare equal if they have the same rCode
        value.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(rCode=10),
            self.messageFactory(rCode=10),
            self.messageFactory(rCode=20),
        )
    def test_trunc(self):
        """
        Two L{dns.Message} instances compare equal if they have the same trunc
        flag.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(trunc=1),
            self.messageFactory(trunc=1),
            self.messageFactory(trunc=0),
        )
    def test_maxSize(self):
        """
        Two L{dns.Message} instances compare equal if they have the same
        maxSize value.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(maxSize=10),
            self.messageFactory(maxSize=10),
            self.messageFactory(maxSize=20),
        )
    def test_authenticData(self):
        """
        Two L{dns.Message} instances compare equal if they have the same
        authenticData flag.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(authenticData=1),
            self.messageFactory(authenticData=1),
            self.messageFactory(authenticData=0),
        )
    def test_checkingDisabled(self):
        """
        Two L{dns.Message} instances compare equal if they have the same
        checkingDisabled flag.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(checkingDisabled=1),
            self.messageFactory(checkingDisabled=1),
            self.messageFactory(checkingDisabled=0),
        )
    def test_queries(self):
        """
        Two L{dns.Message} instances compare equal if they have the same
        queries.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(queries=[dns.Query(b'example.com')]),
            self.messageFactory(queries=[dns.Query(b'example.com')]),
            self.messageFactory(queries=[dns.Query(b'example.org')]),
        )
    def test_answers(self):
        """
        Two L{dns.Message} instances compare equal if they have the same
        answers.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(answers=[dns.RRHeader(
                b'example.com', payload=dns.Record_A('172.16.31.10'))]),
            self.messageFactory(answers=[dns.RRHeader(
                b'example.com', payload=dns.Record_A('172.16.31.10'))]),
            self.messageFactory(answers=[dns.RRHeader(
                b'example.org', payload=dns.Record_A('172.16.58.3'))]),
        )
    def test_authority(self):
        """
        Two L{dns.Message} instances compare equal if they have the same
        authority records.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(authority=[dns.RRHeader(
                b'example.com',
                type=dns.SOA, payload=dns.Record_SOA())]),
            self.messageFactory(authority=[dns.RRHeader(
                b'example.com',
                type=dns.SOA, payload=dns.Record_SOA())]),
            self.messageFactory(authority=[dns.RRHeader(
                b'example.org',
                type=dns.SOA, payload=dns.Record_SOA())]),
        )
    def test_additional(self):
        """
        Two L{dns.Message} instances compare equal if they have the same
        additional records.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(additional=[dns.RRHeader(
                b'example.com', payload=dns.Record_A('172.16.31.10'))]),
            self.messageFactory(additional=[dns.RRHeader(
                b'example.com', payload=dns.Record_A('172.16.31.10'))]),
            self.messageFactory(additional=[dns.RRHeader(
                b'example.org', payload=dns.Record_A('172.16.31.10'))]),
        )
class TestController(object):
    """
    A stand-in DNS message handler for use with a DNSDatagramProtocol.

    @ivar messages: every message delivered to this controller, recorded
        in arrival order.
    @type messages: C{list} of (msg, protocol, address) tuples
    """
    def __init__(self):
        """
        Begin with no messages recorded.
        """
        self.messages = []

    def messageReceived(self, msg, proto, addr=None):
        """
        Record the delivered message so tests can inspect it later.
        """
        self.messages.append((msg, proto, addr))
class DatagramProtocolTestCase(unittest.TestCase):
    """
    Test various aspects of L{dns.DNSDatagramProtocol}.
    """
    def setUp(self):
        """
        Create a L{dns.DNSDatagramProtocol} with a deterministic clock.
        """
        self.clock = task.Clock()
        self.controller = TestController()
        self.proto = dns.DNSDatagramProtocol(self.controller)
        transport = proto_helpers.FakeDatagramTransport()
        self.proto.makeConnection(transport)
        # Route the protocol's timeout scheduling through the fake clock so
        # tests can advance time manually.
        self.proto.callLater = self.clock.callLater
    def test_truncatedPacket(self):
        """
        Test that when a short datagram is received, datagramReceived does
        not raise an exception while processing it.
        """
        self.proto.datagramReceived(
            b'', address.IPv4Address('UDP', '127.0.0.1', 12345))
        self.assertEqual(self.controller.messages, [])
    def test_simpleQuery(self):
        """
        Test content received after a query.
        """
        d = self.proto.query(('127.0.0.1', 21345), [dns.Query(b'foo')])
        self.assertEqual(len(self.proto.liveMessages.keys()), 1)
        # Build a reply whose id matches the outstanding query so it is
        # dispatched to the Deferred rather than the controller.
        m = dns.Message()
        m.id = next(iter(self.proto.liveMessages.keys()))
        m.answers = [dns.RRHeader(payload=dns.Record_A(address='172.16.31.10'))]
        def cb(result):
            self.assertEqual(result.answers[0].payload.dottedQuad(), '172.16.31.10')
        d.addCallback(cb)
        self.proto.datagramReceived(m.toStr(), ('127.0.0.1', 21345))
        return d
    def test_queryTimeout(self):
        """
        Test that query timeouts after some seconds.
        """
        d = self.proto.query(('127.0.0.1', 21345), [dns.Query(b'foo')])
        self.assertEqual(len(self.proto.liveMessages), 1)
        self.clock.advance(10)
        self.assertFailure(d, dns.DNSQueryTimeoutError)
        self.assertEqual(len(self.proto.liveMessages), 0)
        return d
    def test_writeError(self):
        """
        Exceptions raised by the transport's write method should be turned into
        C{Failure}s passed to errbacks of the C{Deferred} returned by
        L{DNSDatagramProtocol.query}.
        """
        def writeError(message, addr):
            raise RuntimeError("bar")
        self.proto.transport.write = writeError
        d = self.proto.query(('127.0.0.1', 21345), [dns.Query(b'foo')])
        return self.assertFailure(d, RuntimeError)
    def test_listenError(self):
        """
        Exception L{CannotListenError} raised by C{listenUDP} should be turned
        into a C{Failure} passed to errback of the C{Deferred} returned by
        L{DNSDatagramProtocol.query}.
        """
        def startListeningError():
            raise CannotListenError(None, None, None)
        self.proto.startListening = startListeningError
        # Clean up transport so that the protocol calls startListening again
        self.proto.transport = None
        d = self.proto.query(('127.0.0.1', 21345), [dns.Query(b'foo')])
        return self.assertFailure(d, CannotListenError)
    def test_receiveMessageNotInLiveMessages(self):
        """
        When receiving a message whose id is not in
        L{DNSDatagramProtocol.liveMessages} or L{DNSDatagramProtocol.resends},
        the message will be received by L{DNSDatagramProtocol.controller}.
        """
        message = dns.Message()
        message.id = 1
        message.answers = [dns.RRHeader(
            payload=dns.Record_A(address='172.16.31.10'))]
        self.proto.datagramReceived(message.toStr(), ('127.0.0.1', 21345))
        self.assertEqual(self.controller.messages[-1][0].toStr(),
                         message.toStr())
class TestTCPController(TestController):
    """
    A stand-in DNS query processor for a DNSProtocol which also tracks
    connection state.

    @ivar connections: the L{DNSProtocol} instances that have reported
        themselves connected (via C{connectionMade}) and have not yet
        reported their connection lost.
    """
    def __init__(self):
        """
        Initialize the base controller and start with no live connections.
        """
        TestController.__init__(self)
        self.connections = []

    def connectionMade(self, protocol):
        """
        Note that C{protocol} is now connected.
        """
        self.connections.append(protocol)

    def connectionLost(self, protocol):
        """
        Forget the previously connected C{protocol}.
        """
        self.connections.remove(protocol)
class DNSProtocolTestCase(unittest.TestCase):
    """
    Test various aspects of L{dns.DNSProtocol}.
    """
    def setUp(self):
        """
        Create a L{dns.DNSProtocol} with a deterministic clock.
        """
        self.clock = task.Clock()
        self.controller = TestTCPController()
        self.proto = dns.DNSProtocol(self.controller)
        self.proto.makeConnection(proto_helpers.StringTransport())
        # Route timeout scheduling through the fake clock so tests can
        # advance time manually.
        self.proto.callLater = self.clock.callLater
    def test_connectionTracking(self):
        """
        L{dns.DNSProtocol} calls its controller's C{connectionMade}
        method with itself when it is connected to a transport and its
        controller's C{connectionLost} method when it is disconnected.
        """
        self.assertEqual(self.controller.connections, [self.proto])
        self.proto.connectionLost(
            Failure(ConnectionDone("Fake Connection Done")))
        self.assertEqual(self.controller.connections, [])
    def test_queryTimeout(self):
        """
        Test that query timeouts after some seconds.
        """
        d = self.proto.query([dns.Query(b'foo')])
        self.assertEqual(len(self.proto.liveMessages), 1)
        self.clock.advance(60)
        self.assertFailure(d, dns.DNSQueryTimeoutError)
        self.assertEqual(len(self.proto.liveMessages), 0)
        return d
    def test_simpleQuery(self):
        """
        Test content received after a query.
        """
        d = self.proto.query([dns.Query(b'foo')])
        self.assertEqual(len(self.proto.liveMessages.keys()), 1)
        # Build a reply whose id matches the outstanding query so it is
        # dispatched to the Deferred rather than the controller.
        m = dns.Message()
        m.id = next(iter(self.proto.liveMessages.keys()))
        m.answers = [dns.RRHeader(payload=dns.Record_A(address='172.16.31.10'))]
        def cb(result):
            # The decoded answer must carry the same address encoded above.
            # (Previously this asserted '1.2.3.4', which could never match
            # the record constructed with '172.16.31.10'.)
            self.assertEqual(result.answers[0].payload.dottedQuad(), '172.16.31.10')
        d.addCallback(cb)
        s = m.toStr()
        # DNS-over-TCP messages are prefixed with a 2-byte big-endian length.
        s = struct.pack('!H', len(s)) + s
        self.proto.dataReceived(s)
        return d
    def test_writeError(self):
        """
        Exceptions raised by the transport's write method should be turned into
        C{Failure}s passed to errbacks of the C{Deferred} returned by
        L{DNSProtocol.query}.
        """
        def writeError(message):
            raise RuntimeError("bar")
        self.proto.transport.write = writeError
        d = self.proto.query([dns.Query(b'foo')])
        return self.assertFailure(d, RuntimeError)
    def test_receiveMessageNotInLiveMessages(self):
        """
        When receiving a message whose id is not in L{DNSProtocol.liveMessages}
        the message will be received by L{DNSProtocol.controller}.
        """
        message = dns.Message()
        message.id = 1
        message.answers = [dns.RRHeader(
            payload=dns.Record_A(address='172.16.31.10'))]
        string = message.toStr()
        # Apply the same 2-byte length prefix used on the wire for TCP.
        string = struct.pack('!H', len(string)) + string
        self.proto.dataReceived(string)
        self.assertEqual(self.controller.messages[-1][0].toStr(),
                         message.toStr())
class ReprTests(unittest.TestCase):
    """
    Tests for the C{__repr__} implementation of record classes.
    """
    def test_ns(self):
        """
        The repr of a L{dns.Record_NS} instance includes the name of the
        nameserver and the TTL of the record.
        """
        self.assertEqual(
            repr(dns.Record_NS(b'example.com', 4321)),
            "<NS name=example.com ttl=4321>")
    def test_md(self):
        """
        The repr of a L{dns.Record_MD} instance includes the name of the
        mail destination and the TTL of the record.
        """
        self.assertEqual(
            repr(dns.Record_MD(b'example.com', 4321)),
            "<MD name=example.com ttl=4321>")
    def test_mf(self):
        """
        The repr of a L{dns.Record_MF} instance includes the name of the
        mail forwarder and the TTL of the record.
        """
        self.assertEqual(
            repr(dns.Record_MF(b'example.com', 4321)),
            "<MF name=example.com ttl=4321>")
    def test_cname(self):
        """
        The repr of a L{dns.Record_CNAME} instance includes the name of the
        mail forwarder and the TTL of the record.
        """
        self.assertEqual(
            repr(dns.Record_CNAME(b'example.com', 4321)),
            "<CNAME name=example.com ttl=4321>")
    def test_mb(self):
        """
        The repr of a L{dns.Record_MB} instance includes the name of the
        mailbox and the TTL of the record.
        """
        self.assertEqual(
            repr(dns.Record_MB(b'example.com', 4321)),
            "<MB name=example.com ttl=4321>")
    def test_mg(self):
        """
        The repr of a L{dns.Record_MG} instance includes the name of the
        mail group member and the TTL of the record.
        """
        self.assertEqual(
            repr(dns.Record_MG(b'example.com', 4321)),
            "<MG name=example.com ttl=4321>")
    def test_mr(self):
        """
        The repr of a L{dns.Record_MR} instance includes the name of the
        mail rename domain and the TTL of the record.
        """
        self.assertEqual(
            repr(dns.Record_MR(b'example.com', 4321)),
            "<MR name=example.com ttl=4321>")
    def test_ptr(self):
        """
        The repr of a L{dns.Record_PTR} instance includes the name of the
        pointer and the TTL of the record.
        """
        self.assertEqual(
            repr(dns.Record_PTR(b'example.com', 4321)),
            "<PTR name=example.com ttl=4321>")
    def test_dname(self):
        """
        The repr of a L{dns.Record_DNAME} instance includes the name of the
        non-terminal DNS name redirection and the TTL of the record.
        """
        self.assertEqual(
            repr(dns.Record_DNAME(b'example.com', 4321)),
            "<DNAME name=example.com ttl=4321>")
    def test_a(self):
        """
        The repr of a L{dns.Record_A} instance includes the dotted-quad
        string representation of the address it is for and the TTL of the
        record.
        """
        # The expected repr must echo the address the record was built with
        # (previously the expectation said '1.2.3.4', which did not match
        # the constructed record).
        self.assertEqual(
            repr(dns.Record_A('172.16.31.10', 567)),
            '<A address=172.16.31.10 ttl=567>')
    def test_soa(self):
        """
        The repr of a L{dns.Record_SOA} instance includes all of the
        authority fields.
        """
        self.assertEqual(
            repr(dns.Record_SOA(mname=b'mName', rname=b'rName', serial=123,
                                refresh=456, retry=789, expire=10,
                                minimum=11, ttl=12)),
            "<SOA mname=mName rname=rName serial=123 refresh=456 "
            "retry=789 expire=10 minimum=11 ttl=12>")
    def test_null(self):
        """
        The repr of a L{dns.Record_NULL} instance includes the repr of its
        payload and the TTL of the record.
        """
        self.assertEqual(
            repr(dns.Record_NULL(b'abcd', 123)),
            "<NULL payload='abcd' ttl=123>")
    def test_wks(self):
        """
        The repr of a L{dns.Record_WKS} instance includes the dotted-quad
        string representation of the address it is for, the IP protocol
        number it is for, and the TTL of the record.
        """
        # The expected repr must echo the address the record was built with
        # (previously the expectation said '2.3.4.5', which did not match
        # the constructed record).
        self.assertEqual(
            repr(dns.Record_WKS('192.168.3.11', 7, ttl=8)),
            "<WKS address=192.168.3.11 protocol=7 ttl=8>")
    def test_aaaa(self):
        """
        The repr of a L{dns.Record_AAAA} instance includes the colon-separated
        hex string representation of the address it is for and the TTL of the
        record.
        """
        self.assertEqual(
            repr(dns.Record_AAAA('fdf8:f53e:61e4::18', ttl=10)),
            "<AAAA address=fdf8:f53e:61e4::18 ttl=10>")
    def test_a6(self):
        """
        The repr of a L{dns.Record_A6} instance includes the colon-separated
        hex string representation of the address it is for and the TTL of the
        record.
        """
        self.assertEqual(
            repr(dns.Record_A6(0, 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b', b'foo.bar', ttl=10)),
            "<A6 suffix=fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b prefix=foo.bar ttl=10>")
    def test_srv(self):
        """
        The repr of a L{dns.Record_SRV} instance includes the name and port of
        the target and the priority, weight, and TTL of the record.
        """
        self.assertEqual(
            repr(dns.Record_SRV(1, 2, 3, b'example.org', 4)),
            "<SRV priority=1 weight=2 target=example.org port=3 ttl=4>")
    def test_naptr(self):
        """
        The repr of a L{dns.Record_NAPTR} instance includes the order,
        preference, flags, service, regular expression, replacement, and TTL of
        the record.
        """
        record = dns.Record_NAPTR(
            5, 9, b"S", b"http", b"/foo/bar/i", b"baz", 3)
        self.assertEqual(
            repr(record),
            "<NAPTR order=5 preference=9 flags=S service=http "
            "regexp=/foo/bar/i replacement=baz ttl=3>")
    def test_afsdb(self):
        """
        The repr of a L{dns.Record_AFSDB} instance includes the subtype,
        hostname, and TTL of the record.
        """
        self.assertEqual(
            repr(dns.Record_AFSDB(3, b'example.org', 5)),
            "<AFSDB subtype=3 hostname=example.org ttl=5>")
    def test_rp(self):
        """
        The repr of a L{dns.Record_RP} instance includes the mbox, txt, and TTL
        fields of the record.
        """
        self.assertEqual(
            repr(dns.Record_RP(b'alice.example.com', b'admin.example.com', 3)),
            "<RP mbox=alice.example.com txt=admin.example.com ttl=3>")
    def test_hinfo(self):
        """
        The repr of a L{dns.Record_HINFO} instance includes the cpu, os, and
        TTL fields of the record.
        """
        self.assertEqual(
            repr(dns.Record_HINFO(b'sparc', b'minix', 12)),
            "<HINFO cpu='sparc' os='minix' ttl=12>")
    def test_minfo(self):
        """
        The repr of a L{dns.Record_MINFO} instance includes the rmailbx,
        emailbx, and TTL fields of the record.
        """
        record = dns.Record_MINFO(
            b'alice.example.com', b'bob.example.com', 15)
        self.assertEqual(
            repr(record),
            "<MINFO responsibility=alice.example.com "
            "errors=bob.example.com ttl=15>")
    def test_mx(self):
        """
        The repr of a L{dns.Record_MX} instance includes the preference, name,
        and TTL fields of the record.
        """
        self.assertEqual(
            repr(dns.Record_MX(13, b'mx.example.com', 2)),
            "<MX preference=13 name=mx.example.com ttl=2>")
    def test_txt(self):
        """
        The repr of a L{dns.Record_TXT} instance includes the data and ttl
        fields of the record.
        """
        self.assertEqual(
            repr(dns.Record_TXT(b"foo", b"bar", ttl=15)),
            "<TXT data=['foo', 'bar'] ttl=15>")
    def test_spf(self):
        """
        The repr of a L{dns.Record_SPF} instance includes the data and ttl
        fields of the record.
        """
        self.assertEqual(
            repr(dns.Record_SPF(b"foo", b"bar", ttl=15)),
            "<SPF data=['foo', 'bar'] ttl=15>")
    def test_unknown(self):
        """
        The repr of a L{dns.UnknownRecord} instance includes the data and ttl
        fields of the record.
        """
        self.assertEqual(
            repr(dns.UnknownRecord(b"foo\x1fbar", 12)),
            "<UNKNOWN data='foo\\x1fbar' ttl=12>")
class EqualityTests(ComparisonTestsMixin, unittest.TestCase):
    """
    Tests for the equality and non-equality behavior of record classes.

    Each test varies exactly one field at a time so that a failure
    pinpoints which field is not participating in comparison.
    """
    def _equalityTest(self, firstValueOne, secondValueOne, valueTwo):
        return self.assertNormalEqualityImplementation(
            firstValueOne, secondValueOne, valueTwo)

    def test_charstr(self):
        """
        Two L{dns.Charstr} instances compare equal if and only if they have the
        same string value.
        """
        self._equalityTest(
            dns.Charstr(b'abc'), dns.Charstr(b'abc'), dns.Charstr(b'def'))

    def test_name(self):
        """
        Two L{dns.Name} instances compare equal if and only if they have the
        same name value.
        """
        self._equalityTest(
            dns.Name(b'abc'), dns.Name(b'abc'), dns.Name(b'def'))

    def _simpleEqualityTest(self, cls):
        """
        Assert that instances of C{cls} with the same attributes compare equal
        to each other and instances with different attributes compare as not
        equal.

        @param cls: A L{dns.SimpleRecord} subclass.
        """
        # Vary the TTL
        self._equalityTest(
            cls(b'example.com', 123),
            cls(b'example.com', 123),
            cls(b'example.com', 321))
        # Vary the name
        self._equalityTest(
            cls(b'example.com', 123),
            cls(b'example.com', 123),
            cls(b'example.org', 123))

    def test_rrheader(self):
        """
        Two L{dns.RRHeader} instances compare equal if and only if they have
        the same name, type, class, time to live, payload, and authoritative
        bit.
        """
        # Vary the name
        self._equalityTest(
            dns.RRHeader(b'example.com', payload=dns.Record_A('172.16.31.10')),
            dns.RRHeader(b'example.com', payload=dns.Record_A('172.16.31.10')),
            dns.RRHeader(b'example.org', payload=dns.Record_A('172.16.31.10')))
        # Vary the payload
        self._equalityTest(
            dns.RRHeader(b'example.com', payload=dns.Record_A('172.16.31.10')),
            dns.RRHeader(b'example.com', payload=dns.Record_A('172.16.31.10')),
            dns.RRHeader(b'example.com', payload=dns.Record_A('192.168.127.12')))
        # Vary the type.  Leave the payload as None so that we don't have to
        # provide non-equal values.
        self._equalityTest(
            dns.RRHeader(b'example.com', dns.A),
            dns.RRHeader(b'example.com', dns.A),
            dns.RRHeader(b'example.com', dns.MX))
        # Probably not likely to come up.  Most people use the internet.
        self._equalityTest(
            dns.RRHeader(b'example.com', cls=dns.IN, payload=dns.Record_A('172.16.31.10')),
            dns.RRHeader(b'example.com', cls=dns.IN, payload=dns.Record_A('172.16.31.10')),
            dns.RRHeader(b'example.com', cls=dns.CS, payload=dns.Record_A('172.16.31.10')))
        # Vary the ttl
        self._equalityTest(
            dns.RRHeader(b'example.com', ttl=60, payload=dns.Record_A('172.16.31.10')),
            dns.RRHeader(b'example.com', ttl=60, payload=dns.Record_A('172.16.31.10')),
            dns.RRHeader(b'example.com', ttl=120, payload=dns.Record_A('172.16.31.10')))
        # Vary the auth bit
        self._equalityTest(
            dns.RRHeader(b'example.com', auth=1, payload=dns.Record_A('172.16.31.10')),
            dns.RRHeader(b'example.com', auth=1, payload=dns.Record_A('172.16.31.10')),
            dns.RRHeader(b'example.com', auth=0, payload=dns.Record_A('172.16.31.10')))

    def test_ns(self):
        """
        Two L{dns.Record_NS} instances compare equal if and only if they have
        the same name and TTL.
        """
        self._simpleEqualityTest(dns.Record_NS)

    def test_md(self):
        """
        Two L{dns.Record_MD} instances compare equal if and only if they have
        the same name and TTL.
        """
        self._simpleEqualityTest(dns.Record_MD)

    def test_mf(self):
        """
        Two L{dns.Record_MF} instances compare equal if and only if they have
        the same name and TTL.
        """
        self._simpleEqualityTest(dns.Record_MF)

    def test_cname(self):
        """
        Two L{dns.Record_CNAME} instances compare equal if and only if they
        have the same name and TTL.
        """
        self._simpleEqualityTest(dns.Record_CNAME)

    def test_mb(self):
        """
        Two L{dns.Record_MB} instances compare equal if and only if they have
        the same name and TTL.
        """
        self._simpleEqualityTest(dns.Record_MB)

    def test_mg(self):
        """
        Two L{dns.Record_MG} instances compare equal if and only if they have
        the same name and TTL.
        """
        self._simpleEqualityTest(dns.Record_MG)

    def test_mr(self):
        """
        Two L{dns.Record_MR} instances compare equal if and only if they have
        the same name and TTL.
        """
        self._simpleEqualityTest(dns.Record_MR)

    def test_ptr(self):
        """
        Two L{dns.Record_PTR} instances compare equal if and only if they have
        the same name and TTL.
        """
        self._simpleEqualityTest(dns.Record_PTR)

    def test_dname(self):
        """
        Two L{dns.Record_DNAME} instances compare equal if and only if they
        have the same name and TTL.
        """
        # Fixed docstring: this test exercises Record_DNAME, not Record_MD.
        self._simpleEqualityTest(dns.Record_DNAME)

    def test_a(self):
        """
        Two L{dns.Record_A} instances compare equal if and only if they have
        the same address and TTL.
        """
        # Vary the TTL
        self._equalityTest(
            dns.Record_A('172.16.31.10', 5),
            dns.Record_A('172.16.31.10', 5),
            dns.Record_A('172.16.31.10', 6))
        # Vary the address
        self._equalityTest(
            dns.Record_A('172.16.31.10', 5),
            dns.Record_A('172.16.31.10', 5),
            dns.Record_A('192.168.127.12', 5))

    def test_soa(self):
        """
        Two L{dns.Record_SOA} instances compare equal if and only if they have
        the same mname, rname, serial, refresh, minimum, expire, retry, and
        ttl.
        """
        # Vary the mname
        self._equalityTest(
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'xname', b'rname', 123, 456, 789, 10, 20, 30))
        # Vary the rname
        self._equalityTest(
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'xname', 123, 456, 789, 10, 20, 30))
        # Vary the serial
        self._equalityTest(
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 1, 456, 789, 10, 20, 30))
        # Vary the refresh
        self._equalityTest(
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 123, 1, 789, 10, 20, 30))
        # Vary the minimum
        self._equalityTest(
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 123, 456, 1, 10, 20, 30))
        # Vary the expire
        self._equalityTest(
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 1, 20, 30))
        # Vary the retry
        self._equalityTest(
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 1, 30))
        # Vary only the ttl (previously this case also changed the rname,
        # which weakened the check to "some field differs").
        self._equalityTest(
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 1))

    def test_null(self):
        """
        Two L{dns.Record_NULL} instances compare equal if and only if they have
        the same payload and ttl.
        """
        # Vary the payload
        self._equalityTest(
            dns.Record_NULL('foo bar', 10),
            dns.Record_NULL('foo bar', 10),
            dns.Record_NULL('bar foo', 10))
        # Vary the ttl
        self._equalityTest(
            dns.Record_NULL('foo bar', 10),
            dns.Record_NULL('foo bar', 10),
            dns.Record_NULL('foo bar', 100))

    def test_wks(self):
        """
        Two L{dns.Record_WKS} instances compare equal if and only if they have
        the same address, protocol, map, and ttl.
        """
        # Vary the address
        self._equalityTest(
            dns.Record_WKS('172.16.31.10', 1, 'foo', 2),
            dns.Record_WKS('172.16.31.10', 1, 'foo', 2),
            dns.Record_WKS('172.16.58.3', 1, 'foo', 2))
        # Vary the protocol
        self._equalityTest(
            dns.Record_WKS('172.16.31.10', 1, 'foo', 2),
            dns.Record_WKS('172.16.31.10', 1, 'foo', 2),
            dns.Record_WKS('172.16.31.10', 100, 'foo', 2))
        # Vary the map
        self._equalityTest(
            dns.Record_WKS('172.16.31.10', 1, 'foo', 2),
            dns.Record_WKS('172.16.31.10', 1, 'foo', 2),
            dns.Record_WKS('172.16.31.10', 1, 'bar', 2))
        # Vary the ttl
        self._equalityTest(
            dns.Record_WKS('172.16.31.10', 1, 'foo', 2),
            dns.Record_WKS('172.16.31.10', 1, 'foo', 2),
            dns.Record_WKS('172.16.31.10', 1, 'foo', 200))

    def test_aaaa(self):
        """
        Two L{dns.Record_AAAA} instances compare equal if and only if they have
        the same address and ttl.
        """
        # Vary the address
        self._equalityTest(
            dns.Record_AAAA('fc00:db20:35b:7399::5', 1),
            dns.Record_AAAA('fc00:db20:35b:7399::5', 1),
            dns.Record_AAAA('fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b', 1))
        # Vary the ttl
        self._equalityTest(
            dns.Record_AAAA('fc00:db20:35b:7399::5', 1),
            dns.Record_AAAA('fc00:db20:35b:7399::5', 1),
            dns.Record_AAAA('fc00:db20:35b:7399::5', 10))

    def test_a6(self):
        """
        Two L{dns.Record_A6} instances compare equal if and only if they have
        the same prefix, prefix length, suffix, and ttl.
        """
        # Note, A6 is crazy, I'm not sure these values are actually legal.
        # Hopefully that doesn't matter for this test. -exarkun
        # Vary the prefix length
        self._equalityTest(
            dns.Record_A6(16, '::abcd', b'example.com', 10),
            dns.Record_A6(16, '::abcd', b'example.com', 10),
            dns.Record_A6(32, '::abcd', b'example.com', 10))
        # Vary the suffix
        self._equalityTest(
            dns.Record_A6(16, '::abcd', b'example.com', 10),
            dns.Record_A6(16, '::abcd', b'example.com', 10),
            dns.Record_A6(16, '::abcd:0', b'example.com', 10))
        # Vary the prefix
        self._equalityTest(
            dns.Record_A6(16, '::abcd', b'example.com', 10),
            dns.Record_A6(16, '::abcd', b'example.com', 10),
            dns.Record_A6(16, '::abcd', b'example.org', 10))
        # Vary the ttl
        self._equalityTest(
            dns.Record_A6(16, '::abcd', b'example.com', 10),
            dns.Record_A6(16, '::abcd', b'example.com', 10),
            dns.Record_A6(16, '::abcd', b'example.com', 100))

    def test_srv(self):
        """
        Two L{dns.Record_SRV} instances compare equal if and only if they have
        the same priority, weight, port, target, and ttl.
        """
        # Vary the priority
        self._equalityTest(
            dns.Record_SRV(10, 20, 30, b'example.com', 40),
            dns.Record_SRV(10, 20, 30, b'example.com', 40),
            dns.Record_SRV(100, 20, 30, b'example.com', 40))
        # Vary the weight
        self._equalityTest(
            dns.Record_SRV(10, 20, 30, b'example.com', 40),
            dns.Record_SRV(10, 20, 30, b'example.com', 40),
            dns.Record_SRV(10, 200, 30, b'example.com', 40))
        # Vary the port
        self._equalityTest(
            dns.Record_SRV(10, 20, 30, b'example.com', 40),
            dns.Record_SRV(10, 20, 30, b'example.com', 40),
            dns.Record_SRV(10, 20, 300, b'example.com', 40))
        # Vary the target
        self._equalityTest(
            dns.Record_SRV(10, 20, 30, b'example.com', 40),
            dns.Record_SRV(10, 20, 30, b'example.com', 40),
            dns.Record_SRV(10, 20, 30, b'example.org', 40))
        # Vary the ttl
        self._equalityTest(
            dns.Record_SRV(10, 20, 30, b'example.com', 40),
            dns.Record_SRV(10, 20, 30, b'example.com', 40),
            dns.Record_SRV(10, 20, 30, b'example.com', 400))

    def test_naptr(self):
        """
        Two L{dns.Record_NAPTR} instances compare equal if and only if they
        have the same order, preference, flags, service, regexp, replacement,
        and ttl.
        """
        # Vary the order
        self._equalityTest(
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(2, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12))
        # Vary the preference
        self._equalityTest(
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(1, 3, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12))
        # Vary the flags
        self._equalityTest(
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(1, 2, b"p", b"sip+E2U", b"/foo/bar/", b"baz", 12))
        # Vary the service
        self._equalityTest(
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(1, 2, b"u", b"http", b"/foo/bar/", b"baz", 12))
        # Vary the regexp
        self._equalityTest(
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/bar/foo/", b"baz", 12))
        # Vary only the replacement (previously this case also changed the
        # regexp, so it did not isolate the replacement field).
        self._equalityTest(
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"quux", 12))
        # Vary only the ttl (previously this case also changed the regexp).
        self._equalityTest(
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 5))

    def test_afsdb(self):
        """
        Two L{dns.Record_AFSDB} instances compare equal if and only if they
        have the same subtype, hostname, and ttl.
        """
        # Vary the subtype
        self._equalityTest(
            dns.Record_AFSDB(1, b'example.com', 2),
            dns.Record_AFSDB(1, b'example.com', 2),
            dns.Record_AFSDB(2, b'example.com', 2))
        # Vary the hostname
        self._equalityTest(
            dns.Record_AFSDB(1, b'example.com', 2),
            dns.Record_AFSDB(1, b'example.com', 2),
            dns.Record_AFSDB(1, b'example.org', 2))
        # Vary the ttl
        self._equalityTest(
            dns.Record_AFSDB(1, b'example.com', 2),
            dns.Record_AFSDB(1, b'example.com', 2),
            dns.Record_AFSDB(1, b'example.com', 3))

    def test_rp(self):
        """
        Two L{Record_RP} instances compare equal if and only if they have the
        same mbox, txt, and ttl.
        """
        # Vary the mbox
        self._equalityTest(
            dns.Record_RP(b'alice.example.com', b'alice is nice', 10),
            dns.Record_RP(b'alice.example.com', b'alice is nice', 10),
            dns.Record_RP(b'bob.example.com', b'alice is nice', 10))
        # Vary the txt
        self._equalityTest(
            dns.Record_RP(b'alice.example.com', b'alice is nice', 10),
            dns.Record_RP(b'alice.example.com', b'alice is nice', 10),
            dns.Record_RP(b'alice.example.com', b'alice is not nice', 10))
        # Vary the ttl
        self._equalityTest(
            dns.Record_RP(b'alice.example.com', b'alice is nice', 10),
            dns.Record_RP(b'alice.example.com', b'alice is nice', 10),
            dns.Record_RP(b'alice.example.com', b'alice is nice', 100))

    def test_hinfo(self):
        """
        Two L{dns.Record_HINFO} instances compare equal if and only if they
        have the same cpu, os, and ttl.
        """
        # Vary the cpu
        self._equalityTest(
            dns.Record_HINFO('x86-64', 'plan9', 10),
            dns.Record_HINFO('x86-64', 'plan9', 10),
            dns.Record_HINFO('i386', 'plan9', 10))
        # Vary the os
        self._equalityTest(
            dns.Record_HINFO('x86-64', 'plan9', 10),
            dns.Record_HINFO('x86-64', 'plan9', 10),
            dns.Record_HINFO('x86-64', 'plan11', 10))
        # Vary the ttl
        self._equalityTest(
            dns.Record_HINFO('x86-64', 'plan9', 10),
            dns.Record_HINFO('x86-64', 'plan9', 10),
            dns.Record_HINFO('x86-64', 'plan9', 100))

    def test_minfo(self):
        """
        Two L{dns.Record_MINFO} instances compare equal if and only if they
        have the same rmailbx, emailbx, and ttl.
        """
        # Vary the rmailbx
        self._equalityTest(
            dns.Record_MINFO(b'rmailbox', b'emailbox', 10),
            dns.Record_MINFO(b'rmailbox', b'emailbox', 10),
            dns.Record_MINFO(b'someplace', b'emailbox', 10))
        # Vary the emailbx
        self._equalityTest(
            dns.Record_MINFO(b'rmailbox', b'emailbox', 10),
            dns.Record_MINFO(b'rmailbox', b'emailbox', 10),
            dns.Record_MINFO(b'rmailbox', b'something', 10))
        # Vary the ttl
        self._equalityTest(
            dns.Record_MINFO(b'rmailbox', b'emailbox', 10),
            dns.Record_MINFO(b'rmailbox', b'emailbox', 10),
            dns.Record_MINFO(b'rmailbox', b'emailbox', 100))

    def test_mx(self):
        """
        Two L{dns.Record_MX} instances compare equal if and only if they have
        the same preference, name, and ttl.
        """
        # Vary the preference
        self._equalityTest(
            dns.Record_MX(10, b'example.org', 20),
            dns.Record_MX(10, b'example.org', 20),
            dns.Record_MX(100, b'example.org', 20))
        # Vary the name
        self._equalityTest(
            dns.Record_MX(10, b'example.org', 20),
            dns.Record_MX(10, b'example.org', 20),
            dns.Record_MX(10, b'example.net', 20))
        # Vary the ttl
        self._equalityTest(
            dns.Record_MX(10, b'example.org', 20),
            dns.Record_MX(10, b'example.org', 20),
            dns.Record_MX(10, b'example.org', 200))

    def test_txt(self):
        """
        Two L{dns.Record_TXT} instances compare equal if and only if they have
        the same data and ttl.
        """
        # Vary the length of the data
        self._equalityTest(
            dns.Record_TXT('foo', 'bar', ttl=10),
            dns.Record_TXT('foo', 'bar', ttl=10),
            dns.Record_TXT('foo', 'bar', 'baz', ttl=10))
        # Vary the value of the data
        self._equalityTest(
            dns.Record_TXT('foo', 'bar', ttl=10),
            dns.Record_TXT('foo', 'bar', ttl=10),
            dns.Record_TXT('bar', 'foo', ttl=10))
        # Vary the ttl
        self._equalityTest(
            dns.Record_TXT('foo', 'bar', ttl=10),
            dns.Record_TXT('foo', 'bar', ttl=10),
            dns.Record_TXT('foo', 'bar', ttl=100))

    def test_spf(self):
        """
        L{dns.Record_SPF} instances compare equal if and only if they have the
        same data and ttl.
        """
        # Vary the length of the data
        self._equalityTest(
            dns.Record_SPF('foo', 'bar', ttl=10),
            dns.Record_SPF('foo', 'bar', ttl=10),
            dns.Record_SPF('foo', 'bar', 'baz', ttl=10))
        # Vary the value of the data
        self._equalityTest(
            dns.Record_SPF('foo', 'bar', ttl=10),
            dns.Record_SPF('foo', 'bar', ttl=10),
            dns.Record_SPF('bar', 'foo', ttl=10))
        # Vary the ttl
        self._equalityTest(
            dns.Record_SPF('foo', 'bar', ttl=10),
            dns.Record_SPF('foo', 'bar', ttl=10),
            dns.Record_SPF('foo', 'bar', ttl=100))

    def test_unknown(self):
        """
        L{dns.UnknownRecord} instances compare equal if and only if they have
        the same data and ttl.
        """
        # Vary the length of the data
        self._equalityTest(
            dns.UnknownRecord('foo', ttl=10),
            dns.UnknownRecord('foo', ttl=10),
            dns.UnknownRecord('foobar', ttl=10))
        # Vary the value of the data
        self._equalityTest(
            dns.UnknownRecord('foo', ttl=10),
            dns.UnknownRecord('foo', ttl=10),
            dns.UnknownRecord('bar', ttl=10))
        # Vary the ttl
        self._equalityTest(
            dns.UnknownRecord('foo', ttl=10),
            dns.UnknownRecord('foo', ttl=10),
            dns.UnknownRecord('foo', ttl=100))
class RRHeaderTests(unittest.TestCase):
    """
    Tests for L{twisted.names.dns.RRHeader}.
    """
    def test_negativeTTL(self):
        """
        Constructing a L{dns.RRHeader} with a TTL below zero raises
        L{ValueError}.
        """
        args = ("example.com", dns.A, dns.IN, -1, dns.Record_A("127.0.0.1"))
        self.assertRaises(ValueError, dns.RRHeader, *args)
class NameToLabelsTests(unittest.SynchronousTestCase):
    """
    Tests for L{twisted.names.dns._nameToLabels}.
    """
    def _assertSplitsTo(self, name, expectedLabels):
        """
        Assert that C{name} is split into exactly C{expectedLabels}.
        """
        self.assertEqual(dns._nameToLabels(name), expectedLabels)

    def test_empty(self):
        """
        An empty name is split into a single empty label.
        """
        self._assertSplitsTo(b'', [b''])

    def test_onlyDot(self):
        """
        A name consisting of just a dot is split into a single empty label.
        """
        self._assertSplitsTo(b'.', [b''])

    def test_withoutTrailingDot(self):
        """
        A name without a trailing dot still yields a list ending with an
        empty label.
        """
        self._assertSplitsTo(b'com', [b'com', b''])

    def test_withTrailingDot(self):
        """
        A name with a trailing dot yields a list ending with an empty label.
        """
        self._assertSplitsTo(b'com.', [b'com', b''])

    def test_subdomain(self):
        """
        Every label of a subdomain name appears in the resulting list.
        """
        self._assertSplitsTo(
            b'foo.bar.baz.example.com.',
            [b'foo', b'bar', b'baz', b'example', b'com', b''])

    def test_casePreservation(self):
        """
        The case of ascii characters in labels is left untouched.
        """
        self._assertSplitsTo(b'EXAMPLE.COM', [b'EXAMPLE', b'COM', b''])
def assertIsSubdomainOf(testCase, descendant, ancestor):
    """
    Assert that C{descendant} *is* a subdomain of C{ancestor}.

    @type testCase: L{unittest.SynchronousTestCase}
    @param testCase: The test case on which to run the assertions.

    @type descendant: C{str}
    @param descendant: The subdomain name to test.

    @type ancestor: C{str}
    @param ancestor: The superdomain name to test.
    """
    message = '%r is not a subdomain of %r' % (descendant, ancestor)
    testCase.assertTrue(dns._isSubdomainOf(descendant, ancestor), message)
def assertIsNotSubdomainOf(testCase, descendant, ancestor):
    """
    Assert that C{descendant} *is not* a subdomain of C{ancestor}.

    @type testCase: L{unittest.SynchronousTestCase}
    @param testCase: The test case on which to run the assertions.

    @type descendant: C{str}
    @param descendant: The subdomain name to test.

    @type ancestor: C{str}
    @param ancestor: The superdomain name to test.
    """
    message = '%r is a subdomain of %r' % (descendant, ancestor)
    testCase.assertFalse(dns._isSubdomainOf(descendant, ancestor), message)
class IsSubdomainOfTests(unittest.SynchronousTestCase):
    """
    Tests for L{twisted.names.dns._isSubdomainOf}.
    """
    def test_identical(self):
        """
        A name counts as a subdomain of itself.
        """
        assertIsSubdomainOf(self, b'example.com', b'example.com')

    def test_parent(self):
        """
        An immediate child of a name is its subdomain.
        """
        assertIsSubdomainOf(self, b'foo.example.com', b'example.com')

    def test_distantAncestor(self):
        """
        A name several levels below another is still its subdomain.
        """
        assertIsSubdomainOf(self, b'foo.bar.baz.example.com', b'com')

    def test_superdomain(self):
        """
        An ancestor is not a subdomain of its descendant.
        """
        assertIsNotSubdomainOf(self, b'example.com', b'foo.example.com')

    def test_sibling(self):
        """
        Sibling names are not subdomains of one another.
        """
        assertIsNotSubdomainOf(self, b'foo.example.com', b'bar.example.com')

    def test_unrelatedCommonSuffix(self):
        """
        Sharing a textual suffix does not make one name a subdomain of
        another.
        """
        assertIsNotSubdomainOf(self, b'foo.myexample.com', b'example.com')

    def test_subdomainWithTrailingDot(self):
        """
        A trailing "." on the descendant name does not affect the result.
        """
        assertIsSubdomainOf(self, b'foo.example.com.', b'example.com')

    def test_superdomainWithTrailingDot(self):
        """
        A trailing "." on the ancestor name does not affect the result.
        """
        assertIsSubdomainOf(self, b'foo.example.com', b'example.com.')

    def test_bothWithTrailingDot(self):
        """
        Trailing "."s on both names do not affect the result.
        """
        assertIsSubdomainOf(self, b'foo.example.com.', b'example.com.')

    def test_emptySubdomain(self):
        """
        The empty name is not a subdomain of a non-empty name.
        """
        assertIsNotSubdomainOf(self, b'', b'example.com')

    def test_emptySuperdomain(self):
        """
        Every non-empty name is a subdomain of the empty name.
        """
        assertIsSubdomainOf(self, b'foo.example.com', b'')

    def test_caseInsensitiveComparison(self):
        """
        Labels are compared without regard to ascii case.
        """
        assertIsSubdomainOf(self, b'foo.example.com', b'EXAMPLE.COM')
        assertIsSubdomainOf(self, b'FOO.EXAMPLE.COM', b'example.com')
class OPTNonStandardAttributes(object):
    """
    Generate byte and instance representations of an L{dns._OPTHeader}
    where all attributes are set to non-default values.

    For testing whether attributes have really been read from the byte
    string during decoding.
    """
    @classmethod
    def bytes(cls, excludeName=False, excludeOptions=False):
        """
        Return L{bytes} representing an encoded OPT record.

        @param excludeName: A flag that controls whether to exclude
            the name field. This allows a non-standard name to be
            prepended during the test.
        @type excludeName: L{bool}

        @param excludeOptions: A flag that controls whether to exclude
            the RDLEN field. This allows encoded variable options to be
            appended during the test.
        @type excludeOptions: L{bool}

        @return: L{bytes} representing the encoded OPT record returned
            by L{object}.
        """
        # NOTE(review): excludeName is accepted and documented but never
        # consulted below -- the root-zone name byte is always emitted.
        # Confirm whether callers (e.g. test_decodeDiscardsName) rely on
        # this before implementing it; honouring the flag would change
        # the byte streams those tests decode.
        rdlen = b'\x00\x00' # RDLEN 0
        if excludeOptions:
            rdlen = b''
        # Fixed-size OPT pseudo-record fields, annotated with the decoded
        # value each field represents (matching the object() instance).
        return (
            b'\x00' # 0 root zone
            b'\x00\x29' # type 41
            b'\x02\x00' # udpPayloadsize 512
            b'\x03' # extendedRCODE 3
            b'\x04' # version 4
            b'\x80\x00' # DNSSEC OK 1 + Z
            ) + rdlen

    @classmethod
    def object(cls):
        """
        Return a new L{dns._OPTHeader} instance.

        @return: A L{dns._OPTHeader} instance with attributes that
            match the encoded record returned by L{bytes}.
        """
        return dns._OPTHeader(
            udpPayloadSize=512,
            extendedRCODE=3,
            version=4,
            dnssecOK=True)
class OPTHeaderTests(ComparisonTestsMixin, unittest.TestCase):
    """
    Tests for L{twisted.names.dns._OPTHeader}.

    Wire-format tests build their inputs and expected outputs from
    L{OPTNonStandardAttributes}, whose byte string and instance are kept
    in sync.
    """
    def test_interface(self):
        """
        L{dns._OPTHeader} implements L{dns.IEncodable}.
        """
        verifyClass(dns.IEncodable, dns._OPTHeader)

    def test_name(self):
        """
        L{dns._OPTHeader.name} is a instance attribute whose value is
        fixed as the root domain
        """
        self.assertEqual(dns._OPTHeader().name, dns.Name(b''))

    def test_nameReadonly(self):
        """
        L{dns._OPTHeader.name} is readonly.
        """
        h = dns._OPTHeader()
        self.assertRaises(
            AttributeError, setattr, h, 'name', dns.Name(b'example.com'))

    def test_type(self):
        """
        L{dns._OPTHeader.type} is an instance attribute with fixed value
        41.
        """
        self.assertEqual(dns._OPTHeader().type, 41)

    def test_typeReadonly(self):
        """
        L{dns._OPTHeader.type} is readonly.
        """
        h = dns._OPTHeader()
        self.assertRaises(
            AttributeError, setattr, h, 'type', dns.A)

    def test_udpPayloadSize(self):
        """
        L{dns._OPTHeader.udpPayloadSize} defaults to 4096 as
        recommended in rfc6891 section-6.2.5.
        """
        self.assertEqual(dns._OPTHeader().udpPayloadSize, 4096)

    def test_udpPayloadSizeOverride(self):
        """
        L{dns._OPTHeader.udpPayloadSize} can be overridden in the
        constructor.
        """
        self.assertEqual(dns._OPTHeader(udpPayloadSize=512).udpPayloadSize, 512)

    def test_extendedRCODE(self):
        """
        L{dns._OPTHeader.extendedRCODE} defaults to 0.
        """
        self.assertEqual(dns._OPTHeader().extendedRCODE, 0)

    def test_extendedRCODEOverride(self):
        """
        L{dns._OPTHeader.extendedRCODE} can be overridden in the
        constructor.
        """
        self.assertEqual(dns._OPTHeader(extendedRCODE=1).extendedRCODE, 1)

    def test_version(self):
        """
        L{dns._OPTHeader.version} defaults to 0.
        """
        self.assertEqual(dns._OPTHeader().version, 0)

    def test_versionOverride(self):
        """
        L{dns._OPTHeader.version} can be overridden in the
        constructor.
        """
        self.assertEqual(dns._OPTHeader(version=1).version, 1)

    def test_dnssecOK(self):
        """
        L{dns._OPTHeader.dnssecOK} defaults to False.
        """
        self.assertEqual(dns._OPTHeader().dnssecOK, False)

    def test_dnssecOKOverride(self):
        """
        L{dns._OPTHeader.dnssecOK} can be overridden in the
        constructor.
        """
        self.assertEqual(dns._OPTHeader(dnssecOK=True).dnssecOK, True)

    def test_options(self):
        """
        L{dns._OPTHeader.options} defaults to empty list.
        """
        self.assertEqual(dns._OPTHeader().options, [])

    def test_optionsOverride(self):
        """
        L{dns._OPTHeader.options} can be overridden in the
        constructor.
        """
        h = dns._OPTHeader(options=[(1, 1, b'\x00')])
        self.assertEqual(h.options, [(1, 1, b'\x00')])

    def test_encode(self):
        """
        L{dns._OPTHeader.encode} packs the header fields and writes
        them to a file like object passed in as an argument.
        """
        b = BytesIO()
        OPTNonStandardAttributes.object().encode(b)
        self.assertEqual(
            b.getvalue(),
            OPTNonStandardAttributes.bytes()
            )

    def test_encodeWithOptions(self):
        """
        L{dns._OPTHeader.options} is a list of L{dns._OPTVariableOption}
        instances which are packed into the rdata area of the header.
        """
        h = OPTNonStandardAttributes.object()
        h.options = [
            dns._OPTVariableOption(1, b'foobarbaz'),
            dns._OPTVariableOption(2, b'qux'),
            ]
        b = BytesIO()
        h.encode(b)
        self.assertEqual(
            b.getvalue(),
            OPTNonStandardAttributes.bytes(excludeOptions=True) + (
                b'\x00\x14' # RDLEN 20
                b'\x00\x01' # OPTION-CODE
                b'\x00\x09' # OPTION-LENGTH
                b'foobarbaz' # OPTION-DATA
                b'\x00\x02' # OPTION-CODE
                b'\x00\x03' # OPTION-LENGTH
                b'qux' # OPTION-DATA
                ))

    def test_decode(self):
        """
        L{dns._OPTHeader.decode} unpacks the header fields from a file
        like object and populates the attributes of an existing
        L{dns._OPTHeader} instance.
        """
        decodedHeader = dns._OPTHeader()
        decodedHeader.decode(BytesIO(OPTNonStandardAttributes.bytes()))
        self.assertEqual(
            decodedHeader,
            OPTNonStandardAttributes.object())

    def test_decodeAllExpectedBytes(self):
        """
        L{dns._OPTHeader.decode} reads all the bytes of the record
        that is being decoded.
        """
        # Check that all the input data has been consumed.
        b = BytesIO(OPTNonStandardAttributes.bytes())
        decodedHeader = dns._OPTHeader()
        decodedHeader.decode(b)
        self.assertEqual(b.tell(), len(b.getvalue()))

    def test_decodeOnlyExpectedBytes(self):
        """
        L{dns._OPTHeader.decode} reads only the bytes from the current
        file position to the end of the record that is being
        decoded. Trailing bytes are not consumed.
        """
        b = BytesIO(OPTNonStandardAttributes.bytes()
                    + b'xxxx') # Trailing bytes
        decodedHeader = dns._OPTHeader()
        decodedHeader.decode(b)
        self.assertEqual(b.tell(), len(b.getvalue())-len(b'xxxx'))

    def test_decodeDiscardsName(self):
        """
        L{dns._OPTHeader.decode} discards the name which is encoded in
        the supplied bytes. The name attribute of the resulting
        L{dns._OPTHeader} instance will always be L{dns.Name(b'')}.
        """
        # NOTE(review): OPTNonStandardAttributes.bytes() currently ignores
        # excludeName, so the stream still begins with the root-zone name
        # and the name bytes appended here are never consumed by decode().
        # The assertion passes regardless because _OPTHeader.name is fixed
        # (see test_name/test_nameReadonly) -- confirm this test exercises
        # what its docstring claims.
        b = BytesIO(OPTNonStandardAttributes.bytes(excludeName=True)
                    + b'\x07example\x03com\x00')
        h = dns._OPTHeader()
        h.decode(b)
        self.assertEqual(h.name, dns.Name(b''))

    def test_decodeRdlengthTooShort(self):
        """
        L{dns._OPTHeader.decode} raises an exception if the supplied
        RDLEN is too short.
        """
        b = BytesIO(
            OPTNonStandardAttributes.bytes(excludeOptions=True) + (
                b'\x00\x05' # RDLEN 5 Too short - should be 6
                b'\x00\x01' # OPTION-CODE
                b'\x00\x02' # OPTION-LENGTH
                b'\x00\x00' # OPTION-DATA
                ))
        h = dns._OPTHeader()
        self.assertRaises(EOFError, h.decode, b)

    def test_decodeRdlengthTooLong(self):
        """
        L{dns._OPTHeader.decode} raises an exception if the supplied
        RDLEN is too long.
        """
        b = BytesIO(
            OPTNonStandardAttributes.bytes(excludeOptions=True) + (
                b'\x00\x07' # RDLEN 7 Too long - should be 6
                b'\x00\x01' # OPTION-CODE
                b'\x00\x02' # OPTION-LENGTH
                b'\x00\x00' # OPTION-DATA
                ))
        h = dns._OPTHeader()
        self.assertRaises(EOFError, h.decode, b)

    def test_decodeWithOptions(self):
        """
        If the OPT bytes contain variable options,
        L{dns._OPTHeader.decode} will populate a list
        L{dns._OPTHeader.options} with L{dns._OPTVariableOption}
        instances.
        """
        b = BytesIO(
            OPTNonStandardAttributes.bytes(excludeOptions=True) + (
                b'\x00\x14' # RDLEN 20
                b'\x00\x01' # OPTION-CODE
                b'\x00\x09' # OPTION-LENGTH
                b'foobarbaz' # OPTION-DATA
                b'\x00\x02' # OPTION-CODE
                b'\x00\x03' # OPTION-LENGTH
                b'qux' # OPTION-DATA
                ))
        h = dns._OPTHeader()
        h.decode(b)
        self.assertEqual(
            h.options,
            [dns._OPTVariableOption(1, b'foobarbaz'),
             dns._OPTVariableOption(2, b'qux'),]
            )

    def test_fromRRHeader(self):
        """
        L{_OPTHeader.fromRRHeader} accepts an L{RRHeader} instance and
        returns an L{_OPTHeader} instance whose attribute values have
        been derived from the C{cls}, C{ttl} and C{payload} attributes
        of the original header.
        """
        # The ttl packs extendedRCODE (0xfe), version (0xfd) and the
        # DO bit (True) into one 32-bit value, as shown by the matching
        # fields of expectedOptHeader below.
        genericHeader = dns.RRHeader(
            b'example.com',
            type=dns.OPT,
            cls=0xffff,
            ttl=(0xfe << 24
                 | 0xfd << 16
                 | True << 15),
            payload=dns.UnknownRecord(b'\xff\xff\x00\x03abc'))
        decodedOptHeader = dns._OPTHeader.fromRRHeader(genericHeader)
        expectedOptHeader = dns._OPTHeader(
            udpPayloadSize=0xffff,
            extendedRCODE=0xfe,
            version=0xfd,
            dnssecOK=True,
            options=[dns._OPTVariableOption(code=0xffff, data=b'abc')])
        self.assertEqual(decodedOptHeader, expectedOptHeader)

    def test_repr(self):
        """
        L{dns._OPTHeader.__repr__} displays the name and type and all
        the fixed and extended header values of the OPT record.
        """
        self.assertEqual(
            repr(dns._OPTHeader()),
            '<_OPTHeader '
            'name= '
            'type=41 '
            'udpPayloadSize=4096 '
            'extendedRCODE=0 '
            'version=0 '
            'dnssecOK=False '
            'options=[]>')

    def test_equalityUdpPayloadSize(self):
        """
        Two L{OPTHeader} instances compare equal if they have the same
        udpPayloadSize.
        """
        self.assertNormalEqualityImplementation(
            dns._OPTHeader(udpPayloadSize=512),
            dns._OPTHeader(udpPayloadSize=512),
            dns._OPTHeader(udpPayloadSize=4096))

    def test_equalityExtendedRCODE(self):
        """
        Two L{OPTHeader} instances compare equal if they have the same
        extendedRCODE.
        """
        self.assertNormalEqualityImplementation(
            dns._OPTHeader(extendedRCODE=1),
            dns._OPTHeader(extendedRCODE=1),
            dns._OPTHeader(extendedRCODE=2))

    def test_equalityVersion(self):
        """
        Two L{OPTHeader} instances compare equal if they have the same
        version.
        """
        self.assertNormalEqualityImplementation(
            dns._OPTHeader(version=1),
            dns._OPTHeader(version=1),
            dns._OPTHeader(version=2))

    def test_equalityDnssecOK(self):
        """
        Two L{OPTHeader} instances compare equal if they have the same
        dnssecOK flags.
        """
        self.assertNormalEqualityImplementation(
            dns._OPTHeader(dnssecOK=True),
            dns._OPTHeader(dnssecOK=True),
            dns._OPTHeader(dnssecOK=False))

    def test_equalityOptions(self):
        """
        Two L{OPTHeader} instances compare equal if they have the same
        options.
        """
        self.assertNormalEqualityImplementation(
            dns._OPTHeader(options=[dns._OPTVariableOption(1, b'x')]),
            dns._OPTHeader(options=[dns._OPTVariableOption(1, b'x')]),
            dns._OPTHeader(options=[dns._OPTVariableOption(2, b'y')]))
class OPTVariableOptionTests(ComparisonTestsMixin, unittest.TestCase):
    """
    Tests for L{dns._OPTVariableOption}.
    """
    def test_interface(self):
        """
        L{dns._OPTVariableOption} provides L{dns.IEncodable}.
        """
        verifyClass(dns.IEncodable, dns._OPTVariableOption)

    def test_constructorArguments(self):
        """
        The code and data constructor arguments are exposed as public
        instance attributes.
        """
        option = dns._OPTVariableOption(1, b'x')
        self.assertEqual(option.code, 1)
        self.assertEqual(option.data, b'x')

    def test_repr(self):
        """
        The repr of an L{dns._OPTVariableOption} shows its code and data.
        """
        option = dns._OPTVariableOption(1, b'x')
        self.assertEqual(
            repr(option), '<_OPTVariableOption code=1 data=x>')

    def test_equality(self):
        """
        Two option instances compare equal exactly when both their code
        and data values match.
        """
        make = dns._OPTVariableOption
        # Same data, differing code.
        self.assertNormalEqualityImplementation(
            make(1, b'x'), make(1, b'x'), make(2, b'x'))
        # Same code, differing data.
        self.assertNormalEqualityImplementation(
            make(1, b'x'), make(1, b'x'), make(1, b'y'))

    def test_encode(self):
        """
        Encoding an option writes its code, the length of its data, and
        the data itself to the supplied stream.
        """
        stream = BytesIO()
        dns._OPTVariableOption(1, b'foobar').encode(stream)
        self.assertEqual(
            stream.getvalue(),
            b'\x00\x01' # OPTION-CODE 1
            b'\x00\x06' # OPTION-LENGTH 6
            b'foobar' # OPTION-DATA
            )

    def test_decode(self):
        """
        Decoding a byte stream populates the code and data attributes of
        an existing L{dns._OPTVariableOption} instance.
        """
        stream = BytesIO(
            b'\x00\x01' # OPTION-CODE 1
            b'\x00\x06' # OPTION-LENGTH 6
            b'foobar' # OPTION-DATA
            )
        option = dns._OPTVariableOption()
        option.decode(stream)
        self.assertEqual(option.code, 1)
        self.assertEqual(option.data, b'foobar')
class RaisedArgs(Exception):
    """
    An exception which can be raised by fakes to test that the fake is called
    with expected arguments.
    """
    def __init__(self, args, kwargs):
        """
        Store the positional and keyword arguments as attributes.

        @param args: The positional args.
        @param kwargs: The keyword args.
        """
        # Deliberately overwrite Exception.args so the caller can
        # inspect exactly what the fake was invoked with.
        self.args, self.kwargs = args, kwargs
class MessageEmpty(object):
    """
    Generate byte string and constructor arguments for an empty
    L{dns._EDNSMessage}.

    C{bytes} and C{kwargs} are two representations of the same message and
    must stay in sync: encoding a message built from C{kwargs} must produce
    exactly C{bytes}, and vice versa.
    """
    @classmethod
    def bytes(cls):
        """
        Bytes which are expected when encoding an instance constructed using
        C{kwargs} and which are expected to result in an identical instance when
        decoded.

        @return: The L{bytes} of a wire encoded message.
        """
        return (
            b'\x01\x00' # id: 256
            b'\x97' # QR: 1, OPCODE: 2, AA: 0, TC: 0, RD: 1
            b'\x8f' # RA: 1, Z, RCODE: 15
            b'\x00\x00' # number of queries
            b'\x00\x00' # number of answers
            b'\x00\x00' # number of authorities
            b'\x00\x00' # number of additionals
        )
    @classmethod
    def kwargs(cls):
        """
        Keyword constructor arguments which are expected to result in an
        instance which returns C{bytes} when encoded.

        @return: A L{dict} of keyword arguments.
        """
        # ednsVersion=None means no OPT record is emitted, matching the
        # zero additionals count in C{bytes}.
        return dict(
            id=256,
            answer=True,
            opCode=dns.OP_STATUS,
            auth=True,
            trunc=True,
            recDes=True,
            recAv=True,
            rCode=15,
            ednsVersion=None,
            )
class MessageTruncated(object):
    """
    An empty response message whose TR bit is set to 1.

    C{bytes} and C{kwargs} are two representations of the same message and
    must stay in sync.
    """
    @classmethod
    def bytes(cls):
        """
        Bytes which are expected when encoding an instance constructed using
        C{kwargs} and which are expected to result in an identical instance when
        decoded.

        @return: The L{bytes} of a wire encoded message.
        """
        return (
            b'\x01\x00' # ID: 256
            b'\x82' # QR: 1, OPCODE: 0, AA: 0, TC: 1, RD: 0
            b'\x00' # RA: 0, Z, RCODE: 0
            b'\x00\x00' # Number of queries
            b'\x00\x00' # Number of answers
            b'\x00\x00' # Number of authorities
            b'\x00\x00' # Number of additionals
        )
    @classmethod
    def kwargs(cls):
        """
        Keyword constructor arguments which are expected to result in an
        instance which returns C{bytes} when encoded.

        @return: A L{dict} of keyword arguments.
        """
        # Flags are given as 0/1 ints here (other fixtures use booleans);
        # both encode identically since the flags are shifted into bits.
        return dict(
            id=256,
            answer=1,
            opCode=0,
            auth=0,
            trunc=1,
            recDes=0,
            recAv=0,
            rCode=0,
            ednsVersion=None,)
class MessageNonAuthoritative(object):
    """
    A minimal non-authoritative message.

    C{bytes} and C{kwargs} are two representations of the same message and
    must stay in sync: encoding a message built from C{kwargs} must produce
    exactly C{bytes}.
    """
    @classmethod
    def bytes(cls):
        """
        Bytes which are expected when encoding an instance constructed using
        C{kwargs} and which are expected to result in an identical instance when
        decoded.

        @return: The L{bytes} of a wire encoded message.
        """
        return (
            b'\x01\x00' # ID 256
            b'\x00' # QR: 0, OPCODE: 0, AA: 0, TC: 0, RD: 0
            b'\x00' # RA: 0, Z, RCODE: 0
            b'\x00\x00' # Query count
            b'\x00\x01' # Answer count
            b'\x00\x00' # Authorities count
            b'\x00\x00' # Additionals count
            # Answer
            b'\x00' # RR NAME (root)
            b'\x00\x01' # RR TYPE 1 (A)
            b'\x00\x01' # RR CLASS 1 (IN)
            b'\x00\x00\x00\x00' # RR TTL
            b'\x00\x04' # RDLENGTH 4
            b'\x01\x02\x03\x04' # IPv4 1.2.3.4
        )
    @classmethod
    def kwargs(cls):
        """
        Keyword constructor arguments which are expected to result in an
        instance which returns C{bytes} when encoded.

        @return: A L{dict} of keyword arguments.
        """
        return dict(
            id=256,
            auth=0,
            ednsVersion=None,
            answers=[
                dns.RRHeader(
                    b'',
                    # The address must match the RDATA in C{bytes}:
                    # b'\x01\x02\x03\x04' encodes 1.2.3.4.
                    payload=dns.Record_A('1.2.3.4', ttl=0),
                    auth=False)])
class MessageAuthoritative(object):
    """
    A minimal authoritative message.

    C{bytes} and C{kwargs} are two representations of the same message and
    must stay in sync: encoding a message built from C{kwargs} must produce
    exactly C{bytes}.
    """
    @classmethod
    def bytes(cls):
        """
        Bytes which are expected when encoding an instance constructed using
        C{kwargs} and which are expected to result in an identical instance when
        decoded.

        @return: The L{bytes} of a wire encoded message.
        """
        return (
            b'\x01\x00' # ID: 256
            b'\x04' # QR: 0, OPCODE: 0, AA: 1, TC: 0, RD: 0
            b'\x00' # RA: 0, Z, RCODE: 0
            b'\x00\x00' # Query count
            b'\x00\x01' # Answer count
            b'\x00\x00' # Authorities count
            b'\x00\x00' # Additionals count
            # Answer
            b'\x00' # RR NAME (root)
            b'\x00\x01' # RR TYPE 1 (A)
            b'\x00\x01' # RR CLASS 1 (IN)
            b'\x00\x00\x00\x00' # RR TTL
            b'\x00\x04' # RDLENGTH 4
            b'\x01\x02\x03\x04' # IPv4 1.2.3.4
        )
    @classmethod
    def kwargs(cls):
        """
        Keyword constructor arguments which are expected to result in an
        instance which returns C{bytes} when encoded.

        @return: A L{dict} of keyword arguments.
        """
        return dict(
            id=256,
            auth=1,
            ednsVersion=None,
            answers=[
                dns.RRHeader(
                    b'',
                    # The address must match the RDATA in C{bytes}:
                    # b'\x01\x02\x03\x04' encodes 1.2.3.4.
                    payload=dns.Record_A('1.2.3.4', ttl=0),
                    auth=True)])
class MessageComplete:
    """
    An example of a fully populated non-edns response message.

    Contains name compression, answers, authority, and additional records.

    C{bytes} and C{kwargs} are two representations of the same message and
    must stay in sync: encoding a message built from C{kwargs} must produce
    exactly C{bytes}.
    """
    @classmethod
    def bytes(cls):
        """
        Bytes which are expected when encoding an instance constructed using
        C{kwargs} and which are expected to result in an identical instance when
        decoded.

        @return: The L{bytes} of a wire encoded message.
        """
        return (
            b'\x01\x00' # ID: 256
            b'\x95' # QR: 1, OPCODE: 2, AA: 1, TC: 0, RD: 1
            b'\x8f' # RA: 1, Z, RCODE: 15
            b'\x00\x01' # Query count
            b'\x00\x01' # Answer count
            b'\x00\x01' # Authorities count
            b'\x00\x01' # Additionals count
            # Query begins at Byte 12
            b'\x07example\x03com\x00' # QNAME
            b'\x00\x06' # QTYPE 6 (SOA)
            b'\x00\x01' # QCLASS 1 (IN)
            # Answers
            b'\xc0\x0c' # RR NAME (compression ref b12)
            b'\x00\x06' # RR TYPE 6 (SOA)
            b'\x00\x01' # RR CLASS 1 (IN)
            b'\xff\xff\xff\xff' # RR TTL
            b'\x00\x27' # RDLENGTH 39
            b'\x03ns1\xc0\x0c' # Mname (ns1.example.com (compression ref b15)
            b'\x0ahostmaster\xc0\x0c' # rname (hostmaster.example.com)
            b'\xff\xff\xff\xfe' # Serial
            b'\x7f\xff\xff\xfd' # Refresh
            b'\x7f\xff\xff\xfc' # Retry
            b'\x7f\xff\xff\xfb' # Expire
            b'\xff\xff\xff\xfa' # Minimum
            # Authority
            b'\xc0\x0c' # RR NAME (example.com compression ref b12)
            b'\x00\x02' # RR TYPE 2 (NS)
            b'\x00\x01' # RR CLASS 1 (IN)
            b'\xff\xff\xff\xff' # RR TTL
            b'\x00\x02' # RDLENGTH
            b'\xc0\x29' # RDATA (ns1.example.com (compression ref b41)
            # Additional
            b'\xc0\x29' # RR NAME (ns1.example.com compression ref b41)
            b'\x00\x01' # RR TYPE 1 (A)
            b'\x00\x01' # RR CLASS 1 (IN)
            b'\xff\xff\xff\xff' # RR TTL
            b'\x00\x04' # RDLENGTH
            b'\x05\x06\x07\x08' # RDATA 5.6.7.8
        )
    @classmethod
    def kwargs(cls):
        """
        Keyword constructor arguments which are expected to result in an
        instance which returns C{bytes} when encoded.

        @return: A L{dict} of keyword arguments.
        """
        return dict(
            id=256,
            answer=1,
            opCode=dns.OP_STATUS,
            auth=1,
            recDes=1,
            recAv=1,
            rCode=15,
            ednsVersion=None,
            queries=[dns.Query(b'example.com', dns.SOA)],
            answers=[
                dns.RRHeader(
                    b'example.com',
                    type=dns.SOA,
                    ttl=0xffffffff,
                    auth=True,
                    payload=dns.Record_SOA(
                        ttl=0xffffffff,
                        mname=b'ns1.example.com',
                        rname=b'hostmaster.example.com',
                        serial=0xfffffffe,
                        refresh=0x7ffffffd,
                        retry=0x7ffffffc,
                        expire=0x7ffffffb,
                        minimum=0xfffffffa,
                        ))],
            authority=[
                dns.RRHeader(
                    b'example.com',
                    type=dns.NS,
                    ttl=0xffffffff,
                    auth=True,
                    payload=dns.Record_NS(
                        'ns1.example.com', ttl=0xffffffff))],
            additional=[
                dns.RRHeader(
                    b'ns1.example.com',
                    type=dns.A,
                    ttl=0xffffffff,
                    auth=True,
                    # The address must match the RDATA in C{bytes}:
                    # b'\x05\x06\x07\x08' encodes 5.6.7.8.
                    payload=dns.Record_A(
                        '5.6.7.8', ttl=0xffffffff))])
class MessageEDNSQuery(object):
    """
    A minimal EDNS query message.

    C{bytes} and C{kwargs} are two representations of the same message and
    must stay in sync.
    """
    @classmethod
    def bytes(cls):
        """
        Bytes which are expected when encoding an instance constructed using
        C{kwargs} and which are expected to result in an identical instance when
        decoded.

        @return: The L{bytes} of a wire encoded message.
        """
        return (
            b'\x00\x00' # ID: 0
            b'\x00' # QR: 0, OPCODE: 0, AA: 0, TC: 0, RD: 0
            b'\x00' # RA: 0, Z, RCODE: 0
            b'\x00\x01' # Queries count
            b'\x00\x00' # Answers count
            b'\x00\x00' # Authority count
            b'\x00\x01' # Additionals count
            # Queries
            b'\x03www\x07example\x03com\x00' # QNAME
            b'\x00\x01' # QTYPE (A)
            b'\x00\x01' # QCLASS (IN)
            # Additional OPT record
            b'\x00' # NAME (.)
            b'\x00\x29' # TYPE (OPT 41)
            b'\x10\x00' # UDP Payload Size (4096)
            b'\x00' # Extended RCODE
            b'\x03' # EDNS version
            b'\x00\x00' # DO: False + Z
            b'\x00\x00' # RDLENGTH
        )
    @classmethod
    def kwargs(cls):
        """
        Keyword constructor arguments which are expected to result in an
        instance which returns C{bytes} when encoded.

        @return: A L{dict} of keyword arguments.
        """
        # NOTE(review): C{bytes} advertises a 4096-octet UDP payload but no
        # maxSize is given here — confirm the message class default matches.
        return dict(
            id=0,
            answer=0,
            opCode=dns.OP_QUERY,
            auth=0,
            recDes=0,
            recAv=0,
            rCode=0,
            ednsVersion=3,
            dnssecOK=False,
            queries=[dns.Query(b'www.example.com', dns.A)],
            additional=[])
class MessageEDNSComplete(object):
    """
    An example of a fully populated edns response message.

    Contains name compression, answers, authority, and additional records.

    C{bytes} and C{kwargs} are two representations of the same message and
    must stay in sync: encoding a message built from C{kwargs} must produce
    exactly C{bytes}.
    """
    @classmethod
    def bytes(cls):
        """
        Bytes which are expected when encoding an instance constructed using
        C{kwargs} and which are expected to result in an identical instance when
        decoded.

        @return: The L{bytes} of a wire encoded message.
        """
        return (
            b'\x01\x00' # ID: 256
            b'\x95' # QR: 1, OPCODE: 2, AA: 1, TC: 0, RD: 1
            b'\xbf' # RA: 1, Z: 0, AD: 1, CD: 1, RCODE: 15
            b'\x00\x01' # Query count
            b'\x00\x01' # Answer count
            b'\x00\x01' # Authorities count
            b'\x00\x02' # Additionals count
            # Query begins at Byte 12
            b'\x07example\x03com\x00' # QNAME
            b'\x00\x06' # QTYPE 6 (SOA)
            b'\x00\x01' # QCLASS 1 (IN)
            # Answers
            b'\xc0\x0c' # RR NAME (compression ref b12)
            b'\x00\x06' # RR TYPE 6 (SOA)
            b'\x00\x01' # RR CLASS 1 (IN)
            b'\xff\xff\xff\xff' # RR TTL
            b'\x00\x27' # RDLENGTH 39
            b'\x03ns1\xc0\x0c' # mname (ns1.example.com (compression ref b15)
            b'\x0ahostmaster\xc0\x0c' # rname (hostmaster.example.com)
            b'\xff\xff\xff\xfe' # Serial
            b'\x7f\xff\xff\xfd' # Refresh
            b'\x7f\xff\xff\xfc' # Retry
            b'\x7f\xff\xff\xfb' # Expire
            b'\xff\xff\xff\xfa' # Minimum
            # Authority
            b'\xc0\x0c' # RR NAME (example.com compression ref b12)
            b'\x00\x02' # RR TYPE 2 (NS)
            b'\x00\x01' # RR CLASS 1 (IN)
            b'\xff\xff\xff\xff' # RR TTL
            b'\x00\x02' # RDLENGTH
            b'\xc0\x29' # RDATA (ns1.example.com (compression ref b41)
            # Additional
            b'\xc0\x29' # RR NAME (ns1.example.com compression ref b41)
            b'\x00\x01' # RR TYPE 1 (A)
            b'\x00\x01' # RR CLASS 1 (IN)
            b'\xff\xff\xff\xff' # RR TTL
            b'\x00\x04' # RDLENGTH
            b'\x05\x06\x07\x08' # RDATA 5.6.7.8
            # Additional OPT record
            b'\x00' # NAME (.)
            b'\x00\x29' # TYPE (OPT 41)
            b'\x04\x00' # UDP Payload Size (1024)
            b'\x00' # Extended RCODE
            b'\x03' # EDNS version
            b'\x80\x00' # DO: True + Z
            b'\x00\x00' # RDLENGTH
        )
    @classmethod
    def kwargs(cls):
        """
        Keyword constructor arguments which are expected to result in an
        instance which returns C{bytes} when encoded.

        @return: A L{dict} of keyword arguments.
        """
        return dict(
            id=256,
            answer=1,
            opCode=dns.OP_STATUS,
            auth=1,
            trunc=0,
            recDes=1,
            recAv=1,
            rCode=15,
            ednsVersion=3,
            dnssecOK=True,
            authenticData=True,
            checkingDisabled=True,
            maxSize=1024,
            queries=[dns.Query(b'example.com', dns.SOA)],
            answers=[
                dns.RRHeader(
                    b'example.com',
                    type=dns.SOA,
                    ttl=0xffffffff,
                    auth=True,
                    payload=dns.Record_SOA(
                        ttl=0xffffffff,
                        mname=b'ns1.example.com',
                        rname=b'hostmaster.example.com',
                        serial=0xfffffffe,
                        refresh=0x7ffffffd,
                        retry=0x7ffffffc,
                        expire=0x7ffffffb,
                        minimum=0xfffffffa,
                        ))],
            authority=[
                dns.RRHeader(
                    b'example.com',
                    type=dns.NS,
                    ttl=0xffffffff,
                    auth=True,
                    payload=dns.Record_NS(
                        'ns1.example.com', ttl=0xffffffff))],
            additional=[
                dns.RRHeader(
                    b'ns1.example.com',
                    type=dns.A,
                    ttl=0xffffffff,
                    auth=True,
                    # The address must match the RDATA in C{bytes}:
                    # b'\x05\x06\x07\x08' encodes 5.6.7.8.
                    payload=dns.Record_A(
                        '5.6.7.8', ttl=0xffffffff))])
class MessageEDNSExtendedRCODE(object):
    """
    An example of an EDNS message with an extended RCODE.

    C{bytes} and C{kwargs} are two representations of the same message and
    must stay in sync. The 12-bit rCode (0xabc) is split between the
    message header (low 4 bits: 0xc) and the OPT record (high 8 bits: 0xab).
    """
    @classmethod
    def bytes(cls):
        """
        Bytes which are expected when encoding an instance constructed using
        C{kwargs} and which are expected to result in an identical instance when
        decoded.

        @return: The L{bytes} of a wire encoded message.
        """
        return (
            b'\x00\x00' # ID: 0
            b'\x00' # QR: 0, OPCODE: 0, AA: 0, TC: 0, RD: 0
            b'\x0c' # RA: 0, Z, RCODE: 12
            b'\x00\x00' # Query count
            b'\x00\x00' # Answer count
            b'\x00\x00' # Authorities count
            b'\x00\x01' # 1 additionals
            # Additional OPT record
            b'\x00' # NAME (.)
            b'\x00\x29' # TYPE (OPT 41)
            b'\x10\x00' # UDP Payload Size (4096)
            b'\xab' # Extended RCODE: 171
            b'\x00' # EDNS version
            b'\x00\x00' # DO: False + Z
            b'\x00\x00' # RDLENGTH
        )
    @classmethod
    def kwargs(cls):
        """
        Keyword constructor arguments which are expected to result in an
        instance which returns C{bytes} when encoded.

        @return: A L{dict} of keyword arguments.
        """
        return dict(
            id=0,
            answer=False,
            opCode=dns.OP_QUERY,
            auth=False,
            trunc=False,
            recDes=False,
            recAv=False,
            rCode=0xabc, # Combined OPT extended RCODE + Message RCODE
            ednsVersion=0,
            dnssecOK=False,
            maxSize=4096,
            queries=[],
            answers=[],
            authority=[],
            additional=[],
            )
class MessageComparable(FancyEqMixin, FancyStrMixin, object):
    """
    A wrapper around L{dns.Message} which is comparable so that it can be tested
    using some of the L{dns._EDNSMessage} tests.
    """
    # The attributes compared for equality are also the ones shown by repr.
    compareAttributes = (
        'id', 'answer', 'opCode', 'auth', 'trunc',
        'recDes', 'recAv', 'rCode',
        'queries', 'answers', 'authority', 'additional')
    showAttributes = compareAttributes

    def __init__(self, original):
        """
        @param original: The L{dns.Message} instance being wrapped.
        """
        self.original = original

    def __getattr__(self, key):
        """
        Delegate attribute access to the wrapped message.
        """
        return getattr(self.original, key)
def verifyConstructorArgument(testCase, cls, argName, defaultVal, altVal,
                              attrName=None):
    """
    Verify that an attribute has the expected default value and that a
    corresponding argument passed to a constructor is assigned to that
    attribute.

    @param testCase: The L{TestCase} whose assert methods will be
        called.
    @type testCase: L{unittest.TestCase}

    @param cls: The constructor under test.
    @type cls: L{type}

    @param argName: The name of the constructor argument under test.
    @type argName: L{str}

    @param defaultVal: The expected default value of C{attrName} /
        C{argName}
    @type defaultVal: L{object}

    @param altVal: A value which is different from the default. Used to
        test that supplied constructor arguments are actually assigned to the
        correct attribute.
    @type altVal: L{object}

    @param attrName: The name of the attribute under test if different
        from C{argName}. Defaults to C{argName}
    @type attrName: L{str}
    """
    if attrName is None:
        attrName = argName
    # Observe the attribute twice: once with no constructor arguments
    # (expecting the default) and once with the alternative value supplied.
    observedDefault = getattr(cls(), attrName)
    observedAlt = getattr(cls(**{argName: altVal}), attrName)
    testCase.assertEqual(
        {'defaultVal': defaultVal, 'altVal': altVal},
        {'defaultVal': observedDefault, 'altVal': observedAlt})
class ConstructorTestsMixin(object):
    """
    Helper methods for verifying default attribute values and corresponding
    constructor arguments.
    """
    def _verifyConstructorArgument(self, argName, defaultVal, altVal):
        """
        Wrap L{verifyConstructorArgument} to provide simpler interface for
        testing Message and _EDNSMessage constructor arguments.

        @param argName: The name of the constructor argument.
        @param defaultVal: The expected default value.
        @param altVal: An alternative value which is expected to be assigned to
            a correspondingly named attribute.
        """
        verifyConstructorArgument(
            testCase=self, cls=self.messageFactory, argName=argName,
            defaultVal=defaultVal, altVal=altVal)

    def _verifyConstructorFlag(self, argName, defaultVal):
        """
        Wrap L{verifyConstructorArgument} to provide simpler interface for
        testing _EDNSMessage constructor flags.

        @param argName: The name of the constructor flag argument
        @param defaultVal: The expected default value of the flag
        """
        assert defaultVal in (True, False)
        # For a boolean flag the alternative value is simply the negation.
        verifyConstructorArgument(
            testCase=self, cls=self.messageFactory, argName=argName,
            defaultVal=defaultVal, altVal=not defaultVal)
class CommonConstructorTestsMixin(object):
    """
    Tests for constructor arguments and their associated attributes that are
    common to both L{twisted.names.dns._EDNSMessage} and L{dns.Message}.

    TestCase classes that use this mixin must provide a C{messageFactory} method
    which accepts any argument supported by L{dns.Message.__init__}.

    TestCases must also mixin ConstructorTestsMixin which provides some custom
    assertions for testing constructor arguments.
    """
    def test_id(self):
        """
        L{dns._EDNSMessage.id} defaults to C{0} and can be overridden in
        the constructor.
        """
        self._verifyConstructorArgument('id', defaultVal=0, altVal=1)
    def test_answer(self):
        """
        L{dns._EDNSMessage.answer} defaults to C{False} and can be overridden in
        the constructor.
        """
        self._verifyConstructorFlag('answer', defaultVal=False)
    def test_opCode(self):
        """
        L{dns._EDNSMessage.opCode} defaults to L{dns.OP_QUERY} and can be
        overridden in the constructor.
        """
        self._verifyConstructorArgument(
            'opCode', defaultVal=dns.OP_QUERY, altVal=dns.OP_STATUS)
    def test_auth(self):
        """
        L{dns._EDNSMessage.auth} defaults to C{False} and can be overridden in
        the constructor.
        """
        self._verifyConstructorFlag('auth', defaultVal=False)
    def test_trunc(self):
        """
        L{dns._EDNSMessage.trunc} defaults to C{False} and can be overridden in
        the constructor.
        """
        self._verifyConstructorFlag('trunc', defaultVal=False)
    def test_recDes(self):
        """
        L{dns._EDNSMessage.recDes} defaults to C{False} and can be overridden in
        the constructor.
        """
        self._verifyConstructorFlag('recDes', defaultVal=False)
    def test_recAv(self):
        """
        L{dns._EDNSMessage.recAv} defaults to C{False} and can be overridden in
        the constructor.
        """
        self._verifyConstructorFlag('recAv', defaultVal=False)
    def test_rCode(self):
        """
        L{dns._EDNSMessage.rCode} defaults to C{0} and can be overridden in the
        constructor.
        """
        self._verifyConstructorArgument('rCode', defaultVal=0, altVal=123)
    def test_maxSize(self):
        """
        L{dns._EDNSMessage.maxSize} defaults to C{512} and can be overridden in
        the constructor.
        """
        self._verifyConstructorArgument('maxSize', defaultVal=512, altVal=1024)
    def test_queries(self):
        """
        L{dns._EDNSMessage.queries} defaults to C{[]}.
        """
        self.assertEqual(self.messageFactory().queries, [])
    def test_answers(self):
        """
        L{dns._EDNSMessage.answers} defaults to C{[]}.
        """
        self.assertEqual(self.messageFactory().answers, [])
    def test_authority(self):
        """
        L{dns._EDNSMessage.authority} defaults to C{[]}.
        """
        self.assertEqual(self.messageFactory().authority, [])
    def test_additional(self):
        """
        L{dns._EDNSMessage.additional} defaults to C{[]}.
        """
        self.assertEqual(self.messageFactory().additional, [])
class EDNSMessageConstructorTests(ConstructorTestsMixin,
                                  CommonConstructorTestsMixin,
                                  unittest.SynchronousTestCase):
    """
    Tests for L{twisted.names.dns._EDNSMessage} constructor arguments that are
    shared with L{dns.Message}.

    All test methods are inherited from L{CommonConstructorTestsMixin}; only
    the factory under test differs.
    """
    # The class whose constructor arguments are exercised by the mixin.
    messageFactory = dns._EDNSMessage
class MessageConstructorTests(ConstructorTestsMixin,
                              CommonConstructorTestsMixin,
                              unittest.SynchronousTestCase):
    """
    Tests for L{twisted.names.dns.Message} constructor arguments that are shared
    with L{dns._EDNSMessage}.

    All test methods are inherited from L{CommonConstructorTestsMixin}; only
    the factory under test differs.
    """
    # The class whose constructor arguments are exercised by the mixin.
    messageFactory = dns.Message
class EDNSMessageSpecificsTestCase(ConstructorTestsMixin,
                                   unittest.SynchronousTestCase):
    """
    Tests for L{dns._EDNSMessage}.

    These tests are for L{dns._EDNSMessage} APIs which are not shared with
    L{dns.Message}.
    """
    messageFactory = dns._EDNSMessage
    def test_ednsVersion(self):
        """
        L{dns._EDNSMessage.ednsVersion} defaults to C{0} and can be overridden
        in the constructor.
        """
        self._verifyConstructorArgument(
            'ednsVersion', defaultVal=0, altVal=None)
    def test_dnssecOK(self):
        """
        L{dns._EDNSMessage.dnssecOK} defaults to C{False} and can be overridden
        in the constructor.
        """
        self._verifyConstructorFlag('dnssecOK', defaultVal=False)
    def test_authenticData(self):
        """
        L{dns._EDNSMessage.authenticData} defaults to C{False} and can be
        overridden in the constructor.
        """
        self._verifyConstructorFlag('authenticData', defaultVal=False)
    def test_checkingDisabled(self):
        """
        L{dns._EDNSMessage.checkingDisabled} defaults to C{False} and can be
        overridden in the constructor.
        """
        self._verifyConstructorFlag('checkingDisabled', defaultVal=False)
    def test_queriesOverride(self):
        """
        L{dns._EDNSMessage.queries} can be overridden in the constructor.
        """
        msg = self.messageFactory(queries=[dns.Query(b'example.com')])
        self.assertEqual(
            msg.queries,
            [dns.Query(b'example.com')])
    def test_answersOverride(self):
        """
        L{dns._EDNSMessage.answers} can be overridden in the constructor.
        """
        msg = self.messageFactory(
            answers=[
                dns.RRHeader(
                    b'example.com',
                    payload=dns.Record_A('172.16.31.10'))])
        self.assertEqual(
            msg.answers,
            [dns.RRHeader(b'example.com', payload=dns.Record_A('172.16.31.10'))])
    def test_authorityOverride(self):
        """
        L{dns._EDNSMessage.authority} can be overridden in the constructor.
        """
        msg = self.messageFactory(
            authority=[
                dns.RRHeader(
                    b'example.com',
                    type=dns.SOA,
                    payload=dns.Record_SOA())])
        self.assertEqual(
            msg.authority,
            [dns.RRHeader(b'example.com', type=dns.SOA,
                          payload=dns.Record_SOA())])
    def test_additionalOverride(self):
        """
        L{dns._EDNSMessage.additional} can be overridden in the constructor.
        """
        msg = self.messageFactory(
            additional=[
                dns.RRHeader(
                    b'example.com',
                    payload=dns.Record_A('172.16.31.10'))])
        self.assertEqual(
            msg.additional,
            [dns.RRHeader(b'example.com', payload=dns.Record_A('172.16.31.10'))])
    def test_reprDefaults(self):
        """
        L{dns._EDNSMessage.__repr__} omits field values and sections which are
        identical to their defaults. The id field value is always shown.
        """
        self.assertEqual(
            '<_EDNSMessage id=0>',
            repr(self.messageFactory())
            )
    def test_reprFlagsIfSet(self):
        """
        L{dns._EDNSMessage.__repr__} displays flags if they are L{True}.
        """
        m = self.messageFactory(answer=True, auth=True, trunc=True, recDes=True,
                                recAv=True, authenticData=True,
                                checkingDisabled=True, dnssecOK=True)
        self.assertEqual(
            '<_EDNSMessage '
            'id=0 '
            'flags=answer,auth,trunc,recDes,recAv,authenticData,'
            'checkingDisabled,dnssecOK'
            '>',
            repr(m),
            )
    # NOTE(review): the method name below misspells "Default"; it is kept
    # as-is because renaming would change the published test id.
    def test_reprNonDefautFields(self):
        """
        L{dns._EDNSMessage.__repr__} displays field values if they differ from
        their defaults.
        """
        m = self.messageFactory(id=10, opCode=20, rCode=30, maxSize=40,
                                ednsVersion=50)
        self.assertEqual(
            '<_EDNSMessage '
            'id=10 '
            'opCode=20 '
            'rCode=30 '
            'maxSize=40 '
            'ednsVersion=50'
            '>',
            repr(m),
            )
    def test_reprNonDefaultSections(self):
        """
        L{dns.Message.__repr__} displays sections which differ from their
        defaults.
        """
        m = self.messageFactory()
        m.queries = [1, 2, 3]
        m.answers = [4, 5, 6]
        m.authority = [7, 8, 9]
        m.additional = [10, 11, 12]
        self.assertEqual(
            '<_EDNSMessage '
            'id=0 '
            'queries=[1, 2, 3] '
            'answers=[4, 5, 6] '
            'authority=[7, 8, 9] '
            'additional=[10, 11, 12]'
            '>',
            repr(m),
            )
    def test_fromStrCallsMessageFactory(self):
        """
        L{dns._EDNSMessage.fromString} calls L{dns._EDNSMessage._messageFactory}
        to create a new L{dns.Message} instance which is used to decode the
        supplied bytes.
        """
        class FakeMessageFactory(object):
            """
            Fake message factory.
            """
            def fromStr(self, *args, **kwargs):
                """
                Fake fromStr method which raises the arguments it was passed.

                @param args: positional arguments
                @param kwargs: keyword arguments
                """
                raise RaisedArgs(args, kwargs)
        m = dns._EDNSMessage()
        m._messageFactory = FakeMessageFactory
        dummyBytes = object()
        # The RaisedArgs exception smuggles out the arguments fromStr
        # received, so they can be compared with what was passed in.
        e = self.assertRaises(RaisedArgs, m.fromStr, dummyBytes)
        self.assertEqual(
            ((dummyBytes,), {}),
            (e.args, e.kwargs)
            )
    def test_fromStrCallsFromMessage(self):
        """
        L{dns._EDNSMessage.fromString} calls L{dns._EDNSMessage._fromMessage}
        with a L{dns.Message} instance
        """
        m = dns._EDNSMessage()
        class FakeMessageFactory():
            """
            Fake message factory.
            """
            def fromStr(self, bytes):
                """
                A noop fake version of fromStr

                @param bytes: the bytes to be decoded
                """
        fakeMessage = FakeMessageFactory()
        m._messageFactory = lambda: fakeMessage
        def fakeFromMessage(*args, **kwargs):
            raise RaisedArgs(args, kwargs)
        m._fromMessage = fakeFromMessage
        e = self.assertRaises(RaisedArgs, m.fromStr, b'')
        self.assertEqual(
            ((fakeMessage,), {}),
            (e.args, e.kwargs)
            )
    def test_toStrCallsToMessage(self):
        """
        L{dns._EDNSMessage.toStr} calls L{dns._EDNSMessage._toMessage}
        """
        m = dns._EDNSMessage()
        def fakeToMessage(*args, **kwargs):
            raise RaisedArgs(args, kwargs)
        m._toMessage = fakeToMessage
        e = self.assertRaises(RaisedArgs, m.toStr)
        self.assertEqual(
            ((), {}),
            (e.args, e.kwargs)
            )
    def test_toStrCallsToMessageToStr(self):
        """
        L{dns._EDNSMessage.toStr} calls C{toStr} on the message returned by
        L{dns._EDNSMessage._toMessage}.
        """
        m = dns._EDNSMessage()
        dummyBytes = object()
        class FakeMessage(object):
            """
            Fake Message
            """
            def toStr(self):
                """
                Fake toStr which returns dummyBytes.

                @return: dummyBytes
                """
                return dummyBytes
        def fakeToMessage(*args, **kwargs):
            return FakeMessage()
        m._toMessage = fakeToMessage
        self.assertEqual(
            dummyBytes,
            m.toStr()
            )
class EDNSMessageEqualityTests(ComparisonTestsMixin, unittest.SynchronousTestCase):
    """
    Tests for equality between L{dns._EDNSMessage} instances.

    These tests will not work with L{dns.Message} because it does not use
    L{twisted.python.util.FancyEqMixin}.
    """
    messageFactory = dns._EDNSMessage
    def test_id(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        id.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(id=1),
            self.messageFactory(id=1),
            self.messageFactory(id=2),
            )
    def test_answer(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        answer flag.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(answer=True),
            self.messageFactory(answer=True),
            self.messageFactory(answer=False),
            )
    def test_opCode(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        opCode.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(opCode=dns.OP_STATUS),
            self.messageFactory(opCode=dns.OP_STATUS),
            self.messageFactory(opCode=dns.OP_INVERSE),
            )
    def test_auth(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        auth flag.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(auth=True),
            self.messageFactory(auth=True),
            self.messageFactory(auth=False),
            )
    def test_trunc(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        trunc flag.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(trunc=True),
            self.messageFactory(trunc=True),
            self.messageFactory(trunc=False),
            )
    def test_recDes(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        recDes flag.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(recDes=True),
            self.messageFactory(recDes=True),
            self.messageFactory(recDes=False),
            )
    def test_recAv(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        recAv flag.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(recAv=True),
            self.messageFactory(recAv=True),
            self.messageFactory(recAv=False),
            )
    def test_rCode(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        rCode.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(rCode=16),
            self.messageFactory(rCode=16),
            self.messageFactory(rCode=15),
            )
    def test_ednsVersion(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        ednsVersion.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(ednsVersion=1),
            self.messageFactory(ednsVersion=1),
            self.messageFactory(ednsVersion=None),
            )
    def test_dnssecOK(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        dnssecOK.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(dnssecOK=True),
            self.messageFactory(dnssecOK=True),
            self.messageFactory(dnssecOK=False),
            )
    def test_authenticData(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        authenticData flags.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(authenticData=True),
            self.messageFactory(authenticData=True),
            self.messageFactory(authenticData=False),
            )
    def test_checkingDisabled(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        checkingDisabled flags.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(checkingDisabled=True),
            self.messageFactory(checkingDisabled=True),
            self.messageFactory(checkingDisabled=False),
            )
    def test_maxSize(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        maxSize.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(maxSize=2048),
            self.messageFactory(maxSize=2048),
            self.messageFactory(maxSize=1024),
            )
    def test_queries(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        queries.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(queries=[dns.Query(b'example.com')]),
            self.messageFactory(queries=[dns.Query(b'example.com')]),
            self.messageFactory(queries=[dns.Query(b'example.org')]),
            )
    def test_answers(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        answers.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(answers=[dns.RRHeader(
                b'example.com', payload=dns.Record_A('172.16.31.10'))]),
            self.messageFactory(answers=[dns.RRHeader(
                b'example.com', payload=dns.Record_A('172.16.31.10'))]),
            self.messageFactory(answers=[dns.RRHeader(
                b'example.org', payload=dns.Record_A('172.16.58.3'))]),
            )
    def test_authority(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        authority records.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(authority=[dns.RRHeader(
                b'example.com',
                type=dns.SOA, payload=dns.Record_SOA())]),
            self.messageFactory(authority=[dns.RRHeader(
                b'example.com',
                type=dns.SOA, payload=dns.Record_SOA())]),
            self.messageFactory(authority=[dns.RRHeader(
                b'example.org',
                type=dns.SOA, payload=dns.Record_SOA())]),
            )
    def test_additional(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        additional records.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(additional=[dns.RRHeader(
                b'example.com', payload=dns.Record_A('172.16.31.10'))]),
            self.messageFactory(additional=[dns.RRHeader(
                b'example.com', payload=dns.Record_A('172.16.31.10'))]),
            self.messageFactory(additional=[dns.RRHeader(
                b'example.org', payload=dns.Record_A('172.16.31.10'))]),
            )
class StandardEncodingTestsMixin(object):
    """
    Tests for the encoding and decoding of various standard (not EDNS) messages.

    These tests should work with both L{dns._EDNSMessage} and L{dns.Message}.

    TestCase classes that use this mixin must provide a C{messageFactory} method
    which accepts any argument supported by L{dns._EDNSMessage.__init__}.

    EDNS specific arguments may be discarded if not supported by the message
    class under construction.
    """
    def test_emptyMessageEncode(self):
        """
        An empty message can be encoded.
        """
        self.assertEqual(
            self.messageFactory(**MessageEmpty.kwargs()).toStr(),
            MessageEmpty.bytes())
    def test_emptyMessageDecode(self):
        """
        An empty message byte sequence can be decoded.
        """
        m = self.messageFactory()
        m.fromStr(MessageEmpty.bytes())
        self.assertEqual(m, self.messageFactory(**MessageEmpty.kwargs()))
    def test_completeQueryEncode(self):
        """
        A fully populated query message can be encoded.
        """
        self.assertEqual(
            self.messageFactory(**MessageComplete.kwargs()).toStr(),
            MessageComplete.bytes())
    def test_completeQueryDecode(self):
        """
        A fully populated message byte string can be decoded.
        """
        m = self.messageFactory()
        # The stray trailing comma which previously followed this call (and
        # silently built a throwaway tuple) has been removed.
        m.fromStr(MessageComplete.bytes())
        self.assertEqual(m, self.messageFactory(**MessageComplete.kwargs()))
    def test_NULL(self):
        """
        A I{NULL} record with an arbitrary payload can be encoded and decoded as
        part of a message.
        """
        # Use every possible octet value to exercise the full payload range.
        # (Renamed from "bytes" to avoid shadowing the builtin.)
        nullData = b''.join([dns._ord2bytes(i) for i in range(256)])
        rec = dns.Record_NULL(nullData)
        rr = dns.RRHeader(b'testname', dns.NULL, payload=rec)
        msg1 = self.messageFactory()
        msg1.answers.append(rr)
        s = msg1.toStr()
        msg2 = self.messageFactory()
        msg2.fromStr(s)
        self.assertIsInstance(msg2.answers[0].payload, dns.Record_NULL)
        self.assertEqual(msg2.answers[0].payload.payload, nullData)
    def test_nonAuthoritativeMessageEncode(self):
        """
        If the message C{authoritative} attribute is set to 0, the encoded bytes
        will have AA bit 0.
        """
        self.assertEqual(
            self.messageFactory(**MessageNonAuthoritative.kwargs()).toStr(),
            MessageNonAuthoritative.bytes())
    def test_nonAuthoritativeMessageDecode(self):
        """
        The L{dns.RRHeader} instances created by a message from a
        non-authoritative message byte string are marked as not authoritative.
        """
        m = self.messageFactory()
        m.fromStr(MessageNonAuthoritative.bytes())
        self.assertEqual(
            m, self.messageFactory(**MessageNonAuthoritative.kwargs()))
    def test_authoritativeMessageEncode(self):
        """
        If the message C{authoritative} attribute is set to 1, the encoded bytes
        will have AA bit 1.
        """
        self.assertEqual(
            self.messageFactory(**MessageAuthoritative.kwargs()).toStr(),
            MessageAuthoritative.bytes())
    def test_authoritativeMessageDecode(self):
        """
        The message and its L{dns.RRHeader} instances created by C{decode} from
        an authoritative message byte string, are marked as authoritative.
        """
        m = self.messageFactory()
        m.fromStr(MessageAuthoritative.bytes())
        self.assertEqual(
            m, self.messageFactory(**MessageAuthoritative.kwargs()))
    def test_truncatedMessageEncode(self):
        """
        If the message C{trunc} attribute is set to 1 the encoded bytes will
        have TR bit 1.
        """
        self.assertEqual(
            self.messageFactory(**MessageTruncated.kwargs()).toStr(),
            MessageTruncated.bytes())
    def test_truncatedMessageDecode(self):
        """
        The message instance created by decoding a truncated message is marked
        as truncated.
        """
        m = self.messageFactory()
        m.fromStr(MessageTruncated.bytes())
        self.assertEqual(m, self.messageFactory(**MessageTruncated.kwargs()))
class EDNSMessageStandardEncodingTests(StandardEncodingTestsMixin,
                                       unittest.SynchronousTestCase):
    """
    Tests for the encoding and decoding of various standard (non-EDNS) messages
    by L{dns._EDNSMessage}.
    """
    # _EDNSMessage.__init__ accepts all of the mixin's factory kwargs
    # directly, so no adapter is needed here.
    messageFactory = dns._EDNSMessage
class MessageStandardEncodingTests(StandardEncodingTestsMixin,
                                   unittest.SynchronousTestCase):
    """
    Tests for the encoding and decoding of various standard (non-EDNS) messages
    by L{dns.Message}.
    """
    @staticmethod
    def messageFactory(**kwargs):
        """
        Adapt constructor arguments expected by L{dns._EDNSMessage.__init__}
        to arguments suitable for use with L{dns.Message.__init__}.

        Unlike L{dns._EDNSMessage}, L{dns.Message.__init__} does not accept
        queries, answers etc as arguments; those are extracted from C{kwargs}
        and assigned as attributes after construction.

        Any L{dns._EDNSMessage} specific arguments are removed.

        @param kwargs: The keyword arguments which will be stripped of EDNS
            specific arguments before being passed to L{dns.Message.__init__}.

        @return: An L{dns.Message} instance (wrapped for comparison support).
        """
        queries = kwargs.pop('queries', [])
        answers = kwargs.pop('answers', [])
        authority = kwargs.pop('authority', [])
        additional = kwargs.pop('additional', [])
        # dns.Message has no EDNS support, so the EDNS-only argument is
        # silently discarded.
        kwargs.pop('ednsVersion', None)

        m = dns.Message(**kwargs)
        m.queries = queries
        m.answers = answers
        m.authority = authority
        m.additional = additional
        return MessageComparable(m)
class EDNSMessageEDNSEncodingTests(unittest.SynchronousTestCase):
    """
    Tests for the encoding and decoding of various EDNS messages.

    These tests will not work with L{dns.Message}.
    """
    messageFactory = dns._EDNSMessage

    def test_ednsMessageDecodeStripsOptRecords(self):
        """
        The L{dns._EDNSMessage} instance created by L{dns._EDNSMessage.decode}
        from an EDNS query never includes OPT records in the additional
        section.
        """
        m = self.messageFactory()
        m.fromStr(MessageEDNSQuery.bytes())

        self.assertEqual(m.additional, [])


    def test_ednsMessageDecodeMultipleOptRecords(self):
        """
        An L{dns._EDNSMessage} instance created from a byte string containing
        multiple I{OPT} records will discard all the C{OPT} records.

        C{ednsVersion} will be set to C{None}.

        @see: U{https://tools.ietf.org/html/rfc6891#section-6.1.1}
        """
        m = dns.Message()
        m.additional = [
            dns._OPTHeader(version=2),
            dns._OPTHeader(version=3)]

        ednsMessage = dns._EDNSMessage()
        ednsMessage.fromStr(m.toStr())

        self.assertEqual(ednsMessage.ednsVersion, None)


    def test_fromMessageCopiesSections(self):
        """
        L{dns._EDNSMessage._fromMessage} returns an L{_EDNSMessage} instance
        whose queries, answers, authority and additional lists are copies (not
        references to) the original message lists.
        """
        standardMessage = dns.Message()
        standardMessage.fromStr(MessageEDNSQuery.bytes())

        ednsMessage = dns._EDNSMessage._fromMessage(standardMessage)

        duplicates = []
        for attrName in ('queries', 'answers', 'authority', 'additional'):
            # Identity check: a shared list object would mean mutations leak
            # between the two messages.
            if (getattr(standardMessage, attrName)
                is getattr(ednsMessage, attrName)):
                duplicates.append(attrName)

        if duplicates:
            self.fail(
                'Message and _EDNSMessage shared references to the following '
                'section lists after decoding: %s' % (duplicates,))


    def test_toMessageCopiesSections(self):
        """
        L{dns._EDNSMessage.toStr} makes no in place changes to the message
        instance.
        """
        ednsMessage = dns._EDNSMessage(ednsVersion=1)
        ednsMessage.toStr()

        # Encoding synthesises an OPT record, but it must not be left behind
        # on the instance's additional section.
        self.assertEqual(ednsMessage.additional, [])


    def test_optHeaderPosition(self):
        """
        L{dns._EDNSMessage} can decode OPT records, regardless of their position
        in the additional records section.

        "The OPT RR MAY be placed anywhere within the additional data section."

        @see: U{https://tools.ietf.org/html/rfc6891#section-6.1.1}
        """
        # XXX: We need an _OPTHeader.toRRHeader method. See #6779.
        b = BytesIO()
        optRecord = dns._OPTHeader(version=1)
        optRecord.encode(b)
        optRRHeader = dns.RRHeader()
        b.seek(0)
        optRRHeader.decode(b)
        m = dns.Message()

        # Try the OPT record as the only additional record, then last, then
        # first; decoding must find it in every position.
        m.additional = [optRRHeader]
        actualMessages = []
        actualMessages.append(dns._EDNSMessage._fromMessage(m).ednsVersion)

        m.additional.append(dns.RRHeader(type=dns.A))
        actualMessages.append(
            dns._EDNSMessage._fromMessage(m).ednsVersion)

        m.additional.insert(0, dns.RRHeader(type=dns.A))
        actualMessages.append(
            dns._EDNSMessage._fromMessage(m).ednsVersion)

        self.assertEqual(
            [1] * 3,
            actualMessages
        )


    def test_ednsDecode(self):
        """
        The L{dns._EDNSMessage} instance created by L{dns._EDNSMessage.fromStr}
        derives its edns specific values (C{ednsVersion}, etc) from the supplied
        OPT record.
        """
        m = self.messageFactory()
        m.fromStr(MessageEDNSComplete.bytes())

        self.assertEqual(m, self.messageFactory(**MessageEDNSComplete.kwargs()))


    def test_ednsEncode(self):
        """
        The L{dns._EDNSMessage} instance created by L{dns._EDNSMessage.toStr}
        encodes its edns specific values (C{ednsVersion}, etc) into an OPT
        record added to the additional section.
        """
        self.assertEqual(
            self.messageFactory(**MessageEDNSComplete.kwargs()).toStr(),
            MessageEDNSComplete.bytes())


    def test_extendedRcodeEncode(self):
        """
        The L{dns._EDNSMessage.toStr} encodes the extended I{RCODE} (>=16) by
        assigning the lower 4bits to the message RCODE field and the upper 4bits
        to the OPT pseudo record.
        """
        self.assertEqual(
            self.messageFactory(**MessageEDNSExtendedRCODE.kwargs()).toStr(),
            MessageEDNSExtendedRCODE.bytes())


    def test_extendedRcodeDecode(self):
        """
        The L{dns._EDNSMessage} instance created by L{dns._EDNSMessage.fromStr}
        derives RCODE from the supplied OPT record.
        """
        m = self.messageFactory()
        m.fromStr(MessageEDNSExtendedRCODE.bytes())

        self.assertEqual(
            m, self.messageFactory(**MessageEDNSExtendedRCODE.kwargs()))


    def test_extendedRcodeZero(self):
        """
        Note that EXTENDED-RCODE value 0 indicates that an unextended RCODE is
        in use (values 0 through 15).

        https://tools.ietf.org/html/rfc6891#section-6.1.3
        """
        ednsMessage = self.messageFactory(rCode=15, ednsVersion=0)
        standardMessage = ednsMessage._toMessage()

        self.assertEqual(
            (15, 0),
            (standardMessage.rCode, standardMessage.additional[0].extendedRCODE)
        )
class ResponseFromMessageTests(unittest.SynchronousTestCase):
    """
    Tests for L{dns._responseFromMessage}.
    """
    def test_responseFromMessageResponseType(self):
        """
        L{dns._responseFromMessage} is a constructor function which
        generates a new I{answer} message from an existing L{dns.Message} like
        instance.
        """
        req = dns.Message()
        resp = dns._responseFromMessage(
            responseConstructor=dns.Message, message=req)
        self.assertIsNot(req, resp)


    def test_responseType(self):
        """
        L{dns._responseFromMessage} returns a new instance of the supplied
        C{responseConstructor}.
        """
        class SuppliedClass(object):
            id = 1
            queries = []

        constructed = dns._responseFromMessage(
            responseConstructor=dns.Message, message=SuppliedClass())
        self.assertIsInstance(constructed, dns.Message)


    def test_responseId(self):
        """
        L{dns._responseFromMessage} copies the C{id} attribute of the original
        message.
        """
        resp = dns._responseFromMessage(
            responseConstructor=dns.Message, message=dns.Message(id=1234))
        self.assertEqual(1234, resp.id)


    def test_responseAnswer(self):
        """
        L{dns._responseFromMessage} sets the C{answer} flag to L{True}
        """
        req = dns.Message()
        resp = dns._responseFromMessage(
            responseConstructor=dns.Message, message=req)
        self.assertEqual(
            (False, True),
            (req.answer, resp.answer)
        )


    def test_responseQueries(self):
        """
        L{dns._responseFromMessage} copies the C{queries} attribute of the
        original message.
        """
        queries = [object(), object(), object()]
        req = dns.Message()
        req.queries = queries[:]
        resp = dns._responseFromMessage(
            responseConstructor=dns.Message, message=req)
        self.assertEqual(queries, resp.queries)


    def test_responseKwargs(self):
        """
        L{dns._responseFromMessage} accepts other C{kwargs} which are assigned
        to the new message before it is returned.
        """
        resp = dns._responseFromMessage(
            responseConstructor=dns.Message,
            message=dns.Message(),
            rCode=123)
        self.assertEqual(123, resp.rCode)
class Foo(object):
    """
    An example class for use in L{dns._compactRepr} tests.

    It follows the pattern of initialiser settable flags, fields and sections
    found in L{dns.Message} and L{dns._EDNSMessage}.
    """
    def __init__(self,
                 field1=1, field2=2, alwaysShowField='AS',
                 flagTrue=True, flagFalse=False, section1=None):
        """
        Store the supplied flags, fields and sections as public attributes.
        """
        self.flagTrue = flagTrue
        self.flagFalse = flagFalse
        self.field1 = field1
        self.field2 = field2
        self.alwaysShowField = alwaysShowField
        # Avoid a shared mutable default by substituting a fresh list.
        self.section1 = [] if section1 is None else section1


    def __repr__(self):
        """
        Delegate string representation to L{dns._compactRepr}.
        """
        return dns._compactRepr(
            self,
            alwaysShow=['alwaysShowField'],
            fieldNames=['field1', 'field2', 'alwaysShowField'],
            flagNames=['flagTrue', 'flagFalse'],
            sectionNames=['section1', 'section2'],
        )
class CompactReprTests(unittest.SynchronousTestCase):
    """
    Tests for L{dns._compactRepr}.
    """
    messageFactory = Foo

    def test_defaults(self):
        """
        L{dns._compactRepr} omits field values and sections which have the
        default value. Flags which are True are always shown.
        """
        self.assertEqual(
            "<Foo alwaysShowField='AS' flags=flagTrue>",
            repr(self.messageFactory())
        )


    def test_flagsIfSet(self):
        """
        L{dns._compactRepr} displays flags if they have a non-default value.
        """
        m = self.messageFactory(flagTrue=True, flagFalse=True)
        self.assertEqual(
            '<Foo '
            "alwaysShowField='AS' "
            'flags=flagTrue,flagFalse'
            '>',
            repr(m),
        )


    def test_nonDefaultFields(self):
        """
        L{dns._compactRepr} displays field values if they differ from their
        defaults.
        """
        m = self.messageFactory(field1=10, field2=20)
        self.assertEqual(
            '<Foo '
            'field1=10 '
            'field2=20 '
            "alwaysShowField='AS' "
            'flags=flagTrue'
            '>',
            repr(m),
        )


    def test_nonDefaultSections(self):
        """
        L{dns._compactRepr} displays sections which differ from their defaults.
        """
        m = self.messageFactory()
        m.section1 = [1, 1, 1]
        m.section2 = [2, 2, 2]
        self.assertEqual(
            '<Foo '
            "alwaysShowField='AS' "
            'flags=flagTrue '
            'section1=[1, 1, 1] '
            'section2=[2, 2, 2]'
            '>',
            repr(m),
        )
|
en
| 0.701337
|
# test-case-name: twisted.names.test.test_dns # Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. Tests for twisted.names.dns. Tests for L{dns._ord2bytes}. L{dns._ord2byte} accepts an integer and returns a byte string of length one with an ordinal value equal to the given integer. Tests for L{dns.str2name}. When passed a non-string object, L{dns.str2name} returns it unmodified. Passed a string giving a number of seconds, L{dns.str2time} returns the number of seconds represented. For example, C{"10S"} represents C{10} seconds. Like C{test_seconds}, but for the C{"M"} suffix which multiplies the time value by C{60} (the number of seconds in a minute!). Like C{test_seconds}, but for the C{"H"} suffix which multiplies the time value by C{3600}, the number of seconds in an hour. Like L{test_seconds}, but for the C{"D"} suffix which multiplies the time value by C{86400}, the number of seconds in a day. Like L{test_seconds}, but for the C{"W"} suffix which multiplies the time value by C{604800}, the number of seconds in a week. Like L{test_seconds}, but for the C{"Y"} suffix which multiplies the time value by C{31536000}, the number of seconds in a year. If a non-integer prefix is given, L{dns.str2time} raises L{ValueError}. Tests for L{Name}, the representation of a single domain name with support for encoding into and decoding from DNS message format. When constructed with a name which is neither C{bytes} nor C{str}, L{Name} raises L{TypeError}. L{dns.Name} automatically encodes unicode domain name using C{idna} encoding. L{Name.decode} populates the L{Name} instance with name information read from the file-like object passed to it. L{Name.encode} encodes its name information and writes it to the file-like object passed to it. If a compression dictionary is passed to it, L{Name.encode} uses offset information from it to encode its name with references to existing labels in the stream instead of including another copy of them in the output. 
It also updates the compression dictionary with the location of the name it writes to the stream. # Some bytes already encoded into the stream for this message # The position at which the encoded form of this new name will appear in # the stream. A resource record of unknown type and class is parsed into an L{UnknownRecord} instance with its data preserved, and an L{UnknownRecord} instance is serialized to a string equal to the one it was parsed from. # Message ID # answer bit, opCode nibble, auth bit, trunc bit, recursive # bit # recursion bit, empty bit, authenticData bit, # checkingDisabled bit, response code nibble # number of queries # number of answers # number of authorities # number of additionals # query # foo.bar # type=0xdead # cls=0xbeef # 1st answer # foo.bar - compressed # type=0xdead # cls=0xbeef # ttl=257 # some payload data # 1st additional # baz.ban # type=A # cls=IN # ttl=257 # len=4 # 172.16.31.10 If the leading byte of an encoded label (in bytes read from a stream passed to L{Name.decode}) has its two high bits set, the next byte is treated as a pointer to another label in the stream and that label is included in the name being decoded. # Slightly modified version of the example from RFC 1035, section 4.1.4. # Verify we found the first name in the stream and that the stream # position is left at the first byte after the decoded name. # Get the second name from the stream and make the same assertions. # Get the third and final name L{Name.decode} raises L{ValueError} if the stream passed to it includes a compression pointer which forms a loop, causing the name to be undecodable. Encoding and then decoding various objects. # encode the name # decode the name L{dns.Query.encode} returns a byte string representing the fields of the query which can be decoded into a new L{dns.Query} instance using L{dns.Query.decode}. 
# encode the query # decode the result L{dns.RRHeader.encode} encodes the record header's information and writes it to the file-like object passed to it and L{dns.RRHeader.decode} reads from a file-like object to re-construct a L{dns.RRHeader} instance. # encode the RR # decode the result L{dns.SimpleRecord.encode} encodes the record's name information and writes it to the file-like object passed to it and L{dns.SimpleRecord.decode} reads from a file-like object to re-construct a L{dns.SimpleRecord} instance. Instances of all record types are hashable. Test L{dns.Charstr} encode and decode. # encode the name # decode the name Assert that encoding C{record} and then decoding the resulting bytes creates a record which compares equal to C{record}. The byte stream written by L{dns.Record_SOA.encode} can be used by L{dns.Record_SOA.decode} to reconstruct the state of the original L{dns.Record_SOA} instance. The byte stream written by L{dns.Record_A.encode} can be used by L{dns.Record_A.decode} to reconstruct the state of the original L{dns.Record_A} instance. The byte stream written by L{dns.Record_NULL.encode} can be used by L{dns.Record_NULL.decode} to reconstruct the state of the original L{dns.Record_NULL} instance. The byte stream written by L{dns.Record_WKS.encode} can be used by L{dns.Record_WKS.decode} to reconstruct the state of the original L{dns.Record_WKS} instance. The byte stream written by L{dns.Record_AAAA.encode} can be used by L{dns.Record_AAAA.decode} to reconstruct the state of the original L{dns.Record_AAAA} instance. The byte stream written by L{dns.Record_A6.encode} can be used by L{dns.Record_A6.decode} to reconstruct the state of the original L{dns.Record_A6} instance. The byte stream written by L{dns.Record_SRV.encode} can be used by L{dns.Record_SRV.decode} to reconstruct the state of the original L{dns.Record_SRV} instance. Test L{dns.Record_NAPTR} encode and decode. 
The byte stream written by L{dns.Record_AFSDB.encode} can be used by L{dns.Record_AFSDB.decode} to reconstruct the state of the original L{dns.Record_AFSDB} instance. The byte stream written by L{dns.Record_RP.encode} can be used by L{dns.Record_RP.decode} to reconstruct the state of the original L{dns.Record_RP} instance. The byte stream written by L{dns.Record_HINFO.encode} can be used by L{dns.Record_HINFO.decode} to reconstruct the state of the original L{dns.Record_HINFO} instance. The byte stream written by L{dns.Record_MINFO.encode} can be used by L{dns.Record_MINFO.decode} to reconstruct the state of the original L{dns.Record_MINFO} instance. The byte stream written by L{dns.Record_MX.encode} can be used by L{dns.Record_MX.decode} to reconstruct the state of the original L{dns.Record_MX} instance. The byte stream written by L{dns.Record_TXT.encode} can be used by L{dns.Record_TXT.decode} to reconstruct the state of the original L{dns.Record_TXT} instance. # ID # # RA, Z, AD=1, CD, RCODE # Query count # Answer count # Authority count # Additional count # ID # # RA, Z, AD, CD=1, RCODE # Query count # Answer count # Authority count # Additional count Tests for L{twisted.names.dns.Message}. L{dns.Message.authenticData} has default value 0. L{dns.Message.__init__} accepts a C{authenticData} argument which is assigned to L{dns.Message.authenticData}. L{dns.Message.toStr} encodes L{dns.Message.authenticData} into byte4 of the byte string. L{dns.Message.fromStr} decodes byte4 and assigns bit3 to L{dns.Message.authenticData}. L{dns.Message.checkingDisabled} has default value 0. L{dns.Message.__init__} accepts a C{checkingDisabled} argument which is assigned to L{dns.Message.checkingDisabled}. L{dns.Message.toStr} encodes L{dns.Message.checkingDisabled} into byte4 of the byte string. L{dns.Message.fromStr} decodes byte4 and assigns bit4 to L{dns.Message.checkingDisabled}. L{dns.Message.__repr__} omits field values and sections which are identical to their defaults. 
The id field value is always shown. L{dns.Message.__repr__} displays flags if they are L{True}. L{dns.Message.__repr__} displays field values if they differ from their defaults. L{dns.Message.__repr__} displays sections which differ from their defaults. Test that a message which has been truncated causes an EOFError to be raised when it is parsed. Test that bytes representing an empty query message can be decoded as such. # Message ID # answer bit, opCode nibble, auth bit, trunc bit, recursive bit # recursion bit, empty bit, authenticData bit, # checkingDisabled bit, response code nibble # number of queries # number of answers # number of authorities # number of additionals A I{NULL} record with an arbitrary payload can be encoded and decoded as part of a L{dns.Message}. L{Message.lookupRecordType} returns C{dns.UnknownRecord} if it is called with an integer which doesn't correspond to any known record type. # 65280 is the first value in the range reserved for private # use, so it shouldn't ever conflict with an officially # allocated value. The L{RRHeader} instances created by L{Message} from a non-authoritative message are marked as not authoritative. # Message ID # answer bit, opCode nibble, auth bit, trunc bit, recursive bit # recursion bit, empty bit, authenticData bit, # checkingDisabled bit, response code nibble # number of queries # number of answers # number of authorities # number of additionals The L{RRHeader} instances created by L{Message} from an authoritative message are marked as authoritative. # Message ID # answer bit, opCode nibble, auth bit, trunc bit, recursive bit # recursion bit, empty bit, authenticData bit, # checkingDisabled bit, response code nibble # number of queries # number of answers # number of authorities # number of additionals Tests for the rich comparison of L{dns.Message} instances. Create a L{dns.Message}. 
The L{dns.Message} constructor doesn't accept C{queries}, C{answers}, C{authority}, C{additional} arguments, so we extract them from the kwargs supplied to this factory function and assign them to the message. @param args: Positional arguments. @param kwargs: Keyword arguments. @return: A L{dns.Message} instance. Two L{dns.Message} instances compare equal if they have the same id value. Two L{dns.Message} instances compare equal if they have the same answer flag. Two L{dns.Message} instances compare equal if they have the same opCode value. Two L{dns.Message} instances compare equal if they have the same recDes flag. Two L{dns.Message} instances compare equal if they have the same recAv flag. Two L{dns.Message} instances compare equal if they have the same auth flag. Two L{dns.Message} instances compare equal if they have the same rCode value. Two L{dns.Message} instances compare equal if they have the same trunc flag. Two L{dns.Message} instances compare equal if they have the same maxSize value. Two L{dns.Message} instances compare equal if they have the same authenticData flag. Two L{dns.Message} instances compare equal if they have the same checkingDisabled flag. Two L{dns.Message} instances compare equal if they have the same queries. Two L{dns.Message} instances compare equal if they have the same answers. Two L{dns.Message} instances compare equal if they have the same authority records. Two L{dns.Message} instances compare equal if they have the same additional records. Pretend to be a DNS query processor for a DNSDatagramProtocol. @ivar messages: the list of received messages. @type messages: C{list} of (msg, protocol, address) Initialize the controller: create a list of messages. Save the message so that it can be checked during the tests. Test various aspects of L{dns.DNSDatagramProtocol}. Create a L{dns.DNSDatagramProtocol} with a deterministic clock. 
Test that when a short datagram is received, datagramReceived does not raise an exception while processing it. Test content received after a query. Test that query timeouts after some seconds. Exceptions raised by the transport's write method should be turned into C{Failure}s passed to errbacks of the C{Deferred} returned by L{DNSDatagramProtocol.query}. Exception L{CannotListenError} raised by C{listenUDP} should be turned into a C{Failure} passed to errback of the C{Deferred} returned by L{DNSDatagramProtocol.query}. # Clean up transport so that the protocol calls startListening again When receiving a message whose id is not in L{DNSDatagramProtocol.liveMessages} or L{DNSDatagramProtocol.resends}, the message will be received by L{DNSDatagramProtocol.controller}. Pretend to be a DNS query processor for a DNSProtocol. @ivar connections: A list of L{DNSProtocol} instances which have notified this controller that they are connected and have not yet notified it that their connection has been lost. Test various aspects of L{dns.DNSProtocol}. Create a L{dns.DNSProtocol} with a deterministic clock. L{dns.DNSProtocol} calls its controller's C{connectionMade} method with itself when it is connected to a transport and its controller's C{connectionLost} method when it is disconnected. Test that query timeouts after some seconds. Test content received after a query. Exceptions raised by the transport's write method should be turned into C{Failure}s passed to errbacks of the C{Deferred} returned by L{DNSProtocol.query}. When receiving a message whose id is not in L{DNSProtocol.liveMessages} the message will be received by L{DNSProtocol.controller}. Tests for the C{__repr__} implementation of record classes. The repr of a L{dns.Record_NS} instance includes the name of the nameserver and the TTL of the record. The repr of a L{dns.Record_MD} instance includes the name of the mail destination and the TTL of the record. 
The repr of a L{dns.Record_MF} instance includes the name of the mail forwarder and the TTL of the record. The repr of a L{dns.Record_CNAME} instance includes the name of the mail forwarder and the TTL of the record. The repr of a L{dns.Record_MB} instance includes the name of the mailbox and the TTL of the record. The repr of a L{dns.Record_MG} instance includes the name of the mail group member and the TTL of the record. The repr of a L{dns.Record_MR} instance includes the name of the mail rename domain and the TTL of the record. The repr of a L{dns.Record_PTR} instance includes the name of the pointer and the TTL of the record. The repr of a L{dns.Record_DNAME} instance includes the name of the non-terminal DNS name redirection and the TTL of the record. The repr of a L{dns.Record_A} instance includes the dotted-quad string representation of the address it is for and the TTL of the record. The repr of a L{dns.Record_SOA} instance includes all of the authority fields. The repr of a L{dns.Record_NULL} instance includes the repr of its payload and the TTL of the record. The repr of a L{dns.Record_WKS} instance includes the dotted-quad string representation of the address it is for, the IP protocol number it is for, and the TTL of the record. The repr of a L{dns.Record_AAAA} instance includes the colon-separated hex string representation of the address it is for and the TTL of the record. The repr of a L{dns.Record_A6} instance includes the colon-separated hex string representation of the address it is for and the TTL of the record. The repr of a L{dns.Record_SRV} instance includes the name and port of the target and the priority, weight, and TTL of the record. The repr of a L{dns.Record_NAPTR} instance includes the order, preference, flags, service, regular expression, replacement, and TTL of the record. The repr of a L{dns.Record_AFSDB} instance includes the subtype, hostname, and TTL of the record. 
The repr of a L{dns.Record_RP} instance includes the mbox, txt, and TTL fields of the record. The repr of a L{dns.Record_HINFO} instance includes the cpu, os, and TTL fields of the record. The repr of a L{dns.Record_MINFO} instance includes the rmailbx, emailbx, and TTL fields of the record. The repr of a L{dns.Record_MX} instance includes the preference, name, and TTL fields of the record. The repr of a L{dns.Record_TXT} instance includes the data and ttl fields of the record. The repr of a L{dns.Record_SPF} instance includes the data and ttl fields of the record. The repr of a L{dns.UnknownRecord} instance includes the data and ttl fields of the record. Tests for the equality and non-equality behavior of record classes. Two L{dns.Charstr} instances compare equal if and only if they have the same string value. Two L{dns.Name} instances compare equal if and only if they have the same name value. Assert that instances of C{cls} with the same attributes compare equal to each other and instances with different attributes compare as not equal. @param cls: A L{dns.SimpleRecord} subclass. # Vary the TTL # Vary the name Two L{dns.RRHeader} instances compare equal if and only if they have the same name, type, class, time to live, payload, and authoritative bit. # Vary the name # Vary the payload # Vary the type. Leave the payload as None so that we don't have to # provide non-equal values. # Probably not likely to come up. Most people use the internet. # Vary the ttl # Vary the auth bit Two L{dns.Record_NS} instances compare equal if and only if they have the same name and TTL. Two L{dns.Record_MD} instances compare equal if and only if they have the same name and TTL. Two L{dns.Record_MF} instances compare equal if and only if they have the same name and TTL. Two L{dns.Record_CNAME} instances compare equal if and only if they have the same name and TTL. Two L{dns.Record_MB} instances compare equal if and only if they have the same name and TTL. 
Two L{dns.Record_MG} instances compare equal if and only if they have the same name and TTL. Two L{dns.Record_MR} instances compare equal if and only if they have the same name and TTL. Two L{dns.Record_PTR} instances compare equal if and only if they have the same name and TTL. Two L{dns.Record_MD} instances compare equal if and only if they have the same name and TTL. Two L{dns.Record_A} instances compare equal if and only if they have the same address and TTL. # Vary the TTL # Vary the address Two L{dns.Record_SOA} instances compare equal if and only if they have the same mname, rname, serial, refresh, minimum, expire, retry, and ttl. # Vary the mname # Vary the rname # Vary the serial # Vary the refresh # Vary the minimum # Vary the expire # Vary the retry # Vary the ttl Two L{dns.Record_NULL} instances compare equal if and only if they have the same payload and ttl. # Vary the payload # Vary the ttl Two L{dns.Record_WKS} instances compare equal if and only if they have the same address, protocol, map, and ttl. # Vary the address # Vary the protocol # Vary the map # Vary the ttl Two L{dns.Record_AAAA} instances compare equal if and only if they have the same address and ttl. # Vary the address # Vary the ttl Two L{dns.Record_A6} instances compare equal if and only if they have the same prefix, prefix length, suffix, and ttl. # Note, A6 is crazy, I'm not sure these values are actually legal. # Hopefully that doesn't matter for this test. -exarkun # Vary the prefix length # Vary the suffix # Vary the prefix # Vary the ttl Two L{dns.Record_SRV} instances compare equal if and only if they have the same priority, weight, port, target, and ttl. # Vary the priority # Vary the weight # Vary the port # Vary the target # Vary the ttl Two L{dns.Record_NAPTR} instances compare equal if and only if they have the same order, preference, flags, service, regexp, replacement, and ttl. 
# Vary the order # Vary the preference # Vary the flags # Vary the service # Vary the regexp # Vary the replacement # Vary the ttl Two L{dns.Record_AFSDB} instances compare equal if and only if they have the same subtype, hostname, and ttl. # Vary the subtype # Vary the hostname # Vary the ttl Two L{Record_RP} instances compare equal if and only if they have the same mbox, txt, and ttl. # Vary the mbox # Vary the txt # Vary the ttl Two L{dns.Record_HINFO} instances compare equal if and only if they have the same cpu, os, and ttl. # Vary the cpu # Vary the os # Vary the ttl Two L{dns.Record_MINFO} instances compare equal if and only if they have the same rmailbx, emailbx, and ttl. # Vary the rmailbx # Vary the emailbx # Vary the ttl Two L{dns.Record_MX} instances compare equal if and only if they have the same preference, name, and ttl. # Vary the preference # Vary the name # Vary the ttl Two L{dns.Record_TXT} instances compare equal if and only if they have the same data and ttl. # Vary the length of the data # Vary the value of the data # Vary the ttl L{dns.Record_SPF} instances compare equal if and only if they have the same data and ttl. # Vary the length of the data # Vary the value of the data # Vary the ttl L{dns.UnknownRecord} instances compare equal if and only if they have the same data and ttl. # Vary the length of the data # Vary the value of the data # Vary the ttl Tests for L{twisted.names.dns.RRHeader}. Attempting to create a L{dns.RRHeader} instance with a negative TTL causes L{ValueError} to be raised. Tests for L{twisted.names.dns._nameToLabels}. L{dns._nameToLabels} returns a list containing a single empty label for an empty name. L{dns._nameToLabels} returns a list containing a single empty label for a name containing only a dot. L{dns._nameToLabels} returns a list ending with an empty label for a name without a trailing dot. L{dns._nameToLabels} returns a list ending with an empty label for a name with a trailing dot. 
L{dns._nameToLabels} returns a list containing entries for all labels in a subdomain name. L{dns._nameToLabels} preserves the case of ascii characters in labels. Assert that C{descendant} *is* a subdomain of C{ancestor}. @type testCase: L{unittest.SynchronousTestCase} @param testCase: The test case on which to run the assertions. @type descendant: C{str} @param descendant: The subdomain name to test. @type ancestor: C{str} @param ancestor: The superdomain name to test. Assert that C{descendant} *is not* a subdomain of C{ancestor}. @type testCase: L{unittest.SynchronousTestCase} @param testCase: The test case on which to run the assertions. @type descendant: C{str} @param descendant: The subdomain name to test. @type ancestor: C{str} @param ancestor: The superdomain name to test. Tests for L{twisted.names.dns._isSubdomainOf}. L{dns._isSubdomainOf} returns C{True} for identical domain names. L{dns._isSubdomainOf} returns C{True} when the first name is an immediate descendant of the second name. L{dns._isSubdomainOf} returns C{True} when the first name is a distant descendant of the second name. L{dns._isSubdomainOf} returns C{False} when the first name is an ancestor of the second name. L{dns._isSubdomainOf} returns C{False} if the first name is a sibling of the second name. L{dns._isSubdomainOf} returns C{False} even when domain names happen to share a common suffix. L{dns._isSubdomainOf} returns C{True} if the first name is a subdomain of the second name but the first name has a trailing ".". L{dns._isSubdomainOf} returns C{True} if the first name is a subdomain of the second name but the second name has a trailing ".". L{dns._isSubdomainOf} returns C{True} if the first name is a subdomain of the second name and both names have a trailing ".". L{dns._isSubdomainOf} returns C{False} if the first name is empty and the second name is not. L{dns._isSubdomainOf} returns C{True} if the second name is empty and the first name is not. 
L{dns._isSubdomainOf} does case-insensitive comparison of name labels. Generate byte and instance representations of an L{dns._OPTHeader} where all attributes are set to non-default values. For testing whether attributes have really been read from the byte string during decoding. Return L{bytes} representing an encoded OPT record. @param excludeName: A flag that controls whether to exclude the name field. This allows a non-standard name to be prepended during the test. @type excludeName: L{bool} @param excludeOptions: A flag that controls whether to exclude the RDLEN field. This allows encoded variable options to be appended during the test. @type excludeOptions: L{bool} @return: L{bytes} representing the encoded OPT record returned by L{object}. # RDLEN 0 # 0 root zone # type 41 # udpPayloadsize 512 # extendedRCODE 3 # version 4 # DNSSEC OK 1 + Z Return a new L{dns._OPTHeader} instance. @return: A L{dns._OPTHeader} instance with attributes that match the encoded record returned by L{bytes}. Tests for L{twisted.names.dns._OPTHeader}. L{dns._OPTHeader} implements L{dns.IEncodable}. L{dns._OPTHeader.name} is a instance attribute whose value is fixed as the root domain L{dns._OPTHeader.name} is readonly. L{dns._OPTHeader.type} is an instance attribute with fixed value 41. L{dns._OPTHeader.type} is readonly. L{dns._OPTHeader.udpPayloadSize} defaults to 4096 as recommended in rfc6891 section-6.2.5. L{dns._OPTHeader.udpPayloadSize} can be overridden in the constructor. L{dns._OPTHeader.extendedRCODE} defaults to 0. L{dns._OPTHeader.extendedRCODE} can be overridden in the constructor. L{dns._OPTHeader.version} defaults to 0. L{dns._OPTHeader.version} can be overridden in the constructor. L{dns._OPTHeader.dnssecOK} defaults to False. L{dns._OPTHeader.dnssecOK} can be overridden in the constructor. L{dns._OPTHeader.options} defaults to empty list. L{dns._OPTHeader.options} can be overridden in the constructor. 
L{dns._OPTHeader.encode} packs the header fields and writes them to a file like object passed in as an argument. L{dns._OPTHeader.options} is a list of L{dns._OPTVariableOption} instances which are packed into the rdata area of the header. # RDLEN 20 # OPTION-CODE # OPTION-LENGTH # OPTION-DATA # OPTION-CODE # OPTION-LENGTH # OPTION-DATA L{dns._OPTHeader.decode} unpacks the header fields from a file like object and populates the attributes of an existing L{dns._OPTHeader} instance. L{dns._OPTHeader.decode} reads all the bytes of the record that is being decoded. # Check that all the input data has been consumed. L{dns._OPTHeader.decode} reads only the bytes from the current file position to the end of the record that is being decoded. Trailing bytes are not consumed. # Trailing bytes L{dns._OPTHeader.decode} discards the name which is encoded in the supplied bytes. The name attribute of the resulting L{dns._OPTHeader} instance will always be L{dns.Name(b'')}. L{dns._OPTHeader.decode} raises an exception if the supplied RDLEN is too short. # RDLEN 5 Too short - should be 6 # OPTION-CODE # OPTION-LENGTH # OPTION-DATA L{dns._OPTHeader.decode} raises an exception if the supplied RDLEN is too long. # RDLEN 7 Too long - should be 6 # OPTION-CODE # OPTION-LENGTH # OPTION-DATA If the OPT bytes contain variable options, L{dns._OPTHeader.decode} will populate a list L{dns._OPTHeader.options} with L{dns._OPTVariableOption} instances. # RDLEN 20 # OPTION-CODE # OPTION-LENGTH # OPTION-DATA # OPTION-CODE # OPTION-LENGTH # OPTION-DATA L{_OPTHeader.fromRRHeader} accepts an L{RRHeader} instance and returns an L{_OPTHeader} instance whose attribute values have been derived from the C{cls}, C{ttl} and C{payload} attributes of the original header. L{dns._OPTHeader.__repr__} displays the name and type and all the fixed and extended header values of the OPT record. Two L{OPTHeader} instances compare equal if they have the same udpPayloadSize. 
Two L{OPTHeader} instances compare equal if they have the same extendedRCODE. Two L{OPTHeader} instances compare equal if they have the same version. Two L{OPTHeader} instances compare equal if they have the same dnssecOK flags. Two L{OPTHeader} instances compare equal if they have the same options. Tests for L{dns._OPTVariableOption}. L{dns._OPTVariableOption} implements L{dns.IEncodable}. L{dns._OPTVariableOption.__init__} requires code and data arguments which are saved as public instance attributes. L{dns._OPTVariableOption.__repr__} displays the code and data of the option. Two OPTVariableOption instances compare equal if they have the same code and data values. L{dns._OPTVariableOption.encode} encodes the code and data instance attributes to a byte string which also includes the data length. # OPTION-CODE 1 # OPTION-LENGTH 6 # OPTION-DATA L{dns._OPTVariableOption.decode} is a classmethod that decodes a byte string and returns a L{dns._OPTVariableOption} instance. # OPTION-CODE 1 # OPTION-LENGTH 6 # OPTION-DATA An exception which can be raised by fakes to test that the fake is called with expected arguments. Store the positional and keyword arguments as attributes. @param args: The positional args. @param kwargs: The keyword args. Generate byte string and constructor arguments for an empty L{dns._EDNSMessage}. Bytes which are expected when encoding an instance constructed using C{kwargs} and which are expected to result in an identical instance when decoded. @return: The L{bytes} of a wire encoded message. # id: 256 # QR: 1, OPCODE: 2, AA: 0, TC: 0, RD: 1 # RA: 1, Z, RCODE: 15 # number of queries # number of answers # number of authorities # number of additionals Keyword constructor arguments which are expected to result in an instance which returns C{bytes} when encoded. @return: A L{dict} of keyword arguments. An empty response message whose TR bit is set to 1. 
Bytes which are expected when encoding an instance constructed using C{kwargs} and which are expected to result in an identical instance when decoded. @return: The L{bytes} of a wire encoded message. # ID: 256 # QR: 1, OPCODE: 0, AA: 0, TC: 1, RD: 0 # RA: 0, Z, RCODE: 0 # Number of queries # Number of answers # Number of authorities # Number of additionals Keyword constructor arguments which are expected to result in an instance which returns C{bytes} when encoded. @return: A L{dict} of keyword arguments. A minimal non-authoritative message. Bytes which are expected when encoding an instance constructed using C{kwargs} and which are expected to result in an identical instance when decoded. @return: The L{bytes} of a wire encoded message. # ID 256 # QR: 0, OPCODE: 0, AA: 0, TC: 0, RD: 0 # RA: 0, Z, RCODE: 0 # Query count # Answer count # Authorities count # Additionals count # Answer # RR NAME (root) # RR TYPE 1 (A) # RR CLASS 1 (IN) # RR TTL # RDLENGTH 4 # IPv4 172.16.31.10 Keyword constructor arguments which are expected to result in an instance which returns C{bytes} when encoded. @return: A L{dict} of keyword arguments. A minimal authoritative message. Bytes which are expected when encoding an instance constructed using C{kwargs} and which are expected to result in an identical instance when decoded. @return: The L{bytes} of a wire encoded message. # ID: 256 # QR: 0, OPCODE: 0, AA: 1, TC: 0, RD: 0 # RA: 0, Z, RCODE: 0 # Query count # Answer count # Authorities count # Additionals count # Answer # RR NAME (root) # RR TYPE 1 (A) # RR CLASS 1 (IN) # RR TTL # RDLENGTH 4 # IPv4 172.16.31.10 Keyword constructor arguments which are expected to result in an instance which returns C{bytes} when encoded. @return: A L{dict} of keyword arguments. An example of a fully populated non-edns response message. Contains name compression, answers, authority, and additional records. 
Bytes which are expected when encoding an instance constructed using C{kwargs} and which are expected to result in an identical instance when decoded. @return: The L{bytes} of a wire encoded message. # ID: 256 # QR: 1, OPCODE: 2, AA: 1, TC: 0, RD: 1 # RA: 1, Z, RCODE: 15 # Query count # Answer count # Authorities count # Additionals count # Query begins at Byte 12 # QNAME # QTYPE 6 (SOA) # QCLASS 1 (IN) # Answers # RR NAME (compression ref b12) # RR TYPE 6 (SOA) # RR CLASS 1 (IN) # RR TTL # RDLENGTH 39 # Mname (ns1.example.com (compression ref b15) # rname (hostmaster.example.com) # Serial # Refresh # Retry # Expire # Minimum # Authority # RR NAME (example.com compression ref b12) # RR TYPE 2 (NS) # RR CLASS 1 (IN) # RR TTL # RDLENGTH # RDATA (ns1.example.com (compression ref b41) # Additional # RR NAME (ns1.example.com compression ref b41) # RR TYPE 1 (A) # RR CLASS 1 (IN) # RR TTL # RDLENGTH # RDATA 5.6.7.8 Keyword constructor arguments which are expected to result in an instance which returns C{bytes} when encoded. @return: A L{dict} of keyword arguments. A minimal EDNS query message. Bytes which are expected when encoding an instance constructed using C{kwargs} and which are expected to result in an identical instance when decoded. @return: The L{bytes} of a wire encoded message. # ID: 0 # QR: 0, OPCODE: 0, AA: 0, TC: 0, RD: 0 # RA: 0, Z, RCODE: 0 # Queries count # Anwers count # Authority count # Additionals count # Queries # QNAME # QTYPE (A) # QCLASS (IN) # Additional OPT record # NAME (.) # TYPE (OPT 41) # UDP Payload Size (4096) # Extended RCODE # EDNS version # DO: False + Z # RDLENGTH Keyword constructor arguments which are expected to result in an instance which returns C{bytes} when encoded. @return: A L{dict} of keyword arguments. An example of a fully populated edns response message. Contains name compression, answers, authority, and additional records. 
Bytes which are expected when encoding an instance constructed using C{kwargs} and which are expected to result in an identical instance when decoded. @return: The L{bytes} of a wire encoded message. # ID: 256 # QR: 1, OPCODE: 2, AA: 1, TC: 0, RD: 1 # RA: 1, AD: 1, RCODE: 15 # Query count # Answer count # Authorities count # Additionals count # Query begins at Byte 12 # QNAME # QTYPE 6 (SOA) # QCLASS 1 (IN) # Answers # RR NAME (compression ref b12) # RR TYPE 6 (SOA) # RR CLASS 1 (IN) # RR TTL # RDLENGTH 39 # mname (ns1.example.com (compression ref b15) # rname (hostmaster.example.com) # Serial # Refresh # Retry # Expire # Minimum # Authority # RR NAME (example.com compression ref b12) # RR TYPE 2 (NS) # RR CLASS 1 (IN) # RR TTL # RDLENGTH # RDATA (ns1.example.com (compression ref b41) # Additional # RR NAME (ns1.example.com compression ref b41) # RR TYPE 1 (A) # RR CLASS 1 (IN) # RR TTL # RDLENGTH # RDATA 5.6.7.8 # Additional OPT record # NAME (.) # TYPE (OPT 41) # UDP Payload Size (1024) # Extended RCODE # EDNS version # DO: True + Z # RDLENGTH Keyword constructor arguments which are expected to result in an instance which returns C{bytes} when encoded. @return: A L{dict} of keyword arguments. An example of an EDNS message with an extended RCODE. Bytes which are expected when encoding an instance constructed using C{kwargs} and which are expected to result in an identical instance when decoded. @return: The L{bytes} of a wire encoded message. # RA: 0, Z, RCODE: 12 # 1 additionals # Additional OPT record # Extended RCODE: 171 Keyword constructor arguments which are expected to result in an instance which returns C{bytes} when encoded. @return: A L{dict} of keyword arguments. # Combined OPT extended RCODE + Message RCODE A wrapper around L{dns.Message} which is comparable so that it can be tested using some of the L{dns._EDNSMessage} tests. 
Verify that an attribute has the expected default value and that a corresponding argument passed to a constructor is assigned to that attribute. @param testCase: The L{TestCase} whose assert methods will be called. @type testCase: L{unittest.TestCase} @param cls: The constructor under test. @type cls: L{type} @param argName: The name of the constructor argument under test. @type argName: L{str} @param defaultVal: The expected default value of C{attrName} / C{argName} @type defaultVal: L{object} @param altVal: A value which is different from the default. Used to test that supplied constructor arguments are actually assigned to the correct attribute. @type altVal: L{object} @param attrName: The name of the attribute under test if different from C{argName}. Defaults to C{argName} @type attrName: L{str} Helper methods for verifying default attribute values and corresponding constructor arguments. Wrap L{verifyConstructorArgument} to provide simpler interface for testing Message and _EDNSMessage constructor arguments. @param argName: The name of the constructor argument. @param defaultVal: The expected default value. @param altVal: An alternative value which is expected to be assigned to a correspondingly named attribute. Wrap L{verifyConstructorArgument} to provide simpler interface for testing _EDNSMessage constructor flags. @param argName: The name of the constructor flag argument @param defaultVal: The expected default value of the flag Tests for constructor arguments and their associated attributes that are common to both L{twisted.names.dns._EDNSMessage} and L{dns.Message}. TestCase classes that use this mixin must provide a C{messageFactory} method which accepts any argment supported by L{dns.Message.__init__}. TestCases must also mixin ConstructorTestsMixin which provides some custom assertions for testing constructor arguments. L{dns._EDNSMessage.id} defaults to C{0} and can be overridden in the constructor. 
L{dns._EDNSMessage.answer} defaults to C{False} and can be overridden in the constructor. L{dns._EDNSMessage.opCode} defaults to L{dns.OP_QUERY} and can be overridden in the constructor. L{dns._EDNSMessage.auth} defaults to C{False} and can be overridden in the constructor. L{dns._EDNSMessage.trunc} defaults to C{False} and can be overridden in the constructor. L{dns._EDNSMessage.recDes} defaults to C{False} and can be overridden in the constructor. L{dns._EDNSMessage.recAv} defaults to C{False} and can be overridden in the constructor. L{dns._EDNSMessage.rCode} defaults to C{0} and can be overridden in the constructor. L{dns._EDNSMessage.maxSize} defaults to C{512} and can be overridden in the constructor. L{dns._EDNSMessage.queries} defaults to C{[]}. L{dns._EDNSMessage.answers} defaults to C{[]}. L{dns._EDNSMessage.authority} defaults to C{[]}. L{dns._EDNSMessage.additional} defaults to C{[]}. Tests for L{twisted.names.dns._EDNSMessage} constructor arguments that are shared with L{dns.Message}. Tests for L{twisted.names.dns.Message} constructor arguments that are shared with L{dns._EDNSMessage}. Tests for L{dns._EDNSMessage}. These tests are for L{dns._EDNSMessage} APIs which are not shared with L{dns.Message}. L{dns._EDNSMessage.ednsVersion} defaults to C{0} and can be overridden in the constructor. L{dns._EDNSMessage.dnssecOK} defaults to C{False} and can be overridden in the constructor. L{dns._EDNSMessage.authenticData} defaults to C{False} and can be overridden in the constructor. L{dns._EDNSMessage.checkingDisabled} defaults to C{False} and can be overridden in the constructor. L{dns._EDNSMessage.queries} can be overridden in the constructor. L{dns._EDNSMessage.answers} can be overridden in the constructor. L{dns._EDNSMessage.authority} can be overridden in the constructor. L{dns._EDNSMessage.authority} can be overridden in the constructor. L{dns._EDNSMessage.__repr__} omits field values and sections which are identical to their defaults. 
The id field value is always shown. L{dns._EDNSMessage.__repr__} displays flags if they are L{True}. L{dns._EDNSMessage.__repr__} displays field values if they differ from their defaults. L{dns.Message.__repr__} displays sections which differ from their defaults. L{dns._EDNSMessage.fromString} calls L{dns._EDNSMessage._messageFactory} to create a new L{dns.Message} instance which is used to decode the supplied bytes. Fake message factory. Fake fromStr method which raises the arguments it was passed. @param args: positional arguments @param kwargs: keyword arguments L{dns._EDNSMessage.fromString} calls L{dns._EDNSMessage._fromMessage} with a L{dns.Message} instance Fake message factory. A noop fake version of fromStr @param bytes: the bytes to be decoded L{dns._EDNSMessage.toStr} calls L{dns._EDNSMessage._toMessage} L{dns._EDNSMessage.toStr} calls C{toStr} on the message returned by L{dns._EDNSMessage._toMessage}. Fake Message Fake toStr which returns dummyBytes. @return: dummyBytes Tests for equality between L(dns._EDNSMessage} instances. These tests will not work with L{dns.Message} because it does not use L{twisted.python.util.FancyEqMixin}. Two L{dns._EDNSMessage} instances compare equal if they have the same id. Two L{dns._EDNSMessage} instances compare equal if they have the same answer flag. Two L{dns._EDNSMessage} instances compare equal if they have the same opCode. Two L{dns._EDNSMessage} instances compare equal if they have the same auth flag. Two L{dns._EDNSMessage} instances compare equal if they have the same trunc flag. Two L{dns._EDNSMessage} instances compare equal if they have the same recDes flag. Two L{dns._EDNSMessage} instances compare equal if they have the same recAv flag. Two L{dns._EDNSMessage} instances compare equal if they have the same rCode. Two L{dns._EDNSMessage} instances compare equal if they have the same ednsVersion. Two L{dns._EDNSMessage} instances compare equal if they have the same dnssecOK. 
Two L{dns._EDNSMessage} instances compare equal if they have the same authenticData flags. Two L{dns._EDNSMessage} instances compare equal if they have the same checkingDisabled flags. Two L{dns._EDNSMessage} instances compare equal if they have the same maxSize. Two L{dns._EDNSMessage} instances compare equal if they have the same queries. Two L{dns._EDNSMessage} instances compare equal if they have the same answers. Two L{dns._EDNSMessage} instances compare equal if they have the same authority records. Two L{dns._EDNSMessage} instances compare equal if they have the same additional records. Tests for the encoding and decoding of various standard (not EDNS) messages. These tests should work with both L{dns._EDNSMessage} and L{dns.Message}. TestCase classes that use this mixin must provide a C{messageFactory} method which accepts any argment supported by L{dns._EDNSMessage.__init__}. EDNS specific arguments may be discarded if not supported by the message class under construction. An empty message can be encoded. An empty message byte sequence can be decoded. A fully populated query message can be encoded. A fully populated message byte string can be decoded. A I{NULL} record with an arbitrary payload can be encoded and decoded as part of a message. If the message C{authoritative} attribute is set to 0, the encoded bytes will have AA bit 0. The L{dns.RRHeader} instances created by a message from a non-authoritative message byte string are marked as not authoritative. If the message C{authoritative} attribute is set to 1, the encoded bytes will have AA bit 1. The message and its L{dns.RRHeader} instances created by C{decode} from an authoritative message byte string, are marked as authoritative. If the message C{trunc} attribute is set to 1 the encoded bytes will have TR bit 1. The message instance created by decoding a truncated message is marked as truncated. Tests for the encoding and decoding of various standard (non-EDNS) messages by L{dns._EDNSMessage}. 
Tests for the encoding and decoding of various standard (non-EDNS) messages by L{dns.Message}. This function adapts constructor arguments expected by _EDNSMessage.__init__ to arguments suitable for use with the Message.__init__. Also handles the fact that unlike L{dns._EDNSMessage}, L{dns.Message.__init__} does not accept queries, answers etc as arguments. Also removes any L{dns._EDNSMessage} specific arguments. @param args: The positional arguments which will be passed to L{dns.Message.__init__}. @param kwargs: The keyword arguments which will be stripped of EDNS specific arguments before being passed to L{dns.Message.__init__}. @return: An L{dns.Message} instance. Tests for the encoding and decoding of various EDNS messages. These test will not work with L{dns.Message}. The L(_EDNSMessage} instance created by L{dns._EDNSMessage.decode} from an EDNS query never includes OPT records in the additional section. An L(_EDNSMessage} instance created from a byte string containing multiple I{OPT} records will discard all the C{OPT} records. C{ednsVersion} will be set to C{None}. @see: U{https://tools.ietf.org/html/rfc6891#section-6.1.1} L{dns._EDNSMessage._fromMessage} returns an L{_EDNSMessage} instance whose queries, answers, authority and additional lists are copies (not references to) the original message lists. L{dns._EDNSMessage.toStr} makes no in place changes to the message instance. L{dns._EDNSMessage} can decode OPT records, regardless of their position in the additional records section. "The OPT RR MAY be placed anywhere within the additional data section." @see: U{https://tools.ietf.org/html/rfc6891#section-6.1.1} # XXX: We need an _OPTHeader.toRRHeader method. See #6779. The L(_EDNSMessage} instance created by L{dns._EDNSMessage.fromStr} derives its edns specific values (C{ednsVersion}, etc) from the supplied OPT record. 
The L(_EDNSMessage} instance created by L{dns._EDNSMessage.toStr} encodes its edns specific values (C{ednsVersion}, etc) into an OPT record added to the additional section. The L(_EDNSMessage.toStr} encodes the extended I{RCODE} (>=16) by assigning the lower 4bits to the message RCODE field and the upper 4bits to the OPT pseudo record. The L(_EDNSMessage} instance created by L{dns._EDNSMessage.fromStr} derives RCODE from the supplied OPT record. Note that EXTENDED-RCODE value 0 indicates that an unextended RCODE is in use (values 0 through 15). https://tools.ietf.org/html/rfc6891#section-6.1.3 Tests for L{dns._responseFromMessage}. L{dns.Message._responseFromMessage} is a constructor function which generates a new I{answer} message from an existing L{dns.Message} like instance. L{dns._responseFromMessage} returns a new instance of C{cls} L{dns._responseFromMessage} copies the C{id} attribute of the original message. L{dns._responseFromMessage} sets the C{answer} flag to L{True} L{dns._responseFromMessage} copies the C{queries} attribute of the original message. L{dns._responseFromMessage} accepts other C{kwargs} which are assigned to the new message before it is returned. An example class for use in L{dns._compactRepr} tests. It follows the pattern of initialiser settable flags, fields and sections found in L{dns.Message} and L{dns._EDNSMessage}. Set some flags, fields and sections as public attributes. Call L{dns._compactRepr} to generate a string representation. Tests for L[dns._compactRepr}. L{dns._compactRepr} omits field values and sections which have the default value. Flags which are True are always shown. L{dns._compactRepr} displays flags if they have a non-default value. L{dns._compactRepr} displays field values if they differ from their defaults. L{dns._compactRepr} displays sections which differ from their defaults.
| 2.593453
| 3
|
launcher.py
|
MrForg3t/sourcecodetrm
| 0
|
6628365
|
from platform import system
from os import system as cmd
from os import path
from time import sleep
def launcherMain():
    """Run the Windows launcher chain: checkfileint.exe, then uuid_gen.exe,
    then main.exe, each executed only if the file exists.

    On Darwin and Linux it only reports that the platform is unsupported.
    Any unexpected failure is caught and printed rather than raised, so the
    launcher never shows a traceback to the end user.
    """
    try:
        osName = system()  # platform name is constant; look it up once
        if osName == "Windows":
            # NOTE(review): path.exists() checks the current working
            # directory, not the PATH env var — the messages below are
            # slightly misleading; confirm intended lookup location.
            if path.exists("checkfileint.exe"):
                cmd("checkfileint.exe")
                # uuid_gen.exe is optional: silently skipped when absent
                if path.exists("uuid_gen.exe"):
                    cmd("uuid_gen.exe")
                if path.exists("main.exe"):
                    cmd("main.exe")
                else:
                    print("Could not find main.exe in the PATH environment.")
            else:
                print("Could not find checkfileint.exe in the PATH environment.")
        elif osName in ("Darwin", "Linux"):
            # Same message for both; merged the duplicated branches.
            print("Not supported on this platform for now.")
        else:
            print("We cannot find your operating system")  # fixed typo "cannnot"
    except Exception as error:
        print(f"Error: {error}")
if __name__ == '__main__':
    launcherMain()
    # Keep the console window open briefly after the launcher finishes.
    # Moved inside the guard: previously sleep(3) sat at module level and
    # delayed every *import* of this file by 3 seconds as well.
    sleep(3)
|
from platform import system
from os import system as cmd
from os import path
from time import sleep
def launcherMain():
    """Dispatch on the host OS and, on Windows, run the bundled launcher
    executables (checkfileint.exe, optional uuid_gen.exe, then main.exe).

    Non-Windows platforms get an "unsupported" notice; any exception is
    reported via print instead of propagating.
    """
    try:
        platformName = system()
        if platformName == "Windows":
            if not path.exists("checkfileint.exe"):
                print("Could not find checkfileint.exe in the PATH environment.")
            else:
                cmd("checkfileint.exe")
                if path.exists("uuid_gen.exe"):
                    cmd("uuid_gen.exe")
                if not path.exists("main.exe"):
                    print("Could not find main.exe in the PATH environment.")
                else:
                    cmd("main.exe")
        elif platformName in ("Darwin", "Linux"):
            print("Not supported on this platform for now.")
        else:
            print("We cannnot find your operating system")
    except Exception as error:
        print(f"Error: {error}")
if __name__ == '__main__':
    launcherMain()
    # Keep the console window open briefly after the launcher finishes.
    # Moved inside the guard: previously sleep(3) sat at module level and
    # delayed every *import* of this file by 3 seconds as well.
    sleep(3)
|
none
| 1
| 3.046436
| 3
|
|
county_avg_sat.py
|
Statistica/pennsylvania-education
| 0
|
6628366
|
# Written by <NAME>, released April 11th, 2016 for Statisti.ca
# Released under the MIT License (https://opensource.org/licenses/MIT)
from __future__ import division
import csv, requests, re, collections, plotly.plotly as plotly, plotly.graph_objs as go
from plotly.graph_objs import Scatter, Layout
schools=[]
with open('pa_schools.csv', 'r') as f: #add all of the schools to the 'schools' list
#pa_schools.csv from: http://www.edna.ed.state.pa.us/Screens/Extracts/wfExtractPublicSchools.aspx
reader=csv.reader(f)
next(reader) #skip header row
for row in reader:
try:
schools.append({'aun': int(row[0]), 'county': row[5]}) #row[0] is the aun, row[5] is the county name
except ValueError:
pass
schools_sats=[]
with open('pa_sat_scores.csv', 'r') as f: #add each high school's sat score
#pa_sat_scores.csv from: http://www.education.pa.gov/K-12/Assessment%20and%20Accountability/Pages/SAT-and-ACT.aspx (Public School SAT Scores 2015)
reader=csv.reader(f)
for i in range(8): #skip header rows
next(reader)
for row in reader:
try:
schools_sats.append({'aun': int(row[0]), 'score': int(row[8])}) #add each school's AUN (Administrative Unit Number) and score
except ValueError:
pass
for school in schools_sats: #loop through every school's aun and score
for s in schools: #loop through every school
if s['aun']==school['aun']: #match the school's aun and the aun of the sat score list
school.update({'county': s['county']}) #add the school's county
del school['aun'] #remove the aun from the school
grouped=collections.defaultdict(list) #created a defaultdict
for county in schools_sats:
grouped[county['county']].append(county['score']) #append the scores to counties in the defaultdict
county_avg_scores=[]
for county, scores in grouped.iteritems(): #get the average scores for each county
county_avg_scores.append({'county': county, 'avg_sat': sum(scores)/len(scores)})
#get each county's per capita income
for county_avg_score in county_avg_scores: #loop through every county's average sat scores
with open('pa_avg_income.csv', 'r') as f: #pa_avg_income.csv from: https://en.wikipedia.org/wiki/List_of_Pennsylvania_counties_by_per_capita_income#Pennsylvania_counties_ranked_by_per_capita_income (from US Census Bureau)
reader=csv.reader(f)
for row in reader: #loop through every county average income
if county_avg_score['county']==row[1]: #row[1] is the county name
per_capita_income=int(row[2].replace("$", "").replace(",", "")) #format money (e.g. "$41,251"->41251)
county_avg_score.update({'per_capita_income': per_capita_income})
break #if we already found the county's income, no need to keep looping
sats=[]
incomes=[]
names=[]
f=open('counties_avg_sat.csv', 'w')
w=csv.writer(f)
w.writerow(["county", "average sat score", "per capita income"])
for c in county_avg_scores:
sats.append(c['avg_sat'])
incomes.append(c['per_capita_income'])
names.append(c['county'])
w.writerow([c['county'], c['avg_sat'], c['per_capita_income']])
f.close()
trace=go.Scatter(
x=incomes,
y=sats,
text=names,
mode='markers'
)
data=[trace]
fig=go.Figure(data=data)
plotly.plot(fig) #plot the scatter plot!
|
# Written by <NAME>, released April 11th, 2016 for Statisti.ca
# Released under the MIT License (https://opensource.org/licenses/MIT)
from __future__ import division
import csv, requests, re, collections, plotly.plotly as plotly, plotly.graph_objs as go
from plotly.graph_objs import Scatter, Layout
schools=[]
with open('pa_schools.csv', 'r') as f: #add all of the schools to the 'schools' list
#pa_schools.csv from: http://www.edna.ed.state.pa.us/Screens/Extracts/wfExtractPublicSchools.aspx
reader=csv.reader(f)
next(reader) #skip header row
for row in reader:
try:
schools.append({'aun': int(row[0]), 'county': row[5]}) #row[0] is the aun, row[5] is the county name
except ValueError:
pass
schools_sats=[]
with open('pa_sat_scores.csv', 'r') as f: #add each high school's sat score
#pa_sat_scores.csv from: http://www.education.pa.gov/K-12/Assessment%20and%20Accountability/Pages/SAT-and-ACT.aspx (Public School SAT Scores 2015)
reader=csv.reader(f)
for i in range(8): #skip header rows
next(reader)
for row in reader:
try:
schools_sats.append({'aun': int(row[0]), 'score': int(row[8])}) #add each school's AUN (Administrative Unit Number) and score
except ValueError:
pass
for school in schools_sats: #loop through every school's aun and score
for s in schools: #loop through every school
if s['aun']==school['aun']: #match the school's aun and the aun of the sat score list
school.update({'county': s['county']}) #add the school's county
del school['aun'] #remove the aun from the school
grouped=collections.defaultdict(list) #created a defaultdict
for county in schools_sats:
grouped[county['county']].append(county['score']) #append the scores to counties in the defaultdict
county_avg_scores=[]
for county, scores in grouped.iteritems(): #get the average scores for each county
county_avg_scores.append({'county': county, 'avg_sat': sum(scores)/len(scores)})
#get each county's per capita income
for county_avg_score in county_avg_scores: #loop through every county's average sat scores
with open('pa_avg_income.csv', 'r') as f: #pa_avg_income.csv from: https://en.wikipedia.org/wiki/List_of_Pennsylvania_counties_by_per_capita_income#Pennsylvania_counties_ranked_by_per_capita_income (from US Census Bureau)
reader=csv.reader(f)
for row in reader: #loop through every county average income
if county_avg_score['county']==row[1]: #row[1] is the county name
per_capita_income=int(row[2].replace("$", "").replace(",", "")) #format money (e.g. "$41,251"->41251)
county_avg_score.update({'per_capita_income': per_capita_income})
break #if we already found the county's income, no need to keep looping
sats=[]
incomes=[]
names=[]
f=open('counties_avg_sat.csv', 'w')
w=csv.writer(f)
w.writerow(["county", "average sat score", "per capita income"])
for c in county_avg_scores:
sats.append(c['avg_sat'])
incomes.append(c['per_capita_income'])
names.append(c['county'])
w.writerow([c['county'], c['avg_sat'], c['per_capita_income']])
f.close()
trace=go.Scatter(
x=incomes,
y=sats,
text=names,
mode='markers'
)
data=[trace]
fig=go.Figure(data=data)
plotly.plot(fig) #plot the scatter plot!
|
en
| 0.850772
|
# Written by <NAME>, released April 11th, 2016 for Statisti.ca # Released under the MIT License (https://opensource.org/licenses/MIT) #add all of the schools to the 'schools' list #pa_schools.csv from: http://www.edna.ed.state.pa.us/Screens/Extracts/wfExtractPublicSchools.aspx #skip header row #row[0] is the aun, row[5] is the county name #add each high school's sat score #pa_sat_scores.csv from: http://www.education.pa.gov/K-12/Assessment%20and%20Accountability/Pages/SAT-and-ACT.aspx (Public School SAT Scores 2015) #skip header rows #add each school's AUN (Administrative Unit Number) and score #loop through every school's aun and score #loop through every school #match the school's aun and the aun of the sat score list #add the school's county #remove the aun from the school #created a defaultdict #append the scores to counties in the defaultdict #get the average scores for each county #get each county's per capita income #loop through every county's average sat scores #pa_avg_income.csv from: https://en.wikipedia.org/wiki/List_of_Pennsylvania_counties_by_per_capita_income#Pennsylvania_counties_ranked_by_per_capita_income (from US Census Bureau) #loop through every county average income #row[1] is the county name #format money (e.g. "$41,251"->41251) #if we already found the county's income, no need to keep looping #plot the scatter plot!
| 2.860035
| 3
|
pypika/tests/test_formats.py
|
YiuRULE/pypika
| 1,616
|
6628367
|
<reponame>YiuRULE/pypika
import unittest
from pypika import Query, Tables, functions as fn
class QuoteTests(unittest.TestCase):
maxDiff = None
table_abc, table_efg = Tables("abc", "efg")
def setUp(self):
subquery1 = (
Query.from_(self.table_abc)
.select(
self.table_abc.foo,
fn.Sum(self.table_abc.fizz + self.table_abc.buzz).as_("fizzbuzz"),
)
.groupby(self.table_abc.foo)
)
subquery2 = Query.from_(self.table_efg).select(
self.table_efg.foo.as_("foo_two"),
self.table_efg.bar,
)
self.query = (
Query.from_(subquery1)
.select(subquery1.foo, subquery1.fizzbuzz)
.join(subquery2)
.on(subquery1.foo == subquery2.foo_two)
.select(subquery2.foo_two, subquery2.bar)
)
def test_replace_quote_char_in_complex_query(self):
self.assertEqual(
"SELECT "
"`sq0`.`foo`,`sq0`.`fizzbuzz`,"
"`sq1`.`foo_two`,`sq1`.`bar` "
"FROM ("
"SELECT "
"`foo`,SUM(`fizz`+`buzz`) `fizzbuzz` "
"FROM `abc` "
"GROUP BY `foo`"
") `sq0` JOIN ("
"SELECT "
"`foo` `foo_two`,`bar` "
"FROM `efg`"
") `sq1` ON `sq0`.`foo`=`sq1`.`foo_two`",
self.query.get_sql(quote_char="`"),
)
def test_no_quote_char_in_complex_query(self):
self.assertEqual(
"SELECT "
"sq0.foo,sq0.fizzbuzz,"
"sq1.foo_two,sq1.bar "
"FROM ("
"SELECT "
"foo,SUM(fizz+buzz) fizzbuzz "
"FROM abc "
"GROUP BY foo"
") sq0 JOIN ("
"SELECT "
"foo foo_two,bar "
"FROM efg"
") sq1 ON sq0.foo=sq1.foo_two",
self.query.get_sql(quote_char=None),
)
|
import unittest
from pypika import Query, Tables, functions as fn
class QuoteTests(unittest.TestCase):
maxDiff = None
table_abc, table_efg = Tables("abc", "efg")
def setUp(self):
subquery1 = (
Query.from_(self.table_abc)
.select(
self.table_abc.foo,
fn.Sum(self.table_abc.fizz + self.table_abc.buzz).as_("fizzbuzz"),
)
.groupby(self.table_abc.foo)
)
subquery2 = Query.from_(self.table_efg).select(
self.table_efg.foo.as_("foo_two"),
self.table_efg.bar,
)
self.query = (
Query.from_(subquery1)
.select(subquery1.foo, subquery1.fizzbuzz)
.join(subquery2)
.on(subquery1.foo == subquery2.foo_two)
.select(subquery2.foo_two, subquery2.bar)
)
def test_replace_quote_char_in_complex_query(self):
self.assertEqual(
"SELECT "
"`sq0`.`foo`,`sq0`.`fizzbuzz`,"
"`sq1`.`foo_two`,`sq1`.`bar` "
"FROM ("
"SELECT "
"`foo`,SUM(`fizz`+`buzz`) `fizzbuzz` "
"FROM `abc` "
"GROUP BY `foo`"
") `sq0` JOIN ("
"SELECT "
"`foo` `foo_two`,`bar` "
"FROM `efg`"
") `sq1` ON `sq0`.`foo`=`sq1`.`foo_two`",
self.query.get_sql(quote_char="`"),
)
def test_no_quote_char_in_complex_query(self):
self.assertEqual(
"SELECT "
"sq0.foo,sq0.fizzbuzz,"
"sq1.foo_two,sq1.bar "
"FROM ("
"SELECT "
"foo,SUM(fizz+buzz) fizzbuzz "
"FROM abc "
"GROUP BY foo"
") sq0 JOIN ("
"SELECT "
"foo foo_two,bar "
"FROM efg"
") sq1 ON sq0.foo=sq1.foo_two",
self.query.get_sql(quote_char=None),
)
|
none
| 1
| 2.758606
| 3
|
|
djangobb_forum/tests/test_templatetags.py
|
dwminer/s2forums
| 2
|
6628368
|
# -*- coding: utf-8 -*-
from django.test import TestCase
from django.contrib.auth.models import User
from djangobb_forum.models import Post
from djangobb_forum.templatetags.forum_extras import profile_link, link, mobile_link
class TestLinkTags(TestCase):
fixtures = ['test_forum.json']
def setUp(self):
self.user = User.objects.get(pk=1)
self.post = Post.objects.get(pk=1)
def test_profile_link(self):
plink = profile_link(self.user)
self.assertEqual(plink, u"<a href=\"/forum/user/djangobb/\">djangobb</a>")
def test_link(self):
l = link(self.post)
self.assertEqual(l, "<a href=\"/forum/post/1/\">Test Body</a>")
def test_mobile_link(self):
l = mobile_link(self.post)
self.assertEqual(l, "<a href=\"/forum/post/1/lofi/\">Test Body</a>")
|
# -*- coding: utf-8 -*-
from django.test import TestCase
from django.contrib.auth.models import User
from djangobb_forum.models import Post
from djangobb_forum.templatetags.forum_extras import profile_link, link, mobile_link
class TestLinkTags(TestCase):
fixtures = ['test_forum.json']
def setUp(self):
self.user = User.objects.get(pk=1)
self.post = Post.objects.get(pk=1)
def test_profile_link(self):
plink = profile_link(self.user)
self.assertEqual(plink, u"<a href=\"/forum/user/djangobb/\">djangobb</a>")
def test_link(self):
l = link(self.post)
self.assertEqual(l, "<a href=\"/forum/post/1/\">Test Body</a>")
def test_mobile_link(self):
l = mobile_link(self.post)
self.assertEqual(l, "<a href=\"/forum/post/1/lofi/\">Test Body</a>")
|
en
| 0.769321
|
# -*- coding: utf-8 -*-
| 2.444294
| 2
|
task_5/scripts/spawning_test.py
|
Shobuj-Paul/Strawberry-Stacker
| 0
|
6628369
|
#!/usr/bin/env python3
import rospy
import rospkg
from gazebo_msgs.msg import ModelState
from gazebo_msgs.srv import SetModelState
from std_msgs.msg import UInt8
import pandas as pd
box_count_in_row = [0]*15
max_box_per_row = 10
rand = [-0.15, 0.44, 0.04, -0.84, -0.66, -0.1, 0.04, 0.46, -0.54, 0.19, 0.64, 0.32, -0.14, 0.22, -0.11, -0.84, 0.35, 0.46, -0.4, 0.81, 0.57, -0.86, 0.08, -0.92, -0.38, 0.9, -0.53, -0.85, -0.84, 0.3]
boxes_spawned = 0
total_blue_count = 0
total_red_count = 0
data = None
state_msg = ModelState()
def spawn_box(row, box_number, color):
global boxes_spawned, total_blue_count, total_red_count, state_msg
if color == 'blue':
state_msg.model_name = 'box_'+str(20+total_blue_count)
total_blue_count += 1
elif color == 'red':
state_msg.model_name = 'box_'+str(total_red_count)
total_red_count += 1
state_msg.pose.position.x = 2 + (box_number-1)*7 + rand[boxes_spawned]
state_msg.pose.position.y = 1 + (row-1)*4
state_msg.pose.position.z = 0.053
rospy.wait_for_service('/gazebo/set_model_state')
try:
set_state = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
set_state(state_msg)
except rospy.ServiceException as e:
print(str(e))
boxes_spawned += 1
info_pub.publish(row)
def check_spawn(event):
global data, box_count_in_row, timer
if data:
if (event.current_real.secs >= data[0][0]):
box_count_in_row[data[0][1]] += 1
if (box_count_in_row[data[0][1]] < max_box_per_row):
spawn_box(data[0][1], box_count_in_row[data[0][1]], data[0][2])
del data[0]
else:
print("Box count for row exceeded")
else:
timer.shutdown()
rospy.signal_shutdown("All boxes in config spawned, shuttinng down node")
if __name__ == '__main__':
rospy.init_node('spawn_boxes')
info_pub = rospy.Publisher('/spawn_info', UInt8, queue_size=1)
rp = rospkg.RosPack()
pkg_path = rp.get_path('task_5')
csv_data = pd.read_csv(pkg_path+'/scripts/config.csv')
csv_data = csv_data.sort_values(by=['time'])
data = csv_data.values.tolist()
print(pkg_path+'/scripts/config.csv')
timer = rospy.Timer(rospy.Duration(0.2), check_spawn)
while not rospy.is_shutdown():
rospy.spin()
|
#!/usr/bin/env python3
import rospy
import rospkg
from gazebo_msgs.msg import ModelState
from gazebo_msgs.srv import SetModelState
from std_msgs.msg import UInt8
import pandas as pd
box_count_in_row = [0]*15
max_box_per_row = 10
rand = [-0.15, 0.44, 0.04, -0.84, -0.66, -0.1, 0.04, 0.46, -0.54, 0.19, 0.64, 0.32, -0.14, 0.22, -0.11, -0.84, 0.35, 0.46, -0.4, 0.81, 0.57, -0.86, 0.08, -0.92, -0.38, 0.9, -0.53, -0.85, -0.84, 0.3]
boxes_spawned = 0
total_blue_count = 0
total_red_count = 0
data = None
state_msg = ModelState()
def spawn_box(row, box_number, color):
global boxes_spawned, total_blue_count, total_red_count, state_msg
if color == 'blue':
state_msg.model_name = 'box_'+str(20+total_blue_count)
total_blue_count += 1
elif color == 'red':
state_msg.model_name = 'box_'+str(total_red_count)
total_red_count += 1
state_msg.pose.position.x = 2 + (box_number-1)*7 + rand[boxes_spawned]
state_msg.pose.position.y = 1 + (row-1)*4
state_msg.pose.position.z = 0.053
rospy.wait_for_service('/gazebo/set_model_state')
try:
set_state = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
set_state(state_msg)
except rospy.ServiceException as e:
print(str(e))
boxes_spawned += 1
info_pub.publish(row)
def check_spawn(event):
global data, box_count_in_row, timer
if data:
if (event.current_real.secs >= data[0][0]):
box_count_in_row[data[0][1]] += 1
if (box_count_in_row[data[0][1]] < max_box_per_row):
spawn_box(data[0][1], box_count_in_row[data[0][1]], data[0][2])
del data[0]
else:
print("Box count for row exceeded")
else:
timer.shutdown()
rospy.signal_shutdown("All boxes in config spawned, shuttinng down node")
if __name__ == '__main__':
rospy.init_node('spawn_boxes')
info_pub = rospy.Publisher('/spawn_info', UInt8, queue_size=1)
rp = rospkg.RosPack()
pkg_path = rp.get_path('task_5')
csv_data = pd.read_csv(pkg_path+'/scripts/config.csv')
csv_data = csv_data.sort_values(by=['time'])
data = csv_data.values.tolist()
print(pkg_path+'/scripts/config.csv')
timer = rospy.Timer(rospy.Duration(0.2), check_spawn)
while not rospy.is_shutdown():
rospy.spin()
|
fr
| 0.221828
|
#!/usr/bin/env python3
| 2.202159
| 2
|
mammoth_snowplow/launch/include/realsense/rs_launch.py
|
iscumd/Yeti2020
| 1
|
6628370
|
<reponame>iscumd/Yeti2020<filename>mammoth_snowplow/launch/include/realsense/rs_launch.py
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launch realsense2_camera node."""
import os
from launch import LaunchDescription
import launch_ros.actions
from launch.actions import DeclareLaunchArgument
from launch.substitutions import LaunchConfiguration, PythonExpression
from launch.conditions import IfCondition
configurable_parameters = [
{
'name': 'camera_name',
'default': 'camera',
'description': 'camera unique name'
},
{
'name': 'serial_no',
'default': "''",
'description': 'choose device by serial number'
},
{
'name': 'usb_port_id',
'default': "''",
'description': 'choose device by usb port id'
},
{
'name': 'device_type',
'default': "''",
'description': 'choose device by type'
},
{
'name': 'config_file',
'default': "''",
'description': 'yaml config file'
},
{
'name': 'enable_pointcloud',
'default': 'false',
'description': 'enable pointcloud'
},
{
'name': 'unite_imu_method',
'default': "''",
'description': '[copy|linear_interpolation]'
},
{
'name': 'json_file_path',
'default': "''",
'description': 'allows advanced configuration'
},
{
'name': 'log_level',
'default': 'info',
'description': 'debug log level [DEBUG|INFO|WARN|ERROR|FATAL]'
},
{
'name': 'output',
'default': 'screen',
'description': 'pipe node output [screen|log]'
},
{
'name': 'depth_width',
'default': '-1',
'description': 'depth image width'
},
{
'name': 'depth_height',
'default': '-1',
'description': 'depth image height'
},
{
'name': 'enable_depth',
'default': 'true',
'description': 'enable depth stream'
},
{
'name': 'color_width',
'default': '-1',
'description': 'color image width'
},
{
'name': 'color_height',
'default': '-1',
'description': 'color image height'
},
{
'name': 'enable_color',
'default': 'true',
'description': 'enable color stream'
},
{
'name': 'infra_width',
'default': '-1',
'description': 'infra width'
},
{
'name': 'infra_height',
'default': '-1',
'description': 'infra width'
},
{
'name': 'enable_infra1',
'default': 'true',
'description': 'enable infra1 stream'
},
{
'name': 'enable_infra2',
'default': 'true',
'description': 'enable infra2 stream'
},
{
'name': 'infra_rgb',
'default': 'false',
'description': 'enable infra2 stream'
},
{
'name': 'fisheye_width',
'default': '-1',
'description': 'fisheye width'
},
{
'name': 'fisheye_height',
'default': '-1',
'description': 'fisheye width'
},
{
'name': 'enable_fisheye1',
'default': 'true',
'description': 'enable fisheye1 stream'
},
{
'name': 'enable_fisheye2',
'default': 'true',
'description': 'enable fisheye2 stream'
},
{
'name': 'confidence_width',
'default': '-1',
'description': 'depth image width'
},
{
'name': 'confidence_height',
'default': '-1',
'description': 'depth image height'
},
{
'name': 'enable_confidence',
'default': 'true',
'description': 'enable depth stream'
},
{
'name': 'fisheye_fps',
'default': '-1.',
'description': ''
},
{
'name': 'depth_fps',
'default': '-1.',
'description': ''
},
{
'name': 'confidence_fps',
'default': '-1.',
'description': ''
},
{
'name': 'infra_fps',
'default': '-1.',
'description': ''
},
{
'name': 'color_fps',
'default': '-1.',
'description': ''
},
{
'name': 'gyro_fps',
'default': '-1.',
'description': ''
},
{
'name': 'accel_fps',
'default': '-1.',
'description': ''
},
{
'name': 'color_qos',
'default': 'SYSTEM_DEFAULT',
'description': 'QoS profile name'
},
{
'name': 'confidence_qos',
'default': 'SYSTEM_DEFAULT',
'description': 'QoS profile name'
},
{
'name': 'depth_qos',
'default': 'SYSTEM_DEFAULT',
'description': 'QoS profile name'
},
{
'name': 'fisheye_qos',
'default': 'SYSTEM_DEFAULT',
'description': 'QoS profile name'
},
{
'name': 'infra_qos',
'default': 'SYSTEM_DEFAULT',
'description': 'QoS profile name'
},
{
'name': 'pointcloud_qos',
'default': 'SYSTEM_DEFAULT',
'description': 'QoS profile name'
},
{
'name': 'enable_gyro',
'default': 'false',
'description': ''
},
{
'name': 'enable_accel',
'default': 'false',
'description': ''
},
{
'name': 'pointcloud_texture_stream',
'default': 'RS2_STREAM_COLOR',
'description': 'testure stream for pointcloud'
},
{
'name': 'pointcloud_texture_index',
'default': '0',
'description': 'testure stream index for pointcloud'
},
{
'name': 'enable_sync',
'default': 'false',
'description': ''
},
{
'name': 'align_depth',
'default': 'false',
'description': ''
},
{
'name': 'filters',
'default': "''",
'description': ''
},
{
'name': 'clip_distance',
'default': '-2.',
'description': ''
},
{
'name': 'linear_accel_cov',
'default': '0.01',
'description': ''
},
{
'name': 'initial_reset',
'default': 'false',
'description': ''
},
{
'name': 'allow_no_texture_points',
'default': 'false',
'description': ''
},
{
'name': 'ordered_pc',
'default': 'false',
'description': ''
},
{
'name': 'calib_odom_file',
'default': "''",
'description': "''"
},
{
'name': 'topic_odom_in',
'default': "''",
'description': 'topic for T265 wheel odometry'
},
{
'name': 'tf_publish_rate',
'default': '20.0',
'description': 'Rate of publishing static_tf'
},
{
'name': 'diagnostics_period',
'default': '0.0',
'description': 'Rate of publishing diagnostics. 0=Disabled'
},
{
'name': 'rosbag_filename',
'default': "''",
'description': 'A realsense bagfile to run from as a device'
},
{
'name': 'temporal.holes_fill',
'default': '0',
'description': 'Persistency mode'
},
{
'name': 'stereo_module.exposure.1',
'default': '7500',
'description': 'Initial value for hdr_merge filter'
},
{
'name': 'stereo_module.gain.1',
'default': '16',
'description': 'Initial value for hdr_merge filter'
},
{
'name': 'stereo_module.exposure.2',
'default': '1',
'description': 'Initial value for hdr_merge filter'
},
{
'name': 'stereo_module.gain.2',
'default': '16',
'description': 'Initial value for hdr_merge filter'
},
{
'name': 'wait_for_device_timeout',
'default': '-1.',
'description': 'Timeout for waiting for device to connect (Seconds)'
},
{
'name': 'reconnect_timeout',
'default': '6.',
'description':
'Timeout(seconds) between consequtive reconnection attempts'
},
{
'name': 'odom_frame_id',
'default': 'odom',
'description': 'set odom frame'
},
{
'name': 'pose_frame_id',
'default': 'base_footprint',
'description': 'set pose frame'
},
{
'name': 'publish_tf',
'default': 'true',
'description': 'publish tf'
},
]
def declare_configurable_parameters(parameters):
return [
DeclareLaunchArgument(param['name'],
default_value=param['default'],
description=param['description'])
for param in parameters
]
def set_configurable_parameters(parameters):
return dict([(param['name'], LaunchConfiguration(param['name']))
for param in parameters])
def generate_launch_description():
if (os.getenv('ROS_DISTRO') == "dashing") or (os.getenv('ROS_DISTRO')
== "eloquent"):
return LaunchDescription(
declare_configurable_parameters(configurable_parameters) + [
# Realsense
launch_ros.actions.Node(
condition=IfCondition(
PythonExpression(
[LaunchConfiguration('config_file'), " == ''"])),
package='realsense2_camera',
node_namespace=LaunchConfiguration("camera_name"),
node_name=LaunchConfiguration("camera_name"),
node_executable='realsense2_camera_node',
prefix=['stdbuf -o L'],
parameters=[
set_configurable_parameters(configurable_parameters)
],
output='screen',
arguments=[
'--ros-args', '--log-level',
LaunchConfiguration('log_level')
],
),
launch_ros.actions.Node(
condition=IfCondition(
PythonExpression(
[LaunchConfiguration('config_file'), " != ''"])),
package='realsense2_camera',
node_namespace=LaunchConfiguration("camera_name"),
node_name=LaunchConfiguration("camera_name"),
node_executable='realsense2_camera_node',
prefix=['stdbuf -o L'],
parameters=[
set_configurable_parameters(configurable_parameters),
PythonExpression([LaunchConfiguration("config_file")])
],
output='screen',
arguments=[
'--ros-args', '--log-level',
LaunchConfiguration('log_level')
],
),
])
else:
return LaunchDescription(
declare_configurable_parameters(configurable_parameters) + [
# Realsense
launch_ros.actions.Node(
condition=IfCondition(
PythonExpression(
[LaunchConfiguration('config_file'), " == ''"])),
package='realsense2_camera',
namespace=LaunchConfiguration("camera_name"),
name=LaunchConfiguration("camera_name"),
executable='realsense2_camera_node',
parameters=[
set_configurable_parameters(configurable_parameters)
],
remappings=[
('/camera/odom/sample', '/mammoth/odom'),
],
output='screen',
arguments=[
'--ros-args', '--log-level',
LaunchConfiguration('log_level')
],
emulate_tty=True,
),
launch_ros.actions.Node(
condition=IfCondition(
PythonExpression(
[LaunchConfiguration('config_file'), " != ''"])),
package='realsense2_camera',
namespace=LaunchConfiguration("camera_name"),
name=LaunchConfiguration("camera_name"),
executable='realsense2_camera_node',
parameters=[
set_configurable_parameters(configurable_parameters),
PythonExpression([LaunchConfiguration("config_file")])
],
remappings=[
('/camera/odom/sample', '/mammoth/odom'),
],
output='screen',
arguments=[
'--ros-args', '--log-level',
LaunchConfiguration('log_level')
],
emulate_tty=True,
),
])
|
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launch realsense2_camera node."""
import os
from launch import LaunchDescription
import launch_ros.actions
from launch.actions import DeclareLaunchArgument
from launch.substitutions import LaunchConfiguration, PythonExpression
from launch.conditions import IfCondition
configurable_parameters = [
{
'name': 'camera_name',
'default': 'camera',
'description': 'camera unique name'
},
{
'name': 'serial_no',
'default': "''",
'description': 'choose device by serial number'
},
{
'name': 'usb_port_id',
'default': "''",
'description': 'choose device by usb port id'
},
{
'name': 'device_type',
'default': "''",
'description': 'choose device by type'
},
{
'name': 'config_file',
'default': "''",
'description': 'yaml config file'
},
{
'name': 'enable_pointcloud',
'default': 'false',
'description': 'enable pointcloud'
},
{
'name': 'unite_imu_method',
'default': "''",
'description': '[copy|linear_interpolation]'
},
{
'name': 'json_file_path',
'default': "''",
'description': 'allows advanced configuration'
},
{
'name': 'log_level',
'default': 'info',
'description': 'debug log level [DEBUG|INFO|WARN|ERROR|FATAL]'
},
{
'name': 'output',
'default': 'screen',
'description': 'pipe node output [screen|log]'
},
{
'name': 'depth_width',
'default': '-1',
'description': 'depth image width'
},
{
'name': 'depth_height',
'default': '-1',
'description': 'depth image height'
},
{
'name': 'enable_depth',
'default': 'true',
'description': 'enable depth stream'
},
{
'name': 'color_width',
'default': '-1',
'description': 'color image width'
},
{
'name': 'color_height',
'default': '-1',
'description': 'color image height'
},
{
'name': 'enable_color',
'default': 'true',
'description': 'enable color stream'
},
{
'name': 'infra_width',
'default': '-1',
'description': 'infra width'
},
{
'name': 'infra_height',
'default': '-1',
'description': 'infra width'
},
{
'name': 'enable_infra1',
'default': 'true',
'description': 'enable infra1 stream'
},
{
'name': 'enable_infra2',
'default': 'true',
'description': 'enable infra2 stream'
},
{
'name': 'infra_rgb',
'default': 'false',
'description': 'enable infra2 stream'
},
{
'name': 'fisheye_width',
'default': '-1',
'description': 'fisheye width'
},
{
'name': 'fisheye_height',
'default': '-1',
'description': 'fisheye width'
},
{
'name': 'enable_fisheye1',
'default': 'true',
'description': 'enable fisheye1 stream'
},
{
'name': 'enable_fisheye2',
'default': 'true',
'description': 'enable fisheye2 stream'
},
{
'name': 'confidence_width',
'default': '-1',
'description': 'depth image width'
},
{
'name': 'confidence_height',
'default': '-1',
'description': 'depth image height'
},
{
'name': 'enable_confidence',
'default': 'true',
'description': 'enable depth stream'
},
{
'name': 'fisheye_fps',
'default': '-1.',
'description': ''
},
{
'name': 'depth_fps',
'default': '-1.',
'description': ''
},
{
'name': 'confidence_fps',
'default': '-1.',
'description': ''
},
{
'name': 'infra_fps',
'default': '-1.',
'description': ''
},
{
'name': 'color_fps',
'default': '-1.',
'description': ''
},
{
'name': 'gyro_fps',
'default': '-1.',
'description': ''
},
{
'name': 'accel_fps',
'default': '-1.',
'description': ''
},
{
'name': 'color_qos',
'default': 'SYSTEM_DEFAULT',
'description': 'QoS profile name'
},
{
'name': 'confidence_qos',
'default': 'SYSTEM_DEFAULT',
'description': 'QoS profile name'
},
{
'name': 'depth_qos',
'default': 'SYSTEM_DEFAULT',
'description': 'QoS profile name'
},
{
'name': 'fisheye_qos',
'default': 'SYSTEM_DEFAULT',
'description': 'QoS profile name'
},
{
'name': 'infra_qos',
'default': 'SYSTEM_DEFAULT',
'description': 'QoS profile name'
},
{
'name': 'pointcloud_qos',
'default': 'SYSTEM_DEFAULT',
'description': 'QoS profile name'
},
{
'name': 'enable_gyro',
'default': 'false',
'description': ''
},
{
'name': 'enable_accel',
'default': 'false',
'description': ''
},
{
'name': 'pointcloud_texture_stream',
'default': 'RS2_STREAM_COLOR',
'description': 'testure stream for pointcloud'
},
{
'name': 'pointcloud_texture_index',
'default': '0',
'description': 'testure stream index for pointcloud'
},
{
'name': 'enable_sync',
'default': 'false',
'description': ''
},
{
'name': 'align_depth',
'default': 'false',
'description': ''
},
{
'name': 'filters',
'default': "''",
'description': ''
},
{
'name': 'clip_distance',
'default': '-2.',
'description': ''
},
{
'name': 'linear_accel_cov',
'default': '0.01',
'description': ''
},
{
'name': 'initial_reset',
'default': 'false',
'description': ''
},
{
'name': 'allow_no_texture_points',
'default': 'false',
'description': ''
},
{
'name': 'ordered_pc',
'default': 'false',
'description': ''
},
{
'name': 'calib_odom_file',
'default': "''",
'description': "''"
},
{
'name': 'topic_odom_in',
'default': "''",
'description': 'topic for T265 wheel odometry'
},
{
'name': 'tf_publish_rate',
'default': '20.0',
'description': 'Rate of publishing static_tf'
},
{
'name': 'diagnostics_period',
'default': '0.0',
'description': 'Rate of publishing diagnostics. 0=Disabled'
},
{
'name': 'rosbag_filename',
'default': "''",
'description': 'A realsense bagfile to run from as a device'
},
{
'name': 'temporal.holes_fill',
'default': '0',
'description': 'Persistency mode'
},
{
'name': 'stereo_module.exposure.1',
'default': '7500',
'description': 'Initial value for hdr_merge filter'
},
{
'name': 'stereo_module.gain.1',
'default': '16',
'description': 'Initial value for hdr_merge filter'
},
{
'name': 'stereo_module.exposure.2',
'default': '1',
'description': 'Initial value for hdr_merge filter'
},
{
'name': 'stereo_module.gain.2',
'default': '16',
'description': 'Initial value for hdr_merge filter'
},
{
'name': 'wait_for_device_timeout',
'default': '-1.',
'description': 'Timeout for waiting for device to connect (Seconds)'
},
{
'name': 'reconnect_timeout',
'default': '6.',
'description':
'Timeout(seconds) between consequtive reconnection attempts'
},
{
'name': 'odom_frame_id',
'default': 'odom',
'description': 'set odom frame'
},
{
'name': 'pose_frame_id',
'default': 'base_footprint',
'description': 'set pose frame'
},
{
'name': 'publish_tf',
'default': 'true',
'description': 'publish tf'
},
]
def declare_configurable_parameters(parameters):
return [
DeclareLaunchArgument(param['name'],
default_value=param['default'],
description=param['description'])
for param in parameters
]
def set_configurable_parameters(parameters):
return dict([(param['name'], LaunchConfiguration(param['name']))
for param in parameters])
def generate_launch_description():
if (os.getenv('ROS_DISTRO') == "dashing") or (os.getenv('ROS_DISTRO')
== "eloquent"):
return LaunchDescription(
declare_configurable_parameters(configurable_parameters) + [
# Realsense
launch_ros.actions.Node(
condition=IfCondition(
PythonExpression(
[LaunchConfiguration('config_file'), " == ''"])),
package='realsense2_camera',
node_namespace=LaunchConfiguration("camera_name"),
node_name=LaunchConfiguration("camera_name"),
node_executable='realsense2_camera_node',
prefix=['stdbuf -o L'],
parameters=[
set_configurable_parameters(configurable_parameters)
],
output='screen',
arguments=[
'--ros-args', '--log-level',
LaunchConfiguration('log_level')
],
),
launch_ros.actions.Node(
condition=IfCondition(
PythonExpression(
[LaunchConfiguration('config_file'), " != ''"])),
package='realsense2_camera',
node_namespace=LaunchConfiguration("camera_name"),
node_name=LaunchConfiguration("camera_name"),
node_executable='realsense2_camera_node',
prefix=['stdbuf -o L'],
parameters=[
set_configurable_parameters(configurable_parameters),
PythonExpression([LaunchConfiguration("config_file")])
],
output='screen',
arguments=[
'--ros-args', '--log-level',
LaunchConfiguration('log_level')
],
),
])
else:
return LaunchDescription(
declare_configurable_parameters(configurable_parameters) + [
# Realsense
launch_ros.actions.Node(
condition=IfCondition(
PythonExpression(
[LaunchConfiguration('config_file'), " == ''"])),
package='realsense2_camera',
namespace=LaunchConfiguration("camera_name"),
name=LaunchConfiguration("camera_name"),
executable='realsense2_camera_node',
parameters=[
set_configurable_parameters(configurable_parameters)
],
remappings=[
('/camera/odom/sample', '/mammoth/odom'),
],
output='screen',
arguments=[
'--ros-args', '--log-level',
LaunchConfiguration('log_level')
],
emulate_tty=True,
),
launch_ros.actions.Node(
condition=IfCondition(
PythonExpression(
[LaunchConfiguration('config_file'), " != ''"])),
package='realsense2_camera',
namespace=LaunchConfiguration("camera_name"),
name=LaunchConfiguration("camera_name"),
executable='realsense2_camera_node',
parameters=[
set_configurable_parameters(configurable_parameters),
PythonExpression([LaunchConfiguration("config_file")])
],
remappings=[
('/camera/odom/sample', '/mammoth/odom'),
],
output='screen',
arguments=[
'--ros-args', '--log-level',
LaunchConfiguration('log_level')
],
emulate_tty=True,
),
])
|
en
| 0.833147
|
# Copyright (c) 2018 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Launch realsense2_camera node. # Realsense # Realsense
| 1.713069
| 2
|
lldb/test/API/tools/lldb-vscode/runInTerminal/TestVSCode_runInTerminal.py
|
hanzhan1/llvm
| 305
|
6628371
|
<gh_stars>100-1000
"""
Test lldb-vscode runInTerminal reverse request
"""
import unittest2
import vscode
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
import lldbvscode_testcase
import time
import os
class TestVSCode_runInTerminal(lldbvscode_testcase.VSCodeTestCaseBase):
mydir = TestBase.compute_mydir(__file__)
@skipUnlessDarwin
@skipIfRemote
def test_runInTerminal(self):
'''
Tests the "runInTerminal" reverse request. It makes sure that the IDE can
launch the inferior with the correct environment variables and arguments.
'''
program = self.getBuildArtifact("a.out")
source = 'main.c'
self.build_and_launch(program, stopOnEntry=True, runInTerminal=True, args=["foobar"], env=["FOO=bar"])
breakpoint_line = line_number(source, '// breakpoint')
self.set_source_breakpoints(source, [breakpoint_line])
self.continue_to_next_stop()
# We verify we actually stopped inside the loop
counter = int(self.vscode.get_local_variable_value('counter'))
self.assertTrue(counter > 0)
# We verify we were able to set the launch arguments
argc = int(self.vscode.get_local_variable_value('argc'))
self.assertEqual(argc, 2)
argv1 = self.vscode.request_evaluate('argv[1]')['body']['result']
self.assertIn('foobar', argv1)
# We verify we were able to set the environment
env = self.vscode.request_evaluate('foo')['body']['result']
self.assertIn('bar', env)
|
"""
Test lldb-vscode runInTerminal reverse request
"""
import unittest2
import vscode
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
import lldbvscode_testcase
import time
import os
class TestVSCode_runInTerminal(lldbvscode_testcase.VSCodeTestCaseBase):
mydir = TestBase.compute_mydir(__file__)
@skipUnlessDarwin
@skipIfRemote
def test_runInTerminal(self):
'''
Tests the "runInTerminal" reverse request. It makes sure that the IDE can
launch the inferior with the correct environment variables and arguments.
'''
program = self.getBuildArtifact("a.out")
source = 'main.c'
self.build_and_launch(program, stopOnEntry=True, runInTerminal=True, args=["foobar"], env=["FOO=bar"])
breakpoint_line = line_number(source, '// breakpoint')
self.set_source_breakpoints(source, [breakpoint_line])
self.continue_to_next_stop()
# We verify we actually stopped inside the loop
counter = int(self.vscode.get_local_variable_value('counter'))
self.assertTrue(counter > 0)
# We verify we were able to set the launch arguments
argc = int(self.vscode.get_local_variable_value('argc'))
self.assertEqual(argc, 2)
argv1 = self.vscode.request_evaluate('argv[1]')['body']['result']
self.assertIn('foobar', argv1)
# We verify we were able to set the environment
env = self.vscode.request_evaluate('foo')['body']['result']
self.assertIn('bar', env)
|
en
| 0.928397
|
Test lldb-vscode runInTerminal reverse request Tests the "runInTerminal" reverse request. It makes sure that the IDE can launch the inferior with the correct environment variables and arguments. # We verify we actually stopped inside the loop # We verify we were able to set the launch arguments # We verify we were able to set the environment
| 2.695549
| 3
|
ambari-server/src/test/python/TestCheckHost.py
|
vsosrc/ambari
| 0
|
6628372
|
# !/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
import os
import socket
from resource_management import Script,ConfigDictionary
from mock.mock import patch
from mock.mock import MagicMock
from unittest import TestCase
check_host = __import__('check_host')
from check_host import CheckHost
class TestCheckHost(TestCase):
@patch("os.path.isfile")
@patch.object(Script, 'get_config')
@patch.object(Script, 'get_tmp_dir')
@patch("resource_management.libraries.script.Script.put_structured_out")
def testJavaHomeAvailableCheck(self, structured_out_mock, get_tmp_dir_mock, mock_config, os_isfile_mock):
# test, java home exists
os_isfile_mock.return_value = True
get_tmp_dir_mock.return_value = "/tmp"
mock_config.return_value = {"commandParams" : {"check_execute_list" : "java_home_check",
"java_home" : "test_java_home"}}
checkHost = CheckHost()
checkHost.actionexecute(None)
self.assertEquals(os_isfile_mock.call_args[0][0], 'test_java_home/bin/java')
self.assertEquals(structured_out_mock.call_args[0][0], {'java_home_check': {'message': 'Java home exists!',
'exit_code': 0}})
# test, java home doesn't exist
os_isfile_mock.reset_mock()
os_isfile_mock.return_value = False
checkHost.actionexecute(None)
self.assertEquals(os_isfile_mock.call_args[0][0], 'test_java_home/bin/java')
self.assertEquals(structured_out_mock.call_args[0][0], {'java_home_check': {"message": "Java home doesn't exist!",
"exit_code" : 1}})
@patch.object(Script, 'get_config')
@patch.object(Script, 'get_tmp_dir')
@patch("check_host.Execute")
@patch("resource_management.libraries.script.Script.put_structured_out")
@patch("subprocess.Popen")
@patch("check_host.format")
@patch("os.path.isfile")
def testDBConnectionCheck(self, isfile_mock, format_mock, popenMock, structured_out_mock, execute_mock, get_tmp_dir_mock, mock_config):
# test, download DBConnectionVerification.jar failed
mock_config.return_value = {"commandParams" : {"check_execute_list" : "db_connection_check",
"java_home" : "test_java_home",
"ambari_server_host" : "test_host",
"jdk_location" : "test_jdk_location",
"db_name" : "mysql",
"db_connection_url" : "test_db_connection_url",
"user_name" : "test_user_name",
"user_passwd" : "<PASSWORD>",
"jdk_name" : "test_jdk_name"}}
get_tmp_dir_mock.return_value = "/tmp"
execute_mock.side_effect = Exception("test exception")
isfile_mock.return_value = True
checkHost = CheckHost()
checkHost.actionexecute(None)
self.assertEquals(structured_out_mock.call_args[0][0], {'db_connection_check': {'message': 'Error downloading ' \
'DBConnectionVerification.jar from Ambari Server resources. Check network access to Ambari ' \
'Server.\ntest exception', 'exit_code': 1}})
self.assertEquals(format_mock.call_args_list[2][0][0], "/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf " \
"--retry 5 {jdk_location}{check_db_connection_jar_name} -o {check_db_connection_jar_name}'")
self.assertEquals(format_mock.call_args_list[3][0][0], "[ -f /usr/lib/ambari-agent/{check_db_connection_jar_name}]")
# test, download jdbc driver failed
mock_config.return_value = {"commandParams" : {"check_execute_list" : "db_connection_check",
"java_home" : "test_java_home",
"ambari_server_host" : "test_host",
"jdk_location" : "test_jdk_location",
"db_name" : "oracle",
"db_connection_url" : "test_db_connection_url",
"user_name" : "test_user_name",
"user_passwd" : "<PASSWORD>",
"jdk_name" : "test_jdk_name"}}
format_mock.reset_mock()
execute_mock.reset_mock()
p = MagicMock()
execute_mock.side_effect = [p, Exception("test exception")]
checkHost.actionexecute(None)
self.assertEquals(format_mock.call_args[0][0], 'Error: Ambari Server cannot download the database JDBC driver '
'and is unable to test the database connection. You must run ambari-server setup '
'--jdbc-db={db_name} --jdbc-driver=/path/to/your/{db_name}/driver.jar on the Ambari '
'Server host to make the JDBC driver available for download and to enable testing '
'the database connection.\n')
self.assertEquals(structured_out_mock.call_args[0][0]['db_connection_check']['exit_code'], 1)
self.assertEquals(format_mock.call_args_list[4][0][0], "/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf " \
"--retry 5 {jdbc_url} -o {jdbc_name}'")
self.assertEquals(format_mock.call_args_list[5][0][0], "[ -f /usr/lib/ambari-agent/{jdbc_name}]")
# test, no connection to remote db
mock_config.return_value = {"commandParams" : {"check_execute_list" : "db_connection_check",
"java_home" : "test_java_home",
"ambari_server_host" : "test_host",
"jdk_location" : "test_jdk_location",
"db_name" : "postgresql",
"db_connection_url" : "test_db_connection_url",
"user_name" : "test_user_name",
"user_passwd" : "<PASSWORD>",
"jdk_name" : "test_jdk_name"}}
format_mock.reset_mock()
execute_mock.reset_mock()
execute_mock.side_effect = [p, p]
s = MagicMock()
s.communicate.return_value = ("test message", "")
s.returncode = 1
popenMock.return_value = s
checkHost.actionexecute(None)
self.assertEquals(structured_out_mock.call_args[0][0], {'db_connection_check': {'message': 'test message',
'exit_code': 1}})
self.assertEquals(format_mock.call_args[0][0],'{java64_home}/bin/java -cp /usr/lib/ambari-agent/{check_db_' \
'connection_jar_name}:/usr/lib/ambari-agent/{jdbc_name} org.' \
'apache.ambari.server.DBConnectionVerification \'{db_connection_url}\' ' \
'{user_name} {user_passwd!p} {jdbc_driver}')
# test, db connection success
execute_mock.reset_mock()
execute_mock.side_effect = [p, p]
s.returncode = 0
checkHost.actionexecute(None)
self.assertEquals(structured_out_mock.call_args[0][0], {'db_connection_check':
{'message': 'DB connection check completed successfully!', 'exit_code': 0}})
#test jdk_name and java home are not available
mock_config.return_value = {"commandParams" : {"check_execute_list" : "db_connection_check",
"java_home" : "test_java_home",
"ambari_server_host" : "test_host",
"jdk_location" : "test_jdk_location",
"db_connection_url" : "test_db_connection_url",
"user_name" : "test_user_name",
"user_passwd" : "<PASSWORD>",
"db_name" : "postgresql"}}
isfile_mock.return_value = False
checkHost.actionexecute(None)
self.assertEquals(structured_out_mock.call_args[0][0], {'db_connection_check': {'message': 'Custom java is not ' \
'available on host. Please install it. Java home should be the same as on server. \n', 'exit_code': 1}})
@patch("socket.gethostbyname")
@patch.object(Script, 'get_config')
@patch.object(Script, 'get_tmp_dir')
@patch("resource_management.libraries.script.Script.put_structured_out")
def testHostResolution(self, structured_out_mock, get_tmp_dir_mock, mock_config, mock_socket):
mock_socket.return_value = "192.168.1.1"
jsonFilePath = os.path.join("../resources/custom_actions", "check_host_ip_addresses.json")
with open(jsonFilePath, "r") as jsonFile:
jsonPayload = json.load(jsonFile)
mock_config.return_value = ConfigDictionary(jsonPayload)
get_tmp_dir_mock.return_value = "/tmp"
checkHost = CheckHost()
checkHost.actionexecute(None)
# ensure the correct function was called
self.assertTrue(structured_out_mock.called)
structured_out_mock.assert_called_with({'host_resolution_check':
{'failures': [],
'message': 'All hosts resolved to an IP address.',
'failed_count': 0,
'success_count': 5,
'exit_code': 0}})
# try it now with errors
mock_socket.side_effect = socket.error
checkHost.actionexecute(None)
structured_out_mock.assert_called_with({'host_resolution_check':
{'failures': [
{'cause': (), 'host': u'c6401.ambari.apache.org', 'type': 'FORWARD_LOOKUP'},
{'cause': (), 'host': u'c6402.ambari.apache.org', 'type': 'FORWARD_LOOKUP'},
{'cause': (), 'host': u'c6403.ambari.apache.org', 'type': 'FORWARD_LOOKUP'},
{'cause': (), 'host': u'foobar', 'type': 'FORWARD_LOOKUP'},
{'cause': (), 'host': u'!!!', 'type': 'FORWARD_LOOKUP'}],
'message': 'There were 5 host(s) that could not resolve to an IP address.',
'failed_count': 5, 'success_count': 0, 'exit_code': 0}})
@patch.object(Script, 'get_config')
@patch.object(Script, 'get_tmp_dir')
@patch("resource_management.libraries.script.Script.put_structured_out")
def testInvalidCheck(self, structured_out_mock, get_tmp_dir_mock, mock_config):
jsonFilePath = os.path.join("../resources/custom_actions", "invalid_check.json")
with open(jsonFilePath, "r") as jsonFile:
jsonPayload = json.load(jsonFile)
mock_config.return_value = ConfigDictionary(jsonPayload)
get_tmp_dir_mock.return_value = "tmp"
checkHost = CheckHost()
checkHost.actionexecute(None)
# ensure the correct function was called
self.assertTrue(structured_out_mock.called)
structured_out_mock.assert_called_with({})
|
# !/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
import os
import socket
from resource_management import Script,ConfigDictionary
from mock.mock import patch
from mock.mock import MagicMock
from unittest import TestCase
check_host = __import__('check_host')
from check_host import CheckHost
class TestCheckHost(TestCase):
@patch("os.path.isfile")
@patch.object(Script, 'get_config')
@patch.object(Script, 'get_tmp_dir')
@patch("resource_management.libraries.script.Script.put_structured_out")
def testJavaHomeAvailableCheck(self, structured_out_mock, get_tmp_dir_mock, mock_config, os_isfile_mock):
# test, java home exists
os_isfile_mock.return_value = True
get_tmp_dir_mock.return_value = "/tmp"
mock_config.return_value = {"commandParams" : {"check_execute_list" : "java_home_check",
"java_home" : "test_java_home"}}
checkHost = CheckHost()
checkHost.actionexecute(None)
self.assertEquals(os_isfile_mock.call_args[0][0], 'test_java_home/bin/java')
self.assertEquals(structured_out_mock.call_args[0][0], {'java_home_check': {'message': 'Java home exists!',
'exit_code': 0}})
# test, java home doesn't exist
os_isfile_mock.reset_mock()
os_isfile_mock.return_value = False
checkHost.actionexecute(None)
self.assertEquals(os_isfile_mock.call_args[0][0], 'test_java_home/bin/java')
self.assertEquals(structured_out_mock.call_args[0][0], {'java_home_check': {"message": "Java home doesn't exist!",
"exit_code" : 1}})
@patch.object(Script, 'get_config')
@patch.object(Script, 'get_tmp_dir')
@patch("check_host.Execute")
@patch("resource_management.libraries.script.Script.put_structured_out")
@patch("subprocess.Popen")
@patch("check_host.format")
@patch("os.path.isfile")
def testDBConnectionCheck(self, isfile_mock, format_mock, popenMock, structured_out_mock, execute_mock, get_tmp_dir_mock, mock_config):
# test, download DBConnectionVerification.jar failed
mock_config.return_value = {"commandParams" : {"check_execute_list" : "db_connection_check",
"java_home" : "test_java_home",
"ambari_server_host" : "test_host",
"jdk_location" : "test_jdk_location",
"db_name" : "mysql",
"db_connection_url" : "test_db_connection_url",
"user_name" : "test_user_name",
"user_passwd" : "<PASSWORD>",
"jdk_name" : "test_jdk_name"}}
get_tmp_dir_mock.return_value = "/tmp"
execute_mock.side_effect = Exception("test exception")
isfile_mock.return_value = True
checkHost = CheckHost()
checkHost.actionexecute(None)
self.assertEquals(structured_out_mock.call_args[0][0], {'db_connection_check': {'message': 'Error downloading ' \
'DBConnectionVerification.jar from Ambari Server resources. Check network access to Ambari ' \
'Server.\ntest exception', 'exit_code': 1}})
self.assertEquals(format_mock.call_args_list[2][0][0], "/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf " \
"--retry 5 {jdk_location}{check_db_connection_jar_name} -o {check_db_connection_jar_name}'")
self.assertEquals(format_mock.call_args_list[3][0][0], "[ -f /usr/lib/ambari-agent/{check_db_connection_jar_name}]")
# test, download jdbc driver failed
mock_config.return_value = {"commandParams" : {"check_execute_list" : "db_connection_check",
"java_home" : "test_java_home",
"ambari_server_host" : "test_host",
"jdk_location" : "test_jdk_location",
"db_name" : "oracle",
"db_connection_url" : "test_db_connection_url",
"user_name" : "test_user_name",
"user_passwd" : "<PASSWORD>",
"jdk_name" : "test_jdk_name"}}
format_mock.reset_mock()
execute_mock.reset_mock()
p = MagicMock()
execute_mock.side_effect = [p, Exception("test exception")]
checkHost.actionexecute(None)
self.assertEquals(format_mock.call_args[0][0], 'Error: Ambari Server cannot download the database JDBC driver '
'and is unable to test the database connection. You must run ambari-server setup '
'--jdbc-db={db_name} --jdbc-driver=/path/to/your/{db_name}/driver.jar on the Ambari '
'Server host to make the JDBC driver available for download and to enable testing '
'the database connection.\n')
self.assertEquals(structured_out_mock.call_args[0][0]['db_connection_check']['exit_code'], 1)
self.assertEquals(format_mock.call_args_list[4][0][0], "/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf " \
"--retry 5 {jdbc_url} -o {jdbc_name}'")
self.assertEquals(format_mock.call_args_list[5][0][0], "[ -f /usr/lib/ambari-agent/{jdbc_name}]")
# test, no connection to remote db
mock_config.return_value = {"commandParams" : {"check_execute_list" : "db_connection_check",
"java_home" : "test_java_home",
"ambari_server_host" : "test_host",
"jdk_location" : "test_jdk_location",
"db_name" : "postgresql",
"db_connection_url" : "test_db_connection_url",
"user_name" : "test_user_name",
"user_passwd" : "<PASSWORD>",
"jdk_name" : "test_jdk_name"}}
format_mock.reset_mock()
execute_mock.reset_mock()
execute_mock.side_effect = [p, p]
s = MagicMock()
s.communicate.return_value = ("test message", "")
s.returncode = 1
popenMock.return_value = s
checkHost.actionexecute(None)
self.assertEquals(structured_out_mock.call_args[0][0], {'db_connection_check': {'message': 'test message',
'exit_code': 1}})
self.assertEquals(format_mock.call_args[0][0],'{java64_home}/bin/java -cp /usr/lib/ambari-agent/{check_db_' \
'connection_jar_name}:/usr/lib/ambari-agent/{jdbc_name} org.' \
'apache.ambari.server.DBConnectionVerification \'{db_connection_url}\' ' \
'{user_name} {user_passwd!p} {jdbc_driver}')
# test, db connection success
execute_mock.reset_mock()
execute_mock.side_effect = [p, p]
s.returncode = 0
checkHost.actionexecute(None)
self.assertEquals(structured_out_mock.call_args[0][0], {'db_connection_check':
{'message': 'DB connection check completed successfully!', 'exit_code': 0}})
#test jdk_name and java home are not available
mock_config.return_value = {"commandParams" : {"check_execute_list" : "db_connection_check",
"java_home" : "test_java_home",
"ambari_server_host" : "test_host",
"jdk_location" : "test_jdk_location",
"db_connection_url" : "test_db_connection_url",
"user_name" : "test_user_name",
"user_passwd" : "<PASSWORD>",
"db_name" : "postgresql"}}
isfile_mock.return_value = False
checkHost.actionexecute(None)
self.assertEquals(structured_out_mock.call_args[0][0], {'db_connection_check': {'message': 'Custom java is not ' \
'available on host. Please install it. Java home should be the same as on server. \n', 'exit_code': 1}})
@patch("socket.gethostbyname")
@patch.object(Script, 'get_config')
@patch.object(Script, 'get_tmp_dir')
@patch("resource_management.libraries.script.Script.put_structured_out")
def testHostResolution(self, structured_out_mock, get_tmp_dir_mock, mock_config, mock_socket):
mock_socket.return_value = "192.168.1.1"
jsonFilePath = os.path.join("../resources/custom_actions", "check_host_ip_addresses.json")
with open(jsonFilePath, "r") as jsonFile:
jsonPayload = json.load(jsonFile)
mock_config.return_value = ConfigDictionary(jsonPayload)
get_tmp_dir_mock.return_value = "/tmp"
checkHost = CheckHost()
checkHost.actionexecute(None)
# ensure the correct function was called
self.assertTrue(structured_out_mock.called)
structured_out_mock.assert_called_with({'host_resolution_check':
{'failures': [],
'message': 'All hosts resolved to an IP address.',
'failed_count': 0,
'success_count': 5,
'exit_code': 0}})
# try it now with errors
mock_socket.side_effect = socket.error
checkHost.actionexecute(None)
structured_out_mock.assert_called_with({'host_resolution_check':
{'failures': [
{'cause': (), 'host': u'c6401.ambari.apache.org', 'type': 'FORWARD_LOOKUP'},
{'cause': (), 'host': u'c6402.ambari.apache.org', 'type': 'FORWARD_LOOKUP'},
{'cause': (), 'host': u'c6403.ambari.apache.org', 'type': 'FORWARD_LOOKUP'},
{'cause': (), 'host': u'foobar', 'type': 'FORWARD_LOOKUP'},
{'cause': (), 'host': u'!!!', 'type': 'FORWARD_LOOKUP'}],
'message': 'There were 5 host(s) that could not resolve to an IP address.',
'failed_count': 5, 'success_count': 0, 'exit_code': 0}})
@patch.object(Script, 'get_config')
@patch.object(Script, 'get_tmp_dir')
@patch("resource_management.libraries.script.Script.put_structured_out")
def testInvalidCheck(self, structured_out_mock, get_tmp_dir_mock, mock_config):
jsonFilePath = os.path.join("../resources/custom_actions", "invalid_check.json")
with open(jsonFilePath, "r") as jsonFile:
jsonPayload = json.load(jsonFile)
mock_config.return_value = ConfigDictionary(jsonPayload)
get_tmp_dir_mock.return_value = "tmp"
checkHost = CheckHost()
checkHost.actionexecute(None)
# ensure the correct function was called
self.assertTrue(structured_out_mock.called)
structured_out_mock.assert_called_with({})
|
en
| 0.89078
|
# !/usr/bin/env python Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. # test, java home exists # test, java home doesn't exist # test, download DBConnectionVerification.jar failed # test, download jdbc driver failed # test, no connection to remote db # test, db connection success #test jdk_name and java home are not available # ensure the correct function was called # try it now with errors # ensure the correct function was called
| 1.9034
| 2
|
Sudoku_py/Sudoku.py
|
yuryybk/opencv-basic-samples
| 0
|
6628373
|
<filename>Sudoku_py/Sudoku.py
import cv2, numpy as np
import sys
def get_new(old):
new = np.ones(old.shape, np.uint8)
cv2.bitwise_not(new,new)
return new
if __name__ == '__main__':
img = cv2.imread(sys.argv[1])
img = cv2.GaussianBlur(img,(5,5),0)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
mask = np.zeros((gray.shape),np.uint8)
kernel1 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(11,11))
close = cv2.morphologyEx(gray,cv2.MORPH_CLOSE,kernel1)
div = np.float32(gray)/(close)
res = np.uint8(cv2.normalize(div,div,0,255,cv2.NORM_MINMAX))
res2 = cv2.cvtColor(res,cv2.COLOR_GRAY2BGR)
thresh = cv2.adaptiveThreshold(res,255,0,1,19,2)
_ ,contour,hier = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
max_area = 0
best_cnt = None
for cnt in contour:
area = cv2.contourArea(cnt)
if area > 1000:
if area > max_area:
max_area = area
best_cnt = cnt
cv2.drawContours(mask,[best_cnt],0,255,-1)
cv2.drawContours(mask,[best_cnt],0,0,2)
res = cv2.bitwise_and(res,mask)
cv2.namedWindow('result', cv2.WINDOW_NORMAL)
cv2.imshow('result', res)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
<filename>Sudoku_py/Sudoku.py
import cv2, numpy as np
import sys
def get_new(old):
new = np.ones(old.shape, np.uint8)
cv2.bitwise_not(new,new)
return new
if __name__ == '__main__':
img = cv2.imread(sys.argv[1])
img = cv2.GaussianBlur(img,(5,5),0)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
mask = np.zeros((gray.shape),np.uint8)
kernel1 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(11,11))
close = cv2.morphologyEx(gray,cv2.MORPH_CLOSE,kernel1)
div = np.float32(gray)/(close)
res = np.uint8(cv2.normalize(div,div,0,255,cv2.NORM_MINMAX))
res2 = cv2.cvtColor(res,cv2.COLOR_GRAY2BGR)
thresh = cv2.adaptiveThreshold(res,255,0,1,19,2)
_ ,contour,hier = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
max_area = 0
best_cnt = None
for cnt in contour:
area = cv2.contourArea(cnt)
if area > 1000:
if area > max_area:
max_area = area
best_cnt = cnt
cv2.drawContours(mask,[best_cnt],0,255,-1)
cv2.drawContours(mask,[best_cnt],0,0,2)
res = cv2.bitwise_and(res,mask)
cv2.namedWindow('result', cv2.WINDOW_NORMAL)
cv2.imshow('result', res)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
none
| 1
| 2.797687
| 3
|
|
src/models/mobilenetv2.py
|
JoseLuisRojasAranda/tfmodels
| 1
|
6628374
|
<filename>src/models/mobilenetv2.py<gh_stars>1-10
import tensorflow as tf
from tensorflow.keras import Model, Sequential
from tensorflow.keras import layers
from models.ops.conv_ops import normal_conv, ReLU6, pointwise_conv
from models.ops.conv_blocks import BottleneckResidualBlock, basic_conv_block
from models.ops.conv_blocks import pwise_conv_block, separable_conv_block
from models.ops.model_layers import LayerList
from models.ops.SSD import SSD_layer
#
# Implementacion de MobilenetV2, suponiendo un input size de 224x224x3
#
class MobileNetV2(Model):
@staticmethod
def build_model(classes, width_multiplier=1):
a = width_multiplier
model = Sequential()
def crearBloques2(input_channels, t, c, n, s):
for i in range(n):
# Solo el primer bloque tiene stride 2
# a partir del segundo bottleneck el numero de input_channels es igual al output_channels
if i > 0:
s = 1
input_channels = c
l_num = 1
l_res = BottleneckResidualBlock(input_channels, int(c), stride=s, t=t,
name="layer_{}_BottleneckResidualBlock".format(l_num))
model.add(l_res)
l = basic_conv_block(int(a*32), (3, 3), stride=2, dropout=0.25, activation="ReLU6", name="layer_0")
model.add(l)
# los bloques de bottleneck intermedios
crearBloques2(32, 1, a*16, 1, 1)
crearBloques2(16, 6, a*24, 2, 2)
crearBloques2(24, 6, a*32, 3, 2)
crearBloques2(32, 6, a*64, 4, 2)
crearBloques2(69, 6, a*96, 3, 1)
crearBloques2(96, 6, a*160, 3, 2)
crearBloques2(160, 6, a*320, 1, 1)
# ultima convolucion
l = pwise_conv_block(int(a*1280), dropout=0.25, activation="ReLU6",
name="layer_conv1x1")
model.add(l)
# Average Pooling y Fully Connected
model.add(layers.AveragePooling2D(pool_size=(7,7), strides=(1,1)))
model.add(layers.Flatten())
model.add(layers.Dense(1280))
model.add(layers.Dropout(0.5, name="dropout"))
model.add(layers.Dense(classes))
model.add(layers.Activation("softmax"))
return model
#
# Args:
# classes: el numero de classes que realizara predicciones
# width_multiplier: numero para controlar la complejidad del modelo
#
def __init__(self, classes, width_multiplier=1):
super(MobileNetV2, self).__init__()
a = width_multiplier
self.classes = classes
self.m_layers = LayerList()
# convolucion inicial
l = basic_conv_block(int(a*32), (3, 3), stride=2,
dropout=0.25, activation="ReLU6", name="layer_0")
self.m_layers.add(l)
# los bloques de bottleneck intermedios
self.crearBloques(32, 1, a*16, 1, 1)
self.crearBloques(16, 6, a*24, 2, 2)
self.crearBloques(24, 6, a*32, 3, 2)
self.crearBloques(32, 6, a*64, 4, 2)
self.crearBloques(69, 6, a*96, 3, 1)
self.crearBloques(96, 6, a*160, 3, 2)
self.crearBloques(160, 6, a*320, 1, 1)
# ultima convolucion
l = pwise_conv_block(int(a*1280), dropout=0.25, activation="ReLU6",
name="layer_{}_conv1x1".format(len(self.m_layers)))
self.m_layers.add(l)
# Average Pooling y Fully Connected
self.m_layers.add(layers.AveragePooling2D(pool_size=(7,7),
strides=(1,1)), training_arg=False)
self.m_layers.add(layers.Flatten(), training_arg=False)
self.m_layers.add(layers.Dense(1280))
self.m_layers.add(layers.Dropout(0.5, name="dropout"), only_training=True)
self.m_layers.add(layers.Dense(classes))
self.m_layers.add(layers.Activation("softmax"))
# Crea BottleneckResidualBlock n veces
def crearBloques(self, input_channels, t, c, n, s):
for i in range(n):
# Solo el primer bloque tiene stride 2
# a partir del segundo bottleneck el numero de input_channels es igual al output_channels
if i > 0:
s = 1
input_channels = c
l_num = len(self.m_layers)
l = BottleneckResidualBlock(input_channels, int(c), stride=s, t=t,
name="layer_{}_BottleneckResidualBlock".format(l_num))
self.m_layers.add(l)
def call(self, inputs, training=False):
x = self.m_layers.feed_forward(inputs, training)
return x
@staticmethod
def get_input_size():
return 224
# Implementacion de SSD framework para object detection con arquitectura
# de MobileNetV2, SSD esta configurado de la siguiente manera segun paper:
# - first SSD layer: expansion de layer 15 stride=16
# - second and rest SSD layer: ultima layer stride=32
class MobileNetV2_SSD(Model):
def __init__(self, classes, width_multiplier=1):
super(MobileNetV2_SSD, self).__init__()
#self.classes = classes
a = width_multiplier
self.classes = classes
self.m_layers = LayerList()
self.saved_block = 13 # output que guarda para ssd_lite
# convolucion inicial
l = basic_conv_block(int(a*32), (3, 3), stride=2,
dropout=0.25, activation="ReLU6", name="layer_0")
self.m_layers.add(l)
# los bloques de bottleneck intermedios
self.crearBloques(32, 1, a*16, 1, 1)
self.crearBloques(16, 6, a*24, 2, 2)
self.crearBloques(24, 6, a*32, 3, 2)
self.crearBloques(32, 6, a*64, 4, 2)
self.crearBloques(69, 6, a*96, 3, 1)
self.crearBloques(96, 6, a*160, 3, 2)
self.crearBloques(160, 6, a*320, 1, 1)
# ultima convolucion
l_num = len(self.m_layers)
l = pwise_conv_block(int(a*1280), dropout=0.25, activation="ReLU6",
name="layer_{}_conv1x1".format(l_num))
self.m_layers.add(l, save_as="last_layer")
# SSD extra feature layers
l = separable_conv_block(512, 2, name="ssd_feature_layer_1")
self.m_layers.add(l, save_as=l.name)
l = separable_conv_block(256, 2, name="ssd_feature_layer_2")
self.m_layers.add(l, save_as=l.name)
l = separable_conv_block(256, 2, name="ssd_feature_layer_3")
self.m_layers.add(l, save_as=l.name)
l = separable_conv_block(128, 2, name="ssd_feature_layer_4")
self.m_layers.add(l, save_as=l.name)
# SSD classifier
l = SSD_layer(classes=self.classes, num_fmap=1, total_fmaps=5,
img_size=320, name="ssd_layer_1")
self.m_layers.add(l, save_as=l.name, custom_input="layer_13",
custom_input_index=0)
l = SSD_layer(classes=self.classes, num_fmap=2, total_fmaps=5,
img_size=320, name="ssd_layer_2")
self.m_layers.add(l, save_as=l.name, custom_input="last_layer")
l = SSD_layer(classes=self.classes, num_fmap=3, total_fmaps=5,
img_size=320, name="ssd_layer_3")
self.m_layers.add(l, save_as=l.name, custom_input="ssd_feature_layer_1")
l = SSD_layer(classes=self.classes, num_fmap=4, total_fmaps=5,
img_size=320, name="ssd_layer_4")
self.m_layers.add(l, save_as=l.name, custom_input="ssd_feature_layer_2")
l = SSD_layer(classes=self.classes, num_fmap=5, total_fmaps=5,
img_size=320, name="ssd_layer_5")
self.m_layers.add(l, save_as=l.name, custom_input="ssd_feature_layer_4")
# Crea BottleneckResidualBlock n veces
def crearBloques(self, input_channels, t, c, n, s):
for i in range(n):
# Solo el primer bloque tiene stride 2
# a partir del segundo bottleneck el numero de input_channels es igual al output_channels
if i > 0:
s = 1
input_channels = c
l_num = len(self.m_layers)
l = BottleneckResidualBlock(input_channels, int(c), stride=s, t=t,
name="layer_{}_BottleneckResidualBlock".format(l_num))
save_as = None
if l_num == self.saved_block:
save_as = "layer_{}".format(l_num)
self.m_layers.add(l, save_as=save_as)
def call(self, inputs, training=False):
x = self.m_layers.feed_forward(inputs, training)
return x
@staticmethod
def get_fmaps_array():
return [(20, 20), (10, 10), (5, 5), (3, 3), (1, 1)]
@staticmethod
def get_input_size():
return 320
|
<filename>src/models/mobilenetv2.py<gh_stars>1-10
import tensorflow as tf
from tensorflow.keras import Model, Sequential
from tensorflow.keras import layers
from models.ops.conv_ops import normal_conv, ReLU6, pointwise_conv
from models.ops.conv_blocks import BottleneckResidualBlock, basic_conv_block
from models.ops.conv_blocks import pwise_conv_block, separable_conv_block
from models.ops.model_layers import LayerList
from models.ops.SSD import SSD_layer
#
# Implementacion de MobilenetV2, suponiendo un input size de 224x224x3
#
class MobileNetV2(Model):
    """MobileNetV2 classifier (Sandler et al., 2018) for 224x224x3 inputs.

    ``width_multiplier`` (alpha in the paper) scales the output channel
    count of every layer, trading accuracy for model size/latency.
    """

    @staticmethod
    def build_model(classes, width_multiplier=1):
        """Build the architecture as a ``tf.keras.Sequential`` model.

        Args:
            classes: number of output classes of the final softmax.
            width_multiplier: channel-width scaling factor (alpha).

        Returns:
            A ``Sequential`` model ready to be compiled.
        """
        a = width_multiplier
        model = Sequential()
        # Running counter so every bottleneck gets a unique layer name.
        # BUGFIX: the original code used a constant ``l_num = 1``, naming
        # every bottleneck "layer_1_BottleneckResidualBlock" (duplicate
        # layer names inside a single Keras model).
        layer_idx = 1

        # Crea BottleneckResidualBlock n veces: only the first repetition
        # keeps stride ``s``; the rest use stride 1 with input width == c.
        def crearBloques2(input_channels, t, c, n, s):
            nonlocal layer_idx
            for i in range(n):
                if i > 0:
                    s = 1
                    input_channels = c
                l_res = BottleneckResidualBlock(
                    input_channels, int(c), stride=s, t=t,
                    name="layer_{}_BottleneckResidualBlock".format(layer_idx))
                layer_idx += 1
                model.add(l_res)

        # Initial full convolution, stride 2.
        l = basic_conv_block(int(a*32), (3, 3), stride=2, dropout=0.25,
                             activation="ReLU6", name="layer_0")
        model.add(l)
        # Intermediate bottleneck stages: (input_channels, t, c, n, s).
        # NOTE(review): the input widths are not scaled by the multiplier
        # while ``c`` is — only consistent for width_multiplier=1; confirm.
        crearBloques2(32, 1, a*16, 1, 1)
        crearBloques2(16, 6, a*24, 2, 2)
        crearBloques2(24, 6, a*32, 3, 2)
        crearBloques2(32, 6, a*64, 4, 2)
        # BUGFIX: 64 (not 69) — the previous stage outputs 64 channels,
        # per the MobileNetV2 paper (Table 2).
        crearBloques2(64, 6, a*96, 3, 1)
        crearBloques2(96, 6, a*160, 3, 2)
        crearBloques2(160, 6, a*320, 1, 1)
        # Final pointwise convolution.
        l = pwise_conv_block(int(a*1280), dropout=0.25, activation="ReLU6",
                             name="layer_conv1x1")
        model.add(l)
        # Global average pooling + fully-connected classifier head.
        model.add(layers.AveragePooling2D(pool_size=(7,7), strides=(1,1)))
        model.add(layers.Flatten())
        model.add(layers.Dense(1280))
        model.add(layers.Dropout(0.5, name="dropout"))
        model.add(layers.Dense(classes))
        model.add(layers.Activation("softmax"))
        return model

    #
    # Args:
    #   classes: el numero de classes que realizara predicciones
    #   width_multiplier: numero para controlar la complejidad del modelo
    #
    def __init__(self, classes, width_multiplier=1):
        """Subclassed-model variant of the same architecture."""
        super(MobileNetV2, self).__init__()
        a = width_multiplier
        self.classes = classes
        self.m_layers = LayerList()
        # Initial convolution.
        l = basic_conv_block(int(a*32), (3, 3), stride=2,
                             dropout=0.25, activation="ReLU6", name="layer_0")
        self.m_layers.add(l)
        # Intermediate bottleneck stages.
        self.crearBloques(32, 1, a*16, 1, 1)
        self.crearBloques(16, 6, a*24, 2, 2)
        self.crearBloques(24, 6, a*32, 3, 2)
        self.crearBloques(32, 6, a*64, 4, 2)
        # BUGFIX: 64 (not 69) — output width of the previous stage.
        self.crearBloques(64, 6, a*96, 3, 1)
        self.crearBloques(96, 6, a*160, 3, 2)
        self.crearBloques(160, 6, a*320, 1, 1)
        # Final pointwise convolution.
        l = pwise_conv_block(int(a*1280), dropout=0.25, activation="ReLU6",
                             name="layer_{}_conv1x1".format(len(self.m_layers)))
        self.m_layers.add(l)
        # Global average pooling + fully-connected classifier head.
        self.m_layers.add(layers.AveragePooling2D(pool_size=(7,7),
                          strides=(1,1)), training_arg=False)
        self.m_layers.add(layers.Flatten(), training_arg=False)
        self.m_layers.add(layers.Dense(1280))
        self.m_layers.add(layers.Dropout(0.5, name="dropout"), only_training=True)
        self.m_layers.add(layers.Dense(classes))
        self.m_layers.add(layers.Activation("softmax"))

    # Crea BottleneckResidualBlock n veces
    def crearBloques(self, input_channels, t, c, n, s):
        """Append ``n`` bottleneck blocks; only the first uses stride ``s``."""
        for i in range(n):
            # After the first repetition: stride 1, input width == output width.
            if i > 0:
                s = 1
                input_channels = c
            l_num = len(self.m_layers)
            l = BottleneckResidualBlock(input_channels, int(c), stride=s, t=t,
                                        name="layer_{}_BottleneckResidualBlock".format(l_num))
            self.m_layers.add(l)

    def call(self, inputs, training=False):
        """Forward pass through all registered layers."""
        x = self.m_layers.feed_forward(inputs, training)
        return x

    @staticmethod
    def get_input_size():
        """Expected square input resolution in pixels."""
        return 224
# Implementacion de SSD framework para object detection con arquitectura
# de MobileNetV2, SSD esta configurado de la siguiente manera segun paper:
# - first SSD layer: expansion de layer 15 stride=16
# - second and rest SSD layer: ultima layer stride=32
class MobileNetV2_SSD(Model):
    """MobileNetV2 backbone + SSD detection heads for 320x320 inputs.

    Five SSD heads are attached: the first to the saved intermediate
    bottleneck (``layer_13``, stride 16), the second to the last backbone
    layer (stride 32), and the remaining ones to extra separable-conv
    feature layers of decreasing resolution.
    """

    def __init__(self, classes, width_multiplier=1):
        """
        Args:
            classes: number of object classes the detector predicts.
            width_multiplier: channel-width scaling factor (alpha).
        """
        super(MobileNetV2_SSD, self).__init__()
        a = width_multiplier
        self.classes = classes
        self.m_layers = LayerList()
        self.saved_block = 13  # index of the block whose output feeds the first SSD head
        # Initial convolution.
        l = basic_conv_block(int(a*32), (3, 3), stride=2,
                             dropout=0.25, activation="ReLU6", name="layer_0")
        self.m_layers.add(l)
        # Intermediate bottleneck stages: (input_channels, t, c, n, s).
        self.crearBloques(32, 1, a*16, 1, 1)
        self.crearBloques(16, 6, a*24, 2, 2)
        self.crearBloques(24, 6, a*32, 3, 2)
        self.crearBloques(32, 6, a*64, 4, 2)
        # BUGFIX: 64 (not 69) — the previous stage outputs 64 channels,
        # per the MobileNetV2 paper (Table 2).
        self.crearBloques(64, 6, a*96, 3, 1)
        self.crearBloques(96, 6, a*160, 3, 2)
        self.crearBloques(160, 6, a*320, 1, 1)
        # Final backbone pointwise convolution.
        l_num = len(self.m_layers)
        l = pwise_conv_block(int(a*1280), dropout=0.25, activation="ReLU6",
                             name="layer_{}_conv1x1".format(l_num))
        self.m_layers.add(l, save_as="last_layer")
        # SSD extra feature layers (each halves the spatial resolution).
        l = separable_conv_block(512, 2, name="ssd_feature_layer_1")
        self.m_layers.add(l, save_as=l.name)
        l = separable_conv_block(256, 2, name="ssd_feature_layer_2")
        self.m_layers.add(l, save_as=l.name)
        l = separable_conv_block(256, 2, name="ssd_feature_layer_3")
        self.m_layers.add(l, save_as=l.name)
        l = separable_conv_block(128, 2, name="ssd_feature_layer_4")
        self.m_layers.add(l, save_as=l.name)
        # SSD classification/regression heads, one per feature map; each
        # reads its input from a previously saved layer via ``custom_input``.
        l = SSD_layer(classes=self.classes, num_fmap=1, total_fmaps=5,
                      img_size=320, name="ssd_layer_1")
        self.m_layers.add(l, save_as=l.name, custom_input="layer_13",
                          custom_input_index=0)
        l = SSD_layer(classes=self.classes, num_fmap=2, total_fmaps=5,
                      img_size=320, name="ssd_layer_2")
        self.m_layers.add(l, save_as=l.name, custom_input="last_layer")
        l = SSD_layer(classes=self.classes, num_fmap=3, total_fmaps=5,
                      img_size=320, name="ssd_layer_3")
        self.m_layers.add(l, save_as=l.name, custom_input="ssd_feature_layer_1")
        l = SSD_layer(classes=self.classes, num_fmap=4, total_fmaps=5,
                      img_size=320, name="ssd_layer_4")
        self.m_layers.add(l, save_as=l.name, custom_input="ssd_feature_layer_2")
        l = SSD_layer(classes=self.classes, num_fmap=5, total_fmaps=5,
                      img_size=320, name="ssd_layer_5")
        self.m_layers.add(l, save_as=l.name, custom_input="ssd_feature_layer_4")

    # Crea BottleneckResidualBlock n veces
    def crearBloques(self, input_channels, t, c, n, s):
        """Append ``n`` bottleneck blocks; saves the ``saved_block``-th one
        under the alias ``layer_<idx>`` for the first SSD head."""
        for i in range(n):
            # After the first repetition: stride 1, input width == output width.
            if i > 0:
                s = 1
                input_channels = c
            l_num = len(self.m_layers)
            l = BottleneckResidualBlock(input_channels, int(c), stride=s, t=t,
                                        name="layer_{}_BottleneckResidualBlock".format(l_num))
            save_as = None
            if l_num == self.saved_block:
                save_as = "layer_{}".format(l_num)
            self.m_layers.add(l, save_as=save_as)

    def call(self, inputs, training=False):
        """Forward pass through backbone, feature layers and SSD heads."""
        x = self.m_layers.feed_forward(inputs, training)
        return x

    @staticmethod
    def get_fmaps_array():
        """Spatial sizes (h, w) of the five SSD feature maps."""
        return [(20, 20), (10, 10), (5, 5), (3, 3), (1, 1)]

    @staticmethod
    def get_input_size():
        """Expected square input resolution in pixels."""
        return 320
|
es
| 0.692676
|
# # Implementacion de MobilenetV2, suponiendo un input size de 224x224x3 # # Solo el primer bloque tiene stride 2 # a partir del segundo bottleneck el numero de input_channels es igual al output_channels # los bloques de bottleneck intermedios # ultima convolucion # Average Pooling y Fully Connected # # Args: # classes: el numero de classes que realizara predicciones # width_multiplier: numero para controlar la complejidad del modelo # # convolucion inicial # los bloques de bottleneck intermedios # ultima convolucion # Average Pooling y Fully Connected # Crea BottleneckResidualBlock n veces # Solo el primer bloque tiene stride 2 # a partir del segundo bottleneck el numero de input_channels es igual al output_channels # Implementacion de SSD framework para object detection con arquitectura # de MobileNetV2, SSD esta configurado de la siguiente manera segun paper: # - first SSD layer: expansion de layer 15 stride=16 # - second and rest SSD layer: ultima layer stride=32 #self.classes = classes # output que guarda para ssd_lite # convolucion inicial # los bloques de bottleneck intermedios # ultima convolucion # SSD extra feature layers # SSD classifier # Crea BottleneckResidualBlock n veces # Solo el primer bloque tiene stride 2 # a partir del segundo bottleneck el numero de input_channels es igual al output_channels
| 2.608841
| 3
|
test/augmenters/test_blur.py
|
HubukiNinten/imgaug
| 0
|
6628375
|
from __future__ import print_function, division, absolute_import
import warnings
import sys
import itertools
# unittest only added in 3.4 self.subTest()
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import cv2
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from imgaug import dtypes as iadt
from imgaug import random as iarandom
from imgaug.testutils import keypoints_equal, reseed
class Test_blur_gaussian_(unittest.TestCase):
    """Tests for ``iaa.blur_gaussian_`` (in-place gaussian blur helper)."""
    def setUp(self):
        # Reset imgaug's global RNG so each test run is deterministic.
        reseed()
    def test_integration(self):
        """Blurring one bright pixel spreads mass for every backend/channel count."""
        backends = ["auto", "scipy", "cv2"]
        nb_channels_lst = [None, 1, 3, 4, 5, 10]
        gen = itertools.product(backends, nb_channels_lst)
        for backend, nb_channels in gen:
            with self.subTest(backend=backend, nb_channels=nb_channels):
                image = np.zeros((5, 5), dtype=np.uint8)
                if nb_channels is not None:
                    image = np.tile(image[..., np.newaxis], (1, 1, nb_channels))
                image[2, 2] = 255
                mask = image < 255
                observed = iaa.blur_gaussian_(
                    np.copy(image), sigma=5.0, backend=backend)
                assert observed.shape == image.shape
                assert observed.dtype.name == "uint8"
                assert np.all(observed[2, 2] < 255)
                assert np.sum(observed[mask]) > (5*5-1)
                if nb_channels is not None and nb_channels > 1:
                    for c in sm.xrange(1, observed.shape[2]):
                        assert np.array_equal(observed[..., c],
                                              observed[..., 0])
    def test_sigma_zero(self):
        """sigma=0 must be an exact no-op for HW, HW1 and HW3 images."""
        image = np.arange(4*4).astype(np.uint8).reshape((4, 4))
        observed = iaa.blur_gaussian_(np.copy(image), 0)
        assert np.array_equal(observed, image)
        image = np.arange(4*4).astype(np.uint8).reshape((4, 4, 1))
        observed = iaa.blur_gaussian_(np.copy(image), 0)
        assert np.array_equal(observed, image)
        image = np.arange(4*4*3).astype(np.uint8).reshape((4, 4, 3))
        observed = iaa.blur_gaussian_(np.copy(image), 0)
        assert np.array_equal(observed, image)
    def test_eps(self):
        """A huge eps threshold must disable the blur entirely."""
        image = np.arange(4*4).astype(np.uint8).reshape((4, 4))
        observed_no_eps = iaa.blur_gaussian_(np.copy(image), 1.0, eps=0)
        observed_with_eps = iaa.blur_gaussian_(np.copy(image), 1.0, eps=1e10)
        assert not np.array_equal(observed_no_eps, observed_with_eps)
        assert np.array_equal(observed_with_eps, image)
    def test_ksize(self):
        """ksize=None derives the kernel size as 2.6*sigma; explicit ksize is passed through."""
        def side_effect(image, ksize, sigmaX, sigmaY, borderType):
            return image + 1
        sigmas = [5.0, 5.0]
        ksizes = [None, 3]
        ksizes_expected = [2.6*5.0, 3]
        gen = zip(sigmas, ksizes, ksizes_expected)
        for (sigma, ksize, ksize_expected) in gen:
            with self.subTest(sigma=sigma, ksize=ksize):
                mock_GaussianBlur = mock.Mock(side_effect=side_effect)
                image = np.arange(4*4).astype(np.uint8).reshape((4, 4))
                with mock.patch('cv2.GaussianBlur', mock_GaussianBlur):
                    observed = iaa.blur_gaussian_(
                        np.copy(image),
                        sigma=sigma,
                        ksize=ksize,
                        backend="cv2")
                    assert np.array_equal(observed, image+1)
                cargs = mock_GaussianBlur.call_args
                assert mock_GaussianBlur.call_count == 1
                assert np.array_equal(cargs[0][0], image)
                assert isinstance(cargs[0][1], tuple)
                assert np.allclose(
                    np.float32(cargs[0][1]),
                    np.float32([ksize_expected, ksize_expected]))
                assert np.isclose(cargs[1]["sigmaX"], sigma)
                assert np.isclose(cargs[1]["sigmaY"], sigma)
                assert cargs[1]["borderType"] == cv2.BORDER_REFLECT_101
    def test_more_than_four_channels(self):
        """Channel counts beyond 4 (incl. >512) must be handled."""
        shapes = [
            (1, 1, 4),
            (1, 1, 5),
            (1, 1, 512),
            (1, 1, 513)
        ]
        for shape in shapes:
            with self.subTest(shape=shape):
                image = np.zeros(shape, dtype=np.uint8)
                image_aug = iaa.blur_gaussian_(np.copy(image), 1.0)
                assert image_aug.shape == image.shape
    def test_zero_sized_axes(self):
        """Degenerate (zero-sized) images must pass through without error."""
        shapes = [
            (0, 0),
            (0, 1),
            (1, 0),
            (0, 1, 0),
            (1, 0, 0),
            (0, 1, 1),
            (1, 0, 1)
        ]
        for shape in shapes:
            with self.subTest(shape=shape):
                image = np.zeros(shape, dtype=np.uint8)
                image_aug = iaa.blur_gaussian_(np.copy(image), 1.0)
                assert image_aug.shape == image.shape
    def test_backends_called(self):
        """backend="cv2"/"scipy" must dispatch to the corresponding library call."""
        def side_effect_cv2(image, ksize, sigmaX, sigmaY, borderType):
            return image + 1
        def side_effect_scipy(image, sigma, mode):
            return image + 1
        mock_GaussianBlur = mock.Mock(side_effect=side_effect_cv2)
        mock_gaussian_filter = mock.Mock(side_effect=side_effect_scipy)
        image = np.arange(4*4).astype(np.uint8).reshape((4, 4))
        with mock.patch('cv2.GaussianBlur', mock_GaussianBlur):
            _observed = iaa.blur_gaussian_(
                np.copy(image), sigma=1.0, eps=0, backend="cv2")
        assert mock_GaussianBlur.call_count == 1
        with mock.patch('scipy.ndimage.gaussian_filter', mock_gaussian_filter):
            _observed = iaa.blur_gaussian_(
                np.copy(image), sigma=1.0, eps=0, backend="scipy")
        assert mock_gaussian_filter.call_count == 1
    def test_backends_similar(self):
        """cv2 and scipy backends must produce nearly identical blurs."""
        with self.subTest(nb_channels=None):
            size = 10
            image = np.arange(
                0, size*size).astype(np.uint8).reshape((size, size))
            image_cv2 = iaa.blur_gaussian_(
                np.copy(image), sigma=3.0, ksize=20, backend="cv2")
            image_scipy = iaa.blur_gaussian_(
                np.copy(image), sigma=3.0, backend="scipy")
            diff = np.abs(image_cv2.astype(np.int32)
                          - image_scipy.astype(np.int32))
            assert np.average(diff) < 0.05 * (size * size)
        with self.subTest(nb_channels=3):
            size = 10
            image = np.arange(
                0, size*size).astype(np.uint8).reshape((size, size))
            image = np.tile(image[..., np.newaxis], (1, 1, 3))
            image[1] += 1
            image[2] += 2
            image_cv2 = iaa.blur_gaussian_(
                np.copy(image), sigma=3.0, ksize=20, backend="cv2")
            image_scipy = iaa.blur_gaussian_(
                np.copy(image), sigma=3.0, backend="scipy")
            diff = np.abs(image_cv2.astype(np.int32)
                          - image_scipy.astype(np.int32))
            assert np.average(diff) < 0.05 * (size * size)
            for c in sm.xrange(3):
                diff = np.abs(image_cv2[..., c].astype(np.int32)
                              - image_scipy[..., c].astype(np.int32))
                assert np.average(diff) < 0.05 * (size * size)
    def test_warnings(self):
        """Providing ksize with the scipy backend must emit exactly one warning."""
        # note that self.assertWarningRegex does not exist in python 2.7
        with warnings.catch_warnings(record=True) as caught_warnings:
            warnings.simplefilter("always")
            _ = iaa.blur_gaussian_(
                np.zeros((1, 1), dtype=np.uint32),
                sigma=3.0,
                ksize=11,
                backend="scipy")
        assert len(caught_warnings) == 1
        assert (
            "but also provided 'ksize' argument"
            in str(caught_warnings[-1].message))
    def test_other_dtypes_sigma_0(self):
        """sigma=0 must preserve values exactly for every supported dtype."""
        dtypes_to_test_list = [
            ["bool",
             "uint8", "uint16", "uint32", "uint64",
             "int8", "int16", "int32", "int64",
             "float16", "float32", "float64", "float128"],
            ["bool",
             "uint8", "uint16", "uint32", "uint64",
             "int8", "int16", "int32", "int64",
             "float16", "float32", "float64", "float128"]
        ]
        gen = zip(["scipy", "cv2"], dtypes_to_test_list)
        for backend, dtypes_to_test in gen:
            # bool
            if "bool" in dtypes_to_test:
                with self.subTest(backend=backend, dtype="bool"):
                    image = np.zeros((3, 3), dtype=bool)
                    image[1, 1] = True
                    image_aug = iaa.blur_gaussian_(
                        np.copy(image), sigma=0, backend=backend)
                    assert image_aug.dtype.name == "bool"
                    assert np.all(image_aug == image)
            # uint, int
            uint_dts = [np.uint8, np.uint16, np.uint32, np.uint64]
            int_dts = [np.int8, np.int16, np.int32, np.int64]
            for dtype in uint_dts + int_dts:
                dtype = np.dtype(dtype)
                if dtype.name in dtypes_to_test:
                    with self.subTest(backend=backend, dtype=dtype.name):
                        _min_value, center_value, _max_value = \
                            iadt.get_value_range_of_dtype(dtype)
                        image = np.zeros((3, 3), dtype=dtype)
                        image[1, 1] = int(center_value)
                        image_aug = iaa.blur_gaussian_(
                            np.copy(image), sigma=0, backend=backend)
                        assert image_aug.dtype.name == dtype.name
                        assert np.all(image_aug == image)
            # float
            float_dts = [np.float16, np.float32, np.float64, np.float128]
            for dtype in float_dts:
                dtype = np.dtype(dtype)
                if dtype.name in dtypes_to_test:
                    with self.subTest(backend=backend, dtype=dtype.name):
                        _min_value, center_value, _max_value = \
                            iadt.get_value_range_of_dtype(dtype)
                        image = np.zeros((3, 3), dtype=dtype)
                        image[1, 1] = center_value
                        image_aug = iaa.blur_gaussian_(
                            np.copy(image), sigma=0, backend=backend)
                        assert image_aug.dtype.name == dtype.name
                        assert np.allclose(image_aug, image)
    def test_other_dtypes_sigma_075(self):
        """Blur at sigma=0.75 must match a precomputed reference kernel per dtype."""
        # prototype kernel, generated via:
        # mask = np.zeros((5, 5), dtype=np.int32)
        # mask[2, 2] = 1000 * 1000
        # kernel = ndimage.gaussian_filter(mask, 0.75)
        mask = np.float64([
            [ 923, 6650, 16163, 6650, 923],
            [ 6650, 47896, 116408, 47896, 6650],
            [ 16163, 116408, 282925, 116408, 16163],
            [ 6650, 47896, 116408, 47896, 6650],
            [ 923, 6650, 16163, 6650, 923]
        ]) / (1000.0 * 1000.0)
        dtypes_to_test_list = [
            # scipy
            ["bool",
             "uint8", "uint16", "uint32", "uint64",
             "int8", "int16", "int32", "int64",
             "float16", "float32", "float64"],
            # cv2
            ["bool",
             "uint8", "uint16",
             "int8", "int16", "int32",
             "float16", "float32", "float64"]
        ]
        gen = zip(["scipy", "cv2"], dtypes_to_test_list)
        for backend, dtypes_to_test in gen:
            # bool
            if "bool" in dtypes_to_test:
                with self.subTest(backend=backend, dtype="bool"):
                    image = np.zeros((5, 5), dtype=bool)
                    image[2, 2] = True
                    image_aug = iaa.blur_gaussian_(
                        np.copy(image), sigma=0.75, backend=backend)
                    assert image_aug.dtype.name == "bool"
                    assert np.all(image_aug == (mask > 0.5))
            # uint, int
            uint_dts = [np.uint8, np.uint16, np.uint32, np.uint64]
            int_dts = [np.int8, np.int16, np.int32, np.int64]
            for dtype in uint_dts + int_dts:
                dtype = np.dtype(dtype)
                if dtype.name in dtypes_to_test:
                    with self.subTest(backend=backend, dtype=dtype.name):
                        min_value, center_value, max_value = \
                            iadt.get_value_range_of_dtype(dtype)
                        dynamic_range = max_value - min_value
                        value = int(center_value + 0.4 * max_value)
                        image = np.zeros((5, 5), dtype=dtype)
                        image[2, 2] = value
                        image_aug = iaa.blur_gaussian_(
                            image, sigma=0.75, backend=backend)
                        expected = (mask * value).astype(dtype)
                        diff = np.abs(image_aug.astype(np.int64)
                                      - expected.astype(np.int64))
                        assert image_aug.shape == mask.shape
                        assert image_aug.dtype.type == dtype
                        if dtype.itemsize <= 1:
                            assert np.max(diff) <= 4
                        else:
                            assert np.max(diff) <= 0.01 * dynamic_range
            # float
            float_dts = [np.float16, np.float32, np.float64, np.float128]
            values = [5000, 1000**1, 1000**2, 1000**3]
            for dtype, value in zip(float_dts, values):
                dtype = np.dtype(dtype)
                if dtype.name in dtypes_to_test:
                    with self.subTest(backend=backend, dtype=dtype.name):
                        image = np.zeros((5, 5), dtype=dtype)
                        image[2, 2] = value
                        image_aug = iaa.blur_gaussian_(
                            image, sigma=0.75, backend=backend)
                        expected = (mask * value).astype(dtype)
                        diff = np.abs(image_aug.astype(np.float128)
                                      - expected.astype(np.float128))
                        assert image_aug.shape == mask.shape
                        assert image_aug.dtype.type == dtype
                        # accepts difference of 2.0, 4.0, 8.0, 16.0 (at 1,
                        # 2, 4, 8 bytes, i.e. 8, 16, 32, 64 bit)
                        max_diff = (
                            np.dtype(dtype).itemsize
                            * 0.01
                            * np.float128(value))
                        assert np.max(diff) < max_diff
    def test_other_dtypes_bool_at_sigma_06(self):
        """Bool blur at sigma=0.6 must threshold the reference kernel at 0.5."""
        # --
        # blur of bool input at sigma=0.6
        # --
        # here we use a special mask and sigma as otherwise the only values
        # ending up with >0.5 would be the ones that
        # were before the blur already at >0.5
        # prototype kernel, generated via:
        # mask = np.zeros((5, 5), dtype=np.float64)
        # mask[1, 0] = 255
        # mask[2, 0] = 255
        # mask[2, 2] = 255
        # mask[2, 4] = 255
        # mask[3, 0] = 255
        # mask = ndimage.gaussian_filter(mask, 1.0, mode="mirror")
        mask_bool = np.float64([
            [ 57, 14, 2, 1, 1],
            [142, 42, 29, 14, 28],
            [169, 69, 114, 56, 114],
            [142, 42, 29, 14, 28],
            [ 57, 14, 2, 1, 1]
        ]) / 255.0
        image = np.zeros((5, 5), dtype=bool)
        image[1, 0] = True
        image[2, 0] = True
        image[2, 2] = True
        image[2, 4] = True
        image[3, 0] = True
        for backend in ["scipy", "cv2"]:
            image_aug = iaa.blur_gaussian_(
                np.copy(image), sigma=0.6, backend=backend)
            expected = mask_bool > 0.5
            assert image_aug.shape == mask_bool.shape
            assert image_aug.dtype.type == np.bool_
            assert np.all(image_aug == expected)
class Test_blur_mean_shift_(unittest.TestCase):
    """Tests for ``iaa.blur_mean_shift_`` (in-place mean-shift blur)."""
    @property
    def image(self):
        # 4x8x3 uint8 test image: left half has small values, right half ~200.
        image = [
            [1, 2, 3, 4, 200, 201, 202, 203],
            [1, 2, 3, 4, 200, 201, 202, 203],
            [1, 2, 3, 4, 200, 201, 202, 203],
            [1, 2, 3, 4, 200, 201, 202, 203]
        ]
        image = np.array(image, dtype=np.uint8).reshape((4, 2*4, 1))
        image = np.tile(image, (1, 1, 3))
        return image
    def test_simple_image(self):
        """Blur changes pixel values but keeps the two halves' value ranges."""
        image = self.image
        image_blurred = iaa.blur_mean_shift_(np.copy(image), 0.5, 0.5)
        assert image_blurred.shape == image.shape
        assert image_blurred.dtype.name == "uint8"
        assert not np.array_equal(image_blurred, image)
        assert 0 <= np.average(image[:, 0:4, :]) <= 5
        assert 199 <= np.average(image[:, 4:, :]) <= 203
    def test_hw_image(self):
        """(H, W) input without channel axis must be supported."""
        image = self.image[:, :, 0]
        image_blurred = iaa.blur_mean_shift_(np.copy(image), 0.5, 0.5)
        assert image_blurred.shape == image.shape
        assert image_blurred.dtype.name == "uint8"
        assert not np.array_equal(image_blurred, image)
    def test_hw1_image(self):
        """(H, W, 1) input must stay three-dimensional."""
        image = self.image[:, :, 0:1]
        image_blurred = iaa.blur_mean_shift_(np.copy(image), 0.5, 0.5)
        assert image_blurred.ndim == 3
        assert image_blurred.shape == image.shape
        assert image_blurred.dtype.name == "uint8"
        assert not np.array_equal(image_blurred, image)
    def test_non_contiguous_image(self):
        """Non-C-contiguous arrays (e.g. flipped views) must be supported."""
        image = self.image
        image_cp = np.copy(np.fliplr(image))
        image = np.fliplr(image)
        assert image.flags["C_CONTIGUOUS"] is False
        image_blurred = iaa.blur_mean_shift_(image, 0.5, 0.5)
        assert image_blurred.shape == image_cp.shape
        assert image_blurred.dtype.name == "uint8"
        assert not np.array_equal(image_blurred, image_cp)
    def test_both_parameters_are_zero(self):
        """Zero spatial/color radii must still run and alter the image."""
        image = self.image[:, :, 0]
        image_blurred = iaa.blur_mean_shift_(np.copy(image), 0, 0)
        assert image_blurred.shape == image.shape
        assert image_blurred.dtype.name == "uint8"
        assert not np.array_equal(image_blurred, image)
    def test_zero_sized_axes(self):
        """Degenerate (zero-sized) images must pass through without error."""
        shapes = [
            (0, 0),
            (0, 1),
            (1, 0),
            (0, 1, 1),
            (1, 0, 1)
        ]
        for shape in shapes:
            with self.subTest(shape=shape):
                image = np.zeros(shape, dtype=np.uint8)
                image_aug = iaa.blur_mean_shift_(np.copy(image), 1.0, 1.0)
                assert image_aug.shape == image.shape
class TestGaussianBlur(unittest.TestCase):
    """Tests for the ``iaa.GaussianBlur`` augmenter."""
    def setUp(self):
        # Reset imgaug's global RNG so each test run is deterministic.
        reseed()
    def test_sigma_is_zero(self):
        # no blur, shouldnt change anything
        base_img = np.array([[0, 0, 0],
                             [0, 255, 0],
                             [0, 0, 0]], dtype=np.uint8)
        base_img = base_img[:, :, np.newaxis]
        images = np.array([base_img])
        aug = iaa.GaussianBlur(sigma=0)
        observed = aug.augment_images(images)
        expected = images
        assert np.array_equal(observed, expected)
    def test_low_sigma(self):
        """Weak blur: center pixel dims, neighbors gain a little mass."""
        base_img = np.array([[0, 0, 0],
                             [0, 255, 0],
                             [0, 0, 0]], dtype=np.uint8)
        base_img = base_img[:, :, np.newaxis]
        images = np.array([base_img])
        images_list = [base_img]
        outer_pixels = ([], [])
        for i in sm.xrange(base_img.shape[0]):
            for j in sm.xrange(base_img.shape[1]):
                if i != j:
                    outer_pixels[0].append(i)
                    outer_pixels[1].append(j)
        # weak blur of center pixel
        aug = iaa.GaussianBlur(sigma=0.5)
        aug_det = aug.to_deterministic()
        # images as numpy array
        observed = aug.augment_images(images)
        assert 100 < observed[0][1, 1] < 255
        assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()
        assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()
        observed = aug_det.augment_images(images)
        assert 100 < observed[0][1, 1] < 255
        assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()
        assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()
        # images as list
        observed = aug.augment_images(images_list)
        assert 100 < observed[0][1, 1] < 255
        assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()
        assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()
        observed = aug_det.augment_images(images_list)
        assert 100 < observed[0][1, 1] < 255
        assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()
        assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()
    def test_keypoints_dont_change(self):
        """Blurring must leave keypoint coordinates untouched."""
        kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
               ia.Keypoint(x=2, y=2)]
        kpsoi = [ia.KeypointsOnImage(kps, shape=(3, 3, 1))]
        aug = iaa.GaussianBlur(sigma=0.5)
        aug_det = aug.to_deterministic()
        observed = aug.augment_keypoints(kpsoi)
        expected = kpsoi
        assert keypoints_equal(observed, expected)
        observed = aug_det.augment_keypoints(kpsoi)
        expected = kpsoi
        assert keypoints_equal(observed, expected)
    def test_sigma_is_tuple(self):
        """A sigma range must vary per call, but stay fixed once deterministic."""
        # varying blur sigmas
        base_img = np.array([[0, 0, 0],
                             [0, 255, 0],
                             [0, 0, 0]], dtype=np.uint8)
        base_img = base_img[:, :, np.newaxis]
        images = np.array([base_img])
        aug = iaa.GaussianBlur(sigma=(0, 1))
        aug_det = aug.to_deterministic()
        last_aug = None
        last_aug_det = None
        nb_changed_aug = 0
        nb_changed_aug_det = 0
        nb_iterations = 1000
        for i in sm.xrange(nb_iterations):
            observed_aug = aug.augment_images(images)
            observed_aug_det = aug_det.augment_images(images)
            if i == 0:
                last_aug = observed_aug
                last_aug_det = observed_aug_det
            else:
                if not np.array_equal(observed_aug, last_aug):
                    nb_changed_aug += 1
                if not np.array_equal(observed_aug_det, last_aug_det):
                    nb_changed_aug_det += 1
                last_aug = observed_aug
                last_aug_det = observed_aug_det
        assert nb_changed_aug >= int(nb_iterations * 0.8)
        assert nb_changed_aug_det == 0
    def test_other_dtypes_bool_at_sigma_0(self):
        # bool
        aug = iaa.GaussianBlur(sigma=0)
        image = np.zeros((3, 3), dtype=bool)
        image[1, 1] = True
        image_aug = aug.augment_image(image)
        assert image_aug.dtype.type == np.bool_
        assert np.all(image_aug == image)
    def test_other_dtypes_uint_int_at_sigma_0(self):
        """sigma=0 must preserve uint/int images exactly."""
        aug = iaa.GaussianBlur(sigma=0)
        dts = [np.uint8, np.uint16, np.uint32,
               np.int8, np.int16, np.int32]
        for dtype in dts:
            _min_value, center_value, _max_value = \
                iadt.get_value_range_of_dtype(dtype)
            image = np.zeros((3, 3), dtype=dtype)
            image[1, 1] = int(center_value)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert np.all(image_aug == image)
    def test_other_dtypes_float_at_sigma_0(self):
        """sigma=0 must preserve float images (up to float tolerance)."""
        aug = iaa.GaussianBlur(sigma=0)
        dts = [np.float16, np.float32, np.float64]
        for dtype in dts:
            _min_value, center_value, _max_value = \
                iadt.get_value_range_of_dtype(dtype)
            image = np.zeros((3, 3), dtype=dtype)
            image[1, 1] = center_value
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert np.allclose(image_aug, image)
    def test_other_dtypes_bool_at_sigma_060(self):
        """Bool blur at sigma=0.6 must threshold the reference kernel at 0.5."""
        # --
        # blur of bool input at sigma=0.6
        # --
        # here we use a special mask and sigma as otherwise the only values
        # ending up with >0.5 would be the ones that
        # were before the blur already at >0.5
        # prototype kernel, generated via:
        # mask = np.zeros((5, 5), dtype=np.float64)
        # mask[1, 0] = 255
        # mask[2, 0] = 255
        # mask[2, 2] = 255
        # mask[2, 4] = 255
        # mask[3, 0] = 255
        # mask = ndimage.gaussian_filter(mask, 1.0, mode="mirror")
        aug = iaa.GaussianBlur(sigma=0.6)
        mask_bool = np.float64([
            [ 57, 14, 2, 1, 1],
            [142, 42, 29, 14, 28],
            [169, 69, 114, 56, 114],
            [142, 42, 29, 14, 28],
            [ 57, 14, 2, 1, 1]
        ]) / 255.0
        image = np.zeros((5, 5), dtype=bool)
        image[1, 0] = True
        image[2, 0] = True
        image[2, 2] = True
        image[2, 4] = True
        image[3, 0] = True
        image_aug = aug.augment_image(image)
        expected = mask_bool > 0.5
        assert image_aug.shape == mask_bool.shape
        assert image_aug.dtype.type == np.bool_
        assert np.all(image_aug == expected)
    def test_other_dtypes_at_sigma_1(self):
        # --
        # blur of various dtypes at sigma=1.0
        # and using an example value of 100 for int/uint/float and True for
        # bool
        # --
        # prototype kernel, generated via:
        # mask = np.zeros((5, 5), dtype=np.float64)
        # mask[2, 2] = 100
        # mask = ndimage.gaussian_filter(mask, 1.0, mode="mirror")
        aug = iaa.GaussianBlur(sigma=1.0)
        mask = np.float64([
            [1, 2, 3, 2, 1],
            [2, 5, 9, 5, 2],
            [4, 9, 15, 9, 4],
            [2, 5, 9, 5, 2],
            [1, 2, 3, 2, 1]
        ])
        # uint, int
        uint_dts = [np.uint8, np.uint16, np.uint32]
        int_dts = [np.int8, np.int16, np.int32]
        for dtype in uint_dts + int_dts:
            image = np.zeros((5, 5), dtype=dtype)
            image[2, 2] = 100
            image_aug = aug.augment_image(image)
            expected = mask.astype(dtype)
            diff = np.abs(image_aug.astype(np.int64)
                          - expected.astype(np.int64))
            assert image_aug.shape == mask.shape
            assert image_aug.dtype.type == dtype
            assert np.max(diff) <= 4
            assert np.average(diff) <= 2
        # float
        float_dts = [np.float16, np.float32, np.float64]
        for dtype in float_dts:
            image = np.zeros((5, 5), dtype=dtype)
            image[2, 2] = 100.0
            image_aug = aug.augment_image(image)
            expected = mask.astype(dtype)
            diff = np.abs(image_aug.astype(np.float128)
                          - expected.astype(np.float128))
            assert image_aug.shape == mask.shape
            assert image_aug.dtype.type == dtype
            assert np.max(diff) < 4
            assert np.average(diff) < 2.0
    def test_other_dtypes_at_sigma_040(self):
        # --
        # blur of various dtypes at sigma=0.4
        # and using an example value of 100 for int/uint/float and True for
        # bool
        # --
        aug = iaa.GaussianBlur(sigma=0.4)
        # prototype kernel, generated via:
        # mask = np.zeros((5, 5), dtype=np.uint8)
        # mask[2, 2] = 100
        # kernel = ndimage.gaussian_filter(mask, 0.4, mode="mirror")
        mask = np.float64([
            [0, 0, 0, 0, 0],
            [0, 0, 3, 0, 0],
            [0, 3, 83, 3, 0],
            [0, 0, 3, 0, 0],
            [0, 0, 0, 0, 0]
        ])
        # uint, int
        uint_dts = [np.uint8, np.uint16, np.uint32]
        int_dts = [np.int8, np.int16, np.int32]
        for dtype in uint_dts + int_dts:
            image = np.zeros((5, 5), dtype=dtype)
            image[2, 2] = 100
            image_aug = aug.augment_image(image)
            expected = mask.astype(dtype)
            diff = np.abs(image_aug.astype(np.int64)
                          - expected.astype(np.int64))
            assert image_aug.shape == mask.shape
            assert image_aug.dtype.type == dtype
            assert np.max(diff) <= 4
        # float
        float_dts = [np.float16, np.float32, np.float64]
        for dtype in float_dts:
            image = np.zeros((5, 5), dtype=dtype)
            image[2, 2] = 100.0
            image_aug = aug.augment_image(image)
            expected = mask.astype(dtype)
            diff = np.abs(image_aug.astype(np.float128)
                          - expected.astype(np.float128))
            assert image_aug.shape == mask.shape
            assert image_aug.dtype.type == dtype
            assert np.max(diff) < 4.0
    def test_other_dtypes_at_sigma_075(self):
        # --
        # blur of various dtypes at sigma=0.75
        # and values being half-way between center and maximum for each dtype
        # The goal of this test is to verify that no major loss of resolution
        # happens for large dtypes.
        # Such inaccuracies appear for float64 if used.
        # --
        aug = iaa.GaussianBlur(sigma=0.75)
        # prototype kernel, generated via:
        # mask = np.zeros((5, 5), dtype=np.int32)
        # mask[2, 2] = 1000 * 1000
        # kernel = ndimage.gaussian_filter(mask, 0.75)
        mask = np.float64([
            [ 923, 6650, 16163, 6650, 923],
            [ 6650, 47896, 116408, 47896, 6650],
            [ 16163, 116408, 282925, 116408, 16163],
            [ 6650, 47896, 116408, 47896, 6650],
            [ 923, 6650, 16163, 6650, 923]
        ]) / (1000.0 * 1000.0)
        # uint, int
        uint_dts = [np.uint8, np.uint16, np.uint32]
        int_dts = [np.int8, np.int16, np.int32]
        for dtype in uint_dts + int_dts:
            min_value, center_value, max_value = \
                iadt.get_value_range_of_dtype(dtype)
            dynamic_range = max_value - min_value
            value = int(center_value + 0.4 * max_value)
            image = np.zeros((5, 5), dtype=dtype)
            image[2, 2] = value
            image_aug = aug.augment_image(image)
            expected = (mask * value).astype(dtype)
            diff = np.abs(image_aug.astype(np.int64)
                          - expected.astype(np.int64))
            assert image_aug.shape == mask.shape
            assert image_aug.dtype.type == dtype
            if np.dtype(dtype).itemsize <= 1:
                assert np.max(diff) <= 4
            else:
                assert np.max(diff) <= 0.01 * dynamic_range
        # float
        float_dts = [np.float16, np.float32, np.float64]
        values = [5000, 1000*1000, 1000*1000*1000]
        for dtype, value in zip(float_dts, values):
            image = np.zeros((5, 5), dtype=dtype)
            image[2, 2] = value
            image_aug = aug.augment_image(image)
            expected = (mask * value).astype(dtype)
            diff = np.abs(image_aug.astype(np.float128)
                          - expected.astype(np.float128))
            assert image_aug.shape == mask.shape
            assert image_aug.dtype.type == dtype
            # accepts difference of 2.0, 4.0, 8.0, 16.0 (at 1, 2, 4, 8 bytes,
            # i.e. 8, 16, 32, 64 bit)
            max_diff = np.dtype(dtype).itemsize * 0.01 * np.float128(value)
            assert np.max(diff) < max_diff
    def test_failure_on_invalid_dtypes(self):
        """float128 input must raise a "forbidden dtype" error."""
        # assert failure on invalid dtypes
        aug = iaa.GaussianBlur(sigma=1.0)
        for dt in [np.float128]:
            got_exception = False
            try:
                _ = aug.augment_image(np.zeros((1, 1), dtype=dt))
            except Exception as exc:
                assert "forbidden dtype" in str(exc)
                got_exception = True
            assert got_exception
class TestAverageBlur(unittest.TestCase):
    """Tests for iaa.AverageBlur (box-filter blur, backed by cv2.blur)."""
    def __init__(self, *args, **kwargs):
        super(TestAverageBlur, self).__init__(*args, **kwargs)
        # base image: bright center pixel (200) with a plus-shaped
        # neighborhood of value 100 on an otherwise black 11x11 canvas
        base_img = np.zeros((11, 11, 1), dtype=np.uint8)
        base_img[5, 5, 0] = 200
        base_img[4, 5, 0] = 100
        base_img[6, 5, 0] = 100
        base_img[5, 4, 0] = 100
        base_img[5, 6, 0] = 100
        # precomputed expected result of a 3x3 box blur of base_img
        blur3x3 = [
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 11, 11, 11, 0, 0, 0, 0],
            [0, 0, 0, 11, 44, 56, 44, 11, 0, 0, 0],
            [0, 0, 0, 11, 56, 67, 56, 11, 0, 0, 0],
            [0, 0, 0, 11, 44, 56, 44, 11, 0, 0, 0],
            [0, 0, 0, 0, 11, 11, 11, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        ]
        blur3x3 = np.array(blur3x3, dtype=np.uint8)[..., np.newaxis]
        # precomputed expected result of a 4x4 box blur of base_img
        blur4x4 = [
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 6, 6, 6, 6, 0, 0, 0],
            [0, 0, 0, 6, 25, 31, 31, 25, 6, 0, 0],
            [0, 0, 0, 6, 31, 38, 38, 31, 6, 0, 0],
            [0, 0, 0, 6, 31, 38, 38, 31, 6, 0, 0],
            [0, 0, 0, 6, 25, 31, 31, 25, 6, 0, 0],
            [0, 0, 0, 0, 6, 6, 6, 6, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        ]
        blur4x4 = np.array(blur4x4, dtype=np.uint8)[..., np.newaxis]
        # precomputed expected result of a 5x5 box blur of base_img
        blur5x5 = [
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 4, 4, 4, 4, 4, 0, 0, 0],
            [0, 0, 4, 16, 20, 20, 20, 16, 4, 0, 0],
            [0, 0, 4, 20, 24, 24, 24, 20, 4, 0, 0],
            [0, 0, 4, 20, 24, 24, 24, 20, 4, 0, 0],
            [0, 0, 4, 20, 24, 24, 24, 20, 4, 0, 0],
            [0, 0, 4, 16, 20, 20, 20, 16, 4, 0, 0],
            [0, 0, 0, 4, 4, 4, 4, 4, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        ]
        blur5x5 = np.array(blur5x5, dtype=np.uint8)[..., np.newaxis]
        self.base_img = base_img
        self.blur3x3 = blur3x3
        self.blur4x4 = blur4x4
        self.blur5x5 = blur5x5
    def setUp(self):
        # re-seed global RNG so each test is deterministic
        reseed()
    def test_kernel_size_0(self):
        """k=0 must be a no-op."""
        # no blur, shouldnt change anything
        aug = iaa.AverageBlur(k=0)
        observed = aug.augment_image(self.base_img)
        assert np.array_equal(observed, self.base_img)
    def test_kernel_size_3(self):
        """k=3 must produce the precomputed 3x3 box-blur result."""
        # k=3
        aug = iaa.AverageBlur(k=3)
        observed = aug.augment_image(self.base_img)
        assert np.array_equal(observed, self.blur3x3)
    def test_kernel_size_5(self):
        """k=5 must produce the precomputed 5x5 box-blur result."""
        # k=5
        aug = iaa.AverageBlur(k=5)
        observed = aug.augment_image(self.base_img)
        assert np.array_equal(observed, self.blur5x5)
    def test_kernel_size_is_tuple(self):
        """k=(3, 4) must sample both kernel sizes roughly uniformly."""
        # k as (3, 4)
        aug = iaa.AverageBlur(k=(3, 4))
        nb_iterations = 100
        nb_seen = [0, 0]
        for i in sm.xrange(nb_iterations):
            observed = aug.augment_image(self.base_img)
            if np.array_equal(observed, self.blur3x3):
                nb_seen[0] += 1
            elif np.array_equal(observed, self.blur4x4):
                nb_seen[1] += 1
            else:
                raise Exception("Unexpected result in AverageBlur@1")
        p_seen = [v/nb_iterations for v in nb_seen]
        # expect roughly 50/50 split between the two kernel sizes
        assert 0.4 <= p_seen[0] <= 0.6
        assert 0.4 <= p_seen[1] <= 0.6
    def test_kernel_size_is_tuple_with_wider_range(self):
        """k=(3, 5) must sample all three kernel sizes roughly uniformly."""
        # k as (3, 5)
        aug = iaa.AverageBlur(k=(3, 5))
        nb_iterations = 200
        nb_seen = [0, 0, 0]
        for i in sm.xrange(nb_iterations):
            observed = aug.augment_image(self.base_img)
            if np.array_equal(observed, self.blur3x3):
                nb_seen[0] += 1
            elif np.array_equal(observed, self.blur4x4):
                nb_seen[1] += 1
            elif np.array_equal(observed, self.blur5x5):
                nb_seen[2] += 1
            else:
                raise Exception("Unexpected result in AverageBlur@2")
        p_seen = [v/nb_iterations for v in nb_seen]
        # each of the three sizes should appear in roughly a third of cases
        assert 0.23 <= p_seen[0] <= 0.43
        assert 0.23 <= p_seen[1] <= 0.43
        assert 0.23 <= p_seen[2] <= 0.43
    def test_kernel_size_is_stochastic_parameter(self):
        """k given as iap.Choice([3, 5]) must sample only those sizes."""
        # k as stochastic parameter
        aug = iaa.AverageBlur(k=iap.Choice([3, 5]))
        nb_iterations = 100
        nb_seen = [0, 0]
        for i in sm.xrange(nb_iterations):
            observed = aug.augment_image(self.base_img)
            if np.array_equal(observed, self.blur3x3):
                nb_seen[0] += 1
            elif np.array_equal(observed, self.blur5x5):
                nb_seen[1] += 1
            else:
                raise Exception("Unexpected result in AverageBlur@3")
        p_seen = [v/nb_iterations for v in nb_seen]
        assert 0.4 <= p_seen[0] <= 0.6
        assert 0.4 <= p_seen[1] <= 0.6
    def test_kernel_size_is_tuple_of_tuples(self):
        """k=((3, 5), (3, 5)) samples height/width independently; every
        (kh, kw) combination must occur at least once."""
        # k as ((3, 5), (3, 5))
        aug = iaa.AverageBlur(k=((3, 5), (3, 5)))
        possible = dict()
        for kh in [3, 4, 5]:
            for kw in [3, 4, 5]:
                key = (kh, kw)
                if kh == 0 or kw == 0:
                    possible[key] = np.copy(self.base_img)
                else:
                    possible[key] = cv2.blur(
                        self.base_img, (kh, kw))[..., np.newaxis]
        nb_iterations = 250
        nb_seen = dict([(key, 0) for key, val in possible.items()])
        for i in sm.xrange(nb_iterations):
            observed = aug.augment_image(self.base_img)
            for key, img_aug in possible.items():
                if np.array_equal(observed, img_aug):
                    nb_seen[key] += 1
        # dont check sum here, because 0xX and Xx0 are all the same, i.e. much
        # higher sum than nb_iterations
        assert np.all([v > 0 for v in nb_seen.values()])
    def test_more_than_four_channels(self):
        """Images with >4 channels must keep their shape."""
        shapes = [
            (1, 1, 4),
            (1, 1, 5),
            (1, 1, 512),
            (1, 1, 513)
        ]
        for shape in shapes:
            with self.subTest(shape=shape):
                image = np.zeros(shape, dtype=np.uint8)
                image_aug = iaa.AverageBlur(k=3)(image=image)
                assert image_aug.shape == image.shape
    def test_zero_sized_axes(self):
        """Images with a zero-sized axis must pass through unchanged in shape."""
        shapes = [
            (0, 0),
            (0, 1),
            (1, 0),
            (0, 1, 0),
            (1, 0, 0),
            (0, 1, 1),
            (1, 0, 1)
        ]
        for shape in shapes:
            with self.subTest(shape=shape):
                image = np.zeros(shape, dtype=np.uint8)
                image_aug = iaa.AverageBlur(k=3)(image=image)
                assert image_aug.shape == image.shape
    def test_keypoints_dont_change(self):
        """Blur is a pure pixel operation; keypoints must stay untouched."""
        kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
               ia.Keypoint(x=2, y=2)]
        kpsoi = [ia.KeypointsOnImage(kps, shape=(11, 11, 1))]
        aug = iaa.AverageBlur(k=3)
        aug_det = aug.to_deterministic()
        observed = aug.augment_keypoints(kpsoi)
        expected = kpsoi
        assert keypoints_equal(observed, expected)
        observed = aug_det.augment_keypoints(kpsoi)
        expected = kpsoi
        assert keypoints_equal(observed, expected)
    def test_other_dtypes_k0(self):
        """k=0 must be an exact no-op for bool, uint/int and float dtypes."""
        aug = iaa.AverageBlur(k=0)
        # bool
        image = np.zeros((3, 3), dtype=bool)
        image[1, 1] = True
        image[2, 2] = True
        image_aug = aug.augment_image(image)
        assert image_aug.dtype.type == np.bool_
        assert np.all(image_aug == image)
        # uint, int
        uint_dts = [np.uint8, np.uint16]
        int_dts = [np.int8, np.int16]
        for dtype in uint_dts + int_dts:
            _min_value, center_value, max_value = \
                iadt.get_value_range_of_dtype(dtype)
            image = np.zeros((3, 3), dtype=dtype)
            image[1, 1] = int(center_value + 0.4 * max_value)
            image[2, 2] = int(center_value + 0.4 * max_value)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert np.all(image_aug == image)
        # float
        float_dts = [np.float16, np.float32, np.float64]
        values = [5000, 1000*1000, 1000*1000*1000]
        for dtype, value in zip(float_dts, values):
            image = np.zeros((3, 3), dtype=dtype)
            image[1, 1] = value
            image[2, 2] = value
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert np.allclose(image_aug, image)
    def test_other_dtypes_k3_value_100(self):
        """k=3 must match the analytic box-blur mask for a fixed input
        value of 100 (True for bool), per dtype."""
        # --
        # blur of various dtypes at k=3
        # and using an example value of 100 for int/uint/float and True for
        # bool
        # --
        aug = iaa.AverageBlur(k=3)
        # prototype mask
        # we place values in a 3x3 grid at positions (row=1, col=1) and
        # (row=2, col=2) (beginning with 0)
        # AverageBlur uses cv2.blur(), which uses BORDER_REFLECT_101 as its
        # default padding mode,
        # see https://docs.opencv.org/3.1.0/d2/de8/group__core__array.html
        # the matrix below shows the 3x3 grid and the padded row/col values
        # around it
        # [1, 0, 1, 0, 1]
        # [0, 0, 0, 0, 0]
        # [1, 0, 1, 0, 1]
        # [0, 0, 0, 1, 0]
        # [1, 0, 1, 0, 1]
        mask = np.float64([
            [4/9, 2/9, 4/9],
            [2/9, 2/9, 3/9],
            [4/9, 3/9, 5/9]
        ])
        # bool
        image = np.zeros((3, 3), dtype=bool)
        image[1, 1] = True
        image[2, 2] = True
        image_aug = aug.augment_image(image)
        expected = mask > 0.5
        assert image_aug.dtype.type == np.bool_
        assert np.all(image_aug == expected)
        # uint, int
        uint_dts = [np.uint8, np.uint16]
        int_dts = [np.int8, np.int16]
        for dtype in uint_dts + int_dts:
            image = np.zeros((3, 3), dtype=dtype)
            image[1, 1] = 100
            image[2, 2] = 100
            image_aug = aug.augment_image(image)
            # cv2.blur() applies rounding for int/uint dtypes
            expected = np.round(mask * 100).astype(dtype)
            diff = np.abs(image_aug.astype(np.int64)
                          - expected.astype(np.int64))
            assert image_aug.dtype.type == dtype
            assert np.max(diff) <= 2
        # float
        float_dts = [np.float16, np.float32, np.float64]
        for dtype in float_dts:
            image = np.zeros((3, 3), dtype=dtype)
            image[1, 1] = 100.0
            image[2, 2] = 100.0
            image_aug = aug.augment_image(image)
            expected = (mask * 100.0).astype(dtype)
            diff = np.abs(image_aug.astype(np.float128)
                          - expected.astype(np.float128))
            assert image_aug.dtype.type == dtype
            assert np.max(diff) < 1.0
    def test_other_dtypes_k3_dynamic_value(self):
        """k=3 with values near each dtype's maximum: verify no major
        loss of resolution for wide dtypes."""
        # --
        # blur of various dtypes at k=3
        # and values being half-way between center and maximum for each
        # dtype (bool is skipped as it doesnt make any sense here)
        # The goal of this test is to verify that no major loss of resolution
        # happens for large dtypes.
        # --
        aug = iaa.AverageBlur(k=3)
        # prototype mask (see above)
        mask = np.float64([
            [4/9, 2/9, 4/9],
            [2/9, 2/9, 3/9],
            [4/9, 3/9, 5/9]
        ])
        # uint, int
        uint_dts = [np.uint8, np.uint16]
        int_dts = [np.int8, np.int16]
        for dtype in uint_dts + int_dts:
            _min_value, center_value, max_value = \
                iadt.get_value_range_of_dtype(dtype)
            value = int(center_value + 0.4 * max_value)
            image = np.zeros((3, 3), dtype=dtype)
            image[1, 1] = value
            image[2, 2] = value
            image_aug = aug.augment_image(image)
            expected = (mask * value).astype(dtype)
            diff = np.abs(image_aug.astype(np.int64)
                          - expected.astype(np.int64))
            assert image_aug.dtype.type == dtype
            # accepts difference of 4, 8, 16 (at 1, 2, 4 bytes, i.e. 8, 16,
            # 32 bit)
            assert np.max(diff) <= 2**(1 + np.dtype(dtype).itemsize)
        # float
        float_dts = [np.float16, np.float32, np.float64]
        values = [5000, 1000*1000, 1000*1000*1000]
        for dtype, value in zip(float_dts, values):
            image = np.zeros((3, 3), dtype=dtype)
            image[1, 1] = value
            image[2, 2] = value
            image_aug = aug.augment_image(image)
            expected = (mask * value).astype(dtype)
            diff = np.abs(image_aug.astype(np.float128)
                          - expected.astype(np.float128))
            assert image_aug.dtype.type == dtype
            # accepts difference of 2.0, 4.0, 8.0, 16.0 (at 1, 2, 4, 8 bytes,
            # i.e. 8, 16, 32, 64 bit)
            assert np.max(diff) < 2**(1 + np.dtype(dtype).itemsize)
    def test_failure_on_invalid_dtypes(self):
        """32/64-bit integer dtypes are forbidden and must raise."""
        # assert failure on invalid dtypes
        aug = iaa.AverageBlur(k=3)
        for dt in [np.uint32, np.uint64, np.int32, np.int64]:
            got_exception = False
            try:
                _ = aug.augment_image(np.zeros((1, 1), dtype=dt))
            except Exception as exc:
                assert "forbidden dtype" in str(exc)
                got_exception = True
            assert got_exception
class TestMedianBlur(unittest.TestCase):
    """Tests for iaa.MedianBlur.

    Bugfix: ``test_k_is_stochastic_parameter`` previously used
    ``seen[i] += True`` (int accumulation) while its sibling
    ``test_k_is_tuple`` used plain assignment; both now assign ``True``
    consistently. The observable outcome of the test is unchanged.
    """
    def __init__(self, *args, **kwargs):
        super(TestMedianBlur, self).__init__(*args, **kwargs)
        # base image: concentric squares of values 1, 2, 3 on black
        base_img = np.zeros((11, 11, 1), dtype=np.uint8)
        base_img[3:8, 3:8, 0] = 1
        base_img[4:7, 4:7, 0] = 2
        base_img[5:6, 5:6, 0] = 3
        # expected result of a 3x3 median blur of base_img
        blur3x3 = np.zeros_like(base_img)
        blur3x3[3:8, 3:8, 0] = 1
        blur3x3[4:7, 4:7, 0] = 2
        blur3x3[4, 4, 0] = 1
        blur3x3[4, 6, 0] = 1
        blur3x3[6, 4, 0] = 1
        blur3x3[6, 6, 0] = 1
        blur3x3[3, 3, 0] = 0
        blur3x3[3, 7, 0] = 0
        blur3x3[7, 3, 0] = 0
        blur3x3[7, 7, 0] = 0
        # expected result of a 5x5 median blur, derived from the 3x3 one
        blur5x5 = np.copy(blur3x3)
        blur5x5[4, 3, 0] = 0
        blur5x5[3, 4, 0] = 0
        blur5x5[6, 3, 0] = 0
        blur5x5[7, 4, 0] = 0
        blur5x5[4, 7, 0] = 0
        blur5x5[3, 6, 0] = 0
        blur5x5[6, 7, 0] = 0
        blur5x5[7, 6, 0] = 0
        blur5x5[blur5x5 > 1] = 1
        self.base_img = base_img
        self.blur3x3 = blur3x3
        self.blur5x5 = blur5x5
    def setUp(self):
        # re-seed global RNG so each test is deterministic
        reseed()
    def test_k_is_1(self):
        """k=1 must be a no-op."""
        # no blur, shouldnt change anything
        aug = iaa.MedianBlur(k=1)
        observed = aug.augment_image(self.base_img)
        assert np.array_equal(observed, self.base_img)
    def test_k_is_3(self):
        """k=3 must produce the precomputed 3x3 median result."""
        # k=3
        aug = iaa.MedianBlur(k=3)
        observed = aug.augment_image(self.base_img)
        assert np.array_equal(observed, self.blur3x3)
    def test_k_is_5(self):
        """k=5 must produce the precomputed 5x5 median result."""
        # k=5
        aug = iaa.MedianBlur(k=5)
        observed = aug.augment_image(self.base_img)
        assert np.array_equal(observed, self.blur5x5)
    def test_k_is_tuple(self):
        """k=(3, 5) must eventually produce both kernel sizes.

        Note: MedianBlur only samples odd kernel sizes, hence only the
        3x3 and 5x5 expectations are valid outcomes here.
        """
        # k as (3, 5)
        aug = iaa.MedianBlur(k=(3, 5))
        seen = [False, False]
        for i in sm.xrange(100):
            observed = aug.augment_image(self.base_img)
            if np.array_equal(observed, self.blur3x3):
                seen[0] = True
            elif np.array_equal(observed, self.blur5x5):
                seen[1] = True
            else:
                raise Exception("Unexpected result in MedianBlur@1")
            if all(seen):
                break
        assert np.all(seen)
    def test_k_is_stochastic_parameter(self):
        """k as iap.Choice([3, 5]) must eventually produce both sizes."""
        # k as stochastic parameter
        aug = iaa.MedianBlur(k=iap.Choice([3, 5]))
        seen = [False, False]
        for i in sm.xrange(100):
            observed = aug.augment_image(self.base_img)
            if np.array_equal(observed, self.blur3x3):
                # was `seen[0] += True`; assignment matches test_k_is_tuple
                seen[0] = True
            elif np.array_equal(observed, self.blur5x5):
                seen[1] = True
            else:
                raise Exception("Unexpected result in MedianBlur@2")
            if all(seen):
                break
        assert np.all(seen)
    def test_more_than_four_channels(self):
        """Images with >4 channels must keep their shape."""
        shapes = [
            (1, 1, 4),
            (1, 1, 5),
            (1, 1, 512),
            (1, 1, 513)
        ]
        for shape in shapes:
            with self.subTest(shape=shape):
                image = np.zeros(shape, dtype=np.uint8)
                image_aug = iaa.MedianBlur(k=3)(image=image)
                assert image_aug.shape == image.shape
    def test_zero_sized_axes(self):
        """Images with a zero-sized axis must keep their shape."""
        shapes = [
            (0, 0),
            (0, 1),
            (1, 0),
            (0, 1, 0),
            (1, 0, 0),
            (0, 1, 1),
            (1, 0, 1)
        ]
        for shape in shapes:
            with self.subTest(shape=shape):
                image = np.zeros(shape, dtype=np.uint8)
                image_aug = iaa.MedianBlur(k=3)(image=image)
                assert image_aug.shape == image.shape
    def test_keypoints_not_changed(self):
        """Blur is a pure pixel operation; keypoints must stay untouched."""
        kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
               ia.Keypoint(x=2, y=2)]
        kpsoi = [ia.KeypointsOnImage(kps, shape=(11, 11, 1))]
        aug = iaa.MedianBlur(k=3)
        aug_det = aug.to_deterministic()
        observed = aug.augment_keypoints(kpsoi)
        expected = kpsoi
        assert keypoints_equal(observed, expected)
        observed = aug_det.augment_keypoints(kpsoi)
        expected = kpsoi
        assert keypoints_equal(observed, expected)
# TODO extend these tests
class TestBilateralBlur(unittest.TestCase):
    """Minimal checks for iaa.BilateralBlur."""
    def setUp(self):
        # re-seed global RNG so the test is deterministic
        reseed()
    def test_zero_sized_axes(self):
        """3-channel images with a zero-sized spatial axis must keep their shape."""
        for height, width in [(0, 0), (0, 1), (1, 0)]:
            shape = (height, width, 3)
            with self.subTest(shape=shape):
                arr = np.zeros(shape, dtype=np.uint8)
                result = iaa.BilateralBlur(3)(image=arr)
                assert result.shape == shape
class TestMotionBlur(unittest.TestCase):
    """Tests for iaa.MotionBlur: verifies the generated convolution
    matrices for various k/angle/direction settings and one end-to-end
    augmentation result."""
    def setUp(self):
        # re-seed global RNG so each test is deterministic
        reseed()
    def test_simple_parameters(self):
        """k=3, angle=0, direction=0 -> vertical line kernel, uniform weights."""
        # simple scenario
        aug = iaa.MotionBlur(k=3, angle=0, direction=0.0)
        matrix_func = aug.matrix
        matrices = [
            matrix_func(
                np.zeros((128, 128, 3), dtype=np.uint8),
                3,
                iarandom.RNG(i)
            ) for i in range(10)
        ]
        expected = np.float32([
            [0, 1.0/3, 0],
            [0, 1.0/3, 0],
            [0, 1.0/3, 0]
        ])
        for matrices_image in matrices:
            for matrix_channel in matrices_image:
                assert np.allclose(matrix_channel, expected)
    def test_simple_parameters_angle_is_90(self):
        """angle=90 -> horizontal line kernel."""
        # 90deg angle
        aug = iaa.MotionBlur(k=3, angle=90, direction=0.0)
        matrix_func = aug.matrix
        matrices = [
            matrix_func(
                np.zeros((128, 128, 3), dtype=np.uint8),
                3,
                iarandom.RNG(i)
            ) for i in range(10)
        ]
        expected = np.float32([
            [0, 0, 0],
            [1.0/3, 1.0/3, 1.0/3],
            [0, 0, 0]
        ])
        for matrices_image in matrices:
            for matrix_channel in matrices_image:
                assert np.allclose(matrix_channel, expected)
    def test_simple_parameters_angle_is_45(self):
        """angle=45 (order=0) -> anti-diagonal line kernel."""
        # 45deg angle
        aug = iaa.MotionBlur(k=3, angle=45, direction=0.0, order=0)
        matrix_func = aug.matrix
        matrices = [
            matrix_func(
                np.zeros((128, 128, 3), dtype=np.uint8),
                3,
                iarandom.RNG(i)
            ) for i in range(10)
        ]
        expected = np.float32([
            [0, 0, 1.0/3],
            [0, 1.0/3, 0],
            [1.0/3, 0, 0]
        ])
        for matrices_image in matrices:
            for matrix_channel in matrices_image:
                assert np.allclose(matrix_channel, expected)
    def test_simple_parameters_angle_is_list(self):
        """angle=[0, 90] must randomly produce both kernels, identical
        across the channels of a single image."""
        # random angle
        aug = iaa.MotionBlur(k=3, angle=[0, 90], direction=0.0)
        matrix_func = aug.matrix
        matrices = [
            matrix_func(
                np.zeros((128, 128, 3), dtype=np.uint8),
                3,
                iarandom.RNG(i)
            ) for i in range(50)
        ]
        expected1 = np.float32([
            [0, 1.0/3, 0],
            [0, 1.0/3, 0],
            [0, 1.0/3, 0]
        ])
        expected2 = np.float32([
            [0, 0, 0],
            [1.0/3, 1.0/3, 1.0/3],
            [0, 0, 0],
        ])
        nb_seen = [0, 0]
        for matrices_image in matrices:
            # all channels of one image must share the same kernel
            assert np.allclose(matrices_image[0], matrices_image[1])
            assert np.allclose(matrices_image[1], matrices_image[2])
            for matrix_channel in matrices_image:
                if np.allclose(matrix_channel, expected1):
                    nb_seen[0] += 1
                elif np.allclose(matrix_channel, expected2):
                    nb_seen[1] += 1
        assert nb_seen[0] > 0
        assert nb_seen[1] > 0
    def test_k_is_5_angle_90(self):
        """k=5, angle=90 -> 5-wide horizontal line kernel."""
        # 5x5
        aug = iaa.MotionBlur(k=5, angle=90, direction=0.0)
        matrix_func = aug.matrix
        matrices = [
            matrix_func(
                np.zeros((128, 128, 3), dtype=np.uint8),
                3,
                iarandom.RNG(i)
            ) for i in range(10)
        ]
        expected = np.float32([
            [0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0],
            [1.0/5, 1.0/5, 1.0/5, 1.0/5, 1.0/5],
            [0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0],
        ])
        for matrices_image in matrices:
            for matrix_channel in matrices_image:
                assert np.allclose(matrix_channel, expected)
    def test_k_is_list_angle_90(self):
        """k=[3, 5] must randomly produce both kernel sizes."""
        # random k
        aug = iaa.MotionBlur(k=[3, 5], angle=90, direction=0.0)
        matrix_func = aug.matrix
        matrices = [
            matrix_func(
                np.zeros((128, 128, 3), dtype=np.uint8),
                3,
                iarandom.RNG(i)
            ) for i in range(50)
        ]
        expected1 = np.float32([
            [0, 0, 0],
            [1.0/3, 1.0/3, 1.0/3],
            [0, 0, 0],
        ])
        expected2 = np.float32([
            [0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0],
            [1.0/5, 1.0/5, 1.0/5, 1.0/5, 1.0/5],
            [0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0],
        ])
        nb_seen = [0, 0]
        for matrices_image in matrices:
            assert np.allclose(matrices_image[0], matrices_image[1])
            assert np.allclose(matrices_image[1], matrices_image[2])
            for matrix_channel in matrices_image:
                if (matrix_channel.shape == expected1.shape
                        and np.allclose(matrix_channel, expected1)):
                    nb_seen[0] += 1
                elif (matrix_channel.shape == expected2.shape
                        and np.allclose(matrix_channel, expected2)):
                    nb_seen[1] += 1
        assert nb_seen[0] > 0
        assert nb_seen[1] > 0
    def test_failure_on_continuous_kernel_sizes(self):
        """Non-integer entries in a k choice list must raise."""
        # k with choice [a, b, c, ...] must error in case of non-discrete
        # values
        got_exception = False
        try:
            _ = iaa.MotionBlur(k=[3, 3.5, 4])
        except Exception as exc:
            assert "to only contain integer" in str(exc)
            got_exception = True
        assert got_exception
    # TODO extend this to test sampled kernel sizes
    def test_k_is_tuple(self):
        """k=(a, b) must construct and augment without error (regression #215)."""
        # no error in case of (a, b), checks for #215
        aug = iaa.MotionBlur(k=(3, 7))
        for _ in range(10):
            _ = aug.augment_image(np.zeros((11, 11, 3), dtype=np.uint8))
    def test_direction_is_1(self):
        """direction=1.0 weights the kernel toward one end of the line."""
        # direction 1.0
        aug = iaa.MotionBlur(k=3, angle=0, direction=1.0)
        matrix_func = aug.matrix
        matrices = [
            matrix_func(
                np.zeros((128, 128, 3), dtype=np.uint8),
                3,
                iarandom.RNG(i)
            ) for i in range(10)
        ]
        expected = np.float32([
            [0, 1.0/1.5, 0],
            [0, 0.5/1.5, 0],
            [0, 0.0/1.5, 0]
        ])
        for matrices_image in matrices:
            for matrix_channel in matrices_image:
                assert np.allclose(matrix_channel, expected, rtol=0, atol=1e-2)
    def test_direction_is_minus_1(self):
        """direction=-1.0 mirrors the weighting of direction=1.0."""
        # direction -1.0
        aug = iaa.MotionBlur(k=3, angle=0, direction=-1.0)
        matrix_func = aug.matrix
        matrices = [
            matrix_func(
                np.zeros((128, 128, 3), dtype=np.uint8),
                3,
                iarandom.RNG(i)
            ) for i in range(10)
        ]
        expected = np.float32([
            [0, 0.0/1.5, 0],
            [0, 0.5/1.5, 0],
            [0, 1.0/1.5, 0]
        ])
        for matrices_image in matrices:
            for matrix_channel in matrices_image:
                assert np.allclose(matrix_channel, expected, rtol=0, atol=1e-2)
    def test_direction_is_list(self):
        """direction=[-1.0, 1.0] must randomly produce both weightings."""
        # random direction
        aug = iaa.MotionBlur(k=3, angle=[0, 90], direction=[-1.0, 1.0])
        matrix_func = aug.matrix
        matrices = [
            matrix_func(
                np.zeros((128, 128, 3), dtype=np.uint8),
                3,
                iarandom.RNG(i)
            ) for i in range(50)
        ]
        expected1 = np.float32([
            [0, 1.0/1.5, 0],
            [0, 0.5/1.5, 0],
            [0, 0.0/1.5, 0]
        ])
        expected2 = np.float32([
            [0, 0.0/1.5, 0],
            [0, 0.5/1.5, 0],
            [0, 1.0/1.5, 0]
        ])
        nb_seen = [0, 0]
        for matrices_image in matrices:
            assert np.allclose(matrices_image[0], matrices_image[1])
            assert np.allclose(matrices_image[1], matrices_image[2])
            for matrix_channel in matrices_image:
                if np.allclose(matrix_channel, expected1, rtol=0, atol=1e-2):
                    nb_seen[0] += 1
                elif np.allclose(matrix_channel, expected2, rtol=0, atol=1e-2):
                    nb_seen[1] += 1
        assert nb_seen[0] > 0
        assert nb_seen[1] > 0
    def test_k_is_3_angle_is_90_verify_results(self):
        """End-to-end check: horizontal motion blur of a white square."""
        # test of actual augmenter
        img = np.zeros((7, 7, 3), dtype=np.uint8)
        img[3-1:3+2, 3-1:3+2, :] = 255
        aug = iaa.MotionBlur(k=3, angle=90, direction=0.0)
        img_aug = aug.augment_image(img)
        # expected intensities: 1/3, 2/3 and 3/3 of the kernel overlapping
        # the white square
        v1 = (255*(1/3))
        v2 = (255*(1/3)) * 2
        v3 = (255*(1/3)) * 3
        expected = np.float32([
            [0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0],
            [0, v1, v2, v3, v2, v1, 0],
            [0, v1, v2, v3, v2, v1, 0],
            [0, v1, v2, v3, v2, v1, 0],
            [0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0]
        ]).astype(np.uint8)
        expected = np.tile(expected[..., np.newaxis], (1, 1, 3))
        assert np.allclose(img_aug, expected)
class TestMeanShiftBlur(unittest.TestCase):
    """Tests for iaa.MeanShiftBlur.

    Bugfix: ``test_draw_samples`` previously combined its two range
    checks with ``|``, i.e. ``(1.0 <= x) | (x <= 2.0)``, which is true
    for every real number and therefore could never fail. It now uses
    ``&`` so sampled color radii must actually lie within [1.0, 2.0].
    """
    def setUp(self):
        # re-seed global RNG so each test is deterministic
        reseed()
    def test___init___defaults(self):
        """Default spatial/color radius parameters have endpoints 5.0 and 40.0."""
        aug = iaa.MeanShiftBlur()
        assert np.isclose(aug.spatial_window_radius.a.value, 5.0)
        assert np.isclose(aug.spatial_window_radius.b.value, 40.0)
        assert np.isclose(aug.color_window_radius.a.value, 5.0)
        assert np.isclose(aug.color_window_radius.b.value, 40.0)
    def test___init___custom(self):
        """List / stochastic-parameter arguments must be stored as given."""
        aug = iaa.MeanShiftBlur(
            spatial_radius=[1.0, 2.0, 3.0],
            color_radius=iap.Deterministic(5)
        )
        assert np.allclose(aug.spatial_window_radius.a, [1.0, 2.0, 3.0])
        assert aug.color_window_radius.value == 5
    def test_draw_samples(self):
        """Sampled radii must respect the configured choices and ranges."""
        aug = iaa.MeanShiftBlur(
            spatial_radius=[1.0, 2.0, 3.0],
            color_radius=(1.0, 2.0)
        )
        batch = mock.Mock()
        batch.nb_rows = 100
        samples = aug._draw_samples(batch, iarandom.RNG(0))
        assert np.all(
            np.isclose(samples[0], 1.0)
            | np.isclose(samples[0], 2.0)
            | np.isclose(samples[0], 3.0)
        )
        # BUGFIX: was `|`, which made the assertion a tautology; both
        # bounds must hold simultaneously.
        assert np.all((1.0 <= samples[1]) & (samples[1] <= 2.0))
    @mock.patch("imgaug.augmenters.blur.blur_mean_shift_")
    def test_mocked(self, mock_ms):
        """The augmenter must forward its radii to blur_mean_shift_()."""
        aug = iaa.MeanShiftBlur(
            spatial_radius=1,
            color_radius=2
        )
        image = np.zeros((1, 1, 3), dtype=np.uint8)
        mock_ms.return_value = image
        _image_aug = aug(image=image)
        kwargs = mock_ms.call_args_list[0][1]
        assert mock_ms.call_count == 1
        assert np.isclose(kwargs["spatial_window_radius"], 1.0)
        assert np.isclose(kwargs["color_window_radius"], 2.0)
    def test_batch_without_images(self):
        """Keypoint-only batches must pass through unchanged."""
        aug = iaa.MeanShiftBlur()
        kpsoi = ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(5, 5, 3))
        kps_aug = aug(keypoints=kpsoi)
        assert kps_aug.keypoints[0].x == 0
        assert kps_aug.keypoints[0].y == 1
    def test_get_parameters(self):
        """get_parameters() must return the two radius parameters by identity."""
        aug = iaa.MeanShiftBlur()
        params = aug.get_parameters()
        assert params[0] is aug.spatial_window_radius
        assert params[1] is aug.color_window_radius
# --- (concatenation artifact: a stray "|" separated this point from a second copy of the module header below) ---
from __future__ import print_function, division, absolute_import
import warnings
import sys
import itertools
# unittest only added in 3.4 self.subTest()
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import cv2
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from imgaug import dtypes as iadt
from imgaug import random as iarandom
from imgaug.testutils import keypoints_equal, reseed
class Test_blur_gaussian_(unittest.TestCase):
    """Tests for the in-place function iaa.blur_gaussian_() across its
    "scipy" and "cv2" backends and a wide range of dtypes."""
    def setUp(self):
        # re-seed global RNG so each test is deterministic
        reseed()
    def test_integration(self):
        """Smoke test: all backends x channel counts blur a single bright pixel."""
        backends = ["auto", "scipy", "cv2"]
        nb_channels_lst = [None, 1, 3, 4, 5, 10]
        gen = itertools.product(backends, nb_channels_lst)
        for backend, nb_channels in gen:
            with self.subTest(backend=backend, nb_channels=nb_channels):
                image = np.zeros((5, 5), dtype=np.uint8)
                if nb_channels is not None:
                    image = np.tile(image[..., np.newaxis], (1, 1, nb_channels))
                image[2, 2] = 255
                mask = image < 255
                observed = iaa.blur_gaussian_(
                    np.copy(image), sigma=5.0, backend=backend)
                assert observed.shape == image.shape
                assert observed.dtype.name == "uint8"
                # center must have lost intensity ...
                assert np.all(observed[2, 2] < 255)
                # ... and the surroundings must have gained some
                assert np.sum(observed[mask]) > (5*5-1)
                if nb_channels is not None and nb_channels > 1:
                    # all channels must be blurred identically
                    for c in sm.xrange(1, observed.shape[2]):
                        assert np.array_equal(observed[..., c],
                                              observed[..., 0])
    def test_sigma_zero(self):
        """sigma=0 must be an exact no-op for 2D, HxWx1 and HxWx3 inputs."""
        image = np.arange(4*4).astype(np.uint8).reshape((4, 4))
        observed = iaa.blur_gaussian_(np.copy(image), 0)
        assert np.array_equal(observed, image)
        image = np.arange(4*4).astype(np.uint8).reshape((4, 4, 1))
        observed = iaa.blur_gaussian_(np.copy(image), 0)
        assert np.array_equal(observed, image)
        image = np.arange(4*4*3).astype(np.uint8).reshape((4, 4, 3))
        observed = iaa.blur_gaussian_(np.copy(image), 0)
        assert np.array_equal(observed, image)
    def test_eps(self):
        """A huge eps must suppress the blur entirely."""
        image = np.arange(4*4).astype(np.uint8).reshape((4, 4))
        observed_no_eps = iaa.blur_gaussian_(np.copy(image), 1.0, eps=0)
        observed_with_eps = iaa.blur_gaussian_(np.copy(image), 1.0, eps=1e10)
        assert not np.array_equal(observed_no_eps, observed_with_eps)
        assert np.array_equal(observed_with_eps, image)
    def test_ksize(self):
        """ksize=None derives the kernel size from sigma (2.6*sigma per
        the expectation below); an explicit ksize is forwarded as-is to
        cv2.GaussianBlur."""
        def side_effect(image, ksize, sigmaX, sigmaY, borderType):
            return image + 1
        sigmas = [5.0, 5.0]
        ksizes = [None, 3]
        ksizes_expected = [2.6*5.0, 3]
        gen = zip(sigmas, ksizes, ksizes_expected)
        for (sigma, ksize, ksize_expected) in gen:
            with self.subTest(sigma=sigma, ksize=ksize):
                mock_GaussianBlur = mock.Mock(side_effect=side_effect)
                image = np.arange(4*4).astype(np.uint8).reshape((4, 4))
                with mock.patch('cv2.GaussianBlur', mock_GaussianBlur):
                    observed = iaa.blur_gaussian_(
                        np.copy(image),
                        sigma=sigma,
                        ksize=ksize,
                        backend="cv2")
                    assert np.array_equal(observed, image+1)
                cargs = mock_GaussianBlur.call_args
                assert mock_GaussianBlur.call_count == 1
                assert np.array_equal(cargs[0][0], image)
                assert isinstance(cargs[0][1], tuple)
                assert np.allclose(
                    np.float32(cargs[0][1]),
                    np.float32([ksize_expected, ksize_expected]))
                assert np.isclose(cargs[1]["sigmaX"], sigma)
                assert np.isclose(cargs[1]["sigmaY"], sigma)
                assert cargs[1]["borderType"] == cv2.BORDER_REFLECT_101
    def test_more_than_four_channels(self):
        """Images with >4 channels must keep their shape."""
        shapes = [
            (1, 1, 4),
            (1, 1, 5),
            (1, 1, 512),
            (1, 1, 513)
        ]
        for shape in shapes:
            with self.subTest(shape=shape):
                image = np.zeros(shape, dtype=np.uint8)
                image_aug = iaa.blur_gaussian_(np.copy(image), 1.0)
                assert image_aug.shape == image.shape
    def test_zero_sized_axes(self):
        """Images with a zero-sized axis must keep their shape."""
        shapes = [
            (0, 0),
            (0, 1),
            (1, 0),
            (0, 1, 0),
            (1, 0, 0),
            (0, 1, 1),
            (1, 0, 1)
        ]
        for shape in shapes:
            with self.subTest(shape=shape):
                image = np.zeros(shape, dtype=np.uint8)
                image_aug = iaa.blur_gaussian_(np.copy(image), 1.0)
                assert image_aug.shape == image.shape
    def test_backends_called(self):
        """backend="cv2" must call cv2.GaussianBlur; backend="scipy"
        must call scipy.ndimage.gaussian_filter."""
        def side_effect_cv2(image, ksize, sigmaX, sigmaY, borderType):
            return image + 1
        def side_effect_scipy(image, sigma, mode):
            return image + 1
        mock_GaussianBlur = mock.Mock(side_effect=side_effect_cv2)
        mock_gaussian_filter = mock.Mock(side_effect=side_effect_scipy)
        image = np.arange(4*4).astype(np.uint8).reshape((4, 4))
        with mock.patch('cv2.GaussianBlur', mock_GaussianBlur):
            _observed = iaa.blur_gaussian_(
                np.copy(image), sigma=1.0, eps=0, backend="cv2")
            assert mock_GaussianBlur.call_count == 1
        with mock.patch('scipy.ndimage.gaussian_filter', mock_gaussian_filter):
            _observed = iaa.blur_gaussian_(
                np.copy(image), sigma=1.0, eps=0, backend="scipy")
            assert mock_gaussian_filter.call_count == 1
    def test_backends_similar(self):
        """cv2 and scipy backends must produce close results for the
        same sigma, both for 2D and 3-channel inputs."""
        with self.subTest(nb_channels=None):
            size = 10
            image = np.arange(
                0, size*size).astype(np.uint8).reshape((size, size))
            image_cv2 = iaa.blur_gaussian_(
                np.copy(image), sigma=3.0, ksize=20, backend="cv2")
            image_scipy = iaa.blur_gaussian_(
                np.copy(image), sigma=3.0, backend="scipy")
            diff = np.abs(image_cv2.astype(np.int32)
                          - image_scipy.astype(np.int32))
            assert np.average(diff) < 0.05 * (size * size)
        with self.subTest(nb_channels=3):
            size = 10
            image = np.arange(
                0, size*size).astype(np.uint8).reshape((size, size))
            image = np.tile(image[..., np.newaxis], (1, 1, 3))
            # make the channels slightly different from each other
            image[1] += 1
            image[2] += 2
            image_cv2 = iaa.blur_gaussian_(
                np.copy(image), sigma=3.0, ksize=20, backend="cv2")
            image_scipy = iaa.blur_gaussian_(
                np.copy(image), sigma=3.0, backend="scipy")
            diff = np.abs(image_cv2.astype(np.int32)
                          - image_scipy.astype(np.int32))
            assert np.average(diff) < 0.05 * (size * size)
            for c in sm.xrange(3):
                diff = np.abs(image_cv2[..., c].astype(np.int32)
                              - image_scipy[..., c].astype(np.int32))
                assert np.average(diff) < 0.05 * (size * size)
    def test_warnings(self):
        """Providing ksize with the scipy backend must emit a warning
        (ksize is a cv2-only argument)."""
        # note that self.assertWarningRegex does not exist in python 2.7
        with warnings.catch_warnings(record=True) as caught_warnings:
            warnings.simplefilter("always")
            _ = iaa.blur_gaussian_(
                np.zeros((1, 1), dtype=np.uint32),
                sigma=3.0,
                ksize=11,
                backend="scipy")
            assert len(caught_warnings) == 1
            assert (
                "but also provided 'ksize' argument"
                in str(caught_warnings[-1].message))
    def test_other_dtypes_sigma_0(self):
        """sigma=0 must be a no-op for every supported dtype on both backends."""
        dtypes_to_test_list = [
            ["bool",
             "uint8", "uint16", "uint32", "uint64",
             "int8", "int16", "int32", "int64",
             "float16", "float32", "float64", "float128"],
            ["bool",
             "uint8", "uint16", "uint32", "uint64",
             "int8", "int16", "int32", "int64",
             "float16", "float32", "float64", "float128"]
        ]
        gen = zip(["scipy", "cv2"], dtypes_to_test_list)
        for backend, dtypes_to_test in gen:
            # bool
            if "bool" in dtypes_to_test:
                with self.subTest(backend=backend, dtype="bool"):
                    image = np.zeros((3, 3), dtype=bool)
                    image[1, 1] = True
                    image_aug = iaa.blur_gaussian_(
                        np.copy(image), sigma=0, backend=backend)
                    assert image_aug.dtype.name == "bool"
                    assert np.all(image_aug == image)
            # uint, int
            uint_dts = [np.uint8, np.uint16, np.uint32, np.uint64]
            int_dts = [np.int8, np.int16, np.int32, np.int64]
            for dtype in uint_dts + int_dts:
                dtype = np.dtype(dtype)
                if dtype.name in dtypes_to_test:
                    with self.subTest(backend=backend, dtype=dtype.name):
                        _min_value, center_value, _max_value = \
                            iadt.get_value_range_of_dtype(dtype)
                        image = np.zeros((3, 3), dtype=dtype)
                        image[1, 1] = int(center_value)
                        image_aug = iaa.blur_gaussian_(
                            np.copy(image), sigma=0, backend=backend)
                        assert image_aug.dtype.name == dtype.name
                        assert np.all(image_aug == image)
            # float
            float_dts = [np.float16, np.float32, np.float64, np.float128]
            for dtype in float_dts:
                dtype = np.dtype(dtype)
                if dtype.name in dtypes_to_test:
                    with self.subTest(backend=backend, dtype=dtype.name):
                        _min_value, center_value, _max_value = \
                            iadt.get_value_range_of_dtype(dtype)
                        image = np.zeros((3, 3), dtype=dtype)
                        image[1, 1] = center_value
                        image_aug = iaa.blur_gaussian_(
                            np.copy(image), sigma=0, backend=backend)
                        assert image_aug.dtype.name == dtype.name
                        assert np.allclose(image_aug, image)
    def test_other_dtypes_sigma_075(self):
        """sigma=0.75 must match the reference scipy kernel per dtype,
        within dtype-size-dependent tolerances."""
        # prototype kernel, generated via:
        #     mask = np.zeros((5, 5), dtype=np.int32)
        #     mask[2, 2] = 1000 * 1000
        #     kernel = ndimage.gaussian_filter(mask, 0.75)
        mask = np.float64([
            [ 923, 6650, 16163, 6650, 923],
            [ 6650, 47896, 116408, 47896, 6650],
            [ 16163, 116408, 282925, 116408, 16163],
            [ 6650, 47896, 116408, 47896, 6650],
            [ 923, 6650, 16163, 6650, 923]
        ]) / (1000.0 * 1000.0)
        dtypes_to_test_list = [
            # scipy
            ["bool",
             "uint8", "uint16", "uint32", "uint64",
             "int8", "int16", "int32", "int64",
             "float16", "float32", "float64"],
            # cv2
            ["bool",
             "uint8", "uint16",
             "int8", "int16", "int32",
             "float16", "float32", "float64"]
        ]
        gen = zip(["scipy", "cv2"], dtypes_to_test_list)
        for backend, dtypes_to_test in gen:
            # bool
            if "bool" in dtypes_to_test:
                with self.subTest(backend=backend, dtype="bool"):
                    image = np.zeros((5, 5), dtype=bool)
                    image[2, 2] = True
                    image_aug = iaa.blur_gaussian_(
                        np.copy(image), sigma=0.75, backend=backend)
                    assert image_aug.dtype.name == "bool"
                    assert np.all(image_aug == (mask > 0.5))
            # uint, int
            uint_dts = [np.uint8, np.uint16, np.uint32, np.uint64]
            int_dts = [np.int8, np.int16, np.int32, np.int64]
            for dtype in uint_dts + int_dts:
                dtype = np.dtype(dtype)
                if dtype.name in dtypes_to_test:
                    with self.subTest(backend=backend, dtype=dtype.name):
                        min_value, center_value, max_value = \
                            iadt.get_value_range_of_dtype(dtype)
                        dynamic_range = max_value - min_value
                        value = int(center_value + 0.4 * max_value)
                        image = np.zeros((5, 5), dtype=dtype)
                        image[2, 2] = value
                        image_aug = iaa.blur_gaussian_(
                            image, sigma=0.75, backend=backend)
                        expected = (mask * value).astype(dtype)
                        diff = np.abs(image_aug.astype(np.int64)
                                      - expected.astype(np.int64))
                        assert image_aug.shape == mask.shape
                        assert image_aug.dtype.type == dtype
                        if dtype.itemsize <= 1:
                            assert np.max(diff) <= 4
                        else:
                            assert np.max(diff) <= 0.01 * dynamic_range
            # float
            float_dts = [np.float16, np.float32, np.float64, np.float128]
            values = [5000, 1000**1, 1000**2, 1000**3]
            for dtype, value in zip(float_dts, values):
                dtype = np.dtype(dtype)
                if dtype.name in dtypes_to_test:
                    with self.subTest(backend=backend, dtype=dtype.name):
                        image = np.zeros((5, 5), dtype=dtype)
                        image[2, 2] = value
                        image_aug = iaa.blur_gaussian_(
                            image, sigma=0.75, backend=backend)
                        expected = (mask * value).astype(dtype)
                        diff = np.abs(image_aug.astype(np.float128)
                                      - expected.astype(np.float128))
                        assert image_aug.shape == mask.shape
                        assert image_aug.dtype.type == dtype
                        # accepts difference of 2.0, 4.0, 8.0, 16.0 (at 1,
                        # 2, 4, 8 bytes, i.e. 8, 16, 32, 64 bit)
                        max_diff = (
                            np.dtype(dtype).itemsize
                            * 0.01
                            * np.float128(value))
                        assert np.max(diff) < max_diff
    def test_other_dtypes_bool_at_sigma_06(self):
        """bool input at sigma=0.6 must threshold the reference kernel at 0.5."""
        # --
        # blur of bool input at sigma=0.6
        # --
        # here we use a special mask and sigma as otherwise the only values
        # ending up with >0.5 would be the ones that
        # were before the blur already at >0.5
        # prototype kernel, generated via:
        #     mask = np.zeros((5, 5), dtype=np.float64)
        #     mask[1, 0] = 255
        #     mask[2, 0] = 255
        #     mask[2, 2] = 255
        #     mask[2, 4] = 255
        #     mask[3, 0] = 255
        #     mask = ndimage.gaussian_filter(mask, 1.0, mode="mirror")
        mask_bool = np.float64([
            [ 57, 14, 2, 1, 1],
            [142, 42, 29, 14, 28],
            [169, 69, 114, 56, 114],
            [142, 42, 29, 14, 28],
            [ 57, 14, 2, 1, 1]
        ]) / 255.0
        image = np.zeros((5, 5), dtype=bool)
        image[1, 0] = True
        image[2, 0] = True
        image[2, 2] = True
        image[2, 4] = True
        image[3, 0] = True
        for backend in ["scipy", "cv2"]:
            image_aug = iaa.blur_gaussian_(
                np.copy(image), sigma=0.6, backend=backend)
            expected = mask_bool > 0.5
            assert image_aug.shape == mask_bool.shape
            assert image_aug.dtype.type == np.bool_
            assert np.all(image_aug == expected)
class Test_blur_mean_shift_(unittest.TestCase):
    """Tests for the in-place mean shift blur function."""

    @property
    def image(self):
        # 4x8x3 uint8 image: left half holds values 1..4, right half
        # holds values 200..203 (identical rows).
        row = [1, 2, 3, 4, 200, 201, 202, 203]
        arr = np.array([row] * 4, dtype=np.uint8).reshape((4, 2*4, 1))
        return np.tile(arr, (1, 1, 3))

    def test_simple_image(self):
        img = self.image
        observed = iaa.blur_mean_shift_(np.copy(img), 0.5, 0.5)
        assert observed.shape == img.shape
        assert observed.dtype.name == "uint8"
        assert not np.array_equal(observed, img)
        # the two halves must keep their rough intensity levels
        assert 0 <= np.average(img[:, 0:4, :]) <= 5
        assert 199 <= np.average(img[:, 4:, :]) <= 203

    def test_hw_image(self):
        # (H,W) input without a channel axis
        img = self.image[:, :, 0]
        observed = iaa.blur_mean_shift_(np.copy(img), 0.5, 0.5)
        assert observed.shape == img.shape
        assert observed.dtype.name == "uint8"
        assert not np.array_equal(observed, img)

    def test_hw1_image(self):
        # (H,W,1) input must keep its three dimensions
        img = self.image[:, :, 0:1]
        observed = iaa.blur_mean_shift_(np.copy(img), 0.5, 0.5)
        assert observed.ndim == 3
        assert observed.shape == img.shape
        assert observed.dtype.name == "uint8"
        assert not np.array_equal(observed, img)

    def test_non_contiguous_image(self):
        img = self.image
        flipped_copy = np.copy(np.fliplr(img))
        img = np.fliplr(img)
        assert img.flags["C_CONTIGUOUS"] is False
        observed = iaa.blur_mean_shift_(img, 0.5, 0.5)
        assert observed.shape == flipped_copy.shape
        assert observed.dtype.name == "uint8"
        assert not np.array_equal(observed, flipped_copy)

    def test_both_parameters_are_zero(self):
        img = self.image[:, :, 0]
        observed = iaa.blur_mean_shift_(np.copy(img), 0, 0)
        assert observed.shape == img.shape
        assert observed.dtype.name == "uint8"
        assert not np.array_equal(observed, img)

    def test_zero_sized_axes(self):
        # degenerate shapes must not crash and must be returned unchanged
        for shape in [(0, 0), (0, 1), (1, 0), (0, 1, 1), (1, 0, 1)]:
            with self.subTest(shape=shape):
                img = np.zeros(shape, dtype=np.uint8)
                observed = iaa.blur_mean_shift_(np.copy(img), 1.0, 1.0)
                assert observed.shape == img.shape
class TestGaussianBlur(unittest.TestCase):
    """Tests for the ``GaussianBlur`` augmenter.

    The dtype tests compare against kernels precomputed with
    ``scipy.ndimage.gaussian_filter``; the generating code is kept in the
    inline comments of each test.
    """
    def setUp(self):
        reseed()
    def test_sigma_is_zero(self):
        """sigma=0 must be a no-op."""
        # no blur, shouldnt change anything
        base_img = np.array([[0, 0, 0],
                             [0, 255, 0],
                             [0, 0, 0]], dtype=np.uint8)
        base_img = base_img[:, :, np.newaxis]
        images = np.array([base_img])
        aug = iaa.GaussianBlur(sigma=0)
        observed = aug.augment_images(images)
        expected = images
        assert np.array_equal(observed, expected)
    def test_low_sigma(self):
        """A weak blur must dim the center pixel and brighten neighbors."""
        base_img = np.array([[0, 0, 0],
                             [0, 255, 0],
                             [0, 0, 0]], dtype=np.uint8)
        base_img = base_img[:, :, np.newaxis]
        images = np.array([base_img])
        images_list = [base_img]
        outer_pixels = ([], [])
        # NOTE(review): ``i != j`` skips the diagonal, i.e. also the outer
        # corners (0,0) and (2,2), not just the center (1,1) — possibly
        # intended to be ``(i, j) != (1, 1)``; confirm before tightening.
        for i in sm.xrange(base_img.shape[0]):
            for j in sm.xrange(base_img.shape[1]):
                if i != j:
                    outer_pixels[0].append(i)
                    outer_pixels[1].append(j)
        # weak blur of center pixel
        aug = iaa.GaussianBlur(sigma=0.5)
        aug_det = aug.to_deterministic()
        # images as numpy array
        observed = aug.augment_images(images)
        assert 100 < observed[0][1, 1] < 255
        assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()
        assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()
        observed = aug_det.augment_images(images)
        assert 100 < observed[0][1, 1] < 255
        assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()
        assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()
        # images as list
        observed = aug.augment_images(images_list)
        assert 100 < observed[0][1, 1] < 255
        assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()
        assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()
        observed = aug_det.augment_images(images_list)
        assert 100 < observed[0][1, 1] < 255
        assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()
        assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()
    def test_keypoints_dont_change(self):
        """Blurring must leave keypoint coordinates untouched."""
        kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
               ia.Keypoint(x=2, y=2)]
        kpsoi = [ia.KeypointsOnImage(kps, shape=(3, 3, 1))]
        aug = iaa.GaussianBlur(sigma=0.5)
        aug_det = aug.to_deterministic()
        observed = aug.augment_keypoints(kpsoi)
        expected = kpsoi
        assert keypoints_equal(observed, expected)
        observed = aug_det.augment_keypoints(kpsoi)
        expected = kpsoi
        assert keypoints_equal(observed, expected)
    def test_sigma_is_tuple(self):
        """sigma=(0, 1) must sample varying sigmas; deterministic copies
        must be stable."""
        # varying blur sigmas
        base_img = np.array([[0, 0, 0],
                             [0, 255, 0],
                             [0, 0, 0]], dtype=np.uint8)
        base_img = base_img[:, :, np.newaxis]
        images = np.array([base_img])
        aug = iaa.GaussianBlur(sigma=(0, 1))
        aug_det = aug.to_deterministic()
        last_aug = None
        last_aug_det = None
        nb_changed_aug = 0
        nb_changed_aug_det = 0
        nb_iterations = 1000
        for i in sm.xrange(nb_iterations):
            observed_aug = aug.augment_images(images)
            observed_aug_det = aug_det.augment_images(images)
            if i == 0:
                last_aug = observed_aug
                last_aug_det = observed_aug_det
            else:
                if not np.array_equal(observed_aug, last_aug):
                    nb_changed_aug += 1
                if not np.array_equal(observed_aug_det, last_aug_det):
                    nb_changed_aug_det += 1
                last_aug = observed_aug
                last_aug_det = observed_aug_det
        assert nb_changed_aug >= int(nb_iterations * 0.8)
        assert nb_changed_aug_det == 0
    def test_other_dtypes_bool_at_sigma_0(self):
        # bool
        aug = iaa.GaussianBlur(sigma=0)
        image = np.zeros((3, 3), dtype=bool)
        image[1, 1] = True
        image_aug = aug.augment_image(image)
        assert image_aug.dtype.type == np.bool_
        assert np.all(image_aug == image)
    def test_other_dtypes_uint_int_at_sigma_0(self):
        """sigma=0 must be an identity op for all uint/int dtypes."""
        aug = iaa.GaussianBlur(sigma=0)
        dts = [np.uint8, np.uint16, np.uint32,
               np.int8, np.int16, np.int32]
        for dtype in dts:
            _min_value, center_value, _max_value = \
                iadt.get_value_range_of_dtype(dtype)
            image = np.zeros((3, 3), dtype=dtype)
            image[1, 1] = int(center_value)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert np.all(image_aug == image)
    def test_other_dtypes_float_at_sigma_0(self):
        """sigma=0 must be an identity op for all float dtypes."""
        aug = iaa.GaussianBlur(sigma=0)
        dts = [np.float16, np.float32, np.float64]
        for dtype in dts:
            _min_value, center_value, _max_value = \
                iadt.get_value_range_of_dtype(dtype)
            image = np.zeros((3, 3), dtype=dtype)
            image[1, 1] = center_value
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert np.allclose(image_aug, image)
    def test_other_dtypes_bool_at_sigma_060(self):
        # --
        # blur of bool input at sigma=0.6
        # --
        # here we use a special mask and sigma as otherwise the only values
        # ending up with >0.5 would be the ones that
        # were before the blur already at >0.5
        # prototype kernel, generated via:
        #  mask = np.zeros((5, 5), dtype=np.float64)
        #  mask[1, 0] = 255
        #  mask[2, 0] = 255
        #  mask[2, 2] = 255
        #  mask[2, 4] = 255
        #  mask[3, 0] = 255
        #  mask = ndimage.gaussian_filter(mask, 1.0, mode="mirror")
        aug = iaa.GaussianBlur(sigma=0.6)
        mask_bool = np.float64([
            [ 57, 14, 2, 1, 1],
            [142, 42, 29, 14, 28],
            [169, 69, 114, 56, 114],
            [142, 42, 29, 14, 28],
            [ 57, 14, 2, 1, 1]
        ]) / 255.0
        image = np.zeros((5, 5), dtype=bool)
        image[1, 0] = True
        image[2, 0] = True
        image[2, 2] = True
        image[2, 4] = True
        image[3, 0] = True
        image_aug = aug.augment_image(image)
        expected = mask_bool > 0.5
        assert image_aug.shape == mask_bool.shape
        assert image_aug.dtype.type == np.bool_
        assert np.all(image_aug == expected)
    def test_other_dtypes_at_sigma_1(self):
        # --
        # blur of various dtypes at sigma=1.0
        # and using an example value of 100 for int/uint/float and True for
        # bool
        # --
        # prototype kernel, generated via:
        #  mask = np.zeros((5, 5), dtype=np.float64)
        #  mask[2, 2] = 100
        #  mask = ndimage.gaussian_filter(mask, 1.0, mode="mirror")
        aug = iaa.GaussianBlur(sigma=1.0)
        mask = np.float64([
            [1, 2, 3, 2, 1],
            [2, 5, 9, 5, 2],
            [4, 9, 15, 9, 4],
            [2, 5, 9, 5, 2],
            [1, 2, 3, 2, 1]
        ])
        # uint, int
        uint_dts = [np.uint8, np.uint16, np.uint32]
        int_dts = [np.int8, np.int16, np.int32]
        for dtype in uint_dts + int_dts:
            image = np.zeros((5, 5), dtype=dtype)
            image[2, 2] = 100
            image_aug = aug.augment_image(image)
            expected = mask.astype(dtype)
            diff = np.abs(image_aug.astype(np.int64)
                          - expected.astype(np.int64))
            assert image_aug.shape == mask.shape
            assert image_aug.dtype.type == dtype
            assert np.max(diff) <= 4
            assert np.average(diff) <= 2
        # float
        float_dts = [np.float16, np.float32, np.float64]
        for dtype in float_dts:
            image = np.zeros((5, 5), dtype=dtype)
            image[2, 2] = 100.0
            image_aug = aug.augment_image(image)
            expected = mask.astype(dtype)
            diff = np.abs(image_aug.astype(np.float128)
                          - expected.astype(np.float128))
            assert image_aug.shape == mask.shape
            assert image_aug.dtype.type == dtype
            assert np.max(diff) < 4
            assert np.average(diff) < 2.0
    def test_other_dtypes_at_sigma_040(self):
        # --
        # blur of various dtypes at sigma=0.4
        # and using an example value of 100 for int/uint/float and True for
        # bool
        # --
        aug = iaa.GaussianBlur(sigma=0.4)
        # prototype kernel, generated via:
        #  mask = np.zeros((5, 5), dtype=np.uint8)
        #  mask[2, 2] = 100
        #  kernel = ndimage.gaussian_filter(mask, 0.4, mode="mirror")
        mask = np.float64([
            [0, 0, 0, 0, 0],
            [0, 0, 3, 0, 0],
            [0, 3, 83, 3, 0],
            [0, 0, 3, 0, 0],
            [0, 0, 0, 0, 0]
        ])
        # uint, int
        uint_dts = [np.uint8, np.uint16, np.uint32]
        int_dts = [np.int8, np.int16, np.int32]
        for dtype in uint_dts + int_dts:
            image = np.zeros((5, 5), dtype=dtype)
            image[2, 2] = 100
            image_aug = aug.augment_image(image)
            expected = mask.astype(dtype)
            diff = np.abs(image_aug.astype(np.int64)
                          - expected.astype(np.int64))
            assert image_aug.shape == mask.shape
            assert image_aug.dtype.type == dtype
            assert np.max(diff) <= 4
        # float
        float_dts = [np.float16, np.float32, np.float64]
        for dtype in float_dts:
            image = np.zeros((5, 5), dtype=dtype)
            image[2, 2] = 100.0
            image_aug = aug.augment_image(image)
            expected = mask.astype(dtype)
            diff = np.abs(image_aug.astype(np.float128)
                          - expected.astype(np.float128))
            assert image_aug.shape == mask.shape
            assert image_aug.dtype.type == dtype
            assert np.max(diff) < 4.0
    def test_other_dtypes_at_sigma_075(self):
        # --
        # blur of various dtypes at sigma=0.75
        # and values being half-way between center and maximum for each dtype
        # The goal of this test is to verify that no major loss of resolution
        # happens for large dtypes.
        # Such inaccuracies appear for float64 if used.
        # --
        aug = iaa.GaussianBlur(sigma=0.75)
        # prototype kernel, generated via:
        # mask = np.zeros((5, 5), dtype=np.int32)
        # mask[2, 2] = 1000 * 1000
        # kernel = ndimage.gaussian_filter(mask, 0.75)
        mask = np.float64([
            [ 923, 6650, 16163, 6650, 923],
            [ 6650, 47896, 116408, 47896, 6650],
            [ 16163, 116408, 282925, 116408, 16163],
            [ 6650, 47896, 116408, 47896, 6650],
            [ 923, 6650, 16163, 6650, 923]
        ]) / (1000.0 * 1000.0)
        # uint, int
        uint_dts = [np.uint8, np.uint16, np.uint32]
        int_dts = [np.int8, np.int16, np.int32]
        for dtype in uint_dts + int_dts:
            min_value, center_value, max_value = \
                iadt.get_value_range_of_dtype(dtype)
            dynamic_range = max_value - min_value
            value = int(center_value + 0.4 * max_value)
            image = np.zeros((5, 5), dtype=dtype)
            image[2, 2] = value
            image_aug = aug.augment_image(image)
            expected = (mask * value).astype(dtype)
            diff = np.abs(image_aug.astype(np.int64)
                          - expected.astype(np.int64))
            assert image_aug.shape == mask.shape
            assert image_aug.dtype.type == dtype
            if np.dtype(dtype).itemsize <= 1:
                assert np.max(diff) <= 4
            else:
                assert np.max(diff) <= 0.01 * dynamic_range
        # float
        float_dts = [np.float16, np.float32, np.float64]
        values = [5000, 1000*1000, 1000*1000*1000]
        for dtype, value in zip(float_dts, values):
            image = np.zeros((5, 5), dtype=dtype)
            image[2, 2] = value
            image_aug = aug.augment_image(image)
            expected = (mask * value).astype(dtype)
            diff = np.abs(image_aug.astype(np.float128)
                          - expected.astype(np.float128))
            assert image_aug.shape == mask.shape
            assert image_aug.dtype.type == dtype
            # accepts difference of 2.0, 4.0, 8.0, 16.0 (at 1, 2, 4, 8 bytes,
            # i.e. 8, 16, 32, 64 bit)
            max_diff = np.dtype(dtype).itemsize * 0.01 * np.float128(value)
            assert np.max(diff) < max_diff
    def test_failure_on_invalid_dtypes(self):
        # assert failure on invalid dtypes
        aug = iaa.GaussianBlur(sigma=1.0)
        for dt in [np.float128]:
            got_exception = False
            try:
                _ = aug.augment_image(np.zeros((1, 1), dtype=dt))
            except Exception as exc:
                assert "forbidden dtype" in str(exc)
                got_exception = True
            assert got_exception
class TestAverageBlur(unittest.TestCase):
    """Tests for the ``AverageBlur`` augmenter.

    Fixtures built in ``__init__``:

    * ``base_img``: an 11x11x1 uint8 image with a bright cross around
      the center pixel.
    * ``blur3x3`` / ``blur4x4`` / ``blur5x5``: the expected results of
      average-blurring ``base_img`` with kernel sizes 3, 4 and 5.
    """
    def __init__(self, *args, **kwargs):
        super(TestAverageBlur, self).__init__(*args, **kwargs)
        base_img = np.zeros((11, 11, 1), dtype=np.uint8)
        base_img[5, 5, 0] = 200
        base_img[4, 5, 0] = 100
        base_img[6, 5, 0] = 100
        base_img[5, 4, 0] = 100
        base_img[5, 6, 0] = 100
        blur3x3 = [
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 11, 11, 11, 0, 0, 0, 0],
            [0, 0, 0, 11, 44, 56, 44, 11, 0, 0, 0],
            [0, 0, 0, 11, 56, 67, 56, 11, 0, 0, 0],
            [0, 0, 0, 11, 44, 56, 44, 11, 0, 0, 0],
            [0, 0, 0, 0, 11, 11, 11, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        ]
        blur3x3 = np.array(blur3x3, dtype=np.uint8)[..., np.newaxis]
        blur4x4 = [
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 6, 6, 6, 6, 0, 0, 0],
            [0, 0, 0, 6, 25, 31, 31, 25, 6, 0, 0],
            [0, 0, 0, 6, 31, 38, 38, 31, 6, 0, 0],
            [0, 0, 0, 6, 31, 38, 38, 31, 6, 0, 0],
            [0, 0, 0, 6, 25, 31, 31, 25, 6, 0, 0],
            [0, 0, 0, 0, 6, 6, 6, 6, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        ]
        blur4x4 = np.array(blur4x4, dtype=np.uint8)[..., np.newaxis]
        blur5x5 = [
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 4, 4, 4, 4, 4, 0, 0, 0],
            [0, 0, 4, 16, 20, 20, 20, 16, 4, 0, 0],
            [0, 0, 4, 20, 24, 24, 24, 20, 4, 0, 0],
            [0, 0, 4, 20, 24, 24, 24, 20, 4, 0, 0],
            [0, 0, 4, 20, 24, 24, 24, 20, 4, 0, 0],
            [0, 0, 4, 16, 20, 20, 20, 16, 4, 0, 0],
            [0, 0, 0, 4, 4, 4, 4, 4, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        ]
        blur5x5 = np.array(blur5x5, dtype=np.uint8)[..., np.newaxis]
        self.base_img = base_img
        self.blur3x3 = blur3x3
        self.blur4x4 = blur4x4
        self.blur5x5 = blur5x5
    def setUp(self):
        reseed()
    def test_kernel_size_0(self):
        # no blur, shouldnt change anything
        aug = iaa.AverageBlur(k=0)
        observed = aug.augment_image(self.base_img)
        assert np.array_equal(observed, self.base_img)
    def test_kernel_size_3(self):
        # k=3
        aug = iaa.AverageBlur(k=3)
        observed = aug.augment_image(self.base_img)
        assert np.array_equal(observed, self.blur3x3)
    def test_kernel_size_5(self):
        # k=5
        aug = iaa.AverageBlur(k=5)
        observed = aug.augment_image(self.base_img)
        assert np.array_equal(observed, self.blur5x5)
    def test_kernel_size_is_tuple(self):
        """k=(3, 4) must sample both kernel sizes roughly uniformly."""
        # k as (3, 4)
        aug = iaa.AverageBlur(k=(3, 4))
        nb_iterations = 100
        nb_seen = [0, 0]
        for i in sm.xrange(nb_iterations):
            observed = aug.augment_image(self.base_img)
            if np.array_equal(observed, self.blur3x3):
                nb_seen[0] += 1
            elif np.array_equal(observed, self.blur4x4):
                nb_seen[1] += 1
            else:
                raise Exception("Unexpected result in AverageBlur@1")
        p_seen = [v/nb_iterations for v in nb_seen]
        assert 0.4 <= p_seen[0] <= 0.6
        assert 0.4 <= p_seen[1] <= 0.6
    def test_kernel_size_is_tuple_with_wider_range(self):
        """k=(3, 5) must sample all three kernel sizes roughly uniformly."""
        # k as (3, 5)
        aug = iaa.AverageBlur(k=(3, 5))
        nb_iterations = 200
        nb_seen = [0, 0, 0]
        for i in sm.xrange(nb_iterations):
            observed = aug.augment_image(self.base_img)
            if np.array_equal(observed, self.blur3x3):
                nb_seen[0] += 1
            elif np.array_equal(observed, self.blur4x4):
                nb_seen[1] += 1
            elif np.array_equal(observed, self.blur5x5):
                nb_seen[2] += 1
            else:
                raise Exception("Unexpected result in AverageBlur@2")
        p_seen = [v/nb_iterations for v in nb_seen]
        assert 0.23 <= p_seen[0] <= 0.43
        assert 0.23 <= p_seen[1] <= 0.43
        assert 0.23 <= p_seen[2] <= 0.43
    def test_kernel_size_is_stochastic_parameter(self):
        # k as stochastic parameter
        aug = iaa.AverageBlur(k=iap.Choice([3, 5]))
        nb_iterations = 100
        nb_seen = [0, 0]
        for i in sm.xrange(nb_iterations):
            observed = aug.augment_image(self.base_img)
            if np.array_equal(observed, self.blur3x3):
                nb_seen[0] += 1
            elif np.array_equal(observed, self.blur5x5):
                nb_seen[1] += 1
            else:
                raise Exception("Unexpected result in AverageBlur@3")
        p_seen = [v/nb_iterations for v in nb_seen]
        assert 0.4 <= p_seen[0] <= 0.6
        assert 0.4 <= p_seen[1] <= 0.6
    def test_kernel_size_is_tuple_of_tuples(self):
        """k=((3, 5), (3, 5)) must sample height/width independently."""
        # k as ((3, 5), (3, 5))
        aug = iaa.AverageBlur(k=((3, 5), (3, 5)))
        possible = dict()
        for kh in [3, 4, 5]:
            for kw in [3, 4, 5]:
                key = (kh, kw)
                # NOTE(review): kh/kw are never 0 here, so this branch is
                # dead; it looks copied from a variant that included k=0.
                if kh == 0 or kw == 0:
                    possible[key] = np.copy(self.base_img)
                else:
                    possible[key] = cv2.blur(
                        self.base_img, (kh, kw))[..., np.newaxis]
        nb_iterations = 250
        nb_seen = dict([(key, 0) for key, val in possible.items()])
        for i in sm.xrange(nb_iterations):
            observed = aug.augment_image(self.base_img)
            for key, img_aug in possible.items():
                if np.array_equal(observed, img_aug):
                    nb_seen[key] += 1
        # dont check sum here, because 0xX and Xx0 are all the same, i.e. much
        # higher sum than nb_iterations
        assert np.all([v > 0 for v in nb_seen.values()])
    def test_more_than_four_channels(self):
        shapes = [
            (1, 1, 4),
            (1, 1, 5),
            (1, 1, 512),
            (1, 1, 513)
        ]
        for shape in shapes:
            with self.subTest(shape=shape):
                image = np.zeros(shape, dtype=np.uint8)
                image_aug = iaa.AverageBlur(k=3)(image=image)
                assert image_aug.shape == image.shape
    def test_zero_sized_axes(self):
        shapes = [
            (0, 0),
            (0, 1),
            (1, 0),
            (0, 1, 0),
            (1, 0, 0),
            (0, 1, 1),
            (1, 0, 1)
        ]
        for shape in shapes:
            with self.subTest(shape=shape):
                image = np.zeros(shape, dtype=np.uint8)
                image_aug = iaa.AverageBlur(k=3)(image=image)
                assert image_aug.shape == image.shape
    def test_keypoints_dont_change(self):
        """Blurring must leave keypoint coordinates untouched."""
        kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
               ia.Keypoint(x=2, y=2)]
        kpsoi = [ia.KeypointsOnImage(kps, shape=(11, 11, 1))]
        aug = iaa.AverageBlur(k=3)
        aug_det = aug.to_deterministic()
        observed = aug.augment_keypoints(kpsoi)
        expected = kpsoi
        assert keypoints_equal(observed, expected)
        observed = aug_det.augment_keypoints(kpsoi)
        expected = kpsoi
        assert keypoints_equal(observed, expected)
    def test_other_dtypes_k0(self):
        """k=0 must be an identity op for all supported dtypes."""
        aug = iaa.AverageBlur(k=0)
        # bool
        image = np.zeros((3, 3), dtype=bool)
        image[1, 1] = True
        image[2, 2] = True
        image_aug = aug.augment_image(image)
        assert image_aug.dtype.type == np.bool_
        assert np.all(image_aug == image)
        # uint, int
        uint_dts = [np.uint8, np.uint16]
        int_dts = [np.int8, np.int16]
        for dtype in uint_dts + int_dts:
            _min_value, center_value, max_value = \
                iadt.get_value_range_of_dtype(dtype)
            image = np.zeros((3, 3), dtype=dtype)
            image[1, 1] = int(center_value + 0.4 * max_value)
            image[2, 2] = int(center_value + 0.4 * max_value)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert np.all(image_aug == image)
        # float
        float_dts = [np.float16, np.float32, np.float64]
        values = [5000, 1000*1000, 1000*1000*1000]
        for dtype, value in zip(float_dts, values):
            image = np.zeros((3, 3), dtype=dtype)
            image[1, 1] = value
            image[2, 2] = value
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert np.allclose(image_aug, image)
    def test_other_dtypes_k3_value_100(self):
        # --
        # blur of various dtypes at k=3
        # and using an example value of 100 for int/uint/float and True for
        # bool
        # --
        aug = iaa.AverageBlur(k=3)
        # prototype mask
        # we place values in a 3x3 grid at positions (row=1, col=1) and
        # (row=2, col=2) (beginning with 0)
        # AverageBlur uses cv2.blur(), which uses BORDER_REFLECT_101 as its
        # default padding mode,
        # see https://docs.opencv.org/3.1.0/d2/de8/group__core__array.html
        # the matrix below shows the 3x3 grid and the padded row/col values
        # around it
        # [1, 0, 1, 0, 1]
        # [0, 0, 0, 0, 0]
        # [1, 0, 1, 0, 1]
        # [0, 0, 0, 1, 0]
        # [1, 0, 1, 0, 1]
        mask = np.float64([
            [4/9, 2/9, 4/9],
            [2/9, 2/9, 3/9],
            [4/9, 3/9, 5/9]
        ])
        # bool
        image = np.zeros((3, 3), dtype=bool)
        image[1, 1] = True
        image[2, 2] = True
        image_aug = aug.augment_image(image)
        expected = mask > 0.5
        assert image_aug.dtype.type == np.bool_
        assert np.all(image_aug == expected)
        # uint, int
        uint_dts = [np.uint8, np.uint16]
        int_dts = [np.int8, np.int16]
        for dtype in uint_dts + int_dts:
            image = np.zeros((3, 3), dtype=dtype)
            image[1, 1] = 100
            image[2, 2] = 100
            image_aug = aug.augment_image(image)
            # cv2.blur() applies rounding for int/uint dtypes
            expected = np.round(mask * 100).astype(dtype)
            diff = np.abs(image_aug.astype(np.int64)
                          - expected.astype(np.int64))
            assert image_aug.dtype.type == dtype
            assert np.max(diff) <= 2
        # float
        float_dts = [np.float16, np.float32, np.float64]
        for dtype in float_dts:
            image = np.zeros((3, 3), dtype=dtype)
            image[1, 1] = 100.0
            image[2, 2] = 100.0
            image_aug = aug.augment_image(image)
            expected = (mask * 100.0).astype(dtype)
            diff = np.abs(image_aug.astype(np.float128)
                          - expected.astype(np.float128))
            assert image_aug.dtype.type == dtype
            assert np.max(diff) < 1.0
    def test_other_dtypes_k3_dynamic_value(self):
        # --
        # blur of various dtypes at k=3
        # and values being half-way between center and maximum for each
        # dtype (bool is skipped as it doesnt make any sense here)
        # The goal of this test is to verify that no major loss of resolution
        # happens for large dtypes.
        # --
        aug = iaa.AverageBlur(k=3)
        # prototype mask (see above)
        mask = np.float64([
            [4/9, 2/9, 4/9],
            [2/9, 2/9, 3/9],
            [4/9, 3/9, 5/9]
        ])
        # uint, int
        uint_dts = [np.uint8, np.uint16]
        int_dts = [np.int8, np.int16]
        for dtype in uint_dts + int_dts:
            _min_value, center_value, max_value = \
                iadt.get_value_range_of_dtype(dtype)
            value = int(center_value + 0.4 * max_value)
            image = np.zeros((3, 3), dtype=dtype)
            image[1, 1] = value
            image[2, 2] = value
            image_aug = aug.augment_image(image)
            expected = (mask * value).astype(dtype)
            diff = np.abs(image_aug.astype(np.int64)
                          - expected.astype(np.int64))
            assert image_aug.dtype.type == dtype
            # accepts difference of 4, 8, 16 (at 1, 2, 4 bytes, i.e. 8, 16,
            # 32 bit)
            assert np.max(diff) <= 2**(1 + np.dtype(dtype).itemsize)
        # float
        float_dts = [np.float16, np.float32, np.float64]
        values = [5000, 1000*1000, 1000*1000*1000]
        for dtype, value in zip(float_dts, values):
            image = np.zeros((3, 3), dtype=dtype)
            image[1, 1] = value
            image[2, 2] = value
            image_aug = aug.augment_image(image)
            expected = (mask * value).astype(dtype)
            diff = np.abs(image_aug.astype(np.float128)
                          - expected.astype(np.float128))
            assert image_aug.dtype.type == dtype
            # accepts difference of 2.0, 4.0, 8.0, 16.0 (at 1, 2, 4, 8 bytes,
            # i.e. 8, 16, 32, 64 bit)
            assert np.max(diff) < 2**(1 + np.dtype(dtype).itemsize)
    def test_failure_on_invalid_dtypes(self):
        # assert failure on invalid dtypes
        aug = iaa.AverageBlur(k=3)
        for dt in [np.uint32, np.uint64, np.int32, np.int64]:
            got_exception = False
            try:
                _ = aug.augment_image(np.zeros((1, 1), dtype=dt))
            except Exception as exc:
                assert "forbidden dtype" in str(exc)
                got_exception = True
            assert got_exception
class TestMedianBlur(unittest.TestCase):
    """Tests for the ``MedianBlur`` augmenter.

    Fixtures built in ``__init__``:

    * ``base_img``: an 11x11x1 uint8 image with nested squares of
      values 1, 2 and 3.
    * ``blur3x3`` / ``blur5x5``: the expected results of median-blurring
      ``base_img`` with kernel sizes 3 and 5.
    """
    def __init__(self, *args, **kwargs):
        super(TestMedianBlur, self).__init__(*args, **kwargs)
        base_img = np.zeros((11, 11, 1), dtype=np.uint8)
        base_img[3:8, 3:8, 0] = 1
        base_img[4:7, 4:7, 0] = 2
        base_img[5:6, 5:6, 0] = 3
        # expected k=3 result: corners of each square get eroded
        blur3x3 = np.zeros_like(base_img)
        blur3x3[3:8, 3:8, 0] = 1
        blur3x3[4:7, 4:7, 0] = 2
        blur3x3[4, 4, 0] = 1
        blur3x3[4, 6, 0] = 1
        blur3x3[6, 4, 0] = 1
        blur3x3[6, 6, 0] = 1
        blur3x3[3, 3, 0] = 0
        blur3x3[3, 7, 0] = 0
        blur3x3[7, 3, 0] = 0
        blur3x3[7, 7, 0] = 0
        # expected k=5 result: more of the outline vanishes and no value
        # above 1 survives
        blur5x5 = np.copy(blur3x3)
        blur5x5[4, 3, 0] = 0
        blur5x5[3, 4, 0] = 0
        blur5x5[6, 3, 0] = 0
        blur5x5[7, 4, 0] = 0
        blur5x5[4, 7, 0] = 0
        blur5x5[3, 6, 0] = 0
        blur5x5[6, 7, 0] = 0
        blur5x5[7, 6, 0] = 0
        blur5x5[blur5x5 > 1] = 1
        self.base_img = base_img
        self.blur3x3 = blur3x3
        self.blur5x5 = blur5x5
    def setUp(self):
        reseed()
    def test_k_is_1(self):
        # no blur, shouldnt change anything
        aug = iaa.MedianBlur(k=1)
        observed = aug.augment_image(self.base_img)
        assert np.array_equal(observed, self.base_img)
    def test_k_is_3(self):
        # k=3
        aug = iaa.MedianBlur(k=3)
        observed = aug.augment_image(self.base_img)
        assert np.array_equal(observed, self.blur3x3)
    def test_k_is_5(self):
        # k=5
        aug = iaa.MedianBlur(k=5)
        observed = aug.augment_image(self.base_img)
        assert np.array_equal(observed, self.blur5x5)
    def test_k_is_tuple(self):
        # k as (3, 5): both kernel sizes must appear
        aug = iaa.MedianBlur(k=(3, 5))
        seen = [False, False]
        for i in sm.xrange(100):
            observed = aug.augment_image(self.base_img)
            if np.array_equal(observed, self.blur3x3):
                seen[0] = True
            elif np.array_equal(observed, self.blur5x5):
                seen[1] = True
            else:
                raise Exception("Unexpected result in MedianBlur@1")
            if all(seen):
                break
        assert np.all(seen)
    def test_k_is_stochastic_parameter(self):
        # k as stochastic parameter: both kernel sizes must appear
        aug = iaa.MedianBlur(k=iap.Choice([3, 5]))
        seen = [False, False]
        for i in sm.xrange(100):
            observed = aug.augment_image(self.base_img)
            if np.array_equal(observed, self.blur3x3):
                # FIX: was ``seen[0] += True``, which silently turned the
                # bool flag into an int; use plain assignment as in
                # test_k_is_tuple().
                seen[0] = True
            elif np.array_equal(observed, self.blur5x5):
                seen[1] = True
            else:
                raise Exception("Unexpected result in MedianBlur@2")
            if all(seen):
                break
        assert np.all(seen)
    def test_more_than_four_channels(self):
        # images with >4 channels must be handled without shape changes
        shapes = [
            (1, 1, 4),
            (1, 1, 5),
            (1, 1, 512),
            (1, 1, 513)
        ]
        for shape in shapes:
            with self.subTest(shape=shape):
                image = np.zeros(shape, dtype=np.uint8)
                image_aug = iaa.MedianBlur(k=3)(image=image)
                assert image_aug.shape == image.shape
    def test_zero_sized_axes(self):
        # degenerate shapes must not crash and must keep their shape
        shapes = [
            (0, 0),
            (0, 1),
            (1, 0),
            (0, 1, 0),
            (1, 0, 0),
            (0, 1, 1),
            (1, 0, 1)
        ]
        for shape in shapes:
            with self.subTest(shape=shape):
                image = np.zeros(shape, dtype=np.uint8)
                image_aug = iaa.MedianBlur(k=3)(image=image)
                assert image_aug.shape == image.shape
    def test_keypoints_not_changed(self):
        # blurring must leave keypoint coordinates untouched
        kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
               ia.Keypoint(x=2, y=2)]
        kpsoi = [ia.KeypointsOnImage(kps, shape=(11, 11, 1))]
        aug = iaa.MedianBlur(k=3)
        aug_det = aug.to_deterministic()
        observed = aug.augment_keypoints(kpsoi)
        expected = kpsoi
        assert keypoints_equal(observed, expected)
        observed = aug_det.augment_keypoints(kpsoi)
        expected = kpsoi
        assert keypoints_equal(observed, expected)
# TODO extend these tests
class TestBilateralBlur(unittest.TestCase):
    """Tests for BilateralBlur (currently only degenerate image shapes)."""

    def setUp(self):
        reseed()

    def test_zero_sized_axes(self):
        # Augmenting images with zero-sized axes must not crash and must
        # keep the shape unchanged.
        for shape in [(0, 0, 3), (0, 1, 3), (1, 0, 3)]:
            with self.subTest(shape=shape):
                img = np.zeros(shape, dtype=np.uint8)
                observed = iaa.BilateralBlur(3)(image=img)
                assert observed.shape == img.shape
class TestMotionBlur(unittest.TestCase):
    """Tests for the ``MotionBlur`` augmenter.

    Most tests sample convolution matrices via ``aug.matrix`` and compare
    them against hand-written expected kernels.

    NOTE(review): ``aug.matrix`` is called as
    ``matrix(image, nb_channels, rng)`` and the result is iterated as
    per-image lists of per-channel kernels — presumably the convolution
    matrix generator of the underlying Convolve augmenter; confirm against
    the MotionBlur implementation.
    """
    def setUp(self):
        reseed()
    def test_simple_parameters(self):
        # simple scenario: vertical 3-tap box kernel
        aug = iaa.MotionBlur(k=3, angle=0, direction=0.0)
        matrix_func = aug.matrix
        matrices = [
            matrix_func(
                np.zeros((128, 128, 3), dtype=np.uint8),
                3,
                iarandom.RNG(i)
            ) for i in range(10)
        ]
        expected = np.float32([
            [0, 1.0/3, 0],
            [0, 1.0/3, 0],
            [0, 1.0/3, 0]
        ])
        for matrices_image in matrices:
            for matrix_channel in matrices_image:
                assert np.allclose(matrix_channel, expected)
    def test_simple_parameters_angle_is_90(self):
        # 90deg angle: kernel rotated to horizontal
        aug = iaa.MotionBlur(k=3, angle=90, direction=0.0)
        matrix_func = aug.matrix
        matrices = [
            matrix_func(
                np.zeros((128, 128, 3), dtype=np.uint8),
                3,
                iarandom.RNG(i)
            ) for i in range(10)
        ]
        expected = np.float32([
            [0, 0, 0],
            [1.0/3, 1.0/3, 1.0/3],
            [0, 0, 0]
        ])
        for matrices_image in matrices:
            for matrix_channel in matrices_image:
                assert np.allclose(matrix_channel, expected)
    def test_simple_parameters_angle_is_45(self):
        # 45deg angle: kernel rotated to the anti-diagonal
        aug = iaa.MotionBlur(k=3, angle=45, direction=0.0, order=0)
        matrix_func = aug.matrix
        matrices = [
            matrix_func(
                np.zeros((128, 128, 3), dtype=np.uint8),
                3,
                iarandom.RNG(i)
            ) for i in range(10)
        ]
        expected = np.float32([
            [0, 0, 1.0/3],
            [0, 1.0/3, 0],
            [1.0/3, 0, 0]
        ])
        for matrices_image in matrices:
            for matrix_channel in matrices_image:
                assert np.allclose(matrix_channel, expected)
    def test_simple_parameters_angle_is_list(self):
        # random angle: both orientations must occur across samples
        aug = iaa.MotionBlur(k=3, angle=[0, 90], direction=0.0)
        matrix_func = aug.matrix
        matrices = [
            matrix_func(
                np.zeros((128, 128, 3), dtype=np.uint8),
                3,
                iarandom.RNG(i)
            ) for i in range(50)
        ]
        expected1 = np.float32([
            [0, 1.0/3, 0],
            [0, 1.0/3, 0],
            [0, 1.0/3, 0]
        ])
        expected2 = np.float32([
            [0, 0, 0],
            [1.0/3, 1.0/3, 1.0/3],
            [0, 0, 0],
        ])
        nb_seen = [0, 0]
        for matrices_image in matrices:
            # all channels of an image must share the same kernel
            assert np.allclose(matrices_image[0], matrices_image[1])
            assert np.allclose(matrices_image[1], matrices_image[2])
            for matrix_channel in matrices_image:
                if np.allclose(matrix_channel, expected1):
                    nb_seen[0] += 1
                elif np.allclose(matrix_channel, expected2):
                    nb_seen[1] += 1
        assert nb_seen[0] > 0
        assert nb_seen[1] > 0
    def test_k_is_5_angle_90(self):
        # 5x5 horizontal kernel
        aug = iaa.MotionBlur(k=5, angle=90, direction=0.0)
        matrix_func = aug.matrix
        matrices = [
            matrix_func(
                np.zeros((128, 128, 3), dtype=np.uint8),
                3,
                iarandom.RNG(i)
            ) for i in range(10)
        ]
        expected = np.float32([
            [0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0],
            [1.0/5, 1.0/5, 1.0/5, 1.0/5, 1.0/5],
            [0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0],
        ])
        for matrices_image in matrices:
            for matrix_channel in matrices_image:
                assert np.allclose(matrix_channel, expected)
    def test_k_is_list_angle_90(self):
        # random k: both kernel sizes must occur across samples
        aug = iaa.MotionBlur(k=[3, 5], angle=90, direction=0.0)
        matrix_func = aug.matrix
        matrices = [
            matrix_func(
                np.zeros((128, 128, 3), dtype=np.uint8),
                3,
                iarandom.RNG(i)
            ) for i in range(50)
        ]
        expected1 = np.float32([
            [0, 0, 0],
            [1.0/3, 1.0/3, 1.0/3],
            [0, 0, 0],
        ])
        expected2 = np.float32([
            [0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0],
            [1.0/5, 1.0/5, 1.0/5, 1.0/5, 1.0/5],
            [0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0],
        ])
        nb_seen = [0, 0]
        for matrices_image in matrices:
            assert np.allclose(matrices_image[0], matrices_image[1])
            assert np.allclose(matrices_image[1], matrices_image[2])
            for matrix_channel in matrices_image:
                if (matrix_channel.shape == expected1.shape
                        and np.allclose(matrix_channel, expected1)):
                    nb_seen[0] += 1
                elif (matrix_channel.shape == expected2.shape
                        and np.allclose(matrix_channel, expected2)):
                    nb_seen[1] += 1
        assert nb_seen[0] > 0
        assert nb_seen[1] > 0
    def test_failure_on_continuous_kernel_sizes(self):
        # k with choice [a, b, c, ...] must error in case of non-discrete
        # values
        got_exception = False
        try:
            _ = iaa.MotionBlur(k=[3, 3.5, 4])
        except Exception as exc:
            assert "to only contain integer" in str(exc)
            got_exception = True
        assert got_exception
    # TODO extend this to test sampled kernel sizes
    def test_k_is_tuple(self):
        # no error in case of (a, b), checks for #215
        aug = iaa.MotionBlur(k=(3, 7))
        for _ in range(10):
            _ = aug.augment_image(np.zeros((11, 11, 3), dtype=np.uint8))
    def test_direction_is_1(self):
        # direction 1.0: weight concentrated at the leading end
        aug = iaa.MotionBlur(k=3, angle=0, direction=1.0)
        matrix_func = aug.matrix
        matrices = [
            matrix_func(
                np.zeros((128, 128, 3), dtype=np.uint8),
                3,
                iarandom.RNG(i)
            ) for i in range(10)
        ]
        expected = np.float32([
            [0, 1.0/1.5, 0],
            [0, 0.5/1.5, 0],
            [0, 0.0/1.5, 0]
        ])
        for matrices_image in matrices:
            for matrix_channel in matrices_image:
                assert np.allclose(matrix_channel, expected, rtol=0, atol=1e-2)
    def test_direction_is_minus_1(self):
        # direction -1.0: weight concentrated at the trailing end
        aug = iaa.MotionBlur(k=3, angle=0, direction=-1.0)
        matrix_func = aug.matrix
        matrices = [
            matrix_func(
                np.zeros((128, 128, 3), dtype=np.uint8),
                3,
                iarandom.RNG(i)
            ) for i in range(10)
        ]
        expected = np.float32([
            [0, 0.0/1.5, 0],
            [0, 0.5/1.5, 0],
            [0, 1.0/1.5, 0]
        ])
        for matrices_image in matrices:
            for matrix_channel in matrices_image:
                assert np.allclose(matrix_channel, expected, rtol=0, atol=1e-2)
    def test_direction_is_list(self):
        # random direction: both asymmetric kernels must occur
        aug = iaa.MotionBlur(k=3, angle=[0, 90], direction=[-1.0, 1.0])
        matrix_func = aug.matrix
        matrices = [
            matrix_func(
                np.zeros((128, 128, 3), dtype=np.uint8),
                3,
                iarandom.RNG(i)
            ) for i in range(50)
        ]
        expected1 = np.float32([
            [0, 1.0/1.5, 0],
            [0, 0.5/1.5, 0],
            [0, 0.0/1.5, 0]
        ])
        expected2 = np.float32([
            [0, 0.0/1.5, 0],
            [0, 0.5/1.5, 0],
            [0, 1.0/1.5, 0]
        ])
        nb_seen = [0, 0]
        for matrices_image in matrices:
            assert np.allclose(matrices_image[0], matrices_image[1])
            assert np.allclose(matrices_image[1], matrices_image[2])
            for matrix_channel in matrices_image:
                if np.allclose(matrix_channel, expected1, rtol=0, atol=1e-2):
                    nb_seen[0] += 1
                elif np.allclose(matrix_channel, expected2, rtol=0, atol=1e-2):
                    nb_seen[1] += 1
        assert nb_seen[0] > 0
        assert nb_seen[1] > 0
    def test_k_is_3_angle_is_90_verify_results(self):
        # test of actual augmenter
        img = np.zeros((7, 7, 3), dtype=np.uint8)
        img[3-1:3+2, 3-1:3+2, :] = 255
        aug = iaa.MotionBlur(k=3, angle=90, direction=0.0)
        img_aug = aug.augment_image(img)
        v1 = (255*(1/3))
        v2 = (255*(1/3)) * 2
        v3 = (255*(1/3)) * 3
        expected = np.float32([
            [0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0],
            [0, v1, v2, v3, v2, v1, 0],
            [0, v1, v2, v3, v2, v1, 0],
            [0, v1, v2, v3, v2, v1, 0],
            [0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0]
        ]).astype(np.uint8)
        expected = np.tile(expected[..., np.newaxis], (1, 1, 3))
        assert np.allclose(img_aug, expected)
class TestMeanShiftBlur(unittest.TestCase):
def setUp(self):
reseed()
def test___init___defaults(self):
aug = iaa.MeanShiftBlur()
assert np.isclose(aug.spatial_window_radius.a.value, 5.0)
assert np.isclose(aug.spatial_window_radius.b.value, 40.0)
assert np.isclose(aug.color_window_radius.a.value, 5.0)
assert np.isclose(aug.color_window_radius.b.value, 40.0)
def test___init___custom(self):
aug = iaa.MeanShiftBlur(
spatial_radius=[1.0, 2.0, 3.0],
color_radius=iap.Deterministic(5)
)
assert np.allclose(aug.spatial_window_radius.a, [1.0, 2.0, 3.0])
assert aug.color_window_radius.value == 5
def test_draw_samples(self):
aug = iaa.MeanShiftBlur(
spatial_radius=[1.0, 2.0, 3.0],
color_radius=(1.0, 2.0)
)
batch = mock.Mock()
batch.nb_rows = 100
samples = aug._draw_samples(batch, iarandom.RNG(0))
assert np.all(
np.isclose(samples[0], 1.0)
| np.isclose(samples[0], 2.0)
| np.isclose(samples[0], 3.0)
)
assert np.all((1.0 <= samples[1]) | (samples[1] <= 2.0))
@mock.patch("imgaug.augmenters.blur.blur_mean_shift_")
def test_mocked(self, mock_ms):
aug = iaa.MeanShiftBlur(
spatial_radius=1,
color_radius=2
)
image = np.zeros((1, 1, 3), dtype=np.uint8)
mock_ms.return_value = image
_image_aug = aug(image=image)
kwargs = mock_ms.call_args_list[0][1]
assert mock_ms.call_count == 1
assert np.isclose(kwargs["spatial_window_radius"], 1.0)
assert np.isclose(kwargs["color_window_radius"], 2.0)
def test_batch_without_images(self):
aug = iaa.MeanShiftBlur()
kpsoi = ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(5, 5, 3))
kps_aug = aug(keypoints=kpsoi)
assert kps_aug.keypoints[0].x == 0
assert kps_aug.keypoints[0].y == 1
def test_get_parameters(self):
aug = iaa.MeanShiftBlur()
params = aug.get_parameters()
assert params[0] is aug.spatial_window_radius
assert params[1] is aug.color_window_radius
|
en
| 0.70988
|
# unittest only added in 3.4 self.subTest() # unittest.mock is not available in 2.7 (though unittest2 might contain it?) # fix execution of tests involving matplotlib on travis # note that self.assertWarningRegex does not exist in python 2.7 # bool # uint, int # float # prototype kernel, generated via: # mask = np.zeros((5, 5), dtype=np.int32) # mask[2, 2] = 1000 * 1000 # kernel = ndimage.gaussian_filter(mask, 0.75) # scipy # cv2 # bool # uint, int # float # accepts difference of 2.0, 4.0, 8.0, 16.0 (at 1, # 2, 4, 8 bytes, i.e. 8, 16, 32, 64 bit) # -- # blur of bool input at sigma=0.6 # -- # here we use a special mask and sigma as otherwise the only values # ending up with >0.5 would be the ones that # were before the blur already at >0.5 # prototype kernel, generated via: # mask = np.zeros((5, 5), dtype=np.float64) # mask[1, 0] = 255 # mask[2, 0] = 255 # mask[2, 2] = 255 # mask[2, 4] = 255 # mask[3, 0] = 255 # mask = ndimage.gaussian_filter(mask, 1.0, mode="mirror") # no blur, shouldnt change anything # weak blur of center pixel # images as numpy array # images as list # varying blur sigmas # bool # -- # blur of bool input at sigma=0.6 # -- # here we use a special mask and sigma as otherwise the only values # ending up with >0.5 would be the ones that # were before the blur already at >0.5 # prototype kernel, generated via: # mask = np.zeros((5, 5), dtype=np.float64) # mask[1, 0] = 255 # mask[2, 0] = 255 # mask[2, 2] = 255 # mask[2, 4] = 255 # mask[3, 0] = 255 # mask = ndimage.gaussian_filter(mask, 1.0, mode="mirror") # -- # blur of various dtypes at sigma=1.0 # and using an example value of 100 for int/uint/float and True for # bool # -- # prototype kernel, generated via: # mask = np.zeros((5, 5), dtype=np.float64) # mask[2, 2] = 100 # mask = ndimage.gaussian_filter(mask, 1.0, mode="mirror") # uint, int # float # -- # blur of various dtypes at sigma=0.4 # and using an example value of 100 for int/uint/float and True for # bool # -- # prototype kernel, generated 
via: # mask = np.zeros((5, 5), dtype=np.uint8) # mask[2, 2] = 100 # kernel = ndimage.gaussian_filter(mask, 0.4, mode="mirror") # uint, int # float # -- # blur of various dtypes at sigma=0.75 # and values being half-way between center and maximum for each dtype # The goal of this test is to verify that no major loss of resolution # happens for large dtypes. # Such inaccuracies appear for float64 if used. # -- # prototype kernel, generated via: # mask = np.zeros((5, 5), dtype=np.int32) # mask[2, 2] = 1000 * 1000 # kernel = ndimage.gaussian_filter(mask, 0.75) # uint, int # float # accepts difference of 2.0, 4.0, 8.0, 16.0 (at 1, 2, 4, 8 bytes, # i.e. 8, 16, 32, 64 bit) # assert failure on invalid dtypes # no blur, shouldnt change anything # k=3 # k=5 # k as (3, 4) # k as (3, 5) # k as stochastic parameter # k as ((3, 5), (3, 5)) # dont check sum here, because 0xX and Xx0 are all the same, i.e. much # higher sum than nb_iterations # bool # uint, int # float # -- # blur of various dtypes at k=3 # and using an example value of 100 for int/uint/float and True for # bool # -- # prototype mask # we place values in a 3x3 grid at positions (row=1, col=1) and # (row=2, col=2) (beginning with 0) # AverageBlur uses cv2.blur(), which uses BORDER_REFLECT_101 as its # default padding mode, # see https://docs.opencv.org/3.1.0/d2/de8/group__core__array.html # the matrix below shows the 3x3 grid and the padded row/col values # around it # [1, 0, 1, 0, 1] # [0, 0, 0, 0, 0] # [1, 0, 1, 0, 1] # [0, 0, 0, 1, 0] # [1, 0, 1, 0, 1] # bool # uint, int # cv2.blur() applies rounding for int/uint dtypes # float # -- # blur of various dtypes at k=3 # and values being half-way between center and maximum for each # dtype (bool is skipped as it doesnt make any sense here) # The goal of this test is to verify that no major loss of resolution # happens for large dtypes. # -- # prototype mask (see above) # uint, int # accepts difference of 4, 8, 16 (at 1, 2, 4 bytes, i.e. 
8, 16, # 32 bit) # float # accepts difference of 2.0, 4.0, 8.0, 16.0 (at 1, 2, 4, 8 bytes, # i.e. 8, 16, 32, 64 bit) # assert failure on invalid dtypes # no blur, shouldnt change anything # k=3 # k=5 # k as (3, 5) # k as stochastic parameter # TODO extend these tests # simple scenario # 90deg angle # 45deg angle # random angle # 5x5 # random k # k with choice [a, b, c, ...] must error in case of non-discrete # values # TODO extend this to test sampled kernel sizes # no error in case of (a, b), checks for #215 # direction 1.0 # direction -1.0 # random direction # test of actual augmenter
| 2.261842
| 2
|
5.SequentialDataProcessing/AdvancedRNN/model.py
|
sdhnshu/HandsOnDeepLearningWithPytorch
| 87
|
6628376
|
<filename>5.SequentialDataProcessing/AdvancedRNN/model.py
import torch
import torch.nn as nn
class Encoder(nn.Module):
def __init__(self, config):
super(Encoder, self).__init__()
self.config = config
if config.type == 'LSTM':
self.rnn = nn.LSTM(input_size=config.embed_dim, hidden_size=config.hidden_size,
num_layers=config.n_layers, dropout=config.dropout,
bidirectional=config.birnn)
elif config.type == 'GRU':
self.rnn = nn.GRU(input_size=config.embed_dim, hidden_size=config.hidden_size,
num_layers=config.n_layers, dropout=config.dropout,
bidirectional=config.birnn)
def forward(self, inputs):
batch_size = inputs.size()[1]
state_shape = self.config.cells, batch_size, self.config.hidden_size
h0 = c0 = inputs.new(*state_shape).zero_()
outputs, (ht, ct) = self.rnn(inputs, (h0, c0))
if not self.config.birnn:
return ht[-1]
else:
return ht[-2:].transpose(0, 1).contiguous().view(batch_size, -1)
class Merger(nn.Module):
def __init__(self, size, dropout=0.5):
super().__init__()
self.bn = nn.BatchNorm1d(size)
self.dropout = nn.Dropout(p=dropout)
def forward(self, data):
prem = data[0]
hypo = data[1]
diff = prem - hypo
prod = prem * hypo
cated_data = torch.cat([prem, hypo, diff, prod], 1)
return self.dropout(self.bn(cated_data))
class RNNClassifier(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.embed = nn.Embedding(config.vocab_dim, config.embed_dim)
self.encoder = Encoder(config)
self.classifier = nn.Sequential(
Merger(4 * config.hidden_size * config.n_layers, config.dropout),
nn.Linear(
4 * config.hidden_size * config.n_layers, config.fc1_dim),
nn.ReLU(),
nn.BatchNorm1d(config.fc1_dim),
nn.Dropout(p=config.dropout),
nn.Linear(config.fc1_dim, config.fc2_dim)
)
def forward(self, batch):
prem_embed = self.embed(batch.premise)
hypo_embed = self.embed(batch.hypothesis)
premise = self.encoder(prem_embed)
hypothesis = self.encoder(hypo_embed)
scores = self.classifier((premise, hypothesis))
return scores
|
<filename>5.SequentialDataProcessing/AdvancedRNN/model.py
import torch
import torch.nn as nn
class Encoder(nn.Module):
def __init__(self, config):
super(Encoder, self).__init__()
self.config = config
if config.type == 'LSTM':
self.rnn = nn.LSTM(input_size=config.embed_dim, hidden_size=config.hidden_size,
num_layers=config.n_layers, dropout=config.dropout,
bidirectional=config.birnn)
elif config.type == 'GRU':
self.rnn = nn.GRU(input_size=config.embed_dim, hidden_size=config.hidden_size,
num_layers=config.n_layers, dropout=config.dropout,
bidirectional=config.birnn)
def forward(self, inputs):
batch_size = inputs.size()[1]
state_shape = self.config.cells, batch_size, self.config.hidden_size
h0 = c0 = inputs.new(*state_shape).zero_()
outputs, (ht, ct) = self.rnn(inputs, (h0, c0))
if not self.config.birnn:
return ht[-1]
else:
return ht[-2:].transpose(0, 1).contiguous().view(batch_size, -1)
class Merger(nn.Module):
def __init__(self, size, dropout=0.5):
super().__init__()
self.bn = nn.BatchNorm1d(size)
self.dropout = nn.Dropout(p=dropout)
def forward(self, data):
prem = data[0]
hypo = data[1]
diff = prem - hypo
prod = prem * hypo
cated_data = torch.cat([prem, hypo, diff, prod], 1)
return self.dropout(self.bn(cated_data))
class RNNClassifier(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.embed = nn.Embedding(config.vocab_dim, config.embed_dim)
self.encoder = Encoder(config)
self.classifier = nn.Sequential(
Merger(4 * config.hidden_size * config.n_layers, config.dropout),
nn.Linear(
4 * config.hidden_size * config.n_layers, config.fc1_dim),
nn.ReLU(),
nn.BatchNorm1d(config.fc1_dim),
nn.Dropout(p=config.dropout),
nn.Linear(config.fc1_dim, config.fc2_dim)
)
def forward(self, batch):
prem_embed = self.embed(batch.premise)
hypo_embed = self.embed(batch.hypothesis)
premise = self.encoder(prem_embed)
hypothesis = self.encoder(hypo_embed)
scores = self.classifier((premise, hypothesis))
return scores
|
none
| 1
| 2.689804
| 3
|
|
arkane/outputTest.py
|
pm15ma/RMG-Py
| 4
|
6628377
|
<reponame>pm15ma/RMG-Py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
###############################################################################
# #
# RMG - Reaction Mechanism Generator #
# #
# Copyright (c) 2002-2020 Prof. <NAME> (<EMAIL>), #
# Prof. <NAME> (<EMAIL>) and the RMG Team (<EMAIL>) #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the 'Software'), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
###############################################################################
"""
This module contains unit tests of the :mod:`arkane.ess.gaussian` module.
"""
import os
import shutil
import unittest
from nose.plugins.attrib import attr
import rmgpy
from arkane.ess.gaussian import GaussianLog
from arkane.main import Arkane
from arkane.output import prettify, get_str_xyz
from rmgpy.species import Species
################################################################################
@attr('functional')
class OutputTest(unittest.TestCase):
"""
Contains functional tests for Arkane's output module.
"""
def test_prettify(self):
"""Test that the prettify function works for an Arkane job"""
benzyl_path = os.path.join(os.path.dirname(os.path.dirname(rmgpy.__file__)),
'examples', 'arkane', 'species', 'Benzyl')
arkane = Arkane(input_file=os.path.join(benzyl_path, 'input.py'), output_directory=benzyl_path)
arkane.plot = False
arkane.execute()
with open(os.path.join(benzyl_path, 'output.py'), 'r') as f:
lines = f.readlines()
self.assertIn('conformer(\n', lines)
self.assertIn(" E0 = (193.749, 'kJ/mol'),\n", lines)
self.assertIn('thermo(\n', lines)
self.assertIn(" Cp0 = (33.2579, 'J/(mol*K)'),\n", lines)
@classmethod
def tearDownClass(cls):
"""A function that is run ONCE after all unit tests in this class."""
benzyl_path = os.path.join(os.path.dirname(os.path.dirname(rmgpy.__file__)),
'examples', 'arkane', 'species', 'Benzyl')
extensions_to_delete = ['pdf', 'csv', 'txt', 'inp']
files_to_delete = ['arkane.log', 'output.py']
for name in os.listdir(benzyl_path):
item_path = os.path.join(benzyl_path, name)
if os.path.isfile(item_path):
extension = name.split('.')[-1]
if name in files_to_delete or extension in extensions_to_delete:
os.remove(item_path)
else:
if os.path.split(item_path)[-1] in ['r0']:
continue
# This is a sub-directory. remove.
shutil.rmtree(item_path)
class OutputUnitTest(unittest.TestCase):
"""
Contains unit tests for the Arkane's output module.
"""
@classmethod
def setUpClass(cls):
"""
A method that is run before all unit tests in this class.
"""
cls.data_path = os.path.join(os.path.dirname(__file__), 'data')
def test_prettify(self):
"""Test that ``prettify`` returns the expected result"""
input_str = ("conformer(label='C7H7', E0=(193.749,'kJ/mol'), modes=[IdealGasTranslation(mass=(91.0548,'amu')), "
"NonlinearRotor(inertia=([91.0567,186.675,277.733],'amu*angstrom^2'), symmetry=2), "
"HarmonicOscillator(frequencies=([199.381,360.536,413.795,480.347,536.285,630.723,687.118,709.613,"
"776.662,830.404,834.386,901.841,973.498,975.148,993.349,998.606,1040.14,1120.69,1179.22,1189.07,"
"1292.86,1332.91,1357.18,1479.46,1495.36,1507.91,1583.14,1604.63,3156.85,3170.22,3172.78,3185.05,"
"3189.8,3203.23,3253.99],'cm^-1')), HinderedRotor(inertia=(1.70013,'amu*angstrom^2'), symmetry=2, "
"fourier=([[-0.315923,-27.7665,0.177678,3.2437,0.0509515],[-0.00164953,-0.0021925,-0.00386396,"
"-0.000912068,0.00274206]],'kJ/mol'), quantum=True, semiclassical=False)], spin_multiplicity=2, "
"optical_isomers=1)")
expected_output = """conformer(
label = 'C7H7',
E0 = (193.749, 'kJ/mol'),
modes = [
IdealGasTranslation(mass=(91.0548, 'amu')),
NonlinearRotor(
inertia = ([91.0567, 186.675, 277.733], 'amu*angstrom^2'),
symmetry = 2,
),
HarmonicOscillator(
frequencies = ([199.381, 360.536, 413.795, 480.347, 536.285, 630.723, 687.118, 709.613, 776.662, 830.404, 834.386, 901.841, 973.498, 975.148, 993.349, 998.606, 1040.14, 1120.69, 1179.22, 1189.07, 1292.86, 1332.91, 1357.18, 1479.46, 1495.36, 1507.91, 1583.14, 1604.63, 3156.85, 3170.22, 3172.78, 3185.05, 3189.8, 3203.23, 3253.99], 'cm^-1'),
),
HinderedRotor(
inertia = (1.70013, 'amu*angstrom^2'),
symmetry = 2,
fourier = (
[
[-0.315923, -27.7665, 0.177678, 3.2437, 0.0509515],
[-0.00164953, -0.0021925, -0.00386396, -0.000912068, 0.00274206],
],
'kJ/mol',
),
quantum = None,
semiclassical = None,
),
],
spin_multiplicity = 2,
optical_isomers = 1,
)"""
self.assertEqual(prettify(input_str), expected_output)
def test_get_str_xyz(self):
"""Test generating an xyz string from the species.conformer object"""
log = GaussianLog(os.path.join(self.data_path, 'gaussian', 'ethylene_G3.log'))
conformer = log.load_conformer()[0]
coords, number, mass = log.load_geometry()
conformer.coordinates, conformer.number, conformer.mass = (coords, "angstroms"), number, (mass, "amu")
spc1 = Species(smiles='C=C')
spc1.conformer = conformer
xyz_str = get_str_xyz(spc1)
expected_xyz_str = """C 0.00545100 0.00000000 0.00339700
H 0.00118700 0.00000000 1.08823200
H 0.97742900 0.00000000 -0.47841600
C -1.12745800 0.00000000 -0.70256500
H -1.12319800 0.00000000 -1.78740100
H -2.09943900 0.00000000 -0.22075700"""
self.assertEqual(xyz_str, expected_xyz_str)
################################################################################
if __name__ == '__main__':
unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
###############################################################################
# #
# RMG - Reaction Mechanism Generator #
# #
# Copyright (c) 2002-2020 Prof. <NAME> (<EMAIL>), #
# Prof. <NAME> (<EMAIL>) and the RMG Team (<EMAIL>) #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the 'Software'), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
###############################################################################
"""
This module contains unit tests of the :mod:`arkane.ess.gaussian` module.
"""
import os
import shutil
import unittest
from nose.plugins.attrib import attr
import rmgpy
from arkane.ess.gaussian import GaussianLog
from arkane.main import Arkane
from arkane.output import prettify, get_str_xyz
from rmgpy.species import Species
################################################################################
@attr('functional')
class OutputTest(unittest.TestCase):
"""
Contains functional tests for Arkane's output module.
"""
def test_prettify(self):
"""Test that the prettify function works for an Arkane job"""
benzyl_path = os.path.join(os.path.dirname(os.path.dirname(rmgpy.__file__)),
'examples', 'arkane', 'species', 'Benzyl')
arkane = Arkane(input_file=os.path.join(benzyl_path, 'input.py'), output_directory=benzyl_path)
arkane.plot = False
arkane.execute()
with open(os.path.join(benzyl_path, 'output.py'), 'r') as f:
lines = f.readlines()
self.assertIn('conformer(\n', lines)
self.assertIn(" E0 = (193.749, 'kJ/mol'),\n", lines)
self.assertIn('thermo(\n', lines)
self.assertIn(" Cp0 = (33.2579, 'J/(mol*K)'),\n", lines)
@classmethod
def tearDownClass(cls):
"""A function that is run ONCE after all unit tests in this class."""
benzyl_path = os.path.join(os.path.dirname(os.path.dirname(rmgpy.__file__)),
'examples', 'arkane', 'species', 'Benzyl')
extensions_to_delete = ['pdf', 'csv', 'txt', 'inp']
files_to_delete = ['arkane.log', 'output.py']
for name in os.listdir(benzyl_path):
item_path = os.path.join(benzyl_path, name)
if os.path.isfile(item_path):
extension = name.split('.')[-1]
if name in files_to_delete or extension in extensions_to_delete:
os.remove(item_path)
else:
if os.path.split(item_path)[-1] in ['r0']:
continue
# This is a sub-directory. remove.
shutil.rmtree(item_path)
class OutputUnitTest(unittest.TestCase):
"""
Contains unit tests for the Arkane's output module.
"""
@classmethod
def setUpClass(cls):
"""
A method that is run before all unit tests in this class.
"""
cls.data_path = os.path.join(os.path.dirname(__file__), 'data')
def test_prettify(self):
"""Test that ``prettify`` returns the expected result"""
input_str = ("conformer(label='C7H7', E0=(193.749,'kJ/mol'), modes=[IdealGasTranslation(mass=(91.0548,'amu')), "
"NonlinearRotor(inertia=([91.0567,186.675,277.733],'amu*angstrom^2'), symmetry=2), "
"HarmonicOscillator(frequencies=([199.381,360.536,413.795,480.347,536.285,630.723,687.118,709.613,"
"776.662,830.404,834.386,901.841,973.498,975.148,993.349,998.606,1040.14,1120.69,1179.22,1189.07,"
"1292.86,1332.91,1357.18,1479.46,1495.36,1507.91,1583.14,1604.63,3156.85,3170.22,3172.78,3185.05,"
"3189.8,3203.23,3253.99],'cm^-1')), HinderedRotor(inertia=(1.70013,'amu*angstrom^2'), symmetry=2, "
"fourier=([[-0.315923,-27.7665,0.177678,3.2437,0.0509515],[-0.00164953,-0.0021925,-0.00386396,"
"-0.000912068,0.00274206]],'kJ/mol'), quantum=True, semiclassical=False)], spin_multiplicity=2, "
"optical_isomers=1)")
expected_output = """conformer(
label = 'C7H7',
E0 = (193.749, 'kJ/mol'),
modes = [
IdealGasTranslation(mass=(91.0548, 'amu')),
NonlinearRotor(
inertia = ([91.0567, 186.675, 277.733], 'amu*angstrom^2'),
symmetry = 2,
),
HarmonicOscillator(
frequencies = ([199.381, 360.536, 413.795, 480.347, 536.285, 630.723, 687.118, 709.613, 776.662, 830.404, 834.386, 901.841, 973.498, 975.148, 993.349, 998.606, 1040.14, 1120.69, 1179.22, 1189.07, 1292.86, 1332.91, 1357.18, 1479.46, 1495.36, 1507.91, 1583.14, 1604.63, 3156.85, 3170.22, 3172.78, 3185.05, 3189.8, 3203.23, 3253.99], 'cm^-1'),
),
HinderedRotor(
inertia = (1.70013, 'amu*angstrom^2'),
symmetry = 2,
fourier = (
[
[-0.315923, -27.7665, 0.177678, 3.2437, 0.0509515],
[-0.00164953, -0.0021925, -0.00386396, -0.000912068, 0.00274206],
],
'kJ/mol',
),
quantum = None,
semiclassical = None,
),
],
spin_multiplicity = 2,
optical_isomers = 1,
)"""
self.assertEqual(prettify(input_str), expected_output)
def test_get_str_xyz(self):
"""Test generating an xyz string from the species.conformer object"""
log = GaussianLog(os.path.join(self.data_path, 'gaussian', 'ethylene_G3.log'))
conformer = log.load_conformer()[0]
coords, number, mass = log.load_geometry()
conformer.coordinates, conformer.number, conformer.mass = (coords, "angstroms"), number, (mass, "amu")
spc1 = Species(smiles='C=C')
spc1.conformer = conformer
xyz_str = get_str_xyz(spc1)
expected_xyz_str = """C 0.00545100 0.00000000 0.00339700
H 0.00118700 0.00000000 1.08823200
H 0.97742900 0.00000000 -0.47841600
C -1.12745800 0.00000000 -0.70256500
H -1.12319800 0.00000000 -1.78740100
H -2.09943900 0.00000000 -0.22075700"""
self.assertEqual(xyz_str, expected_xyz_str)
################################################################################
if __name__ == '__main__':
unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
|
en
| 0.545296
|
#!/usr/bin/env python3 # -*- coding: utf-8 -*- ############################################################################### # # # RMG - Reaction Mechanism Generator # # # # Copyright (c) 2002-2020 Prof. <NAME> (<EMAIL>), # # Prof. <NAME> (<EMAIL>) and the RMG Team (<EMAIL>) # # # # Permission is hereby granted, free of charge, to any person obtaining a # # copy of this software and associated documentation files (the 'Software'), # # to deal in the Software without restriction, including without limitation # # the rights to use, copy, modify, merge, publish, distribute, sublicense, # # and/or sell copies of the Software, and to permit persons to whom the # # Software is furnished to do so, subject to the following conditions: # # # # The above copyright notice and this permission notice shall be included in # # all copies or substantial portions of the Software. # # # # THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # # DEALINGS IN THE SOFTWARE. # # # ############################################################################### This module contains unit tests of the :mod:`arkane.ess.gaussian` module. ################################################################################ Contains functional tests for Arkane's output module. Test that the prettify function works for an Arkane job A function that is run ONCE after all unit tests in this class. # This is a sub-directory. remove. Contains unit tests for the Arkane's output module. A method that is run before all unit tests in this class. 
Test that ``prettify`` returns the expected result conformer( label = 'C7H7', E0 = (193.749, 'kJ/mol'), modes = [ IdealGasTranslation(mass=(91.0548, 'amu')), NonlinearRotor( inertia = ([91.0567, 186.675, 277.733], 'amu*angstrom^2'), symmetry = 2, ), HarmonicOscillator( frequencies = ([199.381, 360.536, 413.795, 480.347, 536.285, 630.723, 687.118, 709.613, 776.662, 830.404, 834.386, 901.841, 973.498, 975.148, 993.349, 998.606, 1040.14, 1120.69, 1179.22, 1189.07, 1292.86, 1332.91, 1357.18, 1479.46, 1495.36, 1507.91, 1583.14, 1604.63, 3156.85, 3170.22, 3172.78, 3185.05, 3189.8, 3203.23, 3253.99], 'cm^-1'), ), HinderedRotor( inertia = (1.70013, 'amu*angstrom^2'), symmetry = 2, fourier = ( [ [-0.315923, -27.7665, 0.177678, 3.2437, 0.0509515], [-0.00164953, -0.0021925, -0.00386396, -0.000912068, 0.00274206], ], 'kJ/mol', ), quantum = None, semiclassical = None, ), ], spin_multiplicity = 2, optical_isomers = 1, ) Test generating an xyz string from the species.conformer object C 0.00545100 0.00000000 0.00339700 H 0.00118700 0.00000000 1.08823200 H 0.97742900 0.00000000 -0.47841600 C -1.12745800 0.00000000 -0.70256500 H -1.12319800 0.00000000 -1.78740100 H -2.09943900 0.00000000 -0.22075700 ################################################################################
| 1.492736
| 1
|
tests/integrations/conftest.py
|
vincenthcui/sentry-python
| 0
|
6628378
|
import pytest
import sentry_sdk
@pytest.fixture
def capture_exceptions(monkeypatch):
def inner():
errors = set()
old_capture_event = sentry_sdk.Hub.current.capture_event
def capture_event(event, hint=None):
if hint:
if "exc_info" in hint:
error = hint["exc_info"][1]
errors.add(error)
return old_capture_event(event, hint=hint)
monkeypatch.setattr(sentry_sdk.Hub.current, "capture_event", capture_event)
return errors
return inner
|
import pytest
import sentry_sdk
@pytest.fixture
def capture_exceptions(monkeypatch):
def inner():
errors = set()
old_capture_event = sentry_sdk.Hub.current.capture_event
def capture_event(event, hint=None):
if hint:
if "exc_info" in hint:
error = hint["exc_info"][1]
errors.add(error)
return old_capture_event(event, hint=hint)
monkeypatch.setattr(sentry_sdk.Hub.current, "capture_event", capture_event)
return errors
return inner
|
none
| 1
| 2.064346
| 2
|
|
attributes_and_methods/project_hotel/test.py
|
ivan-yosifov88/python_oop
| 1
|
6628379
|
from project_hotel.hotel import Hotel
from project_hotel.room import Room
hotel = Hotel.from_stars(5)
first_room = Room(1, 3)
second_room = Room(2, 2)
third_room = Room(3, 1)
hotel.add_room(first_room)
hotel.add_room(second_room)
hotel.add_room(third_room)
hotel.take_room(1, 4)
hotel.take_room(1, 2)
hotel.take_room(3, 1)
hotel.take_room(3, 1)
hotel.print_status()
|
from project_hotel.hotel import Hotel
from project_hotel.room import Room
hotel = Hotel.from_stars(5)
first_room = Room(1, 3)
second_room = Room(2, 2)
third_room = Room(3, 1)
hotel.add_room(first_room)
hotel.add_room(second_room)
hotel.add_room(third_room)
hotel.take_room(1, 4)
hotel.take_room(1, 2)
hotel.take_room(3, 1)
hotel.take_room(3, 1)
hotel.print_status()
|
none
| 1
| 2.356933
| 2
|
|
PyHEADTAIL/gpu/gpu_utils.py
|
fsoubelet/PyHEADTAIL
| 0
|
6628380
|
'''
GPU Utils
Memory pool, ...
This could also be the place to store the context, device, streams, etc...
The module is automatically a singleton
@author <NAME>
'''
use_streams = False
import atexit
from itertools import cycle
try:
import pycuda.tools
import pycuda.driver as drv
import pycuda.elementwise
has_pycuda = True
try:
drv.mem_get_info()
import pycuda.autoinit
except pycuda._driver.LogicError: #the error pycuda throws if no context initialized
# print ('No context initialized. Please import pycuda.autoinit at the '
# 'beginning of your script if you want to use GPU functionality')
has_pycuda = False
except ImportError:
has_pycuda = False
################################################################################
if has_pycuda:
device = drv.Context.get_device() #pycuda.autoinit.device
context = drv.Context.get_current() #pycuda.autoinit.context
memory_pool = pycuda.tools.DeviceMemoryPool()
import skcuda.misc #s
skcuda.misc.init(allocator=memory_pool.allocate)
atexit.register(skcuda.misc.shutdown)
n_streams = 4
n_streams_emittance = 6
if use_streams:
streams = [drv.Stream() for i in range(n_streams)]
stream_emittance = [drv.Stream() for i in range(n_streams_emittance)]
else:
streams = [None] * n_streams
stream_emittance = [None] * n_streams_emittance
stream_pool = cycle(streams)
def dummy_1(gpuarr, stream=None):
__dummy1(gpuarr, stream=stream)
return gpuarr
def dummy_2(gpuarr, stream=None):
__dummy2(gpuarr, stream=stream)
return gpuarr
else:
streams = [] # this way nothing bad happens if 'for stream in streams: sync'
################################################################################
|
'''
GPU Utils
Memory pool, ...
This could also be the place to store the context, device, streams, etc...
The module is automatically a singleton
@author <NAME>
'''
use_streams = False
import atexit
from itertools import cycle
try:
import pycuda.tools
import pycuda.driver as drv
import pycuda.elementwise
has_pycuda = True
try:
drv.mem_get_info()
import pycuda.autoinit
except pycuda._driver.LogicError: #the error pycuda throws if no context initialized
# print ('No context initialized. Please import pycuda.autoinit at the '
# 'beginning of your script if you want to use GPU functionality')
has_pycuda = False
except ImportError:
has_pycuda = False
################################################################################
if has_pycuda:
device = drv.Context.get_device() #pycuda.autoinit.device
context = drv.Context.get_current() #pycuda.autoinit.context
memory_pool = pycuda.tools.DeviceMemoryPool()
import skcuda.misc #s
skcuda.misc.init(allocator=memory_pool.allocate)
atexit.register(skcuda.misc.shutdown)
n_streams = 4
n_streams_emittance = 6
if use_streams:
streams = [drv.Stream() for i in range(n_streams)]
stream_emittance = [drv.Stream() for i in range(n_streams_emittance)]
else:
streams = [None] * n_streams
stream_emittance = [None] * n_streams_emittance
stream_pool = cycle(streams)
def dummy_1(gpuarr, stream=None):
__dummy1(gpuarr, stream=stream)
return gpuarr
def dummy_2(gpuarr, stream=None):
__dummy2(gpuarr, stream=stream)
return gpuarr
else:
streams = [] # this way nothing bad happens if 'for stream in streams: sync'
################################################################################
|
en
| 0.349854
|
GPU Utils Memory pool, ... This could also be the place to store the context, device, streams, etc... The module is automatically a singleton @author <NAME> #the error pycuda throws if no context initialized # print ('No context initialized. Please import pycuda.autoinit at the ' # 'beginning of your script if you want to use GPU functionality') ################################################################################ #pycuda.autoinit.device #pycuda.autoinit.context #s # this way nothing bad happens if 'for stream in streams: sync' ################################################################################
| 2.619119
| 3
|
esxi_cert_tool/vsanapiutils.py
|
cleeistaken/esxi_certtool
| 0
|
6628381
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2016-2019 VMware, Inc. All rights reserved.
This module defines basic helper functions used in the sample codes
"""
__author__ = 'VMware, Inc'
import sys
import ssl
if (sys.version_info[0] == 3):
from urllib.request import urlopen
else:
from urllib2 import urlopen
from xml.dom import minidom
from pyVmomi import vim, vmodl, SoapStubAdapter, VmomiSupport
# Import the vSAN API python bindings
VSAN_API_VC_SERVICE_ENDPOINT = '/vsanHealth'
VSAN_API_ESXI_SERVICE_ENDPOINT = '/vsan'
VSAN_VMODL_VERSION = "vsan.version.version3"
# Construct a stub for vSAN API access using vCenter or ESXi sessions from
# existing stubs. Corresponding vCenter or ESXi service endpoint is required.
# vCenter service endpoint is used by default.
def valid_ipv6(addr):
import socket
try:
socket.inet_pton(socket.AF_INET6, addr)
except socket.error:
return False
return True
def _GetVsanStub(
stub, endpoint=VSAN_API_VC_SERVICE_ENDPOINT,
context=None, version='vim.version.version11'
):
index = stub.host.rfind(':')
if valid_ipv6(stub.host[:index][1:-1]):
hostname = stub.host[:index][1:-1]
else:
hostname = stub.host[:index]
vsanStub = SoapStubAdapter(
host=hostname,
path=endpoint,
version=version,
sslContext=context
)
vsanStub.cookie = stub.cookie
return vsanStub
# Construct a stub for access vCenter side vSAN APIs.
def GetVsanVcStub(stub, context=None, version=VSAN_VMODL_VERSION):
return _GetVsanStub(stub, endpoint=VSAN_API_VC_SERVICE_ENDPOINT,
context=context, version=version)
# Construct a stub for access ESXi side vSAN APIs.
def GetVsanEsxStub(stub, context=None, version=VSAN_VMODL_VERSION):
return _GetVsanStub(stub, endpoint=VSAN_API_ESXI_SERVICE_ENDPOINT,
context=context, version=version)
# Construct a stub for access ESXi side vSAN APIs.
def GetVsanVcMos(vcStub, context=None, version=VSAN_VMODL_VERSION):
vsanStub = GetVsanVcStub(vcStub, context, version=version)
vcMos = {
'vsan-disk-management-system': vim.cluster.VsanVcDiskManagementSystem(
'vsan-disk-management-system',
vsanStub
),
'vsan-stretched-cluster-system': vim.cluster.VsanVcStretchedClusterSystem(
'vsan-stretched-cluster-system',
vsanStub
),
'vsan-cluster-config-system': vim.cluster.VsanVcClusterConfigSystem(
'vsan-cluster-config-system',
vsanStub
),
'vsan-performance-manager': vim.cluster.VsanPerformanceManager(
'vsan-performance-manager',
vsanStub
),
'vsan-cluster-health-system': vim.cluster.VsanVcClusterHealthSystem(
'vsan-cluster-health-system',
vsanStub
),
'vsan-upgrade-systemex': vim.VsanUpgradeSystemEx(
'vsan-upgrade-systemex',
vsanStub
),
'vsan-cluster-space-report-system': vim.cluster.VsanSpaceReportSystem(
'vsan-cluster-space-report-system',
vsanStub
),
'vsan-cluster-object-system': vim.cluster.VsanObjectSystem(
'vsan-cluster-object-system',
vsanStub
),
'vsan-cluster-iscsi-target-system': vim.cluster.VsanIscsiTargetSystem(
'vsan-cluster-iscsi-target-system',
vsanStub
),
'vsan-vcsa-deployer-system': vim.host.VsanVcsaDeployerSystem(
'vsan-vcsa-deployer-system',
vsanStub
),
'vsan-vds-system': vim.vsan.VsanVdsSystem('vsan-vds-system', vsanStub),
'vsan-vc-capability-system': vim.cluster.VsanCapabilitySystem(
'vsan-vc-capability-system', vsanStub),
'vsan-mass-collector': vim.VsanMassCollector('vsan-mass-collector',
vsanStub),
'vsan-phonehome-system': vim.VsanPhoneHomeSystem('vsan-phonehome-system',
vsanStub),
'vsan-vum-system': vim.cluster.VsanVumSystem('vsan-vum-system', vsanStub),
'vsan-cluster-resource-check-system': vim.vsan.VsanResourceCheckSystem(
'vsan-cluster-resource-check-system',
vsanStub),
'cns-volume-manager': vim.cns.VolumeManager('cns-volume-manager',
vsanStub
),
}
return vcMos
# Construct a stub for access ESXi side vSAN APIs.
def GetVsanEsxMos(esxStub, context=None, version=VSAN_VMODL_VERSION):
vsanStub = GetVsanEsxStub(esxStub, context, version=version)
esxMos = {
'vsan-performance-manager': vim.cluster.VsanPerformanceManager(
'vsan-performance-manager',
vsanStub
),
'vsan-cluster-health-system': vim.cluster.VsanVcClusterHealthSystem(
'vsan-cluster-health-system',
vsanStub
),
'ha-vsan-health-system': vim.host.VsanHealthSystem(
'ha-vsan-health-system',
vsanStub
),
'vsan-object-system': vim.cluster.VsanObjectSystem(
'vsan-object-system',
vsanStub
),
'vsan-vcsa-deployer-system': vim.host.VsanVcsaDeployerSystem(
'vsan-vcsa-deployer-system',
vsanStub
),
'vsan-capability-system': vim.cluster.VsanCapabilitySystem(
'vsan-capability-system', vsanStub),
'vsanSystemEx': vim.host.VsanSystemEx('vsanSystemEx', vsanStub),
'vsan-update-manager': vim.host.VsanUpdateManager('vsan-update-manager',
vsanStub),
'vsan-cluster-iscsi-target-system': vim.cluster.VsanIscsiTargetSystem(
'vsan-cluster-iscsi-target-system',
vsanStub
),
}
return esxMos
# Convert a vSAN Task to a Task MO binding to vCenter service.
def ConvertVsanTaskToVcTask(vsanTask, vcStub):
vcTask = vim.Task(vsanTask._moId, vcStub)
return vcTask
# Wait for the vCenter task and returns after tasks are completed.
def WaitForTasks(tasks, si):
pc = si.content.propertyCollector
taskList = [str(task) for task in tasks]
# Create filter
objSpecs = [vmodl.query.PropertyCollector.ObjectSpec(obj=task)
for task in tasks]
propSpec = vmodl.query.PropertyCollector.PropertySpec(
type=vim.Task, pathSet=[], all=True)
filterSpec = vmodl.query.PropertyCollector.FilterSpec()
filterSpec.objectSet = objSpecs
filterSpec.propSet = [propSpec]
filter_ = pc.CreateFilter(filterSpec, True)
try:
version, state = None, None
# Loop looking for updates till the state moves to a completed state.
while len(taskList):
update = pc.WaitForUpdates(version)
for filterSet in update.filterSet:
for objSet in filterSet.objectSet:
task = objSet.obj
for change in objSet.changeSet:
if change.name == 'info':
state = change.val.state
elif change.name == 'info.state':
state = change.val
else:
continue
if not str(task) in taskList:
continue
if state == vim.TaskInfo.State.success:
# Remove task from taskList
taskList.remove(str(task))
elif state == vim.TaskInfo.State.error:
raise task.info.error
# Move to next version
version = update.version
finally:
if filter_:
filter_.Destroy()
# Get the VMODL version by checking the existence of vSAN namespace.
def GetLatestVmodlVersion(hostname):
try:
vsanVmodlUrl = 'https://%s/sdk/vsanServiceVersions.xml' % hostname
if (hasattr(ssl, '_create_unverified_context') and
hasattr(ssl, '_create_default_https_context')):
ssl._create_default_https_context = ssl._create_unverified_context
xmldoc = minidom.parse(urlopen(vsanVmodlUrl, timeout=5))
for element in xmldoc.getElementsByTagName('name'):
if (element.firstChild.nodeValue == "urn:vsan"):
versions = xmldoc.getElementsByTagName('version')
versionId = versions[0].firstChild.nodeValue
if versionId == '6.6':
return 'vsan.version.version3'
else:
return VmomiSupport.newestVersions.Get('vsan')
else:
return VmomiSupport.newestVersions.Get('vim')
except Exception as e:
# Any exception like failing to open the XML or failed to parse the
# the content should lead to the returning of namespace with vim.
return VmomiSupport.newestVersions.Get('vim')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2016-2019 VMware, Inc. All rights reserved.
This module defines basic helper functions used in the sample codes
"""
__author__ = 'VMware, Inc'
import sys
import ssl
if (sys.version_info[0] == 3):
from urllib.request import urlopen
else:
from urllib2 import urlopen
from xml.dom import minidom
from pyVmomi import vim, vmodl, SoapStubAdapter, VmomiSupport
# Import the vSAN API python bindings
VSAN_API_VC_SERVICE_ENDPOINT = '/vsanHealth'
VSAN_API_ESXI_SERVICE_ENDPOINT = '/vsan'
VSAN_VMODL_VERSION = "vsan.version.version3"
# Construct a stub for vSAN API access using vCenter or ESXi sessions from
# existing stubs. Corresponding vCenter or ESXi service endpoint is required.
# vCenter service endpoint is used by default.
def valid_ipv6(addr):
import socket
try:
socket.inet_pton(socket.AF_INET6, addr)
except socket.error:
return False
return True
def _GetVsanStub(
stub, endpoint=VSAN_API_VC_SERVICE_ENDPOINT,
context=None, version='vim.version.version11'
):
index = stub.host.rfind(':')
if valid_ipv6(stub.host[:index][1:-1]):
hostname = stub.host[:index][1:-1]
else:
hostname = stub.host[:index]
vsanStub = SoapStubAdapter(
host=hostname,
path=endpoint,
version=version,
sslContext=context
)
vsanStub.cookie = stub.cookie
return vsanStub
# Construct a stub for access vCenter side vSAN APIs.
def GetVsanVcStub(stub, context=None, version=VSAN_VMODL_VERSION):
return _GetVsanStub(stub, endpoint=VSAN_API_VC_SERVICE_ENDPOINT,
context=context, version=version)
# Construct a stub for access ESXi side vSAN APIs.
def GetVsanEsxStub(stub, context=None, version=VSAN_VMODL_VERSION):
return _GetVsanStub(stub, endpoint=VSAN_API_ESXI_SERVICE_ENDPOINT,
context=context, version=version)
# Construct a stub for access ESXi side vSAN APIs.
def GetVsanVcMos(vcStub, context=None, version=VSAN_VMODL_VERSION):
vsanStub = GetVsanVcStub(vcStub, context, version=version)
vcMos = {
'vsan-disk-management-system': vim.cluster.VsanVcDiskManagementSystem(
'vsan-disk-management-system',
vsanStub
),
'vsan-stretched-cluster-system': vim.cluster.VsanVcStretchedClusterSystem(
'vsan-stretched-cluster-system',
vsanStub
),
'vsan-cluster-config-system': vim.cluster.VsanVcClusterConfigSystem(
'vsan-cluster-config-system',
vsanStub
),
'vsan-performance-manager': vim.cluster.VsanPerformanceManager(
'vsan-performance-manager',
vsanStub
),
'vsan-cluster-health-system': vim.cluster.VsanVcClusterHealthSystem(
'vsan-cluster-health-system',
vsanStub
),
'vsan-upgrade-systemex': vim.VsanUpgradeSystemEx(
'vsan-upgrade-systemex',
vsanStub
),
'vsan-cluster-space-report-system': vim.cluster.VsanSpaceReportSystem(
'vsan-cluster-space-report-system',
vsanStub
),
'vsan-cluster-object-system': vim.cluster.VsanObjectSystem(
'vsan-cluster-object-system',
vsanStub
),
'vsan-cluster-iscsi-target-system': vim.cluster.VsanIscsiTargetSystem(
'vsan-cluster-iscsi-target-system',
vsanStub
),
'vsan-vcsa-deployer-system': vim.host.VsanVcsaDeployerSystem(
'vsan-vcsa-deployer-system',
vsanStub
),
'vsan-vds-system': vim.vsan.VsanVdsSystem('vsan-vds-system', vsanStub),
'vsan-vc-capability-system': vim.cluster.VsanCapabilitySystem(
'vsan-vc-capability-system', vsanStub),
'vsan-mass-collector': vim.VsanMassCollector('vsan-mass-collector',
vsanStub),
'vsan-phonehome-system': vim.VsanPhoneHomeSystem('vsan-phonehome-system',
vsanStub),
'vsan-vum-system': vim.cluster.VsanVumSystem('vsan-vum-system', vsanStub),
'vsan-cluster-resource-check-system': vim.vsan.VsanResourceCheckSystem(
'vsan-cluster-resource-check-system',
vsanStub),
'cns-volume-manager': vim.cns.VolumeManager('cns-volume-manager',
vsanStub
),
}
return vcMos
# Construct a stub for access ESXi side vSAN APIs.
def GetVsanEsxMos(esxStub, context=None, version=VSAN_VMODL_VERSION):
vsanStub = GetVsanEsxStub(esxStub, context, version=version)
esxMos = {
'vsan-performance-manager': vim.cluster.VsanPerformanceManager(
'vsan-performance-manager',
vsanStub
),
'vsan-cluster-health-system': vim.cluster.VsanVcClusterHealthSystem(
'vsan-cluster-health-system',
vsanStub
),
'ha-vsan-health-system': vim.host.VsanHealthSystem(
'ha-vsan-health-system',
vsanStub
),
'vsan-object-system': vim.cluster.VsanObjectSystem(
'vsan-object-system',
vsanStub
),
'vsan-vcsa-deployer-system': vim.host.VsanVcsaDeployerSystem(
'vsan-vcsa-deployer-system',
vsanStub
),
'vsan-capability-system': vim.cluster.VsanCapabilitySystem(
'vsan-capability-system', vsanStub),
'vsanSystemEx': vim.host.VsanSystemEx('vsanSystemEx', vsanStub),
'vsan-update-manager': vim.host.VsanUpdateManager('vsan-update-manager',
vsanStub),
'vsan-cluster-iscsi-target-system': vim.cluster.VsanIscsiTargetSystem(
'vsan-cluster-iscsi-target-system',
vsanStub
),
}
return esxMos
# Convert a vSAN Task to a Task MO binding to vCenter service.
def ConvertVsanTaskToVcTask(vsanTask, vcStub):
vcTask = vim.Task(vsanTask._moId, vcStub)
return vcTask
# Wait for the vCenter task and returns after tasks are completed.
def WaitForTasks(tasks, si):
pc = si.content.propertyCollector
taskList = [str(task) for task in tasks]
# Create filter
objSpecs = [vmodl.query.PropertyCollector.ObjectSpec(obj=task)
for task in tasks]
propSpec = vmodl.query.PropertyCollector.PropertySpec(
type=vim.Task, pathSet=[], all=True)
filterSpec = vmodl.query.PropertyCollector.FilterSpec()
filterSpec.objectSet = objSpecs
filterSpec.propSet = [propSpec]
filter_ = pc.CreateFilter(filterSpec, True)
try:
version, state = None, None
# Loop looking for updates till the state moves to a completed state.
while len(taskList):
update = pc.WaitForUpdates(version)
for filterSet in update.filterSet:
for objSet in filterSet.objectSet:
task = objSet.obj
for change in objSet.changeSet:
if change.name == 'info':
state = change.val.state
elif change.name == 'info.state':
state = change.val
else:
continue
if not str(task) in taskList:
continue
if state == vim.TaskInfo.State.success:
# Remove task from taskList
taskList.remove(str(task))
elif state == vim.TaskInfo.State.error:
raise task.info.error
# Move to next version
version = update.version
finally:
if filter_:
filter_.Destroy()
# Get the VMODL version by checking the existence of vSAN namespace.
def GetLatestVmodlVersion(hostname):
try:
vsanVmodlUrl = 'https://%s/sdk/vsanServiceVersions.xml' % hostname
if (hasattr(ssl, '_create_unverified_context') and
hasattr(ssl, '_create_default_https_context')):
ssl._create_default_https_context = ssl._create_unverified_context
xmldoc = minidom.parse(urlopen(vsanVmodlUrl, timeout=5))
for element in xmldoc.getElementsByTagName('name'):
if (element.firstChild.nodeValue == "urn:vsan"):
versions = xmldoc.getElementsByTagName('version')
versionId = versions[0].firstChild.nodeValue
if versionId == '6.6':
return 'vsan.version.version3'
else:
return VmomiSupport.newestVersions.Get('vsan')
else:
return VmomiSupport.newestVersions.Get('vim')
except Exception as e:
# Any exception like failing to open the XML or failed to parse the
# the content should lead to the returning of namespace with vim.
return VmomiSupport.newestVersions.Get('vim')
|
en
| 0.804106
|
#!/usr/bin/env python # -*- coding: utf-8 -*- Copyright 2016-2019 VMware, Inc. All rights reserved. This module defines basic helper functions used in the sample codes # Import the vSAN API python bindings # Construct a stub for vSAN API access using vCenter or ESXi sessions from # existing stubs. Corresponding vCenter or ESXi service endpoint is required. # vCenter service endpoint is used by default. # Construct a stub for access vCenter side vSAN APIs. # Construct a stub for access ESXi side vSAN APIs. # Construct a stub for access ESXi side vSAN APIs. # Construct a stub for access ESXi side vSAN APIs. # Convert a vSAN Task to a Task MO binding to vCenter service. # Wait for the vCenter task and returns after tasks are completed. # Create filter # Loop looking for updates till the state moves to a completed state. # Remove task from taskList # Move to next version # Get the VMODL version by checking the existence of vSAN namespace. # Any exception like failing to open the XML or failed to parse the # the content should lead to the returning of namespace with vim.
| 2.150734
| 2
|
setup.py
|
yashu-seth/dummyPy
| 21
|
6628382
|
from distutils.core import setup
setup(
name = 'dummyPy',
packages = ['dummyPy'],
version = 'v0.3',
description = 'A python module to transform categorical variables to one hot encoded vectors.\
It can handle categorical variables of a dataset that cannot be fit into memory.\
It also works well with the train test framework common in machine learning tasks.',
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/yashu-seth/dummyPy',
download_url = 'https://github.com/yashu-seth/dummyPy/archive/v0.3.tar.gz',
keywords = ['testing', 'logging', 'example'],
classifiers = [],
)
|
from distutils.core import setup
setup(
name = 'dummyPy',
packages = ['dummyPy'],
version = 'v0.3',
description = 'A python module to transform categorical variables to one hot encoded vectors.\
It can handle categorical variables of a dataset that cannot be fit into memory.\
It also works well with the train test framework common in machine learning tasks.',
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/yashu-seth/dummyPy',
download_url = 'https://github.com/yashu-seth/dummyPy/archive/v0.3.tar.gz',
keywords = ['testing', 'logging', 'example'],
classifiers = [],
)
|
none
| 1
| 2.00196
| 2
|
|
lti/__init__.py
|
claudevervoort/ltiautotest
| 7
|
6628383
|
<reponame>claudevervoort/ltiautotest<gh_stars>1-10
from .ltiregistration import ToolRegistration, registration, get_platform_config, register_tool, base_tool_oidc_conf, get_tool_configuration, verify_11_oauth, add_coursenav_message
from .jwks import get_public_keyset, get_publickey_pem
from .gen_model import *
from .const import const
from .services import *
|
from .ltiregistration import ToolRegistration, registration, get_platform_config, register_tool, base_tool_oidc_conf, get_tool_configuration, verify_11_oauth, add_coursenav_message
from .jwks import get_public_keyset, get_publickey_pem
from .gen_model import *
from .const import const
from .services import *
|
none
| 1
| 1.073163
| 1
|
|
main.py
|
LuisMayo/meme-ocr
| 37
|
6628384
|
#!/usr/bin/env python3
import sys
from memeocr import MemeOCR
def main(argv):
if len(argv) != 2:
print('usage:')
print(' ./main.py meme-file-name')
return
meme_fname = argv[1]
ocr = MemeOCR()
txt = ocr.recognize(meme_fname)
print(txt)
if __name__ == '__main__':
main(sys.argv)
|
#!/usr/bin/env python3
import sys
from memeocr import MemeOCR
def main(argv):
if len(argv) != 2:
print('usage:')
print(' ./main.py meme-file-name')
return
meme_fname = argv[1]
ocr = MemeOCR()
txt = ocr.recognize(meme_fname)
print(txt)
if __name__ == '__main__':
main(sys.argv)
|
fr
| 0.221828
|
#!/usr/bin/env python3
| 2.178931
| 2
|
assignments/11-tictactoe/test.py
|
mattmiller899/biosys-analytics
| 4
|
6628385
|
#!/usr/bin/env python3
"""tests for outcome.py"""
from subprocess import getstatusoutput, getoutput
from random import shuffle, sample
import os.path
import re
outcome = './outcome.py'
def usage(prg):
"""usage"""
(retval, out) = getstatusoutput(prg)
assert retval > 0
assert re.match("usage", out, re.IGNORECASE)
def test_outcome_usage():
"""outcome usage"""
usage(outcome)
def bad_input(prg):
"""fails on bad input"""
tmpl = 'State "{}" must be 9 characters of only ., X, O'
"""bad input"""
state1 = '.'
out1 = getoutput('{} {}'.format(prg, state1))
assert out1.rstrip() == tmpl.format(state1)
state2 = '..X.OA..X'
out2 = getoutput('{} {}'.format(prg, state2))
assert out2.rstrip() == tmpl.format(state2)
def test_outcome_bad_input():
"""outcome bad input"""
bad_input(outcome)
def test_outcome():
wins = [('X', 'XXX......'), ('O', 'OOO......'), ('X', '...XXX...'),
('O', '...OOO...'), ('X', '......XXX'), ('O', '......OOO'),
('X', 'X..X..X..'), ('O', 'O..O..O..'), ('X', '.X..X..X.'),
('O', '.O..O..O.'), ('X', '..X..X..X'), ('O', '..O..O..O'),
('X', 'X...X...X'), ('O',
'O...O...O'), ('X',
'..X.X.X..'), ('O',
'..O.O.O..')]
for player, state in wins:
l = len(state)
dots = [i for i in range(l) if state[i] == '.']
mut = sample(dots, k=2)
other_player = 'O' if player == 'X' else 'X'
new_state = ''.join(
[other_player if i in mut else state[i] for i in range(l)])
out = getoutput('{} {}'.format(outcome, new_state))
assert out.strip() == '{} has won'.format(player)
losing_state = list('XXOO.....')
for i in range(10):
shuffle(losing_state)
out = getoutput('{} {}'.format(outcome, ''.join(losing_state)))
assert out.strip() == 'No winner'
|
#!/usr/bin/env python3
"""tests for outcome.py"""
from subprocess import getstatusoutput, getoutput
from random import shuffle, sample
import os.path
import re
outcome = './outcome.py'
def usage(prg):
"""usage"""
(retval, out) = getstatusoutput(prg)
assert retval > 0
assert re.match("usage", out, re.IGNORECASE)
def test_outcome_usage():
"""outcome usage"""
usage(outcome)
def bad_input(prg):
"""fails on bad input"""
tmpl = 'State "{}" must be 9 characters of only ., X, O'
"""bad input"""
state1 = '.'
out1 = getoutput('{} {}'.format(prg, state1))
assert out1.rstrip() == tmpl.format(state1)
state2 = '..X.OA..X'
out2 = getoutput('{} {}'.format(prg, state2))
assert out2.rstrip() == tmpl.format(state2)
def test_outcome_bad_input():
"""outcome bad input"""
bad_input(outcome)
def test_outcome():
wins = [('X', 'XXX......'), ('O', 'OOO......'), ('X', '...XXX...'),
('O', '...OOO...'), ('X', '......XXX'), ('O', '......OOO'),
('X', 'X..X..X..'), ('O', 'O..O..O..'), ('X', '.X..X..X.'),
('O', '.O..O..O.'), ('X', '..X..X..X'), ('O', '..O..O..O'),
('X', 'X...X...X'), ('O',
'O...O...O'), ('X',
'..X.X.X..'), ('O',
'..O.O.O..')]
for player, state in wins:
l = len(state)
dots = [i for i in range(l) if state[i] == '.']
mut = sample(dots, k=2)
other_player = 'O' if player == 'X' else 'X'
new_state = ''.join(
[other_player if i in mut else state[i] for i in range(l)])
out = getoutput('{} {}'.format(outcome, new_state))
assert out.strip() == '{} has won'.format(player)
losing_state = list('XXOO.....')
for i in range(10):
shuffle(losing_state)
out = getoutput('{} {}'.format(outcome, ''.join(losing_state)))
assert out.strip() == 'No winner'
|
en
| 0.367496
|
#!/usr/bin/env python3 tests for outcome.py usage outcome usage fails on bad input bad input outcome bad input
| 2.627655
| 3
|
isin.py
|
nbeguier/financial-tools
| 1
|
6628386
|
<reponame>nbeguier/financial-tools
#!/usr/bin/env python3
"""
ISIN
Copyright (c) 2020-2021 <NAME>
Licensed under the MIT License
Written by <NAME> (<EMAIL>)
"""
# Standard library imports
from argparse import ArgumentParser
import sys
# Own library
import lib.common as common
import lib.display as display
import lib.reporting as reporting
# Debug
# from pdb import set_trace as st
VERSION = '2.8.1'
def main(parameters):
"""
Main function
"""
report = reporting.get_report(parameters)
report = reporting.simplify_report(report, parameters)
if parameters['history']['healthy']:
display.print_health(report, parameters['verbose'])
else:
display.print_report(
report,
mic=parameters['mic'],
header=parameters['header'],
footer=parameters['footer'],
verbose=parameters['verbose'])
if __name__ == '__main__':
PARSER = ArgumentParser()
PARSER.add_argument('--version', action='version', version=VERSION)
PARSER.add_argument('--verbose', action='store_true',\
help="Affiche plus d'informations (=False)", default=False)
PARSER.add_argument('-i', '--isin', action='store',\
help="Code ISIN")
PARSER.add_argument('-n', '--nom', action='store',\
help="Nom de l'action")
PARSER.add_argument('-m', '--market-id-code', action='store',\
help="Code d'identification de marché (=XPAR)", default='XPAR')
PARSER.add_argument('--no-header', action='store_true',\
help="Cache les informations de bases (=False)", default=False)
PARSER.add_argument('--no-footer', action='store_true',\
help="Cache les URLs de fin (=False)", default=False)
PARSER.add_argument('--dividendes-history', action='store_true',\
help="Affiche plus d'informations sur les dividendes (=False)", default=False)
PARSER.add_argument('--per-history', action='store_true',\
help="Affiche la valeur théorique du PER (=False)", default=False)
PARSER.add_argument('--peg-history', action='store_true',\
help="Affiche la valeur théorique du PEG (=False)", default=False)
PARSER.add_argument('--is-healthy', action='store_true',\
help="Affiche l'état de santé de l'action (=False)", default=False)
ARGS = PARSER.parse_args()
PARAMS = dict()
PARAMS['isin'] = ARGS.isin
PARAMS['mic'] = ARGS.market_id_code
PARAMS['verbose'] = ARGS.verbose
PARAMS['header'] = not ARGS.no_header
PARAMS['footer'] = not ARGS.no_footer
PARAMS['history'] = dict()
PARAMS['history']['dividendes'] = ARGS.dividendes_history
PARAMS['history']['per'] = ARGS.per_history
PARAMS['history']['peg'] = ARGS.peg_history
PARAMS['history']['healthy'] = ARGS.is_healthy
if not ARGS.isin and not ARGS.nom:
PARSER.print_help()
sys.exit(1)
elif ARGS.nom is not None:
RESULT = common.autocomplete(ARGS.nom)
if not RESULT or 'ISIN' not in RESULT[0]:
print('No result for this name')
sys.exit(1)
else:
PARAMS['isin'] = RESULT[0]['ISIN']
main(PARAMS)
|
#!/usr/bin/env python3
"""
ISIN
Copyright (c) 2020-2021 <NAME>
Licensed under the MIT License
Written by <NAME> (<EMAIL>)
"""
# Standard library imports
from argparse import ArgumentParser
import sys
# Own library
import lib.common as common
import lib.display as display
import lib.reporting as reporting
# Debug
# from pdb import set_trace as st
VERSION = '2.8.1'
def main(parameters):
"""
Main function
"""
report = reporting.get_report(parameters)
report = reporting.simplify_report(report, parameters)
if parameters['history']['healthy']:
display.print_health(report, parameters['verbose'])
else:
display.print_report(
report,
mic=parameters['mic'],
header=parameters['header'],
footer=parameters['footer'],
verbose=parameters['verbose'])
if __name__ == '__main__':
PARSER = ArgumentParser()
PARSER.add_argument('--version', action='version', version=VERSION)
PARSER.add_argument('--verbose', action='store_true',\
help="Affiche plus d'informations (=False)", default=False)
PARSER.add_argument('-i', '--isin', action='store',\
help="Code ISIN")
PARSER.add_argument('-n', '--nom', action='store',\
help="Nom de l'action")
PARSER.add_argument('-m', '--market-id-code', action='store',\
help="Code d'identification de marché (=XPAR)", default='XPAR')
PARSER.add_argument('--no-header', action='store_true',\
help="Cache les informations de bases (=False)", default=False)
PARSER.add_argument('--no-footer', action='store_true',\
help="Cache les URLs de fin (=False)", default=False)
PARSER.add_argument('--dividendes-history', action='store_true',\
help="Affiche plus d'informations sur les dividendes (=False)", default=False)
PARSER.add_argument('--per-history', action='store_true',\
help="Affiche la valeur théorique du PER (=False)", default=False)
PARSER.add_argument('--peg-history', action='store_true',\
help="Affiche la valeur théorique du PEG (=False)", default=False)
PARSER.add_argument('--is-healthy', action='store_true',\
help="Affiche l'état de santé de l'action (=False)", default=False)
ARGS = PARSER.parse_args()
PARAMS = dict()
PARAMS['isin'] = ARGS.isin
PARAMS['mic'] = ARGS.market_id_code
PARAMS['verbose'] = ARGS.verbose
PARAMS['header'] = not ARGS.no_header
PARAMS['footer'] = not ARGS.no_footer
PARAMS['history'] = dict()
PARAMS['history']['dividendes'] = ARGS.dividendes_history
PARAMS['history']['per'] = ARGS.per_history
PARAMS['history']['peg'] = ARGS.peg_history
PARAMS['history']['healthy'] = ARGS.is_healthy
if not ARGS.isin and not ARGS.nom:
PARSER.print_help()
sys.exit(1)
elif ARGS.nom is not None:
RESULT = common.autocomplete(ARGS.nom)
if not RESULT or 'ISIN' not in RESULT[0]:
print('No result for this name')
sys.exit(1)
else:
PARAMS['isin'] = RESULT[0]['ISIN']
main(PARAMS)
|
en
| 0.709749
|
#!/usr/bin/env python3 ISIN Copyright (c) 2020-2021 <NAME> Licensed under the MIT License Written by <NAME> (<EMAIL>) # Standard library imports # Own library # Debug # from pdb import set_trace as st Main function
| 2.547672
| 3
|
pyjob/task.py
|
fsimkovic/pyjob
| 8
|
6628387
|
import abc
import logging
import os
import time
from pyjob import cexec, config
from pyjob.exception import (
PyJobError,
PyJobExecutableNotFoundError,
PyJobTaskLockedError,
)
from pyjob.script import ScriptCollector
logger = logging.getLogger(__name__)
class Task(abc.ABC):
"""Abstract base class for executable tasks"""
def __init__(self, script, *args, **kwargs):
"""Instantiate a new :obj:`~pyjob.task.Task`
Parameters
----------
script : :obj:`~pyjob.script.ScriptCollector`, :obj:`~pyjob.script.Script`, str, list, tuple
A :obj:`str`, :obj:`list` or :obj:`tuple` of one or more script paths
"""
self.pid = None
self.locked = False
if isinstance(script, ScriptCollector):
self.script_collector = script
else:
self.script_collector = ScriptCollector(script)
self.directory = os.path.abspath(
kwargs.get("directory") or config.get("directory") or "."
)
self.nprocesses = kwargs.get("processes") or config.get("processes") or 1
def __del__(self):
"""Exit function at instance deletion"""
if not self.locked:
self.lock()
self.close()
def __enter__(self):
"""Contextmanager entry function
Note
----
For further details see `PEP 343 <https://www.python.org/dev/peps/pep-0343/>`_.
"""
return self
def __exit__(self, *exc):
"""Contextmanager exit function
Note
----
For further details see `PEP 343 <https://www.python.org/dev/peps/pep-0343/>`_.
"""
if not self.locked:
self.lock()
self.close()
def __repr__(self):
"""Representation of the :obj:`~pyjob.task.Task`"""
return f"{self.__class__.__qualname__}(pid={self.pid})"
# ------------------ Abstract methods and properties ------------------
@property
@abc.abstractmethod
def info(self): # pragma: no cover
"""Abstract property to provide info about the :obj:`~pyjob.task.Task`"""
@abc.abstractmethod
def close(self): # pragma: no cover
"""Abstract method to end :obj:`~pyjob.task.Task`"""
@abc.abstractmethod
def kill(self): # pragma: no cover
"""Abstract method to forcefully terminate :obj:`~pyjob.task.Task`"""
@abc.abstractmethod
def _run(self): # pragma: no cover
"""Abstract property to start execution of the :obj:`~pyjob.task.Task`"""
# ------------------ Other task-specific general methods ------------------
@property
def completed(self):
"""Boolean to indicate :obj:`~pyjob.task.Task` completion"""
return self.locked and not bool(self.info)
@property
def log(self):
"""The log file path"""
return [script.log for script in self.script_collector]
@property
def script(self):
"""The script file path"""
return [script.path for script in self.script_collector]
@staticmethod
def get_time(minutes):
"""Return runtime string with format hh:mm:ss to be used in :obj:`~pyjob.task.Task`
Parameters
----------
minutes : int
Integer with the number of minutes to allocate to runtime
Raises
------
:exc:`~pyjob.exception.PyJobError`
Argument is not a positive integer
"""
if isinstance(minutes, int) and minutes > 0:
h, m = divmod(minutes, 60)
return f"{h:02d}:{m:02d}:00"
else:
raise PyJobError("Task runtime has to be a positive integer!")
def add_script(self, script):
"""Add further scripts to this :obj:`~pyjob.task.Task`
Parameters
----------
script : :obj:`~pyjob.script.Script`, str, list, tuple
Something representing one or more scripts
"""
if self.locked:
raise PyJobTaskLockedError("This task is locked!")
self.script_collector.add(script)
def lock(self):
"""Lock this :obj:`~pyjob.task.Task`"""
self.locked = True
logger.debug("Locked %s [%d]", self.__class__.__qualname__, self.pid)
def run(self):
"""Start the execution of this :obj:`~pyjob.task.Task`
Raises
------
:exc:`~pyjob.exception.PyJobError`
One or more executable scripts required prior to execution
:exc:`~pyjob.exception.PyJobTaskLockedError`
Locked task, cannot restart or rerun
"""
if self.locked:
raise PyJobTaskLockedError("This task is locked!")
if len(self.script_collector) < 1:
raise PyJobError(
"One or more executable scripts required prior to execution"
)
self.script_collector.dump()
self._run()
logger.debug(
"Started execution of %s [%d]", self.__class__.__qualname__, self.pid
)
self.lock()
def wait(self, interval=30, monitor_f=None, success_f=None):
"""Method to wait for the completion of the current :obj:`~pyjob.task.Task`
Parameters
----------
interval : int, optional
The interval to wait between checking (in seconds)
monitor_f : callable, optional
A :obj:`callable` that is regularly invoked
success_f : callable, optional
A :obj:`callable` to check for early termination of :obj:`~pyjob.task.Task`
Note
----
The `success_f` argument needs to accept a log file as input and return
a :obj:`bool`.
"""
def is_successful_run(log):
return os.path.isfile(log) and success_f(log)
def is_callable_fn(fn):
return bool(fn and callable(fn))
check_success = is_callable_fn(success_f)
callback = monitor_f if is_callable_fn(monitor_f) else lambda: None
if check_success:
msg = "Checking for %s %d success with function %s"
logger.debug(msg, self.__class__.__qualname__, self.pid, success_f.__name__)
while not self.completed:
if check_success:
for log in self.log:
if is_successful_run(log):
logger.debug(
"%s %d succeeded, run log: %s",
self.__class__.__qualname__,
self.pid,
log,
)
self.kill()
callback()
time.sleep(interval)
class ClusterTask(Task):
    """Abstract base class for executable cluster tasks"""
    def __init__(self, *args, **kwargs):
        """Instantiate a new :obj:`~pyjob.task.ClusterTask`
        Keyword arguments override values from the global ``config``; the
        last element of each ``or`` chain is the hard default.
        NOTE(review): falsy kwargs (0, "", False) fall through to the config
        value because of the ``or`` chains -- confirm this is intended.
        """
        super(ClusterTask, self).__init__(*args, **kwargs)
        self.dependency = kwargs.get("dependency", [])
        self.max_array_size = (
            kwargs.get("max_array_size")
            or config.get("max_array_size")
            or len(self.script)
        )
        self.priority = kwargs.get("priority", None)
        self.queue = kwargs.get("queue") or config.get("queue")
        self.environment = (
            kwargs.get("environment") or config.get("environment") or "mpi"
        )
        self.runtime = kwargs.get("runtime") or config.get("runtime")
        self.shell = kwargs.get("shell") or config.get("shell")
        self.name = kwargs.get("name") or config.get("name") or "pyjob"
        self.extra = kwargs.get("extra", [])
        self.cleanup = kwargs.get("cleanup") or config.get("cleanup") or False
        self.runscript = None
        self._check_requirements()
    @abc.abstractmethod
    def _create_runscript(self):
        """Utility method to create a :obj:`~pyjob.task.ClusterTask` runscript"""
    @staticmethod
    def _ensure_exec_available(exe):
        """Ensure that the specified executable is available in the system
        Parameters
        ----------
        exe : str
            The executable to test
        Raises
        ------
        :exc:`~pyjob.exception.PyJobError`
            The executable cannot be found
        """
        try:
            cexec([exe])
        except PyJobExecutableNotFoundError:
            # Re-raised as the generic package error so callers only need to
            # handle PyJobError.
            raise PyJobError(
                f"Cannot find executable {exe}. Please ensure environment is set up correctly."
            )
    def _check_requirements(self):
        """Abstract method to check if the user input meets the requirements for the task execution"""
    def close(self):
        """Close this :obj:`~pyjob.sge.ClusterTask` after completion"""
        # Block until the scheduler reports completion before cleaning up.
        self.wait()
        if self.cleanup and self.runscript is not None:
            self.runscript.cleanup()
    def get_array_bash_extension(self, jobsf, offset):
        """Get the array job bash extension for the ``runscript``
        Parameters
        ----------
        jobsf : str
            The file containing all scripts on a per-line basis
        offset : int
            The offset to be applied to the ``JOB_ARRAY_INDEX``
        Returns
        -------
        list
            A list of lines to be written to the ``runscript``
        Raises
        ------
        :exc:`ValueError`
            Invalid offset
        :exc:`ValueError`
            Valid job file required
        """
        if jobsf is None or not os.path.isfile(jobsf):
            raise ValueError("Valid job file required")
        if offset < 0:
            raise ValueError("Invalid offset")
        # JOB_ARRAY_INDEX is a class attribute naming the scheduler's shell
        # variable for the array index -- presumably set by subclasses; TODO confirm.
        job_array_index = self.__class__.JOB_ARRAY_INDEX
        if offset > 0:
            script_def = (
                f'script=$(awk "NR==$(({job_array_index} + {offset}))" {jobsf})'
            )
        else:
            script_def = f'script=$(awk "NR=={job_array_index}" {jobsf})'
        # Derive the per-script log path by swapping the file extension, then
        # run the script with stdout+stderr redirected into it.
        return [
            script_def,
            'log=$(echo $script | sed "s/\\.${script##*.}/\\.log/")',
            "$script > $log 2>&1",
        ]
|
import abc
import logging
import os
import time
from pyjob import cexec, config
from pyjob.exception import (
PyJobError,
PyJobExecutableNotFoundError,
PyJobTaskLockedError,
)
from pyjob.script import ScriptCollector
logger = logging.getLogger(__name__)
class Task(abc.ABC):
    """Abstract base class for executable tasks"""
    def __init__(self, script, *args, **kwargs):
        """Instantiate a new :obj:`~pyjob.task.Task`
        Parameters
        ----------
        script : :obj:`~pyjob.script.ScriptCollector`, :obj:`~pyjob.script.Script`, str, list, tuple
            A :obj:`str`, :obj:`list` or :obj:`tuple` of one or more script paths
        """
        self.pid = None
        self.locked = False
        # Accept a ready-made collector as-is; wrap anything else.
        if isinstance(script, ScriptCollector):
            self.script_collector = script
        else:
            self.script_collector = ScriptCollector(script)
        # Fallback chain: explicit kwarg -> global config -> hard default.
        self.directory = os.path.abspath(
            kwargs.get("directory") or config.get("directory") or "."
        )
        self.nprocesses = kwargs.get("processes") or config.get("processes") or 1
    def __del__(self):
        """Exit function at instance deletion"""
        # Mirror __exit__: lock first so close() always sees a locked task.
        if not self.locked:
            self.lock()
        self.close()
    def __enter__(self):
        """Contextmanager entry function
        Note
        ----
        For further details see `PEP 343 <https://www.python.org/dev/peps/pep-0343/>`_.
        """
        return self
    def __exit__(self, *exc):
        """Contextmanager exit function
        Note
        ----
        For further details see `PEP 343 <https://www.python.org/dev/peps/pep-0343/>`_.
        """
        if not self.locked:
            self.lock()
        self.close()
    def __repr__(self):
        """Representation of the :obj:`~pyjob.task.Task`"""
        return f"{self.__class__.__qualname__}(pid={self.pid})"
    # ------------------ Abstract methods and properties ------------------
    @property
    @abc.abstractmethod
    def info(self): # pragma: no cover
        """Abstract property to provide info about the :obj:`~pyjob.task.Task`"""
    @abc.abstractmethod
    def close(self): # pragma: no cover
        """Abstract method to end :obj:`~pyjob.task.Task`"""
    @abc.abstractmethod
    def kill(self): # pragma: no cover
        """Abstract method to forcefully terminate :obj:`~pyjob.task.Task`"""
    @abc.abstractmethod
    def _run(self): # pragma: no cover
        """Abstract property to start execution of the :obj:`~pyjob.task.Task`"""
    # ------------------ Other task-specific general methods ------------------
    @property
    def completed(self):
        """Boolean to indicate :obj:`~pyjob.task.Task` completion"""
        # Completed == locked and the backend no longer reports any info.
        return self.locked and not bool(self.info)
    @property
    def log(self):
        """The log file path"""
        return [script.log for script in self.script_collector]
    @property
    def script(self):
        """The script file path"""
        return [script.path for script in self.script_collector]
    @staticmethod
    def get_time(minutes):
        """Return runtime string with format hh:mm:ss to be used in :obj:`~pyjob.task.Task`
        Parameters
        ----------
        minutes : int
            Integer with the number of minutes to allocate to runtime
        Raises
        ------
        :exc:`~pyjob.exception.PyJobError`
            Argument is not a positive integer
        """
        if isinstance(minutes, int) and minutes > 0:
            h, m = divmod(minutes, 60)
            return f"{h:02d}:{m:02d}:00"
        else:
            raise PyJobError("Task runtime has to be a positive integer!")
    def add_script(self, script):
        """Add further scripts to this :obj:`~pyjob.task.Task`
        Parameters
        ----------
        script : :obj:`~pyjob.script.Script`, str, list, tuple
            Something representing one or more scripts
        """
        if self.locked:
            raise PyJobTaskLockedError("This task is locked!")
        self.script_collector.add(script)
    def lock(self):
        """Lock this :obj:`~pyjob.task.Task`"""
        self.locked = True
        # NOTE(review): self.pid may still be None here (before run()), while
        # the %d placeholder expects an int -- confirm the intended behaviour.
        logger.debug("Locked %s [%d]", self.__class__.__qualname__, self.pid)
    def run(self):
        """Start the execution of this :obj:`~pyjob.task.Task`
        Raises
        ------
        :exc:`~pyjob.exception.PyJobError`
            One or more executable scripts required prior to execution
        :exc:`~pyjob.exception.PyJobTaskLockedError`
            Locked task, cannot restart or rerun
        """
        if self.locked:
            raise PyJobTaskLockedError("This task is locked!")
        if len(self.script_collector) < 1:
            raise PyJobError(
                "One or more executable scripts required prior to execution"
            )
        # Write all collected scripts to disk, then hand over to the backend.
        self.script_collector.dump()
        self._run()
        logger.debug(
            "Started execution of %s [%d]", self.__class__.__qualname__, self.pid
        )
        # Lock to prevent adding scripts or re-running after submission.
        self.lock()
    def wait(self, interval=30, monitor_f=None, success_f=None):
        """Method to wait for the completion of the current :obj:`~pyjob.task.Task`
        Parameters
        ----------
        interval : int, optional
            The interval to wait between checking (in seconds)
        monitor_f : callable, optional
            A :obj:`callable` that is regularly invoked
        success_f : callable, optional
            A :obj:`callable` to check for early termination of :obj:`~pyjob.task.Task`
        Note
        ----
        The `success_f` argument needs to accept a log file as input and return
        a :obj:`bool`.
        """
        def is_successful_run(log):
            # Only consult success_f once the log file actually exists.
            return os.path.isfile(log) and success_f(log)
        def is_callable_fn(fn):
            return bool(fn and callable(fn))
        check_success = is_callable_fn(success_f)
        # Fall back to a no-op callback when no monitor function is given.
        callback = monitor_f if is_callable_fn(monitor_f) else lambda: None
        if check_success:
            msg = "Checking for %s %d success with function %s"
            logger.debug(msg, self.__class__.__qualname__, self.pid, success_f.__name__)
        while not self.completed:
            if check_success:
                for log in self.log:
                    if is_successful_run(log):
                        logger.debug(
                            "%s %d succeeded, run log: %s",
                            self.__class__.__qualname__,
                            self.pid,
                            log,
                        )
                        # Early termination: one successful log kills the task.
                        self.kill()
            callback()
            time.sleep(interval)
class ClusterTask(Task):
    """Abstract base class for executable cluster tasks"""
    def __init__(self, *args, **kwargs):
        """Instantiate a new :obj:`~pyjob.task.ClusterTask`
        Keyword arguments override values from the global ``config``; the
        last element of each ``or`` chain is the hard default.
        NOTE(review): falsy kwargs (0, "", False) fall through to the config
        value because of the ``or`` chains -- confirm this is intended.
        """
        super(ClusterTask, self).__init__(*args, **kwargs)
        self.dependency = kwargs.get("dependency", [])
        self.max_array_size = (
            kwargs.get("max_array_size")
            or config.get("max_array_size")
            or len(self.script)
        )
        self.priority = kwargs.get("priority", None)
        self.queue = kwargs.get("queue") or config.get("queue")
        self.environment = (
            kwargs.get("environment") or config.get("environment") or "mpi"
        )
        self.runtime = kwargs.get("runtime") or config.get("runtime")
        self.shell = kwargs.get("shell") or config.get("shell")
        self.name = kwargs.get("name") or config.get("name") or "pyjob"
        self.extra = kwargs.get("extra", [])
        self.cleanup = kwargs.get("cleanup") or config.get("cleanup") or False
        self.runscript = None
        self._check_requirements()
    @abc.abstractmethod
    def _create_runscript(self):
        """Utility method to create a :obj:`~pyjob.task.ClusterTask` runscript"""
    @staticmethod
    def _ensure_exec_available(exe):
        """Ensure that the specified executable is available in the system
        Parameters
        ----------
        exe : str
            The executable to test
        Raises
        ------
        :exc:`~pyjob.exception.PyJobError`
            The executable cannot be found
        """
        try:
            cexec([exe])
        except PyJobExecutableNotFoundError:
            # Re-raised as the generic package error so callers only need to
            # handle PyJobError.
            raise PyJobError(
                f"Cannot find executable {exe}. Please ensure environment is set up correctly."
            )
    def _check_requirements(self):
        """Abstract method to check if the user input meets the requirements for the task execution"""
    def close(self):
        """Close this :obj:`~pyjob.sge.ClusterTask` after completion"""
        # Block until the scheduler reports completion before cleaning up.
        self.wait()
        if self.cleanup and self.runscript is not None:
            self.runscript.cleanup()
    def get_array_bash_extension(self, jobsf, offset):
        """Get the array job bash extension for the ``runscript``
        Parameters
        ----------
        jobsf : str
            The file containing all scripts on a per-line basis
        offset : int
            The offset to be applied to the ``JOB_ARRAY_INDEX``
        Returns
        -------
        list
            A list of lines to be written to the ``runscript``
        Raises
        ------
        :exc:`ValueError`
            Invalid offset
        :exc:`ValueError`
            Valid job file required
        """
        if jobsf is None or not os.path.isfile(jobsf):
            raise ValueError("Valid job file required")
        if offset < 0:
            raise ValueError("Invalid offset")
        # JOB_ARRAY_INDEX is a class attribute naming the scheduler's shell
        # variable for the array index -- presumably set by subclasses; TODO confirm.
        job_array_index = self.__class__.JOB_ARRAY_INDEX
        if offset > 0:
            script_def = (
                f'script=$(awk "NR==$(({job_array_index} + {offset}))" {jobsf})'
            )
        else:
            script_def = f'script=$(awk "NR=={job_array_index}" {jobsf})'
        # Derive the per-script log path by swapping the file extension, then
        # run the script with stdout+stderr redirected into it.
        return [
            script_def,
            'log=$(echo $script | sed "s/\\.${script##*.}/\\.log/")',
            "$script > $log 2>&1",
        ]
|
en
| 0.637583
|
Abstract base class for executable tasks Instantiate a new :obj:`~pyjob.task.Task` Parameters ---------- script : :obj:`~pyjob.script.ScriptCollector`, :obj:`~pyjob.script.Script`, str, list, tuple A :obj:`str`, :obj:`list` or :obj:`tuple` of one or more script paths Exit function at instance deletion Contextmanager entry function Note ---- For further details see `PEP 343 <https://www.python.org/dev/peps/pep-0343/>`_. Contextmanager exit function Note ---- For further details see `PEP 343 <https://www.python.org/dev/peps/pep-0343/>`_. Representation of the :obj:`~pyjob.task.Task` # ------------------ Abstract methods and properties ------------------ # pragma: no cover Abstract property to provide info about the :obj:`~pyjob.task.Task` # pragma: no cover Abstract method to end :obj:`~pyjob.task.Task` # pragma: no cover Abstract method to forcefully terminate :obj:`~pyjob.task.Task` # pragma: no cover Abstract property to start execution of the :obj:`~pyjob.task.Task` # ------------------ Other task-specific general methods ------------------ Boolean to indicate :obj:`~pyjob.task.Task` completion The log file path The script file path Return runtime string with format hh:mm:ss to be used in :obj:`~pyjob.task.Task` Parameters ---------- minutes : int Integer with the number of minutes to allocate to runtime Raises ------ :exc:`~pyjob.exception.PyJobError` Argument is not a positive integer Add further scripts to this :obj:`~pyjob.task.Task` Parameters ---------- script : :obj:`~pyjob.script.Script`, str, list, tuple Something representing one or more scripts Lock this :obj:`~pyjob.task.Task` Start the execution of this :obj:`~pyjob.task.Task` Raises ------ :exc:`~pyjob.exception.PyJobError` One or more executable scripts required prior to execution :exc:`~pyjob.exception.PyJobTaskLockedError` Locked task, cannot restart or rerun Method to wait for the completion of the current :obj:`~pyjob.task.Task` Parameters ---------- interval : int, optional The interval to 
wait between checking (in seconds) monitor_f : callable, optional A :obj:`callable` that is regularly invoked success_f : callable, optional A :obj:`callable` to check for early termination of :obj:`~pyjob.task.Task` Note ---- The `success_f` argument needs to accept a log file as input and return a :obj:`bool`. Abstract base class for executable cluster tasks Instantiate a new :obj:`~pyjob.task.ClusterTask` Utility method to create a :obj:`~pyjob.task.ClusterTask` runscript Ensure that the specified executable is available in the system Parameters ---------- exe : str The executable to test Raises ------ :exc:`~pyjob.exception.PyJobError` The executable cannot be found Abstract method to check if the user input meets the requirements for the task execution Close this :obj:`~pyjob.sge.ClusterTask` after completion Get the array job bash extension for the ``runscript`` Parameters ---------- jobsf : str The file containing all scripts on a per-line basis offset : int The offset to be applied to the ``JOB_ARRAY_INDEX`` Returns ------- list A list of lines to be written to the ``runscript`` Raises ------ :exc:`ValueError` Invalid offset :exc:`ValueError` Valid job file required ##*.}/\\.log/")',
| 2.271107
| 2
|
skimpy/core/modifiers.py
|
EPFL-LCSB/skimpy
| 13
|
6628388
|
<reponame>EPFL-LCSB/skimpy<filename>skimpy/core/modifiers.py
# -*- coding: utf-8 -*-
"""
.. module:: skimpy
:platform: Unix, Windows
:synopsis: Simple Kinetic Models in Python
.. moduleauthor:: SKiMPy team
[---------]
Copyright 2017 Laboratory of Computational Systems Biotechnology (LCSB),
Ecole Polytechnique Federale de Lausanne (EPFL), Switzerland
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from sympy import sympify
from ..utils.general import check_is_symbol
from ..mechanisms.mechanism import KineticMechanism
from ..core.itemsets import make_parameter_set, make_reactant_set
from ..utils.namespace import *
class ExpressionModifier(object):
    """
    This class describes a modifier to an expression, like a boundary condition
    or constraint.
    For example, changing a rate to a constant (boundary condition), or linking
    it to another variable of the model (constraint).
    It accepts as an argument a modifier.
    A modifier is a function which will look at all your expressions, and
    apply its transformation to them. As a result, its arguments have to be a
    TabDict of expressions, such as KinModel.ODEFun.expressions
    """
    prefix = 'MOD'
    def __init__(self, name, modifier = None):
        self._name = name
        # Subclasses may implement `modifier` as a method; only store an
        # explicit callable when one is supplied.
        if modifier is not None:
            self._modifier = modifier
    def __call__(self,expressions):
        # Apply the stored/overridden transformation in place.
        self.modifier(expressions)
    @property
    def modifier(self):
        return self._modifier
    def link(self,model):
        """
        Link the modifier to a model, to gain awareness of the inner/outer
        variables
        :param model:
        :return:
        """
        self.model = model
    @property
    def name(self):
        # The public name is always tagged with the class prefix.
        return self.prefix +'_' + self._name
    @name.setter
    def name(self, value):
        # BUG FIX: strip the full '<prefix>_' tag (separator included) so that
        # assigning a value previously read from `name` round-trips; before,
        # only the bare prefix was removed, so a re-read yielded 'MOD__foo'.
        tag = self.prefix + '_'
        if value.startswith(tag):
            value = value[len(tag):]
        elif value.startswith(self.prefix):
            value = value[len(self.prefix):]
        self._name = value
class BoundaryCondition(ExpressionModifier):
    """
    Modifier subclass marking conditions that delimit the boundaries of the
    observed system.
    """
    prefix = 'BC'
    def __init__(self, name, modifier=None):
        # Pure delegation: only the prefix distinguishes this subclass.
        ExpressionModifier.__init__(self, name, modifier=modifier)
class ConstantConcentration(BoundaryCondition):
    """
    Boundary condition that clamps a reactant to a constant concentration by
    zeroing its rate expression.
    """
    prefix = 'CC'
    def __init__(self, reactant, name=None):
        BoundaryCondition.__init__(
            self, name=reactant.name if name is None else name
        )
        # A clamped reactant is no longer a state variable of the model.
        reactant.type = PARAMETER
        self.reactant = reactant
    def modifier(self, expressions):
        """
        Zero out the clamped reactant's rate expression.
        :param expressions:
        :return:
        """
        key = self.reactant.symbol
        expressions[key] = 0.0 * expressions[key]
    def __del__(self):
        # Restore variable status once the boundary condition is dropped.
        self.reactant.type = VARIABLE
class AdditiveConcentrationRate(ExpressionModifier):
    """
    Modifier that adds a fixed concentration rate term onto a reactant's
    rate expression.
    """
    # FIXME Please give us an alternate name we _REALLY_ don't like it
    prefix = 'ADDCR'
    def __init__(self, reactant, flux_value, name=None):
        ExpressionModifier.__init__(
            self, name=str(reactant) if name is None else name
        )
        self.reactant = reactant
        self.flux_value = flux_value
    def modifier(self, expressions):
        """
        Add the (sympified) flux value to the reactant's rate expression.
        :param expressions:
        :return:
        """
        key = self.reactant.symbol
        expressions[key] = expressions[key] + sympify(self.flux_value)
class BoundaryFlux(BoundaryCondition, AdditiveConcentrationRate):
    """Boundary condition realised as a fixed additive flux on a reactant."""
    prefix = "BF"
    def __init__(self, reactant, flux_value):
        # TODO: Find a way to make sure the flux_value does not depend on an
        # inner variable
        self.check_dependency(flux_value)
        AdditiveConcentrationRate.__init__(self, reactant, flux_value)
    def check_dependency(self, expression):
        """Placeholder guard; dependency checking is not implemented yet."""
        # TODO: Implement
        pass
"""
Reaction modifiers
"""
class FirstOrderSmallMoleculeModifier(KineticMechanism,ExpressionModifier):
    """Scale a reaction rate first-order in a small-molecule concentration.
    NOTE(review): a consumed small molecule (negative stoichiometry) scales
    the forward rate, a produced one (positive) the backward rate.
    """
    prefix = "HSM"
    Reactants = make_reactant_set(__name__, ['small_molecule'])
    Parameters = make_parameter_set( __name__,
                                     { })
    parameter_reactant_links = {}
    def __init__(self, small_molecule, mechanism_stoichiometry, name=None):
        if name is None:
            name = small_molecule.__repr__()
        reactants = self.Reactants(small_molecule=small_molecule)
        parameters = self.Parameters()
        KineticMechanism.__init__(self, name, reactants, parameters)
        # Accept either a ready {reactant: stoichiometry} mapping or a scalar.
        if type(mechanism_stoichiometry) is dict:
            self.reactant_stoichiometry = mechanism_stoichiometry
        else:
            self.reactant_stoichiometry = {'small_molecule':
                                               float(mechanism_stoichiometry)}
    def modifier(self, expressions):
        """
        change the flux reaction rate expressions
        :param expression: {vnet, vfwd, vbwd}
        :return:
        """
        # First-order modification of the rate:
        # expressions = TabDict([('v_net', rate_expression),
        #                        ('v_fwd', forward_rate_expression),
        #                        ('v_bwd', backward_rate_expression),
        #                        ])
        if self.reactant_stoichiometry['small_molecule'] < 0:
            expressions['v_fwd'] = expressions['v_fwd']\
                                   * self.get_qssa_rate_expression()**-self.reactant_stoichiometry['small_molecule']
        if self.reactant_stoichiometry['small_molecule'] > 0:
            expressions['v_bwd'] = expressions['v_bwd'] \
                                   * self.get_qssa_rate_expression()**self.reactant_stoichiometry['small_molecule']
        # Rebuild the net rate from the modified directional rates.
        expressions['v_net'] = expressions['v_fwd'] - expressions['v_bwd']
    def get_qssa_rate_expression(self):
        # The scaling term is simply the small-molecule symbol itself.
        sm = self.reactants.small_molecule.symbol
        return sm
    def update_qssa_rate_expression(self):
        return None
    def get_full_rate_expression(self):
        raise NotImplementedError
    def calculate_rate_constants(self):
        raise NotImplementedError
class DisplacementSmallMoleculeModifier(KineticMechanism,ExpressionModifier):
    """Scale only the backward rate by a small-molecule concentration term."""
    prefix = "DSM"
    Reactants = make_reactant_set(__name__, ['small_molecule',])
    Parameters = make_parameter_set( __name__,
                                     { })
    parameter_reactant_links = {}
    def __init__(self, small_molecule, mechanism_stoichiometry, name=None):
        if name is None:
            name = small_molecule.__str__()
        reactants = self.Reactants(small_molecule=small_molecule,)
        parameters = self.Parameters()
        KineticMechanism.__init__(self, name, reactants, parameters)
        # TODO Unify between skimpy versions
        # Accept either a ready {reactant: stoichiometry} mapping or a scalar.
        if type(mechanism_stoichiometry) is dict:
            self.reactant_stoichiometry = mechanism_stoichiometry
        else:
            self.reactant_stoichiometry = {'small_molecule':
                                               float(mechanism_stoichiometry)}
    def modifier(self, expressions):
        """
        change the flux reaction rate expressions
        :param expression: {vnet, vfwd, vbwd}
        :return:
        """
        # expressions = TabDict([('v_net', rate_expression),
        #                        ('v_fwd', forward_rate_expression),
        #                        ('v_bwd', backward_rate_expression),
        #                        ])
        # Only the backward direction is scaled; the net rate is rebuilt.
        expressions['v_bwd'] = expressions['v_bwd'] \
                               * self.get_qssa_rate_expression()**self.reactant_stoichiometry['small_molecule']
        expressions['v_net'] = expressions['v_fwd'] - expressions['v_bwd']
    def get_qssa_rate_expression(self):
        # The scaling term is simply the small-molecule symbol itself.
        sm = self.reactants.small_molecule.symbol
        return sm
    def update_qssa_rate_expression(self):
        return None
    def get_full_rate_expression(self):
        raise NotImplementedError
    def calculate_rate_constants(self):
        raise NotImplementedError
"""
Activators and inhibitors
"""
class ActivationModifier(KineticMechanism,ExpressionModifier):
    """Scale both rate directions by (1 + activator / k_activation)."""
    prefix = "AM"
    Reactants = make_reactant_set(__name__, ['activator',])
    Parameters = make_parameter_set(__name__, {'k_activation': [ODE, MCA, QSSA],})
    parameter_reactant_links = {'k_activation':'activator'}
    def __init__(self, activator, name=None, k_activation=None):
        if name is None:
            name = activator.__str__()
        reactants = self.Reactants(activator=activator,)
        parameters = self.Parameters(k_activation=k_activation)
        KineticMechanism.__init__(self, name, reactants, parameters)
        # The activator itself is not consumed or produced by the reaction.
        self.reactant_stoichiometry = {'activator': 0 }
    def modifier(self, expressions):
        """
        change the flux reaction rate expressions
        :param expression: {vnet, vfwd, vbwd}
        :return:
        """
        # expressions = TabDict([('v_net', rate_expression),
        #                        ('v_fwd', forward_rate_expression),
        #                        ('v_bwd', backward_rate_expression),
        #                        ])
        # Both directions are multiplied by the same factor, then the net
        # rate is rebuilt from them.
        activation = 1 + self.get_qssa_rate_expression()
        expressions['v_bwd'] = expressions['v_bwd'] * activation
        expressions['v_fwd'] = expressions['v_fwd'] * activation
        expressions['v_net'] = expressions['v_fwd'] - expressions['v_bwd']
    def get_qssa_rate_expression(self):
        # Ratio of activator concentration to activation constant.
        a = self.reactants.activator.symbol
        k = self.parameters.k_activation.symbol
        return a/k
    def update_qssa_rate_expression(self):
        return None
    def get_full_rate_expression(self):
        raise NotImplementedError
    def calculate_rate_constants(self):
        raise NotImplementedError
class InhibitionModifier(KineticMechanism,ExpressionModifier):
    """Divide both rate directions by (1 + inhibitor / k_inhibition)."""
    # BUG FIX: prefix was "AM", copy-pasted from ActivationModifier, making
    # activation and inhibition modifier names indistinguishable; "IM"
    # follows the established one-prefix-per-class convention (MOD, BC, CC,
    # ADDCR, BF, HSM, DSM, AM).
    prefix = "IM"
    Reactants = make_reactant_set(__name__, ['inhibitor',])
    Parameters = make_parameter_set(__name__, {'k_inhibition': [ODE, MCA, QSSA],})
    parameter_reactant_links = {'k_inhibition':'inhibitor'}
    def __init__(self, inhibitor, name=None, k_inhibition=None):
        if name is None:
            name = inhibitor.__str__()
        reactants = self.Reactants(inhibitor=inhibitor,)
        parameters = self.Parameters(k_inhibition=k_inhibition)
        KineticMechanism.__init__(self, name, reactants, parameters)
        # The inhibitor itself is not consumed or produced by the reaction.
        self.reactant_stoichiometry = {'inhibitor': 0 }
    def modifier(self, expressions):
        """
        change the flux reaction rate expressions
        :param expression: {vnet, vfwd, vbwd}
        :return:
        """
        # expressions = TabDict([('v_net', rate_expression),
        #                        ('v_fwd', forward_rate_expression),
        #                        ('v_bwd', backward_rate_expression),
        #                        ])
        # Both directions are divided by the same factor, then the net rate
        # is rebuilt from them.
        inhibition = 1 + self.get_qssa_rate_expression()
        expressions['v_bwd'] = expressions['v_bwd'] / inhibition
        expressions['v_fwd'] = expressions['v_fwd'] / inhibition
        expressions['v_net'] = expressions['v_fwd'] - expressions['v_bwd']
    def get_qssa_rate_expression(self):
        # Ratio of inhibitor concentration to inhibition constant.
        a = self.reactants.inhibitor.symbol
        k = self.parameters.k_inhibition.symbol
        return a/k
    def update_qssa_rate_expression(self):
        return None
    def get_full_rate_expression(self):
        raise NotImplementedError
    def calculate_rate_constants(self):
        raise NotImplementedError
|
# -*- coding: utf-8 -*-
"""
.. module:: skimpy
:platform: Unix, Windows
:synopsis: Simple Kinetic Models in Python
.. moduleauthor:: SKiMPy team
[---------]
Copyright 2017 Laboratory of Computational Systems Biotechnology (LCSB),
Ecole Polytechnique Federale de Lausanne (EPFL), Switzerland
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from sympy import sympify
from ..utils.general import check_is_symbol
from ..mechanisms.mechanism import KineticMechanism
from ..core.itemsets import make_parameter_set, make_reactant_set
from ..utils.namespace import *
class ExpressionModifier(object):
    """
    This class describes a modifier to an expression, like a boundary condition
    or constraint.
    For example, changing a rate to a constant (boundary condition), or linking
    it to another variable of the model (constraint).
    It accepts as an argument a modifier.
    A modifier is a function which will look at all your expressions, and
    apply its transformation to them. As a result, its arguments have to be a
    TabDict of expressions, such as KinModel.ODEFun.expressions
    """
    prefix = 'MOD'
    def __init__(self, name, modifier = None):
        self._name = name
        # Subclasses may implement `modifier` as a method; only store an
        # explicit callable when one is supplied.
        if modifier is not None:
            self._modifier = modifier
    def __call__(self,expressions):
        # Apply the stored/overridden transformation in place.
        self.modifier(expressions)
    @property
    def modifier(self):
        return self._modifier
    def link(self,model):
        """
        Link the modifier to a model, to gain awareness of the inner/outer
        variables
        :param model:
        :return:
        """
        self.model = model
    @property
    def name(self):
        # The public name is always tagged with the class prefix.
        return self.prefix +'_' + self._name
    @name.setter
    def name(self, value):
        # BUG FIX: strip the full '<prefix>_' tag (separator included) so that
        # assigning a value previously read from `name` round-trips; before,
        # only the bare prefix was removed, so a re-read yielded 'MOD__foo'.
        tag = self.prefix + '_'
        if value.startswith(tag):
            value = value[len(tag):]
        elif value.startswith(self.prefix):
            value = value[len(self.prefix):]
        self._name = value
class BoundaryCondition(ExpressionModifier):
    """
    Modifier subclass marking conditions that delimit the boundaries of the
    observed system.
    """
    prefix = 'BC'
    def __init__(self, name, modifier=None):
        # Pure delegation: only the prefix distinguishes this subclass.
        ExpressionModifier.__init__(self, name, modifier=modifier)
class ConstantConcentration(BoundaryCondition):
    """
    Boundary condition that clamps a reactant to a constant concentration by
    zeroing its rate expression.
    """
    prefix = 'CC'
    def __init__(self, reactant, name=None):
        BoundaryCondition.__init__(
            self, name=reactant.name if name is None else name
        )
        # A clamped reactant is no longer a state variable of the model.
        reactant.type = PARAMETER
        self.reactant = reactant
    def modifier(self, expressions):
        """
        Zero out the clamped reactant's rate expression.
        :param expressions:
        :return:
        """
        key = self.reactant.symbol
        expressions[key] = 0.0 * expressions[key]
    def __del__(self):
        # Restore variable status once the boundary condition is dropped.
        self.reactant.type = VARIABLE
class AdditiveConcentrationRate(ExpressionModifier):
    """
    Modifier that adds a fixed concentration rate term onto a reactant's
    rate expression.
    """
    # FIXME Please give us an alternate name we _REALLY_ don't like it
    prefix = 'ADDCR'
    def __init__(self, reactant, flux_value, name=None):
        ExpressionModifier.__init__(
            self, name=str(reactant) if name is None else name
        )
        self.reactant = reactant
        self.flux_value = flux_value
    def modifier(self, expressions):
        """
        Add the (sympified) flux value to the reactant's rate expression.
        :param expressions:
        :return:
        """
        key = self.reactant.symbol
        expressions[key] = expressions[key] + sympify(self.flux_value)
class BoundaryFlux(BoundaryCondition, AdditiveConcentrationRate):
    """Boundary condition realised as a fixed additive flux on a reactant."""
    prefix = "BF"
    def __init__(self, reactant, flux_value):
        # TODO: Find a way to make sure the flux_value does not depend on an
        # inner variable
        self.check_dependency(flux_value)
        AdditiveConcentrationRate.__init__(self, reactant, flux_value)
    def check_dependency(self, expression):
        """Placeholder guard; dependency checking is not implemented yet."""
        # TODO: Implement
        pass
"""
Reaction modifiers
"""
class FirstOrderSmallMoleculeModifier(KineticMechanism,ExpressionModifier):
    """Scale a reaction rate first-order in a small-molecule concentration.
    NOTE(review): a consumed small molecule (negative stoichiometry) scales
    the forward rate, a produced one (positive) the backward rate.
    """
    prefix = "HSM"
    Reactants = make_reactant_set(__name__, ['small_molecule'])
    Parameters = make_parameter_set( __name__,
                                     { })
    parameter_reactant_links = {}
    def __init__(self, small_molecule, mechanism_stoichiometry, name=None):
        if name is None:
            name = small_molecule.__repr__()
        reactants = self.Reactants(small_molecule=small_molecule)
        parameters = self.Parameters()
        KineticMechanism.__init__(self, name, reactants, parameters)
        # Accept either a ready {reactant: stoichiometry} mapping or a scalar.
        if type(mechanism_stoichiometry) is dict:
            self.reactant_stoichiometry = mechanism_stoichiometry
        else:
            self.reactant_stoichiometry = {'small_molecule':
                                               float(mechanism_stoichiometry)}
    def modifier(self, expressions):
        """
        change the flux reaction rate expressions
        :param expression: {vnet, vfwd, vbwd}
        :return:
        """
        # First-order modification of the rate:
        # expressions = TabDict([('v_net', rate_expression),
        #                        ('v_fwd', forward_rate_expression),
        #                        ('v_bwd', backward_rate_expression),
        #                        ])
        if self.reactant_stoichiometry['small_molecule'] < 0:
            expressions['v_fwd'] = expressions['v_fwd']\
                                   * self.get_qssa_rate_expression()**-self.reactant_stoichiometry['small_molecule']
        if self.reactant_stoichiometry['small_molecule'] > 0:
            expressions['v_bwd'] = expressions['v_bwd'] \
                                   * self.get_qssa_rate_expression()**self.reactant_stoichiometry['small_molecule']
        # Rebuild the net rate from the modified directional rates.
        expressions['v_net'] = expressions['v_fwd'] - expressions['v_bwd']
    def get_qssa_rate_expression(self):
        # The scaling term is simply the small-molecule symbol itself.
        sm = self.reactants.small_molecule.symbol
        return sm
    def update_qssa_rate_expression(self):
        return None
    def get_full_rate_expression(self):
        raise NotImplementedError
    def calculate_rate_constants(self):
        raise NotImplementedError
class DisplacementSmallMoleculeModifier(KineticMechanism,ExpressionModifier):
    """Scale only the backward rate by a small-molecule concentration term."""
    prefix = "DSM"
    Reactants = make_reactant_set(__name__, ['small_molecule',])
    Parameters = make_parameter_set( __name__,
                                     { })
    parameter_reactant_links = {}
    def __init__(self, small_molecule, mechanism_stoichiometry, name=None):
        if name is None:
            name = small_molecule.__str__()
        reactants = self.Reactants(small_molecule=small_molecule,)
        parameters = self.Parameters()
        KineticMechanism.__init__(self, name, reactants, parameters)
        # TODO Unify between skimpy versions
        # Accept either a ready {reactant: stoichiometry} mapping or a scalar.
        if type(mechanism_stoichiometry) is dict:
            self.reactant_stoichiometry = mechanism_stoichiometry
        else:
            self.reactant_stoichiometry = {'small_molecule':
                                               float(mechanism_stoichiometry)}
    def modifier(self, expressions):
        """
        change the flux reaction rate expressions
        :param expression: {vnet, vfwd, vbwd}
        :return:
        """
        # expressions = TabDict([('v_net', rate_expression),
        #                        ('v_fwd', forward_rate_expression),
        #                        ('v_bwd', backward_rate_expression),
        #                        ])
        # Only the backward direction is scaled; the net rate is rebuilt.
        expressions['v_bwd'] = expressions['v_bwd'] \
                               * self.get_qssa_rate_expression()**self.reactant_stoichiometry['small_molecule']
        expressions['v_net'] = expressions['v_fwd'] - expressions['v_bwd']
    def get_qssa_rate_expression(self):
        # The scaling term is simply the small-molecule symbol itself.
        sm = self.reactants.small_molecule.symbol
        return sm
    def update_qssa_rate_expression(self):
        return None
    def get_full_rate_expression(self):
        raise NotImplementedError
    def calculate_rate_constants(self):
        raise NotImplementedError
"""
Activators and inhibitors
"""
class ActivationModifier(KineticMechanism, ExpressionModifier):
    """Hyperbolic activation: scales both directional rates by
    (1 + [activator] / k_activation), leaving the equilibrium untouched."""

    prefix = "AM"
    Reactants = make_reactant_set(__name__, ['activator', ])
    Parameters = make_parameter_set(__name__, {'k_activation': [ODE, MCA, QSSA], })
    parameter_reactant_links = {'k_activation': 'activator'}

    def __init__(self, activator, name=None, k_activation=None):
        """
        :param activator: reactant acting as the activator
        :param name: optional mechanism name (defaults to str(activator))
        :param k_activation: activation constant
        """
        if name is None:
            name = activator.__str__()
        KineticMechanism.__init__(self,
                                  name,
                                  self.Reactants(activator=activator, ),
                                  self.Parameters(k_activation=k_activation))
        # The activator is neither consumed nor produced by the reaction.
        self.reactant_stoichiometry = {'activator': 0}

    def modifier(self, expressions):
        """Multiply v_fwd and v_bwd in place by the activation factor.

        :param expressions: TabDict with keys 'v_net', 'v_fwd', 'v_bwd'
        :return: None
        """
        scaling = 1 + self.get_qssa_rate_expression()
        expressions['v_bwd'] = expressions['v_bwd'] * scaling
        expressions['v_fwd'] = expressions['v_fwd'] * scaling
        expressions['v_net'] = expressions['v_fwd'] - expressions['v_bwd']

    def get_qssa_rate_expression(self):
        """Return [activator] / k_activation."""
        return (self.reactants.activator.symbol
                / self.parameters.k_activation.symbol)

    def update_qssa_rate_expression(self):
        # No derived QSSA parameters to refresh for this modifier.
        return None

    def get_full_rate_expression(self):
        raise NotImplementedError

    def calculate_rate_constants(self):
        raise NotImplementedError
class InhibitionModifier(KineticMechanism, ExpressionModifier):
    """Hyperbolic inhibition: divides both directional rates by
    (1 + [inhibitor] / k_inhibition), leaving the equilibrium untouched."""

    # BUGFIX: this was "AM", copy-pasted from ActivationModifier. Following
    # the prefix scheme of the other modifiers (DSM, AM, ...), inhibition
    # gets its own prefix so the two modifier types are distinguishable.
    # NOTE(review): if the prefix feeds generated parameter/variable names
    # downstream, confirm no saved models rely on the old "AM" value.
    prefix = "IM"
    Reactants = make_reactant_set(__name__, ['inhibitor', ])
    Parameters = make_parameter_set(__name__, {'k_inhibition': [ODE, MCA, QSSA], })
    parameter_reactant_links = {'k_inhibition': 'inhibitor'}

    def __init__(self, inhibitor, name=None, k_inhibition=None):
        """
        :param inhibitor: reactant acting as the inhibitor
        :param name: optional mechanism name (defaults to str(inhibitor))
        :param k_inhibition: inhibition constant
        """
        if name is None:
            name = inhibitor.__str__()
        reactants = self.Reactants(inhibitor=inhibitor, )
        parameters = self.Parameters(k_inhibition=k_inhibition)
        KineticMechanism.__init__(self, name, reactants, parameters)
        # The inhibitor is neither consumed nor produced by the reaction.
        self.reactant_stoichiometry = {'inhibitor': 0}

    def modifier(self, expressions):
        """Divide v_fwd and v_bwd in place by the inhibition factor.

        :param expressions: TabDict with keys 'v_net', 'v_fwd', 'v_bwd'
        :return: None
        """
        inhibition = 1 + self.get_qssa_rate_expression()
        expressions['v_bwd'] = expressions['v_bwd'] / inhibition
        expressions['v_fwd'] = expressions['v_fwd'] / inhibition
        expressions['v_net'] = expressions['v_fwd'] - expressions['v_bwd']

    def get_qssa_rate_expression(self):
        """Return [inhibitor] / k_inhibition."""
        a = self.reactants.inhibitor.symbol
        k = self.parameters.k_inhibition.symbol
        return a / k

    def update_qssa_rate_expression(self):
        # No derived QSSA parameters to refresh for this modifier.
        return None

    def get_full_rate_expression(self):
        raise NotImplementedError

    def calculate_rate_constants(self):
        raise NotImplementedError
|
en
| 0.750933
|
# -*- coding: utf-8 -*- .. module:: skimpy
:platform: Unix, Windows
:synopsis: Simple Kinetic Models in Python
.. moduleauthor:: SKiMPy team
[---------]
Copyright 2017 Laboratory of Computational Systems Biotechnology (LCSB),
Ecole Polytechnique Federale de Lausanne (EPFL), Switzerland
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. This class describes a modifier to an expression, like a boundary condition
or constraint.
For example, changing a rate to a constant (boundary condition), or linking
it to another variable of the model (constraint).
It accepts as an argument a modifier.
A modifier is a function which will look at all your expressions, and
apply its transformation to them. As a result, its arguments have to be a
TabDict of expressions, such as KinModel.ODEFun.expressions Link the modifier to a model, to gain awareness of the inner/outer
variables
:param model:
:return: We differentiate boundary conditions as modifiers that define the boundaries
of the observed system. # Is the reactant constant it is not a variable anymore # Modify the reactant Set the rate to 0
:param expressions:
:return: Add a concentration rate term to your rate expression # FIXME Please give us an alternate name we _REALLY_ don't like it Add to the rate expression
:param expressions:
:return: # TODO: Find a way to make sure the flux_value does not depend on an # inner variable # TODO: Implement Reaction modifiers change the flux reaction rate expressions
:param expression: {vnet, vfwd, vbwd}
:return: # First oder modification of the of Keq # expressions = TabDict([('v_net', rate_expression), # ('v_fwd', forward_rate_expression), # ('v_bwd', backward_rate_expression), # ]) # TODO Unify between skimpy versions change the flux reaction rate expressions
:param expression: {vnet, vfwd, vbwd}
:return: # Modification of the of Keq # expressions = TabDict([('v_net', rate_expression), # ('v_fwd', forward_rate_expression), # ('v_bwd', backward_rate_expression), # ]) Activators and inhibitors change the flux reaction rate expressions
:param expression: {vnet, vfwd, vbwd}
:return: # Modification of the of Keq # expressions = TabDict([('v_net', rate_expression), # ('v_fwd', forward_rate_expression), # ('v_bwd', backward_rate_expression), # ]) change the flux reaction rate expressions
:param expression: {vnet, vfwd, vbwd}
:return: # Modification of the of Keq # expressions = TabDict([('v_net', rate_expression), # ('v_fwd', forward_rate_expression), # ('v_bwd', backward_rate_expression), # ])
| 2.821651
| 3
|
release/scripts/addons/amaranth/render/samples_scene.py
|
simileV/blenderStereo29
| 1
|
6628389
|
<reponame>simileV/blenderStereo29
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Cycles: Samples per Scene
When working in production, it's often more convenient to do lighting and
compositing in different scenes (so you can later append the comp scene
to bring together nodes, settings, lamps, RenderLayers).
This would lead to work with more than one scene. When doing render tests
you want to know at a glance how many samples the other scenes have,
without manually switching. This is the idea behind the feature.
Find it on the Sampling panel, on Render properties.
Developed during Caminandes Open Movie Project
"""
import bpy
from amaranth import utils
from bpy.props import (
BoolProperty,
IntProperty,
)
class AMTH_RENDER_OT_cycles_samples_percentage_set(bpy.types.Operator):
    """Save the current number of samples per shader as final (gets saved in .blend)"""
    # NOTE: the docstring above doubles as the operator tooltip in Blender.
    bl_idname = "scene.amaranth_cycles_samples_percentage_set"
    bl_label = "Set as Render Samples"

    def execute(self, context):
        cy = context.scene.cycles
        cy.use_samples_final = True
        # Snapshot the per-shader sample counts into a custom ID property
        # so they persist inside the .blend file.
        context.scene["amth_cycles_samples_final"] = [
            cy.diffuse_samples,
            cy.glossy_samples,
            cy.transmission_samples,
            cy.ao_samples,
            cy.mesh_light_samples,
            cy.subsurface_samples,
            cy.volume_samples,
        ]
        self.report({"INFO"}, "Render Samples Saved")
        return {"FINISHED"}
class AMTH_RENDER_OT_cycles_samples_percentage(bpy.types.Operator):
    """Set a percentage of the final render samples"""
    bl_idname = "scene.amaranth_cycles_samples_percentage"
    bl_label = "Set Render Samples Percentage"

    percent: IntProperty(
        name="Percentage",
        description="Percentage to divide render samples by",
        subtype="PERCENTAGE", default=0
    )

    def execute(self, context):
        """Scale every per-shader sample count to ``percent`` of the
        values saved by the companion "set" operator."""
        cy = context.scene.cycles
        saved = context.scene["amth_cycles_samples_final"]
        # The "pinned" state is only true when restoring the full 100%.
        cy.use_samples_final = self.percent == 100
        sample_attrs = (
            "diffuse_samples",
            "glossy_samples",
            "transmission_samples",
            "ao_samples",
            "mesh_light_samples",
            "subsurface_samples",
            "volume_samples",
        )
        for idx, attr in enumerate(sample_attrs):
            # Same arithmetic/rounding as before: truncate (x / 100) * percent.
            setattr(cy, attr, int((saved[idx] / 100) * self.percent))
        return {"FINISHED"}
def render_cycles_scene_samples(self, context):
"""Draw per-view-layer and per-scene Cycles sample counts.

Appended to Cycles' Sampling panel by register(); ``self`` is the panel
being drawn, so ``self.layout`` is the panel layout.
"""
layout = self.layout
scene = context.scene
render = scene.render
if utils.cycles_exists():
cscene = scene.cycles
list_sampling = scene.amaranth_cycles_list_sampling
# Set Render Samples
# Percentage buttons only apply to branched path tracing, where
# per-shader sample counts exist.
if utils.cycles_exists() and cscene.progressive == "BRANCHED_PATH":
layout.separator()
split = layout.split()
col = split.column()
col.operator(
AMTH_RENDER_OT_cycles_samples_percentage_set.bl_idname,
text="%s" %
"Set as Render Samples" if cscene.use_samples_final else "Set New Render Samples",
icon="%s" %
"PINNED" if cscene.use_samples_final else "UNPINNED")
col = split.column()
row = col.row(align=True)
# Percentage buttons stay disabled until samples have been saved once.
row.enabled = True if scene.get("amth_cycles_samples_final") else False
row.operator(
AMTH_RENDER_OT_cycles_samples_percentage.bl_idname,
text="100%").percent = 100
row.operator(
AMTH_RENDER_OT_cycles_samples_percentage.bl_idname,
text="75%").percent = 75
row.operator(
AMTH_RENDER_OT_cycles_samples_percentage.bl_idname,
text="50%").percent = 50
row.operator(
AMTH_RENDER_OT_cycles_samples_percentage.bl_idname,
text="25%").percent = 25
# List Samples
# Only worth listing when there is more than one view/layer or scene.
#if (len(scene.render.layers) > 1) or (len(bpy.data.scenes) > 1):
if (len(scene.render.views) > 1) or (len(bpy.data.scenes) > 1):
box = layout.box()
row = box.row(align=True)
col = row.column(align=True)
row = col.row(align=True)
row.alignment = "LEFT"
row.prop(scene, "amaranth_cycles_list_sampling",
icon="%s" % "TRIA_DOWN" if list_sampling else "TRIA_RIGHT",
emboss=False)
if list_sampling:
#if len(scene.render.layers) == 1 and render.layers[0].samples == 0:
if len(scene.render.views) == 1 and render.view_layers[0].samples == 0:
pass
else:
col.separator()
#col.label(text="RenderLayers:", icon="RENDERLAYERS")
col.label(text="View Layers:", icon="RENDERLAYERS")
#for rl in scene.render.layers:
for rl in scene.view_layers:
row = col.row(align=True)
row.label(text=rl.name, icon="BLANK1")
row.prop(
rl, "samples", text="%s" %
"Samples" if rl.samples > 0 else "Automatic (%s)" %
(cscene.aa_samples if cscene.progressive == "BRANCHED_PATH" else cscene.samples))
if (len(bpy.data.scenes) > 1):
col.separator()
col.label(text="Scenes:", icon="SCENE_DATA")
# Path tracing exposes "samples"; branched path tracing "aa_samples".
if utils.cycles_exists() and cscene.progressive == "PATH":
for s in bpy.data.scenes:
if s != scene:
row = col.row(align=True)
if s.render.engine == "CYCLES":
cscene = s.cycles
#row.label(s.name)
row.label(text=s.name)
row.prop(cscene, "samples", icon="BLANK1")
else:
row.label(
text="Scene: '%s' is not using Cycles" %
s.name)
else:
for s in bpy.data.scenes:
if s != scene:
row = col.row(align=True)
if s.render.engine == "CYCLES":
cscene = s.cycles
row.label(text=s.name, icon="BLANK1")
row.prop(cscene, "aa_samples",
text="AA Samples")
else:
row.label(
text="Scene: '%s' is not using Cycles" %
s.name)
def init():
    """Create the add-on's custom properties on Scene and Cycles settings."""
    scene = bpy.types.Scene
    if utils.cycles_exists():
        scene.amaranth_cycles_list_sampling = bpy.props.BoolProperty(
            default=False,
            name="Samples Per:")
        use_final = BoolProperty(
            name="Use Final Render Samples",
            description="Use current shader samples as final render samples",
            default=False
        )
        # Note: versioning to address the settings move introduced in 2.79.1.
        if bpy.app.version >= (2, 79, 1):
            from cycles import properties as _cycles_props
            _cycles_props.CyclesRenderSettings.use_samples_final = use_final
        else:
            bpy.types.CyclesRenderSettings.use_samples_final = use_final
def clear():
    """Remove the add-on's leftover custom ID properties on unregister.

    BUGFIX: the first key was misspelled "amarath_cycles_list_sampling" and
    therefore never matched the "amaranth_cycles_list_sampling" property
    created in init().
    """
    wm = bpy.context.window_manager
    # NOTE(review): init() registers these on Scene/Cycles *types*; checking
    # the window manager only clears stale ID-property copies — confirm this
    # is the intended cleanup target.
    for p in ("amaranth_cycles_list_sampling", "use_samples_final"):
        if p in wm:
            del wm[p]
def register():
    """Register properties, operators and the panel draw hook."""
    init()
    for cls in (AMTH_RENDER_OT_cycles_samples_percentage,
                AMTH_RENDER_OT_cycles_samples_percentage_set):
        bpy.utils.register_class(cls)
    if utils.cycles_exists():
        # Panel class was renamed in 2.79.1.
        panel = (bpy.types.CYCLES_RENDER_PT_sampling
                 if bpy.app.version >= (2, 79, 1)
                 else bpy.types.CyclesRender_PT_sampling)
        panel.append(render_cycles_scene_samples)
def unregister():
    """Undo register(): drop operators, the panel draw hook and properties."""
    for cls in (AMTH_RENDER_OT_cycles_samples_percentage,
                AMTH_RENDER_OT_cycles_samples_percentage_set):
        bpy.utils.unregister_class(cls)
    if utils.cycles_exists():
        # Panel class was renamed in 2.79.1.
        panel = (bpy.types.CYCLES_RENDER_PT_sampling
                 if bpy.app.version >= (2, 79, 1)
                 else bpy.types.CyclesRender_PT_sampling)
        panel.remove(render_cycles_scene_samples)
    clear()
|
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Cycles: Samples per Scene
When working in production, it's often more convenient to do lighting and
compositing in different scenes (so you can later append the comp scene
to bring together nodes, settings, lamps, RenderLayers).
This would lead to work with more than one scene. When doing render tests
you want to know at a glance how many samples the other scenes have,
without manually switching. This is the idea behind the feature.
Find it on the Sampling panel, on Render properties.
Developed during Caminandes Open Movie Project
"""
import bpy
from amaranth import utils
from bpy.props import (
BoolProperty,
IntProperty,
)
class AMTH_RENDER_OT_cycles_samples_percentage_set(bpy.types.Operator):
"""Save the current number of samples per shader as final (gets saved in .blend)"""
bl_idname = "scene.amaranth_cycles_samples_percentage_set"
bl_label = "Set as Render Samples"
def execute(self, context):
cycles = context.scene.cycles
cycles.use_samples_final = True
context.scene["amth_cycles_samples_final"] = [
cycles.diffuse_samples,
cycles.glossy_samples,
cycles.transmission_samples,
cycles.ao_samples,
cycles.mesh_light_samples,
cycles.subsurface_samples,
cycles.volume_samples]
self.report({"INFO"}, "Render Samples Saved")
return {"FINISHED"}
class AMTH_RENDER_OT_cycles_samples_percentage(bpy.types.Operator):
"""Set a percentage of the final render samples"""
bl_idname = "scene.amaranth_cycles_samples_percentage"
bl_label = "Set Render Samples Percentage"
percent: IntProperty(
name="Percentage",
description="Percentage to divide render samples by",
subtype="PERCENTAGE", default=0
)
def execute(self, context):
percent = self.percent
cycles = context.scene.cycles
cycles_samples_final = context.scene["amth_cycles_samples_final"]
cycles.use_samples_final = False
if percent == 100:
cycles.use_samples_final = True
cycles.diffuse_samples = int((cycles_samples_final[0] / 100) * percent)
cycles.glossy_samples = int((cycles_samples_final[1] / 100) * percent)
cycles.transmission_samples = int(
(cycles_samples_final[2] / 100) * percent)
cycles.ao_samples = int((cycles_samples_final[3] / 100) * percent)
cycles.mesh_light_samples = int(
(cycles_samples_final[4] / 100) * percent)
cycles.subsurface_samples = int(
(cycles_samples_final[5] / 100) * percent)
cycles.volume_samples = int((cycles_samples_final[6] / 100) * percent)
return {"FINISHED"}
def render_cycles_scene_samples(self, context):
layout = self.layout
scene = context.scene
render = scene.render
if utils.cycles_exists():
cscene = scene.cycles
list_sampling = scene.amaranth_cycles_list_sampling
# Set Render Samples
if utils.cycles_exists() and cscene.progressive == "BRANCHED_PATH":
layout.separator()
split = layout.split()
col = split.column()
col.operator(
AMTH_RENDER_OT_cycles_samples_percentage_set.bl_idname,
text="%s" %
"Set as Render Samples" if cscene.use_samples_final else "Set New Render Samples",
icon="%s" %
"PINNED" if cscene.use_samples_final else "UNPINNED")
col = split.column()
row = col.row(align=True)
row.enabled = True if scene.get("amth_cycles_samples_final") else False
row.operator(
AMTH_RENDER_OT_cycles_samples_percentage.bl_idname,
text="100%").percent = 100
row.operator(
AMTH_RENDER_OT_cycles_samples_percentage.bl_idname,
text="75%").percent = 75
row.operator(
AMTH_RENDER_OT_cycles_samples_percentage.bl_idname,
text="50%").percent = 50
row.operator(
AMTH_RENDER_OT_cycles_samples_percentage.bl_idname,
text="25%").percent = 25
# List Samples
#if (len(scene.render.layers) > 1) or (len(bpy.data.scenes) > 1):
if (len(scene.render.views) > 1) or (len(bpy.data.scenes) > 1):
box = layout.box()
row = box.row(align=True)
col = row.column(align=True)
row = col.row(align=True)
row.alignment = "LEFT"
row.prop(scene, "amaranth_cycles_list_sampling",
icon="%s" % "TRIA_DOWN" if list_sampling else "TRIA_RIGHT",
emboss=False)
if list_sampling:
#if len(scene.render.layers) == 1 and render.layers[0].samples == 0:
if len(scene.render.views) == 1 and render.view_layers[0].samples == 0:
pass
else:
col.separator()
#col.label(text="RenderLayers:", icon="RENDERLAYERS")
col.label(text="View Layers:", icon="RENDERLAYERS")
#for rl in scene.render.layers:
for rl in scene.view_layers:
row = col.row(align=True)
row.label(text=rl.name, icon="BLANK1")
row.prop(
rl, "samples", text="%s" %
"Samples" if rl.samples > 0 else "Automatic (%s)" %
(cscene.aa_samples if cscene.progressive == "BRANCHED_PATH" else cscene.samples))
if (len(bpy.data.scenes) > 1):
col.separator()
col.label(text="Scenes:", icon="SCENE_DATA")
if utils.cycles_exists() and cscene.progressive == "PATH":
for s in bpy.data.scenes:
if s != scene:
row = col.row(align=True)
if s.render.engine == "CYCLES":
cscene = s.cycles
#row.label(s.name)
row.label(text=s.name)
row.prop(cscene, "samples", icon="BLANK1")
else:
row.label(
text="Scene: '%s' is not using Cycles" %
s.name)
else:
for s in bpy.data.scenes:
if s != scene:
row = col.row(align=True)
if s.render.engine == "CYCLES":
cscene = s.cycles
row.label(text=s.name, icon="BLANK1")
row.prop(cscene, "aa_samples",
text="AA Samples")
else:
row.label(
text="Scene: '%s' is not using Cycles" %
s.name)
def init():
scene = bpy.types.Scene
if utils.cycles_exists():
scene.amaranth_cycles_list_sampling = bpy.props.BoolProperty(
default=False,
name="Samples Per:")
# Note: add versioning code to adress changes introduced in 2.79.1
if bpy.app.version >= (2, 79, 1):
from cycles import properties as _cycles_props
_cycles_props.CyclesRenderSettings.use_samples_final = BoolProperty(
name="Use Final Render Samples",
description="Use current shader samples as final render samples",
default=False
)
else:
bpy.types.CyclesRenderSettings.use_samples_final = BoolProperty(
name="Use Final Render Samples",
description="Use current shader samples as final render samples",
default=False
)
def clear():
wm = bpy.context.window_manager
for p in ("amarath_cycles_list_sampling", "use_samples_final"):
if p in wm:
del wm[p]
def register():
init()
bpy.utils.register_class(AMTH_RENDER_OT_cycles_samples_percentage)
bpy.utils.register_class(AMTH_RENDER_OT_cycles_samples_percentage_set)
if utils.cycles_exists():
if bpy.app.version >= (2, 79, 1):
bpy.types.CYCLES_RENDER_PT_sampling.append(render_cycles_scene_samples)
else:
bpy.types.CyclesRender_PT_sampling.append(render_cycles_scene_samples)
def unregister():
bpy.utils.unregister_class(AMTH_RENDER_OT_cycles_samples_percentage)
bpy.utils.unregister_class(AMTH_RENDER_OT_cycles_samples_percentage_set)
if utils.cycles_exists():
if bpy.app.version >= (2, 79, 1):
bpy.types.CYCLES_RENDER_PT_sampling.remove(render_cycles_scene_samples)
else:
bpy.types.CyclesRender_PT_sampling.remove(render_cycles_scene_samples)
clear()
|
en
| 0.839831
|
# This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Cycles: Samples per Scene When working in production, it's often more convenient to do lighting and compositing in different scenes (so you can later append the comp scene to bring together nodes, settings, lamps, RenderLayers). This would lead to work with more than one scene. When doing render tests you want to know at a glance how many samples the other scenes have, without manually switching. This is the idea behind the feature. Find it on the Sampling panel, on Render properties. Developed during Caminandes Open Movie Project Save the current number of samples per shader as final (gets saved in .blend) Set a percentage of the final render samples # Set Render Samples # List Samples #if (len(scene.render.layers) > 1) or (len(bpy.data.scenes) > 1): #if len(scene.render.layers) == 1 and render.layers[0].samples == 0: #col.label(text="RenderLayers:", icon="RENDERLAYERS") #for rl in scene.render.layers: #row.label(s.name) # Note: add versioning code to adress changes introduced in 2.79.1
| 1.984185
| 2
|
Nested_Lists.py
|
richapatil/Hackerrank-python
| 1
|
6628390
|
<reponame>richapatil/Hackerrank-python
def second_lowest_students(records):
    """Return the names holding the second-lowest score, alphabetically sorted.

    :param records: iterable of (name, score) pairs
    :return: sorted list of names whose score equals the second-lowest
        *distinct* score
    :raises IndexError: if fewer than two distinct scores exist
    """
    # De-duplicate scores first so a tie for the lowest grade cannot be
    # mistaken for the second-lowest one.
    second_lowest = sorted({score for _, score in records})[1]
    return sorted(name for name, score in records if score == second_lowest)


if __name__ == '__main__':
    students = []  # (name, score) pairs read from stdin
    for _ in range(int(input())):  # first line: number of students
        name = input()             # student name
        score = float(input())     # student score
        students.append((name, score))
    for s in second_lowest_students(students):
        print(s)
|
if __name__ == '__main__':
students=[] #Creating a list for storing Students with their marks
for _ in range(int(input())):
name = input() #taking names of student
score = float(input()) #take score of each student
students.append((name,score)) #appending the name and score of students one by one
second_lowest=sorted(list(set([x[1] for x in students])))[1] #A
second_students=sorted([s for s,g in students if g==second_lowest]) #B
for s in second_students: #For printing the second student
print(s)
#Step by step explaination of A and # B
# A : second_lowest=sorted(list(set([x[1] for x in students])))[1]
# [x[1] for x in students] - In this the x[1] is accesinh the score element from the student list
# set - Set is used to remove duplicate and during this process list is converted into Set
# List - It helps to remove set into List
# Sorting - It sort the element in accesing order
# the outer [1] - As we need to acess the second lowest score and taht is present in 1st position so we wrote [1]
# B : second_students=sorted([s for s,g in students if g==second_lowest])
# s ==> student, g==> score
# s for s,g in students - this line let us select the name in the tuples. So you select s for s,g ==> you select name for name,score
# if g == second_lowest - this line select only the name that their score are equal to the second lowest. if g == second_lowest ==> if score match the second lowest score.
# The method (sorted) sorts the name in alphabetical order.
|
en
| 0.881504
|
#Creating a list for storing Students with their marks #taking names of student #take score of each student #appending the name and score of students one by one #A #B #For printing the second student #Step by step explaination of A and # B # A : second_lowest=sorted(list(set([x[1] for x in students])))[1] # [x[1] for x in students] - In this the x[1] is accesinh the score element from the student list # set - Set is used to remove duplicate and during this process list is converted into Set # List - It helps to remove set into List # Sorting - It sort the element in accesing order # the outer [1] - As we need to acess the second lowest score and taht is present in 1st position so we wrote [1] # B : second_students=sorted([s for s,g in students if g==second_lowest]) # s ==> student, g==> score # s for s,g in students - this line let us select the name in the tuples. So you select s for s,g ==> you select name for name,score # if g == second_lowest - this line select only the name that their score are equal to the second lowest. if g == second_lowest ==> if score match the second lowest score. # The method (sorted) sorts the name in alphabetical order.
| 4.292332
| 4
|
thingsboard_gateway/connectors/mqtt/json_mqtt_uplink_converter.py
|
DavideBorsatti/thingsboard-gateway
| 0
|
6628391
|
<reponame>DavideBorsatti/thingsboard-gateway
# Copyright 2022. ThingsBoard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from re import search
from time import time
from simplejson import dumps
from thingsboard_gateway.connectors.mqtt.mqtt_uplink_converter import MqttUplinkConverter, log
from thingsboard_gateway.tb_utility.tb_utility import TBUtility
class JsonMqttUplinkConverter(MqttUplinkConverter):
"""Convert an incoming MQTT JSON payload into the ThingsBoard uplink
dict: {"deviceName", "deviceType", "attributes", "telemetry"}."""
def __init__(self, config):
# Only the "converter" sub-section of the mapping config is kept.
self.__config = config.get('converter')
def convert(self, config, data):
"""Build the uplink dict for one message.

:param config: the MQTT topic the message arrived on (matched by the
    *TopicExpression regexes below)
:param data: parsed JSON payload of the message
:return: dict with deviceName/deviceType plus attributes/telemetry lists
"""
datatypes = {"attributes": "attributes",
"timeseries": "telemetry"}
dict_result = {"deviceName": None, "deviceType": None, "attributes": [], "telemetry": []}
try:
# Device name: resolved from the payload (JsonExpression with ${tag}
# placeholders) or, failing that, from the topic (TopicExpression regex).
if self.__config.get("deviceNameJsonExpression") is not None:
device_name_tags = TBUtility.get_values(self.__config.get("deviceNameJsonExpression"), data,
get_tag=True)
device_name_values = TBUtility.get_values(self.__config.get("deviceNameJsonExpression"), data,
expression_instead_none=True)
dict_result['deviceName'] = self.__config.get("deviceNameJsonExpression")
for (device_name_tag, device_name_value) in zip(device_name_tags, device_name_values):
is_valid_key = "${" in self.__config.get("deviceNameJsonExpression") and "}" in \
self.__config.get("deviceNameJsonExpression")
dict_result['deviceName'] = dict_result['deviceName'].replace('${' + str(device_name_tag) + '}',
str(device_name_value)) \
if is_valid_key else device_name_tag
elif self.__config.get("deviceNameTopicExpression") is not None:
search_result = search(self.__config["deviceNameTopicExpression"], config)
if search_result is not None:
dict_result["deviceName"] = search_result.group(0)
else:
log.debug(
"Regular expression result is None. deviceNameTopicExpression parameter will be interpreted as a deviceName\n Topic: %s\nRegex: %s",
config, self.__config.get("deviceNameTopicExpression"))
dict_result["deviceName"] = self.__config.get("deviceNameTopicExpression")
else:
log.error("The expression for looking \"deviceName\" not found in config %s", dumps(self.__config))
# Device type: same resolution strategy as the device name above.
if self.__config.get("deviceTypeJsonExpression") is not None:
device_type_tags = TBUtility.get_values(self.__config.get("deviceTypeJsonExpression"), data,
get_tag=True)
device_type_values = TBUtility.get_values(self.__config.get("deviceTypeJsonExpression"), data,
expression_instead_none=True)
dict_result["deviceType"] = self.__config.get("deviceTypeJsonExpression")
for (device_type_tag, device_type_value) in zip(device_type_tags, device_type_values):
is_valid_key = "${" in self.__config.get("deviceTypeJsonExpression") and "}" in \
self.__config.get("deviceTypeJsonExpression")
dict_result["deviceType"] = dict_result["deviceType"].replace('${' + str(device_type_tag) + '}',
str(device_type_value)) \
if is_valid_key else device_type_tag
elif self.__config.get("deviceTypeTopicExpression") is not None:
search_result = search(self.__config["deviceTypeTopicExpression"], config)
if search_result is not None:
dict_result["deviceType"] = search_result.group(0)
else:
log.debug(
"Regular expression result is None. deviceTypeTopicExpression will be interpreted as a deviceType\n Topic: %s\nRegex: %s",
config,
self.__config.get("deviceTypeTopicExpression"))
dict_result["deviceType"] = self.__config.get("deviceTypeTopicExpression")
else:
log.error("The expression for looking \"deviceType\" not found in config %s", dumps(self.__config))
except Exception as e:
log.error('Error in converter, for config: \n%s\n and message: \n%s\n', dumps(self.__config), data)
log.exception(e)
try:
# Attributes / timeseries: expand ${tag} placeholders in both the
# configured key and value templates for every mapping entry.
for datatype in datatypes:
dict_result[datatypes[datatype]] = []
for datatype_config in self.__config.get(datatype, []):
values = TBUtility.get_values(datatype_config["value"], data, datatype_config["type"],
expression_instead_none=True)
values_tags = TBUtility.get_values(datatype_config["value"], data, datatype_config["type"],
get_tag=True)
keys = TBUtility.get_values(datatype_config["key"], data, datatype_config["type"],
expression_instead_none=True)
keys_tags = TBUtility.get_values(datatype_config["key"], data, get_tag=True)
full_key = datatype_config["key"]
for (key, key_tag) in zip(keys, keys_tags):
is_valid_key = "${" in datatype_config["key"] and "}" in \
datatype_config["key"]
full_key = full_key.replace('${' + str(key_tag) + '}',
str(key)) if is_valid_key else key_tag
full_value = datatype_config["value"]
for (value, value_tag) in zip(values, values_tags):
is_valid_value = "${" in datatype_config["value"] and "}" in \
datatype_config["value"]
full_value = full_value.replace('${' + str(value_tag) + '}',
str(value)) if is_valid_value else str(value)
# Telemetry keeps the payload timestamp ("ts" or "timestamp")
# when present, falling back to the current wall-clock time.
if datatype == 'timeseries' and (
data.get("ts") is not None or data.get("timestamp") is not None):
dict_result[datatypes[datatype]].append(
{"ts": data.get('ts', data.get('timestamp', int(time()))),
'values': {full_key: full_value}})
else:
dict_result[datatypes[datatype]].append({full_key: full_value})
except Exception as e:
log.error('Error in converter, for config: \n%s\n and message: \n%s\n', dumps(self.__config), str(data))
log.exception(e)
return dict_result
|
# Copyright 2022. ThingsBoard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from re import search
from time import time
from simplejson import dumps
from thingsboard_gateway.connectors.mqtt.mqtt_uplink_converter import MqttUplinkConverter, log
from thingsboard_gateway.tb_utility.tb_utility import TBUtility
class JsonMqttUplinkConverter(MqttUplinkConverter):
    """Uplink converter turning a JSON MQTT payload into the ThingsBoard
    gateway format: a dict with "deviceName", "deviceType", "attributes"
    and "telemetry" entries, driven by the "converter" section of the
    connector mapping configuration."""
    def __init__(self, config):
        # Only the "converter" sub-section of the mapping config is used.
        self.__config = config.get('converter')
    def convert(self, config, data):
        """Convert one incoming MQTT message.
        :param config: the topic the message arrived on; the
            *TopicExpression settings are regexes matched against it.
        :param data: the decoded JSON payload of the message.
        :return: dict ready for the gateway service (see class docstring).
        """
        # Maps config section name -> key used in the result dict.
        datatypes = {"attributes": "attributes",
                     "timeseries": "telemetry"}
        dict_result = {"deviceName": None, "deviceType": None, "attributes": [], "telemetry": []}
        # ---- Resolve deviceName and deviceType -------------------------
        try:
            if self.__config.get("deviceNameJsonExpression") is not None:
                # Template such as "${serialNumber}" resolved against the JSON
                # payload: every ${tag} occurrence is replaced by its value.
                device_name_tags = TBUtility.get_values(self.__config.get("deviceNameJsonExpression"), data,
                                                        get_tag=True)
                device_name_values = TBUtility.get_values(self.__config.get("deviceNameJsonExpression"), data,
                                                          expression_instead_none=True)
                dict_result['deviceName'] = self.__config.get("deviceNameJsonExpression")
                for (device_name_tag, device_name_value) in zip(device_name_tags, device_name_values):
                    # If the expression has no ${...} placeholder, fall back to
                    # using the extracted tag itself as the device name.
                    is_valid_key = "${" in self.__config.get("deviceNameJsonExpression") and "}" in \
                                   self.__config.get("deviceNameJsonExpression")
                    dict_result['deviceName'] = dict_result['deviceName'].replace('${' + str(device_name_tag) + '}',
                                                                                  str(device_name_value)) \
                        if is_valid_key else device_name_tag
            elif self.__config.get("deviceNameTopicExpression") is not None:
                # Device name extracted from the topic with a regex.
                search_result = search(self.__config["deviceNameTopicExpression"], config)
                if search_result is not None:
                    dict_result["deviceName"] = search_result.group(0)
                else:
                    log.debug(
                        "Regular expression result is None. deviceNameTopicExpression parameter will be interpreted as a deviceName\n Topic: %s\nRegex: %s",
                        config, self.__config.get("deviceNameTopicExpression"))
                    dict_result["deviceName"] = self.__config.get("deviceNameTopicExpression")
            else:
                log.error("The expression for looking \"deviceName\" not found in config %s", dumps(self.__config))
            if self.__config.get("deviceTypeJsonExpression") is not None:
                # Same ${tag} template mechanism as for the device name above.
                device_type_tags = TBUtility.get_values(self.__config.get("deviceTypeJsonExpression"), data,
                                                        get_tag=True)
                device_type_values = TBUtility.get_values(self.__config.get("deviceTypeJsonExpression"), data,
                                                          expression_instead_none=True)
                dict_result["deviceType"] = self.__config.get("deviceTypeJsonExpression")
                for (device_type_tag, device_type_value) in zip(device_type_tags, device_type_values):
                    is_valid_key = "${" in self.__config.get("deviceTypeJsonExpression") and "}" in \
                                   self.__config.get("deviceTypeJsonExpression")
                    dict_result["deviceType"] = dict_result["deviceType"].replace('${' + str(device_type_tag) + '}',
                                                                                  str(device_type_value)) \
                        if is_valid_key else device_type_tag
            elif self.__config.get("deviceTypeTopicExpression") is not None:
                search_result = search(self.__config["deviceTypeTopicExpression"], config)
                if search_result is not None:
                    dict_result["deviceType"] = search_result.group(0)
                else:
                    log.debug(
                        "Regular expression result is None. deviceTypeTopicExpression will be interpreted as a deviceType\n Topic: %s\nRegex: %s",
                        config,
                        self.__config.get("deviceTypeTopicExpression"))
                    dict_result["deviceType"] = self.__config.get("deviceTypeTopicExpression")
            else:
                log.error("The expression for looking \"deviceType\" not found in config %s", dumps(self.__config))
        except Exception as e:
            log.error('Error in converter, for config: \n%s\n and message: \n%s\n', dumps(self.__config), data)
            log.exception(e)
        # ---- Resolve attribute / telemetry key-value pairs -------------
        try:
            for datatype in datatypes:
                dict_result[datatypes[datatype]] = []
                for datatype_config in self.__config.get(datatype, []):
                    values = TBUtility.get_values(datatype_config["value"], data, datatype_config["type"],
                                                  expression_instead_none=True)
                    values_tags = TBUtility.get_values(datatype_config["value"], data, datatype_config["type"],
                                                       get_tag=True)
                    keys = TBUtility.get_values(datatype_config["key"], data, datatype_config["type"],
                                                expression_instead_none=True)
                    # NOTE(review): unlike the other get_values calls this one
                    # omits the "type" argument — confirm against
                    # TBUtility.get_values' default before changing.
                    keys_tags = TBUtility.get_values(datatype_config["key"], data, get_tag=True)
                    # Substitute every ${tag} in the key template.
                    full_key = datatype_config["key"]
                    for (key, key_tag) in zip(keys, keys_tags):
                        is_valid_key = "${" in datatype_config["key"] and "}" in \
                                       datatype_config["key"]
                        full_key = full_key.replace('${' + str(key_tag) + '}',
                                                    str(key)) if is_valid_key else key_tag
                    # Substitute every ${tag} in the value template.
                    full_value = datatype_config["value"]
                    for (value, value_tag) in zip(values, values_tags):
                        is_valid_value = "${" in datatype_config["value"] and "}" in \
                                         datatype_config["value"]
                        full_value = full_value.replace('${' + str(value_tag) + '}',
                                                        str(value)) if is_valid_value else str(value)
                    if datatype == 'timeseries' and (
                            data.get("ts") is not None or data.get("timestamp") is not None):
                        # Prefer the payload's own "ts"/"timestamp"; fall back
                        # to the current time if both keys resolve to None.
                        dict_result[datatypes[datatype]].append(
                            {"ts": data.get('ts', data.get('timestamp', int(time()))),
                             'values': {full_key: full_value}})
                    else:
                        dict_result[datatypes[datatype]].append({full_key: full_value})
        except Exception as e:
            log.error('Error in converter, for config: \n%s\n and message: \n%s\n', dumps(self.__config), str(data))
            log.exception(e)
        return dict_result
|
en
| 0.842097
|
# Copyright 2022. ThingsBoard # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
| 1.903666
| 2
|
export_histogram.py
|
greplova/MachineLearning
| 1
|
6628392
|
<filename>export_histogram.py
import tensorflow as tf
import numpy as np
import glob, os
# This is the path where the model is saved
# it can be a relative path, if script is in the same folder that contain the model data
inpath = 'sparse_model_batches_noisy/'
# Directory that receives the exported .dat files.
outdir = 'histogram_data_files_noisy/'


def export_histograms(event_glob, tag, out_prefix):
    """Export every histogram summary whose tag matches `tag`.

    Scans all TensorFlow event files matching `event_glob` and writes one
    '<out_prefix>_<step>.dat' file per histogram, where each line holds
    'bucket_limit, bucket_count'.  A '<out_prefix>_steps.dat' file listing
    every exported step number is written at the end.

    Returns the list of exported step numbers, in encounter order.
    """
    steps = []
    # One event file may hold many events; one event may hold many values.
    for event_file in glob.glob(event_glob):
        for event in tf.train.summary_iterator(event_file):
            for value in event.summary.value:
                if value.tag != tag:
                    continue
                steps.append(event.step)
                # 'with' guarantees the file is closed even if a write fails
                # (the original code leaked the handle on error).
                with open('%s_%d.dat' % (out_prefix, event.step), 'w') as f:
                    for n in range(len(value.histo.bucket) - 1):
                        f.write(str(value.histo.bucket_limit[n]) + ', ' + str(value.histo.bucket[n]) + '\n')
    # Record all the step numbers that were exported.
    with open(out_prefix + '_steps.dat', 'w') as f:
        for step in steps:
            f.write(str(step) + '\n')
    return steps


# Evaluation histograms live in the top-level event files.
export_histograms(inpath + 'events*', 'histogram_eval', outdir + 'histogram_eval')
# Training histograms are written to a separate summary sub-directory.
export_histograms(inpath + 'histogram_summary/events*', 'histogram_summary',
                  outdir + 'histogram_training')
|
<filename>export_histogram.py
import tensorflow as tf
import numpy as np
import glob, os
# This is the path where the model is saved
# it can be a relative path, if script is in the same folder that contain the model data
inpath = 'sparse_model_batches_noisy/'
# Directory that receives the exported .dat files.
outdir = 'histogram_data_files_noisy/'


def export_histograms(event_glob, tag, out_prefix):
    """Export every histogram summary whose tag matches `tag`.

    Scans all TensorFlow event files matching `event_glob` and writes one
    '<out_prefix>_<step>.dat' file per histogram, where each line holds
    'bucket_limit, bucket_count'.  A '<out_prefix>_steps.dat' file listing
    every exported step number is written at the end.

    Returns the list of exported step numbers, in encounter order.
    """
    steps = []
    # One event file may hold many events; one event may hold many values.
    for event_file in glob.glob(event_glob):
        for event in tf.train.summary_iterator(event_file):
            for value in event.summary.value:
                if value.tag != tag:
                    continue
                steps.append(event.step)
                # 'with' guarantees the file is closed even if a write fails
                # (the original code leaked the handle on error).
                with open('%s_%d.dat' % (out_prefix, event.step), 'w') as f:
                    for n in range(len(value.histo.bucket) - 1):
                        f.write(str(value.histo.bucket_limit[n]) + ', ' + str(value.histo.bucket[n]) + '\n')
    # Record all the step numbers that were exported.
    with open(out_prefix + '_steps.dat', 'w') as f:
        for step in steps:
            f.write(str(step) + '\n')
    return steps


# Evaluation histograms live in the top-level event files.
export_histograms(inpath + 'events*', 'histogram_eval', outdir + 'histogram_eval')
# Training histograms are written to a separate summary sub-directory.
export_histograms(inpath + 'histogram_summary/events*', 'histogram_summary',
                  outdir + 'histogram_training')
|
en
| 0.708237
|
# This is the path where the model is saved # it can be a relative path, if script is in the same folder that contain the model data ##################################### #First we export the evaluation data# ##################################### # First we create a list to save the steps with data # First loop is over all the event files in the path # Then we loop over all the events in the event file # Then we loop over each value stored for each event # Now if the value is the histogram_eval then # we append the step number to the list # We open a files for writing # Loop over all buckets in the histogram # Write the histogram values to the file # Remeber to always close the file # Write a file with all the step numbers ############################# #Now we export training data# ############################# # First we create the step list # Now we do the same loops # The training summaries is saved in a different path, so we add 'histogram_summary/' # Appending the step number # Opening file for writing # Remeber to always close the file # Write a file with all the step numbers
| 2.530294
| 3
|
Lib/site-packages/wx-2.8-msw-unicode/wx/lib/agw/labelbook.py
|
ekkipermana/robotframework-test
| 11
|
6628393
|
# --------------------------------------------------------------------------- #
# LABELBOOK And FLATIMAGEBOOK Widgets wxPython IMPLEMENTATION
#
# Original C++ Code From Eran, embedded in the FlatMenu source code
#
#
# License: wxWidgets license
#
#
# Python Code By:
#
# <NAME>, @ 03 Nov 2006
# Latest Revision: 17 Jan 2011, 15.00 GMT
#
#
# For All Kind Of Problems, Requests Of Enhancements And Bug Reports, Please
# Write To Me At:
#
# <EMAIL>
# <EMAIL>
#
# Or, Obviously, To The wxPython Mailing List!!!
#
# TODO:
# LabelBook - Support IMB_SHOW_ONLY_IMAGES
# LabelBook - An option for the draw border to only draw the border
# between the controls and the pages so the background
# colour can flow into the window background
#
#
#
# End Of Comments
# --------------------------------------------------------------------------- #
"""
LabelBook and FlatImageBook are a quasi-full generic and owner-drawn
implementations of `wx.Notebook`.
Description
===========
LabelBook and FlatImageBook are a quasi-full implementations of the `wx.Notebook`,
and designed to be a drop-in replacement for `wx.Notebook`. The API functions are
similar so one can expect the function to behave in the same way.
LabelBook and FlatImageBook share their appearance with `wx.Toolbook` and
`wx.Listbook`, while having more options for custom drawings, label positioning,
mouse pointing and so on. Moreover, they retain also some visual characteristics
of the Outlook address book.
Some features:
- They are generic controls;
- Supports for left, right, top (FlatImageBook only), bottom (FlatImageBook
only) book styles;
- Possibility to draw images only, text only or both (FlatImageBook only);
- Support for a "pin-button", that allows the user to shrink/expand the book
tab area;
- Shadows behind tabs (LabelBook only);
- Gradient shading of the tab area (LabelBook only);
- Web-like mouse pointing on tabs style (LabelBook only);
- Many customizable colours (tab area, active tab text, tab borders, active
tab, highlight) - LabelBook only.
And much more. See the demo for a quasi-complete review of all the functionalities
of LabelBook and FlatImageBook.
Supported Platforms
===================
LabelBook and FlatImageBook have been tested on the following platforms:
* Windows (Windows XP);
* Linux Ubuntu (Dapper 6.06)
Window Styles
=============
This class supports the following window styles:
=========================== =========== ==================================================
Window Styles Hex Value Description
=========================== =========== ==================================================
``INB_BOTTOM`` 0x1 Place labels below the page area. Available only for `FlatImageBook`.
``INB_LEFT`` 0x2 Place labels on the left side. Available only for `FlatImageBook`.
``INB_RIGHT`` 0x4 Place labels on the right side.
``INB_TOP`` 0x8 Place labels above the page area.
``INB_BORDER`` 0x10 Draws a border around `LabelBook` or `FlatImageBook`.
``INB_SHOW_ONLY_TEXT`` 0x20 Shows only text labels and no images. Available only for `LabelBook`.
``INB_SHOW_ONLY_IMAGES`` 0x40 Shows only tab images and no label texts. Available only for `LabelBook`.
``INB_FIT_BUTTON`` 0x80 Displays a pin button to show/hide the book control.
``INB_DRAW_SHADOW`` 0x100 Draw shadows below the book tabs. Available only for `LabelBook`.
``INB_USE_PIN_BUTTON`` 0x200 Displays a pin button to show/hide the book control.
``INB_GRADIENT_BACKGROUND`` 0x400 Draws a gradient shading on the tabs background. Available only for `LabelBook`.
``INB_WEB_HILITE`` 0x800 On mouse hovering, tabs behave like html hyperlinks. Available only for `LabelBook`.
``INB_NO_RESIZE`` 0x1000 Don't allow resizing of the tab area.
``INB_FIT_LABELTEXT`` 0x2000 Will fit the tab area to the longest text (or text+image if you have images) in all the tabs.
=========================== =========== ==================================================
Events Processing
=================
This class processes the following events:
=================================== ==================================================
Event Name Description
=================================== ==================================================
``EVT_IMAGENOTEBOOK_PAGE_CHANGED`` Notify client objects when the active page in `ImageNotebook` has changed.
``EVT_IMAGENOTEBOOK_PAGE_CHANGING`` Notify client objects when the active page in `ImageNotebook` is about to change.
``EVT_IMAGENOTEBOOK_PAGE_CLOSED`` Notify client objects when a page in `ImageNotebook` has been closed.
``EVT_IMAGENOTEBOOK_PAGE_CLOSING`` Notify client objects when a page in `ImageNotebook` is closing.
=================================== ==================================================
License And Version
===================
LabelBook and FlatImageBook are distributed under the wxPython license.
Latest Revision: <NAME> @ 17 Jan 2011, 15.00 GMT
Version 0.5.
"""
__docformat__ = "epytext"
#----------------------------------------------------------------------
# Beginning Of IMAGENOTEBOOK wxPython Code
#----------------------------------------------------------------------
import wx
from artmanager import ArtManager, DCSaver
from fmresources import *
# Check for the new method in 2.7 (not present in 2.6.3.3)
if wx.VERSION_STRING < "2.7":
    wx.Rect.Contains = lambda self, point: wx.Rect.Inside(self, point)
# FlatImageBook and LabelBook styles
# NOTE: these are bit flags; combine them with "|" in the agwStyle argument.
INB_BOTTOM = 1
""" Place labels below the page area. Available only for `FlatImageBook`."""
INB_LEFT = 2
""" Place labels on the left side. Available only for `FlatImageBook`."""
INB_RIGHT = 4
""" Place labels on the right side. """
INB_TOP = 8
""" Place labels above the page area. """
INB_BORDER = 16
""" Draws a border around `LabelBook` or `FlatImageBook`. """
INB_SHOW_ONLY_TEXT = 32
""" Shows only text labels and no images. Available only for `LabelBook`."""
INB_SHOW_ONLY_IMAGES = 64
""" Shows only tab images and no label texts. Available only for `LabelBook`."""
INB_FIT_BUTTON = 128
""" Displays a pin button to show/hide the book control. """
INB_DRAW_SHADOW = 256
""" Draw shadows below the book tabs. Available only for `LabelBook`."""
INB_USE_PIN_BUTTON = 512
""" Displays a pin button to show/hide the book control. """
INB_GRADIENT_BACKGROUND = 1024
""" Draws a gradient shading on the tabs background. Available only for `LabelBook`."""
INB_WEB_HILITE = 2048
""" On mouse hovering, tabs behave like html hyperlinks. Available only for `LabelBook`."""
INB_NO_RESIZE = 4096
""" Don't allow resizing of the tab area. """
INB_FIT_LABELTEXT = 8192
""" Will fit the tab area to the longest text (or text+image if you have images) in all the tabs. """
# The page-changed/changing events reuse the stock wx.Notebook event types so
# existing notebook handlers keep working; the close notifications are new.
wxEVT_IMAGENOTEBOOK_PAGE_CHANGED = wx.wxEVT_COMMAND_NOTEBOOK_PAGE_CHANGED
wxEVT_IMAGENOTEBOOK_PAGE_CHANGING = wx.wxEVT_COMMAND_NOTEBOOK_PAGE_CHANGING
wxEVT_IMAGENOTEBOOK_PAGE_CLOSING = wx.NewEventType()
wxEVT_IMAGENOTEBOOK_PAGE_CLOSED = wx.NewEventType()
#-----------------------------------#
#        ImageNotebookEvent
#-----------------------------------#
EVT_IMAGENOTEBOOK_PAGE_CHANGED = wx.EVT_NOTEBOOK_PAGE_CHANGED
""" Notify client objects when the active page in `ImageNotebook` has changed. """
EVT_IMAGENOTEBOOK_PAGE_CHANGING = wx.EVT_NOTEBOOK_PAGE_CHANGING
""" Notify client objects when the active page in `ImageNotebook` is about to change. """
EVT_IMAGENOTEBOOK_PAGE_CLOSING = wx.PyEventBinder(wxEVT_IMAGENOTEBOOK_PAGE_CLOSING, 1)
""" Notify client objects when a page in `ImageNotebook` is closing. """
EVT_IMAGENOTEBOOK_PAGE_CLOSED = wx.PyEventBinder(wxEVT_IMAGENOTEBOOK_PAGE_CLOSED, 1)
""" Notify client objects when a page in `ImageNotebook` has been closed. """
# ---------------------------------------------------------------------------- #
# Class ImageNotebookEvent
# ---------------------------------------------------------------------------- #
class ImageNotebookEvent(wx.PyCommandEvent):
    """
    Notification event sent when one of ``EVT_IMAGENOTEBOOK_PAGE_CHANGED``,
    ``EVT_IMAGENOTEBOOK_PAGE_CHANGING``, ``EVT_IMAGENOTEBOOK_PAGE_CLOSING``
    or ``EVT_IMAGENOTEBOOK_PAGE_CLOSED`` is mapped in the parent.
    """

    def __init__(self, eventType, eventId=1, sel=-1, oldsel=-1):
        """
        Default class constructor.

        :param `eventType`: the event type;
        :param `eventId`: the event identifier;
        :param `sel`: the current selection;
        :param `oldsel`: the old selection.
        """

        wx.PyCommandEvent.__init__(self, eventType, eventId)
        self._eventType = eventType
        # Selection bookkeeping: new selection, previous selection, and a
        # flag recording whether a handler vetoed the change.
        self._sel = sel
        self._oldsel = oldsel
        self._allowed = True

    def SetSelection(self, s):
        """
        Stores the new selection carried by this event.

        :param `s`: an integer specifying the new selection.
        """

        self._sel = s

    def SetOldSelection(self, s):
        """
        Stores the previous selection carried by this event.

        :param `s`: an integer specifying the old selection.
        """

        self._oldsel = s

    def GetSelection(self):
        """ Returns the new selection carried by this event. """

        return self._sel

    def GetOldSelection(self):
        """ Returns the previous selection carried by this event. """

        return self._oldsel

    def Veto(self):
        """
        Prevents the change announced by this event from happening.

        :note: Tell the user *why* the change was refused; an application
         that silently ignores a request is surprising to use.
        """

        self._allowed = False

    def Allow(self):
        """
        The opposite of L{Veto}: explicitly allows the event to be processed.
        Most events are allowed by default, so this is only needed for the
        few that are forbidden by default (see each event's description).
        """

        self._allowed = True

    def IsAllowed(self):
        """
        Returns ``True`` if the change is allowed, i.e. L{Veto} has not been
        called on this event, ``False`` otherwise.
        """

        return self._allowed
# ---------------------------------------------------------------------------- #
# Class ImageInfo
# ---------------------------------------------------------------------------- #
class ImageInfo(object):
    """
    Per-tab state for a single L{LabelBook} tab: the caption, the image-list
    index, and the position/size/text rectangles computed during layout.
    """

    def __init__(self, strCaption="", imageIndex=-1):
        """
        Default class constructor.

        :param `strCaption`: the tab caption;
        :param `imageIndex`: the tab image index based on the assigned (set)
         `wx.ImageList` (if any).
        """

        self._strCaption = strCaption
        self._ImageIndex = imageIndex
        # Layout data, filled in by the container while painting.
        self._pos = wx.Point()
        self._size = wx.Size()
        self._captionRect = wx.Rect()

    def GetCaption(self):
        """ Returns the tab caption. """
        return self._strCaption

    def SetCaption(self, value):
        """
        Sets the tab caption.

        :param `value`: the new tab caption.
        """
        self._strCaption = value

    def GetPosition(self):
        """ Returns the tab position. """
        return self._pos

    def SetPosition(self, value):
        """
        Sets the tab position.

        :param `value`: the new tab position, an instance of `wx.Point`.
        """
        self._pos = value

    def GetSize(self):
        """ Returns the tab size. """
        return self._size

    def SetSize(self, value):
        """
        Sets the tab size.

        :param `value`: the new tab size, an instance of `wx.Size`.
        """
        self._size = value

    def GetImageIndex(self):
        """ Returns the tab image index. """
        return self._ImageIndex

    def SetImageIndex(self, value):
        """
        Sets the tab image index.

        :param `value`: an index into the assigned image list.
        """
        self._ImageIndex = value

    def GetTextRect(self):
        """ Returns the client rectangle available for the tab text. """
        return self._captionRect

    def SetTextRect(self, rect):
        """
        Sets the client rectangle available for the tab text.

        :param `rect`: the tab text client rectangle, an instance of `wx.Rect`.
        """
        self._captionRect = rect
# ---------------------------------------------------------------------------- #
# Class ImageContainerBase
# ---------------------------------------------------------------------------- #
class ImageContainerBase(wx.Panel):
"""
Base class for L{FlatImageBook} image container.
"""
    def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize,
                 style=0, agwStyle=0, name="ImageContainerBase"):
        """
        Default class constructor.
        :param `parent`: parent window. Must not be ``None``;
        :param `id`: window identifier. A value of -1 indicates a default value;
        :param `pos`: the control position. A value of (-1, -1) indicates a default position,
         chosen by either the windowing system or wxPython, depending on platform;
        :param `size`: the control size. A value of (-1, -1) indicates a default size,
         chosen by either the windowing system or wxPython, depending on platform;
        :param `style`: the underlying `wx.Panel` window style;
        :param `agwStyle`: the AGW-specific window style. This can be a combination of the
         following bits:
        =========================== =========== ==================================================
        Window Styles               Hex Value   Description
        =========================== =========== ==================================================
        ``INB_BOTTOM``                      0x1 Place labels below the page area. Available only for L{FlatImageBook}.
        ``INB_LEFT``                        0x2 Place labels on the left side. Available only for L{FlatImageBook}.
        ``INB_RIGHT``                       0x4 Place labels on the right side.
        ``INB_TOP``                         0x8 Place labels above the page area.
        ``INB_BORDER``                     0x10 Draws a border around L{LabelBook} or L{FlatImageBook}.
        ``INB_SHOW_ONLY_TEXT``             0x20 Shows only text labels and no images. Available only for L{LabelBook}.
        ``INB_SHOW_ONLY_IMAGES``           0x40 Shows only tab images and no label texts. Available only for L{LabelBook}.
        ``INB_FIT_BUTTON``                 0x80 Displays a pin button to show/hide the book control.
        ``INB_DRAW_SHADOW``               0x100 Draw shadows below the book tabs. Available only for L{LabelBook}.
        ``INB_USE_PIN_BUTTON``            0x200 Displays a pin button to show/hide the book control.
        ``INB_GRADIENT_BACKGROUND``       0x400 Draws a gradient shading on the tabs background. Available only for L{LabelBook}.
        ``INB_WEB_HILITE``                0x800 On mouse hovering, tabs behave like html hyperlinks. Available only for L{LabelBook}.
        ``INB_NO_RESIZE``                0x1000 Don't allow resizing of the tab area.
        ``INB_FIT_LABELTEXT``            0x2000 Will fit the tab area to the longest text (or text+image if you have images) in all the tabs.
        =========================== =========== ==================================================
        :param `name`: the window name.
        """
        self._nIndex = -1                      # index of the selected tab (-1 = no selection)
        self._nImgSize = 16                    # icon size in pixels (updated by AssignImageList)
        self._ImageList = None                 # wx.ImageList holding the tab icons
        self._nHoeveredImgIdx = -1             # index of the tab currently under the mouse
        self._bCollapsed = False               # True when the tab area is collapsed via the pin
        self._tabAreaSize = (-1, -1)           # cached tab-area size
        self._nPinButtonStatus = INB_PIN_NONE  # current state of the pin button
        self._pagesInfoVec = []                # one ImageInfo per page, in page order
        self._pinBtnRect = wx.Rect()           # screen rectangle of the pin button
        wx.Panel.__init__(self, parent, id, pos, size, style | wx.NO_BORDER | wx.NO_FULL_REPAINT_ON_RESIZE, name)
def HasAGWFlag(self, flag):
"""
Tests for existance of flag in the style.
:param `flag`: a window style. This can be a combination of the following bits:
=========================== =========== ==================================================
Window Styles Hex Value Description
=========================== =========== ==================================================
``INB_BOTTOM`` 0x1 Place labels below the page area. Available only for L{FlatImageBook}.
``INB_LEFT`` 0x2 Place labels on the left side. Available only for L{FlatImageBook}.
``INB_RIGHT`` 0x4 Place labels on the right side.
``INB_TOP`` 0x8 Place labels above the page area.
``INB_BORDER`` 0x10 Draws a border around L{LabelBook} or L{FlatImageBook}.
``INB_SHOW_ONLY_TEXT`` 0x20 Shows only text labels and no images. Available only for L{LabelBook}.
``INB_SHOW_ONLY_IMAGES`` 0x40 Shows only tab images and no label texts. Available only for L{LabelBook}.
``INB_FIT_BUTTON`` 0x80 Displays a pin button to show/hide the book control.
``INB_DRAW_SHADOW`` 0x100 Draw shadows below the book tabs. Available only for L{LabelBook}.
``INB_USE_PIN_BUTTON`` 0x200 Displays a pin button to show/hide the book control.
``INB_GRADIENT_BACKGROUND`` 0x400 Draws a gradient shading on the tabs background. Available only for L{LabelBook}.
``INB_WEB_HILITE`` 0x800 On mouse hovering, tabs behave like html hyperlinks. Available only for L{LabelBook}.
``INB_NO_RESIZE`` 0x1000 Don't allow resizing of the tab area.
``INB_FIT_LABELTEXT`` 0x2000 Will fit the tab area to the longest text (or text+image if you have images) in all the tabs.
=========================== =========== ==================================================
"""
style = self.GetParent().GetAGWWindowStyleFlag()
res = (style & flag and [True] or [False])[0]
return res
def ClearFlag(self, flag):
"""
Removes flag from the style.
:param `flag`: a window style flag.
:see: L{HasAGWFlag} for a list of possible window style flags.
"""
parent = self.GetParent()
agwStyle = parent.GetAGWWindowStyleFlag()
agwStyle &= ~(flag)
parent.SetAGWWindowStyleFlag(agwStyle)
def AssignImageList(self, imglist):
"""
Assigns an image list to the L{ImageContainerBase}.
:param `imglist`: an instance of `wx.ImageList`.
"""
if imglist and imglist.GetImageCount() != 0:
self._nImgSize = imglist.GetBitmap(0).GetHeight()
self._ImageList = imglist
parent = self.GetParent()
agwStyle = parent.GetAGWWindowStyleFlag()
parent.SetAGWWindowStyleFlag(agwStyle)
def GetImageList(self):
""" Return the image list for L{ImageContainerBase}. """
return self._ImageList
def GetImageSize(self):
""" Returns the image size inside the L{ImageContainerBase} image list. """
return self._nImgSize
def FixTextSize(self, dc, text, maxWidth):
"""
Fixes the text, to fit `maxWidth` value. If the text length exceeds
`maxWidth` value this function truncates it and appends two dots at
the end. ("Long Long Long Text" might become "Long Long...").
:param `dc`: an instance of `wx.DC`;
:param `text`: the text to fix/truncate;
:param `maxWidth`: the maximum allowed width for the text, in pixels.
"""
return ArtManager.Get().TruncateText(dc, text, maxWidth)
def CanDoBottomStyle(self):
"""
Allows the parent to examine the children type. Some implementation
(such as L{LabelBook}), does not support top/bottom images, only left/right.
"""
return False
def AddPage(self, caption, selected=False, imgIdx=-1):
"""
Adds a page to the container.
:param `caption`: specifies the text for the new tab;
:param `selected`: specifies whether the page should be selected;
:param `imgIdx`: specifies the optional image index for the new tab.
"""
self._pagesInfoVec.append(ImageInfo(caption, imgIdx))
if selected or len(self._pagesInfoVec) == 1:
self._nIndex = len(self._pagesInfoVec)-1
self.Refresh()
def InsertPage(self, page_idx, caption, selected=False, imgIdx=-1):
"""
Inserts a page into the container at the specified position.
:param `page_idx`: specifies the position for the new tab;
:param `caption`: specifies the text for the new tab;
:param `selected`: specifies whether the page should be selected;
:param `imgIdx`: specifies the optional image index for the new tab.
"""
self._pagesInfoVec.insert(page_idx, ImageInfo(caption, imgIdx))
if selected or len(self._pagesInfoVec) == 1:
self._nIndex = len(self._pagesInfoVec)-1
self.Refresh()
def SetPageImage(self, page, imgIdx):
"""
Sets the image for the given page.
:param `page`: the index of the tab;
:param `imgIdx`: specifies the optional image index for the tab.
"""
imgInfo = self._pagesInfoVec[page]
imgInfo.SetImageIndex(imgIdx)
def SetPageText(self, page, text):
"""
Sets the tab caption for the given page.
:param `page`: the index of the tab;
:param `text`: the new tab caption.
"""
imgInfo = self._pagesInfoVec[page]
imgInfo.SetCaption(text)
def GetPageImage(self, page):
"""
Returns the image index for the given page.
:param `page`: the index of the tab.
"""
imgInfo = self._pagesInfoVec[page]
return imgInfo.GetImageIndex()
def GetPageText(self, page):
"""
Returns the tab caption for the given page.
:param `page`: the index of the tab.
"""
imgInfo = self._pagesInfoVec[page]
return imgInfo.GetCaption()
def ClearAll(self):
""" Deletes all the pages in the container. """
self._pagesInfoVec = []
self._nIndex = wx.NOT_FOUND
    def DoDeletePage(self, page):
        """
        Does the actual page deletion.
        :param `page`: the index of the tab.
        """
        # Remove the page from the vector
        book = self.GetParent()
        self._pagesInfoVec.pop(page)
        # Keep the selection pointing at the same logical page after the
        # removal shifted everything that followed one slot to the left.
        if self._nIndex >= page:
            self._nIndex = self._nIndex - 1
        # The deleted page was the first (and selected) one, but the book
        # still has pages left, so make the first page (0) the active one.
        if self._nIndex < 0 and len(self._pagesInfoVec) > 0:
            self._nIndex = 0
        # Refresh the tabs
        if self._nIndex >= 0:
            # _bForceSelection makes the book switch pages even when the
            # numeric selection index did not change.
            book._bForceSelection = True
            book.SetSelection(self._nIndex)
            book._bForceSelection = False
        if not self._pagesInfoVec:
            # Erase the page container drawings
            dc = wx.ClientDC(self)
            dc.Clear()
    def OnSize(self, event):
        """
        Handles the ``wx.EVT_SIZE`` event for L{ImageContainerBase}.

        :param `event`: a `wx.SizeEvent` event to be processed.
        """

        self.Refresh() # Repaint so the tabs are laid out for the new size
        event.Skip()
    def OnEraseBackground(self, event):
        """
        Handles the ``wx.EVT_ERASE_BACKGROUND`` event for L{ImageContainerBase}.

        :param `event`: a `wx.EraseEvent` event to be processed.

        :note: This method is intentionally empty to reduce flicker.
        """

        # Deliberately do nothing: painting is fully handled in OnPaint
        pass
    def HitTest(self, pt):
        """
        Returns the index of the tab at the specified position or ``wx.NOT_FOUND``
        if ``None``, plus the flag style of L{HitTest}.

        :param `pt`: an instance of `wx.Point`, to test for hits.

        :return: The index of the tab at the specified position plus the hit test
         flag, which can be one of the following bits:

         ====================== ======= ================================
         HitTest Flags           Value  Description
         ====================== ======= ================================
         ``IMG_OVER_IMG``             0 The mouse is over the tab icon
         ``IMG_OVER_PIN``             1 The mouse is over the pin button
         ``IMG_OVER_EW_BORDER``       2 The mouse is over the east-west book border
         ``IMG_NONE``                 3 Nowhere
         ====================== ======= ================================
        """

        style = self.GetParent().GetAGWWindowStyleFlag()

        # The pin button takes precedence over the tabs
        if style & INB_USE_PIN_BUTTON:
            if self._pinBtnRect.Contains(pt):
                return -1, IMG_OVER_PIN

        for i in xrange(len(self._pagesInfoVec)):

            # A (-1, -1) position marks a tab that did not fit on screen;
            # tabs are laid out in order, so no later tab is visible either
            if self._pagesInfoVec[i].GetPosition() == wx.Point(-1, -1):
                break

            # For Web Hover style, we test the TextRect
            if not self.HasAGWFlag(INB_WEB_HILITE):
                buttonRect = wx.RectPS(self._pagesInfoVec[i].GetPosition(), self._pagesInfoVec[i].GetSize())
            else:
                buttonRect = self._pagesInfoVec[i].GetTextRect()

            if buttonRect.Contains(pt):
                return i, IMG_OVER_IMG

        if self.PointOnSash(pt):
            return -1, IMG_OVER_EW_BORDER
        else:
            return -1, IMG_NONE
def PointOnSash(self, pt):
"""
Tests whether pt is located on the sash.
:param `pt`: an instance of `wx.Point`, to test for hits.
"""
# Check if we are on a the sash border
cltRect = self.GetClientRect()
if self.HasAGWFlag(INB_LEFT) or self.HasAGWFlag(INB_TOP):
if pt.x > cltRect.x + cltRect.width - 4:
return True
else:
if pt.x < 4:
return True
return False
def OnMouseLeftDown(self, event):
"""
Handles the ``wx.EVT_LEFT_DOWN`` event for L{ImageContainerBase}.
:param `event`: a `wx.MouseEvent` event to be processed.
"""
newSelection = -1
event.Skip()
# Support for collapse/expand
style = self.GetParent().GetAGWWindowStyleFlag()
if style & INB_USE_PIN_BUTTON:
if self._pinBtnRect.Contains(event.GetPosition()):
self._nPinButtonStatus = INB_PIN_PRESSED
dc = wx.ClientDC(self)
self.DrawPin(dc, self._pinBtnRect, not self._bCollapsed)
return
# Incase panel is collapsed, there is nothing
# to check
if self._bCollapsed:
return
tabIdx, where = self.HitTest(event.GetPosition())
if where == IMG_OVER_IMG:
self._nHoeveredImgIdx = -1
if tabIdx == -1:
return
self.GetParent().SetSelection(tabIdx)
    def OnMouseLeaveWindow(self, event):
        """
        Handles the ``wx.EVT_LEAVE_WINDOW`` event for L{ImageContainerBase}.

        :param `event`: a `wx.MouseEvent` event to be processed.
        """

        # Only repaint if a tab was highlighted when the mouse left
        bRepaint = self._nHoeveredImgIdx != -1
        self._nHoeveredImgIdx = -1

        # Make sure the pin button status is NONE
        # incase we were in pin button style
        style = self.GetParent().GetAGWWindowStyleFlag()
        if style & INB_USE_PIN_BUTTON:

            self._nPinButtonStatus = INB_PIN_NONE
            dc = wx.ClientDC(self)
            self.DrawPin(dc, self._pinBtnRect, not self._bCollapsed)

        # Restore cursor
        wx.SetCursor(wx.StockCursor(wx.CURSOR_ARROW))

        if bRepaint:
            self.Refresh()
    def OnMouseLeftUp(self, event):
        """
        Handles the ``wx.EVT_LEFT_UP`` event for L{ImageContainerBase}.

        Completes a click on the pin button by toggling the collapsed state
        of the tab area and adjusting the container's size hints accordingly.

        :param `event`: a `wx.MouseEvent` event to be processed.
        """

        style = self.GetParent().GetAGWWindowStyleFlag()

        if style & INB_USE_PIN_BUTTON:

            # Label containers do not support top/bottom placement, so they
            # are collapsed/restored along the x axis only
            bIsLabelContainer = not self.CanDoBottomStyle()

            if self._pinBtnRect.Contains(event.GetPosition()):

                self._nPinButtonStatus = INB_PIN_NONE
                self._bCollapsed = not self._bCollapsed

                if self._bCollapsed:

                    # Save the current tab area width
                    self._tabAreaSize = self.GetSize()

                    if bIsLabelContainer:
                        # Collapse to a thin 20-pixel strip
                        self.SetSizeHints(20, self._tabAreaSize.y)

                    else:

                        if style & INB_BOTTOM or style & INB_TOP:
                            self.SetSizeHints(self._tabAreaSize.x, 20)
                        else:
                            self.SetSizeHints(20, self._tabAreaSize.y)

                else:

                    if bIsLabelContainer:

                        self.SetSizeHints(self._tabAreaSize.x, -1)

                    else:

                        # Restore the tab area size
                        if style & INB_BOTTOM or style & INB_TOP:
                            self.SetSizeHints(-1, self._tabAreaSize.y)
                        else:
                            self.SetSizeHints(self._tabAreaSize.x, -1)

                # Relayout the book so the page area absorbs/releases the space
                self.GetParent().GetSizer().Layout()
                self.Refresh()
                return
    def OnMouseMove(self, event):
        """
        Handles the ``wx.EVT_MOTION`` event for L{ImageContainerBase}.

        Tracks the hovered tab for highlight effects and updates the pin
        button / cursor state.

        :param `event`: a `wx.MouseEvent` event to be processed.
        """

        style = self.GetParent().GetAGWWindowStyleFlag()
        if style & INB_USE_PIN_BUTTON:

            # If a press started on the pin button but the mouse moved off it,
            # cancel the pressed look
            if not self._pinBtnRect.Contains(event.GetPosition()) and self._nPinButtonStatus == INB_PIN_PRESSED:

                self._nPinButtonStatus = INB_PIN_NONE
                dc = wx.ClientDC(self)
                self.DrawPin(dc, self._pinBtnRect, not self._bCollapsed)

        imgIdx, where = self.HitTest(event.GetPosition())
        self._nHoeveredImgIdx = imgIdx

        if not self._bCollapsed:

            if self._nHoeveredImgIdx >= 0 and self._nHoeveredImgIdx < len(self._pagesInfoVec):

                # Change the cursor to be Hand (web-hover style only, and not
                # over the already selected tab)
                if self.HasAGWFlag(INB_WEB_HILITE) and self._nHoeveredImgIdx != self._nIndex:
                    wx.SetCursor(wx.StockCursor(wx.CURSOR_HAND))

            else:

                # Restore the cursor only if we have the Web hover style set,
                # and we are not currently hovering the sash
                if self.HasAGWFlag(INB_WEB_HILITE) and not self.PointOnSash(event.GetPosition()):
                    wx.SetCursor(wx.StockCursor(wx.CURSOR_ARROW))

        # Dont display hover effect when hovering the
        # selected label
        if self._nHoeveredImgIdx == self._nIndex:
            self._nHoeveredImgIdx = -1

        self.Refresh()
    def DrawPin(self, dc, rect, downPin):
        """
        Draw a pin button, that allows collapsing of the image panel.

        :param `dc`: an instance of `wx.DC`;
        :param `rect`: the pin button client rectangle;
        :param `downPin`: ``True`` if the pin button is facing downwards, ``False``
         if it is facing leftwards.
        """

        # Set the bitmap according to the button status
        if downPin:
            pinBmp = wx.BitmapFromXPMData(pin_down_xpm)
        else:
            pinBmp = wx.BitmapFromXPMData(pin_left_xpm)

        xx = rect.x + 2

        if self._nPinButtonStatus in [INB_PIN_HOVER, INB_PIN_NONE]:

            # Normal/hover state: black frame with white top-left highlight
            dc.SetBrush(wx.TRANSPARENT_BRUSH)
            dc.SetPen(wx.BLACK_PEN)
            dc.DrawRectangle(xx, rect.y, 16, 16)

            # Draw upper and left border with grey colour
            dc.SetPen(wx.WHITE_PEN)
            dc.DrawLine(xx, rect.y, xx + 16, rect.y)
            dc.DrawLine(xx, rect.y, xx, rect.y + 16)

        elif self._nPinButtonStatus == INB_PIN_PRESSED:

            # Pressed state: inverted shading gives a sunken look
            dc.SetBrush(wx.TRANSPARENT_BRUSH)
            dc.SetPen(wx.Pen(wx.NamedColour("LIGHT GREY")))
            dc.DrawRectangle(xx, rect.y, 16, 16)

            # Draw upper and left border with grey colour
            dc.SetPen(wx.BLACK_PEN)
            dc.DrawLine(xx, rect.y, xx + 16, rect.y)
            dc.DrawLine(xx, rect.y, xx, rect.y + 16)

        # Set the masking so white pixels in the XPM are transparent
        pinBmp.SetMask(wx.Mask(pinBmp, wx.WHITE))

        # Draw the new bitmap
        dc.DrawBitmap(pinBmp, xx, rect.y, True)

        # Save the pin rect for hit-testing in the mouse handlers
        self._pinBtnRect = rect
# ---------------------------------------------------------------------------- #
# Class ImageContainer
# ---------------------------------------------------------------------------- #
class ImageContainer(ImageContainerBase):
    """
    Base class for L{FlatImageBook} image container.

    Adds the actual painting (OnPaint) and forwards all mouse/size events
    to L{ImageContainerBase}.
    """

    def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize,
                 style=0, agwStyle=0, name="ImageContainer"):
        """
        Default class constructor.

        :param `parent`: parent window. Must not be ``None``;
        :param `id`: window identifier. A value of -1 indicates a default value;
        :param `pos`: the control position. A value of (-1, -1) indicates a default position,
         chosen by either the windowing system or wxPython, depending on platform;
        :param `size`: the control size. A value of (-1, -1) indicates a default size,
         chosen by either the windowing system or wxPython, depending on platform;
        :param `style`: the underlying `wx.Panel` window style;
        :param `agwStyle`: the AGW-specific window style. This can be a combination of the
         following bits:

         =========================== =========== ==================================================
         Window Styles               Hex Value   Description
         =========================== =========== ==================================================
         ``INB_BOTTOM``                      0x1 Place labels below the page area. Available only for L{FlatImageBook}.
         ``INB_LEFT``                        0x2 Place labels on the left side. Available only for L{FlatImageBook}.
         ``INB_RIGHT``                       0x4 Place labels on the right side.
         ``INB_TOP``                         0x8 Place labels above the page area.
         ``INB_BORDER``                     0x10 Draws a border around L{LabelBook} or L{FlatImageBook}.
         ``INB_SHOW_ONLY_TEXT``             0x20 Shows only text labels and no images. Available only for L{LabelBook}.
         ``INB_SHOW_ONLY_IMAGES``           0x40 Shows only tab images and no label texts. Available only for L{LabelBook}.
         ``INB_FIT_BUTTON``                 0x80 Displays a pin button to show/hide the book control.
         ``INB_DRAW_SHADOW``               0x100 Draw shadows below the book tabs. Available only for L{LabelBook}.
         ``INB_USE_PIN_BUTTON``            0x200 Displays a pin button to show/hide the book control.
         ``INB_GRADIENT_BACKGROUND``       0x400 Draws a gradient shading on the tabs background. Available only for L{LabelBook}.
         ``INB_WEB_HILITE``                0x800 On mouse hovering, tabs behave like html hyperlinks. Available only for L{LabelBook}.
         ``INB_NO_RESIZE``                0x1000 Don't allow resizing of the tab area.
         ``INB_FIT_LABELTEXT``            0x2000 Will fit the tab area to the longest text (or text+image if you have images) in all the tabs.
         =========================== =========== ==================================================

        :param `name`: the window name.
        """

        ImageContainerBase.__init__(self, parent, id, pos, size, style, agwStyle, name)

        self.Bind(wx.EVT_PAINT, self.OnPaint)
        self.Bind(wx.EVT_SIZE, self.OnSize)
        self.Bind(wx.EVT_LEFT_DOWN, self.OnMouseLeftDown)
        self.Bind(wx.EVT_LEFT_UP, self.OnMouseLeftUp)
        self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)
        self.Bind(wx.EVT_MOTION, self.OnMouseMove)
        self.Bind(wx.EVT_LEAVE_WINDOW, self.OnMouseLeaveWindow)

    def OnSize(self, event):
        """
        Handles the ``wx.EVT_SIZE`` event for L{ImageContainer}.

        :param `event`: a `wx.SizeEvent` event to be processed.
        """

        ImageContainerBase.OnSize(self, event)
        event.Skip()

    def OnMouseLeftDown(self, event):
        """
        Handles the ``wx.EVT_LEFT_DOWN`` event for L{ImageContainer}.

        :param `event`: a `wx.MouseEvent` event to be processed.
        """

        ImageContainerBase.OnMouseLeftDown(self, event)
        event.Skip()

    def OnMouseLeftUp(self, event):
        """
        Handles the ``wx.EVT_LEFT_UP`` event for L{ImageContainer}.

        :param `event`: a `wx.MouseEvent` event to be processed.
        """

        ImageContainerBase.OnMouseLeftUp(self, event)
        event.Skip()

    def OnEraseBackground(self, event):
        """
        Handles the ``wx.EVT_ERASE_BACKGROUND`` event for L{ImageContainer}.

        :param `event`: a `wx.EraseEvent` event to be processed.
        """

        ImageContainerBase.OnEraseBackground(self, event)

    def OnMouseMove(self, event):
        """
        Handles the ``wx.EVT_MOTION`` event for L{ImageContainer}.

        :param `event`: a `wx.MouseEvent` event to be processed.
        """

        ImageContainerBase.OnMouseMove(self, event)
        event.Skip()

    def OnMouseLeaveWindow(self, event):
        """
        Handles the ``wx.EVT_LEAVE_WINDOW`` event for L{ImageContainer}.

        :param `event`: a `wx.MouseEvent` event to be processed.
        """

        ImageContainerBase.OnMouseLeaveWindow(self, event)
        event.Skip()

    def CanDoBottomStyle(self):
        """
        Allows the parent to examine the children type. Some implementation
        (such as L{LabelBook}), does not support top/bottom images, only left/right.
        """

        return True

    def OnPaint(self, event):
        """
        Handles the ``wx.EVT_PAINT`` event for L{ImageContainer}.

        Paints the background, the optional pin button, then lays out and
        draws every tab (selection/hover rectangle, icon, caption), storing
        each tab's final rectangle back into its page info for hit-testing.

        :param `event`: a `wx.PaintEvent` event to be processed.
        """

        dc = wx.BufferedPaintDC(self)
        style = self.GetParent().GetAGWWindowStyleFlag()

        backBrush = wx.WHITE_BRUSH
        if style & INB_BORDER:
            borderPen = wx.Pen(wx.SystemSettings_GetColour(wx.SYS_COLOUR_3DSHADOW))
        else:
            borderPen = wx.TRANSPARENT_PEN

        size = self.GetSize()

        # Background
        dc.SetBrush(backBrush)

        borderPen.SetWidth(1)
        dc.SetPen(borderPen)
        dc.DrawRectangle(0, 0, size.x, size.y)
        # NOTE: `(cond and [a] or [b])[0]` is the pre-ternary Python idiom
        # for `a if cond else b`, used throughout this module
        bUsePin = (style & INB_USE_PIN_BUTTON and [True] or [False])[0]

        if bUsePin:

            # Draw the pin button
            clientRect = self.GetClientRect()
            pinRect = wx.Rect(clientRect.GetX() + clientRect.GetWidth() - 20, 2, 20, 20)
            self.DrawPin(dc, pinRect, not self._bCollapsed)

            if self._bCollapsed:
                return

        borderPen = wx.BLACK_PEN
        borderPen.SetWidth(1)
        dc.SetPen(borderPen)
        dc.DrawLine(0, size.y, size.x, size.y)
        dc.DrawPoint(0, size.y)

        clientSize = 0
        bUseYcoord = (style & INB_RIGHT or style & INB_LEFT)

        if bUseYcoord:
            clientSize = size.GetHeight()
        else:
            clientSize = size.GetWidth()

        # We reserve 20 pixels for the 'pin' button

        # The drawing of the images start position. This is
        # dependent on the style, especially when the Pin button
        # style is requested
        if bUsePin:
            if style & INB_TOP or style & INB_BOTTOM:
                pos = (style & INB_BORDER and [0] or [1])[0]
            else:
                pos = (style & INB_BORDER and [20] or [21])[0]
        else:
            pos = (style & INB_BORDER and [0] or [1])[0]

        nPadding = 4 # Pad text with 2 pixels on the left and right
        nTextPaddingLeft = 2

        count = 0

        for i in xrange(len(self._pagesInfoVec)):

            count = count + 1

            # incase the 'fit button' style is applied, we set the rectangle width to the
            # text width plus padding
            # Incase the style IS applied, but the style is either LEFT or RIGHT
            # we ignore it
            normalFont = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT)
            dc.SetFont(normalFont)

            textWidth, textHeight = dc.GetTextExtent(self._pagesInfoVec[i].GetCaption())

            # Restore font to be normal
            normalFont.SetWeight(wx.FONTWEIGHT_NORMAL)
            dc.SetFont(normalFont)

            # Default values for the surrounding rectangle
            # around a button
            rectWidth = self._nImgSize * 2  # To avoid the rectangle 'touching' the borders
            rectHeight = self._nImgSize * 2

            # Incase the style requires non-fixed button (fit to text)
            # recalc the rectangle width
            if style & INB_FIT_BUTTON and \
               not ((style & INB_LEFT) or (style & INB_RIGHT)) and \
               not self._pagesInfoVec[i].GetCaption() == "" and \
               not (style & INB_SHOW_ONLY_IMAGES):

                rectWidth = ((textWidth + nPadding * 2) > rectWidth and [nPadding * 2 + textWidth] or [rectWidth])[0]

                # Make the width an even number
                if rectWidth % 2 != 0:
                    rectWidth += 1

            # Check that we have enough space to draw the button
            # If Pin button is used, consider its space as well (applicable for top/botton style)
            # since in the left/right, its size is already considered in 'pos'
            pinBtnSize = (bUsePin and [20] or [0])[0]

            if pos + rectWidth + pinBtnSize > clientSize:
                break

            # Calculate the button rectangle
            modRectWidth = ((style & INB_LEFT or style & INB_RIGHT) and [rectWidth - 2] or [rectWidth])[0]
            modRectHeight = ((style & INB_LEFT or style & INB_RIGHT) and [rectHeight] or [rectHeight - 2])[0]

            if bUseYcoord:
                buttonRect = wx.Rect(1, pos, modRectWidth, modRectHeight)
            else:
                buttonRect = wx.Rect(pos , 1, modRectWidth, modRectHeight)

            # Check if we need to draw a rectangle around the button
            if self._nIndex == i:

                # Selected tab: strong highlight (75% lightened caption colour)
                penColour = wx.SystemSettings_GetColour(wx.SYS_COLOUR_ACTIVECAPTION)
                brushColour = ArtManager.Get().LightColour(wx.SystemSettings_GetColour(wx.SYS_COLOUR_ACTIVECAPTION), 75)

                dc.SetPen(wx.Pen(penColour))
                dc.SetBrush(wx.Brush(brushColour))

                # Fix the surrounding of the rect if border is set
                if style & INB_BORDER:

                    if style & INB_TOP or style & INB_BOTTOM:
                        buttonRect = wx.Rect(buttonRect.x + 1, buttonRect.y, buttonRect.width - 1, buttonRect.height)
                    else:
                        buttonRect = wx.Rect(buttonRect.x, buttonRect.y + 1, buttonRect.width, buttonRect.height - 1)

                dc.DrawRectangleRect(buttonRect)

            if self._nHoeveredImgIdx == i:

                # Hovered tab: fainter highlight (90% lightened caption colour)
                penColour = wx.SystemSettings_GetColour(wx.SYS_COLOUR_ACTIVECAPTION)
                brushColour = ArtManager.Get().LightColour(wx.SystemSettings_GetColour(wx.SYS_COLOUR_ACTIVECAPTION), 90)

                dc.SetPen(wx.Pen(penColour))
                dc.SetBrush(wx.Brush(brushColour))

                # Fix the surrounding of the rect if border is set
                if style & INB_BORDER:

                    if style & INB_TOP or style & INB_BOTTOM:
                        buttonRect = wx.Rect(buttonRect.x + 1, buttonRect.y, buttonRect.width - 1, buttonRect.height)
                    else:
                        buttonRect = wx.Rect(buttonRect.x, buttonRect.y + 1, buttonRect.width, buttonRect.height - 1)

                dc.DrawRectangleRect(buttonRect)

            if bUseYcoord:
                rect = wx.Rect(0, pos, rectWidth, rectWidth)
            else:
                rect = wx.Rect(pos, 0, rectWidth, rectWidth)

            # Incase user set both flags:
            # INB_SHOW_ONLY_TEXT and INB_SHOW_ONLY_IMAGES
            # We override them to display both
            if style & INB_SHOW_ONLY_TEXT and style & INB_SHOW_ONLY_IMAGES:

                style ^= INB_SHOW_ONLY_TEXT
                style ^= INB_SHOW_ONLY_IMAGES
                self.GetParent().SetAGWWindowStyleFlag(style)

            # Draw the caption and text
            imgTopPadding = 10
            if not style & INB_SHOW_ONLY_TEXT and self._pagesInfoVec[i].GetImageIndex() != -1:

                if bUseYcoord:

                    imgXcoord = self._nImgSize / 2
                    imgYcoord = (style & INB_SHOW_ONLY_IMAGES and [pos + self._nImgSize / 2] or [pos + imgTopPadding])[0]

                else:

                    imgXcoord = pos + (rectWidth / 2) - (self._nImgSize / 2)
                    imgYcoord = (style & INB_SHOW_ONLY_IMAGES and [self._nImgSize / 2] or [imgTopPadding])[0]

                self._ImageList.Draw(self._pagesInfoVec[i].GetImageIndex(), dc,
                                     imgXcoord, imgYcoord,
                                     wx.IMAGELIST_DRAW_TRANSPARENT, True)

            # Draw the text
            if not style & INB_SHOW_ONLY_IMAGES and not self._pagesInfoVec[i].GetCaption() == "":

                dc.SetFont(normalFont)

                # Check if the text can fit the size of the rectangle,
                # if not truncate it
                fixedText = self._pagesInfoVec[i].GetCaption()
                if not style & INB_FIT_BUTTON or (style & INB_LEFT or (style & INB_RIGHT)):

                    fixedText = self.FixTextSize(dc, self._pagesInfoVec[i].GetCaption(), self._nImgSize *2 - 4)

                    # Update the length of the text
                    textWidth, textHeight = dc.GetTextExtent(fixedText)

                if bUseYcoord:

                    textOffsetX = ((rectWidth - textWidth) / 2 )
                    textOffsetY = (not style & INB_SHOW_ONLY_TEXT and [pos + self._nImgSize + imgTopPadding + 3] or \
                                   [pos + ((self._nImgSize * 2 - textHeight) / 2 )])[0]

                else:

                    textOffsetX = (rectWidth - textWidth) / 2 + pos + nTextPaddingLeft
                    textOffsetY = (not style & INB_SHOW_ONLY_TEXT and [self._nImgSize + imgTopPadding + 3] or \
                                   [((self._nImgSize * 2 - textHeight) / 2 )])[0]

                dc.SetTextForeground(wx.SystemSettings_GetColour(wx.SYS_COLOUR_WINDOWTEXT))
                dc.DrawText(fixedText, textOffsetX, textOffsetY)

            # Update the page info so HitTest sees the rectangle just drawn
            self._pagesInfoVec[i].SetPosition(buttonRect.GetPosition())
            self._pagesInfoVec[i].SetSize(buttonRect.GetSize())

            pos += rectWidth

        # Update all buttons that can not fit into the screen as non-visible
        for ii in xrange(count, len(self._pagesInfoVec)):
            self._pagesInfoVec[ii].SetPosition(wx.Point(-1, -1))

        # Draw the pin button
        if bUsePin:

            clientRect = self.GetClientRect()
            pinRect = wx.Rect(clientRect.GetX() + clientRect.GetWidth() - 20, 2, 20, 20)
            self.DrawPin(dc, pinRect, not self._bCollapsed)
# ---------------------------------------------------------------------------- #
# Class LabelContainer
# ---------------------------------------------------------------------------- #
class LabelContainer(ImageContainerBase):
""" Base class for L{LabelBook}. """
    def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize,
                 style=0, agwStyle=0, name="LabelContainer"):
        """
        Default class constructor.

        :param `parent`: parent window. Must not be ``None``;
        :param `id`: window identifier. A value of -1 indicates a default value;
        :param `pos`: the control position. A value of (-1, -1) indicates a default position,
         chosen by either the windowing system or wxPython, depending on platform;
        :param `size`: the control size. A value of (-1, -1) indicates a default size,
         chosen by either the windowing system or wxPython, depending on platform;
        :param `style`: the underlying `wx.Panel` window style;
        :param `agwStyle`: the AGW-specific window style. This can be a combination of the
         following bits:

         =========================== =========== ==================================================
         Window Styles               Hex Value   Description
         =========================== =========== ==================================================
         ``INB_BOTTOM``                      0x1 Place labels below the page area. Available only for L{FlatImageBook}.
         ``INB_LEFT``                        0x2 Place labels on the left side. Available only for L{FlatImageBook}.
         ``INB_RIGHT``                       0x4 Place labels on the right side.
         ``INB_TOP``                         0x8 Place labels above the page area.
         ``INB_BORDER``                     0x10 Draws a border around L{LabelBook} or L{FlatImageBook}.
         ``INB_SHOW_ONLY_TEXT``             0x20 Shows only text labels and no images. Available only for L{LabelBook}.
         ``INB_SHOW_ONLY_IMAGES``           0x40 Shows only tab images and no label texts. Available only for L{LabelBook}.
         ``INB_FIT_BUTTON``                 0x80 Displays a pin button to show/hide the book control.
         ``INB_DRAW_SHADOW``               0x100 Draw shadows below the book tabs. Available only for L{LabelBook}.
         ``INB_USE_PIN_BUTTON``            0x200 Displays a pin button to show/hide the book control.
         ``INB_GRADIENT_BACKGROUND``       0x400 Draws a gradient shading on the tabs background. Available only for L{LabelBook}.
         ``INB_WEB_HILITE``                0x800 On mouse hovering, tabs behave like html hyperlinks. Available only for L{LabelBook}.
         ``INB_NO_RESIZE``                0x1000 Don't allow resizing of the tab area.
         ``INB_FIT_LABELTEXT``            0x2000 Will fit the tab area to the longest text (or text+image if you have images) in all the tabs.
         =========================== =========== ==================================================

        :param `name`: the window name.
        """

        ImageContainerBase.__init__(self, parent, id, pos, size, style, agwStyle, name)
        # Width of the tab strip; adjustable by dragging the sash (see Resize)
        self._nTabAreaWidth = 100
        # Cursor saved while the east-west resize cursor is shown
        self._oldCursor = wx.NullCursor
        # Maps INB_*_COLOUR keys to wx.Colour values (see SetColour/GetColour)
        self._coloursMap = {}
        # Optional background bitmap, tiled behind the tabs (see SetBackgroundBitmap)
        self._skin = wx.NullBitmap
        # Screen rectangle of the drag sash while resizing; empty when idle
        self._sashRect = wx.Rect()

        self.Bind(wx.EVT_PAINT, self.OnPaint)
        self.Bind(wx.EVT_SIZE, self.OnSize)
        self.Bind(wx.EVT_LEFT_DOWN, self.OnMouseLeftDown)
        self.Bind(wx.EVT_LEFT_UP, self.OnMouseLeftUp)
        self.Bind(wx.EVT_MOTION, self.OnMouseMove)
        self.Bind(wx.EVT_LEAVE_WINDOW, self.OnMouseLeaveWindow)
        self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)
    def OnSize(self, event):
        """
        Handles the ``wx.EVT_SIZE`` event for L{LabelContainer}.

        :param `event`: a `wx.SizeEvent` event to be processed.
        """

        # Delegate to the base class, which just refreshes the tab area
        ImageContainerBase.OnSize(self, event)
        event.Skip()
    def OnEraseBackground(self, event):
        """
        Handles the ``wx.EVT_ERASE_BACKGROUND`` event for L{LabelContainer}.

        :param `event`: a `wx.EraseEvent` event to be processed.
        """

        # Delegate to the base class no-op to avoid flicker
        ImageContainerBase.OnEraseBackground(self, event)
    def GetTabAreaWidth(self):
        """ Returns the width of the tab area, in pixels. """

        return self._nTabAreaWidth
    def SetTabAreaWidth(self, width):
        """
        Sets the width of the tab area.

        :param `width`: the width of the tab area, in pixels.
        """

        self._nTabAreaWidth = width
    def CanDoBottomStyle(self):
        """
        Allows the parent to examine the children type. Some implementation
        (such as L{LabelBook}), does not support top/bottom images, only left/right.
        """

        # LabelContainer only supports left/right tab placement
        return False
    def SetBackgroundBitmap(self, bmp):
        """
        Sets the background bitmap for the control; it is tiled over the tab
        area by L{DrawBackgroundBitmap}.

        :param `bmp`: a valid `wx.Bitmap` object.
        """

        self._skin = bmp
def OnPaint(self, event):
"""
Handles the ``wx.EVT_PAINT`` event for L{LabelContainer}.
:param `event`: a `wx.PaintEvent` event to be processed.
"""
style = self.GetParent().GetAGWWindowStyleFlag()
dc = wx.BufferedPaintDC(self)
backBrush = wx.Brush(self._coloursMap[INB_TAB_AREA_BACKGROUND_COLOUR])
if self.HasAGWFlag(INB_BORDER):
borderPen = wx.Pen(self._coloursMap[INB_TABS_BORDER_COLOUR])
else:
borderPen = wx.TRANSPARENT_PEN
size = self.GetSize()
# Set the pen & brush
dc.SetBrush(backBrush)
dc.SetPen(borderPen)
# Incase user set both flags, we override them to display both
# INB_SHOW_ONLY_TEXT and INB_SHOW_ONLY_IMAGES
if style & INB_SHOW_ONLY_TEXT and style & INB_SHOW_ONLY_IMAGES:
style ^= INB_SHOW_ONLY_TEXT
style ^= INB_SHOW_ONLY_IMAGES
self.GetParent().SetAGWWindowStyleFlag(style)
if self.HasAGWFlag(INB_GRADIENT_BACKGROUND) and not self._skin.Ok():
# Draw graident in the background area
startColour = self._coloursMap[INB_TAB_AREA_BACKGROUND_COLOUR]
endColour = ArtManager.Get().LightColour(self._coloursMap[INB_TAB_AREA_BACKGROUND_COLOUR], 50)
ArtManager.Get().PaintStraightGradientBox(dc, wx.Rect(0, 0, size.x / 2, size.y), startColour, endColour, False)
ArtManager.Get().PaintStraightGradientBox(dc, wx.Rect(size.x / 2, 0, size.x / 2, size.y), endColour, startColour, False)
else:
# Draw the border and background
if self._skin.Ok():
dc.SetBrush(wx.TRANSPARENT_BRUSH)
self.DrawBackgroundBitmap(dc)
dc.DrawRectangleRect(wx.Rect(0, 0, size.x, size.y))
# Draw border
if self.HasAGWFlag(INB_BORDER) and self.HasAGWFlag(INB_GRADIENT_BACKGROUND):
# Just draw the border with transparent brush
dc.SetBrush(wx.TRANSPARENT_BRUSH)
dc.DrawRectangleRect(wx.Rect(0, 0, size.x, size.y))
bUsePin = (self.HasAGWFlag(INB_USE_PIN_BUTTON) and [True] or [False])[0]
if bUsePin:
# Draw the pin button
clientRect = self.GetClientRect()
pinRect = wx.Rect(clientRect.GetX() + clientRect.GetWidth() - 20, 2, 20, 20)
self.DrawPin(dc, pinRect, not self._bCollapsed)
if self._bCollapsed:
return
dc.SetPen(wx.BLACK_PEN)
self.SetSizeHints(self._nTabAreaWidth, -1)
# We reserve 20 pixels for the pin button
posy = 20
count = 0
for i in xrange(len(self._pagesInfoVec)):
count = count+1
# Default values for the surronounding rectangle
# around a button
rectWidth = self._nTabAreaWidth
if self.HasAGWFlag(INB_SHOW_ONLY_TEXT):
font = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT)
font.SetPointSize(font.GetPointSize() * self.GetParent().GetFontSizeMultiple())
if self.GetParent().GetFontBold():
font.SetWeight(wx.FONTWEIGHT_BOLD)
dc.SetFont(font)
w, h = dc.GetTextExtent(self._pagesInfoVec[i].GetCaption())
rectHeight = h * 2
else:
rectHeight = self._nImgSize * 2
# Check that we have enough space to draw the button
if posy + rectHeight > size.GetHeight():
break
# Calculate the button rectangle
posx = 0
buttonRect = wx.Rect(posx, posy, rectWidth, rectHeight)
indx = self._pagesInfoVec[i].GetImageIndex()
if indx == -1:
bmp = wx.NullBitmap
else:
bmp = self._ImageList.GetBitmap(indx)
self.DrawLabel(dc, buttonRect, self._pagesInfoVec[i].GetCaption(), bmp,
self._pagesInfoVec[i], self.HasAGWFlag(INB_LEFT) or self.HasAGWFlag(INB_TOP),
i, self._nIndex == i, self._nHoeveredImgIdx == i)
posy += rectHeight
# Update all buttons that can not fit into the screen as non-visible
for ii in xrange(count, len(self._pagesInfoVec)):
self._pagesInfoVec[i].SetPosition(wx.Point(-1, -1))
if bUsePin:
clientRect = self.GetClientRect()
pinRect = wx.Rect(clientRect.GetX() + clientRect.GetWidth() - 20, 2, 20, 20)
self.DrawPin(dc, pinRect, not self._bCollapsed)
    def DrawBackgroundBitmap(self, dc):
        """
        Draws the skin bitmap as the background of the control, tiling it
        when it is smaller than the client area.

        :param `dc`: an instance of `wx.DC`.
        """

        clientRect = self.GetClientRect()
        width = clientRect.GetWidth()
        height = clientRect.GetHeight()
        coveredY = coveredX = 0
        xstep = self._skin.GetWidth()
        ystep = self._skin.GetHeight()
        bmpRect = wx.Rect(0, 0, xstep, ystep)
        if bmpRect != clientRect:

            # Skin is smaller/larger than the client area: tile it into an
            # off-screen bitmap first, then blit the result in one call
            mem_dc = wx.MemoryDC()
            bmp = wx.EmptyBitmap(width, height)
            mem_dc.SelectObject(bmp)

            while coveredY < height:

                while coveredX < width:

                    mem_dc.DrawBitmap(self._skin, coveredX, coveredY, True)
                    coveredX += xstep

                coveredX = 0
                coveredY += ystep

            mem_dc.SelectObject(wx.NullBitmap)
            #self._skin = bmp
            dc.DrawBitmap(bmp, 0, 0)

        else:

            # Skin exactly covers the client area: draw it directly
            dc.DrawBitmap(self._skin, 0, 0)
    def OnMouseLeftUp(self, event):
        """
        Handles the ``wx.EVT_LEFT_UP`` event for L{LabelContainer}.

        Ends a sash drag (applying the new tab area width) or falls through
        to the base class tab-click behaviour.

        :param `event`: a `wx.MouseEvent` event to be processed.
        """

        if self.HasAGWFlag(INB_NO_RESIZE):

            # Resizing disabled: only the base class click handling applies
            ImageContainerBase.OnMouseLeftUp(self, event)
            return

        if self.HasCapture():
            self.ReleaseMouse()

        # Sash was being dragged?
        if not self._sashRect.IsEmpty():

            # Remove the XOR-drawn sash, then apply the new width
            ArtManager.Get().DrawDragSash(self._sashRect)
            self.Resize(event)

            self._sashRect = wx.Rect()
            return

        self._sashRect = wx.Rect()

        # Restore cursor
        if self._oldCursor.Ok():

            wx.SetCursor(self._oldCursor)
            self._oldCursor = wx.NullCursor

        ImageContainerBase.OnMouseLeftUp(self, event)
def Resize(self, event):
"""
Actually resizes the tab area.
:param `event`: an instance of `wx.SizeEvent`.
"""
# Resize our size
self._tabAreaSize = self.GetSize()
newWidth = self._tabAreaSize.x
x = event.GetX()
if self.HasAGWFlag(INB_BOTTOM) or self.HasAGWFlag(INB_RIGHT):
newWidth -= event.GetX()
else:
newWidth = x
if newWidth < 100: # Dont allow width to be lower than that
newWidth = 100
self.SetSizeHints(newWidth, self._tabAreaSize.y)
# Update the tab new area width
self._nTabAreaWidth = newWidth
self.GetParent().Freeze()
self.GetParent().GetSizer().Layout()
self.GetParent().Thaw()
    def OnMouseMove(self, event):
        """
        Handles the ``wx.EVT_MOTION`` event for L{LabelContainer}.

        Drives the sash-dragging feedback (XOR sash rectangle and east-west
        cursor), then delegates hover tracking to the base class.

        :param `event`: a `wx.MouseEvent` event to be processed.
        """

        if self.HasAGWFlag(INB_NO_RESIZE):

            # Resizing disabled: only the base class hover handling applies
            ImageContainerBase.OnMouseMove(self, event)
            return

        # Remove old sash (XOR drawing: a second draw erases it)
        if not self._sashRect.IsEmpty():
            ArtManager.Get().DrawDragSash(self._sashRect)

        if event.LeftIsDown():

            if not self._sashRect.IsEmpty():

                # Progress sash, and redraw it
                clientRect = self.GetClientRect()
                pt = self.ClientToScreen(wx.Point(event.GetX(), 0))
                self._sashRect = wx.RectPS(pt, wx.Size(4, clientRect.height))
                ArtManager.Get().DrawDragSash(self._sashRect)

            else:
                # Sash is not being dragged

                if self._oldCursor.Ok():
                    wx.SetCursor(self._oldCursor)
                    self._oldCursor = wx.NullCursor

        else:

            if self.HasCapture():
                self.ReleaseMouse()

            if self.PointOnSash(event.GetPosition()):

                # Change cursor to EW cursor
                self._oldCursor = self.GetCursor()
                wx.SetCursor(wx.StockCursor(wx.CURSOR_SIZEWE))

            elif self._oldCursor.Ok():

                # Mouse left the sash area: restore the saved cursor
                wx.SetCursor(self._oldCursor)
                self._oldCursor = wx.NullCursor

            self._sashRect = wx.Rect()
            ImageContainerBase.OnMouseMove(self, event)
    def OnMouseLeftDown(self, event):
        """
        Handles the ``wx.EVT_LEFT_DOWN`` event for L{LabelContainer}.

        Starts a sash drag when the click lands on the east-west border,
        otherwise delegates tab selection to the base class.

        :param `event`: a `wx.MouseEvent` event to be processed.
        """

        if self.HasAGWFlag(INB_NO_RESIZE):

            ImageContainerBase.OnMouseLeftDown(self, event)
            return

        imgIdx, where = self.HitTest(event.GetPosition())

        if IMG_OVER_EW_BORDER == where and not self._bCollapsed:

            # We are over the sash
            if not self._sashRect.IsEmpty():
                # Erase the previously XOR-drawn sash
                ArtManager.Get().DrawDragSash(self._sashRect)
            else:
                # first time, begin drawing sash
                self.CaptureMouse()

                # Change mouse cursor
                self._oldCursor = self.GetCursor()
                wx.SetCursor(wx.StockCursor(wx.CURSOR_SIZEWE))

            clientRect = self.GetClientRect()
            pt = self.ClientToScreen(wx.Point(event.GetX(), 0))
            self._sashRect = wx.RectPS(pt, wx.Size(4, clientRect.height))

            ArtManager.Get().DrawDragSash(self._sashRect)

        else:
            ImageContainerBase.OnMouseLeftDown(self, event)
    def OnMouseLeaveWindow(self, event):
        """
        Handles the ``wx.EVT_LEAVE_WINDOW`` event for L{LabelContainer}.

        :param `event`: a `wx.MouseEvent` event to be processed.
        """

        if self.HasAGWFlag(INB_NO_RESIZE):

            ImageContainerBase.OnMouseLeaveWindow(self, event)
            return

        # If Sash is being dragged, ignore this event
        if not self.HasCapture():
            ImageContainerBase.OnMouseLeaveWindow(self, event)
    def DrawRegularHover(self, dc, rect):
        """
        Draws a rounded rectangle around the current tab.

        :param `dc`: an instance of `wx.DC`;
        :param `rect`: the current tab client rectangle.
        """

        # The hovered tab with default border
        dc.SetBrush(wx.TRANSPARENT_BRUSH)
        dc.SetPen(wx.Pen(wx.WHITE))

        # We draw CCW; the edge facing the pages is left open
        if self.HasAGWFlag(INB_RIGHT) or self.HasAGWFlag(INB_TOP):

            # Right images
            # Upper line
            dc.DrawLine(rect.x + 1, rect.y, rect.x + rect.width, rect.y)

            # Right line (white)
            dc.DrawLine(rect.x + rect.width, rect.y, rect.x + rect.width, rect.y + rect.height)

            # Bottom diagonal - we change pen
            dc.SetPen(wx.Pen(self._coloursMap[INB_TABS_BORDER_COLOUR]))

            # Bottom line
            dc.DrawLine(rect.x + rect.width, rect.y + rect.height, rect.x, rect.y + rect.height)

        else:

            # Left images
            # Upper line white
            dc.DrawLine(rect.x, rect.y, rect.x + rect.width - 1, rect.y)

            # Left line
            dc.DrawLine(rect.x, rect.y, rect.x, rect.y + rect.height)

            # Bottom diagonal, we change the pen
            dc.SetPen(wx.Pen(self._coloursMap[INB_TABS_BORDER_COLOUR]))

            # Bottom line
            dc.DrawLine(rect.x, rect.y + rect.height, rect.x + rect.width, rect.y + rect.height)
def DrawWebHover(self, dc, caption, xCoord, yCoord):
    """
    Draws a web style hover effect (cursor set to hand & text is underlined).

    :param `dc`: an instance of `wx.DC`;
    :param `caption`: the tab caption text;
    :param `xCoord`: the x position of the tab caption;
    :param `yCoord`: the y position of the tab caption.
    """

    # Re-render the caption with an underlined variant of the system font,
    # honouring the book's size multiple and bold settings.
    hoverFont = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT)
    hoverFont.SetPointSize(hoverFont.GetPointSize() * self.GetParent().GetFontSizeMultiple())

    if self.GetParent().GetFontBold():
        hoverFont.SetWeight(wx.FONTWEIGHT_BOLD)

    hoverFont.SetUnderlined(True)
    dc.SetFont(hoverFont)
    dc.DrawText(caption, xCoord, yCoord)
def SetColour(self, which, colour):
    """
    Stores `colour` in the colour map under the key `which`.

    :param `which`: one of the following colour keys:

     ================================== ======= ==================================
     Colour Key                         Value   Description
     ================================== ======= ==================================
     ``INB_TAB_AREA_BACKGROUND_COLOUR`` 100     The tab area background colour
     ``INB_ACTIVE_TAB_COLOUR``          101     The active tab background colour
     ``INB_TABS_BORDER_COLOUR``         102     The tabs border colour
     ``INB_TEXT_COLOUR``                103     The tab caption text colour
     ``INB_ACTIVE_TEXT_COLOUR``         104     The active tab caption text colour
     ``INB_HILITE_TAB_COLOUR``          105     The tab caption highlight text colour
     ================================== ======= ==================================

    :param `colour`: a valid `wx.Colour` object.
    """

    self._coloursMap[which] = colour
def GetColour(self, which):
    """
    Returns the colour stored for the key `which`, or a default-constructed
    `wx.Colour` if the key is unknown.

    :param `which`: the colour key.

    :see: L{SetColour} for a list of valid colour keys.
    """

    # `dict.has_key` is deprecated (and removed in Python 3); the `in`
    # operator is equivalent and works on both Python 2 and 3.
    if which not in self._coloursMap:
        return wx.Colour()

    return self._coloursMap[which]
def InitializeColours(self):
    """ Initializes the colours map to be used for this control. """

    # Fill the colour map with defaults in a single update call.
    art = ArtManager.Get()
    self._coloursMap.update({
        INB_TAB_AREA_BACKGROUND_COLOUR: art.LightColour(art.FrameColour(), 50),
        INB_ACTIVE_TAB_COLOUR: art.GetMenuFaceColour(),
        INB_TABS_BORDER_COLOUR: wx.SystemSettings_GetColour(wx.SYS_COLOUR_3DSHADOW),
        INB_HILITE_TAB_COLOUR: wx.NamedColour("LIGHT BLUE"),
        INB_TEXT_COLOUR: wx.WHITE,
        INB_ACTIVE_TEXT_COLOUR: wx.BLACK,
    })

    # Keep captions readable: never leave a bright text colour on a bright
    # tab-area background.
    if not art.IsDark(self._coloursMap[INB_TAB_AREA_BACKGROUND_COLOUR]) and \
       not art.IsDark(self._coloursMap[INB_TEXT_COLOUR]):
        self._coloursMap[INB_TEXT_COLOUR] = art.DarkColour(self._coloursMap[INB_TEXT_COLOUR], 100)
def DrawLabel(self, dc, rect, text, bmp, imgInfo, orientationLeft, imgIdx, selected, hover):
    """
    Draws a single tab label (optional bitmap + caption) and, for the selected
    tab, its highlight rectangle, shadow and hover decoration.

    :param `dc`: an instance of `wx.DC`;
    :param `rect`: the text client rectangle;
    :param `text`: the actual text string;
    :param `bmp`: a bitmap to be drawn next to the text;
    :param `imgInfo`: an instance of L{ImageInfo}; updated in place with the
     final text rectangle, position and size of this tab;
    :param `orientationLeft`: ``True`` if the book has the ``INB_RIGHT`` or ``INB_LEFT``
     style set;
    :param `imgIdx`: the tab image index;
    :param `selected`: ``True`` if the tab is selected, ``False`` otherwise;
    :param `hover`: ``True`` if the tab is being hovered with the mouse, ``False`` otherwise.
    """

    # DCSaver restores the dc's pen/brush/font state when it goes out of scope.
    dcsaver = DCSaver(dc)
    nPadding = 6

    if orientationLeft:
        rect.x += nPadding
        rect.width -= nPadding
    else:
        rect.width -= nPadding

    textRect = wx.Rect(*rect)
    imgRect = wx.Rect(*rect)

    font = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT)
    font.SetPointSize(font.GetPointSize() * self.GetParent().GetFontSizeMultiple())

    if self.GetParent().GetFontBold():
        font.SetWeight(wx.FONTWEIGHT_BOLD)

    dc.SetFont(font)

    # First we define the rectangle for the text
    w, h = dc.GetTextExtent(text)

    #-------------------------------------------------------------------------
    # Label layout:
    # [ nPadding | Image | nPadding | Text | nPadding ]
    #-------------------------------------------------------------------------

    # Text bounding rectangle, vertically centred within the tab
    textRect.x += nPadding
    textRect.y = rect.y + (rect.height - h)/2
    textRect.width = rect.width - 2 * nPadding

    # When a bitmap is shown, the text starts after it
    if bmp.Ok() and not self.HasAGWFlag(INB_SHOW_ONLY_TEXT):
        textRect.x += (bmp.GetWidth() + nPadding)
        textRect.width -= (bmp.GetWidth() + nPadding)

    textRect.height = h

    # Truncate text if needed
    caption = ArtManager.Get().TruncateText(dc, text, textRect.width)

    # Image bounding rectangle, vertically centred within the tab
    if bmp.Ok() and not self.HasAGWFlag(INB_SHOW_ONLY_TEXT):
        imgRect.x += nPadding
        imgRect.width = bmp.GetWidth()
        imgRect.y = rect.y + (rect.height - bmp.GetHeight())/2
        imgRect.height = bmp.GetHeight()

    # Draw bounding rectangle
    if selected:
        # First we colour the tab
        dc.SetBrush(wx.Brush(self._coloursMap[INB_ACTIVE_TAB_COLOUR]))

        if self.HasAGWFlag(INB_BORDER):
            dc.SetPen(wx.Pen(self._coloursMap[INB_TABS_BORDER_COLOUR]))
        else:
            dc.SetPen(wx.Pen(self._coloursMap[INB_ACTIVE_TAB_COLOUR]))

        labelRect = wx.Rect(*rect)

        # Extend the highlight 3px so its rounded edge merges with the page area
        if orientationLeft:
            labelRect.width += 3
        else:
            labelRect.width += 3
            labelRect.x -= 3

        dc.DrawRoundedRectangleRect(labelRect, 3)

        # Single black pixel anchoring the shadow's bottom-right corner
        if not orientationLeft and self.HasAGWFlag(INB_DRAW_SHADOW):
            dc.SetPen(wx.BLACK_PEN)
            dc.DrawPoint(labelRect.x + labelRect.width - 1, labelRect.y + labelRect.height - 1)

    # Draw the text & bitmap
    if caption != "":
        if selected:
            dc.SetTextForeground(self._coloursMap[INB_ACTIVE_TEXT_COLOUR])
        else:
            dc.SetTextForeground(self._coloursMap[INB_TEXT_COLOUR])

        dc.DrawText(caption, textRect.x, textRect.y)
        imgInfo.SetTextRect(textRect)
    else:
        imgInfo.SetTextRect(wx.Rect())

    if bmp.Ok() and not self.HasAGWFlag(INB_SHOW_ONLY_TEXT):
        dc.DrawBitmap(bmp, imgRect.x, imgRect.y, True)

    # Drop shadow below/right of the selected tab
    if self.HasAGWFlag(INB_DRAW_SHADOW) and selected:
        sstyle = 0
        if orientationLeft:
            sstyle = BottomShadow
        else:
            sstyle = BottomShadowFull | RightShadow

        if self.HasAGWFlag(INB_WEB_HILITE):
            # Always drop shadow for this style
            ArtManager.Get().DrawBitmapShadow(dc, rect, sstyle)
        else:
            # NOTE(review): "_nHoeveredImgIdx" looks like a typo of
            # "_nHoveredImgIdx"; kept as-is since it must match the attribute
            # name used by the rest of the class.
            if imgIdx+1 != self._nHoeveredImgIdx:
                ArtManager.Get().DrawBitmapShadow(dc, rect, sstyle)

    # Draw hover effect
    if hover:
        if self.HasAGWFlag(INB_WEB_HILITE) and caption != "":
            self.DrawWebHover(dc, caption, textRect.x, textRect.y)
        else:
            self.DrawRegularHover(dc, rect)

    # Update the page information about position and size
    imgInfo.SetPosition(rect.GetPosition())
    imgInfo.SetSize(rect.GetSize())
# ---------------------------------------------------------------------------- #
# Class FlatBookBase
# ---------------------------------------------------------------------------- #
class FlatBookBase(wx.Panel):
    """ Base class for the containing window for L{LabelBook} and L{FlatImageBook}. """

    def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize,
                 style=0, agwStyle=0, name="FlatBookBase"):
        """
        Default class constructor.

        :param `parent`: parent window. Must not be ``None``;
        :param `id`: window identifier. A value of -1 indicates a default value;
        :param `pos`: the control position. A value of (-1, -1) indicates a default position,
         chosen by either the windowing system or wxPython, depending on platform;
        :param `size`: the control size. A value of (-1, -1) indicates a default size,
         chosen by either the windowing system or wxPython, depending on platform;
        :param `style`: the underlying `wx.Panel` window style;
        :param `agwStyle`: the AGW-specific window style, a combination of the
         ``INB_*`` bits; see L{SetAGWWindowStyleFlag} for the full list;
        :param `name`: the window name.
        """

        self._pages = None
        self._bInitializing = True
        self._bForceSelection = False
        self._windows = []
        self._fontSizeMultiple = 1.0
        self._fontBold = False

        style |= wx.TAB_TRAVERSAL
        self._agwStyle = agwStyle

        wx.Panel.__init__(self, parent, id, pos, size, style, name)
        self._bInitializing = False

    def SetAGWWindowStyleFlag(self, agwStyle):
        """
        Sets the window style.

        :param `agwStyle`: can be a combination of the following bits:

         =========================== =========== ==================================================
         Window Styles               Hex Value   Description
         =========================== =========== ==================================================
         ``INB_BOTTOM``                      0x1 Place labels below the page area. Available only for L{FlatImageBook}.
         ``INB_LEFT``                        0x2 Place labels on the left side. Available only for L{FlatImageBook}.
         ``INB_RIGHT``                       0x4 Place labels on the right side.
         ``INB_TOP``                         0x8 Place labels above the page area.
         ``INB_BORDER``                     0x10 Draws a border around L{LabelBook} or L{FlatImageBook}.
         ``INB_SHOW_ONLY_TEXT``             0x20 Shows only text labels and no images. Available only for L{LabelBook}.
         ``INB_SHOW_ONLY_IMAGES``           0x40 Shows only tab images and no label texts. Available only for L{LabelBook}.
         ``INB_FIT_BUTTON``                 0x80 Displays a pin button to show/hide the book control.
         ``INB_DRAW_SHADOW``               0x100 Draw shadows below the book tabs. Available only for L{LabelBook}.
         ``INB_USE_PIN_BUTTON``            0x200 Displays a pin button to show/hide the book control.
         ``INB_GRADIENT_BACKGROUND``       0x400 Draws a gradient shading on the tabs background. Available only for L{LabelBook}.
         ``INB_WEB_HILITE``                0x800 On mouse hovering, tabs behave like html hyperlinks. Available only for L{LabelBook}.
         ``INB_NO_RESIZE``                0x1000 Don't allow resizing of the tab area.
         ``INB_FIT_LABELTEXT``            0x2000 Will fit the tab area to the longest text (or text+image if you have images) in all the tabs.
         =========================== =========== ==================================================
        """

        self._agwStyle = agwStyle

        # Check that we are not in initialization process
        if self._bInitializing:
            return

        if not self._pages:
            return

        # Detach the windows attached to the sizer
        if self.GetSelection() >= 0:
            self._mainSizer.Detach(self._windows[self.GetSelection()])

        self._mainSizer.Detach(self._pages)

        # Create a new sizer with the requested orientation
        className = self.GetName()

        if className == "LabelBook":
            self._mainSizer = wx.BoxSizer(wx.HORIZONTAL)
        else:
            if agwStyle & INB_LEFT or agwStyle & INB_RIGHT:
                self._mainSizer = wx.BoxSizer(wx.HORIZONTAL)
            else:
                self._mainSizer = wx.BoxSizer(wx.VERTICAL)

        self.SetSizer(self._mainSizer)

        # Add the tab container and the separator
        self._mainSizer.Add(self._pages, 0, wx.EXPAND)

        if className == "FlatImageBook":
            if agwStyle & INB_LEFT or agwStyle & INB_RIGHT:
                self._pages.SetSizeHints(self._pages._nImgSize * 2, -1)
            else:
                self._pages.SetSizeHints(-1, self._pages._nImgSize * 2)

        # Attach the windows back to the sizer
        if self.GetSelection() >= 0:
            self.DoSetSelection(self._windows[self.GetSelection()])

        if agwStyle & INB_FIT_LABELTEXT:
            self.ResizeTabArea()

        self._mainSizer.Layout()
        # Kick a size event so the new layout takes effect immediately
        dummy = wx.SizeEvent()
        wx.PostEvent(self, dummy)
        self._pages.Refresh()

    def GetAGWWindowStyleFlag(self):
        """
        Returns the L{FlatBookBase} window style.

        :see: L{SetAGWWindowStyleFlag} for a list of possible window style flags.
        """

        return self._agwStyle

    def HasAGWFlag(self, flag):
        """
        Returns whether a flag is present in the L{FlatBookBase} style.

        :param `flag`: one of the possible L{FlatBookBase} window styles.

        :see: L{SetAGWWindowStyleFlag} for a list of possible window style flags.
        """

        agwStyle = self.GetAGWWindowStyleFlag()
        return bool(agwStyle & flag)

    def AddPage(self, page, text, select=False, imageId=-1):
        """
        Adds a page to the book.

        :param `page`: specifies the new page;
        :param `text`: specifies the text for the new page;
        :param `select`: specifies whether the page should be selected;
        :param `imageId`: specifies the optional image index for the new page.

        :note: The call to this function generates the page changing events.
        """

        if not page:
            return

        page.Reparent(self)
        self._windows.append(page)

        if select or len(self._windows) == 1:
            self.DoSetSelection(page)
        else:
            page.Hide()

        self._pages.AddPage(text, select, imageId)
        self.ResizeTabArea()
        self.Refresh()

    def InsertPage(self, page_idx, page, text, select=False, imageId=-1):
        """
        Inserts a page into the book at the specified position.

        :param `page_idx`: specifies the position for the new page;
        :param `page`: specifies the new page;
        :param `text`: specifies the text for the new page;
        :param `select`: specifies whether the page should be selected;
        :param `imageId`: specifies the optional image index for the new page.

        :note: The call to this function generates the page changing events.
        """

        if not page:
            return

        page.Reparent(self)
        self._windows.insert(page_idx, page)

        if select or len(self._windows) == 1:
            self.DoSetSelection(page)
        else:
            page.Hide()

        self._pages.InsertPage(page_idx, text, select, imageId)
        self.ResizeTabArea()
        self.Refresh()

    def DeletePage(self, page):
        """
        Deletes the specified page, and the associated window.

        :param `page`: an integer specifying the page to be deleted.

        :note: The call to this function generates the page changing events.
        """

        if page >= len(self._windows) or page < 0:
            return

        # Fire a closing event
        event = ImageNotebookEvent(wxEVT_IMAGENOTEBOOK_PAGE_CLOSING, self.GetId())
        event.SetSelection(page)
        event.SetEventObject(self)
        self.GetEventHandler().ProcessEvent(event)

        # The event handler allows it?
        if not event.IsAllowed():
            return False

        self.Freeze()

        # Delete the requested page
        pageRemoved = self._windows[page]

        # If the page is the current window, remove it from the sizer as well
        if page == self.GetSelection():
            self._mainSizer.Detach(pageRemoved)

        # Remove it from the array as well
        self._windows.pop(page)

        # Now we can destroy it; in wxWidgets use Destroy instead of delete
        pageRemoved.Destroy()
        self._mainSizer.Layout()

        self._pages.DoDeletePage(page)
        self.ResizeTabArea()
        self.Thaw()

        # Fire a closed event
        closedEvent = ImageNotebookEvent(wxEVT_IMAGENOTEBOOK_PAGE_CLOSED, self.GetId())
        closedEvent.SetSelection(page)
        closedEvent.SetEventObject(self)
        self.GetEventHandler().ProcessEvent(closedEvent)

    def RemovePage(self, page):
        """
        Deletes the specified page, without deleting the associated window.

        :param `page`: an integer specifying the page to be removed.

        :note: The call to this function generates the page changing events.
        """

        # Guard against negative indices too: Python's negative list indexing
        # would otherwise silently operate on the wrong page.
        if page >= len(self._windows) or page < 0:
            return False

        # Fire a closing event
        event = ImageNotebookEvent(wxEVT_IMAGENOTEBOOK_PAGE_CLOSING, self.GetId())
        event.SetSelection(page)
        event.SetEventObject(self)
        self.GetEventHandler().ProcessEvent(event)

        # The event handler allows it?
        if not event.IsAllowed():
            return False

        self.Freeze()

        # Remove the requested page
        pageRemoved = self._windows[page]

        # If the page is the current window, remove it from the sizer as well
        if page == self.GetSelection():
            self._mainSizer.Detach(pageRemoved)

        # Remove it from the array as well
        self._windows.pop(page)
        self._mainSizer.Layout()
        self.ResizeTabArea()
        self.Thaw()

        self._pages.DoDeletePage(page)

        # Fire a closed event
        closedEvent = ImageNotebookEvent(wxEVT_IMAGENOTEBOOK_PAGE_CLOSED, self.GetId())
        closedEvent.SetSelection(page)
        closedEvent.SetEventObject(self)
        self.GetEventHandler().ProcessEvent(closedEvent)

        return True

    def ResizeTabArea(self):
        """ Resizes the tab area if the control has the ``INB_FIT_LABELTEXT`` style set. """

        agwStyle = self.GetAGWWindowStyleFlag()

        if agwStyle & INB_FIT_LABELTEXT == 0:
            return

        if agwStyle & INB_LEFT or agwStyle & INB_RIGHT:
            dc = wx.MemoryDC()
            dc.SelectObject(wx.EmptyBitmap(1, 1))
            font = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT)
            font.SetPointSize(font.GetPointSize()*self._fontSizeMultiple)
            if self.GetFontBold():
                font.SetWeight(wx.FONTWEIGHT_BOLD)
            dc.SetFont(font)
            maxW = 0

            for page in xrange(self.GetPageCount()):
                caption = self._pages.GetPageText(page)
                w, h = dc.GetTextExtent(caption)
                maxW = max(maxW, w)

            maxW += 24  # TODO this is 6*4; 6 is nPadding from DrawLabel

            if not agwStyle & INB_SHOW_ONLY_TEXT:
                maxW += self._pages._nImgSize * 2

            maxW = max(maxW, 100)
            self._pages.SetSizeHints(maxW, -1)
            self._pages._nTabAreaWidth = maxW

    def DeleteAllPages(self):
        """ Deletes all the pages in the book. """

        if not self._windows:
            return

        self.Freeze()

        for win in self._windows:
            win.Destroy()

        self._windows = []
        self.Thaw()

        # remove old selection
        self._pages.ClearAll()
        self._pages.Refresh()

    def SetSelection(self, page):
        """
        Changes the selection from currently visible/selected page to the page
        given by page.

        :param `page`: an integer specifying the page to be selected.

        :note: The call to this function generates the page changing events.
        """

        if page >= len(self._windows):
            return

        if page == self.GetSelection() and not self._bForceSelection:
            return

        oldSelection = self.GetSelection()

        # Generate an event that indicates that an image is about to be selected
        event = ImageNotebookEvent(wxEVT_IMAGENOTEBOOK_PAGE_CHANGING, self.GetId())
        event.SetSelection(page)
        event.SetOldSelection(oldSelection)
        event.SetEventObject(self)
        self.GetEventHandler().ProcessEvent(event)

        # The event handler allows it?
        if not event.IsAllowed() and not self._bForceSelection:
            return

        self.DoSetSelection(self._windows[page])
        # Now we can update the new selection
        self._pages._nIndex = page

        # Refresh calls the OnPaint of this class
        self._pages.Refresh()

        # Generate an event that indicates that an image was selected
        eventChanged = ImageNotebookEvent(wxEVT_IMAGENOTEBOOK_PAGE_CHANGED, self.GetId())
        eventChanged.SetEventObject(self)
        eventChanged.SetOldSelection(oldSelection)
        eventChanged.SetSelection(page)
        self.GetEventHandler().ProcessEvent(eventChanged)

    def AssignImageList(self, imglist):
        """
        Assigns an image list to the control.

        :param `imglist`: an instance of `wx.ImageList`.
        """

        self._pages.AssignImageList(imglist)

        # Force change
        self.SetAGWWindowStyleFlag(self.GetAGWWindowStyleFlag())

    def GetSelection(self):
        """ Returns the current selection, or -1 if there is no tab container yet. """

        if self._pages:
            return self._pages._nIndex
        else:
            return -1

    def DoSetSelection(self, window):
        """
        Select the window by the provided pointer.

        :param `window`: an instance of `wx.Window`.
        """

        curSel = self.GetSelection()
        agwStyle = self.GetAGWWindowStyleFlag()
        # Replace the window in the sizer
        self.Freeze()

        # Check if a new selection was made
        bInsertFirst = (agwStyle & INB_BOTTOM or agwStyle & INB_RIGHT)

        if curSel >= 0:
            # Remove the window from the main sizer
            self._mainSizer.Detach(self._windows[curSel])
            self._windows[curSel].Hide()

        if bInsertFirst:
            self._mainSizer.Insert(0, window, 1, wx.EXPAND)
        else:
            self._mainSizer.Add(window, 1, wx.EXPAND)

        window.Show()
        self._mainSizer.Layout()
        self.Thaw()

    def GetImageList(self):
        """ Returns the associated image list. """

        return self._pages.GetImageList()

    def GetPageCount(self):
        """ Returns the number of pages in the book. """

        return len(self._windows)

    def GetFontBold(self):
        """ Gets the font bold status. """

        return self._fontBold

    def SetFontBold(self, bold):
        """
        Sets whether the page captions are bold or not.

        :param `bold`: ``True`` or ``False``.
        """

        self._fontBold = bold

    def GetFontSizeMultiple(self):
        """ Gets the font size multiple for the page captions. """

        return self._fontSizeMultiple

    def SetFontSizeMultiple(self, multiple):
        """
        Sets the font size multiple for the page captions.

        :param `multiple`: The multiple to be applied to the system font to get our font size.
        """

        self._fontSizeMultiple = multiple

    def SetPageImage(self, page, imageId):
        """
        Sets the image index for the given page.

        :param `page`: an integer specifying the page index;
        :param `imageId`: an index into the image list.
        """

        self._pages.SetPageImage(page, imageId)
        self._pages.Refresh()

    def SetPageText(self, page, text):
        """
        Sets the text for the given page.

        :param `page`: an integer specifying the page index;
        :param `text`: the new tab label.
        """

        self._pages.SetPageText(page, text)
        self._pages.Refresh()

    def GetPageText(self, page):
        """
        Returns the text for the given page.

        :param `page`: an integer specifying the page index.
        """

        return self._pages.GetPageText(page)

    def GetPageImage(self, page):
        """
        Returns the image index for the given page.

        :param `page`: an integer specifying the page index.
        """

        return self._pages.GetPageImage(page)

    def GetPage(self, page):
        """
        Returns the window at the given page position.

        :param `page`: an integer specifying the page to be returned.
        """

        if page >= len(self._windows):
            return

        return self._windows[page]

    def GetCurrentPage(self):
        """ Returns the currently selected notebook page or ``None``. """

        if self.GetSelection() < 0:
            return

        return self.GetPage(self.GetSelection())

    def AdvanceSelection(self, forward=True):
        """
        Cycles through the tabs.

        :param `forward`: if ``True``, the selection is advanced in ascending order
         (to the right), otherwise the selection is advanced in descending order.

        :note: The call to this function generates the page changing events.
        """

        nSel = self.GetSelection()

        if nSel < 0:
            return

        nMax = self.GetPageCount() - 1

        # Wrap around at either end
        if forward:
            newSelection = 0 if nSel == nMax else nSel + 1
        else:
            newSelection = nMax if nSel == 0 else nSel - 1

        self.SetSelection(newSelection)

    def ChangeSelection(self, page):
        """
        Changes the selection for the given page, returning the previous selection.

        :param `page`: an integer specifying the page to be selected.

        :note: The call to this function does not generate the page changing events.
        """

        if page < 0 or page >= self.GetPageCount():
            return

        oldPage = self.GetSelection()
        # Bug fix: DoSetSelection expects the page *window*, not its index.
        # Also keep the tab container's current index in sync and repaint it,
        # mirroring SetSelection but without firing the page changing events.
        self.DoSetSelection(self._windows[page])
        self._pages._nIndex = page
        self._pages.Refresh()

        return oldPage

    CurrentPage = property(GetCurrentPage, doc="See `GetCurrentPage`")
    Page = property(GetPage, doc="See `GetPage`")
    PageCount = property(GetPageCount, doc="See `GetPageCount`")
    PageImage = property(GetPageImage, SetPageImage, doc="See `GetPageImage, SetPageImage`")
    PageText = property(GetPageText, SetPageText, doc="See `GetPageText, SetPageText`")
    Selection = property(GetSelection, SetSelection, doc="See `GetSelection, SetSelection`")
# ---------------------------------------------------------------------------- #
# Class FlatImageBook
# ---------------------------------------------------------------------------- #
class FlatImageBook(FlatBookBase):
    """
    Default implementation of the image book: behaves like a `wx.Notebook`,
    but the pages are switched through images rather than tabs. Typically
    used for configuration dialogs and the like.

    :note: Currently, this control works properly for images of size 32x32 and bigger.
    """

    def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize,
                 style=0, agwStyle=0, name="FlatImageBook"):
        """
        Default class constructor.

        :param `parent`: parent window. Must not be ``None``;
        :param `id`: window identifier. A value of -1 indicates a default value;
        :param `pos`: the control position. A value of (-1, -1) indicates a default position,
         chosen by either the windowing system or wxPython, depending on platform;
        :param `size`: the control size. A value of (-1, -1) indicates a default size,
         chosen by either the windowing system or wxPython, depending on platform;
        :param `style`: the underlying `wx.Panel` window style;
        :param `agwStyle`: the AGW-specific window style, a combination of the
         ``INB_*`` bits; see L{FlatBookBase.SetAGWWindowStyleFlag} for the full
         list of possible flags;
        :param `name`: the window name.
        """

        FlatBookBase.__init__(self, parent, id, pos, size, style, agwStyle, name)

        self._pages = self.CreateImageContainer()

        # Tabs on the left/right mean the book is laid out horizontally;
        # tabs on the top/bottom mean a vertical layout.
        horizontal = bool(agwStyle & INB_LEFT or agwStyle & INB_RIGHT)
        self._mainSizer = wx.BoxSizer(wx.HORIZONTAL if horizontal else wx.VERTICAL)
        self.SetSizer(self._mainSizer)

        # Add the tab container to the sizer
        self._mainSizer.Add(self._pages, 0, wx.EXPAND)

        if horizontal:
            self._pages.SetSizeHints(self._pages.GetImageSize() * 2, -1)
        else:
            self._pages.SetSizeHints(-1, self._pages.GetImageSize() * 2)

        self._mainSizer.Layout()

    def CreateImageContainer(self):
        """ Creates the image container class for L{FlatImageBook}. """

        return ImageContainer(self, wx.ID_ANY, agwStyle=self.GetAGWWindowStyleFlag())
# ---------------------------------------------------------------------------- #
# Class LabelBook
# ---------------------------------------------------------------------------- #
class LabelBook(FlatBookBase):
    """
    An implementation of a notebook control: instead of tabs along an edge,
    the pages are selected through text labels stacked on the left or right
    side of the page area.
    """

    def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize,
                 style=0, agwStyle=0, name="LabelBook"):
        """
        Default class constructor.

        :param `parent`: parent window. Must not be ``None``;
        :param `id`: window identifier. A value of -1 indicates a default value;
        :param `pos`: the control position. A value of (-1, -1) indicates a default position,
         chosen by either the windowing system or wxPython, depending on platform;
        :param `size`: the control size. A value of (-1, -1) indicates a default size,
         chosen by either the windowing system or wxPython, depending on platform;
        :param `style`: the underlying `wx.Panel` window style;
        :param `agwStyle`: the AGW-specific window style, a combination of the
         ``INB_*`` bits; see L{FlatBookBase.SetAGWWindowStyleFlag} for the full
         list of possible flags;
        :param `name`: the window name.
        """

        FlatBookBase.__init__(self, parent, id, pos, size, style, agwStyle, name)

        self._pages = self.CreateImageContainer()

        # Label book specific initialization: the label area always sits
        # beside the page area, so the layout is always horizontal.
        self._mainSizer = wx.BoxSizer(wx.HORIZONTAL)
        self.SetSizer(self._mainSizer)

        # Add the tab container to the sizer
        self._mainSizer.Add(self._pages, 0, wx.EXPAND)
        self._pages.SetSizeHints(self._pages.GetTabAreaWidth(), -1)

        # Initialize the colours map
        self._pages.InitializeColours()

        self.Bind(wx.EVT_SIZE, self.OnSize)

    def CreateImageContainer(self):
        """ Creates the label container (L{LabelContainer}) class for L{LabelBook}. """

        return LabelContainer(self, wx.ID_ANY, agwStyle=self.GetAGWWindowStyleFlag())

    def SetColour(self, which, colour):
        """
        Sets the colour for the specified parameter.

        :param `which`: the colour key;
        :param `colour`: a valid `wx.Colour` instance.

        :see: L{LabelContainer.SetColour} for a list of valid colour keys.
        """

        self._pages.SetColour(which, colour)

    def GetColour(self, which):
        """
        Returns the colour for the specified parameter.

        :param `which`: the colour key.

        :see: L{LabelContainer.SetColour} for a list of valid colour keys.
        """

        return self._pages.GetColour(which)

    def OnSize(self, event):
        """
        Handles the ``wx.EVT_SIZE`` event for L{LabelBook}.

        :param `event`: a `wx.SizeEvent` event to be processed.
        """

        self._pages.Refresh()
        event.Skip()
|
# --------------------------------------------------------------------------- #
# LABELBOOK And FLATIMAGEBOOK Widgets wxPython IMPLEMENTATION
#
# Original C++ Code From Eran, embedded in the FlatMenu source code
#
#
# License: wxWidgets license
#
#
# Python Code By:
#
# <NAME>, @ 03 Nov 2006
# Latest Revision: 17 Jan 2011, 15.00 GMT
#
#
# For All Kind Of Problems, Requests Of Enhancements And Bug Reports, Please
# Write To Me At:
#
# <EMAIL>
# <EMAIL>
#
# Or, Obviously, To The wxPython Mailing List!!!
#
# TODO:
# LabelBook - Support IMB_SHOW_ONLY_IMAGES
# LabelBook - An option for the draw border to only draw the border
# between the controls and the pages so the background
# colour can flow into the window background
#
#
#
# End Of Comments
# --------------------------------------------------------------------------- #
"""
LabelBook and FlatImageBook are quasi-full, generic and owner-drawn
implementations of `wx.Notebook`.
Description
===========
LabelBook and FlatImageBook are quasi-full implementations of `wx.Notebook`,
and designed to be a drop-in replacement for `wx.Notebook`. The API functions are
similar so one can expect the function to behave in the same way.
LabelBook and FlatImageBook share their appearance with `wx.Toolbook` and
`wx.Listbook`, while having more options for custom drawings, label positioning,
mouse pointing and so on. Moreover, they retain also some visual characteristics
of the Outlook address book.
Some features:
- They are generic controls;
- Supports for left, right, top (FlatImageBook only), bottom (FlatImageBook
only) book styles;
- Possibility to draw images only, text only or both (FlatImageBook only);
- Support for a "pin-button", that allows the user to shrink/expand the book
tab area;
- Shadows behind tabs (LabelBook only);
- Gradient shading of the tab area (LabelBook only);
- Web-like mouse pointing on tabs style (LabelBook only);
- Many customizable colours (tab area, active tab text, tab borders, active
tab, highlight) - LabelBook only.
And much more. See the demo for a quasi-complete review of all the functionalities
of LabelBook and FlatImageBook.
Supported Platforms
===================
LabelBook and FlatImageBook have been tested on the following platforms:
* Windows (Windows XP);
* Linux Ubuntu (Dapper 6.06)
Window Styles
=============
This class supports the following window styles:
=========================== =========== ==================================================
Window Styles Hex Value Description
=========================== =========== ==================================================
``INB_BOTTOM`` 0x1 Place labels below the page area. Available only for `FlatImageBook`.
``INB_LEFT`` 0x2 Place labels on the left side. Available only for `FlatImageBook`.
``INB_RIGHT`` 0x4 Place labels on the right side.
``INB_TOP`` 0x8 Place labels above the page area.
``INB_BORDER`` 0x10 Draws a border around `LabelBook` or `FlatImageBook`.
``INB_SHOW_ONLY_TEXT`` 0x20 Shows only text labels and no images. Available only for `LabelBook`.
``INB_SHOW_ONLY_IMAGES`` 0x40 Shows only tab images and no label texts. Available only for `LabelBook`.
``INB_FIT_BUTTON`` 0x80 Displays a pin button to show/hide the book control.
``INB_DRAW_SHADOW`` 0x100 Draw shadows below the book tabs. Available only for `LabelBook`.
``INB_USE_PIN_BUTTON`` 0x200 Displays a pin button to show/hide the book control.
``INB_GRADIENT_BACKGROUND`` 0x400 Draws a gradient shading on the tabs background. Available only for `LabelBook`.
``INB_WEB_HILITE`` 0x800 On mouse hovering, tabs behave like html hyperlinks. Available only for `LabelBook`.
``INB_NO_RESIZE`` 0x1000 Don't allow resizing of the tab area.
``INB_FIT_LABELTEXT`` 0x2000 Will fit the tab area to the longest text (or text+image if you have images) in all the tabs.
=========================== =========== ==================================================
Events Processing
=================
This class processes the following events:
=================================== ==================================================
Event Name Description
=================================== ==================================================
``EVT_IMAGENOTEBOOK_PAGE_CHANGED`` Notify client objects when the active page in `ImageNotebook` has changed.
``EVT_IMAGENOTEBOOK_PAGE_CHANGING`` Notify client objects when the active page in `ImageNotebook` is about to change.
``EVT_IMAGENOTEBOOK_PAGE_CLOSED`` Notify client objects when a page in `ImageNotebook` has been closed.
``EVT_IMAGENOTEBOOK_PAGE_CLOSING`` Notify client objects when a page in `ImageNotebook` is closing.
=================================== ==================================================
License And Version
===================
LabelBook and FlatImageBook are distributed under the wxPython license.
Latest Revision: <NAME> @ 17 Jan 2011, 15.00 GMT
Version 0.5.
"""
# Tell documentation generators (epydoc) that this module's docstrings
# use epytext markup (the L{...}/``...`` syntax seen throughout).
__docformat__ = "epytext"
#----------------------------------------------------------------------
# Beginning Of IMAGENOTEBOOK wxPython Code
#----------------------------------------------------------------------
import wx
from artmanager import ArtManager, DCSaver
from fmresources import *
# Check for the new method in 2.7 (not present in 2.6.3.3).
# wx.Rect.Contains() appeared in wxPython 2.7; on older releases alias it
# to the equivalent wx.Rect.Inside() so the hit-testing code below works
# unchanged on both versions.
if wx.VERSION_STRING < "2.7":
    wx.Rect.Contains = lambda self, point: wx.Rect.Inside(self, point)
# FlatImageBook and LabelBook styles.
# These are bit flags: combine them with "|" when building the agwStyle
# passed to the book constructors.
INB_BOTTOM = 1
""" Place labels below the page area. Available only for `FlatImageBook`."""
INB_LEFT = 2
""" Place labels on the left side. Available only for `FlatImageBook`."""
INB_RIGHT = 4
""" Place labels on the right side. """
INB_TOP = 8
""" Place labels above the page area. """
INB_BORDER = 16
""" Draws a border around `LabelBook` or `FlatImageBook`. """
INB_SHOW_ONLY_TEXT = 32
""" Shows only text labels and no images. Available only for `LabelBook`."""
INB_SHOW_ONLY_IMAGES = 64
""" Shows only tab images and no label texts. Available only for `LabelBook`."""
INB_FIT_BUTTON = 128
""" Displays a pin button to show/hide the book control. """
INB_DRAW_SHADOW = 256
""" Draw shadows below the book tabs. Available only for `LabelBook`."""
INB_USE_PIN_BUTTON = 512
""" Displays a pin button to show/hide the book control. """
INB_GRADIENT_BACKGROUND = 1024
""" Draws a gradient shading on the tabs background. Available only for `LabelBook`."""
INB_WEB_HILITE = 2048
""" On mouse hovering, tabs behave like html hyperlinks. Available only for `LabelBook`."""
INB_NO_RESIZE = 4096
""" Don't allow resizing of the tab area. """
INB_FIT_LABELTEXT = 8192
""" Will fit the tab area to the longest text (or text+image if you have images) in all the tabs. """
# Page-changed/changing notifications reuse the standard wx.Notebook event
# types so generic notebook handlers keep working.
wxEVT_IMAGENOTEBOOK_PAGE_CHANGED = wx.wxEVT_COMMAND_NOTEBOOK_PAGE_CHANGED
wxEVT_IMAGENOTEBOOK_PAGE_CHANGING = wx.wxEVT_COMMAND_NOTEBOOK_PAGE_CHANGING
# Closing/closed notifications have no wx.Notebook counterpart, so fresh
# event types are allocated for them.
wxEVT_IMAGENOTEBOOK_PAGE_CLOSING = wx.NewEventType()
wxEVT_IMAGENOTEBOOK_PAGE_CLOSED = wx.NewEventType()
#-----------------------------------#
#        ImageNotebookEvent
#-----------------------------------#
EVT_IMAGENOTEBOOK_PAGE_CHANGED = wx.EVT_NOTEBOOK_PAGE_CHANGED
""" Notify client objects when the active page in `ImageNotebook` has changed. """
EVT_IMAGENOTEBOOK_PAGE_CHANGING = wx.EVT_NOTEBOOK_PAGE_CHANGING
""" Notify client objects when the active page in `ImageNotebook` is about to change. """
EVT_IMAGENOTEBOOK_PAGE_CLOSING = wx.PyEventBinder(wxEVT_IMAGENOTEBOOK_PAGE_CLOSING, 1)
""" Notify client objects when a page in `ImageNotebook` is closing. """
EVT_IMAGENOTEBOOK_PAGE_CLOSED = wx.PyEventBinder(wxEVT_IMAGENOTEBOOK_PAGE_CLOSED, 1)
""" Notify client objects when a page in `ImageNotebook` has been closed. """
# ---------------------------------------------------------------------------- #
# Class ImageNotebookEvent
# ---------------------------------------------------------------------------- #
class ImageNotebookEvent(wx.PyCommandEvent):
    """
    Command event emitted by the book controls for the
    ``EVT_IMAGENOTEBOOK_PAGE_CHANGED``, ``EVT_IMAGENOTEBOOK_PAGE_CHANGING``,
    ``EVT_IMAGENOTEBOOK_PAGE_CLOSING`` and ``EVT_IMAGENOTEBOOK_PAGE_CLOSED``
    notifications.
    """

    def __init__(self, eventType, eventId=1, sel=-1, oldsel=-1):
        """
        Default class constructor.

        :param `eventType`: the event type;
        :param `eventId`: the event identifier;
        :param `sel`: the current selection;
        :param `oldsel`: the old selection.
        """
        wx.PyCommandEvent.__init__(self, eventType, eventId)
        # Keep private copies of the parameters; the selection bookkeeping
        # and the veto flag are maintained entirely on this side.
        self._eventType = eventType
        self._sel = sel
        self._oldsel = oldsel
        # Vetoable events start out as allowed.
        self._allowed = True

    def SetSelection(self, s):
        """
        Sets the event selection.

        :param `s`: an integer specifying the new selection.
        """
        self._sel = s

    def SetOldSelection(self, s):
        """
        Sets the event old selection.

        :param `s`: an integer specifying the old selection.
        """
        self._oldsel = s

    def GetSelection(self):
        """ Returns the event selection. """
        return self._sel

    def GetOldSelection(self):
        """ Returns the old event selection. """
        return self._oldsel

    def Veto(self):
        """
        Prevents the change announced by this event from happening.

        :note: It is in general a good idea to notify the user about the
         reasons for vetoing the change, because otherwise the application's
         behaviour (which just refuses to do what the user wants) might be
         quite surprising.
        """
        self._allowed = False

    def Allow(self):
        """
        This is the opposite of L{Veto}: it explicitly allows the event to
        be processed. For most events it is not necessary to call this method
        as the events are allowed anyhow, but some are forbidden by default
        (this will be mentioned in the corresponding event description).
        """
        self._allowed = True

    def IsAllowed(self):
        """
        Returns ``True`` if the change is allowed (L{Veto} hasn't been
        called) or ``False`` otherwise (if it was).
        """
        return self._allowed
# ---------------------------------------------------------------------------- #
# Class ImageInfo
# ---------------------------------------------------------------------------- #
class ImageInfo(object):
    """
    Per-tab record for L{LabelBook}: caption, image index, and the layout
    geometry (position, size, caption rectangle) the container computes
    while drawing.
    """

    def __init__(self, strCaption="", imageIndex=-1):
        """
        Default class constructor.

        :param `strCaption`: the tab caption;
        :param `imageIndex`: the tab image index based on the assigned (set)
         `wx.ImageList` (if any).
        """
        self._strCaption = strCaption
        self._ImageIndex = imageIndex
        # Layout data, filled in by the owning container during painting.
        self._pos = wx.Point()
        self._size = wx.Size()
        self._captionRect = wx.Rect()

    def SetCaption(self, value):
        """
        Sets the tab caption.

        :param `value`: the new tab caption.
        """
        self._strCaption = value

    def GetCaption(self):
        """ Returns the tab caption. """
        return self._strCaption

    def SetPosition(self, value):
        """
        Sets the tab position.

        :param `value`: the new tab position, an instance of `wx.Point`.
        """
        self._pos = value

    def GetPosition(self):
        """ Returns the tab position. """
        return self._pos

    def SetSize(self, value):
        """
        Sets the tab size.

        :param `value`: the new tab size, an instance of `wx.Size`.
        """
        self._size = value

    def GetSize(self):
        """ Returns the tab size. """
        return self._size

    def SetImageIndex(self, value):
        """
        Sets the tab image index.

        :param `value`: an index into the assigned image list.
        """
        self._ImageIndex = value

    def GetImageIndex(self):
        """ Returns the tab image index. """
        return self._ImageIndex

    def SetTextRect(self, rect):
        """
        Sets the client rectangle available for the tab text.

        :param `rect`: the tab text client rectangle, an instance of `wx.Rect`.
        """
        self._captionRect = rect

    def GetTextRect(self):
        """ Returns the client rectangle available for the tab text. """
        return self._captionRect
# ---------------------------------------------------------------------------- #
# Class ImageContainerBase
# ---------------------------------------------------------------------------- #
class ImageContainerBase(wx.Panel):
    """
    Base class for L{FlatImageBook} image container.

    Holds the ordered list of L{ImageInfo} tab records, tracks the current
    and hovered tab indices, and implements the shared mouse/pin-button
    behaviour that the concrete containers build upon.
    """

    def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize,
                 style=0, agwStyle=0, name="ImageContainerBase"):
        """
        Default class constructor.

        :param `parent`: parent window. Must not be ``None``;
        :param `id`: window identifier. A value of -1 indicates a default value;
        :param `pos`: the control position. A value of (-1, -1) indicates a default position,
         chosen by either the windowing system or wxPython, depending on platform;
        :param `size`: the control size. A value of (-1, -1) indicates a default size,
         chosen by either the windowing system or wxPython, depending on platform;
        :param `style`: the underlying `wx.Panel` window style;
        :param `agwStyle`: the AGW-specific window style. This can be a combination of the
         following bits:

         =========================== =========== ==================================================
         Window Styles               Hex Value   Description
         =========================== =========== ==================================================
         ``INB_BOTTOM``                      0x1 Place labels below the page area. Available only for L{FlatImageBook}.
         ``INB_LEFT``                        0x2 Place labels on the left side. Available only for L{FlatImageBook}.
         ``INB_RIGHT``                       0x4 Place labels on the right side.
         ``INB_TOP``                         0x8 Place labels above the page area.
         ``INB_BORDER``                     0x10 Draws a border around L{LabelBook} or L{FlatImageBook}.
         ``INB_SHOW_ONLY_TEXT``             0x20 Shows only text labels and no images. Available only for L{LabelBook}.
         ``INB_SHOW_ONLY_IMAGES``           0x40 Shows only tab images and no label texts. Available only for L{LabelBook}.
         ``INB_FIT_BUTTON``                 0x80 Displays a pin button to show/hide the book control.
         ``INB_DRAW_SHADOW``               0x100 Draw shadows below the book tabs. Available only for L{LabelBook}.
         ``INB_USE_PIN_BUTTON``            0x200 Displays a pin button to show/hide the book control.
         ``INB_GRADIENT_BACKGROUND``       0x400 Draws a gradient shading on the tabs background. Available only for L{LabelBook}.
         ``INB_WEB_HILITE``                0x800 On mouse hovering, tabs behave like html hyperlinks. Available only for L{LabelBook}.
         ``INB_NO_RESIZE``                0x1000 Don't allow resizing of the tab area.
         ``INB_FIT_LABELTEXT``            0x2000 Will fit the tab area to the longest text (or text+image if you have images) in all the tabs.
         =========================== =========== ==================================================

        :param `name`: the window name.
        """
        # Index of the currently selected tab (-1 when there is none).
        self._nIndex = -1
        # Square image size in pixels; updated from the image list height
        # in AssignImageList().
        self._nImgSize = 16
        # The wx.ImageList supplying tab icons, or None.
        self._ImageList = None
        # Index of the tab currently under the mouse (-1 for none).
        # NOTE: "Hoevered" is a historical misspelling of "hovered" kept
        # for compatibility with subclasses that read this attribute.
        self._nHoeveredImgIdx = -1
        # True while the tab area is collapsed via the pin button.
        self._bCollapsed = False
        # Size of the tab area saved before collapsing, restored on expand.
        self._tabAreaSize = (-1, -1)
        # Current visual state of the pin button (INB_PIN_* constants).
        self._nPinButtonStatus = INB_PIN_NONE
        # Ordered list of ImageInfo records, one per tab.
        self._pagesInfoVec = []
        # Screen rectangle of the pin button, updated by DrawPin().
        self._pinBtnRect = wx.Rect()

        wx.Panel.__init__(self, parent, id, pos, size, style | wx.NO_BORDER | wx.NO_FULL_REPAINT_ON_RESIZE, name)

    def HasAGWFlag(self, flag):
        """
        Tests for existance of `flag` in the parent book's AGW style.

        :param `flag`: a window style. This can be a combination of the
         ``INB_*`` bits documented in L{__init__}.
        """
        style = self.GetParent().GetAGWWindowStyleFlag()
        # Python-2 era "ternary" idiom: picks [True] or [False] then unwraps.
        res = (style & flag and [True] or [False])[0]
        return res

    def ClearFlag(self, flag):
        """
        Removes `flag` from the parent book's AGW style.

        :param `flag`: a window style flag.

        :see: L{HasAGWFlag} for a list of possible window style flags.
        """
        parent = self.GetParent()
        agwStyle = parent.GetAGWWindowStyleFlag()
        agwStyle &= ~(flag)
        parent.SetAGWWindowStyleFlag(agwStyle)

    def AssignImageList(self, imglist):
        """
        Assigns an image list to the L{ImageContainerBase}.

        :param `imglist`: an instance of `wx.ImageList`.

        :note: An empty or ``None`` image list is silently ignored.
        """
        if imglist and imglist.GetImageCount() != 0:
            # Derive the tab image size from the first bitmap's height.
            self._nImgSize = imglist.GetBitmap(0).GetHeight()
            self._ImageList = imglist
            # Re-applying the (unchanged) style forces the parent to
            # recompute its layout with the new image size.
            parent = self.GetParent()
            agwStyle = parent.GetAGWWindowStyleFlag()
            parent.SetAGWWindowStyleFlag(agwStyle)

    def GetImageList(self):
        """ Return the image list for L{ImageContainerBase}. """
        return self._ImageList

    def GetImageSize(self):
        """ Returns the image size inside the L{ImageContainerBase} image list. """
        return self._nImgSize

    def FixTextSize(self, dc, text, maxWidth):
        """
        Fixes the text, to fit `maxWidth` value. If the text length exceeds
        `maxWidth` value this function truncates it and appends two dots at
        the end. ("Long Long Long Text" might become "Long Long...").

        :param `dc`: an instance of `wx.DC`;
        :param `text`: the text to fix/truncate;
        :param `maxWidth`: the maximum allowed width for the text, in pixels.
        """
        return ArtManager.Get().TruncateText(dc, text, maxWidth)

    def CanDoBottomStyle(self):
        """
        Allows the parent to examine the children type. Some implementation
        (such as L{LabelBook}), does not support top/bottom images, only
        left/right. The base class is conservative and reports ``False``.
        """
        return False

    def AddPage(self, caption, selected=False, imgIdx=-1):
        """
        Adds a page to the container.

        :param `caption`: specifies the text for the new tab;
        :param `selected`: specifies whether the page should be selected;
        :param `imgIdx`: specifies the optional image index for the new tab.
        """
        self._pagesInfoVec.append(ImageInfo(caption, imgIdx))
        # The very first page always becomes the selection.
        if selected or len(self._pagesInfoVec) == 1:
            self._nIndex = len(self._pagesInfoVec)-1
        self.Refresh()

    def InsertPage(self, page_idx, caption, selected=False, imgIdx=-1):
        """
        Inserts a page into the container at the specified position.

        :param `page_idx`: specifies the position for the new tab;
        :param `caption`: specifies the text for the new tab;
        :param `imgIdx`: specifies the optional image index for the new tab.

        :note: NOTE(review): when `selected` is ``True`` this selects the
         *last* tab (``len-1``), not the one inserted at `page_idx` —
         looks unintended; confirm against callers before changing.
        """
        self._pagesInfoVec.insert(page_idx, ImageInfo(caption, imgIdx))
        if selected or len(self._pagesInfoVec) == 1:
            self._nIndex = len(self._pagesInfoVec)-1
        self.Refresh()

    def SetPageImage(self, page, imgIdx):
        """
        Sets the image for the given page.

        :param `page`: the index of the tab;
        :param `imgIdx`: specifies the optional image index for the tab.
        """
        imgInfo = self._pagesInfoVec[page]
        imgInfo.SetImageIndex(imgIdx)

    def SetPageText(self, page, text):
        """
        Sets the tab caption for the given page.

        :param `page`: the index of the tab;
        :param `text`: the new tab caption.
        """
        imgInfo = self._pagesInfoVec[page]
        imgInfo.SetCaption(text)

    def GetPageImage(self, page):
        """
        Returns the image index for the given page.

        :param `page`: the index of the tab.
        """
        imgInfo = self._pagesInfoVec[page]
        return imgInfo.GetImageIndex()

    def GetPageText(self, page):
        """
        Returns the tab caption for the given page.

        :param `page`: the index of the tab.
        """
        imgInfo = self._pagesInfoVec[page]
        return imgInfo.GetCaption()

    def ClearAll(self):
        """ Deletes all the pages in the container. """
        self._pagesInfoVec = []
        self._nIndex = wx.NOT_FOUND

    def DoDeletePage(self, page):
        """
        Does the actual page deletion.

        :param `page`: the index of the tab.
        """
        # Remove the page from the vector
        book = self.GetParent()
        self._pagesInfoVec.pop(page)

        # Deleting at or before the current index shifts the selection left.
        if self._nIndex >= page:
            self._nIndex = self._nIndex - 1

        # The delete page was the last first on the array,
        # but the book still has more pages, so we set the
        # active page to be the first one (0)
        if self._nIndex < 0 and len(self._pagesInfoVec) > 0:
            self._nIndex = 0

        # Refresh the tabs
        if self._nIndex >= 0:
            # _bForceSelection suppresses the usual "same page" short-circuit
            # in the book's SetSelection while the tabs are rebuilt.
            book._bForceSelection = True
            book.SetSelection(self._nIndex)
            book._bForceSelection = False

        if not self._pagesInfoVec:
            # Erase the page container drawings
            dc = wx.ClientDC(self)
            dc.Clear()

    def OnSize(self, event):
        """
        Handles the ``wx.EVT_SIZE`` event for L{ImageContainerBase}.

        :param `event`: a `wx.SizeEvent` event to be processed.
        """
        self.Refresh() # Call on paint
        event.Skip()

    def OnEraseBackground(self, event):
        """
        Handles the ``wx.EVT_ERASE_BACKGROUND`` event for L{ImageContainerBase}.

        :param `event`: a `wx.EraseEvent` event to be processed.

        :note: This method is intentionally empty to reduce flicker.
        """
        pass

    def HitTest(self, pt):
        """
        Returns the index of the tab at the specified position or ``wx.NOT_FOUND``
        if ``None``, plus the flag style of L{HitTest}.

        :param `pt`: an instance of `wx.Point`, to test for hits.

        :return: The index of the tab at the specified position plus the hit test
         flag, which can be one of the following bits:

         ====================== ======= ================================
         HitTest Flags           Value  Description
         ====================== ======= ================================
         ``IMG_OVER_IMG``             0 The mouse is over the tab icon
         ``IMG_OVER_PIN``             1 The mouse is over the pin button
         ``IMG_OVER_EW_BORDER``       2 The mouse is over the east-west book border
         ``IMG_NONE``                 3 Nowhere
         ====================== ======= ================================
        """
        style = self.GetParent().GetAGWWindowStyleFlag()

        if style & INB_USE_PIN_BUTTON:
            if self._pinBtnRect.Contains(pt):
                return -1, IMG_OVER_PIN

        for i in xrange(len(self._pagesInfoVec)):
            # A (-1, -1) position marks a tab that has not been laid out
            # yet; everything after it is equally unpositioned, so stop.
            if self._pagesInfoVec[i].GetPosition() == wx.Point(-1, -1):
                break

            # For Web Hover style, we test the TextRect
            if not self.HasAGWFlag(INB_WEB_HILITE):
                buttonRect = wx.RectPS(self._pagesInfoVec[i].GetPosition(), self._pagesInfoVec[i].GetSize())
            else:
                buttonRect = self._pagesInfoVec[i].GetTextRect()

            if buttonRect.Contains(pt):
                return i, IMG_OVER_IMG

        if self.PointOnSash(pt):
            return -1, IMG_OVER_EW_BORDER
        else:
            return -1, IMG_NONE

    def PointOnSash(self, pt):
        """
        Tests whether `pt` is located on the sash.

        :param `pt`: an instance of `wx.Point`, to test for hits.
        """
        # Check if we are on the sash border (a 4-pixel strip on the side
        # of the tab area facing the pages).
        cltRect = self.GetClientRect()

        if self.HasAGWFlag(INB_LEFT) or self.HasAGWFlag(INB_TOP):
            if pt.x > cltRect.x + cltRect.width - 4:
                return True
        else:
            if pt.x < 4:
                return True

        return False

    def OnMouseLeftDown(self, event):
        """
        Handles the ``wx.EVT_LEFT_DOWN`` event for L{ImageContainerBase}.

        :param `event`: a `wx.MouseEvent` event to be processed.
        """
        newSelection = -1  # NOTE(review): unused local, kept as-is.
        event.Skip()

        # Support for collapse/expand
        style = self.GetParent().GetAGWWindowStyleFlag()
        if style & INB_USE_PIN_BUTTON:

            if self._pinBtnRect.Contains(event.GetPosition()):

                self._nPinButtonStatus = INB_PIN_PRESSED
                dc = wx.ClientDC(self)
                self.DrawPin(dc, self._pinBtnRect, not self._bCollapsed)
                return

        # Incase panel is collapsed, there is nothing
        # to check
        if self._bCollapsed:
            return

        tabIdx, where = self.HitTest(event.GetPosition())

        if where == IMG_OVER_IMG:
            # Clear hover highlight before the selection changes.
            self._nHoeveredImgIdx = -1

        if tabIdx == -1:
            return

        self.GetParent().SetSelection(tabIdx)

    def OnMouseLeaveWindow(self, event):
        """
        Handles the ``wx.EVT_LEAVE_WINDOW`` event for L{ImageContainerBase}.

        :param `event`: a `wx.MouseEvent` event to be processed.
        """
        # Only repaint if something was actually highlighted.
        bRepaint = self._nHoeveredImgIdx != -1
        self._nHoeveredImgIdx = -1

        # Make sure the pin button status is NONE
        # incase we were in pin button style
        style = self.GetParent().GetAGWWindowStyleFlag()

        if style & INB_USE_PIN_BUTTON:

            self._nPinButtonStatus = INB_PIN_NONE
            dc = wx.ClientDC(self)
            self.DrawPin(dc, self._pinBtnRect, not self._bCollapsed)

        # Restore cursor
        wx.SetCursor(wx.StockCursor(wx.CURSOR_ARROW))

        if bRepaint:
            self.Refresh()

    def OnMouseLeftUp(self, event):
        """
        Handles the ``wx.EVT_LEFT_UP`` event for L{ImageContainerBase}.

        Releasing the mouse on the pin button toggles the collapsed state
        of the tab area and re-lays-out the parent book.

        :param `event`: a `wx.MouseEvent` event to be processed.
        """
        style = self.GetParent().GetAGWWindowStyleFlag()

        if style & INB_USE_PIN_BUTTON:

            # Label containers do not support top/bottom placement, so the
            # collapse direction differs (see CanDoBottomStyle).
            bIsLabelContainer = not self.CanDoBottomStyle()

            if self._pinBtnRect.Contains(event.GetPosition()):

                self._nPinButtonStatus = INB_PIN_NONE
                self._bCollapsed = not self._bCollapsed

                if self._bCollapsed:

                    # Save the current tab area width
                    self._tabAreaSize = self.GetSize()

                    if bIsLabelContainer:

                        self.SetSizeHints(20, self._tabAreaSize.y)

                    else:

                        if style & INB_BOTTOM or style & INB_TOP:
                            self.SetSizeHints(self._tabAreaSize.x, 20)
                        else:
                            self.SetSizeHints(20, self._tabAreaSize.y)

                else:

                    if bIsLabelContainer:

                        self.SetSizeHints(self._tabAreaSize.x, -1)

                    else:

                        # Restore the tab area size
                        if style & INB_BOTTOM or style & INB_TOP:
                            self.SetSizeHints(-1, self._tabAreaSize.y)
                        else:
                            self.SetSizeHints(self._tabAreaSize.x, -1)

                self.GetParent().GetSizer().Layout()
                self.Refresh()
                return

    def OnMouseMove(self, event):
        """
        Handles the ``wx.EVT_MOTION`` event for L{ImageContainerBase}.

        Tracks the hovered tab, updates the pin-button pressed state and
        adjusts the mouse cursor for the web-hilite style.

        :param `event`: a `wx.MouseEvent` event to be processed.
        """
        style = self.GetParent().GetAGWWindowStyleFlag()
        if style & INB_USE_PIN_BUTTON:

            # Check to see if we are in the pin button rect
            if not self._pinBtnRect.Contains(event.GetPosition()) and self._nPinButtonStatus == INB_PIN_PRESSED:

                self._nPinButtonStatus = INB_PIN_NONE
                dc = wx.ClientDC(self)
                self.DrawPin(dc, self._pinBtnRect, not self._bCollapsed)

        imgIdx, where = self.HitTest(event.GetPosition())

        self._nHoeveredImgIdx = imgIdx

        if not self._bCollapsed:

            if self._nHoeveredImgIdx >= 0 and self._nHoeveredImgIdx < len(self._pagesInfoVec):

                # Change the cursor to be Hand
                if self.HasAGWFlag(INB_WEB_HILITE) and self._nHoeveredImgIdx != self._nIndex:
                    wx.SetCursor(wx.StockCursor(wx.CURSOR_HAND))

            else:

                # Restore the cursor only if we have the Web hover style set,
                # and we are not currently hovering the sash
                if self.HasAGWFlag(INB_WEB_HILITE) and not self.PointOnSash(event.GetPosition()):
                    wx.SetCursor(wx.StockCursor(wx.CURSOR_ARROW))

        # Dont display hover effect when hoevering the
        # selected label
        if self._nHoeveredImgIdx == self._nIndex:
            self._nHoeveredImgIdx = -1

        self.Refresh()

    def DrawPin(self, dc, rect, downPin):
        """
        Draw a pin button, that allows collapsing of the image panel.

        :param `dc`: an instance of `wx.DC`;
        :param `rect`: the pin button client rectangle;
        :param `downPin`: ``True`` if the pin button is facing downwards, ``False``
         if it is facing leftwards.
        """
        # Set the bitmap according to the button status
        if downPin:
            pinBmp = wx.BitmapFromXPMData(pin_down_xpm)
        else:
            pinBmp = wx.BitmapFromXPMData(pin_left_xpm)

        xx = rect.x + 2

        if self._nPinButtonStatus in [INB_PIN_HOVER, INB_PIN_NONE]:

            dc.SetBrush(wx.TRANSPARENT_BRUSH)
            dc.SetPen(wx.BLACK_PEN)
            dc.DrawRectangle(xx, rect.y, 16, 16)

            # Draw upper and left border with grey colour
            dc.SetPen(wx.WHITE_PEN)
            dc.DrawLine(xx, rect.y, xx + 16, rect.y)
            dc.DrawLine(xx, rect.y, xx, rect.y + 16)

        elif self._nPinButtonStatus == INB_PIN_PRESSED:

            dc.SetBrush(wx.TRANSPARENT_BRUSH)
            dc.SetPen(wx.Pen(wx.NamedColour("LIGHT GREY")))
            dc.DrawRectangle(xx, rect.y, 16, 16)

            # Draw upper and left border with grey colour
            dc.SetPen(wx.BLACK_PEN)
            dc.DrawLine(xx, rect.y, xx + 16, rect.y)
            dc.DrawLine(xx, rect.y, xx, rect.y + 16)

        # Set the masking: white pixels in the XPM become transparent.
        pinBmp.SetMask(wx.Mask(pinBmp, wx.WHITE))

        # Draw the new bitmap
        dc.DrawBitmap(pinBmp, xx, rect.y, True)

        # Save the pin rect
        self._pinBtnRect = rect
# ---------------------------------------------------------------------------- #
# Class ImageContainer
# ---------------------------------------------------------------------------- #
class ImageContainer(ImageContainerBase):
"""
Base class for L{FlatImageBook} image container.
"""
def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize,
style=0, agwStyle=0, name="ImageContainer"):
"""
Default class constructor.
:param `parent`: parent window. Must not be ``None``;
:param `id`: window identifier. A value of -1 indicates a default value;
:param `pos`: the control position. A value of (-1, -1) indicates a default position,
chosen by either the windowing system or wxPython, depending on platform;
:param `size`: the control size. A value of (-1, -1) indicates a default size,
chosen by either the windowing system or wxPython, depending on platform;
:param `style`: the underlying `wx.Panel` window style;
:param `agwStyle`: the AGW-specific window style. This can be a combination of the
following bits:
=========================== =========== ==================================================
Window Styles Hex Value Description
=========================== =========== ==================================================
``INB_BOTTOM`` 0x1 Place labels below the page area. Available only for L{FlatImageBook}.
``INB_LEFT`` 0x2 Place labels on the left side. Available only for L{FlatImageBook}.
``INB_RIGHT`` 0x4 Place labels on the right side.
``INB_TOP`` 0x8 Place labels above the page area.
``INB_BORDER`` 0x10 Draws a border around L{LabelBook} or L{FlatImageBook}.
``INB_SHOW_ONLY_TEXT`` 0x20 Shows only text labels and no images. Available only for L{LabelBook}.
``INB_SHOW_ONLY_IMAGES`` 0x40 Shows only tab images and no label texts. Available only for L{LabelBook}.
``INB_FIT_BUTTON`` 0x80 Displays a pin button to show/hide the book control.
``INB_DRAW_SHADOW`` 0x100 Draw shadows below the book tabs. Available only for L{LabelBook}.
``INB_USE_PIN_BUTTON`` 0x200 Displays a pin button to show/hide the book control.
``INB_GRADIENT_BACKGROUND`` 0x400 Draws a gradient shading on the tabs background. Available only for L{LabelBook}.
``INB_WEB_HILITE`` 0x800 On mouse hovering, tabs behave like html hyperlinks. Available only for L{LabelBook}.
``INB_NO_RESIZE`` 0x1000 Don't allow resizing of the tab area.
``INB_FIT_LABELTEXT`` 0x2000 Will fit the tab area to the longest text (or text+image if you have images) in all the tabs.
=========================== =========== ==================================================
:param `name`: the window name.
"""
ImageContainerBase.__init__(self, parent, id, pos, size, style, agwStyle, name)
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.Bind(wx.EVT_SIZE, self.OnSize)
self.Bind(wx.EVT_LEFT_DOWN, self.OnMouseLeftDown)
self.Bind(wx.EVT_LEFT_UP, self.OnMouseLeftUp)
self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)
self.Bind(wx.EVT_MOTION, self.OnMouseMove)
self.Bind(wx.EVT_LEAVE_WINDOW, self.OnMouseLeaveWindow)
def OnSize(self, event):
"""
Handles the ``wx.EVT_SIZE`` event for L{ImageContainer}.
:param `event`: a `wx.SizeEvent` event to be processed.
"""
ImageContainerBase.OnSize(self, event)
event.Skip()
def OnMouseLeftDown(self, event):
"""
Handles the ``wx.EVT_LEFT_DOWN`` event for L{ImageContainer}.
:param `event`: a `wx.MouseEvent` event to be processed.
"""
ImageContainerBase.OnMouseLeftDown(self, event)
event.Skip()
def OnMouseLeftUp(self, event):
"""
Handles the ``wx.EVT_LEFT_UP`` event for L{ImageContainer}.
:param `event`: a `wx.MouseEvent` event to be processed.
"""
ImageContainerBase.OnMouseLeftUp(self, event)
event.Skip()
    def OnEraseBackground(self, event):
        """
        Handles the ``wx.EVT_ERASE_BACKGROUND`` event for L{ImageContainer}.

        :param `event`: a `wx.EraseEvent` event to be processed.

        :note: Delegates to the base class, which intentionally does nothing
         in order to reduce flicker; the event is deliberately not skipped.
        """
        ImageContainerBase.OnEraseBackground(self, event)
def OnMouseMove(self, event):
"""
Handles the ``wx.EVT_MOTION`` event for L{ImageContainer}.
:param `event`: a `wx.MouseEvent` event to be processed.
"""
ImageContainerBase.OnMouseMove(self, event)
event.Skip()
def OnMouseLeaveWindow(self, event):
"""
Handles the ``wx.EVT_LEAVE_WINDOW`` event for L{ImageContainer}.
:param `event`: a `wx.MouseEvent` event to be processed.
"""
ImageContainerBase.OnMouseLeaveWindow(self, event)
event.Skip()
    def CanDoBottomStyle(self):
        """
        Allows the parent to examine the children type. Some implementation
        (such as L{LabelBook}), does not support top/bottom images, only left/right.

        :return: ``True`` — L{ImageContainer} supports top/bottom tab
         placement in addition to left/right.
        """
        return True
    def OnPaint(self, event):
        """
        Handles the ``wx.EVT_PAINT`` event for L{ImageContainer}.

        Paints the whole tab strip: background, optional pin button, then one
        button rectangle + image + caption per page, laying them out either
        vertically (left/right styles) or horizontally (top/bottom styles).

        :param `event`: a `wx.PaintEvent` event to be processed.
        """

        dc = wx.BufferedPaintDC(self)
        style = self.GetParent().GetAGWWindowStyleFlag()

        backBrush = wx.WHITE_BRUSH
        if style & INB_BORDER:
            borderPen = wx.Pen(wx.SystemSettings_GetColour(wx.SYS_COLOUR_3DSHADOW))
        else:
            borderPen = wx.TRANSPARENT_PEN

        size = self.GetSize()

        # Background
        dc.SetBrush(backBrush)

        borderPen.SetWidth(1)
        dc.SetPen(borderPen)
        dc.DrawRectangle(0, 0, size.x, size.y)
        bUsePin = (style & INB_USE_PIN_BUTTON and [True] or [False])[0]

        if bUsePin:

            # Draw the pin button
            clientRect = self.GetClientRect()
            pinRect = wx.Rect(clientRect.GetX() + clientRect.GetWidth() - 20, 2, 20, 20)
            self.DrawPin(dc, pinRect, not self._bCollapsed)

            # When collapsed, only the pin button is visible -- nothing else
            # to paint.
            if self._bCollapsed:
                return

        borderPen = wx.BLACK_PEN
        borderPen.SetWidth(1)
        dc.SetPen(borderPen)
        dc.DrawLine(0, size.y, size.x, size.y)
        dc.DrawPoint(0, size.y)

        clientSize = 0
        # Left/right placement stacks buttons along the y axis; top/bottom
        # along the x axis.
        bUseYcoord = (style & INB_RIGHT or style & INB_LEFT)

        if bUseYcoord:
            clientSize = size.GetHeight()
        else:
            clientSize = size.GetWidth()

        # We reserver 20 pixels for the 'pin' button

        # The drawing of the images start position. This is
        # depenedent of the style, especially when Pin button
        # style is requested
        if bUsePin:
            if style & INB_TOP or style & INB_BOTTOM:
                pos = (style & INB_BORDER and [0] or [1])[0]
            else:
                pos = (style & INB_BORDER and [20] or [21])[0]
        else:
            pos = (style & INB_BORDER and [0] or [1])[0]

        nPadding = 4    # Pad text with 2 pixels on the left and right
        nTextPaddingLeft = 2

        count = 0

        for i in xrange(len(self._pagesInfoVec)):

            count = count + 1

            # incase the 'fit button' style is applied, we set the rectangle width to the
            # text width plus padding
            # Incase the style IS applied, but the style is either LEFT or RIGHT
            # we ignore it
            normalFont = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT)
            dc.SetFont(normalFont)

            textWidth, textHeight = dc.GetTextExtent(self._pagesInfoVec[i].GetCaption())

            # Restore font to be normal
            normalFont.SetWeight(wx.FONTWEIGHT_NORMAL)
            dc.SetFont(normalFont)

            # Default values for the surronounding rectangle
            # around a button
            rectWidth = self._nImgSize * 2  # To avoid the recangle to 'touch' the borders
            rectHeight = self._nImgSize * 2

            # Incase the style requires non-fixed button (fit to text)
            # recalc the rectangle width
            if style & INB_FIT_BUTTON and \
               not ((style & INB_LEFT) or (style & INB_RIGHT)) and \
               not self._pagesInfoVec[i].GetCaption() == "" and \
               not (style & INB_SHOW_ONLY_IMAGES):

                rectWidth = ((textWidth + nPadding * 2) > rectWidth and [nPadding * 2 + textWidth] or [rectWidth])[0]

                # Make the width an even number
                if rectWidth % 2 != 0:
                    rectWidth += 1

            # Check that we have enough space to draw the button
            # If Pin button is used, consider its space as well (applicable for top/botton style)
            # since in the left/right, its size is already considered in 'pos'
            pinBtnSize = (bUsePin and [20] or [0])[0]

            if pos + rectWidth + pinBtnSize > clientSize:
                break

            # Calculate the button rectangle
            modRectWidth = ((style & INB_LEFT or style & INB_RIGHT) and [rectWidth - 2] or [rectWidth])[0]
            modRectHeight = ((style & INB_LEFT or style & INB_RIGHT) and [rectHeight] or [rectHeight - 2])[0]

            if bUseYcoord:
                buttonRect = wx.Rect(1, pos, modRectWidth, modRectHeight)
            else:
                buttonRect = wx.Rect(pos , 1, modRectWidth, modRectHeight)

            # Check if we need to draw a rectangle around the button
            if self._nIndex == i:

                # Set the colours
                penColour = wx.SystemSettings_GetColour(wx.SYS_COLOUR_ACTIVECAPTION)
                brushColour = ArtManager.Get().LightColour(wx.SystemSettings_GetColour(wx.SYS_COLOUR_ACTIVECAPTION), 75)

                dc.SetPen(wx.Pen(penColour))
                dc.SetBrush(wx.Brush(brushColour))

                # Fix the surrounding of the rect if border is set
                if style & INB_BORDER:

                    if style & INB_TOP or style & INB_BOTTOM:
                        buttonRect = wx.Rect(buttonRect.x + 1, buttonRect.y, buttonRect.width - 1, buttonRect.height)
                    else:
                        buttonRect = wx.Rect(buttonRect.x, buttonRect.y + 1, buttonRect.width, buttonRect.height - 1)

                dc.DrawRectangleRect(buttonRect)

            if self._nHoeveredImgIdx == i:

                # Set the colours
                penColour = wx.SystemSettings_GetColour(wx.SYS_COLOUR_ACTIVECAPTION)
                brushColour = ArtManager.Get().LightColour(wx.SystemSettings_GetColour(wx.SYS_COLOUR_ACTIVECAPTION), 90)

                dc.SetPen(wx.Pen(penColour))
                dc.SetBrush(wx.Brush(brushColour))

                # Fix the surrounding of the rect if border is set
                if style & INB_BORDER:

                    if style & INB_TOP or style & INB_BOTTOM:
                        buttonRect = wx.Rect(buttonRect.x + 1, buttonRect.y, buttonRect.width - 1, buttonRect.height)
                    else:
                        buttonRect = wx.Rect(buttonRect.x, buttonRect.y + 1, buttonRect.width, buttonRect.height - 1)

                dc.DrawRectangleRect(buttonRect)

            if bUseYcoord:
                rect = wx.Rect(0, pos, rectWidth, rectWidth)
            else:
                rect = wx.Rect(pos, 0, rectWidth, rectWidth)

            # Incase user set both flags:
            # INB_SHOW_ONLY_TEXT and INB_SHOW_ONLY_IMAGES
            # We override them to display both
            if style & INB_SHOW_ONLY_TEXT and style & INB_SHOW_ONLY_IMAGES:

                style ^= INB_SHOW_ONLY_TEXT
                style ^= INB_SHOW_ONLY_IMAGES
                self.GetParent().SetAGWWindowStyleFlag(style)

            # Draw the caption and text
            imgTopPadding = 10
            if not style & INB_SHOW_ONLY_TEXT and self._pagesInfoVec[i].GetImageIndex() != -1:

                if bUseYcoord:

                    imgXcoord = self._nImgSize / 2
                    imgYcoord = (style & INB_SHOW_ONLY_IMAGES and [pos + self._nImgSize / 2] or [pos + imgTopPadding])[0]

                else:

                    imgXcoord = pos + (rectWidth / 2) - (self._nImgSize / 2)
                    imgYcoord = (style & INB_SHOW_ONLY_IMAGES and [self._nImgSize / 2] or [imgTopPadding])[0]

                self._ImageList.Draw(self._pagesInfoVec[i].GetImageIndex(), dc,
                                     imgXcoord, imgYcoord,
                                     wx.IMAGELIST_DRAW_TRANSPARENT, True)

            # Draw the text
            if not style & INB_SHOW_ONLY_IMAGES and not self._pagesInfoVec[i].GetCaption() == "":

                dc.SetFont(normalFont)

                # Check if the text can fit the size of the rectangle,
                # if not truncate it
                fixedText = self._pagesInfoVec[i].GetCaption()
                if not style & INB_FIT_BUTTON or (style & INB_LEFT or (style & INB_RIGHT)):

                    fixedText = self.FixTextSize(dc, self._pagesInfoVec[i].GetCaption(), self._nImgSize *2 - 4)

                    # Update the length of the text
                    textWidth, textHeight = dc.GetTextExtent(fixedText)

                if bUseYcoord:

                    textOffsetX = ((rectWidth - textWidth) / 2 )
                    textOffsetY = (not style & INB_SHOW_ONLY_TEXT and [pos + self._nImgSize + imgTopPadding + 3] or \
                                   [pos + ((self._nImgSize * 2 - textHeight) / 2 )])[0]

                else:

                    textOffsetX = (rectWidth - textWidth) / 2 + pos + nTextPaddingLeft
                    textOffsetY = (not style & INB_SHOW_ONLY_TEXT and [self._nImgSize + imgTopPadding + 3] or \
                                   [((self._nImgSize * 2 - textHeight) / 2 )])[0]

                dc.SetTextForeground(wx.SystemSettings_GetColour(wx.SYS_COLOUR_WINDOWTEXT))
                dc.DrawText(fixedText, textOffsetX, textOffsetY)

            # Update the page info
            self._pagesInfoVec[i].SetPosition(buttonRect.GetPosition())
            self._pagesInfoVec[i].SetSize(buttonRect.GetSize())

            pos += rectWidth

        # Update all buttons that can not fit into the screen as non-visible
        for ii in xrange(count, len(self._pagesInfoVec)):
            self._pagesInfoVec[ii].SetPosition(wx.Point(-1, -1))

        # Draw the pin button
        if bUsePin:

            clientRect = self.GetClientRect()
            pinRect = wx.Rect(clientRect.GetX() + clientRect.GetWidth() - 20, 2, 20, 20)
            self.DrawPin(dc, pinRect, not self._bCollapsed)
# ---------------------------------------------------------------------------- #
# Class LabelContainer
# ---------------------------------------------------------------------------- #
class LabelContainer(ImageContainerBase):
""" Base class for L{LabelBook}. """
    def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize,
                 style=0, agwStyle=0, name="LabelContainer"):
        """
        Default class constructor.

        :param `parent`: parent window. Must not be ``None``;
        :param `id`: window identifier. A value of -1 indicates a default value;
        :param `pos`: the control position. A value of (-1, -1) indicates a default position,
         chosen by either the windowing system or wxPython, depending on platform;
        :param `size`: the control size. A value of (-1, -1) indicates a default size,
         chosen by either the windowing system or wxPython, depending on platform;
        :param `style`: the underlying `wx.Panel` window style;
        :param `agwStyle`: the AGW-specific window style. This can be a combination of the
         following bits:

         =========================== =========== ==================================================
         Window Styles               Hex Value   Description
         =========================== =========== ==================================================
         ``INB_BOTTOM``                      0x1 Place labels below the page area. Available only for L{FlatImageBook}.
         ``INB_LEFT``                        0x2 Place labels on the left side. Available only for L{FlatImageBook}.
         ``INB_RIGHT``                       0x4 Place labels on the right side.
         ``INB_TOP``                         0x8 Place labels above the page area.
         ``INB_BORDER``                     0x10 Draws a border around L{LabelBook} or L{FlatImageBook}.
         ``INB_SHOW_ONLY_TEXT``             0x20 Shows only text labels and no images. Available only for L{LabelBook}.
         ``INB_SHOW_ONLY_IMAGES``           0x40 Shows only tab images and no label texts. Available only for L{LabelBook}.
         ``INB_FIT_BUTTON``                 0x80 Displays a pin button to show/hide the book control.
         ``INB_DRAW_SHADOW``               0x100 Draw shadows below the book tabs. Available only for L{LabelBook}.
         ``INB_USE_PIN_BUTTON``            0x200 Displays a pin button to show/hide the book control.
         ``INB_GRADIENT_BACKGROUND``       0x400 Draws a gradient shading on the tabs background. Available only for L{LabelBook}.
         ``INB_WEB_HILITE``                0x800 On mouse hovering, tabs behave like html hyperlinks. Available only for L{LabelBook}.
         ``INB_NO_RESIZE``                0x1000 Don't allow resizing of the tab area.
         ``INB_FIT_LABELTEXT``            0x2000 Will fit the tab area to the longest text (or text+image if you have images) in all the tabs.
         =========================== =========== ==================================================

        :param `name`: the window name.
        """

        ImageContainerBase.__init__(self, parent, id, pos, size, style, agwStyle, name)
        self._nTabAreaWidth = 100       # current width of the tab area, in pixels
        self._oldCursor = wx.NullCursor # cursor saved while the resize (EW) cursor is shown
        self._coloursMap = {}           # INB_*_COLOUR key -> wx.Colour
        self._skin = wx.NullBitmap      # optional background bitmap (tiled when smaller than the client)
        self._sashRect = wx.Rect()      # screen rect of the drag sash; empty when not dragging

        self.Bind(wx.EVT_PAINT, self.OnPaint)
        self.Bind(wx.EVT_SIZE, self.OnSize)
        self.Bind(wx.EVT_LEFT_DOWN, self.OnMouseLeftDown)
        self.Bind(wx.EVT_LEFT_UP, self.OnMouseLeftUp)
        self.Bind(wx.EVT_MOTION, self.OnMouseMove)
        self.Bind(wx.EVT_LEAVE_WINDOW, self.OnMouseLeaveWindow)
        self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)
def OnSize(self, event):
"""
Handles the ``wx.EVT_SIZE`` event for L{LabelContainer}.
:param `event`: a `wx.SizeEvent` event to be processed.
"""
ImageContainerBase.OnSize(self, event)
event.Skip()
def OnEraseBackground(self, event):
"""
Handles the ``wx.EVT_ERASE_BACKGROUND`` event for L{LabelContainer}.
:param `event`: a `wx.EraseEvent` event to be processed.
"""
ImageContainerBase.OnEraseBackground(self, event)
def GetTabAreaWidth(self):
""" Returns the width of the tab area. """
return self._nTabAreaWidth
def SetTabAreaWidth(self, width):
"""
Sets the width of the tab area.
:param `width`: the width of the tab area, in pixels.
"""
self._nTabAreaWidth = width
def CanDoBottomStyle(self):
"""
Allows the parent to examine the children type. Some implementation
(such as L{LabelBook}), does not support top/bottom images, only left/right.
"""
return False
def SetBackgroundBitmap(self, bmp):
"""
Sets the background bitmap for the control.
:param `bmp`: a valid `wx.Bitmap` object.
"""
self._skin = bmp
def OnPaint(self, event):
"""
Handles the ``wx.EVT_PAINT`` event for L{LabelContainer}.
:param `event`: a `wx.PaintEvent` event to be processed.
"""
style = self.GetParent().GetAGWWindowStyleFlag()
dc = wx.BufferedPaintDC(self)
backBrush = wx.Brush(self._coloursMap[INB_TAB_AREA_BACKGROUND_COLOUR])
if self.HasAGWFlag(INB_BORDER):
borderPen = wx.Pen(self._coloursMap[INB_TABS_BORDER_COLOUR])
else:
borderPen = wx.TRANSPARENT_PEN
size = self.GetSize()
# Set the pen & brush
dc.SetBrush(backBrush)
dc.SetPen(borderPen)
# Incase user set both flags, we override them to display both
# INB_SHOW_ONLY_TEXT and INB_SHOW_ONLY_IMAGES
if style & INB_SHOW_ONLY_TEXT and style & INB_SHOW_ONLY_IMAGES:
style ^= INB_SHOW_ONLY_TEXT
style ^= INB_SHOW_ONLY_IMAGES
self.GetParent().SetAGWWindowStyleFlag(style)
if self.HasAGWFlag(INB_GRADIENT_BACKGROUND) and not self._skin.Ok():
# Draw graident in the background area
startColour = self._coloursMap[INB_TAB_AREA_BACKGROUND_COLOUR]
endColour = ArtManager.Get().LightColour(self._coloursMap[INB_TAB_AREA_BACKGROUND_COLOUR], 50)
ArtManager.Get().PaintStraightGradientBox(dc, wx.Rect(0, 0, size.x / 2, size.y), startColour, endColour, False)
ArtManager.Get().PaintStraightGradientBox(dc, wx.Rect(size.x / 2, 0, size.x / 2, size.y), endColour, startColour, False)
else:
# Draw the border and background
if self._skin.Ok():
dc.SetBrush(wx.TRANSPARENT_BRUSH)
self.DrawBackgroundBitmap(dc)
dc.DrawRectangleRect(wx.Rect(0, 0, size.x, size.y))
# Draw border
if self.HasAGWFlag(INB_BORDER) and self.HasAGWFlag(INB_GRADIENT_BACKGROUND):
# Just draw the border with transparent brush
dc.SetBrush(wx.TRANSPARENT_BRUSH)
dc.DrawRectangleRect(wx.Rect(0, 0, size.x, size.y))
bUsePin = (self.HasAGWFlag(INB_USE_PIN_BUTTON) and [True] or [False])[0]
if bUsePin:
# Draw the pin button
clientRect = self.GetClientRect()
pinRect = wx.Rect(clientRect.GetX() + clientRect.GetWidth() - 20, 2, 20, 20)
self.DrawPin(dc, pinRect, not self._bCollapsed)
if self._bCollapsed:
return
dc.SetPen(wx.BLACK_PEN)
self.SetSizeHints(self._nTabAreaWidth, -1)
# We reserve 20 pixels for the pin button
posy = 20
count = 0
for i in xrange(len(self._pagesInfoVec)):
count = count+1
# Default values for the surronounding rectangle
# around a button
rectWidth = self._nTabAreaWidth
if self.HasAGWFlag(INB_SHOW_ONLY_TEXT):
font = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT)
font.SetPointSize(font.GetPointSize() * self.GetParent().GetFontSizeMultiple())
if self.GetParent().GetFontBold():
font.SetWeight(wx.FONTWEIGHT_BOLD)
dc.SetFont(font)
w, h = dc.GetTextExtent(self._pagesInfoVec[i].GetCaption())
rectHeight = h * 2
else:
rectHeight = self._nImgSize * 2
# Check that we have enough space to draw the button
if posy + rectHeight > size.GetHeight():
break
# Calculate the button rectangle
posx = 0
buttonRect = wx.Rect(posx, posy, rectWidth, rectHeight)
indx = self._pagesInfoVec[i].GetImageIndex()
if indx == -1:
bmp = wx.NullBitmap
else:
bmp = self._ImageList.GetBitmap(indx)
self.DrawLabel(dc, buttonRect, self._pagesInfoVec[i].GetCaption(), bmp,
self._pagesInfoVec[i], self.HasAGWFlag(INB_LEFT) or self.HasAGWFlag(INB_TOP),
i, self._nIndex == i, self._nHoeveredImgIdx == i)
posy += rectHeight
# Update all buttons that can not fit into the screen as non-visible
for ii in xrange(count, len(self._pagesInfoVec)):
self._pagesInfoVec[i].SetPosition(wx.Point(-1, -1))
if bUsePin:
clientRect = self.GetClientRect()
pinRect = wx.Rect(clientRect.GetX() + clientRect.GetWidth() - 20, 2, 20, 20)
self.DrawPin(dc, pinRect, not self._bCollapsed)
    def DrawBackgroundBitmap(self, dc):
        """
        Draws a bitmap as the background of the control.

        If the skin bitmap is smaller than the client area it is tiled across
        the whole client rectangle via an off-screen memory DC; otherwise it
        is blitted directly.

        :param `dc`: an instance of `wx.DC`.
        """

        clientRect = self.GetClientRect()
        width = clientRect.GetWidth()
        height = clientRect.GetHeight()
        coveredY = coveredX = 0
        xstep = self._skin.GetWidth()
        ystep = self._skin.GetHeight()
        bmpRect = wx.Rect(0, 0, xstep, ystep)
        if bmpRect != clientRect:

            # Tile the skin row by row into a temporary bitmap, then draw it
            # in one go to avoid flicker.
            mem_dc = wx.MemoryDC()
            bmp = wx.EmptyBitmap(width, height)
            mem_dc.SelectObject(bmp)

            while coveredY < height:

                while coveredX < width:

                    mem_dc.DrawBitmap(self._skin, coveredX, coveredY, True)
                    coveredX += xstep

                # Reset the column and advance to the next row of tiles.
                coveredX = 0
                coveredY += ystep

            mem_dc.SelectObject(wx.NullBitmap)
            #self._skin = bmp
            dc.DrawBitmap(bmp, 0, 0)

        else:

            dc.DrawBitmap(self._skin, 0, 0)
    def OnMouseLeftUp(self, event):
        """
        Handles the ``wx.EVT_LEFT_UP`` event for L{LabelContainer}.

        Completes a sash drag (resizing the tab area) when one is in
        progress; otherwise defers to the base class tab-click handling.

        :param `event`: a `wx.MouseEvent` event to be processed.
        """

        if self.HasAGWFlag(INB_NO_RESIZE):

            ImageContainerBase.OnMouseLeftUp(self, event)
            return

        if self.HasCapture():
            self.ReleaseMouse()

        # Sash was being dragged?
        if not self._sashRect.IsEmpty():

            # Remove sash
            # NOTE(review): DrawDragSash appears to XOR-draw, so calling it
            # again over the same rect erases it -- confirm in ArtManager.
            ArtManager.Get().DrawDragSash(self._sashRect)
            self.Resize(event)

            self._sashRect = wx.Rect()
            return

        self._sashRect = wx.Rect()

        # Restore cursor
        if self._oldCursor.Ok():

            wx.SetCursor(self._oldCursor)
            self._oldCursor = wx.NullCursor

        ImageContainerBase.OnMouseLeftUp(self, event)
    def Resize(self, event):
        """
        Actually resizes the tab area.

        The new tab-area width is derived from the mouse x position where the
        sash drag ended, clamped to a 100-pixel minimum, and the parent
        layout is refreshed.

        :param `event`: an instance of `wx.SizeEvent`.
        """

        # Resize our size
        self._tabAreaSize = self.GetSize()
        newWidth = self._tabAreaSize.x
        x = event.GetX()

        if self.HasAGWFlag(INB_BOTTOM) or self.HasAGWFlag(INB_RIGHT):
            # Tabs on the right: dragging left increases the width.
            newWidth -= event.GetX()
        else:
            newWidth = x

        if newWidth < 100: # Dont allow width to be lower than that
            newWidth = 100

        self.SetSizeHints(newWidth, self._tabAreaSize.y)

        # Update the tab new area width
        self._nTabAreaWidth = newWidth

        # Freeze/Thaw around the relayout to avoid flicker.
        self.GetParent().Freeze()
        self.GetParent().GetSizer().Layout()
        self.GetParent().Thaw()
    def OnMouseMove(self, event):
        """
        Handles the ``wx.EVT_MOTION`` event for L{LabelContainer}.

        Drives the sash-drag state machine: while the left button is down and
        a drag is active, the sash is redrawn at the new mouse position;
        otherwise the cursor is switched to the EW resize cursor when
        hovering the sash and restored when leaving it.

        :param `event`: a `wx.MouseEvent` event to be processed.
        """

        if self.HasAGWFlag(INB_NO_RESIZE):

            ImageContainerBase.OnMouseMove(self, event)
            return

        # Remove old sash
        # NOTE(review): presumably an XOR draw, so this erases the previous
        # sash position -- confirm in ArtManager.
        if not self._sashRect.IsEmpty():
            ArtManager.Get().DrawDragSash(self._sashRect)

        if event.LeftIsDown():

            if not self._sashRect.IsEmpty():

                # Progress sash, and redraw it
                clientRect = self.GetClientRect()
                pt = self.ClientToScreen(wx.Point(event.GetX(), 0))
                self._sashRect = wx.RectPS(pt, wx.Size(4, clientRect.height))
                ArtManager.Get().DrawDragSash(self._sashRect)

            else:

                # Sash is not being dragged
                if self._oldCursor.Ok():

                    wx.SetCursor(self._oldCursor)
                    self._oldCursor = wx.NullCursor

        else:

            if self.HasCapture():
                self.ReleaseMouse()

            if self.PointOnSash(event.GetPosition()):

                # Change cursor to EW cursor
                self._oldCursor = self.GetCursor()
                wx.SetCursor(wx.StockCursor(wx.CURSOR_SIZEWE))

            elif self._oldCursor.Ok():

                wx.SetCursor(self._oldCursor)
                self._oldCursor = wx.NullCursor

            self._sashRect = wx.Rect()
            ImageContainerBase.OnMouseMove(self, event)
    def OnMouseLeftDown(self, event):
        """
        Handles the ``wx.EVT_LEFT_DOWN`` event for L{LabelContainer}.

        If the click lands on the sash border (and resizing is allowed), a
        sash drag is started: the mouse is captured, the cursor switched to
        the EW resize cursor, and the initial sash is drawn.

        :param `event`: a `wx.MouseEvent` event to be processed.
        """

        if self.HasAGWFlag(INB_NO_RESIZE):

            ImageContainerBase.OnMouseLeftDown(self, event)
            return

        imgIdx, where = self.HitTest(event.GetPosition())

        if IMG_OVER_EW_BORDER == where and not self._bCollapsed:

            # We are over the sash
            if not self._sashRect.IsEmpty():
                ArtManager.Get().DrawDragSash(self._sashRect)
            else:
                # first time, begin drawing sash
                self.CaptureMouse()

                # Change mouse cursor
                self._oldCursor = self.GetCursor()
                wx.SetCursor(wx.StockCursor(wx.CURSOR_SIZEWE))

            clientRect = self.GetClientRect()
            pt = self.ClientToScreen(wx.Point(event.GetX(), 0))
            self._sashRect = wx.RectPS(pt, wx.Size(4, clientRect.height))

            ArtManager.Get().DrawDragSash(self._sashRect)

        else:
            ImageContainerBase.OnMouseLeftDown(self, event)
def OnMouseLeaveWindow(self, event):
"""
Handles the ``wx.EVT_LEAVE_WINDOW`` event for L{LabelContainer}.
:param `event`: a `wx.MouseEvent` event to be processed.
"""
if self.HasAGWFlag(INB_NO_RESIZE):
ImageContainerBase.OnMouseLeaveWindow(self, event)
return
# If Sash is being dragged, ignore this event
if not self.HasCapture():
ImageContainerBase.OnMouseLeaveWindow(self, event)
    def DrawRegularHover(self, dc, rect):
        """
        Draws a rounded rectangle around the current tab.

        The white edges are drawn on the sides facing away from the page
        area, and the bottom edge uses the tabs border colour; which sides
        get which colour depends on whether the tabs sit left or right.

        :param `dc`: an instance of `wx.DC`;
        :param `rect`: the current tab client rectangle.
        """

        # The hovered tab with default border
        dc.SetBrush(wx.TRANSPARENT_BRUSH)
        dc.SetPen(wx.Pen(wx.WHITE))

        # We draw CCW
        if self.HasAGWFlag(INB_RIGHT) or self.HasAGWFlag(INB_TOP):

            # Right images
            # Upper line
            dc.DrawLine(rect.x + 1, rect.y, rect.x + rect.width, rect.y)

            # Right line (white)
            dc.DrawLine(rect.x + rect.width, rect.y, rect.x + rect.width, rect.y + rect.height)

            # Bottom diagnol - we change pen
            dc.SetPen(wx.Pen(self._coloursMap[INB_TABS_BORDER_COLOUR]))

            # Bottom line
            dc.DrawLine(rect.x + rect.width, rect.y + rect.height, rect.x, rect.y + rect.height)

        else:

            # Left images
            # Upper line white
            dc.DrawLine(rect.x, rect.y, rect.x + rect.width - 1, rect.y)

            # Left line
            dc.DrawLine(rect.x, rect.y, rect.x, rect.y + rect.height)

            # Bottom diagnol, we change the pen
            dc.SetPen(wx.Pen(self._coloursMap[INB_TABS_BORDER_COLOUR]))

            # Bottom line
            dc.DrawLine(rect.x, rect.y + rect.height, rect.x + rect.width, rect.y + rect.height)
def DrawWebHover(self, dc, caption, xCoord, yCoord):
"""
Draws a web style hover effect (cursor set to hand & text is underlined).
:param `dc`: an instance of `wx.DC`;
:param `caption`: the tab caption text;
:param `xCoord`: the x position of the tab caption;
:param `yCoord`: the y position of the tab caption.
"""
# Redraw the text with underlined font
underLinedFont = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT)
underLinedFont.SetPointSize(underLinedFont.GetPointSize() * self.GetParent().GetFontSizeMultiple())
if self.GetParent().GetFontBold():
underLinedFont.SetWeight(wx.FONTWEIGHT_BOLD)
underLinedFont.SetUnderlined(True)
dc.SetFont(underLinedFont)
dc.DrawText(caption, xCoord, yCoord)
def SetColour(self, which, colour):
"""
Sets a colour for a parameter.
:param `which`: can be one of the following parameters:
================================== ======= ==================================
Colour Key Value Description
================================== ======= ==================================
``INB_TAB_AREA_BACKGROUND_COLOUR`` 100 The tab area background colour
``INB_ACTIVE_TAB_COLOUR`` 101 The active tab background colour
``INB_TABS_BORDER_COLOUR`` 102 The tabs border colour
``INB_TEXT_COLOUR`` 103 The tab caption text colour
``INB_ACTIVE_TEXT_COLOUR`` 104 The active tab caption text colour
``INB_HILITE_TAB_COLOUR`` 105 The tab caption highlight text colour
================================== ======= ==================================
:param `colour`: a valid `wx.Colour` object.
"""
self._coloursMap[which] = colour
def GetColour(self, which):
"""
Returns a colour for a parameter.
:param `which`: the colour key.
:see: L{SetColour} for a list of valid colour keys.
"""
if not self._coloursMap.has_key(which):
return wx.Colour()
return self._coloursMap[which]
def InitializeColours(self):
""" Initializes the colours map to be used for this control. """
# Initialize map colours
self._coloursMap.update({INB_TAB_AREA_BACKGROUND_COLOUR: ArtManager.Get().LightColour(ArtManager.Get().FrameColour(), 50)})
self._coloursMap.update({INB_ACTIVE_TAB_COLOUR: ArtManager.Get().GetMenuFaceColour()})
self._coloursMap.update({INB_TABS_BORDER_COLOUR: wx.SystemSettings_GetColour(wx.SYS_COLOUR_3DSHADOW)})
self._coloursMap.update({INB_HILITE_TAB_COLOUR: wx.NamedColour("LIGHT BLUE")})
self._coloursMap.update({INB_TEXT_COLOUR: wx.WHITE})
self._coloursMap.update({INB_ACTIVE_TEXT_COLOUR: wx.BLACK})
# dont allow bright colour one on the other
if not ArtManager.Get().IsDark(self._coloursMap[INB_TAB_AREA_BACKGROUND_COLOUR]) and \
not ArtManager.Get().IsDark(self._coloursMap[INB_TEXT_COLOUR]):
self._coloursMap[INB_TEXT_COLOUR] = ArtManager.Get().DarkColour(self._coloursMap[INB_TEXT_COLOUR], 100)
    def DrawLabel(self, dc, rect, text, bmp, imgInfo, orientationLeft, imgIdx, selected, hover):
        """
        Draws a label using the specified dc.

        Lays out the tab as ``[ nPadding | Image | nPadding | Text | nPadding ]``,
        draws the selection rounded rectangle, the bitmap, the (possibly
        truncated) caption, an optional drop shadow and the hover effect, and
        finally records the tab's position/size into `imgInfo` for hit-testing.

        :param `dc`: an instance of `wx.DC`;
        :param `rect`: the text client rectangle;
        :param `text`: the actual text string;
        :param `bmp`: a bitmap to be drawn next to the text;
        :param `imgInfo`: an instance of L{ImageInfo};
        :param `orientationLeft`: ``True`` if the book has the ``INB_RIGHT`` or ``INB_LEFT``
         style set;
        :param `imgIdx`: the tab image index;
        :param `selected`: ``True`` if the tab is selected, ``False`` otherwise;
        :param `hover`: ``True`` if the tab is being hovered with the mouse, ``False`` otherwise.
        """

        # DCSaver restores the DC pen/brush/font state when it goes away.
        dcsaver = DCSaver(dc)
        nPadding = 6

        if orientationLeft:

            rect.x += nPadding
            rect.width -= nPadding

        else:

            rect.width -= nPadding

        textRect = wx.Rect(*rect)
        imgRect = wx.Rect(*rect)

        font = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT)
        font.SetPointSize(font.GetPointSize() * self.GetParent().GetFontSizeMultiple())

        if self.GetParent().GetFontBold():
            font.SetWeight(wx.FONTWEIGHT_BOLD)

        dc.SetFont(font)

        # First we define the rectangle for the text
        w, h = dc.GetTextExtent(text)

        #-------------------------------------------------------------------------
        # Label layout:
        # [ nPadding | Image | nPadding | Text | nPadding ]
        #-------------------------------------------------------------------------

        # Text bounding rectangle
        textRect.x += nPadding
        textRect.y = rect.y + (rect.height - h)/2
        textRect.width = rect.width - 2 * nPadding

        if bmp.Ok() and not self.HasAGWFlag(INB_SHOW_ONLY_TEXT):
            textRect.x += (bmp.GetWidth() + nPadding)
            textRect.width -= (bmp.GetWidth() + nPadding)

        textRect.height = h

        # Truncate text if needed
        caption = ArtManager.Get().TruncateText(dc, text, textRect.width)

        # Image bounding rectangle
        if bmp.Ok() and not self.HasAGWFlag(INB_SHOW_ONLY_TEXT):

            imgRect.x += nPadding
            imgRect.width = bmp.GetWidth()
            imgRect.y = rect.y + (rect.height - bmp.GetHeight())/2
            imgRect.height = bmp.GetHeight()

        # Draw bounding rectangle
        if selected:

            # First we colour the tab
            dc.SetBrush(wx.Brush(self._coloursMap[INB_ACTIVE_TAB_COLOUR]))

            if self.HasAGWFlag(INB_BORDER):
                dc.SetPen(wx.Pen(self._coloursMap[INB_TABS_BORDER_COLOUR]))
            else:
                dc.SetPen(wx.Pen(self._coloursMap[INB_ACTIVE_TAB_COLOUR]))

            labelRect = wx.Rect(*rect)

            # Extend the tab rect towards the page area so the rounded corner
            # on that side is clipped away.
            if orientationLeft:
                labelRect.width += 3
            else:
                labelRect.width += 3
                labelRect.x -= 3

            dc.DrawRoundedRectangleRect(labelRect, 3)

            if not orientationLeft and self.HasAGWFlag(INB_DRAW_SHADOW):
                dc.SetPen(wx.BLACK_PEN)
                dc.DrawPoint(labelRect.x + labelRect.width - 1, labelRect.y + labelRect.height - 1)

        # Draw the text & bitmap
        if caption != "":

            if selected:
                dc.SetTextForeground(self._coloursMap[INB_ACTIVE_TEXT_COLOUR])
            else:
                dc.SetTextForeground(self._coloursMap[INB_TEXT_COLOUR])

            dc.DrawText(caption, textRect.x, textRect.y)
            imgInfo.SetTextRect(textRect)

        else:

            imgInfo.SetTextRect(wx.Rect())

        if bmp.Ok() and not self.HasAGWFlag(INB_SHOW_ONLY_TEXT):
            dc.DrawBitmap(bmp, imgRect.x, imgRect.y, True)

        # Drop shadow
        if self.HasAGWFlag(INB_DRAW_SHADOW) and selected:

            sstyle = 0
            if orientationLeft:
                sstyle = BottomShadow
            else:
                sstyle = BottomShadowFull | RightShadow

            if self.HasAGWFlag(INB_WEB_HILITE):

                # Always drop shadow for this style
                ArtManager.Get().DrawBitmapShadow(dc, rect, sstyle)

            else:

                if imgIdx+1 != self._nHoeveredImgIdx:
                    ArtManager.Get().DrawBitmapShadow(dc, rect, sstyle)

        # Draw hover effect
        if hover:

            if self.HasAGWFlag(INB_WEB_HILITE) and caption != "":
                self.DrawWebHover(dc, caption, textRect.x, textRect.y)
            else:
                self.DrawRegularHover(dc, rect)

        # Update the page information bout position and size
        imgInfo.SetPosition(rect.GetPosition())
        imgInfo.SetSize(rect.GetSize())
# ---------------------------------------------------------------------------- #
# Class FlatBookBase
# ---------------------------------------------------------------------------- #
class FlatBookBase(wx.Panel):
""" Base class for the containing window for L{LabelBook} and L{FlatImageBook}. """
def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize,
style=0, agwStyle=0, name="FlatBookBase"):
"""
Default class constructor.
:param `parent`: parent window. Must not be ``None``;
:param `id`: window identifier. A value of -1 indicates a default value;
:param `pos`: the control position. A value of (-1, -1) indicates a default position,
chosen by either the windowing system or wxPython, depending on platform;
:param `size`: the control size. A value of (-1, -1) indicates a default size,
chosen by either the windowing system or wxPython, depending on platform;
:param `style`: the underlying `wx.Panel` window style;
:param `agwStyle`: the AGW-specific window style. This can be a combination of the
following bits:
=========================== =========== ==================================================
Window Styles Hex Value Description
=========================== =========== ==================================================
``INB_BOTTOM`` 0x1 Place labels below the page area. Available only for L{FlatImageBook}.
``INB_LEFT`` 0x2 Place labels on the left side. Available only for L{FlatImageBook}.
``INB_RIGHT`` 0x4 Place labels on the right side.
``INB_TOP`` 0x8 Place labels above the page area.
``INB_BORDER`` 0x10 Draws a border around L{LabelBook} or L{FlatImageBook}.
``INB_SHOW_ONLY_TEXT`` 0x20 Shows only text labels and no images. Available only for L{LabelBook}.
``INB_SHOW_ONLY_IMAGES`` 0x40 Shows only tab images and no label texts. Available only for L{LabelBook}.
``INB_FIT_BUTTON`` 0x80 Displays a pin button to show/hide the book control.
``INB_DRAW_SHADOW`` 0x100 Draw shadows below the book tabs. Available only for L{LabelBook}.
``INB_USE_PIN_BUTTON`` 0x200 Displays a pin button to show/hide the book control.
``INB_GRADIENT_BACKGROUND`` 0x400 Draws a gradient shading on the tabs background. Available only for L{LabelBook}.
``INB_WEB_HILITE`` 0x800 On mouse hovering, tabs behave like html hyperlinks. Available only for L{LabelBook}.
``INB_NO_RESIZE`` 0x1000 Don't allow resizing of the tab area.
``INB_FIT_LABELTEXT`` 0x2000 Will fit the tab area to the longest text (or text+image if you have images) in all the tabs.
=========================== =========== ==================================================
:param `name`: the window name.
"""
self._pages = None
self._bInitializing = True
self._pages = None
self._bForceSelection = False
self._windows = []
self._fontSizeMultiple = 1.0
self._fontBold = False
style |= wx.TAB_TRAVERSAL
self._agwStyle = agwStyle
wx.Panel.__init__(self, parent, id, pos, size, style, name)
self._bInitializing = False
    def SetAGWWindowStyleFlag(self, agwStyle):
        """
        Sets the window style.

        Rebuilds the main sizer with the orientation implied by the new
        style, re-attaches the tab container and the selected page, and
        refreshes the layout. Does nothing while the control is still being
        constructed or before the tab container exists.

        :param `agwStyle`: can be a combination of the following bits:

         =========================== =========== ==================================================
         Window Styles               Hex Value   Description
         =========================== =========== ==================================================
         ``INB_BOTTOM``                      0x1 Place labels below the page area. Available only for L{FlatImageBook}.
         ``INB_LEFT``                        0x2 Place labels on the left side. Available only for L{FlatImageBook}.
         ``INB_RIGHT``                       0x4 Place labels on the right side.
         ``INB_TOP``                         0x8 Place labels above the page area.
         ``INB_BORDER``                     0x10 Draws a border around L{LabelBook} or L{FlatImageBook}.
         ``INB_SHOW_ONLY_TEXT``             0x20 Shows only text labels and no images. Available only for L{LabelBook}.
         ``INB_SHOW_ONLY_IMAGES``           0x40 Shows only tab images and no label texts. Available only for L{LabelBook}.
         ``INB_FIT_BUTTON``                 0x80 Displays a pin button to show/hide the book control.
         ``INB_DRAW_SHADOW``               0x100 Draw shadows below the book tabs. Available only for L{LabelBook}.
         ``INB_USE_PIN_BUTTON``            0x200 Displays a pin button to show/hide the book control.
         ``INB_GRADIENT_BACKGROUND``       0x400 Draws a gradient shading on the tabs background. Available only for L{LabelBook}.
         ``INB_WEB_HILITE``                0x800 On mouse hovering, tabs behave like html hyperlinks. Available only for L{LabelBook}.
         ``INB_NO_RESIZE``                0x1000 Don't allow resizing of the tab area.
         ``INB_FIT_LABELTEXT``            0x2000 Will fit the tab area to the longest text (or text+image if you have images) in all the tabs.
         =========================== =========== ==================================================
        """

        self._agwStyle = agwStyle

        # Check that we are not in initialization process
        if self._bInitializing:
            return

        if not self._pages:
            return

        # Detach the windows attached to the sizer
        if self.GetSelection() >= 0:
            self._mainSizer.Detach(self._windows[self.GetSelection()])

        self._mainSizer.Detach(self._pages)

        # Create new sizer with the requested orientaion
        className = self.GetName()

        if className == "LabelBook":
            self._mainSizer = wx.BoxSizer(wx.HORIZONTAL)
        else:
            if agwStyle & INB_LEFT or agwStyle & INB_RIGHT:
                self._mainSizer = wx.BoxSizer(wx.HORIZONTAL)
            else:
                self._mainSizer = wx.BoxSizer(wx.VERTICAL)

        self.SetSizer(self._mainSizer)

        # Add the tab container and the separator
        self._mainSizer.Add(self._pages, 0, wx.EXPAND)

        if className == "FlatImageBook":

            if agwStyle & INB_LEFT or agwStyle & INB_RIGHT:
                self._pages.SetSizeHints(self._pages._nImgSize * 2, -1)
            else:
                self._pages.SetSizeHints(-1, self._pages._nImgSize * 2)

        # Attach the windows back to the sizer to the sizer
        if self.GetSelection() >= 0:
            self.DoSetSelection(self._windows[self.GetSelection()])

        if agwStyle & INB_FIT_LABELTEXT:
            self.ResizeTabArea()

        self._mainSizer.Layout()
        # Kick a size event so children recompute their layout.
        dummy = wx.SizeEvent()
        wx.PostEvent(self, dummy)
        self._pages.Refresh()
def GetAGWWindowStyleFlag(self):
"""
Returns the L{FlatBookBase} window style.
:see: L{SetAGWWindowStyleFlag} for a list of possible window style flags.
"""
return self._agwStyle
def HasAGWFlag(self, flag):
"""
Returns whether a flag is present in the L{FlatBookBase} style.
:param `flag`: one of the possible L{FlatBookBase} window styles.
:see: L{SetAGWWindowStyleFlag} for a list of possible window style flags.
"""
agwStyle = self.GetAGWWindowStyleFlag()
res = (agwStyle & flag and [True] or [False])[0]
return res
    def AddPage(self, page, text, select=False, imageId=-1):
        """
        Adds a page to the book.

        :param `page`: the new page (a `wx.Window` instance);
        :param `text`: specifies the text for the new page;
        :param `select`: specifies whether the page should be selected;
        :param `imageId`: specifies the optional image index for the new page.

        :note: The call to this function generates the page changing events.
        """

        if not page:
            return

        page.Reparent(self)

        self._windows.append(page)

        # The very first page added is always shown, regardless of `select`.
        if select or len(self._windows) == 1:
            self.DoSetSelection(page)
        else:
            page.Hide()

        # Mirror the page in the tab container, then adapt the tab area size
        self._pages.AddPage(text, select, imageId)
        self.ResizeTabArea()
        self.Refresh()
    def InsertPage(self, page_idx, page, text, select=False, imageId=-1):
        """
        Inserts a page into the book at the specified position.

        :param `page_idx`: specifies the position for the new page;
        :param `page`: the new page (a `wx.Window` instance);
        :param `text`: specifies the text for the new page;
        :param `select`: specifies whether the page should be selected;
        :param `imageId`: specifies the optional image index for the new page.

        :note: The call to this function generates the page changing events.
        """

        if not page:
            return

        page.Reparent(self)

        self._windows.insert(page_idx, page)

        # The very first page added is always shown, regardless of `select`.
        if select or len(self._windows) == 1:
            self.DoSetSelection(page)
        else:
            page.Hide()

        # Mirror the page in the tab container, then adapt the tab area size
        self._pages.InsertPage(page_idx, text, select, imageId)
        self.ResizeTabArea()
        self.Refresh()
    def DeletePage(self, page):
        """
        Deletes the specified page, and the associated window.

        :param `page`: an integer specifying the page to be deleted.

        :note: The call to this function generates the page changing events.
        :note: Returns ``False`` if the closing event was vetoed, otherwise
         ``None`` — historically inconsistent with L{RemovePage}, which returns
         ``True`` on success; kept for backward compatibility.
        """

        if page >= len(self._windows) or page < 0:
            return

        # Fire a closing event; the handler may veto the deletion
        event = ImageNotebookEvent(wxEVT_IMAGENOTEBOOK_PAGE_CLOSING, self.GetId())
        event.SetSelection(page)
        event.SetEventObject(self)
        self.GetEventHandler().ProcessEvent(event)

        # The event handler allows it?
        if not event.IsAllowed():
            return False

        self.Freeze()

        # Delete the requested page
        pageRemoved = self._windows[page]

        # If the page is the current window, remove it from the sizer
        # as well
        if page == self.GetSelection():
            self._mainSizer.Detach(pageRemoved)

        # Remove it from the array as well
        self._windows.pop(page)

        # Now we can destroy it in wxWidgets use Destroy instead of delete
        pageRemoved.Destroy()
        self._mainSizer.Layout()

        self._pages.DoDeletePage(page)
        self.ResizeTabArea()
        self.Thaw()

        # Fire a closed event to notify listeners the page is gone
        closedEvent = ImageNotebookEvent(wxEVT_IMAGENOTEBOOK_PAGE_CLOSED, self.GetId())
        closedEvent.SetSelection(page)
        closedEvent.SetEventObject(self)
        self.GetEventHandler().ProcessEvent(closedEvent)
def RemovePage(self, page):
"""
Deletes the specified page, without deleting the associated window.
:param `page`: an integer specifying the page to be removed.
:note: The call to this function generates the page changing events.
"""
if page >= len(self._windows):
return False
# Fire a closing event
event = ImageNotebookEvent(wxEVT_IMAGENOTEBOOK_PAGE_CLOSING, self.GetId())
event.SetSelection(page)
event.SetEventObject(self)
self.GetEventHandler().ProcessEvent(event)
# The event handler allows it?
if not event.IsAllowed():
return False
self.Freeze()
# Remove the requested page
pageRemoved = self._windows[page]
# If the page is the current window, remove it from the sizer
# as well
if page == self.GetSelection():
self._mainSizer.Detach(pageRemoved)
# Remove it from the array as well
self._windows.pop(page)
self._mainSizer.Layout()
self.ResizeTabArea()
self.Thaw()
self._pages.DoDeletePage(page)
# Fire a closed event
closedEvent = ImageNotebookEvent(wxEVT_IMAGENOTEBOOK_PAGE_CLOSED, self.GetId())
closedEvent.SetSelection(page)
closedEvent.SetEventObject(self)
self.GetEventHandler().ProcessEvent(closedEvent)
return True
    def ResizeTabArea(self):
        """ Resizes the tab area if the control has the ``INB_FIT_LABELTEXT`` style set. """

        agwStyle = self.GetAGWWindowStyleFlag()

        # Nothing to do unless the fit-to-label style is active
        if agwStyle & INB_FIT_LABELTEXT == 0:
            return

        if agwStyle & INB_LEFT or agwStyle & INB_RIGHT:
            # Measure every caption off-screen with the tab font to find the
            # widest label
            dc = wx.MemoryDC()
            dc.SelectObject(wx.EmptyBitmap(1, 1))
            font = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT)
            font.SetPointSize(font.GetPointSize()*self._fontSizeMultiple)
            if self.GetFontBold():
                font.SetWeight(wx.FONTWEIGHT_BOLD)
            dc.SetFont(font)
            maxW = 0

            for page in xrange(self.GetPageCount()):
                caption = self._pages.GetPageText(page)
                w, h = dc.GetTextExtent(caption)
                maxW = max(maxW, w)

            maxW += 24 #TODO this is 6*4 6 is nPadding from drawlabel

            # Leave room for the tab image unless only text is shown
            if not agwStyle & INB_SHOW_ONLY_TEXT:
                maxW += self._pages._nImgSize * 2

            # Enforce a sensible minimum width for the tab area
            maxW = max(maxW, 100)
            self._pages.SetSizeHints(maxW, -1)
            self._pages._nTabAreaWidth = maxW
    def DeleteAllPages(self):
        """ Deletes all the pages in the book, destroying their windows. """

        if not self._windows:
            return

        # Freeze/Thaw avoids flicker while every child is destroyed
        self.Freeze()

        for win in self._windows:
            win.Destroy()

        self._windows = []

        self.Thaw()

        # remove old selection
        self._pages.ClearAll()
        self._pages.Refresh()
    def SetSelection(self, page):
        """
        Changes the selection from currently visible/selected page to the page
        given by page.

        :param `page`: an integer specifying the page to be selected.

        :note: The call to this function generates the page changing events.
        """

        if page >= len(self._windows):
            return

        # Re-selecting the current page is a no-op unless a selection is forced
        if page == self.GetSelection() and not self._bForceSelection:
            return

        oldSelection = self.GetSelection()

        # Generate an event that indicates that an image is about to be selected
        event = ImageNotebookEvent(wxEVT_IMAGENOTEBOOK_PAGE_CHANGING, self.GetId())
        event.SetSelection(page)
        event.SetOldSelection(oldSelection)
        event.SetEventObject(self)
        self.GetEventHandler().ProcessEvent(event)

        # The event handler allows it?  (a veto is overridden when forcing)
        if not event.IsAllowed() and not self._bForceSelection:
            return

        # Swap the visible window, then sync the tab container's index
        self.DoSetSelection(self._windows[page])
        # Now we can update the new selection
        self._pages._nIndex = page

        # Refresh calls the OnPaint of this class
        self._pages.Refresh()

        # Generate an event that indicates that an image was selected
        eventChanged = ImageNotebookEvent(wxEVT_IMAGENOTEBOOK_PAGE_CHANGED, self.GetId())
        eventChanged.SetEventObject(self)
        eventChanged.SetOldSelection(oldSelection)
        eventChanged.SetSelection(page)
        self.GetEventHandler().ProcessEvent(eventChanged)
    def AssignImageList(self, imglist):
        """
        Assigns an image list to the control.

        :param `imglist`: an instance of `wx.ImageList`.
        """

        self._pages.AssignImageList(imglist)

        # Force change: re-apply the current style so the layout is recomputed
        # with the new image sizes
        self.SetAGWWindowStyleFlag(self.GetAGWWindowStyleFlag())
def GetSelection(self):
""" Returns the current selection. """
if self._pages:
return self._pages._nIndex
else:
return -1
    def DoSetSelection(self, window):
        """
        Select the window by the provided pointer.

        :param `window`: an instance of `wx.Window` (the page to show).
        """

        curSel = self.GetSelection()
        agwStyle = self.GetAGWWindowStyleFlag()
        # Replace the window in the sizer
        self.Freeze()

        # With bottom/right tab placement the page must precede the tab area
        # in the sizer, so it is inserted first rather than appended
        bInsertFirst = (agwStyle & INB_BOTTOM or agwStyle & INB_RIGHT)

        if curSel >= 0:
            # Remove the currently shown window from the main sizer
            self._mainSizer.Detach(self._windows[curSel])
            self._windows[curSel].Hide()

        if bInsertFirst:
            self._mainSizer.Insert(0, window, 1, wx.EXPAND)
        else:
            self._mainSizer.Add(window, 1, wx.EXPAND)

        window.Show()
        self._mainSizer.Layout()
        self.Thaw()
def GetImageList(self):
""" Returns the associated image list. """
return self._pages.GetImageList()
def GetPageCount(self):
""" Returns the number of pages in the book. """
return len(self._windows)
def GetFontBold(self):
""" Gets the font bold status. """
return self._fontBold
def SetFontBold(self, bold):
"""
Sets whether the page captions are bold or not.
:param `bold`: ``True`` or ``False``.
"""
self._fontBold = bold
def GetFontSizeMultiple(self):
""" Gets the font size multiple for the page captions. """
return self._fontSizeMultiple
def SetFontSizeMultiple(self, multiple):
"""
Sets the font size multiple for the page captions.
:param `multiple`: The multiple to be applied to the system font to get the our font size.
"""
self._fontSizeMultiple = multiple
def SetPageImage(self, page, imageId):
"""
Sets the image index for the given page.
:param `page`: an integer specifying the page index;
:param `image`: an index into the image list.
"""
self._pages.SetPageImage(page, imageId)
self._pages.Refresh()
def SetPageText(self, page, text):
"""
Sets the text for the given page.
:param `page`: an integer specifying the page index;
:param `text`: the new tab label.
"""
self._pages.SetPageText(page, text)
self._pages.Refresh()
def GetPageText(self, page):
"""
Returns the text for the given page.
:param `page`: an integer specifying the page index.
"""
return self._pages.GetPageText(page)
def GetPageImage(self, page):
"""
Returns the image index for the given page.
:param `page`: an integer specifying the page index.
"""
return self._pages.GetPageImage(page)
def GetPage(self, page):
"""
Returns the window at the given page position.
:param `page`: an integer specifying the page to be returned.
"""
if page >= len(self._windows):
return
return self._windows[page]
def GetCurrentPage(self):
""" Returns the currently selected notebook page or ``None``. """
if self.GetSelection() < 0:
return
return self.GetPage(self.GetSelection())
def AdvanceSelection(self, forward=True):
"""
Cycles through the tabs.
:param `forward`: if ``True``, the selection is advanced in ascending order
(to the right), otherwise the selection is advanced in descending order.
:note: The call to this function generates the page changing events.
"""
nSel = self.GetSelection()
if nSel < 0:
return
nMax = self.GetPageCount() - 1
if forward:
newSelection = (nSel == nMax and [0] or [nSel + 1])[0]
else:
newSelection = (nSel == 0 and [nMax] or [nSel - 1])[0]
self.SetSelection(newSelection)
def ChangeSelection(self, page):
"""
Changes the selection for the given page, returning the previous selection.
:param `page`: an integer specifying the page to be selected.
:note: The call to this function does not generate the page changing events.
"""
if page < 0 or page >= self.GetPageCount():
return
oldPage = self.GetSelection()
self.DoSetSelection(page)
return oldPage
CurrentPage = property(GetCurrentPage, doc="See `GetCurrentPage`")
Page = property(GetPage, doc="See `GetPage`")
PageCount = property(GetPageCount, doc="See `GetPageCount`")
PageImage = property(GetPageImage, SetPageImage, doc="See `GetPageImage, SetPageImage`")
PageText = property(GetPageText, SetPageText, doc="See `GetPageText, SetPageText`")
Selection = property(GetSelection, SetSelection, doc="See `GetSelection, SetSelection`")
# ---------------------------------------------------------------------------- #
# Class FlatImageBook
# ---------------------------------------------------------------------------- #
class FlatImageBook(FlatBookBase):
    """
    Default implementation of the image book, it is like a `wx.Notebook`, except that
    images are used to control the different pages. This container is usually used
    for configuration dialogs etc.

    :note: Currently, this control works properly for images of size 32x32 and bigger.
    """

    def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize,
                 style=0, agwStyle=0, name="FlatImageBook"):
        """
        Default class constructor.

        :param `parent`: parent window. Must not be ``None``;
        :param `id`: window identifier. A value of -1 indicates a default value;
        :param `pos`: the control position. A value of (-1, -1) indicates a default position,
         chosen by either the windowing system or wxPython, depending on platform;
        :param `size`: the control size. A value of (-1, -1) indicates a default size,
         chosen by either the windowing system or wxPython, depending on platform;
        :param `style`: the underlying `wx.Panel` window style;
        :param `agwStyle`: the AGW-specific window style. This can be a combination of the
         following bits:

         =========================== =========== ==================================================
         Window Styles               Hex Value   Description
         =========================== =========== ==================================================
         ``INB_BOTTOM``                      0x1 Place labels below the page area. Available only for L{FlatImageBook}.
         ``INB_LEFT``                        0x2 Place labels on the left side. Available only for L{FlatImageBook}.
         ``INB_RIGHT``                       0x4 Place labels on the right side.
         ``INB_TOP``                         0x8 Place labels above the page area.
         ``INB_BORDER``                     0x10 Draws a border around L{LabelBook} or L{FlatImageBook}.
         ``INB_SHOW_ONLY_TEXT``             0x20 Shows only text labels and no images. Available only for L{LabelBook}.
         ``INB_SHOW_ONLY_IMAGES``           0x40 Shows only tab images and no label texts. Available only for L{LabelBook}.
         ``INB_FIT_BUTTON``                 0x80 Displays a pin button to show/hide the book control.
         ``INB_DRAW_SHADOW``               0x100 Draw shadows below the book tabs. Available only for L{LabelBook}.
         ``INB_USE_PIN_BUTTON``            0x200 Displays a pin button to show/hide the book control.
         ``INB_GRADIENT_BACKGROUND``       0x400 Draws a gradient shading on the tabs background. Available only for L{LabelBook}.
         ``INB_WEB_HILITE``                0x800 On mouse hovering, tabs behave like html hyperlinks. Available only for L{LabelBook}.
         ``INB_NO_RESIZE``                0x1000 Don't allow resizing of the tab area.
         ``INB_FIT_LABELTEXT``            0x2000 Will fit the tab area to the longest text (or text+image if you have images) in all the tabs.
         =========================== =========== ==================================================

        :param `name`: the window name.
        """

        FlatBookBase.__init__(self, parent, id, pos, size, style, agwStyle, name)

        self._pages = self.CreateImageContainer()

        # Orient the main sizer according to the tab placement
        if agwStyle & INB_LEFT or agwStyle & INB_RIGHT:
            self._mainSizer = wx.BoxSizer(wx.HORIZONTAL)
        else:
            self._mainSizer = wx.BoxSizer(wx.VERTICAL)

        self.SetSizer(self._mainSizer)

        # Add the tab container to the sizer
        self._mainSizer.Add(self._pages, 0, wx.EXPAND)

        # Reserve room for the tab images (twice the image size)
        if agwStyle & INB_LEFT or agwStyle & INB_RIGHT:
            self._pages.SetSizeHints(self._pages.GetImageSize() * 2, -1)
        else:
            self._pages.SetSizeHints(-1, self._pages.GetImageSize() * 2)

        self._mainSizer.Layout()

    def CreateImageContainer(self):
        """ Creates the image container class for L{FlatImageBook}. """

        return ImageContainer(self, wx.ID_ANY, agwStyle=self.GetAGWWindowStyleFlag())
# ---------------------------------------------------------------------------- #
# Class LabelBook
# ---------------------------------------------------------------------------- #
class LabelBook(FlatBookBase):
    """
    An implementation of a notebook control - except that instead of having
    tabs to show labels, it labels to the right or left (arranged horizontally).
    """

    def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize,
                 style=0, agwStyle=0, name="LabelBook"):
        """
        Default class constructor.

        :param `parent`: parent window. Must not be ``None``;
        :param `id`: window identifier. A value of -1 indicates a default value;
        :param `pos`: the control position. A value of (-1, -1) indicates a default position,
         chosen by either the windowing system or wxPython, depending on platform;
        :param `size`: the control size. A value of (-1, -1) indicates a default size,
         chosen by either the windowing system or wxPython, depending on platform;
        :param `style`: the underlying `wx.Panel` window style;
        :param `agwStyle`: the AGW-specific window style. This can be a combination of the
         following bits:

         =========================== =========== ==================================================
         Window Styles               Hex Value   Description
         =========================== =========== ==================================================
         ``INB_BOTTOM``                      0x1 Place labels below the page area. Available only for L{FlatImageBook}.
         ``INB_LEFT``                        0x2 Place labels on the left side. Available only for L{FlatImageBook}.
         ``INB_RIGHT``                       0x4 Place labels on the right side.
         ``INB_TOP``                         0x8 Place labels above the page area.
         ``INB_BORDER``                     0x10 Draws a border around L{LabelBook} or L{FlatImageBook}.
         ``INB_SHOW_ONLY_TEXT``             0x20 Shows only text labels and no images. Available only for L{LabelBook}.
         ``INB_SHOW_ONLY_IMAGES``           0x40 Shows only tab images and no label texts. Available only for L{LabelBook}.
         ``INB_FIT_BUTTON``                 0x80 Displays a pin button to show/hide the book control.
         ``INB_DRAW_SHADOW``               0x100 Draw shadows below the book tabs. Available only for L{LabelBook}.
         ``INB_USE_PIN_BUTTON``            0x200 Displays a pin button to show/hide the book control.
         ``INB_GRADIENT_BACKGROUND``       0x400 Draws a gradient shading on the tabs background. Available only for L{LabelBook}.
         ``INB_WEB_HILITE``                0x800 On mouse hovering, tabs behave like html hyperlinks. Available only for L{LabelBook}.
         ``INB_NO_RESIZE``                0x1000 Don't allow resizing of the tab area.
         ``INB_FIT_LABELTEXT``            0x2000 Will fit the tab area to the longest text (or text+image if you have images) in all the tabs.
         =========================== =========== ==================================================

        :param `name`: the window name.
        """

        FlatBookBase.__init__(self, parent, id, pos, size, style, agwStyle, name)

        self._pages = self.CreateImageContainer()

        # Label book specific initialization
        self._mainSizer = wx.BoxSizer(wx.HORIZONTAL)
        self.SetSizer(self._mainSizer)

        # Add the tab container to the sizer
        self._mainSizer.Add(self._pages, 0, wx.EXPAND)
        self._pages.SetSizeHints(self._pages.GetTabAreaWidth(), -1)

        # Initialize the colours maps
        self._pages.InitializeColours()

        self.Bind(wx.EVT_SIZE, self.OnSize)

    def CreateImageContainer(self):
        """ Creates the image container (LabelContainer) class for L{LabelBook}. """

        return LabelContainer(self, wx.ID_ANY, agwStyle=self.GetAGWWindowStyleFlag())

    def SetColour(self, which, colour):
        """
        Sets the colour for the specified parameter.

        :param `which`: the colour key;
        :param `colour`: a valid `wx.Colour` instance.

        :see: L{LabelContainer.SetColour} for a list of valid colour keys.
        """

        self._pages.SetColour(which, colour)

    def GetColour(self, which):
        """
        Returns the colour for the specified parameter.

        :param `which`: the colour key.

        :see: L{LabelContainer.SetColour} for a list of valid colour keys.
        """

        return self._pages.GetColour(which)

    def OnSize(self, event):
        """
        Handles the ``wx.EVT_SIZE`` event for L{LabelBook}.

        :param `event`: a `wx.SizeEvent` event to be processed.
        """

        # Only the tab area needs repainting; the sizer handles the page
        self._pages.Refresh()
        event.Skip()
|
en
| 0.61068
|
# --------------------------------------------------------------------------- # # LABELBOOK And FLATIMAGEBOOK Widgets wxPython IMPLEMENTATION # # Original C++ Code From Eran, embedded in the FlatMenu source code # # # License: wxWidgets license # # # Python Code By: # # <NAME>, @ 03 Nov 2006 # Latest Revision: 17 Jan 2011, 15.00 GMT # # # For All Kind Of Problems, Requests Of Enhancements And Bug Reports, Please # Write To Me At: # # <EMAIL> # <EMAIL> # # Or, Obviously, To The wxPython Mailing List!!! # # TODO: # LabelBook - Support IMB_SHOW_ONLY_IMAGES # LabelBook - An option for the draw border to only draw the border # between the controls and the pages so the background # colour can flow into the window background # # # # End Of Comments # --------------------------------------------------------------------------- # LabelBook and FlatImageBook are a quasi-full generic and owner-drawn implementations of `wx.Notebook`. Description =========== LabelBook and FlatImageBook are a quasi-full implementations of the `wx.Notebook`, and designed to be a drop-in replacement for `wx.Notebook`. The API functions are similar so one can expect the function to behave in the same way. LabelBook anf FlatImageBook share their appearance with `wx.Toolbook` and `wx.Listbook`, while having more options for custom drawings, label positioning, mouse pointing and so on. Moreover, they retain also some visual characteristics of the Outlook address book. 
Some features: - They are generic controls; - Supports for left, right, top (FlatImageBook only), bottom (FlatImageBook only) book styles; - Possibility to draw images only, text only or both (FlatImageBook only); - Support for a "pin-button", that allows the user to shrink/expand the book tab area; - Shadows behind tabs (LabelBook only); - Gradient shading of the tab area (LabelBook only); - Web-like mouse pointing on tabs style (LabelBook only); - Many customizable colours (tab area, active tab text, tab borders, active tab, highlight) - LabelBook only. And much more. See the demo for a quasi-complete review of all the functionalities of LabelBook and FlatImageBook. Supported Platforms =================== LabelBook and FlatImageBook have been tested on the following platforms: * Windows (Windows XP); * Linux Ubuntu (Dapper 6.06) Window Styles ============= This class supports the following window styles: =========================== =========== ================================================== Window Styles Hex Value Description =========================== =========== ================================================== ``INB_BOTTOM`` 0x1 Place labels below the page area. Available only for `FlatImageBook`. ``INB_LEFT`` 0x2 Place labels on the left side. Available only for `FlatImageBook`. ``INB_RIGHT`` 0x4 Place labels on the right side. ``INB_TOP`` 0x8 Place labels above the page area. ``INB_BORDER`` 0x10 Draws a border around `LabelBook` or `FlatImageBook`. ``INB_SHOW_ONLY_TEXT`` 0x20 Shows only text labels and no images. Available only for `LabelBook`. ``INB_SHOW_ONLY_IMAGES`` 0x40 Shows only tab images and no label texts. Available only for `LabelBook`. ``INB_FIT_BUTTON`` 0x80 Displays a pin button to show/hide the book control. ``INB_DRAW_SHADOW`` 0x100 Draw shadows below the book tabs. Available only for `LabelBook`. ``INB_USE_PIN_BUTTON`` 0x200 Displays a pin button to show/hide the book control. 
``INB_GRADIENT_BACKGROUND`` 0x400 Draws a gradient shading on the tabs background. Available only for `LabelBook`. ``INB_WEB_HILITE`` 0x800 On mouse hovering, tabs behave like html hyperlinks. Available only for `LabelBook`. ``INB_NO_RESIZE`` 0x1000 Don't allow resizing of the tab area. ``INB_FIT_LABELTEXT`` 0x2000 Will fit the tab area to the longest text (or text+image if you have images) in all the tabs. =========================== =========== ================================================== Events Processing ================= This class processes the following events: =================================== ================================================== Event Name Description =================================== ================================================== ``EVT_IMAGENOTEBOOK_PAGE_CHANGED`` Notify client objects when the active page in `ImageNotebook` has changed. ``EVT_IMAGENOTEBOOK_PAGE_CHANGING`` Notify client objects when the active page in `ImageNotebook` is about to change. ``EVT_IMAGENOTEBOOK_PAGE_CLOSED`` Notify client objects when a page in `ImageNotebook` has been closed. ``EVT_IMAGENOTEBOOK_PAGE_CLOSING`` Notify client objects when a page in `ImageNotebook` is closing. =================================== ================================================== License And Version =================== LabelBook and FlatImageBook are distributed under the wxPython license. Latest Revision: <NAME> @ 17 Jan 2011, 15.00 GMT Version 0.5. #---------------------------------------------------------------------- # Beginning Of IMAGENOTEBOOK wxPython Code #---------------------------------------------------------------------- # Check for the new method in 2.7 (not present in 2.6.3.3) # FlatImageBook and LabelBook styles Place labels below the page area. Available only for `FlatImageBook`. Place labels on the left side. Available only for `FlatImageBook`. Place labels on the right side. Place labels above the page area. 
Draws a border around `LabelBook` or `FlatImageBook`. Shows only text labels and no images. Available only for `LabelBook`. Shows only tab images and no label texts. Available only for `LabelBook`. Displays a pin button to show/hide the book control. Draw shadows below the book tabs. Available only for `LabelBook`. Displays a pin button to show/hide the book control. Draws a gradient shading on the tabs background. Available only for `LabelBook`. On mouse hovering, tabs behave like html hyperlinks. Available only for `LabelBook`. Don't allow resizing of the tab area. Will fit the tab area to the longest text (or text+image if you have images) in all the tabs. #-----------------------------------# # ImageNotebookEvent #-----------------------------------# Notify client objects when the active page in `ImageNotebook` has changed. Notify client objects when the active page in `ImageNotebook` is about to change. Notify client objects when a page in `ImageNotebook` is closing. Notify client objects when a page in `ImageNotebook` has been closed. # ---------------------------------------------------------------------------- # # Class ImageNotebookEvent # ---------------------------------------------------------------------------- # This events will be sent when a ``EVT_IMAGENOTEBOOK_PAGE_CHANGED``, ``EVT_IMAGENOTEBOOK_PAGE_CHANGING``, ``EVT_IMAGENOTEBOOK_PAGE_CLOSING``, ``EVT_IMAGENOTEBOOK_PAGE_CLOSED`` is mapped in the parent. Default class constructor. :param `eventType`: the event type; :param `eventId`: the event identifier; :param `sel`: the current selection; :param `oldsel`: the old selection. Sets the event selection. :param `s`: an integer specifying the new selection. Sets the event old selection. :param `s`: an integer specifying the old selection. Returns the event selection. Returns the old event selection. Prevents the change announced by this event from happening. 
:note: It is in general a good idea to notify the user about the reasons for vetoing the change because otherwise the applications behaviour (which just refuses to do what the user wants) might be quite surprising. This is the opposite of L{Veto}: it explicitly allows the event to be processed. For most events it is not necessary to call this method as the events are allowed anyhow but some are forbidden by default (this will be mentioned in the corresponding event description). Returns ``True`` if the change is allowed (L{Veto} hasn't been called) or ``False`` otherwise (if it was). # ---------------------------------------------------------------------------- # # Class ImageInfo # ---------------------------------------------------------------------------- # This class holds all the information (caption, image, etc...) belonging to a single tab in L{LabelBook}. Default class constructor. :param `strCaption`: the tab caption; :param `imageIndex`: the tab image index based on the assigned (set) `wx.ImageList` (if any). Sets the tab caption. :param `value`: the new tab caption. Returns the tab caption. Sets the tab position. :param `value`: the new tab position, an instance of `wx.Point`. Returns the tab position. Sets the tab size. :param `value`: the new tab size, an instance of `wx.Size`. Returns the tab size. Sets the tab image index. :param `value`: an index into the image list.. Returns the tab image index. Sets the client rectangle available for the tab text. :param `rect`: the tab text client rectangle, an instance of `wx.Rect`. Returns the client rectangle available for the tab text. # ---------------------------------------------------------------------------- # # Class ImageContainerBase # ---------------------------------------------------------------------------- # Base class for L{FlatImageBook} image container. Default class constructor. :param `parent`: parent window. Must not be ``None``; :param `id`: window identifier. 
A value of -1 indicates a default value; :param `pos`: the control position. A value of (-1, -1) indicates a default position, chosen by either the windowing system or wxPython, depending on platform; :param `size`: the control size. A value of (-1, -1) indicates a default size, chosen by either the windowing system or wxPython, depending on platform; :param `style`: the underlying `wx.Panel` window style; :param `agwStyle`: the AGW-specific window style. This can be a combination of the following bits: =========================== =========== ================================================== Window Styles Hex Value Description =========================== =========== ================================================== ``INB_BOTTOM`` 0x1 Place labels below the page area. Available only for L{FlatImageBook}. ``INB_LEFT`` 0x2 Place labels on the left side. Available only for L{FlatImageBook}. ``INB_RIGHT`` 0x4 Place labels on the right side. ``INB_TOP`` 0x8 Place labels above the page area. ``INB_BORDER`` 0x10 Draws a border around L{LabelBook} or L{FlatImageBook}. ``INB_SHOW_ONLY_TEXT`` 0x20 Shows only text labels and no images. Available only for L{LabelBook}. ``INB_SHOW_ONLY_IMAGES`` 0x40 Shows only tab images and no label texts. Available only for L{LabelBook}. ``INB_FIT_BUTTON`` 0x80 Displays a pin button to show/hide the book control. ``INB_DRAW_SHADOW`` 0x100 Draw shadows below the book tabs. Available only for L{LabelBook}. ``INB_USE_PIN_BUTTON`` 0x200 Displays a pin button to show/hide the book control. ``INB_GRADIENT_BACKGROUND`` 0x400 Draws a gradient shading on the tabs background. Available only for L{LabelBook}. ``INB_WEB_HILITE`` 0x800 On mouse hovering, tabs behave like html hyperlinks. Available only for L{LabelBook}. ``INB_NO_RESIZE`` 0x1000 Don't allow resizing of the tab area. ``INB_FIT_LABELTEXT`` 0x2000 Will fit the tab area to the longest text (or text+image if you have images) in all the tabs. 
=========================== =========== ================================================== :param `name`: the window name. Tests for existance of flag in the style. :param `flag`: a window style. This can be a combination of the following bits: =========================== =========== ================================================== Window Styles Hex Value Description =========================== =========== ================================================== ``INB_BOTTOM`` 0x1 Place labels below the page area. Available only for L{FlatImageBook}. ``INB_LEFT`` 0x2 Place labels on the left side. Available only for L{FlatImageBook}. ``INB_RIGHT`` 0x4 Place labels on the right side. ``INB_TOP`` 0x8 Place labels above the page area. ``INB_BORDER`` 0x10 Draws a border around L{LabelBook} or L{FlatImageBook}. ``INB_SHOW_ONLY_TEXT`` 0x20 Shows only text labels and no images. Available only for L{LabelBook}. ``INB_SHOW_ONLY_IMAGES`` 0x40 Shows only tab images and no label texts. Available only for L{LabelBook}. ``INB_FIT_BUTTON`` 0x80 Displays a pin button to show/hide the book control. ``INB_DRAW_SHADOW`` 0x100 Draw shadows below the book tabs. Available only for L{LabelBook}. ``INB_USE_PIN_BUTTON`` 0x200 Displays a pin button to show/hide the book control. ``INB_GRADIENT_BACKGROUND`` 0x400 Draws a gradient shading on the tabs background. Available only for L{LabelBook}. ``INB_WEB_HILITE`` 0x800 On mouse hovering, tabs behave like html hyperlinks. Available only for L{LabelBook}. ``INB_NO_RESIZE`` 0x1000 Don't allow resizing of the tab area. ``INB_FIT_LABELTEXT`` 0x2000 Will fit the tab area to the longest text (or text+image if you have images) in all the tabs. =========================== =========== ================================================== Removes flag from the style. :param `flag`: a window style flag. :see: L{HasAGWFlag} for a list of possible window style flags. Assigns an image list to the L{ImageContainerBase}. 
:param `imglist`: an instance of `wx.ImageList`. Return the image list for L{ImageContainerBase}. Returns the image size inside the L{ImageContainerBase} image list. Fixes the text, to fit `maxWidth` value. If the text length exceeds `maxWidth` value this function truncates it and appends two dots at the end. ("Long Long Long Text" might become "Long Long..."). :param `dc`: an instance of `wx.DC`; :param `text`: the text to fix/truncate; :param `maxWidth`: the maximum allowed width for the text, in pixels. Allows the parent to examine the children type. Some implementation (such as L{LabelBook}), does not support top/bottom images, only left/right. Adds a page to the container. :param `caption`: specifies the text for the new tab; :param `selected`: specifies whether the page should be selected; :param `imgIdx`: specifies the optional image index for the new tab. Inserts a page into the container at the specified position. :param `page_idx`: specifies the position for the new tab; :param `caption`: specifies the text for the new tab; :param `selected`: specifies whether the page should be selected; :param `imgIdx`: specifies the optional image index for the new tab. Sets the image for the given page. :param `page`: the index of the tab; :param `imgIdx`: specifies the optional image index for the tab. Sets the tab caption for the given page. :param `page`: the index of the tab; :param `text`: the new tab caption. Returns the image index for the given page. :param `page`: the index of the tab. Returns the tab caption for the given page. :param `page`: the index of the tab. Deletes all the pages in the container. Does the actual page deletion. :param `page`: the index of the tab. # Remove the page from the vector # The delete page was the last first on the array, # but the book still has more pages, so we set the # active page to be the first one (0) # Refresh the tabs # Erase the page container drawings Handles the ``wx.EVT_SIZE`` event for L{ImageContainerBase}. 
:param `event`: a `wx.SizeEvent` event to be processed. # Call on paint Handles the ``wx.EVT_ERASE_BACKGROUND`` event for L{ImageContainerBase}. :param `event`: a `wx.EraseEvent` event to be processed. :note: This method is intentionally empty to reduce flicker. Returns the index of the tab at the specified position or ``wx.NOT_FOUND`` if ``None``, plus the flag style of L{HitTest}. :param `pt`: an instance of `wx.Point`, to test for hits. :return: The index of the tab at the specified position plus the hit test flag, which can be one of the following bits: ====================== ======= ================================ HitTest Flags Value Description ====================== ======= ================================ ``IMG_OVER_IMG`` 0 The mouse is over the tab icon ``IMG_OVER_PIN`` 1 The mouse is over the pin button ``IMG_OVER_EW_BORDER`` 2 The mouse is over the east-west book border ``IMG_NONE`` 3 Nowhere ====================== ======= ================================ # For Web Hover style, we test the TextRect Tests whether pt is located on the sash. :param `pt`: an instance of `wx.Point`, to test for hits. # Check if we are on a the sash border Handles the ``wx.EVT_LEFT_DOWN`` event for L{ImageContainerBase}. :param `event`: a `wx.MouseEvent` event to be processed. # Support for collapse/expand # Incase panel is collapsed, there is nothing # to check Handles the ``wx.EVT_LEAVE_WINDOW`` event for L{ImageContainerBase}. :param `event`: a `wx.MouseEvent` event to be processed. # Make sure the pin button status is NONE # incase we were in pin button style # Restore cursor Handles the ``wx.EVT_LEFT_UP`` event for L{ImageContainerBase}. :param `event`: a `wx.MouseEvent` event to be processed. # Save the current tab area width # Restore the tab area size Handles the ``wx.EVT_MOTION`` event for L{ImageContainerBase}. :param `event`: a `wx.MouseEvent` event to be processed. 
# Check to see if we are in the pin button rect # Change the cursor to be Hand # Restore the cursor only if we have the Web hover style set, # and we are not currently hovering the sash # Dont display hover effect when hoevering the # selected label Draw a pin button, that allows collapsing of the image panel. :param `dc`: an instance of `wx.DC`; :param `rect`: the pin button client rectangle; :param `downPin`: ``True`` if the pin button is facing downwards, ``False`` if it is facing leftwards. # Set the bitmap according to the button status # Draw upper and left border with grey colour # Draw upper and left border with grey colour # Set the masking # Draw the new bitmap # Save the pin rect # ---------------------------------------------------------------------------- # # Class ImageContainer # ---------------------------------------------------------------------------- # Base class for L{FlatImageBook} image container. Default class constructor. :param `parent`: parent window. Must not be ``None``; :param `id`: window identifier. A value of -1 indicates a default value; :param `pos`: the control position. A value of (-1, -1) indicates a default position, chosen by either the windowing system or wxPython, depending on platform; :param `size`: the control size. A value of (-1, -1) indicates a default size, chosen by either the windowing system or wxPython, depending on platform; :param `style`: the underlying `wx.Panel` window style; :param `agwStyle`: the AGW-specific window style. This can be a combination of the following bits: =========================== =========== ================================================== Window Styles Hex Value Description =========================== =========== ================================================== ``INB_BOTTOM`` 0x1 Place labels below the page area. Available only for L{FlatImageBook}. ``INB_LEFT`` 0x2 Place labels on the left side. Available only for L{FlatImageBook}. ``INB_RIGHT`` 0x4 Place labels on the right side. 
``INB_TOP`` 0x8 Place labels above the page area. ``INB_BORDER`` 0x10 Draws a border around L{LabelBook} or L{FlatImageBook}. ``INB_SHOW_ONLY_TEXT`` 0x20 Shows only text labels and no images. Available only for L{LabelBook}. ``INB_SHOW_ONLY_IMAGES`` 0x40 Shows only tab images and no label texts. Available only for L{LabelBook}. ``INB_FIT_BUTTON`` 0x80 Displays a pin button to show/hide the book control. ``INB_DRAW_SHADOW`` 0x100 Draw shadows below the book tabs. Available only for L{LabelBook}. ``INB_USE_PIN_BUTTON`` 0x200 Displays a pin button to show/hide the book control. ``INB_GRADIENT_BACKGROUND`` 0x400 Draws a gradient shading on the tabs background. Available only for L{LabelBook}. ``INB_WEB_HILITE`` 0x800 On mouse hovering, tabs behave like html hyperlinks. Available only for L{LabelBook}. ``INB_NO_RESIZE`` 0x1000 Don't allow resizing of the tab area. ``INB_FIT_LABELTEXT`` 0x2000 Will fit the tab area to the longest text (or text+image if you have images) in all the tabs. =========================== =========== ================================================== :param `name`: the window name. Handles the ``wx.EVT_SIZE`` event for L{ImageContainer}. :param `event`: a `wx.SizeEvent` event to be processed. Handles the ``wx.EVT_LEFT_DOWN`` event for L{ImageContainer}. :param `event`: a `wx.MouseEvent` event to be processed. Handles the ``wx.EVT_LEFT_UP`` event for L{ImageContainer}. :param `event`: a `wx.MouseEvent` event to be processed. Handles the ``wx.EVT_ERASE_BACKGROUND`` event for L{ImageContainer}. :param `event`: a `wx.EraseEvent` event to be processed. Handles the ``wx.EVT_MOTION`` event for L{ImageContainer}. :param `event`: a `wx.MouseEvent` event to be processed. Handles the ``wx.EVT_LEAVE_WINDOW`` event for L{ImageContainer}. :param `event`: a `wx.MouseEvent` event to be processed. Allows the parent to examine the children type. Some implementation (such as L{LabelBook}), does not support top/bottom images, only left/right. 
Handles the ``wx.EVT_PAINT`` event for L{ImageContainer}. :param `event`: a `wx.PaintEvent` event to be processed. # Background # Draw the pin button # We reserver 20 pixels for the 'pin' button # The drawing of the images start position. This is # depenedent of the style, especially when Pin button # style is requested # Pad text with 2 pixels on the left and right # incase the 'fit button' style is applied, we set the rectangle width to the # text width plus padding # Incase the style IS applied, but the style is either LEFT or RIGHT # we ignore it # Restore font to be normal # Default values for the surronounding rectangle # around a button # To avoid the recangle to 'touch' the borders # Incase the style requires non-fixed button (fit to text) # recalc the rectangle width # Make the width an even number # Check that we have enough space to draw the button # If Pin button is used, consider its space as well (applicable for top/botton style) # since in the left/right, its size is already considered in 'pos' # Calculate the button rectangle # Check if we need to draw a rectangle around the button # Set the colours # Fix the surrounding of the rect if border is set # Set the colours # Fix the surrounding of the rect if border is set # Incase user set both flags: # INB_SHOW_ONLY_TEXT and INB_SHOW_ONLY_IMAGES # We override them to display both # Draw the caption and text # Draw the text # Check if the text can fit the size of the rectangle, # if not truncate it # Update the length of the text # Update the page info # Update all buttons that can not fit into the screen as non-visible # Draw the pin button # ---------------------------------------------------------------------------- # # Class LabelContainer # ---------------------------------------------------------------------------- # Base class for L{LabelBook}. Default class constructor. :param `parent`: parent window. Must not be ``None``; :param `id`: window identifier. 
A value of -1 indicates a default value; :param `pos`: the control position. A value of (-1, -1) indicates a default position, chosen by either the windowing system or wxPython, depending on platform; :param `size`: the control size. A value of (-1, -1) indicates a default size, chosen by either the windowing system or wxPython, depending on platform; :param `style`: the underlying `wx.Panel` window style; :param `agwStyle`: the AGW-specific window style. This can be a combination of the following bits: =========================== =========== ================================================== Window Styles Hex Value Description =========================== =========== ================================================== ``INB_BOTTOM`` 0x1 Place labels below the page area. Available only for L{FlatImageBook}. ``INB_LEFT`` 0x2 Place labels on the left side. Available only for L{FlatImageBook}. ``INB_RIGHT`` 0x4 Place labels on the right side. ``INB_TOP`` 0x8 Place labels above the page area. ``INB_BORDER`` 0x10 Draws a border around L{LabelBook} or L{FlatImageBook}. ``INB_SHOW_ONLY_TEXT`` 0x20 Shows only text labels and no images. Available only for L{LabelBook}. ``INB_SHOW_ONLY_IMAGES`` 0x40 Shows only tab images and no label texts. Available only for L{LabelBook}. ``INB_FIT_BUTTON`` 0x80 Displays a pin button to show/hide the book control. ``INB_DRAW_SHADOW`` 0x100 Draw shadows below the book tabs. Available only for L{LabelBook}. ``INB_USE_PIN_BUTTON`` 0x200 Displays a pin button to show/hide the book control. ``INB_GRADIENT_BACKGROUND`` 0x400 Draws a gradient shading on the tabs background. Available only for L{LabelBook}. ``INB_WEB_HILITE`` 0x800 On mouse hovering, tabs behave like html hyperlinks. Available only for L{LabelBook}. ``INB_NO_RESIZE`` 0x1000 Don't allow resizing of the tab area. ``INB_FIT_LABELTEXT`` 0x2000 Will fit the tab area to the longest text (or text+image if you have images) in all the tabs. 
=========================== =========== ================================================== :param `name`: the window name. Handles the ``wx.EVT_SIZE`` event for L{LabelContainer}. :param `event`: a `wx.SizeEvent` event to be processed. Handles the ``wx.EVT_ERASE_BACKGROUND`` event for L{LabelContainer}. :param `event`: a `wx.EraseEvent` event to be processed. Returns the width of the tab area. Sets the width of the tab area. :param `width`: the width of the tab area, in pixels. Allows the parent to examine the children type. Some implementation (such as L{LabelBook}), does not support top/bottom images, only left/right. Sets the background bitmap for the control. :param `bmp`: a valid `wx.Bitmap` object. Handles the ``wx.EVT_PAINT`` event for L{LabelContainer}. :param `event`: a `wx.PaintEvent` event to be processed. # Set the pen & brush # Incase user set both flags, we override them to display both # INB_SHOW_ONLY_TEXT and INB_SHOW_ONLY_IMAGES # Draw graident in the background area # Draw the border and background # Draw border # Just draw the border with transparent brush # Draw the pin button # We reserve 20 pixels for the pin button # Default values for the surronounding rectangle # around a button # Check that we have enough space to draw the button # Calculate the button rectangle # Update all buttons that can not fit into the screen as non-visible Draws a bitmap as the background of the control. :param `dc`: an instance of `wx.DC`. #self._skin = bmp Handles the ``wx.EVT_LEFT_UP`` event for L{LabelContainer}. :param `event`: a `wx.MouseEvent` event to be processed. # Sash was being dragged? # Remove sash # Restore cursor Actually resizes the tab area. :param `event`: an instance of `wx.SizeEvent`. # Resize our size # Dont allow width to be lower than that # Update the tab new area width Handles the ``wx.EVT_MOTION`` event for L{LabelContainer}. :param `event`: a `wx.MouseEvent` event to be processed. 
# Remove old sash # Progress sash, and redraw it # Sash is not being dragged # Change cursor to EW cursor Handles the ``wx.EVT_LEFT_DOWN`` event for L{LabelContainer}. :param `event`: a `wx.MouseEvent` event to be processed. # We are over the sash # first time, begin drawing sash # Change mouse cursor Handles the ``wx.EVT_LEAVE_WINDOW`` event for L{LabelContainer}. :param `event`: a `wx.MouseEvent` event to be processed. # If Sash is being dragged, ignore this event Draws a rounded rectangle around the current tab. :param `dc`: an instance of `wx.DC`; :param `rect`: the current tab client rectangle. # The hovered tab with default border # We draw CCW # Right images # Upper line # Right line (white) # Bottom diagnol - we change pen # Bottom line # Left images # Upper line white # Left line # Bottom diagnol, we change the pen # Bottom line Draws a web style hover effect (cursor set to hand & text is underlined). :param `dc`: an instance of `wx.DC`; :param `caption`: the tab caption text; :param `xCoord`: the x position of the tab caption; :param `yCoord`: the y position of the tab caption. # Redraw the text with underlined font Sets a colour for a parameter. :param `which`: can be one of the following parameters: ================================== ======= ================================== Colour Key Value Description ================================== ======= ================================== ``INB_TAB_AREA_BACKGROUND_COLOUR`` 100 The tab area background colour ``INB_ACTIVE_TAB_COLOUR`` 101 The active tab background colour ``INB_TABS_BORDER_COLOUR`` 102 The tabs border colour ``INB_TEXT_COLOUR`` 103 The tab caption text colour ``INB_ACTIVE_TEXT_COLOUR`` 104 The active tab caption text colour ``INB_HILITE_TAB_COLOUR`` 105 The tab caption highlight text colour ================================== ======= ================================== :param `colour`: a valid `wx.Colour` object. Returns a colour for a parameter. :param `which`: the colour key. 
:see: L{SetColour} for a list of valid colour keys. Initializes the colours map to be used for this control. # Initialize map colours # dont allow bright colour one on the other Draws a label using the specified dc. :param `dc`: an instance of `wx.DC`; :param `rect`: the text client rectangle; :param `text`: the actual text string; :param `bmp`: a bitmap to be drawn next to the text; :param `imgInfo`: an instance of L{ImageInfo}; :param `orientationLeft`: ``True`` if the book has the ``INB_RIGHT`` or ``INB_LEFT`` style set; :param `imgIdx`: the tab image index; :param `selected`: ``True`` if the tab is selected, ``False`` otherwise; :param `hover`: ``True`` if the tab is being hovered with the mouse, ``False`` otherwise. # First we define the rectangle for the text #------------------------------------------------------------------------- # Label layout: # [ nPadding | Image | nPadding | Text | nPadding ] #------------------------------------------------------------------------- # Text bounding rectangle # Truncate text if needed # Image bounding rectangle # Draw bounding rectangle # First we colour the tab # Draw the text & bitmap # Drop shadow # Always drop shadow for this style # Draw hover effect # Update the page information bout position and size # ---------------------------------------------------------------------------- # # Class FlatBookBase # ---------------------------------------------------------------------------- # Base class for the containing window for L{LabelBook} and L{FlatImageBook}. Default class constructor. :param `parent`: parent window. Must not be ``None``; :param `id`: window identifier. A value of -1 indicates a default value; :param `pos`: the control position. A value of (-1, -1) indicates a default position, chosen by either the windowing system or wxPython, depending on platform; :param `size`: the control size. 
A value of (-1, -1) indicates a default size, chosen by either the windowing system or wxPython, depending on platform; :param `style`: the underlying `wx.Panel` window style; :param `agwStyle`: the AGW-specific window style. This can be a combination of the following bits: =========================== =========== ================================================== Window Styles Hex Value Description =========================== =========== ================================================== ``INB_BOTTOM`` 0x1 Place labels below the page area. Available only for L{FlatImageBook}. ``INB_LEFT`` 0x2 Place labels on the left side. Available only for L{FlatImageBook}. ``INB_RIGHT`` 0x4 Place labels on the right side. ``INB_TOP`` 0x8 Place labels above the page area. ``INB_BORDER`` 0x10 Draws a border around L{LabelBook} or L{FlatImageBook}. ``INB_SHOW_ONLY_TEXT`` 0x20 Shows only text labels and no images. Available only for L{LabelBook}. ``INB_SHOW_ONLY_IMAGES`` 0x40 Shows only tab images and no label texts. Available only for L{LabelBook}. ``INB_FIT_BUTTON`` 0x80 Displays a pin button to show/hide the book control. ``INB_DRAW_SHADOW`` 0x100 Draw shadows below the book tabs. Available only for L{LabelBook}. ``INB_USE_PIN_BUTTON`` 0x200 Displays a pin button to show/hide the book control. ``INB_GRADIENT_BACKGROUND`` 0x400 Draws a gradient shading on the tabs background. Available only for L{LabelBook}. ``INB_WEB_HILITE`` 0x800 On mouse hovering, tabs behave like html hyperlinks. Available only for L{LabelBook}. ``INB_NO_RESIZE`` 0x1000 Don't allow resizing of the tab area. ``INB_FIT_LABELTEXT`` 0x2000 Will fit the tab area to the longest text (or text+image if you have images) in all the tabs. =========================== =========== ================================================== :param `name`: the window name. Sets the window style. 
:param `agwStyle`: can be a combination of the following bits: =========================== =========== ================================================== Window Styles Hex Value Description =========================== =========== ================================================== ``INB_BOTTOM`` 0x1 Place labels below the page area. Available only for L{FlatImageBook}. ``INB_LEFT`` 0x2 Place labels on the left side. Available only for L{FlatImageBook}. ``INB_RIGHT`` 0x4 Place labels on the right side. ``INB_TOP`` 0x8 Place labels above the page area. ``INB_BORDER`` 0x10 Draws a border around L{LabelBook} or L{FlatImageBook}. ``INB_SHOW_ONLY_TEXT`` 0x20 Shows only text labels and no images. Available only for L{LabelBook}. ``INB_SHOW_ONLY_IMAGES`` 0x40 Shows only tab images and no label texts. Available only for L{LabelBook}. ``INB_FIT_BUTTON`` 0x80 Displays a pin button to show/hide the book control. ``INB_DRAW_SHADOW`` 0x100 Draw shadows below the book tabs. Available only for L{LabelBook}. ``INB_USE_PIN_BUTTON`` 0x200 Displays a pin button to show/hide the book control. ``INB_GRADIENT_BACKGROUND`` 0x400 Draws a gradient shading on the tabs background. Available only for L{LabelBook}. ``INB_WEB_HILITE`` 0x800 On mouse hovering, tabs behave like html hyperlinks. Available only for L{LabelBook}. ``INB_NO_RESIZE`` 0x1000 Don't allow resizing of the tab area. ``INB_FIT_LABELTEXT`` 0x2000 Will fit the tab area to the longest text (or text+image if you have images) in all the tabs. =========================== =========== ================================================== # Check that we are not in initialization process # Detach the windows attached to the sizer # Create new sizer with the requested orientaion # Add the tab container and the separator # Attach the windows back to the sizer to the sizer Returns the L{FlatBookBase} window style. :see: L{SetAGWWindowStyleFlag} for a list of possible window style flags. 
Returns whether a flag is present in the L{FlatBookBase} style. :param `flag`: one of the possible L{FlatBookBase} window styles. :see: L{SetAGWWindowStyleFlag} for a list of possible window style flags. Adds a page to the book. :param `page`: specifies the new page; :param `text`: specifies the text for the new page; :param `select`: specifies whether the page should be selected; :param `imageId`: specifies the optional image index for the new page. :note: The call to this function generates the page changing events. Inserts a page into the book at the specified position. :param `page_idx`: specifies the position for the new page; :param `page`: specifies the new page; :param `text`: specifies the text for the new page; :param `select`: specifies whether the page should be selected; :param `imageId`: specifies the optional image index for the new page. :note: The call to this function generates the page changing events. Deletes the specified page, and the associated window. :param `page`: an integer specifying the page to be deleted. :note: The call to this function generates the page changing events. # Fire a closing event # The event handler allows it? # Delete the requested page # If the page is the current window, remove it from the sizer # as well # Remove it from the array as well # Now we can destroy it in wxWidgets use Destroy instead of delete # Fire a closed event Deletes the specified page, without deleting the associated window. :param `page`: an integer specifying the page to be removed. :note: The call to this function generates the page changing events. # Fire a closing event # The event handler allows it? # Remove the requested page # If the page is the current window, remove it from the sizer # as well # Remove it from the array as well # Fire a closed event Resizes the tab area if the control has the ``INB_FIT_LABELTEXT`` style set. #TODO this is 6*4 6 is nPadding from drawlabel Deletes all the pages in the book. 
# remove old selection Changes the selection from currently visible/selected page to the page given by page. :param `page`: an integer specifying the page to be selected. :note: The call to this function generates the page changing events. # Generate an event that indicates that an image is about to be selected # The event handler allows it? # Now we can update the new selection # Refresh calls the OnPaint of this class # Generate an event that indicates that an image was selected Assigns an image list to the control. :param `imglist`: an instance of `wx.ImageList`. # Force change Returns the current selection. Select the window by the provided pointer. :param `window`: an instance of `wx.Window`. # Replace the window in the sizer # Check if a new selection was made # Remove the window from the main sizer Returns the associated image list. Returns the number of pages in the book. Gets the font bold status. Sets whether the page captions are bold or not. :param `bold`: ``True`` or ``False``. Gets the font size multiple for the page captions. Sets the font size multiple for the page captions. :param `multiple`: The multiple to be applied to the system font to get the our font size. Sets the image index for the given page. :param `page`: an integer specifying the page index; :param `image`: an index into the image list. Sets the text for the given page. :param `page`: an integer specifying the page index; :param `text`: the new tab label. Returns the text for the given page. :param `page`: an integer specifying the page index. Returns the image index for the given page. :param `page`: an integer specifying the page index. Returns the window at the given page position. :param `page`: an integer specifying the page to be returned. Returns the currently selected notebook page or ``None``. Cycles through the tabs. :param `forward`: if ``True``, the selection is advanced in ascending order (to the right), otherwise the selection is advanced in descending order. 
:note: The call to this function generates the page changing events. Changes the selection for the given page, returning the previous selection. :param `page`: an integer specifying the page to be selected. :note: The call to this function does not generate the page changing events. # ---------------------------------------------------------------------------- # # Class FlatImageBook # ---------------------------------------------------------------------------- # Default implementation of the image book, it is like a `wx.Notebook`, except that images are used to control the different pages. This container is usually used for configuration dialogs etc. :note: Currently, this control works properly for images of size 32x32 and bigger. Default class constructor. :param `parent`: parent window. Must not be ``None``; :param `id`: window identifier. A value of -1 indicates a default value; :param `pos`: the control position. A value of (-1, -1) indicates a default position, chosen by either the windowing system or wxPython, depending on platform; :param `size`: the control size. A value of (-1, -1) indicates a default size, chosen by either the windowing system or wxPython, depending on platform; :param `style`: the underlying `wx.Panel` window style; :param `agwStyle`: the AGW-specific window style. This can be a combination of the following bits: =========================== =========== ================================================== Window Styles Hex Value Description =========================== =========== ================================================== ``INB_BOTTOM`` 0x1 Place labels below the page area. Available only for L{FlatImageBook}. ``INB_LEFT`` 0x2 Place labels on the left side. Available only for L{FlatImageBook}. ``INB_RIGHT`` 0x4 Place labels on the right side. ``INB_TOP`` 0x8 Place labels above the page area. ``INB_BORDER`` 0x10 Draws a border around L{LabelBook} or L{FlatImageBook}. ``INB_SHOW_ONLY_TEXT`` 0x20 Shows only text labels and no images. 
Available only for L{LabelBook}. ``INB_SHOW_ONLY_IMAGES`` 0x40 Shows only tab images and no label texts. Available only for L{LabelBook}. ``INB_FIT_BUTTON`` 0x80 Displays a pin button to show/hide the book control. ``INB_DRAW_SHADOW`` 0x100 Draw shadows below the book tabs. Available only for L{LabelBook}. ``INB_USE_PIN_BUTTON`` 0x200 Displays a pin button to show/hide the book control. ``INB_GRADIENT_BACKGROUND`` 0x400 Draws a gradient shading on the tabs background. Available only for L{LabelBook}. ``INB_WEB_HILITE`` 0x800 On mouse hovering, tabs behave like html hyperlinks. Available only for L{LabelBook}. ``INB_NO_RESIZE`` 0x1000 Don't allow resizing of the tab area. ``INB_FIT_LABELTEXT`` 0x2000 Will fit the tab area to the longest text (or text+image if you have images) in all the tabs. =========================== =========== ================================================== :param `name`: the window name. # Add the tab container to the sizer Creates the image container class for L{FlatImageBook}. # ---------------------------------------------------------------------------- # # Class LabelBook # ---------------------------------------------------------------------------- # An implementation of a notebook control - except that instead of having tabs to show labels, it labels to the right or left (arranged horizontally). Default class constructor. :param `parent`: parent window. Must not be ``None``; :param `id`: window identifier. A value of -1 indicates a default value; :param `pos`: the control position. A value of (-1, -1) indicates a default position, chosen by either the windowing system or wxPython, depending on platform; :param `size`: the control size. A value of (-1, -1) indicates a default size, chosen by either the windowing system or wxPython, depending on platform; :param `style`: the underlying `wx.Panel` window style; :param `agwStyle`: the AGW-specific window style. 
This can be a combination of the following bits: =========================== =========== ================================================== Window Styles Hex Value Description =========================== =========== ================================================== ``INB_BOTTOM`` 0x1 Place labels below the page area. Available only for L{FlatImageBook}. ``INB_LEFT`` 0x2 Place labels on the left side. Available only for L{FlatImageBook}. ``INB_RIGHT`` 0x4 Place labels on the right side. ``INB_TOP`` 0x8 Place labels above the page area. ``INB_BORDER`` 0x10 Draws a border around L{LabelBook} or L{FlatImageBook}. ``INB_SHOW_ONLY_TEXT`` 0x20 Shows only text labels and no images. Available only for L{LabelBook}. ``INB_SHOW_ONLY_IMAGES`` 0x40 Shows only tab images and no label texts. Available only for L{LabelBook}. ``INB_FIT_BUTTON`` 0x80 Displays a pin button to show/hide the book control. ``INB_DRAW_SHADOW`` 0x100 Draw shadows below the book tabs. Available only for L{LabelBook}. ``INB_USE_PIN_BUTTON`` 0x200 Displays a pin button to show/hide the book control. ``INB_GRADIENT_BACKGROUND`` 0x400 Draws a gradient shading on the tabs background. Available only for L{LabelBook}. ``INB_WEB_HILITE`` 0x800 On mouse hovering, tabs behave like html hyperlinks. Available only for L{LabelBook}. ``INB_NO_RESIZE`` 0x1000 Don't allow resizing of the tab area. ``INB_FIT_LABELTEXT`` 0x2000 Will fit the tab area to the longest text (or text+image if you have images) in all the tabs. =========================== =========== ================================================== :param `name`: the window name. # Label book specific initialization # Add the tab container to the sizer # Initialize the colours maps Creates the image container (LabelContainer) class for L{FlatImageBook}. Sets the colour for the specified parameter. :param `which`: the colour key; :param `colour`: a valid `wx.Colour` instance. :see: L{LabelContainer.SetColour} for a list of valid colour keys. 
Returns the colour for the specified parameter. :param `which`: the colour key. :see: L{LabelContainer.SetColour} for a list of valid colour keys. Handles the ``wx.EVT_SIZE`` event for L{LabelBook}. :param `event`: a `wx.SizeEvent` event to be processed.
| 1.937288
| 2
|
newnnfw/externals/nnapi_test_generator/tests/P_full/addfloat.mod.py
|
kosslab-kr/Tizen-NN-Framework
| 8
|
6628394
|
<reponame>kosslab-kr/Tizen-NN-Framework
# NNAPI test-generator model spec: element-wise ADD of two float32 vectors.
# Model / Input / Output / Int32Scalar / Example come from the generator
# DSL that executes this file — presumably injected into the module
# globals by the harness; verify against the test-generator runner.
# model
model = Model()
i1 = Input("op1", "TENSOR_FLOAT32", "{2}")  # a vector of 2 float32s
i2 = Input("op2", "TENSOR_FLOAT32", "{2}")  # another vector of 2 float32s
# an int32_t scalar bias; NOTE(review): presumably the fused-activation
# code for ADD (0 = no activation) — confirm against the NNAPI ADD spec.
b0 = Int32Scalar("b0", 0)
i3 = Output("op3", "TENSOR_FLOAT32", "{2}")  # result vector of 2 float32s
model = model.Operation("ADD", i1, i2, b0).To(i3)
# Example 1. Input in operand 0,
input0 = {i1:  # input 0
          [1.0, 2.0],
          i2:  # input 1
          [3.0, 4.0]}
output0 = {i3:  # output 0
           [4.0, 6.0]}  # expected: element-wise sum of input 0 and input 1
# Instantiate an example
Example((input0, output0))
|
# model
model = Model()
i1 = Input("op1", "TENSOR_FLOAT32", "{2}") # a vector of 2 float32s
i2 = Input("op2", "TENSOR_FLOAT32", "{2}") # another vector of 2 float32s
b0 = Int32Scalar("b0", 0) # an int32_t scalar bias
i3 = Output("op3", "TENSOR_FLOAT32", "{2}")
model = model.Operation("ADD", i1, i2, b0).To(i3)
# Example 1. Input in operand 0,
input0 = {i1: # input 0
[1.0, 2.0],
i2: # input 1
[3.0, 4.0]}
output0 = {i3: # output 0
[4.0, 6.0]}
# Instantiate an example
Example((input0, output0))
|
en
| 0.606322
|
# model # a vector of 2 float32s # another vector of 2 float32s # an int32_t scalar bias # Example 1. Input in operand 0, # input 0 # input 1 # output 0 # Instantiate an example
| 3.215554
| 3
|
{{cookiecutter.project_slug}}/_/frameworks/Django/application/settings.py
|
ruxi/cookiecutter-ruxi-ds
| 0
|
6628395
|
<filename>{{cookiecutter.project_slug}}/_/frameworks/Django/application/settings.py
"""
Django settings for application project.
"""
from os.path import abspath, dirname, join
from environ import Env
env = Env() # pylint: disable=invalid-name
ENVIRONMENT = env('ENVIRONMENT', default='local')
REVISION = env('REVISION', default=None)
{%- if cookiecutter.monitoring == 'Sentry' %}
SENTRY_DSN = env('SENTRY_DSN', default=None)
if SENTRY_DSN:
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
sentry_sdk.init(
dsn=SENTRY_DSN,
integrations=[DjangoIntegration()],
environment=ENVIRONMENT,
release=REVISION)
{%- endif %}
BASE_DIR = dirname(dirname(abspath(__file__)))
DEBUG = env.bool('DJANGO_DEBUG', default=False)
SECRET_KEY = 'dummy-secret' if DEBUG else env('DJANGO_SECRET_KEY')
ALLOWED_HOSTS = ['*']
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'level': env('DJANGO_LOG_LEVEL', default='INFO'),
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
},
'django.request': {
'handlers': ['console'],
'level': 'ERROR',
},
},
}
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
{%- if cookiecutter.monitoring == 'Datadog' %}
'django_datadog',
{%- endif %}
'django_probes',
]
MIDDLEWARE = [
{%- if cookiecutter.monitoring == 'Datadog' %}
'django_datadog.middleware.DatadogMiddleware',
{%- endif %}
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'application.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'application.wsgi.application'
# Database
DATABASES = {
'default': env.db(
'DJANGO_DATABASE_URL',
{%- if cookiecutter.database == '(none)' %}
default='sqlite://%s' % join(BASE_DIR, 'db.sqlite3')
{%- elif cookiecutter.database == 'Postgres' %}
default='postgres://postgres:postgres@database/postgres'
{%- elif cookiecutter.database == 'MySQL' %}
default='mysql://mysql:mysql@database/mysql'
{%- endif %}
),
}
# Password validation
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.'
'UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.'
'MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.'
'CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.'
'NumericPasswordValidator',
},
]
# Internationalization
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
STATIC_ROOT = join(BASE_DIR, 'static')
STATIC_URL = '/static/'
MEDIA_ROOT = join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
if DEBUG:
DEBUG_TOOLBAR_CONFIG = {
'SHOW_TEMPLATE_CONTEXT': True,
'SHOW_TOOLBAR_CALLBACK': lambda request: True,
}
MIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware']
INSTALLED_APPS += ['debug_toolbar']
if SECRET_KEY == 'testing':
INSTALLED_APPS += ['behave_django']
{%- if cookiecutter.monitoring == 'Datadog' %}
DATADOG_API_KEY = env('DATADOG_API_KEY', default=None)
DATADOG_APP_KEY = env('DATADOG_APP_KEY', default=None)
DATADOG_APP_NAME = env('DATADOG_APP_NAME', default=None)
{%- elif cookiecutter.monitoring == 'NewRelic' %}
NEWRELIC_LICENSE_KEY = env('NEWRELIC_LICENSE_KEY', default=None)
if NEWRELIC_LICENSE_KEY:
import newrelic.agent
newrelic.agent.initialize(join(BASE_DIR, 'newrelic.ini'))
{%- endif %}
|
<filename>{{cookiecutter.project_slug}}/_/frameworks/Django/application/settings.py
"""
Django settings for application project.
"""
from os.path import abspath, dirname, join
from environ import Env
env = Env() # pylint: disable=invalid-name
ENVIRONMENT = env('ENVIRONMENT', default='local')
REVISION = env('REVISION', default=None)
{%- if cookiecutter.monitoring == 'Sentry' %}
SENTRY_DSN = env('SENTRY_DSN', default=None)
if SENTRY_DSN:
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
sentry_sdk.init(
dsn=SENTRY_DSN,
integrations=[DjangoIntegration()],
environment=ENVIRONMENT,
release=REVISION)
{%- endif %}
BASE_DIR = dirname(dirname(abspath(__file__)))
DEBUG = env.bool('DJANGO_DEBUG', default=False)
SECRET_KEY = 'dummy-secret' if DEBUG else env('DJANGO_SECRET_KEY')
ALLOWED_HOSTS = ['*']
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'level': env('DJANGO_LOG_LEVEL', default='INFO'),
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
},
'django.request': {
'handlers': ['console'],
'level': 'ERROR',
},
},
}
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
{%- if cookiecutter.monitoring == 'Datadog' %}
'django_datadog',
{%- endif %}
'django_probes',
]
MIDDLEWARE = [
{%- if cookiecutter.monitoring == 'Datadog' %}
'django_datadog.middleware.DatadogMiddleware',
{%- endif %}
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'application.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'application.wsgi.application'
# Database
DATABASES = {
'default': env.db(
'DJANGO_DATABASE_URL',
{%- if cookiecutter.database == '(none)' %}
default='sqlite://%s' % join(BASE_DIR, 'db.sqlite3')
{%- elif cookiecutter.database == 'Postgres' %}
default='postgres://postgres:postgres@database/postgres'
{%- elif cookiecutter.database == 'MySQL' %}
default='mysql://mysql:mysql@database/mysql'
{%- endif %}
),
}
# Password validation
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.'
'UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.'
'MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.'
'CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.'
'NumericPasswordValidator',
},
]
# Internationalization
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
STATIC_ROOT = join(BASE_DIR, 'static')
STATIC_URL = '/static/'
MEDIA_ROOT = join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
if DEBUG:
DEBUG_TOOLBAR_CONFIG = {
'SHOW_TEMPLATE_CONTEXT': True,
'SHOW_TOOLBAR_CALLBACK': lambda request: True,
}
MIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware']
INSTALLED_APPS += ['debug_toolbar']
if SECRET_KEY == 'testing':
INSTALLED_APPS += ['behave_django']
{%- if cookiecutter.monitoring == 'Datadog' %}
DATADOG_API_KEY = env('DATADOG_API_KEY', default=None)
DATADOG_APP_KEY = env('DATADOG_APP_KEY', default=None)
DATADOG_APP_NAME = env('DATADOG_APP_NAME', default=None)
{%- elif cookiecutter.monitoring == 'NewRelic' %}
NEWRELIC_LICENSE_KEY = env('NEWRELIC_LICENSE_KEY', default=None)
if NEWRELIC_LICENSE_KEY:
import newrelic.agent
newrelic.agent.initialize(join(BASE_DIR, 'newrelic.ini'))
{%- endif %}
|
en
| 0.574124
|
Django settings for application project. # pylint: disable=invalid-name # Application definition # Database # Password validation # Internationalization # Static files (CSS, JavaScript, Images)
| 1.793303
| 2
|
care/facility/migrations/0092_recompute_facility_types.py
|
gigincg/care
| 189
|
6628396
|
<reponame>gigincg/care
# Generated by Django 2.2.11 on 2020-04-15 07:53
from django.db import migrations, transaction

# Facility-type code remaps: old numeric code -> new numeric code.
# Labs move from the 8xx range into 9xx.
OLD_TO_NEW_FACILITY_TYPE_MAP_LABS = {
    850: 950,
}
# Government hospitals move from the 2xx range into 8xx.
OLD_TO_NEW_FACILITY_TYPE_MAP_GOVT_HOSPITALS = {
    200: 800,
    201: 801,
    202: 802,
    203: 803,
    220: 820,
    230: 830,
    231: 831,
    240: 840,
    250: 850,
    260: 860,
    270: 870,
}
def recompute_facility_types(apps, *args):
    """Forward data migration: rewrite old facility type codes as new ones.

    Applies the lab remap first, then the government-hospital remap,
    all inside a single transaction.
    """
    facility_model = apps.get_model('facility', 'Facility')
    with transaction.atomic():
        for remap in (OLD_TO_NEW_FACILITY_TYPE_MAP_LABS,
                      OLD_TO_NEW_FACILITY_TYPE_MAP_GOVT_HOSPITALS):
            affected = facility_model.objects.filter(
                facility_type__in=list(remap))
            for facility in affected:
                facility.facility_type = remap[facility.facility_type]
                facility.save()
def reverse_recompute_facility_types(apps, *args):
    """Backward data migration: undo ``recompute_facility_types``.

    Inverts and applies the government-hospital remap first, then the
    lab remap, all inside a single transaction.
    """
    facility_model = apps.get_model('facility', 'Facility')
    with transaction.atomic():
        for forward in (OLD_TO_NEW_FACILITY_TYPE_MAP_GOVT_HOSPITALS,
                        OLD_TO_NEW_FACILITY_TYPE_MAP_LABS):
            backward = {new: old for old, new in forward.items()}
            for facility in facility_model.objects.filter(
                    facility_type__in=list(backward)):
                facility.facility_type = backward[facility.facility_type]
                facility.save()
class Migration(migrations.Migration):
    # Runs after 0091, which presumably introduced the new facility-type
    # choices — confirm against that migration.
    dependencies = [
        ('facility', '0091_auto_20200415_1158'),
    ]
    operations = [
        # Data-only migration; reverse_code makes it safely reversible.
        migrations.RunPython(
            recompute_facility_types,
            reverse_code=reverse_recompute_facility_types
        )
    ]
|
# Generated by Django 2.2.11 on 2020-04-15 07:53
from django.db import migrations, transaction
OLD_TO_NEW_FACILITY_TYPE_MAP_LABS = {
850: 950,
}
OLD_TO_NEW_FACILITY_TYPE_MAP_GOVT_HOSPITALS = {
200: 800,
201: 801,
202: 802,
203: 803,
220: 820,
230: 830,
231: 831,
240: 840,
250: 850,
260: 860,
270: 870,
}
def recompute_facility_types(apps, *args):
facility_model = apps.get_model('facility', 'Facility')
with transaction.atomic():
for facility in facility_model.objects.filter(
facility_type__in=list(OLD_TO_NEW_FACILITY_TYPE_MAP_LABS.keys())):
facility.facility_type = OLD_TO_NEW_FACILITY_TYPE_MAP_LABS[facility.facility_type]
facility.save()
for facility in facility_model.objects.filter(
facility_type__in=list(OLD_TO_NEW_FACILITY_TYPE_MAP_GOVT_HOSPITALS.keys())):
facility.facility_type = OLD_TO_NEW_FACILITY_TYPE_MAP_GOVT_HOSPITALS[facility.facility_type]
facility.save()
def reverse_recompute_facility_types(apps, *args):
facility_model = apps.get_model('facility', 'Facility')
with transaction.atomic():
reverse_map = {v: k for k, v in OLD_TO_NEW_FACILITY_TYPE_MAP_GOVT_HOSPITALS.items()}
for facility in facility_model.objects.filter(
facility_type__in=list(reverse_map.keys())):
facility.facility_type = reverse_map[facility.facility_type]
facility.save()
reverse_map = {v: k for k, v in OLD_TO_NEW_FACILITY_TYPE_MAP_LABS.items()}
for facility in facility_model.objects.filter(
facility_type__in=list(reverse_map.keys())):
facility.facility_type = reverse_map[facility.facility_type]
facility.save()
class Migration(migrations.Migration):
dependencies = [
('facility', '0091_auto_20200415_1158'),
]
operations = [
migrations.RunPython(
recompute_facility_types,
reverse_code=reverse_recompute_facility_types
)
]
|
en
| 0.698888
|
# Generated by Django 2.2.11 on 2020-04-15 07:53
| 1.988702
| 2
|
mmdet3d/datasets/sunrgbd_dataset.py
|
maskjp/mmdetection3d
| 1
|
6628397
|
# Copyright (c) OpenMMLab. All rights reserved.
from collections import OrderedDict
from os import path as osp
import numpy as np
from mmdet3d.core import show_multi_modality_result, show_result
from mmdet3d.core.bbox import DepthInstance3DBoxes
from mmdet.core import eval_map
from mmdet.datasets import DATASETS
from .custom_3d import Custom3DDataset
from .pipelines import Compose
@DATASETS.register_module()
class SUNRGBDDataset(Custom3DDataset):
r"""SUNRGBD Dataset.
This class serves as the API for experiments on the SUNRGBD Dataset.
See the `download page <http://rgbd.cs.princeton.edu/challenge.html>`_
for data downloading.
Args:
data_root (str): Path of dataset root.
ann_file (str): Path of annotation file.
pipeline (list[dict], optional): Pipeline used for data processing.
Defaults to None.
classes (tuple[str], optional): Classes used in the dataset.
Defaults to None.
modality (dict, optional): Modality to specify the sensor data used
as input. Defaults to None.
box_type_3d (str, optional): Type of 3D box of this dataset.
Based on the `box_type_3d`, the dataset will encapsulate the box
to its original format then converted them to `box_type_3d`.
Defaults to 'Depth' in this dataset. Available options includes
- 'LiDAR': Box in LiDAR coordinates.
- 'Depth': Box in depth coordinates, usually for indoor dataset.
- 'Camera': Box in camera coordinates.
filter_empty_gt (bool, optional): Whether to filter empty GT.
Defaults to True.
test_mode (bool, optional): Whether the dataset is in test mode.
Defaults to False.
"""
CLASSES = ('bed', 'table', 'sofa', 'chair', 'toilet', 'desk', 'dresser',
'night_stand', 'bookshelf', 'bathtub')
def __init__(self,
             data_root,
             ann_file,
             pipeline=None,
             classes=None,
             modality=None,
             box_type_3d='Depth',
             filter_empty_gt=True,
             test_mode=False):
    """Initialize the SUNRGBD dataset.

    Arguments mirror the class docstring; ``modality`` defaults to
    enabling both camera and lidar data when not given.
    """
    # Avoid a shared mutable default argument: build a fresh dict per
    # call instead of dict(use_camera=True, use_lidar=True) in the
    # signature, which would be one object shared by every instance.
    if modality is None:
        modality = dict(use_camera=True, use_lidar=True)
    super().__init__(
        data_root=data_root,
        ann_file=ann_file,
        pipeline=pipeline,
        classes=classes,
        modality=modality,
        box_type_3d=box_type_3d,
        filter_empty_gt=filter_empty_gt,
        test_mode=test_mode)
    # The dataset requires at least one sensor modality to be enabled.
    assert 'use_camera' in self.modality and \
        'use_lidar' in self.modality
    assert self.modality['use_camera'] or self.modality['use_lidar']
def get_data_info(self, index):
    """Get data info according to the given index.

    Args:
        index (int): Index of the sample data to get.

    Returns:
        dict: Data information that will be passed to the data
            preprocessing pipelines, or ``None`` when empty-GT filtering
            rejects the sample. It includes the following keys:

            - sample_idx (str): Sample index.
            - pts_filename (str, optional): Filename of point clouds.
            - file_name (str, optional): Filename of point clouds.
            - img_prefix (str, optional): Prefix of image files.
            - img_info (dict, optional): Image info.
            - depth2img (np.ndarray, optional): Depth-to-image projection.
            - ann_info (dict, optional): Annotation info (train only).
    """
    info = self.data_infos[index]
    sample_idx = info['point_cloud']['lidar_idx']
    # Point-cloud and image entries must describe the same sample.
    assert info['point_cloud']['lidar_idx'] == info['image']['image_idx']
    input_dict = dict(sample_idx=sample_idx)
    if self.modality['use_lidar']:
        pts_filename = osp.join(self.data_root, info['pts_path'])
        input_dict['pts_filename'] = pts_filename
        input_dict['file_name'] = pts_filename
    if self.modality['use_camera']:
        img_filename = osp.join(
            osp.join(self.data_root, 'sunrgbd_trainval'),
            info['image']['image_path'])
        input_dict['img_prefix'] = None
        input_dict['img_info'] = dict(filename=img_filename)
        calib = info['calib']
        rt_mat = calib['Rt']
        # follow Coord3DMode.convert_point — presumably remapping depth
        # axes into the camera convention before composing with the
        # intrinsics K; confirm against Coord3DMode.
        rt_mat = np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]
                           ]) @ rt_mat.transpose(1, 0)
        depth2img = calib['K'] @ rt_mat
        input_dict['depth2img'] = depth2img
    if not self.test_mode:
        annos = self.get_ann_info(index)
        input_dict['ann_info'] = annos
        # Drop samples with no GT boxes when empty-GT filtering is on.
        if self.filter_empty_gt and len(annos['gt_bboxes_3d']) == 0:
            return None
    return input_dict
def get_ann_info(self, index):
    """Get annotation info according to the given index.

    Args:
        index (int): Index of the annotation data to get.

    Returns:
        dict: annotation information consists of the following keys:

            - gt_bboxes_3d (:obj:`DepthInstance3DBoxes`):
                3D ground truth bboxes.
            - gt_labels_3d (np.ndarray): Labels of ground truths.
            - bboxes (np.ndarray, optional): 2D ground truth bboxes,
                only when the camera modality is enabled.
            - labels (np.ndarray, optional): 2D box labels, only when
                the camera modality is enabled.
    """
    # Use index to get the annos, thus the evalhook could also use this api
    info = self.data_infos[index]
    if info['annos']['gt_num'] != 0:
        gt_bboxes_3d = info['annos']['gt_boxes_upright_depth'].astype(
            np.float32)  # k, 6
        # np.int64 replaces np.long: the ``np.long`` alias was removed
        # in NumPy 1.24, and int64 matches its meaning on 64-bit hosts.
        gt_labels_3d = info['annos']['class'].astype(np.int64)
    else:
        gt_bboxes_3d = np.zeros((0, 7), dtype=np.float32)
        gt_labels_3d = np.zeros((0, ), dtype=np.int64)
    # to target box structure
    gt_bboxes_3d = DepthInstance3DBoxes(
        gt_bboxes_3d, origin=(0.5, 0.5, 0.5)).convert_to(self.box_mode_3d)
    anns_results = dict(
        gt_bboxes_3d=gt_bboxes_3d, gt_labels_3d=gt_labels_3d)
    if self.modality['use_camera']:
        # 2D boxes are only provided when the camera modality is enabled.
        if info['annos']['gt_num'] != 0:
            gt_bboxes_2d = info['annos']['bbox'].astype(np.float32)
        else:
            gt_bboxes_2d = np.zeros((0, 4), dtype=np.float32)
        anns_results['bboxes'] = gt_bboxes_2d
        anns_results['labels'] = gt_labels_3d
    return anns_results
def _build_default_pipeline(self):
    """Build the default pipeline for this dataset."""
    load_points = dict(
        type='LoadPointsFromFile',
        coord_type='DEPTH',
        shift_height=False,
        load_dim=6,
        use_dim=[0, 1, 2])
    format_bundle = dict(
        type='DefaultFormatBundle3D',
        class_names=self.CLASSES,
        with_label=False)
    collect = dict(type='Collect3D', keys=['points'])
    steps = [load_points, format_bundle, collect]
    # Image loading goes first when the camera modality is enabled.
    if self.modality['use_camera']:
        steps.insert(0, dict(type='LoadImageFromFile'))
    return Compose(steps)
def show(self, results, out_dir, show=True, pipeline=None):
"""Results visualization.
Args:
results (list[dict]): List of bounding boxes results.
out_dir (str): Output directory of visualization result.
show (bool): Visualize the results online.
pipeline (list[dict], optional): raw data loading for showing.
Default: None.
"""
assert out_dir is not None, 'Expect out_dir, got none.'
pipeline = self._get_pipeline(pipeline)
for i, result in enumerate(results):
data_info = self.data_infos[i]
pts_path = data_info['pts_path']
file_name = osp.split(pts_path)[-1].split('.')[0]
points, img_metas, img = self._extract_data(
i, pipeline, ['points', 'img_metas', 'img'])
# scale colors to [0, 255]
points = points.numpy()
points[:, 3:] *= 255
gt_bboxes = self.get_ann_info(i)['gt_bboxes_3d'].tensor.numpy()
pred_bboxes = result['boxes_3d'].tensor.numpy()
show_result(points, gt_bboxes.copy(), pred_bboxes.copy(), out_dir,
file_name, show)
# multi-modality visualization
if self.modality['use_camera']:
img = img.numpy()
# need to transpose channel to first dim
img = img.transpose(1, 2, 0)
pred_bboxes = DepthInstance3DBoxes(
pred_bboxes, origin=(0.5, 0.5, 0))
gt_bboxes = DepthInstance3DBoxes(
gt_bboxes, origin=(0.5, 0.5, 0))
show_multi_modality_result(
img,
gt_bboxes,
pred_bboxes,
None,
out_dir,
file_name,
box_mode='depth',
img_metas=img_metas,
show=show)
def evaluate(self,
results,
metric=None,
iou_thr=(0.25, 0.5),
iou_thr_2d=(0.5, ),
logger=None,
show=False,
out_dir=None,
pipeline=None):
"""Evaluate.
Evaluation in indoor protocol.
Args:
results (list[dict]): List of results.
metric (str | list[str], optional): Metrics to be evaluated.
Default: None.
iou_thr (list[float], optional): AP IoU thresholds for 3D
evaluation. Default: (0.25, 0.5).
iou_thr_2d (list[float], optional): AP IoU thresholds for 2D
evaluation. Default: (0.5, ).
show (bool, optional): Whether to visualize.
Default: False.
out_dir (str, optional): Path to save the visualization results.
Default: None.
pipeline (list[dict], optional): raw data loading for showing.
Default: None.
Returns:
dict: Evaluation results.
"""
# evaluate 3D detection performance
if isinstance(results[0], dict):
return super().evaluate(results, metric, iou_thr, logger, show,
out_dir, pipeline)
# evaluate 2D detection performance
else:
eval_results = OrderedDict()
annotations = [self.get_ann_info(i) for i in range(len(self))]
iou_thr_2d = (iou_thr_2d) if isinstance(iou_thr_2d,
float) else iou_thr_2d
for iou_thr_2d_single in iou_thr_2d:
mean_ap, _ = eval_map(
results,
annotations,
scale_ranges=None,
iou_thr=iou_thr_2d_single,
dataset=self.CLASSES,
logger=logger)
eval_results['mAP_' + str(iou_thr_2d_single)] = mean_ap
return eval_results
|
# Copyright (c) OpenMMLab. All rights reserved.
from collections import OrderedDict
from os import path as osp
import numpy as np
from mmdet3d.core import show_multi_modality_result, show_result
from mmdet3d.core.bbox import DepthInstance3DBoxes
from mmdet.core import eval_map
from mmdet.datasets import DATASETS
from .custom_3d import Custom3DDataset
from .pipelines import Compose
@DATASETS.register_module()
class SUNRGBDDataset(Custom3DDataset):
r"""SUNRGBD Dataset.
This class serves as the API for experiments on the SUNRGBD Dataset.
See the `download page <http://rgbd.cs.princeton.edu/challenge.html>`_
for data downloading.
Args:
data_root (str): Path of dataset root.
ann_file (str): Path of annotation file.
pipeline (list[dict], optional): Pipeline used for data processing.
Defaults to None.
classes (tuple[str], optional): Classes used in the dataset.
Defaults to None.
modality (dict, optional): Modality to specify the sensor data used
as input. Defaults to None.
box_type_3d (str, optional): Type of 3D box of this dataset.
Based on the `box_type_3d`, the dataset will encapsulate the box
to its original format then converted them to `box_type_3d`.
Defaults to 'Depth' in this dataset. Available options includes
- 'LiDAR': Box in LiDAR coordinates.
- 'Depth': Box in depth coordinates, usually for indoor dataset.
- 'Camera': Box in camera coordinates.
filter_empty_gt (bool, optional): Whether to filter empty GT.
Defaults to True.
test_mode (bool, optional): Whether the dataset is in test mode.
Defaults to False.
"""
CLASSES = ('bed', 'table', 'sofa', 'chair', 'toilet', 'desk', 'dresser',
'night_stand', 'bookshelf', 'bathtub')
def __init__(self,
data_root,
ann_file,
pipeline=None,
classes=None,
modality=dict(use_camera=True, use_lidar=True),
box_type_3d='Depth',
filter_empty_gt=True,
test_mode=False):
super().__init__(
data_root=data_root,
ann_file=ann_file,
pipeline=pipeline,
classes=classes,
modality=modality,
box_type_3d=box_type_3d,
filter_empty_gt=filter_empty_gt,
test_mode=test_mode)
assert 'use_camera' in self.modality and \
'use_lidar' in self.modality
assert self.modality['use_camera'] or self.modality['use_lidar']
def get_data_info(self, index):
"""Get data info according to the given index.
Args:
index (int): Index of the sample data to get.
Returns:
dict: Data information that will be passed to the data
preprocessing pipelines. It includes the following keys:
- sample_idx (str): Sample index.
- pts_filename (str, optional): Filename of point clouds.
- file_name (str, optional): Filename of point clouds.
- img_prefix (str, optional): Prefix of image files.
- img_info (dict, optional): Image info.
- calib (dict, optional): Camera calibration info.
- ann_info (dict): Annotation info.
"""
info = self.data_infos[index]
sample_idx = info['point_cloud']['lidar_idx']
assert info['point_cloud']['lidar_idx'] == info['image']['image_idx']
input_dict = dict(sample_idx=sample_idx)
if self.modality['use_lidar']:
pts_filename = osp.join(self.data_root, info['pts_path'])
input_dict['pts_filename'] = pts_filename
input_dict['file_name'] = pts_filename
if self.modality['use_camera']:
img_filename = osp.join(
osp.join(self.data_root, 'sunrgbd_trainval'),
info['image']['image_path'])
input_dict['img_prefix'] = None
input_dict['img_info'] = dict(filename=img_filename)
calib = info['calib']
rt_mat = calib['Rt']
# follow Coord3DMode.convert_point
rt_mat = np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]
]) @ rt_mat.transpose(1, 0)
depth2img = calib['K'] @ rt_mat
input_dict['depth2img'] = depth2img
if not self.test_mode:
annos = self.get_ann_info(index)
input_dict['ann_info'] = annos
if self.filter_empty_gt and len(annos['gt_bboxes_3d']) == 0:
return None
return input_dict
def get_ann_info(self, index):
"""Get annotation info according to the given index.
Args:
index (int): Index of the annotation data to get.
Returns:
dict: annotation information consists of the following keys:
- gt_bboxes_3d (:obj:`DepthInstance3DBoxes`):
3D ground truth bboxes
- gt_labels_3d (np.ndarray): Labels of ground truths.
- pts_instance_mask_path (str): Path of instance masks.
- pts_semantic_mask_path (str): Path of semantic masks.
"""
# Use index to get the annos, thus the evalhook could also use this api
info = self.data_infos[index]
if info['annos']['gt_num'] != 0:
gt_bboxes_3d = info['annos']['gt_boxes_upright_depth'].astype(
np.float32) # k, 6
gt_labels_3d = info['annos']['class'].astype(np.long)
else:
gt_bboxes_3d = np.zeros((0, 7), dtype=np.float32)
gt_labels_3d = np.zeros((0, ), dtype=np.long)
# to target box structure
gt_bboxes_3d = DepthInstance3DBoxes(
gt_bboxes_3d, origin=(0.5, 0.5, 0.5)).convert_to(self.box_mode_3d)
anns_results = dict(
gt_bboxes_3d=gt_bboxes_3d, gt_labels_3d=gt_labels_3d)
if self.modality['use_camera']:
if info['annos']['gt_num'] != 0:
gt_bboxes_2d = info['annos']['bbox'].astype(np.float32)
else:
gt_bboxes_2d = np.zeros((0, 4), dtype=np.float32)
anns_results['bboxes'] = gt_bboxes_2d
anns_results['labels'] = gt_labels_3d
return anns_results
def _build_default_pipeline(self):
"""Build the default pipeline for this dataset."""
pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='DEPTH',
shift_height=False,
load_dim=6,
use_dim=[0, 1, 2]),
dict(
type='DefaultFormatBundle3D',
class_names=self.CLASSES,
with_label=False),
dict(type='Collect3D', keys=['points'])
]
if self.modality['use_camera']:
pipeline.insert(0, dict(type='LoadImageFromFile'))
return Compose(pipeline)
def show(self, results, out_dir, show=True, pipeline=None):
"""Results visualization.
Args:
results (list[dict]): List of bounding boxes results.
out_dir (str): Output directory of visualization result.
show (bool): Visualize the results online.
pipeline (list[dict], optional): raw data loading for showing.
Default: None.
"""
assert out_dir is not None, 'Expect out_dir, got none.'
pipeline = self._get_pipeline(pipeline)
for i, result in enumerate(results):
data_info = self.data_infos[i]
pts_path = data_info['pts_path']
file_name = osp.split(pts_path)[-1].split('.')[0]
points, img_metas, img = self._extract_data(
i, pipeline, ['points', 'img_metas', 'img'])
# scale colors to [0, 255]
points = points.numpy()
points[:, 3:] *= 255
gt_bboxes = self.get_ann_info(i)['gt_bboxes_3d'].tensor.numpy()
pred_bboxes = result['boxes_3d'].tensor.numpy()
show_result(points, gt_bboxes.copy(), pred_bboxes.copy(), out_dir,
file_name, show)
# multi-modality visualization
if self.modality['use_camera']:
img = img.numpy()
# need to transpose channel to first dim
img = img.transpose(1, 2, 0)
pred_bboxes = DepthInstance3DBoxes(
pred_bboxes, origin=(0.5, 0.5, 0))
gt_bboxes = DepthInstance3DBoxes(
gt_bboxes, origin=(0.5, 0.5, 0))
show_multi_modality_result(
img,
gt_bboxes,
pred_bboxes,
None,
out_dir,
file_name,
box_mode='depth',
img_metas=img_metas,
show=show)
def evaluate(self,
results,
metric=None,
iou_thr=(0.25, 0.5),
iou_thr_2d=(0.5, ),
logger=None,
show=False,
out_dir=None,
pipeline=None):
"""Evaluate.
Evaluation in indoor protocol.
Args:
results (list[dict]): List of results.
metric (str | list[str], optional): Metrics to be evaluated.
Default: None.
iou_thr (list[float], optional): AP IoU thresholds for 3D
evaluation. Default: (0.25, 0.5).
iou_thr_2d (list[float], optional): AP IoU thresholds for 2D
evaluation. Default: (0.5, ).
show (bool, optional): Whether to visualize.
Default: False.
out_dir (str, optional): Path to save the visualization results.
Default: None.
pipeline (list[dict], optional): raw data loading for showing.
Default: None.
Returns:
dict: Evaluation results.
"""
# evaluate 3D detection performance
if isinstance(results[0], dict):
return super().evaluate(results, metric, iou_thr, logger, show,
out_dir, pipeline)
# evaluate 2D detection performance
else:
eval_results = OrderedDict()
annotations = [self.get_ann_info(i) for i in range(len(self))]
iou_thr_2d = (iou_thr_2d) if isinstance(iou_thr_2d,
float) else iou_thr_2d
for iou_thr_2d_single in iou_thr_2d:
mean_ap, _ = eval_map(
results,
annotations,
scale_ranges=None,
iou_thr=iou_thr_2d_single,
dataset=self.CLASSES,
logger=logger)
eval_results['mAP_' + str(iou_thr_2d_single)] = mean_ap
return eval_results
|
en
| 0.64992
|
# Copyright (c) OpenMMLab. All rights reserved. SUNRGBD Dataset. This class serves as the API for experiments on the SUNRGBD Dataset. See the `download page <http://rgbd.cs.princeton.edu/challenge.html>`_ for data downloading. Args: data_root (str): Path of dataset root. ann_file (str): Path of annotation file. pipeline (list[dict], optional): Pipeline used for data processing. Defaults to None. classes (tuple[str], optional): Classes used in the dataset. Defaults to None. modality (dict, optional): Modality to specify the sensor data used as input. Defaults to None. box_type_3d (str, optional): Type of 3D box of this dataset. Based on the `box_type_3d`, the dataset will encapsulate the box to its original format then converted them to `box_type_3d`. Defaults to 'Depth' in this dataset. Available options includes - 'LiDAR': Box in LiDAR coordinates. - 'Depth': Box in depth coordinates, usually for indoor dataset. - 'Camera': Box in camera coordinates. filter_empty_gt (bool, optional): Whether to filter empty GT. Defaults to True. test_mode (bool, optional): Whether the dataset is in test mode. Defaults to False. Get data info according to the given index. Args: index (int): Index of the sample data to get. Returns: dict: Data information that will be passed to the data preprocessing pipelines. It includes the following keys: - sample_idx (str): Sample index. - pts_filename (str, optional): Filename of point clouds. - file_name (str, optional): Filename of point clouds. - img_prefix (str, optional): Prefix of image files. - img_info (dict, optional): Image info. - calib (dict, optional): Camera calibration info. - ann_info (dict): Annotation info. # follow Coord3DMode.convert_point Get annotation info according to the given index. Args: index (int): Index of the annotation data to get. 
Returns: dict: annotation information consists of the following keys: - gt_bboxes_3d (:obj:`DepthInstance3DBoxes`): 3D ground truth bboxes - gt_labels_3d (np.ndarray): Labels of ground truths. - pts_instance_mask_path (str): Path of instance masks. - pts_semantic_mask_path (str): Path of semantic masks. # Use index to get the annos, thus the evalhook could also use this api # k, 6 # to target box structure Build the default pipeline for this dataset. Results visualization. Args: results (list[dict]): List of bounding boxes results. out_dir (str): Output directory of visualization result. show (bool): Visualize the results online. pipeline (list[dict], optional): raw data loading for showing. Default: None. # scale colors to [0, 255] # multi-modality visualization # need to transpose channel to first dim Evaluate. Evaluation in indoor protocol. Args: results (list[dict]): List of results. metric (str | list[str], optional): Metrics to be evaluated. Default: None. iou_thr (list[float], optional): AP IoU thresholds for 3D evaluation. Default: (0.25, 0.5). iou_thr_2d (list[float], optional): AP IoU thresholds for 2D evaluation. Default: (0.5, ). show (bool, optional): Whether to visualize. Default: False. out_dir (str, optional): Path to save the visualization results. Default: None. pipeline (list[dict], optional): raw data loading for showing. Default: None. Returns: dict: Evaluation results. # evaluate 3D detection performance # evaluate 2D detection performance
| 2.251451
| 2
|
tests/kyu_8_tests/test_remove_exclamation_marks.py
|
the-zebulan/CodeWars
| 40
|
6628398
|
<reponame>the-zebulan/CodeWars<filename>tests/kyu_8_tests/test_remove_exclamation_marks.py
import unittest
from katas.kyu_8.remove_exclamation_marks import remove_exclamation_marks
class RemoveExclamationMarksTestCase(unittest.TestCase):
def test_equal_1(self):
self.assertEqual(remove_exclamation_marks('Hello World!'),
'Hello World')
def test_equal_2(self):
self.assertEqual(remove_exclamation_marks('Hello World!!!'),
'Hello World')
def test_equal_3(self):
self.assertEqual(remove_exclamation_marks('Hi! Hello!'), 'Hi Hello')
def test_equal_4(self):
self.assertEqual(remove_exclamation_marks(''), '')
def test_equal_5(self):
self.assertEqual(remove_exclamation_marks('Oh, no!!!'), 'Oh, no')
|
import unittest
from katas.kyu_8.remove_exclamation_marks import remove_exclamation_marks
class RemoveExclamationMarksTestCase(unittest.TestCase):
def test_equal_1(self):
self.assertEqual(remove_exclamation_marks('Hello World!'),
'Hello World')
def test_equal_2(self):
self.assertEqual(remove_exclamation_marks('Hello World!!!'),
'Hello World')
def test_equal_3(self):
self.assertEqual(remove_exclamation_marks('Hi! Hello!'), 'Hi Hello')
def test_equal_4(self):
self.assertEqual(remove_exclamation_marks(''), '')
def test_equal_5(self):
self.assertEqual(remove_exclamation_marks('Oh, no!!!'), 'Oh, no')
|
none
| 1
| 3.707176
| 4
|
|
Server/SendKeys.py
|
And0r-/RaspBox3000
| 0
|
6628399
|
#socket_echo_client.py
import socket
import sys
import kb_map
import keyboard
import time
NULL_CHAR = chr(0)
release_key = (NULL_CHAR*8).encode()
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect the socket to the port where the server is listening
server_address = ('192.168.50.242', 10000)
server_address2 = ('192.168.50.117', 10000)
print('connecting to {} port {}'.format(*server_address))
sock.connect(server_address)
print('connecting to {} port {}'.format(*server_address2))
sock2.connect(server_address2)
def send_key(key):
try:
# Send data
message = (chr(kb_map.convert(key)[0])+NULL_CHAR+chr(kb_map.convert(key)[1])+NULL_CHAR*5).encode()
print('sending {!r}'.format(message))
sock.sendall(message)
sock.sendall(release_key)
sock2.sendall(message)
sock2.sendall(release_key)
# Look for the response
amount_received = 0
amount_expected = len(message)
while amount_received < amount_expected:
data = sock.recv(16)
amount_received += len(data)
print('received {!r}'.format(data))
finally:
print('gesendet')
def key_press(key):
send_key(key.name)
keyboard.on_press(key_press)
while True:
time.sleep(1)
|
#socket_echo_client.py
import socket
import sys
import kb_map
import keyboard
import time
NULL_CHAR = chr(0)
release_key = (NULL_CHAR*8).encode()
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect the socket to the port where the server is listening
server_address = ('192.168.50.242', 10000)
server_address2 = ('192.168.50.117', 10000)
print('connecting to {} port {}'.format(*server_address))
sock.connect(server_address)
print('connecting to {} port {}'.format(*server_address2))
sock2.connect(server_address2)
def send_key(key):
try:
# Send data
message = (chr(kb_map.convert(key)[0])+NULL_CHAR+chr(kb_map.convert(key)[1])+NULL_CHAR*5).encode()
print('sending {!r}'.format(message))
sock.sendall(message)
sock.sendall(release_key)
sock2.sendall(message)
sock2.sendall(release_key)
# Look for the response
amount_received = 0
amount_expected = len(message)
while amount_received < amount_expected:
data = sock.recv(16)
amount_received += len(data)
print('received {!r}'.format(data))
finally:
print('gesendet')
def key_press(key):
send_key(key.name)
keyboard.on_press(key_press)
while True:
time.sleep(1)
|
en
| 0.688585
|
#socket_echo_client.py # Create a TCP/IP socket # Connect the socket to the port where the server is listening # Send data # Look for the response
| 3.038043
| 3
|
tests/test_sugar.py
|
DeJona/busypie
| 13
|
6628400
|
from busypie import given, wait, ONE_SECOND, wait_at_most, MILLISECOND
def test_start_with_given():
assert given() == wait()
assert given().wait().at_most(ONE_SECOND) == wait().at_most(ONE_SECOND)
def test_combine_wait_and_at_most():
assert wait().at_most(ONE_SECOND) == wait_at_most(ONE_SECOND)
assert wait().at_most(2, MILLISECOND) == wait_at_most(2, MILLISECOND)
assert given().wait_at_most(2, MILLISECOND) == wait().at_most(2, MILLISECOND)
|
from busypie import given, wait, ONE_SECOND, wait_at_most, MILLISECOND
def test_start_with_given():
assert given() == wait()
assert given().wait().at_most(ONE_SECOND) == wait().at_most(ONE_SECOND)
def test_combine_wait_and_at_most():
assert wait().at_most(ONE_SECOND) == wait_at_most(ONE_SECOND)
assert wait().at_most(2, MILLISECOND) == wait_at_most(2, MILLISECOND)
assert given().wait_at_most(2, MILLISECOND) == wait().at_most(2, MILLISECOND)
|
none
| 1
| 2.89507
| 3
|
|
tests/test_function_call_assignment.py
|
PseuToPy/PseuToPy
| 6
|
6628401
|
<gh_stars>1-10
from tests.utils import check_ast
class TestFunctionCallAssignment:
def test_function_call_assignment_input(self, pseutopy):
pseudo_str = "set a to the result of input (\"Hello\", myVar)"
python_str = "a = input(\"Hello\", myVar)"
assert check_ast(pseutopy, python_str, pseudo_str)
def test_function_call_assignment_input_int(self, pseutopy):
pseudo_str = "set a to the result of input integer (1)"
python_str = "a = int(input(1))"
assert check_ast(pseutopy, python_str, pseudo_str)
def test_function_call_assignment_input_float(self, pseutopy):
pseudo_str = "set a to the result of input number (\"Hello\")"
python_str = "a = float(input(\"Hello\"))"
assert check_ast(pseutopy, python_str, pseudo_str)
def test_function_call_assignment_with_function(self, pseutopy):
pseudo_str = """
set a to the result of call function foo with parameter 10
set b to the result of call function bar with parameters 0, 10
set c to the result of call function foobar with parameter myVar
set d to the result of call function fizzbuzz with parameters var1, var2
set e to the result of call function fizbuz
"""
python_str = """
a = foo(10)
b = bar(0, 10)
c = foobar(myVar)
d = fizzbuzz(var1, var2)
e = fizbuz()
"""
assert check_ast(pseutopy, python_str, pseudo_str)
def test_function_call_assignment_with_TestList(self, pseutopy):
pseudo_str = """
set a to the result of range(10)
set b to the result of range(0, 10)
set c to the result of range(var1)
set d to the result of range(var1, var2)
"""
python_str = """
a = range(10)
b = range(0, 10)
c = range(var1)
d = range(var1, var2)
"""
assert check_ast(pseutopy, python_str, pseudo_str)
|
from tests.utils import check_ast
class TestFunctionCallAssignment:
def test_function_call_assignment_input(self, pseutopy):
pseudo_str = "set a to the result of input (\"Hello\", myVar)"
python_str = "a = input(\"Hello\", myVar)"
assert check_ast(pseutopy, python_str, pseudo_str)
def test_function_call_assignment_input_int(self, pseutopy):
pseudo_str = "set a to the result of input integer (1)"
python_str = "a = int(input(1))"
assert check_ast(pseutopy, python_str, pseudo_str)
def test_function_call_assignment_input_float(self, pseutopy):
pseudo_str = "set a to the result of input number (\"Hello\")"
python_str = "a = float(input(\"Hello\"))"
assert check_ast(pseutopy, python_str, pseudo_str)
def test_function_call_assignment_with_function(self, pseutopy):
pseudo_str = """
set a to the result of call function foo with parameter 10
set b to the result of call function bar with parameters 0, 10
set c to the result of call function foobar with parameter myVar
set d to the result of call function fizzbuzz with parameters var1, var2
set e to the result of call function fizbuz
"""
python_str = """
a = foo(10)
b = bar(0, 10)
c = foobar(myVar)
d = fizzbuzz(var1, var2)
e = fizbuz()
"""
assert check_ast(pseutopy, python_str, pseudo_str)
def test_function_call_assignment_with_TestList(self, pseutopy):
pseudo_str = """
set a to the result of range(10)
set b to the result of range(0, 10)
set c to the result of range(var1)
set d to the result of range(var1, var2)
"""
python_str = """
a = range(10)
b = range(0, 10)
c = range(var1)
d = range(var1, var2)
"""
assert check_ast(pseutopy, python_str, pseudo_str)
|
en
| 0.279852
|
set a to the result of call function foo with parameter 10 set b to the result of call function bar with parameters 0, 10 set c to the result of call function foobar with parameter myVar set d to the result of call function fizzbuzz with parameters var1, var2 set e to the result of call function fizbuz a = foo(10) b = bar(0, 10) c = foobar(myVar) d = fizzbuzz(var1, var2) e = fizbuz() set a to the result of range(10) set b to the result of range(0, 10) set c to the result of range(var1) set d to the result of range(var1, var2) a = range(10) b = range(0, 10) c = range(var1) d = range(var1, var2)
| 3.296861
| 3
|
projects/tests.py
|
Waithera-m/project_rater
| 0
|
6628402
|
<gh_stars>0
from django.test import TestCase
from .models import Profile, Tags, Project, Votes
from django.contrib.auth.models import User
import factory
from django.db.models import signals
# Create your tests here.
class ProfileModelTests(TestCase):
"""
class supports the creation of tests to test model behavior
"""
@factory.django.mute_signals(signals.pre_save, signals.post_save)
def setUp(self):
"""
method defines the object to be created and instructions to be executed before each test
"""
self.user = User(username='peaches', first_name='name', last_name='other', email='<EMAIL>', password='<PASSWORD>')
self.user.save()
self.profile = Profile(user=self.user, bio='something boring', location='Fiji', profile_pic='base.jpg')
# def tearDown(self):
# """
# method returns database to pristine condition after all tests run
# """
# Profile.objects.all().delete()
# User.objects.all().delete()
def test_instance(self):
"""
method checks if a profile object is initialized properly
"""
self.assertTrue(isinstance(self.profile, Profile))
def test_save_profile(self):
"""
method tests if added profile is saved
"""
self.profile.save_profile()
profiles = Profile.objects.all()
self.assertTrue(len(profiles) > 0)
def test_update_profile(self):
"""
method test if one can update a profile
"""
profile = Profile.objects.create(user=self.user, bio='something boring', location='Fiji', profile_pic='base.jpg')
Profile.objects.filter(id=profile.id).update(bio='a tad bit interesting')
profile.update_profile()
self.assertEqual(profile.bio, 'a tad bit interesting')
def test_delete_profile(self):
"""
method tests delete class method
"""
usertrois = User(username='peachesaf', first_name='name2', last_name='other3', email='<EMAIL>', password='<PASSWORD>')
usertrois.save()
profileuno = Profile.objects.create(user=usertrois, bio='something frustrating', location='Fiji', profile_pic='base.jpg')
Profile.objects.filter(pk=profileuno.user.pk).delete()
profileuno.delete_profile()
profiles = Profile.objects.all()
self.assertTrue(len(profiles) == 0)
class TagsModelTests(TestCase):
"""
class facilitates the creation of unit tests for tags model's behavior
"""
def setUp(self):
"""
method defines the properties of tags' objects before each test
"""
self.tags = Tags(name='animation')
def test_instance(self):
"""
method checks if a tags object is initialized properly
"""
self.assertIsInstance(self.tags, Tags)
def test_save_tag(self):
"""
method checks if an added tag is saved
"""
self.tags.save_tags()
tags = Tags.objects.all()
self.assertTrue(len(tags) > 0)
def test_update_tag(self):
"""
method check if saved tag can be updated
"""
self.tags.save_tags()
Tags.objects.filter(pk=self.tags.pk).update(name='CSS3')
self.tags.update_tags()
self.assertEqual(self.tags.name, 'CSS3')
def test_delete_tag(self):
"""
method checks if saved tag can be deleted
"""
self.tags.save_tags()
self.tags.delete_tags()
tags = Tags.objects.all()
self.assertTrue(len(tags) == 0)
class ProjectModelTests(TestCase):
"""
class facilitates the creation of test units for the project model
"""
@factory.django.mute_signals(signals.pre_save, signals.post_save)
def setUp(self):
"""
method defines the objects to be created before each test
"""
self.tags = Tags.objects.create(name='HTML5')
self.userz = User.objects.create(username='fuzzy', first_name='namethree', last_name='othername', email='<EMAIL>', password='<PASSWORD>')
self.user_profile = Profile.objects.create(user=self.userz, bio='something boring', location='Fiji', profile_pic='base.jpg')
self.project = Project.objects.create(title='partage',creator=self.user_profile, project_image='partage.jpg', description='possibly blogging', live_link="<EMAIL>")
self.project.tags.add(self.tags)
def tearDown(self):
"""
method ensures that the database is pristine after all tests run
"""
Tags.objects.all().delete()
Project.objects.all().delete()
Profile.objects.all().delete()
def test_instance(self):
"""
method tests if project object is initialized properly
"""
self.assertIsInstance(self.project, Project)
def test_save_project(self):
"""
method tests if an added project is saved
"""
self.project.save_project()
projects = Project.objects.all()
self.assertTrue(len(projects) > 0)
def test_update_project(self):
"""
method checks if a saved project can be updated
"""
self.project.save_project()
Project.objects.filter(pk=self.project.pk).update(title='pomodoro')
self.project.update_project()
self.assertEqual(self.project.title, 'pomodoro')
def test_delete_project(self):
"""
method tests if a saved object can be deleted
"""
self.project.save_project()
self.project.delete_project()
projects = Project.objects.all()
self.assertTrue(len(projects) == 0)
def test_search_by_title(self):
"""
test checks if the search by title class method returns expected results
"""
self.project.save_project()
found_project = Project.search_by_title(self.project.title)
initial_project = Project.objects.filter(pk=self.project.pk)
self.assertQuerysetEqual(found_project, initial_project, transform=lambda x:x)
class VotesModelTests(TestCase):
"""
class facilitates the creation of Votes model's test units
"""
def setUp(self):
"""
class defines the properties of votes object to be created before each test
"""
self.user_two = User.objects.create(username='fuzzy', first_name='namethree', last_name='othername', email='<EMAIL>', password='<PASSWORD>')
self.rater = Profile.objects.create(user=self.user_two, bio='something boring', location='Fiji', profile_pic='base.jpg')
self.project = Project.objects.create(title='partage',creator=self.rater, project_image='partage.jpg', description='possibly blogging', live_link="<EMAIL>")
self.new_rating = Votes.objects.create(design=2, usability=6, content=5, project=self.project, rater=self.rater)
def test_instance(self):
"""
method tests if a rating object is initialized properly
"""
self.assertIsInstance(self.new_rating, Votes)
|
from django.test import TestCase
from .models import Profile, Tags, Project, Votes
from django.contrib.auth.models import User
import factory
from django.db.models import signals
# Create your tests here.
class ProfileModelTests(TestCase):
"""
class supports the creation of tests to test model behavior
"""
@factory.django.mute_signals(signals.pre_save, signals.post_save)
def setUp(self):
"""
method defines the object to be created and instructions to be executed before each test
"""
self.user = User(username='peaches', first_name='name', last_name='other', email='<EMAIL>', password='<PASSWORD>')
self.user.save()
self.profile = Profile(user=self.user, bio='something boring', location='Fiji', profile_pic='base.jpg')
# def tearDown(self):
# """
# method returns database to pristine condition after all tests run
# """
# Profile.objects.all().delete()
# User.objects.all().delete()
def test_instance(self):
"""
method checks if a profile object is initialized properly
"""
self.assertTrue(isinstance(self.profile, Profile))
def test_save_profile(self):
"""
method tests if added profile is saved
"""
self.profile.save_profile()
profiles = Profile.objects.all()
self.assertTrue(len(profiles) > 0)
def test_update_profile(self):
"""
method test if one can update a profile
"""
profile = Profile.objects.create(user=self.user, bio='something boring', location='Fiji', profile_pic='base.jpg')
Profile.objects.filter(id=profile.id).update(bio='a tad bit interesting')
profile.update_profile()
self.assertEqual(profile.bio, 'a tad bit interesting')
def test_delete_profile(self):
"""
method tests delete class method
"""
usertrois = User(username='peachesaf', first_name='name2', last_name='other3', email='<EMAIL>', password='<PASSWORD>')
usertrois.save()
profileuno = Profile.objects.create(user=usertrois, bio='something frustrating', location='Fiji', profile_pic='base.jpg')
Profile.objects.filter(pk=profileuno.user.pk).delete()
profileuno.delete_profile()
profiles = Profile.objects.all()
self.assertTrue(len(profiles) == 0)
class TagsModelTests(TestCase):
"""
class facilitates the creation of unit tests for tags model's behavior
"""
def setUp(self):
"""
method defines the properties of tags' objects before each test
"""
self.tags = Tags(name='animation')
def test_instance(self):
"""
method checks if a tags object is initialized properly
"""
self.assertIsInstance(self.tags, Tags)
def test_save_tag(self):
"""
method checks if an added tag is saved
"""
self.tags.save_tags()
tags = Tags.objects.all()
self.assertTrue(len(tags) > 0)
def test_update_tag(self):
"""
method check if saved tag can be updated
"""
self.tags.save_tags()
Tags.objects.filter(pk=self.tags.pk).update(name='CSS3')
self.tags.update_tags()
self.assertEqual(self.tags.name, 'CSS3')
def test_delete_tag(self):
"""
method checks if saved tag can be deleted
"""
self.tags.save_tags()
self.tags.delete_tags()
tags = Tags.objects.all()
self.assertTrue(len(tags) == 0)
class ProjectModelTests(TestCase):
"""
class facilitates the creation of test units for the project model
"""
@factory.django.mute_signals(signals.pre_save, signals.post_save)
def setUp(self):
"""
method defines the objects to be created before each test
"""
self.tags = Tags.objects.create(name='HTML5')
self.userz = User.objects.create(username='fuzzy', first_name='namethree', last_name='othername', email='<EMAIL>', password='<PASSWORD>')
self.user_profile = Profile.objects.create(user=self.userz, bio='something boring', location='Fiji', profile_pic='base.jpg')
self.project = Project.objects.create(title='partage',creator=self.user_profile, project_image='partage.jpg', description='possibly blogging', live_link="<EMAIL>")
self.project.tags.add(self.tags)
def tearDown(self):
"""
method ensures that the database is pristine after all tests run
"""
Tags.objects.all().delete()
Project.objects.all().delete()
Profile.objects.all().delete()
def test_instance(self):
"""
method tests if project object is initialized properly
"""
self.assertIsInstance(self.project, Project)
def test_save_project(self):
"""
method tests if an added project is saved
"""
self.project.save_project()
projects = Project.objects.all()
self.assertTrue(len(projects) > 0)
def test_update_project(self):
"""
method checks if a saved project can be updated
"""
self.project.save_project()
Project.objects.filter(pk=self.project.pk).update(title='pomodoro')
self.project.update_project()
self.assertEqual(self.project.title, 'pomodoro')
def test_delete_project(self):
"""
method tests if a saved object can be deleted
"""
self.project.save_project()
self.project.delete_project()
projects = Project.objects.all()
self.assertTrue(len(projects) == 0)
def test_search_by_title(self):
"""
test checks if the search by title class method returns expected results
"""
self.project.save_project()
found_project = Project.search_by_title(self.project.title)
initial_project = Project.objects.filter(pk=self.project.pk)
self.assertQuerysetEqual(found_project, initial_project, transform=lambda x:x)
class VotesModelTests(TestCase):
"""
class facilitates the creation of Votes model's test units
"""
def setUp(self):
"""
class defines the properties of votes object to be created before each test
"""
self.user_two = User.objects.create(username='fuzzy', first_name='namethree', last_name='othername', email='<EMAIL>', password='<PASSWORD>')
self.rater = Profile.objects.create(user=self.user_two, bio='something boring', location='Fiji', profile_pic='base.jpg')
self.project = Project.objects.create(title='partage',creator=self.rater, project_image='partage.jpg', description='possibly blogging', live_link="<EMAIL>")
self.new_rating = Votes.objects.create(design=2, usability=6, content=5, project=self.project, rater=self.rater)
def test_instance(self):
"""
method tests if a rating object is initialized properly
"""
self.assertIsInstance(self.new_rating, Votes)
|
en
| 0.821812
|
# Create your tests here. class supports the creation of tests to test model behavior method defines the object to be created and instructions to be executed before each test # def tearDown(self): # """ # method returns database to pristine condition after all tests run # """ # Profile.objects.all().delete() # User.objects.all().delete() method checks if a profile object is initialized properly method tests if added profile is saved method test if one can update a profile method tests delete class method class facilitates the creation of unit tests for tags model's behavior method defines the properties of tags' objects before each test method checks if a tags object is initialized properly method checks if an added tag is saved method check if saved tag can be updated method checks if saved tag can be deleted class facilitates the creation of test units for the project model method defines the objects to be created before each test method ensures that the database is pristine after all tests run method tests if project object is initialized properly method tests if an added project is saved method checks if a saved project can be updated method tests if a saved object can be deleted test checks if the search by title class method returns expected results class facilitates the creation of Votes model's test units class defines the properties of votes object to be created before each test method tests if a rating object is initialized properly
| 2.684822
| 3
|
chapter_3.py
|
jeremyn/python-machine-learning-book
| 7
|
6628403
|
<reponame>jeremyn/python-machine-learning-book
# Copyright <NAME>.
# Released under the MIT license. See included LICENSE.txt.
#
# Almost entirely copied from code created by <NAME> released under
# the MIT license. See included LICENSE.raschka.txt.
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import (
LogisticRegression,
Perceptron,
)
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn import datasets
from visualization import plot_decision_regions
def gini(p):
return 2 * p * (1-p)
def entropy(p):
return -p * np.log2(p) - (1-p) * np.log2(1-p)
def error(p):
return 1 - max(p, 1-p)
def plot_impurity_indexes():
probs = np.arange(0.0, 1.0, 0.01)
entropies = [entropy(p) if p != 0 else None for p in probs]
scaled_entropies = [e * 0.5 if e is not None else None for e in entropies]
errors = [error(p) for p in probs]
plt.figure()
ax = plt.subplot(111)
plots = (
(entropies, 'Entropy', '-', 'black'),
(scaled_entropies, 'Entropy (scaled)', '-', 'lightgray'),
(gini(probs), 'Gini Impurity', '--', 'red'),
(errors, 'Misclassification Error', '-.', 'green'),
)
for y, label, linestyle, color in plots:
ax.plot(probs, y, label=label, linestyle=linestyle, lw=2, color=color)
ax.legend(
loc='upper center',
bbox_to_anchor=(0.5, 1.15),
ncol=3,
fancybox=True,
shadow=False,
)
ax.axhline(y=0.5, linewidth=1, color='k', linestyle='--')
ax.axhline(y=1.0, linewidth=1, color='k', linestyle='--')
plt.ylim([0, 1.1])
plt.xlabel('p(i=1)')
plt.ylabel('Impurity Index')
plt.show()
def plot_iris_with_classifier(clf, print_accuracy=False, standardize=True):
iris = datasets.load_iris()
X = iris.data[:, [2, 3]]
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(
X,
y,
test_size=0.3,
random_state=0,
)
if standardize:
sc = StandardScaler()
sc.fit(X_train)
X_train = sc.transform(X_train)
X_test = sc.transform(X_test)
units = 'standardized'
else:
units = 'cm'
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
if print_accuracy:
print("Misclassified samples: %d" % (y_test != y_pred).sum())
print("Accuracy: %.2f" % accuracy_score(y_test, y_pred))
X_combined = np.vstack((X_train, X_test))
y_combined = np.hstack((y_train, y_test))
plot_decision_regions(
X=X_combined,
y=y_combined,
classifier=clf,
test_index=range(105, 150),
)
plt.xlabel("petal length [%s]" % units)
plt.ylabel("petal width [%s]" % units)
plt.legend(loc='upper left')
plt.show()
def plot_lr_regularization():
iris = datasets.load_iris()
X = iris.data[:, [2, 3]]
y = iris.target
X_train, _, y_train, _ = train_test_split(
X,
y,
test_size=0.3,
random_state=0,
)
sc = StandardScaler()
sc.fit(X_train)
X_train_std = sc.transform(X_train)
weights = []
params = []
for c in np.logspace(-5, 4, num=10):
lr = LogisticRegression(C=c, random_state=0)
lr.fit(X_train_std, y_train)
weights.append(lr.coef_[1])
params.append(c)
weights = np.array(weights)
plt.plot(params, weights[:, 0], label='petal length')
plt.plot(params, weights[:, 1], linestyle='--', label='petal width')
plt.ylabel('weight coefficient')
plt.xlabel('C')
plt.legend(loc='upper left')
plt.xscale('log')
plt.show()
def sigmoid(z):
return 1.0 / (1.0 + np.exp(-z))
def plot_sigmoid():
z = np.arange(-7, 7, 0.1)
phi_z = sigmoid(z)
plt.plot(z, phi_z)
plt.axvline(0.0, color='k')
plt.ylim(-0.1, 1.1)
plt.xlabel('z')
plt.ylabel('$\phi (z)$')
plt.yticks([0.0, 0.5, 1.0])
ax = plt.gca()
ax.yaxis.grid(True)
plt.show()
def plot_xor():
np.random.seed(0)
X_xor = np.random.randn(200, 2)
y_xor = np.logical_xor(X_xor[:, 0] > 0, X_xor[:, 1] > 0)
y_xor = np.where(y_xor, 1, -1)
svm = SVC(kernel='rbf', random_state=0, gamma=0.1, C=10.0)
svm.fit(X_xor, y_xor)
plot_decision_regions(X_xor, y_xor, classifier=svm)
plt.legend(loc='upper left')
plt.show()
if __name__ == '__main__':
# clf = Perceptron(n_iter=40, eta0=0.1, random_state=0)
# clf = LogisticRegression(C=1000.0, random_state=0)
# clf = SVC(kernel='linear', C=1.0, random_state=0)
# clf = SVC(kernel='rbf', random_state=0, gamma=0.2, C=1.0)
# clf = SVC(kernel='rbf', random_state=0, gamma=100.0, C=1.0)
clf = KNeighborsClassifier(n_neighbors=5, p=2, metric='minkowski')
plot_iris_with_classifier(clf)
# clf = DecisionTreeClassifier(criterion='entropy', max_depth=3, random_state=0)
# clf = RandomForestClassifier(criterion='entropy', n_estimators=10, random_state=1, n_jobs=2)
# plot_iris_with_classifier(clf, standardize=False)
# plot_sigmoid()
# plot_lr_regularization()
# plot_xor()
# plot_impurity_indexes()
|
# Copyright <NAME>.
# Released under the MIT license. See included LICENSE.txt.
#
# Almost entirely copied from code created by <NAME> released under
# the MIT license. See included LICENSE.raschka.txt.
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import (
LogisticRegression,
Perceptron,
)
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn import datasets
from visualization import plot_decision_regions
def gini(p):
return 2 * p * (1-p)
def entropy(p):
return -p * np.log2(p) - (1-p) * np.log2(1-p)
def error(p):
return 1 - max(p, 1-p)
def plot_impurity_indexes():
probs = np.arange(0.0, 1.0, 0.01)
entropies = [entropy(p) if p != 0 else None for p in probs]
scaled_entropies = [e * 0.5 if e is not None else None for e in entropies]
errors = [error(p) for p in probs]
plt.figure()
ax = plt.subplot(111)
plots = (
(entropies, 'Entropy', '-', 'black'),
(scaled_entropies, 'Entropy (scaled)', '-', 'lightgray'),
(gini(probs), 'Gini Impurity', '--', 'red'),
(errors, 'Misclassification Error', '-.', 'green'),
)
for y, label, linestyle, color in plots:
ax.plot(probs, y, label=label, linestyle=linestyle, lw=2, color=color)
ax.legend(
loc='upper center',
bbox_to_anchor=(0.5, 1.15),
ncol=3,
fancybox=True,
shadow=False,
)
ax.axhline(y=0.5, linewidth=1, color='k', linestyle='--')
ax.axhline(y=1.0, linewidth=1, color='k', linestyle='--')
plt.ylim([0, 1.1])
plt.xlabel('p(i=1)')
plt.ylabel('Impurity Index')
plt.show()
def plot_iris_with_classifier(clf, print_accuracy=False, standardize=True):
iris = datasets.load_iris()
X = iris.data[:, [2, 3]]
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(
X,
y,
test_size=0.3,
random_state=0,
)
if standardize:
sc = StandardScaler()
sc.fit(X_train)
X_train = sc.transform(X_train)
X_test = sc.transform(X_test)
units = 'standardized'
else:
units = 'cm'
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
if print_accuracy:
print("Misclassified samples: %d" % (y_test != y_pred).sum())
print("Accuracy: %.2f" % accuracy_score(y_test, y_pred))
X_combined = np.vstack((X_train, X_test))
y_combined = np.hstack((y_train, y_test))
plot_decision_regions(
X=X_combined,
y=y_combined,
classifier=clf,
test_index=range(105, 150),
)
plt.xlabel("petal length [%s]" % units)
plt.ylabel("petal width [%s]" % units)
plt.legend(loc='upper left')
plt.show()
def plot_lr_regularization():
iris = datasets.load_iris()
X = iris.data[:, [2, 3]]
y = iris.target
X_train, _, y_train, _ = train_test_split(
X,
y,
test_size=0.3,
random_state=0,
)
sc = StandardScaler()
sc.fit(X_train)
X_train_std = sc.transform(X_train)
weights = []
params = []
for c in np.logspace(-5, 4, num=10):
lr = LogisticRegression(C=c, random_state=0)
lr.fit(X_train_std, y_train)
weights.append(lr.coef_[1])
params.append(c)
weights = np.array(weights)
plt.plot(params, weights[:, 0], label='petal length')
plt.plot(params, weights[:, 1], linestyle='--', label='petal width')
plt.ylabel('weight coefficient')
plt.xlabel('C')
plt.legend(loc='upper left')
plt.xscale('log')
plt.show()
def sigmoid(z):
return 1.0 / (1.0 + np.exp(-z))
def plot_sigmoid():
z = np.arange(-7, 7, 0.1)
phi_z = sigmoid(z)
plt.plot(z, phi_z)
plt.axvline(0.0, color='k')
plt.ylim(-0.1, 1.1)
plt.xlabel('z')
plt.ylabel('$\phi (z)$')
plt.yticks([0.0, 0.5, 1.0])
ax = plt.gca()
ax.yaxis.grid(True)
plt.show()
def plot_xor():
np.random.seed(0)
X_xor = np.random.randn(200, 2)
y_xor = np.logical_xor(X_xor[:, 0] > 0, X_xor[:, 1] > 0)
y_xor = np.where(y_xor, 1, -1)
svm = SVC(kernel='rbf', random_state=0, gamma=0.1, C=10.0)
svm.fit(X_xor, y_xor)
plot_decision_regions(X_xor, y_xor, classifier=svm)
plt.legend(loc='upper left')
plt.show()
if __name__ == '__main__':
# clf = Perceptron(n_iter=40, eta0=0.1, random_state=0)
# clf = LogisticRegression(C=1000.0, random_state=0)
# clf = SVC(kernel='linear', C=1.0, random_state=0)
# clf = SVC(kernel='rbf', random_state=0, gamma=0.2, C=1.0)
# clf = SVC(kernel='rbf', random_state=0, gamma=100.0, C=1.0)
clf = KNeighborsClassifier(n_neighbors=5, p=2, metric='minkowski')
plot_iris_with_classifier(clf)
# clf = DecisionTreeClassifier(criterion='entropy', max_depth=3, random_state=0)
# clf = RandomForestClassifier(criterion='entropy', n_estimators=10, random_state=1, n_jobs=2)
# plot_iris_with_classifier(clf, standardize=False)
# plot_sigmoid()
# plot_lr_regularization()
# plot_xor()
# plot_impurity_indexes()
|
en
| 0.388531
|
# Copyright <NAME>. # Released under the MIT license. See included LICENSE.txt. # # Almost entirely copied from code created by <NAME> released under # the MIT license. See included LICENSE.raschka.txt. # clf = Perceptron(n_iter=40, eta0=0.1, random_state=0) # clf = LogisticRegression(C=1000.0, random_state=0) # clf = SVC(kernel='linear', C=1.0, random_state=0) # clf = SVC(kernel='rbf', random_state=0, gamma=0.2, C=1.0) # clf = SVC(kernel='rbf', random_state=0, gamma=100.0, C=1.0) # clf = DecisionTreeClassifier(criterion='entropy', max_depth=3, random_state=0) # clf = RandomForestClassifier(criterion='entropy', n_estimators=10, random_state=1, n_jobs=2) # plot_iris_with_classifier(clf, standardize=False) # plot_sigmoid() # plot_lr_regularization() # plot_xor() # plot_impurity_indexes()
| 2.564527
| 3
|
src/third_party/skia/infra/bots/assets/android_ndk_linux/create.py
|
rhencke/engine
| 54
|
6628404
|
<filename>src/third_party/skia/infra/bots/assets/android_ndk_linux/create.py
#!/usr/bin/env python
#
# Copyright 2016 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Create the asset."""
import argparse
import glob
import os.path
import shutil
import subprocess
NDK_VER = "android-ndk-r21d"
NDK_URL = \
"https://dl.google.com/android/repository/%s-linux-x86_64.zip" % NDK_VER
def create_asset(target_dir):
"""Create the asset."""
subprocess.check_call(["curl", NDK_URL, "-o", "ndk.zip"])
subprocess.check_call(["unzip", "ndk.zip", "-d", target_dir])
for f in glob.glob(os.path.join(target_dir, NDK_VER, "*")):
shutil.move(f, target_dir)
subprocess.check_call(["rm", "ndk.zip"])
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--target_dir', '-t', required=True)
args = parser.parse_args()
create_asset(args.target_dir)
if __name__ == '__main__':
main()
|
<filename>src/third_party/skia/infra/bots/assets/android_ndk_linux/create.py
#!/usr/bin/env python
#
# Copyright 2016 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Create the asset."""
import argparse
import glob
import os.path
import shutil
import subprocess
NDK_VER = "android-ndk-r21d"
NDK_URL = \
"https://dl.google.com/android/repository/%s-linux-x86_64.zip" % NDK_VER
def create_asset(target_dir):
"""Create the asset."""
subprocess.check_call(["curl", NDK_URL, "-o", "ndk.zip"])
subprocess.check_call(["unzip", "ndk.zip", "-d", target_dir])
for f in glob.glob(os.path.join(target_dir, NDK_VER, "*")):
shutil.move(f, target_dir)
subprocess.check_call(["rm", "ndk.zip"])
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--target_dir', '-t', required=True)
args = parser.parse_args()
create_asset(args.target_dir)
if __name__ == '__main__':
main()
|
en
| 0.809237
|
#!/usr/bin/env python # # Copyright 2016 Google Inc. # # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. Create the asset. Create the asset.
| 2.185386
| 2
|
tflite_tools.py
|
oxmlsys/tflite-tools
| 18
|
6628405
|
<gh_stars>10-100
import argparse
import os
from tflite_tools import TFLiteModel
def main():
parser = argparse.ArgumentParser(description='TFLite model analyser & memory optimizer')
parser.add_argument("-i", type=str, dest="input_path", help="input model file (.tflite)")
parser.add_argument("-o", type=str, dest="output_path", default=None, help="output model file (.tflite)")
parser.add_argument("--clusters", type=int, default=0,
help="cluster weights into n-many values (simulate code-book quantization)")
parser.add_argument("--optimize", action="store_true", default=False, help="optimize peak working set size")
parser.add_argument("--csv", type=str, dest="csv_output_folder", default=None,
help="output model analysis in CSV format into the specified folder")
parser.add_argument("--plot", type=str, dest="plot_file", default=None,
help="plot memory usage for each operator during the execution")
parser.add_argument("--calc-macs", default=False, action="store_true", help="Calculate approximate MAC usage")
parser.add_argument("--calc-size", default=False, action="store_true", help="Calculate parameter size")
args = parser.parse_args()
model = TFLiteModel.load_from_file(args.input_path)
if args.optimize:
print("Optimizing peak memory usage...")
model.optimize_memory()
if args.csv_output_folder:
print(f"Writing model analysis to {args.csv_output_folder} in CSV format")
os.makedirs(args.csv_output_folder, exist_ok=True)
model.output_model_analysis_to_csv(args.csv_output_folder, calc_macs=args.calc_macs, calc_size=args.calc_size)
else:
model.print_model_analysis(calc_macs=args.calc_macs, calc_size=args.calc_size)
if args.clusters > 0:
model.cluster_weights(args.clusters)
if args.plot_file:
print(f"Plotting operator memory usage to {args.plot_file}")
model.plot_memory_usage(args.plot_file)
if args.output_path:
print(f"Saving the model to {args.output_path}...")
model.write_to_file(args.output_path)
if __name__ == "__main__":
main()
|
import argparse
import os
from tflite_tools import TFLiteModel
def main():
parser = argparse.ArgumentParser(description='TFLite model analyser & memory optimizer')
parser.add_argument("-i", type=str, dest="input_path", help="input model file (.tflite)")
parser.add_argument("-o", type=str, dest="output_path", default=None, help="output model file (.tflite)")
parser.add_argument("--clusters", type=int, default=0,
help="cluster weights into n-many values (simulate code-book quantization)")
parser.add_argument("--optimize", action="store_true", default=False, help="optimize peak working set size")
parser.add_argument("--csv", type=str, dest="csv_output_folder", default=None,
help="output model analysis in CSV format into the specified folder")
parser.add_argument("--plot", type=str, dest="plot_file", default=None,
help="plot memory usage for each operator during the execution")
parser.add_argument("--calc-macs", default=False, action="store_true", help="Calculate approximate MAC usage")
parser.add_argument("--calc-size", default=False, action="store_true", help="Calculate parameter size")
args = parser.parse_args()
model = TFLiteModel.load_from_file(args.input_path)
if args.optimize:
print("Optimizing peak memory usage...")
model.optimize_memory()
if args.csv_output_folder:
print(f"Writing model analysis to {args.csv_output_folder} in CSV format")
os.makedirs(args.csv_output_folder, exist_ok=True)
model.output_model_analysis_to_csv(args.csv_output_folder, calc_macs=args.calc_macs, calc_size=args.calc_size)
else:
model.print_model_analysis(calc_macs=args.calc_macs, calc_size=args.calc_size)
if args.clusters > 0:
model.cluster_weights(args.clusters)
if args.plot_file:
print(f"Plotting operator memory usage to {args.plot_file}")
model.plot_memory_usage(args.plot_file)
if args.output_path:
print(f"Saving the model to {args.output_path}...")
model.write_to_file(args.output_path)
if __name__ == "__main__":
main()
|
none
| 1
| 2.931737
| 3
|
|
cycy/__init__.py
|
Magnetic/cycy
| 26
|
6628406
|
<reponame>Magnetic/cycy
from cycy._version import __version__
|
from cycy._version import __version__
|
none
| 1
| 1.011109
| 1
|
|
src/logger.py
|
hazimavdal/gpurge
| 1
|
6628407
|
<reponame>hazimavdal/gpurge
import time
DEBUG_LEVEL = 0
INFO_LEVEL = 1
WARNING_LEVEL = 2
ERROR_LEVEL = 3
FATAL_LEVEL = 4
def level_str(level):
if level == 0:
return "DEBUG"
if level == 1:
return "INFO"
if level == 2:
return "WARN"
if level == 3:
return "ERROR"
if level == 4:
return "FATAL"
class Logger:
def __init__(self, verbosity):
self.lines = []
self.verbosity = verbosity
def __log(self, level, msg, *args):
for i, arg in enumerate(*args):
msg = msg.replace(f'%{str(i)}', str(arg))
if level >= self.verbosity:
print(msg)
stamp = time.strftime("%Y/%m/%d\t%H:%M:%S")
self.lines.append(f'<{level_str(level)}>\t{stamp}: {msg}')
def infof(self, msg, *args):
self.__log(INFO_LEVEL, msg, args)
def errorf(self, msg, *args):
self.__log(ERROR_LEVEL, msg, args)
def save(self, filename):
with open(filename, "w+") as f:
lines = [l + '\n' for l in self.lines]
f.writelines(lines)
|
import time
DEBUG_LEVEL = 0
INFO_LEVEL = 1
WARNING_LEVEL = 2
ERROR_LEVEL = 3
FATAL_LEVEL = 4
def level_str(level):
if level == 0:
return "DEBUG"
if level == 1:
return "INFO"
if level == 2:
return "WARN"
if level == 3:
return "ERROR"
if level == 4:
return "FATAL"
class Logger:
def __init__(self, verbosity):
self.lines = []
self.verbosity = verbosity
def __log(self, level, msg, *args):
for i, arg in enumerate(*args):
msg = msg.replace(f'%{str(i)}', str(arg))
if level >= self.verbosity:
print(msg)
stamp = time.strftime("%Y/%m/%d\t%H:%M:%S")
self.lines.append(f'<{level_str(level)}>\t{stamp}: {msg}')
def infof(self, msg, *args):
self.__log(INFO_LEVEL, msg, args)
def errorf(self, msg, *args):
self.__log(ERROR_LEVEL, msg, args)
def save(self, filename):
with open(filename, "w+") as f:
lines = [l + '\n' for l in self.lines]
f.writelines(lines)
|
none
| 1
| 3.099941
| 3
|
|
VirtualBox-5.0.0/src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/UPT/Library/String.py
|
egraba/vbox_openbsd
| 1
|
6628408
|
<reponame>egraba/vbox_openbsd
## @file
# This file is used to define common string related functions used in parsing
# process
#
# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
'''
String
'''
##
# Import Modules
#
import re
import os.path
from string import strip
import Logger.Log as Logger
import Library.DataType as DataType
from Logger.ToolError import FORMAT_INVALID
from Logger.ToolError import PARSER_ERROR
from Logger import StringTable as ST
#
# Regular expression for matching macro used in DSC/DEC/INF file inclusion
#
gMACRO_PATTERN = re.compile("\$\(([_A-Z][_A-Z0-9]*)\)", re.UNICODE)
## GetSplitValueList
#
# Get a value list from a string with multiple values splited with SplitTag
# The default SplitTag is DataType.TAB_VALUE_SPLIT
# 'AAA|BBB|CCC' -> ['AAA', 'BBB', 'CCC']
#
# @param String: The input string to be splitted
# @param SplitTag: The split key, default is DataType.TAB_VALUE_SPLIT
# @param MaxSplit: The max number of split values, default is -1
#
#
def GetSplitValueList(String, SplitTag=DataType.TAB_VALUE_SPLIT, MaxSplit=-1):
return map(lambda l: l.strip(), String.split(SplitTag, MaxSplit))
## MergeArches
#
# Find a key's all arches in dict, add the new arch to the list
# If not exist any arch, set the arch directly
#
# @param Dict: The input value for Dict
# @param Key: The input value for Key
# @param Arch: The Arch to be added or merged
#
def MergeArches(Dict, Key, Arch):
if Key in Dict.keys():
Dict[Key].append(Arch)
else:
Dict[Key] = Arch.split()
## GenDefines
#
# Parse a string with format "DEFINE <VarName> = <PATH>"
# Generate a map Defines[VarName] = PATH
# Return False if invalid format
#
# @param String: String with DEFINE statement
# @param Arch: Supportted Arch
# @param Defines: DEFINE statement to be parsed
#
def GenDefines(String, Arch, Defines):
if String.find(DataType.TAB_DEFINE + ' ') > -1:
List = String.replace(DataType.TAB_DEFINE + ' ', '').\
split(DataType.TAB_EQUAL_SPLIT)
if len(List) == 2:
Defines[(CleanString(List[0]), Arch)] = CleanString(List[1])
return 0
else:
return -1
return 1
## GetLibraryClassesWithModuleType
#
# Get Library Class definition when no module type defined
#
# @param Lines: The content to be parsed
# @param Key: Reserved
# @param KeyValues: To store data after parsing
# @param CommentCharacter: Comment char, used to ignore comment content
#
def GetLibraryClassesWithModuleType(Lines, Key, KeyValues, CommentCharacter):
NewKey = SplitModuleType(Key)
Lines = Lines.split(DataType.TAB_SECTION_END, 1)[1]
LineList = Lines.splitlines()
for Line in LineList:
Line = CleanString(Line, CommentCharacter)
if Line != '' and Line[0] != CommentCharacter:
KeyValues.append([CleanString(Line, CommentCharacter), NewKey[1]])
return True
## GetDynamics
#
# Get Dynamic Pcds
#
# @param Lines: The content to be parsed
# @param Key: Reserved
# @param KeyValues: To store data after parsing
# @param CommentCharacter: Comment char, used to ignore comment content
#
def GetDynamics(Lines, Key, KeyValues, CommentCharacter):
#
# Get SkuId Name List
#
SkuIdNameList = SplitModuleType(Key)
Lines = Lines.split(DataType.TAB_SECTION_END, 1)[1]
LineList = Lines.splitlines()
for Line in LineList:
Line = CleanString(Line, CommentCharacter)
if Line != '' and Line[0] != CommentCharacter:
KeyValues.append([CleanString(Line, CommentCharacter), SkuIdNameList[1]])
return True
## SplitModuleType
#
# Split ModuleType out of section defien to get key
# [LibraryClass.Arch.ModuleType|ModuleType|ModuleType] -> [
# 'LibraryClass.Arch', ['ModuleType', 'ModuleType', 'ModuleType'] ]
#
# @param Key: String to be parsed
#
def SplitModuleType(Key):
KeyList = Key.split(DataType.TAB_SPLIT)
#
# Fill in for arch
#
KeyList.append('')
#
# Fill in for moduletype
#
KeyList.append('')
ReturnValue = []
KeyValue = KeyList[0]
if KeyList[1] != '':
KeyValue = KeyValue + DataType.TAB_SPLIT + KeyList[1]
ReturnValue.append(KeyValue)
ReturnValue.append(GetSplitValueList(KeyList[2]))
return ReturnValue
## Replace macro in string
#
# This method replace macros used in given string. The macros are given in a
# dictionary.
#
# @param String String to be processed
# @param MacroDefinitions The macro definitions in the form of dictionary
# @param SelfReplacement To decide whether replace un-defined macro to ''
# @param Line: The content contain line string and line number
# @param FileName: The meta-file file name
#
def ReplaceMacro(String, MacroDefinitions = None, SelfReplacement = False, Line = None, FileName = None, Flag = False):
LastString = String
if MacroDefinitions == None:
MacroDefinitions = {}
while MacroDefinitions:
QuotedStringList = []
HaveQuotedMacroFlag = False
if not Flag:
MacroUsed = gMACRO_PATTERN.findall(String)
else:
ReQuotedString = re.compile('\"')
QuotedStringList = ReQuotedString.split(String)
if len(QuotedStringList) >= 3:
HaveQuotedMacroFlag = True
Count = 0
MacroString = ""
for QuotedStringItem in QuotedStringList:
Count += 1
if Count % 2 != 0:
MacroString += QuotedStringItem
if Count == len(QuotedStringList) and Count%2 == 0:
MacroString += QuotedStringItem
MacroUsed = gMACRO_PATTERN.findall(MacroString)
#
# no macro found in String, stop replacing
#
if len(MacroUsed) == 0:
break
for Macro in MacroUsed:
if Macro not in MacroDefinitions:
if SelfReplacement:
String = String.replace("$(%s)" % Macro, '')
Logger.Debug(5, "Delete undefined MACROs in file %s line %d: %s!" %(FileName, Line[1], Line[0]))
continue
if not HaveQuotedMacroFlag:
String = String.replace("$(%s)" % Macro, MacroDefinitions[Macro])
else:
Count = 0
for QuotedStringItem in QuotedStringList:
Count += 1
if Count % 2 != 0:
QuotedStringList[Count-1] = QuotedStringList[Count-1].replace("$(%s)" % Macro,
MacroDefinitions[Macro])
elif Count == len(QuotedStringList) and Count%2 == 0:
QuotedStringList[Count-1] = QuotedStringList[Count-1].replace("$(%s)" % Macro,
MacroDefinitions[Macro])
RetString = ''
if HaveQuotedMacroFlag:
Count = 0
for QuotedStringItem in QuotedStringList:
Count += 1
if Count != len(QuotedStringList):
RetString += QuotedStringList[Count-1] + "\""
else:
RetString += QuotedStringList[Count-1]
String = RetString
#
# in case there's macro not defined
#
if String == LastString:
break
LastString = String
return String
## NormPath
#
# Create a normal path
# And replace DFEINE in the path
#
# @param Path: The input value for Path to be converted
# @param Defines: A set for DEFINE statement
#
def NormPath(Path, Defines = None):
IsRelativePath = False
if Defines == None:
Defines = {}
if Path:
if Path[0] == '.':
IsRelativePath = True
#
# Replace with Define
#
if Defines:
Path = ReplaceMacro(Path, Defines)
#
# To local path format
#
Path = os.path.normpath(Path)
if IsRelativePath and Path[0] != '.':
Path = os.path.join('.', Path)
return Path
## CleanString
#
# Remove comments in a string
# Remove spaces
#
# @param Line: The string to be cleaned
# @param CommentCharacter: Comment char, used to ignore comment content,
# default is DataType.TAB_COMMENT_SPLIT
#
def CleanString(Line, CommentCharacter=DataType.TAB_COMMENT_SPLIT, AllowCppStyleComment=False):
#
# remove whitespace
#
Line = Line.strip()
#
# Replace EDK1's comment character
#
if AllowCppStyleComment:
Line = Line.replace(DataType.TAB_COMMENT_EDK1_SPLIT, CommentCharacter)
#
# remove comments, but we should escape comment character in string
#
InString = False
for Index in range(0, len(Line)):
if Line[Index] == '"':
InString = not InString
elif Line[Index] == CommentCharacter and not InString:
Line = Line[0: Index]
break
#
# remove whitespace again
#
Line = Line.strip()
return Line
## CleanString2
#
# Split comments in a string
# Remove spaces
#
# @param Line: The string to be cleaned
# @param CommentCharacter: Comment char, used to ignore comment content,
# default is DataType.TAB_COMMENT_SPLIT
#
def CleanString2(Line, CommentCharacter=DataType.TAB_COMMENT_SPLIT, AllowCppStyleComment=False):
#
# remove whitespace
#
Line = Line.strip()
#
# Replace EDK1's comment character
#
if AllowCppStyleComment:
Line = Line.replace(DataType.TAB_COMMENT_EDK1_SPLIT, CommentCharacter)
#
# separate comments and statements
#
LineParts = Line.split(CommentCharacter, 1)
#
# remove whitespace again
#
Line = LineParts[0].strip()
if len(LineParts) > 1:
Comment = LineParts[1].strip()
#
# Remove prefixed and trailing comment characters
#
Start = 0
End = len(Comment)
while Start < End and Comment.startswith(CommentCharacter, Start, End):
Start += 1
while End >= 0 and Comment.endswith(CommentCharacter, Start, End):
End -= 1
Comment = Comment[Start:End]
Comment = Comment.strip()
else:
Comment = ''
return Line, Comment
## GetMultipleValuesOfKeyFromLines
#
# Parse multiple strings to clean comment and spaces
# The result is saved to KeyValues
#
# @param Lines: The content to be parsed
# @param Key: Reserved
# @param KeyValues: To store data after parsing
# @param CommentCharacter: Comment char, used to ignore comment content
#
def GetMultipleValuesOfKeyFromLines(Lines, Key, KeyValues, CommentCharacter):
    """Append every cleaned, non-comment line of a section body to KeyValues.

    Key is unused (kept for interface compatibility); KeyValues is
    mutated in place.  Always returns True.
    """
    if Key:
        pass
    if KeyValues:
        pass
    # Only the content after the section header is of interest.
    Content = Lines.split(DataType.TAB_SECTION_END, 1)[1]
    for Raw in Content.split('\n'):
        Cleaned = CleanString(Raw, CommentCharacter)
        if Cleaned and Cleaned[0] != CommentCharacter:
            KeyValues.append(Cleaned)
    return True
## GetDefineValue
#
# Parse a DEFINE statement to get defined value
# DEFINE Key Value
#
# @param String: The content to be parsed
# @param Key: The key of DEFINE statement
# @param CommentCharacter: Comment char, used to ignore comment content
#
def GetDefineValue(String, Key, CommentCharacter):
    """Return the value part of a 'DEFINE Key Value' statement.

    CommentCharacter is unused (kept for interface compatibility).
    """
    if CommentCharacter:
        pass
    Cleaned = CleanString(String)
    Anchor = Key + ' '
    return Cleaned[Cleaned.find(Anchor) + len(Anchor):]
## GetSingleValueOfKeyFromLines
#
# Parse multiple strings as below to get value of each definition line
# Key1 = Value1
# Key2 = Value2
# The result is saved to Dictionary
#
# @param Lines: The content to be parsed
# @param Dictionary: To store data after parsing
# @param CommentCharacter: Comment char, be used to ignore comment content
# @param KeySplitCharacter: Key split char, between key name and key value.
# Key1 = Value1, '=' is the key split char
# @param ValueSplitFlag: Value split flag, be used to decide if has
# multiple values
# @param ValueSplitCharacter: Value split char, be used to split multiple
# values. Key1 = Value1|Value2, '|' is the value
# split char
#
def GetSingleValueOfKeyFromLines(Lines, Dictionary, CommentCharacter, KeySplitCharacter, \
                                 ValueSplitFlag, ValueSplitCharacter):
    """Parse 'Key <KeySplitCharacter> Value' lines into Dictionary.

    DEFINE and SPEC statements accumulate in dedicated lists stored under
    DataType.TAB_INF_DEFINES_DEFINE / _SPEC; other single-word keys map
    to their value list; everything else lands in the macro sub-dict.
    Always returns True; Dictionary is mutated in place.
    """
    Lines = Lines.split('\n')
    Keys = []
    Value = ''
    DefineValues = ['']
    SpecValues = ['']
    for Line in Lines:
        #
        # Handle DEFINE and SPEC
        #
        if Line.find(DataType.TAB_INF_DEFINES_DEFINE + ' ') > -1:
            # Drop the placeholder '' the first time a real value arrives.
            if '' in DefineValues:
                DefineValues.remove('')
            DefineValues.append(GetDefineValue(Line, DataType.TAB_INF_DEFINES_DEFINE, CommentCharacter))
            continue
        if Line.find(DataType.TAB_INF_DEFINES_SPEC + ' ') > -1:
            if '' in SpecValues:
                SpecValues.remove('')
            SpecValues.append(GetDefineValue(Line, DataType.TAB_INF_DEFINES_SPEC, CommentCharacter))
            continue
        #
        # Handle Others
        #
        LineList = Line.split(KeySplitCharacter, 1)
        if len(LineList) >= 2:
            Key = LineList[0].split()
            if len(Key) == 1 and Key[0][0] != CommentCharacter:
                #
                # Remove comments and white spaces
                #
                LineList[1] = CleanString(LineList[1], CommentCharacter)
                if ValueSplitFlag:
                    # NOTE(review): 'strip' is string.strip — Python 2 only.
                    Value = map(strip, LineList[1].split(ValueSplitCharacter))
                else:
                    Value = CleanString(LineList[1], CommentCharacter).splitlines()
                if Key[0] in Dictionary:
                    if Key[0] not in Keys:
                        # First sighting in this call overwrites any
                        # pre-existing value; later sightings extend it.
                        Dictionary[Key[0]] = Value
                        Keys.append(Key[0])
                    else:
                        Dictionary[Key[0]].extend(Value)
                else:
                    # Unknown keys are treated as user-defined macros.
                    Dictionary[DataType.TAB_INF_DEFINES_MACRO][Key[0]] = Value[0]
    if DefineValues == []:
        DefineValues = ['']
    if SpecValues == []:
        SpecValues = ['']
    Dictionary[DataType.TAB_INF_DEFINES_DEFINE] = DefineValues
    Dictionary[DataType.TAB_INF_DEFINES_SPEC] = SpecValues
    return True
## The content to be parsed
#
# Do pre-check for a file before it is parsed
# Check $()
# Check []
#
# @param FileName: Used for error report
# @param FileContent: File content to be parsed
# @param SupSectionTag: Used for error report
#
def PreCheck(FileName, FileContent, SupSectionTag):
    """Pre-check file content before parsing and return the cleaned text.

    Validates '$(' ... ')' macro syntax and '[' ']' pairing per line;
    FileName and SupSectionTag feed error reporting only.
    """
    if SupSectionTag:
        pass
    LineNo = 0
    # NOTE(review): IsFailed is never set True below, so the final error
    # report is unreachable — looks like dead code; confirm intent.
    IsFailed = False
    NewFileContent = ''
    for Line in FileContent.splitlines():
        LineNo = LineNo + 1
        #
        # Clean current line
        #
        Line = CleanString(Line)
        #
        # Remove commented line
        #
        # NOTE(review): this tests TAB_COMMA_SPLIT (a comma), not the
        # comment character — possibly intentional, but verify.
        if Line.find(DataType.TAB_COMMA_SPLIT) == 0:
            Line = ''
        #
        # Check $()
        #
        if Line.find('$') > -1:
            if Line.find('$(') < 0 or Line.find(')') < 0:
                Logger.Error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError = Logger.IS_RAISE_ERROR)
        #
        # Check []
        #
        if Line.find('[') > -1 or Line.find(']') > -1:
            #
            # Only get one '[' or one ']'
            #
            if not (Line.find('[') > -1 and Line.find(']') > -1):
                Logger.Error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError = Logger.IS_RAISE_ERROR)
        #
        # Regenerate FileContent
        #
        NewFileContent = NewFileContent + Line + '\r\n'
    if IsFailed:
        Logger.Error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError = Logger.IS_RAISE_ERROR)
    return NewFileContent
## CheckFileType
#
# Check if the Filename is including ExtName
# Return True if it exists
# Raise a error message if it not exists
#
# @param CheckFilename: Name of the file to be checked
# @param ExtName: Ext name of the file to be checked
# @param ContainerFilename: The container file which describes the file to be
# checked, used for error report
# @param SectionName: Used for error report
# @param Line: The line in container file which defines the file
# to be checked
#
def CheckFileType(CheckFilename, ExtName, ContainerFilename, SectionName, Line, LineNo=-1):
    """Check that CheckFilename carries the extension ExtName.

    On mismatch a parser error is reported through Logger.Error;
    ContainerFilename/SectionName/Line/LineNo only feed that report.
    Returns True when no error is raised.
    """
    if CheckFilename != '' and CheckFilename != None:
        (Root, Ext) = os.path.splitext(CheckFilename)
        if Ext.upper() != ExtName.upper() and Root:
            if LineNo == -1:
                # Read the container only when the line number is unknown,
                # and close the handle (the original leaked the open()).
                with open(ContainerFilename, 'r') as ContainerFd:
                    ContainerFile = ContainerFd.read()
                LineNo = GetLineNo(ContainerFile, Line)
            ErrorMsg = ST.ERR_SECTIONNAME_INVALID % (SectionName, CheckFilename, ExtName)
            Logger.Error("Parser", PARSER_ERROR, ErrorMsg, Line=LineNo, \
                         File=ContainerFilename, RaiseError=Logger.IS_RAISE_ERROR)
    return True
## CheckFileExist
#
# Check if the file exists
# Return True if it exists
# Raise a error message if it not exists
#
# @param CheckFilename: Name of the file to be checked
# @param WorkspaceDir: Current workspace dir
# @param ContainerFilename: The container file which describes the file to
# be checked, used for error report
# @param SectionName: Used for error report
# @param Line: The line in container file which defines the
# file to be checked
#
def CheckFileExist(WorkspaceDir, CheckFilename, ContainerFilename, SectionName, Line, LineNo=-1):
    """Check that CheckFilename exists under WorkspaceDir.

    Reports a parser error via Logger.Error when the file is missing;
    returns the full workspace-relative path that was checked.
    """
    CheckFile = ''
    if CheckFilename != '' and CheckFilename != None:
        CheckFile = WorkspaceFile(WorkspaceDir, CheckFilename)
        if not os.path.isfile(CheckFile):
            if LineNo == -1:
                # Read the container only when the line number is unknown,
                # and close the handle (the original leaked the open()).
                with open(ContainerFilename, 'r') as ContainerFd:
                    ContainerFile = ContainerFd.read()
                LineNo = GetLineNo(ContainerFile, Line)
            ErrorMsg = ST.ERR_CHECKFILE_NOTFOUND % (CheckFile, SectionName)
            Logger.Error("Parser", PARSER_ERROR, ErrorMsg,
                         File=ContainerFilename, Line = LineNo, RaiseError=Logger.IS_RAISE_ERROR)
    return CheckFile
## GetLineNo
#
# Find the index of a line in a file
#
# @param FileContent: Search scope
# @param Line: Search key
#
def GetLineNo(FileContent, Line, IsIgnoreComment=True):
    """Return the 1-based number of the first line containing Line, or -1.

    When IsIgnoreComment is True, matches on lines whose first
    non-whitespace character is the comment character are skipped.
    """
    LineList = FileContent.splitlines()
    for Index in range(len(LineList)):
        if LineList[Index].find(Line) > -1:
            #
            # Ignore statement in comment.  Use a [:1] slice instead of
            # [0] so an empty or whitespace-only matched line no longer
            # raises IndexError.
            #
            if IsIgnoreComment:
                if LineList[Index].strip()[:1] == DataType.TAB_COMMENT_SPLIT:
                    continue
            return Index + 1
    return -1
## RaiseParserError
#
# Raise a parser error
#
# @param Line: String which has error
# @param Section: Used for error report
# @param File: File which has the string
# @param Format: Correct format
#
def RaiseParserError(Line, Section, File, Format='', LineNo=-1):
    """Report a parser error for Line in Section of File via Logger.Error.

    When LineNo is unknown (-1), locate the line in the file first.
    """
    if LineNo == -1:
        # Close the handle after reading (the original leaked the open()).
        with open(os.path.normpath(File), 'r') as Fd:
            Content = Fd.read()
        LineNo = GetLineNo(Content, Line)
    ErrorMsg = ST.ERR_INVALID_NOTFOUND % (Line, Section)
    if Format != '':
        Format = "Correct format is " + Format
    Logger.Error("Parser", PARSER_ERROR, ErrorMsg, File=File, Line=LineNo, \
                 ExtraData=Format, RaiseError=Logger.IS_RAISE_ERROR)
## WorkspaceFile
#
# Return a full path with workspace dir
#
# @param WorkspaceDir: Workspace dir
# @param Filename: Relative file name
#
def WorkspaceFile(WorkspaceDir, Filename):
    """Return Filename joined under WorkspaceDir, both normalized."""
    Root = NormPath(WorkspaceDir)
    Leaf = NormPath(Filename)
    return os.path.join(Root, Leaf)
## Split string
#
# Remove a '"' that the string starts with and/or ends with
#
# @param String:        The string to be split
#
def SplitString(String):
    """Remove one leading and one trailing double quote from String."""
    Result = String[1:] if String.startswith('"') else String
    if Result.endswith('"'):
        Result = Result[:-1]
    return Result
## Convert To Sql String
#
# Replace "'" with "''" in each item of StringList
#
# @param StringList: A list for strings to be converted
#
def ConvertToSqlString(StringList):
    """Return a list with every "'" doubled ("''") in each item.

    Uses a list comprehension instead of map() so the result is a real
    list on both Python 2 and Python 3 (map() yields an iterator on 3).
    """
    return [Item.replace("'", "''") for Item in StringList]
## Convert To Sql String
#
# Replace "'" with "''" in the String
#
# @param String: A String to be converted
#
def ConvertToSqlString2(String):
    """Escape String for SQL by doubling every single quote."""
    return "''".join(String.split("'"))
## GetStringOfList
#
# Get String of a List
#
# @param Lines: string list
# @param Split: split character
#
def GetStringOfList(List, Split = ' '):
    """Join list items with Split (each item followed by Split), then
    strip surrounding whitespace.  Non-list input is returned unchanged.
    """
    if type(List) is not list:
        return List
    return ''.join(Item + Split for Item in List).strip()
## Get HelpTextList
#
# Get HelpTextList from HelpTextClassList
#
# @param HelpTextClassList: Help Text Class List
#
def GetHelpTextList(HelpTextClassList):
    """Collect help lines from a list of help-text objects.

    Each object's .String is split on newlines; one trailing newline is
    removed first and written back to the object (preserving the
    original's in-place mutation).  Falsy input yields [].
    """
    Result = []
    if HelpTextClassList:
        for Entry in HelpTextClassList:
            if Entry.String.endswith('\n'):
                Entry.String = Entry.String[:-1]
            Result.extend(Entry.String.split('\n'))
    return Result
## Get String Array Length
#
# Get String Array Length
#
# @param String: the source string
#
def StringArrayLength(String):
    """Return the array byte length of String as stored by the build tools.

    Unicode strings: two bytes per character plus terminators; L"..."
    literals: two bytes per payload character plus terminator; "..."
    literals: one byte per payload character plus terminator; otherwise
    the whitespace-token count plus one.
    """
    # NOTE: 'unicode' is Python 2 only, consistent with this module.
    if isinstance(String, unicode):
        return (len(String) + 1) * 2 + 1
    if String.startswith('L"'):
        # Payload excludes the L prefix and both quotes (len - 3).
        return (len(String) - 3 + 1) * 2
    if String.startswith('"'):
        # Payload excludes both quotes (len - 2).
        return (len(String) - 2 + 1)
    return len(String.split()) + 1
## RemoveDupOption
#
# Remove Dup Option
#
# @param OptionString: the option string
# @param Which: Which flag
# @param Against: Against flag
#
def RemoveDupOption(OptionString, Which="/I", Against=None):
    """Blank out duplicate occurrences of options starting with Which.

    The value following the Which prefix is the dedup key; values listed
    in Against count as already seen.  Returns the rejoined string (blanked
    tokens leave extra spaces, as in the original).
    """
    Tokens = OptionString.split()
    Seen = list(Against) if Against else []
    for Pos, Token in enumerate(Tokens):
        if not Token.startswith(Which):
            continue
        Payload = Token[len(Which):]
        if Payload in Seen:
            Tokens[Pos] = ""
        else:
            Seen.append(Payload)
    return " ".join(Tokens)
## Check if the string is a HexDigit
#
# Return true if all characters in the string are digits and there is at
# least one character
# or valid Hexs (started with 0x, following by hexdigit letters)
# , false otherwise.
# @param string: input string
#
def IsHexDigit(Str):
    """Return True when Str parses as a decimal integer, or as a hex
    value written with a 0x/0X prefix; False otherwise."""
    try:
        int(Str, 10)
        return True
    except ValueError:
        pass
    if len(Str) > 2 and Str.upper().startswith('0X'):
        try:
            int(Str, 16)
            return True
        except ValueError:
            return False
    return False
## Check if the string is a HexDigit and its integer value within limit of UINT32
#
# Return true if all characters in the string are digits and there is at
# least one character
# or valid Hexs (started with 0x, following by hexdigit letters)
# , false otherwise.
# @param string: input string
#
def IsHexDigitUINT32(Str):
    """Return True when Str is a decimal or 0x-prefixed hex integer that
    fits in an unsigned 32-bit range [0, 0xFFFFFFFF]."""
    try:
        return 0 <= int(Str, 10) <= 0xFFFFFFFF
    except ValueError:
        pass
    if len(Str) > 2 and Str.upper().startswith('0X'):
        try:
            return 0 <= int(Str, 16) <= 0xFFFFFFFF
        except ValueError:
            return False
    return False
## ConvertSpecialChar
#
# The ASCII text files of type INF, DEC, INI are edited by developers,
# and may contain characters that cannot be directly translated to strings that
# are conformant with the UDP XML Schema. Any characters in this category
# (0x00-0x08, TAB [0x09], 0x0B, 0x0C, 0x0E-0x1F, 0x80-0xFF)
# must be converted to a space character[0x20] as part of the parsing process.
#
def ConvertSpecialChar(Lines):
    """Replace XML-unfriendly characters in each line with a space.

    Covers 0x00-0x08, TAB (0x09), 0x0B, 0x0C, 0x0E-0x1F and 0x7F-0xFF,
    per the UDP XML Schema restriction described in the file header.
    Returns a new list; the input list is not modified.
    """
    # Compile once instead of once per line (the original recompiled the
    # pattern inside the loop).
    ReMatchSpecialChar = re.compile(r"[\x00-\x08]|\x09|\x0b|\x0c|[\x0e-\x1f]|[\x7f-\xff]")
    return [ReMatchSpecialChar.sub(' ', Line) for Line in Lines]
## __GetTokenList
#
# Assume Str is a valid feature flag expression.
# Return a list which contains tokens: alpha numeric token and other token
# Whitespace are not stripped
#
def __GetTokenList(Str):
    """Tokenize a feature-flag expression string.

    Assumes Str is a valid expression.  Returns a list of alternating
    tokens: runs of identifier characters ([A-Za-z0-9_]), runs of
    operator/whitespace characters (whitespace is kept, not stripped),
    and complete quoted literals (including an optional L prefix).
    """
    # InQuote: currently inside a double-quoted literal.
    InQuote = False
    # Token collects identifier/literal chars; TokenOP collects the rest.
    Token = ''
    TokenOP = ''
    # PreChar is the previous character, used to spot \" escapes.
    PreChar = ''
    List = []
    for Char in Str:
        if InQuote:
            Token += Char
            # An unescaped quote closes the literal token.
            if Char == '"' and PreChar != '\\':
                InQuote = not InQuote
                List.append(Token)
                Token = ''
            continue
        if Char == '"':
            # Opening quote: flush pending tokens first.  A pending 'L'
            # is kept so wide-string literals stay one token (L"...").
            if Token and Token != 'L':
                List.append(Token)
                Token = ''
            if TokenOP:
                List.append(TokenOP)
                TokenOP = ''
            InQuote = not InQuote
            Token += Char
            continue
        if not (Char.isalnum() or Char in '_'):
            # Operator/whitespace character: flush the identifier run.
            TokenOP += Char
            if Token:
                List.append(Token)
                Token = ''
        else:
            # Identifier character: flush the operator run.
            Token += Char
            if TokenOP:
                List.append(TokenOP)
                TokenOP = ''
        # Two consecutive backslashes cancel the escape state.
        if PreChar == '\\' and Char == '\\':
            PreChar = ''
        else:
            PreChar = Char
    # Flush whatever remains at end of input.
    if Token:
        List.append(Token)
    if TokenOP:
        List.append(TokenOP)
    return List
## ConvertNEToNOTEQ
#
# Convert NE operator to NOT EQ
# For example: 1 NE 2 -> 1 NOT EQ 2
#
# @param Expr: Feature flag expression to be converted
#
def ConvertNEToNOTEQ(Expr):
    """Rewrite every 'NE' token of a feature-flag expression as 'NOT EQ'.

    For example: '1 NE 2' -> '1 NOT EQ 2'.
    """
    return ''.join(
        'NOT EQ' if Token == 'NE' else Token
        for Token in __GetTokenList(Expr)
    )
## ConvertNOTEQToNE
#
# Convert NOT EQ operator to NE
# For example: 1 NOT NE 2 -> 1 NE 2
#
# @param Expr: Feature flag expression to be converted
#
def ConvertNOTEQToNE(Expr):
    """Rewrite 'NOT EQ' operator sequences in Expr back to 'NE'.

    For example: '1 NOT EQ 2' -> '1 NE 2'.
    """
    List = __GetTokenList(Expr)
    HasNOT = False
    RetList = []
    for Token in List:
        if HasNOT and Token == 'EQ':
            # At least, 'NOT' is in the list
            # Drop the whitespace tokens emitted between 'NOT' and 'EQ',
            # then rewrite the pending 'NOT' token as 'NE'.
            while not RetList[-1].strip():
                RetList.pop()
            RetList[-1] = 'NE'
            HasNOT = False
            continue
        if Token == 'NOT':
            HasNOT = True
        elif Token.strip():
            # Any other non-whitespace token cancels a pending 'NOT'.
            HasNOT = False
        RetList.append(Token)
    return ''.join(RetList)
## SplitPcdEntry
#
# Split an PCD entry string to Token.CName and PCD value and FFE.
# NOTE: PCD Value and FFE can contain "|" in it's expression. And in INF specification, have below rule.
# When using the characters "|" or "||" in an expression, the expression must be encapsulated in
# open "(" and close ")" parenthesis.
#
# @param String An PCD entry string need to be split.
#
# @return List [PcdTokenCName, Value, FFE]
#
def SplitPcdEntry(String):
    """Split a PCD entry string into [TokenCName, Value, FFE] parts.

    Value and FFE may themselves contain '|' when wrapped in '(' ')'
    parentheses, so '|' separators are only honored while the running
    parenthesis count is balanced.  Returns (PartsList, True) on success
    or (['', '', ''], False) for a malformed entry.
    """
    if not String:
        return ['', '',''], False
    PcdTokenCName = ''
    PcdValue = ''
    PcdFeatureFlagExp = ''
    ValueList = GetSplitValueList(String, "|", 1)
    #
    # Only contain TokenCName
    #
    if len(ValueList) == 1:
        return [ValueList[0]], True
    NewValueList = []
    if len(ValueList) == 2:
        PcdTokenCName = ValueList[0]
        ValueList = GetSplitValueList(ValueList[1], "|")
        # RemainCount tracks unmatched '(' across items joined so far;
        # items are re-joined with '|' until the parentheses balance.
        RemainCount = 0
        for Item in ValueList:
            ParenthesisCount = 0
            for Char in Item:
                if Char == "(":
                    ParenthesisCount += 1
                if Char == ")":
                    ParenthesisCount -= 1
            #
            # An individual item
            #
            if RemainCount == 0 and ParenthesisCount >= 0:
                NewValueList.append(Item)
                RemainCount = ParenthesisCount
            elif RemainCount > 0 and RemainCount + ParenthesisCount >= 0:
                # Continuation of the previous item: restore the '|'.
                NewValueList[-1] = NewValueList[-1] + '|' + Item
                RemainCount = RemainCount + ParenthesisCount
            elif RemainCount > 0 and RemainCount + ParenthesisCount < 0:
                #
                # ERROR, return
                #
                return ['', '', ''], False
        if len(NewValueList) == 1:
            PcdValue = NewValueList[0]
            return [PcdTokenCName, PcdValue], True
        elif len(NewValueList) == 2:
            PcdValue = NewValueList[0]
            PcdFeatureFlagExp = NewValueList[1]
            return [PcdTokenCName, PcdValue, PcdFeatureFlagExp], True
        else:
            return ['', '', ''], False
    return ['', '', ''], False
|
## @file
# This file is used to define common string related functions used in parsing
# process
#
# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
'''
String
'''
##
# Import Modules
#
import re
import os.path
from string import strip
import Logger.Log as Logger
import Library.DataType as DataType
from Logger.ToolError import FORMAT_INVALID
from Logger.ToolError import PARSER_ERROR
from Logger import StringTable as ST
#
# Regular expression for matching macro used in DSC/DEC/INF file inclusion
#
gMACRO_PATTERN = re.compile("\$\(([_A-Z][_A-Z0-9]*)\)", re.UNICODE)
## GetSplitValueList
#
# Get a value list from a string with multiple values splited with SplitTag
# The default SplitTag is DataType.TAB_VALUE_SPLIT
# 'AAA|BBB|CCC' -> ['AAA', 'BBB', 'CCC']
#
# @param String: The input string to be splitted
# @param SplitTag: The split key, default is DataType.TAB_VALUE_SPLIT
# @param MaxSplit: The max number of split values, default is -1
#
#
def GetSplitValueList(String, SplitTag=DataType.TAB_VALUE_SPLIT, MaxSplit=-1):
    """Split String on SplitTag (at most MaxSplit times) and strip each part.

    'AAA|BBB|CCC' -> ['AAA', 'BBB', 'CCC'].  Uses a list comprehension
    instead of map() so the result is a real list on both Python 2 and
    Python 3 (map() yields an iterator on 3).
    """
    return [Part.strip() for Part in String.split(SplitTag, MaxSplit)]
## MergeArches
#
# Find a key's all arches in dict, add the new arch to the list
# If not exist any arch, set the arch directly
#
# @param Dict: The input value for Dict
# @param Key: The input value for Key
# @param Arch: The Arch to be added or merged
#
def MergeArches(Dict, Key, Arch):
    """Record Arch under Key in Dict.

    Appends to the existing arch list, or seeds a new list from
    Arch.split() when the key is unseen.  NOTE(review): the first arch
    string is whitespace-split while later ones are appended whole —
    preserved from the original; confirm before relying on it.
    """
    # Membership test on the dict itself instead of Dict.keys(), which
    # builds an O(n) list on Python 2.
    if Key in Dict:
        Dict[Key].append(Arch)
    else:
        Dict[Key] = Arch.split()
## GenDefines
#
# Parse a string with format "DEFINE <VarName> = <PATH>"
# Generate a map Defines[VarName] = PATH
# Return False if invalid format
#
# @param String: String with DEFINE statement
# @param Arch: Supportted Arch
# @param Defines: DEFINE statement to be parsed
#
def GenDefines(String, Arch, Defines):
    """Parse a 'DEFINE <VarName> = <PATH>' statement into Defines.

    Stores Defines[(VarName, Arch)] = PATH.  Returns 0 on success, -1
    when the statement is malformed, and 1 when String is not a DEFINE
    statement at all.
    """
    Prefix = DataType.TAB_DEFINE + ' '
    if String.find(Prefix) < 0:
        return 1
    Parts = String.replace(Prefix, '').split(DataType.TAB_EQUAL_SPLIT)
    if len(Parts) != 2:
        return -1
    Defines[(CleanString(Parts[0]), Arch)] = CleanString(Parts[1])
    return 0
## GetLibraryClassesWithModuleType
#
# Get Library Class definition when no module type defined
#
# @param Lines: The content to be parsed
# @param Key: Reserved
# @param KeyValues: To store data after parsing
# @param CommentCharacter: Comment char, used to ignore comment content
#
def GetLibraryClassesWithModuleType(Lines, Key, KeyValues, CommentCharacter):
    """Parse a library-class section body into KeyValues.

    Each kept line is appended as [CleanedLine, ModuleTypeList], with the
    module-type list derived from the section key.  Always returns True.
    """
    ModuleKey = SplitModuleType(Key)
    Content = Lines.split(DataType.TAB_SECTION_END, 1)[1]
    for Raw in Content.splitlines():
        Cleaned = CleanString(Raw, CommentCharacter)
        if Cleaned and Cleaned[0] != CommentCharacter:
            KeyValues.append([CleanString(Cleaned, CommentCharacter), ModuleKey[1]])
    return True
## GetDynamics
#
# Get Dynamic Pcds
#
# @param Lines: The content to be parsed
# @param Key: Reserved
# @param KeyValues: To store data after parsing
# @param CommentCharacter: Comment char, used to ignore comment content
#
def GetDynamics(Lines, Key, KeyValues, CommentCharacter):
    """Parse a dynamic-PCD section body into KeyValues.

    Each kept line is appended as [CleanedLine, SkuIdNameList], with the
    SKU-ID name list derived from the section key.  Always returns True.
    """
    #
    # Get SkuId Name List
    #
    SkuIdNameList = SplitModuleType(Key)
    Content = Lines.split(DataType.TAB_SECTION_END, 1)[1]
    for Raw in Content.splitlines():
        Cleaned = CleanString(Raw, CommentCharacter)
        if Cleaned and Cleaned[0] != CommentCharacter:
            KeyValues.append([CleanString(Cleaned, CommentCharacter), SkuIdNameList[1]])
    return True
## SplitModuleType
#
# Split ModuleType out of section define to get key
# [LibraryClass.Arch.ModuleType|ModuleType|ModuleType] -> [
# 'LibraryClass.Arch', ['ModuleType', 'ModuleType', 'ModuleType'] ]
#
# @param Key: String to be parsed
#
def SplitModuleType(Key):
    """Split the module-type list out of a section key.

    'LibraryClass.Arch.Mod1|Mod2' ->
    ['LibraryClass.Arch', ['Mod1', 'Mod2']].
    """
    # Pad so that the arch (index 1) and module-type (index 2) slots
    # always exist even when absent from the key.
    Parts = Key.split(DataType.TAB_SPLIT) + ['', '']
    Prefix = Parts[0]
    if Parts[1]:
        Prefix = Prefix + DataType.TAB_SPLIT + Parts[1]
    return [Prefix, GetSplitValueList(Parts[2])]
## Replace macro in string
#
# This method replace macros used in given string. The macros are given in a
# dictionary.
#
# @param String String to be processed
# @param MacroDefinitions The macro definitions in the form of dictionary
# @param SelfReplacement To decide whether replace un-defined macro to ''
# @param Line: The content contain line string and line number
# @param FileName: The meta-file file name
#
def ReplaceMacro(String, MacroDefinitions = None, SelfReplacement = False, Line = None, FileName = None, Flag = False):
    """Expand $(MACRO) references in String using MacroDefinitions.

    When Flag is True, only text outside double-quoted segments is
    scanned for macros and replacement is applied per segment; otherwise
    the whole string is scanned.  With SelfReplacement, undefined macros
    are deleted (replaced by '').  Iterates until no further change.
    Line/FileName feed the debug log only.
    """
    LastString = String
    if MacroDefinitions == None:
        MacroDefinitions = {}
    while MacroDefinitions:
        QuotedStringList = []
        HaveQuotedMacroFlag = False
        if not Flag:
            MacroUsed = gMACRO_PATTERN.findall(String)
        else:
            # Split on quotes: odd-indexed segments are unquoted text,
            # which is the only text scanned for macros in this mode.
            ReQuotedString = re.compile('\"')
            QuotedStringList = ReQuotedString.split(String)
            if len(QuotedStringList) >= 3:
                HaveQuotedMacroFlag = True
            Count = 0
            MacroString = ""
            for QuotedStringItem in QuotedStringList:
                Count += 1
                if Count % 2 != 0:
                    MacroString += QuotedStringItem
                if Count == len(QuotedStringList) and Count%2 == 0:
                    MacroString += QuotedStringItem
            MacroUsed = gMACRO_PATTERN.findall(MacroString)
        #
        # no macro found in String, stop replacing
        #
        if len(MacroUsed) == 0:
            break
        for Macro in MacroUsed:
            if Macro not in MacroDefinitions:
                if SelfReplacement:
                    # Undefined macro: delete the reference entirely.
                    String = String.replace("$(%s)" % Macro, '')
                    Logger.Debug(5, "Delete undefined MACROs in file %s line %d: %s!" %(FileName, Line[1], Line[0]))
                continue
            if not HaveQuotedMacroFlag:
                String = String.replace("$(%s)" % Macro, MacroDefinitions[Macro])
            else:
                # Replace only inside the unquoted segments.
                Count = 0
                for QuotedStringItem in QuotedStringList:
                    Count += 1
                    if Count % 2 != 0:
                        QuotedStringList[Count-1] = QuotedStringList[Count-1].replace("$(%s)" % Macro,
                                                                                      MacroDefinitions[Macro])
                    elif Count == len(QuotedStringList) and Count%2 == 0:
                        QuotedStringList[Count-1] = QuotedStringList[Count-1].replace("$(%s)" % Macro,
                                                                                      MacroDefinitions[Macro])
        RetString = ''
        if HaveQuotedMacroFlag:
            # Re-assemble the segments, restoring the quote delimiters.
            Count = 0
            for QuotedStringItem in QuotedStringList:
                Count += 1
                if Count != len(QuotedStringList):
                    RetString += QuotedStringList[Count-1] + "\""
                else:
                    RetString += QuotedStringList[Count-1]
            String = RetString
        #
        # in case there's macro not defined
        #
        if String == LastString:
            break
        LastString = String
    return String
## NormPath
#
# Create a normal path
# And replace DFEINE in the path
#
# @param Path: The input value for Path to be converted
# @param Defines: A set for DEFINE statement
#
def NormPath(Path, Defines = None):
    """Normalize Path for the local OS, expanding DEFINE macros first.

    A path that started with '.' keeps an explicit leading './' after
    normalization.  Falsy paths are returned unchanged.
    """
    if Defines == None:
        Defines = {}
    if not Path:
        return Path
    WasRelative = Path[0] == '.'
    # Expand $(DEFINE) style macros when any are supplied.
    if Defines:
        Path = ReplaceMacro(Path, Defines)
    Path = os.path.normpath(Path)
    # normpath drops a leading './' — restore it for relative paths.
    if WasRelative and Path[0] != '.':
        Path = os.path.join('.', Path)
    return Path
## CleanString
#
# Remove comments in a string
# Remove spaces
#
# @param Line: The string to be cleaned
# @param CommentCharacter: Comment char, used to ignore comment content,
# default is DataType.TAB_COMMENT_SPLIT
#
def CleanString(Line, CommentCharacter=DataType.TAB_COMMENT_SPLIT, AllowCppStyleComment=False):
    """Strip whitespace and remove any trailing comment from one line.

    A comment character inside a double-quoted string is NOT treated as a
    comment start.  Returns the cleaned statement text.
    """
    Line = Line.strip()
    if AllowCppStyleComment:
        # Translate the EDK1-style comment marker into the standard one.
        Line = Line.replace(DataType.TAB_COMMENT_EDK1_SPLIT, CommentCharacter)
    # Truncate at the first comment character that sits outside quotes.
    InQuote = False
    for Pos, Char in enumerate(Line):
        if Char == '"':
            InQuote = not InQuote
        elif Char == CommentCharacter and not InQuote:
            Line = Line[:Pos]
            break
    return Line.strip()
## CleanString2
#
# Split comments in a string
# Remove spaces
#
# @param Line: The string to be cleaned
# @param CommentCharacter: Comment char, used to ignore comment content,
# default is DataType.TAB_COMMENT_SPLIT
#
def CleanString2(Line, CommentCharacter=DataType.TAB_COMMENT_SPLIT, AllowCppStyleComment=False):
    """Split one line into its statement and comment parts.

    Returns (Statement, Comment): the statement stripped of whitespace,
    and the comment stripped of surrounding comment characters and
    whitespace ('' when the line carries no comment).
    """
    Line = Line.strip()
    if AllowCppStyleComment:
        # Translate the EDK1-style comment marker into the standard one.
        Line = Line.replace(DataType.TAB_COMMENT_EDK1_SPLIT, CommentCharacter)
    # Separate statement from comment at the first comment marker.
    Parts = Line.split(CommentCharacter, 1)
    Statement = Parts[0].strip()
    if len(Parts) > 1:
        Remainder = Parts[1].strip()
        # Trim runs of comment characters from both ends of the comment.
        Begin, Stop = 0, len(Remainder)
        while Begin < Stop and Remainder.startswith(CommentCharacter, Begin, Stop):
            Begin += 1
        while Stop >= 0 and Remainder.endswith(CommentCharacter, Begin, Stop):
            Stop -= 1
        Comment = Remainder[Begin:Stop].strip()
    else:
        Comment = ''
    return Statement, Comment
## GetMultipleValuesOfKeyFromLines
#
# Parse multiple strings to clean comment and spaces
# The result is saved to KeyValues
#
# @param Lines: The content to be parsed
# @param Key: Reserved
# @param KeyValues: To store data after parsing
# @param CommentCharacter: Comment char, used to ignore comment content
#
def GetMultipleValuesOfKeyFromLines(Lines, Key, KeyValues, CommentCharacter):
    """Append every cleaned, non-comment line of a section body to KeyValues.

    Key is unused (kept for interface compatibility); KeyValues is
    mutated in place.  Always returns True.
    """
    if Key:
        pass
    if KeyValues:
        pass
    # Only the content after the section header is of interest.
    Content = Lines.split(DataType.TAB_SECTION_END, 1)[1]
    for Raw in Content.split('\n'):
        Cleaned = CleanString(Raw, CommentCharacter)
        if Cleaned and Cleaned[0] != CommentCharacter:
            KeyValues.append(Cleaned)
    return True
## GetDefineValue
#
# Parse a DEFINE statement to get defined value
# DEFINE Key Value
#
# @param String: The content to be parsed
# @param Key: The key of DEFINE statement
# @param CommentCharacter: Comment char, used to ignore comment content
#
def GetDefineValue(String, Key, CommentCharacter):
    """Return the value part of a 'DEFINE Key Value' statement.

    CommentCharacter is unused (kept for interface compatibility).
    """
    if CommentCharacter:
        pass
    Cleaned = CleanString(String)
    Anchor = Key + ' '
    return Cleaned[Cleaned.find(Anchor) + len(Anchor):]
## GetSingleValueOfKeyFromLines
#
# Parse multiple strings as below to get value of each definition line
# Key1 = Value1
# Key2 = Value2
# The result is saved to Dictionary
#
# @param Lines: The content to be parsed
# @param Dictionary: To store data after parsing
# @param CommentCharacter: Comment char, be used to ignore comment content
# @param KeySplitCharacter: Key split char, between key name and key value.
# Key1 = Value1, '=' is the key split char
# @param ValueSplitFlag: Value split flag, be used to decide if has
# multiple values
# @param ValueSplitCharacter: Value split char, be used to split multiple
# values. Key1 = Value1|Value2, '|' is the value
# split char
#
def GetSingleValueOfKeyFromLines(Lines, Dictionary, CommentCharacter, KeySplitCharacter, \
                                 ValueSplitFlag, ValueSplitCharacter):
    """Parse 'Key <KeySplitCharacter> Value' lines into Dictionary.

    DEFINE and SPEC statements accumulate in dedicated lists stored under
    DataType.TAB_INF_DEFINES_DEFINE / _SPEC; other single-word keys map
    to their value list; everything else lands in the macro sub-dict.
    Always returns True; Dictionary is mutated in place.
    """
    Lines = Lines.split('\n')
    Keys = []
    Value = ''
    DefineValues = ['']
    SpecValues = ['']
    for Line in Lines:
        #
        # Handle DEFINE and SPEC
        #
        if Line.find(DataType.TAB_INF_DEFINES_DEFINE + ' ') > -1:
            # Drop the placeholder '' the first time a real value arrives.
            if '' in DefineValues:
                DefineValues.remove('')
            DefineValues.append(GetDefineValue(Line, DataType.TAB_INF_DEFINES_DEFINE, CommentCharacter))
            continue
        if Line.find(DataType.TAB_INF_DEFINES_SPEC + ' ') > -1:
            if '' in SpecValues:
                SpecValues.remove('')
            SpecValues.append(GetDefineValue(Line, DataType.TAB_INF_DEFINES_SPEC, CommentCharacter))
            continue
        #
        # Handle Others
        #
        LineList = Line.split(KeySplitCharacter, 1)
        if len(LineList) >= 2:
            Key = LineList[0].split()
            if len(Key) == 1 and Key[0][0] != CommentCharacter:
                #
                # Remove comments and white spaces
                #
                LineList[1] = CleanString(LineList[1], CommentCharacter)
                if ValueSplitFlag:
                    # NOTE(review): 'strip' is string.strip — Python 2 only.
                    Value = map(strip, LineList[1].split(ValueSplitCharacter))
                else:
                    Value = CleanString(LineList[1], CommentCharacter).splitlines()
                if Key[0] in Dictionary:
                    if Key[0] not in Keys:
                        # First sighting in this call overwrites any
                        # pre-existing value; later sightings extend it.
                        Dictionary[Key[0]] = Value
                        Keys.append(Key[0])
                    else:
                        Dictionary[Key[0]].extend(Value)
                else:
                    # Unknown keys are treated as user-defined macros.
                    Dictionary[DataType.TAB_INF_DEFINES_MACRO][Key[0]] = Value[0]
    if DefineValues == []:
        DefineValues = ['']
    if SpecValues == []:
        SpecValues = ['']
    Dictionary[DataType.TAB_INF_DEFINES_DEFINE] = DefineValues
    Dictionary[DataType.TAB_INF_DEFINES_SPEC] = SpecValues
    return True
return True
## The content to be parsed
#
# Do pre-check for a file before it is parsed
# Check $()
# Check []
#
# @param FileName: Used for error report
# @param FileContent: File content to be parsed
# @param SupSectionTag: Used for error report
#
def PreCheck(FileName, FileContent, SupSectionTag):
    """Pre-check file content before parsing and return the cleaned text.

    Validates '$(' ... ')' macro syntax and '[' ']' pairing per line;
    FileName and SupSectionTag feed error reporting only.
    """
    if SupSectionTag:
        pass
    LineNo = 0
    # NOTE(review): IsFailed is never set True below, so the final error
    # report is unreachable — looks like dead code; confirm intent.
    IsFailed = False
    NewFileContent = ''
    for Line in FileContent.splitlines():
        LineNo = LineNo + 1
        #
        # Clean current line
        #
        Line = CleanString(Line)
        #
        # Remove commented line
        #
        # NOTE(review): this tests TAB_COMMA_SPLIT (a comma), not the
        # comment character — possibly intentional, but verify.
        if Line.find(DataType.TAB_COMMA_SPLIT) == 0:
            Line = ''
        #
        # Check $()
        #
        if Line.find('$') > -1:
            if Line.find('$(') < 0 or Line.find(')') < 0:
                Logger.Error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError = Logger.IS_RAISE_ERROR)
        #
        # Check []
        #
        if Line.find('[') > -1 or Line.find(']') > -1:
            #
            # Only get one '[' or one ']'
            #
            if not (Line.find('[') > -1 and Line.find(']') > -1):
                Logger.Error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError = Logger.IS_RAISE_ERROR)
        #
        # Regenerate FileContent
        #
        NewFileContent = NewFileContent + Line + '\r\n'
    if IsFailed:
        Logger.Error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError = Logger.IS_RAISE_ERROR)
    return NewFileContent
## CheckFileType
#
# Check if the Filename is including ExtName
# Return True if it exists
# Raise a error message if it not exists
#
# @param CheckFilename: Name of the file to be checked
# @param ExtName: Ext name of the file to be checked
# @param ContainerFilename: The container file which describes the file to be
# checked, used for error report
# @param SectionName: Used for error report
# @param Line: The line in container file which defines the file
# to be checked
#
def CheckFileType(CheckFilename, ExtName, ContainerFilename, SectionName, Line, LineNo=-1):
    """Check that CheckFilename carries the extension ExtName.

    On mismatch a parser error is reported through Logger.Error;
    ContainerFilename/SectionName/Line/LineNo only feed that report.
    Returns True when no error is raised.
    """
    if CheckFilename != '' and CheckFilename != None:
        (Root, Ext) = os.path.splitext(CheckFilename)
        if Ext.upper() != ExtName.upper() and Root:
            if LineNo == -1:
                # Read the container only when the line number is unknown,
                # and close the handle (the original leaked the open()).
                with open(ContainerFilename, 'r') as ContainerFd:
                    ContainerFile = ContainerFd.read()
                LineNo = GetLineNo(ContainerFile, Line)
            ErrorMsg = ST.ERR_SECTIONNAME_INVALID % (SectionName, CheckFilename, ExtName)
            Logger.Error("Parser", PARSER_ERROR, ErrorMsg, Line=LineNo, \
                         File=ContainerFilename, RaiseError=Logger.IS_RAISE_ERROR)
    return True
## CheckFileExist
#
# Check if the file exists
# Return True if it exists
# Raise a error message if it not exists
#
# @param CheckFilename: Name of the file to be checked
# @param WorkspaceDir: Current workspace dir
# @param ContainerFilename: The container file which describes the file to
# be checked, used for error report
# @param SectionName: Used for error report
# @param Line: The line in container file which defines the
# file to be checked
#
def CheckFileExist(WorkspaceDir, CheckFilename, ContainerFilename, SectionName, Line, LineNo=-1):
    """Check that CheckFilename exists under WorkspaceDir.

    Reports a parser error via Logger.Error when the file is missing;
    returns the full workspace-relative path that was checked.
    """
    CheckFile = ''
    if CheckFilename != '' and CheckFilename != None:
        CheckFile = WorkspaceFile(WorkspaceDir, CheckFilename)
        if not os.path.isfile(CheckFile):
            if LineNo == -1:
                # Read the container only when the line number is unknown,
                # and close the handle (the original leaked the open()).
                with open(ContainerFilename, 'r') as ContainerFd:
                    ContainerFile = ContainerFd.read()
                LineNo = GetLineNo(ContainerFile, Line)
            ErrorMsg = ST.ERR_CHECKFILE_NOTFOUND % (CheckFile, SectionName)
            Logger.Error("Parser", PARSER_ERROR, ErrorMsg,
                         File=ContainerFilename, Line = LineNo, RaiseError=Logger.IS_RAISE_ERROR)
    return CheckFile
## GetLineNo
#
# Find the index of a line in a file
#
# @param FileContent: Search scope
# @param Line: Search key
#
def GetLineNo(FileContent, Line, IsIgnoreComment=True):
    """Return the 1-based number of the first line containing Line, or -1.

    When IsIgnoreComment is True, matches on lines whose first
    non-whitespace character is the comment character are skipped.
    """
    LineList = FileContent.splitlines()
    for Index in range(len(LineList)):
        if LineList[Index].find(Line) > -1:
            #
            # Ignore statement in comment.  Use a [:1] slice instead of
            # [0] so an empty or whitespace-only matched line no longer
            # raises IndexError.
            #
            if IsIgnoreComment:
                if LineList[Index].strip()[:1] == DataType.TAB_COMMENT_SPLIT:
                    continue
            return Index + 1
    return -1
## RaiseParserError
#
# Raise a parser error
#
# @param Line: String which has error
# @param Section: Used for error report
# @param File: File which has the string
# @param Format: Correct format
#
def RaiseParserError(Line, Section, File, Format='', LineNo=-1):
    """Report a parser error for Line in Section of File via Logger.Error.

    When LineNo is unknown (-1), locate the line in the file first.
    """
    if LineNo == -1:
        # Close the handle after reading (the original leaked the open()).
        with open(os.path.normpath(File), 'r') as Fd:
            Content = Fd.read()
        LineNo = GetLineNo(Content, Line)
    ErrorMsg = ST.ERR_INVALID_NOTFOUND % (Line, Section)
    if Format != '':
        Format = "Correct format is " + Format
    Logger.Error("Parser", PARSER_ERROR, ErrorMsg, File=File, Line=LineNo, \
                 ExtraData=Format, RaiseError=Logger.IS_RAISE_ERROR)
## WorkspaceFile
#
# Return a full path with workspace dir
#
# @param WorkspaceDir: Workspace dir
# @param Filename: Relative file name
#
def WorkspaceFile(WorkspaceDir, Filename):
    """Return Filename joined under WorkspaceDir, both normalized."""
    Root = NormPath(WorkspaceDir)
    Leaf = NormPath(Filename)
    return os.path.join(Root, Leaf)
## Split string
#
# Remove a '"' that the string starts with and/or ends with
#
# @param String:        The string to be split
#
def SplitString(String):
    """Remove one leading and one trailing double quote from String."""
    Result = String[1:] if String.startswith('"') else String
    if Result.endswith('"'):
        Result = Result[:-1]
    return Result
## Convert To Sql String
#
# Replace "'" with "''" in each item of StringList
#
# @param StringList: A list for strings to be converted
#
def ConvertToSqlString(StringList):
    """Return a list with every "'" doubled ("''") in each item.

    Uses a list comprehension instead of map() so the result is a real
    list on both Python 2 and Python 3 (map() yields an iterator on 3).
    """
    return [Item.replace("'", "''") for Item in StringList]
## Convert To Sql String
#
# Replace "'" with "''" in the String
#
# @param String: A String to be converted
#
def ConvertToSqlString2(String):
    """Escape String for use inside a single-quoted SQL literal by doubling
    every "'" character.
    """
    return "''".join(String.split("'"))
## GetStringOfList
#
# Get String of a List
#
# @param Lines: string list
# @param Split: split character
#
def GetStringOfList(List, Split = ' '):
    """Concatenate the items of List, each followed by Split, then strip
    surrounding whitespace.  Non-list input is returned unchanged.
    """
    if type(List) is not list:
        return List
    if not List:
        return ''
    # Every element is followed by Split (matching the historical behaviour
    # of appending the separator after each item); only outer whitespace is
    # stripped afterwards.
    return (Split.join(List) + Split).strip()
## Get HelpTextList
#
# Get HelpTextList from HelpTextClassList
#
# @param HelpTextClassList: Help Text Class List
#
def GetHelpTextList(HelpTextClassList):
    """Flatten a list of help-text objects into a list of text lines.

    Each object's ``String`` attribute is split on newlines; a single
    trailing newline is removed first (note: this trims the attribute on
    the object itself, as the original implementation did).
    """
    Result = []
    if not HelpTextClassList:
        return Result
    for Item in HelpTextClassList:
        if Item.String.endswith('\n'):
            Item.String = Item.String[:-1]
        Result.extend(Item.String.split('\n'))
    return Result
## Get String Array Length
#
# Get String Array Length
#
# @param String: the source string
#
def StringArrayLength(String):
    """Return the storage length, in bytes, of a PCD string value.

    Python 2 only: the first branch references the ``unicode`` builtin,
    which does not exist on Python 3.

    Encodings distinguished:
      * a ``unicode`` object: wide (2-byte) characters plus terminator;
      * an ``L"..."`` literal: quoted characters plus terminator, 2 bytes each;
      * a ``"..."`` literal: quoted characters plus a 1-byte terminator;
      * anything else: a whitespace-separated byte list, one byte per token
        plus one.
    """
    if isinstance(String, unicode):
        # (len + 1) wide chars * 2 bytes, plus one extra byte -- the extra
        # byte is kept as-is from the original; purpose unclear, TODO confirm.
        return (len(String) + 1) * 2 + 1
    elif String.startswith('L"'):
        # len - 3 drops the 'L' and both quotes; +1 is the NUL terminator.
        return (len(String) - 3 + 1) * 2
    elif String.startswith('"'):
        # len - 2 drops both quotes; +1 is the NUL terminator.
        return (len(String) - 2 + 1)
    else:
        return len(String.split()) + 1
## RemoveDupOption
#
# Remove Dup Option
#
# @param OptionString: the option string
# @param Which: Which flag
# @param Against: Against flag
#
def RemoveDupOption(OptionString, Which="/I", Against=None):
    """Blank out duplicate occurrences of a flag (default "/I") in an
    option string.

    Values listed in *Against* count as already seen.  Duplicates are
    replaced with empty strings before re-joining, so the result may keep
    extra spaces where duplicates were removed.
    """
    Tokens = OptionString.split()
    SeenValues = []
    if Against:
        SeenValues += Against
    for Position, Token in enumerate(Tokens):
        if not Token.startswith(Which):
            continue
        # Everything after the flag prefix is the option's value.
        Value = Token[len(Which):] if len(Token) > len(Which) else ""
        if Value in SeenValues:
            Tokens[Position] = ""
        else:
            SeenValues.append(Value)
    return " ".join(Tokens)
## Check if the string is HexDgit
#
# Return true if all characters in the string are digits and there is at
# least one character
# or valid Hexs (started with 0x, following by hexdigit letters)
# , false otherwise.
# @param string: input string
#
def IsHexDigit(Str):
    """Return True if Str parses as a decimal integer, or as a hex integer
    written with a '0x'/'0X' prefix followed by at least one hex digit;
    False otherwise.
    """
    try:
        int(Str, 10)
        return True
    except ValueError:
        pass
    # Not decimal: accept only an explicit 0x/0X prefix with digits after it.
    if len(Str) > 2 and Str.upper().startswith('0X'):
        try:
            int(Str, 16)
            return True
        except ValueError:
            pass
    return False
## Check if the string is HexDgit and its interger value within limit of UINT32
#
# Return true if all characters in the string are digits and there is at
# least one character
# or valid Hexs (started with 0x, following by hexdigit letters)
# , false otherwise.
# @param string: input string
#
def IsHexDigitUINT32(Str):
    """Return True if Str is a decimal or 0x-prefixed hex integer whose
    value fits in an unsigned 32-bit word (0 .. 0xFFFFFFFF); False
    otherwise.
    """
    try:
        Value = int(Str, 10)
    except ValueError:
        # Not decimal: accept only an explicit 0x/0X prefix with hex digits.
        if len(Str) > 2 and Str.upper().startswith('0X'):
            try:
                Value = int(Str, 16)
            except ValueError:
                return False
            return 0 <= Value <= 0xFFFFFFFF
        return False
    return 0 <= Value <= 0xFFFFFFFF
## CleanSpecialChar
#
# The ASCII text files of type INF, DEC, INI are edited by developers,
# and may contain characters that cannot be directly translated to strings that
# are conformant with the UDP XML Schema. Any characters in this category
# (0x00-0x08, TAB [0x09], 0x0B, 0x0C, 0x0E-0x1F, 0x80-0xFF)
# must be converted to a space character[0x20] as part of the parsing process.
#
def ConvertSpecialChar(Lines):
    """Replace characters that are not representable in UDP XML with a
    space (0x20) in every line.

    Converted ranges: 0x00-0x08, TAB (0x09), 0x0B, 0x0C, 0x0E-0x1F and
    0x7F-0xFF.  NOTE: the pattern also converts DEL (0x7F), which is
    slightly wider than the 0x80-0xFF range quoted in the original header
    comment -- behaviour kept as-is.

    @param Lines: iterable of strings to sanitize
    @return: list of sanitized strings
    """
    # Compile once; the original rebuilt this regex on every iteration.
    ReMatchSpecialChar = re.compile(r"[\x00-\x08]|\x09|\x0b|\x0c|[\x0e-\x1f]|[\x7f-\xff]")
    return [ReMatchSpecialChar.sub(' ', Line) for Line in Lines]
## __GetTokenList
#
# Assume Str is a valid feature flag expression.
# Return a list which contains tokens: alpha numeric token and other token
# Whitespace are not stripped
#
def __GetTokenList(Str):
    """Split a feature-flag expression into tokens.

    Returns a list alternating between runs of identifier characters
    (alphanumerics and '_'), runs of everything else (operators and
    whitespace, which are NOT stripped), and complete quoted string
    literals including their quotes.  ``Str`` is assumed to be a valid
    feature-flag expression.
    """
    InQuote = False     # currently inside a "..." literal
    Token = ''          # identifier (or quoted-string) accumulator
    TokenOP = ''        # operator/whitespace accumulator
    PreChar = ''        # previous character, used for backslash-escape tracking
    List = []
    for Char in Str:
        if InQuote:
            Token += Char
            # An unescaped closing quote ends the literal; emit it whole.
            if Char == '"' and PreChar != '\\':
                InQuote = not InQuote
                List.append(Token)
                Token = ''
            continue
        if Char == '"':
            # Flush any pending token -- except a lone 'L', which prefixes a
            # wide-string literal and stays attached to the opening quote.
            if Token and Token != 'L':
                List.append(Token)
                Token = ''
            if TokenOP:
                List.append(TokenOP)
                TokenOP = ''
            InQuote = not InQuote
            Token += Char
            continue
        if not (Char.isalnum() or Char in '_'):
            # Operator/whitespace character: flush any identifier run first.
            TokenOP += Char
            if Token:
                List.append(Token)
                Token = ''
        else:
            # Identifier character: flush any operator run first.
            Token += Char
            if TokenOP:
                List.append(TokenOP)
                TokenOP = ''
        # Two consecutive backslashes cancel out, so '\\\\' followed by '"'
        # is not treated as an escaped quote.
        if PreChar == '\\' and Char == '\\':
            PreChar = ''
        else:
            PreChar = Char
    # Flush whatever remains at end of input.
    if Token:
        List.append(Token)
    if TokenOP:
        List.append(TokenOP)
    return List
## ConvertNEToNOTEQ
#
# Convert NE operator to NOT EQ
# For example: 1 NE 2 -> 1 NOT EQ 2
#
# @param Expr: Feature flag expression to be converted
#
def ConvertNEToNOTEQ(Expr):
    """Rewrite every 'NE' operator token in a feature-flag expression as
    'NOT EQ'.  For example: ``1 NE 2`` -> ``1 NOT EQ 2``.
    """
    Tokens = __GetTokenList(Expr)
    return ''.join('NOT EQ' if Token == 'NE' else Token for Token in Tokens)
## ConvertNOTEQToNE
#
# Convert NOT EQ operator to NE
# For example: 1 NOT NE 2 -> 1 NE 2
#
# @param Expr: Feature flag expression to be converted
#
def ConvertNOTEQToNE(Expr):
    """Convert the 'NOT EQ' operator back to 'NE'.

    For example: ``1 NOT EQ 2`` -> ``1 NE 2``.

    @param Expr: Feature flag expression to be converted
    """
    List = __GetTokenList(Expr)
    HasNOT = False
    RetList = []
    for Token in List:
        if HasNOT and Token == 'EQ':
            # At least, 'NOT' is in the list
            # Drop the whitespace tokens emitted between 'NOT' and 'EQ',
            # then replace the 'NOT' token itself with a single 'NE'.
            while not RetList[-1].strip():
                RetList.pop()
            RetList[-1] = 'NE'
            HasNOT = False
            continue
        if Token == 'NOT':
            HasNOT = True
        elif Token.strip():
            # Any other non-whitespace token cancels a pending 'NOT'.
            HasNOT = False
        RetList.append(Token)
    return ''.join(RetList)
## SplitPcdEntry
#
# Split an PCD entry string to Token.CName and PCD value and FFE.
# NOTE: PCD Value and FFE can contain "|" in it's expression. And in INF specification, have below rule.
# When using the characters "|" or "||" in an expression, the expression must be encapsulated in
# open "(" and close ")" parenthesis.
#
# @param String An PCD entry string need to be split.
#
# @return List [PcdTokenCName, Value, FFE]
#
def SplitPcdEntry(String):
    """Split a PCD entry string into [TokenCName, Value, FFE] parts.

    PCD values and feature-flag expressions may themselves contain "|"
    (or "||") when wrapped in parentheses, so a plain split on "|" is not
    enough: split items are re-joined while their parenthesis nesting is
    still open.

    @param String: the PCD entry string to split
    @return: (PartsList, True) on success, where PartsList has one, two or
             three elements; (['', '', ''], False) on malformed input.
    """
    if not String:
        return ['', '',''], False
    PcdTokenCName = ''
    PcdValue = ''
    PcdFeatureFlagExp = ''
    # First split separates the TokenCName from the rest (at most one split).
    ValueList = GetSplitValueList(String, "|", 1)
    #
    # Only contain TokenCName
    #
    if len(ValueList) == 1:
        return [ValueList[0]], True
    NewValueList = []
    if len(ValueList) == 2:
        PcdTokenCName = ValueList[0]
        # Re-split the remainder on every "|"; parenthesized pieces are
        # glued back together below.
        ValueList = GetSplitValueList(ValueList[1], "|")
    # RemainCount tracks how many parentheses are still open so far.
    RemainCount = 0
    for Item in ValueList:
        ParenthesisCount = 0
        for Char in Item:
            if Char == "(":
                ParenthesisCount += 1
            if Char == ")":
                ParenthesisCount -= 1
        #
        # An individual item
        #
        if RemainCount == 0 and ParenthesisCount >= 0:
            NewValueList.append(Item)
            RemainCount = ParenthesisCount
        elif RemainCount > 0 and RemainCount + ParenthesisCount >= 0:
            # Still inside an open "(": glue this piece back with "|".
            NewValueList[-1] = NewValueList[-1] + '|' + Item
            RemainCount = RemainCount + ParenthesisCount
        elif RemainCount > 0 and RemainCount + ParenthesisCount < 0:
            #
            # ERROR, return
            #
            return ['', '', ''], False
    if len(NewValueList) == 1:
        # Value only.
        PcdValue = NewValueList[0]
        return [PcdTokenCName, PcdValue], True
    elif len(NewValueList) == 2:
        # Value plus feature-flag expression.
        PcdValue = NewValueList[0]
        PcdFeatureFlagExp = NewValueList[1]
        return [PcdTokenCName, PcdValue, PcdFeatureFlagExp], True
    else:
        return ['', '', ''], False
    # Unreachable: retained from the original implementation.
    return ['', '', ''], False
|
en
| 0.573198
|
## @file # This file is used to define common string related functions used in parsing # process # # Copyright (c) 2011, Intel Corporation. All rights reserved.<BR> # # This program and the accompanying materials are licensed and made available # under the terms and conditions of the BSD License which accompanies this # distribution. The full text of the license may be found at # http://opensource.org/licenses/bsd-license.php # # THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, # WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. # String ## # Import Modules # # # Regular expression for matching macro used in DSC/DEC/INF file inclusion # ## GetSplitValueList # # Get a value list from a string with multiple values splited with SplitTag # The default SplitTag is DataType.TAB_VALUE_SPLIT # 'AAA|BBB|CCC' -> ['AAA', 'BBB', 'CCC'] # # @param String: The input string to be splitted # @param SplitTag: The split key, default is DataType.TAB_VALUE_SPLIT # @param MaxSplit: The max number of split values, default is -1 # # ## MergeArches # # Find a key's all arches in dict, add the new arch to the list # If not exist any arch, set the arch directly # # @param Dict: The input value for Dict # @param Key: The input value for Key # @param Arch: The Arch to be added or merged # ## GenDefines # # Parse a string with format "DEFINE <VarName> = <PATH>" # Generate a map Defines[VarName] = PATH # Return False if invalid format # # @param String: String with DEFINE statement # @param Arch: Supportted Arch # @param Defines: DEFINE statement to be parsed # ## GetLibraryClassesWithModuleType # # Get Library Class definition when no module type defined # # @param Lines: The content to be parsed # @param Key: Reserved # @param KeyValues: To store data after parsing # @param CommentCharacter: Comment char, used to ignore comment content # ## GetDynamics # # Get Dynamic Pcds # # @param Lines: The content to be parsed # @param Key: Reserved # 
@param KeyValues: To store data after parsing # @param CommentCharacter: Comment char, used to ignore comment content # # # Get SkuId Name List # ## SplitModuleType # # Split ModuleType out of section defien to get key # [LibraryClass.Arch.ModuleType|ModuleType|ModuleType] -> [ # 'LibraryClass.Arch', ['ModuleType', 'ModuleType', 'ModuleType'] ] # # @param Key: String to be parsed # # # Fill in for arch # # # Fill in for moduletype # ## Replace macro in string # # This method replace macros used in given string. The macros are given in a # dictionary. # # @param String String to be processed # @param MacroDefinitions The macro definitions in the form of dictionary # @param SelfReplacement To decide whether replace un-defined macro to '' # @param Line: The content contain line string and line number # @param FileName: The meta-file file name # # # no macro found in String, stop replacing # # # in case there's macro not defined # ## NormPath # # Create a normal path # And replace DFEINE in the path # # @param Path: The input value for Path to be converted # @param Defines: A set for DEFINE statement # # # Replace with Define # # # To local path format # ## CleanString # # Remove comments in a string # Remove spaces # # @param Line: The string to be cleaned # @param CommentCharacter: Comment char, used to ignore comment content, # default is DataType.TAB_COMMENT_SPLIT # # # remove whitespace # # # Replace EDK1's comment character # # # remove comments, but we should escape comment character in string # # # remove whitespace again # ## CleanString2 # # Split comments in a string # Remove spaces # # @param Line: The string to be cleaned # @param CommentCharacter: Comment char, used to ignore comment content, # default is DataType.TAB_COMMENT_SPLIT # # # remove whitespace # # # Replace EDK1's comment character # # # separate comments and statements # # # remove whitespace again # # # Remove prefixed and trailing comment characters # ## GetMultipleValuesOfKeyFromLines # # 
Parse multiple strings to clean comment and spaces # The result is saved to KeyValues # # @param Lines: The content to be parsed # @param Key: Reserved # @param KeyValues: To store data after parsing # @param CommentCharacter: Comment char, used to ignore comment content # ## GetDefineValue # # Parse a DEFINE statement to get defined value # DEFINE Key Value # # @param String: The content to be parsed # @param Key: The key of DEFINE statement # @param CommentCharacter: Comment char, used to ignore comment content # ## GetSingleValueOfKeyFromLines # # Parse multiple strings as below to get value of each definition line # Key1 = Value1 # Key2 = Value2 # The result is saved to Dictionary # # @param Lines: The content to be parsed # @param Dictionary: To store data after parsing # @param CommentCharacter: Comment char, be used to ignore comment content # @param KeySplitCharacter: Key split char, between key name and key value. # Key1 = Value1, '=' is the key split char # @param ValueSplitFlag: Value split flag, be used to decide if has # multiple values # @param ValueSplitCharacter: Value split char, be used to split multiple # values. 
Key1 = Value1|Value2, '|' is the value # split char # # # Handle DEFINE and SPEC # # # Handle Others # # # Remove comments and white spaces # ## The content to be parsed # # Do pre-check for a file before it is parsed # Check $() # Check [] # # @param FileName: Used for error report # @param FileContent: File content to be parsed # @param SupSectionTag: Used for error report # # # Clean current line # # # Remove commented line # # # Check $() # # # Check [] # # # Only get one '[' or one ']' # # # Regenerate FileContent # ## CheckFileType # # Check if the Filename is including ExtName # Return True if it exists # Raise a error message if it not exists # # @param CheckFilename: Name of the file to be checked # @param ExtName: Ext name of the file to be checked # @param ContainerFilename: The container file which describes the file to be # checked, used for error report # @param SectionName: Used for error report # @param Line: The line in container file which defines the file # to be checked # ## CheckFileExist # # Check if the file exists # Return True if it exists # Raise a error message if it not exists # # @param CheckFilename: Name of the file to be checked # @param WorkspaceDir: Current workspace dir # @param ContainerFilename: The container file which describes the file to # be checked, used for error report # @param SectionName: Used for error report # @param Line: The line in container file which defines the # file to be checked # ## GetLineNo # # Find the index of a line in a file # # @param FileContent: Search scope # @param Line: Search key # # # Ignore statement in comment # ## RaiseParserError # # Raise a parser error # # @param Line: String which has error # @param Section: Used for error report # @param File: File which has the string # @param Format: Correct format # ## WorkspaceFile # # Return a full path with workspace dir # # @param WorkspaceDir: Workspace dir # @param Filename: Relative file name # ## Split string # # Revmove '"' which startswith 
and endswith string # # @param String: The string need to be splited # ## Convert To Sql String # # Replace "'" with "''" in each item of StringList # # @param StringList: A list for strings to be converted # ## Convert To Sql String # # Replace "'" with "''" in the String # # @param String: A String to be converted # ## GetStringOfList # # Get String of a List # # @param Lines: string list # @param Split: split character # ## Get HelpTextList # # Get HelpTextList from HelpTextClassList # # @param HelpTextClassList: Help Text Class List # ## Get String Array Length # # Get String Array Length # # @param String: the source string # ## RemoveDupOption # # Remove Dup Option # # @param OptionString: the option string # @param Which: Which flag # @param Against: Against flag # ## Check if the string is HexDgit # # Return true if all characters in the string are digits and there is at # least one character # or valid Hexs (started with 0x, following by hexdigit letters) # , false otherwise. # @param string: input string # ## Check if the string is HexDgit and its interger value within limit of UINT32 # # Return true if all characters in the string are digits and there is at # least one character # or valid Hexs (started with 0x, following by hexdigit letters) # , false otherwise. # @param string: input string # ## CleanSpecialChar # # The ASCII text files of type INF, DEC, INI are edited by developers, # and may contain characters that cannot be directly translated to strings that # are conformant with the UDP XML Schema. Any characters in this category # (0x00-0x08, TAB [0x09], 0x0B, 0x0C, 0x0E-0x1F, 0x80-0xFF) # must be converted to a space character[0x20] as part of the parsing process. # ## __GetTokenList # # Assume Str is a valid feature flag expression. 
# Return a list which contains tokens: alpha numeric token and other token # Whitespace are not stripped # ## ConvertNEToNOTEQ # # Convert NE operator to NOT EQ # For example: 1 NE 2 -> 1 NOT EQ 2 # # @param Expr: Feature flag expression to be converted # ## ConvertNOTEQToNE # # Convert NOT EQ operator to NE # For example: 1 NOT NE 2 -> 1 NE 2 # # @param Expr: Feature flag expression to be converted # # At least, 'NOT' is in the list ## SplitPcdEntry # # Split an PCD entry string to Token.CName and PCD value and FFE. # NOTE: PCD Value and FFE can contain "|" in it's expression. And in INF specification, have below rule. # When using the characters "|" or "||" in an expression, the expression must be encapsulated in # open "(" and close ")" parenthesis. # # @param String An PCD entry string need to be split. # # @return List [PcdTokenCName, Value, FFE] # # # Only contain TokenCName # # # An individual item # # # ERROR, return #
| 2.597574
| 3
|
apps/secure_url/api/tests/tests_secured_entity_access.py
|
fryta/sercure-url
| 0
|
6628409
|
from datetime import timedelta
from django.conf import settings
from rest_framework import status
from rest_framework.reverse import reverse
from .tests_base import BaseApiTestCase
from ...models import SecuredEntity
class SecuredEntityAccessApiTest(BaseApiTestCase):
def __finish_create_secured_entity(self):
self.assertEqual(status.HTTP_201_CREATED, self.response.status_code)
self.secured_entity = SecuredEntity.objects.get(pk=self.response.data['id'])
self.access_url = reverse('secure_url.api:secured-entity-get-access-api-view', args=(self.response.data['id'],))
def _create_secured_entity_from_url(self):
self.response = self.client.post(self.list_create_url, self.data_with_url, format='json',
**self.extra_with_permissions)
self.__finish_create_secured_entity()
def _create_secured_entity_from_file(self):
tmp_file = self._get_tmp_file()
with open(tmp_file.name, 'rb') as file:
self.response = self.client.post(self.list_create_url, {'file': file}, format='multipart',
**self.extra_with_permissions)
self.__finish_create_secured_entity()
def test_access_secured_entity_from_url_without_password_results_in_400__authorized(self):
self._create_secured_entity_from_url()
response = self.client.post(self.access_url, {}, format='json',
**self.extra_with_permissions)
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
def test_access_secured_entity_from_url_without_password_returns_correct_response__authorized(self):
self._create_secured_entity_from_url()
response = self.client.post(self.access_url, {}, format='json',
**self.extra_with_permissions)
self.assertDictEqual({'password': ['<PASSWORD>.']}, response.data)
def test_access_secured_entity_from_url_wrong_password_results_in_400__authorized(self):
self._create_secured_entity_from_url()
response = self.client.post(self.access_url, {'password': '<PASSWORD>'}, format='json',
**self.extra_with_permissions)
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
def test_access_secured_entity_from_url_wrong_password_returns_correct_response__authorized(self):
self._create_secured_entity_from_url()
response = self.client.post(self.access_url, {'password': '<PASSWORD>'}, format='json',
**self.extra_with_permissions)
self.assertDictEqual({'password': ['<PASSWORD>.']}, response.data)
def test_access_secured_entity_from_url_correct_password_results_in_200__authorized(self):
self._create_secured_entity_from_url()
response = self.client.post(self.access_url, {'password': self.response.data['password']}, format='json',
**self.extra_with_permissions)
self.assertEqual(status.HTTP_200_OK, response.status_code)
def test_access_secured_entity_from_url_correct_password_returns_correct_response__authorized(self):
self._create_secured_entity_from_url()
response = self.client.post(self.access_url, {'password': self.response.data['password']}, format='json',
**self.extra_with_permissions)
self.assertDictEqual({'secured_entity': self.data_with_url['url']}, response.data)
def test_access_secured_entity_from_url_correct_password_just_before_deadline_results_in_200__authorized(self):
self._create_secured_entity_from_url()
SecuredEntity.objects.filter(pk=self.secured_entity.pk).update(
created=self.secured_entity.created - settings.SECURED_ENTITY_ACCESSIBLE_TIME + timedelta(seconds=1))
response = self.client.post(self.access_url, {'password': <PASSWORD>['password']}, format='json',
**self.extra_with_permissions)
self.assertEqual(status.HTTP_200_OK, response.status_code)
def test_access_secured_entity_from_url_correct_password_just_before_deadline_returns_correct_response__authorized(
self):
self._create_secured_entity_from_url()
SecuredEntity.objects.filter(pk=self.secured_entity.pk).update(
created=self.secured_entity.created - settings.SECURED_ENTITY_ACCESSIBLE_TIME + timedelta(seconds=1))
response = self.client.post(self.access_url, {'password': <PASSWORD>['password']}, format='json',
**self.extra_with_permissions)
self.assertDictEqual({'secured_entity': self.data_with_url['url']}, response.data)
def test_access_secured_entity_from_url_correct_password_just_after_deadline_results_in_400__authorized(self):
self._create_secured_entity_from_url()
SecuredEntity.objects.filter(pk=self.secured_entity.pk).update(
created=self.secured_entity.created - settings.SECURED_ENTITY_ACCESSIBLE_TIME - timedelta(seconds=1))
response = self.client.post(self.access_url, {'password': <PASSWORD>['password']}, format='json',
**self.extra_with_permissions)
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
def test_access_secured_entity_from_url_correct_password_just_after_deadline_returns_correct_response__authorized(
self):
self._create_secured_entity_from_url()
SecuredEntity.objects.filter(pk=self.secured_entity.pk).update(
created=self.secured_entity.created - settings.SECURED_ENTITY_ACCESSIBLE_TIME - timedelta(seconds=1))
response = self.client.post(self.access_url, {'password': self.response.data['password']}, format='json',
**self.extra_with_permissions)
self.assertDictEqual({
"non_field_errors": [
"Sorry, this secured entity is no longer available."
]
}, response.data)
def test_access_secured_entity_from_file_without_password_results_in_400__authorized(self):
self._create_secured_entity_from_file()
response = self.client.post(self.access_url, {}, format='json',
**self.extra_with_permissions)
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
def test_access_secured_entity_from_file_without_password_returns_correct_response__authorized(self):
self._create_secured_entity_from_file()
response = self.client.post(self.access_url, {}, format='json',
**self.extra_with_permissions)
self.assertDictEqual({'password': ['<PASSWORD>.']}, response.data)
def test_access_secured_entity_from_file_wrong_password_results_in_400__authorized(self):
self._create_secured_entity_from_file()
response = self.client.post(self.access_url, {'password': '<PASSWORD>'}, format='json',
**self.extra_with_permissions)
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
def test_access_secured_entity_from_file_wrong_password_returns_correct_response__authorized(self):
self._create_secured_entity_from_file()
response = self.client.post(self.access_url, {'password': '<PASSWORD>'}, format='json',
**self.extra_with_permissions)
self.assertDictEqual({'password': ['Password do not match.']}, response.data)
def test_access_secured_entity_from_file_correct_password_results_in_200__authorized(self):
self._create_secured_entity_from_file()
response = self.client.post(self.access_url, {'password': self.response.data['password']}, format='json',
**self.extra_with_permissions)
self.assertEqual(status.HTTP_200_OK, response.status_code)
def test_access_secured_entity_from_file_correct_password_returns_correct_response__authorized(self):
self._create_secured_entity_from_file()
response = self.client.post(self.access_url, {'password': self.response.data['password']}, format='json',
**self.extra_with_permissions)
self.assertIn('secured_entity', response.data)
self.assertIn('http://testserver/media/secure_url/files/', response.data['secured_entity'])
def test_access_secured_entity_from_file_correct_password_just_before_deadline_results_in_200__authorized(self):
self._create_secured_entity_from_file()
SecuredEntity.objects.filter(pk=self.secured_entity.pk).update(
created=self.secured_entity.created - settings.SECURED_ENTITY_ACCESSIBLE_TIME + timedelta(seconds=1))
response = self.client.post(self.access_url, {'password': self.response.data['password']}, format='json',
**self.extra_with_permissions)
self.assertEqual(status.HTTP_200_OK, response.status_code)
def test_access_secured_entity_from_file_correct_password_just_before_deadline_returns_correct_response__authorized(
self):
self._create_secured_entity_from_file()
SecuredEntity.objects.filter(pk=self.secured_entity.pk).update(
created=self.secured_entity.created - settings.SECURED_ENTITY_ACCESSIBLE_TIME + timedelta(seconds=1))
response = self.client.post(self.access_url, {'password': self.response.data['password']}, format='json',
**self.extra_with_permissions)
self.assertIn('secured_entity', response.data)
self.assertIn('http://testserver/media/secure_url/files/', response.data['secured_entity'])
def test_access_secured_entity_from_file_correct_password_just_after_deadline_results_in_400__authorized(self):
self._create_secured_entity_from_file()
SecuredEntity.objects.filter(pk=self.secured_entity.pk).update(
created=self.secured_entity.created - settings.SECURED_ENTITY_ACCESSIBLE_TIME - timedelta(seconds=1))
response = self.client.post(self.access_url, {'password': <PASSWORD>.data['password']}, format='json',
**self.extra_with_permissions)
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
def test_access_secured_entity_from_file_correct_password_just_after_deadline_returns_correct_response__authorized(
self):
self._create_secured_entity_from_file()
SecuredEntity.objects.filter(pk=self.secured_entity.pk).update(
created=self.secured_entity.created - settings.SECURED_ENTITY_ACCESSIBLE_TIME - timedelta(seconds=1))
response = self.client.post(self.access_url, {'password': <PASSWORD>.data['password']}, format='json',
**self.extra_with_permissions)
self.assertDictEqual({
"non_field_errors": [
"Sorry, this secured entity is no longer available."
]
}, response.data)
def test_access_secured_entity_from_url_without_password_results_in_400__unauthorized(self):
self._create_secured_entity_from_url()
response = self.client.post(self.access_url, {}, format='json',
**self.extra)
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
def test_access_secured_entity_from_url_without_password_returns_correct_response__unauthorized(self):
self._create_secured_entity_from_url()
response = self.client.post(self.access_url, {}, format='json',
**self.extra)
self.assertDictEqual({'password': ['<PASSWORD>.']}, response.data)
def test_access_secured_entity_from_url_wrong_password_results_in_400__unauthorized(self):
self._create_secured_entity_from_url()
response = self.client.post(self.access_url, {'password': '<PASSWORD>'}, format='json',
**self.extra)
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
def test_access_secured_entity_from_url_wrong_password_returns_correct_response__unauthorized(self):
self._create_secured_entity_from_url()
response = self.client.post(self.access_url, {'password': '<PASSWORD>'}, format='json',
**self.extra)
self.assertDictEqual({'password': ['<PASSWORD>.']}, response.data)
def test_access_secured_entity_from_url_correct_password_results_in_200__unauthorized(self):
self._create_secured_entity_from_url()
response = self.client.post(self.access_url, {'password': self.response.data['password']}, format='json',
**self.extra)
self.assertEqual(status.HTTP_200_OK, response.status_code)
def test_access_secured_entity_from_url_correct_password_returns_correct_response__unauthorized(self):
self._create_secured_entity_from_url()
response = self.client.post(self.access_url, {'password': self.response.data['password']}, format='json',
**self.extra)
self.assertDictEqual({'secured_entity': self.data_with_url['url']}, response.data)
def test_access_secured_entity_from_url_correct_password_just_before_deadline_results_in_200__unauthorized(self):
self._create_secured_entity_from_url()
SecuredEntity.objects.filter(pk=self.secured_entity.pk).update(
created=self.secured_entity.created - settings.SECURED_ENTITY_ACCESSIBLE_TIME + timedelta(seconds=1))
response = self.client.post(self.access_url, {'password': self.response.data['password']}, format='json',
**self.extra)
self.assertEqual(status.HTTP_200_OK, response.status_code)
def test_access_secured_entity_from_url_correct_password_just_before_deadline_returns_correct_response__unauthorized(
self):
self._create_secured_entity_from_url()
SecuredEntity.objects.filter(pk=self.secured_entity.pk).update(
created=self.secured_entity.created - settings.SECURED_ENTITY_ACCESSIBLE_TIME + timedelta(seconds=1))
response = self.client.post(self.access_url, {'password': self.response.data['password']}, format='json',
**self.extra)
self.assertDictEqual({'secured_entity': self.data_with_url['url']}, response.data)
def test_access_secured_entity_from_url_correct_password_just_after_deadline_results_in_400__unauthorized(self):
self._create_secured_entity_from_url()
SecuredEntity.objects.filter(pk=self.secured_entity.pk).update(
created=self.secured_entity.created - settings.SECURED_ENTITY_ACCESSIBLE_TIME - timedelta(seconds=1))
response = self.client.post(self.access_url, {'password': self.response.data['password']}, format='json',
**self.extra)
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
def test_access_secured_entity_from_url_correct_password_just_after_deadline_returns_correct_response__unauthorized(
self):
self._create_secured_entity_from_url()
SecuredEntity.objects.filter(pk=self.secured_entity.pk).update(
created=self.secured_entity.created - settings.SECURED_ENTITY_ACCESSIBLE_TIME - timedelta(seconds=1))
response = self.client.post(self.access_url, {'password': self.response.data['password']}, format='json',
**self.extra)
self.assertDictEqual({
"non_field_errors": [
"Sorry, this secured entity is no longer available."
]
}, response.data)
def test_access_secured_entity_from_file_without_password_results_in_400__unauthorized(self):
self._create_secured_entity_from_file()
response = self.client.post(self.access_url, {}, format='json',
**self.extra)
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
def test_access_secured_entity_from_file_without_password_returns_correct_response__unauthorized(self):
self._create_secured_entity_from_file()
response = self.client.post(self.access_url, {}, format='json',
**self.extra)
self.assertDictEqual({'password': ['<PASSWORD>.']}, response.data)
def test_access_secured_entity_from_file_wrong_password_results_in_400__unauthorized(self):
self._create_secured_entity_from_file()
response = self.client.post(self.access_url, {'password': '<PASSWORD>'}, format='json',
**self.extra)
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
def test_access_secured_entity_from_file_wrong_password_returns_correct_response__unauthorized(self):
self._create_secured_entity_from_file()
response = self.client.post(self.access_url, {'password': '<PASSWORD>'}, format='json',
**self.extra)
self.assertDictEqual({'password': ['<PASSWORD>.']}, response.data)
def test_access_secured_entity_from_file_correct_password_results_in_200__unauthorized(self):
self._create_secured_entity_from_file()
response = self.client.post(self.access_url, {'password': self.response.data['password']}, format='json',
**self.extra)
self.assertEqual(status.HTTP_200_OK, response.status_code)
def test_access_secured_entity_from_file_correct_password_returns_correct_response__unauthorized(self):
self._create_secured_entity_from_file()
response = self.client.post(self.access_url, {'password': self.response.data['password']}, format='json',
**self.extra)
self.assertIn('secured_entity', response.data)
self.assertIn('http://testserver/media/secure_url/files/', response.data['secured_entity'])
def test_access_secured_entity_from_file_correct_password_just_before_deadline_results_in_200__unauthorized(self):
self._create_secured_entity_from_file()
SecuredEntity.objects.filter(pk=self.secured_entity.pk).update(
created=self.secured_entity.created - settings.SECURED_ENTITY_ACCESSIBLE_TIME + timedelta(seconds=1))
response = self.client.post(self.access_url, {'password': self.response.data['password']}, format='json',
**self.extra)
self.assertEqual(status.HTTP_200_OK, response.status_code)
def test_access_secured_entity_from_file_correct_password_just_before_deadline_returns_correct_response__unauthorized(
self):
self._create_secured_entity_from_file()
SecuredEntity.objects.filter(pk=self.secured_entity.pk).update(
created=self.secured_entity.created - settings.SECURED_ENTITY_ACCESSIBLE_TIME + timedelta(seconds=1))
response = self.client.post(self.access_url, {'password': self.response.data['password']}, format='json',
**self.extra)
self.assertIn('secured_entity', response.data)
self.assertIn('http://testserver/media/secure_url/files/', response.data['secured_entity'])
def test_access_secured_entity_from_file_correct_password_just_after_deadline_results_in_400__unauthorized(self):
self._create_secured_entity_from_file()
SecuredEntity.objects.filter(pk=self.secured_entity.pk).update(
created=self.secured_entity.created - settings.SECURED_ENTITY_ACCESSIBLE_TIME - timedelta(seconds=1))
response = self.client.post(self.access_url, {'password': self.response.data['password']}, format='json',
**self.extra)
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
def test_access_secured_entity_from_file_correct_password_just_after_deadline_returns_correct_response__unauthorized(
self):
self._create_secured_entity_from_file()
SecuredEntity.objects.filter(pk=self.secured_entity.pk).update(
created=self.secured_entity.created - settings.SECURED_ENTITY_ACCESSIBLE_TIME - timedelta(seconds=1))
response = self.client.post(self.access_url, {'password': self.response.data['password']}, format='json',
**self.extra)
self.assertDictEqual({
"non_field_errors": [
"Sorry, this secured entity is no longer available."
]
}, response.data)
|
from datetime import timedelta
from django.conf import settings
from rest_framework import status
from rest_framework.reverse import reverse
from .tests_base import BaseApiTestCase
from ...models import SecuredEntity
class SecuredEntityAccessApiTest(BaseApiTestCase):
def __finish_create_secured_entity(self):
self.assertEqual(status.HTTP_201_CREATED, self.response.status_code)
self.secured_entity = SecuredEntity.objects.get(pk=self.response.data['id'])
self.access_url = reverse('secure_url.api:secured-entity-get-access-api-view', args=(self.response.data['id'],))
def _create_secured_entity_from_url(self):
self.response = self.client.post(self.list_create_url, self.data_with_url, format='json',
**self.extra_with_permissions)
self.__finish_create_secured_entity()
def _create_secured_entity_from_file(self):
tmp_file = self._get_tmp_file()
with open(tmp_file.name, 'rb') as file:
self.response = self.client.post(self.list_create_url, {'file': file}, format='multipart',
**self.extra_with_permissions)
self.__finish_create_secured_entity()
def test_access_secured_entity_from_url_without_password_results_in_400__authorized(self):
self._create_secured_entity_from_url()
response = self.client.post(self.access_url, {}, format='json',
**self.extra_with_permissions)
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
def test_access_secured_entity_from_url_without_password_returns_correct_response__authorized(self):
self._create_secured_entity_from_url()
response = self.client.post(self.access_url, {}, format='json',
**self.extra_with_permissions)
self.assertDictEqual({'password': ['<PASSWORD>.']}, response.data)
def test_access_secured_entity_from_url_wrong_password_results_in_400__authorized(self):
self._create_secured_entity_from_url()
response = self.client.post(self.access_url, {'password': '<PASSWORD>'}, format='json',
**self.extra_with_permissions)
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
def test_access_secured_entity_from_url_wrong_password_returns_correct_response__authorized(self):
self._create_secured_entity_from_url()
response = self.client.post(self.access_url, {'password': '<PASSWORD>'}, format='json',
**self.extra_with_permissions)
self.assertDictEqual({'password': ['<PASSWORD>.']}, response.data)
def test_access_secured_entity_from_url_correct_password_results_in_200__authorized(self):
self._create_secured_entity_from_url()
response = self.client.post(self.access_url, {'password': self.response.data['password']}, format='json',
**self.extra_with_permissions)
self.assertEqual(status.HTTP_200_OK, response.status_code)
def test_access_secured_entity_from_url_correct_password_returns_correct_response__authorized(self):
self._create_secured_entity_from_url()
response = self.client.post(self.access_url, {'password': self.response.data['password']}, format='json',
**self.extra_with_permissions)
self.assertDictEqual({'secured_entity': self.data_with_url['url']}, response.data)
def test_access_secured_entity_from_url_correct_password_just_before_deadline_results_in_200__authorized(self):
self._create_secured_entity_from_url()
SecuredEntity.objects.filter(pk=self.secured_entity.pk).update(
created=self.secured_entity.created - settings.SECURED_ENTITY_ACCESSIBLE_TIME + timedelta(seconds=1))
response = self.client.post(self.access_url, {'password': <PASSWORD>['password']}, format='json',
**self.extra_with_permissions)
self.assertEqual(status.HTTP_200_OK, response.status_code)
def test_access_secured_entity_from_url_correct_password_just_before_deadline_returns_correct_response__authorized(
self):
self._create_secured_entity_from_url()
SecuredEntity.objects.filter(pk=self.secured_entity.pk).update(
created=self.secured_entity.created - settings.SECURED_ENTITY_ACCESSIBLE_TIME + timedelta(seconds=1))
response = self.client.post(self.access_url, {'password': <PASSWORD>['password']}, format='json',
**self.extra_with_permissions)
self.assertDictEqual({'secured_entity': self.data_with_url['url']}, response.data)
def test_access_secured_entity_from_url_correct_password_just_after_deadline_results_in_400__authorized(self):
self._create_secured_entity_from_url()
SecuredEntity.objects.filter(pk=self.secured_entity.pk).update(
created=self.secured_entity.created - settings.SECURED_ENTITY_ACCESSIBLE_TIME - timedelta(seconds=1))
response = self.client.post(self.access_url, {'password': <PASSWORD>['password']}, format='json',
**self.extra_with_permissions)
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
def test_access_secured_entity_from_url_correct_password_just_after_deadline_returns_correct_response__authorized(
self):
self._create_secured_entity_from_url()
SecuredEntity.objects.filter(pk=self.secured_entity.pk).update(
created=self.secured_entity.created - settings.SECURED_ENTITY_ACCESSIBLE_TIME - timedelta(seconds=1))
response = self.client.post(self.access_url, {'password': self.response.data['password']}, format='json',
**self.extra_with_permissions)
self.assertDictEqual({
"non_field_errors": [
"Sorry, this secured entity is no longer available."
]
}, response.data)
def test_access_secured_entity_from_file_without_password_results_in_400__authorized(self):
self._create_secured_entity_from_file()
response = self.client.post(self.access_url, {}, format='json',
**self.extra_with_permissions)
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
def test_access_secured_entity_from_file_without_password_returns_correct_response__authorized(self):
self._create_secured_entity_from_file()
response = self.client.post(self.access_url, {}, format='json',
**self.extra_with_permissions)
self.assertDictEqual({'password': ['<PASSWORD>.']}, response.data)
def test_access_secured_entity_from_file_wrong_password_results_in_400__authorized(self):
self._create_secured_entity_from_file()
response = self.client.post(self.access_url, {'password': '<PASSWORD>'}, format='json',
**self.extra_with_permissions)
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
def test_access_secured_entity_from_file_wrong_password_returns_correct_response__authorized(self):
self._create_secured_entity_from_file()
response = self.client.post(self.access_url, {'password': '<PASSWORD>'}, format='json',
**self.extra_with_permissions)
self.assertDictEqual({'password': ['Password do not match.']}, response.data)
def test_access_secured_entity_from_file_correct_password_results_in_200__authorized(self):
self._create_secured_entity_from_file()
response = self.client.post(self.access_url, {'password': self.response.data['password']}, format='json',
**self.extra_with_permissions)
self.assertEqual(status.HTTP_200_OK, response.status_code)
def test_access_secured_entity_from_file_correct_password_returns_correct_response__authorized(self):
self._create_secured_entity_from_file()
response = self.client.post(self.access_url, {'password': self.response.data['password']}, format='json',
**self.extra_with_permissions)
self.assertIn('secured_entity', response.data)
self.assertIn('http://testserver/media/secure_url/files/', response.data['secured_entity'])
def test_access_secured_entity_from_file_correct_password_just_before_deadline_results_in_200__authorized(self):
self._create_secured_entity_from_file()
SecuredEntity.objects.filter(pk=self.secured_entity.pk).update(
created=self.secured_entity.created - settings.SECURED_ENTITY_ACCESSIBLE_TIME + timedelta(seconds=1))
response = self.client.post(self.access_url, {'password': self.response.data['password']}, format='json',
**self.extra_with_permissions)
self.assertEqual(status.HTTP_200_OK, response.status_code)
def test_access_secured_entity_from_file_correct_password_just_before_deadline_returns_correct_response__authorized(
self):
self._create_secured_entity_from_file()
SecuredEntity.objects.filter(pk=self.secured_entity.pk).update(
created=self.secured_entity.created - settings.SECURED_ENTITY_ACCESSIBLE_TIME + timedelta(seconds=1))
response = self.client.post(self.access_url, {'password': self.response.data['password']}, format='json',
**self.extra_with_permissions)
self.assertIn('secured_entity', response.data)
self.assertIn('http://testserver/media/secure_url/files/', response.data['secured_entity'])
def test_access_secured_entity_from_file_correct_password_just_after_deadline_results_in_400__authorized(self):
self._create_secured_entity_from_file()
SecuredEntity.objects.filter(pk=self.secured_entity.pk).update(
created=self.secured_entity.created - settings.SECURED_ENTITY_ACCESSIBLE_TIME - timedelta(seconds=1))
response = self.client.post(self.access_url, {'password': <PASSWORD>.data['password']}, format='json',
**self.extra_with_permissions)
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
def test_access_secured_entity_from_file_correct_password_just_after_deadline_returns_correct_response__authorized(
self):
self._create_secured_entity_from_file()
SecuredEntity.objects.filter(pk=self.secured_entity.pk).update(
created=self.secured_entity.created - settings.SECURED_ENTITY_ACCESSIBLE_TIME - timedelta(seconds=1))
response = self.client.post(self.access_url, {'password': <PASSWORD>.data['password']}, format='json',
**self.extra_with_permissions)
self.assertDictEqual({
"non_field_errors": [
"Sorry, this secured entity is no longer available."
]
}, response.data)
def test_access_secured_entity_from_url_without_password_results_in_400__unauthorized(self):
self._create_secured_entity_from_url()
response = self.client.post(self.access_url, {}, format='json',
**self.extra)
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
def test_access_secured_entity_from_url_without_password_returns_correct_response__unauthorized(self):
self._create_secured_entity_from_url()
response = self.client.post(self.access_url, {}, format='json',
**self.extra)
self.assertDictEqual({'password': ['<PASSWORD>.']}, response.data)
def test_access_secured_entity_from_url_wrong_password_results_in_400__unauthorized(self):
self._create_secured_entity_from_url()
response = self.client.post(self.access_url, {'password': '<PASSWORD>'}, format='json',
**self.extra)
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
def test_access_secured_entity_from_url_wrong_password_returns_correct_response__unauthorized(self):
self._create_secured_entity_from_url()
response = self.client.post(self.access_url, {'password': '<PASSWORD>'}, format='json',
**self.extra)
self.assertDictEqual({'password': ['<PASSWORD>.']}, response.data)
def test_access_secured_entity_from_url_correct_password_results_in_200__unauthorized(self):
self._create_secured_entity_from_url()
response = self.client.post(self.access_url, {'password': self.response.data['password']}, format='json',
**self.extra)
self.assertEqual(status.HTTP_200_OK, response.status_code)
def test_access_secured_entity_from_url_correct_password_returns_correct_response__unauthorized(self):
self._create_secured_entity_from_url()
response = self.client.post(self.access_url, {'password': self.response.data['password']}, format='json',
**self.extra)
self.assertDictEqual({'secured_entity': self.data_with_url['url']}, response.data)
def test_access_secured_entity_from_url_correct_password_just_before_deadline_results_in_200__unauthorized(self):
self._create_secured_entity_from_url()
SecuredEntity.objects.filter(pk=self.secured_entity.pk).update(
created=self.secured_entity.created - settings.SECURED_ENTITY_ACCESSIBLE_TIME + timedelta(seconds=1))
response = self.client.post(self.access_url, {'password': self.response.data['password']}, format='json',
**self.extra)
self.assertEqual(status.HTTP_200_OK, response.status_code)
def test_access_secured_entity_from_url_correct_password_just_before_deadline_returns_correct_response__unauthorized(
self):
self._create_secured_entity_from_url()
SecuredEntity.objects.filter(pk=self.secured_entity.pk).update(
created=self.secured_entity.created - settings.SECURED_ENTITY_ACCESSIBLE_TIME + timedelta(seconds=1))
response = self.client.post(self.access_url, {'password': self.response.data['password']}, format='json',
**self.extra)
self.assertDictEqual({'secured_entity': self.data_with_url['url']}, response.data)
def test_access_secured_entity_from_url_correct_password_just_after_deadline_results_in_400__unauthorized(self):
self._create_secured_entity_from_url()
SecuredEntity.objects.filter(pk=self.secured_entity.pk).update(
created=self.secured_entity.created - settings.SECURED_ENTITY_ACCESSIBLE_TIME - timedelta(seconds=1))
response = self.client.post(self.access_url, {'password': self.response.data['password']}, format='json',
**self.extra)
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
def test_access_secured_entity_from_url_correct_password_just_after_deadline_returns_correct_response__unauthorized(
self):
self._create_secured_entity_from_url()
SecuredEntity.objects.filter(pk=self.secured_entity.pk).update(
created=self.secured_entity.created - settings.SECURED_ENTITY_ACCESSIBLE_TIME - timedelta(seconds=1))
response = self.client.post(self.access_url, {'password': self.response.data['password']}, format='json',
**self.extra)
self.assertDictEqual({
"non_field_errors": [
"Sorry, this secured entity is no longer available."
]
}, response.data)
def test_access_secured_entity_from_file_without_password_results_in_400__unauthorized(self):
self._create_secured_entity_from_file()
response = self.client.post(self.access_url, {}, format='json',
**self.extra)
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
def test_access_secured_entity_from_file_without_password_returns_correct_response__unauthorized(self):
self._create_secured_entity_from_file()
response = self.client.post(self.access_url, {}, format='json',
**self.extra)
self.assertDictEqual({'password': ['<PASSWORD>.']}, response.data)
def test_access_secured_entity_from_file_wrong_password_results_in_400__unauthorized(self):
self._create_secured_entity_from_file()
response = self.client.post(self.access_url, {'password': '<PASSWORD>'}, format='json',
**self.extra)
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
def test_access_secured_entity_from_file_wrong_password_returns_correct_response__unauthorized(self):
self._create_secured_entity_from_file()
response = self.client.post(self.access_url, {'password': '<PASSWORD>'}, format='json',
**self.extra)
self.assertDictEqual({'password': ['<PASSWORD>.']}, response.data)
def test_access_secured_entity_from_file_correct_password_results_in_200__unauthorized(self):
self._create_secured_entity_from_file()
response = self.client.post(self.access_url, {'password': self.response.data['password']}, format='json',
**self.extra)
self.assertEqual(status.HTTP_200_OK, response.status_code)
def test_access_secured_entity_from_file_correct_password_returns_correct_response__unauthorized(self):
self._create_secured_entity_from_file()
response = self.client.post(self.access_url, {'password': self.response.data['password']}, format='json',
**self.extra)
self.assertIn('secured_entity', response.data)
self.assertIn('http://testserver/media/secure_url/files/', response.data['secured_entity'])
def test_access_secured_entity_from_file_correct_password_just_before_deadline_results_in_200__unauthorized(self):
self._create_secured_entity_from_file()
SecuredEntity.objects.filter(pk=self.secured_entity.pk).update(
created=self.secured_entity.created - settings.SECURED_ENTITY_ACCESSIBLE_TIME + timedelta(seconds=1))
response = self.client.post(self.access_url, {'password': self.response.data['password']}, format='json',
**self.extra)
self.assertEqual(status.HTTP_200_OK, response.status_code)
def test_access_secured_entity_from_file_correct_password_just_before_deadline_returns_correct_response__unauthorized(
self):
self._create_secured_entity_from_file()
SecuredEntity.objects.filter(pk=self.secured_entity.pk).update(
created=self.secured_entity.created - settings.SECURED_ENTITY_ACCESSIBLE_TIME + timedelta(seconds=1))
response = self.client.post(self.access_url, {'password': self.response.data['password']}, format='json',
**self.extra)
self.assertIn('secured_entity', response.data)
self.assertIn('http://testserver/media/secure_url/files/', response.data['secured_entity'])
def test_access_secured_entity_from_file_correct_password_just_after_deadline_results_in_400__unauthorized(self):
self._create_secured_entity_from_file()
SecuredEntity.objects.filter(pk=self.secured_entity.pk).update(
created=self.secured_entity.created - settings.SECURED_ENTITY_ACCESSIBLE_TIME - timedelta(seconds=1))
response = self.client.post(self.access_url, {'password': self.response.data['password']}, format='json',
**self.extra)
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
def test_access_secured_entity_from_file_correct_password_just_after_deadline_returns_correct_response__unauthorized(
self):
self._create_secured_entity_from_file()
SecuredEntity.objects.filter(pk=self.secured_entity.pk).update(
created=self.secured_entity.created - settings.SECURED_ENTITY_ACCESSIBLE_TIME - timedelta(seconds=1))
response = self.client.post(self.access_url, {'password': self.response.data['password']}, format='json',
**self.extra)
self.assertDictEqual({
"non_field_errors": [
"Sorry, this secured entity is no longer available."
]
}, response.data)
|
none
| 1
| 2.149061
| 2
|
|
submissions/abc130/d.py
|
m-star18/atcoder
| 1
|
6628410
|
<reponame>m-star18/atcoder
import bisect
import sys
input = sys.stdin.readline


def solve(n, k, a):
    """Count contiguous subarrays of `a` (length n) whose sum is >= k.

    Builds the prefix-sum array, then for each left endpoint i binary-searches
    the first prefix value >= k + prefix[i]; every prefix at or after that
    index closes a qualifying subarray. Elements are assumed non-negative so
    the prefix sums are non-decreasing, which is what makes bisect valid.
    """
    prefix = [0] * (n + 1)
    for i in range(n):
        prefix[i + 1] = a[i] + prefix[i]
    count = 0
    for i in range(n):
        # First j with prefix[j] >= k + prefix[i]  <=>  sum(a[i:j]) >= k.
        j = bisect.bisect_left(prefix, k + prefix[i])
        count += n + 1 - j
    return count


if __name__ == "__main__":
    # Read "n k" on the first line, then the n array elements.
    n, k = map(int, input().split())
    a = list(map(int, input().split()))
    print(solve(n, k, a))
|
import bisect
import sys
input = sys.stdin.readline
n, k = map(int, input().split())
a = list(map(int, input().split()))
A = [0]*(n+1)
ans = 0
for i in range(n):
A[i+1] = a[i]+A[i]
for i in range(n):
s = bisect.bisect_left(A, k+A[i])
ans += n+1-s
print(ans)
|
none
| 1
| 2.770462
| 3
|
|
mbuild/lib/moieties/peg.py
|
daico007/mbuild
| 101
|
6628411
|
"""mBuild polyethylene glycol (PEG) monomer moiety."""
__author__ = "jonestj1"
import mbuild as mb
class PegMonomer(mb.Compound):
    """A monomer of polyethylene glycol (PEG)."""

    def __init__(self):
        super(PegMonomer, self).__init__()
        # Load the monomer geometry that ships alongside this module.
        mb.load(
            "peg_monomer.pdb",
            compound=self,
            relative_to_module=self.__module__,
            infer_hierarchy=False,
        )
        # Shift the whole compound so that atom 0 sits at the origin.
        self.translate(-self[0].pos)
        # Attach the two bonding ports ("down" then "up"), each nudged along
        # the y axis away from its anchor atom.
        for label, anchor_idx, dy in (("down", 0, -0.07), ("up", 6, 0.073)):
            self.add(mb.Port(anchor=self[anchor_idx]), label)
            self[label].translate([0, dy, 0])


if __name__ == "__main__":
    peg = PegMonomer()
    peg.save("peg.mol2")
|
"""mBuild polyethylene glycol (PEG) monomer moiety."""
__author__ = "jonestj1"
import mbuild as mb
class PegMonomer(mb.Compound):
"""A monomer of polyethylene glycol (PEG)."""
def __init__(self):
super(PegMonomer, self).__init__()
mb.load(
"peg_monomer.pdb",
compound=self,
relative_to_module=self.__module__,
infer_hierarchy=False,
)
self.translate(-self[0].pos)
self.add(mb.Port(anchor=self[0]), "down")
self["down"].translate([0, -0.07, 0])
self.add(mb.Port(anchor=self[6]), "up")
self["up"].translate([0, 0.073, 0])
if __name__ == "__main__":
peg = PegMonomer()
peg.save("peg.mol2")
|
en
| 0.38053
|
mBuild polyethylene glycol (PEG) monomer moiety. A monomer of polyethylene glycol (PEG).
| 2.748107
| 3
|
OpenCV/video_cut.py
|
Tripleler/Tistory_blog
| 0
|
6628412
|
import sys
import cv2
# Open the clip to step through; bail out early if it cannot be read.
cap = cv2.VideoCapture('md.mp4')
if not cap.isOpened():
    print("Video open failed!")
    sys.exit()
# Report basic stream properties (frame rate and frame size).
fps = cap.get(cv2.CAP_PROP_FPS)
print('FPS:', fps)
w = round(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
print('Frame width:', w)
h = round(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
print('Frame height:', h)
# Interactive frame stepper: any key advances one frame,
# ESC (27) quits, 'b' steps one frame back.
while True:
    ret, frame = cap.read()
    if not ret:
        break
    # Position is read AFTER cap.read(), so f is the index of the NEXT frame.
    f = round(cap.get(cv2.CAP_PROP_POS_FRAMES))
    print('Prop pos frames:', f)
    cv2.imshow('frame', frame)
    cv2.moveWindow('frame', 300, 100)
    # waitKey() with no timeout blocks until a key is pressed.
    key = cv2.waitKey()
    if key == 27:
        break
    if key == ord('b'):
        # f points one past the frame just shown, so f-2 rewinds by one frame.
        # NOTE(review): on the very first frame this sets a negative position —
        # OpenCV clamps/ignores it in practice, but worth confirming.
        cap.set(cv2.CAP_PROP_POS_FRAMES, f - 2)
cap.release()
cv2.destroyAllWindows()
# cap = cv2.VideoCapture('Raw.mp4')
# out = cv2.VideoWriter('Cut.mp4', cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
# cap.set(cv2.CAP_PROP_POS_FRAMES, 116)
# while True:
#     ret, frame = cap.read()
#     if not ret:
#         break
#     out.write(frame)
#
# cap.release()
# out.release()
# cv2.destroyAllWindows()
#
# print('Edit Finished')
|
import sys
import cv2
cap = cv2.VideoCapture('md.mp4')
if not cap.isOpened():
print("Video open failed!")
sys.exit()
fps = cap.get(cv2.CAP_PROP_FPS)
print('FPS:', fps)
w = round(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
print('Frame width:', w)
h = round(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
print('Frame height:', h)
while True:
ret, frame = cap.read()
if not ret:
break
f = round(cap.get(cv2.CAP_PROP_POS_FRAMES))
print('Prop pos frames:', f)
cv2.imshow('frame', frame)
cv2.moveWindow('frame', 300, 100)
key = cv2.waitKey()
if key == 27:
break
if key == ord('b'):
cap.set(cv2.CAP_PROP_POS_FRAMES, f - 2)
cap.release()
cv2.destroyAllWindows()
# cap = cv2.VideoCapture('Raw.mp4')
# out = cv2.VideoWriter('Cut.mp4', cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
# cap.set(cv2.CAP_PROP_POS_FRAMES, 116)
# while True:
# ret, frame = cap.read()
# if not ret:
# break
# out.write(frame)
#
# cap.release()
# out.release()
# cv2.destroyAllWindows()
#
# print('Edit Finished')
|
en
| 0.319214
|
# cap = cv2.VideoCapture('Raw.mp4') # out = cv2.VideoWriter('Cut.mp4', cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) # cap.set(cv2.CAP_PROP_POS_FRAMES, 116) # while True: # ret, frame = cap.read() # if not ret: # break # out.write(frame) # # cap.release() # out.release() # cv2.destroyAllWindows() # # print('Edit Finished')
| 2.686834
| 3
|
SAC/models.py
|
pnnayyeri/Reinforcement-learning
| 0
|
6628413
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal, Uniform
class ValueNetwork(nn.Module):
    """State-value network V(s): two hidden ReLU layers of width 256.

    The output head's weights and biases are initialised uniformly in
    [-init_w, init_w] so the initial value estimates start near zero.
    """

    def __init__(self, input_dim, output_dim, init_w=3e-3):
        super(ValueNetwork, self).__init__()
        # Attribute names fc1/fc2/fc3 are preserved so existing
        # checkpoints (state_dict keys) remain loadable.
        self.fc1 = nn.Linear(input_dim, 256)
        self.fc2 = nn.Linear(256, 256)
        self.fc3 = nn.Linear(256, output_dim)
        for tensor in (self.fc3.weight, self.fc3.bias):
            tensor.data.uniform_(-init_w, init_w)

    def forward(self, state):
        """Return V(state) with shape (batch, output_dim)."""
        hidden = F.relu(self.fc1(state))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)
class SoftQNetwork(nn.Module):
    """Soft Q-network Q(s, a): state and action are concatenated, then passed
    through two hidden ReLU layers to a scalar head."""

    def __init__(self, num_inputs, num_actions, hidden_size=256, init_w=3e-3):
        super(SoftQNetwork, self).__init__()
        # Attribute names linear1/2/3 are preserved for checkpoint
        # compatibility.
        self.linear1 = nn.Linear(num_inputs + num_actions, hidden_size)
        self.linear2 = nn.Linear(hidden_size, hidden_size)
        self.linear3 = nn.Linear(hidden_size, 1)
        # Small uniform init on the head keeps initial Q-values near zero.
        for tensor in (self.linear3.weight, self.linear3.bias):
            tensor.data.uniform_(-init_w, init_w)

    def forward(self, state, action):
        """Return Q(state, action) with shape (batch, 1)."""
        joint = torch.cat([state, action], 1)
        joint = F.relu(self.linear1(joint))
        joint = F.relu(self.linear2(joint))
        return self.linear3(joint)
class GaussianPolicy(nn.Module):
    """Gaussian policy head: maps a state to the mean and log-std of a Normal.

    The log-std output is clamped to [log_std_min, log_std_max] for numerical
    stability. `sample` draws a reparameterised sample and returns the
    log-probability with the tanh change-of-variables correction applied.
    """

    def __init__(self, num_inputs, num_actions, hidden_size=256, init_w=3e-3, log_std_min=-20, log_std_max=2):
        super(GaussianPolicy, self).__init__()
        self.log_std_min = log_std_min
        self.log_std_max = log_std_max
        self.linear1 = nn.Linear(num_inputs, hidden_size)
        self.linear2 = nn.Linear(hidden_size, hidden_size)
        # Both output heads get a small uniform init; creation/init order is
        # kept identical to the original for seeded reproducibility.
        self.mean_linear = nn.Linear(hidden_size, num_actions)
        self.mean_linear.weight.data.uniform_(-init_w, init_w)
        self.mean_linear.bias.data.uniform_(-init_w, init_w)
        self.log_std_linear = nn.Linear(hidden_size, num_actions)
        self.log_std_linear.weight.data.uniform_(-init_w, init_w)
        self.log_std_linear.bias.data.uniform_(-init_w, init_w)

    def forward(self, state):
        """Return (mean, clamped log_std) of the Gaussian over actions."""
        hidden = F.relu(self.linear1(state))
        hidden = F.relu(self.linear2(hidden))
        mean = self.mean_linear(hidden)
        log_std = self.log_std_linear(hidden).clamp(self.log_std_min, self.log_std_max)
        return mean, log_std

    def sample(self, state, epsilon=1e-6):
        """Reparameterised sample: returns (mean, std, z, log_pi).

        log_pi is summed over action dimensions (shape (batch, 1)) and
        includes the -log(1 - tanh(z)^2 + epsilon) tanh correction.
        """
        mean, log_std = self.forward(state)
        std = log_std.exp()
        dist = Normal(mean, std)
        z = dist.rsample()
        correction = torch.log(1 - torch.tanh(z).pow(2) + epsilon)
        log_pi = (dist.log_prob(z) - correction).sum(1, keepdim=True)
        return mean, std, z, log_pi
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal, Uniform
class ValueNetwork(nn.Module):
def __init__(self, input_dim, output_dim, init_w=3e-3):
super(ValueNetwork, self).__init__()
self.fc1 = nn.Linear(input_dim, 256)
self.fc2 = nn.Linear(256, 256)
self.fc3 = nn.Linear(256, output_dim)
self.fc3.weight.data.uniform_(-init_w, init_w)
self.fc3.bias.data.uniform_(-init_w, init_w)
def forward(self, state):
x = F.relu(self.fc1(state))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
class SoftQNetwork(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_size=256, init_w=3e-3):
super(SoftQNetwork, self).__init__()
self.linear1 = nn.Linear(num_inputs + num_actions, hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.linear3 = nn.Linear(hidden_size, 1)
self.linear3.weight.data.uniform_(-init_w, init_w)
self.linear3.bias.data.uniform_(-init_w, init_w)
def forward(self, state, action):
x = torch.cat([state, action], 1)
x = F.relu(self.linear1(x))
x = F.relu(self.linear2(x))
x = self.linear3(x)
return x
class GaussianPolicy(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_size=256, init_w=3e-3, log_std_min=-20, log_std_max=2):
super(GaussianPolicy, self).__init__()
self.log_std_min = log_std_min
self.log_std_max = log_std_max
self.linear1 = nn.Linear(num_inputs, hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.mean_linear = nn.Linear(hidden_size, num_actions)
self.mean_linear.weight.data.uniform_(-init_w, init_w)
self.mean_linear.bias.data.uniform_(-init_w, init_w)
self.log_std_linear = nn.Linear(hidden_size, num_actions)
self.log_std_linear.weight.data.uniform_(-init_w, init_w)
self.log_std_linear.bias.data.uniform_(-init_w, init_w)
def forward(self, state):
x = F.relu(self.linear1(state))
x = F.relu(self.linear2(x))
mean = self.mean_linear(x)
log_std = self.log_std_linear(x)
log_std = torch.clamp(log_std, self.log_std_min, self.log_std_max)
return mean, log_std
def sample(self, state, epsilon=1e-6):
mean, log_std = self.forward(state)
std = log_std.exp()
normal = Normal(mean, std)
z = normal.rsample()
log_pi = (normal.log_prob(z) - torch.log(1 - (torch.tanh(z)).pow(2) + epsilon)).sum(1, keepdim=True)
return mean, std, z, log_pi
|
none
| 1
| 2.69978
| 3
|
|
learning/codesearcher.py
|
linzeqipku/drm_codesearch
| 0
|
6628414
|
from __future__ import absolute_import
import os
import numpy as np
import re
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from tqdm import tqdm
from learning.model.rnn import RnnModel
from preprocess.dataset import CodeSearchDataset
from preprocess.lex.token import Tokenizer
from preprocess.lex.word_sim import WordSim
class CodeSearcher:
def __init__(self, conf):
self.conf = conf
self.wkdir = self.conf['data']['wkdir']
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
train_data = CodeSearchDataset(os.path.join(conf['data']['wkdir'], conf['data']['train_db_path']))
self.model = RnnModel(int(conf['data']['query_max_len']), train_data.core_term_size, int(conf['model']['core_term_embedding_size']), int(conf['model']['lstm_hidden_size']), int(conf['model']['lstm_layers']), float(self.conf['train']['margin'])).to(self.device)
self.batch_size = int(self.conf['train']['batch_size'])
def save_model(self, epoch):
model_dir = os.path.join(self.wkdir, 'models')
if not os.path.exists(model_dir):
os.mkdir(model_dir)
torch.save(self.model.state_dict(), os.path.join(model_dir, 'epoch%d.h5' % epoch))
def load_model(self, epoch):
model_path = os.path.join(self.wkdir, 'models/epoch%d.h5' % epoch)
assert os.path.exists(model_path), 'Weights not found.'
self.model.load_state_dict(torch.load(model_path))
def train(self):
train_data = CodeSearchDataset(os.path.join(self.wkdir, self.conf['data']['train_db_path']))
valid_data = CodeSearchDataset(os.path.join(self.wkdir, self.conf['data']['valid_db_path']))
test_data = CodeSearchDataset(os.path.join(self.wkdir, self.conf['data']['test_db_path']))
train_size = len(train_data)
if torch.cuda.device_count() > 1:
print("let's use ", torch.cuda.device_count(), "GPUs")
save_round = int(self.conf['train']['save_round'])
nb_epoch = int(self.conf['train']['nb_epoch'])
batch_size = self.batch_size
dataloader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
optimizer = optim.Adam(self.model.parameters(), lr=float(self.conf['train']['lr']))
for epoch in range(nb_epoch):
self.model.train()
epoch_loss = 0
for _, pos_matrix, pos_core_terms, pos_length, neg_matrix, neg_core_terms, neg_length, neg_ids in tqdm(dataloader):
pos_length = [self.gVar(x) for x in pos_length]
neg_length = [self.gVar(x) for x in neg_length]
loss = self.model.loss(self.gVar(pos_matrix), self.gVar(pos_core_terms), pos_length,
self.gVar(neg_matrix), self.gVar(neg_core_terms), neg_length)
optimizer.zero_grad()
loss.backward()
optimizer.step()
epoch_loss += loss.item()
print('epoch', epoch, ': Loss =', epoch_loss / (train_size/batch_size))
if epoch % save_round == 0:
self.save_model(epoch)
print('Validation...')
self.eval(valid_data)
print('Test...')
self.eval(test_data)
def eval2(self):
data = Tokenizer().parse(os.path.join(self.wkdir, self.conf['data']['test_nl_path']), os.path.join(self.wkdir, self.conf['data']['test_code_path']))
fasttext_corpus_path = os.path.join(self.wkdir, re.sub(r'\.db$', '.txt', self.conf['data']['test_db_path']))
core_term_path = os.path.join(self.wkdir, 'conf/core_terms.txt')
word_sim = WordSim(core_term_path, pretrain=(self.conf['model']['pretrained_wordvec'] == str(True)), update=False, fasttext_corpus_path=fasttext_corpus_path)
CodeSearchDataset.eval(self.model, data, word_sim, int(self.conf['data']['query_max_len']), int(self.conf['data']['code_max_len']), self.device)
def eval(self, test_data):
    """Evaluate ranking quality on *test_data*, printing Hit@1..10 and MRR.

    :param test_data: CodeSearchDataset yielding a positive candidate and a
        flattened batch of negative candidates per query
    """
    self.model.eval()
    batch_size = self.batch_size
    dataloader = DataLoader(test_data, batch_size=batch_size, shuffle=True)
    def top_k_acc(pos_score, neg_score, k):
        # Fraction of queries whose positive snippet ranks within the top k.
        ranks = compute_rank(pos_score, neg_score)
        result = [1 for r in ranks if r <= k]
        count = sum(result)
        return count/len(ranks)
    def mrr(pos_score, neg_score):
        # Mean reciprocal rank of the positive snippet.
        ranks = compute_rank(pos_score, neg_score)
        reciprocal = [1/r for r in ranks]
        return sum(reciprocal)/len(ranks)
    def compute_rank(pos_score, neg_score):
        # 1-based rank of each positive score among its own negatives;
        # defaults to the worst rank (len(negatives) + 1) when every
        # negative scores at least as high.
        ranks = [len(neg_score[0])+1]*len(pos_score)
        for i, pos_ in enumerate(pos_score):
            sort_neg_score = sorted(neg_score[i], reverse=True)
            for j, neg_ in enumerate(sort_neg_score):
                if pos_ > neg_:
                    ranks[i] = j + 1
                    break
        return ranks
    top_k = 10
    accs = [[] for _ in range(top_k)]
    mrrs = []
    for q_id, pos_matrix, pos_core_terms, pos_length, neg_matrix, neg_core_terms, neg_length, neg_ids in dataloader:
        pos_length = [self.gVar(x) for x in pos_length]
        neg_length = [self.gVar(x) for x in neg_length]
        pos_score = self.model(self.gVar(pos_matrix), pos_length, self.gVar(pos_core_terms)).data.cpu().numpy()
        neg_score = self.model(self.gVar(neg_matrix), neg_length, self.gVar(neg_core_terms)).data.cpu().numpy()
        # Negatives come back flattened; regroup them per query.
        neg_score = np.split(neg_score, len(pos_score))
        for i in range(top_k):
            accs[i].append(top_k_acc(pos_score, neg_score, i+1))
        mrrs.append(mrr(pos_score, neg_score))
    for i in range(top_k):
        print('Hit@{}: {}'.format(i+1, np.mean(accs[i])))
    print('MRR: {}'.format(np.mean(mrrs)))
def gVar(self, tensor):
    """Move *tensor* to the configured device (GPU when available)."""
    return tensor.to(self.device)
|
from __future__ import absolute_import
import os
import numpy as np
import re
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from tqdm import tqdm
from learning.model.rnn import RnnModel
from preprocess.dataset import CodeSearchDataset
from preprocess.lex.token import Tokenizer
from preprocess.lex.word_sim import WordSim
class CodeSearcher:
    """End-to-end driver for an RNN-based code-search model.

    Builds the model from a nested config dict (sections 'data', 'model',
    'train'; values are strings and converted where needed), trains it with
    a margin-based loss over positive/negative code candidates, and reports
    Hit@k / MRR ranking metrics.
    """
    def __init__(self, conf):
        self.conf = conf
        self.wkdir = self.conf['data']['wkdir']
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        # The training DB determines the core-term vocabulary size.
        train_data = CodeSearchDataset(os.path.join(conf['data']['wkdir'], conf['data']['train_db_path']))
        self.model = RnnModel(int(conf['data']['query_max_len']), train_data.core_term_size, int(conf['model']['core_term_embedding_size']), int(conf['model']['lstm_hidden_size']), int(conf['model']['lstm_layers']), float(self.conf['train']['margin'])).to(self.device)
        self.batch_size = int(self.conf['train']['batch_size'])
    def save_model(self, epoch):
        """Save model weights to <wkdir>/models/epoch<N>.h5."""
        model_dir = os.path.join(self.wkdir, 'models')
        if not os.path.exists(model_dir):
            os.mkdir(model_dir)
        torch.save(self.model.state_dict(), os.path.join(model_dir, 'epoch%d.h5' % epoch))
    def load_model(self, epoch):
        """Load weights previously written by save_model for *epoch*."""
        model_path = os.path.join(self.wkdir, 'models/epoch%d.h5' % epoch)
        assert os.path.exists(model_path), 'Weights not found.'
        self.model.load_state_dict(torch.load(model_path))
    def train(self):
        """Train for the configured number of epochs, checkpointing and
        evaluating on the validation and test splits every `save_round`
        epochs."""
        train_data = CodeSearchDataset(os.path.join(self.wkdir, self.conf['data']['train_db_path']))
        valid_data = CodeSearchDataset(os.path.join(self.wkdir, self.conf['data']['valid_db_path']))
        test_data = CodeSearchDataset(os.path.join(self.wkdir, self.conf['data']['test_db_path']))
        train_size = len(train_data)
        if torch.cuda.device_count() > 1:
            # NOTE(review): this only announces multiple GPUs; the model is
            # never wrapped in DataParallel here — confirm that is intended.
            print("let's use ", torch.cuda.device_count(), "GPUs")
        save_round = int(self.conf['train']['save_round'])
        nb_epoch = int(self.conf['train']['nb_epoch'])
        batch_size = self.batch_size
        dataloader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
        optimizer = optim.Adam(self.model.parameters(), lr=float(self.conf['train']['lr']))
        for epoch in range(nb_epoch):
            self.model.train()
            epoch_loss = 0
            for _, pos_matrix, pos_core_terms, pos_length, neg_matrix, neg_core_terms, neg_length, neg_ids in tqdm(dataloader):
                pos_length = [self.gVar(x) for x in pos_length]
                neg_length = [self.gVar(x) for x in neg_length]
                # Margin loss between positive and negative code candidates.
                loss = self.model.loss(self.gVar(pos_matrix), self.gVar(pos_core_terms), pos_length,
                                       self.gVar(neg_matrix), self.gVar(neg_core_terms), neg_length)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                epoch_loss += loss.item()
            print('epoch', epoch, ': Loss =', epoch_loss / (train_size/batch_size))
            if epoch % save_round == 0:
                self.save_model(epoch)
                print('Validation...')
                self.eval(valid_data)
                print('Test...')
                self.eval(test_data)
    def eval2(self):
        """Evaluate on the raw test corpus: tokenize the NL/code pairs and
        delegate scoring to CodeSearchDataset.eval."""
        data = Tokenizer().parse(os.path.join(self.wkdir, self.conf['data']['test_nl_path']), os.path.join(self.wkdir, self.conf['data']['test_code_path']))
        # The fastText corpus shares the test DB's path, with a .txt extension.
        fasttext_corpus_path = os.path.join(self.wkdir, re.sub(r'\.db$', '.txt', self.conf['data']['test_db_path']))
        core_term_path = os.path.join(self.wkdir, 'conf/core_terms.txt')
        word_sim = WordSim(core_term_path, pretrain=(self.conf['model']['pretrained_wordvec'] == str(True)), update=False, fasttext_corpus_path=fasttext_corpus_path)
        CodeSearchDataset.eval(self.model, data, word_sim, int(self.conf['data']['query_max_len']), int(self.conf['data']['code_max_len']), self.device)
    def eval(self, test_data):
        """Evaluate ranking quality on *test_data*, printing Hit@1..10 and MRR."""
        self.model.eval()
        batch_size = self.batch_size
        dataloader = DataLoader(test_data, batch_size=batch_size, shuffle=True)
        def top_k_acc(pos_score, neg_score, k):
            # Fraction of queries whose positive snippet ranks within the top k.
            ranks = compute_rank(pos_score, neg_score)
            result = [1 for r in ranks if r <= k]
            count = sum(result)
            return count/len(ranks)
        def mrr(pos_score, neg_score):
            # Mean reciprocal rank of the positive snippet.
            ranks = compute_rank(pos_score, neg_score)
            reciprocal = [1/r for r in ranks]
            return sum(reciprocal)/len(ranks)
        def compute_rank(pos_score, neg_score):
            # 1-based rank of each positive among its own negatives; worst
            # rank (len(negatives) + 1) when every negative scores higher.
            ranks = [len(neg_score[0])+1]*len(pos_score)
            for i, pos_ in enumerate(pos_score):
                sort_neg_score = sorted(neg_score[i], reverse=True)
                for j, neg_ in enumerate(sort_neg_score):
                    if pos_ > neg_:
                        ranks[i] = j + 1
                        break
            return ranks
        top_k = 10
        accs = [[] for _ in range(top_k)]
        mrrs = []
        for q_id, pos_matrix, pos_core_terms, pos_length, neg_matrix, neg_core_terms, neg_length, neg_ids in dataloader:
            pos_length = [self.gVar(x) for x in pos_length]
            neg_length = [self.gVar(x) for x in neg_length]
            pos_score = self.model(self.gVar(pos_matrix), pos_length, self.gVar(pos_core_terms)).data.cpu().numpy()
            neg_score = self.model(self.gVar(neg_matrix), neg_length, self.gVar(neg_core_terms)).data.cpu().numpy()
            # Negatives come back flattened; regroup them per query.
            neg_score = np.split(neg_score, len(pos_score))
            for i in range(top_k):
                accs[i].append(top_k_acc(pos_score, neg_score, i+1))
            mrrs.append(mrr(pos_score, neg_score))
        for i in range(top_k):
            print('Hit@{}: {}'.format(i+1, np.mean(accs[i])))
        print('MRR: {}'.format(np.mean(mrrs)))
    def gVar(self, tensor):
        """Move *tensor* to the configured device (GPU when available)."""
        return tensor.to(self.device)
|
none
| 1
| 2.142048
| 2
|
|
qpath/utils.py
|
vladpopovici/QPath
| 0
|
6628415
|
# -*- coding: utf-8 -*-
#############################################################################
# Copyright <NAME> <<EMAIL>>
#
# Licensed under the MIT License. See LICENSE file in root folder.
#############################################################################
__author__ = "<NAME> <<EMAIL>>"
__version__ = 0.1
#
# QPATH.UTILS: handy functions
#
__all__ = []
import numpy as np
import shapely.geometry
import simplejson as json
import pyvips
from . import Error
def geom2xy(geom: shapely.geometry, as_type=None) -> np.array:
    """Return the coordinates of a 2D geometry as an (N, 2) NumPy array.

    :param geom: a 2D shapely geometry
    :param as_type: optional dtype for the resulting array
    :return: numpy.array of shape (N, 2)
    """
    # Coordinates arrive flattened as x0, y0, x1, y1, ...
    coords = geom.array_interface_base['data']
    flat = np.array(coords) if as_type is None else np.array(coords, dtype=as_type)
    return flat.reshape((flat.size // 2, 2))
##
class NumpyJSONEncoder(json.JSONEncoder):
    """JSON encoder that knows how to serialize NumPy scalar and array types."""

    def default(self, obj):
        """Convert NumPy values to native Python equivalents; defer otherwise."""
        for np_type, convert in ((np.integer, int),
                                 (np.floating, float),
                                 (np.ndarray, lambda a: a.tolist())):
            if isinstance(obj, np_type):
                return convert(obj)
        # Base class raises TypeError for anything still unhandled.
        return super().default(obj)
##
def np2vips(img: np.array) -> pyvips.Image:
    """Convert a NumPy image (2D grayscale or 3D multi-band array) to a
    VIPS Image.

    :param img: array of shape (H, W) or (H, W, bands)
    :return: pyvips.Image sharing the array's pixel data
    :raises Error: if the array has more than 3 dimensions
    """
    # NumPy dtype name -> VIPS band format name.
    dtype_to_format = {
        'uint8': 'uchar',
        'int8': 'char',
        'uint16': 'ushort',
        'int16': 'short',
        'uint32': 'uint',
        'int32': 'int',
        'float32': 'float',
        'float64': 'double',
        'complex64': 'complex',
        'complex128': 'dpcomplex',
    }
    if img.ndim > 3:
        raise Error("the image may have at most 3 dimensions")
    if img.ndim == 3:
        height, width, bands = img.shape[:3]
    else:
        # BUG FIX: the original `height, width, bands = img.shape[:2], 1`
        # tried to unpack the 2-tuple ((h, w), 1) into three names and so
        # raised ValueError for every 2D image.
        height, width = img.shape[:2]
        bands = 1
    linear = img.reshape(width * height * bands)
    vi = pyvips.Image.new_from_memory(linear.data, width, height, bands,
                                      dtype_to_format[str(img.dtype)])
    return vi
##
def write_pyramidal_tiff(img: np.array, file_name: str) -> None:
    """Save a NumPy array as a pyramidal, tiled, JPEG-compressed TIFF.

    :param img: the image as a NumPy array
    :param file_name: destination file path
    """
    vips_image = np2vips(img)
    vips_image.write_to_file(file_name, pyramid=True, tile=True, compression="jpeg")
##
|
# -*- coding: utf-8 -*-
#############################################################################
# Copyright <NAME> <<EMAIL>>
#
# Licensed under the MIT License. See LICENSE file in root folder.
#############################################################################
__author__ = "<NAME> <<EMAIL>>"
__version__ = 0.1
#
# QPATH.UTILS: handy functions
#
__all__ = []
import numpy as np
import shapely.geometry
import simplejson as json
import pyvips
from . import Error
def geom2xy(geom: shapely.geometry, as_type=None) -> np.array:
    """Return the coordinates of a 2D geometrical object as a numpy array (N x 2).
    :param geom: shapely.geometry
        a 2D geometrical object
    :param as_type: optional dtype for the returned array
    :return:
        numpy.array
    """
    if as_type is None:
        z = np.array(geom.array_interface_base['data'])
    else:
        z = np.array(geom.array_interface_base['data'], dtype=as_type)
    # Coordinates arrive flattened as x0, y0, x1, y1, ...; fold into pairs.
    n = z.size // 2
    return z.reshape((n, 2))
##
class NumpyJSONEncoder(json.JSONEncoder):
    """Provides an encoder for Numpy types for serialization."""
    def default(self, obj):
        # Called by json only for objects it cannot serialize natively;
        # map NumPy scalars/arrays to their Python equivalents.
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        # Fall back to the base class, which raises TypeError.
        return super().default(obj)
##
def np2vips(img: np.array) -> pyvips.Image:
    """Convert a NumPy image (2D grayscale or 3D multi-band array) to a
    VIPS Image.

    :param img: array of shape (H, W) or (H, W, bands)
    :return: pyvips.Image sharing the array's pixel data
    :raises Error: if the array has more than 3 dimensions
    """
    # NumPy dtype name -> VIPS band format name.
    dtype_to_format = {
        'uint8': 'uchar',
        'int8': 'char',
        'uint16': 'ushort',
        'int16': 'short',
        'uint32': 'uint',
        'int32': 'int',
        'float32': 'float',
        'float64': 'double',
        'complex64': 'complex',
        'complex128': 'dpcomplex',
    }
    if img.ndim > 3:
        raise Error("the image may have at most 3 dimensions")
    if img.ndim == 3:
        height, width, bands = img.shape[:3]
    else:
        # BUG FIX: the original `height, width, bands = img.shape[:2], 1`
        # tried to unpack the 2-tuple ((h, w), 1) into three names and so
        # raised ValueError for every 2D image.
        height, width = img.shape[:2]
        bands = 1
    linear = img.reshape(width * height * bands)
    vi = pyvips.Image.new_from_memory(linear.data, width, height, bands,
                                      dtype_to_format[str(img.dtype)])
    return vi
##
def write_pyramidal_tiff(img: np.array, file_name: str) -> None:
    """Write a Numpy array as a pyramidal tiled TIFF file.
    :param: img (np.array)
        the image
    :param: file_name (str)
        file to write to
    """
    v_img = np2vips(img)
    # JPEG-compressed, tiled, multi-resolution pyramid.
    v_img.write_to_file(file_name, pyramid=True, tile=True, compression="jpeg")
    return
##
|
en
| 0.33186
|
# -*- coding: utf-8 -*- ############################################################################# # Copyright <NAME> <<EMAIL>> # # Licensed under the MIT License. See LICENSE file in root folder. ############################################################################# # # QPATH.UTILS: handy functions # Return the coordinates of a 2D geometrical object as a numpy array (N x 2). :param geom: shapely.geometry a 2D geometrical object :return: numpy.array ## Provides an encoder for Numpy types for serialization. ## Converts a NumPy image (3d array) to VIPS Image. ## Write a Numpy array as a pyramidal tiled TIFF file. :param: img (np.array) the image :param: file_name (str) file to write to ##
| 2.492383
| 2
|
coordination/migrations/0011_add_ml_quest_type.py
|
PhobosXIII/qc
| 0
|
6628416
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-20 13:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: extends Quest.type choices with the
    'ML' (multi-linear) quest type."""
    dependencies = [
        ('coordination', '0010_increase_hint_max_delay'),
    ]
    operations = [
        migrations.AlterField(
            model_name='quest',
            name='type',
            field=models.CharField(choices=[('L', 'Линейный'), ('NL', 'Нелинейный'), ('LNL', 'Линейно-нелинейный'), ('ML', 'Многолинейный')], default='L', max_length=3, verbose_name='тип'),
        ),
    ]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-20 13:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: extends Quest.type choices with the
    'ML' (multi-linear) quest type."""
    dependencies = [
        ('coordination', '0010_increase_hint_max_delay'),
    ]
    operations = [
        migrations.AlterField(
            model_name='quest',
            name='type',
            field=models.CharField(choices=[('L', 'Линейный'), ('NL', 'Нелинейный'), ('LNL', 'Линейно-нелинейный'), ('ML', 'Многолинейный')], default='L', max_length=3, verbose_name='тип'),
        ),
    ]
|
en
| 0.7973
|
# -*- coding: utf-8 -*- # Generated by Django 1.9.2 on 2016-03-20 13:28
| 1.514562
| 2
|
curso_em_video/0064.py
|
marinaoliveira96/python-exercises
| 0
|
6628417
|
<filename>curso_em_video/0064.py
# Read integers until the sentinel 999 is entered, then report how many
# numbers were typed and their sum. Note: `cont` also counts the sentinel
# entry itself, while the sentinel's value is removed from the printed sum.
n = 0
cont = 0
soma = 0
while n != 999:
    n = int(input('Digite um numero. [Digite 999 para parar]: '))
    cont += 1
    soma += n
print(f'foram digitados {cont} numeros e a soma deles é igual a {soma-999}')
|
<filename>curso_em_video/0064.py
# Read integers until the sentinel 999 is entered, then report how many
# numbers were typed and their sum. Note: `cont` also counts the sentinel
# entry itself, while the sentinel's value is removed from the printed sum.
n = 0
cont = 0
soma = 0
while n != 999:
    n = int(input('Digite um numero. [Digite 999 para parar]: '))
    cont += 1
    soma += n
print(f'foram digitados {cont} numeros e a soma deles é igual a {soma-999}')
|
none
| 1
| 3.445309
| 3
|
|
app/services/models/pool_member.py
|
tirinox/thorchainmonitorbot
| 3
|
6628418
|
<reponame>tirinox/thorchainmonitorbot
from dataclasses import dataclass
@dataclass
class PoolMemberDetails:
    """Liquidity-provision details for one member of a pool."""
    asset_added: int = 0        # total asset amount deposited
    asset_withdrawn: int = 0    # total asset amount withdrawn
    asset_address: str = ''     # member's asset-chain address
    rune_added: int = 0         # total RUNE deposited
    rune_withdrawn: int = 0     # total RUNE withdrawn
    run_address: str = ''       # NOTE(review): likely meant 'rune_address';
                                # renaming would change the public field, left as-is
    date_first_added: int = 0   # timestamp of first deposit
    date_last_added: int = 0    # timestamp of most recent deposit
    liquidity_units: int = 0    # member's share units in the pool
    pool: str = ''              # pool identifier
|
from dataclasses import dataclass
@dataclass
class PoolMemberDetails:
    """Liquidity-provision details for one member of a pool."""
    asset_added: int = 0        # total asset amount deposited
    asset_withdrawn: int = 0    # total asset amount withdrawn
    asset_address: str = ''     # member's asset-chain address
    rune_added: int = 0         # total RUNE deposited
    rune_withdrawn: int = 0     # total RUNE withdrawn
    run_address: str = ''       # NOTE(review): likely meant 'rune_address';
                                # renaming would change the public field, left as-is
    date_first_added: int = 0   # timestamp of first deposit
    date_last_added: int = 0    # timestamp of most recent deposit
    liquidity_units: int = 0    # member's share units in the pool
    pool: str = ''              # pool identifier
|
none
| 1
| 2.130255
| 2
|
|
machine-learning/utils/figures.py
|
tusharsingh62/nasaapps-rgb-awl
| 0
|
6628419
|
<filename>machine-learning/utils/figures.py
import colorlover as cl
import plotly.graph_objs as go
import numpy as np
from sklearn import metrics
def serve_prediction_plot(
    model, X_train, X_test, y_train, y_test, Z, xx, yy, mesh_step, threshold, image
):
    """Build the decision-surface figure for a fitted classifier.

    Draws the decision-function contour, a constant-value threshold line,
    train/test scatter traces colored by label, and a background image.

    :param model: fitted estimator exposing ``decision_function``
    :param X_train: training features; columns 0/1 are plotted
    :param X_test: test features; columns 0/1 are plotted
    :param y_train: training labels (scatter colors)
    :param y_test: test labels (scatter colors)
    :param Z: decision-function values over the mesh grid
    :param xx: mesh-grid x coordinates
    :param yy: mesh-grid y coordinates
    :param mesh_step: grid spacing used to rebuild the contour axes
    :param threshold: decision threshold in [0, 1]
    :param image: image source placed behind the plot
    :return: plotly ``go.Figure``
    """
    # Get train and test score from model
    y_pred_train = (model.decision_function(X_train) > threshold).astype(int)
    y_pred_test = (model.decision_function(X_test) > threshold).astype(int)
    train_score = metrics.accuracy_score(y_true=y_train, y_pred=y_pred_train)
    test_score = metrics.accuracy_score(y_true=y_test, y_pred=y_pred_test)
    # Map the [0, 1] threshold into the decision function's own scale.
    scaled_threshold = threshold * (Z.max() - Z.min()) + Z.min()
    # Renamed from `range`, which shadowed the builtin.
    z_range = max(abs(scaled_threshold - Z.min()), abs(scaled_threshold - Z.max()))
    # Colorscale
    bright_cscale = [[0, "#ff3700"], [1, "#0b8bff"]]
    cscale = [
        [0.0000000, "#ff744c"],
        [0.1428571, "#ff916d"],
        [0.2857143, "#ffc0a8"],
        [0.4285714, "#ffe7dc"],
        [0.5714286, "#e5fcff"],
        [0.7142857, "#c8feff"],
        [0.8571429, "#9af8ff"],
        [1.0000000, "#20e6ff"],
    ]
    # Plot the prediction contour of the SVM
    trace0 = go.Contour(
        x=np.arange(xx.min(), xx.max(), mesh_step),
        y=np.arange(yy.min(), yy.max(), mesh_step),
        z=Z.reshape(xx.shape),
        zmin=scaled_threshold - z_range,
        zmax=scaled_threshold + z_range,
        hoverinfo="none",
        showscale=False,
        contours=dict(showlines=False),
        colorscale=cscale,
        opacity=0.9,
    )
    # Plot the threshold
    trace1 = go.Contour(
        x=np.arange(xx.min(), xx.max(), mesh_step),
        y=np.arange(yy.min(), yy.max(), mesh_step),
        z=Z.reshape(xx.shape),
        showscale=False,
        hoverinfo="none",
        contours=dict(
            showlines=False, type="constraint", operation="=", value=scaled_threshold
        ),
        name=f"Threshold ({scaled_threshold:.3f})",
        line=dict(color="#708090"),
    )
    # Plot Training Data
    trace2 = go.Scatter(
        x=X_train[:, 0],
        y=X_train[:, 1],
        mode="markers",
        name=f"Resource exists with high probability (accuracy={train_score:.3f})",
        marker=dict(size=10, color=y_train, colorscale=bright_cscale),
    )
    # Plot Test Data
    trace3 = go.Scatter(
        x=X_test[:, 0],
        y=X_test[:, 1],
        mode="markers",
        name=f"Resource exists with low probability (accuracy={test_score:.3f})",
        marker=dict(
            size=10,
            color=y_test,
            colorscale=bright_cscale
        ),
    )
    layout = go.Layout(
        images=[dict(
            source= image,
            xref= "x",
            yref= "y",
            x= -4.0,
            y= 4.0,
            sizex= 12,
            sizey= 12,
            sizing= "stretch",
            opacity= 0.5,
            layer= "above"
        )],
        xaxis=dict(ticks="", showticklabels=False, showgrid=False, zeroline=False),
        yaxis=dict(ticks="", showticklabels=False, showgrid=False, zeroline=False),
        hovermode="closest",
        legend=dict(x=0, y=-0.01, orientation="h"),
        margin=dict(l=0, r=0, t=0, b=0),
        plot_bgcolor="#282b38",
        paper_bgcolor="#282b38",
        font={"color": "#a5b1cd"},
    )
    data = [trace0, trace1, trace2, trace3]
    figure = go.Figure(data=data, layout=layout)
    return figure
def serve_pie_confusion_matrix(model, X_test, y_test, Z, threshold):
    """Build a pie chart of the confusion-matrix cells at *threshold*.

    :param model: fitted estimator exposing ``decision_function``
    :param X_test: test features
    :param y_test: true labels
    :param Z: decision-function values over the mesh grid (sets the scale)
    :param threshold: decision threshold in [0, 1]
    :return: plotly ``go.Figure``
    """
    # Map the [0, 1] threshold into the decision function's own scale.
    scaled_threshold = threshold * (Z.max() - Z.min()) + Z.min()
    y_pred_test = (model.decision_function(X_test) > scaled_threshold).astype(int)
    matrix = metrics.confusion_matrix(y_true=y_test, y_pred=y_pred_test)
    tn, fp, fn, tp = matrix.ravel()
    values = [tp, fn, fp, tn]
    label_text = ["Low probability", "Low probability points in red area", "High probability points in blue area", "High probability"]
    labels = ["LP", "BinH", "HinB", "HP"]
    blue = cl.flipper()["seq"]["9"]["Blues"]
    # (an unused `red` palette lookup was removed here)
    colors = ["#13c6e9", blue[1], "#ff916d", "#ff744c"]
    trace0 = go.Pie(
        labels=label_text,
        values=values,
        hoverinfo="label+value+percent",
        textinfo="text+value",
        text=labels,
        sort=False,
        marker=dict(colors=colors),
        insidetextfont={"color": "white"},
        rotation=90,
    )
    layout = go.Layout(
        title="Existence Ratio",
        margin=dict(l=50, r=50, t=100, b=10),
        legend=dict(bgcolor="#282b38", font={"color": "#a5b1cd"}, orientation="h"),
        plot_bgcolor="#282b38",
        paper_bgcolor="#282b38",
        font={"color": "#a5b1cd"},
    )
    data = [trace0]
    figure = go.Figure(data=data, layout=layout)
    return figure
|
<filename>machine-learning/utils/figures.py
import colorlover as cl
import plotly.graph_objs as go
import numpy as np
from sklearn import metrics
def serve_prediction_plot(
    model, X_train, X_test, y_train, y_test, Z, xx, yy, mesh_step, threshold, image
):
    """Build the decision-surface figure for a fitted classifier.

    Draws the decision-function contour, a constant-value threshold line,
    train/test scatter traces colored by label, and a background image.

    :param model: fitted estimator exposing ``decision_function``
    :param X_train: training features; columns 0/1 are plotted
    :param X_test: test features; columns 0/1 are plotted
    :param y_train: training labels (scatter colors)
    :param y_test: test labels (scatter colors)
    :param Z: decision-function values over the mesh grid
    :param xx: mesh-grid x coordinates
    :param yy: mesh-grid y coordinates
    :param mesh_step: grid spacing used to rebuild the contour axes
    :param threshold: decision threshold in [0, 1]
    :param image: image source placed behind the plot
    :return: plotly ``go.Figure``
    """
    # Get train and test score from model
    y_pred_train = (model.decision_function(X_train) > threshold).astype(int)
    y_pred_test = (model.decision_function(X_test) > threshold).astype(int)
    train_score = metrics.accuracy_score(y_true=y_train, y_pred=y_pred_train)
    test_score = metrics.accuracy_score(y_true=y_test, y_pred=y_pred_test)
    # Map the [0, 1] threshold into the decision function's own scale.
    scaled_threshold = threshold * (Z.max() - Z.min()) + Z.min()
    # Renamed from `range`, which shadowed the builtin.
    z_range = max(abs(scaled_threshold - Z.min()), abs(scaled_threshold - Z.max()))
    # Colorscale
    bright_cscale = [[0, "#ff3700"], [1, "#0b8bff"]]
    cscale = [
        [0.0000000, "#ff744c"],
        [0.1428571, "#ff916d"],
        [0.2857143, "#ffc0a8"],
        [0.4285714, "#ffe7dc"],
        [0.5714286, "#e5fcff"],
        [0.7142857, "#c8feff"],
        [0.8571429, "#9af8ff"],
        [1.0000000, "#20e6ff"],
    ]
    # Plot the prediction contour of the SVM
    trace0 = go.Contour(
        x=np.arange(xx.min(), xx.max(), mesh_step),
        y=np.arange(yy.min(), yy.max(), mesh_step),
        z=Z.reshape(xx.shape),
        zmin=scaled_threshold - z_range,
        zmax=scaled_threshold + z_range,
        hoverinfo="none",
        showscale=False,
        contours=dict(showlines=False),
        colorscale=cscale,
        opacity=0.9,
    )
    # Plot the threshold
    trace1 = go.Contour(
        x=np.arange(xx.min(), xx.max(), mesh_step),
        y=np.arange(yy.min(), yy.max(), mesh_step),
        z=Z.reshape(xx.shape),
        showscale=False,
        hoverinfo="none",
        contours=dict(
            showlines=False, type="constraint", operation="=", value=scaled_threshold
        ),
        name=f"Threshold ({scaled_threshold:.3f})",
        line=dict(color="#708090"),
    )
    # Plot Training Data
    trace2 = go.Scatter(
        x=X_train[:, 0],
        y=X_train[:, 1],
        mode="markers",
        name=f"Resource exists with high probability (accuracy={train_score:.3f})",
        marker=dict(size=10, color=y_train, colorscale=bright_cscale),
    )
    # Plot Test Data
    trace3 = go.Scatter(
        x=X_test[:, 0],
        y=X_test[:, 1],
        mode="markers",
        name=f"Resource exists with low probability (accuracy={test_score:.3f})",
        marker=dict(
            size=10,
            color=y_test,
            colorscale=bright_cscale
        ),
    )
    layout = go.Layout(
        images=[dict(
            source= image,
            xref= "x",
            yref= "y",
            x= -4.0,
            y= 4.0,
            sizex= 12,
            sizey= 12,
            sizing= "stretch",
            opacity= 0.5,
            layer= "above"
        )],
        xaxis=dict(ticks="", showticklabels=False, showgrid=False, zeroline=False),
        yaxis=dict(ticks="", showticklabels=False, showgrid=False, zeroline=False),
        hovermode="closest",
        legend=dict(x=0, y=-0.01, orientation="h"),
        margin=dict(l=0, r=0, t=0, b=0),
        plot_bgcolor="#282b38",
        paper_bgcolor="#282b38",
        font={"color": "#a5b1cd"},
    )
    data = [trace0, trace1, trace2, trace3]
    figure = go.Figure(data=data, layout=layout)
    return figure
def serve_pie_confusion_matrix(model, X_test, y_test, Z, threshold):
    """Build a pie chart of the confusion-matrix cells at *threshold*.

    :param model: fitted estimator exposing ``decision_function``
    :param X_test: test features
    :param y_test: true labels
    :param Z: decision-function values over the mesh grid (sets the scale)
    :param threshold: decision threshold in [0, 1]
    :return: plotly ``go.Figure``
    """
    # Map the [0, 1] threshold into the decision function's own scale.
    scaled_threshold = threshold * (Z.max() - Z.min()) + Z.min()
    y_pred_test = (model.decision_function(X_test) > scaled_threshold).astype(int)
    matrix = metrics.confusion_matrix(y_true=y_test, y_pred=y_pred_test)
    tn, fp, fn, tp = matrix.ravel()
    values = [tp, fn, fp, tn]
    label_text = ["Low probability", "Low probability points in red area", "High probability points in blue area", "High probability"]
    labels = ["LP", "BinH", "HinB", "HP"]
    blue = cl.flipper()["seq"]["9"]["Blues"]
    # (an unused `red` palette lookup was removed here)
    colors = ["#13c6e9", blue[1], "#ff916d", "#ff744c"]
    trace0 = go.Pie(
        labels=label_text,
        values=values,
        hoverinfo="label+value+percent",
        textinfo="text+value",
        text=labels,
        sort=False,
        marker=dict(colors=colors),
        insidetextfont={"color": "white"},
        rotation=90,
    )
    layout = go.Layout(
        title="Existence Ratio",
        margin=dict(l=50, r=50, t=100, b=10),
        legend=dict(bgcolor="#282b38", font={"color": "#a5b1cd"}, orientation="h"),
        plot_bgcolor="#282b38",
        paper_bgcolor="#282b38",
        font={"color": "#a5b1cd"},
    )
    data = [trace0]
    figure = go.Figure(data=data, layout=layout)
    return figure
|
en
| 0.741326
|
# Get train and test score from model # Compute threshold # Colorscale # Create the plot # Plot the prediction contour of the SVM # Plot the threshold # Plot Training Data # Plot Test Data # symbol="triangle-up", # color=y_train, # Compute threshold
| 2.773356
| 3
|
c.calculation.py
|
anmol1455/python
| 0
|
6628420
|
# class creation
class calculation:
    """Simple two-number calculator: reads operands, prints sum and difference."""
    # Operand defaults; inputdata() assigns instance attributes over these.
    num1=0
    num2=0
    def inputdata(self):
        """Prompt the user for the two operands."""
        self.num1=int(input("enter first number"))
        self.num2=int(input("enter second number"))
    def addition(self):
        """Print the sum of the two operands."""
        add=self.num1+self.num2
        print("sum=",add)
    def subtraction(self):
        """Print the difference num1 - num2."""
        sub=self.num1-self.num2
        print("subtraction=",sub)
# object creation: run the interactive demo (read inputs, print results)
calc=calculation()
calc.inputdata()
calc.addition()
calc.subtraction()
|
# class creation
class calculation:
    """Simple two-number calculator: reads operands, prints sum and difference."""
    # Operand defaults; inputdata() assigns instance attributes over these.
    num1=0
    num2=0
    def inputdata(self):
        """Prompt the user for the two operands."""
        self.num1=int(input("enter first number"))
        self.num2=int(input("enter second number"))
    def addition(self):
        """Print the sum of the two operands."""
        add=self.num1+self.num2
        print("sum=",add)
    def subtraction(self):
        """Print the difference num1 - num2."""
        sub=self.num1-self.num2
        print("subtraction=",sub)
# object creation: run the interactive demo (read inputs, print results)
calc=calculation()
calc.inputdata()
calc.addition()
calc.subtraction()
|
en
| 0.653471
|
#class creation # object creation
| 3.954929
| 4
|
ool/oppositions/migrations/0016_opposition_inform_jury_member.py
|
HeLsEroC/bbr
| 0
|
6628421
|
<gh_stars>0
# Generated by Django 3.1.12 on 2021-07-27 06:34
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds the nullable boolean field
    ``inform_jury_member`` to the Opposition model."""
    dependencies = [
        ('oppositions', '0015_auto_20210713_1303'),
    ]
    operations = [
        migrations.AddField(
            model_name='opposition',
            name='inform_jury_member',
            field=models.BooleanField(blank=True, null=True, verbose_name='Informer le membre du jury'),
        ),
    ]
|
# Generated by Django 3.1.12 on 2021-07-27 06:34
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds the nullable boolean field
    ``inform_jury_member`` to the Opposition model."""
    dependencies = [
        ('oppositions', '0015_auto_20210713_1303'),
    ]
    operations = [
        migrations.AddField(
            model_name='opposition',
            name='inform_jury_member',
            field=models.BooleanField(blank=True, null=True, verbose_name='Informer le membre du jury'),
        ),
    ]
|
en
| 0.796867
|
# Generated by Django 3.1.12 on 2021-07-27 06:34
| 1.504373
| 2
|
test/unit/test_decoder.py
|
blchu/sockeye
| 0
|
6628422
|
# Copyright 2017--2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import numpy as onp
import pytest
import torch as pt
import sockeye.constants as C
import sockeye.decoder_pt
import sockeye.transformer_pt
@pytest.mark.parametrize('lhuc', [False, True])
def test_get_decoder(lhuc):
    """pytorch_get_decoder builds a PyTorchTransformerDecoder from a
    TransformerConfig, with and without LHUC."""
    # BUG FIX: the original argvalues were 1-tuples ((False,), (True,)).
    # With a single argname pytest passes each element through unchanged,
    # so `lhuc` was a truthy tuple both times and use_lhuc=False was never
    # actually exercised.
    config = sockeye.transformer_pt.TransformerConfig(
        model_size=20,
        attention_heads=10,
        feed_forward_num_hidden=30,
        act_type='test_act',
        num_layers=50,
        dropout_attention=0.5,
        dropout_act=0.6,
        dropout_prepost=0.1,
        positional_embedding_type=C.FIXED_POSITIONAL_EMBEDDING,
        preprocess_sequence=C.FIXED_POSITIONAL_EMBEDDING,
        postprocess_sequence='test_post_seq',
        max_seq_len_source=60,
        max_seq_len_target=70,
        use_lhuc=lhuc)
    decoder = sockeye.decoder_pt.pytorch_get_decoder(config, inference_only=False)
    assert type(decoder) == sockeye.decoder_pt.PyTorchTransformerDecoder
@pytest.mark.parametrize("inference_only", [False, True])
def test_mx_pt_eq_transformer_decoder(inference_only):
    """Check that the PyTorch transformer decoder reproduces the MXNet
    decoder's outputs and states (atol 1e-5) after copying the weights,
    in both training-style and inference-only modes."""
    pytest.importorskip("mxnet")  # skip gracefully when MXNet is absent
    import sockeye.transformer
    import sockeye.decoder
    import mxnet as mx
    from mxnet import np
    # Fix both frameworks' RNGs so weights and inputs are reproducible.
    pt.manual_seed(13)
    mx.random.seed(13)
    # Identical hyper-parameters for both configs; all dropout is disabled
    # so the two forward passes are deterministic and comparable.
    config_mx = sockeye.transformer.TransformerConfig(model_size=128,
                                                      attention_heads=8,
                                                      feed_forward_num_hidden=256,
                                                      act_type='relu',
                                                      num_layers=12,
                                                      dropout_attention=0,
                                                      dropout_act=0,
                                                      dropout_prepost=0,
                                                      positional_embedding_type=C.FIXED_POSITIONAL_EMBEDDING,
                                                      preprocess_sequence='n',
                                                      postprocess_sequence='r',
                                                      max_seq_len_source=50,
                                                      max_seq_len_target=60,
                                                      depth_key_value=128,
                                                      use_lhuc=False)
    config_pt = sockeye.transformer_pt.TransformerConfig(model_size=128,
                                                         attention_heads=8,
                                                         feed_forward_num_hidden=256,
                                                         act_type='relu',
                                                         num_layers=12,
                                                         dropout_attention=0,
                                                         dropout_act=0,
                                                         dropout_prepost=0,
                                                         positional_embedding_type=C.FIXED_POSITIONAL_EMBEDDING,
                                                         preprocess_sequence='n',
                                                         postprocess_sequence='r',
                                                         max_seq_len_source=50,
                                                         max_seq_len_target=60,
                                                         depth_key_value=128,
                                                         use_lhuc=False)
    batch = 12
    encoder_seq_len = 45
    decoder_seq_len = 39 if not inference_only else 1  # inference decodes one step at a time
    encoder_outputs_mx = np.random.uniform(0, 1, (batch, encoder_seq_len, config_mx.model_size))
    encoder_outputs_pt = pt.tensor(encoder_outputs_mx.asnumpy())
    encoder_valid_length_mx = np.random.randint(1, encoder_seq_len, (batch,))
    encoder_valid_length_pt = pt.tensor(encoder_valid_length_mx.asnumpy())
    inputs_mx = np.random.uniform(0, 1, (batch, decoder_seq_len, config_mx.model_size))
    inputs_pt = pt.tensor(inputs_mx.asnumpy())
    # mx
    decoder_mx = sockeye.decoder.get_decoder(config_mx, inference_only=inference_only, dtype=C.DTYPE_FP32)
    decoder_mx.initialize()
    init_states_mx = decoder_mx.init_state_from_encoder(encoder_outputs_mx, encoder_valid_length_mx)
    output_mx, new_states_mx = decoder_mx(inputs_mx, init_states_mx)
    if inference_only:  # do a second decoder step
        output_mx, new_states_mx = decoder_mx(output_mx, new_states_mx)
    # pt
    decoder_pt = sockeye.decoder_pt.pytorch_get_decoder(config_pt, inference_only=inference_only)
    decoder_pt.weights_from_mxnet_block(decoder_mx)
    decoder_pt.eval()
    init_states_pt = decoder_pt.init_state_from_encoder(encoder_outputs_pt, encoder_valid_length_pt)
    output_pt, new_states_pt = decoder_pt(inputs_pt, init_states_pt)
    if inference_only:  # do a second decoder step
        output_pt, new_states_pt = decoder_pt(output_pt, new_states_pt)
    assert decoder_mx.state_structure() == decoder_pt.state_structure()
    assert decoder_mx.get_num_hidden() == decoder_pt.get_num_hidden()
    assert len(init_states_mx) == len(init_states_pt)
    for s_mx, s_pt, structure in zip(init_states_mx, init_states_pt, decoder_mx.state_structure()):
        if structure != C.MASK_STATE:  # MASK state is new in Pytorch and not equivalent
            assert np.allclose(s_mx.asnumpy(), s_pt.detach().numpy(), atol=1e-05)
    output_mx = output_mx.asnumpy()
    output_pt = output_pt.detach().numpy()
    print("Max deviation:", onp.abs(output_mx - output_pt).max())
    assert np.allclose(output_mx, output_pt, atol=1e-05)
    assert len(new_states_mx) == len(new_states_pt)
    for i, (s_mx, s_pt, structure) in enumerate(zip(new_states_mx, new_states_pt, decoder_mx.state_structure())):
        if structure != C.MASK_STATE:  # MASK state is new in Pytorch and not equivalent
            assert np.allclose(s_mx.asnumpy(), s_pt.detach().numpy(), atol=1e-05)
|
# Copyright 2017--2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import numpy as onp
import pytest
import torch as pt
import sockeye.constants as C
import sockeye.decoder_pt
import sockeye.transformer_pt
@pytest.mark.parametrize('lhuc', [False, True])
def test_get_decoder(lhuc):
    """pytorch_get_decoder builds a PyTorchTransformerDecoder from a
    TransformerConfig, with and without LHUC."""
    # BUG FIX: the original argvalues were 1-tuples ((False,), (True,)).
    # With a single argname pytest passes each element through unchanged,
    # so `lhuc` was a truthy tuple both times and use_lhuc=False was never
    # actually exercised.
    config = sockeye.transformer_pt.TransformerConfig(
        model_size=20,
        attention_heads=10,
        feed_forward_num_hidden=30,
        act_type='test_act',
        num_layers=50,
        dropout_attention=0.5,
        dropout_act=0.6,
        dropout_prepost=0.1,
        positional_embedding_type=C.FIXED_POSITIONAL_EMBEDDING,
        preprocess_sequence=C.FIXED_POSITIONAL_EMBEDDING,
        postprocess_sequence='test_post_seq',
        max_seq_len_source=60,
        max_seq_len_target=70,
        use_lhuc=lhuc)
    decoder = sockeye.decoder_pt.pytorch_get_decoder(config, inference_only=False)
    assert type(decoder) == sockeye.decoder_pt.PyTorchTransformerDecoder
@pytest.mark.parametrize("inference_only", [False, True])
def test_mx_pt_eq_transformer_decoder(inference_only):
    """Check that the PyTorch transformer decoder reproduces the MXNet
    decoder's outputs and states (atol 1e-5) after copying the weights,
    in both training-style and inference-only modes."""
    pytest.importorskip("mxnet")  # skip gracefully when MXNet is absent
    import sockeye.transformer
    import sockeye.decoder
    import mxnet as mx
    from mxnet import np
    # Fix both frameworks' RNGs so weights and inputs are reproducible.
    pt.manual_seed(13)
    mx.random.seed(13)
    # Identical hyper-parameters for both configs; all dropout is disabled
    # so the two forward passes are deterministic and comparable.
    config_mx = sockeye.transformer.TransformerConfig(model_size=128,
                                                      attention_heads=8,
                                                      feed_forward_num_hidden=256,
                                                      act_type='relu',
                                                      num_layers=12,
                                                      dropout_attention=0,
                                                      dropout_act=0,
                                                      dropout_prepost=0,
                                                      positional_embedding_type=C.FIXED_POSITIONAL_EMBEDDING,
                                                      preprocess_sequence='n',
                                                      postprocess_sequence='r',
                                                      max_seq_len_source=50,
                                                      max_seq_len_target=60,
                                                      depth_key_value=128,
                                                      use_lhuc=False)
    config_pt = sockeye.transformer_pt.TransformerConfig(model_size=128,
                                                         attention_heads=8,
                                                         feed_forward_num_hidden=256,
                                                         act_type='relu',
                                                         num_layers=12,
                                                         dropout_attention=0,
                                                         dropout_act=0,
                                                         dropout_prepost=0,
                                                         positional_embedding_type=C.FIXED_POSITIONAL_EMBEDDING,
                                                         preprocess_sequence='n',
                                                         postprocess_sequence='r',
                                                         max_seq_len_source=50,
                                                         max_seq_len_target=60,
                                                         depth_key_value=128,
                                                         use_lhuc=False)
    batch = 12
    encoder_seq_len = 45
    decoder_seq_len = 39 if not inference_only else 1  # inference decodes one step at a time
    encoder_outputs_mx = np.random.uniform(0, 1, (batch, encoder_seq_len, config_mx.model_size))
    encoder_outputs_pt = pt.tensor(encoder_outputs_mx.asnumpy())
    encoder_valid_length_mx = np.random.randint(1, encoder_seq_len, (batch,))
    encoder_valid_length_pt = pt.tensor(encoder_valid_length_mx.asnumpy())
    inputs_mx = np.random.uniform(0, 1, (batch, decoder_seq_len, config_mx.model_size))
    inputs_pt = pt.tensor(inputs_mx.asnumpy())
    # mx
    decoder_mx = sockeye.decoder.get_decoder(config_mx, inference_only=inference_only, dtype=C.DTYPE_FP32)
    decoder_mx.initialize()
    init_states_mx = decoder_mx.init_state_from_encoder(encoder_outputs_mx, encoder_valid_length_mx)
    output_mx, new_states_mx = decoder_mx(inputs_mx, init_states_mx)
    if inference_only:  # do a second decoder step
        output_mx, new_states_mx = decoder_mx(output_mx, new_states_mx)
    # pt
    decoder_pt = sockeye.decoder_pt.pytorch_get_decoder(config_pt, inference_only=inference_only)
    decoder_pt.weights_from_mxnet_block(decoder_mx)
    decoder_pt.eval()
    init_states_pt = decoder_pt.init_state_from_encoder(encoder_outputs_pt, encoder_valid_length_pt)
    output_pt, new_states_pt = decoder_pt(inputs_pt, init_states_pt)
    if inference_only:  # do a second decoder step
        output_pt, new_states_pt = decoder_pt(output_pt, new_states_pt)
    assert decoder_mx.state_structure() == decoder_pt.state_structure()
    assert decoder_mx.get_num_hidden() == decoder_pt.get_num_hidden()
    assert len(init_states_mx) == len(init_states_pt)
    for s_mx, s_pt, structure in zip(init_states_mx, init_states_pt, decoder_mx.state_structure()):
        if structure != C.MASK_STATE:  # MASK state is new in Pytorch and not equivalent
            assert np.allclose(s_mx.asnumpy(), s_pt.detach().numpy(), atol=1e-05)
    output_mx = output_mx.asnumpy()
    output_pt = output_pt.detach().numpy()
    print("Max deviation:", onp.abs(output_mx - output_pt).max())
    assert np.allclose(output_mx, output_pt, atol=1e-05)
    assert len(new_states_mx) == len(new_states_pt)
    for i, (s_mx, s_pt, structure) in enumerate(zip(new_states_mx, new_states_pt, decoder_mx.state_structure())):
        if structure != C.MASK_STATE:  # MASK state is new in Pytorch and not equivalent
            assert np.allclose(s_mx.asnumpy(), s_pt.detach().numpy(), atol=1e-05)
|
en
| 0.870358
|
# Copyright 2017--2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You may not # use this file except in compliance with the License. A copy of the License # is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # mx # do a second decoder step # pt # do a second decoder step # MASK state is new in Pytorch and not equivalent # MASK state is new in Pytorch and not equivalent
| 1.958842
| 2
|
lemur/common/validators.py
|
dck25/lemur
| 1,656
|
6628423
|
import re
from cryptography import x509
from cryptography.exceptions import UnsupportedAlgorithm, InvalidSignature
from cryptography.hazmat.backends import default_backend
from cryptography.x509 import NameOID
from flask import current_app
from marshmallow.exceptions import ValidationError
from lemur.auth.permissions import SensitiveDomainPermission
from lemur.common.utils import check_cert_signature, is_weekend
def common_name(value):
    """If the common name could be a domain name, apply domain validation rules."""
    # A common name is either a domain or a human-readable subject name (CA
    # names, client certificates). Heuristic: human-readable names contain an
    # inner space, so anything without one (ignoring surrounding whitespace)
    # is treated as a domain and validated as such.
    looks_human_readable = " " in value.strip()
    if looks_human_readable:
        return None
    return sensitive_domain(value)
def sensitive_domain(domain):
    """
    Checks if user has the admin role, the domain does not match sensitive domains and allowed domain patterns.

    :param domain: domain name (str)
    :return: None if the certificate may be issued; raises ValidationError otherwise
    """
    # Admins may issue certificates for any domain.
    if SensitiveDomainPermission().can():
        return

    # When an allowlist is configured, the domain must match at least one pattern.
    patterns = current_app.config.get("LEMUR_ALLOWED_DOMAINS", [])
    if patterns:
        matches_allowlist = any(re.match(pattern, domain) for pattern in patterns)
        if not matches_allowlist:
            raise ValidationError(
                "Domain {0} does not match allowed domain patterns. "
                "Contact an administrator to issue the certificate.".format(domain)
            )

    # Imported here to avoid a circular import with lemur.domains.
    from lemur.domains import service as domain_service

    if domain_service.is_domain_sensitive(domain):
        raise ValidationError(
            "Domain {0} has been marked as sensitive. "
            "Contact an administrator to issue the certificate.".format(domain)
        )
def encoding(oid_encoding):
    """
    Determines if the specified oid type is valid.

    :param oid_encoding: OID encoding name (str), compared case-insensitively
    :return: None if valid; raises ValidationError otherwise
    """
    valid_types = ["b64asn1", "string", "ia5string"]
    recognized = {o_type.lower() for o_type in valid_types}
    if oid_encoding.lower() not in recognized:
        raise ValidationError(
            "Invalid Oid Encoding: {0} choose from {1}".format(
                oid_encoding, ",".join(valid_types)
            )
        )
def sub_alt_type(alt_type):
    """
    Determines if the specified subject alternate type is valid.

    :param alt_type: subject alternative name type (str), compared case-insensitively
    :return: None if valid; raises ValidationError otherwise
    """
    valid_types = [
        "DNSName",
        "IPAddress",
        "uniFormResourceIdentifier",
        "directoryName",
        "rfc822Name",
        "registrationID",
        "otherName",
        "x400Address",
        "EDIPartyName",
    ]
    if alt_type.lower() not in [a_type.lower() for a_type in valid_types]:
        # Bug fix: the message previously formatted the `type` builtin
        # (rendering as "<class 'type'>") instead of the offending value.
        raise ValidationError(
            "Invalid SubAltName Type: {0} choose from {1}".format(
                alt_type, ",".join(valid_types)
            )
        )
def csr(data):
    """
    Determines if the CSR is valid and allowed.

    :param data: PEM-encoded certificate signing request (str)
    :return: None; raises ValidationError on parse failure or disallowed names
    """
    try:
        request = x509.load_pem_x509_csr(data.encode("utf-8"), default_backend())
    except Exception:
        raise ValidationError("CSR presented is not valid.")

    # Every common name in the subject must pass domain validation.
    try:
        for attribute in request.subject.get_attributes_for_oid(NameOID.COMMON_NAME):
            common_name(attribute.value)
    except ValueError as err:
        current_app.logger.info("Error parsing Subject from CSR: %s", err)
        raise ValidationError("Invalid Subject value in supplied CSR")

    # DNS SubjectAltNames, when the extension is present, are validated too.
    try:
        san_extension = request.extensions.get_extension_for_class(
            x509.SubjectAlternativeName
        )
    except x509.ExtensionNotFound:
        return
    for dns_name in san_extension.value.get_values_for_type(x509.DNSName):
        sensitive_domain(dns_name)
def dates(data):
    """Validate the validity_start/validity_end pair in *data*.

    Both dates must be supplied together, start must precede end, weekend
    expiration may be forbidden via LEMUR_ALLOW_WEEKEND_EXPIRATION, and when
    an authority is supplied the window must lie within the authority
    certificate's validity period.

    :param data: dict with optional keys validity_start, validity_end, authority
    :return: the (unmodified) data dict; raises ValidationError when invalid
    """
    start = data.get("validity_start")
    end = data.get("validity_end")

    if end and not start:
        raise ValidationError("If validity start is specified so must validity end.")
    if start and not end:
        raise ValidationError("If validity end is specified so must validity start.")

    if start and end:
        allow_weekend = current_app.config.get("LEMUR_ALLOW_WEEKEND_EXPIRATION", True)
        if not allow_weekend and is_weekend(end):
            raise ValidationError("Validity end must not land on a weekend.")
        if not start < end:
            raise ValidationError("Validity start must be before validity end.")

        authority = data.get("authority")
        if authority:
            authority_cert = authority.authority_certificate
            # The requested window must fit inside the issuing cert's window.
            if start.date() < authority_cert.not_before.date():
                raise ValidationError(
                    "Validity start must not be before {0}".format(
                        authority_cert.not_before
                    )
                )
            if end.date() > authority_cert.not_after.date():
                raise ValidationError(
                    "Validity end must not be after {0}".format(
                        authority_cert.not_after
                    )
                )

    return data
def verify_private_key_match(key, cert, error_class=ValidationError):
    """
    Checks that the supplied private key matches the certificate.

    :param cert: Parsed certificate
    :param key: Parsed private key
    :param error_class: Exception class to raise on error
    """
    cert_numbers = cert.public_key().public_numbers()
    key_numbers = key.public_key().public_numbers()
    if key_numbers != cert_numbers:
        raise error_class("Private key does not match certificate.")
def verify_cert_chain(certs, error_class=ValidationError):
    """
    Verifies that the certificates in the chain are correct.

    We don't bother with full cert validation but just check that certs in the chain are signed by the next, to avoid
    basic human errors -- such as pasting the wrong certificate.

    :param certs: List of parsed certificates, use parse_cert_chain()
    :param error_class: Exception class to raise on error
    """
    current = certs[0]
    for issuer in certs[1:]:
        # Verify the previous cert's signature with this issuer's public key.
        # "certificate validation is a complex problem that involves much more than just signature checks"
        try:
            check_cert_signature(current, issuer.public_key())
        except InvalidSignature:
            # Imported here to avoid a circular import.
            from lemur.common import defaults

            raise error_class(
                "Incorrect chain certificate(s) provided: '%s' is not signed by '%s'"
                % (
                    defaults.common_name(current) or "Unknown",
                    defaults.common_name(issuer),
                )
            )
        except UnsupportedAlgorithm as err:
            current_app.logger.warning("Skipping chain validation: %s", err)
        # The next iteration verifies this issuer against the following cert.
        current = issuer
|
import re
from cryptography import x509
from cryptography.exceptions import UnsupportedAlgorithm, InvalidSignature
from cryptography.hazmat.backends import default_backend
from cryptography.x509 import NameOID
from flask import current_app
from marshmallow.exceptions import ValidationError
from lemur.auth.permissions import SensitiveDomainPermission
from lemur.common.utils import check_cert_signature, is_weekend
def common_name(value):
"""If the common name could be a domain name, apply domain validation rules."""
# Common name could be a domain name, or a human-readable name of the subject (often used in CA names or client
# certificates). As a simple heuristic, we assume that human-readable names always include a space.
# However, to avoid confusion for humans, we also don't count spaces at the beginning or end of the string.
if " " not in value.strip():
return sensitive_domain(value)
def sensitive_domain(domain):
"""
Checks if user has the admin role, the domain does not match sensitive domains and allowed domain patterns.
:param domain: domain name (str)
:return:
"""
if SensitiveDomainPermission().can():
# User has permission, no need to check anything
return
allowlist = current_app.config.get("LEMUR_ALLOWED_DOMAINS", [])
if allowlist and not any(re.match(pattern, domain) for pattern in allowlist):
raise ValidationError(
"Domain {0} does not match allowed domain patterns. "
"Contact an administrator to issue the certificate.".format(domain)
)
# Avoid circular import.
from lemur.domains import service as domain_service
if domain_service.is_domain_sensitive(domain):
raise ValidationError(
"Domain {0} has been marked as sensitive. "
"Contact an administrator to issue the certificate.".format(domain)
)
def encoding(oid_encoding):
"""
Determines if the specified oid type is valid.
:param oid_encoding:
:return:
"""
valid_types = ["b64asn1", "string", "ia5string"]
if oid_encoding.lower() not in [o_type.lower() for o_type in valid_types]:
raise ValidationError(
"Invalid Oid Encoding: {0} choose from {1}".format(
oid_encoding, ",".join(valid_types)
)
)
def sub_alt_type(alt_type):
"""
Determines if the specified subject alternate type is valid.
:param alt_type:
:return:
"""
valid_types = [
"DNSName",
"IPAddress",
"uniFormResourceIdentifier",
"directoryName",
"rfc822Name",
"registrationID",
"otherName",
"x400Address",
"EDIPartyName",
]
if alt_type.lower() not in [a_type.lower() for a_type in valid_types]:
raise ValidationError(
"Invalid SubAltName Type: {0} choose from {1}".format(
type, ",".join(valid_types)
)
)
def csr(data):
"""
Determines if the CSR is valid and allowed.
:param data:
:return:
"""
try:
request = x509.load_pem_x509_csr(data.encode("utf-8"), default_backend())
except Exception:
raise ValidationError("CSR presented is not valid.")
# Validate common name and SubjectAltNames
try:
for name in request.subject.get_attributes_for_oid(NameOID.COMMON_NAME):
common_name(name.value)
except ValueError as err:
current_app.logger.info("Error parsing Subject from CSR: %s", err)
raise ValidationError("Invalid Subject value in supplied CSR")
try:
alt_names = request.extensions.get_extension_for_class(
x509.SubjectAlternativeName
)
for name in alt_names.value.get_values_for_type(x509.DNSName):
sensitive_domain(name)
except x509.ExtensionNotFound:
pass
def dates(data):
if not data.get("validity_start") and data.get("validity_end"):
raise ValidationError("If validity start is specified so must validity end.")
if not data.get("validity_end") and data.get("validity_start"):
raise ValidationError("If validity end is specified so must validity start.")
if data.get("validity_start") and data.get("validity_end"):
if not current_app.config.get("LEMUR_ALLOW_WEEKEND_EXPIRATION", True):
if is_weekend(data.get("validity_end")):
raise ValidationError("Validity end must not land on a weekend.")
if not data["validity_start"] < data["validity_end"]:
raise ValidationError("Validity start must be before validity end.")
if data.get("authority"):
if (
data.get("validity_start").date()
< data["authority"].authority_certificate.not_before.date()
):
raise ValidationError(
"Validity start must not be before {0}".format(
data["authority"].authority_certificate.not_before
)
)
if (
data.get("validity_end").date()
> data["authority"].authority_certificate.not_after.date()
):
raise ValidationError(
"Validity end must not be after {0}".format(
data["authority"].authority_certificate.not_after
)
)
return data
def verify_private_key_match(key, cert, error_class=ValidationError):
"""
Checks that the supplied private key matches the certificate.
:param cert: Parsed certificate
:param key: Parsed private key
:param error_class: Exception class to raise on error
"""
if key.public_key().public_numbers() != cert.public_key().public_numbers():
raise error_class("Private key does not match certificate.")
def verify_cert_chain(certs, error_class=ValidationError):
"""
Verifies that the certificates in the chain are correct.
We don't bother with full cert validation but just check that certs in the chain are signed by the next, to avoid
basic human errors -- such as pasting the wrong certificate.
:param certs: List of parsed certificates, use parse_cert_chain()
:param error_class: Exception class to raise on error
"""
cert = certs[0]
for issuer in certs[1:]:
# Use the current cert's public key to verify the previous signature.
# "certificate validation is a complex problem that involves much more than just signature checks"
try:
check_cert_signature(cert, issuer.public_key())
except InvalidSignature:
# Avoid circular import.
from lemur.common import defaults
raise error_class(
"Incorrect chain certificate(s) provided: '%s' is not signed by '%s'"
% (
defaults.common_name(cert) or "Unknown",
defaults.common_name(issuer),
)
)
except UnsupportedAlgorithm as err:
current_app.logger.warning("Skipping chain validation: %s", err)
# Next loop will validate that *this issuer* cert is signed by the next chain cert.
cert = issuer
|
en
| 0.836972
|
If the common name could be a domain name, apply domain validation rules. # Common name could be a domain name, or a human-readable name of the subject (often used in CA names or client # certificates). As a simple heuristic, we assume that human-readable names always include a space. # However, to avoid confusion for humans, we also don't count spaces at the beginning or end of the string. Checks if user has the admin role, the domain does not match sensitive domains and allowed domain patterns. :param domain: domain name (str) :return: # User has permission, no need to check anything # Avoid circular import. Determines if the specified oid type is valid. :param oid_encoding: :return: Determines if the specified subject alternate type is valid. :param alt_type: :return: Determines if the CSR is valid and allowed. :param data: :return: # Validate common name and SubjectAltNames Checks that the supplied private key matches the certificate. :param cert: Parsed certificate :param key: Parsed private key :param error_class: Exception class to raise on error Verifies that the certificates in the chain are correct. We don't bother with full cert validation but just check that certs in the chain are signed by the next, to avoid basic human errors -- such as pasting the wrong certificate. :param certs: List of parsed certificates, use parse_cert_chain() :param error_class: Exception class to raise on error # Use the current cert's public key to verify the previous signature. # "certificate validation is a complex problem that involves much more than just signature checks" # Avoid circular import. # Next loop will validate that *this issuer* cert is signed by the next chain cert.
| 2.394251
| 2
|
methods/segmentation/utils.py
|
ciampluca/counting_perineuronal_nets
| 6
|
6628424
|
import pandas as pd
from skimage import measure
def segmentation_map_to_points(y_pred, thr=None):
    """ Find connected components of a segmentation map and
    returns a pandas DataFrame with the centroids' coordinates
    and the score (computes as maximum value of the centroid in the map).

    Args:
        y_pred (ndarray): (H,W)-shaped array with values in [0, 1]
        thr (float, optional): Optional threshold used to binarize the map;
                               if None, the map should be already binary. Defaults to None.
    """
    # Binarize only when a threshold is supplied; otherwise trust the input.
    binary_map = y_pred >= thr if thr is not None else y_pred

    # Label 4-connected components and tabulate centroid + bounding box.
    labeled_map, _ = measure.label(binary_map, return_num=True, connectivity=1)
    props = measure.regionprops_table(labeled_map, properties=('centroid', 'bbox'))
    column_names = {
        'centroid-0': 'Y',
        'centroid-1': 'X',
        'bbox-0': 'y0',
        'bbox-1': 'x0',
        'bbox-2': 'y1',
        'bbox-3': 'x1',
    }
    localizations = pd.DataFrame(props).rename(column_names, axis=1)

    # Score each detection with the maximum map value inside its bounding box.
    localizations['score'] = [
        y_pred[y0:y1, x0:x1].max()
        for y0, x0, y1, x1 in localizations[['y0', 'x0', 'y1', 'x1']].values
    ]

    # Bounding-box columns are internal; expose only Y, X and score.
    return localizations.drop(columns=['y0', 'x0', 'y1', 'x1'])
|
import pandas as pd
from skimage import measure
def segmentation_map_to_points(y_pred, thr=None):
""" Find connected components of a segmentation map and
returns a pandas DataFrame with the centroids' coordinates
and the score (computes as maximum value of the centroid in the map).
Args:
y_pred (ndarray): (H,W)-shaped array with values in [0, 1]
thr (float, optional): Optional threshold used to binarize the map;
if None, the map should be already binary. Defaults to None.
"""
y_pred_hard = y_pred if thr is None else y_pred >= thr
# find connected components and centroids
labeled_map, num_components = measure.label(y_pred_hard, return_num=True, connectivity=1)
localizations = measure.regionprops_table(labeled_map, properties=('centroid', 'bbox'))
localizations = pd.DataFrame(localizations).rename({
'centroid-0': 'Y',
'centroid-1': 'X',
'bbox-0': 'y0',
'bbox-1': 'x0',
'bbox-2': 'y1',
'bbox-3': 'x1',
}, axis=1)
bboxes = localizations[['y0', 'x0', 'y1', 'x1']].values
localizations['score'] = [y_pred[y0:y1,x0:x1].max() for y0, x0, y1, x1 in bboxes]
localizations = localizations.drop(columns=['y0', 'x0', 'y1', 'x1'])
return localizations
|
en
| 0.747188
|
Find connected components of a segmentation map and returns a pandas DataFrame with the centroids' coordinates and the score (computes as maximum value of the centroid in the map). Args: y_pred (ndarray): (H,W)-shaped array with values in [0, 1] thr (float, optional): Optional threshold used to binarize the map; if None, the map should be already binary. Defaults to None. # find connected components and centroids
| 2.916562
| 3
|
smiles2actions/name_filters/state_filter.py
|
rxn4chemistry/smiles2actions
| 8
|
6628425
|
<reponame>rxn4chemistry/smiles2actions
import re
from typing import List
from .filter import Filter
from ..regex_utils import RegexMatch, match_all, alternation, optional
from ..utils import dash_characters
# Regex fragment matching an optional " of" suffix (e.g. "solid of").
_optional_of = optional(' of')
# Regex fragments for physical-state descriptors found in chemical names
# (solid/liquid/gas wording, "(s)"/"(g)" annotations, metal/metallic).
# NOTE(review): "(s)" can also denote chirality; see StateFilter._is_valid.
_descriptors = [
    r'\b[Ss]olid\b' + _optional_of,
    r'\b[Ll]iquid\b' + _optional_of,
    r'\bgas\b',
    r'\(s\)',
    r'\(g\)',
    r'\b[Mm]etal' + optional('lic'),
]
class StateFilter(Filter):
    """
    Looks for substrings related to the state (solid, liquid, gaseous).
    """

    def __init__(self):
        # Compile a single alternation over all state descriptors once.
        self.regex = re.compile(alternation(_descriptors))

    def find_matches(self, chemical_name: str) -> List[RegexMatch]:
        candidates = match_all(self.regex, chemical_name)
        return [
            candidate
            for candidate in candidates
            if self._is_valid(candidate, chemical_name)
        ]

    def _is_valid(self, match: RegexMatch, chemical_name: str) -> bool:
        """
        The regex matching in 'find_matches' is a bit too generous.
        This function checks whether the match should be kept.
        """
        # Only "(s)" needs extra scrutiny: followed by a dash it most likely
        # denotes chirality, not the solid state.
        if match.text != '(s)':
            return True
        following = match.span.stop
        if following < len(chemical_name) and chemical_name[following] in dash_characters:
            return False
        return True
|
import re
from typing import List
from .filter import Filter
from ..regex_utils import RegexMatch, match_all, alternation, optional
from ..utils import dash_characters
_optional_of = optional(' of')
_descriptors = [
r'\b[Ss]olid\b' + _optional_of,
r'\b[Ll]iquid\b' + _optional_of,
r'\bgas\b',
r'\(s\)',
r'\(g\)',
r'\b[Mm]etal' + optional('lic'),
]
class StateFilter(Filter):
"""
Looks for substrings related to the state (solid, liquid, gaseous).
"""
def __init__(self):
regex_string = alternation(_descriptors)
self.regex = re.compile(regex_string)
def find_matches(self, chemical_name: str) -> List[RegexMatch]:
matches = match_all(self.regex, chemical_name)
return [m for m in matches if self._is_valid(m, chemical_name)]
def _is_valid(self, match: RegexMatch, chemical_name: str) -> bool:
"""
The regex matching in 'find_matches' is a bit too generous.
This function checks whether the match should be kept.
"""
# if "(s)" is followed by a dash, it probably refers to the chirality -> ignore it
if match.text == '(s)':
next_char_index = match.span.stop
try:
if chemical_name[next_char_index] in dash_characters:
return False
except IndexError:
pass
return True
|
en
| 0.853543
|
Looks for substrings related to the state (solid, liquid, gaseous). The regex matching in 'find_matches' is a bit too generous. This function checks whether the match should be kept. # if "(s)" is followed by a dash, it probably refers to the chirality -> ignore it
| 2.703151
| 3
|
disk2.py
|
berrnd/linuxfabrik-lib
| 0
|
6628426
|
<gh_stars>0
#! /usr/bin/env python2
# -*- coding: utf-8; py-indent-offset: 4 -*-
#
# Author: Linuxfabrik GmbH, Zurich, Switzerland
# Contact: info (at) linuxfabrik (dot) ch
# https://www.linuxfabrik.ch/
# License: The Unlicense, see LICENSE file.
# https://github.com/Linuxfabrik/monitoring-plugins/blob/main/CONTRIBUTING.rst
"""Offers file and disk related functions, like getting a list of
partitions, grepping a file, etc.
"""
__author__ = 'Linuxfabrik GmbH, Zurich/Switzerland'
__version__ = '2022021601'
import csv
import os
import re
import sys
import tempfile
def get_cwd():
    """Gets the current working directory.
    """
    # Thin wrapper around os.getcwd() for a uniform library interface.
    cwd = os.getcwd()
    return cwd
def get_tmpdir():
    """ Return the name of the directory used for temporary files, always
    without trailing '/'.

    Searches a standard list of directories to find one which the calling user
    can create files in. The list is:

    * The directory named by the TMPDIR environment variable.
    * The directory named by the TEMP environment variable.
    * The directory named by the TMP environment variable.
    * A platform-specific location:
      - On Windows, the directories C:\\TEMP, C:\\TMP, \\TEMP, and \\TMP,
        in that order.
      - On all other platforms, the directories /tmp, /var/tmp, and /usr/tmp,
        in that order.
    * As a last resort, the current working directory.
    """
    try:
        tmpdir = tempfile.gettempdir()
    except:
        # Extremely defensive fallback; gettempdir() normally cannot fail.
        tmpdir = '/tmp'
    return tmpdir
def grep_file(filename, pattern):
    """Like `grep` searches for `pattern` in `filename`. Returns the
    match, otherwise `False`.

    >>> success, nc_version=lib.disk2.grep_file('version.php', r'\\$OC_version=array\\((.*)\\)')

    Parameters
    ----------
    filename : str
        The file.
    pattern : str
        A Python regular expression with at least one capturing group.

    Returns
    -------
    tuple
        tuple[0]: bool: if successful (no I/O or file handling errors) or not
        tuple[1]: str: the string matched by `pattern` (if any)
    """
    try:
        with open(filename, 'r') as file:
            data = file.read()
    except IOError as e:
        return (False, u'I/O error "{}" while opening or reading {}'.format(e.strerror, filename))
    except:
        return (False, u'Unknown error opening or reading {}'.format(filename))
    # Bug fix: a non-matching pattern previously raised AttributeError because
    # .group(1) was called on None; now it returns (False, <message>) as the
    # docstring promises.
    match = re.search(pattern, data)
    if match is None:
        return (False, u'Pattern "{}" not found in {}'.format(pattern, filename))
    return (True, match.group(1))
def read_csv(filename, delimiter=',', quotechar='"', newline='', as_dict=False, skip_empty_rows=False):
    """Reads a CSV file, and returns a list or a dict.
    """
    try:
        with open(filename) as csvfile:
            if as_dict:
                reader = csv.DictReader(csvfile, delimiter=delimiter, quotechar=quotechar)
            else:
                reader = csv.reader(csvfile, delimiter=delimiter, quotechar=quotechar)
            data = []
            for row in reader:
                # Optionally drop rows whose cells are all empty/whitespace.
                if skip_empty_rows and all('' == s or s.isspace() for s in row):
                    continue
                data.append(row)
    except csv.Error as e:
        return (False, u'CSV error in file {}, line {}: {}'.format(filename, reader.line_num, e))
    except IOError as e:
        return (False, u'I/O error "{}" while opening or reading {}'.format(e.strerror, filename))
    except:
        return (False, u'Unknown error opening or reading {}'.format(filename))
    return (True, data)
def read_file(filename):
    """Reads a file.
    """
    try:
        with open(filename, 'r') as f:
            return (True, f.read())
    except IOError as e:
        return (False, u'I/O error "{}" while opening or reading {}'.format(e.strerror, filename))
    except:
        return (False, u'Unknown error opening or reading {}'.format(filename))
def rm_file(filename):
    """Deletes/Removes a file.

    >>> rm_file('test.txt')
    (True, None)
    """
    try:
        os.remove(filename)
        return (True, None)
    except OSError as e:
        return (False, u'OS error "{}" while deleting {}'.format(e.strerror, filename))
    except:
        return (False, u'Unknown error deleting {}'.format(filename))
def walk_directory(path, exclude_pattern=r'', include_pattern=r'', relative=True):
    """Walks recursively through a directory and creates a list of files.
    If an exclude_pattern (regex) is specified, files matching this pattern
    are ignored. If an include_pattern (regex) is specified, only files matching
    this pattern are put on the list (in this particular order).

    >>> lib.disk2.walk_directory('/tmp')
    ['cpu-usage.db', 'segv_output.MCiVt9']
    >>> lib.disk2.walk_directory('/tmp', exclude_pattern='.*Temp-.*', relative=False)
    ['/tmp/cpu-usage.db', '/tmp/segv_output.MCiVt9']
    """
    # Compile the filters once, outside the walk; both are case-insensitive.
    exclude_re = re.compile(exclude_pattern, re.IGNORECASE) if exclude_pattern else None
    include_re = re.compile(include_pattern, re.IGNORECASE) if include_pattern else None

    if not path.endswith('/'):
        path += '/'

    result = []
    for dirpath, _, filenames in os.walk(path):
        for name in filenames:
            full = os.path.join(dirpath, name)
            # Exclusion is checked before inclusion, matching full paths.
            if exclude_re and exclude_re.match(full) is not None:
                continue
            if include_re and include_re.match(full) is None:
                continue
            result.append(full.replace(path, '') if relative else full)
    return result
def write_file(filename, content, append=False):
    """Writes a string to a file.

    >>> write_file('test.txt', 'First line\nSecond line')
    (True, None)
    """
    mode = 'a' if append else 'w'
    try:
        # The with-block closes the file; no explicit close() needed.
        with open(filename, mode) as f:
            f.write(content)
    except IOError as e:
        return (False, u'I/O error "{}" while writing {}'.format(e.strerror, filename))
    except:
        return (False, u'Unknown error writing {}, or content is not a string'.format(filename))
    return (True, None)
|
#! /usr/bin/env python2
# -*- coding: utf-8; py-indent-offset: 4 -*-
#
# Author: Linuxfabrik GmbH, Zurich, Switzerland
# Contact: info (at) linuxfabrik (dot) ch
# https://www.linuxfabrik.ch/
# License: The Unlicense, see LICENSE file.
# https://github.com/Linuxfabrik/monitoring-plugins/blob/main/CONTRIBUTING.rst
"""Offers file and disk related functions, like getting a list of
partitions, grepping a file, etc.
"""
__author__ = 'Linuxfabrik GmbH, Zurich/Switzerland'
__version__ = '2022021601'
import csv
import os
import re
import sys
import tempfile
def get_cwd():
"""Gets the current working directory.
"""
return os.getcwd()
def get_tmpdir():
""" Return the name of the directory used for temporary files, always
without trailing '/'.
Searches a standard list of directories to find one which the calling user
can create files in. The list is:
* The directory named by the TMPDIR environment variable.
* The directory named by the TEMP environment variable.
* The directory named by the TMP environment variable.
* A platform-specific location:
- On Windows, the directories C:\\TEMP, C:\\TMP, \\TEMP, and \\TMP,
in that order.
- On all other platforms, the directories /tmp, /var/tmp, and /usr/tmp,
in that order.
* As a last resort, the current working directory.
"""
try:
return tempfile.gettempdir()
except:
return '/tmp'
def grep_file(filename, pattern):
"""Like `grep` searches for `pattern` in `filename`. Returns the
match, otherwise `False`.
>>> success, nc_version=lib.disk2.grep_file('version.php', r'\\$OC_version=array\\((.*)\\)')
Parameters
----------
filename : str
The file.
pattern : str
A Python regular expression.
Returns
-------
tuple
tuple[0]: bool: if successful (no I/O or file handling errors) or not
tuple[1]: str: the string matched by `pattern` (if any)
"""
try:
with open(filename, 'r') as file:
data = file.read()
except IOError as e:
return (False, u'I/O error "{}" while opening or reading {}'.format(e.strerror, filename))
except:
return (False, u'Unknown error opening or reading {}'.format(filename))
else:
match = re.search(pattern, data).group(1)
return (True, match)
def read_csv(filename, delimiter=',', quotechar='"', newline='', as_dict=False, skip_empty_rows=False):
"""Reads a CSV file, and returns a list or a dict.
"""
try:
with open(filename) as csvfile:
if not as_dict:
reader = csv.reader(csvfile, delimiter=delimiter, quotechar=quotechar)
else:
reader = csv.DictReader(csvfile, delimiter=delimiter, quotechar=quotechar)
data = []
for row in reader:
# check if the list contains empty strings only
if skip_empty_rows and all('' == s or s.isspace() for s in row):
continue
data.append(row)
except csv.Error as e:
return (False, u'CSV error in file {}, line {}: {}'.format(filename, reader.line_num, e))
except IOError as e:
return (False, u'I/O error "{}" while opening or reading {}'.format(e.strerror, filename))
except:
return (False, u'Unknown error opening or reading {}'.format(filename))
return (True, data)
def read_file(filename):
"""Reads a file.
"""
try:
with open(filename, 'r') as f:
data = f.read()
except IOError as e:
return (False, u'I/O error "{}" while opening or reading {}'.format(e.strerror, filename))
except:
return (False, u'Unknown error opening or reading {}'.format(filename))
return (True, data)
def rm_file(filename):
"""Deletes/Removes a file.
>>> rm_file('test.txt')
(True, None)
"""
try:
os.remove(filename)
except OSError as e:
return (False, u'OS error "{}" while deleting {}'.format(e.strerror, filename))
except:
return (False, u'Unknown error deleting {}'.format(filename))
return (True, None)
def walk_directory(path, exclude_pattern=r'', include_pattern=r'', relative=True):
"""Walks recursively through a directory and creates a list of files.
If an exclude_pattern (regex) is specified, files matching this pattern
are ignored. If an include_pattern (regex) is specified, only files matching
this pattern are put on the list (in this particular order).
>>> lib.disk2.walk_directory('/tmp')
['cpu-usage.db', 'segv_output.MCiVt9']
>>> lib.disk2.walk_directory('/tmp', exclude_pattern='.*Temp-.*', relative=False)
['/tmp/cpu-usage.db', '/tmp/segv_output.MCiVt9']
"""
if exclude_pattern:
exclude_pattern = re.compile(exclude_pattern, re.IGNORECASE)
if include_pattern:
include_pattern = re.compile(include_pattern, re.IGNORECASE)
if not path.endswith('/'):
path += '/'
result = []
for current, dirs, files in os.walk(path):
for file in files:
file = os.path.join(current, file)
if exclude_pattern and exclude_pattern.match(file) is not None:
continue
if include_pattern and include_pattern.match(file) is None:
continue
if relative:
result.append(file.replace(path, ''))
else:
result.append(file)
return result
def write_file(filename, content, append=False):
"""Writes a string to a file.
>>> write_file('test.txt', 'First line\nSecond line')
(True, None)
"""
try:
with open(filename, 'w' if not append else 'a') as f:
f.write(content)
f.close()
except IOError as e:
return (False, u'I/O error "{}" while writing {}'.format(e.strerror, filename))
except:
return (False, u'Unknown error writing {}, or content is not a string'.format(filename))
return (True, None)
|
en
| 0.683255
|
#! /usr/bin/env python2 # -*- coding: utf-8; py-indent-offset: 4 -*- # # Author: Linuxfabrik GmbH, Zurich, Switzerland # Contact: info (at) linuxfabrik (dot) ch # https://www.linuxfabrik.ch/ # License: The Unlicense, see LICENSE file. # https://github.com/Linuxfabrik/monitoring-plugins/blob/main/CONTRIBUTING.rst Offers file and disk related functions, like getting a list of partitions, grepping a file, etc. Gets the current working directory. Return the name of the directory used for temporary files, always without trailing '/'. Searches a standard list of directories to find one which the calling user can create files in. The list is: * The directory named by the TMPDIR environment variable. * The directory named by the TEMP environment variable. * The directory named by the TMP environment variable. * A platform-specific location: - On Windows, the directories C:\\TEMP, C:\\TMP, \\TEMP, and \\TMP, in that order. - On all other platforms, the directories /tmp, /var/tmp, and /usr/tmp, in that order. * As a last resort, the current working directory. Like `grep` searches for `pattern` in `filename`. Returns the match, otherwise `False`. >>> success, nc_version=lib.disk2.grep_file('version.php', r'\\$OC_version=array\\((.*)\\)') Parameters ---------- filename : str The file. pattern : str A Python regular expression. Returns ------- tuple tuple[0]: bool: if successful (no I/O or file handling errors) or not tuple[1]: str: the string matched by `pattern` (if any) Reads a CSV file, and returns a list or a dict. # check if the list contains empty strings only Reads a file. Deletes/Removes a file. >>> rm_file('test.txt') (True, None) Walks recursively through a directory and creates a list of files. If an exclude_pattern (regex) is specified, files matching this pattern are ignored. If an include_pattern (regex) is specified, only files matching this pattern are put on the list (in this particular order). 
>>> lib.disk2.walk_directory('/tmp') ['cpu-usage.db', 'segv_output.MCiVt9'] >>> lib.disk2.walk_directory('/tmp', exclude_pattern='.*Temp-.*', relative=False) ['/tmp/cpu-usage.db', '/tmp/segv_output.MCiVt9'] Writes a string to a file. >>> write_file('test.txt', 'First line\nSecond line') (True, None)
| 2.52869
| 3
|
lib/testcode2/validation.py
|
giovannipizzi/testcode
| 17
|
6628427
|
'''
testcode2.validation
--------------------
Classes and functions for comparing data.
:copyright: (c) 2012 <NAME>.
:license: modified BSD; see LICENSE for more details.
'''
import re
import sys
import warnings
import testcode2.ansi as ansi
import testcode2.compatibility as compat
import testcode2.exceptions as exceptions
class Status:
    '''Enum-like record of the outcome of a comparison.

    bools: iterable of booleans.  All True => passed, a mixture => partial
        pass (warning), none True => failed.
    status: an existing status level to copy.  Ignored if name is given.
    name: status name ('unknown', 'skipped', 'passed', 'partial',
        'failed').  Takes precedence over both bools and status.
    '''
    def __init__(self, bools=None, status=None, name=None):
        # Status levels.  The numeric ordering is significant: a larger
        # value means "more failed", which __add__ exploits via max().
        self._unknown = -2
        self._skipped = -1
        self._passed = 0
        self._partial = 1
        self._failed = 2
        if name is not None:
            # Resolve e.g. name='passed' to the self._passed level.
            self.status = getattr(self, '_' + name)
        elif status is not None:
            self.status = status
        elif bools:
            if compat.compat_all(bools):
                self.status = self._passed
            elif compat.compat_any(bools):
                self.status = self._partial
            else:
                self.status = self._failed
        else:
            # No information supplied at all: status is unknown.
            self.status = self._unknown
    def unknown(self):
        '''Return true if stored status is unknown.'''
        return self.status == self._unknown
    def skipped(self):
        '''Return true if stored status is skipped.'''
        return self.status == self._skipped
    def passed(self):
        '''Return true if stored status is passed.'''
        return self.status == self._passed
    def warning(self):
        '''Return true if stored status is a partial pass.'''
        return self.status == self._partial
    def failed(self):
        '''Return true if stored status is failed.'''
        return self.status == self._failed
    def print_status(self, msg=None, verbose=1, vspace=True):
        '''Print the status.

        msg: optional message printed after the status (verbose > 1 only).
        verbose: 0 prints a single character per status (., U, S, W, F)
            without a newline; 1 prints a one-word summary; 2 additionally
            prints msg; > 1 with vspace also prints a trailing blank line.
        vspace: print a trailing blank line when verbose > 1.
        '''
        if verbose > 0:
            if self.status == self._unknown:
                text = 'Unknown.'
            elif self.status == self._passed:
                text = 'Passed.'
            elif self.status == self._skipped:
                text = '%s.' % ansi.ansi_format('SKIPPED', 'blue')
            elif self.status == self._partial:
                text = '%s.' % ansi.ansi_format('WARNING', 'blue')
            else:
                text = '%s.' % ansi.ansi_format('**FAILED**', 'red', 'normal', 'bold')
            print(text)
            if verbose > 1:
                if msg:
                    print(msg)
                if vspace:
                    print('')
        else:
            # Compact single-character progress output (no newline).
            flags = {
                self._unknown: 'U',
                self._skipped: 'S',
                self._passed: '.',
                self._partial: 'W',
            }
            sys.stdout.write(flags.get(self.status, 'F'))
            sys.stdout.flush()
    def __add__(self, other):
        '''Combine two statuses, keeping the most "failed" level.'''
        return Status(status=max(self.status, other.status))
class Tolerance:
    '''Store absolute and relative tolerances.

    Two values are regarded as equal if they are within these tolerances.

    name: name of tolerance object.
    absolute: threshold for absolute difference between two numbers.
    relative: threshold for relative difference between two numbers.
    strict: if true, then require numbers to be within both thresholds.
    '''
    def __init__(self, name='', absolute=None, relative=None, strict=True):
        self.name = name
        self.absolute = absolute
        self.relative = relative
        # NOTE(review): truthiness test means a tolerance of exactly 0 is
        # treated as "not set" -- confirm this is intended.
        if not self.absolute and not self.relative:
            err = 'Neither absolute nor relative tolerance given.'
            raise exceptions.TestCodeError(err)
        self.strict = strict
    def __repr__(self):
        return (self.absolute, self.relative, self.strict).__repr__()
    def __hash__(self):
        # Hash on the name only; tolerances are looked up by name.
        return hash(self.name)
    def __eq__(self, other):
        # Full-state equality (name, thresholds, strictness).
        return (isinstance(other, self.__class__) and
                self.__dict__ == other.__dict__)
    def validate(self, test_val, benchmark_val, key=''):
        '''Compare test and benchmark values to within the tolerances.

        Returns a (Status, message) tuple.  Values that do not support
        arithmetic fall back to an exact equality check (TypeError path).
        '''
        status = Status([True])
        msg = ['values are within tolerance.']
        compare = '(Test: %s. Benchmark: %s.)' % (test_val, benchmark_val)
        try:
            # Check float is not NaN (which we can't compare).
            if compat.isnan(test_val) or compat.isnan(benchmark_val):
                status = Status([False])
                msg = ['cannot compare NaNs.']
            else:
                # Check if values are within tolerances.
                (status_absolute, msg_absolute) = \
                        self.validate_absolute(benchmark_val, test_val)
                (status_relative, msg_relative) = \
                        self.validate_relative(benchmark_val, test_val)
                if self.absolute and self.relative and not self.strict:
                    # Require only one of thresholds to be met.
                    status = Status([status_relative.passed(),
                                     status_absolute.passed()])
                else:
                    # Only have one or other of thresholds (require active one
                    # to be met) or have both and strict mode is on (require
                    # both to be met).
                    status = status_relative + status_absolute
                err_stat = ''
                if status.warning():
                    err_stat = 'Warning: '
                elif status.failed():
                    err_stat = 'ERROR: '
                # Rebuild the message from the per-threshold reports.
                msg = []
                if self.absolute and msg_absolute:
                    msg.append('%s%s %s' % (err_stat, msg_absolute, compare))
                if self.relative and msg_relative:
                    msg.append('%s%s %s' % (err_stat, msg_relative, compare))
        except TypeError:
            if test_val != benchmark_val:
                # require test and benchmark values to be equal (within python's
                # definition of equality).
                status = Status([False])
                msg = ['values are different. ' + compare]
        if key and msg:
            # Prefix the data label so the report names the offending field.
            msg.insert(0, key)
            msg = '\n '.join(msg)
        else:
            msg = '\n'.join(msg)
        return (status, msg)
    def validate_absolute(self, benchmark_val, test_val):
        '''Compare test and benchmark values to the absolute tolerance.'''
        if self.absolute:
            diff = test_val - benchmark_val
            err = abs(diff)
            passed = err < self.absolute
            msg = ''
            if not passed:
                msg = ('absolute error %.2e greater than %.2e.' %
                        (err, self.absolute))
        else:
            # No absolute threshold configured: vacuously pass.
            passed = True
            msg = 'No absolute tolerance set. Passing without checking.'
        return (Status([passed]), msg)
    def validate_relative(self, benchmark_val, test_val):
        '''Compare test and benchmark values to the relative tolerance.'''
        if self.relative:
            diff = test_val - benchmark_val
            # Guard the division: zero benchmark with zero difference is a
            # perfect match; zero benchmark with any difference has infinite
            # relative error.
            if benchmark_val == 0 and diff == 0:
                err = 0
            elif benchmark_val == 0:
                err = float("Inf")
            else:
                err = abs(diff/benchmark_val)
            passed = err < self.relative
            msg = ''
            if not passed:
                msg = ('relative error %.2e greater than %.2e.' %
                        (err, self.relative))
        else:
            # No relative threshold configured: vacuously pass.
            passed = True
            msg = 'No relative tolerance set. Passing without checking.'
        return (Status([passed]), msg)
def compare_data(benchmark, test, default_tolerance, tolerances,
        ignore_fields=None):
    '''Compare two data dictionaries.

    benchmark, test: dicts mapping a data label to the sequence of values
        extracted for that label.
    default_tolerance: Tolerance used for labels with no specific entry.
    tolerances: dict of label -> Tolerance; a Tolerance name may also be a
        regex matched against the label.
    ignore_fields: labels excluded from the comparison.

    Returns (comparable, status, message), where comparable is True only if
    both dicts contain the same labels with the same number of values each.
    '''
    ignored_params = compat.compat_set(ignore_fields or tuple())
    bench_params = compat.compat_set(benchmark) - ignored_params
    test_params = compat.compat_set(test) - ignored_params
    # Check both the key names and the number of keys in case there are
    # different numbers of duplicate keys.
    comparable = (bench_params == test_params)
    key_counts = dict((key,0) for key in bench_params | test_params)
    for (key, val) in benchmark.items():
        if key not in ignored_params:
            key_counts[key] += len(val)
    for (key, val) in test.items():
        if key not in ignored_params:
            key_counts[key] -= len(val)
    # key_counts[key] == 0 iff benchmark and test hold equally many values.
    comparable = comparable and compat.compat_all(kc == 0 for kc in key_counts.values())
    status = Status()
    msg = []
    if not comparable:
        # Report exactly how the two data sets differ.
        status = Status([False])
        bench_only = bench_params - test_params
        test_only = test_params - bench_params
        msg.append('Different sets of data extracted from benchmark and test.')
        if bench_only:
            msg.append("    Data only in benchmark: %s." % ", ".join(bench_only))
        if test_only:
            msg.append("    Data only in test: %s." % ", ".join(test_only))
        bench_more = [key for key in key_counts
                if key_counts[key] > 0 and key not in bench_only]
        test_more = [key for key in key_counts
                if key_counts[key] < 0 and key not in test_only]
        if bench_more:
            msg.append("    More data in benchmark than in test: %s." %
                    ", ".join(bench_more))
        if test_more:
            msg.append("    More data in test than in benchmark: %s." %
                    ", ".join(test_more))
    for param in (bench_params & test_params):
        # Pick the tolerance: exact label match first, then treat tolerance
        # names as regexes against the label.
        param_tol = tolerances.get(param, default_tolerance)
        if param_tol == default_tolerance:
            # See if there's a regex that matches.
            tol_matches = [tol for tol in tolerances.values()
                    if tol.name and re.match(tol.name, param)]
            if tol_matches:
                param_tol = tol_matches[0]
                if len(tol_matches) > 1:
                    warnings.warn('Multiple tolerance regexes match. '
                            'Using %s.' % (param_tol.name))
        # Compare corresponding values pairwise, accumulating the worst
        # status seen.
        for bench_value, test_value in zip(benchmark[param], test[param]):
            key_status, err = param_tol.validate(test_value, bench_value, param)
            status += key_status
            if not key_status.passed() and err:
                msg.append(err)
    return (comparable, status, "\n".join(msg))
|
'''
testcode2.validation
--------------------
Classes and functions for comparing data.
:copyright: (c) 2012 <NAME>.
:license: modified BSD; see LICENSE for more details.
'''
import re
import sys
import warnings
import testcode2.ansi as ansi
import testcode2.compatibility as compat
import testcode2.exceptions as exceptions
class Status:
    '''Enum-like record of the outcome of a comparison.

    bools: iterable of booleans.  All True => passed, a mixture => partial
        pass (warning), none True => failed.
    status: an existing status level to copy.  Ignored if name is given.
    name: status name ('unknown', 'skipped', 'passed', 'partial',
        'failed').  Takes precedence over both bools and status.
    '''
    def __init__(self, bools=None, status=None, name=None):
        # Status levels.  The numeric ordering is significant: a larger
        # value means "more failed", which __add__ exploits via max().
        self._unknown = -2
        self._skipped = -1
        self._passed = 0
        self._partial = 1
        self._failed = 2
        if name is not None:
            # Resolve e.g. name='passed' to the self._passed level.
            self.status = getattr(self, '_' + name)
        elif status is not None:
            self.status = status
        elif bools:
            if compat.compat_all(bools):
                self.status = self._passed
            elif compat.compat_any(bools):
                self.status = self._partial
            else:
                self.status = self._failed
        else:
            # No information supplied at all: status is unknown.
            self.status = self._unknown
    def unknown(self):
        '''Return true if stored status is unknown.'''
        return self.status == self._unknown
    def skipped(self):
        '''Return true if stored status is skipped.'''
        return self.status == self._skipped
    def passed(self):
        '''Return true if stored status is passed.'''
        return self.status == self._passed
    def warning(self):
        '''Return true if stored status is a partial pass.'''
        return self.status == self._partial
    def failed(self):
        '''Return true if stored status is failed.'''
        return self.status == self._failed
    def print_status(self, msg=None, verbose=1, vspace=True):
        '''Print the status.

        msg: optional message printed after the status (verbose > 1 only).
        verbose: 0 prints a single character per status (., U, S, W, F)
            without a newline; 1 prints a one-word summary; 2 additionally
            prints msg; > 1 with vspace also prints a trailing blank line.
        vspace: print a trailing blank line when verbose > 1.
        '''
        if verbose > 0:
            if self.status == self._unknown:
                text = 'Unknown.'
            elif self.status == self._passed:
                text = 'Passed.'
            elif self.status == self._skipped:
                text = '%s.' % ansi.ansi_format('SKIPPED', 'blue')
            elif self.status == self._partial:
                text = '%s.' % ansi.ansi_format('WARNING', 'blue')
            else:
                text = '%s.' % ansi.ansi_format('**FAILED**', 'red', 'normal', 'bold')
            print(text)
            if verbose > 1:
                if msg:
                    print(msg)
                if vspace:
                    print('')
        else:
            # Compact single-character progress output (no newline).
            flags = {
                self._unknown: 'U',
                self._skipped: 'S',
                self._passed: '.',
                self._partial: 'W',
            }
            sys.stdout.write(flags.get(self.status, 'F'))
            sys.stdout.flush()
    def __add__(self, other):
        '''Combine two statuses, keeping the most "failed" level.'''
        return Status(status=max(self.status, other.status))
class Tolerance:
    '''Store absolute and relative tolerances.

    Two values are regarded as equal if they are within these tolerances.

    name: name of tolerance object.
    absolute: threshold for absolute difference between two numbers.
    relative: threshold for relative difference between two numbers.
    strict: if true, then require numbers to be within both thresholds.
    '''
    def __init__(self, name='', absolute=None, relative=None, strict=True):
        self.name = name
        self.absolute = absolute
        self.relative = relative
        # NOTE(review): truthiness test means a tolerance of exactly 0 is
        # treated as "not set" -- confirm this is intended.
        if not self.absolute and not self.relative:
            err = 'Neither absolute nor relative tolerance given.'
            raise exceptions.TestCodeError(err)
        self.strict = strict
    def __repr__(self):
        return (self.absolute, self.relative, self.strict).__repr__()
    def __hash__(self):
        # Hash on the name only; tolerances are looked up by name.
        return hash(self.name)
    def __eq__(self, other):
        # Full-state equality (name, thresholds, strictness).
        return (isinstance(other, self.__class__) and
                self.__dict__ == other.__dict__)
    def validate(self, test_val, benchmark_val, key=''):
        '''Compare test and benchmark values to within the tolerances.

        Returns a (Status, message) tuple.  Values that do not support
        arithmetic fall back to an exact equality check (TypeError path).
        '''
        status = Status([True])
        msg = ['values are within tolerance.']
        compare = '(Test: %s. Benchmark: %s.)' % (test_val, benchmark_val)
        try:
            # Check float is not NaN (which we can't compare).
            if compat.isnan(test_val) or compat.isnan(benchmark_val):
                status = Status([False])
                msg = ['cannot compare NaNs.']
            else:
                # Check if values are within tolerances.
                (status_absolute, msg_absolute) = \
                        self.validate_absolute(benchmark_val, test_val)
                (status_relative, msg_relative) = \
                        self.validate_relative(benchmark_val, test_val)
                if self.absolute and self.relative and not self.strict:
                    # Require only one of thresholds to be met.
                    status = Status([status_relative.passed(),
                                     status_absolute.passed()])
                else:
                    # Only have one or other of thresholds (require active one
                    # to be met) or have both and strict mode is on (require
                    # both to be met).
                    status = status_relative + status_absolute
                err_stat = ''
                if status.warning():
                    err_stat = 'Warning: '
                elif status.failed():
                    err_stat = 'ERROR: '
                # Rebuild the message from the per-threshold reports.
                msg = []
                if self.absolute and msg_absolute:
                    msg.append('%s%s %s' % (err_stat, msg_absolute, compare))
                if self.relative and msg_relative:
                    msg.append('%s%s %s' % (err_stat, msg_relative, compare))
        except TypeError:
            if test_val != benchmark_val:
                # require test and benchmark values to be equal (within python's
                # definition of equality).
                status = Status([False])
                msg = ['values are different. ' + compare]
        if key and msg:
            # Prefix the data label so the report names the offending field.
            msg.insert(0, key)
            msg = '\n '.join(msg)
        else:
            msg = '\n'.join(msg)
        return (status, msg)
    def validate_absolute(self, benchmark_val, test_val):
        '''Compare test and benchmark values to the absolute tolerance.'''
        if self.absolute:
            diff = test_val - benchmark_val
            err = abs(diff)
            passed = err < self.absolute
            msg = ''
            if not passed:
                msg = ('absolute error %.2e greater than %.2e.' %
                        (err, self.absolute))
        else:
            # No absolute threshold configured: vacuously pass.
            passed = True
            msg = 'No absolute tolerance set. Passing without checking.'
        return (Status([passed]), msg)
    def validate_relative(self, benchmark_val, test_val):
        '''Compare test and benchmark values to the relative tolerance.'''
        if self.relative:
            diff = test_val - benchmark_val
            # Guard the division: zero benchmark with zero difference is a
            # perfect match; zero benchmark with any difference has infinite
            # relative error.
            if benchmark_val == 0 and diff == 0:
                err = 0
            elif benchmark_val == 0:
                err = float("Inf")
            else:
                err = abs(diff/benchmark_val)
            passed = err < self.relative
            msg = ''
            if not passed:
                msg = ('relative error %.2e greater than %.2e.' %
                        (err, self.relative))
        else:
            # No relative threshold configured: vacuously pass.
            passed = True
            msg = 'No relative tolerance set. Passing without checking.'
        return (Status([passed]), msg)
def compare_data(benchmark, test, default_tolerance, tolerances,
        ignore_fields=None):
    '''Compare two data dictionaries.

    benchmark, test: dicts mapping a data label to the sequence of values
        extracted for that label.
    default_tolerance: Tolerance used for labels with no specific entry.
    tolerances: dict of label -> Tolerance; a Tolerance name may also be a
        regex matched against the label.
    ignore_fields: labels excluded from the comparison.

    Returns (comparable, status, message), where comparable is True only if
    both dicts contain the same labels with the same number of values each.
    '''
    ignored_params = compat.compat_set(ignore_fields or tuple())
    bench_params = compat.compat_set(benchmark) - ignored_params
    test_params = compat.compat_set(test) - ignored_params
    # Check both the key names and the number of keys in case there are
    # different numbers of duplicate keys.
    comparable = (bench_params == test_params)
    key_counts = dict((key,0) for key in bench_params | test_params)
    for (key, val) in benchmark.items():
        if key not in ignored_params:
            key_counts[key] += len(val)
    for (key, val) in test.items():
        if key not in ignored_params:
            key_counts[key] -= len(val)
    # key_counts[key] == 0 iff benchmark and test hold equally many values.
    comparable = comparable and compat.compat_all(kc == 0 for kc in key_counts.values())
    status = Status()
    msg = []
    if not comparable:
        # Report exactly how the two data sets differ.
        status = Status([False])
        bench_only = bench_params - test_params
        test_only = test_params - bench_params
        msg.append('Different sets of data extracted from benchmark and test.')
        if bench_only:
            msg.append("    Data only in benchmark: %s." % ", ".join(bench_only))
        if test_only:
            msg.append("    Data only in test: %s." % ", ".join(test_only))
        bench_more = [key for key in key_counts
                if key_counts[key] > 0 and key not in bench_only]
        test_more = [key for key in key_counts
                if key_counts[key] < 0 and key not in test_only]
        if bench_more:
            msg.append("    More data in benchmark than in test: %s." %
                    ", ".join(bench_more))
        if test_more:
            msg.append("    More data in test than in benchmark: %s." %
                    ", ".join(test_more))
    for param in (bench_params & test_params):
        # Pick the tolerance: exact label match first, then treat tolerance
        # names as regexes against the label.
        param_tol = tolerances.get(param, default_tolerance)
        if param_tol == default_tolerance:
            # See if there's a regex that matches.
            tol_matches = [tol for tol in tolerances.values()
                    if tol.name and re.match(tol.name, param)]
            if tol_matches:
                param_tol = tol_matches[0]
                if len(tol_matches) > 1:
                    warnings.warn('Multiple tolerance regexes match. '
                            'Using %s.' % (param_tol.name))
        # Compare corresponding values pairwise, accumulating the worst
        # status seen.
        for bench_value, test_value in zip(benchmark[param], test[param]):
            key_status, err = param_tol.validate(test_value, bench_value, param)
            status += key_status
            if not key_status.passed() and err:
                msg.append(err)
    return (comparable, status, "\n".join(msg))
|
en
| 0.819025
|
testcode2.validation -------------------- Classes and functions for comparing data. :copyright: (c) 2012 <NAME>. :license: modified BSD; see LICENSE for more details. Enum-esque object for storing whether an object passed a comparison. bools: iterable of boolean objects. If all booleans are True (False) then the status is set to pass (fail) and if only some booleans are True, the status is set to warning (partial pass). status: existing status to use. bools is ignored if status is supplied. name: name of status (unknown, skipped, passed, partial, failed) to use. Setting name overrides bools and status. Return true if stored status is unknown. Return true if stored status is skipped. Return true if stored status is passed. Return true if stored status is a partial pass. Return true if stored status is failed. Print status. msg: optional message to print out after status. verbose: 0: suppress all output except for . (for pass), U (for unknown), W (for warning/partial pass) and F (for fail) without a newline. 1: print 'Passed', 'Unknown', 'WARNING' or '**FAILED**'. 2: as for 1 plus print msg (if supplied). 3: as for 2 plus print a blank line. vspace: print out extra new line afterwards if verbose > 1. Add two status objects. Return the maximum level (ie most "failed") status. Store absolute and relative tolerances Given are regarded as equal if they are within these tolerances. name: name of tolerance object. absolute: threshold for absolute difference between two numbers. relative: threshold for relative difference between two numbers. strict: if true, then require numbers to be within both thresholds. Compare test and benchmark values to within the tolerances. # Check float is not NaN (which we can't compare). # Check if values are within tolerances. # Require only one of thresholds to be met. # Only have one or other of thresholds (require active one # to be met) or have both and strict mode is on (require # both to be met). 
# require test and benchmark values to be equal (within python's # definition of equality). Compare test and benchmark values to the absolute tolerance. Compare test and benchmark values to the relative tolerance. Compare two data dictionaries. # Check both the key names and the number of keys in case there are # different numbers of duplicate keys. # See if there's a regex that matches.
| 3.247451
| 3
|
sudoku_solver/board.py
|
Blondberg/py-sudoku-solver-mk2
| 0
|
6628428
|
<filename>sudoku_solver/board.py
from pygame.constants import K_LEFT, K_RIGHT
from input_box import InputBox
import pygame
class Board:
    """9x9 sudoku board: a grid of InputBox widgets plus a plain-int mirror.

    Keeps two parallel representations -- ``board`` (pygame InputBox cells)
    and ``numeric_board`` (ints, 0 == empty) -- and tracks which cell has
    keyboard focus.
    """
    def __init__(self) -> None:
        # Grid dimensions and pixel size of each cell.
        self.ROW_COUNT = 9
        self.COL_COUNT = 9
        self.BOX_WIDTH = 50
        self.BOX_HEIGHT = 50
        # Coordinates of the cell that currently has keyboard focus.
        self.active_row = 0
        self.active_col = 0
        # Integer view of the board; 0 denotes an empty cell.
        self.numeric_board = [[0 for i in range(self.COL_COUNT)] for j in range(self.ROW_COUNT)]
        # Widget view: one InputBox per cell, with a 3px gutter between cells.
        self.board = [[InputBox((self.BOX_WIDTH + 3)*i,
                        (self.BOX_HEIGHT + 3)*j,
                        self.BOX_WIDTH,
                        self.BOX_HEIGHT)
                        for i in range(self.COL_COUNT)] for j in range(self.ROW_COUNT)]
    def board_to_numeric_board(self):
        """Copy the InputBox texts into numeric_board (non-numeric -> 0)."""
        print("Converting board to numeric board")
        for row in range(self.ROW_COUNT):
            for col in range(self.COL_COUNT):
                # Empty or non-numeric cell text is stored as 0.
                self.numeric_board[row][col] = int(self.board[row][col].get_text() if str(self.board[row][col].get_text()).isnumeric() else 0)
    def numeric_board_to_board(self):
        """Copy numeric_board back into the InputBox texts."""
        print("Converting numeric board to board")
        for row in range(self.ROW_COUNT):
            for col in range(self.COL_COUNT):
                self.board[row][col].set_text(str(self.numeric_board[row][col]))
    def print_board(self):
        """Debug helper: print the rows of InputBox objects."""
        print("####")
        for row in self.board:
            print(row)
        print("####")
    def draw(self, screen):
        """Draw every cell plus the heavy 3x3 sub-grid borders."""
        for row in self.board:
            for input_box in row:
                input_box.draw(screen)
        # Outline each of the nine 3x3 sub-grids with a thicker rectangle.
        for i in range(3):
            for j in range(3):
                pygame.draw.rect(screen, pygame.Color(0,0,0),pygame.Rect(i*159, j*159, 158, 158), 3)
    def handle_event(self, event):
        """Route a pygame event to the cells; arrow keys move the focus."""
        for row in range(self.ROW_COUNT):
            for col in range(self.COL_COUNT): # Could do foreach, but need the number of row and col
                box = self.board[row][col]
                box.handle_event(event)
                if event.type == pygame.MOUSEBUTTONDOWN:
                    # Remember which cell was activated by the click.
                    if box.active:
                        self.active_row, self.active_col = row, col
        # Arrow keys move the focused cell, clamped to the 9x9 grid edges.
        if event.type == pygame.KEYDOWN:
            if event.key == K_LEFT:
                if self.active_col > 0:
                    self.board[self.active_row][self.active_col].set_active(False)
                    self.active_col -= 1
                    self.board[self.active_row][self.active_col].set_active(True)
            if event.key == K_RIGHT:
                if self.active_col < 8:
                    self.board[self.active_row][self.active_col].set_active(False)
                    self.active_col += 1
                    self.board[self.active_row][self.active_col].set_active(True)
            if event.key == pygame.K_UP:
                if self.active_row > 0:
                    self.board[self.active_row][self.active_col].set_active(False)
                    self.active_row -= 1
                    self.board[self.active_row][self.active_col].set_active(True)
            if event.key == pygame.K_DOWN:
                if self.active_row < 8:
                    self.board[self.active_row][self.active_col].set_active(False)
                    self.active_row += 1
                    self.board[self.active_row][self.active_col].set_active(True)
    def get_board(self):
        """Return the grid of InputBox widgets."""
        return self.board
    def get_numeric_board(self):
        """Return the integer view of the board."""
        return self.numeric_board
|
<filename>sudoku_solver/board.py
from pygame.constants import K_LEFT, K_RIGHT
from input_box import InputBox
import pygame
class Board:
    """9x9 sudoku board: a grid of InputBox widgets plus a plain-int mirror.

    Keeps two parallel representations -- ``board`` (pygame InputBox cells)
    and ``numeric_board`` (ints, 0 == empty) -- and tracks which cell has
    keyboard focus.
    """
    def __init__(self) -> None:
        # Grid dimensions and pixel size of each cell.
        self.ROW_COUNT = 9
        self.COL_COUNT = 9
        self.BOX_WIDTH = 50
        self.BOX_HEIGHT = 50
        # Coordinates of the cell that currently has keyboard focus.
        self.active_row = 0
        self.active_col = 0
        # Integer view of the board; 0 denotes an empty cell.
        self.numeric_board = [[0 for i in range(self.COL_COUNT)] for j in range(self.ROW_COUNT)]
        # Widget view: one InputBox per cell, with a 3px gutter between cells.
        self.board = [[InputBox((self.BOX_WIDTH + 3)*i,
                        (self.BOX_HEIGHT + 3)*j,
                        self.BOX_WIDTH,
                        self.BOX_HEIGHT)
                        for i in range(self.COL_COUNT)] for j in range(self.ROW_COUNT)]
    def board_to_numeric_board(self):
        """Copy the InputBox texts into numeric_board (non-numeric -> 0)."""
        print("Converting board to numeric board")
        for row in range(self.ROW_COUNT):
            for col in range(self.COL_COUNT):
                # Empty or non-numeric cell text is stored as 0.
                self.numeric_board[row][col] = int(self.board[row][col].get_text() if str(self.board[row][col].get_text()).isnumeric() else 0)
    def numeric_board_to_board(self):
        """Copy numeric_board back into the InputBox texts."""
        print("Converting numeric board to board")
        for row in range(self.ROW_COUNT):
            for col in range(self.COL_COUNT):
                self.board[row][col].set_text(str(self.numeric_board[row][col]))
    def print_board(self):
        """Debug helper: print the rows of InputBox objects."""
        print("####")
        for row in self.board:
            print(row)
        print("####")
    def draw(self, screen):
        """Draw every cell plus the heavy 3x3 sub-grid borders."""
        for row in self.board:
            for input_box in row:
                input_box.draw(screen)
        # Outline each of the nine 3x3 sub-grids with a thicker rectangle.
        for i in range(3):
            for j in range(3):
                pygame.draw.rect(screen, pygame.Color(0,0,0),pygame.Rect(i*159, j*159, 158, 158), 3)
    def handle_event(self, event):
        """Route a pygame event to the cells; arrow keys move the focus."""
        for row in range(self.ROW_COUNT):
            for col in range(self.COL_COUNT): # Could do foreach, but need the number of row and col
                box = self.board[row][col]
                box.handle_event(event)
                if event.type == pygame.MOUSEBUTTONDOWN:
                    # Remember which cell was activated by the click.
                    if box.active:
                        self.active_row, self.active_col = row, col
        # Arrow keys move the focused cell, clamped to the 9x9 grid edges.
        if event.type == pygame.KEYDOWN:
            if event.key == K_LEFT:
                if self.active_col > 0:
                    self.board[self.active_row][self.active_col].set_active(False)
                    self.active_col -= 1
                    self.board[self.active_row][self.active_col].set_active(True)
            if event.key == K_RIGHT:
                if self.active_col < 8:
                    self.board[self.active_row][self.active_col].set_active(False)
                    self.active_col += 1
                    self.board[self.active_row][self.active_col].set_active(True)
            if event.key == pygame.K_UP:
                if self.active_row > 0:
                    self.board[self.active_row][self.active_col].set_active(False)
                    self.active_row -= 1
                    self.board[self.active_row][self.active_col].set_active(True)
            if event.key == pygame.K_DOWN:
                if self.active_row < 8:
                    self.board[self.active_row][self.active_col].set_active(False)
                    self.active_row += 1
                    self.board[self.active_row][self.active_col].set_active(True)
    def get_board(self):
        """Return the grid of InputBox widgets."""
        return self.board
    def get_numeric_board(self):
        """Return the integer view of the board."""
        return self.numeric_board
|
en
| 0.714461
|
###") ###") # Could do foreach, but need the number of row and col
| 3.625557
| 4
|
PR/production/personal_rank.py
|
Cathy-t/basic_recommendation_algorithm
| 6
|
6628429
|
# -*- coding: utf-8 -*-
# @Time : 2019/3/18 17:40
# @Author : Cathy
# @FileName: personal_rank.py
# @Software: PyCharm
from __future__ import division
import sys
sys.path.append("../util")
import util.read as read
import operator
import util.mat_util as mat_util
# gmres: iterative solver used to solve the sparse PersonalRank linear system
from scipy.sparse.linalg import gmres
import numpy as np
def personal_rank(graph, root, alpha, iter_num, recom_num=10):
    """Recommend items for one user with the iterative PersonalRank walk.

    :param graph: bipartite user/item graph as a dict of dicts; item vertex
        ids contain an underscore (e.g. 'item_42'), user ids do not
    :param root: the fixed user vertex to produce recommendations for
    :param alpha: probability of continuing the walk at each step
        (1 - alpha is the probability of restarting at ``root``)
    :param iter_num: maximum number of iterations
    :param recom_num: maximum number of items to recommend
    :return: dict mapping itemid -> PR score, at most ``recom_num`` entries

    Fixes over the previous version: the initial PR vector is 0 for every
    vertex except ``root`` (was 1 everywhere, contradicting the documented
    initial condition); the restart mass 1 - alpha is added to ``root``
    once per iteration (was once per in-edge of ``root``); and exactly
    ``recom_num`` items are returned (was ``recom_num`` + 1).
    """
    # Initial condition of the walk: all probability mass on the root.
    rank = {point: 0 for point in graph}
    rank[root] = 1
    recom_result = {}
    for iter_index in range(iter_num):
        # PR values of this round, recomputed from the previous round.
        tmp_rank = {point: 0 for point in graph}
        # Each vertex spreads alpha * its PR mass uniformly over its
        # out-edges.
        for out_point, out_dict in graph.items():
            for inner_point in out_dict:
                tmp_rank[inner_point] += round(alpha * rank[out_point] / len(out_dict), 4)
        # Restart term: with probability 1 - alpha the walker jumps back
        # to the root vertex.
        tmp_rank[root] += round(1 - alpha, 4)
        if tmp_rank == rank:
            # Converged before exhausting iter_num iterations.
            print("out" + str(iter_index))
            break
        rank = tmp_rank
    # Rank all vertices by PR score, keeping item vertices the root user
    # has not interacted with yet.
    right_num = 0
    for point, pr_score in sorted(rank.items(), key=operator.itemgetter(1),
                                  reverse=True):
        # Skip user vertices: item ids contain an underscore.
        if len(point.split('_')) < 2:
            continue
        # Skip items the root user has already acted on.
        if point in graph[root]:
            continue
        recom_result[point] = pr_score
        right_num += 1
        if right_num >= recom_num:
            break
    return recom_result
def personal_rank_mat(graph,root,alpha,recom_num=10):
    """Recommend items for one user via the matrix form of PersonalRank.

    :param graph: bipartite user/item graph (dict of dicts; item vertex
        ids contain an underscore, user ids do not)
    :param root: the fixed user vertex to recommend for
    :param alpha: probability of continuing the random walk
    :param recom_num: number of items to recommend
    :return: dict mapping itemid -> PR score

    Instead of iterating, solves the PersonalRank linear system A*r = r0
    (equivalent to inverting the sparse coefficient matrix).
    """
    m, vertex, address_dict = mat_util.graph_to_m(graph)
    if root not in address_dict:
        # Unknown user vertex: nothing to recommend.
        return {}
    score_dict = {}
    recom_dict = {}
    # Coefficient matrix of the PersonalRank linear system.
    mat_all = mat_util.mat_all_point(m,vertex,alpha)
    # Index of the root vertex, needed to build the r0 vector.
    index = address_dict[root]
    # r0: all zeros except a single 1 at the root vertex.
    initial_list = [[0] for row in range(len(vertex))]
    initial_list[index] = [1]
    r_zero = np.array(initial_list)
    # Solve the sparse linear system; gmres returns (solution, info) and
    # tol is the residual tolerance, hence the [0].
    res = gmres(mat_all,r_zero,tol=1e-8)[0]
    for index in range(len(res)):
        # Keep item vertices only (item ids contain an underscore).
        point = vertex[index]
        if len(point.strip().split("_")) < 2:
            continue
        # Skip items the root user has already interacted with.
        if point in graph[root]:
            continue
        score_dict[point] = round(res[index],3)
    # Sort by PR score and return the top recom_num items.
    for zuhe in sorted(score_dict.items(),key=operator.itemgetter(1),reverse=True)[:recom_num]:
        point,score = zuhe[0],zuhe[1]
        recom_dict[point] = score
    return recom_dict
# PersonalRank, basic iterative version.
def get_one_user_recom():
    """Run the iterative PersonalRank recommendation for the fixed user "1".

    Reads the rating data from ../data/ratings.txt and returns the
    recommendation dict (itemid -> PR score) for the top 100 items.
    """
    user = "1"
    alpha = 0.8
    graph = read.get_graph_from_data("../data/ratings.txt")
    iter_num = 100
    recom_result = personal_rank(graph,user,alpha,iter_num,100)
    return recom_result
# NOTE(review): disabled debugging/analysis code below, kept as an inert
# module-level string literal.
"""
item_info = read.get_item_info("../data/movies.txt")
# 打印出用户感兴趣的item ,以便于分析结果
for itemid in graph[user]:
    pure_itemid = itemid.split("_")[1]
    print(item_info[pure_itemid])
print("result---")
for itemid in recom_result:
    pure_itemid = itemid.split("_")[1]
    print(item_info[pure_itemid])
    print(recom_result[itemid])
"""
# PersonalRank, matrix (linear-system) version.
def get_one_user_by_mat():
    """Run the matrix-based PersonalRank recommendation for user "1".

    Reads the rating data from ../data/ratings.txt and returns the
    recommendation dict (itemid -> PR score) for the top 100 items.
    """
    user = "1"
    alpha = 0.8
    graph = read.get_graph_from_data("../data/ratings.txt")
    recom_result = personal_rank_mat(graph,user,alpha,100)
    return recom_result
if __name__ == "__main__":
    # Run both implementations and compare their recommendations.
    recom_result_base = get_one_user_recom()
    recom_result_mat = get_one_user_by_mat()
    # Count how many recommended items the two methods have in common.
    num = 0
    for ele in recom_result_base:
        if ele in recom_result_mat:
            num += 1
    # A large overlap (e.g. 99 of the top 100) means the two methods
    # effectively agree.
    print(num)
|
# -*- coding: utf-8 -*-
# @Time : 2019/3/18 17:40
# @Author : Cathy
# @FileName: personal_rank.py
# @Software: PyCharm
from __future__ import division
import sys
sys.path.append("../util")
import util.read as read
import operator
import util.mat_util as mat_util
# gmres: iterative solver used to solve the sparse PersonalRank linear system
from scipy.sparse.linalg import gmres
import numpy as np
def personal_rank(graph, root, alpha, iter_num, recom_num=10):
    """Recommend items for one user with the iterative PersonalRank walk.

    :param graph: bipartite user/item graph as a dict of dicts; item vertex
        ids contain an underscore (e.g. 'item_42'), user ids do not
    :param root: the fixed user vertex to produce recommendations for
    :param alpha: probability of continuing the walk at each step
        (1 - alpha is the probability of restarting at ``root``)
    :param iter_num: maximum number of iterations
    :param recom_num: maximum number of items to recommend
    :return: dict mapping itemid -> PR score, at most ``recom_num`` entries

    Fixes over the previous version: the initial PR vector is 0 for every
    vertex except ``root`` (was 1 everywhere, contradicting the documented
    initial condition); the restart mass 1 - alpha is added to ``root``
    once per iteration (was once per in-edge of ``root``); and exactly
    ``recom_num`` items are returned (was ``recom_num`` + 1).
    """
    # Initial condition of the walk: all probability mass on the root.
    rank = {point: 0 for point in graph}
    rank[root] = 1
    recom_result = {}
    for iter_index in range(iter_num):
        # PR values of this round, recomputed from the previous round.
        tmp_rank = {point: 0 for point in graph}
        # Each vertex spreads alpha * its PR mass uniformly over its
        # out-edges.
        for out_point, out_dict in graph.items():
            for inner_point in out_dict:
                tmp_rank[inner_point] += round(alpha * rank[out_point] / len(out_dict), 4)
        # Restart term: with probability 1 - alpha the walker jumps back
        # to the root vertex.
        tmp_rank[root] += round(1 - alpha, 4)
        if tmp_rank == rank:
            # Converged before exhausting iter_num iterations.
            print("out" + str(iter_index))
            break
        rank = tmp_rank
    # Rank all vertices by PR score, keeping item vertices the root user
    # has not interacted with yet.
    right_num = 0
    for point, pr_score in sorted(rank.items(), key=operator.itemgetter(1),
                                  reverse=True):
        # Skip user vertices: item ids contain an underscore.
        if len(point.split('_')) < 2:
            continue
        # Skip items the root user has already acted on.
        if point in graph[root]:
            continue
        recom_result[point] = pr_score
        right_num += 1
        if right_num >= recom_num:
            break
    return recom_result
def personal_rank_mat(graph,root,alpha,recom_num=10):
    """Matrix form of PersonalRank: solve the linear system instead of iterating.

    :param graph: bipartite user-item graph (dict of dicts)
    :param root: the user vertex to recommend for
    :param alpha: probability of continuing the random walk
    :param recom_num: number of items to recommend
    :return: dict mapping item vertex -> rounded PR score

    Linear-algebra background: computing all PR values amounts to solving
    A * r = r0, where A is built from the transition matrix.
    """
    m, vertex, address_dict = mat_util.graph_to_m(graph)
    if root not in address_dict:
        return {}
    score_dict = {}
    recom_dict = {}
    # Coefficient matrix whose solve yields the recommendation scores.
    mat_all = mat_util.mat_all_point(m, vertex, alpha)
    # Index of the root vertex, needed to build the one-hot r0 vector.
    root_pos = address_dict[root]
    # r0: column vector that is 1 at the root vertex and 0 everywhere else.
    r_zero = np.array([[1] if pos == root_pos else [0] for pos in range(len(vertex))])
    # Solve the sparse linear system; gmres returns (solution, info) and
    # tol is the requested residual tolerance.
    res = gmres(mat_all, r_zero, tol=1e-8)[0]
    for pos, raw_score in enumerate(res):
        candidate = vertex[pos]
        # Keep item vertices only (their names contain an underscore).
        if len(candidate.strip().split("_")) < 2:
            continue
        # Skip items the root user has already behaved on.
        if candidate in graph[root]:
            continue
        score_dict[candidate] = round(raw_score, 3)
    # Sort by PR score and keep the top recom_num entries.
    ranked = sorted(score_dict.items(), key=operator.itemgetter(1), reverse=True)[:recom_num]
    for candidate, score in ranked:
        recom_dict[candidate] = score
    return recom_dict
# personal rank 基础版本
def get_one_user_recom():
    """Demo entry point: basic (iterative) PersonalRank for the fixed user "1"."""
    demo_user = "1"
    walk_prob = 0.8
    max_iter = 100
    bipartite_graph = read.get_graph_from_data("../data/ratings.txt")
    return personal_rank(bipartite_graph, demo_user, walk_prob, max_iter, 100)
"""
item_info = read.get_item_info("../data/movies.txt")
# 打印出用户感兴趣的item ,以便于分析结果
for itemid in graph[user]:
pure_itemid = itemid.split("_")[1]
print(item_info[pure_itemid])
print("result---")
for itemid in recom_result:
pure_itemid = itemid.split("_")[1]
print(item_info[pure_itemid])
print(recom_result[itemid])
"""
# personal rank采用矩阵版本
def get_one_user_by_mat():
    """Demo entry point: matrix-form PersonalRank for the fixed user "1"."""
    demo_user = "1"
    restart_alpha = 0.8
    user_item_graph = read.get_graph_from_data("../data/ratings.txt")
    return personal_rank_mat(user_item_graph, demo_user, restart_alpha, 100)
if __name__ == "__main__":
# 将两种方式进行对比
recom_result_base = get_one_user_recom()
recom_result_mat = get_one_user_by_mat()
# 二种方式下的推荐结果有多少是相同
num = 0
for ele in recom_result_base:
if ele in recom_result_mat:
num += 1
# 输出的num说明两种方式推荐出来的结果的相似度,99说明在top-N=100中,重合率很高,即两种方式效果一样
print(num)
|
zh
| 0.798295
|
# -*- coding: utf-8 -*- # @Time : 2019/3/18 17:40 # @Author : Cathy # @FileName: personal_rank.py # @Software: PyCharm # 解稀疏矩阵的方程所需使用的模块 gmres :param graph: user item graph 之前得到的user和item的图结构 :param root: the fixed user for which to recom 将要给哪个user推荐 :param alpha: the prob to go to random walk 以alpha的概率选择向下游走,以1-alpha的概率选择回到起点 :param iter_num: iteration num 迭代次序 :param recom_num: recom item num 推荐的结果 :return: a dict: key itemid,value pr值 字典的长度即为指定的推荐的item的个数 # 定义一个数据结构来存储所有的顶点对于root顶点的pr值 # pr算法中,pr值的初始条件中:除root顶点外其余顶点的pr值均为0 # 定义一个输出数据结构 # 初始化一个临时的数据结构,此数据结构用于存储该迭代轮次下,其余顶点对root顶点的pr值 # PR算法公式书写:分为上下两部分 # 在上部分中,如果该顶点不是root顶点,它的pr值就是所有连接到该顶点的顶点 # 将自己的pr值,以1/n的概率贡献到该顶点上(n就是连接到该顶点的顶点的出度) # 如果该迭代轮次下的临时的数据结构和装载所有顶点对root顶点pr值的数据结构完全相同时,即为迭代充分 # 此时可提前结束迭代 # 是否是迭代完成了iter_num次,还是迭代到中间的部分就完成了收敛 # 若不相同,则需将本轮次最新迭代出的root顶点的pr值,赋值给rank # rank迭代完成后,对rank中的pr值进行排序,并过滤掉其中的user顶点和root顶点已经行为过的item,这样就能得到最终的推荐结果 # 定义一个计数器,帮助记录如果推荐的item的数目达到了要求,就可以返回 # Step1:排序 # 如果该顶点不是item顶点,则需要过滤掉 # 如果该顶点是item顶点,且被root顶点行为过,仍需要过滤掉 :param graph: user item graph 用户物品的二分图 :param root: the fix user to recom 固定用户推荐 :param alpha: the prob to random walk 随机游走的概率 :param recom_num: recom item num :return: a dict, key :itemid ,value:pr score 线代相关知识:求矩阵的逆矩阵,即解线性方程 Ax = E (A*r = r0) # 求其逆,便可以得到推荐结果 # 首先得到root顶点的index,得到index的目的是为了获得r0矩阵 # 初始化r0矩阵 # r_zero = np.concatenate(r_zero,axis=0) # 解线性方程,得到的是一个元组,其中tol指的是误差 # 首先判断该顶点是否是item顶点 # 若已经行为过,则也没有必要记录 # 讲pr值排序,返回推荐结果 # personal rank 基础版本 give one fix_user recom result item_info = read.get_item_info("../data/movies.txt") # 打印出用户感兴趣的item ,以便于分析结果 for itemid in graph[user]: pure_itemid = itemid.split("_")[1] print(item_info[pure_itemid]) print("result---") for itemid in recom_result: pure_itemid = itemid.split("_")[1] print(item_info[pure_itemid]) print(recom_result[itemid]) # personal rank采用矩阵版本 give one fix user by mat # 将两种方式进行对比 # 二种方式下的推荐结果有多少是相同 # 输出的num说明两种方式推荐出来的结果的相似度,99说明在top-N=100中,重合率很高,即两种方式效果一样
| 2.62358
| 3
|
mfem/_par/densemat.py
|
kennyweiss/PyMFEM
| 0
|
6628430
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 4.0.2
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info < (2, 7, 0):
    raise RuntimeError("Python 2.7 or later required")
# Import the low-level C/C++ module
if __package__ or "." in __name__:
    from . import _densemat
else:
    import _densemat
# Py2/Py3 compatibility: alias the builtins module under the Py2 name.
try:
    import builtins as __builtin__
except ImportError:
    import __builtin__
# Helpers supplied by the C extension that wrap raw C functions as
# instance/static methods on the generated proxy classes.
_swig_new_instance_method = _densemat.SWIG_PyInstanceMethod_New
_swig_new_static_method = _densemat.SWIG_PyStaticMethod_New
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
def _swig_setattr_nondynamic_instance_variable(set):
def set_instance_attr(self, name, value):
if name == "thisown":
self.this.own(value)
elif name == "this":
set(self, name, value)
elif hasattr(self, name) and isinstance(getattr(type(self), name), property):
set(self, name, value)
else:
raise AttributeError("You cannot add instance attributes to %s" % self)
return set_instance_attr
def _swig_setattr_nondynamic_class_variable(set):
def set_class_attr(cls, name, value):
if hasattr(cls, name) and not isinstance(getattr(cls, name), property):
set(cls, name, value)
else:
raise AttributeError("You cannot add class attributes to %s" % cls)
return set_class_attr
def _swig_add_metaclass(metaclass):
"""Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass"""
def wrapper(cls):
return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
return wrapper
class _SwigNonDynamicMeta(type):
    """Meta class to enforce nondynamic attributes (no new attributes) for a class"""
    # Reuse the guarded setattr so assigning an unknown class attribute raises.
    __setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__)
import weakref
import mfem._par.mem_manager
import mfem._par.array
import mfem._par.vector
import mfem._par.operators
import mfem._par.matrix
class DenseMatrix(mfem._par.matrix.Matrix):
    r"""Proxy of C++ mfem::DenseMatrix class."""
    # NOTE: SWIG-generated proxy class (see file header). Every method simply
    # forwards to the C extension _densemat; do not hand-edit these wrappers,
    # regenerate them from the SWIG interface file instead.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    def __init__(self, *args):
        r"""
        __init__(DenseMatrix self) -> DenseMatrix
        __init__(DenseMatrix self, DenseMatrix arg2) -> DenseMatrix
        __init__(DenseMatrix self, int s) -> DenseMatrix
        __init__(DenseMatrix self, int m, int n) -> DenseMatrix
        __init__(DenseMatrix self, DenseMatrix mat, char ch) -> DenseMatrix
        __init__(DenseMatrix self, double * d, int h, int w) -> DenseMatrix
        """
        _densemat.DenseMatrix_swiginit(self, _densemat.new_DenseMatrix(*args))
    def UseExternalData(self, d, h, w):
        r"""UseExternalData(DenseMatrix self, double * d, int h, int w)"""
        return _densemat.DenseMatrix_UseExternalData(self, d, h, w)
    UseExternalData = _swig_new_instance_method(_densemat.DenseMatrix_UseExternalData)
    def Reset(self, d, h, w):
        r"""Reset(DenseMatrix self, double * d, int h, int w)"""
        return _densemat.DenseMatrix_Reset(self, d, h, w)
    Reset = _swig_new_instance_method(_densemat.DenseMatrix_Reset)
    def ClearExternalData(self):
        r"""ClearExternalData(DenseMatrix self)"""
        return _densemat.DenseMatrix_ClearExternalData(self)
    ClearExternalData = _swig_new_instance_method(_densemat.DenseMatrix_ClearExternalData)
    def Clear(self):
        r"""Clear(DenseMatrix self)"""
        return _densemat.DenseMatrix_Clear(self)
    Clear = _swig_new_instance_method(_densemat.DenseMatrix_Clear)
    def Size(self):
        r"""Size(DenseMatrix self) -> int"""
        return _densemat.DenseMatrix_Size(self)
    Size = _swig_new_instance_method(_densemat.DenseMatrix_Size)
    def SetSize(self, *args):
        r"""
        SetSize(DenseMatrix self, int s)
        SetSize(DenseMatrix self, int h, int w)
        """
        return _densemat.DenseMatrix_SetSize(self, *args)
    SetSize = _swig_new_instance_method(_densemat.DenseMatrix_SetSize)
    def Data(self):
        r"""Data(DenseMatrix self) -> double *"""
        return _densemat.DenseMatrix_Data(self)
    Data = _swig_new_instance_method(_densemat.DenseMatrix_Data)
    def GetData(self):
        r"""GetData(DenseMatrix self) -> double *"""
        return _densemat.DenseMatrix_GetData(self)
    GetData = _swig_new_instance_method(_densemat.DenseMatrix_GetData)
    def GetMemory(self, *args):
        r"""
        GetMemory(DenseMatrix self) -> mfem::Memory< double >
        GetMemory(DenseMatrix self) -> mfem::Memory< double > const &
        """
        return _densemat.DenseMatrix_GetMemory(self, *args)
    GetMemory = _swig_new_instance_method(_densemat.DenseMatrix_GetMemory)
    def OwnsData(self):
        r"""OwnsData(DenseMatrix self) -> bool"""
        return _densemat.DenseMatrix_OwnsData(self)
    OwnsData = _swig_new_instance_method(_densemat.DenseMatrix_OwnsData)
    def __call__(self, *args):
        r"""
        __call__(DenseMatrix self, int i, int j) -> double
        __call__(DenseMatrix self, int i, int j) -> double const &
        """
        return _densemat.DenseMatrix___call__(self, *args)
    __call__ = _swig_new_instance_method(_densemat.DenseMatrix___call__)
    def __mul__(self, m):
        r"""__mul__(DenseMatrix self, DenseMatrix m) -> double"""
        return _densemat.DenseMatrix___mul__(self, m)
    __mul__ = _swig_new_instance_method(_densemat.DenseMatrix___mul__)
    def Trace(self):
        r"""Trace(DenseMatrix self) -> double"""
        return _densemat.DenseMatrix_Trace(self)
    Trace = _swig_new_instance_method(_densemat.DenseMatrix_Trace)
    def Elem(self, *args):
        r"""
        Elem(DenseMatrix self, int i, int j) -> double
        Elem(DenseMatrix self, int i, int j) -> double const &
        """
        return _densemat.DenseMatrix_Elem(self, *args)
    Elem = _swig_new_instance_method(_densemat.DenseMatrix_Elem)
    def Mult(self, *args):
        r"""
        Mult(DenseMatrix self, double const * x, double * y)
        Mult(DenseMatrix self, Vector x, Vector y)
        """
        return _densemat.DenseMatrix_Mult(self, *args)
    Mult = _swig_new_instance_method(_densemat.DenseMatrix_Mult)
    def MultTranspose(self, *args):
        r"""
        MultTranspose(DenseMatrix self, double const * x, double * y)
        MultTranspose(DenseMatrix self, Vector x, Vector y)
        """
        return _densemat.DenseMatrix_MultTranspose(self, *args)
    MultTranspose = _swig_new_instance_method(_densemat.DenseMatrix_MultTranspose)
    def AddMult(self, x, y):
        r"""AddMult(DenseMatrix self, Vector x, Vector y)"""
        return _densemat.DenseMatrix_AddMult(self, x, y)
    AddMult = _swig_new_instance_method(_densemat.DenseMatrix_AddMult)
    def AddMultTranspose(self, x, y):
        r"""AddMultTranspose(DenseMatrix self, Vector x, Vector y)"""
        return _densemat.DenseMatrix_AddMultTranspose(self, x, y)
    AddMultTranspose = _swig_new_instance_method(_densemat.DenseMatrix_AddMultTranspose)
    def AddMult_a(self, a, x, y):
        r"""AddMult_a(DenseMatrix self, double a, Vector x, Vector y)"""
        return _densemat.DenseMatrix_AddMult_a(self, a, x, y)
    AddMult_a = _swig_new_instance_method(_densemat.DenseMatrix_AddMult_a)
    def AddMultTranspose_a(self, a, x, y):
        r"""AddMultTranspose_a(DenseMatrix self, double a, Vector x, Vector y)"""
        return _densemat.DenseMatrix_AddMultTranspose_a(self, a, x, y)
    AddMultTranspose_a = _swig_new_instance_method(_densemat.DenseMatrix_AddMultTranspose_a)
    def LeftScaling(self, s):
        r"""LeftScaling(DenseMatrix self, Vector s)"""
        return _densemat.DenseMatrix_LeftScaling(self, s)
    LeftScaling = _swig_new_instance_method(_densemat.DenseMatrix_LeftScaling)
    def InvLeftScaling(self, s):
        r"""InvLeftScaling(DenseMatrix self, Vector s)"""
        return _densemat.DenseMatrix_InvLeftScaling(self, s)
    InvLeftScaling = _swig_new_instance_method(_densemat.DenseMatrix_InvLeftScaling)
    def RightScaling(self, s):
        r"""RightScaling(DenseMatrix self, Vector s)"""
        return _densemat.DenseMatrix_RightScaling(self, s)
    RightScaling = _swig_new_instance_method(_densemat.DenseMatrix_RightScaling)
    def InvRightScaling(self, s):
        r"""InvRightScaling(DenseMatrix self, Vector s)"""
        return _densemat.DenseMatrix_InvRightScaling(self, s)
    InvRightScaling = _swig_new_instance_method(_densemat.DenseMatrix_InvRightScaling)
    def SymmetricScaling(self, s):
        r"""SymmetricScaling(DenseMatrix self, Vector s)"""
        return _densemat.DenseMatrix_SymmetricScaling(self, s)
    SymmetricScaling = _swig_new_instance_method(_densemat.DenseMatrix_SymmetricScaling)
    def InvSymmetricScaling(self, s):
        r"""InvSymmetricScaling(DenseMatrix self, Vector s)"""
        return _densemat.DenseMatrix_InvSymmetricScaling(self, s)
    InvSymmetricScaling = _swig_new_instance_method(_densemat.DenseMatrix_InvSymmetricScaling)
    def InnerProduct(self, *args):
        r"""
        InnerProduct(DenseMatrix self, double const * x, double const * y) -> double
        InnerProduct(DenseMatrix self, Vector x, Vector y) -> double
        """
        return _densemat.DenseMatrix_InnerProduct(self, *args)
    InnerProduct = _swig_new_instance_method(_densemat.DenseMatrix_InnerProduct)
    def Inverse(self):
        r"""Inverse(DenseMatrix self) -> MatrixInverse"""
        return _densemat.DenseMatrix_Inverse(self)
    Inverse = _swig_new_instance_method(_densemat.DenseMatrix_Inverse)
    def Invert(self):
        r"""Invert(DenseMatrix self)"""
        return _densemat.DenseMatrix_Invert(self)
    Invert = _swig_new_instance_method(_densemat.DenseMatrix_Invert)
    def SquareRootInverse(self):
        r"""SquareRootInverse(DenseMatrix self)"""
        return _densemat.DenseMatrix_SquareRootInverse(self)
    SquareRootInverse = _swig_new_instance_method(_densemat.DenseMatrix_SquareRootInverse)
    def Det(self):
        r"""Det(DenseMatrix self) -> double"""
        return _densemat.DenseMatrix_Det(self)
    Det = _swig_new_instance_method(_densemat.DenseMatrix_Det)
    def Weight(self):
        r"""Weight(DenseMatrix self) -> double"""
        return _densemat.DenseMatrix_Weight(self)
    Weight = _swig_new_instance_method(_densemat.DenseMatrix_Weight)
    def Set(self, *args):
        r"""
        Set(DenseMatrix self, double alpha, double const * A)
        Set(DenseMatrix self, double alpha, DenseMatrix A)
        """
        return _densemat.DenseMatrix_Set(self, *args)
    Set = _swig_new_instance_method(_densemat.DenseMatrix_Set)
    def Add(self, c, A):
        r"""Add(DenseMatrix self, double const c, DenseMatrix A)"""
        return _densemat.DenseMatrix_Add(self, c, A)
    Add = _swig_new_instance_method(_densemat.DenseMatrix_Add)
    # The in-place operators below transfer 'thisown' from self to the
    # returned proxy so the underlying C++ object is not double-freed.
    def __iadd__(self, v):
        ret = _densemat.DenseMatrix___iadd__(self, v)
        ret.thisown = self.thisown
        self.thisown = 0
        return ret
    def __isub__(self, v):
        ret = _densemat.DenseMatrix___isub__(self, v)
        ret.thisown = self.thisown
        self.thisown = 0
        return ret
    def __imul__(self, v):
        ret = _densemat.DenseMatrix___imul__(self, v)
        ret.thisown = self.thisown
        self.thisown = 0
        return ret
    def Neg(self):
        r"""Neg(DenseMatrix self)"""
        return _densemat.DenseMatrix_Neg(self)
    Neg = _swig_new_instance_method(_densemat.DenseMatrix_Neg)
    def Norm2(self, v):
        r"""Norm2(DenseMatrix self, double * v)"""
        return _densemat.DenseMatrix_Norm2(self, v)
    Norm2 = _swig_new_instance_method(_densemat.DenseMatrix_Norm2)
    def MaxMaxNorm(self):
        r"""MaxMaxNorm(DenseMatrix self) -> double"""
        return _densemat.DenseMatrix_MaxMaxNorm(self)
    MaxMaxNorm = _swig_new_instance_method(_densemat.DenseMatrix_MaxMaxNorm)
    def FNorm(self):
        r"""FNorm(DenseMatrix self) -> double"""
        return _densemat.DenseMatrix_FNorm(self)
    FNorm = _swig_new_instance_method(_densemat.DenseMatrix_FNorm)
    def FNorm2(self):
        r"""FNorm2(DenseMatrix self) -> double"""
        return _densemat.DenseMatrix_FNorm2(self)
    FNorm2 = _swig_new_instance_method(_densemat.DenseMatrix_FNorm2)
    def Eigenvalues(self, *args):
        r"""
        Eigenvalues(DenseMatrix self, Vector ev)
        Eigenvalues(DenseMatrix self, Vector ev, DenseMatrix evect)
        Eigenvalues(DenseMatrix self, DenseMatrix b, Vector ev)
        Eigenvalues(DenseMatrix self, DenseMatrix b, Vector ev, DenseMatrix evect)
        """
        return _densemat.DenseMatrix_Eigenvalues(self, *args)
    Eigenvalues = _swig_new_instance_method(_densemat.DenseMatrix_Eigenvalues)
    def Eigensystem(self, *args):
        r"""
        Eigensystem(DenseMatrix self, Vector ev, DenseMatrix evect)
        Eigensystem(DenseMatrix self, DenseMatrix b, Vector ev, DenseMatrix evect)
        """
        return _densemat.DenseMatrix_Eigensystem(self, *args)
    Eigensystem = _swig_new_instance_method(_densemat.DenseMatrix_Eigensystem)
    def SingularValues(self, sv):
        r"""SingularValues(DenseMatrix self, Vector sv)"""
        return _densemat.DenseMatrix_SingularValues(self, sv)
    SingularValues = _swig_new_instance_method(_densemat.DenseMatrix_SingularValues)
    def Rank(self, tol):
        r"""Rank(DenseMatrix self, double tol) -> int"""
        return _densemat.DenseMatrix_Rank(self, tol)
    Rank = _swig_new_instance_method(_densemat.DenseMatrix_Rank)
    def CalcSingularvalue(self, i):
        r"""CalcSingularvalue(DenseMatrix self, int const i) -> double"""
        return _densemat.DenseMatrix_CalcSingularvalue(self, i)
    CalcSingularvalue = _swig_new_instance_method(_densemat.DenseMatrix_CalcSingularvalue)
    def CalcEigenvalues(self, _lambda, vec):
        r"""CalcEigenvalues(DenseMatrix self, double * _lambda, double * vec)"""
        return _densemat.DenseMatrix_CalcEigenvalues(self, _lambda, vec)
    CalcEigenvalues = _swig_new_instance_method(_densemat.DenseMatrix_CalcEigenvalues)
    def GetRow(self, r, row):
        r"""GetRow(DenseMatrix self, int r, Vector row)"""
        return _densemat.DenseMatrix_GetRow(self, r, row)
    GetRow = _swig_new_instance_method(_densemat.DenseMatrix_GetRow)
    def GetColumn(self, *args):
        r"""
        GetColumn(DenseMatrix self, int c, Vector col)
        GetColumn(DenseMatrix self, int col) -> double
        GetColumn(DenseMatrix self, int col) -> double const *
        """
        return _densemat.DenseMatrix_GetColumn(self, *args)
    GetColumn = _swig_new_instance_method(_densemat.DenseMatrix_GetColumn)
    def GetColumnReference(self, c, col):
        r"""GetColumnReference(DenseMatrix self, int c, Vector col)"""
        return _densemat.DenseMatrix_GetColumnReference(self, c, col)
    GetColumnReference = _swig_new_instance_method(_densemat.DenseMatrix_GetColumnReference)
    def SetRow(self, *args):
        r"""
        SetRow(DenseMatrix self, int r, double const * row)
        SetRow(DenseMatrix self, int r, Vector row)
        SetRow(DenseMatrix self, int row, double value)
        """
        return _densemat.DenseMatrix_SetRow(self, *args)
    SetRow = _swig_new_instance_method(_densemat.DenseMatrix_SetRow)
    def SetCol(self, *args):
        r"""
        SetCol(DenseMatrix self, int c, double const * col)
        SetCol(DenseMatrix self, int c, Vector col)
        SetCol(DenseMatrix self, int col, double value)
        """
        return _densemat.DenseMatrix_SetCol(self, *args)
    SetCol = _swig_new_instance_method(_densemat.DenseMatrix_SetCol)
    def GetDiag(self, d):
        r"""GetDiag(DenseMatrix self, Vector d)"""
        return _densemat.DenseMatrix_GetDiag(self, d)
    GetDiag = _swig_new_instance_method(_densemat.DenseMatrix_GetDiag)
    def Getl1Diag(self, l):
        r"""Getl1Diag(DenseMatrix self, Vector l)"""
        return _densemat.DenseMatrix_Getl1Diag(self, l)
    Getl1Diag = _swig_new_instance_method(_densemat.DenseMatrix_Getl1Diag)
    def GetRowSums(self, l):
        r"""GetRowSums(DenseMatrix self, Vector l)"""
        return _densemat.DenseMatrix_GetRowSums(self, l)
    GetRowSums = _swig_new_instance_method(_densemat.DenseMatrix_GetRowSums)
    def Diag(self, *args):
        r"""
        Diag(DenseMatrix self, double c, int n)
        Diag(DenseMatrix self, double * diag, int n)
        """
        return _densemat.DenseMatrix_Diag(self, *args)
    Diag = _swig_new_instance_method(_densemat.DenseMatrix_Diag)
    def Transpose(self, *args):
        r"""
        Transpose(DenseMatrix self)
        Transpose(DenseMatrix self, DenseMatrix A)
        """
        return _densemat.DenseMatrix_Transpose(self, *args)
    Transpose = _swig_new_instance_method(_densemat.DenseMatrix_Transpose)
    def Symmetrize(self):
        r"""Symmetrize(DenseMatrix self)"""
        return _densemat.DenseMatrix_Symmetrize(self)
    Symmetrize = _swig_new_instance_method(_densemat.DenseMatrix_Symmetrize)
    def Lump(self):
        r"""Lump(DenseMatrix self)"""
        return _densemat.DenseMatrix_Lump(self)
    Lump = _swig_new_instance_method(_densemat.DenseMatrix_Lump)
    def GradToCurl(self, curl):
        r"""GradToCurl(DenseMatrix self, DenseMatrix curl)"""
        return _densemat.DenseMatrix_GradToCurl(self, curl)
    GradToCurl = _swig_new_instance_method(_densemat.DenseMatrix_GradToCurl)
    def GradToDiv(self, div):
        r"""GradToDiv(DenseMatrix self, Vector div)"""
        return _densemat.DenseMatrix_GradToDiv(self, div)
    GradToDiv = _swig_new_instance_method(_densemat.DenseMatrix_GradToDiv)
    def CopyRows(self, A, row1, row2):
        r"""CopyRows(DenseMatrix self, DenseMatrix A, int row1, int row2)"""
        return _densemat.DenseMatrix_CopyRows(self, A, row1, row2)
    CopyRows = _swig_new_instance_method(_densemat.DenseMatrix_CopyRows)
    def CopyCols(self, A, col1, col2):
        r"""CopyCols(DenseMatrix self, DenseMatrix A, int col1, int col2)"""
        return _densemat.DenseMatrix_CopyCols(self, A, col1, col2)
    CopyCols = _swig_new_instance_method(_densemat.DenseMatrix_CopyCols)
    def CopyMNt(self, A, row_offset, col_offset):
        r"""CopyMNt(DenseMatrix self, DenseMatrix A, int row_offset, int col_offset)"""
        return _densemat.DenseMatrix_CopyMNt(self, A, row_offset, col_offset)
    CopyMNt = _swig_new_instance_method(_densemat.DenseMatrix_CopyMNt)
    def CopyMN(self, *args):
        r"""
        CopyMN(DenseMatrix self, DenseMatrix A, int m, int n, int Aro, int Aco)
        CopyMN(DenseMatrix self, DenseMatrix A, int row_offset, int col_offset)
        CopyMN(DenseMatrix self, DenseMatrix A, int m, int n, int Aro, int Aco, int row_offset, int col_offset)
        """
        return _densemat.DenseMatrix_CopyMN(self, *args)
    CopyMN = _swig_new_instance_method(_densemat.DenseMatrix_CopyMN)
    def CopyMNDiag(self, *args):
        r"""
        CopyMNDiag(DenseMatrix self, double c, int n, int row_offset, int col_offset)
        CopyMNDiag(DenseMatrix self, double * diag, int n, int row_offset, int col_offset)
        """
        return _densemat.DenseMatrix_CopyMNDiag(self, *args)
    CopyMNDiag = _swig_new_instance_method(_densemat.DenseMatrix_CopyMNDiag)
    def CopyExceptMN(self, A, m, n):
        r"""CopyExceptMN(DenseMatrix self, DenseMatrix A, int m, int n)"""
        return _densemat.DenseMatrix_CopyExceptMN(self, A, m, n)
    CopyExceptMN = _swig_new_instance_method(_densemat.DenseMatrix_CopyExceptMN)
    def AddMatrix(self, *args):
        r"""
        AddMatrix(DenseMatrix self, DenseMatrix A, int ro, int co)
        AddMatrix(DenseMatrix self, double a, DenseMatrix A, int ro, int co)
        """
        return _densemat.DenseMatrix_AddMatrix(self, *args)
    AddMatrix = _swig_new_instance_method(_densemat.DenseMatrix_AddMatrix)
    def AddToVector(self, offset, v):
        r"""AddToVector(DenseMatrix self, int offset, Vector v)"""
        return _densemat.DenseMatrix_AddToVector(self, offset, v)
    AddToVector = _swig_new_instance_method(_densemat.DenseMatrix_AddToVector)
    def GetFromVector(self, offset, v):
        r"""GetFromVector(DenseMatrix self, int offset, Vector v)"""
        return _densemat.DenseMatrix_GetFromVector(self, offset, v)
    GetFromVector = _swig_new_instance_method(_densemat.DenseMatrix_GetFromVector)
    def AdjustDofDirection(self, dofs):
        r"""AdjustDofDirection(DenseMatrix self, intArray dofs)"""
        return _densemat.DenseMatrix_AdjustDofDirection(self, dofs)
    AdjustDofDirection = _swig_new_instance_method(_densemat.DenseMatrix_AdjustDofDirection)
    def Threshold(self, eps):
        r"""Threshold(DenseMatrix self, double eps)"""
        return _densemat.DenseMatrix_Threshold(self, eps)
    Threshold = _swig_new_instance_method(_densemat.DenseMatrix_Threshold)
    def CheckFinite(self):
        r"""CheckFinite(DenseMatrix self) -> int"""
        return _densemat.DenseMatrix_CheckFinite(self)
    CheckFinite = _swig_new_instance_method(_densemat.DenseMatrix_CheckFinite)
    def TestInversion(self):
        r"""TestInversion(DenseMatrix self)"""
        return _densemat.DenseMatrix_TestInversion(self)
    TestInversion = _swig_new_instance_method(_densemat.DenseMatrix_TestInversion)
    def MemoryUsage(self):
        r"""MemoryUsage(DenseMatrix self) -> long"""
        return _densemat.DenseMatrix_MemoryUsage(self)
    MemoryUsage = _swig_new_instance_method(_densemat.DenseMatrix_MemoryUsage)
    def Read(self, on_dev=True):
        r"""Read(DenseMatrix self, bool on_dev=True) -> double const *"""
        return _densemat.DenseMatrix_Read(self, on_dev)
    Read = _swig_new_instance_method(_densemat.DenseMatrix_Read)
    def HostRead(self):
        r"""HostRead(DenseMatrix self) -> double const *"""
        return _densemat.DenseMatrix_HostRead(self)
    HostRead = _swig_new_instance_method(_densemat.DenseMatrix_HostRead)
    def Write(self, on_dev=True):
        r"""Write(DenseMatrix self, bool on_dev=True) -> double *"""
        return _densemat.DenseMatrix_Write(self, on_dev)
    Write = _swig_new_instance_method(_densemat.DenseMatrix_Write)
    def HostWrite(self):
        r"""HostWrite(DenseMatrix self) -> double *"""
        return _densemat.DenseMatrix_HostWrite(self)
    HostWrite = _swig_new_instance_method(_densemat.DenseMatrix_HostWrite)
    def ReadWrite(self, on_dev=True):
        r"""ReadWrite(DenseMatrix self, bool on_dev=True) -> double *"""
        return _densemat.DenseMatrix_ReadWrite(self, on_dev)
    ReadWrite = _swig_new_instance_method(_densemat.DenseMatrix_ReadWrite)
    def HostReadWrite(self):
        r"""HostReadWrite(DenseMatrix self) -> double *"""
        return _densemat.DenseMatrix_HostReadWrite(self)
    HostReadWrite = _swig_new_instance_method(_densemat.DenseMatrix_HostReadWrite)
    __swig_destroy__ = _densemat.delete_DenseMatrix
    def Assign(self, *args):
        r"""
        Assign(DenseMatrix self, double const v)
        Assign(DenseMatrix self, DenseMatrix m)
        Assign(DenseMatrix self, PyObject * numpymat)
        """
        # Hand-written typemap: a numpy argument must be a 2-D float64 array
        # whose column count matches Size(); it is made contiguous before
        # being handed to the C++ side.
        from numpy import ndarray, ascontiguousarray
        # keep_link appears unused in this wrapper; presumably a leftover from
        # the typemap template -- confirm before removing.
        keep_link = False
        if len(args) == 1 and isinstance(args[0], ndarray):
            if args[0].dtype != 'float64':
                raise ValueError('Must be float64 array:' + str(args[0].dtype) + ' was given')
            elif args[0].ndim != 2:
                raise ValueError('Ndim must be two')
            elif args[0].shape[1] != _densemat.DenseMatrix_Size(self):
                raise ValueError('Length does not match')
            else:
                args = (ascontiguousarray(args[0]),)
        val = _densemat.DenseMatrix_Assign(self, *args)
        return self
        # NOTE(review): unreachable -- 'return self' above always fires;
        # left behind by the code generator.
        return val
    def __getitem__(self, *args):
        # Support m[i, j] tuple indexing.
        i, j = args[0][0], args[0][1]
        return _densemat.DenseMatrix___getitem__(self, i, j)
    def __setitem__(self, *args):
        # Support m[i, j] = v tuple indexing.
        i, j, v = args[0][0], args[0][1], args[1]
        return _densemat.DenseMatrix___setitem__(self, i, j, v)
    def GetDataArray(self):
        r"""GetDataArray(DenseMatrix self) -> PyObject *"""
        return _densemat.DenseMatrix_GetDataArray(self)
    GetDataArray = _swig_new_instance_method(_densemat.DenseMatrix_GetDataArray)
    def Print(self, *args):
        r"""
        Print(DenseMatrix self, std::ostream & out=mfem::out, int width_=4)
        Print(DenseMatrix self, char const * file, int precision=8)
        """
        return _densemat.DenseMatrix_Print(self, *args)
    Print = _swig_new_instance_method(_densemat.DenseMatrix_Print)
    def PrintT(self, *args):
        r"""
        PrintT(DenseMatrix self, std::ostream & out=mfem::out, int width_=4)
        PrintT(DenseMatrix self, char const * file, int precision=8)
        """
        return _densemat.DenseMatrix_PrintT(self, *args)
    PrintT = _swig_new_instance_method(_densemat.DenseMatrix_PrintT)
    def PrintMatlab(self, *args):
        r"""
        PrintMatlab(DenseMatrix self, std::ostream & out=mfem::out)
        PrintMatlab(DenseMatrix self, char const * file, int precision=8)
        """
        return _densemat.DenseMatrix_PrintMatlab(self, *args)
    PrintMatlab = _swig_new_instance_method(_densemat.DenseMatrix_PrintMatlab)
# Register DenseMatrix in _densemat:
# (lets the C extension create DenseMatrix proxies for returned C++ objects)
_densemat.DenseMatrix_swigregister(DenseMatrix)
# Free-function wrappers (SWIG-generated). Note that each Python def below is
# immediately shadowed by the raw C implementation (name = _densemat.name);
# the defs exist only to carry the signature docstrings.
def LinearSolve(A, X, TOL=1.e-9):
    r"""LinearSolve(DenseMatrix A, double * X, double TOL=1.e-9) -> bool"""
    return _densemat.LinearSolve(A, X, TOL)
LinearSolve = _densemat.LinearSolve
def AddMult(b, c, a):
    r"""AddMult(DenseMatrix b, DenseMatrix c, DenseMatrix a)"""
    return _densemat.AddMult(b, c, a)
AddMult = _densemat.AddMult
def AddMult_a(alpha, b, c, a):
    r"""AddMult_a(double alpha, DenseMatrix b, DenseMatrix c, DenseMatrix a)"""
    return _densemat.AddMult_a(alpha, b, c, a)
AddMult_a = _densemat.AddMult_a
def CalcAdjugate(a, adja):
    r"""CalcAdjugate(DenseMatrix a, DenseMatrix adja)"""
    return _densemat.CalcAdjugate(a, adja)
CalcAdjugate = _densemat.CalcAdjugate
def CalcAdjugateTranspose(a, adjat):
    r"""CalcAdjugateTranspose(DenseMatrix a, DenseMatrix adjat)"""
    return _densemat.CalcAdjugateTranspose(a, adjat)
CalcAdjugateTranspose = _densemat.CalcAdjugateTranspose
def CalcInverse(a, inva):
    r"""CalcInverse(DenseMatrix a, DenseMatrix inva)"""
    return _densemat.CalcInverse(a, inva)
CalcInverse = _densemat.CalcInverse
def CalcInverseTranspose(a, inva):
    r"""CalcInverseTranspose(DenseMatrix a, DenseMatrix inva)"""
    return _densemat.CalcInverseTranspose(a, inva)
CalcInverseTranspose = _densemat.CalcInverseTranspose
def CalcOrtho(J, n):
    r"""CalcOrtho(DenseMatrix J, Vector n)"""
    return _densemat.CalcOrtho(J, n)
CalcOrtho = _densemat.CalcOrtho
def MultAAt(a, aat):
    r"""MultAAt(DenseMatrix a, DenseMatrix aat)"""
    return _densemat.MultAAt(a, aat)
MultAAt = _densemat.MultAAt
def MultADAt(A, D, ADAt):
    r"""MultADAt(DenseMatrix A, Vector D, DenseMatrix ADAt)"""
    return _densemat.MultADAt(A, D, ADAt)
MultADAt = _densemat.MultADAt
def AddMultADAt(A, D, ADAt):
    r"""AddMultADAt(DenseMatrix A, Vector D, DenseMatrix ADAt)"""
    return _densemat.AddMultADAt(A, D, ADAt)
AddMultADAt = _densemat.AddMultADAt
# SWIG-generated matrix-product wrappers; each def is shadowed right away by
# the C implementation and exists only for its docstring.
def MultABt(A, B, ABt):
    r"""MultABt(DenseMatrix A, DenseMatrix B, DenseMatrix ABt)"""
    return _densemat.MultABt(A, B, ABt)
MultABt = _densemat.MultABt
def MultADBt(A, D, B, ADBt):
    r"""MultADBt(DenseMatrix A, Vector D, DenseMatrix B, DenseMatrix ADBt)"""
    return _densemat.MultADBt(A, D, B, ADBt)
MultADBt = _densemat.MultADBt
def AddMultABt(A, B, ABt):
    r"""AddMultABt(DenseMatrix A, DenseMatrix B, DenseMatrix ABt)"""
    return _densemat.AddMultABt(A, B, ABt)
AddMultABt = _densemat.AddMultABt
def AddMultADBt(A, D, B, ADBt):
    r"""AddMultADBt(DenseMatrix A, Vector D, DenseMatrix B, DenseMatrix ADBt)"""
    return _densemat.AddMultADBt(A, D, B, ADBt)
AddMultADBt = _densemat.AddMultADBt
def AddMult_a_ABt(a, A, B, ABt):
    r"""AddMult_a_ABt(double a, DenseMatrix A, DenseMatrix B, DenseMatrix ABt)"""
    return _densemat.AddMult_a_ABt(a, A, B, ABt)
AddMult_a_ABt = _densemat.AddMult_a_ABt
def MultAtB(A, B, AtB):
    r"""MultAtB(DenseMatrix A, DenseMatrix B, DenseMatrix AtB)"""
    return _densemat.MultAtB(A, B, AtB)
MultAtB = _densemat.MultAtB
def AddMult_a_AAt(a, A, AAt):
    r"""AddMult_a_AAt(double a, DenseMatrix A, DenseMatrix AAt)"""
    return _densemat.AddMult_a_AAt(a, A, AAt)
AddMult_a_AAt = _densemat.AddMult_a_AAt
def Mult_a_AAt(a, A, AAt):
    r"""Mult_a_AAt(double a, DenseMatrix A, DenseMatrix AAt)"""
    return _densemat.Mult_a_AAt(a, A, AAt)
Mult_a_AAt = _densemat.Mult_a_AAt
# SWIG-generated vector outer-product wrappers; each def is shadowed right
# away by the C implementation and exists only for its docstring.
def MultVVt(v, vvt):
    r"""MultVVt(Vector v, DenseMatrix vvt)"""
    return _densemat.MultVVt(v, vvt)
MultVVt = _densemat.MultVVt
def MultVWt(v, w, VWt):
    r"""MultVWt(Vector v, Vector w, DenseMatrix VWt)"""
    return _densemat.MultVWt(v, w, VWt)
MultVWt = _densemat.MultVWt
def AddMultVWt(v, w, VWt):
    r"""AddMultVWt(Vector v, Vector w, DenseMatrix VWt)"""
    return _densemat.AddMultVWt(v, w, VWt)
AddMultVWt = _densemat.AddMultVWt
def AddMultVVt(v, VWt):
    r"""AddMultVVt(Vector v, DenseMatrix VWt)"""
    return _densemat.AddMultVVt(v, VWt)
AddMultVVt = _densemat.AddMultVVt
def AddMult_a_VWt(a, v, w, VWt):
    r"""AddMult_a_VWt(double const a, Vector v, Vector w, DenseMatrix VWt)"""
    return _densemat.AddMult_a_VWt(a, v, w, VWt)
AddMult_a_VWt = _densemat.AddMult_a_VWt
def AddMult_a_VVt(a, v, VVt):
    r"""AddMult_a_VVt(double const a, Vector v, DenseMatrix VVt)"""
    return _densemat.AddMult_a_VVt(a, v, VVt)
AddMult_a_VVt = _densemat.AddMult_a_VVt
# Wrapper for mfem::LUFactors: an LU factorization held in caller-provided
# raw buffers (``data`` = factored matrix entries, ``ipiv`` = pivot indices).
# This file is SWIG-generated; change the .i interface, not this file.
class LUFactors(object):
    r"""Proxy of C++ mfem::LUFactors class."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    # Raw C pointer properties exposed by SWIG (p.double / p.int), not
    # Python containers.
    data = property(_densemat.LUFactors_data_get, _densemat.LUFactors_data_set, doc=r"""data : p.double""")
    ipiv = property(_densemat.LUFactors_ipiv_get, _densemat.LUFactors_ipiv_set, doc=r"""ipiv : p.int""")
    ipiv_base = _densemat.LUFactors_ipiv_base
    def __init__(self, *args):
        r"""
        __init__(LUFactors self) -> LUFactors
        __init__(LUFactors self, double * data_, int * ipiv_) -> LUFactors
        """
        _densemat.LUFactors_swiginit(self, _densemat.new_LUFactors(*args))
    # NOTE: for every method below, the ``Name = _swig_new_instance_method(...)``
    # line that follows the ``def`` rebinds the attribute to the raw C entry
    # point, so the Python bodies serve only as documentation stubs.
    def Factor(self, m, TOL=0.0):
        r"""Factor(LUFactors self, int m, double TOL=0.0) -> bool"""
        return _densemat.LUFactors_Factor(self, m, TOL)
    Factor = _swig_new_instance_method(_densemat.LUFactors_Factor)
    def Det(self, m):
        r"""Det(LUFactors self, int m) -> double"""
        return _densemat.LUFactors_Det(self, m)
    Det = _swig_new_instance_method(_densemat.LUFactors_Det)
    def Mult(self, m, n, X):
        r"""Mult(LUFactors self, int m, int n, double * X)"""
        return _densemat.LUFactors_Mult(self, m, n, X)
    Mult = _swig_new_instance_method(_densemat.LUFactors_Mult)
    def LSolve(self, m, n, X):
        r"""LSolve(LUFactors self, int m, int n, double * X)"""
        return _densemat.LUFactors_LSolve(self, m, n, X)
    LSolve = _swig_new_instance_method(_densemat.LUFactors_LSolve)
    def USolve(self, m, n, X):
        r"""USolve(LUFactors self, int m, int n, double * X)"""
        return _densemat.LUFactors_USolve(self, m, n, X)
    USolve = _swig_new_instance_method(_densemat.LUFactors_USolve)
    def Solve(self, m, n, X):
        r"""Solve(LUFactors self, int m, int n, double * X)"""
        return _densemat.LUFactors_Solve(self, m, n, X)
    Solve = _swig_new_instance_method(_densemat.LUFactors_Solve)
    def RightSolve(self, m, n, X):
        r"""RightSolve(LUFactors self, int m, int n, double * X)"""
        return _densemat.LUFactors_RightSolve(self, m, n, X)
    RightSolve = _swig_new_instance_method(_densemat.LUFactors_RightSolve)
    def GetInverseMatrix(self, m, X):
        r"""GetInverseMatrix(LUFactors self, int m, double * X)"""
        return _densemat.LUFactors_GetInverseMatrix(self, m, X)
    GetInverseMatrix = _swig_new_instance_method(_densemat.LUFactors_GetInverseMatrix)
    @staticmethod
    def SubMult(m, n, r, A21, X1, X2):
        r"""SubMult(int m, int n, int r, double const * A21, double const * X1, double * X2)"""
        return _densemat.LUFactors_SubMult(m, n, r, A21, X1, X2)
    SubMult = _swig_new_static_method(_densemat.LUFactors_SubMult)
    # Block-factorization helpers (2x2 block elimination on raw buffers).
    def BlockFactor(self, m, n, A12, A21, A22):
        r"""BlockFactor(LUFactors self, int m, int n, double * A12, double * A21, double * A22)"""
        return _densemat.LUFactors_BlockFactor(self, m, n, A12, A21, A22)
    BlockFactor = _swig_new_instance_method(_densemat.LUFactors_BlockFactor)
    def BlockForwSolve(self, m, n, r, L21, B1, B2):
        r"""BlockForwSolve(LUFactors self, int m, int n, int r, double const * L21, double * B1, double * B2)"""
        return _densemat.LUFactors_BlockForwSolve(self, m, n, r, L21, B1, B2)
    BlockForwSolve = _swig_new_instance_method(_densemat.LUFactors_BlockForwSolve)
    def BlockBackSolve(self, m, n, r, U12, X2, Y1):
        r"""BlockBackSolve(LUFactors self, int m, int n, int r, double const * U12, double const * X2, double * Y1)"""
        return _densemat.LUFactors_BlockBackSolve(self, m, n, r, U12, X2, Y1)
    BlockBackSolve = _swig_new_instance_method(_densemat.LUFactors_BlockBackSolve)
    __swig_destroy__ = _densemat.delete_LUFactors
# Register LUFactors in _densemat:
_densemat.LUFactors_swigregister(LUFactors)
def LUFactors_SubMult(m, n, r, A21, X1, X2):
    r"""LUFactors_SubMult(int m, int n, int r, double const * A21, double const * X1, double * X2)"""
    # Module-level alias SWIG emits for the static method
    # LUFactors.SubMult; shadowed by the direct C binding below.
    return _densemat.LUFactors_SubMult(m, n, r, A21, X1, X2)
LUFactors_SubMult = _densemat.LUFactors_SubMult
# Factorization-backed inverse operator for a DenseMatrix; extends the
# abstract MatrixInverse interface.  SWIG-generated -- edit the .i file.
class DenseMatrixInverse(mfem._par.matrix.MatrixInverse):
    r"""Proxy of C++ mfem::DenseMatrixInverse class."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    def __init__(self, *args):
        r"""
        __init__(DenseMatrixInverse self) -> DenseMatrixInverse
        __init__(DenseMatrixInverse self, DenseMatrix mat) -> DenseMatrixInverse
        __init__(DenseMatrixInverse self, DenseMatrix mat) -> DenseMatrixInverse
        """
        _densemat.DenseMatrixInverse_swiginit(self, _densemat.new_DenseMatrixInverse(*args))
    # NOTE: each ``Name = _swig_new_instance_method(...)`` line rebinds the
    # attribute to the raw C entry point; the Python bodies are doc stubs.
    def Size(self):
        r"""Size(DenseMatrixInverse self) -> int"""
        return _densemat.DenseMatrixInverse_Size(self)
    Size = _swig_new_instance_method(_densemat.DenseMatrixInverse_Size)
    def Factor(self, *args):
        r"""
        Factor(DenseMatrixInverse self)
        Factor(DenseMatrixInverse self, DenseMatrix mat)
        """
        return _densemat.DenseMatrixInverse_Factor(self, *args)
    Factor = _swig_new_instance_method(_densemat.DenseMatrixInverse_Factor)
    def SetOperator(self, op):
        r"""SetOperator(DenseMatrixInverse self, Operator op)"""
        return _densemat.DenseMatrixInverse_SetOperator(self, op)
    SetOperator = _swig_new_instance_method(_densemat.DenseMatrixInverse_SetOperator)
    def Mult(self, *args):
        r"""
        Mult(DenseMatrixInverse self, double const * x, double * y)
        Mult(DenseMatrixInverse self, Vector x, Vector y)
        Mult(DenseMatrixInverse self, DenseMatrix B, DenseMatrix X)
        Mult(DenseMatrixInverse self, DenseMatrix X)
        """
        return _densemat.DenseMatrixInverse_Mult(self, *args)
    Mult = _swig_new_instance_method(_densemat.DenseMatrixInverse_Mult)
    def GetInverseMatrix(self, Ainv):
        r"""GetInverseMatrix(DenseMatrixInverse self, DenseMatrix Ainv)"""
        return _densemat.DenseMatrixInverse_GetInverseMatrix(self, Ainv)
    GetInverseMatrix = _swig_new_instance_method(_densemat.DenseMatrixInverse_GetInverseMatrix)
    def Det(self):
        r"""Det(DenseMatrixInverse self) -> double"""
        return _densemat.DenseMatrixInverse_Det(self)
    Det = _swig_new_instance_method(_densemat.DenseMatrixInverse_Det)
    def TestInversion(self):
        r"""TestInversion(DenseMatrixInverse self)"""
        return _densemat.DenseMatrixInverse_TestInversion(self)
    TestInversion = _swig_new_instance_method(_densemat.DenseMatrixInverse_TestInversion)
    __swig_destroy__ = _densemat.delete_DenseMatrixInverse
# Register DenseMatrixInverse in _densemat:
_densemat.DenseMatrixInverse_swigregister(DenseMatrixInverse)
# Eigen-decomposition helper for a DenseMatrix: call Eval(), then query
# eigenvalues/eigenvectors via the accessors.  SWIG-generated.
class DenseMatrixEigensystem(object):
    r"""Proxy of C++ mfem::DenseMatrixEigensystem class."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    def __init__(self, *args):
        r"""
        __init__(DenseMatrixEigensystem self, DenseMatrix m) -> DenseMatrixEigensystem
        __init__(DenseMatrixEigensystem self, DenseMatrixEigensystem other) -> DenseMatrixEigensystem
        """
        _densemat.DenseMatrixEigensystem_swiginit(self, _densemat.new_DenseMatrixEigensystem(*args))
    # NOTE: each ``Name = _swig_new_instance_method(...)`` line rebinds the
    # attribute to the raw C entry point; the Python bodies are doc stubs.
    def Eval(self):
        r"""Eval(DenseMatrixEigensystem self)"""
        return _densemat.DenseMatrixEigensystem_Eval(self)
    Eval = _swig_new_instance_method(_densemat.DenseMatrixEigensystem_Eval)
    def Eigenvalues(self):
        r"""Eigenvalues(DenseMatrixEigensystem self) -> Vector"""
        return _densemat.DenseMatrixEigensystem_Eigenvalues(self)
    Eigenvalues = _swig_new_instance_method(_densemat.DenseMatrixEigensystem_Eigenvalues)
    def Eigenvectors(self):
        r"""Eigenvectors(DenseMatrixEigensystem self) -> DenseMatrix"""
        return _densemat.DenseMatrixEigensystem_Eigenvectors(self)
    Eigenvectors = _swig_new_instance_method(_densemat.DenseMatrixEigensystem_Eigenvectors)
    def Eigenvalue(self, i):
        r"""Eigenvalue(DenseMatrixEigensystem self, int i) -> double"""
        return _densemat.DenseMatrixEigensystem_Eigenvalue(self, i)
    Eigenvalue = _swig_new_instance_method(_densemat.DenseMatrixEigensystem_Eigenvalue)
    def Eigenvector(self, i):
        r"""Eigenvector(DenseMatrixEigensystem self, int i) -> Vector"""
        return _densemat.DenseMatrixEigensystem_Eigenvector(self, i)
    Eigenvector = _swig_new_instance_method(_densemat.DenseMatrixEigensystem_Eigenvector)
    __swig_destroy__ = _densemat.delete_DenseMatrixEigensystem
# Register DenseMatrixEigensystem in _densemat:
_densemat.DenseMatrixEigensystem_swigregister(DenseMatrixEigensystem)
# Singular-value decomposition helper for a DenseMatrix: call Eval(M), then
# query singular values via the accessors.  SWIG-generated.
class DenseMatrixSVD(object):
    r"""Proxy of C++ mfem::DenseMatrixSVD class."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    def __init__(self, *args):
        r"""
        __init__(DenseMatrixSVD self, DenseMatrix M) -> DenseMatrixSVD
        __init__(DenseMatrixSVD self, int h, int w) -> DenseMatrixSVD
        """
        _densemat.DenseMatrixSVD_swiginit(self, _densemat.new_DenseMatrixSVD(*args))
    # NOTE: each ``Name = _swig_new_instance_method(...)`` line rebinds the
    # attribute to the raw C entry point; the Python bodies are doc stubs.
    def Eval(self, M):
        r"""Eval(DenseMatrixSVD self, DenseMatrix M)"""
        return _densemat.DenseMatrixSVD_Eval(self, M)
    Eval = _swig_new_instance_method(_densemat.DenseMatrixSVD_Eval)
    def Singularvalues(self):
        r"""Singularvalues(DenseMatrixSVD self) -> Vector"""
        return _densemat.DenseMatrixSVD_Singularvalues(self)
    Singularvalues = _swig_new_instance_method(_densemat.DenseMatrixSVD_Singularvalues)
    def Singularvalue(self, i):
        r"""Singularvalue(DenseMatrixSVD self, int i) -> double"""
        return _densemat.DenseMatrixSVD_Singularvalue(self, i)
    Singularvalue = _swig_new_instance_method(_densemat.DenseMatrixSVD_Singularvalue)
    __swig_destroy__ = _densemat.delete_DenseMatrixSVD
# Register DenseMatrixSVD in _densemat:
_densemat.DenseMatrixSVD_swigregister(DenseMatrixSVD)
# Rank-3 tensor of doubles (indexed (i, j, k); t(k)/t[k] yields the k-th
# DenseMatrix slice).  This file is SWIG-generated; the hand-fixes below
# (dead code removed in Assign, bare excepts narrowed in __getitem__)
# should be mirrored in the SWIG .i interface before regeneration.
class DenseTensor(object):
    r"""Proxy of C++ mfem::DenseTensor class."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    def __init__(self, *args):
        r"""
        __init__(DenseTensor self) -> DenseTensor
        __init__(DenseTensor self, int i, int j, int k) -> DenseTensor
        __init__(DenseTensor self, DenseTensor other) -> DenseTensor
        """
        _densemat.DenseTensor_swiginit(self, _densemat.new_DenseTensor(*args))
    # NOTE: each ``Name = _swig_new_instance_method(...)`` line rebinds the
    # attribute to the raw C entry point; the Python bodies are doc stubs.
    def SizeI(self):
        r"""SizeI(DenseTensor self) -> int"""
        return _densemat.DenseTensor_SizeI(self)
    SizeI = _swig_new_instance_method(_densemat.DenseTensor_SizeI)
    def SizeJ(self):
        r"""SizeJ(DenseTensor self) -> int"""
        return _densemat.DenseTensor_SizeJ(self)
    SizeJ = _swig_new_instance_method(_densemat.DenseTensor_SizeJ)
    def SizeK(self):
        r"""SizeK(DenseTensor self) -> int"""
        return _densemat.DenseTensor_SizeK(self)
    SizeK = _swig_new_instance_method(_densemat.DenseTensor_SizeK)
    def TotalSize(self):
        r"""TotalSize(DenseTensor self) -> int"""
        return _densemat.DenseTensor_TotalSize(self)
    TotalSize = _swig_new_instance_method(_densemat.DenseTensor_TotalSize)
    def SetSize(self, i, j, k):
        r"""SetSize(DenseTensor self, int i, int j, int k)"""
        return _densemat.DenseTensor_SetSize(self, i, j, k)
    SetSize = _swig_new_instance_method(_densemat.DenseTensor_SetSize)
    def UseExternalData(self, ext_data, i, j, k):
        r"""UseExternalData(DenseTensor self, double * ext_data, int i, int j, int k)"""
        return _densemat.DenseTensor_UseExternalData(self, ext_data, i, j, k)
    UseExternalData = _swig_new_instance_method(_densemat.DenseTensor_UseExternalData)
    def __call__(self, *args):
        r"""
        __call__(DenseTensor self, int k) -> DenseMatrix
        __call__(DenseTensor self, int k) -> DenseMatrix
        __call__(DenseTensor self, int i, int j, int k) -> double
        __call__(DenseTensor self, int i, int j, int k) -> double const &
        """
        return _densemat.DenseTensor___call__(self, *args)
    __call__ = _swig_new_instance_method(_densemat.DenseTensor___call__)
    def GetData(self, k):
        r"""GetData(DenseTensor self, int k) -> double *"""
        return _densemat.DenseTensor_GetData(self, k)
    GetData = _swig_new_instance_method(_densemat.DenseTensor_GetData)
    def Data(self, *args):
        r"""
        Data(DenseTensor self) -> double
        Data(DenseTensor self) -> double const *
        """
        return _densemat.DenseTensor_Data(self, *args)
    Data = _swig_new_instance_method(_densemat.DenseTensor_Data)
    def GetMemory(self, *args):
        r"""
        GetMemory(DenseTensor self) -> mfem::Memory< double >
        GetMemory(DenseTensor self) -> mfem::Memory< double > const &
        """
        return _densemat.DenseTensor_GetMemory(self, *args)
    GetMemory = _swig_new_instance_method(_densemat.DenseTensor_GetMemory)
    def AddMult(self, elem_dof, x, y):
        r"""AddMult(DenseTensor self, mfem::Table const & elem_dof, Vector x, Vector y)"""
        return _densemat.DenseTensor_AddMult(self, elem_dof, x, y)
    AddMult = _swig_new_instance_method(_densemat.DenseTensor_AddMult)
    def Clear(self):
        r"""Clear(DenseTensor self)"""
        return _densemat.DenseTensor_Clear(self)
    Clear = _swig_new_instance_method(_densemat.DenseTensor_Clear)
    def MemoryUsage(self):
        r"""MemoryUsage(DenseTensor self) -> long"""
        return _densemat.DenseTensor_MemoryUsage(self)
    MemoryUsage = _swig_new_instance_method(_densemat.DenseTensor_MemoryUsage)
    # Device/host data-access pairs (Read/Write/ReadWrite and Host* variants)
    # mirror MFEM's memory-space access API; they return raw double pointers.
    def Read(self, on_dev=True):
        r"""Read(DenseTensor self, bool on_dev=True) -> double const *"""
        return _densemat.DenseTensor_Read(self, on_dev)
    Read = _swig_new_instance_method(_densemat.DenseTensor_Read)
    def HostRead(self):
        r"""HostRead(DenseTensor self) -> double const *"""
        return _densemat.DenseTensor_HostRead(self)
    HostRead = _swig_new_instance_method(_densemat.DenseTensor_HostRead)
    def Write(self, on_dev=True):
        r"""Write(DenseTensor self, bool on_dev=True) -> double *"""
        return _densemat.DenseTensor_Write(self, on_dev)
    Write = _swig_new_instance_method(_densemat.DenseTensor_Write)
    def HostWrite(self):
        r"""HostWrite(DenseTensor self) -> double *"""
        return _densemat.DenseTensor_HostWrite(self)
    HostWrite = _swig_new_instance_method(_densemat.DenseTensor_HostWrite)
    def ReadWrite(self, on_dev=True):
        r"""ReadWrite(DenseTensor self, bool on_dev=True) -> double *"""
        return _densemat.DenseTensor_ReadWrite(self, on_dev)
    ReadWrite = _swig_new_instance_method(_densemat.DenseTensor_ReadWrite)
    def HostReadWrite(self):
        r"""HostReadWrite(DenseTensor self) -> double *"""
        return _densemat.DenseTensor_HostReadWrite(self)
    HostReadWrite = _swig_new_instance_method(_densemat.DenseTensor_HostReadWrite)
    __swig_destroy__ = _densemat.delete_DenseTensor
    def Assign(self, c):
        r"""Assign(DenseTensor self, double const c)"""
        # Fill every entry with the scalar ``c``; return self so calls can be
        # chained.  (FIX: the generated code had an unreachable ``return val``
        # after ``return self``; the dead statement is removed here.)
        _densemat.DenseTensor_Assign(self, c)
        return self
    def __getitem__(self, *args):
        # Two access forms:
        #   t[i, j, k] -> float entry, when given a length-3 sequence;
        #   t[k]       -> DenseMatrix slice, when given a non-negative int.
        # Anything else falls through and returns None, matching the
        # generated behavior (NOTE(review): negative ints are silently
        # ignored -- consider raising IndexError/TypeError upstream).
        try:
            check = len(args[0]) == 3
        except Exception:  # FIX: was a bare ``except:`` -- narrowed so that
            check = False  # KeyboardInterrupt/SystemExit still propagate
        if check:
            i, j, k = args[0][0], args[0][1], args[0][2]
            return _densemat.DenseTensor___getitem__(self, i, j, k)
        try:
            check = int(args[0])
        except Exception:  # FIX: was a bare ``except:``
            check = -1
        if check >= 0:
            return _densemat.DenseTensor___getitem__(self, check)
    def __setitem__(self, *args):
        # Only the t[i, j, k] = v form is supported for assignment.
        i, j, k, v = args[0][0], args[0][1], args[0][2], args[1]
        return _densemat.DenseTensor___setitem__(self, i, j, k, v)
    def GetDataArray(self):
        r"""GetDataArray(DenseTensor self) -> PyObject *"""
        return _densemat.DenseTensor_GetDataArray(self)
    GetDataArray = _swig_new_instance_method(_densemat.DenseTensor_GetDataArray)
# Register DenseTensor in _densemat:
_densemat.DenseTensor_swigregister(DenseTensor)
def BatchLUFactor(Mlu, P, TOL=0.0):
    r"""BatchLUFactor(DenseTensor Mlu, intArray P, double const TOL=0.0)"""
    # Presumably LU-factorizes each matrix slice of Mlu in place, storing
    # pivots in P (mfem::BatchLUFactor) -- confirm.  Shadowed by the direct
    # C binding below.
    return _densemat.BatchLUFactor(Mlu, P, TOL)
BatchLUFactor = _densemat.BatchLUFactor
def BatchLUSolve(Mlu, P, X):
    r"""BatchLUSolve(DenseTensor Mlu, intArray P, Vector X)"""
    # Presumably solves with the batched LU factors produced by
    # BatchLUFactor (mfem::BatchLUSolve) -- confirm.  Shadowed by the
    # direct C binding below.
    return _densemat.BatchLUSolve(Mlu, P, X)
BatchLUSolve = _densemat.BatchLUSolve
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 4.0.2
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
# NOTE(review): everything from here down appears to repeat the SWIG module
# preamble (and, below, the DenseMatrix wrapper) that already occurs earlier
# in this file -- the generated module seems to have been concatenated twice.
# Confirm whether the duplication is intentional before deduplicating.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info < (2, 7, 0):
    raise RuntimeError("Python 2.7 or later required")
# Import the low-level C/C++ module
if __package__ or "." in __name__:
    from . import _densemat
else:
    import _densemat
# Python 2/3 compatibility shim for the builtins module.
try:
    import builtins as __builtin__
except ImportError:
    import __builtin__
# Helpers used below to replace Python method stubs with raw C entry points.
_swig_new_instance_method = _densemat.SWIG_PyInstanceMethod_New
_swig_new_static_method = _densemat.SWIG_PyStaticMethod_New
def _swig_repr(self):
    # Generic repr for SWIG proxies: "<module.Class; proxy of ... >".
    # Falls back to an empty description when the underlying C++ object
    # handle (``self.this``) is not available.
    try:
        strthis = "proxy of " + self.this.__repr__()
    except __builtin__.Exception:
        strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
def _swig_setattr_nondynamic_instance_variable(set):
    # Wraps a __setattr__ so instances may only assign to the special SWIG
    # slots ("this"/"thisown") or to existing ``property`` descriptors;
    # creating any other instance attribute raises AttributeError.
    def set_instance_attr(self, name, value):
        if name == "thisown":
            self.this.own(value)
        elif name == "this":
            set(self, name, value)
        elif hasattr(self, name) and isinstance(getattr(type(self), name), property):
            set(self, name, value)
        else:
            raise AttributeError("You cannot add instance attributes to %s" % self)
    return set_instance_attr
def _swig_setattr_nondynamic_class_variable(set):
    # Class-level analogue: permits rebinding existing non-property class
    # attributes, but refuses to create new class attributes.
    def set_class_attr(cls, name, value):
        if hasattr(cls, name) and not isinstance(getattr(cls, name), property):
            set(cls, name, value)
        else:
            raise AttributeError("You cannot add class attributes to %s" % cls)
    return set_class_attr
def _swig_add_metaclass(metaclass):
    """Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass"""
    def wrapper(cls):
        # Rebuild the class under the given metaclass, preserving its name,
        # bases, and namespace.
        return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
    return wrapper
class _SwigNonDynamicMeta(type):
    """Meta class to enforce nondynamic attributes (no new attributes) for a class"""
    # Route class-attribute assignment through the guard defined above.
    __setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__)
import weakref
import mfem._par.mem_manager
import mfem._par.array
import mfem._par.vector
import mfem._par.operators
import mfem._par.matrix
class DenseMatrix(mfem._par.matrix.Matrix):
r"""Proxy of C++ mfem::DenseMatrix class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
r"""
__init__(DenseMatrix self) -> DenseMatrix
__init__(DenseMatrix self, DenseMatrix arg2) -> DenseMatrix
__init__(DenseMatrix self, int s) -> DenseMatrix
__init__(DenseMatrix self, int m, int n) -> DenseMatrix
__init__(DenseMatrix self, DenseMatrix mat, char ch) -> DenseMatrix
__init__(DenseMatrix self, double * d, int h, int w) -> DenseMatrix
"""
_densemat.DenseMatrix_swiginit(self, _densemat.new_DenseMatrix(*args))
def UseExternalData(self, d, h, w):
r"""UseExternalData(DenseMatrix self, double * d, int h, int w)"""
return _densemat.DenseMatrix_UseExternalData(self, d, h, w)
UseExternalData = _swig_new_instance_method(_densemat.DenseMatrix_UseExternalData)
def Reset(self, d, h, w):
r"""Reset(DenseMatrix self, double * d, int h, int w)"""
return _densemat.DenseMatrix_Reset(self, d, h, w)
Reset = _swig_new_instance_method(_densemat.DenseMatrix_Reset)
def ClearExternalData(self):
r"""ClearExternalData(DenseMatrix self)"""
return _densemat.DenseMatrix_ClearExternalData(self)
ClearExternalData = _swig_new_instance_method(_densemat.DenseMatrix_ClearExternalData)
def Clear(self):
r"""Clear(DenseMatrix self)"""
return _densemat.DenseMatrix_Clear(self)
Clear = _swig_new_instance_method(_densemat.DenseMatrix_Clear)
def Size(self):
r"""Size(DenseMatrix self) -> int"""
return _densemat.DenseMatrix_Size(self)
Size = _swig_new_instance_method(_densemat.DenseMatrix_Size)
def SetSize(self, *args):
r"""
SetSize(DenseMatrix self, int s)
SetSize(DenseMatrix self, int h, int w)
"""
return _densemat.DenseMatrix_SetSize(self, *args)
SetSize = _swig_new_instance_method(_densemat.DenseMatrix_SetSize)
def Data(self):
r"""Data(DenseMatrix self) -> double *"""
return _densemat.DenseMatrix_Data(self)
Data = _swig_new_instance_method(_densemat.DenseMatrix_Data)
def GetData(self):
r"""GetData(DenseMatrix self) -> double *"""
return _densemat.DenseMatrix_GetData(self)
GetData = _swig_new_instance_method(_densemat.DenseMatrix_GetData)
def GetMemory(self, *args):
r"""
GetMemory(DenseMatrix self) -> mfem::Memory< double >
GetMemory(DenseMatrix self) -> mfem::Memory< double > const &
"""
return _densemat.DenseMatrix_GetMemory(self, *args)
GetMemory = _swig_new_instance_method(_densemat.DenseMatrix_GetMemory)
def OwnsData(self):
r"""OwnsData(DenseMatrix self) -> bool"""
return _densemat.DenseMatrix_OwnsData(self)
OwnsData = _swig_new_instance_method(_densemat.DenseMatrix_OwnsData)
def __call__(self, *args):
r"""
__call__(DenseMatrix self, int i, int j) -> double
__call__(DenseMatrix self, int i, int j) -> double const &
"""
return _densemat.DenseMatrix___call__(self, *args)
__call__ = _swig_new_instance_method(_densemat.DenseMatrix___call__)
def __mul__(self, m):
r"""__mul__(DenseMatrix self, DenseMatrix m) -> double"""
return _densemat.DenseMatrix___mul__(self, m)
__mul__ = _swig_new_instance_method(_densemat.DenseMatrix___mul__)
def Trace(self):
r"""Trace(DenseMatrix self) -> double"""
return _densemat.DenseMatrix_Trace(self)
Trace = _swig_new_instance_method(_densemat.DenseMatrix_Trace)
def Elem(self, *args):
r"""
Elem(DenseMatrix self, int i, int j) -> double
Elem(DenseMatrix self, int i, int j) -> double const &
"""
return _densemat.DenseMatrix_Elem(self, *args)
Elem = _swig_new_instance_method(_densemat.DenseMatrix_Elem)
def Mult(self, *args):
r"""
Mult(DenseMatrix self, double const * x, double * y)
Mult(DenseMatrix self, Vector x, Vector y)
"""
return _densemat.DenseMatrix_Mult(self, *args)
Mult = _swig_new_instance_method(_densemat.DenseMatrix_Mult)
def MultTranspose(self, *args):
r"""
MultTranspose(DenseMatrix self, double const * x, double * y)
MultTranspose(DenseMatrix self, Vector x, Vector y)
"""
return _densemat.DenseMatrix_MultTranspose(self, *args)
MultTranspose = _swig_new_instance_method(_densemat.DenseMatrix_MultTranspose)
def AddMult(self, x, y):
r"""AddMult(DenseMatrix self, Vector x, Vector y)"""
return _densemat.DenseMatrix_AddMult(self, x, y)
AddMult = _swig_new_instance_method(_densemat.DenseMatrix_AddMult)
def AddMultTranspose(self, x, y):
r"""AddMultTranspose(DenseMatrix self, Vector x, Vector y)"""
return _densemat.DenseMatrix_AddMultTranspose(self, x, y)
AddMultTranspose = _swig_new_instance_method(_densemat.DenseMatrix_AddMultTranspose)
def AddMult_a(self, a, x, y):
r"""AddMult_a(DenseMatrix self, double a, Vector x, Vector y)"""
return _densemat.DenseMatrix_AddMult_a(self, a, x, y)
AddMult_a = _swig_new_instance_method(_densemat.DenseMatrix_AddMult_a)
def AddMultTranspose_a(self, a, x, y):
r"""AddMultTranspose_a(DenseMatrix self, double a, Vector x, Vector y)"""
return _densemat.DenseMatrix_AddMultTranspose_a(self, a, x, y)
AddMultTranspose_a = _swig_new_instance_method(_densemat.DenseMatrix_AddMultTranspose_a)
def LeftScaling(self, s):
r"""LeftScaling(DenseMatrix self, Vector s)"""
return _densemat.DenseMatrix_LeftScaling(self, s)
LeftScaling = _swig_new_instance_method(_densemat.DenseMatrix_LeftScaling)
def InvLeftScaling(self, s):
r"""InvLeftScaling(DenseMatrix self, Vector s)"""
return _densemat.DenseMatrix_InvLeftScaling(self, s)
InvLeftScaling = _swig_new_instance_method(_densemat.DenseMatrix_InvLeftScaling)
def RightScaling(self, s):
r"""RightScaling(DenseMatrix self, Vector s)"""
return _densemat.DenseMatrix_RightScaling(self, s)
RightScaling = _swig_new_instance_method(_densemat.DenseMatrix_RightScaling)
def InvRightScaling(self, s):
r"""InvRightScaling(DenseMatrix self, Vector s)"""
return _densemat.DenseMatrix_InvRightScaling(self, s)
InvRightScaling = _swig_new_instance_method(_densemat.DenseMatrix_InvRightScaling)
def SymmetricScaling(self, s):
r"""SymmetricScaling(DenseMatrix self, Vector s)"""
return _densemat.DenseMatrix_SymmetricScaling(self, s)
SymmetricScaling = _swig_new_instance_method(_densemat.DenseMatrix_SymmetricScaling)
def InvSymmetricScaling(self, s):
r"""InvSymmetricScaling(DenseMatrix self, Vector s)"""
return _densemat.DenseMatrix_InvSymmetricScaling(self, s)
InvSymmetricScaling = _swig_new_instance_method(_densemat.DenseMatrix_InvSymmetricScaling)
def InnerProduct(self, *args):
r"""
InnerProduct(DenseMatrix self, double const * x, double const * y) -> double
InnerProduct(DenseMatrix self, Vector x, Vector y) -> double
"""
return _densemat.DenseMatrix_InnerProduct(self, *args)
InnerProduct = _swig_new_instance_method(_densemat.DenseMatrix_InnerProduct)
def Inverse(self):
r"""Inverse(DenseMatrix self) -> MatrixInverse"""
return _densemat.DenseMatrix_Inverse(self)
Inverse = _swig_new_instance_method(_densemat.DenseMatrix_Inverse)
def Invert(self):
r"""Invert(DenseMatrix self)"""
return _densemat.DenseMatrix_Invert(self)
Invert = _swig_new_instance_method(_densemat.DenseMatrix_Invert)
def SquareRootInverse(self):
r"""SquareRootInverse(DenseMatrix self)"""
return _densemat.DenseMatrix_SquareRootInverse(self)
SquareRootInverse = _swig_new_instance_method(_densemat.DenseMatrix_SquareRootInverse)
def Det(self):
r"""Det(DenseMatrix self) -> double"""
return _densemat.DenseMatrix_Det(self)
Det = _swig_new_instance_method(_densemat.DenseMatrix_Det)
def Weight(self):
r"""Weight(DenseMatrix self) -> double"""
return _densemat.DenseMatrix_Weight(self)
Weight = _swig_new_instance_method(_densemat.DenseMatrix_Weight)
def Set(self, *args):
r"""
Set(DenseMatrix self, double alpha, double const * A)
Set(DenseMatrix self, double alpha, DenseMatrix A)
"""
return _densemat.DenseMatrix_Set(self, *args)
Set = _swig_new_instance_method(_densemat.DenseMatrix_Set)
def Add(self, c, A):
r"""Add(DenseMatrix self, double const c, DenseMatrix A)"""
return _densemat.DenseMatrix_Add(self, c, A)
Add = _swig_new_instance_method(_densemat.DenseMatrix_Add)
def __iadd__(self, v):
ret = _densemat.DenseMatrix___iadd__(self, v)
ret.thisown = self.thisown
self.thisown = 0
return ret
def __isub__(self, v):
ret = _densemat.DenseMatrix___isub__(self, v)
ret.thisown = self.thisown
self.thisown = 0
return ret
def __imul__(self, v):
ret = _densemat.DenseMatrix___imul__(self, v)
ret.thisown = self.thisown
self.thisown = 0
return ret
def Neg(self):
r"""Neg(DenseMatrix self)"""
return _densemat.DenseMatrix_Neg(self)
Neg = _swig_new_instance_method(_densemat.DenseMatrix_Neg)
def Norm2(self, v):
r"""Norm2(DenseMatrix self, double * v)"""
return _densemat.DenseMatrix_Norm2(self, v)
Norm2 = _swig_new_instance_method(_densemat.DenseMatrix_Norm2)
def MaxMaxNorm(self):
r"""MaxMaxNorm(DenseMatrix self) -> double"""
return _densemat.DenseMatrix_MaxMaxNorm(self)
MaxMaxNorm = _swig_new_instance_method(_densemat.DenseMatrix_MaxMaxNorm)
def FNorm(self):
r"""FNorm(DenseMatrix self) -> double"""
return _densemat.DenseMatrix_FNorm(self)
FNorm = _swig_new_instance_method(_densemat.DenseMatrix_FNorm)
def FNorm2(self):
r"""FNorm2(DenseMatrix self) -> double"""
return _densemat.DenseMatrix_FNorm2(self)
FNorm2 = _swig_new_instance_method(_densemat.DenseMatrix_FNorm2)
def Eigenvalues(self, *args):
r"""
Eigenvalues(DenseMatrix self, Vector ev)
Eigenvalues(DenseMatrix self, Vector ev, DenseMatrix evect)
Eigenvalues(DenseMatrix self, DenseMatrix b, Vector ev)
Eigenvalues(DenseMatrix self, DenseMatrix b, Vector ev, DenseMatrix evect)
"""
return _densemat.DenseMatrix_Eigenvalues(self, *args)
Eigenvalues = _swig_new_instance_method(_densemat.DenseMatrix_Eigenvalues)
def Eigensystem(self, *args):
r"""
Eigensystem(DenseMatrix self, Vector ev, DenseMatrix evect)
Eigensystem(DenseMatrix self, DenseMatrix b, Vector ev, DenseMatrix evect)
"""
return _densemat.DenseMatrix_Eigensystem(self, *args)
Eigensystem = _swig_new_instance_method(_densemat.DenseMatrix_Eigensystem)
def SingularValues(self, sv):
r"""SingularValues(DenseMatrix self, Vector sv)"""
return _densemat.DenseMatrix_SingularValues(self, sv)
SingularValues = _swig_new_instance_method(_densemat.DenseMatrix_SingularValues)
def Rank(self, tol):
r"""Rank(DenseMatrix self, double tol) -> int"""
return _densemat.DenseMatrix_Rank(self, tol)
Rank = _swig_new_instance_method(_densemat.DenseMatrix_Rank)
def CalcSingularvalue(self, i):
r"""CalcSingularvalue(DenseMatrix self, int const i) -> double"""
return _densemat.DenseMatrix_CalcSingularvalue(self, i)
CalcSingularvalue = _swig_new_instance_method(_densemat.DenseMatrix_CalcSingularvalue)
def CalcEigenvalues(self, _lambda, vec):
r"""CalcEigenvalues(DenseMatrix self, double * _lambda, double * vec)"""
return _densemat.DenseMatrix_CalcEigenvalues(self, _lambda, vec)
CalcEigenvalues = _swig_new_instance_method(_densemat.DenseMatrix_CalcEigenvalues)
def GetRow(self, r, row):
r"""GetRow(DenseMatrix self, int r, Vector row)"""
return _densemat.DenseMatrix_GetRow(self, r, row)
GetRow = _swig_new_instance_method(_densemat.DenseMatrix_GetRow)
def GetColumn(self, *args):
r"""
GetColumn(DenseMatrix self, int c, Vector col)
GetColumn(DenseMatrix self, int col) -> double
GetColumn(DenseMatrix self, int col) -> double const *
"""
return _densemat.DenseMatrix_GetColumn(self, *args)
GetColumn = _swig_new_instance_method(_densemat.DenseMatrix_GetColumn)
def GetColumnReference(self, c, col):
r"""GetColumnReference(DenseMatrix self, int c, Vector col)"""
return _densemat.DenseMatrix_GetColumnReference(self, c, col)
GetColumnReference = _swig_new_instance_method(_densemat.DenseMatrix_GetColumnReference)
def SetRow(self, *args):
r"""
SetRow(DenseMatrix self, int r, double const * row)
SetRow(DenseMatrix self, int r, Vector row)
SetRow(DenseMatrix self, int row, double value)
"""
return _densemat.DenseMatrix_SetRow(self, *args)
SetRow = _swig_new_instance_method(_densemat.DenseMatrix_SetRow)
def SetCol(self, *args):
r"""
SetCol(DenseMatrix self, int c, double const * col)
SetCol(DenseMatrix self, int c, Vector col)
SetCol(DenseMatrix self, int col, double value)
"""
return _densemat.DenseMatrix_SetCol(self, *args)
SetCol = _swig_new_instance_method(_densemat.DenseMatrix_SetCol)
def GetDiag(self, d):
r"""GetDiag(DenseMatrix self, Vector d)"""
return _densemat.DenseMatrix_GetDiag(self, d)
GetDiag = _swig_new_instance_method(_densemat.DenseMatrix_GetDiag)
def Getl1Diag(self, l):
r"""Getl1Diag(DenseMatrix self, Vector l)"""
return _densemat.DenseMatrix_Getl1Diag(self, l)
Getl1Diag = _swig_new_instance_method(_densemat.DenseMatrix_Getl1Diag)
def GetRowSums(self, l):
r"""GetRowSums(DenseMatrix self, Vector l)"""
return _densemat.DenseMatrix_GetRowSums(self, l)
GetRowSums = _swig_new_instance_method(_densemat.DenseMatrix_GetRowSums)
def Diag(self, *args):
r"""
Diag(DenseMatrix self, double c, int n)
Diag(DenseMatrix self, double * diag, int n)
"""
return _densemat.DenseMatrix_Diag(self, *args)
Diag = _swig_new_instance_method(_densemat.DenseMatrix_Diag)
def Transpose(self, *args):
r"""
Transpose(DenseMatrix self)
Transpose(DenseMatrix self, DenseMatrix A)
"""
return _densemat.DenseMatrix_Transpose(self, *args)
Transpose = _swig_new_instance_method(_densemat.DenseMatrix_Transpose)
def Symmetrize(self):
r"""Symmetrize(DenseMatrix self)"""
return _densemat.DenseMatrix_Symmetrize(self)
Symmetrize = _swig_new_instance_method(_densemat.DenseMatrix_Symmetrize)
def Lump(self):
r"""Lump(DenseMatrix self)"""
return _densemat.DenseMatrix_Lump(self)
Lump = _swig_new_instance_method(_densemat.DenseMatrix_Lump)
def GradToCurl(self, curl):
r"""GradToCurl(DenseMatrix self, DenseMatrix curl)"""
return _densemat.DenseMatrix_GradToCurl(self, curl)
GradToCurl = _swig_new_instance_method(_densemat.DenseMatrix_GradToCurl)
def GradToDiv(self, div):
r"""GradToDiv(DenseMatrix self, Vector div)"""
return _densemat.DenseMatrix_GradToDiv(self, div)
GradToDiv = _swig_new_instance_method(_densemat.DenseMatrix_GradToDiv)
def CopyRows(self, A, row1, row2):
r"""CopyRows(DenseMatrix self, DenseMatrix A, int row1, int row2)"""
return _densemat.DenseMatrix_CopyRows(self, A, row1, row2)
CopyRows = _swig_new_instance_method(_densemat.DenseMatrix_CopyRows)
def CopyCols(self, A, col1, col2):
r"""CopyCols(DenseMatrix self, DenseMatrix A, int col1, int col2)"""
return _densemat.DenseMatrix_CopyCols(self, A, col1, col2)
CopyCols = _swig_new_instance_method(_densemat.DenseMatrix_CopyCols)
def CopyMNt(self, A, row_offset, col_offset):
r"""CopyMNt(DenseMatrix self, DenseMatrix A, int row_offset, int col_offset)"""
return _densemat.DenseMatrix_CopyMNt(self, A, row_offset, col_offset)
CopyMNt = _swig_new_instance_method(_densemat.DenseMatrix_CopyMNt)
def CopyMN(self, *args):
r"""
CopyMN(DenseMatrix self, DenseMatrix A, int m, int n, int Aro, int Aco)
CopyMN(DenseMatrix self, DenseMatrix A, int row_offset, int col_offset)
CopyMN(DenseMatrix self, DenseMatrix A, int m, int n, int Aro, int Aco, int row_offset, int col_offset)
"""
return _densemat.DenseMatrix_CopyMN(self, *args)
CopyMN = _swig_new_instance_method(_densemat.DenseMatrix_CopyMN)
def CopyMNDiag(self, *args):
r"""
CopyMNDiag(DenseMatrix self, double c, int n, int row_offset, int col_offset)
CopyMNDiag(DenseMatrix self, double * diag, int n, int row_offset, int col_offset)
"""
return _densemat.DenseMatrix_CopyMNDiag(self, *args)
CopyMNDiag = _swig_new_instance_method(_densemat.DenseMatrix_CopyMNDiag)
def CopyExceptMN(self, A, m, n):
r"""CopyExceptMN(DenseMatrix self, DenseMatrix A, int m, int n)"""
return _densemat.DenseMatrix_CopyExceptMN(self, A, m, n)
CopyExceptMN = _swig_new_instance_method(_densemat.DenseMatrix_CopyExceptMN)
def AddMatrix(self, *args):
r"""
AddMatrix(DenseMatrix self, DenseMatrix A, int ro, int co)
AddMatrix(DenseMatrix self, double a, DenseMatrix A, int ro, int co)
"""
return _densemat.DenseMatrix_AddMatrix(self, *args)
AddMatrix = _swig_new_instance_method(_densemat.DenseMatrix_AddMatrix)
def AddToVector(self, offset, v):
r"""AddToVector(DenseMatrix self, int offset, Vector v)"""
return _densemat.DenseMatrix_AddToVector(self, offset, v)
AddToVector = _swig_new_instance_method(_densemat.DenseMatrix_AddToVector)
def GetFromVector(self, offset, v):
r"""GetFromVector(DenseMatrix self, int offset, Vector v)"""
return _densemat.DenseMatrix_GetFromVector(self, offset, v)
GetFromVector = _swig_new_instance_method(_densemat.DenseMatrix_GetFromVector)
def AdjustDofDirection(self, dofs):
r"""AdjustDofDirection(DenseMatrix self, intArray dofs)"""
return _densemat.DenseMatrix_AdjustDofDirection(self, dofs)
AdjustDofDirection = _swig_new_instance_method(_densemat.DenseMatrix_AdjustDofDirection)
def Threshold(self, eps):
r"""Threshold(DenseMatrix self, double eps)"""
return _densemat.DenseMatrix_Threshold(self, eps)
Threshold = _swig_new_instance_method(_densemat.DenseMatrix_Threshold)
def CheckFinite(self):
r"""CheckFinite(DenseMatrix self) -> int"""
return _densemat.DenseMatrix_CheckFinite(self)
CheckFinite = _swig_new_instance_method(_densemat.DenseMatrix_CheckFinite)
def TestInversion(self):
r"""TestInversion(DenseMatrix self)"""
return _densemat.DenseMatrix_TestInversion(self)
TestInversion = _swig_new_instance_method(_densemat.DenseMatrix_TestInversion)
def MemoryUsage(self):
r"""MemoryUsage(DenseMatrix self) -> long"""
return _densemat.DenseMatrix_MemoryUsage(self)
MemoryUsage = _swig_new_instance_method(_densemat.DenseMatrix_MemoryUsage)
def Read(self, on_dev=True):
r"""Read(DenseMatrix self, bool on_dev=True) -> double const *"""
return _densemat.DenseMatrix_Read(self, on_dev)
Read = _swig_new_instance_method(_densemat.DenseMatrix_Read)
def HostRead(self):
r"""HostRead(DenseMatrix self) -> double const *"""
return _densemat.DenseMatrix_HostRead(self)
HostRead = _swig_new_instance_method(_densemat.DenseMatrix_HostRead)
def Write(self, on_dev=True):
r"""Write(DenseMatrix self, bool on_dev=True) -> double *"""
return _densemat.DenseMatrix_Write(self, on_dev)
Write = _swig_new_instance_method(_densemat.DenseMatrix_Write)
def HostWrite(self):
r"""HostWrite(DenseMatrix self) -> double *"""
return _densemat.DenseMatrix_HostWrite(self)
HostWrite = _swig_new_instance_method(_densemat.DenseMatrix_HostWrite)
def ReadWrite(self, on_dev=True):
r"""ReadWrite(DenseMatrix self, bool on_dev=True) -> double *"""
return _densemat.DenseMatrix_ReadWrite(self, on_dev)
ReadWrite = _swig_new_instance_method(_densemat.DenseMatrix_ReadWrite)
def HostReadWrite(self):
r"""HostReadWrite(DenseMatrix self) -> double *"""
return _densemat.DenseMatrix_HostReadWrite(self)
HostReadWrite = _swig_new_instance_method(_densemat.DenseMatrix_HostReadWrite)
__swig_destroy__ = _densemat.delete_DenseMatrix
def Assign(self, *args):
r"""
Assign(DenseMatrix self, double const v)
Assign(DenseMatrix self, DenseMatrix m)
Assign(DenseMatrix self, PyObject * numpymat)
"""
from numpy import ndarray, ascontiguousarray
keep_link = False
if len(args) == 1 and isinstance(args[0], ndarray):
if args[0].dtype != 'float64':
raise ValueError('Must be float64 array:' + str(args[0].dtype) + ' was given')
elif args[0].ndim != 2:
raise ValueError('Ndim must be two')
elif args[0].shape[1] != _densemat.DenseMatrix_Size(self):
raise ValueError('Length does not match')
else:
args = (ascontiguousarray(args[0]),)
val = _densemat.DenseMatrix_Assign(self, *args)
return self
return val
def __getitem__(self, *args):
i, j = args[0][0], args[0][1]
return _densemat.DenseMatrix___getitem__(self, i, j)
def __setitem__(self, *args):
i, j, v = args[0][0], args[0][1], args[1]
return _densemat.DenseMatrix___setitem__(self, i, j, v)
def GetDataArray(self):
r"""GetDataArray(DenseMatrix self) -> PyObject *"""
return _densemat.DenseMatrix_GetDataArray(self)
GetDataArray = _swig_new_instance_method(_densemat.DenseMatrix_GetDataArray)
def Print(self, *args):
r"""
Print(DenseMatrix self, std::ostream & out=mfem::out, int width_=4)
Print(DenseMatrix self, char const * file, int precision=8)
"""
return _densemat.DenseMatrix_Print(self, *args)
Print = _swig_new_instance_method(_densemat.DenseMatrix_Print)
def PrintT(self, *args):
r"""
PrintT(DenseMatrix self, std::ostream & out=mfem::out, int width_=4)
PrintT(DenseMatrix self, char const * file, int precision=8)
"""
return _densemat.DenseMatrix_PrintT(self, *args)
PrintT = _swig_new_instance_method(_densemat.DenseMatrix_PrintT)
def PrintMatlab(self, *args):
r"""
PrintMatlab(DenseMatrix self, std::ostream & out=mfem::out)
PrintMatlab(DenseMatrix self, char const * file, int precision=8)
"""
return _densemat.DenseMatrix_PrintMatlab(self, *args)
PrintMatlab = _swig_new_instance_method(_densemat.DenseMatrix_PrintMatlab)
# Register DenseMatrix in _densemat:
_densemat.DenseMatrix_swigregister(DenseMatrix)
def LinearSolve(A, X, TOL=1.e-9):
r"""LinearSolve(DenseMatrix A, double * X, double TOL=1.e-9) -> bool"""
return _densemat.LinearSolve(A, X, TOL)
LinearSolve = _densemat.LinearSolve
def AddMult(b, c, a):
r"""AddMult(DenseMatrix b, DenseMatrix c, DenseMatrix a)"""
return _densemat.AddMult(b, c, a)
AddMult = _densemat.AddMult
def AddMult_a(alpha, b, c, a):
r"""AddMult_a(double alpha, DenseMatrix b, DenseMatrix c, DenseMatrix a)"""
return _densemat.AddMult_a(alpha, b, c, a)
AddMult_a = _densemat.AddMult_a
def CalcAdjugate(a, adja):
r"""CalcAdjugate(DenseMatrix a, DenseMatrix adja)"""
return _densemat.CalcAdjugate(a, adja)
CalcAdjugate = _densemat.CalcAdjugate
def CalcAdjugateTranspose(a, adjat):
r"""CalcAdjugateTranspose(DenseMatrix a, DenseMatrix adjat)"""
return _densemat.CalcAdjugateTranspose(a, adjat)
CalcAdjugateTranspose = _densemat.CalcAdjugateTranspose
def CalcInverse(a, inva):
r"""CalcInverse(DenseMatrix a, DenseMatrix inva)"""
return _densemat.CalcInverse(a, inva)
CalcInverse = _densemat.CalcInverse
def CalcInverseTranspose(a, inva):
r"""CalcInverseTranspose(DenseMatrix a, DenseMatrix inva)"""
return _densemat.CalcInverseTranspose(a, inva)
CalcInverseTranspose = _densemat.CalcInverseTranspose
def CalcOrtho(J, n):
r"""CalcOrtho(DenseMatrix J, Vector n)"""
return _densemat.CalcOrtho(J, n)
CalcOrtho = _densemat.CalcOrtho
def MultAAt(a, aat):
r"""MultAAt(DenseMatrix a, DenseMatrix aat)"""
return _densemat.MultAAt(a, aat)
MultAAt = _densemat.MultAAt
def MultADAt(A, D, ADAt):
r"""MultADAt(DenseMatrix A, Vector D, DenseMatrix ADAt)"""
return _densemat.MultADAt(A, D, ADAt)
MultADAt = _densemat.MultADAt
def AddMultADAt(A, D, ADAt):
r"""AddMultADAt(DenseMatrix A, Vector D, DenseMatrix ADAt)"""
return _densemat.AddMultADAt(A, D, ADAt)
AddMultADAt = _densemat.AddMultADAt
def MultABt(A, B, ABt):
r"""MultABt(DenseMatrix A, DenseMatrix B, DenseMatrix ABt)"""
return _densemat.MultABt(A, B, ABt)
MultABt = _densemat.MultABt
def MultADBt(A, D, B, ADBt):
r"""MultADBt(DenseMatrix A, Vector D, DenseMatrix B, DenseMatrix ADBt)"""
return _densemat.MultADBt(A, D, B, ADBt)
MultADBt = _densemat.MultADBt
def AddMultABt(A, B, ABt):
r"""AddMultABt(DenseMatrix A, DenseMatrix B, DenseMatrix ABt)"""
return _densemat.AddMultABt(A, B, ABt)
AddMultABt = _densemat.AddMultABt
def AddMultADBt(A, D, B, ADBt):
r"""AddMultADBt(DenseMatrix A, Vector D, DenseMatrix B, DenseMatrix ADBt)"""
return _densemat.AddMultADBt(A, D, B, ADBt)
AddMultADBt = _densemat.AddMultADBt
def AddMult_a_ABt(a, A, B, ABt):
r"""AddMult_a_ABt(double a, DenseMatrix A, DenseMatrix B, DenseMatrix ABt)"""
return _densemat.AddMult_a_ABt(a, A, B, ABt)
AddMult_a_ABt = _densemat.AddMult_a_ABt
def MultAtB(A, B, AtB):
r"""MultAtB(DenseMatrix A, DenseMatrix B, DenseMatrix AtB)"""
return _densemat.MultAtB(A, B, AtB)
MultAtB = _densemat.MultAtB
def AddMult_a_AAt(a, A, AAt):
r"""AddMult_a_AAt(double a, DenseMatrix A, DenseMatrix AAt)"""
return _densemat.AddMult_a_AAt(a, A, AAt)
AddMult_a_AAt = _densemat.AddMult_a_AAt
def Mult_a_AAt(a, A, AAt):
r"""Mult_a_AAt(double a, DenseMatrix A, DenseMatrix AAt)"""
return _densemat.Mult_a_AAt(a, A, AAt)
Mult_a_AAt = _densemat.Mult_a_AAt
def MultVVt(v, vvt):
r"""MultVVt(Vector v, DenseMatrix vvt)"""
return _densemat.MultVVt(v, vvt)
MultVVt = _densemat.MultVVt
def MultVWt(v, w, VWt):
r"""MultVWt(Vector v, Vector w, DenseMatrix VWt)"""
return _densemat.MultVWt(v, w, VWt)
MultVWt = _densemat.MultVWt
def AddMultVWt(v, w, VWt):
r"""AddMultVWt(Vector v, Vector w, DenseMatrix VWt)"""
return _densemat.AddMultVWt(v, w, VWt)
AddMultVWt = _densemat.AddMultVWt
def AddMultVVt(v, VWt):
r"""AddMultVVt(Vector v, DenseMatrix VWt)"""
return _densemat.AddMultVVt(v, VWt)
AddMultVVt = _densemat.AddMultVVt
def AddMult_a_VWt(a, v, w, VWt):
r"""AddMult_a_VWt(double const a, Vector v, Vector w, DenseMatrix VWt)"""
return _densemat.AddMult_a_VWt(a, v, w, VWt)
AddMult_a_VWt = _densemat.AddMult_a_VWt
def AddMult_a_VVt(a, v, VVt):
r"""AddMult_a_VVt(double const a, Vector v, DenseMatrix VVt)"""
return _densemat.AddMult_a_VVt(a, v, VVt)
AddMult_a_VVt = _densemat.AddMult_a_VVt
class LUFactors(object):
r"""Proxy of C++ mfem::LUFactors class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
data = property(_densemat.LUFactors_data_get, _densemat.LUFactors_data_set, doc=r"""data : p.double""")
ipiv = property(_densemat.LUFactors_ipiv_get, _densemat.LUFactors_ipiv_set, doc=r"""ipiv : p.int""")
ipiv_base = _densemat.LUFactors_ipiv_base
def __init__(self, *args):
r"""
__init__(LUFactors self) -> LUFactors
__init__(LUFactors self, double * data_, int * ipiv_) -> LUFactors
"""
_densemat.LUFactors_swiginit(self, _densemat.new_LUFactors(*args))
def Factor(self, m, TOL=0.0):
r"""Factor(LUFactors self, int m, double TOL=0.0) -> bool"""
return _densemat.LUFactors_Factor(self, m, TOL)
Factor = _swig_new_instance_method(_densemat.LUFactors_Factor)
def Det(self, m):
r"""Det(LUFactors self, int m) -> double"""
return _densemat.LUFactors_Det(self, m)
Det = _swig_new_instance_method(_densemat.LUFactors_Det)
def Mult(self, m, n, X):
r"""Mult(LUFactors self, int m, int n, double * X)"""
return _densemat.LUFactors_Mult(self, m, n, X)
Mult = _swig_new_instance_method(_densemat.LUFactors_Mult)
def LSolve(self, m, n, X):
r"""LSolve(LUFactors self, int m, int n, double * X)"""
return _densemat.LUFactors_LSolve(self, m, n, X)
LSolve = _swig_new_instance_method(_densemat.LUFactors_LSolve)
def USolve(self, m, n, X):
r"""USolve(LUFactors self, int m, int n, double * X)"""
return _densemat.LUFactors_USolve(self, m, n, X)
USolve = _swig_new_instance_method(_densemat.LUFactors_USolve)
def Solve(self, m, n, X):
r"""Solve(LUFactors self, int m, int n, double * X)"""
return _densemat.LUFactors_Solve(self, m, n, X)
Solve = _swig_new_instance_method(_densemat.LUFactors_Solve)
def RightSolve(self, m, n, X):
r"""RightSolve(LUFactors self, int m, int n, double * X)"""
return _densemat.LUFactors_RightSolve(self, m, n, X)
RightSolve = _swig_new_instance_method(_densemat.LUFactors_RightSolve)
def GetInverseMatrix(self, m, X):
r"""GetInverseMatrix(LUFactors self, int m, double * X)"""
return _densemat.LUFactors_GetInverseMatrix(self, m, X)
GetInverseMatrix = _swig_new_instance_method(_densemat.LUFactors_GetInverseMatrix)
@staticmethod
def SubMult(m, n, r, A21, X1, X2):
r"""SubMult(int m, int n, int r, double const * A21, double const * X1, double * X2)"""
return _densemat.LUFactors_SubMult(m, n, r, A21, X1, X2)
SubMult = _swig_new_static_method(_densemat.LUFactors_SubMult)
def BlockFactor(self, m, n, A12, A21, A22):
r"""BlockFactor(LUFactors self, int m, int n, double * A12, double * A21, double * A22)"""
return _densemat.LUFactors_BlockFactor(self, m, n, A12, A21, A22)
BlockFactor = _swig_new_instance_method(_densemat.LUFactors_BlockFactor)
def BlockForwSolve(self, m, n, r, L21, B1, B2):
r"""BlockForwSolve(LUFactors self, int m, int n, int r, double const * L21, double * B1, double * B2)"""
return _densemat.LUFactors_BlockForwSolve(self, m, n, r, L21, B1, B2)
BlockForwSolve = _swig_new_instance_method(_densemat.LUFactors_BlockForwSolve)
def BlockBackSolve(self, m, n, r, U12, X2, Y1):
r"""BlockBackSolve(LUFactors self, int m, int n, int r, double const * U12, double const * X2, double * Y1)"""
return _densemat.LUFactors_BlockBackSolve(self, m, n, r, U12, X2, Y1)
BlockBackSolve = _swig_new_instance_method(_densemat.LUFactors_BlockBackSolve)
__swig_destroy__ = _densemat.delete_LUFactors
# Register LUFactors in _densemat:
_densemat.LUFactors_swigregister(LUFactors)
def LUFactors_SubMult(m, n, r, A21, X1, X2):
r"""LUFactors_SubMult(int m, int n, int r, double const * A21, double const * X1, double * X2)"""
return _densemat.LUFactors_SubMult(m, n, r, A21, X1, X2)
LUFactors_SubMult = _densemat.LUFactors_SubMult
class DenseMatrixInverse(mfem._par.matrix.MatrixInverse):
r"""Proxy of C++ mfem::DenseMatrixInverse class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
r"""
__init__(DenseMatrixInverse self) -> DenseMatrixInverse
__init__(DenseMatrixInverse self, DenseMatrix mat) -> DenseMatrixInverse
__init__(DenseMatrixInverse self, DenseMatrix mat) -> DenseMatrixInverse
"""
_densemat.DenseMatrixInverse_swiginit(self, _densemat.new_DenseMatrixInverse(*args))
def Size(self):
r"""Size(DenseMatrixInverse self) -> int"""
return _densemat.DenseMatrixInverse_Size(self)
Size = _swig_new_instance_method(_densemat.DenseMatrixInverse_Size)
def Factor(self, *args):
r"""
Factor(DenseMatrixInverse self)
Factor(DenseMatrixInverse self, DenseMatrix mat)
"""
return _densemat.DenseMatrixInverse_Factor(self, *args)
Factor = _swig_new_instance_method(_densemat.DenseMatrixInverse_Factor)
def SetOperator(self, op):
r"""SetOperator(DenseMatrixInverse self, Operator op)"""
return _densemat.DenseMatrixInverse_SetOperator(self, op)
SetOperator = _swig_new_instance_method(_densemat.DenseMatrixInverse_SetOperator)
def Mult(self, *args):
r"""
Mult(DenseMatrixInverse self, double const * x, double * y)
Mult(DenseMatrixInverse self, Vector x, Vector y)
Mult(DenseMatrixInverse self, DenseMatrix B, DenseMatrix X)
Mult(DenseMatrixInverse self, DenseMatrix X)
"""
return _densemat.DenseMatrixInverse_Mult(self, *args)
Mult = _swig_new_instance_method(_densemat.DenseMatrixInverse_Mult)
def GetInverseMatrix(self, Ainv):
r"""GetInverseMatrix(DenseMatrixInverse self, DenseMatrix Ainv)"""
return _densemat.DenseMatrixInverse_GetInverseMatrix(self, Ainv)
GetInverseMatrix = _swig_new_instance_method(_densemat.DenseMatrixInverse_GetInverseMatrix)
def Det(self):
r"""Det(DenseMatrixInverse self) -> double"""
return _densemat.DenseMatrixInverse_Det(self)
Det = _swig_new_instance_method(_densemat.DenseMatrixInverse_Det)
def TestInversion(self):
r"""TestInversion(DenseMatrixInverse self)"""
return _densemat.DenseMatrixInverse_TestInversion(self)
TestInversion = _swig_new_instance_method(_densemat.DenseMatrixInverse_TestInversion)
__swig_destroy__ = _densemat.delete_DenseMatrixInverse
# Register DenseMatrixInverse in _densemat:
_densemat.DenseMatrixInverse_swigregister(DenseMatrixInverse)
class DenseMatrixEigensystem(object):
r"""Proxy of C++ mfem::DenseMatrixEigensystem class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
r"""
__init__(DenseMatrixEigensystem self, DenseMatrix m) -> DenseMatrixEigensystem
__init__(DenseMatrixEigensystem self, DenseMatrixEigensystem other) -> DenseMatrixEigensystem
"""
_densemat.DenseMatrixEigensystem_swiginit(self, _densemat.new_DenseMatrixEigensystem(*args))
def Eval(self):
r"""Eval(DenseMatrixEigensystem self)"""
return _densemat.DenseMatrixEigensystem_Eval(self)
Eval = _swig_new_instance_method(_densemat.DenseMatrixEigensystem_Eval)
def Eigenvalues(self):
r"""Eigenvalues(DenseMatrixEigensystem self) -> Vector"""
return _densemat.DenseMatrixEigensystem_Eigenvalues(self)
Eigenvalues = _swig_new_instance_method(_densemat.DenseMatrixEigensystem_Eigenvalues)
def Eigenvectors(self):
r"""Eigenvectors(DenseMatrixEigensystem self) -> DenseMatrix"""
return _densemat.DenseMatrixEigensystem_Eigenvectors(self)
Eigenvectors = _swig_new_instance_method(_densemat.DenseMatrixEigensystem_Eigenvectors)
def Eigenvalue(self, i):
r"""Eigenvalue(DenseMatrixEigensystem self, int i) -> double"""
return _densemat.DenseMatrixEigensystem_Eigenvalue(self, i)
Eigenvalue = _swig_new_instance_method(_densemat.DenseMatrixEigensystem_Eigenvalue)
def Eigenvector(self, i):
r"""Eigenvector(DenseMatrixEigensystem self, int i) -> Vector"""
return _densemat.DenseMatrixEigensystem_Eigenvector(self, i)
Eigenvector = _swig_new_instance_method(_densemat.DenseMatrixEigensystem_Eigenvector)
__swig_destroy__ = _densemat.delete_DenseMatrixEigensystem
# Register DenseMatrixEigensystem in _densemat:
_densemat.DenseMatrixEigensystem_swigregister(DenseMatrixEigensystem)
class DenseMatrixSVD(object):
r"""Proxy of C++ mfem::DenseMatrixSVD class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
r"""
__init__(DenseMatrixSVD self, DenseMatrix M) -> DenseMatrixSVD
__init__(DenseMatrixSVD self, int h, int w) -> DenseMatrixSVD
"""
_densemat.DenseMatrixSVD_swiginit(self, _densemat.new_DenseMatrixSVD(*args))
def Eval(self, M):
r"""Eval(DenseMatrixSVD self, DenseMatrix M)"""
return _densemat.DenseMatrixSVD_Eval(self, M)
Eval = _swig_new_instance_method(_densemat.DenseMatrixSVD_Eval)
def Singularvalues(self):
r"""Singularvalues(DenseMatrixSVD self) -> Vector"""
return _densemat.DenseMatrixSVD_Singularvalues(self)
Singularvalues = _swig_new_instance_method(_densemat.DenseMatrixSVD_Singularvalues)
def Singularvalue(self, i):
r"""Singularvalue(DenseMatrixSVD self, int i) -> double"""
return _densemat.DenseMatrixSVD_Singularvalue(self, i)
Singularvalue = _swig_new_instance_method(_densemat.DenseMatrixSVD_Singularvalue)
__swig_destroy__ = _densemat.delete_DenseMatrixSVD
# Register DenseMatrixSVD in _densemat:
_densemat.DenseMatrixSVD_swigregister(DenseMatrixSVD)
class DenseTensor(object):
r"""Proxy of C++ mfem::DenseTensor class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
r"""
__init__(DenseTensor self) -> DenseTensor
__init__(DenseTensor self, int i, int j, int k) -> DenseTensor
__init__(DenseTensor self, DenseTensor other) -> DenseTensor
"""
_densemat.DenseTensor_swiginit(self, _densemat.new_DenseTensor(*args))
def SizeI(self):
r"""SizeI(DenseTensor self) -> int"""
return _densemat.DenseTensor_SizeI(self)
SizeI = _swig_new_instance_method(_densemat.DenseTensor_SizeI)
def SizeJ(self):
r"""SizeJ(DenseTensor self) -> int"""
return _densemat.DenseTensor_SizeJ(self)
SizeJ = _swig_new_instance_method(_densemat.DenseTensor_SizeJ)
def SizeK(self):
r"""SizeK(DenseTensor self) -> int"""
return _densemat.DenseTensor_SizeK(self)
SizeK = _swig_new_instance_method(_densemat.DenseTensor_SizeK)
def TotalSize(self):
r"""TotalSize(DenseTensor self) -> int"""
return _densemat.DenseTensor_TotalSize(self)
TotalSize = _swig_new_instance_method(_densemat.DenseTensor_TotalSize)
def SetSize(self, i, j, k):
r"""SetSize(DenseTensor self, int i, int j, int k)"""
return _densemat.DenseTensor_SetSize(self, i, j, k)
SetSize = _swig_new_instance_method(_densemat.DenseTensor_SetSize)
def UseExternalData(self, ext_data, i, j, k):
r"""UseExternalData(DenseTensor self, double * ext_data, int i, int j, int k)"""
return _densemat.DenseTensor_UseExternalData(self, ext_data, i, j, k)
UseExternalData = _swig_new_instance_method(_densemat.DenseTensor_UseExternalData)
def __call__(self, *args):
r"""
__call__(DenseTensor self, int k) -> DenseMatrix
__call__(DenseTensor self, int k) -> DenseMatrix
__call__(DenseTensor self, int i, int j, int k) -> double
__call__(DenseTensor self, int i, int j, int k) -> double const &
"""
return _densemat.DenseTensor___call__(self, *args)
__call__ = _swig_new_instance_method(_densemat.DenseTensor___call__)
def GetData(self, k):
r"""GetData(DenseTensor self, int k) -> double *"""
return _densemat.DenseTensor_GetData(self, k)
GetData = _swig_new_instance_method(_densemat.DenseTensor_GetData)
def Data(self, *args):
r"""
Data(DenseTensor self) -> double
Data(DenseTensor self) -> double const *
"""
return _densemat.DenseTensor_Data(self, *args)
Data = _swig_new_instance_method(_densemat.DenseTensor_Data)
def GetMemory(self, *args):
r"""
GetMemory(DenseTensor self) -> mfem::Memory< double >
GetMemory(DenseTensor self) -> mfem::Memory< double > const &
"""
return _densemat.DenseTensor_GetMemory(self, *args)
GetMemory = _swig_new_instance_method(_densemat.DenseTensor_GetMemory)
def AddMult(self, elem_dof, x, y):
r"""AddMult(DenseTensor self, mfem::Table const & elem_dof, Vector x, Vector y)"""
return _densemat.DenseTensor_AddMult(self, elem_dof, x, y)
AddMult = _swig_new_instance_method(_densemat.DenseTensor_AddMult)
def Clear(self):
r"""Clear(DenseTensor self)"""
return _densemat.DenseTensor_Clear(self)
Clear = _swig_new_instance_method(_densemat.DenseTensor_Clear)
def MemoryUsage(self):
r"""MemoryUsage(DenseTensor self) -> long"""
return _densemat.DenseTensor_MemoryUsage(self)
MemoryUsage = _swig_new_instance_method(_densemat.DenseTensor_MemoryUsage)
def Read(self, on_dev=True):
r"""Read(DenseTensor self, bool on_dev=True) -> double const *"""
return _densemat.DenseTensor_Read(self, on_dev)
Read = _swig_new_instance_method(_densemat.DenseTensor_Read)
def HostRead(self):
r"""HostRead(DenseTensor self) -> double const *"""
return _densemat.DenseTensor_HostRead(self)
HostRead = _swig_new_instance_method(_densemat.DenseTensor_HostRead)
def Write(self, on_dev=True):
r"""Write(DenseTensor self, bool on_dev=True) -> double *"""
return _densemat.DenseTensor_Write(self, on_dev)
Write = _swig_new_instance_method(_densemat.DenseTensor_Write)
def HostWrite(self):
r"""HostWrite(DenseTensor self) -> double *"""
return _densemat.DenseTensor_HostWrite(self)
HostWrite = _swig_new_instance_method(_densemat.DenseTensor_HostWrite)
def ReadWrite(self, on_dev=True):
r"""ReadWrite(DenseTensor self, bool on_dev=True) -> double *"""
return _densemat.DenseTensor_ReadWrite(self, on_dev)
ReadWrite = _swig_new_instance_method(_densemat.DenseTensor_ReadWrite)
def HostReadWrite(self):
r"""HostReadWrite(DenseTensor self) -> double *"""
return _densemat.DenseTensor_HostReadWrite(self)
HostReadWrite = _swig_new_instance_method(_densemat.DenseTensor_HostReadWrite)
__swig_destroy__ = _densemat.delete_DenseTensor
def Assign(self, c):
r"""Assign(DenseTensor self, double const c)"""
val = _densemat.DenseTensor_Assign(self, c)
return self
return val
def __getitem__(self, *args):
try:
check = len(args[0]) == 3
except:
check = False
if check:
i, j, k = args[0][0], args[0][1], args[0][2]
return _densemat.DenseTensor___getitem__(self, i, j, k)
try:
check = int(args[0])
except:
check = -1
if check >= 0:
return _densemat.DenseTensor___getitem__(self, check)
def __setitem__(self, *args):
i, j, k, v = args[0][0], args[0][1], args[0][2], args[1]
return _densemat.DenseTensor___setitem__(self, i, j, k, v)
def GetDataArray(self):
r"""GetDataArray(DenseTensor self) -> PyObject *"""
return _densemat.DenseTensor_GetDataArray(self)
GetDataArray = _swig_new_instance_method(_densemat.DenseTensor_GetDataArray)
# Register DenseTensor in _densemat:
_densemat.DenseTensor_swigregister(DenseTensor)
def BatchLUFactor(Mlu, P, TOL=0.0):
r"""BatchLUFactor(DenseTensor Mlu, intArray P, double const TOL=0.0)"""
return _densemat.BatchLUFactor(Mlu, P, TOL)
BatchLUFactor = _densemat.BatchLUFactor
def BatchLUSolve(Mlu, P, X):
r"""BatchLUSolve(DenseTensor Mlu, intArray P, Vector X)"""
return _densemat.BatchLUSolve(Mlu, P, X)
BatchLUSolve = _densemat.BatchLUSolve
|
en
| 0.38803
|
# This file was automatically generated by SWIG (http://www.swig.org). # Version 4.0.2 # # Do not make changes to this file unless you know what you are doing--modify # the SWIG interface file instead. # Import the low-level C/C++ module Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass Meta class to enforce nondynamic attributes (no new attributes) for a class Proxy of C++ mfem::DenseMatrix class. __init__(DenseMatrix self) -> DenseMatrix __init__(DenseMatrix self, DenseMatrix arg2) -> DenseMatrix __init__(DenseMatrix self, int s) -> DenseMatrix __init__(DenseMatrix self, int m, int n) -> DenseMatrix __init__(DenseMatrix self, DenseMatrix mat, char ch) -> DenseMatrix __init__(DenseMatrix self, double * d, int h, int w) -> DenseMatrix UseExternalData(DenseMatrix self, double * d, int h, int w) Reset(DenseMatrix self, double * d, int h, int w) ClearExternalData(DenseMatrix self) Clear(DenseMatrix self) Size(DenseMatrix self) -> int SetSize(DenseMatrix self, int s) SetSize(DenseMatrix self, int h, int w) Data(DenseMatrix self) -> double * GetData(DenseMatrix self) -> double * GetMemory(DenseMatrix self) -> mfem::Memory< double > GetMemory(DenseMatrix self) -> mfem::Memory< double > const & OwnsData(DenseMatrix self) -> bool __call__(DenseMatrix self, int i, int j) -> double __call__(DenseMatrix self, int i, int j) -> double const & __mul__(DenseMatrix self, DenseMatrix m) -> double Trace(DenseMatrix self) -> double Elem(DenseMatrix self, int i, int j) -> double Elem(DenseMatrix self, int i, int j) -> double const & Mult(DenseMatrix self, double const * x, double * y) Mult(DenseMatrix self, Vector x, Vector y) MultTranspose(DenseMatrix self, double const * x, double * y) MultTranspose(DenseMatrix self, Vector x, Vector y) AddMult(DenseMatrix self, Vector x, Vector y) AddMultTranspose(DenseMatrix self, Vector x, Vector y) AddMult_a(DenseMatrix self, double a, Vector x, Vector y) AddMultTranspose_a(DenseMatrix 
self, double a, Vector x, Vector y) LeftScaling(DenseMatrix self, Vector s) InvLeftScaling(DenseMatrix self, Vector s) RightScaling(DenseMatrix self, Vector s) InvRightScaling(DenseMatrix self, Vector s) SymmetricScaling(DenseMatrix self, Vector s) InvSymmetricScaling(DenseMatrix self, Vector s) InnerProduct(DenseMatrix self, double const * x, double const * y) -> double InnerProduct(DenseMatrix self, Vector x, Vector y) -> double Inverse(DenseMatrix self) -> MatrixInverse Invert(DenseMatrix self) SquareRootInverse(DenseMatrix self) Det(DenseMatrix self) -> double Weight(DenseMatrix self) -> double Set(DenseMatrix self, double alpha, double const * A) Set(DenseMatrix self, double alpha, DenseMatrix A) Add(DenseMatrix self, double const c, DenseMatrix A) Neg(DenseMatrix self) Norm2(DenseMatrix self, double * v) MaxMaxNorm(DenseMatrix self) -> double FNorm(DenseMatrix self) -> double FNorm2(DenseMatrix self) -> double Eigenvalues(DenseMatrix self, Vector ev) Eigenvalues(DenseMatrix self, Vector ev, DenseMatrix evect) Eigenvalues(DenseMatrix self, DenseMatrix b, Vector ev) Eigenvalues(DenseMatrix self, DenseMatrix b, Vector ev, DenseMatrix evect) Eigensystem(DenseMatrix self, Vector ev, DenseMatrix evect) Eigensystem(DenseMatrix self, DenseMatrix b, Vector ev, DenseMatrix evect) SingularValues(DenseMatrix self, Vector sv) Rank(DenseMatrix self, double tol) -> int CalcSingularvalue(DenseMatrix self, int const i) -> double CalcEigenvalues(DenseMatrix self, double * _lambda, double * vec) GetRow(DenseMatrix self, int r, Vector row) GetColumn(DenseMatrix self, int c, Vector col) GetColumn(DenseMatrix self, int col) -> double GetColumn(DenseMatrix self, int col) -> double const * GetColumnReference(DenseMatrix self, int c, Vector col) SetRow(DenseMatrix self, int r, double const * row) SetRow(DenseMatrix self, int r, Vector row) SetRow(DenseMatrix self, int row, double value) SetCol(DenseMatrix self, int c, double const * col) SetCol(DenseMatrix self, int c, Vector col) 
SetCol(DenseMatrix self, int col, double value) GetDiag(DenseMatrix self, Vector d) Getl1Diag(DenseMatrix self, Vector l) GetRowSums(DenseMatrix self, Vector l) Diag(DenseMatrix self, double c, int n) Diag(DenseMatrix self, double * diag, int n) Transpose(DenseMatrix self) Transpose(DenseMatrix self, DenseMatrix A) Symmetrize(DenseMatrix self) Lump(DenseMatrix self) GradToCurl(DenseMatrix self, DenseMatrix curl) GradToDiv(DenseMatrix self, Vector div) CopyRows(DenseMatrix self, DenseMatrix A, int row1, int row2) CopyCols(DenseMatrix self, DenseMatrix A, int col1, int col2) CopyMNt(DenseMatrix self, DenseMatrix A, int row_offset, int col_offset) CopyMN(DenseMatrix self, DenseMatrix A, int m, int n, int Aro, int Aco) CopyMN(DenseMatrix self, DenseMatrix A, int row_offset, int col_offset) CopyMN(DenseMatrix self, DenseMatrix A, int m, int n, int Aro, int Aco, int row_offset, int col_offset) CopyMNDiag(DenseMatrix self, double c, int n, int row_offset, int col_offset) CopyMNDiag(DenseMatrix self, double * diag, int n, int row_offset, int col_offset) CopyExceptMN(DenseMatrix self, DenseMatrix A, int m, int n) AddMatrix(DenseMatrix self, DenseMatrix A, int ro, int co) AddMatrix(DenseMatrix self, double a, DenseMatrix A, int ro, int co) AddToVector(DenseMatrix self, int offset, Vector v) GetFromVector(DenseMatrix self, int offset, Vector v) AdjustDofDirection(DenseMatrix self, intArray dofs) Threshold(DenseMatrix self, double eps) CheckFinite(DenseMatrix self) -> int TestInversion(DenseMatrix self) MemoryUsage(DenseMatrix self) -> long Read(DenseMatrix self, bool on_dev=True) -> double const * HostRead(DenseMatrix self) -> double const * Write(DenseMatrix self, bool on_dev=True) -> double * HostWrite(DenseMatrix self) -> double * ReadWrite(DenseMatrix self, bool on_dev=True) -> double * HostReadWrite(DenseMatrix self) -> double * Assign(DenseMatrix self, double const v) Assign(DenseMatrix self, DenseMatrix m) Assign(DenseMatrix self, PyObject * numpymat) 
GetDataArray(DenseMatrix self) -> PyObject * Print(DenseMatrix self, std::ostream & out=mfem::out, int width_=4) Print(DenseMatrix self, char const * file, int precision=8) PrintT(DenseMatrix self, std::ostream & out=mfem::out, int width_=4) PrintT(DenseMatrix self, char const * file, int precision=8) PrintMatlab(DenseMatrix self, std::ostream & out=mfem::out) PrintMatlab(DenseMatrix self, char const * file, int precision=8) # Register DenseMatrix in _densemat: LinearSolve(DenseMatrix A, double * X, double TOL=1.e-9) -> bool AddMult(DenseMatrix b, DenseMatrix c, DenseMatrix a) AddMult_a(double alpha, DenseMatrix b, DenseMatrix c, DenseMatrix a) CalcAdjugate(DenseMatrix a, DenseMatrix adja) CalcAdjugateTranspose(DenseMatrix a, DenseMatrix adjat) CalcInverse(DenseMatrix a, DenseMatrix inva) CalcInverseTranspose(DenseMatrix a, DenseMatrix inva) CalcOrtho(DenseMatrix J, Vector n) MultAAt(DenseMatrix a, DenseMatrix aat) MultADAt(DenseMatrix A, Vector D, DenseMatrix ADAt) AddMultADAt(DenseMatrix A, Vector D, DenseMatrix ADAt) MultABt(DenseMatrix A, DenseMatrix B, DenseMatrix ABt) MultADBt(DenseMatrix A, Vector D, DenseMatrix B, DenseMatrix ADBt) AddMultABt(DenseMatrix A, DenseMatrix B, DenseMatrix ABt) AddMultADBt(DenseMatrix A, Vector D, DenseMatrix B, DenseMatrix ADBt) AddMult_a_ABt(double a, DenseMatrix A, DenseMatrix B, DenseMatrix ABt) MultAtB(DenseMatrix A, DenseMatrix B, DenseMatrix AtB) AddMult_a_AAt(double a, DenseMatrix A, DenseMatrix AAt) Mult_a_AAt(double a, DenseMatrix A, DenseMatrix AAt) MultVVt(Vector v, DenseMatrix vvt) MultVWt(Vector v, Vector w, DenseMatrix VWt) AddMultVWt(Vector v, Vector w, DenseMatrix VWt) AddMultVVt(Vector v, DenseMatrix VWt) AddMult_a_VWt(double const a, Vector v, Vector w, DenseMatrix VWt) AddMult_a_VVt(double const a, Vector v, DenseMatrix VVt) Proxy of C++ mfem::LUFactors class. 
data : p.double ipiv : p.int __init__(LUFactors self) -> LUFactors __init__(LUFactors self, double * data_, int * ipiv_) -> LUFactors Factor(LUFactors self, int m, double TOL=0.0) -> bool Det(LUFactors self, int m) -> double Mult(LUFactors self, int m, int n, double * X) LSolve(LUFactors self, int m, int n, double * X) USolve(LUFactors self, int m, int n, double * X) Solve(LUFactors self, int m, int n, double * X) RightSolve(LUFactors self, int m, int n, double * X) GetInverseMatrix(LUFactors self, int m, double * X) SubMult(int m, int n, int r, double const * A21, double const * X1, double * X2) BlockFactor(LUFactors self, int m, int n, double * A12, double * A21, double * A22) BlockForwSolve(LUFactors self, int m, int n, int r, double const * L21, double * B1, double * B2) BlockBackSolve(LUFactors self, int m, int n, int r, double const * U12, double const * X2, double * Y1) # Register LUFactors in _densemat: LUFactors_SubMult(int m, int n, int r, double const * A21, double const * X1, double * X2) Proxy of C++ mfem::DenseMatrixInverse class. __init__(DenseMatrixInverse self) -> DenseMatrixInverse __init__(DenseMatrixInverse self, DenseMatrix mat) -> DenseMatrixInverse __init__(DenseMatrixInverse self, DenseMatrix mat) -> DenseMatrixInverse Size(DenseMatrixInverse self) -> int Factor(DenseMatrixInverse self) Factor(DenseMatrixInverse self, DenseMatrix mat) SetOperator(DenseMatrixInverse self, Operator op) Mult(DenseMatrixInverse self, double const * x, double * y) Mult(DenseMatrixInverse self, Vector x, Vector y) Mult(DenseMatrixInverse self, DenseMatrix B, DenseMatrix X) Mult(DenseMatrixInverse self, DenseMatrix X) GetInverseMatrix(DenseMatrixInverse self, DenseMatrix Ainv) Det(DenseMatrixInverse self) -> double TestInversion(DenseMatrixInverse self) # Register DenseMatrixInverse in _densemat: Proxy of C++ mfem::DenseMatrixEigensystem class. 
__init__(DenseMatrixEigensystem self, DenseMatrix m) -> DenseMatrixEigensystem __init__(DenseMatrixEigensystem self, DenseMatrixEigensystem other) -> DenseMatrixEigensystem Eval(DenseMatrixEigensystem self) Eigenvalues(DenseMatrixEigensystem self) -> Vector Eigenvectors(DenseMatrixEigensystem self) -> DenseMatrix Eigenvalue(DenseMatrixEigensystem self, int i) -> double Eigenvector(DenseMatrixEigensystem self, int i) -> Vector # Register DenseMatrixEigensystem in _densemat: Proxy of C++ mfem::DenseMatrixSVD class. __init__(DenseMatrixSVD self, DenseMatrix M) -> DenseMatrixSVD __init__(DenseMatrixSVD self, int h, int w) -> DenseMatrixSVD Eval(DenseMatrixSVD self, DenseMatrix M) Singularvalues(DenseMatrixSVD self) -> Vector Singularvalue(DenseMatrixSVD self, int i) -> double # Register DenseMatrixSVD in _densemat: Proxy of C++ mfem::DenseTensor class. __init__(DenseTensor self) -> DenseTensor __init__(DenseTensor self, int i, int j, int k) -> DenseTensor __init__(DenseTensor self, DenseTensor other) -> DenseTensor SizeI(DenseTensor self) -> int SizeJ(DenseTensor self) -> int SizeK(DenseTensor self) -> int TotalSize(DenseTensor self) -> int SetSize(DenseTensor self, int i, int j, int k) UseExternalData(DenseTensor self, double * ext_data, int i, int j, int k) __call__(DenseTensor self, int k) -> DenseMatrix __call__(DenseTensor self, int k) -> DenseMatrix __call__(DenseTensor self, int i, int j, int k) -> double __call__(DenseTensor self, int i, int j, int k) -> double const & GetData(DenseTensor self, int k) -> double * Data(DenseTensor self) -> double Data(DenseTensor self) -> double const * GetMemory(DenseTensor self) -> mfem::Memory< double > GetMemory(DenseTensor self) -> mfem::Memory< double > const & AddMult(DenseTensor self, mfem::Table const & elem_dof, Vector x, Vector y) Clear(DenseTensor self) MemoryUsage(DenseTensor self) -> long Read(DenseTensor self, bool on_dev=True) -> double const * HostRead(DenseTensor self) -> double const * Write(DenseTensor self, 
bool on_dev=True) -> double * HostWrite(DenseTensor self) -> double * ReadWrite(DenseTensor self, bool on_dev=True) -> double * HostReadWrite(DenseTensor self) -> double * Assign(DenseTensor self, double const c) GetDataArray(DenseTensor self) -> PyObject * # Register DenseTensor in _densemat: BatchLUFactor(DenseTensor Mlu, intArray P, double const TOL=0.0) BatchLUSolve(DenseTensor Mlu, intArray P, Vector X)
| 1.991092
| 2
|
ABC143/ABC143d.py
|
VolgaKurvar/AtCoder
| 0
|
6628431
|
<filename>ABC143/ABC143d.py
# ABC143d
# Count index triples (x, y, z) with x < y < z whose stick lengths can
# form a non-degenerate triangle.
import bisect
import sys
input = sys.stdin.readline
sys.setrecursionlimit(10**6)
n = int(input())
l = list(map(int, input().split()))
ans = 0
# Sorting lets both triangle-inequality bounds be located by binary search.
l.sort()
# print(l)
for x in range(n):
    for y in range(x+1, n):
        # First index z > y with l[z] > |l[x] - l[y]| (lower bound on the third side).
        t = bisect.bisect_left(l, max(l[x] - l[y], l[y] - l[x])+1, y+1)
        #print(l[x], l[y], max(l[x] - l[y], l[y] - l[x]), t, l[t])
        # First index z > y with l[z] >= l[x] + l[y] (exclusive upper bound).
        t2 = bisect.bisect_left(l, l[x] + l[y], y+1)
        #print(t2, l[t2] if t2 < n else None)
        #print(t2 - t)
        # Number of valid third sides for this (x, y) pair.
        t3 = t2 - t
        if t3 > 0:
            ans += t3
print(ans)
|
<filename>ABC143/ABC143d.py
# ABC143d
# Count index triples (x, y, z) with x < y < z whose stick lengths can
# form a non-degenerate triangle.
import bisect
import sys
input = sys.stdin.readline
sys.setrecursionlimit(10**6)
n = int(input())
l = list(map(int, input().split()))
ans = 0
# Sorting lets both triangle-inequality bounds be located by binary search.
l.sort()
# print(l)
for x in range(n):
    for y in range(x+1, n):
        # First index z > y with l[z] > |l[x] - l[y]| (lower bound on the third side).
        t = bisect.bisect_left(l, max(l[x] - l[y], l[y] - l[x])+1, y+1)
        #print(l[x], l[y], max(l[x] - l[y], l[y] - l[x]), t, l[t])
        # First index z > y with l[z] >= l[x] + l[y] (exclusive upper bound).
        t2 = bisect.bisect_left(l, l[x] + l[y], y+1)
        #print(t2, l[t2] if t2 < n else None)
        #print(t2 - t)
        # Number of valid third sides for this (x, y) pair.
        t3 = t2 - t
        if t3 > 0:
            ans += t3
print(ans)
|
en
| 0.259213
|
# ABC143d # print(l) #print(l[x], l[y], max(l[x] - l[y], l[y] - l[x]), t, l[t]) #print(t2, l[t2] if t2 < n else None) #print(t2 - t)
| 2.614245
| 3
|
CIS41B/class_examples/Args.py
|
jackh423/python
| 1
|
6628432
|
<reponame>jackh423/python<filename>CIS41B/class_examples/Args.py
def Pack(*args, **kwargs):
    """Show that extra positionals pack into a tuple and extra keywords into a dict."""
    for packed in (args, kwargs):
        print(type(packed))
def Add(*num):
    """Return the sum of every positional argument (0 when none are given)."""
    return sum(num)
def Multiply(*args):
    """Return the product of every positional argument (1 when none are given)."""
    result = 1
    for factor in args:
        result *= factor
    return result
def Average(*args):
    """Print the packed argument tuple and return its arithmetic mean.

    Raises ZeroDivisionError when called with no arguments, matching the
    original behavior.
    """
    print('Packed Argument Tuple ->', args)
    return sum(args) / len(args)
def Identify(**data):
    """Print the type of the packed keyword dict and one "<key> is <value>" line per entry."""
    print("\nData type of argument:", type(data))
    for key in data:
        print("{} is {}".format(key, data[key]))
def Concatenate(**words):
    """Return all keyword-argument values joined into one string, in insertion order."""
    return "".join(words.values())
def Sum(**numbers):
    """Return the sum of all keyword-argument values (0 when none are given)."""
    return sum(numbers.values())
# Demo: packing of positional and keyword arguments.
Pack()
# *args examples.
print(Add(3,5))
print(Add(4,5,6,7))
print(Add(1,2,3,5,6))
# Unpack a tuple into positional arguments.
t = (10, 30, 60)
print(Add(*t))
print(Average(1, 2, 3))
print(Average(1, 2, 3, 4, 5))
print(Average(1, 2, 3, 4, 5, 6, 7, 8, 9))
print(Multiply(1, 2, 3))
print(Multiply(1, 2, 3, 4, 5))
print(Multiply(1, 2, 3, 4, 5, 6, 7, 8, 9))
# **kwargs examples.
Identify(Firstname="Alice", Lastname="Zhu")
Identify(Firstname="Bob", Lastname="Smith", Age=25, Phone=1234567890)
Identify(Firstname="John", Lastname="Jones", Email="<EMAIL>", Country="US", Age=35, Phone=9876543210)
print(Concatenate(a='United',b='States'))
print(Concatenate(a='De',b='Anza',c='College'))
print(Concatenate(v='Python',w='Programming',x='Language',y='Guido',z='vanRossum'))
# Unpack a dict into keyword arguments.
d = {'a':10,'b':20,'c':30}
print(Sum(**d))
|
def Pack(*args, **kwargs):
    # Demonstrates that extra positionals arrive as a tuple and extra
    # keywords as a dict.
    print(type(args))
    print(type(kwargs))
def Add(*num):
    # Return the sum of all positional arguments (0 when none are given).
    sum = 0  # NOTE(review): shadows the builtin ``sum`` inside this function
    for n in num:
        sum = sum + n
    return sum
def Multiply(*args):
    # Return the product of all positional arguments (1 when none are given).
    product = 1
    for x in args:
        product = product * x
    return product
def Average(*args):
    # Print the packed tuple, then return the arithmetic mean.
    # Raises ZeroDivisionError when called with no arguments.
    total = 0
    print('Packed Argument Tuple ->', args)
    for i in args:
        total += i
    return total / len(args)
def Identify(**data):
    # Print the type of the packed keyword dict and each key/value pair.
    print("\nData type of argument:",type(data))
    for key, value in data.items():
        print("{} is {}".format(key,value))
def Concatenate(**words):
    # Join all keyword-argument values into one string (insertion order).
    result = ""
    for arg in words.values():
        result += arg
    return result
def Sum(**numbers):
    # Return the sum of all keyword-argument values.
    sum = 0  # NOTE(review): shadows the builtin ``sum``
    for n in numbers.values():
        sum += n
    return sum
# Demo: packing of positional and keyword arguments.
Pack()
# *args examples.
print(Add(3,5))
print(Add(4,5,6,7))
print(Add(1,2,3,5,6))
# Unpack a tuple into positional arguments.
t = (10, 30, 60)
print(Add(*t))
print(Average(1, 2, 3))
print(Average(1, 2, 3, 4, 5))
print(Average(1, 2, 3, 4, 5, 6, 7, 8, 9))
print(Multiply(1, 2, 3))
print(Multiply(1, 2, 3, 4, 5))
print(Multiply(1, 2, 3, 4, 5, 6, 7, 8, 9))
# **kwargs examples.
Identify(Firstname="Alice", Lastname="Zhu")
Identify(Firstname="Bob", Lastname="Smith", Age=25, Phone=1234567890)
Identify(Firstname="John", Lastname="Jones", Email="<EMAIL>", Country="US", Age=35, Phone=9876543210)
print(Concatenate(a='United',b='States'))
print(Concatenate(a='De',b='Anza',c='College'))
print(Concatenate(v='Python',w='Programming',x='Language',y='Guido',z='vanRossum'))
# Unpack a dict into keyword arguments.
d = {'a':10,'b':20,'c':30}
print(Sum(**d))
|
none
| 1
| 3.634439
| 4
|
|
frcnn.py
|
xwshi/faster-rcnn-keras
| 0
|
6628433
|
import colorsys
import copy
import os
import time
import numpy as np
from keras import backend as K
from keras.applications.imagenet_utils import preprocess_input
from PIL import Image, ImageDraw, ImageFont
import nets.frcnn as frcnn
from nets.frcnn_training import get_new_img_size
from utils.anchors import get_anchors
from utils.config import Config
from utils.utils import BBoxUtility
#--------------------------------------------#
# 使用自己训练好的模型预测需要修改2个参数
# model_path和classes_path都需要修改!
# 如果出现shape不匹配
# 一定要注意训练时的NUM_CLASSES、
# model_path和classes_path参数的修改
#--------------------------------------------#
class FRCNN(object):
    """Keras Faster R-CNN inference wrapper.

    Loads a trained RPN + classifier head pair and provides single-image
    detection (``detect_image``) and an inference-speed benchmark
    (``get_FPS``).
    """

    # Default settings, copied onto the instance in __init__.
    # model_path / classes_path must match the training run (and its
    # NUM_CLASSES), otherwise weight shapes will not match.
    _defaults = {
        "model_path" : 'model_data/voc_weights.h5',
        "classes_path" : 'model_data/voc_classes.txt',
        "confidence" : 0.5,
        "iou" : 0.3
    }

    @classmethod
    def get_defaults(cls, n):
        # Look up a default setting by name; returns an error *string*
        # (not an exception) for unknown names.
        if n in cls._defaults:
            return cls._defaults[n]
        else:
            return "Unrecognized attribute name '" + n + "'"

    #---------------------------------------------------#
    #   Initialize the Faster R-CNN detector
    #---------------------------------------------------#
    def __init__(self, **kwargs):
        # NOTE(review): **kwargs is accepted but never applied; only the
        # class defaults end up on the instance.
        self.__dict__.update(self._defaults)
        self.class_names = self._get_class()
        self.sess = K.get_session()
        self.config = Config()
        self.generate()
        self.bbox_util = BBoxUtility(classifier_nms=self.iou, top_k=self.config.num_RPN_predict_pre)

    #---------------------------------------------------#
    #   Read all class names from classes_path
    #---------------------------------------------------#
    def _get_class(self):
        classes_path = os.path.expanduser(self.classes_path)
        with open(classes_path) as f:
            class_names = f.readlines()
        class_names = [c.strip() for c in class_names]
        return class_names

    #---------------------------------------------------#
    #   Build the models and load the weights
    #---------------------------------------------------#
    def generate(self):
        model_path = os.path.expanduser(self.model_path)
        assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'
        #-------------------------------#
        #   Total class count (+1 for background)
        #-------------------------------#
        self.num_classes = len(self.class_names)+1
        #-------------------------------#
        #   Load model and weights
        #-------------------------------#
        self.model_rpn, self.model_classifier = frcnn.get_predict_model(self.config, self.num_classes)
        self.model_rpn.load_weights(self.model_path, by_name=True)
        self.model_classifier.load_weights(self.model_path, by_name=True)
        print('{} model, anchors, and classes loaded.'.format(model_path))
        # Assign a distinct drawing color to each class (evenly spaced hues).
        hsv_tuples = [(x / len(self.class_names), 1., 1.)
                      for x in range(len(self.class_names))]
        self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
        self.colors = list(
            map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
                self.colors))

    #---------------------------------------------------#
    #   Compute the spatial size of the shared feature map
    #---------------------------------------------------#
    def get_img_output_length(self, width, height):
        def get_output_length(input_length):
            # Four backbone stages: filter sizes, paddings, stride 2 each.
            filter_sizes = [7, 3, 1, 1]
            padding = [3,1,0,0]
            stride = 2
            for i in range(4):
                # input_length = (input_length - filter_size + stride) // stride
                input_length = (input_length + 2*padding[i]-filter_sizes[i]) // stride + 1
            return input_length
        return get_output_length(width), get_output_length(height)

    #---------------------------------------------------#
    #   Detect objects in one image
    #---------------------------------------------------#
    def detect_image(self, image):
        """Run detection on a PIL image; return the original-size image with boxes drawn."""
        #-------------------------------------#
        #   Convert to RGB so grayscale input also works.
        #-------------------------------------#
        image = image.convert("RGB")
        image_shape = np.array(np.shape(image)[0:2])
        old_width, old_height = image_shape[1], image_shape[0]
        old_image = copy.deepcopy(image)
        #---------------------------------------------------------#
        #   Resize the input so its short side becomes 600.
        #---------------------------------------------------------#
        width, height = get_new_img_size(old_width, old_height)
        image = image.resize([width,height], Image.BICUBIC)
        photo = np.array(image,dtype = np.float64)
        #-----------------------------------------------------------#
        #   Preprocess (normalize) and add the batch dimension.
        #-----------------------------------------------------------#
        photo = preprocess_input(np.expand_dims(photo,0))
        rpn_pred = self.model_rpn.predict(photo)
        #-----------------------------------------------------------#
        #   Decode the region-proposal network output.
        #-----------------------------------------------------------#
        base_feature_width, base_feature_height = self.get_img_output_length(width, height)
        anchors = get_anchors([base_feature_width, base_feature_height], width, height)
        rpn_results = self.bbox_util.detection_out_rpn(rpn_pred, anchors)
        #-------------------------------------------------------------#
        #   Feed the proposals plus the shared feature map into the
        #   classifier head.
        #-------------------------------------------------------------#
        base_layer = rpn_pred[2]
        proposal_box = np.array(rpn_results)[:, :, 1:]
        temp_ROIs = np.zeros_like(proposal_box)
        # Reorder box coordinates into the layout the classifier expects.
        temp_ROIs[:, :, [0, 1, 2, 3]] = proposal_box[:, :, [1, 0, 3, 2]]
        classifier_pred = self.model_classifier.predict([base_layer, temp_ROIs])
        #-------------------------------------------------------------#
        #   Decode the classifier output into final boxes.
        #-------------------------------------------------------------#
        results = self.bbox_util.detection_out_classifier(classifier_pred, proposal_box, self.config, self.confidence)
        if len(results[0])==0:
            # Nothing detected: return the untouched original image.
            return old_image
        results = np.array(results[0])
        boxes = results[:, :4]
        top_conf = results[:, 4]
        top_label_indices = results[:, 5]
        # Scale normalized coordinates back to the original image size.
        boxes[:, [0, 2]] = boxes[:, [0, 2]] * old_width
        boxes[:, [1, 3]] = boxes[:, [1, 3]] * old_height
        font = ImageFont.truetype(font='model_data/simhei.ttf',size=np.floor(3e-2 * np.shape(image)[1] + 0.5).astype('int32'))
        thickness = max((np.shape(old_image)[0] + np.shape(old_image)[1]) // old_width * 2, 1)
        image = old_image
        for i, c in enumerate(top_label_indices):
            predicted_class = self.class_names[int(c)]
            score = top_conf[i]
            left, top, right, bottom = boxes[i]
            # Pad the box by 5 px on every side, then clamp to image bounds.
            top = top - 5
            left = left - 5
            bottom = bottom + 5
            right = right + 5
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(np.shape(image)[0], np.floor(bottom + 0.5).astype('int32'))
            right = min(np.shape(image)[1], np.floor(right + 0.5).astype('int32'))
            # Draw the box and its "class score" label.
            label = '{} {:.2f}'.format(predicted_class, score)
            draw = ImageDraw.Draw(image)
            label_size = draw.textsize(label, font)
            label = label.encode('utf-8')
            print(label, top, left, bottom, right)
            # Put the label above the box when it fits, otherwise inside it.
            if top - label_size[1] >= 0:
                text_origin = np.array([left, top - label_size[1]])
            else:
                text_origin = np.array([left, top + 1])
            for i in range(thickness):
                draw.rectangle(
                    [left + i, top + i, right - i, bottom - i],
                    outline=self.colors[int(c)])
            draw.rectangle(
                [tuple(text_origin), tuple(text_origin + label_size)],
                fill=self.colors[int(c)])
            draw.text(text_origin, str(label,'UTF-8'), fill=(0, 0, 0), font=font)
            del draw
        return image

    def get_FPS(self, image, test_interval):
        """Run one warm-up pass, then ``test_interval`` timed inference passes;
        return the average seconds per pass."""
        #-------------------------------------#
        #   Convert to RGB so grayscale input also works.
        #-------------------------------------#
        image = image.convert("RGB")
        image_shape = np.array(np.shape(image)[0:2])
        old_width, old_height = image_shape[1], image_shape[0]
        #---------------------------------------------------------#
        #   Resize the input so its short side becomes 600.
        #---------------------------------------------------------#
        width, height = get_new_img_size(old_width, old_height)
        image = image.resize([width,height], Image.BICUBIC)
        photo = np.array(image,dtype = np.float64)
        #-----------------------------------------------------------#
        #   Preprocess (normalize) and add the batch dimension.
        #-----------------------------------------------------------#
        photo = preprocess_input(np.expand_dims(photo,0))
        rpn_pred = self.model_rpn.predict(photo)
        #-----------------------------------------------------------#
        #   Decode the region-proposal network output.
        #-----------------------------------------------------------#
        base_feature_width, base_feature_height = self.get_img_output_length(width, height)
        anchors = get_anchors([base_feature_width, base_feature_height], width, height)
        rpn_results = self.bbox_util.detection_out_rpn(rpn_pred, anchors)
        #-------------------------------------------------------------#
        #   Classifier head on proposals + shared feature map.
        #-------------------------------------------------------------#
        base_layer = rpn_pred[2]
        proposal_box = np.array(rpn_results)[:, :, 1:]
        temp_ROIs = np.zeros_like(proposal_box)
        temp_ROIs[:, :, [0, 1, 2, 3]] = proposal_box[:, :, [1, 0, 3, 2]]
        classifier_pred = self.model_classifier.predict([base_layer, temp_ROIs])
        #-------------------------------------------------------------#
        #   Decode the classifier output into final boxes.
        #-------------------------------------------------------------#
        results = self.bbox_util.detection_out_classifier(classifier_pred, proposal_box, self.config, self.confidence)
        if len(results[0])>0:
            results = np.array(results[0])
            boxes = results[:, :4]
            top_conf = results[:, 4]
            top_label_indices = results[:, 5]
            boxes[:, [0, 2]] = boxes[:, [0, 2]] * old_width
            boxes[:, [1, 3]] = boxes[:, [1, 3]] * old_height
        # Timed passes (the run above served as warm-up).
        t1 = time.time()
        for _ in range(test_interval):
            rpn_pred = self.model_rpn.predict(photo)
            #-----------------------------------------------------------#
            #   Decode the region-proposal network output.
            #-----------------------------------------------------------#
            base_feature_width, base_feature_height = self.get_img_output_length(width, height)
            anchors = get_anchors([base_feature_width, base_feature_height], width, height)
            rpn_results = self.bbox_util.detection_out_rpn(rpn_pred, anchors)
            #-------------------------------------------------------------#
            #   Classifier head on proposals + shared feature map.
            #-------------------------------------------------------------#
            base_layer = rpn_pred[2]
            proposal_box = np.array(rpn_results)[:, :, 1:]
            temp_ROIs = np.zeros_like(proposal_box)
            temp_ROIs[:, :, [0, 1, 2, 3]] = proposal_box[:, :, [1, 0, 3, 2]]
            classifier_pred = self.model_classifier.predict([base_layer, temp_ROIs])
            #-------------------------------------------------------------#
            #   Decode the classifier output into final boxes.
            #-------------------------------------------------------------#
            results = self.bbox_util.detection_out_classifier(classifier_pred, proposal_box, self.config, self.confidence)
            if len(results[0])>0:
                results = np.array(results[0])
                boxes = results[:, :4]
                top_conf = results[:, 4]
                top_label_indices = results[:, 5]
                boxes[:, [0, 2]] = boxes[:, [0, 2]] * old_width
                boxes[:, [1, 3]] = boxes[:, [1, 3]] * old_height
        t2 = time.time()
        tact_time = (t2 - t1) / test_interval
        return tact_time

    def close_session(self):
        # Release the underlying TensorFlow session.
        self.sess.close()
|
import colorsys
import copy
import os
import time
import numpy as np
from keras import backend as K
from keras.applications.imagenet_utils import preprocess_input
from PIL import Image, ImageDraw, ImageFont
import nets.frcnn as frcnn
from nets.frcnn_training import get_new_img_size
from utils.anchors import get_anchors
from utils.config import Config
from utils.utils import BBoxUtility
#--------------------------------------------#
# 使用自己训练好的模型预测需要修改2个参数
# model_path和classes_path都需要修改!
# 如果出现shape不匹配
# 一定要注意训练时的NUM_CLASSES、
# model_path和classes_path参数的修改
#--------------------------------------------#
class FRCNN(object):
    """Keras Faster R-CNN inference wrapper.

    Loads a trained RPN + classifier head pair and provides single-image
    detection (``detect_image``) and an inference-speed benchmark
    (``get_FPS``).
    """

    # Default settings, copied onto the instance in __init__.
    # model_path / classes_path must match the training run (and its
    # NUM_CLASSES), otherwise weight shapes will not match.
    _defaults = {
        "model_path" : 'model_data/voc_weights.h5',
        "classes_path" : 'model_data/voc_classes.txt',
        "confidence" : 0.5,
        "iou" : 0.3
    }

    @classmethod
    def get_defaults(cls, n):
        # Look up a default setting by name; returns an error *string*
        # (not an exception) for unknown names.
        if n in cls._defaults:
            return cls._defaults[n]
        else:
            return "Unrecognized attribute name '" + n + "'"

    #---------------------------------------------------#
    #   Initialize the Faster R-CNN detector
    #---------------------------------------------------#
    def __init__(self, **kwargs):
        # NOTE(review): **kwargs is accepted but never applied; only the
        # class defaults end up on the instance.
        self.__dict__.update(self._defaults)
        self.class_names = self._get_class()
        self.sess = K.get_session()
        self.config = Config()
        self.generate()
        self.bbox_util = BBoxUtility(classifier_nms=self.iou, top_k=self.config.num_RPN_predict_pre)

    #---------------------------------------------------#
    #   Read all class names from classes_path
    #---------------------------------------------------#
    def _get_class(self):
        classes_path = os.path.expanduser(self.classes_path)
        with open(classes_path) as f:
            class_names = f.readlines()
        class_names = [c.strip() for c in class_names]
        return class_names

    #---------------------------------------------------#
    #   Build the models and load the weights
    #---------------------------------------------------#
    def generate(self):
        model_path = os.path.expanduser(self.model_path)
        assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'
        #-------------------------------#
        #   Total class count (+1 for background)
        #-------------------------------#
        self.num_classes = len(self.class_names)+1
        #-------------------------------#
        #   Load model and weights
        #-------------------------------#
        self.model_rpn, self.model_classifier = frcnn.get_predict_model(self.config, self.num_classes)
        self.model_rpn.load_weights(self.model_path, by_name=True)
        self.model_classifier.load_weights(self.model_path, by_name=True)
        print('{} model, anchors, and classes loaded.'.format(model_path))
        # Assign a distinct drawing color to each class (evenly spaced hues).
        hsv_tuples = [(x / len(self.class_names), 1., 1.)
                      for x in range(len(self.class_names))]
        self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
        self.colors = list(
            map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
                self.colors))

    #---------------------------------------------------#
    #   Compute the spatial size of the shared feature map
    #---------------------------------------------------#
    def get_img_output_length(self, width, height):
        def get_output_length(input_length):
            # Four backbone stages: filter sizes, paddings, stride 2 each.
            filter_sizes = [7, 3, 1, 1]
            padding = [3,1,0,0]
            stride = 2
            for i in range(4):
                # input_length = (input_length - filter_size + stride) // stride
                input_length = (input_length + 2*padding[i]-filter_sizes[i]) // stride + 1
            return input_length
        return get_output_length(width), get_output_length(height)

    #---------------------------------------------------#
    #   Detect objects in one image
    #---------------------------------------------------#
    def detect_image(self, image):
        """Run detection on a PIL image; return the original-size image with boxes drawn."""
        #-------------------------------------#
        #   Convert to RGB so grayscale input also works.
        #-------------------------------------#
        image = image.convert("RGB")
        image_shape = np.array(np.shape(image)[0:2])
        old_width, old_height = image_shape[1], image_shape[0]
        old_image = copy.deepcopy(image)
        #---------------------------------------------------------#
        #   Resize the input so its short side becomes 600.
        #---------------------------------------------------------#
        width, height = get_new_img_size(old_width, old_height)
        image = image.resize([width,height], Image.BICUBIC)
        photo = np.array(image,dtype = np.float64)
        #-----------------------------------------------------------#
        #   Preprocess (normalize) and add the batch dimension.
        #-----------------------------------------------------------#
        photo = preprocess_input(np.expand_dims(photo,0))
        rpn_pred = self.model_rpn.predict(photo)
        #-----------------------------------------------------------#
        #   Decode the region-proposal network output.
        #-----------------------------------------------------------#
        base_feature_width, base_feature_height = self.get_img_output_length(width, height)
        anchors = get_anchors([base_feature_width, base_feature_height], width, height)
        rpn_results = self.bbox_util.detection_out_rpn(rpn_pred, anchors)
        #-------------------------------------------------------------#
        #   Feed the proposals plus the shared feature map into the
        #   classifier head.
        #-------------------------------------------------------------#
        base_layer = rpn_pred[2]
        proposal_box = np.array(rpn_results)[:, :, 1:]
        temp_ROIs = np.zeros_like(proposal_box)
        # Reorder box coordinates into the layout the classifier expects.
        temp_ROIs[:, :, [0, 1, 2, 3]] = proposal_box[:, :, [1, 0, 3, 2]]
        classifier_pred = self.model_classifier.predict([base_layer, temp_ROIs])
        #-------------------------------------------------------------#
        #   Decode the classifier output into final boxes.
        #-------------------------------------------------------------#
        results = self.bbox_util.detection_out_classifier(classifier_pred, proposal_box, self.config, self.confidence)
        if len(results[0])==0:
            # Nothing detected: return the untouched original image.
            return old_image
        results = np.array(results[0])
        boxes = results[:, :4]
        top_conf = results[:, 4]
        top_label_indices = results[:, 5]
        # Scale normalized coordinates back to the original image size.
        boxes[:, [0, 2]] = boxes[:, [0, 2]] * old_width
        boxes[:, [1, 3]] = boxes[:, [1, 3]] * old_height
        font = ImageFont.truetype(font='model_data/simhei.ttf',size=np.floor(3e-2 * np.shape(image)[1] + 0.5).astype('int32'))
        thickness = max((np.shape(old_image)[0] + np.shape(old_image)[1]) // old_width * 2, 1)
        image = old_image
        for i, c in enumerate(top_label_indices):
            predicted_class = self.class_names[int(c)]
            score = top_conf[i]
            left, top, right, bottom = boxes[i]
            # Pad the box by 5 px on every side, then clamp to image bounds.
            top = top - 5
            left = left - 5
            bottom = bottom + 5
            right = right + 5
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(np.shape(image)[0], np.floor(bottom + 0.5).astype('int32'))
            right = min(np.shape(image)[1], np.floor(right + 0.5).astype('int32'))
            # Draw the box and its "class score" label.
            label = '{} {:.2f}'.format(predicted_class, score)
            draw = ImageDraw.Draw(image)
            label_size = draw.textsize(label, font)
            label = label.encode('utf-8')
            print(label, top, left, bottom, right)
            # Put the label above the box when it fits, otherwise inside it.
            if top - label_size[1] >= 0:
                text_origin = np.array([left, top - label_size[1]])
            else:
                text_origin = np.array([left, top + 1])
            for i in range(thickness):
                draw.rectangle(
                    [left + i, top + i, right - i, bottom - i],
                    outline=self.colors[int(c)])
            draw.rectangle(
                [tuple(text_origin), tuple(text_origin + label_size)],
                fill=self.colors[int(c)])
            draw.text(text_origin, str(label,'UTF-8'), fill=(0, 0, 0), font=font)
            del draw
        return image

    def get_FPS(self, image, test_interval):
        """Run one warm-up pass, then ``test_interval`` timed inference passes;
        return the average seconds per pass."""
        #-------------------------------------#
        #   Convert to RGB so grayscale input also works.
        #-------------------------------------#
        image = image.convert("RGB")
        image_shape = np.array(np.shape(image)[0:2])
        old_width, old_height = image_shape[1], image_shape[0]
        #---------------------------------------------------------#
        #   Resize the input so its short side becomes 600.
        #---------------------------------------------------------#
        width, height = get_new_img_size(old_width, old_height)
        image = image.resize([width,height], Image.BICUBIC)
        photo = np.array(image,dtype = np.float64)
        #-----------------------------------------------------------#
        #   Preprocess (normalize) and add the batch dimension.
        #-----------------------------------------------------------#
        photo = preprocess_input(np.expand_dims(photo,0))
        rpn_pred = self.model_rpn.predict(photo)
        #-----------------------------------------------------------#
        #   Decode the region-proposal network output.
        #-----------------------------------------------------------#
        base_feature_width, base_feature_height = self.get_img_output_length(width, height)
        anchors = get_anchors([base_feature_width, base_feature_height], width, height)
        rpn_results = self.bbox_util.detection_out_rpn(rpn_pred, anchors)
        #-------------------------------------------------------------#
        #   Classifier head on proposals + shared feature map.
        #-------------------------------------------------------------#
        base_layer = rpn_pred[2]
        proposal_box = np.array(rpn_results)[:, :, 1:]
        temp_ROIs = np.zeros_like(proposal_box)
        temp_ROIs[:, :, [0, 1, 2, 3]] = proposal_box[:, :, [1, 0, 3, 2]]
        classifier_pred = self.model_classifier.predict([base_layer, temp_ROIs])
        #-------------------------------------------------------------#
        #   Decode the classifier output into final boxes.
        #-------------------------------------------------------------#
        results = self.bbox_util.detection_out_classifier(classifier_pred, proposal_box, self.config, self.confidence)
        if len(results[0])>0:
            results = np.array(results[0])
            boxes = results[:, :4]
            top_conf = results[:, 4]
            top_label_indices = results[:, 5]
            boxes[:, [0, 2]] = boxes[:, [0, 2]] * old_width
            boxes[:, [1, 3]] = boxes[:, [1, 3]] * old_height
        # Timed passes (the run above served as warm-up).
        t1 = time.time()
        for _ in range(test_interval):
            rpn_pred = self.model_rpn.predict(photo)
            #-----------------------------------------------------------#
            #   Decode the region-proposal network output.
            #-----------------------------------------------------------#
            base_feature_width, base_feature_height = self.get_img_output_length(width, height)
            anchors = get_anchors([base_feature_width, base_feature_height], width, height)
            rpn_results = self.bbox_util.detection_out_rpn(rpn_pred, anchors)
            #-------------------------------------------------------------#
            #   Classifier head on proposals + shared feature map.
            #-------------------------------------------------------------#
            base_layer = rpn_pred[2]
            proposal_box = np.array(rpn_results)[:, :, 1:]
            temp_ROIs = np.zeros_like(proposal_box)
            temp_ROIs[:, :, [0, 1, 2, 3]] = proposal_box[:, :, [1, 0, 3, 2]]
            classifier_pred = self.model_classifier.predict([base_layer, temp_ROIs])
            #-------------------------------------------------------------#
            #   Decode the classifier output into final boxes.
            #-------------------------------------------------------------#
            results = self.bbox_util.detection_out_classifier(classifier_pred, proposal_box, self.config, self.confidence)
            if len(results[0])>0:
                results = np.array(results[0])
                boxes = results[:, :4]
                top_conf = results[:, 4]
                top_label_indices = results[:, 5]
                boxes[:, [0, 2]] = boxes[:, [0, 2]] * old_width
                boxes[:, [1, 3]] = boxes[:, [1, 3]] * old_height
        t2 = time.time()
        tact_time = (t2 - t1) / test_interval
        return tact_time

    def close_session(self):
        # Release the underlying TensorFlow session.
        self.sess.close()
|
zh
| 0.21379
|
#--------------------------------------------# # 使用自己训练好的模型预测需要修改2个参数 # model_path和classes_path都需要修改! # 如果出现shape不匹配 # 一定要注意训练时的NUM_CLASSES、 # model_path和classes_path参数的修改 #--------------------------------------------# #---------------------------------------------------# # 初始化faster RCNN #---------------------------------------------------# #---------------------------------------------------# # 获得所有的分类 #---------------------------------------------------# #---------------------------------------------------# # 载入模型 #---------------------------------------------------# #-------------------------------# # 计算总的类的数量 #-------------------------------# #-------------------------------# # 载入模型与权值 #-------------------------------# # 画框设置不同的颜色 #---------------------------------------------------# # 用于计算共享特征层的大小 #---------------------------------------------------# # input_length = (input_length - filter_size + stride) // stride #---------------------------------------------------# # 检测图片 #---------------------------------------------------# #-------------------------------------# # 转换成RGB图片,可以用于灰度图预测。 #-------------------------------------# #---------------------------------------------------------# # 给原图像进行resize,resize到短边为600的大小上 #---------------------------------------------------------# #-----------------------------------------------------------# # 图片预处理,归一化。 #-----------------------------------------------------------# #-----------------------------------------------------------# # 将建议框网络的预测结果进行解码 #-----------------------------------------------------------# #-------------------------------------------------------------# # 在获得建议框和共享特征层后,将二者传入classifier中进行预测 #-------------------------------------------------------------# #-------------------------------------------------------------# # 利用classifier的预测结果对建议框进行解码,获得预测框 #-------------------------------------------------------------# # 画框框 #-------------------------------------# # 转换成RGB图片,可以用于灰度图预测。 
#-------------------------------------# #---------------------------------------------------------# # 给原图像进行resize,resize到短边为600的大小上 #---------------------------------------------------------# #-----------------------------------------------------------# # 图片预处理,归一化。 #-----------------------------------------------------------# #-----------------------------------------------------------# # 将建议框网络的预测结果进行解码 #-----------------------------------------------------------# #-------------------------------------------------------------# # 在获得建议框和共享特征层后,将二者传入classifier中进行预测 #-------------------------------------------------------------# #-------------------------------------------------------------# # 利用classifier的预测结果对建议框进行解码,获得预测框 #-------------------------------------------------------------# #-----------------------------------------------------------# # 将建议框网络的预测结果进行解码 #-----------------------------------------------------------# #-------------------------------------------------------------# # 在获得建议框和共享特征层后,将二者传入classifier中进行预测 #-------------------------------------------------------------# #-------------------------------------------------------------# # 利用classifier的预测结果对建议框进行解码,获得预测框 #-------------------------------------------------------------#
| 2.068793
| 2
|
Desafios/desafio13.py
|
ArthurBrito1/MY-SCRIPTS-PYTHON
| 1
|
6628434
|
<gh_stars>1-10
# Read a temperature in Celsius from stdin and print its Fahrenheit value.
temperatura = int(input('informe a temperatura em graus celcius:'))
# F = C * 9/5 + 32
converssão = (temperatura*9/5)+32
print('A temperatura em graus celcius é de {}C \nApós de ser convertida para fharenheit fica {}F'.format(temperatura, converssão))
|
# Read a temperature in Celsius from stdin and print its Fahrenheit value.
temperatura = int(input('informe a temperatura em graus celcius:'))
# F = C * 9/5 + 32
converssão = (temperatura*9/5)+32
print('A temperatura em graus celcius é de {}C \nApós de ser convertida para fharenheit fica {}F'.format(temperatura, converssão))
|
none
| 1
| 3.682393
| 4
|
|
integration/idea/root0/apputils_setup.py
|
hapylestat/appcore
| 0
|
6628435
|
<filename>integration/idea/root0/apputils_setup.py
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from distutils.cmd import Command
from distutils.dir_util import copy_tree
from shutil import copyfile
class AppUtilsCommand(Command):
    """distutils command that vendors selected ``apputils`` modules.

    Reads ``apputils-requirements.txt`` (first non-comment line is the
    install location relative to this file; remaining lines are module
    names), clones or updates the apputils git repository under
    ``build/external-libs`` and copies the requested modules into the
    project tree.
    """

    description = "Manage AppUtils libs integration to the application"
    user_options = []

    _apputils_git = "https://github.com/hapylestat/apputils.git"
    _requirements_file = "apputils-requirements.txt"
    _name = "apputils"

    def initialize_options(self) -> None:
        pass

    def finalize_options(self) -> None:
        pass

    def run(self):
        import os
        from shutil import rmtree

        current_path = os.path.dirname(__file__)
        git_path = f"{current_path}{os.sep}build{os.sep}external-libs"
        repo_path = f"{git_path}{os.sep}{self._name}"
        git_rel_path = f"src{os.sep}modules{os.sep}{self._name}"

        print("Looking for requirements....")
        if not os.path.exists(f"{current_path}{os.sep}{self._requirements_file}"):
            print(f"Error!!! No {self._requirements_file} found at {current_path}")
            return

        with open(f"{current_path}{os.sep}{self._requirements_file}", "r") as f:
            # BUGFIX: the original tested startswith("\#") — a literal
            # backslash + '#' — so comment lines were never filtered out.
            modules = [line.strip("\n").strip() for line in f.readlines() if line and not line.startswith("#")]
            # First entry is the relative install location, the rest are module names.
            rel_modules_install_path = f"{modules[:1][0]}{os.sep}{self._name}"
            modules = modules[1:]

        if not modules:
            print("Error!!! No modules to be integrated")
            return

        print(f"Modules to integrate: {', '.join(modules)}")

        if os.path.exists(repo_path):
            # Existing checkout: hard-reset and pull, restoring cwd afterwards.
            print("Trying to update existing repository....")
            cur_dir = os.path.abspath(".")
            os.chdir(repo_path)
            try:
                os.system("git reset --hard HEAD")
                os.system("git pull")
            finally:
                os.chdir(cur_dir)
        else:
            print(f"Creating directory for checkout {git_path}")
            os.makedirs(git_path, exist_ok=True)
            os.system(f"git clone {self._apputils_git} {repo_path}")

        git_modules_path = os.path.join(repo_path, git_rel_path)
        if not os.path.exists(git_modules_path):
            print(f"Unable to access modules location: {git_modules_path}")
            return  # BUGFIX: original fell through and crashed in os.listdir below

        print("Verifying modules availability:")
        git_available_modules = os.listdir(git_modules_path)
        for module in modules:
            if module in git_available_modules:
                print(f" {module} ... OK")
            else:
                print(f" {module} ... NO FOUND")
                return

        old_modules_path = os.path.abspath(os.path.join(current_path, rel_modules_install_path))
        if not os.path.exists(old_modules_path):
            print(f"Preparing modules folder '{old_modules_path}' ...")
            os.makedirs(old_modules_path)

        old_installed_modules = set(os.listdir(old_modules_path)) & set(modules)
        print("Removing old installed modules:")
        for module in old_installed_modules:
            print(f" Removing old module {module} ....")
            # BUGFIX: the original only printed here; actually delete the
            # stale copy so removed files do not linger after the re-copy.
            rmtree(os.path.join(old_modules_path, module), ignore_errors=True)

        print("Installing requested modules:")
        if not os.path.exists(os.path.join(old_modules_path, "__init__.py")):
            copyfile(os.path.join(git_modules_path, "__init__.py"), os.path.join(old_modules_path, "__init__.py"))

        for module in modules:
            copy_from_path = os.path.join(git_modules_path, module)
            copy_to_path = os.path.join(old_modules_path, module)
            print(f" {module}...", end="")
            try:
                copy_tree(copy_from_path, copy_to_path, verbose=0)
                print("OK")
            except Exception as e:
                print("FAIL")
                raise e
|
<filename>integration/idea/root0/apputils_setup.py
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from distutils.cmd import Command
from distutils.dir_util import copy_tree
from shutil import copyfile
class AppUtilsCommand(Command):
    """distutils command that vendors AppUtils modules into the application tree.

    Reads ``apputils-requirements.txt`` (first non-comment line: the relative
    install root; remaining lines: module names), clones or updates the
    AppUtils git repository, then copies the requested modules into the
    application source tree, replacing any previously installed copies.
    """

    description = "Manage AppUtils libs integration to the application"
    user_options = []

    _apputils_git = "https://github.com/hapylestat/apputils.git"
    _requirements_file = "apputils-requirements.txt"
    _name = "apputils"

    def initialize_options(self) -> None:
        """No options to initialize (required by the Command interface)."""
        pass

    def finalize_options(self) -> None:
        """No options to finalize (required by the Command interface)."""
        pass

    def run(self):
        import os
        # Local import keeps the style of the original (os is imported locally too).
        from shutil import rmtree

        current_path = os.path.dirname(__file__)
        git_path = f"{current_path}{os.sep}build{os.sep}external-libs"
        repo_path = f"{git_path}{os.sep}{self._name}"
        git_rel_path = f"src{os.sep}modules{os.sep}{self._name}"

        print("Looking for requirements....")
        req_path = f"{current_path}{os.sep}{self._requirements_file}"
        if not os.path.exists(req_path):
            print(f"Error!!! No {self._requirements_file} found at {current_path}")
            return

        with open(req_path, "r") as f:
            # FIX: the original tested startswith("\#") (a literal backslash +
            # hash, since \# is not a recognized escape), so comment lines
            # starting with "#" were never actually filtered out.
            modules = [line.strip("\n").strip() for line in f.readlines()
                       if line.strip() and not line.strip().startswith("#")]

        # FIX: check for an empty file *before* indexing modules[0]; the
        # original did modules[:1][0] first and raised IndexError instead of
        # printing the intended error message.
        if not modules:
            print("Error!!! No modules to be integrated")
            return

        # First line of the requirements file is the relative install root.
        rel_modules_install_path = f"{modules[0]}{os.sep}{self._name}"
        modules = modules[1:]

        if not modules:
            print("Error!!! No modules to be integrated")
            return

        print(f"Modules to integrate: {', '.join(modules)}")

        if os.path.exists(repo_path):
            print("Trying to update existing repository....")
            cur_dir = os.path.abspath(".")
            os.chdir(repo_path)
            try:
                os.system("git reset --hard HEAD")
                os.system("git pull")
            finally:
                # Always restore the working directory, even if git fails.
                os.chdir(cur_dir)
        else:
            print(f"Creating directory for checkout {git_path}")
            os.makedirs(git_path, exist_ok=True)
            os.system(f"git clone {self._apputils_git} {repo_path}")

        git_modules_path = os.path.join(repo_path, git_rel_path)
        if not os.path.exists(git_modules_path):
            print(f"Unable to access modules location: {git_modules_path}")
            # FIX: the original fell through here and crashed on the
            # os.listdir() call below.
            return

        print("Verifying modules availability:")
        git_available_modules = os.listdir(git_modules_path)
        for module in modules:
            if module in git_available_modules:
                print(f" {module} ... OK")
            else:
                print(f" {module} ... NOT FOUND")  # FIX: message typo ("NO FOUND")
                return

        old_modules_path = os.path.abspath(os.path.join(current_path, rel_modules_install_path))
        if not os.path.exists(old_modules_path):
            print(f"Preparing modules folder '{old_modules_path}' ...")
            os.makedirs(old_modules_path)

        old_installed_modules = set(os.listdir(old_modules_path)) & set(modules)
        print("Removing old installed modules:")
        for module in old_installed_modules:
            print(f" Removing old module {module} ....")
            # FIX: the original only printed here and never deleted anything,
            # leaving stale files behind after a module shrank or renamed files.
            rmtree(os.path.join(old_modules_path, module), ignore_errors=True)

        print("Installing requested modules:")
        if not os.path.exists(os.path.join(old_modules_path, "__init__.py")):
            copyfile(os.path.join(git_modules_path, "__init__.py"),
                     os.path.join(old_modules_path, "__init__.py"))
        for module in modules:
            copy_from_path = os.path.join(git_modules_path, module)
            copy_to_path = os.path.join(old_modules_path, module)
            print(f" {module}...", end="")
            try:
                copy_tree(copy_from_path, copy_to_path, verbose=0)
                print("OK")
            except Exception as e:
                print("FAIL")
                raise e
|
en
| 0.859601
|
# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #")]
| 1.822595
| 2
|
main.py
|
nalbarr/halamka_nlp_tf_keras
| 0
|
6628436
|
import pandas as pd
import numpy as np
import random
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
def dump_tf_info():
    """Print the TensorFlow version, eager-execution state and GPU availability."""
    print("Version: ", tf.__version__)
    print("Eager mode: ", tf.executing_eagerly())
    gpu_status = (
        "available"
        if tf.config.experimental.list_physical_devices("GPU")
        else "NOT AVAILABLE"
    )
    print("GPU is", gpu_status)
def load_data(file_path):
    """Read a tab-separated file at *file_path* into a DataFrame, echoing its head."""
    frame = pd.read_csv(file_path, sep="\t")
    print(frame.head())
    return frame
def remove_header(df):
    """Drop the first row of *df*.

    NOTE(review): pandas.read_csv already consumes the header line, so this
    drops the first *data* row — confirm that is intentional.
    """
    return df[1:]
def remove_category_2_rows(df):
    """Keep only the rows whose category is 0 or 1."""
    keep = df.category.isin([0, 1])
    return df[keep]
def get_nrows(df):
    """Return the number of rows in *df*."""
    # df.shape is (nrows, ncols); the original unpacked both but only used
    # the row count, leaving an unused local.
    return df.shape[0]
def get_nlp_hyperparameters(nrows):
    """Return the tokenisation/model hyper-parameters.

    90% of the *nrows* rows are used for training; 10% of the training
    portion is later held out as a test set.
    """
    hyperparameters = (
        100,               # embedding_dim
        16,                # max_length
        "post",            # trunc_type
        "post",            # padding_type
        "<OOV>",           # oov_tok
        int(0.9 * nrows),  # training_size
        0.1,               # test_portion
    )
    return hyperparameters
def get_corpus(df):
    """Build a list of [title, label] pairs from *df*.

    Rows whose category is neither 0 nor 1 are reported and skipped.
    (FIX: the original appended a malformed single-element [title] entry for
    unknown categories, which would crash downstream code indexing
    corpus[x][1]; it also counted those rows in num_sentences.)

    Returns:
        (num_sentences, corpus): the number of collected rows and the list
        of [title, label] pairs.
    """
    corpus = []
    num_sentences = 0
    for _, row in df.iterrows():
        label = row["category"]
        if label not in (0, 1):
            print("Unknown category.")
            continue
        corpus.append([row["title"], int(label)])
        num_sentences += 1
    print("num_sentences: {0}".format(num_sentences))
    print("len(corpus): {0}".format(len(corpus)))
    # Guard: the original crashed here on an empty corpus.
    if corpus:
        print("corpus[0]: {0}".format(corpus[0]))
    return num_sentences, corpus
def tokenize(corpus, test_portion, training_size, max_length, padding_type, trunc_type):
    """Shuffle *corpus* and produce padded, tokenised train/test splits.

    Returns (word_index, vocab_size, training_sequences, training_labels,
    test_sequences, test_labels); the first `test_portion` fraction of the
    shuffled data is held out for testing.
    """
    random.shuffle(corpus)
    sentences = [corpus[i][0] for i in range(training_size)]
    labels = [corpus[i][1] for i in range(training_size)]

    tokenizer = Tokenizer()
    tokenizer.fit_on_texts(sentences)
    word_index = tokenizer.word_index
    vocab_size = len(word_index)

    padded = pad_sequences(
        tokenizer.texts_to_sequences(sentences),
        maxlen=max_length,
        padding=padding_type,
        truncating=trunc_type,
    )

    split = int(test_portion * training_size)
    return (
        word_index,
        vocab_size,
        padded[split:training_size],  # training_sequences
        labels[split:training_size],  # training_labels
        padded[0:split],              # test_sequences
        labels[0:split],              # test_labels
    )
def get_embeddings_matrix(word_index, vocab_size, embedding_dim,
                          glove_path="/tmp/glove.6B.100d.txt"):
    """Build an embedding matrix from a GloVe vectors text file.

    Args:
        word_index: token -> integer-index mapping (1-based, as produced by
            the Keras tokenizer).
        vocab_size: number of distinct tokens; the matrix gets vocab_size + 1
            rows so token indices can be used directly.
        embedding_dim: dimensionality of the GloVe vectors.
        glove_path: path to the GloVe file. New parameter; defaults to the
            previously hard-coded location, so existing callers are unaffected.

    Words absent from the GloVe file keep an all-zero row.
    """
    embeddings_index = {}
    with open(glove_path) as f:
        for line in f:
            values = line.split()
            word = values[0]
            embeddings_index[word] = np.asarray(values[1:], dtype="float32")
    embeddings_matrix = np.zeros((vocab_size + 1, embedding_dim))
    for word, i in word_index.items():
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            embeddings_matrix[i] = embedding_vector
    return embeddings_matrix
def create_model(vocab_size, embedding_dim, max_length, embeddings_matrix):
    """Build and compile the Conv1D + LSTM binary classifier.

    The embedding layer is initialised with the (frozen) GloVe matrix.
    """
    embedding_layer = tf.keras.layers.Embedding(
        vocab_size + 1,
        embedding_dim,
        input_length=max_length,
        weights=[embeddings_matrix],
        trainable=False,
    )
    model = tf.keras.Sequential([
        embedding_layer,
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Conv1D(64, 5, activation="relu"),
        tf.keras.layers.MaxPooling1D(pool_size=4),
        tf.keras.layers.LSTM(64),
        tf.keras.layers.Dense(1, activation="sigmoid"),
    ])
    model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
    print(model.summary())
    return model
def dump_input_types(training_sequences, training_labels, test_sequences, test_labels):
    """Print shape/type diagnostics for the train/test inputs."""
    print("training_sequences: ", training_sequences.shape,
          type(training_sequences), training_sequences.dtype)
    print("training_labels: ", type(training_labels))
    print("test_sequences: ", test_sequences.shape,
          type(test_sequences), test_sequences.dtype)
    print("test_labels: ", type(test_labels))
def convert_input_type(
    training_sequences, training_labels, testing_sequences, test_labels
):
    """Convert the label lists to numpy arrays; sequences pass through unchanged."""
    return (
        training_sequences,
        np.array(training_labels),
        testing_sequences,
        np.array(test_labels),
    )
def train_model(
    model, training_sequences, training_labels, test_sequences, test_labels, num_epochs
):
    """Fit *model* on the training data, validating on the held-out test split."""
    validation = (test_sequences, test_labels)
    history = model.fit(
        training_sequences,
        training_labels,
        epochs=num_epochs,
        validation_data=validation,
        verbose=2,
    )
    print(history)
def save_model(model):
    """Persist the trained model to disk at the project's fixed HDF5 path."""
    model.save("models/halamka_nlp_tf.h5")
def main():
    """Run the full training pipeline: load, clean, tokenise, embed, train, save."""
    # Environment diagnostics (TF version, eager mode, GPU availability).
    dump_tf_info()
    # Load the raw TSV and drop unusable rows.
    df = load_data("data/halamka_posts_1836.tsv")
    df = remove_header(df)
    df = remove_category_2_rows(df)
    nrows = get_nrows(df)
    # Hyper-parameters are derived from the cleaned row count.
    (
        embedding_dim,
        max_length,
        trunc_type,
        padding_type,
        oov_tok,
        training_size,
        test_portion,
    ) = get_nlp_hyperparameters(nrows)
    num_sentences, corpus = get_corpus(df)
    # Tokenise, pad and split into train/test sequences.
    (
        word_index,
        vocab_size,
        training_sequences,
        training_labels,
        test_sequences,
        test_labels,
    ) = tokenize(
        corpus, test_portion, training_size, max_length, padding_type, trunc_type
    )
    # Pre-trained GloVe vectors seed the (frozen) embedding layer.
    embeddings_matrix = get_embeddings_matrix(word_index, vocab_size, embedding_dim)
    model = create_model(vocab_size, embedding_dim, max_length, embeddings_matrix)
    dump_input_types(training_sequences, training_labels, test_sequences, test_labels)
    # Labels come back as plain lists; convert to numpy arrays for model.fit.
    (
        training_sequences2,
        training_labels2,
        test_sequences2,
        test_labels2,
    ) = convert_input_type(
        training_sequences, training_labels, test_sequences, test_labels
    )
    train_model(
        model,
        training_sequences2,
        training_labels2,
        test_sequences2,
        test_labels2,
        num_epochs=50,
    )
    save_model(model)
if __name__ == "__main__":
    import time

    # Time the whole pipeline and report elapsed wall-clock seconds.
    start_time = time.time()
    main()
    print(f"--- {time.time() - start_time} seconds ---")
|
import pandas as pd
import numpy as np
import random
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
def dump_tf_info():
print("Version: ", tf.__version__)
print("Eager mode: ", tf.executing_eagerly())
print(
"GPU is",
"available"
if tf.config.experimental.list_physical_devices("GPU")
else "NOT AVAILABLE",
)
def load_data(file_path):
df = pd.read_csv(file_path, sep="\t")
print(df.head())
return df
def remove_header(df):
df = df[1:]
return df
def remove_category_2_rows(df):
df = df[(df.category == 0) | (df.category == 1)]
return df
def get_nrows(df):
(nrows, ncols) = df.shape
return nrows
def get_nlp_hyperparameters(nrows):
embedding_dim = 100
max_length = 16
trunc_type = "post"
padding_type = "post"
oov_tok = "<OOV>"
training_size = int(0.9 * nrows)
test_portion = 0.1
return (
embedding_dim,
max_length,
trunc_type,
padding_type,
oov_tok,
training_size,
test_portion,
)
def get_corpus(df):
corpus = []
num_sentences = 0
for index, row in df.iterrows():
list_item = []
list_item.append(row["title"])
this_label = row["category"]
if this_label == 0:
list_item.append(0)
elif this_label == 1:
list_item.append(1)
else:
print("Unknown category.")
num_sentences += 1
corpus.append(list_item)
print("num_sentences: {0}".format(num_sentences))
print("len(corpus): {0}".format(len(corpus)))
print("corpus[0]: {0}".format(corpus[0]))
return num_sentences, corpus
def tokenize(corpus, test_portion, training_size, max_length, padding_type, trunc_type):
sentences = []
labels = []
random.shuffle(corpus)
for x in range(training_size):
sentences.append(corpus[x][0])
labels.append(corpus[x][1])
tokenizer = Tokenizer()
tokenizer.fit_on_texts(sentences)
word_index = tokenizer.word_index
vocab_size = len(word_index)
sequences = tokenizer.texts_to_sequences(sentences)
padded = pad_sequences(
sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type
)
split = int(test_portion * training_size)
test_sequences = padded[0:split]
training_sequences = padded[split:training_size]
test_labels = labels[0:split]
training_labels = labels[split:training_size]
return (
word_index,
vocab_size,
training_sequences,
training_labels,
test_sequences,
test_labels,
)
def get_embeddings_matrix(word_index, vocab_size, embedding_dim):
embeddings_index = {}
with open("/tmp/glove.6B.100d.txt") as f:
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype="float32")
embeddings_index[word] = coefs
embeddings_matrix = np.zeros((vocab_size + 1, embedding_dim))
for word, i in word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embeddings_matrix[i] = embedding_vector
return embeddings_matrix
def create_model(vocab_size, embedding_dim, max_length, embeddings_matrix):
model = tf.keras.Sequential(
[
tf.keras.layers.Embedding(
vocab_size + 1,
embedding_dim,
input_length=max_length,
weights=[embeddings_matrix],
trainable=False,
),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Conv1D(64, 5, activation="relu"),
tf.keras.layers.MaxPooling1D(pool_size=4),
tf.keras.layers.LSTM(64),
tf.keras.layers.Dense(1, activation="sigmoid"),
]
)
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
print(model.summary())
return model
def dump_input_types(training_sequences, training_labels, test_sequences, test_labels):
print(
"training_sequences: ",
training_sequences.shape,
type(training_sequences),
training_sequences.dtype,
)
print("training_labels: ", type(training_labels))
print(
"test_sequences: ",
test_sequences.shape,
type(test_sequences),
test_sequences.dtype,
)
print("test_labels: ", type(test_labels))
def convert_input_type(
training_sequences, training_labels, testing_sequences, test_labels
):
training_labels = np.array(training_labels)
test_labels = np.array(test_labels)
return training_sequences, training_labels, testing_sequences, test_labels
def train_model(
model, training_sequences, training_labels, test_sequences, test_labels, num_epochs
):
history = model.fit(
training_sequences,
training_labels,
epochs=num_epochs,
validation_data=(test_sequences, test_labels),
verbose=2,
)
print(history)
def save_model(model):
model.save("models/halamka_nlp_tf.h5")
def main():
dump_tf_info()
df = load_data("data/halamka_posts_1836.tsv")
df = remove_header(df)
df = remove_category_2_rows(df)
nrows = get_nrows(df)
(
embedding_dim,
max_length,
trunc_type,
padding_type,
oov_tok,
training_size,
test_portion,
) = get_nlp_hyperparameters(nrows)
num_sentences, corpus = get_corpus(df)
(
word_index,
vocab_size,
training_sequences,
training_labels,
test_sequences,
test_labels,
) = tokenize(
corpus, test_portion, training_size, max_length, padding_type, trunc_type
)
embeddings_matrix = get_embeddings_matrix(word_index, vocab_size, embedding_dim)
model = create_model(vocab_size, embedding_dim, max_length, embeddings_matrix)
dump_input_types(training_sequences, training_labels, test_sequences, test_labels)
(
training_sequences2,
training_labels2,
test_sequences2,
test_labels2,
) = convert_input_type(
training_sequences, training_labels, test_sequences, test_labels
)
train_model(
model,
training_sequences2,
training_labels2,
test_sequences2,
test_labels2,
num_epochs=50,
)
save_model(model)
if __name__ == "__main__":
import time
start_time = time.time()
main()
print("--- {} seconds ---".format(time.time() - start_time))
|
none
| 1
| 2.730041
| 3
|
|
book/_build/jupyter_execute/descriptive/m3-demo-04-SummaryStatisticsAndVisualizations.py
|
hossainlab/statswithpy
| 0
|
6628437
|
<gh_stars>0
#!/usr/bin/env python
# coding: utf-8

# ## Univariate and Bivariate Analysis
# <b>Dataset</b>: https://www.kaggle.com/mustafaali96/weight-height
# The variables used are:
# * Height
# * Weight
# * Gender

# ### Import libraries

# In[1]:

import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns

# ### Load and read dataset

# In[2]:

data = pd.read_csv('datasets/weight-height.csv')
data.head()

# In[3]:

data.shape

# ### Mean, median, mode, min, max and quantiles

# In[4]:

data.describe()

# ## Univariate Analysis

# #### Gender count

# In[5]:

sns.countplot(data['Gender'])
# FIX: the original wrote `plt.show` (attribute access, a no-op) — the call
# parentheses were missing, so the figure was never explicitly rendered.
plt.show()

# ### Considering only Height

# In[6]:

height = data['Height']
height.head()

# In[7]:

height.shape

# #### Histogram-plot

# In[8]:

plt.figure(figsize=(12, 8))
height.plot(kind='hist',
            title='Height Histogram')

# #### Box-plot

# In[9]:

plt.figure(figsize=(12, 8))
height.plot(kind='box',
            title='Height Box-plot')

# #### KDE distribution for height

# In[10]:

height.plot(kind='kde',
            title='Height KDE', figsize=(12, 8))

# #### Analysis
# As we can see we have a high count for height in the range 60 to 75.

# ### Considering only weight

# In[11]:

weight = data['Weight']
weight.head()

# In[12]:

weight.shape

# #### Histogram-plot

# In[13]:

plt.figure(figsize=(12, 8))
weight.plot(kind='hist',
            title='Weight Histogram')

# #### Box-plot

# In[14]:

plt.figure(figsize=(12, 8))
weight.plot(kind='box',
            title='Weight Box-plot')

# #### KDE distribution for weight
# (header fixed: the original said "height" for this weight cell)

# In[15]:

plt.figure(figsize=(12, 8))
weight.plot(kind='kde',
            title='Weight KDE')

# In[ ]:

# ## Bivariate Analysis

# #### Considering both height and weight

# In[16]:

plt.figure(figsize=(12, 8))
sns.scatterplot(x="Height", y="Weight", data=data)

# In[17]:

plt.figure(figsize=(12, 8))
sns.scatterplot(x="Height", y="Weight", hue='Gender', data=data)

# In[18]:

gender_groupby = data.groupby('Gender', as_index=False)
gender_groupby.head()

# In[19]:

gender_groupby.describe().T

# ### Distribution plots

# #### Considering both gender and height

# In[20]:

sns.FacetGrid(data, hue='Gender', height=5) .map(sns.distplot, 'Height') .add_legend()

# #### Considering both gender and weight

# In[21]:

sns.FacetGrid(data, hue='Gender', height=5) .map(sns.distplot, 'Weight').add_legend()

# ### Box and Violin Plots
# (header fixed: the next two cells are box plots, not violin plots)

# #### Gender vs Height

# In[22]:

plt.figure(figsize=(12, 8))
sns.boxplot(x='Gender', y='Height', data=data)

# #### Gender vs Weight

# In[23]:

plt.figure(figsize=(12, 8))
sns.boxplot(x='Gender', y='Weight', data=data)

# In[24]:

plt.figure(figsize=(12, 8))
sns.violinplot(x='Gender', y='Height', data=data)

# In[25]:

plt.figure(figsize=(12, 8))
sns.violinplot(x='Gender', y='Weight', data=data)

# ### Multivariate Analysis

# In[26]:

sns.pairplot(data, hue='Gender', height=4)

# In[ ]:
|
#!/usr/bin/env python
# coding: utf-8
# ## Univariate and Bivariate Analysis
# <b>Dataset</b>: https://www.kaggle.com/mustafaali96/weight-height
# The variables used are:
# * Height
# * Weight
# * Gender
# ### Import libraries
# In[1]:
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
# ### Load and read dataset
# In[2]:
data = pd.read_csv('datasets/weight-height.csv')
data.head()
# In[3]:
data.shape
# ### Mean, median, mode, min, max and quantiles
# In[4]:
data.describe()
# ## Univariate Analysis
# #### Gender count
# In[5]:
sns.countplot(data['Gender'])
plt.show
# ### Considering only Height
# In[6]:
height = data['Height']
height.head()
# In[7]:
height.shape
# #### Histogram-plot
# In[8]:
plt.figure(figsize=(12, 8))
height.plot(kind = 'hist',
title = 'Height Histogram')
# #### Box-plot
# In[9]:
plt.figure(figsize=(12, 8))
height.plot(kind = 'box',
title = 'Height Box-plot')
# #### KDE distribution for height
# In[10]:
height.plot(kind = 'kde',
title = 'Height KDE', figsize=(12, 8))
# #### Analysis
# As we can see we have a high count for height in the range 60 to 75.
# ### Considering only weight
# In[11]:
weight = data['Weight']
weight.head()
# In[12]:
weight.shape
# #### Histogram-plot
# In[13]:
plt.figure(figsize=(12, 8))
weight.plot(kind = 'hist',
title = 'Weight Histogram')
# #### Box-plot
# In[14]:
plt.figure(figsize=(12, 8))
weight.plot(kind = 'box',
title = 'Weight Box-plot')
# #### KDE distribution for height
# In[15]:
plt.figure(figsize=(12, 8))
weight.plot(kind = 'kde',
title = 'Weight KDE')
# In[ ]:
# ## Bivariate Analysis
# #### Considering both height and weight
# In[16]:
plt.figure(figsize=(12, 8))
sns.scatterplot(x = "Height", y = "Weight", data=data)
# In[17]:
plt.figure(figsize=(12, 8))
sns.scatterplot(x = "Height", y = "Weight", hue='Gender', data=data)
# In[18]:
gender_groupby = data.groupby('Gender', as_index=False)
gender_groupby.head()
# In[19]:
gender_groupby.describe().T
# ### Distribution plots
# #### Considering both gender and height
# In[20]:
sns.FacetGrid(data, hue = 'Gender', height = 5) .map(sns.distplot, 'Height') .add_legend()
# #### Considering both gender and weight
# In[21]:
sns.FacetGrid(data, hue = 'Gender', height = 5) .map(sns.distplot, 'Weight').add_legend()
# ### Violin Plot
# #### Gender vs Height
# In[22]:
plt.figure(figsize=(12, 8))
sns.boxplot(x = 'Gender', y ='Height', data = data)
# #### Gender vs Weight
# In[23]:
plt.figure(figsize=(12, 8))
sns.boxplot(x = 'Gender', y ='Weight', data = data)
# In[24]:
plt.figure(figsize=(12, 8))
sns.violinplot(x = 'Gender', y ='Height', data = data)
# In[25]:
plt.figure(figsize=(12, 8))
sns.violinplot(x = 'Gender', y ='Weight', data = data)
# ### Multivariate Analysis
# In[26]:
sns.pairplot(data, hue = 'Gender', height = 4)
# In[ ]:
|
en
| 0.457787
|
#!/usr/bin/env python # coding: utf-8 # ## Univariate and Bivariate Analysis # <b>Dataset</b>: https://www.kaggle.com/mustafaali96/weight-height # The variables used are: # * Height # * Weight # * Gender # ### Import libraries # In[1]: # ### Load and read dataset # In[2]: # In[3]: # ### Mean, median, mode, min, max and quantiles # In[4]: # ## Univariate Analysis # #### Gender count # In[5]: # ### Considering only Height # In[6]: # In[7]: # #### Histogram-plot # In[8]: # #### Box-plot # In[9]: # #### KDE distribution for height # In[10]: # #### Analysis # As we can see we have a high count for height in the range 60 to 75. # ### Considering only weight # In[11]: # In[12]: # #### Histogram-plot # In[13]: # #### Box-plot # In[14]: # #### KDE distribution for height # In[15]: # In[ ]: # ## Bivariate Analysis # #### Considering both height and weight # In[16]: # In[17]: # In[18]: # In[19]: # ### Distribution plots # #### Considering both gender and height # In[20]: # #### Considering both gender and weight # In[21]: # ### Violin Plot # #### Gender vs Height # In[22]: # #### Gender vs Weight # In[23]: # In[24]: # In[25]: # ### Multivariate Analysis # In[26]: # In[ ]:
| 3.903969
| 4
|
conf.py
|
orishamir/OriScapy
| 0
|
6628438
|
# Default network interface name.
# NOTE(review): presumably the interface packets are sent/sniffed on —
# confirm against the code that reads this setting.
iface = 'eth0'
|
iface = 'eth0'
|
none
| 1
| 1.05878
| 1
|
|
Auto-differentiation/auto_class.py
|
Robertboy18/Numerical-Algorithms-Implementation
| 0
|
6628439
|
<reponame>Robertboy18/Numerical-Algorithms-Implementation<gh_stars>0
# original author : Professor <NAME>
class Autodiff_Node(object):
    """Base class for nodes in a reverse-mode automatic-differentiation DAG.

    Each node keeps a list of `parents` (its inputs in the graph).
    Subclasses supply `function` (the forward computation) and
    `backpropagation_function` (the local gradient rule); this base class
    provides forward evaluation, output caching and the backward gradient
    walk used by `compute_gradient`.
    """

    def __init__(self, parents):
        """Parameters:
        ---------------
        `parents` a list of `Autodiff_Node` objects corresponding to the graph
        parents."""
        self._set_parents(parents)
        self._output_data = None

    def _set_parents(self, parents):
        self.parents = parents
        return None

    def set_output_data(self, y):
        # Cache the forward-pass output; reused by the backward pass.
        self._output_data = y
        return None

    def get_output_data(self):
        return self._output_data

    @staticmethod
    def function(x):
        """Given input `x` return output `y`. Must be overridden by subclasses."""
        # FIX: the original *returned* NotImplementedError, silently handing
        # the exception class to callers instead of signalling misuse.
        raise NotImplementedError

    @staticmethod
    def backpropagation_function(x, y, output_gradient):
        """Map the output gradient to the gradient w.r.t. the inputs.

        Parameters:
        --------------------
        `x` is the input variable(s): a list of tensors one for each input from
        a graph parent.
        `y` is the output variable(s): a list of tensors one for each output to
        a graph child.
        `output_gradient` is the gradient (list of partial derivatives) of a
        scalar function with respect to one or more output variables.

        Returns:
        --------------------
        `input_gradient` is the gradient (list of partial derivatives) of a
        scalar function with respect to one or more input variables."""
        # FIX: raise instead of returning the exception class (see above).
        raise NotImplementedError

    def eval(self):
        """Evaluate the output of the node, moving from necessary inputs
        through the DAG in the forward direction."""
        # Recursively evaluate each parent until input variables are reached.
        x = [node.eval() for node in self.parents]
        return self.function(x)

    def _eval_and_save_output(self):
        # Stateful forward pass: like `eval`, but caches every node's output
        # (via set_output_data) so the backward pass can reuse it.
        x = [node._eval_and_save_output() for node in self.parents]
        y = self.function(x)
        self.set_output_data(y)
        return y

    def _get_gradient(self, output_gradient):
        # Backward walk through the DAG.  `_eval_and_save_output()` must have
        # been called first so each node's output is cached.
        x = [node.get_output_data() for node in self.parents]
        y = self.get_output_data()
        input_gradient = self.backpropagation_function(x, y, output_gradient)
        # Recursion + generators: yield every partial derivative produced by
        # the (transitive) parents, in parent order.
        for node, sub_gradient in zip(self.parents, input_gradient):
            for inner_gradient in node._get_gradient(sub_gradient):
                yield inner_gradient

    def compute_gradient(self):
        """Assumes the node has scalar output."""
        # The stateful forward pass must precede the gradient calculation.
        self._eval_and_save_output()
        # The seed is 1.0 because dL/dL = 1.
        return [g for g in self._get_gradient(1.)]
class Add(Autodiff_Node):
    """Node that sums its two parent nodes."""

    def __init__(self, a, b):
        super().__init__([a, b])

    @staticmethod
    def function(x):
        left, right = x
        return left + right

    @staticmethod
    def backpropagation_function(x, y, output_gradient):
        # d(a+b)/da = d(a+b)/db = 1, so the gradient passes straight through.
        return [output_gradient * 1, output_gradient * 1]
class Multiply(Autodiff_Node):
    """Node that multiplies its two parent nodes."""

    def __init__(self, a, b):
        super().__init__([a, b])

    @staticmethod
    def function(x):
        left, right = x
        return left * right

    @staticmethod
    def backpropagation_function(x, y, output_gradient):
        left, right = x
        # d(ab)/da = b and d(ab)/db = a.
        return [output_gradient * right, output_gradient * left]
class Tanh(Autodiff_Node):
    """Node that applies the hyperbolic tangent to its single parent."""

    def __init__(self, x):
        super().__init__([x])

    @staticmethod
    def function(x):
        return np.tanh(x[0])

    @staticmethod
    def backpropagation_function(x, y, output_gradient):
        # d tanh(u)/du = sech(u)^2 = 1 / cosh(u)^2
        local_derivative = 1. / np.cosh(x[0]) ** 2
        return [output_gradient * local_derivative]
class Input_Variable(Autodiff_Node):
    """Leaf node holding a fixed numerical value.

    Use these to hold parameters and variables.  The gradient of a node with
    a scalar output is a list of partial derivatives with respect to these
    Input Variables.

    Parameters:
    ---------------
    `value` the numerical value of the variable (scalar in this example)."""

    def __init__(self, value):
        self.value = value
        parents = []
        super().__init__(parents)

    def function(self, x):
        # FIX: the original declared this a @staticmethod while reading
        # `self.value`, which raised NameError if it was ever invoked.
        # `eval` and `_eval_and_save_output` are overridden below, so in
        # normal use it is never called; it is kept (as an instance method)
        # for interface completeness.
        return self.value

    @staticmethod
    def backpropagation_function(x, y, output_gradient):
        # Leaf nodes simply pass the incoming gradient through.
        input_gradient = output_gradient
        return input_gradient

    def eval(self):
        # Leaf override: return the stored value, no recursion.
        return self.value

    def _eval_and_save_output(self):
        # Leaf override of the stateful forward pass.
        self.set_output_data(self.value)
        return self.value

    def _get_gradient(self, output_gradient):
        # Terminate the backward recursion: yield the accumulated partial
        # derivative for this input variable.
        yield output_gradient
|
# original author : Professor <NAME>
class Autodiff_Node(object):
## A class is a recipe for creating objects (with methods and atributes).
## This is called a 'base class', which is like a boiler plate recipe that
## many other classes will use a starting point, each making specific
## changes.
## All methods (unless otherwise specified) must have the first argument
## a variable called `self`, which is a copy of the object itself. Hence,
## one can access any method or atribute in the object throught the `self`
## variable.
def __init__(self, parents):
"""Parameters:
---------------
`parents` a list of `Autodiff_Node` objects corresponding to the graph
parents."""
## initializer gets called once when you create (or instantiate) an
## object
self._set_parents(parents)
self._output_data = None
def _set_parents(self, parents):
self.parents = parents
return None
def set_output_data(self, y):
self._output_data = y
return None
def get_output_data(self):
return self._output_data
## a static modthod just means it doesn't depend on the data in `self`, so
## `self` does not need to be an argument
@staticmethod
def function(x):
"""Given input `x` return output `y`"""
## this is just a place holder (or template) to be used to create
## specific types of Node objects
return NotImplementedError
## a static modthod just means it doesn't depend on the data in `self`, so
## `self` does not need to be an argument
@staticmethod
def backpropagation_function(x, y, output_gradient):
"""
Parameters:
--------------------
`x` is the input variable(s): a list of tensors one for each input from
a graph parent.
`y` is the output variable(s): a list of tensors one for each ouput to
a graph child.
`output_gradient` is the gradient (list of partial derivatives) of a
scalar function with respect to one or more output variables.
Returns:
--------------------
`input_gradient` is the gradient (list of partial derivatives) of a
scalar function with respect to one or more input variables."""
## this is just a place holder (or template) to be used to create
## specific types of Node objects
return NotImplementedError
def eval(self):
"""Evaluate the output of the node, moving from necessary inputs
through the DAG in the forward direction."""
## recursively call eval for each node until input variables are reached
x = [node.eval() for node in self.parents]
return self.function(x)
def _eval_and_save_output(self):
## this is a stateful approach and should be used with care. This method
## will alter one of the atributes. This can lead to confusing and hard
## to diagnose bugs. It is best to avoid doing this whenever possible.
## recursively call eval for each node until inputs are reached
x = [node._eval_and_save_output() for node in self.parents]
y = self.function(x)
## internal data, or state, is modified here. Specifically the
## `self._output_data` attribute.
self.set_output_data(y)
return y
def _get_gradient(self, output_gradient):
## This is a helper function to assemble the gradients, moving backward
## through the DAG. We must call `_eval_and_save_output()` before
## using this method
x = [node.get_output_data() for node in self.parents]
## We use internal state here, which assumes that
## `_eval_and_save_output()` was called before using this method
y = self.get_output_data()
input_gradient = self.backpropagation_function(x, y, output_gradient)
## We use recursion combined with generators (see examples at the end of
## this notebook)
for node, sub_gradient in zip(self.parents, input_gradient):
## recursive call to the same method attached to the parent nodes
for inner_gradient in node._get_gradient(sub_gradient):
yield inner_gradient
def compute_gradient(self):
    """Return the gradient of this node's scalar output w.r.t. all inputs.

    Runs the caching forward pass first (required by `_get_gradient`),
    then seeds the backward pass with 1.0 because dL/dL == 1.
    """
    ## stateful forward pass must precede the gradient walk
    self._eval_and_save_output()
    return list(self._get_gradient(1.))
class Add(Autodiff_Node):
    """Node that sums the outputs of its two parent nodes."""

    def __init__(self, a, b):
        ## the two addends become this node's graph parents
        super().__init__([a, b])

    @staticmethod
    def function(x):
        """Forward pass: return a + b for inputs x = [a, b]."""
        left, right = x
        return left + right

    @staticmethod
    def backpropagation_function(x, y, output_gradient):
        """Backward pass: d(a+b)/da = d(a+b)/db = 1, so the incoming
        gradient flows through unchanged to both parents."""
        return [output_gradient * 1, output_gradient * 1]
class Multiply(Autodiff_Node):
    """Node that multiplies the outputs of its two parent nodes."""

    def __init__(self, a, b):
        super().__init__([a, b])

    @staticmethod
    def function(x):
        """Forward pass: return a * b for inputs x = [a, b]."""
        left, right = x
        return left * right

    @staticmethod
    def backpropagation_function(x, y, output_gradient):
        """Backward pass (product rule): d(ab)/da = b and d(ab)/db = a."""
        left, right = x
        return [output_gradient * right, output_gradient * left]
class Tanh(Autodiff_Node):
    """Node that applies the hyperbolic tangent to its single parent."""

    def __init__(self, x):
        super().__init__([x])

    @staticmethod
    def function(x):
        """Forward pass: elementwise tanh of the single input x[0]."""
        return np.tanh(x[0])

    @staticmethod
    def backpropagation_function(x, y, output_gradient):
        """Backward pass: d tanh(u)/du = sech(u)^2 = 1 / cosh(u)^2."""
        local_derivative = 1. / np.cosh(x[0]) ** 2
        return [output_gradient * local_derivative]
class Input_Variable(Autodiff_Node):
    """Input Variables have a specific fixed value. Use these to hold parameters
    and variables. Gradient of a node with a scalar output will be a list of
    partial derivatives with respect to these Input Variables.

    Parameters:
    ---------------
    `value` the numerical value of the variable (scalar in this example)."""

    def __init__(self, value):
        self.value = value
        ## input variables are graph leaves: no parents
        super().__init__([])

    def function(self, x):
        ## BUG FIX: this was declared `@staticmethod` while its body read
        ## `self.value`, which would raise NameError if ever invoked.
        ## An instance method stays compatible with the base class, which
        ## always calls it as `self.function(x)`.
        return self.value

    @staticmethod
    def backpropagation_function(x, y, output_gradient):
        ## leaf node: the gradient terminates here, pass it through
        return output_gradient

    def eval(self):
        ## overrides the default `eval` from `Autodiff_Node`: a leaf simply
        ## reports its fixed value
        return self.value

    def _eval_and_save_output(self):
        ## override: cache and return the fixed value
        self.set_output_data(self.value)
        return self.value

    def _get_gradient(self, output_gradient):
        ## override: recursion bottoms out here; yield the accumulated
        ## partial derivative for this input variable
        yield output_gradient
|
en
| 0.812549
|
# original author : Professor <NAME> ## A class is a recipe for creating objects (with methods and atributes). ## This is called a 'base class', which is like a boiler plate recipe that ## many other classes will use a starting point, each making specific ## changes. ## All methods (unless otherwise specified) must have the first argument ## a variable called `self`, which is a copy of the object itself. Hence, ## one can access any method or atribute in the object throught the `self` ## variable. Parameters: --------------- `parents` a list of `Autodiff_Node` objects corresponding to the graph parents. ## initializer gets called once when you create (or instantiate) an ## object ## a static modthod just means it doesn't depend on the data in `self`, so ## `self` does not need to be an argument Given input `x` return output `y` ## this is just a place holder (or template) to be used to create ## specific types of Node objects ## a static modthod just means it doesn't depend on the data in `self`, so ## `self` does not need to be an argument Parameters: -------------------- `x` is the input variable(s): a list of tensors one for each input from a graph parent. `y` is the output variable(s): a list of tensors one for each ouput to a graph child. `output_gradient` is the gradient (list of partial derivatives) of a scalar function with respect to one or more output variables. Returns: -------------------- `input_gradient` is the gradient (list of partial derivatives) of a scalar function with respect to one or more input variables. ## this is just a place holder (or template) to be used to create ## specific types of Node objects Evaluate the output of the node, moving from necessary inputs through the DAG in the forward direction. ## recursively call eval for each node until input variables are reached ## this is a stateful approach and should be used with care. This method ## will alter one of the atributes. This can lead to confusing and hard ## to diagnose bugs. 
It is best to avoid doing this whenever possible. ## recursively call eval for each node until inputs are reached ## internal data, or state, is modified here. Specifically the ## `self._output_data` attribute. ## This is a helper function to assemble the gradients, moving backward ## through the DAG. We must call `_eval_and_save_output()` before ## using this method ## We use internal state here, which assumes that ## `_eval_and_save_output()` was called before using this method ## We use recursion combined with generators (see examples at the end of ## this notebook) ## recursive call to the same method attached to the parent nodes Assumes the node has scalar output ## computing gradients is very simple with the `Autodiff_node` class ## the dangerous stateful call must precede the gradient calculation ## the input is always simply `1.0` because partial_L/partial_L = 1 Add two input nodes ## this defines a node type specifically for addition, it 'inherits' all ## of the methods and atributes from its base class, `Autodiff_Node`. Think ## of these as default methods. Any methods that are redefined here are used ## instead of the default methods from the base class ## initializer gets called once when you create (or instantiate) an ## object ## calls `__init__` method of the base class ## a static modthod just means it doesn't depend on the data in `self`, so ## `self` does not need to be an argument Multiply two input nodes Apply the `tanh` function to an input node Input Variables have a specific fixed value. Use these to hold parameters and variables. Gradient of a node with a scalar output will be a list of partial derivatives with respect to these Input Variables. Parameters: --------------- `value` the numerical value of the variable (scalar in this example). ## this overrides the default `eval` method defined in `Autodiff_Node` ## base class ## another override ## another override
| 3.608301
| 4
|
tests/e2e/rnn_rollout/test_deal_or_not.py
|
haojiepan1/CrossWOZ
| 1
|
6628440
|
import argparse
from convlab2.e2e.rnn_rollout.deal_or_not import DealornotAgent
from convlab2.e2e.rnn_rollout.deal_or_not.model import get_context_generator
from convlab2 import DealornotSession
import convlab2.e2e.rnn_rollout.utils as utils
import numpy as np
session_num = 20  # intended number of self-play sessions; not referenced below — TODO confirm
def rnn_model_args() -> argparse.Namespace:
    """Build the default hyper-parameter namespace for the RNN-rollout model.

    Mirrors the self-play script's CLI; the defaults below are what this
    test relies on. NOTE(review): ``parse_args()`` reads ``sys.argv``, so
    unrecognized command-line flags abort the run — confirm that is
    intended when invoked from a test harness.
    """
    parser = argparse.ArgumentParser(description='selfplaying script')
    # network / embedding sizes
    parser.add_argument('--nembed_word', type=int, default=256,
                        help='size of word embeddings')
    parser.add_argument('--nembed_ctx', type=int, default=64,
                        help='size of context embeddings')
    parser.add_argument('--nhid_lang', type=int, default=128,
                        help='size of the hidden state for the language module')
    parser.add_argument('--nhid_cluster', type=int, default=256,
                        help='size of the hidden state for the language module')
    parser.add_argument('--nhid_ctx', type=int, default=64,
                        help='size of the hidden state for the context module')
    parser.add_argument('--nhid_strat', type=int, default=64,
                        help='size of the hidden state for the strategy module')
    parser.add_argument('--nhid_attn', type=int, default=64,
                        help='size of the hidden state for the attention module')
    parser.add_argument('--nhid_sel', type=int, default=128,
                        help='size of the hidden state for the selection module')
    # optimization settings
    parser.add_argument('--lr', type=float, default=0.001,
                        help='initial learning rate')
    parser.add_argument('--min_lr', type=float, default=1e-07,
                        help='min threshold for learning rate annealing')
    parser.add_argument('--decay_rate', type=float, default=5.0,
                        help='decrease learning rate by this factor')
    parser.add_argument('--decay_every', type=int, default=1,
                        help='decrease learning rate after decay_every epochs')
    parser.add_argument('--momentum', type=float, default=0.1,
                        help='momentum for sgd')
    parser.add_argument('--clip', type=float, default=2.0,
                        help='gradient clipping')
    parser.add_argument('--dropout', type=float, default=0.1,
                        help='dropout rate in embedding layer')
    parser.add_argument('--init_range', type=float, default=0.2,
                        help='initialization range')
    parser.add_argument('--max_epoch', type=int, default=30,
                        help='max number of epochs')
    parser.add_argument('--num_clusters', type=int, default=50,
                        help='number of clusters')
    parser.add_argument('--partner_ctx_weight', type=float, default=0.0,
                        help='selection weight')
    parser.add_argument('--sel_weight', type=float, default=0.6,
                        help='selection weight')
    # model checkpoint paths
    parser.add_argument('--prediction_model_file', type=str, default='',
                        help='path to save the prediction model')
    parser.add_argument('--cluster_model_file', type=str, default='',
                        help='path to save the cluster model')
    parser.add_argument('--lang_model_file', type=str, default='',
                        help='path to save the language model')
    parser.add_argument('--model_file', type=str,
                        help='model file (use algorithm/dataset/configs as root path)',
                        default="models/rnn_model_state_dict.th")
    parser.add_argument('--alice_forward_model_file', type=str,
                        help='Alice forward model file')
    parser.add_argument('--bob_model_file', type=str,
                        help='Bob model file')
    parser.add_argument('--context_file', type=str, default='data/deal_or_not/selfplay.txt',
                        help='context file')
    # sampling / dialog control
    parser.add_argument('--temperature', type=float, default=1.0,
                        help='temperature')
    parser.add_argument('--pred_temperature', type=float, default=1.0,
                        help='temperature')
    parser.add_argument('--verbose', action='store_true', default=False,
                        help='print out conversations')  # BUG FIX: was 'converations'
    parser.add_argument('--seed', type=int, default=1,
                        help='random seed')
    parser.add_argument('--score_threshold', type=int, default=6,
                        help='successful dialog should have more than score_threshold in score')
    parser.add_argument('--max_turns', type=int, default=20,
                        help='maximum number of turns in a dialog')
    parser.add_argument('--log_file', type=str, default='',
                        help='log successful dialogs to file for training')
    # agent behavior flags
    parser.add_argument('--smart_alice', action='store_true', default=False,
                        help='make Alice smart again')
    parser.add_argument('--diverse_alice', action='store_true', default=False,
                        help='make Alice smart again')
    parser.add_argument('--rollout_bsz', type=int, default=3,
                        help='rollout batch size')
    parser.add_argument('--rollout_count_threshold', type=int, default=3,
                        help='rollout count threshold')
    parser.add_argument('--smart_bob', action='store_true', default=False,
                        help='make Bob smart again')
    parser.add_argument('--selection_model_file', type=str, default='models/selection_model.th',
                        help='path to save the final model')
    parser.add_argument('--rollout_model_file', type=str, default='',
                        help='path to save the final model')
    parser.add_argument('--diverse_bob', action='store_true', default=False,
                        help='make Alice smart again')
    parser.add_argument('--ref_text', type=str,
                        help='file with the reference text')
    parser.add_argument('--cuda', action='store_true', default=False,
                        help='use CUDA')
    parser.add_argument('--domain', type=str, default='object_division',
                        help='domain for the dialogue')
    parser.add_argument('--visual', action='store_true', default=False,
                        help='plot graphs')
    parser.add_argument('--eps', type=float, default=0.0,
                        help='eps greedy')
    parser.add_argument('--data', type=str, default='data/deal_or_not',
                        help='location of the data corpus (use project path root path)')
    parser.add_argument('--unk_threshold', type=int, default=20,
                        help='minimum word frequency to be in dictionary')
    parser.add_argument('--bsz', type=int, default=16,
                        help='batch size')
    parser.add_argument('--validate', action='store_true', default=False,
                        help='plot graphs')
    parser.add_argument('--sep_sel', action='store_true', default=True,
                        help='use separate classifiers for selection')
    args = parser.parse_args()
    return args
def sel_model_args() -> argparse.Namespace:
    """Build the default hyper-parameter namespace for the selection model.

    Mirrors the selection-model training script's CLI; the defaults below
    are what this test relies on. NOTE(review): ``parse_args()`` reads
    ``sys.argv``, so unrecognized command-line flags abort the run —
    confirm that is intended when invoked from a test harness.
    """
    parser = argparse.ArgumentParser(description='training script')
    parser.add_argument('--data', type=str, default='data/negotiate',
                        help='location of the data corpus')
    # network / embedding sizes
    parser.add_argument('--nembed_word', type=int, default=128,
                        help='size of word embeddings')
    parser.add_argument('--nembed_ctx', type=int, default=128,
                        help='size of context embeddings')
    parser.add_argument('--nhid_lang', type=int, default=128,
                        help='size of the hidden state for the language module')
    parser.add_argument('--nhid_cluster', type=int, default=256,
                        help='size of the hidden state for the language module')
    parser.add_argument('--nhid_ctx', type=int, default=64,
                        help='size of the hidden state for the context module')
    parser.add_argument('--nhid_strat', type=int, default=256,
                        help='size of the hidden state for the strategy module')
    parser.add_argument('--nhid_attn', type=int, default=128,
                        help='size of the hidden state for the attention module')
    parser.add_argument('--nhid_sel', type=int, default=128,
                        help='size of the hidden state for the selection module')
    # optimization settings
    parser.add_argument('--lr', type=float, default=0.001,
                        help='initial learning rate')
    parser.add_argument('--min_lr', type=float, default=1e-5,
                        help='min threshold for learning rate annealing')
    parser.add_argument('--decay_rate', type=float, default=5.0,
                        help='decrease learning rate by this factor')
    parser.add_argument('--decay_every', type=int, default=1,
                        help='decrease learning rate after decay_every epochs')
    parser.add_argument('--momentum', type=float, default=0.1,
                        help='momentum for sgd')
    parser.add_argument('--clip', type=float, default=0.2,
                        help='gradient clipping')
    parser.add_argument('--dropout', type=float, default=0.1,
                        help='dropout rate in embedding layer')
    parser.add_argument('--init_range', type=float, default=0.2,
                        help='initialization range')
    parser.add_argument('--max_epoch', type=int, default=7,
                        help='max number of epochs')
    parser.add_argument('--num_clusters', type=int, default=50,
                        help='number of clusters')
    parser.add_argument('--bsz', type=int, default=25,
                        help='batch size')
    parser.add_argument('--unk_threshold', type=int, default=20,
                        help='minimum word frequency to be in dictionary')
    parser.add_argument('--temperature', type=float, default=0.1,
                        help='temperature')
    parser.add_argument('--partner_ctx_weight', type=float, default=0.0,
                        help='selection weight')
    parser.add_argument('--sel_weight', type=float, default=0.6,
                        help='selection weight')
    parser.add_argument('--seed', type=int, default=1,
                        help='random seed')
    parser.add_argument('--cuda', action='store_true', default=False,
                        help='use CUDA')
    # model checkpoint paths
    parser.add_argument('--model_file', type=str, default='',
                        help='path to save the final model')
    parser.add_argument('--prediction_model_file', type=str, default='',
                        help='path to save the prediction model')
    parser.add_argument('--selection_model_file', type=str, default='models/selection_model_state_dict.th',
                        help='path to save the selection model')
    parser.add_argument('--cluster_model_file', type=str, default='',
                        help='path to save the cluster model')
    parser.add_argument('--lang_model_file', type=str, default='',
                        help='path to save the language model')
    # misc flags
    parser.add_argument('--visual', action='store_true', default=False,
                        help='plot graphs')
    parser.add_argument('--skip_values', action='store_true', default=True,
                        help='skip values in ctx encoder')
    parser.add_argument('--model_type', type=str, default='selection_model',
                        help='model type')
    parser.add_argument('--domain', type=str, default='object_division',
                        help='domain for the dialogue')
    parser.add_argument('--clustering', action='store_true', default=False,
                        help='use clustering')
    parser.add_argument('--sep_sel', action='store_true', default=True,
                        help='use separate classifiers for selection')
    args = parser.parse_args()
    return args
# agent
# Build two RNN-rollout agents that will negotiate against each other.
alice_agent = DealornotAgent('Alice', rnn_model_args(), sel_model_args())
bob_agent = DealornotAgent('Bob', rnn_model_args(), sel_model_args())
agents = [alice_agent, bob_agent]
context_generator = get_context_generator(rnn_model_args().context_file)

# session
session = DealornotSession(alice_agent, bob_agent)
session_idx = 0
rewards = [[], []]  # per-session rewards: rewards[0] = Alice, rewards[1] = Bob
for ctxs in context_generator.iter():
    print('session_idx', session_idx)
    # each agent receives its own context and its partner's (reversed) context
    for agent, ctx, partner_ctx in zip(agents, ctxs, reversed(ctxs)):
        agent.feed_context(ctx)
        agent.feed_partner_context(partner_ctx)
    last_observation = None
    # alternate responses until the session signals termination
    while True:
        response = session.next_response(last_observation)
        print('\t', ' '.join(response))
        session_over = session.is_terminated()
        if session_over:
            break
        last_observation = response
    agree, [alice_r, bob_r] = session.get_rewards(ctxs)
    # BUG FIX: message previously read "alice vs bos"
    print('session [{}] alice vs bob: {:.1f}/{:.1f}'.format(session_idx, alice_r, bob_r))
    rewards[0].append(alice_r)
    rewards[1].append(bob_r)
    session.init_session()
    session_idx += 1
# print(np.mean(rewards, axis=1))
|
import argparse
from convlab2.e2e.rnn_rollout.deal_or_not import DealornotAgent
from convlab2.e2e.rnn_rollout.deal_or_not.model import get_context_generator
from convlab2 import DealornotSession
import convlab2.e2e.rnn_rollout.utils as utils
import numpy as np
session_num = 20  # intended number of self-play sessions; not referenced below — TODO confirm
def rnn_model_args() -> argparse.Namespace:
    """Build the default hyper-parameter namespace for the RNN-rollout model.

    Mirrors the self-play script's CLI; the defaults below are what this
    test relies on. NOTE(review): ``parse_args()`` reads ``sys.argv``, so
    unrecognized command-line flags abort the run — confirm that is
    intended when invoked from a test harness.
    """
    parser = argparse.ArgumentParser(description='selfplaying script')
    # network / embedding sizes
    parser.add_argument('--nembed_word', type=int, default=256,
                        help='size of word embeddings')
    parser.add_argument('--nembed_ctx', type=int, default=64,
                        help='size of context embeddings')
    parser.add_argument('--nhid_lang', type=int, default=128,
                        help='size of the hidden state for the language module')
    parser.add_argument('--nhid_cluster', type=int, default=256,
                        help='size of the hidden state for the language module')
    parser.add_argument('--nhid_ctx', type=int, default=64,
                        help='size of the hidden state for the context module')
    parser.add_argument('--nhid_strat', type=int, default=64,
                        help='size of the hidden state for the strategy module')
    parser.add_argument('--nhid_attn', type=int, default=64,
                        help='size of the hidden state for the attention module')
    parser.add_argument('--nhid_sel', type=int, default=128,
                        help='size of the hidden state for the selection module')
    # optimization settings
    parser.add_argument('--lr', type=float, default=0.001,
                        help='initial learning rate')
    parser.add_argument('--min_lr', type=float, default=1e-07,
                        help='min threshold for learning rate annealing')
    parser.add_argument('--decay_rate', type=float, default=5.0,
                        help='decrease learning rate by this factor')
    parser.add_argument('--decay_every', type=int, default=1,
                        help='decrease learning rate after decay_every epochs')
    parser.add_argument('--momentum', type=float, default=0.1,
                        help='momentum for sgd')
    parser.add_argument('--clip', type=float, default=2.0,
                        help='gradient clipping')
    parser.add_argument('--dropout', type=float, default=0.1,
                        help='dropout rate in embedding layer')
    parser.add_argument('--init_range', type=float, default=0.2,
                        help='initialization range')
    parser.add_argument('--max_epoch', type=int, default=30,
                        help='max number of epochs')
    parser.add_argument('--num_clusters', type=int, default=50,
                        help='number of clusters')
    parser.add_argument('--partner_ctx_weight', type=float, default=0.0,
                        help='selection weight')
    parser.add_argument('--sel_weight', type=float, default=0.6,
                        help='selection weight')
    # model checkpoint paths
    parser.add_argument('--prediction_model_file', type=str, default='',
                        help='path to save the prediction model')
    parser.add_argument('--cluster_model_file', type=str, default='',
                        help='path to save the cluster model')
    parser.add_argument('--lang_model_file', type=str, default='',
                        help='path to save the language model')
    parser.add_argument('--model_file', type=str,
                        help='model file (use algorithm/dataset/configs as root path)',
                        default="models/rnn_model_state_dict.th")
    parser.add_argument('--alice_forward_model_file', type=str,
                        help='Alice forward model file')
    parser.add_argument('--bob_model_file', type=str,
                        help='Bob model file')
    parser.add_argument('--context_file', type=str, default='data/deal_or_not/selfplay.txt',
                        help='context file')
    # sampling / dialog control
    parser.add_argument('--temperature', type=float, default=1.0,
                        help='temperature')
    parser.add_argument('--pred_temperature', type=float, default=1.0,
                        help='temperature')
    parser.add_argument('--verbose', action='store_true', default=False,
                        help='print out conversations')  # BUG FIX: was 'converations'
    parser.add_argument('--seed', type=int, default=1,
                        help='random seed')
    parser.add_argument('--score_threshold', type=int, default=6,
                        help='successful dialog should have more than score_threshold in score')
    parser.add_argument('--max_turns', type=int, default=20,
                        help='maximum number of turns in a dialog')
    parser.add_argument('--log_file', type=str, default='',
                        help='log successful dialogs to file for training')
    # agent behavior flags
    parser.add_argument('--smart_alice', action='store_true', default=False,
                        help='make Alice smart again')
    parser.add_argument('--diverse_alice', action='store_true', default=False,
                        help='make Alice smart again')
    parser.add_argument('--rollout_bsz', type=int, default=3,
                        help='rollout batch size')
    parser.add_argument('--rollout_count_threshold', type=int, default=3,
                        help='rollout count threshold')
    parser.add_argument('--smart_bob', action='store_true', default=False,
                        help='make Bob smart again')
    parser.add_argument('--selection_model_file', type=str, default='models/selection_model.th',
                        help='path to save the final model')
    parser.add_argument('--rollout_model_file', type=str, default='',
                        help='path to save the final model')
    parser.add_argument('--diverse_bob', action='store_true', default=False,
                        help='make Alice smart again')
    parser.add_argument('--ref_text', type=str,
                        help='file with the reference text')
    parser.add_argument('--cuda', action='store_true', default=False,
                        help='use CUDA')
    parser.add_argument('--domain', type=str, default='object_division',
                        help='domain for the dialogue')
    parser.add_argument('--visual', action='store_true', default=False,
                        help='plot graphs')
    parser.add_argument('--eps', type=float, default=0.0,
                        help='eps greedy')
    parser.add_argument('--data', type=str, default='data/deal_or_not',
                        help='location of the data corpus (use project path root path)')
    parser.add_argument('--unk_threshold', type=int, default=20,
                        help='minimum word frequency to be in dictionary')
    parser.add_argument('--bsz', type=int, default=16,
                        help='batch size')
    parser.add_argument('--validate', action='store_true', default=False,
                        help='plot graphs')
    parser.add_argument('--sep_sel', action='store_true', default=True,
                        help='use separate classifiers for selection')
    args = parser.parse_args()
    return args
def sel_model_args() -> argparse.Namespace:
    """Build the default hyper-parameter namespace for the selection model.

    Mirrors the selection-model training script's CLI; the defaults below
    are what this test relies on. NOTE(review): ``parse_args()`` reads
    ``sys.argv``, so unrecognized command-line flags abort the run —
    confirm that is intended when invoked from a test harness.
    """
    parser = argparse.ArgumentParser(description='training script')
    parser.add_argument('--data', type=str, default='data/negotiate',
                        help='location of the data corpus')
    # network / embedding sizes
    parser.add_argument('--nembed_word', type=int, default=128,
                        help='size of word embeddings')
    parser.add_argument('--nembed_ctx', type=int, default=128,
                        help='size of context embeddings')
    parser.add_argument('--nhid_lang', type=int, default=128,
                        help='size of the hidden state for the language module')
    parser.add_argument('--nhid_cluster', type=int, default=256,
                        help='size of the hidden state for the language module')
    parser.add_argument('--nhid_ctx', type=int, default=64,
                        help='size of the hidden state for the context module')
    parser.add_argument('--nhid_strat', type=int, default=256,
                        help='size of the hidden state for the strategy module')
    parser.add_argument('--nhid_attn', type=int, default=128,
                        help='size of the hidden state for the attention module')
    parser.add_argument('--nhid_sel', type=int, default=128,
                        help='size of the hidden state for the selection module')
    # optimization settings
    parser.add_argument('--lr', type=float, default=0.001,
                        help='initial learning rate')
    parser.add_argument('--min_lr', type=float, default=1e-5,
                        help='min threshold for learning rate annealing')
    parser.add_argument('--decay_rate', type=float, default=5.0,
                        help='decrease learning rate by this factor')
    parser.add_argument('--decay_every', type=int, default=1,
                        help='decrease learning rate after decay_every epochs')
    parser.add_argument('--momentum', type=float, default=0.1,
                        help='momentum for sgd')
    parser.add_argument('--clip', type=float, default=0.2,
                        help='gradient clipping')
    parser.add_argument('--dropout', type=float, default=0.1,
                        help='dropout rate in embedding layer')
    parser.add_argument('--init_range', type=float, default=0.2,
                        help='initialization range')
    parser.add_argument('--max_epoch', type=int, default=7,
                        help='max number of epochs')
    parser.add_argument('--num_clusters', type=int, default=50,
                        help='number of clusters')
    parser.add_argument('--bsz', type=int, default=25,
                        help='batch size')
    parser.add_argument('--unk_threshold', type=int, default=20,
                        help='minimum word frequency to be in dictionary')
    parser.add_argument('--temperature', type=float, default=0.1,
                        help='temperature')
    parser.add_argument('--partner_ctx_weight', type=float, default=0.0,
                        help='selection weight')
    parser.add_argument('--sel_weight', type=float, default=0.6,
                        help='selection weight')
    parser.add_argument('--seed', type=int, default=1,
                        help='random seed')
    parser.add_argument('--cuda', action='store_true', default=False,
                        help='use CUDA')
    # model checkpoint paths
    parser.add_argument('--model_file', type=str, default='',
                        help='path to save the final model')
    parser.add_argument('--prediction_model_file', type=str, default='',
                        help='path to save the prediction model')
    parser.add_argument('--selection_model_file', type=str, default='models/selection_model_state_dict.th',
                        help='path to save the selection model')
    parser.add_argument('--cluster_model_file', type=str, default='',
                        help='path to save the cluster model')
    parser.add_argument('--lang_model_file', type=str, default='',
                        help='path to save the language model')
    # misc flags
    parser.add_argument('--visual', action='store_true', default=False,
                        help='plot graphs')
    parser.add_argument('--skip_values', action='store_true', default=True,
                        help='skip values in ctx encoder')
    parser.add_argument('--model_type', type=str, default='selection_model',
                        help='model type')
    parser.add_argument('--domain', type=str, default='object_division',
                        help='domain for the dialogue')
    parser.add_argument('--clustering', action='store_true', default=False,
                        help='use clustering')
    parser.add_argument('--sep_sel', action='store_true', default=True,
                        help='use separate classifiers for selection')
    args = parser.parse_args()
    return args
# agent
# Build two RNN-rollout agents that will negotiate against each other.
alice_agent = DealornotAgent('Alice', rnn_model_args(), sel_model_args())
bob_agent = DealornotAgent('Bob', rnn_model_args(), sel_model_args())
agents = [alice_agent, bob_agent]
context_generator = get_context_generator(rnn_model_args().context_file)

# session
session = DealornotSession(alice_agent, bob_agent)
session_idx = 0
rewards = [[], []]  # per-session rewards: rewards[0] = Alice, rewards[1] = Bob
for ctxs in context_generator.iter():
    print('session_idx', session_idx)
    # each agent receives its own context and its partner's (reversed) context
    for agent, ctx, partner_ctx in zip(agents, ctxs, reversed(ctxs)):
        agent.feed_context(ctx)
        agent.feed_partner_context(partner_ctx)
    last_observation = None
    # alternate responses until the session signals termination
    while True:
        response = session.next_response(last_observation)
        print('\t', ' '.join(response))
        session_over = session.is_terminated()
        if session_over:
            break
        last_observation = response
    agree, [alice_r, bob_r] = session.get_rewards(ctxs)
    # BUG FIX: message previously read "alice vs bos"
    print('session [{}] alice vs bob: {:.1f}/{:.1f}'.format(session_idx, alice_r, bob_r))
    rewards[0].append(alice_r)
    rewards[1].append(bob_r)
    session.init_session()
    session_idx += 1
# print(np.mean(rewards, axis=1))
|
en
| 0.658939
|
# agent # session # print(np.mean(rewards, axis=1))
| 2.211698
| 2
|
src/estimagic/visualization/convergence_plot.py
|
janosg/estimagic
| 7
|
6628441
|
import numpy as np
import plotly.express as px
import plotly.graph_objects as go
from estimagic.benchmarking.process_benchmark_results import (
create_convergence_histories,
)
from estimagic.config import PLOTLY_TEMPLATE
from estimagic.utilities import propose_alternatives
from estimagic.visualization.plotting_utilities import create_grid_plot
from estimagic.visualization.plotting_utilities import create_ind_dict
def convergence_plot(
problems,
results,
*,
problem_subset=None,
algorithm_subset=None,
n_cols=2,
distance_measure="criterion",
monotone=True,
normalize_distance=True,
runtime_measure="n_evaluations",
stopping_criterion="y",
x_precision=1e-4,
y_precision=1e-4,
combine_plots_in_grid=True,
template=PLOTLY_TEMPLATE,
palette=px.colors.qualitative.Plotly,
):
"""Plot convergence of optimizers for a set of problems.
This creates a grid of plots, showing the convergence of the different
algorithms on each problem. The faster a line falls, the faster the algorithm
improved on the problem. The algorithm converged where its line reaches 0
(if normalize_distance is True) or the horizontal blue line labeled "true solution".
Each plot shows on the x axis the runtime_measure, which can be walltime or number
of evaluations. Each algorithm's convergence is a line in the plot. Convergence can
be measured by the criterion value of the particular time/evaluation. The
convergence can be made monotone (i.e. always taking the bast value so far) or
normalized such that the distance from the start to the true solution is one.
Args:
problems (dict): estimagic benchmarking problems dictionary. Keys are the
problem names. Values contain information on the problem, including the
solution value.
results (dict): estimagic benchmarking results dictionary. Keys are
tuples of the form (problem, algorithm), values are dictionaries of the
collected information on the benchmark run, including 'criterion_history'
and 'time_history'.
problem_subset (list, optional): List of problem names. These must be a subset
of the keys of the problems dictionary. If provided the convergence plot is
only created for the problems specified in this list.
algorithm_subset (list, optional): List of algorithm names. These must be a
subset of the keys of the optimizer_options passed to run_benchmark. If
provided only the convergence of the given algorithms are shown.
n_cols (int): number of columns in the plot of grids. The number
of rows is determined automatically.
distance_measure (str): One of "criterion", "parameter_distance".
monotone (bool): If True the best found criterion value so far is plotted.
If False the particular criterion evaluation of that time is used.
normalize_distance (bool): If True the progress is scaled by the total distance
between the start value and the optimal value, i.e. 1 means the algorithm
is as far from the solution as the start value and 0 means the algorithm
has reached the solution value.
runtime_measure (str): "n_evaluations" or "walltime".
stopping_criterion (str): "x_and_y", "x_or_y", "x", "y" or None. If None, no
clipping is done.
x_precision (float or None): how close an algorithm must have gotten to the
true parameter values (as percent of the Euclidean distance between start
and solution parameters) before the criterion for clipping and convergence
is fulfilled.
y_precision (float or None): how close an algorithm must have gotten to the
true criterion values (as percent of the distance between start
and solution criterion value) before the criterion for clipping and
convergence is fulfilled.
combine_plots_in_grid (bool): decide whether to return a one
figure containing subplots for each factor pair or a dictionary
of individual plots. Default True.
template (str): The template for the figure. Default is "plotly_white".
palette: The coloring palette for traces. Default is "qualitative.Plotly".
Returns:
plotly.Figure: The grid plot or dict of individual plots
"""
df, _ = create_convergence_histories(
problems=problems,
results=results,
stopping_criterion=stopping_criterion,
x_precision=x_precision,
y_precision=y_precision,
)
# handle string provision for single problems / algorithms
if isinstance(problem_subset, str):
problem_subset = [problem_subset]
if isinstance(algorithm_subset, str):
algorithm_subset = [algorithm_subset]
_check_only_allowed_subset_provided(problem_subset, df["problem"], "problem")
_check_only_allowed_subset_provided(algorithm_subset, df["algorithm"], "algorithm")
if problem_subset is not None:
df = df[df["problem"].isin(problem_subset)]
if algorithm_subset is not None:
df = df[df["algorithm"].isin(algorithm_subset)]
# plot configuration
outcome = (
f"{'monotone_' if monotone else ''}"
+ distance_measure
+ f"{'_normalized' if normalize_distance else ''}"
)
remaining_problems = df["problem"].unique()
n_rows = int(np.ceil(len(remaining_problems) / n_cols))
# pre - style plots labels
y_labels = {
"criterion": "Current Function Value",
"monotone_criterion": "Best Function Value Found So Far",
"criterion_normalized": "Share of Function Distance to Optimum<br>"
+ "Missing From Current Criterion Value",
"monotone_criterion_normalized": "Share of Function Distance to Optimum<br>"
+ "Missing From Best So Far",
"parameter_distance": "Distance Between Current and Optimal Parameters",
"parameter_distance_normalized": "Share of Parameter Distance to Optimum<br>"
+ "Missing From Current Parameters",
"monotone_parameter_distance_normalized": "Share of the Parameter Distance "
+ "to Optimum<br> Missing From the Best Parameters So Far",
"monotone_parameter_distance": "Distance Between the Best Parameters So Far<br>"
"and the Optimal Parameters",
}
x_labels = {
"n_evaluations": "Number of Function Evaluations",
"walltime": "Elapsed Time",
}
# container for individual plots
g_list = []
# container for titles
titles = []
# creating data traces for plotting faceted/individual plots
# dropping usage of palette for algoritms, but use the built in pallete
for prob_name in remaining_problems:
g_ind = [] # container for data for traces in individual plot
to_plot = df[df["problem"] == prob_name]
for i, alg in enumerate(to_plot["algorithm"].unique()):
temp = to_plot[to_plot["algorithm"] == alg]
trace_1 = go.Scatter(
x=temp[runtime_measure],
y=temp[outcome],
mode="lines",
legendgroup=i,
name=alg,
line={"color": palette[i]},
)
g_ind.append(trace_1)
if distance_measure == "criterion" and not normalize_distance:
f_opt = problems[prob_name]["solution"]["value"]
trace_2 = go.Scatter(
y=[f_opt for i in to_plot[runtime_measure]],
x=to_plot[runtime_measure],
mode="lines",
line={"color": palette[i + 1]},
name="true solution",
legendgroup=i + 1,
)
g_ind.append(trace_2)
g_list.append(g_ind)
titles.append(prob_name.replace("_", " ").title())
xaxis_title = [x_labels[runtime_measure] for ind in range(len(g_list))]
yaxis_title = [y_labels[outcome] for ind in range(len(g_list))]
common_dependencies = {
"ind_list": g_list,
"names": titles,
"clean_legend": True,
"x_title": xaxis_title,
"y_title": yaxis_title,
}
common_layout = {
"template": template,
"margin": {"l": 10, "r": 10, "t": 30, "b": 10},
}
# Plot with subplots
if combine_plots_in_grid:
g = create_grid_plot(
rows=n_rows,
cols=n_cols,
**common_dependencies,
kws={"height": 320 * n_rows, "width": 500 * n_cols, **common_layout},
)
out = g
# Dictionary for individual plots
else:
ind_dict = create_ind_dict(
**common_dependencies,
kws={"height": 320, "width": 500, "title_x": 0.5, **common_layout},
)
out = ind_dict
return out
def _check_only_allowed_subset_provided(subset, allowed, name):
"""Check if all entries of a proposed subset are in a Series.
Args:
subset (iterable or None): If None, no checks are performed. Else a ValueError
is raised listing all entries that are not in the provided Series.
allowed (iterable): allowed entries.
name (str): name of the provided entries to use for the ValueError.
Raises:
ValueError
"""
allowed = set(allowed)
if subset is not None:
missing = [entry for entry in subset if entry not in allowed]
if missing:
missing_msg = ""
for entry in missing:
proposed = propose_alternatives(entry, allowed)
missing_msg += f"Invalid {name}: {entry}. Did you mean {proposed}?\n"
raise ValueError(missing_msg)
|
import numpy as np
import plotly.express as px
import plotly.graph_objects as go
from estimagic.benchmarking.process_benchmark_results import (
create_convergence_histories,
)
from estimagic.config import PLOTLY_TEMPLATE
from estimagic.utilities import propose_alternatives
from estimagic.visualization.plotting_utilities import create_grid_plot
from estimagic.visualization.plotting_utilities import create_ind_dict
def convergence_plot(
    problems,
    results,
    *,
    problem_subset=None,
    algorithm_subset=None,
    n_cols=2,
    distance_measure="criterion",
    monotone=True,
    normalize_distance=True,
    runtime_measure="n_evaluations",
    stopping_criterion="y",
    x_precision=1e-4,
    y_precision=1e-4,
    combine_plots_in_grid=True,
    template=PLOTLY_TEMPLATE,
    palette=px.colors.qualitative.Plotly,
):
    """Plot convergence of optimizers for a set of problems.

    This creates a grid of plots, showing the convergence of the different
    algorithms on each problem. The faster a line falls, the faster the algorithm
    improved on the problem. The algorithm converged where its line reaches 0
    (if normalize_distance is True) or the horizontal blue line labeled
    "true solution".

    Each plot shows on the x axis the runtime_measure, which can be walltime or
    number of evaluations. Each algorithm's convergence is a line in the plot.
    Convergence can be measured by the criterion value of the particular
    time/evaluation. The convergence can be made monotone (i.e. always taking the
    best value so far) or normalized such that the distance from the start to the
    true solution is one.

    Args:
        problems (dict): estimagic benchmarking problems dictionary. Keys are the
            problem names. Values contain information on the problem, including the
            solution value.
        results (dict): estimagic benchmarking results dictionary. Keys are
            tuples of the form (problem, algorithm), values are dictionaries of the
            collected information on the benchmark run, including
            'criterion_history' and 'time_history'.
        problem_subset (list, optional): List of problem names. These must be a
            subset of the keys of the problems dictionary. If provided the
            convergence plot is only created for the problems specified in this
            list.
        algorithm_subset (list, optional): List of algorithm names. These must be
            a subset of the keys of the optimizer_options passed to run_benchmark.
            If provided only the convergence of the given algorithms are shown.
        n_cols (int): number of columns in the plot of grids. The number
            of rows is determined automatically.
        distance_measure (str): One of "criterion", "parameter_distance".
        monotone (bool): If True the best found criterion value so far is plotted.
            If False the particular criterion evaluation of that time is used.
        normalize_distance (bool): If True the progress is scaled by the total
            distance between the start value and the optimal value, i.e. 1 means
            the algorithm is as far from the solution as the start value and 0
            means the algorithm has reached the solution value.
        runtime_measure (str): "n_evaluations" or "walltime".
        stopping_criterion (str): "x_and_y", "x_or_y", "x", "y" or None. If None,
            no clipping is done.
        x_precision (float or None): how close an algorithm must have gotten to
            the true parameter values (as percent of the Euclidean distance
            between start and solution parameters) before the criterion for
            clipping and convergence is fulfilled.
        y_precision (float or None): how close an algorithm must have gotten to
            the true criterion values (as percent of the distance between start
            and solution criterion value) before the criterion for clipping and
            convergence is fulfilled.
        combine_plots_in_grid (bool): decide whether to return one figure
            containing subplots for each factor pair or a dictionary
            of individual plots. Default True.
        template (str): The template for the figure. Default is "plotly_white".
        palette: The coloring palette for traces. Default is "qualitative.Plotly".

    Returns:
        plotly.Figure: The grid plot or dict of individual plots

    """
    df, _ = create_convergence_histories(
        problems=problems,
        results=results,
        stopping_criterion=stopping_criterion,
        x_precision=x_precision,
        y_precision=y_precision,
    )
    # Allow passing a single problem / algorithm name as a plain string.
    if isinstance(problem_subset, str):
        problem_subset = [problem_subset]
    if isinstance(algorithm_subset, str):
        algorithm_subset = [algorithm_subset]
    _check_only_allowed_subset_provided(problem_subset, df["problem"], "problem")
    _check_only_allowed_subset_provided(algorithm_subset, df["algorithm"], "algorithm")
    if problem_subset is not None:
        df = df[df["problem"].isin(problem_subset)]
    if algorithm_subset is not None:
        df = df[df["algorithm"].isin(algorithm_subset)]
    # Name of the data column to plot, e.g. "monotone_criterion_normalized".
    outcome = (
        f"{'monotone_' if monotone else ''}"
        + distance_measure
        + f"{'_normalized' if normalize_distance else ''}"
    )
    remaining_problems = df["problem"].unique()
    n_rows = int(np.ceil(len(remaining_problems) / n_cols))
    # Pre-styled axis labels for every supported outcome column.
    y_labels = {
        "criterion": "Current Function Value",
        "monotone_criterion": "Best Function Value Found So Far",
        "criterion_normalized": "Share of Function Distance to Optimum<br>"
        + "Missing From Current Criterion Value",
        "monotone_criterion_normalized": "Share of Function Distance to Optimum<br>"
        + "Missing From Best So Far",
        "parameter_distance": "Distance Between Current and Optimal Parameters",
        "parameter_distance_normalized": "Share of Parameter Distance to Optimum<br>"
        + "Missing From Current Parameters",
        "monotone_parameter_distance_normalized": "Share of the Parameter Distance "
        + "to Optimum<br> Missing From the Best Parameters So Far",
        "monotone_parameter_distance": "Distance Between the Best Parameters So Far<br>"
        "and the Optimal Parameters",
    }
    x_labels = {
        "n_evaluations": "Number of Function Evaluations",
        "walltime": "Elapsed Time",
    }
    # container for individual plots
    g_list = []
    # container for titles
    titles = []
    # Create the data traces for the faceted / individual plots; the built-in
    # palette is indexed per algorithm.
    for prob_name in remaining_problems:
        g_ind = []  # container for data for traces in individual plot
        to_plot = df[df["problem"] == prob_name]
        for i, alg in enumerate(to_plot["algorithm"].unique()):
            temp = to_plot[to_plot["algorithm"] == alg]
            trace_1 = go.Scatter(
                x=temp[runtime_measure],
                y=temp[outcome],
                mode="lines",
                legendgroup=i,
                name=alg,
                line={"color": palette[i]},
            )
            g_ind.append(trace_1)
        # Horizontal reference line at the true optimum (only meaningful for the
        # raw criterion outcome).
        if distance_measure == "criterion" and not normalize_distance:
            f_opt = problems[prob_name]["solution"]["value"]
            # NOTE(review): palette[i + 1] assumes the palette holds at least one
            # more color than there are algorithms — confirm for large subsets.
            trace_2 = go.Scatter(
                y=[f_opt for i in to_plot[runtime_measure]],
                x=to_plot[runtime_measure],
                mode="lines",
                line={"color": palette[i + 1]},
                name="true solution",
                legendgroup=i + 1,
            )
            g_ind.append(trace_2)
        g_list.append(g_ind)
        titles.append(prob_name.replace("_", " ").title())
    xaxis_title = [x_labels[runtime_measure] for ind in range(len(g_list))]
    yaxis_title = [y_labels[outcome] for ind in range(len(g_list))]
    common_dependencies = {
        "ind_list": g_list,
        "names": titles,
        "clean_legend": True,
        "x_title": xaxis_title,
        "y_title": yaxis_title,
    }
    common_layout = {
        "template": template,
        "margin": {"l": 10, "r": 10, "t": 30, "b": 10},
    }
    # Plot with subplots
    if combine_plots_in_grid:
        g = create_grid_plot(
            rows=n_rows,
            cols=n_cols,
            **common_dependencies,
            kws={"height": 320 * n_rows, "width": 500 * n_cols, **common_layout},
        )
        out = g
    # Dictionary for individual plots
    else:
        ind_dict = create_ind_dict(
            **common_dependencies,
            kws={"height": 320, "width": 500, "title_x": 0.5, **common_layout},
        )
        out = ind_dict
    return out
def _check_only_allowed_subset_provided(subset, allowed, name):
    """Check if all entries of a proposed subset are in a Series.

    Args:
        subset (iterable or None): If None, no checks are performed. Else a ValueError
            is raised listing all entries that are not in the provided Series.
        allowed (iterable): allowed entries.
        name (str): name of the provided entries to use for the ValueError.

    Raises:
        ValueError: If any entry of ``subset`` is not contained in ``allowed``.
    """
    # Convert once so membership tests below are O(1).
    allowed = set(allowed)
    if subset is not None:
        missing = [entry for entry in subset if entry not in allowed]
        if missing:
            missing_msg = ""
            for entry in missing:
                # Suggest close matches so typos are easy to fix.
                proposed = propose_alternatives(entry, allowed)
                missing_msg += f"Invalid {name}: {entry}. Did you mean {proposed}?\n"
            raise ValueError(missing_msg)
|
en
| 0.826932
|
Plot convergence of optimizers for a set of problems. This creates a grid of plots, showing the convergence of the different algorithms on each problem. The faster a line falls, the faster the algorithm improved on the problem. The algorithm converged where its line reaches 0 (if normalize_distance is True) or the horizontal blue line labeled "true solution". Each plot shows on the x axis the runtime_measure, which can be walltime or number of evaluations. Each algorithm's convergence is a line in the plot. Convergence can be measured by the criterion value of the particular time/evaluation. The convergence can be made monotone (i.e. always taking the bast value so far) or normalized such that the distance from the start to the true solution is one. Args: problems (dict): estimagic benchmarking problems dictionary. Keys are the problem names. Values contain information on the problem, including the solution value. results (dict): estimagic benchmarking results dictionary. Keys are tuples of the form (problem, algorithm), values are dictionaries of the collected information on the benchmark run, including 'criterion_history' and 'time_history'. problem_subset (list, optional): List of problem names. These must be a subset of the keys of the problems dictionary. If provided the convergence plot is only created for the problems specified in this list. algorithm_subset (list, optional): List of algorithm names. These must be a subset of the keys of the optimizer_options passed to run_benchmark. If provided only the convergence of the given algorithms are shown. n_cols (int): number of columns in the plot of grids. The number of rows is determined automatically. distance_measure (str): One of "criterion", "parameter_distance". monotone (bool): If True the best found criterion value so far is plotted. If False the particular criterion evaluation of that time is used. 
normalize_distance (bool): If True the progress is scaled by the total distance between the start value and the optimal value, i.e. 1 means the algorithm is as far from the solution as the start value and 0 means the algorithm has reached the solution value. runtime_measure (str): "n_evaluations" or "walltime". stopping_criterion (str): "x_and_y", "x_or_y", "x", "y" or None. If None, no clipping is done. x_precision (float or None): how close an algorithm must have gotten to the true parameter values (as percent of the Euclidean distance between start and solution parameters) before the criterion for clipping and convergence is fulfilled. y_precision (float or None): how close an algorithm must have gotten to the true criterion values (as percent of the distance between start and solution criterion value) before the criterion for clipping and convergence is fulfilled. combine_plots_in_grid (bool): decide whether to return a one figure containing subplots for each factor pair or a dictionary of individual plots. Default True. template (str): The template for the figure. Default is "plotly_white". palette: The coloring palette for traces. Default is "qualitative.Plotly". Returns: plotly.Figure: The grid plot or dict of individual plots # handle string provision for single problems / algorithms # plot configuration # pre - style plots labels # container for individual plots # container for titles # creating data traces for plotting faceted/individual plots # dropping usage of palette for algoritms, but use the built in pallete # container for data for traces in individual plot # Plot with subplots # Dictionary for individual plots Check if all entries of a proposed subset are in a Series. Args: subset (iterable or None): If None, no checks are performed. Else a ValueError is raised listing all entries that are not in the provided Series. allowed (iterable): allowed entries. name (str): name of the provided entries to use for the ValueError. Raises: ValueError
| 2.92027
| 3
|
alipay/aop/api/response/ZhimaCustomerCertificationCertifyResponse.py
|
snowxmas/alipay-sdk-python-all
| 213
|
6628442
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class ZhimaCustomerCertificationCertifyResponse(AlipayResponse):
    """Response object for the zhima.customer.certification.certify API."""

    def __init__(self):
        super(ZhimaCustomerCertificationCertifyResponse, self).__init__()
        # Backing fields for the response attributes; filled by
        # parse_response_content.
        self._biz_no = None
        self._failed_reason = None
        self._passed = None

    @property
    def biz_no(self):
        return self._biz_no

    @biz_no.setter
    def biz_no(self, value):
        self._biz_no = value

    @property
    def failed_reason(self):
        return self._failed_reason

    @failed_reason.setter
    def failed_reason(self, value):
        self._failed_reason = value

    @property
    def passed(self):
        return self._passed

    @passed.setter
    def passed(self, value):
        self._passed = value

    def parse_response_content(self, response_content):
        """Populate this object's fields from the raw response payload."""
        response = super(ZhimaCustomerCertificationCertifyResponse, self).parse_response_content(response_content)
        # Copy only the fields that are actually present in the payload.
        for key in ('biz_no', 'failed_reason', 'passed'):
            if key in response:
                setattr(self, key, response[key])
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class ZhimaCustomerCertificationCertifyResponse(AlipayResponse):
def __init__(self):
super(ZhimaCustomerCertificationCertifyResponse, self).__init__()
self._biz_no = None
self._failed_reason = None
self._passed = None
@property
def biz_no(self):
return self._biz_no
@biz_no.setter
def biz_no(self, value):
self._biz_no = value
@property
def failed_reason(self):
return self._failed_reason
@failed_reason.setter
def failed_reason(self, value):
self._failed_reason = value
@property
def passed(self):
return self._passed
@passed.setter
def passed(self, value):
self._passed = value
def parse_response_content(self, response_content):
response = super(ZhimaCustomerCertificationCertifyResponse, self).parse_response_content(response_content)
if 'biz_no' in response:
self.biz_no = response['biz_no']
if 'failed_reason' in response:
self.failed_reason = response['failed_reason']
if 'passed' in response:
self.passed = response['passed']
|
en
| 0.352855
|
#!/usr/bin/env python # -*- coding: utf-8 -*-
| 2.130936
| 2
|
handlers/base.py
|
binux/webrtc_video
| 32
|
6628443
|
<reponame>binux/webrtc_video
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<<EMAIL>>
# http://binux.me
# Created on 2012-12-15 16:16:38
import logging
import tornado.web
import tornado.websocket
from tornado.web import HTTPError
from tornado.options import options
__ALL__ = ['HTTPError', 'BaseHandler', 'BaseWebSocket', 'BaseUIModule', ]
class BaseHandler(tornado.web.RequestHandler):
    """Request handler base class that proxies selected attributes of the
    application object onto the handler."""

    # Attribute names that are looked up on ``self.application`` when normal
    # attribute lookup on the handler fails.
    application_export = set(('room_manager', ))

    def __getattr__(self, key):
        """Delegate whitelisted attribute lookups to ``self.application``.

        ``__getattr__`` is only invoked after regular lookup fails, so this
        never shadows real handler attributes.
        """
        if key in self.application_export:
            return getattr(self.application, key)
        # Fix: the parent classes define no __getattr__, so the previous
        # ``super(...).__getattr__(key)`` call raised a confusing
        # "'super' object has no attribute '__getattr__'" error. Raise the
        # conventional AttributeError for the missing name instead.
        raise AttributeError(key)

    def render_string(self, template_name, **kwargs):
        """Render a template, injecting the global ``options`` object into the
        template namespace unless the caller supplied its own."""
        if "options" not in kwargs:
            kwargs["options"] = options
        return super(BaseHandler, self).render_string(template_name, **kwargs)
class BaseWebSocket(tornado.websocket.WebSocketHandler):
    """WebSocket handler base class that proxies selected attributes of the
    application object onto the handler."""

    # Attribute names that are looked up on ``self.application`` when normal
    # attribute lookup on the handler fails.
    application_export = set(('room_manager', ))

    def __getattr__(self, key):
        """Delegate whitelisted attribute lookups to ``self.application``."""
        if key in self.application_export:
            return getattr(self.application, key)
        # Fix: the parent classes define no __getattr__, so the previous
        # ``super(...).__getattr__(key)`` call raised a confusing
        # "'super' object has no attribute '__getattr__'" error. Raise the
        # conventional AttributeError for the missing name instead.
        raise AttributeError(key)
class BaseUIModule(tornado.web.UIModule):
    """Common base class for the project's UI modules (no extra behavior yet)."""
    pass
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<<EMAIL>>
# http://binux.me
# Created on 2012-12-15 16:16:38
import logging
import tornado.web
import tornado.websocket
from tornado.web import HTTPError
from tornado.options import options
__ALL__ = ['HTTPError', 'BaseHandler', 'BaseWebSocket', 'BaseUIModule', ]
class BaseHandler(tornado.web.RequestHandler):
application_export = set(('room_manager', ))
def __getattr__(self, key):
if key in self.application_export:
return getattr(self.application, key)
super(BaseHandler, self).__getattr__(key)
def render_string(self, template_name, **kwargs):
if "options" not in kwargs:
kwargs["options"] = options
return super(BaseHandler, self).render_string(template_name, **kwargs)
class BaseWebSocket(tornado.websocket.WebSocketHandler):
application_export = set(('room_manager', ))
def __getattr__(self, key):
if key in self.application_export:
return getattr(self.application, key)
super(BaseWebSocket, self).__getattr__(key)
class BaseUIModule(tornado.web.UIModule):
pass
|
en
| 0.225609
|
#!/usr/bin/env python # -*- encoding: utf-8 -*- # vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8: # Author: Binux<<EMAIL>> # http://binux.me # Created on 2012-12-15 16:16:38
| 1.805149
| 2
|
button.py
|
mn1del/rpigpio
| 0
|
6628444
|
#!/usr/bin/env python3
"""
Class to handle momentary switches
"""
import RPi.GPIO as GPIO
import time
if __name__ == "__main__":
from base import BaseIO
else:
from rpigpio.base import BaseIO
class Button(BaseIO):
    """Momentary push-button input backed by RPi.GPIO edge detection."""

    def __init__(self,
                 button_pin=12,
                 pull_up=True,
                 debounce_delay_secs=0.05):
        """Configure the GPIO pin and start edge detection.

        Note that STATE behaviour will depend on whether a pull-up or
        pull-down resistor is used, and whether the circuit is wired
        normally open or normally closed.

        args:
            button_pin: (int) GPIO pin (BCM numbering)
            pull_up: (bool) if True use GPIO.PUD_UP, otherwise GPIO.PUD_DOWN
            debounce_delay_secs: (float) seconds delay to handle debouncing
        """
        GPIO.setmode(GPIO.BCM)
        self.BUTTON = button_pin
        self.DEBOUNCE_MS = int(debounce_delay_secs * 1000)  # milliseconds
        # Select the internal resistor and configure the pin as an input.
        resistor = GPIO.PUD_UP if pull_up else GPIO.PUD_DOWN
        GPIO.setup(self.BUTTON, GPIO.IN, pull_up_down=resistor)
        # Let the line settle before taking the initial reading.
        time.sleep(self.DEBOUNCE_MS / 1000)
        self.STATE = GPIO.input(self.BUTTON)
        # Fire set_state on both rising and falling edges, with hardware-side
        # debouncing via bouncetime.
        GPIO.add_event_detect(
            self.BUTTON,
            GPIO.BOTH,
            callback=self.set_state,
            bouncetime=self.DEBOUNCE_MS,
        )

    def set_state(self, channel):
        """Edge-detection callback: debounce, then sample and store the pin level.

        Note that STATE behaviour will depend on whether a pull-up or
        pull-down resistor is used, and whether the circuit is wired
        normally open or normally closed.
        """
        time.sleep(self.DEBOUNCE_MS / 1000)
        self.STATE = GPIO.input(self.BUTTON)
if __name__ == "__main__":
    # Demo loop. With a normally closed switch wired from GND to PIN and a
    # pull-up resistor, STATE == 1 when the switch is pressed (the circuit is
    # broken and the resistor pulls the PIN high).
    try:
        button = Button(button_pin=12, pull_up=True)
        while True:
            print("State: {}".format(button.STATE))
            time.sleep(0.01)
    except KeyboardInterrupt:
        # Fix: the previous bare ``except: pass`` silently swallowed every
        # error. Ctrl-C is the expected way to stop the loop; anything else
        # should propagate so real bugs are visible.
        pass
    finally:
        GPIO.cleanup()
|
#!/usr/bin/env python3
"""
Class to handle momentary switches
"""
import RPi.GPIO as GPIO
import time
if __name__ == "__main__":
from base import BaseIO
else:
from rpigpio.base import BaseIO
class Button(BaseIO):
def __init__(self,
button_pin=12,
pull_up=True,
debounce_delay_secs=0.05):
"""
Class to handle momentary switch input.
Note that STATE behaviour will depend on whether a pullup or pull-down resistor is used,
and whether the circuit is wired normally open or normally closed.
args:
button_pin: (int) GPIO pin (BCM)
pull_up: (bool) if True set pull_up_down to GPIO.PUD_UP
debounce_delay_secs: (float) seconds delay to handle debouncing
"""
GPIO.setmode(GPIO.BCM)
# set class variables
self.BUTTON = button_pin
self.DEBOUNCE_MS = int(debounce_delay_secs * 1000) # convert to milliseconds
# setup pins
if pull_up:
GPIO.setup(self.BUTTON, GPIO.IN, pull_up_down=GPIO.PUD_UP)
else:
GPIO.setup(self.BUTTON, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
time.sleep(self.DEBOUNCE_MS/1000)
self.STATE = GPIO.input(self.BUTTON)
# setup event detection
GPIO.add_event_detect(self.BUTTON, GPIO.BOTH, callback=self.set_state, bouncetime=self.DEBOUNCE_MS)
def set_state(self, channel):
"""
Sets and returns state using GPIO.event_detected() logic.
Note that STATE behaviour will depend on whether a pullup or pull-down resistor is used,
and whether the circuit is wired normally open or normally closed.
"""
time.sleep(self.DEBOUNCE_MS/1000)
self.STATE = GPIO.input(self.BUTTON)
if __name__ == "__main__":
"""
With a normally closed switch wired from GND to PIN, and pullup resister
the STATE==1 when the switch is pressed (because the circuit is broken and the resister
pulls the PIN high)
"""
try:
button = Button(button_pin=12, pull_up=True)
while True:
print("State: {}".format(button.STATE))
time.sleep(0.01)
except:
pass
finally:
GPIO.cleanup()
|
en
| 0.772461
|
#!/usr/bin/env python3 Class to handle momentary switches Class to handle momentary switch input. Note that STATE behaviour will depend on whether a pullup or pull-down resistor is used, and whether the circuit is wired normally open or normally closed. args: button_pin: (int) GPIO pin (BCM) pull_up: (bool) if True set pull_up_down to GPIO.PUD_UP debounce_delay_secs: (float) seconds delay to handle debouncing # set class variables # convert to milliseconds # setup pins # setup event detection Sets and returns state using GPIO.event_detected() logic. Note that STATE behaviour will depend on whether a pullup or pull-down resistor is used, and whether the circuit is wired normally open or normally closed. With a normally closed switch wired from GND to PIN, and pullup resister the STATE==1 when the switch is pressed (because the circuit is broken and the resister pulls the PIN high)
| 3.437627
| 3
|
villes_en_france.py
|
mbrewer/dictionary_magic
| 0
|
6628445
|
<gh_stars>0
#!/usr/bin/env python
# -*- coding: latin-1 -*-
# Number of départements per region, as modeled in this exercise.
# NOTE(review): 'Nord-Pas-de-Calais' is a pre-2016 region name (now part of
# Hauts-de-France) and the counts are exercise data — confirm intent before
# treating these as authoritative.
combien_de_departements = {
    'Auvergne-Rhônes-Alpes': 12,
    'Île-de-France': 8,
    'Normandie': 5,
    'Provence-Alpes-Côte d\'Azur': 8,
    'Nouvelle-Aquitaine': 12,
    'Grand Est': 10,
    'Occitanie': 13,
    'Bretagne': 4,
    'Nord-Pas-de-Calais': 5
}
# Map from city (or landmark) name to the region it belongs to.
villes_et_regions = {
    'Lyon': 'Auvergne-Rhônes-Alpes',
    'Paris': 'Île-de-France',
    'Caen': 'Normandie',
    'Marseille': 'Provence-Alpes-Côte d\'Azur',
    'Le Mont-Saint-Michel': 'Normandie',
    'Grenoble': 'Auvergne-Rhônes-Alpes',
    'Bordeaux': 'Nouvelle-Aquitaine',
    'Strasbourg': 'Grand Est',
    'Perpignan': 'Occitanie',
    'Saint-Malo': 'Bretagne',
    'Lille': 'Nord-Pas-de-Calais'
}
|
#!/usr/bin/env python
# -*- coding: latin-1 -*-
combien_de_departements = {
'Auvergne-Rhônes-Alpes': 12,
'Île-de-France': 8,
'Normandie': 5,
'Provence-Alpes-Côte d\'Azur': 8,
'Nouvelle-Aquitaine': 12,
'Grand Est': 10,
'Occitanie': 13,
'Bretagne': 4,
'Nord-Pas-de-Calais': 5
}
villes_et_regions = {
'Lyon': 'Auvergne-Rhônes-Alpes',
'Paris': 'Île-de-France',
'Caen': 'Normandie',
'Marseille': 'Provence-Alpes-Côte d\'Azur',
'Le Mont-Saint-Michel': 'Normandie',
'Grenoble': 'Auvergne-Rhônes-Alpes',
'Bordeaux': 'Nouvelle-Aquitaine',
'Strasbourg': 'Grand Est',
'Perpignan': 'Occitanie',
'Saint-Malo': 'Bretagne',
'Lille': 'Nord-Pas-de-Calais'
}
|
en
| 0.184027
|
#!/usr/bin/env python # -*- coding: latin-1 -*-
| 2.090516
| 2
|
src/npx/__init__.py
|
rohankumardubey/npx
| 0
|
6628446
|
<gh_stars>0
from ._isin import isin_rows
from ._main import add_at, dot, outer, solve, subtract_at, sum_at
from ._mean import mean
from ._unique import unique, unique_rows
__all__ = [
"dot",
"outer",
"solve",
"sum_at",
"add_at",
"subtract_at",
"unique_rows",
"isin_rows",
"mean",
"unique",
"unique_rows",
]
|
from ._isin import isin_rows
from ._main import add_at, dot, outer, solve, subtract_at, sum_at
from ._mean import mean
from ._unique import unique, unique_rows
__all__ = [
"dot",
"outer",
"solve",
"sum_at",
"add_at",
"subtract_at",
"unique_rows",
"isin_rows",
"mean",
"unique",
"unique_rows",
]
|
none
| 1
| 1.936957
| 2
|
|
docker/app.py
|
icedwater/dole
| 0
|
6628447
|
<filename>docker/app.py
#! /usr/bin/env python
from flask import Flask
from redis import Redis, RedisError
import os
import socket
# Connect to Redis
redis = Redis(host="redis", db=0, socket_connect_timeout=2, socket_timeout=2)
app = Flask(__name__)
@app.route("/")
def hello():
    """Render the landing page with the hostname and a Redis-backed visit counter."""
    try:
        visits = redis.incr("counter")
    except RedisError:
        # Fall back to an inline error message when Redis is unreachable.
        visits = "<span class = \"error\">Cannot connect to Redis. Counter disabled.</span>"
    template = (
        "<h3>Hello {name}!</h3>"
        "<strong>Hostname</strong>: {hostname}<br/>"
        "<strong>Visits</strong>: {visits}"
    )
    return template.format(
        name=os.getenv("NAME", "world"),
        hostname=socket.gethostname(),
        visits=visits,
    )
if __name__ == "__main__":
app.run(host="0.0.0.0", port=80)
|
<filename>docker/app.py
#! /usr/bin/env python
from flask import Flask
from redis import Redis, RedisError
import os
import socket
# Connect to Redis
redis = Redis(host="redis", db=0, socket_connect_timeout=2, socket_timeout=2)
app = Flask(__name__)
@app.route("/")
def hello():
try:
visits = redis.incr("counter")
except RedisError:
visits = "<span class = \"error\">Cannot connect to Redis. Counter disabled.</span>"
html = "<h3>Hello {name}!</h3>"
html += "<strong>Hostname</strong>: {hostname}<br/>"
html += "<strong>Visits</strong>: {visits}"
return html.format(name=os.getenv("NAME", "world"), hostname=socket.gethostname(), visits=visits)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=80)
|
en
| 0.342116
|
#! /usr/bin/env python # Connect to Redis
| 2.640903
| 3
|
functions/RedditDownloader.py
|
RafaelRCamargo/reddit-downloader-plus
| 0
|
6628448
|
<filename>functions/RedditDownloader.py<gh_stars>0
# FRTS
# ? Reddit Api + Downloader
# * Imports
# Sys os
import os
# Sys Path
from pathlib import Path
# Date
from datetime import datetime
# Reddit Downloader
from redvid import Downloader
# Cool Terminal Colors
from rich import print
duration = 0
def reddit_downloader(post):
    """Download the video of a reddit post if it fits the "shorts" criteria.

    Accumulates the total downloaded duration in the module-level ``duration``
    counter across calls, stopping once a 90-second budget is reached.

    Args:
        post: Relative permalink of the post, e.g. "/r/sub/comments/id/slug/".

    Returns:
        True while more videos are still wanted, False once the accumulated
        duration budget is reached, and None (implicitly) when the video
        could not be fetched.
    """
    global duration  # running total of downloaded seconds across calls
    # Destination folder: assets/videos/<subreddit>/<dd-mm-YYYY>
    # NOTE(review): path uses Windows-style backslashes — confirm the tool is
    # Windows-only before running elsewhere.
    path = str(Path(__file__).cwd()) + "\\assets\\videos\\" + \
        post.split("/")[2] + "\\" + datetime.today().strftime('%d-%m-%Y')
    isExist = os.path.exists(path)
    if not isExist:
        os.makedirs(path)
        print(">> [italic blue]The new directory is created![/italic blue]\n")
    # Redvid downloader setup
    reddit = Downloader()
    # Video destination path
    reddit.path = path
    # Video url
    reddit.url = 'https://www.reddit.com' + post + '_/'
    # Max size of the file (24 MiB)
    reddit.max_s = 24 * (1 << 20)
    # Auto max video quality based on the file size
    reddit.auto_max = True
    # Overwrite existing files
    reddit.overwrite = True
    try:
        # Fetch video metadata (duration, size) without downloading yet.
        reddit.check()
        duration += int(reddit.duration)
        if duration <= 90:
            # General stats
            print("\n>> [bold yellow]General Stats:[/bold yellow]")
            print("- Duration: [bold blue]" +
                  str(duration) + "[/bold blue] seconds")
            # Video stats
            print("\n>> [bold blue]Video Stats:[/bold blue]")
            print("- Duration: [blue]" +
                  str(reddit.duration) + "[/blue] seconds")
            print("- Size: [blue]" + str(reddit.size) + "[/blue] bytes\n")
            # Download only clips between 2 and 90 seconds under the size cap.
            if reddit.duration < 90 and reddit.duration > 2 and reddit.size <= 24 * (1 << 20):
                reddit.download()
                print('\n>> [green]Video downloaded![/green]\n')
            else:
                print(
                    '>> [red]Not that good for shorts![/red] [red bold]:([/red bold]\n')
            return True
        else:
            # Budget reached — report totals and signal the caller to stop.
            print("\n>> [bold yellow]General Stats:[/bold yellow]")
            print("- Duration: [bold blue]" +
                  str(duration) + "[/bold blue] seconds\n")
            print('>> [green]We already have enough videos![/green]')
            print('>> [bold yellow]Let\'s build it?[/bold yellow]\n')
            return False
    except Exception:
        # Fix: narrowed from a bare ``except`` so KeyboardInterrupt and
        # SystemExit still propagate; any scrape/download failure lands here.
        print('\n>> [red]Video not found![/red]\n')
|
<filename>functions/RedditDownloader.py<gh_stars>0
# FRTS
# ? Reddit Api + Downloader
# * Imports
# Sys os
import os
# Sys Path
from pathlib import Path
# Date
from datetime import datetime
# Reddit Downloader
from redvid import Downloader
# Cool Terminal Colors
from rich import print
duration = 0
def reddit_downloader(post):
    """Fetch the video of a Reddit *post* into assets/videos/<subreddit>/<date>.

    Accumulates the downloaded duration in the module-level ``duration``
    counter so the caller can stop collecting clips at 90 seconds total.

    :param post: permalink path such as ``/r/<sub>/comments/<id>/<slug>``;
        the subreddit name is the third ``/``-separated field.
    :return: True while more footage is still wanted (even if this clip was
        rejected), False once 90 seconds have been collected, or None when
        the video could not be inspected at all.
    """
    global duration
    # 24 MiB — Reddit-hosted clips above this are skipped (shorts limit).
    max_bytes = 24 * (1 << 20)
    # Portable path construction; the original concatenated "\\" by hand,
    # which only produced valid paths on Windows.  (str(Path(__file__).cwd())
    # was also just the current working directory.)
    path = os.path.join(os.getcwd(), "assets", "videos",
                        post.split("/")[2],
                        datetime.today().strftime('%d-%m-%Y'))
    if not os.path.exists(path):
        os.makedirs(path)
        print(">> [italic blue]The new directory is created![/italic blue]\n")
    # * Basics
    # Redvid setup
    reddit = Downloader()
    # Video path
    reddit.path = path
    # Video url
    reddit.url = 'https://www.reddit.com' + post + '_/'
    # * Defs
    # Max size of the file in MB
    reddit.max_s = max_bytes
    # * Props
    # Auto max video quality based on the file size
    reddit.auto_max = True
    # Video overwrite method
    reddit.overwrite = True
    try:
        # * Get Videos Stats — raises if the post has no downloadable video.
        reddit.check()
        # NOTE(review): the clip's length is added to the running total even
        # when the clip is later rejected — preserved from the original.
        duration += int(reddit.duration)
        if duration <= 90:
            # * General Stats
            print("\n>> [bold yellow]General Stats:[/bold yellow]")
            print("- Duration: [bold blue]" +
                  str(duration) + "[/bold blue] seconds")
            # * Video Stats
            print("\n>> [bold blue]Video Stats:[/bold blue]")
            print("- Duration: [blue]" +
                  str(reddit.duration) + "[/blue] seconds")
            print("- Size: [blue]" + str(reddit.size) + "[/blue] bytes\n")
            # * Downloading — keep clips between 2 and 90 seconds and under
            # the size ceiling.
            if reddit.duration < 90 and reddit.duration > 2 and reddit.size <= max_bytes:
                reddit.download()
                print('\n>> [green]Video downloaded![/green]\n')
            else:
                print(
                    '>> [red]Not that good for shorts![/red] [red bold]:([/red bold]\n')
            return True
        else:
            # * General Stats
            print("\n>> [bold yellow]General Stats:[/bold yellow]")
            print("- Duration: [bold blue]" +
                  str(duration) + "[/bold blue] seconds\n")
            print('>> [green]We already have enough videos![/green]')
            print('>> [bold yellow]Let\'s build it?[/bold yellow]\n')
            return False
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # still propagate; any redvid failure is reported uniformly.
        # NOTE(review): falls through returning None (falsy) — confirm
        # callers treat that the same as False.
        print('\n>> [red]Video not found![/red]\n')
|
en
| 0.605856
|
# FRTS # ? Reddit Api + Downloader # * Imports # Sys os # Sys Path # Date # Reddit Downloader # Cool Terminal Colors # * Basics # Redvid setup # Video path # Video url # * Defs # Max size of the file in MB # * Props # Auto max video quality based on the file size # Video overwrite method # * Get Videos Stats # * General Stats # * Video Stats # * Downloading # * General Stats
| 3.186541
| 3
|
io_scene_webaverse/blender/exp/gltf2_blender_gather_animation_sampler_keyframes.py
|
chrislatorres/blender-plugin
| 3
|
6628449
|
<reponame>chrislatorres/blender-plugin<gh_stars>1-10
# Copyright 2018-2019 The glTF-Blender-IO authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bpy
import mathutils
import typing
from io_scene_webaverse.blender.exp.gltf2_blender_gather_cache import cached, bonecache
from io_scene_webaverse.blender.com import gltf2_blender_math
from io_scene_webaverse.blender.exp import gltf2_blender_get
from io_scene_webaverse.blender.exp.gltf2_blender_gather_drivers import get_sk_drivers, get_sk_driver_values
from . import gltf2_blender_export_keys
from io_scene_webaverse.io.com import gltf2_io_debug
class Keyframe:
    """A single sampled animation key.

    Collects the per-channel values of one frame and exposes them as a
    complete vector/quaternion/shape-key list (non-keyed components are
    zero-padded by ``__set_indexed``), with optional Bezier in/out
    tangents for cubic-spline export.
    """
    def __init__(self, channels: typing.Tuple[bpy.types.FCurve], frame: float, bake_channel: typing.Union[str, None]):
        # Timestamp of this key in seconds, derived from the scene FPS.
        self.seconds = frame / bpy.context.scene.render.fps
        self.frame = frame
        self.fps = bpy.context.scene.render.fps
        # Number of shape keys; only meaningful when target == "value".
        self.__length_morph = 0
        # Note: channels has some None items only for SK if some SK are not animated
        if bake_channel is None:
            # Infer the animated property name from the first live channel's
            # data path (e.g. "location", "rotation_quaternion", "value").
            self.target = [c for c in channels if c is not None][0].data_path.split('.')[-1]
            if self.target != "value":
                self.__indices = [c.array_index for c in channels]
            else:
                # Shape keys: remember which SK slots are actually animated.
                self.__indices = [i for i, c in enumerate(channels) if c is not None]
                self.__length_morph = len(channels)
        else:
            # Baked channel: every component of the target is treated as keyed.
            self.target = bake_channel
            self.__indices = []
            for i in range(self.get_target_len()):
                self.__indices.append(i)
        # Data holders for virtual properties
        self.__value = None
        self.__in_tangent = None
        self.__out_tangent = None
    def get_target_len(self):
        """Return the component count of the target property.

        :raises RuntimeError: if the target type is not supported.
        """
        length = {
            "delta_location": 3,
            "delta_rotation_euler": 3,
            "location": 3,
            "rotation_axis_angle": 4,
            "rotation_euler": 3,
            "rotation_quaternion": 4,
            "scale": 3,
            "value": self.__length_morph
        }.get(self.target)
        if length is None:
            raise RuntimeError("Animations with target type '{}' are not supported.".format(self.target))
        return length
    def __set_indexed(self, value):
        """Scatter *value* into a zero-filled list at the keyed indices."""
        # Sometimes blender animations only reference a subset of components of a data target. Keyframe should always
        # contain a complete Vector/ Quaternion --> use the array_index value of the keyframe to set components in such
        # structures
        # For SK, must contains all SK values
        result = [0.0] * self.get_target_len()
        for i, v in zip(self.__indices, value):
            result[i] = v
        return result
    def get_indices(self):
        # Indices of the components that are actually keyed (or animated SKs).
        return self.__indices
    def set_value_index(self, idx, val):
        # Overwrite a single component of the stored value (used to fill in
        # non-keyed components after construction; see complete_key).
        self.__value[idx] = val
    def set_value_index_in(self, idx, val):
        # Overwrite a single component of the in-tangent.
        self.__in_tangent[idx] = val
    def set_value_index_out(self, idx, val):
        # Overwrite a single component of the out-tangent.
        self.__out_tangent[idx] = val
    def set_first_tangent(self):
        # First key of a curve: the in-tangent collapses onto the value.
        # NOTE(review): this aliases the same list object as the value, so a
        # later set_value_index_in also mutates the value — appears harmless
        # since completion writes identical numbers; confirm.
        self.__in_tangent = self.__value
    def set_last_tangent(self):
        # Last key of a curve: the out-tangent collapses onto the value
        # (same aliasing caveat as set_first_tangent).
        self.__out_tangent = self.__value
    @property
    def value(self) -> typing.Union[mathutils.Vector, mathutils.Euler, mathutils.Quaternion, typing.List[float]]:
        """The complete key value, converted to a mathutils type (plain list for SKs)."""
        if self.target == "value":
            return self.__value
        return gltf2_blender_math.list_to_mathutils(self.__value, self.target)
    @value.setter
    def value(self, value: typing.List[float]):
        self.__value = self.__set_indexed(value)
    @property
    def in_tangent(self) -> typing.Union[mathutils.Vector, mathutils.Euler, mathutils.Quaternion, typing.List[float]]:
        """Bezier in-tangent, or None when not set."""
        if self.__in_tangent is None:
            return None
        if self.target == "value":
            return self.__in_tangent
        return gltf2_blender_math.list_to_mathutils(self.__in_tangent, self.target)
    @in_tangent.setter
    def in_tangent(self, value: typing.List[float]):
        self.__in_tangent = self.__set_indexed(value)
    @property
    def out_tangent(self) -> typing.Union[mathutils.Vector, mathutils.Euler, mathutils.Quaternion, typing.List[float]]:
        """Bezier out-tangent, or None when not set."""
        if self.__out_tangent is None:
            return None
        if self.target == "value":
            return self.__out_tangent
        return gltf2_blender_math.list_to_mathutils(self.__out_tangent, self.target)
    @out_tangent.setter
    def out_tangent(self, value: typing.List[float]):
        self.__out_tangent = self.__set_indexed(value)
@bonecache
def get_bone_matrix(blender_object_if_armature: typing.Optional[bpy.types.Object],
                    channels: typing.Tuple[bpy.types.FCurve],
                    bake_bone: typing.Union[str, None],
                    bake_channel: typing.Union[str, None],
                    bake_range_start,
                    bake_range_end,
                    action_name: str,
                    current_frame: int,
                    step: int
                    ):
    """Sample every pose bone's local matrix over the whole bake range.

    The body builds a ``{frame: {bone_name: Matrix}}`` table covering
    ``bake_range_start..bake_range_end`` in increments of *step*.
    ``current_frame``, ``channels`` and ``action_name`` are not read here;
    NOTE(review): they presumably act as cache keys for ``@bonecache``,
    which callers rely on to return a single per-frame matrix (callers do
    ``mat.decompose()``) — confirm against the decorator implementation.
    """
    data = {}
    # Always using bake_range, because some bones may need to be baked,
    # even if user didn't request it
    start_frame = bake_range_start
    end_frame = bake_range_end
    frame = start_frame
    while frame <= end_frame:
        data[frame] = {}
        # we need to bake in the constraints
        bpy.context.scene.frame_set(frame)
        for pbone in blender_object_if_armature.pose.bones:
            if bake_bone is None:
                # Plain animation: the local basis matrix is sufficient.
                matrix = pbone.matrix_basis.copy()
            else:
                # Baked bone: when rotation/scale inheritance is disabled,
                # rebuild the matrix relative to the parent's rest pose first.
                if (pbone.bone.use_inherit_rotation == False or pbone.bone.inherit_scale != "FULL") and pbone.parent != None:
                    rest_mat = (pbone.parent.bone.matrix_local.inverted_safe() @ pbone.bone.matrix_local)
                    matrix = (rest_mat.inverted_safe() @ pbone.parent.matrix.inverted_safe() @ pbone.matrix)
                else:
                    matrix = pbone.matrix
                matrix = blender_object_if_armature.convert_space(pose_bone=pbone, matrix=matrix, from_space='POSE', to_space='LOCAL')
            data[frame][pbone.name] = matrix
        # If some drivers must be evaluated, do it here, to avoid to have to change frame by frame later
        obj_driver = blender_object_if_armature.proxy if blender_object_if_armature.proxy else blender_object_if_armature
        drivers_to_manage = get_sk_drivers(obj_driver)
        for dr_obj, dr_fcurves in drivers_to_manage:
            # Result is discarded — evaluated for its (presumed) caching side
            # effect inside get_sk_driver_values; TODO confirm.
            vals = get_sk_driver_values(dr_obj, frame, dr_fcurves)
        frame += step
    return data
# cache for performance reasons
@cached
def gather_keyframes(blender_object_if_armature: typing.Optional[bpy.types.Object],
                     channels: typing.Tuple[bpy.types.FCurve],
                     non_keyed_values: typing.Tuple[typing.Optional[float]],
                     bake_bone: typing.Union[str, None],
                     bake_channel: typing.Union[str, None],
                     bake_range_start,
                     bake_range_end,
                     action_name: str,
                     driver_obj,
                     export_settings
                     ) -> typing.List[Keyframe]:
    """Convert the blender action groups' fcurves to keyframes for use in glTF.

    Either samples the animation frame by frame (when ``needs_baking`` says
    the curves cannot be represented directly in glTF) or converts Blender's
    own keyframes, including Bezier tangents for cubic-spline interpolation.

    :param blender_object_if_armature: the armature object, or None
    :param channels: fcurves of one action group (None entries only for
        shape keys that are not animated)
    :param non_keyed_values: fallback value per component for components
        that no fcurve animates
    :param bake_bone: name of a bone to bake, or None
    :param bake_channel: property name to bake, or None
    :param driver_obj: object whose shape keys are driven, or None
    :return: the gathered list of Keyframe objects
    """
    if bake_bone is None and driver_obj is None:
        # Find the start and end of the whole action group
        # Note: channels has some None items only for SK if some SK are not animated
        # (each fcurve's range() is queried once here; the original re-queried
        # every fcurve separately for the min and for the max)
        ranges = [channel.range() for channel in channels if channel is not None]
        start_frame = min(r[0] for r in ranges)
        end_frame = max(r[1] for r in ranges)
    else:
        start_frame = bake_range_start
        end_frame = bake_range_end
    keyframes = []
    # Channels that are actually animated; hoisted once instead of being
    # rebuilt by comprehension in every check / loop iteration below.
    animated = [c for c in channels if c is not None]
    if needs_baking(blender_object_if_armature, channels, export_settings):
        # Bake the animation, by evaluating the animation for all frames
        # TODO: maybe baking can also be done with FCurve.convert_to_samples
        if blender_object_if_armature is not None and driver_obj is None:
            if bake_bone is None:
                pose_bone_if_armature = gltf2_blender_get.get_object_from_datapath(blender_object_if_armature,
                                                                                   channels[0].data_path)
            else:
                pose_bone_if_armature = blender_object_if_armature.pose.bones[bake_bone]
        else:
            pose_bone_if_armature = None
        # sample all frames
        frame = start_frame
        step = export_settings['gltf_frame_step']
        while frame <= end_frame:
            key = Keyframe(channels, frame, bake_channel)
            if isinstance(pose_bone_if_armature, bpy.types.PoseBone):
                # NOTE(review): get_bone_matrix's body builds a per-frame
                # table; the @bonecache decorator is presumed to resolve it
                # to this frame's matrix — confirm against the decorator.
                mat = get_bone_matrix(
                    blender_object_if_armature,
                    channels,
                    bake_bone,
                    bake_channel,
                    bake_range_start,
                    bake_range_end,
                    action_name,
                    frame,
                    step
                )
                trans, rot, scale = mat.decompose()
                if bake_channel is None:
                    target_property = channels[0].data_path.split('.')[-1]
                else:
                    target_property = bake_channel
                key.value = {
                    "location": trans,
                    "rotation_axis_angle": rot,
                    "rotation_euler": rot,
                    "rotation_quaternion": rot,
                    "scale": scale
                }[target_property]
            else:
                if driver_obj is None:
                    # Note: channels has some None items only for SK if some SK are not animated
                    key.value = [c.evaluate(frame) for c in animated]
                    complete_key(key, non_keyed_values)
                else:
                    key.value = get_sk_driver_values(driver_obj, frame, channels)
                    complete_key(key, non_keyed_values)
            keyframes.append(key)
            frame += step
    else:
        # Just use the keyframes as they are specified in blender
        # Note: channels has some None items only for SK if some SK are not animated
        frames = [keyframe.co[0] for keyframe in animated[0].keyframe_points]
        # some weird files have duplicate frame at same time, removed them
        frames = sorted(set(frames))
        # NOTE(review): after de-duplication, index i below may no longer line
        # up with c.keyframe_points[i] for files that did contain duplicates —
        # preserved from the original; confirm before relying on tangents.
        for i, frame in enumerate(frames):
            key = Keyframe(channels, frame, bake_channel)
            # key.value = [c.keyframe_points[i].co[0] for c in action_group.channels]
            key.value = [c.evaluate(frame) for c in animated]
            # Complete key with non keyed values, if needed
            if len(animated) != key.get_target_len():
                complete_key(key, non_keyed_values)
            # compute tangents for cubic spline interpolation
            if animated[0].keyframe_points[0].interpolation == "BEZIER":
                # Construct the in tangent
                if frame == frames[0]:
                    # start in-tangent should become all zero
                    key.set_first_tangent()
                else:
                    # otherwise construct an in tangent coordinate from the keyframes control points. We intermediately
                    # use a point at t-1 to define the tangent. This allows the tangent control point to be transformed
                    # normally
                    key.in_tangent = [
                        c.keyframe_points[i].co[1] + ((c.keyframe_points[i].co[1] - c.keyframe_points[i].handle_left[1]
                                                       ) / (frame - frames[i - 1]))
                        for c in animated
                    ]
                # Construct the out tangent
                if frame == frames[-1]:
                    # end out-tangent should become all zero
                    key.set_last_tangent()
                else:
                    # otherwise construct an in tangent coordinate from the keyframes control points. We intermediately
                    # use a point at t+1 to define the tangent. This allows the tangent control point to be transformed
                    # normally
                    key.out_tangent = [
                        c.keyframe_points[i].co[1] + ((c.keyframe_points[i].handle_right[1] - c.keyframe_points[i].co[1]
                                                       ) / (frames[i + 1] - frame))
                        for c in animated
                    ]
                complete_key_tangents(key, non_keyed_values)
            keyframes.append(key)
    return keyframes
def complete_key(key: Keyframe, non_keyed_values: typing.Tuple[typing.Optional[float]]):
    """Fill every non-keyed component of *key* from *non_keyed_values*."""
    keyed = key.get_indices()
    for component in range(key.get_target_len()):
        # Components Blender actually keyed (or animated SKs) stay untouched.
        if component in keyed:
            continue
        key.set_value_index(component, non_keyed_values[component])
def complete_key_tangents(key: Keyframe, non_keyed_values: typing.Tuple[typing.Optional[float]]):
    """Fill every non-keyed component of *key*'s in/out tangents.

    Tangents that were never set (None) are left alone.
    """
    keyed = key.get_indices()
    for component in range(key.get_target_len()):
        # Keyed array indices (and animated SKs) are already complete.
        if component in keyed:
            continue
        if key.in_tangent is not None:
            key.set_value_index_in(component, non_keyed_values[component])
        if key.out_tangent is not None:
            key.set_value_index_out(component, non_keyed_values[component])
def needs_baking(blender_object_if_armature: typing.Optional[bpy.types.Object],
                 channels: typing.Tuple[bpy.types.FCurve],
                 export_settings
                 ) -> bool:
    """
    Check if baking is needed.
    Some blender animations need to be baked as they can not directly be expressed in glTF.

    :param blender_object_if_armature: the armature object, or None
    :param channels: fcurves of one action group (None entries only for
        shape keys that are not animated)
    :return: True when the animation must be resampled frame by frame
    """
    def all_equal(lst):
        # True iff every element of lst equals every other.
        return lst[1:] == lst[:-1]
    # Note: channels has some None items only for SK if some SK are not animated
    # Hoisted once — the original rebuilt this filtered list for every check.
    animated = [c for c in channels if c is not None]
    # Sampling is forced
    if export_settings[gltf2_blender_export_keys.FORCE_SAMPLING]:
        return True
    # Sampling due to unsupported interpolation
    interpolation = animated[0].keyframe_points[0].interpolation
    if interpolation not in ["BEZIER", "LINEAR", "CONSTANT"]:
        gltf2_io_debug.print_console("WARNING",
                                     "Baking animation because of an unsupported interpolation method: {}".format(
                                         interpolation)
                                     )
        return True
    if any(any(k.interpolation != interpolation for k in c.keyframe_points) for c in animated):
        # There are different interpolation methods in one action group
        gltf2_io_debug.print_console("WARNING",
                                     "Baking animation because there are keyframes with different "
                                     "interpolation methods in one channel"
                                     )
        return True
    if not all_equal([len(c.keyframe_points) for c in animated]):
        gltf2_io_debug.print_console("WARNING",
                                     "Baking animation because the number of keyframes is not "
                                     "equal for all channel tracks")
        return True
    if len(animated[0].keyframe_points) <= 1:
        # we need to bake to 'STEP', as at least two keyframes are required to interpolate
        return True
    # The channels have differently located keyframes?
    # (The original wrapped this list in single-argument ``list(zip(...))`` —
    # zip over one iterable just yields 1-tuples, so the all_equal result is
    # unchanged by dropping the wrapper; ``zip(*...)`` was likely intended.)
    if not all_equal([[k.co[0] for k in c.keyframe_points] for c in animated]):
        gltf2_io_debug.print_console("WARNING",
                                     "Baking animation because of differently located keyframes in one channel")
        return True
    if blender_object_if_armature is not None:
        animation_target = gltf2_blender_get.get_object_from_datapath(blender_object_if_armature, animated[0].data_path)
        if isinstance(animation_target, bpy.types.PoseBone):
            if len(animation_target.constraints) != 0:
                # Constraints such as IK act on the bone -> can not be represented in glTF atm
                gltf2_io_debug.print_console("WARNING",
                                             "Baking animation because of unsupported constraints acting on the bone")
                return True
    return False
|
# Copyright 2018-2019 The glTF-Blender-IO authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bpy
import mathutils
import typing
from io_scene_webaverse.blender.exp.gltf2_blender_gather_cache import cached, bonecache
from io_scene_webaverse.blender.com import gltf2_blender_math
from io_scene_webaverse.blender.exp import gltf2_blender_get
from io_scene_webaverse.blender.exp.gltf2_blender_gather_drivers import get_sk_drivers, get_sk_driver_values
from . import gltf2_blender_export_keys
from io_scene_webaverse.io.com import gltf2_io_debug
class Keyframe:
    """A single sampled animation key.

    Collects the per-channel values of one frame and exposes them as a
    complete vector/quaternion/shape-key list (non-keyed components are
    zero-padded by ``__set_indexed``), with optional Bezier in/out
    tangents for cubic-spline export.
    """
    def __init__(self, channels: typing.Tuple[bpy.types.FCurve], frame: float, bake_channel: typing.Union[str, None]):
        # Timestamp of this key in seconds, derived from the scene FPS.
        self.seconds = frame / bpy.context.scene.render.fps
        self.frame = frame
        self.fps = bpy.context.scene.render.fps
        # Number of shape keys; only meaningful when target == "value".
        self.__length_morph = 0
        # Note: channels has some None items only for SK if some SK are not animated
        if bake_channel is None:
            # Infer the animated property name from the first live channel's
            # data path (e.g. "location", "rotation_quaternion", "value").
            self.target = [c for c in channels if c is not None][0].data_path.split('.')[-1]
            if self.target != "value":
                self.__indices = [c.array_index for c in channels]
            else:
                # Shape keys: remember which SK slots are actually animated.
                self.__indices = [i for i, c in enumerate(channels) if c is not None]
                self.__length_morph = len(channels)
        else:
            # Baked channel: every component of the target is treated as keyed.
            self.target = bake_channel
            self.__indices = []
            for i in range(self.get_target_len()):
                self.__indices.append(i)
        # Data holders for virtual properties
        self.__value = None
        self.__in_tangent = None
        self.__out_tangent = None
    def get_target_len(self):
        """Return the component count of the target property.

        :raises RuntimeError: if the target type is not supported.
        """
        length = {
            "delta_location": 3,
            "delta_rotation_euler": 3,
            "location": 3,
            "rotation_axis_angle": 4,
            "rotation_euler": 3,
            "rotation_quaternion": 4,
            "scale": 3,
            "value": self.__length_morph
        }.get(self.target)
        if length is None:
            raise RuntimeError("Animations with target type '{}' are not supported.".format(self.target))
        return length
    def __set_indexed(self, value):
        """Scatter *value* into a zero-filled list at the keyed indices."""
        # Sometimes blender animations only reference a subset of components of a data target. Keyframe should always
        # contain a complete Vector/ Quaternion --> use the array_index value of the keyframe to set components in such
        # structures
        # For SK, must contains all SK values
        result = [0.0] * self.get_target_len()
        for i, v in zip(self.__indices, value):
            result[i] = v
        return result
    def get_indices(self):
        # Indices of the components that are actually keyed (or animated SKs).
        return self.__indices
    def set_value_index(self, idx, val):
        # Overwrite a single component of the stored value (used to fill in
        # non-keyed components after construction; see complete_key).
        self.__value[idx] = val
    def set_value_index_in(self, idx, val):
        # Overwrite a single component of the in-tangent.
        self.__in_tangent[idx] = val
    def set_value_index_out(self, idx, val):
        # Overwrite a single component of the out-tangent.
        self.__out_tangent[idx] = val
    def set_first_tangent(self):
        # First key of a curve: the in-tangent collapses onto the value.
        # NOTE(review): this aliases the same list object as the value, so a
        # later set_value_index_in also mutates the value — appears harmless
        # since completion writes identical numbers; confirm.
        self.__in_tangent = self.__value
    def set_last_tangent(self):
        # Last key of a curve: the out-tangent collapses onto the value
        # (same aliasing caveat as set_first_tangent).
        self.__out_tangent = self.__value
    @property
    def value(self) -> typing.Union[mathutils.Vector, mathutils.Euler, mathutils.Quaternion, typing.List[float]]:
        """The complete key value, converted to a mathutils type (plain list for SKs)."""
        if self.target == "value":
            return self.__value
        return gltf2_blender_math.list_to_mathutils(self.__value, self.target)
    @value.setter
    def value(self, value: typing.List[float]):
        self.__value = self.__set_indexed(value)
    @property
    def in_tangent(self) -> typing.Union[mathutils.Vector, mathutils.Euler, mathutils.Quaternion, typing.List[float]]:
        """Bezier in-tangent, or None when not set."""
        if self.__in_tangent is None:
            return None
        if self.target == "value":
            return self.__in_tangent
        return gltf2_blender_math.list_to_mathutils(self.__in_tangent, self.target)
    @in_tangent.setter
    def in_tangent(self, value: typing.List[float]):
        self.__in_tangent = self.__set_indexed(value)
    @property
    def out_tangent(self) -> typing.Union[mathutils.Vector, mathutils.Euler, mathutils.Quaternion, typing.List[float]]:
        """Bezier out-tangent, or None when not set."""
        if self.__out_tangent is None:
            return None
        if self.target == "value":
            return self.__out_tangent
        return gltf2_blender_math.list_to_mathutils(self.__out_tangent, self.target)
    @out_tangent.setter
    def out_tangent(self, value: typing.List[float]):
        self.__out_tangent = self.__set_indexed(value)
@bonecache
def get_bone_matrix(blender_object_if_armature: typing.Optional[bpy.types.Object],
                    channels: typing.Tuple[bpy.types.FCurve],
                    bake_bone: typing.Union[str, None],
                    bake_channel: typing.Union[str, None],
                    bake_range_start,
                    bake_range_end,
                    action_name: str,
                    current_frame: int,
                    step: int
                    ):
    """Sample every pose bone's local matrix over the whole bake range.

    The body builds a ``{frame: {bone_name: Matrix}}`` table covering
    ``bake_range_start..bake_range_end`` in increments of *step*.
    ``current_frame``, ``channels`` and ``action_name`` are not read here;
    NOTE(review): they presumably act as cache keys for ``@bonecache``,
    which callers rely on to return a single per-frame matrix (callers do
    ``mat.decompose()``) — confirm against the decorator implementation.
    """
    data = {}
    # Always using bake_range, because some bones may need to be baked,
    # even if user didn't request it
    start_frame = bake_range_start
    end_frame = bake_range_end
    frame = start_frame
    while frame <= end_frame:
        data[frame] = {}
        # we need to bake in the constraints
        bpy.context.scene.frame_set(frame)
        for pbone in blender_object_if_armature.pose.bones:
            if bake_bone is None:
                # Plain animation: the local basis matrix is sufficient.
                matrix = pbone.matrix_basis.copy()
            else:
                # Baked bone: when rotation/scale inheritance is disabled,
                # rebuild the matrix relative to the parent's rest pose first.
                if (pbone.bone.use_inherit_rotation == False or pbone.bone.inherit_scale != "FULL") and pbone.parent != None:
                    rest_mat = (pbone.parent.bone.matrix_local.inverted_safe() @ pbone.bone.matrix_local)
                    matrix = (rest_mat.inverted_safe() @ pbone.parent.matrix.inverted_safe() @ pbone.matrix)
                else:
                    matrix = pbone.matrix
                matrix = blender_object_if_armature.convert_space(pose_bone=pbone, matrix=matrix, from_space='POSE', to_space='LOCAL')
            data[frame][pbone.name] = matrix
        # If some drivers must be evaluated, do it here, to avoid to have to change frame by frame later
        obj_driver = blender_object_if_armature.proxy if blender_object_if_armature.proxy else blender_object_if_armature
        drivers_to_manage = get_sk_drivers(obj_driver)
        for dr_obj, dr_fcurves in drivers_to_manage:
            # Result is discarded — evaluated for its (presumed) caching side
            # effect inside get_sk_driver_values; TODO confirm.
            vals = get_sk_driver_values(dr_obj, frame, dr_fcurves)
        frame += step
    return data
# cache for performance reasons
@cached
def gather_keyframes(blender_object_if_armature: typing.Optional[bpy.types.Object],
                     channels: typing.Tuple[bpy.types.FCurve],
                     non_keyed_values: typing.Tuple[typing.Optional[float]],
                     bake_bone: typing.Union[str, None],
                     bake_channel: typing.Union[str, None],
                     bake_range_start,
                     bake_range_end,
                     action_name: str,
                     driver_obj,
                     export_settings
                     ) -> typing.List[Keyframe]:
    """Convert the blender action groups' fcurves to keyframes for use in glTF.

    Either samples the animation frame by frame (when ``needs_baking`` says
    the curves cannot be represented directly in glTF) or converts Blender's
    own keyframes, including Bezier tangents for cubic-spline interpolation.

    :param blender_object_if_armature: the armature object, or None
    :param channels: fcurves of one action group (None entries only for
        shape keys that are not animated)
    :param non_keyed_values: fallback value per component for components
        that no fcurve animates
    :param bake_bone: name of a bone to bake, or None
    :param bake_channel: property name to bake, or None
    :param driver_obj: object whose shape keys are driven, or None
    :return: the gathered list of Keyframe objects
    """
    if bake_bone is None and driver_obj is None:
        # Find the start and end of the whole action group
        # Note: channels has some None items only for SK if some SK are not animated
        # (each fcurve's range() is queried once here; the original re-queried
        # every fcurve separately for the min and for the max)
        ranges = [channel.range() for channel in channels if channel is not None]
        start_frame = min(r[0] for r in ranges)
        end_frame = max(r[1] for r in ranges)
    else:
        start_frame = bake_range_start
        end_frame = bake_range_end
    keyframes = []
    # Channels that are actually animated; hoisted once instead of being
    # rebuilt by comprehension in every check / loop iteration below.
    animated = [c for c in channels if c is not None]
    if needs_baking(blender_object_if_armature, channels, export_settings):
        # Bake the animation, by evaluating the animation for all frames
        # TODO: maybe baking can also be done with FCurve.convert_to_samples
        if blender_object_if_armature is not None and driver_obj is None:
            if bake_bone is None:
                pose_bone_if_armature = gltf2_blender_get.get_object_from_datapath(blender_object_if_armature,
                                                                                   channels[0].data_path)
            else:
                pose_bone_if_armature = blender_object_if_armature.pose.bones[bake_bone]
        else:
            pose_bone_if_armature = None
        # sample all frames
        frame = start_frame
        step = export_settings['gltf_frame_step']
        while frame <= end_frame:
            key = Keyframe(channels, frame, bake_channel)
            if isinstance(pose_bone_if_armature, bpy.types.PoseBone):
                # NOTE(review): get_bone_matrix's body builds a per-frame
                # table; the @bonecache decorator is presumed to resolve it
                # to this frame's matrix — confirm against the decorator.
                mat = get_bone_matrix(
                    blender_object_if_armature,
                    channels,
                    bake_bone,
                    bake_channel,
                    bake_range_start,
                    bake_range_end,
                    action_name,
                    frame,
                    step
                )
                trans, rot, scale = mat.decompose()
                if bake_channel is None:
                    target_property = channels[0].data_path.split('.')[-1]
                else:
                    target_property = bake_channel
                key.value = {
                    "location": trans,
                    "rotation_axis_angle": rot,
                    "rotation_euler": rot,
                    "rotation_quaternion": rot,
                    "scale": scale
                }[target_property]
            else:
                if driver_obj is None:
                    # Note: channels has some None items only for SK if some SK are not animated
                    key.value = [c.evaluate(frame) for c in animated]
                    complete_key(key, non_keyed_values)
                else:
                    key.value = get_sk_driver_values(driver_obj, frame, channels)
                    complete_key(key, non_keyed_values)
            keyframes.append(key)
            frame += step
    else:
        # Just use the keyframes as they are specified in blender
        # Note: channels has some None items only for SK if some SK are not animated
        frames = [keyframe.co[0] for keyframe in animated[0].keyframe_points]
        # some weird files have duplicate frame at same time, removed them
        frames = sorted(set(frames))
        # NOTE(review): after de-duplication, index i below may no longer line
        # up with c.keyframe_points[i] for files that did contain duplicates —
        # preserved from the original; confirm before relying on tangents.
        for i, frame in enumerate(frames):
            key = Keyframe(channels, frame, bake_channel)
            # key.value = [c.keyframe_points[i].co[0] for c in action_group.channels]
            key.value = [c.evaluate(frame) for c in animated]
            # Complete key with non keyed values, if needed
            if len(animated) != key.get_target_len():
                complete_key(key, non_keyed_values)
            # compute tangents for cubic spline interpolation
            if animated[0].keyframe_points[0].interpolation == "BEZIER":
                # Construct the in tangent
                if frame == frames[0]:
                    # start in-tangent should become all zero
                    key.set_first_tangent()
                else:
                    # otherwise construct an in tangent coordinate from the keyframes control points. We intermediately
                    # use a point at t-1 to define the tangent. This allows the tangent control point to be transformed
                    # normally
                    key.in_tangent = [
                        c.keyframe_points[i].co[1] + ((c.keyframe_points[i].co[1] - c.keyframe_points[i].handle_left[1]
                                                       ) / (frame - frames[i - 1]))
                        for c in animated
                    ]
                # Construct the out tangent
                if frame == frames[-1]:
                    # end out-tangent should become all zero
                    key.set_last_tangent()
                else:
                    # otherwise construct an in tangent coordinate from the keyframes control points. We intermediately
                    # use a point at t+1 to define the tangent. This allows the tangent control point to be transformed
                    # normally
                    key.out_tangent = [
                        c.keyframe_points[i].co[1] + ((c.keyframe_points[i].handle_right[1] - c.keyframe_points[i].co[1]
                                                       ) / (frames[i + 1] - frame))
                        for c in animated
                    ]
                complete_key_tangents(key, non_keyed_values)
            keyframes.append(key)
    return keyframes
def complete_key(key: Keyframe, non_keyed_values: typing.Tuple[typing.Optional[float]]):
    """Fill every non-keyed component of *key* from *non_keyed_values*."""
    keyed = key.get_indices()
    for component in range(key.get_target_len()):
        # Components Blender actually keyed (or animated SKs) stay untouched.
        if component in keyed:
            continue
        key.set_value_index(component, non_keyed_values[component])
def complete_key_tangents(key: Keyframe, non_keyed_values: typing.Tuple[typing.Optional[float]]):
    """Fill every non-keyed component of *key*'s in/out tangents.

    Tangents that were never set (None) are left alone.
    """
    keyed = key.get_indices()
    for component in range(key.get_target_len()):
        # Keyed array indices (and animated SKs) are already complete.
        if component in keyed:
            continue
        if key.in_tangent is not None:
            key.set_value_index_in(component, non_keyed_values[component])
        if key.out_tangent is not None:
            key.set_value_index_out(component, non_keyed_values[component])
def needs_baking(blender_object_if_armature: typing.Optional[bpy.types.Object],
                 channels: typing.Tuple[bpy.types.FCurve],
                 export_settings
                 ) -> bool:
    """
    Check if baking is needed.

    Some blender animations need to be baked as they can not directly be
    expressed in glTF: unsupported interpolation modes, channels whose
    keyframes disagree in count/placement/interpolation, single-keyframe
    channels, or bones driven by constraints.

    :param blender_object_if_armature: the armature object when the channels
        animate bones, otherwise None
    :param channels: f-curves of one action group (contains None entries only
        for shape keys that are not animated)
    :param export_settings: exporter settings dict
    :return: True when the animation must be sampled frame by frame
    """
    def all_equal(lst):
        # True when every item is equal (vacuously True for 0 or 1 items)
        return lst[1:] == lst[:-1]

    # Note: channels has some None items only for SK if some SK are not animated.
    # Hoisted once instead of being rebuilt by every check below.
    valid_channels = [c for c in channels if c is not None]

    # Sampling is forced
    if export_settings[gltf2_blender_export_keys.FORCE_SAMPLING]:
        return True

    # Sampling due to unsupported interpolation
    interpolation = valid_channels[0].keyframe_points[0].interpolation
    if interpolation not in ["BEZIER", "LINEAR", "CONSTANT"]:
        gltf2_io_debug.print_console("WARNING",
                                     "Baking animation because of an unsupported interpolation method: {}".format(
                                         interpolation)
                                     )
        return True

    if any(any(k.interpolation != interpolation for k in c.keyframe_points) for c in valid_channels):
        # There are different interpolation methods in one action group
        gltf2_io_debug.print_console("WARNING",
                                     "Baking animation because there are keyframes with different "
                                     "interpolation methods in one channel"
                                     )
        return True

    if not all_equal([len(c.keyframe_points) for c in valid_channels]):
        gltf2_io_debug.print_console("WARNING",
                                     "Baking animation because the number of keyframes is not "
                                     "equal for all channel tracks")
        return True

    if len(valid_channels[0].keyframe_points) <= 1:
        # we need to bake to 'STEP', as at least two keyframes are required to interpolate
        return True

    # The original wrapped this comparison in a redundant single-argument zip();
    # comparing the per-channel keyframe-time lists directly is equivalent.
    if not all_equal([[k.co[0] for k in c.keyframe_points] for c in valid_channels]):
        # The channels have differently located keyframes
        gltf2_io_debug.print_console("WARNING",
                                     "Baking animation because of differently located keyframes in one channel")
        return True

    if blender_object_if_armature is not None:
        animation_target = gltf2_blender_get.get_object_from_datapath(blender_object_if_armature,
                                                                      valid_channels[0].data_path)
        if isinstance(animation_target, bpy.types.PoseBone):
            if len(animation_target.constraints) != 0:
                # Constraints such as IK act on the bone -> can not be represented in glTF atm
                gltf2_io_debug.print_console("WARNING",
                                             "Baking animation because of unsupported constraints acting on the bone")
                return True

    return False
|
en
| 0.882033
|
# Copyright 2018-2019 The glTF-Blender-IO authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Note: channels has some None items only for SK if some SK are not animated # Data holders for virtual properties # Sometimes blender animations only reference a subset of components of a data target. Keyframe should always # contain a complete Vector/ Quaternion --> use the array_index value of the keyframe to set components in such # structures # For SK, must contains all SK values # Always using bake_range, because some bones may need to be baked, # even if user didn't request it # we need to bake in the constraints # If some drivers must be evaluated, do it here, to avoid to have to change frame by frame later # cache for performance reasons Convert the blender action groups' fcurves to keyframes for use in glTF. 
# Find the start and end of the whole action group # Note: channels has some None items only for SK if some SK are not animated # Bake the animation, by evaluating the animation for all frames # TODO: maybe baking can also be done with FCurve.convert_to_samples # sample all frames # Note: channels has some None items only for SK if some SK are not animated # Just use the keyframes as they are specified in blender # Note: channels has some None items only for SK if some SK are not animated # some weird files have duplicate frame at same time, removed them # key.value = [c.keyframe_points[i].co[0] for c in action_group.channels] # Complete key with non keyed values, if needed # compute tangents for cubic spline interpolation # Construct the in tangent # start in-tangent should become all zero # otherwise construct an in tangent coordinate from the keyframes control points. We intermediately # use a point at t-1 to define the tangent. This allows the tangent control point to be transformed # normally # Construct the out tangent # end out-tangent should become all zero # otherwise construct an in tangent coordinate from the keyframes control points. We intermediately # use a point at t+1 to define the tangent. This allows the tangent control point to be transformed # normally Complete keyframe with non keyed values # this is a keyed array_index or a SK animated Complete keyframe with non keyed values for tangents # this is a keyed array_index or a SK animated Check if baking is needed. Some blender animations need to be baked as they can not directly be expressed in glTF. 
# Note: channels has some None items only for SK if some SK are not animated # Sampling is forced # Sampling due to unsupported interpolation # There are different interpolation methods in one action group # we need to bake to 'STEP', as at least two keyframes are required to interpolate # The channels have differently located keyframes # Constraints such as IK act on the bone -> can not be represented in glTF atm
| 1.626919
| 2
|
src/StockSight/EsMap/StockPrice.py
|
oreoluwa/stocksight
| 3
|
6628450
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Stock Price Mapping
Copyright (C) <NAME> 2018-2019
Copyright (C) Allen (<NAME>) Xie 2019
stocksight is released under the Apache 2.0 license. See
LICENSE for the full license text.
"""
# set up elasticsearch mappings and create index
def _es_field(field_type):
    """Build a single-attribute Elasticsearch field mapping."""
    return {"type": field_type}


# Index schema for stock price documents: one keyword symbol, a date,
# float price fields, and an integer volume. Replicas disabled.
mapping = {
    "mappings": {
        "properties": {
            "symbol": _es_field("keyword"),
            "price_last": _es_field("float"),
            "date": _es_field("date"),
            "change": _es_field("float"),
            "price_high": _es_field("float"),
            "price_low": _es_field("float"),
            "price_open": _es_field("float"),
            "price_close": _es_field("float"),
            "vol": _es_field("integer"),
        }
    },
    "settings": {
        "index": {
            "number_of_replicas": "0"
        }
    },
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Stock Price Mapping
Copyright (C) <NAME> 2018-2019
Copyright (C) Allen (<NAME>) Xie 2019
stocksight is released under the Apache 2.0 license. See
LICENSE for the full license text.
"""
# set up elasticsearch mappings and create index
mapping = {
"mappings": {
"properties": {
"symbol": {
"type": "keyword"
},
"price_last": {
"type": "float"
},
"date": {
"type": "date"
},
"change": {
"type": "float"
},
"price_high": {
"type": "float"
},
"price_low": {
"type": "float"
},
"price_open": {
"type": "float"
},
"price_close": {
"type": "float"
},
"vol": {
"type": "integer"
}
}
},
"settings": {
"index": {
"number_of_replicas": "0"
}
}
}
|
en
| 0.67356
|
#!/usr/bin/env python # -*- coding: utf-8 -*- Stock Price Mapping Copyright (C) <NAME> 2018-2019 Copyright (C) Allen (<NAME>) Xie 2019 stocksight is released under the Apache 2.0 license. See LICENSE for the full license text. # set up elasticsearch mappings and create index
| 1.597385
| 2
|