hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f73a0a857e15ef305c67c6934661566d9dbe8cfb | 1,954 | py | Python | src/models/predict_model.py | BasianLesi/Master-Thesis | 3417ab9d4f05e23da16203374fe9aaf20e51fab1 | [
"MIT"
] | null | null | null | src/models/predict_model.py | BasianLesi/Master-Thesis | 3417ab9d4f05e23da16203374fe9aaf20e51fab1 | [
"MIT"
] | null | null | null | src/models/predict_model.py | BasianLesi/Master-Thesis | 3417ab9d4f05e23da16203374fe9aaf20e51fab1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from helper import *
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # silence TensorFlow C++ info/warning output
# Directory constants; ``g`` is presumably exported by ``helper`` via the
# star import above -- TODO confirm against helper.py.
ROOT_DIR = g.ROOT_DIR
raw_data_dir = g.raw_data_dir
processed_data_dir = g.processed_data_dir
print(f"ROOT DIR = {ROOT_DIR}")
data_directory = ROOT_DIR+"/data/raw/"  # default raw-data location
@click.command()
@click.option('--data_dir', default=ROOT_DIR+"/data/processed/", help='input data directory.')
@click.option('--model_dir', default=ROOT_DIR+"/models/", help='output data directory.')
def main(data_dir, model_dir):
    """ Runs the prediction script: loads the normalized data frames, the two
    trained models and the forecast input data, then produces both forecasts.

    Parameters:
        data_dir: input data directory. NOTE(review): currently unused --
            the module-level ``processed_data_dir`` is read instead; confirm
            and unify before relying on this option.
        model_dir: directory holding the saved ``pv_model``/``wp_model``.
    """
    logger = logging.getLogger(__name__)
    # Message corrected: this is the prediction script, not training.
    logger.info('Starting model prediction script')
    try:
        df_pv = pd.read_csv(processed_data_dir + 'pv_norm.csv')
        pv_model = load_model(model_dir + "pv_model/")
        pv_forecast = pd.read_csv(processed_data_dir + "PV_predict_data.csv")
    except Exception:  # narrowed from bare ``except`` so SystemExit/KeyboardInterrupt pass through
        logger.error("Unable to load df or model from dir: " + processed_data_dir)
        sys.exit(1)
    try:
        df_wp = pd.read_csv(processed_data_dir + 'wp_norm.csv')
        wp_model = load_model(model_dir + "wp_model/")
        # NOTE(review): this reads the *PV* prediction data for the wind-power
        # model as well -- looks like a copy-paste; confirm whether
        # "WP_predict_data.csv" was intended before changing it.
        wp_forecast = pd.read_csv(processed_data_dir + "PV_predict_data.csv")
    except Exception:
        logger.error("Unable to load df or model from dir: " + processed_data_dir)
        sys.exit(1)

    # predict(df_pv, pv_model, "PV power")
    # predict(df_wp, wp_model, "Wind power")
    forecast(pv_forecast, pv_model, "PV power")
    forecast(wp_forecast, wp_model, "Wind power")
if __name__ == '__main__':
    # Configure root logging before the CLI entry point runs.
    LOG_FMT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=LOG_FMT)

    # not used in this stub but often useful for finding various files
    project_dir = Path(__file__).resolve().parents[2]

    # Walk up the directory tree for a .env file and export its entries
    # as environment variables before running the command.
    load_dotenv(find_dotenv())

    main()
from helper import *
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
ROOT_DIR = g.ROOT_DIR
raw_data_dir = g.raw_data_dir
processed_data_dir = g.processed_data_dir
print(f"ROOT DIR = {ROOT_DIR}")
data_directory = ROOT_DIR+"/data/raw/"
@click.command()
@click.option('--data_dir', default=ROOT_DIR+"/data/processed/", help='input data directory.')
@click.option('--model_dir', default=ROOT_DIR+"/models/", help='ouput data directory.')
def main(data_dir, model_dir):
logger = logging.getLogger(__name__)
logger.info('Starting model training script')
try:
df_pv = pd.read_csv(processed_data_dir + 'pv_norm.csv')
pv_model = load_model(model_dir + "pv_model/")
pv_forecast = pd.read_csv(processed_data_dir + "PV_predict_data.csv")
except:
logger.error("Unalbe loading df or model dir: " + processed_data_dir)
sys.exit(1)
try:
df_wp = pd.read_csv(processed_data_dir + 'wp_norm.csv')
wp_model = load_model(model_dir + "wp_model/")
wp_forecast = pd.read_csv(processed_data_dir + "PV_predict_data.csv")
except:
logger.error("Unalbe loading df or model from dir: " + processed_data_dir)
sys.exit(1)
forecast(pv_forecast, pv_model, "PV power")
forecast(wp_forecast, wp_model, "Wind power")
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
project_dir = Path(__file__).resolve().parents[2]
# load up the .env entries as environment variables
load_dotenv(find_dotenv())
main() | true | true |
f73a0c1ffc56ec2f6589ebc91cff322666f2312e | 7,218 | py | Python | neurom/fst/__init__.py | mgeplf/NeuroM | e21c01979de3db643c309b6bf2fe0b5dc9363c3a | [
"BSD-3-Clause"
] | null | null | null | neurom/fst/__init__.py | mgeplf/NeuroM | e21c01979de3db643c309b6bf2fe0b5dc9363c3a | [
"BSD-3-Clause"
] | null | null | null | neurom/fst/__init__.py | mgeplf/NeuroM | e21c01979de3db643c309b6bf2fe0b5dc9363c3a | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
''' NeuroM, lightweight and fast
Examples:
Obtain some morphometrics
>>> ap_seg_len = fst.get('segment_lengths', nrn, neurite_type=neurom.APICAL_DENDRITE)
>>> ax_sec_len = fst.get('section_lengths', nrn, neurite_type=neurom.AXON)
'''
import numpy as _np
from . import _neuritefunc as _nrt
from . import _neuronfunc as _nrn
from ..core import NeuriteType as _ntype
from ..core import iter_neurites as _ineurites
from ..core.types import tree_type_checker as _is_type
from ..exceptions import NeuroMError
from ._core import FstNeuron
# Registry of features computed over neurites (also usable on whole neurons
# and neuron populations): public feature name -> worker in ``_neuritefunc``.
NEURITEFEATURES = {
    'total_length': _nrt.total_length,
    'total_length_per_neurite': _nrt.total_length_per_neurite,
    'neurite_lengths': _nrt.total_length_per_neurite,
    'terminal_path_lengths_per_neurite': _nrt.terminal_path_lengths_per_neurite,
    'section_lengths': _nrt.section_lengths,
    'section_term_lengths': _nrt.section_term_lengths,
    'section_bif_lengths': _nrt.section_bif_lengths,
    'neurite_volumes': _nrt.total_volume_per_neurite,
    'neurite_volume_density': _nrt.neurite_volume_density,
    'section_volumes': _nrt.section_volumes,
    'section_areas': _nrt.section_areas,
    'section_tortuosity': _nrt.section_tortuosity,
    'section_path_distances': _nrt.section_path_lengths,
    'number_of_sections': _nrt.number_of_sections,
    'number_of_sections_per_neurite': _nrt.number_of_sections_per_neurite,
    'number_of_neurites': _nrt.number_of_neurites,
    'number_of_bifurcations': _nrt.number_of_bifurcations,
    'number_of_forking_points': _nrt.number_of_forking_points,
    'number_of_terminations': _nrt.number_of_terminations,
    'section_branch_orders': _nrt.section_branch_orders,
    'section_term_branch_orders': _nrt.section_term_branch_orders,
    'section_bif_branch_orders': _nrt.section_bif_branch_orders,
    'section_radial_distances': _nrt.section_radial_distances,
    'section_bif_radial_distances': _nrt.section_bif_radial_distances,
    'section_term_radial_distances': _nrt.section_term_radial_distances,
    'section_end_distances': _nrt.section_end_distances,
    'section_strahler_orders': _nrt.section_strahler_orders,
    'local_bifurcation_angles': _nrt.local_bifurcation_angles,
    'remote_bifurcation_angles': _nrt.remote_bifurcation_angles,
    'partition': _nrt.bifurcation_partitions,
    'partition_asymmetry': _nrt.partition_asymmetries,
    'partition_pairs': _nrt.partition_pairs,
    'number_of_segments': _nrt.number_of_segments,
    'segment_lengths': _nrt.segment_lengths,
    'segment_volumes': _nrt.segment_volumes,
    'segment_radii': _nrt.segment_radii,
    'segment_midpoints': _nrt.segment_midpoints,
    'segment_taper_rates': _nrt.segment_taper_rates,
    'segment_radial_distances': _nrt.segment_radial_distances,
    'segment_meander_angles': _nrt.segment_meander_angles,
    'principal_direction_extents': _nrt.principal_direction_extents,
    'total_area_per_neurite': _nrt.total_area_per_neurite,
}
# Registry of features defined only at the neuron (or neuron population)
# level: public feature name -> worker in ``_neuronfunc``.
NEURONFEATURES = {
    'soma_radii': _nrn.soma_radii,
    'soma_surface_areas': _nrn.soma_surface_areas,
    'trunk_origin_radii': _nrn.trunk_origin_radii,
    'trunk_origin_azimuths': _nrn.trunk_origin_azimuths,
    'trunk_origin_elevations': _nrn.trunk_origin_elevations,
    'trunk_section_lengths': _nrn.trunk_section_lengths,
    'trunk_angles': _nrn.trunk_angles,
    'trunk_vectors': _nrn.trunk_vectors,
    'sholl_frequency': _nrn.sholl_frequency,
}
def register_neurite_feature(name, func):
    '''Register a feature to be applied to neurites

    Parameters:
        name: name of the feature, used for access via get() function.
        func: single parameter function of a neurite.

    Raises:
        NeuroMError: if ``name`` is already a registered feature.
    '''
    # Check both registries: the original only consulted NEURITEFEATURES,
    # so a name colliding with a neuron-level feature silently overwrote it.
    if name in NEURITEFEATURES or name in NEURONFEATURES:
        raise NeuroMError('Attempt to hide registered feature %s' % name)

    def _fun(neurites, neurite_type=_ntype.all):
        '''Wrap neurite function from outer scope and map into list'''
        return [func(n) for n in _ineurites(neurites, filt=_is_type(neurite_type))]

    NEURONFEATURES[name] = _fun
def get(feature, obj, **kwargs):
    '''Obtain a feature from a set of morphology objects

    Parameters:
        feature(string): feature to extract
        obj: a neuron, population or neurite tree
        **kwargs: parameters to forward to underlying worker functions

    Returns:
        features as a 1D or 2D numpy array.
    '''
    # Neurite-level features take priority; unknown names raise KeyError.
    if feature in NEURITEFEATURES:
        extractor = NEURITEFEATURES[feature]
    else:
        extractor = NEURONFEATURES[feature]
    return _np.array(list(extractor(obj, **kwargs)))
_INDENT = ' ' * 4


def _indent(string, count):
    '''Return *string* with every line indented by ``count`` indent units,
    with trailing whitespace stripped from the result.'''
    prefix = _INDENT * count
    indented = prefix + string.replace('\n', '\n' + prefix)
    return indented.rstrip()
def _get_doc():
    '''Build a textual catalogue of all registered features.'''
    def describe(func):
        '''Return ``:`` plus the (indented) docstring of *func*, if any.'''
        text = ':\n'
        if func.__doc__:
            text += _indent(func.__doc__, 2)
        return text

    lines = ['\nNeurite features (neurite, neuron, neuron population):']
    lines.extend(_INDENT + '- ' + name + describe(func)
                 for name, func in sorted(NEURITEFEATURES.items()))
    lines.append('\nNeuron features (neuron, neuron population):')
    lines.extend(_INDENT + '- ' + name + describe(func)
                 for name, func in sorted(NEURONFEATURES.items()))
    return '\n'.join(lines)
get.__doc__ += _indent('\nFeatures:\n', 1) + _indent(_get_doc(), 2) # pylint: disable=no-member
| 41.245714 | 96 | 0.744389 |
import numpy as _np
from . import _neuritefunc as _nrt
from . import _neuronfunc as _nrn
from ..core import NeuriteType as _ntype
from ..core import iter_neurites as _ineurites
from ..core.types import tree_type_checker as _is_type
from ..exceptions import NeuroMError
from ._core import FstNeuron
NEURITEFEATURES = {
'total_length': _nrt.total_length,
'total_length_per_neurite': _nrt.total_length_per_neurite,
'neurite_lengths': _nrt.total_length_per_neurite,
'terminal_path_lengths_per_neurite': _nrt.terminal_path_lengths_per_neurite,
'section_lengths': _nrt.section_lengths,
'section_term_lengths': _nrt.section_term_lengths,
'section_bif_lengths': _nrt.section_bif_lengths,
'neurite_volumes': _nrt.total_volume_per_neurite,
'neurite_volume_density': _nrt.neurite_volume_density,
'section_volumes': _nrt.section_volumes,
'section_areas': _nrt.section_areas,
'section_tortuosity': _nrt.section_tortuosity,
'section_path_distances': _nrt.section_path_lengths,
'number_of_sections': _nrt.number_of_sections,
'number_of_sections_per_neurite': _nrt.number_of_sections_per_neurite,
'number_of_neurites': _nrt.number_of_neurites,
'number_of_bifurcations': _nrt.number_of_bifurcations,
'number_of_forking_points': _nrt.number_of_forking_points,
'number_of_terminations': _nrt.number_of_terminations,
'section_branch_orders': _nrt.section_branch_orders,
'section_term_branch_orders': _nrt.section_term_branch_orders,
'section_bif_branch_orders': _nrt.section_bif_branch_orders,
'section_radial_distances': _nrt.section_radial_distances,
'section_bif_radial_distances': _nrt.section_bif_radial_distances,
'section_term_radial_distances': _nrt.section_term_radial_distances,
'section_end_distances': _nrt.section_end_distances,
'section_strahler_orders': _nrt.section_strahler_orders,
'local_bifurcation_angles': _nrt.local_bifurcation_angles,
'remote_bifurcation_angles': _nrt.remote_bifurcation_angles,
'partition': _nrt.bifurcation_partitions,
'partition_asymmetry': _nrt.partition_asymmetries,
'partition_pairs': _nrt.partition_pairs,
'number_of_segments': _nrt.number_of_segments,
'segment_lengths': _nrt.segment_lengths,
'segment_volumes': _nrt.segment_volumes,
'segment_radii': _nrt.segment_radii,
'segment_midpoints': _nrt.segment_midpoints,
'segment_taper_rates': _nrt.segment_taper_rates,
'segment_radial_distances': _nrt.segment_radial_distances,
'segment_meander_angles': _nrt.segment_meander_angles,
'principal_direction_extents': _nrt.principal_direction_extents,
'total_area_per_neurite': _nrt.total_area_per_neurite,
}
NEURONFEATURES = {
'soma_radii': _nrn.soma_radii,
'soma_surface_areas': _nrn.soma_surface_areas,
'trunk_origin_radii': _nrn.trunk_origin_radii,
'trunk_origin_azimuths': _nrn.trunk_origin_azimuths,
'trunk_origin_elevations': _nrn.trunk_origin_elevations,
'trunk_section_lengths': _nrn.trunk_section_lengths,
'trunk_angles': _nrn.trunk_angles,
'trunk_vectors': _nrn.trunk_vectors,
'sholl_frequency': _nrn.sholl_frequency,
}
def register_neurite_feature(name, func):
if name in NEURITEFEATURES:
raise NeuroMError('Attempt to hide registered feature %s' % name)
def _fun(neurites, neurite_type=_ntype.all):
return list(func(n) for n in _ineurites(neurites, filt=_is_type(neurite_type)))
NEURONFEATURES[name] = _fun
def get(feature, obj, **kwargs):
feature = (NEURITEFEATURES[feature] if feature in NEURITEFEATURES
else NEURONFEATURES[feature])
return _np.array(list(feature(obj, **kwargs)))
_INDENT = ' ' * 4
def _indent(string, count):
indent = _INDENT * count
ret = indent + string.replace('\n', '\n' + indent)
return ret.rstrip()
def _get_doc():
def get_docstring(func):
docstring = ':\n'
if func.__doc__:
docstring += _indent(func.__doc__, 2)
return docstring
ret = ['\nNeurite features (neurite, neuron, neuron population):']
ret.extend(_INDENT + '- ' + feature + get_docstring(func)
for feature, func in sorted(NEURITEFEATURES.items()))
ret.append('\nNeuron features (neuron, neuron population):')
ret.extend(_INDENT + '- ' + feature + get_docstring(func)
for feature, func in sorted(NEURONFEATURES.items()))
return '\n'.join(ret)
get.__doc__ += _indent('\nFeatures:\n', 1) + _indent(_get_doc(), 2)
| true | true |
f73a0dbea7dcdfb682ba16834db550c8863f64e4 | 10,289 | py | Python | tests/testing/cPerf.py | sx-aurora-dev/llvm-lnt | 1befd8e072138ca843305a0b5e20e0883d19eafd | [
"Apache-2.0"
] | null | null | null | tests/testing/cPerf.py | sx-aurora-dev/llvm-lnt | 1befd8e072138ca843305a0b5e20e0883d19eafd | [
"Apache-2.0"
] | null | null | null | tests/testing/cPerf.py | sx-aurora-dev/llvm-lnt | 1befd8e072138ca843305a0b5e20e0883d19eafd | [
"Apache-2.0"
] | null | null | null | # RUN: python %s
import unittest
import sys
import os
import tempfile
from lnt.testing.profile.perf import LinuxPerfProfile
class CPerfTest(unittest.TestCase):
def setUp(self):
self.inputs = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'Inputs')
self.fake_nm = 'python %s/fake-nm.py' % self.inputs
self.expected_data = {
"fib-aarch64": {
u"counters": {u"cycles": 240949386},
u"functions": {
u"fib": {
u"counters": {u"cycles": 99.77243187496647},
u"data": [
[
{u"cycles": 22.476272172208624},
4196040,
u"\ta9be4ff4 \tstp\tx20, x19, [sp,#-32]!",
],
[
{u"cycles": 20.81533649797271},
4196044,
u"\ta9017bfd \tstp\tx29, x30, [sp,#16]",
],
[{}, 4196048, u"\t910043fd \tadd\tx29, sp, #0x10"],
[{}, 4196052, u"\t71000813 \tsubs\tw19, w0, #0x2"],
[{}, 4196056, u"\t540000eb \tb.lt\t4006f4 <fib+0x2c>"],
[
{u"cycles": 10.065491723992467},
4196060,
u"\t51000400 \tsub\tw0, w0, #0x1",
],
[{}, 4196064, u"\t97fffffa \tbl\t4006c8 <fib>"],
[
{u"cycles": 5.858831022967777},
4196068,
u"\t2a0003f4 \tmov\tw20, w0",
],
[{}, 4196072, u"\t2a1303e0 \tmov\tw0, w19"],
[{}, 4196076, u"\t97fffff7 \tbl\t4006c8 <fib>"],
[
{u"cycles": 7.57924022814841},
4196080,
u"\t0b140000 \tadd\tw0, w0, w20",
],
[
{u"cycles": 19.240308514111305},
4196084,
u"\ta9417bfd \tldp\tx29, x30, [sp,#16]",
],
[
{u"cycles": 13.964519840598708},
4196088,
u"\ta8c24ff4 \tldp\tx20, x19, [sp],#32",
],
[{}, 4196092, u"\td65f03c0 \tret"],
],
}
},
},
"fib2-aarch64": {
u"counters": {
u"branch-misses": 1820692,
u"cache-misses": 33054,
u"cycles": 243618286,
},
u"functions": {
u"fib": {
u"counters": {
u"branch-misses": 99.7405382129432,
u"cache-misses": 75.18000847098688,
u"cycles": 99.78902404723429,
},
u"data": [
[
{
u"branch-misses": 21.904846340904687,
u"cache-misses": 37.4486921529175,
u"cycles": 23.48637833693693,
},
4196040,
u"\ta9be4ff4 \tstp\tx20, x19, [sp,#-32]!",
],
[
{
u"branch-misses": 2.6443747907452115,
u"cache-misses": 17.08651911468813,
u"cycles": 20.34001001463117,
},
4196044,
u"\ta9017bfd \tstp\tx29, x30, [sp,#16]",
],
[{}, 4196048, u"\t910043fd \tadd\tx29, sp, #0x10"],
[{}, 4196052, u"\t71000813 \tsubs\tw19, w0, #0x2"],
[{}, 4196056, u"\t540000eb \tb.lt\t4006f4 <fib+0x2c>"],
[
{
u"branch-misses": 30.264575146698622,
u"cache-misses": 20.69215291750503,
u"cycles": 9.787981545863996,
},
4196060,
u"\t51000400 \tsub\tw0, w0, #0x1",
],
[{}, 4196064, u"\t97fffffa \tbl\t4006c8 <fib>"],
[
{
u"branch-misses": 0.11195131191739062,
u"cache-misses": 2.3621730382293764,
u"cycles": 7.702120542412432,
},
4196068,
u"\t2a0003f4 \tmov\tw20, w0",
],
[{}, 4196072, u"\t2a1303e0 \tmov\tw0, w19"],
[{}, 4196076, u"\t97fffff7 \tbl\t4006c8 <fib>"],
[
{
u"branch-misses": 19.03265916580028,
u"cache-misses": 3.8229376257545273,
u"cycles": 7.362266427937867,
},
4196080,
u"\t0b140000 \tadd\tw0, w0, w20",
],
[
{
u"branch-misses": 4.9891297644011345,
u"cache-misses": 7.553319919517103,
u"cycles": 18.387547715628735,
},
4196084,
u"\ta9417bfd \tldp\tx29, x30, [sp,#16]",
],
[
{
u"branch-misses": 21.05246347953268,
u"cache-misses": 11.03420523138833,
u"cycles": 12.93369541658887,
},
4196088,
u"\ta8c24ff4 \tldp\tx20, x19, [sp],#32",
],
[{}, 4196092, u"\td65f03c0 \tret"],
],
}
},
},
}
def _getNm(self, perf_data_fname, non_dynamic=False):
stub = perf_data_fname.rsplit('.perf_data', 1)[0]
s = 'python %s/fake-nm.py %s.nm.out' % (self.inputs, stub)
if non_dynamic:
s += ' --fake-nm-be-non-dynamic'
return s
def _getObjdump(self, perf_data_fname):
stub = perf_data_fname.rsplit('.perf_data', 1)[0]
return 'python %s/fake-objdump.py %s.objdump' % (self.inputs, stub)
    def _getInput(self, fname):
        '''Return the absolute path of fixture ``fname`` under Inputs/.'''
        return os.path.join(self.inputs, fname)
def test_check_file(self):
self.assertTrue(LinuxPerfProfile.checkFile(self._getInput('fib-aarch64.perf_data')))
def test_aarch64_fib(self):
perf_data = self._getInput('fib-aarch64.perf_data')
p = LinuxPerfProfile.deserialize(open(perf_data),
nm=self._getNm(perf_data),
objdump=self._getObjdump(perf_data),
propagateExceptions=True)
self.assertEqual(p.data, self.expected_data['fib-aarch64'])
def test_aarch64_fib2(self):
perf_data = self._getInput('fib2-aarch64.perf_data')
p = LinuxPerfProfile.deserialize(open(perf_data),
nm=self._getNm(perf_data),
objdump=self._getObjdump(perf_data),
propagateExceptions=True)
self.assertEqual(p.data, self.expected_data['fib2-aarch64'])
def test_aarch64_fib2_nondynamic(self):
perf_data = self._getInput('fib2-aarch64.perf_data')
p = LinuxPerfProfile.deserialize(open(perf_data),
nm=self._getNm(perf_data, True),
objdump=self._getObjdump(perf_data),
propagateExceptions=True)
self.assertEqual(p.data, self.expected_data['fib2-aarch64'])
def test_random_guff(self):
# Create complete rubbish and throw it at cPerf, expecting an
# AssertionError.
data = '6492gbiajng295akgjowj210441'
with tempfile.NamedTemporaryFile() as fd:
open(fd.name, 'w').write(data)
with self.assertRaises(AssertionError):
LinuxPerfProfile.deserialize(open(fd.name),
propagateExceptions=True)
def test_random_guff2(self):
# Create complete rubbish and throw it at cPerf, expecting an
# AssertionError. This version contains the correct magic number.
data = 'PERFILE28620k hshjsjhs&6362kkjh25090nnjh'
with tempfile.NamedTemporaryFile() as fd:
open(fd.name, 'w').write(data)
with self.assertRaises(AssertionError):
LinuxPerfProfile.deserialize(open(fd.name),
propagateExceptions=True)
if __name__ == '__main__':
    # lit runs this file with extra arguments; keep only argv[0] so
    # unittest's own CLI parsing is not confused by them.
    unittest.main(argv=sys.argv[:1])
| 45.526549 | 92 | 0.372631 |
import unittest
import sys
import os
import tempfile
from lnt.testing.profile.perf import LinuxPerfProfile
class CPerfTest(unittest.TestCase):
def setUp(self):
self.inputs = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'Inputs')
self.fake_nm = 'python %s/fake-nm.py' % self.inputs
self.expected_data = {
"fib-aarch64": {
u"counters": {u"cycles": 240949386},
u"functions": {
u"fib": {
u"counters": {u"cycles": 99.77243187496647},
u"data": [
[
{u"cycles": 22.476272172208624},
4196040,
u"\ta9be4ff4 \tstp\tx20, x19, [sp,#-32]!",
],
[
{u"cycles": 20.81533649797271},
4196044,
u"\ta9017bfd \tstp\tx29, x30, [sp,#16]",
],
[{}, 4196048, u"\t910043fd \tadd\tx29, sp, #0x10"],
[{}, 4196052, u"\t71000813 \tsubs\tw19, w0, #0x2"],
[{}, 4196056, u"\t540000eb \tb.lt\t4006f4 <fib+0x2c>"],
[
{u"cycles": 10.065491723992467},
4196060,
u"\t51000400 \tsub\tw0, w0, #0x1",
],
[{}, 4196064, u"\t97fffffa \tbl\t4006c8 <fib>"],
[
{u"cycles": 5.858831022967777},
4196068,
u"\t2a0003f4 \tmov\tw20, w0",
],
[{}, 4196072, u"\t2a1303e0 \tmov\tw0, w19"],
[{}, 4196076, u"\t97fffff7 \tbl\t4006c8 <fib>"],
[
{u"cycles": 7.57924022814841},
4196080,
u"\t0b140000 \tadd\tw0, w0, w20",
],
[
{u"cycles": 19.240308514111305},
4196084,
u"\ta9417bfd \tldp\tx29, x30, [sp,#16]",
],
[
{u"cycles": 13.964519840598708},
4196088,
u"\ta8c24ff4 \tldp\tx20, x19, [sp],#32",
],
[{}, 4196092, u"\td65f03c0 \tret"],
],
}
},
},
"fib2-aarch64": {
u"counters": {
u"branch-misses": 1820692,
u"cache-misses": 33054,
u"cycles": 243618286,
},
u"functions": {
u"fib": {
u"counters": {
u"branch-misses": 99.7405382129432,
u"cache-misses": 75.18000847098688,
u"cycles": 99.78902404723429,
},
u"data": [
[
{
u"branch-misses": 21.904846340904687,
u"cache-misses": 37.4486921529175,
u"cycles": 23.48637833693693,
},
4196040,
u"\ta9be4ff4 \tstp\tx20, x19, [sp,#-32]!",
],
[
{
u"branch-misses": 2.6443747907452115,
u"cache-misses": 17.08651911468813,
u"cycles": 20.34001001463117,
},
4196044,
u"\ta9017bfd \tstp\tx29, x30, [sp,#16]",
],
[{}, 4196048, u"\t910043fd \tadd\tx29, sp, #0x10"],
[{}, 4196052, u"\t71000813 \tsubs\tw19, w0, #0x2"],
[{}, 4196056, u"\t540000eb \tb.lt\t4006f4 <fib+0x2c>"],
[
{
u"branch-misses": 30.264575146698622,
u"cache-misses": 20.69215291750503,
u"cycles": 9.787981545863996,
},
4196060,
u"\t51000400 \tsub\tw0, w0, #0x1",
],
[{}, 4196064, u"\t97fffffa \tbl\t4006c8 <fib>"],
[
{
u"branch-misses": 0.11195131191739062,
u"cache-misses": 2.3621730382293764,
u"cycles": 7.702120542412432,
},
4196068,
u"\t2a0003f4 \tmov\tw20, w0",
],
[{}, 4196072, u"\t2a1303e0 \tmov\tw0, w19"],
[{}, 4196076, u"\t97fffff7 \tbl\t4006c8 <fib>"],
[
{
u"branch-misses": 19.03265916580028,
u"cache-misses": 3.8229376257545273,
u"cycles": 7.362266427937867,
},
4196080,
u"\t0b140000 \tadd\tw0, w0, w20",
],
[
{
u"branch-misses": 4.9891297644011345,
u"cache-misses": 7.553319919517103,
u"cycles": 18.387547715628735,
},
4196084,
u"\ta9417bfd \tldp\tx29, x30, [sp,#16]",
],
[
{
u"branch-misses": 21.05246347953268,
u"cache-misses": 11.03420523138833,
u"cycles": 12.93369541658887,
},
4196088,
u"\ta8c24ff4 \tldp\tx20, x19, [sp],#32",
],
[{}, 4196092, u"\td65f03c0 \tret"],
],
}
},
},
}
def _getNm(self, perf_data_fname, non_dynamic=False):
stub = perf_data_fname.rsplit('.perf_data', 1)[0]
s = 'python %s/fake-nm.py %s.nm.out' % (self.inputs, stub)
if non_dynamic:
s += ' --fake-nm-be-non-dynamic'
return s
def _getObjdump(self, perf_data_fname):
stub = perf_data_fname.rsplit('.perf_data', 1)[0]
return 'python %s/fake-objdump.py %s.objdump' % (self.inputs, stub)
def _getInput(self, fname):
return os.path.join(self.inputs, fname)
def test_check_file(self):
self.assertTrue(LinuxPerfProfile.checkFile(self._getInput('fib-aarch64.perf_data')))
def test_aarch64_fib(self):
perf_data = self._getInput('fib-aarch64.perf_data')
p = LinuxPerfProfile.deserialize(open(perf_data),
nm=self._getNm(perf_data),
objdump=self._getObjdump(perf_data),
propagateExceptions=True)
self.assertEqual(p.data, self.expected_data['fib-aarch64'])
def test_aarch64_fib2(self):
perf_data = self._getInput('fib2-aarch64.perf_data')
p = LinuxPerfProfile.deserialize(open(perf_data),
nm=self._getNm(perf_data),
objdump=self._getObjdump(perf_data),
propagateExceptions=True)
self.assertEqual(p.data, self.expected_data['fib2-aarch64'])
def test_aarch64_fib2_nondynamic(self):
perf_data = self._getInput('fib2-aarch64.perf_data')
p = LinuxPerfProfile.deserialize(open(perf_data),
nm=self._getNm(perf_data, True),
objdump=self._getObjdump(perf_data),
propagateExceptions=True)
self.assertEqual(p.data, self.expected_data['fib2-aarch64'])
def test_random_guff(self):
data = '6492gbiajng295akgjowj210441'
with tempfile.NamedTemporaryFile() as fd:
open(fd.name, 'w').write(data)
with self.assertRaises(AssertionError):
LinuxPerfProfile.deserialize(open(fd.name),
propagateExceptions=True)
def test_random_guff2(self):
data = 'PERFILE28620k hshjsjhs&6362kkjh25090nnjh'
with tempfile.NamedTemporaryFile() as fd:
open(fd.name, 'w').write(data)
with self.assertRaises(AssertionError):
LinuxPerfProfile.deserialize(open(fd.name),
propagateExceptions=True)
if __name__ == '__main__':
unittest.main(argv=[sys.argv[0], ])
| true | true |
f73a0ddb2db194fce0ec4cf9ef67bddd85155a2d | 1,962 | py | Python | bw2io/strategies/biosphere.py | mfastudillo/brightway2-io | dc383ddb6003a46e78259aeb7f87b9d80965d689 | [
"BSD-3-Clause"
] | null | null | null | bw2io/strategies/biosphere.py | mfastudillo/brightway2-io | dc383ddb6003a46e78259aeb7f87b9d80965d689 | [
"BSD-3-Clause"
] | null | null | null | bw2io/strategies/biosphere.py | mfastudillo/brightway2-io | dc383ddb6003a46e78259aeb7f87b9d80965d689 | [
"BSD-3-Clause"
] | null | null | null | from .migrations import migrate_exchanges, migrate_datasets
def drop_unspecified_subcategories(db):
    """Drop trailing subcategories when they are one of the following:

    * ``unspecified``
    * ``(unspecified)``
    * ``''`` (empty string)
    * ``None``

    Operates in place on datasets and their exchanges; returns ``db``.
    """
    unwanted = {"unspecified", "(unspecified)", "", None}

    def _trim(obj):
        # Strip unwanted values off the end of ``obj['categories']``.
        cats = obj.get("categories")
        if cats:
            while cats and cats[-1] in unwanted:
                cats = cats[:-1]
            obj["categories"] = cats

    for ds in db:
        _trim(ds)
        for exc in ds.get("exchanges", []):
            _trim(exc)
    return db
def normalize_biosphere_names(db, lcia=False):
    """Normalize biosphere flow names to ecoinvent 3.1 standard.

    Assumes that each dataset and each exchange have a ``name``. Will change
    names even if exchange is already linked."""
    db = migrate_exchanges(db, migration="biosphere-2-3-names")
    # LCIA methods only carry exchanges, so skip the dataset migration.
    return db if lcia else migrate_datasets(db, migration="biosphere-2-3-names")
def normalize_biosphere_categories(db, lcia=False):
    """Normalize biosphere categories to ecoinvent 3.1 standard"""
    db = migrate_exchanges(db, migration="biosphere-2-3-categories")
    # LCIA methods only carry exchanges, so skip the dataset migration.
    return db if lcia else migrate_datasets(db, migration="biosphere-2-3-categories")
def strip_biosphere_exc_locations(db):
    """Biosphere flows don't have locations - if any are included they can confuse linking"""
    for dataset in db:
        for exchange in dataset.get("exchanges", []):
            if exchange.get("type") == "biosphere":
                # ``pop`` with a default is a no-op when the key is absent.
                exchange.pop("location", None)
    return db
def ensure_categories_are_tuples(db):
    """Coerce each dataset's ``categories`` value to a tuple.

    Datasets without categories (or with empty/None categories) are left
    untouched. Operates in place and returns ``db``.
    """
    for ds in db:
        categories = ds.get("categories")
        # ``isinstance`` instead of ``type(...) != tuple`` so tuple
        # subclasses (e.g. namedtuples) are not needlessly copied.
        if categories and not isinstance(categories, tuple):
            ds["categories"] = tuple(categories)
    return db
| 35.035714 | 121 | 0.626911 | from .migrations import migrate_exchanges, migrate_datasets
def drop_unspecified_subcategories(db):
UNSPECIFIED = {"unspecified", "(unspecified)", "", None}
for ds in db:
if ds.get("categories"):
while ds["categories"] and ds["categories"][-1] in UNSPECIFIED:
ds["categories"] = ds["categories"][:-1]
for exc in ds.get("exchanges", []):
if exc.get("categories"):
while exc["categories"] and exc["categories"][-1] in UNSPECIFIED:
exc["categories"] = exc["categories"][:-1]
return db
def normalize_biosphere_names(db, lcia=False):
db = migrate_exchanges(db, migration="biosphere-2-3-names")
if not lcia:
db = migrate_datasets(db, migration="biosphere-2-3-names")
return db
def normalize_biosphere_categories(db, lcia=False):
db = migrate_exchanges(db, migration="biosphere-2-3-categories")
if not lcia:
db = migrate_datasets(db, migration="biosphere-2-3-categories")
return db
def strip_biosphere_exc_locations(db):
for ds in db:
for exc in ds.get("exchanges", []):
if exc.get("type") == "biosphere" and "location" in exc:
del exc["location"]
return db
def ensure_categories_are_tuples(db):
for ds in db:
if ds.get("categories") and type(ds["categories"]) != tuple:
ds["categories"] = tuple(ds["categories"])
return db
| true | true |
f73a0de2c7e56bea4206d064a99da01d4126616a | 1,159 | py | Python | corehq/apps/styleguide/views/docs.py | johan--/commcare-hq | 86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd | [
"BSD-3-Clause"
] | 1 | 2017-02-10T03:14:51.000Z | 2017-02-10T03:14:51.000Z | corehq/apps/styleguide/views/docs.py | bglar/commcare-hq | 972129fc26864c08c7bef07874bd2a7218550bff | [
"BSD-3-Clause"
] | 1 | 2022-03-12T01:03:25.000Z | 2022-03-12T01:03:25.000Z | corehq/apps/styleguide/views/docs.py | johan--/commcare-hq | 86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd | [
"BSD-3-Clause"
] | null | null | null | from django.http import HttpResponse
from django.utils.translation import ugettext_noop
from corehq.apps.styleguide.examples.simple_crispy_form.views import \
BaseSimpleCrispyFormSectionView
def default(request):
    """Placeholder view; returns a plain 'woot' response."""
    return HttpResponse('woot')
class FormsSimpleCrispyFormExampleView(BaseSimpleCrispyFormSectionView):
    """Docs page for the simple crispy form example's forms.py."""
    urlname = 'ex_simple_crispy_form_doc_forms'
    template_name = 'styleguide/docs/simple_crispy_form/forms.html'
    page_title = ugettext_noop("forms.py")
class ViewsSimpleCrispyFormExampleView(BaseSimpleCrispyFormSectionView):
    """Docs page rendering the simple-crispy-form example's views.py source."""
    page_title = ugettext_noop("views.py")
    template_name = 'styleguide/docs/simple_crispy_form/views.html'
    urlname = 'ex_simple_crispy_form_doc_views'
class SelectControlFormExampleView(BaseSimpleCrispyFormSectionView):
    """Docs page rendering the controls-demo example's forms.py source."""
    page_title = ugettext_noop("forms.py")
    template_name = 'styleguide/docs/controls_demo/forms.html'
    urlname = 'ex_controls_demo_doc_forms'
class SelectControlViewExampleView(BaseSimpleCrispyFormSectionView):
    """Docs page rendering the controls-demo example's views.py source."""
    page_title = ugettext_noop("views.py")
    template_name = 'styleguide/docs/controls_demo/views.html'
    urlname = 'ex_controls_demo_doc_views'
| 35.121212 | 72 | 0.811044 | from django.http import HttpResponse
from django.utils.translation import ugettext_noop
from corehq.apps.styleguide.examples.simple_crispy_form.views import \
BaseSimpleCrispyFormSectionView
def default(request):
    """Fallback styleguide docs view; responds with the plain text 'woot'."""
    response = HttpResponse('woot')
    return response
class FormsSimpleCrispyFormExampleView(BaseSimpleCrispyFormSectionView):
    """Docs page rendering the simple-crispy-form example's forms.py source."""
    page_title = ugettext_noop("forms.py")
    template_name = 'styleguide/docs/simple_crispy_form/forms.html'
    urlname = 'ex_simple_crispy_form_doc_forms'
class ViewsSimpleCrispyFormExampleView(BaseSimpleCrispyFormSectionView):
    """Docs page rendering the simple-crispy-form example's views.py source."""
    page_title = ugettext_noop("views.py")
    template_name = 'styleguide/docs/simple_crispy_form/views.html'
    urlname = 'ex_simple_crispy_form_doc_views'
class SelectControlFormExampleView(BaseSimpleCrispyFormSectionView):
    """Docs page rendering the controls-demo example's forms.py source."""
    page_title = ugettext_noop("forms.py")
    template_name = 'styleguide/docs/controls_demo/forms.html'
    urlname = 'ex_controls_demo_doc_forms'
class SelectControlViewExampleView(BaseSimpleCrispyFormSectionView):
    """Docs page rendering the controls-demo example's views.py source."""
    page_title = ugettext_noop("views.py")
    template_name = 'styleguide/docs/controls_demo/views.html'
    urlname = 'ex_controls_demo_doc_views'
| true | true |
f73a0e68b9586d049c9d0d1b572cb4200bb93bde | 44,179 | py | Python | dbaas/maintenance/migrations/0025_auto__add_field_databasecreate_plan_name__chg_field_databasecreate_pla.py | didindinn/database-as-a-service | 747de31ff8546f7874ddd654af860e130afd17a0 | [
"BSD-3-Clause"
] | 303 | 2015-01-08T10:35:54.000Z | 2022-02-28T08:54:06.000Z | dbaas/maintenance/migrations/0025_auto__add_field_databasecreate_plan_name__chg_field_databasecreate_pla.py | nouraellm/database-as-a-service | 5e655c9347bea991b7218a01549f5e44f161d7be | [
"BSD-3-Clause"
] | 124 | 2015-01-14T12:56:15.000Z | 2022-03-22T20:45:11.000Z | dbaas/maintenance/migrations/0025_auto__add_field_databasecreate_plan_name__chg_field_databasecreate_pla.py | nouraellm/database-as-a-service | 5e655c9347bea991b7218a01549f5e44f161d7be | [
"BSD-3-Clause"
] | 110 | 2015-01-02T11:59:48.000Z | 2022-02-28T08:54:06.000Z | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'DatabaseCreate.plan_name'
db.add_column(u'maintenance_databasecreate', 'plan_name',
self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True),
keep_default=False)
# Changing field 'DatabaseCreate.plan'
db.alter_column(u'maintenance_databasecreate', 'plan_id', self.gf('django.db.models.fields.related.ForeignKey')(null=True, to=orm['physical.Plan']))
def backwards(self, orm):
# Deleting field 'DatabaseCreate.plan_name'
db.delete_column(u'maintenance_databasecreate', 'plan_name')
# User chose to not deal with backwards NULL issues for 'DatabaseCreate.plan'
#raise RuntimeError("Cannot reverse this migration. 'DatabaseCreate.plan' and its values cannot be restored.")
# The following code is provided here to aid in writing a correct migration
# Changing field 'DatabaseCreate.plan'
db.alter_column(u'maintenance_databasecreate', 'plan_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['physical.Plan']))
models = {
u'account.team': {
'Meta': {'ordering': "[u'name']", 'object_name': 'Team'},
'contacts': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'database_alocation_limit': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '2'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'backup.backupgroup': {
'Meta': {'object_name': 'BackupGroup'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'dbaas_cloudstack.cloudstackoffering': {
'Meta': {'object_name': 'CloudStackOffering'},
'cpus': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'equivalent_offering': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dbaas_cloudstack.CloudStackOffering']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'memory_size_mb': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'cs_offering_region'", 'null': 'True', 'to': u"orm['dbaas_cloudstack.CloudStackRegion']"}),
'serviceofferingid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'weaker': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'dbaas_cloudstack.cloudstackpack': {
'Meta': {'object_name': 'CloudStackPack'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'cs_packs'", 'to': u"orm['physical.EngineType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'offering': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'cs_offering_packs'", 'to': u"orm['dbaas_cloudstack.CloudStackOffering']"}),
'script_file': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'dbaas_cloudstack.cloudstackregion': {
'Meta': {'object_name': 'CloudStackRegion'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'cs_environment_region'", 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'logical.database': {
'Meta': {'ordering': "(u'name',)", 'unique_together': "((u'name', u'environment'),)", 'object_name': 'Database'},
'backup_path': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databases'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DatabaseInfra']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'disk_auto_resize': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databases'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_in_quarantine': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_protected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'databases'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['logical.Project']"}),
'quarantine_dt': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'quarantine_user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'databases_quarantine'", 'null': 'True', 'to': u"orm['auth.User']"}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'subscribe_to_email_events': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'databases'", 'null': 'True', 'to': u"orm['account.Team']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'used_size_in_bytes': ('django.db.models.fields.FloatField', [], {'default': '0.0'})
},
u'logical.project': {
'Meta': {'ordering': "[u'name']", 'object_name': 'Project'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'maintenance.databasechangeparameter': {
'Meta': {'object_name': 'DatabaseChangeParameter'},
'can_do_retry': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_step': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'database': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'change_parameters'", 'to': u"orm['logical.Database']"}),
'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'database_change_parameters'", 'to': u"orm['notification.TaskHistory']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'maintenance.databasecreate': {
'Meta': {'object_name': 'DatabaseCreate'},
'can_do_retry': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_step': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'database': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'databases_create'", 'null': 'True', 'to': u"orm['logical.Database']"}),
'description': ('django.db.models.fields.TextField', [], {}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databases_create'", 'to': u"orm['physical.Environment']"}),
'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'infra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databases_create'", 'to': u"orm['physical.DatabaseInfra']"}),
'is_protected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'databases_create'", 'null': 'True', 'to': u"orm['physical.Plan']"}),
'plan_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'databases_create'", 'null': 'True', 'to': u"orm['logical.Project']"}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'subscribe_to_email_events': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'create_database'", 'to': u"orm['notification.TaskHistory']"}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databases_create'", 'to': u"orm['account.Team']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'maintenance.databasereinstallvm': {
'Meta': {'object_name': 'DatabaseReinstallVM'},
'can_do_retry': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_step': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'database': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'reinstall_vm'", 'to': u"orm['logical.Database']"}),
'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'database_reinstall_vm'", 'to': u"orm['physical.Instance']"}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'database_reinsgtall_vm'", 'to': u"orm['notification.TaskHistory']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'maintenance.databaseresize': {
'Meta': {'object_name': 'DatabaseResize'},
'can_do_retry': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_step': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'database': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'resizes'", 'to': u"orm['logical.Database']"}),
'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source_offer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'database_resizes_source'", 'to': u"orm['dbaas_cloudstack.CloudStackPack']"}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'target_offer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'database_resizes_target'", 'to': u"orm['dbaas_cloudstack.CloudStackPack']"}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'database_resizes'", 'to': u"orm['notification.TaskHistory']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'maintenance.databaserestore': {
'Meta': {'object_name': 'DatabaseRestore'},
'can_do_retry': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_step': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'database': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'database_restore'", 'to': u"orm['logical.Database']"}),
'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'database_restore'", 'to': u"orm['backup.BackupGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'database_restore_new'", 'null': 'True', 'to': u"orm['backup.BackupGroup']"}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'database_restore'", 'to': u"orm['notification.TaskHistory']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'maintenance.databaserestoreinstancepair': {
'Meta': {'unique_together': "((u'master', u'slave', u'restore'),)", 'object_name': 'DatabaseRestoreInstancePair'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'restore_master'", 'to': u"orm['physical.Instance']"}),
'restore': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'restore_instances'", 'to': u"orm['maintenance.DatabaseRestore']"}),
'slave': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'restore_slave'", 'to': u"orm['physical.Instance']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'maintenance.databaseupgrade': {
'Meta': {'object_name': 'DatabaseUpgrade'},
'can_do_retry': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_step': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'database': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'upgrades'", 'to': u"orm['logical.Database']"}),
'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'database_upgrades_source'", 'null': 'True', 'to': u"orm['physical.Plan']"}),
'source_plan_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'target_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'database_upgrades_target'", 'null': 'True', 'to': u"orm['physical.Plan']"}),
'target_plan_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'database_upgrades'", 'to': u"orm['notification.TaskHistory']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'maintenance.hostmaintenance': {
'Meta': {'unique_together': "((u'host', u'maintenance'),)", 'object_name': 'HostMaintenance', 'index_together': "[[u'host', u'maintenance']]"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'host': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'host_maintenance'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Host']"}),
'hostname': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'main_log': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'maintenance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'maintenance'", 'to': u"orm['maintenance.Maintenance']"}),
'rollback_log': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '4'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'maintenance.maintenance': {
'Meta': {'object_name': 'Maintenance'},
'affected_hosts': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'celery_task_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'hostsid': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '10000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'main_script': ('django.db.models.fields.TextField', [], {}),
'maximum_workers': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'revoked_by': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'rollback_script': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'scheduled_for': ('django.db.models.fields.DateTimeField', [], {'unique': 'True'}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'maintenance.maintenanceparameters': {
'Meta': {'object_name': 'MaintenanceParameters'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'function_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'maintenance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'maintenance_params'", 'to': u"orm['maintenance.Maintenance']"}),
'parameter_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'notification.taskhistory': {
'Meta': {'object_name': 'TaskHistory'},
'arguments': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'context': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'db_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'ended_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_class': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'task_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'task_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'task_status': ('django.db.models.fields.CharField', [], {'default': "u'WAITING'", 'max_length': '100', 'db_index': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'physical.databaseinfra': {
'Meta': {'object_name': 'DatabaseInfra'},
'capacity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'database_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
'endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'endpoint_dns': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Engine']"}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_vm_created': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name_prefix': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'name_stamp': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'blank': 'True'}),
'per_database_size_mbytes': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Plan']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'physical.diskoffering': {
'Meta': {'object_name': 'DiskOffering'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'size_kb': ('django.db.models.fields.PositiveIntegerField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.engine': {
'Meta': {'ordering': "(u'engine_type__name', u'version')", 'unique_together': "((u'version', u'engine_type'),)", 'object_name': 'Engine'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'engines'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
'engine_upgrade_option': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_engine'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Engine']"}),
'has_users': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'read_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'template_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user_data_script': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'write_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'physical.enginetype': {
'Meta': {'ordering': "(u'name',)", 'object_name': 'EngineType'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_in_memory': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.environment': {
'Meta': {'object_name': 'Environment'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'migrate_environment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'migrate_to'", 'null': 'True', 'to': u"orm['physical.Environment']"}),
'min_of_zones': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.host': {
'Meta': {'object_name': 'Host'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'future_host': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Host']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'monitor_url': ('django.db.models.fields.URLField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'os_description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.instance': {
'Meta': {'unique_together': "((u'address', u'port'),)", 'object_name': 'Instance'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.DatabaseInfra']"}),
'dns': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'future_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Instance']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'hostname': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.Host']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'port': ('django.db.models.fields.IntegerField', [], {}),
'read_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shard': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.parameter': {
'Meta': {'ordering': "(u'engine_type__name', u'name')", 'unique_together': "((u'name', u'engine_type'),)", 'object_name': 'Parameter'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'custom_method': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'dynamic': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'enginetype'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.plan': {
'Meta': {'object_name': 'Plan'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'plans'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plans'", 'to': u"orm['physical.Engine']"}),
'engine_equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_plan'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'plans'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
'has_persistence': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_ha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'max_db_size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'migrate_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'migrate_to'", 'null': 'True', 'to': u"orm['physical.Plan']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'provider': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'replication_topology': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'replication_topology'", 'null': 'True', 'to': u"orm['physical.ReplicationTopology']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.replicationtopology': {
'Meta': {'object_name': 'ReplicationTopology'},
'can_change_parameters': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_clone_db': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_reinstall_vm': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_resize_vm': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_switch_master': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_upgrade_db': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'class_path': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'replication_topologies'", 'symmetrical': 'False', 'to': u"orm['physical.Engine']"}),
'has_horizontal_scalability': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'parameter': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'replication_topologies'", 'blank': 'True', 'to': u"orm['physical.Parameter']"}),
'script': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'replication_topologies'", 'null': 'True', 'to': u"orm['physical.Script']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.script': {
'Meta': {'object_name': 'Script'},
'configuration': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initialization': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'start_database': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'start_replication': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
}
}
complete_apps = ['maintenance']
| 94.804721 | 227 | 0.58143 |
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.add_column(u'maintenance_databasecreate', 'plan_name',
self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True),
keep_default=False)
db.alter_column(u'maintenance_databasecreate', 'plan_id', self.gf('django.db.models.fields.related.ForeignKey')(null=True, to=orm['physical.Plan']))
def backwards(self, orm):
db.delete_column(u'maintenance_databasecreate', 'plan_name')
db.alter_column(u'maintenance_databasecreate', 'plan_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['physical.Plan']))
models = {
u'account.team': {
'Meta': {'ordering': "[u'name']", 'object_name': 'Team'},
'contacts': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'database_alocation_limit': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '2'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'backup.backupgroup': {
'Meta': {'object_name': 'BackupGroup'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'dbaas_cloudstack.cloudstackoffering': {
'Meta': {'object_name': 'CloudStackOffering'},
'cpus': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'equivalent_offering': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dbaas_cloudstack.CloudStackOffering']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'memory_size_mb': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'cs_offering_region'", 'null': 'True', 'to': u"orm['dbaas_cloudstack.CloudStackRegion']"}),
'serviceofferingid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'weaker': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'dbaas_cloudstack.cloudstackpack': {
'Meta': {'object_name': 'CloudStackPack'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'cs_packs'", 'to': u"orm['physical.EngineType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'offering': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'cs_offering_packs'", 'to': u"orm['dbaas_cloudstack.CloudStackOffering']"}),
'script_file': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'dbaas_cloudstack.cloudstackregion': {
'Meta': {'object_name': 'CloudStackRegion'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'cs_environment_region'", 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'logical.database': {
'Meta': {'ordering': "(u'name',)", 'unique_together': "((u'name', u'environment'),)", 'object_name': 'Database'},
'backup_path': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databases'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DatabaseInfra']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'disk_auto_resize': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databases'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_in_quarantine': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_protected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'databases'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['logical.Project']"}),
'quarantine_dt': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'quarantine_user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'databases_quarantine'", 'null': 'True', 'to': u"orm['auth.User']"}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'subscribe_to_email_events': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'databases'", 'null': 'True', 'to': u"orm['account.Team']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'used_size_in_bytes': ('django.db.models.fields.FloatField', [], {'default': '0.0'})
},
u'logical.project': {
'Meta': {'ordering': "[u'name']", 'object_name': 'Project'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'maintenance.databasechangeparameter': {
'Meta': {'object_name': 'DatabaseChangeParameter'},
'can_do_retry': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_step': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'database': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'change_parameters'", 'to': u"orm['logical.Database']"}),
'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'database_change_parameters'", 'to': u"orm['notification.TaskHistory']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'maintenance.databasecreate': {
'Meta': {'object_name': 'DatabaseCreate'},
'can_do_retry': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_step': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'database': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'databases_create'", 'null': 'True', 'to': u"orm['logical.Database']"}),
'description': ('django.db.models.fields.TextField', [], {}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databases_create'", 'to': u"orm['physical.Environment']"}),
'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'infra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databases_create'", 'to': u"orm['physical.DatabaseInfra']"}),
'is_protected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'databases_create'", 'null': 'True', 'to': u"orm['physical.Plan']"}),
'plan_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'databases_create'", 'null': 'True', 'to': u"orm['logical.Project']"}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'subscribe_to_email_events': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'create_database'", 'to': u"orm['notification.TaskHistory']"}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databases_create'", 'to': u"orm['account.Team']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'maintenance.databasereinstallvm': {
'Meta': {'object_name': 'DatabaseReinstallVM'},
'can_do_retry': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_step': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'database': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'reinstall_vm'", 'to': u"orm['logical.Database']"}),
'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'database_reinstall_vm'", 'to': u"orm['physical.Instance']"}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'database_reinsgtall_vm'", 'to': u"orm['notification.TaskHistory']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'maintenance.databaseresize': {
'Meta': {'object_name': 'DatabaseResize'},
'can_do_retry': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_step': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'database': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'resizes'", 'to': u"orm['logical.Database']"}),
'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source_offer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'database_resizes_source'", 'to': u"orm['dbaas_cloudstack.CloudStackPack']"}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'target_offer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'database_resizes_target'", 'to': u"orm['dbaas_cloudstack.CloudStackPack']"}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'database_resizes'", 'to': u"orm['notification.TaskHistory']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'maintenance.databaserestore': {
'Meta': {'object_name': 'DatabaseRestore'},
'can_do_retry': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_step': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'database': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'database_restore'", 'to': u"orm['logical.Database']"}),
'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'database_restore'", 'to': u"orm['backup.BackupGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'database_restore_new'", 'null': 'True', 'to': u"orm['backup.BackupGroup']"}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'database_restore'", 'to': u"orm['notification.TaskHistory']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'maintenance.databaserestoreinstancepair': {
'Meta': {'unique_together': "((u'master', u'slave', u'restore'),)", 'object_name': 'DatabaseRestoreInstancePair'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'restore_master'", 'to': u"orm['physical.Instance']"}),
'restore': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'restore_instances'", 'to': u"orm['maintenance.DatabaseRestore']"}),
'slave': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'restore_slave'", 'to': u"orm['physical.Instance']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'maintenance.databaseupgrade': {
'Meta': {'object_name': 'DatabaseUpgrade'},
'can_do_retry': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_step': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'database': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'upgrades'", 'to': u"orm['logical.Database']"}),
'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'database_upgrades_source'", 'null': 'True', 'to': u"orm['physical.Plan']"}),
'source_plan_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'target_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'database_upgrades_target'", 'null': 'True', 'to': u"orm['physical.Plan']"}),
'target_plan_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'database_upgrades'", 'to': u"orm['notification.TaskHistory']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'maintenance.hostmaintenance': {
'Meta': {'unique_together': "((u'host', u'maintenance'),)", 'object_name': 'HostMaintenance', 'index_together': "[[u'host', u'maintenance']]"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'host': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'host_maintenance'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Host']"}),
'hostname': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'main_log': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'maintenance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'maintenance'", 'to': u"orm['maintenance.Maintenance']"}),
'rollback_log': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '4'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'maintenance.maintenance': {
'Meta': {'object_name': 'Maintenance'},
'affected_hosts': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'celery_task_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'hostsid': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '10000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'main_script': ('django.db.models.fields.TextField', [], {}),
'maximum_workers': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'revoked_by': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'rollback_script': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'scheduled_for': ('django.db.models.fields.DateTimeField', [], {'unique': 'True'}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'maintenance.maintenanceparameters': {
'Meta': {'object_name': 'MaintenanceParameters'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'function_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'maintenance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'maintenance_params'", 'to': u"orm['maintenance.Maintenance']"}),
'parameter_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'notification.taskhistory': {
'Meta': {'object_name': 'TaskHistory'},
'arguments': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'context': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'db_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'ended_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_class': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'task_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'task_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'task_status': ('django.db.models.fields.CharField', [], {'default': "u'WAITING'", 'max_length': '100', 'db_index': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'physical.databaseinfra': {
'Meta': {'object_name': 'DatabaseInfra'},
'capacity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'database_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
'endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'endpoint_dns': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Engine']"}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_vm_created': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name_prefix': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'name_stamp': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'blank': 'True'}),
'per_database_size_mbytes': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Plan']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'physical.diskoffering': {
'Meta': {'object_name': 'DiskOffering'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'size_kb': ('django.db.models.fields.PositiveIntegerField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.engine': {
'Meta': {'ordering': "(u'engine_type__name', u'version')", 'unique_together': "((u'version', u'engine_type'),)", 'object_name': 'Engine'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'engines'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
'engine_upgrade_option': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_engine'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Engine']"}),
'has_users': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'read_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'template_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user_data_script': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'write_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'physical.enginetype': {
'Meta': {'ordering': "(u'name',)", 'object_name': 'EngineType'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_in_memory': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.environment': {
'Meta': {'object_name': 'Environment'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'migrate_environment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'migrate_to'", 'null': 'True', 'to': u"orm['physical.Environment']"}),
'min_of_zones': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.host': {
'Meta': {'object_name': 'Host'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'future_host': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Host']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'monitor_url': ('django.db.models.fields.URLField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'os_description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.instance': {
'Meta': {'unique_together': "((u'address', u'port'),)", 'object_name': 'Instance'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.DatabaseInfra']"}),
'dns': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'future_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Instance']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'hostname': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.Host']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'port': ('django.db.models.fields.IntegerField', [], {}),
'read_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shard': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.parameter': {
'Meta': {'ordering': "(u'engine_type__name', u'name')", 'unique_together': "((u'name', u'engine_type'),)", 'object_name': 'Parameter'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'custom_method': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'dynamic': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'enginetype'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.plan': {
'Meta': {'object_name': 'Plan'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'plans'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plans'", 'to': u"orm['physical.Engine']"}),
'engine_equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_plan'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'plans'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
'has_persistence': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_ha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'max_db_size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'migrate_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'migrate_to'", 'null': 'True', 'to': u"orm['physical.Plan']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'provider': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'replication_topology': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'replication_topology'", 'null': 'True', 'to': u"orm['physical.ReplicationTopology']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.replicationtopology': {
'Meta': {'object_name': 'ReplicationTopology'},
'can_change_parameters': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_clone_db': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_reinstall_vm': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_resize_vm': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_switch_master': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_upgrade_db': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'class_path': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'replication_topologies'", 'symmetrical': 'False', 'to': u"orm['physical.Engine']"}),
'has_horizontal_scalability': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'parameter': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'replication_topologies'", 'blank': 'True', 'to': u"orm['physical.Parameter']"}),
'script': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'replication_topologies'", 'null': 'True', 'to': u"orm['physical.Script']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.script': {
'Meta': {'object_name': 'Script'},
'configuration': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initialization': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'start_database': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'start_replication': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
}
}
complete_apps = ['maintenance']
| true | true |
f73a0f687f6d4f639ddfc53aa61e341146b91a13 | 1,704 | gyp | Python | src/trusted/validator/validator.gyp | MicrohexHQ/nacl_contracts | 3efab5eecb3cf7ba43f2d61000e65918aa4ba77a | [
"BSD-3-Clause"
] | 6 | 2015-02-06T23:41:01.000Z | 2015-10-21T03:08:51.000Z | src/trusted/validator/validator.gyp | MicrohexHQ/nacl_contracts | 3efab5eecb3cf7ba43f2d61000e65918aa4ba77a | [
"BSD-3-Clause"
] | null | null | null | src/trusted/validator/validator.gyp | MicrohexHQ/nacl_contracts | 3efab5eecb3cf7ba43f2d61000e65918aa4ba77a | [
"BSD-3-Clause"
] | 1 | 2019-10-02T08:41:50.000Z | 2019-10-02T08:41:50.000Z | # -*- gyp -*-
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
  'includes': [
    # Common build configuration shared by all Native Client gyp files.
    '../../../build/common.gypi',
  ],
  'targets': [
    {
      # Static library wrapping validator_init.c, the validator start-up /
      # initialisation entry point.
      'target_name': 'validators',
      'type': 'static_library',
      'sources' : [
        'validator_init.c',
      ],
      'conditions': [
        # When the Ragel-based validator is enabled at gyp time, expose that
        # choice to the source via the NACL_VALIDATOR_RAGEL define.
        ['nacl_validator_ragel!=0', {
          'defines': [
            'NACL_VALIDATOR_RAGEL=1',
          ],
        }],
      ],
    },
    {
      # Static library for the validation cache (validation_cache.c); depends
      # on the shared platform abstraction library.
      'target_name': 'validation_cache',
      'type': 'static_library',
      'sources' : [
        'validation_cache.c',
      ],
      'dependencies': [
        '<(DEPTH)/native_client/src/shared/platform/platform.gyp:platform',
      ],
    },
  ],
  'conditions': [
    # On 32-bit Windows builds, additionally produce x64 variants of the two
    # targets above ('win_target': 'x64'), linked against the 64-bit platform
    # library.
    ['OS=="win" and target_arch=="ia32"', {
      'targets': [
        {
          'target_name': 'validators64',
          'type': 'static_library',
          'sources' : [
            'validator_init.c',
          ],
          'variables': {
            'win_target': 'x64',
          },
          'conditions': [
            # Same Ragel switch as the 32-bit 'validators' target.
            ['nacl_validator_ragel!=0', {
              'defines': [
                'NACL_VALIDATOR_RAGEL=1',
              ],
            }],
          ],
        },
        {
          'target_name': 'validation_cache64',
          'type': 'static_library',
          'sources' : [
            'validation_cache.c',
          ],
          'variables': {
            'win_target': 'x64',
          },
          'dependencies': [
            '<(DEPTH)/native_client/src/shared/platform/platform.gyp:platform64',
          ],
        },
      ],
    }],
  ],
}
| 23.342466 | 81 | 0.44777 |
{
'includes': [
'../../../build/common.gypi',
],
'targets': [
{
'target_name': 'validators',
'type': 'static_library',
'sources' : [
'validator_init.c',
],
'conditions': [
['nacl_validator_ragel!=0', {
'defines': [
'NACL_VALIDATOR_RAGEL=1',
],
}],
],
},
{
'target_name': 'validation_cache',
'type': 'static_library',
'sources' : [
'validation_cache.c',
],
'dependencies': [
'<(DEPTH)/native_client/src/shared/platform/platform.gyp:platform',
],
},
],
'conditions': [
['OS=="win" and target_arch=="ia32"', {
'targets': [
{
'target_name': 'validators64',
'type': 'static_library',
'sources' : [
'validator_init.c',
],
'variables': {
'win_target': 'x64',
},
'conditions': [
['nacl_validator_ragel!=0', {
'defines': [
'NACL_VALIDATOR_RAGEL=1',
],
}],
],
},
{
'target_name': 'validation_cache64',
'type': 'static_library',
'sources' : [
'validation_cache.c',
],
'variables': {
'win_target': 'x64',
},
'dependencies': [
'<(DEPTH)/native_client/src/shared/platform/platform.gyp:platform64',
],
},
],
}],
],
}
| true | true |
f73a0f70a34dbddd528d3e34e9f52558a60c5191 | 4,292 | py | Python | mythril/analysis/symbolic.py | soad003/mythril | 08882bfd9fcb90cef7fa623e66b7f9aec11f004d | [
"MIT"
] | null | null | null | mythril/analysis/symbolic.py | soad003/mythril | 08882bfd9fcb90cef7fa623e66b7f9aec11f004d | [
"MIT"
] | null | null | null | mythril/analysis/symbolic.py | soad003/mythril | 08882bfd9fcb90cef7fa623e66b7f9aec11f004d | [
"MIT"
] | null | null | null | from mythril import ether
from mythril.laser.ethereum import svm
import copy
import logging
from .ops import get_variable, SStore, Call, VarType
from mythril.laser.ethereum.strategy.basic import DepthFirstSearchStrategy, BreadthFirstSearchStrategy
class SymExecWrapper:
    '''
    Wrapper around the LASER symbolic virtual machine.

    Symbolically executes the contract code and pre-computes lists of
    interesting operations for the analysis modules:

    - ``self.calls``:  a list of Call ops (CALL / CALLCODE / DELEGATECALL /
      STATICCALL instructions encountered during execution).
    - ``self.sstors``: storage writes, keyed first by account address and
      then by the (stringified) storage index.
    '''

    def __init__(self, contract, address, strategy, dynloader=None, max_depth=22, execution_timeout=None):
        '''
        :param contract: contract object exposing `disassembly` and `name`
        :param address: address of the contract account
        :param strategy: search strategy name, either 'dfs' or 'bfs'
        :param dynloader: optional dynamic loader forwarded to LASER
        :param max_depth: maximum symbolic execution depth
        :param execution_timeout: optional execution timeout (seconds)
        :raises ValueError: if `strategy` is neither 'dfs' nor 'bfs'
        '''
        if strategy == 'dfs':
            s_strategy = DepthFirstSearchStrategy
        elif strategy == 'bfs':
            s_strategy = BreadthFirstSearchStrategy
        else:
            raise ValueError("Invalid strategy argument supplied")

        account = svm.Account(address, contract.disassembly, contract_name=contract.name)

        self.accounts = {address: account}

        self.laser = svm.LaserEVM(self.accounts, dynamic_loader=dynloader, max_depth=max_depth,
                                  execution_timeout=execution_timeout, strategy=s_strategy)
        self.laser.sym_exec(address)

        self.nodes = self.laser.nodes
        self.edges = self.laser.edges

        # Generate lists of interesting operations.
        self.calls = []
        self.sstors = {}

        for key in self.nodes:
            # state_index restarts at 0 for every node, mirroring the order
            # of the node's state list.
            for state_index, state in enumerate(self.nodes[key].states):
                instruction = state.get_current_instruction()
                if instruction is None:
                    continue

                op = instruction['opcode']
                if op in ('CALL', 'CALLCODE', 'DELEGATECALL', 'STATICCALL'):
                    self._record_call(key, state, state_index, op)
                elif op == 'SSTORE':
                    self._record_sstore(key, state, state_index)

    def _record_call(self, key, state, state_index, op):
        # Extract the call operands from the symbolic stack and append a
        # Call op to self.calls.
        stack = state.mstate.stack

        if op in ('CALL', 'CALLCODE'):
            gas, to, value, meminstart, meminsz = (
                get_variable(stack[-1]), get_variable(stack[-2]), get_variable(stack[-3]),
                get_variable(stack[-4]), get_variable(stack[-5])
            )

            if to.type == VarType.CONCRETE and to.val < 5:
                # Ignore calls to precompiled ("prebuilt") contracts.
                return

            if meminstart.type == VarType.CONCRETE and meminsz.type == VarType.CONCRETE:
                # Input memory region is concrete, so we can attach the call data.
                self.calls.append(Call(self.nodes[key], state, state_index, op, to, gas, value,
                                       state.mstate.memory[meminstart.val:meminsz.val * 4]))
            else:
                self.calls.append(Call(self.nodes[key], state, state_index, op, to, gas, value))
        else:
            # DELEGATECALL / STATICCALL carry no value operand.
            gas, to = get_variable(stack[-1]), get_variable(stack[-2])
            self.calls.append(Call(self.nodes[key], state, state_index, op, to, gas))

    def _record_sstore(self, key, state, state_index):
        # Record a storage write in self.sstors, keyed by account address
        # and stringified storage index.
        # Deep-copy the stack so popping the operands doesn't mutate the
        # machine state.
        stack = copy.deepcopy(state.mstate.stack)
        account_address = state.environment.active_account.address
        index, value = stack.pop(), stack.pop()

        self.sstors.setdefault(account_address, {}) \
            .setdefault(str(index), []) \
            .append(SStore(self.nodes[key], state, state_index, value))

    def find_storage_write(self, address, index):
        '''
        Return the name of a function containing an SSTORE to storage slot
        `index` of the account at `address` whose path constraints do not
        restrict the caller, or None if no such unconstrained write exists.
        '''
        try:
            for sstore in self.sstors[address][index]:
                # A write is only interesting if none of its path constraints
                # mention the caller - otherwise access is restricted.
                if any("caller" in str(constraint) for constraint in sstore.node.constraints):
                    continue
                return sstore.node.function_name
            return None
        except KeyError:
            # No writes recorded for this address / index combination.
            return None
| 37.982301 | 201 | 0.561277 | from mythril import ether
from mythril.laser.ethereum import svm
import copy
import logging
from .ops import get_variable, SStore, Call, VarType
from mythril.laser.ethereum.strategy.basic import DepthFirstSearchStrategy, BreadthFirstSearchStrategy
class SymExecWrapper:
def __init__(self, contract, address, strategy, dynloader=None, max_depth=22, execution_timeout=None):
s_strategy = None
if strategy == 'dfs':
s_strategy = DepthFirstSearchStrategy
elif strategy == 'bfs':
s_strategy = BreadthFirstSearchStrategy
else:
raise ValueError("Invalid strategy argument supplied")
account = svm.Account(address, contract.disassembly, contract_name=contract.name)
self.accounts = {address: account}
self.laser = svm.LaserEVM(self.accounts, dynamic_loader=dynloader, max_depth=max_depth, execution_timeout=execution_timeout, strategy=s_strategy)
self.laser.sym_exec(address)
self.nodes = self.laser.nodes
self.edges = self.laser.edges
self.calls = []
self.sstors = {}
for key in self.nodes:
state_index = 0
for state in self.nodes[key].states:
instruction = state.get_current_instruction()
if instruction == None:
continue
op = instruction['opcode']
if op in ('CALL', 'CALLCODE', 'DELEGATECALL', 'STATICCALL'):
stack = state.mstate.stack
if op in ('CALL', 'CALLCODE'):
gas, to, value, meminstart, meminsz, memoutstart, memoutsz = \
get_variable(stack[-1]), get_variable(stack[-2]), get_variable(stack[-3]), get_variable(stack[-4]), get_variable(stack[-5]), get_variable(stack[-6]), get_variable(stack[-7])
if to.type == VarType.CONCRETE and to.val < 5:
continue
if (meminstart.type == VarType.CONCRETE and meminsz.type == VarType.CONCRETE):
self.calls.append(Call(self.nodes[key], state, state_index, op, to, gas, value, state.mstate.memory[meminstart.val:meminsz.val * 4]))
else:
self.calls.append(Call(self.nodes[key], state, state_index, op, to, gas, value))
else:
gas, to, meminstart, meminsz, memoutstart, memoutsz = \
get_variable(stack[-1]), get_variable(stack[-2]), get_variable(stack[-3]), get_variable(stack[-4]), get_variable(stack[-5]), get_variable(stack[-6])
self.calls.append(Call(self.nodes[key], state, state_index, op, to, gas))
elif op == 'SSTORE':
stack = copy.deepcopy(state.mstate.stack)
address = state.environment.active_account.address
index, value = stack.pop(), stack.pop()
try:
self.sstors[address]
except KeyError:
self.sstors[address] = {}
try:
self.sstors[address][str(index)].append(SStore(self.nodes[key], state, state_index, value))
except KeyError:
self.sstors[address][str(index)] = [SStore(self.nodes[key], state, state_index, value)]
state_index += 1
def find_storage_write(self, address, index):
try:
for s in self.sstors[address][index]:
taint = True
for constraint in s.node.constraints:
if ("caller" in str(constraint)):
taint = False
break
if taint:
return s.node.function_name
return None
except KeyError:
return None
| true | true |
f73a0fcca07df0f7b112e6705b8b0aeea447e9f2 | 12,827 | py | Python | python/GafferUI/__init__.py | Tuftux/gaffer | 5acaf7cbfadbae841dc06854121ca85dcc5c338c | [
"BSD-3-Clause"
] | null | null | null | python/GafferUI/__init__.py | Tuftux/gaffer | 5acaf7cbfadbae841dc06854121ca85dcc5c338c | [
"BSD-3-Clause"
] | null | null | null | python/GafferUI/__init__.py | Tuftux/gaffer | 5acaf7cbfadbae841dc06854121ca85dcc5c338c | [
"BSD-3-Clause"
] | null | null | null | ##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
# Work around a bug which causes segfaults if uuid is imported after
# PyQt. See here for details :
#
# https://bugs.gentoo.org/show_bug.cgi?id=317557
# http://www.riverbankcomputing.com/pipermail/pyqt/2010-December/028773.html
#
# Using __import__ rather than import so that we don't pollute the GafferUI
# namespace.
__import__( "uuid" )

# Cache for the name of the Qt bindings module in use ( e.g. "PySide" or
# "PyQt4" ). Populated lazily on the first call to _qtImport().
#
# Deprecated. The legacy _qtImport() function only supports use with Qt4. For
# combined Qt4/Qt5 support use `from Qt import name` instead.
# Also note that the lazy argument is no longer effective, because Qt.py
# imports all modules at startup.
__qtModuleName = None
def _qtImport( name, lazy=False ) :

	"""Deprecated Qt4-era helper : returns the requested submodule of
	whichever Qt bindings are in use, choosing and fixing up the bindings
	on the first call."""

	global __qtModuleName

	if __qtModuleName is None :

		# First call : decide which bindings to use.
		import os
		binding = os.environ.get( "GAFFERUI_QT_BINDINGS" )
		if binding is None :
			# No preference stated via the environment - see what we
			# shipped with.
			shippedPySide = os.path.exists( os.environ["GAFFER_ROOT"] + "/python/PySide" )
			binding = "PySide" if shippedPySide else "PyQt4"
		__qtModuleName = binding

		if "PyQt" in __qtModuleName :
			# PyQt unfortunately uses an implementation-specific naming
			# scheme for its new-style signal and slot classes. Alias it to
			# the PySide name, according to :
			#
			# http://qt-project.org/wiki/Differences_Between_PySide_and_PyQt
			QtCore = __import__( __qtModuleName + ".QtCore" ).QtCore
			QtCore.Signal = QtCore.pyqtSignal

	# Import the requested submodule from those bindings and return it.
	fullName = __qtModuleName + "." + name
	if lazy :
		import Gaffer
		return Gaffer.lazyImport( fullName )

	qtModule = __import__( fullName )
	return getattr( qtModule, name )
##########################################################################
# Function to return the C++ address of a wrapped Qt object. This can
# be useful if needing to implement part of the UI in C++ and the rest
# in Python.
##########################################################################
def _qtAddress( o ) :

	# The mechanism for obtaining the underlying C++ pointer differs
	# between the PyQt and PySide families of bindings.
	import Qt
	if "PyQt" in Qt.__binding__ :
		import sip
		return sip.unwrapinstance( o )

	return __shiboken().getCppPointer( o )[0]
##########################################################################
# Function to return a wrapped Qt object from the given C++ address.
# This can be useful if needing to implement part of the UI in C++ and
# the rest in Python.
##########################################################################
def _qtObject( address, type ) :

	# Inverse of _qtAddress() : constructs a Python wrapper of the
	# requested type around an existing C++ object.
	import Qt
	if "PyQt" not in Qt.__binding__ :
		return __shiboken().wrapInstance( address, type )

	import sip
	return sip.wrapinstance( address, type )
##########################################################################
# Determines if the wrapped Qt object is still valid
# Useful when having to deal with the consequences of C++/Python deletion
# order challeneges, see:
# https://github.com/GafferHQ/gaffer/pull/3179
##########################################################################
def _qtObjectIsValid( o ) :

	# PyQt and PySide expose the liveness check under different names -
	# sip reports deletion, shiboken reports validity.
	import Qt
	if "PyQt" in Qt.__binding__ :
		import sip
		deleted = sip.isdeleted( o )
		return not deleted

	return __shiboken().isValid( o )
##########################################################################
# Shiboken lives in a variety of places depending on which PySide it is.
##########################################################################
def __shiboken() :

	"""Returns the shiboken module appropriate for the PySide bindings in
	use. Must not be called when running under PyQt."""

	import Qt
	assert( "PyQt" not in Qt.__binding__ )

	if Qt.__binding__ == "PySide2" :
		try :
			import PySide2.shiboken2 as shiboken
		except ImportError :
			import shiboken2 as shiboken
	else :
		try :
			# The `as shiboken` clause is required : a plain
			# `import PySide.shiboken` only binds the name "PySide", so the
			# return statement below would raise NameError whenever the
			# import actually succeeded.
			import PySide.shiboken as shiboken
		except ImportError :
			import shiboken

	return shiboken
##########################################################################
# now import our actual functionality
##########################################################################
# Import modules that must be imported before _GafferUI, using __import__
# to avoid polluting the GafferUI namespace.
__import__( "IECore" )
__import__( "Gaffer" )
from ._GafferUI import *
# general ui stuff first
from .Enums import *
from .Widget import Widget
from .LazyMethod import LazyMethod
from .Menu import Menu
from .ContainerWidget import ContainerWidget
from .Window import Window
from .SplitContainer import SplitContainer
from .ListContainer import ListContainer
from .GridContainer import GridContainer
from .MenuBar import MenuBar
from .EventLoop import EventLoop
from .TabbedContainer import TabbedContainer
from .TextWidget import TextWidget
from .NumericWidget import NumericWidget
from .Button import Button
from .MultiLineTextWidget import MultiLineTextWidget
from .Label import Label
from .GLWidget import GLWidget
from .ScrolledContainer import ScrolledContainer
from .PathWidget import PathWidget
from .PathListingWidget import PathListingWidget
from .PathChooserWidget import PathChooserWidget
from .Dialogue import Dialogue
from .PathChooserDialogue import PathChooserDialogue
from .TextInputDialogue import TextInputDialogue
from .Collapsible import Collapsible
from .ColorSwatch import ColorSwatch
from .Slider import Slider
from .ShowURL import showURL
from .Spacer import Spacer
from .BoolWidget import BoolWidget, CheckBox
from .Image import Image
from .ErrorDialogue import ErrorDialogue
from ._Variant import _Variant
from .VectorDataWidget import VectorDataWidget
from .PathVectorDataWidget import PathVectorDataWidget
from .ProgressBar import ProgressBar
from .SelectionMenu import SelectionMenu
from .PathFilterWidget import PathFilterWidget
from .CompoundPathFilterWidget import CompoundPathFilterWidget
from .InfoPathFilterWidget import InfoPathFilterWidget
from .MatchPatternPathFilterWidget import MatchPatternPathFilterWidget
from .FileSequencePathFilterWidget import FileSequencePathFilterWidget
from .BusyWidget import BusyWidget
from .NumericSlider import NumericSlider
from .ColorChooser import ColorChooser
from .ColorChooserDialogue import ColorChooserDialogue
from .MessageWidget import MessageWidget
from .NotificationMessageHandler import NotificationMessageHandler
from .MenuButton import MenuButton
from .MultiSelectionMenu import MultiSelectionMenu
from .PopupWindow import PopupWindow
from .ConfirmationDialogue import ConfirmationDialogue
from .DisplayTransform import DisplayTransform
from .Divider import Divider
from . import _Pointer
from .SplineWidget import SplineWidget
from .Bookmarks import Bookmarks
from . import WidgetAlgo
# then all the PathPreviewWidgets. note that the order
# of import controls the order of display.
from .PathPreviewWidget import PathPreviewWidget
from .CompoundPathPreview import CompoundPathPreview
from .DeferredPathPreview import DeferredPathPreview
from .InfoPathPreview import InfoPathPreview
from .HeaderPathPreview import HeaderPathPreview
from .DataPathPreview import DataPathPreview
# then stuff specific to graph uis
from .BackgroundMethod import BackgroundMethod
from .PlugValueWidget import PlugValueWidget
from .StringPlugValueWidget import StringPlugValueWidget
from .NumericPlugValueWidget import NumericPlugValueWidget
from .BoolPlugValueWidget import BoolPlugValueWidget
from .PathPlugValueWidget import PathPlugValueWidget
from .FileSystemPathPlugValueWidget import FileSystemPathPlugValueWidget
from .VectorDataPlugValueWidget import VectorDataPlugValueWidget
from .PathVectorDataPlugValueWidget import PathVectorDataPlugValueWidget
from .FileSystemPathVectorDataPlugValueWidget import FileSystemPathVectorDataPlugValueWidget
from .PlugWidget import PlugWidget
from .PlugLayout import PlugLayout
from .Editor import Editor
from .PythonEditor import PythonEditor
from .GadgetWidget import GadgetWidget
from .GraphEditor import GraphEditor
from .ScriptWindow import ScriptWindow
from .CompoundEditor import CompoundEditor
from .NameWidget import NameWidget
from .NameLabel import NameLabel
from .NodeSetEditor import NodeSetEditor
from .NodeEditor import NodeEditor
from .Layouts import Layouts
from .NodeMenu import NodeMenu
from . import FileMenu
from . import LayoutMenu
from . import EditMenu
from . import UserPlugs
from .Frame import Frame
from .CompoundNumericPlugValueWidget import CompoundNumericPlugValueWidget
from .BoxPlugValueWidget import BoxPlugValueWidget
from .NodeUI import NodeUI
from .StandardNodeUI import StandardNodeUI
from .NodeToolbar import NodeToolbar
from .StandardNodeToolbar import StandardNodeToolbar
from .Viewer import Viewer
from .ColorSwatchPlugValueWidget import ColorSwatchPlugValueWidget
from .ColorPlugValueWidget import ColorPlugValueWidget
from .AboutWindow import AboutWindow
from . import ApplicationMenu
from .BrowserEditor import BrowserEditor
from .Timeline import Timeline
from .MultiLineStringPlugValueWidget import MultiLineStringPlugValueWidget
from .PresetsPlugValueWidget import PresetsPlugValueWidget
from .GraphComponentBrowserMode import GraphComponentBrowserMode
from .ToolPlugValueWidget import ToolPlugValueWidget
from .LabelPlugValueWidget import LabelPlugValueWidget
from .CompoundDataPlugValueWidget import CompoundDataPlugValueWidget
from .LayoutPlugValueWidget import LayoutPlugValueWidget
from . import ScriptNodeUI
from .RefreshPlugValueWidget import RefreshPlugValueWidget
from . import PreferencesUI
from .SplinePlugValueWidget import SplinePlugValueWidget
from .RampPlugValueWidget import RampPlugValueWidget
from .NodeFinderDialogue import NodeFinderDialogue
from .ConnectionPlugValueWidget import ConnectionPlugValueWidget
from .ButtonPlugValueWidget import ButtonPlugValueWidget
from . import ViewUI
from . import ToolUI
from .Playback import Playback
from . import MetadataWidget
from .UIEditor import UIEditor
from . import GraphBookmarksUI
from . import DocumentationAlgo
from . import _PlugAdder
from .Backups import Backups
from .AnimationEditor import AnimationEditor
from . import CompoundNumericNoduleUI
from . import Examples
from .NameValuePlugValueWidget import NameValuePlugValueWidget
from .ShufflePlugValueWidget import ShufflePlugValueWidget
from .ShufflePlugValueWidget import ShufflesPlugValueWidget
# and then specific node uis
from . import DependencyNodeUI
from . import ComputeNodeUI
from . import RandomUI
from . import SpreadsheetUI
from . import ExpressionUI
from . import BoxUI
from . import ReferenceUI
from . import BackdropUI
from . import DotUI
from . import SubGraphUI
from . import SwitchUI
from . import ContextProcessorUI
from . import ContextVariablesUI
from . import DeleteContextVariablesUI
from . import TimeWarpUI
from . import LoopUI
from . import AnimationUI
from . import BoxIOUI
from . import BoxInUI
from . import BoxOutUI
from . import NameSwitchUI
from . import EditScopeUI
# backwards compatibility
## \todo Remove me
Metadata = __import__( "Gaffer" ).Metadata
__import__( "IECore" ).loadConfig( "GAFFER_STARTUP_PATHS", subdirectory = "GafferUI" )
| 37.505848 | 92 | 0.753801 | mport SelectionMenu
from .PathFilterWidget import PathFilterWidget
from .CompoundPathFilterWidget import CompoundPathFilterWidget
from .InfoPathFilterWidget import InfoPathFilterWidget
from .MatchPatternPathFilterWidget import MatchPatternPathFilterWidget
from .FileSequencePathFilterWidget import FileSequencePathFilterWidget
from .BusyWidget import BusyWidget
from .NumericSlider import NumericSlider
from .ColorChooser import ColorChooser
from .ColorChooserDialogue import ColorChooserDialogue
from .MessageWidget import MessageWidget
from .NotificationMessageHandler import NotificationMessageHandler
from .MenuButton import MenuButton
from .MultiSelectionMenu import MultiSelectionMenu
from .PopupWindow import PopupWindow
from .ConfirmationDialogue import ConfirmationDialogue
from .DisplayTransform import DisplayTransform
from .Divider import Divider
from . import _Pointer
from .SplineWidget import SplineWidget
from .Bookmarks import Bookmarks
from . import WidgetAlgo
# then all the PathPreviewWidgets. note that the order
# of import controls the order of display.
from .PathPreviewWidget import PathPreviewWidget
from .CompoundPathPreview import CompoundPathPreview
from .DeferredPathPreview import DeferredPathPreview
from .InfoPathPreview import InfoPathPreview
from .HeaderPathPreview import HeaderPathPreview
from .DataPathPreview import DataPathPreview
# then stuff specific to graph uis
from .BackgroundMethod import BackgroundMethod
from .PlugValueWidget import PlugValueWidget
from .StringPlugValueWidget import StringPlugValueWidget
from .NumericPlugValueWidget import NumericPlugValueWidget
from .BoolPlugValueWidget import BoolPlugValueWidget
from .PathPlugValueWidget import PathPlugValueWidget
from .FileSystemPathPlugValueWidget import FileSystemPathPlugValueWidget
from .VectorDataPlugValueWidget import VectorDataPlugValueWidget
from .PathVectorDataPlugValueWidget import PathVectorDataPlugValueWidget
from .FileSystemPathVectorDataPlugValueWidget import FileSystemPathVectorDataPlugValueWidget
from .PlugWidget import PlugWidget
from .PlugLayout import PlugLayout
from .Editor import Editor
from .PythonEditor import PythonEditor
from .GadgetWidget import GadgetWidget
from .GraphEditor import GraphEditor
from .ScriptWindow import ScriptWindow
from .CompoundEditor import CompoundEditor
from .NameWidget import NameWidget
from .NameLabel import NameLabel
from .NodeSetEditor import NodeSetEditor
from .NodeEditor import NodeEditor
from .Layouts import Layouts
from .NodeMenu import NodeMenu
from . import FileMenu
from . import LayoutMenu
from . import EditMenu
from . import UserPlugs
from .Frame import Frame
from .CompoundNumericPlugValueWidget import CompoundNumericPlugValueWidget
from .BoxPlugValueWidget import BoxPlugValueWidget
from .NodeUI import NodeUI
from .StandardNodeUI import StandardNodeUI
from .NodeToolbar import NodeToolbar
from .StandardNodeToolbar import StandardNodeToolbar
from .Viewer import Viewer
from .ColorSwatchPlugValueWidget import ColorSwatchPlugValueWidget
from .ColorPlugValueWidget import ColorPlugValueWidget
from .AboutWindow import AboutWindow
from . import ApplicationMenu
from .BrowserEditor import BrowserEditor
from .Timeline import Timeline
from .MultiLineStringPlugValueWidget import MultiLineStringPlugValueWidget
from .PresetsPlugValueWidget import PresetsPlugValueWidget
from .GraphComponentBrowserMode import GraphComponentBrowserMode
from .ToolPlugValueWidget import ToolPlugValueWidget
from .LabelPlugValueWidget import LabelPlugValueWidget
from .CompoundDataPlugValueWidget import CompoundDataPlugValueWidget
from .LayoutPlugValueWidget import LayoutPlugValueWidget
from . import ScriptNodeUI
from .RefreshPlugValueWidget import RefreshPlugValueWidget
from . import PreferencesUI
from .SplinePlugValueWidget import SplinePlugValueWidget
from .RampPlugValueWidget import RampPlugValueWidget
from .NodeFinderDialogue import NodeFinderDialogue
from .ConnectionPlugValueWidget import ConnectionPlugValueWidget
from .ButtonPlugValueWidget import ButtonPlugValueWidget
from . import ViewUI
from . import ToolUI
from .Playback import Playback
from . import MetadataWidget
from .UIEditor import UIEditor
from . import GraphBookmarksUI
from . import DocumentationAlgo
from . import _PlugAdder
from .Backups import Backups
from .AnimationEditor import AnimationEditor
from . import CompoundNumericNoduleUI
from . import Examples
from .NameValuePlugValueWidget import NameValuePlugValueWidget
from .ShufflePlugValueWidget import ShufflePlugValueWidget
from .ShufflePlugValueWidget import ShufflesPlugValueWidget
# and then specific node uis
from . import DependencyNodeUI
from . import ComputeNodeUI
from . import RandomUI
from . import SpreadsheetUI
from . import ExpressionUI
from . import BoxUI
from . import ReferenceUI
from . import BackdropUI
from . import DotUI
from . import SubGraphUI
from . import SwitchUI
from . import ContextProcessorUI
from . import ContextVariablesUI
from . import DeleteContextVariablesUI
from . import TimeWarpUI
from . import LoopUI
from . import AnimationUI
from . import BoxIOUI
from . import BoxInUI
from . import BoxOutUI
from . import NameSwitchUI
from . import EditScopeUI
# backwards compatibility
## \todo Remove me
Metadata = __import__( "Gaffer" ).Metadata
__import__( "IECore" ).loadConfig( "GAFFER_STARTUP_PATHS", subdirectory = "GafferUI" )
| true | true |
f73a10bcb4592c94e60c746029271dd88c1e6e9b | 305 | py | Python | graphic_templates/slopegraph/graphic_config.py | stlpublicradio/dailygraphics | ce29a89ed99209579d849cdd6077529f63009fa8 | [
"MIT"
] | null | null | null | graphic_templates/slopegraph/graphic_config.py | stlpublicradio/dailygraphics | ce29a89ed99209579d849cdd6077529f63009fa8 | [
"MIT"
] | 1 | 2019-01-23T21:45:24.000Z | 2019-01-23T21:45:24.000Z | graphic_templates/slopegraph/graphic_config.py | stlpublicradio/dailygraphics | ce29a89ed99209579d849cdd6077529f63009fa8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import base_filters
# Key of the Google Doc that supplies this graphic's copy text.
COPY_GOOGLE_DOC_KEY = '1dF4lI8j77VOLP6SiGzByvbgg-yQOqZRq5FWJniOcrBE'

# No synced assets folder for this graphic.
USE_ASSETS = False

# Use these variables to override the default cache timeouts for this graphic
# DEFAULT_MAX_AGE = 20
# ASSETS_MAX_AGE = 300

# Jinja filters made available to this graphic's templates (from base_filters).
JINJA_FILTER_FUNCTIONS = base_filters.FILTERS
| 21.785714 | 77 | 0.816393 |
import base_filters
COPY_GOOGLE_DOC_KEY = '1dF4lI8j77VOLP6SiGzByvbgg-yQOqZRq5FWJniOcrBE'
USE_ASSETS = False
JINJA_FILTER_FUNCTIONS = base_filters.FILTERS
| true | true |
f73a10df600fc03d8edd6652f5d7c50b27f6036e | 1,191 | py | Python | examples/capture_x.py | elerac/codepattern | 8ee7d04870b1d9b64045a15c488792b0f0f9aef3 | [
"MIT"
] | 45 | 2020-09-10T19:36:19.000Z | 2022-03-23T07:52:22.000Z | examples/capture_x.py | elerac/codepattern | 8ee7d04870b1d9b64045a15c488792b0f0f9aef3 | [
"MIT"
] | 3 | 2021-05-17T01:25:20.000Z | 2021-11-09T13:03:09.000Z | examples/capture_x.py | elerac/codepattern | 8ee7d04870b1d9b64045a15c488792b0f0f9aef3 | [
"MIT"
] | 11 | 2020-09-12T09:23:52.000Z | 2022-03-13T16:08:08.000Z | """
Capture projection pattern and decode x-coorde.
"""
import cv2
import numpy as np
import structuredlight as sl
def imshowAndCapture(cap, img_pattern, delay=250):
    """Display a projector pattern, wait, then grab one grayscale camera frame.

    cap: an opened cv2.VideoCapture.
    img_pattern: pattern image to show in the display window.
    delay: milliseconds to wait before capturing, letting the pattern settle.
    Returns the captured frame converted to single-channel grayscale.
    """
    cv2.imshow("", img_pattern)
    cv2.waitKey(delay)  # give the displayed pattern time to appear on screen
    _ret, frame = cap.read()  # NOTE(review): capture success flag is not checked
    return cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
def main():
    """Project Gray-code patterns, capture each one, and decode the x coordinate."""
    pattern_size = (640, 480)
    camera = cv2.VideoCapture(1)  # external web camera
    codec = sl.Gray()

    # Generate the positive patterns and their inverses.
    positives = codec.generate(pattern_size)
    negatives = sl.invert(positives)

    # Show each pattern and capture the camera's view of it.
    captured_posi = [imshowAndCapture(camera, pattern) for pattern in positives]
    captured_nega = [imshowAndCapture(camera, pattern) for pattern in negatives]

    # Recover the per-pixel projector x-coordinate from the captures.
    img_index = codec.decode(captured_posi, captured_nega)

    # Scale indices into 0..255 for visualization and saving.
    correspondence = np.clip(img_index / pattern_size[0] * 255.0, 0, 255).astype(np.uint8)
    cv2.imshow("corresponnence map", correspondence)  # window title kept as-is
    cv2.waitKey(0)
    cv2.imwrite("correspondence.png", correspondence)
    cv2.destroyAllWindows()
    camera.release()
cv2.destroyAllWindows()
cap.release()
if __name__=="__main__":
main()
| 27.068182 | 80 | 0.700252 | import cv2
import numpy as np
import structuredlight as sl
def imshowAndCapture(cap, img_pattern, delay=250):
cv2.imshow("", img_pattern)
cv2.waitKey(delay)
ret, img_frame = cap.read()
img_gray = cv2.cvtColor(img_frame, cv2.COLOR_BGR2GRAY)
return img_gray
def main():
width = 640
height = 480
cap = cv2.VideoCapture(1)
gray = sl.Gray()
imlist_posi_pat = gray.generate((width, height))
imlist_nega_pat = sl.invert(imlist_posi_pat)
imlist_posi_cap = [ imshowAndCapture(cap, img) for img in imlist_posi_pat]
imlist_nega_cap = [ imshowAndCapture(cap, img) for img in imlist_nega_pat]
img_index = gray.decode(imlist_posi_cap, imlist_nega_cap)
img_correspondence = np.clip(img_index/width*255.0, 0, 255).astype(np.uint8)
cv2.imshow("corresponnence map", img_correspondence)
cv2.waitKey(0)
cv2.imwrite("correspondence.png", img_correspondence)
cv2.destroyAllWindows()
cap.release()
if __name__=="__main__":
main()
| true | true |
f73a11105fc65c11f7e6b663732253973747f0af | 3,103 | py | Python | torchplasma/filters/gaussian.py | hdkai/Plasma | 1942d7fe5f6b41c9a16c8e2d1b6c7cf263307c39 | [
"Apache-2.0"
] | null | null | null | torchplasma/filters/gaussian.py | hdkai/Plasma | 1942d7fe5f6b41c9a16c8e2d1b6c7cf263307c39 | [
"Apache-2.0"
] | null | null | null | torchplasma/filters/gaussian.py | hdkai/Plasma | 1942d7fe5f6b41c9a16c8e2d1b6c7cf263307c39 | [
"Apache-2.0"
] | null | null | null | #
# Plasma
# Copyright (c) 2021 Yusuf Olokoba.
#
from torch import arange, exp, tensor, Tensor
from torch.nn.functional import conv2d, conv3d, pad
from typing import Tuple
def gaussian_kernel (kernel_size: int, sigma: float = -1.) -> Tensor:
    """
    Normalized 1D Gaussian kernel.

    This operation is NOT differentiable w.r.t its arguments.

    Parameters:
        kernel_size (int): Kernel size, should be odd.
        sigma (float): Gaussian standard deviation. When negative, it is
            derived from the kernel size (matching OpenCV `getGaussianKernel`).

    Returns:
        Tensor: Normalized Gaussian kernel with shape (K,).
    """
    if sigma < 0:
        # Heuristic used by OpenCV ::getGaussianKernel to pick sigma.
        sigma = 0.3 * ((kernel_size - 1) * 0.5 - 1) + 0.8
    offsets = arange(kernel_size).float() - kernel_size // 2
    if kernel_size % 2 == 0:
        offsets = offsets + 0.5  # centre an even-sized kernel between samples
    weights = exp(-offsets.pow(2.) / (2. * sigma ** 2))
    # Normalize so the weights sum to one.
    return weights / weights.sum()
def gaussian_filter (input: Tensor, kernel_size: Tuple[int, int]) -> Tensor:
    """
    Apply a Gaussian filter to an image via two separable 1D convolutions.

    Parameters:
        input (Tensor): Input image with shape (N,C,H,W).
        kernel_size (tuple): Kernel size in each dimension (Ky,Kx).

    Returns:
        Tensor: Filtered image with shape (N,C,H,W).
    """
    channels = input.shape[1]
    ky, kx = kernel_size

    # One 1D kernel per axis, broadcast across channels for depthwise filtering.
    horizontal = gaussian_kernel(kx).to(input.device).expand(channels, 1, 1, -1)
    vertical = gaussian_kernel(ky).to(input.device).expand(channels, 1, 1, -1).permute(0, 1, 3, 2).contiguous()

    # Separable convolution: filter rows first, then columns; `groups=channels`
    # keeps each channel independent.
    filtered = conv2d(input, horizontal, padding=(0, kx // 2), groups=channels)
    return conv2d(filtered, vertical, padding=(ky // 2, 0), groups=channels)
def gaussian_filter_3d (input: Tensor, kernel_size: Tuple[int, int, int]) -> Tensor:
    """
    Apply a Gaussian filter to a volume via three separable 1D convolutions.

    Parameters:
        input (Tensor): Input volume with shape (N,C,D,H,W).
        kernel_size (tuple): Kernel size in each dimension (Kz,Ky,Kx).

    Returns:
        Tensor: Filtered volume with shape (N,C,D,H,W).
    """
    _,channels,_,_,_ = input.shape
    kernel_size_z, kernel_size_y, kernel_size_x = kernel_size
    # One normalized 1D Gaussian kernel per axis, each with shape (K,).
    kernel_x = gaussian_kernel(kernel_size_x).to(input.device)
    kernel_y = gaussian_kernel(kernel_size_y).to(input.device)
    kernel_z = gaussian_kernel(kernel_size_z).to(input.device)
    # Reshape each kernel into a depthwise conv3d weight oriented along its axis
    # (the permutes move the kernel taps onto the y and z axes respectively).
    kernel_x = kernel_x.expand(channels, 1, 1, 1, -1)
    kernel_y = kernel_y.expand(channels, 1, 1, 1, -1).permute(0, 1, 2, 4, 3).contiguous()
    kernel_z = kernel_z.expand(channels, 1, 1, 1, -1).permute(0, 1, 4, 2, 3).contiguous()
    # Separable convolution: filter along x, then y, then z; groups=channels
    # keeps each channel independent.
    result = conv3d(input, kernel_x, padding=(0, 0, kernel_size_x // 2), groups=channels)
    result = conv3d(result, kernel_y, padding=(0, kernel_size_y // 2, 0), groups=channels)
    result = conv3d(result, kernel_z, padding=(kernel_size_z // 2, 0, 0), groups=channels)
return result | 40.298701 | 118 | 0.66613 |
from torch import arange, exp, tensor, Tensor
from torch.nn.functional import conv2d, conv3d, pad
from typing import Tuple
def gaussian_kernel (kernel_size: int, sigma: float = -1.) -> Tensor:
sigma = 0.3 * ((kernel_size - 1) * 0.5 - 1) + 0.8 if sigma < 0 else sigma
x = arange(kernel_size).float() - kernel_size // 2
x = x + 0.5 if kernel_size % 2 == 0 else x
kernel = exp((-x.pow(2.) / (2. * sigma ** 2)))
return kernel / kernel.sum()
def gaussian_filter (input: Tensor, kernel_size: Tuple[int, int]) -> Tensor:
_,channels,_,_ = input.shape
kernel_size_y, kernel_size_x = kernel_size
kernel_x = gaussian_kernel(kernel_size_x).to(input.device)
kernel_y = gaussian_kernel(kernel_size_y).to(input.device)
kernel_x = kernel_x.expand(channels, 1, 1, -1)
kernel_y = kernel_y.expand(channels, 1, 1, -1).permute(0, 1, 3, 2).contiguous()
result = conv2d(input, kernel_x, padding=(0, kernel_size_x // 2), groups=channels)
result = conv2d(result, kernel_y, padding=(kernel_size_y // 2, 0), groups=channels)
return result
def gaussian_filter_3d (input: Tensor, kernel_size: Tuple[int, int, int]) -> Tensor:
_,channels,_,_,_ = input.shape
kernel_size_z, kernel_size_y, kernel_size_x = kernel_size
kernel_x = gaussian_kernel(kernel_size_x).to(input.device)
kernel_y = gaussian_kernel(kernel_size_y).to(input.device)
kernel_z = gaussian_kernel(kernel_size_z).to(input.device)
kernel_x = kernel_x.expand(channels, 1, 1, 1, -1)
kernel_y = kernel_y.expand(channels, 1, 1, 1, -1).permute(0, 1, 2, 4, 3).contiguous()
kernel_z = kernel_z.expand(channels, 1, 1, 1, -1).permute(0, 1, 4, 2, 3).contiguous()
result = conv3d(input, kernel_x, padding=(0, 0, kernel_size_x // 2), groups=channels)
result = conv3d(result, kernel_y, padding=(0, kernel_size_y // 2, 0), groups=channels)
result = conv3d(result, kernel_z, padding=(kernel_size_z // 2, 0, 0), groups=channels)
return result | true | true |
f73a114ece41202ab698575bee28dd0d752a99ef | 1,689 | py | Python | src/python/WMCore/BossAir/MySQL/RunJobByStatus.py | khurtado/WMCore | f74e252412e49189a92962945a94f93bec81cd1e | [
"Apache-2.0"
] | 21 | 2015-11-19T16:18:45.000Z | 2021-12-02T18:20:39.000Z | src/python/WMCore/BossAir/MySQL/RunJobByStatus.py | khurtado/WMCore | f74e252412e49189a92962945a94f93bec81cd1e | [
"Apache-2.0"
] | 5,671 | 2015-01-06T14:38:52.000Z | 2022-03-31T22:11:14.000Z | src/python/WMCore/BossAir/MySQL/RunJobByStatus.py | khurtado/WMCore | f74e252412e49189a92962945a94f93bec81cd1e | [
"Apache-2.0"
] | 67 | 2015-01-21T15:55:38.000Z | 2022-02-03T19:53:13.000Z | """
_RunJobByStatus_
Monitoring DAO classes for Jobs in BossAir database.
It groups jobs in each sched_status and bossAir status and guarantee
all sched_status are always present in the output.
"""
from __future__ import print_function, division
from WMCore.Database.DBFormatter import DBFormatter
class RunJobByStatus(DBFormatter):
    """Monitoring DAO that counts BossAir jobs per scheduler status, split into
    active (status=1) and completed (status=0) buckets. The LEFT OUTER JOIN in
    the query guarantees every sched_status appears even with zero jobs."""

    sql = """
          SELECT bl_status.name AS sched_status, count(bl_runjob.sched_status) AS count, bl_runjob.status
            FROM bl_status
            LEFT OUTER JOIN bl_runjob ON bl_runjob.sched_status = bl_status.id
            GROUP BY bl_status.name, bl_runjob.status
          """

    def formatDict(self, results):
        """
        _formatDict_

        Build a dictionary of active (status=1) and completed (status=0) jobs,
        mapping each sched_status name to its job count. Rows whose status is
        NULL come from statuses with no jobs and only seed the zero entries.
        """
        rows = DBFormatter.formatDict(self, results)

        summary = {'active': {}, 'completed': {}}
        for row in rows:
            name = row['sched_status']
            # Ensure every sched_status is present in both buckets.
            summary['active'].setdefault(name, 0)
            summary['completed'].setdefault(name, 0)

            status = row['status']
            if status is None:
                continue  # no jobs in this sched_status; counts stay at 0
            bucket = 'completed' if int(status) == 0 else 'active'
            summary[bucket][name] += int(row['count'])

        return summary

    def execute(self, conn=None, transaction=False):
        """Run the query and return the formatted active/completed summary."""
        rows = self.dbi.processData(self.sql, conn=conn, transaction=transaction)
        return self.formatDict(rows)
| 35.93617 | 105 | 0.64476 | from __future__ import print_function, division
from WMCore.Database.DBFormatter import DBFormatter
class RunJobByStatus(DBFormatter):
sql = """
SELECT bl_status.name AS sched_status, count(bl_runjob.sched_status) AS count, bl_runjob.status
FROM bl_status
LEFT OUTER JOIN bl_runjob ON bl_runjob.sched_status = bl_status.id
GROUP BY bl_status.name, bl_runjob.status
"""
def formatDict(self, results):
formattedResults = DBFormatter.formatDict(self, results)
results = {'active': {}, 'completed': {}}
for res in formattedResults:
results['active'].setdefault(res['sched_status'], 0)
results['completed'].setdefault(res['sched_status'], 0)
if res['status'] is None:
pass
elif int(res['status']) == 0:
results['completed'][res['sched_status']] += int(res['count'])
else:
results['active'][res['sched_status']] += int(res['count'])
return results
def execute(self, conn=None, transaction=False):
result = self.dbi.processData(self.sql, conn=conn, transaction=transaction)
return self.formatDict(result)
| true | true |
f73a11ec74c510ce53e589d16f35b0e29075e224 | 2,447 | py | Python | Hamiltonian_Cycle.py | jp20indian/HacktoberFest2021 | 093dc9a9a2b400039107df8a2ff09648ecc0eede | [
"Apache-2.0"
] | 2 | 2021-10-03T08:08:55.000Z | 2021-10-03T11:42:21.000Z | Hamiltonian_Cycle.py | jp20indian/HacktoberFest2021 | 093dc9a9a2b400039107df8a2ff09648ecc0eede | [
"Apache-2.0"
] | 1 | 2021-10-21T04:23:00.000Z | 2021-10-21T04:23:00.000Z | Hamiltonian_Cycle.py | jp20indian/HacktoberFest2021 | 093dc9a9a2b400039107df8a2ff09648ecc0eede | [
"Apache-2.0"
] | 17 | 2021-10-03T11:42:25.000Z | 2021-10-31T01:34:25.000Z |
# Hamiltonian cycle problem
class Graph():
    """Undirected graph on ``vertices`` nodes stored as an adjacency matrix,
    with a backtracking search for a Hamiltonian cycle."""

    def __init__(self, vertices):
        # graph[u][v] == 1 when an edge connects u and v, else 0.
        self.graph = [[0 for column in range(vertices)]
                      for row in range(vertices)]
        self.V = vertices

    def isSafe(self, v, pos, path):
        """Return True if vertex ``v`` may be placed at index ``pos`` of
        ``path``: it must be adjacent to the previously placed vertex and
        not already appear in the path."""
        # Must be adjacent to the last vertex placed on the path.
        if self.graph[path[pos - 1]][v] == 0:
            return False
        # Each vertex may be used at most once in a Hamiltonian cycle.
        if v in path:
            return False
        return True

    def hamCycleUtil(self, path, pos):
        """Recursive backtracking helper: try to fill path[pos:] and return
        True as soon as a Hamiltonian cycle is completed."""
        if pos == self.V:
            # All vertices placed; the last one must connect back to the
            # first to close the cycle.
            return self.graph[path[pos - 1]][path[0]] == 1

        # Vertex 0 is fixed as the start in hamCycle(), so candidates start at 1.
        for v in range(1, self.V):
            if self.isSafe(v, pos, path):
                path[pos] = v
                if self.hamCycleUtil(path, pos + 1):
                    return True
                # Backtrack: v leads to a dead end at this position.
                path[pos] = -1
        return False

    def hamCycle(self):
        """Search for a Hamiltonian cycle. Prints the cycle and returns True
        when one exists; otherwise prints a message and returns False."""
        path = [-1] * self.V
        # The graph is undirected, so a cycle can be reported starting from
        # any of its vertices; fix vertex 0 as the starting point.
        path[0] = 0
        if not self.hamCycleUtil(path, 1):
            print ("Solution does not exist\n")
            return False

        self.printSolution(path)
        return True

    def printSolution(self, path):
        """Print the found cycle, repeating the start vertex at the end."""
        print ("Solution Exists: Following",
               "is one Hamiltonian Cycle")
        for vertex in path:
            print (vertex, end = " ")
        print (path[0], "\n")
# Driver Code
''' Let us create the following graph
(0)--(1)--(2)
| / \ |
| / \ |
| / \ |
(3)-------(4) '''
g1 = Graph(5)
g1.graph = [ [0, 1, 0, 1, 0], [1, 0, 1, 1, 1],
[0, 1, 0, 0, 1,],[1, 1, 0, 0, 1],
[0, 1, 1, 1, 0], ]
# Print the solution
g1.hamCycle();
''' Let us create the following graph
(0)--(1)--(2)
| / \ |
| / \ |
| / \ |
(3) (4) '''
g2 = Graph(5)
g2.graph = [ [0, 1, 0, 1, 0], [1, 0, 1, 1, 1],
[0, 1, 0, 0, 1,], [1, 1, 0, 0, 0],
[0, 1, 1, 0, 0], ]
# Print the solution
g2.hamCycle();
| 21.848214 | 49 | 0.604005 |
class Graph():
def __init__(self, vertices):
self.graph = [[0 for column in range(vertices)]
for row in range(vertices)]
self.V = vertices
def isSafe(self, v, pos, path):
if self.graph[ path[pos-1] ][v] == 0:
return False
for vertex in path:
if vertex == v:
return False
return True
def hamCycleUtil(self, path, pos):
if pos == self.V:
if self.graph[ path[pos-1] ][ path[0] ] == 1:
return True
else:
return False
# we included 0 as starting point in hamCycle()
for v in range(1,self.V):
if self.isSafe(v, pos, path) == True:
path[pos] = v
if self.hamCycleUtil(path, pos+1) == True:
return True
# Remove current vertex if it doesn't
path[pos] = -1
return False
def hamCycle(self):
path = [-1] * self.V
path[0] = 0
if self.hamCycleUtil(path,1) == False:
print ("Solution does not exist\n")
return False
self.printSolution(path)
return True
def printSolution(self, path):
print ("Solution Exists: Following",
"is one Hamiltonian Cycle")
for vertex in path:
print (vertex, end = " ")
print (path[0], "\n")
g1 = Graph(5)
g1.graph = [ [0, 1, 0, 1, 0], [1, 0, 1, 1, 1],
[0, 1, 0, 0, 1,],[1, 1, 0, 0, 1],
[0, 1, 1, 1, 0], ]
g1.hamCycle();
g2 = Graph(5)
g2.graph = [ [0, 1, 0, 1, 0], [1, 0, 1, 1, 1],
[0, 1, 0, 0, 1,], [1, 1, 0, 0, 0],
[0, 1, 1, 0, 0], ]
g2.hamCycle();
| true | true |
f73a12a5c0a914cbd4457a81f7e3871ccb3db12a | 2,289 | py | Python | Modules/nn/architectures/DenseNet/classes.py | iheb-brini/fitness-lab | 2d82d7a2ecba27f535cda880865e6d9ed446eac5 | [
"MIT"
] | null | null | null | Modules/nn/architectures/DenseNet/classes.py | iheb-brini/fitness-lab | 2d82d7a2ecba27f535cda880865e6d9ed446eac5 | [
"MIT"
] | null | null | null | Modules/nn/architectures/DenseNet/classes.py | iheb-brini/fitness-lab | 2d82d7a2ecba27f535cda880865e6d9ed446eac5 | [
"MIT"
] | null | null | null | from torch import nn, cat
from .constants import NUM_CONVS_IN_DENSE_BLOCKS
def conv_block(in_channels, out_channels):
    """Dense-layer unit: BN-ReLU followed by a padded 3x3 convolution, so the
    spatial size of the input is preserved."""
    return nn.Sequential(
        nn.BatchNorm2d(in_channels),
        nn.ReLU(),
        nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
    )
class DenseBlock(nn.Module):
    """Stack of conv_blocks with dense connectivity: each block receives the
    concatenation of the original input and all previous block outputs, so
    every block adds ``out_channels`` feature maps to the running total."""

    def __init__(self, num_convs, in_channels, out_channels, **kwargs):
        super().__init__(**kwargs)
        # The i-th conv sees in_channels plus i previously-grown feature maps.
        layers = [
            conv_block(in_channels + i * out_channels, out_channels)
            for i in range(num_convs)
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, X):
        features = X
        for layer in self.net:
            new_features = layer(features)
            # Channel-wise concatenation implements the dense skip connections.
            features = cat((features, new_features), axis=1)
        return features
def transitive_block(in_channels, out_channels):
    """DenseNet transition layer: BN-ReLU, a 1x1 convolution that changes the
    channel count, then 2x2 average pooling that halves the spatial size."""
    return nn.Sequential(
        nn.BatchNorm2d(in_channels),
        nn.ReLU(),
        nn.Conv2d(in_channels, out_channels, kernel_size=1),
        nn.AvgPool2d(kernel_size=2, stride=2),
    )
class DenseNet(nn.Module):
    """DenseNet-style classifier: a strided 7x7 stem, alternating dense and
    transition blocks sized by NUM_CONVS_IN_DENSE_BLOCKS, and a 10-way
    linear head after global average pooling."""

    def __init__(self, in_channels, **kwargs):
        super().__init__(**kwargs)
        num_channels, growth_rate = 64, 32
        stage_sizes = NUM_CONVS_IN_DENSE_BLOCKS

        # Stem: strided 7x7 conv + BN/ReLU + max pool.
        layers = [
            nn.Conv2d(in_channels, 64, kernel_size=7, stride=2, padding=3),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
        ]

        for i, num_convs in enumerate(stage_sizes):
            layers.append(DenseBlock(num_convs, num_channels, growth_rate))
            num_channels += num_convs * growth_rate
            # Every stage except the last is followed by a transition layer
            # halving both the channel count and the spatial resolution.
            if i != len(stage_sizes) - 1:
                layers.append(transitive_block(num_channels, num_channels // 2))
                num_channels //= 2

        # Head: global average pool feeding a 10-way linear classifier.
        layers.extend([
            nn.BatchNorm2d(num_channels),
            nn.ReLU(),
            nn.AdaptiveAvgPool2d((1, 1)),
            nn.Flatten(),
            nn.Linear(num_channels, 10),
        ])
        self.blocks = nn.Sequential(*layers)

    def forward(self, X):
        return self.blocks(X)
| 30.118421 | 98 | 0.609 | from torch import nn, cat
from .constants import NUM_CONVS_IN_DENSE_BLOCKS
def conv_block(in_channels, out_channels):
blk = nn.Sequential(
nn.BatchNorm2d(in_channels), nn.ReLU(),
nn.Conv2d(in_channels, out_channels,
kernel_size=3, padding=1)
)
return blk
class DenseBlock(nn.Module):
def __init__(self, num_convs, in_channels, out_channels, **kwargs):
super().__init__(**kwargs)
block_list = []
for i in range(num_convs):
block_list.append(conv_block(
out_channels*i + in_channels, out_channels))
self.net = nn.Sequential(*block_list)
def forward(self, X):
for layer in self.net:
Y = layer(X)
X = cat((X, Y), axis=1)
return X
def transitive_block(in_channels, out_channels):
blk = nn.Sequential(
nn.BatchNorm2d(in_channels), nn.ReLU(),
nn.Conv2d(in_channels, out_channels,
kernel_size=1),
nn.AvgPool2d(kernel_size=2, stride=2)
)
return blk
class DenseNet(nn.Module):
def __init__(self, in_channels, **kwargs):
super().__init__(**kwargs)
num_channels, growth_rate = 64, 32
num_convs_in_dense_blocks = NUM_CONVS_IN_DENSE_BLOCKS
list_blocks = [
nn.Conv2d(in_channels, 64, kernel_size=7, stride=2, padding=3),
nn.BatchNorm2d(64), nn.ReLU(),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
]
for i, num_convs in enumerate(num_convs_in_dense_blocks):
list_blocks.append(DenseBlock(
num_convs, num_channels, growth_rate))
num_channels += num_convs * growth_rate
if i != len(num_convs_in_dense_blocks) - 1:
list_blocks.append(transitive_block(
num_channels, num_channels // 2))
num_channels = num_channels // 2
list_blocks.extend([nn.BatchNorm2d(num_channels), nn.ReLU(), nn.AdaptiveAvgPool2d((1, 1)),
nn.Flatten(), nn.Linear(num_channels, 10)])
self.blocks = nn.Sequential(*list_blocks)
def forward(self, X):
return self.blocks(X)
| true | true |
f73a12cc371c83d417aff8ac443d2d5075ef6a7b | 17,480 | py | Python | myGym/envs/base_env.py | gabinsane/myGym | a41c6b11a47eaf19d0c69e67aeb48cf7a999d45a | [
"MIT"
] | 1 | 2021-04-23T20:52:39.000Z | 2021-04-23T20:52:39.000Z | myGym/envs/base_env.py | gabinsane/myGym | a41c6b11a47eaf19d0c69e67aeb48cf7a999d45a | [
"MIT"
] | null | null | null | myGym/envs/base_env.py | gabinsane/myGym | a41c6b11a47eaf19d0c69e67aeb48cf7a999d45a | [
"MIT"
] | 1 | 2021-01-22T16:46:48.000Z | 2021-01-22T16:46:48.000Z | import pybullet_data
import glob
import pybullet
import pybullet_utils.bullet_client as bc
import time
import numpy as np
from gym.utils import seeding
import gym
import os
import inspect
from myGym.envs.camera import Camera
import pkg_resources
currentdir = pkg_resources.resource_filename("myGym", "envs")
repodir = pkg_resources.resource_filename("myGym", "./")
class BaseEnv(gym.Env):
"""
The base class for environments without rendering
Parameters:
:param gui_on: (bool) Whether or not to use PyBullet built-in GUI
:param objects_dir_path: (str) Path to directory with URDF files for objects
:param max_steps: (int) The maximum number of actions per episode
:param show_bounding_boxes_gui: (bool) Whether or not to show bounding boxes in GUI
:param changing_light_gui: (bool) Whether or not to change light in GUI
:param shadows_on_gui: (bool) Whether or not to show shadows in GUI
"""
metadata = {'render.modes': [
'human', 'rgb_array'], 'video.frames_per_second': 50}
def __init__(self,
gui_on=True,
objects_dir_path=pkg_resources.resource_filename("myGym", "envs/"),
max_steps=1024,
show_bounding_boxes_gui=False,
changing_light_gui=False,
shadows_on_gui=True,
timestep=1./240.
):
self.gui_on = gui_on
self.max_steps = max_steps
self.show_bounding_boxes_gui = show_bounding_boxes_gui
self.changing_light_gui = changing_light_gui
self.shadows_on_gui = shadows_on_gui
# Set episode information
self.episode_start_time = None
self.episode_over = False
self.episode_failed = False
self.episode_reward = 0.0
self.episode_final_reward = []
self.episode_final_distance = []
self.episode_number = 0
self.episode_steps = 0
self.episode_max_time = 300
self.episode_info = ""
# Set general params
self.time_step = 1. / 240.
#self.time_step = timestep
self.urdf_root = pybullet_data.getDataPath()
self.observation = {}
# Set objects information
self.objects_dir_path = objects_dir_path
self.env_objects = []
self.scene_objects_uids = {}
self.all_objects_filenames = self._get_all_urdf_filenames(self.objects_dir_path)
# Set GUI
self._connect_to_physics_server()
# Set env params and load models
self._set_physics()
self._setup_scene()
self._set_observation_space()
self._set_action_space()
def _connect_to_physics_server(self):
"""
Connect to the PyBullet physics server in SHARED_MEMORY, GUI or DIRECT mode
"""
if self.gui_on:
self.p = bc.BulletClient(connection_mode=pybullet.GUI)
# if (self.p < 0):
# self.p = bc.BulletClient(connection_mode=p.GUI)
self._set_gui_mode()
else:
self.p = bc.BulletClient(connection_mode=pybullet.DIRECT)
self.p.setPhysicsEngineParameter(enableFileCaching=0)
def _set_gui_mode(self):
"""
Set GUI parameters: camera, shadows, extra elements
"""
self.p.resetDebugVisualizerCamera(3.3, -40, -41, [0.0, 0.0, 0.33])
self.p.configureDebugVisualizer(self.p.COV_ENABLE_SHADOWS, self.shadows_on_gui)
self.p.configureDebugVisualizer(self.p.COV_ENABLE_GUI, 0)
def _set_physics(self):
"""
Set physics engine parameters
"""
self.p.setGravity(0, 0, -9.81)
self.p.setPhysicsEngineParameter(solverResidualThreshold=0.001, numSolverIterations=150, numSubSteps=10, useSplitImpulse=1, collisionFilterMode=1, constraintSolverType=self.p.CONSTRAINT_SOLVER_LCP_DANTZIG, globalCFM=0.000001)
self.p.setTimeStep(self.time_step)
self.p.setRealTimeSimulation(0)
self.p.setPhysicsEngineParameter(enableConeFriction=1)
print(self.p.getPhysicsEngineParameters())
def _setup_scene(self):
"""
Set up scene elements (furniture, objects, robots)
"""
raise NotImplementedError
def _set_observation_space(self):
"""
Set limits of observations
"""
raise NotImplementedError
def _set_action_space(self):
"""
Set limits of actions
"""
raise NotImplementedError
def _get_observation(self):
"""
Get info about the state of the environment
Returns:
:return observation: (object) Observation of the environment
"""
raise NotImplementedError
def step(self, action):
"""
Apply action on the environment
Parameters:
:param action: (object) An action provided by the agent
Returns:
:return observation: (object)
:return reward: (float)
:return done: (bool):
:return info: (dict):
"""
raise NotImplementedError
def _add_scene_object_uid(self, scene_object_uid, name):
"""
Call this method in order to enable texturization of object
Parameters:
:param scene_object: (int)
"""
self.scene_objects_uids[scene_object_uid] = name
def get_scene_object_uid_by_name(self, name):
for uid, object_name in self.scene_objects_uids.items():
if name == object_name:
return uid
return None
def seed(self, seed=None):
"""
Set the seed for this env's random number generator(s)
"""
self.np_random, seed = seeding.np_random(seed)
return [seed]
def hard_reset(self):
"""
Full reset of the simulation. Delete and load again all objects and reset physics.
"""
self.p.resetSimulation()
self.p.disconnect()
self._connect_to_physics_server()
self.scene_objects_uids = {}
#self.episode_number = 0
self._set_physics()
self._setup_scene()
def _restart_episode(self):
"""
Reset episode information and delete all objects
"""
self.p.removeAllUserDebugItems()
self.episode_start_time = time.time()
self.episode_over = False
self.episode_failed = False
self.episode_reward = 0.0
self.episode_steps = 0
def reset(self, hard=False):
"""
Reset the state of the environment
"""
if hard:
self.hard_reset()
else:
self._remove_all_objects()
self._restart_episode()
def _draw_bounding_boxes(self):
"""
Show bounding boxes in tne PyBullet GUI
"""
for object in self.env_objects:
object.draw_bounding_box()
def _compute_reward(self):
"""
Compute reward for the agent
"""
return NotImplementedError
def _print_episode_summary(self, info_dict={}):
"""
Show an extra information about the episode
Parameters:
:param info_dict: (dict) Extra info
"""
if self.episode_failed:
episode_status = "FAILURE"
else:
episode_status = "SUCCESS"
print("#---------Episode-Summary---------#")
print("Episode number: " + str(self.episode_number))
print("Episode's number of steps: " + str(self.episode_steps))
#print("Episode status: " + episode_status)
print("Episode info: " + self.episode_info)
print("Episode reward: " + str(self.episode_reward))
#print("Last step reward: " + str(self.reward.rewards_history[-1]))
print("#---------------------------------#")
for key, value in info_dict.items():
print(key + ": " + str(value))
def _get_random_urdf_filenames(self, n, used_objects=None):
"""
Sample random URDF files from directory with objects URDFs
Parameters:
:param n: (int) Number of URDF's
:param used_objects: (list) Specified subset of objects
Returns:
:return selected_objects_filenames: (list)
"""
if used_objects or (self.all_objects_filenames is None):
all_objects_filenames = []
for object_name in used_objects:
if "virtual" in object_name:
all_objects_filenames.append(object_name)
for file in self.all_objects_filenames:
if '/'+object_name+'.' in file:
all_objects_filenames.append(file)
else:
# uses self.all_objects_filenames
pass
assert all_objects_filenames is not None
selected_objects_filenames = []
total_num_objects = len(all_objects_filenames)
if (n <= total_num_objects):
selected_objects = np.random.choice(
np.arange(total_num_objects), n, replace=True)
else:
selected_objects = list(np.arange(total_num_objects))
remain = n - total_num_objects
selected_objects += list(np.random.choice(
np.arange(total_num_objects), remain))
for object_id in selected_objects:
selected_objects_filenames.append(all_objects_filenames[object_id])
return selected_objects_filenames
def _get_all_urdf_filenames(self, dir):
"""
Get all URDF filenames from directory
Parameters:
:param dir: (int) Number of URDFs
Returns:
:return filenames: (list)
"""
list_all = []
for (dirpath, dirnames, filenames) in os.walk(self.objects_dir_path):
if '_old' not in dirpath and 'urdf' in dirpath:
list_all += [os.path.join(dirpath, file) for file in filenames]
return list_all
def _remove_object(self, object):
"""
Totally remove object from the simulation
Parameters:
:param object: (EnvObject) Object to remove
"""
self.env_objects.remove(object)
self.p.removeBody(object.uid)
def _remove_all_objects(self):
"""
Remove all objects from simulation (not scene objects or robots)
"""
env_objects_copy = self.env_objects[:]
for env_object in env_objects_copy:
self._remove_object(env_object)
def get_texturizable_objects_uids(self):
"""
Get all objects in the environment, on which textures can be applied
Returns:
:return texturizable_objects_uids: (list)
"""
return [object.get_uid() for object in self.env_objects] + list(self.scene_objects_uids.keys())
def get_colorizable_objects_uids(self):
"""
Get all objects in the environment, which color can be changed
Returns:
:return colorizable_objects_uids: (list)
"""
return [object.get_uid() for object in self.env_objects] + list(self.scene_objects_uids.keys())
def __del__(self):
"""
Disconnect from the physics server
"""
self.p.disconnect()
class CameraEnv(BaseEnv):
"""
The class for environments with rendering
Parameters:
:param camera_resolution: (list) The number of pixels in image (WxH)
:param shadows_on: (bool) Whether or not to use shadows while rendering, only applies to ER_TINY_RENDERER
:param render_on: (bool) Turn on rendering
:param renderer: (int) self.p.ER_TINY_RENDERER (CPU) or self.p.ER_BULLET_HARDWARE_OPENGL (GPU)
:param active_cameras: (list) Set 1 at a position(=camera number) to save images from this camera
"""
def __init__(self, camera_resolution=[640, 480], shadows_on=True,
render_on=True, renderer=pybullet.ER_BULLET_HARDWARE_OPENGL,
active_cameras=None, **kwargs):
super(CameraEnv, self).__init__(**kwargs)
self.camera_resolution = camera_resolution
self.shadows_on = shadows_on
self.render_on = render_on
self.renderer = renderer
self.active_cameras = active_cameras
self.cameras = []
self.set_light()
self._set_cameras()
def set_light(self, light_direction=[1, 1, 1], light_color=[0.1, 0.1, 0.1],
light_distance=1., light_ambient=1., light_diffuse=1.,
light_specular=1.):
"""
Set light parameters for rendering, doesn't affect PyBullet GUI. Appart from light_direction, all parameters only apply to ER_TINY_RENDERER.
Parameters:
:param light_direction: (list) Specifies the world position of the light source
:param light_color: (list) Directional light color in RGB in range 0..1
:param light_distance: (float) Distance of the light along the normalized light_direction
:param light_ambient: (float) Light ambient coefficient in range 0..1
:param light_diffuse: (float) Light diffuse coefficient in range 0..1
:param light_specular: (float) Light specular coefficient in range 0..1
"""
self.light_direction = light_direction
self.light_color = light_color
self.light_distance = light_distance
self.light_ambient = light_ambient
self.light_diffuse = light_diffuse
self.light_specular = light_specular
def get_render_parameters(self):
"""
Return environment parameters for rendering, initially is intended to
use by cameras
Returns:
:return render_parameters: (dict) Render parameters
"""
return {
"width": self.camera_resolution[0],
"height": self.camera_resolution[1],
"lightDirection": self.light_direction,
"lightColor": self.light_color,
"lightDistance": self.light_distance,
"shadow": 1 if self.shadows_on else 0,
"lightAmbientCoeff": self.light_ambient,
"lightDiffuseCoeff": self.light_diffuse,
"lightSpecularCoeff": self.light_specular,
"renderer": self.renderer
}
def _set_cameras(self):
"""
Set cameras available to use for rendering
"""
raise NotImplementedError
def get_cameras(self):
return self.cameras
def add_camera(self, **kwargs):
"""
Add new camera to the environment
Parameters:
:param position: (list) Eye position in Cartesian world coordinates
:prarm target_position: (list) Position of the target point
:param up_vector: (list) Up vector of the camera
:param up_axis_index: (int) Either 1 for Y or 2 for Z axis up
:param yaw: (float) Yaw angle in degrees left/right around up-axis
:param pitch: (float) Pitch in degrees up/down
:param roll: (float) Roll in degrees around forward vector
:param distance: (float) Distance from eye to focus point
:param field_of_view: (float) Field of view
:param near_plane_distance: (float) Near plane distance
:param far_plane_distance: (float) Far plane distance
"""
self.cameras.append(Camera(env=self, **kwargs))
def set_active_cameras(self, active_cameras):
if (len(active_cameras) == len(self.cameras)):
self.active_cameras = active_cameras
def change_current_camera(self, camera_num):
print("Change camera to " + str(self.current_camera))
self.current_camera = camera_num
def render(self, mode="rgb_array", camera_id=None):
"""
Get image (image, depth, segmentation_mask) from camera or active cameras
Parameters:
:param mode: (str) rgb_array to return RGB image
:param camera_id: (int) Get image from specified camera
Returns:
:return camera_data: (dict) Key: camera_id, Value: info from camera
"""
if mode != "rgb_array":
return np.array([])
camera_data = {}
if self.render_on:
if camera_id is not None:
camera_data[camera_id] = self.cameras[camera_id].render()
else:
for camera_num in range(len(self.active_cameras)):
if self.active_cameras[camera_num]:
camera_data[camera_num] = self.cameras[camera_num].render()
return camera_data
def project_point_to_camera_image(self, point, camera_id):
"""
Project 3D point in Cartesian world coordinates to 2D point in pixel space
Parameters:
:param point: (list) 3D point in Cartesian world coordinates
:param camera_id: (int) Index of camera to project on
Returns:
:return 2d_point: (list) 2D coordinates of point on imageg
"""
return self.cameras[camera_id].project_point_to_image(point)
def get_camera_opencv_matrix_values(self, camera_id):
"""
Compute values of OpenCV matrix
Parameters:
:param camera_id: (int) Index of camera to get matrix from
Returns:
:return values: (dict) fx, fy, cx, cy values
"""
return self.cameras[camera_id].get_opencv_camera_matrix_values()
| 35.528455 | 233 | 0.618593 | import pybullet_data
import glob
import pybullet
import pybullet_utils.bullet_client as bc
import time
import numpy as np
from gym.utils import seeding
import gym
import os
import inspect
from myGym.envs.camera import Camera
import pkg_resources
currentdir = pkg_resources.resource_filename("myGym", "envs")
repodir = pkg_resources.resource_filename("myGym", "./")
class BaseEnv(gym.Env):
metadata = {'render.modes': [
'human', 'rgb_array'], 'video.frames_per_second': 50}
def __init__(self,
gui_on=True,
objects_dir_path=pkg_resources.resource_filename("myGym", "envs/"),
max_steps=1024,
show_bounding_boxes_gui=False,
changing_light_gui=False,
shadows_on_gui=True,
timestep=1./240.
):
self.gui_on = gui_on
self.max_steps = max_steps
self.show_bounding_boxes_gui = show_bounding_boxes_gui
self.changing_light_gui = changing_light_gui
self.shadows_on_gui = shadows_on_gui
self.episode_start_time = None
self.episode_over = False
self.episode_failed = False
self.episode_reward = 0.0
self.episode_final_reward = []
self.episode_final_distance = []
self.episode_number = 0
self.episode_steps = 0
self.episode_max_time = 300
self.episode_info = ""
self.time_step = 1. / 240.
self.urdf_root = pybullet_data.getDataPath()
self.observation = {}
self.objects_dir_path = objects_dir_path
self.env_objects = []
self.scene_objects_uids = {}
self.all_objects_filenames = self._get_all_urdf_filenames(self.objects_dir_path)
self._connect_to_physics_server()
self._set_physics()
self._setup_scene()
self._set_observation_space()
self._set_action_space()
def _connect_to_physics_server(self):
if self.gui_on:
self.p = bc.BulletClient(connection_mode=pybullet.GUI)
self._set_gui_mode()
else:
self.p = bc.BulletClient(connection_mode=pybullet.DIRECT)
self.p.setPhysicsEngineParameter(enableFileCaching=0)
def _set_gui_mode(self):
self.p.resetDebugVisualizerCamera(3.3, -40, -41, [0.0, 0.0, 0.33])
self.p.configureDebugVisualizer(self.p.COV_ENABLE_SHADOWS, self.shadows_on_gui)
self.p.configureDebugVisualizer(self.p.COV_ENABLE_GUI, 0)
def _set_physics(self):
self.p.setGravity(0, 0, -9.81)
self.p.setPhysicsEngineParameter(solverResidualThreshold=0.001, numSolverIterations=150, numSubSteps=10, useSplitImpulse=1, collisionFilterMode=1, constraintSolverType=self.p.CONSTRAINT_SOLVER_LCP_DANTZIG, globalCFM=0.000001)
self.p.setTimeStep(self.time_step)
self.p.setRealTimeSimulation(0)
self.p.setPhysicsEngineParameter(enableConeFriction=1)
print(self.p.getPhysicsEngineParameters())
def _setup_scene(self):
raise NotImplementedError
def _set_observation_space(self):
raise NotImplementedError
def _set_action_space(self):
raise NotImplementedError
def _get_observation(self):
raise NotImplementedError
def step(self, action):
raise NotImplementedError
def _add_scene_object_uid(self, scene_object_uid, name):
self.scene_objects_uids[scene_object_uid] = name
def get_scene_object_uid_by_name(self, name):
for uid, object_name in self.scene_objects_uids.items():
if name == object_name:
return uid
return None
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def hard_reset(self):
self.p.resetSimulation()
self.p.disconnect()
self._connect_to_physics_server()
self.scene_objects_uids = {}
self._set_physics()
self._setup_scene()
def _restart_episode(self):
self.p.removeAllUserDebugItems()
self.episode_start_time = time.time()
self.episode_over = False
self.episode_failed = False
self.episode_reward = 0.0
self.episode_steps = 0
def reset(self, hard=False):
if hard:
self.hard_reset()
else:
self._remove_all_objects()
self._restart_episode()
def _draw_bounding_boxes(self):
for object in self.env_objects:
object.draw_bounding_box()
def _compute_reward(self):
return NotImplementedError
def _print_episode_summary(self, info_dict={}):
if self.episode_failed:
episode_status = "FAILURE"
else:
episode_status = "SUCCESS"
print("#---------Episode-Summary---------#")
print("Episode number: " + str(self.episode_number))
print("Episode's number of steps: " + str(self.episode_steps))
#print("Episode status: " + episode_status)
print("Episode info: " + self.episode_info)
print("Episode reward: " + str(self.episode_reward))
#print("Last step reward: " + str(self.reward.rewards_history[-1]))
print("#---------------------------------#")
for key, value in info_dict.items():
print(key + ": " + str(value))
def _get_random_urdf_filenames(self, n, used_objects=None):
if used_objects or (self.all_objects_filenames is None):
all_objects_filenames = []
for object_name in used_objects:
if "virtual" in object_name:
all_objects_filenames.append(object_name)
for file in self.all_objects_filenames:
if '/'+object_name+'.' in file:
all_objects_filenames.append(file)
else:
# uses self.all_objects_filenames
pass
assert all_objects_filenames is not None
selected_objects_filenames = []
total_num_objects = len(all_objects_filenames)
if (n <= total_num_objects):
selected_objects = np.random.choice(
np.arange(total_num_objects), n, replace=True)
else:
selected_objects = list(np.arange(total_num_objects))
remain = n - total_num_objects
selected_objects += list(np.random.choice(
np.arange(total_num_objects), remain))
for object_id in selected_objects:
selected_objects_filenames.append(all_objects_filenames[object_id])
return selected_objects_filenames
def _get_all_urdf_filenames(self, dir):
list_all = []
for (dirpath, dirnames, filenames) in os.walk(self.objects_dir_path):
if '_old' not in dirpath and 'urdf' in dirpath:
list_all += [os.path.join(dirpath, file) for file in filenames]
return list_all
def _remove_object(self, object):
self.env_objects.remove(object)
self.p.removeBody(object.uid)
def _remove_all_objects(self):
env_objects_copy = self.env_objects[:]
for env_object in env_objects_copy:
self._remove_object(env_object)
def get_texturizable_objects_uids(self):
return [object.get_uid() for object in self.env_objects] + list(self.scene_objects_uids.keys())
def get_colorizable_objects_uids(self):
return [object.get_uid() for object in self.env_objects] + list(self.scene_objects_uids.keys())
def __del__(self):
self.p.disconnect()
class CameraEnv(BaseEnv):
def __init__(self, camera_resolution=[640, 480], shadows_on=True,
render_on=True, renderer=pybullet.ER_BULLET_HARDWARE_OPENGL,
active_cameras=None, **kwargs):
super(CameraEnv, self).__init__(**kwargs)
self.camera_resolution = camera_resolution
self.shadows_on = shadows_on
self.render_on = render_on
self.renderer = renderer
self.active_cameras = active_cameras
self.cameras = []
self.set_light()
self._set_cameras()
def set_light(self, light_direction=[1, 1, 1], light_color=[0.1, 0.1, 0.1],
light_distance=1., light_ambient=1., light_diffuse=1.,
light_specular=1.):
self.light_direction = light_direction
self.light_color = light_color
self.light_distance = light_distance
self.light_ambient = light_ambient
self.light_diffuse = light_diffuse
self.light_specular = light_specular
def get_render_parameters(self):
return {
"width": self.camera_resolution[0],
"height": self.camera_resolution[1],
"lightDirection": self.light_direction,
"lightColor": self.light_color,
"lightDistance": self.light_distance,
"shadow": 1 if self.shadows_on else 0,
"lightAmbientCoeff": self.light_ambient,
"lightDiffuseCoeff": self.light_diffuse,
"lightSpecularCoeff": self.light_specular,
"renderer": self.renderer
}
def _set_cameras(self):
raise NotImplementedError
def get_cameras(self):
return self.cameras
def add_camera(self, **kwargs):
self.cameras.append(Camera(env=self, **kwargs))
def set_active_cameras(self, active_cameras):
if (len(active_cameras) == len(self.cameras)):
self.active_cameras = active_cameras
def change_current_camera(self, camera_num):
print("Change camera to " + str(self.current_camera))
self.current_camera = camera_num
def render(self, mode="rgb_array", camera_id=None):
if mode != "rgb_array":
return np.array([])
camera_data = {}
if self.render_on:
if camera_id is not None:
camera_data[camera_id] = self.cameras[camera_id].render()
else:
for camera_num in range(len(self.active_cameras)):
if self.active_cameras[camera_num]:
camera_data[camera_num] = self.cameras[camera_num].render()
return camera_data
def project_point_to_camera_image(self, point, camera_id):
return self.cameras[camera_id].project_point_to_image(point)
def get_camera_opencv_matrix_values(self, camera_id):
return self.cameras[camera_id].get_opencv_camera_matrix_values()
| true | true |
f73a1347135db69cb5b55591e12984998b2b6ef0 | 1,897 | py | Python | minisculus/wheel/_wheel_chain.py | rvodden/minisculus | 097f0be1e061c1e313d929e1d71c17c2a402d71c | [
"MIT"
] | null | null | null | minisculus/wheel/_wheel_chain.py | rvodden/minisculus | 097f0be1e061c1e313d929e1d71c17c2a402d71c | [
"MIT"
] | null | null | null | minisculus/wheel/_wheel_chain.py | rvodden/minisculus | 097f0be1e061c1e313d929e1d71c17c2a402d71c | [
"MIT"
] | null | null | null | from typing import List
from pydantic import validate_arguments
from minisculus.wheel._wheel import Wheel
class WheelChain:
"""Processes indexes using a chain of wheels."""
_wheels: List[Wheel]
def __init__(self, wheels: List[Wheel]):
self._validate_wheels(wheels)
self._wheels = wheels
@validate_arguments
def encode(self, idx: int) -> int:
"""This is the encoding function.
Args:
idx: the list of index to encode.
Returns:
the encoded index.
"""
idxs = [idx]
for wheel in self._wheels:
idxs.append(wheel.encode(idxs[-1]))
for wheel in self._wheels:
wheel.post_encode(idxs)
return idxs[-1]
@validate_arguments()
def decode(self, idx: int) -> int:
"""This is the decoding function.
Args:
idx: the list of indexes to be decoded.
Returns:
the decoded index.
"""
idxs = [idx]
for wheel in self._wheels:
idxs.append(wheel.decode(idxs[-1]))
for wheel in self._wheels:
wheel.post_decode(idxs)
return idxs[-1]
@property
@validate_arguments
def wheels(self) -> List[Wheel]:
"""Returns the wheels which constitutes the WheelChain.
Returns:
list of wheels.
"""
return self._wheels
@property
@validate_arguments
def values(self) -> List[int]:
"""Returns a list of the values of each of the wheels.
Returns:
list of wheels.
"""
return [w.value for w in self._wheels]
@staticmethod
def _validate_wheels(wheels: List[Wheel]) -> None:
l: int = len(wheels)
if l > 10:
raise ValueError(
f"WheelChain can not have more than 10 wheels. {l} provided."
)
| 23.7125 | 77 | 0.565103 | from typing import List
from pydantic import validate_arguments
from minisculus.wheel._wheel import Wheel
class WheelChain:
_wheels: List[Wheel]
def __init__(self, wheels: List[Wheel]):
self._validate_wheels(wheels)
self._wheels = wheels
@validate_arguments
def encode(self, idx: int) -> int:
idxs = [idx]
for wheel in self._wheels:
idxs.append(wheel.encode(idxs[-1]))
for wheel in self._wheels:
wheel.post_encode(idxs)
return idxs[-1]
@validate_arguments()
def decode(self, idx: int) -> int:
idxs = [idx]
for wheel in self._wheels:
idxs.append(wheel.decode(idxs[-1]))
for wheel in self._wheels:
wheel.post_decode(idxs)
return idxs[-1]
@property
@validate_arguments
def wheels(self) -> List[Wheel]:
return self._wheels
@property
@validate_arguments
def values(self) -> List[int]:
return [w.value for w in self._wheels]
@staticmethod
def _validate_wheels(wheels: List[Wheel]) -> None:
l: int = len(wheels)
if l > 10:
raise ValueError(
f"WheelChain can not have more than 10 wheels. {l} provided."
)
| true | true |
f73a14ce9ea1e132cdb8761e5f061240a16538fd | 520 | py | Python | Season 09 - Advanced built-in functions in Python/Episode 02 - Generators class and iterators.py | Pythobit/Python-tutorial | b0743eaa9c237c3578131ead1b3f2c295f11b7ee | [
"MIT"
] | 3 | 2021-02-19T18:33:00.000Z | 2021-08-03T14:56:50.000Z | Season 09 - Advanced built-in functions in Python/Episode 02 - Generators class and iterators.py | barawalojas/Python-tutorial | 3f4b2b073e421888b3d62ff634658317d9abcb9b | [
"MIT"
] | 1 | 2021-07-10T14:37:57.000Z | 2021-07-20T09:51:39.000Z | Season 09 - Advanced built-in functions in Python/Episode 02 - Generators class and iterators.py | barawalojas/Python-tutorial | 3f4b2b073e421888b3d62ff634658317d9abcb9b | [
"MIT"
] | 1 | 2021-08-02T05:39:38.000Z | 2021-08-02T05:39:38.000Z | # generator class and iterators
class FirstHundredNumbers:
def __init__(self):
self.numbers = 0
def __next__(self):
if self.numbers < 100:
current = self.numbers
self.numbers += 1
return current
else:
raise StopIteration()
my_gen = FirstHundredNumbers()
print(next(my_gen))
print(next(my_gen))
"""
def __next__ is an iterator and class FirstHundredNumbers are not iterable
and there's a difference between iterators and iterable.
"""
| 20.8 | 74 | 0.651923 |
class FirstHundredNumbers:
def __init__(self):
self.numbers = 0
def __next__(self):
if self.numbers < 100:
current = self.numbers
self.numbers += 1
return current
else:
raise StopIteration()
my_gen = FirstHundredNumbers()
print(next(my_gen))
print(next(my_gen))
| true | true |
f73a14ed8bf965f6136ac165652854c5f0b67b0f | 3,157 | py | Python | Fracktory3-3.0_b11/plugins/Tools/MirrorTool/MirrorTool.py | ganeshmev/Fracktory3-3.0_b11_KLE | 16066e6993b96a880aa1a2f044a27930cbd0787d | [
"MIT"
] | null | null | null | Fracktory3-3.0_b11/plugins/Tools/MirrorTool/MirrorTool.py | ganeshmev/Fracktory3-3.0_b11_KLE | 16066e6993b96a880aa1a2f044a27930cbd0787d | [
"MIT"
] | null | null | null | Fracktory3-3.0_b11/plugins/Tools/MirrorTool/MirrorTool.py | ganeshmev/Fracktory3-3.0_b11_KLE | 16066e6993b96a880aa1a2f044a27930cbd0787d | [
"MIT"
] | null | null | null | # Copyright (c) 2015 Ultimaker B.V.
# Uranium is released under the terms of the LGPLv3 or higher.
from UM.Tool import Tool
from UM.Event import Event, MouseEvent
from UM.Math.Vector import Vector
from UM.Operations.MirrorOperation import MirrorOperation
from UM.Operations.GroupedOperation import GroupedOperation
from UM.Scene.Selection import Selection
from UM.Scene.ToolHandle import ToolHandle
from PyQt5.QtCore import Qt
from . import MirrorToolHandle
## Provides the tool to mirror meshes and groups
class MirrorTool(Tool):
def __init__(self):
super().__init__()
self._handle = MirrorToolHandle.MirrorToolHandle()
self._shortcut_key = Qt.Key_M
self._operation_started = False
## Handle mouse and keyboard events
#
# \param event type(Event)
def event(self, event):
super().event(event)
if event.type == Event.MousePressEvent and self._controller.getToolsEnabled():
# Initialise a mirror operation
if MouseEvent.LeftButton not in event.buttons:
return False
id = self._selection_pass.getIdAtPosition(event.x, event.y)
if not id:
return False
if self._handle.isAxis(id):
self.setLockedAxis(id)
self._operation_started = True
self.operationStarted.emit(self)
return True
if event.type == Event.MouseReleaseEvent:
if self._operation_started:
self._operation_started = False
self.operationStopped.emit(self)
# Perform a mirror operation
if self.getLockedAxis() != ToolHandle.NoAxis:
if Selection.getCount() == 1:
node = Selection.getSelectedObject(0)
if self.getLockedAxis() == ToolHandle.XAxis:
mirror = Vector(-1, 1, 1)
elif self.getLockedAxis() == ToolHandle.YAxis:
mirror = Vector(1, -1, 1)
elif self.getLockedAxis() == ToolHandle.ZAxis:
mirror = Vector(1, 1, -1)
else:
mirror = Vector(1, 1, 1)
op = MirrorOperation(node, mirror, mirror_around_center = True)
else:
op = GroupedOperation()
for node in self._getSelectedObjectsWithoutSelectedAncestors():
if self.getLockedAxis() == ToolHandle.XAxis:
mirror = Vector(-1, 1, 1)
elif self.getLockedAxis() == ToolHandle.YAxis:
mirror = Vector(1, -1, 1)
elif self.getLockedAxis() == ToolHandle.ZAxis:
mirror = Vector(1, 1, -1)
else:
mirror = Vector(1, 1, 1)
op.addOperation(MirrorOperation(node, mirror, mirror_around_center = True))
op.push()
self.setLockedAxis(ToolHandle.NoAxis)
return True
return False
| 35.077778 | 99 | 0.558125 |
from UM.Tool import Tool
from UM.Event import Event, MouseEvent
from UM.Math.Vector import Vector
from UM.Operations.MirrorOperation import MirrorOperation
from UM.Operations.GroupedOperation import GroupedOperation
from UM.Scene.Selection import Selection
from UM.Scene.ToolHandle import ToolHandle
from PyQt5.QtCore import Qt
from . import MirrorToolHandle
:
super().__init__()
self._handle = MirrorToolHandle.MirrorToolHandle()
self._shortcut_key = Qt.Key_M
self._operation_started = False
t):
super().event(event)
if event.type == Event.MousePressEvent and self._controller.getToolsEnabled():
if MouseEvent.LeftButton not in event.buttons:
return False
id = self._selection_pass.getIdAtPosition(event.x, event.y)
if not id:
return False
if self._handle.isAxis(id):
self.setLockedAxis(id)
self._operation_started = True
self.operationStarted.emit(self)
return True
if event.type == Event.MouseReleaseEvent:
if self._operation_started:
self._operation_started = False
self.operationStopped.emit(self)
if self.getLockedAxis() != ToolHandle.NoAxis:
if Selection.getCount() == 1:
node = Selection.getSelectedObject(0)
if self.getLockedAxis() == ToolHandle.XAxis:
mirror = Vector(-1, 1, 1)
elif self.getLockedAxis() == ToolHandle.YAxis:
mirror = Vector(1, -1, 1)
elif self.getLockedAxis() == ToolHandle.ZAxis:
mirror = Vector(1, 1, -1)
else:
mirror = Vector(1, 1, 1)
op = MirrorOperation(node, mirror, mirror_around_center = True)
else:
op = GroupedOperation()
for node in self._getSelectedObjectsWithoutSelectedAncestors():
if self.getLockedAxis() == ToolHandle.XAxis:
mirror = Vector(-1, 1, 1)
elif self.getLockedAxis() == ToolHandle.YAxis:
mirror = Vector(1, -1, 1)
elif self.getLockedAxis() == ToolHandle.ZAxis:
mirror = Vector(1, 1, -1)
else:
mirror = Vector(1, 1, 1)
op.addOperation(MirrorOperation(node, mirror, mirror_around_center = True))
op.push()
self.setLockedAxis(ToolHandle.NoAxis)
return True
return False
| true | true |
f73a151eaf488ae0f3ef70dec4055c8c6362b10b | 2,810 | py | Python | src/TreeDetector.py | dsilvalo28/AIVA-DAIA | 55b1f547aaf850df1ea3ddd9a2f6b5a2af410889 | [
"CC0-1.0"
] | 1 | 2020-02-25T15:21:13.000Z | 2020-02-25T15:21:13.000Z | src/TreeDetector.py | dsilvalo28/AIVA-DAIA | 55b1f547aaf850df1ea3ddd9a2f6b5a2af410889 | [
"CC0-1.0"
] | 22 | 2020-02-28T10:31:59.000Z | 2020-04-21T20:04:11.000Z | src/TreeDetector.py | dsilvalo28/AIVA-DAIA | 55b1f547aaf850df1ea3ddd9a2f6b5a2af410889 | [
"CC0-1.0"
] | null | null | null | import cv2
import numpy as np
from src.Detector import Detector
# Tree detector class #
class TreeDetector(Detector):
def __init__(self, image_path=None):
self.__image_path = image_path
self.image = None
if image_path is not None:
self.read(self.__image_path)
# *** CONSTANTS *** #
self.__threshold_down = 127
self.__threshold_up = 255
self.__totalm2 = 12000
self.__treesperm2 = 0.6
# *** PRIVATE *** #
def __preprocess_image(self):
"""
:return: Preprocessed set image
"""
preprocessed_image = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
hsv_image = cv2.cvtColor(self.image, cv2.COLOR_BGR2HSV)
return preprocessed_image, hsv_image
# *** PUBLIC *** #
def read(self, image):
"""
:param image: Set the image to work with
"""
self.image = image
def read_from_path(self, image_path):
"""
:param image_path: Set the path to read the image and the image
"""
self.__image_path = image_path
self.image = cv2.imread(self.__image_path)
return self.image
def process_image(self, lc=[0, 100, 100], uc=[120, 255, 255]):
"""
:param lc: [int, int, int] Lower HSV color values
:param uc: [int, int, int] Lower HSV color values
:return: [np.array] 3 channel segmentation mask of the set image
"""
preprocessed_image, hsv_image = self.__preprocess_image()
ret, segmented_image = cv2.threshold(preprocessed_image, self.__threshold_down, self.__threshold_up,
cv2.THRESH_BINARY)
# Creaccion de mascara
lower_color = np.array(lc, dtype='uint8')
upper_color = np.array(uc, dtype='uint8')
mask = cv2.inRange(hsv_image, lower_color, upper_color)
mask_3_channels = np.dstack((mask, mask, mask))
# ret2, thresh = cv2.threshold(mask, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# segmented_image_boolean = segmented_image.astype(np.bool)
return mask_3_channels
def calculate_percentage(self):
"""
:return: Percentage of tree mass of the set image
"""
segmented_image = self.process_image()
percentage = np.mean(segmented_image/2.55)
return percentage
def calculate_m2(self):
"""
:return: m² of tree mass of the set image
"""
percentage = self.calculate_percentage()
m2 = percentage * self.__totalm2
return m2
def calculate_number_trees(self):
"""
:return: Number of trees of the set image
"""
m2 = self.calculate_m2()
n_trees = int(m2 * self.__treesperm2)
return n_trees
| 31.931818 | 108 | 0.603559 | import cv2
import numpy as np
from src.Detector import Detector
class TreeDetector(Detector):
def __init__(self, image_path=None):
self.__image_path = image_path
self.image = None
if image_path is not None:
self.read(self.__image_path)
self.__threshold_down = 127
self.__threshold_up = 255
self.__totalm2 = 12000
self.__treesperm2 = 0.6
def __preprocess_image(self):
preprocessed_image = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
hsv_image = cv2.cvtColor(self.image, cv2.COLOR_BGR2HSV)
return preprocessed_image, hsv_image
def read(self, image):
self.image = image
def read_from_path(self, image_path):
self.__image_path = image_path
self.image = cv2.imread(self.__image_path)
return self.image
def process_image(self, lc=[0, 100, 100], uc=[120, 255, 255]):
preprocessed_image, hsv_image = self.__preprocess_image()
ret, segmented_image = cv2.threshold(preprocessed_image, self.__threshold_down, self.__threshold_up,
cv2.THRESH_BINARY)
lower_color = np.array(lc, dtype='uint8')
upper_color = np.array(uc, dtype='uint8')
mask = cv2.inRange(hsv_image, lower_color, upper_color)
mask_3_channels = np.dstack((mask, mask, mask))
return mask_3_channels
def calculate_percentage(self):
segmented_image = self.process_image()
percentage = np.mean(segmented_image/2.55)
return percentage
def calculate_m2(self):
percentage = self.calculate_percentage()
m2 = percentage * self.__totalm2
return m2
def calculate_number_trees(self):
m2 = self.calculate_m2()
n_trees = int(m2 * self.__treesperm2)
return n_trees
| true | true |
f73a15c0d6a54f3000c9bd50909d3ca60aa4dd51 | 3,178 | py | Python | codes/trainer/networks.py | neonbjb/DL-Art-School | a6f0f854b987ac724e258af8b042ea4459a571bc | [
"Apache-2.0"
] | 12 | 2020-12-13T12:45:03.000Z | 2022-03-29T09:58:15.000Z | codes/trainer/networks.py | neonbjb/DL-Art-School | a6f0f854b987ac724e258af8b042ea4459a571bc | [
"Apache-2.0"
] | 1 | 2020-12-31T01:12:45.000Z | 2021-03-31T11:43:52.000Z | codes/trainer/networks.py | neonbjb/DL-Art-School | a6f0f854b987ac724e258af8b042ea4459a571bc | [
"Apache-2.0"
] | 3 | 2020-12-14T06:04:04.000Z | 2020-12-26T19:11:41.000Z | import importlib
import logging
import os
import pkgutil
import sys
from collections import OrderedDict
from inspect import isfunction, getmembers, signature
import torch
import models.feature_arch as feature_arch
logger = logging.getLogger('base')
class RegisteredModelNameError(Exception):
def __init__(self, name_error):
super().__init__(f'Registered DLAS modules must start with `register_`. Incorrect registration: {name_error}')
# Decorator that allows API clients to show DLAS how to build a nn.Module from an opt dict.
# Functions with this decorator should have a specific naming format:
# `register_<name>` where <name> is the name that will be used in configuration files to reference this model.
# Functions with this decorator are expected to take a single argument:
# - opt: A dict with the configuration options for building the module.
# They should return:
# - A torch.nn.Module object for the model being defined.
def register_model(func):
if func.__name__.startswith("register_"):
func._dlas_model_name = func.__name__[9:]
assert func._dlas_model_name
else:
raise RegisteredModelNameError(func.__name__)
func._dlas_registered_model = True
return func
def find_registered_model_fns(base_path='models'):
found_fns = {}
module_iter = pkgutil.walk_packages([base_path])
for mod in module_iter:
if os.name == 'nt':
if os.path.join(os.getcwd(), base_path) not in mod.module_finder.path:
continue # I have no idea why this is necessary - I think it's a bug in the latest PyWindows release.
if mod.ispkg:
EXCLUSION_LIST = ['flownet2']
if mod.name not in EXCLUSION_LIST:
found_fns.update(find_registered_model_fns(f'{base_path}/{mod.name}'))
else:
mod_name = f'{base_path}/{mod.name}'.replace('/', '.')
importlib.import_module(mod_name)
for mod_fn in getmembers(sys.modules[mod_name], isfunction):
if hasattr(mod_fn[1], "_dlas_registered_model"):
found_fns[mod_fn[1]._dlas_model_name] = mod_fn[1]
return found_fns
class CreateModelError(Exception):
def __init__(self, name, available):
super().__init__(f'Could not find the specified model name: {name}. Tip: If your model is in a'
f' subdirectory, that directory must contain an __init__.py to be scanned. Available models:'
f'{available}')
def create_model(opt, opt_net, other_nets=None):
which_model = opt_net['which_model']
# For backwards compatibility.
if not which_model:
which_model = opt_net['which_model_G']
if not which_model:
which_model = opt_net['which_model_D']
registered_fns = find_registered_model_fns()
if which_model not in registered_fns.keys():
raise CreateModelError(which_model, list(registered_fns.keys()))
num_params = len(signature(registered_fns[which_model]).parameters)
if num_params == 2:
return registered_fns[which_model](opt_net, opt)
else:
return registered_fns[which_model](opt_net, opt, other_nets) | 41.272727 | 119 | 0.697294 | import importlib
import logging
import os
import pkgutil
import sys
from collections import OrderedDict
from inspect import isfunction, getmembers, signature
import torch
import models.feature_arch as feature_arch
logger = logging.getLogger('base')
class RegisteredModelNameError(Exception):
def __init__(self, name_error):
super().__init__(f'Registered DLAS modules must start with `register_`. Incorrect registration: {name_error}')
def register_model(func):
if func.__name__.startswith("register_"):
func._dlas_model_name = func.__name__[9:]
assert func._dlas_model_name
else:
raise RegisteredModelNameError(func.__name__)
func._dlas_registered_model = True
return func
def find_registered_model_fns(base_path='models'):
found_fns = {}
module_iter = pkgutil.walk_packages([base_path])
for mod in module_iter:
if os.name == 'nt':
if os.path.join(os.getcwd(), base_path) not in mod.module_finder.path:
continue
if mod.ispkg:
EXCLUSION_LIST = ['flownet2']
if mod.name not in EXCLUSION_LIST:
found_fns.update(find_registered_model_fns(f'{base_path}/{mod.name}'))
else:
mod_name = f'{base_path}/{mod.name}'.replace('/', '.')
importlib.import_module(mod_name)
for mod_fn in getmembers(sys.modules[mod_name], isfunction):
if hasattr(mod_fn[1], "_dlas_registered_model"):
found_fns[mod_fn[1]._dlas_model_name] = mod_fn[1]
return found_fns
class CreateModelError(Exception):
def __init__(self, name, available):
super().__init__(f'Could not find the specified model name: {name}. Tip: If your model is in a'
f' subdirectory, that directory must contain an __init__.py to be scanned. Available models:'
f'{available}')
def create_model(opt, opt_net, other_nets=None):
which_model = opt_net['which_model']
# For backwards compatibility.
if not which_model:
which_model = opt_net['which_model_G']
if not which_model:
which_model = opt_net['which_model_D']
registered_fns = find_registered_model_fns()
if which_model not in registered_fns.keys():
raise CreateModelError(which_model, list(registered_fns.keys()))
num_params = len(signature(registered_fns[which_model]).parameters)
if num_params == 2:
return registered_fns[which_model](opt_net, opt)
else:
return registered_fns[which_model](opt_net, opt, other_nets) | true | true |
f73a17dc043e21a9af4216bfb716cd677517acfb | 8,086 | py | Python | panda/tests/automated/helpers.py | BoneE562/openpilot | bc0934f8c0d49cb971f0aa1c20361f0b0959650f | [
"MIT"
] | 114 | 2020-02-24T14:18:01.000Z | 2022-03-19T03:42:00.000Z | panda/tests/automated/helpers.py | BoneE562/openpilot | bc0934f8c0d49cb971f0aa1c20361f0b0959650f | [
"MIT"
] | 15 | 2020-02-25T03:37:44.000Z | 2021-09-08T01:51:15.000Z | panda/tests/automated/helpers.py | BoneE562/openpilot | bc0934f8c0d49cb971f0aa1c20361f0b0959650f | [
"MIT"
] | 55 | 2020-02-24T09:43:04.000Z | 2022-02-15T04:52:00.000Z | import os
import sys
import time
import random
import binascii
import subprocess
import requests
import _thread
from functools import wraps
from panda import Panda
from nose.tools import timed, assert_equal, assert_less, assert_greater
from parameterized import parameterized, param
SPEED_NORMAL = 500
SPEED_GMLAN = 33.3
test_all_types = parameterized([
param(panda_type=Panda.HW_TYPE_WHITE_PANDA),
param(panda_type=Panda.HW_TYPE_GREY_PANDA),
param(panda_type=Panda.HW_TYPE_BLACK_PANDA)
])
test_all_pandas = parameterized(
Panda.list()
)
test_white_and_grey = parameterized([
param(panda_type=Panda.HW_TYPE_WHITE_PANDA),
param(panda_type=Panda.HW_TYPE_GREY_PANDA)
])
test_white = parameterized([
param(panda_type=Panda.HW_TYPE_WHITE_PANDA)
])
test_grey = parameterized([
param(panda_type=Panda.HW_TYPE_GREY_PANDA)
])
test_two_panda = parameterized([
param(panda_type=[Panda.HW_TYPE_GREY_PANDA, Panda.HW_TYPE_WHITE_PANDA]),
param(panda_type=[Panda.HW_TYPE_WHITE_PANDA, Panda.HW_TYPE_GREY_PANDA]),
param(panda_type=[Panda.HW_TYPE_BLACK_PANDA, Panda.HW_TYPE_BLACK_PANDA])
])
test_two_black_panda = parameterized([
param(panda_type=[Panda.HW_TYPE_BLACK_PANDA, Panda.HW_TYPE_BLACK_PANDA])
])
def connect_wifi(serial=None):
p = Panda(serial=serial)
p.set_esp_power(True)
dongle_id, pw = p.get_serial()
assert(dongle_id.isalnum())
_connect_wifi(dongle_id, pw)
FNULL = open(os.devnull, 'w')
def _connect_wifi(dongle_id, pw, insecure_okay=False):
ssid = "panda-" + dongle_id.decode("utf8")
r = subprocess.call(["ping", "-W", "4", "-c", "1", "192.168.0.10"], stdout=FNULL, stderr=subprocess.STDOUT)
if not r:
#Can already ping, try connecting on wifi
try:
p = Panda("WIFI")
p.get_serial()
print("Already connected")
return
except:
pass
print("WIFI: connecting to %s" % ssid)
while 1:
if sys.platform == "darwin":
os.system("networksetup -setairportnetwork en0 %s %s" % (ssid, pw))
else:
wlan_interface = subprocess.check_output(["sh", "-c", "iw dev | awk '/Interface/ {print $2}'"]).strip()
cnt = 0
MAX_TRIES = 10
while cnt < MAX_TRIES:
print("WIFI: scanning %d" % cnt)
os.system("iwlist %s scanning > /dev/null" % wlan_interface)
os.system("nmcli device wifi rescan")
wifi_networks = [x.decode("utf8") for x in subprocess.check_output(["nmcli","dev", "wifi", "list"]).split(b"\n")]
wifi_scan = [x for x in wifi_networks if ssid in x]
if len(wifi_scan) != 0:
break
time.sleep(0.1)
# MAX_TRIES tries, ~10 seconds max
cnt += 1
assert cnt < MAX_TRIES
if "-pair" in wifi_scan[0]:
os.system("nmcli d wifi connect %s-pair" % (ssid))
connect_cnt = 0
MAX_TRIES = 20
while connect_cnt < MAX_TRIES:
connect_cnt += 1
r = subprocess.call(["ping", "-W", "4", "-c", "1", "192.168.0.10"], stdout=FNULL, stderr=subprocess.STDOUT)
if r:
print("Waiting for panda to ping...")
time.sleep(0.1)
else:
break
if insecure_okay:
break
# fetch webpage
print("connecting to insecure network to secure")
try:
r = requests.get("http://192.168.0.10/")
except requests.ConnectionError:
r = requests.get("http://192.168.0.10/")
assert r.status_code==200
print("securing")
try:
r = requests.get("http://192.168.0.10/secure", timeout=0.01)
except requests.exceptions.Timeout:
print("timeout http request to secure")
pass
else:
ret = os.system("nmcli d wifi connect %s password %s" % (ssid, pw))
if os.WEXITSTATUS(ret) == 0:
#check ping too
ping_ok = False
connect_cnt = 0
MAX_TRIES = 10
while connect_cnt < MAX_TRIES:
connect_cnt += 1
r = subprocess.call(["ping", "-W", "4", "-c", "1", "192.168.0.10"], stdout=FNULL, stderr=subprocess.STDOUT)
if r:
print("Waiting for panda to ping...")
time.sleep(0.1)
else:
ping_ok = True
break
if ping_ok:
break
# TODO: confirm that it's connected to the right panda
def time_many_sends(p, bus, precv=None, msg_count=100, msg_id=None, two_pandas=False):
if precv == None:
precv = p
if msg_id == None:
msg_id = random.randint(0x100, 0x200)
if p == precv and two_pandas:
raise ValueError("Cannot have two pandas that are the same panda")
st = time.time()
p.can_send_many([(msg_id, 0, b"\xaa"*8, bus)]*msg_count)
r = []
r_echo = []
r_len_expected = msg_count if two_pandas else msg_count*2
r_echo_len_exected = msg_count if two_pandas else 0
while len(r) < r_len_expected and (time.time() - st) < 5:
r.extend(precv.can_recv())
et = time.time()
if two_pandas:
while len(r_echo) < r_echo_len_exected and (time.time() - st) < 10:
r_echo.extend(p.can_recv())
sent_echo = [x for x in r if x[3] == 0x80 | bus and x[0] == msg_id]
sent_echo.extend([x for x in r_echo if x[3] == 0x80 | bus and x[0] == msg_id])
resp = [x for x in r if x[3] == bus and x[0] == msg_id]
leftovers = [x for x in r if (x[3] != 0x80 | bus and x[3] != bus) or x[0] != msg_id]
assert_equal(len(leftovers), 0)
assert_equal(len(resp), msg_count)
assert_equal(len(sent_echo), msg_count)
et = (et-st)*1000.0
comp_kbps = (1+11+1+1+1+4+8*8+15+1+1+1+7)*msg_count / et
return comp_kbps
_panda_serials = None
def panda_type_to_serial(fn):
@wraps(fn)
def wrapper(panda_type=None, **kwargs):
# Change panda_types to a list
if panda_type is not None:
if not isinstance(panda_type, list):
panda_type = [panda_type]
# If not done already, get panda serials and their type
global _panda_serials
if _panda_serials == None:
_panda_serials = []
for serial in Panda.list():
p = Panda(serial=serial)
_panda_serials.append((serial, p.get_type()))
p.close()
# Find a panda with the correct types and add the corresponding serial
serials = []
for p_type in panda_type:
found = False
for serial, pt in _panda_serials:
# Never take the same panda twice
if (pt == p_type) and (serial not in serials):
serials.append(serial)
found = True
break
if not found:
raise IOError("No unused panda found for type: {}".format(p_type))
return fn(serials, **kwargs)
return wrapper
def heartbeat_thread(p):
while True:
try:
p.send_heartbeat()
time.sleep(1)
except:
break
def panda_connect_and_init(fn):
@wraps(fn)
def wrapper(panda_serials=None, **kwargs):
# Change panda_serials to a list
if panda_serials is not None:
if not isinstance(panda_serials, list):
panda_serials = [panda_serials]
# Connect to pandas
pandas = []
for panda_serial in panda_serials:
pandas.append(Panda(serial=panda_serial))
# Initialize pandas
for panda in pandas:
panda.set_can_loopback(False)
panda.set_gmlan(None)
panda.set_esp_power(False)
for bus, speed in [(0, SPEED_NORMAL), (1, SPEED_NORMAL), (2, SPEED_NORMAL), (3, SPEED_GMLAN)]:
panda.set_can_speed_kbps(bus, speed)
clear_can_buffers(panda)
_thread.start_new_thread(heartbeat_thread, (panda,))
# Run test function
ret = fn(*pandas, **kwargs)
# Close all connections
for panda in pandas:
panda.close()
# Return test function result
return ret
return wrapper
def clear_can_buffers(panda):
# clear tx buffers
for i in range(4):
panda.can_clear(i)
# clear rx buffers
panda.can_clear(0xFFFF)
r = [1]
st = time.time()
while len(r) > 0:
r = panda.can_recv()
time.sleep(0.05)
if (time.time() - st) > 10:
print("Unable to clear can buffers for panda ", panda.get_serial())
assert False
| 30.745247 | 121 | 0.633193 | import os
import sys
import time
import random
import binascii
import subprocess
import requests
import _thread
from functools import wraps
from panda import Panda
from nose.tools import timed, assert_equal, assert_less, assert_greater
from parameterized import parameterized, param
SPEED_NORMAL = 500
SPEED_GMLAN = 33.3
test_all_types = parameterized([
param(panda_type=Panda.HW_TYPE_WHITE_PANDA),
param(panda_type=Panda.HW_TYPE_GREY_PANDA),
param(panda_type=Panda.HW_TYPE_BLACK_PANDA)
])
test_all_pandas = parameterized(
Panda.list()
)
test_white_and_grey = parameterized([
param(panda_type=Panda.HW_TYPE_WHITE_PANDA),
param(panda_type=Panda.HW_TYPE_GREY_PANDA)
])
test_white = parameterized([
param(panda_type=Panda.HW_TYPE_WHITE_PANDA)
])
test_grey = parameterized([
param(panda_type=Panda.HW_TYPE_GREY_PANDA)
])
test_two_panda = parameterized([
param(panda_type=[Panda.HW_TYPE_GREY_PANDA, Panda.HW_TYPE_WHITE_PANDA]),
param(panda_type=[Panda.HW_TYPE_WHITE_PANDA, Panda.HW_TYPE_GREY_PANDA]),
param(panda_type=[Panda.HW_TYPE_BLACK_PANDA, Panda.HW_TYPE_BLACK_PANDA])
])
test_two_black_panda = parameterized([
param(panda_type=[Panda.HW_TYPE_BLACK_PANDA, Panda.HW_TYPE_BLACK_PANDA])
])
def connect_wifi(serial=None):
p = Panda(serial=serial)
p.set_esp_power(True)
dongle_id, pw = p.get_serial()
assert(dongle_id.isalnum())
_connect_wifi(dongle_id, pw)
FNULL = open(os.devnull, 'w')
def _connect_wifi(dongle_id, pw, insecure_okay=False):
ssid = "panda-" + dongle_id.decode("utf8")
r = subprocess.call(["ping", "-W", "4", "-c", "1", "192.168.0.10"], stdout=FNULL, stderr=subprocess.STDOUT)
if not r:
try:
p = Panda("WIFI")
p.get_serial()
print("Already connected")
return
except:
pass
print("WIFI: connecting to %s" % ssid)
while 1:
if sys.platform == "darwin":
os.system("networksetup -setairportnetwork en0 %s %s" % (ssid, pw))
else:
wlan_interface = subprocess.check_output(["sh", "-c", "iw dev | awk '/Interface/ {print $2}'"]).strip()
cnt = 0
MAX_TRIES = 10
while cnt < MAX_TRIES:
print("WIFI: scanning %d" % cnt)
os.system("iwlist %s scanning > /dev/null" % wlan_interface)
os.system("nmcli device wifi rescan")
wifi_networks = [x.decode("utf8") for x in subprocess.check_output(["nmcli","dev", "wifi", "list"]).split(b"\n")]
wifi_scan = [x for x in wifi_networks if ssid in x]
if len(wifi_scan) != 0:
break
time.sleep(0.1)
cnt += 1
assert cnt < MAX_TRIES
if "-pair" in wifi_scan[0]:
os.system("nmcli d wifi connect %s-pair" % (ssid))
connect_cnt = 0
MAX_TRIES = 20
while connect_cnt < MAX_TRIES:
connect_cnt += 1
r = subprocess.call(["ping", "-W", "4", "-c", "1", "192.168.0.10"], stdout=FNULL, stderr=subprocess.STDOUT)
if r:
print("Waiting for panda to ping...")
time.sleep(0.1)
else:
break
if insecure_okay:
break
print("connecting to insecure network to secure")
try:
r = requests.get("http://192.168.0.10/")
except requests.ConnectionError:
r = requests.get("http://192.168.0.10/")
assert r.status_code==200
print("securing")
try:
r = requests.get("http://192.168.0.10/secure", timeout=0.01)
except requests.exceptions.Timeout:
print("timeout http request to secure")
pass
else:
ret = os.system("nmcli d wifi connect %s password %s" % (ssid, pw))
if os.WEXITSTATUS(ret) == 0:
ping_ok = False
connect_cnt = 0
MAX_TRIES = 10
while connect_cnt < MAX_TRIES:
connect_cnt += 1
r = subprocess.call(["ping", "-W", "4", "-c", "1", "192.168.0.10"], stdout=FNULL, stderr=subprocess.STDOUT)
if r:
print("Waiting for panda to ping...")
time.sleep(0.1)
else:
ping_ok = True
break
if ping_ok:
break
def time_many_sends(p, bus, precv=None, msg_count=100, msg_id=None, two_pandas=False):
if precv == None:
precv = p
if msg_id == None:
msg_id = random.randint(0x100, 0x200)
if p == precv and two_pandas:
raise ValueError("Cannot have two pandas that are the same panda")
st = time.time()
p.can_send_many([(msg_id, 0, b"\xaa"*8, bus)]*msg_count)
r = []
r_echo = []
r_len_expected = msg_count if two_pandas else msg_count*2
r_echo_len_exected = msg_count if two_pandas else 0
while len(r) < r_len_expected and (time.time() - st) < 5:
r.extend(precv.can_recv())
et = time.time()
if two_pandas:
while len(r_echo) < r_echo_len_exected and (time.time() - st) < 10:
r_echo.extend(p.can_recv())
sent_echo = [x for x in r if x[3] == 0x80 | bus and x[0] == msg_id]
sent_echo.extend([x for x in r_echo if x[3] == 0x80 | bus and x[0] == msg_id])
resp = [x for x in r if x[3] == bus and x[0] == msg_id]
leftovers = [x for x in r if (x[3] != 0x80 | bus and x[3] != bus) or x[0] != msg_id]
assert_equal(len(leftovers), 0)
assert_equal(len(resp), msg_count)
assert_equal(len(sent_echo), msg_count)
et = (et-st)*1000.0
comp_kbps = (1+11+1+1+1+4+8*8+15+1+1+1+7)*msg_count / et
return comp_kbps
_panda_serials = None
def panda_type_to_serial(fn):
@wraps(fn)
def wrapper(panda_type=None, **kwargs):
# Change panda_types to a list
if panda_type is not None:
if not isinstance(panda_type, list):
panda_type = [panda_type]
# If not done already, get panda serials and their type
global _panda_serials
if _panda_serials == None:
_panda_serials = []
for serial in Panda.list():
p = Panda(serial=serial)
_panda_serials.append((serial, p.get_type()))
p.close()
# Find a panda with the correct types and add the corresponding serial
serials = []
for p_type in panda_type:
found = False
for serial, pt in _panda_serials:
# Never take the same panda twice
if (pt == p_type) and (serial not in serials):
serials.append(serial)
found = True
break
if not found:
raise IOError("No unused panda found for type: {}".format(p_type))
return fn(serials, **kwargs)
return wrapper
def heartbeat_thread(p):
while True:
try:
p.send_heartbeat()
time.sleep(1)
except:
break
def panda_connect_and_init(fn):
@wraps(fn)
def wrapper(panda_serials=None, **kwargs):
# Change panda_serials to a list
if panda_serials is not None:
if not isinstance(panda_serials, list):
panda_serials = [panda_serials]
# Connect to pandas
pandas = []
for panda_serial in panda_serials:
pandas.append(Panda(serial=panda_serial))
# Initialize pandas
for panda in pandas:
panda.set_can_loopback(False)
panda.set_gmlan(None)
panda.set_esp_power(False)
for bus, speed in [(0, SPEED_NORMAL), (1, SPEED_NORMAL), (2, SPEED_NORMAL), (3, SPEED_GMLAN)]:
panda.set_can_speed_kbps(bus, speed)
clear_can_buffers(panda)
_thread.start_new_thread(heartbeat_thread, (panda,))
# Run test function
ret = fn(*pandas, **kwargs)
# Close all connections
for panda in pandas:
panda.close()
# Return test function result
return ret
return wrapper
def clear_can_buffers(panda):
# clear tx buffers
for i in range(4):
panda.can_clear(i)
# clear rx buffers
panda.can_clear(0xFFFF)
r = [1]
st = time.time()
while len(r) > 0:
r = panda.can_recv()
time.sleep(0.05)
if (time.time() - st) > 10:
print("Unable to clear can buffers for panda ", panda.get_serial())
assert False
| true | true |
f73a194a33be4e5988caca75413dd53fe934f568 | 2,827 | py | Python | pex/finders.py | alexey-tereshenkov-oxb/pex | 2e2d1e50e604fdee48b0d51aea482ca255521ff0 | [
"Apache-2.0"
] | null | null | null | pex/finders.py | alexey-tereshenkov-oxb/pex | 2e2d1e50e604fdee48b0d51aea482ca255521ff0 | [
"Apache-2.0"
] | null | null | null | pex/finders.py | alexey-tereshenkov-oxb/pex | 2e2d1e50e604fdee48b0d51aea482ca255521ff0 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import
import ast
import os
from pex.common import is_python_script
from pex.third_party.pkg_resources import Distribution
from pex.typing import TYPE_CHECKING, cast
if TYPE_CHECKING:
from typing import Optional
import attr # vendor:skip
else:
from pex.third_party import attr
@attr.s(frozen=True)
class DistributionScript(object):
@classmethod
def find(
cls,
dist, # type: Distribution
name, # type: str
):
# type: (...) -> Optional[DistributionScript]
script_path = os.path.join(dist.location, "bin", name)
return cls(dist=dist, path=script_path) if os.path.isfile(script_path) else None
dist = attr.ib() # type: Distribution
path = attr.ib() # type: str
def read_contents(self):
# type: () -> bytes
with open(self.path, "rb") as fp:
return fp.read()
def python_script(self):
# type: () -> Optional[ast.AST]
if not is_python_script(self.path):
return None
try:
return cast(
ast.AST, compile(self.read_contents(), self.path, "exec", flags=0, dont_inherit=1)
)
except (SyntaxError, TypeError):
return None
def get_script_from_distributions(name, dists):
for dist in dists:
distribution_script = DistributionScript.find(dist, name)
if distribution_script:
return distribution_script
def get_entry_point_from_console_script(script, dists):
# Check all distributions for the console_script "script". De-dup by dist key to allow for a
# duplicate console script IFF the distribution is platform-specific and this is a multi-platform
# pex.
def get_entrypoint(dist):
script_entry = dist.get_entry_map().get("console_scripts", {}).get(script)
if script_entry is not None:
# Entry points are of the form 'foo = bar', we just want the 'bar' part.
return str(script_entry).split("=")[1].strip()
entries = {}
for dist in dists:
entry_point = get_entrypoint(dist)
if entry_point is not None:
entries[dist.key] = (dist, entry_point)
if len(entries) > 1:
raise RuntimeError(
"Ambiguous script specification %s matches multiple entry points:\n\t%s"
% (
script,
"\n\t".join(
"%r from %r" % (entry_point, dist) for dist, entry_point in entries.values()
),
)
)
dist, entry_point = None, None
if entries:
dist, entry_point = next(iter(entries.values()))
return dist, entry_point
| 30.728261 | 101 | 0.625752 |
from __future__ import absolute_import
import ast
import os
from pex.common import is_python_script
from pex.third_party.pkg_resources import Distribution
from pex.typing import TYPE_CHECKING, cast
if TYPE_CHECKING:
from typing import Optional
import attr
else:
from pex.third_party import attr
@attr.s(frozen=True)
class DistributionScript(object):
@classmethod
def find(
cls,
dist,
name,
):
script_path = os.path.join(dist.location, "bin", name)
return cls(dist=dist, path=script_path) if os.path.isfile(script_path) else None
dist = attr.ib()
path = attr.ib()
def read_contents(self):
with open(self.path, "rb") as fp:
return fp.read()
def python_script(self):
if not is_python_script(self.path):
return None
try:
return cast(
ast.AST, compile(self.read_contents(), self.path, "exec", flags=0, dont_inherit=1)
)
except (SyntaxError, TypeError):
return None
def get_script_from_distributions(name, dists):
for dist in dists:
distribution_script = DistributionScript.find(dist, name)
if distribution_script:
return distribution_script
def get_entry_point_from_console_script(script, dists):
def get_entrypoint(dist):
script_entry = dist.get_entry_map().get("console_scripts", {}).get(script)
if script_entry is not None:
return str(script_entry).split("=")[1].strip()
entries = {}
for dist in dists:
entry_point = get_entrypoint(dist)
if entry_point is not None:
entries[dist.key] = (dist, entry_point)
if len(entries) > 1:
raise RuntimeError(
"Ambiguous script specification %s matches multiple entry points:\n\t%s"
% (
script,
"\n\t".join(
"%r from %r" % (entry_point, dist) for dist, entry_point in entries.values()
),
)
)
dist, entry_point = None, None
if entries:
dist, entry_point = next(iter(entries.values()))
return dist, entry_point
| true | true |
f73a1a4aa5e6d5e05df796daddc33504ebe32372 | 49,673 | py | Python | pypowervm/tests/test_adapter.py | VedaAnnayappa/pypowervm | 266e5cb2f8725c63267b41b617ba5a1db2adadfa | [
"Apache-2.0"
] | null | null | null | pypowervm/tests/test_adapter.py | VedaAnnayappa/pypowervm | 266e5cb2f8725c63267b41b617ba5a1db2adadfa | [
"Apache-2.0"
] | null | null | null | pypowervm/tests/test_adapter.py | VedaAnnayappa/pypowervm | 266e5cb2f8725c63267b41b617ba5a1db2adadfa | [
"Apache-2.0"
] | null | null | null | # Copyright 2014, 2015, 2016 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import errno
import fixtures
import gc
from lxml import etree
import six
import subunit
if six.PY2:
import __builtin__ as builtins
elif six.PY3:
import builtins
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
import mock
import requests.models as req_mod
import requests.structures as req_struct
import testtools
import pypowervm.adapter as adp
import pypowervm.const as c
import pypowervm.entities as ent
import pypowervm.exceptions as pvmex
import pypowervm.tests.lib as testlib
import pypowervm.tests.test_fixtures as fx
from pypowervm.tests.test_utils import pvmhttp
from pypowervm.wrappers import storage as pvm_stor
# Canned XML payloads (as bytes) used as mock HTTP response bodies in tests.
logon_text = testlib.file2b("logon.xml")
response_text = testlib.file2b("event.xml")

# Name of the test-data file holding a sample NetworkBridge feed response.
NET_BRIDGE_FILE = 'fake_network_bridge.txt'
class TestAdapter(testtools.TestCase):
"""Test cases to test the adapter classes and methods."""
def _mk_response(self, status, content=None):
reasons = {200: 'OK', 204: 'No Content', 401: 'Unauthorized'}
# Create a Response object, that will serve as a mock return value
my_response = req_mod.Response()
my_response.status_code = status
my_response.reason = reasons[status]
clen = '0'
if status == 200 and content:
clen = str(len(content))
dict_headers = {
'content-length': clen, 'x-powered-by': 'Servlet/3.0',
'set-cookie': ('JSESSIONID=0000a41BnJsGTNQvBGERA3wR1nj:759878cb-4f'
'9a-4b05-a09a-3357abfea3b4; Path=/; Secure; HttpOnl'
'y, CCFWSESSION=E4C0FFBE9130431DBF1864171ECC6A6E; P'
'ath=/; Secure; HttpOnly'),
'expires': 'Thu, 01 Dec 1994 16:00:00 GMT',
'x-transaction-id': 'XT10000073',
'cache-control': 'no-cache="set-cookie, set-cookie2"',
'date': 'Wed, 23 Jul 2014 21:51:10 GMT',
'content-type': 'application/vnd.ibm.powervm'}
my_response.headers = req_struct.CaseInsensitiveDict(dict_headers)
my_response._content = content
return my_response
def setUp(self):
super(TestAdapter, self).setUp()
"""Set up a mocked Session instance."""
# Init test data
host = '0.0.0.0'
user = 'user'
pwd = 'pwd'
auditmemento = 'audit'
# Create a Response object, that will serve as a mock return value
my_response = self._mk_response(200, logon_text)
# Mock out the method and class we are not currently testing
with mock.patch('requests.Session') as mock_session:
session = mock_session.return_value
session.request.return_value = my_response
# Create session for the test to use
self.sess = adp.Session(host, user, pwd,
auditmemento=auditmemento,
certpath=None)
# Mock out the logoff, which gets called when the session
# goes out of scope during tearDown()
self.sess._logoff = mock.Mock()
    def tearDown(self):
        """Tear down the Session instance."""
        # Drop our reference so the Session can be garbage collected; its
        # _logoff was mocked in setUp so no real logoff request is made.
        self.sess = None
        super(TestAdapter, self).tearDown()
    @mock.patch('pypowervm.wrappers.event.Event.wrap')
    @mock.patch('time.sleep')
    def test_event_listener(self, mock_sleep, mock_evt_wrap):
        """Exercise EventListener subscribe/dispatch and _get_events paths."""
        with mock.patch.object(adp._EventListener, '_get_events') as m_events,\
                mock.patch.object(adp, '_EventPollThread') as mock_poll:
            # With some fake events, event listener can be initialized
            self.sess._sessToken = 'token'.encode('utf-8')
            m_events.return_value = {'general': 'init'}, 'raw_evt', 'wrap_evt'
            event_listen = self.sess.get_event_listener()
            self.assertIsNotNone(event_listen)

            # Register the fake handlers and ensure they are called
            evh = mock.Mock(spec=adp.EventHandler, autospec=True)
            raw_evh = mock.Mock(spec=adp.RawEventHandler, autospec=True)
            wrap_evh = mock.Mock(spec=adp.WrapperEventHandler, autospec=True)
            event_listen.subscribe(evh)
            event_listen.subscribe(raw_evh)
            event_listen.subscribe(wrap_evh)
            events, raw_events, evtwraps = event_listen._get_events()
            event_listen._dispatch_events(events, raw_events, evtwraps)
            # Each handler kind receives its own flavor of the same event
            evh.process.assert_called_once_with({'general': 'init'})
            raw_evh.process.assert_called_once_with('raw_evt')
            wrap_evh.process.assert_called_once_with('wrap_evt')
            self.assertTrue(mock_poll.return_value.start.called)

            # Ensure getevents() gets legacy events
            self.assertEqual({'general': 'init'}, event_listen.getevents())

        # Outside our patching of _get_events, get the formatted events
        with mock.patch.object(event_listen, '_format_events') as mock_format,\
                mock.patch.object(event_listen.adp, 'read') as mock_read:
            # Ensure exception path doesn't kill the thread
            mock_read.side_effect = Exception()
            self.assertEqual(({}, [], []), event_listen._get_events())
            self.assertEqual(1, mock_read.call_count)
            mock_format.assert_not_called()
            mock_evt_wrap.assert_not_called()
            # On failure the listener backs off (5s) before retrying
            mock_sleep.assert_called_once_with(5)
            mock_read.reset_mock()

            # side_effect takes precedence over return_value; so kill it.
            mock_read.side_effect = None

            # Fabricate some mock entries, so format gets called.
            mock_read.return_value.feed.entries = (['entry1', 'entry2'])
            self.assertEqual(({}, [], mock_evt_wrap.return_value),
                             event_listen._get_events())
            self.assertEqual(1, mock_read.call_count)
            mock_format.assert_has_calls([mock.call('entry1', {}, []),
                                          mock.call('entry2', {}, [])])
            mock_evt_wrap.assert_called_once_with(mock_read.return_value)

        # Test _format_events
        event_data = [
            {
                'EventType': 'NEW_CLIENT',
                'EventData': 'href1',
                'EventID': '1',
                'EventDetail': 'detail',
            },
            {
                'EventType': 'CACHE_CLEARED',
                'EventData': 'href2',
                'EventID': '2',
                'EventDetail': 'detail2',
            },
            {
                'EventType': 'ADD_URI',
                'EventData': 'LPAR1',
                'EventID': '3',
                'EventDetail': 'detail3',
            },
            {
                'EventType': 'DELETE_URI',
                'EventData': 'LPAR1',
                'EventID': '4',
                'EventDetail': 'detail4',
            },
            {
                'EventType': 'INVALID_URI',
                'EventData': 'LPAR1',
                'EventID': '4',
                'EventDetail': 'detail4',
            },
        ]

        # Setup a side effect that returns events from the test data.
        # EventDetail is the last field _format_events reads per event, so
        # reading it advances to the next fabricated event.
        def get_event_data(item):
            data = event_data[0][item]
            if item == 'EventDetail':
                event_data.pop(0)
            return data

        # Raw events returns a sequence the same as the test data
        raw_result = copy.deepcopy(event_data)
        # Legacy events overwrites some events.
        dict_result = {'general': 'invalidate', 'LPAR1': 'delete'}

        # Build a mock entry
        entry = mock.Mock()
        entry.element.findtext.side_effect = get_event_data
        events = {}
        raw_events = []
        x = len(raw_result)
        while x:
            x -= 1
            event_listen._format_events(entry, events, raw_events)
        self.assertEqual(raw_result, raw_events)
        self.assertEqual(dict_result, events)
    @mock.patch('pypowervm.adapter.Session')
    def test_empty_init(self, mock_sess):
        """Adapter() with no arguments builds an implicit (no-arg) Session."""
        adp.Adapter()
        mock_sess.assert_called_with()
    def test_no_cache(self):
        """use_cache=True is no longer supported and must raise."""
        self.assertRaises(pvmex.CacheNotSupportedException,
                          adp.Adapter, use_cache=True)
    @mock.patch('requests.Session')
    def test_read(self, mock_session):
        """Test read() method found in the Adapter class."""
        # Init test data
        root_type = 'ManagedSystem'
        root_id = 'caae9209-25e5-35cd-a71a-ed55c03f294d'
        child_type = 'child'
        child_id = 'child'
        suffix_type = 'quick'
        adapter = adp.Adapter(self.sess)

        # Create a Response object, that will serve as a mock return value
        read_response = self._mk_response(200, response_text)

        # Mock out the method and class we are not currently testing
        session = mock_session.return_value
        session.request.return_value = read_response

        # Run the actual test
        ret_read_value = adapter.read(root_type, root_id, child_type,
                                      child_id, suffix_type)

        # Verify Correct path was built in build_path()
        reqpath = adp.Adapter.build_path('uom', root_type, root_id,
                                         child_type, child_id, suffix_type)
        # Verify the return value
        # self.assertIsInstance(ret_read_value, adp.Response)
        self.assertEqual('GET', ret_read_value.reqmethod)
        self.assertEqual(200, ret_read_value.status)
        self.assertEqual(reqpath, ret_read_value.reqpath)
    @mock.patch('pypowervm.adapter.Adapter._validate')
    @mock.patch('pypowervm.adapter.Adapter.build_path')
    @mock.patch('pypowervm.adapter.Adapter.read_by_path')
    def test_read2(self, mock_rbp, mock_bld, mock_val):
        """Validate shallow flow & arg passing."""
        adap = adp.Adapter(session=self.sess)

        # Defaults: read() validates, builds a 'uom' path, and delegates to
        # read_by_path with default timeout/age/sensitive/helpers.
        self.assertEqual(mock_rbp.return_value, adap.read('root_type'))
        mock_val.assert_called_once_with(
            'read', 'root_type', None, None, None, None, None, None)
        mock_bld.assert_called_once_with(
            'uom', 'root_type', None, None, None, None, None, None, xag=None,
            add_qp=None)
        mock_rbp.assert_called_once_with(
            mock_bld.return_value, None, timeout=-1, auditmemento=None, age=-1,
            sensitive=False, helpers=None)

        # Specified kwargs: every argument is passed through verbatim.
        mock_val.reset_mock()
        mock_bld.reset_mock()
        mock_rbp.reset_mock()
        self.assertEqual(mock_rbp.return_value, adap.read(
            'root_type', root_id='root_id', child_type='child_type',
            child_id='child_id', suffix_type='suffix_type',
            suffix_parm='suffix_parm', detail='detail', service='service',
            etag='etag', timeout='timeout', auditmemento='auditmemento',
            age='age', xag='xag', sensitive='sensitive', helpers='helpers',
            add_qp='add_qp'))
        mock_val.assert_called_once_with(
            'read', 'root_type', 'root_id', 'child_type', 'child_id',
            'suffix_type', 'suffix_parm', 'detail')
        mock_bld.assert_called_once_with(
            'service', 'root_type', 'root_id', 'child_type', 'child_id',
            'suffix_type', 'suffix_parm', 'detail', xag='xag', add_qp='add_qp')
        mock_rbp.assert_called_once_with(
            mock_bld.return_value, 'etag', timeout='timeout',
            auditmemento='auditmemento', age='age', sensitive='sensitive',
            helpers='helpers')
    @mock.patch('pypowervm.adapter.Adapter.extend_path')
    def test_build_path(self, mock_exp):
        """Validate build_path."""
        adap = adp.Adapter(session=self.sess)

        # Defaults
        self.assertEqual(mock_exp.return_value, adap.build_path(
            'service', 'root_type'))
        mock_exp.assert_called_once_with(
            '/rest/api/service/root_type', suffix_type=None, suffix_parm=None,
            detail=None, xag=None, add_qp=None)

        # child specs ignored if no root ID
        mock_exp.reset_mock()
        self.assertEqual(mock_exp.return_value, adap.build_path(
            'service', 'root_type', child_type='child_type',
            child_id='child_id'))
        mock_exp.assert_called_once_with(
            '/rest/api/service/root_type', suffix_type=None, suffix_parm=None,
            detail=None, xag=None, add_qp=None)

        # child ID ignored if no child type
        mock_exp.reset_mock()
        self.assertEqual(mock_exp.return_value, adap.build_path(
            'service', 'root_type', root_id='root_id', child_id='child_id'))
        mock_exp.assert_called_once_with(
            '/rest/api/service/root_type/root_id', suffix_type=None,
            suffix_parm=None, detail=None, xag=None, add_qp=None)

        # Specified kwargs (including full child spec)
        mock_exp.reset_mock()
        self.assertEqual(mock_exp.return_value, adap.build_path(
            'service', 'root_type', root_id='root_id', child_type='child_type',
            child_id='child_id', suffix_type='suffix_type',
            suffix_parm='suffix_parm', detail='detail', xag='xag',
            add_qp='add_qp'))
        mock_exp.assert_called_once_with(
            '/rest/api/service/root_type/root_id/child_type/child_id',
            suffix_type='suffix_type', suffix_parm='suffix_parm',
            detail='detail', xag='xag', add_qp='add_qp')
    @mock.patch('pypowervm.adapter.Adapter._request')
    def test_headers(self, mock_request):
        """Verify Accept/If-None-Match headers for each read path shape."""
        def validate_hdrs_func(acc=None, inm=None):
            # Returns a side-effect callable asserting that _request was
            # invoked with exactly the expected Accept / If-None-Match headers.
            expected_headers = {}
            if acc is not None:
                expected_headers['Accept'] = acc
            if inm is not None:
                expected_headers['If-None-Match'] = inm

            def validate_request(meth, path, **kwargs):
                self.assertEqual(expected_headers, kwargs['headers'])

            return validate_request

        adpt = adp.Adapter(mock.Mock())
        basepath = c.API_BASE_PATH + 'uom/SomeRootObject'
        uuid = "abcdef01-2345-2345-2345-67890abcdef0"
        hdr_xml = 'application/atom+xml'
        hdr_json = '*/*'
        etag = 'abc123'

        # Root feed
        mock_request.side_effect = validate_hdrs_func(acc=hdr_xml)
        adpt._read_by_path(basepath, None, None, None, None)
        # Root instance with etag
        mock_request.side_effect = validate_hdrs_func(acc=hdr_xml, inm=etag)
        adpt._read_by_path(basepath + '/' + uuid, etag, None, None, None)
        # Quick root anchor (produces XML report of available quick properties)
        mock_request.side_effect = validate_hdrs_func(acc=hdr_xml)
        adpt._read_by_path(basepath + '/quick', None, None, None, None)
        # Quick root instance (JSON of all quick properties)
        mock_request.side_effect = validate_hdrs_func(acc=hdr_json)
        adpt._read_by_path('/'.join([basepath, uuid, 'quick']), None, None,
                           None, None)
        # Specific quick property
        mock_request.side_effect = validate_hdrs_func(acc=hdr_json)
        adpt._read_by_path('/'.join([basepath, uuid, 'quick', 'property']),
                           None, None, None, None)
        # Explicit JSON file
        mock_request.side_effect = validate_hdrs_func(acc=hdr_json)
        adpt._read_by_path('/'.join([basepath, 'somefile.json']), None, None,
                           None, None)
        # Object that happens to end in 'json'
        mock_request.side_effect = validate_hdrs_func(acc=hdr_xml)
        adpt._read_by_path('/'.join([basepath, 'xml_about_json']), None, None,
                           None, None)
        # Quick with query params and fragments
        mock_request.side_effect = validate_hdrs_func(acc=hdr_json)
        adpt._read_by_path('/'.join([basepath, uuid, 'quick']) +
                           '?group=None#frag', None, None, None, None)
    @mock.patch('requests.Session')
    def test_create(self, mock_session):
        """Test create() method found in the Adapter class."""
        # Init test data
        adapter = adp.Adapter(self.sess)
        new_scsi = pvm_stor.VSCSIClientAdapterElement.bld(adapter)

        element = new_scsi
        root_type = 'ManagedSystem'
        root_id = 'id'
        child_type = 'LogicalPartition'

        create_response = self._mk_response(200, response_text)

        # Mock out the method and class we are not currently testing
        session = mock_session.return_value
        session.request.return_value = create_response

        # Run the actual test
        ret_create_value = adapter.create(element, root_type, root_id,
                                          child_type)

        # Verify Correct path was built in build_path()
        reqpath = adp.Adapter.build_path('uom', root_type, root_id,
                                         child_type, xag=[])

        # Verify the return value
        # self.assertIsInstance(ret_create_value, adp.Response)
        # Note: REST create maps to HTTP PUT in this API.
        self.assertEqual('PUT', ret_create_value.reqmethod)
        self.assertEqual(200, ret_create_value.status)
        self.assertEqual(reqpath, ret_create_value.reqpath)
    @mock.patch('requests.Session')
    def test_update(self, mock_session):
        """Test update() method found in the Adapter class."""
        # Init test data
        data = 'data'
        etag = 'etag'
        root_type = 'root type'
        root_id = 'root id'
        adapter = adp.Adapter(self.sess)

        update_response = self._mk_response(200, response_text)

        # Mock out the method and class we are not currently testing
        session = mock_session.return_value
        session.request.return_value = update_response

        # Run the actual test
        ret_update_value = adapter.update(data, etag, root_type, root_id)

        # Verify Correct path was built in build_path()
        reqpath = adp.Adapter.build_path('uom', root_type, root_id)

        # Verify the return value
        # self.assertIsInstance(ret_update_value, adp.Response)
        # Note: REST update maps to HTTP POST in this API.
        self.assertEqual('POST', ret_update_value.reqmethod)
        self.assertEqual(200, ret_update_value.status)
        self.assertEqual(reqpath, ret_update_value.reqpath)
    @mock.patch('requests.Session')
    def test_upload(self, mock_session):
        """upload_file PUTs to the File contents path with the file's mime."""
        # Build the adapter
        adapter = adp.Adapter(self.sess)

        # Mock data: file descriptor findtext() is consulted for the file
        # UUID first, then its mime type.
        filedesc_mock = mock.MagicMock()
        filedesc_mock.findtext.side_effect = ['uuid', 'mime']

        with mock.patch.object(adapter, '_request') as mock_request:
            adapter.upload_file(filedesc_mock, None)

        # Validate
        expected_headers = {'Accept': 'application/vnd.ibm.powervm.web+xml',
                            'Content-Type': 'mime'}
        expected_path = '/rest/api/web/File/contents/uuid'
        mock_request.assert_called_once_with(
            'PUT', expected_path, helpers=None, headers=expected_headers,
            timeout=-1, auditmemento=None, filehandle=None, chunksize=65536)
    def _test_upload_request(self, mock_rq, mock_fh, fhdata):
        """Test an upload requests with different kinds of "filehandle".

        :param mock_rq: Mocked requests.sessions.Session.request.
        :param mock_fh: The "filehandle" to pass to upload_file (an iterable
                        or a read()-able object).
        :param fhdata: The chunks expected to be streamed in the request body.
        """
        adapter = adp.Adapter(self.sess)
        mock_fd = mock.Mock(findtext=mock.Mock(side_effect=['uuid', 'mime']))

        def check_request(method, url, data=None, headers=None, timeout=None):
            """Validate the session.request call."""
            self.assertEqual('PUT', method)
            self.assertEqual(
                self.sess.dest + '/rest/api/web/File/contents/uuid', url)
            # Verify that data is iterable
            self.assertEqual(fhdata, [chunk for chunk in data])
            return mock.Mock(status_code=c.HTTPStatus.OK_NO_CONTENT)
        mock_rq.side_effect = check_request

        adapter.upload_file(mock_fd, mock_fh)
    @mock.patch('requests.sessions.Session.request')
    def test_upload_request_iter(self, mock_rq):
        """Test an upload request with an iterable."""
        # An iterable filehandle is streamed as-is, chunk for chunk.
        fhdata = ['one', 'two']
        self._test_upload_request(mock_rq, fhdata, fhdata)
    @mock.patch('requests.sessions.Session.request')
    def test_upload_request_fh(self, mock_rq):
        """Test an upload request with a filehandle."""
        # filehandle is a read()able
        fhdata = ['one', 'two']
        mock_fh = mock.Mock(read=mock.Mock(side_effect=fhdata))
        self._test_upload_request(mock_rq, mock_fh, fhdata)
        # Make sure the file handle's read method was invoked with the
        # default chunk size, once per chunk.
        mock_fh.read.assert_has_calls([mock.call(65536)] * len(fhdata))
def _assert_paths_equivalent(self, exp, act):
"""Ensures two paths or hrefs are "the same".
Query parameter keys may be specified in any order, though their values
must match exactly. The rest of the path must be identical.
:param exp: Expected path
:param act: Actual path (produced by test)
"""
p_exp = urlparse.urlparse(exp)
p_act = urlparse.urlparse(act)
self.assertEqual(p_exp.scheme, p_act.scheme)
self.assertEqual(p_exp.netloc, p_act.netloc)
self.assertEqual(p_exp.path, p_act.path)
self.assertEqual(p_exp.fragment, p_act.fragment)
qs_exp = urlparse.parse_qs(p_exp.query)
qs_act = urlparse.parse_qs(p_act.query)
for vals in qs_exp.values():
vals.sort()
for vals in qs_act.values():
vals.sort()
self.assertEqual(qs_exp, qs_act)
    @mock.patch('requests.Session')
    def test_extend_path(self, mock_session):
        """Exercise extend_path: xag handling, suffixes, and add_qp."""
        # Init test data
        adapter = adp.Adapter(self.sess)

        path = adapter.extend_path('basepath', suffix_type='suffix',
                                   suffix_parm='suffix_parm',
                                   detail='detail',
                                   xag=[c.XAG.VIO_FMAP])

        expected_path = ('basepath/suffix/suffix_parm?detail=detail&'
                         'group=ViosFCMapping')
        self._assert_paths_equivalent(expected_path, path)

        # Multiple XAGs in a set
        path = adapter.extend_path('basepath', suffix_type='suffix',
                                   suffix_parm='suffix_parm',
                                   detail='detail',
                                   xag={c.XAG.VIO_FMAP, c.XAG.VIO_NET})

        expected_path = ('basepath/suffix/suffix_parm?detail=detail&'
                         'group=ViosFCMapping,ViosNetwork')
        self._assert_paths_equivalent(expected_path, path)

        # Verify sorting
        path = adapter.extend_path('basepath', suffix_type='suffix',
                                   suffix_parm='suffix_parm',
                                   detail='detail',
                                   xag=[c.XAG.VIO_NET, c.XAG.VIO_FMAP])

        expected_path = ('basepath/suffix/suffix_parm?detail=detail&'
                         'group=ViosFCMapping,ViosNetwork')
        self._assert_paths_equivalent(expected_path, path)

        # Explicitly no XAG
        path = adapter.extend_path('basepath', suffix_type='suffix',
                                   suffix_parm='suffix_parm', detail='detail',
                                   xag=[])

        expected_path = 'basepath/suffix/suffix_parm?detail=detail'
        self._assert_paths_equivalent(expected_path, path)

        # Ensure unspecified XAG defaults to group=None
        path = adapter.extend_path('basepath', suffix_type='suffix',
                                   suffix_parm='suffix_parm')

        expected_path = 'basepath/suffix/suffix_parm?group=None'
        self._assert_paths_equivalent(expected_path, path)

        # ...except for specific suffix types 'quick' and 'do'
        path = adapter.extend_path('basepath', suffix_type='quick',
                                   suffix_parm='suffix_parm')

        expected_path = 'basepath/quick/suffix_parm'
        self._assert_paths_equivalent(expected_path, path)

        path = adapter.extend_path('basepath', suffix_type='do',
                                   suffix_parm='suffix_parm')

        expected_path = 'basepath/do/suffix_parm'
        self._assert_paths_equivalent(expected_path, path)

        # Ensure arg xags and path xags interact correctly
        # path_xag=None, arg_xag=None => group=None
        self._assert_paths_equivalent(
            'basepath?group=None', adapter.extend_path('basepath'))
        # path_xag='None', arg_xag=None => group=None
        self._assert_paths_equivalent(
            'basepath?group=None', adapter.extend_path('basepath?group=None'))
        # path_xag='a,b,c', arg_xag=None => group=a,b,c
        self._assert_paths_equivalent(
            'basepath?group=a,b,c',
            adapter.extend_path('basepath?group=a,b,c'))
        # path_xag=None, arg_xag=() => no group=
        self._assert_paths_equivalent(
            'basepath', adapter.extend_path('basepath', xag=()))
        # path_xag='None', arg_xag={} => no group=
        self._assert_paths_equivalent(
            'basepath', adapter.extend_path('basepath?group=None', xag={}))
        # path_xag='a,b,c', arg_xag=[] => ValueError (conflicting xags)
        self.assertRaises(
            ValueError, adapter.extend_path, 'basepath?group=a,b,c', xag=[])
        # path_xag=None, arg_xag='a,b,c' => group='a,b,c'
        self._assert_paths_equivalent(
            'basepath?group=a,b,c',
            adapter.extend_path('basepath', xag={'a', 'b', 'c'}))
        # path_xag='None', arg_xag='a,b,c' => group='a,b,c'
        self._assert_paths_equivalent(
            'basepath?group=a,b,c',
            adapter.extend_path('basepath?group=None', xag=('a', 'b', 'c')))
        # path_xag='a,b,c', arg_xag='a,b,c' => group='a,b,c'
        self._assert_paths_equivalent(
            'basepath?group=a,b,c',
            adapter.extend_path('basepath?group=a,b,c', xag=['a', 'b', 'c']))
        # path_xag='a,b,c', arg_xag='d,e,f' => ValueError
        self.assertRaises(ValueError, adapter.extend_path,
                          'basepath?group=a,b,c', xag=['d', 'e', 'f'])
        # Multi-instance query params properly reassembled.
        self._assert_paths_equivalent(
            'basepath?foo=1,2,3&group=a,b,c&foo=4,5,6',
            adapter.extend_path('basepath?foo=4,5,6&group=None&foo=1,2,3',
                                xag=['a', 'b', 'c']))
        # Additional queryparams (add_qp)
        # Explicit None
        self._assert_paths_equivalent(
            'basepath', adapter.extend_path('basepath', xag=[], add_qp=None))
        # Proper escaping
        self._assert_paths_equivalent(
            'basepath?one=%23%24%25%5E%26',
            adapter.extend_path('basepath', xag=[], add_qp=[('one', '#$%^&')]))
        # Duplicated keys (order preserved) and proper handling of non-strings
        self._assert_paths_equivalent(
            'basepath?1=3&1=2',
            adapter.extend_path('basepath', xag=[], add_qp=[(1, 3), (1, 2)]))
        # Proper behavior combined with implicit xag
        self._assert_paths_equivalent(
            'basepath?group=None&key=value&something=else',
            adapter.extend_path(
                'basepath', add_qp=[('key', 'value'), ('something', 'else')]))
        # Combined with xags and an existing querystring
        self._assert_paths_equivalent(
            'basepath?already=here&group=a,b,c&key=value&something=else',
            adapter.extend_path(
                'basepath?already=here', xag=['a', 'b', 'c'],
                add_qp=[('key', 'value'), ('something', 'else')]))
    @mock.patch('pypowervm.adapter.LOG')
    @mock.patch('pypowervm.adapter.Adapter.read_by_path')
    def test_read_by_href(self, mock_read_by_path, mock_log):
        """Ensure read_by_href correctly extends, preserves query strings."""
        def validate_read_by_path(expected):
            # Build a side effect that checks the path handed to
            # read_by_path and that all other args keep their defaults.
            def _read_by_path(path, etag, timeout, auditmemento, age,
                              sensitive, helpers):
                self._assert_paths_equivalent(expected, path)
                for param in (etag, auditmemento, helpers):
                    self.assertIsNone(param)
                for param2 in (age, timeout):
                    self.assertEqual(-1, param2)
                self.assertFalse(sensitive)
            return _read_by_path

        # href host/port matches the session: no debug logging
        self.sess.host = 'foo'
        self.sess.port = 123
        adapter = adp.Adapter(self.sess)
        mock_read_by_path.side_effect = validate_read_by_path(
            '/rest/api/uom/Bar?k=v&group=None#frag')
        adapter.read_by_href('http://foo:123/rest/api/uom/Bar?k=v#frag')
        self.assertFalse(mock_log.debug.called)

        # href host differs from the session's: a debug message is logged
        self.sess.host = 'bar'
        mock_read_by_path.side_effect = validate_read_by_path(
            '/rest/api/uom/Bar?k=v&group=None#frag')
        adapter.read_by_href('http://foo:123/rest/api/uom/Bar?k=v#frag')
        self.assertTrue(mock_log.debug.called)

        # An explicit group= in the href is preserved as-is
        mock_read_by_path.side_effect = validate_read_by_path(
            '/rest/api/uom/Bar?k=v&group=RealGroup#frag')
        adapter.read_by_href(
            'http://foo:123/rest/api/uom/Bar?k=v&group=RealGroup#frag')
    @mock.patch('requests.Session')
    def test_delete(self, mock_session):
        """Test delete() method found in the Adapter class."""
        # Init test data
        root_type = 'ManagedSystem'
        root_id = 'id'
        adapter = adp.Adapter(self.sess)

        # 204: successful delete returns no content
        delete_response = self._mk_response(204)

        # Mock out the method and class we are not currently testing
        session = mock_session.return_value
        session.request.return_value = delete_response

        # Run the actual test
        ret_delete_value = adapter.delete(root_type, root_id)

        # Verify Correct path was built in build_path()
        reqpath = adp.Adapter.build_path('uom', root_type, root_id, xag=[])

        # Verify the return value
        # self.assertIsInstance(ret_delete_value, adp.Response)
        self.assertEqual('DELETE', ret_delete_value.reqmethod)
        self.assertEqual(204, ret_delete_value.status)
        self.assertEqual(reqpath, ret_delete_value.reqpath)
    @mock.patch.object(builtins, 'open')
    def test_auth_file_error(self, mock_open_patch):
        """IOErrors opening the auth file map to specific pvmex exceptions."""
        # EACCES => AuthFileReadError
        mock_open_patch.side_effect = IOError(errno.EACCES, 'Error')
        self.assertRaises(pvmex.AuthFileReadError,
                          self.sess._get_auth_tok_from_file,
                          mock.Mock(), mock.Mock())

        # Any other errno (e.g. EIO) => AuthFileAccessError
        mock_open_patch.side_effect = IOError(errno.EIO, 'Error')
        self.assertRaises(pvmex.AuthFileAccessError,
                          self.sess._get_auth_tok_from_file,
                          mock.Mock(), mock.Mock())
    @mock.patch('pypowervm.adapter.LOG')
    @mock.patch('requests.Session')
    def test_unauthorized_error(self, mock_session, mock_log):
        """401 (unauthorized) calling Adapter.create()."""
        # Init test data
        adapter = adp.Adapter(self.sess)
        new_scsi = pvm_stor.VSCSIClientAdapterElement.bld(adapter)

        element = new_scsi
        root_type = 'ManagedSystem'
        root_id = 'id'
        child_type = 'LogicalPartition'

        create_response = self._mk_response(401)

        # Mock out the method and class we are not currently testing
        session = mock_session.return_value
        session.request.return_value = create_response

        # Run the actual test: a 401 surfaces as HttpError and is logged once
        self.assertRaises(pvmex.HttpError, adapter.create, element,
                          root_type, root_id, child_type)
        self.assertEqual(1, mock_log.warning.call_count)
    def test_element_iter(self):
        """Test the ETElement iter() method found in the Adapter class."""
        # Init test data: 3 x Type1 and 2 x Type12 children under one root
        children = [ent.Element('Type1', None, text='T1_0'),
                    ent.Element('Type12', None, text='T12_0'),
                    ent.Element('Type1', None, text='T1_1'),
                    ent.Element('Type12', None, text='T12_1'),
                    ent.Element('Type1', None, text='T1_2')]
        top_element = ent.Element('Top', None,
                                  attrib={'schemaVersion': 'V1_0'},
                                  children=children)

        def _count_elem(top, tag, it=None, assert_tag=True):
            # Count elements produced by iterator 'it' (default: iter by tag),
            # optionally asserting every element carries the expected tag.
            elem_count = 0
            it = it if it else top.iter(tag=tag)
            for elem in it:
                if assert_tag:
                    self.assertEqual(elem.tag, tag)
                elem_count += 1

            return elem_count

        # Run the actual tests

        # Ensure all elements are traversed if we don't specify a tag
        # (root + 5 children = 6)
        self.assertEqual(_count_elem(top_element, 'Type1',
                                     it=top_element.iter(),
                                     assert_tag=False), 6)

        # Ensure all elements are traversed for tag=*
        self.assertEqual(_count_elem(top_element, 'Type1',
                                     it=top_element.iter(tag='*'),
                                     assert_tag=False), 6)

        # Ensure all elements are traversed for tag=None
        self.assertEqual(_count_elem(top_element, 'Type1',
                                     it=top_element.iter(tag=None),
                                     assert_tag=False), 6)

        # Get only the Type1 elements
        self.assertEqual(_count_elem(top_element, 'Type1'), 3)

        # Get only the top
        self.assertEqual(_count_elem(top_element, 'Top'), 1)
    @mock.patch('pypowervm.entities.Feed.unmarshal_atom_feed')
    @mock.patch('pypowervm.entities.Entry.unmarshal_atom_entry')
    @mock.patch('lxml.etree.fromstring')
    def test_extract_atom(self, mock_fromstring, mock_unm_ent, mock_unm_feed):
        """_extract_atom dispatches on the parsed root tag (feed/entry/etc)."""
        resp = adp.Response('meth', '/rest/api/uom/Debug/SetLoggingLevel',
                            'status', 'reason', 'headers', body='body')
        feed_ret = mock.Mock(tag=etree.QName(c.ATOM_NS, 'feed'))
        entry_ret = mock.Mock(tag=etree.QName(c.ATOM_NS, 'entry'))

        # Empty content; "Response is not an Atom feed/entry" - a non-None
        # error string comes back, and neither unmarshaller is invoked.
        mock_fromstring.return_value = None
        self.assertIsNotNone(resp._extract_atom())
        mock_fromstring.assert_called_with('body')
        mock_unm_feed.assert_not_called()
        mock_unm_ent.assert_not_called()

        # Unmarshal feed (returns None)
        mock_fromstring.return_value = feed_ret
        self.assertIsNone(resp._extract_atom())
        mock_unm_feed.assert_called_once_with(feed_ret, resp)
        mock_unm_ent.assert_not_called()
        mock_unm_feed.reset_mock()

        # Unmarshal entry (returns None)
        mock_fromstring.return_value = entry_ret
        self.assertIsNone(resp._extract_atom())
        mock_unm_ent.assert_called_once_with(entry_ret, resp)
        mock_unm_feed.assert_not_called()
        mock_unm_ent.reset_mock()

        # Unmarshal a 'Debug' response (returns None)
        mock_fromstring.return_value = mock.Mock(tag='debug output')
        self.assertIsNone(resp._extract_atom())
        mock_unm_feed.assert_not_called()
        mock_unm_ent.assert_not_called()

        # 'fromstring' raises.  Make sure the return message came from the
        # right place (will include the exception text)
        mock_fromstring.side_effect = Exception("test_extract_atom")
        self.assertIn("test_extract_atom", resp._extract_atom())
        mock_unm_feed.assert_not_called()
        mock_unm_ent.assert_not_called()
    @mock.patch('pypowervm.adapter.Adapter.read')
    def test_sys_uuid(self, mock_read):
        """sys_uuid lazily reads ManagedSystem once, then caches the value."""
        # Set and return the sys_uuid if not yet defined
        adapter = adp.Adapter(self.sess)
        mock_resp = mock.MagicMock()
        mock_resp.feed.entries[0].uuid = 'uuid'
        mock_read.return_value = mock_resp
        sys_uuid = adapter.sys_uuid
        mock_read.assert_called_once_with('ManagedSystem')
        self.assertEqual('uuid', sys_uuid)
        self.assertEqual('uuid', adapter._sys_uuid)

        # Return sys_uuid if defined already - no second REST read
        mock_read.reset_mock()
        sys_uuid = adapter.sys_uuid
        mock_read.assert_not_called()
class TestElement(testtools.TestCase):
    """Tests for entities.Element CDATA handling and tag/namespace access."""

    def setUp(self):
        super(TestElement, self).setUp()
        # Shared adapter fixture used to construct Elements
        self.adpt = self.useFixture(fx.AdapterFx()).adpt

    def test_cdata(self):
        """cdata=True wraps the element's text in a CDATA section."""
        no_cdata = ent.Element('tag', self.adpt, text='text', cdata=False)
        with_cdata = ent.Element('tag', self.adpt, text='text', cdata=True)
        self.assertEqual(
            no_cdata.toxmlstring(),
            '<uom:tag xmlns:uom="http://www.ibm.com/xmlns/systems/power/'
            'firmware/uom/mc/2012_10/">text</uom:tag>'.encode('utf-8'))
        self.assertEqual(
            with_cdata.toxmlstring(),
            '<uom:tag xmlns:uom="http://www.ibm.com/xmlns/systems/power/firmwa'
            're/uom/mc/2012_10/"><![CDATA[text]]></uom:tag>'.encode('utf-8'))

    def test_tag_namespace(self):
        """tag/namespace getters and setters, with and without a namespace."""
        el = ent.Element('tag', self.adpt)
        # The raw etree tag carries the Clark-notation {ns}tag form
        self.assertEqual(el.element.tag, '{http://www.ibm.com/xmlns/systems/po'
                                         'wer/firmware/uom/mc/2012_10/}tag')
        # entities.Element.tag strips the namespace
        self.assertEqual(el.tag, 'tag')
        self.assertEqual(el.namespace, 'http://www.ibm.com/xmlns/systems/powe'
                                       'r/firmware/uom/mc/2012_10/')
        # Test setter
        el.tag = 'gat'
        self.assertEqual(el.element.tag, '{http://www.ibm.com/xmlns/systems/po'
                                         'wer/firmware/uom/mc/2012_10/}gat')
        self.assertEqual(el.tag, 'gat')
        el.namespace = 'foo'
        self.assertEqual(el.namespace, 'foo')
        # Now with no namespace
        el = ent.Element('tag', self.adpt, ns='')
        self.assertEqual(el.element.tag, 'tag')
        self.assertEqual(el.tag, 'tag')
        self.assertEqual(el.namespace, '')
        el.tag = 'gat'
        self.assertEqual(el.element.tag, 'gat')
        self.assertEqual(el.tag, 'gat')
        el.namespace = 'foo'
        self.assertEqual(el.namespace, 'foo')
class TestAdapterClasses(subunit.IsolatedTestCase, testtools.TestCase):
    """Lifecycle/garbage-collection tests for Session and EventListener.

    Runs isolated (subunit.IsolatedTestCase) so reference-counting and GC
    assertions aren't perturbed by other tests in the same process.
    """

    def setUp(self):
        super(TestAdapterClasses, self).setUp()
        # Patch logon/logoff so no real REST traffic occurs
        self.mock_logoff = self.useFixture(
            fixtures.MockPatchObject(adp.Session, '_logoff')).mock
        self.mock_logon = self.useFixture(
            fixtures.MockPatchObject(adp.Session, '_logon')).mock
        self.mock_events = self.useFixture(
            fixtures.MockPatchObject(adp._EventListener, '_get_events')).mock
        # Mock the initial events coming in on start
        self.mock_events.return_value = {'general': 'init'}, [], []

    def test_instantiation(self):
        """Direct instantiation of EventListener is not allowed."""
        # Get a session
        sess = adp.Session()

        # Now get the EventListener
        self.assertRaises(TypeError, adp.EventListener, sess)

        # Mock the session token like we logged on
        sess._sessToken = 'token'.encode('utf-8')

        # Ensure we get an EventListener (only via the session factory)
        self.assertIsInstance(sess.get_event_listener(), adp.EventListener)

    def test_shutdown_session(self):
        """Test garbage collection of the session.

        Ensures the Session can be properly garbage collected.
        """
        # Get a session
        sess = adp.Session()
        # Mock the session token like we logged on
        sess._sessToken = 'token'.encode('utf-8')

        # It should have logged on but not off.
        self.assertTrue(self.mock_logon.called)
        self.assertFalse(self.mock_logoff.called)

        # Get an event listener to test the weak references
        event_listen = sess.get_event_listener()

        # Test the circular reference (but one link is weak)
        sess.hello = 'hello'
        self.assertEqual(sess.hello, event_listen.adp.session.hello)

        # There should be 1 reference to the session (ours)
        self.assertEqual(1, len(gc.get_referrers(sess)))

    def test_shutdown_adapter(self):
        """Test garbage collection of the session, event listener.

        Ensures the proper shutdown of the session and event listener when
        we start with constructing an Adapter, implicit session and
        EventListener.
        """
        # Get Adapter, implicit session
        adapter = adp.Adapter()
        adapter.session._sessToken = 'token'.encode('utf-8')
        # Get construct and event listener
        adapter.session.get_event_listener()
        # Turn off the event listener
        adapter.session.get_event_listener().shutdown()

        # Session is still active
        self.assertFalse(self.mock_logoff.called)

        # The only thing that refers the adapter is our reference
        self.assertEqual(1, len(gc.get_referrers(adapter)))
class TestElementInject(testtools.TestCase):
    def setUp(self):
        """Build the canonical child Elements used by the inject tests."""
        super(TestElementInject, self).setUp()
        self.adpt = self.useFixture(fx.AdapterFx()).adpt
        # Canonical schema ordering used for injection positioning
        self.ordering_list = ('AdapterType', 'UseNextAvailableSlotID',
                              'RemoteLogicalPartitionID', 'RemoteSlotNumber')
        self.child_at = ent.Element('AdapterType', self.adpt, text='Client')
        self.child_unasi = ent.Element('UseNextAvailableSlotID', self.adpt,
                                       text='true')
        # Three RemoteLogicalPartitionID variants to test replace/append
        self.child_rlpi1 = ent.Element('RemoteLogicalPartitionID', self.adpt,
                                       text='1')
        self.child_rlpi2 = ent.Element('RemoteLogicalPartitionID', self.adpt,
                                       text='2')
        self.child_rlpi3 = ent.Element('RemoteLogicalPartitionID', self.adpt,
                                       text='3')
        self.child_rsn = ent.Element('RemoteSlotNumber', self.adpt,
                                     text='12')
        # A fully-populated, correctly-ordered child list
        self.all_children = [
            self.child_at, self.child_unasi, self.child_rlpi1, self.child_rsn]
def _mk_el(self, children):
return ent.Element('VirtualSCSIClientAdapter', self.adpt,
attrib={'schemaVersion': 'V1_0'},
children=children)
def assert_expected_children(self, parent, *expected_children):
"""Assert that *children are the children of parent, in that order.
:param parent: Parent adapter.Element
:param children: Child adapter.Elements
"""
# etree.Element doesn't implement __eq__, so different instances of the
# same Element aren't "equal". Compare XML strings instead.
actual = [etree.tostring(elem) for elem in list(parent.element)]
expected = [etree.tostring(chld.element) for chld in expected_children]
self.assertEqual(actual, expected)
    def test_no_children(self):
        """Inject when the element has no children - should "append"."""
        el = self._mk_el([])
        el.inject(self.child_rlpi1)
        self.assert_expected_children(el, self.child_rlpi1)
        # Result should be same regardless of other params
        el = self._mk_el([])
        el.inject(self.child_rlpi1, self.ordering_list, replace=False)
        self.assert_expected_children(el, self.child_rlpi1)
    def test_subelement_found_one_replace_true(self):
        """Replace existing child with same tag."""
        el = self._mk_el(self.all_children)
        # rlpi2 replaces rlpi1 in place
        el.inject(self.child_rlpi2, self.ordering_list)
        self.assert_expected_children(el, self.child_at, self.child_unasi,
                                      self.child_rlpi2, self.child_rsn)

        # Proving default replace=True - same result if specified
        el = self._mk_el(self.all_children)
        el.inject(self.child_rlpi2, self.ordering_list, replace=True)
        self.assert_expected_children(el, self.child_at, self.child_unasi,
                                      self.child_rlpi2, self.child_rsn)
def test_subelement_found_mult_replace_true(self):
"""Replace existing child with same tag when >1 such children.
Should replace the last such child.
"""
el = self._mk_el([self.child_at, self.child_unasi, self.child_rlpi1,
self.child_rlpi3, self.child_rsn])
el.inject(self.child_rlpi2, self.ordering_list)
self.assert_expected_children(el, self.child_at, self.child_unasi,
self.child_rlpi1, self.child_rlpi2,
self.child_rsn)
def test_subelement_found_replace_false(self):
"""Inject after existing child(ren) with same tag."""
el = self._mk_el(self.all_children)
el.inject(self.child_rlpi2, self.ordering_list, False)
self.assert_expected_children(el, self.child_at, self.child_unasi,
self.child_rlpi1, self.child_rlpi2,
self.child_rsn)
el.inject(self.child_rlpi3, self.ordering_list, False)
self.assert_expected_children(el, self.child_at, self.child_unasi,
self.child_rlpi1, self.child_rlpi2,
self.child_rlpi3, self.child_rsn)
def test_subelement_not_in_ordering_list(self):
"""Subelement not in ordering list - should append."""
el = self._mk_el(self.all_children)
ch = ent.Element('SomeNewElement', self.adpt, text='foo')
el.inject(ch, ordering_list=self.ordering_list)
self.assert_expected_children(el, self.child_at, self.child_unasi,
self.child_rlpi1, self.child_rsn, ch)
def test_first_populated(self):
"""Inject the first child when children are otherwise populated."""
el = self._mk_el(self.all_children[1:])
el.inject(self.child_at, self.ordering_list)
self.assert_expected_children(el, self.child_at, self.child_unasi,
self.child_rlpi1, self.child_rsn)
def test_first_sparse(self):
"""Inject the first child when children are sparsely populated."""
# This is most interesting when the existing child is not the one right
# next to the injectee.
el = self._mk_el([self.child_rlpi1])
el.inject(self.child_at, self.ordering_list)
self.assert_expected_children(el, self.child_at, self.child_rlpi1)
def test_last_populated(self):
"""Inject the last child when children are otherwise populated."""
el = self._mk_el(self.all_children[:-1])
el.inject(self.child_rsn, self.ordering_list)
self.assert_expected_children(el, self.child_at, self.child_unasi,
self.child_rlpi1, self.child_rsn)
def test_last_sparse(self):
"""Inject the last child when children are sparsely populated."""
# This is most interesting when the existing child is not the one right
# next to the injectee.
el = self._mk_el([self.child_unasi])
el.inject(self.child_rsn, self.ordering_list)
self.assert_expected_children(el, self.child_unasi, self.child_rsn)
def test_middle_populated(self):
"""Inject a middle child when children are otherwise populated."""
el = self._mk_el([self.child_at, self.child_unasi, self.child_rsn])
el.inject(self.child_rlpi1, self.ordering_list)
self.assert_expected_children(el, self.child_at, self.child_unasi,
self.child_rlpi1, self.child_rsn)
def test_middle_sparse(self):
"""Inject a middle child when children are sparsely populated."""
el = self._mk_el([self.child_at, self.child_rsn])
el.inject(self.child_rlpi1, self.ordering_list)
self.assert_expected_children(
el, self.child_at, self.child_rlpi1, self.child_rsn)
class TestElementWrapper(testtools.TestCase):
    """Tests for the ElementWrapper class."""

    def setUp(self):
        super(TestElementWrapper, self).setUp()
        # Load the same payload twice so we get two independent but
        # identical network bridge entries to compare.
        self.resp = pvmhttp.load_pvm_resp(NET_BRIDGE_FILE).get_response()
        self.nb1 = self.resp.feed.entries[0]
        self.resp2 = pvmhttp.load_pvm_resp(NET_BRIDGE_FILE).get_response()
        self.nb2 = self.resp2.feed.entries[0]

    def test_equality(self):
        """Two elements loaded from identical data compare equal."""
        first = self._find_seas(self.nb1)[0]
        second = self._find_seas(self.nb2)[0]
        # Use ==, not assertEqual, so __eq__ itself is what's exercised.
        self.assertTrue(first == second)
        # Mutating one of the pair breaks equality.
        second.element.append(etree.Element('Bob'))
        self.assertFalse(first == second)

    def test_inequality_by_subelem_change(self):
        """Changing a nested element's text breaks equality."""
        first = self._find_seas(self.nb1)[0]
        second = self._find_seas(self.nb2)[0]
        trunk = second.findall('TrunkAdapters/TrunkAdapter')[0]
        pvid = trunk.find('PortVLANID')
        pvid.text = '1'
        self.assertFalse(first == second)

    def _find_seas(self, entry):
        """Return the SharedEthernetAdapter elements of *entry*."""
        return entry.element.findall('SharedEthernetAdapters/'
                                     'SharedEthernetAdapter')
| 43.344677 | 79 | 0.615707 |
import copy
import errno
import fixtures
import gc
from lxml import etree
import six
import subunit
if six.PY2:
import __builtin__ as builtins
elif six.PY3:
import builtins
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
import mock
import requests.models as req_mod
import requests.structures as req_struct
import testtools
import pypowervm.adapter as adp
import pypowervm.const as c
import pypowervm.entities as ent
import pypowervm.exceptions as pvmex
import pypowervm.tests.lib as testlib
import pypowervm.tests.test_fixtures as fx
from pypowervm.tests.test_utils import pvmhttp
from pypowervm.wrappers import storage as pvm_stor
logon_text = testlib.file2b("logon.xml")
response_text = testlib.file2b("event.xml")
NET_BRIDGE_FILE = 'fake_network_bridge.txt'
class TestAdapter(testtools.TestCase):
    def _mk_response(self, status, content=None):
        """Build a requests.Response suitable for mocking Session.request.

        :param status: HTTP status code; must be one of 200, 204, 401.
        :param content: Optional raw response body.  Content-Length is set
                        from it only for a 200 with a truthy body.
        :return: A populated requests.models.Response.
        """
        reasons = {200: 'OK', 204: 'No Content', 401: 'Unauthorized'}
        my_response = req_mod.Response()
        my_response.status_code = status
        my_response.reason = reasons[status]
        clen = '0'
        if status == 200 and content:
            clen = str(len(content))
        # Canned headers; presumably captured from a real PowerVM REST
        # response - values are not otherwise significant to the tests.
        dict_headers = {
            'content-length': clen, 'x-powered-by': 'Servlet/3.0',
            'set-cookie': ('JSESSIONID=0000a41BnJsGTNQvBGERA3wR1nj:759878cb-4f'
                           '9a-4b05-a09a-3357abfea3b4; Path=/; Secure; HttpOnl'
                           'y, CCFWSESSION=E4C0FFBE9130431DBF1864171ECC6A6E; P'
                           'ath=/; Secure; HttpOnly'),
            'expires': 'Thu, 01 Dec 1994 16:00:00 GMT',
            'x-transaction-id': 'XT10000073',
            'cache-control': 'no-cache="set-cookie, set-cookie2"',
            'date': 'Wed, 23 Jul 2014 21:51:10 GMT',
            'content-type': 'application/vnd.ibm.powervm'}
        my_response.headers = req_struct.CaseInsensitiveDict(dict_headers)
        my_response._content = content
        return my_response
def setUp(self):
super(TestAdapter, self).setUp()
host = '0.0.0.0'
user = 'user'
pwd = 'pwd'
auditmemento = 'audit'
my_response = self._mk_response(200, logon_text)
with mock.patch('requests.Session') as mock_session:
session = mock_session.return_value
session.request.return_value = my_response
self.sess = adp.Session(host, user, pwd,
auditmemento=auditmemento,
certpath=None)
self.sess._logoff = mock.Mock()
def tearDown(self):
self.sess = None
super(TestAdapter, self).tearDown()
@mock.patch('pypowervm.wrappers.event.Event.wrap')
@mock.patch('time.sleep')
def test_event_listener(self, mock_sleep, mock_evt_wrap):
with mock.patch.object(adp._EventListener, '_get_events') as m_events,\
mock.patch.object(adp, '_EventPollThread') as mock_poll:
self.sess._sessToken = 'token'.encode('utf-8')
m_events.return_value = {'general': 'init'}, 'raw_evt', 'wrap_evt'
event_listen = self.sess.get_event_listener()
self.assertIsNotNone(event_listen)
evh = mock.Mock(spec=adp.EventHandler, autospec=True)
raw_evh = mock.Mock(spec=adp.RawEventHandler, autospec=True)
wrap_evh = mock.Mock(spec=adp.WrapperEventHandler, autospec=True)
event_listen.subscribe(evh)
event_listen.subscribe(raw_evh)
event_listen.subscribe(wrap_evh)
events, raw_events, evtwraps = event_listen._get_events()
event_listen._dispatch_events(events, raw_events, evtwraps)
evh.process.assert_called_once_with({'general': 'init'})
raw_evh.process.assert_called_once_with('raw_evt')
wrap_evh.process.assert_called_once_with('wrap_evt')
self.assertTrue(mock_poll.return_value.start.called)
self.assertEqual({'general': 'init'}, event_listen.getevents())
with mock.patch.object(event_listen, '_format_events') as mock_format,\
mock.patch.object(event_listen.adp, 'read') as mock_read:
mock_read.side_effect = Exception()
self.assertEqual(({}, [], []), event_listen._get_events())
self.assertEqual(1, mock_read.call_count)
mock_format.assert_not_called()
mock_evt_wrap.assert_not_called()
mock_sleep.assert_called_once_with(5)
mock_read.reset_mock()
# side_effect takes precedence over return_value; so kill it.
mock_read.side_effect = None
# Fabricate some mock entries, so format gets called.
mock_read.return_value.feed.entries = (['entry1', 'entry2'])
self.assertEqual(({}, [], mock_evt_wrap.return_value),
event_listen._get_events())
self.assertEqual(1, mock_read.call_count)
mock_format.assert_has_calls([mock.call('entry1', {}, []),
mock.call('entry2', {}, [])])
mock_evt_wrap.assert_called_once_with(mock_read.return_value)
# Test _format_events
event_data = [
{
'EventType': 'NEW_CLIENT',
'EventData': 'href1',
'EventID': '1',
'EventDetail': 'detail',
},
{
'EventType': 'CACHE_CLEARED',
'EventData': 'href2',
'EventID': '2',
'EventDetail': 'detail2',
},
{
'EventType': 'ADD_URI',
'EventData': 'LPAR1',
'EventID': '3',
'EventDetail': 'detail3',
},
{
'EventType': 'DELETE_URI',
'EventData': 'LPAR1',
'EventID': '4',
'EventDetail': 'detail4',
},
{
'EventType': 'INVALID_URI',
'EventData': 'LPAR1',
'EventID': '4',
'EventDetail': 'detail4',
},
]
# Setup a side effect that returns events from the test data.
def get_event_data(item):
data = event_data[0][item]
if item == 'EventDetail':
event_data.pop(0)
return data
# Raw events returns a sequence the same as the test data
raw_result = copy.deepcopy(event_data)
# Legacy events overwrites some events.
dict_result = {'general': 'invalidate', 'LPAR1': 'delete'}
# Build a mock entry
entry = mock.Mock()
entry.element.findtext.side_effect = get_event_data
events = {}
raw_events = []
x = len(raw_result)
while x:
x -= 1
event_listen._format_events(entry, events, raw_events)
self.assertEqual(raw_result, raw_events)
self.assertEqual(dict_result, events)
@mock.patch('pypowervm.adapter.Session')
def test_empty_init(self, mock_sess):
adp.Adapter()
mock_sess.assert_called_with()
def test_no_cache(self):
self.assertRaises(pvmex.CacheNotSupportedException,
adp.Adapter, use_cache=True)
@mock.patch('requests.Session')
def test_read(self, mock_session):
# Init test data
root_type = 'ManagedSystem'
root_id = 'caae9209-25e5-35cd-a71a-ed55c03f294d'
child_type = 'child'
child_id = 'child'
suffix_type = 'quick'
adapter = adp.Adapter(self.sess)
# Create a Response object, that will serve as a mock return value
read_response = self._mk_response(200, response_text)
# Mock out the method and class we are not currently testing
session = mock_session.return_value
session.request.return_value = read_response
# Run the actual test
ret_read_value = adapter.read(root_type, root_id, child_type,
child_id, suffix_type)
# Verify Correct path was built in build_path()
reqpath = adp.Adapter.build_path('uom', root_type, root_id,
child_type, child_id, suffix_type)
# Verify the return value
# self.assertIsInstance(ret_read_value, adp.Response)
self.assertEqual('GET', ret_read_value.reqmethod)
self.assertEqual(200, ret_read_value.status)
self.assertEqual(reqpath, ret_read_value.reqpath)
@mock.patch('pypowervm.adapter.Adapter._validate')
@mock.patch('pypowervm.adapter.Adapter.build_path')
@mock.patch('pypowervm.adapter.Adapter.read_by_path')
def test_read2(self, mock_rbp, mock_bld, mock_val):
adap = adp.Adapter(session=self.sess)
# Defaults
self.assertEqual(mock_rbp.return_value, adap.read('root_type'))
mock_val.assert_called_once_with(
'read', 'root_type', None, None, None, None, None, None)
mock_bld.assert_called_once_with(
'uom', 'root_type', None, None, None, None, None, None, xag=None,
add_qp=None)
mock_rbp.assert_called_once_with(
mock_bld.return_value, None, timeout=-1, auditmemento=None, age=-1,
sensitive=False, helpers=None)
# Specified kwargs
mock_val.reset_mock()
mock_bld.reset_mock()
mock_rbp.reset_mock()
self.assertEqual(mock_rbp.return_value, adap.read(
'root_type', root_id='root_id', child_type='child_type',
child_id='child_id', suffix_type='suffix_type',
suffix_parm='suffix_parm', detail='detail', service='service',
etag='etag', timeout='timeout', auditmemento='auditmemento',
age='age', xag='xag', sensitive='sensitive', helpers='helpers',
add_qp='add_qp'))
mock_val.assert_called_once_with(
'read', 'root_type', 'root_id', 'child_type', 'child_id',
'suffix_type', 'suffix_parm', 'detail')
mock_bld.assert_called_once_with(
'service', 'root_type', 'root_id', 'child_type', 'child_id',
'suffix_type', 'suffix_parm', 'detail', xag='xag', add_qp='add_qp')
mock_rbp.assert_called_once_with(
mock_bld.return_value, 'etag', timeout='timeout',
auditmemento='auditmemento', age='age', sensitive='sensitive',
helpers='helpers')
@mock.patch('pypowervm.adapter.Adapter.extend_path')
def test_build_path(self, mock_exp):
adap = adp.Adapter(session=self.sess)
# Defaults
self.assertEqual(mock_exp.return_value, adap.build_path(
'service', 'root_type'))
mock_exp.assert_called_once_with(
'/rest/api/service/root_type', suffix_type=None, suffix_parm=None,
detail=None, xag=None, add_qp=None)
# child specs ignored if no root ID
mock_exp.reset_mock()
self.assertEqual(mock_exp.return_value, adap.build_path(
'service', 'root_type', child_type='child_type',
child_id='child_id'))
mock_exp.assert_called_once_with(
'/rest/api/service/root_type', suffix_type=None, suffix_parm=None,
detail=None, xag=None, add_qp=None)
# child ID ignored if no child type
mock_exp.reset_mock()
self.assertEqual(mock_exp.return_value, adap.build_path(
'service', 'root_type', root_id='root_id', child_id='child_id'))
mock_exp.assert_called_once_with(
'/rest/api/service/root_type/root_id', suffix_type=None,
suffix_parm=None, detail=None, xag=None, add_qp=None)
# Specified kwargs (including full child spec
mock_exp.reset_mock()
self.assertEqual(mock_exp.return_value, adap.build_path(
'service', 'root_type', root_id='root_id', child_type='child_type',
child_id='child_id', suffix_type='suffix_type',
suffix_parm='suffix_parm', detail='detail', xag='xag',
add_qp='add_qp'))
mock_exp.assert_called_once_with(
'/rest/api/service/root_type/root_id/child_type/child_id',
suffix_type='suffix_type', suffix_parm='suffix_parm',
detail='detail', xag='xag', add_qp='add_qp')
@mock.patch('pypowervm.adapter.Adapter._request')
def test_headers(self, mock_request):
def validate_hdrs_func(acc=None, inm=None):
expected_headers = {}
if acc is not None:
expected_headers['Accept'] = acc
if inm is not None:
expected_headers['If-None-Match'] = inm
def validate_request(meth, path, **kwargs):
self.assertEqual(expected_headers, kwargs['headers'])
return validate_request
adpt = adp.Adapter(mock.Mock())
basepath = c.API_BASE_PATH + 'uom/SomeRootObject'
uuid = "abcdef01-2345-2345-2345-67890abcdef0"
hdr_xml = 'application/atom+xml'
hdr_json = '*/*'
etag = 'abc123'
# Root feed
mock_request.side_effect = validate_hdrs_func(acc=hdr_xml)
adpt._read_by_path(basepath, None, None, None, None)
# Root instance with etag
mock_request.side_effect = validate_hdrs_func(acc=hdr_xml, inm=etag)
adpt._read_by_path(basepath + '/' + uuid, etag, None, None, None)
# Quick root anchor (produces XML report of available quick properties
mock_request.side_effect = validate_hdrs_func(acc=hdr_xml)
adpt._read_by_path(basepath + '/quick', None, None, None, None)
# Quick root instance (JSON of all quick properties)
mock_request.side_effect = validate_hdrs_func(acc=hdr_json)
adpt._read_by_path('/'.join([basepath, uuid, 'quick']), None, None,
None, None)
# Specific quick property
mock_request.side_effect = validate_hdrs_func(acc=hdr_json)
adpt._read_by_path('/'.join([basepath, uuid, 'quick', 'property']),
None, None, None, None)
# Explicit JSON file
mock_request.side_effect = validate_hdrs_func(acc=hdr_json)
adpt._read_by_path('/'.join([basepath, 'somefile.json']), None, None,
None, None)
# Object that happens to end in 'json'
mock_request.side_effect = validate_hdrs_func(acc=hdr_xml)
adpt._read_by_path('/'.join([basepath, 'xml_about_json']), None, None,
None, None)
# Quick with query params and fragments
mock_request.side_effect = validate_hdrs_func(acc=hdr_json)
adpt._read_by_path('/'.join([basepath, uuid, 'quick']) +
'?group=None
@mock.patch('requests.Session')
def test_create(self, mock_session):
# Init test data
adapter = adp.Adapter(self.sess)
new_scsi = pvm_stor.VSCSIClientAdapterElement.bld(adapter)
element = new_scsi
root_type = 'ManagedSystem'
root_id = 'id'
child_type = 'LogicalPartition'
create_response = self._mk_response(200, response_text)
# Mock out the method and class we are not currently testing
session = mock_session.return_value
session.request.return_value = create_response
# Run the actual test
ret_create_value = adapter.create(element, root_type, root_id,
child_type)
# Verify Correct path was built in build_path()
reqpath = adp.Adapter.build_path('uom', root_type, root_id,
child_type, xag=[])
# Verify the return value
# self.assertIsInstance(ret_create_value, adp.Response)
self.assertEqual('PUT', ret_create_value.reqmethod)
self.assertEqual(200, ret_create_value.status)
self.assertEqual(reqpath, ret_create_value.reqpath)
@mock.patch('requests.Session')
def test_update(self, mock_session):
# Init test data
data = 'data'
etag = 'etag'
root_type = 'root type'
root_id = 'root id'
adapter = adp.Adapter(self.sess)
update_response = self._mk_response(200, response_text)
# Mock out the method and class we are not currently testing
session = mock_session.return_value
session.request.return_value = update_response
# Run the actual test
ret_update_value = adapter.update(data, etag, root_type, root_id)
# Verify Correct path was built in build_path()
reqpath = adp.Adapter.build_path('uom', root_type, root_id)
# Verify the return value
# self.assertIsInstance(ret_update_value, adp.Response)
self.assertEqual('POST', ret_update_value.reqmethod)
self.assertEqual(200, ret_update_value.status)
self.assertEqual(reqpath, ret_update_value.reqpath)
@mock.patch('requests.Session')
def test_upload(self, mock_session):
# Build the adapter
adapter = adp.Adapter(self.sess)
# Mock data
filedesc_mock = mock.MagicMock()
filedesc_mock.findtext.side_effect = ['uuid', 'mime']
with mock.patch.object(adapter, '_request') as mock_request:
adapter.upload_file(filedesc_mock, None)
# Validate
expected_headers = {'Accept': 'application/vnd.ibm.powervm.web+xml',
'Content-Type': 'mime'}
expected_path = '/rest/api/web/File/contents/uuid'
mock_request.assert_called_once_with(
'PUT', expected_path, helpers=None, headers=expected_headers,
timeout=-1, auditmemento=None, filehandle=None, chunksize=65536)
def _test_upload_request(self, mock_rq, mock_fh, fhdata):
adapter = adp.Adapter(self.sess)
mock_fd = mock.Mock(findtext=mock.Mock(side_effect=['uuid', 'mime']))
def check_request(method, url, data=None, headers=None, timeout=None):
self.assertEqual('PUT', method)
self.assertEqual(
self.sess.dest + '/rest/api/web/File/contents/uuid', url)
# Verify that data is iterable
self.assertEqual(fhdata, [chunk for chunk in data])
return mock.Mock(status_code=c.HTTPStatus.OK_NO_CONTENT)
mock_rq.side_effect = check_request
adapter.upload_file(mock_fd, mock_fh)
@mock.patch('requests.sessions.Session.request')
def test_upload_request_iter(self, mock_rq):
fhdata = ['one', 'two']
self._test_upload_request(mock_rq, fhdata, fhdata)
@mock.patch('requests.sessions.Session.request')
def test_upload_request_fh(self, mock_rq):
# filehandle is a read()able
fhdata = ['one', 'two']
mock_fh = mock.Mock(read=mock.Mock(side_effect=fhdata))
self._test_upload_request(mock_rq, mock_fh, fhdata)
# Make sure the file handle's read method was invoked
mock_fh.read.assert_has_calls([mock.call(65536)] * len(fhdata))
def _assert_paths_equivalent(self, exp, act):
p_exp = urlparse.urlparse(exp)
p_act = urlparse.urlparse(act)
self.assertEqual(p_exp.scheme, p_act.scheme)
self.assertEqual(p_exp.netloc, p_act.netloc)
self.assertEqual(p_exp.path, p_act.path)
self.assertEqual(p_exp.fragment, p_act.fragment)
qs_exp = urlparse.parse_qs(p_exp.query)
qs_act = urlparse.parse_qs(p_act.query)
for vals in qs_exp.values():
vals.sort()
for vals in qs_act.values():
vals.sort()
self.assertEqual(qs_exp, qs_act)
@mock.patch('requests.Session')
def test_extend_path(self, mock_session):
adapter = adp.Adapter(self.sess)
path = adapter.extend_path('basepath', suffix_type='suffix',
suffix_parm='suffix_parm',
detail='detail',
xag=[c.XAG.VIO_FMAP])
expected_path = ('basepath/suffix/suffix_parm?detail=detail&'
'group=ViosFCMapping')
self._assert_paths_equivalent(expected_path, path)
path = adapter.extend_path('basepath', suffix_type='suffix',
suffix_parm='suffix_parm',
detail='detail',
xag={c.XAG.VIO_FMAP, c.XAG.VIO_NET})
expected_path = ('basepath/suffix/suffix_parm?detail=detail&'
'group=ViosFCMapping,ViosNetwork')
self._assert_paths_equivalent(expected_path, path)
path = adapter.extend_path('basepath', suffix_type='suffix',
suffix_parm='suffix_parm',
detail='detail',
xag=[c.XAG.VIO_NET, c.XAG.VIO_FMAP])
expected_path = ('basepath/suffix/suffix_parm?detail=detail&'
'group=ViosFCMapping,ViosNetwork')
self._assert_paths_equivalent(expected_path, path)
path = adapter.extend_path('basepath', suffix_type='suffix',
suffix_parm='suffix_parm', detail='detail',
xag=[])
expected_path = 'basepath/suffix/suffix_parm?detail=detail'
self._assert_paths_equivalent(expected_path, path)
path = adapter.extend_path('basepath', suffix_type='suffix',
suffix_parm='suffix_parm')
expected_path = 'basepath/suffix/suffix_parm?group=None'
self._assert_paths_equivalent(expected_path, path)
path = adapter.extend_path('basepath', suffix_type='quick',
suffix_parm='suffix_parm')
expected_path = 'basepath/quick/suffix_parm'
self._assert_paths_equivalent(expected_path, path)
path = adapter.extend_path('basepath', suffix_type='do',
suffix_parm='suffix_parm')
expected_path = 'basepath/do/suffix_parm'
self._assert_paths_equivalent(expected_path, path)
self._assert_paths_equivalent(
'basepath?group=None', adapter.extend_path('basepath'))
self._assert_paths_equivalent(
'basepath?group=None', adapter.extend_path('basepath?group=None'))
self._assert_paths_equivalent(
'basepath?group=a,b,c',
adapter.extend_path('basepath?group=a,b,c'))
self._assert_paths_equivalent(
'basepath', adapter.extend_path('basepath', xag=()))
self._assert_paths_equivalent(
'basepath', adapter.extend_path('basepath?group=None', xag={}))
self.assertRaises(
ValueError, adapter.extend_path, 'basepath?group=a,b,c', xag=[])
self._assert_paths_equivalent(
'basepath?group=a,b,c',
adapter.extend_path('basepath', xag={'a', 'b', 'c'}))
self._assert_paths_equivalent(
'basepath?group=a,b,c',
adapter.extend_path('basepath?group=None', xag=('a', 'b', 'c')))
self._assert_paths_equivalent(
'basepath?group=a,b,c',
adapter.extend_path('basepath?group=a,b,c', xag=['a', 'b', 'c']))
self.assertRaises(ValueError, adapter.extend_path,
'basepath?group=a,b,c', xag=['d', 'e', 'f'])
self._assert_paths_equivalent(
'basepath?foo=1,2,3&group=a,b,c&foo=4,5,6',
adapter.extend_path('basepath?foo=4,5,6&group=None&foo=1,2,3',
xag=['a', 'b', 'c']))
self._assert_paths_equivalent(
'basepath', adapter.extend_path('basepath', xag=[], add_qp=None))
self._assert_paths_equivalent(
'basepath?one=%23%24%25%5E%26',
adapter.extend_path('basepath', xag=[], add_qp=[('one', '#$%^&')]))
self._assert_paths_equivalent(
'basepath?1=3&1=2',
adapter.extend_path('basepath', xag=[], add_qp=[(1, 3), (1, 2)]))
self._assert_paths_equivalent(
'basepath?group=None&key=value&something=else',
adapter.extend_path(
'basepath', add_qp=[('key', 'value'), ('something', 'else')]))
self._assert_paths_equivalent(
'basepath?already=here&group=a,b,c&key=value&something=else',
adapter.extend_path(
'basepath?already=here', xag=['a', 'b', 'c'],
add_qp=[('key', 'value'), ('something', 'else')]))
@mock.patch('pypowervm.adapter.LOG')
@mock.patch('pypowervm.adapter.Adapter.read_by_path')
def test_read_by_href(self, mock_read_by_path, mock_log):
def validate_read_by_path(expected):
def _read_by_path(path, etag, timeout, auditmemento, age,
sensitive, helpers):
self._assert_paths_equivalent(expected, path)
for param in (etag, auditmemento, helpers):
self.assertIsNone(param)
for param2 in (age, timeout):
self.assertEqual(-1, param2)
self.assertFalse(sensitive)
return _read_by_path
self.sess.host = 'foo'
self.sess.port = 123
adapter = adp.Adapter(self.sess)
mock_read_by_path.side_effect = validate_read_by_path(
'/rest/api/uom/Bar?k=v&group=None#frag')
adapter.read_by_href('http://foo:123/rest/api/uom/Bar?k=v#frag')
self.assertFalse(mock_log.debug.called)
self.sess.host = 'bar'
mock_read_by_path.side_effect = validate_read_by_path(
'/rest/api/uom/Bar?k=v&group=None#frag')
adapter.read_by_href('http://foo:123/rest/api/uom/Bar?k=v#frag')
self.assertTrue(mock_log.debug.called)
mock_read_by_path.side_effect = validate_read_by_path(
'/rest/api/uom/Bar?k=v&group=RealGroup#frag')
adapter.read_by_href(
'http://foo:123/rest/api/uom/Bar?k=v&group=RealGroup#frag')
@mock.patch('requests.Session')
def test_delete(self, mock_session):
root_type = 'ManagedSystem'
root_id = 'id'
adapter = adp.Adapter(self.sess)
delete_response = self._mk_response(204)
session = mock_session.return_value
session.request.return_value = delete_response
ret_delete_value = adapter.delete(root_type, root_id)
reqpath = adp.Adapter.build_path('uom', root_type, root_id, xag=[])
self.assertEqual('DELETE', ret_delete_value.reqmethod)
self.assertEqual(204, ret_delete_value.status)
self.assertEqual(reqpath, ret_delete_value.reqpath)
@mock.patch.object(builtins, 'open')
def test_auth_file_error(self, mock_open_patch):
mock_open_patch.side_effect = IOError(errno.EACCES, 'Error')
self.assertRaises(pvmex.AuthFileReadError,
self.sess._get_auth_tok_from_file,
mock.Mock(), mock.Mock())
mock_open_patch.side_effect = IOError(errno.EIO, 'Error')
self.assertRaises(pvmex.AuthFileAccessError,
self.sess._get_auth_tok_from_file,
mock.Mock(), mock.Mock())
@mock.patch('pypowervm.adapter.LOG')
@mock.patch('requests.Session')
def test_unauthorized_error(self, mock_session, mock_log):
adapter = adp.Adapter(self.sess)
new_scsi = pvm_stor.VSCSIClientAdapterElement.bld(adapter)
element = new_scsi
root_type = 'ManagedSystem'
root_id = 'id'
child_type = 'LogicalPartition'
create_response = self._mk_response(401)
session = mock_session.return_value
session.request.return_value = create_response
self.assertRaises(pvmex.HttpError, adapter.create, element,
root_type, root_id, child_type)
self.assertEqual(1, mock_log.warning.call_count)
def test_element_iter(self):
children = [ent.Element('Type1', None, text='T1_0'),
ent.Element('Type12', None, text='T12_0'),
ent.Element('Type1', None, text='T1_1'),
ent.Element('Type12', None, text='T12_1'),
ent.Element('Type1', None, text='T1_2')]
top_element = ent.Element('Top', None,
attrib={'schemaVersion': 'V1_0'},
children=children)
def _count_elem(top, tag, it=None, assert_tag=True):
elem_count = 0
it = it if it else top.iter(tag=tag)
for elem in it:
if assert_tag:
self.assertEqual(elem.tag, tag)
elem_count += 1
return elem_count
self.assertEqual(_count_elem(top_element, 'Type1',
it=top_element.iter(),
assert_tag=False), 6)
# Ensure all elements are traversed for tag=*
self.assertEqual(_count_elem(top_element, 'Type1',
it=top_element.iter(tag='*'),
assert_tag=False), 6)
# Ensure all elements are traversed for tag=None
self.assertEqual(_count_elem(top_element, 'Type1',
it=top_element.iter(tag=None),
assert_tag=False), 6)
# Get only the Type1 elements
self.assertEqual(_count_elem(top_element, 'Type1'), 3)
# Get only the top
self.assertEqual(_count_elem(top_element, 'Top'), 1)
@mock.patch('pypowervm.entities.Feed.unmarshal_atom_feed')
@mock.patch('pypowervm.entities.Entry.unmarshal_atom_entry')
@mock.patch('lxml.etree.fromstring')
def test_extract_atom(self, mock_fromstring, mock_unm_ent, mock_unm_feed):
resp = adp.Response('meth', '/rest/api/uom/Debug/SetLoggingLevel',
'status', 'reason', 'headers', body='body')
feed_ret = mock.Mock(tag=etree.QName(c.ATOM_NS, 'feed'))
entry_ret = mock.Mock(tag=etree.QName(c.ATOM_NS, 'entry'))
# Empty content; "Response is not an Atom feed/entry"
mock_fromstring.return_value = None
self.assertIsNotNone(resp._extract_atom())
mock_fromstring.assert_called_with('body')
mock_unm_feed.assert_not_called()
mock_unm_ent.assert_not_called()
# Unmarshal feed (returns None)
mock_fromstring.return_value = feed_ret
self.assertIsNone(resp._extract_atom())
mock_unm_feed.assert_called_once_with(feed_ret, resp)
mock_unm_ent.assert_not_called()
mock_unm_feed.reset_mock()
# Unmarshal entry (returns None)
mock_fromstring.return_value = entry_ret
self.assertIsNone(resp._extract_atom())
mock_unm_ent.assert_called_once_with(entry_ret, resp)
mock_unm_feed.assert_not_called()
mock_unm_ent.reset_mock()
# Unmarshal a 'Debug' response (returns None)
mock_fromstring.return_value = mock.Mock(tag='debug output')
self.assertIsNone(resp._extract_atom())
mock_unm_feed.assert_not_called()
mock_unm_ent.assert_not_called()
# 'fromstring' raises. Make sure the return message came from the
# right place (will include the exception text)
mock_fromstring.side_effect = Exception("test_extract_atom")
self.assertIn("test_extract_atom", resp._extract_atom())
mock_unm_feed.assert_not_called()
mock_unm_ent.assert_not_called()
@mock.patch('pypowervm.adapter.Adapter.read')
def test_sys_uuid(self, mock_read):
# Set and return the sys_uuid if not yet defined
adapter = adp.Adapter(self.sess)
mock_resp = mock.MagicMock()
mock_resp.feed.entries[0].uuid = 'uuid'
mock_read.return_value = mock_resp
sys_uuid = adapter.sys_uuid
mock_read.assert_called_once_with('ManagedSystem')
self.assertEqual('uuid', sys_uuid)
self.assertEqual('uuid', adapter._sys_uuid)
# Return sys_uuid if defined already
mock_read.reset_mock()
sys_uuid = adapter.sys_uuid
mock_read.assert_not_called()
class TestElement(testtools.TestCase):
    """Tests for entities.Element tag, namespace, and CDATA handling."""

    def setUp(self):
        super(TestElement, self).setUp()
        self.adpt = self.useFixture(fx.AdapterFx()).adpt

    def test_cdata(self):
        """The cdata flag controls CDATA-wrapping of the text node."""
        namespace = ('http://www.ibm.com/xmlns/systems/power/firmware/uom/'
                     'mc/2012_10/')
        prefix = '<uom:tag xmlns:uom="%s">' % namespace
        plain = ent.Element('tag', self.adpt, text='text', cdata=False)
        wrapped = ent.Element('tag', self.adpt, text='text', cdata=True)
        self.assertEqual(plain.toxmlstring(),
                         (prefix + 'text</uom:tag>').encode('utf-8'))
        self.assertEqual(
            wrapped.toxmlstring(),
            (prefix + '<![CDATA[text]]></uom:tag>').encode('utf-8'))

    def test_tag_namespace(self):
        """Tag/namespace getters and setters, with and without a ns."""
        namespace = ('http://www.ibm.com/xmlns/systems/power/firmware/uom/'
                     'mc/2012_10/')
        elem = ent.Element('tag', self.adpt)
        # The raw lxml tag is namespace-qualified...
        self.assertEqual(elem.element.tag, '{%s}tag' % namespace)
        # ...but entities.Element.tag strips the namespace.
        self.assertEqual(elem.tag, 'tag')
        self.assertEqual(elem.namespace, namespace)
        # Setters
        elem.tag = 'gat'
        self.assertEqual(elem.element.tag, '{%s}gat' % namespace)
        self.assertEqual(elem.tag, 'gat')
        elem.namespace = 'foo'
        self.assertEqual(elem.namespace, 'foo')
        # Repeat with an empty namespace
        elem = ent.Element('tag', self.adpt, ns='')
        self.assertEqual(elem.element.tag, 'tag')
        self.assertEqual(elem.tag, 'tag')
        self.assertEqual(elem.namespace, '')
        elem.tag = 'gat'
        self.assertEqual(elem.element.tag, 'gat')
        self.assertEqual(elem.tag, 'gat')
        elem.namespace = 'foo'
        self.assertEqual(elem.namespace, 'foo')
class TestAdapterClasses(subunit.IsolatedTestCase, testtools.TestCase):
    """Lifecycle tests for Session / EventListener reference handling.

    NOTE(review): subclassing subunit.IsolatedTestCase presumably keeps the
    gc.get_referrers assertions below from being skewed by objects left
    over from other tests - confirm.
    """
    def setUp(self):
        """Patch Session logon/logoff and the event-poll fetch."""
        super(TestAdapterClasses, self).setUp()
        self.mock_logoff = self.useFixture(
            fixtures.MockPatchObject(adp.Session, '_logoff')).mock
        self.mock_logon = self.useFixture(
            fixtures.MockPatchObject(adp.Session, '_logon')).mock
        self.mock_events = self.useFixture(
            fixtures.MockPatchObject(adp._EventListener, '_get_events')).mock
        # Mock the initial events coming in on start
        self.mock_events.return_value = {'general': 'init'}, [], []
    def test_instantiation(self):
        """EventListener can't be built directly; use get_event_listener."""
        # Get a session
        sess = adp.Session()
        # Now get the EventListener
        self.assertRaises(TypeError, adp.EventListener, sess)
        # Mock the session token like we logged on
        sess._sessToken = 'token'.encode('utf-8')
        # Ensure we get an EventListener
        self.assertIsInstance(sess.get_event_listener(), adp.EventListener)
    def test_shutdown_session(self):
        """The listener's link back to the session is weak, not owning."""
        # Get a session
        sess = adp.Session()
        # Mock the session token like we logged on
        sess._sessToken = 'token'.encode('utf-8')
        # It should have logged on but not off.
        self.assertTrue(self.mock_logon.called)
        self.assertFalse(self.mock_logoff.called)
        # Get an event listener to test the weak references
        event_listen = sess.get_event_listener()
        # Test the circular reference (but one link is weak)
        sess.hello = 'hello'
        self.assertEqual(sess.hello, event_listen.adp.session.hello)
        # There should be 1 reference to the session (ours)
        self.assertEqual(1, len(gc.get_referrers(sess)))
    def test_shutdown_adapter(self):
        """Shutting down the listener does not log the session off."""
        # Get Adapter, implicit session
        adapter = adp.Adapter()
        adapter.session._sessToken = 'token'.encode('utf-8')
        # Get construct and event listener
        adapter.session.get_event_listener()
        # Turn off the event listener
        adapter.session.get_event_listener().shutdown()
        # Session is still active
        self.assertFalse(self.mock_logoff.called)
        # The only thing that refers the adapter is our reference
        self.assertEqual(1, len(gc.get_referrers(adapter)))
class TestElementInject(testtools.TestCase):
    """Tests for Element.inject: ordered insertion/replacement of children."""
    def setUp(self):
        super(TestElementInject, self).setUp()
        self.adpt = self.useFixture(fx.AdapterFx()).adpt
        # Canonical schema ordering used by inject() to place new children.
        self.ordering_list = ('AdapterType', 'UseNextAvailableSlotID',
                              'RemoteLogicalPartitionID', 'RemoteSlotNumber')
        self.child_at = ent.Element('AdapterType', self.adpt, text='Client')
        self.child_unasi = ent.Element('UseNextAvailableSlotID', self.adpt,
                                       text='true')
        self.child_rlpi1 = ent.Element('RemoteLogicalPartitionID', self.adpt,
                                       text='1')
        self.child_rlpi2 = ent.Element('RemoteLogicalPartitionID', self.adpt,
                                       text='2')
        self.child_rlpi3 = ent.Element('RemoteLogicalPartitionID', self.adpt,
                                       text='3')
        self.child_rsn = ent.Element('RemoteSlotNumber', self.adpt,
                                     text='12')
        # One child of each tag, already in schema order.
        self.all_children = [
            self.child_at, self.child_unasi, self.child_rlpi1, self.child_rsn]
    def _mk_el(self, children):
        # Helper: build a parent adapter element with the given children.
        return ent.Element('VirtualSCSIClientAdapter', self.adpt,
                           attrib={'schemaVersion': 'V1_0'},
                           children=children)
    def assert_expected_children(self, parent, *expected_children):
        # etree.Element doesn't implement __eq__, so different instances of the
        # same content would not compare equal; compare serialized XML instead.
        actual = [etree.tostring(elem) for elem in list(parent.element)]
        expected = [etree.tostring(chld.element) for chld in expected_children]
        self.assertEqual(actual, expected)
    def test_no_children(self):
        """Injecting into an empty parent just appends the child."""
        el = self._mk_el([])
        el.inject(self.child_rlpi1)
        self.assert_expected_children(el, self.child_rlpi1)
        # Result should be same regardless of other params
        el = self._mk_el([])
        el.inject(self.child_rlpi1, self.ordering_list, replace=False)
        self.assert_expected_children(el, self.child_rlpi1)
    def test_subelement_found_one_replace_true(self):
        """A single same-tag child is replaced when replace=True (default)."""
        el = self._mk_el(self.all_children)
        el.inject(self.child_rlpi2, self.ordering_list)
        self.assert_expected_children(el, self.child_at, self.child_unasi,
                                      self.child_rlpi2, self.child_rsn)
        # Proving default replace=True - same result if specified
        el = self._mk_el(self.all_children)
        el.inject(self.child_rlpi2, self.ordering_list, replace=True)
        self.assert_expected_children(el, self.child_at, self.child_unasi,
                                      self.child_rlpi2, self.child_rsn)
    def test_subelement_found_mult_replace_true(self):
        """With multiple same-tag children, only the last one is replaced."""
        el = self._mk_el([self.child_at, self.child_unasi, self.child_rlpi1,
                          self.child_rlpi3, self.child_rsn])
        el.inject(self.child_rlpi2, self.ordering_list)
        self.assert_expected_children(el, self.child_at, self.child_unasi,
                                      self.child_rlpi1, self.child_rlpi2,
                                      self.child_rsn)
    def test_subelement_found_replace_false(self):
        """replace=False appends after existing same-tag children."""
        el = self._mk_el(self.all_children)
        el.inject(self.child_rlpi2, self.ordering_list, False)
        self.assert_expected_children(el, self.child_at, self.child_unasi,
                                      self.child_rlpi1, self.child_rlpi2,
                                      self.child_rsn)
        el.inject(self.child_rlpi3, self.ordering_list, False)
        self.assert_expected_children(el, self.child_at, self.child_unasi,
                                      self.child_rlpi1, self.child_rlpi2,
                                      self.child_rlpi3, self.child_rsn)
    def test_subelement_not_in_ordering_list(self):
        """A tag absent from the ordering list lands at the end."""
        el = self._mk_el(self.all_children)
        ch = ent.Element('SomeNewElement', self.adpt, text='foo')
        el.inject(ch, ordering_list=self.ordering_list)
        self.assert_expected_children(el, self.child_at, self.child_unasi,
                                      self.child_rlpi1, self.child_rsn, ch)
    def test_first_populated(self):
        """Injecting the first-ordered tag places it before its neighbor."""
        el = self._mk_el(self.all_children[1:])
        el.inject(self.child_at, self.ordering_list)
        self.assert_expected_children(el, self.child_at, self.child_unasi,
                                      self.child_rlpi1, self.child_rsn)
    def test_first_sparse(self):
        # This is most interesting when the existing child is not the one right
        # next to the injectee.
        el = self._mk_el([self.child_rlpi1])
        el.inject(self.child_at, self.ordering_list)
        self.assert_expected_children(el, self.child_at, self.child_rlpi1)
    def test_last_populated(self):
        """Injecting the last-ordered tag places it after its neighbor."""
        el = self._mk_el(self.all_children[:-1])
        el.inject(self.child_rsn, self.ordering_list)
        self.assert_expected_children(el, self.child_at, self.child_unasi,
                                      self.child_rlpi1, self.child_rsn)
    def test_last_sparse(self):
        # This is most interesting when the existing child is not the one right
        # next to the injectee.
        el = self._mk_el([self.child_unasi])
        el.inject(self.child_rsn, self.ordering_list)
        self.assert_expected_children(el, self.child_unasi, self.child_rsn)
    def test_middle_populated(self):
        """Injecting into the middle of a fully-populated parent."""
        el = self._mk_el([self.child_at, self.child_unasi, self.child_rsn])
        el.inject(self.child_rlpi1, self.ordering_list)
        self.assert_expected_children(el, self.child_at, self.child_unasi,
                                      self.child_rlpi1, self.child_rsn)
    def test_middle_sparse(self):
        """Injecting between two non-adjacent (in schema order) children."""
        el = self._mk_el([self.child_at, self.child_rsn])
        el.inject(self.child_rlpi1, self.ordering_list)
        self.assert_expected_children(
            el, self.child_at, self.child_rlpi1, self.child_rsn)
class TestElementWrapper(testtools.TestCase):
    """Equality semantics of wrapped elements parsed from a sample feed."""
    def setUp(self):
        super(TestElementWrapper, self).setUp()
        # Load the same feed twice so we get two independent element trees
        # with identical content.
        self.resp = pvmhttp.load_pvm_resp(NET_BRIDGE_FILE).get_response()
        self.nb1 = self.resp.feed.entries[0]
        self.resp2 = pvmhttp.load_pvm_resp(NET_BRIDGE_FILE).get_response()
        self.nb2 = self.resp2.feed.entries[0]
    def test_equality(self):
        """Identical content compares equal; a structural change breaks it."""
        sea1 = self._find_seas(self.nb1)[0]
        sea2 = self._find_seas(self.nb2)[0]
        self.assertTrue(sea1 == sea2)
        # Change the other SEA
        sea2.element.append(etree.Element('Bob'))
        self.assertFalse(sea1 == sea2)
    def test_inequality_by_subelem_change(self):
        """Changing text of a nested element must break equality."""
        sea1 = self._find_seas(self.nb1)[0]
        sea2 = self._find_seas(self.nb2)[0]
        sea_trunk = sea2.findall('TrunkAdapters/TrunkAdapter')[0]
        pvid = sea_trunk.find('PortVLANID')
        pvid.text = '1'
        self.assertFalse(sea1 == sea2)
    def _find_seas(self, entry):
        # Helper: all SharedEthernetAdapter elements under the given entry.
        return entry.element.findall('SharedEthernetAdapters/'
                                     'SharedEthernetAdapter')
| true | true |
f73a1a4b745e4ff53cf589027674c09f70c7c395 | 4,829 | gyp | Python | libyuv.gyp | DeepARSDK/libyuv | dc0a9aebe75f2ef3e005ff1d31d88817e9aecd88 | [
"BSD-3-Clause"
] | 97 | 2019-10-28T13:10:03.000Z | 2022-03-08T09:48:37.000Z | libyuv.gyp | DeepARSDK/libyuv | dc0a9aebe75f2ef3e005ff1d31d88817e9aecd88 | [
"BSD-3-Clause"
] | 7 | 2019-12-03T02:54:24.000Z | 2021-09-08T09:36:06.000Z | libyuv.gyp | DeepARSDK/libyuv | dc0a9aebe75f2ef3e005ff1d31d88817e9aecd88 | [
"BSD-3-Clause"
] | 31 | 2019-11-14T14:51:13.000Z | 2022-02-18T06:46:48.000Z | # Copyright 2011 The LibYuv Project Authors. All rights reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
{
  # Build configuration for the redistributable libyuv static library.
  # Shared source lists live in libyuv.gypi.
  'includes': [
    'libyuv.gypi',
  ],
  # Make sure that if we are being compiled to an xcodeproj, nothing tries to
  # include a .pch.
  'xcode_settings': {
    'GCC_PREFIX_HEADER': '',
    'GCC_PRECOMPILE_PREFIX_HEADER': 'NO',
  },
  'variables': {
    'use_system_libjpeg%': 0,
    # Can be enabled if your jpeg has GYP support.
    'libyuv_disable_jpeg%': 1,
    # 'chromium_code' treats libyuv as internal and increases warning level.
    'chromium_code': 1,
    # clang compiler default variable usable by other apps that include libyuv.
    'clang%': 0,
    # Link-Time Optimizations.
    'use_lto%': 0,
    'mips_msa%': 0,  # Default to msa off.
    'build_neon': 0,
    'build_msa': 0,
    # Derive build_neon / build_msa from the target architecture and the
    # arm_neon / mips_msa feature flags.
    'conditions': [
       ['(target_arch == "armv7" or target_arch == "armv7s" or \
         (target_arch == "arm" and arm_version >= 7) or target_arch == "arm64")\
         and (arm_neon == 1 or arm_neon_optional == 1)', {
         'build_neon': 1,
       }],
       ['(target_arch == "mipsel" or target_arch == "mips64el")\
         and (mips_msa == 1)',
       {
         'build_msa': 1,
       }],
    ],
  },
  'targets': [
    {
      'target_name': 'libyuv',
      # Change type to 'shared_library' to build .so or .dll files.
      'type': 'static_library',
      'variables': {
        'optimize': 'max',  # enable O2 and ltcg.
      },
      # Allows libyuv.a redistributable library without external dependencies.
      'standalone_static_library': 1,
      'conditions': [
        # Disable -Wunused-parameter
        ['clang == 1', {
          'cflags': [
            '-Wno-unused-parameter',
         ],
        }],
        ['build_neon != 0', {
          'defines': [
            'LIBYUV_NEON',
          ],
          'cflags!': [
            '-mfpu=vfp',
            '-mfpu=vfpv3',
            '-mfpu=vfpv3-d16',
            # '-mthumb',  # arm32 not thumb
          ],
          'conditions': [
            # Disable LTO in libyuv_neon target due to gcc 4.9 compiler bug.
            ['clang == 0 and use_lto == 1', {
              'cflags!': [
                '-flto',
                '-ffat-lto-objects',
              ],
            }],
            # arm64 does not need -mfpu=neon option as neon is not optional
            ['target_arch != "arm64"', {
              'cflags': [
                '-mfpu=neon',
                # '-marm',  # arm32 not thumb
              ],
            }],
          ],
        }],
        ['build_msa != 0', {
          'defines': [
            'LIBYUV_MSA',
          ],
        }],
        ['OS != "ios" and libyuv_disable_jpeg != 1', {
          'defines': [
            'HAVE_JPEG'
          ],
          'conditions': [
            # Caveat system jpeg support may not support motion jpeg
            [ 'use_system_libjpeg == 1', {
              'dependencies': [
                 '<(DEPTH)/third_party/libjpeg/libjpeg.gyp:libjpeg',
              ],
            }, {
              'dependencies': [
                 '<(DEPTH)/third_party/libjpeg_turbo/libjpeg.gyp:libjpeg',
              ],
            }],
            [ 'use_system_libjpeg == 1', {
              'link_settings': {
                'libraries': [
                  '-ljpeg',
                ],
              }
            }],
          ],
        }],
      ], #conditions
      'defines': [
        # Enable the following 3 macros to turn off assembly for specified CPU.
        # 'LIBYUV_DISABLE_X86',
        # 'LIBYUV_DISABLE_NEON',
        # 'LIBYUV_DISABLE_DSPR2',
        # Enable the following macro to build libyuv as a shared library (dll).
        # 'LIBYUV_USING_SHARED_LIBRARY',
        # TODO(fbarchard): Make these into gyp defines.
      ],
      'include_dirs': [
        'include',
        '.',
      ],
      'direct_dependent_settings': {
        'include_dirs': [
          'include',
          '.',
        ],
        'conditions': [
          ['OS == "android" and target_arch == "arm64"', {
            'ldflags': [
              '-Wl,--dynamic-linker,/system/bin/linker64',
            ],
          }],
          ['OS == "android" and target_arch != "arm64"', {
            'ldflags': [
              '-Wl,--dynamic-linker,/system/bin/linker',
            ],
          }],
        ], #conditions
      },
      'sources': [
        '<@(libyuv_sources)',
      ],
    },
  ], # targets.
}
# Local Variables:
# tab-width:2
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=2 shiftwidth=2:
| 29.625767 | 79 | 0.490578 |
{
'includes': [
'libyuv.gypi',
],
'xcode_settings': {
'GCC_PREFIX_HEADER': '',
'GCC_PRECOMPILE_PREFIX_HEADER': 'NO',
},
'variables': {
'use_system_libjpeg%': 0,
'libyuv_disable_jpeg%': 1,
'chromium_code': 1,
'clang%': 0,
'use_lto%': 0,
'mips_msa%': 0,
'build_neon': 0,
'build_msa': 0,
'conditions': [
['(target_arch == "armv7" or target_arch == "armv7s" or \
(target_arch == "arm" and arm_version >= 7) or target_arch == "arm64")\
and (arm_neon == 1 or arm_neon_optional == 1)', {
'build_neon': 1,
}],
['(target_arch == "mipsel" or target_arch == "mips64el")\
and (mips_msa == 1)',
{
'build_msa': 1,
}],
],
},
'targets': [
{
'target_name': 'libyuv',
'type': 'static_library',
'variables': {
'optimize': 'max',
},
'standalone_static_library': 1,
'conditions': [
['clang == 1', {
'cflags': [
'-Wno-unused-parameter',
],
}],
['build_neon != 0', {
'defines': [
'LIBYUV_NEON',
],
'cflags!': [
'-mfpu=vfp',
'-mfpu=vfpv3',
'-mfpu=vfpv3-d16',
'conditions': [
['clang == 0 and use_lto == 1', {
'cflags!': [
'-flto',
'-ffat-lto-objects',
],
}],
['target_arch != "arm64"', {
'cflags': [
'-mfpu=neon',
}],
],
}],
['build_msa != 0', {
'defines': [
'LIBYUV_MSA',
],
}],
['OS != "ios" and libyuv_disable_jpeg != 1', {
'defines': [
'HAVE_JPEG'
],
'conditions': [
[ 'use_system_libjpeg == 1', {
'dependencies': [
'<(DEPTH)/third_party/libjpeg/libjpeg.gyp:libjpeg',
],
}, {
'dependencies': [
'<(DEPTH)/third_party/libjpeg_turbo/libjpeg.gyp:libjpeg',
],
}],
[ 'use_system_libjpeg == 1', {
'link_settings': {
'libraries': [
'-ljpeg',
],
}
}],
],
}],
],
'defines': [
],
'include_dirs': [
'include',
'.',
],
'direct_dependent_settings': {
'include_dirs': [
'include',
'.',
],
'conditions': [
['OS == "android" and target_arch == "arm64"', {
'ldflags': [
'-Wl,--dynamic-linker,/system/bin/linker64',
],
}],
['OS == "android" and target_arch != "arm64"', {
'ldflags': [
'-Wl,--dynamic-linker,/system/bin/linker',
],
}],
],
},
'sources': [
'<@(libyuv_sources)',
],
},
],
}
| true | true |
f73a1b0d6ca587bbe2d19600256e4c0c3ca40241 | 4,641 | py | Python | util/kgh.py | aholinch/Keplers-Goat-Herd | 18cc49465353eb6ce6ce9e9e84d81fca9f5d3c59 | [
"MIT"
] | null | null | null | util/kgh.py | aholinch/Keplers-Goat-Herd | 18cc49465353eb6ce6ce9e9e84d81fca9f5d3c59 | [
"MIT"
] | null | null | null | util/kgh.py | aholinch/Keplers-Goat-Herd | 18cc49465353eb6ce6ce9e9e84d81fca9f5d3c59 | [
"MIT"
] | null | null | null | import numpy as np, time
def mToE(m, e):
    """Solve Kepler's equation for E, choosing the grid size from ``e``.

    NOTE(review): Python has no function overloading, so the three-argument
    ``mToE`` defined below rebinds the name and this dispatcher is
    unreachable dead code.  Even its recursive calls would resolve to the
    three-argument version at call time.
    """
    # Higher eccentricity needs more contour sampling points.
    if e <= 0.5:
        return mToE(m,e,10)
    if e <= 0.9:
        return mToE(m,e,25)
    if e <= 0.95:
        return mToE(m,e,50)
    if e <= 0.99:
        return mToE(m,e,128)
    return mToE(m,e,256)
def mToE(m, eccentricity, N_it=None):
    """Solve Kepler's equation, E - e sin E = m, for the eccentric anomaly.

    Uses the contour-integration method of Philcox et al. (2021), which
    applies techniques described in Ullisch (2020) to solve the
    `geometric goat problem'.

    Args:
        m (float): Mean anomaly, in the range (0, 2 pi).
        eccentricity (float): Eccentricity. Must be in the range 0 < e < 1.
        N_it (int, optional): Number of grid points.  When omitted, a value
            is chosen from the eccentricity (convergence slows as e -> 1),
            which also makes the two-argument call form usable.

    Returns:
        (float): Eccentric anomaly, E.

    Raises:
        Exception: If m, eccentricity or N_it is outside its valid range.
    """
    # Check inputs
    if eccentricity <= 0.:
        raise Exception("Eccentricity must be greater than zero!")
    elif eccentricity >= 1:
        raise Exception("Eccentricity must be less than unity!")
    if m > 2.*np.pi:
        raise Exception("Mean anomaly should be in the range (0, 2 pi)")
    if m < 0:
        raise Exception("Mean anomaly should be in the range (0, 2 pi)")
    if N_it is None:
        # Auto-select the grid size: higher eccentricities need more points.
        if eccentricity <= 0.5:
            N_it = 10
        elif eccentricity <= 0.9:
            N_it = 25
        elif eccentricity <= 0.95:
            N_it = 50
        elif eccentricity <= 0.99:
            N_it = 128
        else:
            N_it = 256
    if N_it < 2:
        raise Exception("Need at least two sampling points!")

    # Define sampling points (two of the N_it points are the contour edges).
    N_points = N_it - 2
    N_fft = (N_it-1)*2

    # Define contour radius
    radius = eccentricity/2

    # Generate e^{ikx} sampling points and precompute real and imaginary parts
    j_arr = np.arange(N_points)
    freq = (2*np.pi*(j_arr+1.)/N_fft)[:, np.newaxis]
    exp2R = np.cos(freq)
    exp2I = np.sin(freq)
    ecosR = eccentricity*np.cos(radius*exp2R)
    esinR = eccentricity*np.sin(radius*exp2R)
    exp4R = exp2R*exp2R - exp2I*exp2I
    exp4I = 2.*exp2R*exp2I
    coshI = np.cosh(radius*exp2I)
    sinhI = np.sinh(radius*exp2I)

    # Precompute e sin(e/2) and e cos(e/2)
    esinRadius = eccentricity*np.sin(radius)
    ecosRadius = eccentricity*np.cos(radius)

    # Define the contour center and precompute sin(center), cos(center)
    center = m - eccentricity/2.
    if m < np.pi:
        center += eccentricity
    sinC = np.sin(center)
    cosC = np.cos(center)
    output = center

    ## Accumulate Fourier coefficients
    # NB: the integration range is halved by symmetry, absorbing the factor
    # of 2 into the final ratio.

    # j = 0 edge piece, which is simpler (imaginary part is zero here)
    zR = center + radius
    tmpsin = sinC*ecosRadius + cosC*esinRadius  # e sin(zR)
    fxR = zR - tmpsin - m
    # Factor of 1/2 since this is an edge of the contour.
    ft_gx2 = 0.5/fxR
    ft_gx1 = 0.5/fxR

    # j = 1 .. N_points interior pieces, fully vectorized.
    # z in real and imaginary parts:
    zR = center + radius*exp2R
    zI = radius*exp2I
    # f(z(x)) from the precomputed cosh / sinh / cos / sin factors.
    tmpsin = sinC*ecosR + cosC*esinR  # e sin(zR)
    tmpcos = cosC*ecosR - sinC*esinR  # e cos(zR)
    fxR = zR - tmpsin*coshI - m
    fxI = zI - tmpcos*sinhI
    # Accumulate 1/f(z), weighted by the e^{2ikx} and e^{4ikx} phases.
    ftmp = fxR*fxR + fxI*fxI
    fxR /= ftmp
    fxI /= ftmp
    ft_gx2 += np.sum(exp4R*fxR + exp4I*fxI, axis=0)
    ft_gx1 += np.sum(exp2R*fxR + exp2I*fxI, axis=0)

    # j = N_it edge piece, which is simpler (imaginary part is zero here)
    zR = center - radius
    tmpsin = sinC*ecosRadius - cosC*esinRadius  # e sin(zR)
    fxR = zR - tmpsin - m
    # 1/2 factor for the edge; the opposite sign enters ft_gx1.
    ft_gx2 += 0.5/fxR
    ft_gx1 += -0.5/fxR

    ### Compute and return the solution E(m, e)
    output += radius*ft_gx2/ft_gx1
    return output[0]
if __name__=="__main__":
    """Test the Python function above with a simple example"""
    # Parameters of the self-test grid.
    N_ell = 10000
    eccentricity = 0.5
    N_it = 10
    print("\n##### PARAMETERS #####")
    print("# N_ell = %d" % N_ell)
    print("# Eccentricity = %.2f" % eccentricity)
    print("# Iterations: %d" % N_it)
    print("######################")
    # Build mean anomalies from a uniform grid of true eccentric anomalies,
    # then check the solver recovers the grid.
    E_true = (2.0 * np.pi * (np.arange(N_ell) + 0.5)) / N_ell
    ell_input = E_true - eccentricity * np.sin(E_true)
    # Time the solver across the whole grid.
    start = time.time()
    E_out = [mToE(ell, eccentricity, N_it) for ell in ell_input]
    runtime = time.time() - start
    print("\nEstimation complete after %.1f millseconds, achieving mean error %.2e.\n" % (runtime * 1000., np.mean(np.abs(E_out - E_true))))
| 28.826087 | 133 | 0.623357 | import numpy as np, time
def mToE(m, e):
if e <= 0.5:
return mToE(m,e,10)
if e <= 0.9:
return mToE(m,e,25)
if e <= 0.95:
return mToE(m,e,50)
if e <= 0.99:
return mToE(m,e,128)
return mToE(m,e,256)
def mToE(m, eccentricity, N_it):
if eccentricity<=0.:
raise Exception("Eccentricity must be greater than zero!")
elif eccentricity>=1:
raise Exception("Eccentricity must be less than unity!")
if m>2.*np.pi:
raise Exception("Mean anomaly should be in the range (0, 2 pi)")
if m<0:
raise Exception("Mean anomaly should be in the range (0, 2 pi)")
if N_it<2:
raise Exception("Need at least two sampling points!")
N_points = N_it - 2
N_fft = (N_it-1)*2
radius = eccentricity/2
j_arr = np.arange(N_points)
freq = (2*np.pi*(j_arr+1.)/N_fft)[:,np.newaxis]
exp2R = np.cos(freq)
exp2I = np.sin(freq)
ecosR= eccentricity*np.cos(radius*exp2R)
esinR = eccentricity*np.sin(radius*exp2R)
exp4R = exp2R*exp2R-exp2I*exp2I
exp4I = 2.*exp2R*exp2I
coshI = np.cosh(radius*exp2I)
sinhI = np.sinh(radius*exp2I)
esinRadius = eccentricity*np.sin(radius);
ecosRadius = eccentricity*np.cos(radius);
center = m-eccentricity/2.
if m < np.pi:
center += eccentricity
sinC = np.sin(center)
cosC = np.cos(center)
output = center
+cosC*esinRadius
fxR = zR - tmpsin - m
ft_gx2 = 0.5/fxR
ft_gx1 = 0.5/fxR
p2R
zI = radius*exp2I
tmpsin = sinC*ecosR+cosC*esinR
tmpcos = cosC*ecosR-sinC*esinR
fxR = zR - tmpsin*coshI-m
fxI = zI - tmpcos*sinhI
ftmp = fxR*fxR+fxI*fxI;
fxR /= ftmp;
fxI /= ftmp;
ft_gx2 += np.sum(exp4R*fxR+exp4I*fxI,axis=0)
ft_gx1 += np.sum(exp2R*fxR+exp2I*fxI,axis=0)
n = sinC*ecosRadius-cosC*esinRadius
fxR = zR - tmpsin-m
ft_gx2 += 0.5/fxR;
ft_gx1 += -0.5/fxR;
N_ell = 10000
eccentricity = 0.5
N_it = 10
print("\n##### PARAMETERS #####")
print("# N_ell = %d"%N_ell)
print("# Eccentricity = %.2f"%eccentricity)
print("# Iterations: %d"%N_it)
print("######################")
E_true = (2.0*np.pi*(np.arange(N_ell)+0.5))/N_ell
ell_input = E_true - eccentricity*np.sin(E_true)
E_out = [0 for i in range(len(ell_input))]
init = time.time()
for i in range(len(ell_input)):
E_out[i] = mToE(ell_input[i],eccentricity,N_it)
runtime = time.time()-init
print("\nEstimation complete after %.1f millseconds, achieving mean error %.2e.\n"%(runtime*1000.,np.mean(np.abs(E_out-E_true))))
| true | true |
f73a1b559f33bf3c084c5088359ddb186c947187 | 1,118 | py | Python | qf_09_条件判断.py | tianming-jianai/QFPython | bf14fc5da077e745670c5898f1d3322cb87e6f6b | [
"MIT"
] | null | null | null | qf_09_条件判断.py | tianming-jianai/QFPython | bf14fc5da077e745670c5898f1d3322cb87e6f6b | [
"MIT"
] | null | null | null | qf_09_条件判断.py | tianming-jianai/QFPython | bf14fc5da077e745670c5898f1d3322cb87e6f6b | [
"MIT"
] | null | null | null | import random
# NOTE: `pass` is a pure placeholder statement; it only keeps a suite
# syntactically complete and is otherwise meaningless.

# Read a year and report whether it is a leap year.
# (Leap year: divisible by 4 but not by 100, or divisible by 400.)
year = int(input('请输入一个年份:'))
# Fixed: the original tested `year % 1400`, contradicting the rule above.
if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
    print("您输入的是闰年" + str(year))

# --------------- rock-paper-scissors game ---------------------------
print("0剪刀 1石头 2布")
computer = random.randint(0, 2)
print('电脑' + str(computer))
player = int(input('请输入:'))
if (player == 0 and computer == 2) or (player == 1 and computer == 0) or (player == 2 and computer == 1):
    print("你赢了")
elif player == computer:
    print("平局")
else:
    print("你输了")

# ------------------ notes on if statements ------------------------
# 1. Range tests
score = float(input('请输入您的份数:'))
# Some languages cannot chain range comparisons and need logical operators
# (score > 0 and score < 60); Python supports chained comparisons directly.
if 0 <= score < 60:
    print('不及格')
# 2. Implicit conversion to bool
if 4:  # a non-boolean condition is converted to bool automatically
    print('hello world')
# 3. Conditional expression: shorthand for a simple if/else
num1 = int(input('请输入一个数字:'))
num2 = int(input('请再输入一个数字:'))
# Equivalent long form:
# if num1 > num2:
#     x = num1
# else:
#     x = num2
x = num1 if num1 > num2 else num2
print('两个数里较大的是:', x)
| 24.844444 | 105 | 0.592129 | import random
year = int(input('请输入一个年份:'))
if (year % 4 == 0 and year % 100 != 0) or (year % 1400 == 0):
print("您输入的是闰年" + str(year))
pass
print("0剪刀 1石头 2布")
computer = random.randint(0, 2)
print('电脑' + str(computer))
player = int(input('请输入:'))
if (player == 0 and computer == 2) or (player == 1 and computer == 0) or (player == 2 and computer == 1):
print("你赢了")
elif player == computer:
print("平局")
else:
print("你输了")
pass
score = float(input('请输入您的份数:'))
if 0 <= score < 60:
print('不及格')
if 4:
print('hello world')
num1 = int(input('请输入一个数字:'))
num2 = int(input('请再输入一个数字:'))
x = num1 if num1 > num2 else num2
print('两个数里较大的是:', x)
| true | true |
f73a1b6e36386c2c059dd4e7bcb51e7dcb4e93b3 | 3,438 | py | Python | tests/test_compynent.py | caioaao/compynent | 433bb23ed6edff81b67ba9be2f4d142f01f4db0c | [
"MIT"
] | 3 | 2020-11-16T01:58:43.000Z | 2021-08-16T19:29:19.000Z | tests/test_compynent.py | caioaao/compynent | 433bb23ed6edff81b67ba9be2f4d142f01f4db0c | [
"MIT"
] | null | null | null | tests/test_compynent.py | caioaao/compynent | 433bb23ed6edff81b67ba9be2f4d142f01f4db0c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Tests for `compynent` package."""
from contextlib import AbstractContextManager, contextmanager
from compynent import System
class InitCounter(AbstractContextManager):
    """Context-managed counter used to record component start order.

    Outside an active context ``cnt`` is -1; entering resets it to 0, and
    every ``incr()`` call advances the counter and returns the new value.
    """

    def __init__(self):
        self.cnt = -1

    def incr(self):
        """Bump the counter and return its new value."""
        self.cnt = self.cnt + 1
        return self.cnt

    def __enter__(self):
        # Entering the context arms the counter at zero.
        self.cnt = 0
        return self

    def __exit__(self, *args):
        # Leaving the context returns the counter to its idle value.
        self.cnt = -1
class Config(AbstractContextManager):
    """Fake configuration component.

    ``bar`` and ``incr`` only hold values while the context is active;
    ``_when`` records this component's position in the start order.
    """

    def __init__(self, init_counter):
        self._counter = init_counter

    def __enter__(self):
        self.bar, self.incr = 1, 10
        # Stamp the start-order position via the shared counter.
        self._when = self._counter.incr()
        return self

    def __exit__(self, *args):
        # Tear down the configuration values on shutdown.
        self.bar, self.incr = None, None
class Counter(AbstractContextManager):
    """Fake counter component that steps by the configured increment."""

    def __init__(self, counter, config: Config):
        self._counter = counter
        self._config = config

    def increment(self):
        """Add the configured step to the running total."""
        self.counter = self.counter + self._config.incr

    def __enter__(self):
        # Seed the running total from the config and stamp the start order.
        self.counter = self._config.bar
        self._when = self._counter.incr()
        return self

    def __exit__(self, *args):
        # Drop the running total on shutdown.
        self.counter = None
class App(AbstractContextManager):
    """Fake application component sitting on top of config and counter."""

    def __init__(self, cfg: Config, counter: Counter, init_counter):
        self._init_counter = init_counter
        self._counter = counter
        self._config = cfg

    def get_counter(self):
        """Expose the counter component's current total."""
        return self._counter.counter

    def incr_counter(self):
        """Delegate an increment to the counter component."""
        return self._counter.increment()

    def __enter__(self):
        # Only the start-order stamp happens on entry.
        self._when = self._init_counter.incr()
        return self

    def __exit__(self, *args):
        # The app holds no state of its own to tear down.
        pass
def sys_config():
    """Return the component map shared by the System tests.

    Each entry maps a component name to ``(factory, dependencies)``;
    dependencies are either a list of names or a dict aliasing component
    names to constructor parameter names.
    """
    return {
        'app': (App, ['counter', 'cfg', 'init_counter']),
        'init_counter': (InitCounter, []),
        'cfg': (Config, ['init_counter']),
        'counter': (Counter, {'cfg': 'config',
                              'init_counter': 'counter'}),
    }
def test_dag():
    """Components must be ordered so that dependencies start first."""
    # Renamed from `sys` to avoid shadowing the stdlib module name; the
    # trailing dead `pass` statement was also dropped.
    system = System(sys_config())
    assert system.order == ['init_counter', 'cfg', 'counter', 'app']
def test_system_map():
    """start() exposes every component, wired to its dependencies."""
    system = System(sys_config())
    with system.start() as ctx:
        # Each key resolves to an instance of the declared component class.
        for key, cls in (('app', App), ('cfg', Config), ('counter', Counter)):
            assert isinstance(ctx[key], cls)
        # Dependencies are injected by identity, not copied.
        assert ctx['app']._config is ctx['cfg']
        assert ctx['app']._counter is ctx['counter']
        assert ctx['counter']._config is ctx['cfg']
def test_initialization_order():
    """_when stamps must follow the dependency order: cfg, counter, app."""
    with System(sys_config()).start() as ctx:
        pass
    # The stamps survive shutdown, so they can be inspected afterwards.
    for name, when in (('cfg', 1), ('counter', 2), ('app', 3)):
        assert ctx[name]._when == when
def test_context_management():
    """Component state lives inside the context and is torn down after."""
    with System(sys_config()).start() as ctx:
        app = ctx['app']
        assert app.get_counter() == 1
        app.incr_counter()
        assert app.get_counter() == 11
    # After shutdown the counter component has cleared its total.
    assert app.get_counter() is None
def test_using_generators():
    """Generator-based context managers also work as components."""

    @contextmanager
    def make_counter():
        box = [0]
        try:
            yield box
        finally:
            # Teardown mutates the shared list in place.
            box[0] -= 1

    @contextmanager
    def make_outer(counter):
        yield counter[0] + 1

    system = System({'cnt': (make_counter, []),
                     'outer': (make_outer, {'cnt': 'counter'})})
    with system.start() as ctx:
        assert ctx['cnt'] == [0]
        ctx['cnt'][0] = 123
    # Shutdown decremented the value we wrote while the system was up.
    assert ctx['cnt'] == [122]
| 24.211268 | 68 | 0.589005 |
from contextlib import AbstractContextManager, contextmanager
from compynent import System
class InitCounter(AbstractContextManager):
def __init__(self):
self.cnt = -1
def incr(self):
self.cnt += 1
return self.cnt
def __enter__(self):
self.cnt = 0
return self
def __exit__(self, *args):
self.cnt = -1
class Config(AbstractContextManager):
def __init__(self, init_counter):
self._counter = init_counter
def __enter__(self):
self.bar = 1
self.incr = 10
self._when = self._counter.incr()
return self
def __exit__(self, *args):
self.bar = None
self.incr = None
class Counter(AbstractContextManager):
def __init__(self, counter, config: Config):
self._config = config
self._counter = counter
def increment(self):
self.counter += self._config.incr
def __enter__(self):
self.counter = self._config.bar
self._when = self._counter.incr()
return self
def __exit__(self, *args):
self.counter = None
class App(AbstractContextManager):
def __init__(self, cfg: Config, counter: Counter, init_counter):
self._config = cfg
self._counter = counter
self._init_counter = init_counter
def get_counter(self):
return self._counter.counter
def incr_counter(self):
return self._counter.increment()
def __enter__(self):
self._when = self._init_counter.incr()
return self
def __exit__(self, *args):
pass
def sys_config():
return {'app': (App, ['counter', 'cfg', 'init_counter']),
'init_counter': (InitCounter, []),
'cfg': (Config, ['init_counter']),
'counter': (Counter, {'cfg': 'config',
'init_counter': 'counter'})}
def test_dag():
sys = System(sys_config())
assert sys.order == ['init_counter', 'cfg', 'counter', 'app']
pass
def test_system_map():
sys = System(sys_config())
with sys.start() as ctx:
assert isinstance(ctx['app'], App)
assert isinstance(ctx['cfg'], Config)
assert isinstance(ctx['counter'], Counter)
assert ctx['app']._config is ctx['cfg']
assert ctx['app']._counter is ctx['counter']
assert ctx['counter']._config is ctx['cfg']
def test_initialization_order():
with System(sys_config()).start() as ctx:
pass
assert ctx['cfg']._when == 1
assert ctx['counter']._when == 2
assert ctx['app']._when == 3
def test_context_management():
with System(sys_config()).start() as ctx:
assert ctx['app'].get_counter() == 1
ctx['app'].incr_counter()
assert ctx['app'].get_counter() == 11
assert ctx['app'].get_counter() is None
def test_using_generators():
@contextmanager
def make_counter():
counter = [0]
try:
yield counter
finally:
counter[0] -= 1
@contextmanager
def make_outer(counter):
yield counter[0] + 1
system = System({'cnt': (make_counter, []),
'outer': (make_outer, {'cnt': 'counter'})})
with system.start() as ctx:
assert ctx['cnt'] == [0]
ctx['cnt'][0] = 123
assert ctx['cnt'] == [122]
| true | true |
f73a1c5ace41ac50664cf0171d2f25fb60c1fd44 | 4,618 | py | Python | models/backbone.py | liuky74/detr | e2b59573dcb86720562dfbdb02977ef996857025 | [
"Apache-2.0"
] | null | null | null | models/backbone.py | liuky74/detr | e2b59573dcb86720562dfbdb02977ef996857025 | [
"Apache-2.0"
] | null | null | null | models/backbone.py | liuky74/detr | e2b59573dcb86720562dfbdb02977ef996857025 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Backbone modules.
"""
from collections import OrderedDict
import torch
import torch.nn.functional as F
import torchvision
from torch import nn
from torchvision.models._utils import IntermediateLayerGetter
from typing import Dict, List
from util.misc import NestedTensor, is_main_process
from .position_encoding import build_position_encoding
class FrozenBatchNorm2d(torch.nn.Module):
    """BatchNorm2d with the batch statistics and affine parameters frozen.

    Copy-paste from torchvision.misc.ops, with an ``eps`` added before the
    ``rsqrt``; without it, models other than
    torchvision.models.resnet[18,34,50,101] can produce NaNs.
    """

    def __init__(self, n):
        super(FrozenBatchNorm2d, self).__init__()
        # Buffers (not Parameters): saved/loaded with the state dict but
        # never touched by the optimizer.
        self.register_buffer("weight", torch.ones(n))
        self.register_buffer("bias", torch.zeros(n))
        self.register_buffer("running_mean", torch.zeros(n))
        self.register_buffer("running_var", torch.ones(n))

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        # Checkpoints saved from a regular BatchNorm2d carry a
        # 'num_batches_tracked' entry that this frozen variant does not
        # define; drop it so loading does not flag an unexpected key.
        state_dict.pop(prefix + 'num_batches_tracked', None)
        super(FrozenBatchNorm2d, self)._load_from_state_dict(
            state_dict, prefix, local_metadata, strict,
            missing_keys, unexpected_keys, error_msgs)

    def forward(self, x):
        # Reshape the per-channel buffers to (1, C, 1, 1) up front to keep
        # the expression fuser-friendly.
        shape = (1, -1, 1, 1)
        weight = self.weight.reshape(shape)
        bias = self.bias.reshape(shape)
        var = self.running_var.reshape(shape)
        mean = self.running_mean.reshape(shape)
        eps = 1e-5
        multiplier = weight * (var + eps).rsqrt()
        offset = bias - mean * multiplier
        return x * multiplier + offset
class BackboneBase(nn.Module):
    """Wrap a torchvision backbone to consume/produce NestedTensors.

    Freezes all parameters outside layer2-4 (and everything when
    ``train_backbone`` is False) and exposes either the last stage or all
    four residual stages as named feature maps.
    """
    def __init__(self, backbone: nn.Module, train_backbone: bool, num_channels: int, return_interm_layers: bool):
        super().__init__()
        for name, parameter in backbone.named_parameters():
            # The stem and layer1 never train; note `or` binds looser than
            # `and`, so everything is frozen when train_backbone is False.
            if not train_backbone or 'layer2' not in name and 'layer3' not in name and 'layer4' not in name:
                parameter.requires_grad_(False)
        if return_interm_layers:  # expose every residual stage by name
            return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"}
        else:
            return_layers = {'layer4': "0"}
        # IntermediateLayerGetter wraps the backbone in a new module whose
        # forward returns a dict of the requested layers' outputs.
        self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
        self.num_channels = num_channels
    def forward(self, tensor_list: NestedTensor):
        """Run the backbone and attach a (downsampled) padding mask to each map."""
        xs = self.body(tensor_list.tensors)  # name -> feature map
        out: Dict[str, NestedTensor] = {}
        for name, x in xs.items():
            m = tensor_list.mask
            assert m is not None
            # Resize the boolean padding mask to each feature map's spatial size.
            mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0]
            out[name] = NestedTensor(x, mask)
        return out
class Backbone(BackboneBase):
    """ResNet backbone with frozen BatchNorm."""
    def __init__(self, name: str,
                 train_backbone: bool,
                 return_interm_layers: bool,
                 dilation: bool):
        # Build the torchvision resnet by name; `dilation` replaces the last
        # stage's stride with dilation, and pretrained weights are fetched
        # only when is_main_process() returns True.
        backbone = getattr(torchvision.models, name)(
            replace_stride_with_dilation=[False, False, dilation],
            pretrained=is_main_process(), norm_layer=FrozenBatchNorm2d)
        # resnet18/34 end at 512 channels; the deeper variants at 2048.
        num_channels = 512 if name in ('resnet18', 'resnet34') else 2048
        super().__init__(backbone, train_backbone, num_channels, return_interm_layers)
class Joiner(nn.Sequential):
    """Pair the backbone with its position-encoding module.

    ``self[0]`` is the backbone; ``self[1]`` the position embedding.
    """
    def __init__(self, backbone, position_embedding):
        super().__init__(backbone, position_embedding)
    def forward(self, tensor_list: NestedTensor):
        """Return the backbone feature maps plus one position encoding each."""
        xs = self[0](tensor_list)  # backbone output: name -> NestedTensor
        out: List[NestedTensor] = []
        pos = []
        for name, x in xs.items():
            out.append(x)
            # position encoding
            pos.append(self[1](x).to(x.tensors.dtype))  # match the feature dtype
        return out, pos
def build_backbone(args):
    """Assemble the backbone + positional-encoding module from parsed args."""
    position_embedding = build_position_encoding(args)  # pixel-position encoding for the feature maps
    train_backbone = args.lr_backbone > 0  # a positive backbone LR means fine-tune it
    return_interm_layers = args.masks  # segmentation needs intermediate feature maps
    backbone = Backbone(args.backbone, train_backbone, return_interm_layers, args.dilation)
    model = Joiner(backbone, position_embedding)  # fuse backbone outputs with position embeddings
    model.num_channels = backbone.num_channels
    return model
| 38.483333 | 123 | 0.663058 |
from collections import OrderedDict
import torch
import torch.nn.functional as F
import torchvision
from torch import nn
from torchvision.models._utils import IntermediateLayerGetter
from typing import Dict, List
from util.misc import NestedTensor, is_main_process
from .position_encoding import build_position_encoding
class FrozenBatchNorm2d(torch.nn.Module):
def __init__(self, n):
super(FrozenBatchNorm2d, self).__init__()
self.register_buffer("weight", torch.ones(n))
self.register_buffer("bias", torch.zeros(n))
self.register_buffer("running_mean", torch.zeros(n))
self.register_buffer("running_var", torch.ones(n))
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
num_batches_tracked_key = prefix + 'num_batches_tracked'
if num_batches_tracked_key in state_dict:
del state_dict[num_batches_tracked_key]
super(FrozenBatchNorm2d, self)._load_from_state_dict(
state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs)
def forward(self, x):
w = self.weight.reshape(1, -1, 1, 1)
b = self.bias.reshape(1, -1, 1, 1)
rv = self.running_var.reshape(1, -1, 1, 1)
rm = self.running_mean.reshape(1, -1, 1, 1)
eps = 1e-5
scale = w * (rv + eps).rsqrt()
bias = b - rm * scale
return x * scale + bias
class BackboneBase(nn.Module):
def __init__(self, backbone: nn.Module, train_backbone: bool, num_channels: int, return_interm_layers: bool):
super().__init__()
for name, parameter in backbone.named_parameters():
if not train_backbone or 'layer2' not in name and 'layer3' not in name and 'layer4' not in name:
parameter.requires_grad_(False)
if return_interm_layers:
return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"}
else:
return_layers = {'layer4': "0"}
self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
self.num_channels = num_channels
def forward(self, tensor_list: NestedTensor):
xs = self.body(tensor_list.tensors)
out: Dict[str, NestedTensor] = {}
for name, x in xs.items():
m = tensor_list.mask
assert m is not None
mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0]
out[name] = NestedTensor(x, mask)
return out
class Backbone(BackboneBase):
def __init__(self, name: str,
train_backbone: bool,
return_interm_layers: bool,
dilation: bool):
backbone = getattr(torchvision.models, name)(
replace_stride_with_dilation=[False, False, dilation],
pretrained=is_main_process(), norm_layer=FrozenBatchNorm2d)
num_channels = 512 if name in ('resnet18', 'resnet34') else 2048
super().__init__(backbone, train_backbone, num_channels, return_interm_layers)
class Joiner(nn.Sequential):
def __init__(self, backbone, position_embedding):
super().__init__(backbone, position_embedding)
def forward(self, tensor_list: NestedTensor):
xs = self[0](tensor_list)
out: List[NestedTensor] = []
pos = []
for name, x in xs.items():
out.append(x)
pos.append(self[1](x).to(x.tensors.dtype))
return out, pos
def build_backbone(args):
position_embedding = build_position_encoding(args)
train_backbone = args.lr_backbone > 0
return_interm_layers = args.masks
backbone = Backbone(args.backbone, train_backbone, return_interm_layers, args.dilation)
model = Joiner(backbone, position_embedding)
model.num_channels = backbone.num_channels
return model
| true | true |
f73a1d131da66c45f3619b5cce1359f15cf14184 | 628 | py | Python | src/create_dataset.py | garciadias/k-means_on_apogee | 7c3315a0d305f255c121a015607e22e5a46bba82 | [
"CC0-1.0"
] | null | null | null | src/create_dataset.py | garciadias/k-means_on_apogee | 7c3315a0d305f255c121a015607e22e5a46bba82 | [
"CC0-1.0"
] | null | null | null | src/create_dataset.py | garciadias/k-means_on_apogee | 7c3315a0d305f255c121a015607e22e5a46bba82 | [
"CC0-1.0"
] | null | null | null | """Create csv with spectral data"""
from os import getcwd
from pathlib import Path
from astropy.io import fits
import pandas as pd

PROJECT_PATH = getcwd()

# Collect one spectrum per FITS file, keyed by the OBJID from the primary
# header. The wavelength grid is taken from HDU 4 (last file read wins).
SPECTRA = {}
wavelength = None  # stays None (empty index) if no FITS files are found
for spectrum_path in Path('%s/data/fits/' % PROJECT_PATH).glob('*fits'):
    # Read everything *before* deleting the file. The original code unlinked
    # first and then read HDU 4 from the still-open handle, which only works
    # on POSIX and also leaked the file handle; the context manager fixes both.
    with fits.open(spectrum_path) as spectrum_fits:
        spectrum = spectrum_fits[1].data[0]
        SPECTRA[spectrum_fits[0].header['OBJID']] = spectrum
        wavelength = spectrum_fits[4].data[0]
    spectrum_path.unlink()  # already a Path; no need to re-wrap

all_spectra = pd.DataFrame(SPECTRA, index=wavelength).T
all_spectra.to_csv('%s/data/all_spectra.csv' % PROJECT_PATH)
Path(PROJECT_PATH + '/models').mkdir(exist_ok=True)
| 28.545455 | 72 | 0.738854 | from os import getcwd
from pathlib import Path
from astropy.io import fits
import pandas as pd
PROJECT_PATH = getcwd()
SPECTRA = {}
for spectrum_path in Path('%s/data/fits/' % PROJECT_PATH).glob('*fits'):
spectrum_fits = fits.open(spectrum_path)
spectrum = spectrum_fits[1].data[0]
SPECTRA[spectrum_fits[0].header['OBJID']] = spectrum
Path(spectrum_path).unlink()
wavelenght = spectrum_fits[4].data[0]
all_spectra = pd.DataFrame(SPECTRA, index=wavelenght).T
all_spectra.to_csv('%s/data/all_spectra.csv' % PROJECT_PATH)
Path(PROJECT_PATH + '/models').mkdir(exist_ok=True)
| true | true |
f73a1e1e0fb8f6902c832d12edbfb271d89b0b69 | 3,245 | py | Python | starthinker/task/bqflow/run.py | arbrown/starthinker | 1a14664fb1a8f2a757b100363ea8958833b7754c | [
"Apache-2.0"
] | 138 | 2018-11-28T21:42:44.000Z | 2022-03-30T17:26:35.000Z | starthinker/task/bqflow/run.py | arbrown/starthinker | 1a14664fb1a8f2a757b100363ea8958833b7754c | [
"Apache-2.0"
] | 36 | 2019-02-19T18:33:20.000Z | 2022-01-24T18:02:44.000Z | starthinker/task/bqflow/run.py | arbrown/starthinker | 1a14664fb1a8f2a757b100363ea8958833b7754c | [
"Apache-2.0"
] | 54 | 2018-12-06T05:47:32.000Z | 2022-02-21T22:01:01.000Z | ###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
from googleapiclient.errors import HttpError
from starthinker.util.bigquery import table_list
from starthinker.util.data import get_rows
from starthinker.util.discovery_to_bigquery import Discovery_To_BigQuery
from starthinker.task.google_api.run import google_api_build_errors
from starthinker.task.google_api.run import google_api_build_results
from starthinker.task.google_api.run import google_api_execute
from starthinker.task.google_api.run import google_api_initilaize
def build_request(endpoint):
return {
"bigquery": {
"dataset": endpoint['dataset'],
"table": endpoint['table']
}
}
def build_results(config, auth, api_call, endpoint):
return google_api_build_results(
config, auth, api_call, {
'bigquery': {
'dataset':
endpoint['dataset'],
'table':
endpoint['table'].replace('BQFlow__', 'BQFlow__RESULTS__')
}
})
def build_errors(config, auth, api_call, endpoint):
return google_api_build_errors(
config, auth, api_call, {
'bigquery': {
'dataset': endpoint['dataset'],
'table': endpoint['table'].replace('BQFlow__', 'BQFlow__ERRORS__')
}
})
def bqflow(config, task):
if config.verbose: print('BQFLOW')
endpoints = []
# load dataset / table list
for dataset, table, kind in table_list(config, task['auth'], config.project):
if table.startswith('BQFlow__') and not table.startswith('BQFlow__RESULTS__') and not table.startswith('BQFlow__ERRORS__'):
print(table, kind)
endpoints.append({'dataset': dataset, kind.lower(): table})
for endpoint in endpoints:
if 'table' in endpoint:
_, api, function = endpoint['table'].split('__', 2)
function = function.replace('__', '.')
api_call = {
'auth':'user',
'api':api,
'version':Discovery_To_BigQuery.preferred_version(api, task.get('key')),
'function':function,
}
kwargs_list = get_rows(
config, task['auth'], build_request(endpoint), as_object=True)
results = build_results(config, task['auth'], api_call, endpoint)
errors = build_errors(config, task['auth'], api_call, endpoint)
for kwargs in kwargs_list:
api_call['kwargs'] = kwargs
if config.verbose: print('BQFLOW API CALL:', api_call)
google_api_initilaize(config, api_call)
google_api_execute(config, task['auth'], api_call, results, errors)
| 32.777778 | 127 | 0.646225 | true | true | |
f73a1ebf24f3b1ed80fa8aa279e730ef9ae2e7ac | 2,601 | py | Python | struct2tensor/calculate_options.py | jay90099/struct2tensor | 47d651757efa27586bf75f991b2174d8173a750b | [
"Apache-2.0"
] | 30 | 2019-10-07T21:31:44.000Z | 2022-03-30T17:11:44.000Z | struct2tensor/calculate_options.py | jay90099/struct2tensor | 47d651757efa27586bf75f991b2174d8173a750b | [
"Apache-2.0"
] | 2 | 2020-03-23T20:48:14.000Z | 2021-04-16T15:05:33.000Z | struct2tensor/calculate_options.py | jay90099/struct2tensor | 47d651757efa27586bf75f991b2174d8173a750b | [
"Apache-2.0"
] | 30 | 2019-07-16T13:01:53.000Z | 2022-03-01T22:04:36.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Set options for struct2tensor.
This object can be passed to several methods. It is passed
as an argument to calculate, get_sparse_tensor, and get_ragged_tensor.
"""
class Options(object):
    """Options controlling prensor calculation.

    Prefer get_default_options() or get_options_with_minimal_checks() over
    direct construction; fine-tune by mutating attributes afterwards.

    Attributes:
      ragged_checks: if True, add assertion ops when converting a Prensor
        object to RaggedTensors.
      sparse_checks: if True, add assertion ops when converting a Prensor
        object to SparseTensors.
      use_string_view: if True, decode sub-messages into string views to
        avoid copying.
      experimental_honor_proto3_optional_semantics: if True, proto3 primitive
        fields without an explicit "optional"/"repeated" label are treated as
        always present, substituting the default value (0 or "") when the
        value is missing on the wire.
    """

    def __init__(self, ragged_checks: bool, sparse_checks: bool):
        """Create options with the given conversion-check settings."""
        self.ragged_checks = ragged_checks
        self.sparse_checks = sparse_checks
        # Remaining flags default off; callers toggle them post-construction.
        self.use_string_view = False
        self.experimental_honor_proto3_optional_semantics = False

    def __str__(self):
        return (f"{{ragged_checks:{self.ragged_checks}"
                f", sparse_checks: {self.sparse_checks}}}")
def get_default_options() -> Options:
  """Return the default options: every runtime check enabled."""
  opts = Options(ragged_checks=True, sparse_checks=True)
  return opts
def get_options_with_minimal_checks() -> Options:
  """Return options tuned for speed: runtime conversion checks disabled."""
  opts = Options(ragged_checks=False, sparse_checks=False)
  return opts
| 38.820896 | 80 | 0.748174 |
class Options(object):
def __init__(self, ragged_checks: bool, sparse_checks: bool):
self.ragged_checks = ragged_checks
self.sparse_checks = sparse_checks
self.use_string_view = False
self.experimental_honor_proto3_optional_semantics = False
def __str__(self):
return ("{ragged_checks:" + str(self.ragged_checks) + ", sparse_checks: " +
str(self.sparse_checks) + "}")
def get_default_options() -> Options:
return Options(ragged_checks=True, sparse_checks=True)
def get_options_with_minimal_checks() -> Options:
return Options(ragged_checks=False, sparse_checks=False)
| true | true |
f73a1fb3d2cf15ad986a8bcd12e39f7a10c685e1 | 1,441 | py | Python | djangocms_fil_permissions/permissions.py | FidelityInternational/djangocms-fil-permissions | 59e759b320ef44c3cf91695383d097d69fb4b3e9 | [
"BSD-3-Clause"
] | null | null | null | djangocms_fil_permissions/permissions.py | FidelityInternational/djangocms-fil-permissions | 59e759b320ef44c3cf91695383d097d69fb4b3e9 | [
"BSD-3-Clause"
] | null | null | null | djangocms_fil_permissions/permissions.py | FidelityInternational/djangocms-fil-permissions | 59e759b320ef44c3cf91695383d097d69fb4b3e9 | [
"BSD-3-Clause"
] | 1 | 2019-02-22T13:58:28.000Z | 2019-02-22T13:58:28.000Z | from django.core.exceptions import PermissionDenied
from rules.rulesets import RuleSet
from .rules import has_site_access
site_permissions = RuleSet()
site_permissions.add_rule("site_perm", has_site_access)
class SitePermissionBackend(object):
    """Authentication backend that checks row-level permissions granted
    on site-level.

    Only ``has_perm`` does real work here: it can veto access outright by
    raising ``PermissionDenied``; every other hook returns ``None`` so
    Django falls through to the next configured backend.
    """
    def authenticate(self, request, **credentials):
        """Pass authentication process to the next authentication
        backend.
        """
        return None
    def has_perm(self, user, perm, obj=None):
        """Checks if ``user`` belongs to a site associated with ``obj``.
        Denies access if ``obj`` is registered for site-level
        permissions and ``user`` does not belong to the same site
        as ``obj``.
        In any other case (``user`` passed the test or ``obj``
        is not registered for site-level permissions,
        no ``obj`` is passed), permission checking continues to the
        next authentication backend.
        :param user: User instance
        :param perm: Permission codename (not used by the site rule itself)
        :param obj: Object checked against
        :raises PermissionDenied: when the ``site_perm`` rule fails; this
            short-circuits all remaining authentication backends
        """
        if not site_permissions.test_rule("site_perm", user, obj):
            raise PermissionDenied()
        return None
    def has_module_perms(self, user, app_label):
        """Pass module permission checking process to the next
        authentication backend.
        """
        return None
| 30.020833 | 71 | 0.659264 | from django.core.exceptions import PermissionDenied
from rules.rulesets import RuleSet
from .rules import has_site_access
site_permissions = RuleSet()
site_permissions.add_rule("site_perm", has_site_access)
class SitePermissionBackend(object):
def authenticate(self, request, **credentials):
return None
def has_perm(self, user, perm, obj=None):
if not site_permissions.test_rule("site_perm", user, obj):
raise PermissionDenied()
return None
def has_module_perms(self, user, app_label):
return None
| true | true |
f73a20aee35d8cd5dea6f8d2e7fd7dcb9d75d040 | 6,100 | py | Python | tensorflow_federated/python/core/impl/executors/execution_context_test.py | Vishal-V/federated | 3cf0e4017c6a072ddb428ff993f2db9254c00cc0 | [
"Apache-2.0"
] | 1 | 2020-06-11T16:34:24.000Z | 2020-06-11T16:34:24.000Z | tensorflow_federated/python/core/impl/executors/execution_context_test.py | savitakumbhare/federated | 2575ac3c571004ba554bd0c0d11c2e307ff22d57 | [
"Apache-2.0"
] | null | null | null | tensorflow_federated/python/core/impl/executors/execution_context_test.py | savitakumbhare/federated | 2575ac3c571004ba554bd0c0d11c2e307ff22d57 | [
"Apache-2.0"
] | 1 | 2021-09-06T03:33:14.000Z | 2021-09-06T03:33:14.000Z | # Lint as: python3
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
from absl.testing import absltest
import numpy as np
import tensorflow as tf
from tensorflow_federated.python.core.api import computation_types
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.api import intrinsics
from tensorflow_federated.python.core.impl.compiler import type_factory
from tensorflow_federated.python.core.impl.executors import execution_context
from tensorflow_federated.python.core.impl.executors import executor_stacks
tf.compat.v1.enable_v2_behavior()
@contextlib.contextmanager
def _execution_context(num_clients=None):
  """Yields an ExecutionContext backed by a local executor stack.

  Args:
    num_clients: Fixed number of clients for the stack, or `None` to let the
      executor infer cardinalities per invocation.
  """
  executor_factory = executor_stacks.local_executor_factory(num_clients)
  yield execution_context.ExecutionContext(executor_factory)
class RetryableErrorTest(absltest.TestCase):
  """Unit tests for execution_context._is_retryable_error."""
  def test_is_retryable_error(self):
    # Only RetryableError instances count as retryable; other exception
    # types and arbitrary non-exception values do not.
    retryable_error = execution_context.RetryableError()
    self.assertTrue(execution_context._is_retryable_error(retryable_error))
    self.assertFalse(execution_context._is_retryable_error(TypeError()))
    self.assertFalse(execution_context._is_retryable_error(1))
    self.assertFalse(execution_context._is_retryable_error('a'))
    self.assertFalse(execution_context._is_retryable_error(None))
class ExecutionContextIntegrationTest(absltest.TestCase):
  """End-to-end tests running TFF computations under a local ExecutionContext."""
  def test_simple_no_arg_tf_computation_with_int_result(self):
    @computations.tf_computation
    def comp():
      return tf.constant(10)
    with _execution_context():
      result = comp()
    self.assertEqual(result, 10)
  def test_one_arg_tf_computation_with_int_param_and_result(self):
    @computations.tf_computation(tf.int32)
    def comp(x):
      return tf.add(x, 10)
    with _execution_context():
      result = comp(3)
    self.assertEqual(result, 13)
  def test_three_arg_tf_computation_with_int_params_and_result(self):
    @computations.tf_computation(tf.int32, tf.int32, tf.int32)
    def comp(x, y, z):
      return tf.multiply(tf.add(x, y), z)
    with _execution_context():
      result = comp(3, 4, 5)
    self.assertEqual(result, 35)
  def test_tf_computation_with_dataset_params_and_int_result(self):
    @computations.tf_computation(computation_types.SequenceType(tf.int32))
    def comp(ds):
      return ds.reduce(np.int32(0), lambda x, y: x + y)
    with _execution_context():
      ds = tf.data.Dataset.range(10).map(lambda x: tf.cast(x, tf.int32))
      result = comp(ds)
    # Sum of 0..9.
    self.assertEqual(result, 45)
  def test_tf_computation_with_structured_result(self):
    @computations.tf_computation
    def comp():
      return collections.OrderedDict([
          ('a', tf.constant(10)),
          ('b', tf.constant(20)),
      ])
    with _execution_context():
      result = comp()
    # Structured results should round-trip as an OrderedDict of Python values.
    self.assertIsInstance(result, collections.OrderedDict)
    self.assertDictEqual(result, {'a': 10, 'b': 20})
  def test_with_temperature_sensor_example(self):
    # Each client counts readings above a broadcast threshold; the server
    # takes the mean of those counts, weighted by each client's reading count.
    @computations.tf_computation(
        computation_types.SequenceType(tf.float32), tf.float32)
    def count_over(ds, t):
      return ds.reduce(
          np.float32(0), lambda n, x: n + tf.cast(tf.greater(x, t), tf.float32))
    @computations.tf_computation(computation_types.SequenceType(tf.float32))
    def count_total(ds):
      return ds.reduce(np.float32(0.0), lambda n, _: n + 1.0)
    @computations.federated_computation(
        type_factory.at_clients(computation_types.SequenceType(tf.float32)),
        type_factory.at_server(tf.float32))
    def comp(temperatures, threshold):
      return intrinsics.federated_mean(
          intrinsics.federated_map(
              count_over,
              intrinsics.federated_zip(
                  [temperatures,
                   intrinsics.federated_broadcast(threshold)])),
          intrinsics.federated_map(count_total, temperatures))
    with _execution_context():
      to_float = lambda x: tf.cast(x, tf.float32)
      temperatures = [
          tf.data.Dataset.range(10).map(to_float),
          tf.data.Dataset.range(20).map(to_float),
          tf.data.Dataset.range(30).map(to_float),
      ]
      threshold = 15.0
      result = comp(temperatures, threshold)
      # Weighted mean of counts (0, 4, 14) with weights (10, 20, 30).
      self.assertAlmostEqual(result, 8.333, places=3)
    # Same computation with the number of clients fixed up front.
    num_clients = 3
    with _execution_context(num_clients):
      to_float = lambda x: tf.cast(x, tf.float32)
      temperatures = [
          tf.data.Dataset.range(10).map(to_float),
          tf.data.Dataset.range(20).map(to_float),
          tf.data.Dataset.range(30).map(to_float),
      ]
      threshold = 15.0
      result = comp(temperatures, threshold)
      self.assertAlmostEqual(result, 8.333, places=3)
  def test_changing_cardinalities_across_calls(self):
    @computations.federated_computation(type_factory.at_clients(tf.int32))
    def comp(x):
      return x
    five_ints = list(range(5))
    ten_ints = list(range(10))
    with _execution_context():
      # The same context should handle a different client count on each call.
      five = comp(five_ints)
      ten = comp(ten_ints)
    self.assertEqual(five, five_ints)
    self.assertEqual(ten, ten_ints)
  def test_conflicting_cardinalities_within_call(self):
    @computations.federated_computation(
        [type_factory.at_clients(tf.int32),
         type_factory.at_clients(tf.int32)])
    def comp(x):
      return x
    five_ints = list(range(5))
    ten_ints = list(range(10))
    with _execution_context():
      # Two CLIENTS-placed arguments of different sizes must be rejected.
      with self.assertRaisesRegex(ValueError, 'Conflicting cardinalities'):
        comp([five_ints, ten_ints])
if __name__ == '__main__':
absltest.main()
| 31.606218 | 80 | 0.718033 |
import collections
import contextlib
from absl.testing import absltest
import numpy as np
import tensorflow as tf
from tensorflow_federated.python.core.api import computation_types
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.api import intrinsics
from tensorflow_federated.python.core.impl.compiler import type_factory
from tensorflow_federated.python.core.impl.executors import execution_context
from tensorflow_federated.python.core.impl.executors import executor_stacks
tf.compat.v1.enable_v2_behavior()
@contextlib.contextmanager
def _execution_context(num_clients=None):
executor_factory = executor_stacks.local_executor_factory(num_clients)
yield execution_context.ExecutionContext(executor_factory)
class RetryableErrorTest(absltest.TestCase):
def test_is_retryable_error(self):
retryable_error = execution_context.RetryableError()
self.assertTrue(execution_context._is_retryable_error(retryable_error))
self.assertFalse(execution_context._is_retryable_error(TypeError()))
self.assertFalse(execution_context._is_retryable_error(1))
self.assertFalse(execution_context._is_retryable_error('a'))
self.assertFalse(execution_context._is_retryable_error(None))
class ExecutionContextIntegrationTest(absltest.TestCase):
def test_simple_no_arg_tf_computation_with_int_result(self):
@computations.tf_computation
def comp():
return tf.constant(10)
with _execution_context():
result = comp()
self.assertEqual(result, 10)
def test_one_arg_tf_computation_with_int_param_and_result(self):
@computations.tf_computation(tf.int32)
def comp(x):
return tf.add(x, 10)
with _execution_context():
result = comp(3)
self.assertEqual(result, 13)
def test_three_arg_tf_computation_with_int_params_and_result(self):
@computations.tf_computation(tf.int32, tf.int32, tf.int32)
def comp(x, y, z):
return tf.multiply(tf.add(x, y), z)
with _execution_context():
result = comp(3, 4, 5)
self.assertEqual(result, 35)
def test_tf_computation_with_dataset_params_and_int_result(self):
@computations.tf_computation(computation_types.SequenceType(tf.int32))
def comp(ds):
return ds.reduce(np.int32(0), lambda x, y: x + y)
with _execution_context():
ds = tf.data.Dataset.range(10).map(lambda x: tf.cast(x, tf.int32))
result = comp(ds)
self.assertEqual(result, 45)
def test_tf_computation_with_structured_result(self):
@computations.tf_computation
def comp():
return collections.OrderedDict([
('a', tf.constant(10)),
('b', tf.constant(20)),
])
with _execution_context():
result = comp()
self.assertIsInstance(result, collections.OrderedDict)
self.assertDictEqual(result, {'a': 10, 'b': 20})
def test_with_temperature_sensor_example(self):
@computations.tf_computation(
computation_types.SequenceType(tf.float32), tf.float32)
def count_over(ds, t):
return ds.reduce(
np.float32(0), lambda n, x: n + tf.cast(tf.greater(x, t), tf.float32))
@computations.tf_computation(computation_types.SequenceType(tf.float32))
def count_total(ds):
return ds.reduce(np.float32(0.0), lambda n, _: n + 1.0)
@computations.federated_computation(
type_factory.at_clients(computation_types.SequenceType(tf.float32)),
type_factory.at_server(tf.float32))
def comp(temperatures, threshold):
return intrinsics.federated_mean(
intrinsics.federated_map(
count_over,
intrinsics.federated_zip(
[temperatures,
intrinsics.federated_broadcast(threshold)])),
intrinsics.federated_map(count_total, temperatures))
with _execution_context():
to_float = lambda x: tf.cast(x, tf.float32)
temperatures = [
tf.data.Dataset.range(10).map(to_float),
tf.data.Dataset.range(20).map(to_float),
tf.data.Dataset.range(30).map(to_float),
]
threshold = 15.0
result = comp(temperatures, threshold)
self.assertAlmostEqual(result, 8.333, places=3)
num_clients = 3
with _execution_context(num_clients):
to_float = lambda x: tf.cast(x, tf.float32)
temperatures = [
tf.data.Dataset.range(10).map(to_float),
tf.data.Dataset.range(20).map(to_float),
tf.data.Dataset.range(30).map(to_float),
]
threshold = 15.0
result = comp(temperatures, threshold)
self.assertAlmostEqual(result, 8.333, places=3)
def test_changing_cardinalities_across_calls(self):
@computations.federated_computation(type_factory.at_clients(tf.int32))
def comp(x):
return x
five_ints = list(range(5))
ten_ints = list(range(10))
with _execution_context():
five = comp(five_ints)
ten = comp(ten_ints)
self.assertEqual(five, five_ints)
self.assertEqual(ten, ten_ints)
def test_conflicting_cardinalities_within_call(self):
@computations.federated_computation(
[type_factory.at_clients(tf.int32),
type_factory.at_clients(tf.int32)])
def comp(x):
return x
five_ints = list(range(5))
ten_ints = list(range(10))
with _execution_context():
with self.assertRaisesRegex(ValueError, 'Conflicting cardinalities'):
comp([five_ints, ten_ints])
if __name__ == '__main__':
absltest.main()
| true | true |
f73a2151abde4ec417d246705e0947cf7228530a | 35,026 | py | Python | .history/neuroformer/model_perceiver_20220121144506.py | woanderer/neuroformer | df3462d55977b6c9adcb6753e7c474b8b76e8021 | [
"MIT"
] | null | null | null | .history/neuroformer/model_perceiver_20220121144506.py | woanderer/neuroformer | df3462d55977b6c9adcb6753e7c474b8b76e8021 | [
"MIT"
] | null | null | null | .history/neuroformer/model_perceiver_20220121144506.py | woanderer/neuroformer | df3462d55977b6c9adcb6753e7c474b8b76e8021 | [
"MIT"
] | null | null | null | # from code.transformer_vid.utils import convert_weights
# import rotary_embedding_torch
from torch.nn.modules.activation import GELU, ReLU
# from data.OneCombo3.trainer import TrainerConfig
import math
import numpy as np
import itertools
import logging
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.autograd import Variable
from torchvision.models.video import r3d_18
# from ResNet3D import r3d_18
from scipy.optimize import linear_sum_assignment
# from rotary_embedding_torch import apply_rotary_emb, RotaryEmbedding
from einops.layers.torch import Rearrange
logger = logging.getLogger(__name__)
def convert_weights(model: nn.Module):
    """Convert applicable model parameters to fp16, in place."""

    def _to_half(module):
        # Convolution / linear layers: cast weight and optional bias.
        if isinstance(module, (nn.Conv1d, nn.Conv2d, nn.Linear)):
            module.weight.data = module.weight.data.half()
            if module.bias is not None:
                module.bias.data = module.bias.data.half()

        # Multi-head attention stores its projections as named attributes.
        if isinstance(module, nn.MultiheadAttention):
            attrs = [f"{s}_proj_weight" for s in ["in", "q", "k", "v"]]
            attrs += ["in_proj_bias", "bias_k", "bias_v"]
            for attr in attrs:
                tensor = getattr(module, attr)
                if tensor is not None:
                    tensor.data = tensor.data.half()

        # CLIP-style projection attributes, when present on the module.
        for name in ["text_projection", "proj"]:
            if hasattr(module, name):
                attr = getattr(module, name)
                if attr is not None:
                    attr.data = attr.data.half()

    model.apply(_to_half)
class GPTConfig:
    """Base GPT config; hyperparameters common to all GPT variants."""

    # Dropout rates used throughout the model.
    embd_pdrop = 0.2
    resid_pdrop = 0.2
    attn_pdrop = 0.2
    pos_pdrop = 0.2
    temp_pdrop = 0.2
    # Enable positional / temporal embeddings.
    pos_emb = True
    temp_emb = True
    # Pruning schedule bookkeeping.
    start_prune = 30
    epoch = 0

    def __init__(self, vocab_size, block_size, **kwargs):
        self.vocab_size = vocab_size
        self.block_size = block_size
        # Every extra keyword becomes an attribute, overriding class defaults.
        for key, value in kwargs.items():
            setattr(self, key, value)
class neuralGPTConfig:
    """GPT config variant for neural data; most dropouts share one rate ``n``."""

    n = 0.4        # shared dropout rate
    im_drop = 0.2  # image-embedding dropout (independent of n)
    id_drop = n
    embd_pdrop = n
    resid_pdrop = n
    attn_pdrop = n
    pos_pdrop = n
    temp_pdrop = n
    # Enable positional / temporal embeddings.
    pos_emb = True
    temp_emb = True

    def __init__(self, vocab_size, block_size, **kwargs):
        self.vocab_size = vocab_size
        self.block_size = block_size
        # Extra keywords become attributes, overriding the class defaults.
        for key, value in kwargs.items():
            setattr(self, key, value)
class GPT1Config(GPTConfig):
    """ GPT-1 like network roughly 125M params """
    n_layer = 12  # number of transformer blocks
    n_head = 12   # attention heads per block
    n_embd = 768  # embedding width
class VideoFeaturesExtractor(nn.Module):
    """
    R3D: (3 x T x H x W)
    H, W = 112

    Wraps a pretrained R3D-18 with its pooling/classification head removed
    and returns a flattened sequence of spatio-temporal feature vectors.
    """
    def __init__(self):
        super().__init__()
        # Drop the final avgpool + fc layers; keep only the conv trunk.
        self.backbone = torch.nn.Sequential(*(list(r3d_18(pretrained=True).children())[:-2]))
        convert_weights(self.backbone)  # cast eligible weights to fp16
        # # freeze backbone
        # for k, v in self.backbone.named_parameters():
        #     v.requires_grad = False

    def forward(self, x):
        # B = Batch, T, C, Fm, H, W
        features = self.backbone(x)  # (B, C, T, H, W)
        B, C, T, H, W = features.shape
        # Flatten the (T, H, W) grid into a token sequence of C-dim vectors.
        features = features.permute(0, 2, 3, 4, 1)
        features = features.view(B, -1, C)
        return features
class VideoEncoder(nn.Module):
    """Patchify a video tensor into a sequence of patch embeddings, ViT-style.

    Splits each (H, W) frame into patch_size x patch_size patches and flattens
    every patch (with its channels) into a single token of width n_embd.

    Args:
        n_embd: target token width; must be divisible by patch_size ** 2.
        patch_size: spatial patch edge length (default 16, the original value).
    """
    def __init__(self, n_embd, patch_size=16):
        super().__init__()
        # Bug fix: the original `p1, p2 = 16` raised TypeError (cannot unpack
        # an int); the intent was two equal patch dimensions.
        p1 = p2 = patch_size
        assert n_embd % (p1 * p2) == 0, "n_embd must be divisible by p1 * p2"
        self.to_patch_embedding = nn.Sequential(
            # einops axis names must be symbolic identifiers; the original
            # f-string interpolated numeric values into the pattern, which is
            # not a valid einops expression. Sizes are bound via keyword args;
            # the channel count c is inferred from the input.
            Rearrange('b c t (h p1) (w p2) -> b (t h w) (p1 p2 c)', p1=p1, p2=p2)
        )

    def forward(self, x):
        """x: (B, C, T, H, W) video -> (B, T*h*w, p1*p2*C) token sequence."""
        return self.to_patch_embedding(x)
class CausalSelfAttention(nn.Module):
    """
    A vanilla multi-head masked self-attention layer with a projection at the end.

    Two extras on top of standard GPT attention:
      * an optional random sparsity mask applied only in training mode, and
      * per-sample right-padding masking via the `pad` argument.
    The most recent attention map is cached on `self.att` for inspection.
    """

    def __init__(self, config):
        super().__init__()
        assert config.n_embd % config.n_head == 0
        self.config = config
        # key, query, value projections for all heads
        self.key = nn.Linear(config.n_embd, config.n_embd)
        self.query = nn.Linear(config.n_embd, config.n_embd)
        self.value = nn.Linear(config.n_embd, config.n_embd)
        # regularization
        self.attn_drop = nn.Dropout(config.attn_pdrop)
        self.resid_drop = nn.Dropout(config.resid_pdrop)
        # output projection
        self.proj = nn.Linear(config.n_embd, config.n_embd)
        # lower-triangular causal mask, shape (1, 1, block_size, block_size)
        self.register_buffer("mask", self.build_mask(config.block_size))
        self.n_head = config.n_head
        self.att = None  # cache of the last attention weights
        self.T = config.block_size
        # self.rotary_embedding = RotarySpatioTemporalEmbedding(config)

    def build_mask(self, block_size):
        """Build the lower-triangular causal mask (1 = may attend)."""
        mask = torch.tril(torch.ones((block_size, block_size)),
                                     ).view(1, 1, block_size, block_size)
        return mask

    def generate_sparse_mask(self, att, p, config):
        """
        Generate a sparse mask according to p.

        Each key position is masked (set to -inf for every query) with
        probability `p`. The first position is never masked. NOTE(review):
        the fully-masked check reduces over dim=0 while the fix-up writes
        row `step` — the asymmetry looks intentional only because all rows
        are identical copies after `repeat`; confirm intended behavior.
        """
        assert p >= 0 and p <= 1, "p should be in [0, 1]"
        T = config.block_size
        mask = torch.rand((1, T)) < p
        mask = mask.repeat(T, 1)
        mask[0, 0] = False  # don't mask 1st step
        # check if any step is fully masked and umask it
        idx_all_true = (True == torch.all(mask, dim=0)).nonzero()
        for step in idx_all_true:
            # re-open one random earlier position so the row can attend somewhere
            sampler = torch.distributions.Uniform(low=0, high=step.item()+1)
            idx_false = sampler.sample((1,1)).long()
            mask[step, idx_false] = False
        # mask = mask.repeat(T, 1)
        mask = mask.view(1, 1, T, T).cuda() if att.is_cuda else mask.view(1, 1, T, T)
        att = att.masked_fill(mask, float('-inf'))
        return att

    def forward(self, x, pad=None, dtx=None):
        """
        Args:
            x: (B, T, C) input sequence.
            pad: optional per-sample count of right-padding tokens to hide.
            dtx: unused here; kept for the rotary-embedding variant below.
        Returns:
            (B, T, C) attended and projected output.
        """
        # B = Batch, T = Sequence, C = n_embed
        B, T, C = x.size()
        # calculate query, key, values for all head in batch and move head forward to the batch dim
        k = self.key(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
        q = self.query(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
        v = self.value(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
        # # apply rotary embeddings
        # if dtx is not None:
        #     q, k = self.rotary_embedding(q, k, dtx)
        # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
        att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
        att = att.masked_fill(self.mask[:,:,:T,:T] == 0, float('-inf'))
        if self.training:
            # random sparsity regularization (training mode only)
            att = self.generate_sparse_mask(att, 0.25, self.config)
        if pad is not None:
            for idx, i in enumerate(pad):
                att[idx, :, :, self.T - i:] = float('-inf')   # only able to see first padding token
        att = F.softmax(att, dim=-1)
        att = self.attn_drop(att)
        self.att = att
        y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
        y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side
        # output projection
        y = self.resid_drop(self.proj(y))
        return y
class PositionalEmbedding(nn.Module):
    """Fixed sinusoidal positional encoding (the classic PE function) + dropout.

    Precomputes sin/cos encodings for up to ``max_len`` positions; ``forward``
    returns the first ``x.size(1)`` of them with shape (1, T, n_embd). Note
    that only the *length* of ``x`` is used — the encoding is returned on its
    own and added to the token embeddings by the caller.
    """

    def __init__(self, n_embd, p_drop, max_len=1500):
        super().__init__()
        self.dropout = nn.Dropout(p=p_drop)
        # Compute the positional encodings once in log space.
        pe = torch.zeros(max_len, n_embd)
        position = torch.arange(0, max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, n_embd, 2) *
                             -(math.log(10000.0) / n_embd))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0)
        self.register_buffer('pe', pe)

    def forward(self, x):
        # FIX: dropped the deprecated torch.autograd.Variable wrapper (a no-op
        # since torch 0.4); the registered buffer already has requires_grad=False.
        return self.dropout(self.pe[:, :x.size(1)])
# class RotarySpatioTemporalEmbedding(nn.Module):
# """ Rotary temporal embeddings - block_size = id_blk_sz """
# def __init__(self, config):
# super().__init__()
# self.frame_block_size = config.frame_block_size
# self.id_block_size = config.id_block_size
# self.emb = RotaryEmbedding(dim=32)
# def forward(self, q, k, t):
# b = t.shape[0]
# tf = self.frame_block_size
# queries = []
# keys = []
# for B in range(b):
# im_temp_emb = torch.tensor([-0.5] * (tf//2) + [0.5] * (tf//2))
# im_pos_emb = torch.arange(self.frame_block_size)
# im_emb = torch.stack([im_temp_emb, im_pos_emb], dim=0)
# id_temp_emb = self.temp_emb(t[B], cache_key=self.block_size)
# freqs = self.emb(torch.cat(im_emb, id_temp_emb))
# queries.append(apply_rotary_emb(freqs, q[B][None, ...]))
# keys.append(apply_rotary_emb(freqs, k[B][None, ...]))
# q, k = torch.cat(queries), torch.cat(keys)
# return q, k
class TemporalEmbedding(nn.Module):
    """Encode temporal position with fixed Fourier (sin/cos) signals + dropout.

    Identical construction to PositionalEmbedding: ``forward`` returns the
    first ``x.size(1)`` precomputed encodings, shape (1, T, n_embd); only the
    length of ``x`` is consulted.
    """

    def __init__(self, n_embd, p_drop, max_len=1500):
        super().__init__()
        self.dropout = nn.Dropout(p=p_drop)
        # Compute the positional encodings once in log space.
        pe = torch.zeros(max_len, n_embd)
        position = torch.arange(0, max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, n_embd, 2) *
                             -(math.log(10000.0) / n_embd))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0)
        self.register_buffer('pe', pe)

    def forward(self, x):
        # FIX: dropped the deprecated torch.autograd.Variable wrapper (a no-op
        # since torch 0.4); the registered buffer already has requires_grad=False.
        return self.dropout(self.pe[:, :x.size(1)])
class LearntTemporalEmbedding(nn.Module):
    """
    Learnt embedding of scalar times: projects a (B, T) time sequence to
    (B, T, n_embd) through a small two-layer GELU MLP with dropout.
    """

    def __init__(self, block_sz, n_embd, p_drop=0.2):
        super().__init__()
        hidden = n_embd // 2
        self.temp_emb = nn.Sequential(
            nn.Linear(1, hidden),
            nn.GELU(),
            nn.Linear(hidden, n_embd),
            nn.Dropout(p_drop),
        )

    def forward(self, x):
        # (B, T) -> (B, T, 1) -> (B, T, n_embd)
        scalar_times = x.unsqueeze(-1)
        return self.temp_emb(scalar_times)
class Decoder(nn.Module):
    """Seq2seq decoder wrapper.

    Despite the name this wraps a full ``nn.Transformer`` (3 encoder layers +
    ``config.n_layer`` decoder layers): ``memory`` is passed through the
    encoder stack before cross-attention. A causal target mask of size
    ``config.id_block_size`` is registered once as a buffer.
    """

    def __init__(self, config):
        super().__init__()
        # decoder_layer = nn.TransformerDecoderLayer(config.n_embd, config.n_head,
        #                                            activation='gelu', dropout=0.2, batch_first=True)
        # self.decoder = nn.TransformerDecoder(decoder_layer, config.n_layer)
        self.decoder = nn.Transformer(d_model=config.n_embd, nhead=config.n_head,
                                      num_encoder_layers=3, num_decoder_layers=config.n_layer,
                                      activation="gelu", dropout=0.4, batch_first=True)
        self.register_buffer("tgt_mask", self.generate_square_subsequent_mask(config.id_block_size))
        # self.register_buffer("tgt_pad_mask", self.generate_padding_mask(config.ids_block_size))
        self.T = config.id_block_size

    def generate_square_subsequent_mask(self, sz: int, pad=None):
        r"""Generate a square causal mask: masked positions are float('-inf'),
        allowed positions are 0.0. (Lower-triangular-allowed, incl. diagonal.)
        """
        mask = (torch.triu(torch.ones(sz, sz), diagonal=0) == 1).transpose(0, 1)
        mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
        return mask

    def generate_padding_mask(self, sz: int, pad=None):
        r"""Build an all-False (1, sz) boolean mask; intended to be copied and
        edited per-batch by build_padding_mask.
        """
        mask = torch.zeros(1, sz, dtype=torch.bool)
        return mask

    def generate_sparse_mask(self, sz: int, pad=None):
        r"""Build a square mask that randomly allows only ~75% of positions —
        a teacher-forcing-style sparsification (currently unused in forward).
        """
        rand_mat = torch.rand(1, sz)
        k = round(0.75 * sz)
        # threshold at the k-th smallest value, so ~75% of entries pass
        k_th_quant = torch.topk(rand_mat, k, largest = False)[0][:,-1:]
        bool_tensor = rand_mat <= k_th_quant
        mask = torch.where(bool_tensor, torch.tensor(1), torch.tensor(0)).repeat(sz, 1)
        mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
        return mask.cuda(self.tgt_mask.get_device()) if self.tgt_mask.is_cuda else mask

    def build_padding_mask(self, tgt, pad):
        """(B, T) boolean mask: True marks the last ``pad[B]`` (padded) steps."""
        # mask = self.tgt_pad_mask.repeat(tgt.shape[0], 1)
        mask = torch.zeros(tgt.shape[0], self.T, dtype=torch.bool)
        for B, P in enumerate(pad):
            mask[B, self.T - P:] = True
        return mask # .to(torch.cuda.current_device())

    def forward(self, tgt, memory, pad):
        # NOTE(review): ``pad`` is currently ignored — the padding-mask and
        # sparse-mask helpers above are built but not wired in here.
        # padding_mask = self.build_padding_mask(tgt, pad)
        # tgt_mask = self.generate_sparse_mask(self.T) if self.training else self.tgt_mask
        return self.decoder(src=memory, tgt=tgt, tgt_mask=self.tgt_mask,
                            tgt_key_padding_mask=None)
class ProjectNorm(nn.Module):
    """LayerNorm followed by a 2x-expansion GELU MLP projecting to target_size."""

    def __init__(self, feat_size, target_size):
        super().__init__()
        self.ln = nn.LayerNorm(feat_size)
        hidden = math.floor(2 * feat_size)
        self.mlp = nn.Sequential(
            nn.Linear(feat_size, hidden, bias=False),
            nn.GELU(),
            nn.Linear(hidden, target_size, bias=False),
        )

    def forward(self, x):
        normed = self.ln(x)
        return self.mlp(normed)
class TimeProjection(nn.Module):
    """Project (B, seq, C) features to (B, id_seq, target) time predictions.

    First compresses the sequence axis (seq -> id_seq) with an MLP applied
    across time, then maps the feature axis (C -> target) per step.
    """

    def __init__(self, seq_size, id_seq_size, feat_size, target_size):
        super().__init__()
        # sequence-axis compressor: seq_size -> id_seq_size
        self.mlp_seq = nn.Sequential(
            nn.Linear(seq_size, id_seq_size),
            nn.ReLU(),
            nn.Dropout(p=0.3),
            nn.Linear(id_seq_size, id_seq_size),
        )
        # feature-axis head: feat_size -> target_size
        self.mlp_t = nn.Sequential(
            nn.Linear(feat_size, feat_size // 2),
            nn.ReLU(),
            nn.Dropout(p=0.3),
            nn.Linear(feat_size // 2, target_size),
        )

    def forward(self, x):
        x = x.transpose(1, 2)      # (B, T, C) -> (B, C, T)
        x = self.mlp_seq(x)        # (B, C, id_seq)
        x = x.transpose(1, 2)      # (B, id_seq, C)
        return self.mlp_t(x)       # (B, id_seq, target)
class PSTHProjection(nn.Module):
    """Map a block output of shape (..., n_embd) to PSTH logits over the
    id vocabulary via a 4x-expansion GELU MLP (dropout before activation,
    as in the original)."""

    def __init__(self, config):
        super().__init__()
        width = 4 * config.n_embd
        self.mlp = nn.Sequential(
            nn.Linear(config.n_embd, width, bias=False),
            nn.Dropout(p=0.2),
            nn.GELU(),
            nn.Linear(width, config.id_vocab_size, bias=False),
        )

    def forward(self, x):
        return self.mlp(x)
# class PSTHProjection(nn.Module):
# def __init__(self, config):
# super().__init__()
# self.mlp_seq = nn.Sequential(
# nn.Linear(config.id_block_size, config.id_block_size // 2, bias=False),
# nn.GELU(),
# nn.Dropout(p=0.2),
# nn.Linear(config.id_block_size // 2, 1, bias=False)
# )
# self.mlp_t = nn.Sequential(
# nn.Linear(config.n_embd, config.n_embd * 4, bias=False),
# nn.GELU(),
# nn.Dropout(p=0.2),
# nn.Linear(config.n_embd * 4, config.id_vocab_size, bias=False)
# )
# def forward(self, x):
# x = x.transpose(-1, -2) # B, T, C -> B, C, T
# x = self.mlp_seq(x) # B, C, 1
# x = x.transpose(-2, -1) # B, 1, Vocab_id
# return self.mlp_t(x)
class TimeRNN(nn.Module):
    """Placeholder for an RNN-based time head — currently an empty stub.

    NOTE(review): ``__init__`` ignores both arguments and defines no layers or
    ``forward``; instantiating it yields a parameter-less module. Presumably
    unfinished — confirm before use.
    """

    def __init__(self, feat_size, target_size):
        super().__init__()
class Block(nn.Module):
    """A standard pre-norm Transformer block: causal self-attention followed
    by a 4x-expansion GELU MLP, each wrapped in a residual connection."""

    def __init__(self, config):
        super().__init__()
        self.ln1 = nn.LayerNorm(config.n_embd)
        self.ln2 = nn.LayerNorm(config.n_embd)
        self.attn = CausalSelfAttention(config)
        expansion = 4 * config.n_embd
        self.mlp = nn.Sequential(
            nn.Linear(config.n_embd, expansion),
            nn.GELU(),
            nn.Linear(expansion, config.n_embd),
            nn.Dropout(config.resid_pdrop),
        )

    def forward(self, x, pad=None, dtx=None):
        # attention sub-layer (pad forwarded for key masking), then MLP
        x = x + self.attn(self.ln1(x), pad)
        return x + self.mlp(self.ln2(x))
class BlockSequential(nn.Sequential):
    """``nn.Sequential`` variant whose forward threads the extra ``(pad, dtx)``
    arguments through every contained block."""

    def forward(self, x, pad=None, dtx=None):
        for block in self._modules.values():
            x = block(x, pad, dtx)
        return x
class DiceLossPSTH(nn.Module):
    """PSTH-style distribution loss.

    NOTE(review): despite the name, the dice computation below is commented
    out — what is actually returned is a cross-entropy between (a) the softmax
    of the layer-normed, sequence-summed logits and (b) a uniform distribution
    over the target ids. The constructor arguments are unused.
    """

    def __init__(self, size_average=True, smooth=1):
        super().__init__()

    def cross_entropy(self, input, target):
        # -sum(target * log(input)) averaged over the batch dimension
        return torch.mean(-torch.sum(target * torch.log(input), 1))

    def forward(self, logits, targets, smooth=1, class_weights=None):
        # sum logits over the sequence axis, then layer-norm over vocab
        total_logits = F.layer_norm(torch.sum(logits, dim=-2), [logits.size()[-1]])
        # probs = F.log_softmax(logits, dim=-1)
        probs = F.softmax(total_logits, dim=-1)
        # logits = F.gelu(logits)
        # probs = logits / (logits.max(dim=-1).values.unsqueeze(-1))
        # flatten label and prediction tensors
        outputs = probs.contiguous().view(-1)
        targets = targets.contiguous().view(-1)
        labels = torch.zeros_like(outputs)
        # uniform mass over target ids (targets used as *indices*; duplicate
        # ids overwrite, so with repeats the label mass sums to < 1)
        labels[targets] = 1 / len(targets)
        # intersection = (outputs * labels).sum()
        # dice = (2. * intersection + smooth) / (outputs.sum() + labels.sum() + smooth)
        return self.cross_entropy(outputs[None, ...], labels[None, ...])
class SetLoss(nn.Module):
    """Order-insensitive "set" loss over a sequence of logits.

    For each step n, compares the softmax of that step's logits against a
    uniform distribution over the *remaining* targets (targets[n:]), then
    averages over steps. Steps with no remaining targets contribute the
    cross-entropy against an all-zero distribution (which is 0).
    """

    def __init__(self):
        super().__init__()

    def cross_entropy(self, input, target):
        # -sum(target * log(input)) averaged over the batch dimension
        return torch.mean(-torch.sum(target * torch.log(input), 1))

    def forward(self, logits, targets):
        targets = targets.contiguous().view(-1)
        loss = 0
        for n_step, n_logits in enumerate(logits):
            n_logits = F.softmax(n_logits, dim=-1)
            # targets not yet "consumed" by earlier steps
            n_target = targets[n_step:]
            n_target_dist = torch.zeros_like(n_logits)
            if len(n_target) != 0:
                # uniform mass over the remaining target ids (indices)
                n_target_dist[n_target] = 1 / len(n_target)
            loss += self.cross_entropy(n_logits[None,...], n_target_dist[None, ...])
        return loss / len(logits)
class TruncatedLoss(nn.Module):
    """Truncated Lq loss with per-sample weights.

    NOTE(review): looks like the truncated generalized cross-entropy (Lq)
    loss for noisy labels — confirm against the intended reference. A frozen
    per-sample weight vector (size ``trainset_size``) gates each example;
    ``update_weight`` sets a sample's weight to 1 only while its Lq loss is
    below the truncation threshold ((1 - k^q) / q).
    """

    def __init__(self, q=0.8, k=0.2, trainset_size=50000):
        super(TruncatedLoss, self).__init__()
        self.q = q  # Lq exponent
        self.k = k  # truncation level
        # non-trainable per-sample gate, indexed by dataset position
        self.weight = torch.nn.Parameter(data=torch.ones(trainset_size, 1), requires_grad=False)

    def forward(self, logits, targets, indexes):
        # logits: (B, T, V); targets: (B, T); indexes: dataset positions (B,)
        p = F.softmax(logits, dim=-1)
        Yg = torch.gather(p, 2, targets.unsqueeze(2))  # prob of the true class
        loss = ((1-(Yg**self.q))/self.q)*self.weight[indexes] - ((1-(self.k**self.q))/self.q)*self.weight[indexes]
        loss = torch.mean(loss)
        return loss

    def update_weight(self, logits, targets, indexes):
        # NOTE(review): hard-codes torch.cuda.FloatTensor — CUDA-only as written.
        p = F.softmax(logits, dim=-1)
        Yg = torch.gather(p, 2, targets.unsqueeze(2))
        Lq = ((1-(Yg**self.q))/self.q)
        Lqk = np.repeat(((1-(self.k**self.q))/self.q), targets.size(0))
        Lqk = torch.from_numpy(Lqk).type(torch.cuda.FloatTensor)
        Lqk = torch.unsqueeze(Lqk, 1)
        condition = torch.gt(Lqk, Lq)
        self.weight[indexes] = condition.type(torch.cuda.FloatTensor)
# class PSTHLOSS(nn.Module):
# def __init__(self):
# super().__init__()
# def forward(self, logits, targets):
# total_logits = torch.sum(logits, dim=-2) # sum over sequence dimension
# probs = F.softmax(total_logits, dim=-1)
# outptu
class HungarianMatcher(nn.Module):
    """DETR-style optimal assignment between predicted steps and target ids.

    Builds a (T, n_targets) cost matrix cost[t, j] = 1 - p_t(target_j) and
    solves it with scipy's linear_sum_assignment. Runs under no_grad — the
    matching itself is not differentiated.
    """

    def __init__(self):
        super().__init__()

    @torch.no_grad()
    def forward(self, logits, targets):
        # logits: (T, C) scores for one sample; targets: 1-D target ids
        T, C = logits.size()
        probs = F.softmax(logits, dim=-1)
        # cost of assigning each step t to each target id
        cost_id = (1 - probs[:, targets]).cpu().view(T, -1).unsqueeze(0)
        indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_id.split(len(targets), -1))]
        # list of (row_idx, col_idx) index tensors, one per chunk
        return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
class KLDivLoss(nn.Module):
    """KL divergence between predicted log-probabilities and a target
    distribution.

    ``forward(logits, targets)`` log-softmaxes the logits and feeds them to
    ``nn.KLDivLoss`` (default elementwise-'mean' reduction — note PyTorch
    recommends 'batchmean' for the mathematically correct KL; kept as-is to
    preserve the existing loss scale).
    """

    def __init__(self):
        super().__init__()
        self.log_softmax = nn.LogSoftmax(dim=-1)
        self.KLdiv = nn.KLDivLoss()

    def forward(self, logits, targets):
        log_probs = self.log_softmax(logits)
        # BUG FIX: previously this passed log_probs.long(), truncating the
        # (negative, fractional) log-probabilities to integers and rendering
        # the loss meaningless. KLDivLoss expects float log-probabilities.
        return self.KLdiv(log_probs, targets)
class PoissonCrossEntropyLoss(nn.Module):
    """Poisson negative log-likelihood applied to log-softmax scores.

    The logits are normalised with LogSoftmax and treated as log-rates by
    ``nn.PoissonNLLLoss`` (its default ``log_input=True``).
    """

    def __init__(self):
        super().__init__()
        self.log_softmax = nn.LogSoftmax(dim=-1)
        self.nll_poisson = nn.PoissonNLLLoss()

    def forward(self, logits, targets):
        log_rates = self.log_softmax(logits)
        return self.nll_poisson(log_rates, targets)
class GPT(nn.Module):
    """The full GPT language model, with a context size of block_size.

    Multimodal variant: embeds previous-window ids (`id_prev`), current
    ids (`id`) with their spike times (`dt`), and encoded video `frames`,
    then decodes them with two Transformer decoders (the "perceiver" path)
    into id logits plus a time head.
    """

    def __init__(self, config):
        super().__init__()
        self.device = 'cpu'
        if torch.cuda.is_available():
            self.device = torch.cuda.current_device()
        self.config = config
        # input embedding stem
        self.n_embd = config.n_embd
        self.tok_emb = nn.Embedding(config.id_vocab_size, config.n_embd)
        self.pos_emb = PositionalEmbedding(config.n_embd, p_drop=0.2)
        # self.pos_emb_id = nn.Parameter(torch.zeros(1, config.id_block_size, config.n_embd))
        # learnt positional embedding for the frame tokens
        self.pos_emb_frames = nn.Parameter(torch.zeros(1, config.frame_block_size, config.n_embd))
        # self.temp_emb = TemporalEmbedding(config.n_embd, p_drop=0.2)
        # self.temp_emb = RotaryTemporalEmbedding(config.id_block_size)
        self.temp_emb = LearntTemporalEmbedding(config.id_block_size, config.n_embd)
        self.frame_temp_emb = LearntTemporalEmbedding(config.frame_block_size, config.n_embd)
        self.id_drop = nn.Dropout(config.id_drop)
        self.im_drop = nn.Dropout(config.im_drop)
        self.drop = nn.Dropout(config.embd_pdrop)

        # -- Visual Backbone -- #
        # self.visual_backbone = VideoFeaturesExtractor()
        # NOTE(review): VideoEncoder is declared elsewhere in this file with a
        # required n_embd parameter — confirm this no-argument call.
        self.video_encoder = VideoEncoder()
        # 20 equal time bins (0.00, 0.05, ..., 0.95), one per frame chunk
        frame_temp_emb = torch.tensor(list(itertools.chain(*[[n * 0.05] * (config.frame_block_size//20) for n in range(20)]))).unsqueeze(0)
        self.register_buffer("frame_temp_emb_seq", frame_temp_emb)

        # -- Contrastive Loss -- ##
        # self.proj_id = ProjectNorm(config.n_embd, config.n_embd)
        # self.proj_vid = VidProjectNorm(config.n_embd, config.n_embd)    # im_shape
        ## -- IM_Decoder -- ##
        # self.blocks_id = BlockSequential(*[Block(config) for _ in range(2)])
        # self.blocks_im = BlockSequential(*[Block(config) for _ in range(2)])
        # self.ln_f_id = nn.LayerNorm(config.n_embd)
        # self.ln_f_im = nn.LayerNorm(config.n_embd)
        ## -- Decoder -- ##
        # self.ln_f = nn.LayerNorm(config.n_embd)
        ## GPT
        # self.blocks = BlockSequential(*[Block(config) for _ in range(config.n_layer)])
        # self.ln_f = nn.LayerNorm(config.n_embd)
        ## enc_dec
        self.state_decoder = Decoder(config)
        self.ln_f_state_dec = nn.LayerNorm(config.n_embd)
        self.stimulus_decoder = Decoder(config)
        self.ln_f_stimulus_dec = nn.LayerNorm(config.n_embd)
        self.head = nn.Linear(config.n_embd, config.vocab_size, bias=False)

        ## -- Time -- ##
        # self.proj_time = TimeProjection(config.block_size, config.id_block_size, config.n_embd, config.n_dt)
        self.proj_time = ProjectNorm(config.n_embd, config.n_dt)
        # self.proj_time = ProjectNorm(config.n_embd, 1)

        ## -- PSTH -- ##
        # self.proj_psth = PSTHProjection(config)

        # Loss
        # self.dice_loss = DiceLossPSTH()
        # self.poisson_loss = PoissonCrossEntropyLoss()
        # self.hungarian_matcher = HungarianMatcher()
        # self.kldiv_loss = KLDivLoss()
        # self.truncated_loss = TruncatedLoss(trainset_size=config.data_size)
        # self.set_loss = SetLoss()
        # self.a = torch.tensor(0.5, requires_grad=True)

        self.block_size = config.block_size
        self.apply(self._init_weights)

        # optional per-head class weights, registered as buffers so they
        # follow the module across devices
        if config.class_weights is not None:
            for key in config.class_weights.keys():
                self.register_buffer(f"class_weights_{key}", config.class_weights[key])

        logger.info("number of parameters: %e", sum(p.numel() for p in self.parameters()))

    def get_block_size(self):
        """Return the model's total context length."""
        return self.block_size

    def _init_weights(self, module):
        """GPT-style init: N(0, 0.02) for Linear/Embedding, unit LayerNorm."""
        if isinstance(module, (nn.Linear, nn.Embedding)):
            module.weight.data.normal_(mean=0.0, std=0.02)
            if isinstance(module, nn.Linear) and module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def configure_optimizers(self, train_config):
        """
        Separates parameters into those who will experience weight decay and those that will not.

        With ``train_config.decay_weights`` set, Linear weights get weight
        decay while biases, LayerNorm/Embedding weights and the positional/
        temporal embeddings do not (AdamW); otherwise a plain Adam over all
        parameters is returned.
        """
        if train_config.decay_weights:
            decay = set()
            no_decay = set()
            whitelist_weight_modules = (torch.nn.Linear, )
            blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding)
            for mn, m in self.named_modules():
                for pn, p in m.named_parameters():
                    fpn = '%s.%s' % (mn, pn) if mn else pn # full param name
                    if pn.endswith('bias'):
                        # all biases will not be decayed
                        no_decay.add(fpn)
                    elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules):
                        # weights of whitelist modules will be weight decayed
                        decay.add(fpn)
                    elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules):
                        # weights of blacklist modules will NOT be weight decayed
                        no_decay.add(fpn)
                    else: no_decay.add(fpn)

            # special case the position embedding parameter in the root GPT module as not decayed
            black_list_mods = ['pos_emb', 'temp_emb']
            for mods in black_list_mods:
                for name, param in self.named_parameters():
                    if mods in name:
                        no_decay.add(name)    # also pos_emb

            # validate that we considered every parameter
            param_dict = {pn: p for pn, p in self.named_parameters()}
            # drop from no_decay anything already claimed by decay, so the
            # intersection check below is guaranteed to pass
            no_decay -= decay & no_decay
            inter_params = decay & no_decay
            union_params = decay | no_decay

            assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params), )
            assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" \
                                                        % (str(param_dict.keys() - union_params), )

            # create the pytorch optimizer object
            optim_groups = [
                {"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": train_config.weight_decay},
                {"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0},
            ]
            optimizer = torch.optim.AdamW(optim_groups, lr=train_config.learning_rate, betas=train_config.betas)
        else:
            parameters = self.parameters()
            optimizer = torch.optim.Adam(parameters, lr=train_config.learning_rate)

        return optimizer

    def process_features(self, x):
        """Embed all input streams.

        Returns (features, pad) where features holds 'id_prev', 'id' and
        'frames' embeddings (token + positional + temporal, then dropout).
        """
        # batch, block_size, feature
        p_idx = x['id_prev']
        idx = x['id']
        dtx = x['dt']
        dtx_prev = x['dt_prev']
        frames = self.video_encoder(x['frames'])
        pad = x['pad']

        b, t = idx.size()
        # b_p, t_p = p_idx.size()
        bf, tf = frames.size()[0:2]

        # forward the GPT model
        '''
        positional and temporal embeddings implemented in multiple ways, learnt, 
        fourrier decomposition and in the case of time, just passed as is. 
        '''
        # # Embeddings
        prev_id_position_embeddings = self.pos_emb(p_idx)
        prev_id_temporal_embeddings = self.temp_emb(dtx_prev.float())
        id_position_embeddings = self.pos_emb(idx)
        im_position_embeddings = self.pos_emb_frames
        temporal_embeddings = self.temp_emb(dtx.float())

        # Extract ID features
        prev_token_embeddings = self.id_drop(self.tok_emb(p_idx) + prev_id_temporal_embeddings + prev_id_position_embeddings)
        token_embeddings = self.tok_emb(idx) # each index maps to a (learnable) vector
        token_embeddings = token_embeddings + temporal_embeddings + id_position_embeddings
        token_embeddings = self.id_drop(token_embeddings)

        # Extract image features and add time embeddings
        im_temporal_embeddings = self.frame_temp_emb(self.frame_temp_emb_seq)
        im_embeddings = frames    # self.tok_emb(frames)
        im_embeddings = im_embeddings + im_position_embeddings + im_temporal_embeddings
        im_embeddings = self.im_drop(im_embeddings)   # separate pos emb?

        # Tidy up
        features = dict()
        features['id_prev'] = prev_token_embeddings
        features['id'] = token_embeddings
        features['frames'] = im_embeddings

        return features, pad

    def perceiver(self, features, pad):
        """Two-stage decode: state (prev ids) then stimulus (frames).

        NOTE(review): the state_decoder output is overwritten — the
        stimulus_decoder takes features['id'] (not x) as its target, so the
        first decode contributes nothing to the result. Confirm intent.
        """
        x = self.state_decoder(tgt=features['id'], memory=features['id_prev'], pad=pad)
        x = self.ln_f_state_dec(x)
        x = self.stimulus_decoder(tgt=features['id'], memory=features['frames'], pad=pad)
        x = self.ln_f_stimulus_dec(x)
        logits = self.head(x)
        return logits, x

    def enc_dec(self, features, pad):
        """Single-stage decode of ids against frame memory."""
        x = self.stimulus_decoder(tgt=features['id'], memory=features['frames'], pad=pad)
        x = self.ln_f_stimulus_dec(x)
        logits = self.head(x)
        return logits, x

    def GPTdecoder(self, features, pad, dtx=None):
        """Decoder-only path over concatenated [frames; ids].

        NOTE(review): self.blocks and self.ln_f are only created in
        commented-out __init__ lines — calling this as-is raises
        AttributeError. Confirm before re-enabling.
        """
        # image + neural features
        x = torch.cat((features['frames'], features['id']), dim=1)

        # Decoder
        x = self.blocks(x, pad, dtx)    # (B, T, C)
        x = self.ln_f(x)
        logits = self.head(x)
        # print(logits.shape)     # (B, T, Vocab)

        # logits_psth = x[:, -1]    # (B, C)

        return logits, x

    def forward(self, x, targets=None):
        """Run the model; when targets are given, also compute losses.

        Returns (preds, features, loss): preds holds 'id' logits and 'dt'
        time logits; loss is None without targets, else a dict with per-head
        cross-entropies (padded steps excluded per sample).
        """
        idx = x['id']
        dtx = x['dt']
        frames = x['frames']
        pad = x['pad']

        b, t = idx.size()
        # b, t = x['id'].shape[0], x['id'].shape[1] + x['id_prev'].shape[1]
        bf, tf = frames.size()[0:2]
        tf = self.config.frame_block_size
        # assert t + tf == self.config.block_size, f"{tf} {t}"
        # assert t <= self.block_size, "Cannot forward, model block size is exhausted"

        features, pad = self.process_features(x)
        logits, x = self.perceiver(features, pad)
        # logits, x = self.enc_dec(features, pad)
        # logits, x = self.GPTdecoder(features, pad)
        time = self.proj_time(x)    # (B, T_id, 1)

        # print(x[:, 0].shape)
        # psth = self.proj_psth(x)    # (B, Vocab_id)

        # if targets, calculate loss
        # calculate loss on logits up to padding token for each batch
        loss = None
        loss_frames = 0
        loss_id = []
        loss_time = []
        loss_dice = []
        loss_psth = []
        loss_hungarian = []
        if targets is not None:
            # loss_psth = self.dice_loss(psth, targets['modes'][:, tf:])
            for B, P in enumerate(pad):
                tf = 0
                # im_logits = logits[B, :tf]
                # im_targets = targets['frames'][B, :tf]
                # loss_frames += F.cross_entropy(im_logits.view(-1, im_logits.size(-1)), im_targets.view(-1))
                # slice off the last P (padded) steps of this sample
                id_logits = logits[B, tf:tf + t - P]
                id_targets = targets['id'][B, :t - P]
                loss_id_ = F.cross_entropy(id_logits.view(-1, id_logits.size(-1)), id_targets.view(-1), weight=self.class_weights_id)
                # if self.config.epoch >= 15:
                #     self.truncated_loss.update_weight(id_logits[None, ...], id_targets[None, ...], id_indexes[None, ...])
                # loss_id_ = self.truncated_loss(id_logits[None, ...], id_targets[None, ...], id_indexes[None, ...])
                time_preds = time[B, :t - P]
                time_targets = targets['dt'][B, :t - P]
                loss_time_ = F.cross_entropy(time_preds.view(-1, time_preds.size(-1)), time_targets.view(-1), weight=self.class_weights_dt)
                # loss_time_ = F.mse_loss(time_preds.squeeze(-1), time_targets)
                # loss_id_ = self.poisson_loss(id_logits.view(-1, id_logits.size(-1)), F.one_hot(id_targets, self.config.vocab_size))
                # if len(id_targets) > 0:
                #     indices = self.hungarian_matcher(id_logits, id_targets)
                #     probs_matching, targets_matching = id_logits[indices[0][0]], id_targets[indices[0][1]]
                #     loss_hungarian_ = F.cross_entropy(probs_matching, targets_matching, weight=self.class_weights).to(self.device)
                #     loss_hungarian.append(loss_hungarian_)
                #     # psth = self.proj_psth(x[B, -1])    # from the EOS position
                #     loss_psth.append(torch.nan_to_num(self.set_loss(id_logits, id_targets)))
                #     loss_psth_ = self.dice_loss(id_logits, id_targets)
                #     loss_psth.append(torch.nan_to_num(loss_psth_))

                # nan_to_num guards empty-slice losses (fully padded samples)
                loss_time.append(torch.nan_to_num(loss_time_))
                loss_id.append(torch.nan_to_num(loss_id_))

            loss = dict()
            # loss['frames'] = loss_frames / (b / 3)
            loss['id'] = sum(loss_id) / (b * 2)   # sum(loss_id) / (b * 2)    # / len(loss_id)
            loss['time'] = sum(loss_time) / (b * 2)
            # loss['dice'] = sum(loss_dice) / len(loss_dice)
            # loss['dt'] = loss_time / (b * 50)
            # loss['hungarian'] = sum(loss_hungarian) / (b * 2)
            # loss['psth'] = sum(loss_psth) / (b * 2)

            # drop heads that stayed plain floats (i.e. were never computed)
            for key in list(loss):
                if isinstance(loss[key], float):
                    del loss[key]

        preds = dict()
        preds['id'] = logits    # [:, tf:]    # only id logits
        preds['dt'] = time

        return preds, features, loss
from torch.nn.modules.activation import GELU, ReLU
import math
import numpy as np
import itertools
import logging
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.autograd import Variable
from torchvision.models.video import r3d_18
from scipy.optimize import linear_sum_assignment
from einops.layers.torch import Rearrange
logger = logging.getLogger(__name__)
def convert_weights(model: nn.Module):
    """Convert applicable model parameters to fp16, in place.

    Halves the weights/biases of Conv1d/Conv2d/Linear layers, the projection
    tensors of nn.MultiheadAttention, and any `text_projection` / `proj`
    attributes (CLIP-style module layout).
    """
    def _convert_weights_to_fp16(l):
        if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
            l.weight.data = l.weight.data.half()
            if l.bias is not None:
                l.bias.data = l.bias.data.half()

        if isinstance(l, nn.MultiheadAttention):
            # in/q/k/v projection weights plus packed bias tensors
            for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
                tensor = getattr(l, attr)
                if tensor is not None:
                    tensor.data = tensor.data.half()

        for name in ["text_projection", "proj"]:
            if hasattr(l, name):
                attr = getattr(l, name)
                if attr is not None:
                    attr.data = attr.data.half()

    model.apply(_convert_weights_to_fp16)
class GPTConfig:
    """Base GPT configuration.

    Class attributes are the defaults; any keyword argument passed to the
    constructor is set as an instance attribute and overrides them.
    """
    embd_pdrop = 0.2
    resid_pdrop = 0.2
    attn_pdrop = 0.2
    pos_pdrop = 0.2
    temp_pdrop = 0.2
    pos_emb = True
    temp_emb = True
    start_prune = 30
    epoch = 0

    def __init__(self, vocab_size, block_size, **kwargs):
        self.vocab_size = vocab_size
        self.block_size = block_size
        self.__dict__.update(kwargs)
class neuralGPTConfig:
    """Config preset with heavier dropout (n = 0.4) everywhere except the
    image stream; kwargs override any attribute."""
    n = 0.4
    im_drop = 0.2
    id_drop = n
    embd_pdrop = n
    resid_pdrop = n
    attn_pdrop = n
    pos_pdrop = n
    temp_pdrop = n
    pos_emb = True
    temp_emb = True

    def __init__(self, vocab_size, block_size, **kwargs):
        self.vocab_size = vocab_size
        self.block_size = block_size
        self.__dict__.update(kwargs)
class GPT1Config(GPTConfig):
    """GPT-1-sized hyper-parameters: 12 layers, 12 heads, 768-dim embeddings."""
    n_layer = 12
    n_head = 12
    n_embd = 768
class VideoFeaturesExtractor(nn.Module):
    """Extract spatio-temporal features with a pretrained R3D-18 backbone.

    Drops the network's last two children (the pooling/classifier head) and
    converts the remaining weights to fp16 via convert_weights. forward
    flattens the (B, C, T, H, W) feature map to (B, T*H*W, C) tokens.
    """

    def __init__(self):
        super().__init__()
        # r3d_18 minus its final two modules; downloads pretrained weights
        self.backbone = torch.nn.Sequential(*(list(r3d_18(pretrained=True).children())[:-2]))
        convert_weights(self.backbone)

    def forward(self, x):
        features = self.backbone(x)
        B, C, T, H, W = features.shape
        # channels-last, then flatten the T/H/W axes into one token axis
        features = features.permute(0, 2, 3, 4, 1)
        features = features.view(B, -1, C)
        return features
class VideoEncoder(nn.Module):
    """Split a video volume into 16x16 spatial patches, one token per patch.

    Input is assumed to be (B, c, T, H, W) with H and W divisible by 16 —
    TODO confirm against callers. Each patch is flattened to a
    16*16*c == n_embd dimensional token, giving (B, T*(H/16)*(W/16), n_embd).
    """

    def __init__(self, n_embd):
        super().__init__()
        # BUG FIX: `p1, p2 = 16` raised TypeError (an int cannot be unpacked),
        # and the f-string einops pattern referenced p1/p2 names that were
        # never bound inside the pattern. Use a literal pattern and pass the
        # axis lengths as keyword arguments instead.
        p1 = p2 = 16
        assert n_embd % (p1 * p2) == 0, "n_embd must be divisible by p1 * p2"
        c = n_embd // (p1 * p2)
        self.to_patch_embedding = nn.Sequential(
            Rearrange('b c t (h p1) (w p2) -> b (t h w) (p1 p2 c)',
                      c=c, p1=p1, p2=p2)
        )

    def forward(self, x):
        # (B, c, T, H, W) -> (B, T*(H/16)*(W/16), n_embd)
        return self.to_patch_embedding(x)
class CausalSelfAttention(nn.Module):
    """Multi-head masked self-attention with an optional random sparse mask
    during training and per-sample padding masking.

    The causal mask is a lower-triangular buffer of size block_size; the
    per-head attention map of the last forward pass is cached in ``self.att``.
    """

    def __init__(self, config):
        super().__init__()
        assert config.n_embd % config.n_head == 0
        self.config = config
        # per-head key/query/value projections (computed jointly)
        self.key = nn.Linear(config.n_embd, config.n_embd)
        self.query = nn.Linear(config.n_embd, config.n_embd)
        self.value = nn.Linear(config.n_embd, config.n_embd)
        self.attn_drop = nn.Dropout(config.attn_pdrop)
        self.resid_drop = nn.Dropout(config.resid_pdrop)
        self.proj = nn.Linear(config.n_embd, config.n_embd)
        self.register_buffer("mask", self.build_mask(config.block_size))
        self.n_head = config.n_head
        self.att = None   # last attention map, kept for inspection
        self.T = config.block_size

    def build_mask(self, block_size):
        """Lower-triangular causal mask of shape (1, 1, T, T)."""
        mask = torch.tril(torch.ones((block_size, block_size)),
                                     ).view(1, 1, block_size, block_size)
        return mask

    def generate_sparse_mask(self, att, p, config):
        """Randomly mask key columns of ``att`` with probability p, ensuring
        no query row loses all of its keys."""
        assert p >= 0 and p <= 1, "p should be in [0, 1]"
        T = config.block_size
        mask = torch.rand((1, T)) < p
        mask = mask.repeat(T, 1)

        mask[0, 0] = False  # don't mask 1st step
        # check if any step is fully masked and unmask one random position in it
        idx_all_true = (True == torch.all(mask, dim=0)).nonzero()
        for step in idx_all_true:
            sampler = torch.distributions.Uniform(low=0, high=step.item()+1)
            idx_false = sampler.sample((1,1)).long()
            mask[step, idx_false] = False

        # mask = mask.repeat(T, 1)
        mask = mask.view(1, 1, T, T).cuda() if att.is_cuda else mask.view(1, 1, T, T)
        att = att.masked_fill(mask, float('-inf'))
        return att

    def forward(self, x, pad=None, dtx=None):
        """Attend over x (B, T, C); ``pad`` masks the last pad[idx] key
        positions per sample (relative to self.T — NOTE(review): not the
        runtime T, confirm when T < block_size)."""
        # B = Batch, T = Sequence, C = n_embed
        B, T, C = x.size()

        # calculate query, key, values for all head in batch and move head forward to the batch dim
        k = self.key(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
        q = self.query(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
        v = self.value(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)

        # # apply rotary embeddings
        # if dtx is not None:
        #     q, k = self.rotary_embedding(q, k, dtx)

        # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
        att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
        att = att.masked_fill(self.mask[:,:,:T,:T] == 0, float('-inf'))
        if self.training:
            att = self.generate_sparse_mask(att, 0.25, self.config)
        if pad is not None:
            for idx, i in enumerate(pad):
                att[idx, :, :, self.T - i:] = float('-inf')   # only able to see first padding token

        att = F.softmax(att, dim=-1)
        att = self.attn_drop(att)
        self.att = att
        y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
        y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side

        # output projection
        y = self.resid_drop(self.proj(y))
        return y
class PositionalEmbedding(nn.Module):
    """Fixed sinusoidal positional encoding (the classic PE function) + dropout.

    Precomputes sin/cos encodings for up to ``max_len`` positions; ``forward``
    returns the first ``x.size(1)`` of them with shape (1, T, n_embd). Only
    the *length* of ``x`` is used — callers add the result to their embeddings.
    """

    def __init__(self, n_embd, p_drop, max_len=1500):
        super().__init__()
        self.dropout = nn.Dropout(p=p_drop)
        # Compute the positional encodings once in log space.
        pe = torch.zeros(max_len, n_embd)
        position = torch.arange(0, max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, n_embd, 2) *
                             -(math.log(10000.0) / n_embd))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0)
        self.register_buffer('pe', pe)

    def forward(self, x):
        # FIX: dropped the deprecated torch.autograd.Variable wrapper (a no-op
        # since torch 0.4); the registered buffer already has requires_grad=False.
        return self.dropout(self.pe[:, :x.size(1)])
# class RotarySpatioTemporalEmbedding(nn.Module):
# """ Rotary temporal embeddings - block_size = id_blk_sz """
# def __init__(self, config):
# super().__init__()
# self.frame_block_size = config.frame_block_size
# self.id_block_size = config.id_block_size
# self.emb = RotaryEmbedding(dim=32)
# def forward(self, q, k, t):
# b = t.shape[0]
# tf = self.frame_block_size
# queries = []
# keys = []
# for B in range(b):
# im_temp_emb = torch.tensor([-0.5] * (tf//2) + [0.5] * (tf//2))
# im_pos_emb = torch.arange(self.frame_block_size)
# im_emb = torch.stack([im_temp_emb, im_pos_emb], dim=0)
# id_temp_emb = self.temp_emb(t[B], cache_key=self.block_size)
# freqs = self.emb(torch.cat(im_emb, id_temp_emb))
# queries.append(apply_rotary_emb(freqs, q[B][None, ...]))
# keys.append(apply_rotary_emb(freqs, k[B][None, ...]))
# q, k = torch.cat(queries), torch.cat(keys)
# return q, k
class TemporalEmbedding(nn.Module):
    """Encode temporal information with fixed Fourier (sin/cos) signals.

    Same construction as PositionalEmbedding: returns the first ``x.size(1)``
    precomputed encodings, shape (1, T, n_embd), after dropout.
    """

    def __init__(self, n_embd, p_drop, max_len=1500):
        super().__init__()
        self.dropout = nn.Dropout(p=p_drop)
        # Compute the positional encodings once in log space.
        pe = torch.zeros(max_len, n_embd)
        position = torch.arange(0, max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, n_embd, 2) *
                             -(math.log(10000.0) / n_embd))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0)
        self.register_buffer('pe', pe)

    def forward(self, x):
        # FIX: dropped the deprecated torch.autograd.Variable wrapper (a no-op
        # since torch 0.4); the registered buffer already has requires_grad=False.
        return self.dropout(self.pe[:, :x.size(1)])
class LearntTemporalEmbedding(nn.Module):
    """
    Learnt embedding of scalar times: projects a (B, T) time sequence to
    (B, T, n_embd) through a small two-layer GELU MLP with dropout.
    """

    def __init__(self, block_sz, n_embd, p_drop=0.2):
        super().__init__()
        hidden = n_embd // 2
        self.temp_emb = nn.Sequential(
            nn.Linear(1, hidden),
            nn.GELU(),
            nn.Linear(hidden, n_embd),
            nn.Dropout(p_drop),
        )

    def forward(self, x):
        # (B, T) -> (B, T, 1) -> (B, T, n_embd)
        scalar_times = x.unsqueeze(-1)
        return self.temp_emb(scalar_times)
class Decoder(nn.Module):
    """Seq2seq decoder wrapper.

    Despite the name this wraps a full ``nn.Transformer`` (3 encoder layers +
    ``config.n_layer`` decoder layers): ``memory`` is passed through the
    encoder stack before cross-attention. A causal target mask of size
    ``config.id_block_size`` is registered once as a buffer.
    """

    def __init__(self, config):
        super().__init__()
        # decoder_layer = nn.TransformerDecoderLayer(config.n_embd, config.n_head,
        #                                            activation='gelu', dropout=0.2, batch_first=True)
        # self.decoder = nn.TransformerDecoder(decoder_layer, config.n_layer)
        self.decoder = nn.Transformer(d_model=config.n_embd, nhead=config.n_head,
                                      num_encoder_layers=3, num_decoder_layers=config.n_layer,
                                      activation="gelu", dropout=0.4, batch_first=True)
        self.register_buffer("tgt_mask", self.generate_square_subsequent_mask(config.id_block_size))
        # self.register_buffer("tgt_pad_mask", self.generate_padding_mask(config.ids_block_size))
        self.T = config.id_block_size

    def generate_square_subsequent_mask(self, sz: int, pad=None):
        """Square causal mask: masked positions float('-inf'), allowed 0.0."""
        mask = (torch.triu(torch.ones(sz, sz), diagonal=0) == 1).transpose(0, 1)
        mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
        return mask

    def generate_padding_mask(self, sz: int, pad=None):
        """All-False (1, sz) boolean mask template for padding."""
        mask = torch.zeros(1, sz, dtype=torch.bool)
        return mask

    def generate_sparse_mask(self, sz: int, pad=None):
        """Square mask allowing ~75% random positions (teacher-forcing-style);
        currently unused in forward."""
        rand_mat = torch.rand(1, sz)
        k = round(0.75 * sz)
        k_th_quant = torch.topk(rand_mat, k, largest = False)[0][:,-1:]
        bool_tensor = rand_mat <= k_th_quant
        mask = torch.where(bool_tensor, torch.tensor(1), torch.tensor(0)).repeat(sz, 1)
        mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
        return mask.cuda(self.tgt_mask.get_device()) if self.tgt_mask.is_cuda else mask

    def build_padding_mask(self, tgt, pad):
        """(B, T) boolean mask: True marks the last pad[B] (padded) steps."""
        # mask = self.tgt_pad_mask.repeat(tgt.shape[0], 1)
        mask = torch.zeros(tgt.shape[0], self.T, dtype=torch.bool)
        for B, P in enumerate(pad):
            mask[B, self.T - P:] = True
        return mask # .to(torch.cuda.current_device())

    def forward(self, tgt, memory, pad):
        # NOTE(review): ``pad`` is currently ignored — the padding/sparse
        # mask helpers above are built but not wired in here.
        # padding_mask = self.build_padding_mask(tgt, pad)
        # tgt_mask = self.generate_sparse_mask(self.T) if self.training else self.tgt_mask
        return self.decoder(src=memory, tgt=tgt, tgt_mask=self.tgt_mask,
                            tgt_key_padding_mask=None)
class ProjectNorm(nn.Module):
    """LayerNorm followed by a 2x-expansion GELU MLP projecting to target_size."""

    def __init__(self, feat_size, target_size):
        super().__init__()
        self.ln = nn.LayerNorm(feat_size)
        hidden = math.floor(2 * feat_size)
        self.mlp = nn.Sequential(
            nn.Linear(feat_size, hidden, bias=False),
            nn.GELU(),
            nn.Linear(hidden, target_size, bias=False),
        )

    def forward(self, x):
        normed = self.ln(x)
        return self.mlp(normed)
class TimeProjection(nn.Module):
    """Project (B, seq, C) features to (B, id_seq, target) time predictions.

    First compresses the sequence axis (seq -> id_seq) with an MLP applied
    across time, then maps the feature axis (C -> target) per step.
    """

    def __init__(self, seq_size, id_seq_size, feat_size, target_size):
        super().__init__()
        # sequence-axis compressor: seq_size -> id_seq_size
        self.mlp_seq = nn.Sequential(
            nn.Linear(seq_size, id_seq_size),
            nn.ReLU(),
            nn.Dropout(p=0.3),
            nn.Linear(id_seq_size, id_seq_size),
        )
        # feature-axis head: feat_size -> target_size
        self.mlp_t = nn.Sequential(
            nn.Linear(feat_size, feat_size // 2),
            nn.ReLU(),
            nn.Dropout(p=0.3),
            nn.Linear(feat_size // 2, target_size),
        )

    def forward(self, x):
        x = x.transpose(1, 2)      # (B, T, C) -> (B, C, T)
        x = self.mlp_seq(x)        # (B, C, id_seq)
        x = x.transpose(1, 2)      # (B, id_seq, C)
        return self.mlp_t(x)       # (B, id_seq, target)
class PSTHProjection(nn.Module):
    """Bias-free two-layer MLP head: n_embd features -> id_vocab_size logits."""

    def __init__(self, config):
        super().__init__()
        width = 4 * config.n_embd
        self.mlp = nn.Sequential(
            nn.Linear(config.n_embd, width, bias=False),
            nn.Dropout(p=0.2),
            nn.GELU(),
            nn.Linear(width, config.id_vocab_size, bias=False),
        )

    def forward(self, x):
        return self.mlp(x)
# class PSTHProjection(nn.Module):
# def __init__(self, config):
# super().__init__()
# self.mlp_seq = nn.Sequential(
# nn.Linear(config.id_block_size, config.id_block_size // 2, bias=False),
# nn.GELU(),
# nn.Dropout(p=0.2),
# nn.Linear(config.id_block_size // 2, 1, bias=False)
# )
# self.mlp_t = nn.Sequential(
# nn.Linear(config.n_embd, config.n_embd * 4, bias=False),
# nn.GELU(),
# nn.Dropout(p=0.2),
# nn.Linear(config.n_embd * 4, config.id_vocab_size, bias=False)
# )
# def forward(self, x):
# x = x.transpose(-1, -2) # B, T, C -> B, C, T
# x = self.mlp_seq(x) # B, C, 1
# x = x.transpose(-2, -1) # B, 1, Vocab_id
# return self.mlp_t(x)
class TimeRNN(nn.Module):
    """Stub module: accepts sizing arguments but defines no layers or forward pass."""

    def __init__(self, feat_size, target_size):
        super().__init__()
class Block(nn.Module):
    """Pre-norm transformer block: causal self-attention and a GELU MLP,
    each applied with a residual connection."""

    def __init__(self, config):
        super().__init__()
        self.ln1 = nn.LayerNorm(config.n_embd)
        self.ln2 = nn.LayerNorm(config.n_embd)
        self.attn = CausalSelfAttention(config)
        hidden = 4 * config.n_embd
        self.mlp = nn.Sequential(
            nn.Linear(config.n_embd, hidden),
            nn.GELU(),
            nn.Linear(hidden, config.n_embd),
            nn.Dropout(config.resid_pdrop),
        )

    def forward(self, x, pad=None, dtx=None):
        x = x + self.attn(self.ln1(x), pad)
        x = x + self.mlp(self.ln2(x))
        return x
class BlockSequential(nn.Sequential):
    """nn.Sequential variant that threads (pad, dtx) through every child call."""

    def forward(self, x, pad=None, dtx=None):
        for child in self:
            x = child(x, pad, dtx)
        return x
class DiceLossPSTH(nn.Module):
    """Cross-entropy between the softmax of time-summed logits and a uniform
    distribution over the target ids (PSTH-style set comparison).

    `size_average`, `smooth`, and `class_weights` are accepted for API
    compatibility but not used by the current computation.
    """

    def __init__(self, size_average=True, smooth=1):
        super().__init__()

    def cross_entropy(self, input, target):
        per_row = -torch.sum(target * torch.log(input), 1)
        return torch.mean(per_row)

    def forward(self, logits, targets, smooth=1, class_weights=None):
        # Pool logits over the sequence axis, normalize, then softmax.
        pooled = torch.sum(logits, dim=-2)
        pooled = F.layer_norm(pooled, [logits.size(-1)])
        probs = F.softmax(pooled, dim=-1).flatten()
        flat_targets = targets.contiguous().view(-1)
        # Uniform mass over the target indices.
        target_dist = torch.zeros_like(probs)
        target_dist[flat_targets] = 1 / len(flat_targets)
        return self.cross_entropy(probs[None, ...], target_dist[None, ...])
class SetLoss(nn.Module):
    """At each step t, penalize the step-t logits against a uniform distribution
    over the targets not yet emitted (targets[t:]); averaged over steps."""

    def __init__(self):
        super().__init__()

    def cross_entropy(self, input, target):
        return torch.mean(-torch.sum(target * torch.log(input), 1))

    def forward(self, logits, targets):
        flat_targets = targets.contiguous().view(-1)
        total = 0
        for step, step_logits in enumerate(logits):
            step_probs = F.softmax(step_logits, dim=-1)
            remaining = flat_targets[step:]
            wanted = torch.zeros_like(step_probs)
            if len(remaining) != 0:
                wanted[remaining] = 1 / len(remaining)
            total += self.cross_entropy(step_probs[None, ...], wanted[None, ...])
        return total / len(logits)
class TruncatedLoss(nn.Module):
    """Generalized cross-entropy (Lq) loss with per-sample truncation weights
    (in the style of Zhang & Sabuncu, 2018). `weight` holds one frozen 0/1
    weight per training example, indexed by `indexes`.

    NOTE: update_weight uses torch.cuda.FloatTensor and therefore requires CUDA,
    matching the original implementation.
    """

    def __init__(self, q=0.8, k=0.2, trainset_size=50000):
        super().__init__()
        self.q = q
        self.k = k
        self.weight = torch.nn.Parameter(data=torch.ones(trainset_size, 1), requires_grad=False)

    def forward(self, logits, targets, indexes):
        probs = F.softmax(logits, dim=-1)
        target_probs = torch.gather(probs, 2, targets.unsqueeze(2))
        lq = (1 - (target_probs ** self.q)) / self.q
        lqk = (1 - (self.k ** self.q)) / self.q
        per_sample = lq * self.weight[indexes] - lqk * self.weight[indexes]
        return torch.mean(per_sample)

    def update_weight(self, logits, targets, indexes):
        probs = F.softmax(logits, dim=-1)
        target_probs = torch.gather(probs, 2, targets.unsqueeze(2))
        lq = (1 - (target_probs ** self.q)) / self.q
        lqk = np.repeat((1 - (self.k ** self.q)) / self.q, targets.size(0))
        lqk = torch.from_numpy(lqk).type(torch.cuda.FloatTensor)
        lqk = torch.unsqueeze(lqk, 1)
        keep = torch.gt(lqk, lq)
        self.weight[indexes] = keep.type(torch.cuda.FloatTensor)
# class PSTHLOSS(nn.Module):
# def __init__(self):
# super().__init__()
# def forward(self, logits, targets):
# total_logits = torch.sum(logits, dim=-2) # sum over sequence dimension
# probs = F.softmax(total_logits, dim=-1)
# outptu
class HungarianMatcher(nn.Module):
    """Optimal bipartite matching between predicted id distributions and target
    ids via scipy's linear_sum_assignment; runs without gradients."""

    def __init__(self):
        super().__init__()

    @torch.no_grad()
    def forward(self, logits, targets):
        n_preds = logits.size(0)
        probs = F.softmax(logits, dim=-1)
        # Cost of assigning prediction row i to target j: 1 - p_i(target_j).
        cost = (1 - probs[:, targets]).cpu().view(n_preds, -1).unsqueeze(0)
        chunks = cost.split(len(targets), -1)
        pairs = [linear_sum_assignment(chunk[i]) for i, chunk in enumerate(chunks)]
        return [(torch.as_tensor(rows, dtype=torch.int64), torch.as_tensor(cols, dtype=torch.int64))
                for rows, cols in pairs]
class KLDivLoss(nn.Module):
    """KL divergence between softmax(logits) and a target distribution.

    nn.KLDivLoss expects the *input* to be float log-probabilities. The
    previous implementation called `log_probs.long()`, which truncated every
    log-probability to an integer (e.g. log(0.25) -> -1) and detached the
    computation from the autograd graph; the float log-probs are now passed
    through unchanged.
    """

    def __init__(self):
        super().__init__()
        self.log_softmax = nn.LogSoftmax(dim=-1)
        self.KLdiv = nn.KLDivLoss()

    def forward(self, logits, targets):
        log_probs = self.log_softmax(logits)
        # BUG FIX: was `self.KLdiv(log_probs.long(), targets)`.
        return self.KLdiv(log_probs, targets)
class PoissonCrossEntropyLoss(nn.Module):
    """Poisson negative log-likelihood applied to log-softmax outputs
    (PoissonNLLLoss defaults to log_input=True, so log-probs are valid input)."""

    def __init__(self):
        super().__init__()
        self.log_softmax = nn.LogSoftmax(dim=-1)
        # self.softmax = nn.Softmax(dim=-1)
        self.nll_poisson = nn.PoissonNLLLoss()
        # self.nll_poisson = nn.NLLLoss()

    def forward(self, logits, targets):
        return self.nll_poisson(self.log_softmax(logits), targets)
class GPT(nn.Module):
"""Multimodal GPT-style model over neuron-id tokens and video-frame features.

Combines token/positional/temporal embeddings for current and previous id
sequences with encoded frame features, then runs two Decoder stacks
(state + stimulus) followed by id-logit and time-projection heads.
NOTE(review): PositionalEmbedding, LearntTemporalEmbedding, VideoEncoder,
Decoder, and `logger` are defined elsewhere in this file/project.
"""
def __init__(self, config):
"""Build embeddings, the visual backbone, the decoders, and output heads
from `config`; registers optional per-key class-weight buffers."""
super().__init__()
self.device = 'cpu'
if torch.cuda.is_available():
self.device = torch.cuda.current_device()
self.config = config
# input embedding stem
self.n_embd = config.n_embd
self.tok_emb = nn.Embedding(config.id_vocab_size, config.n_embd)
self.pos_emb = PositionalEmbedding(config.n_embd, p_drop=0.2)
# self.pos_emb_id = nn.Parameter(torch.zeros(1, config.id_block_size, config.n_embd))
self.pos_emb_frames = nn.Parameter(torch.zeros(1, config.frame_block_size, config.n_embd))
# self.temp_emb = TemporalEmbedding(config.n_embd, p_drop=0.2)
# self.temp_emb = RotaryTemporalEmbedding(config.id_block_size)
self.temp_emb = LearntTemporalEmbedding(config.id_block_size, config.n_embd)
self.frame_temp_emb = LearntTemporalEmbedding(config.frame_block_size, config.n_embd)
self.id_drop = nn.Dropout(config.id_drop)
self.im_drop = nn.Dropout(config.im_drop)
self.drop = nn.Dropout(config.embd_pdrop)
# -- Visual Backbone -- #
# self.visual_backbone = VideoFeaturesExtractor()
self.video_encoder = VideoEncoder()
# 20 equal chunks of frame positions, each tagged with a time value n*0.05
# (assumes frame_block_size is divisible by 20 — TODO confirm).
frame_temp_emb = torch.tensor(list(itertools.chain(*[[n * 0.05] * (config.frame_block_size//20) for n in range(20)]))).unsqueeze(0)
self.register_buffer("frame_temp_emb_seq", frame_temp_emb)
# -- Contrastive Loss -- ##
# self.proj_id = ProjectNorm(config.n_embd, config.n_embd)
# self.proj_vid = VidProjectNorm(config.n_embd, config.n_embd) # im_shape
## -- IM_Decoder -- ##
# self.blocks_id = BlockSequential(*[Block(config) for _ in range(2)])
# self.blocks_im = BlockSequential(*[Block(config) for _ in range(2)])
# self.ln_f_id = nn.LayerNorm(config.n_embd)
# self.ln_f_im = nn.LayerNorm(config.n_embd)
## -- Decoder -- ##
# self.ln_f = nn.LayerNorm(config.n_embd)
## GPT
# self.blocks = BlockSequential(*[Block(config) for _ in range(config.n_layer)])
# self.ln_f = nn.LayerNorm(config.n_embd)
## enc_dec
# Active architecture: two encoder-decoder stacks used by perceiver().
self.state_decoder = Decoder(config)
self.ln_f_state_dec = nn.LayerNorm(config.n_embd)
self.stimulus_decoder = Decoder(config)
self.ln_f_stimulus_dec = nn.LayerNorm(config.n_embd)
self.head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
## -- Time -- ##
# self.proj_time = TimeProjection(config.block_size, config.id_block_size, config.n_embd, config.n_dt)
self.proj_time = ProjectNorm(config.n_embd, config.n_dt)
# self.proj_time = ProjectNorm(config.n_embd, 1)
## -- PSTH -- ##
# self.proj_psth = PSTHProjection(config)
# Loss
# self.dice_loss = DiceLossPSTH()
# self.poisson_loss = PoissonCrossEntropyLoss()
# self.hungarian_matcher = HungarianMatcher()
# self.kldiv_loss = KLDivLoss()
# self.truncated_loss = TruncatedLoss(trainset_size=config.data_size)
# self.set_loss = SetLoss()
# self.a = torch.tensor(0.5, requires_grad=True)
self.block_size = config.block_size
self.apply(self._init_weights)
# Optional per-loss class weights become buffers named class_weights_<key>
# (forward() reads class_weights_id and class_weights_dt).
if config.class_weights is not None:
for key in config.class_weights.keys():
self.register_buffer(f"class_weights_{key}", config.class_weights[key])
logger.info("number of parameters: %e", sum(p.numel() for p in self.parameters()))
def get_block_size(self):
"""Return the configured total context length (config.block_size)."""
return self.block_size
def _init_weights(self, module):
"""GPT-style initialization, applied via self.apply() in __init__:
Linear/Embedding weights ~ N(0, 0.02), Linear biases zeroed, and
LayerNorm reset to identity (weight 1, bias 0).
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=0.02)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def configure_optimizers(self, train_config):
"""Build the optimizer.

With train_config.decay_weights, parameters are split into a weight-decayed
group (Linear weights) and a non-decayed group (all biases, LayerNorm and
Embedding weights, and anything whose name contains 'pos_emb'/'temp_emb'),
then handed to AdamW. Otherwise a plain Adam over all parameters is used.
"""
if train_config.decay_weights:
decay = set()
no_decay = set()
whitelist_weight_modules = (torch.nn.Linear, )
blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding)
for mn, m in self.named_modules():
for pn, p in m.named_parameters():
fpn = '%s.%s' % (mn, pn) if mn else pn # full param name
if pn.endswith('bias'):
# all biases will not be decayed
no_decay.add(fpn)
elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules):
# weights of whitelist modules will be weight decayed
decay.add(fpn)
elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules):
# weights of blacklist modules will NOT be weight decayed
no_decay.add(fpn)
else: no_decay.add(fpn)
# special case the position embedding parameter in the root GPT module as not decayed
black_list_mods = ['pos_emb', 'temp_emb']
for mods in black_list_mods:
for name, param in self.named_parameters():
if mods in name:
no_decay.add(name) # also pos_emb
# validate that we considered every parameter
param_dict = {pn: p for pn, p in self.named_parameters()}
# Resolve any overlap in favor of the decay set before the sanity checks.
no_decay -= decay & no_decay
inter_params = decay & no_decay
union_params = decay | no_decay
assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params), )
assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" \
% (str(param_dict.keys() - union_params), )
# create the pytorch optimizer object
optim_groups = [
{"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": train_config.weight_decay},
{"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0},
]
optimizer = torch.optim.AdamW(optim_groups, lr=train_config.learning_rate, betas=train_config.betas)
else:
parameters = self.parameters()
optimizer = torch.optim.Adam(parameters, lr=train_config.learning_rate)
return optimizer
def process_features(self, x):
"""Embed the raw batch dict into model-space features.

Returns (features, pad) where features has keys 'id_prev', 'id', and
'frames', each the sum of token/positional/temporal embeddings with
dropout applied. `x` is assumed to be a dict with keys 'id', 'id_prev',
'dt', 'dt_prev', 'frames', and 'pad' — TODO confirm against the dataset.
"""
# batch, block_size, feature
p_idx = x['id_prev']
idx = x['id']
dtx = x['dt']
dtx_prev = x['dt_prev']
frames = self.video_encoder(x['frames'])
pad = x['pad']
b, t = idx.size()
# b_p, t_p = p_idx.size()
bf, tf = frames.size()[0:2]
# forward the GPT model
# # Embeddings
prev_id_position_embeddings = self.pos_emb(p_idx)
prev_id_temporal_embeddings = self.temp_emb(dtx_prev.float())
id_position_embeddings = self.pos_emb(idx)
im_position_embeddings = self.pos_emb_frames
temporal_embeddings = self.temp_emb(dtx.float())
# Extract ID features
prev_token_embeddings = self.id_drop(self.tok_emb(p_idx) + prev_id_temporal_embeddings + prev_id_position_embeddings)
token_embeddings = self.tok_emb(idx) # each index maps to a (learnable) vector
token_embeddings = token_embeddings + temporal_embeddings + id_position_embeddings
token_embeddings = self.id_drop(token_embeddings)
# Extract image features and add time embeddings
im_temporal_embeddings = self.frame_temp_emb(self.frame_temp_emb_seq)
im_embeddings = frames # self.tok_emb(frames)
im_embeddings = im_embeddings + im_position_embeddings + im_temporal_embeddings
im_embeddings = self.im_drop(im_embeddings) # separate pos emb?
# Tidy up
features = dict()
features['id_prev'] = prev_token_embeddings
features['id'] = token_embeddings
features['frames'] = im_embeddings
return features, pad
def perceiver(self, features, pad):
"""Two-stage decoding: condition id tokens on previous-window state, then
on the stimulus (frame) features; returns (logits, hidden).

NOTE(review): the state-decoder output `x` is immediately overwritten —
the stimulus decoder consumes features['id'] again rather than the
normalized state output. Confirm whether chaining was intended.
"""
x = self.state_decoder(tgt=features['id'], memory=features['id_prev'], pad=pad)
x = self.ln_f_state_dec(x)
x = self.stimulus_decoder(tgt=features['id'], memory=features['frames'], pad=pad)
x = self.ln_f_stimulus_dec(x)
logits = self.head(x)
return logits, x
def enc_dec(self, features, pad):
"""Single-decoder alternative to perceiver(): condition the id tokens on the
stimulus (frame) features only; returns (logits, hidden). Currently unused
(the call in forward() is commented out)."""
x = self.stimulus_decoder(tgt=features['id'], memory=features['frames'], pad=pad)
x = self.ln_f_stimulus_dec(x)
logits = self.head(x)
return logits, x
def GPTdecoder(self, features, pad, dtx=None):
"""Decoder-only alternative over the concatenated [frames; ids] sequence.

NOTE(review): as the file stands, self.blocks and self.ln_f are commented
out in __init__, so calling this would raise AttributeError — confirm
before re-enabling this path (the call in forward() is commented out).
"""
# image + neural features
x = torch.cat((features['frames'], features['id']), dim=1)
# Decoder
x = self.blocks(x, pad, dtx) # (B, T, C)
x = self.ln_f(x)
logits = self.head(x)
# print(logits.shape) # (B, T, Vocab)
# logits_psth = x[:, -1] # (B, C)
return logits, x
def forward(self, x, targets=None):
"""Full forward pass.

Embeds the batch, decodes via perceiver(), and projects to id logits and
time-bin logits. When `targets` is given, per-batch-row cross-entropy
losses are computed over the unpadded prefix of each sequence.

Returns (preds, features, loss): preds has 'id' logits and 'dt' time
logits; loss is None without targets, else a dict with 'id' and 'time'.
"""
idx = x['id']
dtx = x['dt']
frames = x['frames']
pad = x['pad']
b, t = idx.size()
# b, t = x['id'].shape[0], x['id'].shape[1] + x['id_prev'].shape[1]
bf, tf = frames.size()[0:2]
# tf is immediately overridden with the configured frame block size.
tf = self.config.frame_block_size
# assert t + tf == self.config.block_size, f"{tf} {t}"
# assert t <= self.block_size, "Cannot forward, model block size is exhausted"
features, pad = self.process_features(x)
logits, x = self.perceiver(features, pad)
# logits, x = self.enc_dec(features, pad)
# logits, x = self.GPTdecoder(features, pad)
time = self.proj_time(x) # (B, T_id, 1)
# print(x[:, 0].shape)
# psth = self.proj_psth(x) # (B, Vocab_id)
# if targets, calculate loss
# calculate loss on logits up to padding token for each batch
loss = None
loss_frames = 0
loss_id = []
loss_time = []
loss_dice = []
loss_psth = []
loss_hungarian = []
if targets is not None:
# loss_psth = self.dice_loss(psth, targets['modes'][:, tf:])
for B, P in enumerate(pad):
# tf reset to 0: logits are id-only here (no frame prefix to skip).
tf = 0
# im_logits = logits[B, :tf]
# im_targets = targets['frames'][B, :tf]
# loss_frames += F.cross_entropy(im_logits.view(-1, im_logits.size(-1)), im_targets.view(-1))
id_logits = logits[B, tf:tf + t - P]
id_targets = targets['id'][B, :t - P]
loss_id_ = F.cross_entropy(id_logits.view(-1, id_logits.size(-1)), id_targets.view(-1), weight=self.class_weights_id)
# if self.config.epoch >= 15:
# self.truncated_loss.update_weight(id_logits[None, ...], id_targets[None, ...], id_indexes[None, ...])
# loss_id_ = self.truncated_loss(id_logits[None, ...], id_targets[None, ...], id_indexes[None, ...])
time_preds = time[B, :t - P]
time_targets = targets['dt'][B, :t - P]
loss_time_ = F.cross_entropy(time_preds.view(-1, time_preds.size(-1)), time_targets.view(-1), weight=self.class_weights_dt)
# loss_time_ = F.mse_loss(time_preds.squeeze(-1), time_targets)
# loss_id_ = self.poisson_loss(id_logits.view(-1, id_logits.size(-1)), F.one_hot(id_targets, self.config.vocab_size))
# if len(id_targets) > 0:
# indices = self.hungarian_matcher(id_logits, id_targets)
# probs_matching, targets_matching = id_logits[indices[0][0]], id_targets[indices[0][1]]
# loss_hungarian_ = F.cross_entropy(probs_matching, targets_matching, weight=self.class_weights).to(self.device)
# loss_hungarian.append(loss_hungarian_)
# # psth = self.proj_psth(x[B, -1]) # from the EOS position
# loss_psth.append(torch.nan_to_num(self.set_loss(id_logits, id_targets)))
# loss_psth_ = self.dice_loss(id_logits, id_targets)
# loss_psth.append(torch.nan_to_num(loss_psth_))
loss_time.append(torch.nan_to_num(loss_time_))
loss_id.append(torch.nan_to_num(loss_id_))
loss = dict()
# loss['frames'] = loss_frames / (b / 3)
loss['id'] = sum(loss_id) / (b * 2) # sum(loss_id) / (b * 2) # / len(loss_id)
loss['time'] = sum(loss_time) / (b * 2)
# loss['dice'] = sum(loss_dice) / len(loss_dice)
# loss['dt'] = loss_time / (b * 50)
# loss['hungarian'] = sum(loss_hungarian) / (b * 2)
# loss['psth'] = sum(loss_psth) / (b * 2)
# Drop loss entries that never accumulated a tensor (still a plain float).
for key in list(loss):
if isinstance(loss[key], float):
del loss[key]
preds = dict()
preds['id'] = logits # [:, tf:] # only id logits
preds['dt'] = time
return preds, features, loss
f73a21885d0d1792bc3b8c64a5f050f218b5234b | 16,125 | py | Python | dags/deal_finder_dag.py | arbrown/starthinker | 1a14664fb1a8f2a757b100363ea8958833b7754c | [
"Apache-2.0"
] | null | null | null | dags/deal_finder_dag.py | arbrown/starthinker | 1a14664fb1a8f2a757b100363ea8958833b7754c | [
"Apache-2.0"
] | null | null | null | dags/deal_finder_dag.py | arbrown/starthinker | 1a14664fb1a8f2a757b100363ea8958833b7754c | [
"Apache-2.0"
] | null | null | null | ###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
'''
--------------------------------------------------------------
Before running this Airflow module...
Install StarThinker in cloud composer ( recommended ):
From Release: pip install starthinker
From Open Source: pip install git+https://github.com/google/starthinker
Or push local code to the cloud composer plugins directory ( if pushing local code changes ):
source install/deploy.sh
4) Composer Menu
l) Install All
--------------------------------------------------------------
If any recipe task has "auth" set to "user" add user credentials:
1. Ensure an RECIPE['setup']['auth']['user'] = [User Credentials JSON]
OR
1. Visit Airflow UI > Admin > Connections.
2. Add an Entry called "starthinker_user", fill in the following fields. Last step paste JSON from authentication.
- Conn Type: Google Cloud Platform
- Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
- Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/deploy_commandline.md#optional-setup-user-credentials
--------------------------------------------------------------
If any recipe task has "auth" set to "service" add service credentials:
1. Ensure an RECIPE['setup']['auth']['service'] = [Service Credentials JSON]
OR
1. Visit Airflow UI > Admin > Connections.
2. Add an Entry called "starthinker_service", fill in the following fields. Last step paste JSON from authentication.
- Conn Type: Google Cloud Platform
- Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
- Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/cloud_service.md
--------------------------------------------------------------
DV360 Deal Finder
Compares open vs. deal CPM, CPC, and CPA so that clients can decide which sites, inventory, and deals work best.
  - Wait for <b>BigQuery->StarThinker Data->{field:recipe_slug}->Deal_Finder_Dashboard</b> to be created.
- Join the <a href='https://groups.google.com/d/forum/starthinker-assets' target='_blank'>StarThinker Assets Group</a> to access the following assets
- Copy <a href='https://datastudio.google.com/open/1QrWNTurvQT6nx20vnzdDveSzSmRjqHxQ' target='_blank'>Deal Finder Sample Data</a>.
- Click Edit Connection, and change to <b>BigQuery->StarThinker Data->->Deal_Finder_Dashboard</b>.
- Copy <a href='https://datastudio.google.com/open/1fjRI5AIKTYTA4fWs-pYkJbIMgCumlMyO' target='_blank'>Deal Finder Sample Report</a>.
- When prompted choose the new data source you just created.
- Or give these intructions to the client.
--------------------------------------------------------------
This StarThinker DAG can be extended with any additional tasks from the following sources:
- https://google.github.io/starthinker/
- https://github.com/google/starthinker/tree/master/dags
'''
from starthinker.airflow.factory import DAG_Factory
# User-supplied parameters; the RECIPE below resolves them via {'field': ...} lookups.
INPUTS = {
  'recipe_slug': '', # Place where tables will be written in BigQuery.
  'recipe_timezone': 'America/Los_Angeles', # Timezone for report dates.
  'recipe_name': '', # Name of report in DV360, should be unique.
  'auth_write': 'service', # Credentials used for writing data.
  'auth_read': 'user', # Credentials used for reading data.
  'partners': [], # DV360 partner id.
  'advertisers': [], # Comma delimited list of DV360 advertiser ids.
}
RECIPE = {
'setup': {
'day': [
'Mon',
'Tue',
'Wed',
'Thu',
'Fri',
'Sat',
'Sun'
],
'hour': [
3,
4
]
},
'tasks': [
{
'dataset': {
'description': 'Create a dataset for bigquery tables.',
'hour': [
4
],
'auth': {
'field': {
'name': 'auth_write',
'kind': 'authentication',
'order': 1,
'default': 'service',
'description': 'Credentials used for writing data.'
}
},
'dataset': {
'field': {
'name': 'recipe_slug',
'kind': 'string',
'description': 'Place where tables will be created in BigQuery.'
}
}
}
},
{
'dbm': {
'description': 'Create a DV360 report.',
'hour': [
3
],
'auth': {
'field': {
'name': 'auth_read',
'kind': 'authentication',
'order': 1,
'default': 'user',
'description': 'Credentials used for reading data.'
}
},
'report': {
'filters': {
'FILTER_PARTNER': {
'values': {
'field': {
'name': 'partners',
'kind': 'integer_list',
'order': 5,
'default': [
],
'description': 'DV360 partner id.'
}
}
},
'FILTER_ADVERTISER': {
'values': {
'field': {
'name': 'advertisers',
'kind': 'integer_list',
'order': 6,
'default': [
],
'description': 'Comma delimited list of DV360 advertiser ids.'
}
}
}
},
'body': {
'timezoneCode': {
'field': {
'name': 'recipe_timezone',
'kind': 'timezone',
'description': 'Timezone for report dates.',
'default': 'America/Los_Angeles'
}
},
'metadata': {
'title': {
'field': {
'name': 'recipe_name',
'kind': 'string',
'prefix': 'Deal Finder For ',
'description': 'Name of report in DV360, should be unique.'
}
},
'dataRange': 'LAST_30_DAYS',
'format': 'CSV'
},
'params': {
'type': 'TYPE_CROSS_PARTNER',
'groupBys': [
'FILTER_PARTNER_NAME',
'FILTER_PARTNER',
'FILTER_ADVERTISER_NAME',
'FILTER_ADVERTISER',
'FILTER_APP_URL',
'FILTER_SITE_ID',
'FILTER_INVENTORY_SOURCE_NAME',
'FILTER_INVENTORY_SOURCE',
'FILTER_INVENTORY_SOURCE_TYPE',
'FILTER_ADVERTISER_CURRENCY',
'FILTER_CREATIVE_WIDTH',
'FILTER_CREATIVE_HEIGHT',
'FILTER_CREATIVE_TYPE'
],
'metrics': [
'METRIC_IMPRESSIONS',
'METRIC_CLICKS',
'METRIC_TOTAL_CONVERSIONS',
'METRIC_TOTAL_MEDIA_COST_ADVERTISER',
'METRIC_REVENUE_ADVERTISER',
'METRIC_ACTIVE_VIEW_MEASURABLE_IMPRESSIONS',
'METRIC_ACTIVE_VIEW_VIEWABLE_IMPRESSIONS'
]
}
}
}
}
},
{
'dbm': {
'description': 'Copy a DV360 report to BigQuery.',
'hour': [
4
],
'auth': {
'field': {
'name': 'auth_read',
'kind': 'authentication',
'order': 1,
'default': 'user',
'description': 'Credentials used for reading data.'
}
},
'report': {
'name': {
'field': {
'name': 'recipe_name',
'kind': 'string',
'prefix': 'Deal Finder For ',
'description': 'Name of report in DV360, should be unique.'
}
},
'timeout': 10
},
'out': {
'bigquery': {
'dataset': {
'field': {
'name': 'recipe_slug',
'kind': 'string',
'description': 'Place where tables will be written in BigQuery.'
}
},
'table': 'Deal_Finder_DV360_Report',
'header': True,
'schema': [
{
'name': 'Partner',
'type': 'STRING'
},
{
'name': 'Partner_ID',
'type': 'INTEGER'
},
{
'name': 'Advertiser',
'type': 'STRING'
},
{
'name': 'Advertiser_ID',
'type': 'INTEGER'
},
{
'name': 'Site',
'type': 'STRING'
},
{
'name': 'Site_ID',
'type': 'INTEGER'
},
{
'name': 'Inventory',
'type': 'STRING',
'mode': 'NULLABLE'
},
{
'name': 'Inventory_ID',
'type': 'INTEGER',
'mode': 'NULLABLE'
},
{
'name': 'Inventory_Type',
'type': 'STRING'
},
{
'name': 'Advertiser_Currency',
'type': 'STRING'
},
{
'name': 'Creative_Width',
'type': 'STRING',
'mode': 'NULLABLE'
},
{
'name': 'Creative_Height',
'type': 'STRING',
'mode': 'NULLABLE'
},
{
'name': 'Creative_Type',
'type': 'STRING'
},
{
'name': 'Impressions',
'type': 'INTEGER'
},
{
'name': 'Clicks',
'type': 'INTEGER'
},
{
'name': 'Conversions',
'type': 'FLOAT'
},
{
'name': 'Cost',
'type': 'FLOAT'
},
{
'name': 'Revenue',
'type': 'FLOAT'
},
{
'name': 'AV_Impressions_Measurable',
'type': 'INTEGER'
},
{
'name': 'AV_Impressions_Viewable',
'type': 'INTEGER'
}
]
}
}
}
},
{
'bigquery': {
'description': 'The logic query for Deal Finder, transforms report into view used by datastudio.',
'hour': [
4
],
'auth': {
'field': {
'name': 'auth_write',
'kind': 'authentication',
'order': 1,
'default': 'service',
'description': 'Credentials used for writing data.'
}
},
'from': {
'query': "SELECT Partner, Partner_ID, Advertiser, Advertiser_ID, Site, Site_ID, Inventory, Inventory_Type, Creative_Type, Creative_Size, Always_On, Deal_Impressions, Open_Impressions, Rank_Impressions, Deal_Clicks, Open_Clicks, Rank_Clicks, Deal_Conversions, Open_Conversions, Rank_Conversions, Deal_Impressions_Viewable, Open_Impressions_Viewable, Rank_Impressions_Viewable, Deal_Impressions_Measurable, Open_Impressions_Measurable, Rank_Impressions_Measurable, Deal_Cost, Open_Cost, Rank_Cost, FROM ( SELECT FIRST(Partner) AS Partner, FIRST(Partner_ID) AS Partner_ID, FIRST(Advertiser) AS Advertiser, Advertiser_ID, First(Site) AS Site, Site_ID, Inventory, Inventory_Type, Creative_Type, Creative_Width + ' x ' + Creative_Height AS Creative_Size, IF (LEFT(Inventory, 5) == 'AO - ', True, False) AS Always_On, SUM(Deal_Impressions) AS Deal_Impressions, SUM(Open_Impressions) AS Open_Impressions, SUM(Open_Impressions) + SUM(Deal_Impressions) AS Total_Impressions, ROW_NUMBER() OVER (PARTITION BY Advertiser_ID ORDER BY Total_Impressions DESC) AS Rank_Impressions, SUM(Deal_Clicks) AS Deal_Clicks, SUM(Open_Clicks) AS Open_Clicks, SUM(Open_Clicks) + SUM(Deal_Clicks) AS Total_Clicks, ROW_NUMBER() OVER (PARTITION BY Advertiser_ID ORDER BY Total_Clicks DESC) AS Rank_Clicks, SUM(Deal_Conversions) AS Deal_Conversions, SUM(Open_Conversions) AS Open_Conversions, SUM(Open_Conversions) + SUM(Deal_Conversions) AS Total_Conversions, ROW_NUMBER() OVER (PARTITION BY Advertiser_ID ORDER BY Total_Conversions DESC) AS Rank_Conversions, SUM(Deal_Cost) AS Deal_Cost, SUM(Open_Cost) AS Open_Cost, SUM(Open_Cost) + SUM(Deal_Cost) AS Total_Cost, RANK() OVER (PARTITION BY Advertiser_ID ORDER BY Total_Cost DESC) AS Rank_Cost, SUM(Deal_Impressions_Viewable) AS Deal_Impressions_Viewable, SUM(Open_Impressions_Viewable) AS Open_Impressions_Viewable, SUM(Open_Impressions_Viewable) + SUM(Deal_Impressions_Viewable) AS Total_Impressions_Viewable, ROW_NUMBER() OVER (PARTITION BY Advertiser_ID ORDER BY 
Total_Impressions_Viewable DESC) AS Rank_Impressions_Viewable, SUM(Deal_Impressions_Measurable) AS Deal_Impressions_Measurable, SUM(Open_Impressions_Measurable) AS Open_Impressions_Measurable, SUM(Open_Impressions_Measurable) + SUM(Deal_Impressions_Measurable) AS Total_Impressions_Measurable, ROW_NUMBER() OVER (PARTITION BY Advertiser_ID ORDER BY Total_Impressions_Measurable DESC) AS Rank_Impressions_Measurable, FROM ( SELECT Partner, Partner_ID, Advertiser, Advertiser_ID, Site, Site_ID, Inventory, Inventory_Type, Creative_Type, Creative_Width, Creative_Height, IF(Inventory_ID IS NULL, Impressions, 0) AS Open_Impressions, IF(Inventory_ID IS NULL, 0, Impressions) AS Deal_Impressions, IF(Inventory_ID IS NULL, Clicks, 0) AS Open_Clicks, IF(Inventory_ID IS NULL, 0, Clicks) AS Deal_Clicks, IF(Inventory_ID IS NULL, Conversions, 0) AS Open_Conversions, IF(Inventory_ID IS NULL, 0, Conversions) AS Deal_Conversions, IF(Inventory_ID IS NULL, Cost, 0) AS Open_Cost, IF(Inventory_ID IS NULL, 0, Cost) AS Deal_Cost, IF(Inventory_ID IS NULL, AV_Impressions_Viewable, 0) AS Open_Impressions_Viewable, IF(Inventory_ID IS NULL, 0, AV_Impressions_Viewable) AS Deal_Impressions_Viewable, IF(Inventory_ID IS NULL, AV_Impressions_Measurable, 0) AS Open_Impressions_Measurable, IF(Inventory_ID IS NULL, 0, AV_Impressions_Measurable) AS Deal_Impressions_Measurable, FROM [[PARAMETER].Deal_Finder_DV360_Report] OMIT RECORD IF Site == 'Low volume inventory') GROUP By Advertiser_ID, Site_ID, Inventory, Inventory_Type, Creative_Type, Creative_Size, Always_On) WHERE Rank_Impressions < 100 OR Rank_Clicks < 100 OR Rank_Conversions < 100 OR Rank_Cost < 100;",
'parameters': [
{
'field': {
'name': 'recipe_slug',
'kind': 'string',
'description': 'Place where tables will be written in BigQuery.'
}
}
]
},
'to': {
'dataset': {
'field': {
'name': 'recipe_slug',
'kind': 'string',
'description': 'Place where tables will be written in BigQuery.'
}
},
'view': 'Deal_Finder_Dashboard'
}
}
}
]
}
# Build the Airflow DAG that Airflow discovers at module import time.
dag_maker = DAG_Factory('deal_finder', RECIPE, INPUTS)
dag = dag_maker.generate()
if __name__ == "__main__":
  # Running this module directly prints the equivalent StarThinker CLI commands.
  dag_maker.print_commandline()
| 39.716749 | 3,640 | 0.536992 | },
{
'name': 'Site_ID',
'type': 'INTEGER'
},
{
'name': 'Inventory',
'type': 'STRING',
'mode': 'NULLABLE'
},
{
'name': 'Inventory_ID',
'type': 'INTEGER',
'mode': 'NULLABLE'
},
{
'name': 'Inventory_Type',
'type': 'STRING'
},
{
'name': 'Advertiser_Currency',
'type': 'STRING'
},
{
'name': 'Creative_Width',
'type': 'STRING',
'mode': 'NULLABLE'
},
{
'name': 'Creative_Height',
'type': 'STRING',
'mode': 'NULLABLE'
},
{
'name': 'Creative_Type',
'type': 'STRING'
},
{
'name': 'Impressions',
'type': 'INTEGER'
},
{
'name': 'Clicks',
'type': 'INTEGER'
},
{
'name': 'Conversions',
'type': 'FLOAT'
},
{
'name': 'Cost',
'type': 'FLOAT'
},
{
'name': 'Revenue',
'type': 'FLOAT'
},
{
'name': 'AV_Impressions_Measurable',
'type': 'INTEGER'
},
{
'name': 'AV_Impressions_Viewable',
'type': 'INTEGER'
}
]
}
}
}
},
{
'bigquery': {
'description': 'The logic query for Deal Finder, transforms report into view used by datastudio.',
'hour': [
4
],
'auth': {
'field': {
'name': 'auth_write',
'kind': 'authentication',
'order': 1,
'default': 'service',
'description': 'Credentials used for writing data.'
}
},
'from': {
'query': "SELECT Partner, Partner_ID, Advertiser, Advertiser_ID, Site, Site_ID, Inventory, Inventory_Type, Creative_Type, Creative_Size, Always_On, Deal_Impressions, Open_Impressions, Rank_Impressions, Deal_Clicks, Open_Clicks, Rank_Clicks, Deal_Conversions, Open_Conversions, Rank_Conversions, Deal_Impressions_Viewable, Open_Impressions_Viewable, Rank_Impressions_Viewable, Deal_Impressions_Measurable, Open_Impressions_Measurable, Rank_Impressions_Measurable, Deal_Cost, Open_Cost, Rank_Cost, FROM ( SELECT FIRST(Partner) AS Partner, FIRST(Partner_ID) AS Partner_ID, FIRST(Advertiser) AS Advertiser, Advertiser_ID, First(Site) AS Site, Site_ID, Inventory, Inventory_Type, Creative_Type, Creative_Width + ' x ' + Creative_Height AS Creative_Size, IF (LEFT(Inventory, 5) == 'AO - ', True, False) AS Always_On, SUM(Deal_Impressions) AS Deal_Impressions, SUM(Open_Impressions) AS Open_Impressions, SUM(Open_Impressions) + SUM(Deal_Impressions) AS Total_Impressions, ROW_NUMBER() OVER (PARTITION BY Advertiser_ID ORDER BY Total_Impressions DESC) AS Rank_Impressions, SUM(Deal_Clicks) AS Deal_Clicks, SUM(Open_Clicks) AS Open_Clicks, SUM(Open_Clicks) + SUM(Deal_Clicks) AS Total_Clicks, ROW_NUMBER() OVER (PARTITION BY Advertiser_ID ORDER BY Total_Clicks DESC) AS Rank_Clicks, SUM(Deal_Conversions) AS Deal_Conversions, SUM(Open_Conversions) AS Open_Conversions, SUM(Open_Conversions) + SUM(Deal_Conversions) AS Total_Conversions, ROW_NUMBER() OVER (PARTITION BY Advertiser_ID ORDER BY Total_Conversions DESC) AS Rank_Conversions, SUM(Deal_Cost) AS Deal_Cost, SUM(Open_Cost) AS Open_Cost, SUM(Open_Cost) + SUM(Deal_Cost) AS Total_Cost, RANK() OVER (PARTITION BY Advertiser_ID ORDER BY Total_Cost DESC) AS Rank_Cost, SUM(Deal_Impressions_Viewable) AS Deal_Impressions_Viewable, SUM(Open_Impressions_Viewable) AS Open_Impressions_Viewable, SUM(Open_Impressions_Viewable) + SUM(Deal_Impressions_Viewable) AS Total_Impressions_Viewable, ROW_NUMBER() OVER (PARTITION BY Advertiser_ID ORDER BY 
Total_Impressions_Viewable DESC) AS Rank_Impressions_Viewable, SUM(Deal_Impressions_Measurable) AS Deal_Impressions_Measurable, SUM(Open_Impressions_Measurable) AS Open_Impressions_Measurable, SUM(Open_Impressions_Measurable) + SUM(Deal_Impressions_Measurable) AS Total_Impressions_Measurable, ROW_NUMBER() OVER (PARTITION BY Advertiser_ID ORDER BY Total_Impressions_Measurable DESC) AS Rank_Impressions_Measurable, FROM ( SELECT Partner, Partner_ID, Advertiser, Advertiser_ID, Site, Site_ID, Inventory, Inventory_Type, Creative_Type, Creative_Width, Creative_Height, IF(Inventory_ID IS NULL, Impressions, 0) AS Open_Impressions, IF(Inventory_ID IS NULL, 0, Impressions) AS Deal_Impressions, IF(Inventory_ID IS NULL, Clicks, 0) AS Open_Clicks, IF(Inventory_ID IS NULL, 0, Clicks) AS Deal_Clicks, IF(Inventory_ID IS NULL, Conversions, 0) AS Open_Conversions, IF(Inventory_ID IS NULL, 0, Conversions) AS Deal_Conversions, IF(Inventory_ID IS NULL, Cost, 0) AS Open_Cost, IF(Inventory_ID IS NULL, 0, Cost) AS Deal_Cost, IF(Inventory_ID IS NULL, AV_Impressions_Viewable, 0) AS Open_Impressions_Viewable, IF(Inventory_ID IS NULL, 0, AV_Impressions_Viewable) AS Deal_Impressions_Viewable, IF(Inventory_ID IS NULL, AV_Impressions_Measurable, 0) AS Open_Impressions_Measurable, IF(Inventory_ID IS NULL, 0, AV_Impressions_Measurable) AS Deal_Impressions_Measurable, FROM [[PARAMETER].Deal_Finder_DV360_Report] OMIT RECORD IF Site == 'Low volume inventory') GROUP By Advertiser_ID, Site_ID, Inventory, Inventory_Type, Creative_Type, Creative_Size, Always_On) WHERE Rank_Impressions < 100 OR Rank_Clicks < 100 OR Rank_Conversions < 100 OR Rank_Cost < 100;",
'parameters': [
{
'field': {
'name': 'recipe_slug',
'kind': 'string',
'description': 'Place where tables will be written in BigQuery.'
}
}
]
},
'to': {
'dataset': {
'field': {
'name': 'recipe_slug',
'kind': 'string',
'description': 'Place where tables will be written in BigQuery.'
}
},
'view': 'Deal_Finder_Dashboard'
}
}
}
]
}
dag_maker = DAG_Factory('deal_finder', RECIPE, INPUTS)
dag = dag_maker.generate()
if __name__ == "__main__":
dag_maker.print_commandline()
| true | true |
f73a219dc110568fa8e73c4546516659de8f6158 | 87,688 | py | Python | scripts/layer_chassis_generator.py | hysw/Vulkan-ValidationLayers | ad5d043ff34503d0bac122fe1221667b3e7bb36a | [
"Apache-2.0"
] | 20 | 2019-04-18T07:37:34.000Z | 2022-02-02T21:43:47.000Z | scripts/layer_chassis_generator.py | hysw/Vulkan-ValidationLayers | ad5d043ff34503d0bac122fe1221667b3e7bb36a | [
"Apache-2.0"
] | 11 | 2019-10-21T13:39:41.000Z | 2021-11-05T08:11:54.000Z | scripts/layer_chassis_generator.py | hysw/Vulkan-ValidationLayers | ad5d043ff34503d0bac122fe1221667b3e7bb36a | [
"Apache-2.0"
] | 1 | 2021-12-03T18:11:36.000Z | 2021-12-03T18:11:36.000Z | #!/usr/bin/python3 -i
#
# Copyright (c) 2015-2019 Valve Corporation
# Copyright (c) 2015-2019 LunarG, Inc.
# Copyright (c) 2015-2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Tobin Ehlis <tobine@google.com>
# Author: Mark Lobodzinski <mark@lunarg.com>
#
# This script generates the dispatch portion of a factory layer which intercepts
# all Vulkan functions. The resultant factory layer allows rapid development of
# layers and interceptors.
import os,re,sys
from generator import *
from common_codegen import *
# LayerFactoryGeneratorOptions - subclass of GeneratorOptions.
#
# Adds options used by LayerFactoryOutputGenerator objects during factory
# layer generation.
#
# Additional members
# prefixText - list of strings to prefix generated header with
# (usually a copyright statement + calling convention macros).
# protectFile - True if multiple inclusion protection should be
# generated (based on the filename) around the entire header.
# protectFeature - True if #ifndef..#endif protection should be
# generated around a feature interface in the header file.
# genFuncPointers - True if function pointer typedefs should be
# generated
# protectProto - If conditional protection should be generated
# around prototype declarations, set to either '#ifdef'
# to require opt-in (#ifdef protectProtoStr) or '#ifndef'
# to require opt-out (#ifndef protectProtoStr). Otherwise
# set to None.
# protectProtoStr - #ifdef/#ifndef symbol to use around prototype
# declarations, if protectProto is set
# apicall - string to use for the function declaration prefix,
# such as APICALL on Windows.
# apientry - string to use for the calling convention macro,
# in typedefs, such as APIENTRY.
# apientryp - string to use for the calling convention macro
# in function pointer typedefs, such as APIENTRYP.
# indentFuncProto - True if prototype declarations should put each
# parameter on a separate line
# indentFuncPointer - True if typedefed function pointers should put each
# parameter on a separate line
# alignFuncParam - if nonzero and parameters are being put on a
# separate line, align parameter names at the specified column
class LayerChassisGeneratorOptions(GeneratorOptions):
    """Options consumed by LayerChassisOutputGenerator.

    Extends the registry GeneratorOptions with the C-header formatting knobs
    used while emitting the layer chassis: copyright/prefix text, inclusion
    guards, calling-convention macros, function-pointer typedef generation,
    and prototype indentation/alignment.
    """

    def __init__(self,
                 conventions = None,
                 filename = None,
                 directory = '.',
                 apiname = None,
                 profile = None,
                 versions = '.*',
                 emitversions = '.*',
                 defaultExtensions = None,
                 addExtensions = None,
                 removeExtensions = None,
                 emitExtensions = None,
                 sortProcedure = regSortFeatures,
                 prefixText = "",
                 genFuncPointers = True,
                 protectFile = True,
                 protectFeature = True,
                 apicall = '',
                 apientry = '',
                 apientryp = '',
                 indentFuncProto = True,
                 indentFuncPointer = False,
                 alignFuncParam = 0,
                 helper_file_type = '',
                 expandEnumerants = True):
        # Registry/feature-selection options are handled entirely by the base class.
        super().__init__(conventions, filename, directory, apiname, profile,
                         versions, emitversions, defaultExtensions,
                         addExtensions, removeExtensions, emitExtensions, sortProcedure)
        # Chassis-specific formatting options, stored verbatim on the instance.
        # NOTE(review): helper_file_type and expandEnumerants are accepted for
        # call-site compatibility but are not stored (same as the original).
        self.__dict__.update(
            prefixText=prefixText,
            genFuncPointers=genFuncPointers,
            protectFile=protectFile,
            protectFeature=protectFeature,
            apicall=apicall,
            apientry=apientry,
            apientryp=apientryp,
            indentFuncProto=indentFuncProto,
            indentFuncPointer=indentFuncPointer,
            alignFuncParam=alignFuncParam,
        )
# LayerChassisOutputGenerator - subclass of OutputGenerator.
# Generates a LayerFactory layer that intercepts all API entrypoints
# This is intended to be used as a starting point for creating custom layers
#
# ---- methods ----
# LayerChassisOutputGenerator(errFile, warnFile, diagFile) - args as for
# OutputGenerator. Defines additional internal state.
# ---- methods overriding base class ----
# beginFile(genOpts)
# endFile()
# beginFeature(interface, emit)
# endFeature()
# genType(typeinfo,name)
# genStruct(typeinfo,name)
# genGroup(groupinfo,name)
# genEnum(enuminfo, name)
# genCmd(cmdinfo)
class LayerChassisOutputGenerator(OutputGenerator):
"""Generate specified API interfaces in a specific style, such as a C header"""
# This is an ordered list of sections in the header file.
TYPE_SECTIONS = ['include', 'define', 'basetype', 'handle', 'enum',
'group', 'bitmask', 'funcpointer', 'struct']
ALL_SECTIONS = TYPE_SECTIONS + ['command']
manual_functions = [
# Include functions here to be interecpted w/ manually implemented function bodies
'vkGetDeviceProcAddr',
'vkGetInstanceProcAddr',
'vkCreateDevice',
'vkDestroyDevice',
'vkCreateInstance',
'vkDestroyInstance',
'vkEnumerateInstanceLayerProperties',
'vkEnumerateInstanceExtensionProperties',
'vkEnumerateDeviceLayerProperties',
'vkEnumerateDeviceExtensionProperties',
# Functions that are handled explicitly due to chassis architecture violations
'vkCreateGraphicsPipelines',
'vkCreateComputePipelines',
'vkCreateRayTracingPipelinesNV',
'vkCreatePipelineLayout',
'vkCreateShaderModule',
'vkAllocateDescriptorSets',
'vkCreateBuffer',
# ValidationCache functions do not get dispatched
'vkCreateValidationCacheEXT',
'vkDestroyValidationCacheEXT',
'vkMergeValidationCachesEXT',
'vkGetValidationCacheDataEXT',
# We don't wanna hook this function
'vkGetPhysicalDeviceProcAddr',
]
alt_ret_codes = [
# Include functions here which must tolerate VK_INCOMPLETE as a return code
'vkEnumeratePhysicalDevices',
'vkEnumeratePhysicalDeviceGroupsKHR',
'vkGetValidationCacheDataEXT',
'vkGetPipelineCacheData',
'vkGetShaderInfoAMD',
'vkGetPhysicalDeviceDisplayPropertiesKHR',
'vkGetPhysicalDeviceDisplayProperties2KHR',
'vkGetPhysicalDeviceDisplayPlanePropertiesKHR',
'vkGetDisplayPlaneSupportedDisplaysKHR',
'vkGetDisplayModePropertiesKHR',
'vkGetDisplayModeProperties2KHR',
'vkGetPhysicalDeviceSurfaceFormatsKHR',
'vkGetPhysicalDeviceSurfacePresentModesKHR',
'vkGetPhysicalDevicePresentRectanglesKHR',
'vkGetPastPresentationTimingGOOGLE',
'vkGetSwapchainImagesKHR',
'vkEnumerateInstanceLayerProperties',
'vkEnumerateDeviceLayerProperties',
'vkEnumerateInstanceExtensionProperties',
'vkEnumerateDeviceExtensionProperties',
'vkGetPhysicalDeviceCalibrateableTimeDomainsEXT',
]
pre_dispatch_debug_utils_functions = {
'vkDebugMarkerSetObjectNameEXT' : 'layer_data->report_data->DebugReportSetMarkerObjectName(pNameInfo);',
'vkSetDebugUtilsObjectNameEXT' : 'layer_data->report_data->DebugReportSetUtilsObjectName(pNameInfo);',
'vkQueueBeginDebugUtilsLabelEXT' : 'BeginQueueDebugUtilsLabel(layer_data->report_data, queue, pLabelInfo);',
'vkQueueInsertDebugUtilsLabelEXT' : 'InsertQueueDebugUtilsLabel(layer_data->report_data, queue, pLabelInfo);',
}
post_dispatch_debug_utils_functions = {
'vkQueueEndDebugUtilsLabelEXT' : 'EndQueueDebugUtilsLabel(layer_data->report_data, queue);',
'vkCreateDebugReportCallbackEXT' : 'layer_create_report_callback(layer_data->report_data, false, pCreateInfo, pAllocator, pCallback);',
'vkDestroyDebugReportCallbackEXT' : 'layer_destroy_callback(layer_data->report_data, callback, pAllocator);',
'vkCreateDebugUtilsMessengerEXT' : 'layer_create_messenger_callback(layer_data->report_data, false, pCreateInfo, pAllocator, pMessenger);',
'vkDestroyDebugUtilsMessengerEXT' : 'layer_destroy_callback(layer_data->report_data, messenger, pAllocator);',
}
precallvalidate_loop = "for (auto intercept : layer_data->object_dispatch) {"
precallrecord_loop = precallvalidate_loop
postcallrecord_loop = "for (auto intercept : layer_data->object_dispatch) {"
inline_custom_header_preamble = """
#define NOMINMAX
#include <atomic>
#include <mutex>
#include <cinttypes>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unordered_map>
#include <unordered_set>
#include <algorithm>
#include <memory>
#include "vk_loader_platform.h"
#include "vulkan/vulkan.h"
#include "vk_layer_config.h"
#include "vk_layer_data.h"
#include "vk_layer_logging.h"
#include "vk_object_types.h"
#include "vulkan/vk_layer.h"
#include "vk_enum_string_helper.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"
#include "vulkan/vk_layer.h"
#include "vk_dispatch_table_helper.h"
#include "vk_extension_helper.h"
#include "vk_safe_struct.h"
#include "vk_typemap_helper.h"
extern std::atomic<uint64_t> global_unique_id;
// To avoid re-hashing unique ids on each use, we precompute the hash and store the
// hash's LSBs in the high 24 bits.
struct HashedUint64 {
static const int HASHED_UINT64_SHIFT = 40;
size_t operator()(const uint64_t &t) const { return t >> HASHED_UINT64_SHIFT; }
static uint64_t hash(uint64_t id) {
uint64_t h = (uint64_t)std::hash<uint64_t>()(id);
id |= h << HASHED_UINT64_SHIFT;
return id;
}
};
extern vl_concurrent_unordered_map<uint64_t, uint64_t, 4, HashedUint64> unique_id_mapping;
"""
inline_custom_header_class_definition = """
// Layer object type identifiers
enum LayerObjectTypeId {
LayerObjectTypeInstance, // Container for an instance dispatch object
LayerObjectTypeDevice, // Container for a device dispatch object
LayerObjectTypeThreading, // Instance or device threading layer object
LayerObjectTypeParameterValidation, // Instance or device parameter validation layer object
LayerObjectTypeObjectTracker, // Instance or device object tracker layer object
LayerObjectTypeCoreValidation, // Instance or device core validation layer object
LayerObjectTypeBestPractices, // Instance or device best practices layer object
LayerObjectTypeMaxEnum, // Max enum count
};
struct TEMPLATE_STATE {
VkDescriptorUpdateTemplateKHR desc_update_template;
safe_VkDescriptorUpdateTemplateCreateInfo create_info;
TEMPLATE_STATE(VkDescriptorUpdateTemplateKHR update_template, safe_VkDescriptorUpdateTemplateCreateInfo *pCreateInfo)
: desc_update_template(update_template), create_info(*pCreateInfo) {}
};
class LAYER_PHYS_DEV_PROPERTIES {
public:
VkPhysicalDeviceProperties properties;
std::vector<VkQueueFamilyProperties> queue_family_properties;
};
typedef enum ValidationCheckDisables {
VALIDATION_CHECK_DISABLE_COMMAND_BUFFER_STATE,
VALIDATION_CHECK_DISABLE_OBJECT_IN_USE,
VALIDATION_CHECK_DISABLE_IDLE_DESCRIPTOR_SET,
VALIDATION_CHECK_DISABLE_PUSH_CONSTANT_RANGE,
VALIDATION_CHECK_DISABLE_QUERY_VALIDATION,
VALIDATION_CHECK_DISABLE_IMAGE_LAYOUT_VALIDATION,
} ValidationCheckDisables;
// CHECK_DISABLED struct is a container for bools that can block validation checks from being performed.
// These bools are all "false" by default meaning that all checks are enabled. Enum values can be specified
// via the vk_layer_setting.txt config file or at CreateInstance time via the VK_EXT_validation_features extension
// that can selectively disable checks.
struct CHECK_DISABLED {
bool command_buffer_state; // Skip command buffer state validation
bool object_in_use; // Skip all object in_use checking
bool idle_descriptor_set; // Skip check to verify that descriptor set is not in-use
bool push_constant_range; // Skip push constant range checks
bool query_validation; // Disable all core validation query-related checks
bool image_layout_validation; // Disable image layout validation
bool object_tracking; // Disable object lifetime validation
bool core_checks; // Disable core validation checks
bool thread_safety; // Disable thread safety validation
bool stateless_checks; // Disable stateless validation checks
bool handle_wrapping; // Disable unique handles/handle wrapping
bool shader_validation; // Skip validation for shaders
void SetAll(bool value) { std::fill(&command_buffer_state, &shader_validation + 1, value); }
};
struct CHECK_ENABLED {
bool gpu_validation;
bool gpu_validation_reserve_binding_slot;
bool best_practices;
void SetAll(bool value) { std::fill(&gpu_validation, &gpu_validation_reserve_binding_slot + 1, value); }
};
// Layer chassis validation object base class definition
class ValidationObject {
public:
uint32_t api_version;
debug_report_data* report_data = nullptr;
VkLayerInstanceDispatchTable instance_dispatch_table;
VkLayerDispatchTable device_dispatch_table;
InstanceExtensions instance_extensions;
DeviceExtensions device_extensions = {};
CHECK_DISABLED disabled = {};
CHECK_ENABLED enabled = {};
VkInstance instance = VK_NULL_HANDLE;
VkPhysicalDevice physical_device = VK_NULL_HANDLE;
VkDevice device = VK_NULL_HANDLE;
LAYER_PHYS_DEV_PROPERTIES phys_dev_properties = {};
std::vector<ValidationObject*> object_dispatch;
LayerObjectTypeId container_type;
std::string layer_name = "CHASSIS";
// Constructor
ValidationObject(){};
// Destructor
virtual ~ValidationObject() {};
std::mutex validation_object_mutex;
virtual std::unique_lock<std::mutex> write_lock() {
return std::unique_lock<std::mutex>(validation_object_mutex);
}
ValidationObject* GetValidationObject(std::vector<ValidationObject*>& object_dispatch, LayerObjectTypeId object_type) {
for (auto validation_object : object_dispatch) {
if (validation_object->container_type == object_type) {
return validation_object;
}
}
return nullptr;
};
// Handle Wrapping Data
// Reverse map display handles
vl_concurrent_unordered_map<VkDisplayKHR, uint64_t, 0> display_id_reverse_mapping;
// Wrapping Descriptor Template Update structures requires access to the template createinfo structs
std::unordered_map<uint64_t, std::unique_ptr<TEMPLATE_STATE>> desc_template_createinfo_map;
struct SubpassesUsageStates {
std::unordered_set<uint32_t> subpasses_using_color_attachment;
std::unordered_set<uint32_t> subpasses_using_depthstencil_attachment;
};
// Uses unwrapped handles
std::unordered_map<VkRenderPass, SubpassesUsageStates> renderpasses_states;
// Map of wrapped swapchain handles to arrays of wrapped swapchain image IDs
// Each swapchain has an immutable list of wrapped swapchain image IDs -- always return these IDs if they exist
std::unordered_map<VkSwapchainKHR, std::vector<VkImage>> swapchain_wrapped_image_handle_map;
// Map of wrapped descriptor pools to set of wrapped descriptor sets allocated from each pool
std::unordered_map<VkDescriptorPool, std::unordered_set<VkDescriptorSet>> pool_descriptor_sets_map;
// Unwrap a handle.
template <typename HandleType>
HandleType Unwrap(HandleType wrappedHandle) {
auto iter = unique_id_mapping.find(reinterpret_cast<uint64_t const &>(wrappedHandle));
if (iter == unique_id_mapping.end())
return (HandleType)0;
return (HandleType)iter->second;
}
// Wrap a newly created handle with a new unique ID, and return the new ID.
template <typename HandleType>
HandleType WrapNew(HandleType newlyCreatedHandle) {
auto unique_id = global_unique_id++;
unique_id = HashedUint64::hash(unique_id);
unique_id_mapping.insert_or_assign(unique_id, reinterpret_cast<uint64_t const &>(newlyCreatedHandle));
return (HandleType)unique_id;
}
// Specialized handling for VkDisplayKHR. Adds an entry to enable reverse-lookup.
VkDisplayKHR WrapDisplay(VkDisplayKHR newlyCreatedHandle, ValidationObject *map_data) {
auto unique_id = global_unique_id++;
unique_id = HashedUint64::hash(unique_id);
unique_id_mapping.insert_or_assign(unique_id, reinterpret_cast<uint64_t const &>(newlyCreatedHandle));
map_data->display_id_reverse_mapping.insert_or_assign(newlyCreatedHandle, unique_id);
return (VkDisplayKHR)unique_id;
}
// VkDisplayKHR objects don't have a single point of creation, so we need to see if one already exists in the map before
// creating another.
VkDisplayKHR MaybeWrapDisplay(VkDisplayKHR handle, ValidationObject *map_data) {
// See if this display is already known
auto it = map_data->display_id_reverse_mapping.find(handle);
if (it != map_data->display_id_reverse_mapping.end()) return (VkDisplayKHR)it->second;
// Unknown, so wrap
return WrapDisplay(handle, map_data);
}
// Pre/post hook point declarations
"""
inline_copyright_message = """
// This file is ***GENERATED***. Do Not Edit.
// See layer_chassis_generator.py for modifications.
/* Copyright (c) 2015-2019 The Khronos Group Inc.
* Copyright (c) 2015-2019 Valve Corporation
* Copyright (c) 2015-2019 LunarG, Inc.
* Copyright (c) 2015-2019 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Author: Mark Lobodzinski <mark@lunarg.com>
*/"""
inline_custom_source_preamble = """
#include <string.h>
#include <mutex>
#define VALIDATION_ERROR_MAP_IMPL
#include "chassis.h"
#include "layer_chassis_dispatch.h"
small_unordered_map<void*, ValidationObject*, 2> layer_data_map;
// Global unique object identifier.
std::atomic<uint64_t> global_unique_id(1ULL);
// Map uniqueID to actual object handle. Accesses to the map itself are
// internally synchronized.
vl_concurrent_unordered_map<uint64_t, uint64_t, 4, HashedUint64> unique_id_mapping;
bool wrap_handles = true;
#define OBJECT_LAYER_NAME "VK_LAYER_KHRONOS_validation"
#define OBJECT_LAYER_DESCRIPTION "khronos_validation"
// Include layer validation object definitions
#include "object_lifetime_validation.h"
#include "thread_safety.h"
#include "stateless_validation.h"
#include "core_validation.h"
#include "best_practices.h"
namespace vulkan_layer_chassis {
using std::unordered_map;
static const VkLayerProperties global_layer = {
OBJECT_LAYER_NAME, VK_LAYER_API_VERSION, 1, "LunarG validation Layer",
};
static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION},
{VK_EXT_DEBUG_UTILS_EXTENSION_NAME, VK_EXT_DEBUG_UTILS_SPEC_VERSION}};
static const VkExtensionProperties device_extensions[] = {
{VK_EXT_VALIDATION_CACHE_EXTENSION_NAME, VK_EXT_VALIDATION_CACHE_SPEC_VERSION},
{VK_EXT_DEBUG_MARKER_EXTENSION_NAME, VK_EXT_DEBUG_MARKER_SPEC_VERSION},
};
typedef struct {
bool is_instance_api;
void* funcptr;
} function_data;
extern const std::unordered_map<std::string, function_data> name_to_funcptr_map;
// Manually written functions
// Check enabled instance extensions against supported instance extension whitelist
static void InstanceExtensionWhitelist(ValidationObject *layer_data, const VkInstanceCreateInfo *pCreateInfo, VkInstance instance) {
for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
// Check for recognized instance extensions
if (!white_list(pCreateInfo->ppEnabledExtensionNames[i], kInstanceExtensionNames)) {
log_msg(layer_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUIDUndefined,
"Instance Extension %s is not supported by this layer. Using this extension may adversely affect validation "
"results and/or produce undefined behavior.",
pCreateInfo->ppEnabledExtensionNames[i]);
}
}
}
// Check enabled device extensions against supported device extension whitelist
static void DeviceExtensionWhitelist(ValidationObject *layer_data, const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
// Check for recognized device extensions
if (!white_list(pCreateInfo->ppEnabledExtensionNames[i], kDeviceExtensionNames)) {
log_msg(layer_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUIDUndefined,
"Device Extension %s is not supported by this layer. Using this extension may adversely affect validation "
"results and/or produce undefined behavior.",
pCreateInfo->ppEnabledExtensionNames[i]);
}
}
}
// Process validation features, flags and settings specified through extensions, a layer settings file, or environment variables
static const std::unordered_map<std::string, VkValidationFeatureDisableEXT> VkValFeatureDisableLookup = {
{"VK_VALIDATION_FEATURE_DISABLE_SHADERS_EXT", VK_VALIDATION_FEATURE_DISABLE_SHADERS_EXT},
{"VK_VALIDATION_FEATURE_DISABLE_THREAD_SAFETY_EXT", VK_VALIDATION_FEATURE_DISABLE_THREAD_SAFETY_EXT},
{"VK_VALIDATION_FEATURE_DISABLE_API_PARAMETERS_EXT", VK_VALIDATION_FEATURE_DISABLE_API_PARAMETERS_EXT},
{"VK_VALIDATION_FEATURE_DISABLE_OBJECT_LIFETIMES_EXT", VK_VALIDATION_FEATURE_DISABLE_OBJECT_LIFETIMES_EXT},
{"VK_VALIDATION_FEATURE_DISABLE_CORE_CHECKS_EXT", VK_VALIDATION_FEATURE_DISABLE_CORE_CHECKS_EXT},
{"VK_VALIDATION_FEATURE_DISABLE_UNIQUE_HANDLES_EXT", VK_VALIDATION_FEATURE_DISABLE_UNIQUE_HANDLES_EXT},
{"VK_VALIDATION_FEATURE_DISABLE_ALL_EXT", VK_VALIDATION_FEATURE_DISABLE_ALL_EXT},
};
static const std::unordered_map<std::string, VkValidationFeatureEnableEXT> VkValFeatureEnableLookup = {
{"VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT", VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT},
{"VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_RESERVE_BINDING_SLOT_EXT", VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_RESERVE_BINDING_SLOT_EXT},
{"VK_VALIDATION_FEATURE_ENABLE_BEST_PRACTICES_EXT", VK_VALIDATION_FEATURE_ENABLE_BEST_PRACTICES_EXT},
};
static const std::unordered_map<std::string, ValidationCheckDisables> ValidationDisableLookup = {
{"VALIDATION_CHECK_DISABLE_COMMAND_BUFFER_STATE", VALIDATION_CHECK_DISABLE_COMMAND_BUFFER_STATE},
{"VALIDATION_CHECK_DISABLE_OBJECT_IN_USE", VALIDATION_CHECK_DISABLE_OBJECT_IN_USE},
{"VALIDATION_CHECK_DISABLE_IDLE_DESCRIPTOR_SET", VALIDATION_CHECK_DISABLE_IDLE_DESCRIPTOR_SET},
{"VALIDATION_CHECK_DISABLE_PUSH_CONSTANT_RANGE", VALIDATION_CHECK_DISABLE_PUSH_CONSTANT_RANGE},
{"VALIDATION_CHECK_DISABLE_QUERY_VALIDATION", VALIDATION_CHECK_DISABLE_QUERY_VALIDATION},
{"VALIDATION_CHECK_DISABLE_IMAGE_LAYOUT_VALIDATION", VALIDATION_CHECK_DISABLE_IMAGE_LAYOUT_VALIDATION},
};
// Set the local disable flag for the appropriate VALIDATION_CHECK_DISABLE enum
void SetValidationDisable(CHECK_DISABLED* disable_data, const ValidationCheckDisables disable_id) {
switch (disable_id) {
case VALIDATION_CHECK_DISABLE_COMMAND_BUFFER_STATE:
disable_data->command_buffer_state = true;
break;
case VALIDATION_CHECK_DISABLE_OBJECT_IN_USE:
disable_data->object_in_use = true;
break;
case VALIDATION_CHECK_DISABLE_IDLE_DESCRIPTOR_SET:
disable_data->idle_descriptor_set = true;
break;
case VALIDATION_CHECK_DISABLE_PUSH_CONSTANT_RANGE:
disable_data->push_constant_range = true;
break;
case VALIDATION_CHECK_DISABLE_QUERY_VALIDATION:
disable_data->query_validation = true;
break;
case VALIDATION_CHECK_DISABLE_IMAGE_LAYOUT_VALIDATION:
disable_data->image_layout_validation = true;
break;
default:
assert(true);
}
}
// Set the local disable flag for a single VK_VALIDATION_FEATURE_DISABLE_* flag
void SetValidationFeatureDisable(CHECK_DISABLED* disable_data, const VkValidationFeatureDisableEXT feature_disable) {
switch (feature_disable) {
case VK_VALIDATION_FEATURE_DISABLE_SHADERS_EXT:
disable_data->shader_validation = true;
break;
case VK_VALIDATION_FEATURE_DISABLE_THREAD_SAFETY_EXT:
disable_data->thread_safety = true;
break;
case VK_VALIDATION_FEATURE_DISABLE_API_PARAMETERS_EXT:
disable_data->stateless_checks = true;
break;
case VK_VALIDATION_FEATURE_DISABLE_OBJECT_LIFETIMES_EXT:
disable_data->object_tracking = true;
break;
case VK_VALIDATION_FEATURE_DISABLE_CORE_CHECKS_EXT:
disable_data->core_checks = true;
break;
case VK_VALIDATION_FEATURE_DISABLE_UNIQUE_HANDLES_EXT:
disable_data->handle_wrapping = true;
break;
case VK_VALIDATION_FEATURE_DISABLE_ALL_EXT:
// Set all disabled flags to true
disable_data->SetAll(true);
break;
default:
break;
}
}
// Set the local enable flag for a single VK_VALIDATION_FEATURE_ENABLE_* flag
void SetValidationFeatureEnable(CHECK_ENABLED *enable_data, const VkValidationFeatureEnableEXT feature_enable) {
switch (feature_enable) {
case VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT:
enable_data->gpu_validation = true;
break;
case VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_RESERVE_BINDING_SLOT_EXT:
enable_data->gpu_validation_reserve_binding_slot = true;
break;
case VK_VALIDATION_FEATURE_ENABLE_BEST_PRACTICES_EXT:
enable_data->best_practices = true;
break;
default:
break;
}
}
// Set the local disable flag for settings specified through the VK_EXT_validation_flags extension
void SetValidationFlags(CHECK_DISABLED* disables, const VkValidationFlagsEXT* val_flags_struct) {
for (uint32_t i = 0; i < val_flags_struct->disabledValidationCheckCount; ++i) {
switch (val_flags_struct->pDisabledValidationChecks[i]) {
case VK_VALIDATION_CHECK_SHADERS_EXT:
disables->shader_validation = true;
break;
case VK_VALIDATION_CHECK_ALL_EXT:
// Set all disabled flags to true
disables->SetAll(true);
break;
default:
break;
}
}
}
// Process Validation Features flags specified through the ValidationFeature extension
void SetValidationFeatures(CHECK_DISABLED *disable_data, CHECK_ENABLED *enable_data,
const VkValidationFeaturesEXT *val_features_struct) {
for (uint32_t i = 0; i < val_features_struct->disabledValidationFeatureCount; ++i) {
SetValidationFeatureDisable(disable_data, val_features_struct->pDisabledValidationFeatures[i]);
}
for (uint32_t i = 0; i < val_features_struct->enabledValidationFeatureCount; ++i) {
SetValidationFeatureEnable(enable_data, val_features_struct->pEnabledValidationFeatures[i]);
}
}
// Given a string representation of a list of enable enum values, call the appropriate setter function
void SetLocalEnableSetting(std::string list_of_enables, std::string delimiter, CHECK_ENABLED* enables) {
size_t pos = 0;
std::string token;
while (list_of_enables.length() != 0) {
pos = list_of_enables.find(delimiter);
if (pos != std::string::npos) {
token = list_of_enables.substr(0, pos);
} else {
pos = list_of_enables.length() - delimiter.length();
token = list_of_enables;
}
if (token.find("VK_VALIDATION_FEATURE_ENABLE_") != std::string::npos) {
auto result = VkValFeatureEnableLookup.find(token);
if (result != VkValFeatureEnableLookup.end()) {
SetValidationFeatureEnable(enables, result->second);
}
}
list_of_enables.erase(0, pos + delimiter.length());
}
}
// Given a string representation of a list of disable enum values, call the appropriate setter function
void SetLocalDisableSetting(std::string list_of_disables, std::string delimiter, CHECK_DISABLED* disables) {
size_t pos = 0;
std::string token;
while (list_of_disables.length() != 0) {
pos = list_of_disables.find(delimiter);
if (pos != std::string::npos) {
token = list_of_disables.substr(0, pos);
} else {
pos = list_of_disables.length() - delimiter.length();
token = list_of_disables;
}
if (token.find("VK_VALIDATION_FEATURE_DISABLE_") != std::string::npos) {
auto result = VkValFeatureDisableLookup.find(token);
if (result != VkValFeatureDisableLookup.end()) {
SetValidationFeatureDisable(disables, result->second);
}
}
if (token.find("VALIDATION_CHECK_DISABLE_") != std::string::npos) {
auto result = ValidationDisableLookup.find(token);
if (result != ValidationDisableLookup.end()) {
SetValidationDisable(disables, result->second);
}
}
list_of_disables.erase(0, pos + delimiter.length());
}
}
// Process enables and disables set though the vk_layer_settings.txt config file or through an environment variable
void ProcessConfigAndEnvSettings(const char* layer_description, CHECK_ENABLED* enables, CHECK_DISABLED* disables) {
std::string enable_key = layer_description;
std::string disable_key = layer_description;
enable_key.append(".enables");
disable_key.append(".disables");
std::string list_of_config_enables = getLayerOption(enable_key.c_str());
std::string list_of_env_enables = GetLayerEnvVar("VK_LAYER_ENABLES");
std::string list_of_config_disables = getLayerOption(disable_key.c_str());
std::string list_of_env_disables = GetLayerEnvVar("VK_LAYER_DISABLES");
#if defined(_WIN32)
std::string env_delimiter = ";";
#else
std::string env_delimiter = ":";
#endif
SetLocalEnableSetting(list_of_config_enables, ",", enables);
SetLocalEnableSetting(list_of_env_enables, env_delimiter, enables);
SetLocalDisableSetting(list_of_config_disables, ",", disables);
SetLocalDisableSetting(list_of_env_disables, env_delimiter, disables);
}
// Non-code-generated chassis API functions
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice device, const char *funcName) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!ApiParentExtensionEnabled(funcName, &layer_data->device_extensions)) {
return nullptr;
}
const auto &item = name_to_funcptr_map.find(funcName);
if (item != name_to_funcptr_map.end()) {
if (item->second.is_instance_api) {
return nullptr;
} else {
return reinterpret_cast<PFN_vkVoidFunction>(item->second.funcptr);
}
}
auto &table = layer_data->device_dispatch_table;
if (!table.GetDeviceProcAddr) return nullptr;
return table.GetDeviceProcAddr(device, funcName);
}
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
const auto &item = name_to_funcptr_map.find(funcName);
if (item != name_to_funcptr_map.end()) {
return reinterpret_cast<PFN_vkVoidFunction>(item->second.funcptr);
}
auto layer_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
auto &table = layer_data->instance_dispatch_table;
if (!table.GetInstanceProcAddr) return nullptr;
return table.GetInstanceProcAddr(instance, funcName);
}
VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
}
VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
VkLayerProperties *pProperties) {
return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
}
VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
VkExtensionProperties *pProperties) {
if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
return util_GetExtensionProperties(ARRAY_SIZE(instance_extensions), instance_extensions, pCount, pProperties);
return VK_ERROR_LAYER_NOT_PRESENT;
}
VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, const char *pLayerName,
uint32_t *pCount, VkExtensionProperties *pProperties) {
if (pLayerName && !strcmp(pLayerName, global_layer.layerName)) return util_GetExtensionProperties(ARRAY_SIZE(device_extensions), device_extensions, pCount, pProperties);
assert(physicalDevice);
auto layer_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
return layer_data->instance_dispatch_table.EnumerateDeviceExtensionProperties(physicalDevice, pLayerName, pCount, pProperties);
}
VKAPI_ATTR VkResult VKAPI_CALL CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
VkInstance *pInstance) {
VkLayerInstanceCreateInfo* chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
assert(chain_info->u.pLayerInfo);
PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
if (fpCreateInstance == NULL) return VK_ERROR_INITIALIZATION_FAILED;
chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
uint32_t specified_version = (pCreateInfo->pApplicationInfo ? pCreateInfo->pApplicationInfo->apiVersion : VK_API_VERSION_1_0);
uint32_t api_version = (specified_version < VK_API_VERSION_1_1) ? VK_API_VERSION_1_0 : VK_API_VERSION_1_1;
auto report_data = new debug_report_data{};
report_data->instance_pnext_chain = SafePnextCopy(pCreateInfo->pNext);
ActivateInstanceDebugCallbacks(report_data);
CHECK_ENABLED local_enables {};
CHECK_DISABLED local_disables {};
const auto *validation_features_ext = lvl_find_in_chain<VkValidationFeaturesEXT>(pCreateInfo->pNext);
if (validation_features_ext) {
SetValidationFeatures(&local_disables, &local_enables, validation_features_ext);
}
const auto *validation_flags_ext = lvl_find_in_chain<VkValidationFlagsEXT>(pCreateInfo->pNext);
if (validation_flags_ext) {
SetValidationFlags(&local_disables, validation_flags_ext);
}
ProcessConfigAndEnvSettings(OBJECT_LAYER_DESCRIPTION, &local_enables, &local_disables);
// Create temporary dispatch vector for pre-calls until instance is created
std::vector<ValidationObject*> local_object_dispatch;
// Add VOs to dispatch vector. Order here will be the validation dispatch order!
auto thread_checker = new ThreadSafety(nullptr);
if (!local_disables.thread_safety) {
local_object_dispatch.emplace_back(thread_checker);
}
thread_checker->container_type = LayerObjectTypeThreading;
thread_checker->api_version = api_version;
thread_checker->report_data = report_data;
auto parameter_validation = new StatelessValidation;
if (!local_disables.stateless_checks) {
local_object_dispatch.emplace_back(parameter_validation);
}
parameter_validation->container_type = LayerObjectTypeParameterValidation;
parameter_validation->api_version = api_version;
parameter_validation->report_data = report_data;
auto object_tracker = new ObjectLifetimes;
if (!local_disables.object_tracking) {
local_object_dispatch.emplace_back(object_tracker);
}
object_tracker->container_type = LayerObjectTypeObjectTracker;
object_tracker->api_version = api_version;
object_tracker->report_data = report_data;
auto core_checks = new CoreChecks;
if (!local_disables.core_checks) {
local_object_dispatch.emplace_back(core_checks);
}
core_checks->container_type = LayerObjectTypeCoreValidation;
core_checks->api_version = api_version;
core_checks->report_data = report_data;
auto best_practices = new BestPractices;
if (local_enables.best_practices) {
local_object_dispatch.emplace_back(best_practices);
}
best_practices->container_type = LayerObjectTypeBestPractices;
best_practices->api_version = api_version;
best_practices->report_data = report_data;
// If handle wrapping is disabled via the ValidationFeatures extension, override build flag
if (local_disables.handle_wrapping) {
wrap_handles = false;
}
// Init dispatch array and call registration functions
for (auto intercept : local_object_dispatch) {
intercept->PreCallValidateCreateInstance(pCreateInfo, pAllocator, pInstance);
}
for (auto intercept : local_object_dispatch) {
intercept->PreCallRecordCreateInstance(pCreateInfo, pAllocator, pInstance);
}
VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
if (result != VK_SUCCESS) return result;
auto framework = GetLayerDataPtr(get_dispatch_key(*pInstance), layer_data_map);
framework->object_dispatch = local_object_dispatch;
framework->container_type = LayerObjectTypeInstance;
framework->disabled = local_disables;
framework->enabled = local_enables;
framework->instance = *pInstance;
layer_init_instance_dispatch_table(*pInstance, &framework->instance_dispatch_table, fpGetInstanceProcAddr);
framework->report_data = report_data;
framework->api_version = api_version;
framework->instance_extensions.InitFromInstanceCreateInfo(specified_version, pCreateInfo);
layer_debug_messenger_actions(framework->report_data, pAllocator, OBJECT_LAYER_DESCRIPTION);
object_tracker->instance_dispatch_table = framework->instance_dispatch_table;
object_tracker->enabled = framework->enabled;
object_tracker->disabled = framework->disabled;
thread_checker->instance_dispatch_table = framework->instance_dispatch_table;
thread_checker->enabled = framework->enabled;
thread_checker->disabled = framework->disabled;
parameter_validation->instance_dispatch_table = framework->instance_dispatch_table;
parameter_validation->enabled = framework->enabled;
parameter_validation->disabled = framework->disabled;
core_checks->instance_dispatch_table = framework->instance_dispatch_table;
core_checks->instance = *pInstance;
core_checks->enabled = framework->enabled;
core_checks->disabled = framework->disabled;
core_checks->instance_state = core_checks;
best_practices->instance_dispatch_table = framework->instance_dispatch_table;
best_practices->enabled = framework->enabled;
best_practices->disabled = framework->disabled;
for (auto intercept : framework->object_dispatch) {
intercept->PostCallRecordCreateInstance(pCreateInfo, pAllocator, pInstance, result);
}
InstanceExtensionWhitelist(framework, pCreateInfo, *pInstance);
DeactivateInstanceDebugCallbacks(report_data);
return result;
}
VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
dispatch_key key = get_dispatch_key(instance);
auto layer_data = GetLayerDataPtr(key, layer_data_map);
ActivateInstanceDebugCallbacks(layer_data->report_data);
""" + precallvalidate_loop + """
auto lock = intercept->write_lock();
intercept->PreCallValidateDestroyInstance(instance, pAllocator);
}
""" + precallrecord_loop + """
auto lock = intercept->write_lock();
intercept->PreCallRecordDestroyInstance(instance, pAllocator);
}
layer_data->instance_dispatch_table.DestroyInstance(instance, pAllocator);
""" + postcallrecord_loop + """
auto lock = intercept->write_lock();
intercept->PostCallRecordDestroyInstance(instance, pAllocator);
}
DeactivateInstanceDebugCallbacks(layer_data->report_data);
FreePnextChain(layer_data->report_data->instance_pnext_chain);
layer_debug_utils_destroy_instance(layer_data->report_data);
for (auto item = layer_data->object_dispatch.begin(); item != layer_data->object_dispatch.end(); item++) {
delete *item;
}
FreeLayerDataPtr(key, layer_data_map);
}
VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
auto instance_interceptor = GetLayerDataPtr(get_dispatch_key(gpu), layer_data_map);
PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(instance_interceptor->instance, "vkCreateDevice");
if (fpCreateDevice == NULL) {
return VK_ERROR_INITIALIZATION_FAILED;
}
chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
// Get physical device limits for device
VkPhysicalDeviceProperties device_properties = {};
instance_interceptor->instance_dispatch_table.GetPhysicalDeviceProperties(gpu, &device_properties);
// Setup the validation tables based on the application API version from the instance and the capabilities of the device driver
uint32_t effective_api_version = std::min(device_properties.apiVersion, instance_interceptor->api_version);
DeviceExtensions device_extensions = {};
device_extensions.InitFromDeviceCreateInfo(&instance_interceptor->instance_extensions, effective_api_version, pCreateInfo);
for (auto item : instance_interceptor->object_dispatch) {
item->device_extensions = device_extensions;
}
safe_VkDeviceCreateInfo modified_create_info(pCreateInfo);
bool skip = false;
for (auto intercept : instance_interceptor->object_dispatch) {
auto lock = intercept->write_lock();
skip |= intercept->PreCallValidateCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
}
for (auto intercept : instance_interceptor->object_dispatch) {
auto lock = intercept->write_lock();
intercept->PreCallRecordCreateDevice(gpu, pCreateInfo, pAllocator, pDevice, &modified_create_info);
}
VkResult result = fpCreateDevice(gpu, reinterpret_cast<VkDeviceCreateInfo *>(&modified_create_info), pAllocator, pDevice);
if (result != VK_SUCCESS) {
return result;
}
auto device_interceptor = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
device_interceptor->container_type = LayerObjectTypeDevice;
// Save local info in device object
device_interceptor->phys_dev_properties.properties = device_properties;
device_interceptor->api_version = device_interceptor->device_extensions.InitFromDeviceCreateInfo(
&instance_interceptor->instance_extensions, effective_api_version, pCreateInfo);
device_interceptor->device_extensions = device_extensions;
layer_init_device_dispatch_table(*pDevice, &device_interceptor->device_dispatch_table, fpGetDeviceProcAddr);
device_interceptor->device = *pDevice;
device_interceptor->physical_device = gpu;
device_interceptor->instance = instance_interceptor->instance;
device_interceptor->report_data = instance_interceptor->report_data;
// Note that this defines the order in which the layer validation objects are called
auto thread_safety = new ThreadSafety(reinterpret_cast<ThreadSafety *>(instance_interceptor->GetValidationObject(instance_interceptor->object_dispatch, LayerObjectTypeThreading)));
thread_safety->container_type = LayerObjectTypeThreading;
if (!instance_interceptor->disabled.thread_safety) {
device_interceptor->object_dispatch.emplace_back(thread_safety);
}
auto stateless_validation = new StatelessValidation;
stateless_validation->container_type = LayerObjectTypeParameterValidation;
if (!instance_interceptor->disabled.stateless_checks) {
device_interceptor->object_dispatch.emplace_back(stateless_validation);
}
auto object_tracker = new ObjectLifetimes;
object_tracker->container_type = LayerObjectTypeObjectTracker;
if (!instance_interceptor->disabled.object_tracking) {
device_interceptor->object_dispatch.emplace_back(object_tracker);
}
auto core_checks = new CoreChecks;
core_checks->container_type = LayerObjectTypeCoreValidation;
core_checks->instance_state = reinterpret_cast<CoreChecks *>(
core_checks->GetValidationObject(instance_interceptor->object_dispatch, LayerObjectTypeCoreValidation));
if (!instance_interceptor->disabled.core_checks) {
device_interceptor->object_dispatch.emplace_back(core_checks);
}
auto best_practices = new BestPractices;
best_practices->container_type = LayerObjectTypeBestPractices;
best_practices->instance_state = reinterpret_cast<BestPractices *>(
best_practices->GetValidationObject(instance_interceptor->object_dispatch, LayerObjectTypeBestPractices));
if (instance_interceptor->enabled.best_practices) {
device_interceptor->object_dispatch.emplace_back(best_practices);
}
// Set per-intercept common data items
for (auto dev_intercept : device_interceptor->object_dispatch) {
dev_intercept->device = *pDevice;
dev_intercept->physical_device = gpu;
dev_intercept->instance = instance_interceptor->instance;
dev_intercept->report_data = device_interceptor->report_data;
dev_intercept->device_dispatch_table = device_interceptor->device_dispatch_table;
dev_intercept->api_version = device_interceptor->api_version;
dev_intercept->disabled = instance_interceptor->disabled;
dev_intercept->enabled = instance_interceptor->enabled;
dev_intercept->instance_dispatch_table = instance_interceptor->instance_dispatch_table;
dev_intercept->instance_extensions = instance_interceptor->instance_extensions;
dev_intercept->device_extensions = device_interceptor->device_extensions;
}
for (auto intercept : instance_interceptor->object_dispatch) {
auto lock = intercept->write_lock();
intercept->PostCallRecordCreateDevice(gpu, pCreateInfo, pAllocator, pDevice, result);
}
DeviceExtensionWhitelist(device_interceptor, pCreateInfo, *pDevice);
return result;
}
VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
dispatch_key key = get_dispatch_key(device);
auto layer_data = GetLayerDataPtr(key, layer_data_map);
""" + precallvalidate_loop + """
auto lock = intercept->write_lock();
intercept->PreCallValidateDestroyDevice(device, pAllocator);
}
""" + precallrecord_loop + """
auto lock = intercept->write_lock();
intercept->PreCallRecordDestroyDevice(device, pAllocator);
}
layer_data->device_dispatch_table.DestroyDevice(device, pAllocator);
""" + postcallrecord_loop + """
auto lock = intercept->write_lock();
intercept->PostCallRecordDestroyDevice(device, pAllocator);
}
for (auto item = layer_data->object_dispatch.begin(); item != layer_data->object_dispatch.end(); item++) {
delete *item;
}
FreeLayerDataPtr(key, layer_data_map);
}
// Special-case APIs for which core_validation needs custom parameter lists and/or modifies parameters
VKAPI_ATTR VkResult VKAPI_CALL CreateGraphicsPipelines(
VkDevice device,
VkPipelineCache pipelineCache,
uint32_t createInfoCount,
const VkGraphicsPipelineCreateInfo* pCreateInfos,
const VkAllocationCallbacks* pAllocator,
VkPipeline* pPipelines) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
create_graphics_pipeline_api_state cgpl_state[LayerObjectTypeMaxEnum]{};
for (auto intercept : layer_data->object_dispatch) {
cgpl_state[intercept->container_type].pCreateInfos = pCreateInfos;
auto lock = intercept->write_lock();
skip |= intercept->PreCallValidateCreateGraphicsPipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, &(cgpl_state[intercept->container_type]));
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
}
for (auto intercept : layer_data->object_dispatch) {
auto lock = intercept->write_lock();
intercept->PreCallRecordCreateGraphicsPipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, &(cgpl_state[intercept->container_type]));
}
auto usepCreateInfos = (!cgpl_state[LayerObjectTypeCoreValidation].pCreateInfos) ? pCreateInfos : cgpl_state[LayerObjectTypeCoreValidation].pCreateInfos;
VkResult result = DispatchCreateGraphicsPipelines(device, pipelineCache, createInfoCount, usepCreateInfos, pAllocator, pPipelines);
for (auto intercept : layer_data->object_dispatch) {
auto lock = intercept->write_lock();
intercept->PostCallRecordCreateGraphicsPipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, result, &(cgpl_state[intercept->container_type]));
}
return result;
}
// This API saves some core_validation pipeline state state on the stack for performance purposes
VKAPI_ATTR VkResult VKAPI_CALL CreateComputePipelines(
VkDevice device,
VkPipelineCache pipelineCache,
uint32_t createInfoCount,
const VkComputePipelineCreateInfo* pCreateInfos,
const VkAllocationCallbacks* pAllocator,
VkPipeline* pPipelines) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
create_compute_pipeline_api_state ccpl_state[LayerObjectTypeMaxEnum]{};
for (auto intercept : layer_data->object_dispatch) {
ccpl_state[intercept->container_type].pCreateInfos = pCreateInfos;
auto lock = intercept->write_lock();
skip |= intercept->PreCallValidateCreateComputePipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, &(ccpl_state[intercept->container_type]));
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
}
for (auto intercept : layer_data->object_dispatch) {
auto lock = intercept->write_lock();
intercept->PreCallRecordCreateComputePipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, &(ccpl_state[intercept->container_type]));
}
auto usepCreateInfos = (!ccpl_state[LayerObjectTypeCoreValidation].pCreateInfos) ? pCreateInfos : ccpl_state[LayerObjectTypeCoreValidation].pCreateInfos;
VkResult result = DispatchCreateComputePipelines(device, pipelineCache, createInfoCount, usepCreateInfos, pAllocator, pPipelines);
for (auto intercept : layer_data->object_dispatch) {
auto lock = intercept->write_lock();
intercept->PostCallRecordCreateComputePipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, result, &(ccpl_state[intercept->container_type]));
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL CreateRayTracingPipelinesNV(
VkDevice device,
VkPipelineCache pipelineCache,
uint32_t createInfoCount,
const VkRayTracingPipelineCreateInfoNV* pCreateInfos,
const VkAllocationCallbacks* pAllocator,
VkPipeline* pPipelines) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
create_ray_tracing_pipeline_api_state crtpl_state[LayerObjectTypeMaxEnum]{};
for (auto intercept : layer_data->object_dispatch) {
crtpl_state[intercept->container_type].pCreateInfos = pCreateInfos;
auto lock = intercept->write_lock();
skip |= intercept->PreCallValidateCreateRayTracingPipelinesNV(device, pipelineCache, createInfoCount, pCreateInfos,
pAllocator, pPipelines, &(crtpl_state[intercept->container_type]));
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
}
for (auto intercept : layer_data->object_dispatch) {
auto lock = intercept->write_lock();
intercept->PreCallRecordCreateRayTracingPipelinesNV(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator,
pPipelines, &(crtpl_state[intercept->container_type]));
}
VkResult result = DispatchCreateRayTracingPipelinesNV(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
for (auto intercept : layer_data->object_dispatch) {
auto lock = intercept->write_lock();
intercept->PostCallRecordCreateRayTracingPipelinesNV(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator,
pPipelines, result, &(crtpl_state[intercept->container_type]));
}
return result;
}
// This API needs the ability to modify a down-chain parameter
VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineLayout(
VkDevice device,
const VkPipelineLayoutCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkPipelineLayout* pPipelineLayout) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
create_pipeline_layout_api_state cpl_state{};
cpl_state.modified_create_info = *pCreateInfo;
for (auto intercept : layer_data->object_dispatch) {
auto lock = intercept->write_lock();
skip |= intercept->PreCallValidateCreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
}
for (auto intercept : layer_data->object_dispatch) {
auto lock = intercept->write_lock();
intercept->PreCallRecordCreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout, &cpl_state);
}
VkResult result = DispatchCreatePipelineLayout(device, &cpl_state.modified_create_info, pAllocator, pPipelineLayout);
for (auto intercept : layer_data->object_dispatch) {
auto lock = intercept->write_lock();
intercept->PostCallRecordCreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout, result);
}
return result;
}
// This API needs some local stack data for performance reasons and also may modify a parameter
VKAPI_ATTR VkResult VKAPI_CALL CreateShaderModule(
VkDevice device,
const VkShaderModuleCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkShaderModule* pShaderModule) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
create_shader_module_api_state csm_state{};
csm_state.instrumented_create_info = *pCreateInfo;
for (auto intercept : layer_data->object_dispatch) {
auto lock = intercept->write_lock();
skip |= intercept->PreCallValidateCreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule, &csm_state);
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
}
for (auto intercept : layer_data->object_dispatch) {
auto lock = intercept->write_lock();
intercept->PreCallRecordCreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule, &csm_state);
}
VkResult result = DispatchCreateShaderModule(device, &csm_state.instrumented_create_info, pAllocator, pShaderModule);
for (auto intercept : layer_data->object_dispatch) {
auto lock = intercept->write_lock();
intercept->PostCallRecordCreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule, result, &csm_state);
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL AllocateDescriptorSets(
VkDevice device,
const VkDescriptorSetAllocateInfo* pAllocateInfo,
VkDescriptorSet* pDescriptorSets) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
cvdescriptorset::AllocateDescriptorSetsData ads_state(pAllocateInfo->descriptorSetCount);
for (auto intercept : layer_data->object_dispatch) {
auto lock = intercept->write_lock();
skip |= intercept->PreCallValidateAllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets, &ads_state);
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
}
for (auto intercept : layer_data->object_dispatch) {
auto lock = intercept->write_lock();
intercept->PreCallRecordAllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
}
VkResult result = DispatchAllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
for (auto intercept : layer_data->object_dispatch) {
auto lock = intercept->write_lock();
intercept->PostCallRecordAllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets, result, &ads_state);
}
return result;
}
// This API needs the ability to modify a down-chain parameter
VKAPI_ATTR VkResult VKAPI_CALL CreateBuffer(
VkDevice device,
const VkBufferCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkBuffer* pBuffer) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
create_buffer_api_state cb_state{};
cb_state.modified_create_info = *pCreateInfo;
for (auto intercept : layer_data->object_dispatch) {
auto lock = intercept->write_lock();
skip |= intercept->PreCallValidateCreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
}
for (auto intercept : layer_data->object_dispatch) {
auto lock = intercept->write_lock();
intercept->PreCallRecordCreateBuffer(device, pCreateInfo, pAllocator, pBuffer, &cb_state);
}
VkResult result = DispatchCreateBuffer(device, &cb_state.modified_create_info, pAllocator, pBuffer);
for (auto intercept : layer_data->object_dispatch) {
auto lock = intercept->write_lock();
intercept->PostCallRecordCreateBuffer(device, pCreateInfo, pAllocator, pBuffer, result);
}
return result;
}
// ValidationCache APIs do not dispatch
VKAPI_ATTR VkResult VKAPI_CALL CreateValidationCacheEXT(
VkDevice device,
const VkValidationCacheCreateInfoEXT* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkValidationCacheEXT* pValidationCache) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = VK_SUCCESS;
ValidationObject *validation_data = layer_data->GetValidationObject(layer_data->object_dispatch, LayerObjectTypeCoreValidation);
if (validation_data) {
auto lock = validation_data->write_lock();
result = validation_data->CoreLayerCreateValidationCacheEXT(device, pCreateInfo, pAllocator, pValidationCache);
}
return result;
}
VKAPI_ATTR void VKAPI_CALL DestroyValidationCacheEXT(
VkDevice device,
VkValidationCacheEXT validationCache,
const VkAllocationCallbacks* pAllocator) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
ValidationObject *validation_data = layer_data->GetValidationObject(layer_data->object_dispatch, LayerObjectTypeCoreValidation);
if (validation_data) {
auto lock = validation_data->write_lock();
validation_data->CoreLayerDestroyValidationCacheEXT(device, validationCache, pAllocator);
}
}
VKAPI_ATTR VkResult VKAPI_CALL MergeValidationCachesEXT(
VkDevice device,
VkValidationCacheEXT dstCache,
uint32_t srcCacheCount,
const VkValidationCacheEXT* pSrcCaches) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = VK_SUCCESS;
ValidationObject *validation_data = layer_data->GetValidationObject(layer_data->object_dispatch, LayerObjectTypeCoreValidation);
if (validation_data) {
auto lock = validation_data->write_lock();
result = validation_data->CoreLayerMergeValidationCachesEXT(device, dstCache, srcCacheCount, pSrcCaches);
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL GetValidationCacheDataEXT(
VkDevice device,
VkValidationCacheEXT validationCache,
size_t* pDataSize,
void* pData) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = VK_SUCCESS;
ValidationObject *validation_data = layer_data->GetValidationObject(layer_data->object_dispatch, LayerObjectTypeCoreValidation);
if (validation_data) {
auto lock = validation_data->write_lock();
result = validation_data->CoreLayerGetValidationCacheDataEXT(device, validationCache, pDataSize, pData);
}
return result;
}"""
# C++ snippet spliced into the generated validation-object class body:
# default (pass-through) virtual implementations of the validation-cache
# entry points, plus overloads of selected create calls that accept an
# extra state parameter for core_validation. String content is emitted
# verbatim into the generated source - do not edit its text casually.
inline_custom_validation_class_definitions = """
virtual VkResult CoreLayerCreateValidationCacheEXT(VkDevice device, const VkValidationCacheCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkValidationCacheEXT* pValidationCache) { return VK_SUCCESS; };
virtual void CoreLayerDestroyValidationCacheEXT(VkDevice device, VkValidationCacheEXT validationCache, const VkAllocationCallbacks* pAllocator) {};
virtual VkResult CoreLayerMergeValidationCachesEXT(VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount, const VkValidationCacheEXT* pSrcCaches) { return VK_SUCCESS; };
virtual VkResult CoreLayerGetValidationCacheDataEXT(VkDevice device, VkValidationCacheEXT validationCache, size_t* pDataSize, void* pData) { return VK_SUCCESS; };
// Allow additional state parameter for CreateGraphicsPipelines
virtual bool PreCallValidateCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, void* cgpl_state) {
return PreCallValidateCreateGraphicsPipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
};
virtual void PreCallRecordCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, void* cgpl_state) {
PreCallRecordCreateGraphicsPipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
};
virtual void PostCallRecordCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, VkResult result, void* cgpl_state) {
PostCallRecordCreateGraphicsPipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, result);
};
// Allow additional state parameter for CreateComputePipelines
virtual bool PreCallValidateCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, void* pipe_state) {
return PreCallValidateCreateComputePipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
};
virtual void PreCallRecordCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, void* ccpl_state) {
PreCallRecordCreateComputePipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
};
virtual void PostCallRecordCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, VkResult result, void* pipe_state) {
PostCallRecordCreateComputePipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, result);
};
// Allow additional state parameter for CreateRayTracingPipelinesNV
virtual bool PreCallValidateCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoNV* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, void* pipe_state) {
return PreCallValidateCreateRayTracingPipelinesNV(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
};
virtual void PreCallRecordCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoNV* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, void* ccpl_state) {
PreCallRecordCreateRayTracingPipelinesNV(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
};
virtual void PostCallRecordCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoNV* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, VkResult result, void* pipe_state) {
PostCallRecordCreateRayTracingPipelinesNV(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, result);
};
// Allow modification of a down-chain parameter for CreatePipelineLayout
virtual void PreCallRecordCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineLayout* pPipelineLayout, void *cpl_state) {
PreCallRecordCreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
};
// Enable the CreateShaderModule API to take an extra argument for state preservation and paramter modification
virtual bool PreCallValidateCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule, void* csm_state) {
return PreCallValidateCreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
};
virtual void PreCallRecordCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule, void* csm_state) {
PreCallRecordCreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
};
virtual void PostCallRecordCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule, VkResult result, void* csm_state) {
PostCallRecordCreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule, result);
};
// Allow AllocateDescriptorSets to use some local stack storage for performance purposes
virtual bool PreCallValidateAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo, VkDescriptorSet* pDescriptorSets, void* ads_state) {
return PreCallValidateAllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
};
virtual void PostCallRecordAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo, VkDescriptorSet* pDescriptorSets, VkResult result, void* ads_state) {
PostCallRecordAllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets, result);
};
// Allow modification of a down-chain parameter for CreateBuffer
virtual void PreCallRecordCreateBuffer(VkDevice device, const VkBufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBuffer* pBuffer, void *cb_state) {
PreCallRecordCreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
};
// Modify a parameter to CreateDevice
virtual void PreCallRecordCreateDevice(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDevice* pDevice, safe_VkDeviceCreateInfo *modified_create_info) {
PreCallRecordCreateDevice(physicalDevice, pCreateInfo, pAllocator, pDevice);
};
"""
# C++ snippet appended after the generated namespace: the exported
# loader-layer interface v0 entry points, implemented as thin wrappers
# around the vulkan_layer_chassis namespace. String content is emitted
# verbatim into the generated source - do not edit its text casually.
inline_custom_source_postamble = """
// loader-layer interface v0, just wrappers since there is only a layer
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
VkExtensionProperties *pProperties) {
return vulkan_layer_chassis::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
}
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(uint32_t *pCount,
VkLayerProperties *pProperties) {
return vulkan_layer_chassis::EnumerateInstanceLayerProperties(pCount, pProperties);
}
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
VkLayerProperties *pProperties) {
// the layer command handles VK_NULL_HANDLE just fine internally
assert(physicalDevice == VK_NULL_HANDLE);
return vulkan_layer_chassis::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
}
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
const char *pLayerName, uint32_t *pCount,
VkExtensionProperties *pProperties) {
// the layer command handles VK_NULL_HANDLE just fine internally
assert(physicalDevice == VK_NULL_HANDLE);
return vulkan_layer_chassis::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
}
VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
return vulkan_layer_chassis::GetDeviceProcAddr(dev, funcName);
}
VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
return vulkan_layer_chassis::GetInstanceProcAddr(instance, funcName);
}
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct) {
assert(pVersionStruct != NULL);
assert(pVersionStruct->sType == LAYER_NEGOTIATE_INTERFACE_STRUCT);
// Fill in the function pointers if our version is at least capable of having the structure contain them.
if (pVersionStruct->loaderLayerInterfaceVersion >= 2) {
pVersionStruct->pfnGetInstanceProcAddr = vkGetInstanceProcAddr;
pVersionStruct->pfnGetDeviceProcAddr = vkGetDeviceProcAddr;
pVersionStruct->pfnGetPhysicalDeviceProcAddr = nullptr;
}
return VK_SUCCESS;
}"""
def __init__(self,
errFile = sys.stderr,
warnFile = sys.stderr,
diagFile = sys.stdout):
OutputGenerator.__init__(self, errFile, warnFile, diagFile)
# Internal state - accumulators for different inner block text
self.sections = dict([(section, []) for section in self.ALL_SECTIONS])
self.intercepts = []
self.layer_factory = '' # String containing base layer factory class definition
# Check if the parameter passed in is a pointer to an array
def paramIsArray(self, param):
return param.attrib.get('len') is not None
# Check if the parameter passed in is a pointer
def paramIsPointer(self, param):
ispointer = False
for elem in param:
if elem.tag == 'type' and elem.tail is not None and '*' in elem.tail:
ispointer = True
return ispointer
#
#
def beginFile(self, genOpts):
OutputGenerator.beginFile(self, genOpts)
# Output Copyright
write(self.inline_copyright_message, file=self.outFile)
# Multiple inclusion protection
self.header = False
if (self.genOpts.filename and 'h' == self.genOpts.filename[-1]):
self.header = True
write('#pragma once', file=self.outFile)
self.newline()
if self.header:
write(self.inline_custom_header_preamble, file=self.outFile)
else:
write(self.inline_custom_source_preamble, file=self.outFile)
self.layer_factory += self.inline_custom_header_class_definition
#
#
def endFile(self):
# Finish C++ namespace and multiple inclusion protection
self.newline()
if not self.header:
# Record intercepted procedures
write('// Map of intercepted ApiName to its associated function data', file=self.outFile)
write('const std::unordered_map<std::string, function_data> name_to_funcptr_map = {', file=self.outFile)
write('\n'.join(self.intercepts), file=self.outFile)
write('};\n', file=self.outFile)
self.newline()
write('} // namespace vulkan_layer_chassis', file=self.outFile)
if self.header:
self.newline()
# Output Layer Factory Class Definitions
self.layer_factory += self.inline_custom_validation_class_definitions
self.layer_factory += '};\n\n'
self.layer_factory += 'extern small_unordered_map<void*, ValidationObject*, 2> layer_data_map;'
write(self.layer_factory, file=self.outFile)
else:
write(self.inline_custom_source_postamble, file=self.outFile)
# Finish processing in superclass
OutputGenerator.endFile(self)
def beginFeature(self, interface, emit):
# Start processing in superclass
OutputGenerator.beginFeature(self, interface, emit)
# Get feature extra protect
self.featureExtraProtect = GetFeatureProtect(interface)
# Accumulate includes, defines, types, enums, function pointer typedefs, end function prototypes separately for this
# feature. They're only printed in endFeature().
self.sections = dict([(section, []) for section in self.ALL_SECTIONS])
def endFeature(self):
# Actually write the interface to the output file.
if (self.emit):
self.newline()
# If type declarations are needed by other features based on this one, it may be necessary to suppress the ExtraProtect,
# or move it below the 'for section...' loop.
if (self.featureExtraProtect != None):
write('#ifdef', self.featureExtraProtect, file=self.outFile)
for section in self.TYPE_SECTIONS:
contents = self.sections[section]
if contents:
write('\n'.join(contents), file=self.outFile)
self.newline()
if (self.sections['command']):
write('\n'.join(self.sections['command']), end=u'', file=self.outFile)
self.newline()
if (self.featureExtraProtect != None):
write('#endif //', self.featureExtraProtect, file=self.outFile)
# Finish processing in superclass
OutputGenerator.endFeature(self)
#
# Append a definition to the specified section
def appendSection(self, section, text):
self.sections[section].append(text)
#
# Type generation
def genType(self, typeinfo, name, alias):
pass
#
# Struct (e.g. C "struct" type) generation. This is a special case of the <type> tag where the contents are
# interpreted as a set of <member> tags instead of freeform C type declarations. The <member> tags are just like <param>
# tags - they are a declaration of a struct or union member. Only simple member declarations are supported (no nested
# structs etc.)
def genStruct(self, typeinfo, typeName):
OutputGenerator.genStruct(self, typeinfo, typeName)
body = 'typedef ' + typeinfo.elem.get('category') + ' ' + typeName + ' {\n'
# paramdecl = self.makeCParamDecl(typeinfo.elem, self.genOpts.alignFuncParam)
for member in typeinfo.elem.findall('.//member'):
body += self.makeCParamDecl(member, self.genOpts.alignFuncParam)
body += ';\n'
body += '} ' + typeName + ';\n'
self.appendSection('struct', body)
#
# Group (e.g. C "enum" type) generation. These are concatenated together with other types.
def genGroup(self, groupinfo, groupName, alias):
pass
# Enumerant generation
# <enum> tags may specify their values in several ways, but are usually just integers.
def genEnum(self, enuminfo, name, alias):
pass
    #
    # Customize Cdecl for layer factory base class
    def BaseClassCdecl(self, elem, name):
        """Build the three base-class virtual hook declarations for one command.

        Derives an undecorated prototype from the command's PFN_vk typedef
        (the second string returned by makeCDecls), then returns the
        PreCallValidate / PreCallRecord / PostCallRecord virtuals as a
        single newline-separated string ready to splice into the factory
        class body.
        """
        raw = self.makeCDecls(elem)[1]
        # Toss everything before the undecorated name
        prototype = raw.split("VKAPI_PTR *PFN_vk")[1]
        # Drop the ')' that closed the typedef name, and turn the trailing
        # ';' into an inline empty body.
        prototype = prototype.replace(")", "", 1)
        prototype = prototype.replace(";", " {};")
        # Build up pre/post call virtual function declarations
        pre_call_validate = 'virtual bool PreCallValidate' + prototype
        # Validate hooks default to "no error found".
        pre_call_validate = pre_call_validate.replace("{}", " { return false; }")
        pre_call_record = 'virtual void PreCallRecord' + prototype
        post_call_record = 'virtual void PostCallRecord' + prototype
        resulttype = elem.find('proto/type')
        # Value-returning commands get the dispatch result appended as a
        # trailing parameter of the PostCallRecord hook.
        if resulttype.text == 'VkResult':
            post_call_record = post_call_record.replace(')', ', VkResult result)')
        elif resulttype.text == 'VkDeviceAddress':
            post_call_record = post_call_record.replace(')', ', VkDeviceAddress result)')
        return '        %s\n        %s\n        %s\n' % (pre_call_validate, pre_call_record, post_call_record)
    #
    # Command generation
    def genCmd(self, cmdinfo, name, alias):
        """Generate the layer-chassis output for a single Vulkan command.

        Header mode: declares the intercept prototype and appends the
        pre/post virtual hooks to the layer factory base class text.
        Source mode: records the command in the intercept table and emits
        the full wrapper body (pre-validate loop, pre-record loop, dispatch,
        post-record loop).
        """
        ignore_functions = [
            'vkEnumerateInstanceVersion',
            ]
        if name in ignore_functions:
            return
        if self.header: # In the header declare all intercepts
            self.appendSection('command', '')
            self.appendSection('command', self.makeCDecls(cmdinfo.elem)[0])
            if (self.featureExtraProtect != None):
                self.layer_factory += '#ifdef %s\n' % self.featureExtraProtect
            # Update base class with virtual function declarations
            if 'ValidationCache' not in name:
                self.layer_factory += self.BaseClassCdecl(cmdinfo.elem, name)
            if (self.featureExtraProtect != None):
                self.layer_factory += '#endif\n'
            return
        # Source mode: classify the command as instance- or device-level by
        # its first (dispatchable) parameter type.
        is_instance = 'false'
        dispatchable_type = cmdinfo.elem.find('param/type').text
        if dispatchable_type in ["VkPhysicalDevice", "VkInstance"] or name == 'vkCreateInstance':
            is_instance = 'true'
        if name in self.manual_functions:
            # A hand-written wrapper exists; only add the lookup-table entry.
            self.intercepts += [ '    {"%s", {%s, (void*)%s}},' % (name, is_instance, name[2:]) ]
            return
        # Record that the function will be intercepted
        if (self.featureExtraProtect != None):
            self.intercepts += [ '#ifdef %s' % self.featureExtraProtect ]
        self.intercepts += [ '    {"%s", {%s, (void*)%s}},' % (name, is_instance, name[2:]) ]
        if (self.featureExtraProtect != None):
            self.intercepts += [ '#endif' ]
        OutputGenerator.genCmd(self, cmdinfo, name, alias)
        #
        decls = self.makeCDecls(cmdinfo.elem)
        self.appendSection('command', '')
        self.appendSection('command', '%s {' % decls[0][:-1])
        # Setup common to call wrappers. First parameter is always dispatchable
        dispatchable_name = cmdinfo.elem.find('param/name').text
        self.appendSection('command', '    auto layer_data = GetLayerDataPtr(get_dispatch_key(%s), layer_data_map);' % (dispatchable_name))
        api_function_name = cmdinfo.elem.attrib.get('name')
        params = cmdinfo.elem.findall('param/name')
        paramstext = ', '.join([str(param.text) for param in params])
        API = api_function_name.replace('vk','Dispatch') + '('
        # Declare result variable, if any.
        # Maps a return type to the early-out statement emitted when
        # pre-call validation requests a skip.
        return_map = {
            'PFN_vkVoidFunction': 'return nullptr;',
            'VkBool32': 'return VK_FALSE;',
            'VkDeviceAddress': 'return 0;',
            'VkResult': 'return VK_ERROR_VALIDATION_FAILED_EXT;',
            'void': 'return;',
            'uint32_t': 'return 0;'
            }
        resulttype = cmdinfo.elem.find('proto/type')
        assignresult = ''
        if (resulttype.text != 'void'):
            assignresult = resulttype.text + ' result = '
        # Set up skip and locking
        self.appendSection('command', '    bool skip = false;')
        # Generate pre-call validation source code
        self.appendSection('command', '    %s' % self.precallvalidate_loop)
        self.appendSection('command', '        auto lock = intercept->write_lock();')
        self.appendSection('command', '        skip |= intercept->PreCallValidate%s(%s);' % (api_function_name[2:], paramstext))
        self.appendSection('command', '        if (skip) %s' % return_map[resulttype.text])
        self.appendSection('command', '    }')
        # Generate pre-call state recording source code
        self.appendSection('command', '    %s' % self.precallrecord_loop)
        self.appendSection('command', '        auto lock = intercept->write_lock();')
        self.appendSection('command', '        intercept->PreCallRecord%s(%s);' % (api_function_name[2:], paramstext))
        self.appendSection('command', '    }')
        # Insert pre-dispatch debug utils function call
        if name in self.pre_dispatch_debug_utils_functions:
            self.appendSection('command', '    %s' % self.pre_dispatch_debug_utils_functions[name])
        # Output dispatch (down-chain) function call
        self.appendSection('command', '    ' + assignresult + API + paramstext + ');')
        # Insert post-dispatch debug utils function call
        if name in self.post_dispatch_debug_utils_functions:
            self.appendSection('command', '    %s' % self.post_dispatch_debug_utils_functions[name])
        # Generate post-call object processing source code
        self.appendSection('command', '    %s' % self.postcallrecord_loop)
        returnparam = ''
        # Value-returning commands pass the dispatch result to PostCallRecord.
        if (resulttype.text == 'VkResult' or resulttype.text == 'VkDeviceAddress'):
            returnparam = ', result'
        self.appendSection('command', '        auto lock = intercept->write_lock();')
        self.appendSection('command', '        intercept->PostCallRecord%s(%s%s);' % (api_function_name[2:], paramstext, returnparam))
        self.appendSection('command', '    }')
        # Return result variable, if any.
        if (resulttype.text != 'void'):
            self.appendSection('command', '    return result;')
        self.appendSection('command', '}')
#
# Override makeProtoName to drop the "vk" prefix
def makeProtoName(self, name, tail):
return self.genOpts.apientry + name[2:] + tail
| 50.482441 | 292 | 0.710747 |
import os,re,sys
from generator import *
from common_codegen import *
apiname = None,
profile = None,
versions = '.*',
emitversions = '.*',
defaultExtensions = None,
addExtensions = None,
removeExtensions = None,
emitExtensions = None,
sortProcedure = regSortFeatures,
prefixText = "",
genFuncPointers = True,
protectFile = True,
protectFeature = True,
apicall = '',
apientry = '',
apientryp = '',
indentFuncProto = True,
indentFuncPointer = False,
alignFuncParam = 0,
helper_file_type = '',
expandEnumerants = True):
GeneratorOptions.__init__(self, conventions, filename, directory, apiname, profile,
versions, emitversions, defaultExtensions,
addExtensions, removeExtensions, emitExtensions, sortProcedure)
self.prefixText = prefixText
self.genFuncPointers = genFuncPointers
self.protectFile = protectFile
self.protectFeature = protectFeature
self.apicall = apicall
self.apientry = apientry
self.apientryp = apientryp
self.indentFuncProto = indentFuncProto
self.indentFuncPointer = indentFuncPointer
self.alignFuncParam = alignFuncParam
class LayerChassisOutputGenerator(OutputGenerator):
TYPE_SECTIONS = ['include', 'define', 'basetype', 'handle', 'enum',
'group', 'bitmask', 'funcpointer', 'struct']
ALL_SECTIONS = TYPE_SECTIONS + ['command']
manual_functions = [
'vkGetDeviceProcAddr',
'vkGetInstanceProcAddr',
'vkCreateDevice',
'vkDestroyDevice',
'vkCreateInstance',
'vkDestroyInstance',
'vkEnumerateInstanceLayerProperties',
'vkEnumerateInstanceExtensionProperties',
'vkEnumerateDeviceLayerProperties',
'vkEnumerateDeviceExtensionProperties',
'vkCreateGraphicsPipelines',
'vkCreateComputePipelines',
'vkCreateRayTracingPipelinesNV',
'vkCreatePipelineLayout',
'vkCreateShaderModule',
'vkAllocateDescriptorSets',
'vkCreateBuffer',
'vkCreateValidationCacheEXT',
'vkDestroyValidationCacheEXT',
'vkMergeValidationCachesEXT',
'vkGetValidationCacheDataEXT',
'vkGetPhysicalDeviceProcAddr',
]
alt_ret_codes = [
# Include functions here which must tolerate VK_INCOMPLETE as a return code
'vkEnumeratePhysicalDevices',
'vkEnumeratePhysicalDeviceGroupsKHR',
'vkGetValidationCacheDataEXT',
'vkGetPipelineCacheData',
'vkGetShaderInfoAMD',
'vkGetPhysicalDeviceDisplayPropertiesKHR',
'vkGetPhysicalDeviceDisplayProperties2KHR',
'vkGetPhysicalDeviceDisplayPlanePropertiesKHR',
'vkGetDisplayPlaneSupportedDisplaysKHR',
'vkGetDisplayModePropertiesKHR',
'vkGetDisplayModeProperties2KHR',
'vkGetPhysicalDeviceSurfaceFormatsKHR',
'vkGetPhysicalDeviceSurfacePresentModesKHR',
'vkGetPhysicalDevicePresentRectanglesKHR',
'vkGetPastPresentationTimingGOOGLE',
'vkGetSwapchainImagesKHR',
'vkEnumerateInstanceLayerProperties',
'vkEnumerateDeviceLayerProperties',
'vkEnumerateInstanceExtensionProperties',
'vkEnumerateDeviceExtensionProperties',
'vkGetPhysicalDeviceCalibrateableTimeDomainsEXT',
]
pre_dispatch_debug_utils_functions = {
'vkDebugMarkerSetObjectNameEXT' : 'layer_data->report_data->DebugReportSetMarkerObjectName(pNameInfo);',
'vkSetDebugUtilsObjectNameEXT' : 'layer_data->report_data->DebugReportSetUtilsObjectName(pNameInfo);',
'vkQueueBeginDebugUtilsLabelEXT' : 'BeginQueueDebugUtilsLabel(layer_data->report_data, queue, pLabelInfo);',
'vkQueueInsertDebugUtilsLabelEXT' : 'InsertQueueDebugUtilsLabel(layer_data->report_data, queue, pLabelInfo);',
}
post_dispatch_debug_utils_functions = {
'vkQueueEndDebugUtilsLabelEXT' : 'EndQueueDebugUtilsLabel(layer_data->report_data, queue);',
'vkCreateDebugReportCallbackEXT' : 'layer_create_report_callback(layer_data->report_data, false, pCreateInfo, pAllocator, pCallback);',
'vkDestroyDebugReportCallbackEXT' : 'layer_destroy_callback(layer_data->report_data, callback, pAllocator);',
'vkCreateDebugUtilsMessengerEXT' : 'layer_create_messenger_callback(layer_data->report_data, false, pCreateInfo, pAllocator, pMessenger);',
'vkDestroyDebugUtilsMessengerEXT' : 'layer_destroy_callback(layer_data->report_data, messenger, pAllocator);',
}
precallvalidate_loop = "for (auto intercept : layer_data->object_dispatch) {"
precallrecord_loop = precallvalidate_loop
postcallrecord_loop = "for (auto intercept : layer_data->object_dispatch) {"
inline_custom_header_preamble = """
#define NOMINMAX
#include <atomic>
#include <mutex>
#include <cinttypes>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unordered_map>
#include <unordered_set>
#include <algorithm>
#include <memory>
#include "vk_loader_platform.h"
#include "vulkan/vulkan.h"
#include "vk_layer_config.h"
#include "vk_layer_data.h"
#include "vk_layer_logging.h"
#include "vk_object_types.h"
#include "vulkan/vk_layer.h"
#include "vk_enum_string_helper.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"
#include "vulkan/vk_layer.h"
#include "vk_dispatch_table_helper.h"
#include "vk_extension_helper.h"
#include "vk_safe_struct.h"
#include "vk_typemap_helper.h"
extern std::atomic<uint64_t> global_unique_id;
// To avoid re-hashing unique ids on each use, we precompute the hash and store the
// hash's LSBs in the high 24 bits.
struct HashedUint64 {
static const int HASHED_UINT64_SHIFT = 40;
size_t operator()(const uint64_t &t) const { return t >> HASHED_UINT64_SHIFT; }
static uint64_t hash(uint64_t id) {
uint64_t h = (uint64_t)std::hash<uint64_t>()(id);
id |= h << HASHED_UINT64_SHIFT;
return id;
}
};
extern vl_concurrent_unordered_map<uint64_t, uint64_t, 4, HashedUint64> unique_id_mapping;
"""
inline_custom_header_class_definition = """
// Layer object type identifiers
enum LayerObjectTypeId {
LayerObjectTypeInstance, // Container for an instance dispatch object
LayerObjectTypeDevice, // Container for a device dispatch object
LayerObjectTypeThreading, // Instance or device threading layer object
LayerObjectTypeParameterValidation, // Instance or device parameter validation layer object
LayerObjectTypeObjectTracker, // Instance or device object tracker layer object
LayerObjectTypeCoreValidation, // Instance or device core validation layer object
LayerObjectTypeBestPractices, // Instance or device best practices layer object
LayerObjectTypeMaxEnum, // Max enum count
};
struct TEMPLATE_STATE {
VkDescriptorUpdateTemplateKHR desc_update_template;
safe_VkDescriptorUpdateTemplateCreateInfo create_info;
TEMPLATE_STATE(VkDescriptorUpdateTemplateKHR update_template, safe_VkDescriptorUpdateTemplateCreateInfo *pCreateInfo)
: desc_update_template(update_template), create_info(*pCreateInfo) {}
};
class LAYER_PHYS_DEV_PROPERTIES {
public:
VkPhysicalDeviceProperties properties;
std::vector<VkQueueFamilyProperties> queue_family_properties;
};
typedef enum ValidationCheckDisables {
VALIDATION_CHECK_DISABLE_COMMAND_BUFFER_STATE,
VALIDATION_CHECK_DISABLE_OBJECT_IN_USE,
VALIDATION_CHECK_DISABLE_IDLE_DESCRIPTOR_SET,
VALIDATION_CHECK_DISABLE_PUSH_CONSTANT_RANGE,
VALIDATION_CHECK_DISABLE_QUERY_VALIDATION,
VALIDATION_CHECK_DISABLE_IMAGE_LAYOUT_VALIDATION,
} ValidationCheckDisables;
// CHECK_DISABLED struct is a container for bools that can block validation checks from being performed.
// These bools are all "false" by default meaning that all checks are enabled. Enum values can be specified
// via the vk_layer_setting.txt config file or at CreateInstance time via the VK_EXT_validation_features extension
// that can selectively disable checks.
struct CHECK_DISABLED {
bool command_buffer_state; // Skip command buffer state validation
bool object_in_use; // Skip all object in_use checking
bool idle_descriptor_set; // Skip check to verify that descriptor set is not in-use
bool push_constant_range; // Skip push constant range checks
bool query_validation; // Disable all core validation query-related checks
bool image_layout_validation; // Disable image layout validation
bool object_tracking; // Disable object lifetime validation
bool core_checks; // Disable core validation checks
bool thread_safety; // Disable thread safety validation
bool stateless_checks; // Disable stateless validation checks
bool handle_wrapping; // Disable unique handles/handle wrapping
bool shader_validation; // Skip validation for shaders
void SetAll(bool value) { std::fill(&command_buffer_state, &shader_validation + 1, value); }
};
struct CHECK_ENABLED {
bool gpu_validation;
bool gpu_validation_reserve_binding_slot;
bool best_practices;
void SetAll(bool value) { std::fill(&gpu_validation, &gpu_validation_reserve_binding_slot + 1, value); }
};
// Layer chassis validation object base class definition
class ValidationObject {
public:
uint32_t api_version;
debug_report_data* report_data = nullptr;
VkLayerInstanceDispatchTable instance_dispatch_table;
VkLayerDispatchTable device_dispatch_table;
InstanceExtensions instance_extensions;
DeviceExtensions device_extensions = {};
CHECK_DISABLED disabled = {};
CHECK_ENABLED enabled = {};
VkInstance instance = VK_NULL_HANDLE;
VkPhysicalDevice physical_device = VK_NULL_HANDLE;
VkDevice device = VK_NULL_HANDLE;
LAYER_PHYS_DEV_PROPERTIES phys_dev_properties = {};
std::vector<ValidationObject*> object_dispatch;
LayerObjectTypeId container_type;
std::string layer_name = "CHASSIS";
// Constructor
ValidationObject(){};
// Destructor
virtual ~ValidationObject() {};
std::mutex validation_object_mutex;
virtual std::unique_lock<std::mutex> write_lock() {
return std::unique_lock<std::mutex>(validation_object_mutex);
}
ValidationObject* GetValidationObject(std::vector<ValidationObject*>& object_dispatch, LayerObjectTypeId object_type) {
for (auto validation_object : object_dispatch) {
if (validation_object->container_type == object_type) {
return validation_object;
}
}
return nullptr;
};
// Handle Wrapping Data
// Reverse map display handles
vl_concurrent_unordered_map<VkDisplayKHR, uint64_t, 0> display_id_reverse_mapping;
// Wrapping Descriptor Template Update structures requires access to the template createinfo structs
std::unordered_map<uint64_t, std::unique_ptr<TEMPLATE_STATE>> desc_template_createinfo_map;
struct SubpassesUsageStates {
std::unordered_set<uint32_t> subpasses_using_color_attachment;
std::unordered_set<uint32_t> subpasses_using_depthstencil_attachment;
};
// Uses unwrapped handles
std::unordered_map<VkRenderPass, SubpassesUsageStates> renderpasses_states;
// Map of wrapped swapchain handles to arrays of wrapped swapchain image IDs
// Each swapchain has an immutable list of wrapped swapchain image IDs -- always return these IDs if they exist
std::unordered_map<VkSwapchainKHR, std::vector<VkImage>> swapchain_wrapped_image_handle_map;
// Map of wrapped descriptor pools to set of wrapped descriptor sets allocated from each pool
std::unordered_map<VkDescriptorPool, std::unordered_set<VkDescriptorSet>> pool_descriptor_sets_map;
// Unwrap a handle.
template <typename HandleType>
HandleType Unwrap(HandleType wrappedHandle) {
auto iter = unique_id_mapping.find(reinterpret_cast<uint64_t const &>(wrappedHandle));
if (iter == unique_id_mapping.end())
return (HandleType)0;
return (HandleType)iter->second;
}
// Wrap a newly created handle with a new unique ID, and return the new ID.
template <typename HandleType>
HandleType WrapNew(HandleType newlyCreatedHandle) {
auto unique_id = global_unique_id++;
unique_id = HashedUint64::hash(unique_id);
unique_id_mapping.insert_or_assign(unique_id, reinterpret_cast<uint64_t const &>(newlyCreatedHandle));
return (HandleType)unique_id;
}
// Specialized handling for VkDisplayKHR. Adds an entry to enable reverse-lookup.
VkDisplayKHR WrapDisplay(VkDisplayKHR newlyCreatedHandle, ValidationObject *map_data) {
auto unique_id = global_unique_id++;
unique_id = HashedUint64::hash(unique_id);
unique_id_mapping.insert_or_assign(unique_id, reinterpret_cast<uint64_t const &>(newlyCreatedHandle));
map_data->display_id_reverse_mapping.insert_or_assign(newlyCreatedHandle, unique_id);
return (VkDisplayKHR)unique_id;
}
// VkDisplayKHR objects don't have a single point of creation, so we need to see if one already exists in the map before
// creating another.
VkDisplayKHR MaybeWrapDisplay(VkDisplayKHR handle, ValidationObject *map_data) {
// See if this display is already known
auto it = map_data->display_id_reverse_mapping.find(handle);
if (it != map_data->display_id_reverse_mapping.end()) return (VkDisplayKHR)it->second;
// Unknown, so wrap
return WrapDisplay(handle, map_data);
}
// Pre/post hook point declarations
"""
inline_copyright_message = """
// This file is ***GENERATED***. Do Not Edit.
// See layer_chassis_generator.py for modifications.
/* Copyright (c) 2015-2019 The Khronos Group Inc.
* Copyright (c) 2015-2019 Valve Corporation
* Copyright (c) 2015-2019 LunarG, Inc.
* Copyright (c) 2015-2019 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Author: Mark Lobodzinski <mark@lunarg.com>
*/"""
inline_custom_source_preamble = """
#include <string.h>
#include <mutex>
#define VALIDATION_ERROR_MAP_IMPL
#include "chassis.h"
#include "layer_chassis_dispatch.h"
small_unordered_map<void*, ValidationObject*, 2> layer_data_map;
// Global unique object identifier.
std::atomic<uint64_t> global_unique_id(1ULL);
// Map uniqueID to actual object handle. Accesses to the map itself are
// internally synchronized.
vl_concurrent_unordered_map<uint64_t, uint64_t, 4, HashedUint64> unique_id_mapping;
bool wrap_handles = true;
#define OBJECT_LAYER_NAME "VK_LAYER_KHRONOS_validation"
#define OBJECT_LAYER_DESCRIPTION "khronos_validation"
// Include layer validation object definitions
#include "object_lifetime_validation.h"
#include "thread_safety.h"
#include "stateless_validation.h"
#include "core_validation.h"
#include "best_practices.h"
namespace vulkan_layer_chassis {
using std::unordered_map;
static const VkLayerProperties global_layer = {
OBJECT_LAYER_NAME, VK_LAYER_API_VERSION, 1, "LunarG validation Layer",
};
static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION},
{VK_EXT_DEBUG_UTILS_EXTENSION_NAME, VK_EXT_DEBUG_UTILS_SPEC_VERSION}};
static const VkExtensionProperties device_extensions[] = {
{VK_EXT_VALIDATION_CACHE_EXTENSION_NAME, VK_EXT_VALIDATION_CACHE_SPEC_VERSION},
{VK_EXT_DEBUG_MARKER_EXTENSION_NAME, VK_EXT_DEBUG_MARKER_SPEC_VERSION},
};
typedef struct {
bool is_instance_api;
void* funcptr;
} function_data;
extern const std::unordered_map<std::string, function_data> name_to_funcptr_map;
// Manually written functions
// Check enabled instance extensions against supported instance extension whitelist
static void InstanceExtensionWhitelist(ValidationObject *layer_data, const VkInstanceCreateInfo *pCreateInfo, VkInstance instance) {
for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
// Check for recognized instance extensions
if (!white_list(pCreateInfo->ppEnabledExtensionNames[i], kInstanceExtensionNames)) {
log_msg(layer_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUIDUndefined,
"Instance Extension %s is not supported by this layer. Using this extension may adversely affect validation "
"results and/or produce undefined behavior.",
pCreateInfo->ppEnabledExtensionNames[i]);
}
}
}
// Check enabled device extensions against supported device extension whitelist
static void DeviceExtensionWhitelist(ValidationObject *layer_data, const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
// Check for recognized device extensions
if (!white_list(pCreateInfo->ppEnabledExtensionNames[i], kDeviceExtensionNames)) {
log_msg(layer_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUIDUndefined,
"Device Extension %s is not supported by this layer. Using this extension may adversely affect validation "
"results and/or produce undefined behavior.",
pCreateInfo->ppEnabledExtensionNames[i]);
}
}
}
// Process validation features, flags and settings specified through extensions, a layer settings file, or environment variables
static const std::unordered_map<std::string, VkValidationFeatureDisableEXT> VkValFeatureDisableLookup = {
{"VK_VALIDATION_FEATURE_DISABLE_SHADERS_EXT", VK_VALIDATION_FEATURE_DISABLE_SHADERS_EXT},
{"VK_VALIDATION_FEATURE_DISABLE_THREAD_SAFETY_EXT", VK_VALIDATION_FEATURE_DISABLE_THREAD_SAFETY_EXT},
{"VK_VALIDATION_FEATURE_DISABLE_API_PARAMETERS_EXT", VK_VALIDATION_FEATURE_DISABLE_API_PARAMETERS_EXT},
{"VK_VALIDATION_FEATURE_DISABLE_OBJECT_LIFETIMES_EXT", VK_VALIDATION_FEATURE_DISABLE_OBJECT_LIFETIMES_EXT},
{"VK_VALIDATION_FEATURE_DISABLE_CORE_CHECKS_EXT", VK_VALIDATION_FEATURE_DISABLE_CORE_CHECKS_EXT},
{"VK_VALIDATION_FEATURE_DISABLE_UNIQUE_HANDLES_EXT", VK_VALIDATION_FEATURE_DISABLE_UNIQUE_HANDLES_EXT},
{"VK_VALIDATION_FEATURE_DISABLE_ALL_EXT", VK_VALIDATION_FEATURE_DISABLE_ALL_EXT},
};
static const std::unordered_map<std::string, VkValidationFeatureEnableEXT> VkValFeatureEnableLookup = {
{"VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT", VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT},
{"VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_RESERVE_BINDING_SLOT_EXT", VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_RESERVE_BINDING_SLOT_EXT},
{"VK_VALIDATION_FEATURE_ENABLE_BEST_PRACTICES_EXT", VK_VALIDATION_FEATURE_ENABLE_BEST_PRACTICES_EXT},
};
static const std::unordered_map<std::string, ValidationCheckDisables> ValidationDisableLookup = {
{"VALIDATION_CHECK_DISABLE_COMMAND_BUFFER_STATE", VALIDATION_CHECK_DISABLE_COMMAND_BUFFER_STATE},
{"VALIDATION_CHECK_DISABLE_OBJECT_IN_USE", VALIDATION_CHECK_DISABLE_OBJECT_IN_USE},
{"VALIDATION_CHECK_DISABLE_IDLE_DESCRIPTOR_SET", VALIDATION_CHECK_DISABLE_IDLE_DESCRIPTOR_SET},
{"VALIDATION_CHECK_DISABLE_PUSH_CONSTANT_RANGE", VALIDATION_CHECK_DISABLE_PUSH_CONSTANT_RANGE},
{"VALIDATION_CHECK_DISABLE_QUERY_VALIDATION", VALIDATION_CHECK_DISABLE_QUERY_VALIDATION},
{"VALIDATION_CHECK_DISABLE_IMAGE_LAYOUT_VALIDATION", VALIDATION_CHECK_DISABLE_IMAGE_LAYOUT_VALIDATION},
};
// Set the local disable flag for the appropriate VALIDATION_CHECK_DISABLE enum
void SetValidationDisable(CHECK_DISABLED* disable_data, const ValidationCheckDisables disable_id) {
switch (disable_id) {
case VALIDATION_CHECK_DISABLE_COMMAND_BUFFER_STATE:
disable_data->command_buffer_state = true;
break;
case VALIDATION_CHECK_DISABLE_OBJECT_IN_USE:
disable_data->object_in_use = true;
break;
case VALIDATION_CHECK_DISABLE_IDLE_DESCRIPTOR_SET:
disable_data->idle_descriptor_set = true;
break;
case VALIDATION_CHECK_DISABLE_PUSH_CONSTANT_RANGE:
disable_data->push_constant_range = true;
break;
case VALIDATION_CHECK_DISABLE_QUERY_VALIDATION:
disable_data->query_validation = true;
break;
case VALIDATION_CHECK_DISABLE_IMAGE_LAYOUT_VALIDATION:
disable_data->image_layout_validation = true;
break;
default:
assert(true);
}
}
// Set the local disable flag for a single VK_VALIDATION_FEATURE_DISABLE_* flag
void SetValidationFeatureDisable(CHECK_DISABLED* disable_data, const VkValidationFeatureDisableEXT feature_disable) {
switch (feature_disable) {
case VK_VALIDATION_FEATURE_DISABLE_SHADERS_EXT:
disable_data->shader_validation = true;
break;
case VK_VALIDATION_FEATURE_DISABLE_THREAD_SAFETY_EXT:
disable_data->thread_safety = true;
break;
case VK_VALIDATION_FEATURE_DISABLE_API_PARAMETERS_EXT:
disable_data->stateless_checks = true;
break;
case VK_VALIDATION_FEATURE_DISABLE_OBJECT_LIFETIMES_EXT:
disable_data->object_tracking = true;
break;
case VK_VALIDATION_FEATURE_DISABLE_CORE_CHECKS_EXT:
disable_data->core_checks = true;
break;
case VK_VALIDATION_FEATURE_DISABLE_UNIQUE_HANDLES_EXT:
disable_data->handle_wrapping = true;
break;
case VK_VALIDATION_FEATURE_DISABLE_ALL_EXT:
// Set all disabled flags to true
disable_data->SetAll(true);
break;
default:
break;
}
}
// Set the local enable flag for a single VK_VALIDATION_FEATURE_ENABLE_* flag
void SetValidationFeatureEnable(CHECK_ENABLED *enable_data, const VkValidationFeatureEnableEXT feature_enable) {
switch (feature_enable) {
case VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT:
enable_data->gpu_validation = true;
break;
case VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_RESERVE_BINDING_SLOT_EXT:
enable_data->gpu_validation_reserve_binding_slot = true;
break;
case VK_VALIDATION_FEATURE_ENABLE_BEST_PRACTICES_EXT:
enable_data->best_practices = true;
break;
default:
break;
}
}
// Set the local disable flag for settings specified through the VK_EXT_validation_flags extension
void SetValidationFlags(CHECK_DISABLED* disables, const VkValidationFlagsEXT* val_flags_struct) {
for (uint32_t i = 0; i < val_flags_struct->disabledValidationCheckCount; ++i) {
switch (val_flags_struct->pDisabledValidationChecks[i]) {
case VK_VALIDATION_CHECK_SHADERS_EXT:
disables->shader_validation = true;
break;
case VK_VALIDATION_CHECK_ALL_EXT:
// Set all disabled flags to true
disables->SetAll(true);
break;
default:
break;
}
}
}
// Process Validation Features flags specified through the ValidationFeature extension
void SetValidationFeatures(CHECK_DISABLED *disable_data, CHECK_ENABLED *enable_data,
const VkValidationFeaturesEXT *val_features_struct) {
for (uint32_t i = 0; i < val_features_struct->disabledValidationFeatureCount; ++i) {
SetValidationFeatureDisable(disable_data, val_features_struct->pDisabledValidationFeatures[i]);
}
for (uint32_t i = 0; i < val_features_struct->enabledValidationFeatureCount; ++i) {
SetValidationFeatureEnable(enable_data, val_features_struct->pEnabledValidationFeatures[i]);
}
}
// Parse a delimiter-separated list of enable enum names and set the flag for
// each recognized VK_VALIDATION_FEATURE_ENABLE_* token.  Unknown tokens are
// silently skipped.
void SetLocalEnableSetting(std::string list_of_enables, std::string delimiter, CHECK_ENABLED* enables) {
    while (!list_of_enables.empty()) {
        const size_t split = list_of_enables.find(delimiter);
        std::string token;
        size_t consumed;
        if (split == std::string::npos) {
            // No more delimiters: the remainder is the final token.
            token = list_of_enables;
            consumed = list_of_enables.length();
        } else {
            token = list_of_enables.substr(0, split);
            consumed = split + delimiter.length();
        }
        if (token.find("VK_VALIDATION_FEATURE_ENABLE_") != std::string::npos) {
            auto lookup = VkValFeatureEnableLookup.find(token);
            if (lookup != VkValFeatureEnableLookup.end()) {
                SetValidationFeatureEnable(enables, lookup->second);
            }
        }
        list_of_enables.erase(0, consumed);
    }
}
// Parse a delimiter-separated list of disable enum names.  Each token may name
// either a VK_VALIDATION_FEATURE_DISABLE_* enum or one of the layer's own
// VALIDATION_CHECK_DISABLE_* values; unknown tokens are silently skipped.
void SetLocalDisableSetting(std::string list_of_disables, std::string delimiter, CHECK_DISABLED* disables) {
    while (!list_of_disables.empty()) {
        const size_t split = list_of_disables.find(delimiter);
        std::string token;
        size_t consumed;
        if (split == std::string::npos) {
            // No more delimiters: the remainder is the final token.
            token = list_of_disables;
            consumed = list_of_disables.length();
        } else {
            token = list_of_disables.substr(0, split);
            consumed = split + delimiter.length();
        }
        if (token.find("VK_VALIDATION_FEATURE_DISABLE_") != std::string::npos) {
            auto feature_it = VkValFeatureDisableLookup.find(token);
            if (feature_it != VkValFeatureDisableLookup.end()) {
                SetValidationFeatureDisable(disables, feature_it->second);
            }
        }
        if (token.find("VALIDATION_CHECK_DISABLE_") != std::string::npos) {
            auto check_it = ValidationDisableLookup.find(token);
            if (check_it != ValidationDisableLookup.end()) {
                SetValidationDisable(disables, check_it->second);
            }
        }
        list_of_disables.erase(0, consumed);
    }
}
// Gather enable/disable lists from the vk_layer_settings.txt config file
// (keys "<layer>.enables"/"<layer>.disables") and from the VK_LAYER_ENABLES /
// VK_LAYER_DISABLES environment variables, then apply them in that order.
void ProcessConfigAndEnvSettings(const char* layer_description, CHECK_ENABLED* enables, CHECK_DISABLED* disables) {
    const std::string layer_name(layer_description);
    const std::string config_enable_list = getLayerOption((layer_name + ".enables").c_str());
    const std::string env_enable_list = GetLayerEnvVar("VK_LAYER_ENABLES");
    const std::string config_disable_list = getLayerOption((layer_name + ".disables").c_str());
    const std::string env_disable_list = GetLayerEnvVar("VK_LAYER_DISABLES");
#if defined(_WIN32)
    const std::string env_delimiter = ";";
#else
    const std::string env_delimiter = ":";
#endif
    // Config-file entries are comma separated; environment lists use the
    // platform's PATH-style separator chosen above.
    SetLocalEnableSetting(config_enable_list, ",", enables);
    SetLocalEnableSetting(env_enable_list, env_delimiter, enables);
    SetLocalDisableSetting(config_disable_list, ",", disables);
    SetLocalDisableSetting(env_disable_list, env_delimiter, disables);
}
// Non-code-generated chassis API functions
// vkGetDeviceProcAddr intercept: return the layer trampoline for intercepted
// device-level APIs, otherwise forward the query down the chain.
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice device, const char *funcName) {
    auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    // Hide entry points whose parent extension is not enabled on this device.
    if (!ApiParentExtensionEnabled(funcName, &layer_data->device_extensions)) {
        return nullptr;
    }
    const auto &entry = name_to_funcptr_map.find(funcName);
    if (entry != name_to_funcptr_map.end()) {
        // Instance-level APIs must not be returned from vkGetDeviceProcAddr.
        return entry->second.is_instance_api ? nullptr : reinterpret_cast<PFN_vkVoidFunction>(entry->second.funcptr);
    }
    // Not intercepted by this layer: pass the query to the next layer/driver.
    auto &dispatch = layer_data->device_dispatch_table;
    return dispatch.GetDeviceProcAddr ? dispatch.GetDeviceProcAddr(device, funcName) : nullptr;
}
// vkGetInstanceProcAddr intercept: intercepted APIs resolve to the layer
// trampoline; everything else is forwarded down the chain.
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
    const auto &entry = name_to_funcptr_map.find(funcName);
    if (entry != name_to_funcptr_map.end()) {
        return reinterpret_cast<PFN_vkVoidFunction>(entry->second.funcptr);
    }
    auto layer_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
    auto &dispatch = layer_data->instance_dispatch_table;
    return dispatch.GetInstanceProcAddr ? dispatch.GetInstanceProcAddr(instance, funcName) : nullptr;
}
// Standard layer boilerplate: report this layer's single VkLayerProperties entry.
VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
}
// Device-level counterpart of EnumerateInstanceLayerProperties; the physical
// device is unused because the same single layer entry is reported regardless.
VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
                                                              VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
}
// Report the instance extensions this layer implements, but only when queried
// by this layer's own name; other queries are not ours to answer.
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
                                                                    VkExtensionProperties *pProperties);
VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
                                                                    VkExtensionProperties *pProperties) {
    const bool is_this_layer = (pLayerName != nullptr) && (strcmp(pLayerName, global_layer.layerName) == 0);
    if (!is_this_layer) {
        return VK_ERROR_LAYER_NOT_PRESENT;
    }
    return util_GetExtensionProperties(ARRAY_SIZE(instance_extensions), instance_extensions, pCount, pProperties);
}
// Report this layer's device extensions when queried by the layer's name;
// otherwise forward the query down the chain (which needs a valid device).
VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, const char *pLayerName,
                                                                  uint32_t *pCount, VkExtensionProperties *pProperties) {
    const bool is_this_layer = (pLayerName != nullptr) && (strcmp(pLayerName, global_layer.layerName) == 0);
    if (is_this_layer) {
        return util_GetExtensionProperties(ARRAY_SIZE(device_extensions), device_extensions, pCount, pProperties);
    }
    assert(physicalDevice);
    auto layer_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
    return layer_data->instance_dispatch_table.EnumerateDeviceExtensionProperties(physicalDevice, pLayerName, pCount, pProperties);
}
// Layer intercept for vkCreateInstance.  Builds the per-instance validation
// framework: reads enable/disable settings (pNext structs, config file,
// environment), constructs one validation object per checker, calls down the
// chain, then wires the framework and checker objects together.  Statement
// order is significant throughout -- later wiring depends on earlier state.
VKAPI_ATTR VkResult VKAPI_CALL CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                                              VkInstance *pInstance) {
    // Find this layer's link info in the loader chain, capture the next
    // GetInstanceProcAddr, then advance the chain for the next layer down.
    VkLayerInstanceCreateInfo* chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
    if (fpCreateInstance == NULL) return VK_ERROR_INITIALIZATION_FAILED;
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
    // Clamp the effective API version to either 1.0 or 1.1 based on the app's request.
    uint32_t specified_version = (pCreateInfo->pApplicationInfo ? pCreateInfo->pApplicationInfo->apiVersion : VK_API_VERSION_1_0);
    uint32_t api_version = (specified_version < VK_API_VERSION_1_1) ? VK_API_VERSION_1_0 : VK_API_VERSION_1_1;
    // Shared report_data: a pNext copy is kept so teardown can reactivate
    // creation-time debug callbacks (see DestroyInstance).
    auto report_data = new debug_report_data{};
    report_data->instance_pnext_chain = SafePnextCopy(pCreateInfo->pNext);
    // Activate callbacks passed on the pNext chain so messages emitted during
    // instance creation itself can reach the application.
    ActivateInstanceDebugCallbacks(report_data);
    // Gather enables/disables: VkValidationFeaturesEXT, then the older
    // VkValidationFlagsEXT, then config-file/environment settings.
    CHECK_ENABLED local_enables {};
    CHECK_DISABLED local_disables {};
    const auto *validation_features_ext = lvl_find_in_chain<VkValidationFeaturesEXT>(pCreateInfo->pNext);
    if (validation_features_ext) {
        SetValidationFeatures(&local_disables, &local_enables, validation_features_ext);
    }
    const auto *validation_flags_ext = lvl_find_in_chain<VkValidationFlagsEXT>(pCreateInfo->pNext);
    if (validation_flags_ext) {
        SetValidationFlags(&local_disables, validation_flags_ext);
    }
    ProcessConfigAndEnvSettings(OBJECT_LAYER_DESCRIPTION, &local_enables, &local_disables);
    // Create temporary dispatch vector for pre-calls until instance is created
    std::vector<ValidationObject*> local_object_dispatch;
    // Add VOs to dispatch vector. Order here will be the validation dispatch order!
    // NOTE(review): each checker object below is created unconditionally; when
    // its disable flag is set it is never added to object_dispatch, and
    // DestroyInstance deletes only object_dispatch members -- the disabled
    // object appears to leak.  TODO confirm against upstream.
    auto thread_checker = new ThreadSafety(nullptr);
    if (!local_disables.thread_safety) {
        local_object_dispatch.emplace_back(thread_checker);
    }
    thread_checker->container_type = LayerObjectTypeThreading;
    thread_checker->api_version = api_version;
    thread_checker->report_data = report_data;
    auto parameter_validation = new StatelessValidation;
    if (!local_disables.stateless_checks) {
        local_object_dispatch.emplace_back(parameter_validation);
    }
    parameter_validation->container_type = LayerObjectTypeParameterValidation;
    parameter_validation->api_version = api_version;
    parameter_validation->report_data = report_data;
    auto object_tracker = new ObjectLifetimes;
    if (!local_disables.object_tracking) {
        local_object_dispatch.emplace_back(object_tracker);
    }
    object_tracker->container_type = LayerObjectTypeObjectTracker;
    object_tracker->api_version = api_version;
    object_tracker->report_data = report_data;
    auto core_checks = new CoreChecks;
    if (!local_disables.core_checks) {
        local_object_dispatch.emplace_back(core_checks);
    }
    core_checks->container_type = LayerObjectTypeCoreValidation;
    core_checks->api_version = api_version;
    core_checks->report_data = report_data;
    // Best-practices is opt-in (enable flag), unlike the opt-out checkers above.
    auto best_practices = new BestPractices;
    if (local_enables.best_practices) {
        local_object_dispatch.emplace_back(best_practices);
    }
    best_practices->container_type = LayerObjectTypeBestPractices;
    best_practices->api_version = api_version;
    best_practices->report_data = report_data;
    // If handle wrapping is disabled via the ValidationFeatures extension, override build flag
    if (local_disables.handle_wrapping) {
        wrap_handles = false;
    }
    // Init dispatch array and call registration functions
    // NOTE(review): unlike the device-level loops, no per-intercept write_lock
    // is taken here -- presumably safe during single-threaded instance
    // creation; verify.
    for (auto intercept : local_object_dispatch) {
        intercept->PreCallValidateCreateInstance(pCreateInfo, pAllocator, pInstance);
    }
    for (auto intercept : local_object_dispatch) {
        intercept->PreCallRecordCreateInstance(pCreateInfo, pAllocator, pInstance);
    }
    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
    // NOTE(review): on failure, report_data (and its pnext copy) and the
    // checker objects allocated above are not freed here -- appears to leak;
    // verify against upstream.
    if (result != VK_SUCCESS) return result;
    // Populate the framework (per-instance layer data) and copy the shared
    // enable/disable/dispatch state into each checker object.
    auto framework = GetLayerDataPtr(get_dispatch_key(*pInstance), layer_data_map);
    framework->object_dispatch = local_object_dispatch;
    framework->container_type = LayerObjectTypeInstance;
    framework->disabled = local_disables;
    framework->enabled = local_enables;
    framework->instance = *pInstance;
    layer_init_instance_dispatch_table(*pInstance, &framework->instance_dispatch_table, fpGetInstanceProcAddr);
    framework->report_data = report_data;
    framework->api_version = api_version;
    framework->instance_extensions.InitFromInstanceCreateInfo(specified_version, pCreateInfo);
    layer_debug_messenger_actions(framework->report_data, pAllocator, OBJECT_LAYER_DESCRIPTION);
    object_tracker->instance_dispatch_table = framework->instance_dispatch_table;
    object_tracker->enabled = framework->enabled;
    object_tracker->disabled = framework->disabled;
    thread_checker->instance_dispatch_table = framework->instance_dispatch_table;
    thread_checker->enabled = framework->enabled;
    thread_checker->disabled = framework->disabled;
    parameter_validation->instance_dispatch_table = framework->instance_dispatch_table;
    parameter_validation->enabled = framework->enabled;
    parameter_validation->disabled = framework->disabled;
    core_checks->instance_dispatch_table = framework->instance_dispatch_table;
    core_checks->instance = *pInstance;
    core_checks->enabled = framework->enabled;
    core_checks->disabled = framework->disabled;
    // At instance level, core_checks is its own instance-state object.
    core_checks->instance_state = core_checks;
    best_practices->instance_dispatch_table = framework->instance_dispatch_table;
    best_practices->enabled = framework->enabled;
    best_practices->disabled = framework->disabled;
    for (auto intercept : framework->object_dispatch) {
        intercept->PostCallRecordCreateInstance(pCreateInfo, pAllocator, pInstance, result);
    }
    InstanceExtensionWhitelist(framework, pCreateInfo, *pInstance);
    // Creation-time callbacks are switched off; app-registered messengers remain.
    DeactivateInstanceDebugCallbacks(report_data);
    return result;
}
// Layer intercept for vkDestroyInstance: run Pre/Post intercept loops, destroy
// down-chain, then free all per-instance layer state.  The triple-quoted
// fragments below are Python template splices -- the enclosing generator
// expands each into a per-intercept dispatch loop header.
VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
    dispatch_key key = get_dispatch_key(instance);
    auto layer_data = GetLayerDataPtr(key, layer_data_map);
    // Re-activate creation-time callbacks (kept via instance_pnext_chain) so
    // teardown messages can still reach the application.
    ActivateInstanceDebugCallbacks(layer_data->report_data);
    """ + precallvalidate_loop + """
        auto lock = intercept->write_lock();
        intercept->PreCallValidateDestroyInstance(instance, pAllocator);
    }
    """ + precallrecord_loop + """
        auto lock = intercept->write_lock();
        intercept->PreCallRecordDestroyInstance(instance, pAllocator);
    }
    layer_data->instance_dispatch_table.DestroyInstance(instance, pAllocator);
    """ + postcallrecord_loop + """
        auto lock = intercept->write_lock();
        intercept->PostCallRecordDestroyInstance(instance, pAllocator);
    }
    // Tear down debug machinery, delete every registered validation object,
    // then release the per-instance layer data itself.
    DeactivateInstanceDebugCallbacks(layer_data->report_data);
    FreePnextChain(layer_data->report_data->instance_pnext_chain);
    layer_debug_utils_destroy_instance(layer_data->report_data);
    for (auto item = layer_data->object_dispatch.begin(); item != layer_data->object_dispatch.end(); item++) {
        delete *item;
    }
    FreeLayerDataPtr(key, layer_data_map);
}
// Layer intercept for vkCreateDevice.  Mirrors CreateInstance: resolve the
// down-chain entry points, compute the effective API version and device
// extensions, run the instance-level Pre intercepts (which may modify the
// create info), call down, then build the per-device validation objects and
// wire shared state into each.  Statement order is significant.
VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
    auto instance_interceptor = GetLayerDataPtr(get_dispatch_key(gpu), layer_data_map);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(instance_interceptor->instance, "vkCreateDevice");
    if (fpCreateDevice == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }
    // Advance the loader chain for the next layer down.
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
    // Get physical device limits for device
    VkPhysicalDeviceProperties device_properties = {};
    instance_interceptor->instance_dispatch_table.GetPhysicalDeviceProperties(gpu, &device_properties);
    // Setup the validation tables based on the application API version from the instance and the capabilities of the device driver
    uint32_t effective_api_version = std::min(device_properties.apiVersion, instance_interceptor->api_version);
    DeviceExtensions device_extensions = {};
    device_extensions.InitFromDeviceCreateInfo(&instance_interceptor->instance_extensions, effective_api_version, pCreateInfo);
    // Instance-level intercepts need the device extension info during the Pre calls below.
    for (auto item : instance_interceptor->object_dispatch) {
        item->device_extensions = device_extensions;
    }
    // Deep copy the create info so PreCallRecord intercepts can modify it;
    // the modified copy is what gets dispatched down-chain.
    safe_VkDeviceCreateInfo modified_create_info(pCreateInfo);
    bool skip = false;
    for (auto intercept : instance_interceptor->object_dispatch) {
        auto lock = intercept->write_lock();
        skip |= intercept->PreCallValidateCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
        if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    for (auto intercept : instance_interceptor->object_dispatch) {
        auto lock = intercept->write_lock();
        intercept->PreCallRecordCreateDevice(gpu, pCreateInfo, pAllocator, pDevice, &modified_create_info);
    }
    VkResult result = fpCreateDevice(gpu, reinterpret_cast<VkDeviceCreateInfo *>(&modified_create_info), pAllocator, pDevice);
    if (result != VK_SUCCESS) {
        return result;
    }
    auto device_interceptor = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
    device_interceptor->container_type = LayerObjectTypeDevice;
    // Save local info in device object
    device_interceptor->phys_dev_properties.properties = device_properties;
    device_interceptor->api_version = device_interceptor->device_extensions.InitFromDeviceCreateInfo(
        &instance_interceptor->instance_extensions, effective_api_version, pCreateInfo);
    device_interceptor->device_extensions = device_extensions;
    layer_init_device_dispatch_table(*pDevice, &device_interceptor->device_dispatch_table, fpGetDeviceProcAddr);
    device_interceptor->device = *pDevice;
    device_interceptor->physical_device = gpu;
    device_interceptor->instance = instance_interceptor->instance;
    device_interceptor->report_data = instance_interceptor->report_data;
    // Note that this defines the order in which the layer validation objects are called
    // NOTE(review): as at instance level, each object is created even when its
    // disable flag is set, but only object_dispatch members are deleted in
    // DestroyDevice -- disabled objects appear to leak.  TODO confirm.
    auto thread_safety = new ThreadSafety(reinterpret_cast<ThreadSafety *>(instance_interceptor->GetValidationObject(instance_interceptor->object_dispatch, LayerObjectTypeThreading)));
    thread_safety->container_type = LayerObjectTypeThreading;
    if (!instance_interceptor->disabled.thread_safety) {
        device_interceptor->object_dispatch.emplace_back(thread_safety);
    }
    auto stateless_validation = new StatelessValidation;
    stateless_validation->container_type = LayerObjectTypeParameterValidation;
    if (!instance_interceptor->disabled.stateless_checks) {
        device_interceptor->object_dispatch.emplace_back(stateless_validation);
    }
    auto object_tracker = new ObjectLifetimes;
    object_tracker->container_type = LayerObjectTypeObjectTracker;
    if (!instance_interceptor->disabled.object_tracking) {
        device_interceptor->object_dispatch.emplace_back(object_tracker);
    }
    auto core_checks = new CoreChecks;
    core_checks->container_type = LayerObjectTypeCoreValidation;
    // Device-level core_checks keeps a pointer back to the instance-level one.
    core_checks->instance_state = reinterpret_cast<CoreChecks *>(
        core_checks->GetValidationObject(instance_interceptor->object_dispatch, LayerObjectTypeCoreValidation));
    if (!instance_interceptor->disabled.core_checks) {
        device_interceptor->object_dispatch.emplace_back(core_checks);
    }
    auto best_practices = new BestPractices;
    best_practices->container_type = LayerObjectTypeBestPractices;
    best_practices->instance_state = reinterpret_cast<BestPractices *>(
        best_practices->GetValidationObject(instance_interceptor->object_dispatch, LayerObjectTypeBestPractices));
    if (instance_interceptor->enabled.best_practices) {
        device_interceptor->object_dispatch.emplace_back(best_practices);
    }
    // Set per-intercept common data items
    for (auto dev_intercept : device_interceptor->object_dispatch) {
        dev_intercept->device = *pDevice;
        dev_intercept->physical_device = gpu;
        dev_intercept->instance = instance_interceptor->instance;
        dev_intercept->report_data = device_interceptor->report_data;
        dev_intercept->device_dispatch_table = device_interceptor->device_dispatch_table;
        dev_intercept->api_version = device_interceptor->api_version;
        dev_intercept->disabled = instance_interceptor->disabled;
        dev_intercept->enabled = instance_interceptor->enabled;
        dev_intercept->instance_dispatch_table = instance_interceptor->instance_dispatch_table;
        dev_intercept->instance_extensions = instance_interceptor->instance_extensions;
        dev_intercept->device_extensions = device_interceptor->device_extensions;
    }
    // Post calls still run on the instance-level dispatch list.
    for (auto intercept : instance_interceptor->object_dispatch) {
        auto lock = intercept->write_lock();
        intercept->PostCallRecordCreateDevice(gpu, pCreateInfo, pAllocator, pDevice, result);
    }
    DeviceExtensionWhitelist(device_interceptor, pCreateInfo, *pDevice);
    return result;
}
// Layer intercept for vkDestroyDevice: run Pre/Post intercept loops, destroy
// down-chain, then free the per-device layer state.  The triple-quoted
// fragments are Python template splices expanded by the enclosing generator
// into per-intercept dispatch loop headers.
VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
    dispatch_key key = get_dispatch_key(device);
    auto layer_data = GetLayerDataPtr(key, layer_data_map);
    """ + precallvalidate_loop + """
        auto lock = intercept->write_lock();
        intercept->PreCallValidateDestroyDevice(device, pAllocator);
    }
    """ + precallrecord_loop + """
        auto lock = intercept->write_lock();
        intercept->PreCallRecordDestroyDevice(device, pAllocator);
    }
    layer_data->device_dispatch_table.DestroyDevice(device, pAllocator);
    """ + postcallrecord_loop + """
        auto lock = intercept->write_lock();
        intercept->PostCallRecordDestroyDevice(device, pAllocator);
    }
    // Delete every registered validation object, then the layer data itself.
    for (auto item = layer_data->object_dispatch.begin(); item != layer_data->object_dispatch.end(); item++) {
        delete *item;
    }
    FreeLayerDataPtr(key, layer_data_map);
}
// Special-case APIs for which core_validation needs custom parameter lists and/or modifies parameters
// vkCreateGraphicsPipelines: each validation object gets a scratch state slot
// (indexed by its container type).  Core validation may substitute its own
// create-info array via its slot; the substituted array is what gets
// dispatched, while Post calls still receive the application's originals.
VKAPI_ATTR VkResult VKAPI_CALL CreateGraphicsPipelines(
    VkDevice device,
    VkPipelineCache pipelineCache,
    uint32_t createInfoCount,
    const VkGraphicsPipelineCreateInfo* pCreateInfos,
    const VkAllocationCallbacks* pAllocator,
    VkPipeline* pPipelines) {
    auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    create_graphics_pipeline_api_state cgpl_state[LayerObjectTypeMaxEnum]{};
    bool skip = false;
    for (auto vo : layer_data->object_dispatch) {
        auto &state = cgpl_state[vo->container_type];
        state.pCreateInfos = pCreateInfos;
        auto lock = vo->write_lock();
        skip |= vo->PreCallValidateCreateGraphicsPipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, &state);
        if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    for (auto vo : layer_data->object_dispatch) {
        auto lock = vo->write_lock();
        vo->PreCallRecordCreateGraphicsPipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, &(cgpl_state[vo->container_type]));
    }
    // A null slot means core validation is absent; fall back to the originals.
    const VkGraphicsPipelineCreateInfo* dispatch_infos = cgpl_state[LayerObjectTypeCoreValidation].pCreateInfos;
    if (dispatch_infos == nullptr) dispatch_infos = pCreateInfos;
    VkResult result = DispatchCreateGraphicsPipelines(device, pipelineCache, createInfoCount, dispatch_infos, pAllocator, pPipelines);
    for (auto vo : layer_data->object_dispatch) {
        auto lock = vo->write_lock();
        vo->PostCallRecordCreateGraphicsPipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, result, &(cgpl_state[vo->container_type]));
    }
    return result;
}
// This API saves some core_validation pipeline state on the stack for performance purposes.
// vkCreateComputePipelines mirrors CreateGraphicsPipelines: per-container-type
// scratch state, with core validation able to substitute the create-info array.
VKAPI_ATTR VkResult VKAPI_CALL CreateComputePipelines(
    VkDevice device,
    VkPipelineCache pipelineCache,
    uint32_t createInfoCount,
    const VkComputePipelineCreateInfo* pCreateInfos,
    const VkAllocationCallbacks* pAllocator,
    VkPipeline* pPipelines) {
    auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    // One scratch slot per validation-object type; zero-initialized so an
    // absent core-validation object leaves its pCreateInfos null (checked below).
    create_compute_pipeline_api_state ccpl_state[LayerObjectTypeMaxEnum]{};
    for (auto intercept : layer_data->object_dispatch) {
        ccpl_state[intercept->container_type].pCreateInfos = pCreateInfos;
        auto lock = intercept->write_lock();
        skip |= intercept->PreCallValidateCreateComputePipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, &(ccpl_state[intercept->container_type]));
        if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    for (auto intercept : layer_data->object_dispatch) {
        auto lock = intercept->write_lock();
        intercept->PreCallRecordCreateComputePipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, &(ccpl_state[intercept->container_type]));
    }
    // Dispatch with core validation's (possibly substituted) create-info array.
    auto usepCreateInfos = (!ccpl_state[LayerObjectTypeCoreValidation].pCreateInfos) ? pCreateInfos : ccpl_state[LayerObjectTypeCoreValidation].pCreateInfos;
    VkResult result = DispatchCreateComputePipelines(device, pipelineCache, createInfoCount, usepCreateInfos, pAllocator, pPipelines);
    for (auto intercept : layer_data->object_dispatch) {
        auto lock = intercept->write_lock();
        intercept->PostCallRecordCreateComputePipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, result, &(ccpl_state[intercept->container_type]));
    }
    return result;
}
// vkCreateRayTracingPipelinesNV: same per-container-type scratch-state pattern
// as the other pipeline creators, except the dispatch call uses the original
// pCreateInfos directly (no substitution from the core-validation slot here).
VKAPI_ATTR VkResult VKAPI_CALL CreateRayTracingPipelinesNV(
    VkDevice device,
    VkPipelineCache pipelineCache,
    uint32_t createInfoCount,
    const VkRayTracingPipelineCreateInfoNV* pCreateInfos,
    const VkAllocationCallbacks* pAllocator,
    VkPipeline* pPipelines) {
    auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    create_ray_tracing_pipeline_api_state crtpl_state[LayerObjectTypeMaxEnum]{};
    for (auto intercept : layer_data->object_dispatch) {
        crtpl_state[intercept->container_type].pCreateInfos = pCreateInfos;
        auto lock = intercept->write_lock();
        skip |= intercept->PreCallValidateCreateRayTracingPipelinesNV(device, pipelineCache, createInfoCount, pCreateInfos,
                                                                      pAllocator, pPipelines, &(crtpl_state[intercept->container_type]));
        if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    for (auto intercept : layer_data->object_dispatch) {
        auto lock = intercept->write_lock();
        intercept->PreCallRecordCreateRayTracingPipelinesNV(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator,
                                                            pPipelines, &(crtpl_state[intercept->container_type]));
    }
    VkResult result = DispatchCreateRayTracingPipelinesNV(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
    for (auto intercept : layer_data->object_dispatch) {
        auto lock = intercept->write_lock();
        intercept->PostCallRecordCreateRayTracingPipelinesNV(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator,
                                                             pPipelines, result, &(crtpl_state[intercept->container_type]));
    }
    return result;
}
// vkCreatePipelineLayout: a mutable copy of the create info is threaded
// through the record intercepts, and the (possibly modified) copy is what
// gets dispatched down-chain.
VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineLayout(
    VkDevice device,
    const VkPipelineLayoutCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkPipelineLayout* pPipelineLayout) {
    auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    create_pipeline_layout_api_state cpl_state{};
    cpl_state.modified_create_info = *pCreateInfo;
    bool skip = false;
    for (auto vo : layer_data->object_dispatch) {
        auto lock = vo->write_lock();
        skip |= vo->PreCallValidateCreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
        if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    for (auto vo : layer_data->object_dispatch) {
        auto lock = vo->write_lock();
        vo->PreCallRecordCreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout, &cpl_state);
    }
    const VkResult result = DispatchCreatePipelineLayout(device, &cpl_state.modified_create_info, pAllocator, pPipelineLayout);
    for (auto vo : layer_data->object_dispatch) {
        auto lock = vo->write_lock();
        vo->PostCallRecordCreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout, result);
    }
    return result;
}
// This API needs some local stack data for performance reasons and also may modify a parameter
// vkCreateShaderModule: csm_state carries a mutable copy of the create info
// (instrumented_create_info) through the intercepts; the copy is dispatched.
VKAPI_ATTR VkResult VKAPI_CALL CreateShaderModule(
    VkDevice device,
    const VkShaderModuleCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkShaderModule* pShaderModule) {
    auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    create_shader_module_api_state csm_state{};
    csm_state.instrumented_create_info = *pCreateInfo;
    for (auto intercept : layer_data->object_dispatch) {
        auto lock = intercept->write_lock();
        skip |= intercept->PreCallValidateCreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule, &csm_state);
        if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    for (auto intercept : layer_data->object_dispatch) {
        auto lock = intercept->write_lock();
        intercept->PreCallRecordCreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule, &csm_state);
    }
    // Dispatch the (possibly instrumented) copy, not the application's struct.
    VkResult result = DispatchCreateShaderModule(device, &csm_state.instrumented_create_info, pAllocator, pShaderModule);
    for (auto intercept : layer_data->object_dispatch) {
        auto lock = intercept->write_lock();
        intercept->PostCallRecordCreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule, result, &csm_state);
    }
    return result;
}
// vkAllocateDescriptorSets: ads_state (sized by descriptorSetCount) is shared
// between the Validate and Post phases; note PreCallRecord does not take it.
VKAPI_ATTR VkResult VKAPI_CALL AllocateDescriptorSets(
    VkDevice device,
    const VkDescriptorSetAllocateInfo* pAllocateInfo,
    VkDescriptorSet* pDescriptorSets) {
    auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    cvdescriptorset::AllocateDescriptorSetsData ads_state(pAllocateInfo->descriptorSetCount);
    for (auto intercept : layer_data->object_dispatch) {
        auto lock = intercept->write_lock();
        skip |= intercept->PreCallValidateAllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets, &ads_state);
        if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    for (auto intercept : layer_data->object_dispatch) {
        auto lock = intercept->write_lock();
        intercept->PreCallRecordAllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
    }
    VkResult result = DispatchAllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
    for (auto intercept : layer_data->object_dispatch) {
        auto lock = intercept->write_lock();
        intercept->PostCallRecordAllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets, result, &ads_state);
    }
    return result;
}
// vkCreateBuffer: a mutable copy of the create info is handed to the record
// intercepts; the (possibly modified) copy is what gets dispatched down-chain.
VKAPI_ATTR VkResult VKAPI_CALL CreateBuffer(
    VkDevice device,
    const VkBufferCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkBuffer* pBuffer) {
    auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    create_buffer_api_state cb_state{};
    cb_state.modified_create_info = *pCreateInfo;
    bool skip = false;
    for (auto vo : layer_data->object_dispatch) {
        auto lock = vo->write_lock();
        skip |= vo->PreCallValidateCreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
        if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    for (auto vo : layer_data->object_dispatch) {
        auto lock = vo->write_lock();
        vo->PreCallRecordCreateBuffer(device, pCreateInfo, pAllocator, pBuffer, &cb_state);
    }
    const VkResult result = DispatchCreateBuffer(device, &cb_state.modified_create_info, pAllocator, pBuffer);
    for (auto vo : layer_data->object_dispatch) {
        auto lock = vo->write_lock();
        vo->PostCallRecordCreateBuffer(device, pCreateInfo, pAllocator, pBuffer, result);
    }
    return result;
}
// Validation-cache objects are owned entirely by core validation; nothing is
// dispatched down the chain.  When core validation is disabled this is a
// successful no-op.
VKAPI_ATTR VkResult VKAPI_CALL CreateValidationCacheEXT(
    VkDevice device,
    const VkValidationCacheCreateInfoEXT* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkValidationCacheEXT* pValidationCache) {
    auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    ValidationObject *core_check_obj = layer_data->GetValidationObject(layer_data->object_dispatch, LayerObjectTypeCoreValidation);
    if (!core_check_obj) return VK_SUCCESS;
    auto lock = core_check_obj->write_lock();
    return core_check_obj->CoreLayerCreateValidationCacheEXT(device, pCreateInfo, pAllocator, pValidationCache);
}
// Handled entirely by core validation; no down-chain dispatch.  A no-op when
// core validation is disabled.
VKAPI_ATTR void VKAPI_CALL DestroyValidationCacheEXT(
    VkDevice device,
    VkValidationCacheEXT validationCache,
    const VkAllocationCallbacks* pAllocator) {
    auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    ValidationObject *core_check_obj = layer_data->GetValidationObject(layer_data->object_dispatch, LayerObjectTypeCoreValidation);
    if (!core_check_obj) return;
    auto lock = core_check_obj->write_lock();
    core_check_obj->CoreLayerDestroyValidationCacheEXT(device, validationCache, pAllocator);
}
// vkMergeValidationCachesEXT: serviced entirely by core validation (no
// down-chain dispatch); returns VK_SUCCESS when core validation is disabled.
VKAPI_ATTR VkResult VKAPI_CALL MergeValidationCachesEXT(
    VkDevice device,
    VkValidationCacheEXT dstCache,
    uint32_t srcCacheCount,
    const VkValidationCacheEXT* pSrcCaches) {
    auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_SUCCESS;
    ValidationObject *validation_data = layer_data->GetValidationObject(layer_data->object_dispatch, LayerObjectTypeCoreValidation);
    if (validation_data) {
        auto lock = validation_data->write_lock();
        result = validation_data->CoreLayerMergeValidationCachesEXT(device, dstCache, srcCacheCount, pSrcCaches);
    }
    return result;
}
// vkGetValidationCacheDataEXT: serviced entirely by core validation (no
// down-chain dispatch); returns VK_SUCCESS when core validation is disabled.
VKAPI_ATTR VkResult VKAPI_CALL GetValidationCacheDataEXT(
    VkDevice device,
    VkValidationCacheEXT validationCache,
    size_t* pDataSize,
    void* pData) {
    auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_SUCCESS;
    ValidationObject *validation_data = layer_data->GetValidationObject(layer_data->object_dispatch, LayerObjectTypeCoreValidation);
    if (validation_data) {
        auto lock = validation_data->write_lock();
        result = validation_data->CoreLayerGetValidationCacheDataEXT(device, validationCache, pDataSize, pData);
    }
    return result;
}"""
# C++ text pasted verbatim into the generated validation-object class definition:
# default no-op virtual hooks for the validation-cache entry points plus overloads
# that accept an extra opaque state pointer for create-pipeline/shader/descriptor
# calls.  This is generator OUTPUT — do not edit the string contents casually.
inline_custom_validation_class_definitions = """
virtual VkResult CoreLayerCreateValidationCacheEXT(VkDevice device, const VkValidationCacheCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkValidationCacheEXT* pValidationCache) { return VK_SUCCESS; };
virtual void CoreLayerDestroyValidationCacheEXT(VkDevice device, VkValidationCacheEXT validationCache, const VkAllocationCallbacks* pAllocator) {};
virtual VkResult CoreLayerMergeValidationCachesEXT(VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount, const VkValidationCacheEXT* pSrcCaches) { return VK_SUCCESS; };
virtual VkResult CoreLayerGetValidationCacheDataEXT(VkDevice device, VkValidationCacheEXT validationCache, size_t* pDataSize, void* pData) { return VK_SUCCESS; };
// Allow additional state parameter for CreateGraphicsPipelines
virtual bool PreCallValidateCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, void* cgpl_state) {
return PreCallValidateCreateGraphicsPipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
};
virtual void PreCallRecordCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, void* cgpl_state) {
PreCallRecordCreateGraphicsPipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
};
virtual void PostCallRecordCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, VkResult result, void* cgpl_state) {
PostCallRecordCreateGraphicsPipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, result);
};
// Allow additional state parameter for CreateComputePipelines
virtual bool PreCallValidateCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, void* pipe_state) {
return PreCallValidateCreateComputePipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
};
virtual void PreCallRecordCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, void* ccpl_state) {
PreCallRecordCreateComputePipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
};
virtual void PostCallRecordCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, VkResult result, void* pipe_state) {
PostCallRecordCreateComputePipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, result);
};
// Allow additional state parameter for CreateRayTracingPipelinesNV
virtual bool PreCallValidateCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoNV* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, void* pipe_state) {
return PreCallValidateCreateRayTracingPipelinesNV(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
};
virtual void PreCallRecordCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoNV* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, void* ccpl_state) {
PreCallRecordCreateRayTracingPipelinesNV(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
};
virtual void PostCallRecordCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoNV* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, VkResult result, void* pipe_state) {
PostCallRecordCreateRayTracingPipelinesNV(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, result);
};
// Allow modification of a down-chain parameter for CreatePipelineLayout
virtual void PreCallRecordCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineLayout* pPipelineLayout, void *cpl_state) {
PreCallRecordCreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
};
// Enable the CreateShaderModule API to take an extra argument for state preservation and paramter modification
virtual bool PreCallValidateCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule, void* csm_state) {
return PreCallValidateCreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
};
virtual void PreCallRecordCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule, void* csm_state) {
PreCallRecordCreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
};
virtual void PostCallRecordCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule, VkResult result, void* csm_state) {
PostCallRecordCreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule, result);
};
// Allow AllocateDescriptorSets to use some local stack storage for performance purposes
virtual bool PreCallValidateAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo, VkDescriptorSet* pDescriptorSets, void* ads_state) {
return PreCallValidateAllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
};
virtual void PostCallRecordAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo, VkDescriptorSet* pDescriptorSets, VkResult result, void* ads_state) {
PostCallRecordAllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets, result);
};
// Allow modification of a down-chain parameter for CreateBuffer
virtual void PreCallRecordCreateBuffer(VkDevice device, const VkBufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBuffer* pBuffer, void *cb_state) {
PreCallRecordCreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
};
// Modify a parameter to CreateDevice
virtual void PreCallRecordCreateDevice(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDevice* pDevice, safe_VkDeviceCreateInfo *modified_create_info) {
PreCallRecordCreateDevice(physicalDevice, pCreateInfo, pAllocator, pDevice);
};
"""
# C++ text appended verbatim to the generated SOURCE file: the loader-layer
# interface v0 export wrappers that forward into the vulkan_layer_chassis
# namespace.  This is generator OUTPUT — keep the string contents byte-exact.
inline_custom_source_postamble = """
// loader-layer interface v0, just wrappers since there is only a layer
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
VkExtensionProperties *pProperties) {
return vulkan_layer_chassis::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
}
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(uint32_t *pCount,
VkLayerProperties *pProperties) {
return vulkan_layer_chassis::EnumerateInstanceLayerProperties(pCount, pProperties);
}
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
VkLayerProperties *pProperties) {
// the layer command handles VK_NULL_HANDLE just fine internally
assert(physicalDevice == VK_NULL_HANDLE);
return vulkan_layer_chassis::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
}
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
const char *pLayerName, uint32_t *pCount,
VkExtensionProperties *pProperties) {
// the layer command handles VK_NULL_HANDLE just fine internally
assert(physicalDevice == VK_NULL_HANDLE);
return vulkan_layer_chassis::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
}
VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
return vulkan_layer_chassis::GetDeviceProcAddr(dev, funcName);
}
VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
return vulkan_layer_chassis::GetInstanceProcAddr(instance, funcName);
}
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct) {
assert(pVersionStruct != NULL);
assert(pVersionStruct->sType == LAYER_NEGOTIATE_INTERFACE_STRUCT);
// Fill in the function pointers if our version is at least capable of having the structure contain them.
if (pVersionStruct->loaderLayerInterfaceVersion >= 2) {
pVersionStruct->pfnGetInstanceProcAddr = vkGetInstanceProcAddr;
pVersionStruct->pfnGetDeviceProcAddr = vkGetDeviceProcAddr;
pVersionStruct->pfnGetPhysicalDeviceProcAddr = nullptr;
}
return VK_SUCCESS;
}"""
def __init__(self,
             errFile = sys.stderr,
             warnFile = sys.stderr,
             diagFile = sys.stdout):
    """Initialize the generator and reset its per-run text accumulators."""
    OutputGenerator.__init__(self, errFile, warnFile, diagFile)
    # One text-fragment list per output section.
    self.sections = {section: [] for section in self.ALL_SECTIONS}
    # Entries for the name -> function-pointer intercept map.
    self.intercepts = []
    # String accumulating the base layer factory class definition.
    self.layer_factory = ''
# True when a <param> element is a pointer to an array (marked by a 'len' attribute).
def paramIsArray(self, param):
    """Return True if *param* carries a 'len' XML attribute."""
    length_attr = param.attrib.get('len')
    return length_attr is not None
# True when a <param> element declares a pointer type (a '*' trails the <type> tag).
def paramIsPointer(self, param):
    """Return True if any <type> child of *param* has a tail containing '*'."""
    return any(
        child.tag == 'type' and child.tail is not None and '*' in child.tail
        for child in param
    )
# Called once per output file, before any feature processing.
def beginFile(self, genOpts):
    """Emit the file preamble: copyright, include guard, and custom preamble text.

    Also seeds ``self.layer_factory`` with the base class definition used when
    generating the header.
    """
    OutputGenerator.beginFile(self, genOpts)
    # Output copyright
    write(self.inline_copyright_message, file=self.outFile)
    # Multiple inclusion protection: a filename ending in 'h' means we are
    # producing the header, which needs an include guard.
    self.header = False
    if (self.genOpts.filename and 'h' == self.genOpts.filename[-1]):
        self.header = True
        # BUG FIX: this call was truncated to "write('" in the source; it must
        # emit the include guard for the generated header.
        write('#pragma once', file=self.outFile)
    self.newline()
    if self.header:
        write(self.inline_custom_header_preamble, file=self.outFile)
    else:
        write(self.inline_custom_source_preamble, file=self.outFile)
    # Start accumulating the layer factory class definition (finished in endFile).
    self.layer_factory += self.inline_custom_header_class_definition
# Called once per output file, after all features have been processed.
def endFile(self):
    # Finish C++ namespace and multiple inclusion protection
    self.newline()
    if not self.header:
        # Source file only: emit the map of intercepted API names accumulated
        # by genCmd, used for *ProcAddr dispatch.
        # Record intercepted procedures
        write('// Map of intercepted ApiName to its associated function data', file=self.outFile)
        write('const std::unordered_map<std::string, function_data> name_to_funcptr_map = {', file=self.outFile)
        write('\n'.join(self.intercepts), file=self.outFile)
        write('};\n', file=self.outFile)
        self.newline()
    # Close the vulkan_layer_chassis namespace opened in the preamble.
    write('} // namespace vulkan_layer_chassis', file=self.outFile)
    if self.header:
        self.newline()
        # Output Layer Factory Class Definitions: close the accumulated class
        # body and declare the global layer data map.
        self.layer_factory += self.inline_custom_validation_class_definitions
        self.layer_factory += '};\n\n'
        self.layer_factory += 'extern small_unordered_map<void*, ValidationObject*, 2> layer_data_map;'
        write(self.layer_factory, file=self.outFile)
    else:
        write(self.inline_custom_source_postamble, file=self.outFile)
    # Finish processing in superclass
    OutputGenerator.endFile(self)
def beginFeature(self, interface, emit):
    # Start processing in superclass
    OutputGenerator.beginFeature(self, interface, emit)
    # Get feature extra protect (the #ifdef guard macro for platform-specific
    # extensions, or None when no guard is needed).
    self.featureExtraProtect = GetFeatureProtect(interface)
    # Accumulate includes, defines, types, enums, function pointer typedefs, end function prototypes separately for this
    # feature. They're only printed in endFeature().
    self.sections = dict([(section, []) for section in self.ALL_SECTIONS])
def endFeature(self):
    """Flush the per-feature section accumulators to the output file."""
    if (self.emit):
        self.newline()
        # Wrap platform-specific features in their protect macro.
        if (self.featureExtraProtect != None):
            write('#ifdef', self.featureExtraProtect, file=self.outFile)
        # Type-related sections first, then the command bodies.
        for section in self.TYPE_SECTIONS:
            contents = self.sections[section]
            if contents:
                write('\n'.join(contents), file=self.outFile)
                self.newline()
        if (self.sections['command']):
            write('\n'.join(self.sections['command']), end=u'', file=self.outFile)
            self.newline()
        if (self.featureExtraProtect != None):
            write('#endif //', self.featureExtraProtect, file=self.outFile)
    # Finish processing in superclass
    OutputGenerator.endFeature(self)
def appendSection(self, section, text):
    """Append *text* to the accumulator list for the named output section."""
    bucket = self.sections[section]
    bucket.append(text)
def genType(self, typeinfo, name, alias):
    # Type declarations are not emitted by this generator; intentionally a no-op.
    pass
def genStruct(self, typeinfo, typeName):
    """Emit a C typedef for *typeName* into the 'struct' section."""
    OutputGenerator.genStruct(self, typeinfo, typeName)
    # Assemble the typedef from its member declarations, then join once.
    pieces = ['typedef ' + typeinfo.elem.get('category') + ' ' + typeName + ' {\n']
    for member in typeinfo.elem.findall('.//member'):
        pieces.append(self.makeCParamDecl(member, self.genOpts.alignFuncParam))
        pieces.append(';\n')
    pieces.append('} ' + typeName + ';\n')
    self.appendSection('struct', ''.join(pieces))
def genGroup(self, groupinfo, groupName, alias):
    # Enum groups are not emitted by this generator; intentionally a no-op.
    pass
def genEnum(self, enuminfo, name, alias):
    # Individual enum values are not emitted by this generator; intentionally a no-op.
    pass
def BaseClassCdecl(self, elem, name):
    """Build the PreCallValidate/PreCallRecord/PostCallRecord virtual method
    declarations for one command, for the base validation class.

    Derives the parameter list from the command's PFN typedef declaration.
    The sequence of replace() calls is order-sensitive.
    """
    raw = self.makeCDecls(elem)[1]
    # Everything after the PFN prefix is "CommandName)(params...);".
    prototype = raw.split("VKAPI_PTR *PFN_vk")[1]
    # Drop only the first ')' (the one closing the PFN cast).
    prototype = prototype.replace(")", "", 1)
    # Give each declaration an inline empty body.
    prototype = prototype.replace(";", " {};")
    pre_call_validate = 'virtual bool PreCallValidate' + prototype
    # Validation hooks default to "no error found".
    pre_call_validate = pre_call_validate.replace("{}", " { return false; }")
    pre_call_record = 'virtual void PreCallRecord' + prototype
    post_call_record = 'virtual void PostCallRecord' + prototype
    # Post-record hooks of result-returning commands receive the dispatch result.
    resulttype = elem.find('proto/type')
    if resulttype.text == 'VkResult':
        post_call_record = post_call_record.replace(')', ', VkResult result)')
    elif resulttype.text == 'VkDeviceAddress':
        post_call_record = post_call_record.replace(')', ', VkDeviceAddress result)')
    return ' %s\n %s\n %s\n' % (pre_call_validate, pre_call_record, post_call_record)
def genCmd(self, cmdinfo, name, alias):
    """Generate output for one command.

    Header mode: emit the prototype and (for most commands) the base-class
    virtual hook declarations.  Source mode: record the intercept-map entry and
    emit the full chassis wrapper (pre-validate, pre-record, dispatch,
    post-record).
    """
    ignore_functions = [
        'vkEnumerateInstanceVersion',
    ]
    if name in ignore_functions:
        return
    if self.header:
        # Header: prototype plus base-class virtual hooks.
        self.appendSection('command', '')
        self.appendSection('command', self.makeCDecls(cmdinfo.elem)[0])
        if (self.featureExtraProtect != None):
            self.layer_factory += '#ifdef %s\n' % self.featureExtraProtect
        # Validation-cache commands get hand-written hooks elsewhere.
        if 'ValidationCache' not in name:
            self.layer_factory += self.BaseClassCdecl(cmdinfo.elem, name)
        if (self.featureExtraProtect != None):
            self.layer_factory += '#endif\n'
        return
    # Source mode: record whether this is an instance-level command (first
    # dispatchable parameter is an instance-level handle).
    is_instance = 'false'
    dispatchable_type = cmdinfo.elem.find('param/type').text
    if dispatchable_type in ["VkPhysicalDevice", "VkInstance"] or name == 'vkCreateInstance':
        is_instance = 'true'
    # Hand-written wrappers: only add the intercept-map entry.
    if name in self.manual_functions:
        self.intercepts += [ ' {"%s", {%s, (void*)%s}},' % (name, is_instance, name[2:]) ]
        return
    if (self.featureExtraProtect != None):
        self.intercepts += [ '#ifdef %s' % self.featureExtraProtect ]
    self.intercepts += [ ' {"%s", {%s, (void*)%s}},' % (name, is_instance, name[2:]) ]
    if (self.featureExtraProtect != None):
        self.intercepts += [ '#endif' ]
    OutputGenerator.genCmd(self, cmdinfo, name, alias)
    decls = self.makeCDecls(cmdinfo.elem)
    self.appendSection('command', '')
    # Open the wrapper function body (decls[0] ends with ';', stripped here).
    self.appendSection('command', '%s {' % decls[0][:-1])
    dispatchable_name = cmdinfo.elem.find('param/name').text
    self.appendSection('command', ' auto layer_data = GetLayerDataPtr(get_dispatch_key(%s), layer_data_map);' % (dispatchable_name))
    api_function_name = cmdinfo.elem.attrib.get('name')
    params = cmdinfo.elem.findall('param/name')
    paramstext = ', '.join([str(param.text) for param in params])
    API = api_function_name.replace('vk','Dispatch') + '('
    # Early-return statement per result type, used when validation fails.
    return_map = {
        'PFN_vkVoidFunction': 'return nullptr;',
        'VkBool32': 'return VK_FALSE;',
        'VkDeviceAddress': 'return 0;',
        'VkResult': 'return VK_ERROR_VALIDATION_FAILED_EXT;',
        'void': 'return;',
        'uint32_t': 'return 0;'
    }
    resulttype = cmdinfo.elem.find('proto/type')
    assignresult = ''
    if (resulttype.text != 'void'):
        assignresult = resulttype.text + ' result = '
    # Pre-call validation loop over all registered validation objects.
    self.appendSection('command', ' bool skip = false;')
    self.appendSection('command', ' %s' % self.precallvalidate_loop)
    self.appendSection('command', ' auto lock = intercept->write_lock();')
    self.appendSection('command', ' skip |= intercept->PreCallValidate%s(%s);' % (api_function_name[2:], paramstext))
    self.appendSection('command', ' if (skip) %s' % return_map[resulttype.text])
    self.appendSection('command', ' }')
    # Pre-call record loop.
    self.appendSection('command', ' %s' % self.precallrecord_loop)
    self.appendSection('command', ' auto lock = intercept->write_lock();')
    self.appendSection('command', ' intercept->PreCallRecord%s(%s);' % (api_function_name[2:], paramstext))
    self.appendSection('command', ' }')
    # Down-chain dispatch, optionally bracketed by debug-utils handling.
    if name in self.pre_dispatch_debug_utils_functions:
        self.appendSection('command', ' %s' % self.pre_dispatch_debug_utils_functions[name])
    self.appendSection('command', ' ' + assignresult + API + paramstext + ');')
    if name in self.post_dispatch_debug_utils_functions:
        self.appendSection('command', ' %s' % self.post_dispatch_debug_utils_functions[name])
    # Post-call record loop; result-returning commands pass the result through.
    self.appendSection('command', ' %s' % self.postcallrecord_loop)
    returnparam = ''
    if (resulttype.text == 'VkResult' or resulttype.text == 'VkDeviceAddress'):
        returnparam = ', result'
    self.appendSection('command', ' auto lock = intercept->write_lock();')
    self.appendSection('command', ' intercept->PostCallRecord%s(%s%s);' % (api_function_name[2:], paramstext, returnparam))
    self.appendSection('command', ' }')
    if (resulttype.text != 'void'):
        self.appendSection('command', ' return result;')
    self.appendSection('command', '}')
def makeProtoName(self, name, tail):
    """Strip the 'vk' prefix from *name* and wrap it with the API entry prefix and *tail*."""
    trimmed = name[2:]
    return ''.join((self.genOpts.apientry, trimmed, tail))
| true | true |
f73a2275431944df3f862dc93c2b8e649be1cf91 | 8,800 | py | Python | onadata/apps/api/tests/viewsets/test_note_viewset.py | childhelpline/myhelpline | d72120ee31b6713cbaec79f299f5ee8bcb7ea429 | [
"BSD-3-Clause"
] | 1 | 2018-07-15T13:13:43.000Z | 2018-07-15T13:13:43.000Z | onadata/apps/api/tests/viewsets/test_note_viewset.py | aondiaye/myhelpline | d72120ee31b6713cbaec79f299f5ee8bcb7ea429 | [
"BSD-3-Clause"
] | 14 | 2018-07-10T12:48:46.000Z | 2022-03-11T23:24:51.000Z | onadata/apps/api/tests/viewsets/test_note_viewset.py | aondiaye/myhelpline | d72120ee31b6713cbaec79f299f5ee8bcb7ea429 | [
"BSD-3-Clause"
] | 5 | 2018-07-04T07:59:14.000Z | 2020-01-28T07:50:18.000Z | import os
from datetime import datetime
from django.conf import settings
from django.utils.timezone import make_aware
from django.test import RequestFactory
from guardian.shortcuts import assign_perm
from onadata.apps.api.viewsets.note_viewset import NoteViewSet
from onadata.apps.api.viewsets.xform_viewset import XFormViewSet
from onadata.apps.logger.models import Note
from onadata.apps.main.tests.test_base import TestBase
from onadata.libs.serializers.note_serializer import NoteSerializer
class TestNoteViewSet(TestBase):
    """
    Test NoteViewSet: creating, listing, retrieving and deleting notes on
    form submissions, including permission checks and CSV export.
    """
    def setUp(self):
        """Publish a form, make submissions, and build an authenticated view."""
        super(TestNoteViewSet, self).setUp()
        self._create_user_and_login()
        self._publish_transportation_form()
        self._make_submissions()
        self.view = NoteViewSet.as_view({
            'get': 'list',
            'post': 'create',
            'delete': 'destroy'
        })
        self.factory = RequestFactory()
        self.extra = {'HTTP_AUTHORIZATION': 'Token %s' % self.user.auth_token}
    @property
    def _first_xform_instance(self):
        # Oldest submission of the published form.
        return self.xform.instances.all().order_by('pk')[0]
    def _add_notes_to_data_point(self):
        # add a note to a specific data point and remember its id in self.pk
        note = {'note': u"Road Warrior"}
        dataid = self._first_xform_instance.pk
        note['instance'] = dataid
        request = self.factory.post('/', data=note, **self.extra)
        self.assertTrue(self.xform.instances.count())
        response = self.view(request)
        self.assertEqual(response.status_code, 201)
        self.pk = response.data['id']
        note['id'] = self.pk
        self.note = note
    def test_note_list(self):
        """Listing notes returns the note just created."""
        self._add_notes_to_data_point()
        request = self.factory.get('/', **self.extra)
        response = self.view(request)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(len(response.data) > 0)
        self.assertDictContainsSubset(self.note, response.data[0])
    def test_note_get(self):
        """Retrieving a note by pk returns its data and owner."""
        self._add_notes_to_data_point()
        view = NoteViewSet.as_view({'get': 'retrieve'})
        request = self.factory.get('/', **self.extra)
        response = view(request, pk=self.pk)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data['owner'], self.user.username)
        self.assertDictContainsSubset(self.note, response.data)
    def test_get_note_for_specific_instance(self):
        """Filtering by instance returns only that instance's notes."""
        self._add_notes_to_data_point()
        view = NoteViewSet.as_view({'get': 'retrieve'})
        instance = self.xform.instances.first()
        query_params = {"instance": instance.id}
        request = self.factory.get('/', data=query_params, **self.extra)
        response = view(request, pk=self.pk)
        self.assertEqual(response.status_code, 200)
        self.assertDictContainsSubset(self.note, response.data)
        second_instance = self.xform.instances.last()
        query_params = {"instance": second_instance.id}
        request = self.factory.get('/', data=query_params, **self.extra)
        response = view(request, pk=self.pk)
        self.assertEqual(response.status_code, 200)
        self.assertListEqual(response.data, [])
    def test_add_notes_to_data_point(self):
        """A created note appears in the instance's JSON '_notes' list."""
        self._add_notes_to_data_point()
        # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual.
        self.assertEqual(len(self._first_xform_instance.json["_notes"]), 1)
    def test_other_user_notes_access(self):
        """Another user cannot create or see notes on bob's submissions."""
        self._create_user_and_login('lilly', '1234')
        extra = {'HTTP_AUTHORIZATION': 'Token %s' % self.user.auth_token}
        note = {'note': u"Road Warrior"}
        dataid = self.xform.instances.first().pk
        note['instance'] = dataid
        # Other user 'lilly' should not be able to create notes
        # to xform instance owned by 'bob'
        request = self.factory.post('/', data=note)
        self.assertTrue(self.xform.instances.count())
        response = self.view(request)
        self.assertEqual(response.status_code, 401)
        # save some notes
        self._add_notes_to_data_point()
        # access to /notes endpoint,should be empty list
        request = self.factory.get('/', **extra)
        response = self.view(request)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data, [])
        # Other user 'lilly' sees an empty list when accessing bob's notes
        view = NoteViewSet.as_view({'get': 'retrieve'})
        query_params = {"instance": dataid}
        request = self.factory.get('/', data=query_params, **extra)
        response = view(request, pk=self.pk)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data, [])
    def test_collaborator_with_readonly_permission_can_add_comment(self):
        """A collaborator with view permission on the form can post a note."""
        self._create_user_and_login('lilly', '1234')
        extra = {'HTTP_AUTHORIZATION': 'Token %s' % self.user.auth_token}
        # save some notes
        self._add_notes_to_data_point()
        # post note to submission as lilly without permissions
        note = {'note': u"Road Warrior"}
        dataid = self._first_xform_instance.pk
        note['instance'] = dataid
        request = self.factory.post('/', data=note)
        self.assertTrue(self.xform.instances.count())
        response = self.view(request)
        self.assertEqual(response.status_code, 401)
        # post note to submission with permissions to form
        assign_perm('view_xform', self.user, self._first_xform_instance.xform)
        note = {'note': u"Road Warrior"}
        dataid = self._first_xform_instance.pk
        note['instance'] = dataid
        request = self.factory.post('/', data=note, **extra)
        self.assertTrue(self.xform.instances.count())
        response = self.view(request)
        self.assertEqual(response.status_code, 201)
    def test_delete_note(self):
        """Deleting a note removes it from the listing."""
        self._add_notes_to_data_point()
        request = self.factory.delete('/', **self.extra)
        response = self.view(request, pk=self.pk)
        self.assertEqual(response.status_code, 204)
        request = self.factory.get('/', **self.extra)
        response = self.view(request)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data, [])
    def test_question_level_notes(self):
        """A note can target a specific question field of a submission."""
        field = "transport"
        dataid = self.xform.instances.all()[0].pk
        note = {
            'note': "Road Warrior",
            'instance': dataid,
            'instance_field': field
        }
        request = self.factory.post('/', data=note, **self.extra)
        self.assertTrue(self.xform.instances.count())
        response = self.view(request)
        self.assertEqual(response.status_code, 201)
        instance = self.xform.instances.all()[0]
        self.assertEqual(len(instance.json["_notes"]), 1)
        note = instance.json["_notes"][0]
        self.assertEqual(note['instance_field'], field)
    def test_only_add_question_notes_to_existing_fields(self):
        """A note referencing a non-existent field is rejected with 400."""
        field = "bla"
        dataid = self.xform.instances.all()[0].pk
        note = {
            'note': "Road Warrior",
            'instance': dataid,
            'instance_field': field
        }
        request = self.factory.post('/', data=note, **self.extra)
        self.assertTrue(self.xform.instances.count())
        response = self.view(request)
        self.assertEqual(response.status_code, 400)
        instance = self.xform.instances.all()[0]
        self.assertEqual(len(instance.json["_notes"]), 0)
    def test_csv_export_form_w_notes(self):
        """
        Test CSV exports include notes for submissions that have notes.
        """
        self._add_notes_to_data_point()
        self._add_notes_to_data_point()
        time = make_aware(datetime(2016, 7, 1))
        for instance in self.xform.instances.all():
            instance.date_created = time
            instance.save()
            instance.parsed_instance.save()
        view = XFormViewSet.as_view({'get': 'retrieve'})
        request = self.factory.get('/', **self.extra)
        response = view(request, pk=self.xform.pk, format='csv')
        # BUG FIX: was assertTrue(response.status_code, 200), which treats 200
        # as the failure message and passes for ANY truthy status code.
        self.assertEqual(response.status_code, 200)
        test_file_path = os.path.join(settings.PROJECT_ROOT, 'apps', 'viewer',
                                      'tests', 'fixtures',
                                      'transportation_w_notes.csv')
        self._test_csv_response(response, test_file_path)
    def test_attribute_error_bug(self):
        """NoteSerializer: Should not raise AttributeError exeption"""
        note = Note(note='Hello', instance=self._first_xform_instance)
        note.save()
        data = NoteSerializer(note).data
        self.assertDictContainsSubset({
            'created_by': None,
            'note': u'Hello',
            'instance': note.instance_id,
            'owner': None
        }, data)
| 37.606838 | 78 | 0.637841 | import os
from datetime import datetime
from django.conf import settings
from django.utils.timezone import make_aware
from django.test import RequestFactory
from guardian.shortcuts import assign_perm
from onadata.apps.api.viewsets.note_viewset import NoteViewSet
from onadata.apps.api.viewsets.xform_viewset import XFormViewSet
from onadata.apps.logger.models import Note
from onadata.apps.main.tests.test_base import TestBase
from onadata.libs.serializers.note_serializer import NoteSerializer
class TestNoteViewSet(TestBase):
    # Tests for NoteViewSet: note CRUD on submissions, permissions, CSV export.
    def setUp(self):
        """Publish a form, make submissions, and build an authenticated view."""
        super(TestNoteViewSet, self).setUp()
        self._create_user_and_login()
        self._publish_transportation_form()
        self._make_submissions()
        self.view = NoteViewSet.as_view({
            'get': 'list',
            'post': 'create',
            'delete': 'destroy'
        })
        self.factory = RequestFactory()
        self.extra = {'HTTP_AUTHORIZATION': 'Token %s' % self.user.auth_token}
    @property
    def _first_xform_instance(self):
        # Oldest submission of the published form.
        return self.xform.instances.all().order_by('pk')[0]
    def _add_notes_to_data_point(self):
        # Create one note on the first submission; remember its id in self.pk.
        note = {'note': u"Road Warrior"}
        dataid = self._first_xform_instance.pk
        note['instance'] = dataid
        request = self.factory.post('/', data=note, **self.extra)
        self.assertTrue(self.xform.instances.count())
        response = self.view(request)
        self.assertEqual(response.status_code, 201)
        self.pk = response.data['id']
        note['id'] = self.pk
        self.note = note
    def test_note_list(self):
        """Listing notes returns the note just created."""
        self._add_notes_to_data_point()
        request = self.factory.get('/', **self.extra)
        response = self.view(request)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(len(response.data) > 0)
        self.assertDictContainsSubset(self.note, response.data[0])
    def test_note_get(self):
        """Retrieving a note by pk returns its data and owner."""
        self._add_notes_to_data_point()
        view = NoteViewSet.as_view({'get': 'retrieve'})
        request = self.factory.get('/', **self.extra)
        response = view(request, pk=self.pk)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data['owner'], self.user.username)
        self.assertDictContainsSubset(self.note, response.data)
    def test_get_note_for_specific_instance(self):
        """Filtering by instance returns only that instance's notes."""
        self._add_notes_to_data_point()
        view = NoteViewSet.as_view({'get': 'retrieve'})
        instance = self.xform.instances.first()
        query_params = {"instance": instance.id}
        request = self.factory.get('/', data=query_params, **self.extra)
        response = view(request, pk=self.pk)
        self.assertEqual(response.status_code, 200)
        self.assertDictContainsSubset(self.note, response.data)
        second_instance = self.xform.instances.last()
        query_params = {"instance": second_instance.id}
        request = self.factory.get('/', data=query_params, **self.extra)
        response = view(request, pk=self.pk)
        self.assertEqual(response.status_code, 200)
        self.assertListEqual(response.data, [])
    def test_add_notes_to_data_point(self):
        """A created note appears in the instance's JSON '_notes' list."""
        self._add_notes_to_data_point()
        # NOTE(review): assertEquals is a deprecated alias removed in Python 3.12.
        self.assertEquals(len(self._first_xform_instance.json["_notes"]), 1)
    def test_other_user_notes_access(self):
        """Another user cannot create or see notes on bob's submissions."""
        self._create_user_and_login('lilly', '1234')
        extra = {'HTTP_AUTHORIZATION': 'Token %s' % self.user.auth_token}
        note = {'note': u"Road Warrior"}
        dataid = self.xform.instances.first().pk
        note['instance'] = dataid
        # Unauthenticated create attempt must be rejected.
        request = self.factory.post('/', data=note)
        self.assertTrue(self.xform.instances.count())
        response = self.view(request)
        self.assertEqual(response.status_code, 401)
        self._add_notes_to_data_point()
        # 'lilly' sees an empty list for bob's notes.
        request = self.factory.get('/', **extra)
        response = self.view(request)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data, [])
        view = NoteViewSet.as_view({'get': 'retrieve'})
        query_params = {"instance": dataid}
        request = self.factory.get('/', data=query_params, **extra)
        response = view(request, pk=self.pk)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data, [])
    def test_collaborator_with_readonly_permission_can_add_comment(self):
        """A collaborator with view permission on the form can post a note."""
        self._create_user_and_login('lilly', '1234')
        extra = {'HTTP_AUTHORIZATION': 'Token %s' % self.user.auth_token}
        # save some notes
        self._add_notes_to_data_point()
        # post note to submission as lilly without permissions
        note = {'note': u"Road Warrior"}
        dataid = self._first_xform_instance.pk
        note['instance'] = dataid
        request = self.factory.post('/', data=note)
        self.assertTrue(self.xform.instances.count())
        response = self.view(request)
        self.assertEqual(response.status_code, 401)
        # post note to submission with permissions to form
        assign_perm('view_xform', self.user, self._first_xform_instance.xform)
        note = {'note': u"Road Warrior"}
        dataid = self._first_xform_instance.pk
        note['instance'] = dataid
        request = self.factory.post('/', data=note, **extra)
        self.assertTrue(self.xform.instances.count())
        response = self.view(request)
        self.assertEqual(response.status_code, 201)
    def test_delete_note(self):
        """Deleting a note removes it from the listing."""
        self._add_notes_to_data_point()
        request = self.factory.delete('/', **self.extra)
        response = self.view(request, pk=self.pk)
        self.assertEqual(response.status_code, 204)
        request = self.factory.get('/', **self.extra)
        response = self.view(request)
        self.assertEqual(response.status_code, 200)
        self.assertEquals(response.data, [])
    def test_question_level_notes(self):
        """A note can target a specific question field of a submission."""
        field = "transport"
        dataid = self.xform.instances.all()[0].pk
        note = {
            'note': "Road Warrior",
            'instance': dataid,
            'instance_field': field
        }
        request = self.factory.post('/', data=note, **self.extra)
        self.assertTrue(self.xform.instances.count())
        response = self.view(request)
        self.assertEqual(response.status_code, 201)
        instance = self.xform.instances.all()[0]
        self.assertEquals(len(instance.json["_notes"]), 1)
        note = instance.json["_notes"][0]
        self.assertEquals(note['instance_field'], field)
    def test_only_add_question_notes_to_existing_fields(self):
        """A note referencing a non-existent field is rejected with 400."""
        field = "bla"
        dataid = self.xform.instances.all()[0].pk
        note = {
            'note': "Road Warrior",
            'instance': dataid,
            'instance_field': field
        }
        request = self.factory.post('/', data=note, **self.extra)
        self.assertTrue(self.xform.instances.count())
        response = self.view(request)
        self.assertEqual(response.status_code, 400)
        instance = self.xform.instances.all()[0]
        self.assertEquals(len(instance.json["_notes"]), 0)
    def test_csv_export_form_w_notes(self):
        """CSV export includes notes for submissions that have them."""
        self._add_notes_to_data_point()
        self._add_notes_to_data_point()
        time = make_aware(datetime(2016, 7, 1))
        for instance in self.xform.instances.all():
            instance.date_created = time
            instance.save()
            instance.parsed_instance.save()
        view = XFormViewSet.as_view({'get': 'retrieve'})
        request = self.factory.get('/', **self.extra)
        response = view(request, pk=self.xform.pk, format='csv')
        # NOTE(review): assertTrue(a, b) treats b as the failure message — this
        # should be assertEqual(response.status_code, 200); as written it
        # passes for any truthy status code.
        self.assertTrue(response.status_code, 200)
        test_file_path = os.path.join(settings.PROJECT_ROOT, 'apps', 'viewer',
                                      'tests', 'fixtures',
                                      'transportation_w_notes.csv')
        self._test_csv_response(response, test_file_path)
    def test_attribute_error_bug(self):
        """NoteSerializer should serialize a note without raising AttributeError."""
        note = Note(note='Hello', instance=self._first_xform_instance)
        note.save()
        data = NoteSerializer(note).data
        self.assertDictContainsSubset({
            'created_by': None,
            'note': u'Hello',
            'instance': note.instance_id,
            'owner': None
        }, data)
| true | true |
f73a24b814c65a9339fbcf5f01245b82951c31e5 | 9,773 | py | Python | discord/widget.py | BillSchumacher/discord.py | bba09204cbbe3661ac2fa869e25497e5eef422c4 | [
"MIT"
] | null | null | null | discord/widget.py | BillSchumacher/discord.py | bba09204cbbe3661ac2fa869e25497e5eef422c4 | [
"MIT"
] | 1 | 2022-01-21T08:20:30.000Z | 2022-01-21T08:20:30.000Z | discord/widget.py | BillSchumacher/discord.py | bba09204cbbe3661ac2fa869e25497e5eef422c4 | [
"MIT"
] | null | null | null | """
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import Any, List, Optional, TYPE_CHECKING, Union
from .utils import snowflake_time, _get_as_snowflake, resolve_invite
from .user import BaseUser
from .activity import Activity, BaseActivity, Spotify, create_activity
from .invite import Invite
from .enums import Status, try_enum
if TYPE_CHECKING:
import datetime
from .state import ConnectionState
from .types.widget import (
WidgetMember as WidgetMemberPayload,
Widget as WidgetPayload,
)
__all__ = (
'WidgetChannel',
'WidgetMember',
'Widget',
)
class WidgetChannel:
    """A lightweight stand-in for a guild channel as exposed by the widget.
    .. container:: operations
        .. describe:: x == y
            Checks if two partial channels are the same.
        .. describe:: x != y
            Checks if two partial channels are not the same.
        .. describe:: hash(x)
            Return the partial channel's hash.
        .. describe:: str(x)
            Returns the partial channel's name.
    Attributes
    -----------
    id: :class:`int`
        The channel's ID.
    name: :class:`str`
        The channel's name.
    position: :class:`int`
        The channel's position
    """
    __slots__ = ('id', 'name', 'position')
    def __init__(self, id: int, name: str, position: int) -> None:
        self.id: int = id
        self.name: str = name
        self.position: int = position
    def __repr__(self) -> str:
        return '<WidgetChannel id={0.id} name={0.name!r} position={0.position!r}>'.format(self)
    def __str__(self) -> str:
        return self.name
    @property
    def mention(self) -> str:
        """:class:`str`: The string that allows you to mention the channel."""
        return '<#{}>'.format(self.id)
    @property
    def created_at(self) -> datetime.datetime:
        """:class:`datetime.datetime`: Returns the channel's creation time in UTC."""
        return snowflake_time(self.id)
class WidgetMember(BaseUser):
    """A partial guild member as reported by the guild widget.
    .. container:: operations
        .. describe:: x == y
            Checks if two widget members are the same.
        .. describe:: x != y
            Checks if two widget members are not the same.
        .. describe:: hash(x)
            Return the widget member's hash.
        .. describe:: str(x)
            Returns the widget member's `name#discriminator`.
    Attributes
    -----------
    id: :class:`int`
        The member's ID.
    name: :class:`str`
        The member's username.
    discriminator: :class:`str`
        The member's discriminator.
    bot: :class:`bool`
        Whether the member is a bot.
    status: :class:`Status`
        The member's status.
    nick: Optional[:class:`str`]
        The member's nickname.
    avatar: Optional[:class:`str`]
        The member's avatar hash.
    activity: Optional[Union[:class:`BaseActivity`, :class:`Spotify`]]
        The member's activity.
    deafened: Optional[:class:`bool`]
        Whether the member is currently deafened.
    muted: Optional[:class:`bool`]
        Whether the member is currently muted.
    suppress: Optional[:class:`bool`]
        Whether the member is currently being suppressed.
    connected_channel: Optional[:class:`WidgetChannel`]
        Which channel the member is connected to.
    """
    __slots__ = ('name', 'status', 'nick', 'avatar', 'discriminator',
                 'id', 'bot', 'activity', 'deafened', 'suppress', 'muted',
                 'connected_channel')
    if TYPE_CHECKING:
        activity: Optional[Union[BaseActivity, Spotify]]
    def __init__(
        self,
        *,
        state: ConnectionState,
        data: WidgetMemberPayload,
        connected_channel: Optional[WidgetChannel] = None
    ) -> None:
        super().__init__(state=state, data=data)
        self.nick: Optional[str] = data.get('nick')
        self.status: Status = try_enum(Status, data.get('status'))
        # A member counts as deafened/muted whether the restriction was
        # applied by the server or by the member themselves.
        self.deafened: Optional[bool] = data.get('deaf', False) or data.get('self_deaf', False)
        self.muted: Optional[bool] = data.get('mute', False) or data.get('self_mute', False)
        self.suppress: Optional[bool] = data.get('suppress', False)
        # 'game' may be absent entirely; only parse it when present.
        try:
            game_data = data['game']
        except KeyError:
            parsed_activity = None
        else:
            parsed_activity = create_activity(game_data)
        self.activity: Optional[Union[BaseActivity, Spotify]] = parsed_activity
        self.connected_channel: Optional[WidgetChannel] = connected_channel
    def __repr__(self) -> str:
        return (f"<WidgetMember name={self.name!r} discriminator={self.discriminator!r}"
                f" bot={self.bot} nick={self.nick!r}>")
    @property
    def display_name(self) -> str:
        """:class:`str`: Returns the member's display name."""
        return self.nick if self.nick else self.name
class Widget:
    """Represents a :class:`Guild` widget.
    .. container:: operations
        .. describe:: x == y
            Checks if two widgets are the same.
        .. describe:: x != y
            Checks if two widgets are not the same.
        .. describe:: str(x)
            Returns the widget's JSON URL.
    Attributes
    -----------
    id: :class:`int`
        The guild's ID.
    name: :class:`str`
        The guild's name.
    channels: List[:class:`WidgetChannel`]
        The accessible voice channels in the guild.
    members: List[:class:`Member`]
        The online members in the server. Offline members
        do not appear in the widget.
        .. note::
            Due to a Discord limitation, if this data is available
            the users will be "anonymized" with linear IDs and discriminator
            information being incorrect. Likewise, the number of members
            retrieved is capped.
    """
    __slots__ = ('_state', 'channels', '_invite', 'id', 'members', 'name')
    def __init__(self, *, state: ConnectionState, data: WidgetPayload) -> None:
        self._state = state
        self._invite = data['instant_invite']
        self.name: str = data['name']
        self.id: int = int(data['id'])
        # Build the partial channel objects first so members can be linked
        # to them below.
        self.channels: List[WidgetChannel] = [
            WidgetChannel(id=int(raw['id']), name=raw['name'], position=raw['position'])
            for raw in data.get('channels', [])
        ]
        by_id = {channel.id: channel for channel in self.channels}
        self.members: List[WidgetMember] = []
        for raw_member in data.get('members', []):
            channel = _get_as_snowflake(raw_member, 'channel_id')
            if channel in by_id:
                channel = by_id[channel]  # type: ignore
            elif channel:
                # Channel not exposed by the widget payload; synthesize a stub.
                channel = WidgetChannel(id=channel, name='', position=0)
            self.members.append(
                WidgetMember(state=self._state, data=raw_member, connected_channel=channel)  # type: ignore
            )
    def __str__(self) -> str:
        return self.json_url
    def __eq__(self, other: Any) -> bool:
        if isinstance(other, Widget):
            return self.id == other.id
        return False
    def __repr__(self) -> str:
        return f'<Widget id={self.id} name={self.name!r} invite_url={self.invite_url!r}>'
    @property
    def created_at(self) -> datetime.datetime:
        """:class:`datetime.datetime`: Returns the member's creation time in UTC."""
        return snowflake_time(self.id)
    @property
    def json_url(self) -> str:
        """:class:`str`: The JSON URL of the widget."""
        return f"https://discord.com/api/guilds/{self.id}/widget.json"
    @property
    def invite_url(self) -> str:
        """Optional[:class:`str`]: The invite URL for the guild, if available."""
        return self._invite
    async def fetch_invite(self, *, with_counts: bool = True) -> Invite:
        """|coro|
        Retrieves an :class:`Invite` from the widget's invite URL.
        This is the same as :meth:`Client.fetch_invite`; the invite
        code is abstracted away.
        Parameters
        -----------
        with_counts: :class:`bool`
            Whether to include count information in the invite. This fills the
            :attr:`Invite.approximate_member_count` and :attr:`Invite.approximate_presence_count`
            fields.
        Returns
        --------
        :class:`Invite`
            The invite from the widget's invite URL.
        """
        invite_code = resolve_invite(self._invite)
        payload = await self._state.http.get_invite(invite_code, with_counts=with_counts)
        return Invite.from_incomplete(state=self._state, data=payload)
| 32.576667 | 130 | 0.629694 |
from __future__ import annotations
from typing import Any, List, Optional, TYPE_CHECKING, Union
from .utils import snowflake_time, _get_as_snowflake, resolve_invite
from .user import BaseUser
from .activity import Activity, BaseActivity, Spotify, create_activity
from .invite import Invite
from .enums import Status, try_enum
if TYPE_CHECKING:
import datetime
from .state import ConnectionState
from .types.widget import (
WidgetMember as WidgetMemberPayload,
Widget as WidgetPayload,
)
__all__ = (
'WidgetChannel',
'WidgetMember',
'Widget',
)
class WidgetChannel:
__slots__ = ('id', 'name', 'position')
def __init__(self, id: int, name: str, position: int) -> None:
self.id: int = id
self.name: str = name
self.position: int = position
def __str__(self) -> str:
return self.name
def __repr__(self) -> str:
return f'<WidgetChannel id={self.id} name={self.name!r} position={self.position!r}>'
@property
def mention(self) -> str:
return f'<#{self.id}>'
@property
def created_at(self) -> datetime.datetime:
return snowflake_time(self.id)
class WidgetMember(BaseUser):
__slots__ = ('name', 'status', 'nick', 'avatar', 'discriminator',
'id', 'bot', 'activity', 'deafened', 'suppress', 'muted',
'connected_channel')
if TYPE_CHECKING:
activity: Optional[Union[BaseActivity, Spotify]]
def __init__(
self,
*,
state: ConnectionState,
data: WidgetMemberPayload,
connected_channel: Optional[WidgetChannel] = None
) -> None:
super().__init__(state=state, data=data)
self.nick: Optional[str] = data.get('nick')
self.status: Status = try_enum(Status, data.get('status'))
self.deafened: Optional[bool] = data.get('deaf', False) or data.get('self_deaf', False)
self.muted: Optional[bool] = data.get('mute', False) or data.get('self_mute', False)
self.suppress: Optional[bool] = data.get('suppress', False)
try:
game = data['game']
except KeyError:
activity = None
else:
activity = create_activity(game)
self.activity: Optional[Union[BaseActivity, Spotify]] = activity
self.connected_channel: Optional[WidgetChannel] = connected_channel
def __repr__(self) -> str:
return (
f"<WidgetMember name={self.name!r} discriminator={self.discriminator!r}"
f" bot={self.bot} nick={self.nick!r}>"
)
@property
def display_name(self) -> str:
return self.nick or self.name
class Widget:
__slots__ = ('_state', 'channels', '_invite', 'id', 'members', 'name')
def __init__(self, *, state: ConnectionState, data: WidgetPayload) -> None:
self._state = state
self._invite = data['instant_invite']
self.name: str = data['name']
self.id: int = int(data['id'])
self.channels: List[WidgetChannel] = []
for channel in data.get('channels', []):
_id = int(channel['id'])
self.channels.append(WidgetChannel(id=_id, name=channel['name'], position=channel['position']))
self.members: List[WidgetMember] = []
channels = {channel.id: channel for channel in self.channels}
for member in data.get('members', []):
connected_channel = _get_as_snowflake(member, 'channel_id')
if connected_channel in channels:
connected_channel = channels[connected_channel]
elif connected_channel:
connected_channel = WidgetChannel(id=connected_channel, name='', position=0)
self.members.append(WidgetMember(state=self._state, data=member, connected_channel=connected_channel))
def __str__(self) -> str:
return self.json_url
def __eq__(self, other: Any) -> bool:
return self.id == other.id if isinstance(other, Widget) else False
def __repr__(self) -> str:
return f'<Widget id={self.id} name={self.name!r} invite_url={self.invite_url!r}>'
@property
def created_at(self) -> datetime.datetime:
return snowflake_time(self.id)
@property
def json_url(self) -> str:
return f"https://discord.com/api/guilds/{self.id}/widget.json"
@property
def invite_url(self) -> str:
return self._invite
async def fetch_invite(self, *, with_counts: bool = True) -> Invite:
invite_id = resolve_invite(self._invite)
data = await self._state.http.get_invite(invite_id, with_counts=with_counts)
return Invite.from_incomplete(state=self._state, data=data)
| true | true |
f73a25dff7fcc5665c27e55e9470b27bb07f770b | 8,363 | py | Python | invoke/executor.py | oynil/Invoke-Taskset | 4a206ce125926d52bc20f8c3bb5373912c65e91f | [
"BSD-2-Clause"
] | null | null | null | invoke/executor.py | oynil/Invoke-Taskset | 4a206ce125926d52bc20f8c3bb5373912c65e91f | [
"BSD-2-Clause"
] | null | null | null | invoke/executor.py | oynil/Invoke-Taskset | 4a206ce125926d52bc20f8c3bb5373912c65e91f | [
"BSD-2-Clause"
] | null | null | null | from .util import six
from .config import Config
from .parser import ParserContext
from .util import debug
from .tasks import Call, Task
class Executor(object):
    """
    An execution strategy for Task objects.
    Subclasses may override various extension points to change, add or remove
    behavior.
    .. versionadded:: 1.0
    """
    def __init__(self, collection, config=None, core=None):
        """
        Initialize executor with handles to necessary data structures.
        :param collection:
            A `.Collection` used to look up requested tasks (and their default
            config data, if any) by name during execution.
        :param config:
            An optional `.Config` holding configuration state. Defaults to an
            empty `.Config` if not given.
        :param core:
            An optional `.ParseResult` holding parsed core program arguments.
            Defaults to ``None``.
        """
        self.collection = collection
        # Fall back to a fresh, empty Config so the rest of the class can
        # always assume self.config is a valid Config instance.
        self.config = config if config is not None else Config()
        self.core = core
    def execute(self, *tasks):
        """
        Execute one or more ``tasks`` in sequence.
        :param tasks:
            An all-purpose iterable of "tasks to execute", each member of which
            may take one of the following forms:
            **A string** naming a task from the Executor's `.Collection`. This
            name may contain dotted syntax appropriate for calling namespaced
            tasks, e.g. ``subcollection.taskname``. Such tasks are executed
            without arguments.
            **A two-tuple** whose first element is a task name string (as
            above) and whose second element is a dict suitable for use as
            ``**kwargs`` when calling the named task. E.g.::
                [
                    ('task1', {}),
                    ('task2', {'arg1': 'val1'}),
                    ...
                ]
            is equivalent, roughly, to::
                task1()
                task2(arg1='val1')
            **A `.ParserContext`** instance, whose ``.name`` attribute is used
            as the task name and whose ``.as_kwargs`` attribute is used as the
            task kwargs (again following the above specifications).
            .. note::
                When called without any arguments at all (i.e. when ``*tasks``
                is empty), the default task from ``self.collection`` is used
                instead, if defined.
        :returns:
            A dict mapping task objects to their return values.
            This dict may include pre- and post-tasks if any were executed. For
            example, in a collection with a ``build`` task depending on another
            task named ``setup``, executing ``build`` will result in a dict
            with two keys, one for ``build`` and one for ``setup``.
        .. versionadded:: 1.0
        """
        # Normalize input
        debug("Examining top level tasks {!r}".format([x for x in tasks]))
        calls = self.normalize(tasks)
        debug("Tasks (now Calls) with kwargs: {!r}".format(calls))
        # Obtain copy of directly-given tasks since they should sometimes
        # behave differently (autoprint below only applies to them, not to
        # expanded pre/post tasks).
        direct = list(calls)
        # Expand pre/post tasks
        # TODO: may make sense to bundle expansion & deduping now eh?
        expanded = self.expand_calls(calls)
        # Get some good value for dedupe option, even if config doesn't have
        # the tree we expect. (This is a concession to testing.)
        try:
            dedupe = self.config.tasks.dedupe
        except AttributeError:
            dedupe = True
        # Dedupe across entire run now that we know about all calls in order
        calls = self.dedupe(expanded) if dedupe else expanded
        # Execute
        results = {}
        # TODO: maybe clone initial config here? Probably not necessary,
        # especially given Executor is not designed to execute() >1 time at the
        # moment...
        for call in calls:
            # Only directly-requested tasks ever have their result printed.
            autoprint = call in direct and call.autoprint
            args = call.args
            debug("Executing {!r}".format(call))
            # Hand in reference to our config, which will preserve user
            # modifications across the lifetime of the session.
            config = self.config
            # But make sure we reset its task-sensitive levels each time
            # (collection & shell env)
            # TODO: load_collection needs to be skipped if task is anonymous
            # (Fabric 2 or other subclassing libs only)
            collection_config = self.collection.configuration(call.called_as)
            config.load_collection(collection_config)
            config.load_shell_env()
            debug("Finished loading collection & shell env configs")
            # Get final context from the Call (which will know how to generate
            # an appropriate one; e.g. subclasses might use extra data from
            # being parameterized), handing in this config for use there.
            context = call.make_context(config)
            # Tasks flagged as part of a taskset are invoked without the
            # context prepended to their positional arguments.
            if not call.task.taskset:
                args = (context,) + args
            result = call.task(*args, **call.kwargs)
            if autoprint:
                print(result)
            # TODO: handle the non-dedupe case / the same-task-different-args
            # case, wherein one task obj maps to >1 result.
            results[call.task] = result
        return results
    def normalize(self, tasks):
        """
        Transform arbitrary task list w/ various types, into `.Call` objects.
        See docstring for `~.Executor.execute` for details.
        .. versionadded:: 1.0
        """
        calls = []
        for task in tasks:
            name, kwargs = None, {}
            if isinstance(task, six.string_types):
                name = task
            elif isinstance(task, ParserContext):
                name = task.name
                kwargs = task.as_kwargs
            else:
                # Otherwise, assume a (name, kwargs) two-tuple.
                name, kwargs = task
            c = Call(task=self.collection[name], kwargs=kwargs, called_as=name)
            calls.append(c)
        if not tasks and self.collection.default is not None:
            calls = [Call(task=self.collection[self.collection.default])]
        return calls
    def dedupe(self, calls):
        """
        Deduplicate a list of `tasks <.Call>`.
        Order is preserved; only the first occurrence of each call survives.
        :param calls: An iterable of `.Call` objects representing tasks.
        :returns: A list of `.Call` objects.
        .. versionadded:: 1.0
        """
        deduped = []
        debug("Deduplicating tasks...")
        for call in calls:
            if call not in deduped:
                debug("{!r}: no duplicates found, ok".format(call))
                deduped.append(call)
            else:
                debug("{!r}: found in list already, skipping".format(call))
        return deduped
    def expand_calls(self, calls):
        """
        Expand a list of `.Call` objects into a near-final list of same.
        The default implementation of this method simply adds a task's
        pre/post-task list before/after the task itself, as necessary.
        Subclasses may wish to do other things in addition (or instead of) the
        above, such as multiplying the `calls <.Call>` by argument vectors or
        similar.
        .. versionadded:: 1.0
        """
        ret = []
        for call in calls:
            # Normalize to Call (this method is sometimes called with pre/post
            # task lists, which may contain 'raw' Task objects)
            if isinstance(call, Task):
                call = Call(task=call)
            debug("Expanding task-call {!r}".format(call))
            # TODO: this is where we _used_ to call Executor.config_for(call,
            # config)...
            # TODO: now we may need to preserve more info like where the call
            # came from, etc, but I feel like that shit should go _on the call
            # itself_ right???
            # TODO: we _probably_ don't even want the config in here anymore,
            # we want this to _just_ be about the recursion across pre/post
            # tasks or parameterization...?
            # Recursively expanded pre-tasks run before the call itself,
            # post-tasks after it.
            ret.extend(self.expand_calls(call.pre))
            ret.append(call)
            ret.extend(self.expand_calls(call.post))
        return ret
| 39.079439 | 79 | 0.580892 | from .util import six
from .config import Config
from .parser import ParserContext
from .util import debug
from .tasks import Call, Task
class Executor(object):
def __init__(self, collection, config=None, core=None):
self.collection = collection
self.config = config if config is not None else Config()
self.core = core
def execute(self, *tasks):
debug("Examining top level tasks {!r}".format([x for x in tasks]))
calls = self.normalize(tasks)
debug("Tasks (now Calls) with kwargs: {!r}".format(calls))
direct = list(calls)
expanded = self.expand_calls(calls)
# the tree we expect. (This is a concession to testing.)
try:
dedupe = self.config.tasks.dedupe
except AttributeError:
dedupe = True
# Dedupe across entire run now that we know about all calls in order
calls = self.dedupe(expanded) if dedupe else expanded
# Execute
results = {}
# TODO: maybe clone initial config here? Probably not necessary,
# especially given Executor is not designed to execute() >1 time at the
# moment...
for call in calls:
autoprint = call in direct and call.autoprint
args = call.args
debug("Executing {!r}".format(call))
# Hand in reference to our config, which will preserve user
# modifications across the lifetime of the session.
config = self.config
# But make sure we reset its task-sensitive levels each time
# (collection & shell env)
# TODO: load_collection needs to be skipped if task is anonymous
# (Fabric 2 or other subclassing libs only)
collection_config = self.collection.configuration(call.called_as)
config.load_collection(collection_config)
config.load_shell_env()
debug("Finished loading collection & shell env configs")
# Get final context from the Call (which will know how to generate
# an appropriate one; e.g. subclasses might use extra data from
# being parameterized), handing in this config for use there.
context = call.make_context(config)
if not call.task.taskset:
args = (context,) + args
result = call.task(*args, **call.kwargs)
if autoprint:
print(result)
# TODO: handle the non-dedupe case / the same-task-different-args
# case, wherein one task obj maps to >1 result.
results[call.task] = result
return results
def normalize(self, tasks):
calls = []
for task in tasks:
name, kwargs = None, {}
if isinstance(task, six.string_types):
name = task
elif isinstance(task, ParserContext):
name = task.name
kwargs = task.as_kwargs
else:
name, kwargs = task
c = Call(task=self.collection[name], kwargs=kwargs, called_as=name)
calls.append(c)
if not tasks and self.collection.default is not None:
calls = [Call(task=self.collection[self.collection.default])]
return calls
def dedupe(self, calls):
deduped = []
debug("Deduplicating tasks...")
for call in calls:
if call not in deduped:
debug("{!r}: no duplicates found, ok".format(call))
deduped.append(call)
else:
debug("{!r}: found in list already, skipping".format(call))
return deduped
def expand_calls(self, calls):
ret = []
for call in calls:
# Normalize to Call (this method is sometimes called with pre/post
# task lists, which may contain 'raw' Task objects)
if isinstance(call, Task):
call = Call(task=call)
debug("Expanding task-call {!r}".format(call))
# TODO: this is where we _used_ to call Executor.config_for(call,
# config)...
# TODO: now we may need to preserve more info like where the call
# came from, etc, but I feel like that shit should go _on the call
# itself_ right???
# TODO: we _probably_ don't even want the config in here anymore,
ret.extend(self.expand_calls(call.pre))
ret.append(call)
ret.extend(self.expand_calls(call.post))
return ret
| true | true |
f73a26d83a8acf23c96c6d692226c322322968e1 | 2,139 | py | Python | mmaction/models/__init__.py | andreeacosma/mmaction2 | 925a8813fb4b443e45566eb83e6b55576e3f2aad | [
"Apache-2.0"
] | null | null | null | mmaction/models/__init__.py | andreeacosma/mmaction2 | 925a8813fb4b443e45566eb83e6b55576e3f2aad | [
"Apache-2.0"
] | null | null | null | mmaction/models/__init__.py | andreeacosma/mmaction2 | 925a8813fb4b443e45566eb83e6b55576e3f2aad | [
"Apache-2.0"
] | 1 | 2022-03-22T02:18:40.000Z | 2022-03-22T02:18:40.000Z | from .backbones import (C3D, X3D, MobileNetV2, MobileNetV2TSM, ResNet,
ResNet2Plus1d, ResNet3d, ResNet3dCSN, ResNet3dLayer,
ResNet3dSlowFast, ResNet3dSlowOnly, ResNetAudio,
ResNetTIN, ResNetTSM, TANet)
from .builder import (DETECTORS, build_backbone, build_detector, build_head,
build_localizer, build_loss, build_model, build_neck,
build_recognizer)
from .common import LFB, TAM, Conv2plus1d, ConvAudio
from .heads import (AudioTSNHead, AVARoIHead, BaseHead, BBoxHeadAVA, FBOHead,
I3DHead, LFBInferHead, SlowFastHead, TPNHead, TRNHead,
TSMHead, TSNHead, X3DHead)
from .localizers import BMN, PEM, TEM
from .losses import (BCELossWithLogits, BinaryLogisticRegressionLoss, BMNLoss,
CrossEntropyLoss, HVULoss, NLLLoss, OHEMHingeLoss,
SSNLoss)
from .necks import TPN
from .recognizers import (AudioRecognizer, BaseRecognizer, recognizer2d,
recognizer3d)
from .registry import BACKBONES, HEADS, LOCALIZERS, LOSSES, RECOGNIZERS
from .roi_extractors import SingleRoIExtractor3D
__all__ = [
'BACKBONES', 'HEADS', 'RECOGNIZERS', 'build_recognizer', 'build_head',
'build_backbone', 'recognizer2d', 'recognizer3d', 'C3D', 'ResNet',
'ResNet3d', 'ResNet2Plus1d', 'I3DHead', 'TSNHead', 'TSMHead', 'BaseHead',
'BaseRecognizer', 'LOSSES', 'CrossEntropyLoss', 'NLLLoss', 'HVULoss',
'ResNetTSM', 'ResNet3dSlowFast', 'SlowFastHead', 'Conv2plus1d',
'ResNet3dSlowOnly', 'BCELossWithLogits', 'LOCALIZERS', 'build_localizer',
'PEM', 'TAM', 'TEM', 'BinaryLogisticRegressionLoss', 'BMN', 'BMNLoss',
'build_model', 'OHEMHingeLoss', 'SSNLoss', 'ResNet3dCSN', 'ResNetTIN',
'TPN', 'TPNHead', 'build_loss', 'build_neck', 'AudioRecognizer',
'AudioTSNHead', 'X3D', 'X3DHead', 'ResNet3dLayer', 'DETECTORS',
'SingleRoIExtractor3D', 'BBoxHeadAVA', 'ResNetAudio', 'build_detector',
'ConvAudio', 'AVARoIHead', 'MobileNetV2', 'MobileNetV2TSM', 'TANet', 'LFB',
'FBOHead', 'LFBInferHead', 'TRNHead'
]
| 57.810811 | 79 | 0.672277 | from .backbones import (C3D, X3D, MobileNetV2, MobileNetV2TSM, ResNet,
ResNet2Plus1d, ResNet3d, ResNet3dCSN, ResNet3dLayer,
ResNet3dSlowFast, ResNet3dSlowOnly, ResNetAudio,
ResNetTIN, ResNetTSM, TANet)
from .builder import (DETECTORS, build_backbone, build_detector, build_head,
build_localizer, build_loss, build_model, build_neck,
build_recognizer)
from .common import LFB, TAM, Conv2plus1d, ConvAudio
from .heads import (AudioTSNHead, AVARoIHead, BaseHead, BBoxHeadAVA, FBOHead,
I3DHead, LFBInferHead, SlowFastHead, TPNHead, TRNHead,
TSMHead, TSNHead, X3DHead)
from .localizers import BMN, PEM, TEM
from .losses import (BCELossWithLogits, BinaryLogisticRegressionLoss, BMNLoss,
CrossEntropyLoss, HVULoss, NLLLoss, OHEMHingeLoss,
SSNLoss)
from .necks import TPN
from .recognizers import (AudioRecognizer, BaseRecognizer, recognizer2d,
recognizer3d)
from .registry import BACKBONES, HEADS, LOCALIZERS, LOSSES, RECOGNIZERS
from .roi_extractors import SingleRoIExtractor3D
__all__ = [
'BACKBONES', 'HEADS', 'RECOGNIZERS', 'build_recognizer', 'build_head',
'build_backbone', 'recognizer2d', 'recognizer3d', 'C3D', 'ResNet',
'ResNet3d', 'ResNet2Plus1d', 'I3DHead', 'TSNHead', 'TSMHead', 'BaseHead',
'BaseRecognizer', 'LOSSES', 'CrossEntropyLoss', 'NLLLoss', 'HVULoss',
'ResNetTSM', 'ResNet3dSlowFast', 'SlowFastHead', 'Conv2plus1d',
'ResNet3dSlowOnly', 'BCELossWithLogits', 'LOCALIZERS', 'build_localizer',
'PEM', 'TAM', 'TEM', 'BinaryLogisticRegressionLoss', 'BMN', 'BMNLoss',
'build_model', 'OHEMHingeLoss', 'SSNLoss', 'ResNet3dCSN', 'ResNetTIN',
'TPN', 'TPNHead', 'build_loss', 'build_neck', 'AudioRecognizer',
'AudioTSNHead', 'X3D', 'X3DHead', 'ResNet3dLayer', 'DETECTORS',
'SingleRoIExtractor3D', 'BBoxHeadAVA', 'ResNetAudio', 'build_detector',
'ConvAudio', 'AVARoIHead', 'MobileNetV2', 'MobileNetV2TSM', 'TANet', 'LFB',
'FBOHead', 'LFBInferHead', 'TRNHead'
]
| true | true |
f73a27231a5734c402e39f01f9abb3d2b71ff0a8 | 3,750 | py | Python | bitbots_navigation/bitbots_localization/src/bitbots_localization/localization_dsd/decisions/decisions.py | MosHumanoid/bitbots_thmos_meta | f45ccc362dc689b69027be5b0d000d2a08580de4 | [
"MIT"
] | null | null | null | bitbots_navigation/bitbots_localization/src/bitbots_localization/localization_dsd/decisions/decisions.py | MosHumanoid/bitbots_thmos_meta | f45ccc362dc689b69027be5b0d000d2a08580de4 | [
"MIT"
] | null | null | null | bitbots_navigation/bitbots_localization/src/bitbots_localization/localization_dsd/decisions/decisions.py | MosHumanoid/bitbots_thmos_meta | f45ccc362dc689b69027be5b0d000d2a08580de4 | [
"MIT"
] | null | null | null | import rospy
from humanoid_league_msgs.msg import GameState, RobotControlState
from dynamic_stack_decider.abstract_decision_element import AbstractDecisionElement
class CheckFallen(AbstractDecisionElement):
    """
    Decides whether the robot is currently lying on the ground.
    """
    def perform(self, reevaluate=False):
        self.clear_debug_data()
        fallen = self.blackboard.robot_control_state == RobotControlState.FALLEN
        return "FALLEN" if fallen else "NOT_FALLEN"
    def get_reevaluate(self):
        return True
class CheckFalling(AbstractDecisionElement):
    """
    Decides whether the robot is currently in the process of falling.
    """
    def perform(self, reevaluate=False):
        self.clear_debug_data()
        if self.blackboard.robot_control_state != RobotControlState.FALLING:
            return "NOT_FALLING"
        return "FALLING"
    def get_reevaluate(self):
        return True
class CheckGettingUp(AbstractDecisionElement):
    """
    Decides whether the robot is currently standing back up.
    """
    def perform(self, reevaluate=False):
        self.clear_debug_data()
        state = self.blackboard.robot_control_state
        return "GETTING_UP" if state == RobotControlState.GETTING_UP else "NOT_GETTING_UP"
    def get_reevaluate(self):
        return True
class CheckPickup(AbstractDecisionElement):
    """
    Decides whether the robot is picked up, and detects the first cycle
    after it has been put back down.
    """
    def perform(self, reevaluate=False):
        self.clear_debug_data()
        if self.blackboard.robot_control_state == RobotControlState.PICKED_UP:
            # Remember that we were airborne so the next ground contact can
            # be reported as "just put down".
            self.blackboard.last_state_pickup = True
            return "UP"
        if self.blackboard.last_state_pickup:
            self.blackboard.last_state_pickup = False
            return "JUST_DOWN"
        return "DOWN"
    def get_reevaluate(self):
        return True
class GettingUpState(AbstractDecisionElement):
    """
    Decides whether the robot is falling, fallen or standing up, and
    detects the first cycle after it has finished getting up.
    """
    def __init__(self, blackboard, dsd, parameters=None):
        super(GettingUpState, self).__init__(blackboard, dsd, parameters)
        # Control states during which the robot is not upright.
        self.get_up_states = [
            RobotControlState.FALLING,
            RobotControlState.FALLEN,
            RobotControlState.GETTING_UP]
    def perform(self, reevaluate=False):
        self.clear_debug_data()
        if self.blackboard.robot_control_state in self.get_up_states:
            self.blackboard.last_state_get_up = True
            return "YES"
        if self.blackboard.last_state_get_up:
            # First cycle after a completed stand-up.
            self.blackboard.last_state_get_up = False
            return "GOTUP"
        return "NO"
    def get_reevaluate(self):
        return True
class CheckGameStateReceived(AbstractDecisionElement):
    """
    Decides whether a gamestate message from the game controller has
    been received yet.
    """
    def perform(self, reevaluate=False):
        self.clear_debug_data()
        if self.blackboard.game_state_received:
            return "GAMESTATE_RECEIVED"
        if not self.blackboard.initialized:
            # Only report the missing gamestate once, right after start-up.
            self.blackboard.initialized = True
            return "NO_GAMESTATE_INIT"
        return "DO_NOTHING"
    def get_reevaluate(self):
        return True
class CheckGameState(AbstractDecisionElement):
    """
    Decides which phase of the game we are currently in.
    """
    # Numeric game states as sent by the game controller.
    # NOTE(review): presumably 0 = initial, 2 = set, 3 = playing — confirm
    # against the humanoid_league_msgs GameState message definition.
    _STATE_OUTCOMES = {0: "INIT", 2: "SET", 3: "PLAYING"}
    def perform(self, reevaluate=False):
        self.clear_debug_data()
        if self.blackboard.penalized:
            return "PENALTY"
        return self._STATE_OUTCOMES.get(self.blackboard.game_state, "NO_INFORMATION")
    def get_reevaluate(self):
        return True
| 25.167785 | 83 | 0.6416 | import rospy
from humanoid_league_msgs.msg import GameState, RobotControlState
from dynamic_stack_decider.abstract_decision_element import AbstractDecisionElement
class CheckFallen(AbstractDecisionElement):
def perform(self, reevaluate=False):
self.clear_debug_data()
if self.blackboard.robot_control_state == RobotControlState.FALLEN:
return "FALLEN"
return "NOT_FALLEN"
def get_reevaluate(self):
return True
class CheckFalling(AbstractDecisionElement):
def perform(self, reevaluate=False):
self.clear_debug_data()
if self.blackboard.robot_control_state == RobotControlState.FALLING:
return "FALLING"
return "NOT_FALLING"
def get_reevaluate(self):
return True
class CheckGettingUp(AbstractDecisionElement):
def perform(self, reevaluate=False):
self.clear_debug_data()
if self.blackboard.robot_control_state == RobotControlState.GETTING_UP:
return "GETTING_UP"
return "NOT_GETTING_UP"
def get_reevaluate(self):
return True
class CheckPickup(AbstractDecisionElement):
def perform(self, reevaluate=False):
self.clear_debug_data()
if self.blackboard.robot_control_state == RobotControlState.PICKED_UP:
self.blackboard.last_state_pickup = True
return "UP"
else:
if self.blackboard.last_state_pickup:
self.blackboard.last_state_pickup = False
return "JUST_DOWN"
return "DOWN"
def get_reevaluate(self):
return True
class GettingUpState(AbstractDecisionElement):
def __init__(self, blackboard, dsd, parameters=None):
super(GettingUpState, self).__init__(blackboard, dsd, parameters)
self.get_up_states = [
RobotControlState.FALLING,
RobotControlState.FALLEN,
RobotControlState.GETTING_UP]
def perform(self, reevaluate=False):
self.clear_debug_data()
if self.blackboard.robot_control_state in self.get_up_states:
self.blackboard.last_state_get_up = True
return "YES"
else:
if self.blackboard.last_state_get_up:
self.blackboard.last_state_get_up = False
return "GOTUP"
return "NO"
def get_reevaluate(self):
return True
class CheckGameStateReceived(AbstractDecisionElement):
def perform(self, reevaluate=False):
self.clear_debug_data()
if not self.blackboard.game_state_received:
if not self.blackboard.initialized:
self.blackboard.initialized = True
return "NO_GAMESTATE_INIT"
else:
return "DO_NOTHING"
return "GAMESTATE_RECEIVED"
def get_reevaluate(self):
return True
class CheckGameState(AbstractDecisionElement):
def perform(self, reevaluate=False):
self.clear_debug_data()
if self.blackboard.penalized:
return "PENALTY"
elif self.blackboard.game_state == 0:
return "INIT"
elif self.blackboard.game_state == 2:
return "SET"
elif self.blackboard.game_state == 3:
return "PLAYING"
return "NO_INFORMATION"
def get_reevaluate(self):
return True
| true | true |
f73a272f69474bd8614deac8cab5edccb4b283d6 | 8,583 | py | Python | nablapps/nablashop/views.py | NablaWebkom/nablaweb | 9247c5e3f7b5d965d9437c74530638f925d0e9c6 | [
"MIT"
] | 1 | 2019-10-07T13:59:19.000Z | 2019-10-07T13:59:19.000Z | nablapps/nablashop/views.py | NablaWebkom/nablaweb | 9247c5e3f7b5d965d9437c74530638f925d0e9c6 | [
"MIT"
] | 2 | 2019-10-07T14:47:37.000Z | 2019-10-07T14:49:49.000Z | nablapps/nablashop/views.py | NablaWebkom/nablaweb | 9247c5e3f7b5d965d9437c74530638f925d0e9c6 | [
"MIT"
] | null | null | null | from datetime import datetime
from django import forms
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, redirect, render
from django.utils import timezone
from django.views.generic import DetailView, ListView, TemplateView, View
from nablapps.accounts.models import NablaUser
from nablapps.officeBeer.models import Account
from nablapps.officeBeer.views import Transaction # PurchaseForm
from .models import Category, Order, OrderProduct, Product
class IndexView(ListView):
queryset = Product.objects.order_by("-pub_date")
template_name = "nablashop/index.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["categories"] = Category.objects.all()
return context
class ProductDetailView(DetailView):
model = Product
template_name = "nablashop/product_detail.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["categories"] = Category.objects.all()
return context
class CategoryDetailView(DetailView):
model = Category
template_name = "nablashop/category_detail.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["categories"] = Category.objects.all()
context["products"] = self.object.product_set.order_by("-pub_date")
return context
@login_required
def add_to_cart(request, slug):
product = get_object_or_404(Product, slug=slug)
order_product, created = OrderProduct.objects.get_or_create(
product=product, user=request.user, ordered=False
)
order_qs = Order.objects.filter(user=request.user, ordered=False)
if order_qs.exists():
order = order_qs[0]
if order.products.filter(product__slug=product.slug).exists():
order_product.quantity += 1
order_product.save()
messages.info(request, "Antall varer ble oppdatert.")
return redirect("nablashop:order-summary")
else:
order.products.add(order_product)
messages.info(request, "Varen ble lagt til i handlevognen.")
return redirect("nablashop:order-summary")
else:
ordered_date = timezone.now()
order = Order.objects.create(user=request.user, ordered_date=ordered_date)
order.products.add(order_product)
messages.info(request, "Varen ble lagt til i handlevognen.")
return redirect("nablashop:order-summary")
@login_required
def remove_from_cart(request, slug):
product = get_object_or_404(Product, slug=slug)
order_qs = Order.objects.filter(user=request.user, ordered=False)
if order_qs.exists():
order = order_qs[0]
if order.products.filter(product__slug=product.slug).exists():
order_product = OrderProduct.objects.filter(
product=product, user=request.user, ordered=False
)[0]
order.products.remove(order_product)
messages.info(request, "Varen ble fjernet fra handlevognen")
return redirect("nablashop:order-summary")
else:
messages.info(request, "Varen ble ikke funnet i handlevognen.")
return redirect("nablashop:product_detail", slug=slug)
else:
messages.info(request, "Du har ingen aktiv ordere.")
return redirect("nablashop:product_detail", slug=slug)
class OrderSummaryView(LoginRequiredMixin, View):
def get(self, *args, **kwargs):
try:
order = Order.objects.get(user=self.request.user, ordered=False)
context = {"object": order}
return render(self.request, "order_summary.html", context)
except ObjectDoesNotExist:
messages.error(self.request, "Du har ingen aktiv ordre")
return redirect("/")
@login_required
def remove_single_product_from_cart(request, slug):
product = get_object_or_404(Product, slug=slug)
order_qs = Order.objects.filter(user=request.user, ordered=False)
if order_qs.exists():
order = order_qs[0]
if order.products.filter(product__slug=product.slug).exists():
order_product = OrderProduct.objects.filter(
product=product, user=request.user, ordered=False
)[0]
if order_product.quantity > 1:
order_product.quantity -= 1
order_product.save()
else:
order.products.remove(order_product)
messages.info(request, "Antall varer ble oppdatert.")
return redirect("nablashop:order-summary")
else:
messages.info(request, "Varen ble ikke funnet i handlevognen.")
return redirect("nablashop:product_detail", slug=slug)
else:
messages.info(request, "Du har ingen aktiv ordere.")
return redirect("nablashop:product_detail", slug=slug)
class CheckoutView(TemplateView):
template_name = "nablashop/purchase.html"
def post(self, request, *args, **kwargs):
purchase_form = PurchaseForm(request.POST)
if purchase_form.is_valid():
user = NablaUser.objects.get_from_rfid(
purchase_form.cleaned_data["user_card_key"]
)
account = Account.objects.get_or_create(user=user)[0]
order = Order.objects.get(user=user)
# Should this rather be in clean form?
if account.balance < order.get_total():
messages.error(
request,
"Ikke nok Nabla-Coin på konto. Kunne ikke gjennomføre handel.",
)
return HttpResponseRedirect("/shop/")
account.balance -= order.get_total()
products_list = order.products
for item in products_list.all():
if item.product.stock < item.quantity:
messages.error(
request,
f"Ikke nok {item.product} på lager. Kunne ikke gjennomføre handel.",
)
return HttpResponseRedirect("/shop/")
item.product.stock -= item.quantity
Product(
name=item.product.name,
description_short=item.product.description_short,
description=item.product.description,
pub_date=item.product.pub_date,
photo=item.product.photo,
price=item.product.price,
stock=item.product.stock,
category=item.product.category,
slug=item.product.slug,
).save()
item.product.delete()
Transaction(
description=f"{order.get_total()} Nabla-Coin ble trukket fra {account.user.username}'s konto.",
amount=0,
account=account,
date=datetime.now(),
).save()
account.save()
messages.success(
request, f"Gjennomført! Nabla-Coin på konto {user}: {account.balance}"
)
return HttpResponseRedirect("/shop/")
context = {"form": purchase_form}
return render(request, self.template_name, context)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["form"] = PurchaseForm()
context["last_transactions"] = Transaction.objects.filter(
amount__lt=0
).order_by("-date")[:3]
return context
class PurchaseForm(forms.Form):
# product = forms.ChoiceField(widget=forms.RadioSelect)
user_card_key = forms.IntegerField(
label="Kortnummer",
widget=forms.TextInput(attrs={"placeholder": "Scan kort", "autofocus": "true"}),
)
# todo valid product
def clean_user_card_key(self):
data = self.cleaned_data["user_card_key"]
# Check that there is an account with the given card key
if not NablaUser.objects.get_from_rfid(data):
raise ValidationError(
"Det er ingen registrerte kontoer med den kortnøkkelen,\
brukeren har kanskje ikke registrert NTNU-kortet sitt."
)
return data
| 37.977876 | 111 | 0.633345 | from datetime import datetime
from django import forms
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, redirect, render
from django.utils import timezone
from django.views.generic import DetailView, ListView, TemplateView, View
from nablapps.accounts.models import NablaUser
from nablapps.officeBeer.models import Account
from nablapps.officeBeer.views import Transaction
from .models import Category, Order, OrderProduct, Product
class IndexView(ListView):
queryset = Product.objects.order_by("-pub_date")
template_name = "nablashop/index.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["categories"] = Category.objects.all()
return context
class ProductDetailView(DetailView):
model = Product
template_name = "nablashop/product_detail.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["categories"] = Category.objects.all()
return context
class CategoryDetailView(DetailView):
model = Category
template_name = "nablashop/category_detail.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["categories"] = Category.objects.all()
context["products"] = self.object.product_set.order_by("-pub_date")
return context
@login_required
def add_to_cart(request, slug):
product = get_object_or_404(Product, slug=slug)
order_product, created = OrderProduct.objects.get_or_create(
product=product, user=request.user, ordered=False
)
order_qs = Order.objects.filter(user=request.user, ordered=False)
if order_qs.exists():
order = order_qs[0]
if order.products.filter(product__slug=product.slug).exists():
order_product.quantity += 1
order_product.save()
messages.info(request, "Antall varer ble oppdatert.")
return redirect("nablashop:order-summary")
else:
order.products.add(order_product)
messages.info(request, "Varen ble lagt til i handlevognen.")
return redirect("nablashop:order-summary")
else:
ordered_date = timezone.now()
order = Order.objects.create(user=request.user, ordered_date=ordered_date)
order.products.add(order_product)
messages.info(request, "Varen ble lagt til i handlevognen.")
return redirect("nablashop:order-summary")
@login_required
def remove_from_cart(request, slug):
product = get_object_or_404(Product, slug=slug)
order_qs = Order.objects.filter(user=request.user, ordered=False)
if order_qs.exists():
order = order_qs[0]
if order.products.filter(product__slug=product.slug).exists():
order_product = OrderProduct.objects.filter(
product=product, user=request.user, ordered=False
)[0]
order.products.remove(order_product)
messages.info(request, "Varen ble fjernet fra handlevognen")
return redirect("nablashop:order-summary")
else:
messages.info(request, "Varen ble ikke funnet i handlevognen.")
return redirect("nablashop:product_detail", slug=slug)
else:
messages.info(request, "Du har ingen aktiv ordere.")
return redirect("nablashop:product_detail", slug=slug)
class OrderSummaryView(LoginRequiredMixin, View):
def get(self, *args, **kwargs):
try:
order = Order.objects.get(user=self.request.user, ordered=False)
context = {"object": order}
return render(self.request, "order_summary.html", context)
except ObjectDoesNotExist:
messages.error(self.request, "Du har ingen aktiv ordre")
return redirect("/")
@login_required
def remove_single_product_from_cart(request, slug):
product = get_object_or_404(Product, slug=slug)
order_qs = Order.objects.filter(user=request.user, ordered=False)
if order_qs.exists():
order = order_qs[0]
if order.products.filter(product__slug=product.slug).exists():
order_product = OrderProduct.objects.filter(
product=product, user=request.user, ordered=False
)[0]
if order_product.quantity > 1:
order_product.quantity -= 1
order_product.save()
else:
order.products.remove(order_product)
messages.info(request, "Antall varer ble oppdatert.")
return redirect("nablashop:order-summary")
else:
messages.info(request, "Varen ble ikke funnet i handlevognen.")
return redirect("nablashop:product_detail", slug=slug)
else:
messages.info(request, "Du har ingen aktiv ordere.")
return redirect("nablashop:product_detail", slug=slug)
class CheckoutView(TemplateView):
template_name = "nablashop/purchase.html"
def post(self, request, *args, **kwargs):
purchase_form = PurchaseForm(request.POST)
if purchase_form.is_valid():
user = NablaUser.objects.get_from_rfid(
purchase_form.cleaned_data["user_card_key"]
)
account = Account.objects.get_or_create(user=user)[0]
order = Order.objects.get(user=user)
if account.balance < order.get_total():
messages.error(
request,
"Ikke nok Nabla-Coin på konto. Kunne ikke gjennomføre handel.",
)
return HttpResponseRedirect("/shop/")
account.balance -= order.get_total()
products_list = order.products
for item in products_list.all():
if item.product.stock < item.quantity:
messages.error(
request,
f"Ikke nok {item.product} på lager. Kunne ikke gjennomføre handel.",
)
return HttpResponseRedirect("/shop/")
item.product.stock -= item.quantity
Product(
name=item.product.name,
description_short=item.product.description_short,
description=item.product.description,
pub_date=item.product.pub_date,
photo=item.product.photo,
price=item.product.price,
stock=item.product.stock,
category=item.product.category,
slug=item.product.slug,
).save()
item.product.delete()
Transaction(
description=f"{order.get_total()} Nabla-Coin ble trukket fra {account.user.username}'s konto.",
amount=0,
account=account,
date=datetime.now(),
).save()
account.save()
messages.success(
request, f"Gjennomført! Nabla-Coin på konto {user}: {account.balance}"
)
return HttpResponseRedirect("/shop/")
context = {"form": purchase_form}
return render(request, self.template_name, context)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["form"] = PurchaseForm()
context["last_transactions"] = Transaction.objects.filter(
amount__lt=0
).order_by("-date")[:3]
return context
class PurchaseForm(forms.Form):
# product = forms.ChoiceField(widget=forms.RadioSelect)
user_card_key = forms.IntegerField(
label="Kortnummer",
widget=forms.TextInput(attrs={"placeholder": "Scan kort", "autofocus": "true"}),
)
# todo valid product
def clean_user_card_key(self):
data = self.cleaned_data["user_card_key"]
# Check that there is an account with the given card key
if not NablaUser.objects.get_from_rfid(data):
raise ValidationError(
"Det er ingen registrerte kontoer med den kortnøkkelen,\
brukeren har kanskje ikke registrert NTNU-kortet sitt."
)
return data
| true | true |
f73a27c9847522d5f8fd8651a5bc8aab84329317 | 3,810 | py | Python | util/im_processing.py | ronghanghu/cmn | 85644ad56f8f62d04a5e8636ad3efe9ef7b34705 | [
"MIT"
] | 72 | 2017-04-12T17:07:36.000Z | 2021-06-18T08:20:47.000Z | util/im_processing.py | ronghanghu/cmn | 85644ad56f8f62d04a5e8636ad3efe9ef7b34705 | [
"MIT"
] | 8 | 2017-07-06T04:24:04.000Z | 2020-09-17T10:29:44.000Z | util/im_processing.py | ronghanghu/cmn | 85644ad56f8f62d04a5e8636ad3efe9ef7b34705 | [
"MIT"
] | 21 | 2017-04-19T07:38:09.000Z | 2021-02-28T13:39:22.000Z | from __future__ import absolute_import, division, print_function
import skimage.transform
import numpy as np
def rectify_bboxes(bboxes, height, width):
bboxes = np.maximum(bboxes, 0)
bboxes[:, 2:4] = np.maximum(bboxes[:, 0:2], bboxes[:, 2:4])
bboxes[:, 0] = np.minimum(bboxes[:, 0], width-1)
bboxes[:, 1] = np.minimum(bboxes[:, 1], height-1)
bboxes[:, 2] = np.minimum(bboxes[:, 2], width-1)
bboxes[:, 3] = np.minimum(bboxes[:, 3], height-1)
return bboxes
def resize_and_pad(im, input_h, input_w):
# Resize and pad im to input_h x input_w size
im_h, im_w = im.shape[:2]
scale = min(input_h / im_h, input_w / im_w)
resized_h = int(np.round(im_h * scale))
resized_w = int(np.round(im_w * scale))
pad_h = int(np.floor(input_h - resized_h) / 2)
pad_w = int(np.floor(input_w - resized_w) / 2)
resized_im = skimage.transform.resize(im, [resized_h, resized_w])
if im.ndim > 2:
new_im = np.zeros((input_h, input_w, im.shape[2]), dtype=resized_im.dtype)
else:
new_im = np.zeros((input_h, input_w), dtype=resized_im.dtype)
new_im[pad_h:pad_h+resized_h, pad_w:pad_w+resized_w, ...] = resized_im
return new_im
def resize_and_crop(im, input_h, input_w):
# Resize and crop im to input_h x input_w size
im_h, im_w = im.shape[:2]
scale = max(input_h / im_h, input_w / im_w)
resized_h = int(np.round(im_h * scale))
resized_w = int(np.round(im_w * scale))
crop_h = int(np.floor(resized_h - input_h) / 2)
crop_w = int(np.floor(resized_w - input_w) / 2)
resized_im = skimage.transform.resize(im, [resized_h, resized_w])
if im.ndim > 2:
new_im = np.zeros((input_h, input_w, im.shape[2]), dtype=resized_im.dtype)
else:
new_im = np.zeros((input_h, input_w), dtype=resized_im.dtype)
new_im[...] = resized_im[crop_h:crop_h+input_h, crop_w:crop_w+input_w, ...]
return new_im
def crop_bboxes_subtract_mean(im, bboxes, crop_size, image_mean):
if isinstance(bboxes, list):
bboxes = np.array(bboxes)
bboxes = bboxes.reshape((-1, 4))
im = skimage.img_as_ubyte(im)
num_bbox = bboxes.shape[0]
imcrop_batch = np.zeros((num_bbox, crop_size, crop_size, 3), dtype=np.float32)
for n_bbox in range(bboxes.shape[0]):
xmin, ymin, xmax, ymax = bboxes[n_bbox]
# crop and resize
imcrop = im[ymin:ymax+1, xmin:xmax+1, :]
imcrop_batch[n_bbox, ...] = skimage.img_as_ubyte(
skimage.transform.resize(imcrop, [crop_size, crop_size]))
imcrop_batch -= image_mean
return imcrop_batch
def bboxes_from_masks(masks):
if masks.ndim == 2:
masks = masks[np.newaxis, ...]
num_mask = masks.shape[0]
bboxes = np.zeros((num_mask, 4), dtype=np.int32)
for n_mask in range(num_mask):
idx = np.nonzero(masks[n_mask])
xmin, xmax = np.min(idx[1]), np.max(idx[1])
ymin, ymax = np.min(idx[0]), np.max(idx[0])
bboxes[n_mask, :] = [xmin, ymin, xmax, ymax]
return bboxes
def crop_masks_subtract_mean(im, masks, crop_size, image_mean):
if masks.ndim == 2:
masks = masks[np.newaxis, ...]
num_mask = masks.shape[0]
im = skimage.img_as_ubyte(im)
bboxes = bboxes_from_masks(masks)
imcrop_batch = np.zeros((num_mask, crop_size, crop_size, 3), dtype=np.float32)
for n_mask in range(num_mask):
xmin, ymin, xmax, ymax = bboxes[n_mask]
# crop and resize
im_masked = im.copy()
mask = masks[n_mask, ..., np.newaxis]
im_masked *= mask
im_masked += image_mean.astype(np.uint8) * (1 - mask)
imcrop = im_masked[ymin:ymax+1, xmin:xmax+1, :]
imcrop_batch[n_mask, ...] = skimage.img_as_ubyte(skimage.transform.resize(imcrop, [224, 224]))
imcrop_batch -= image_mean
return imcrop_batch
| 37.722772 | 102 | 0.642782 | from __future__ import absolute_import, division, print_function
import skimage.transform
import numpy as np
def rectify_bboxes(bboxes, height, width):
bboxes = np.maximum(bboxes, 0)
bboxes[:, 2:4] = np.maximum(bboxes[:, 0:2], bboxes[:, 2:4])
bboxes[:, 0] = np.minimum(bboxes[:, 0], width-1)
bboxes[:, 1] = np.minimum(bboxes[:, 1], height-1)
bboxes[:, 2] = np.minimum(bboxes[:, 2], width-1)
bboxes[:, 3] = np.minimum(bboxes[:, 3], height-1)
return bboxes
def resize_and_pad(im, input_h, input_w):
im_h, im_w = im.shape[:2]
scale = min(input_h / im_h, input_w / im_w)
resized_h = int(np.round(im_h * scale))
resized_w = int(np.round(im_w * scale))
pad_h = int(np.floor(input_h - resized_h) / 2)
pad_w = int(np.floor(input_w - resized_w) / 2)
resized_im = skimage.transform.resize(im, [resized_h, resized_w])
if im.ndim > 2:
new_im = np.zeros((input_h, input_w, im.shape[2]), dtype=resized_im.dtype)
else:
new_im = np.zeros((input_h, input_w), dtype=resized_im.dtype)
new_im[pad_h:pad_h+resized_h, pad_w:pad_w+resized_w, ...] = resized_im
return new_im
def resize_and_crop(im, input_h, input_w):
im_h, im_w = im.shape[:2]
scale = max(input_h / im_h, input_w / im_w)
resized_h = int(np.round(im_h * scale))
resized_w = int(np.round(im_w * scale))
crop_h = int(np.floor(resized_h - input_h) / 2)
crop_w = int(np.floor(resized_w - input_w) / 2)
resized_im = skimage.transform.resize(im, [resized_h, resized_w])
if im.ndim > 2:
new_im = np.zeros((input_h, input_w, im.shape[2]), dtype=resized_im.dtype)
else:
new_im = np.zeros((input_h, input_w), dtype=resized_im.dtype)
new_im[...] = resized_im[crop_h:crop_h+input_h, crop_w:crop_w+input_w, ...]
return new_im
def crop_bboxes_subtract_mean(im, bboxes, crop_size, image_mean):
if isinstance(bboxes, list):
bboxes = np.array(bboxes)
bboxes = bboxes.reshape((-1, 4))
im = skimage.img_as_ubyte(im)
num_bbox = bboxes.shape[0]
imcrop_batch = np.zeros((num_bbox, crop_size, crop_size, 3), dtype=np.float32)
for n_bbox in range(bboxes.shape[0]):
xmin, ymin, xmax, ymax = bboxes[n_bbox]
imcrop = im[ymin:ymax+1, xmin:xmax+1, :]
imcrop_batch[n_bbox, ...] = skimage.img_as_ubyte(
skimage.transform.resize(imcrop, [crop_size, crop_size]))
imcrop_batch -= image_mean
return imcrop_batch
def bboxes_from_masks(masks):
if masks.ndim == 2:
masks = masks[np.newaxis, ...]
num_mask = masks.shape[0]
bboxes = np.zeros((num_mask, 4), dtype=np.int32)
for n_mask in range(num_mask):
idx = np.nonzero(masks[n_mask])
xmin, xmax = np.min(idx[1]), np.max(idx[1])
ymin, ymax = np.min(idx[0]), np.max(idx[0])
bboxes[n_mask, :] = [xmin, ymin, xmax, ymax]
return bboxes
def crop_masks_subtract_mean(im, masks, crop_size, image_mean):
if masks.ndim == 2:
masks = masks[np.newaxis, ...]
num_mask = masks.shape[0]
im = skimage.img_as_ubyte(im)
bboxes = bboxes_from_masks(masks)
imcrop_batch = np.zeros((num_mask, crop_size, crop_size, 3), dtype=np.float32)
for n_mask in range(num_mask):
xmin, ymin, xmax, ymax = bboxes[n_mask]
im_masked = im.copy()
mask = masks[n_mask, ..., np.newaxis]
im_masked *= mask
im_masked += image_mean.astype(np.uint8) * (1 - mask)
imcrop = im_masked[ymin:ymax+1, xmin:xmax+1, :]
imcrop_batch[n_mask, ...] = skimage.img_as_ubyte(skimage.transform.resize(imcrop, [224, 224]))
imcrop_batch -= image_mean
return imcrop_batch
| true | true |
f73a28171f08fbe4fdec729da8d06cf7f77356a9 | 7,392 | py | Python | src/generated-spec/iam.py | wheerd/cloudformation-to-terraform | 5411b33293e1f7d7673bb5d4cb52ff0537240db3 | [
"MIT"
] | null | null | null | src/generated-spec/iam.py | wheerd/cloudformation-to-terraform | 5411b33293e1f7d7673bb5d4cb52ff0537240db3 | [
"MIT"
] | null | null | null | src/generated-spec/iam.py | wheerd/cloudformation-to-terraform | 5411b33293e1f7d7673bb5d4cb52ff0537240db3 | [
"MIT"
] | null | null | null | from . import *
class AWS_IAM_Role_Policy(CloudFormationProperty):
def write(self, w):
with w.block("policy"):
self.property(w, "PolicyDocument", "policy_document", JsonValueConverter())
self.property(w, "PolicyName", "policy_name", StringValueConverter())
class AWS_IAM_Group_Policy(CloudFormationProperty):
def write(self, w):
with w.block("policy"):
self.property(w, "PolicyDocument", "policy_document", JsonValueConverter())
self.property(w, "PolicyName", "policy_name", StringValueConverter())
class AWS_IAM_User_LoginProfile(CloudFormationProperty):
def write(self, w):
with w.block("login_profile"):
self.property(w, "Password", "password", StringValueConverter())
self.property(w, "PasswordResetRequired", "password_reset_required", BasicValueConverter())
class AWS_IAM_User_Policy(CloudFormationProperty):
def write(self, w):
with w.block("policy"):
self.property(w, "PolicyDocument", "policy_document", JsonValueConverter())
self.property(w, "PolicyName", "policy_name", StringValueConverter())
class AWS_IAM_Group(CloudFormationResource):
cfn_type = "AWS::IAM::Group"
tf_type = "aws_iam_group"
ref = "id"
attrs = {
"Arn": "arn",
# Additional TF attributes: unique_id
}
def write(self, w):
with self.resource_block(w):
self.property(w, "GroupName", "name", StringValueConverter())
self.property(w, "ManagedPolicyArns", "arn", ListValueConverter(StringValueConverter()))
self.property(w, "Path", "path", StringValueConverter())
self.repeated_block(w, "Policies", AWS_IAM_Group_Policy) # TODO: Probably not the correct mapping
class AWS_IAM_Policy(CloudFormationResource):
cfn_type = "AWS::IAM::Policy"
tf_type = "aws_iam_policy_attachment"
ref = "id"
attrs = {}
def write(self, w):
with self.resource_block(w):
self.property(w, "Groups", "groups", ListValueConverter(StringValueConverter()))
self.property(w, "PolicyDocument", "policy_document", JsonValueConverter()) # TODO: Probably not the correct mapping
self.property(w, "PolicyName", "name", StringValueConverter())
self.property(w, "Roles", "roles", ListValueConverter(StringValueConverter()))
self.property(w, "Users", "users", ListValueConverter(StringValueConverter()))
class AWS_IAM_ServiceLinkedRole(CloudFormationResource):
cfn_type = "AWS::IAM::ServiceLinkedRole"
tf_type = "aws_iam_service_linked_role"
ref = "id"
attrs = {} # Additional TF attributes: arn, create_date, name, path, unique_id
def write(self, w):
with self.resource_block(w):
self.property(w, "CustomSuffix", "custom_suffix", StringValueConverter())
self.property(w, "Description", "description", StringValueConverter())
self.property(w, "AWSServiceName", "aws_service_name", StringValueConverter())
class AWS_IAM_AccessKey(CloudFormationResource):
cfn_type = "AWS::IAM::AccessKey"
tf_type = "aws_iam_access_key"
ref = "id"
attrs = {
"SecretAccessKey": "secret",
# Additional TF attributes: encrypted_secret, key_fingerprint, ses_smtp_password, ses_smtp_password_v4, status
}
def write(self, w):
with self.resource_block(w):
self.property(w, "Serial", "serial", BasicValueConverter()) # TODO: Probably not the correct mapping
self.property(w, "Status", "status", StringValueConverter())
self.property(w, "UserName", "user", StringValueConverter())
class AWS_IAM_User(CloudFormationResource):
cfn_type = "AWS::IAM::User"
tf_type = "aws_iam_user_group_membership"
ref = "id"
attrs = {
"Arn": "arn", # TODO: Probably not the correct mapping
}
def write(self, w):
with self.resource_block(w):
self.property(w, "Groups", "groups", ListValueConverter(StringValueConverter()))
self.block(w, "LoginProfile", AWS_IAM_User_LoginProfile) # TODO: Probably not the correct mapping
self.property(w, "ManagedPolicyArns", "managed_policy_arns", ListValueConverter(StringValueConverter())) # TODO: Probably not the correct mapping
self.property(w, "Path", "path", StringValueConverter()) # TODO: Probably not the correct mapping
self.property(w, "PermissionsBoundary", "permissions_boundary", StringValueConverter()) # TODO: Probably not the correct mapping
self.repeated_block(w, "Policies", AWS_IAM_User_Policy) # TODO: Probably not the correct mapping
self.property(w, "Tags", "tags", ListValueConverter(ResourceTag())) # TODO: Probably not the correct mapping
self.property(w, "UserName", "user", StringValueConverter())
class AWS_IAM_Role(CloudFormationResource):
cfn_type = "AWS::IAM::Role"
tf_type = "aws_iam_role"
ref = "id"
attrs = {
"Arn": "arn",
"RoleId": "id",
# Additional TF attributes: create_date, name, unique_id
}
def write(self, w):
with self.resource_block(w):
self.property(w, "AssumeRolePolicyDocument", "assume_role_policy", JsonValueConverter())
self.property(w, "Description", "description", StringValueConverter())
self.property(w, "ManagedPolicyArns", "arn", ListValueConverter(StringValueConverter()))
self.property(w, "MaxSessionDuration", "max_session_duration", BasicValueConverter())
self.property(w, "Path", "path", StringValueConverter())
self.property(w, "PermissionsBoundary", "permissions_boundary", StringValueConverter())
self.repeated_block(w, "Policies", AWS_IAM_Role_Policy)
self.property(w, "RoleName", "name", StringValueConverter())
self.property(w, "Tags", "tags", ListValueConverter(ResourceTag()))
class AWS_IAM_UserToGroupAddition(CloudFormationResource):
cfn_type = "AWS::IAM::UserToGroupAddition"
tf_type = "aws_iam_user"
ref = "id"
attrs = {} # Additional TF attributes: arn, unique_id
def write(self, w):
with self.resource_block(w):
self.property(w, "GroupName", "name", StringValueConverter())
self.property(w, "Users", "users", ListValueConverter(StringValueConverter())) # TODO: Probably not the correct mapping
class AWS_IAM_InstanceProfile(CloudFormationResource):
cfn_type = "AWS::IAM::InstanceProfile"
tf_type = "aws_iam_instance_profile"
ref = "id"
attrs = {
"Arn": "arn",
# Additional TF attributes: create_date, name, role, roles, unique_id
}
def write(self, w):
with self.resource_block(w):
self.property(w, "InstanceProfileName", "name", StringValueConverter())
self.property(w, "Path", "path", StringValueConverter())
self.property(w, "Roles", "roles", ListValueConverter(StringValueConverter()))
class AWS_IAM_ManagedPolicy(CloudFormationResource):
cfn_type = "AWS::IAM::ManagedPolicy"
tf_type = "aws_iam_managed_policy" # TODO: Most likely not working
ref = "arn"
attrs = {}
def write(self, w):
with self.resource_block(w):
self.property(w, "Description", "description", StringValueConverter())
self.property(w, "Groups", "groups", ListValueConverter(StringValueConverter()))
self.property(w, "ManagedPolicyName", "managed_policy_name", StringValueConverter())
self.property(w, "Path", "path", StringValueConverter())
self.property(w, "PolicyDocument", "policy_document", JsonValueConverter())
self.property(w, "Roles", "roles", ListValueConverter(StringValueConverter()))
self.property(w, "Users", "users", ListValueConverter(StringValueConverter()))
| 41.066667 | 151 | 0.713745 | from . import *
class AWS_IAM_Role_Policy(CloudFormationProperty):
def write(self, w):
with w.block("policy"):
self.property(w, "PolicyDocument", "policy_document", JsonValueConverter())
self.property(w, "PolicyName", "policy_name", StringValueConverter())
class AWS_IAM_Group_Policy(CloudFormationProperty):
def write(self, w):
with w.block("policy"):
self.property(w, "PolicyDocument", "policy_document", JsonValueConverter())
self.property(w, "PolicyName", "policy_name", StringValueConverter())
class AWS_IAM_User_LoginProfile(CloudFormationProperty):
def write(self, w):
with w.block("login_profile"):
self.property(w, "Password", "password", StringValueConverter())
self.property(w, "PasswordResetRequired", "password_reset_required", BasicValueConverter())
class AWS_IAM_User_Policy(CloudFormationProperty):
def write(self, w):
with w.block("policy"):
self.property(w, "PolicyDocument", "policy_document", JsonValueConverter())
self.property(w, "PolicyName", "policy_name", StringValueConverter())
class AWS_IAM_Group(CloudFormationResource):
  """Converter for AWS::IAM::Group -> Terraform ``aws_iam_group``."""
  cfn_type = "AWS::IAM::Group"
  tf_type = "aws_iam_group"
  ref = "id"
  attrs = {
    "Arn": "arn",
  }
  def write(self, w):
    """Emit the resource block; inline Policies become repeated nested blocks."""
    with self.resource_block(w):
      self.property(w, "GroupName", "name", StringValueConverter())
      self.property(w, "ManagedPolicyArns", "arn", ListValueConverter(StringValueConverter()))
      self.property(w, "Path", "path", StringValueConverter())
      self.repeated_block(w, "Policies", AWS_IAM_Group_Policy)
class AWS_IAM_Policy(CloudFormationResource):
  """Converter for AWS::IAM::Policy.

  Emitted as ``aws_iam_policy_attachment``; note the CFN inline policy and
  the TF attachment resource do not have identical argument sets, so the
  generated block may need manual review.
  """
  cfn_type = "AWS::IAM::Policy"
  tf_type = "aws_iam_policy_attachment"
  ref = "id"
  attrs = {}
  def write(self, w):
    with self.resource_block(w):
      self.property(w, "Groups", "groups", ListValueConverter(StringValueConverter()))
      self.property(w, "PolicyDocument", "policy_document", JsonValueConverter())
      self.property(w, "PolicyName", "name", StringValueConverter())
      self.property(w, "Roles", "roles", ListValueConverter(StringValueConverter()))
      self.property(w, "Users", "users", ListValueConverter(StringValueConverter()))
class AWS_IAM_ServiceLinkedRole(CloudFormationResource):
  """Converter for AWS::IAM::ServiceLinkedRole -> ``aws_iam_service_linked_role``."""
  cfn_type = "AWS::IAM::ServiceLinkedRole"
  tf_type = "aws_iam_service_linked_role"
  ref = "id"
  attrs = {}
  def write(self, w):
    with self.resource_block(w):
      self.property(w, "CustomSuffix", "custom_suffix", StringValueConverter())
      self.property(w, "Description", "description", StringValueConverter())
      self.property(w, "AWSServiceName", "aws_service_name", StringValueConverter())
class AWS_IAM_AccessKey(CloudFormationResource):
  """Converter for AWS::IAM::AccessKey -> ``aws_iam_access_key``."""
  cfn_type = "AWS::IAM::AccessKey"
  tf_type = "aws_iam_access_key"
  ref = "id"
  attrs = {
    "SecretAccessKey": "secret",
  }
  def write(self, w):
    with self.resource_block(w):
      self.property(w, "Serial", "serial", BasicValueConverter())
      self.property(w, "Status", "status", StringValueConverter())
      self.property(w, "UserName", "user", StringValueConverter())
class AWS_IAM_User(CloudFormationResource):
  """Converter for AWS::IAM::User.

  NOTE(review): ``tf_type`` is ``aws_iam_user_group_membership`` rather than
  ``aws_iam_user``; the emitted arguments below mix user and membership
  attributes -- confirm the intended target resource.
  """
  cfn_type = "AWS::IAM::User"
  tf_type = "aws_iam_user_group_membership"
  ref = "id"
  attrs = {
    "Arn": "arn",
  }
  def write(self, w):
    with self.resource_block(w):
      self.property(w, "Groups", "groups", ListValueConverter(StringValueConverter()))
      self.block(w, "LoginProfile", AWS_IAM_User_LoginProfile)
      self.property(w, "ManagedPolicyArns", "managed_policy_arns", ListValueConverter(StringValueConverter()))
      self.property(w, "Path", "path", StringValueConverter())
      self.property(w, "PermissionsBoundary", "permissions_boundary", StringValueConverter())
      self.repeated_block(w, "Policies", AWS_IAM_User_Policy)
      self.property(w, "Tags", "tags", ListValueConverter(ResourceTag()))
      self.property(w, "UserName", "user", StringValueConverter())
class AWS_IAM_Role(CloudFormationResource):
  """Converter for AWS::IAM::Role -> Terraform ``aws_iam_role``."""
  cfn_type = "AWS::IAM::Role"
  tf_type = "aws_iam_role"
  ref = "id"
  attrs = {
    "Arn": "arn",
    "RoleId": "id",
  }
  def write(self, w):
    """Emit the resource block; inline Policies become repeated nested blocks."""
    with self.resource_block(w):
      self.property(w, "AssumeRolePolicyDocument", "assume_role_policy", JsonValueConverter())
      self.property(w, "Description", "description", StringValueConverter())
      self.property(w, "ManagedPolicyArns", "arn", ListValueConverter(StringValueConverter()))
      self.property(w, "MaxSessionDuration", "max_session_duration", BasicValueConverter())
      self.property(w, "Path", "path", StringValueConverter())
      self.property(w, "PermissionsBoundary", "permissions_boundary", StringValueConverter())
      self.repeated_block(w, "Policies", AWS_IAM_Role_Policy)
      self.property(w, "RoleName", "name", StringValueConverter())
      self.property(w, "Tags", "tags", ListValueConverter(ResourceTag()))
class AWS_IAM_UserToGroupAddition(CloudFormationResource):
  """Converter for AWS::IAM::UserToGroupAddition.

  NOTE(review): ``tf_type`` is ``aws_iam_user``; the Terraform resource for
  this mapping is usually ``aws_iam_group_membership`` -- confirm.
  """
  cfn_type = "AWS::IAM::UserToGroupAddition"
  tf_type = "aws_iam_user"
  ref = "id"
  attrs = {}
  def write(self, w):
    with self.resource_block(w):
      self.property(w, "GroupName", "name", StringValueConverter())
      self.property(w, "Users", "users", ListValueConverter(StringValueConverter()))
class AWS_IAM_InstanceProfile(CloudFormationResource):
  """Converter for AWS::IAM::InstanceProfile -> ``aws_iam_instance_profile``."""
  cfn_type = "AWS::IAM::InstanceProfile"
  tf_type = "aws_iam_instance_profile"
  ref = "id"
  attrs = {
    "Arn": "arn",
  }
  def write(self, w):
    with self.resource_block(w):
      self.property(w, "InstanceProfileName", "name", StringValueConverter())
      self.property(w, "Path", "path", StringValueConverter())
      self.property(w, "Roles", "roles", ListValueConverter(StringValueConverter()))
class AWS_IAM_ManagedPolicy(CloudFormationResource):
  """Converter for AWS::IAM::ManagedPolicy.

  NOTE(review): "aws_iam_managed_policy" is not a resource name the AWS
  provider documents (managed policies are "aws_iam_policy" there) --
  confirm the emitted block before use.
  """
  cfn_type = "AWS::IAM::ManagedPolicy"
  tf_type = "aws_iam_managed_policy"
  ref = "arn"
  attrs = {}
  def write(self, w):
    with self.resource_block(w):
      self.property(w, "Description", "description", StringValueConverter())
      self.property(w, "Groups", "groups", ListValueConverter(StringValueConverter()))
      self.property(w, "ManagedPolicyName", "managed_policy_name", StringValueConverter())
      self.property(w, "Path", "path", StringValueConverter())
      self.property(w, "PolicyDocument", "policy_document", JsonValueConverter())
      self.property(w, "Roles", "roles", ListValueConverter(StringValueConverter()))
      self.property(w, "Users", "users", ListValueConverter(StringValueConverter()))
| true | true |
f73a29167bde13bfd58d1992fd74947483c2b0de | 401 | py | Python | code/dataset/__init__.py | aarashfeizi/Proxy-Anchor-CVPR2020 | a7b9ed46d9d44841bd6bce78f4fddb95107a022b | [
"MIT"
] | null | null | null | code/dataset/__init__.py | aarashfeizi/Proxy-Anchor-CVPR2020 | a7b9ed46d9d44841bd6bce78f4fddb95107a022b | [
"MIT"
] | null | null | null | code/dataset/__init__.py | aarashfeizi/Proxy-Anchor-CVPR2020 | a7b9ed46d9d44841bd6bce78f4fddb95107a022b | [
"MIT"
] | null | null | null | from .cars import Cars
from .cub import CUBirds
from .SOP import SOP
from .hotels import Hotels
from .import utils
from .base import BaseDataset
# Registry mapping dataset-name strings to their dataset classes; consumed
# by load() below.
_type = {
    'cars': Cars,
    'cub': CUBirds,
    'SOP': SOP,
    'hotels': Hotels
}
def load(name, root, mode, transform = None, project_dir=None):
    """Instantiate the dataset registered under *name*.

    Args:
        name: registry key -- one of the keys of ``_type``
            ('cars', 'cub', 'SOP', 'hotels').
        root: dataset root directory, forwarded to the dataset class.
        mode: split selector, forwarded unchanged.
        transform: optional transform callable, forwarded unchanged.
        project_dir: optional project directory, forwarded unchanged.

    Raises:
        KeyError: if *name* is not a registered dataset (the message now
            lists the valid names instead of echoing only the bad key).
    """
    try:
        dataset_cls = _type[name]
    except KeyError:
        # Same exception type as before, but with an actionable message.
        raise KeyError(f"unknown dataset {name!r}; available: {sorted(_type)}")
    return dataset_cls(root = root, mode = mode, transform = transform, project_dir=project_dir)
| 21.105263 | 96 | 0.685786 | from .cars import Cars
from .cub import CUBirds
from .SOP import SOP
from .hotels import Hotels
from .import utils
from .base import BaseDataset
# Registry mapping dataset-name strings to their dataset classes; consumed
# by load() below.
_type = {
    'cars': Cars,
    'cub': CUBirds,
    'SOP': SOP,
    'hotels': Hotels
}
def load(name, root, mode, transform = None, project_dir=None):
    """Look up *name* in the module-level ``_type`` registry and build that
    dataset, forwarding all remaining arguments unchanged."""
    dataset_cls = _type[name]
    return dataset_cls(root=root, mode=mode, transform=transform, project_dir=project_dir)
| true | true |
f73a291f4a9842a6bfa0d29eee9e379595558c23 | 18,878 | py | Python | PyFlow/stylesheet.py | pedroCabrera/PyFlow | 8b439d9b47fff450e91c09d40c7b286e88cb624f | [
"MIT"
] | 7 | 2018-06-24T15:55:00.000Z | 2021-07-13T08:11:25.000Z | PyFlow/stylesheet.py | pedroCabrera/PyFlow | 8b439d9b47fff450e91c09d40c7b286e88cb624f | [
"MIT"
] | 32 | 2019-02-18T20:47:46.000Z | 2019-05-30T12:51:10.000Z | PyFlow/stylesheet.py | pedroCabrera/PyFlow | 8b439d9b47fff450e91c09d40c7b286e88cb624f | [
"MIT"
] | 5 | 2019-02-19T23:26:21.000Z | 2020-12-23T00:32:59.000Z | from Qt import QtGui
import inspect
from Core.Settings import Colors
def clamp(val,min_value,max_value):
return max(min(val, max_value), min_value)
class editableStyleSheet():
def __init__(self):
self.MainColor = Colors.Orange
self.MainColor_Lighter = Colors.OrangeLighter
self.MainColor_Lighter_2 = Colors.OrangeLighter2
self.MainColor_Darker = Colors.OrangeDarker
self.BG_COLOR = Colors.Black
self.BLACK = Colors.AbsoluteBlack
self.GREY = Colors.Grey
self.GreyGrad1 = Colors.Grey1
self.GreyGrad2 = Colors.Grey2
self.GreyGrad3 = Colors.Grey3
self.TEXT_COLOR = QtGui.QColor(177, 177, 177)
self.BORDER_COLOR = Colors.SceneBackground
self.SHADOW_COLOR = Colors.Shadow
self.storeDeffaults()
def storeDeffaults(self):
for name,obj in inspect.getmembers(self):
if isinstance(obj,QtGui.QColor):
obj.default = obj.name()
def setHue(self,hue):
for name,obj in inspect.getmembers(self):
if isinstance(obj,QtGui.QColor) and name in ["MainColor","MainColor_Lighter","MainColor_Lighter_2","MainColor_Darker"]:
c = QtGui.QColor(obj.default)
h,s,l,a = c.getHslF()
obj.setHslF((h+hue)%1, s, l, a)
def setLightness(self,light):
for name,obj in inspect.getmembers(self):
if isinstance(obj,QtGui.QColor) and name in ["MainColor_Lighter","MainColor_Lighter_2","MainColor_Darker"]:
c = QtGui.QColor(self.MainColor.default)
h0,s0,l0,a0 = c.getHslF()
c = QtGui.QColor(obj.default)
h1,s1,l1,a1 = c.getHslF()
h,s,l,a = obj.getHslF()
obj.setHslF(h, s, clamp(l1-l0+light,0,1), a)
elif isinstance(obj,QtGui.QColor) and name == "MainColor":
h,s,l,a = obj.getHslF()
obj.setHslF(h, s, light, a)
def setBg(self,value):
c = QtGui.QColor(self.BG_COLOR.default)
h0,s0,l0,a0 = c.getHslF()
self.BG_COLOR.setHslF(h0,s0,value,a0)
c = QtGui.QColor(self.TEXT_COLOR.default)
h,s,l,a = c.getHslF()
self.TEXT_COLOR.setHslF(h,s,clamp(1.0-(value+0.25),0,1),a)
for i in [self.GreyGrad1,self.GreyGrad2,self.GreyGrad3]:
c = QtGui.QColor(i.default)
h1,s1,l1,a1 = c.getHslF()
h,s,l,a = i.getHslF()
i.setHslF(h,s,clamp(l1-l0+value,0,1),a)
def getStyleSheet(self):
return """
QToolTip {{ border: 1px solid black;
background-color: {0};
padding: 1px;
border-radius: 3px;
opacity: 100; }}
QWidget {{ color: {7};
background-color: {1};
border-radius: 3px; }}
QWidget:disabled {{ color: {6};
background-color: {1}; }}
QWidget:focus {{ /*border: 2px solid QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 {0}, stop: 1 {2});*/ }}
QWidget:item:hover {{ background-color: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 {0}, stop: 1 {3});
color: {4}; }}
QWidget:item:selected {{ background-color: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 {0}, stop: 1 {2}); }}
QMenuBar::item {{ background: transparent; }}
QMenuBar::item:selected
{{ background: transparent;
border: 1px solid {5}; }}
QMenuBar::item:pressed{{ background: {6};
border: 1px solid {4};
background-color: QLinearGradient( x1:0, y1:0,x2:0, y2:1,stop:0.3 {1},stop:0.1 {0});
margin-bottom:-1px;
padding-bottom:1px; }}
QMenu {{ border: 1px solid {4}; }}
QMenu::item {{ padding: 2px 20px 2px 20px; }}
QMenu::item:selected {{ color: {4}; }}
QMenu::separator {{ height: 2px;
background-color: QLinearGradient(x1:0, y1:0, x2:0, y2:1, stop:0 #161616, stop: 0.5 {9}, stop: 0.6 {8}, stop:1 #343434);
color: white;
padding-left: 4px;
margin-left: 10px;
margin-right: 5px; }}
QAbstractItemView {{ background-color: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 {10}, stop: 0.1 {11}, stop: 1 {12}); }}
QLineEdit {{ background-color: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 {11}, stop: 1 {12});
padding: 1px;
border-style: solid;
border: 1px solid {8};
border-radius: 5; }}
QToolButton:menu-button{{
color: none;
background-color: none;
border-style: none;
padding-top: 20px;
padding-right: 3px;
}}
QToolButton:menu-arrow:open {{
top: 1px; left: 1px; /* shift it a bit */
}}
QPushButton,QToolButton {{ color: {7};
background-color: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 {10}, stop: 1 {11});
border-width: 1px;
border-color: {8};
border-style: solid;
border-radius: 6;
font-size: 12px;
padding: 3px;
padding-left: 5px; padding-right: 5px; }}
QPushButton:pressed,QToolButton::pressed {{ background-color: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 {0}, stop: 1 {2}); }}
QComboBox {{ selection-background-color: {5};
background-color: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 {10}, stop: 1 {11});
border-style: solid;
border: 1px solid {8};
border-radius: 5; }}
QPushButton:checked{{
background-color: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 {10}, stop: 1 {12});
border: 2px solid QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 {0}, stop: 1 {2});
}}
QComboBox:hover,QPushButton:hover,QSpinBox:hover,QDoubleSpinBox:hover,QToolButton::hover
{{ border: 2px solid QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 {0}, stop: 1 {2}); }}
QComboBox:on {{ padding-top: 3px;
padding-left: 4px;
background-color: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 {10}, stop:0.3 {1} , stop: 1 {11} );
selection-background-color: {5}; }}
QComboBox QAbstractItemView
{{ border: 2px solid darkgray;
selection-background-color: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 {0}, stop: 1 {2}); }}
QComboBox::drop-down {{ subcontrol-origin: padding;
subcontrol-position: top right;
width: 15px;
border-left-width: 0px;
border-left-color: darkgray;
border-left-style: solid; /* just a single line */
border-top-right-radius: 3px; /* same radius as the QComboBox */
border-bottom-right-radius: 3px; }}
QGroupBox {{ border: 1px solid #9f988f; }}
QScrollBar:horizontal {{ border: 1px solid #222222;
background: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 {11}, stop: 1 {12});
height: 12px;
margin: 0px 16px 0 16px; }}
QScrollBar::handle:horizontal
{{ background: QLinearGradient( x1: 0, y1: 0, x2: 1, y2: 0, stop: 0 {0}, stop: 0.5 {2}, stop: 1 {0});
min-height: 20px;
border-radius: 2px; }}
QScrollBar::add-line:horizontal
{{ border: 1px solid #1b1b19;
border-radius: 2px;
background: QLinearGradient( x1: 0, y1: 0, x2: 1, y2: 0, stop: 0 {0}, stop: 1 {2});
width: 14px;
subcontrol-position: right;
subcontrol-origin: margin; }}
QScrollBar::sub-line:horizontal
{{ border: 1px solid #1b1b19;
border-radius: 2px;
background: QLinearGradient( x1: 0, y1: 0, x2: 1, y2: 0, stop: 0 {0}, stop: 1 {2});
width: 14px;
subcontrol-position: left;
subcontrol-origin: margin; }}
QScrollBar::add-page:horizontal, QScrollBar::sub-page:horizontal
{{ background: none; }}
QScrollBar:vertical {{ background: QLinearGradient( x1: 0, y1: 0, x2: 1, y2: 0, stop: 0 {11}, stop: 1 {12});
width: 12px;
margin: 16px 0 16px 0;
border: 1px solid #222222; }}
QScrollBar::handle:vertical
{{ background: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 {0}, stop: 0.5 {2}, stop: 1 {0});
min-height: 20px;
border-radius: 2px; }}
QScrollBar::add-line:vertical
{{ border: 1px solid #1b1b19;
border-radius: 2px;
background: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 {0}, stop: 1 {2});
height: 14px;
subcontrol-position: bottom;
subcontrol-origin: margin; }}
QScrollBar::sub-line:vertical
{{ border: 1px solid #1b1b19;
border-radius: 2px;
background: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 {2}, stop: 1 {0});
height: 14px;
subcontrol-position: top;
subcontrol-origin: margin; }}
QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical
{{ background: none; }}
QTextEdit {{ background-color: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 {10}, stop: 0.1 {11}, stop: 1 {12}); }}
QPlainTextEdit {{ background-color:{1}; }}
QHeaderView::section {{ background-color: QLinearGradient(x1:0, y1:0, x2:0, y2:1, stop:0 #616161, stop: 0.5 #505050, stop: 0.6 #434343, stop:1 #656565);
background-color: #505050;
color: white;
padding-left: 4px;
border-radius: 2px;
border: 1px solid #6c6c6c; }}
QCheckBox:disabled {{ color: #414141; }}
QCheckBox {{
background-color: transparent; }}
QCheckBox::indicator {{ color: {7};
background-color: {1};
border: 1px solid {7};
width: 13px;
height: 13px; }}
QCheckBox::indicator:disabled, QRadioButton::indicator:disabled
{{ border: 1px solid {6}; }}
QRadioButton::indicator:checked, QRadioButton::indicator:unchecked
{{ color: {7};
background-color: {1};
border: 1px solid {7};
border-radius: 6px; }}
QRadioButton::indicator:checked
{{ background-color: qradialgradient(cx: 0.5, cy: 0.5,fx: 0.5, fy: 0.5, radius: 1.0, stop: 0.25 {5}, stop: 0.3 {1}); }}
QRadioButton::indicator
{{ border-radius: 6px; }}
QRadioButton::indicator:hover, QCheckBox::indicator:hover
{{ border: 1px solid {5}; }}
QDockWidget::title {{ text-align: center;
spacing: 3px; /* spacing between items in the tool bar */
border: 1px solid {9};
background-color: QLinearGradient(x1:0, y1:0, x2:0, y2:1, stop:0 {1}, stop:1 {1}); }}
QDockWidget::close-button, QDockWidget::float-button
{{ text-align: center;
spacing: 1px; /* spacing between items in the tool bar */
background-color: QLinearGradient(x1:0, y1:0, x2:0, y2:1, stop:0 {1}, stop:1 {1}); }}
QDockWidget::close-button:hover, QDockWidget::float-button:hover
{{ background: #242424; }}
QDockWidget::close-button:pressed, QDockWidget::float-button:pressed
{{ padding: 1px -1px -1px 1px; }}
QMainWindow::separator{{ background-color: QLinearGradient(x1:0, y1:0, x2:0, y2:1, stop:0 {6}, stop:1 {6});
color: white;
padding-left: 4px;
border: 1px solid #4c4c4c;
spacing: 3px; /* spacing between items in the tool bar */ }}
QMainWindow::separator:hover
{{ background-color: QLinearGradient(x1:0, y1:0, x2:0, y2:1, stop:0 {2}, stop:1 {0});
color: white;
padding-left: 4px;
border: 1px solid #6c6c6c;
spacing: 3px; /* spacing between items in the tool bar */ }}
QProgressBar {{ border: 2px solid grey;
border-radius: 5px;
text-align: center; }}
QProgressBar::chunk {{ background-color: {2};
width: 2.15px;
margin: 0.5px; }}
QTabBar::tab {{ color: {7};
border: 1px solid {6};
border-bottom-style: none;
background-color: {1};
padding-left: 10px;
padding-right: 10px;
padding-top: 3px;
padding-bottom: 2px;
margin-right: -1px; }}
QTabBar::tab:last {{ margin-right: 0; /* the last selected tab has nothing to overlap with on the right */
border-top-right-radius: 3px; }}
QTabBar::tab:first:!selected
{{ margin-left: 0px; /* the last selected tab has nothing to overlap with on the right */
border-top-left-radius: 3px; }}
QTabBar::tab:!selected{{ color: {7};
border-bottom-style: solid;
margin-top: 3px;
background-color: QLinearGradient(x1:0, y1:0, x2:0, y2:1, stop:1 {10}, stop:.4 {12}); }}
QTabBar::tab:selected {{ border-top-left-radius: 3px;
border-top-right-radius: 3px;
margin-bottom: 0px; }}
QTabBar::tab:!selected:hover
{{ /*border-top: 2px solid {5};
padding-bottom: 3px;*/
border-top-left-radius: 3px;
border-top-right-radius: 3px;
background-color: QLinearGradient( x1:0, y1:0, x2:0, y2:1, stop:1 {12}, stop:0.1 {1} ); }}
QTabWidget::pane {{ border: 1px solid {6};
top: 1px; }}
QSpinBox,QDoubleSpinBox {{
selection-background-color: {5};
background-color: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 {11}, stop: 1 {12});
border-style: solid;
border: 1px solid {8};
border-radius: 5;
}}
QSpinBox::up-button,QDoubleSpinBox::up-button {{ subcontrol-origin: border;
subcontrol-position: top right;
width: 16px;
border-width: 0;
border-top-width: 0; }}
QSpinBox::down-button,QDoubleSpinBox::down-button {{ subcontrol-origin: border;
subcontrol-position: bottom right;
width: 16px;
border-width: 0;
border-top-width: 0; }}
QSpinBox:focus,QDoubleSpinBox:focus,QTreeWidget:focus,QTextEdit:focus,QGroupBox:focus,QLineEdit:focus
{{
border: 2px solid QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 {0}, stop: 1 {2});
}}
QComboBox::down-arrow {{ image:url(:/arrow_down.png); }}
QToolBar::handle {{ spacing: 3px; /* spacing between items in the tool bar */ }}
QCheckBox::indicator:checked {{ image:url(:/checkbox.png); }}
QCheckBox::indicator:disabled:checked {{ image:url(:/checkbox_disabled.png); }}
QSplitter::handle:horizontal {{ image:url(:/Orange_spliter_Horizontal.png); }}
QSplitter::handle:vertical {{ image:url(:/Orange_spliter_Vertical_low.png); }}
QSpinBox::down-arrow,QDoubleSpinBox::down-arrow {{ image: url(:/arrow_down.png); }}
QSpinBox::up-arrow,QDoubleSpinBox::up-arrow {{ image: url(:/arrow_up.png); }}
QTreeView::branch:open:has-children {{ image: url(:/arrow_down_tree.png); }}
QTreeView::branch:closed:has-children {{ image: url(:/arrow_right.png); }}
""".format( self.MainColor.name(), #0
self.BG_COLOR.name(), #1
self.MainColor_Darker.name(), #2
self.MainColor_Lighter.name(), #3
self.BLACK.name(), #4
self.MainColor_Lighter_2.name(), #5
self.GREY.name(), #6
self.TEXT_COLOR.name(), #7
self.BORDER_COLOR.name(), #8
self.SHADOW_COLOR.name(), #9
self.GreyGrad1.name(), #10
self.GreyGrad2.name(), #11
self.GreyGrad3.name(), #12
)
style = editableStyleSheet()
style.setHue(1)
| 44.947619 | 155 | 0.471342 | from Qt import QtGui
import inspect
from Core.Settings import Colors
def clamp(val,min_value,max_value):
return max(min(val, max_value), min_value)
class editableStyleSheet():
def __init__(self):
self.MainColor = Colors.Orange
self.MainColor_Lighter = Colors.OrangeLighter
self.MainColor_Lighter_2 = Colors.OrangeLighter2
self.MainColor_Darker = Colors.OrangeDarker
self.BG_COLOR = Colors.Black
self.BLACK = Colors.AbsoluteBlack
self.GREY = Colors.Grey
self.GreyGrad1 = Colors.Grey1
self.GreyGrad2 = Colors.Grey2
self.GreyGrad3 = Colors.Grey3
self.TEXT_COLOR = QtGui.QColor(177, 177, 177)
self.BORDER_COLOR = Colors.SceneBackground
self.SHADOW_COLOR = Colors.Shadow
self.storeDeffaults()
def storeDeffaults(self):
for name,obj in inspect.getmembers(self):
if isinstance(obj,QtGui.QColor):
obj.default = obj.name()
def setHue(self,hue):
for name,obj in inspect.getmembers(self):
if isinstance(obj,QtGui.QColor) and name in ["MainColor","MainColor_Lighter","MainColor_Lighter_2","MainColor_Darker"]:
c = QtGui.QColor(obj.default)
h,s,l,a = c.getHslF()
obj.setHslF((h+hue)%1, s, l, a)
def setLightness(self,light):
for name,obj in inspect.getmembers(self):
if isinstance(obj,QtGui.QColor) and name in ["MainColor_Lighter","MainColor_Lighter_2","MainColor_Darker"]:
c = QtGui.QColor(self.MainColor.default)
h0,s0,l0,a0 = c.getHslF()
c = QtGui.QColor(obj.default)
h1,s1,l1,a1 = c.getHslF()
h,s,l,a = obj.getHslF()
obj.setHslF(h, s, clamp(l1-l0+light,0,1), a)
elif isinstance(obj,QtGui.QColor) and name == "MainColor":
h,s,l,a = obj.getHslF()
obj.setHslF(h, s, light, a)
def setBg(self,value):
c = QtGui.QColor(self.BG_COLOR.default)
h0,s0,l0,a0 = c.getHslF()
self.BG_COLOR.setHslF(h0,s0,value,a0)
c = QtGui.QColor(self.TEXT_COLOR.default)
h,s,l,a = c.getHslF()
self.TEXT_COLOR.setHslF(h,s,clamp(1.0-(value+0.25),0,1),a)
for i in [self.GreyGrad1,self.GreyGrad2,self.GreyGrad3]:
c = QtGui.QColor(i.default)
h1,s1,l1,a1 = c.getHslF()
h,s,l,a = i.getHslF()
i.setHslF(h,s,clamp(l1-l0+value,0,1),a)
def getStyleSheet(self):
return """
QToolTip {{ border: 1px solid black;
background-color: {0};
padding: 1px;
border-radius: 3px;
opacity: 100; }}
QWidget {{ color: {7};
background-color: {1};
border-radius: 3px; }}
QWidget:disabled {{ color: {6};
background-color: {1}; }}
QWidget:focus {{ /*border: 2px solid QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 {0}, stop: 1 {2});*/ }}
QWidget:item:hover {{ background-color: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 {0}, stop: 1 {3});
color: {4}; }}
QWidget:item:selected {{ background-color: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 {0}, stop: 1 {2}); }}
QMenuBar::item {{ background: transparent; }}
QMenuBar::item:selected
{{ background: transparent;
border: 1px solid {5}; }}
QMenuBar::item:pressed{{ background: {6};
border: 1px solid {4};
background-color: QLinearGradient( x1:0, y1:0,x2:0, y2:1,stop:0.3 {1},stop:0.1 {0});
margin-bottom:-1px;
padding-bottom:1px; }}
QMenu {{ border: 1px solid {4}; }}
QMenu::item {{ padding: 2px 20px 2px 20px; }}
QMenu::item:selected {{ color: {4}; }}
QMenu::separator {{ height: 2px;
background-color: QLinearGradient(x1:0, y1:0, x2:0, y2:1, stop:0 #161616, stop: 0.5 {9}, stop: 0.6 {8}, stop:1 #343434);
color: white;
padding-left: 4px;
margin-left: 10px;
margin-right: 5px; }}
QAbstractItemView {{ background-color: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 {10}, stop: 0.1 {11}, stop: 1 {12}); }}
QLineEdit {{ background-color: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 {11}, stop: 1 {12});
padding: 1px;
border-style: solid;
border: 1px solid {8};
border-radius: 5; }}
QToolButton:menu-button{{
color: none;
background-color: none;
border-style: none;
padding-top: 20px;
padding-right: 3px;
}}
QToolButton:menu-arrow:open {{
top: 1px; left: 1px; /* shift it a bit */
}}
QPushButton,QToolButton {{ color: {7};
background-color: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 {10}, stop: 1 {11});
border-width: 1px;
border-color: {8};
border-style: solid;
border-radius: 6;
font-size: 12px;
padding: 3px;
padding-left: 5px; padding-right: 5px; }}
QPushButton:pressed,QToolButton::pressed {{ background-color: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 {0}, stop: 1 {2}); }}
QComboBox {{ selection-background-color: {5};
background-color: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 {10}, stop: 1 {11});
border-style: solid;
border: 1px solid {8};
border-radius: 5; }}
QPushButton:checked{{
background-color: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 {10}, stop: 1 {12});
border: 2px solid QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 {0}, stop: 1 {2});
}}
QComboBox:hover,QPushButton:hover,QSpinBox:hover,QDoubleSpinBox:hover,QToolButton::hover
{{ border: 2px solid QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 {0}, stop: 1 {2}); }}
QComboBox:on {{ padding-top: 3px;
padding-left: 4px;
background-color: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 {10}, stop:0.3 {1} , stop: 1 {11} );
selection-background-color: {5}; }}
QComboBox QAbstractItemView
{{ border: 2px solid darkgray;
selection-background-color: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 {0}, stop: 1 {2}); }}
QComboBox::drop-down {{ subcontrol-origin: padding;
subcontrol-position: top right;
width: 15px;
border-left-width: 0px;
border-left-color: darkgray;
border-left-style: solid; /* just a single line */
border-top-right-radius: 3px; /* same radius as the QComboBox */
border-bottom-right-radius: 3px; }}
QGroupBox {{ border: 1px solid #9f988f; }}
QScrollBar:horizontal {{ border: 1px solid #222222;
background: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 {11}, stop: 1 {12});
height: 12px;
margin: 0px 16px 0 16px; }}
QScrollBar::handle:horizontal
{{ background: QLinearGradient( x1: 0, y1: 0, x2: 1, y2: 0, stop: 0 {0}, stop: 0.5 {2}, stop: 1 {0});
min-height: 20px;
border-radius: 2px; }}
QScrollBar::add-line:horizontal
{{ border: 1px solid #1b1b19;
border-radius: 2px;
background: QLinearGradient( x1: 0, y1: 0, x2: 1, y2: 0, stop: 0 {0}, stop: 1 {2});
width: 14px;
subcontrol-position: right;
subcontrol-origin: margin; }}
QScrollBar::sub-line:horizontal
{{ border: 1px solid #1b1b19;
border-radius: 2px;
background: QLinearGradient( x1: 0, y1: 0, x2: 1, y2: 0, stop: 0 {0}, stop: 1 {2});
width: 14px;
subcontrol-position: left;
subcontrol-origin: margin; }}
QScrollBar::add-page:horizontal, QScrollBar::sub-page:horizontal
{{ background: none; }}
QScrollBar:vertical {{ background: QLinearGradient( x1: 0, y1: 0, x2: 1, y2: 0, stop: 0 {11}, stop: 1 {12});
width: 12px;
margin: 16px 0 16px 0;
border: 1px solid #222222; }}
QScrollBar::handle:vertical
{{ background: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 {0}, stop: 0.5 {2}, stop: 1 {0});
min-height: 20px;
border-radius: 2px; }}
QScrollBar::add-line:vertical
{{ border: 1px solid #1b1b19;
border-radius: 2px;
background: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 {0}, stop: 1 {2});
height: 14px;
subcontrol-position: bottom;
subcontrol-origin: margin; }}
QScrollBar::sub-line:vertical
{{ border: 1px solid #1b1b19;
border-radius: 2px;
background: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 {2}, stop: 1 {0});
height: 14px;
subcontrol-position: top;
subcontrol-origin: margin; }}
QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical
{{ background: none; }}
QTextEdit {{ background-color: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 {10}, stop: 0.1 {11}, stop: 1 {12}); }}
QPlainTextEdit {{ background-color:{1}; }}
QHeaderView::section {{ background-color: QLinearGradient(x1:0, y1:0, x2:0, y2:1, stop:0 #616161, stop: 0.5 #505050, stop: 0.6 #434343, stop:1 #656565);
background-color: #505050;
color: white;
padding-left: 4px;
border-radius: 2px;
border: 1px solid #6c6c6c; }}
QCheckBox:disabled {{ color: #414141; }}
QCheckBox {{
background-color: transparent; }}
QCheckBox::indicator {{ color: {7};
background-color: {1};
border: 1px solid {7};
width: 13px;
height: 13px; }}
QCheckBox::indicator:disabled, QRadioButton::indicator:disabled
{{ border: 1px solid {6}; }}
QRadioButton::indicator:checked, QRadioButton::indicator:unchecked
{{ color: {7};
background-color: {1};
border: 1px solid {7};
border-radius: 6px; }}
QRadioButton::indicator:checked
{{ background-color: qradialgradient(cx: 0.5, cy: 0.5,fx: 0.5, fy: 0.5, radius: 1.0, stop: 0.25 {5}, stop: 0.3 {1}); }}
QRadioButton::indicator
{{ border-radius: 6px; }}
QRadioButton::indicator:hover, QCheckBox::indicator:hover
{{ border: 1px solid {5}; }}
QDockWidget::title {{ text-align: center;
spacing: 3px; /* spacing between items in the tool bar */
border: 1px solid {9};
background-color: QLinearGradient(x1:0, y1:0, x2:0, y2:1, stop:0 {1}, stop:1 {1}); }}
QDockWidget::close-button, QDockWidget::float-button
{{ text-align: center;
spacing: 1px; /* spacing between items in the tool bar */
background-color: QLinearGradient(x1:0, y1:0, x2:0, y2:1, stop:0 {1}, stop:1 {1}); }}
QDockWidget::close-button:hover, QDockWidget::float-button:hover
{{ background: #242424; }}
QDockWidget::close-button:pressed, QDockWidget::float-button:pressed
{{ padding: 1px -1px -1px 1px; }}
QMainWindow::separator{{ background-color: QLinearGradient(x1:0, y1:0, x2:0, y2:1, stop:0 {6}, stop:1 {6});
color: white;
padding-left: 4px;
border: 1px solid #4c4c4c;
spacing: 3px; /* spacing between items in the tool bar */ }}
QMainWindow::separator:hover
{{ background-color: QLinearGradient(x1:0, y1:0, x2:0, y2:1, stop:0 {2}, stop:1 {0});
color: white;
padding-left: 4px;
border: 1px solid #6c6c6c;
spacing: 3px; /* spacing between items in the tool bar */ }}
QProgressBar {{ border: 2px solid grey;
border-radius: 5px;
text-align: center; }}
QProgressBar::chunk {{ background-color: {2};
width: 2.15px;
margin: 0.5px; }}
QTabBar::tab {{ color: {7};
border: 1px solid {6};
border-bottom-style: none;
background-color: {1};
padding-left: 10px;
padding-right: 10px;
padding-top: 3px;
padding-bottom: 2px;
margin-right: -1px; }}
QTabBar::tab:last {{ margin-right: 0; /* the last selected tab has nothing to overlap with on the right */
border-top-right-radius: 3px; }}
QTabBar::tab:first:!selected
{{ margin-left: 0px; /* the last selected tab has nothing to overlap with on the right */
border-top-left-radius: 3px; }}
QTabBar::tab:!selected{{ color: {7};
border-bottom-style: solid;
margin-top: 3px;
background-color: QLinearGradient(x1:0, y1:0, x2:0, y2:1, stop:1 {10}, stop:.4 {12}); }}
QTabBar::tab:selected {{ border-top-left-radius: 3px;
border-top-right-radius: 3px;
margin-bottom: 0px; }}
QTabBar::tab:!selected:hover
{{ /*border-top: 2px solid {5};
padding-bottom: 3px;*/
border-top-left-radius: 3px;
border-top-right-radius: 3px;
background-color: QLinearGradient( x1:0, y1:0, x2:0, y2:1, stop:1 {12}, stop:0.1 {1} ); }}
QTabWidget::pane {{ border: 1px solid {6};
top: 1px; }}
QSpinBox,QDoubleSpinBox {{
selection-background-color: {5};
background-color: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 {11}, stop: 1 {12});
border-style: solid;
border: 1px solid {8};
border-radius: 5;
}}
QSpinBox::up-button,QDoubleSpinBox::up-button {{ subcontrol-origin: border;
subcontrol-position: top right;
width: 16px;
border-width: 0;
border-top-width: 0; }}
QSpinBox::down-button,QDoubleSpinBox::down-button {{ subcontrol-origin: border;
subcontrol-position: bottom right;
width: 16px;
border-width: 0;
border-top-width: 0; }}
QSpinBox:focus,QDoubleSpinBox:focus,QTreeWidget:focus,QTextEdit:focus,QGroupBox:focus,QLineEdit:focus
{{
border: 2px solid QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 {0}, stop: 1 {2});
}}
QComboBox::down-arrow {{ image:url(:/arrow_down.png); }}
QToolBar::handle {{ spacing: 3px; /* spacing between items in the tool bar */ }}
QCheckBox::indicator:checked {{ image:url(:/checkbox.png); }}
QCheckBox::indicator:disabled:checked {{ image:url(:/checkbox_disabled.png); }}
QSplitter::handle:horizontal {{ image:url(:/Orange_spliter_Horizontal.png); }}
QSplitter::handle:vertical {{ image:url(:/Orange_spliter_Vertical_low.png); }}
QSpinBox::down-arrow,QDoubleSpinBox::down-arrow {{ image: url(:/arrow_down.png); }}
QSpinBox::up-arrow,QDoubleSpinBox::up-arrow {{ image: url(:/arrow_up.png); }}
QTreeView::branch:open:has-children {{ image: url(:/arrow_down_tree.png); }}
QTreeView::branch:closed:has-children {{ image: url(:/arrow_right.png); }}
""".format( self.MainColor.name(),
self.BG_COLOR.name(),
self.MainColor_Darker.name(),
self.MainColor_Lighter.name(),
self.BLACK.name(),
self.MainColor_Lighter_2.name(),
self.GREY.name(),
self.TEXT_COLOR.name(),
self.BORDER_COLOR.name(),
self.SHADOW_COLOR.name(),
self.GreyGrad1.name(),
self.GreyGrad2.name(),
self.GreyGrad3.name(),
)
style = editableStyleSheet()
style.setHue(1)
| true | true |
f73a2a3fdc2c3465104276ed39f89bdfcaae950c | 422 | py | Python | retweet/py/config.py | adventuringImagineer/estimator-retweet-adventure | 3c3ea925f38cd50870c6150a804014bfd07ca190 | [
"MIT"
] | null | null | null | retweet/py/config.py | adventuringImagineer/estimator-retweet-adventure | 3c3ea925f38cd50870c6150a804014bfd07ca190 | [
"MIT"
] | null | null | null | retweet/py/config.py | adventuringImagineer/estimator-retweet-adventure | 3c3ea925f38cd50870c6150a804014bfd07ca190 | [
"MIT"
] | null | null | null | tweepy_consumer_key = "cVjPt6UCDPHFxGCk5M8wKz9Bo"
# SECURITY(review): real OAuth credentials are hard-coded in source control.
# They should be revoked and loaded from environment variables or an
# untracked secrets file instead of being committed.
tweepy_consumer_secret = "ImLY50oHMd2noPrchO2qXYXKJQxxjng4UK7Rp1kj74GUDTCfTF"
tweepy_access_token = "34813916-plJKktZVBPOqKPQ7zdV5uTEuRiiDWeX9weZNliYct"
tweepy_access_token_secret = "CWwzOqAkkxfKl6VDK6OUBoYFKPZD2JDfOQjOcPjQYz7pP"
# estimator_url = 'wss://passgraf.com:2083/ws/00uau42fbewkR6zsm4x6'
# WebSocket endpoint of the estimator service; the path segment looks like an
# access token and should be treated as a secret too.
estimator_url = 'wss://bypass.passgraf.com:8100/ws/00u45o8xj0VFSMts14x7'
| 52.75 | 77 | 0.869668 | tweepy_consumer_key = "cVjPt6UCDPHFxGCk5M8wKz9Bo"
tweepy_consumer_secret = "ImLY50oHMd2noPrchO2qXYXKJQxxjng4UK7Rp1kj74GUDTCfTF"
tweepy_access_token = "34813916-plJKktZVBPOqKPQ7zdV5uTEuRiiDWeX9weZNliYct"
tweepy_access_token_secret = "CWwzOqAkkxfKl6VDK6OUBoYFKPZD2JDfOQjOcPjQYz7pP"
estimator_url = 'wss://bypass.passgraf.com:8100/ws/00u45o8xj0VFSMts14x7'
| true | true |
f73a2ae3c540a9a90052d279a3881c4aaf86097f | 3,410 | py | Python | kopf/engines/probing.py | ankitdobhal/kopf | 2765eda2a08e7e42195446cc23f02ba91603db53 | [
"MIT"
] | null | null | null | kopf/engines/probing.py | ankitdobhal/kopf | 2765eda2a08e7e42195446cc23f02ba91603db53 | [
"MIT"
] | null | null | null | kopf/engines/probing.py | ankitdobhal/kopf | 2765eda2a08e7e42195446cc23f02ba91603db53 | [
"MIT"
] | null | null | null | import asyncio
import datetime
import logging
import urllib.parse
from typing import MutableMapping, Optional, Tuple
import aiohttp.web
from kopf.reactor import activities, lifecycles, registries
from kopf.structs import callbacks, configuration, handlers, memos
logger = logging.getLogger(__name__)
LOCALHOST: str = 'localhost'
HTTP_PORT: int = 80
_Key = Tuple[str, int] # hostname, port
async def health_reporter(
        endpoint: str,
        *,
        memo: memos.AnyMemo,
        registry: registries.OperatorRegistry,
        settings: configuration.OperatorSettings,
        ready_flag: Optional[asyncio.Event] = None,  # used for testing
) -> None:
    """
    Serve the operator's health over plain HTTP for K8s liveness probes.

    The coroutine runs until cancelled (e.g. when a sibling root task fails).
    Once it stops answering, Kubernetes considers the pod dead and restarts it.
    Probe activities are re-run at most once per caching window, no matter
    how often the endpoint is hit.
    """
    # Parse and validate the endpoint up-front; only plain HTTP is supported.
    parts = urllib.parse.urlsplit(endpoint)
    if parts.scheme != 'http':
        raise Exception(f"Unsupported scheme: {endpoint}")
    host = parts.hostname or LOCALHOST
    port = parts.port or HTTP_PORT
    path = parts.path

    # Cached probe results and their collection time, shared by all requests.
    probe_results: MutableMapping[handlers.HandlerId, callbacks.Result] = {}
    last_probe_at: Optional[datetime.datetime] = None
    max_cache_age = datetime.timedelta(seconds=10.0)
    probe_mutex = asyncio.Lock()

    async def get_health(request: aiohttp.web.Request) -> aiohttp.web.Response:
        nonlocal last_probe_at
        # Double-checked caching: run the heavy probe activity only when the
        # cache is stale, and only once per burst of concurrent requests.
        moment = datetime.datetime.utcnow()
        if last_probe_at is None or moment - last_probe_at >= max_cache_age:
            async with probe_mutex:
                moment = datetime.datetime.utcnow()
                if last_probe_at is None or moment - last_probe_at >= max_cache_age:
                    results = await activities.run_activity(
                        lifecycle=lifecycles.all_at_once,
                        registry=registry,
                        settings=settings,
                        activity=handlers.Activity.PROBE,
                        memo=memo,
                    )
                    probe_results.clear()
                    probe_results.update(results)
                    last_probe_at = datetime.datetime.utcnow()
        return aiohttp.web.json_response(probe_results)

    # Spin up a minimal aiohttp server serving only the health route.
    app = aiohttp.web.Application()
    app.add_routes([aiohttp.web.get(path, get_health)])
    runner = aiohttp.web.AppRunner(app, handle_signals=False)
    await runner.setup()
    site = aiohttp.web.TCPSite(runner, host, port, shutdown_timeout=1.0)
    await site.start()

    # Log the normalised URL with the hostname/port actually bound.
    url = urllib.parse.urlunsplit([parts.scheme, f'{host}:{port}', path, '', ''])
    logger.debug("Serving health status at %s", url)
    if ready_flag is not None:
        ready_flag.set()

    try:
        # Idle forever; serving happens in aiohttp's own tasks.
        await asyncio.Event().wait()
    finally:
        # Whatever the exit reason, stop reporting the health.
        await asyncio.shield(runner.cleanup())
| 35.894737 | 97 | 0.657478 | import asyncio
import datetime
import logging
import urllib.parse
from typing import MutableMapping, Optional, Tuple
import aiohttp.web
from kopf.reactor import activities, lifecycles, registries
from kopf.structs import callbacks, configuration, handlers, memos
logger = logging.getLogger(__name__)
LOCALHOST: str = 'localhost'
HTTP_PORT: int = 80
_Key = Tuple[str, int]
async def health_reporter(
endpoint: str,
*,
memo: memos.AnyMemo,
registry: registries.OperatorRegistry,
settings: configuration.OperatorSettings,
ready_flag: Optional[asyncio.Event] = None,
) -> None:
probing_container: MutableMapping[handlers.HandlerId, callbacks.Result] = {}
probing_timestamp: Optional[datetime.datetime] = None
probing_max_age = datetime.timedelta(seconds=10.0)
probing_lock = asyncio.Lock()
async def get_health(
request: aiohttp.web.Request,
) -> aiohttp.web.Response:
nonlocal probing_timestamp
now = datetime.datetime.utcnow()
if probing_timestamp is None or now - probing_timestamp >= probing_max_age:
async with probing_lock:
now = datetime.datetime.utcnow()
if probing_timestamp is None or now - probing_timestamp >= probing_max_age:
activity_results = await activities.run_activity(
lifecycle=lifecycles.all_at_once,
registry=registry,
settings=settings,
activity=handlers.Activity.PROBE,
memo=memo,
)
probing_container.clear()
probing_container.update(activity_results)
probing_timestamp = datetime.datetime.utcnow()
return aiohttp.web.json_response(probing_container)
parts = urllib.parse.urlsplit(endpoint)
if parts.scheme == 'http':
host = parts.hostname or LOCALHOST
port = parts.port or HTTP_PORT
path = parts.path
else:
raise Exception(f"Unsupported scheme: {endpoint}")
app = aiohttp.web.Application()
app.add_routes([aiohttp.web.get(path, get_health)])
runner = aiohttp.web.AppRunner(app, handle_signals=False)
await runner.setup()
site = aiohttp.web.TCPSite(runner, host, port, shutdown_timeout=1.0)
await site.start()
url = urllib.parse.urlunsplit([parts.scheme, f'{host}:{port}', path, '', ''])
logger.debug("Serving health status at %s", url)
if ready_flag is not None:
ready_flag.set()
try:
await asyncio.Event().wait()
finally:
await asyncio.shield(runner.cleanup())
| true | true |
f73a2d1b39502b040e25908c237c6ab73a1553a9 | 3,898 | py | Python | accounts/tests/test_view_password_change.py | sureshkunku/Dispatch | eda68d5bf94029a324d22f5b6eb6c5087923ab7e | [
"MIT"
] | null | null | null | accounts/tests/test_view_password_change.py | sureshkunku/Dispatch | eda68d5bf94029a324d22f5b6eb6c5087923ab7e | [
"MIT"
] | 7 | 2019-10-22T14:15:59.000Z | 2022-02-10T08:50:49.000Z | accounts/tests/test_view_password_change.py | sureshkunku/Dispatch | eda68d5bf94029a324d22f5b6eb6c5087923ab7e | [
"MIT"
] | null | null | null | from django.contrib.auth import views as auth_views
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth.models import User
from django.test import TestCase
from django.urls import resolve, reverse
class PasswordChangeTests(TestCase):
    """Smoke-tests of the password-change page for a logged-in user."""

    def setUp(self):
        credentials = {'username': 'john', 'password': 'secret123'}
        User.objects.create_user(email='john@doe.com', **credentials)
        self.client.login(**credentials)
        self.response = self.client.get(reverse('password_change'))

    def test_status_code(self):
        self.assertEquals(self.response.status_code, 200)

    def test_url_resolves_correct_view(self):
        view = resolve('/settings/password/')
        self.assertEquals(view.func.view_class, auth_views.PasswordChangeView)

    def test_csrf(self):
        self.assertContains(self.response, 'csrfmiddlewaretoken')

    def test_contains_form(self):
        self.assertIsInstance(self.response.context.get('form'), PasswordChangeForm)

    def test_form_inputs(self):
        """The page must render four inputs: csrf, old_password, new_password1/2."""
        self.assertContains(self.response, '<input', 4)
        self.assertContains(self.response, 'type="password"', 3)
class LoginRequiredPasswordChangeTests(TestCase):
    """Anonymous visitors must be redirected to the login page."""

    def test_redirection(self):
        target = reverse('password_change')
        login_url = reverse('login')
        response = self.client.get(target)
        self.assertRedirects(response, f'{login_url}?next={target}')
class PasswordChangeTestCase(TestCase):
    '''
    Base test case for password-change form processing.

    Subclasses pass a `data` dict that is POSTed to the view in setUp;
    the resulting response is stored on `self.response`.
    '''
    def setUp(self, data=None):
        # Fix: the original signature used a mutable default (`data={}`),
        # which is shared across calls; use None as the sentinel instead.
        data = {} if data is None else data
        self.user = User.objects.create_user(username='john', email='john@doe.com', password='old_password')
        self.url = reverse('password_change')
        self.client.login(username='john', password='old_password')
        self.response = self.client.post(self.url, data)
class SuccessfulPasswordChangeTests(PasswordChangeTestCase):
    """A valid submission changes the password and keeps the user signed in."""

    def setUp(self):
        super().setUp({
            'old_password': 'old_password',
            'new_password1': 'new_password',
            'new_password2': 'new_password',
        })

    def test_redirection(self):
        """A valid form submission redirects the user to the 'done' page."""
        self.assertRedirects(self.response, reverse('password_change_done'))

    def test_password_changed(self):
        """After refreshing from the DB, the new password hash must validate."""
        self.user.refresh_from_db()
        self.assertTrue(self.user.check_password('new_password'))

    def test_user_authentication(self):
        """A follow-up request must still carry an authenticated `user`."""
        follow_up = self.client.get(reverse('home'))
        self.assertTrue(follow_up.context.get('user').is_authenticated)
class InvalidPasswordChangeTests(PasswordChangeTestCase):
    """An empty submission re-renders the form with errors, password unchanged."""

    def test_status_code(self):
        """An invalid submission returns the same page (HTTP 200)."""
        self.assertEquals(self.response.status_code, 200)

    def test_form_errors(self):
        self.assertTrue(self.response.context.get('form').errors)

    def test_didnt_change_password(self):
        """After refreshing from the DB, the old password must still validate."""
        self.user.refresh_from_db()
        self.assertTrue(self.user.check_password('old_password'))
| 35.761468 | 109 | 0.653155 | from django.contrib.auth import views as auth_views
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth.models import User
from django.test import TestCase
from django.urls import resolve, reverse
class PasswordChangeTests(TestCase):
def setUp(self):
username = 'john'
password = 'secret123'
User.objects.create_user(username=username, email='john@doe.com', password=password)
url = reverse('password_change')
self.client.login(username=username, password=password)
self.response = self.client.get(url)
def test_status_code(self):
self.assertEquals(self.response.status_code, 200)
def test_url_resolves_correct_view(self):
view = resolve('/settings/password/')
self.assertEquals(view.func.view_class, auth_views.PasswordChangeView)
def test_csrf(self):
self.assertContains(self.response, 'csrfmiddlewaretoken')
def test_contains_form(self):
form = self.response.context.get('form')
self.assertIsInstance(form, PasswordChangeForm)
def test_form_inputs(self):
self.assertContains(self.response, '<input', 4)
self.assertContains(self.response, 'type="password"', 3)
class LoginRequiredPasswordChangeTests(TestCase):
def test_redirection(self):
url = reverse('password_change')
login_url = reverse('login')
response = self.client.get(url)
self.assertRedirects(response, f'{login_url}?next={url}')
class PasswordChangeTestCase(TestCase):
def setUp(self, data={}):
self.user = User.objects.create_user(username='john', email='john@doe.com', password='old_password')
self.url = reverse('password_change')
self.client.login(username='john', password='old_password')
self.response = self.client.post(self.url, data)
class SuccessfulPasswordChangeTests(PasswordChangeTestCase):
def setUp(self):
super().setUp({
'old_password': 'old_password',
'new_password1': 'new_password',
'new_password2': 'new_password',
})
def test_redirection(self):
self.assertRedirects(self.response, reverse('password_change_done'))
def test_password_changed(self):
self.user.refresh_from_db()
self.assertTrue(self.user.check_password('new_password'))
def test_user_authentication(self):
response = self.client.get(reverse('home'))
user = response.context.get('user')
self.assertTrue(user.is_authenticated)
class InvalidPasswordChangeTests(PasswordChangeTestCase):
def test_status_code(self):
self.assertEquals(self.response.status_code, 200)
def test_form_errors(self):
form = self.response.context.get('form')
self.assertTrue(form.errors)
def test_didnt_change_password(self):
self.user.refresh_from_db()
self.assertTrue(self.user.check_password('old_password'))
| true | true |
f73a2d9712f3fc4ebd6f7b04a3d34803f74e8d1a | 318 | py | Python | examples/load_samples.py | jieggii/mc.py | 74e0489370c7c1a1cbc5e40fbc295ce32a124dd1 | [
"MIT"
] | 30 | 2019-08-20T14:56:39.000Z | 2022-03-30T14:03:28.000Z | examples/load_samples.py | babydickdanilko/mc.py | 74e0489370c7c1a1cbc5e40fbc295ce32a124dd1 | [
"MIT"
] | 4 | 2019-11-30T17:56:54.000Z | 2022-03-25T11:59:55.000Z | examples/load_samples.py | babydickdanilko/mc.py | 74e0489370c7c1a1cbc5e40fbc295ce32a124dd1 | [
"MIT"
] | 10 | 2019-09-15T19:11:58.000Z | 2021-08-06T08:13:17.000Z | import mc
# Plain-text samples: one sample per line, with ';' as the escape separator.
txt_samples = mc.util.load_txt_samples("samples.txt", separator=";")
print(txt_samples)
# >> "['hello world', 'hello world of cutes', 'string with escaped ";"']"

# JSON samples: loaded from a JSON array of strings.
json_samples = mc.util.load_json_samples("samples.json")
print(json_samples)
# >> ['hello world', 'hello world of cuties']
| 28.909091 | 73 | 0.726415 | import mc
# Load ';'-separated samples from a plain-text file and show them.
samples_from_txt = mc.util.load_txt_samples("samples.txt", separator=";")
print(samples_from_txt)
# Load samples from a JSON array and show them.
samples_from_json = mc.util.load_json_samples("samples.json")
print(samples_from_json)
| true | true |
f73a2dba0d726fef3d4b923de1c75b4b846cc6ab | 343 | py | Python | viewFile.py | PrathikShirolkar/AutomaticImageColization | 981a011cbd32f741668738cafc1dd9ed44965402 | [
"Apache-2.0"
] | null | null | null | viewFile.py | PrathikShirolkar/AutomaticImageColization | 981a011cbd32f741668738cafc1dd9ed44965402 | [
"Apache-2.0"
] | null | null | null | viewFile.py | PrathikShirolkar/AutomaticImageColization | 981a011cbd32f741668738cafc1dd9ed44965402 | [
"Apache-2.0"
] | null | null | null | from tensorflow.python import pywrap_tensorflow
# Checkpoint to inspect; swap in another file (e.g. "deeplab_resnet_init.ckpt")
# to dump a different model.
checkpoint_path = 'tmodel.ckpt-100'
reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)
# Print every variable stored in the checkpoint together with its value.
for tensor_name in reader.get_variable_to_shape_map():
    print("tensor_name: ", tensor_name)
    print(reader.get_tensor(tensor_name))
| 38.111111 | 63 | 0.816327 | from tensorflow.python import pywrap_tensorflow
# Dump every variable name and value stored in a TensorFlow checkpoint.
checkpoint_path = 'tmodel.ckpt-100'
reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)
# Map of variable name -> shape for everything in the checkpoint.
var_to_shape_map = reader.get_variable_to_shape_map()
for key in var_to_shape_map:
    print("tensor_name: ", key)
    print(reader.get_tensor(key))
| true | true |
f73a2f0b8fa8c1b64a8d69888a568d2ea69714a7 | 176 | py | Python | Diverso4.py | Friedned/PC4 | 6feeee1240f95683d6dcdb7ddb6ea47d09c07832 | [
"Apache-2.0"
] | null | null | null | Diverso4.py | Friedned/PC4 | 6feeee1240f95683d6dcdb7ddb6ea47d09c07832 | [
"Apache-2.0"
] | null | null | null | Diverso4.py | Friedned/PC4 | 6feeee1240f95683d6dcdb7ddb6ea47d09c07832 | [
"Apache-2.0"
] | null | null | null | import re
s = '@robot9! @robot4& I have a good feeling that the show isgoing to be amazing! @robot9$ @robot7%'
# Match each '@robotN' handle together with the single punctuation
# (non-word) character immediately following it.
robot_pattern = re.compile(r"@robot\d\W")
encontrados = robot_pattern.findall(s)
print(encontrados)
| 29.333333 | 101 | 0.704545 | import re
s = '@robot9! @robot4& I have a good feeling that the show isgoing to be amazing! @robot9$ @robot7%'
# Find each '@robot' handle plus its trailing punctuation character.
encontrados=re.findall(r"@robot\d\W",s)
print(encontrados)
| true | true |
f73a3077834965fc05f558a0496935737fe42672 | 9,619 | py | Python | fuzzers/046-clk-bufg-muxed-pips/top.py | rw1nkler/prjxray | aff076b47dcf6d653eb3ce791b41fd6cf4343edd | [
"ISC"
] | 583 | 2017-12-21T11:06:13.000Z | 2022-02-20T21:27:33.000Z | fuzzers/046-clk-bufg-muxed-pips/top.py | rw1nkler/prjxray | aff076b47dcf6d653eb3ce791b41fd6cf4343edd | [
"ISC"
] | 1,212 | 2017-12-22T15:05:06.000Z | 2022-02-19T13:04:59.000Z | fuzzers/046-clk-bufg-muxed-pips/top.py | mfkiwl/prjxray-xilinx-7-bitstream-fortmat | 5349556bc2c230801d6df0cf11bccb9cfd171639 | [
"ISC"
] | 134 | 2017-12-21T10:16:50.000Z | 2022-02-16T06:42:04.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2020 The Project X-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
""" Emits top.v's for various BUFHCE routing inputs. """
import os
import random
random.seed(int(os.getenv("SEED"), 16))
from prjxray import util
from prjxray.lut_maker import LutMaker
from prjxray.db import Database
from io import StringIO
CMT_XY_FUN = util.create_xy_fun(prefix='')
BUFGCTRL_XY_FUN = util.create_xy_fun('BUFGCTRL_')
def read_site_to_cmt():
    """Yield (site, cmt) pairs parsed from the fuzzer's cmt_regions.csv."""
    csv_path = os.path.join(os.getenv('FUZDIR'), 'build', 'cmt_regions.csv')
    with open(csv_path) as csv_file:
        for row in csv_file:
            site, cmt = row.strip().split(',')
            yield site, cmt
class ClockSources(object):
    """ Tracks clock sources and the CMT regions they may route within.

    A source registered with cmt='ANY' can reach every CMT; otherwise it
    belongs to the named CMT (and is also reachable from the horizontally
    paired CMT).
    """
    def __init__(self):
        self.sources = {}
        self.merged_sources = {}
        self.source_to_cmt = {}
        self.used_sources_from_cmt = {}

    def add_clock_source(self, source, cmt):
        """ Register `source` as belonging to `cmt` ('ANY' = routable anywhere). """
        self.sources.setdefault(cmt, []).append(source)
        assert source not in self.source_to_cmt or self.source_to_cmt[
            source] == cmt, source
        self.source_to_cmt[source] = cmt

    def get_random_source(self, cmt):
        """ Return a random source routable to `cmt`, or None.

        Candidates are 'ANY' sources, sources owned by `cmt`, and sources
        owned by the horizontally adjacent (paired) CMT.  At most 14
        distinct sources per owning CMT are handed out; a pick beyond
        that limit is rejected and None is returned.
        """
        if cmt not in self.merged_sources:
            candidates = []
            candidates.extend(self.sources.get('ANY', []))
            candidates.extend(self.sources.get(cmt, []))
            # The paired CMT sits one column over (even <-> odd X).
            col, row = CMT_XY_FUN(cmt)
            paired_col = col + 1 if col % 2 == 0 else col - 1
            candidates.extend(self.sources.get('X{}Y{}'.format(paired_col, row), []))
            self.merged_sources[cmt] = candidates

        if not self.merged_sources[cmt]:
            return None

        source = random.choice(self.merged_sources[cmt])
        owner = self.source_to_cmt[source]
        used = self.used_sources_from_cmt.setdefault(owner, set())
        used.add(source)
        if owner != 'ANY' and len(used) > 14:
            print('//', self.used_sources_from_cmt)
            used.remove(source)
            return None
        return source
def main():
    """
    Emit a randomized top.v exercising the BUFGCTRL routing inputs.

    BUFG's can be driven from:
    Interconnect
    HROW cascade
    """
    # Module header with a dummy LUT so the design is never empty.
    print(
        '''
module top();
    (* KEEP, DONT_TOUCH *)
    LUT6 dummy();
    ''')
    site_to_cmt = dict(read_site_to_cmt())
    luts = LutMaker()
    # Wire declarations and BUFG instances are buffered separately so the
    # generated Verilog stays ordered: wires first, then instances.
    wires = StringIO()
    bufgs = StringIO()
    clock_sources = ClockSources()
    db = Database(util.get_db_root(), util.get_part())
    grid = db.grid()
    def gen_sites(desired_site_type):
        """Yield (tile_name, site) for every site of the requested type."""
        for tile_name in sorted(grid.tiles()):
            loc = grid.loc_of_tilename(tile_name)
            gridinfo = grid.gridinfo_at_loc(loc)
            for site, site_type in gridinfo.sites.items():
                if site_type == desired_site_type:
                    yield tile_name, site
    # Instantiate one MMCM per site; its 13 outputs become cascade-in
    # clock source candidates for the BUFGs, tied to the site's CMT.
    for _, site in gen_sites('MMCME2_ADV'):
        mmcm_clocks = [
            'mmcm_clock_{site}_{idx}'.format(site=site, idx=idx)
            for idx in range(13)
        ]
        for clk in mmcm_clocks:
            clock_sources.add_clock_source(clk, site_to_cmt[site])
        print(
            """
            wire {c0}, {c1}, {c2}, {c3}, {c4}, {c5};
            (* KEEP, DONT_TOUCH, LOC = "{site}" *)
            MMCME2_ADV pll_{site} (
            .CLKOUT0({c0}),
            .CLKOUT0B({c1}),
            .CLKOUT1({c2}),
            .CLKOUT1B({c3}),
            .CLKOUT2({c4}),
            .CLKOUT2B({c5}),
            .CLKOUT3({c6}),
            .CLKOUT3B({c7}),
            .CLKOUT4({c8}),
            .CLKOUT5({c9}),
            .CLKOUT6({c10}),
            .CLKFBOUT({c11}),
            .CLKFBOUTB({c12})
            );
            """.format(
            site=site,
            c0=mmcm_clocks[0],
            c1=mmcm_clocks[1],
            c2=mmcm_clocks[2],
            c3=mmcm_clocks[3],
            c4=mmcm_clocks[4],
            c5=mmcm_clocks[5],
            c6=mmcm_clocks[6],
            c7=mmcm_clocks[7],
            c8=mmcm_clocks[8],
            c9=mmcm_clocks[9],
            c10=mmcm_clocks[10],
            c11=mmcm_clocks[11],
            c12=mmcm_clocks[12],
            ))
    # Declare per-site wires and a BUFGCTRL instance for every site,
    # iterating in deterministic X/Y order.
    for _, site in sorted(gen_sites("BUFGCTRL"),
                          key=lambda x: BUFGCTRL_XY_FUN(x[1])):
        print(
            """
            wire O_{site};
            wire S1_{site};
            wire S0_{site};
            wire IGNORE1_{site};
            wire IGNORE0_{site};
            wire I1_{site};
            wire I0_{site};
            wire CE1_{site};
            wire CE0_{site};
            """.format(site=site),
            file=wires)
        print(
            """
            (* KEEP, DONT_TOUCH, LOC = "{site}" *)
            BUFGCTRL bufg_{site} (
            .O(O_{site}),
            .S1(S1_{site}),
            .S0(S0_{site}),
            .IGNORE1(IGNORE1_{site}),
            .IGNORE0(IGNORE0_{site}),
            .I1(I1_{site}),
            .I0(I0_{site}),
            .CE1(CE1_{site}),
            .CE0(CE0_{site})
            );
            """.format(site=site),
            file=bufgs)
    # (The following string literal is an informal note, not a docstring.)
    """ BUFG clock sources:
    2 from interconnect
    Output of BUFG +/- 1
    Cascade in (e.g. PLL, MMCM)
    """
    # The four ways a BUFG input can be driven; picked at random below.
    CLOCK_CHOICES = (
        'LUT',
        'BUFG_+1',
        'BUFG_-1',
        'CASCADE',
    )
    def find_bufg_cmt(tile):
        """Walk vertically from `tile` to the nearest CLK_HROW tile and
        return the CMT of its first site.  Direction depends on whether
        the tile is a _BOT_ (walk down) or _TOP_ (walk up) variant."""
        if '_BOT_' in tile:
            inc = 1
        else:
            inc = -1
        loc = grid.loc_of_tilename(tile)
        offset = 1
        while True:
            gridinfo = grid.gridinfo_at_loc(
                (loc.grid_x, loc.grid_y + offset * inc))
            if gridinfo.tile_type.startswith('CLK_HROW_'):
                return site_to_cmt[list(gridinfo.sites.keys())[0]]
            offset += 1
    def get_clock_net(tile, site, source_type):
        """Return the net name driving a BUFG input for the chosen
        `source_type`: a fresh LUT output, the output of the BUFG one
        slot above/below (wrapping within the 16-BUFG bank), or a random
        cascade source from the tile's CMT (may be None)."""
        if source_type == 'LUT':
            return luts.get_next_output_net()
        elif source_type == 'BUFG_+1':
            x, y = BUFGCTRL_XY_FUN(site)
            target_y = y + 1
            max_y = ((y // 16) + 1) * 16
            if target_y >= max_y:
                target_y -= 16
            return 'O_BUFGCTRL_X{x}Y{y}'.format(x=x, y=target_y)
        elif source_type == 'BUFG_-1':
            x, y = BUFGCTRL_XY_FUN(site)
            target_y = y - 1
            min_y = (y // 16) * 16
            if target_y < min_y:
                target_y += 16
            return 'O_BUFGCTRL_X{x}Y{y}'.format(x=x, y=target_y)
        elif source_type == 'CASCADE':
            cmt = find_bufg_cmt(tile)
            return clock_sources.get_random_source(cmt)
        else:
            assert False, source_type
    # Randomly hook up I0/I1 of each BUFGCTRL, and drive the control pins
    # from fresh LUT outputs.
    for tile, site in sorted(gen_sites("BUFGCTRL"),
                             key=lambda x: BUFGCTRL_XY_FUN(x[1])):
        if random.randint(0, 1):
            print(
                """
        assign I0_{site} = {i0_net};""".format(
                    site=site,
                    i0_net=get_clock_net(
                        tile, site, random.choice(CLOCK_CHOICES))),
                file=bufgs)
        if random.randint(0, 1):
            print(
                """
        assign I1_{site} = {i1_net};""".format(
                    site=site,
                    i1_net=get_clock_net(
                        tile, site, random.choice(CLOCK_CHOICES))),
                file=bufgs)
        print(
            """
        assign S0_{site} = {s0_net};
        assign S1_{site} = {s1_net};
        assign IGNORE0_{site} = {ignore0_net};
        assign IGNORE1_{site} = {ignore1_net};
        assign CE0_{site} = {ce0_net};
        assign CE1_{site} = {ce1_net};
        """.format(
                site=site,
                s0_net=luts.get_next_output_net(),
                s1_net=luts.get_next_output_net(),
                ignore0_net=luts.get_next_output_net(),
                ignore1_net=luts.get_next_output_net(),
                ce0_net=luts.get_next_output_net(),
                ce1_net=luts.get_next_output_net(),
            ),
            file=bufgs)
    # Emit the accumulated LUTs, wires, and BUFG instances in order.
    for l in luts.create_wires_and_luts():
        print(l)
    print(wires.getvalue())
    print(bufgs.getvalue())
    # Randomly sink some BUFG outputs into BUFHCEs so the outputs are used.
    itr = iter(gen_sites('BUFHCE'))
    for tile, site in sorted(gen_sites("BUFGCTRL"),
                             key=lambda x: BUFGCTRL_XY_FUN(x[1])):
        if random.randint(0, 1):
            _, bufhce_site = next(itr)
            print(
                """
    (* KEEP, DONT_TOUCH, LOC = "{bufhce_site}" *)
    BUFHCE bufhce_{bufhce_site} (
    .I(O_{site})
    );""".format(
                    site=site,
                    bufhce_site=bufhce_site,
                ))
    print("endmodule")
if __name__ == '__main__':
    main()
| 27.640805 | 75 | 0.521156 |
import os
import random
random.seed(int(os.getenv("SEED"), 16))
from prjxray import util
from prjxray.lut_maker import LutMaker
from prjxray.db import Database
from io import StringIO
CMT_XY_FUN = util.create_xy_fun(prefix='')
BUFGCTRL_XY_FUN = util.create_xy_fun('BUFGCTRL_')
def read_site_to_cmt():
with open(os.path.join(os.getenv('FUZDIR'), 'build',
'cmt_regions.csv')) as f:
for l in f:
site, cmt = l.strip().split(',')
yield (site, cmt)
class ClockSources(object):
def __init__(self):
self.sources = {}
self.merged_sources = {}
self.source_to_cmt = {}
self.used_sources_from_cmt = {}
def add_clock_source(self, source, cmt):
if cmt not in self.sources:
self.sources[cmt] = []
self.sources[cmt].append(source)
assert source not in self.source_to_cmt or self.source_to_cmt[
source] == cmt, source
self.source_to_cmt[source] = cmt
def get_random_source(self, cmt):
if cmt not in self.merged_sources:
choices = []
if 'ANY' in self.sources:
choices.extend(self.sources['ANY'])
if cmt in self.sources:
choices.extend(self.sources[cmt])
x, y = CMT_XY_FUN(cmt)
if x % 2 == 0:
x += 1
else:
x -= 1
paired_cmt = 'X{}Y{}'.format(x, y)
if paired_cmt in self.sources:
choices.extend(self.sources[paired_cmt])
self.merged_sources[cmt] = choices
if self.merged_sources[cmt]:
source = random.choice(self.merged_sources[cmt])
source_cmt = self.source_to_cmt[source]
if source_cmt not in self.used_sources_from_cmt:
self.used_sources_from_cmt[source_cmt] = set()
self.used_sources_from_cmt[source_cmt].add(source)
if source_cmt != 'ANY' and len(
self.used_sources_from_cmt[source_cmt]) > 14:
print('//', self.used_sources_from_cmt)
self.used_sources_from_cmt[source_cmt].remove(source)
return None
else:
return source
def main():
print(
'''
module top();
(* KEEP, DONT_TOUCH *)
LUT6 dummy();
''')
site_to_cmt = dict(read_site_to_cmt())
luts = LutMaker()
wires = StringIO()
bufgs = StringIO()
clock_sources = ClockSources()
db = Database(util.get_db_root(), util.get_part())
grid = db.grid()
def gen_sites(desired_site_type):
for tile_name in sorted(grid.tiles()):
loc = grid.loc_of_tilename(tile_name)
gridinfo = grid.gridinfo_at_loc(loc)
for site, site_type in gridinfo.sites.items():
if site_type == desired_site_type:
yield tile_name, site
for _, site in gen_sites('MMCME2_ADV'):
mmcm_clocks = [
'mmcm_clock_{site}_{idx}'.format(site=site, idx=idx)
for idx in range(13)
]
for clk in mmcm_clocks:
clock_sources.add_clock_source(clk, site_to_cmt[site])
print(
"""
wire {c0}, {c1}, {c2}, {c3}, {c4}, {c5};
(* KEEP, DONT_TOUCH, LOC = "{site}" *)
MMCME2_ADV pll_{site} (
.CLKOUT0({c0}),
.CLKOUT0B({c1}),
.CLKOUT1({c2}),
.CLKOUT1B({c3}),
.CLKOUT2({c4}),
.CLKOUT2B({c5}),
.CLKOUT3({c6}),
.CLKOUT3B({c7}),
.CLKOUT4({c8}),
.CLKOUT5({c9}),
.CLKOUT6({c10}),
.CLKFBOUT({c11}),
.CLKFBOUTB({c12})
);
""".format(
site=site,
c0=mmcm_clocks[0],
c1=mmcm_clocks[1],
c2=mmcm_clocks[2],
c3=mmcm_clocks[3],
c4=mmcm_clocks[4],
c5=mmcm_clocks[5],
c6=mmcm_clocks[6],
c7=mmcm_clocks[7],
c8=mmcm_clocks[8],
c9=mmcm_clocks[9],
c10=mmcm_clocks[10],
c11=mmcm_clocks[11],
c12=mmcm_clocks[12],
))
for _, site in sorted(gen_sites("BUFGCTRL"),
key=lambda x: BUFGCTRL_XY_FUN(x[1])):
print(
"""
wire O_{site};
wire S1_{site};
wire S0_{site};
wire IGNORE1_{site};
wire IGNORE0_{site};
wire I1_{site};
wire I0_{site};
wire CE1_{site};
wire CE0_{site};
""".format(site=site),
file=wires)
print(
"""
(* KEEP, DONT_TOUCH, LOC = "{site}" *)
BUFGCTRL bufg_{site} (
.O(O_{site}),
.S1(S1_{site}),
.S0(S0_{site}),
.IGNORE1(IGNORE1_{site}),
.IGNORE0(IGNORE0_{site}),
.I1(I1_{site}),
.I0(I0_{site}),
.CE1(CE1_{site}),
.CE0(CE0_{site})
);
""".format(site=site),
file=bufgs)
CLOCK_CHOICES = (
'LUT',
'BUFG_+1',
'BUFG_-1',
'CASCADE',
)
def find_bufg_cmt(tile):
if '_BOT_' in tile:
inc = 1
else:
inc = -1
loc = grid.loc_of_tilename(tile)
offset = 1
while True:
gridinfo = grid.gridinfo_at_loc(
(loc.grid_x, loc.grid_y + offset * inc))
if gridinfo.tile_type.startswith('CLK_HROW_'):
return site_to_cmt[list(gridinfo.sites.keys())[0]]
offset += 1
def get_clock_net(tile, site, source_type):
if source_type == 'LUT':
return luts.get_next_output_net()
elif source_type == 'BUFG_+1':
x, y = BUFGCTRL_XY_FUN(site)
target_y = y + 1
max_y = ((y // 16) + 1) * 16
if target_y >= max_y:
target_y -= 16
return 'O_BUFGCTRL_X{x}Y{y}'.format(x=x, y=target_y)
elif source_type == 'BUFG_-1':
x, y = BUFGCTRL_XY_FUN(site)
target_y = y - 1
min_y = (y // 16) * 16
if target_y < min_y:
target_y += 16
return 'O_BUFGCTRL_X{x}Y{y}'.format(x=x, y=target_y)
elif source_type == 'CASCADE':
cmt = find_bufg_cmt(tile)
return clock_sources.get_random_source(cmt)
else:
assert False, source_type
for tile, site in sorted(gen_sites("BUFGCTRL"),
key=lambda x: BUFGCTRL_XY_FUN(x[1])):
if random.randint(0, 1):
print(
"""
assign I0_{site} = {i0_net};""".format(
site=site,
i0_net=get_clock_net(
tile, site, random.choice(CLOCK_CHOICES))),
file=bufgs)
if random.randint(0, 1):
print(
"""
assign I1_{site} = {i1_net};""".format(
site=site,
i1_net=get_clock_net(
tile, site, random.choice(CLOCK_CHOICES))),
file=bufgs)
print(
"""
assign S0_{site} = {s0_net};
assign S1_{site} = {s1_net};
assign IGNORE0_{site} = {ignore0_net};
assign IGNORE1_{site} = {ignore1_net};
assign CE0_{site} = {ce0_net};
assign CE1_{site} = {ce1_net};
""".format(
site=site,
s0_net=luts.get_next_output_net(),
s1_net=luts.get_next_output_net(),
ignore0_net=luts.get_next_output_net(),
ignore1_net=luts.get_next_output_net(),
ce0_net=luts.get_next_output_net(),
ce1_net=luts.get_next_output_net(),
),
file=bufgs)
for l in luts.create_wires_and_luts():
print(l)
print(wires.getvalue())
print(bufgs.getvalue())
itr = iter(gen_sites('BUFHCE'))
for tile, site in sorted(gen_sites("BUFGCTRL"),
key=lambda x: BUFGCTRL_XY_FUN(x[1])):
if random.randint(0, 1):
_, bufhce_site = next(itr)
print(
"""
(* KEEP, DONT_TOUCH, LOC = "{bufhce_site}" *)
BUFHCE bufhce_{bufhce_site} (
.I(O_{site})
);""".format(
site=site,
bufhce_site=bufhce_site,
))
print("endmodule")
if __name__ == '__main__':
main()
| true | true |
f73a313155ef0145e6a34cd648f6ea35f544b056 | 1,545 | py | Python | source/FAST/Examples/Python/convert_video_to_image_frames.py | skn123/FAST | d66522260bf65c5ab74d75050131d5a353cbf602 | [
"BSD-2-Clause"
] | 1 | 2021-02-10T16:01:23.000Z | 2021-02-10T16:01:23.000Z | source/FAST/Examples/Python/convert_video_to_image_frames.py | skn123/FAST | d66522260bf65c5ab74d75050131d5a353cbf602 | [
"BSD-2-Clause"
] | null | null | null | source/FAST/Examples/Python/convert_video_to_image_frames.py | skn123/FAST | d66522260bf65c5ab74d75050131d5a353cbf602 | [
"BSD-2-Clause"
] | null | null | null | ## @example convert_video_to_image_frames.py
# This example loads a video and converts to a stream of image frames and display the
# individual frames with matplotlib.
#
# Note that additional dependencies are required to stream videos in FAST:
# Linux: sudo apt install ubuntu-restricted-extras libgstreamer1.0-dev libgstreamer-plugins-bad1.0-dev libgstreamer-plugins-base1.0-dev libgstreamer-plugins-good1.0-dev
# Windows: K-lite codec pack https://codecguide.com/download_kl.htm
import fast
import matplotlib.pyplot as plt
import numpy as np
#fast.Reporter.setGlobalReportMethod(fast.Reporter.COUT) # Uncomment to show debug info
# Fetch the example data on first run.
fast.downloadTestDataIfNotExists()

# Open the video file as a FAST streaming pipeline.
movie = fast.MovieStreamer.New()
movie.setFilename(fast.Config.getTestDataPath() + 'US/sagittal_spine.avi')
channel = movie.getOutputPort()
movie.update()  # start the pipeline

buffered = []   # (pixel array, frame number) for every 20th frame
frame_no = 0
while True:
    frame = channel.getNextImage()
    frame_no += 1
    if frame.isLastFrame():
        break
    # Keep only every 20th frame for display.
    if frame_no % 20 == 0:
        buffered.append((np.asarray(frame), frame_no))
    if len(buffered) == 9:
        # Show the 9 buffered frames in a 3x3 grid, then start over.
        fig, grid = plt.subplots(3, 3, figsize=(10, 10))
        for col in range(3):
            for row in range(3):
                pixels, number = buffered[col + row * 3]
                grid[row, col].set_title('Frame: ' + str(number))
                grid[row, col].imshow(pixels[..., 0], cmap='gray')
        plt.show()
        buffered.clear()
| 36.785714 | 168 | 0.700971 | as plt
import numpy as np
ists()
streamer = fast.MovieStreamer.New()
streamer.setFilename(fast.Config.getTestDataPath() + 'US/sagittal_spine.avi')
dataChannel = streamer.getOutputPort()
streamer.update()
frame_list = []
counter = 0
while True:
frame = dataChannel.getNextImage()
counter += 1
if frame.isLastFrame():
break
if counter % 20 == 0: frame_list.append((np.asarray(frame), counter))
if len(frame_list) == 9:
f, axes = plt.subplots(3,3, figsize=(10,10))
for i in range(3):
for j in range(3):
axes[j, i].set_title('Frame: ' + str(frame_list[i + j*3][1]))
axes[j, i].imshow(frame_list[i + j*3][0][..., 0], cmap='gray')
plt.show()
frame_list.clear()
| true | true |
f73a324ce6b0fc748fe4b1613d20bdd33eb89eb9 | 15,822 | py | Python | projects/TGS_salt/obsolete/train8_Unet_scSE_hyper_LR2.py | liaopeiyuan/ml-arsenal-public | f8938ce3cb58b35fc7cc20d096c39a85ec9780b2 | [
"Apache-2.0"
] | 280 | 2018-10-21T01:07:18.000Z | 2021-12-30T11:29:48.000Z | projects/TGS_salt/obsolete/train8_Unet_scSE_hyper_LR2.py | liaopeiyuan/ml-arsenal-public | f8938ce3cb58b35fc7cc20d096c39a85ec9780b2 | [
"Apache-2.0"
] | 3 | 2018-11-13T08:04:48.000Z | 2020-04-17T09:20:03.000Z | projects/TGS_salt/obsolete/train8_Unet_scSE_hyper_LR2.py | liaopeiyuan/ml-arsenal-public | f8938ce3cb58b35fc7cc20d096c39a85ec9780b2 | [
"Apache-2.0"
] | 59 | 2018-10-21T04:38:23.000Z | 2021-03-29T07:58:47.000Z | import os
import sys
sys.path.append('../../')
from dependencies import *
from settings import *
from reproducibility import *
from models.TGS_salt.Unet34_scSE_hyper import Unet_scSE_hyper as Net
SIZE = 101
PAD = 27
Y0, Y1, X0, X1 = PAD,PAD+SIZE,PAD,PAD+SIZE,
def time_to_str(time, str):
    """Format an elapsed time (in seconds) for logging.

    `str` selects the unit: 'min' converts to minutes with a " min(s)"
    suffix; any other value returns `time` unchanged.  The second
    parameter shadows the builtin `str` (kept for caller compatibility),
    so str.format() is used instead of calling the builtin.
    """
    if str == 'min':
        # Restore the intended conversion that was left commented out;
        # the original dead code also crashed by calling the shadowed str().
        return '{} min(s)'.format(round(float(time) / 60, 5))
    return time
#TODO: Instead of directly printing to stdout, copy it into a txt file
class Logger():
    """Minimal logger that mirrors messages to stdout.

    `path` is stored for a future file-logging implementation (see the
    TODO above this class) but is not written to yet.
    """
    def __init__(self, path=None):
        super().__init__()
        # Destination file path (currently unused).
        self.path = path

    def write(self, message):
        """Print one log message.

        Fix: the original signature was `write(str)` — missing `self` —
        so `logger.write(msg)` raised TypeError (and printed the Logger
        instance when called with no argument).
        """
        print(message)
def valid_augment(image,mask,index):
cache = Struct(image = image.copy(), mask = mask.copy())
image, mask = do_resize2(image, mask, SIZE, SIZE)
image, mask = do_center_pad_to_factor2(image, mask)
return image,mask,index,cache
def train_augment(image,mask,index):
cache = Struct(image = image.copy(), mask = mask.copy())
if np.random.rand() < 0.5:
image, mask = do_horizontal_flip2(image, mask)
pass
if np.random.rand() < 0.5:
c = np.random.choice(4)
if c==0:
image, mask = do_random_shift_scale_crop_pad2(image, mask, 0.2) #0.125
if c==1:
image, mask = do_horizontal_shear2( image, mask, dx=np.random.uniform(-0.07,0.07) )
pass
if c==2:
image, mask = do_shift_scale_rotate2( image, mask, dx=0, dy=0, scale=1, angle=np.random.uniform(0,15)) #10
if c==3:
image, mask = do_elastic_transform2(image, mask, grid=10, distort=np.random.uniform(0,0.15))#0.10
pass
if np.random.rand() < 0.5:
c = np.random.choice(3)
if c==0:
image = do_brightness_shift(image,np.random.uniform(-0.1,+0.1))
if c==1:
image = do_brightness_multiply(image,np.random.uniform(1-0.08,1+0.08))
if c==2:
image = do_gamma(image,np.random.uniform(1-0.08,1+0.08))
# if c==1:
# image = do_invert_intensity(image)
image, mask = do_resize2(image, mask, SIZE, SIZE)
image, mask = do_center_pad_to_factor2(image, mask)
#print(image.shape)
return image,mask,index,cache
def validation( net, valid_loader ):
valid_num = 0
valid_loss = np.zeros(3,np.float32)
predicts = []
truths = []
for input, truth, index, cache in valid_loader:
input = input.cuda()
truth = truth.cuda()
with torch.no_grad():
logit = data_parallel(net,input) #net(input)
prob = F.sigmoid(logit)
loss = net.criterion(logit, truth)
dice = net.metric(logit, truth)
batch_size = len(index)
valid_loss += batch_size*np.array(( loss.item(), dice.item(), 0))
valid_num += batch_size
prob = prob [:,:,Y0:Y1, X0:X1]
truth = truth[:,:,Y0:Y1, X0:X1]
prob = F.avg_pool2d(prob, kernel_size=2, stride=2)
truth = F.avg_pool2d(truth, kernel_size=2, stride=2)
predicts.append(prob.data.cpu().numpy())
truths.append(truth.data.cpu().numpy())
assert(valid_num == len(valid_loader.sampler))
valid_loss = valid_loss/valid_num
#--------------------------------------------------------
predicts = np.concatenate(predicts).squeeze()
truths = np.concatenate(truths).squeeze()
precision, result, threshold = do_kaggle_metric(predicts, truths)
valid_loss[2] = precision.mean()
return valid_loss
def train():
initial_checkpoint = None
#'checkpoint/00048500_model.pth'\
# None #'/root/share/project/kaggle/tgs/results/resnet34-resize128-focus/fold0-1a/checkpoint/00003500_model.pth'
## setup -----------------
os.makedirs(CHECKPOINTS +'/checkpoint', exist_ok=True)
os.makedirs(CHECKPOINTS +'/train', exist_ok=True)
os.makedirs(CHECKPOINTS +'/backup', exist_ok=True)
#backup_project_as_zip(PROJECT_PATH, RESULT +'/backup/code.train.%s.zip'%IDENTIFIER)
log = Logger()
#log.open(RESULT+'/log.train.txt',mode='a')
print('\n--- [START %s] %s\n\n' % (IDENTIFIER, '-' * 64))
print('\tSEED = %u\n' % SEED)
print('\tPROJECT_PATH = %s\n' % CODE)
print('\t__file__ = %s\n' % __file__)
print('\tRESULT = %s\n' % CHECKPOINTS)
print('\n')
print('\t<additional comments>\n')
print('\t ... \n')
print('\n')
## dataset ----------------------------------------
print('Configuring dataset...\n')
batch_size = 16
train_dataset = TGSDataset('list_train8_3600', train_augment, 'train')
os.makedirs(CHECKPOINTS +'/list_train8_3600', exist_ok=True)
train_loader = DataLoader(
train_dataset,
sampler = RandomSampler(train_dataset),
#sampler = ConstantSampler(train_dataset,[31]*batch_size*100),
batch_size = batch_size,
drop_last = True,
num_workers = 8,
pin_memory = True,
collate_fn = null_collate)
valid_dataset = TGSDataset('list_valid8_400', valid_augment, 'train')
valid_loader = DataLoader(
valid_dataset,
sampler = RandomSampler(valid_dataset),
batch_size = batch_size,
drop_last = False,
num_workers = 8,
pin_memory = True,
collate_fn = null_collate)
assert(len(train_dataset)>=batch_size)
print('batch_size = %d\n'%(batch_size))
print('train_dataset.split = %s\n'%(train_dataset.split))
print('valid_dataset.split = %s\n'%(valid_dataset.split))
print('\n')
#debug
if 0: #debug ##-------------------------------
for input, truth, index, cache in train_loader:
images = input.cpu().data.numpy().squeeze()
masks = truth.cpu().data.numpy().squeeze()
batch_size = len(index)
for b in range(batch_size):
image = images[b]*255
image = np.dstack([image,image,image])
mask = masks[b]
image_show('image',image,resize=2)
image_show_norm('mask', mask, max=1,resize=2)
overlay0 = draw_mask_overlay(mask, image, color=[0,0,255])
overlay0 = draw_mask_to_contour_overlay(mask, overlay0, 2, color=[0,0,255])
image_show('overlay0',overlay0,resize=2)
cv2.waitKey(0)
#--------------------------------------
## net ----------------------------------------
print('Configuring neural network...\n')
net = Net().cuda()
if initial_checkpoint is not None:
print('\tinitial_checkpoint = %s\n' % initial_checkpoint)
net.load_state_dict(torch.load(initial_checkpoint, map_location=lambda storage, loc: storage))
print("The net is an instance of {}.".format(type(net)))
print('\n')
## optimiser ----------------------------------
num_iters = 300 *1000
iter_smooth = 20
iter_log = 50
iter_valid = 100
epoch_save = np.arange(0,1500,10)#[0, num_iters-1]\
#+ list(range(0,num_iters,500))#1*1000
FREEZE=False
#------------------------------------------------------
if FREEZE: ##freeze
for p in net.feature_net.parameters():
p.requires_grad = False
#from cls import CyclicLR
#net.set_mode('train',is_freeze_bn=True)
#------------------------------------------------------
scheduler = lambda x: (0.009/2)*(np.cos(PI*(np.mod(x-1,int(11.25*1000))/(int(11.25*1000))))+1)+0.001
print(scheduler(1))
print(scheduler(5000))
print(scheduler(10001))
#scheduler = CyclicLR(base_lr=0.01, max_lr=0.01, step_size=10000, gamma=1., scale_fn=clr_fn, scale_mode='iterations')
#schduler = None #StepLR([ (0, 0.01), (200, 0.001)])
#base_params = list(map(id, net.resnet.parameters()))
#decode_params = filter(lambda p: id(p) not in base_params, net.parameters())
#params = [ {"params": decode_params, "lr": 0.01},
# {"params": net.resnet.parameters(), "lr": 0.005}, ]
#optimizer = torch.optim.SGD(params, momentum=0.9, weight_decay=0.0001)
optimizer = optim.SGD(filter(lambda p: p.requires_grad, net.parameters()),
lr=0.01, momentum=0.9, weight_decay=0.0001)
#scheduler = CyclicLR(optimizer,base_lr=0.01, max_lr=0.01, step_size=10000, gamma=1., scale_fn=clr_fn, scale_mode='iterations')
#scheduler= CyclicLR(optimizer, base_lr=0.001, max_lr=0.01, step_size=10000, gamma=0.99, mode='cos_anneal')
start_iter = 0
start_epoch= 0
if initial_checkpoint is not None:
checkpoint = torch.load(initial_checkpoint.replace('_model.pth','_optimizer.pth'))
start_iter = checkpoint['iter' ]
start_epoch = checkpoint['epoch']
rate = get_learning_rate(optimizer) #load all except learning rate
#optimizer.load_state_dict(checkpoint['optimizer'])
adjust_learning_rate(optimizer, rate)
pass
## start training here! ##############################################
print('Start training...\n')
#print(' samples_per_epoch = %d\n\n'%len(train_dataset))
print(' rate iter epoch | valid_loss | train_loss | batch_loss | time \n')
print('-------------------------------------------------------------------------------------------------------------------------------\n')
train_loss = np.zeros(6,np.float32)
valid_loss = np.zeros(6,np.float32)
batch_loss = np.zeros(6,np.float32)
rate = 0
iter = 0
i = 0
start = timer()
while iter<num_iters: # loop over the dataset multiple times
sum_train_loss = np.zeros(6,np.float32)
sum = 0
optimizer.zero_grad()
for input, truth, index, cache in train_loader:
if 0: #debug ##-------------------------------
image = input.cpu().data.numpy().squeeze()
mask = truth.cpu().data.numpy().squeeze()
batch_size = len(index)
for b in range(batch_size):
image_show_norm('image',image[b],max=1,resize=2)
image_show_norm('mask', mask[b], max=1,resize=2)
cv2.waitKey(0)
#--------------------------------------
len_train_dataset = len(train_dataset)
batch_size = len(index)
iter = i + start_iter
epoch = (iter-start_iter)*batch_size/len_train_dataset + start_epoch
num_samples = epoch*len_train_dataset
if iter % iter_valid==0:
net.set_mode('valid')
valid_loss = validation(net, valid_loader)
net.set_mode('train')
print('\r',end='',flush=True)
print('%0.4f %5.1f %6.1f | %0.3f %0.3f (%0.3f) | %0.3f %0.3f | %0.3f %0.3f | %s \n' % (\
rate, iter/1000, epoch,
valid_loss[0], valid_loss[1], valid_loss[2],
train_loss[0], train_loss[1],
batch_loss[0], batch_loss[1],
time_to_str((timer() - start),'min')))
time.sleep(0.01)
#if 1:
if round(epoch,1) == 0 or round(epoch,1) == 1 or round(epoch,1)+0.1 in epoch_save:
torch.save(net.state_dict(),CHECKPOINTS+"/"+train_dataset.split+'/%08d_model.pth'%(int(round(epoch,1)+0.1)))
torch.save({
'optimizer': optimizer.state_dict(),
'iter' : iter,
'epoch' : epoch,
}, CHECKPOINTS+"/"+train_dataset.split+'/%08d_optimizer.pth'%(int(round(epoch,1)+0.1)))
pass
# learning rate schduler -------------
if scheduler is not None:
#scheduler.batch_step()
lr = scheduler(iter)
if lr<0 : break
adjust_learning_rate(optimizer, lr)
rate = get_learning_rate(optimizer)
#rate = 0.01
# one iteration update -------------
#net.set_mode('train',is_freeze_bn=True)
net.set_mode('train')
input = input.cuda()
truth = truth.cuda()
logit = data_parallel(net,input) #net(input)
loss = net.criterion(logit, truth)
#loss = torch.nn.BCEWithLogitsLoss(logit,truth)
dice = net.metric(logit, truth)
loss.backward()
optimizer.step()
optimizer.zero_grad()
#torch.nn.utils.clip_grad_norm(net.parameters(), 1)
# print statistics ------------
batch_loss = np.array((
loss.item(),
dice.item(),
0, 0, 0, 0,
))
sum_train_loss += batch_loss
sum += 1
if iter%iter_smooth == 0:
train_loss = sum_train_loss/sum
sum_train_loss = np.zeros(6,np.float32)
sum = 0
print('\r%0.4f %5.1f %6.1f | %0.3f %0.3f (%0.3f) | %0.3f %0.3f | %0.3f %0.3f | %s ' % (\
rate, iter/1000, epoch,
valid_loss[0], valid_loss[1], valid_loss[2],
train_loss[0], train_loss[1],
batch_loss[0], batch_loss[1],
time_to_str((timer() - start), 'min')), end='',flush=True)
i=i+1
#<debug> ===================================================================
if 0:
#if iter%200==0:
#voxel, aux, query, link, truth, cache = make_valid_batch(valid_dataset.dataset, batch_size=2)
net.set_mode('test')#
with torch.no_grad():
logit = net(input)
prob = F.sigmoid(logit)
loss = net.criterion(logit, truth)
dice = net.metric(logit, truth)
if 0:
loss = net.criterion(logit, truth)
accuracy,hit_rate,precision_rate = net.metric(logit, truth)
valid_loss[0] = loss.item()
valid_loss[1] = accuracy.item()
valid_loss[2] = hit_rate.item()
valid_loss[3] = precision_rate.item()
#show only b in batch ---
b = 1
prob = prob.data.cpu().numpy()[b].squeeze()
truth = truth.data.cpu().numpy()[b].squeeze()
input = input.data.cpu().numpy()[b].squeeze()
all = np.hstack([input,truth,prob])
image_show_norm('all',all,max=1,resize=3)
cv2.waitKey(100)
net.set_mode('train')
#<debug> ===================================================================
pass #-- end of one data loader --
pass #-- end of all iterations --
if 1: #save last
torch.save(net.state_dict(),CHECKPOINTS +'/checkpoint/'+train_dataset.split+'/%d_model.pth'%(i))
torch.save({
'optimizer': optimizer.state_dict(),
'iter' : i,
'epoch' : epoch,
}, CHECKPOINTS +'/checkpoint/'+train_dataset.split+'/%d_optimizer.pth'%(i))
print('\n')
if __name__ == '__main__':
print("Training U-Net with hypercolumn concatenation and spatial/channel-wise excitation...")
train()
print('\tFinished!')
| 36.795349 | 142 | 0.518771 | import os
import sys
sys.path.append('../../')
from dependencies import *
from settings import *
from reproducibility import *
from models.TGS_salt.Unet34_scSE_hyper import Unet_scSE_hyper as Net
SIZE = 101
PAD = 27
Y0, Y1, X0, X1 = PAD,PAD+SIZE,PAD,PAD+SIZE,
def time_to_str(time, str):
return time
class Logger():
def __init__(self,path=None):
super().__init__()
self.path=path
def write(str):
print(str)
def valid_augment(image,mask,index):
cache = Struct(image = image.copy(), mask = mask.copy())
image, mask = do_resize2(image, mask, SIZE, SIZE)
image, mask = do_center_pad_to_factor2(image, mask)
return image,mask,index,cache
def train_augment(image,mask,index):
cache = Struct(image = image.copy(), mask = mask.copy())
if np.random.rand() < 0.5:
image, mask = do_horizontal_flip2(image, mask)
pass
if np.random.rand() < 0.5:
c = np.random.choice(4)
if c==0:
image, mask = do_random_shift_scale_crop_pad2(image, mask, 0.2)
if c==1:
image, mask = do_horizontal_shear2( image, mask, dx=np.random.uniform(-0.07,0.07) )
pass
if c==2:
image, mask = do_shift_scale_rotate2( image, mask, dx=0, dy=0, scale=1, angle=np.random.uniform(0,15))
if c==3:
image, mask = do_elastic_transform2(image, mask, grid=10, distort=np.random.uniform(0,0.15))
pass
if np.random.rand() < 0.5:
c = np.random.choice(3)
if c==0:
image = do_brightness_shift(image,np.random.uniform(-0.1,+0.1))
if c==1:
image = do_brightness_multiply(image,np.random.uniform(1-0.08,1+0.08))
if c==2:
image = do_gamma(image,np.random.uniform(1-0.08,1+0.08))
image, mask = do_resize2(image, mask, SIZE, SIZE)
image, mask = do_center_pad_to_factor2(image, mask)
return image,mask,index,cache
def validation( net, valid_loader ):
valid_num = 0
valid_loss = np.zeros(3,np.float32)
predicts = []
truths = []
for input, truth, index, cache in valid_loader:
input = input.cuda()
truth = truth.cuda()
with torch.no_grad():
logit = data_parallel(net,input)
prob = F.sigmoid(logit)
loss = net.criterion(logit, truth)
dice = net.metric(logit, truth)
batch_size = len(index)
valid_loss += batch_size*np.array(( loss.item(), dice.item(), 0))
valid_num += batch_size
prob = prob [:,:,Y0:Y1, X0:X1]
truth = truth[:,:,Y0:Y1, X0:X1]
prob = F.avg_pool2d(prob, kernel_size=2, stride=2)
truth = F.avg_pool2d(truth, kernel_size=2, stride=2)
predicts.append(prob.data.cpu().numpy())
truths.append(truth.data.cpu().numpy())
assert(valid_num == len(valid_loader.sampler))
valid_loss = valid_loss/valid_num
predicts = np.concatenate(predicts).squeeze()
truths = np.concatenate(truths).squeeze()
precision, result, threshold = do_kaggle_metric(predicts, truths)
valid_loss[2] = precision.mean()
return valid_loss
def train():
initial_checkpoint = None
dirs(CHECKPOINTS +'/backup', exist_ok=True)
log = Logger()
print('\n--- [START %s] %s\n\n' % (IDENTIFIER, '-' * 64))
print('\tSEED = %u\n' % SEED)
print('\tPROJECT_PATH = %s\n' % CODE)
print('\t__file__ = %s\n' % __file__)
print('\tRESULT = %s\n' % CHECKPOINTS)
print('\n')
print('\t<additional comments>\n')
print('\t ... \n')
print('\n')
ize = 16
train_dataset = TGSDataset('list_train8_3600', train_augment, 'train')
os.makedirs(CHECKPOINTS +'/list_train8_3600', exist_ok=True)
train_loader = DataLoader(
train_dataset,
sampler = RandomSampler(train_dataset),
batch_size = batch_size,
drop_last = True,
num_workers = 8,
pin_memory = True,
collate_fn = null_collate)
valid_dataset = TGSDataset('list_valid8_400', valid_augment, 'train')
valid_loader = DataLoader(
valid_dataset,
sampler = RandomSampler(valid_dataset),
batch_size = batch_size,
drop_last = False,
num_workers = 8,
pin_memory = True,
collate_fn = null_collate)
assert(len(train_dataset)>=batch_size)
print('batch_size = %d\n'%(batch_size))
print('train_dataset.split = %s\n'%(train_dataset.split))
print('valid_dataset.split = %s\n'%(valid_dataset.split))
print('\n')
if 0: images = input.cpu().data.numpy().squeeze()
masks = truth.cpu().data.numpy().squeeze()
batch_size = len(index)
for b in range(batch_size):
image = images[b]*255
image = np.dstack([image,image,image])
mask = masks[b]
image_show('image',image,resize=2)
image_show_norm('mask', mask, max=1,resize=2)
overlay0 = draw_mask_overlay(mask, image, color=[0,0,255])
overlay0 = draw_mask_to_contour_overlay(mask, overlay0, 2, color=[0,0,255])
image_show('overlay0',overlay0,resize=2)
cv2.waitKey(0)
net = Net().cuda()
if initial_checkpoint is not None:
print('\tinitial_checkpoint = %s\n' % initial_checkpoint)
net.load_state_dict(torch.load(initial_checkpoint, map_location=lambda storage, loc: storage))
print("The net is an instance of {}.".format(type(net)))
print('\n')
= 20
iter_log = 50
iter_valid = 100
epoch_save = np.arange(0,1500,10)
REEZE=False
if FREEZE: for p in net.feature_net.parameters():
p.requires_grad = False
scheduler = lambda x: (0.009/2)*(np.cos(PI*(np.mod(x-1,int(11.25*1000))/(int(11.25*1000))))+1)+0.001
print(scheduler(1))
print(scheduler(5000))
print(scheduler(10001))
zer = optim.SGD(filter(lambda p: p.requires_grad, net.parameters()),
lr=0.01, momentum=0.9, weight_decay=0.0001)
start_iter = 0
start_epoch= 0
if initial_checkpoint is not None:
checkpoint = torch.load(initial_checkpoint.replace('_model.pth','_optimizer.pth'))
start_iter = checkpoint['iter' ]
start_epoch = checkpoint['epoch']
rate = get_learning_rate(optimizer)
adjust_learning_rate(optimizer, rate)
pass
er
epoch = (iter-start_iter)*batch_size/len_train_dataset + start_epoch
num_samples = epoch*len_train_dataset
if iter % iter_valid==0:
net.set_mode('valid')
valid_loss = validation(net, valid_loader)
net.set_mode('train')
print('\r',end='',flush=True)
print('%0.4f %5.1f %6.1f | %0.3f %0.3f (%0.3f) | %0.3f %0.3f | %0.3f %0.3f | %s \n' % (\
rate, iter/1000, epoch,
valid_loss[0], valid_loss[1], valid_loss[2],
train_loss[0], train_loss[1],
batch_loss[0], batch_loss[1],
time_to_str((timer() - start),'min')))
time.sleep(0.01)
if round(epoch,1) == 0 or round(epoch,1) == 1 or round(epoch,1)+0.1 in epoch_save:
torch.save(net.state_dict(),CHECKPOINTS+"/"+train_dataset.split+'/%08d_model.pth'%(int(round(epoch,1)+0.1)))
torch.save({
'optimizer': optimizer.state_dict(),
'iter' : iter,
'epoch' : epoch,
}, CHECKPOINTS+"/"+train_dataset.split+'/%08d_optimizer.pth'%(int(round(epoch,1)+0.1)))
pass
if scheduler is not None:
lr = scheduler(iter)
if lr<0 : break
adjust_learning_rate(optimizer, lr)
rate = get_learning_rate(optimizer)
net.set_mode('train')
input = input.cuda()
truth = truth.cuda()
logit = data_parallel(net,input)
loss = net.criterion(logit, truth)
dice = net.metric(logit, truth)
loss.backward()
optimizer.step()
optimizer.zero_grad()
batch_loss = np.array((
loss.item(),
dice.item(),
0, 0, 0, 0,
))
sum_train_loss += batch_loss
sum += 1
if iter%iter_smooth == 0:
train_loss = sum_train_loss/sum
sum_train_loss = np.zeros(6,np.float32)
sum = 0
print('\r%0.4f %5.1f %6.1f | %0.3f %0.3f (%0.3f) | %0.3f %0.3f | %0.3f %0.3f | %s ' % (\
rate, iter/1000, epoch,
valid_loss[0], valid_loss[1], valid_loss[2],
train_loss[0], train_loss[1],
batch_loss[0], batch_loss[1],
time_to_str((timer() - start), 'min')), end='',flush=True)
i=i+1
if 0:
net.set_mode('test')
with torch.no_grad():
logit = net(input)
prob = F.sigmoid(logit)
loss = net.criterion(logit, truth)
dice = net.metric(logit, truth)
if 0:
loss = net.criterion(logit, truth)
accuracy,hit_rate,precision_rate = net.metric(logit, truth)
valid_loss[0] = loss.item()
valid_loss[1] = accuracy.item()
valid_loss[2] = hit_rate.item()
valid_loss[3] = precision_rate.item()
b = 1
prob = prob.data.cpu().numpy()[b].squeeze()
truth = truth.data.cpu().numpy()[b].squeeze()
input = input.data.cpu().numpy()[b].squeeze()
all = np.hstack([input,truth,prob])
image_show_norm('all',all,max=1,resize=3)
cv2.waitKey(100)
net.set_mode('train')
pass
pass
if 1:
torch.save(net.state_dict(),CHECKPOINTS +'/checkpoint/'+train_dataset.split+'/%d_model.pth'%(i))
torch.save({
'optimizer': optimizer.state_dict(),
'iter' : i,
'epoch' : epoch,
}, CHECKPOINTS +'/checkpoint/'+train_dataset.split+'/%d_optimizer.pth'%(i))
print('\n')
if __name__ == '__main__':
print("Training U-Net with hypercolumn concatenation and spatial/channel-wise excitation...")
train()
print('\tFinished!')
| true | true |
f73a33c99ded24c0e514bf4a0dbf736ff72a6c4f | 251 | py | Python | output/models/nist_data/list_pkg/float_pkg/schema_instance/nistschema_sv_iv_list_float_white_space_1_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/nist_data/list_pkg/float_pkg/schema_instance/nistschema_sv_iv_list_float_white_space_1_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/nist_data/list_pkg/float_pkg/schema_instance/nistschema_sv_iv_list_float_white_space_1_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from output.models.nist_data.list_pkg.float_pkg.schema_instance.nistschema_sv_iv_list_float_white_space_1_xsd.nistschema_sv_iv_list_float_white_space_1 import NistschemaSvIvListFloatWhiteSpace1
__all__ = [
"NistschemaSvIvListFloatWhiteSpace1",
]
| 41.833333 | 193 | 0.89243 | from output.models.nist_data.list_pkg.float_pkg.schema_instance.nistschema_sv_iv_list_float_white_space_1_xsd.nistschema_sv_iv_list_float_white_space_1 import NistschemaSvIvListFloatWhiteSpace1
__all__ = [
"NistschemaSvIvListFloatWhiteSpace1",
]
| true | true |
f73a351832d4fff1c6c577ae4418436252e29bfa | 395 | py | Python | WebServer/webserver/wsgi.py | dhairyaagrawal/SmartMirror | 7ffaf29a6ac31a710c9ae922d5d4fdaeb8025a88 | [
"MIT"
] | null | null | null | WebServer/webserver/wsgi.py | dhairyaagrawal/SmartMirror | 7ffaf29a6ac31a710c9ae922d5d4fdaeb8025a88 | [
"MIT"
] | null | null | null | WebServer/webserver/wsgi.py | dhairyaagrawal/SmartMirror | 7ffaf29a6ac31a710c9ae922d5d4fdaeb8025a88 | [
"MIT"
] | 5 | 2018-10-11T05:49:37.000Z | 2018-10-27T06:37:17.000Z | """
WSGI config for webserver project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'webserver.settings')
application = get_wsgi_application()
| 23.235294 | 78 | 0.787342 |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'webserver.settings')
application = get_wsgi_application()
| true | true |
f73a356fec902d8362ced3f8d2d9347e9796881a | 274 | py | Python | examples/sample_basilisk.py | larsbratholm/fragbuilder | e16cbcb190403b5fef49811abd11d16d7ef7fb30 | [
"BSD-2-Clause"
] | null | null | null | examples/sample_basilisk.py | larsbratholm/fragbuilder | e16cbcb190403b5fef49811abd11d16d7ef7fb30 | [
"BSD-2-Clause"
] | null | null | null | examples/sample_basilisk.py | larsbratholm/fragbuilder | e16cbcb190403b5fef49811abd11d16d7ef7fb30 | [
"BSD-2-Clause"
] | null | null | null | from __future__ import print_function
from fragbuilder import Basilisk_DBN
from fragbuilder import set_seed
set_seed(12)
dbn = Basilisk_DBN()
chi, bb, ll = dbn.get_sample("K")
print("Chi angles: ", chi)
print("Phi/Psi angles: ", bb)
print("Log likelihood: ", ll)
| 19.571429 | 37 | 0.722628 | from __future__ import print_function
from fragbuilder import Basilisk_DBN
from fragbuilder import set_seed
set_seed(12)
dbn = Basilisk_DBN()
chi, bb, ll = dbn.get_sample("K")
print("Chi angles: ", chi)
print("Phi/Psi angles: ", bb)
print("Log likelihood: ", ll)
| true | true |
f73a36ef236a0195fe5a8771954b392d3e16858c | 22,339 | py | Python | tensor2tensor/models/video/savp.py | shankharaj29/tensor2tensor | b89ba51a6fa9e0c20009cfb57ee8de04f7138392 | [
"Apache-2.0"
] | 2 | 2020-03-02T13:49:11.000Z | 2020-06-18T09:48:35.000Z | tensor2tensor/models/video/savp.py | PedroLelis/tensor2tensor | 5a867d031bd493eeb7d2776e1118d1594ff0a623 | [
"Apache-2.0"
] | 1 | 2019-01-21T10:57:47.000Z | 2019-01-21T10:57:47.000Z | tensor2tensor/models/video/savp.py | PedroLelis/tensor2tensor | 5a867d031bd493eeb7d2776e1118d1594ff0a623 | [
"Apache-2.0"
] | 3 | 2019-02-10T11:12:30.000Z | 2022-02-23T20:43:48.000Z | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Stochastic Adversarial Video Prediction model.
Reference: https://arxiv.org/abs/1804.01523
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numbers
import numpy as np
from tensor2tensor.layers import common_layers
from tensor2tensor.layers import common_video
from tensor2tensor.models.video import savp_params # pylint: disable=unused-import
from tensor2tensor.models.video import sv2p
from tensor2tensor.utils import registry
from tensor2tensor.utils import update_ops_hook
import tensorflow as tf
gan_losses = tf.contrib.gan.losses.wargs
class NextFrameSavpBase(object):
"""Main function for Stochastic Adversarial Video Prediction."""
def encoder(self, inputs, n_layers=3):
"""Convnet that encodes inputs into mean and std of a gaussian.
Args:
inputs: 5-D Tensor, shape (batch_size, num_frames, width, height, channels)
n_layers: Number of layers.
Returns:
z_mu: Mean of the latent gaussians.
z_log_var: log(var) of the latent gaussians.
Raises:
ValueError: If inputs is not a 5-D tensor or not float32.
"""
latent_dims = self.hparams.z_dim
shape_as_list = inputs.shape.as_list()
if len(shape_as_list) != 5:
raise ValueError("Expected inputs to be a 5-D, got %d" %
len(shape_as_list))
if inputs.dtype != tf.float32:
raise ValueError("Expected dtype tf.float32, got %s" % inputs.dtype)
# Flatten (N,T,W,H,C) into (NT,W,H,C)
batch_size, _ = shape_as_list[:2]
inputs = tf.reshape(inputs, [-1] + list(inputs.shape)[2:])
n_filters = 64
rectified = None
# Applies 3 layer conv-net with padding, instance normalization
# and leaky relu as per the encoder in
# https://github.com/alexlee-gk/video_prediction
padding = [[0, 0], [1, 1], [1, 1], [0, 0]]
for i in range(n_layers):
with tf.variable_scope("layer_%d" % (i + 1)):
n_filters *= 2**i
if i:
padded = tf.pad(rectified, padding)
else:
padded = tf.pad(inputs, padding)
convolved = tf.layers.conv2d(padded, filters=n_filters, kernel_size=4,
strides=2, padding="VALID")
normalized = tf.contrib.layers.instance_norm(convolved)
rectified = tf.nn.leaky_relu(normalized, alpha=0.2)
# Mean pooling across all spatial dimensions.
pooled = tf.nn.avg_pool(
rectified, [1] + rectified.shape[1:3].as_list() + [1],
strides=[1, 1, 1, 1], padding="VALID")
squeezed = tf.squeeze(pooled, [1, 2])
# Down-project and output the mean and log of the standard deviation of
# the latents.
with tf.variable_scope("z_mu"):
z_mu = tf.layers.dense(squeezed, latent_dims)
with tf.variable_scope("z_log_sigma_sq"):
z_log_var = tf.layers.dense(squeezed, latent_dims)
z_log_var = tf.clip_by_value(z_log_var, -10, 10)
# Reshape to (batch_size X num_frames X latent_dims)
z_mu = tf.reshape(z_mu, (batch_size, -1, latent_dims))
z_log_var = tf.reshape(
z_log_var, (batch_size, -1, latent_dims))
return z_mu, z_log_var
def expected_output_shape(self, input_shape, stride, padding, kernel_size):
return (input_shape + 2*padding - kernel_size) // stride + 1
def get_fc_dimensions(self, strides, kernel_sizes):
"""Get expected fully connected shape after a series of convolutions."""
output_height, output_width, _ = self.hparams.problem.frame_shape
output_steps = self.hparams.video_num_target_frames
output_shape = np.array([output_steps, output_height, output_width])
for curr_stride, kernel_size in zip(strides, kernel_sizes):
output_shape = self.expected_output_shape(
output_shape, np.array(curr_stride), 1, kernel_size)
return np.prod(output_shape) * self.hparams.num_discriminator_filters * 8
def discriminator(self, frames):
"""3-D SNGAN discriminator.
Args:
frames: a list of batch-major tensors indexed by time.
Returns:
logits: 1-D Tensor with shape=batch_size.
Positive logits imply that the discriminator thinks that it
belongs to the true class.
"""
ndf = self.hparams.num_discriminator_filters
frames = tf.stack(frames)
# Switch from time-major axis to batch-major axis.
frames = common_video.swap_time_and_batch_axes(frames)
# 3-D Conv-net mapping inputs to activations.
num_outputs = [ndf, ndf*2, ndf*2, ndf*4, ndf*4, ndf*8, ndf*8]
kernel_sizes = [3, 4, 3, 4, 3, 4, 3]
strides = [[1, 1, 1], [1, 2, 2], [1, 1, 1], [1, 2, 2], [1, 1, 1],
[2, 2, 2], [1, 1, 1]]
names = ["video_sn_conv0_0", "video_sn_conv0_1", "video_sn_conv1_0",
"video_sn_conv1_1", "video_sn_conv2_0", "video_sn_conv2_1",
"video_sn_conv3_0"]
iterable = zip(num_outputs, kernel_sizes, strides, names)
activations = frames
for num_filters, kernel_size, stride, name in iterable:
activations = self.pad_conv3d_lrelu(activations, num_filters, kernel_size,
stride, name)
num_fc_dimensions = self.get_fc_dimensions(strides, kernel_sizes)
activations = tf.reshape(activations, (-1, num_fc_dimensions))
return tf.squeeze(tf.layers.dense(activations, 1))
def d_step(self, true_frames, gen_frames):
"""Performs the discriminator step in computing the GAN loss.
Applies stop-gradient to the generated frames while computing the
discriminator loss to make sure that the gradients are not back-propagated
to the generator. This makes sure that only the discriminator is updated.
Args:
true_frames: True outputs
gen_frames: Generated frames.
Returns:
d_loss: Loss component due to the discriminator.
"""
hparam_to_disc_loss = {
"least_squares": gan_losses.least_squares_discriminator_loss,
"cross_entropy": gan_losses.modified_discriminator_loss,
"wasserstein": gan_losses.wasserstein_discriminator_loss}
# Concat across batch-axis.
_, batch_size, _, _, _ = common_layers.shape_list(true_frames)
all_frames = tf.concat(
[true_frames, tf.stop_gradient(gen_frames)], axis=1)
all_logits = self.discriminator(all_frames)
true_logits, fake_logits_stop = \
all_logits[:batch_size], all_logits[batch_size:]
mean_true_logits = tf.reduce_mean(true_logits)
tf.summary.scalar("mean_true_logits", mean_true_logits)
mean_fake_logits_stop = tf.reduce_mean(fake_logits_stop)
tf.summary.scalar("mean_fake_logits_stop", mean_fake_logits_stop)
discriminator_loss_func = hparam_to_disc_loss[self.hparams.gan_loss]
gan_d_loss = discriminator_loss_func(
discriminator_real_outputs=true_logits,
discriminator_gen_outputs=fake_logits_stop,
add_summaries=True)
return gan_d_loss, true_logits, fake_logits_stop
def g_step(self, gen_frames, fake_logits_stop):
"""Performs the generator step in computing the GAN loss.
Args:
gen_frames: Generated frames
fake_logits_stop: Logits corresponding to the generated frames as per
the discriminator. Assumed to have a stop-gradient term.
Returns:
gan_g_loss_pos_d: Loss.
gan_g_loss_neg_d: -gan_g_loss_pos_d but with a stop gradient on generator.
"""
hparam_to_gen_loss = {
"least_squares": gan_losses.least_squares_generator_loss,
"cross_entropy": gan_losses.modified_generator_loss,
"wasserstein": gan_losses.wasserstein_generator_loss
}
fake_logits = self.discriminator(gen_frames)
mean_fake_logits = tf.reduce_mean(fake_logits)
tf.summary.scalar("mean_fake_logits", mean_fake_logits)
# Generator loss.
# Using gan_g_loss_pos_d updates the discriminator as well.
# To avoid this add gan_g_loss_neg_d = -gan_g_loss_pos_d
# but with stop gradient on the generator.
# This makes sure that the net gradient on the discriminator is zero and
# net-gradient on the generator is just due to the gan_g_loss_pos_d.
generator_loss_func = hparam_to_gen_loss[self.hparams.gan_loss]
gan_g_loss_pos_d = generator_loss_func(
discriminator_gen_outputs=fake_logits, add_summaries=True)
gan_g_loss_neg_d = -generator_loss_func(
discriminator_gen_outputs=fake_logits_stop, add_summaries=True)
return gan_g_loss_pos_d, gan_g_loss_neg_d
def get_gan_loss(self, true_frames, gen_frames, name):
"""Get the discriminator + generator loss at every step.
This performs an 1:1 update of the discriminator and generator at every
step.
Args:
true_frames: 5-D Tensor of shape (num_steps, batch_size, H, W, C)
Assumed to be ground truth.
gen_frames: 5-D Tensor of shape (num_steps, batch_size, H, W, C)
Assumed to be fake.
name: discriminator scope.
Returns:
loss: 0-D Tensor, with d_loss + g_loss
"""
# D - STEP
with tf.variable_scope("%s_discriminator" % name, reuse=tf.AUTO_REUSE):
gan_d_loss, _, fake_logits_stop = self.d_step(
true_frames, gen_frames)
# G - STEP
with tf.variable_scope("%s_discriminator" % name, reuse=True):
gan_g_loss_pos_d, gan_g_loss_neg_d = self.g_step(
gen_frames, fake_logits_stop)
gan_g_loss = gan_g_loss_pos_d + gan_g_loss_neg_d
tf.summary.scalar("gan_loss_%s" % name, gan_g_loss_pos_d + gan_d_loss)
if self.hparams.gan_optimization == "joint":
gan_loss = gan_g_loss + gan_d_loss
else:
curr_step = self.get_iteration_num()
gan_loss = tf.cond(
tf.logical_not(curr_step % 2 == 0), lambda: gan_g_loss,
lambda: gan_d_loss)
return gan_loss
def get_extra_loss(self, latent_means=None, latent_stds=None,
true_frames=None, gen_frames=None):
"""Gets extra loss from VAE and GAN."""
if not self.is_training:
return 0.0
vae_loss, d_vae_loss, d_gan_loss = 0.0, 0.0, 0.0
# Use sv2p's KL divergence computation.
if self.hparams.use_vae:
vae_loss = super(NextFrameSavpBase, self).get_extra_loss(
latent_means=latent_means, latent_stds=latent_stds)
if self.hparams.use_gan:
# Strip out the first context_frames for the true_frames
# Strip out the first context_frames - 1 for the gen_frames
context_frames = self.hparams.video_num_input_frames
true_frames = tf.stack(
tf.unstack(true_frames, axis=0)[context_frames:])
# discriminator for VAE.
if self.hparams.use_vae:
gen_enc_frames = tf.stack(
tf.unstack(gen_frames, axis=0)[context_frames-1:])
d_vae_loss = self.get_gan_loss(true_frames, gen_enc_frames, name="vae")
# discriminator for GAN.
gen_prior_frames = tf.stack(
tf.unstack(self.gen_prior_video, axis=0)[context_frames-1:])
d_gan_loss = self.get_gan_loss(true_frames, gen_prior_frames, name="gan")
return (
vae_loss + self.hparams.gan_loss_multiplier * d_gan_loss +
self.hparams.gan_vae_loss_multiplier * d_vae_loss)
def pad_conv3d_lrelu(self, activations, n_filters, kernel_size, strides,
scope):
"""Pad, apply 3-D convolution and leaky relu."""
padding = [[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]]
# tf.nn.conv3d accepts a list of 5 values for strides
# with first and last value equal to 1
if isinstance(strides, numbers.Integral):
strides = [strides] * 3
strides = [1] + strides + [1]
# Filter_shape = [K, K, K, num_input, num_output]
filter_shape = (
[kernel_size]*3 + activations.shape[-1:].as_list() + [n_filters])
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
conv_filter = tf.get_variable(
"conv_filter", shape=filter_shape,
initializer=tf.truncated_normal_initializer(stddev=0.02))
if self.hparams.use_spectral_norm:
conv_filter, assign_op = common_layers.apply_spectral_norm(conv_filter)
if self.is_training:
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, assign_op)
padded = tf.pad(activations, padding)
convolved = tf.nn.conv3d(
padded, conv_filter, strides=strides, padding="VALID")
rectified = tf.nn.leaky_relu(convolved, alpha=0.2)
return rectified
@staticmethod
def train_hooks(hook_context):
del hook_context
return [update_ops_hook.UpdateOpsHook()]
@registry.register_model
class NextFrameSAVP(NextFrameSavpBase, sv2p.NextFrameSv2pLegacy):
"""Stochastic Adversarial Video Prediction."""
def construct_model(self, images, actions, rewards):
"""Model that takes in images and returns predictions.
Args:
images: list of 4-D Tensors indexed by time.
(batch_size, width, height, channels)
actions: list of action tensors
each action should be in the shape ?x1xZ
rewards: list of reward tensors
each reward should be in the shape ?x1xZ
Returns:
video: list of 4-D predicted frames.
all_rewards: predicted rewards.
latent_means: list of gaussian means conditioned on the input at
every frame.
latent_stds: list of gaussian stds conditioned on the input at
every frame.
Raises:
ValueError: If not exactly one of self.hparams.vae or self.hparams.gan
is set to True.
"""
if not self.hparams.use_vae and not self.hparams.use_gan:
raise ValueError("Set at least one of use_vae or use_gan to be True")
if self.hparams.gan_optimization not in ["joint", "sequential"]:
raise ValueError("self.hparams.gan_optimization should be either joint "
"or sequential got %s" % self.hparams.gan_optimization)
images = tf.unstack(images, axis=0)
actions = tf.unstack(actions, axis=0)
rewards = tf.unstack(rewards, axis=0)
latent_dims = self.hparams.z_dim
context_frames = self.hparams.video_num_input_frames
seq_len = len(images)
input_shape = common_layers.shape_list(images[0])
batch_size = input_shape[0]
# Model does not support reward-conditioned frame generation.
fake_rewards = rewards[:-1]
# Concatenate x_{t-1} and x_{t} along depth and encode it to
# produce the mean and standard deviation of z_{t-1}
image_pairs = tf.concat([images[:seq_len - 1],
images[1:seq_len]], axis=-1)
z_mu, z_log_sigma_sq = self.encoder(image_pairs)
# Unstack z_mu and z_log_sigma_sq along the time dimension.
z_mu = tf.unstack(z_mu, axis=0)
z_log_sigma_sq = tf.unstack(z_log_sigma_sq, axis=0)
iterable = zip(images[:-1], actions[:-1], fake_rewards,
z_mu, z_log_sigma_sq)
# Initialize LSTM State
lstm_state = [None] * 7
gen_cond_video, gen_prior_video, all_rewards, latent_means, latent_stds = \
[], [], [], [], []
pred_image = tf.zeros_like(images[0])
prior_latent_state, cond_latent_state = None, None
train_mode = self.hparams.mode == tf.estimator.ModeKeys.TRAIN
# Create scheduled sampling function
ss_func = self.get_scheduled_sample_func(batch_size)
with tf.variable_scope("prediction", reuse=tf.AUTO_REUSE):
for step, (image, action, reward, mu, log_sigma_sq) in enumerate(iterable): # pylint:disable=line-too-long
# Sample latents using a gaussian centered at conditional mu and std.
latent = common_video.get_gaussian_tensor(mu, log_sigma_sq)
# Sample prior latents from isotropic normal distribution.
prior_latent = tf.random_normal(tf.shape(latent), dtype=tf.float32)
# LSTM that encodes correlations between conditional latents.
# Pg 22 in https://arxiv.org/pdf/1804.01523.pdf
enc_cond_latent, cond_latent_state = common_video.basic_lstm(
latent, cond_latent_state, latent_dims, name="cond_latent")
# LSTM that encodes correlations between prior latents.
enc_prior_latent, prior_latent_state = common_video.basic_lstm(
prior_latent, prior_latent_state, latent_dims, name="prior_latent")
# Scheduled Sampling
done_warm_start = step > context_frames - 1
groundtruth_items = [image]
generated_items = [pred_image]
input_image, = self.get_scheduled_sample_inputs(
done_warm_start, groundtruth_items, generated_items, ss_func)
all_latents = tf.concat([enc_cond_latent, enc_prior_latent], axis=0)
all_image = tf.concat([input_image, input_image], axis=0)
all_action = tf.concat([action, action], axis=0)
all_rewards = tf.concat([reward, reward], axis=0)
all_pred_images, lstm_state, _ = self.construct_predictive_tower(
all_image, all_rewards, all_action, lstm_state, all_latents,
concat_latent=True)
cond_pred_images, prior_pred_images = \
all_pred_images[:batch_size], all_pred_images[batch_size:]
if train_mode and self.hparams.use_vae:
pred_image = cond_pred_images
else:
pred_image = prior_pred_images
gen_cond_video.append(cond_pred_images)
gen_prior_video.append(prior_pred_images)
latent_means.append(mu)
latent_stds.append(log_sigma_sq)
gen_cond_video = tf.stack(gen_cond_video, axis=0)
self.gen_prior_video = tf.stack(gen_prior_video, axis=0)
fake_rewards = tf.stack(fake_rewards, axis=0)
if train_mode and self.hparams.use_vae:
return gen_cond_video, fake_rewards, latent_means, latent_stds
else:
return self.gen_prior_video, fake_rewards, latent_means, latent_stds
@registry.register_model
class NextFrameSavpRl(NextFrameSavpBase, sv2p.NextFrameSv2p):
"""Stochastic Adversarial Video Prediction for RL pipeline."""
def video_features(
self, all_frames, all_actions, all_rewards, all_raw_frames):
"""No video wide feature."""
del all_actions, all_rewards, all_raw_frames
# Concatenate x_{t-1} and x_{t} along depth and encode it to
# produce the mean and standard deviation of z_{t-1}
seq_len = len(all_frames)
image_pairs = tf.concat([all_frames[:seq_len-1],
all_frames[1:seq_len]], axis=-1)
z_mu, z_log_sigma_sq = self.encoder(image_pairs)
# Unstack z_mu and z_log_sigma_sq along the time dimension.
z_mu = tf.unstack(z_mu, axis=0)
z_log_sigma_sq = tf.unstack(z_log_sigma_sq, axis=0)
return [z_mu, z_log_sigma_sq]
def video_extra_loss(self, frames_predicted, frames_target,
internal_states, video_features):
if not self.is_training:
return 0.0
latent_means, latent_stds = video_features
true_frames, gen_frames = frames_target, frames_predicted
loss = super(NextFrameSavpRl, self).get_extra_loss(
latent_means=latent_means, latent_stds=latent_stds,
true_frames=true_frames, gen_frames=gen_frames)
return loss
def next_frame(self, frames, actions, rewards, target_frame,
internal_states, video_features):
del target_frame
if not self.hparams.use_vae or self.hparams.use_gan:
raise NotImplementedError("Only supporting VAE for now.")
if self.has_pred_actions or self.has_values:
raise NotImplementedError("Parameter sharing with policy not supported.")
image, action, reward = frames[0], actions[0], rewards[0]
latent_dims = self.hparams.z_dim
batch_size = common_layers.shape_list(image)[0]
if internal_states is None:
# Initialize LSTM State
frame_index = 0
lstm_state = [None] * 7
cond_latent_state, prior_latent_state = None, None
gen_prior_video = []
else:
(frame_index, lstm_state, cond_latent_state,
prior_latent_state, gen_prior_video) = internal_states
z_mu, log_sigma_sq = video_features
z_mu, log_sigma_sq = z_mu[frame_index], log_sigma_sq[frame_index]
# Sample latents using a gaussian centered at conditional mu and std.
latent = common_video.get_gaussian_tensor(z_mu, log_sigma_sq)
# Sample prior latents from isotropic normal distribution.
prior_latent = tf.random_normal(tf.shape(latent), dtype=tf.float32)
# # LSTM that encodes correlations between conditional latents.
# # Pg 22 in https://arxiv.org/pdf/1804.01523.pdf
enc_cond_latent, cond_latent_state = common_video.basic_lstm(
latent, cond_latent_state, latent_dims, name="cond_latent")
# LSTM that encodes correlations between prior latents.
enc_prior_latent, prior_latent_state = common_video.basic_lstm(
prior_latent, prior_latent_state, latent_dims, name="prior_latent")
all_latents = tf.concat([enc_cond_latent, enc_prior_latent], axis=0)
all_image = tf.concat([image, image], 0)
all_action = tf.concat([action, action], 0) if self.has_actions else None
all_pred_images, lstm_state = self.construct_predictive_tower(
all_image, None, all_action, lstm_state, all_latents,
concat_latent=True)
cond_pred_images, prior_pred_images = \
all_pred_images[:batch_size], all_pred_images[batch_size:]
if self.is_training and self.hparams.use_vae:
pred_image = cond_pred_images
else:
pred_image = prior_pred_images
gen_prior_video.append(prior_pred_images)
internal_states = (frame_index + 1, lstm_state, cond_latent_state,
prior_latent_state, gen_prior_video)
if not self.has_rewards:
return pred_image, None, 0.0, internal_states
pred_reward = self.reward_prediction(
pred_image, action, reward, latent)
return pred_image, pred_reward, None, None, 0.0, internal_states
| 39.74911 | 113 | 0.696316 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numbers
import numpy as np
from tensor2tensor.layers import common_layers
from tensor2tensor.layers import common_video
from tensor2tensor.models.video import savp_params
from tensor2tensor.models.video import sv2p
from tensor2tensor.utils import registry
from tensor2tensor.utils import update_ops_hook
import tensorflow as tf
gan_losses = tf.contrib.gan.losses.wargs
class NextFrameSavpBase(object):
def encoder(self, inputs, n_layers=3):
latent_dims = self.hparams.z_dim
shape_as_list = inputs.shape.as_list()
if len(shape_as_list) != 5:
raise ValueError("Expected inputs to be a 5-D, got %d" %
len(shape_as_list))
if inputs.dtype != tf.float32:
raise ValueError("Expected dtype tf.float32, got %s" % inputs.dtype)
batch_size, _ = shape_as_list[:2]
inputs = tf.reshape(inputs, [-1] + list(inputs.shape)[2:])
n_filters = 64
rectified = None
padding = [[0, 0], [1, 1], [1, 1], [0, 0]]
for i in range(n_layers):
with tf.variable_scope("layer_%d" % (i + 1)):
n_filters *= 2**i
if i:
padded = tf.pad(rectified, padding)
else:
padded = tf.pad(inputs, padding)
convolved = tf.layers.conv2d(padded, filters=n_filters, kernel_size=4,
strides=2, padding="VALID")
normalized = tf.contrib.layers.instance_norm(convolved)
rectified = tf.nn.leaky_relu(normalized, alpha=0.2)
pooled = tf.nn.avg_pool(
rectified, [1] + rectified.shape[1:3].as_list() + [1],
strides=[1, 1, 1, 1], padding="VALID")
squeezed = tf.squeeze(pooled, [1, 2])
with tf.variable_scope("z_mu"):
z_mu = tf.layers.dense(squeezed, latent_dims)
with tf.variable_scope("z_log_sigma_sq"):
z_log_var = tf.layers.dense(squeezed, latent_dims)
z_log_var = tf.clip_by_value(z_log_var, -10, 10)
z_mu = tf.reshape(z_mu, (batch_size, -1, latent_dims))
z_log_var = tf.reshape(
z_log_var, (batch_size, -1, latent_dims))
return z_mu, z_log_var
def expected_output_shape(self, input_shape, stride, padding, kernel_size):
return (input_shape + 2*padding - kernel_size) // stride + 1
def get_fc_dimensions(self, strides, kernel_sizes):
output_height, output_width, _ = self.hparams.problem.frame_shape
output_steps = self.hparams.video_num_target_frames
output_shape = np.array([output_steps, output_height, output_width])
for curr_stride, kernel_size in zip(strides, kernel_sizes):
output_shape = self.expected_output_shape(
output_shape, np.array(curr_stride), 1, kernel_size)
return np.prod(output_shape) * self.hparams.num_discriminator_filters * 8
def discriminator(self, frames):
ndf = self.hparams.num_discriminator_filters
frames = tf.stack(frames)
frames = common_video.swap_time_and_batch_axes(frames)
num_outputs = [ndf, ndf*2, ndf*2, ndf*4, ndf*4, ndf*8, ndf*8]
kernel_sizes = [3, 4, 3, 4, 3, 4, 3]
strides = [[1, 1, 1], [1, 2, 2], [1, 1, 1], [1, 2, 2], [1, 1, 1],
[2, 2, 2], [1, 1, 1]]
names = ["video_sn_conv0_0", "video_sn_conv0_1", "video_sn_conv1_0",
"video_sn_conv1_1", "video_sn_conv2_0", "video_sn_conv2_1",
"video_sn_conv3_0"]
iterable = zip(num_outputs, kernel_sizes, strides, names)
activations = frames
for num_filters, kernel_size, stride, name in iterable:
activations = self.pad_conv3d_lrelu(activations, num_filters, kernel_size,
stride, name)
num_fc_dimensions = self.get_fc_dimensions(strides, kernel_sizes)
activations = tf.reshape(activations, (-1, num_fc_dimensions))
return tf.squeeze(tf.layers.dense(activations, 1))
def d_step(self, true_frames, gen_frames):
hparam_to_disc_loss = {
"least_squares": gan_losses.least_squares_discriminator_loss,
"cross_entropy": gan_losses.modified_discriminator_loss,
"wasserstein": gan_losses.wasserstein_discriminator_loss}
_, batch_size, _, _, _ = common_layers.shape_list(true_frames)
all_frames = tf.concat(
[true_frames, tf.stop_gradient(gen_frames)], axis=1)
all_logits = self.discriminator(all_frames)
true_logits, fake_logits_stop = \
all_logits[:batch_size], all_logits[batch_size:]
mean_true_logits = tf.reduce_mean(true_logits)
tf.summary.scalar("mean_true_logits", mean_true_logits)
mean_fake_logits_stop = tf.reduce_mean(fake_logits_stop)
tf.summary.scalar("mean_fake_logits_stop", mean_fake_logits_stop)
discriminator_loss_func = hparam_to_disc_loss[self.hparams.gan_loss]
gan_d_loss = discriminator_loss_func(
discriminator_real_outputs=true_logits,
discriminator_gen_outputs=fake_logits_stop,
add_summaries=True)
return gan_d_loss, true_logits, fake_logits_stop
def g_step(self, gen_frames, fake_logits_stop):
hparam_to_gen_loss = {
"least_squares": gan_losses.least_squares_generator_loss,
"cross_entropy": gan_losses.modified_generator_loss,
"wasserstein": gan_losses.wasserstein_generator_loss
}
fake_logits = self.discriminator(gen_frames)
mean_fake_logits = tf.reduce_mean(fake_logits)
tf.summary.scalar("mean_fake_logits", mean_fake_logits)
generator_loss_func = hparam_to_gen_loss[self.hparams.gan_loss]
gan_g_loss_pos_d = generator_loss_func(
discriminator_gen_outputs=fake_logits, add_summaries=True)
gan_g_loss_neg_d = -generator_loss_func(
discriminator_gen_outputs=fake_logits_stop, add_summaries=True)
return gan_g_loss_pos_d, gan_g_loss_neg_d
def get_gan_loss(self, true_frames, gen_frames, name):
with tf.variable_scope("%s_discriminator" % name, reuse=tf.AUTO_REUSE):
gan_d_loss, _, fake_logits_stop = self.d_step(
true_frames, gen_frames)
with tf.variable_scope("%s_discriminator" % name, reuse=True):
gan_g_loss_pos_d, gan_g_loss_neg_d = self.g_step(
gen_frames, fake_logits_stop)
gan_g_loss = gan_g_loss_pos_d + gan_g_loss_neg_d
tf.summary.scalar("gan_loss_%s" % name, gan_g_loss_pos_d + gan_d_loss)
if self.hparams.gan_optimization == "joint":
gan_loss = gan_g_loss + gan_d_loss
else:
curr_step = self.get_iteration_num()
gan_loss = tf.cond(
tf.logical_not(curr_step % 2 == 0), lambda: gan_g_loss,
lambda: gan_d_loss)
return gan_loss
def get_extra_loss(self, latent_means=None, latent_stds=None,
true_frames=None, gen_frames=None):
if not self.is_training:
return 0.0
vae_loss, d_vae_loss, d_gan_loss = 0.0, 0.0, 0.0
if self.hparams.use_vae:
vae_loss = super(NextFrameSavpBase, self).get_extra_loss(
latent_means=latent_means, latent_stds=latent_stds)
if self.hparams.use_gan:
# Strip out the first context_frames for the true_frames
# Strip out the first context_frames - 1 for the gen_frames
context_frames = self.hparams.video_num_input_frames
true_frames = tf.stack(
tf.unstack(true_frames, axis=0)[context_frames:])
# discriminator for VAE.
if self.hparams.use_vae:
gen_enc_frames = tf.stack(
tf.unstack(gen_frames, axis=0)[context_frames-1:])
d_vae_loss = self.get_gan_loss(true_frames, gen_enc_frames, name="vae")
# discriminator for GAN.
gen_prior_frames = tf.stack(
tf.unstack(self.gen_prior_video, axis=0)[context_frames-1:])
d_gan_loss = self.get_gan_loss(true_frames, gen_prior_frames, name="gan")
return (
vae_loss + self.hparams.gan_loss_multiplier * d_gan_loss +
self.hparams.gan_vae_loss_multiplier * d_vae_loss)
def pad_conv3d_lrelu(self, activations, n_filters, kernel_size, strides,
scope):
padding = [[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]]
# tf.nn.conv3d accepts a list of 5 values for strides
# with first and last value equal to 1
if isinstance(strides, numbers.Integral):
strides = [strides] * 3
strides = [1] + strides + [1]
# Filter_shape = [K, K, K, num_input, num_output]
filter_shape = (
[kernel_size]*3 + activations.shape[-1:].as_list() + [n_filters])
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
conv_filter = tf.get_variable(
"conv_filter", shape=filter_shape,
initializer=tf.truncated_normal_initializer(stddev=0.02))
if self.hparams.use_spectral_norm:
conv_filter, assign_op = common_layers.apply_spectral_norm(conv_filter)
if self.is_training:
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, assign_op)
padded = tf.pad(activations, padding)
convolved = tf.nn.conv3d(
padded, conv_filter, strides=strides, padding="VALID")
rectified = tf.nn.leaky_relu(convolved, alpha=0.2)
return rectified
@staticmethod
def train_hooks(hook_context):
del hook_context
return [update_ops_hook.UpdateOpsHook()]
@registry.register_model
class NextFrameSAVP(NextFrameSavpBase, sv2p.NextFrameSv2pLegacy):
def construct_model(self, images, actions, rewards):
if not self.hparams.use_vae and not self.hparams.use_gan:
raise ValueError("Set at least one of use_vae or use_gan to be True")
if self.hparams.gan_optimization not in ["joint", "sequential"]:
raise ValueError("self.hparams.gan_optimization should be either joint "
"or sequential got %s" % self.hparams.gan_optimization)
images = tf.unstack(images, axis=0)
actions = tf.unstack(actions, axis=0)
rewards = tf.unstack(rewards, axis=0)
latent_dims = self.hparams.z_dim
context_frames = self.hparams.video_num_input_frames
seq_len = len(images)
input_shape = common_layers.shape_list(images[0])
batch_size = input_shape[0]
# Model does not support reward-conditioned frame generation.
fake_rewards = rewards[:-1]
# Concatenate x_{t-1} and x_{t} along depth and encode it to
# produce the mean and standard deviation of z_{t-1}
image_pairs = tf.concat([images[:seq_len - 1],
images[1:seq_len]], axis=-1)
z_mu, z_log_sigma_sq = self.encoder(image_pairs)
# Unstack z_mu and z_log_sigma_sq along the time dimension.
z_mu = tf.unstack(z_mu, axis=0)
z_log_sigma_sq = tf.unstack(z_log_sigma_sq, axis=0)
iterable = zip(images[:-1], actions[:-1], fake_rewards,
z_mu, z_log_sigma_sq)
# Initialize LSTM State
lstm_state = [None] * 7
gen_cond_video, gen_prior_video, all_rewards, latent_means, latent_stds = \
[], [], [], [], []
pred_image = tf.zeros_like(images[0])
prior_latent_state, cond_latent_state = None, None
train_mode = self.hparams.mode == tf.estimator.ModeKeys.TRAIN
# Create scheduled sampling function
ss_func = self.get_scheduled_sample_func(batch_size)
with tf.variable_scope("prediction", reuse=tf.AUTO_REUSE):
for step, (image, action, reward, mu, log_sigma_sq) in enumerate(iterable): # pylint:disable=line-too-long
# Sample latents using a gaussian centered at conditional mu and std.
latent = common_video.get_gaussian_tensor(mu, log_sigma_sq)
# Sample prior latents from isotropic normal distribution.
prior_latent = tf.random_normal(tf.shape(latent), dtype=tf.float32)
# LSTM that encodes correlations between conditional latents.
# Pg 22 in https://arxiv.org/pdf/1804.01523.pdf
enc_cond_latent, cond_latent_state = common_video.basic_lstm(
latent, cond_latent_state, latent_dims, name="cond_latent")
# LSTM that encodes correlations between prior latents.
enc_prior_latent, prior_latent_state = common_video.basic_lstm(
prior_latent, prior_latent_state, latent_dims, name="prior_latent")
# Scheduled Sampling
done_warm_start = step > context_frames - 1
groundtruth_items = [image]
generated_items = [pred_image]
input_image, = self.get_scheduled_sample_inputs(
done_warm_start, groundtruth_items, generated_items, ss_func)
all_latents = tf.concat([enc_cond_latent, enc_prior_latent], axis=0)
all_image = tf.concat([input_image, input_image], axis=0)
all_action = tf.concat([action, action], axis=0)
all_rewards = tf.concat([reward, reward], axis=0)
all_pred_images, lstm_state, _ = self.construct_predictive_tower(
all_image, all_rewards, all_action, lstm_state, all_latents,
concat_latent=True)
cond_pred_images, prior_pred_images = \
all_pred_images[:batch_size], all_pred_images[batch_size:]
if train_mode and self.hparams.use_vae:
pred_image = cond_pred_images
else:
pred_image = prior_pred_images
gen_cond_video.append(cond_pred_images)
gen_prior_video.append(prior_pred_images)
latent_means.append(mu)
latent_stds.append(log_sigma_sq)
gen_cond_video = tf.stack(gen_cond_video, axis=0)
self.gen_prior_video = tf.stack(gen_prior_video, axis=0)
fake_rewards = tf.stack(fake_rewards, axis=0)
if train_mode and self.hparams.use_vae:
return gen_cond_video, fake_rewards, latent_means, latent_stds
else:
return self.gen_prior_video, fake_rewards, latent_means, latent_stds
@registry.register_model
class NextFrameSavpRl(NextFrameSavpBase, sv2p.NextFrameSv2p):
def video_features(
self, all_frames, all_actions, all_rewards, all_raw_frames):
del all_actions, all_rewards, all_raw_frames
# Concatenate x_{t-1} and x_{t} along depth and encode it to
# produce the mean and standard deviation of z_{t-1}
seq_len = len(all_frames)
image_pairs = tf.concat([all_frames[:seq_len-1],
all_frames[1:seq_len]], axis=-1)
z_mu, z_log_sigma_sq = self.encoder(image_pairs)
# Unstack z_mu and z_log_sigma_sq along the time dimension.
z_mu = tf.unstack(z_mu, axis=0)
z_log_sigma_sq = tf.unstack(z_log_sigma_sq, axis=0)
return [z_mu, z_log_sigma_sq]
def video_extra_loss(self, frames_predicted, frames_target,
internal_states, video_features):
if not self.is_training:
return 0.0
latent_means, latent_stds = video_features
true_frames, gen_frames = frames_target, frames_predicted
loss = super(NextFrameSavpRl, self).get_extra_loss(
latent_means=latent_means, latent_stds=latent_stds,
true_frames=true_frames, gen_frames=gen_frames)
return loss
def next_frame(self, frames, actions, rewards, target_frame,
internal_states, video_features):
del target_frame
if not self.hparams.use_vae or self.hparams.use_gan:
raise NotImplementedError("Only supporting VAE for now.")
if self.has_pred_actions or self.has_values:
raise NotImplementedError("Parameter sharing with policy not supported.")
image, action, reward = frames[0], actions[0], rewards[0]
latent_dims = self.hparams.z_dim
batch_size = common_layers.shape_list(image)[0]
if internal_states is None:
# Initialize LSTM State
frame_index = 0
lstm_state = [None] * 7
cond_latent_state, prior_latent_state = None, None
gen_prior_video = []
else:
(frame_index, lstm_state, cond_latent_state,
prior_latent_state, gen_prior_video) = internal_states
z_mu, log_sigma_sq = video_features
z_mu, log_sigma_sq = z_mu[frame_index], log_sigma_sq[frame_index]
# Sample latents using a gaussian centered at conditional mu and std.
latent = common_video.get_gaussian_tensor(z_mu, log_sigma_sq)
# Sample prior latents from isotropic normal distribution.
prior_latent = tf.random_normal(tf.shape(latent), dtype=tf.float32)
# # LSTM that encodes correlations between conditional latents.
# # Pg 22 in https://arxiv.org/pdf/1804.01523.pdf
enc_cond_latent, cond_latent_state = common_video.basic_lstm(
latent, cond_latent_state, latent_dims, name="cond_latent")
# LSTM that encodes correlations between prior latents.
enc_prior_latent, prior_latent_state = common_video.basic_lstm(
prior_latent, prior_latent_state, latent_dims, name="prior_latent")
all_latents = tf.concat([enc_cond_latent, enc_prior_latent], axis=0)
all_image = tf.concat([image, image], 0)
all_action = tf.concat([action, action], 0) if self.has_actions else None
all_pred_images, lstm_state = self.construct_predictive_tower(
all_image, None, all_action, lstm_state, all_latents,
concat_latent=True)
cond_pred_images, prior_pred_images = \
all_pred_images[:batch_size], all_pred_images[batch_size:]
if self.is_training and self.hparams.use_vae:
pred_image = cond_pred_images
else:
pred_image = prior_pred_images
gen_prior_video.append(prior_pred_images)
internal_states = (frame_index + 1, lstm_state, cond_latent_state,
prior_latent_state, gen_prior_video)
if not self.has_rewards:
return pred_image, None, 0.0, internal_states
pred_reward = self.reward_prediction(
pred_image, action, reward, latent)
return pred_image, pred_reward, None, None, 0.0, internal_states
| true | true |
f73a37620969906a2512d0cda3e81d2fb8bd9953 | 26,800 | py | Python | pycroscopy/analysis/fitter.py | ealopez/pycroscopy | 9f7c0543b67eaa0668296295fc5f492360c130a0 | [
"MIT"
] | null | null | null | pycroscopy/analysis/fitter.py | ealopez/pycroscopy | 9f7c0543b67eaa0668296295fc5f492360c130a0 | [
"MIT"
] | null | null | null | pycroscopy/analysis/fitter.py | ealopez/pycroscopy | 9f7c0543b67eaa0668296295fc5f492360c130a0 | [
"MIT"
] | null | null | null | """
Created on 7/17/16 10:08 AM
@author: Numan Laanait, Suhas Somnath, Chris Smith
"""
from __future__ import division, print_function, absolute_import, unicode_literals
import numpy as np
import psutil
import scipy
import h5py
import time as tm
from .guess_methods import GuessMethods
from .fit_methods import Fit_Methods
from ..core.io.pycro_data import PycroDataset
from ..core.io.io_utils import get_available_memory, recommend_cpu_cores, format_time
from ..core.io.hdf_utils import check_for_old, find_results_groups, check_for_matching_attrs, get_attr
from .optimize import Optimize
class Fitter(object):
"""
Encapsulates the typical routines performed during model-dependent analysis of data.
This abstract class should be extended to cover different types of imaging modalities.
"""
def __init__(self, h5_main, variables=['Frequency'], parallel=True, verbose=False):
"""
For now, we assume that the guess dataset has not been generated for this dataset but we will relax this
requirement after testing the basic components.
Parameters
----------
h5_main : h5py.Dataset instance
The dataset over which the analysis will be performed. This dataset should be linked to the spectroscopic
indices and values, and position indices and values datasets.
variables : list(string), Default ['Frequency']
Lists of attributes that h5_main should possess so that it may be analyzed by Model.
parallel : bool, optional
Should the parallel implementation of the fitting be used. Default True
verbose : bool, optional. default = False
Whether or not to print statements that aid in debugging
"""
if not isinstance(h5_main, PycroDataset):
h5_main = PycroDataset(h5_main)
# Checking if dataset has the proper dimensions for the model to run.
if self._is_legal(h5_main, variables):
self.h5_main = h5_main
else:
raise ValueError('Provided dataset is not a "Main" dataset with necessary ancillary datasets')
# Checking if parallel processing will be used
self._parallel = parallel
self._verbose = verbose
# Determining the max size of the data that can be put into memory
self._set_memory_and_cores()
self._start_pos = 0
self._end_pos = self.h5_main.shape[0]
self.h5_guess = None
self.h5_fit = None
self.h5_results_grp = None
# TODO: do NOT expose a lot of innards. Turn it into private with _var_name
self.data = None
self.guess = None
self.fit = None
self._fitter_name = None # Reset this in the extended classes
self._parms_dict = dict()
def _set_memory_and_cores(self):
"""
Checks hardware limitations such as memory, # cpus and sets the recommended datachunk sizes and the
number of cores to be used by analysis methods.
"""
if self._parallel:
self._maxCpus = max(1, psutil.cpu_count() - 2)
else:
self._maxCpus = 1
if self._maxCpus == 1:
self._parallel = False
self._maxMemoryMB = get_available_memory() / 1024 ** 2 # in Mb
self._maxDataChunk = int(self._maxMemoryMB / self._maxCpus)
# Now calculate the number of positions that can be stored in memory in one go.
mb_per_position = self.h5_main.dtype.itemsize * self.h5_main.shape[1] / 1024.0 ** 2
# TODO: The size of the chunk should be determined by BOTH the computation time and memory restrictions
self._max_pos_per_read = int(np.floor(self._maxDataChunk / mb_per_position))
if self._verbose:
print('Allowed to read {} pixels per chunk'.format(self._max_pos_per_read))
def _is_legal(self, h5_main, variables):
"""
Checks whether or not the provided object can be analyzed by this Model class.
Classes that extend this class will do additional checks to ensure that the supplied dataset is legal.
Parameters
----
h5_main : PycroDataset instance
The dataset over which the analysis will be performed. This dataset should be linked to the spectroscopic
indices and values, and position indices and values datasets.
variables : list(string)
The dimensions needed to be present in the attributes of h5_main to analyze the data with Model.
Returns
-------
legal : Boolean
Whether or not this dataset satisfies the necessary conditions for analysis
"""
return np.all(np.isin(variables, h5_main.spec_dim_labels))
def _get_data_chunk(self):
"""
Reads the next chunk of data for the guess or the fit into memory
"""
if self._start_pos < self.h5_main.shape[0]:
self._end_pos = int(min(self.h5_main.shape[0], self._start_pos + self._max_pos_per_read))
self.data = self.h5_main[self._start_pos:self._end_pos, :]
if self._verbose:
print('\nReading pixels {} to {} of {}'.format(self._start_pos, self._end_pos, self.h5_main.shape[0]))
else:
if self._verbose:
print('Finished reading all data!')
self.data = None
    def _get_guess_chunk(self):
        """
        Reads the chunk of the guess dataset corresponding to the current data chunk into self.guess.

        Should be called BEFORE _get_data_chunk() since it relies upon the current values of
        self._start_pos and self._end_pos (which _set_results() advances after each write).
        """
        if self.data is None:
            # No data chunk is currently loaded (e.g. first call of a resumed fit):
            # compute the chunk end ourselves from the current start position.
            self._end_pos = int(min(self.h5_main.shape[0], self._start_pos + self._max_pos_per_read))
            self.guess = self.h5_guess[self._start_pos:self._end_pos, :]
        else:
            # Reuse the bounds established by the matching _get_data_chunk() call.
            self.guess = self.h5_guess[self._start_pos:self._end_pos, :]
        if self._verbose:
            print('Guess of shape: {}'.format(self.guess.shape))
    def _set_results(self, is_guess=False):
        """
        Writes the current in-memory guess or fit chunk into the appropriate HDF5 dataset.

        Also records the resume bookmark ('last_pixel' attribute), advances
        self._start_pos to the end of the written chunk, and flushes the file.

        Parameters
        ---------
        is_guess : bool, optional
            Default - False
            If True, writes self.guess into self.h5_guess; otherwise writes self.fit into self.h5_fit.
        """
        statement = 'guess'
        if is_guess:
            targ_dset = self.h5_guess
            source_dset = self.guess
        else:
            statement = 'fit'
            targ_dset = self.h5_fit
            source_dset = self.fit
        if self._verbose:
            print('Writing data to positions: {} to {}'.format(self._start_pos, self._end_pos))
        targ_dset[self._start_pos: self._end_pos, :] = source_dset
        # This flag will let us resume the computation if it is aborted
        targ_dset.attrs['last_pixel'] = self._end_pos
        # Now update the start position
        self._start_pos = self._end_pos
        # flush the file so a crash/abort loses at most the current chunk
        self.h5_main.file.flush()
        if self._verbose:
            print('Finished writing ' + statement + ' results (chunk) to file!')
    def _create_guess_datasets(self):
        """
        Model-specific hook that must create the results group, the Guess dataset and its
        corresponding spectroscopic (ancillary) datasets, and link them together.

        Implementations should populate the ancillary datasets here, but must NOT populate
        the Guess dataset itself (that is done chunk-by-chunk by _set_results()), and must
        NOT create/populate the Fit dataset.

        Raises
        ------
        NotImplementedError
            Always, in this base class; concrete models must override this method.
        """
        self.guess = None  # replace with actual h5 dataset
        raise NotImplementedError('Please override the _create_guess_datasets specific to your model')
    def _create_fit_datasets(self):
        """
        Model-specific hook that must create the Fit dataset, its corresponding spectroscopic
        (ancillary) datasets, and link the Fit dataset to them.

        Implementations should populate the ancillary datasets here, but must NOT populate
        the Fit dataset itself (that is done chunk-by-chunk by _set_results()), and must
        NOT create/populate the Guess dataset.

        Raises
        ------
        NotImplementedError
            Always, in this base class; concrete models must override this method.
        """
        self.fit = None  # replace with actual h5 dataset
        raise NotImplementedError('Please override the _create_fit_datasets specific to your model')
    def _check_for_old_guess(self):
        """
        Finds existing Guess datasets computed on this dataset with the same parameters
        (self._parms_dict) and splits them by completeness.

        Returns
        -------
        partial_dsets : list
            Guess datasets whose 'last_pixel' bookmark is short of the dataset size (resumable).
        completed_dsets : list
            Guess datasets that cover all positions.
        """
        groups = check_for_old(self.h5_main, self._fitter_name, new_parms=self._parms_dict, target_dset='Guess',
                               verbose=self._verbose)
        datasets = [grp['Guess'] for grp in groups]
        # Now sort these datasets into partial and complete:
        completed_dsets = []
        partial_dsets = []
        for dset in datasets:
            try:
                last_pix = get_attr(dset, 'last_pixel')
            except KeyError:
                last_pix = None
            # Skip datasets without the 'last_pixel' resume bookmark -- their progress is unknown.
            if last_pix is None:
                continue
            elif last_pix < self.h5_main.shape[0]:
                partial_dsets.append(dset)
            else:
                completed_dsets.append(dset)
        return partial_dsets, completed_dsets
def do_guess(self, processors=None, strategy=None, options=dict(), h5_partial_guess=None, override=False):
"""
Parameters
----------
strategy: string (optional)
Default is 'Wavelet_Peaks'.
Can be one of ['wavelet_peaks', 'relative_maximum', 'gaussian_processes'].
For updated list, run GuessMethods.methods
processors : int (optional)
Number of cores to use for computing. Default = all available - 2 cores
options: dict
Default, options for wavelet_peaks {"peaks_widths": np.array([10,200]), "peak_step":20}.
Dictionary of options passed to strategy. For more info see GuessMethods documentation.
h5_partial_guess : h5py.group. optional, default = None
Datagroup containing (partially computed) guess results. do_guess will resume computation if provided.
override : bool, optional. default = False
By default, will simply return duplicate results to avoid recomputing or resume computation on a
group with partial results. Set to True to force fresh computation.
Returns
-------
h5_guess : h5py.Dataset
Dataset containing guesses that can be passed on to do_fit()
"""
gm = GuessMethods()
if strategy not in gm.methods:
raise KeyError('Error: %s is not implemented in pycroscopy.analysis.GuessMethods to find guesses' %
strategy)
# ################## CHECK FOR DUPLICATES AND RESUME PARTIAL #######################################
# Prepare the parms dict that will be used for comparison:
self._parms_dict = options.copy()
self._parms_dict.update({'strategy': strategy})
# check for old:
partial_dsets, completed_dsets = self._check_for_old_guess()
if len(completed_dsets) == 0 and len(partial_dsets) == 0:
print('No existing datasets found')
override = True
if not override:
# First try to simply return any completed computation
if len(completed_dsets) > 0:
print('Returned previously computed results at ' + completed_dsets[-1].name)
self.h5_guess = PycroDataset(completed_dsets[-1])
return
# Next attempt to resume automatically if nothing is provided
if len(partial_dsets) > 0:
# attempt to use whatever the user provided (if legal)
target_partial_dset = partial_dsets[-1]
if h5_partial_guess is not None:
if not isinstance(h5_partial_guess, h5py.Dataset):
raise ValueError('Provided parameter is not an h5py.Dataset object')
if h5_partial_guess not in partial_dsets:
raise ValueError('Provided dataset for partial Guesses is not compatible')
if self._verbose:
print('Provided partial Guess dataset was acceptable')
target_partial_dset = h5_partial_guess
# Finally resume from this dataset
print('Resuming computation in group: ' + target_partial_dset.name)
self.h5_guess = target_partial_dset
self._start_pos = target_partial_dset.attrs['last_pixel']
# No completed / partials available or forced via override:
if self.h5_guess is None:
if self._verbose:
print('Starting a fresh computation!')
self._start_pos = 0
self._create_guess_datasets()
# ################## BEGIN THE ACTUAL COMPUTING #######################################
if processors is None:
processors = self._maxCpus
else:
processors = min(int(processors), self._maxCpus)
processors = recommend_cpu_cores(self._max_pos_per_read, processors, verbose=self._verbose)
print("Using %s to find guesses...\n" % strategy)
time_per_pix = 0
num_pos = self.h5_main.shape[0] - self._start_pos
orig_start_pos = self._start_pos
print('You can abort this computation at any time and resume at a later time!\n'
'\tIf you are operating in a python console, press Ctrl+C or Cmd+C to abort\n'
'\tIf you are in a Jupyter notebook, click on "Kernel">>"Interrupt"\n')
self._get_data_chunk()
while self.data is not None:
t_start = tm.time()
opt = Optimize(data=self.data, parallel=self._parallel)
temp = opt.computeGuess(processors=processors, strategy=strategy, options=options)
# reorder to get one numpy array out
temp = self._reformat_results(temp, strategy)
self.guess = np.hstack(tuple(temp))
# Write to file
self._set_results(is_guess=True)
# basic timing logs
tot_time = np.round(tm.time() - t_start, decimals=2) # in seconds
if self._verbose:
print('Done parallel computing in {} or {} per pixel'.format(format_time(tot_time),
format_time(tot_time / self.data.shape[0])))
if self._start_pos == orig_start_pos:
time_per_pix = tot_time / self._end_pos # in seconds
else:
time_remaining = (num_pos - self._end_pos) * time_per_pix # in seconds
print('Time remaining: ' + format_time(time_remaining))
# get next batch of data
self._get_data_chunk()
print('Completed computing guess')
print()
return PycroDataset(self.h5_guess)
    def _reformat_results(self, results, strategy='wavelet_peaks'):
        """
        Model-specific restructuring / reformatting of the parallel-compute results.
        The base implementation simply stacks the per-position results into one numpy array;
        models with structured outputs are expected to override this.

        Parameters
        ----------
        results : array-like
            Per-position results to be formatted for writing.
        strategy : str
            The strategy used in the fit. Determines how the results will be reformatted.
            Default 'wavelet_peaks'

        Returns
        -------
        results : numpy.ndarray
            Formatted array that is ready to be written to the HDF5 file.
        """
        return np.array(results)
    def _check_for_old_fit(self):
        """
        Scans existing results groups for this tool and categorizes them by progress.

        Returns
        -------
        completed_guess : list of h5py.Dataset
            Guess datasets that cover all positions (fit not yet started in their group).
        partial_fits : list of h5py.Group
            Groups whose Fit dataset matches self._parms_dict but is incomplete (resumable).
        completed_fits : list of h5py.Dataset
            Fit datasets that match self._parms_dict and cover all positions.
        """
        # First find all groups that match the basic condition of matching tool name
        all_groups = find_results_groups(self.h5_main, self._fitter_name)
        if self._verbose:
            print('Groups that matched the nomenclature: {}'.format(all_groups))
        # Next sort these groups into three categories:
        completed_guess = []
        partial_fits = []
        completed_fits = []
        for h5_group in all_groups:
            if 'Fit' in h5_group.keys():
                # check group for fit attribute
                h5_fit = h5_group['Fit']
                # check Fit dataset against parms_dict; mismatched parameters disqualify the group
                if not check_for_matching_attrs(h5_fit, new_parms=self._parms_dict, verbose=self._verbose):
                    if self._verbose:
                        print('{} did not match the given parameters'.format(h5_fit.name))
                    continue
                # sort this dataset by the 'last_pixel' resume bookmark:
                try:
                    last_pix = get_attr(h5_fit, 'last_pixel')
                except KeyError:
                    last_pix = None
                # For now skip any fits that are missing 'last_pixel'
                if last_pix is None:
                    continue
                elif last_pix < self.h5_main.shape[0]:
                    # NOTE: the parent *group* is stored for partial fits (resume needs Guess + Fit)
                    partial_fits.append(h5_fit.parent)
                else:
                    completed_fits.append(h5_fit)
            else:
                if 'Guess' in h5_group.keys():
                    h5_guess = h5_group['Guess']
                    # sort this dataset by the 'last_pixel' resume bookmark:
                    try:
                        last_pix = get_attr(h5_guess, 'last_pixel')
                    except KeyError:
                        last_pix = None
                    # For now skip any guesses that are missing 'last_pixel'
                    if last_pix is None:
                        continue
                    elif last_pix == self.h5_main.shape[0]:
                        if self._verbose:
                            print('{} was a completed Guess'.format(h5_guess.name))
                        completed_guess.append(h5_guess)
                    else:
                        if self._verbose:
                            print('{} did not not have completed Guesses'.format(h5_guess.name))
                else:
                    if self._verbose:
                        print('{} did not even have Guess. Categorizing as defective Group'.format(h5_group.name))
        return completed_guess, partial_fits, completed_fits
def do_fit(self, processors=None, solver_type='least_squares', solver_options=None, obj_func=None,
h5_partial_fit=None, h5_guess=None, override=False):
"""
Generates the fit for the given dataset and writes back to file
Parameters
----------
processors : int
Number of cpu cores the user wishes to run on. The minimum of this and self._maxCpus is used.
solver_type : str
The name of the solver in scipy.optimize to use for the fit
solver_options : dict
Dictionary of parameters to pass to the solver specified by `solver_type`
obj_func : dict
Dictionary defining the class and method containing the function to be fit as well as any
additional function parameters.
h5_partial_fit : h5py.group. optional, default = None
Datagroup containing (partially computed) fit results. do_fit will resume computation if provided.
h5_guess : h5py.group. optional, default = None
Datagroup containing guess results. do_fit will use this if provided.
override : bool, optional. default = False
By default, will simply return duplicate results to avoid recomputing or resume computation on a
group with partial results. Set to True to force fresh computation.
Returns
-------
h5_results : h5py.Dataset object
Dataset with the fit parameters
"""
# ################## PREPARE THE SOLVER #######################################
legit_solver = solver_type in scipy.optimize.__dict__.keys()
if not legit_solver:
raise KeyError('Error: Objective Functions "%s" is not implemented in pycroscopy.analysis.Fit_Methods' %
obj_func['obj_func'])
obj_func_name = obj_func['obj_func']
legit_obj_func = obj_func_name in Fit_Methods().methods
if not legit_obj_func:
raise KeyError('Error: Solver "%s" does not exist!. For additional info see scipy.optimize\n' % solver_type)
# ################## CHECK FOR DUPLICATES AND RESUME PARTIAL #######################################
def _get_group_to_resume(legal_groups, provided_partial_fit):
for h5_group in legal_groups:
if h5_group['Fit'] == provided_partial_fit:
return h5_group
return None
def _resume_fit(fitter, h5_group):
fitter.h5_guess = h5_group['Guess']
fitter.h5_fit = h5_group['Fit']
fitter._start_pos = fitter.h5_fit.attrs['last_pixel']
def _start_fresh_fit(fitter, h5_guess_legal):
fitter.h5_guess = h5_guess_legal
fitter._create_fit_datasets()
fitter._start_pos = 0
# Prepare the parms dict that will be used for comparison:
self._parms_dict = solver_options.copy()
self._parms_dict.update({'solver_type': solver_type})
self._parms_dict.update(obj_func)
completed_guess, partial_fit_groups, completed_fits = self._check_for_old_fit()
override = override or (h5_partial_fit is not None or h5_guess is not None)
if not override:
# First try to simply return completed results
if len(completed_fits) > 0:
print('Returned previously computed results at ' + completed_fits[-1].name)
self.h5_fit = PycroDataset(completed_fits[-1])
return
# Next, attempt to resume automatically:
elif len(partial_fit_groups) > 0:
print('Will resume fitting in {}. '
'You can supply a dataset using the h5_partial_fit argument'.format(partial_fit_groups[-1].name))
_resume_fit(self, partial_fit_groups[-1])
# Finally, attempt to do fresh fitting using completed Guess:
elif len(completed_guess) > 0:
print('Will use {} for generating new Fit. '
'You can supply a dataset using the h5_guess argument'.format(completed_guess[-1].name))
_start_fresh_fit(self, completed_guess[-1])
else:
raise ValueError('Could not find a compatible Guess to use for Fit. Call do_guess() before do_fit()')
else:
if h5_partial_fit is not None:
h5_group = _get_group_to_resume(partial_fit_groups, h5_partial_fit)
if h5_group is None:
raise ValueError('Provided dataset with partial Fit was not found to be compatible')
_resume_fit(self, h5_group)
elif h5_guess is not None:
if h5_guess not in completed_guess:
raise ValueError('Provided dataset with completed Guess was not found to be compatible')
_start_fresh_fit(self, h5_guess)
else:
raise ValueError('Please provide a completed guess or partially completed Fit to resume')
# ################## BEGIN THE ACTUAL FITTING #######################################
print("Using solver %s and objective function %s to fit your data\n" % (solver_type, obj_func['obj_func']))
if processors is None:
processors = self._maxCpus
else:
processors = min(processors, self._maxCpus)
processors = recommend_cpu_cores(self._max_pos_per_read, processors, verbose=self._verbose)
time_per_pix = 0
num_pos = self.h5_main.shape[0] - self._start_pos
orig_start_pos = self._start_pos
print('You can abort this computation at any time and resume at a later time!\n'
'\tIf you are operating in a python console, press Ctrl+C or Cmd+C to abort\n'
'\tIf you are in a Jupyter notebook, click on "Kernel">>"Interrupt"\n')
self._get_guess_chunk()
self._get_data_chunk()
while self.data is not None:
t_start = tm.time()
opt = Optimize(data=self.data, guess=self.guess, parallel=self._parallel)
temp = opt.computeFit(processors=processors, solver_type=solver_type, solver_options=solver_options,
obj_func=obj_func.copy())
# TODO: need a different .reformatResults to process fitting results
# reorder to get one numpy array out
temp = self._reformat_results(temp, obj_func_name)
self.fit = np.hstack(tuple(temp))
# Write to file
self._set_results(is_guess=False)
# basic timing logs
tot_time = np.round(tm.time() - t_start, decimals=2) # in seconds
if self._verbose:
print('Done parallel computing in {} or {} per pixel'.format(format_time(tot_time),
format_time(
tot_time / self.data.shape[0])))
if self._start_pos == orig_start_pos:
time_per_pix = tot_time / self._end_pos # in seconds
else:
time_remaining = (num_pos - self._end_pos) * time_per_pix # in seconds
print('Time remaining: ' + format_time(time_remaining))
# get next batch of data
self._get_guess_chunk()
self._get_data_chunk()
print('Completed computing fit. Writing to file.')
return PycroDataset(self.h5_fit)
| 40.853659 | 121 | 0.600672 |
from __future__ import division, print_function, absolute_import, unicode_literals
import numpy as np
import psutil
import scipy
import h5py
import time as tm
from .guess_methods import GuessMethods
from .fit_methods import Fit_Methods
from ..core.io.pycro_data import PycroDataset
from ..core.io.io_utils import get_available_memory, recommend_cpu_cores, format_time
from ..core.io.hdf_utils import check_for_old, find_results_groups, check_for_matching_attrs, get_attr
from .optimize import Optimize
class Fitter(object):
def __init__(self, h5_main, variables=['Frequency'], parallel=True, verbose=False):
if not isinstance(h5_main, PycroDataset):
h5_main = PycroDataset(h5_main)
if self._is_legal(h5_main, variables):
self.h5_main = h5_main
else:
raise ValueError('Provided dataset is not a "Main" dataset with necessary ancillary datasets')
self._parallel = parallel
self._verbose = verbose
self._set_memory_and_cores()
self._start_pos = 0
self._end_pos = self.h5_main.shape[0]
self.h5_guess = None
self.h5_fit = None
self.h5_results_grp = None
self.data = None
self.guess = None
self.fit = None
self._fitter_name = None
self._parms_dict = dict()
def _set_memory_and_cores(self):
if self._parallel:
self._maxCpus = max(1, psutil.cpu_count() - 2)
else:
self._maxCpus = 1
if self._maxCpus == 1:
self._parallel = False
self._maxMemoryMB = get_available_memory() / 1024 ** 2
self._maxDataChunk = int(self._maxMemoryMB / self._maxCpus)
mb_per_position = self.h5_main.dtype.itemsize * self.h5_main.shape[1] / 1024.0 ** 2
self._max_pos_per_read = int(np.floor(self._maxDataChunk / mb_per_position))
if self._verbose:
print('Allowed to read {} pixels per chunk'.format(self._max_pos_per_read))
def _is_legal(self, h5_main, variables):
return np.all(np.isin(variables, h5_main.spec_dim_labels))
def _get_data_chunk(self):
if self._start_pos < self.h5_main.shape[0]:
self._end_pos = int(min(self.h5_main.shape[0], self._start_pos + self._max_pos_per_read))
self.data = self.h5_main[self._start_pos:self._end_pos, :]
if self._verbose:
print('\nReading pixels {} to {} of {}'.format(self._start_pos, self._end_pos, self.h5_main.shape[0]))
else:
if self._verbose:
print('Finished reading all data!')
self.data = None
def _get_guess_chunk(self):
if self.data is None:
self._end_pos = int(min(self.h5_main.shape[0], self._start_pos + self._max_pos_per_read))
self.guess = self.h5_guess[self._start_pos:self._end_pos, :]
else:
self.guess = self.h5_guess[self._start_pos:self._end_pos, :]
if self._verbose:
print('Guess of shape: {}'.format(self.guess.shape))
def _set_results(self, is_guess=False):
statement = 'guess'
if is_guess:
targ_dset = self.h5_guess
source_dset = self.guess
else:
statement = 'fit'
targ_dset = self.h5_fit
source_dset = self.fit
if self._verbose:
print('Writing data to positions: {} to {}'.format(self._start_pos, self._end_pos))
targ_dset[self._start_pos: self._end_pos, :] = source_dset
targ_dset.attrs['last_pixel'] = self._end_pos
self._start_pos = self._end_pos
self.h5_main.file.flush()
if self._verbose:
print('Finished writing ' + statement + ' results (chunk) to file!')
def _create_guess_datasets(self):
self.guess = None
raise NotImplementedError('Please override the _create_guess_datasets specific to your model')
def _create_fit_datasets(self):
self.fit = None
raise NotImplementedError('Please override the _create_fit_datasets specific to your model')
def _check_for_old_guess(self):
groups = check_for_old(self.h5_main, self._fitter_name, new_parms=self._parms_dict, target_dset='Guess',
verbose=self._verbose)
datasets = [grp['Guess'] for grp in groups]
completed_dsets = []
partial_dsets = []
for dset in datasets:
try:
last_pix = get_attr(dset, 'last_pixel')
except KeyError:
last_pix = None
if last_pix is None:
continue
elif last_pix < self.h5_main.shape[0]:
partial_dsets.append(dset)
else:
completed_dsets.append(dset)
return partial_dsets, completed_dsets
def do_guess(self, processors=None, strategy=None, options=dict(), h5_partial_guess=None, override=False):
gm = GuessMethods()
if strategy not in gm.methods:
raise KeyError('Error: %s is not implemented in pycroscopy.analysis.GuessMethods to find guesses' %
strategy)
pixel')
except KeyError:
last_pix = None
if last_pix is None:
continue
elif last_pix < self.h5_main.shape[0]:
partial_fits.append(h5_fit.parent)
else:
completed_fits.append(h5_fit)
else:
if 'Guess' in h5_group.keys():
h5_guess = h5_group['Guess']
try:
last_pix = get_attr(h5_guess, 'last_pixel')
except KeyError:
last_pix = None
if last_pix is None:
continue
elif last_pix == self.h5_main.shape[0]:
if self._verbose:
print('{} was a completed Guess'.format(h5_guess.name))
completed_guess.append(h5_guess)
else:
if self._verbose:
print('{} did not not have completed Guesses'.format(h5_guess.name))
else:
if self._verbose:
print('{} did not even have Guess. Categorizing as defective Group'.format(h5_group.name))
return completed_guess, partial_fits, completed_fits
def do_fit(self, processors=None, solver_type='least_squares', solver_options=None, obj_func=None,
h5_partial_fit=None, h5_guess=None, override=False):
| true | true |
f73a398d7987e29ad476208066f2c4136bfffaee | 3,107 | py | Python | src/trunk/apps/python/inv2dlsv.py | kbouk/seiscomp3 | 2385e4197274135c70aaef93a0b7df65ed8fa6a6 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 94 | 2015-02-04T13:57:34.000Z | 2021-11-01T15:10:06.000Z | src/trunk/apps/python/inv2dlsv.py | kbouk/seiscomp3 | 2385e4197274135c70aaef93a0b7df65ed8fa6a6 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 233 | 2015-01-28T15:16:46.000Z | 2021-08-23T11:31:37.000Z | src/trunk/apps/python/inv2dlsv.py | kbouk/seiscomp3 | 2385e4197274135c70aaef93a0b7df65ed8fa6a6 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 95 | 2015-02-13T15:53:30.000Z | 2021-11-02T14:54:54.000Z | #!/usr/bin/env seiscomp-python
############################################################################
# Copyright (C) by GFZ Potsdam #
# #
# You can redistribute and/or modify this program under the #
# terms of the SeisComP Public License. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# SeisComP Public License for more details. #
############################################################################
from __future__ import absolute_import, division, print_function
import sys
import io
from seiscomp.fseed import *
from seiscomp.db.seiscomp3 import sc3wrap
from seiscomp.db.seiscomp3.inventory import Inventory
from seiscomp3 import DataModel, IO
ORGANIZATION = "EIDA"
def iterinv(obj):
    """Yield every leaf object of a two-level inventory mapping (dict of dicts)."""
    for inner in obj.values():
        yield from inner.values()
def main():
    """Convert a SeisComP3 inventory XML file into a dataless SEED volume.

    Usage: inv2dlsv [in_xml [out_dataless]]
    Reads from stdin when in_xml is omitted or "-"; writes to stdout when
    out_dataless is omitted or "-".

    Returns
    -------
    int
        0 on success; 1 on bad command-line usage.
    """
    # BUGFIX: the old guard also tested len(sys.argv) < 1, which can never be true.
    if len(sys.argv) > 3:
        sys.stderr.write("Usage inv2dlsv [in_xml [out_dataless]]\n")
        return 1
    inFile = sys.argv[1] if len(sys.argv) > 1 else "-"
    out = sys.argv[2] if len(sys.argv) > 2 else ""
    # Detach the wrapper layer from any database connection.
    sc3wrap.dbQuery = None
    ar = IO.XMLArchive()
    if not ar.open(inFile):
        raise IOError(inFile + ": unable to open")
    obj = ar.readObject()
    if obj is None:
        raise TypeError(inFile + ": invalid format")
    sc3inv = DataModel.Inventory.Cast(obj)
    if sc3inv is None:
        raise TypeError(inFile + ": invalid format")
    inv = Inventory(sc3inv)
    inv.load_stations("*", "*", "*", "*")
    inv.load_instruments()
    vol = SEEDVolume(inv, ORGANIZATION, "", resp_dict=False)
    # Register every stream of every sensor location; report (but do not abort on)
    # channels the SEED writer rejects.
    for net in iterinv(inv.network):
        for sta in iterinv(net.station):
            for loc in iterinv(sta.sensorLocation):
                for strm in iterinv(loc.stream):
                    try:
                        vol.add_chan(net.code, sta.code, loc.code,
                                     strm.code, strm.start, strm.end)
                    except SEEDError as e:
                        sys.stderr.write("Error (%s,%s,%s,%s): %s\n" % (
                            net.code, sta.code, loc.code, strm.code, str(e)))
    if not out or out == "-":
        # Buffer the volume, then copy it to (binary) stdout.
        output = io.BytesIO()
        vol.output(output)
        stdout = sys.stdout.buffer if hasattr(sys.stdout, "buffer") else sys.stdout
        stdout.write(output.getvalue())
        stdout.flush()
        output.close()
    else:
        # BUGFIX: use the already-computed `out` instead of re-reading sys.argv[2].
        with open(out, "wb") as fd:
            vol.output(fd)
    return 0
if __name__ == "__main__":
    # Script entry point: exit with main()'s status code and report any
    # uncaught exception on stderr instead of dumping a traceback.
    try:
        sys.exit(main())
    except Exception as e:
        sys.stderr.write("Error: %s" % str(e))
        sys.exit(1)
| 31.704082 | 83 | 0.505311 | true | true | |
f73a3a299dc3ff7bc79edcc95e8f21c8011e2a3d | 1,795 | py | Python | flask-docker-master/apps/city_spelling_matcher.py | nephylum/city-data-comparison-ds | 286698a7137774dd5c9245a5180911a1e6c48720 | [
"MIT"
] | 2 | 2020-09-08T22:07:21.000Z | 2022-01-05T23:51:15.000Z | flask-docker-master/apps/city_spelling_matcher.py | nephylum/city-data-comparison-ds | 286698a7137774dd5c9245a5180911a1e6c48720 | [
"MIT"
] | null | null | null | flask-docker-master/apps/city_spelling_matcher.py | nephylum/city-data-comparison-ds | 286698a7137774dd5c9245a5180911a1e6c48720 | [
"MIT"
] | 2 | 2020-05-05T21:16:22.000Z | 2021-01-20T22:18:21.000Z | import difflib
import json
def data_loader():
    """Load and return the spell-check JSON object of known city names."""
    with open('apps/data/spellcheck/spell_check_opject2.json', 'r') as src:
        raw_text = src.read()
    return json.loads(raw_text)
def check_spelling(data, words):
    """Return up to five close-match city records for a (possibly misspelled) city name.

    `data` maps lowercase lookup keys to single-entry dicts of
    {display_name: record}; records with duplicate IDs are skipped.
    When nothing matches, a {'No Data': <message>} dict is returned instead.
    """
    matches = {}
    seen_ids = set()
    for key in difflib.get_close_matches(words.lower(), list(data.keys()), n=15):
        entry = data[key]
        display_name = list(entry.keys())[0]
        record = list(entry.values())[0]
        # Deduplicate cities that appear under several spellings of the same ID.
        if record['ID'] in seen_ids:
            continue
        seen_ids.add(record['ID'])
        matches[display_name] = record
    if not matches:
        if len(words.split()) <= 1:
            return {'No Data': f'Cannot find {words}, please include the State name along with the City you are searching for.'}
        return {'No Data': f'Cannot find {words}, please check the spelling or search for another City.'}
    if len(matches) > 5:
        # Keep only the five best matches (insertion order = match quality order).
        return {name: matches[name] for name in list(matches)[:5]}
    return matches
def force_id(data, words):
    """Return the ID of the city whose key is spelled closest to `words`.

    Falls back to the Seattle record's ID when no key is similar enough.
    """
    hits = difflib.get_close_matches(words.lower(), list(data.keys()), n=1)
    if hits:
        record = data[hits[0]]
        first_name = list(record.keys())[0]
        return record[first_name]['ID']
    # No acceptable match: default to Seattle.
    return data['Seattle WA']['Seattle, WA']['ID']
| 34.519231 | 127 | 0.583287 | import difflib
import json
def data_loader():
with open('apps/data/spellcheck/spell_check_opject2.json', 'r') as myfile:
data = myfile.read()
obj = json.loads(data)
return(obj)
def check_spelling(data, words):
jsn = {}
id_manager = []
for i in difflib.get_close_matches(words.lower(), list(data.keys()), n=15):
if list(data[i].values())[0]['ID'] not in id_manager:
id_manager.append(list(data[i].values())[0]['ID'])
jsn[list(data[i].keys())[0]] = list(data[i].values())[0]
else:
pass
if len(jsn) > 0 and len(jsn) <= 5:
res = jsn
elif len(jsn) > 5:
short_dict = {}
for i in list(jsn.keys())[0:5]:
short_dict[i] = jsn[i]
res = short_dict
else:
if len(words.split()) <= 1:
res = {'No Data': f'Cannot find {words}, please include the State name along with the City you are searching for.'}
else:
res = {'No Data': f'Cannot find {words}, please check the spelling or search for another City.'}
return(res)
def force_id(data, words):
jsn = {}
res = difflib.get_close_matches(words.lower(), list(data.keys()), n=1)
if len(res) > 0:
jsn['data'] = data[res[0]]
jsn = jsn['data'][list(jsn['data'].keys())[0]]['ID']
else:
jsn['data'] = data['Seattle WA']
jsn = jsn['data']['Seattle, WA']['ID']
return(jsn)
| true | true |
f73a3d1c1a9dde921b8e4dcececf9b8829d7aa79 | 15,957 | py | Python | file_formats/wow_common_types.py | ihm-tswow/pywowlib | f4e49d2e3204e90046716bfb608275d7f4e40b81 | [
"MIT"
] | null | null | null | file_formats/wow_common_types.py | ihm-tswow/pywowlib | f4e49d2e3204e90046716bfb608275d7f4e40b81 | [
"MIT"
] | null | null | null | file_formats/wow_common_types.py | ihm-tswow/pywowlib | f4e49d2e3204e90046716bfb608275d7f4e40b81 | [
"MIT"
] | null | null | null | import struct
from ..io_utils.types import *
from io import SEEK_CUR, BytesIO
from collections.abc import Iterable
from typing import Optional, Protocol
__reload_order_index__ = 1
# TODO: temp declaration for compatibility
class Self:
    """Placeholder for the typing ``Self`` type on interpreters that lack it;
    temporary declaration kept for compatibility."""
    pass
###### M2 file versions ######
@singleton
class M2VersionsManager:
    """Process-wide (singleton) holder of the M2 file-format version in use."""
    def __init__(self):
        # Default to Wrath of the Lich King (version 264) until explicitly set.
        self.m2_version = M2Versions.WOTLK
    def set_m2_version(self, version: int):
        """Select the active M2 format version (one of the M2Versions constants)."""
        self.m2_version = version
@singleton
class M2ExternalSequenceCache:
    """Singleton cache mapping sequence index -> sequence for sequences whose
    flags lack the 0x130 bits; presumably those sequences are stored in
    external .anim files -- TODO confirm against the M2 format docs.
    """
    def __init__(self, m2_header):
        self.external_sequences = {i: sequence for i, sequence in enumerate(m2_header.sequences)
                                   if not sequence.flags & 0x130}
class M2Versions:
    """Known M2 model-format version numbers, one constant per WoW expansion."""
    CLASSIC = 256
    TBC = 263
    WOTLK = 264
    CATA = 272
    MOP = 272
    WOD = 273  # ?
    LEGION = 274
    BFA = 274  # TODO: verify
    @classmethod
    def from_expansion_number(cls, exp_num: int):
        """Return the M2 version constant for an expansion index (0 = Classic ... 7 = BfA).

        Raises KeyError for an unknown expansion index.
        """
        by_expansion = dict(enumerate((cls.CLASSIC, cls.TBC, cls.WOTLK, cls.CATA,
                                       cls.MOP, cls.WOD, cls.LEGION, cls.BFA)))
        return by_expansion[exp_num]
#############################################################
###### WoW Common Types ######
#############################################################
class CArgb:
    """A color given in values of red, green, blue and alpha (one byte each)."""
    def __init__(self, color=(255, 255, 255, 255)):
        self.r, self.g, self.b, self.a = color

    def read(self, f):
        """Read 4 bytes (r, g, b, a) from file-like `f`; returns self for chaining."""
        self.r, self.g, self.b, self.a = uint8.read(f, 4)
        # CONSISTENCY FIX: return self like the other wow types (C3Vector, CRange, ...)
        return self

    def write(self, f):
        """Write the color as 4 bytes (r, g, b, a) to `f`; returns self for chaining."""
        uint8.write(f, (self.r, self.g, self.b, self.a), 4)
        return self
class CImVector:
    """A color given in values of blue, green, red and alpha (one byte each)."""
    def __init__(self, color=(255, 255, 255, 255)):
        self.b, self.g, self.r, self.a = color

    def read(self, f):
        """Read 4 bytes (b, g, r, a) from file-like `f`; returns self for chaining."""
        self.b, self.g, self.r, self.a = uint8.read(f, 4)
        # CONSISTENCY FIX: return self like the other wow types (C3Vector, CRange, ...)
        return self

    def write(self, f):
        """Write the color as 4 bytes (b, g, r, a) to `f`; returns self for chaining."""
        uint8.write(f, (self.b, self.g, self.r, self.a), 4)
        return self
class C3Vector:
    """A three component float vector."""

    def __init__(self, vector=None):
        # A None default avoids the shared-mutable-default pitfall.
        self.x, self.y, self.z = (0.0, 0.0, 0.0) if vector is None else vector

    def read(self, f):
        """Read x, y, z (in that order) as float32 from `f`; returns self."""
        for axis in ('x', 'y', 'z'):
            setattr(self, axis, float32.read(f))
        return self

    def write(self, f):
        """Write x, y, z (in that order) as float32 to `f`; returns self."""
        for axis in ('x', 'y', 'z'):
            float32.write(f, getattr(self, axis))
        return self
class C4Plane:
    """A 3D plane: a normal vector plus a signed distance, four floats in total."""

    def __init__(self):
        self.normal = (0, 0, 0)
        self.distance = 0.0

    def read(self, f):
        """Read the normal (3 floats) then the distance from `f`; returns self."""
        self.normal, self.distance = vec3D.read(f), float32.read(f)
        return self

    def write(self, f):
        """Write the normal (3 floats) then the distance to `f`; returns self."""
        vec3D.write(f, self.normal)
        float32.write(f, self.distance)
        return self

    @staticmethod
    def size():
        """Serialized size in bytes: four 4-byte floats."""
        return 4 * 4
class CRange:
    """A one dimensional float range defined by its lower and upper bounds."""

    def __init__(self):
        self.min, self.max = 0.0, 0.0

    def read(self, f):
        """Read min then max as float32 from `f`; returns self for chaining."""
        self.min, self.max = float32.read(f), float32.read(f)
        return self

    def write(self, f):
        """Write min then max as float32 to `f`; returns self for chaining."""
        for bound in (self.min, self.max):
            float32.write(f, bound)
        return self
class CAaBox:
    """An axis-aligned bounding box described by its minimum and maximum corner points."""

    def __init__(self, min_=None, max_=None):
        # None defaults sidestep the shared-mutable-default pitfall.
        self.min = (0.0, 0.0, 0.0) if min_ is None else min_
        self.max = (0.0, 0.0, 0.0) if max_ is None else max_

    def read(self, f):
        """Read the min corner then the max corner from `f`; returns self."""
        self.min = vec3D.read(f)
        self.max = vec3D.read(f)
        return self

    def write(self, f):
        """Write the min corner then the max corner to `f`; returns self."""
        for corner in (self.min, self.max):
            vec3D.write(f, corner)
        return self
class fixed_point:
    """A fixed point real number, opposed to a floating point.

    Bit layout of the raw integer (low to high): ``dec_bits`` fractional bits,
    ``int_bits`` integral bits, then one sign bit. ``type_`` is the raw-integer
    serializer (e.g. uint16) exposing read(f)/write(f, value).
    """
    def __init__(self, type_, dec_bits, int_bits):
        self.type = type_
        self.dec_bits = dec_bits
        self.int_bits = int_bits
        self.value = 0

    def read(self, f):
        """Decode the raw integer from `f` into self.value; returns self."""
        fixed_point_val = self.type.read(f)
        decimal_part = fixed_point_val & ((1 << self.dec_bits) - 1)
        integral_part = (fixed_point_val >> self.dec_bits) & (1 << self.int_bits) - 1
        sign = -1.0 if (fixed_point_val & (1 << (self.dec_bits + self.int_bits)) != 0) else 1.0
        # Fractional part is decimal_part scaled by 2**-dec_bits.
        self.value = sign * (integral_part + decimal_part / (((1 << self.dec_bits) - 1) + 1.0))
        return self

    def write(self, f):
        """Encode self.value and write the raw integer to `f`; returns self."""
        sign = 1 if self.value < 0 else 0
        # BUGFIX: work on the magnitude -- int(negative) & mask previously
        # produced two's-complement garbage for negative values.
        magnitude = abs(self.value)
        integral_part = int(magnitude) & ((1 << self.int_bits) - 1)
        decimal_part = int((magnitude - int(magnitude)) * (1 << self.dec_bits)) & ((1 << self.dec_bits) - 1)
        # BUGFIX: the integral part sits above the *fractional* bits, so it must be
        # shifted by dec_bits; the old `<< self.int_bits` did not round-trip read()
        # whenever dec_bits != int_bits.
        fixed_point_val = (sign << (self.dec_bits + self.int_bits)) | (integral_part << self.dec_bits) | decimal_part
        self.type.write(f, fixed_point_val)
        return self
fixed16 = uint16  # alias: fixed16 fields are read/written as a plain uint16 raw value
class MemoryManager:
    """Helpers for reserving space in, and querying the end of, a seekable stream."""
    @staticmethod
    def mem_reserve(f, n_bytes):
        """Grow the stream by writing a NUL byte ``n_bytes`` past the current
        position, then restore the position. No-op when ``n_bytes`` is falsy."""
        if not n_bytes:
            return
        origin = f.tell()
        f.seek(origin + n_bytes)
        f.write(b'\0')
        f.seek(origin)
    @staticmethod
    def ofs_request(f):
        """Return the end-of-stream offset without disturbing the current position."""
        origin = f.tell()
        end_offset = f.seek(0, 2)
        f.seek(origin)
        return end_offset
class M2Array(metaclass=Template):
    """Length-prefixed offset array used throughout M2 files.

    The on-disk header is (n_elements: uint32, ofs_elements: uint32); the
    element payload lives elsewhere in the file at ``ofs_elements``.
    ``type_`` is either a GenericType primitive codec or a struct-like class
    with chainable read/write methods.
    """
    def __init__(self, type_):
        self.n_elements = 0
        self.ofs_elements = 0
        self.type = type_
        self.values = []
        self.is_read = False  # NOTE(review): never updated inside this class; presumably maintained by callers
    def read(self, f, ignore_header=False, ignore_data=False, is_anim_data=False):
        """Read the header and (optionally) the payload, restoring the stream
        position afterwards."""
        if not ignore_header:
            self.n_elements = uint32.read(f)
            self.ofs_elements = uint32.read(f)
        if ignore_data:
            return self
        pos = f.tell()
        f.seek(self.ofs_elements)
        if not is_anim_data:
            type_t = type(self.type)
            if type_t is GenericType:
                # primitive codec: read() returns the value itself
                self.values = [self.type.read(f) for _ in range(self.n_elements)]
            else:
                self.values = [self.type().read(f) for _ in range(self.n_elements)]
        else:
            # sequences stored in external .anim files are skipped here
            self.values = [self.type().read(f, ignore_data=bool(M2ExternalSequenceCache().external_sequences.get(i)))
                           for i in range(self.n_elements)]
        f.seek(pos)
        return self
    def write(self, f):
        """Reserve space at end-of-file for the payload and write header + elements."""
        ofs = MemoryManager.ofs_request(f)
        uint32.write(f, len(self.values))
        uint32.write(f, ofs if len(self.values) else 0)
        pos = f.tell()
        f.seek(ofs)
        type_t = type(self.type)
        if type_t is not partial:
            if hasattr(self.type, 'size'):
                MemoryManager.mem_reserve(f, len(self.values) * self.type.size())
            # NOTE(review): ``self.type.func`` only exists on functools.partial
            # objects, yet this branch runs when type_t is NOT partial — confirm intent.
            elif hasattr(self.type.func, 'size'):
                MemoryManager.mem_reserve(f, len(self.values) * self.type.func.size())
        if type_t is GenericType:
            for value in self.values:
                self.type.write(f, value)
        else:
            for value in self.values:
                value.write(f)
        f.seek(pos)
        return self
    def __getitem__(self, item):
        return self.values[item]
    def append(self, value):
        self.values.append(value)
    def add(self, value):
        """Append ``value`` and return its index."""
        self.values.append(value)
        return len(self.values) - 1
    def extend(self, itrbl):
        self.values.extend(itrbl)
    def prepend(self, itrbl):
        """Insert the items of ``itrbl`` before the existing values.

        Bug fix: the previous ``self.values = itrbl[:].extend(self.values)``
        assigned None (list.extend returns None), wiping the array.
        """
        self.values = list(itrbl) + self.values
    def new(self):
        """Append a default-constructed element and return it."""
        self.values.append(self.type())
        return self.values[-1]
    def from_iterable(self, itrbl):
        self.values = [self.type(item) for item in itrbl]
    def set_index(self, index, value):
        self.values[index] = value
    def set(self, itrbl):
        self.values = itrbl
    def __len__(self):
        return len(self.values)
    def __iter__(self):
        return self.values.__iter__()
    @staticmethod
    def size():
        """Size of the on-disk header: two uint32 fields."""
        return uint32.size() * 2
class IOProtocol(Protocol):
    """Structural interface for objects with chainable read/write methods."""
    def read(self, f) -> Self: ...
    def write(self, f) -> Self: ...
class ContentChunk:  # for inheriting only
    """Base chunk: the magic is derived from the subclass name and the header
    stores the payload size as a uint32. The magic is written reversed on disk."""
    def __init__(self):
        self.magic = self.__class__.__name__
        self.size = 0
    def read(self, f):
        # the magic itself is consumed by the caller; only the size remains here
        self.size = uint32.read(f)
        return self
    def write(self, f):
        reversed_magic = self.magic[::-1]
        f.write(reversed_magic.encode('ascii'))
        uint32.write(f, self.size)
        return self
class ContentChunkBuffered:  # for inheriting only
    """Chunk that can hold its payload as raw, unparsed bytes.

    After ``from_bytes`` the instance is in a "buffered" state:
    - ``write`` dumps the raw bytes verbatim (via _write_buffered),
    - ``size`` is computed from the buffer length,
    - accessing any other attribute first replays the buffer through
      ``read`` (see __getattribute__) and drops the raw bytes.
    """
    raw_data = None
    def __init__(self):
        self.magic = self.__class__.__name__
        self.size = 0
        self.raw_data = None
    def from_bytes(self, data: bytes):
        # enter buffered mode; parsing is deferred until first attribute access
        self.raw_data = data
    def read(self, f):
        self.size = uint32.read(f)
        return self
    def write(self, f):
        # magic is stored reversed on disk
        f.write(self.magic[::-1].encode('ascii'))
        uint32.write(f, self.size)
        return self
    def _write_buffered(self, f):
        # write path used while in buffered mode: emit header + raw payload as-is
        raw_data = super().__getattribute__('raw_data')
        magic = super().__getattribute__('magic')
        f.write(magic[::-1].encode('ascii'))
        size = len(raw_data)
        self.size = size
        uint32.write(f, size)
        f.write(raw_data)
        return self
    def __getattribute__(self, item):
        raw_data = super().__getattribute__('raw_data')
        if raw_data is not None:
            if item == 'write':
                # substitute the buffered writer while raw bytes are held
                return super().__getattribute__('_write_buffered')
            elif item == 'read':
                # an explicit read invalidates the buffer
                self.raw_data = None
            elif item == 'size':
                return len(raw_data)
            else:
                # first access to a parsed attribute: replay the buffer
                # (prefixed with its size header) through read(), then drop it
                size = struct.pack('I', len(raw_data))
                super().__getattribute__('read')(BytesIO(size + raw_data))
                self.raw_data = None
                return super().__getattribute__(item)
        return super().__getattribute__(item)
class M2ContentChunk(ContentChunk):  # for inheriting only, M2 files do not have reversed headers
    """ContentChunk variant whose magic is written in natural (non-reversed) order."""
    def write(self, f):
        f.write(self.magic.encode('ascii'))
        uint32.write(f, self.size)
        return self
class M2RawChunk(M2ContentChunk):
    """M2 chunk whose payload is kept as an opaque BytesIO buffer."""
    def __init__(self):
        super().__init__()
        self.raw_data = BytesIO()
    def read(self, f):
        super().read(f)  # consumes the size field
        self.raw_data.write(f.read(self.size))
        self.raw_data.seek(0)  # leave the buffer rewound for consumers
        return self
    def write(self, f):
        # recompute size from the buffer length before emitting the header
        self.raw_data.seek(0, 2)
        self.size = self.raw_data.tell()
        self.raw_data.seek(0)
        super().write(f)
        f.write(self.raw_data.read())
        return self
class ArrayChunkBase:  # for internal use only
    """Mixin for chunks whose payload is a homogeneous array.

    Subclasses set ``item`` (an element codec, or a tuple of codecs for
    record-style rows) and ``data`` (the attribute name holding the parsed
    list). ``from_bytes``/``lazy_read`` keep the payload as raw bytes until
    the data attribute is first accessed (see __getattribute__).
    """
    item: IOProtocol = None
    data: str = "content"
    raw_data: Optional[bytes] = None
    lazy_read: bool = False
    def __init__(self):
        super().__init__()
        setattr(self, self.data, [])
    def from_bytes(self, data: bytes):
        # enter lazy mode; parsing happens on first access to the data attribute
        self.raw_data = data
    def as_bytes(self) -> Optional[bytes]:
        # None unless the chunk is currently holding an unparsed payload
        return self.raw_data
    def read(self, f) -> Self:
        super().read(f)
        if self.lazy_read:
            self._read_content_raw(f)
        else:
            self._read_content(f)
        return self
    def _read_content(self, f):
        # element count is derived from the chunk size divided by element size
        size = 0
        if isinstance(self.item, Iterable):
            for var in self.item:
                size += var.size()
            setattr(self, self.data, [tuple([var().read(f) for var in self.item]) for _ in range(self.size // size)])
        else:
            setattr(self, self.data, [self.item().read(f) for _ in range(self.size // self.item.size())])
    def _read_content_raw(self, f):
        # lazy path: stash the payload bytes without parsing
        self.raw_data = f.read(self.size)
    def write(self, f) -> Self:
        self.size = 0
        if isinstance(self.item, Iterable):
            # record rows: precompute per-column sizes and codec kinds
            is_generic_type_map = [False] * len(self.item)
            for i, var in enumerate(self.item):
                self.size += var.size()
                is_generic_type_map[i] = isinstance(var, GenericType)
            if self.raw_data is None:
                content = getattr(self, self.data)
                self.size *= len(content)
            else:
                # still holding raw bytes: write them back verbatim
                self.size = len(self.raw_data)
                super().write(f)
                if self.raw_data:
                    f.write(self.raw_data)
                return self
            super().write(f)
            if self.raw_data:
                f.write(self.raw_data)
                return self
            # NOTE(review): "struct" shadows the stdlib struct module within this loop
            for struct in content:
                for i, var in enumerate(struct):
                    if is_generic_type_map[i]:
                        self.item[i].write(f, var)
                    else:
                        var.write(f)
        else:
            content = None
            if self.raw_data is None:
                content = getattr(self, self.data)
                self.size = (len(content) * self.item.size())
            else:
                # still holding raw bytes: write them back verbatim
                self.size = len(self.raw_data)
                super().write(f)
                if self.raw_data:
                    f.write(self.raw_data)
                return self
            super().write(f)
            if self.raw_data:
                f.write(self.raw_data)
                return self
            for var in content:
                if isinstance(self.item, GenericType):
                    self.item.write(f, var)
                else:
                    var.write(f)
        return self
    def __getattribute__(self, item):
        # lazy parse: first access to the data attribute replays the raw bytes
        raw_data = super().__getattribute__('raw_data')
        if item == super().__getattribute__('data') and raw_data is not None:
            f = BytesIO(raw_data)
            self.size = len(raw_data)
            self._read_content(f)
            self.raw_data = None
        return super().__getattribute__(item)
class ArrayChunk(ArrayChunkBase, ContentChunk):  # for inheriting only
    """Array chunk with a reversed-magic (non-M2) header."""
    pass
class M2ArrayChunk(ArrayChunkBase, M2ContentChunk):  # for inheriting only
    """Array chunk with a natural-order (M2-style) header."""
    pass
class StringBlock:
    """A block of zero terminated strings.

    ``size`` is the total byte length of the block (each string plus its NUL
    terminator); ``padding`` extra bytes are skipped after the block on read
    and on write.
    """
    def __init__(self, size=0, padding=0):
        self.strings = []
        self.size = size
        self.padding = padding
    def read(self, f):
        """Read ``self.size`` bytes, splitting on NUL terminators."""
        cur_str = ""
        for _ in range(self.size):
            charcode = uint8.read(f)
            if charcode:
                cur_str += chr(charcode)
            elif cur_str:
                # terminator reached; runs of consecutive NULs are skipped
                self.strings.append(cur_str)
                cur_str = ""
        f.seek(self.padding, SEEK_CUR)
        return self
    def write(self, f):
        """Write every string followed by a NUL terminator."""
        for str_ in self.strings:
            f.write((str_ + '\x00').encode())
        f.seek(self.padding, SEEK_CUR)
        return self  # consistency fix: chainable like read() and the chunk classes
    def _add(self, str_):
        self.size += len(str_) + 1
        self.strings.append(str_)
    def _replace(self, index, str_):
        size_change = len(str_) - len(self.strings[index])
        self.strings[index] = str_
        self.size += size_change
    def _remove(self, index):
        self.size -= len(self.strings[index]) + 1
        del self.strings[index]
    def __getitem__(self, index):
        return self.strings[index]
    def __len__(self):
        return len(self.strings)
'''
class StringBlockChunk:
magic = ""
def __init__(self):
self.header = ChunkHeader(self.magic)
self.filenames = StringBlock()
def read(self, f):
self.header.read(f)
self.filenames.size = self.header.size
self.filenames.read(f)
return self
def write(self, f):
self.header.size = self.filenames.size
self.header.write(f)
self.filenames.write(f)
return self
'''
class MVER(ContentChunk):
    """File-version chunk. The stored number carries no meaning in practice,
    but the chunk itself is still required."""
    def __init__(self, version=0):
        super().__init__()
        self.size = 4  # payload is a single uint32
        self.version = version
    def read(self, f):
        super().read(f)  # consumes the size field
        self.version = uint32.read(f)
        return self
    def write(self, f):
        super().write(f)  # emits reversed magic + size
        uint32.write(f, self.version)
        return self
| 24.625 | 117 | 0.549414 | import struct
from ..io_utils.types import *
from io import SEEK_CUR, BytesIO
from collections.abc import Iterable
from typing import Optional, Protocol
__reload_order_index__ = 1
class Self:
pass
self.m2_version = version
@singleton
class M2ExternalSequenceCache:
def __init__(self, m2_header):
self.external_sequences = {i: sequence for i, sequence in enumerate(m2_header.sequences)
if not sequence.flags & 0x130}
class M2Versions:
CLASSIC = 256
TBC = 263
WOTLK = 264
CATA = 272
MOP = 272
WOD = 273
LEGION = 274
BFA = 274
@classmethod
def from_expansion_number(cls, exp_num: int):
v_dict = {
0: cls.CLASSIC,
1: cls.TBC,
2: cls.WOTLK,
3: cls.CATA,
4: cls.MOP,
5: cls.WOD,
6: cls.LEGION,
7: cls.BFA
}
return v_dict[exp_num]
read(f)
if ignore_data:
return self
pos = f.tell()
f.seek(self.ofs_elements)
if not is_anim_data:
type_t = type(self.type)
if type_t is GenericType:
self.values = [self.type.read(f) for _ in range(self.n_elements)]
else:
self.values = [self.type().read(f) for _ in range(self.n_elements)]
else:
self.values = [self.type().read(f, ignore_data=bool(M2ExternalSequenceCache().external_sequences.get(i)))
for i in range(self.n_elements)]
f.seek(pos)
return self
def write(self, f):
ofs = MemoryManager.ofs_request(f)
uint32.write(f, len(self.values))
uint32.write(f, ofs if len(self.values) else 0)
pos = f.tell()
f.seek(ofs)
type_t = type(self.type)
if type_t is not partial:
if hasattr(self.type, 'size'):
MemoryManager.mem_reserve(f, len(self.values) * self.type.size())
elif hasattr(self.type.func, 'size'):
MemoryManager.mem_reserve(f, len(self.values) * self.type.func.size())
if type_t is GenericType:
for value in self.values:
self.type.write(f, value)
else:
for value in self.values:
value.write(f)
f.seek(pos)
return self
def __getitem__(self, item):
return self.values[item]
def append(self, value):
self.values.append(value)
def add(self, value):
self.values.append(value)
return len(self.values) - 1
def extend(self, itrbl):
self.values.extend(itrbl)
def prepend(self, itrbl):
self.values = itrbl[:].extend(self.values)
def new(self):
self.values.append(self.type())
return self.values[-1]
def from_iterable(self, itrbl):
self.values = [self.type(item) for item in itrbl]
def set_index(self, index, value):
self.values[index] = value
def set(self, itrbl):
self.values = itrbl
def __len__(self):
return len(self.values)
def __iter__(self):
return self.values.__iter__()
@staticmethod
def size():
return uint32.size() * 2
class IOProtocol(Protocol):
def read(self, f) -> Self: ...
def write(self, f) -> Self: ...
class ContentChunk:
def __init__(self):
self.magic = self.__class__.__name__
self.size = 0
def read(self, f):
self.size = uint32.read(f)
return self
def write(self, f):
f.write(self.magic[::-1].encode('ascii'))
uint32.write(f, self.size)
return self
class ContentChunkBuffered:
raw_data = None
def __init__(self):
self.magic = self.__class__.__name__
self.size = 0
self.raw_data = None
def from_bytes(self, data: bytes):
self.raw_data = data
def read(self, f):
self.size = uint32.read(f)
return self
def write(self, f):
f.write(self.magic[::-1].encode('ascii'))
uint32.write(f, self.size)
return self
def _write_buffered(self, f):
raw_data = super().__getattribute__('raw_data')
magic = super().__getattribute__('magic')
f.write(magic[::-1].encode('ascii'))
size = len(raw_data)
self.size = size
uint32.write(f, size)
f.write(raw_data)
return self
def __getattribute__(self, item):
raw_data = super().__getattribute__('raw_data')
if raw_data is not None:
if item == 'write':
return super().__getattribute__('_write_buffered')
elif item == 'read':
self.raw_data = None
elif item == 'size':
return len(raw_data)
else:
size = struct.pack('I', len(raw_data))
super().__getattribute__('read')(BytesIO(size + raw_data))
self.raw_data = None
return super().__getattribute__(item)
return super().__getattribute__(item)
class M2ContentChunk(ContentChunk):
def write(self, f):
f.write(self.magic.encode('ascii'))
uint32.write(f, self.size)
return self
class M2RawChunk(M2ContentChunk):
def __init__(self):
super().__init__()
self.raw_data = BytesIO()
def read(self, f):
super().read(f)
self.raw_data.write(f.read(self.size))
self.raw_data.seek(0)
return self
def write(self, f):
self.raw_data.seek(0, 2)
self.size = self.raw_data.tell()
self.raw_data.seek(0)
super().write(f)
f.write(self.raw_data.read())
return self
class ArrayChunkBase:
item: IOProtocol = None
data: str = "content"
raw_data: Optional[bytes] = None
lazy_read: bool = False
def __init__(self):
super().__init__()
setattr(self, self.data, [])
def from_bytes(self, data: bytes):
self.raw_data = data
def as_bytes(self) -> Optional[bytes]:
return self.raw_data
def read(self, f) -> Self:
super().read(f)
if self.lazy_read:
self._read_content_raw(f)
else:
self._read_content(f)
return self
def _read_content(self, f):
size = 0
if isinstance(self.item, Iterable):
for var in self.item:
size += var.size()
setattr(self, self.data, [tuple([var().read(f) for var in self.item]) for _ in range(self.size // size)])
else:
setattr(self, self.data, [self.item().read(f) for _ in range(self.size // self.item.size())])
def _read_content_raw(self, f):
self.raw_data = f.read(self.size)
def write(self, f) -> Self:
self.size = 0
if isinstance(self.item, Iterable):
is_generic_type_map = [False] * len(self.item)
for i, var in enumerate(self.item):
self.size += var.size()
is_generic_type_map[i] = isinstance(var, GenericType)
if self.raw_data is None:
content = getattr(self, self.data)
self.size *= len(content)
else:
self.size = len(self.raw_data)
super().write(f)
if self.raw_data:
f.write(self.raw_data)
return self
for struct in content:
for i, var in enumerate(struct):
if is_generic_type_map[i]:
self.item[i].write(f, var)
else:
var.write(f)
else:
content = None
if self.raw_data is None:
content = getattr(self, self.data)
self.size = (len(content) * self.item.size())
else:
self.size = len(self.raw_data)
super().write(f)
if self.raw_data:
f.write(self.raw_data)
return self
for var in content:
if isinstance(self.item, GenericType):
self.item.write(f, var)
else:
var.write(f)
return self
def __getattribute__(self, item):
raw_data = super().__getattribute__('raw_data')
if item == super().__getattribute__('data') and raw_data is not None:
f = BytesIO(raw_data)
self.size = len(raw_data)
self._read_content(f)
self.raw_data = None
return super().__getattribute__(item)
class ArrayChunk(ArrayChunkBase, ContentChunk):
pass
class M2ArrayChunk(ArrayChunkBase, M2ContentChunk):
pass
class StringBlock:
def __init__(self, size=0, padding=0):
self.strings = []
self.size = size
self.padding = padding
def read(self, f):
cur_str = ""
for _ in range(self.size):
charcode = uint8.read(f)
if charcode:
cur_str += chr(charcode)
elif cur_str:
self.strings.append(cur_str)
cur_str = ""
f.seek(self.padding, SEEK_CUR)
return self
def write(self, f):
for str_ in self.strings:
f.write((str_ + '\x00').encode())
f.seek(self.padding, SEEK_CUR)
def _add(self, str_):
self.size += len(str_) + 1
self.strings.append(str_)
def _replace(self, index, str_):
size_change = len(str_) - len(self.strings[index])
self.strings[index] = str_
self.size += size_change
def _remove(self, index):
self.size -= len(self.strings[index]) + 1
del self.strings[index]
def __getitem__(self, index):
return self.strings[index]
def __len__(self):
return len(self.strings)
class MVER(ContentChunk):
def __init__(self, version=0):
super().__init__()
self.size = 4
self.version = version
def read(self, f):
super().read(f)
self.version = uint32.read(f)
return self
def write(self, f):
super().write(f)
uint32.write(f, self.version)
return self
| true | true |
f73a3d697cf912bb499a7d79997bcb8a20e19c51 | 4,137 | py | Python | exawind/prelude/cfg.py | sayerhs/py-exawind | 7adea1567bd58069774ca56a8a75be7e4d9eefd2 | [
"Apache-2.0"
] | null | null | null | exawind/prelude/cfg.py | sayerhs/py-exawind | 7adea1567bd58069774ca56a8a75be7e4d9eefd2 | [
"Apache-2.0"
] | null | null | null | exawind/prelude/cfg.py | sayerhs/py-exawind | 7adea1567bd58069774ca56a8a75be7e4d9eefd2 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""\
Configuration manager
~~~~~~~~~~~~~~~~~~~~~
"""
import os
import abc
import inspect
import logging
from logging.config import dictConfig
from pathlib import Path
class ConfigManager(metaclass=abc.ABCMeta):
    """Base configuration manager utility.

    Subclasses declare the config file type, base name and configuration
    class; this base then resolves configuration files from the standard
    locations (system env var, home dotfile, user env var, current directory)
    and merges them on top of the packaged defaults.
    """
    def __init__(self):
        self.cfg = None  # lazily-initialized merged configuration object
    # Fix: abc.abstractstaticmethod is deprecated since Python 3.3;
    # the documented replacement is stacking @staticmethod over @abc.abstractmethod.
    @staticmethod
    @abc.abstractmethod
    def rc_type():
        """Type of configuration file (also used as the file extension)."""
    @staticmethod
    @abc.abstractmethod
    def rc_base():
        """Base filename (typically the application name)."""
    @staticmethod
    @abc.abstractmethod
    def cfg_class():
        """Configuration class providing ``load_file``/``merge``/``get``."""
    @property
    def cfg_root(self):
        """Root node of the configuration"""
        return self.rc_base()
    @property
    def rc_envvar(self):
        """Environment variable for searching RC files"""
        return "%sRC"%self.rc_base()
    @property
    def rc_sys_envvar(self):
        """Environment variable for searching system RC files"""
        return "%sRC_SYSTEM"%self.rc_base()
    @property
    def rc_file_ext(self):
        """File extension for configuration file"""
        return self.rc_type()
    @property
    def rc_home(self):
        """Home config file"""
        return "." + self.rc_base() + "rc"
    @property
    def cfg_file_name(self):
        """Configuration file name"""
        return self.rc_base() + "." + self.rc_file_ext
    @property
    def cfg_files(self):
        """Return a list of available config files available on the system.

        Order is lowest to highest precedence: system RC, home dotfile,
        user env var RC, current working directory.
        """
        rcfiles = []
        sys_rc = os.environ.get(self.rc_sys_envvar, None)
        if sys_rc and Path(sys_rc).exists():
            rcfiles.append(Path(sys_rc))
        home_rc = Path.home() / self.rc_home
        if home_rc.exists():
            rcfiles.append(home_rc)
        env_rc = os.environ.get(self.rc_envvar, None)
        if env_rc and Path(env_rc).exists():
            rcfiles.append(Path(env_rc))
        cwd_rc = Path.cwd() / self.cfg_file_name
        if cwd_rc.exists():
            rcfiles.append(cwd_rc)
        return rcfiles
    @property
    def default_cfg_file(self):
        """Get the default configuration file shipped next to the subclass module."""
        try:
            cfile = inspect.getfile(self.__class__)
            cdir = Path(cfile).parent
            default_yaml = cdir / self.cfg_file_name
            return default_yaml
        except TypeError:
            # inspect.getfile raises TypeError for built-in classes; fall back
            # to the bare file name
            return self.cfg_file_name
    @property
    def default_config(self):
        """Return the default config (empty config if no default file exists)."""
        cfg_cls = self.cfg_class()
        cfg_file = Path(self.default_cfg_file)
        if not cfg_file.exists():
            return cfg_cls()
        return self.load_cfg_file(cfg_file)
    def load_cfg_file(self, cfg_file):
        """Load a configuration file"""
        cfg_cls = self.cfg_class()
        cfg = cfg_cls.load_file(cfg_file)
        return cfg
    def reset_to_defaults(self):
        """Reset to default configuration"""
        self.cfg = self.default_config
        return self.cfg
    @staticmethod
    def configure_logging(log_cfg=None):
        """Configure python logging (basicConfig when no config is given)."""
        if log_cfg is None:
            logging.basicConfig()
        else:
            logger_cfg = log_cfg.pylogger_options
            dictConfig(logger_cfg)
    def init_config(self, base_cfg=None, init_logging=True):
        """Initialize configuration by merging every discovered RC file on
        top of ``base_cfg`` (or the defaults)."""
        cfg = base_cfg or self.default_config
        rcfiles = self.cfg_files
        for rcname in rcfiles:
            cfg.merge(self.load_cfg_file(rcname))
        if init_logging:
            cfg_root = cfg.get(self.cfg_root, self.cfg_class()())
            log_cfg = cfg_root.get("logging", None)
            self.configure_logging(log_cfg)
        self.cfg = cfg
        return cfg
    def get_config(self, base_cfg=None, init_logging=True):
        """Get the current configuration object, initializing it on first use."""
        if self.cfg is None:
            self.init_config(base_cfg, init_logging)
        return self.cfg
def make_config_manager(cls):
    """Create a zero-argument accessor that always returns one shared
    instance of ``cls`` (module-level singleton factory)."""
    shared_instance = cls()
    def config_manager():
        """Return the shared configuration-manager instance."""
        return shared_instance
    return config_manager
| 26.863636 | 77 | 0.60672 |
import os
import abc
import inspect
import logging
from logging.config import dictConfig
from pathlib import Path
class ConfigManager(metaclass=abc.ABCMeta):
def __init__(self):
self.cfg = None
@abc.abstractstaticmethod
def rc_type():
@abc.abstractstaticmethod
def rc_base():
@abc.abstractstaticmethod
def cfg_class():
@property
def cfg_root(self):
return self.rc_base()
@property
def rc_envvar(self):
return "%sRC"%self.rc_base()
@property
def rc_sys_envvar(self):
return "%sRC_SYSTEM"%self.rc_base()
@property
def rc_file_ext(self):
return self.rc_type()
@property
def rc_home(self):
return "." + self.rc_base() + "rc"
@property
def cfg_file_name(self):
return self.rc_base() + "." + self.rc_file_ext
@property
def cfg_files(self):
rcfiles = []
sys_rc = os.environ.get(self.rc_sys_envvar, None)
if sys_rc and Path(sys_rc).exists():
rcfiles.append(Path(sys_rc))
home_rc = Path.home() / self.rc_home
if home_rc.exists():
rcfiles.append(home_rc)
env_rc = os.environ.get(self.rc_envvar, None)
if env_rc and Path(env_rc).exists():
rcfiles.append(Path(env_rc))
cwd_rc = Path.cwd() / self.cfg_file_name
if cwd_rc.exists():
rcfiles.append(cwd_rc)
return rcfiles
@property
def default_cfg_file(self):
try:
cfile = inspect.getfile(self.__class__)
cdir = Path(cfile).parent
default_yaml = cdir / self.cfg_file_name
return default_yaml
except TypeError:
return self.cfg_file_name
@property
def default_config(self):
cfg_cls = self.cfg_class()
cfg_file = Path(self.default_cfg_file)
if not cfg_file.exists():
return cfg_cls()
return self.load_cfg_file(cfg_file)
def load_cfg_file(self, cfg_file):
cfg_cls = self.cfg_class()
cfg = cfg_cls.load_file(cfg_file)
return cfg
def reset_to_defaults(self):
self.cfg = self.default_config
return self.cfg
@staticmethod
def configure_logging(log_cfg=None):
if log_cfg is None:
logging.basicConfig()
else:
logger_cfg = log_cfg.pylogger_options
dictConfig(logger_cfg)
def init_config(self, base_cfg=None, init_logging=True):
cfg = base_cfg or self.default_config
rcfiles = self.cfg_files
for rcname in rcfiles:
cfg.merge(self.load_cfg_file(rcname))
if init_logging:
cfg_root = cfg.get(self.cfg_root, self.cfg_class()())
log_cfg = cfg_root.get("logging", None)
self.configure_logging(log_cfg)
self.cfg = cfg
return cfg
def get_config(self, base_cfg=None, init_logging=True):
if self.cfg is None:
self.init_config(base_cfg, init_logging)
return self.cfg
def make_config_manager(cls):
cfg_obj = cls()
def config_manager():
return cfg_obj
return config_manager
| true | true |
f73a3e01d093316b44cfe41fff84e844a730cc1c | 1,165 | py | Python | src/spaceone/cost_analysis/info/budget_usage_info.py | whdalsrnt/cost-analysis | cf73e294bcd35fa47f988aab7f00ed4cd777aba5 | [
"Apache-2.0"
] | 2 | 2021-12-22T05:31:18.000Z | 2021-12-23T11:47:29.000Z | src/spaceone/cost_analysis/info/budget_usage_info.py | whdalsrnt/cost-analysis | cf73e294bcd35fa47f988aab7f00ed4cd777aba5 | [
"Apache-2.0"
] | 9 | 2022-02-10T00:58:28.000Z | 2022-03-23T11:12:47.000Z | src/spaceone/cost_analysis/info/budget_usage_info.py | spaceone-dev/cost-analysis | cf73e294bcd35fa47f988aab7f00ed4cd777aba5 | [
"Apache-2.0"
] | null | null | null | import functools
from spaceone.api.cost_analysis.v1 import budget_usage_pb2
from spaceone.core.pygrpc.message_type import *
from spaceone.core import utils
from spaceone.cost_analysis.model.budget_usage_model import BudgetUsage
__all__ = ['BudgetUsageInfo', 'BudgetUsagesInfo']
def BudgetUsageInfo(budget_usage_vo: BudgetUsage, minimal=False):
    """Convert a BudgetUsage model object into its gRPC message.

    When ``minimal`` is False the verbose fields (cost_types, domain_id,
    updated_at) are included as well.
    """
    info = {
        'budget_id': budget_usage_vo.budget_id,
        'name': budget_usage_vo.name,
        'date': budget_usage_vo.date,
        'usd_cost': budget_usage_vo.usd_cost,
        'limit': budget_usage_vo.limit,
    }
    if not minimal:
        cost_types = budget_usage_vo.cost_types
        info['cost_types'] = change_struct_type(cost_types.to_dict()) if cost_types else None
        info['domain_id'] = budget_usage_vo.domain_id
        info['updated_at'] = utils.datetime_to_iso8601(budget_usage_vo.updated_at)
    return budget_usage_pb2.BudgetUsageInfo(**info)
def BudgetUsagesInfo(budget_usage_vos, total_count, **kwargs):
    """Convert an iterable of BudgetUsage models into the list gRPC message.

    Extra keyword arguments (e.g. ``minimal``) are forwarded to BudgetUsageInfo.
    """
    results = [BudgetUsageInfo(vo, **kwargs) for vo in budget_usage_vos]
    return budget_usage_pb2.BudgetUsagesInfo(results=results, total_count=total_count)
| 36.40625 | 123 | 0.739056 | import functools
from spaceone.api.cost_analysis.v1 import budget_usage_pb2
from spaceone.core.pygrpc.message_type import *
from spaceone.core import utils
from spaceone.cost_analysis.model.budget_usage_model import BudgetUsage
__all__ = ['BudgetUsageInfo', 'BudgetUsagesInfo']
def BudgetUsageInfo(budget_usage_vo: BudgetUsage, minimal=False):
info = {
'budget_id': budget_usage_vo.budget_id,
'name': budget_usage_vo.name,
'date': budget_usage_vo.date,
'usd_cost': budget_usage_vo.usd_cost,
'limit': budget_usage_vo.limit
}
if not minimal:
info.update({
'cost_types': change_struct_type(budget_usage_vo.cost_types.to_dict()) if budget_usage_vo.cost_types else None,
'domain_id': budget_usage_vo.domain_id,
'updated_at': utils.datetime_to_iso8601(budget_usage_vo.updated_at)
})
return budget_usage_pb2.BudgetUsageInfo(**info)
def BudgetUsagesInfo(budget_usage_vos, total_count, **kwargs):
return budget_usage_pb2.BudgetUsagesInfo(results=list(
map(functools.partial(BudgetUsageInfo, **kwargs), budget_usage_vos)), total_count=total_count)
| true | true |
f73a3e38ff3a2d2a801f286fdd178e73b7a5458c | 11,875 | py | Python | web-site/server/helpers/coco_eval.py | Maxew42/Trashedy | e7e43f172ef4a039e134cac26980f59fede24423 | [
"MIT"
] | null | null | null | web-site/server/helpers/coco_eval.py | Maxew42/Trashedy | e7e43f172ef4a039e134cac26980f59fede24423 | [
"MIT"
] | null | null | null | web-site/server/helpers/coco_eval.py | Maxew42/Trashedy | e7e43f172ef4a039e134cac26980f59fede24423 | [
"MIT"
] | null | null | null | import json
import tempfile
import numpy as np
import copy
import time
import torch
import torch._six
from pycocotools.cocoeval import COCOeval
from pycocotools.coco import COCO
import pycocotools.mask as mask_util
from collections import defaultdict
import helpers.utils as utils
class CocoEvaluator(object):
    """Runs pycocotools COCO evaluation over accumulated model predictions.

    One ``COCOeval`` instance is kept per IoU type ("bbox", "segm",
    "keypoints"); predictions are fed in per batch via ``update`` and results
    from all processes are merged before accumulate/summarize.
    """
    def __init__(self, coco_gt, iou_types):
        assert isinstance(iou_types, (list, tuple))
        # deep-copy so evaluation never mutates the caller's ground-truth object
        coco_gt = copy.deepcopy(coco_gt)
        self.coco_gt = coco_gt
        self.iou_types = iou_types
        self.coco_eval = {}
        for iou_type in iou_types:
            self.coco_eval[iou_type] = COCOeval(coco_gt, iouType=iou_type)
        self.img_ids = []
        self.eval_imgs = {k: [] for k in iou_types}
    def update(self, predictions):
        """Feed one batch of predictions: {image_id: {"boxes"/"masks"/"keypoints",
        "scores", "labels"}} tensors."""
        img_ids = list(np.unique(list(predictions.keys())))
        self.img_ids.extend(img_ids)
        for iou_type in self.iou_types:
            results = self.prepare(predictions, iou_type)
            # empty results would make loadRes fail, so substitute an empty COCO
            coco_dt = loadRes(self.coco_gt, results) if results else COCO()
            coco_eval = self.coco_eval[iou_type]
            coco_eval.cocoDt = coco_dt
            coco_eval.params.imgIds = list(img_ids)
            img_ids, eval_imgs = evaluate(coco_eval)
            self.eval_imgs[iou_type].append(eval_imgs)
    def synchronize_between_processes(self):
        """Merge per-process eval images so summarize() covers all workers."""
        for iou_type in self.iou_types:
            self.eval_imgs[iou_type] = np.concatenate(self.eval_imgs[iou_type], 2)
            create_common_coco_eval(self.coco_eval[iou_type], self.img_ids, self.eval_imgs[iou_type])
    def accumulate(self):
        for coco_eval in self.coco_eval.values():
            coco_eval.accumulate()
    def summarize(self):
        for iou_type, coco_eval in self.coco_eval.items():
            print("IoU metric: {}".format(iou_type))
            coco_eval.summarize()
    def prepare(self, predictions, iou_type):
        """Convert raw predictions into COCO result dicts for ``iou_type``."""
        if iou_type == "bbox":
            return self.prepare_for_coco_detection(predictions)
        elif iou_type == "segm":
            return self.prepare_for_coco_segmentation(predictions)
        elif iou_type == "keypoints":
            return self.prepare_for_coco_keypoint(predictions)
        else:
            raise ValueError("Unknown iou type {}".format(iou_type))
    def prepare_for_coco_detection(self, predictions):
        """Flatten box predictions into COCO result dicts (boxes in xywh)."""
        coco_results = []
        for original_id, prediction in predictions.items():
            if len(prediction) == 0:
                continue
            boxes = prediction["boxes"]
            boxes = convert_to_xywh(boxes).tolist()
            scores = prediction["scores"].tolist()
            labels = prediction["labels"].tolist()
            coco_results.extend(
                [
                    {
                        "image_id": original_id,
                        "category_id": labels[k],
                        "bbox": box,
                        "score": scores[k],
                    }
                    for k, box in enumerate(boxes)
                ]
            )
        return coco_results
    def prepare_for_coco_segmentation(self, predictions):
        """Flatten mask predictions into COCO result dicts (masks as RLE)."""
        coco_results = []
        for original_id, prediction in predictions.items():
            if len(prediction) == 0:
                continue
            scores = prediction["scores"]
            labels = prediction["labels"]
            masks = prediction["masks"]
            # binarize soft masks before RLE encoding
            masks = masks > 0.5
            # NOTE(review): scores/labels are fetched twice (tensor then list);
            # redundant but harmless
            scores = prediction["scores"].tolist()
            labels = prediction["labels"].tolist()
            rles = [
                mask_util.encode(np.array(mask[0, :, :, np.newaxis], dtype=np.uint8, order="F"))[0]
                for mask in masks
            ]
            for rle in rles:
                # pycocotools returns bytes; JSON needs str
                rle["counts"] = rle["counts"].decode("utf-8")
            coco_results.extend(
                [
                    {
                        "image_id": original_id,
                        "category_id": labels[k],
                        "segmentation": rle,
                        "score": scores[k],
                    }
                    for k, rle in enumerate(rles)
                ]
            )
        return coco_results
    def prepare_for_coco_keypoint(self, predictions):
        """Flatten keypoint predictions into COCO result dicts."""
        coco_results = []
        for original_id, prediction in predictions.items():
            if len(prediction) == 0:
                continue
            boxes = prediction["boxes"]
            boxes = convert_to_xywh(boxes).tolist()
            scores = prediction["scores"].tolist()
            labels = prediction["labels"].tolist()
            keypoints = prediction["keypoints"]
            # COCO expects a flat [x1, y1, v1, x2, y2, v2, ...] list
            keypoints = keypoints.flatten(start_dim=1).tolist()
            coco_results.extend(
                [
                    {
                        "image_id": original_id,
                        "category_id": labels[k],
                        'keypoints': keypoint,
                        "score": scores[k],
                    }
                    for k, keypoint in enumerate(keypoints)
                ]
            )
        return coco_results
def convert_to_xywh(boxes):
    """Convert an (N, 4) tensor of [xmin, ymin, xmax, ymax] boxes to
    COCO-style [x, y, width, height]."""
    xmin, ymin, xmax, ymax = boxes.unbind(1)
    widths = xmax - xmin
    heights = ymax - ymin
    return torch.stack((xmin, ymin, widths, heights), dim=1)
def merge(img_ids, eval_imgs):
    """Gather image ids and per-image eval results from every process and
    merge them, keeping only unique image ids (in sorted order)."""
    gathered_ids = utils.all_gather(img_ids)
    gathered_imgs = utils.all_gather(eval_imgs)
    merged_img_ids = []
    for ids in gathered_ids:
        merged_img_ids.extend(ids)
    merged_img_ids = np.array(merged_img_ids)
    # eval images stack along the third axis (one slab per process)
    merged_eval_imgs = np.concatenate(list(gathered_imgs), 2)
    # keep only unique (and in sorted order) images
    merged_img_ids, idx = np.unique(merged_img_ids, return_index=True)
    merged_eval_imgs = merged_eval_imgs[..., idx]
    return merged_img_ids, merged_eval_imgs
def create_common_coco_eval(coco_eval, img_ids, eval_imgs):
    """Install the merged (cross-process) evaluation results into ``coco_eval``
    so that accumulate()/summarize() operate on results from all workers."""
    img_ids, eval_imgs = merge(img_ids, eval_imgs)
    img_ids = list(img_ids)
    eval_imgs = list(eval_imgs.flatten())
    coco_eval.evalImgs = eval_imgs
    coco_eval.params.imgIds = img_ids
    # pycocotools expects _paramsEval to be a snapshot of the params used
    coco_eval._paramsEval = copy.deepcopy(coco_eval.params)
#################################################################
# From pycocotools, just removed the prints and fixed
# a Python3 bug about unicode not defined
#################################################################
# Ideally, pycocotools wouldn't have hard-coded prints
# so that we could avoid copy-pasting those two functions
def createIndex(self):
    """Build the lookup tables pycocotools' COCO object keeps: anns, imgs,
    cats, imgToAnns and catToImgs (same as COCO.createIndex, without prints)."""
    dataset = self.dataset
    anns = {}
    imgToAnns = defaultdict(list)
    catToImgs = defaultdict(list)
    for ann in dataset.get('annotations', []):
        imgToAnns[ann['image_id']].append(ann)
        anns[ann['id']] = ann
    imgs = {img['id']: img for img in dataset.get('images', [])}
    cats = {cat['id']: cat for cat in dataset.get('categories', [])}
    if 'annotations' in dataset and 'categories' in dataset:
        for ann in dataset['annotations']:
            catToImgs[ann['category_id']].append(ann['image_id'])
    # expose the indices as attributes, mirroring pycocotools
    self.anns = anns
    self.imgToAnns = imgToAnns
    self.catToImgs = catToImgs
    self.imgs = imgs
    self.cats = cats
maskUtils = mask_util  # alias so the code copied from pycocotools below keeps its original name
def loadRes(self, resFile):
    """
    Load result file and return a result api object.
    Copy of ``pycocotools.coco.COCO.loadRes`` with the prints removed and a
    Python 3 unicode issue fixed.
    Args:
        self (obj): coco object with ground truth annotations
        resFile (str): file name of result file (may also be a numpy array
            or an already-decoded list of annotation dicts)
    Returns:
        res (obj): result api object
    """
    res = COCO()
    res.dataset['images'] = [img for img in self.dataset['images']]
    # results may come as a JSON file path, a numpy array, or a list of dicts
    if isinstance(resFile, torch._six.string_classes):
        # NOTE(review): torch._six was removed in recent PyTorch releases;
        # confirm the pinned torch version before upgrading.
        with open(resFile) as f:  # close the handle (upstream leaks it)
            anns = json.load(f)
    elif type(resFile) == np.ndarray:
        anns = self.loadNumpyAnnotations(resFile)
    else:
        anns = resFile
    assert type(anns) == list, 'results in not an array of objects'
    annsImgIds = [ann['image_id'] for ann in anns]
    assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
        'Results do not correspond to current coco set'
    if 'caption' in anns[0]:
        # captioning results: keep only images that actually have a caption
        imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
        res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
        for id, ann in enumerate(anns):
            ann['id'] = id + 1
    elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
        # detection results: synthesize a rectangular segmentation and area
        res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
        for id, ann in enumerate(anns):
            bb = ann['bbox']
            x1, x2, y1, y2 = [bb[0], bb[0] + bb[2], bb[1], bb[1] + bb[3]]
            if 'segmentation' not in ann:
                ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
            ann['area'] = bb[2] * bb[3]
            ann['id'] = id + 1
            ann['iscrowd'] = 0
    elif 'segmentation' in anns[0]:
        res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
        for id, ann in enumerate(anns):
            # now only support compressed RLE format as segmentation results
            ann['area'] = maskUtils.area(ann['segmentation'])
            if 'bbox' not in ann:
                ann['bbox'] = maskUtils.toBbox(ann['segmentation'])
            ann['id'] = id + 1
            ann['iscrowd'] = 0
    elif 'keypoints' in anns[0]:
        # keypoint results: derive bbox/area from the keypoint extremes
        res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
        for id, ann in enumerate(anns):
            s = ann['keypoints']
            x = s[0::3]
            y = s[1::3]
            x1, x2, y1, y2 = np.min(x), np.max(x), np.min(y), np.max(y)
            ann['area'] = (x2 - x1) * (y2 - y1)
            ann['id'] = id + 1
            ann['bbox'] = [x1, y1, x2 - x1, y2 - y1]
    res.dataset['annotations'] = anns
    createIndex(res)
    return res
def evaluate(self):
    '''
    Run per image evaluation on given images and store results (a list of dict) in self.evalImgs.
    Copy of pycocotools' COCOeval.evaluate with the prints removed.
    :return: (imgIds, evalImgs) -- unlike upstream pycocotools, this copy
        returns the evaluated image ids together with the per-image results
        reshaped to (num categories, num area ranges, num images) so callers
        can gather them across processes.
    '''
    p = self.params
    # add backward compatibility if useSegm is specified in params
    if p.useSegm is not None:
        p.iouType = 'segm' if p.useSegm == 1 else 'bbox'
        print('useSegm (deprecated) is not None. Running {} evaluation'.format(p.iouType))
    # de-duplicate the requested image/category ids
    p.imgIds = list(np.unique(p.imgIds))
    if p.useCats:
        p.catIds = list(np.unique(p.catIds))
    p.maxDets = sorted(p.maxDets)
    self.params = p
    self._prepare()
    # loop through images, area range, max detection number
    catIds = p.catIds if p.useCats else [-1]
    if p.iouType == 'segm' or p.iouType == 'bbox':
        computeIoU = self.computeIoU
    elif p.iouType == 'keypoints':
        computeIoU = self.computeOks
    # IoU/OKS matrices are computed once per (image, category) pair
    self.ious = {
        (imgId, catId): computeIoU(imgId, catId)
        for imgId in p.imgIds
        for catId in catIds}
    evaluateImg = self.evaluateImg
    maxDet = p.maxDets[-1]
    evalImgs = [
        evaluateImg(imgId, catId, areaRng, maxDet)
        for catId in catIds
        for areaRng in p.areaRng
        for imgId in p.imgIds
    ]
    # this is NOT in the pycocotools code, but could be done outside
    evalImgs = np.asarray(evalImgs).reshape(len(catIds), len(p.areaRng), len(p.imgIds))
    self._paramsEval = copy.deepcopy(self.params)
    return p.imgIds, evalImgs
| 34.025788 | 107 | 0.575663 | import json
import tempfile
import numpy as np
import copy
import time
import torch
import torch._six
from pycocotools.cocoeval import COCOeval
from pycocotools.coco import COCO
import pycocotools.mask as mask_util
from collections import defaultdict
import helpers.utils as utils
class CocoEvaluator(object):
    """Accumulates COCO-style predictions across batches (and processes).
    One ``COCOeval`` is kept per IoU type (e.g. "bbox", "segm", "keypoints").
    Typical use: repeatedly call :meth:`update`, then
    :meth:`synchronize_between_processes`, :meth:`accumulate`,
    :meth:`summarize`.
    """
    def __init__(self, coco_gt, iou_types):
        # coco_gt: pycocotools COCO object holding the ground-truth
        # annotations; deep-copied so evaluation never mutates the caller's
        assert isinstance(iou_types, (list, tuple))
        coco_gt = copy.deepcopy(coco_gt)
        self.coco_gt = coco_gt
        self.iou_types = iou_types
        self.coco_eval = {}
        for iou_type in iou_types:
            self.coco_eval[iou_type] = COCOeval(coco_gt, iouType=iou_type)
        self.img_ids = []
        self.eval_imgs = {k: [] for k in iou_types}
    def update(self, predictions):
        """Evaluate one batch of predictions (dict: image id -> output dict)."""
        img_ids = list(np.unique(list(predictions.keys())))
        self.img_ids.extend(img_ids)
        for iou_type in self.iou_types:
            results = self.prepare(predictions, iou_type)
            # an empty result list would break loadRes, so fall back to an
            # empty COCO object
            coco_dt = loadRes(self.coco_gt, results) if results else COCO()
            coco_eval = self.coco_eval[iou_type]
            coco_eval.cocoDt = coco_dt
            coco_eval.params.imgIds = list(img_ids)
            # module-level evaluate(): patched COCOeval.evaluate returning
            # results.  NOTE(review): this rebinds img_ids inside the loop;
            # evaluate() returns the same ids, so behavior is unchanged.
            img_ids, eval_imgs = evaluate(coco_eval)
            self.eval_imgs[iou_type].append(eval_imgs)
    def synchronize_between_processes(self):
        """Merge per-process results so every process holds the full evaluation."""
        for iou_type in self.iou_types:
            self.eval_imgs[iou_type] = np.concatenate(self.eval_imgs[iou_type], 2)
            create_common_coco_eval(self.coco_eval[iou_type], self.img_ids, self.eval_imgs[iou_type])
    def accumulate(self):
        """Run COCOeval.accumulate for every IoU type."""
        for coco_eval in self.coco_eval.values():
            coco_eval.accumulate()
    def summarize(self):
        """Print the standard COCO metric summary for every IoU type."""
        for iou_type, coco_eval in self.coco_eval.items():
            print("IoU metric: {}".format(iou_type))
            coco_eval.summarize()
    def prepare(self, predictions, iou_type):
        """Convert raw predictions into COCO result dicts for ``iou_type``."""
        if iou_type == "bbox":
            return self.prepare_for_coco_detection(predictions)
        elif iou_type == "segm":
            return self.prepare_for_coco_segmentation(predictions)
        elif iou_type == "keypoints":
            return self.prepare_for_coco_keypoint(predictions)
        else:
            raise ValueError("Unknown iou type {}".format(iou_type))
    def prepare_for_coco_detection(self, predictions):
        """Flatten box predictions into COCO detection result records."""
        coco_results = []
        for original_id, prediction in predictions.items():
            if len(prediction) == 0:
                continue
            boxes = prediction["boxes"]
            # COCO expects (x, y, w, h) rather than corner coordinates
            boxes = convert_to_xywh(boxes).tolist()
            scores = prediction["scores"].tolist()
            labels = prediction["labels"].tolist()
            coco_results.extend(
                [
                    {
                        "image_id": original_id,
                        "category_id": labels[k],
                        "bbox": box,
                        "score": scores[k],
                    }
                    for k, box in enumerate(boxes)
                ]
            )
        return coco_results
    def prepare_for_coco_segmentation(self, predictions):
        """Encode mask predictions as compressed-RLE COCO result records."""
        coco_results = []
        for original_id, prediction in predictions.items():
            if len(prediction) == 0:
                continue
            scores = prediction["scores"]
            labels = prediction["labels"]
            masks = prediction["masks"]
            # binarize the predicted soft masks
            masks = masks > 0.5
            scores = prediction["scores"].tolist()
            labels = prediction["labels"].tolist()
            rles = [
                mask_util.encode(np.array(mask[0, :, :, np.newaxis], dtype=np.uint8, order="F"))[0]
                for mask in masks
            ]
            for rle in rles:
                # RLE counts come back as bytes; JSON serialization needs str
                rle["counts"] = rle["counts"].decode("utf-8")
            coco_results.extend(
                [
                    {
                        "image_id": original_id,
                        "category_id": labels[k],
                        "segmentation": rle,
                        "score": scores[k],
                    }
                    for k, rle in enumerate(rles)
                ]
            )
        return coco_results
    def prepare_for_coco_keypoint(self, predictions):
        """Flatten keypoint predictions into COCO keypoint result records."""
        coco_results = []
        for original_id, prediction in predictions.items():
            if len(prediction) == 0:
                continue
            boxes = prediction["boxes"]
            boxes = convert_to_xywh(boxes).tolist()
            scores = prediction["scores"].tolist()
            labels = prediction["labels"].tolist()
            keypoints = prediction["keypoints"]
            # one flat [x1, y1, v1, x2, y2, v2, ...] list per detection
            keypoints = keypoints.flatten(start_dim=1).tolist()
            coco_results.extend(
                [
                    {
                        "image_id": original_id,
                        "category_id": labels[k],
                        'keypoints': keypoint,
                        "score": scores[k],
                    }
                    for k, keypoint in enumerate(keypoints)
                ]
            )
        return coco_results
def convert_to_xywh(boxes):
    """Convert an (N, 4) box tensor from (xmin, ymin, xmax, ymax) to (x, y, w, h)."""
    x0, y0, x1, y1 = boxes.unbind(1)
    width = x1 - x0
    height = y1 - y0
    return torch.stack((x0, y0, width, height), dim=1)
def merge(img_ids, eval_imgs):
all_img_ids = utils.all_gather(img_ids)
all_eval_imgs = utils.all_gather(eval_imgs)
merged_img_ids = []
for p in all_img_ids:
merged_img_ids.extend(p)
merged_eval_imgs = []
for p in all_eval_imgs:
merged_eval_imgs.append(p)
merged_img_ids = np.array(merged_img_ids)
merged_eval_imgs = np.concatenate(merged_eval_imgs, 2)
merged_img_ids, idx = np.unique(merged_img_ids, return_index=True)
merged_eval_imgs = merged_eval_imgs[..., idx]
return merged_img_ids, merged_eval_imgs
def create_common_coco_eval(coco_eval, img_ids, eval_imgs):
img_ids, eval_imgs = merge(img_ids, eval_imgs)
img_ids = list(img_ids)
eval_imgs = list(eval_imgs.flatten())
coco_eval.evalImgs = eval_imgs
coco_eval.params.imgIds = img_ids
coco_eval._paramsEval = copy.deepcopy(coco_eval.params)
ion number
catIds = p.catIds if p.useCats else [-1]
if p.iouType == 'segm' or p.iouType == 'bbox':
computeIoU = self.computeIoU
elif p.iouType == 'keypoints':
computeIoU = self.computeOks
self.ious = {
(imgId, catId): computeIoU(imgId, catId)
for imgId in p.imgIds
for catId in catIds}
evaluateImg = self.evaluateImg
maxDet = p.maxDets[-1]
evalImgs = [
evaluateImg(imgId, catId, areaRng, maxDet)
for catId in catIds
for areaRng in p.areaRng
for imgId in p.imgIds
]
# this is NOT in the pycocotools code, but could be done outside
evalImgs = np.asarray(evalImgs).reshape(len(catIds), len(p.areaRng), len(p.imgIds))
self._paramsEval = copy.deepcopy(self.params)
# toc = time.time()
# print('DONE (t={:0.2f}s).'.format(toc-tic))
return p.imgIds, evalImgs
| true | true |
f73a3ec3b3edc3ad615b52890d9c828bb35cb31c | 2,835 | py | Python | config.py | federicoviola/ynitiumapp | 2ca3f4b27d2a032e18e856d691dcc02ec5bb2697 | [
"MIT"
] | null | null | null | config.py | federicoviola/ynitiumapp | 2ca3f4b27d2a032e18e856d691dcc02ec5bb2697 | [
"MIT"
] | null | null | null | config.py | federicoviola/ynitiumapp | 2ca3f4b27d2a032e18e856d691dcc02ec5bb2697 | [
"MIT"
] | null | null | null | import os
# directory containing this module; anchors the SQLite database file paths
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
    """Base configuration shared by every environment."""
    # placeholder secret lets the app boot without the env var set;
    # set SECRET_KEY in production
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'
    SSL_DISABLE = False
    SQLALCHEMY_COMMIT_ON_TEARDOWN = True
    # Fastmail SMTP settings; credentials come from the environment
    MAIL_SERVER = 'mail.messagingengine.com'
    MAIL_PORT = 587
    MAIL_USE_TLS = True
    MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
    MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
    YNITIUM_MAIL_SUBJECT_PREFIX = '[Ynitium]'
    YNITIUM_MAIL_SENDER = 'Ynitium Admin <federicoviola@fastmail.fm>'
    YNITIUM_ADMIN = os.environ.get('YNITIUM_ADMIN')
    # pagination sizes
    YNITIUM_POSTS_PER_PAGE = 15
    YNITIUM_FOLLOWERS_PER_PAGE = 50
    YNITIUM_COMMENTS_PER_PAGE = 30
    @staticmethod
    def init_app(app):
        """Hook for config-specific app setup; overridden by subclasses."""
        pass
class DevelopmentConfig(Config):
    """Development settings: debug mode and a local SQLite database."""
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
class TestingConfig(Config):
    """Test settings: TESTING flag on and a separate SQLite database."""
    TESTING = True
    SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'data-test.sqlite')
class ProductionConfig(Config):
    """Production settings: real database plus error e-mails to the admin."""
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'data.sqlite')
    @classmethod
    def init_app(cls, app):
        Config.init_app(app)
        # email errors to the administrators
        import logging
        from logging.handlers import SMTPHandler
        credentials = None
        secure = None
        if getattr(cls, 'MAIL_USERNAME', None) is not None:
            credentials = (cls.MAIL_USERNAME, cls.MAIL_PASSWORD)
            if getattr(cls, 'MAIL_USE_TLS', None):
                # empty tuple enables STARTTLS without a cert/key pair
                secure = ()
        mail_handler = SMTPHandler(
            mailhost=(cls.MAIL_SERVER, cls.MAIL_PORT),
            fromaddr=cls.YNITIUM_MAIL_SENDER,
            toaddrs=[cls.YNITIUM_ADMIN],
            subject=cls.YNITIUM_MAIL_SUBJECT_PREFIX + ' Application Error',
            credentials=credentials,
            secure=secure)
        # only ERROR and above get e-mailed
        mail_handler.setLevel(logging.ERROR)
        app.logger.addHandler(mail_handler)
class HerokuConfig(ProductionConfig):
    """Heroku deployment: proxy-aware WSGI app and stderr logging."""
    # any non-empty value -- even the string 'false' -- is truthy here
    SSL_DISABLE = bool(os.environ.get('SSL_DISABLE'))
    @classmethod
    def init_app(cls, app):
        ProductionConfig.init_app(app)
        # handle proxy server headers
        # NOTE(review): werkzeug.contrib was removed in Werkzeug 1.0; newer
        # installs ship werkzeug.middleware.proxy_fix instead -- confirm the
        # pinned Werkzeug version before upgrading.
        from werkzeug.contrib.fixers import ProxyFix
        app.wsgi_app = ProxyFix(app.wsgi_app)
        # log to stderr (Heroku captures process output)
        import logging
        from logging import StreamHandler
        file_handler = StreamHandler()
        file_handler.setLevel(logging.WARNING)
        app.logger.addHandler(file_handler)
# name -> configuration class registry; 'default' is the development config
config = {
    'development': DevelopmentConfig,
    'testing': TestingConfig,
    'production': ProductionConfig,
    'heroku': HerokuConfig,
    'default': DevelopmentConfig
}
| 31.5 | 75 | 0.670899 | import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'
SSL_DISABLE = False
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
MAIL_SERVER = 'mail.messagingengine.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
YNITIUM_MAIL_SUBJECT_PREFIX = '[Ynitium]'
YNITIUM_MAIL_SENDER = 'Ynitium Admin <federicoviola@fastmail.fm>'
YNITIUM_ADMIN = os.environ.get('YNITIUM_ADMIN')
YNITIUM_POSTS_PER_PAGE = 15
YNITIUM_FOLLOWERS_PER_PAGE = 50
YNITIUM_COMMENTS_PER_PAGE = 30
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
DEBUG = True
SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
class TestingConfig(Config):
TESTING = True
SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-test.sqlite')
class ProductionConfig(Config):
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
@classmethod
def init_app(cls, app):
Config.init_app(app)
import logging
from logging.handlers import SMTPHandler
credentials = None
secure = None
if getattr(cls, 'MAIL_USERNAME', None) is not None:
credentials = (cls.MAIL_USERNAME, cls.MAIL_PASSWORD)
if getattr(cls, 'MAIL_USE_TLS', None):
secure = ()
mail_handler = SMTPHandler(
mailhost=(cls.MAIL_SERVER, cls.MAIL_PORT),
fromaddr=cls.YNITIUM_MAIL_SENDER,
toaddrs=[cls.YNITIUM_ADMIN],
subject=cls.YNITIUM_MAIL_SUBJECT_PREFIX + ' Application Error',
credentials=credentials,
secure=secure)
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
class HerokuConfig(ProductionConfig):
SSL_DISABLE = bool(os.environ.get('SSL_DISABLE'))
@classmethod
def init_app(cls, app):
ProductionConfig.init_app(app)
from werkzeug.contrib.fixers import ProxyFix
app.wsgi_app = ProxyFix(app.wsgi_app)
import logging
from logging import StreamHandler
file_handler = StreamHandler()
file_handler.setLevel(logging.WARNING)
app.logger.addHandler(file_handler)
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig,
'heroku': HerokuConfig,
'default': DevelopmentConfig
}
| true | true |
f73a3f8b14868e92a9505f7d1fba8a233fea96f3 | 221 | py | Python | anet/tasks/mnist/envs/mnist_env_senary.py | thomasaunger/Anet | 1d353f280a30c3207fa6d09af91a85c4955bbda4 | [
"BSD-3-Clause"
] | null | null | null | anet/tasks/mnist/envs/mnist_env_senary.py | thomasaunger/Anet | 1d353f280a30c3207fa6d09af91a85c4955bbda4 | [
"BSD-3-Clause"
] | null | null | null | anet/tasks/mnist/envs/mnist_env_senary.py | thomasaunger/Anet | 1d353f280a30c3207fa6d09af91a85c4955bbda4 | [
"BSD-3-Clause"
] | null | null | null | from anet.tasks.mnist.envs.mnist_env import MNISTEnv
class MNISTEnvSenary(MNISTEnv):
    """MNIST environment fixed to 6 — presumably the first six digit
    classes; confirm against MNISTEnv's first constructor argument.

    Thin wrapper: all other constructor arguments pass through unchanged.
    """
    def __init__(self, procs=0, proc_id=-1, train=True):
        # cooperative super() instead of a hard-coded base-class call
        super().__init__(6, procs=procs, proc_id=proc_id, train=train)
| 36.833333 | 77 | 0.746606 | from anet.tasks.mnist.envs.mnist_env import MNISTEnv
class MNISTEnvSenary(MNISTEnv):
def __init__(self, procs=0, proc_id=-1, train=True):
MNISTEnv.__init__(self, 6, procs=procs, proc_id=proc_id, train=train)
| true | true |
f73a40724a9514df3f4699c831628b04f799fd18 | 442 | py | Python | Códigos fichados e comentados/Matemática/Aritmetica Complexa.py | kioolz/Python-scripts | cb8ad758811e2eed8673392077a55e8922ac7b9f | [
"MIT"
] | null | null | null | Códigos fichados e comentados/Matemática/Aritmetica Complexa.py | kioolz/Python-scripts | cb8ad758811e2eed8673392077a55e8922ac7b9f | [
"MIT"
] | null | null | null | Códigos fichados e comentados/Matemática/Aritmetica Complexa.py | kioolz/Python-scripts | cb8ad758811e2eed8673392077a55e8922ac7b9f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 29 23:34:18 2019
@author: Caio
"""
# Exemplo de Aritmética complexa
u = 2.5 + 3j #Criando um numero complexo
v = 2
w = u + v # Operação soma
print(w)
a = -2
b = 0.5
s = a + b*1j
s = complex(a,b)
s
s* w #Complex * Complex
s/w #Complex/Complex
#Partes do numero complexo
#Parte real
s.real
#Parte imaginaria
s.imag
#Conjugado
s.conjugate()
# Funções complexas com Python | 9.404255 | 42 | 0.628959 |
u = 2.5 + 3j
v = 2
w = u + v
print(w)
a = -2
b = 0.5
s = a + b*1j
s = complex(a,b)
s
s* w
s/w
s.real
s.imag
s.conjugate()
| true | true |
f73a4232b7c41b5804c217321acbc5b6975e869d | 9,950 | py | Python | flask/config.py | himanshumangla/flaskExperiment | e4c4557ab097e918ddd3b8f0b16524e65ae9bd63 | [
"BSD-3-Clause"
] | null | null | null | flask/config.py | himanshumangla/flaskExperiment | e4c4557ab097e918ddd3b8f0b16524e65ae9bd63 | [
"BSD-3-Clause"
] | null | null | null | flask/config.py | himanshumangla/flaskExperiment | e4c4557ab097e918ddd3b8f0b16524e65ae9bd63 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
flask.config
~~~~~~~~~~~~
Implements the configuration related objects.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import types
import errno
from werkzeug.utils import import_string
from ._compat import string_types, iteritems
from . import json
class ConfigAttribute(object):
    """Descriptor that proxies attribute access to ``obj.config[name]``.
    Reads pass through the optional ``get_converter`` callable; writes store
    the raw value.  Accessing the attribute on the class itself returns the
    descriptor object.
    """
    def __init__(self, name, get_converter=None):
        self.__name__ = name
        self.get_converter = get_converter
    def __get__(self, obj, type=None):
        if obj is None:
            # class-level access: expose the descriptor itself
            return self
        value = obj.config[self.__name__]
        converter = self.get_converter
        return value if converter is None else converter(value)
    def __set__(self, obj, value):
        obj.config[self.__name__] = value
class Config(dict):
    """Works exactly like a dict but provides ways to fill it from files
    or special dictionaries. There are two common patterns to populate the
    config.
    Either you can fill the config from a config file::
        app.config.from_pyfile('yourconfig.cfg')
    Or alternatively you can define the configuration options in the
    module that calls :meth:`from_object` or provide an import path to
    a module that should be loaded. It is also possible to tell it to
    use the same module and with that provide the configuration values
    just before the call::
        DEBUG = True
        SECRET_KEY = 'development key'
        app.config.from_object(__name__)
    In both cases (loading from any Python file or loading from modules),
    only uppercase keys are added to the config. This makes it possible to use
    lowercase values in the config file for temporary values that are not added
    to the config or to define the config keys in the same file that implements
    the application.
    Probably the most interesting way to load configurations is from an
    environment variable pointing to a file::
        app.config.from_envvar('YOURAPPLICATION_SETTINGS')
    In this case before launching the application you have to set this
    environment variable to the file you want to use. On Linux and OS X
    use the export statement::
        export YOURAPPLICATION_SETTINGS='/path/to/config/file'
    On windows use `set` instead.
    :param root_path: path to which files are read relative from. When the
                      config object is created by the application, this is
                      the application's :attr:`~flask.Flask.root_path`.
    :param defaults: an optional dictionary of default values
    """
    def __init__(self, root_path, defaults=None):
        dict.__init__(self, defaults or {})
        self.root_path = root_path
    def from_envvar(self, variable_name, silent=False):
        """Loads a configuration from an environment variable pointing to
        a configuration file. This is basically just a shortcut with nicer
        error messages for this line of code::
            app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS'])
        :param variable_name: name of the environment variable
        :param silent: set to ``True`` if you want silent failure for missing
                       files.
        :return: bool. ``True`` if able to load config, ``False`` otherwise.
        """
        rv = os.environ.get(variable_name)
        if not rv:
            if silent:
                return False
            raise RuntimeError('The environment variable %r is not set '
                               'and as such configuration could not be '
                               'loaded. Set this variable and make it '
                               'point to a configuration file' %
                               variable_name)
        return self.from_pyfile(rv, silent=silent)
    def from_pyfile(self, filename, silent=False):
        """Updates the values in the config from a Python file. This function
        behaves as if the file was imported as module with the
        :meth:`from_object` function.
        :param filename: the filename of the config. This can either be an
                         absolute filename or a filename relative to the
                         root path.
        :param silent: set to ``True`` if you want silent failure for missing
                       files.
        .. versionadded:: 0.7
           `silent` parameter.
        """
        filename = os.path.join(self.root_path, filename)
        d = types.ModuleType('config')
        d.__file__ = filename
        try:
            # execute the file in the namespace of a throwaway module so its
            # top-level assignments become attributes of ``d``
            with open(filename, mode='rb') as config_file:
                exec(compile(config_file.read(), filename, 'exec'), d.__dict__)
        except IOError as e:
            if silent and e.errno in (
                errno.ENOENT, errno.EISDIR, errno.ENOTDIR
            ):
                return False
            e.strerror = 'Unable to load configuration file (%s)' % e.strerror
            raise
        self.from_object(d)
        return True
    def from_object(self, obj):
        """Updates the values from the given object. An object can be of one
        of the following two types:
        - a string: in this case the object with that name will be imported
        - an actual object reference: that object is used directly
        Objects are usually either modules or classes. :meth:`from_object`
        loads only the uppercase attributes of the module/class. A ``dict``
        object will not work with :meth:`from_object` because the keys of a
        ``dict`` are not attributes of the ``dict`` class.
        Example of module-based configuration::
            app.config.from_object('yourapplication.default_config')
            from yourapplication import default_config
            app.config.from_object(default_config)
        You should not use this function to load the actual configuration but
        rather configuration defaults. The actual config should be loaded
        with :meth:`from_pyfile` and ideally from a location not within the
        package because the package might be installed system wide.
        See :ref:`config-dev-prod` for an example of class-based configuration
        using :meth:`from_object`.
        :param obj: an import name or object
        """
        if isinstance(obj, string_types):
            obj = import_string(obj)
        for key in dir(obj):
            # only uppercase names are treated as configuration keys
            if key.isupper():
                self[key] = getattr(obj, key)
    def from_json(self, filename, silent=False):
        """Updates the values in the config from a JSON file. This function
        behaves as if the JSON object was a dictionary and passed to the
        :meth:`from_mapping` function.
        :param filename: the filename of the JSON file. This can either be an
                         absolute filename or a filename relative to the
                         root path.
        :param silent: set to ``True`` if you want silent failure for missing
                       files.
        .. versionadded:: 0.11
        """
        filename = os.path.join(self.root_path, filename)
        try:
            with open(filename) as json_file:
                obj = json.loads(json_file.read())
        except IOError as e:
            if silent and e.errno in (errno.ENOENT, errno.EISDIR):
                return False
            e.strerror = 'Unable to load configuration file (%s)' % e.strerror
            raise
        return self.from_mapping(obj)
    def from_mapping(self, *mapping, **kwargs):
        """Updates the config like :meth:`update` ignoring items with non-upper
        keys.
        .. versionadded:: 0.11
        """
        mappings = []
        if len(mapping) == 1:
            # the single positional may be a mapping or an iterable of pairs
            if hasattr(mapping[0], 'items'):
                mappings.append(mapping[0].items())
            else:
                mappings.append(mapping[0])
        elif len(mapping) > 1:
            raise TypeError(
                'expected at most 1 positional argument, got %d' % len(mapping)
            )
        mappings.append(kwargs.items())
        for mapping in mappings:
            for (key, value) in mapping:
                if key.isupper():
                    self[key] = value
        return True
    def get_namespace(self, namespace, lowercase=True, trim_namespace=True):
        """Returns a dictionary containing a subset of configuration options
        that match the specified namespace/prefix. Example usage::
            app.config['IMAGE_STORE_TYPE'] = 'fs'
            app.config['IMAGE_STORE_PATH'] = '/var/app/images'
            app.config['IMAGE_STORE_BASE_URL'] = 'http://img.website.com'
            image_store_config = app.config.get_namespace('IMAGE_STORE_')
        The resulting dictionary `image_store_config` would look like::
            {
                'type': 'fs',
                'path': '/var/app/images',
                'base_url': 'http://img.website.com'
            }
        This is often useful when configuration options map directly to
        keyword arguments in functions or class constructors.
        :param namespace: a configuration namespace
        :param lowercase: a flag indicating if the keys of the resulting
                          dictionary should be lowercase
        :param trim_namespace: a flag indicating if the keys of the resulting
                               dictionary should not include the namespace
        .. versionadded:: 0.11
        """
        rv = {}
        for k, v in iteritems(self):
            if not k.startswith(namespace):
                continue
            # optionally strip the prefix (and lowercase) to form result keys
            if trim_namespace:
                key = k[len(namespace):]
            else:
                key = k
            if lowercase:
                key = key.lower()
            rv[key] = v
        return rv
    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, dict.__repr__(self))
import os
import types
import errno
from werkzeug.utils import import_string
from ._compat import string_types, iteritems
from . import json
class ConfigAttribute(object):
def __init__(self, name, get_converter=None):
self.__name__ = name
self.get_converter = get_converter
def __get__(self, obj, type=None):
if obj is None:
return self
rv = obj.config[self.__name__]
if self.get_converter is not None:
rv = self.get_converter(rv)
return rv
def __set__(self, obj, value):
obj.config[self.__name__] = value
class Config(dict):
def __init__(self, root_path, defaults=None):
dict.__init__(self, defaults or {})
self.root_path = root_path
def from_envvar(self, variable_name, silent=False):
rv = os.environ.get(variable_name)
if not rv:
if silent:
return False
raise RuntimeError('The environment variable %r is not set '
'and as such configuration could not be '
'loaded. Set this variable and make it '
'point to a configuration file' %
variable_name)
return self.from_pyfile(rv, silent=silent)
def from_pyfile(self, filename, silent=False):
filename = os.path.join(self.root_path, filename)
d = types.ModuleType('config')
d.__file__ = filename
try:
with open(filename, mode='rb') as config_file:
exec(compile(config_file.read(), filename, 'exec'), d.__dict__)
except IOError as e:
if silent and e.errno in (
errno.ENOENT, errno.EISDIR, errno.ENOTDIR
):
return False
e.strerror = 'Unable to load configuration file (%s)' % e.strerror
raise
self.from_object(d)
return True
def from_object(self, obj):
if isinstance(obj, string_types):
obj = import_string(obj)
for key in dir(obj):
if key.isupper():
self[key] = getattr(obj, key)
def from_json(self, filename, silent=False):
filename = os.path.join(self.root_path, filename)
try:
with open(filename) as json_file:
obj = json.loads(json_file.read())
except IOError as e:
if silent and e.errno in (errno.ENOENT, errno.EISDIR):
return False
e.strerror = 'Unable to load configuration file (%s)' % e.strerror
raise
return self.from_mapping(obj)
def from_mapping(self, *mapping, **kwargs):
mappings = []
if len(mapping) == 1:
if hasattr(mapping[0], 'items'):
mappings.append(mapping[0].items())
else:
mappings.append(mapping[0])
elif len(mapping) > 1:
raise TypeError(
'expected at most 1 positional argument, got %d' % len(mapping)
)
mappings.append(kwargs.items())
for mapping in mappings:
for (key, value) in mapping:
if key.isupper():
self[key] = value
return True
def get_namespace(self, namespace, lowercase=True, trim_namespace=True):
rv = {}
for k, v in iteritems(self):
if not k.startswith(namespace):
continue
if trim_namespace:
key = k[len(namespace):]
else:
key = k
if lowercase:
key = key.lower()
rv[key] = v
return rv
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, dict.__repr__(self))
| true | true |
f73a43ece7c313f71c221112e83575d177340f75 | 4,722 | py | Python | hendjibi/tools/config.py | Konrad-Ziarko/hendjibi | c1d93e85a94b348408110cdf319f64cc0f815997 | [
"MIT"
] | null | null | null | hendjibi/tools/config.py | Konrad-Ziarko/hendjibi | c1d93e85a94b348408110cdf319f64cc0f815997 | [
"MIT"
] | null | null | null | hendjibi/tools/config.py | Konrad-Ziarko/hendjibi | c1d93e85a94b348408110cdf319f64cc0f815997 | [
"MIT"
] | null | null | null | import configparser
import os
from enum import Enum
from hendjibi import PROJECT_NAME_SHORT
from hendjibi.tools.app_logger import get_logger
from hendjibi.tools.translator import translate as _
from hendjibi.model.entry import ProgressStatus, EntryType
# module-level logger
logger = get_logger(__name__)
# inclusive bounds for the 'slider' config value (see ConfigManager.PROPERTIES)
SLIDER_MIN = 50
SLIDER_MAX = 250
class ConfigSection(Enum):
    """Section names used in the application's .ini configuration file."""
    MAIN = 'Main'
    VIEW = 'View'
    # filter sections are named after the corresponding enum classes
    PROGRESS_STATUS = ProgressStatus.__name__
    ENTRY_TYPE = EntryType.__name__
class ConfigManager(object):
    """Loads, exposes and persists the application's .ini configuration.

    Every entry of ``PROPERTIES`` (and one boolean filter per
    ``ProgressStatus``/``EntryType`` member) is turned into a class-level
    property by :meth:`add_property`; assigning to a property clamps the
    value (for ints) and immediately rewrites the config file.
    """
    # (name, section, type, default, min, max) -- min/max apply to ints only
    PROPERTIES = [
        ('data_dump_path', ConfigSection.MAIN, str, os.path.join('hdb', 'entries.data'), None, None),
        ('height', ConfigSection.MAIN, int, 600, 200, None),
        ('width', ConfigSection.MAIN, int, 800, 300, None),
        ('log_level', ConfigSection.MAIN, int, 2, 0, 5),
        ('redraw_on_release', ConfigSection.VIEW, bool, False, None, None),
        ('stay_on_top', ConfigSection.VIEW, bool, False, None, None),
        ('dark_mode', ConfigSection.VIEW, bool, True, None, None),
        ('slider', ConfigSection.VIEW, int, 150, SLIDER_MIN, SLIDER_MAX),
    ]

    def __init__(self, cwd):
        """Create/read ``<cwd>/<PROJECT_NAME_SHORT>/<PROJECT_NAME_SHORT>.ini``."""
        path = os.path.join(cwd, PROJECT_NAME_SHORT)
        if not os.path.isdir(path):
            os.mkdir(path)
        self.config_path = os.path.join(path, F'{PROJECT_NAME_SHORT}.ini')
        self.config = configparser.ConfigParser()
        try:
            self.config.read(self.config_path)
            for section in ConfigSection:
                if not self.config.has_section(section.value):
                    self.config.add_section(section.value)
        except Exception as e:
            logger.error(_(F'Could not open config file due to: {e}'))
        # install one property per declared option plus one boolean filter
        # per ProgressStatus / EntryType member
        for property_to_add in ConfigManager.PROPERTIES:
            ConfigManager.add_property(*property_to_add)
        for progress_status_type in ProgressStatus:
            name = progress_status_type.value.lower()
            ConfigManager.add_property(name, ProgressStatus.__name__, bool, True)
        for entry_type in EntryType:
            name = entry_type.value.lower()
            ConfigManager.add_property(name, EntryType.__name__, bool, True)
        self.read_config()

    @staticmethod
    def add_property(name, tag, prop_type, default_value, min_value=None, max_value=None):
        """Install a class-level property ``name`` backed by ini section ``tag``.

        The generated setter clamps int values to [min_value, max_value] and
        persists the whole config file on every assignment.
        """
        if not isinstance(tag, str):
            tag = tag.value
        setattr(ConfigManager, F'_default_{name}', default_value)

        def setter_method(this, value):
            # bool is an int subclass, but bool properties are declared with
            # min/max of None, so the clamping below is a no-op for them
            if issubclass(prop_type, int):
                if min_value is not None and value < min_value:
                    value = min_value
                if max_value is not None and value > max_value:
                    value = max_value
            this.config.set(tag, name, str(value))
            setattr(this, F'_{name}', value)
            this.write_config()

        getter_method = property(lambda x: getattr(x, F'_{name}'), setter_method)
        setattr(ConfigManager, F'_{name}', default_value)
        setattr(ConfigManager, name, getter_method)

    def read_config(self):
        """Load every declared option from the ini file (defaults on error)."""
        for property_to_read in ConfigManager.PROPERTIES:
            try:
                # BUGFIX: bool must be checked before int -- bool is a
                # subclass of int, and getint() raises ValueError on the
                # stored 'True'/'False' strings, which silently reset every
                # persisted boolean option back to its default.
                if issubclass(property_to_read[2], bool):
                    v = self.config.getboolean(property_to_read[1].value, property_to_read[0])
                elif issubclass(property_to_read[2], int):
                    v = self.config.getint(property_to_read[1].value, property_to_read[0])
                elif issubclass(property_to_read[2], str):
                    v = self.config.get(property_to_read[1].value, property_to_read[0])
                else:
                    raise Exception('Property with unhandled type!')
                setattr(self, property_to_read[0], v)
            except (configparser.NoOptionError, ValueError):
                # missing or malformed entry: fall back to the default
                setattr(self, property_to_read[0], property_to_read[3])
        for property_to_read in ProgressStatus:
            prop_name = property_to_read.value.lower()
            try:
                v = self.config.getboolean(ProgressStatus.__name__, prop_name)
                setattr(self, prop_name, v)
            except (configparser.NoOptionError, ValueError):
                setattr(self, prop_name, True)
        for property_to_read in EntryType:
            prop_name = property_to_read.value.lower()
            try:
                v = self.config.getboolean(EntryType.__name__, prop_name)
                setattr(self, prop_name, v)
            except (configparser.NoOptionError, ValueError):
                setattr(self, prop_name, True)

    def write_config(self):
        """Persist the in-memory configuration to ``self.config_path``."""
        with open(self.config_path, 'w') as configfile:
            self.config.write(configfile)
| 41.787611 | 101 | 0.622618 | import configparser
import os
from enum import Enum
from hendjibi import PROJECT_NAME_SHORT
from hendjibi.tools.app_logger import get_logger
from hendjibi.tools.translator import translate as _
from hendjibi.model.entry import ProgressStatus, EntryType
logger = get_logger(__name__)
SLIDER_MIN = 50
SLIDER_MAX = 250
class ConfigSection(Enum):
MAIN = 'Main'
VIEW = 'View'
PROGRESS_STATUS = ProgressStatus.__name__
ENTRY_TYPE = EntryType.__name__
class ConfigManager(object):
PROPERTIES = [
('data_dump_path', ConfigSection.MAIN, str, os.path.join('hdb', 'entries.data'), None, None),
('height', ConfigSection.MAIN, int, 600, 200, None),
('width', ConfigSection.MAIN, int, 800, 300, None),
('log_level', ConfigSection.MAIN, int, 2, 0, 5),
('redraw_on_release', ConfigSection.VIEW, bool, False, None, None),
('stay_on_top', ConfigSection.VIEW, bool, False, None, None),
('dark_mode', ConfigSection.VIEW, bool, True, None, None),
('slider', ConfigSection.VIEW, int, 150, SLIDER_MIN, SLIDER_MAX),
]
def __init__(self, cwd):
path = os.path.join(cwd, PROJECT_NAME_SHORT)
if not os.path.isdir(path):
os.mkdir(path)
self.config_path = os.path.join(path, F'{PROJECT_NAME_SHORT}.ini')
self.config = configparser.ConfigParser()
try:
self.config.read(self.config_path)
for section in ConfigSection:
if not self.config.has_section(section.value):
self.config.add_section(section.value)
except Exception as e:
logger.error(_(F'Could not open config file due to: {e}'))
for property_to_add in ConfigManager.PROPERTIES:
ConfigManager.add_property(*property_to_add)
for progress_status_type in ProgressStatus:
name = progress_status_type.value.lower()
ConfigManager.add_property(name, ProgressStatus.__name__, bool, True)
for entry_type in EntryType:
name = entry_type.value.lower()
ConfigManager.add_property(name, EntryType.__name__, bool, True)
self.read_config()
@staticmethod
def add_property(name, tag, prop_type, default_value, min_value=None, max_value=None):
if not isinstance(tag, str):
tag = tag.value
setattr(ConfigManager, F'_default_{name}', default_value)
def setter_method(this, value):
if issubclass(prop_type, int):
if min_value is not None:
if value < min_value:
value = min_value
if max_value is not None:
if value > max_value:
value = max_value
this.config.set(tag, name, str(value))
setattr(this, F'_{name}', value)
this.write_config()
getter_method = property(lambda x: getattr(x, F'_{name}'), setter_method)
setattr(ConfigManager, F'_{name}', default_value)
setattr(ConfigManager, name, getter_method)
def read_config(self):
for property_to_read in ConfigManager.PROPERTIES:
try:
if issubclass(property_to_read[2], int):
v = self.config.getint(property_to_read[1].value, property_to_read[0])
elif issubclass(property_to_read[2], bool):
v = self.config.getboolean(property_to_read[1].value, property_to_read[0])
elif issubclass(property_to_read[2], str):
v = self.config.get(property_to_read[1].value, property_to_read[0])
else:
raise Exception('Property with unhandled type!')
setattr(self, property_to_read[0], v)
except (configparser.NoOptionError, ValueError):
setattr(self, property_to_read[0], property_to_read[3])
for property_to_read in ProgressStatus:
prop_name = property_to_read.value.lower()
try:
v = self.config.getboolean(ProgressStatus.__name__, prop_name)
setattr(self, prop_name, v)
except (configparser.NoOptionError, ValueError):
setattr(self, prop_name, True)
for property_to_read in EntryType:
prop_name = property_to_read.value.lower()
try:
v = self.config.getboolean(EntryType.__name__, prop_name)
setattr(self, prop_name, v)
except (configparser.NoOptionError, ValueError):
setattr(self, prop_name, True)
def write_config(self):
with open(self.config_path, 'w') as configfile:
self.config.write(configfile)
| true | true |
f73a4464c69e410cdfb4817d0e80d18032a75bc6 | 3,415 | py | Python | QUANTAXIS/QAUtil/QATransform.py | Sinovel/QUANTAXIS | 97f1ea2140f58c92ff5c84b851886d9eda1f9ac3 | [
"MIT"
] | 3 | 2020-10-20T07:48:52.000Z | 2022-02-11T05:47:34.000Z | QUANTAXIS/QAUtil/QATransform.py | Sinovel/QUANTAXIS | 97f1ea2140f58c92ff5c84b851886d9eda1f9ac3 | [
"MIT"
] | null | null | null | QUANTAXIS/QAUtil/QATransform.py | Sinovel/QUANTAXIS | 97f1ea2140f58c92ff5c84b851886d9eda1f9ac3 | [
"MIT"
] | 2 | 2021-03-05T13:54:28.000Z | 2021-03-06T11:53:43.000Z | # coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2019 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import csv
import json
import numpy as np
import pandas as pd
def QA_util_to_json_from_pandas(data):
"""
explanation:
将pandas数据转换成json格式
params:
* data ->:
meaning: pandas数据
type: null
optional: [null]
return:
dict
demonstrate:
Not described
output:
Not described
"""
"""需要对于datetime 和date 进行转换, 以免直接被变成了时间戳"""
if 'datetime' in data.columns:
data.datetime = data.datetime.apply(str)
if 'date' in data.columns:
data.date = data.date.apply(str)
return json.loads(data.to_json(orient='records'))
def QA_util_to_json_from_numpy(data):
pass
def QA_util_to_json_from_list(data):
pass
def QA_util_to_list_from_pandas(data):
"""
explanation:
将pandas数据转换成列表
params:
* data ->:
meaning: pandas数据
type: null
optional: [null]
return:
list
demonstrate:
Not described
output:
Not described
"""
return np.asarray(data).tolist()
def QA_util_to_list_from_numpy(data):
"""
explanation:
将numpy数据转换为列表
params:
* data ->:
meaning: numpy数据
type: null
optional: [null]
return:
None
demonstrate:
Not described
output:
Not described
"""
return data.tolist()
def QA_util_to_pandas_from_json(data):
"""
explanation:
将json数据载入为pandas数据
params:
* data ->:
meaning: json数据
type: null
optional: [null]
return:
DataFrame
demonstrate:
Not described
output:
Not described
"""
if isinstance(data, dict):
return pd.DataFrame(data=[data, ])
else:
return pd.DataFrame(data=[{'value': data}])
def QA_util_to_pandas_from_list(data):
"""
explanation:
将列表数据转换为pandas
params:
* data ->:
meaning: 列表数据
type: list
optional: [null]
return:
DataFrame
demonstrate:
Not described
output:
Not described
"""
if isinstance(data, list):
return pd.DataFrame(data=data)
| 20.572289 | 80 | 0.623426 |
import csv
import json
import numpy as np
import pandas as pd
def QA_util_to_json_from_pandas(data):
if 'datetime' in data.columns:
data.datetime = data.datetime.apply(str)
if 'date' in data.columns:
data.date = data.date.apply(str)
return json.loads(data.to_json(orient='records'))
def QA_util_to_json_from_numpy(data):
pass
def QA_util_to_json_from_list(data):
pass
def QA_util_to_list_from_pandas(data):
return np.asarray(data).tolist()
def QA_util_to_list_from_numpy(data):
return data.tolist()
def QA_util_to_pandas_from_json(data):
if isinstance(data, dict):
return pd.DataFrame(data=[data, ])
else:
return pd.DataFrame(data=[{'value': data}])
def QA_util_to_pandas_from_list(data):
if isinstance(data, list):
return pd.DataFrame(data=data)
| true | true |
f73a45ab73a94d9a47f984a159a20b05d0f93746 | 5,100 | py | Python | model_zoo/research/cv/FaceAttribute/src/FaceAttribute/resnet18_softmax.py | GuoSuiming/mindspore | 48afc4cfa53d970c0b20eedfb46e039db2a133d5 | [
"Apache-2.0"
] | 55 | 2020-12-17T10:26:06.000Z | 2022-03-28T07:18:26.000Z | model_zoo/research/cv/FaceAttribute/src/FaceAttribute/resnet18_softmax.py | forwhat461/mindspore | 59a277756eb4faad9ac9afcc7fd526e8277d4994 | [
"Apache-2.0"
] | null | null | null | model_zoo/research/cv/FaceAttribute/src/FaceAttribute/resnet18_softmax.py | forwhat461/mindspore | 59a277756eb4faad9ac9afcc7fd526e8277d4994 | [
"Apache-2.0"
] | 14 | 2021-01-29T02:39:47.000Z | 2022-03-23T05:00:26.000Z | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Face attribute resnet18 backbone."""
import mindspore.nn as nn
from mindspore.ops.operations import TensorAdd
from mindspore.ops import operations as P
from mindspore.nn import Cell
from src.FaceAttribute.custom_net import Cut, bn_with_initialize, conv1x1, conv3x3
from src.FaceAttribute.head_factory_softmax import get_attri_head
__all__ = ['get_resnet18']
class IRBlock(Cell):
'''IRBlock.'''
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(IRBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride=stride)
self.bn1 = bn_with_initialize(planes)
self.relu1 = P.ReLU()
self.conv2 = conv3x3(planes, planes, stride=1)
self.bn2 = bn_with_initialize(planes)
if downsample is None:
self.downsample = Cut()
else:
self.downsample = downsample
self.add = TensorAdd()
self.cast = P.Cast()
self.relu2 = P.ReLU()
def construct(self, x):
out = self.conv1(x)
out = self.bn1(out)
out = self.relu1(out)
out = self.conv2(out)
out = self.bn2(out)
identity = self.downsample(x)
out = self.add(out, identity)
out = self.relu2(out)
return out
class DownSample(Cell):
def __init__(self, inplanes, planes, expansion, stride):
super(DownSample, self).__init__()
self.conv1 = conv1x1(inplanes, planes * expansion, stride=stride, pad_mode="valid")
self.bn1 = bn_with_initialize(planes * expansion)
def construct(self, x):
out = self.conv1(x)
out = self.bn1(out)
return out
class MakeLayer(Cell):
'''Make layer function.'''
def __init__(self, block, inplanes, planes, blocks, stride=1):
super(MakeLayer, self).__init__()
self.inplanes = inplanes
self.downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
self.downsample = DownSample(self.inplanes, planes, block.expansion, stride)
self.layers = []
self.layers.append(block(self.inplanes, planes, stride, self.downsample))
self.inplanes = planes
for _ in range(1, blocks):
self.layers.append(block(self.inplanes, planes))
self.layers = nn.CellList(self.layers)
def construct(self, x):
for block in self.layers:
x = block(x)
return x
class AttriResNet(Cell):
'''Resnet for attribute.'''
def __init__(self, block, layers, flat_dim, fc_dim, attri_num_list):
super(AttriResNet, self).__init__()
# resnet18
self.inplanes = 32
self.conv1 = conv3x3(3, self.inplanes, stride=1)
self.bn1 = bn_with_initialize(self.inplanes)
self.relu = P.ReLU()
self.layer1 = MakeLayer(block, inplanes=32, planes=64, blocks=layers[0], stride=2)
self.layer2 = MakeLayer(block, inplanes=64, planes=128, blocks=layers[1], stride=2)
self.layer3 = MakeLayer(block, inplanes=128, planes=256, blocks=layers[2], stride=2)
self.layer4 = MakeLayer(block, inplanes=256, planes=512, blocks=layers[3], stride=2)
# avg global pooling
self.mean = P.ReduceMean(keep_dims=True)
self.shape = P.Shape()
self.reshape = P.Reshape()
self.head = get_attri_head(flat_dim, fc_dim, attri_num_list)
def construct(self, x):
'''Construct function.'''
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.mean(x, (2, 3))
b, c, _, _ = self.shape(x)
x = self.reshape(x, (b, c))
return self.head(x)
def get_resnet18(args):
'''Build resnet18 for attribute.'''
flat_dim = args.flat_dim
fc_dim = args.fc_dim
str_classes = args.classes.strip().split(',')
if args.attri_num != len(str_classes):
print('args warning: attri_num != classes num')
return None
attri_num_list = []
for i, _ in enumerate(str_classes):
attri_num_list.append(int(str_classes[i]))
attri_resnet18 = AttriResNet(IRBlock, (2, 2, 2, 2), flat_dim, fc_dim, attri_num_list)
return attri_resnet18
| 34.931507 | 93 | 0.612157 |
import mindspore.nn as nn
from mindspore.ops.operations import TensorAdd
from mindspore.ops import operations as P
from mindspore.nn import Cell
from src.FaceAttribute.custom_net import Cut, bn_with_initialize, conv1x1, conv3x3
from src.FaceAttribute.head_factory_softmax import get_attri_head
__all__ = ['get_resnet18']
class IRBlock(Cell):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(IRBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride=stride)
self.bn1 = bn_with_initialize(planes)
self.relu1 = P.ReLU()
self.conv2 = conv3x3(planes, planes, stride=1)
self.bn2 = bn_with_initialize(planes)
if downsample is None:
self.downsample = Cut()
else:
self.downsample = downsample
self.add = TensorAdd()
self.cast = P.Cast()
self.relu2 = P.ReLU()
def construct(self, x):
out = self.conv1(x)
out = self.bn1(out)
out = self.relu1(out)
out = self.conv2(out)
out = self.bn2(out)
identity = self.downsample(x)
out = self.add(out, identity)
out = self.relu2(out)
return out
class DownSample(Cell):
def __init__(self, inplanes, planes, expansion, stride):
super(DownSample, self).__init__()
self.conv1 = conv1x1(inplanes, planes * expansion, stride=stride, pad_mode="valid")
self.bn1 = bn_with_initialize(planes * expansion)
def construct(self, x):
out = self.conv1(x)
out = self.bn1(out)
return out
class MakeLayer(Cell):
def __init__(self, block, inplanes, planes, blocks, stride=1):
super(MakeLayer, self).__init__()
self.inplanes = inplanes
self.downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
self.downsample = DownSample(self.inplanes, planes, block.expansion, stride)
self.layers = []
self.layers.append(block(self.inplanes, planes, stride, self.downsample))
self.inplanes = planes
for _ in range(1, blocks):
self.layers.append(block(self.inplanes, planes))
self.layers = nn.CellList(self.layers)
def construct(self, x):
for block in self.layers:
x = block(x)
return x
class AttriResNet(Cell):
def __init__(self, block, layers, flat_dim, fc_dim, attri_num_list):
super(AttriResNet, self).__init__()
self.inplanes = 32
self.conv1 = conv3x3(3, self.inplanes, stride=1)
self.bn1 = bn_with_initialize(self.inplanes)
self.relu = P.ReLU()
self.layer1 = MakeLayer(block, inplanes=32, planes=64, blocks=layers[0], stride=2)
self.layer2 = MakeLayer(block, inplanes=64, planes=128, blocks=layers[1], stride=2)
self.layer3 = MakeLayer(block, inplanes=128, planes=256, blocks=layers[2], stride=2)
self.layer4 = MakeLayer(block, inplanes=256, planes=512, blocks=layers[3], stride=2)
self.mean = P.ReduceMean(keep_dims=True)
self.shape = P.Shape()
self.reshape = P.Reshape()
self.head = get_attri_head(flat_dim, fc_dim, attri_num_list)
def construct(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.mean(x, (2, 3))
b, c, _, _ = self.shape(x)
x = self.reshape(x, (b, c))
return self.head(x)
def get_resnet18(args):
flat_dim = args.flat_dim
fc_dim = args.fc_dim
str_classes = args.classes.strip().split(',')
if args.attri_num != len(str_classes):
print('args warning: attri_num != classes num')
return None
attri_num_list = []
for i, _ in enumerate(str_classes):
attri_num_list.append(int(str_classes[i]))
attri_resnet18 = AttriResNet(IRBlock, (2, 2, 2, 2), flat_dim, fc_dim, attri_num_list)
return attri_resnet18
| true | true |
f73a45c01270d0583189d4419eccaf24e8981d3e | 5,780 | py | Python | electrum_mona/gui/qt/contact_list.py | wakiyamap/electrum-mona | d00830c96785c77025432669158ad903146a2298 | [
"MIT"
] | 61 | 2017-08-06T08:51:49.000Z | 2021-12-28T06:25:36.000Z | electrum_mona/gui/qt/contact_list.py | wakiyamap/electrum-mona | d00830c96785c77025432669158ad903146a2298 | [
"MIT"
] | 15 | 2017-09-12T07:15:01.000Z | 2021-12-28T06:25:15.000Z | electrum_mona/gui/qt/contact_list.py | wakiyamap/electrum-mona | d00830c96785c77025432669158ad903146a2298 | [
"MIT"
] | 27 | 2017-08-18T19:40:30.000Z | 2021-03-01T11:16:02.000Z | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from enum import IntEnum
from PyQt5.QtGui import QStandardItemModel, QStandardItem
from PyQt5.QtCore import Qt, QPersistentModelIndex, QModelIndex
from PyQt5.QtWidgets import (QAbstractItemView, QMenu)
from electrum_mona.i18n import _
from electrum_mona.bitcoin import is_address
from electrum_mona.util import block_explorer_URL
from electrum_mona.plugin import run_hook
from .util import MyTreeView, webopen
class ContactList(MyTreeView):
class Columns(IntEnum):
NAME = 0
ADDRESS = 1
headers = {
Columns.NAME: _('Name'),
Columns.ADDRESS: _('Address'),
}
filter_columns = [Columns.NAME, Columns.ADDRESS]
ROLE_CONTACT_KEY = Qt.UserRole + 1000
def __init__(self, parent):
super().__init__(parent, self.create_menu,
stretch_column=self.Columns.NAME,
editable_columns=[self.Columns.NAME])
self.setModel(QStandardItemModel(self))
self.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.setSortingEnabled(True)
self.update()
def on_edited(self, idx, edit_key, *, text):
_type, prior_name = self.parent.contacts.pop(edit_key)
self.parent.set_contact(text, edit_key)
self.update()
def create_menu(self, position):
menu = QMenu()
idx = self.indexAt(position)
column = idx.column() or self.Columns.NAME
selected_keys = []
for s_idx in self.selected_in_column(self.Columns.NAME):
sel_key = self.model().itemFromIndex(s_idx).data(self.ROLE_CONTACT_KEY)
selected_keys.append(sel_key)
if not selected_keys or not idx.isValid():
menu.addAction(_("New contact"), lambda: self.parent.new_contact_dialog())
menu.addAction(_("Import file"), lambda: self.parent.import_contacts())
menu.addAction(_("Export file"), lambda: self.parent.export_contacts())
else:
column_title = self.model().horizontalHeaderItem(column).text()
column_data = '\n'.join(self.model().itemFromIndex(s_idx).text()
for s_idx in self.selected_in_column(column))
menu.addAction(_("Copy {}").format(column_title), lambda: self.place_text_on_clipboard(column_data, title=column_title))
if column in self.editable_columns:
item = self.model().itemFromIndex(idx)
if item.isEditable():
# would not be editable if openalias
persistent = QPersistentModelIndex(idx)
menu.addAction(_("Edit {}").format(column_title), lambda p=persistent: self.edit(QModelIndex(p)))
menu.addAction(_("Pay to"), lambda: self.parent.payto_contacts(selected_keys))
menu.addAction(_("Delete"), lambda: self.parent.delete_contacts(selected_keys))
URLs = [block_explorer_URL(self.config, 'addr', key) for key in filter(is_address, selected_keys)]
if URLs:
menu.addAction(_("View on block explorer"), lambda: [webopen(u) for u in URLs])
run_hook('create_contact_menu', menu, selected_keys)
menu.exec_(self.viewport().mapToGlobal(position))
def update(self):
if self.maybe_defer_update():
return
current_key = self.get_role_data_for_current_item(col=self.Columns.NAME, role=self.ROLE_CONTACT_KEY)
self.model().clear()
self.update_headers(self.__class__.headers)
set_current = None
for key in sorted(self.parent.contacts.keys()):
contact_type, name = self.parent.contacts[key]
items = [QStandardItem(x) for x in (name, key)]
items[self.Columns.NAME].setEditable(contact_type != 'openalias')
items[self.Columns.ADDRESS].setEditable(False)
items[self.Columns.NAME].setData(key, self.ROLE_CONTACT_KEY)
row_count = self.model().rowCount()
self.model().insertRow(row_count, items)
if key == current_key:
idx = self.model().index(row_count, self.Columns.NAME)
set_current = QPersistentModelIndex(idx)
self.set_current_idx(set_current)
# FIXME refresh loses sort order; so set "default" here:
self.sortByColumn(self.Columns.NAME, Qt.AscendingOrder)
self.filter()
run_hook('update_contacts_tab', self)
def get_edit_key_from_coordinate(self, row, col):
if col != self.Columns.NAME:
return None
return self.get_role_data_from_coordinate(row, col, role=self.ROLE_CONTACT_KEY)
| 45.15625 | 132 | 0.675952 |
from enum import IntEnum
from PyQt5.QtGui import QStandardItemModel, QStandardItem
from PyQt5.QtCore import Qt, QPersistentModelIndex, QModelIndex
from PyQt5.QtWidgets import (QAbstractItemView, QMenu)
from electrum_mona.i18n import _
from electrum_mona.bitcoin import is_address
from electrum_mona.util import block_explorer_URL
from electrum_mona.plugin import run_hook
from .util import MyTreeView, webopen
class ContactList(MyTreeView):
class Columns(IntEnum):
NAME = 0
ADDRESS = 1
headers = {
Columns.NAME: _('Name'),
Columns.ADDRESS: _('Address'),
}
filter_columns = [Columns.NAME, Columns.ADDRESS]
ROLE_CONTACT_KEY = Qt.UserRole + 1000
def __init__(self, parent):
super().__init__(parent, self.create_menu,
stretch_column=self.Columns.NAME,
editable_columns=[self.Columns.NAME])
self.setModel(QStandardItemModel(self))
self.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.setSortingEnabled(True)
self.update()
def on_edited(self, idx, edit_key, *, text):
_type, prior_name = self.parent.contacts.pop(edit_key)
self.parent.set_contact(text, edit_key)
self.update()
def create_menu(self, position):
menu = QMenu()
idx = self.indexAt(position)
column = idx.column() or self.Columns.NAME
selected_keys = []
for s_idx in self.selected_in_column(self.Columns.NAME):
sel_key = self.model().itemFromIndex(s_idx).data(self.ROLE_CONTACT_KEY)
selected_keys.append(sel_key)
if not selected_keys or not idx.isValid():
menu.addAction(_("New contact"), lambda: self.parent.new_contact_dialog())
menu.addAction(_("Import file"), lambda: self.parent.import_contacts())
menu.addAction(_("Export file"), lambda: self.parent.export_contacts())
else:
column_title = self.model().horizontalHeaderItem(column).text()
column_data = '\n'.join(self.model().itemFromIndex(s_idx).text()
for s_idx in self.selected_in_column(column))
menu.addAction(_("Copy {}").format(column_title), lambda: self.place_text_on_clipboard(column_data, title=column_title))
if column in self.editable_columns:
item = self.model().itemFromIndex(idx)
if item.isEditable():
persistent = QPersistentModelIndex(idx)
menu.addAction(_("Edit {}").format(column_title), lambda p=persistent: self.edit(QModelIndex(p)))
menu.addAction(_("Pay to"), lambda: self.parent.payto_contacts(selected_keys))
menu.addAction(_("Delete"), lambda: self.parent.delete_contacts(selected_keys))
URLs = [block_explorer_URL(self.config, 'addr', key) for key in filter(is_address, selected_keys)]
if URLs:
menu.addAction(_("View on block explorer"), lambda: [webopen(u) for u in URLs])
run_hook('create_contact_menu', menu, selected_keys)
menu.exec_(self.viewport().mapToGlobal(position))
def update(self):
if self.maybe_defer_update():
return
current_key = self.get_role_data_for_current_item(col=self.Columns.NAME, role=self.ROLE_CONTACT_KEY)
self.model().clear()
self.update_headers(self.__class__.headers)
set_current = None
for key in sorted(self.parent.contacts.keys()):
contact_type, name = self.parent.contacts[key]
items = [QStandardItem(x) for x in (name, key)]
items[self.Columns.NAME].setEditable(contact_type != 'openalias')
items[self.Columns.ADDRESS].setEditable(False)
items[self.Columns.NAME].setData(key, self.ROLE_CONTACT_KEY)
row_count = self.model().rowCount()
self.model().insertRow(row_count, items)
if key == current_key:
idx = self.model().index(row_count, self.Columns.NAME)
set_current = QPersistentModelIndex(idx)
self.set_current_idx(set_current)
self.sortByColumn(self.Columns.NAME, Qt.AscendingOrder)
self.filter()
run_hook('update_contacts_tab', self)
def get_edit_key_from_coordinate(self, row, col):
if col != self.Columns.NAME:
return None
return self.get_role_data_from_coordinate(row, col, role=self.ROLE_CONTACT_KEY)
| true | true |
f73a45c0a0ff7e3c290777d566a59c61a6e3b43b | 14,952 | py | Python | pycon/migrations/0007_auto__add_pyconlightningtalkproposal__add_field_pyconposterproposal_ad.py | pyconjp/pyconjp-website | c14b1412b70ad04d6c6e837cb0feaec17fd5cd36 | [
"BSD-3-Clause"
] | 6 | 2016-04-03T18:22:45.000Z | 2018-03-15T11:20:39.000Z | pycon/migrations/0007_auto__add_pyconlightningtalkproposal__add_field_pyconposterproposal_ad.py | alex/pycon | d1437a9f2ac1ec4f4fd5ad41ef3a7fe06958b52b | [
"BSD-3-Clause"
] | 60 | 2016-04-14T12:16:06.000Z | 2017-08-15T06:15:50.000Z | pycon/migrations/0007_auto__add_pyconlightningtalkproposal__add_field_pyconposterproposal_ad.py | alex/pycon | d1437a9f2ac1ec4f4fd5ad41ef3a7fe06958b52b | [
"BSD-3-Clause"
] | 7 | 2016-04-23T02:29:35.000Z | 2017-10-05T07:37:46.000Z | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'PyConLightningTalkProposal'
db.create_table(u'pycon_pyconlightningtalkproposal', (
(u'proposalbase_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['proposals.ProposalBase'], unique=True, primary_key=True)),
('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['pycon.PyConProposalCategory'])),
('audience_level', self.gf('django.db.models.fields.IntegerField')()),
('overall_status', self.gf('django.db.models.fields.IntegerField')(default=1)),
('damaged_score', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('rejection_status', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('recording_release', self.gf('django.db.models.fields.BooleanField')(default=True)),
('additional_requirements', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal(u'pycon', ['PyConLightningTalkProposal'])
def backwards(self, orm):
# Deleting model 'PyConLightningTalkProposal'
db.delete_table(u'pycon_pyconlightningtalkproposal')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'conference.conference': {
'Meta': {'object_name': 'Conference'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'timezone': ('timezones.fields.TimeZoneField', [], {'default': "'US/Eastern'", 'max_length': '100', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'conference.section': {
'Meta': {'object_name': 'Section'},
'conference': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['conference.Conference']"}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'proposals.additionalspeaker': {
'Meta': {'unique_together': "(('speaker', 'proposalbase'),)", 'object_name': 'AdditionalSpeaker', 'db_table': "'proposals_proposalbase_additional_speakers'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'proposalbase': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['proposals.ProposalBase']"}),
'speaker': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['speakers.Speaker']"}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
u'proposals.proposalbase': {
'Meta': {'object_name': 'ProposalBase'},
'abstract': ('django.db.models.fields.TextField', [], {}),
'additional_notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'additional_speakers': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['speakers.Speaker']", 'symmetrical': 'False', 'through': u"orm['proposals.AdditionalSpeaker']", 'blank': 'True'}),
'cancelled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '400'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['proposals.ProposalKind']"}),
'speaker': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'proposals'", 'to': u"orm['speakers.Speaker']"}),
'submitted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'proposals.proposalkind': {
'Meta': {'object_name': 'ProposalKind'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'section': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'proposal_kinds'", 'to': u"orm['conference.Section']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
u'pycon.pyconlightningtalkproposal': {
'Meta': {'object_name': 'PyConLightningTalkProposal'},
'additional_requirements': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'audience_level': ('django.db.models.fields.IntegerField', [], {}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pycon.PyConProposalCategory']"}),
'damaged_score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'overall_status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
u'proposalbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['proposals.ProposalBase']", 'unique': 'True', 'primary_key': 'True'}),
'recording_release': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rejection_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'pycon.pyconposterproposal': {
'Meta': {'object_name': 'PyConPosterProposal'},
'additional_requirements': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'audience_level': ('django.db.models.fields.IntegerField', [], {}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pycon.PyConProposalCategory']"}),
'damaged_score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'overall_status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
u'proposalbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['proposals.ProposalBase']", 'unique': 'True', 'primary_key': 'True'}),
'recording_release': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rejection_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'pycon.pyconproposalcategory': {
'Meta': {'object_name': 'PyConProposalCategory'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
u'pycon.pyconsponsortutorialproposal': {
'Meta': {'object_name': 'PyConSponsorTutorialProposal', '_ormbases': [u'proposals.ProposalBase']},
u'proposalbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['proposals.ProposalBase']", 'unique': 'True', 'primary_key': 'True'})
},
u'pycon.pycontalkproposal': {
'Meta': {'object_name': 'PyConTalkProposal'},
'additional_requirements': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'audience': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'audience_level': ('django.db.models.fields.IntegerField', [], {}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pycon.PyConProposalCategory']"}),
'damaged_score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'duration': ('django.db.models.fields.IntegerField', [], {}),
'outline': ('django.db.models.fields.TextField', [], {}),
'overall_status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'perceived_value': ('django.db.models.fields.TextField', [], {'max_length': '500'}),
u'proposalbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['proposals.ProposalBase']", 'unique': 'True', 'primary_key': 'True'}),
'recording_release': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rejection_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'pycon.pycontutorialproposal': {
'Meta': {'object_name': 'PyConTutorialProposal'},
'additional_requirements': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'audience': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'audience_level': ('django.db.models.fields.IntegerField', [], {}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pycon.PyConProposalCategory']"}),
'damaged_score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'domain_level': ('django.db.models.fields.IntegerField', [], {}),
'more_info': ('django.db.models.fields.TextField', [], {}),
'outline': ('django.db.models.fields.TextField', [], {}),
'overall_status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'perceived_value': ('django.db.models.fields.TextField', [], {'max_length': '500'}),
u'proposalbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['proposals.ProposalBase']", 'unique': 'True', 'primary_key': 'True'}),
'recording_release': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rejection_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'speakers.speaker': {
'Meta': {'object_name': 'Speaker'},
'annotation': ('django.db.models.fields.TextField', [], {}),
'biography': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invite_email': ('django.db.models.fields.CharField', [], {'max_length': '200', 'unique': 'True', 'null': 'True', 'db_index': 'True'}),
'invite_token': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'sessions_preference': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'twitter_username': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'speaker_profile'", 'unique': 'True', 'null': 'True', 'to': u"orm['auth.User']"})
}
}
complete_apps = ['pycon']
| 78.694737 | 218 | 0.583133 |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.create_table(u'pycon_pyconlightningtalkproposal', (
(u'proposalbase_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['proposals.ProposalBase'], unique=True, primary_key=True)),
('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['pycon.PyConProposalCategory'])),
('audience_level', self.gf('django.db.models.fields.IntegerField')()),
('overall_status', self.gf('django.db.models.fields.IntegerField')(default=1)),
('damaged_score', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('rejection_status', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('recording_release', self.gf('django.db.models.fields.BooleanField')(default=True)),
('additional_requirements', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal(u'pycon', ['PyConLightningTalkProposal'])
def backwards(self, orm):
db.delete_table(u'pycon_pyconlightningtalkproposal')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'conference.conference': {
'Meta': {'object_name': 'Conference'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'timezone': ('timezones.fields.TimeZoneField', [], {'default': "'US/Eastern'", 'max_length': '100', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'conference.section': {
'Meta': {'object_name': 'Section'},
'conference': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['conference.Conference']"}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'proposals.additionalspeaker': {
'Meta': {'unique_together': "(('speaker', 'proposalbase'),)", 'object_name': 'AdditionalSpeaker', 'db_table': "'proposals_proposalbase_additional_speakers'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'proposalbase': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['proposals.ProposalBase']"}),
'speaker': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['speakers.Speaker']"}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
u'proposals.proposalbase': {
'Meta': {'object_name': 'ProposalBase'},
'abstract': ('django.db.models.fields.TextField', [], {}),
'additional_notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'additional_speakers': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['speakers.Speaker']", 'symmetrical': 'False', 'through': u"orm['proposals.AdditionalSpeaker']", 'blank': 'True'}),
'cancelled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '400'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['proposals.ProposalKind']"}),
'speaker': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'proposals'", 'to': u"orm['speakers.Speaker']"}),
'submitted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'proposals.proposalkind': {
'Meta': {'object_name': 'ProposalKind'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'section': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'proposal_kinds'", 'to': u"orm['conference.Section']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
u'pycon.pyconlightningtalkproposal': {
'Meta': {'object_name': 'PyConLightningTalkProposal'},
'additional_requirements': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'audience_level': ('django.db.models.fields.IntegerField', [], {}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pycon.PyConProposalCategory']"}),
'damaged_score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'overall_status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
u'proposalbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['proposals.ProposalBase']", 'unique': 'True', 'primary_key': 'True'}),
'recording_release': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rejection_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'pycon.pyconposterproposal': {
'Meta': {'object_name': 'PyConPosterProposal'},
'additional_requirements': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'audience_level': ('django.db.models.fields.IntegerField', [], {}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pycon.PyConProposalCategory']"}),
'damaged_score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'overall_status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
u'proposalbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['proposals.ProposalBase']", 'unique': 'True', 'primary_key': 'True'}),
'recording_release': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rejection_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'pycon.pyconproposalcategory': {
'Meta': {'object_name': 'PyConProposalCategory'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
u'pycon.pyconsponsortutorialproposal': {
'Meta': {'object_name': 'PyConSponsorTutorialProposal', '_ormbases': [u'proposals.ProposalBase']},
u'proposalbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['proposals.ProposalBase']", 'unique': 'True', 'primary_key': 'True'})
},
u'pycon.pycontalkproposal': {
'Meta': {'object_name': 'PyConTalkProposal'},
'additional_requirements': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'audience': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'audience_level': ('django.db.models.fields.IntegerField', [], {}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pycon.PyConProposalCategory']"}),
'damaged_score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'duration': ('django.db.models.fields.IntegerField', [], {}),
'outline': ('django.db.models.fields.TextField', [], {}),
'overall_status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'perceived_value': ('django.db.models.fields.TextField', [], {'max_length': '500'}),
u'proposalbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['proposals.ProposalBase']", 'unique': 'True', 'primary_key': 'True'}),
'recording_release': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rejection_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'pycon.pycontutorialproposal': {
'Meta': {'object_name': 'PyConTutorialProposal'},
'additional_requirements': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'audience': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'audience_level': ('django.db.models.fields.IntegerField', [], {}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pycon.PyConProposalCategory']"}),
'damaged_score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'domain_level': ('django.db.models.fields.IntegerField', [], {}),
'more_info': ('django.db.models.fields.TextField', [], {}),
'outline': ('django.db.models.fields.TextField', [], {}),
'overall_status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'perceived_value': ('django.db.models.fields.TextField', [], {'max_length': '500'}),
u'proposalbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['proposals.ProposalBase']", 'unique': 'True', 'primary_key': 'True'}),
'recording_release': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rejection_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'speakers.speaker': {
'Meta': {'object_name': 'Speaker'},
'annotation': ('django.db.models.fields.TextField', [], {}),
'biography': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invite_email': ('django.db.models.fields.CharField', [], {'max_length': '200', 'unique': 'True', 'null': 'True', 'db_index': 'True'}),
'invite_token': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'sessions_preference': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'twitter_username': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'speaker_profile'", 'unique': 'True', 'null': 'True', 'to': u"orm['auth.User']"})
}
}
complete_apps = ['pycon']
| true | true |
f73a45fa33e9ce4ebec9a8041fcf77e8acaa94f8 | 4,384 | py | Python | archives/presenters/predictionsFilter4.py | block1o1/CryptoPredicted | 7f660cdc456fb8252b3125028f31fd6f5a3ceea5 | [
"MIT"
] | 4 | 2021-10-14T21:22:25.000Z | 2022-03-12T19:58:48.000Z | archives/presenters/predictionsFilter4.py | inevolin/CryptoPredicted | 7f660cdc456fb8252b3125028f31fd6f5a3ceea5 | [
"MIT"
] | null | null | null | archives/presenters/predictionsFilter4.py | inevolin/CryptoPredicted | 7f660cdc456fb8252b3125028f31fd6f5a3ceea5 | [
"MIT"
] | 1 | 2022-03-15T22:52:53.000Z | 2022-03-15T22:52:53.000Z | import datetime
import pprint
import pymongo
import sys
import json
import time
import copy
from pymongo import MongoClient
import sys
sys.path.insert(0, '/home/nevolin/public_html/cryptoproto/')
from mysettings import dtNow
import DAL
client = DAL.openConnection()
db=client.crypto
if not len(sys.argv) >= 2:
print("expected exchange and symbol parameters, e.g. binance BTCUSDT ")
sys.exit(0)
exchange = sys.argv[1]
symbol = sys.argv[2]
INTERVAL = 30
if len(sys.argv) >= 4:
INTERVAL = int(sys.argv[3])
currentDateTime = dtNow().replace(second=0,microsecond=0)
if len(sys.argv) >= 6:
currentDateTime = datetime.datetime.strptime(sys.argv[5], '%Y-%m-%dT%H:%M') # in future the user may send datetime from another tz, use dtLocal()
if currentDateTime > dtNow():
currentDateTime = dtNow().replace(second=0,microsecond=0)
maxDateTimeExcluded = currentDateTime
if INTERVAL > 1: # and INTERVAL <= 60
maxDateTimeExcluded = currentDateTime.replace(minute=currentDateTime.minute-(currentDateTime.minute % INTERVAL))
WINDOW = 1440
if len(sys.argv) >= 5: # value in minutes
WINDOW = int(sys.argv[4])
minDateTimeIncluded = maxDateTimeExcluded - datetime.timedelta(minutes=WINDOW)
featuresID = None
if len(sys.argv) >= 7:
featuresID = sys.argv[6]
else:
print("missing featuresID")
exit()
batchsize = None
if len(sys.argv) >= 8:
batchsize = int(sys.argv[7])
else:
print("missing batchsize")
exit()
neurons = None
if len(sys.argv) >= 9:
neurons = int(sys.argv[8])
else:
print("missing neurons")
exit()
windowsize = None
if len(sys.argv) >= 10:
windowsize = int(sys.argv[9])
else:
print("missing windowsize")
exit()
n_epoch = None
if len(sys.argv) >= 11:
n_epoch = int(sys.argv[10])
else:
print("missing n_epoch")
exit()
predicted_feature = None
if len(sys.argv) >= 12:
predicted_feature = sys.argv[11]
else:
print("missing predicted_feature")
exit()
n_hiddenlay = None
if len(sys.argv) >= 13:
n_hiddenlay = int(sys.argv[12])
else:
print("missing n_hiddenlay")
exit()
FINAL_predic = {}
#### PREDICTIONS
# instead of retrieving just one prediction by timestamp
# let's retrieve this one, including X previous ones
X = 30
minDateTimeIncluded = maxDateTimeExcluded - datetime.timedelta(minutes=INTERVAL*X)
pipeline = [
{'$match' :
{ 'symbol': { '$eq' : symbol },
# 'timestamp': { '$eq': maxDateTimeExcluded },
'timestamp': { '$gte': minDateTimeIncluded, '$lte': maxDateTimeExcluded },
'interval': { '$eq': INTERVAL },
'$or': [{'feature': predicted_feature}, {'feature': predicted_feature+'_traindata'}],
}
},
]
if (featuresID != "-1"):
pipeline[0]['$match']['featuresID'] = { '$eq': featuresID }
if (batchsize != -1):
pipeline[0]['$match']['n_batch_size'] = { '$eq': batchsize }
if (neurons != -1):
pipeline[0]['$match']['n_neurons'] = { '$eq': neurons }
if (windowsize != -1):
pipeline[0]['$match']['n_window'] = { '$eq': windowsize }
if (n_epoch != -1):
pipeline[0]['$match']['n_epoch'] = { '$eq': n_epoch }
if (n_hiddenlay != -1):
pipeline[0]['$match']['n_hiddenlayers'] = { '$eq': n_hiddenlay }
cursor = db.get_collection('predictions4').aggregate(pipeline);
res_predic = list(cursor)
# pre-process:
for obj in res_predic:
for e in obj['data']:
e['label_dt'] = e['timestamp']-datetime.timedelta(minutes=INTERVAL)
del e['timestamp']
e['start'] = str(e['label_dt'])
e['end'] = str(e['label_dt'] + datetime.timedelta(minutes=INTERVAL))
e['label'] = str(datetime.datetime.strftime(e['label_dt'], '%Y-%m-%dT%H:%M'))
uid = obj['featuresID']+' '+str(obj['n_epoch'])+' '+str(obj['n_window'])+' '+str(obj['n_neurons'])+' '+str(obj['n_batch_size'])+' '+str(obj['n_hiddenlayers'])
if obj['feature'] == predicted_feature:
if not uid in FINAL_predic:
FINAL_predic[uid] = []
FINAL_predic[uid].append(e)
for key in FINAL_predic.keys():
FINAL_predic[key] = sorted(FINAL_predic[key], key=(lambda x:( x['label_dt'] ) ))
# post-process:
for e, arr in FINAL_predic.items():
for ee in arr:
del ee['label_dt']
json_out = json.dumps( {'predictions': FINAL_predic} )
print(json_out)
| 27.061728 | 166 | 0.630931 | import datetime
import pprint
import pymongo
import sys
import json
import time
import copy
from pymongo import MongoClient
import sys
sys.path.insert(0, '/home/nevolin/public_html/cryptoproto/')
from mysettings import dtNow
import DAL
client = DAL.openConnection()
db=client.crypto
if not len(sys.argv) >= 2:
print("expected exchange and symbol parameters, e.g. binance BTCUSDT ")
sys.exit(0)
exchange = sys.argv[1]
symbol = sys.argv[2]
INTERVAL = 30
if len(sys.argv) >= 4:
INTERVAL = int(sys.argv[3])
currentDateTime = dtNow().replace(second=0,microsecond=0)
if len(sys.argv) >= 6:
currentDateTime = datetime.datetime.strptime(sys.argv[5], '%Y-%m-%dT%H:%M')
if currentDateTime > dtNow():
currentDateTime = dtNow().replace(second=0,microsecond=0)
maxDateTimeExcluded = currentDateTime
if INTERVAL > 1:
maxDateTimeExcluded = currentDateTime.replace(minute=currentDateTime.minute-(currentDateTime.minute % INTERVAL))
WINDOW = 1440
if len(sys.argv) >= 5:
WINDOW = int(sys.argv[4])
minDateTimeIncluded = maxDateTimeExcluded - datetime.timedelta(minutes=WINDOW)
featuresID = None
if len(sys.argv) >= 7:
featuresID = sys.argv[6]
else:
print("missing featuresID")
exit()
batchsize = None
if len(sys.argv) >= 8:
batchsize = int(sys.argv[7])
else:
print("missing batchsize")
exit()
neurons = None
if len(sys.argv) >= 9:
neurons = int(sys.argv[8])
else:
print("missing neurons")
exit()
windowsize = None
if len(sys.argv) >= 10:
windowsize = int(sys.argv[9])
else:
print("missing windowsize")
exit()
n_epoch = None
if len(sys.argv) >= 11:
n_epoch = int(sys.argv[10])
else:
print("missing n_epoch")
exit()
predicted_feature = None
if len(sys.argv) >= 12:
predicted_feature = sys.argv[11]
else:
print("missing predicted_feature")
exit()
n_hiddenlay = None
if len(sys.argv) >= 13:
n_hiddenlay = int(sys.argv[12])
else:
print("missing n_hiddenlay")
exit()
FINAL_predic = {}
meExcluded - datetime.timedelta(minutes=INTERVAL*X)
pipeline = [
{'$match' :
{ 'symbol': { '$eq' : symbol },
# 'timestamp': { '$eq': maxDateTimeExcluded },
'timestamp': { '$gte': minDateTimeIncluded, '$lte': maxDateTimeExcluded },
'interval': { '$eq': INTERVAL },
'$or': [{'feature': predicted_feature}, {'feature': predicted_feature+'_traindata'}],
}
},
]
if (featuresID != "-1"):
pipeline[0]['$match']['featuresID'] = { '$eq': featuresID }
if (batchsize != -1):
pipeline[0]['$match']['n_batch_size'] = { '$eq': batchsize }
if (neurons != -1):
pipeline[0]['$match']['n_neurons'] = { '$eq': neurons }
if (windowsize != -1):
pipeline[0]['$match']['n_window'] = { '$eq': windowsize }
if (n_epoch != -1):
pipeline[0]['$match']['n_epoch'] = { '$eq': n_epoch }
if (n_hiddenlay != -1):
pipeline[0]['$match']['n_hiddenlayers'] = { '$eq': n_hiddenlay }
cursor = db.get_collection('predictions4').aggregate(pipeline);
res_predic = list(cursor)
# pre-process:
for obj in res_predic:
for e in obj['data']:
e['label_dt'] = e['timestamp']-datetime.timedelta(minutes=INTERVAL)
del e['timestamp']
e['start'] = str(e['label_dt'])
e['end'] = str(e['label_dt'] + datetime.timedelta(minutes=INTERVAL))
e['label'] = str(datetime.datetime.strftime(e['label_dt'], '%Y-%m-%dT%H:%M'))
uid = obj['featuresID']+' '+str(obj['n_epoch'])+' '+str(obj['n_window'])+' '+str(obj['n_neurons'])+' '+str(obj['n_batch_size'])+' '+str(obj['n_hiddenlayers'])
if obj['feature'] == predicted_feature:
if not uid in FINAL_predic:
FINAL_predic[uid] = []
FINAL_predic[uid].append(e)
for key in FINAL_predic.keys():
FINAL_predic[key] = sorted(FINAL_predic[key], key=(lambda x:( x['label_dt'] ) ))
# post-process:
for e, arr in FINAL_predic.items():
for ee in arr:
del ee['label_dt']
json_out = json.dumps( {'predictions': FINAL_predic} )
print(json_out)
| true | true |
f73a467c69b65d80e67f7a5ccdacff9292dabd04 | 426 | py | Python | setup.py | avoceteditors/weekdate | 643447bf8a75614e9a7aadfd026f59bddeb99d69 | [
"BSD-3-Clause"
] | null | null | null | setup.py | avoceteditors/weekdate | 643447bf8a75614e9a7aadfd026f59bddeb99d69 | [
"BSD-3-Clause"
] | null | null | null | setup.py | avoceteditors/weekdate | 643447bf8a75614e9a7aadfd026f59bddeb99d69 | [
"BSD-3-Clause"
] | null | null | null | from distutils.core import setup
setup( name = 'weekdate',
version = '0.1',
author = 'Kenneth P. J. Dyer',
author_email = 'kenneth@avoceteditors.com',
url = 'https://github.com/kennethpjdyer/weekdate',
description = 'Basic utility for determining start and end dates for a given week.',
scripts = ['scripts/weekdate']
)
| 35.5 | 96 | 0.549296 | from distutils.core import setup
setup( name = 'weekdate',
version = '0.1',
author = 'Kenneth P. J. Dyer',
author_email = 'kenneth@avoceteditors.com',
url = 'https://github.com/kennethpjdyer/weekdate',
description = 'Basic utility for determining start and end dates for a given week.',
scripts = ['scripts/weekdate']
)
| true | true |
f73a46f0bb61a050d182f205270c4397ae389c77 | 4,186 | py | Python | confz/confz_source.py | AndrewW85/ConfZ | 69a83af4905d7f182cef68f14574394de084ccca | [
"MIT"
] | null | null | null | confz/confz_source.py | AndrewW85/ConfZ | 69a83af4905d7f182cef68f14574394de084ccca | [
"MIT"
] | null | null | null | confz/confz_source.py | AndrewW85/ConfZ | 69a83af4905d7f182cef68f14574394de084ccca | [
"MIT"
] | null | null | null | from dataclasses import dataclass
from enum import Enum
from pathlib import Path
from typing import Optional, Union, List, Dict, Any
@dataclass
class ConfZSource:
"""Source configuration for :class:`~confz.ConfZ` models."""
ConfZSources = Union[ConfZSource, List[ConfZSource]]
class FileFormat(Enum):
"""Enum for file format."""
JSON = 'json' #: JSON file format
YAML = 'yaml' #: YAML file format
@dataclass
class ConfZFileSource(ConfZSource):
"""Source config for files."""
file: Optional[Path] = None
"""Specify a config file directly by a path."""
file_from_env: Optional[str] = None
"""Alternatively, use this environment variable to get the file."""
file_from_cl: Optional[Union[int, str]] = None
"""Alternatively, use this command line argument to get the file name/path. It can be a specific position
(integer, e.g. `1`) or after a specific option (string, e.g. `\\-\\-config-file`). In the latter case, the file
name must follow after whitespace, an equal sign between argument and value is not supported at the moment."""
folder: Optional[Path] = None
"""The file specified above can optionally be relative to this folder."""
format: Optional[FileFormat] = None
"""The format of the config file. If not specified, it will be inferred from the file ending."""
encoding: str = 'utf-8'
"""The encoding of the file. Default is UTF-8."""
@dataclass
class ConfZEnvSource(ConfZSource):
"""Source config for environment variables and .env files. On loading of the source, the dotenv file
values (if available) are merged with the environment, with environment always taking precedence in case of
name collusion. All loaded variable names are transformed to lowercase and all dashes are replaced by underscores.
The definitions below are not case-sensitive and can be written with underscore or dash. An exception is `prefix`,
which needs to match exactly. Dot-notation can be used to access nested configurations."""
allow_all: bool = False
"""Allow potentially all environment variables to be read as config option."""
allow: Optional[List[str]] = None
"""Only allow a list of environment variables as input."""
deny: Optional[List[str]] = None
"""Do not allow to read from environemnt variables in this list. Useful if `allow_all` is set and certain variables
should be excluded."""
prefix: Optional[str] = None
"""The selection above can be narrowed down to a specific prefix, e.g. `CONFIG_`. The variables in the lists above
or the map below do not need to include this prefix, it is automatically added. This option is especially
recommended, if ´allow_all´ is set."""
remap: Optional[Dict[str, str]] = None
"""Certain environment variables can be mapped to config arguments with a different name."""
file: Optional[Path] = None
"""Built in .env file loading with lower than environment precedence. Uses UTF-8 for decoding."""
@dataclass
class ConfZCLArgSource(ConfZSource):
"""Source config for command line arguments. Command line arguments are case-sensitive. Dot-notation can be
used to access nested configurations. Only command line arguments starting with two dashes (\\-\\-) are considered.
Between argument and value must be whitespace, an equal sign is not supported at the moment."""
prefix: Optional[str] = None
"""Optionally, all command line arguments can have a prefix, e.g. `config_`. The prefix does not need to include
the two dashes at the beginning. The map below does not need to include the prefix, it is automatically added."""
remap: Optional[Dict[str, str]] = None
"""Certain command line arguments can be mapped to config arguments with a different name. The map does not need to
include the two dashes at the beginning."""
@dataclass
class ConfZDataSource(ConfZSource):
"""Source config for raw data, i.e. constants. This can be useful for unit-test together with
:meth:`~confz.ConfZ.change_config_sources` to inject test data into the config."""
data: Dict[str, Any]
"""All data should go into this (possibly nested) dict."""
| 50.433735 | 119 | 0.721691 | from dataclasses import dataclass
from enum import Enum
from pathlib import Path
from typing import Optional, Union, List, Dict, Any
@dataclass
class ConfZSource:
ConfZSources = Union[ConfZSource, List[ConfZSource]]
class FileFormat(Enum):
JSON = 'json'
YAML = 'yaml'
@dataclass
class ConfZFileSource(ConfZSource):
file: Optional[Path] = None
file_from_env: Optional[str] = None
file_from_cl: Optional[Union[int, str]] = None
folder: Optional[Path] = None
format: Optional[FileFormat] = None
encoding: str = 'utf-8'
@dataclass
class ConfZEnvSource(ConfZSource):
allow_all: bool = False
allow: Optional[List[str]] = None
deny: Optional[List[str]] = None
prefix: Optional[str] = None
remap: Optional[Dict[str, str]] = None
file: Optional[Path] = None
@dataclass
class ConfZCLArgSource(ConfZSource):
prefix: Optional[str] = None
remap: Optional[Dict[str, str]] = None
@dataclass
class ConfZDataSource(ConfZSource):
data: Dict[str, Any]
| true | true |
f73a4706e53853acb6cd5a38b62f25982c9c3567 | 511 | py | Python | demo.py | adamslab-ub/SCoPP | b88a2b04537d5828190973d73f525fa902723375 | [
"MIT"
] | 5 | 2021-05-26T04:56:16.000Z | 2022-03-26T19:59:46.000Z | demo.py | adamslab-ub/SCoPP | b88a2b04537d5828190973d73f525fa902723375 | [
"MIT"
] | 2 | 2021-10-30T14:53:05.000Z | 2021-11-07T02:51:10.000Z | demo.py | adamslab-ub/SCoPP | b88a2b04537d5828190973d73f525fa902723375 | [
"MIT"
] | 3 | 2021-08-15T03:31:57.000Z | 2022-02-01T21:16:57.000Z | """
This code contains examples of how to call and use the SCoPP-Monitoring module.
"""
# Import the necessary modules:
import monitoring_algorithms
import environments as envs
# Initialize environment class
environment = envs.Debugger()
# Initialize monitoring algorithm instance
way_point_allocator = monitoring_algorithms.QLB(environment, number_of_robots=5,plot="full")
# Run the algorithm on the given environment and display all information
paths = way_point_allocator.run(info="verbose") | 34.066667 | 93 | 0.790607 |
import monitoring_algorithms
import environments as envs
environment = envs.Debugger()
way_point_allocator = monitoring_algorithms.QLB(environment, number_of_robots=5,plot="full")
paths = way_point_allocator.run(info="verbose") | true | true |
f73a490a3587b20639a7f51e5d07bd2315e75b7d | 250 | py | Python | signalwire/relay/calling/results/record_result.py | ramarketing/signalwire-python | c0663bdd0454faaa39f42af7c936cea1d43e1842 | [
"MIT"
] | 23 | 2018-12-19T14:48:18.000Z | 2022-01-11T03:58:36.000Z | signalwire/relay/calling/results/record_result.py | ramarketing/signalwire-python | c0663bdd0454faaa39f42af7c936cea1d43e1842 | [
"MIT"
] | 13 | 2018-10-17T12:57:54.000Z | 2021-09-01T21:46:01.000Z | signalwire/relay/calling/results/record_result.py | ramarketing/signalwire-python | c0663bdd0454faaa39f42af7c936cea1d43e1842 | [
"MIT"
] | 12 | 2020-01-21T14:29:43.000Z | 2022-01-11T07:48:06.000Z | from . import BaseResult
class RecordResult(BaseResult):
@property
def url(self):
return self.component.url
@property
def duration(self):
return self.component.duration
@property
def size(self):
return self.component.size
| 15.625 | 34 | 0.716 | from . import BaseResult
class RecordResult(BaseResult):
@property
def url(self):
return self.component.url
@property
def duration(self):
return self.component.duration
@property
def size(self):
return self.component.size
| true | true |
f73a49870c7e051c3891727f8965f4cc1fd87a90 | 9,869 | py | Python | src/oci/identity/models/mfa_totp_device_summary.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/identity/models/mfa_totp_device_summary.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/identity/models/mfa_totp_device_summary.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class MfaTotpDeviceSummary(object):
"""
As the name suggests, a `MfaTotpDeviceSummary` object contains information about a `MfaTotpDevice`.
"""
#: A constant which can be used with the lifecycle_state property of a MfaTotpDeviceSummary.
#: This constant has a value of "CREATING"
LIFECYCLE_STATE_CREATING = "CREATING"
#: A constant which can be used with the lifecycle_state property of a MfaTotpDeviceSummary.
#: This constant has a value of "ACTIVE"
LIFECYCLE_STATE_ACTIVE = "ACTIVE"
#: A constant which can be used with the lifecycle_state property of a MfaTotpDeviceSummary.
#: This constant has a value of "INACTIVE"
LIFECYCLE_STATE_INACTIVE = "INACTIVE"
#: A constant which can be used with the lifecycle_state property of a MfaTotpDeviceSummary.
#: This constant has a value of "DELETING"
LIFECYCLE_STATE_DELETING = "DELETING"
#: A constant which can be used with the lifecycle_state property of a MfaTotpDeviceSummary.
#: This constant has a value of "DELETED"
LIFECYCLE_STATE_DELETED = "DELETED"
def __init__(self, **kwargs):
"""
Initializes a new MfaTotpDeviceSummary object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param id:
The value to assign to the id property of this MfaTotpDeviceSummary.
:type id: str
:param user_id:
The value to assign to the user_id property of this MfaTotpDeviceSummary.
:type user_id: str
:param time_created:
The value to assign to the time_created property of this MfaTotpDeviceSummary.
:type time_created: datetime
:param time_expires:
The value to assign to the time_expires property of this MfaTotpDeviceSummary.
:type time_expires: datetime
:param lifecycle_state:
The value to assign to the lifecycle_state property of this MfaTotpDeviceSummary.
Allowed values for this property are: "CREATING", "ACTIVE", "INACTIVE", "DELETING", "DELETED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type lifecycle_state: str
:param inactive_status:
The value to assign to the inactive_status property of this MfaTotpDeviceSummary.
:type inactive_status: int
:param is_activated:
The value to assign to the is_activated property of this MfaTotpDeviceSummary.
:type is_activated: bool
"""
self.swagger_types = {
'id': 'str',
'user_id': 'str',
'time_created': 'datetime',
'time_expires': 'datetime',
'lifecycle_state': 'str',
'inactive_status': 'int',
'is_activated': 'bool'
}
self.attribute_map = {
'id': 'id',
'user_id': 'userId',
'time_created': 'timeCreated',
'time_expires': 'timeExpires',
'lifecycle_state': 'lifecycleState',
'inactive_status': 'inactiveStatus',
'is_activated': 'isActivated'
}
self._id = None
self._user_id = None
self._time_created = None
self._time_expires = None
self._lifecycle_state = None
self._inactive_status = None
self._is_activated = None
@property
def id(self):
"""
**[Required]** Gets the id of this MfaTotpDeviceSummary.
The OCID of the MFA TOTP Device.
:return: The id of this MfaTotpDeviceSummary.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this MfaTotpDeviceSummary.
The OCID of the MFA TOTP Device.
:param id: The id of this MfaTotpDeviceSummary.
:type: str
"""
self._id = id
@property
def user_id(self):
"""
**[Required]** Gets the user_id of this MfaTotpDeviceSummary.
The OCID of the user the MFA TOTP device belongs to.
:return: The user_id of this MfaTotpDeviceSummary.
:rtype: str
"""
return self._user_id
@user_id.setter
def user_id(self, user_id):
"""
Sets the user_id of this MfaTotpDeviceSummary.
The OCID of the user the MFA TOTP device belongs to.
:param user_id: The user_id of this MfaTotpDeviceSummary.
:type: str
"""
self._user_id = user_id
@property
def time_created(self):
"""
**[Required]** Gets the time_created of this MfaTotpDeviceSummary.
Date and time the `MfaTotpDevice` object was created, in the format defined by RFC3339.
Example: `2016-08-25T21:10:29.600Z`
:return: The time_created of this MfaTotpDeviceSummary.
:rtype: datetime
"""
return self._time_created
@time_created.setter
def time_created(self, time_created):
"""
Sets the time_created of this MfaTotpDeviceSummary.
Date and time the `MfaTotpDevice` object was created, in the format defined by RFC3339.
Example: `2016-08-25T21:10:29.600Z`
:param time_created: The time_created of this MfaTotpDeviceSummary.
:type: datetime
"""
self._time_created = time_created
@property
def time_expires(self):
"""
Gets the time_expires of this MfaTotpDeviceSummary.
Date and time when this MFA TOTP device will expire, in the format defined by RFC3339.
Null if it never expires.
Example: `2016-08-25T21:10:29.600Z`
:return: The time_expires of this MfaTotpDeviceSummary.
:rtype: datetime
"""
return self._time_expires
@time_expires.setter
def time_expires(self, time_expires):
"""
Sets the time_expires of this MfaTotpDeviceSummary.
Date and time when this MFA TOTP device will expire, in the format defined by RFC3339.
Null if it never expires.
Example: `2016-08-25T21:10:29.600Z`
:param time_expires: The time_expires of this MfaTotpDeviceSummary.
:type: datetime
"""
self._time_expires = time_expires
@property
def lifecycle_state(self):
"""
**[Required]** Gets the lifecycle_state of this MfaTotpDeviceSummary.
The MFA TOTP device's current state.
Allowed values for this property are: "CREATING", "ACTIVE", "INACTIVE", "DELETING", "DELETED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The lifecycle_state of this MfaTotpDeviceSummary.
:rtype: str
"""
return self._lifecycle_state
@lifecycle_state.setter
def lifecycle_state(self, lifecycle_state):
"""
Sets the lifecycle_state of this MfaTotpDeviceSummary.
The MFA TOTP device's current state.
:param lifecycle_state: The lifecycle_state of this MfaTotpDeviceSummary.
:type: str
"""
allowed_values = ["CREATING", "ACTIVE", "INACTIVE", "DELETING", "DELETED"]
if not value_allowed_none_or_none_sentinel(lifecycle_state, allowed_values):
lifecycle_state = 'UNKNOWN_ENUM_VALUE'
self._lifecycle_state = lifecycle_state
@property
def inactive_status(self):
"""
Gets the inactive_status of this MfaTotpDeviceSummary.
The detailed status of INACTIVE lifecycleState.
Allowed values are:
- 1 - SUSPENDED
- 2 - DISABLED
- 4 - BLOCKED
- 8 - LOCKED
:return: The inactive_status of this MfaTotpDeviceSummary.
:rtype: int
"""
return self._inactive_status
@inactive_status.setter
def inactive_status(self, inactive_status):
"""
Sets the inactive_status of this MfaTotpDeviceSummary.
The detailed status of INACTIVE lifecycleState.
Allowed values are:
- 1 - SUSPENDED
- 2 - DISABLED
- 4 - BLOCKED
- 8 - LOCKED
:param inactive_status: The inactive_status of this MfaTotpDeviceSummary.
:type: int
"""
self._inactive_status = inactive_status
@property
def is_activated(self):
"""
**[Required]** Gets the is_activated of this MfaTotpDeviceSummary.
Flag to indicate if the MFA TOTP device has been activated
:return: The is_activated of this MfaTotpDeviceSummary.
:rtype: bool
"""
return self._is_activated
@is_activated.setter
def is_activated(self, is_activated):
"""
Sets the is_activated of this MfaTotpDeviceSummary.
Flag to indicate if the MFA TOTP device has been activated
:param is_activated: The is_activated of this MfaTotpDeviceSummary.
:type: bool
"""
self._is_activated = is_activated
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| 32.357377 | 245 | 0.644746 |
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class MfaTotpDeviceSummary(object):
LIFECYCLE_STATE_CREATING = "CREATING"
LIFECYCLE_STATE_ACTIVE = "ACTIVE"
LIFECYCLE_STATE_INACTIVE = "INACTIVE"
LIFECYCLE_STATE_DELETING = "DELETING"
LIFECYCLE_STATE_DELETED = "DELETED"
def __init__(self, **kwargs):
self.swagger_types = {
'id': 'str',
'user_id': 'str',
'time_created': 'datetime',
'time_expires': 'datetime',
'lifecycle_state': 'str',
'inactive_status': 'int',
'is_activated': 'bool'
}
self.attribute_map = {
'id': 'id',
'user_id': 'userId',
'time_created': 'timeCreated',
'time_expires': 'timeExpires',
'lifecycle_state': 'lifecycleState',
'inactive_status': 'inactiveStatus',
'is_activated': 'isActivated'
}
self._id = None
self._user_id = None
self._time_created = None
self._time_expires = None
self._lifecycle_state = None
self._inactive_status = None
self._is_activated = None
@property
def id(self):
return self._id
@id.setter
def id(self, id):
self._id = id
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, user_id):
self._user_id = user_id
@property
def time_created(self):
return self._time_created
@time_created.setter
def time_created(self, time_created):
self._time_created = time_created
@property
def time_expires(self):
return self._time_expires
@time_expires.setter
def time_expires(self, time_expires):
self._time_expires = time_expires
@property
def lifecycle_state(self):
return self._lifecycle_state
@lifecycle_state.setter
def lifecycle_state(self, lifecycle_state):
allowed_values = ["CREATING", "ACTIVE", "INACTIVE", "DELETING", "DELETED"]
if not value_allowed_none_or_none_sentinel(lifecycle_state, allowed_values):
lifecycle_state = 'UNKNOWN_ENUM_VALUE'
self._lifecycle_state = lifecycle_state
@property
def inactive_status(self):
return self._inactive_status
@inactive_status.setter
def inactive_status(self, inactive_status):
self._inactive_status = inactive_status
@property
def is_activated(self):
return self._is_activated
@is_activated.setter
def is_activated(self, is_activated):
self._is_activated = is_activated
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f73a4a4a72981c957855835fb762832d53a10783 | 6,404 | py | Python | parallel_accel/Analysis/benchmarks/gralt/gralt_benchmarks.py | google/parallel_accel | b58fda1c3a22f2aaa9a97337d602cd72c49ee8be | [
"Apache-2.0"
] | 1 | 2021-12-19T21:17:02.000Z | 2021-12-19T21:17:02.000Z | parallel_accel/Analysis/benchmarks/gralt/gralt_benchmarks.py | google/parallel_accel | b58fda1c3a22f2aaa9a97337d602cd72c49ee8be | [
"Apache-2.0"
] | null | null | null | parallel_accel/Analysis/benchmarks/gralt/gralt_benchmarks.py | google/parallel_accel | b58fda1c3a22f2aaa9a97337d602cd72c49ee8be | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The ParallelAccel Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
Test the speed of GRALTool on standard benchmark acyclic_graphs.
This is deprecated code and is included for reference. New benchmarks should use the
Benchmark and BenchmarkSuite models.
"""
import json
import os
import time
import benchmarks.acyclic_graphs.benchmark_acyclic_graphs as acyclic_graphs
from benchmarks.acyclic_graphs import pbaxisum
import benchmarks.gralt.settings as settings
import linear_algebra
import tensorflow as tf
import grapal_tool as gralt
sample_subgraph = gralt.subgraphs.Sample()
expectation_subgraph = gralt.subgraphs.Expectation()
state_subgraph = gralt.subgraphs.State()
def exp_and_grad_call(
acyclic_graph_t, symbol_names_t, symbol_values_t, ops_t, num_samples_t):
with tf.GradientTape() as g:
g.watch(symbol_values_t)
exp = expectation_subgraph(
acyclic_graph_t, symbol_names=symbol_names_t, symbol_values=symbol_values_t,
operators=ops_t)
grad = g.gradient(exp, symbol_values_t)
return exp, grad
call_dict = {
"samples": lambda acyclic_graph_t, symbol_names_t, symbol_values_t, ops_t,
num_samples_t: sample_subgraph(
acyclic_graph_t, symbol_names=symbol_names_t, symbol_values=symbol_values_t,
repetitions=num_samples_t),
"exp": lambda acyclic_graph_t, symbol_names_t, symbol_values_t, ops_t,
num_samples_t: expectation_subgraph(
acyclic_graph_t, symbol_names=symbol_names_t, symbol_values=symbol_values_t,
operators=ops_t),
"exp_and_grad": exp_and_grad_call,
"state": lambda acyclic_graph_t, symbol_names_t, symbol_values_t, ops_t,
num_samples_t: state_subgraph(
acyclic_graph_t, symbol_names=symbol_names_t, symbol_values=symbol_values_t),
}
get_num_samples_dict = {
"samples": lambda settings_dict:
tf.constant([settings_dict["num_samples"]]),
"exp": lambda settings_dict: tf.constant([0]),
"exp_and_grad": lambda settings_dict: tf.constant([0]),
"state": lambda settings_dict: tf.constant([0]),
}
get_ops_dict = {
"samples": lambda discretes: tf.constant(""),
"exp": lambda discretes:
gralt.convert_to_tensor([[pbaxisum.get_random_prob_basis_axis_sum(discretes)]]),
"exp_and_grad": lambda discretes:
gralt.convert_to_tensor([[pbaxisum.get_random_prob_basis_axis_sum(discretes)]]),
"state": lambda discretes: tf.constant(""),
}
def run_gralt_benchmarks(
min_subgraphs, max_subgraphs, skip_subgraphs, min_discretes, max_discretes, iterations,
num_samples, rounding_digits, acyclic_graph_type, sim_type, rel_save_dir,
save_dir_prefix=os.getcwd()):
if acyclic_graph_type == "approxopt":
acyclic_graph_builder = acyclic_graphs.approxopt
elif acyclic_graph_type == "hea":
acyclic_graph_builder = acyclic_graphs.hea
else:
raise ValueError(acyclic_graph_type + " is not a valid type of test acyclic_graph.")
if sim_type in {"samples", "exp", "exp_and_grad", "state"}:
call_subgraph = call_dict[sim_type]
get_num_samples = get_num_samples_dict[sim_type]
get_ops = get_ops_dict[sim_type]
else:
raise ValueError(sim_type + " is not a valid simulation types.")
# Save settings.
full_save_dir = os.path.join(save_dir_prefix, rel_save_dir)
settings.set_settings(
min_subgraphs=min_subgraphs,
max_subgraphs=max_subgraphs,
skip_subgraphs=skip_subgraphs,
min_discretes=min_discretes,
max_discretes=max_discretes,
iterations=iterations,
num_samples=num_samples,
rounding_digits=rounding_digits,
acyclic_graph_type=acyclic_graph_type,
sim_type=sim_type,
full_save_dir=full_save_dir
)
settings_dict = settings.load_settings(full_save_dir)
# Run benchmarks.
num_samples_t = get_num_samples(settings_dict)
for q in range(settings_dict["min_discretes"], settings_dict["max_discretes"] + 1):
print(f"Current discrete size: {q}")
benchmarks_dict = dict()
discretes = linear_algebra.GridSpace.rect(1, q)
ops_t = get_ops(discretes)
for l in range(
settings_dict["min_subgraphs"], settings_dict["max_subgraphs"] + 1,
settings_dict["skip_subgraphs"]):
print(f"Current number of subgraphs: {l}")
benchmarks_dict[l] = {}
acyclic_graph, symbols = acyclic_graph_builder(discretes, l, acyclic_graph_type)
is_acyclic_graph_compiled = False
symbol_names_t = tf.constant([str(s) for s in symbols])
for r in range(settings_dict["iterations"]):
symbol_values_t = tf.random.uniform(
[1, len(symbols)], minval=-2.0, maxval=2.0)
start = time.time()
if not is_acyclic_graph_compiled:
compiled_acyclic_graph = gralt.convert_to_tensor([acyclic_graph])
is_acyclic_graph_compiled = True
result = call_subgraph(
compiled_acyclic_graph, symbol_names_t, symbol_values_t,
ops_t, num_samples_t)
stop = time.time()
this_runtime = round(stop - start, rounding_digits)
if r == 0:
# First run is special because it considers the compilation time
benchmarks_dict[l]["initial"] = this_runtime
benchmarks_dict[l]["remaining"] = []
print("initial runtime of {} seconds".format(this_runtime))
else:
print("subsequent runtime of {} seconds".format(this_runtime))
benchmarks_dict[l]["remaining"].append(this_runtime)
benchmarks_dict[l]["depth"] = len(acyclic_graph)
# Checkpoint the benchmarks after each discrete number.
benchmarks_filename = "benchmarks_dict_{}.json".format(q)
benchmarks_data_file = os.path.join(full_save_dir, benchmarks_filename)
with open(benchmarks_data_file, 'w') as datafile:
json.dump(benchmarks_dict, datafile)
| 39.288344 | 91 | 0.723142 |
import json
import os
import time
import benchmarks.acyclic_graphs.benchmark_acyclic_graphs as acyclic_graphs
from benchmarks.acyclic_graphs import pbaxisum
import benchmarks.gralt.settings as settings
import linear_algebra
import tensorflow as tf
import grapal_tool as gralt
sample_subgraph = gralt.subgraphs.Sample()
expectation_subgraph = gralt.subgraphs.Expectation()
state_subgraph = gralt.subgraphs.State()
def exp_and_grad_call(
acyclic_graph_t, symbol_names_t, symbol_values_t, ops_t, num_samples_t):
with tf.GradientTape() as g:
g.watch(symbol_values_t)
exp = expectation_subgraph(
acyclic_graph_t, symbol_names=symbol_names_t, symbol_values=symbol_values_t,
operators=ops_t)
grad = g.gradient(exp, symbol_values_t)
return exp, grad
call_dict = {
"samples": lambda acyclic_graph_t, symbol_names_t, symbol_values_t, ops_t,
num_samples_t: sample_subgraph(
acyclic_graph_t, symbol_names=symbol_names_t, symbol_values=symbol_values_t,
repetitions=num_samples_t),
"exp": lambda acyclic_graph_t, symbol_names_t, symbol_values_t, ops_t,
num_samples_t: expectation_subgraph(
acyclic_graph_t, symbol_names=symbol_names_t, symbol_values=symbol_values_t,
operators=ops_t),
"exp_and_grad": exp_and_grad_call,
"state": lambda acyclic_graph_t, symbol_names_t, symbol_values_t, ops_t,
num_samples_t: state_subgraph(
acyclic_graph_t, symbol_names=symbol_names_t, symbol_values=symbol_values_t),
}
get_num_samples_dict = {
"samples": lambda settings_dict:
tf.constant([settings_dict["num_samples"]]),
"exp": lambda settings_dict: tf.constant([0]),
"exp_and_grad": lambda settings_dict: tf.constant([0]),
"state": lambda settings_dict: tf.constant([0]),
}
get_ops_dict = {
"samples": lambda discretes: tf.constant(""),
"exp": lambda discretes:
gralt.convert_to_tensor([[pbaxisum.get_random_prob_basis_axis_sum(discretes)]]),
"exp_and_grad": lambda discretes:
gralt.convert_to_tensor([[pbaxisum.get_random_prob_basis_axis_sum(discretes)]]),
"state": lambda discretes: tf.constant(""),
}
def run_gralt_benchmarks(
min_subgraphs, max_subgraphs, skip_subgraphs, min_discretes, max_discretes, iterations,
num_samples, rounding_digits, acyclic_graph_type, sim_type, rel_save_dir,
save_dir_prefix=os.getcwd()):
if acyclic_graph_type == "approxopt":
acyclic_graph_builder = acyclic_graphs.approxopt
elif acyclic_graph_type == "hea":
acyclic_graph_builder = acyclic_graphs.hea
else:
raise ValueError(acyclic_graph_type + " is not a valid type of test acyclic_graph.")
if sim_type in {"samples", "exp", "exp_and_grad", "state"}:
call_subgraph = call_dict[sim_type]
get_num_samples = get_num_samples_dict[sim_type]
get_ops = get_ops_dict[sim_type]
else:
raise ValueError(sim_type + " is not a valid simulation types.")
full_save_dir = os.path.join(save_dir_prefix, rel_save_dir)
settings.set_settings(
min_subgraphs=min_subgraphs,
max_subgraphs=max_subgraphs,
skip_subgraphs=skip_subgraphs,
min_discretes=min_discretes,
max_discretes=max_discretes,
iterations=iterations,
num_samples=num_samples,
rounding_digits=rounding_digits,
acyclic_graph_type=acyclic_graph_type,
sim_type=sim_type,
full_save_dir=full_save_dir
)
settings_dict = settings.load_settings(full_save_dir)
num_samples_t = get_num_samples(settings_dict)
for q in range(settings_dict["min_discretes"], settings_dict["max_discretes"] + 1):
print(f"Current discrete size: {q}")
benchmarks_dict = dict()
discretes = linear_algebra.GridSpace.rect(1, q)
ops_t = get_ops(discretes)
for l in range(
settings_dict["min_subgraphs"], settings_dict["max_subgraphs"] + 1,
settings_dict["skip_subgraphs"]):
print(f"Current number of subgraphs: {l}")
benchmarks_dict[l] = {}
acyclic_graph, symbols = acyclic_graph_builder(discretes, l, acyclic_graph_type)
is_acyclic_graph_compiled = False
symbol_names_t = tf.constant([str(s) for s in symbols])
for r in range(settings_dict["iterations"]):
symbol_values_t = tf.random.uniform(
[1, len(symbols)], minval=-2.0, maxval=2.0)
start = time.time()
if not is_acyclic_graph_compiled:
compiled_acyclic_graph = gralt.convert_to_tensor([acyclic_graph])
is_acyclic_graph_compiled = True
result = call_subgraph(
compiled_acyclic_graph, symbol_names_t, symbol_values_t,
ops_t, num_samples_t)
stop = time.time()
this_runtime = round(stop - start, rounding_digits)
if r == 0:
benchmarks_dict[l]["initial"] = this_runtime
benchmarks_dict[l]["remaining"] = []
print("initial runtime of {} seconds".format(this_runtime))
else:
print("subsequent runtime of {} seconds".format(this_runtime))
benchmarks_dict[l]["remaining"].append(this_runtime)
benchmarks_dict[l]["depth"] = len(acyclic_graph)
benchmarks_filename = "benchmarks_dict_{}.json".format(q)
benchmarks_data_file = os.path.join(full_save_dir, benchmarks_filename)
with open(benchmarks_data_file, 'w') as datafile:
json.dump(benchmarks_dict, datafile)
| true | true |
f73a4bde185e7342b8435e35a8462ea6fc6b22c8 | 62 | py | Python | debug_toolbar/__init__.py | none-da/zeshare | 6c13cd3bd9d82d89f53d4a8b287fe2c30f1d3779 | [
"BSD-3-Clause"
] | 10 | 2015-01-10T15:34:25.000Z | 2021-07-30T11:14:22.000Z | vkontakte_wall/__init__.py | gorelikspb/django-vkontakte-wall | 09b921034d909d7162ee48e8a3eb1c29c0747f40 | [
"BSD-3-Clause"
] | 2 | 2015-06-11T15:28:52.000Z | 2015-08-04T11:53:13.000Z | vkontakte_wall/__init__.py | gorelikspb/django-vkontakte-wall | 09b921034d909d7162ee48e8a3eb1c29c0747f40 | [
"BSD-3-Clause"
] | 7 | 2015-01-29T15:51:38.000Z | 2020-09-01T03:14:47.000Z | VERSION = (0, 8, 1)
__version__ = '.'.join(map(str, VERSION))
| 20.666667 | 41 | 0.612903 | VERSION = (0, 8, 1)
__version__ = '.'.join(map(str, VERSION))
| true | true |
f73a4d22041854c5326afaffc36927b22884b07a | 5,370 | py | Python | workers/test/test_exportactionlogsworker.py | kwestpharedhat/quay | a0df895005bcd3e53847046f69f6a7add87c88fd | [
"Apache-2.0"
] | null | null | null | workers/test/test_exportactionlogsworker.py | kwestpharedhat/quay | a0df895005bcd3e53847046f69f6a7add87c88fd | [
"Apache-2.0"
] | null | null | null | workers/test/test_exportactionlogsworker.py | kwestpharedhat/quay | a0df895005bcd3e53847046f69f6a7add87c88fd | [
"Apache-2.0"
] | null | null | null | import json
import os
import pytest
from datetime import datetime, timedelta
import boto3
from httmock import urlmatch, HTTMock
from moto import mock_s3
from app import storage as test_storage
from data import model, database
from data.logs_model import logs_model
from storage import S3Storage, StorageContext, DistributedStorage
from workers.exportactionlogsworker import ExportActionLogsWorker, POLL_PERIOD_SECONDS
from test.fixtures import *
_TEST_CONTENT = os.urandom(1024)
_TEST_BUCKET = "somebucket"
_TEST_USER = "someuser"
_TEST_PASSWORD = "somepassword"
_TEST_PATH = "some/cool/path"
_TEST_CONTEXT = StorageContext("nyc", None, None, None)
@pytest.fixture(params=["test", "mock_s3"])
def storage_engine(request):
if request.param == "test":
yield test_storage
else:
with mock_s3():
# Create a test bucket and put some test content.
boto3.client("s3").create_bucket(Bucket=_TEST_BUCKET)
engine = DistributedStorage(
{
"foo": S3Storage(
_TEST_CONTEXT, "some/path", _TEST_BUCKET, _TEST_USER, _TEST_PASSWORD
)
},
["foo"],
)
yield engine
def test_export_logs_failure(initialized_db):
    """A failed storage upload must surface as IOError from the queue item
    and report status 'failed' to the callback URL."""
    # Arm the test storage engine so every upload attempt raises.
    test_storage.put_content("local_us", "except_upload", b"true")
    repo = model.repository.get_repository("devtable", "simple")
    worker = ExportActionLogsWorker(None)
    called = [{}]  # captures the JSON body POSTed to the callback URL
    @urlmatch(netloc=r"testcallback")
    def handle_request(url, request):
        called[0] = json.loads(request.body)
        return {"status_code": 200, "content": "{}"}
    def format_date(moment):
        # Parameter renamed from `datetime` to avoid shadowing the class.
        return moment.strftime("%m/%d/%Y")
    now = datetime.now()
    with HTTMock(handle_request):
        with pytest.raises(IOError):
            worker._process_queue_item(
                {
                    "export_id": "someid",
                    "repository_id": repo.id,
                    "namespace_id": repo.namespace_user.id,
                    "namespace_name": "devtable",
                    "repository_name": "simple",
                    "start_time": format_date(now + timedelta(days=-10)),
                    "end_time": format_date(now + timedelta(days=10)),
                    "callback_url": "http://testcallback/",
                    "callback_email": None,
                },
                test_storage,
            )
    # Disarm the failure injection so later tests are unaffected.
    test_storage.remove("local_us", "except_upload")
    assert called[0]
    assert called[0]["export_id"] == "someid"
    assert called[0]["status"] == "failed"
@pytest.mark.parametrize(
    "has_logs",
    [
        True,
        False,
    ],
)
def test_export_logs(initialized_db, storage_engine, has_logs):
    """Happy-path export: logs (or an empty range) are uploaded to storage and
    the callback receives a 'success' payload with a retrievable data URL."""
    # Delete all existing logs so the exported set is fully controlled.
    database.LogEntry3.delete().execute()
    repo = model.repository.get_repository("devtable", "simple")
    user = model.user.get_user("devtable")
    now = datetime.now()
    if has_logs:
        # Add new logs over a multi-day period, one per day.
        for index in range(-10, 10):
            logs_model.log_action(
                "push_repo",
                "devtable",
                user,
                "0.0.0.0",
                {"index": index},
                repo,
                timestamp=now + timedelta(days=index),
            )
    worker = ExportActionLogsWorker(None)
    called = [{}]  # captures the JSON body POSTed to the callback URL
    @urlmatch(netloc=r"testcallback")
    def handle_request(url, request):
        called[0] = json.loads(request.body)
        return {"status_code": 200, "content": "{}"}
    def format_date(moment):
        # Parameter renamed from `datetime` to avoid shadowing the class.
        return moment.strftime("%m/%d/%Y")
    with HTTMock(handle_request):
        worker._process_queue_item(
            {
                "export_id": "someid",
                "repository_id": repo.id,
                "namespace_id": repo.namespace_user.id,
                "namespace_name": "devtable",
                "repository_name": "simple",
                "start_time": format_date(now + timedelta(days=-10)),
                "end_time": format_date(now + timedelta(days=10)),
                "callback_url": "http://testcallback/",
                "callback_email": None,
            },
            storage_engine,
        )
    # The callback must report success and point at the uploaded export.
    assert called[0]
    assert called[0]["export_id"] == "someid"
    assert called[0]["status"] == "success"
    # The URL shape depends on which storage engine the fixture provided.
    url = called[0]["exported_data_url"]
    if url.find("http://localhost:5000/exportedlogs/") == 0:
        storage_id = url[len("http://localhost:5000/exportedlogs/") :]
    else:
        assert url.find("https://somebucket.s3.amazonaws.com/some/path/exportedactionlogs/") == 0
        storage_id, _ = url[
            len("https://somebucket.s3.amazonaws.com/some/path/exportedactionlogs/") :
        ].split("?")
    created = storage_engine.get_content(
        storage_engine.preferred_locations, "exportedactionlogs/" + storage_id
    )
    created_json = json.loads(created)
    if has_logs:
        # Every day's index in the range must appear in the exported metadata.
        found = set()
        for log in created_json["logs"]:
            if log.get("terminator"):
                continue
            found.add(log["metadata"]["index"])
        for index in range(-10, 10):
            assert index in found
    else:
        # An empty export still contains the terminator marker.
        assert created_json["logs"] == [{"terminator": True}]
| 30.338983 | 97 | 0.58324 | import json
import os
import pytest
from datetime import datetime, timedelta
import boto3
from httmock import urlmatch, HTTMock
from moto import mock_s3
from app import storage as test_storage
from data import model, database
from data.logs_model import logs_model
from storage import S3Storage, StorageContext, DistributedStorage
from workers.exportactionlogsworker import ExportActionLogsWorker, POLL_PERIOD_SECONDS
from test.fixtures import *
_TEST_CONTENT = os.urandom(1024)
_TEST_BUCKET = "somebucket"
_TEST_USER = "someuser"
_TEST_PASSWORD = "somepassword"
_TEST_PATH = "some/cool/path"
_TEST_CONTEXT = StorageContext("nyc", None, None, None)
@pytest.fixture(params=["test", "mock_s3"])
def storage_engine(request):
    """Yield a storage engine: the app's test storage or a moto-backed S3 engine."""
    if request.param == "test":
        yield test_storage
    else:
        with mock_s3():
            # The bucket must exist in moto's in-memory S3 before use.
            boto3.client("s3").create_bucket(Bucket=_TEST_BUCKET)
            engine = DistributedStorage(
                {
                    "foo": S3Storage(
                        _TEST_CONTEXT, "some/path", _TEST_BUCKET, _TEST_USER, _TEST_PASSWORD
                    )
                },
                ["foo"],
            )
            yield engine
def test_export_logs_failure(initialized_db):
    """A failed storage upload must surface as IOError and report status
    'failed' to the callback URL."""
    # Writing this sentinel makes every upload in the test storage raise.
    test_storage.put_content("local_us", "except_upload", b"true")
    repo = model.repository.get_repository("devtable", "simple")
    user = model.user.get_user("devtable")
    worker = ExportActionLogsWorker(None)
    called = [{}]  # captures the JSON body POSTed to the callback URL
    @urlmatch(netloc=r"testcallback")
    def handle_request(url, request):
        called[0] = json.loads(request.body)
        return {"status_code": 200, "content": "{}"}
    def format_date(datetime):  # NOTE(review): parameter shadows the datetime class
        return datetime.strftime("%m/%d/%Y")
    now = datetime.now()
    with HTTMock(handle_request):
        with pytest.raises(IOError):
            worker._process_queue_item(
                {
                    "export_id": "someid",
                    "repository_id": repo.id,
                    "namespace_id": repo.namespace_user.id,
                    "namespace_name": "devtable",
                    "repository_name": "simple",
                    "start_time": format_date(now + timedelta(days=-10)),
                    "end_time": format_date(now + timedelta(days=10)),
                    "callback_url": "http://testcallback/",
                    "callback_email": None,
                },
                test_storage,
            )
    # Disarm the failure injection so later tests are unaffected.
    test_storage.remove("local_us", "except_upload")
    assert called[0]
    assert called[0]["export_id"] == "someid"
    assert called[0]["status"] == "failed"
@pytest.mark.parametrize(
    "has_logs",
    [
        True,
        False,
    ],
)
def test_export_logs(initialized_db, storage_engine, has_logs):
    """Happy-path export: logs (or an empty range) are uploaded to storage and
    the callback receives a 'success' payload with a retrievable data URL."""
    # Delete all existing logs so the exported set is fully controlled.
    database.LogEntry3.delete().execute()
    repo = model.repository.get_repository("devtable", "simple")
    user = model.user.get_user("devtable")
    now = datetime.now()
    if has_logs:
        # Add one log per day over a multi-day period.
        for index in range(-10, 10):
            logs_model.log_action(
                "push_repo",
                "devtable",
                user,
                "0.0.0.0",
                {"index": index},
                repo,
                timestamp=now + timedelta(days=index),
            )
    worker = ExportActionLogsWorker(None)
    called = [{}]  # captures the JSON body POSTed to the callback URL
    @urlmatch(netloc=r"testcallback")
    def handle_request(url, request):
        called[0] = json.loads(request.body)
        return {"status_code": 200, "content": "{}"}
    def format_date(datetime):  # NOTE(review): parameter shadows the datetime class
        return datetime.strftime("%m/%d/%Y")
    with HTTMock(handle_request):
        worker._process_queue_item(
            {
                "export_id": "someid",
                "repository_id": repo.id,
                "namespace_id": repo.namespace_user.id,
                "namespace_name": "devtable",
                "repository_name": "simple",
                "start_time": format_date(now + timedelta(days=-10)),
                "end_time": format_date(now + timedelta(days=10)),
                "callback_url": "http://testcallback/",
                "callback_email": None,
            },
            storage_engine,
        )
    # The callback must report success and point at the uploaded export.
    assert called[0]
    assert called[0]["export_id"] == "someid"
    assert called[0]["status"] == "success"
    # The URL shape depends on which storage engine the fixture provided.
    url = called[0]["exported_data_url"]
    if url.find("http://localhost:5000/exportedlogs/") == 0:
        storage_id = url[len("http://localhost:5000/exportedlogs/") :]
    else:
        assert url.find("https://somebucket.s3.amazonaws.com/some/path/exportedactionlogs/") == 0
        storage_id, _ = url[
            len("https://somebucket.s3.amazonaws.com/some/path/exportedactionlogs/") :
        ].split("?")
    created = storage_engine.get_content(
        storage_engine.preferred_locations, "exportedactionlogs/" + storage_id
    )
    created_json = json.loads(created)
    if has_logs:
        # Every day's index in the range must appear in the exported metadata.
        found = set()
        for log in created_json["logs"]:
            if log.get("terminator"):
                continue
            found.add(log["metadata"]["index"])
        for index in range(-10, 10):
            assert index in found
    else:
        # An empty export still contains the terminator marker.
        assert created_json["logs"] == [{"terminator": True}]
| true | true |
f73a4e39146352545db7ac5058c6f78e7da2e30f | 5,014 | py | Python | mpf/core/bcp/bcp.py | Wolfmarsh/mpf | ad71f381ce8a0e65f28958e51cf8a8b38a6154fb | [
"MIT"
] | null | null | null | mpf/core/bcp/bcp.py | Wolfmarsh/mpf | ad71f381ce8a0e65f28958e51cf8a8b38a6154fb | [
"MIT"
] | null | null | null | mpf/core/bcp/bcp.py | Wolfmarsh/mpf | ad71f381ce8a0e65f28958e51cf8a8b38a6154fb | [
"MIT"
] | null | null | null | """BCP module."""
import asyncio
from functools import partial
from typing import List
from mpf.core.events import QueuedEvent
from mpf.core.mpf_controller import MpfController
from mpf.core.bcp.bcp_server import BcpServer
from mpf.core.utility_functions import Util
from mpf.core.bcp.bcp_interface import BcpInterface
from mpf.core.bcp.bcp_transport import BcpTransportManager
MYPY = False
if MYPY: # pragma: no cover
from mpf.core.machine import MachineController # pylint: disable-msg=cyclic-import,unused-import
class Bcp(MpfController):
    """BCP Module."""
    config_name = "bcp"
    # __slots__ avoids a per-instance __dict__ for these fixed attributes.
    __slots__ = ["interface", "transport", "servers"]
    def __init__(self, machine: "MachineController") -> None:
        """Initialise BCP module and register init/shutdown event handlers."""
        super().__init__(machine)
        self.interface = BcpInterface(machine)
        self.transport = BcpTransportManager(machine)
        self.servers = []  # type: List[BcpServer]
        # BCP is only wired up when MPF was started with the bcp option.
        if self.machine.options['bcp']:
            self.machine.events.add_handler('init_phase_2',
                                            self._setup_bcp_connections)
            self.machine.events.add_handler('init_phase_4',
                                            self._setup_bcp_servers)
            self.machine.events.add_handler('shutdown',
                                            self._stop_servers)
    def send(self, bcp_command, **kwargs):
        """Emulate legacy send.
        Args:
            bcp_command: Command to send
            **kwargs: Parameters of the command, broadcast to every client
        """
        self.transport.send_to_all_clients(bcp_command, **kwargs)
    def _setup_bcp_connections(self, queue: QueuedEvent, **kwargs):
        """Connect to BCP servers from MPF config.

        Blocks init (via *queue*) until all configured clients connected.
        """
        del kwargs
        if ('connections' not in self.machine.config['bcp'] or not
                self.machine.config['bcp']['connections']):
            return
        client_connect_futures = []
        for name, settings in self.machine.config['bcp']['connections'].items():
            settings = self.machine.config_validator.validate_config("bcp:connections", settings)
            self.machine.events.post('bcp_connection_attempt',
                                     name=name,
                                     host=settings['host'],
                                     port=settings['port'])
            '''event: bcp_connection_attempt
            desc: MPF is attempting to make a BCP connection.
            args:
            name: The name of the connection.
            host: The host name MPF is attempting to connect to.
            port: The TCP port MPF is attempting to connect to'''
            # The client class is configurable; instantiate it by dotted name.
            client = Util.string_to_class(settings['type'])(self.machine, name, self.machine.bcp)
            client.exit_on_close = settings['exit_on_close']
            connect_future = asyncio.ensure_future(client.connect(settings), loop=self.machine.clock.loop)
            connect_future.add_done_callback(partial(self.transport.register_transport, client))
            client_connect_futures.append(connect_future)
        # block init until all clients are connected
        if client_connect_futures:
            queue.wait()
            future = asyncio.ensure_future(asyncio.wait(iter(client_connect_futures), loop=self.machine.clock.loop),
                                           loop=self.machine.clock.loop)
            future.add_done_callback(lambda x: queue.clear())
            future.add_done_callback(self._bcp_clients_connected)
    def _bcp_clients_connected(self, *args):
        """Post the bcp_clients_connected event once all clients are up."""
        del args
        self.machine.events.post('bcp_clients_connected')
        '''event: bcp_clients_connected
        desc: All BCP outgoing BCP connections have been made.'''
    def _setup_bcp_servers(self, queue: QueuedEvent, **kwargs):
        """Start BCP servers to allow other clients to connect."""
        del kwargs
        if 'servers' not in self.machine.config['bcp'] or not self.machine.config['bcp']['servers']:
            return
        servers_start_futures = []
        for settings in self.machine.config['bcp']['servers'].values():
            settings = self.machine.config_validator.validate_config("bcp:servers", settings)
            server = BcpServer(self.machine, settings['ip'], settings['port'], settings['type'])
            server_future = asyncio.ensure_future(server.start(), loop=self.machine.clock.loop)
            # Bind `server` as a default arg so each callback appends its own server.
            server_future.add_done_callback(lambda x, s=server: self.servers.append(s))
            servers_start_futures.append(server_future)
        # block init until all servers were started
        if servers_start_futures:
            queue.wait()
            future = asyncio.ensure_future(asyncio.wait(iter(servers_start_futures), loop=self.machine.clock.loop),
                                           loop=self.machine.clock.loop)
            future.add_done_callback(lambda x: queue.clear())
    def _stop_servers(self, **kwargs):
        """Stop BCP servers."""
        del kwargs
        for server in self.servers:
            server.stop()
| 41.438017 | 116 | 0.62844 | import asyncio
from functools import partial
from typing import List
from mpf.core.events import QueuedEvent
from mpf.core.mpf_controller import MpfController
from mpf.core.bcp.bcp_server import BcpServer
from mpf.core.utility_functions import Util
from mpf.core.bcp.bcp_interface import BcpInterface
from mpf.core.bcp.bcp_transport import BcpTransportManager
MYPY = False
if MYPY:
from mpf.core.machine import MachineController
class Bcp(MpfController):
    """BCP (Backbox Control Protocol) module."""
    config_name = "bcp"
    # __slots__ avoids a per-instance __dict__ for these fixed attributes.
    __slots__ = ["interface", "transport", "servers"]
    def __init__(self, machine: "MachineController") -> None:
        """Initialise BCP module and register init/shutdown event handlers."""
        super().__init__(machine)
        self.interface = BcpInterface(machine)
        self.transport = BcpTransportManager(machine)
        self.servers = []  # type: List[BcpServer]
        # BCP is only wired up when MPF was started with the bcp option.
        if self.machine.options['bcp']:
            self.machine.events.add_handler('init_phase_2',
                                            self._setup_bcp_connections)
            self.machine.events.add_handler('init_phase_4',
                                            self._setup_bcp_servers)
            self.machine.events.add_handler('shutdown',
                                            self._stop_servers)
    def send(self, bcp_command, **kwargs):
        """Broadcast *bcp_command* (with its kwargs) to all connected clients."""
        self.transport.send_to_all_clients(bcp_command, **kwargs)
    def _setup_bcp_connections(self, queue: QueuedEvent, **kwargs):
        """Connect to BCP servers from MPF config; block init until done."""
        del kwargs
        if ('connections' not in self.machine.config['bcp'] or not
                self.machine.config['bcp']['connections']):
            return
        client_connect_futures = []
        for name, settings in self.machine.config['bcp']['connections'].items():
            settings = self.machine.config_validator.validate_config("bcp:connections", settings)
            self.machine.events.post('bcp_connection_attempt',
                                     name=name,
                                     host=settings['host'],
                                     port=settings['port'])
            # The client class is configurable; instantiate it by dotted name.
            client = Util.string_to_class(settings['type'])(self.machine, name, self.machine.bcp)
            client.exit_on_close = settings['exit_on_close']
            connect_future = asyncio.ensure_future(client.connect(settings), loop=self.machine.clock.loop)
            connect_future.add_done_callback(partial(self.transport.register_transport, client))
            client_connect_futures.append(connect_future)
        # Block init (via queue) until every client connection completed.
        if client_connect_futures:
            queue.wait()
            future = asyncio.ensure_future(asyncio.wait(iter(client_connect_futures), loop=self.machine.clock.loop),
                                           loop=self.machine.clock.loop)
            future.add_done_callback(lambda x: queue.clear())
            future.add_done_callback(self._bcp_clients_connected)
    def _bcp_clients_connected(self, *args):
        """Post the bcp_clients_connected event once all clients are up."""
        del args
        self.machine.events.post('bcp_clients_connected')
    def _setup_bcp_servers(self, queue: QueuedEvent, **kwargs):
        """Start BCP servers (from config) to allow other clients to connect."""
        del kwargs
        if 'servers' not in self.machine.config['bcp'] or not self.machine.config['bcp']['servers']:
            return
        servers_start_futures = []
        for settings in self.machine.config['bcp']['servers'].values():
            settings = self.machine.config_validator.validate_config("bcp:servers", settings)
            server = BcpServer(self.machine, settings['ip'], settings['port'], settings['type'])
            server_future = asyncio.ensure_future(server.start(), loop=self.machine.clock.loop)
            # Bind `server` as a default arg so each callback appends its own server.
            server_future.add_done_callback(lambda x, s=server: self.servers.append(s))
            servers_start_futures.append(server_future)
        # Block init (via queue) until every server has started.
        if servers_start_futures:
            queue.wait()
            future = asyncio.ensure_future(asyncio.wait(iter(servers_start_futures), loop=self.machine.clock.loop),
                                           loop=self.machine.clock.loop)
            future.add_done_callback(lambda x: queue.clear())
    def _stop_servers(self, **kwargs):
        """Stop all running BCP servers on shutdown."""
        del kwargs
        for server in self.servers:
            server.stop()
| true | true |
f73a4f097c8fc92de9032bf09d5e135ebc9ae997 | 2,673 | py | Python | test_elasticsearch/test_helpers.py | dliappis/elasticsearch-py | 85573db2759922aed7fb655cfdd7cb95d3071a34 | [
"Apache-2.0"
] | 1 | 2019-01-18T02:36:01.000Z | 2019-01-18T02:36:01.000Z | test_elasticsearch/test_helpers.py | dliappis/elasticsearch-py | 85573db2759922aed7fb655cfdd7cb95d3071a34 | [
"Apache-2.0"
] | null | null | null | test_elasticsearch/test_helpers.py | dliappis/elasticsearch-py | 85573db2759922aed7fb655cfdd7cb95d3071a34 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import mock
import time
import threading
from elasticsearch import helpers, Elasticsearch
from elasticsearch.serializer import JSONSerializer
from .test_cases import TestCase
lock_side_effect = threading.Lock()


def mock_process_bulk_chunk(*args, **kwargs):
    """Stand-in for ``_process_bulk_chunk`` that counts its own invocations.

    The counter is updated under a lock because the helper may be called
    from several worker threads at once (see SO question 39332139).
    """
    with lock_side_effect:
        current = mock_process_bulk_chunk.call_count
        mock_process_bulk_chunk.call_count = current + 1
    time.sleep(0.1)
    return []


mock_process_bulk_chunk.call_count = 0
class TestParallelBulk(TestCase):
    """Tests for helpers.parallel_bulk chunking and threading behavior."""
    @mock.patch('elasticsearch.helpers.actions._process_bulk_chunk', side_effect=mock_process_bulk_chunk)
    def test_all_chunks_sent(self, _process_bulk_chunk):
        """100 actions with chunk_size=2 must produce exactly 50 chunk calls."""
        actions = ({'x': i} for i in range(100))
        list(helpers.parallel_bulk(Elasticsearch(), actions, chunk_size=2))
        self.assertEquals(50, _process_bulk_chunk.call_count)
    @mock.patch(
        'elasticsearch.helpers.actions._process_bulk_chunk',
        # make sure we spend some time in the thread
        side_effect=lambda *a: [(True, time.sleep(.001) or threading.current_thread().ident)]
    )
    def test_chunk_sent_from_different_threads(self, _process_bulk_chunk):
        """With thread_count=10 the chunks must run on more than one thread."""
        actions = ({'x': i} for i in range(100))
        results = list(helpers.parallel_bulk(Elasticsearch(), actions, thread_count=10, chunk_size=2))
        self.assertTrue(len(set([r[1] for r in results])) > 1)
class TestChunkActions(TestCase):
    """Tests for helpers._chunk_actions size/count limits."""
    def setUp(self):
        super(TestChunkActions, self).setUp()
        # 100 (action, payload) pairs; payload includes a non-ASCII char.
        self.actions = [({'index': {}}, {'some': u'datá', 'i': i}) for i in range(100)]
    def test_chunks_are_chopped_by_byte_size(self):
        """A 1-byte limit forces one action per chunk (100 chunks)."""
        self.assertEquals(100, len(list(helpers._chunk_actions(self.actions, 100000, 1, JSONSerializer()))))
    def test_chunks_are_chopped_by_chunk_size(self):
        """A 10-action limit yields 10 chunks regardless of byte budget."""
        self.assertEquals(10, len(list(helpers._chunk_actions(self.actions, 10, 99999999, JSONSerializer()))))
    def test_chunks_are_chopped_by_byte_size_properly(self):
        """Every emitted chunk's serialized size stays within the byte limit."""
        max_byte_size = 170
        chunks = list(helpers._chunk_actions(self.actions, 100000, max_byte_size, JSONSerializer()))
        self.assertEquals(25, len(chunks))
        for chunk_data, chunk_actions in chunks:
            chunk = u''.join(chunk_actions)
            chunk = chunk if isinstance(chunk, str) else chunk.encode('utf-8')
            self.assertLessEqual(len(chunk), max_byte_size)
class TestExpandActions(TestCase):
    """Tests for helpers.expand_action normalization."""
    def test_string_actions_are_marked_as_simple_inserts(self):
        """A bare string becomes a default index action plus the string payload."""
        self.assertEquals(('{"index":{}}', "whatever"), helpers.expand_action('whatever'))
| 38.73913 | 110 | 0.710438 |
import mock
import time
import threading
from elasticsearch import helpers, Elasticsearch
from elasticsearch.serializer import JSONSerializer
from .test_cases import TestCase
# Guards the call counter: the mock may run from several worker threads.
lock_side_effect = threading.Lock()
def mock_process_bulk_chunk(*args, **kwargs):
    """Thread-safe stand-in for ``_process_bulk_chunk`` that counts calls."""
    with lock_side_effect:
        mock_process_bulk_chunk.call_count += 1
    time.sleep(0.1)
    return []
mock_process_bulk_chunk.call_count = 0
class TestParallelBulk(TestCase):
    """Tests for helpers.parallel_bulk chunking and threading behavior."""
    @mock.patch('elasticsearch.helpers.actions._process_bulk_chunk', side_effect=mock_process_bulk_chunk)
    def test_all_chunks_sent(self, _process_bulk_chunk):
        """100 actions with chunk_size=2 must produce exactly 50 chunk calls."""
        actions = ({'x': i} for i in range(100))
        list(helpers.parallel_bulk(Elasticsearch(), actions, chunk_size=2))
        self.assertEquals(50, _process_bulk_chunk.call_count)
    @mock.patch(
        'elasticsearch.helpers.actions._process_bulk_chunk',
        # The sleep keeps each chunk in its thread long enough to force spread.
        side_effect=lambda *a: [(True, time.sleep(.001) or threading.current_thread().ident)]
    )
    def test_chunk_sent_from_different_threads(self, _process_bulk_chunk):
        """With thread_count=10 the chunks must run on more than one thread."""
        actions = ({'x': i} for i in range(100))
        results = list(helpers.parallel_bulk(Elasticsearch(), actions, thread_count=10, chunk_size=2))
        self.assertTrue(len(set([r[1] for r in results])) > 1)
class TestChunkActions(TestCase):
    """Tests for helpers._chunk_actions size/count limits."""
    def setUp(self):
        super(TestChunkActions, self).setUp()
        # 100 (action, payload) pairs; payload includes a non-ASCII char.
        self.actions = [({'index': {}}, {'some': u'datá', 'i': i}) for i in range(100)]
    def test_chunks_are_chopped_by_byte_size(self):
        """A 1-byte limit forces one action per chunk (100 chunks)."""
        self.assertEquals(100, len(list(helpers._chunk_actions(self.actions, 100000, 1, JSONSerializer()))))
    def test_chunks_are_chopped_by_chunk_size(self):
        """A 10-action limit yields 10 chunks regardless of byte budget."""
        self.assertEquals(10, len(list(helpers._chunk_actions(self.actions, 10, 99999999, JSONSerializer()))))
    def test_chunks_are_chopped_by_byte_size_properly(self):
        """Every emitted chunk's serialized size stays within the byte limit."""
        max_byte_size = 170
        chunks = list(helpers._chunk_actions(self.actions, 100000, max_byte_size, JSONSerializer()))
        self.assertEquals(25, len(chunks))
        for chunk_data, chunk_actions in chunks:
            chunk = u''.join(chunk_actions)
            chunk = chunk if isinstance(chunk, str) else chunk.encode('utf-8')
            self.assertLessEqual(len(chunk), max_byte_size)
class TestExpandActions(TestCase):
    """Tests for helpers.expand_action normalization."""
    def test_string_actions_are_marked_as_simple_inserts(self):
        """A bare string becomes a default index action plus the string payload."""
        self.assertEquals(('{"index":{}}', "whatever"), helpers.expand_action('whatever'))
| true | true |
f73a4f2f282124807128247d2a677a39cec76d4b | 5,715 | py | Python | maze.py | Sai-Prabhav/maze | 9fd0dc8d269a7718730cea51cfbe263dd616bcfc | [
"MIT"
] | 1 | 2021-09-04T13:13:13.000Z | 2021-09-04T13:13:13.000Z | maze.py | Sai-Prabhav/maze | 9fd0dc8d269a7718730cea51cfbe263dd616bcfc | [
"MIT"
] | null | null | null | maze.py | Sai-Prabhav/maze | 9fd0dc8d269a7718730cea51cfbe263dd616bcfc | [
"MIT"
] | null | null | null | import json
import os
import pygame
from find_path import solve_maze
pygame.init()
def maze_gen() -> dict:
    """Interactively build maze map(s) from user input.

    Walls are recorded as booleans: any non-empty answer to a wall prompt
    counts as "wall present", an empty answer as "no wall".

    Returns:
        dict: maze maps keyed by maze number ("1", ...); each map holds the
        two green-circle positions and the horizontal/vertical wall grids.
    """
    def _read_point(prompt: str) -> list:
        """Read an "x,y" coordinate pair; supports multi-digit values when a
        comma is present, with a legacy first/last-character fallback."""
        raw = input(prompt)
        if "," in raw:
            x_str, _, y_str = raw.partition(",")
            return [int(x_str.strip()), int(y_str.strip())]
        # Legacy behavior: first and last characters are the two digits.
        return [int(raw[0]), int(raw[-1])]

    res = {}
    for i in range(1):  # NOTE(review): comment said 9 mazes but only 1 is read — confirm intent
        print("this is maze ", i+1)
        Green_1 = [_read_point("The Position of the first circle given in the format x,y :")]
        Green_2 = [_read_point("The Position of the second circle given in the format x,y :")]
        horizontal_wall = []
        for y in range(5):  # only 5 rows of horizontal walls in a 6x6 grid
            horizontal_wall.append([
                bool(input(f"is there a horizontal wall after {x + 1, y + 1}"))
                for x in range(6)
            ])
        vertical_wall = []
        for y in range(6):
            vertical_wall.append([  # only 5 columns of vertical walls per row
                bool(input(f"is there a vertical wall after {x + 1, y + 1}"))
                for x in range(5)
            ])
        res[str(i + 1)] = {
            "Green_circle_1": Green_1,
            "Green_circle_2": Green_2,
            "horizontal_wall": horizontal_wall,
            "vertical_wall": vertical_wall,
        }
    return res
def save_data(data: dict) -> None:
    """Save *data* as pretty-printed JSON to ``data.json`` next to this script.

    Args:
        data (dict): the maze maps to persist.

    Returns:
        None
    """
    # os.path.dirname/join replaces the old manual slicing of __file__,
    # which was fragile with respect to path separators.
    path = os.path.join(os.path.dirname(__file__), "data.json")
    with open(path, "w") as f:
        json.dump(data, f, indent=4)
def load_data() -> dict:
    """Load and return the contents of ``data.json`` next to this script.

    Returns:
        dict: the parsed maze maps.
    """
    # os.path.dirname/join replaces the old manual slicing of __file__,
    # which was fragile with respect to path separators.
    path = os.path.join(os.path.dirname(__file__), "data.json")
    with open(path) as f:
        data = json.load(f)
    return data
# res = maze_gen()
# print(res)
# save_data(res)
# print(load_data())
class maze:
    def __init__(self, data: dict, screen):
        """Create a maze from *data* and draw it onto *screen*.
        Args:
            data (dict): map with "Green_circle_1"/"Green_circle_2" positions
                and "horizontal_wall"/"vertical_wall" boolean grids
            screen (pygame.Surface): the surface to draw the maze on
        """
        self.SQUARE_SIZE = 100  # cell size in pixels (NOTE: 100 is still hard-coded below)
        # the two green circles as [x, y] pairs (1-indexed cells)
        self.Green_circle = [*data["Green_circle_1"], *data["Green_circle_2"]]
        # list of horizontal walls [[True,False,...],[True,True,...]...]
        self.horizontal_wall = data["horizontal_wall"]
        # list of vertical walls [[True,False,...],[True,True,...]...]
        self.vertical_wall = data["vertical_wall"]
        self.screen = screen  # pygame surface to draw on
        # walk both wall grids and draw a wall wherever the flag is set
        for y, row in enumerate(self.vertical_wall):
            for x, cell in enumerate(row):
                if cell:
                    self.draw_vertical_wall(x+1, y)
        for y, row in enumerate(self.horizontal_wall):
            for x, cell in enumerate(row):
                if cell:
                    self.draw_horizontal_wall(x, y+1)
        self.draw_circle()
    def draw_vertical_wall(self, x: int, y: int) -> None:
        """Draw one vertical wall segment of the maze.
        Args:
            x (int): x coordinate of the wall (grid units)
            y (int): y coordinate of the wall (grid units)
        """
        pygame.draw.line(self.screen, (0, 0, 0),
                         (x*100, (y)*100), (x*100, (y+1)*100))
    def draw_horizontal_wall(self, x: int, y: int) -> None:
        """Draw one horizontal wall segment of the maze.
        Args:
            x (int): x coordinate of the wall (grid units)
            y (int): y coordinate of the wall (grid units)
        """
        pygame.draw.line(self.screen, (0, 0, 0),
                         ((x)*100, y*100), ((x+1)*100, y*100))
    def draw_circle(self) -> None:
        """Draw the two green circles, centered in their (1-indexed) cells.
        """
        pygame.draw.circle(self.screen, (20, 200, 20),
                           (self.Green_circle[0][0]*100-50, self.Green_circle[0][1]*100-50), 25)
        pygame.draw.circle(self.screen, (20, 200, 20),
                           (self.Green_circle[1][0]*100-50, self.Green_circle[1][1]*100-50), 25)
    def draw_path(self, path: list[list[int]]) -> None:
        """Draw the solved path as circles whose gray level grows per step.
        NOTE(review): path cells appear to be 0-indexed (``*100+50``) while the
        green circles use 1-indexed coordinates (``*100-50``) — confirm.
        """
        print(path)
        for lis in range(len(path)):
            x, y = path[lis]
            pygame.draw.circle(self.screen, (lis*10, lis*10, lis*10),
                               ((x*100)+50, (y*100)+50), 20)
# Window setup: a 600x600 surface (6x6 cells of 100px) with a white background.
HEIGHT = 600
WIDTH = 600
screen = pygame.display.set_mode((WIDTH, HEIGHT))
clock = pygame.time.Clock()
screen.fill((255, 255, 255))
# Solve maze "9" from data.json from the top-left to the bottom-right cell.
start = [0, 0]
end = [5, 5]
maze_map = load_data()["9"]
path = solve_maze(start, start, end, [], maze_map)
print(path)
# build and draw the maze from the loaded map
maze_1 = maze(maze_map, screen)
# overlay the solved path on the maze
maze_1.draw_path(path)
pygame.display.update()
"""
basic code to keep the pygame screen running until u stop it
"""
run = True
while run:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            run = False
    clock.tick(5)
| 32.844828 | 145 | 0.545582 | import json
import os
import pygame
from find_path import solve_maze
pygame.init()
def maze_gen():
    """Interactively build maze map(s) from user input and return them.
    Any non-empty answer to a wall prompt counts as "wall present".
    Returns:
        dict: maze maps keyed by maze number ("1", ...)
    """
    res = {}
    for i in range(1):
        print("this is maze ", i+1)
        Green_1 = []
        Green_2 = []
        # NOTE(review): first/last characters of the answer are taken as the
        # digits, so multi-digit coordinates are not supported.
        Green_1.append([int((inp := input(
            "The Position of the first circle given in the format x,y :"))[0]), int(inp[-1])])
        Green_2.append([int((inp := input(
            "The Position of the second circle given in the format x,y :"))[0]), int(inp[-1])])
        horizontal_wall = []
        vertical_wall = []
        for y in range(5):  # only 5 rows of horizontal walls in a 6x6 grid
            horizontal_wall.append([])
            for x in range(6):
                horizontal_wall[y].append(
                    bool(input(f"is there a horizontal wall after {x +1, y+1}")))
        for y in range(6):
            vertical_wall.append([])
            for x in range(5):  # only 5 columns of vertical walls per row
                vertical_wall[y].append(
                    bool(input(f"is there a vertical wall after {x+1 , y+1}")))
        res[str(i+1)] = {
            "Green_circle_1": Green_1,
            "Green_circle_2": Green_2,
            "horizontal_wall": horizontal_wall,
            "vertical_wall": vertical_wall}
    return res
def save_data(data: dict) -> None:
    """Save *data* as pretty-printed JSON to ``data.json`` next to this script."""
    # Slicing off the basename leaves the script's directory (with separator).
    with open(__file__[:-1*len(os.path.basename(__file__))]+"data.json", "w") as f:
        return json.dump(data, f, indent=4)
def load_data() -> dict:
    """Load and return the contents of ``data.json`` next to this script."""
    # Slicing off the basename leaves the script's directory (with separator).
    with open(__file__[:-1*len(os.path.basename(__file__))]+"data.json",) as f:
        data = json.load(f)
    return data
class maze:
    def __init__(self, data: dict, screen):
        """Create a maze from *data* and draw it onto *screen*.
        Args:
            data (dict): map with "Green_circle_1"/"Green_circle_2" positions
                and "horizontal_wall"/"vertical_wall" boolean grids
            screen (pygame.Surface): the surface to draw the maze on
        """
        self.SQUARE_SIZE = 100  # cell size in pixels (NOTE: 100 is still hard-coded below)
        # the two green circles as [x, y] pairs (1-indexed cells)
        self.Green_circle = [*data["Green_circle_1"], *data["Green_circle_2"]]
        self.horizontal_wall = data["horizontal_wall"]
        self.vertical_wall = data["vertical_wall"]
        self.screen = screen
        # walk both wall grids and draw a wall wherever the flag is set
        for y, row in enumerate(self.vertical_wall):
            for x, cell in enumerate(row):
                if cell:
                    self.draw_vertical_wall(x+1, y)
        for y, row in enumerate(self.horizontal_wall):
            for x, cell in enumerate(row):
                if cell:
                    self.draw_horizontal_wall(x, y+1)
        self.draw_circle()
    def draw_vertical_wall(self, x: int, y: int) -> None:
        """Draw one vertical wall segment at grid position (x, y)."""
        pygame.draw.line(self.screen, (0, 0, 0),
                         (x*100, (y)*100), (x*100, (y+1)*100))
    def draw_horizontal_wall(self, x: int, y: int) -> None:
        """Draw one horizontal wall segment at grid position (x, y)."""
        pygame.draw.line(self.screen, (0, 0, 0),
                         ((x)*100, y*100), ((x+1)*100, y*100))
    def draw_circle(self) -> None:
        """Draw the two green circles, centered in their (1-indexed) cells."""
        pygame.draw.circle(self.screen, (20, 200, 20),
                           (self.Green_circle[0][0]*100-50, self.Green_circle[0][1]*100-50), 25)
        pygame.draw.circle(self.screen, (20, 200, 20),
                           (self.Green_circle[1][0]*100-50, self.Green_circle[1][1]*100-50), 25)
    def draw_path(self, path: list[list[int]]) -> None:
        """Draw the solved path as circles whose gray level grows per step.
        NOTE(review): path cells appear to be 0-indexed (``*100+50``) while the
        green circles use 1-indexed coordinates (``*100-50``) — confirm.
        """
        print(path)
        for lis in range(len(path)):
            x, y = path[lis]
            pygame.draw.circle(self.screen, (lis*10, lis*10, lis*10),
                               ((x*100)+50, (y*100)+50), 20)
# Window setup: a 600x600 surface (6x6 cells of 100px) with a white background.
HEIGHT = 600
WIDTH = 600
screen = pygame.display.set_mode((WIDTH, HEIGHT))
clock = pygame.time.Clock()
screen.fill((255, 255, 255))
# Solve maze "9" from data.json from the top-left to the bottom-right cell.
start = [0, 0]
end = [5, 5]
maze_map = load_data()["9"]
path = solve_maze(start, start, end, [], maze_map)
print(path)
# build and draw the maze, then overlay the solved path
maze_1 = maze(maze_map, screen)
maze_1.draw_path(path)
pygame.display.update()
# Keep the window open (at 5 FPS) until the user closes it.
run = True
while run:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            run = False
    clock.tick(5)
| true | true |
f73a4f5695e2ff4902a5156c58fe443c790c15b1 | 13,552 | py | Python | pfr_api/parse/parse.py | aadamson/pfr-api | b5cab2763db71e57e231507e03747fc0922cdba3 | [
"MIT"
] | 1 | 2021-10-12T01:39:04.000Z | 2021-10-12T01:39:04.000Z | pfr_api/parse/parse.py | aadamson/pfr-api | b5cab2763db71e57e231507e03747fc0922cdba3 | [
"MIT"
] | null | null | null | pfr_api/parse/parse.py | aadamson/pfr-api | b5cab2763db71e57e231507e03747fc0922cdba3 | [
"MIT"
] | null | null | null | from typing import Any, Dict, List, Optional, Tuple
from bs4 import BeautifulSoup
from pfr_api.parse.parser import RowParser, IdentityParser, \
StrToIntParser, NullableStrToIntParser, \
NullableStrToFloatParser, StrPercentageToFloatParser, \
NullableStrPercentageToFloatParser
# Maps a PFR `data-stat` column identifier to the parser that converts its
# cell text into typed output fields. Keys mirror the site's column names.
PARSERS = {
    'year_id': StrToIntParser('year_id'),
    'gs': IdentityParser('gs'),  # TODO make this a boolean
    # Passing
    'pass_cmp': NullableStrToIntParser('pass_cmp'),
    'pass_att': NullableStrToIntParser('pass_att'),
    'pass_cmp_perc': NullableStrPercentageToFloatParser('pass_cmp_perc'),
    'pass_yds': NullableStrToIntParser('pass_yds'),
    'pass_td': NullableStrToIntParser('pass_td'),
    'pass_int': NullableStrToIntParser('pass_int'),
    'pass_rating': NullableStrToFloatParser('pass_rating'),
    'pass_sacked': NullableStrToIntParser('pass_sacked'),
    'pass_sacked_yds': NullableStrToIntParser('pass_sacked_yds'),
    'pass_yds_per_att': NullableStrToFloatParser('pass_yds_per_att'),
    'pass_adj_yds_per_att': NullableStrToFloatParser('pass_adj_yds_per_att'),
    'qb_rec': IdentityParser('qb_rec'),
    'pass_td_perc': NullableStrPercentageToFloatParser('pass_td_perc'),
    # NOTE(review): other *_perc columns use the percentage parser; confirm
    # whether pass_int_perc cells really lack the trailing '%'.
    'pass_int_perc': NullableStrToFloatParser('pass_int_perc'),
    'pass_first_down': NullableStrToIntParser('pass_first_down'),
    'pass_yds_per_cmp': NullableStrToFloatParser('pass_yds_per_cmp'),
    'pass_yds_per_g': NullableStrToFloatParser('pass_yds_per_g'),
    'qbr': NullableStrToFloatParser('qbr'),
    'pass_net_yds_per_att': NullableStrToFloatParser('pass_net_yds_per_att'),
    'pass_adj_net_yds_per_att': NullableStrToFloatParser('pass_adj_net_yds_per_att'),
    'pass_sacked_perc': StrPercentageToFloatParser('pass_sacked_perc'),
    'comebacks': NullableStrToIntParser('comebacks'),
    'gwd': NullableStrToIntParser('gwd'),
    'av': NullableStrToIntParser('av'),
    # Advanced passing
    'pass_air_yards': NullableStrToIntParser('pass_air_yards'),
    'pass_air_yards_per_cmp': NullableStrToFloatParser('pass_air_yards_per_cmp'),
    'pass_air_yards_per_att': NullableStrToFloatParser('pass_air_yards_per_att'),
    'pass_tgt_yards_per_att': NullableStrToFloatParser('pass_tgt_yards_per_att'),
    'pass_yac': NullableStrToFloatParser('pass_yac'),
    'pass_yac_per_cmp': NullableStrToFloatParser('pass_yac_per_cmp'),
    'pass_drops': NullableStrToFloatParser('pass_drops'),
    'pass_drops_pct': NullableStrPercentageToFloatParser('pass_drops_pct'),
    'pass_poor_throws': NullableStrToFloatParser('pass_poor_throws'),
    'pass_poor_throws_pct': NullableStrPercentageToFloatParser('pass_poor_throws_pct'),
    'pass_blitzed': NullableStrToFloatParser('pass_blitzed'),
    'pass_hurried': NullableStrToFloatParser('pass_hurried'),
    'pass_hits': NullableStrToFloatParser('pass_hits'),
    'rush_scrambles': NullableStrToFloatParser('rush_scrambles'),
    'rush_scrambles_yds_per_att': NullableStrToFloatParser('rush_scrambles_yds_per_att'),
    # Rushing/receiving
    'rush_att': NullableStrToIntParser('rush_att'),
    'rush_yds': NullableStrToIntParser('rush_yds'),
    'rush_yds_per_att': NullableStrToFloatParser('rush_yds_per_att'),
    'rush_td': NullableStrToIntParser('rush_td'),
    'rush_td_perc': NullableStrPercentageToFloatParser('rush_td_perc'),
    'rush_first_down': NullableStrToIntParser('rush_first_down'),
    # BUGFIX: previously emitted the field name 'rush_first_down', which
    # collided with the real first-down column above.
    'rush_long': NullableStrToIntParser('rush_long'),
    'rush_yds_per_g': NullableStrToFloatParser('rush_yds_per_g'),
    'rush_att_per_g': NullableStrToFloatParser('rush_att_per_g'),
    'targets': NullableStrToIntParser('targets'),
    'rec': NullableStrToIntParser('rec'),
    'rec_yds': NullableStrToIntParser('rec_yds'),
    'rec_yds_per_rec': NullableStrToFloatParser('rec_yds_per_rec'),
    'rec_td': NullableStrToIntParser('rec_td'),
    'catch_pct': StrPercentageToFloatParser('catch_pct'),
    'rec_yds_per_tgt': NullableStrToFloatParser('rec_yds_per_tgt'),
    'rec_first_down': NullableStrToIntParser('rec_first_down'),
    # BUGFIX: previously emitted the field name 'rec_first_down', which
    # collided with the real first-down column above.
    'rec_long': NullableStrToIntParser('rec_long'),
    'rec_yds_per_g': NullableStrToFloatParser('rec_yds_per_g'),
    'rec_att_per_g': NullableStrToFloatParser('rec_att_per_g'),
    'touches': NullableStrToIntParser('touches'),
    # NOTE(review): yards per touch is usually fractional -- confirm whether
    # this should be a float parser.
    'yds_per_touch': NullableStrToIntParser('yds_per_touch'),
    'yds_from_scrimmage': NullableStrToIntParser('yds_from_scrimmage'),
    'rush_receive_td': NullableStrToIntParser('rush_receive_td'),
    # Advanced rushing/receiving
    'rush_yds_before_contact': NullableStrToIntParser('rush_yds_before_contact'),
    'rush_yds_bc_per_rush': NullableStrToFloatParser('rush_yds_bc_per_rush'),
    'rush_yac': NullableStrToIntParser('rush_yac'),
    'rush_yac_per_rush': NullableStrToFloatParser('rush_yac_per_rush'),
    'rush_broken_tackles': NullableStrToIntParser('rush_broken_tackles'),
    'rush_broken_tackles_per_rush': NullableStrToFloatParser('rush_broken_tackles_per_rush'),
    'rec_air_yds': NullableStrToIntParser('rec_air_yds'),
    'rec_air_yds_per_rec': NullableStrToFloatParser('rec_air_yds_per_rec'),
    'rec_yac': NullableStrToIntParser('rec_yac'),
    'rec_yac_per_rac': NullableStrToFloatParser('rec_yac_per_rac'),
    'rec_broken_tackles': NullableStrToIntParser('rec_broken_tackles'),
    'rec_broken_tackles_per_rec': NullableStrToFloatParser('rec_broken_tackles_per_rec'),
    'dropped_passes': NullableStrToIntParser('dropped_passes'),
    'rec_drop_pct': NullableStrPercentageToFloatParser('rec_drop_pct'),
    # Field-position aware
    # Rushing/receiving
    'rush_att_in_10': NullableStrToIntParser('rush_att_in_10'),
    'rush_yds_in_10': NullableStrToIntParser('rush_yds_in_10'),
    'rush_td_in_10': NullableStrToIntParser('rush_td_in_10'),
    'targets_in_10': NullableStrToIntParser('targets_in_10'),
    'rec_in_10': NullableStrToIntParser('rec_in_10'),
    'rec_yds_in_10': NullableStrToIntParser('rec_yds_in_10'),
    'rec_yds_per_rec_in_10': NullableStrToFloatParser('rec_yds_per_rec_in_10'),
    'rec_td_in_10': NullableStrToIntParser('rec_td_in_10'),
    # Passing
    'pass_cmp_in_10': NullableStrToIntParser('pass_cmp_in_10'),
    'pass_att_in_10': NullableStrToIntParser('pass_att_in_10'),
    'pass_yds_in_10': NullableStrToIntParser('pass_yds_in_10'),
    'pass_td_in_10': NullableStrToIntParser('pass_td_in_10'),
    # Misc. offense
    'two_pt_md': NullableStrToIntParser('two_pt_md'),
    'all_td': NullableStrToIntParser('all_td'),
    'scoring': NullableStrToIntParser('scoring'),
    'fumbles': NullableStrToIntParser('fumbles'),
    'fumbles_lost': NullableStrToIntParser('fumbles_lost'),
    'offense': NullableStrToIntParser('offense'),
    'off_pct': NullableStrPercentageToFloatParser('off_pct'),
    # Misc
    'uniform_number': StrToIntParser('uniform_number'),
    # Special teams
    'kick_ret': NullableStrToIntParser('kick_ret'),
    'kick_ret_yds': NullableStrToIntParser('kick_ret_yds'),
    'kick_ret_yds_per_ret': NullableStrToFloatParser('kick_ret_yds_per_ret'),
    'kick_ret_td': NullableStrToIntParser('kick_ret_td'),
    'punt_ret': NullableStrToIntParser('punt_ret'),
    'punt_ret_yds': NullableStrToIntParser('punt_ret_yds'),
    'punt_ret_yds_per_ret': NullableStrToFloatParser('punt_ret_yds_per_ret'),
    'punt_ret_td': NullableStrToIntParser('punt_ret_td'),
    'special_teams': NullableStrToIntParser('special_teams'),
    'st_pct': NullableStrPercentageToFloatParser('st_pct'),
    # defensive
    'sacks': NullableStrToFloatParser('sacks'),
    'tackles_solo': NullableStrToIntParser('tackles_solo'),
    'tackles_assists': NullableStrToIntParser('tackles_assists'),
    'tackles_combined': NullableStrToIntParser('tackles_combined'),
    'tackles_loss': NullableStrToIntParser('tackles_loss'),
    'qb_hits': NullableStrToIntParser('qb_hits'),
    'fumbles_forced': NullableStrToIntParser('fumbles_forced'),
    'fumbles_rec': NullableStrToIntParser('fumbles_rec'),
    'fumbles_rec_yds': NullableStrToIntParser('fumbles_rec_yds'),
    'fumbles_rec_td': NullableStrToIntParser('fumbles_rec_td'),
    'def_int': NullableStrToIntParser('def_int'),
    'def_int_yds': NullableStrToIntParser('def_int_yds'),
    'def_int_td': NullableStrToIntParser('def_int_td'),
    'pass_defended': NullableStrToIntParser('pass_defended'),
    'defense': NullableStrToIntParser('defense'),
    'def_pct': NullableStrPercentageToFloatParser('def_pct'),
    # Fantasy-specific
    'player': IdentityParser('player'),
    'fantasy_pos': IdentityParser('fantasy_pos'),
    'starter_pos': IdentityParser('starter_pos'),
    'g': NullableStrToIntParser('g'),
    # 'gs': _str_to_int_parser('gs'), TODO how to handle ambiguity
    'two_pt_pass': NullableStrToFloatParser('two_pt_pass'),
    'fantasy_points': NullableStrToFloatParser('fantasy_points'),
    'fantasy_points_ppr': NullableStrToFloatParser('fantasy_points_ppr'),
    'draftkings_points': NullableStrToFloatParser('draftkings_points'),
    'fanduel_points': NullableStrToFloatParser('fanduel_points'),
    'vbd': NullableStrToIntParser('vbd'),
    'fantasy_rank_pos': NullableStrToIntParser('fantasy_rank_pos'),
    'fantasy_rank_overall': NullableStrToIntParser('fantasy_rank_overall'),
    # Fantasy metadata
    # Literally just contains a link to a fantasy game log page
    'games': IdentityParser('games'),
    # Team stats
    # Offense
    'points': StrToIntParser('points'),
    'total_yards': StrToIntParser('total_yards'),
    'plays_offense': StrToIntParser('plays_offense'),
    'yds_per_play_offense': NullableStrToFloatParser('yds_per_play_offense'),
    'turnovers': StrToIntParser('turnovers'),
    'first_down': StrToIntParser('first_down'),
    'pass_fd': StrToIntParser('pass_fd'),
    'rush_fd': StrToIntParser('rush_fd'),
    'penalties': StrToIntParser('penalties'),
    'penalties_yds': StrToIntParser('penalties_yds'),
    'pen_fd': StrToIntParser('pen_fd'),
    'drives': StrToIntParser('drives'),
    'score_pct': NullableStrPercentageToFloatParser('score_pct'),
    'turnover_pct': NullableStrPercentageToFloatParser('turnover_pct'),
    'start_avg': IdentityParser('start_avg'),
    'time_avg': IdentityParser('time_avg'),
    'plays_per_drive': NullableStrToFloatParser('plays_per_drive'),
    'yds_per_drive': NullableStrToFloatParser('yds_per_drive'),
    'points_avg': NullableStrToFloatParser('points_avg'),
    # Game info
    'game_date': IdentityParser('game_date'),  # TODO datetime
    'game_num': StrToIntParser('game_num'),
    'week_num': StrToIntParser('week_num'),
    'age': IdentityParser('age'),
    'team': IdentityParser('team'),
    'game_location': IdentityParser('game_location'),
    'game_result': IdentityParser('game_result'),
    'week': StrToIntParser('week'),
    'day': IdentityParser('day'),
    'date': IdentityParser('date'),  # TODO datetime
    'game_time': IdentityParser('game_time'),  # TODO datetime,
    'boxscore_word': IdentityParser('boxscore_word'),
    'game_outcome': IdentityParser('game_outcome'),
    'overtime': IdentityParser('overtime'),
    'team_record': IdentityParser('team_record'),
    'opp': IdentityParser('opp'),
    # Team game stats
    'pts_off': StrToIntParser('pts_off'),
    'pts_def': StrToIntParser('pts_def'),
    'first_down_off': StrToIntParser('first_down_off'),
    'yards_off': StrToIntParser('yards_off'),
    'pass_yds_off_off': StrToIntParser('pass_yds_off_off'),
    'rush_yds_off_off': StrToIntParser('rush_yds_off_off'),
    'to_off': StrToIntParser('to_off'),
    'first_down_def': StrToIntParser('first_down_def'),
    'yards_def': StrToIntParser('yards_def'),
    'pass_yds_def_def': StrToIntParser('pass_yds_def_def'),
    'rush_yds_def_def': StrToIntParser('rush_yds_def_def'),
    'to_def': StrToIntParser('to_def'),
    'exp_pts_off': NullableStrToFloatParser('exp_pts_off'),
    'exp_pts_def': NullableStrToFloatParser('exp_pts_def'),
    'exp_pts_st': NullableStrToFloatParser('exp_pts_st'),
} # type: Dict[str, RowParser]
def parse_stats_table(
    table: BeautifulSoup,
    stat_row_attributes: Optional[Dict[str, Any]] = None,
    parsers: Optional[Dict[str, RowParser]] = None,
) -> Tuple[List[str], List[List[Any]]]:
    """Parse a PFR stats <table> into (output column names, row values).

    :param table: the <table> element to parse.
    :param stat_row_attributes: extra attribute filters for selecting
        <tr> rows in the table body (e.g. to skip header repeats).
    :param parsers: per-column parser overrides, merged over PARSERS.
    :returns: a tuple of (flat output field names, list of parsed rows);
        each row is aligned with the output field names.
    """
    if stat_row_attributes is None:
        stat_row_attributes = {}
    # Merge caller overrides over the defaults without mutating either.
    merged_parsers = dict(PARSERS)
    if parsers:
        merged_parsers.update(parsers)

    # Column identifiers come from the last header row; the first header
    # cell is the ranker column, which carries no stat.
    header_row = table.find('thead').find_all('tr')[-1]
    column_stats = [th['data-stat'] for th in header_row.find_all('th')][1:]

    # A single table column may expand to several output fields.
    output_columns = []  # type: List[str]
    for stat in column_stats:
        output_columns.extend(merged_parsers[stat].output_fields)

    rows = []
    body = table.find('tbody')
    for html_row in body.find_all('tr', recursive=False, **stat_row_attributes):
        row = [None] * len(output_columns)
        field_count = 0
        for stat, cell in zip(
            column_stats, html_row.find_all('td', recursive=False)
        ):
            parsed = merged_parsers[stat].parse(cell)
            # dicts preserve insertion order (3.7+), so .values() matches
            # the order of the parser's output_fields.
            num_fields = len(parsed)
            row[field_count:field_count + num_fields] = parsed.values()
            field_count += num_fields
        rows.append(row)

    return output_columns, rows
| 48.056738 | 93 | 0.739669 | from typing import Any, Dict, List, Optional, Tuple
from bs4 import BeautifulSoup
from pfr_api.parse.parser import RowParser, IdentityParser, \
StrToIntParser, NullableStrToIntParser, \
NullableStrToFloatParser, StrPercentageToFloatParser, \
NullableStrPercentageToFloatParser
PARSERS = {
'year_id': StrToIntParser('year_id'),
'gs': IdentityParser('gs'),
'pass_cmp': NullableStrToIntParser('pass_cmp'),
'pass_att': NullableStrToIntParser('pass_att'),
'pass_cmp_perc': NullableStrPercentageToFloatParser('pass_cmp_perc'),
'pass_yds': NullableStrToIntParser('pass_yds'),
'pass_td': NullableStrToIntParser('pass_td'),
'pass_int': NullableStrToIntParser('pass_int'),
'pass_rating': NullableStrToFloatParser('pass_rating'),
'pass_sacked': NullableStrToIntParser('pass_sacked'),
'pass_sacked_yds': NullableStrToIntParser('pass_sacked_yds'),
'pass_yds_per_att': NullableStrToFloatParser('pass_yds_per_att'),
'pass_adj_yds_per_att': NullableStrToFloatParser('pass_adj_yds_per_att'),
'qb_rec': IdentityParser('qb_rec'),
'pass_td_perc': NullableStrPercentageToFloatParser('pass_td_perc'),
'pass_int_perc': NullableStrToFloatParser('pass_int_perc'),
'pass_first_down': NullableStrToIntParser('pass_first_down'),
'pass_yds_per_cmp': NullableStrToFloatParser('pass_yds_per_cmp'),
'pass_yds_per_g': NullableStrToFloatParser('pass_yds_per_g'),
'qbr': NullableStrToFloatParser('qbr'),
'pass_net_yds_per_att': NullableStrToFloatParser('pass_net_yds_per_att'),
'pass_adj_net_yds_per_att': NullableStrToFloatParser('pass_adj_net_yds_per_att'),
'pass_sacked_perc': StrPercentageToFloatParser('pass_sacked_perc'),
'comebacks': NullableStrToIntParser('comebacks'),
'gwd': NullableStrToIntParser('gwd'),
'av': NullableStrToIntParser('av'),
'pass_air_yards': NullableStrToIntParser('pass_air_yards'),
'pass_air_yards_per_cmp': NullableStrToFloatParser('pass_air_yards_per_cmp'),
'pass_air_yards_per_att': NullableStrToFloatParser('pass_air_yards_per_att'),
'pass_tgt_yards_per_att': NullableStrToFloatParser('pass_tgt_yards_per_att'),
'pass_yac': NullableStrToFloatParser('pass_yac'),
'pass_yac_per_cmp': NullableStrToFloatParser('pass_yac_per_cmp'),
'pass_drops': NullableStrToFloatParser('pass_drops'),
'pass_drops_pct': NullableStrPercentageToFloatParser('pass_drops_pct'),
'pass_poor_throws': NullableStrToFloatParser('pass_poor_throws'),
'pass_poor_throws_pct': NullableStrPercentageToFloatParser('pass_poor_throws_pct'),
'pass_blitzed': NullableStrToFloatParser('pass_blitzed'),
'pass_hurried': NullableStrToFloatParser('pass_hurried'),
'pass_hits': NullableStrToFloatParser('pass_hits'),
'rush_scrambles': NullableStrToFloatParser('rush_scrambles'),
'rush_scrambles_yds_per_att': NullableStrToFloatParser('rush_scrambles_yds_per_att'),
'rush_att': NullableStrToIntParser('rush_att'),
'rush_yds': NullableStrToIntParser('rush_yds'),
'rush_yds_per_att': NullableStrToFloatParser('rush_yds_per_att'),
'rush_td': NullableStrToIntParser('rush_td'),
'rush_td_perc': NullableStrPercentageToFloatParser('rush_td_perc'),
'rush_first_down': NullableStrToIntParser('rush_first_down'),
'rush_long': NullableStrToIntParser('rush_first_down'),
'rush_yds_per_g': NullableStrToFloatParser('rush_yds_per_g'),
'rush_att_per_g': NullableStrToFloatParser('rush_att_per_g'),
'targets': NullableStrToIntParser('targets'),
'rec': NullableStrToIntParser('rec'),
'rec_yds': NullableStrToIntParser('rec_yds'),
'rec_yds_per_rec': NullableStrToFloatParser('rec_yds_per_rec'),
'rec_td': NullableStrToIntParser('rec_td'),
'catch_pct': StrPercentageToFloatParser('catch_pct'),
'rec_yds_per_tgt': NullableStrToFloatParser('rec_yds_per_tgt'),
'rec_first_down': NullableStrToIntParser('rec_first_down'),
'rec_long': NullableStrToIntParser('rec_first_down'),
'rec_yds_per_g': NullableStrToFloatParser('rec_yds_per_g'),
'rec_att_per_g': NullableStrToFloatParser('rec_att_per_g'),
'touches': NullableStrToIntParser('touches'),
'yds_per_touch': NullableStrToIntParser('yds_per_touch'),
'yds_from_scrimmage': NullableStrToIntParser('yds_from_scrimmage'),
'rush_receive_td': NullableStrToIntParser('rush_receive_td'),
'rush_yds_before_contact': NullableStrToIntParser('rush_yds_before_contact'),
'rush_yds_bc_per_rush': NullableStrToFloatParser('rush_yds_bc_per_rush'),
'rush_yac': NullableStrToIntParser('rush_yac'),
'rush_yac_per_rush': NullableStrToFloatParser('rush_yac_per_rush'),
'rush_broken_tackles': NullableStrToIntParser('rush_broken_tackles'),
'rush_broken_tackles_per_rush': NullableStrToFloatParser('rush_broken_tackles_per_rush'),
'rec_air_yds': NullableStrToIntParser('rec_air_yds'),
'rec_air_yds_per_rec': NullableStrToFloatParser('rec_air_yds_per_rec'),
'rec_yac': NullableStrToIntParser('rec_yac'),
'rec_yac_per_rac': NullableStrToFloatParser('rec_yac_per_rac'),
'rec_broken_tackles': NullableStrToIntParser('rec_broken_tackles'),
'rec_broken_tackles_per_rec': NullableStrToFloatParser('rec_broken_tackles_per_rec'),
'dropped_passes': NullableStrToIntParser('dropped_passes'),
'rec_drop_pct': NullableStrPercentageToFloatParser('rec_drop_pct'),
'rush_att_in_10': NullableStrToIntParser('rush_att_in_10'),
'rush_yds_in_10': NullableStrToIntParser('rush_yds_in_10'),
'rush_td_in_10': NullableStrToIntParser('rush_td_in_10'),
'targets_in_10': NullableStrToIntParser('targets_in_10'),
'rec_in_10': NullableStrToIntParser('rec_in_10'),
'rec_yds_in_10': NullableStrToIntParser('rec_yds_in_10'),
'rec_yds_per_rec_in_10': NullableStrToFloatParser('rec_yds_per_rec_in_10'),
'rec_td_in_10': NullableStrToIntParser('rec_td_in_10'),
'pass_cmp_in_10': NullableStrToIntParser('pass_cmp_in_10'),
'pass_att_in_10': NullableStrToIntParser('pass_att_in_10'),
'pass_yds_in_10': NullableStrToIntParser('pass_yds_in_10'),
'pass_td_in_10': NullableStrToIntParser('pass_td_in_10'),
'two_pt_md': NullableStrToIntParser('two_pt_md'),
'all_td': NullableStrToIntParser('all_td'),
'scoring': NullableStrToIntParser('scoring'),
'fumbles': NullableStrToIntParser('fumbles'),
'fumbles_lost': NullableStrToIntParser('fumbles_lost'),
'offense': NullableStrToIntParser('offense'),
'off_pct': NullableStrPercentageToFloatParser('off_pct'),
'uniform_number': StrToIntParser('uniform_number'),
'kick_ret': NullableStrToIntParser('kick_ret'),
'kick_ret_yds': NullableStrToIntParser('kick_ret_yds'),
'kick_ret_yds_per_ret': NullableStrToFloatParser('kick_ret_yds_per_ret'),
'kick_ret_td': NullableStrToIntParser('kick_ret_td'),
'punt_ret': NullableStrToIntParser('punt_ret'),
'punt_ret_yds': NullableStrToIntParser('punt_ret_yds'),
'punt_ret_yds_per_ret': NullableStrToFloatParser('punt_ret_yds_per_ret'),
'punt_ret_td': NullableStrToIntParser('punt_ret_td'),
'special_teams': NullableStrToIntParser('special_teams'),
'st_pct': NullableStrPercentageToFloatParser('st_pct'),
'sacks': NullableStrToFloatParser('sacks'),
'tackles_solo': NullableStrToIntParser('tackles_solo'),
'tackles_assists': NullableStrToIntParser('tackles_assists'),
'tackles_combined': NullableStrToIntParser('tackles_combined'),
'tackles_loss': NullableStrToIntParser('tackles_loss'),
'qb_hits': NullableStrToIntParser('qb_hits'),
'fumbles_forced': NullableStrToIntParser('fumbles_forced'),
'fumbles_rec': NullableStrToIntParser('fumbles_rec'),
'fumbles_rec_yds': NullableStrToIntParser('fumbles_rec_yds'),
'fumbles_rec_td': NullableStrToIntParser('fumbles_rec_td'),
'def_int': NullableStrToIntParser('def_int'),
'def_int_yds': NullableStrToIntParser('def_int_yds'),
'def_int_td': NullableStrToIntParser('def_int_td'),
'pass_defended': NullableStrToIntParser('pass_defended'),
'defense': NullableStrToIntParser('defense'),
'def_pct': NullableStrPercentageToFloatParser('def_pct'),
'player': IdentityParser('player'),
'fantasy_pos': IdentityParser('fantasy_pos'),
'starter_pos': IdentityParser('starter_pos'),
'g': NullableStrToIntParser('g'),
'two_pt_pass': NullableStrToFloatParser('two_pt_pass'),
'fantasy_points': NullableStrToFloatParser('fantasy_points'),
'fantasy_points_ppr': NullableStrToFloatParser('fantasy_points_ppr'),
'draftkings_points': NullableStrToFloatParser('draftkings_points'),
'fanduel_points': NullableStrToFloatParser('fanduel_points'),
'vbd': NullableStrToIntParser('vbd'),
'fantasy_rank_pos': NullableStrToIntParser('fantasy_rank_pos'),
'fantasy_rank_overall': NullableStrToIntParser('fantasy_rank_overall'),
'games': IdentityParser('games'),
'points': StrToIntParser('points'),
'total_yards': StrToIntParser('total_yards'),
'plays_offense': StrToIntParser('plays_offense'),
'yds_per_play_offense': NullableStrToFloatParser('yds_per_play_offense'),
'turnovers': StrToIntParser('turnovers'),
'first_down': StrToIntParser('first_down'),
'pass_fd': StrToIntParser('pass_fd'),
'rush_fd': StrToIntParser('rush_fd'),
'penalties': StrToIntParser('penalties'),
'penalties_yds': StrToIntParser('penalties_yds'),
'pen_fd': StrToIntParser('pen_fd'),
'drives': StrToIntParser('drives'),
'score_pct': NullableStrPercentageToFloatParser('score_pct'),
'turnover_pct': NullableStrPercentageToFloatParser('turnover_pct'),
'start_avg': IdentityParser('start_avg'),
'time_avg': IdentityParser('time_avg'),
'plays_per_drive': NullableStrToFloatParser('plays_per_drive'),
'yds_per_drive': NullableStrToFloatParser('yds_per_drive'),
'points_avg': NullableStrToFloatParser('points_avg'),
'game_date': IdentityParser('game_date'),
'game_num': StrToIntParser('game_num'),
'week_num': StrToIntParser('week_num'),
'age': IdentityParser('age'),
'team': IdentityParser('team'),
'game_location': IdentityParser('game_location'),
'game_result': IdentityParser('game_result'),
'week': StrToIntParser('week'),
'day': IdentityParser('day'),
'date': IdentityParser('date'),
'game_time': IdentityParser('game_time'),
'boxscore_word': IdentityParser('boxscore_word'),
'game_outcome': IdentityParser('game_outcome'),
'overtime': IdentityParser('overtime'),
'team_record': IdentityParser('team_record'),
'opp': IdentityParser('opp'),
'pts_off': StrToIntParser('pts_off'),
'pts_def': StrToIntParser('pts_def'),
'first_down_off': StrToIntParser('first_down_off'),
'yards_off': StrToIntParser('yards_off'),
'pass_yds_off_off': StrToIntParser('pass_yds_off_off'),
'rush_yds_off_off': StrToIntParser('rush_yds_off_off'),
'to_off': StrToIntParser('to_off'),
'first_down_def': StrToIntParser('first_down_def'),
'yards_def': StrToIntParser('yards_def'),
'pass_yds_def_def': StrToIntParser('pass_yds_def_def'),
'rush_yds_def_def': StrToIntParser('rush_yds_def_def'),
'to_def': StrToIntParser('to_def'),
'exp_pts_off': NullableStrToFloatParser('exp_pts_off'),
'exp_pts_def': NullableStrToFloatParser('exp_pts_def'),
'exp_pts_st': NullableStrToFloatParser('exp_pts_st'),
}
def parse_stats_table(
table: BeautifulSoup,
stat_row_attributes: Optional[Dict[str, Any]] = None,
parsers: Optional[Dict[str, RowParser]] = None,
) -> Tuple[List[str], List[List[Any]]]:
if stat_row_attributes is None:
stat_row_attributes = {}
if parsers is None:
parsers = {}
parsers = {**PARSERS, **parsers}
column_infos = []
html_columns = table.find('thead').find_all('tr')[-1]
for column in html_columns.find_all('th'):
stat = column['data-stat']
name = column.text
column_infos.append((stat, name))
column_infos = column_infos[1:]
output_columns = []
for column_stat, column_name in column_infos:
parser = parsers[column_stat]
output_columns.extend(parser.output_fields)
rows = []
html_body = table.find('tbody')
html_rows = html_body.find_all(
'tr', recursive=False, **stat_row_attributes)
for html_row in html_rows:
row = [None] * len(output_columns)
field_count = 0
for i, ((column_stat, column_name), html_row_col) in enumerate(
zip(column_infos, html_row.find_all('td', recursive=False))
):
parser = parsers[column_stat]
parsed = parser.parse(html_row_col)
num_fields = len(parsed)
row[field_count:field_count+num_fields] = parsed.values()
field_count += num_fields
rows.append(row)
return output_columns, rows
| true | true |
f73a4f903101f0a6a4f1a49bb0551b130217b200 | 17,148 | py | Python | src/sentry/interfaces/security.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/interfaces/security.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/interfaces/security.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
import jsonschema
import six
__all__ = ("Csp", "Hpkp", "ExpectCT", "ExpectStaple")
from six.moves.urllib.parse import urlsplit, urlunsplit
from sentry.interfaces.base import Interface, InterfaceValidationError
from sentry.interfaces.schemas import validate_and_default_interface, INPUT_SCHEMAS
from sentry.utils import json
from sentry.utils.cache import memoize
from sentry.utils.http import is_valid_origin
from sentry.utils.safe import trim
from sentry.web.helpers import render_to_string
# Default block list sourced from personal experience as well as
# reputable blogs from Twitter and Dropbox.
# Duplicated entries ("*.superfish.com", "v.zilionfast.in") removed.
DEFAULT_DISALLOWED_SOURCES = (
    "about",  # Noise from Chrome about page.
    "ms-browser-extension",
    "chrome://*",
    "chrome-extension://*",
    "chromeinvokeimmediate://*",
    "chromenull://*",
    "safari-extension://*",
    "mxaddon-pkg://*",
    "jar://*",
    "webviewprogressproxy://*",
    "ms-browser-extension://*",
    "tmtbff://*",
    "mbinit://*",
    "symres://*",
    "resource://*",
    "moz-extension://*",
    "*.metrext.com",
    "static.image2play.com",
    "*.tlscdn.com",
    "73a5b0806e464be8bd4e694c744624f0.com",
    "020dfefc4ac745dab7594f2f771c1ded.com",
    "*.superfish.com",
    "addons.mozilla.org",
    "v.zilionfast.in",
    "widgets.amung.us",
    "xls.searchfun.in",
    "istatic.datafastguru.info",
    "localhost",
    "resultshub-a.akamaihd.net",
    "pulseadnetwork.com",
    "gateway.zscalertwo.net",
    "www.passpack.com",
    "middlerush-a.akamaihd.net",
    "www.websmartcenter.com",
    "a.linkluster.com",
    "saveyoutime.ru",
    "cdncache-a.akamaihd.net",
    "x.rafomedia.com",
    "savingsslider-a.akamaihd.net",
    "injections.adguard.com",
    "icontent.us",
    "amiok.org",
    "connectionstrenth.com",
    "siteheart.net",
    "netanalitics.space",
    "printapplink.com",
    "godlinkapp.com",
    "devappstor.com",
    "hoholikik.club",
    "smartlink.cool",
    "promfflinkdev.com",
)  # yapf: disable
class SecurityReport(Interface):
    """
    A browser security violation report.

    Abstract base for the concrete report interfaces (CSP, HPKP,
    Expect-CT, Expect-Staple). Subclasses set `title` and implement the
    abstract accessors below.
    """

    title = None

    @classmethod
    def from_raw(cls, raw):
        """
        Constructs the interface from a raw security report request body

        This is usually slightly different than to_python as it needs to
        do some extra validation, data extraction / default setting.
        """
        raise NotImplementedError

    @classmethod
    def to_python(cls, data):
        """Validate normalized data against the schema and build the interface."""
        # TODO(markus): Relay does not validate security interfaces yet
        is_valid, errors = validate_and_default_interface(data, cls.path)
        if not is_valid:
            raise InterfaceValidationError("Invalid interface data")
        return cls(**data)

    def get_culprit(self):
        """Return the culprit string for grouping, or None."""
        raise NotImplementedError

    def get_message(self):
        """Return the human-readable event message."""
        raise NotImplementedError

    def get_tags(self):
        """Return (key, value) tag pairs derived from the report."""
        raise NotImplementedError

    def get_title(self):
        # Subclasses provide `title` as a class attribute.
        return self.title

    def should_filter(self, project=None):
        """Return True if this report should be dropped for the project."""
        raise NotImplementedError

    def get_origin(self):
        """
        The document URL that generated this report
        """
        raise NotImplementedError

    def get_referrer(self):
        """
        The referrer of the page that generated this report.
        """
        raise NotImplementedError
class Hpkp(SecurityReport):
    """
    A HTTP Public Key Pinning pin validation failure report.

    See also: https://tools.ietf.org/html/rfc7469#section-3
    >>> {
    >>>     "date-time": "2014-04-06T13:00:50Z",
    >>>     "hostname": "www.example.com",
    >>>     "port": 443,
    >>>     "effective-expiration-date": "2014-05-01T12:40:50Z",
    >>>     "include-subdomains": False,
    >>>     "served-certificate-chain": [],
    >>>     "validated-certificate-chain": [],
    >>>     "known-pins": [],
    >>> }
    """

    score = 1300
    display_score = 1300

    title = "HPKP Report"

    @classmethod
    def from_raw(cls, raw):
        # Schema validation raises on malformed payloads.
        jsonschema.validate(raw, INPUT_SCHEMAS[cls.path])
        # HPKP payloads are flat: normalize dashed keys to underscores and
        # cap every value before handing off to to_python.
        normalized = {}
        for key, value in six.iteritems(raw):
            normalized[key.replace("-", "_")] = trim(value, 1024)
        return cls.to_python(normalized)

    def get_culprit(self):
        # HPKP reports carry no culprit.
        return None

    def get_message(self):
        return u"Public key pinning validation failed for '{self.hostname}'".format(self=self)

    def get_tags(self):
        tags = [
            ("port", six.text_type(self.port)),
            ("include-subdomains", json.dumps(self.include_subdomains)),
            ("hostname", self.hostname),
        ]
        return tags

    def get_origin(self):
        # Not a true origin: the domain whose pin validation failed.
        return self.hostname

    def get_referrer(self):
        # HPKP reports have no referrer.
        return None

    def should_filter(self, project=None):
        # HPKP reports are never filtered.
        return False
class ExpectStaple(SecurityReport):
    """
    An OCSP Stapling violation report

    See: https://docs.google.com/document/d/1aISglJIIwglcOAhqNfK-2vtQl-_dWAapc-VLDh-9-BE
    >>> {
    >>>     "date-time": date-time,
    >>>     "hostname": hostname,
    >>>     "port": port,
    >>>     "effective-expiration-date": date-time,
    >>>     "response-status": ResponseStatus,
    >>>     "ocsp-response": ocsp,
    >>>     "cert-status": CertStatus,
    >>>     "served-certificate-chain": [pem1, ... pemN],(MUST be in the order served)
    >>>     "validated-certificate-chain": [pem1, ... pemN](MUST be in the order served)
    >>> }
    """

    score = 1300
    display_score = 1300

    title = "Expect-Staple Report"

    @classmethod
    def from_raw(cls, raw):
        # Validate the raw data against the input schema (raises on failure)
        schema = INPUT_SCHEMAS[cls.path]
        jsonschema.validate(raw, schema)

        # For Expect-Staple, the values we want are nested under the
        # 'expect-staple-report' key.
        raw = raw["expect-staple-report"]
        # Trim values and convert keys to use underscores
        kwargs = {k.replace("-", "_"): trim(v, 1024) for k, v in six.iteritems(raw)}

        return cls.to_python(kwargs)

    def get_culprit(self):
        # Group by the hostname that failed stapling.
        return self.hostname

    def get_message(self):
        return u"Expect-Staple failed for '{self.hostname}'".format(self=self)

    def get_tags(self):
        return (
            ("port", six.text_type(self.port)),
            ("hostname", self.hostname),
            ("response_status", self.response_status),
            ("cert_status", self.cert_status),
        )

    def get_origin(self):
        # not quite origin, but the domain that failed pinning
        return self.hostname

    def get_referrer(self):
        # Expect-Staple reports have no referrer.
        return None

    def should_filter(self, project=None):
        # Expect-Staple reports are never filtered.
        return False
class ExpectCT(SecurityReport):
    """
    A Certificate Transparency violation report.

    See also: http://httpwg.org/http-extensions/expect-ct.html
    >>> {
    >>>     "date-time": "2014-04-06T13:00:50Z",
    >>>     "hostname": "www.example.com",
    >>>     "port": 443,
    >>>     "effective-expiration-date": "2014-05-01T12:40:50Z",
    >>>     "served-certificate-chain": [],
    >>>     "validated-certificate-chain": [],
    >>>     "scts-pins": [],
    >>> }
    """

    score = 1300
    display_score = 1300

    title = "Expect-CT Report"

    @classmethod
    def from_raw(cls, raw):
        # Schema validation raises on malformed payloads.
        jsonschema.validate(raw, INPUT_SCHEMAS[cls.path])
        # The interesting fields are nested under 'expect-ct-report';
        # normalize dashed keys to underscores and cap value lengths.
        report = raw["expect-ct-report"]
        normalized = {}
        for key, value in six.iteritems(report):
            normalized[key.replace("-", "_")] = trim(value, 1024)
        return cls.to_python(normalized)

    def get_culprit(self):
        # Group by the hostname that failed the CT check.
        return self.hostname

    def get_message(self):
        return u"Expect-CT failed for '{self.hostname}'".format(self=self)

    def get_tags(self):
        return (("port", six.text_type(self.port)), ("hostname", self.hostname))

    def get_origin(self):
        # Not a true origin: the hostname whose CT check failed.
        return self.hostname

    def get_referrer(self):
        # Expect-CT reports have no referrer.
        return None

    def should_filter(self, project=None):
        # Expect-CT reports are never filtered.
        return False
class Csp(SecurityReport):
    """
    A CSP violation report.

    See also: http://www.w3.org/TR/CSP/#violation-reports

    >>> {
    >>>     "document_uri": "http://example.com/",
    >>>     "violated_directive": "style-src cdn.example.com",
    >>>     "blocked_uri": "http://example.com/style.css",
    >>>     "effective_directive": "style-src",
    >>> }
    """

    # Spec keyword meaning "same origin as the protected document".
    LOCAL = "'self'"
    score = 1300
    display_score = 1300
    title = "CSP Report"

    @classmethod
    def from_raw(cls, raw):
        """Validate a raw browser CSP payload and build the interface."""
        # Firefox doesn't send effective-directive, so parse it from
        # violated-directive but prefer effective-directive when present
        #
        # refs: https://bugzil.la/1192684#c8
        try:
            report = raw["csp-report"]
            report["effective-directive"] = report.get(
                "effective-directive", report["violated-directive"].split(None, 1)[0]
            )
        except (KeyError, IndexError):
            pass
        # Validate the raw data against the input schema (raises on failure)
        schema = INPUT_SCHEMAS[cls.path]
        jsonschema.validate(raw, schema)
        # For CSP, the values we want are nested under the 'csp-report' key.
        raw = raw["csp-report"]
        # Trim values and convert keys to use underscores
        kwargs = {k.replace("-", "_"): trim(v, 1024) for k, v in six.iteritems(raw)}
        return cls.to_python(kwargs)

    def get_message(self):
        """Human-readable summary of what was blocked and where it came from."""
        # Each directive maps to (remote template, inline/local template).
        templates = {
            "child-src": (u"Blocked 'child' from '{uri}'", "Blocked inline 'child'"),
            "connect-src": (u"Blocked 'connect' from '{uri}'", "Blocked inline 'connect'"),
            "font-src": (u"Blocked 'font' from '{uri}'", "Blocked inline 'font'"),
            "form-action": (u"Blocked 'form' action to '{uri}'",),  # no inline option
            "img-src": (u"Blocked 'image' from '{uri}'", "Blocked inline 'image'"),
            "manifest-src": (u"Blocked 'manifest' from '{uri}'", "Blocked inline 'manifest'"),
            "media-src": (u"Blocked 'media' from '{uri}'", "Blocked inline 'media'"),
            "object-src": (u"Blocked 'object' from '{uri}'", "Blocked inline 'object'"),
            "script-src": (
                u"Blocked 'script' from '{uri}'",
                "Blocked unsafe (eval() or inline) 'script'",
            ),
            "script-src-elem": (
                u"Blocked 'script' from '{uri}'",
                "Blocked unsafe 'script' element",
            ),
            "script-src-attr": (
                u"Blocked inline script attribute from '{uri}'",
                "Blocked inline script attribute",
            ),
            "style-src": (u"Blocked 'style' from '{uri}'", "Blocked inline 'style'"),
            "style-src-elem": (
                u"Blocked 'style' from '{uri}'",
                "Blocked 'style' or 'link' element",
            ),
            "style-src-attr": (u"Blocked style attribute from '{uri}'", "Blocked style attribute"),
            "unsafe-inline": (None, u"Blocked unsafe inline 'script'"),
            "unsafe-eval": (None, u"Blocked unsafe eval() 'script'"),
        }
        default_template = ("Blocked {directive!r} from {uri!r}", "Blocked inline {directive!r}")
        # Prefer the unsafe-inline/unsafe-eval pseudo-directive when the
        # violation is a locally-sourced script-src error.
        directive = self.local_script_violation_type or self.effective_directive
        uri = self.normalized_blocked_uri
        # Index 1 selects the inline/local template, index 0 the remote one.
        index = 1 if uri == self.LOCAL else 0
        try:
            tmpl = templates[directive][index]
        except (KeyError, IndexError):
            tmpl = default_template[index]
        return tmpl.format(directive=directive, uri=uri)

    def get_culprit(self):
        """The violated directive with its source values normalized."""
        if not self.violated_directive:
            return ""
        bits = [d for d in self.violated_directive.split(" ") if d]
        # First token is the directive name; the rest are source values.
        return " ".join([bits[0]] + [self._normalize_value(b) for b in bits[1:]])

    def get_tags(self):
        return [
            ("effective-directive", self.effective_directive),
            ("blocked-uri", self._sanitized_blocked_uri()),
        ]

    def get_origin(self):
        return self.document_uri

    def get_referrer(self):
        return self.referrer

    def to_string(self, is_public=False, **kwargs):
        return json.dumps({"csp-report": self.get_api_context()}, indent=2)

    def to_email_html(self, event, **kwargs):
        return render_to_string(
            "sentry/partial/interfaces/csp_email.html", {"data": self.get_api_context()}
        )

    def should_filter(self, project=None):
        """True if the blocked/source URI matches a disallowed (ignored) source."""
        disallowed = ()
        paths = ["blocked_uri", "source_file"]
        uris = [getattr(self, path) for path in paths if hasattr(self, path)]
        if project is None or bool(project.get_option("sentry:csp_ignored_sources_defaults", True)):
            disallowed += DEFAULT_DISALLOWED_SOURCES
        if project is not None:
            disallowed += tuple(project.get_option("sentry:csp_ignored_sources", []))
        if disallowed and any(is_valid_origin(uri, allowed=disallowed) for uri in uris):
            return True
        return False

    def _sanitized_blocked_uri(self):
        # HACK: This is 100% to work around Stripe urls
        # that will casually put extremely sensitive information
        # in querystrings. The real solution is to apply
        # data scrubbing to all tags generically
        #
        # TODO this could be done in filter_csp
        # instead but that might only be run conditionally on the org/project settings
        # relevant code is @L191:
        #
        # if netloc == 'api.stripe.com':
        #     query = ''
        #     fragment = ''
        uri = self.blocked_uri
        if uri.startswith("https://api.stripe.com/"):
            # Drop the query and fragment components entirely.
            return urlunsplit(urlsplit(uri)[:3] + (None, None))
        return uri

    @memoize
    def normalized_blocked_uri(self):
        return self._normalize_uri(self.blocked_uri)

    @memoize
    def _normalized_document_uri(self):
        return self._normalize_uri(self.document_uri)

    def _normalize_value(self, value):
        """Collapse a directive source value back to its canonical form."""
        keywords = ("'none'", "'self'", "'unsafe-inline'", "'unsafe-eval'")
        all_schemes = ("data:", "mediastream:", "blob:", "filesystem:", "http:", "https:", "file:")
        # > If no scheme is specified, the same scheme as the one used to
        # > access the protected document is assumed.
        # Source: https://developer.mozilla.org/en-US/docs/Web/Security/CSP/CSP_policy_directives
        if value in keywords:
            return value
        # normalize a value down to 'self' if it matches the origin of document-uri
        # FireFox transforms a 'self' value into the spelled out origin, so we
        # want to reverse this and bring it back
        if value.startswith(all_schemes):
            if self._normalized_document_uri == self._normalize_uri(value):
                return self.LOCAL
            # Their rule had an explicit scheme, so let's respect that
            return value
        # value doesn't have a scheme, but let's see if their
        # hostnames match at least, if so, they're the same
        if value == self._normalized_document_uri:
            return self.LOCAL
        # Now we need to stitch on a scheme to the value
        scheme = self.document_uri.split(":", 1)[0]
        # But let's not stitch on the boring values
        if scheme in ("http", "https"):
            return value
        return self._unsplit(scheme, value)

    @memoize
    def local_script_violation_type(self):
        """
        If this is a locally-sourced script-src error, gives the type.
        """
        if (
            self.violated_directive
            and self.effective_directive == "script-src"
            and self.normalized_blocked_uri == self.LOCAL
        ):
            if "'unsafe-inline'" in self.violated_directive:
                return "unsafe-inline"
            elif "'unsafe-eval'" in self.violated_directive:
                return "unsafe-eval"
        return None

    def _normalize_uri(self, value):
        """Reduce a URI to 'self', a bare hostname, or scheme://hostname."""
        if value in ("", self.LOCAL, self.LOCAL.strip("'")):
            return self.LOCAL
        # A lot of these values get reported as literally
        # just the scheme. So a value like 'data' or 'blob', which
        # are valid schemes, just not a uri. So we want to
        # normalize it into a uri.
        if ":" not in value:
            scheme, hostname = value, ""
        else:
            scheme, hostname = urlsplit(value)[:2]
        if scheme in ("http", "https"):
            return hostname
        return self._unsplit(scheme, hostname)

    def _unsplit(self, scheme, hostname):
        return urlunsplit((scheme, hostname, "", None, None))
| 32.662857 | 100 | 0.602053 | from __future__ import absolute_import
import jsonschema
import six
__all__ = ("Csp", "Hpkp", "ExpectCT", "ExpectStaple")
from six.moves.urllib.parse import urlsplit, urlunsplit
from sentry.interfaces.base import Interface, InterfaceValidationError
from sentry.interfaces.schemas import validate_and_default_interface, INPUT_SCHEMAS
from sentry.utils import json
from sentry.utils.cache import memoize
from sentry.utils.http import is_valid_origin
from sentry.utils.safe import trim
from sentry.web.helpers import render_to_string
# Sources (extensions, ad/malware injectors, etc.) whose CSP reports are
# ignored by default; matched against blocked/source URIs via is_valid_origin.
# BUG FIX: the previous tuple listed "*.superfish.com" and "v.zilionfast.in"
# twice; duplicates removed (membership semantics are unchanged).
DEFAULT_DISALLOWED_SOURCES = (
    "about",
    "ms-browser-extension",
    "chrome://*",
    "chrome-extension://*",
    "chromeinvokeimmediate://*",
    "chromenull://*",
    "safari-extension://*",
    "mxaddon-pkg://*",
    "jar://*",
    "webviewprogressproxy://*",
    "ms-browser-extension://*",
    "tmtbff://*",
    "mbinit://*",
    "symres://*",
    "resource://*",
    "moz-extension://*",
    "*.metrext.com",
    "static.image2play.com",
    "*.tlscdn.com",
    "73a5b0806e464be8bd4e694c744624f0.com",
    "020dfefc4ac745dab7594f2f771c1ded.com",
    "*.superfish.com",
    "addons.mozilla.org",
    "v.zilionfast.in",
    "widgets.amung.us",
    "xls.searchfun.in",
    "istatic.datafastguru.info",
    "localhost",
    "resultshub-a.akamaihd.net",
    "pulseadnetwork.com",
    "gateway.zscalertwo.net",
    "www.passpack.com",
    "middlerush-a.akamaihd.net",
    "www.websmartcenter.com",
    "a.linkluster.com",
    "saveyoutime.ru",
    "cdncache-a.akamaihd.net",
    "x.rafomedia.com",
    "savingsslider-a.akamaihd.net",
    "injections.adguard.com",
    "icontent.us",
    "amiok.org",
    "connectionstrenth.com",
    "siteheart.net",
    "netanalitics.space",
    "printapplink.com",
    "godlinkapp.com",
    "devappstor.com",
    "hoholikik.club",
    "smartlink.cool",
    "promfflinkdev.com",
)
class SecurityReport(Interface):
    """
    Abstract base for browser-generated security reports (CSP, HPKP,
    Expect-CT, Expect-Staple). Subclasses parse the raw payload and
    describe how the resulting event is titled, tagged and filtered.
    """

    title = None

    @classmethod
    def from_raw(cls, raw):
        """Parse a raw browser payload; implemented by each report type."""
        raise NotImplementedError

    @classmethod
    def to_python(cls, data):
        """Validate *data* against the interface schema and instantiate."""
        ok, _errors = validate_and_default_interface(data, cls.path)
        if not ok:
            raise InterfaceValidationError("Invalid interface data")
        return cls(**data)

    def get_culprit(self):
        raise NotImplementedError

    def get_message(self):
        raise NotImplementedError

    def get_tags(self):
        raise NotImplementedError

    def get_title(self):
        """The human-readable title for this report type."""
        return self.title

    def should_filter(self, project=None):
        raise NotImplementedError

    def get_origin(self):
        raise NotImplementedError

    def get_referrer(self):
        raise NotImplementedError
class Hpkp(SecurityReport):
    """An HTTP Public Key Pinning (HPKP) violation report."""

    score = 1300
    display_score = 1300
    title = "HPKP Report"

    @classmethod
    def from_raw(cls, raw):
        """Validate the raw payload and build the interface from it."""
        # Schema validation raises on malformed payloads.
        jsonschema.validate(raw, INPUT_SCHEMAS[cls.path])
        attrs = {}
        for key, value in six.iteritems(raw):
            # Dashed report keys become underscored attribute names.
            attrs[key.replace("-", "_")] = trim(value, 1024)
        return cls.to_python(attrs)

    def get_culprit(self):
        return None

    def get_message(self):
        return u"Public key pinning validation failed for '{self.hostname}'".format(self=self)

    def get_tags(self):
        return [
            ("port", six.text_type(self.port)),
            ("include-subdomains", json.dumps(self.include_subdomains)),
            ("hostname", self.hostname),
        ]

    def get_origin(self):
        # Not strictly an origin -- the hostname that failed pinning.
        return self.hostname

    def get_referrer(self):
        return None

    def should_filter(self, project=None):
        return False
class ExpectStaple(SecurityReport):
    """An Expect-Staple (OCSP stapling expectation) violation report."""

    score = 1300
    display_score = 1300
    title = "Expect-Staple Report"

    @classmethod
    def from_raw(cls, raw):
        """Validate the raw payload and build the interface from it."""
        # Schema validation raises on malformed payloads.
        jsonschema.validate(raw, INPUT_SCHEMAS[cls.path])
        # The interesting fields are nested under 'expect-staple-report'.
        report = raw["expect-staple-report"]
        attrs = {}
        for key, value in six.iteritems(report):
            # Dashed report keys become underscored attribute names.
            attrs[key.replace("-", "_")] = trim(value, 1024)
        return cls.to_python(attrs)

    def get_culprit(self):
        return self.hostname

    def get_message(self):
        return u"Expect-Staple failed for '{self.hostname}'".format(self=self)

    def get_tags(self):
        return (
            ("port", six.text_type(self.port)),
            ("hostname", self.hostname),
            ("response_status", self.response_status),
            ("cert_status", self.cert_status),
        )

    def get_origin(self):
        # Not strictly an origin -- the hostname that failed the staple check.
        return self.hostname

    def get_referrer(self):
        return None

    def should_filter(self, project=None):
        return False
class ExpectCT(SecurityReport):
    """
    A Certificate Transparency violation report.

    See also: http://httpwg.org/http-extensions/expect-ct.html
    """

    score = 1300
    display_score = 1300
    title = "Expect-CT Report"

    @classmethod
    def from_raw(cls, raw):
        """Validate the raw payload and build the interface from it."""
        # Validate the raw data against the input schema (raises on failure).
        schema = INPUT_SCHEMAS[cls.path]
        jsonschema.validate(raw, schema)
        # The values we want are nested under the 'expect-ct-report' key.
        raw = raw["expect-ct-report"]
        # Trim values and convert dashed keys to underscored attribute names.
        kwargs = {k.replace("-", "_"): trim(v, 1024) for k, v in six.iteritems(raw)}
        return cls.to_python(kwargs)

    def get_culprit(self):
        return self.hostname

    def get_message(self):
        return u"Expect-CT failed for '{self.hostname}'".format(self=self)

    def get_tags(self):
        return (("port", six.text_type(self.port)), ("hostname", self.hostname))

    def get_origin(self):
        # Not strictly an origin -- the domain that failed the CT check.
        return self.hostname

    def get_referrer(self):
        return None

    def should_filter(self, project=None):
        return False
class Csp(SecurityReport):
    """
    A CSP violation report.

    See also: http://www.w3.org/TR/CSP/#violation-reports
    """

    # Spec keyword meaning "same origin as the protected document".
    LOCAL = "'self'"
    score = 1300
    display_score = 1300
    title = "CSP Report"

    @classmethod
    def from_raw(cls, raw):
        """Validate a raw browser CSP payload and build the interface."""
        # Firefox doesn't send effective-directive, so parse it from
        # violated-directive but prefer effective-directive when present.
        try:
            report = raw["csp-report"]
            report["effective-directive"] = report.get(
                "effective-directive", report["violated-directive"].split(None, 1)[0]
            )
        except (KeyError, IndexError):
            pass
        # Validate the raw data against the input schema (raises on failure)
        schema = INPUT_SCHEMAS[cls.path]
        jsonschema.validate(raw, schema)
        # For CSP, the values we want are nested under the 'csp-report' key.
        raw = raw["csp-report"]
        # Trim values and convert keys to use underscores
        kwargs = {k.replace("-", "_"): trim(v, 1024) for k, v in six.iteritems(raw)}
        return cls.to_python(kwargs)

    def get_message(self):
        """Human-readable summary of what was blocked and where it came from."""
        # Each directive maps to (remote template, inline/local template).
        templates = {
            "child-src": (u"Blocked 'child' from '{uri}'", "Blocked inline 'child'"),
            "connect-src": (u"Blocked 'connect' from '{uri}'", "Blocked inline 'connect'"),
            "font-src": (u"Blocked 'font' from '{uri}'", "Blocked inline 'font'"),
            "form-action": (u"Blocked 'form' action to '{uri}'",),  # no inline option
            "img-src": (u"Blocked 'image' from '{uri}'", "Blocked inline 'image'"),
            "manifest-src": (u"Blocked 'manifest' from '{uri}'", "Blocked inline 'manifest'"),
            "media-src": (u"Blocked 'media' from '{uri}'", "Blocked inline 'media'"),
            "object-src": (u"Blocked 'object' from '{uri}'", "Blocked inline 'object'"),
            "script-src": (
                u"Blocked 'script' from '{uri}'",
                "Blocked unsafe (eval() or inline) 'script'",
            ),
            "script-src-elem": (
                u"Blocked 'script' from '{uri}'",
                "Blocked unsafe 'script' element",
            ),
            "script-src-attr": (
                u"Blocked inline script attribute from '{uri}'",
                "Blocked inline script attribute",
            ),
            "style-src": (u"Blocked 'style' from '{uri}'", "Blocked inline 'style'"),
            "style-src-elem": (
                u"Blocked 'style' from '{uri}'",
                "Blocked 'style' or 'link' element",
            ),
            "style-src-attr": (u"Blocked style attribute from '{uri}'", "Blocked style attribute"),
            "unsafe-inline": (None, u"Blocked unsafe inline 'script'"),
            "unsafe-eval": (None, u"Blocked unsafe eval() 'script'"),
        }
        default_template = ("Blocked {directive!r} from {uri!r}", "Blocked inline {directive!r}")
        # Prefer the unsafe-inline/unsafe-eval pseudo-directive when the
        # violation is a locally-sourced script-src error.
        directive = self.local_script_violation_type or self.effective_directive
        uri = self.normalized_blocked_uri
        # Index 1 selects the inline/local template, index 0 the remote one.
        index = 1 if uri == self.LOCAL else 0
        try:
            tmpl = templates[directive][index]
        except (KeyError, IndexError):
            tmpl = default_template[index]
        return tmpl.format(directive=directive, uri=uri)

    def get_culprit(self):
        """The violated directive with its source values normalized."""
        if not self.violated_directive:
            return ""
        bits = [d for d in self.violated_directive.split(" ") if d]
        # First token is the directive name; the rest are source values.
        return " ".join([bits[0]] + [self._normalize_value(b) for b in bits[1:]])

    def get_tags(self):
        return [
            ("effective-directive", self.effective_directive),
            ("blocked-uri", self._sanitized_blocked_uri()),
        ]

    def get_origin(self):
        return self.document_uri

    def get_referrer(self):
        return self.referrer

    def to_string(self, is_public=False, **kwargs):
        return json.dumps({"csp-report": self.get_api_context()}, indent=2)

    def to_email_html(self, event, **kwargs):
        return render_to_string(
            "sentry/partial/interfaces/csp_email.html", {"data": self.get_api_context()}
        )

    def should_filter(self, project=None):
        """True if the blocked/source URI matches a disallowed (ignored) source."""
        disallowed = ()
        paths = ["blocked_uri", "source_file"]
        uris = [getattr(self, path) for path in paths if hasattr(self, path)]
        if project is None or bool(project.get_option("sentry:csp_ignored_sources_defaults", True)):
            disallowed += DEFAULT_DISALLOWED_SOURCES
        if project is not None:
            disallowed += tuple(project.get_option("sentry:csp_ignored_sources", []))
        if disallowed and any(is_valid_origin(uri, allowed=disallowed) for uri in uris):
            return True
        return False

    def _sanitized_blocked_uri(self):
        # HACK: works around Stripe urls that casually put extremely
        # sensitive information in querystrings; the real solution is to
        # apply data scrubbing to all tags generically.
        uri = self.blocked_uri
        if uri.startswith("https://api.stripe.com/"):
            # Drop the query and fragment components entirely.
            return urlunsplit(urlsplit(uri)[:3] + (None, None))
        return uri

    @memoize
    def normalized_blocked_uri(self):
        return self._normalize_uri(self.blocked_uri)

    @memoize
    def _normalized_document_uri(self):
        return self._normalize_uri(self.document_uri)

    def _normalize_value(self, value):
        """Collapse a directive source value back to its canonical form."""
        keywords = ("'none'", "'self'", "'unsafe-inline'", "'unsafe-eval'")
        all_schemes = ("data:", "mediastream:", "blob:", "filesystem:", "http:", "https:", "file:")
        # Keywords are already canonical.
        if value in keywords:
            return value
        # Normalize a value down to 'self' if it matches the origin of
        # document-uri (Firefox spells out the origin for 'self' values).
        if value.startswith(all_schemes):
            if self._normalized_document_uri == self._normalize_uri(value):
                return self.LOCAL
            # Their rule had an explicit scheme, so let's respect that
            return value
        # No scheme on the value: compare hostnames instead.
        if value == self._normalized_document_uri:
            return self.LOCAL
        # Stitch the document's scheme onto the value.
        scheme = self.document_uri.split(":", 1)[0]
        # But let's not stitch on the boring values
        if scheme in ("http", "https"):
            return value
        return self._unsplit(scheme, value)

    @memoize
    def local_script_violation_type(self):
        """If this is a locally-sourced script-src error, gives the type."""
        if (
            self.violated_directive
            and self.effective_directive == "script-src"
            and self.normalized_blocked_uri == self.LOCAL
        ):
            if "'unsafe-inline'" in self.violated_directive:
                return "unsafe-inline"
            elif "'unsafe-eval'" in self.violated_directive:
                return "unsafe-eval"
        return None

    def _normalize_uri(self, value):
        """Reduce a URI to 'self', a bare hostname, or scheme://hostname."""
        if value in ("", self.LOCAL, self.LOCAL.strip("'")):
            return self.LOCAL
        # Some values are reported as literally just a scheme ('data',
        # 'blob'); normalize those into a uri too.
        if ":" not in value:
            scheme, hostname = value, ""
        else:
            scheme, hostname = urlsplit(value)[:2]
        if scheme in ("http", "https"):
            return hostname
        return self._unsplit(scheme, hostname)

    def _unsplit(self, scheme, hostname):
        return urlunsplit((scheme, hostname, "", None, None))
| true | true |
f73a50c0e344fcd69f9c995c194228b477953779 | 8,189 | py | Python | bank/accounts.py | samroon2/bank_project | e272bdd96b07b17de69cecb3b42ddb01c95dfe0b | [
"Apache-2.0"
] | null | null | null | bank/accounts.py | samroon2/bank_project | e272bdd96b07b17de69cecb3b42ddb01c95dfe0b | [
"Apache-2.0"
] | null | null | null | bank/accounts.py | samroon2/bank_project | e272bdd96b07b17de69cecb3b42ddb01c95dfe0b | [
"Apache-2.0"
] | null | null | null | """
bank.accounts
~~~~~~~~~~~~~
This module contains code for managing accounts.
"""
from .cards import Card
from .exceptions import InsufficientBalance, AccountError, ExceedsLimit
import time, datetime
class Account:
    """
    Base class for accounts, handles balances & transactions.

    :param account_id: Unique ID associated with the account.
    :param account_type: Type of account (savings, checking, credit).
    :param holder_accounts: An AccountHolder.Accounts() instance.
    :param accountholder_id: Unique ID of the account holder.
    :param opening_balance: Opening amount of $; negative values clamp to 0.
    :param open_date: Date the account was opened (defaults to today).
    :param status: Status of the account (open, closed, locked).
    """

    def __init__(
        self,
        account_id: int,
        account_type: str,
        holder_accounts,
        accountholder_id: str,
        opening_balance=0,
        open_date=None,
        status: str = "open",
    ):
        self.account_id = account_id
        self.account_type = account_type
        self.holder_accounts = holder_accounts
        # BUG FIX: this previously stored account_id under accountholder_id.
        self.accountholder_id = accountholder_id
        # Assuming there can be $0: negative opening balances clamp to zero.
        self.balance = opening_balance if opening_balance >= 0 else 0
        # BUG FIX: the old default datetime.date.today() was evaluated once at
        # import time; resolve "today" at construction time instead.
        self.open_date = open_date if open_date is not None else datetime.date.today()
        self.status = status
        self.linked_cards = {}
        self.withdrawal_limit = 5000

    def withdraw(self, amount: float) -> dict:
        """
        Withdraw funds, enforcing status, per-transaction limit and balance.

        :param amount: Transaction amount.
        :raises AccountError: if the account is not open.
        :raises ExceedsLimit: if amount exceeds the withdrawal limit.
        :raises InsufficientBalance: if amount exceeds the balance.
        :return: dict with status flag, new balance and transaction time.
        """
        if self.status != "open":
            raise AccountError(self.account_id, self.status)
        if amount > self.withdrawal_limit:
            raise ExceedsLimit(self.withdrawal_limit)
        if amount > self.balance:
            raise InsufficientBalance(self.balance, amount)
        self.balance -= amount
        return {
            "status": True,
            "new_balance": self.balance,
            "transaction_time": time.time(),
        }

    def deposit(self, amount: float) -> dict:
        """
        Deposit funds to an open account.

        :param amount: Transaction amount.
        :raises AccountError: if the account is not open.
        :return: dict with status flag, new balance and transaction time.
        """
        if self.status != "open":
            raise AccountError(self.account_id, self.status)
        self.balance += amount
        return {
            "status": True,
            "new_balance": self.balance,
            "transaction_time": time.time(),
        }
class CheckingAccount(Account):
    """
    Class for checking accounts, inherits base account class.

    :param account_id: Unique ID associated with the account.
    :param account_type: Type of account; overridden to "checking" regardless.
    :param holder_accounts: An AccountHolder.Accounts() class.
    :param accountholder_id: Unique ID of the account holder.
    :param opening_balance: When account is created the opening amount of $.
    :param open_date: Date the account was opened.
    :param status: Status of the account (open, closed, frozen).
    """

    def __init__(
        self,
        account_id: int,
        account_type: str,
        holder_accounts,
        accountholder_id: str,
        opening_balance=0,
        open_date=datetime.date.today(),
        status: str = "open",
    ):
        super().__init__(
            account_id,
            account_type,
            holder_accounts,
            accountholder_id,
            opening_balance,
            open_date,
            status,
        )
        # Checking accounts are always typed "checking", whatever was passed.
        self.account_type = "checking"
        # Register this account with the holder's checking-account registry.
        self.holder_accounts.checking_accounts[self.account_id] = self
class SavingsAccount(Account):
    """
    Class for savings accounts, inherits base account class.

    :param account_id: Unique ID associated with the account.
    :param account_type: Type of account (savings, checkings, credit).
    :param holder_accounts: An AccountHolder.Accounts() class.
    :param accountholder_id: Unique ID of the account holder.
    :param opening_balance: When account is created the opening amount of $.
    :param open_date: Date the account was opened.
    :param status: Status of the account (open, closed, frozen).
    :param interest_rate: The interest rate of the savings account.
    """

    def __init__(
        self,
        account_id: int,
        account_type: str,
        holder_accounts,
        accountholder_id: str,
        opening_balance=0,
        open_date=datetime.date.today(),
        status: str = "open",
        interest_rate=0.001,
    ):
        super().__init__(
            account_id,
            account_type,
            holder_accounts,
            accountholder_id,
            opening_balance,
            open_date,
            status,
        )
        self.account_type = account_type
        self.interest_rate = interest_rate
        # Register this account with the holder's savings-account registry.
        self.holder_accounts.saving_accounts[self.account_id] = self
class CreditAccount(Account):
    """
    Class for credit accounts, inherits base account class.

    :param account_id: Unique ID associated with the account.
    :param account_type: Type of account (savings, checking, credit).
    :param holder_accounts: An AccountHolder.Accounts() instance.
    :param accountholder_id: Unique ID of the account holder.
    :param opening_balance: Opening amount of $ when the account is created.
    :param open_date: Date the account was opened.
    :param status: Status of the account (open, closed, frozen).
    :param apr_rate: The APR charged on the outstanding balance.
    """

    def __init__(
        self,
        account_id: int,
        account_type: str,
        holder_accounts,
        accountholder_id: str,
        opening_balance=0,
        open_date=datetime.date.today(),
        status: str = "open",
        apr_rate=0.15,
    ):
        # BUG FIX: holder_accounts was previously dropped from the super()
        # call, shifting every later positional argument by one slot.
        super().__init__(
            account_id,
            account_type,
            holder_accounts,
            accountholder_id,
            opening_balance,
            open_date,
            status,
        )
        self.account_type = account_type
        self.apr_rate = apr_rate
        # BUG FIX: was `self.holderaccounts` (attribute does not exist);
        # register with the holder like the other account types do.
        self.holder_accounts.credit_accounts[self.account_id] = self
        # TODO: billing-cycle fields (billing_end, balance_due, ...) are not
        # implemented yet.
class Accounts:
    """
    Maintains the relations between an account holder, their accounts and
    their issued cards.

    :param holder: AccountHolder object holding account holder information.
    :param accountholder_id: ID of the account holder.
    """

    def __init__(self, holder, accountholder_id: str):
        self.holder = holder
        self.accountholder_id = accountholder_id
        # One registry dict per account type, keyed by account_id.
        self.checking_accounts = {}
        self.saving_accounts = {}
        self.credit_accounts = {}
        self.issued_cards = {}

    @property
    def holder_info(self):
        """Summary of the account holder linked with these accounts."""
        return self.holder.__repr__

    @property
    def accounts(self):
        """One-line string summary of the number of accounts per type."""
        return (
            f"Accounts: Checking: {len(self.checking_accounts)}, "
            f"Savings: {len(self.saving_accounts)}, "
            f"Credit: {len(self.credit_accounts)}"
        )

    @property
    def total_balance(self) -> int:
        """Combined balance across every account type."""
        return self._checking_balance + self._savings_balance + self._credit_balance

    @property
    def _checking_balance(self) -> int:
        """Total balance of all checking accounts."""
        return sum(acct.balance for acct in self.checking_accounts.values())

    @property
    def _savings_balance(self) -> int:
        """Total balance of all savings accounts."""
        return sum(acct.balance for acct in self.saving_accounts.values())

    @property
    def _credit_balance(self) -> int:
        """Total balance of all credit accounts."""
        return sum(acct.balance for acct in self.credit_accounts.values())
| 30.901887 | 84 | 0.605446 |
from .cards import Card
from .exceptions import InsufficientBalance, AccountError, ExceedsLimit
import time, datetime
class Account:
    """
    Base class for accounts, handles balances & transactions.

    :param account_id: Unique ID associated with the account.
    :param account_type: Type of account (savings, checking, credit).
    :param holder_accounts: An AccountHolder.Accounts() instance.
    :param accountholder_id: Unique ID of the account holder.
    :param opening_balance: Opening amount of $; negative values clamp to 0.
    :param open_date: Date the account was opened (defaults to today).
    :param status: Status of the account (open, closed, locked).
    """

    def __init__(
        self,
        account_id: int,
        account_type: str,
        holder_accounts,
        accountholder_id: str,
        opening_balance=0,
        open_date=None,
        status: str = "open",
    ):
        self.account_id = account_id
        self.account_type = account_type
        self.holder_accounts = holder_accounts
        # BUG FIX: this previously stored account_id under accountholder_id.
        self.accountholder_id = accountholder_id
        # Negative opening balances clamp to zero.
        self.balance = opening_balance if opening_balance >= 0 else 0
        # BUG FIX: the old default datetime.date.today() was evaluated once at
        # import time; resolve "today" at construction time instead.
        self.open_date = open_date if open_date is not None else datetime.date.today()
        self.status = status
        self.linked_cards = {}
        self.withdrawal_limit = 5000

    def withdraw(self, amount: float) -> dict:
        """
        Withdraw funds, enforcing status, per-transaction limit and balance.

        :param amount: Transaction amount.
        :raises AccountError: if the account is not open.
        :raises ExceedsLimit: if amount exceeds the withdrawal limit.
        :raises InsufficientBalance: if amount exceeds the balance.
        :return: dict with status flag, new balance and transaction time.
        """
        if self.status != "open":
            raise AccountError(self.account_id, self.status)
        if amount > self.withdrawal_limit:
            raise ExceedsLimit(self.withdrawal_limit)
        if amount > self.balance:
            raise InsufficientBalance(self.balance, amount)
        self.balance -= amount
        return {
            "status": True,
            "new_balance": self.balance,
            "transaction_time": time.time(),
        }

    def deposit(self, amount: float) -> dict:
        """
        Deposit funds to an open account.

        :param amount: Transaction amount.
        :raises AccountError: if the account is not open.
        :return: dict with status flag, new balance and transaction time.
        """
        if self.status != "open":
            raise AccountError(self.account_id, self.status)
        self.balance += amount
        return {
            "status": True,
            "new_balance": self.balance,
            "transaction_time": time.time(),
        }
class CheckingAccount(Account):
    """
    Checking account; registers itself with the holder's account registry.

    :param account_type: Accepted but overridden to "checking".
    :param holder_accounts: Accounts registry object owning this account.
    """

    def __init__(
        self,
        account_id: int,
        account_type: str,
        holder_accounts,
        accountholder_id: str,
        opening_balance=0,
        open_date=datetime.date.today(),
        status: str = "open",
    ):
        super().__init__(
            account_id,
            account_type,
            holder_accounts,
            accountholder_id,
            opening_balance,
            open_date,
            status,
        )
        # Checking accounts are always typed "checking", whatever was passed.
        self.account_type = "checking"
        # Register this account with the holder's checking-account registry.
        self.holder_accounts.checking_accounts[self.account_id] = self
class SavingsAccount(Account):
    """
    Savings account with an interest rate; registers itself with the
    holder's account registry.

    :param interest_rate: The interest rate of the savings account.
    """

    def __init__(
        self,
        account_id: int,
        account_type: str,
        holder_accounts,
        accountholder_id: str,
        opening_balance=0,
        open_date=datetime.date.today(),
        status: str = "open",
        interest_rate=0.001,
    ):
        super().__init__(
            account_id,
            account_type,
            holder_accounts,
            accountholder_id,
            opening_balance,
            open_date,
            status,
        )
        self.account_type = account_type
        self.interest_rate = interest_rate
        # Register this account with the holder's savings-account registry.
        self.holder_accounts.saving_accounts[self.account_id] = self
class CreditAccount(Account):
    """
    Credit account with an APR; registers itself with the holder's account
    registry.

    :param apr_rate: The APR charged on the outstanding balance.
    """

    def __init__(
        self,
        account_id: int,
        account_type: str,
        holder_accounts,
        accountholder_id: str,
        opening_balance=0,
        open_date=datetime.date.today(),
        status: str = "open",
        apr_rate=0.15,
    ):
        # BUG FIX: holder_accounts was previously dropped from the super()
        # call, shifting every later positional argument by one slot.
        super().__init__(
            account_id,
            account_type,
            holder_accounts,
            accountholder_id,
            opening_balance,
            open_date,
            status,
        )
        self.account_type = account_type
        self.apr_rate = apr_rate
        # BUG FIX: was `self.holderaccounts` (attribute does not exist);
        # register with the holder like the other account types do.
        self.holder_accounts.credit_accounts[self.account_id] = self
class Accounts:
    """
    Maintains the relations between an account holder, their accounts and
    their issued cards.

    :param holder: AccountHolder object holding account holder information.
    :param accountholder_id: ID of the account holder.
    """

    def __init__(self, holder, accountholder_id: str):
        self.holder = holder
        self.accountholder_id = accountholder_id
        # One registry dict per account type, keyed by account_id.
        self.checking_accounts = {}
        self.saving_accounts = {}
        self.credit_accounts = {}
        self.issued_cards = {}

    @property
    def holder_info(self):
        """Summary of the account holder linked with these accounts."""
        return self.holder.__repr__

    @property
    def accounts(self):
        """One-line string summary of the number of accounts per type."""
        return "".join(
            [
                f"Accounts: Checking: {len(self.checking_accounts)}, ",
                f"Savings: {len(self.saving_accounts)}, ",
                f"Credit: {len(self.credit_accounts)}",
            ]
        )

    @property
    def total_balance(self) -> int:
        """Combined balance across every account type."""
        return self._checking_balance + self._savings_balance + self._credit_balance

    @property
    def _checking_balance(self) -> int:
        """Total balance of all checking accounts."""
        bal = 0
        for id, obj in self.checking_accounts.items():
            bal += obj.balance
        return bal

    @property
    def _savings_balance(self) -> int:
        """Total balance of all savings accounts."""
        bal = 0
        for id, obj in self.saving_accounts.items():
            bal += obj.balance
        return bal

    @property
    def _credit_balance(self) -> int:
        """Total balance of all credit accounts."""
        bal = 0
        for id, obj in self.credit_accounts.items():
            bal += obj.balance
        return bal
| true | true |
f73a53505340aba6e1987ab6d467523d88ef5c8d | 2,391 | py | Python | src/alert.py | computer-geek64/swamphacks | 16231e8123c7660ddc987cfda357629227bc2154 | [
"MIT"
] | null | null | null | src/alert.py | computer-geek64/swamphacks | 16231e8123c7660ddc987cfda357629227bc2154 | [
"MIT"
] | 10 | 2020-02-02T05:36:40.000Z | 2022-02-26T23:08:19.000Z | src/alert.py | computer-geek64/swamphacks | 16231e8123c7660ddc987cfda357629227bc2154 | [
"MIT"
] | 1 | 2020-02-21T18:05:10.000Z | 2020-02-21T18:05:10.000Z | #!/usr/bin/python3
# alert.py
import math
from data import mongo
from data import gdacs
from data import wildfires
import geopy.distance
import pymongo
from time import sleep
from datetime import datetime
from config import MONGODB_USER, MONGODB_PASS
def monitor_danger(time_threshold=5 * 60, distance_thresholds={"hurricanes": 200, "floods": 50, "wildfires": 50}):
    """
    Find users who have stopped reporting their location and whose last
    known position lies near a recent disaster.

    :param time_threshold: Seconds of location silence before a user is
        considered "location off" (default 5 minutes).
    :param distance_thresholds: Per-disaster-type danger radius in miles.
    :return: List of ``{"user", "last_location", "disaster"}`` dicts.
    """
    # NOTE(review): the mutable default dict is shared across calls; it is
    # only read here, so this is safe as written.
    client = pymongo.MongoClient("mongodb+srv://" + MONGODB_USER + ":" + MONGODB_PASS + "@alrt-ypzt7.mongodb.net/test?retryWrites=true&w=majority")
    users = client["users"]
    # Oldest timestamp still considered "recent" for a location ping.
    threshold_difference = datetime.now().timestamp() - time_threshold
    output = []
    for user in users.list_collection_names():
        results = list(users[user].find({"time": {"$gte": threshold_difference}}))
        if len(results) == 0:
            # Location off: no pings within the window, so fall back to the
            # most recently recorded position.
            last_location = users[user].find().sort("time", pymongo.DESCENDING).limit(1)[0]
            disasters = client["disasters"]
            for disaster in disasters.list_collection_names():
                for x in disasters[disaster].find():
                    # Earthquakes use a magnitude-derived radius (the formula
                    # appears to yield metres, converted to miles via
                    # 0.00062137 -- TODO confirm its source); other disaster
                    # types use the fixed per-type radius.
                    if (disaster == "earthquakes" and geopy.distance.distance((x["lat"], x["lon"]), (last_location["lat"], last_location["lon"])).mi < math.exp(x["magnitude"] / 1.01 - 0.13) * 1000 * 0.00062137) or (disaster != "earthquakes" and geopy.distance.distance((x["lat"], x["lon"]), (last_location["lat"], last_location["lon"])).mi < distance_thresholds[disaster]):
                        # Only disasters within ~24h of the last ping count.
                        if x["time"] >= last_location["time"] - 60 * 60 * 24:
                            output.append({"user": user, "last_location": last_location, "disaster": x})
    client.close()
    return output
# Poll loop: refresh the disaster feeds, prune stale records, and rebuild
# the "alerts" collection with every user currently considered in danger.
while True:
    # Pull the latest GDACS geojson plus wildfire data and persist it.
    gdacs.download_geojson()
    documents = gdacs.get_disasters() + wildfires.get_wildfires()
    mongo.add_disaster_documents(documents)
    client = pymongo.MongoClient("mongodb+srv://" + MONGODB_USER + ":" + MONGODB_PASS + "@alrt-ypzt7.mongodb.net/test?retryWrites=true&w=majority")
    #for user in client["users"].list_collection_names():
    #    mongo.cleanup_user(user)
    # Drop expired records for each disaster type.
    for disaster in client["disasters"].list_collection_names():
        mongo.cleanup_disaster(disaster)
    # Rebuild the alert list from scratch on every cycle.
    db = client["alerts"]
    user_collection = db["users"]
    user_collection.delete_many({})
    danger = monitor_danger()
    if len(danger) > 0:
        user_collection.insert_many(danger)
    client.close()
sleep(300) | 46.882353 | 373 | 0.663321 |
import math
from data import mongo
from data import gdacs
from data import wildfires
import geopy.distance
import pymongo
from time import sleep
from datetime import datetime
from config import MONGODB_USER, MONGODB_PASS
def monitor_danger(time_threshold=5 * 60, distance_thresholds={"hurricanes": 200, "floods": 50, "wildfires": 50}):
client = pymongo.MongoClient("mongodb+srv://" + MONGODB_USER + ":" + MONGODB_PASS + "@alrt-ypzt7.mongodb.net/test?retryWrites=true&w=majority")
users = client["users"]
threshold_difference = datetime.now().timestamp() - time_threshold
output = []
for user in users.list_collection_names():
results = list(users[user].find({"time": {"$gte": threshold_difference}}))
if len(results) == 0:
last_location = users[user].find().sort("time", pymongo.DESCENDING).limit(1)[0]
disasters = client["disasters"]
for disaster in disasters.list_collection_names():
for x in disasters[disaster].find():
if (disaster == "earthquakes" and geopy.distance.distance((x["lat"], x["lon"]), (last_location["lat"], last_location["lon"])).mi < math.exp(x["magnitude"] / 1.01 - 0.13) * 1000 * 0.00062137) or (disaster != "earthquakes" and geopy.distance.distance((x["lat"], x["lon"]), (last_location["lat"], last_location["lon"])).mi < distance_thresholds[disaster]):
if x["time"] >= last_location["time"] - 60 * 60 * 24:
output.append({"user": user, "last_location": last_location, "disaster": x})
client.close()
return output
while True:
gdacs.download_geojson()
documents = gdacs.get_disasters() + wildfires.get_wildfires()
mongo.add_disaster_documents(documents)
client = pymongo.MongoClient("mongodb+srv://" + MONGODB_USER + ":" + MONGODB_PASS + "@alrt-ypzt7.mongodb.net/test?retryWrites=true&w=majority")
for disaster in client["disasters"].list_collection_names():
mongo.cleanup_disaster(disaster)
db = client["alerts"]
user_collection = db["users"]
user_collection.delete_many({})
danger = monitor_danger()
if len(danger) > 0:
user_collection.insert_many(danger)
client.close()
sleep(300) | true | true |
f73a53927386a45603bbf811a4d1b3aa32955053 | 1,566 | py | Python | mercurial/pure/diffhelpers.py | EnjoyLifeFund/py36pkgs | 0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2 | [
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | mercurial/pure/diffhelpers.py | EnjoyLifeFund/py36pkgs | 0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2 | [
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | mercurial/pure/diffhelpers.py | EnjoyLifeFund/py36pkgs | 0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2 | [
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | # diffhelpers.py - pure Python implementation of diffhelpers.c
#
# Copyright 2009 Matt Mackall <mpm@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
def addlines(fp, hunk, lena, lenb, a, b):
while True:
todoa = lena - len(a)
todob = lenb - len(b)
num = max(todoa, todob)
if num == 0:
break
for i in range(num):
s = fp.readline()
c = s[0]
if s == "\\ No newline at end of file\n":
fix_newline(hunk, a, b)
continue
if c == "\n":
# Some patches may be missing the control char
# on empty lines. Supply a leading space.
s = " \n"
hunk.append(s)
if c == "+":
b.append(s[1:])
elif c == "-":
a.append(s)
else:
b.append(s[1:])
a.append(s)
return 0
def fix_newline(hunk, a, b):
l = hunk[-1]
# tolerate CRLF in last line
if l.endswith('\r\n'):
hline = l[:-2]
else:
hline = l[:-1]
c = hline[0]
if c in " +":
b[-1] = hline[1:]
if c in " -":
a[-1] = hline
hunk[-1] = hline
return 0
def testhunk(a, b, bstart):
alen = len(a)
blen = len(b)
if alen > blen - bstart:
return -1
for i in range(alen):
if a[i][1:] != b[i + bstart]:
return -1
return 0
| 24.857143 | 73 | 0.469987 |
def addlines(fp, hunk, lena, lenb, a, b):
while True:
todoa = lena - len(a)
todob = lenb - len(b)
num = max(todoa, todob)
if num == 0:
break
for i in range(num):
s = fp.readline()
c = s[0]
if s == "\\ No newline at end of file\n":
fix_newline(hunk, a, b)
continue
if c == "\n":
s = " \n"
hunk.append(s)
if c == "+":
b.append(s[1:])
elif c == "-":
a.append(s)
else:
b.append(s[1:])
a.append(s)
return 0
def fix_newline(hunk, a, b):
l = hunk[-1]
if l.endswith('\r\n'):
hline = l[:-2]
else:
hline = l[:-1]
c = hline[0]
if c in " +":
b[-1] = hline[1:]
if c in " -":
a[-1] = hline
hunk[-1] = hline
return 0
def testhunk(a, b, bstart):
alen = len(a)
blen = len(b)
if alen > blen - bstart:
return -1
for i in range(alen):
if a[i][1:] != b[i + bstart]:
return -1
return 0
| true | true |
f73a53dac0bae336a1463e8eae67a7c4cf0dc991 | 7,386 | py | Python | data.py | ChuanTianML/mxnet_word_lm | 231b67370712a5dccae9433858dd66800005a00f | [
"Apache-2.0"
] | null | null | null | data.py | ChuanTianML/mxnet_word_lm | 231b67370712a5dccae9433858dd66800005a00f | [
"Apache-2.0"
] | null | null | null | data.py | ChuanTianML/mxnet_word_lm | 231b67370712a5dccae9433858dd66800005a00f | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os, gzip
import sys
import mxnet as mx
import numpy as np
class Dictionary(object):
"""字典类
@func add_word(word): 在字典中添加单词word
"""
def __init__(self):
self.word2idx = {} #单词到id
self.idx2word = [] #id到单词
self.word_count = [] #统计每个单词在语料中出现的次数,index为单词id
def add_word(self, word): #尝试添加一个单词
if word not in self.word2idx:
self.idx2word.append(word)
self.word2idx[word] = len(self.idx2word) - 1
self.word_count.append(0)
index = self.word2idx[word]
self.word_count[index] += 1
return index
def __len__(self):
return len(self.idx2word)
class Corpus(object):
def __init__(self, path):
"""
@param path: 数据所在目录
"""
self.dictionary = Dictionary() #构造字典实例,准备根据语料构造字典
self.train = self.tokenize(path + 'train.txt') #tokenize train/valid/test语料,同时获得字典
self.valid = self.tokenize(path + 'valid.txt')
self.test = self.tokenize(path + 'test.txt')
def tokenize(self, path):
"""构建词表,tokenize语料(转wordid)
@param path: 语料文件路径
@return: 转为wordid的语料, 形状为(token数量,)
@notes: 1.添加了句子结束符'<eos>'
2.语料中所有token均被添加到字典
3.最后的ids怎么不分行,而是把整个语料文件存进一个长数组?
"""
"""Tokenizes a text file."""
assert os.path.exists(path)
# Add words to the dictionary
with open(path, 'r') as f:
tokens = 0 #tokens记录整个文件的token数量
for line in f:
words = line.split() + ['<eos>']
tokens += len(words)
for word in words:
self.dictionary.add_word(word)
# Tokenize file content
with open(path, 'r') as f:
ids = np.zeros((tokens,), dtype='int32') #ids是整个语料文件所有token的wordid
token = 0
for line in f:
words = line.split() + ['<eos>']
for word in words:
ids[token] = self.dictionary.word2idx[word]
token += 1
return mx.nd.array(ids, dtype='int32')
def batchify(data, batch_size):
"""
@param data: (Corpus.[train/valid/test]) tokenize后的数据
@param batch_size: batch size
@return: 按batch分好的数据,形状为(batch数量,batch size)
@notes: source corpus: [我,爱,你,们,大,家,好,啊,晚上,吃,的,什么,你,是,哪,位,今天,天气,怎么,样,不,告,诉,你]
reshape(3,8): [[我, 爱, 你, 们, 大, 家, 好, 啊],
[晚上, 吃, 的, 什么, 你, 是, 哪, 位],
[今天, 天气, 怎么, 样, 不, 告, 诉, 你]]
即reshape((batch_size=3, nbatch=8),得到形状(batch_size, batch_num*sentence_len)
最清晰的数据形状应该是(batch_num, batch_size, sentence_len),因为这里仅仅保留了2个维度,所以nbatch=batch_num*sentence_len,所以上面的形状不直观
T: [[我, 晚上, 今天],
[爱, 吃, 天气],
[你, 的, 怎么],
[们, 什么, 样]
[大, 你, 不]
[家, 是, 告]
[好, 哪, 诉]
[啊, 位, 你]]
得到形状(batch_num*sentence_len, batch_size)
iter_next()函数取一个batch的操作是:假设bptt=4,也就是上面每个句子的长度
第一次取得到: [[我, 晚上, 今天],
[爱, 吃, 天气],
[你, 的, 怎么],
[们, 什么, 样]]
第二次取得到: [[大, 你, 不]
[家, 是, 告]
[好, 哪, 诉]
[啊, 位, 你]]
即,在0维度上,一次取一个sentence_len,也就是去了batch_num次
"""
"""Reshape data into (num_example, batch_size)"""
nbatch = data.shape[0] // batch_size #获取batch的数量,1.从这里的逻辑来看,batch_size单位是token而不是句子? 2.使用整数除法,尾巴舍弃不要了啊?
data = data[:nbatch * batch_size] #两个目的吧,一是转list,二是去除尾巴,即每个batch都是满的
data = data.reshape((batch_size, nbatch)).T #转形状,为(bptt*batch_num,batch_size)
return data
class CorpusIter(mx.io.DataIter):
"""数据迭代器
"""
"An iterator that returns the a batch of sequence each time"
def __init__(self, source, batch_size, bptt):
"""初始化数据迭代器
@param source: (Corpus.[train/valid/test]) tokenize后的数据
@param batch_size: batch size
@param bptt: 句子长度
"""
super(CorpusIter, self).__init__()
self.batch_size = batch_size
self.provide_data = [('data', (bptt, batch_size), np.int32)] #一个list,只有一个tuple元素,tuple有3个元素。 输入数据的形状(bptt, batch_size)
self.provide_label = [('label', (bptt, batch_size))] #一个list,只要一个tuple元素,tuple有2个元素。 输入label的形状(bptt, batch_size)
self._index = 0
self._bptt = bptt
self._source = batchify(source, batch_size) #数据按batch分好,得到形状为(batch数量,batch size)的数据
def iter_next(self):
"""mxnet: move to the next batch
"""
i = self._index #记录当前取到的位置
if i+self._bptt > self._source.shape[0] - 1:
return False
self._next_data = self._source[i:i+self._bptt] #得到形状(bptt, batch_size)
self._next_label = self._source[i+1:i+1+self._bptt].astype(np.float32) #得到形状(bptt, batch_size)
self._index += self._bptt
return True
def next(self):
"""mxnet: get next data batch from iterator
"""
if self.iter_next(): #还有数据可取,则返回数据
return mx.io.DataBatch(data=self.getdata(), label=self.getlabel()) #
else: #数据已经取完,则抛出终止迭代错误
raise StopIteration
def reset(self):
self._index = 0
self._next_data = None
self._next_label = None
def getdata(self):
"""mxnet: get data of current batch
"""
return [self._next_data] #形状(1, bptt, batch_size)
def getlabel(self):
"""mxnet: get label of current batch
"""
return [self._next_label] #形状(1, bptt, batch_size)
| 42.205714 | 133 | 0.50176 |
import os, gzip
import sys
import mxnet as mx
import numpy as np
class Dictionary(object):
def __init__(self):
self.word2idx = {}
self.idx2word = []
self.word_count = []
def add_word(self, word):
if word not in self.word2idx:
self.idx2word.append(word)
self.word2idx[word] = len(self.idx2word) - 1
self.word_count.append(0)
index = self.word2idx[word]
self.word_count[index] += 1
return index
def __len__(self):
return len(self.idx2word)
class Corpus(object):
def __init__(self, path):
self.dictionary = Dictionary()
self.train = self.tokenize(path + 'train.txt')
self.valid = self.tokenize(path + 'valid.txt')
self.test = self.tokenize(path + 'test.txt')
def tokenize(self, path):
assert os.path.exists(path)
with open(path, 'r') as f:
tokens = 0
for line in f:
words = line.split() + ['<eos>']
tokens += len(words)
for word in words:
self.dictionary.add_word(word)
with open(path, 'r') as f:
ids = np.zeros((tokens,), dtype='int32')
token = 0
for line in f:
words = line.split() + ['<eos>']
for word in words:
ids[token] = self.dictionary.word2idx[word]
token += 1
return mx.nd.array(ids, dtype='int32')
def batchify(data, batch_size):
nbatch = data.shape[0] // batch_size
data = data[:nbatch * batch_size]
data = data.reshape((batch_size, nbatch)).T
return data
class CorpusIter(mx.io.DataIter):
def __init__(self, source, batch_size, bptt):
super(CorpusIter, self).__init__()
self.batch_size = batch_size
self.provide_data = [('data', (bptt, batch_size), np.int32)]
self.provide_label = [('label', (bptt, batch_size))]
self._index = 0
self._bptt = bptt
self._source = batchify(source, batch_size)
def iter_next(self):
i = self._index
if i+self._bptt > self._source.shape[0] - 1:
return False
self._next_data = self._source[i:i+self._bptt]
self._next_label = self._source[i+1:i+1+self._bptt].astype(np.float32)
self._index += self._bptt
return True
def next(self):
if self.iter_next():
return mx.io.DataBatch(data=self.getdata(), label=self.getlabel())
else:
raise StopIteration
def reset(self):
self._index = 0
self._next_data = None
self._next_label = None
def getdata(self):
return [self._next_data]
def getlabel(self):
return [self._next_label]
| true | true |
f73a5551e2e7c9bbd2eda5dc1fc55becf9d3d8a3 | 667 | py | Python | BachelorETL/manage.py | Athanar/BachelorProject | b2867aab55dab0c793fb5eb993850f13bb9e64fa | [
"MIT"
] | null | null | null | BachelorETL/manage.py | Athanar/BachelorProject | b2867aab55dab0c793fb5eb993850f13bb9e64fa | [
"MIT"
] | null | null | null | BachelorETL/manage.py | Athanar/BachelorProject | b2867aab55dab0c793fb5eb993850f13bb9e64fa | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'BachelorETL.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 29 | 75 | 0.68066 |
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'BachelorETL.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| true | true |
f73a56658d1c5ce2862f7709bb31e17539fb146c | 128 | py | Python | wafw00f/plugins/teros.py | wizard531/wafw00f | dce0d0616db0f970013432c520b51aeef62d387f | [
"BSD-3-Clause"
] | 10 | 2015-08-31T10:38:24.000Z | 2021-09-30T06:39:13.000Z | wafw00f/plugins/teros.py | wizard531/wafw00f | dce0d0616db0f970013432c520b51aeef62d387f | [
"BSD-3-Clause"
] | null | null | null | wafw00f/plugins/teros.py | wizard531/wafw00f | dce0d0616db0f970013432c520b51aeef62d387f | [
"BSD-3-Clause"
] | 17 | 2015-07-24T20:40:23.000Z | 2021-01-08T19:41:18.000Z | #!/usr/bin/env python
NAME = 'Teros WAF'
def is_waf(self):
# credit goes to W3AF
return self.matchcookie('^st8id=')
| 12.8 | 38 | 0.640625 |
NAME = 'Teros WAF'
def is_waf(self):
return self.matchcookie('^st8id=')
| true | true |
f73a568794799ff26877debad0fe49e7d4400851 | 17,339 | py | Python | frontend/view_model.py | Kuturkokov/retro-ipod-spotify-client | 267792b86d6be7573e40910b3152c465d0c97979 | [
"Apache-2.0"
] | null | null | null | frontend/view_model.py | Kuturkokov/retro-ipod-spotify-client | 267792b86d6be7573e40910b3152c465d0c97979 | [
"Apache-2.0"
] | null | null | null | frontend/view_model.py | Kuturkokov/retro-ipod-spotify-client | 267792b86d6be7573e40910b3152c465d0c97979 | [
"Apache-2.0"
] | null | null | null | import spotify_manager
import re as re
from functools import lru_cache
# Number of menu rows that fit on the screen at once.
MENU_PAGE_SIZE = 6
# Screen render types (Rendering.type discriminator values)
MENU_RENDER_TYPE = 0
NOW_PLAYING_RENDER = 1
SEARCH_RENDER = 2
# Menu line item types (LineItem.line_type values)
LINE_NORMAL = 0
LINE_HIGHLIGHT = 1
LINE_TITLE = 2
# Import-time side effect: refresh the list of available Spotify playback
# devices once when this module is first loaded.
spotify_manager.refresh_devices()
class LineItem():
    """One renderable row of a menu screen."""
    def __init__(self, title = "", line_type = LINE_NORMAL, show_arrow = False):
        self.title = title            # text shown on the line
        self.line_type = line_type    # LINE_NORMAL / LINE_HIGHLIGHT / LINE_TITLE
        self.show_arrow = show_arrow  # draw the "has sub-page" arrow at the right edge
class Rendering():
    """Base class for everything the frontend can draw.

    Subclasses set ``type`` to one of the *_RENDER(_TYPE) module constants
    so the display layer can dispatch on it.
    """
    def __init__(self, type):
        # Parameter name shadows the builtin ``type``; kept as-is because it
        # is part of the public constructor signature.
        self.type = type
    def unsubscribe(self):
        """Release any live-update subscription; the base class holds none."""
        pass
class MenuRendering(Rendering):
    """Snapshot of a menu screen: the visible lines plus paging/status info."""
    def __init__(self, header = "", lines = None, page_start = 0, total_count = 0):
        super().__init__(MENU_RENDER_TYPE)
        # Bug fix: the default used to be a shared mutable list (``lines = []``),
        # so all default-constructed renderings aliased one list object.
        self.lines = [] if lines is None else lines
        self.header = header
        self.page_start = page_start      # absolute index of the selected row
        self.total_count = total_count    # total rows in the underlying menu
        # Status captured at construction time, not live.
        self.now_playing = spotify_manager.DATASTORE.now_playing
        self.has_internet = spotify_manager.has_internet
class NowPlayingRendering(Rendering):
    """Live-updating rendering for the Now Playing screen.

    A subscriber registers a callback; ``refresh`` pushes the current
    playback state to it and re-schedules itself every 500 ms through the
    app's (tkinter-style) ``after`` timer.
    """
    def __init__(self):
        super().__init__(NOW_PLAYING_RENDER)
        self.callback = None  # function invoked with the current now-playing state
        self.after_id = None  # handle of the pending ``app.after`` timer, if any
    def subscribe(self, app, callback):
        """Start pushing now-playing updates to ``callback``.

        Re-subscribing with the same callback is a no-op; the refresh loop
        is only kicked off when there was no previous subscriber.
        """
        if callback == self.callback:
            return
        new_callback = self.callback is None
        self.callback = callback
        self.app = app
        if new_callback:
            self.refresh()
    def refresh(self):
        """Push the playback state to the subscriber and re-arm the timer."""
        if not self.callback:
            return
        # Cancel any pending tick so multiple timers never stack up.
        if self.after_id:
            self.app.after_cancel(self.after_id)
        self.callback(spotify_manager.DATASTORE.now_playing)
        self.after_id = self.app.after(500, lambda: self.refresh())
    def unsubscribe(self):
        # Clearing ``callback`` makes the next scheduled refresh a no-op.
        super().unsubscribe()
        self.callback = None
        self.app = None
class NowPlayingCommand():
    """A one-shot action (e.g. "start playing this track") that the Now
    Playing screen executes the first time it renders."""
    def __init__(self, runnable = lambda:()):
        self.runnable = runnable
        self.has_run = False
    def run(self):
        """Execute the wrapped action and remember that it happened."""
        self.has_run = True
        self.runnable()
class SearchRendering(Rendering):
    """Live rendering for the on-screen-keyboard search page."""
    def __init__(self, query, active_char):
        super().__init__(SEARCH_RENDER)
        self.query = query              # text entered so far
        self.active_char = active_char  # 0-25 -> 'a'-'z', 26 -> space
        self.loading = False            # True while a search request is in flight
        self.callback = None
        self.results = None
    def get_active_char(self):
        """Return the keyboard character currently highlighted."""
        if self.active_char == 26:
            return ' '
        return chr(ord('a') + self.active_char)
    def subscribe(self, app, callback):
        """Register the view callback; fires an initial refresh for the
        first subscriber, and ignores re-subscription of the same callback."""
        if callback == self.callback:
            return
        first_subscriber = self.callback is None
        self.callback = callback
        self.app = app
        if first_subscriber:
            self.refresh()
    def refresh(self):
        """Push the current query/selection/search state to the subscriber."""
        if not self.callback:
            return
        self.callback(self.query, self.get_active_char(), self.loading, self.results)
        # Results are delivered exactly once, then cleared.
        self.results = None
    def unsubscribe(self):
        super().unsubscribe()
        self.callback = None
        self.app = None
class SearchPage():
    """Character-picker search screen.

    The wheel (nav_up/nav_down) cycles through 'a'-'z' plus space,
    nav_next appends the highlighted character, nav_prev is backspace,
    and nav_select submits the query asynchronously.
    """
    def __init__(self, previous_page):
        self.header = "Search"
        self.has_sub_page = True
        self.previous_page = previous_page
        self.live_render = SearchRendering("", 0)
        self.is_title = False
    def nav_prev(self):
        """Backspace: drop the last character of the query."""
        self.live_render.query = self.live_render.query[0:-1]
        self.live_render.refresh()
    def nav_next(self):
        """Append the highlighted character (query length capped at 16)."""
        if len(self.live_render.query) > 15:
            return
        # Reuse the renderer's index->character mapping instead of
        # duplicating the chr() arithmetic here (previous DRY violation).
        self.live_render.query += self.live_render.get_active_char()
        self.live_render.refresh()
    def nav_play(self):
        pass
    def nav_up(self):
        """Move the character picker forward, wrapping space -> 'a'."""
        self.live_render.active_char += 1
        if (self.live_render.active_char > 26):
            self.live_render.active_char = 0
        self.live_render.refresh()
    def nav_down(self):
        """Move the character picker backward, wrapping 'a' -> space."""
        self.live_render.active_char -= 1
        if (self.live_render.active_char < 0):
            self.live_render.active_char = 26
        self.live_render.refresh()
    def run_search(self, query):
        """Execute the search (blocking) and publish the results."""
        self.live_render.loading = True
        self.live_render.refresh()
        self.live_render.results = spotify_manager.search(query)
        self.live_render.loading = False
        self.live_render.refresh()
    def nav_select(self):
        """Kick off the search off-thread; stay on this page."""
        spotify_manager.run_async(lambda: self.run_search(self.live_render.query))
        return self
    def nav_back(self):
        return self.previous_page
    def render(self):
        return self.live_render
class NowPlayingPage():
    """Now Playing screen; hardware buttons map to transport controls."""
    def __init__(self, previous_page, header, command):
        self.has_sub_page = False
        self.previous_page = previous_page
        self.command = command
        self.header = header
        self.live_render = NowPlayingRendering()
        self.is_title = False
    def play_previous(self):
        spotify_manager.play_previous()
        self.live_render.refresh()
    def play_next(self):
        spotify_manager.play_next()
        self.live_render.refresh()
    def toggle_play(self):
        spotify_manager.toggle_play()
        self.live_render.refresh()
    def nav_prev(self):
        # Transport actions hit the network, so run them off the UI thread.
        spotify_manager.run_async(lambda: self.play_previous())
    def nav_next(self):
        spotify_manager.run_async(lambda: self.play_next())
    def nav_play(self):
        spotify_manager.run_async(lambda: self.toggle_play())
    def nav_up(self):
        pass
    def nav_down(self):
        pass
    def nav_select(self):
        return self
    def nav_back(self):
        return self.previous_page
    def render(self):
        # Fire the pending one-shot command (e.g. "start this track") exactly once.
        if not self.command.has_run:
            self.command.run()
        return self.live_render
# Shared blank row used to pad menus that don't fill a whole screen.
EMPTY_LINE_ITEM = LineItem()
class MenuPage():
    """Base class for every scrollable menu screen.

    Tracks the selected row (``index``) and the first visible row
    (``page_start``), and renders the visible window into a MenuRendering.
    Subclasses override ``total_size``/``page_at`` to supply content.
    """
    def __init__(self, header, previous_page, has_sub_page, is_title = False):
        self.index = 0        # absolute index of the selected row
        self.page_start = 0   # absolute index of the first visible row
        self.header = header
        self.has_sub_page = has_sub_page
        self.previous_page = previous_page
        self.is_title = is_title
    def total_size(self):
        """Number of rows in this menu; subclasses override."""
        return 0
    def page_at(self, index):
        """Page object for row ``index`` (None renders blank); subclasses override."""
        return None
    def nav_prev(self):
        # On plain menus the prev/next/play buttons act as transport controls.
        spotify_manager.run_async(lambda: spotify_manager.play_previous())
    def nav_next(self):
        spotify_manager.run_async(lambda: spotify_manager.play_next())
    def nav_play(self):
        spotify_manager.run_async(lambda: spotify_manager.toggle_play())
    def get_index_jump_up(self):
        # Rows moved per scroll step; subclasses (e.g. search results) return
        # 2 to hop over non-selectable section-header rows.
        return 1
    def get_index_jump_down(self):
        return 1
    def nav_up(self):
        """Scroll one step toward the end of the list, paging as needed."""
        jump = self.get_index_jump_up()
        if(self.index >= self.total_size() - jump):
            return
        if (self.index >= self.page_start + MENU_PAGE_SIZE - jump):
            self.page_start = self.page_start + jump
        self.index = self.index + jump
    def nav_down(self):
        """Scroll one step toward the start of the list, paging as needed."""
        jump = self.get_index_jump_down()
        if(self.index <= (jump - 1)):
            return
        if (self.index <= self.page_start + (jump - 1)):
            self.page_start = self.page_start - jump
            # A page is never left starting at row 1 -- presumably so a
            # leading section header at row 0 stays visible; TODO confirm.
            if (self.page_start == 1):
                self.page_start = 0
        self.index = self.index - jump
    def nav_select(self):
        """Descend into the selected row's page."""
        return self.page_at(self.index)
    def nav_back(self):
        return self.previous_page
    def render(self):
        """Build a MenuRendering for the currently visible window of rows."""
        lines = []
        total_size = self.total_size()
        for i in range(self.page_start, self.page_start + MENU_PAGE_SIZE):
            if (i < total_size):
                page = self.page_at(i)
                if (page is None) :
                    lines.append(EMPTY_LINE_ITEM)
                else:
                    line_type = LINE_TITLE if page.is_title else \
                        LINE_HIGHLIGHT if i == self.index else LINE_NORMAL
                    lines.append(LineItem(page.header, line_type, page.has_sub_page))
            else:
                # Pad short menus with blank rows.
                lines.append(EMPTY_LINE_ITEM)
        return MenuRendering(lines=lines, header=self.header, page_start=self.index, total_count=total_size)
class ShowsPage(MenuPage):
    """Menu listing the user's saved podcasts."""
    def __init__(self, previous_page):
        super().__init__(self.get_title(), previous_page, has_sub_page=True)
        self.shows = self.get_content()
        self.num_shows = len(self.shows)
        self._page_cache = {}  # index -> SingleShowPage
    def get_title(self):
        return "Podcasts"
    def get_content(self):
        return spotify_manager.DATASTORE.getAllSavedShows()
    def total_size(self):
        return self.num_shows
    def page_at(self, index):
        # Per-instance cache so scrolling back reuses the same page object.
        # Replaces @lru_cache on this method, which kept ``self`` alive in
        # the cache and leaked page instances.
        if index not in self._page_cache:
            self._page_cache[index] = SingleShowPage(self.shows[index], self)
        return self._page_cache[index]
class PlaylistsPage(MenuPage):
    """Menu listing the user's saved playlists.

    Also serves as the base class for album-style pages (AlbumsPage,
    NewReleasesPage) that override ``get_title``/``get_content``.
    """
    def __init__(self, previous_page):
        super().__init__(self.get_title(), previous_page, has_sub_page=True)
        self.playlists = self.get_content()
        self.num_playlists = len(self.playlists)
        self.playlists.sort(key=self.get_idx) # keep order as arranged in Spotify library
        self._page_cache = {}  # index -> SinglePlaylistPage
    def get_title(self):
        return "Playlists"
    def get_content(self):
        return spotify_manager.DATASTORE.getAllSavedPlaylists()
    def get_idx(self, e):
        """Sort key: library position for playlists, 0 for anything else.

        The content list can also contain albums, which carry no ``idx``.
        """
        if isinstance(e, spotify_manager.UserPlaylist):
            return e.idx
        return 0
    def total_size(self):
        return self.num_playlists
    def page_at(self, index):
        # Per-instance cache instead of @lru_cache on a method (which would
        # keep this page instance alive for the cache's lifetime).
        if index not in self._page_cache:
            self._page_cache[index] = SinglePlaylistPage(self.playlists[index], self)
        return self._page_cache[index]
class AlbumsPage(PlaylistsPage):
    """Saved albums, presented with the same paging behaviour as playlists."""
    def __init__(self, previous_page):
        super().__init__(previous_page)
    def get_title(self):
        return "Albums"
    def get_content(self):
        return spotify_manager.DATASTORE.getAllSavedAlbums()
class SearchResultsPage(MenuPage):
    """Flattened search results: TRACKS / ARTISTS / ALBUMS sections with
    non-selectable header rows between them.

    The per-section counts stored on ``self`` include one extra slot for
    the section's header row (when the section is non-empty).
    """
    def __init__(self, previous_page, results):
        super().__init__("Search Results", previous_page, has_sub_page=True)
        self.results = results
        tracks, albums, artists = len(results.tracks), len(results.albums), len(results.artists)
        # Add 1 to each count (if > 0) to make room for section header line items
        self.tracks = tracks + 1 if tracks > 0 else 0
        self.artists = artists + 1 if artists > 0 else 0
        self.albums = albums + 1 if albums > 0 else 0
        self.total_count = self.tracks + self.albums + self.artists
        # Start on the first selectable row, below the first header.
        self.index = 1
        # absolute indices of the section header line items
        self.header_indices = [0, self.tracks, self.artists + self.tracks]
    def total_size(self):
        return self.total_count
    def page_at(self, index):
        if self.tracks > 0 and index == 0:
            return PlaceHolderPage("TRACKS", self, has_sub_page=False, is_title=True)
        elif self.artists > 0 and index == self.header_indices[1]:
            return PlaceHolderPage("ARTISTS", self, has_sub_page=False, is_title=True)
        elif self.albums > 0 and index == self.header_indices[2]:
            return PlaceHolderPage("ALBUMS", self, has_sub_page=False, is_title=True)
        elif self.tracks > 0 and index < self.header_indices[1]:
            track = self.results.tracks[index - 1]
            command = NowPlayingCommand(lambda: spotify_manager.play_track(track.uri))
            return NowPlayingPage(self, track.title, command)
        elif self.artists > 0 and index < self.header_indices[2]:
            # Bug fix: this guard previously tested ``self.albums > 0``, so
            # with artists present but no albums the artist rows fell through
            # to the album branch and indexed an empty list.
            artist = self.results.artists[index - (self.tracks + 1)]
            command = NowPlayingCommand(lambda: spotify_manager.play_artist(artist.uri))
            return NowPlayingPage(self, artist.name, command)
        else:
            album = self.results.albums[index - (self.artists + self.tracks + 1)]
            tracks = self.results.album_track_map[album.uri]
            return InMemoryPlaylistPage(album, tracks, self)
    def get_index_jump_up(self):
        # Jump 2 rows to hop over a section header sitting directly above.
        if self.index + 1 in self.header_indices:
            return 2
        return 1
    def get_index_jump_down(self):
        if self.index - 1 in self.header_indices:
            return 2
        return 1
class NewReleasesPage(PlaylistsPage):
    """New-release albums, reusing the playlist paging behaviour."""
    def __init__(self, previous_page):
        super().__init__(previous_page)
    def get_title(self):
        return "New Releases"
    def get_content(self):
        return spotify_manager.DATASTORE.getAllNewReleases()
class ArtistsPage(MenuPage):
    """Menu of saved artists; selecting one starts artist playback."""
    def __init__(self, previous_page):
        super().__init__("Artists", previous_page, has_sub_page=True)
    def total_size(self):
        return spotify_manager.DATASTORE.getArtistCount()
    def page_at(self, index):
        # Selecting an artist jumps straight to a Now Playing page that
        # starts playback of that artist.
        chosen = spotify_manager.DATASTORE.getArtist(index)
        play_cmd = NowPlayingCommand(lambda: spotify_manager.play_artist(chosen.uri))
        return NowPlayingPage(self, chosen.name, play_cmd)
class SingleArtistPage(MenuPage):
    """Page for a single artist.

    NOTE(review): empty stub -- nothing in this file instantiates it;
    artist selection goes straight to NowPlayingPage instead.
    """
    def __init__(self, artistName, previous_page):
        super().__init__(artistName, previous_page, has_sub_page=True)
class SinglePlaylistPage(MenuPage):
    """Menu listing one playlist's tracks; the track list loads lazily."""
    # Strips emoji from playlist names so they render on the limited display.
    # Credit for the character ranges: https://stackoverflow.com/a/49986645
    # Compiled once at class level instead of once per instance (hoisted
    # out of __init__).
    _EMOJI_PATTERN = re.compile(pattern = "["
        u"\U0001F600-\U0001F64F"  # emoticons
        u"\U0001F300-\U0001F5FF"  # symbols & pictographs
        u"\U0001F680-\U0001F6FF"  # transport & map symbols
        u"\U0001F1E0-\U0001F1FF"  # flags (iOS)
                           "]+", flags = re.UNICODE)
    def __init__(self, playlist, previous_page):
        super().__init__(self._EMOJI_PATTERN.sub(r'', playlist.name), previous_page, has_sub_page=True)
        self.playlist = playlist
        self.tracks = None  # lazily loaded in get_tracks()
    def get_tracks(self):
        """Fetch and memoize the playlist's tracks on first use."""
        if self.tracks is None:
            self.tracks = spotify_manager.DATASTORE.getPlaylistTracks(self.playlist.uri)
        return self.tracks
    def total_size(self):
        return self.playlist.track_count
    def page_at(self, index):
        track = self.get_tracks()[index]
        command = NowPlayingCommand(lambda: spotify_manager.play_from_playlist(self.playlist.uri, track.uri, None))
        return NowPlayingPage(self, track.title, command)
class SingleShowPage(MenuPage):
    """Menu listing one podcast's episodes; the episode list loads lazily."""
    def __init__(self, show, previous_page):
        super().__init__(show.name, previous_page, has_sub_page=True)
        self.show = show
        self.episodes = None  # populated on first access
    def get_episodes(self):
        """Fetch and memoize this show's episodes."""
        if self.episodes is None:
            self.episodes = spotify_manager.DATASTORE.getShowEpisodes(self.show.uri)
        return self.episodes
    def total_size(self):
        return self.show.episode_count
    def page_at(self, index):
        chosen = self.get_episodes()[index]
        start = NowPlayingCommand(lambda: spotify_manager.play_from_show(self.show.uri, chosen.uri, None))
        return NowPlayingPage(self, chosen.name, start)
class InMemoryPlaylistPage(SinglePlaylistPage):
    """A playlist page whose tracks are already known (e.g. an album from
    search results), so no datastore fetch is needed."""
    def __init__(self, playlist, tracks, previous_page):
        super().__init__(playlist, previous_page)
        self.tracks = tracks  # pre-populated; disables lazy loading in get_tracks()
class SingleTrackPage(MenuPage):
    """Leaf page for one track inside a playlist or album context.

    Rendering this page has the side effect of starting playback of the
    track within its context (playlist takes precedence over album).
    """
    def __init__(self, track, previous_page, playlist = None, album = None):
        super().__init__(track.title, previous_page, has_sub_page=False)
        self.track = track
        self.playlist = playlist
        self.album = album
    def render(self):
        r = super().render()
        print("render track")
        # Playback context: the playlist if one was given, else the album.
        context_uri = self.playlist.uri if self.playlist else self.album.uri
        spotify_manager.play_from_playlist(context_uri, self.track.uri, None)
        return r
class SingleEpisodePage(MenuPage):
    """Leaf page for one podcast episode.

    Rendering it starts playback of the episode within its show.
    NOTE(review): not referenced elsewhere in this file -- SingleShowPage
    builds NowPlayingPage objects instead.
    """
    def __init__(self, episode, previous_page, show = None):
        super().__init__(episode.name, previous_page, has_sub_page=False)
        self.episode = episode
        self.show = show
    def render(self):
        r = super().render()
        print("render episode")
        context_uri = self.show.uri
        spotify_manager.play_from_show(context_uri, self.episode.uri, None)
        return r
class SavedTracksPage(MenuPage):
    """Menu of the user's liked/saved tracks."""
    def __init__(self, previous_page):
        super().__init__("Saved Tracks", previous_page, has_sub_page=True)
    def total_size(self):
        return spotify_manager.DATASTORE.getSavedTrackCount()
    def page_at(self, index):
        # Selecting a saved track opens its playback page.
        saved = spotify_manager.DATASTORE.getSavedTrack(index)
        return SingleTrackPage(saved, self)
class PlaceHolderPage(MenuPage):
    """Inert menu entry used for section headers and non-navigable rows."""
    def __init__(self, header, previous_page, has_sub_page=True, is_title = False):
        super().__init__(header, previous_page, has_sub_page, is_title)
class RootPage(MenuPage):
    """Top-level menu; the Now Playing entry only appears while something
    is actually playing."""
    def __init__(self, previous_page):
        super().__init__("sPot", previous_page, has_sub_page=True)
        self.pages = [
            ArtistsPage(self),
            AlbumsPage(self),
            NewReleasesPage(self),
            PlaylistsPage(self),
            ShowsPage(self),
            SearchPage(self),
            NowPlayingPage(self, "Now Playing", NowPlayingCommand())
        ]
        self.index = 0
        self.page_start = 0
    def get_pages(self):
        """All root entries, hiding the trailing Now Playing entry when idle."""
        if spotify_manager.DATASTORE.now_playing:
            return self.pages
        return self.pages[0:-1]
    def total_size(self):
        return len(self.get_pages())
    def page_at(self, index):
        return self.get_pages()[index]
| 32.592105 | 135 | 0.64352 | import spotify_manager
import re as re
from functools import lru_cache
MENU_PAGE_SIZE = 6
MENU_RENDER_TYPE = 0
NOW_PLAYING_RENDER = 1
SEARCH_RENDER = 2
LINE_NORMAL = 0
LINE_HIGHLIGHT = 1
LINE_TITLE = 2
spotify_manager.refresh_devices()
class LineItem():
def __init__(self, title = "", line_type = LINE_NORMAL, show_arrow = False):
self.title = title
self.line_type = line_type
self.show_arrow = show_arrow
class Rendering():
def __init__(self, type):
self.type = type
def unsubscribe(self):
pass
class MenuRendering(Rendering):
def __init__(self, header = "", lines = [], page_start = 0, total_count = 0):
super().__init__(MENU_RENDER_TYPE)
self.lines = lines
self.header = header
self.page_start = page_start
self.total_count = total_count
self.now_playing = spotify_manager.DATASTORE.now_playing
self.has_internet = spotify_manager.has_internet
class NowPlayingRendering(Rendering):
def __init__(self):
super().__init__(NOW_PLAYING_RENDER)
self.callback = None
self.after_id = None
def subscribe(self, app, callback):
if callback == self.callback:
return
new_callback = self.callback is None
self.callback = callback
self.app = app
if new_callback:
self.refresh()
def refresh(self):
if not self.callback:
return
if self.after_id:
self.app.after_cancel(self.after_id)
self.callback(spotify_manager.DATASTORE.now_playing)
self.after_id = self.app.after(500, lambda: self.refresh())
def unsubscribe(self):
super().unsubscribe()
self.callback = None
self.app = None
class NowPlayingCommand():
def __init__(self, runnable = lambda:()):
self.has_run = False
self.runnable = runnable
def run(self):
self.has_run = True
self.runnable()
class SearchRendering(Rendering):
def __init__(self, query, active_char):
super().__init__(SEARCH_RENDER)
self.query = query
self.active_char = active_char
self.loading = False
self.callback = None
self.results = None
def get_active_char(self):
return ' ' if self.active_char == 26 else chr(self.active_char + ord('a'))
def subscribe(self, app, callback):
if (callback == self.callback):
return
new_callback = self.callback is None
self.callback = callback
self.app = app
if new_callback:
self.refresh()
def refresh(self):
if not self.callback:
return
self.callback(self.query, self.get_active_char(), self.loading, self.results)
self.results = None
def unsubscribe(self):
super().unsubscribe()
self.callback = None
self.app = None
class SearchPage():
    """Character-picker search screen: scroll through a-z/space, build a query, run it async."""

    def __init__(self, previous_page):
        self.header = "Search"
        self.has_sub_page = True
        self.previous_page = previous_page
        self.live_render = SearchRendering("", 0)
        self.is_title = False

    def nav_prev(self):
        """Delete the last character of the query."""
        self.live_render.query = self.live_render.query[0:-1]
        self.live_render.refresh()

    def nav_next(self):
        """Append the currently selected character (query length capped at 16)."""
        if len(self.live_render.query) > 15:
            return
        active_char = ' ' if self.live_render.active_char == 26 \
            else chr(self.live_render.active_char + ord('a'))
        self.live_render.query += active_char
        self.live_render.refresh()

    def nav_play(self):
        pass

    def nav_up(self):
        """Advance the character picker, wrapping 26 -> 0."""
        self.live_render.active_char += 1
        if (self.live_render.active_char > 26):
            self.live_render.active_char = 0
        self.live_render.refresh()

    def nav_down(self):
        """Move the character picker back, wrapping 0 -> 26 (space)."""
        self.live_render.active_char -= 1
        if (self.live_render.active_char < 0):
            self.live_render.active_char = 26
        self.live_render.refresh()

    def run_search(self, query):
        """Blocking search worker: shows the loading state, stores results, refreshes."""
        self.live_render.loading = True
        self.live_render.refresh()
        self.live_render.results = spotify_manager.search(query)
        self.live_render.loading = False
        self.live_render.refresh()

    def nav_select(self):
        """Kick off the search on a background thread; stay on this page."""
        spotify_manager.run_async(lambda: self.run_search(self.live_render.query))
        return self

    def nav_back(self):
        return self.previous_page

    def render(self):
        return self.live_render
class NowPlayingPage():
    """Playback screen; transport controls are dispatched asynchronously via spotify_manager."""

    def __init__(self, previous_page, header, command):
        self.has_sub_page = False
        self.previous_page = previous_page
        self.command = command  # NowPlayingCommand executed lazily on first render
        self.header = header
        self.live_render = NowPlayingRendering()
        self.is_title = False

    def play_previous(self):
        spotify_manager.play_previous()
        self.live_render.refresh()

    def play_next(self):
        spotify_manager.play_next()
        self.live_render.refresh()

    def toggle_play(self):
        spotify_manager.toggle_play()
        self.live_render.refresh()

    def nav_prev(self):
        spotify_manager.run_async(lambda: self.play_previous())

    def nav_next(self):
        spotify_manager.run_async(lambda: self.play_next())

    def nav_play(self):
        spotify_manager.run_async(lambda: self.toggle_play())

    def nav_up(self):
        pass

    def nav_down(self):
        pass

    def nav_select(self):
        return self

    def nav_back(self):
        return self.previous_page

    def render(self):
        # Execute the page's command (e.g. start playback) exactly once, lazily.
        if (not self.command.has_run):
            self.command.run()
        return self.live_render
EMPTY_LINE_ITEM = LineItem()
class MenuPage():
    """Base class for scrollable list pages.

    Subclasses override total_size() and page_at(index); this class handles the
    cursor, the visible window of MENU_PAGE_SIZE rows, and rendering. Note that
    nav_up advances the cursor and nav_down moves it back — the names follow the
    physical scroll direction of the device.
    """

    def __init__(self, header, previous_page, has_sub_page, is_title = False):
        self.index = 0       # absolute index of the highlighted row
        self.page_start = 0  # absolute index of the first visible row
        self.header = header
        self.has_sub_page = has_sub_page
        self.previous_page = previous_page
        self.is_title = is_title

    def total_size(self):
        """Number of rows; subclasses override."""
        return 0

    def page_at(self, index):
        """Page object backing the given row; subclasses override."""
        return None

    def nav_prev(self):
        spotify_manager.run_async(lambda: spotify_manager.play_previous())

    def nav_next(self):
        spotify_manager.run_async(lambda: spotify_manager.play_next())

    def nav_play(self):
        spotify_manager.run_async(lambda: spotify_manager.toggle_play())

    def get_index_jump_up(self):
        # Hook: distance moved by nav_up (overridden to skip header rows).
        return 1

    def get_index_jump_down(self):
        # Hook: distance moved by nav_down (overridden to skip header rows).
        return 1

    def nav_up(self):
        jump = self.get_index_jump_up()
        if(self.index >= self.total_size() - jump):
            return  # already at the bottom
        if (self.index >= self.page_start + MENU_PAGE_SIZE - jump):
            self.page_start = self.page_start + jump  # scroll the window forward
        self.index = self.index + jump

    def nav_down(self):
        jump = self.get_index_jump_down()
        if(self.index <= (jump - 1)):
            return  # already at the top
        if (self.index <= self.page_start + (jump - 1)):
            self.page_start = self.page_start - jump  # scroll the window back
            if (self.page_start == 1):
                self.page_start = 0  # snap over a lone header row at the very top
        self.index = self.index - jump

    def nav_select(self):
        return self.page_at(self.index)

    def nav_back(self):
        return self.previous_page

    def render(self):
        """Build a MenuRendering of the visible window, padding short pages with blanks."""
        lines = []
        total_size = self.total_size()
        for i in range(self.page_start, self.page_start + MENU_PAGE_SIZE):
            if (i < total_size):
                page = self.page_at(i)
                if (page is None) :
                    lines.append(EMPTY_LINE_ITEM)
                else:
                    line_type = LINE_TITLE if page.is_title else \
                        LINE_HIGHLIGHT if i == self.index else LINE_NORMAL
                    lines.append(LineItem(page.header, line_type, page.has_sub_page))
            else:
                lines.append(EMPTY_LINE_ITEM)
        return MenuRendering(lines=lines, header=self.header, page_start=self.index, total_count=total_size)
class ShowsPage(MenuPage):
    """Menu of the user's saved podcasts."""

    def __init__(self, previous_page):
        super().__init__(self.get_title(), previous_page, has_sub_page=True)
        self.shows = self.get_content()
        self.num_shows = len(self.shows)

    def get_title(self):
        return "Podcasts"

    def get_content(self):
        return spotify_manager.DATASTORE.getAllSavedShows()

    def total_size(self):
        return self.num_shows

    # NOTE(review): lru_cache on an instance method keys on `self`, so cached
    # pages keep this object alive for the cache's lifetime — fine for a few
    # long-lived menu pages, but worth knowing.
    @lru_cache(maxsize=15)
    def page_at(self, index):
        return SingleShowPage(self.shows[index], self)
class PlaylistsPage(MenuPage):
    """Menu of the user's saved playlists, sorted by user-defined playlist index."""

    def __init__(self, previous_page):
        super().__init__(self.get_title(), previous_page, has_sub_page=True)
        self.playlists = self.get_content()
        self.num_playlists = len(self.playlists)
        self.playlists.sort(key=self.get_idx)

    def get_title(self):
        return "Playlists"

    def get_content(self):
        """Content hook; subclasses (Albums, New Releases) override."""
        return spotify_manager.DATASTORE.getAllSavedPlaylists()

    def get_idx(self, e):
        # Sort key: user playlists carry an explicit index, everything else sorts first.
        if type(e) == spotify_manager.UserPlaylist:
            return e.idx
        else:
            return 0

    def total_size(self):
        return self.num_playlists

    @lru_cache(maxsize=15)
    def page_at(self, index):
        return SinglePlaylistPage(self.playlists[index], self)
class AlbumsPage(PlaylistsPage):
    """Menu of the user's saved albums; reuses PlaylistsPage pagination with album content."""

    def __init__(self, previous_page):
        super().__init__(previous_page)

    def get_content(self):
        """Fetch the saved albums that back this menu."""
        return spotify_manager.DATASTORE.getAllSavedAlbums()

    def get_title(self):
        return "Albums"
class SearchResultsPage(MenuPage):
    """Menu presenting search results as three titled sections: tracks, artists, albums.

    Rows are laid out as one flat list; section-header rows are non-selectable
    title lines, and get_index_jump_up/down make the cursor jump over them.
    """

    def __init__(self, previous_page, results):
        super().__init__("Search Results", previous_page, has_sub_page=True)
        self.results = results
        tracks, albums, artists = len(results.tracks), len(results.albums), len(results.artists)
        # Add 1 to each count (if > 0) to make room for section header line items
        self.tracks = tracks + 1 if tracks > 0 else 0
        self.artists = artists + 1 if artists > 0 else 0
        self.albums = albums + 1 if albums > 0 else 0
        self.total_count = self.tracks + self.albums + self.artists
        self.index = 1  # start on the first result row, not the section header
        # indices of the section header line items
        self.header_indices = [0, self.tracks, self.artists + self.tracks]

    def total_size(self):
        return self.total_count

    def page_at(self, index):
        """Map a flat row index to a section-header placeholder or a playable page."""
        if self.tracks > 0 and index == 0:
            return PlaceHolderPage("TRACKS", self, has_sub_page=False, is_title=True)
        elif self.artists > 0 and index == self.header_indices[1]:
            return PlaceHolderPage("ARTISTS", self, has_sub_page=False, is_title=True)
        elif self.albums > 0 and index == self.header_indices[2]:
            return PlaceHolderPage("ALBUMS", self, has_sub_page=False, is_title=True)
        elif self.tracks > 0 and index < self.header_indices[1]:
            track = self.results.tracks[index - 1]
            command = NowPlayingCommand(lambda: spotify_manager.play_track(track.uri))
            return NowPlayingPage(self, track.title, command)
        # Bug fix: this branch serves the ARTISTS section, so it must be guarded
        # by self.artists (the old `self.albums > 0` test fell through to the
        # albums branch — and crashed — when artists were present but albums empty).
        elif self.artists > 0 and index < self.header_indices[2]:
            artist = self.results.artists[index - (self.tracks + 1)]
            command = NowPlayingCommand(lambda: spotify_manager.play_artist(artist.uri))
            return NowPlayingPage(self, artist.name, command)
        else:
            album = self.results.albums[index - (self.artists + self.tracks + 1)]
            tracks = self.results.album_track_map[album.uri]
            return InMemoryPlaylistPage(album, tracks, self)

    def get_index_jump_up(self):
        # Jump 2 when the next row is a section header so the cursor skips it.
        if self.index + 1 in self.header_indices:
            return 2
        return 1

    def get_index_jump_down(self):
        # Jump 2 when the previous row is a section header so the cursor skips it.
        if self.index - 1 in self.header_indices:
            return 2
        return 1
class NewReleasesPage(PlaylistsPage):
    """Menu of newly released albums; shares PlaylistsPage behavior with different content."""

    def __init__(self, previous_page):
        super().__init__(previous_page)

    def get_title(self):
        return "New Releases"

    def get_content(self):
        return spotify_manager.DATASTORE.getAllNewReleases()
class ArtistsPage(MenuPage):
    """Menu of followed artists; selecting one opens Now Playing and starts artist playback."""

    def __init__(self, previous_page):
        super().__init__("Artists", previous_page, has_sub_page=True)

    def total_size(self):
        return spotify_manager.DATASTORE.getArtistCount()

    def page_at(self, index):
        # play track
        artist = spotify_manager.DATASTORE.getArtist(index)
        command = NowPlayingCommand(lambda: spotify_manager.play_artist(artist.uri))
        return NowPlayingPage(self, artist.name, command)
class SingleArtistPage(MenuPage):
    """Page for one artist; currently a stub with no content of its own."""

    def __init__(self, artistName, previous_page):
        super().__init__(artistName, previous_page, has_sub_page=True)
class SinglePlaylistPage(MenuPage):
    """Track list for one playlist; tracks are fetched lazily on first access."""

    def __init__(self, playlist, previous_page):
        # Credit for code to remove emoticons from string: https://stackoverflow.com/a/49986645
        regex_pattern = re.compile(pattern = "["
            u"\U0001F600-\U0001F64F"  # emoticons
            u"\U0001F300-\U0001F5FF"  # symbols & pictographs
            u"\U0001F680-\U0001F6FF"  # transport & map symbols
            u"\U0001F1E0-\U0001F1FF"  # flags (iOS)
            "]+", flags = re.UNICODE)
        super().__init__(regex_pattern.sub(r'',playlist.name), previous_page, has_sub_page=True)
        self.playlist = playlist
        self.tracks = None  # lazy cache, populated by get_tracks()

    def get_tracks(self):
        """Fetch and cache the playlist's tracks on first use."""
        if self.tracks is None:
            self.tracks = spotify_manager.DATASTORE.getPlaylistTracks(self.playlist.uri)
        return self.tracks

    def total_size(self):
        return self.playlist.track_count

    def page_at(self, index):
        track = self.get_tracks()[index]
        command = NowPlayingCommand(lambda: spotify_manager.play_from_playlist(self.playlist.uri, track.uri, None))
        return NowPlayingPage(self, track.title, command)
class SingleShowPage(MenuPage):
    """Episode list for one podcast show; episodes are fetched lazily."""

    def __init__(self, show, previous_page):
        super().__init__(show.name, previous_page, has_sub_page=True)
        self.show = show
        self.episodes = None  # lazy cache, populated by get_episodes()

    def get_episodes(self):
        """Fetch and cache the show's episodes on first use."""
        if self.episodes is None:
            self.episodes = spotify_manager.DATASTORE.getShowEpisodes(self.show.uri)
        return self.episodes

    def total_size(self):
        return self.show.episode_count

    def page_at(self, index):
        episode = self.get_episodes()[index]
        command = NowPlayingCommand(lambda: spotify_manager.play_from_show(self.show.uri, episode.uri, None))
        return NowPlayingPage(self, episode.name, command)
class InMemoryPlaylistPage(SinglePlaylistPage):
    """SinglePlaylistPage variant whose tracks are supplied up front (e.g. search results)."""

    def __init__(self, playlist, tracks, previous_page):
        super().__init__(playlist, previous_page)
        self.tracks = tracks  # pre-populated, so get_tracks() never hits the datastore
class SingleTrackPage(MenuPage):
    """Leaf page for one track; rendering it starts playback in its playlist/album context."""

    def __init__(self, track, previous_page, playlist = None, album = None):
        super().__init__(track.title, previous_page, has_sub_page=False)
        self.track = track
        self.playlist = playlist
        self.album = album

    def render(self):
        r = super().render()
        print("render track")
        # Side effect: rendering kicks off playback from the surrounding context.
        context_uri = self.playlist.uri if self.playlist else self.album.uri
        spotify_manager.play_from_playlist(context_uri, self.track.uri, None)
        return r
class SingleEpisodePage(MenuPage):
    """Leaf page for one podcast episode; rendering it starts playback in the show context."""

    def __init__(self, episode, previous_page, show = None):
        super().__init__(episode.name, previous_page, has_sub_page=False)
        self.episode = episode
        self.show = show

    def render(self):
        r = super().render()
        print("render episode")
        # Side effect: rendering kicks off playback of this episode.
        context_uri = self.show.uri
        spotify_manager.play_from_show(context_uri, self.episode.uri, None)
        return r
class SavedTracksPage(MenuPage):
    """Menu of the user's saved (liked) tracks."""

    def __init__(self, previous_page):
        super().__init__("Saved Tracks", previous_page, has_sub_page=True)

    def total_size(self):
        return spotify_manager.DATASTORE.getSavedTrackCount()

    def page_at(self, index):
        # Selecting a row opens the track page, which starts playback on render.
        return SingleTrackPage(spotify_manager.DATASTORE.getSavedTrack(index), self)
class PlaceHolderPage(MenuPage):
    """Inert MenuPage used for non-selectable rows such as section headers."""

    def __init__(self, header, previous_page, has_sub_page=True, is_title = False):
        super().__init__(header, previous_page, has_sub_page, is_title)
class RootPage(MenuPage):
    """Top-level menu; hides the trailing Now Playing entry until something is playing."""

    def __init__(self, previous_page):
        super().__init__("sPot", previous_page, has_sub_page=True)
        self.pages = [
            ArtistsPage(self),
            AlbumsPage(self),
            NewReleasesPage(self),
            PlaylistsPage(self),
            ShowsPage(self),
            SearchPage(self),
            NowPlayingPage(self, "Now Playing", NowPlayingCommand())
        ]
        self.index = 0
        self.page_start = 0

    def get_pages(self):
        # Drop the last entry (Now Playing) when nothing is currently playing.
        if (not spotify_manager.DATASTORE.now_playing):
            return self.pages[0:-1]
        return self.pages

    def total_size(self):
        return len(self.get_pages())

    def page_at(self, index):
        return self.get_pages()[index]
| true | true |
f73a56bb8c3fa24fbce3f3d3df74194631bb27bd | 1,025 | py | Python | manage.py | kevotovar/kuras-backend | 22746977cb54018a7cf3a35a6bbe0fe04d21c2aa | [
"MIT"
] | null | null | null | manage.py | kevotovar/kuras-backend | 22746977cb54018a7cf3a35a6bbe0fe04d21c2aa | [
"MIT"
] | null | null | null | manage.py | kevotovar/kuras-backend | 22746977cb54018a7cf3a35a6bbe0fe04d21c2aa | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Default to the local settings module unless the environment overrides it.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")

    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django  # noqa
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise

    # Put the inner "kuras" directory on sys.path so apps placed there can be
    # imported without a package prefix.
    current_path = os.path.dirname(os.path.abspath(__file__))
    sys.path.append(os.path.join(current_path, "kuras"))

    execute_from_command_line(sys.argv)
| 33.064516 | 77 | 0.654634 |
import os
import sys
if __name__ == "__main__":
    # Default to the local settings module unless the environment overrides it.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # Distinguish "Django is missing" from other import failures so the
        # real cause is not masked.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    # Put the inner "kuras" directory on sys.path so apps placed there import cleanly.
    current_path = os.path.dirname(os.path.abspath(__file__))
    sys.path.append(os.path.join(current_path, "kuras"))
    execute_from_command_line(sys.argv)
| true | true |
f73a57a9315e6ad405d7faab8e9ae449849bbaa4 | 2,783 | py | Python | core/entry/entry.py | MaiAbboud/SwinTrack | 10b5636674f470a0df7d8c58df8b7e54f57ee324 | [
"MIT"
] | 1 | 2022-02-16T11:29:26.000Z | 2022-02-16T11:29:26.000Z | core/entry/entry.py | MaiAbboud/SwinTrack | 10b5636674f470a0df7d8c58df8b7e54f57ee324 | [
"MIT"
] | null | null | null | core/entry/entry.py | MaiAbboud/SwinTrack | 10b5636674f470a0df7d8c58df8b7e54f57ee324 | [
"MIT"
] | null | null | null | from miscellanies.torch.distributed import is_main_process
from contextlib import nullcontext
import torch.distributed
from miscellanies.torch.distributed import is_dist_available_and_initialized, get_world_size
import socket
import pprint
import os
from miscellanies.yaml_ops import load_yaml
from .sweep_utils import prepare_sweep
from .mixin_utils import load_static_mixin_config_and_apply_rules
from .build_and_run import build_and_run
def update_output_dir(args):
    """Redirect args.output_dir into a per-run subdirectory named by args.run_id,
    creating it if necessary. No-op when output_dir is unset."""
    if args.output_dir is None:
        return
    args.output_dir = os.path.join(args.output_dir, args.run_id)
    os.makedirs(args.output_dir, exist_ok=True)
def entry(runtime_vars):
    """Load the experiment config, report host info, set up wandb (currently
    disabled — the setup_wandb calls are commented out) and hand off to
    build_and_run."""
    config_path = os.path.join(runtime_vars.config_path, runtime_vars.method_name, runtime_vars.config_name, 'config.yaml')
    config = load_yaml(config_path)
    if runtime_vars.mixin_config is not None:
        load_static_mixin_config_and_apply_rules(runtime_vars, config)

    # Collect hostname/IP of every rank so the run can be traced to machines.
    my_hostname = socket.gethostname()
    my_ip = socket.gethostbyname(my_hostname)
    print(f'Hostname: {my_hostname}')
    print(f'IP: {my_ip}')
    if is_dist_available_and_initialized():
        host_names = [None] * get_world_size()
        torch.distributed.all_gather_object(host_names, [my_ip, my_hostname])
        host_names = {ip: hostname for ip, hostname in host_names}
        print('Distributed Group:')
        pprint.pprint(host_names)
    else:
        host_names = {my_ip: my_hostname}

    if not runtime_vars.do_sweep:
        update_output_dir(runtime_vars)
    wandb_instance = None
    if runtime_vars.wandb_distributed_aware or not is_dist_available_and_initialized():
        # if runtime_vars.wandb_distributed_aware:
        from .setup_wandb import setup_wandb
        # wandb_instance = setup_wandb(runtime_vars, config, str(host_names))
        wandb_instance = None
        if runtime_vars.do_sweep:
            # NOTE(review): with setup_wandb commented out, wandb_instance is
            # None here, so this attribute access would raise AttributeError —
            # confirm sweeps are not run while wandb is disabled.
            runtime_vars.run_id = wandb_instance.id
            update_output_dir(runtime_vars)
    else:
        if is_main_process():
            from .setup_wandb import setup_wandb
            # wandb_instance = setup_wandb(runtime_vars, config, str(host_names))
            wandb_instance = None
        if runtime_vars.do_sweep:
            # Broadcast the run id from rank 0 so every rank uses the same output dir.
            if is_main_process():
                run_id = [wandb_instance.id]
            else:
                run_id = [None]
            torch.distributed.broadcast_object_list(run_id)
            runtime_vars.run_id = run_id[0]
            update_output_dir(runtime_vars)

    # wandb_instance may be None; nullcontext keeps the with-block valid either way.
    wandb_context = wandb_instance if wandb_instance is not None else nullcontext()
    with wandb_context:
        if runtime_vars.do_sweep:
            prepare_sweep(runtime_vars, wandb_instance, config)
        build_and_run(runtime_vars, config, wandb_instance)
| 38.652778 | 123 | 0.720086 | from miscellanies.torch.distributed import is_main_process
from contextlib import nullcontext
import torch.distributed
from miscellanies.torch.distributed import is_dist_available_and_initialized, get_world_size
import socket
import pprint
import os
from miscellanies.yaml_ops import load_yaml
from .sweep_utils import prepare_sweep
from .mixin_utils import load_static_mixin_config_and_apply_rules
from .build_and_run import build_and_run
def update_output_dir(args):
    """Redirect args.output_dir into a per-run subdirectory named by args.run_id."""
    if args.output_dir is not None:
        args.output_dir = os.path.join(args.output_dir, args.run_id)
        os.makedirs(args.output_dir, exist_ok=True)
def entry(runtime_vars):
    """Load the experiment config, report host info, set up wandb (currently
    disabled — wandb_instance is hard-set to None) and hand off to build_and_run."""
    config_path = os.path.join(runtime_vars.config_path, runtime_vars.method_name, runtime_vars.config_name, 'config.yaml')
    config = load_yaml(config_path)
    if runtime_vars.mixin_config is not None:
        load_static_mixin_config_and_apply_rules(runtime_vars, config)
    # Collect hostname/IP of every rank so the run can be traced to machines.
    my_hostname = socket.gethostname()
    my_ip = socket.gethostbyname(my_hostname)
    print(f'Hostname: {my_hostname}')
    print(f'IP: {my_ip}')
    if is_dist_available_and_initialized():
        host_names = [None] * get_world_size()
        torch.distributed.all_gather_object(host_names, [my_ip, my_hostname])
        host_names = {ip: hostname for ip, hostname in host_names}
        print('Distributed Group:')
        pprint.pprint(host_names)
    else:
        host_names = {my_ip: my_hostname}
    if not runtime_vars.do_sweep:
        update_output_dir(runtime_vars)
    wandb_instance = None
    if runtime_vars.wandb_distributed_aware or not is_dist_available_and_initialized():
        from .setup_wandb import setup_wandb
        wandb_instance = None
        if runtime_vars.do_sweep:
            # NOTE(review): wandb_instance is None at this point, so this
            # attribute access would raise — confirm sweeps are not run while
            # wandb setup is disabled.
            runtime_vars.run_id = wandb_instance.id
            update_output_dir(runtime_vars)
    else:
        if is_main_process():
            from .setup_wandb import setup_wandb
            wandb_instance = None
        if runtime_vars.do_sweep:
            # Broadcast the run id from rank 0 so every rank uses the same output dir.
            if is_main_process():
                run_id = [wandb_instance.id]
            else:
                run_id = [None]
            torch.distributed.broadcast_object_list(run_id)
            runtime_vars.run_id = run_id[0]
            update_output_dir(runtime_vars)
    # wandb_instance may be None; nullcontext keeps the with-block valid either way.
    wandb_context = wandb_instance if wandb_instance is not None else nullcontext()
    with wandb_context:
        if runtime_vars.do_sweep:
            prepare_sweep(runtime_vars, wandb_instance, config)
        build_and_run(runtime_vars, config, wandb_instance)
| true | true |
f73a587b2f08b3c6f9d26edc537bbeda0b6f2156 | 4,034 | py | Python | utils.py | JosephRynkiewicz/CIFAR100 | 26e44e15346e31cae0522eb02099dd15e47f3a0f | [
"MIT"
] | 2 | 2021-05-20T10:26:45.000Z | 2021-11-02T13:59:14.000Z | utils.py | JosephRynkiewicz/CIFAR10 | 2eeef95480fdc8454296cbe2f90011aef660c6a8 | [
"MIT"
] | null | null | null | utils.py | JosephRynkiewicz/CIFAR10 | 2eeef95480fdc8454296cbe2f90011aef660c6a8 | [
"MIT"
] | 1 | 2020-10-12T14:39:15.000Z | 2020-10-12T14:39:15.000Z | '''Some helper functions for PyTorch, including:
- get_mean_and_std: calculate the mean and std value of dataset.
- msr_init: net parameter initialization.
    - progress_bar: progress bar that mimics xlua.progress.
'''
import os
import sys
import time
import math
import torch
import torch.nn as nn
import torch.nn.init as init
def get_mean_and_std(dataset):
    '''Compute the mean and std value of dataset.'''
    # Iterates one sample at a time and averages per-sample channel statistics,
    # so the result is the mean of per-image means/stds (not a single global std).
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, num_workers=2)
    mean = torch.zeros(3)
    std = torch.zeros(3)
    print('==> Computing mean and std..')
    for inputs, targets in dataloader:
        for i in range(3):
            mean[i] += inputs[:,i,:,:].mean()
            std[i] += inputs[:,i,:,:].std()
    mean.div_(len(dataset))
    std.div_(len(dataset))
    return mean, std
#def init_params(net):
# '''Init layer parameters.'''
# for m in net.modules():
# if isinstance(m, nn.Conv2d):
# init.kaiming_normal(m.weight, mode='fan_out')
# if m.bias:
# init.constant(m.bias, 0)
# elif isinstance(m, nn.BatchNorm2d):
# init.constant(m.weight, 1)
# init.constant(m.bias, 0)
# elif isinstance(m, nn.Linear):
# init.normal(m.weight, std=1e-3)
# if m.bias:
# init.constant(m.bias, 0)
# Query the terminal width once at import time. shutil.get_terminal_size()
# falls back to the COLUMNS env var or 80x24 when stdout is not attached to a
# TTY, unlike the previous `stty size` popen, which returned an empty string
# there and crashed on the unpack.
term_width = shutil.get_terminal_size().columns

# Width of the drawn progress bar; timers for per-step timing.
TOTAL_BAR_LENGTH = 65.
last_time = time.time()
begin_time = last_time
def progress_bar(current, total, msg=None):
    """Draw an in-place terminal progress bar (xlua.progress style).

    `current` is the zero-based step index, `total` the step count, and `msg`
    an optional status suffix (e.g. loss/accuracy text).
    """
    global last_time, begin_time
    if current == 0:
        begin_time = time.time()  # Reset for new bar.

    cur_len = int(TOTAL_BAR_LENGTH*current/total)
    rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1

    sys.stdout.write(' [')
    for i in range(cur_len):
        sys.stdout.write('=')
    sys.stdout.write('>')
    for i in range(rest_len):
        sys.stdout.write('.')
    sys.stdout.write(']')

    cur_time = time.time()
    step_time = cur_time - last_time  # time since the previous call
    last_time = cur_time
    tot_time = cur_time - begin_time  # time since step 0

    L = []
    L.append(' Step: %s' % format_time(step_time))
    L.append(' | Tot: %s' % format_time(tot_time))
    if msg:
        L.append(' | ' + msg)

    msg = ''.join(L)
    sys.stdout.write(msg)
    # Pad to the terminal edge so leftovers from longer previous lines are erased.
    for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):
        sys.stdout.write(' ')

    # Go back to the center of the bar.
    for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2):
        sys.stdout.write('\b')
    sys.stdout.write(' %d/%d ' % (current+1, total))

    if current < total-1:
        sys.stdout.write('\r')  # stay on the same line for the next update
    else:
        sys.stdout.write('\n')  # final step: move to a fresh line
    sys.stdout.flush()
def format_time(seconds):
    """Render a duration (seconds, possibly fractional) using at most the two
    most significant units, e.g. '1D2h', '3m4s' or '500ms'; '0ms' for zero."""
    remaining = seconds
    days = int(remaining / (3600 * 24))
    remaining -= days * 3600 * 24
    hours = int(remaining / 3600)
    remaining -= hours * 3600
    minutes = int(remaining / 60)
    remaining -= minutes * 60
    whole_secs = int(remaining)
    millis = int((remaining - whole_secs) * 1000)

    pieces = []
    for amount, unit in ((days, 'D'), (hours, 'h'), (minutes, 'm'),
                         (whole_secs, 's'), (millis, 'ms')):
        # Keep only the first two non-zero components for a compact display.
        if amount > 0 and len(pieces) < 2:
            pieces.append('%d%s' % (amount, unit))
    return ''.join(pieces) if pieces else '0ms'
def get_lr(step, base_lr=0.003):
    """Returns learning-rate for `step` or None at the end.

    Schedule: linear warmup over the first 500 steps, then a staircase that
    divides the rate by 10 at each passed decay boundary; training ends at
    step 10_000.
    """
    supports = [500, 3000, 6000, 9000, 10_000]
    if step < supports[0]:
        # Still warming up: scale linearly from 0 to base_lr.
        return base_lr * step / supports[0]
    if step >= supports[-1]:
        # Past the last boundary: signal end of training.
        return None
    # Staircase: one /10 decay per boundary already passed.
    passed = [boundary for boundary in supports[1:] if boundary < step]
    for _ in passed:
        base_lr /= 10
    return base_lr
def recycle(iterable):
    """Endlessly re-iterate *iterable* without caching its items (unlike
    itertools.cycle, which stores every element it has seen)."""
    while True:
        yield from iterable
| 27.073826 | 97 | 0.584531 | import os
import sys
import time
import math
import torch
import torch.nn as nn
import torch.nn.init as init
def get_mean_and_std(dataset):
    """Compute per-channel mean and std of a dataset, one sample at a time
    (averages per-image channel statistics rather than a single global std)."""
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, num_workers=2)
    mean = torch.zeros(3)
    std = torch.zeros(3)
    print('==> Computing mean and std..')
    for inputs, targets in dataloader:
        for i in range(3):
            mean[i] += inputs[:,i,:,:].mean()
            std[i] += inputs[:,i,:,:].std()
    mean.div_(len(dataset))
    std.div_(len(dataset))
    return mean, std
# Probe the terminal width once via `stty size` (requires an attached TTY).
_, term_width = os.popen('stty size', 'r').read().split()
term_width = int(term_width)
# Width of the drawn progress bar; timers for per-step timing.
TOTAL_BAR_LENGTH = 65.
last_time = time.time()
begin_time = last_time
def progress_bar(current, total, msg=None):
    """Draw an in-place terminal progress bar for step `current` of `total`,
    with per-step and total timing plus an optional status suffix `msg`."""
    global last_time, begin_time
    if current == 0:
        begin_time = time.time()  # new bar: reset the total timer
    cur_len = int(TOTAL_BAR_LENGTH*current/total)
    rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1
    sys.stdout.write(' [')
    for i in range(cur_len):
        sys.stdout.write('=')
    sys.stdout.write('>')
    for i in range(rest_len):
        sys.stdout.write('.')
    sys.stdout.write(']')
    cur_time = time.time()
    step_time = cur_time - last_time  # time since the previous call
    last_time = cur_time
    tot_time = cur_time - begin_time  # time since step 0
    L = []
    L.append(' Step: %s' % format_time(step_time))
    L.append(' | Tot: %s' % format_time(tot_time))
    if msg:
        L.append(' | ' + msg)
    msg = ''.join(L)
    sys.stdout.write(msg)
    # Pad to the terminal edge to erase leftovers from longer previous lines.
    for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):
        sys.stdout.write(' ')
    # Rewind to the middle of the bar and print the step counter there.
    for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2):
        sys.stdout.write('\b')
    sys.stdout.write(' %d/%d ' % (current+1, total))
    if current < total-1:
        sys.stdout.write('\r')  # stay on the same line for the next update
    else:
        sys.stdout.write('\n')  # final step: move to a fresh line
    sys.stdout.flush()
def format_time(seconds):
    """Format a duration as at most two units (e.g. '1D2h', '3m4s'); '0ms' if zero."""
    days = int(seconds / 3600/24)
    seconds = seconds - days*3600*24
    hours = int(seconds / 3600)
    seconds = seconds - hours*3600
    minutes = int(seconds / 60)
    seconds = seconds - minutes*60
    secondsf = int(seconds)
    seconds = seconds - secondsf
    millis = int(seconds*1000)
    f = ''
    i = 1
    # i counts emitted components; stop after two for a compact display.
    if days > 0:
        f += str(days) + 'D'
        i += 1
    if hours > 0 and i <= 2:
        f += str(hours) + 'h'
        i += 1
    if minutes > 0 and i <= 2:
        f += str(minutes) + 'm'
        i += 1
    if secondsf > 0 and i <= 2:
        f += str(secondsf) + 's'
        i += 1
    if millis > 0 and i <= 2:
        f += str(millis) + 'ms'
        i += 1
    if f == '':
        f = '0ms'
    return f
def get_lr(step, base_lr=0.003):
    """Returns the learning rate for `step`, or None once training is done."""
    supports = [500, 3000, 6000, 9000, 10_000]
    if step < supports[0]:
        # Linear warmup over the first 500 steps.
        return base_lr * step / supports[0]
    elif step >= supports[-1]:
        # Past the final boundary: signal end of training.
        return None
    else:
        # Staircase: divide by 10 at each decay boundary already passed.
        for s in supports[1:]:
            if s < step:
                base_lr /= 10
        return base_lr
return base_lr
def recycle(iterable):
    """Endlessly cycle over `iterable` without caching items (unlike itertools.cycle)."""
    while True:
        for i in iterable:
            yield i
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.