input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
anymore. The state of the instance could be persisted on the host
and allocate storage space this way. A "softer" way of `suspend`
is :func:`pause`. The counter action for `suspend` is :func:`resume`.
:param nova.context.RequestContext context:
The context for the suspend.
:param nova.objects.instance.Instance instance:
The instance to suspend.
:return: None
"""
raise NotImplementedError()
def resume(self, context, instance, network_info, block_device_info=None):
    """resume the specified suspended instance.

    The suspended instance gets resumed and will use CPU cycles and memory
    of the host again. The counter action for 'resume' is :func:`suspend`.
    Depending on the underlying hypervisor technology, the guest has the
    same state as before the 'suspend'.

    :param nova.context.RequestContext context:
        The context for the resume.
    :param nova.objects.instance.Instance instance:
        The suspended instance to resume.
    :param nova.network.model.NetworkInfo network_info:
        Necessary network information for the resume.
    :param dict block_device_info:
        Instance volume block device info.

    :return: None
    """
    raise NotImplementedError()
def resume_state_on_host_boot(self, context, instance, network_info,
                              block_device_info=None):
    """resume guest state when a host is booted.

    :param nova.context.RequestContext context:
        The context for the resume.
    :param instance: nova.objects.instance.Instance
    :param nova.network.model.NetworkInfo network_info:
        Necessary network information for the resume.
    :param dict block_device_info:
        Instance volume block device info.
    """
    raise NotImplementedError()
def rescue(self, context, instance, network_info, image_meta,
           rescue_password):
    """Rescue the specified instance.

    :param nova.context.RequestContext context:
        The context for the rescue.
    :param nova.objects.instance.Instance instance:
        The instance being rescued.
    :param nova.network.model.NetworkInfo network_info:
        Necessary network information for the rescue.
    :param nova.objects.ImageMeta image_meta:
        The metadata of the image of the instance.
    :param rescue_password:
        Password to set on the rescued instance.

    :return: None
    """
    raise NotImplementedError()
def set_bootable(self, instance, is_bootable):
    """Set the ability to power on/off an instance.

    :param instance: nova.objects.instance.Instance
    :param bool is_bootable: whether the instance may be powered on/off
    """
    raise NotImplementedError()
def unrescue(self, instance, network_info):
    """Unrescue the specified instance.

    :param instance: nova.objects.instance.Instance
    :param network_info: instance network information
    """
    # TODO(Vek): Need to pass context in for access to auth_token
    raise NotImplementedError()
def power_off(self, instance, timeout=0, retry_interval=0):
    """Power off the specified instance.

    :param instance: nova.objects.instance.Instance
    :param timeout: time to wait for GuestOS to shutdown
    :param retry_interval: How often to signal guest while
                           waiting for it to shutdown

    :return: None
    """
    raise NotImplementedError()
def power_on(self, context, instance, network_info,
             block_device_info=None):
    """Power on the specified instance.

    :param context: nova.context.RequestContext
    :param instance: nova.objects.instance.Instance
    :param network_info: instance network information
    :param block_device_info: instance volume block device info
    """
    raise NotImplementedError()
def trigger_crash_dump(self, instance):
    """Trigger crash dump mechanism on the given instance.

    Stalling instances can be triggered to dump the crash data. How the
    guest OS reacts in details, depends on the configuration of it.

    :param nova.objects.instance.Instance instance:
        The instance where the crash dump should be triggered.

    :return: None
    """
    raise NotImplementedError()
def soft_delete(self, instance):
    """Soft delete the specified instance.

    A soft-deleted instance doesn't allocate any resources anymore, but is
    still available as a database entry. The counter action :func:`restore`
    uses the database entry to create a new instance based on that.

    :param nova.objects.instance.Instance instance:
        The instance to soft-delete.

    :return: None
    """
    raise NotImplementedError()
def restore(self, instance):
    """Restore the specified soft-deleted instance.

    The restored instance will be automatically booted. The counter action
    for `restore` is :func:`soft_delete`.

    :param nova.objects.instance.Instance instance:
        The soft-deleted instance which should be restored from the
        soft-deleted data.

    :return: None
    """
    raise NotImplementedError()
def get_available_resource(self, nodename):
    """Retrieve resource information.

    This method is called when nova-compute launches, and
    as part of a periodic task that records the results in the DB.

    :param nodename:
        node which the caller want to get resources from
        a driver that manages only one node can safely ignore this

    :returns: Dictionary describing resources
    """
    raise NotImplementedError()
def pre_live_migration(self, context, instance, block_device_info,
                       network_info, disk_info, migrate_data=None):
    """Prepare an instance for live migration

    :param context: security context
    :param instance: nova.objects.instance.Instance object
    :param block_device_info: instance block device information
    :param network_info: instance network information
    :param disk_info: instance disk information
    :param migrate_data: a LiveMigrateData object
    """
    raise NotImplementedError()
def live_migration(self, context, instance, dest,
                   post_method, recover_method, block_migration=False,
                   migrate_data=None):
    """Live migration of an instance to another host.

    :param context: security context
    :param instance:
        nova.db.sqlalchemy.models.Instance object
        instance object that is migrated.
    :param dest: destination host
    :param post_method:
        post operation method.
        expected nova.compute.manager._post_live_migration.
    :param recover_method:
        recovery method when any exception occurs.
        expected nova.compute.manager._rollback_live_migration.
    :param block_migration: if true, migrate VM disk.
    :param migrate_data: a LiveMigrateData object
    """
    raise NotImplementedError()
def live_migration_force_complete(self, instance):
    """Force live migration to complete

    :param instance: Instance being live migrated
    """
    raise NotImplementedError()
def live_migration_abort(self, instance):
    """Abort an in-progress live migration.

    :param instance: instance that is live migrating
    """
    raise NotImplementedError()
def rollback_live_migration_at_destination(self, context, instance,
                                           network_info,
                                           block_device_info,
                                           destroy_disks=True,
                                           migrate_data=None):
    """Clean up destination node after a failed live migration.

    :param context: security context
    :param instance: instance object that was being migrated
    :param network_info: instance network information
    :param block_device_info: instance block device information
    :param destroy_disks:
        if true, destroy disks at destination during cleanup
    :param migrate_data: a LiveMigrateData object
    """
    raise NotImplementedError()
def post_live_migration(self, context, instance, block_device_info,
                        migrate_data=None):
    """Post operation of live migration at source host.

    :param context: security context
    :param instance: instance object that was migrated
    :param block_device_info: instance block device information
    :param migrate_data: a LiveMigrateData object
    """
    pass
def post_live_migration_at_source(self, context, instance, network_info):
    """Unplug VIFs from networks at source.

    :param context: security context
    :param instance: instance object reference
    :param network_info: instance network information
    """
    raise NotImplementedError(_("Hypervisor driver does not support "
                                "post_live_migration_at_source method"))
def post_live_migration_at_destination(self, context, instance,
                                       network_info,
                                       block_migration=False,
                                       block_device_info=None):
    """Post operation of live migration at destination host.

    :param context: security context
    :param instance: instance object that is migrated
    :param network_info: instance network information
    :param block_migration: if true, post operation of block_migration.
    :param block_device_info: instance block device information
    """
    raise NotImplementedError()
def check_instance_shared_storage_local(self, context, instance):
    """Check if instance files located on shared storage.

    This runs check on the destination host, and then calls
    back to the source host to check the results.

    :param context: security context
    :param instance: nova.objects.instance.Instance object
    """
    raise NotImplementedError()
def check_instance_shared_storage_remote(self, context, data):
    """Check if instance files located on shared storage.

    :param context: security context
    :param data: result of check_instance_shared_storage_local
    """
    raise NotImplementedError()
def check_instance_shared_storage_cleanup(self, context, data):
    """Do cleanup on host after check_instance_shared_storage calls

    :param context: security context
    :param data: result of check_instance_shared_storage_local
    """
    pass
def check_can_live_migrate_destination(self, context, instance,
                                       src_compute_info, dst_compute_info,
                                       block_migration=False,
                                       disk_over_commit=False):
    """Check if it is possible to execute live migration.

    This runs checks on the destination host, and then calls
    back to the source host to check the results.

    :param context: security context
    :param instance: nova.db.sqlalchemy.models.Instance
    :param src_compute_info: Info about the sending machine
    :param dst_compute_info: Info about the receiving machine
    :param block_migration: if true, prepare for block migration
    :param disk_over_commit: if true, allow disk over commit

    :returns: a LiveMigrateData object (hypervisor-dependent)
    """
    raise NotImplementedError()
def cleanup_live_migration_destination_check(self, context,
                                             dest_check_data):
    """Do required cleanup on dest host after check_can_live_migrate calls

    :param context: security context
    :param dest_check_data: result of check_can_live_migrate_destination
    """
    raise NotImplementedError()
def check_can_live_migrate_source(self, context, instance,
                                  dest_check_data, block_device_info=None):
    """Check if it is possible to execute live migration.

    This checks if the live migration can succeed, based on the
    results from check_can_live_migrate_destination.

    :param context: security context
    :param instance: nova.db.sqlalchemy.models.Instance
    :param dest_check_data: result of check_can_live_migrate_destination
    :param block_device_info: result of _get_instance_block_device_info

    :returns: a LiveMigrateData object
    """
    raise NotImplementedError()
def get_instance_disk_info(self, instance,
                           block_device_info=None):
    """Retrieve information about actual disk sizes of an instance.

    :param instance: nova.objects.Instance
    :param block_device_info:
        Optional; Can be used to filter out devices which are
        actually volumes.
    :return:
        json strings with below format::

            "[{'path':'disk',
            'type':'raw',
            'virt_disk_size':'10737418240',
            'backing_file':'backing_file',
            'disk_size':'83886080'
            'over_committed_disk_size':'10737418240'},
            ...]"
    """
    raise NotImplementedError()
def refresh_security_group_rules(self, security_group_id):
    """This method is called after a change to security groups.

    All security groups and their associated rules live in the datastore,
    and calling this method should apply the updated rules to instances
    running the specified security group.

    An error should be raised if the operation cannot complete.

    :param security_group_id: id of the security group whose rules changed
    """
    # TODO(Vek): Need to pass context in for access to auth_token
    raise NotImplementedError()
def refresh_instance_security_rules(self, instance):
    """Refresh security group rules

    Gets called when an instance gets added to or removed from
    the security group the instance is a member of or if the
    group gains or loses a rule.

    :param instance: nova.objects.instance.Instance
    """
    raise NotImplementedError()
def reset_network(self, instance):
    """reset networking for specified instance.

    :param instance: nova.objects.instance.Instance
    """
    # TODO(Vek): Need to pass context in for access to auth_token
    pass
def ensure_filtering_rules_for_instance(self, instance, network_info):
"""Setting up filtering rules and waiting for its completion.
To migrate an instance, filtering rules to hypervisors
and firewalls are inevitable on destination host.
( Waiting only for filtering rules | |
import io
from pathlib import Path
from textwrap import dedent
import numpy as np
import openpyxl
import pandas as pd
import pytest
from pytest import fixture, raises
try:
from openpyxl.worksheet.worksheet import Worksheet
except ImportError:
# openpyxl < 2.6
from openpyxl.worksheet import Worksheet
from openpyxl import load_workbook
from pyscheme import make_root_environment, Environment
from startables import ColumnMetadata
from startables.startables import Table, read_csv, read_excel, Bundle, nan
from startables.units import Unit, CustomUnitPolicy, ScaleUnitConversion
@fixture(scope='module')
def input_dir() -> Path:
    """Directory containing the test input files, next to this test module."""
    return Path(__file__).parent / 'input'
class TestTable:
    """Tests for `startables.Table`: construction, CSV/Excel output,
    expression evaluation, and unit conversion."""

    @fixture
    def some_df(self):
        """DataFrame with a NaN, text values, and an embedded pyscheme expression."""
        return pd.DataFrame(data=[[nan, 'gnu', 3], [4, 'gnat', '{{(+ x y)}}']], columns=['a', 'b', 'c'])

    @fixture
    def col_specs(self):
        """Column metadata mapping columns 'a', 'b', 'c' to units '-', 'text', 'm'."""
        return {n: ColumnMetadata(Unit(u)) for n, u in zip(['a', 'b', 'c'], ['-', 'text', 'm'])}

    @fixture
    def col_specs_with_format(self):
        """Column metadata with format strings for numeric columns 'a' and 'c'."""
        return {'a': ColumnMetadata(Unit('-'), format_str='${:,.2f}'),
                'b': ColumnMetadata(Unit('text')),
                'c': ColumnMetadata(Unit('m'), format_str='.4e')}

    @fixture
    def some_df_with_digits(self):
        """DataFrame with floats of varying magnitudes to exercise formatting."""
        return pd.DataFrame(data=[[nan, 'gnu', 3.23412121],
                                  [4.12, 'gnat', 0.023],
                                  [0.4, 'galah', 42.01],
                                  [0.04334, 'gentoo', 43232],
                                  [4000.04334, 'gerbil', 43232.0987]],
                            columns=['a', 'b', 'c'])

    @fixture
    def some_table(self, some_df, col_specs):
        """A Table with two destinations built from `some_df`/`col_specs`."""
        return Table(df=some_df, name='some_table', col_specs=col_specs, destinations=['success', 'glory'])

    @fixture
    def some_table_with_digits(self, some_df_with_digits, col_specs_with_format):
        """A Table whose column specs carry format strings."""
        return Table(df=some_df_with_digits, name='some_table_with_digits', col_specs=col_specs_with_format,
                     destinations=['success', 'glory'])

    def test_init_with_col_specs(self, some_df, col_specs):
        """Explicit col_specs populate names/units and default destination is 'all'."""
        t = Table(df=some_df, name='adequate_table', col_specs=col_specs)
        assert t.name == 'adequate_table'
        assert t.destinations == ['all']
        assert t.df.iloc[1, 0] == 4
        assert t.col_names == ['a', 'b', 'c']
        assert t.col_units == ['-', 'text', 'm']
        assert t.col_specs == col_specs
        assert len(t) == 2

    def test_init_with_no_col_specs_at_all(self, some_df):
        """Without col_specs, every column defaults to unit 'text'."""
        t = Table(df=some_df, name='adequate_table')
        cs = t.col_specs
        assert list(cs.keys()) == ['a', 'b', 'c']
        assert [cs[col_name].unit for col_name in cs] == ['text', 'text', 'text']

    def test_init_errors(self, some_df, col_specs):
        """Invalid col_specs (missing column) or destinations (spaces, duplicates) raise."""
        with pytest.raises(ValueError):
            too_short_col_specs = {n: col_specs[n] for n in col_specs if n != 'b'}
            Table(df=some_df, name='adequate_table', col_specs=too_short_col_specs)
        with pytest.raises(ValueError):
            destination_with_illegal_space = ['ok', 'definitely not ok!', 'ok_again']
            Table(df=some_df, name='adequate_table', destinations=destination_with_illegal_space)
        with raises(ValueError):
            destinations_with_illegal_duplicates = ['ok', 'you_again', 'you_again']
            Table(df=some_df, name='adequate_table', destinations=destinations_with_illegal_duplicates)

    def test_df_setter(self, some_table: Table):
        """Assigning a df with a subset of columns works; unknown columns raise."""
        df_with_subset_of_columns = pd.DataFrame(data=[[nan, 3], [4, 666]], columns=['a', 'c'])
        some_table.df = df_with_subset_of_columns
        assert some_table.col_names == ['a', 'c']
        assert some_table.col_units == ['-', 'm']
        df_with_new_unknown_column = pd.DataFrame(data=[[nan, 'gnu', 3], [4, 'gnat', '{{(+ x y)}}']],
                                                  columns=['some_unknown_column', 'b', 'c'])
        with pytest.raises(ValueError):
            some_table.df = df_with_new_unknown_column

    def test_copy(self, some_table: Table):
        """copy() yields an equal Table backed by a distinct DataFrame object."""
        t1 = some_table
        t2 = some_table.copy()
        assert t1.df is not t2.df  # checking it's actually a new copy
        assert t2.name == t1.name
        assert t2.destinations == t1.destinations
        assert t2.col_names == t1.col_names
        assert t2.col_units == t1.col_units
        pd.testing.assert_frame_equal(t1.df, t2.df)

    def test_to_csv(self, some_table: Table):
        """CSV output: '**name' header, destinations line, names, units, rows."""
        out = io.StringIO()
        some_table.to_csv(out)
        assert out.getvalue() == dedent("""\
            **some_table;;
            success glory
            a;b;c
            -;text;m
            -;gnu;3
            4.0;gnat;{{(+ x y)}}
            """)

    def test_to_csv_with_format(self, some_table_with_digits: Table):
        """Format strings from col_specs are applied when writing CSV."""
        out = io.StringIO()
        some_table_with_digits.to_csv(out)
        print(out.getvalue())
        assert out.getvalue() == dedent("""\
            **some_table_with_digits;;
            success glory
            a;b;c
            -;text;m
            -;gnu;3.2341e+00
            $4.12;gnat;2.3000e-02
            $0.40;galah;4.2010e+01
            $0.04;gentoo;4.3232e+04
            $4,000.04;gerbil;4.3232e+04
            """)

    def test_to_csv_nonstring_colnames_and_destinations(self):
        """PS-53: to_csv must not fail when column names/destinations are not strings."""
        # PS-53 Bundle.to_csv() fails when column names are not strings
        nonstring_colnames = [1.234, 666.0, 42.0]
        nonstring_destinations = [1984, 2001.2001]
        df = pd.DataFrame(data=[[nan, 'gnu', 3], [4, 'gnat', '{{(+ x y)}}']], columns=nonstring_colnames)
        col_specs = {n: ColumnMetadata(Unit(u)) for n, u in zip(nonstring_colnames, ['-', 'text', 'm'])}
        t = Table(df=df, name='some_table', col_specs=col_specs,
                  destinations=nonstring_destinations)
        out = io.StringIO()
        t.to_csv(out)
        assert out.getvalue() == dedent("""\
            **some_table;;
            1984 2001.2001
            1.234;666.0;42.0
            -;text;m
            -;gnu;3
            4.0;gnat;{{(+ x y)}}
            """)

    def test_to_excel(self, some_table: Table):
        """Excel output mirrors the CSV layout cell-by-cell."""
        wb = openpyxl.Workbook()
        ws: Worksheet = wb.active
        some_table.to_excel(ws)
        assert ws.cell(row=1, column=1).value == f'**{some_table.name}'
        assert ws.cell(2, 1).value == f'{" ".join(some_table.destinations)}'
        assert ws.cell(3, 2).value == 'b'
        assert ws.cell(4, 3).value == 'm'
        assert ws.cell(5, 1).value == '-'
        assert ws.cell(6, 3).value == '{{(+ x y)}}'

    def test_to_excel_with_digits(self, some_table_with_digits: Table):
        """Format strings are applied to cell values when writing Excel."""
        wb = openpyxl.Workbook()
        ws: Worksheet = wb.active
        some_table_with_digits.to_excel(ws)
        assert ws.cell(row=1, column=1).value == f'**{some_table_with_digits.name}'
        assert ws.cell(2, 1).value == f'{" ".join(some_table_with_digits.destinations)}'
        assert ws.cell(3, 2).value == 'b'
        assert ws.cell(4, 3).value == 'm'
        assert ws.cell(5, 1).value == '-'
        assert ws.cell(5, 3).value == '3.2341e+00'
        assert ws.cell(6, 1).value == '$4.12'
        assert ws.cell(6, 3).value == '2.3000e-02'
        assert ws.cell(7, 1).value == '$0.40'
        assert ws.cell(7, 3).value == '4.2010e+01'
        assert ws.cell(8, 1).value == '$0.04'
        assert ws.cell(8, 3).value == '4.3232e+04'
        assert ws.cell(9, 1).value == '$4,000.04'
        assert ws.cell(9, 3).value == '4.3232e+04'

    def test_evaluate_expressions(self, some_table: Table):
        """Expressions evaluate against an Environment or a plain dict, in place or not."""
        env: Environment = make_root_environment().define('x', 42).define('y', 7)
        assert some_table.evaluate_expressions(env, inplace=False).df.iloc[1, 2] == 49
        context_dict = {'x': 7, 'y': 9}
        assert some_table.evaluate_expressions(context_dict, inplace=False).df.iloc[1, 2] == 16
        env.update([('y', 10)])
        some_table.evaluate_expressions(env, inplace=True)
        assert some_table.df.iloc[1, 2] == 52

    def test_evaluate_expression_syntax_error(self):
        """A malformed expression raises SyntaxError with table/column/row location."""
        df = pd.DataFrame(data=[[nan, 'gnu', 3], [4, 'gnat', '{{((((*+-/ x y heres_a_syntax_error!!!!!!!!!!!!!!!!}}']],
                          columns=['a', 'b', 'c'])
        col_specs = {n: ColumnMetadata(Unit(u)) for n, u in zip(['a', 'b', 'c'], ['-', 'text', 'm'])}
        t = Table(df=df, name='some_table', col_specs=col_specs, destinations=['success', 'glory'])
        with raises(SyntaxError, match=r"Syntax error in expression in table 'some_table', column 2, row 1"):
            t.evaluate_expressions({'x': 7, 'y': 9}, inplace=False)

    def test_convert_to_ref_units(self):
        """All columns convert to the policy's reference unit ('m')."""
        df = pd.DataFrame([
            [11, 12, 13],
            [21, 22, 23]], columns=['a', 'b', 'c'])
        cs = {n: ColumnMetadata(Unit(u)) for n, u in zip(['a', 'b', 'c'], ['m', 'mm', 'km'])}
        t = Table(df, name='Fool', col_specs=cs)
        cup = CustomUnitPolicy([
            ScaleUnitConversion(Unit('mm'), Unit('m'), 0.001),
            ScaleUnitConversion(Unit('km'), Unit('m'), 1000)])
        t_ref = t.convert_to_ref_units(cup, inplace=False)
        assert t_ref.col_units == [Unit('m')] * 3
        assert (np.array(t_ref.df) == np.array([[11, 0.012, 13000],
                                                [21, 0.022, 23000]])).all()

    def test_convert_to_ref_units_unknown_unit(self):
        """Units absent from the policy are left alone with 'ignore', raise with 'raise'."""
        df = pd.DataFrame([
            [11, 12, 13],
            [21, 22, 23]], columns=['a', 'b', 'c'])
        cs = {n: ColumnMetadata(Unit(u)) for n, u in zip(['a', 'b', 'c'], ['m', 'mm', 'km'])}
        t = Table(df, name='Fool', col_specs=cs)
        cup_no_km = CustomUnitPolicy([ScaleUnitConversion(Unit('mm'), Unit('m'), 0.001)])
        t_ref_no_km = t.convert_to_ref_units(cup_no_km, inplace=False, units_not_in_policy='ignore')
        assert t_ref_no_km.col_units == [Unit('m'), Unit('m'), Unit('km')]
        assert (np.array(t_ref_no_km.df) == np.array([[11, 0.012, 13],
                                                      [21, 0.022, 23]])).all()
        with raises(ValueError):
            t.convert_to_ref_units(cup_no_km, inplace=False, units_not_in_policy='raise')

    def test_convert_to_home_units(self):
        """Columns convert from their current unit back to each column's home unit."""
        df = pd.DataFrame([
            [11, 12, 13],
            [21, 22, 23]], columns=['a', 'b', 'c'])
        cs = {n: ColumnMetadata(Unit(u), Unit(hu)) for n, u, hu in zip(
            ['a', 'b', 'c'], ['m', 'm', 'm'], ['m', 'mm', 'km'])}
        t = Table(df, name='Fool', col_specs=cs)
        cup = CustomUnitPolicy([
            ScaleUnitConversion(Unit('mm'), Unit('m'), 0.001),
            ScaleUnitConversion(Unit('km'), Unit('m'), 1000)])
        t_home = t.convert_to_home_units(cup)
        assert t_home.col_units == [Unit('m'), Unit('mm'), Unit('km')]
        assert (np.array(t_home.df) == np.array([[11, 12000, 0.013],
                                                 [21, 22000, 0.023]])).all()

    # TODO tests for convert_units, and convert_to_home_units error handling of messed up unit policies
class TestBundle:
@fixture
def csv_path(self, input_dir: Path) -> Path:
    """Path to the example CSV input file."""
    return input_dir / 'example.csv'
@fixture
def xlsx_path(self, input_dir: Path) -> Path:
    """Path to the example Excel input file."""
    return input_dir / 'example.xlsx'
@fixture
def csv_path_illegal_empty_cell(self, input_dir: Path) -> Path:
    """Path to a CSV input file containing an illegal empty cell."""
    return input_dir / 'example_illegal_empty_cell.csv'
@fixture
def csv_path_illegal_str_in_num_col(self, input_dir: Path) -> Path:
    """Path to a CSV input file with a string in a numerical column."""
    return input_dir / 'example_illegal_str_in_num_col.csv'
@fixture
def some_bundle(self, csv_path: Path):
    """Bundle read from the example CSV file."""
    with open(str(csv_path)) as stream:
        bundle = read_csv(stream)
    return bundle
@fixture
def csv_path_with_header(self, input_dir: Path) -> Path:
    """Path to a CSV input file that starts with a header."""
    return input_dir / 'example_header.csv'
def test__filter_tables_noargs(self, some_bundle: Bundle):
    """With no filter arguments, all four example tables are returned."""
    tables = some_bundle._filter_tables()
    assert len(tables) == 4
@pytest.mark.parametrize("name,ignore_case,expected_len", [
    ('farm', True, 0),
    ('taxidermy', True, 1),
    ('TAXIdermy', True, 1),
    ('TAXIdermy', False, 0),
])
def test__filter_tables_by_name(self, some_bundle: Bundle, name, ignore_case, expected_len):
    """Exact-name filtering, with and without case sensitivity."""
    assert len(some_bundle._filter_tables(name=name, ignore_case=ignore_case)) == expected_len
@pytest.mark.parametrize("name_pattern,ignore_case,expected_len", [
    ('farm', True, 1),
    ('fARM', True, 1),
    ('TAXIdermy', False, 0),
])
def test__filter_tables_by_name_pattern(self, some_bundle: Bundle, name_pattern, ignore_case, expected_len):
    """Substring/pattern name filtering, with and without case sensitivity."""
    assert some_bundle._filter_tables(name_pattern='farm')[0].name == 'farm_animals'
    assert len(some_bundle._filter_tables(name_pattern=name_pattern, ignore_case=ignore_case)) == expected_len
@pytest.mark.parametrize("destination,ignore_case,expected_len", [
    ('your_farm', True, 1),
    ('YOUR_farm', True, 1),
    ('YOUR_farm', False, 0),
])
def test__filter_tables_by_destination(self, some_bundle: Bundle, destination, ignore_case, expected_len):
    """Exact-destination filtering, with and without case sensitivity."""
    assert some_bundle._filter_tables(destination='your_farm')[0].name == 'farm_animals'
    assert len(some_bundle._filter_tables(destination=destination, ignore_case=ignore_case)) == expected_len
@pytest.mark.parametrize("destination_pattern,ignore_case,expected_len", [
    ('_farm', True, 1),
    ('_FaRm', True, 1),
    ('_FaRm', False, 0),
])
def test__filter_tables_by_destination_pattern(self, some_bundle: Bundle, destination_pattern, ignore_case,
                                               expected_len):
    """Destination-pattern filtering, with and without case sensitivity."""
    assert some_bundle._filter_tables(destination_pattern='_farm')[0].name == 'farm_animals'
    assert len(some_bundle._filter_tables(destination_pattern=destination_pattern,
                                          ignore_case=ignore_case)) == expected_len
def test_filter(self, some_bundle: Bundle):
    """Public filter(): no args keeps all tables; destination arg narrows them."""
    assert len(some_bundle.filter().tables) == 4
    assert some_bundle.filter(destination='your_farm').tables[0].name == 'farm_animals'
def test_pop_tables_noargs(self, csv_path: Path):
    """pop_tables() with no args removes and returns all tables."""
    with open(str(csv_path)) as f:
        b = read_csv(f)
    p = b.pop_tables()
    assert len(p) == 4
    assert len(b.tables) == 0
def test_pop_tables_by_name(self, csv_path: Path):
    """pop_tables(name=...) removes only the matching table."""
    with open(str(csv_path)) as f:
        b = read_csv(f)
    taxidermy_tables = b.pop_tables(name='taxidermy')
    assert len(taxidermy_tables) == 1
    assert len(b.tables) == 3
def test_copy(self, some_bundle: Bundle):
    """Bundle.copy() preserves the number of tables and their names."""
    b1 = some_bundle
    b2 = b1.copy()
    assert len(b1.tables) == len(b2.tables)
    assert b1.tables[1].name == b2.tables[1].name
def test_read_csv(self, csv_path: Path):
    """read_csv parses tables, units, destinations, NaN markers, dates and expressions."""
    with open(str(csv_path)) as f:
        b = read_csv(f)
    assert len(b.tables) == 4
    t = b.tables[0]
    assert t.name == 'farm_animals'
    assert t.col_names == ['species', 'n_legs', 'avg_weight']
    assert t.col_units == ['text', '-', 'kg']
    assert t.destinations == ['your_farm', 'my_farm', 'farms_galore']
    df = t.df
    assert df.iloc[4, 2] == 9
    assert df.iloc[1, 2] == '{{(* age 30)}}'
    assert np.isnan(df.iloc[2, 2])
    assert np.isnan(df.iloc[3, 1])  # PS-15 Accept 'NaN' as NaN marker
    assert df.iloc[5, 0] == '1234'  # PS-28 Numerical data in text columns gets read
    assert df.shape == (6, 3)
    t2 = b.tables[2]
    assert t2.name == 'taxidermy'
    assert t2.df.iloc[3, 3] == pd.Timestamp('2012-05-01 12:34')
    assert pd.isna(t2.df.iloc[1, 3])
    assert t.evaluate_expressions({'age': 3}).df.iloc[1, 2] == 90
    assert len(b.tables[3]) == 0  # PS-5 Empty StarTables are unjustly ignored / omitted
def test_read_csv_no_extra_delimiters_on_tables(self, input_dir):
# PS-19 Reading from CSV can fail | |
dict((k, v.open()) for (k, v) in inputs.items())
variable, operator = self._get_var_and_op()
ds = objects.integrate.integrate(
objects=da_objects, variable=variable, operator=operator, **kwargs
)
ds.to_netcdf(self.output().fn)
class ComputePerObjectAtHeight(luigi.Task):
    """
    For each object defined by `mask_method`, `mask_method_extra_args` and
    `object_splitting_scalar` compute the operation of `op` applied
    on `field_name` at height `z`
    """

    base_name = luigi.Parameter()
    mask_method = luigi.Parameter()
    mask_method_extra_args = luigi.Parameter(default="")
    object_splitting_scalar = luigi.Parameter()
    field_name = luigi.OptionalParameter(default=None)
    op = luigi.Parameter()
    z = luigi.FloatParameter()

    def requires(self):
        """Require the object labels; also the 3D field unless only counting cells."""
        tasks = dict(
            objects=IdentifyObjects(
                base_name=self.base_name,
                mask_method=self.mask_method,
                mask_method_extra_args=self.mask_method_extra_args,
                splitting_scalar=self.object_splitting_scalar,
            ),
        )
        if self.op != "num_cells":
            tasks["field"] = ExtractField3D(
                base_name=self.base_name,
                field_name=self.field_name,
            )
        return tasks

    def run(self):
        """Apply the dask-image measure `op` per object at height `z` and save it."""
        # for `num_cells` operation the field shouldn't be given because the
        # number of cells is just computed from the mask
        inputs = self.input()
        da_objects = inputs["objects"].open()
        if self.op != "num_cells":
            da_field = inputs["field"].open().squeeze()
        else:
            # identity comparison with None (was `!= None`, which relies on
            # the parameter type not overriding __ne__)
            if self.field_name is not None:
                raise Exception(
                    f"Field name should not be given when computing `{self.op}`"
                    f" (`{self.field_name}` was provided)"
                )
            da_field = None
        object_ids = np.unique(da_objects.chunk(None).values)
        # label 0 marks the background (no object), so exclude it
        if object_ids[0] == 0:
            object_ids = object_ids[1:]
        kwargs = dict(
            objects=da_objects.name,
            object_ids=object_ids,
            op=self.op,
        )
        if self.op != "num_cells":
            kwargs["scalar"] = da_field.name
        da_objects_ = da_objects.sel(zt=self.z).compute()
        if self.op != "num_cells":
            da_ = da_field.sel(zt=self.z).compute()
        else:
            da_ = xr.ones_like(da_objects_)
        # to avoid the confusion where the "area" is requested but what in fact
        # is returned is the "number of cells" (which is dimensionless) we
        # enforce here that the "area" cannot be calculated, but instead
        # "num_cells" can be requested and we use the `area` dask-image op
        # (which returns the number of cells)
        op = kwargs["op"]
        if op == "area":
            raise Exception(
                "Shouldn't ask for `area` as it is actually the number of cells"
            )
        elif op == "num_cells":
            op = "area"
        fn = getattr(dask_image.ndmeasure, op)
        v = fn(da_, label_image=da_objects_, index=object_ids).compute()
        da = xr.DataArray(data=v, dims=["object_id"], coords=dict(object_id=object_ids))
        if self.op != "num_cells":
            da.name = "{}__{}".format(da_.name, kwargs["op"])
            da.attrs["units"] = da_.units
            da.attrs["long_name"] = "{} of {} per object".format(
                kwargs["op"],
                da_.long_name,
            )
        else:
            da.name = "num_cells"
            da.attrs["units"] = "1"
            da.attrs["long_name"] = "num_cells per object"
        da.coords["zt"] = self.z
        da.coords["time"] = da_objects_.time
        da.to_netcdf(self.output().fn)

    def output(self):
        """NetCDF target named after base-name, mask, field, op and height."""
        mask_name = MakeMask.make_mask_name(
            base_name=self.base_name,
            method_name=self.mask_method,
            method_extra_args=self.mask_method_extra_args,
        )
        fn = (
            f"{self.base_name}.{mask_name}.{self.field_name}__{self.op}_at_z{self.z}.nc"
        )
        p = get_workdir() / self.base_name / fn
        target = XArrayTarget(str(p))
        return target
class ComputePerObjectProfiles(luigi.Task):
    """
    For each object defined by `mask_method`, `mask_method_extra_args` and
    `object_splitting_scalar` compute a profile of the operation `op` applied
    on `field_name` as a function of height
    """

    base_name = luigi.Parameter()
    mask_method = luigi.Parameter()
    mask_method_extra_args = luigi.Parameter(default="")
    object_splitting_scalar = luigi.Parameter()
    field_name = luigi.OptionalParameter(default=None)
    op = luigi.Parameter()
    z_max = luigi.FloatParameter(default=None)

    def requires(self):
        """Require the labeled-objects field (needed to learn the height levels)."""
        return IdentifyObjects(
            base_name=self.base_name,
            mask_method=self.mask_method,
            mask_method_extra_args=self.mask_method_extra_args,
            splitting_scalar=self.object_splitting_scalar,
        )

    def run(self):
        """Fan out one per-height task per zt level (luigi dynamic deps) and stack them."""
        da_objects = self.input().open()
        # limit to heights up to z_max (slice(None, None) when z_max is None)
        z_values = da_objects.sel(zt=slice(None, self.z_max)).zt.values
        tasks = [
            ComputePerObjectAtHeight(
                base_name=self.base_name,
                mask_method=self.mask_method,
                mask_method_extra_args=self.mask_method_extra_args,
                object_splitting_scalar=self.object_splitting_scalar,
                field_name=self.field_name,
                op=self.op,
                z=z,
            )
            for z in z_values
        ]
        outputs = yield tasks
        da_by_height = xr.concat([output.open() for output in outputs], dim="zt")
        da_by_height.to_netcdf(self.output().fn)

    def output(self):
        """NetCDF target; filename gains a `_to_z{z_max}` suffix only when z_max is set."""
        mask_name = MakeMask.make_mask_name(
            base_name=self.base_name,
            method_name=self.mask_method,
            method_extra_args=self.mask_method_extra_args,
        )
        fn = (
            "{base_name}.{mask_name}.{field_name}__{op}"
            ".by_z_per_object{ex}.nc".format(
                base_name=self.base_name,
                mask_name=mask_name,
                field_name=self.field_name,
                op=self.op,
                # bugfix: the previous `cond and "" or suffix` form could
                # never yield "" (empty string is falsy, so the `or` branch
                # always won and produced "_to_zNone" when z_max was unset)
                ex="" if self.z_max is None else "_to_z" + str(self.z_max),
            )
        )
        p = get_workdir() / self.base_name / fn
        target = XArrayTarget(str(p))
        return target
class ComputeFieldDecompositionByHeightAndObjects(luigi.Task):
field_name = luigi.Parameter()
base_name = luigi.Parameter()
mask_method = luigi.Parameter()
mask_method_extra_args = luigi.Parameter(default="")
object_splitting_scalar = luigi.Parameter()
object_filters = luigi.Parameter(default=None)
z_max = luigi.FloatParameter(default=None)
def requires(self):
    """Require the per-object profiles (cell counts and field sums), the raw 3D
    field, and the mask; plus object scales when `object_filters` is set."""
    tasks = dict(
        decomp_profile_ncells=ComputePerObjectProfiles(
            base_name=self.base_name,
            mask_method=self.mask_method,
            mask_method_extra_args=self.mask_method_extra_args,
            object_splitting_scalar=self.object_splitting_scalar,
            op="num_cells",
            z_max=self.z_max,
        ),
        decomp_profile_sum=ComputePerObjectProfiles(
            base_name=self.base_name,
            mask_method=self.mask_method,
            mask_method_extra_args=self.mask_method_extra_args,
            object_splitting_scalar=self.object_splitting_scalar,
            field_name=self.field_name,
            op="sum",
            z_max=self.z_max,
        ),
        da_3d=ExtractField3D(
            base_name=self.base_name,
            field_name=self.field_name,
        ),
        mask=MakeMask(
            base_name=self.base_name,
            method_name=self.mask_method,
            method_extra_args=self.mask_method_extra_args,
        ),
    )
    if self.object_filters is not None:
        # if we're filtering on a set of scales we just compute a scale
        # here that's easy so that we get the objects which satisfy the
        # filter
        tasks["scales"] = ComputeObjectScales(
            base_name=self.base_name,
            mask_method=self.mask_method,
            mask_method_extra_args=self.mask_method_extra_args,
            object_splitting_scalar=self.object_splitting_scalar,
            variables="num_cells",
            object_filters=self.object_filters,
        )
    return tasks
def _run_single(self):
    """Build the decomposition dataset for a single base-name.

    Returns an xarray Dataset combining mean profiles (full domain, mask,
    objects), area-fraction profiles (mask, objects), and the per-object
    cell-count and sum fields, optionally restricted to zt <= z_max.
    """
    # NOTE: `input` shadows the builtin of the same name within this method
    input = self.input()
    da_field_ncells_per_object = input["decomp_profile_ncells"].open().squeeze()
    da_field_sum_per_object = input["decomp_profile_sum"].open()
    da_3d = input["da_3d"].open().squeeze()
    da_mask = input["mask"].open().squeeze()
    nx, ny = da_3d.xt.count(), da_3d.yt.count()
    # calculate domain mean profile
    da_domain_mean_profile = da_3d.mean(
        dim=("xt", "yt"), dtype=np.float64, skipna=True
    )
    da_domain_mean_profile["sampling"] = "full domain"
    # # contributions from mask only
    # calculate mask mean profile and mask fractional area (so that
    # total contribution to domain mean can be computed later)
    da_mask_mean_profile = da_3d.where(da_mask).mean(
        dim=("xt", "yt"), dtype=np.float64, skipna=True
    )
    da_mask_mean_profile["sampling"] = "mask"
    da_mask_areafrac_profile = da_mask.sum(
        dim=("xt", "yt"), dtype=np.float64, skipna=True
    ) / (nx * ny)
    da_mask_areafrac_profile["sampling"] = "mask"
    # # contributions from objects
    # if object filters have been provided we should only include the
    # objects which are in the filtered scales file (as these satisfy the
    # filtering criteria)
    if self.object_filters is not None:
        ds_scales = input["scales"].open()
        # need to cast scales indexing (int64) to object identifitcation
        # indexing (uint32) here, otherwise saving goes wrong when merging
        # (because xarray makes the dtype `object` otherwise)
        ds_scales["object_id"] = ds_scales.object_id.astype(
            da_field_ncells_per_object.object_id.dtype
        )

        def filter_per_object_field(da_field):
            # keep only values whose object_id is present in the filtered
            # scales; relies on xarray alignment of the two object_id coords
            return da_field.where(da_field.object_id == ds_scales.object_id)

        da_field_ncells_per_object = filter_per_object_field(
            da_field=da_field_ncells_per_object
        )
        da_field_sum_per_object = filter_per_object_field(
            da_field=da_field_sum_per_object
        )
    # calculate objects mean profile and objects fractional area (so
    # that total contribution to domain mean can be computed later)
    da_objects_total_flux = da_field_sum_per_object.sum(
        dim=("object_id",), dtype=np.float64, skipna=True
    )
    da_objects_total_ncells = da_field_ncells_per_object.sum(
        dim=("object_id",), dtype=np.float64, skipna=True
    )
    da_objects_mean_profile = da_objects_total_flux / da_objects_total_ncells
    da_objects_mean_profile["sampling"] = "objects"
    da_objects_areafrac_profile = da_objects_total_ncells / (nx * ny)
    da_objects_areafrac_profile["sampling"] = "objects"
    da_mean_profiles = xr.concat(
        [da_domain_mean_profile, da_mask_mean_profile, da_objects_mean_profile],
        dim="sampling",
    )
    da_mean_profiles.name = "{}__mean".format(self.field_name)
    da_mean_profiles.attrs = dict(
        units=da_3d.units, long_name="{} mean".format(da_3d.long_name)
    )
    da_areafrac_profiles = xr.concat(
        [da_mask_areafrac_profile, da_objects_areafrac_profile], dim="sampling"
    )
    da_areafrac_profiles.name = "areafrac"
    da_areafrac_profiles.attrs = dict(units="1", long_name="area fraction")
    ds_profiles = xr.merge([da_mean_profiles, da_areafrac_profiles])
    ds = xr.merge(
        [
            ds_profiles,
            da_field_ncells_per_object,
            da_field_sum_per_object,
        ]
    )
    if self.z_max is not None:
        ds = ds.sel(zt=slice(None, self.z_max))
    ds.attrs["nx"] = int(nx)
    ds.attrs["ny"] = int(ny)
    return ds
def run(self):
if "+" in self.base_name:
ds = self._run_multiple()
else:
ds = self._run_single()
fn = self.output().fn
Path(fn).parent.mkdir(parents=True, exist_ok=True)
ds.to_netcdf(fn)
def output(self):
mask_name = MakeMask.make_mask_name(
base_name=self.base_name,
method_name=self.mask_method,
method_extra_args=self.mask_method_extra_args,
)
s_filter = ""
if self.object_filters is not None:
s_filter = ".filtered_by.{}".format(
(
self.object_filters.replace(",", ".")
.replace(":", "__")
.replace("=", "_")
)
)
fn = (
"{base_name}.{mask_name}.{field_name}"
"{s_filter}.by_z_per_object{ex}.{filetype}".format(
base_name=self.base_name,
mask_name=mask_name,
field_name=self.field_name,
filetype="nc",
s_filter=s_filter,
ex=self.z_max is None and "" or "_to_z" + str(self.z_max),
)
)
p = get_workdir() / self.base_name / fn
target = XArrayTarget(str(p))
return target
class EstimateCharacteristicScales(luigi.Task):
    """Estimate characteristic object scales by fitting a model to the
    distributions of the object-scale variables (length, width, thickness).
    """

    object_splitting_scalar = luigi.Parameter()
    base_name = luigi.Parameter()
    mask_method = luigi.Parameter()
    mask_method_extra_args = luigi.Parameter(default="")
    # the object-scale variables the model is fitted to
    variables = ["length", "width", "thickness"]
    object_filters = luigi.Parameter(default=None)
    fit_type = luigi.Parameter(default="exponential")

    def requires(self):
        # the fit needs the per-object scales for all requested variables
        return ComputeObjectScales(
            variables=",".join(self.variables),
            base_name=self.base_name,
            mask_method=self.mask_method,
            mask_method_extra_args=self.mask_method_extra_args,
            object_splitting_scalar=self.object_splitting_scalar,
            object_filters=self.object_filters,
        )

    def run(self):
        ds = self.input().open()
        # FIX: only exponential fits are implemented; raise instead of using
        # `assert` so the check survives running under `python -O`
        if self.fit_type != "exponential":
            raise NotImplementedError(
                "fit_type `{}` is not supported".format(self.fit_type)
            )
        fn = length_scales.model_fitting.exponential_fit.fit
        # apply the fit to each variable in turn
        ds_scales = ds[self.variables].apply(fn)
        ds_scales.to_netcdf(self.output().fn)

    def output(self):
        mask_name = MakeMask.make_mask_name(
            base_name=self.base_name,
            method_name=self.mask_method,
            method_extra_args=self.mask_method_extra_args,
        )
        fn = "{}.{}.exp_fit_scales.nc".format(self.base_name, mask_name)
        p = get_workdir() / self.base_name / fn
        target = XArrayTarget(str(p))
        return target
class MakeMaskWithObjects(MakeMask):
    """Create a mask from identified objects, keeping only the objects which
    satisfy the given filters.
    """

    filtered_by = luigi.Parameter()
    object_splitting_scalar = luigi.Parameter()
    object_filters = luigi.Parameter()

    def requires(self):
        reqs = {}
        is_filtered = "__filtered_by=" in self.method_name
        if is_filtered:
            # the filter specification is embedded in the method name itself
            method_name, filters = self.method_name.split("__filtered_by=")
        else:
            method_name = self.method_name
            # FIX: `filters` was previously left undefined on this branch,
            # raising a NameError below; fall back to the task parameter
            filters = self.object_filters
        object_splitting_scalar = self.object_splitting_scalar
        method_extra_args = self.method_extra_args

        reqs["all_objects"] = IdentifyObjects(
            base_name=self.base_name,
            splitting_scalar=object_splitting_scalar,
            mask_method=method_name,
            mask_method_extra_args=method_extra_args,
        )

        if "tracking" in filters:
            # tracking-based filtering is not implemented yet; the statements
            # below are intentionally unreachable and kept for reference
            raise NotImplementedError
            assert filters == "tracking:triggers_cloud"
            reqs["tracking"] = PerformObjectTracking2D(
                base_name=self.base_name,
                tracking_type=objects.filter.TrackingType.THERMALS_ONLY,
            )
        else:
            reqs["filtered_objects"] = ComputeObjectScales(
                base_name=self.base_name,
                variables="num_cells",
                mask_method=method_name,
                mask_method_extra_args=method_extra_args,
                object_splitting_scalar=object_splitting_scalar,
                object_filters=filters,
            )
        return reqs

    def run(self):
        # NOTE: this task is currently disabled; the code below is
        # intentionally unreachable and retained for reference only
        raise NotImplementedError
        # object_splitting_scalar = self.object_splitting_scalar
        # method_extra_args = self.method_extra_args
        method_name = self.method_name
        mask_functions = None
        cloud_identification = None
        object_filters = None

        method_kwargs = self._build_method_kwargs(
            base_name=self.base_name, method_extra_args=self.method_extra_args
        )
        mask_fn = getattr(mask_functions, method_name)
        assert hasattr(mask_fn, "description")

        input = self.input()
        da_objects = input["all_objects"].open(decode_times=False)

        if "tracking:" in self.method_name:
            raise NotImplementedError
            cloud_data = self.requires()["tracking"].get_cloud_data()
            t0 = da_objects.time.values
            ds_track_2d = cloud_data._fh_track.sel(time=t0)
            objects_tracked_2d = ds_track_2d.nrthrm
            da_mask = da_objects.where(~objects_tracked_2d.isnull())
            filter_desc = "cloud_trigger"
        else:
            ds_obj_props_filtered = input["filtered_objects"].open()
            labels = da_objects.values
            # zero out the labels of objects that did not pass the filters
            cloud_identification.filter_labels(
                labels=labels, idxs_keep=ds_obj_props_filtered.object_id.values
            )
            da_mask = xr.DataArray(
                labels != 0, coords=da_objects.coords, dims=da_objects.dims
            )
            filter_desc = objects.filter.latex_format(object_filters)

        mask_desc = mask_fn.description.format(**method_kwargs)
        da_mask.attrs["long_name"] = "{} filtered by {}".format(mask_desc, filter_desc)
        da_mask.name = self.method_name
        da_mask.to_netcdf(self.output().fn)

    @classmethod
    def make_mask_name(cls, base_name, method_name, method_extra_args, object_filters):
        """Build the mask name, appending a filesystem-friendly encoding of
        the object filters to the base mask name."""
        mask_name = super().make_mask_name(
            base_name=base_name,
            method_name=method_name,
            method_extra_args=method_extra_args,
        )
        s_filters = object_filters.replace(",", ".").replace("=", "").replace(":", "__")
        mask_name += ".filtered_by." + s_filters
        # avoid a dangling "." when the filter string was empty
        if mask_name.endswith("."):
            mask_name = mask_name[:-1]
        return mask_name

    def output(self):
        mask_name = self.make_mask_name(
            base_name=self.base_name,
            method_name=self.method_name,
            method_extra_args=self.method_extra_args,
            object_filters=self.object_filters,
        )
        fn = make_mask.OUT_FILENAME_FORMAT.format(
            base_name=self.base_name, mask_name=mask_name
        )
        p = get_workdir() / self.base_name / fn
        return XArrayTarget(str(p))
class ObjectTwoScalesComposition(luigi.Task):
"""
Decompose `field_name` | |
#TODO: TMP
# import sys
# sys.path.append('../')
# from dataops.colors import ycbcr_to_rgb, yuv_to_rgb
import os #, glob
import random
import numpy as np
import cv2
import torch
from torch.utils.data.dataset import Dataset
import dataops.common as util
from dataops.augmentations import Scale, NoisePatches, RandomNoisePatches, get_blur, get_noise
from dataops.debug import tmp_vis, describe_numpy, describe_tensor
import dataops.augmennt.augmennt as transforms
class VidTrainsetLoader(Dataset):
def __init__(self, opt):
super(VidTrainsetLoader).__init__()
self.opt = opt
self.image_channels = opt.get('image_channels', 3)
self.num_frames = opt.get('num_frames', 3)
self.srcolors = opt.get('srcolors', None)
self.otf_noise = opt.get('lr_noise', None) or opt.get('lr_blur', None)
self.y_only = opt.get('y_only', True)
self.shape = opt.get('tensor_shape', 'TCHW')
assert self.num_frames % 2 == 1, (
f'num_frame must be an odd number, but got {self.num_frames}')
if self.opt['phase'] == 'train':
if opt.get('dataroot_kernels', None) and 999 in opt["lr_downscale_types"]:
self.ds_kernels = transforms.ApplyKernel(
scale=opt.get('scale', 4), kernels_path=opt['dataroot_kernels'], pattern='kernelgan')
else:
self.ds_kernels = None
# if opt['phase'] == 'train' and opt.get('lr_noise_types', None) and "patches" in opt['lr_noise_types']:
if opt.get('lr_noise_types', None) and "patches" in opt['lr_noise_types']:
assert opt['noise_data']
self.noise_patches = NoisePatches(opt['noise_data'], opt.get('HR_size', 128)/opt.get('scale', 4), grayscale=self.y_only)
else:
self.noise_patches = None
self.n_iters = opt.get('n_iters', 200000) * opt.get('batch_size', 32) * opt.get('virtual_batch_size', 32) / opt.get('batch_size', 32)
# Check if dataroot_HR is a list of directories or a single directory. Note: lmdb will not currently work with a list
self.paths_HR = opt.get('dataroot_HR', None)
if self.paths_HR:
self.video_list = os.listdir(self.paths_HR)
# Check if dataroot_LR is a list of directories or a single directory. Note: lmdb will not currently work with a list
self.paths_LR = opt.get('dataroot_LR', None)
if self.paths_LR and not self.paths_HR:
self.video_list = os.listdir(self.paths_LR)
def __getitem__(self, idx):
scale = self.opt.get('scale', 4)
HR_size = self.opt.get('HR_size', 128)
LR_size = HR_size // scale
idx_center = (self.num_frames - 1) // 2
ds_kernel = None
# Default case: tensor will result in the [0,1] range
# Alternative: tensor will be z-normalized to the [-1,1] range
znorm = self.opt.get('znorm', False)
if self.opt['phase'] == 'train':
if self.opt.get('lr_downscale', None) and self.opt.get('dataroot_kernels', None) and 999 in self.opt["lr_downscale_types"]:
ds_kernel = self.ds_kernels #KernelDownscale(scale, self.kernel_paths, self.num_kernel)
# get a random video directory
idx_video = random.randint(0, len(self.video_list)-1)
video_dir = self.video_list[idx_video]
# print(video_dir)
else:
# only one video and paths_LR/paths_HR is already the video dir
video_dir = ""
# list the frames in the directory
# hr_dir = self.trainset_dir + '/' + video_dir + '/hr'
paths_HR = util.get_image_paths(self.opt['data_type'], os.path.join(self.paths_HR, video_dir))
# print(paths_HR)
if self.opt['phase'] == 'train':
# random reverse augmentation
random_reverse = self.opt.get('random_reverse', False)
# skipping intermediate frames to learn from low FPS videos augmentation
# testing random frameskip up to 'max_frameskip' frames
max_frameskip = self.opt.get('max_frameskip', 0)
if max_frameskip > 0:
max_frameskip = min(max_frameskip, len(paths_HR)//(self.num_frames-1))
frameskip = random.randint(1, max_frameskip)
else:
frameskip = 1
# print("max_frameskip: ", max_frameskip)
assert ((self.num_frames-1)*frameskip) <= (len(paths_HR)-1), (
f'num_frame*frameskip must be smaller than the number of frames per video, check {video_dir}')
# if number of frames of training video is for example 31, "max index -num_frames" = 31-3=28
idx_frame = random.randint(0, (len(paths_HR)-1)-((self.num_frames-1)*frameskip))
# print('frameskip:', frameskip)
else:
frameskip = 1
idx_frame = idx
'''
List based frames loading
'''
if self.paths_LR:
paths_LR = util.get_image_paths(self.opt['data_type'], os.path.join(self.paths_LR, video_dir))
else:
paths_LR = paths_HR
ds_algo = 777 # default to matlab-like bicubic downscale
if self.opt.get('lr_downscale', None): # if manually set and scale algorithms are provided, then:
ds_algo = self.opt.get('lr_downscale_types', 777)
# get the video directory
HR_dir, _ = os.path.split(paths_HR[idx_frame])
LR_dir, _ = os.path.split(paths_HR[idx_frame])
# read HR & LR frames
HR_list = []
LR_list = []
resize_type = None
LR_bicubic = None
HR_center = None
# print('len(paths_HR)', len(paths_HR))
for i_frame in range(self.num_frames):
# print('frame path:', paths_HR[int(idx_frame)+(frameskip*i_frame)])
HR_img = util.read_img(None, paths_HR[int(idx_frame)+(frameskip*i_frame)], out_nc=self.image_channels)
HR_img = util.modcrop(HR_img, scale)
if self.opt['phase'] == 'train':
'''
If using individual image augmentations, get cropping parameters for reuse
'''
if self.otf_noise and i_frame == 0: #only need to calculate once, from the first frame
# reuse the cropping parameters for all LR and HR frames
hr_crop_params, lr_crop_params = get_crop_params(HR_img, LR_size, scale)
if self.opt.get('lr_noise', None):
# reuse the same noise type for all the frames
noise_option = get_noise(self.opt.get('lr_noise_types', None), self.noise_patches)
if self.opt.get('lr_blur', None):
# reuse the same blur type for all the frames
blur_option = get_blur(self.opt.get('lr_blur_types', None))
if self.paths_LR:
# LR images are provided at the correct scale
LR_img = util.read_img(None, paths_LR[int(idx_frame)+(frameskip*i_frame)], out_nc=self.image_channels)
if LR_img.shape == HR_img.shape:
LR_img, resize_type = Scale(img=HR_img, scale=scale, algo=ds_algo, ds_kernel=ds_kernel, resize_type=resize_type)
else:
# generate LR images on the fly
LR_img, resize_type = Scale(img=HR_img, scale=scale, algo=ds_algo, ds_kernel=ds_kernel, resize_type=resize_type)
# get the bicubic upscale of the center frame to concatenate for SR
if self.y_only and self.srcolors and i_frame == idx_center:
LR_bicubic, _ = Scale(img=LR_img, scale=1/scale, algo=777) # bicubic upscale
HR_center = HR_img
# tmp_vis(LR_bicubic, False)
# tmp_vis(HR_center, False)
if self.y_only:
# extract Y channel from frames
# normal path, only Y for both
HR_img = util.bgr2ycbcr(HR_img, only_y=True)
LR_img = util.bgr2ycbcr(LR_img, only_y=True)
# crop patches randomly if using otf noise
#TODO: make a composable random_crop
#TODO: note the original crop should go here and crop after loading each image, but could also be much simpler
# to crop after concatenating. Check the speed difference.
if self.otf_noise and self.opt['phase'] == 'train':
HR_img, LR_img = apply_crop_params(HR_img, LR_img, hr_crop_params, lr_crop_params)
if self.y_only and self.srcolors and i_frame == idx_center:
LR_bicubic, _ = apply_crop_params(LR_bicubic, None, hr_crop_params, None)
HR_center, _ = apply_crop_params(HR_center, None, hr_crop_params, None)
# expand Y images to add the channel dimension
# normal path, only Y for both
if self.y_only:
HR_img = util.fix_img_channels(HR_img, 1)
LR_img = util.fix_img_channels(LR_img, 1)
if self.opt['phase'] == 'train':
# single frame augmentation (noise, blur, etc). Would only be efficient if patches are cropped in this loop
if self.opt.get('lr_blur', None):
if blur_option:
LR_img = blur_option(LR_img)
if self.opt.get('lr_noise', None):
if noise_option:
LR_img = noise_option(LR_img)
# expand LR images to add the channel dimension again if needed (blur removes the grayscale channel)
#TODO: add a if condition, can compare to the ndim before the augs, maybe move inside the aug condition
# if not fullimgchannels: #TODO: TMP, this should be when using srcolors for HR or when training with 3 channels tests, separatedly
if self.y_only:
LR_img = util.fix_img_channels(LR_img, 1)
# print("HR_img.shape: ", HR_img.shape)
# print("LR_img.shape", LR_img.shape)
HR_list.append(HR_img) # h, w, c
LR_list.append(LR_img) # h, w, c
# print(len(HR_list))
# print(len(LR_list))
if self.opt['phase'] == 'train':
# random reverse sequence augmentation
if random_reverse and random.random() < 0.5:
HR_list.reverse()
LR_list.reverse()
if not self.y_only:
t = self.num_frames
HR = [np.asarray(GT) for GT in HR_list] # list -> numpy # input: list (contatin numpy: [H,W,C])
HR = np.asarray(HR) # numpy, [T,H,W,C]
h_HR, w_HR, c = HR_img.shape #HR_center.shape #TODO: check, may be risky
HR = HR.transpose(1,2,3,0).reshape(h_HR, w_HR, -1) # numpy, [H',W',CT]
LR = [np.asarray(LT) for LT in LR_list] # list -> numpy # input: list (contatin numpy: [H,W,C])
LR = np.asarray(LR) # numpy, [T,H,W,C]
LR = LR.transpose(1,2,3,0).reshape(h_HR//scale, w_HR//scale, -1) # numpy, [Hl',Wl',CT]
else:
HR = np.concatenate((HR_list), axis=2) # h, w, t
LR = np.concatenate((LR_list), axis=2) # h, w, t
if self.opt['phase'] == 'train':
'''
# If not using individual image augmentations, this cropping should be faster, only once
'''
# crop patches randomly. If not using otf noise, crop all concatenated images
if not self.otf_noise:
HR, LR, hr_crop_params, _ = random_crop_mod(HR, LR, LR_size, scale)
if self.y_only and self.srcolors:
LR_bicubic, _, _, _ = random_crop_mod(LR_bicubic, _, LR_size, scale, hr_crop_params)
HR_center, _, _, _ = random_crop_mod(HR_center, _, LR_size, scale, hr_crop_params)
# tmp_vis(LR_bicubic, False)
# tmp_vis(HR_center, False)
# data augmentation
#TODO: reuse augmentations
#TODO: use variables from config
LR, HR, LR_bicubic, HR_center = augmentation()([LR, HR, LR_bicubic, HR_center])
# tmp_vis(HR, False)
# tmp_vis(LR, False)
# tmp_vis(LR_bicubic, False)
# tmp_vis(HR_center, False)
if self.y_only:
HR = util.np2tensor(HR, normalize=znorm, bgr2rgb=False, add_batch=False) # Tensor, [CT',H',W'] or [T, H, W]
LR = util.np2tensor(LR, normalize=znorm, bgr2rgb=False, add_batch=False) # Tensor, [CT',H',W'] or [T, H, W]
else:
HR = util.np2tensor(HR, normalize=znorm, bgr2rgb=True, add_batch=False) # Tensor, [CT',H',W'] or [T, H, W]
LR = util.np2tensor(LR, normalize=znorm, bgr2rgb=True, add_batch=False) # Tensor, [CT',H',W'] or [T, H, W]
#TODO: TMP to test generating 3 channel images for SR loss
# HR = util.np2tensor(HR, normalize=znorm, bgr2rgb=False, add_batch=True) # Tensor, [CT',H',W'] or [T, H, W]
# LR = | |
rotation_velocity (float): Rotation velocity of the rotor
pitch_deg (float): pitch angle in degrees
excel_file_name (str):
excel_sheet_structural_blade (str):
excel_sheet_aero_blade (str):
excel_sheet_airfoil_coord (str):
excel_sheet_parameters (str):
excel_sheet_structural_tower (str):
m_distribution (str):
n_points_camber (int): number of points to define the camber of the airfoil,
tol_remove_points (float): maximum distance to remove adjacent points
Returns:
wt (sharpy.utils.generate_cases.AeroelasticInfromation): Aeroelastic infrmation of the wind turbine
LC (list): list of all the Lagrange constraints needed in the cases (sharpy.utils.generate_cases.LagrangeConstraint)
MB (list): list of the multibody information of each body (sharpy.utils.generate_cases.BodyInfrmation)
"""
rotor = rotor_from_OpenFAST_db(chord_panels,
rotation_velocity,
pitch_deg,
excel_file_name= excel_file_name,
excel_sheet_parameters = excel_sheet_parameters,
excel_sheet_structural_blade = excel_sheet_structural_blade,
excel_sheet_aero_blade = excel_sheet_aero_blade,
excel_sheet_airfoil_coord = excel_sheet_airfoil_coord,
m_distribution = m_distribution,
n_points_camber = n_points_camber,
tol_remove_points = tol_remove_points)
######################################################################
## TOWER
######################################################################
# Read from excel file
HtFract = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_tower, 'HtFract')
TMassDen = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_tower, 'TMassDen')
TwFAStif = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_tower, 'TwFAStif')
TwSSStif = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_tower, 'TwSSStif')
# TODO> variables to be defined
TwGJStif = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_tower, 'TwGJStif')
TwEAStif = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_tower, 'TwEAStif')
TwFAIner = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_tower, 'TwFAIner')
TwSSIner = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_tower, 'TwSSIner')
TwFAcgOf = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_tower, 'TwFAcgOf')
TwSScgOf = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_tower, 'TwSScgOf')
# Define the TOWER
TowerHt = gc.read_column_sheet_type01(excel_file_name, excel_sheet_parameters, 'TowerHt')
Elevation = TowerHt*HtFract
tower = gc.AeroelasticInformation()
tower.StructuralInformation.num_elem = len(Elevation) - 2
tower.StructuralInformation.num_node_elem = 3
tower.StructuralInformation.compute_basic_num_node()
# Interpolate excel variables into the correct locations
node_r, elem_r = create_node_radial_pos_from_elem_centres(Elevation,
tower.StructuralInformation.num_node,
tower.StructuralInformation.num_elem,
tower.StructuralInformation.num_node_elem)
# Stiffness
elem_EA = np.interp(elem_r,Elevation,TwEAStif)
elem_EIz = np.interp(elem_r,Elevation,TwSSStif)
elem_EIy = np.interp(elem_r,Elevation,TwFAStif)
elem_GJ = np.interp(elem_r,Elevation,TwGJStif)
# Stiffness: estimate unknown properties
print('WARNING: The poisson cofficient is supossed equal to 0.3')
print('WARNING: Cross-section area is used as shear area')
poisson_coef = 0.3
elem_GAy = elem_EA/2.0/(1.0+poisson_coef)
elem_GAz = elem_EA/2.0/(1.0+poisson_coef)
# Inertia
elem_mass_per_unit_length = np.interp(elem_r,Elevation,TMassDen)
elem_mass_iner_y = np.interp(elem_r,Elevation,TwFAIner)
elem_mass_iner_z = np.interp(elem_r,Elevation,TwSSIner)
# TODO: check yz axis and Flap-edge
elem_pos_cg_B = np.zeros((tower.StructuralInformation.num_elem,3),)
elem_pos_cg_B[:,1]=np.interp(elem_r,Elevation,TwSScgOf)
elem_pos_cg_B[:,2]=np.interp(elem_r,Elevation,TwFAcgOf)
# Stiffness: estimate unknown properties
print('WARNING: Using perpendicular axis theorem to compute the inertia around xB')
elem_mass_iner_x = elem_mass_iner_y + elem_mass_iner_z
# Create the tower
tower.StructuralInformation.create_mass_db_from_vector(elem_mass_per_unit_length, elem_mass_iner_x, elem_mass_iner_y, elem_mass_iner_z, elem_pos_cg_B)
tower.StructuralInformation.create_stiff_db_from_vector(elem_EA, elem_GAy, elem_GAz, elem_GJ, elem_EIy, elem_EIz)
coordinates = np.zeros((tower.StructuralInformation.num_node,3),)
coordinates[:,0] = node_r
tower.StructuralInformation.generate_1to1_from_vectors(
num_node_elem = tower.StructuralInformation.num_node_elem,
num_node = tower.StructuralInformation.num_node,
num_elem = tower.StructuralInformation.num_elem,
coordinates = coordinates,
stiffness_db = tower.StructuralInformation.stiffness_db,
mass_db = tower.StructuralInformation.mass_db,
frame_of_reference_delta = 'y_AFoR',
vec_node_structural_twist = np.zeros((tower.StructuralInformation.num_node,),),
num_lumped_mass = 1)
tower.StructuralInformation.boundary_conditions = np.zeros((tower.StructuralInformation.num_node), dtype = int)
tower.StructuralInformation.boundary_conditions[0] = 1
# Read overhang and nacelle properties from excel file
overhang_len = gc.read_column_sheet_type01(excel_file_name, excel_sheet_parameters, 'overhang')
# HubMass = gc.read_column_sheet_type01(excel_file_name, excel_sheet_nacelle, 'HubMass')
NacelleMass = gc.read_column_sheet_type01(excel_file_name, excel_sheet_parameters, 'NacMass')
# NacelleYawIner = gc.read_column_sheet_type01(excel_file_name, excel_sheet_nacelle, 'NacelleYawIner')
# Include nacelle mass
tower.StructuralInformation.lumped_mass_nodes = np.array([tower.StructuralInformation.num_node-1], dtype=int)
tower.StructuralInformation.lumped_mass = np.array([NacelleMass], dtype=float)
tower.AerodynamicInformation.set_to_zero(tower.StructuralInformation.num_node_elem,
tower.StructuralInformation.num_node,
tower.StructuralInformation.num_elem)
# Assembly overhang with the tower
# numberOfBlades = gc.read_column_sheet_type01(excel_file_name, excel_sheet_parameters, 'NumBl')
tilt = gc.read_column_sheet_type01(excel_file_name, excel_sheet_parameters, 'ShftTilt')*deg2rad
# cone = gc.read_column_sheet_type01(excel_file_name, excel_sheet_parameters, 'Cone')*deg2rad
overhang = gc.AeroelasticInformation()
overhang.StructuralInformation.num_node = 3
overhang.StructuralInformation.num_node_elem = 3
overhang.StructuralInformation.compute_basic_num_elem()
node_pos = np.zeros((overhang.StructuralInformation.num_node,3), )
node_pos[:,0] += tower.StructuralInformation.coordinates[-1,0]
node_pos[:,0] += np.linspace(0.,overhang_len*np.sin(tilt*deg2rad), overhang.StructuralInformation.num_node)
node_pos[:,2] = np.linspace(0.,-overhang_len*np.cos(tilt*deg2rad), overhang.StructuralInformation.num_node)
# TODO: change the following by real values
# Same properties as the last element of the tower
print("WARNING: Using the structural properties of the last tower section for the overhang")
oh_mass_per_unit_length = tower.StructuralInformation.mass_db[-1,0,0]
oh_mass_iner = tower.StructuralInformation.mass_db[-1,3,3]
oh_EA = tower.StructuralInformation.stiffness_db[-1,0,0]
oh_GA = tower.StructuralInformation.stiffness_db[-1,1,1]
oh_GJ = tower.StructuralInformation.stiffness_db[-1,3,3]
oh_EI = tower.StructuralInformation.stiffness_db[-1,4,4]
overhang.StructuralInformation.generate_uniform_sym_beam(node_pos,
oh_mass_per_unit_length,
oh_mass_iner,
oh_EA,
oh_GA,
oh_GJ,
oh_EI,
num_node_elem = 3,
y_BFoR = 'y_AFoR',
num_lumped_mass=0)
overhang.StructuralInformation.boundary_conditions = np.zeros((overhang.StructuralInformation.num_node), dtype = int)
overhang.StructuralInformation.boundary_conditions[-1] = -1
overhang.AerodynamicInformation.set_to_zero(overhang.StructuralInformation.num_node_elem,
overhang.StructuralInformation.num_node,
overhang.StructuralInformation.num_elem)
tower.assembly(overhang)
tower.remove_duplicated_points(tol_remove_points)
######################################################################
## WIND TURBINE
######################################################################
# Assembly the whole case
wt = tower.copy()
hub_position = tower.StructuralInformation.coordinates[-1,:]
rotor.StructuralInformation.coordinates += hub_position
wt.assembly(rotor)
# Redefine the body numbers
wt.StructuralInformation.body_number *= 0
wt.StructuralInformation.body_number[tower.StructuralInformation.num_elem:wt.StructuralInformation.num_elem] += 1
######################################################################
## MULTIBODY
######################################################################
# Define the boundary condition between the rotor and the tower tip
LC1 = gc.LagrangeConstraint()
LC1.behaviour = 'hinge_node_FoR_constant_vel'
LC1.node_in_body = tower.StructuralInformation.num_node-1
LC1.body = 0
LC1.body_FoR = 1
LC1.rot_axisB = np.array([1.,0.,0.0])
LC1.rot_vel = -rotation_velocity
LC = []
LC.append(LC1)
# Define the multibody infromation for the tower and the rotor
MB1 = gc.BodyInformation()
MB1.body_number = 0
MB1.FoR_position = np.zeros((6,),)
MB1.FoR_velocity = np.zeros((6,),)
MB1.FoR_acceleration = np.zeros((6,),)
MB1.FoR_movement = 'prescribed'
MB1.quat = np.array([1.0,0.0,0.0,0.0])
MB2 = gc.BodyInformation()
MB2.body_number = 1
MB2.FoR_position = np.array([rotor.StructuralInformation.coordinates[0, 0], rotor.StructuralInformation.coordinates[0, 1], rotor.StructuralInformation.coordinates[0, 2], 0.0, 0.0, 0.0])
MB2.FoR_velocity = np.array([0.,0.,0.,0.,0.,rotation_velocity])
MB2.FoR_acceleration = np.zeros((6,),)
MB2.FoR_movement = 'free'
MB2.quat = algebra.euler2quat(np.array([0.0,tilt,0.0]))
MB = []
MB.append(MB1)
MB.append(MB2)
######################################################################
## RETURN
######################################################################
return wt, LC, MB
######################################################################
# FROM excel type02
######################################################################
def rotor_from_excel_type02(chord_panels,
rotation_velocity,
pitch_deg,
excel_file_name= 'database_excel_type02.xlsx',
excel_sheet_parameters = 'parameters',
excel_sheet_structural_blade = 'structural_blade',
excel_sheet_discretization_blade = 'discretization_blade',
excel_sheet_aero_blade = 'aero_blade',
excel_sheet_airfoil_info = 'airfoil_info',
excel_sheet_airfoil_coord = 'airfoil_coord',
m_distribution = 'uniform',
h5_cross_sec_prop = None,
n_points_camber = 100,
tol_remove_points = 1e-3,
user_defined_m_distribution_type = None,
camber_effect_on_twist = False,
wsp = 0.,
dt = 0.):
"""
generate_from_excel_type02_db
Function needed to generate a wind turbine from an excel database type02
Args:
chord_panels (int): Number of panels on the blade surface in the chord direction
rotation_velocity (float): Rotation velocity of the rotor
pitch_deg (float): pitch angle in degrees
excel_file_name (str):
excel_sheet_structural_blade (str):
excel_sheet_discretization_blade (str):
excel_sheet_aero_blade (str):
excel_sheet_airfoil_info (str):
excel_sheet_airfoil_coord (str):
excel_sheet_parameters (str):
h5_cross_sec_prop (str): h5 containing mass and stiffness matrices along the blade.
m_distribution (str):
n_points_camber (int): number of points to define the camber of the airfoil,
tol_remove_points (float): maximum distance to remove adjacent points
Returns:
rotor (sharpy.utils.generate_cases.AeroelasticInfromation): Aeroelastic information of the rotor
Note:
- h5_cross_sec_prop is a path to a h5 containing the following groups:
- str_prop: with:
- K: list of 6x6 stiffness matrices
- M: list of 6x6 mass matrices
- radius: radial location (including hub) of K and M matrices
- when h5_cross_sec_prop is not None, mass and stiffness properties are
interpolated at BlFract location specified in "excel_sheet_structural_blade"
"""
######################################################################
## BLADE
######################################################################
blade = gc.AeroelasticInformation()
######################################################################
### STRUCTURE
######################################################################
# Read blade structural information from excel file
rR_structural = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_blade, 'rR')
OutPElAxis = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_blade, 'OutPElAxis')
InPElAxis = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_blade, 'InPElAxis')
ElAxisAftLEc = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_blade, 'ElAxisAftLEc')
StrcTwst = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_blade, 'StrcTwst')*deg2rad
BMassDen = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_blade, 'BMassDen')
FlpStff = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_blade, 'FlpStff')
EdgStff = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_blade, 'EdgStff')
FlapEdgeStiff = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_blade, 'FlapEdgeStiff')
GJStff = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_blade, 'GJStff')
EAStff = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_blade, 'EAStff')
FlpIner = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_blade, 'FlpIner')
EdgIner = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_blade, 'EdgIner')
FlapEdgeIner = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_blade, 'FlapEdgeIner')
PrebendRef = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_blade, 'PrebendRef')
PreswpRef = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_blade, 'PreswpRef')
OutPcg = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_blade, 'OutPcg')
InPcg = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_blade, 'InPcg')
# Blade parameters
TipRad = gc.read_column_sheet_type01(excel_file_name, excel_sheet_parameters, 'TipRad')
# HubRad = gc.read_column_sheet_type01(excel_file_name, excel_sheet_parameters, 'HubRad')
# Discretization points
rR = gc.read_column_sheet_type01(excel_file_name, excel_sheet_discretization_blade, 'rR')
# Interpolate excel variables into the correct locations
# Geometry
if rR[0] < rR_structural[0]:
rR_structural = np.concatenate((np.array([0.]), rR_structural),)
OutPElAxis = np.concatenate((np.array([OutPElAxis[0]]), OutPElAxis),)
InPElAxis = np.concatenate((np.array([InPElAxis[0]]), InPElAxis),)
ElAxisAftLEc = np.concatenate((np.array([ElAxisAftLEc[0]]), ElAxisAftLEc),)
StrcTwst = np.concatenate((np.array([StrcTwst[0]]), StrcTwst),)
BMassDen = np.concatenate((np.array([BMassDen[0]]), BMassDen),)
FlpStff = np.concatenate((np.array([FlpStff[0]]), FlpStff),)
EdgStff = np.concatenate((np.array([EdgStff[0]]), EdgStff),)
FlapEdgeStiff = np.concatenate((np.array([FlapEdgeStiff[0]]), FlapEdgeStiff),)
GJStff = np.concatenate((np.array([GJStff[0]]), GJStff),)
EAStff = np.concatenate((np.array([EAStff[0]]), EAStff),)
FlpIner = np.concatenate((np.array([FlpIner[0]]), FlpIner),)
EdgIner = np.concatenate((np.array([EdgIner[0]]), EdgIner),)
FlapEdgeIner = np.concatenate((np.array([FlapEdgeIner[0]]), FlapEdgeIner),)
PrebendRef = np.concatenate((np.array([PrebendRef[0]]), PrebendRef),)
PreswpRef = np.concatenate((np.array([PreswpRef[0]]), PreswpRef),)
OutPcg = np.concatenate((np.array([OutPcg[0]]), OutPcg),)
InPcg = np.concatenate((np.array([InPcg[0]]), InPcg),)
# Base parameters
use_excel_struct_as_elem = False
if use_excel_struct_as_elem:
blade.StructuralInformation.num_node_elem = 3
blade.StructuralInformation.num_elem = len(rR) - 2
blade.StructuralInformation.compute_basic_num_node()
node_r, elem_r = create_node_radial_pos_from_elem_centres(rR*TipRad,
blade.StructuralInformation.num_node,
blade.StructuralInformation.num_elem,
blade.StructuralInformation.num_node_elem)
else:
# Use excel struct as nodes
# Check the number of nodes
blade.StructuralInformation.num_node_elem = 3
blade.StructuralInformation.num_node = len(rR)
if ((len(rR) - 1) % (blade.StructuralInformation.num_node_elem - 1)) == 0:
blade.StructuralInformation.num_elem = int((len(rR) - 1)/(blade.StructuralInformation.num_node_elem - 1))
node_r = rR*TipRad
elem_rR = rR[1::2] + 0.
elem_r = rR[1::2]*TipRad + 0.
else:
print("ERROR: Cannot build ", blade.StructuralInformation.num_node_elem, "-noded elements from ", blade.StructuralInformation.num_node, "nodes")
node_y = np.interp(rR,rR_structural,InPElAxis) + np.interp(rR,rR_structural,PreswpRef)
node_z = -np.interp(rR,rR_structural,OutPElAxis) - np.interp(rR,rR_structural,PrebendRef)
node_twist = -1.0*np.interp(rR,rR_structural,StrcTwst)
coordinates = create_blade_coordinates(blade.StructuralInformation.num_node, node_r, node_y, node_z)
if h5_cross_sec_prop is None:
# Stiffness
elem_EA = np.interp(elem_rR,rR_structural,EAStff)
elem_EIy = np.interp(elem_rR,rR_structural,FlpStff)
elem_EIz = np.interp(elem_rR,rR_structural,EdgStff)
elem_EIyz = np.interp(elem_rR,rR_structural,FlapEdgeStiff)
elem_GJ = np.interp(elem_rR,rR_structural,GJStff)
# Stiffness: estimate unknown properties
print('WARNING: The poisson cofficient is supossed equal to 0.3')
print('WARNING: Cross-section area is used as shear area')
poisson_coef = 0.3
elem_GAy = elem_EA/2.0/(1.0+poisson_coef)
elem_GAz = elem_EA/2.0/(1.0+poisson_coef)
# Inertia
elem_pos_cg_B = np.zeros((blade.StructuralInformation.num_elem,3),)
elem_pos_cg_B[:,1] = np.interp(elem_rR,rR_structural,InPcg)
elem_pos_cg_B[:,2] = -np.interp(elem_rR,rR_structural,OutPcg)
elem_mass_per_unit_length = np.interp(elem_rR,rR_structural,BMassDen)
elem_mass_iner_y = np.interp(elem_rR,rR_structural,FlpIner)
elem_mass_iner_z = np.interp(elem_rR,rR_structural,EdgIner)
elem_mass_iner_yz = np.interp(elem_rR,rR_structural,FlapEdgeIner)
| |
]
if MYSQL:
query = f"""
SELECT COUNT(*) cnt
FROM midi_files
{where}
LIMIT {Select_Limit}
"""
if SQLITE:
if FULLTEXT:
query = f"""
SELECT COUNT(*) cnt
FROM midi_files_fts
{where}
LIMIT {Select_Limit}
"""
else:
query = f"""
SELECT COUNT(*) cnt
FROM midi_files
{where}
LIMIT {Select_Limit}
"""
query = fix_query( query )
dc.execute( query, data )
count = dc.fetchone()[ 'cnt' ]
return table, count
# --------------------------------------------------------------------------
def do_query_chordpro( dc, title, artist ):
    """Full-text search of the chordpro_files table by title and/or artist.

    Args:
        dc: database cursor (MySQL or Sqlite3, selected by module globals).
        title: search text for the title column, or falsy to skip it.
        artist: search text for the artist column, or falsy to skip it.

    Returns:
        (table, count): table is a list of [title, artist, file] rows,
        capped at Select_Limit; count is the total number of matches.
        Returns ([], 0) when no usable search term was supplied.
    """
    table = []
    wheres = []
    data = []
    count = 0
    # Build one WHERE condition per supplied search term.  The syntax differs
    # by engine: MySQL boolean MATCH/AGAINST, Sqlite3 FTS MATCH, or a
    # simulated full-word clause from fb.get_fulltext().
    if title:
        if MYSQL:
            wheres.append( "MATCH( title ) AGAINST( %s IN BOOLEAN MODE )" )
            data.append( title )
        if SQLITE:
            if FULLTEXT:
                wheres.append( "title MATCH ?" )
                data.append( title )
            else:
                w, d = fb.get_fulltext( "title", title )
                wheres.append( w )
                data.extend( d )
    if artist:
        if MYSQL:
            wheres.append( "MATCH( artist ) AGAINST( %s IN BOOLEAN MODE )" )
            data.append( artist )
        if SQLITE:
            if FULLTEXT:
                wheres.append( "artist MATCH ?" )
                data.append( artist )
            else:
                w, d = fb.get_fulltext( "artist", artist )
                wheres.append( w )
                data.extend( d )
    if len( data ):
        # All supplied terms must match (AND).
        where = "WHERE " + " AND ".join( wheres )
        # First query: the matching rows.  The FTS shadow table is only used
        # for Sqlite3 with FULLTEXT enabled.
        if MYSQL:
            query = f"""
                SELECT title, artist, file
                FROM chordpro_files
                {where}
                ORDER BY title, artist, file
                LIMIT {Select_Limit}
            """
        if SQLITE:
            if FULLTEXT:
                query = f"""
                    SELECT title, artist, file
                    FROM chordpro_files_fts
                    {where}
                    ORDER BY title, artist, file
                    LIMIT {Select_Limit}
                """
            else:
                query = f"""
                    SELECT title, artist, file
                    FROM chordpro_files
                    {where}
                    ORDER BY title, artist, file
                    LIMIT {Select_Limit}
                """
        query = fix_query( query )
        dc.execute( query, data )
        rows = dc.fetchall()
        if rows:
            table = [ [ row[ 'title' ], row[ 'artist' ], row[ 'file' ] ] for row in rows ]
        # Second query: total match count, re-using the same WHERE and data.
        if MYSQL:
            query = f"""
                SELECT COUNT(*) cnt
                FROM chordpro_files
                {where}
                LIMIT {Select_Limit}
            """
        if SQLITE:
            if FULLTEXT:
                query = f"""
                    SELECT COUNT(*) cnt
                    FROM chordpro_files_fts
                    {where}
                    LIMIT {Select_Limit}
                """
            else:
                query = f"""
                    SELECT COUNT(*) cnt
                    FROM chordpro_files
                    {where}
                    LIMIT {Select_Limit}
                """
        query = fix_query( query )
        dc.execute( query, data )
        count = dc.fetchone()[ 'cnt' ]
    return table, count
# --------------------------------------------------------------------------
def do_query_jjazz_filename( dc, title ):
    """Full-text search of the jjazz_files table by title.

    Returns:
        (matches, total): matches is a list of [title, file] rows capped
        at Select_Limit; total is the overall number of matching rows.
        ( [], 0 ) when no usable search term was supplied.
    """
    matches = []
    conditions = []
    params = []
    total = 0
    if title:
        if MYSQL:
            conditions.append( "MATCH( title ) AGAINST( %s IN BOOLEAN MODE )" )
            params.append( title )
        if SQLITE:
            if FULLTEXT:
                conditions.append( "title MATCH ?" )
                params.append( title )
            else:
                clause, clause_params = fb.get_fulltext( "title", title )
                conditions.append( clause )
                params.extend( clause_params )
    if params:
        where = "WHERE " + " OR ".join( conditions )
        # Pick the source table once: the FTS shadow table exists only for
        # Sqlite3 with FULLTEXT enabled; every other case reads jjazz_files.
        if MYSQL:
            src_table = "jjazz_files"
        if SQLITE:
            src_table = "jjazz_files_fts" if FULLTEXT else "jjazz_files"
        # Matching rows, capped at Select_Limit.
        query = fix_query( f"""
            SELECT title, file
            FROM {src_table}
            {where}
            ORDER BY title, file
            LIMIT {Select_Limit}
        """ )
        dc.execute( query, params )
        rows = dc.fetchall()
        if rows:
            matches = [ [ row[ 'title' ], row[ 'file' ] ] for row in rows ]
        # Total count of matches with the same WHERE clause and parameters.
        count_query = fix_query( f"""
            SELECT COUNT(*) cnt
            FROM {src_table}
            {where}
            LIMIT {Select_Limit}
        """ )
        dc.execute( count_query, params )
        total = dc.fetchone()[ 'cnt' ]
    return matches, total
# --------------------------------------------------------------------------
def do_query_youtube_index( dc, title ):
    """Search the YouTube index (titles_distinct joined with title2youtube).

    Args:
        dc: database cursor (MySQL or Sqlite3, selected by module globals).
        title: search text for the song title, or falsy to skip the search.

    Returns:
        (table, count): table is a list of [title, ytitle, duration, yt_id]
        rows, capped at Select_Limit; count is the total number of matches.
        ([], 0) when no title was given.
    """
    table = []
    data = []
    count = 0
    if title:
        if MYSQL:
            query = f"""
                SELECT titles_distinct.title,
                    title2youtube.ytitle, title2youtube.duration, title2youtube.yt_id
                FROM titles_distinct
                JOIN title2youtube ON title2youtube.title_id = titles_distinct.title_id
                WHERE MATCH( titles_distinct.title ) AGAINST( %s IN BOOLEAN MODE )
                ORDER BY titles_distinct.title, title2youtube.ytitle
                LIMIT {Select_Limit}
            """
            data.append( title )
        if SQLITE:
            if FULLTEXT:
                # BUG FIX: ORDER BY previously referenced titles_distinct.title,
                # but titles_distinct is not part of this FROM clause (the FTS
                # shadow table is), so Sqlite rejected the query with
                # "no such column".  Order by the FTS table's column instead.
                query = f"""
                    SELECT title,
                        title2youtube.ytitle, title2youtube.duration, title2youtube.yt_id
                    FROM titles_distinct_fts
                    JOIN title2youtube USING( title_id )
                    WHERE titles_distinct_fts.title MATCH ?
                    ORDER BY titles_distinct_fts.title, title2youtube.ytitle
                    LIMIT {Select_Limit}
                """
                data.append( title )
            else:
                w, d = fb.get_fulltext( "titles_distinct.title", title )
                data.extend( d )
                query = f"""
                    SELECT title,
                        title2youtube.ytitle, title2youtube.duration, title2youtube.yt_id
                    FROM titles_distinct
                    JOIN title2youtube USING( title_id )
                    WHERE {w}
                    ORDER BY titles_distinct.title, title2youtube.ytitle
                    LIMIT {Select_Limit}
                """
        query = fix_query( query )
    # --------------------------------------------------------------------
    if len( data ):
        dc.execute( query, data )
        rows = dc.fetchall()
        if rows:
            for row in rows:
                table.append( [ row[ 'title' ], row[ 'ytitle' ],
                                row[ 'duration' ], row[ 'yt_id' ] ] )
        # Count query: same WHERE condition, re-using the already populated
        # data list (it still holds exactly the placeholders the WHERE needs).
        if MYSQL:
            query = f"""
                SELECT COUNT(*) cnt
                FROM titles_distinct
                JOIN title2youtube ON title2youtube.title_id = titles_distinct.title_id
                WHERE MATCH( titles_distinct.title ) AGAINST( %s IN BOOLEAN MODE )
            """
        if SQLITE:
            if FULLTEXT:
                query = f"""
                    SELECT COUNT(*) cnt
                    FROM titles_distinct_fts
                    JOIN title2youtube USING( title_id )
                    WHERE titles_distinct_fts.title MATCH ?
                """
            else:
                # 'w' was produced by fb.get_fulltext() above.
                query = f"""
                    SELECT COUNT(*) cnt
                    FROM titles_distinct
                    JOIN title2youtube USING( title_id )
                    WHERE {w}
                """
        query = fix_query( query )
        dc.execute( query, data )
        count = dc.fetchone()[ 'cnt' ]
    return table, count
# --------------------------------------------------------------------------
def process_events( event, values ):
global indexed_music_file_table_data, audio_file_table_data, music_file_table_data, midi_file_table_data
global youtube_file_table_data, chordpro_file_table_data, jjazz_file_table_data
if( event == 'search' or # Search button or 'Enter' in search text boxes.
event == 'exclude-duplicate-none' or # Also events when these buttons change.
event == 'exclude-duplicate-titles' or
event == 'exclude-duplicate-canonicals' or
event == 'exclude-duplicate-srcs'
):
# ---------------------------------------
# Gather search data
title = values[ "song-title" ] if 'song-title' in values else None
album = values[ "song-album" ] if 'song-album' in values else None
artist = values[ "song-artist" ] if 'song-artist' in values else None
composer = values[ "song-composer" ] if 'song-composer' in values else None
lyricist = values[ "song-lyricist" ] if 'song-lyricist' in values else None
src = values[ "local-src" ] if 'local-src' in values else None
canonical = values[ "canonical" ] if 'canonical' in values else None
join_flag = True if values[ 'search-join-title' ] else False
# exclude_duplicate_none = True if values[ 'exclude-duplicate-none' ] else False
exclude_duplicate_titles = True if values[ 'exclude-duplicate-titles' ] else False
exclude_duplicate_srcs = True if values[ 'exclude-duplicate-srcs' ] else False
exclude_duplicate_canonicals = True if values[ 'exclude-duplicate-canonicals' ] else False
# ---------------------------------------
# Make MySql boolean search values.
if MYSQL:
if title:
title = make_boolean( title )
if album:
album = make_boolean( album )
if artist:
artist = make_boolean( artist )
if composer:
composer = make_boolean( composer )
if lyricist:
lyricist = make_boolean( lyricist )
if SQLITE: # Nothing to be done for search terms in Sqlite3, at least for simulated fullword search.
pass
# ---------------------------------------
# Initialize all tables to no data. Update tables only when matched search selection.
indexed_music_file_table_data = music_file_table_data = audio_file_table_data = \
midi_file_table_data = youtube_file_table_data = chordpro_file_table_data = jjazz_file_table_data = []
# ------------------------------------------------------------------------------
# Search music filename, audio file index, youtube index in database.
# Update associated tables.
# Select first tab for search results found going from left to right.
# Leave select/focus as is if nothing found.
# /// RESUME OK - include additional search terms in do_query*() functions?
# ---------------------------------------
# Set True when first tab selected. Don't select others after that.
tab_selected = False
# ---------------------------------------
if join_flag:
indexed_music_file_table_data, pdf_count = do_query_music_file_index_with_join( dc, title, composer, lyricist, album, artist, src, canonical )
else:
indexed_music_file_table_data, pdf_count = do_query_music_file_index( dc, title, composer, lyricist, src, canonical )
# ------------------------
# WRW 10 Apr 2022 - Changed this around a bit, now disjoint selection via radio buttons.
if exclude_duplicate_titles:
indexed_music_file_table_data = select_unique_titles( indexed_music_file_table_data )
elif exclude_duplicate_canonicals:
indexed_music_file_table_data = select_unique_canonicals( indexed_music_file_table_data )
elif exclude_duplicate_srcs:
indexed_music_file_table_data = select_unique_srcs( indexed_music_file_table_data )
# ------------------------
indexed_music_file_table_data = strip_priority_data( indexed_music_file_table_data )
fb.safe_update( window['indexed-music-file-table'] , indexed_music_file_table_data, None )
if len( indexed_music_file_table_data ):
window.Element( 'tab-indexed-music-file-table' ).select()
window.Element( 'tab-display-pdf' ).set_focus()
tab_selected = True
# ---------------------------------------
| |
import os
import sys
import pandas as pd
from Bio import SeqIO
import matplotlib.pyplot as plt
import matplotlib as mpl
import scipy.stats as st
import random as rnd
import numpy as np
#
#
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
# rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
# #
#
mpl.rcParams['text.latex.preamble'] = [
r'\usepackage{textcomp}', # i need upright \micro symbols, but you need...
# r'\sisetup{detect-all}', # ...this to force siunitx to actually use your fonts
r'\usepackage{helvet}', # set the normal font here
r'\usepackage{sansmath}', # load up the sansmath so that math -> helvet
r'\sansmath' # <- tricky! -- gotta actually tell tex to use!
]
#
font = {#'family' : 'sans-serif',
#'weight' : 'bold',
'size' :9}
rc('font', **font)
# # data loading ...
#
#
#
#################################
# before we proceed to plotting, add the TrOp status calculator for organisms ...
def get_one_trop(all_cds_grouped, idx):
    """Return the TrOp (translational optimization) status of one organism.

    Looks up the organism's CDS rows in the grouped frame and maps its
    single TrOp flag to one of the strings 'true', 'false' or 'none'
    ('none' meaning the status could not be computed).
    """
    org_records = all_cds_grouped.get_group(idx)
    # Every CDS row of a single organism must carry the same TrOp value.
    distinct_status = org_records['TrOp'].unique()
    assert distinct_status.size == 1
    status, = distinct_status
    if pd.isnull(status):
        # Undefined status - presumably not enough ribosomal proteins.
        return 'none'
    if not status:
        return 'false'
    elif status == True:
        return 'true'
    else:
        raise ValueError
#################################
from matplotlib.ticker import MaxNLocator
from matplotlib.patches import ConnectionPatch
from matplotlib.patches import Rectangle
from matplotlib.ticker import NullFormatter
import scipy.interpolate as interpol
###########################################
# Amino-acid alphabet and the combined residue groups analysed below.
aacids = list('CMFILVWYAGTSNQDEHRKP')
aa_combinations = ['IVYWREL', 'DEKR', 'AGNQSTHY', 'MPCLVWIF', 'ILVM']
##########################################
# Local genome collections (bacteria and archaea) under the home directory.
root_path = os.path.expanduser('~')
bact_path = os.path.join(root_path,'GENOMES_BACTER_RELEASE69/genbank')
arch_path = os.path.join(root_path,'GENOMES_ARCH_SEP2015')
# SOME ARCHAEAL DATA ...
arch = pd.read_csv(os.path.join(arch_path,'summary_organisms_interest.dat'))
arch_nohalo = pd.read_csv(os.path.join(arch_path,'summary_organisms_interest_no_halop.dat'))
###########################################
# SOME BACTERIAL DATA ...
# complete genomes only ...
bact = pd.read_csv(os.path.join(bact_path,'env_catalog_compgenome.dat'))
# bacter proteomic summary ...
bact_prot = pd.read_csv(os.path.join(bact_path,'proteome_all.dat'))
# arch proteomic summary ...
arch_prot = pd.read_csv(os.path.join(arch_path,'proteome_arch.dat'))
# Convert amino-acid fractions to percentages.
arch_prot[aacids] = arch_prot[aacids]*100.0
# Merge environment/organism summaries with the proteomic compositions.
arch_dat = pd.merge(arch,arch_prot,on='assembly_accession')
arch_nohalo_dat = pd.merge(arch_nohalo,arch_prot,on='assembly_accession')
# Halophiles = archaea present in the full set but not in the no-halophile set.
arch_halo_dat = arch_dat[~arch_dat['assembly_accession'].isin(arch_nohalo['assembly_accession'])]
#
bact_dat = pd.merge(bact,bact_prot,on='GenomicID')
bact_dat[aacids] = bact_dat[aacids]*100.0
calculate_TrOp = True
if calculate_TrOp:
    # we need the following to calculate TrOp status on the fly ...
    ###############################################
    # complete_CDS_CAI_DNA.dat same thing ...
    arch_cai_fname = os.path.join(arch_path,"complete_arch_CDS_CAI_DNA.dat")
    bact_cai_fname = os.path.join(bact_path,"complete_CDS_CAI_DNA.dat")
    #
    arch_cai = pd.read_csv(arch_cai_fname)
    bact_cai = pd.read_csv(bact_cai_fname)
    #
    bact_cai_by_org = bact_cai.groupby('GenomicID')
    arch_cai_by_org = arch_cai.groupby('assembly_accession')
    #
    # calculate TrOp status per organism (see get_one_trop above)
    bact_dat['TrOp'] = [get_one_trop(bact_cai_by_org,idx) for idx in bact_dat['GenomicID']]
    arch_nohalo_dat['TrOp'] = [get_one_trop(arch_cai_by_org,idx) for idx in arch_nohalo_dat['assembly_accession']]
############################################################
# PLOTTING FUNCTIONS ...
##############################################################
def aap_get_axes(ranges):
    """Build the figure canvas for the per-amino-acid scatter panels.

    Args:
        ranges: sequence of four (min, max) pairs - amino-acid fraction,
            temperature, GC content and combined-group fraction limits
            (as produced by update_ranges).

    Returns:
        (fig, ax, ax_comb, cax): the figure, the 4x5 grid of shared axes
        (one per amino acid), the row of five axes for the residue
        combinations, and the colorbar axes.

    BUG FIX: the loops below used Python-2-only ``xrange``; ``range`` is
    equivalent here and works on both Python 2 and 3.
    """
    def exp_lim(xlims, expansion_coeff=1.1):
        # Symmetrically expand a (min, max) pair about its midpoint.
        xmean = 0.5*sum(xlims)
        half_xdelta = 0.5*(xlims[1] - xlims[0])
        new_delta = expansion_coeff*half_xdelta
        return (xmean - new_delta, xmean + new_delta)
    # Expand the incoming limits a little so points don't sit on the frame
    # (GC limits are kept as-is: coefficient 1.0).
    exp_coeffs = (1.1, 1.1, 1.0, 1.1)
    flims, tlims, gclims, fclims = [exp_lim(lims, coeff)
                                    for lims, coeff in zip(ranges, exp_coeffs)]
    xbins = 5
    ybins = 4
    # Grid of subplots sharing both axes; no gaps between panels.
    fig, ax = plt.subplots(ybins, xbins, figsize=(7.5, xbins*7.5/ybins),
                           sharex=True, sharey=True)
    fig.subplots_adjust(hspace=0.0, wspace=0.0)
    # Make some room for the axes' labels and the lower combination panel.
    l, b, r, t = 0.05, 0.3, 0.98, 0.98
    w, h = r - l, t - b
    fig.subplots_adjust(bottom=b, left=l, right=r, top=t)
    # Limits are predefined by the ranges; sharex/sharey propagate them.
    ax[0, 0].set_xlim(tlims)
    ax[0, 0].set_ylim(flims)
    ax[-2, 0].yaxis.set_major_locator(MaxNLocator(nbins=5, prune='upper'))
    ax[0, -2].xaxis.set_major_locator(MaxNLocator(nbins=5, prune='upper'))
    ##############################
    # Lower axes panel: one axes per amino-acid combination.
    w = w/float(xbins)
    height = h = h/float(ybins)
    palette_bottom = b
    bottom = b = 0.04
    right = r
    ax_comb = []
    for i_ax in range(xbins):
        ax_comb.append(fig.add_axes([l, b, w, h]))
        if i_ax:
            # Only the leftmost combination panel keeps its y tick labels.
            ax_comb[i_ax].yaxis.set_tick_params(which='both', labelleft='off')
        ax_comb[i_ax].xaxis.set_major_locator(MaxNLocator(nbins=5, prune='upper'))
        ax_comb[i_ax].yaxis.set_major_locator(MaxNLocator(nbins=5))
        ax_comb[i_ax].set_ylim(fclims)
        ax_comb[i_ax].set_xlim(tlims)
        l += w
    #################
    # Colorbar axes centred vertically between the two panel rows.
    left = 0.5
    cax_height = 0.028
    cax_bottom = 0.5*(bottom + height + palette_bottom) - 0.5*cax_height
    cax = fig.add_axes([left, cax_bottom, right - left, cax_height])
    return (fig, ax, ax_comb, cax)
def update_ranges(dat, flims=False, tlims=False, gclims=False, fclims=False,
                  temp='OptimumTemperature', gc='GC'):
    """Grow (min, max) limit pairs so they also cover the data in *dat*.

    Each of flims/tlims/gclims/fclims is either falsy ("no limits yet") or
    a (min, max) pair; fclims tracks the combined residue groups.  Returns
    the four updated pairs as a tuple.
    """
    def span(series):
        # (min, max) extent of one data column
        return (series.min(), series.max())

    def widen(values, limits):
        # Merge previous limits with the extent of the new values;
        # falsy `limits` means there were no previous limits.
        lo, hi = span(values)
        if not limits:
            return (lo, hi)
        return (min(lo, limits[0]), max(hi, limits[1]))

    # Amino-acid fraction limits are shared across all 20 residues ...
    new_flims = flims
    for amino in aacids:
        new_flims = widen(dat[amino], new_flims)
    # ... while temperature and GC are per-organism columns.
    new_tlims = widen(dat[temp], tlims)
    new_gclims = widen(dat[gc], gclims)
    # Combined residue groups (e.g. 'IVYWREL') use the summed fractions.
    new_fclims = fclims
    for group in aa_combinations:
        new_fclims = widen(dat[list(group)].sum(axis=1), new_fclims)
    return (new_flims, new_tlims, new_gclims, new_fclims)
def fill_palette(dat, axis, coloring_type='solid', color_vlims=False,
                 temp='OptimumTemperature', gc='GC', **kwargs):
    """Scatter amino-acid usage vs temperature on the panel grid from aap_get_axes.

    Args:
        dat: DataFrame with the 20 amino-acid columns plus *temp* and *gc*.
        axis: the (fig, ax, ax_comb, cax) tuple returned by aap_get_axes.
        coloring_type: 'solid' (single color) or 'map' (color by GC content).
        color_vlims: (vmin, vmax) for the color map; ignored for 'solid'.
        kwargs: optional 'color', 'alpha', 'fit' (draw regression line),
            'label' (annotate panels with R and significance).

    Returns:
        The last scatter artist (used by figure_level_caps for the colorbar).

    BUG FIX: the panel loops used Python-2-only ``xrange``; replaced with
    ``range``.  The boolean 'label' option no longer shadows the per-panel
    label callables.
    """
    def get_coloring(coloring_type, **kwargs):
        # Resolve the scatter color: a constant color, or the GC column.
        if coloring_type == 'solid':
            if 'color' in kwargs:
                coloring = kwargs['color']
            else:
                coloring = 'red'
        elif coloring_type == 'map':
            coloring = dat[gc]
            # color_vlims must be provided ...
        else:
            raise ValueError("coloring type can either 'solid' or 'map'!")
        return coloring

    def flabel(aa, rr, pp):
        # Panel label: residue name, optionally with R and significance stars.
        if bool(rr) and bool(pp):
            label = '%s: ' % aa
            if pp < 0.001:
                label += '$R=%.2f^{***}$ ' % rr
            elif pp < 0.05:
                label += '$R=%.2f^{**}$ ' % rr
            else:
                label += '$R=%.2f^{*}$ ' % rr
            return label
        else:
            return '%s' % aa

    def plot_single_scatter(x, y, ax, flabel, coloring='red', vlims=False,
                            fit=True, alpha=1.0):
        # One scatter panel, optionally with a linear-regression line.
        scatter_size = 65
        edgecolor = 'none'
        cmap = plt.get_cmap('jet')
        x_range = np.asarray([x.min(), x.max()])
        # use palette limits or not ...
        kwargs = {'vmin': vlims[0], 'vmax': vlims[1]} if vlims else {'norm': True}
        kwargs['alpha'] = alpha
        scatter = ax.scatter(x, y, edgecolor=edgecolor, s=scatter_size,
                             c=coloring, cmap=cmap, **kwargs)
        if fit and flabel:
            a, b, r, pval, _ = st.linregress(x, y)
            label = flabel(r, pval)
            ax.plot(x_range, a*x_range + b, '-', color='dimgray', lw=2, label=label)
        elif flabel:
            label = flabel(False, False)
        else:
            label = ""
        # Put the label in the emptier half of the panel.
        axes_equator = np.asarray(ax.get_ylim()).mean()
        loc = (0.02, 0.91) if y.mean() < axes_equator else (0.02, 0.15)
        ax.text(loc[0], loc[1], label, fontsize=8.3, fontweight='bold',
                verticalalignment='top', transform=ax.transAxes)
        return scatter

    ###################################################
    # extracting axis ...
    fig, ax, ax_comb, cax = axis
    ybins, xbins = ax.shape
    coloring = get_coloring(coloring_type, **kwargs)
    # vlims will be ignored if coloring is solid ...
    alpha = kwargs['alpha'] if ('alpha' in kwargs) else 1.0
    fit = kwargs['fit'] if ('fit' in kwargs) else True
    want_labels = kwargs['label'] if ('label' in kwargs) else True
    x = dat[temp]
    # Lower row: combined residue groups (summed fractions).
    for axc, combo in zip(ax_comb, aa_combinations):
        y = dat[list(combo)].sum(axis=1)
        # bind combo explicitly so the callable is self-contained
        lab = (lambda rr, pp, aa=combo: flabel(aa, rr, pp)) if want_labels else False
        plot_single_scatter(x, y, axc, flabel=lab, coloring=coloring,
                            vlims=color_vlims, fit=fit, alpha=alpha)
    ##############################
    # Main grid: one panel per amino acid, row-major order.
    for yi in range(ybins):
        for xi in range(xbins):
            aa = aacids[yi*xbins + xi]
            y = dat[aa]
            lab = (lambda rr, pp, aa=aa: flabel(aa, rr, pp)) if want_labels else False
            scatter = plot_single_scatter(x, y, ax[yi, xi], flabel=lab,
                                          coloring=coloring, vlims=color_vlims,
                                          fit=fit, alpha=alpha)
    return scatter
##########################################################
# UNDER CONSTRUCTION ...
##########################################################
def figure_level_caps(axis, scatters, coloring_type, color_vlims=None, scnames=None):
    """Add figure-level captions plus either a GC colorbar ('map') or a legend ('solid')."""
    fig, ax, ax_comb, cax = axis
    # Shared x / y captions for the whole canvas.
    fig.text(0.015, 0.5, r'amino acid usage, \%', rotation='vertical',
             transform=fig.transFigure, fontsize=13, ha='center', va='center')
    fig.text(0.5, 0.01, r'Temperature, \textcelsius',
             transform=fig.transFigure, fontsize=13, ha='center', va='center')
    if (coloring_type == 'map') and (len(scatters) > 1):
        raise ValueError("color map supports only 1 set of scatter plots ...")
    if coloring_type == 'map':
        # Single scatter colored by GC: draw a horizontal colorbar in cax.
        the_scatter, = scatters
        cax.set_visible(True)
        box = cax.get_position()
        fig.text(box.x0 - 0.2, box.y0 + 0.5*box.height, r'GC content, \%',
                 transform=fig.transFigure, fontsize=14, ha='left', va='center')
        cbar = fig.colorbar(the_scatter, cax=cax, orientation='horizontal')
        # Ticks every 5 GC-percent within the color limits.
        ticks = 5*np.arange(color_vlims[0]//5, color_vlims[1]//5) + 5
        cbar.set_ticks(ticks)
        cbar.set_ticklabels([str(tick) for tick in ticks])
    elif coloring_type == 'solid':
        # Solid-colored scatters: replace the colorbar with a legend.
        cax.set_visible(False)
        box = cax.get_position()
        fig.legend(scatters, scnames,
                   loc='center right',
                   bbox_to_anchor=(box.x0 + box.width, box.y0 + 0.5*box.height),
                   bbox_transform=fig.transFigure,
                   scatterpoints=1,
                   markerscale=1.5,
                   ncol=len(scatters))
##########################################################
# UNDER CONSTRUCTION ...
##########################################################
# # that's a working exmaple of closure here ...
# def outer():
# # defaut definitions ...
# xlims = [45,45]
# ylims = [50,50]
# def inner(x,y):
# xlims[0],xlims[1] = min(xlims[0],min(x)), max(xlims[1],max(x))
# ylims[0],ylims[1] = min(ylims[0],min(y)), max(ylims[1],max(y))
# return (xlims,ylims)
# return inner
# f = outer()
# drawing functional style ...
def dq_get_axes():
    """Placeholder for the scatter-plus-marginal-histogram axes builder (not implemented)."""
    return None
# drawing procedures ...
nullfmt = NullFormatter() # no labels
# definitions for the axes
left, width = 0.1, 0.65
bottom, height = 0.1, 0.65
bottom_h = left_h = left+width+0.02
#
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom_h, width, 0.2]
rect_histy = [left_h, bottom, 0.2, height]
# start with a rectangular Figure
plt.clf()
fig = plt.figure(figsize=(7.5,1.0*7.5))
# add axes ...
axScatter = fig.add_axes(rect_scatter)
axHistx = fig.add_axes(rect_histx)
axHisty = fig.add_axes(rect_histy)
# no labels
axHistx.xaxis.set_major_formatter(nullfmt)
axHisty.yaxis.set_major_formatter(nullfmt)
#
axHistx.yaxis.set_major_locator( MaxNLocator(nbins = 5) )
axHisty.yaxis.set_major_locator( MaxNLocator(nbins = 5) )
axHisty.xaxis.set_major_locator( MaxNLocator(nbins | |
import collections
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pytest
import dask
import dask.dataframe as dd
from dask.dataframe.utils import assert_eq, assert_dask_graph, assert_max_deps, PANDAS_VERSION
# Aggregation method names exercised by the parametrized ``agg_func`` fixture below.
AGG_FUNCS = ['sum', 'mean', 'min', 'max', 'count', 'size', 'std', 'var', 'nunique', 'first', 'last']
@pytest.fixture(params=AGG_FUNCS)
def agg_func(request):
    """Parametrized fixture yielding each groupby aggregation name in AGG_FUNCS."""
    return request.param
def groupby_internal_repr():
    """Check the internal types backing dask DataFrame/Series groupby objects."""
    pdf = pd.DataFrame({'x': [1, 2, 3, 4, 6, 7, 8, 9, 10],
                        'y': list('abcbabbcda')})
    ddf = dd.from_pandas(pdf, 3)
    # DataFrame groupby by column name.
    gp = pdf.groupby('y')
    dp = ddf.groupby('y')
    assert isinstance(dp, dd.groupby.DataFrameGroupBy)
    assert isinstance(dp._meta, pd.core.groupby.DataFrameGroupBy)
    assert isinstance(dp.obj, dd.DataFrame)
    assert_eq(dp.obj, gp.obj)
    # Series groupby via single-column selection.
    gp = pdf.groupby('y')['x']
    dp = ddf.groupby('y')['x']
    assert isinstance(dp, dd.groupby.SeriesGroupBy)
    assert isinstance(dp._meta, pd.core.groupby.SeriesGroupBy)
    # slicing should not affect to internal
    assert isinstance(dp.obj, dd.Series)
    assert_eq(dp.obj, gp.obj)
    # DataFrame groupby via list-of-columns selection.
    gp = pdf.groupby('y')[['x']]
    dp = ddf.groupby('y')[['x']]
    assert isinstance(dp, dd.groupby.DataFrameGroupBy)
    assert isinstance(dp._meta, pd.core.groupby.DataFrameGroupBy)
    # slicing should not affect to internal
    assert isinstance(dp.obj, dd.DataFrame)
    assert_eq(dp.obj, gp.obj)
    # Grouping by a Series rather than a column name.
    gp = pdf.groupby(pdf.y)['x']
    dp = ddf.groupby(ddf.y)['x']
    assert isinstance(dp, dd.groupby.SeriesGroupBy)
    assert isinstance(dp._meta, pd.core.groupby.SeriesGroupBy)
    # slicing should not affect to internal
    assert isinstance(dp.obj, dd.Series)
    assert_eq(dp.obj, gp.obj)
    gp = pdf.groupby(pdf.y)[['x']]
    dp = ddf.groupby(ddf.y)[['x']]
    assert isinstance(dp, dd.groupby.DataFrameGroupBy)
    assert isinstance(dp._meta, pd.core.groupby.DataFrameGroupBy)
    # slicing should not affect to internal
    assert isinstance(dp.obj, dd.DataFrame)
    assert_eq(dp.obj, gp.obj)
def groupby_error():
    """
    Grouping or slicing on a missing column must raise KeyError.
    NOTE(review): the name lacks the `test_` prefix, so pytest will not
    collect it — confirm whether that is intentional.
    """
    # Bug fix: 'x' had 9 values against 10 labels in 'y' (the DataFrame
    # constructor raises ValueError on mismatched lengths). Use 10 values.
    pdf = pd.DataFrame({'x': [0, 1, 2, 3, 4, 6, 7, 8, 9, 10],
                        'y': list('abcbabbcda')})
    ddf = dd.from_pandas(pdf, 3)
    with pytest.raises(KeyError):
        ddf.groupby('A')
    with pytest.raises(KeyError):
        ddf.groupby(['x', 'A'])
    dp = ddf.groupby('y')
    msg = 'Column not found: '
    with pytest.raises(KeyError) as err:
        dp['A']
    assert msg in str(err.value)
    with pytest.raises(KeyError) as err:
        dp[['x', 'A']]
    assert msg in str(err.value)
def groupby_internal_head():
    """_head() on a dask groupby should match grouping the head of the pandas frame."""
    pdf = pd.DataFrame({'A': [1, 2] * 10,
                        'B': np.random.randn(20),
                        'C': np.random.randn(20)})
    ddf = dd.from_pandas(pdf, 3)
    cases = [
        (ddf.groupby('A'), pdf.head().groupby('A')),
        (ddf.groupby(ddf['A']), pdf.head().groupby(pdf['A'])),
        (ddf.groupby(ddf['A'] + 1), pdf.head().groupby(pdf['A'] + 1)),
    ]
    for dask_grouped, pandas_grouped in cases:
        assert_eq(dask_grouped._head().sum(), pandas_grouped.sum())
def test_full_groupby():
    """groupby(...).apply with a custom function matches pandas; bad keys raise."""
    frame = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
                          'b': [4, 5, 6, 3, 2, 1, 0, 0, 0]},
                         index=[0, 1, 3, 5, 6, 8, 9, 9, 9])
    dframe = dd.from_pandas(frame, npartitions=3)
    pytest.raises(KeyError, lambda: dframe.groupby('does_not_exist'))
    pytest.raises(AttributeError, lambda: dframe.groupby('a').does_not_exist)
    assert 'b' in dir(dframe.groupby('a'))

    def demean(df):
        return df.assign(b=df.b - df.b.mean())

    assert_eq(frame.groupby('a').apply(demean),
              dframe.groupby('a').apply(demean))
def test_full_groupby_apply_multiarg():
    """apply() with extra positional/keyword args: concrete, Scalar and Delayed."""
    pdf = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
                        'b': [4, 5, 6, 3, 2, 1, 0, 0, 0]},
                       index=[0, 1, 3, 5, 6, 8, 9, 9, 9])
    ddf = dd.from_pandas(pdf, npartitions=3)

    def shift(df, c, d=3):
        return df.assign(b=df.b - df.b.mean() + c * d)

    c = pdf.a.sum()
    d = pdf.b.mean()
    c_scalar = ddf.a.sum()
    d_scalar = ddf.b.mean()
    c_delayed = dask.delayed(lambda: c)()
    d_delayed = dask.delayed(lambda: d)()
    meta = pdf.groupby('a').apply(shift, c)
    # Exercise both lazy flavours of the extra arguments.
    for lazy_c, lazy_d in [(c_scalar, d_scalar),
                           (c_delayed, d_delayed)]:
        assert_eq(pdf.groupby('a').apply(shift, c),
                  ddf.groupby('a').apply(shift, c))
        assert_eq(pdf.groupby('a').apply(shift, c, d=d),
                  ddf.groupby('a').apply(shift, c, d=d))
        assert_eq(pdf.groupby('a').apply(shift, c),
                  ddf.groupby('a').apply(shift, lazy_c), check_dtype=False)
        assert_eq(pdf.groupby('a').apply(shift, c),
                  ddf.groupby('a').apply(shift, lazy_c, meta=meta))
        assert_eq(pdf.groupby('a').apply(shift, c, d=d),
                  ddf.groupby('a').apply(shift, c, d=lazy_d))
        assert_eq(pdf.groupby('a').apply(shift, c, d=d),
                  ddf.groupby('a').apply(shift, c, d=lazy_d, meta=meta))
@pytest.mark.parametrize('grouper', [
    lambda df: ['a'],
    lambda df: ['a', 'b'],
    lambda df: df['a'],
    lambda df: [df['a'], df['b']],
    # Grouping by boolean expressions is not supported yet in dask.
    pytest.mark.xfail(reason="not yet supported")(lambda df: [df['a'] > 2, df['b'] > 1])
])
@pytest.mark.parametrize('reverse', [True, False])
def test_full_groupby_multilevel(grouper, reverse):
    """groupby(...).apply for several grouper shapes, with forward and reversed index."""
    index = [0, 1, 3, 5, 6, 8, 9, 9, 9]
    if reverse:
        index = index[::-1]
    df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
                       'd': [1, 2, 3, 4, 5, 6, 7, 8, 9],
                       'b': [4, 5, 6, 3, 2, 1, 0, 0, 0]},
                      index=index)
    ddf = dd.from_pandas(df, npartitions=3)

    def func(df):
        return df.assign(b=df.b - df.b.mean())

    # The last grouper causes a DeprecationWarning from pandas.
    # See https://github.com/pandas-dev/pandas/issues/16481
    assert_eq(df.groupby(grouper(df)).apply(func),
              ddf.groupby(grouper(ddf)).apply(func))
def test_groupby_dir():
    """Only attribute-safe column names should appear in dir() of a groupby."""
    pdf = pd.DataFrame({'a': range(10), 'b c d e': range(10)})
    ddf = dd.from_pandas(pdf, npartitions=2)
    grouped = ddf.groupby('a')
    assert 'a' in dir(grouped)
    assert 'b c d e' not in dir(grouped)
@pytest.mark.parametrize('scheduler', ['sync', 'threads'])
def test_groupby_on_index(scheduler):
    """Grouping on the index behaves like grouping on the equivalent column."""
    pdf = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
                        'b': [4, 5, 6, 3, 2, 1, 0, 0, 0]},
                       index=[0, 1, 3, 5, 6, 8, 9, 9, 9])
    ddf = dd.from_pandas(pdf, npartitions=3)
    ddf2 = ddf.set_index('a')
    pdf2 = pdf.set_index('a')
    assert_eq(ddf.groupby('a').b.mean(), ddf2.groupby(ddf2.index).b.mean())

    def demean(df):
        return df.assign(b=df.b - df.b.mean())

    def demean_frame(df):
        return df[['b']] - df[['b']].mean()

    with dask.config.set(scheduler=scheduler):
        with pytest.warns(None):
            assert_eq(ddf.groupby('a').apply(demean),
                      pdf.groupby('a').apply(demean))
            assert_eq(ddf.groupby('a').apply(demean).set_index('a'),
                      pdf.groupby('a').apply(demean).set_index('a'))
            assert_eq(pdf2.groupby(pdf2.index).apply(demean_frame),
                      ddf2.groupby(ddf2.index).apply(demean_frame))
@pytest.mark.parametrize('grouper',
                         [lambda df: df.groupby('a')['b'],
                          lambda df: df.groupby(['a', 'b']),
                          lambda df: df.groupby(['a', 'b'])['c'],
                          lambda df: df.groupby(df['a'])[['b', 'c']],
                          lambda df: df.groupby('a')[['b', 'c']],
                          lambda df: df.groupby('a')[['b']],
                          lambda df: df.groupby(['a', 'b', 'c'])])
def test_groupby_multilevel_getitem(grouper, agg_func):
    """Every aggregation matches pandas across several groupby/getitem shapes."""
    if agg_func == 'nunique':
        # Improvement: a bare `return` reported these cases as passing;
        # skip makes the unsupported combination visible in test output.
        pytest.skip("nunique is not implemented for DataFrameGroupBy")
    df = pd.DataFrame({'a': [1, 2, 3, 1, 2, 3],
                       'b': [1, 2, 1, 4, 2, 1],
                       'c': [1, 3, 2, 1, 1, 2],
                       'd': [1, 2, 1, 1, 2, 2]})
    ddf = dd.from_pandas(df, 2)
    dask_group = grouper(ddf)
    pandas_group = grouper(df)
    dask_agg = getattr(dask_group, agg_func)
    pandas_agg = getattr(pandas_group, agg_func)
    assert isinstance(dask_group, dd.groupby._GroupBy)
    assert isinstance(pandas_group, pd.core.groupby.GroupBy)
    if agg_func == 'mean':
        # dask's mean always returns floats; align the pandas dtype.
        assert_eq(dask_agg(), pandas_agg().astype(float))
    else:
        assert_eq(dask_agg(), pandas_agg())
def test_groupby_multilevel_agg():
    """mean() over single- and multi-column groupers matches pandas."""
    pdf = pd.DataFrame({'a': [1, 2, 3, 1, 2, 3],
                        'b': [1, 2, 1, 4, 2, 1],
                        'c': [1, 3, 2, 1, 1, 2],
                        'd': [1, 2, 1, 1, 2, 2]})
    ddf = dd.from_pandas(pdf, 2)
    # group by column name(s)
    for key in (['a'], ['a', 'c']):
        assert_eq(ddf.groupby(key).mean(), pdf.groupby(key).mean())
    # group by actual series
    assert_eq(ddf.groupby([ddf['a'], ddf['c']]).mean(),
              pdf.groupby([pdf['a'], pdf['c']]).mean())
def test_groupby_get_group():
    """get_group on DataFrame and Series groupbys matches pandas."""
    # Hand-built dask graph: three pandas partitions keyed ('x', i),
    # with divisions [0, 4, 9, 9] matching the partition indexes below.
    dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 6], 'b': [4, 2, 7]},
                                  index=[0, 1, 3]),
           ('x', 1): pd.DataFrame({'a': [4, 2, 6], 'b': [3, 3, 1]},
                                  index=[5, 6, 8]),
           ('x', 2): pd.DataFrame({'a': [4, 3, 7], 'b': [1, 1, 3]},
                                  index=[9, 9, 9])}
    meta = dsk[('x', 0)]
    d = dd.DataFrame(dsk, 'x', meta, [0, 4, 9, 9])
    full = d.compute()
    # Group by a column name, the column itself, and a derived column.
    for ddkey, pdkey in [('b', 'b'), (d.b, full.b),
                         (d.b + 1, full.b + 1)]:
        ddgrouped = d.groupby(ddkey)
        pdgrouped = full.groupby(pdkey)
        # DataFrame
        assert_eq(ddgrouped.get_group(2), pdgrouped.get_group(2))
        assert_eq(ddgrouped.get_group(3), pdgrouped.get_group(3))
        # Series
        assert_eq(ddgrouped.a.get_group(3), pdgrouped.a.get_group(3))
        assert_eq(ddgrouped.a.get_group(2), pdgrouped.a.get_group(2))
def test_dataframe_groupby_nunique():
    """nunique on a sliced groupby matches pandas (random float data)."""
    labels = list('aaabbccccdddeee')
    values = np.random.randn(len(labels))
    pdf = pd.DataFrame(dict(strings=labels, data=values))
    ddf = dd.from_pandas(pdf, npartitions=3)
    expected = pdf.groupby('strings')['data'].nunique()
    assert_eq(ddf.groupby('strings')['data'].nunique(), expected)
def test_dataframe_groupby_nunique_across_group_same_value():
    """nunique is correct when the same value re-appears across partitions."""
    labels = list('aaabbccccdddeee')
    values = list(map(int, '123111223323412'))
    pdf = pd.DataFrame(dict(strings=labels, data=values))
    ddf = dd.from_pandas(pdf, npartitions=3)
    expected = pdf.groupby('strings')['data'].nunique()
    assert_eq(ddf.groupby('strings')['data'].nunique(), expected)
def test_series_groupby_propagates_names():
    """apply() on a groupby keeps the result aligned with pandas naming."""
    pdf = pd.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})
    ddf = dd.from_pandas(pdf, 2)

    def summer(frame):
        return frame['y'].sum()

    with pytest.warns(UserWarning):  # meta inference
        result = ddf.groupby('x').apply(summer)
    expected = pdf.groupby('x').apply(summer)
    assert_eq(result, expected)
def test_series_groupby():
    """Series.groupby aggregations match pandas for both grouping keys."""
    s = pd.Series([1, 2, 2, 1, 1])
    pd_group = s.groupby(s)
    ss = dd.from_pandas(s, npartitions=2)
    dask_group = ss.groupby(ss)
    pd_group2 = s.groupby(s + 1)
    dask_group2 = ss.groupby(ss + 1)
    # Bug fix: the second pair was swapped — (pd_group2, dask_group2) put the
    # pandas object in the dask slot.  Keep a consistent (dask, pandas) order.
    for dg, pdg in [(dask_group, pd_group), (dask_group2, pd_group2)]:
        assert_eq(dg.count(), pdg.count())
        assert_eq(dg.sum(), pdg.sum())
        assert_eq(dg.min(), pdg.min())
        assert_eq(dg.max(), pdg.max())
        assert_eq(dg.size(), pdg.size())
        assert_eq(dg.first(), pdg.first())
        assert_eq(dg.last(), pdg.last())
def test_series_groupby_errors():
    """Error parity between pandas and dask Series.groupby."""
    s = pd.Series([1, 2, 2, 1, 1])
    ss = dd.from_pandas(s, npartitions=2)
    msg = "No group keys passed!"
    # Both pandas and dask must raise the same error for an empty key list.
    for series in (s, ss):
        with pytest.raises(ValueError) as err:
            series.groupby([])
        assert msg in str(err.value)
    sss = dd.from_pandas(s, npartitions=3)
    # Grouping by a dask series with different partitioning is unsupported.
    pytest.raises(NotImplementedError, lambda: ss.groupby(sss))
    # Both must raise KeyError for an unknown string key.
    for series in (s, ss):
        with pytest.raises(KeyError):
            series.groupby('x')
def test_groupby_index_array():
    """Grouping by a derived index attribute (month) matches pandas."""
    pdf = tm.makeTimeDataFrame()
    ddf = dd.from_pandas(pdf, npartitions=2)
    # select the column first, then group
    assert_eq(pdf.A.groupby(pdf.index.month).nunique(),
              ddf.A.groupby(ddf.index.month).nunique(), check_names=False)
    # group first, then select the column
    assert_eq(pdf.groupby(pdf.index.month).A.nunique(),
              ddf.groupby(ddf.index.month).A.nunique(), check_names=False)
def test_groupby_set_index():
    """as_index=False is not supported by dask groupby and raises TypeError."""
    frame = tm.makeTimeDataFrame()
    dframe = dd.from_pandas(frame, npartitions=2)
    with pytest.raises(TypeError):
        dframe.groupby(frame.index.month, as_index=False)
def test_split_apply_combine_on_series():
pdf = pd.DataFrame({'a': [1, 2, 6, 4, 4, 6, 4, 3, 7],
'b': [4, 2, 7, 3, 3, 1, 1, 1, 2]},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9])
ddf = dd.from_pandas(pdf, npartitions=3)
for ddkey, pdkey in [('b', 'b'), (ddf.b, pdf.b), (ddf.b + 1, pdf.b + 1)]:
assert_eq(ddf.groupby(ddkey).a.min(), pdf.groupby(pdkey).a.min())
assert_eq(ddf.groupby(ddkey).a.max(), pdf.groupby(pdkey).a.max())
assert_eq(ddf.groupby(ddkey).a.count(), pdf.groupby(pdkey).a.count())
assert_eq(ddf.groupby(ddkey).a.mean(), pdf.groupby(pdkey).a.mean())
assert_eq(ddf.groupby(ddkey).a.nunique(), pdf.groupby(pdkey).a.nunique())
assert_eq(ddf.groupby(ddkey).a.size(), pdf.groupby(pdkey).a.size())
assert_eq(ddf.groupby(ddkey).a.first(), pdf.groupby(pdkey).a.first())
assert_eq(ddf.groupby(ddkey).a.last(), pdf.groupby(pdkey).a.last())
for ddof in [0, 1, 2]:
assert_eq(ddf.groupby(ddkey).a.var(ddof),
pdf.groupby(pdkey).a.var(ddof))
assert_eq(ddf.groupby(ddkey).a.std(ddof),
pdf.groupby(pdkey).a.std(ddof))
assert_eq(ddf.groupby(ddkey).sum(), pdf.groupby(pdkey).sum())
assert_eq(ddf.groupby(ddkey).min(), pdf.groupby(pdkey).min())
assert_eq(ddf.groupby(ddkey).max(), pdf.groupby(pdkey).max())
assert_eq(ddf.groupby(ddkey).count(), pdf.groupby(pdkey).count())
assert_eq(ddf.groupby(ddkey).mean(), pdf.groupby(pdkey).mean())
assert_eq(ddf.groupby(ddkey).size(), pdf.groupby(pdkey).size())
assert_eq(ddf.groupby(ddkey).first(), pdf.groupby(pdkey).first())
assert_eq(ddf.groupby(ddkey).last(), pdf.groupby(pdkey).last())
for ddof in [0, 1, 2]:
assert_eq(ddf.groupby(ddkey).var(ddof),
pdf.groupby(pdkey).var(ddof), check_dtype=False)
assert_eq(ddf.groupby(ddkey).std(ddof),
pdf.groupby(pdkey).std(ddof), check_dtype=False)
for ddkey, pdkey in [(ddf.b, pdf.b), (ddf.b + 1, pdf.b + 1)]:
assert_eq(ddf.a.groupby(ddkey).sum(), pdf.a.groupby(pdkey).sum(), check_names=False)
assert_eq(ddf.a.groupby(ddkey).max(), pdf.a.groupby(pdkey).max(), check_names=False)
assert_eq(ddf.a.groupby(ddkey).count(), pdf.a.groupby(pdkey).count(), check_names=False)
assert_eq(ddf.a.groupby(ddkey).mean(), pdf.a.groupby(pdkey).mean(), check_names=False)
assert_eq(ddf.a.groupby(ddkey).nunique(), pdf.a.groupby(pdkey).nunique(), check_names=False)
assert_eq(ddf.a.groupby(ddkey).first(), pdf.a.groupby(pdkey).first(), check_names=False)
assert_eq(ddf.a.groupby(ddkey).last(), pdf.a.groupby(pdkey).last(), check_names=False)
for ddof in [0, 1, 2]:
assert_eq(ddf.a.groupby(ddkey).var(ddof),
pdf.a.groupby(pdkey).var(ddof))
assert_eq(ddf.a.groupby(ddkey).std(ddof),
pdf.a.groupby(pdkey).std(ddof))
for i in [0, 4, 7]:
assert_eq(ddf.groupby(ddf.b > i).a.sum(), pdf.groupby(pdf.b > i).a.sum())
assert_eq(ddf.groupby(ddf.b > i).a.min(), pdf.groupby(pdf.b > i).a.min())
assert_eq(ddf.groupby(ddf.b > i).a.max(), pdf.groupby(pdf.b > i).a.max())
assert_eq(ddf.groupby(ddf.b > i).a.count(), | |
if the schema is not valid
"""
def __init__(self, descriptor, project=None):
    """
    Wrap a frictionless table-schema descriptor.
    :param descriptor: a table-schema descriptor (dict-like); validated strictly by TableSchema.
    :param project: optional project the schema belongs to (stored as-is).
    :raises: TableSchema(strict=True) raises if the descriptor is not a valid schema.
    """
    self.descriptor = descriptor
    self.schema_model = TableSchema(descriptor, strict=True)
    # Wrap every field and foreign key in the project's helper classes.
    self.fields = [SchemaField(f.descriptor) for f in self.schema_model.fields]
    self.foreign_keys = [SchemaForeignKey(fk) for fk in
                         self.schema_model.foreign_keys] if self.schema_model.foreign_keys else []
    self.project = project
# implement some dict like methods
def __getitem__(self, item):
    """Dict-style item access, delegated to the underlying descriptor."""
    return self.descriptor[item]
def get(self, k, d=None):
    """Dict-style .get(), delegated to the underlying descriptor."""
    descriptor = self.descriptor
    return descriptor.get(k, d)
@property
def headers(self):
    """Alias for field_names: the schema's column headers."""
    return self.field_names
@property
def field_names(self):
    """Names of all fields, in schema order."""
    return [field.name for field in self.fields]
@property
def required_fields(self):
    """Fields whose 'required' constraint is truthy."""
    return [field for field in self.fields if field.required]
@property
def numeric_fields(self):
    """Fields reporting a numeric type (is_numeric)."""
    return [field for field in self.fields if field.is_numeric]
def get_field_by_name(self, name):
    """Return the schema field matching *name*, or None if absent."""
    return next((field for field in self.fields if field.name == name), None)
def field_validation_error(self, field_name, value):
    """
    Validate *value* against the named field.
    :param field_name: name of a field declared in the schema
    :param value: value to validate
    :return: the field's validation error (None when valid)
    :raises Exception: if *field_name* is not part of the schema
    """
    field = self.get_field_by_name(field_name)
    if field is not None:
        return field.validation_error(value)
    else:
        # Bug fix: grammar in the error message ("doesn't exists" -> "doesn't exist").
        raise Exception("The field '{}' doesn't exist in the schema. Should be one of {}"
                        .format(field_name, self.field_names))
def is_field_valid(self, field_name, value):
    """True when the value produces no validation error for the field."""
    error = self.field_validation_error(field_name, value)
    return error is None
def validate_row(self, row):
    """
    Validate every (field_name, value) pair of a row.
    :param row: a dict or an iterable of key/value pairs
    :return: {field_name: {'value': value, 'error': None or error message}}
    """
    return {
        name: {'value': value,
               'error': self.field_validation_error(name, value)}
        for name, value in dict(row).items()
    }
def cast_numbers(self, row, raise_error=False):
    """
    Replace the numeric fields value by a json serializable python number. An int or float.
    The row is mutated in place (and also returned).
    :param row: a dict of (field_name, value)
    :param raise_error: if True any casting error will raise an exception
    :return: in place replacement {field_name: value} where the numeric fields are casted into python numbers
    """
    for field in self.numeric_fields:
        if field.name in row:
            value = row[field.name]
            try:
                python_value = field.cast(value)
                # The frictionless cast will cast a number to a python Decimal(), which is not json serializable
                # by default. Cast it to a float or int. We want to keep it as entered as possible. E.g if entered
                # 0 we don't want 0.0 or vice versa
                if isinstance(python_value, decimal.Decimal):
                    # NOTE(review): find('.') > 0 means a value like '.5'
                    # (dot at index 0) is int()-cast — confirm that is intended.
                    if str(value).find('.') > 0:
                        python_value = float(python_value)
                    else:
                        python_value = int(python_value)
                row[field.name] = python_value
            except Exception as e:
                # Best-effort by default: keep the original value on cast failure.
                if raise_error:
                    raise e
                pass
    return row
def rows_validator(self, rows):
    """Lazily validate each row; yields one validation dict per row."""
    return (self.validate_row(row) for row in rows)
def get_error_fields(self, row):
    """
    Return the fields that do not validate.
    :param row: a key/value dict or iterable of pairs
    :return: [(field_name, {'value': value, 'error': error_string})]
    """
    validated = self.validate_row(row)
    return [(name, data) for name, data in validated.items() if data.get('error')]
def is_row_valid(self, row):
    """True when the row produces no field errors."""
    return not self.get_error_fields(row)
def is_all_valid(self, rows):
    """True when every row validates; stops at the first invalid row."""
    return all(self.is_row_valid(row) for row in rows)
def has_fk_for_model(self, model_name):
    """True when a foreign key targets the given model."""
    fk = self.get_fk_for_model(model_name)
    return fk is not None
def get_fk_for_model(self, model_name):
    """Return the first foreign key targeting *model_name*, or None."""
    return next((fk for fk in self.foreign_keys if fk.model == model_name), None)
def has_fk_for_model_field(self, model_name, model_field):
    """True when a foreign key targets the given model field."""
    fk = self.get_fk_for_model_field(model_name, model_field)
    return fk is not None
def get_fk_for_model_field(self, model_name, model_field):
    """Return the first foreign key targeting *model_name*.*model_field*, or None."""
    return next(
        (fk for fk in self.foreign_keys
         if fk.model == model_name and fk.model_field == model_field),
        None
    )
def __str__(self):
    # The schema's display name is its descriptor 'name' entry
    # (None if the descriptor has no 'name' key).
    return self.get('name')
class ObservationSchema(GenericSchema):
    """
    A schema specific to an Observation Dataset.
    Its main job is to deal with the observation date and its geometry
    (lat/long or geojson)
    There's a special case: a lat/long or geometry field can be omitted if there's a reference (foreign key)
    to a site code (only)
    """
    # Well-known column names the date/geometry parsers look for.
    OBSERVATION_DATE_FIELD_NAME = 'Observation Date'
    LATITUDE_FIELD_NAME = 'Latitude'
    LONGITUDE_FIELD_NAME = 'Longitude'
    EASTING_FIELD_NAME = 'Easting'
    NORTHING_FIELD_NAME = 'Northing'
    DATUM_FIELD_NAME = 'Datum'
    ZONE_FIELD_NAME = 'Zone'
    SITE_CODE_FIELD_NAME = 'Site Code'
    # Example of the foreign-key descriptor expected for the site-code-only case.
    SITE_CODE_FOREIGN_KEY_EXAMPLE = """
"foreignKeys": [
{
"fields": ["Site Code"],
"reference": {
"fields": ["code"],
"resource": "Site"
}
}
]
"""

    def __init__(self, descriptor, project=None):
        """
        Build the generic schema, then wire the date and geometry parsers.
        :param descriptor: table-schema descriptor
        :param project: optional project, forwarded to the geometry parser
        :raises ObservationSchemaError: if either parser reports errors
        """
        super(ObservationSchema, self).__init__(descriptor, project)
        self.errors = []
        # date parser
        self.date_parser = ObservationDateParser(self)
        self.errors += self.date_parser.errors
        # geometry parser
        self.geometry_parser = GeometryParser(self, self.project)
        self.errors += self.geometry_parser.errors
        if self.errors:
            msg = "\n".join(self.errors)
            raise ObservationSchemaError(msg)

    @property
    def observation_date_field(self):
        """Field holding the observation date (delegated to the date parser)."""
        return self.date_parser.observation_date_field

    @property
    def latitude_field(self):
        """Latitude field, if any (delegated to the geometry parser)."""
        return self.geometry_parser.latitude_field

    @property
    def longitude_field(self):
        """Longitude field, if any (delegated to the geometry parser)."""
        return self.geometry_parser.longitude_field

    @property
    def easting_field(self):
        """Easting field, if any (delegated to the geometry parser)."""
        return self.geometry_parser.easting_field

    @property
    def northing_field(self):
        """Northing field, if any (delegated to the geometry parser)."""
        return self.geometry_parser.northing_field

    @property
    def datum_field(self):
        """Datum field, if any (delegated to the geometry parser)."""
        return self.geometry_parser.datum_field

    @property
    def zone_field(self):
        """Zone field, if any (delegated to the geometry parser)."""
        return self.geometry_parser.zone_field

    @property
    def site_code_field(self):
        """Site-code field, if any (delegated to the geometry parser)."""
        return self.geometry_parser.site_code_field

    def find_site_code_foreign(self):
        """Return the foreign key pointing at Site.code, or None."""
        return self.get_fk_for_model_field('Site', 'code')

    def cast_record_observation_date(self, record):
        """Extract and cast the observation date from a record (may return None)."""
        return self.date_parser.cast_date(record)

    def cast_srid(self, record, default_srid=MODEL_SRID):
        """Resolve the record's SRID (delegated to the geometry parser)."""
        return self.geometry_parser.cast_srid(record, default_srid=default_srid)

    def cast_geometry(self, record, default_srid=MODEL_SRID):
        """Build the record's geometry (delegated to the geometry parser)."""
        return self.geometry_parser.cast_geometry(record, default_srid=default_srid)
class SpeciesObservationSchema(ObservationSchema):
    """
    An ObservationSchema with a Species Name
    """
    # Well-known species-related column names.
    SPECIES_NAME_FIELD_NAME = 'Species Name'
    GENUS_FIELD_NAME = 'Genus'
    SPECIES_FIELD_NAME = 'Species'
    INFRA_SPECIFIC_RANK_FIELD_NAME = 'Infraspecific Rank'
    INFRA_SPECIFIC_NAME_FIELD_NAME = 'Infraspecific Name'
    SPECIES_NAME_ID_FIELD_NAME = 'Name Id'

    def __init__(self, descriptor, project=None):
        """
        Build the observation schema, then wire the species-name parser on top.
        :param descriptor: table-schema descriptor
        :param project: optional project, forwarded to the base class
        :raises SpeciesObservationSchemaError: if any error is recorded
        """
        super(SpeciesObservationSchema, self).__init__(descriptor, project)
        self.species_name_parser = SpeciesNameParser(self)
        self.errors += self.species_name_parser.errors
        if self.errors:
            raise SpeciesObservationSchemaError("\n".join(self.errors))

    def cast_species_name(self, record):
        """Extract the species name from a record (delegated to the parser)."""
        return self.species_name_parser.cast_species_name(record)

    def cast_species_name_id(self, record):
        """Extract the species name id from a record (delegated to the parser)."""
        return self.species_name_parser.cast_species_name_id(record)
def format_required_message(field):
    """Build the standard error message for a field missing the 'required' constraint."""
    return f"The field named '{field.name}' must have the 'required' constraint set to true."
class ObservationDateParser(object):
    """
    A utility class to extract the observation date from data given a schema.
    Rules to find the observation date:
    1- Look for a biosys type 'observationDate'
    2- Look for an 'Observation Date' column
    3- Look for a single field of type date or datetime
    It will store errors if there is any ambiguity (typically two fields with the same name)
    or if the field is not of a date/time type.
    """
    def __init__(self, schema):
        """
        :param schema: a GenericSchema instance, or a raw descriptor (will be wrapped).
        """
        if not isinstance(schema, GenericSchema):
            schema = GenericSchema(schema)
        self.schema = schema
        self.errors = []
        self.observation_date_field, errors = find_unique_field(
            self.schema,
            BiosysSchema.OBSERVATION_DATE_TYPE_NAME,
            ObservationSchema.OBSERVATION_DATE_FIELD_NAME
        )
        if errors:
            self.errors += errors
        # verify type
        if self.observation_date_field and not self.observation_date_field.is_datetime_types:
            msg = "Wrong type for the observation date field: '{field}' should be one of: {types}".format(
                field=self.observation_date_field.name,
                types=[SchemaField.DATETIME_TYPES]
            )
            # Bug fix: `self.errors += msg` extended the list with the message's
            # individual characters (str is iterable); append the message instead.
            self.errors.append(msg)
            self.observation_date_field = None
        if not self.observation_date_field:
            # fall back. Look for a single date/time type field
            dt_fields = [f for f in self.schema.fields if f.is_datetime_types]
            if len(dt_fields) == 1:
                self.observation_date_field = dt_fields[0]

    def is_valid(self):
        """True when no error was recorded while locating the date field."""
        return not self.errors

    def cast_date(self, record):
        """
        Extract the observation date from a record.
        :param record: a column -> value dictionary
        :return: a date or datetime, or None when the field is absent or empty
        """
        if self.observation_date_field:
            value = record.get(self.observation_date_field.name)
            if value:
                return self.observation_date_field.cast(value)
        return None

    def get_active_fields(self):
        """Return the fields this parser actually uses (empty or one element)."""
        all_possibles_fields = [self.observation_date_field]
        return [f for f in all_possibles_fields if f is not None]
class GeometryParser(object):
"""
A utility class to extract the geometry from data given a schema.
"""
def __init__(self, schema, project=None):
if not isinstance(schema, GenericSchema):
schema = GenericSchema(schema)
self.schema = schema
self.project = project
self.errors = []
# Site Code
self.site_code_field, errors = self._find_site_code_field()
if errors:
self.errors += errors
# Datum
self.datum_field, errors = find_unique_field(
self.schema,
BiosysSchema.DATUM_TYPE_NAME,
ObservationSchema.DATUM_FIELD_NAME
)
if errors:
self.errors += errors
# Zone
self.zone_field, errors = find_unique_field(
self.schema,
BiosysSchema.ZONE_TYPE_NAME,
ObservationSchema.ZONE_FIELD_NAME
)
if errors:
self.errors += errors
# Latitude
self.latitude_field, errors = find_unique_field(
self.schema,
BiosysSchema.LATITUDE_TYPE_NAME,
ObservationSchema.LATITUDE_FIELD_NAME
)
if errors:
self.errors += errors
# Longitude
self.longitude_field, errors = find_unique_field(
self.schema,
BiosysSchema.LONGITUDE_TYPE_NAME,
ObservationSchema.LONGITUDE_FIELD_NAME)
if errors:
self.errors += errors
# Easting
self.easting_field, errors = find_unique_field(
self.schema,
BiosysSchema.EASTING_TYPE_NAME,
ObservationSchema.EASTING_FIELD_NAME
)
if errors:
self.errors += errors
# Northing
self.northing_field, errors = find_unique_field(
self.schema,
BiosysSchema.NORTHING_TYPE_NAME,
ObservationSchema.NORTHING_FIELD_NAME
)
if errors:
self.errors += errors
# some post validations.
# we need at least one method to get the geometry.
if not any([
self.site_code_field,
self.latitude_field,
self.longitude_field,
self.easting_field,
self.northing_field
]):
msg = "The schema must contain some geometry fields: latitude/longitude or easting/northing or " \
"alternatively a reference to the Site Code."
self.errors.append(msg)
# if we have a latitude we must have a longitude and vice-versa
if not self.errors:
if self.latitude_field and not self.longitude_field:
self.errors.append("Missing Longitude field")
if self.longitude_field and not self.latitude_field:
self.errors.append("Missing Latitude field")
# same for easting and northing
if self.easting_field and not self.northing_field:
self.errors.append("Missing Northing field")
if self.northing_field and not self.easting_field:
self.errors.append("Missing Easting field")
# verify 'required' constraints: required constraints must be set if we are in 'single' mode.
# e.g lat/long without site code or easting/northing
if self.is_site_code_only and not self.site_code_field.required:
self.errors.append(format_required_message(self.site_code_field))
if self.is_lat_long_only:
if not self.latitude_field.required:
self.errors.append(format_required_message(self.latitude_field))
if not self.longitude_field.required:
self.errors.append(format_required_message(self.longitude_field))
if self.is_easting_northing_only:
if not self.easting_field.required:
self.errors.append(format_required_message(self.easting_field))
if not self.northing_field.required:
| |
= self.features_34(features_33)
features_35 = self.features_35(features_34)
features_36 = self.features_36(features_35)
layers = []
layers.append(features_0)
layers.append(features_1)
layers.append(features_2)
layers.append(features_3)
layers.append(features_4)
layers.append(features_5)
layers.append(features_6)
layers.append(features_7)
layers.append(features_8)
layers.append(features_9)
layers.append(features_10)
layers.append(features_11)
layers.append(features_12)
layers.append(features_13)
layers.append(features_14)
layers.append(features_15)
layers.append(features_16)
layers.append(features_17)
layers.append(features_18)
layers.append(features_19)
layers.append(features_20)
layers.append(features_21)
layers.append(features_22)
layers.append(features_23)
layers.append(features_24)
layers.append(features_25)
layers.append(features_26)
layers.append(features_27)
layers.append(features_28)
layers.append(features_29)
layers.append(features_30)
layers.append(features_31)
layers.append(features_32)
layers.append(features_33)
layers.append(features_34)
layers.append(features_35)
layers.append(features_36)
post_process = self.layer_postprocess
output_layers = [post_process(layers[int(i)]).view(layers[int(i)].shape[0], -1) for i in self.layers]
classifier_flatten = features_36.view(features_36.size(0), -1)
classifier_0 = self.classifier_0(classifier_flatten)
classifier_1 = self.classifier_1(classifier_0)
classifier_2 = self.classifier_2(classifier_1)
classifier_3 = self.classifier_3(classifier_2)
classifier_4 = self.classifier_4(classifier_3)
classifier_5 = self.classifier_5(classifier_4)
classifier_6 = self.classifier_6(classifier_5)
return torch.cat(output_layers, dim=1)
class VGG19(nn.Module):
    """
    Pretrained VGG-19 model features. Layers to use can be specified in the
    constructor. If multiple ones are specified, all the outputs will be
    concatenated.
    The underlying torchvision `features` Sequential indexes layers 0..36
    (conv/ReLU/maxpool stack of VGG-19).
    """
    def __init__(self, layers=4, layer_postprocess=None):
        """
        layers: an int or a list of layer indices to extract features from.
            Outputs will be concatenated together.
        layer_postprocess: a post-processing torch.nn.Module to further process
            the result extracted from each layer, before stacking the results
            in the end. y = layer_postprocess(x). Require y.shape[0] =
            x.shape[0] i.e., same minibatch dimension.
        """
        super(VGG19, self).__init__()
        # Bug fix: `layers` is documented as a list but defaulted to the int 4;
        # `name in self.layers` and `len(self.layers)` fail on an int.
        # Accept a single int and normalize it to a one-element list.
        if isinstance(layers, int):
            layers = [layers]
        self.layers = layers
        # NOTE: pretrained=True downloads weights on first use.
        self.model = models.vgg19(pretrained=True).features
        self.layer_postprocess = layer_postprocess
        # Because of large sizes of images we need to downsample images before
        # feeding them to extractors
        # self.downsample = torch.nn.AvgPool2d(3, stride=2)
        # Feature extractor only: freeze all pretrained parameters.
        for param in self.model.parameters():
            param.requires_grad = False
        # See https://pytorch.org/docs/stable/torchvision/models.html
        self.normalize_module = NormalizeChannels2d(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

    def forward(self, x):
        """
        x: 4d tensor of pixel values in [0, 1].
        Returns the concatenation (dim=1) of the flattened activations of the
        requested layers, optionally post-processed per layer.
        """
        assert x.dim() == 4, "Input should have 4 dimensions. Was {}".format(x.dim())
        # This pretrained VGG19 expects input pixels to be in [0,1].
        # See https://pytorch.org/docs/stable/torchvision/models.html
        kmain.pixel_values_check(x, (0, 1), "Input")
        # Normalize according to the instruction on the web page above
        x = self.normalize_module(x)
        features = []
        postprocess = self.layer_postprocess
        # if x.size()[-1] ==1024:
        #     x = self.downsample(self.downsample(x))
        for name, layer in enumerate(self.model):
            # name is a non-negative integer.
            x = layer(x)
            if name in self.layers:
                y = postprocess(x) if postprocess is not None else x
                assert y.shape[0] == x.shape[0]
                # flatten
                y_reshaped = y.view(y.shape[0], -1)
                features.append(y_reshaped)
                if len(features) == len(self.layers):
                    # Outputs from all the specified layers are collected.
                    break
        # return features
        return torch.cat(features, dim=1)
# end class VGG19
class GlobalMaxPool(nn.Module):
    """
    Takes a 4d tensor X (b x c x h x w) and returns a (b x c) tensor Y,
    where Y[b, c] is the maximum over the spatial dimensions of X[b, c].
    """
    def __init__(self):
        super(GlobalMaxPool, self).__init__()

    def forward(self, x):
        if x.dim() != 4:
            raise ValueError("Input must be a 4d tensor. Shape was {}".format(x.shape))
        batch, channels = x.shape[0], x.shape[1]
        # Collapse the spatial dims, then take the max along them.
        pooled = x.view(batch, channels, -1).max(dim=2).values
        assert pooled.dim() == 2
        assert pooled.shape[0] == batch
        assert pooled.shape[1] == channels
        return pooled
# GlobalMaxPool
class GlobalAvgPool(nn.Module):
    """
    Takes a 4d tensor X (b x c x h x w) and returns a (b x c) tensor Y,
    where Y[b, c] is the average over the spatial dimensions of X[b, c].
    """
    def __init__(self):
        super(GlobalAvgPool, self).__init__()

    def forward(self, x):
        if x.dim() != 4:
            raise ValueError("Input must be a 4d tensor. Shape was {}".format(x.shape))
        batch, channels = x.shape[0], x.shape[1]
        # Collapse the spatial dims, then average along them.
        pooled = torch.mean(x.view(batch, channels, -1), dim=2)
        assert pooled.dim() == 2
        assert pooled.shape[0] == batch
        assert pooled.shape[1] == channels
        return pooled
# GlobalAvgPool
class Identity(nn.Module):
    """
    Pass-through module with two options:
    - slice_dim: if not None, keep only that single channel (dim 1 slice).
    - flatten: if True, flatten each sample to 1d ((b, -1)).
    """
    def __init__(self, flatten=False, slice_dim=None):
        super(Identity, self).__init__()
        self.flatten = flatten
        self.slice = slice_dim

    def forward(self, x):
        # Idiom fix: compare to None with `is not`, not `!=`.
        if self.slice is not None:
            x = x[:, self.slice : (self.slice + 1), :, :]
        return x.view(x.shape[0], -1) if self.flatten else x
class NormalizeChannels2d(nn.Module):
    """
    Normalize (standardize) each channel by a constant:
    (Tensor[channel_i] - mean[i]) / std[i].  Same as
    torchvision.transforms.Normalize but for tensors with a minibatch
    dimension.  Does not modify tensors in place.
    """
    def __init__(self, mean, std):
        """
        mean: list of constants for the means
        std: list of constants used to divide
        """
        super(NormalizeChannels2d, self).__init__()
        if len(mean) != len(std):
            raise ValueError("mean and std must have the same length")
        self.mean = torch.tensor(mean)
        self.std = torch.tensor(std)

    def forward(self, x):
        # First dimension of x is the minibatch dimension.
        channel_count = len(self.mean)
        if channel_count != x.shape[1]:
            raise ValueError(
                "Number of channels of x does not match len of mean vector. x has {} channels. mean length = {}".format(
                    x.shape[1], channel_count
                )
            )
        # Reshape to (1, c, 1, 1) so broadcasting applies per channel;
        # faster than plain broadcasting from 1d tensors.
        mean_b = self.mean.view(1, channel_count, 1, 1)
        std_b = self.std.view(1, channel_count, 1, 1)
        if torch.cuda.is_available():
            mean_b = mean_b.cuda()
            std_b = std_b.cuda()
        return (x - mean_b) / std_b
# end class NormalizeChannels2d
class Flatten(nn.Module):
    """Collapse every non-batch dimension of each tensor in the minibatch.

    An input of shape [n, a, b, c] comes out with shape [n, a*b*c]; only
    the minibatch dimension (dim 0) is preserved.
    """
    def __init__(self):
        super(Flatten, self).__init__()
    def forward(self, x):
        batch = x.shape[0]
        return x.reshape(batch, -1)
# end Flatten
# end Flatten
class AlexNet_365(nn.Module):
    """
    Pretrained AlexNet (PLACES-365) feature extractor.

    Only the convolutional ``features`` part of the network below is kept
    (the ``classifier`` head is discarded):

    AlexNet(
      (features): Sequential(
        (0): Conv2d(3, 64, kernel_size=(11, 11), stride=(4, 4), padding=(2, 2))
        (1): ReLU(inplace)
        (2): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
        (3): Conv2d(64, 192, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
        (4): ReLU(inplace)
        (5): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
        (6): Conv2d(192, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        (7): ReLU(inplace)
        (8): Conv2d(384, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        (9): ReLU(inplace)
        (10): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        (11): ReLU(inplace)
        (12): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
      )
    )
    """
    def __init__(self):
        """Download (if missing) and load the Places-365 AlexNet checkpoint,
        keeping only its convolutional feature extractor in eval mode."""
        super(AlexNet_365, self).__init__()
        arch = "alexnet"  #'resnet18'
        # Pre-trained weights are downloaded once and cached in the working dir.
        model_file = "%s_places365.pth.tar" % arch
        # os.path.exists is the intended presence test: the original
        # os.access(model_file, os.W_OK) also re-downloaded whenever the file
        # existed but happened to be non-writable.
        if not os.path.exists(model_file):
            weight_url = "http://places2.csail.mit.edu/models_places365/" + model_file
            os.system("wget " + weight_url)
        # Build the architecture once (the original constructed it twice and
        # had a no-op `model = model`).
        model = models.__dict__[arch](num_classes=365)
        checkpoint = torch.load(model_file, map_location=lambda storage, loc: storage)
        # Checkpoint was saved from a DataParallel wrapper: strip "module." prefixes.
        state_dict = {str.replace(k, "module.", ""): v for k, v in checkpoint["state_dict"].items()}
        model.load_state_dict(state_dict)
        model.eval()
        self.model = model.features  # Just taking the features
        self.normalize_module = NormalizeChannels2d(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    def forward(self, x):
        """Return flattened conv features for a 4-d batch of images in [0, 1]."""
        assert x.dim() == 4, "Input should have 4 dimensions. Was {}".format(x.dim())
        kmain.pixel_values_check(x, (0, 1), "Input")
        x = self.normalize_module(x)
        logit = self.model.forward(x)
        return logit.view(logit.shape[0], -1)
class ResNet18_365(nn.Module):
"""
Pretrained ResNet-18 (PLACES-365) model features.
Sequential(
(0): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
(1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(inplace)
| |
bo.append(np.nan)
# print('lack one')
#Total Test:Positive Ratio
bo.append(int(bo[3])/int(bo[13]))
#New Positive
try:
bo.append((int(bo[3])-int(bo1[3]))/(int(bo[13])-int(bo1[13])))
except:
bo.append(np.nan)
# print('lack one')
#Case Fatality Rate%
try:
if bo[5]=='':
bo.append(0)
else:
bo.append(int(bo[5])/int(bo[3]))
except:
bo.append(np.nan)
#New Confirmed Case Growth Rate
# try:
# q=2
# while (math.isnan(inc1) or inc1==np.inf) and q<=9:
# # print(inc1)
# inc1=hist_data_of_coun_i.loc[len(date)-q,'case inc']/(7*hist_data_of_coun_i.loc[len(date)-q-7,'case inc'])
# c=hist_data_of_coun_i.loc[len(date)-q,'case inc']
# q+=1
# # print(inc1)
# if math.isnan(inc1):
# bo.append(0)
# elif inc1==np.inf:
# bo.append(0.01)
# # elif c<=100:
# # bo.append(0.03)
# else:
# bo.append(inc1)
# except:
# bo.append(0)
# print('lack one')
#New Death Case Growth Rate
# try:
# q=2
# while (math.isnan(inc2) or inc2==np.inf) and q<=9:
# # print(inc2)
# inc2=hist_data_of_coun_i.loc[len(date)-q,'death inc']/(7*hist_data_of_coun_i.loc[len(date)-q-7,'death inc'])
# q+=1
# # print(inc2)
# if math.isnan(inc2):
# bo.append(0)
# elif inc2==np.inf:
# bo.append(0.1)
# else:
# bo.append(inc2)
# except:
# bo.append(0)
# print('lack one')
#New Sum Confirmed Case Growth Rate
if math.isnan(inc_1) or inc_1=='':
bo.append(0)
elif inc_1==np.inf:
bo.append(0.01)
else:
bo.append(inc_1)
# print(bo[-1])
#New Sum Death Case Growth Rate
if math.isnan(inc_2) or inc_2=='':
bo.append(0)
elif inc_2==np.inf:
bo.append(0.1)
else:
bo.append(inc_2)
# print(bo[-1])
#Average daily cases per 100,000 people in the past week
bo.append(adcp*100000/int(bo[15]))
# New Test
try:
bo.append(int(bo[13])-int(bo1[13]))
except:
bo.append(np.nan)
# print('lack one')
bo.append(slope)
if region=='missing':
continue
else:
bo.append(region)
bo.append(coun1[1])
bo.append(iso)
bo.append('world')
bo.append(seven_cases)
bo.append(seven_deaths)
print(len(bo))
print(bo)
if len(bo)!=40:
print(bo)
exit(0)
raw_data.append(bo)
# Assemble the scraped per-country rows into a DataFrame and keep only the
# reporting columns used downstream.
raw_data=DataFrame(raw_data,columns=col_name)
brief_raw_data=raw_data[['Country,Other','key-id','Region','Country/District','field','Population',
'TotalCases','ActiveCases','TotalDeaths','NewDeaths','TotalRecovered','NewRecovered','Serious,Critical','NewCases','New Test','Cases Per 100K Population','Tests Per 100K Population',
'Active Cases Per 100k Population','Total Test:Positive Ratio','New Positive%',
'Case Fatality Rate%','New Confirmed Case Growth Rate','New Death Case Growth Rate','Average daily cases per 100,000 people in the past week','NPI','7 days inc cases','7 days inc deaths']]
tf=copy.deepcopy(brief_raw_data)
uni_region=list(set(list(tf['Region'].values)))
uni_region.remove('western europe')
# Impute missing values region-by-region.  'western europe' is processed first
# (outside the loop) to seed `final`.  The string 'shit' is the script's
# placeholder for a missing/invalid value.
data_region=tf[tf['Region']=='western europe']
data_region=data_region.replace(np.nan,'shit')
data_region=data_region.replace(np.inf,'shit')
data_region=data_region.replace('N/A','shit')
data_region=data_region.replace('',0)
data_region=data_region.replace(' ',0)
data_region.loc[data_region['NPI']=='shit','NPI']=0
data_region.loc[data_region['Case Fatality Rate%']=='shit','Case Fatality Rate%']=0
# Region-wide active-case rate from the fully populated rows, used to
# back-fill 'Active Cases Per 100k Population' where it is missing.
dd=data_region[['TotalCases','ActiveCases','TotalRecovered','Case Fatality Rate%']]
ac=dd[(dd['TotalCases']!='shit')&(dd['ActiveCases']!='shit')&(dd['TotalRecovered']!='shit')&(dd['Case Fatality Rate%']!='shit')]
active_rate_region=sum(ac['ActiveCases'].astype(int))/sum(ac['TotalCases'].astype(int))
data_region.loc[data_region['Active Cases Per 100k Population']=='shit','Active Cases Per 100k Population']=active_rate_region*100000*data_region.loc[data_region['Active Cases Per 100k Population']=='shit','TotalCases'].astype(int)/data_region.loc[data_region['Active Cases Per 100k Population']=='shit','Population'].astype(int)
# Region-wide new-positive rate, used where 'New Test' is zero.
dd=data_region[['NewCases','New Test']]
ac=dd[dd['New Test']!=0]
new_posi=sum(ac['NewCases'].astype(int))/sum(ac['New Test'])
data_region.loc[data_region['New Test']==0,'New Positive%']=new_posi
final=copy.deepcopy(data_region)
# Same imputation for every remaining region, stacking onto `final`.
for distri in uni_region:
    data_region=tf[tf['Region']==distri]
    data_region=data_region.replace(np.nan,'shit')
    data_region=data_region.replace(np.inf,'shit')
    data_region=data_region.replace('N/A','shit')
    data_region=data_region.replace('',0)
    data_region=data_region.replace(' ',0)
    data_region.loc[data_region['NPI']=='shit','NPI']=0
    data_region.loc[data_region['Case Fatality Rate%']=='shit','Case Fatality Rate%']=0
    dd=data_region[['TotalCases','ActiveCases','TotalRecovered','Case Fatality Rate%']]
    ac=dd[(dd['TotalCases']!='shit')&(dd['ActiveCases']!='shit')&(dd['TotalRecovered']!='shit')&(dd['Case Fatality Rate%']!='shit')]
    active_rate_region=sum(ac['ActiveCases'].astype(int))/sum(ac['TotalCases'].astype(int))
    data_region.loc[data_region['Active Cases Per 100k Population']=='shit','Active Cases Per 100k Population']=active_rate_region*100000*data_region.loc[data_region['Active Cases Per 100k Population']=='shit','TotalCases'].astype(int)/data_region.loc[data_region['Active Cases Per 100k Population']=='shit','Population'].astype(int)
    dd=data_region[['NewCases','New Test']]
    ac=dd[dd['New Test']!=0]
    try:
        new_posi=sum(ac['NewCases'].astype(int))/sum(ac['New Test'])
    except:
        new_posi=0
    data_region.loc[data_region['New Test']==0,'New Positive%']=new_posi
    data_region.loc[data_region['New Test']=='shit','New Positive%']=new_posi
    final=pd.concat([final,data_region])
final=final.reset_index(drop=True)
tf2=final[['Country,Other','key-id','Country/District','Region','field','TotalCases','Cases Per 100K Population','Tests Per 100K Population',
'Active Cases Per 100k Population','Total Test:Positive Ratio','New Positive%',
'Case Fatality Rate%','New Confirmed Case Growth Rate','New Death Case Growth Rate','Average daily cases per 100,000 people in the past week','NPI']]
# Higher is better, i.e. needs a descending sort.  (translated from Chinese)
# for x in ['Cases Per 100K Population','Active Cases Per 100k Population','Total Test:Positive Ratio','New Positive%',]
# x='Tests Per 100K Population'
# df=tf2[['Country,Other',x]]
# df2=df.sort_values(x,ascending=False,inplace=False)
# df2 = df2.reset_index(drop=True)
# df2['cum']=df.index+1
# df2['cum_prob']=100*df2['cum']/max(df2['cum'])
# df3=pd.merge(df,df2,on=['Country,Other'])
# tf2['IND_'+x]=df3['cum_prob']
# for x in ['Cases Per 100K Population','Active Cases Per 100k Population','Total Test:Positive Ratio','New Positive%','Case Fatality Rate%','New Confirmed Case Growth Rate','New Death Case Growth Rate','NPI']:
#     i=1
#     df=tf2[['Country,Other',x]]
#     df2=df.sort_values(x,inplace=False)
#     df2 = df2.reset_index(drop=True)
#     df2['cum']=df.index+1
#     df2['cum_prob']=100*df2['cum']/max(df2['cum'])
#     df3=pd.merge(df,df2,on=['Country,Other'])
#     tf2['IND_'+x]=df3['cum_prob']
#     i+=1
# tf2['Comprehensive Index']=0.1*tf2['IND_Cases Per 100K Population']+0.08*tf2['IND_Tests Per 100K Population']
# +0.2*tf2['IND_Active Cases Per 100k Population']+0.1*tf2['IND_Total Test:Positive Ratio']
# +0.13*tf2['IND_New Positive%']+0.02*tf2['IND_Case Fatality Rate%']+ 0.22*tf2['IND_New Confirmed Case Growth Rate']
# +0.1*tf2['IND_New Death Case Growth Rate']+ 0.05*tf2['IND_NPI']
# today=datetime.datetime.now()
# tf4=tf2[['Country/District','TotalCases','IND_Cases Per 100K Population','IND_Tests Per 100K Population','IND_Total Test:Positive Ratio',
#          'IND_New Positive%','IND_Case Fatality Rate%','IND_New Confirmed Case Growth Rate','IND_New Death Case Growth Rate','IND_Active Cases Per 100k Population',
#          'IND_NPI','Comprehensive Index']]
# tf_c=copy.deepcopy(tf4)
# tf_c_rename=tf_c.rename({'TotalCases':'TOTAL CASE','IND_Cases Per 100K Population':'IND1_Cases Per 100K Population','IND_Tests Per 100K Population':'IND2_Tests Per 100K Population',
#                          'IND_Active Cases Per 100k Population':'IND8_Active Cases Per 100k Population','IND_Total Test:Positive Ratio':'IND3_Total Test:Positive Ratio',
#                          'IND_New Positive%':'IND4_New Positive%','IND_Case Fatality Rate%':'IND5_Case Fatality Rate%','IND_New Confirmed Case Growth Rate':'IND6_New Confirmed Case Growth Rate',
#                          'IND_New Death Case Growth Rate':'IND7_New Death Case Growth Rate','IND_NPI':'NPI'},axis='columns')
# tf_c_rename.to_excel('World_index_{}.xlsx'.format(today),sheet_name='Index',index=False)
# tf2.to_excel('World_raw_index_{}.xlsx'.format(today),sheet_name='Index',index=False)
# brief_raw_data.to_excel('World_rawdata_{}.xlsx'.format(today),sheet_name='Index',index=False)
import pickle
import pandas
import json
from pprint import pprint
from urllib import request
#resp = request.urlopen('https://covidtracking.com/api/v1/states/daily.json')
#proxies = {'http': 'http://proxy.example.com:8080/'}
#opener = request.FancyURLopener(proxies)
# Fetch the covidtracking.com daily state feed, retrying forever until the
# request succeeds (no backoff; the bare except also hides real errors —
# NOTE(review): consider catching requests exceptions only, with a retry cap).
a=0
while a==0:
    try:
        resp = requests.get('https://covidtracking.com/api/v1/states/daily.json')
        a=1
    except:
        a=0
state_data=resp.json()#json.loads(resp.read().decode())
print('stage 1 finished')
import datetime
# Dates for today and the previous nine days, used to look up the feed's
# integer 'date' field (YYYYMMDD).
x0=datetime.date.today()
x1=datetime.date.today()-datetime.timedelta(days=1)
x2=datetime.date.today()-datetime.timedelta(days=2)
x3=datetime.date.today()-datetime.timedelta(days=3)
x4=datetime.date.today()-datetime.timedelta(days=4)
x5=datetime.date.today()-datetime.timedelta(days=5)
x6=datetime.date.today()-datetime.timedelta(days=6)
x7=datetime.date.today()-datetime.timedelta(days=7)
x8=datetime.date.today()-datetime.timedelta(days=8)
x9=datetime.date.today()-datetime.timedelta(days=9)
# run_time
# 'YYYYMMDD' strings; ts[0] is today, ts[1] yesterday, etc.
ts=[]
ts.append(x0.__format__('%Y%m%d'))
ts.append(x1.__format__('%Y%m%d'))
ts.append(x2.__format__('%Y%m%d'))
ts.append(x3.__format__('%Y%m%d'))
ts.append(x4.__format__('%Y%m%d'))
ts.append(x5.__format__('%Y%m%d'))
ts.append(x6.__format__('%Y%m%d'))
ts.append(x7.__format__('%Y%m%d'))
ts.append(x8.__format__('%Y%m%d'))
ts.append(x9.__format__('%Y%m%d'))
print(ts)
# Map full US state names (as scraped from worldometers) to the two-letter
# postal codes used by the covidtracking.com feed's 'state' field.
id_names={'Alabama': 'AL',
'Alaska': 'AK',
'Arizona': 'AZ',
'Arkansas': 'AR',
'California': 'CA',
'Colorado': 'CO',
'Connecticut': 'CT',
'Delaware': 'DE',
'District Of Columbia':'DC',
'Florida': 'FL',
'Georgia': 'GA',
'Hawaii': 'HI',
'Idaho': 'ID',
'Illinois': 'IL',
'Indiana': 'IN',
'Iowa': 'IA',
'Kansas': 'KS',
'Kentucky': 'KY',
'Louisiana': 'LA',
'Maine': 'ME',
'Maryland': 'MD',
'Massachusetts': 'MA',
'Michigan': 'MI',
'Minnesota': 'MN',
'Mississippi': 'MS',
'Missouri': 'MO',
'Montana': 'MT',
'Nebraska': 'NE',
'Nevada': 'NV',
'New Hampshire': 'NH',
'New Jersey': 'NJ',
'New Mexico': 'NM',
'New York': 'NY',
'North Carolina': 'NC',
'North Dakota': 'ND',
'Ohio': 'OH',
'Oklahoma': 'OK',
'Oregon': 'OR',
'Pennsylvania': 'PA',
'Rhode Island': 'RI',
'South Carolina': 'SC',
'South Dakota': 'SD',
'Tennessee': 'TN',
'Texas': 'TX',
'Utah': 'UT',
'Vermont': 'VT',
'Virginia': 'VA',
'Washington': 'WA',
'West Virginia': 'WV',
'Wisconsin': 'WI',
'Wyoming': 'WY'}
from tqdm import tqdm
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
import requests
import json
import time
import random
import html5lib
import re
import scipy.stats as st
from pandas.core.frame import DataFrame
import copy
import math
import datetime
#url='https://www.worldometers.info/coronavirus/#countries'
# Fetch the US states table from worldometers and collect every unstyled
# table row; state rows are located within `x` by the finder functions below.
url='https://www.worldometers.info/coronavirus/country/us/'
a=requests.get(url)
soup = BeautifulSoup(a.content,'html5lib')
x=soup.body.find_all('tr', attrs={'style': ''})
# 190 210
def find_start_yesterday(i, j):
    """Return the index within the module-global row list `x` of the first
    row in range(i, j) whose country link slug is 'texas' or 'california'
    (the first state rows of yesterday's table), requiring that the next
    row also carries a country link.  Returns None when no such row exists
    (the original fell off the end and returned None implicitly).
    """
    for start in range(i, j):
        links_here = x[start].find_all('a', attrs={'class': 'mt_a'})
        links_next = x[start + 1].find_all('a', attrs={'class': 'mt_a'})
        # Skip rows where either this row or the next has no country link.
        if not links_here or not links_next:
            continue
        # Link slug looks like <a href=".../usa/texas/...">; index 3 is the slug.
        slug = str(links_here[0]).split('/')
        if slug[3] == 'texas' or slug[3] == 'california':
            return start
    return None
#385 410
def find_end_yesterday(i, j):
    """Return one past the index within the module-global row list `x` of the
    adjacent pair ('district-of-columbia', 'vermont') in either order —
    the last state rows of yesterday's table.  Scans range(i, j); returns
    None when the pair is not found (the original returned None implicitly).
    """
    for end in range(i, j):
        prev_links = x[end - 1].find_all('a', attrs={'class': 'mt_a'})
        curr_links = x[end].find_all('a', attrs={'class': 'mt_a'})
        # Skip pairs where either row lacks a country link.
        if not prev_links or not curr_links:
            continue
        prev_slug = str(prev_links[0]).split('/')
        curr_slug = str(curr_links[0]).split('/')
        if (prev_slug[3] == 'district-of-columbia' and curr_slug[3] == 'vermont') or (curr_slug[3] == 'district-of-columbia' and prev_slug[3] == 'vermont'):
            return end + 1
    return None
# Locate yesterday's slice of the scraped table (the ranges 80-200 / 64-80
# are empirical offsets into the row list `x`).
end=find_end_yesterday(80,200)
start=find_start_yesterday(64,80)
print('start:{}\tend:{}'.format(start,end))
# Column layout of each row: the first ~25 entries come straight from the
# scraped table cells, the rest are derived metrics appended in the loop below.
col_name=['0','#','2','Country,Other','TotalCases',
'5','NewCases','7','TotalDeaths',
'NewDeaths','10','TotalRecovered','12','ActiveCases','Tot Cases/1M pop',
'Deaths/1M pop','16','TotalTests','Tests/1M pop','19','Pop','21','source','23','24','Cases Per 100K Population',
'Tests Per 100K Population','Active Cases Per 100k Population','Total Test:Positive Ratio','New Positive%',
'Case Fatality Rate%','New Confirmed Case Growth Rate','New Death Case Growth Rate','Average daily cases per 100,000 people in the past week',
'New Test','NPI','key-id','Country/District','Region','field','7 days inc cases','7 days inc deaths']
raw_data=[]
# Scrape each US state's worldometers page and build one feature row per state.
for i in tqdm(range(start,end)):
    # time.sleep(2)
    text_source=x[i]
    l=text_source.find_all('a',attrs={'class':'mt_a'})
    if l==[]:
        continue
    s=str(l[0])
    coun=s.split('/')
    url='https://www.worldometers.info/coronavirus/usa/'+coun[3]+'/'
    # a=requests.get(url,proxies=proxies,headers = headers)
    # Retry the request until it succeeds (no backoff; loops forever on
    # persistent failure).  NOTE(review): `headers` is defined elsewhere in
    # this script — confirm it is in scope here.
    a=''
    while a=='':
        try:
            a=requests.get(url,headers=headers)
        except:
            a=''
    soup = BeautifulSoup(a.content,'html5lib')
    r=soup.body.find_all('script',attrs={'type':'text/javascript'})
    # Pull the chart's x-axis date labels out of the inlined JavaScript.
    p=re.compile(r'categories: \[(.*?)\]',re.S)
    rs=re.findall(p,r[0].text)
    d=rs[0]
    str_pat = re.compile(r'\"(.*?)\"')
    d = str_pat.findall(d)
    date=d
    # Cumulative case series: scan the first 10 script tags for the 'Cases' chart.
    p1=re.compile(r'name: \'Cases\'.*?\[(.*?)\]',re.S)
    for j in range(10):
        try:
            rs=re.findall(p1,r[j].text)
            d=rs[0]
            d=re.sub(r'\"','',d)
            case=d.split(',')
        except:
            # print('{} cases is not{}'.format(coun[1],j))
            continue
    # Same scan for the cumulative death series.
    p1=re.compile(r'name: \'Deaths\'.*?\[(.*?)\]',re.S)
    for j in range(10):
        try:
            rs=re.findall(p1,r[j].text)
            d=rs[0]
            d=re.sub(r'\"','',d)
            TD=d.split(',')
        except:
            continue
    # NOTE(review): `j` here reuses the inner loop variable name above.
    j={'Date':date,'Total Cases':case,'Total Deaths':TD}
    hist_data_of_coun_i=pd.DataFrame(j)
    for k in range(len(hist_data_of_coun_i['Total Deaths'])):
        if hist_data_of_coun_i['Total Deaths'][k]=='null':
            # NOTE(review): `data` is not defined anywhere visible — this looks
            # like it should be `hist_data_of_coun_i`; as written this line
            # raises NameError whenever a 'null' value is present. Confirm.
            data['Total Deaths'][k]=0
    hist_data_of_coun_i['Total Cases']=hist_data_of_coun_i['Total Cases'].astype(int)
    hist_data_of_coun_i['Total Deaths']=hist_data_of_coun_i['Total Deaths'].astype(int)
    hist_data_of_coun_i['case inc']=hist_data_of_coun_i['Total Cases'].diff()
    hist_data_of_coun_i['death inc']=hist_data_of_coun_i['Total Deaths'].diff()
    # 7-day new deaths and cases (translated from Chinese).
    seven_cases=sum([hist_data_of_coun_i.loc[len(date)-i,'case inc'] for i in range(1,8)])
    seven_deaths=sum([hist_data_of_coun_i.loc[len(date)-i,'death inc'] for i in range(1,8)])
    # Day-over-(week-ago-average) ratios and week-over-previous-week ratios.
    inc1=hist_data_of_coun_i.loc[len(date)-1,'case inc']/(7*hist_data_of_coun_i.loc[len(date)-8,'case inc'])
    inc2=hist_data_of_coun_i.loc[len(date)-1,'death inc']/(7*hist_data_of_coun_i.loc[len(date)-8,'death inc'])
    inc_1=sum([hist_data_of_coun_i.loc[len(date)-i,'case inc'] for i in range(1,8)])/sum([hist_data_of_coun_i.loc[len(date)-i,'case inc'] for i in range(8,15)])
    inc_2=sum([hist_data_of_coun_i.loc[len(date)-i,'death inc'] for i in range(1,8)])/sum([hist_data_of_coun_i.loc[len(date)-i,'death inc'] for i in range(8,15)])
    adcp=sum([hist_data_of_coun_i.loc[len(date)-i,'case inc'] for i in range(1,8)])/7
    # 5-day log growth rate of daily new cases; nan/inf rows are dropped
    # before fitting the NPI slope below.
    dd=hist_data_of_coun_i.shift(5)
    hist_data_of_coun_i['inc_p']=np.log(hist_data_of_coun_i['case inc']/dd['case inc'])/5
    hist_data_of_coun_i=hist_data_of_coun_i[~hist_data_of_coun_i.isin([np.nan, np.inf, -np.inf]).any(1)]
    da=hist_data_of_coun_i['inc_p'].values
    try:
        slope,intercept, r_value, p_value, std_err=st.linregress(list(range(30)), da[:30])
    except:
        slope=None
    # print(x[i].text)
    # The row's cell values, cleaned of thousands separators and '+' signs.
    bo=x[i].text.split('\n')
    # print(bo)
    for h in range(len(bo)):
        bo[h]=bo[h].replace(',','')
        bo[h]=bo[h].replace('+','')
        bo[h]=bo[h].strip()
    bo[3]=bo[3].strip()
    try:
        region=id_names[bo[3]]
    except:
        region='missing'
    # print(region)
    # Drop empty placeholder cells so the remaining indices line up with col_name.
    if bo[4]=='':
        del bo[4]
    if bo[11]=='':
        del bo[11]
    # if bo[17]=='':
    #     del bo[17]
    if bo[20]=='':
        del bo[20]
    # Backfill missing "new cases" from the covidtracking.com feed (yesterday's record).
    if bo[6]=='':
        new_cases=0
        for t in state_data:
            if t['state']==region:
                date_time=str(t['date'])
                if date_time == ts[1]:
                    new_cases=t['positiveIncrease']
                    break
        bo[6]=new_cases
    # Backfill missing "new deaths" the same way.
    if bo[9]=='':
        new_cases=0
        for t in state_data:
            if t['state']==region:
                date_time=str(t['date'])
                if date_time == ts[1]:
                    new_cases=t['deathIncrease']
                    break
        bo[9]=new_cases
    if bo[22]!='[projections]':
        del bo[22]
    #match-json
    # bo[3]=bo[3].strip()
    # try:
    #     region=id_names[bo[3]]
    # except:
    #     region='missing'
    # # print(region)
    # Yesterday's new-test count from the feed; defaults to 1 so the
    # positivity division below cannot divide by zero.
    new_test=1
    # test_7 days
    for t in state_data:
        if t['state']==region:
            date_time=str(t['date'])
            if date_time == ts[1]:
                new_test=t['totalTestResultsIncrease']
                break
    print(bo)
    #Cases Per 100K Population
    try:
        bo.append(int(bo[14])/10)
    except:
        continue
    # bo.append(np.nan)
    # print('lack one')
    #Tests Per 100K Population
    if bo[25]=='':
        del bo[25]
    try:
        bo.append(int(bo[18])/10)
    except:
        continue
    # bo.append(np.nan)
    # print('lack one')
    #'Active Cases Per 100k Population'
    try:
        bo.append(int(bo[13])*100000/int(bo[20]))
    except:
        bo.append(np.nan)
    # print('lack one')
    #Total Test:Positive Ratio
    bo.append(int(bo[4])/int(bo[17]))
    #'New Positive%'
    print(region)
    try:
        bo.append(int(bo[6])/new_test)
    except:
        bo.append(0)
    #Case Fatality Rate%
    try:
        if bo[8]=='':
            bo.append(0)
        else:
            bo.append(int(bo[8])/int(bo[4]))
    except:
        bo.append(np.nan)
    #New Confirmed Case Growth Rate
    # try:
    #     q=2
    #     while (math.isnan(inc1) or inc1==np.inf) and q<=9:
    #         inc1=hist_data_of_coun_i.loc[len(date)-q,'case inc']/(7*hist_data_of_coun_i.loc[len(date)-q-7,'case inc'])
    #         # c=hist_data_of_coun_i.loc[len(date)-q,'case inc']
    #         q+=1
    #     # print(c)
    #     if math.isnan(inc1):
    #         bo.append(0)
    #     elif inc1==np.inf:
    #         bo.append(0.01)
    #     else:
    #         bo.append(inc1)
    #     # print(inc1)
    # except:
    #     bo.append(0)
    #     # print('lack one')
    # # print(bo[27])
    # #New Death Case Growth Rate
    # try:
    #     q=2
    #     while (math.isnan(inc2) or inc2==np.inf) and q<=9:
    #         # print(inc2)
    #         inc2=hist_data_of_coun_i.loc[len(date)-1,'death inc']/(7*hist_data_of_coun_i.loc[len(date)-8,'death inc'])
    #         q+=1
    #     # print(inc2)
    #     if math.isnan(inc2):
    #         bo.append(0)
    #     elif inc2==np.inf:
    #         bo.append(0.1)
    #     else:
    #         bo.append(inc2)
    # except:
    #     bo.append(0)
    # Week-over-week case growth, with nan -> 0 and inf -> 0.01 sentinels.
    if math.isnan(inc_1) or inc_1=='':
        bo.append(0)
    elif inc_1==np.inf:
        bo.append(0.01)
    else:
        bo.append(inc_1)
    print(bo[-1])
    #New Sum Death Case Growth Rate
    if math.isnan(inc_2) or inc_2=='':
        bo.append(0)
    elif inc_2==np.inf:
        bo.append(0.1)
    else:
        bo.append(inc_2)
    print(bo[-1])
    #Average daily cases per 100,000 people in the past week
    bo.append(adcp*100000/int(bo[20]))
    # New Test
    bo.append(new_test)
    #NPI
    if slope==np.inf or math.isnan(slope):
        bo.append(0)
    else:
        bo.append(slope)
    # Identification columns: key-id, Region, Country/District, field.
    bo.append(coun[3])
    bo.append(region)
    bo.append('No')
    bo.append('us')
    bo.append(seven_cases)
    bo.append(seven_deaths)
    # if bo[20]=='':
    #     del bo[20]
    print(len(bo))
    print(bo)
    raw_data.append(bo)
# Assemble all per-state rows into a DataFrame with the column layout above.
raw_data=DataFrame(raw_data,columns=col_name)
brief_raw_data=raw_data[['Country,Other','key-id','Country/District','Region','field','TotalCases',
'NewCases','TotalDeaths',
'NewDeaths','ActiveCases','Tot Cases/1M pop',
'Deaths/1M pop','TotalTests','Tests/1M pop','Pop','Cases Per 100K Population',
'Tests Per 100K Population','Active Cases | |
number of labels
if(labels_on):
if( not set_label_strings):
label_strings = ['%d' % i for i in range(ncolors)]
else:
if(len(label_strings) != ncolors):
print("Error: draw_color_palette: invalid number of labels for boxes")
return
#---Calculate X and Y positions of text and box in the view port.
width = 1./ncols
height = 1./nrows
if(ncols > 1):
xpos_2d = numpy.empty([nrows,ncols])
xpos_2d[:] = fspan(0,1-width,ncols)
if(across):
xpos = numpy.ravel(xpos_2d)
else:
xpos = numpy.ravel(xpos_2d.T)
else:
xpos = numpy.empty(ntotal)
xpos.fill(0.)
if(nrows > 1):
ypos_2d = numpy.empty([ncols,nrows])
ypos_2d[:] = fspan(1-height,0,nrows)
if(across):
ypos = numpy.ravel(ypos_2d.T)
else:
ypos = numpy.ravel(ypos_2d)
else:
ypos = numpy.empty(ntotal)
ypos.fill(1.-height)
#---Calculate box coordinates.
xbox = [0,width, width, 0,0]
ybox = [0, 0,height,height,0]
gnres = Resources() # variables to hold list of resources
lnres = Resources()
if(labels_on):
font_space = font_height/2.
txres = Resources()
txres.txFontHeightF = font_height
txres.txFont = "helvetica-bold"
txres.txJust = "BottomLeft"
txres.txPerimOn = True
txres.txPerimColor = "black"
txres.txFontColor = "black"
txres.txBackgroundFillColor = "white"
lnres.gsLineColor = "black"
#---ntotal colors per page.
for i in range(ncolors):
#---Draw box and fill in the appropriate color.
gnres.gsFillColor = rgb[i,:]
polygon_ndc(wks,xbox+xpos[i],ybox+ypos[i],gnres) # Draw box.
#---Outline box in black.
polyline_ndc(wks,xbox+xpos[i],ybox+ypos[i],lnres)
#---Draw color label.
if(labels_on):
text_ndc(wks,label_strings[i],font_space+xpos[i],ypos[i]+font_space,txres)
if(call_frame):
frame(wks) # Advance the frame.
return
################################################################
def draw_ndc_grid(wks):
    """
    Draws grid lines at 0.1 NDC intervals and labels them.

    Ngl.draw_ndc_grid(wks)

    wks -- The identifier returned from calling Ngl.open_wks.
    """
    gray_index = new_color(wks,0.72,0.72,0.72)     # Add gray.
    gridres = Resources()                          # polyline mods desired
    gridres.gsLineColor            = gray_index    # color of lines
    gridres.gsLineThicknessF       = 1.5           # thickness of lines
    gridres.gsLineDashPattern      = 1             # dash the lines
    gridres.gsLineLabelFontColor   = gray_index    # color of labels
    gridres.gsLineLabelFontHeightF = 0.0105        # size of labels
    #
    # One labeled horizontal and one labeled vertical line at each
    # 0.1 NDC interval.
    #
    for step in range(1,10):
        gridres.gsLineLabelString = step*10/100.
        polyline_ndc(wks,[0.,1.],[step*10/100.,step*10/100.],gridres)
        polyline_ndc(wks,[step*10/100.,step*10/100.],[0.,1.],gridres)
    #
    # Lines just inside the viewport edges, at 0.01 and 0.99 NDC:
    # horizontals first, then verticals (same drawing order as before).
    #
    for edge in (0.01, 0.99):
        gridres.gsLineLabelString = edge
        polyline_ndc(wks,[0.,1.],[edge,edge],gridres)
    for edge in (0.01, 0.99):
        gridres.gsLineLabelString = edge
        polyline_ndc(wks,[edge,edge],[0.,1.],gridres)
    return
################################################################
def end():
    """
    Terminates a PyNGL script: flushes all buffers and closes all
    internal files.

    Ngl.end()
    """
    # Delegate shutdown to the HLU library.
    NhlClose()
    return None
################################################################
def frame(wks):
    """
    Terminates (advances) a picture on the given workstation.

    Ngl.frame(wks)

    wks -- The identifier returned from calling Ngl.open_wks.
    """
    # Thin wrapper over the HLU frame call.
    NhlFrame(wks)
    return None
################################################################
def fspan(min, max, num):
    """
    Returns an array of evenly-spaced 32-bit floating point numbers.

    sarray = Ngl.fspan(start, end, num)

    start -- Value at which to start.
    end   -- Value at which to end (always the exact last element).
    num   -- Number of equally-spaced points desired between start and end.

    (Parameter names `min`/`max` shadow the builtins but are kept for
    backward compatibility with keyword callers.)
    """
    # numpy.linspace reproduces the original arithmetic (start + i*delta in
    # double precision, endpoint pinned exactly to `end`) and additionally
    # handles num == 1 without the ZeroDivisionError the hand-rolled loop had.
    return numpy.linspace(min, max, num).astype('f')
################################################################
def ftcurv(x,y,xo):
    """
    Calculates an interpolatory spline through a sequence of functional
    values.

    iarray = Ngl.ftcurv(xi, yi, xo)

    xi -- An array containing the abscissae for the input function, with
          rightmost dimension npts. If xi is multi-dimensional, it must
          have the same dimension sizes as yi.
    yi -- An array of any dimensionality, whose rightmost dimension is
          npts, containing the functional values of the input
          function. That is, yi(...,k) is the functional value at
          xi(...,k) for k=0,npts-1.
    xo -- A 1D array of length nxo containing the abscissae for the
          interpolated values.

    Returns None (after printing a diagnostic) on invalid input.
    """
    if _is_list_or_tuple(x):
        dsizes_x = len(x)
    elif _is_numpy_array(x):
        dsizes_x = x.shape[0]
    else:
        print("ftcurv: type of argument 1 must be one of: list, tuple, or NumPy array")
        return None
    if _is_list_or_tuple(y):
        dsizes_y = len(y)
    elif _is_numpy_array(y):
        dsizes_y = y.shape[0]
    else:
        print("ftcurv: type of argument 2 must be one of: list, tuple, or NumPy array")
        return None
    if (dsizes_x != dsizes_y):
        print("ftcurv: first and second arguments must be the same length.")
        return None
    if _is_list_or_tuple(xo):
        dsizes_xo = len(xo)
    elif _is_numpy_array(xo):
        dsizes_xo = xo.shape[0]
    else:
        # FIX: previously this fell through with dsizes_xo unbound, raising
        # NameError; report the bad type consistently with the other arguments.
        print("ftcurv: type of argument 3 must be one of: list, tuple, or NumPy array")
        return None
    status,yo = ftcurvc(dsizes_x,x,y,dsizes_xo,xo)
    if (status == 1):
        print("ftcurv: input array must have at least three elements.")
        return None
    elif (status == 2):
        print("ftcurv: input array values must be strictly increasing.")
        return None
    else:
        del status
        return yo
################################################################
def ftcurvp(x,y,p,xo):
    """
    Calculates an interpolatory spline under tension through a sequence of
    functional values for a periodic function.

    iarray = Ngl.ftcurvp(xi, yi, p, xo)

    xi -- An array containing the abscissae for the input function, with
          rightmost dimension npts. If xi is multi-dimensional, it must
          have the same dimension sizes as yi.
    yi -- An array of any dimensionality, whose rightmost dimension is
          npts, containing the functional values of the input
          function. That is, yi(...,k) is the functional value at
          xi(...,k) for k=0,npts-1.
    p  -- A scalar value specifying the period of the input function; the
          value must not be less than xi(npts-1) - xi(0).
    xo -- A 1D array of length nxo containing the abscissae for the
          interpolated values.

    Returns None (after printing a diagnostic) on invalid input.
    """
    if _is_list_or_tuple(x):
        dsizes_x = len(x)
    elif _is_numpy_array(x):
        dsizes_x = x.shape[0]
    else:
        print("ftcurvp: type of argument 1 must be one of: list, tuple, or NumPy array")
        return None
    if _is_list_or_tuple(y):
        dsizes_y = len(y)
    elif _is_numpy_array(y):
        dsizes_y = y.shape[0]
    else:
        print("ftcurvp: type of argument 2 must be one of: list, tuple, or NumPy array")
        return None
    if (dsizes_x != dsizes_y):
        print("ftcurvp: first and second arguments must be the same length.")
        return None
    if _is_list_or_tuple(xo):
        dsizes_xo = len(xo)
    elif _is_numpy_array(xo):
        dsizes_xo = xo.shape[0]
    else:
        # FIX: previously this fell through with dsizes_xo unbound, raising
        # NameError; report the bad type consistently with the other arguments.
        print("ftcurvp: type of argument 4 must be one of: list, tuple, or NumPy array")
        return None
    status,yo = ftcurvpc(dsizes_x,x,y,p,dsizes_xo,xo)
    if (status == 1):
        print("ftcurvp: input array must have at least three elements.")
        return None
    elif (status == 2):
        print("ftcurvp: the period is strictly less than the span of the abscissae.")
        return None
    else:
        del status
        return yo
################################################################
def ftcurvpi(xl, xr, p, x, y):
    """
    Calculates an integral of an interpolatory spline between two
    specified points.

    iarray = Ngl.ftcurvpi(xl, xr, p, xi, yi)

    xl -- A scalar value containing the lower limit of the integration.
    xr -- A scalar value containing the upper limit of the integration.
    p  -- A scalar value specifying the period of the input function; the
          value must not be less than xi(npts-1) - xi(0).
    xi -- An array containing the abscissae for the input function, with
          rightmost dimension npts. If xi is multi-dimensional, it must
          have the same dimension sizes as yi.
    yi -- An array of any dimensionality, whose rightmost dimension is
          npts, containing the functional values of the input
          function. That is, yi(...,k) is the functional value at
          xi(...,k) for k=0,npts-1.
    """
    # Lengths of the abscissae/value arrays; anything that is not a
    # list/tuple/NumPy array is rejected with a diagnostic.
    if _is_list_or_tuple(x):
        npts_x = len(x)
    elif _is_numpy_array(x):
        npts_x = x.shape[0]
    else:
        print("ftcurvpi: type of argument 4 must be one of: list, tuple, or NumPy array")
        return None
    if _is_list_or_tuple(y):
        npts_y = len(y)
    elif _is_numpy_array(y):
        npts_y = y.shape[0]
    else:
        print("ftcurvpi: type of argument 5 must be one of: list, tuple, or NumPy array")
        return None
    if npts_x != npts_y:
        print("ftcurvpi: fourth and fifth arguments must be the same length.")
        return None
    # ftcurvpic returns a pair; element [1] carries the integral value.
    return ftcurvpic(xl, xr, p, npts_x, x, y)[1]
################################################################
def gaus(n):
    """
    Computes gaussian latitudes and weights and returns a NumPy array
    dimensioned 2*nlat-by-2.

    ginfo = Ngl.gaus(nlat)

    nlat -- A scalar integer equal to the number of latitude points per
            hemisphere.
    """
    # NglGaus_p hands back a sequence; element [1] holds the 2*n-by-2 array.
    total_lats = 2 * n
    result = NglGaus_p(n, total_lats, 2)
    return result[1]
################################################################
def gc_convert(angle,ctype):
    """
    Converts degrees along a great circle to radians, meters, feet, or
    kilometers and returns a NumPy array of the same shape as angle.

    conv_vals = Ngl.gc_convert(angle, type)

    angle -- A one-dimensional NumPy array (or scalar value) of angles
             (in degrees).
    type -- A string (or integer) indicating the units you want to convert
            to. Legal values are:
              "radians" (or 0)
              "meters" (or 1)
              "kilometers" (or 2)
              "feet" (or 3)
              "miles" (or 4)

    Returns None (after printing a diagnostic) for an unrecognized type.
    """
    #
    # Convert an angle in degrees along a great circle to
    # radians, meters, kilometers, or feet.
    #
    d2r = 0.0174532952      # degrees to radians
    r2m = 6371220.          # radians to meters
    m2f = 3.2808            # meters to feet
    # Map the integer aliases onto the two-letter string prefixes.
    int_aliases = {0: "ra", 1: "me", 2: "ki", 3: "fe", 4: "mi"}
    try:
        dtype = int_aliases.get(ctype, ctype)
    except TypeError:       # unhashable ctype (e.g. a list): fall through
        dtype = ctype
    if not isinstance(dtype, str):
        # FIX: an unrecognized non-string type (e.g. ctype=5) previously made
        # dtype[0:2] below raise TypeError instead of printing the diagnostic.
        print("gc_convert: unrecognized conversion type " + str(ctype))
        return None
    prefix = dtype[0:2]
    if (prefix == "ra"):
        return d2r*angle
    elif (prefix == "me"):
        return d2r*angle*r2m
    elif (prefix == "ki"):
        return d2r*angle*r2m/1000.
    elif (prefix == "fe"):
        return d2r*angle*r2m*m2f
    elif (prefix == "mi"):
        return d2r*angle*r2m*m2f/5280.
    else:
        print("gc_convert: unrecognized conversion type " + str(ctype))
################################################################
def gc_dist(rlat1,rlon1,rlat2,rlon2):
"""
Calculates the distance in degrees along a great circle between two
points.
dist = Ngl.gc_dist(lat1, lon1, lat2, lon2)
lat1, lon1 -- Latitude and longitude of first point on the globe.
lat2, lon2 -- Latitude and longitude of second point on the globe.
"""
return | |
"""Per-prefix data, mapping each prefix to a dict of locale:name.
Auto-generated file, do not edit by hand.
"""
from ..util import u
# Copyright (C) 2011-2019 The Libphonenumber Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
data = {
'86156856':{'en': 'Tongren, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u94dc\u4ec1\u5730\u533a')},
'861568560':{'en': 'Guiyang, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u8d35\u9633\u5e02')},
'861568561':{'en': 'Zunyi, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u9075\u4e49\u5e02')},
'861568562':{'en': 'Zunyi, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u9075\u4e49\u5e02')},
'861568563':{'en': 'Liupanshui, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u516d\u76d8\u6c34\u5e02')},
'86156857':{'en': 'Bijie, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u6bd5\u8282\u5730\u533a')},
'86156858':{'en': 'Liupanshui, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u516d\u76d8\u6c34\u5e02')},
'86156859':{'en': 'Qianxinan, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u897f\u5357\u5e03\u4f9d\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861568590':{'en': 'Guiyang, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u8d35\u9633\u5e02')},
'861568591':{'en': 'Bijie, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u6bd5\u8282\u5730\u533a')},
'861568592':{'en': 'Bijie, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u6bd5\u8282\u5730\u533a')},
'86156860':{'en': 'XiAn, Shaanxi', 'zh': u('\u9655\u897f\u7701\u897f\u5b89\u5e02')},
'86156861':{'en': '<NAME>', 'zh': u('\u9655\u897f\u7701\u54b8\u9633\u5e02')},
'861568619':{'en': 'Han<NAME>', 'zh': u('\u9655\u897f\u7701\u6c49\u4e2d\u5e02')},
'86156862':{'en': '<NAME>', 'zh': u('\u9655\u897f\u7701\u897f\u5b89\u5e02')},
'86156863':{'en': '<NAME>', 'zh': u('\u9655\u897f\u7701\u5546\u6d1b\u5e02')},
'861568630':{'en': '<NAME>', 'zh': u('\u9655\u897f\u7701\u6e2d\u5357\u5e02')},
'861568631':{'en': '<NAME>', 'zh': u('\u9655\u897f\u7701\u6e2d\u5357\u5e02')},
'861568632':{'en': '<NAME>', 'zh': u('\u9655\u897f\u7701\u6e2d\u5357\u5e02')},
'861568633':{'en': '<NAME>', 'zh': u('\u9655\u897f\u7701\u6e2d\u5357\u5e02')},
'86156864':{'en': 'XiAn, Shaanxi', 'zh': u('\u9655\u897f\u7701\u897f\u5b89\u5e02')},
'86156865':{'en': 'Hanzhong, Shaanxi', 'zh': u('\u9655\u897f\u7701\u6c49\u4e2d\u5e02')},
'861568656':{'en': 'Tongchuan, Shaanxi', 'zh': u('\u9655\u897f\u7701\u94dc\u5ddd\u5e02')},
'861568657':{'en': 'Tongchuan, Shaanxi', 'zh': u('\u9655\u897f\u7701\u94dc\u5ddd\u5e02')},
'861568658':{'en': 'Tongchuan, Shaanxi', 'zh': u('\u9655\u897f\u7701\u94dc\u5ddd\u5e02')},
'861568659':{'en': 'Tongchuan, Shaanxi', 'zh': u('\u9655\u897f\u7701\u94dc\u5ddd\u5e02')},
'86156866':{'en': 'Yulin, Shaanxi', 'zh': u('\u9655\u897f\u7701\u6986\u6797\u5e02')},
'86156867':{'en': 'Yulin, Shaanxi', 'zh': u('\u9655\u897f\u7701\u6986\u6797\u5e02')},
'861568678':{'en': 'Hanzhong, Shaanxi', 'zh': u('\u9655\u897f\u7701\u6c49\u4e2d\u5e02')},
'861568679':{'en': 'Hanzhong, Shaanxi', 'zh': u('\u9655\u897f\u7701\u6c49\u4e2d\u5e02')},
'86156868':{'en': 'Yulin, Shaanxi', 'zh': u('\u9655\u897f\u7701\u6986\u6797\u5e02')},
'86156869':{'en': 'Baoji, Shaanxi', 'zh': u('\u9655\u897f\u7701\u5b9d\u9e21\u5e02')},
'861568700':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u662d\u901a\u5e02')},
'861568701':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u662d\u901a\u5e02')},
'861568702':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u662d\u901a\u5e02')},
'861568703':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u662d\u901a\u5e02')},
'861568704':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u662d\u901a\u5e02')},
'861568705':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'861568706':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'861568707':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'861568708':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'861568709':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'86156871':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'86156872':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u5927\u7406\u767d\u65cf\u81ea\u6cbb\u5dde')},
'861568727':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u4e3d\u6c5f\u5e02')},
'861568728':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u4e3d\u6c5f\u5e02')},
'861568729':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u8fea\u5e86\u85cf\u65cf\u81ea\u6cbb\u5dde')},
'86156873':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u7ea2\u6cb3\u54c8\u5c3c\u65cf\u5f5d\u65cf\u81ea\u6cbb\u5dde')},
'861568737':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'861568738':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'861568739':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'86156874':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u66f2\u9756\u5e02')},
'86156875':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u4fdd\u5c71\u5e02')},
'861568756':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u5fb7\u5b8f\u50a3\u65cf\u666f\u9887\u65cf\u81ea\u6cbb\u5dde')},
'861568757':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u5fb7\u5b8f\u50a3\u65cf\u666f\u9887\u65cf\u81ea\u6cbb\u5dde')},
'861568758':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u5fb7\u5b8f\u50a3\u65cf\u666f\u9887\u65cf\u81ea\u6cbb\u5dde')},
'861568759':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6012\u6c5f\u5088\u50f3\u65cf\u81ea\u6cbb\u5dde')},
'86156876':{'en': 'Kunming, Yunnan', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'861568766':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6587\u5c71\u58ee\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861568767':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6587\u5c71\u58ee\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861568768':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6587\u5c71\u58ee\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861568769':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6587\u5c71\u58ee\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861568770':{'en': 'Yuxi, Yunnan', 'zh': u('\u4e91\u5357\u7701\u7389\u6eaa\u5e02')},
'861568771':{'en': 'Yuxi, Yunnan', 'zh': u('\u4e91\u5357\u7701\u7389\u6eaa\u5e02')},
'861568772':{'en': 'Yuxi, Yunnan', 'zh': u('\u4e91\u5357\u7701\u7389\u6eaa\u5e02')},
'861568773':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u7389\u6eaa\u5e02')},
'861568774':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u7389\u6eaa\u5e02')},
'861568775':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'861568776':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'861568777':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'861568778':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'861568779':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'861568780':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u695a\u96c4\u5f5d\u65cf\u81ea\u6cbb\u5dde')},
'861568781':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u695a\u96c4\u5f5d\u65cf\u81ea\u6cbb\u5dde')},
'861568782':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u695a\u96c4\u5f5d\u65cf\u81ea\u6cbb\u5dde')},
'861568783':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u695a\u96c4\u5f5d\u65cf\u81ea\u6cbb\u5dde')},
'861568784':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u695a\u96c4\u5f5d\u65cf\u81ea\u6cbb\u5dde')},
'861568785':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'861568786':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'861568787':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'861568788':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'861568789':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'861568790':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u897f\u53cc\u7248\u7eb3\u50a3\u65cf\u81ea\u6cbb\u5dde')},
'861568791':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u897f\u53cc\u7248\u7eb3\u50a3\u65cf\u81ea\u6cbb\u5dde')},
'861568792':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u897f\u53cc\u7248\u7eb3\u50a3\u65cf\u81ea\u6cbb\u5dde')},
'861568793':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u4e34\u6ca7\u5e02')},
'861568794':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u4e34\u6ca7\u5e02')},
'861568795':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u4e34\u6ca7\u5e02')},
'861568796':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u666e\u6d31\u5e02')},
'861568797':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u666e\u6d31\u5e02')},
'861568798':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u666e\u6d31\u5e02')},
'861568799':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u666e\u6d31\u5e02')},
'86156880':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u94dc\u4ec1\u5730\u533a')},
'861568800':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u5357\u5e03\u4f9d\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861568805':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u4e1c\u5357\u82d7\u65cf\u4f97\u65cf\u81ea\u6cbb\u5dde')},
'861568807':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u6bd5\u8282\u5730\u533a')},
'861568808':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u516d\u76d8\u6c34\u5e02')},
'86156881':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5357\u9633\u5e02')},
'861568820':{'en': 'Ch<NAME>', 'zh': u('\u6e56\u5357\u7701\u5e38\u5fb7\u5e02')},
'861568821':{'en': 'Shaoyang, Hunan', 'zh': u('\u6e56\u5357\u7701\u90b5\u9633\u5e02')},
'861568822':{'en': 'Shaoyang, Hunan', 'zh': u('\u6e56\u5357\u7701\u90b5\u9633\u5e02')},
'861568823':{'en': 'Shaoyang, Hunan', 'zh': u('\u6e56\u5357\u7701\u90b5\u9633\u5e02')},
'861568824':{'en': 'Loudi, Hunan', 'zh': u('\u6e56\u5357\u7701\u5a04\u5e95\u5e02')},
'861568825':{'en': 'Loudi, Hunan', 'zh': u('\u6e56\u5357\u7701\u5a04\u5e95\u5e02')},
'861568826':{'en': 'Yongzhou, Hunan', 'zh': u('\u6e56\u5357\u7701\u6c38\u5dde\u5e02')},
'861568827':{'en': 'Yongzhou, Hunan', 'zh': u('\u6e56\u5357\u7701\u6c38\u5dde\u5e02')},
'861568828':{'en': 'Huaihua, Hunan', 'zh': u('\u6e56\u5357\u7701\u6000\u5316\u5e02')},
'861568829':{'en': 'Huaihua, Hunan', 'zh': u('\u6e56\u5357\u7701\u6000\u5316\u5e02')},
'861568830':{'en': 'Bayingolin, Xinjiang', 'zh': u('\u65b0\u7586\u5df4\u97f3\u90ed\u695e\u8499\u53e4\u81ea\u6cbb\u5dde')},
'861568831':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u5410\u9c81\u756a\u5730\u533a')},
'861568832':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u54c8\u5bc6\u5730\u533a')},
'861568833':{'en': 'Hami, Xinjiang', 'zh': u('\u65b0\u7586\u54c8\u5bc6\u5730\u533a')},
'861568834':{'en': 'Changji, Xinjiang', 'zh': u('\u65b0\u7586\u660c\u5409\u56de\u65cf\u81ea\u6cbb\u5dde')},
'861568835':{'en': 'Changji, Xinjiang', 'zh': u('\u65b0\u7586\u660c\u5409\u56de\u65cf\u81ea\u6cbb\u5dde')},
'861568836':{'en': 'Kashi, Xinjiang', 'zh': u('\u65b0\u7586\u5580\u4ec0\u5730\u533a')},
'861568837':{'en': 'Kashi, Xinjiang', 'zh': u('\u65b0\u7586\u5580\u4ec0\u5730\u533a')},
'861568838':{'en': 'Ili, Xinjiang', 'zh': u('\u65b0\u7586\u4f0a\u7281\u54c8\u8428\u514b\u81ea\u6cbb\u5dde')},
'861568839':{'en': 'Ili, Xinjiang', 'zh': u('\u65b0\u7586\u4f0a\u7281\u54c8\u8428\u514b\u81ea\u6cbb\u5dde')},
'86156884':{'en': 'Jinan, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')},
'86156885':{'en': 'Yantai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u70df\u53f0\u5e02')},
'86156886':{'en': 'Yantai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u70df\u53f0\u5e02')},
'861568866':{'en': 'Weihai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5a01\u6d77\u5e02')},
'861568867':{'en': 'Weihai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5a01\u6d77\u5e02')},
'861568868':{'en': 'Weihai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5a01\u6d77\u5e02')},
'861568869':{'en': 'Weihai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5a01\u6d77\u5e02')},
'861568870':{'en': 'Weihai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5a01\u6d77\u5e02')},
'861568871':{'en': 'Weihai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5a01\u6d77\u5e02')},
'861568872':{'en': 'Weihai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5a01\u6d77\u5e02')},
'861568873':{'en': 'Weihai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5a01\u6d77\u5e02')},
'861568874':{'en': 'Weihai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5a01\u6d77\u5e02')},
'861568875':{'en': 'Laiwu, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u83b1\u829c\u5e02')},
'861568876':{'en': 'Laiwu, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u83b1\u829c\u5e02')},
'861568877':{'en': 'Laiwu, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u83b1\u829c\u5e02')},
'861568878':{'en': 'Binzhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6ee8\u5dde\u5e02')},
'861568879':{'en': 'Binzhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6ee8\u5dde\u5e02')},
'86156888':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')},
'861568880':{'en': 'Dezhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5fb7\u5dde\u5e02')},
'861568881':{'en': 'Rizhao, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u65e5\u7167\u5e02')},
'861568882':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6cf0\u5b89\u5e02')},
'86156889':{'en': '<NAME>', 'zh': u('\u5409\u6797\u7701\u5409\u6797\u5e02')},
'86156890':{'en': 'Zibo, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6dc4\u535a\u5e02')},
'86156891':{'en': 'Weifang, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6f4d\u574a\u5e02')},
'861568910':{'en': 'Qingdao, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u9752\u5c9b\u5e02')},
'861568911':{'en': 'Qingdao, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u9752\u5c9b\u5e02')},
'861568912':{'en': 'Qingdao, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u9752\u5c9b\u5e02')},
'861568913':{'en': 'Qingdao, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u9752\u5c9b\u5e02')},
'86156892':{'en': 'Weifang, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6f4d\u574a\u5e02')},
'861568929':{'en': 'Zibo, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6dc4\u535a\u5e02')},
'86156893':{'en': 'Zaozhuang, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u67a3\u5e84\u5e02')},
'861568930':{'en': 'Zibo, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6dc4\u535a\u5e02')},
'861568931':{'en': 'Zibo, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6dc4\u535a\u5e02')},
'861568932':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6dc4\u535a\u5e02')},
'86156894':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u9752\u5c9b\u5e02')},
'861568940':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')},
'861568941':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')},
'861568942':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')},
'861568949':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u5a01\u6d77\u5e02')},
'86156895':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u5fb7\u5dde\u5e02')},
'861568956':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u4e34\u6c82\u5e02')},
'861568957':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u4e34\u6c82\u5e02')},
'861568958':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u4e34\u6c82\u5e02')},
'861568959':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u4e34\u6c82\u5e02')},
'861568960':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u4e34\u6c82\u5e02')},
'861568961':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u4e34\u6c82\u5e02')},
'861568962':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u65e5\u7167\u5e02')},
'861568963':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u65e5\u7167\u5e02')},
'861568964':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u65e5\u7167\u5e02')},
'861568965':{'en': 'Rizhao, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u65e5\u7167\u5e02')},
'861568966':{'en': 'Rizhao, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u65e5\u7167\u5e02')},
'861568967':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6cf0\u5b89\u5e02')},
'861568968':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u804a\u57ce\u5e02')},
'861568969':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')},
'86156897':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5b81\u5e02')},
'861568970':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')},
'861568971':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')},
'861568972':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')},
'861568973':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')},
'86156898':{'en': 'Weifang, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6f4d\u574a\u5e02')},
'86156899':{'en': 'Qingdao, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u9752\u5c9b\u5e02')},
'861569000':{'en': '<NAME>', 'zh': u('\u6cb3\u5317\u7701\u90af\u90f8\u5e02')},
'861569001':{'en': '<NAME>', 'zh': u('\u6cb3\u5317\u7701\u90af\u90f8\u5e02')},
'861569002':{'en': '<NAME>', 'zh': u('\u6cb3\u5317\u7701\u90af\u90f8\u5e02')},
'861569003':{'en': '<NAME>', 'zh': u('\u6cb3\u5317\u7701\u90af\u90f8\u5e02')},
'861569004':{'en': 'Handan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90af\u90f8\u5e02')},
'861569005':{'en': 'Qinhuangdao, Hebei', 'zh': u('\u6cb3\u5317\u7701\u79e6\u7687\u5c9b\u5e02')},
'861569006':{'en': 'Qinhuangdao, Hebei', 'zh': u('\u6cb3\u5317\u7701\u79e6\u7687\u5c9b\u5e02')},
'861569007':{'en': 'Qinhuangdao, Hebei', 'zh': u('\u6cb3\u5317\u7701\u79e6\u7687\u5c9b\u5e02')},
'861569008':{'en': 'Chengde, Hebei', 'zh': u('\u6cb3\u5317\u7701\u627f\u5fb7\u5e02')},
'861569009':{'en': 'Chengde, Hebei', 'zh': u('\u6cb3\u5317\u7701\u627f\u5fb7\u5e02')},
'861569010':{'en': 'Handan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90af\u90f8\u5e02')},
'861569011':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')},
'861569012':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')},
'861569013':{'en': 'Langfang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5eca\u574a\u5e02')},
'861569014':{'en': 'Langfang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5eca\u574a\u5e02')},
'861569015':{'en': 'Langfang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5eca\u574a\u5e02')},
'861569016':{'en': 'Langfang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5eca\u574a\u5e02')},
'861569017':{'en': 'Langfang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5eca\u574a\u5e02')},
'861569018':{'en': 'Hengshui, Hebei', 'zh': u('\u6cb3\u5317\u7701\u8861\u6c34\u5e02')},
'861569019':{'en': 'Hengshui, Hebei', 'zh': u('\u6cb3\u5317\u7701\u8861\u6c34\u5e02')},
'86156902':{'en': 'Baoding, Hebei', 'zh': u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')},
'861569026':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')},
'861569027':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')},
'861569028':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')},
'861569029':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')},
'86156903':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')},
'861569037':{'en': 'Xingtai, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90a2\u53f0\u5e02')},
'861569038':{'en': 'Xingtai, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90a2\u53f0\u5e02')},
'861569039':{'en': 'Xingtai, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90a2\u53f0\u5e02')},
'861569040':{'en': 'Xingtai, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90a2\u53f0\u5e02')},
'861569041':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')},
'861569042':{'en': 'Zhangjiakou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5f20\u5bb6\u53e3\u5e02')},
'861569043':{'en': 'Baoding, Hebei', 'zh': u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')},
'861569044':{'en': 'Baoding, Hebei', 'zh': u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')},
'861569045':{'en': 'Tangshan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5510\u5c71\u5e02')},
'861569046':{'en': 'Langfang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5eca\u574a\u5e02')},
'861569047':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')},
'861569048':{'en': 'Hengshui, Hebei', 'zh': u('\u6cb3\u5317\u7701\u8861\u6c34\u5e02')},
'861569049':{'en': 'Xingtai, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90a2\u53f0\u5e02')},
'86156905':{'en': 'Tangshan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5510\u5c71\u5e02')},
'861569057':{'en': 'Zhangjiakou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5f20\u5bb6\u53e3\u5e02')},
'861569058':{'en': 'Zhangjiakou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5f20\u5bb6\u53e3\u5e02')},
'861569059':{'en': 'Zhangjiakou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5f20\u5bb6\u53e3\u5e02')},
'861569060':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5b89\u9633\u5e02')},
'861569061':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5b89\u9633\u5e02')},
'861569062':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u7126\u4f5c\u5e02')},
'861569063':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u7126\u4f5c\u5e02')},
'861569064':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6d1b\u9633\u5e02')},
'861569065':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u9e64\u58c1\u5e02')},
'861569066':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6d1b\u9633\u5e02')},
'861569067':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5f00\u5c01\u5e02')},
'861569068':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6f2f\u6cb3\u5e02')},
'861569069':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5357\u9633\u5e02')},
'861569070':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5357\u9633\u5e02')},
'861569071':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5e73\u9876\u5c71\u5e02')},
'861569072':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5e73\u9876\u5c71\u5e02')},
'861569073':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6fee\u9633\u5e02')},
'861569074':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u9a7b\u9a6c\u5e97\u5e02')},
'861569075':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u9a7b\u9a6c\u5e97\u5e02')},
'861569076':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5546\u4e18\u5e02')},
'861569077':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5546\u4e18\u5e02')},
'861569078':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u65b0\u4e61\u5e02')},
'861569079':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u65b0\u4e61\u5e02')},
'861569080':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u4e09\u95e8\u5ce1\u5e02')},
'861569081':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5468\u53e3\u5e02')},
'861569082':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5468\u53e3\u5e02')},
'861569083':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u4fe1\u9633\u5e02')},
'861569084':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u90d1\u5dde\u5e02')},
'861569085':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u90d1\u5dde\u5e02')},
'861569086':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u90d1\u5dde\u5e02')},
'861569087':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u90d1\u5dde\u5e02')},
'861569088':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u90d1\u5dde\u5e02')},
'861569089':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u8bb8\u660c\u5e02')},
'861569090':{'en': 'Baotou, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u5305\u5934\u5e02')},
'861569091':{'en': 'Hohhot, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u547c\u548c\u6d69\u7279\u5e02')},
'861569092':{'en': 'Hinggan, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u5174\u5b89\u76df')},
'861569093':{'en': 'Hulun, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u547c\u4f26\u8d1d\u5c14\u5e02')},
'861569094':{'en': 'Alxa, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u963f\u62c9\u5584\u76df')},
'861569095':{'en': 'Tongliao, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u901a\u8fbd\u5e02')},
'861569096':{'en': 'Chifeng, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u8d64\u5cf0\u5e02')},
'861569097':{'en': 'Ordos, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u9102\u5c14\u591a\u65af\u5e02')},
'861569098':{'en': | |
**Request Syntax**
::
response = client.list_groups(
MaxResults='string',
NextToken='string'
)
**Response Syntax**
::
{
'Groups': [
{
'Arn': 'string',
'CreationTimestamp': 'string',
'Id': 'string',
'LastUpdatedTimestamp': 'string',
'LatestVersion': 'string',
'LatestVersionArn': 'string',
'Name': 'string'
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **Groups** *(list) --* Information about a group.
- *(dict) --* Information about a group.
- **Arn** *(string) --* The ARN of the group.
- **CreationTimestamp** *(string) --* The time, in milliseconds since the epoch, when the group was created.
- **Id** *(string) --* The ID of the group.
- **LastUpdatedTimestamp** *(string) --* The time, in milliseconds since the epoch, when the group was last updated.
- **LatestVersion** *(string) --* The latest version of the group.
- **LatestVersionArn** *(string) --* The ARN of the latest version of the group.
- **Name** *(string) --* The name of the group.
- **NextToken** *(string) --* The token for the next set of results, or ''null'' if there are no additional results.
:type MaxResults: string
:param MaxResults: The maximum number of results to be returned per request.
:type NextToken: string
:param NextToken: The token for the next set of results, or \'\'null\'\' if there are no additional results.
:rtype: dict
:returns:
"""
pass
def list_logger_definition_versions(self, LoggerDefinitionId: str, MaxResults: str = None, NextToken: str = None) -> Dict:
    """
    Lists the versions of a logger definition.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/ListLoggerDefinitionVersions>`_

    The response dict carries 'NextToken' (the token for the next set of
    results, or ''null'' if there are no additional results) and
    'Versions', a list of dicts each holding 'Arn', 'CreationTimestamp'
    (milliseconds since the epoch), 'Id', and 'Version'.

    :type LoggerDefinitionId: string
    :param LoggerDefinitionId: **[REQUIRED]** The ID of the logger definition.
    :type MaxResults: string
    :param MaxResults: The maximum number of results to be returned per request.
    :type NextToken: string
    :param NextToken: The token for the next set of results, or ''null'' if there are no additional results.
    :rtype: dict
    :returns: The response dict described above.
    """
    pass
def list_logger_definitions(self, MaxResults: str = None, NextToken: str = None) -> Dict:
    """
    Retrieves a list of logger definitions.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/ListLoggerDefinitions>`_

    The response dict carries 'Definitions' (a list of dicts each holding
    'Arn', 'CreationTimestamp', 'Id', 'LastUpdatedTimestamp',
    'LatestVersion', 'LatestVersionArn', 'Name', and a string-to-string
    'Tags' mapping) and 'NextToken' (the token for the next set of
    results, or ''null'' if there are no additional results).

    :type MaxResults: string
    :param MaxResults: The maximum number of results to be returned per request.
    :type NextToken: string
    :param NextToken: The token for the next set of results, or ''null'' if there are no additional results.
    :rtype: dict
    :returns: The response dict described above.
    """
    pass
def list_resource_definition_versions(self, ResourceDefinitionId: str, MaxResults: str = None, NextToken: str = None) -> Dict:
    """
    Lists the versions of a resource definition.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/ListResourceDefinitionVersions>`_

    The response dict carries 'NextToken' (the token for the next set of
    results, or ''null'' if there are no additional results) and
    'Versions', a list of dicts each holding 'Arn', 'CreationTimestamp'
    (milliseconds since the epoch), 'Id', and 'Version'.

    :type MaxResults: string
    :param MaxResults: The maximum number of results to be returned per request.
    :type NextToken: string
    :param NextToken: The token for the next set of results, or ''null'' if there are no additional results.
    :type ResourceDefinitionId: string
    :param ResourceDefinitionId: **[REQUIRED]** The ID of the resource definition.
    :rtype: dict
    :returns: The response dict described above.
    """
    pass
def list_resource_definitions(self, MaxResults: str = None, NextToken: str = None) -> Dict:
    """
    Retrieves a list of resource definitions.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/ListResourceDefinitions>`_

    The response dict carries 'Definitions' (a list of dicts each holding
    'Arn', 'CreationTimestamp', 'Id', 'LastUpdatedTimestamp',
    'LatestVersion', 'LatestVersionArn', 'Name', and a string-to-string
    'Tags' mapping) and 'NextToken' (the token for the next set of
    results, or ''null'' if there are no additional results).

    :type MaxResults: string
    :param MaxResults: The maximum number of results to be returned per request.
    :type NextToken: string
    :param NextToken: The token for the next set of results, or ''null'' if there are no additional results.
    :rtype: dict
    :returns: The response dict described above.
    """
    pass
def list_subscription_definition_versions(self, SubscriptionDefinitionId: str, MaxResults: str = None, NextToken: str = None) -> Dict:
"""
Lists the versions of a subscription definition.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/ListSubscriptionDefinitionVersions>`_
**Request Syntax**
::
response = client.list_subscription_definition_versions(
MaxResults='string',
NextToken='string',
SubscriptionDefinitionId='string'
)
**Response Syntax**
::
{
'NextToken': 'string',
'Versions': [
{
'Arn': 'string',
'CreationTimestamp': 'string',
'Id': 'string',
'Version': 'string'
},
]
}
**Response Structure**
- *(dict) --*
- **NextToken** *(string) --* The token for the next set of results, or ''null'' if there are no additional results.
- **Versions** *(list) --* Information about a version.
- *(dict) --* Information about a version.
- **Arn** *(string) --* The ARN of the version.
- **CreationTimestamp** *(string) --* The time, in milliseconds since the epoch, when the version was created.
- **Id** *(string) --* The ID of the version.
- **Version** *(string) --* The unique ID of the version.
:type MaxResults: string
:param MaxResults: The maximum number of results to be returned per request.
:type NextToken: string
:param NextToken: The token for the | |
data
@self.console_option("filter", "f", type=str, help="Filter indexes")
@self.console_argument(
"color", type=Color, help=_("Color to color the given stroke")
)
@self.console_command(
"stroke",
help=_("stroke <svg color>"),
input_type=(
None,
"elements",
),
output_type="elements",
)
def element_stroke(
command, channel, _, color, data=None, filter=None, **kwargs
):
if data is None:
data = list(self.elems(emphasized=True))
apply = data
if filter is not None:
apply = list()
for value in filter.split(","):
try:
value = int(value)
except ValueError:
continue
try:
apply.append(data[value])
except IndexError:
channel(_("index %d out of range") % value)
if color is None:
channel("----------")
channel(_("Stroke Values:"))
i = 0
for e in self.elems():
name = str(e)
if len(name) > 50:
name = name[:50] + "…"
if e.stroke is None or e.stroke == "none":
channel(_("%d: stroke = none - %s") % (i, name))
else:
channel(_("%d: stroke = %s - %s") % (i, e.stroke.hex, name))
i += 1
channel("----------")
return
elif color == "none":
for e in apply:
e.stroke = None
e.altered()
else:
for e in apply:
e.stroke = Color(color)
e.altered()
return "elements", data
@self.console_option("filter", "f", type=str, help="Filter indexes")
@self.console_argument("color", type=Color, help=_("Color to set the fill to"))
@self.console_command(
"fill",
help=_("fill <svg color>"),
input_type=(
None,
"elements",
),
output_type="elements",
)
def element_fill(command, channel, _, color, data=None, filter=None, **kwargs):
if data is None:
data = list(self.elems(emphasized=True))
apply = data
if filter is not None:
apply = list()
for value in filter.split(","):
try:
value = int(value)
except ValueError:
continue
try:
apply.append(data[value])
except IndexError:
channel(_("index %d out of range") % value)
if color is None:
channel("----------")
channel(_("Fill Values:"))
i = 0
for e in self.elems():
name = str(e)
if len(name) > 50:
name = name[:50] + "…"
if e.fill is None or e.fill == "none":
channel(_("%d: fill = none - %s") % (i, name))
else:
channel(_("%d: fill = %s - %s") % (i, e.fill.hex, name))
i += 1
channel("----------")
return "elements", data
elif color == "none":
for e in apply:
e.fill = None
e.altered()
else:
for e in apply:
e.fill = Color(color)
e.altered()
return "elements", data
@self.console_argument(
    "x_offset", type=self.length_x, help=_("x offset."), default="0"
)
@self.console_argument(
    "y_offset", type=self.length_y, help=_("y offset"), default="0"
)
@self.console_command(
    "outline",
    help=_("outline the current selected elements"),
    input_type=(
        None,
        "elements",
    ),
    output_type="elements",
)
def element_outline(
    command,
    channel,
    _,
    x_offset=None,
    y_offset=None,
    data=None,
    **kwargs,
):
    """
    Draws a rectangular outline around the currently selected elements,
    grown by x_offset / y_offset on each side.
    """
    bounds = self.selected_area()
    if bounds is None:
        channel(_("Nothing Selected"))
        return
    x_pos = bounds[0]
    y_pos = bounds[1]
    width = bounds[2] - bounds[0]
    height = bounds[3] - bounds[1]
    # Grow the bounds outward by the requested offsets on every side.
    x_pos -= x_offset
    y_pos -= y_offset
    width += x_offset * 2
    height += y_offset * 2
    element = Path(Rect(x=x_pos, y=y_pos, width=width, height=height))
    # Bugfix: the created shape is a rectangular path, so register it as
    # an "elem path" node (it was mislabeled "elem ellipse").
    node = self.elem_branch.add(shape=element, type="elem path")
    node.stroke = Color("red")
    self.set_emphasis([node])
    node.focus()
    self.classify([node])
    if data is None:
        data = list()
    # NOTE(review): the raw Path (not the node) is appended to the
    # pipeline here; sibling commands pipe nodes — confirm which the
    # downstream consumers expect.
    data.append(element)
    return "elements", data
@self.console_argument("angle", type=Angle.parse, help=_("angle to rotate by"))
@self.console_option("cx", "x", type=self.length_x, help=_("center x"))
@self.console_option("cy", "y", type=self.length_y, help=_("center y"))
@self.console_option(
"absolute",
"a",
type=bool,
action="store_true",
help=_("angle_to absolute angle"),
)
@self.console_command(
"rotate",
help=_("rotate <angle>"),
input_type=(
None,
"elements",
),
output_type="elements",
)
def element_rotate(
command,
channel,
_,
angle,
cx=None,
cy=None,
absolute=False,
data=None,
**kwargs,
):
if angle is None:
channel("----------")
channel(_("Rotate Values:"))
i = 0
for node in self.elems():
name = str(node)
if len(name) > 50:
name = name[:50] + "…"
channel(
_("%d: rotate(%fturn) - %s")
% (i, node.matrix.rotation.as_turns, name)
)
i += 1
channel("----------")
return
if data is None:
data = list(self.elems(emphasized=True))
if len(data) == 0:
channel(_("No selected elements."))
return
self.validate_selected_area()
bounds = self.selected_area()
if bounds is None:
channel(_("No selected elements."))
return
rot = angle.as_degrees
if cx is None:
cx = (bounds[2] + bounds[0]) / 2.0
if cy is None:
cy = (bounds[3] + bounds[1]) / 2.0
matrix = Matrix("rotate(%fdeg,%f,%f)" % (rot, cx, cy))
try:
if not absolute:
for node in data:
try:
if node.lock:
continue
except AttributeError:
pass
node.matrix *= matrix
node.modified()
else:
for node in data:
start_angle = node.matrix.rotation
amount = rot - start_angle
matrix = Matrix(
"rotate(%f,%f,%f)" % (Angle(amount).as_degrees, cx, cy)
)
node.matrix *= matrix
node.modified()
except ValueError:
raise CommandSyntaxError
return "elements", data
@self.console_argument("scale_x", type=float, help=_("scale_x value"))
@self.console_argument("scale_y", type=float, help=_("scale_y value"))
@self.console_option(
"px", "x", type=self.length_x, help=_("scale x origin point")
)
@self.console_option(
"py", "y", type=self.length_y, help=_("scale y origin point")
)
@self.console_option(
"absolute",
"a",
type=bool,
action="store_true",
help=_("scale to absolute size"),
)
@self.console_command(
"scale",
help=_("scale <scale> [<scale-y>]?"),
input_type=(None, "elements"),
output_type="elements",
)
def element_scale(
command,
channel,
_,
scale_x=None,
scale_y=None,
px=None,
py=None,
absolute=False,
data=None,
**kwargs,
):
if scale_x is None:
channel("----------")
channel(_("Scale Values:"))
i = 0
for node in self.elems():
name = str(node)
if len(name) > 50:
name = name[:50] + "…"
channel(
"%d: scale(%f, %f) - %s"
% (
i,
node.matrix.value_scale_x(),
node.matrix.value_scale_x(),
name,
)
)
i += 1
channel("----------")
return
if data is None:
data = list(self.elems(emphasized=True))
if len(data) == 0:
channel(_("No selected elements."))
return
bounds = Node.union_bounds(data)
if scale_y is None:
scale_y = scale_x
if px is None:
px = (bounds[2] + bounds[0]) / 2.0
if py is None:
py = (bounds[3] + bounds[1]) / 2.0
if scale_x == 0 or scale_y == 0:
channel(_("Scaling by Zero Error"))
return
matrix = Matrix("scale(%f,%f,%f,%f)" % (scale_x, scale_y, px, py))
try:
if not absolute:
for node in data:
try:
if node.lock:
continue
except AttributeError:
pass
node.matrix *= matrix
node.modified()
else:
for node in data:
try:
if node.lock:
continue
except AttributeError:
pass
osx = node.matrix.value_scale_x()
osy = node.matrix.value_scale_y()
nsx = scale_x / osx
nsy = scale_y / osy
matrix = Matrix("scale(%f,%f,%f,%f)" % (nsx, nsy, px, px))
node.matrix *= matrix
node.modified()
except ValueError:
raise CommandSyntaxError
return "elements", data
@self.console_option(
    "new_area", "n", type=self.area, help=_("provide a new area to cover")
)
@self.console_command(
    "area",
    help=_("provides information about/changes the area of a selected element"),
    input_type=(None, "elements"),
    output_type=("elements"),
)
def element_area(
    command,
    channel,
    _,
    new_area=None,
    data=None,
    **kwargs,
):
    """Report the area of the selected elements, or scale them to a new area.

    Without ``new_area`` the area of each element is reported in mm², cm²
    and in² (shoelace formula over a 1000-point sampling of its path,
    falling back to the bounding box). With ``new_area`` every selected
    element is scaled uniformly by sqrt(new_area / total_area) so the
    total area matches the requested value.
    """
    if new_area is None:
        display_only = True
    else:
        if new_area == 0:
            channel(_("You shouldn't collapse a shape to a zero-sized thing"))
            return
        display_only = False
    if data is None:
        data = list(self.elems(emphasized=True))
    if len(data) == 0:
        channel(_("No selected elements."))
        return
    total_area = 0
    if display_only:
        channel("----------")
        channel(_("Area values:"))
    # Precompute conversion factors from native units to mm²/cm²/in².
    units = ("mm", "cm", "in")
    square_unit = [0] * len(units)
    for idx, u in enumerate(units):
        value = float(Length("1{unit}".format(unit=u)))
        square_unit[idx] = value * value
    i = 0
    for elem in data:
        this_area = 0
        try:
            path = elem.as_path()
        except AttributeError:
            path = None
        subject_polygons = []
        if path is not None:
            # Sample each subpath with 1000 points.
            for subpath in path.as_subpaths():
                subj = Path(subpath).npoint(linspace(0, 1, 1000))
                s = list(map(Point, subj))
                subject_polygons.append(s)
        else:
            # No path available: fall back to the bounding box corners.
            try:
                bb = elem.bounds
            except Exception:
                # Even bounds failed, next element please
                continue
            s = [
                Point(bb[0], bb[1]),
                Point(bb[2], bb[1]),
                Point(bb[2], bb[3]),
                # Bugfix: bottom-left corner is (x0, y1) — it used bb[1]
                # (a y coordinate) as the x value.
                Point(bb[0], bb[3]),
            ]
            subject_polygons.append(s)
        if len(subject_polygons) > 0:
            idx = len(subject_polygons[0]) - 1
            if (
                subject_polygons[0][0].x != subject_polygons[0][idx].x
                or subject_polygons[0][0].y != subject_polygons[0][idx].y
            ):
                # Bugfix: close the loop by appending the first point to
                # the polygon itself, not to the list of polygons.
                subject_polygons[0].append(
                    Point(subject_polygons[0][0].x, subject_polygons[0][0].y)
                )
            # Shoelace formula over the first polygon.
            area_x_y = 0
            area_y_x = 0
            last_x = None
            last_y = None
            for pt in subject_polygons[0]:
                if last_x is not None:
                    area_x_y += last_x * pt.y
                    area_y_x += last_y * pt.x
                last_x = pt.x
                last_y = pt.y
            this_area = 0.5 * abs(area_x_y - area_y_x)
        if display_only:
            name = str(elem)
            if len(name) > 50:
                name = name[:50] + "…"
            channel("%d: %s" % (i, name))
            for idx, u in enumerate(units):
                this_area_local = this_area / square_unit[idx]
                channel(
                    _(" Area= {area:.3f} {unit}²").format(
                        area=this_area_local, unit=u
                    )
                )
        i += 1
        total_area += this_area
    if display_only:
        channel("----------")
    else:
        if total_area == 0:
            channel(_("You can't reshape a zero-sized shape"))
            return
        # Scale uniformly by sqrt of the area ratio so that the resulting
        # total area equals new_area.
        ratio = sqrt(new_area / total_area)
        self("scale %f\n" % ratio)
    return "elements", data
# Do we have a new value to set? If yes scale by sqrt(of the fraction)
@self.console_argument("tx", type=self.length_x, help=_("translate x value"))
@self.console_argument("ty", type=self.length_y, help=_("translate y value"))
@self.console_option(
"absolute",
"a",
type=bool,
action="store_true",
help=_("translate to absolute position"),
)
@self.console_command(
"translate",
help=_("translate <tx> <ty>"),
input_type=(None, "elements"),
output_type="elements",
)
def element_translate(
command, channel, _, tx, ty, absolute=False, data=None, **kwargs
):
if tx is None:
channel("----------")
channel(_("Translate Values:"))
i = 0
for node in self.elems():
name = str(node)
if len(name) > 50:
name = name[:50] + "…"
channel(
_("%d: translate(%f, %f) - | |
<filename>NLI/models.py
import numpy as np
import time
import torch
import torch.nn as nn
from torch.nn import init
class SememeSumLstm(nn.Module):
    """LSTM-style cell (no recurrence) mapping sememe vectors to (c, h).

    Each time step is processed independently: a single fused linear layer
    produces the input gate, output gate and candidate update.
    """

    def __init__(self, sememe_dim, mem_dim):
        super(SememeSumLstm, self).__init__()
        self.in_dim = sememe_dim
        self.mem_dim = mem_dim
        # One fused projection yielding the i, o and u pre-activations.
        self.ioux = nn.Linear(self.in_dim, 3 * self.mem_dim)
        self.reset_parameters()

    def node_forward(self, inputs):
        # Split the fused projection into input gate, output gate, update.
        gates = self.ioux(inputs)
        i, o, u = torch.split(gates, gates.size(1) // 3, dim=1)
        i = torch.sigmoid(i)
        o = torch.sigmoid(o)
        u = torch.tanh(u)
        c = i * u
        h = o * torch.tanh(c)
        return c, h

    def forward(self, inputs):
        # inputs: (max_time, batch, sememe_dim)
        max_time, batch_size, _ = inputs.size()
        cs = []
        hs = []
        for t in range(max_time):
            c_t, h_t = self.node_forward(inputs[t])
            cs.append(c_t)
            hs.append(h_t)
        return torch.stack(cs, 0), torch.stack(hs, 0)

    def reset_parameters(self):
        # Kaiming-normal weights, zero bias.
        for layer in (self.ioux,):
            init.kaiming_normal_(layer.weight)
            if layer.bias is not None:
                init.constant_(layer.bias, val=0)
class SememeSumGRU(nn.Module):
    """GRU-like cell (no recurrence) mapping sememe vectors to hidden states.

    Each time step is processed independently: a fused linear layer yields
    a gate and a candidate; the output is sigmoid(gate) * tanh(candidate).
    """

    def __init__(self, sememe_dim, mem_dim):
        super(SememeSumGRU, self).__init__()
        self.in_dim = sememe_dim
        self.mem_dim = mem_dim
        # One fused projection yielding both pre-activations.
        self.ioux = nn.Linear(self.in_dim, 2 * self.mem_dim)
        self.reset_parameters()

    def node_forward(self, inputs):
        gates = self.ioux(inputs)
        i, o = torch.split(gates, gates.size(1) // 2, dim=1)
        return torch.sigmoid(i) * torch.tanh(o)

    def forward(self, inputs):
        # inputs: (max_time, batch, sememe_dim)
        max_time, batch_size, _ = inputs.size()
        hs = [self.node_forward(inputs[t]) for t in range(max_time)]
        return torch.stack(hs, 0)

    def reset_parameters(self):
        # Kaiming-normal weights, zero bias.
        for layer in (self.ioux,):
            init.kaiming_normal_(layer.weight)
            if layer.bias is not None:
                init.constant_(layer.bias, val=0)
class LSTM_baseline(nn.Module):
    """Word-level LSTM sentence encoder used as a no-sememe baseline.

    Sememe-related submodules (sememesumlstm, sememesumGRU, emb_sememe) and
    projections (ious, fx_s, fs) are constructed but never used by
    forward(), which accepts and ignores `sememe_data`.
    """

    def __init__(self, config):
        # config keys used: 'enc_lstm_dim', 'sememe_dim', 'word_emb_dim'.
        super(LSTM_baseline, self).__init__()
        self.enc_lstm_dim = config['enc_lstm_dim']
        self.sememe_dim = config['sememe_dim']
        self.sememesumlstm = SememeSumLstm(self.sememe_dim, self.enc_lstm_dim)
        self.sememesumGRU = SememeSumGRU(self.sememe_dim, self.enc_lstm_dim)
        # NOTE(review): 2186 is presumably the sememe vocabulary size — confirm.
        self.emb_sememe = nn.Embedding(2186, self.sememe_dim)
        self.in_dim = config['word_emb_dim']
        self.mem_dim = config['enc_lstm_dim']
        # The factor of 3 fuses the i/o/u gate matrices; torch.split
        # separates them again in node_forward.
        self.ioux = nn.Linear(self.in_dim, 3 * self.mem_dim)
        self.iouh = nn.Linear(self.mem_dim, 3 * self.mem_dim)
        # ious would process the c and h coming from the sememe side (both
        # mem_dim-dimensional); unused in this baseline.
        self.ious = nn.Linear(self.mem_dim, 3 * self.mem_dim)
        self.fx = nn.Linear(self.in_dim, self.mem_dim)
        self.fx_s = nn.Linear(self.in_dim, self.mem_dim)
        self.fh = nn.Linear(self.mem_dim, self.mem_dim)
        # fs would process the sememe-side forget path; unused in this baseline.
        self.fs = nn.Linear(self.mem_dim, self.mem_dim)
        self.reset_parameters()

    def reset_parameters(self):
        # Kaiming-normal weights, zero biases for every linear layer.
        layers = [self.ioux, self.iouh, self.ious, self.fx, self.fx_s, self.fh, self.fs]
        for layer in layers:
            init.kaiming_normal_(layer.weight)
            if layer.bias is not None:
                init.constant_(layer.bias, val=0)

    def node_forward(self, inputs, hx):
        # Standard LSTM cell step. inputs: (batch, in_dim); hx = (c, h).
        child_c = hx[0]
        child_h = hx[1]
        iou = self.ioux(inputs) + self.iouh(child_h)
        i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
        i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)
        f = torch.sigmoid(
            self.fh(child_h) + self.fx(inputs)
        )
        fc = torch.mul(f, child_c)  # part of memory cell induced by word-child
        c = torch.mul(i, u) + fc  # sum means sigma
        h = torch.mul(o, torch.tanh(c))
        return (c, h)

    def forward(self, inputs, length, sememe_data):
        # inputs: (max_time, batch, in_dim); length: true length per sequence.
        # sememe_data is accepted for interface parity but never used here.
        # hx: (child_c, child_h)
        max_time, batch_size, _ = inputs.size()
        output = []
        hx = (inputs[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_(),
              inputs[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_())
        for time in range(max_time):
            next_hx = self.node_forward(inputs[time], hx)
            output.append(next_hx[1])
            hx = next_hx
        # Pick the hidden state at each sequence's true last time step.
        return torch.stack([output[length[i]-1][i] for i in range(len(length))], 0)
class LSTM_concat(nn.Module):
    """LSTM encoder whose input is the word embedding concatenated with the
    summed sememe embedding of each token."""

    def __init__(self, config):
        # config keys used: 'enc_lstm_dim', 'sememe_dim', 'sememe_size',
        # 'word_emb_dim'.
        super(LSTM_concat, self).__init__()
        self.enc_lstm_dim = config['enc_lstm_dim']
        self.sememe_dim = config['sememe_dim']
        self.sememesumlstm = SememeSumLstm(self.sememe_dim, self.enc_lstm_dim)
        self.sememesumGRU = SememeSumGRU(self.sememe_dim, self.enc_lstm_dim)
        self.sememe_dim = config['sememe_dim']
        self.sememe_size = config['sememe_size']
        self.emb_sememe = nn.Embedding(self.sememe_size, self.sememe_dim)
        self.in_dim = config['word_emb_dim']
        self.mem_dim = config['enc_lstm_dim']
        # The factor of 3 fuses the i/o/u gate matrices; torch.split
        # separates them again in node_forward. The input size is doubled
        # because word and sememe embeddings are concatenated.
        self.ioux = nn.Linear(2 * self.in_dim, 3 * self.mem_dim)
        self.iouh = nn.Linear(self.mem_dim, 3 * self.mem_dim)
        self.fx = nn.Linear(2 * self.in_dim, self.mem_dim)
        self.fh = nn.Linear(self.mem_dim, self.mem_dim)
        # fs is constructed and initialized but not used by node_forward.
        self.fs = nn.Linear(self.mem_dim, self.mem_dim)
        self.reset_parameters()

    def reset_parameters(self):
        # Kaiming-normal weights, zero biases for every linear layer.
        layers = [self.ioux, self.iouh, self.fx, self.fh, self.fs]
        for layer in layers:
            init.kaiming_normal_(layer.weight)
            if layer.bias is not None:
                init.constant_(layer.bias, val=0)

    def node_forward(self, inputs, hx):
        # Standard LSTM cell step over the concatenated embedding.
        child_c = hx[0]
        child_h = hx[1]
        iou = self.ioux(inputs) + self.iouh(child_h)
        i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
        i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)
        f = torch.sigmoid(
            self.fh(child_h) + self.fx(inputs)
        )
        fc = torch.mul(f, child_c)  # part of memory cell induced by word-child
        c = torch.mul(i, u) + fc
        h = torch.mul(o, torch.tanh(c))
        return (c, h)

    def forward(self, word_emb, length, sememe_data):
        # Concatenate word embeddings with per-token sememe embedding sums.
        emb_s_1 = self.sememe_sum(sememe_data)
        inputs = torch.cat([word_emb, emb_s_1], dim = 2)
        max_time, batch_size, _ = inputs.size()
        output = []
        hx = (inputs[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_(),
              inputs[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_())
        for time in range(max_time):
            next_hx = self.node_forward(inputs[time], hx)
            output.append(next_hx[1])
            hx = next_hx
        # Pick the hidden state at each sequence's true last time step.
        return torch.stack([output[length[i]-1][i] for i in range(len(length))], 0)

    def sememe_sum(self, input_s):
        # input_s: multi-hot sememe indicators per token; multiplying by the
        # embedding matrix sums the embeddings of the active sememes.
        emb_sememe = self.emb_sememe.weight
        input_sememe = []
        for i in range(input_s.size()[0]):
            input_sememe.append(torch.mm(input_s[i].float(), emb_sememe))
        input_sememe = torch.stack(input_sememe, dim = 0)
        return input_sememe
class LSTM_gate(nn.Module):
    """LSTM encoder with an extra sememe output gate.

    node_forward splits a fused 4-way projection into f, i, o and o_c; the
    additional gate o_c lets the sememe hidden state feed directly into the
    output hidden state through W_c.
    """

    def __init__(self, config):
        # config keys used: 'enc_lstm_dim', 'sememe_dim', 'sememe_size',
        # 'word_emb_dim'.
        super(LSTM_gate, self).__init__()
        self.enc_lstm_dim = config['enc_lstm_dim']
        self.sememe_dim = config['sememe_dim']
        self.sememesumlstm = SememeSumLstm(self.sememe_dim, self.enc_lstm_dim)
        self.sememesumGRU = SememeSumGRU(self.sememe_dim, self.enc_lstm_dim)
        self.sememe_dim = config['sememe_dim']
        self.sememe_size = config['sememe_size']
        self.emb_sememe = nn.Embedding(self.sememe_size, self.sememe_dim)
        self.in_dim = config['word_emb_dim']
        self.mem_dim = config['enc_lstm_dim']
        # The factor of 4 fuses the f/i/o/o_c gate matrices; torch.split
        # separates them again in node_forward.
        self.ioux = nn.Linear(self.in_dim, 4 * self.mem_dim)
        self.iouh = nn.Linear(self.mem_dim, 4 * self.mem_dim)
        # ious projects the sememe hidden state into the four gates.
        # NOTE(review): ious and W_c take in_dim-sized input while
        # sememe_sum yields sememe_dim-sized vectors — these presumably
        # coincide in the config; confirm.
        self.ious = nn.Linear(self.in_dim, 4 * self.mem_dim)
        self.fx = nn.Linear(self.in_dim, self.mem_dim)
        # self.fx_s = nn.Linear(self.in_dim, self.mem_dim)
        self.fh = nn.Linear(self.mem_dim, self.mem_dim)
        self.W_c = nn.Linear(self.in_dim, self.mem_dim)
        self.reset_parameters()

    def reset_parameters(self):
        # Kaiming-normal weights, zero biases for every linear layer.
        layers = [self.ioux, self.iouh, self.ious, self.fx, self.fh, self.W_c]
        for layer in layers:
            init.kaiming_normal_(layer.weight)
            if layer.bias is not None:
                init.constant_(layer.bias, val=0)

    def node_forward(self, inputs, sememe_h, hx):
        # LSTM step with an extra output gate o_c for the sememe path.
        child_c = hx[0]
        child_h = hx[1]
        iou = self.ioux(inputs) + self.iouh(child_h) + self.ious(sememe_h)
        f, i, o, o_c = torch.split(iou, iou.size(1) // 4, dim=1)
        f, i, o, o_c = torch.sigmoid(f), torch.sigmoid(i), torch.sigmoid(o), torch.sigmoid(o_c)
        c_telta = self.fx(inputs) + self.fh(child_h)
        c_telta = torch.tanh(c_telta)
        fc = torch.mul(f, child_c)  # part of memory cell induced by word-child
        c = torch.mul(i, c_telta) + fc  # sum means sigma
        # Hidden state mixes the cell output with the gated sememe signal.
        h = torch.mul(o, torch.tanh(c)) + torch.mul(o_c, torch.tanh(self.W_c(sememe_h)))
        return (c, h)

    def forward(self, inputs, length, sememe_data):
        # inputs: (max_time, batch, in_dim); sememe_data: multi-hot sememes.
        sememe_h = self.sememe_sum(sememe_data)
        max_time, batch_size, _ = inputs.size()
        output = []
        hx = (inputs[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_(),
              inputs[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_())
        for time in range(max_time):
            next_hx = self.node_forward(inputs[time], sememe_h[time], hx)
            output.append(next_hx[1])
            hx = next_hx
        # Pick the hidden state at each sequence's true last time step.
        return torch.stack([output[length[i]-1][i] for i in range(len(length))], 0)

    def sememe_sum(self, input_s):
        # Sum the embeddings of the active sememes per token via matmul
        # with the multi-hot indicator matrix.
        emb_sememe = self.emb_sememe.weight
        input_sememe = []
        for i in range(input_s.size()[0]):
            input_sememe.append(torch.mm(input_s[i].float(), emb_sememe))
        input_sememe = torch.stack(input_sememe, dim = 0)
        return input_sememe
class LSTM_cell(nn.Module):
    """LSTM encoder where a sememe LSTM contributes an extra memory-cell path.

    sememe_sum runs SememeSumLstm over the summed sememe embeddings to get a
    per-step (sememe_c, sememe_h); node_forward adds a dedicated forget gate
    f_s so the sememe memory cell feeds into the word LSTM's cell state.
    """

    def __init__(self, config):
        # config keys used: 'enc_lstm_dim', 'sememe_dim', 'sememe_size',
        # 'word_emb_dim'.
        super(LSTM_cell, self).__init__()
        self.enc_lstm_dim = config['enc_lstm_dim']
        self.sememe_dim = config['sememe_dim']
        self.sememesumlstm = SememeSumLstm(self.sememe_dim, self.enc_lstm_dim)
        self.sememesumGRU = SememeSumGRU(self.sememe_dim, self.enc_lstm_dim)
        self.sememe_dim = config['sememe_dim']
        self.sememe_size = config['sememe_size']
        self.emb_sememe = nn.Embedding(self.sememe_size, self.sememe_dim)
        self.in_dim = config['word_emb_dim']
        self.mem_dim = config['enc_lstm_dim']
        # The factor of 3 fuses the i/o/u gate matrices; torch.split
        # separates them again in node_forward.
        self.ioux = nn.Linear(self.in_dim, 3 * self.mem_dim)
        self.iouh = nn.Linear(self.mem_dim, 3 * self.mem_dim)
        # ious processes the h coming from the sememe side (mem_dim-dimensional).
        self.ious = nn.Linear(self.mem_dim, 3 * self.mem_dim)
        self.fx = nn.Linear(self.in_dim, self.mem_dim)
        self.fx_s = nn.Linear(self.in_dim, self.mem_dim)
        self.fh = nn.Linear(self.mem_dim, self.mem_dim)
        # fs processes the sememe-side hidden state for the forget gate f_s.
        self.fs = nn.Linear(self.mem_dim, self.mem_dim)
        self.reset_parameters()

    def reset_parameters(self):
        # Kaiming-normal weights, zero biases for every linear layer.
        layers = [self.ioux, self.iouh, self.ious, self.fx, self.fh, self.fs, self.fx_s]
        for layer in layers:
            init.kaiming_normal_(layer.weight)
            if layer.bias is not None:
                init.constant_(layer.bias, val=0)

    def node_forward(self, inputs, sememe_c, sememe_h, hx):
        # LSTM step with an additional sememe memory-cell contribution.
        child_c = hx[0]
        child_h = hx[1]
        iou = self.ioux(inputs) + self.iouh(child_h) + self.ious(sememe_h)
        i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
        i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)
        f = torch.sigmoid(
            self.fh(child_h) + self.fx(inputs)
        )
        f_s = torch.sigmoid(
            self.fs(sememe_h) + self.fx_s(inputs)
        )
        fc = torch.mul(f, child_c)  # part of memory cell induced by word-child
        fc_s = torch.mul(f_s, sememe_c)  # part of memory cell induced by sememe-child
        c = torch.mul(i, u) + fc + fc_s  # sum means sigma
        h = torch.mul(o, torch.tanh(c))
        return (c, h)

    def forward(self, inputs, length, sememe_data):
        # inputs: (max_time, batch, in_dim); sememe_data: multi-hot sememes.
        sememe_c, sememe_h = self.sememe_sum(sememe_data)
        max_time, batch_size, _ = inputs.size()
        output = []
        hx = (inputs[0][0].detach().new(batch_size, sememe_c.size()[2]).fill_(0.).requires_grad_(),
              inputs[0][0].detach().new(batch_size, sememe_h.size()[2]).fill_(0.).requires_grad_())
        for time in range(max_time):
            next_hx = self.node_forward(inputs[time], sememe_c[time], sememe_h[time], hx)
            output.append(next_hx[1])
            hx = next_hx
        # Pick the hidden state at each sequence's true last time step.
        return torch.stack([output[length[i]-1][i] for i in range(len(length))], 0)

    def sememe_sum(self, input_s):
        # Sum active sememe embeddings per token, then run the sememe LSTM
        # to obtain per-step (sememe_c, sememe_h).
        emb_sememe = self.emb_sememe.weight
        input_sememe = []
        for i in range(input_s.size()[0]):
            input_sememe.append(torch.mm(input_s[i].float(), emb_sememe))
        input_sememe = torch.stack(input_sememe, dim = 0)
        sememe_c, sememe_h = self.sememesumlstm(input_sememe)
        return sememe_c, sememe_h
class LSTM_cell_baseline(nn.Module):
def __init__(self, config):
super(LSTM_cell_baseline, self).__init__()
self.enc_lstm_dim = config['enc_lstm_dim']
self.sememesumlstm = SememeSumLstm(512, self.enc_lstm_dim)
self.sememesumGRU = SememeSumGRU(512, self.enc_lstm_dim)
self.in_dim = config['word_emb_dim']
self.mem_dim = config['enc_lstm_dim']
# self.project_layer = nn.Linear(768, 300)
#乘3代表3种矩阵,它后来用split分开了
self.ioux = nn.Linear(self.in_dim, 3 * self.mem_dim)
self.iouh = nn.Linear(self.mem_dim, 3 * self.mem_dim)
#ious是专门处理sememe传过来的c 和 h,c和h都是mem_dim维的
self.ious = nn.Linear(self.mem_dim, 3 * self.mem_dim)
self.fx = nn.Linear(self.in_dim, self.mem_dim)
self.fx_s = nn.Linear(self.in_dim, self.mem_dim)
self.fh = nn.Linear(self.mem_dim, self.mem_dim)
#fs是专门处理sememe传过来的c和h
self.fs = nn.Linear(self.mem_dim, self.mem_dim)
self.reset_parameters()
def reset_parameters(self):
layers = [self.ioux, self.iouh, self.ious, self.fx, self.fh, self.fs, self.fx_s]
for layer in layers:
init.kaiming_normal_(layer.weight)
if layer.bias is not None:
init.constant_(layer.bias, val=0)
def node_forward(self, inputs, sememe_c, sememe_h, hx):
child_c = hx[0]
child_h = hx[1]
iou = self.ioux(inputs) + self.iouh(child_h) + self.ious(sememe_h)
i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)
f = torch.sigmoid(
self.fh(child_h) + self.fx(inputs)
)
f_s = torch.sigmoid(
self.fs(sememe_h) + self.fx_s(inputs)
)
fc = torch.mul(f, child_c) #part of memory cell induced by word-child
fc_s = torch.mul(f_s, sememe_c) # part of memory cell induced by sememe-child
c | |
<filename>utils/PSIS/py/psis.py
"""Pareto smoothed importance sampling (PSIS)
This module implements Pareto smoothed importance sampling (PSIS) and PSIS
leave-one-out cross-validation for Python (Numpy).
Included functions
------------------
psisloo
Pareto smoothed importance sampling leave-one-out log predictive densities.
psislw
Pareto smoothed importance sampling.
gpdfitnew
Estimate the paramaters for the Generalized Pareto Distribution (GPD).
gpinv
Inverse Generalised Pareto distribution function.
sumlogs
Sum of vector where numbers are represented by their logarithms.
References
----------
<NAME>, <NAME> and <NAME> (2015). Efficient implementation
of leave-one-out cross-validation and WAIC for evaluating fitted Bayesian
models. arXiv preprint arXiv:1507.04544.
<NAME> and <NAME> (2015). Pareto smoothed importance sampling.
arXiv preprint arXiv:1507.02646.
"""
# Copyright (c) 2015 <NAME>, <NAME>
# Original Matlab version by <NAME>. Translation to Python
# by <NAME>.
# This software is distributed under the GNU General Public
# License (version 3 or later); please refer to the file
# License.txt, included with the software, for details.
from __future__ import division # For Python 2 compatibility
import numpy as np
def psisloo(log_lik, **kwargs):
    r"""PSIS leave-one-out log predictive densities.

    Computes the log predictive densities given posterior samples of the log
    likelihood terms p(y_i|\theta^s) in input parameter `log_lik`. Returns a
    sum of the leave-one-out log predictive densities `loo`, individual
    leave-one-out log predictive density terms `loos` and an estimate of Pareto
    tail indeces `ks`. If tail index k>0.5, variance of the raw estimate does
    not exist and if tail index k>1 the mean of the raw estimate does not exist
    and the PSIS estimate is likely to have large variation and some bias.

    Parameters
    ----------
    log_lik : ndarray
        Array of size n x m containing n posterior samples of the log likelihood
        terms p(y_i|\theta^s).

    Additional keyword arguments are passed to the psislw() function (see the
    corresponding documentation).

    Returns
    -------
    loo : scalar
        sum of the leave-one-out log predictive densities
    loos : ndarray
        individual leave-one-out log predictive density terms
    ks : ndarray
        estimated Pareto tail indeces
    """
    # Ensure overwrite flag in passed arguments. `lw` below is a freshly
    # allocated array (negation copies), so the caller's log_lik is never
    # modified even though psislw smooths in-place.
    kwargs['overwrite_lw'] = True
    # Raw log importance ratios are the negated log likelihoods.
    lw = -log_lik
    # Compute Pareto smoothed log weights given raw log weights.
    lw, ks = psislw(lw, **kwargs)
    # Combine the smoothed weights with the likelihoods and marginalize
    # over the posterior draws (axis 0) in log space.
    lw += log_lik
    loos = sumlogs(lw, axis=0)
    loo = loos.sum()
    return loo, loos, ks
def psislw(lw, wcpp=20, wtrunc=3/4, overwrite_lw=False):
    """Pareto smoothed importance sampling (PSIS).

    Parameters
    ----------
    lw : ndarray
        Array of size n x m containing m sets of n log weights. It is also
        possible to provide one dimensional array of length n.
    wcpp : number
        Percentage of samples used for GPD fit estimate (default is 20).
    wtrunc : float
        Positive parameter for truncating very large weights to n^wtrunc.
        Providing False or 0 disables truncation. Default values is 3/4.
    overwrite_lw : bool, optional
        If True, the input array `lw` is smoothed in-place. By default, a new
        array is allocated.

    Returns
    -------
    lw_out : ndarray
        smoothed log weights
    kss : ndarray
        Pareto tail indices
    """
    if lw.ndim == 2:
        n, m = lw.shape
    elif lw.ndim == 1:
        n = len(lw)
        m = 1
    else:
        raise ValueError("Argument `lw` must be 1 or 2 dimensional.")
    if n <= 1:
        raise ValueError("More than one log-weight needed.")
    if overwrite_lw:
        # in-place operation
        lw_out = lw
    else:
        # allocate new array for output
        lw_out = np.copy(lw, order='K')
    # allocate output array for kss
    kss = np.empty(m)
    # precalculate constants
    cutoffmin = np.log(np.finfo(float).tiny)
    logn = np.log(n)
    # Loop over sets of log weights. Each `x` is a view into lw_out
    # (a column for 2-d input), so every in-place modification below
    # writes straight into the output array.
    for i, x in enumerate(lw_out.T if lw_out.ndim == 2 else lw_out[None,:]):
        # improve numerical accuracy
        x -= np.max(x)
        # Divide log weights into body and right tail: the tail is the
        # largest wcpp percent of the weights, bounded below by cutoffmin
        # so that exp() of the cutoff never underflows to zero.
        xcutoff = max(
            np.percentile(x, 100 - wcpp),
            cutoffmin
        )
        expxcutoff = np.exp(xcutoff)
        tailinds, = np.where(x > xcutoff)
        x2 = x[tailinds]
        n2 = len(x2)
        if n2 <= 4:
            # not enough tail samples for gpdfitnew
            k = np.inf
        else:
            # order of tail samples
            x2si = np.argsort(x2)
            # Fit generalized Pareto distribution to the right tail samples,
            # working on the raw-weight scale shifted so the tail starts at 0.
            np.exp(x2, out=x2)
            x2 -= expxcutoff
            k, sigma = gpdfitnew(x2, sort=x2si)
            # compute ordered statistic for the fit
            sti = np.arange(0.5, n2)
            sti /= n2
            qq = gpinv(sti, k, sigma)
            qq += expxcutoff
            np.log(qq, out=qq)
            # place the smoothed tail into the output array
            x[tailinds[x2si]] = qq
        if wtrunc > 0:
            # Truncate too large weights at n^wtrunc times the mean weight
            # (computed in log space).
            lwtrunc = wtrunc * logn - logn + sumlogs(x)
            x[x > lwtrunc] = lwtrunc
        # renormalize weights so they sum to one (log-sum-exp equals zero)
        x -= sumlogs(x)
        # store tail index k
        kss[i] = k
    # If the provided input array is one dimensional, return kss as scalar.
    if lw_out.ndim == 1:
        kss = kss[0]
    return lw_out, kss
def gpdfitnew(x, sort=True):
    """Estimate the parameters for the Generalized Pareto Distribution (GPD)

    Returns empirical Bayes estimate for the parameters of the two-parameter
    generalized Pareto distribution given the data.

    Parameters
    ----------
    x : ndarray
        One dimensional data array
    sort : {bool, ndarray, 'in-place'}, optional
        If known in advance, one can provide an array of indices that would
        sort the input array `x`. If the input array is already sorted, provide
        False. If the array is not sorted but can be sorted in-place, provide
        string 'in-place'. If True (default behaviour) the sorted array indices
        are determined internally.

    Returns
    -------
    k, sigma : float
        estimated parameter values

    Notes
    -----
    This function returns a negative of Zhang and Stephens's k, because it is
    more common parameterisation.
    """
    if x.ndim != 1 or len(x) <= 1:
        raise ValueError("Invalid input array.")
    # Check if x should be sorted. The isinstance guard avoids the
    # ambiguous elementwise comparison `ndarray == 'in-place'` when an
    # index array is passed.
    if sort is True:
        sort = np.argsort(x)
        xsorted = False
    elif sort is False:
        xsorted = True
    elif isinstance(sort, str) and sort == 'in-place':
        x.sort()
        xsorted = True
    else:
        # `sort` is assumed to be an array of sorting indices.
        xsorted = False
    n = len(x)
    m = 80 + int(np.floor(np.sqrt(n)))
    # Candidate values for b (the grid of Zhang & Stephens).
    bs = np.arange(1, m + 1, dtype=float)
    bs -= 0.5
    np.divide(m, bs, out=bs)
    np.sqrt(bs, out=bs)
    np.subtract(1, bs, out=bs)
    # Bugfix: the first-quartile order statistic index must be an int —
    # np.floor returns a float, which is invalid as a NumPy index.
    # int() truncation equals floor here because the value is positive.
    qi = int(n / 4 + 0.5) - 1
    if xsorted:
        bs /= 3 * x[qi]
        bs += 1 / x[-1]
    else:
        bs /= 3 * x[sort[qi]]
        bs += 1 / x[sort[-1]]
    # Profile log-likelihood for each candidate b.
    ks = np.negative(bs)
    temp = ks[:,None] * x
    np.log1p(temp, out=temp)
    np.mean(temp, axis=1, out=ks)
    L = bs / ks
    np.negative(L, out=L)
    np.log(L, out=L)
    L -= ks
    L -= 1
    L *= n
    # Quasi-posterior weights over the candidate grid.
    temp = L - L[:,None]
    np.exp(temp, out=temp)
    w = np.sum(temp, axis=1)
    np.divide(1, w, out=w)
    # remove negligible weights
    dii = w >= 10 * np.finfo(float).eps
    if not np.all(dii):
        w = w[dii]
        bs = bs[dii]
    # normalise w
    w /= w.sum()
    # posterior mean for b
    b = np.sum(bs * w)
    # Estimate for k, note that we return a negative of Zhang and
    # Stephens's k, because it is more common parameterisation.
    temp = (-b) * x
    np.log1p(temp, out=temp)
    k = np.mean(temp)
    # estimate for sigma
    sigma = -k / b
    return k, sigma
def gpinv(p, k, sigma):
    """Inverse Generalised Pareto distribution function.

    Evaluates the GPD quantile function at probabilities `p` for shape `k`
    and scale `sigma`. Entries outside (0, 1) map to 0 (p == 0) and to the
    distribution's upper endpoint (p == 1); a non-positive sigma yields NaNs.
    """
    x = np.full(p.shape, np.nan)
    if sigma <= 0:
        return x
    inside = (p > 0) & (p < 1)
    if np.all(inside):
        # Every probability is strictly inside (0, 1).
        if np.abs(k) < np.finfo(float).eps:
            # k -> 0 limit of the quantile function: -log(1 - p).
            x = -np.log1p(-p)
        else:
            x = np.expm1(-k * np.log1p(-p)) / k
        x *= sigma
    else:
        # Mixed case: compute only the interior entries, then patch the
        # boundary probabilities.
        if np.abs(k) < np.finfo(float).eps:
            x[inside] = -np.log1p(-p[inside])
        else:
            x[inside] = np.expm1(-k * np.log1p(-p[inside])) / k
        x *= sigma
        x[p == 0] = 0
        if k >= 0:
            # Unbounded support: the p == 1 quantile is infinite.
            x[p == 1] = np.inf
        else:
            # Bounded support with upper endpoint -sigma / k.
            x[p == 1] = -sigma / k
    return x
def sumlogs(x, axis=None, out=None):
"""Sum of vector where numbers are represented by their logarithms.
Calculates np.log(np.sum(np.exp(x), axis=axis)) in such a fashion that it
works even when elements have large magnitude.
"""
maxx = x.max(axis=axis, keepdims=True)
xnorm = x - maxx
| |
<gh_stars>0
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, <NAME>, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of <NAME>, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: <NAME>
'''
Module containing the TablewareDetection class and other functions useful for detecting objects on tables.
'''
__docformat__ = "restructuredtext en"
import roslib
roslib.load_manifest('pr2_tasks')
import rospy
from exceptions import DetectionError
from tabletop_object_detector.srv import TabletopDetection, TabletopSegmentation, TabletopObjectRecognition,\
TabletopDetectionRequest, TabletopSegmentationRequest, TabletopObjectRecognitionRequest
from arm_navigation_msgs.msg import CollisionObject, Shape
from object_manipulation_msgs.msg import GraspableObject
from object_manipulation_msgs.srv import FindClusterBoundingBox
from pickplace_definitions import PickupGoal
from household_objects_database_msgs.srv import GetModelMesh, GetModelDescription
from pr2_python.head import Head #so we can jiggle the head until we found a horizontal table
from pr2_python.world_interface import WorldInterface
from geometry_msgs.msg import Point, Pose
from mobile_manipulation_msgs.msg import Object, TabletopSnapshot
import pr2_python.transform_listener as tl
import pr2_python.geometry_tools as gt
import copy
#Names of pertinent services
OBJECT_DETECTION_SERVICE = '/object_detection'
'''
Service for detecting objects
'''
FIND_CLUSTER_BOUNDING_BOX_SERVICE = '/find_cluster_bounding_box'
'''
Service for finding bounding boxes of point clouds.
'''
GET_MODEL_MESH_SERVICE = '/objects_database_node/get_model_mesh'
'''
Service for getting a model's mesh from the database.
'''
GET_MODEL_DESCRIPTION_SERVICE = '/objects_database_node/get_model_description'
'''
Service for getting a model's description from the database.
'''
GET_PLANNING_SCENE_SERVICE = '/environment_server/get_planning_scene'
'''
Service for getting the planning scene.
'''
#Service name for tabletop object recognition (see TabletopObjectRecognition srv import above).
RECOGNITION_SERVICE = '/tabletop_object_recognition'
#Service name for tabletop segmentation (see TabletopSegmentation srv import above).
SEGMENTATION_SERVICE = '/tabletop_segmentation'
#The following defaults are all defaults for initializing the detector class. In general,
#you just want to set these when you initialize the class; there is no reason to change them here.
DEFAULT_TABLE_THICKNESS = 0.02
'''
Default thickness with which table will be added to the map.
'''
TABLE_RESOLUTION = 0.02
'''
The resolution (in m) of the height of the table. Table meshes will be added every TABLE_RESOLUTION m to make up the
height.
'''
DEFAULT_TABLEWARE_LABELS = ['plate', 'bowl', 'cup', 'utensil', 'knife', 'fork', 'spoon']
'''
Default labels for tableware.
'''
DEFAULT_TABLE_SEARCH_RESOLUTION = 0.1
'''
Default resolution for searching with the head for a horizontal table.
'''
DEFAULT_TABLE_SEARCH_MAX = 1.0
'''
Default maximum search for searching with the head for a horizontal table.
'''
DEFAULT_VERTICAL_THRESHOLD = 0.5
'''
Default value for the dot product with the z axis above which a table is considered vertical.
'''
DEFAULT_OBJECT_PADDING = 0.08
'''
Default padding to give objects when adding them to the collision map.
'''
def find_height_above_table(pose_stamped_in, table_pose_stamped):
    '''
    Returns how far above a table's plane a given pose sits.

    **Args:**

        **pose_stamped_in (geometry_msgs.msg.PoseStamped):** The pose whose height above the table is wanted

        **table_pose_stamped (geometry_msgs.msg.PoseStamped):** The pose of the table

    **Returns:**
        The z coordinate of the pose expressed in the table's own coordinate system.

    **Raises:**
        All exceptions a TransformListener can raise.
    '''
    # First express the query pose in the frame the table pose is given in.
    pose_in_table_frame = tl.transform_pose_stamped(
        table_pose_stamped.header.frame_id, pose_stamped_in)
    # Then move the point into the table's coordinate system, where the
    # height above the table surface is simply the z coordinate.
    point_in_table_coords = gt.inverse_transform_point(
        pose_in_table_frame.pose.position, table_pose_stamped.pose)
    rospy.logdebug('Height above table is '+str(point_in_table_coords.z))
    return point_in_table_coords.z
class TablewareDetectionResult:
    '''
    Container for the outcome of a tableware detection.

    After a detection, the interesting fields on each pickup goal (defined in
    pickplace_definitions.py) are 'label', carrying the recognition label
    ('bowl', 'cup', etc.), and object_pose_stamped, carrying the object pose.
    Before passing a pickup goal to pickup, the arm_name field MUST be filled
    in with the arm to use.

    Detection always locates a table (whether or not it was added to the
    collision map); that table is stored here as well.

    **Attributes:**

        **pickup_goals (pickplace_definitions.PickupGoal):** All detection information. Fill in arm_name
        and the goal can be handed straight to pickup.

        **table (tabletop_object_detector.msg.Table):** The detected table.

        **table_name (string):** The collision map ID of the table if it was added to the collision map.
    '''

    def __init__(self, pickup_goals, table, table_name):
        '''
        Constructor for TablewareDetectionResult.

        **Args:**

            **pickup_goals (pickplace_definitions.PickupGoal):** All detection information. Fill in
            arm_name and the goal can be handed straight to pickup.

            **table (tabletop_object_detector.msg.Table):** The detected table.

            **table_name (string):** The collision map ID of the table if it was added to the collision map.
        '''
        self.pickup_goals = pickup_goals
        self.table = table
        self.table_name = table_name
class TablewareDetection:
'''
Calls tabletop detection allowing the user a lot of (optional) control over when and how objects are added
to the collision map. The usual use of this class is to instantiate an instance and then use detect_objects
to find objects on a table in front of the robot:
detector = TablewareDetection()
res = detector.detect_objects([optional arguments])
This class is a wrapper around the tabletop_object_detector, which assumes that the robot is standing in
front of a table on which objects are positioned at least 3cm apart. It first locates the principle
horizontal plane in the image and then segments clusters above that plane based on Euclidean distance.
It runs those clusters through the object recognition pipeline and returns them in the form of pickup goals.
The attributes of this class can all be changed by passing in different defaults although the default settings
should work well in most cases. However, most of the parameters pertinent to detection must be changed
service side. Look at the tabletop_object_detector package for that.
Many of the parameters in this class are for working with "vertical tables".
The detector finds the "principle plane" in the image. Unfortunately, this plane may not be horizontal.
In general, it is best if any detection that does not return a table that is approximately perpendicular to
the z axis in the world is considered as failed. If a point for the head to look at is passed to
detect_objects, the robot will move its head trying to find a horizontal table.
**Attributes:**
**allow_vertical_tables (boolean):** If True will do segmentation and return even if a vertical table is found.
**vertical_threshold (double):** The dot product of the normal to the table plane and the z axis must be greater
than this for the table to be considered horizontal.
**table_search_resolution (double):** If detect_objects is initially given a point for the head to look at, it
will search along the world x axis near that point until it founds a horizontal table. This is the
resolution in meters of that search. Resolution cannot be smaller than a centimeter.
**table_search_max (double):** The maximum distance in meters from the point the search should go. It will go
this far in both directions before failing.
**table_thickness (double):** If the table is added to the map, it will be added with this thickness in meters.
This is helpful for placing as it will occlude points that the robot sees below the table.
**tableware_labels ([string]):** A set of database tags that count as "tableware". If objects have
multiple | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (C) 2020-2021 The SymbiFlow Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
# File: bitMapping.py
# Author: <NAME>
# Created: 24 June 2020
# Description:
# Will compute bit mappings from init.mem bit locations to FASM INIT/INITP lines/bits and to frame/bitoffset values
import os
import sys
import glob
import parseutil
import parseutil.misc as misc
import argparse
import json
import pathlib
import struct
import DbgParser
import parseutil.parse_mdd as parse_mdd
import re
# Holds a single mapping record from init.mem bit to FASM and bitstream
class Mapping:
    """One init.mem bit's twin locations: FASM INIT/INITP line/bit and bitstream frame/bit."""

    def __init__(self, word, bit, tile, bits, fasmY, fasmINITP, fasmLine,
                 fasmBit, frameAddr, frameBitOffset):
        # Memory-array coordinates of the bit and the memory's word width.
        self.word = word
        self.bit = bit
        self.bits = bits
        # BRAM tile this bit lives in.
        self.tile = tile
        # FASM-side location: Y0/Y1 half, INITP vs. INIT, line number, bit index.
        self.fasmY = fasmY
        self.fasmINITP = fasmINITP
        self.fasmLine = fasmLine
        self.fasmBit = fasmBit
        # Bitstream-side location: frame address and bit offset within the frame.
        self.frameAddr = frameAddr
        self.frameBitOffset = frameBitOffset

    def toString(self):
        """Full human-readable dump of this mapping record."""
        return "word={}, bit={}, tile = {}, bits = {}, fasmY={}, fasmINITP={}, fasmLine={}, fasmBit={}, frameAddr={:x}, frameBitOffset={}".format(
            self.word, self.bit, self.tile, self.bits, self.fasmY,
            self.fasmINITP, self.fasmLine, self.fasmBit, self.frameAddr,
            self.frameBitOffset
        )

    def toStringShort(self):
        """Abbreviated dump: memory coordinates plus bitstream location only."""
        return "word={}, bit={}, tile = {}, frameAddr={:x}, frameBitOffset={}".format(
            self.word, self.bit, self.tile, self.frameAddr, self.frameBitOffset
        )
# Add mappings for a particular BRAM primitive into the mappingsn array and return it
def createBitMapping(
    segs, words, bits, cell, mappings, verbose, printMappings
):
    """Append to `mappings` one Mapping per init.mem bit covered by `cell`.

    :param segs: segbits lookup structure as built by loadSegs()
    :param words: total number of words in the init.mem memory
    :param bits: width in bits of each init.mem word
    :param cell: BRAM primitive record (RAMB36E1 or RAMB18E1) to process;
        its baseaddr/wordoffset attributes are filled in here from
        tilegrid.json as a side effect
    :param verbose: print per-bit diagnostic detail
    :param printMappings: print one summary line per computed mapping
    :returns: `mappings` with this cell's entries appended

    Reads tilegrid.json from the prjxray database selected by the
    XRAY_DIR / XRAY_DATABASE / XRAY_PART environment variables.
    """
    # 1. Flag of whether this is RAMB36E cell or not
    ramb36 = (cell.type == "RAMB36E1")
    # 2. Get the info on this BRAM tile from tilegrid.json
    tilegridname = os.environ["XRAY_DIR"] + "/database/" + os.environ[
        "XRAY_DATABASE"] + "/" + os.environ["XRAY_PART"] + "/tilegrid.json"
    with open(tilegridname) as f:
        tilegrid = json.load(f)
    tilegridinfo = tilegrid[cell.tile]
    # Cache the tile's bitstream base address and word offset on the cell.
    cell.baseaddr = int(tilegridinfo["bits"]["BLOCK_RAM"]["baseaddr"], 16)
    cell.wordoffset = int(tilegridinfo["bits"]["BLOCK_RAM"]["offset"])
    # 3. Now build the mappings, one per (word, bit) location this cell covers
    for w in range(words):
        for b in range(bits):
            # Just do the locations covered by this RAMB primitive
            if w < cell.addr_beg or w > cell.addr_end or b < cell.slice_beg or b > cell.slice_end:
                continue
            # 2a: Compute characteristics of this particular RAMB primitive
            RAMBinitwidth = cell.slice_end - cell.slice_beg + 1
            RAMBdepth = cell.addr_end - cell.addr_beg + 1
            RAMBreadinitwidth = cell.width
            RAMBparityinitwidth = cell.pbits
            RAMBdatainitwidth = cell.dbits
            assert RAMBinitwidth == RAMBdatainitwidth + RAMBparityinitwidth, "RAMBinitwidth ERROR: {} {} {} {}".format(
                RAMBinitwidth, RAMBreadinitwidth, RAMBparityinitwidth,
                RAMBdatainitwidth
            )
            # 2b: Determine how many bits are in parity and how many are in the "normal" bits
            if not ramb36:
                assert not RAMBreadinitwidth == 72
            initSliceinitwidth = 64 if RAMBreadinitwidth == 72 else 32 if RAMBreadinitwidth == 36 else 16 if RAMBreadinitwidth == 18 else 8 if RAMBreadinitwidth == 9 else RAMBreadinitwidth
            initpSliceinitwidth = 8 if RAMBreadinitwidth == 72 else 4 if RAMBreadinitwidth == 36 else 2 if RAMBreadinitwidth == 18 else 1 if RAMBreadinitwidth == 9 else 0
            if verbose:
                print("\nDoing: {} {}".format(w, b))
                print(
                    "{} data bits will be found in {}({}) LSB's of INITP and {}({}) LSB's of INIT"
                    .format(
                        RAMBinitwidth, RAMBparityinitwidth,
                        initpSliceinitwidth, RAMBdatainitwidth,
                        initSliceinitwidth
                    ),
                    end=''
                )
                print(
                    " {} {} {} ({}) {}:{} {}.{}".format(
                        cell.tile,
                        cell.type,
                        cell.write_style,
                        cell.width,
                        cell.addr_end,
                        cell.addr_beg,
                        cell.slice_end,
                        cell.slice_beg,
                    )
                )
                print(
                    " This is the correct BRAM block iff: {} >= r >= {} and {} >= b >= {}"
                    .format(
                        cell.addr_end,
                        cell.addr_beg,
                        cell.slice_end,
                        cell.slice_beg,
                    )
                )
            # Some sanity checks
            # Number of bits in INIT and INITP should be 32K and 4K respectively (RAMB36E1) or 16K and 2K respectively (RAMB18E1)
            assert initSliceinitwidth * RAMBdepth == (
                32768 if ramb36 else 16384
            )
            assert initpSliceinitwidth == 0 or initpSliceinitwidth * RAMBdepth == (
                4096 if ramb36 else 2048
            )
            # 2c: Is the bit of interest in the INIT portion or the INITP portion?
            if b - cell.slice_beg < initSliceinitwidth:
                # In the INIT portion
                sliceinitwidth = initSliceinitwidth
                parity = False
            else:
                # In the INITP portion
                sliceinitwidth = initpSliceinitwidth
                parity = True
            # 2.d: Compute how many "words" fit into each INIT string.
            # This may be the initwidth of the memory or it may not since
            # Vivado often pads. Example: for a 128b1 the "read initwidth" is 18.
            # That means the memory has a 16-bit word in the INIT and a 2-bit word in the INITP.
            # In the INIT there are 15 0's as padding + the 1 bit of data. The INITP is all 0's in this case.
            initStringLen = 512 if ramb36 else 256
            # 2.e: How many words are in each INIT and INITP string?
            numWordsPerInit = (initStringLen / sliceinitwidth)
            # Make sure it divides evenly or there must be a problem
            assert int(numWordsPerInit) == numWordsPerInit
            numWordsPerInit = int(numWordsPerInit)
            # 2.f: Compute where to find the bit in the INIT strings
            # Find which INIT or INITP entry it is in (00-3F for INIT, 00-07 for INITP)
            initRow = int((w - cell.addr_beg) / numWordsPerInit)
            assert initRow <= 0x3F, "{} {} {} {} {}".format(
                initRow, w, numWordsPerInit, initStringLen, sliceinitwidth
            )
            # 2.g: Now, compute the actual bit offset into that INIT or INITP string
            wordOffset = int(w % numWordsPerInit)
            bitOffset = wordOffset * sliceinitwidth + (
                b - cell.slice_beg - (initSliceinitwidth if parity else 0)
            )
            # 2.h: Get the segment info from the prjxray segments file
            lr = 0 if cell.tile[6] == "L" else 1
            # RAMB36 interleaves bits between the Y0/Y1 halves; even bit
            # offsets land in Y0, odd in Y1.
            y01 = 0 if ramb36 is False or bitOffset % 2 == 0 else 1
            segoffset = findSegOffset(
                segs,
                lr,
                y01,
                1 if parity else 0,
                initRow,
                int(bitOffset / 2) if ramb36 else bitOffset,
            )
            # 2.i: Compute the bitstream location of the bit from the above information
            # Frame number is tilegrid.json's baseaddr + segbits frame offset number
            frameNum = cell.baseaddr + segoffset[0]
            # Bit offset is given in segbits file
            frameBitOffset = int(segoffset[1]) + cell.wordoffset * 32
            # 2.j: Print out the mapping if requested
            bbb = int(bitOffset / 2) if ramb36 else bitOffset
            if printMappings or verbose:
                if parity:
                    print(
                        "init.mem[{}][{}] -> {}.{}_Y{}.INITP_{:02x}[{:03}] -> {} {} {}_{} wordoffset = {}"
                        .format(
                            w, b, cell.tile,
                            cell.type[:-2], y01, initRow, bbb, cell.tile,
                            hex(cell.baseaddr), segoffset[0], segoffset[1],
                            cell.wordoffset
                        )
                    )
                else:
                    print(
                        "init.mem[{}][{}] -> {}.{}_Y{}.INIT_{:02x}[{:03}] -> {} {} {}_{} wordoffset = {}"
                        .format(
                            w, b, cell.tile,
                            cell.type[:-2], y01, initRow, bbb, cell.tile,
                            hex(cell.baseaddr), segoffset[0], segoffset[1],
                            cell.wordoffset
                        )
                    )
            # 2.k: Finally, build a Mapping object and add it to the mappings list (to be returned below)
            mappings.append(
                Mapping(
                    w, b, cell.tile, bits, y01, parity, initRow, bbb, frameNum,
                    frameBitOffset
                )
            )
    # All done...
    return mappings
# Given a word/bit index, find the mapping
def findMapping(w, b, bits, mappings):
    """Look up the Mapping for word `w`, bit `b`.

    Mappings are appended in word-major order (all `bits` bits of word 0,
    then word 1, ...), so the entry can be indexed directly instead of
    searched linearly.
    """
    return mappings[(w * bits) + b]
# If this is done once and re-used, much time can be saved
def loadSegs():
    """Load the prjxray segbits BRAM databases for both bram_l and bram_r.

    Returns a 5-deep nested list indexed by
    [lr][y01][initinitp][initnum][initbit] whose entries are filled in by
    processSegLines (None where the database has no entry).  The original
    comment below also lists frame/framebit as trailing indices; the visible
    construction is only 5-deep, so presumably each leaf holds the
    (frame, framebit) pair -- TODO confirm against processSegLines.

    Requires the XRAY_DIR and XRAY_DATABASE environment variables to point
    at the prjxray database.
    """
    # Read the segbits database information for later use.
    # Create multidimensional array to hold it.
    # Indices from left to right are: lr, y01, initinitp, initnum, initbit, frame, framebit
    segs = [
        [
            [
                [[None
                  for j in range(256)]
                 for k in range(64)]
                for l in range(2)
            ]
            for m in range(2)
        ]
        for n in range(2)
    ]
    # Read the segbits database info
    segname = os.environ["XRAY_DIR"] + "/database/" + os.environ[
        "XRAY_DATABASE"] + "/segbits_bram_l.block_ram.db"
    with open(segname) as f:
        lines = f.readlines()
    segs = processSegLines(lines, segs)
    segname = os.environ["XRAY_DIR"] + "/database/" + os.environ[
        "XRAY_DATABASE"] + "/segbits_bram_r.block_ram.db"
    with open(segname) as f:
        lines = f.readlines()
    segs = processSegLines(lines, segs)
    return segs
# Process the segment lines read from the database and fill an array with them
def processSegLines(seglines, segs):
# seglines is a multi-dimensional array, indexed by integers
# Level1: [bram_l, bram_r]
# Level 2: [y0, y1]
# Level 3: [INIT, INITP]
# Level 4: [0, 1, ..., 3F] (which INIT line? could make INITP ones shorter but, hey...)
# Level 5: [0, 1, ..., 255] (bits from a given init line)
# | |
encodings[level][
:, :, int(start / (level // 8)) : int(start / (level // 8)) + 250,
],
distenc if iii == 0 else torch.flip(distenc, [2, 3]),
)
return pred
preds = []
starts = [0]
ns = {}
if targets and iii == 0:
ts = []
if annotation is not None and iii == 0:
annos = []
for j, level in enumerate([256, 128, 64, 32]):
normmat_r = np.nanmean(
np.nanmean(
np.reshape(
normmat[
starts[j] : starts[j] + 250 * level // 8,
starts[j] : starts[j] + 250 * level // 8,
],
(1, 250, level // 8, 250, level // 8),
),
axis=4,
),
axis=2,
)
ns[level] = normmat_r
if j == 0:
pred = eval_step(level, starts[j])
else:
pred = eval_step(
level,
starts[j],
preds[j - 1][
:,
:,
start_index : start_index + 125,
start_index : start_index + 125,
],
)
if targets and iii == 0:
target_r = np.nanmean(
np.nanmean(
np.reshape(
target[
:,
starts[j] : starts[j] + 250 * level // 8,
starts[j] : starts[j] + 250 * level // 8,
].numpy(),
(target.shape[0], 250, level // 8, 250, level // 8),
),
axis=4,
),
axis=2,
)
target_nan = np.mean(
np.mean(
np.isnan(
np.reshape(
target[
:,
starts[j] : starts[j] + 250 * level // 8,
starts[j] : starts[j] + 250 * level // 8,
].numpy(),
(target.shape[0], 250, level // 8, 250, level // 8,),
)
),
axis=4,
),
axis=2,
)
target_r[target_nan > nan_thresh] = np.nan
eps = np.nanmin(normmat_r)
target_np = np.log((target_r + eps) / (normmat_r + eps))[0, 0:, 0:]
ts.append(target_np)
if annotation is not None and iii == 0:
newstart = starts[j] / 8000.0
newend = (starts[j] + 250 * level // 8) / 8000.0
anno_r = []
for r in annotation:
if len(r) == 3:
if not (r[0] >= newend or r[1] <= newstart):
anno_r.append(
(
np.fmax((r[0] - newstart) / (newend - newstart), 0,),
np.fmin((r[1] - newstart) / (newend - newstart), 1,),
r[2],
)
)
else:
if r[0] >= newstart and r[0] < newend:
anno_r.append(((r[0] - newstart) / (newend - newstart), r[1]))
annos.append(anno_r)
if iii == 0:
proposed_start = (mpos - level * 1000000 / 4) - (
wpos - 128000000 + starts[j] * 4000 * 8
)
else:
proposed_start = (mpos - level * 1000000 / 4) - (
wpos + 128000000 - starts[j] * 4000 * 8 - level * 1000000
)
if chrlen is not None:
bounds = [
0 - (wpos - 128000000),
chrlen - level * 1000000 / 2 - (wpos - 128000000),
]
if bounds[0] < bounds[1]:
proposed_start = np.clip(proposed_start, bounds[0], bounds[1])
else:
proposed_start = bounds[0]
start_index = int(np.clip(np.floor(proposed_start / (4000 * level)), 0, 125,))
if iii != 0:
start_index = 250 - (start_index + 125)
starts.append(starts[j] + start_index * level // 8)
preds.append(pred)
allpreds.append(preds)
allnormmats.append(ns)
if iii == 0:
if targets:
alltargets.append(ts)
if annotation is not None:
allannos.append(annos)
allstarts.append(starts[:-1])
output = {}
output["predictions"] = [[] for _ in range(n_models)]
for i in range(n_models):
for j in range(len(allpreds[i])):
if allpreds[i][j].shape[1] == 1:
output["predictions"][i].append(
allpreds[i][j].cpu().detach().numpy()[0, 0, :, :] * 0.5
+ allpreds[i + n_models][j].cpu().detach().numpy()[0, 0, ::-1, ::-1] * 0.5
)
else:
output["predictions"][i].append(
allpreds[i][j].cpu().detach().numpy()[0, :, :, :] * 0.5
+ allpreds[i + n_models][j].cpu().detach().numpy()[0, :, ::-1, ::-1] * 0.5
)
if targets:
output["experiments"] = alltargets
else:
output["experiments"] = None
output["start_coords"] = [wpos - 128000000 + s * 32000 for s in allstarts[0]]
output["end_coords"] = [
np.fmin(int(output["start_coords"][ii] + 256000000 / 2 ** (ii)), chrlen) for ii in range(4)
]
if annotation is not None:
output["annos"] = allannos[0]
else:
output["annos"] = None
output["chr"] = mchr
output["padding_chr"] = padding_chr
output["normmats"] = allnormmats
return output
def _retrieve_multi(regionlist, genome, target=True, normmat=True, normmat_regionlist=None):
    """Retrieve model input data for a list of genomic regions.

    Builds the one-hot sequence encoding for the concatenated regions and,
    optionally, the corresponding experimental target matrices and background
    normalization matrices ("normmats") for every pair of regions.

    Parameters
    ----------
    regionlist : list(tuple)
        Regions given as (chrom, start, end) or (chrom, start, end, strand);
        strand defaults to "+".
    genome : object
        Genome object providing get_encoding_from_coords.
    target : bool or list, optional
        If a list, the objects to query with get_feature_data; if True, use
        the module-level default targets when available; if False, skip
        target retrieval.
    normmat : bool or list, optional
        If a list, the objects providing background_cis / background_trans;
        if True, use the module-level defaults; if False, skip normmats.
    normmat_regionlist : list(tuple) or None, optional
        Regions to use for normmat retrieval; defaults to `regionlist`.

    Returns
    -------
    tuple
        (sequence,), extended with (normmats,) when normmat retrieval ran
        and with (targets,) when target retrieval ran.
    """

    def _unpack(region):
        # Accept (chrom, start, end) or (chrom, start, end, strand).
        if len(region) == 4:
            return region
        chrom, start, end = region
        return chrom, start, end, "+"

    sequences = []
    for region in regionlist:
        chrom, start, end, strand = _unpack(region)
        sequences.append(genome.get_encoding_from_coords(chrom, start, end, strand))
    sequence = np.vstack(sequences)[None, :, :]

    if isinstance(target, list):
        target_objs = target
        has_target = True
    elif target and target_available:
        target_objs = [target_h1esc_256m, target_hff_256m]
        has_target = True
    else:
        has_target = False

    if has_target:
        targets = []
        for target_obj in target_objs:
            rows = []
            for region in regionlist:
                chrom, start, end, strand = _unpack(region)
                row = []
                for region2 in regionlist:
                    # BUGFIX: the original assigned `strand = "+"` for
                    # 3-tuple region2 entries, clobbering the outer strand
                    # and leaving strand2 undefined (NameError below).
                    chrom2, start2, end2, strand2 = _unpack(region2)
                    t = target_obj.get_feature_data(
                        chrom, start, end, chrom2=chrom2, start2=start2, end2=end2
                    )
                    # Flip rows/columns of minus-strand regions so the matrix
                    # matches the orientation of the retrieved sequence.
                    if strand == "-":
                        t = t[::-1, :]
                    if strand2 == "-":
                        t = t[:, ::-1]
                    row.append(t)
                rows.append(row)
            targets.append(np.vstack([np.hstack(r) for r in rows]))
        targets = [torch.FloatTensor(t[None, :, :]) for t in targets]

    if normmat:
        if isinstance(normmat, list):
            normmat_objs = normmat
        else:
            normmat_objs = [h1esc_256m, hff_256m]
        if normmat_regionlist is None:
            normmat_regionlist = regionlist
        binsize = 32000
        normmats = []
        for normmat_obj in normmat_objs:
            rows = []
            for region in normmat_regionlist:
                # Generalized: 3-tuple regions are accepted here too (the
                # original required 4-tuples in this loop only).
                chrom, start, end, strand = _unpack(region)
                row = []
                for region2 in normmat_regionlist:
                    chrom2, start2, end2, strand2 = _unpack(region2)
                    if chrom2 != chrom:
                        # Trans contact: flat background level.
                        b = np.full(
                            (int((end - start) / binsize), int((end2 - start2) / binsize)),
                            normmat_obj.background_trans,
                        )
                    else:
                        # Cis contact: background indexed by genomic distance.
                        acoor = np.linspace(start, end, int((end - start) / binsize) + 1)[:-1]
                        bcoor = np.linspace(start2, end2, int((end2 - start2) / binsize) + 1)[:-1]
                        b = normmat_obj.background_cis[
                            (np.abs(acoor[:, None] - bcoor[None, :]) / binsize).astype(int)
                        ]
                    if strand == "-":
                        b = b[::-1, :]
                    if strand2 == "-":
                        b = b[:, ::-1]
                    row.append(b)
                rows.append(row)
            normmats.append(np.vstack([np.hstack(r) for r in rows]))

    datatuple = (sequence,)
    if normmat:
        datatuple = datatuple + (normmats,)
    if has_target:
        datatuple = datatuple + (targets,)
    return datatuple
def process_region(
mchr,
mstart,
mend,
genome,
file=None,
custom_models=None,
target=True,
show_genes=True,
show_tracks=False,
window_radius=16000000,
padding_chr="chr1",
use_cuda=True,
):
"""
Generate multiscale genome interaction predictions for
the specified region.
Parameters
----------
mchr : str
The chromosome name of the first segment
mstart : int
The start coordinate of the region.
mend : ind
The end coordinate of the region.
genome : selene_utils2.MemmapGenome or selene_sdk.sequences.Genome
The reference genome object to extract sequence from
custom_models : list(torch.nn.Module or str) or None, optional
Models to use instead of the default H1-ESC and HFF Orca models.
Default is None.
target : list(selene_utils2.Genomic2DFeatures or str) or bool, optional
If specified as list, use this list of targets to retrieve experimental
data (for plotting only). Default is True and will use micro-C data
for H1-ESC and HFF cells (4DNFI9GMP2J8, 4DNFI643OYP9) that correspond
to the default models.
file : str or None, optional
Default is None. The output file prefix.
show_genes : bool, optional
Default is True. If True, generate gene annotation visualization
file in pdf format that matches the windows of multiscale predictions.
show_tracks : bool, optional
Default is False. If True, generate chromatin tracks visualization
file in pdf format that matches the windows of multiscale predictions.
window_radius : int, optional
Default is 16000000. The acceptable values are 16000000 which selects
the 1-32Mb models or 128000000 which selects the 32-256Mb models.
padding_chr : str, optional
Default is "chr1". If window_radius is 128000000, padding is generally
needed to fill the sequence to 256Mb. The padding sequence will be
extracted from the padding_chr.
use_cuda : bool, optional
Default is True. Use CPU if False.
Returns
-------
outputs_ref_l, outputs_ref_r, outputs_alt : dict, dict, dict
Reference allele predictions zooming into the left boundary of the
duplication,
Reference allele predictions zooming into the right boundary of the
duplication,
Alternative allele predictions zooming into the duplication breakpoint.
The returned results are in the format of dictonaries
containing the prediction outputs and other
retrieved information. These dictionaries can be directly used as
input to genomeplot or genomeplot_256Mb. See documentation of `genomepredict` or `genomepredict_256Mb` for
details of the dictionary content.
"""
chrlen = [l for c, l in genome.get_chr_lens() if c == mchr].pop()
mpos = int((int(mstart) + int(mend)) / 2)
if custom_models is None:
if window_radius == 16000000:
models = | |
order.
nquads.sort()
# 5) Return the hash that results from passing the sorted, joined
# nquads through the hash algorithm.
info['hash'] = self.hash_nquads(nquads)
return info['hash']
# helper for modifying component during Hash First Degree Quads
def modify_first_degree_component(self, id_, component, key):
if component['type'] != 'blank node':
return component
component = copy.deepcopy(component)
component['value'] = '_:a' if component['value'] == id_ else '_:z'
return component
# 4.7) Hash Related Blank Node
def hash_related_blank_node(self, related, quad, issuer, position):
# 1) Set the identifier to use for related, preferring first the
# canonical identifier for related if issued, second the identifier
# issued by issuer if issued, and last, if necessary, the result of
# the Hash First Degree Quads algorithm, passing related.
if self.canonical_issuer.has_id(related):
id_ = self.canonical_issuer.get_id(related)
elif issuer.has_id(related):
id_ = issuer.get_id(related)
else:
id_ = self.hash_first_degree_quads(related)
# 2) Initialize a string input to the value of position.
# Note: We use a hash object instead.
md = self.create_hash()
md.update(position.encode('utf8'))
# 3) If position is not g, append <, the value of the predicate in
# quad, and > to input.
if position != 'g':
md.update(self.get_related_predicate(quad).encode('utf8'))
# 4) Append identifier to input.
md.update(id_.encode('utf8'))
# 5) Return the hash that results from passing input through the hash
# algorithm.
return md.hexdigest()
# helper for getting a related predicate
def get_related_predicate(self, quad):
return '<' + quad['predicate']['value'] + '>'
# 4.8) Hash N-Degree Quads
    def hash_n_degree_quads(self, id_, issuer):
        """Hash N-Degree Quads (spec step 4.8).

        Produces a hash identifying blank node `id_` by its position in the
        graph relative to other blank nodes, recursing through related blank
        nodes as needed.  Returns ``{'hash': <hex digest>, 'issuer': <the
        identifier issuer chosen for the lexicographically least path>}``.

        NOTE(review): iterating every permutation of each related-node list
        is worst-case factorial in the number of related nodes sharing a
        hash; this follows the specified algorithm, not an accident.
        """
        # 1) Create a hash to related blank nodes map for storing hashes that
        # identify related blank nodes.
        # Note: 2) and 3) handled within `createHashToRelated`
        hash_to_related = self.create_hash_to_related(id_, issuer)
        # 4) Create an empty string, data to hash.
        # Note: We use a hash object instead.
        md = self.create_hash()
        # 5) For each related hash to blank node list mapping in hash to
        # related blank nodes map, sorted lexicographically by related hash:
        for hash, blank_nodes in sorted(hash_to_related.items()):
            # 5.1) Append the related hash to the data to hash.
            md.update(hash.encode('utf8'))
            # 5.2) Create a string chosen path.
            chosen_path = ''
            # 5.3) Create an unset chosen issuer variable.
            chosen_issuer = None
            # 5.4) For each permutation of blank node list:
            for permutation in permutations(blank_nodes):
                # 5.4.1) Create a copy of issuer, issuer copy.
                issuer_copy = copy.deepcopy(issuer)
                # 5.4.2) Create a string path.
                path = ''
                # 5.4.3) Create a recursion list, to store blank node
                # identifiers that must be recursively processed by this
                # algorithm.
                recursion_list = []
                # 5.4.4) For each related in permutation:
                skip_to_next_permutation = False
                for related in permutation:
                    # 5.4.4.1) If a canonical identifier has been issued for
                    # related, append it to path.
                    if(self.canonical_issuer.has_id(related)):
                        path += self.canonical_issuer.get_id(related)
                    # 5.4.4.2) Otherwise:
                    else:
                        # 5.4.4.2.1) If issuer copy has not issued an
                        # identifier for related, append related to recursion
                        # list.
                        if not issuer_copy.has_id(related):
                            recursion_list.append(related)
                        # 5.4.4.2.2) Use the Issue Identifier algorithm,
                        # passing issuer copy and related and append the result
                        # to path.
                        path += issuer_copy.get_id(related)
                    # 5.4.4.3) If chosen path is not empty and the length of
                    # path is greater than or equal to the length of chosen
                    # path and path is lexicographically greater than chosen
                    # path, then skip to the next permutation.
                    if(len(chosen_path) != 0 and
                       len(path) >= len(chosen_path) and
                       path > chosen_path):
                        skip_to_next_permutation = True
                        break
                if skip_to_next_permutation:
                    continue
                # 5.4.5) For each related in recursion list:
                for related in recursion_list:
                    # 5.4.5.1) Set result to the result of recursively
                    # executing the Hash N-Degree Quads algorithm, passing
                    # related for identifier and issuer copy for path
                    # identifier issuer.
                    result = self.hash_n_degree_quads(related, issuer_copy)
                    # 5.4.5.2) Use the Issue Identifier algorithm, passing
                    # issuer copy and related and append the result to path.
                    path += issuer_copy.get_id(related)
                    # 5.4.5.3) Append <, the hash in result, and > to path.
                    path += '<' + result['hash'] + '>'
                    # 5.4.5.4) Set issuer copy to the identifier issuer in
                    # result.
                    issuer_copy = result['issuer']
                    # 5.4.5.5) If chosen path is not empty and the length of
                    # path is greater than or equal to the length of chosen
                    # path and path is lexicographically greater than chosen
                    # path, then skip to the next permutation.
                    if(len(chosen_path) != 0 and
                       len(path) >= len(chosen_path) and
                       path > chosen_path):
                        skip_to_next_permutation = True
                        break
                if skip_to_next_permutation:
                    continue
                # 5.4.6) If chosen path is empty or path is lexicographically
                # less than chosen path, set chosen path to path and chosen
                # issuer to issuer copy.
                if len(chosen_path) == 0 or path < chosen_path:
                    chosen_path = path
                    chosen_issuer = issuer_copy
            # 5.5) Append chosen path to data to hash.
            md.update(chosen_path.encode('utf8'))
            # 5.6) Replace issuer, by reference, with chosen issuer.
            issuer = chosen_issuer
        # 6) Return issuer and the hash that results from passing data to hash
        # through the hash algorithm.
        return {'hash': md.hexdigest(), 'issuer': issuer}
# helper for creating hash to related blank nodes map
def create_hash_to_related(self, id_, issuer):
# 1) Create a hash to related blank nodes map for storing hashes that
# identify related blank nodes.
hash_to_related = {}
# 2) Get a reference, quads, to the list of quads in the blank node to
# quads map for the key identifier.
quads = self.blank_node_info[id_]['quads']
# 3) For each quad in quads:
for quad in quads:
# 3.1) For each component in quad, if component is the subject,
# object, and graph name and it is a blank node that is not
# identified by identifier:
for key, component in quad.items():
if(key != 'predicate' and
component['type'] == 'blank node' and
component['value'] != id_):
# 3.1.1) Set hash to the result of the Hash Related Blank
# Node algorithm, passing the blank node identifier for
# component as related, quad, path identifier issuer as
# issuer, and position as either s, o, or g based on
# whether component is a subject, object, graph name,
# respectively.
related = component['value']
position = self.POSITIONS[key]
hash = self.hash_related_blank_node(
related, quad, issuer, position)
# 3.1.2) Add a mapping of hash to the blank node identifier
# for component to hash to related blank nodes map, adding
# an entry as necessary.
hash_to_related.setdefault(hash, []).append(related)
return hash_to_related
# helper to create appropriate hash object
def create_hash(self):
return hashlib.sha256()
# helper to hash a list of nquads
def hash_nquads(self, nquads):
md = self.create_hash()
for nquad in nquads:
md.update(nquad.encode('utf8'))
return md.hexdigest()
class URGNA2012(URDNA2015):
"""
URGNA2012 implements the URGNA2012 RDF Graph Normalization Algorithm.
"""
    def __init__(self):
        """Initialize URGNA2012 by delegating to the URDNA2015 base class."""
        URDNA2015.__init__(self)
# helper for modifying component during Hash First Degree Quads
def modify_first_degree_component(self, id_, component, key):
if component['type'] != 'blank node':
return component
component = copy.deepcopy(component)
if key == 'name':
component['value'] = '_:g'
else:
component['value'] = '_:a' if component['value'] == id_ else '_:z'
return component
# helper for getting a related predicate
def get_related_predicate(self, quad):
return quad['predicate']['value']
# helper for creating hash to related blank nodes map
def create_hash_to_related(self, id_, issuer):
# 1) Create a hash to related blank nodes map for storing hashes that
# identify related blank nodes.
hash_to_related = {}
# 2) Get a reference, quads, to the list of quads in the blank node to
# quads map for the key identifier.
quads = self.blank_node_info[id_]['quads']
# 3) For each quad in quads:
for quad in quads:
# 3.1) If the quad's subject is a blank node that does not match
# identifier, set hash to the result of the Hash Related Blank Node
# algorithm, passing the blank node identifier for subject as
# related, quad, path identifier issuer as issuer, and p as
# position.
if(quad['subject']['type'] == 'blank node' and
quad['subject']['value'] != id_):
related = quad['subject']['value']
position = 'p'
# 3.2) Otherwise, if quad's object is a blank node that does
# not match identifier, to the result of the Hash Related Blank
# Node algorithm, passing the blank node identifier | |
# explainy/explanations/counterfactual_explanation.py
"""
Counterfactual Explanation
--------------------------
Counterfactual explanations tell us how the values of an instance have to change to
significantly change its prediction. A counterfactual explanation of a prediction
describes the smallest change to the feature values that changes the prediction
to a predefined output. By creating counterfactual instances, we learn about how the
model makes its predictions and can explain individual predictions [1].
Characteristics
===============
- local
- contrastive
Source
======
[1] Molnar, Christoph. "Interpretable machine learning. A Guide for Making Black Box Models Explainable", 2019.
https://christophm.github.io/interpretable-ml-book/
"""
from typing import Dict
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn
from matplotlib.font_manager import FontProperties
from mlxtend.evaluate import create_counterfactual
from sklearn.base import is_classifier, is_regressor
from explainy.core.explanation import Explanation
from explainy.core.explanation_base import ExplanationBase
from explainy.utils.utils import create_one_hot_sentence
np.seterr(divide="ignore", invalid="ignore")
COLUMN_REFERENCE = "Reference Values"
COLUMN_COUNTERFACTUAL = "Counterfactual Values"
COLUMN_DIFFERENCE = "Prediction Difference"
class CounterfactualExplanation(ExplanationBase):
"""
Contrastive, local Explanation
"""
def __init__(
self,
X: pd.DataFrame,
y: np.array,
model: sklearn.base.BaseEstimator,
number_of_features: int = 4,
config: Dict = None,
y_desired: float = None,
delta: float = None,
random_state: int = 0,
**kwargs,
) -> None:
super(CounterfactualExplanation, self).__init__(model, config)
"""
This implementation is a thin wrapper around `smlxtend.evaluate.create_counterfactual
<http://rasbt.github.io/mlxtend/user_guide/evaluate/create_counterfactual>`
Args:
X (df): (Test) samples and features to calculate the importance for (sample, features)
y (np.array): (Test) target values of the samples (samples, 1)
model (object): trained (sckit-learn) model object
number_of_features (int):
y_desired (float, optional): desired target value for the counter factual example.
Defaults to max(y).
Returns:
None.
"""
self.X = X
self.y = y
self.y_desired = y_desired
self.delta = delta
self.feature_names = self.get_feature_names(self.X)
self.number_of_features = self.get_number_of_features(number_of_features)
self.kwargs = kwargs
self.kwargs['random_seed'] = random_state
natural_language_text_empty = (
"The sample would have had the desired prediction, {}."
)
method_text_empty = (
"The feature importance is shown using a counterfactual example."
)
sentence_text_empty = "the '{}' was {}"
self.define_explanation_placeholder(
natural_language_text_empty, method_text_empty, sentence_text_empty
)
self.explanation_name = "counterfactual"
self.logger = self.setup_logger(self.explanation_name)
def _calculate_importance(self, sample_index=0):
"""
Create the counter factual explanation for the given sample.
Args:
sample (int, optional): DESCRIPTION. Defaults to 0.
lammbda (float, optional): hyperparameter (0,1). Defaults to 1.0.
Returns:
x_ref (TYPE): DESCRIPTION.
x_counter_factual (TYPE): DESCRIPTION.
"""
if not self.y_desired:
self.y_desired = min(self.prediction * 1.2, self.y.values.max())
if not self.delta:
self.delta = self.prediction * 0.1
x_ref = self.X.values[sample_index, :]
count = 0
for lammbda in np.arange(0, 10000, 0.1):
x_counter_factual = create_counterfactual(
x_reference=x_ref,
y_desired=self.y_desired,
model=self.model,
X_dataset=self.X.values,
lammbda=lammbda,
**self.kwargs,
)
self.y_counter_factual = self.model.predict(
x_counter_factual.reshape(1, -1)
)[0]
self._log_counterfactual(lammbda)
self._log_output(sample_index, x_ref, x_counter_factual)
if is_regressor(self.model):
if np.abs(self.y_counter_factual - self.y_desired) < self.delta:
break
elif is_classifier(self.model):
if self.y_counter_factual == self.y_desired:
break
if count > 40:
raise
count += 1
self.logger.debug("\nFinal Lambda:")
self._log_counterfactual(lammbda)
self._log_output(sample_index, x_ref, x_counter_factual)
return x_ref, x_counter_factual
def _log_counterfactual(self, lammbda: float):
"""
Log the values from the counterfactual output
Args:
lammbda (TYPE): DESCRIPTION.
Returns:
None.
"""
self.logger.debug(f"lambda: {lammbda}")
self.logger.debug(f"diff: {np.abs(self.y_counter_factual - self.y_desired)}")
self.logger.debug(
f"y_counterfactual: {self.y_counter_factual:.2f}, desired:"
f" {self.y_desired:.2f}, y_pred: {self.prediction:.2f}, delta:"
f" {self.delta}"
)
self.logger.debug("---" * 15)
def _log_output(self, sample, x_ref, x_counter_factual):
"""
Log all the relevant values
Args:
sample (TYPE): DESCRIPTION.
x_ref (TYPE): DESCRIPTION.
x_counter_factual (TYPE): DESCRIPTION.
Returns:
None.
"""
self.logger.debug("True label: {}".format(self.y.values[sample]))
self.logger.debug("Predicted label: {}".format(self.prediction))
self.logger.debug(f"Desired label: {self.y_desired}")
self.logger.debug(
"Predicted counterfactual label: {}".format(
self.model.predict(x_counter_factual.reshape(1, -1))[0]
)
)
self.logger.debug("Features of the sample: {}".format(x_ref))
self.logger.debug("Features of the countefactual: {}".format(x_counter_factual))
def get_prediction_from_new_value(self, ii, x_ref, x_counter_factual):
"""
replace the value of the feauture at postion ii and predict
a new value for this new set of features
Args:
ii (TYPE): DESCRIPTION.
x_ref (TYPE): DESCRIPTION.
x_counter_factual (TYPE): DESCRIPTION.
Returns:
difference (TYPE): DESCRIPTION.
"""
x_created = x_ref.reshape(1, -1).copy()
old_value = x_created[0, ii]
new_value = x_counter_factual.reshape(1, -1)[0, ii]
# assign new value
x_created[0, ii] = x_counter_factual.reshape(1, -1)[0, ii]
self.logger.debug(f"old_value: {old_value} -- new_value: {new_value}")
pred_new = self.model.predict(x_created)[0]
return pred_new
def get_feature_importance(self, x_ref, x_counter_factual):
"""
Calculate the importance of each feature. Take the reference
features and replace every feature with the new counter_factual value.
Calculat the absulte difference that this feature adds to the prediction.
A larger absolute value, means a larger contribution and therefore a more
important feature.
Args:
x_ref (TYPE): DESCRIPTION.
x_counter_factual (TYPE): DESCRIPTION.
Returns:
None.
"""
pred_ref = self.model.predict(x_ref.reshape(1, -1))[0]
self.differences = []
for ii in range(x_ref.shape[0]):
pred_new = self.get_prediction_from_new_value(ii, x_ref, x_counter_factual)
difference = pred_new - pred_ref
self.differences.append(difference)
self.logger.debug(
"name: {} -- difference: {}".format(
self.feature_names[ii], self.differences[ii]
)
)
# get the sorted feature_names
self.feature_sort = np.array(self.feature_names)[
np.array(self.differences).argsort()[::-1]
].tolist()
    def get_feature_values(self, x_ref, x_counter_factual, decimal=2, debug=False):
        """
        Arrange the reference and the counter factual features in a dataframe

        Args:
            x_ref (np.array): features of the sample
            x_counter_factual (np.array): features of the counter factual sample to achieve y_desired
            decimal (int): decimal number to round the values to in the plot
            debug (bool): if True, plot the per-feature prediction differences

        Returns:
            None.
        """
        # build a (feature x [reference, counterfactual, difference]) table
        self.df = (
            pd.DataFrame(
                [x_ref, x_counter_factual, self.differences],
                index=[
                    COLUMN_REFERENCE,
                    COLUMN_COUNTERFACTUAL,
                    COLUMN_DIFFERENCE,
                ],
                columns=self.feature_names,
            )
            .round(decimal)
            .T
        )
        # reorder dataframe according to the feature importance
        self.df = self.df.loc[self.feature_sort, :]
        try:
            # NOTE(review): this expression selects the non-zero differences
            # but discards the result -- presumably it was meant to feed the
            # debug plot below; confirm the intent. It is also unclear how
            # this boolean indexing would raise IndexError.
            self.df[COLUMN_DIFFERENCE][self.df[COLUMN_DIFFERENCE] != 0]
            if debug:
                self.df.plot(kind="barh", figsize=(3, 5))
        except IndexError as e:
            print(e)
def importance(self) -> pd.DataFrame:
return self.df.round(2)
def format_features_for_plot(self) -> None:
"""
- map categorical variables
- replace one-hot-encoded value with True, False strings
Returns:
None.
"""
for feature_name in list(self.df.index)[: self.number_of_features]:
for col_name in [COLUMN_REFERENCE, COLUMN_COUNTERFACTUAL]:
feature_value = self.df.loc[feature_name, col_name]
self.df.loc[feature_name, col_name] = self.map_category(
feature_name, feature_value
)
# replace one-hot-encoded value with True, False strings
if " - " in feature_name:
self.logger.debug(
f"{feature_name}, {col_name},"
f" {self.df.loc[feature_name, col_name]}"
)
if self.df.loc[feature_name, col_name] == 1.0:
string = "Yes" # "True"
else:
string = "No" # "False"
self.df.loc[feature_name, col_name] = string
def plot(self, sample_index: int, kind: str = 'table', **kwargs: dict) -> None:
"""Create the plot of the counterfactual table
Args:
sample_index (int): index of the sample in scope
kind (str, optional): kind of plot. Defaults to 'table'.
Raises:
Exception: raise Exception if the "kind" of plot is not supported
"""
if kind == "table":
self.fig = self._plot_table(sample_index)
else:
raise Exception(f'Value of "kind = {kind}" is not supported!')
    def _plot_table(self, sample_index=None):
        """
        Plot the table comparing the reference and the counterfactual values

        Args:
            sample_index (int, optional): unused here; kept for interface
                symmetry with ``plot()``.

        Returns:
            matplotlib.figure.Figure: figure containing the table.
        """
        colLabels = ["Sample", "Counterfactual Sample"]
        columns = [COLUMN_REFERENCE, COLUMN_COUNTERFACTUAL]
        # map categorical / one-hot values to display strings first
        self.format_features_for_plot()
        array_subset = self.df[columns].values[: self.number_of_features]
        rowLabels = list(self.df.index)[: self.number_of_features]
        # if show_rating:
        # append the model predictions of both samples as a summary row
        score_row = np.array(
            [f"{self.prediction:.1f}", f"{self.y_counter_factual:.1f}"]
        ).reshape(1, -1)
        array_subset = np.append(array_subset, score_row, axis=0)
        rowLabels = rowLabels + ["Prediction"]
        fig, ax = plt.subplots()
        fig.patch.set_visible(False)
        ax.axis("off")
        ax.axis("tight")
        table = ax.table(
            cellText=array_subset,
            colLabels=colLabels,
            rowLabels=rowLabels,
            loc="center",
            cellLoc="center",
        )
        table.auto_set_font_size(False)
        table.set_fontsize(12)
        table.scale(1.25, 2)
        # if show_rating:
        # make the last row (the prediction summary) bold
        for (row, col), cell in table.get_celld().items():
            if row == array_subset.shape[0]:
                cell.set_text_props(fontproperties=FontProperties(weight="bold"))
        plt.axis("off")
        plt.grid("off")
        # draw canvas once
        plt.gcf().canvas.draw()
        # get bounding box of table
        # NOTE(review): ``_cachedRenderer`` is a private matplotlib attribute
        # that was removed in matplotlib >= 3.6; consider
        # ``fig.canvas.get_renderer()`` -- confirm the supported matplotlib
        # version range.
        points = table.get_window_extent(plt.gcf()._cachedRenderer).get_points()
        # add 10 pixel spacing
        points[0, :] -= 10
        points[1, :] += 10
        # get new bounding box in inches
        self.nbbox = matplotlib.transforms.Bbox.from_extents(points / plt.gcf().dpi)
        plt.show()
        return fig
def get_method_text(self) -> str:
"""
Define the method introduction text of the explanation type.
Returns:
str: method text explanation
"""
return self.method_text_empty.format(
self.num_to_str[self.number_of_features], self.y_counter_factual
)
def get_natural_language_text(self) -> str:
"""Define the natural language output using the feature names
and its values for this explanation type
Returns:
str: natural language explanation
"""
feature_values = self.df[COLUMN_COUNTERFACTUAL].tolist()[
: self.number_of_features
]
feature_names = list(self.df.index)[: self.number_of_features]
sentences = []
for feature_name, feature_value in zip(feature_names, feature_values):
feature_value = self.map_category(feature_name, feature_value)
# handle one-hot encoding case
if " - " in feature_name:
sentence_filled = create_one_hot_sentence(
feature_name, feature_value, self.sentence_text_empty
)
mode = "one-hot feature"
else:
sentence_filled = self.sentence_text_empty.format(
feature_name, f"'{feature_value}'"
)
mode = "standard feature"
self.logger.debug(f"{mode}: {sentence_filled}")
sentences.append(sentence_filled)
sentences = "if " + self.join_text_with_comma_and_and(sentences)
return self.natural_language_text_empty.format(sentences)
def _setup(self, sample_index: int, sample_name: str) -> None:
"""Helper function to setup the counterfactual explanation
Args:
sample_index (int): index of sample in scope
sample_name (str): name of the sample in scope
"""
x_ref, x_counter_factual = self._calculate_importance(sample_index)
self.get_feature_importance(x_ref, x_counter_factual)
self.get_feature_values(x_ref, x_counter_factual)
self.natural_language_text = self.get_natural_language_text()
self.method_text = self.get_method_text()
self.plot_name = self.get_plot_name(sample_name)
def explain(self, sample_index, sample_name=None, separator="\n"):
"""
main function to create the explanation of the given sample. The
method_text, natural_language_text and the plots are create per sample.
Args:
sample (int): number of the sample to create the explanation for
Returns:
None.
"""
sample_name = self.get_sample_name(sample_index, sample_name)
self.prediction = | |
and time weights are used.
work_dir: str (default: ~/work)
Root directory to save all other files (mainly ``*.nc`` files).
write_plots: bool (default: True)
If ``False``, do not write any plot.
"""
import importlib
import logging
import os
from copy import deepcopy
from inspect import getfullargspec
from pprint import pformat
import iris
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from cf_units import Unit
from joblib import Parallel, delayed
from lime.lime_tabular import LimeTabularExplainer
from matplotlib.ticker import ScalarFormatter
from scipy.stats import shapiro
from sklearn import metrics
from sklearn.compose import ColumnTransformer
from sklearn.decomposition import PCA
from sklearn.exceptions import NotFittedError
from sklearn.impute import SimpleImputer
from sklearn.inspection import plot_partial_dependence
from sklearn.model_selection import (
GridSearchCV,
LeaveOneGroupOut,
LeaveOneOut,
train_test_split,
)
from sklearn.preprocessing import StandardScaler
from esmvaltool.diag_scripts import mlr
from esmvaltool.diag_scripts.mlr.custom_sklearn import (
AdvancedPipeline,
AdvancedRFECV,
AdvancedTransformedTargetRegressor,
cross_val_score_weighted,
get_rfecv_transformer,
perform_efecv,
)
from esmvaltool.diag_scripts.shared import (
ProvenanceLogger,
group_metadata,
io,
select_metadata,
)
logger = logging.getLogger(os.path.basename(__file__))
class MLRModel():
"""Base class for MLR models."""
_CLF_TYPE = None
_MODELS = {}
_MLR_MODEL_TYPE = None
@staticmethod
def _load_mlr_models():
"""Load MLR models from :mod:`esmvaltool.diag_scripts.mlr.models`."""
current_path = os.path.dirname(os.path.realpath(__file__))
models_path = os.path.join(current_path)
for (root, _, model_files) in os.walk(models_path):
for model_file in model_files:
rel_path = ('' if root == models_path else os.path.relpath(
root, models_path))
module = os.path.join(rel_path,
os.path.splitext(model_file)[0])
try:
importlib.import_module(
'esmvaltool.diag_scripts.mlr.models.{}'.format(
module.replace(os.sep, '.')))
except ImportError:
pass
@classmethod
def register_mlr_model(cls, mlr_model_type):
"""Add MLR model (subclass of this class) (decorator)."""
logger.debug("Found available MLR model '%s'", mlr_model_type)
def decorator(subclass):
"""Decorate subclass."""
subclass._MLR_MODEL_TYPE = mlr_model_type
cls._MODELS[mlr_model_type] = subclass
return subclass
return decorator
    @classmethod
    def create(cls, mlr_model_type, *args, **kwargs):
        """Create desired MLR model subclass (factory method).

        Raises
        ------
        NotImplementedError
            No MLR models were found at all, or ``mlr_model_type`` is not
            among the registered models.
        """
        # Import all modules under mlr/models/ so that their
        # register_mlr_model() decorators populate cls._MODELS.
        cls._load_mlr_models()
        if not cls._MODELS:
            raise NotImplementedError(
                f"Cannot initialize new MLR model with type "
                f"'{mlr_model_type}', no MLR models found. Please add "
                f"subclasses of {cls} in new files under 'esmvaltool/"
                f"diag_scripts/mlr/models/' decorated by 'esmvaltool."
                f"diag_scripts.mlr.models.{cls.__name__}."
                f"register_mlr_model()'")
        if mlr_model_type not in cls._MODELS:
            raise NotImplementedError(
                f"MLR model type '{mlr_model_type}' not found in 'esmvaltool/"
                f"diag_scripts/mlr/models/'")
        # Instantiate the registered subclass with the caller's arguments.
        subclass = cls._MODELS[mlr_model_type]
        logger.info(
            "Initialized MLR model with type '%s' and final regressor %s",
            mlr_model_type, subclass._CLF_TYPE)
        return subclass(*args, **kwargs)
    def __init__(self, input_datasets, **kwargs):
        """Initialize class members.

        Parameters
        ----------
        input_datasets : list of dict
            List of dataset metadata used as data for the MLR model.
        **kwargs
            Optional keyword arguments, see next sections.

        Raises
        ------
        NotImplementedError
            Class is initialized directly without the use of its factory
            function ``create()``.
        ValueError
            Invalid data given.
        """
        # Refuse direct instantiation of the base class (no _CLF_TYPE set)
        self._check_clf()
        # Private attributes
        self._cfg = deepcopy(kwargs)
        self._clf = None
        self._lime_explainer = None
        self._data = {}
        self._data['pred'] = {}
        self._datasets = {}
        self._classes = {}
        self._parameters = {}
        # Set default settings (must run before any self._cfg lookups below)
        self._set_default_settings()
        # Apply global seaborn plot styling from the configuration
        sns.set(**self._cfg.get('seaborn_settings', {}))
        # Adapt output directories (one sub-directory per model instance)
        self._cfg['mlr_work_dir'] = os.path.join(self._cfg['work_dir'],
                                                 self._cfg['sub_dir'])
        self._cfg['mlr_plot_dir'] = os.path.join(self._cfg['plot_dir'],
                                                 self._cfg['sub_dir'])
        if not os.path.exists(self._cfg['mlr_work_dir']):
            os.makedirs(self._cfg['mlr_work_dir'])
            logger.info("Created %s", self._cfg['mlr_work_dir'])
        if not os.path.exists(self._cfg['mlr_plot_dir']):
            os.makedirs(self._cfg['mlr_plot_dir'])
            logger.info("Created %s", self._cfg['mlr_plot_dir'])
        # Load datasets, classes and training data
        self._load_input_datasets(input_datasets)
        self._load_classes()
        self._load_data()
        # Create pipeline (with all preprocessor steps and final regressor)
        self.reset_pipeline()
        if self._cfg['parameters']:
            logger.debug("Using parameter(s): %s", self._cfg['parameters'])
            self.update_parameters(**self._cfg['parameters'])
        # Log successful initialization
        logger.info("Initialized MLR model (using at most %i processes)",
                    self._cfg['n_jobs'])
        logger.debug("With parameters")
        logger.debug(pformat(self.parameters))
@property
def categorical_features(self):
"""numpy.ndarray: Categorical features."""
return self.features[self._classes['features'].categorical]
    @property
    def data(self) -> dict:
        """dict: Input data of the MLR model."""
        # Returned by reference: mutating the result mutates the model state.
        return self._data
    @property
    def features(self) -> np.ndarray:
        """numpy.ndarray: Features of the input data."""
        return self._classes['features'].index.values
    @property
    def features_after_preprocessing(self):
        """numpy.ndarray: Features of the input data after preprocessing."""
        x_train = self.get_x_array('train')
        y_train = self.get_y_array('train')
        # Ensure the preprocessing transformers are fitted without fitting
        # the final regressor if the pipeline is not fitted yet
        try:
            self._check_fit_status('Calculating features after preprocessing')
        except NotFittedError:
            self._clf.fit_transformers_only(x_train, y_train,
                                            **self.fit_kwargs)
        x_trans = self._clf.transform_only(x_train)
        features = self.features
        n_features_may_drop = False
        if 'feature_selection' in self._clf.named_steps:
            # RFECV keeps only the supported subset of the features
            support = self._clf.named_steps['feature_selection'].support
            features = features[support]
            n_features_may_drop = True
        if 'pca' in self._clf.named_steps:
            # PCA replaces the numerical features by principal components;
            # categorical features are assumed to be passed through and
            # appended last -- NOTE(review): confirm this matches the
            # ColumnTransformer ordering used in the pipeline.
            categorical_features = np.array([
                f for f in features if f in self.categorical_features])
            n_numerical_features = x_trans.shape[1] - categorical_features.size
            features = [
                f'Principal component {idx}'
                for idx in range(n_numerical_features)
            ]
            features.extend(categorical_features)
            n_features_may_drop = True
        if not n_features_may_drop and x_trans.shape[1] != self.features.size:
            logger.warning(
                "Number of features decreased from %i to %i during "
                "preprocessing for unknown reasons (neither feature selection "
                "using recursive feature elimination nor PCA is performed)",
                self.features.size, x_trans.shape[1])
            features = [
                f'Unknown feature {idx}' for idx in range(x_trans.shape[1])
            ]
        return np.array(features, dtype='str')
    @property
    def features_types(self) -> pd.Series:
        """pandas.Series: Types of the features."""
        return self._classes['features'].types
    @property
    def features_units(self) -> pd.Series:
        """pandas.Series: Units of the features."""
        return self._classes['features'].units
    @property
    def fit_kwargs(self) -> dict:
        """dict: Keyword arguments for :meth:`fit`."""
        # Start from the configured kwargs, let subclasses adapt them, then
        # fill in any verbosity settings accepted by the regressor's fit()
        fit_kwargs = self._cfg['fit_kwargs']
        fit_kwargs = self._update_fit_kwargs(fit_kwargs)
        verbosity_kwargs = self._get_verbosity_parameters(self._clf.fit)
        for (key, val) in verbosity_kwargs.items():
            fit_kwargs.setdefault(key, val)
        return fit_kwargs
    @property
    def group_attributes(self) -> np.ndarray:
        """numpy.ndarray: Group attributes of the input data."""
        return self._classes['group_attributes']
    @property
    def label(self) -> str:
        """str: Label of the input data."""
        return self._classes['label'].index.values[0]
    @property
    def label_units(self) -> str:
        """str: Units of the label."""
        return self._classes['label'].units.values[0]
    @property
    def mlr_model_type(self):
        """str: MLR model type."""
        # None on the base class; set on subclasses by register_mlr_model()
        return self._MLR_MODEL_TYPE
    @property
    def numerical_features(self) -> np.ndarray:
        """numpy.ndarray: Numerical features."""
        # complement of the categorical feature mask
        return self.features[~self._classes['features'].categorical]
    @property
    def parameters(self) -> dict:
        """dict: Parameters of the complete MLR model pipeline."""
        return self._parameters
def efecv(self, **kwargs):
"""Perform exhaustive feature elimination using cross-validation.
Parameters
----------
**kwargs : keyword arguments, optional
Additional options for :func:`esmvaltool.diag_scripts.mlr.
custom_sklearn.cross_val_score_weighted`.
"""
logger.info(
"Performing exhaustive feature elimination using cross-validation "
"with final regressor %s on %i training points (thiy may take a "
"while...)", self._CLF_TYPE,
len(self.data['train'].index))
# Get fit parameters
fit_kwargs = deepcopy(self.fit_kwargs)
keys_to_remove = []
for key in fit_kwargs:
if key.endswith('eval_set'):
keys_to_remove.append(key)
for key in keys_to_remove:
logger.warning(
"Fit parameter '%s' is not supported for efecv()", key)
fit_kwargs.pop(key)
# Get other keyword arguments
kwargs = deepcopy(kwargs)
verbosity_kwargs = self._get_verbosity_parameters(
cross_val_score_weighted)
for (key, val) in verbosity_kwargs.items():
kwargs.setdefault(key, val)
kwargs.setdefault('n_jobs', self._cfg['n_jobs'])
kwargs['fit_params'] = fit_kwargs
kwargs['sample_weights'] = self._get_sample_weights('train')
if kwargs.get('cv') == 'logo':
kwargs.update(self._get_logo_cv_kwargs())
# Exhaustive feature selection
(self._clf, transformer) = perform_efecv(
self._clf, self.data['train'].x, self.get_y_array('train'),
**kwargs)
self._clf.steps.insert(0, ('feature_selection', transformer))
# Log results
new_features = self.features[transformer.support]
logger.info(
"Exhaustive feature elimination was successful, %i of the %i "
"features remain", new_features.size, self.features.size)
logger.info("Old features: %s", self.features)
logger.info("New features: %s", new_features)
logger.info("Successfully fitted MLR model on %i training point(s)",
len(self.data['train'].index))
logger.debug("Pipeline steps:")
logger.debug(pformat(list(self._clf.named_steps.keys())))
logger.debug("Parameters:")
logger.debug(pformat(self.parameters))
# LIME
self._load_lime_explainer()
    def export_prediction_data(self, filename=None):
        """Export all prediction data contained in `self._data`.

        Parameters
        ----------
        filename : str, optional (default: '{data_type}_{pred_name}.csv')
            Name of the exported files.
        """
        # one CSV per prediction name; file writing is delegated to
        # _save_csv_file()
        for pred_name in self.data['pred']:
            self._save_csv_file('pred', filename, pred_name=pred_name)
    def export_training_data(self, filename=None):
        """Export all training data contained in `self._data`.

        Parameters
        ----------
        filename : str, optional (default: '{data_type}.csv')
            Name of the exported files.
        """
        # full dataset plus the train/test split
        for data_type in ('all', 'train', 'test'):
            self._save_csv_file(data_type, filename)
    def fit(self):
        """Fit MLR model.

        Note
        ----
        Specifying keyword arguments for this function is not allowed here
        since :attr:`features_after_preprocessing` might be altered by
        that. Use the keyword argument ``fit_kwargs`` during class
        initialization instead.
        """
        logger.info(
            "Fitting MLR model with final regressor %s on %i training "
            "point(s)", self._CLF_TYPE, len(self.data['train'].index))
        # Create MLR model with desired parameters and fit it
        self._clf.fit(self.data['train'].x, self.data['train'].y,
                      **self.fit_kwargs)
        # cache the fitted pipeline parameters for the `parameters` property
        self._parameters = self._get_clf_parameters()
        logger.info("Successfully fitted MLR model on %i training point(s)",
                    len(self.data['train'].index))
        logger.debug("Pipeline steps:")
        logger.debug(pformat(list(self._clf.named_steps.keys())))
        logger.debug("Parameters:")
        logger.debug(pformat(self.parameters))
        # LIME explainer must be rebuilt against the fitted model
        self._load_lime_explainer()
def get_ancestors(self, label=True, features=None, prediction_names=None,
prediction_reference=False):
"""Return ancestor files.
Parameters
----------
label : bool, optional (default: True)
Return ``label`` files.
features : list of str, optional (default: None)
Features for which files should be returned. If ``None``, return
files for all features.
prediction_names : list of str, optional (default: None)
Prediction names for which files should be returned. If ``None``,
return files for all prediction names.
prediction_reference : bool, optional (default: False)
Return ``prediction_reference`` files if available for given
``prediction_names``.
Returns
-------
list of str
Ancestor files.
Raises
------
ValueError
Invalid ``feature`` or ``prediction_name`` given.
"""
ancestors = []
# Label files
if label:
ancestors.extend([d['filename'] for d in self._datasets['label']])
# Feature files
if features is None:
features = self.features
for feature in features:
if feature not in self.features:
raise ValueError(
f"Got invalid feature '{feature}', expected one of "
f"{self.features}")
ancestors.extend(
[d['filename'] for d in self._datasets['feature']
if d['tag'] == feature]
)
# Prediction files
available_pred_names = list(self._datasets['prediction_input'].keys())
if prediction_names is None:
prediction_names = available_pred_names
for pred_name in prediction_names:
if pred_name not in available_pred_names:
raise ValueError(
f"Got invalid prediction name '{pred_name}', expected one "
f"of {available_pred_names}")
ancestors.extend(
[d['filename'] for d in
self._datasets['prediction_input'][pred_name]]
)
ancestors.extend(
[d['filename'] for d in
self._datasets['prediction_input_error'].get(pred_name, [])]
)
if prediction_reference:
ancestors.extend(
[d['filename'] for d in
self._datasets['prediction_reference'].get(pred_name, [])]
)
return ancestors
def get_data_frame(self, data_type, impute_nans=False):
"""Return data frame of specified type.
Parameters
----------
data_type : str
Data type to be returned. Must be one | |
4527",
52665: "4366 4463 4528",
52666: "4366 4463 4529",
52667: "4366 4463 4530",
52668: "4366 4463 4531",
52669: "4366 4463 4532",
52670: "4366 4463 4533",
52671: "4366 4463 4534",
52672: "4366 4463 4535",
52673: "4366 4463 4536",
52674: "4366 4463 4537",
52675: "4366 4463 4538",
52676: "4366 4463 4539",
52677: "4366 4463 4540",
52678: "4366 4463 4541",
52679: "4366 4463 4542",
52680: "4366 4463 4543",
52681: "4366 4463 4544",
52682: "4366 4463 4545",
52683: "4366 4463 4546",
52684: "4366 4464",
52685: "4366 4464 4520",
52686: "4366 4464 4521",
52687: "4366 4464 4522",
52688: "4366 4464 4523",
52689: "4366 4464 4524",
52690: "4366 4464 4525",
52691: "4366 4464 4526",
52692: "4366 4464 4527",
52693: "4366 4464 4528",
52694: "4366 4464 4529",
52695: "4366 4464 4530",
52696: "4366 4464 4531",
52697: "4366 4464 4532",
52698: "4366 4464 4533",
52699: "4366 4464 4534",
52700: "4366 4464 4535",
52701: "4366 4464 4536",
52702: "4366 4464 4537",
52703: "4366 4464 4538",
52704: "4366 4464 4539",
52705: "4366 4464 4540",
52706: "4366 4464 4541",
52707: "4366 4464 4542",
52708: "4366 4464 4543",
52709: "4366 4464 4544",
52710: "4366 4464 4545",
52711: "4366 4464 4546",
52712: "4366 4465",
52713: "4366 4465 4520",
52714: "4366 4465 4521",
52715: "4366 4465 4522",
52716: "4366 4465 4523",
52717: "4366 4465 4524",
52718: "4366 4465 4525",
52719: "4366 4465 4526",
52720: "4366 4465 4527",
52721: "4366 4465 4528",
52722: "4366 4465 4529",
52723: "4366 4465 4530",
52724: "4366 4465 4531",
52725: "4366 4465 4532",
52726: "4366 4465 4533",
52727: "4366 4465 4534",
52728: "4366 4465 4535",
52729: "4366 4465 4536",
52730: "4366 4465 4537",
52731: "4366 4465 4538",
52732: "4366 4465 4539",
52733: "4366 4465 4540",
52734: "4366 4465 4541",
52735: "4366 4465 4542",
52736: "4366 4465 4543",
52737: "4366 4465 4544",
52738: "4366 4465 4545",
52739: "4366 4465 4546",
52740: "4366 4466",
52741: "4366 4466 4520",
52742: "4366 4466 4521",
52743: "4366 4466 4522",
52744: "4366 4466 4523",
52745: "4366 4466 4524",
52746: "4366 4466 4525",
52747: "4366 4466 4526",
52748: "4366 4466 4527",
52749: "4366 4466 4528",
52750: "4366 4466 4529",
52751: "4366 4466 4530",
52752: "4366 4466 4531",
52753: "4366 4466 4532",
52754: "4366 4466 4533",
52755: "4366 4466 4534",
52756: "4366 4466 4535",
52757: "4366 4466 4536",
52758: "4366 4466 4537",
52759: "4366 4466 4538",
52760: "4366 4466 4539",
52761: "4366 4466 4540",
52762: "4366 4466 4541",
52763: "4366 4466 4542",
52764: "4366 4466 4543",
52765: "4366 4466 4544",
52766: "4366 4466 4545",
52767: "4366 4466 4546",
52768: "4366 4467",
52769: "4366 4467 4520",
52770: "4366 4467 4521",
52771: "4366 4467 4522",
52772: "4366 4467 4523",
52773: "4366 4467 4524",
52774: "4366 4467 4525",
52775: "4366 4467 4526",
52776: "4366 4467 4527",
52777: "4366 4467 4528",
52778: "4366 4467 4529",
52779: "4366 4467 4530",
52780: "4366 4467 4531",
52781: "4366 4467 4532",
52782: "4366 4467 4533",
52783: "4366 4467 4534",
52784: "4366 4467 4535",
52785: "4366 4467 4536",
52786: "4366 4467 4537",
52787: "4366 4467 4538",
52788: "4366 4467 4539",
52789: "4366 4467 4540",
52790: "4366 4467 4541",
52791: "4366 4467 4542",
52792: "4366 4467 4543",
52793: "4366 4467 4544",
52794: "4366 4467 4545",
52795: "4366 4467 4546",
52796: "4366 4468",
52797: "4366 4468 4520",
52798: "4366 4468 4521",
52799: "4366 4468 4522",
52800: "4366 4468 4523",
52801: "4366 4468 4524",
52802: "4366 4468 4525",
52803: "4366 4468 4526",
52804: "4366 4468 4527",
52805: "4366 4468 4528",
52806: "4366 4468 4529",
52807: "4366 4468 4530",
52808: "4366 4468 4531",
52809: "4366 4468 4532",
52810: "4366 4468 4533",
52811: "4366 4468 4534",
52812: "4366 4468 4535",
52813: "4366 4468 4536",
52814: "4366 4468 4537",
52815: "4366 4468 4538",
52816: "4366 4468 4539",
52817: "4366 4468 4540",
52818: "4366 4468 4541",
52819: "4366 4468 4542",
52820: "4366 4468 4543",
52821: "4366 4468 4544",
52822: "4366 4468 4545",
52823: "4366 4468 4546",
52824: "4366 4469",
52825: "4366 4469 4520",
52826: "4366 4469 4521",
52827: "4366 4469 4522",
52828: "4366 4469 4523",
52829: "4366 4469 4524",
52830: "4366 4469 4525",
52831: "4366 4469 4526",
52832: "4366 4469 4527",
52833: "4366 4469 4528",
52834: "4366 4469 4529",
52835: "4366 4469 4530",
52836: "4366 4469 4531",
52837: "4366 4469 4532",
52838: "4366 4469 4533",
52839: "4366 4469 4534",
52840: "4366 4469 4535",
52841: "4366 4469 4536",
52842: "4366 4469 4537",
52843: "4366 4469 4538",
52844: "4366 4469 4539",
52845: "4366 4469 4540",
52846: "4366 4469 4541",
52847: "4366 4469 4542",
52848: "4366 4469 4543",
52849: "4366 4469 4544",
52850: "4366 4469 4545",
52851: "4366 4469 4546",
52852: "4367 4449",
52853: "4367 4449 4520",
52854: "4367 4449 4521",
52855: "4367 4449 4522",
52856: "4367 4449 4523",
52857: "4367 4449 4524",
52858: "4367 4449 4525",
52859: "4367 4449 4526",
52860: "4367 4449 4527",
52861: "4367 4449 4528",
52862: "4367 4449 4529",
52863: "4367 4449 4530",
52864: "4367 4449 4531",
52865: "4367 4449 4532",
52866: "4367 4449 4533",
52867: "4367 4449 4534",
52868: "4367 4449 4535",
52869: "4367 4449 4536",
52870: "4367 4449 4537",
52871: "4367 4449 4538",
52872: "4367 4449 4539",
52873: "4367 4449 4540",
52874: "4367 4449 4541",
52875: "4367 4449 4542",
52876: "4367 4449 4543",
52877: "4367 4449 4544",
52878: "4367 4449 4545",
52879: "4367 4449 4546",
52880: "4367 4450",
52881: "4367 4450 4520",
52882: "4367 4450 4521",
52883: "4367 4450 4522",
52884: "4367 4450 4523",
52885: "4367 4450 4524",
52886: "4367 4450 4525",
52887: "4367 4450 4526",
52888: "4367 4450 4527",
52889: "4367 4450 4528",
52890: "4367 4450 4529",
52891: "4367 4450 4530",
52892: "4367 4450 4531",
52893: "4367 4450 4532",
52894: "4367 4450 4533",
52895: "4367 4450 4534",
52896: "4367 4450 4535",
52897: "4367 4450 4536",
52898: "4367 4450 4537",
52899: "4367 4450 4538",
52900: "4367 4450 4539",
52901: "4367 4450 4540",
52902: "4367 4450 4541",
52903: "4367 4450 4542",
52904: "4367 4450 4543",
52905: "4367 4450 4544",
52906: "4367 4450 4545",
52907: "4367 4450 4546",
52908: "4367 4451",
52909: "4367 4451 4520",
52910: "4367 4451 4521",
52911: "4367 4451 4522",
52912: "4367 4451 4523",
52913: "4367 4451 4524",
52914: "4367 4451 4525",
52915: "4367 4451 4526",
52916: "4367 4451 4527",
52917: "4367 4451 4528",
52918: "4367 4451 4529",
52919: "4367 4451 4530",
52920: "4367 4451 4531",
52921: "4367 4451 4532",
52922: "4367 4451 4533",
52923: "4367 4451 4534",
52924: "4367 4451 4535",
52925: "4367 4451 4536",
52926: "4367 4451 4537",
52927: "4367 4451 4538",
52928: "4367 4451 4539",
52929: "4367 4451 4540",
52930: "4367 4451 4541",
52931: "4367 4451 4542",
52932: "4367 4451 4543",
52933: "4367 4451 4544",
52934: "4367 4451 4545",
52935: "4367 4451 4546",
52936: "4367 4452",
52937: "4367 4452 4520",
52938: "4367 4452 4521",
52939: "4367 4452 4522",
52940: "4367 4452 4523",
52941: "4367 4452 4524",
52942: "4367 4452 4525",
52943: "4367 4452 4526",
52944: "4367 4452 4527",
52945: "4367 4452 4528",
52946: "4367 4452 4529",
52947: "4367 4452 4530",
52948: "4367 4452 4531",
52949: "4367 4452 4532",
52950: "4367 4452 4533",
52951: "4367 4452 4534",
52952: "4367 4452 4535",
52953: "4367 4452 4536",
52954: "4367 4452 4537",
52955: "4367 4452 4538",
52956: "4367 4452 4539",
52957: "4367 4452 4540",
52958: "4367 4452 4541",
52959: "4367 4452 4542",
52960: "4367 4452 4543",
52961: "4367 4452 4544",
52962: "4367 4452 4545",
52963: "4367 4452 4546",
52964: "4367 4453",
52965: "4367 4453 4520",
52966: "4367 4453 4521",
52967: "4367 4453 4522",
52968: "4367 4453 4523",
52969: "4367 4453 4524",
52970: "4367 4453 4525",
52971: "4367 4453 4526",
52972: "4367 4453 4527",
52973: "4367 4453 4528",
52974: "4367 4453 4529",
52975: "4367 4453 4530",
52976: "4367 4453 4531",
52977: "4367 4453 4532",
52978: "4367 4453 4533",
52979: "4367 4453 4534",
52980: "4367 4453 4535",
52981: "4367 4453 4536",
52982: "4367 4453 4537",
52983: "4367 4453 4538",
52984: "4367 4453 4539",
52985: "4367 4453 4540",
52986: "4367 4453 4541",
52987: "4367 | |
target we're heading north
# tan(bet) = tan(sig) * cos(alp)
ssig1 = sbet1; csig1 = calp1 * cbet1
ssig2 = sbet2; csig2 = calp2 * cbet2
# sig12 = sig2 - sig1
sig12 = math.atan2(max(csig1 * ssig2 - ssig1 * csig2, 0.0),
csig1 * csig2 + ssig1 * ssig2)
s12x, m12x, dummy, M12, M21 = self.Lengths(
self._n, sig12, ssig1, csig1, ssig2, csig2, cbet1, cbet2,
(outmask & Geodesic.GEODESICSCALE) != 0, C1a, C2a)
# Add the check for sig12 since zero length geodesics might yield m12 <
# 0. Test case was
#
# echo 20.001 0 20.001 0 | Geod -i
#
# In fact, we will have sig12 > pi/2 for meridional geodesic which is
# not a shortest path.
if sig12 < 1 or m12x >= 0:
m12x *= self._a
s12x *= self._b
a12 = sig12 / Math.degree
else:
# m12 < 0, i.e., prolate and too close to anti-podal
meridian = False
# end if meridian:
#real omg12
if (not meridian and
sbet1 == 0 and # and sbet2 == 0
# Mimic the way Lambda12 works with calp1 = 0
(self._f <= 0 or lam12 <= math.pi - self._f * math.pi)):
# Geodesic runs along equator
calp1 = calp2 = 0; salp1 = salp2 = 1
s12x = self._a * lam12
m12x = self._b * math.sin(lam12 / self._f1)
if outmask & Geodesic.GEODESICSCALE:
M12 = M21 = math.cos(lam12 / self._f1)
a12 = lon12 / self._f1
sig12 = omg12 = lam12 / self._f1
elif not meridian:
# Now point1 and point2 belong within a hemisphere bounded by a
# meridian and geodesic is neither meridional or equatorial.
# Figure a starting point for Newton's method
sig12, salp1, calp1, salp2, calp2 = self.InverseStart(
sbet1, cbet1, sbet2, cbet2, lam12, C1a, C2a)
if sig12 >= 0:
# Short lines (InverseStart sets salp2, calp2)
w1 = math.sqrt(1 - self._e2 * Math.sq(cbet1))
s12x = sig12 * self._a * w1
m12x = (Math.sq(w1) * self._a / self._f1 *
math.sin(sig12 * self._f1 / w1))
if outmask & Geodesic.GEODESICSCALE:
M12 = M21 = math.cos(sig12 * self._f1 / w1)
a12 = sig12 / Math.degree
omg12 = lam12 / w1
else:
# Newton's method
# real ssig1, csig1, ssig2, csig2, eps
ov = numit = trip = 0
while numit < Geodesic.maxit_:
(nlam12, salp2, calp2, sig12, ssig1, csig1, ssig2, csig2,
eps, omg12, dv) = self.Lambda12(
sbet1, cbet1, sbet2, cbet2, salp1, calp1, trip < 1, C1a, C2a, C3a)
v = nlam12 - lam12
if not(abs(v) > Geodesic.tiny_) or not(trip < 1):
if not(abs(v) <= max(Geodesic.tol1_, ov)):
numit = Geodesic.maxit_
break
dalp1 = -v/dv
sdalp1 = math.sin(dalp1); cdalp1 = math.cos(dalp1)
nsalp1 = salp1 * cdalp1 + calp1 * sdalp1
calp1 = calp1 * cdalp1 - salp1 * sdalp1
salp1 = max(0.0, nsalp1)
salp1, calp1 = Geodesic.SinCosNorm(salp1, calp1)
# In some regimes we don't get quadratic convergence because slope
# -> 0. So use convergence conditions based on epsilon instead of
# sqrt(epsilon). The first criterion is a test on abs(v) against
# 100 * epsilon. The second takes credit for an anticipated
# reduction in abs(v) by v/ov (due to the latest update in alp1) and
# checks this against epsilon.
if not(abs(v) >= Geodesic.tol1_ and
Math.sq(v) >= ov * Geodesic.tol0_):
trip += 1
ov = abs(v)
numit += 1
if numit >= Geodesic.maxit_:
# Signal failure.
return a12, s12, azi1, azi2, m12, M12, M21, S12
s12x, m12x, dummy, M12, M21 = self.Lengths(
eps, sig12, ssig1, csig1, ssig2, csig2, cbet1, cbet2,
(outmask & Geodesic.GEODESICSCALE) != 0, C1a, C2a)
m12x *= self._a
s12x *= self._b
a12 = sig12 / Math.degree
omg12 = lam12 - omg12
# end elif not meridian
if outmask & Geodesic.DISTANCE:
s12 = 0 + s12x # Convert -0 to 0
if outmask & Geodesic.REDUCEDLENGTH:
m12 = 0 + m12x # Convert -0 to 0
if outmask & Geodesic.AREA:
# From Lambda12: sin(alp1) * cos(bet1) = sin(alp0)
salp0 = salp1 * cbet1
calp0 = math.hypot(calp1, salp1 * sbet1) # calp0 > 0
# real alp12
if calp0 != 0 and salp0 != 0:
# From Lambda12: tan(bet) = tan(sig) * cos(alp)
ssig1 = sbet1; csig1 = calp1 * cbet1
ssig2 = sbet2; csig2 = calp2 * cbet2
k2 = Math.sq(calp0) * self._ep2
# Multiplier = a^2 * e^2 * cos(alpha0) * sin(alpha0).
A4 = Math.sq(self._a) * calp0 * salp0 * self._e2
ssig1, csig1 = Geodesic.SinCosNorm(ssig1, csig1)
ssig2, csig2 = Geodesic.SinCosNorm(ssig2, csig2)
C4a = list(range(Geodesic.nC4_))
self.C4f(k2, C4a)
B41 = Geodesic.SinCosSeries(False, ssig1, csig1, C4a, Geodesic.nC4_)
B42 = Geodesic.SinCosSeries(False, ssig2, csig2, C4a, Geodesic.nC4_)
S12 = A4 * (B42 - B41)
else:
# Avoid problems with indeterminate sig1, sig2 on equator
S12 = 0
if (not meridian and
omg12 < 0.75 * math.pi and # Long difference too big
sbet2 - sbet1 < 1.75): # Lat difference too big
# Use tan(Gamma/2) = tan(omg12/2)
# * (tan(bet1/2)+tan(bet2/2))/(1+tan(bet1/2)*tan(bet2/2))
# with tan(x/2) = sin(x)/(1+cos(x))
somg12 = math.sin(omg12); domg12 = 1 + math.cos(omg12)
dbet1 = 1 + cbet1; dbet2 = 1 + cbet2
alp12 = 2 * math.atan2( somg12 * ( sbet1 * dbet2 + sbet2 * dbet1 ),
domg12 * ( sbet1 * sbet2 + dbet1 * dbet2 ) )
else:
# alp12 = alp2 - alp1, used in atan2 so no need to normalize
salp12 = salp2 * calp1 - calp2 * salp1
calp12 = calp2 * calp1 + salp2 * salp1
# The right thing appears to happen if alp1 = +/-180 and alp2 = 0, viz
# salp12 = -0 and alp12 = -180. However this depends on the sign
# being attached to 0 correctly. The following ensures the correct
# behavior.
if salp12 == 0 and calp12 < 0:
salp12 = Geodesic.tiny_ * calp1
calp12 = -1
alp12 = math.atan2(salp12, calp12)
S12 += self._c2 * alp12
S12 *= swapp * lonsign * latsign
# Convert -0 to 0
S12 += 0
# Convert calp, salp to azimuth accounting for lonsign, swapp, latsign.
if swapp < 0:
salp2, salp1 = salp1, salp2
calp2, calp1 = calp1, calp2
if outmask & Geodesic.GEODESICSCALE:
M21, M12 = M12, M21
salp1 *= swapp * lonsign; calp1 *= swapp * latsign
salp2 *= swapp * lonsign; calp2 *= swapp * latsign
if outmask & Geodesic.AZIMUTH:
# minus signs give range [-180, 180). 0- converts -0 to +0.
azi1 = 0 - math.atan2(-salp1, calp1) / Math.degree
azi2 = 0 - math.atan2(-salp2, calp2) / Math.degree
# Returned value in [0, 180]
return a12, s12, azi1, azi2, m12, M12, M21, S12
def CheckPosition(lat, lon):
    """Validate a (lat, lon) position and return the normalized longitude.

    Raises ValueError if lat lies outside [-90, 90] or lon lies outside
    [-180, 360] (NaN values also fail these checks and raise).
    """
    if not (abs(lat) <= 90):
        raise ValueError("latitude " + str(lat) + " not in [-90, 90]")
    # Fold longitudes given in [180, 360] / [-360, -180] into range first.
    if lon > 360.:
        lon = lon - 360.
    elif lon < -180:
        lon = lon + 360.
    if not (-180 <= lon <= 360):
        raise ValueError("longitude " + str(lon) + " not in [-180, 360]")
    return Geodesic.AngNormalize(lon)
CheckPosition = staticmethod(CheckPosition)
def CheckAzimuth(azi):
    """Validate an azimuth and return it normalized.

    Raises ValueError if azi lies outside [-180, 360] (NaN values also
    fail the range check and raise).
    """
    # Fold azimuths given in [180, 360] / [-360, -180] into range first.
    if azi > 360.:
        azi = azi - 360.
    elif azi < -180:
        azi = azi + 360.
    if not (-180 <= azi <= 360):
        raise ValueError("azimuth " + str(azi) + " not in [-180, 360]")
    return Geodesic.AngNormalize(azi)
CheckAzimuth = staticmethod(CheckAzimuth)
def CheckDistance(s):
    """Raise ValueError unless the distance s is a finite number."""
    if not Math.isfinite(s):
        raise ValueError("distance " + str(s) + " not a finite number")
CheckDistance = staticmethod(CheckDistance)
def Inverse(self, lat1, lon1, lat2, lon2, outmask = DISTANCE | AZIMUTH):
"""
Solve the inverse geodesic problem. Compute geodesic between
(lat1, lon1) and (lat2, lon2). Return a dictionary with (some) of
the following entries:
lat1 latitude of point 1
lon1 longitude of point 1
azi1 azimuth of line at point 1
lat2 latitude of point 2
lon2 longitude of point 2
azi2 azimuth of line at point 2
s12 distance from 1 to 2
a12 arc length on auxiliary sphere from 1 to 2
m12 reduced length of geodesic
M12 geodesic scale | |
# -*- coding: utf-8 -*-
r"""
Hyperbolic Points
This module implements points in hyperbolic space of arbitrary dimension.
It also contains the implementations for specific models of
hyperbolic geometry.
This module also implements ideal points in hyperbolic space of arbitrary
dimension. It also contains the implementations for specific models
of hyperbolic geometry.
Note that not all models of hyperbolic space are bounded, meaning that
the ideal boundary is not the topological boundary of the set underlying
the model. For example, the unit disk model is bounded with boundary
given by the unit sphere. The hyperboloid model is not bounded.
AUTHORS:
- <NAME> (2013): initial version
EXAMPLES:
We can construct points in the upper half plane model, abbreviated
UHP for convenience::
sage: UHP = HyperbolicPlane().UHP()
sage: UHP.get_point(2 + I)
Point in UHP I + 2
sage: g = UHP.get_point(3 + I)
sage: g.dist(UHP.get_point(I))
arccosh(11/2)
We can also construct boundary points in the upper half plane model::
sage: UHP.get_point(3)
Boundary point in UHP 3
Some more examples::
sage: HyperbolicPlane().UHP().get_point(0)
Boundary point in UHP 0
sage: HyperbolicPlane().PD().get_point(I/2)
Point in PD 1/2*I
sage: HyperbolicPlane().KM().get_point((0,1))
Boundary point in KM (0, 1)
sage: HyperbolicPlane().HM().get_point((0,0,1))
Point in HM (0, 0, 1)
"""
#***********************************************************************
# Copyright (C) 2013 <NAME> <<EMAIL>>
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# http://www.gnu.org/licenses/
#***********************************************************************
from sage.structure.element import Element
from sage.structure.richcmp import richcmp, op_NE
from sage.symbolic.all import I
from sage.misc.latex import latex
from sage.structure.element import is_Matrix
from sage.matrix.constructor import matrix
from sage.modules.free_module_element import vector
from sage.rings.infinity import infinity
from sage.rings.all import RR, CC
from sage.functions.other import real, imag
from sage.geometry.hyperbolic_space.hyperbolic_isometry import HyperbolicIsometry
class HyperbolicPoint(Element):
r"""
Abstract base class for hyperbolic points. This class should never
be instantiated.
INPUT:
- ``model`` -- the model of the hyperbolic space
- ``coordinates`` -- the coordinates of a hyperbolic point in the
appropriate model
- ``is_boundary`` -- whether the point is a boundary point
- ``check`` -- (default: ``True``) if ``True``, then check to make sure
the coordinates give a valid point in the model
EXAMPLES:
Comparison between different models is performed via coercion::
sage: UHP = HyperbolicPlane().UHP()
sage: p = UHP.get_point(.2 + .3*I); p
Point in UHP 0.200000000000000 + 0.300000000000000*I
sage: PD = HyperbolicPlane().PD()
sage: q = PD.get_point(0.2 + 0.3*I); q
Point in PD 0.200000000000000 + 0.300000000000000*I
sage: p == q
False
sage: PD(p)
Point in PD 0.231213872832370 - 0.502890173410405*I
sage: bool(p.coordinates() == q.coordinates())
True
Similarly for boundary points::
sage: p = UHP.get_point(-1); p
Boundary point in UHP -1
sage: q = PD.get_point(-1); q
Boundary point in PD -1
sage: p == q
True
sage: PD(p)
Boundary point in PD -1
It is an error to specify a point that does not lie in the
appropriate model::
sage: HyperbolicPlane().UHP().get_point(0.2 - 0.3*I)
Traceback (most recent call last):
...
ValueError: 0.200000000000000 - 0.300000000000000*I is not a valid point in the UHP model
sage: HyperbolicPlane().PD().get_point(1.2)
Traceback (most recent call last):
...
ValueError: 1.20000000000000 is not a valid point in the PD model
sage: HyperbolicPlane().KM().get_point((1,1))
Traceback (most recent call last):
...
ValueError: (1, 1) is not a valid point in the KM model
sage: HyperbolicPlane().HM().get_point((1, 1, 1))
Traceback (most recent call last):
...
ValueError: (1, 1, 1) is not a valid point in the HM model
It is an error to specify an interior point of hyperbolic space as a
boundary point::
sage: HyperbolicPlane().UHP().get_point(0.2 + 0.3*I, is_boundary=True)
Traceback (most recent call last):
...
ValueError: 0.200000000000000 + 0.300000000000000*I is not a valid boundary point in the UHP model
TESTS:
In the PD model, the coordinates of a point are in the unit disk
in the complex plane `\CC`::
sage: HyperbolicPlane().PD().get_point(0)
Point in PD 0
sage: HyperbolicPlane().PD().get_point(1)
Boundary point in PD 1
In the KM model, the coordinates of a point are in the unit disk
in the real plane `\RR^2`::
sage: HyperbolicPlane().KM().get_point((0,0))
Point in KM (0, 0)
sage: HyperbolicPlane().KM().get_point((1,0))
Boundary point in KM (1, 0)
In the HM model, the coordinates of a point are on the
hyperboloid given by `x^2 + y^2 - z^2 = -1`::
sage: HyperbolicPlane().HM().get_point((0,0,1))
Point in HM (0, 0, 1)
sage: HyperbolicPlane().HM().get_point((0,0,2))
Traceback (most recent call last):
...
ValueError: (0, 0, 2) is not a valid point in the HM model
sage: HyperbolicPlane().HM().get_point((1,0,0), is_boundary=True)
Traceback (most recent call last):
...
NotImplementedError: boundary points are not implemented in the HM model
"""
def __init__(self, model, coordinates, is_boundary, check=True, **graphics_options):
    r"""
    See ``HyperbolicPoint`` for full documentation.

    EXAMPLES::

        sage: p = HyperbolicPlane().UHP().get_point(I)
        sage: TestSuite(p).run()
    """
    if is_boundary:
        # Boundary points only make sense in bounded models.
        if not model.is_bounded():
            raise NotImplementedError(
                "boundary points are not implemented in the "
                "{0} model".format(model.short_name()))
        if check and not model.boundary_point_in_model(coordinates):
            raise ValueError(
                "{0} is not a valid boundary point in the "
                "{1} model".format(coordinates, model.short_name()))
    elif check and not model.point_in_model(coordinates):
        raise ValueError(
            "{0} is not a valid point in the "
            "{1} model".format(coordinates, model.short_name()))
    # Tuples (e.g. KM / HM coordinates) are stored as vectors.
    self._coordinates = (vector(coordinates)
                         if isinstance(coordinates, tuple)
                         else coordinates)
    self._bdry = is_boundary
    self._graphics_options = graphics_options
    Element.__init__(self, model)
#####################
# "Private" Methods #
#####################
def _repr_(self):
    r"""
    Return a string representation of ``self``.

    EXAMPLES::

        sage: HyperbolicPlane().UHP().get_point(3 + 4*I)
        Point in UHP 4*I + 3
        sage: HyperbolicPlane().PD().get_point(1/2 + I/2)
        Point in PD 1/2*I + 1/2
        sage: HyperbolicPlane().KM().get_point((1/2, 1/2))
        Point in KM (1/2, 1/2)
        sage: HyperbolicPlane().HM().get_point((0,0,1))
        Point in HM (0, 0, 1)
        sage: HyperbolicPlane().UHP().get_point(infinity)
        Boundary point in UHP +Infinity
        sage: HyperbolicPlane().PD().get_point(-1)
        Boundary point in PD -1
        sage: HyperbolicPlane().KM().get_point((0, -1))
        Boundary point in KM (0, -1)
    """
    kind = "Boundary point" if self._bdry else "Point"
    return "{0} in {1} {2}".format(kind, self.parent().short_name(),
                                   self._coordinates)
def _latex_(self):
    r"""
    Return a LaTeX representation of the coordinates of ``self``.

    EXAMPLES::

        sage: from sage.geometry.hyperbolic_space.hyperbolic_point import *
        sage: p = HyperbolicPlane().UHP().get_point(0)
        sage: latex(p)
        0
        sage: q = HyperbolicPlane().HM().get_point((0,0,1))
        sage: latex(q)
        \left(0,\,0,\,1\right)
    """
    # Delegate straight to the LaTeX form of the stored coordinates.
    return latex(self._coordinates)
def _richcmp_(self, other, op):
    r"""
    Comparison of self and other.

    EXAMPLES::

        sage: p1 = HyperbolicPlane().UHP().get_point(1 + I)
        sage: p2 = HyperbolicPlane().UHP().get_point(2 + I)
        sage: p1 == p2
        False
        sage: p1 == p1
        True

        sage: p1 = HyperbolicPlane().PD().get_point(0)
        sage: p2 = HyperbolicPlane().PD().get_point(1/2 + 2*I/3)
        sage: p1 == p2
        False
        sage: p1 == p1
        True

        sage: p1 = HyperbolicPlane().KM().get_point((0,0))
        sage: p2 = HyperbolicPlane().KM().get_point((0, 1/2))
        sage: p1 == p2
        False

        sage: p1 = HyperbolicPlane().HM().get_point((0,0,1))
        sage: p2 = HyperbolicPlane().HM().get_point((0,0,1/1))
        sage: p1 == p2
        True
    """
    # Equivalent (De Morgan) to the guard not(A or B): bail out with
    # "not equal" only when other is neither a hyperbolic point nor an
    # element of the same parent.
    if (not isinstance(other, HyperbolicPoint)
            and self.parent() is not other.parent()):
        return op == op_NE
    # bool is required to convert symbolic (in)equalities
    return bool(richcmp(self._coordinates, other._coordinates, op))
def __rmul__(self, other):
    r"""
    Implement the action of matrices on points of hyperbolic space.

    INPUT:

    - ``other`` -- a :class:`HyperbolicIsometry` or a matrix (which is
      lifted to an isometry of ``self.parent()``)

    OUTPUT: the image point; raises ``TypeError`` for any other operand.

    EXAMPLES::

        sage: A = matrix(2, [0, 1, 1, 0])
        sage: A = HyperbolicPlane().UHP().get_isometry(A)
        sage: A * HyperbolicPlane().UHP().get_point(2 + I)
        Point in UHP 1/5*I + 2/5

    We also lift matrices into isometries::

        sage: B = diagonal_matrix([-1, -1, 1])
        sage: B = HyperbolicPlane().HM().get_isometry(B)
        sage: B * HyperbolicPlane().HM().get_point((0, 1, sqrt(2)))
        Point in HM (0, -1, sqrt(2))
    """
    if isinstance(other, HyperbolicIsometry):
        return other(self)
    elif is_Matrix(other):
        # TODO: Currently the __mul__ from the matrices gets called first
        # and returns an error instead of calling this method
        A = self.parent().get_isometry(other)
        return A(self)
    else:
        # Bug fix: the implicitly concatenated literals were missing the
        # separating space, yielding "... for *:<self> and <other>".
        raise TypeError("unsupported operand type(s) for *: "
                        "{0} and {1}".format(self, other))
#######################
# Setters and Getters #
#######################
def coordinates(self):
    r"""
    Return the coordinates of the point.

    The value is whatever was stored at construction time; coordinates
    given as tuples are stored (and returned) as vectors.

    EXAMPLES::

        sage: HyperbolicPlane().UHP().get_point(2 + I).coordinates()
        I + 2

        sage: HyperbolicPlane().PD().get_point(1/2 + 1/2*I).coordinates()
        1/2*I + 1/2

        sage: HyperbolicPlane().KM().get_point((1/3, 1/4)).coordinates()
        (1/3, 1/4)

        sage: HyperbolicPlane().HM().get_point((0,0,1)).coordinates()
        (0, 0, 1)
    """
    # Plain accessor for the attribute set in __init__.
    return self._coordinates
def model(self):
    r"""
    Return the model to which the :class:`HyperbolicPoint` belongs.

    EXAMPLES::

        sage: HyperbolicPlane().UHP().get_point(I).model()
        Hyperbolic plane in the Upper Half Plane Model

        sage: HyperbolicPlane().PD().get_point(0).model()
        Hyperbolic plane in the Poincare Disk Model

        sage: HyperbolicPlane().KM().get_point((0,0)).model()
        Hyperbolic plane in the Klein Disk Model

        sage: HyperbolicPlane().HM().get_point((0,0,1)).model()
        Hyperbolic plane in the Hyperboloid Model
    """
    # The model is the point's parent in Sage's parent/element framework,
    # so this is simply an alias for ``parent()``.
    return self.parent()
def to_model(self, model):
    """
    Convert ``self`` to the ``model``.

    INPUT:

    - ``model`` -- the image model, or a string naming it

    EXAMPLES::

        sage: UHP = HyperbolicPlane().UHP()
        sage: PD = HyperbolicPlane().PD()
        sage: PD.get_point(1/2+I/2).to_model(UHP)
        Point in UHP I + 2
        sage: PD.get_point(1/2+I/2).to_model('UHP')
        Point in UHP I + 2
    """
    target = model
    if isinstance(target, str):
        # Resolve a model name such as 'UHP' to the model object itself.
        target = getattr(self.parent().realization_of(), target)()
    return target(self)
def is_boundary(self):
"""
Return ``True`` if ``self`` is a boundary | |
snapshot files and folders
self.check_restricted_access(tool.snapshots)
self.check_restricted_access(snapshot)
for obj in snapshot.objectValues():
self.check_restricted_access(obj)
if hasattr(aq_base(obj), 'objectValues'):
for child in obj.objectValues():
self.check_restricted_access(child)
def test_applyContext(self):
    # applyContext() must populate the (previously emptied) import,
    # export and toolset registries from the files in the context.
    from ..tool import EXPORT_STEPS_XML
    from ..tool import IMPORT_STEPS_XML
    from ..tool import TOOLSET_XML
    site = self._makeSite()
    tool = self._makeOne('setup_tool').__of__(site)
    # Start from empty registries so everything found afterwards must
    # have come from the applied context.
    tool.getImportStepRegistry().clear()
    tool.getExportStepRegistry().clear()
    tool.getToolsetRegistry().clear()
    context = DummyImportContext(site, tool=tool)
    context._files[IMPORT_STEPS_XML] = _SINGLE_IMPORT_XML
    context._files[EXPORT_STEPS_XML] = _SINGLE_EXPORT_XML
    context._files[TOOLSET_XML] = _NORMAL_TOOLSET_XML
    tool.applyContext(context)
    # The single import step from _SINGLE_IMPORT_XML is registered.
    import_registry = tool.getImportStepRegistry()
    self.assertEqual(len(import_registry.listSteps()), 1)
    self.assertTrue('one' in import_registry.listSteps())
    info = import_registry.getStepMetadata('one')
    self.assertEqual(info['id'], 'one')
    self.assertEqual(info['title'], 'One Step')
    self.assertEqual(info['version'], '1')
    self.assertTrue('One small step' in info['description'])
    self.assertEqual(info['handler'],
                     'Products.GenericSetup.tests.test_registry.ONE_FUNC')
    self.assertEqual(import_registry.getStep('one'), ONE_FUNC)
    # The single export step from _SINGLE_EXPORT_XML is registered.
    export_registry = tool.getExportStepRegistry()
    self.assertEqual(len(export_registry.listSteps()), 1)
    # Bug fix: this previously re-checked import_registry (copy-paste);
    # the export registry is what this section is about.
    self.assertTrue('one' in export_registry.listSteps())
    info = export_registry.getStepMetadata('one')
    self.assertEqual(info['id'], 'one')
    self.assertEqual(info['title'], 'One Step')
    self.assertTrue('One small step' in info['description'])
    self.assertEqual(info['handler'],
                     'Products.GenericSetup.tests.test_registry.ONE_FUNC')
    self.assertEqual(export_registry.getStep('one'), ONE_FUNC)
def test_listContextInfos_empty(self):
    # Without registered profiles or snapshots nothing is listed.
    portal = self._makeSite()
    portal.setup_tool = self._makeOne('setup_tool')
    self.assertEqual(len(portal.setup_tool.listContextInfos()), 0)
def test_listContextInfos_with_snapshot(self):
    # A snapshot appears as a single 'snapshot-<title>' context.
    portal = self._makeSite()
    portal.setup_tool = self._makeOne('setup_tool')
    portal.setup_tool.createSnapshot('testing')
    contexts = portal.setup_tool.listContextInfos()
    self.assertEqual(len(contexts), 1)
    context = contexts[0]
    self.assertEqual(context['id'], 'snapshot-testing')
    self.assertEqual(context['title'], 'testing')
    self.assertEqual(context['type'], 'snapshot')
def test_listContextInfos_with_registered_base_profile(self):
    from ..interfaces import BASE
    # A registered base profile is listed with type 'base'.
    profile_registry.registerProfile('foo', 'Foo', '', self._PROFILE_PATH,
                                     'Foo', BASE)
    portal = self._makeSite()
    portal.setup_tool = self._makeOne('setup_tool')
    contexts = portal.setup_tool.listContextInfos()
    self.assertEqual(len(contexts), 1)
    context = contexts[0]
    self.assertEqual(context['id'], 'profile-Foo:foo')
    self.assertEqual(context['title'], 'Foo')
    self.assertEqual(context['type'], 'base')
def test_listContextInfos_with_registered_extension_profile(self):
    from ..interfaces import EXTENSION
    # A registered extension profile is listed with type 'extension'.
    profile_registry.registerProfile('foo', 'Foo', '', self._PROFILE_PATH,
                                     'Foo', EXTENSION)
    portal = self._makeSite()
    portal.setup_tool = self._makeOne('setup_tool')
    contexts = portal.setup_tool.listContextInfos()
    self.assertEqual(len(contexts), 1)
    context = contexts[0]
    self.assertEqual(context['id'], 'profile-Foo:foo')
    self.assertEqual(context['title'], 'Foo')
    self.assertEqual(context['type'], 'extension')
def test_listContextInfos_with_ordering(self):
    # Snapshots are listed before profiles; within each group entries
    # are sorted case-insensitively.
    from ..interfaces import BASE
    from ..interfaces import EXTENSION
    # three extension profiles
    profile_registry.registerProfile(
        'bar', 'bar', '', self._PROFILE_PATH, 'bar', EXTENSION)
    profile_registry.registerProfile(
        'foo', 'foo', '', self._PROFILE_PATH, 'foo', EXTENSION)
    profile_registry.registerProfile(
        'upper', 'UPPER', '', self._PROFILE_PATH, 'UPPER', EXTENSION)
    # one base profile
    profile_registry.registerProfile(
        'base', 'base', '', self._PROFILE_PATH, 'base', BASE)
    site = self._makeSite()
    site.setup_tool = self._makeOne('setup_tool')
    tool = site.setup_tool
    # two snapshots whose titles differ only in case
    tool.createSnapshot('UPPER')
    tool.createSnapshot('lower')
    infos = tool.listContextInfos()
    self.assertEqual(len(infos), 6)
    # We sort case insensitively, so by lowercase.
    # First snapshots.
    self.assertEqual(infos[0]['id'], 'snapshot-lower')
    self.assertEqual(infos[1]['id'], 'snapshot-UPPER')
    # Then base and extension profiles
    self.assertEqual(infos[2]['id'], 'profile-bar:bar')
    self.assertEqual(infos[3]['id'], 'profile-base:base')
    self.assertEqual(infos[4]['id'], 'profile-foo:foo')
    self.assertEqual(infos[5]['id'], 'profile-UPPER:upper')
def test_getProfileImportDate_nonesuch(self):
    # Unknown profile ids have no recorded import date.
    portal = self._makeSite()
    portal.setup_tool = self._makeOne('setup_tool')
    self.assertIsNone(portal.setup_tool.getProfileImportDate('nonesuch'))
def test_getProfileImportDate_simple_id(self):
    from OFS.Image import File
    # The ISO timestamp is derived from the import log filename.
    portal = self._makeSite()
    portal.setup_tool = self._makeOne('setup_tool')
    log_name = 'import-all-foo-20070315123456.log'
    portal.setup_tool._setObject(log_name, File(log_name, '', b''))
    self.assertEqual(portal.setup_tool.getProfileImportDate('foo'),
                     '2007-03-15T12:34:56Z')
def test_getProfileImportDate_id_with_colon(self):
    from OFS.Image import File
    # ':' in a profile id corresponds to '_' in the log filename.
    portal = self._makeSite()
    portal.setup_tool = self._makeOne('setup_tool')
    log_name = 'import-all-foo_bar-20070315123456.log'
    portal.setup_tool._setObject(log_name, File(log_name, '', b''))
    self.assertEqual(portal.setup_tool.getProfileImportDate('foo:bar'),
                     '2007-03-15T12:34:56Z')
def test_getProfileImportDate_id_with_prefix(self):
    # A log for an id that merely starts with the same prefix
    # ('foo_bar-boo') must not shadow the one for 'foo_bar'.
    from OFS.Image import File
    portal = self._makeSite()
    portal.setup_tool = self._makeOne('setup_tool')
    for log_name in ('import-all-foo_bar-20070315123456.log',
                     'import-all-foo_bar-boo-20070315123456.log'):
        portal.setup_tool._setObject(log_name, File(log_name, '', b''))
    self.assertEqual(portal.setup_tool.getProfileImportDate('foo:bar'),
                     '2007-03-15T12:34:56Z')
def test_profileVersioning(self):
    """Running an upgrade step updates the recorded profile version.

    Registers a versioned profile and one upgrade step (destination
    version 1.1) in the global registries -- presumably reset by this
    test case's teardown; confirm -- then runs the step through
    manage_doUpgrades and checks the recorded last version.
    """
    site = self._makeSite()
    site.setup_tool = self._makeOne('setup_tool')
    tool = site.setup_tool
    profile_id = 'dummy_profile'
    product_name = 'GenericSetup'
    directory = os.path.split(__file__)[0]
    path = os.path.join(directory, 'versioned_profile')
    # register profile
    profile_registry.registerProfile(profile_id,
                                     'Dummy Profile',
                                     'This is a dummy profile',
                                     path,
                                     product=product_name)
    # register upgrade step
    step = UpgradeStep("Upgrade",
                       "GenericSetup:dummy_profile", '*', '1.1', '',
                       dummy_upgrade,
                       None, "1")
    _registerUpgradeStep(step)
    # test initial states
    profile_id = ':'.join((product_name, profile_id))
    # The registered profile reports 1.1, but nothing is installed yet.
    self.assertEqual(tool.getVersionForProfile(profile_id), '1.1')
    self.assertEqual(tool.getLastVersionForProfile(profile_id),
                     'unknown')
    # run upgrade steps
    request = site.REQUEST
    request.form['profile_id'] = profile_id
    steps = listUpgradeSteps(tool, profile_id, '1.0')
    step_id = steps[0]['id']
    request.form['upgrades'] = [step_id]
    tool.manage_doUpgrades()
    # Versions are recorded as tuples of version-part strings.
    self.assertEqual(tool.getLastVersionForProfile(profile_id),
                     ('1', '1'))
def test_get_and_setLastVersionForProfile(self):
    """Versions round-trip with or without the 'profile-' prefix, and
    setting a profile back to 'unknown' drops it from the mapping."""
    portal = self._makeSite()
    portal.setup_tool = self._makeOne('setup_tool')
    setup_tool = portal.setup_tool
    self.assertEqual(setup_tool._profile_upgrade_versions, {})
    # Unregistered profiles report 'unknown', prefixed or not.
    for pid in ('foo', 'profile-foo'):
        self.assertEqual(setup_tool.getLastVersionForProfile(pid),
                         'unknown')
    setup_tool.setLastVersionForProfile('foo', '1.0')
    for pid in ('foo', 'profile-foo'):
        self.assertEqual(setup_tool.getLastVersionForProfile(pid),
                         ('1', '0'))
    setup_tool.setLastVersionForProfile('profile-foo', '2.0')
    for pid in ('foo', 'profile-foo'):
        self.assertEqual(setup_tool.getLastVersionForProfile(pid),
                         ('2', '0'))
    self.assertEqual(setup_tool._profile_upgrade_versions,
                     {'foo': ('2', '0')})
    # Setting the version to 'unknown' removes the entry entirely.
    setup_tool.setLastVersionForProfile('profile-foo', 'unknown')
    self.assertEqual(setup_tool._profile_upgrade_versions, {})
def test_unsetLastVersionForProfile(self):
    """unsetLastVersionForProfile removes exactly one profile entry,
    accepting the optional 'profile-' prefix."""
    portal = self._makeSite()
    portal.setup_tool = self._makeOne('setup_tool')
    setup_tool = portal.setup_tool
    setup_tool.setLastVersionForProfile('foo', '1.0')
    setup_tool.setLastVersionForProfile('bar', '2.0')
    self.assertEqual(setup_tool._profile_upgrade_versions,
                     {'foo': ('1', '0'), 'bar': ('2', '0')})
    # The 'profile-' prefix is accepted and stripped.
    setup_tool.unsetLastVersionForProfile('profile-foo')
    self.assertEqual(setup_tool._profile_upgrade_versions,
                     {'bar': ('2', '0')})
    setup_tool.unsetLastVersionForProfile('bar')
    self.assertEqual(setup_tool._profile_upgrade_versions, {})
def test_purgeProfileVersions(self):
    """purgeProfileVersions drops every recorded profile version."""
    portal = self._makeSite()
    portal.setup_tool = self._makeOne('setup_tool')
    setup_tool = portal.setup_tool
    setup_tool.setLastVersionForProfile('foo', '1.0')
    setup_tool.setLastVersionForProfile('bar', '2.0')
    self.assertEqual(setup_tool._profile_upgrade_versions,
                     {'foo': ('1', '0'), 'bar': ('2', '0')})
    setup_tool.purgeProfileVersions()
    self.assertEqual(setup_tool._profile_upgrade_versions, {})
def test_listProfilesWithUpgrades(self):
    """The profile listings and hasPendingUpgrades stay mutually consistent.

    Walks one profile through every state: no steps registered, steps
    registered but profile not installed, installed at an old version,
    after each of two upgrade steps, and uninstalled again.  Registry
    registrations are global (presumably reset by teardown -- confirm).
    """
    site = self._makeSite()
    site.setup_tool = self._makeOne('setup_tool')
    tool = site.setup_tool
    # Nothing registered at all: every listing is empty.
    self.assertEqual(tool.listProfilesWithUpgrades(), [])
    self.assertEqual(tool.listProfilesWithPendingUpgrades(), [])
    self.assertEqual(tool.listUptodateProfiles(), [])
    self.assertEqual(tool.hasPendingUpgrades(), False)
    profile_id = 'dummy_profile'
    product_name = 'GenericSetup'
    directory = os.path.split(__file__)[0]
    path = os.path.join(directory, 'versioned_profile')
    # register profile
    profile_registry.registerProfile(profile_id,
                                    'Dummy Profile',
                                    'This is a dummy profile',
                                    path,
                                    product=product_name)
    # A profile without upgrade steps does not appear anywhere.
    self.assertEqual(tool.listProfilesWithUpgrades(), [])
    self.assertEqual(tool.listProfilesWithPendingUpgrades(), [])
    self.assertEqual(tool.listUptodateProfiles(), [])
    self.assertEqual(tool.hasPendingUpgrades(), False)
    # register upgrade step
    step1 = UpgradeStep("Upgrade 1",
                        "GenericSetup:dummy_profile", '*', '1.1', '',
                        dummy_upgrade,
                        None, "1")
    _registerUpgradeStep(step1)
    # Steps exist, but the profile is not installed, so nothing pends.
    self.assertEqual(tool.listProfilesWithUpgrades(),
                     [u'GenericSetup:dummy_profile'])
    self.assertEqual(tool.listProfilesWithPendingUpgrades(), [])
    self.assertEqual(tool.listUptodateProfiles(), [])
    self.assertEqual(tool.hasPendingUpgrades(), False)
    # register another upgrade step
    step2 = UpgradeStep("Upgrade 2",
                        "GenericSetup:dummy_profile", '1.1', '1.2', '',
                        dummy_upgrade,
                        None, "1")
    _registerUpgradeStep(step2)
    self.assertEqual(tool.listProfilesWithUpgrades(),
                     [u'GenericSetup:dummy_profile'])
    self.assertEqual(tool.listProfilesWithPendingUpgrades(), [])
    self.assertEqual(tool.listUptodateProfiles(), [])
    self.assertEqual(tool.hasPendingUpgrades(), False)
    # get full profile id
    profile_id = ':'.join((product_name, profile_id))
    # Pretend the profile was installed
    tool.setLastVersionForProfile(profile_id, '1.0')
    # Installed at 1.0 with steps up to 1.2 available: upgrades pend.
    self.assertEqual(tool.listProfilesWithUpgrades(),
                     [u'GenericSetup:dummy_profile'])
    self.assertEqual(tool.listProfilesWithPendingUpgrades(),
                     [u'GenericSetup:dummy_profile'])
    self.assertEqual(tool.listUptodateProfiles(), [])
    self.assertEqual(tool.hasPendingUpgrades(), True)
    # run first upgrade step
    request = site.REQUEST
    request.form['profile_id'] = profile_id
    steps = listUpgradeSteps(tool, profile_id, '1.0')
    step_id = steps[0]['id']
    request.form['upgrades'] = [step_id]
    tool.manage_doUpgrades()
    self.assertEqual(tool.getLastVersionForProfile(profile_id),
                     ('1', '1'))
    # Now at 1.1; step2 (1.1 -> 1.2) is still pending.
    self.assertEqual(tool.listProfilesWithUpgrades(),
                     [u'GenericSetup:dummy_profile'])
    self.assertEqual(tool.listProfilesWithPendingUpgrades(),
                     [u'GenericSetup:dummy_profile'])
    self.assertEqual(tool.listUptodateProfiles(), [])
    self.assertEqual(tool.hasPendingUpgrades(), True)
    # run second upgrade step
    request = site.REQUEST
    request.form['profile_id'] = profile_id
    steps = listUpgradeSteps(tool, profile_id, '1.1')
    step_id = steps[0]['id']
    request.form['upgrades'] = [step_id]
    tool.manage_doUpgrades()
    self.assertEqual(tool.getLastVersionForProfile(profile_id),
                     ('1', '2'))
    # At the final version the profile is up to date.
    self.assertEqual(tool.listProfilesWithUpgrades(),
                     [u'GenericSetup:dummy_profile'])
    self.assertEqual(tool.listProfilesWithPendingUpgrades(), [])
    self.assertEqual(tool.listUptodateProfiles(),
                     [u'GenericSetup:dummy_profile'])
    self.assertEqual(tool.hasPendingUpgrades(), False)
    # Pretend the profile was never installed.
    tool.unsetLastVersionForProfile(profile_id)
    self.assertEqual(tool.listProfilesWithPendingUpgrades(), [])
    self.assertEqual(tool.listUptodateProfiles(), [])
    self.assertEqual(tool.hasPendingUpgrades(), False)
def test_hasPendingUpgrades(self):
    """hasPendingUpgrades tracks each profile independently.

    Registers two profiles with one upgrade step each (global
    registries -- presumably reset by teardown; confirm) and walks them
    through install/upgrade states, checking both the per-profile and
    the no-argument 'any profile' forms.
    """
    site = self._makeSite()
    site.setup_tool = self._makeOne('setup_tool')
    tool = site.setup_tool
    profile_id_1 = 'dummy_profile1'
    profile_id_2 = 'dummy_profile2'
    product_name = 'GenericSetup'
    directory = os.path.split(__file__)[0]
    path = os.path.join(directory, 'versioned_profile')
    # register profiles
    profile_registry.registerProfile(profile_id_1,
                                    'Dummy Profile 1',
                                    'This is dummy profile 1',
                                    path,
                                    product=product_name)
    profile_registry.registerProfile(profile_id_2,
                                    'Dummy Profile 2',
                                    'This is dummy profile 2',
                                    path,
                                    product=product_name)
    # get full profile ids
    profile_id_1 = ':'.join((product_name, profile_id_1))
    profile_id_2 = ':'.join((product_name, profile_id_2))
    # test
    # No steps registered yet; unknown ids are simply False.
    self.assertEqual(tool.hasPendingUpgrades(), False)
    self.assertEqual(tool.hasPendingUpgrades(profile_id_1), False)
    self.assertEqual(tool.hasPendingUpgrades(profile_id_2), False)
    self.assertEqual(tool.hasPendingUpgrades('non-existing'), False)
    # register upgrade steps
    step1 = UpgradeStep("Upgrade 1",
                        profile_id_1, '*', '1.1', '',
                        dummy_upgrade,
                        None, "1")
    _registerUpgradeStep(step1)
    step2 = UpgradeStep("Upgrade 2",
                        profile_id_2, '*', '2.2', '',
                        dummy_upgrade,
                        None, "2")
    _registerUpgradeStep(step2)
    # No profile has been applied, so no upgrade is pending.
    self.assertEqual(tool.hasPendingUpgrades(), False)
    self.assertEqual(tool.hasPendingUpgrades(profile_id_1), False)
    self.assertEqual(tool.hasPendingUpgrades(profile_id_2), False)
    # Pretend profile 1 was installed to an earlier version.
    tool.setLastVersionForProfile(profile_id_1, '1.0')
    self.assertEqual(tool.hasPendingUpgrades(), True)
    self.assertEqual(tool.hasPendingUpgrades(profile_id_1), True)
    self.assertEqual(tool.hasPendingUpgrades(profile_id_2), False)
    # Pretend profile 2 was installed to an earlier version.
    tool.setLastVersionForProfile(profile_id_2, '2.0')
    self.assertEqual(tool.hasPendingUpgrades(), True)
    self.assertEqual(tool.hasPendingUpgrades(profile_id_1), True)
    self.assertEqual(tool.hasPendingUpgrades(profile_id_2), True)
    # Pretend profile 1 was installed to the final version.
    tool.setLastVersionForProfile(profile_id_1, '1.1')
    # The no-argument form stays True while ANY profile still pends.
    self.assertEqual(tool.hasPendingUpgrades(), True)
    self.assertEqual(tool.hasPendingUpgrades(profile_id_1), False)
    self.assertEqual(tool.hasPendingUpgrades(profile_id_2), True)
    # Pretend profile 2 was installed to the final version.
    tool.setLastVersionForProfile(profile_id_2, '2.2')
    self.assertEqual(tool.hasPendingUpgrades(), False)
    self.assertEqual(tool.hasPendingUpgrades(profile_id_1), False)
    self.assertEqual(tool.hasPendingUpgrades(profile_id_2), False)
def test_manage_doUpgrades_no_profile_id_or_updates(self):
    """manage_doUpgrades with an empty request is a harmless no-op."""
    portal = self._makeSite()
    portal.setup_tool = self._makeOne('setup_tool')
    setup_tool = portal.setup_tool
    setup_tool.manage_doUpgrades()
    self.assertEqual(setup_tool._profile_upgrade_versions, {})
def test_manage_doUpgrades_upgrade_w_no_target_version(self):
    """A step with a '*' destination records no profile version.

    NOTE(review): this request uses 'profile_id' as a *list* and the
    singular key 'upgrade', whereas the other tests set a string
    'profile_id' and the key 'upgrades' on request.form -- confirm
    these are the keys manage_doUpgrades reads on this code path.
    """
    def notool():
        # Upgrade handler that does nothing.
        return None
    step = UpgradeStep('TITLE', 'foo', '*', '*', 'DESC', notool)
    _registerUpgradeStep(step)
    site = self._makeSite()
    site.setup_tool = self._makeOne('setup_tool')
    tool = site.setup_tool
    request = site.REQUEST
    request['profile_id'] = ['foo']
    request['upgrade'] = [step.id]
    tool.manage_doUpgrades()
    # '*' destination: there is no concrete version to record.
    self.assertEqual(tool._profile_upgrade_versions, {})
def test_upgradeProfile_no_profile_id_or_updates(self):
    """upgradeProfile silently ignores an unregistered profile id."""
    portal = self._makeSite()
    portal.setup_tool = self._makeOne('setup_tool')
    setup_tool = portal.setup_tool
    # Mostly this checks that neither call raises.
    setup_tool.upgradeProfile('no.such.profile:default')
    self.assertEqual(setup_tool._profile_upgrade_versions, {})
    setup_tool.upgradeProfile('no.such.profile:default', dest='42')
    self.assertEqual(setup_tool._profile_upgrade_versions, {})
def test_persistent_profile_upgrade_versions(self):
    """_profile_upgrade_versions participates in transaction savepoints."""
    portal = self._makeSite()
    portal.setup_tool = self._makeOne('setup_tool')
    setup_tool = portal.setup_tool
    before_any = transaction.savepoint()
    setup_tool.setLastVersionForProfile('foo', '1.0')
    after_foo = transaction.savepoint()
    setup_tool.setLastVersionForProfile('bar', '2.0')
    self.assertEqual(setup_tool._profile_upgrade_versions,
                     {'foo': ('1', '0'), 'bar': ('2', '0')})
    # Rolling back each savepoint undoes the changes in turn.
    after_foo.rollback()
    self.assertEqual(setup_tool._profile_upgrade_versions,
                     {'foo': ('1', '0')})
    before_any.rollback()
    self.assertEqual(setup_tool._profile_upgrade_versions, {})
def test_separate_profile_upgrade_versions(self):
    """Each tool instance owns its _profile_upgrade_versions mapping.

    The attribute used to be a class attribute, which risked sharing
    state between instances; it is now created in __init__.  Keep this
    test to guard against a regression.
    """
    portal = self._makeSite()
    portal.setup_tool1 = self._makeOne('setup_tool1')
    first = portal.setup_tool1
    portal.setup_tool2 = self._makeOne('setup_tool2')
    second = portal.setup_tool2
    first._profile_upgrade_versions['foo'] = '1.0'
    self.assertEqual(second._profile_upgrade_versions, {})
    second.setLastVersionForProfile('bar', '2.0')
    self.assertEqual(self._makeOne('t')._profile_upgrade_versions, {})
def test_upgradeProfile(self):
def dummy_handler(tool):
return None
def step3_handler(tool):
tool._step3_applied = 'just a marker'
def step3_checker(tool):
# False means already applied or does not apply.
# True means can be applied.
return not hasattr(tool, '_step3_applied')
step1 = UpgradeStep('Step 1', 'foo', '0', '1', | |
s):
self.cpu = c
self.sub = s
def is_64_bit(self):
    """True when the CPU type carries the 64-bit ABI flag."""
    return bool(self.cpu & CPU_ARCH_ABI64)
# Lookup table of [name, cputype, cpusubtype] rows consulted by
# __str__; the subtype column is compared against (sub & 0x00ffffff).
cpu_infos = [
    ["arm64", CPU_TYPE_ARM64, 2],
    ["x86_64", CPU_TYPE_X86_64, 3],
    ["x86_64", CPU_TYPE_X86_64, CPU_TYPE_ANY],
]
def __str__(self):
    """Render a known architecture name, else 'cputype.cpusubtype' hex."""
    masked_sub = self.sub & 0x00ffffff
    for name, cpu_type, cpu_subtype in self.cpu_infos:
        if self.cpu == cpu_type and masked_sub == cpu_subtype:
            return name
    return "{0:x}.{1:x}".format(self.cpu, self.sub)
class Magic(Enum):
    """Enum wrapper around the mach_header / fat_header magic value."""

    enum = {
        'MH_MAGIC': MH_MAGIC,
        'MH_CIGAM': MH_CIGAM,
        'MH_MAGIC_64': MH_MAGIC_64,
        'MH_CIGAM_64': MH_CIGAM_64,
        'FAT_MAGIC': FAT_MAGIC,
        'FAT_CIGAM': FAT_CIGAM
    }

    def __init__(self, initial_value=0):
        Enum.__init__(self, initial_value, self.enum)

    def is_skinny_mach_file(self):
        """True for a thin (single-architecture) mach-o magic."""
        return self.value in (MH_MAGIC, MH_CIGAM, MH_MAGIC_64, MH_CIGAM_64)

    def is_universal_mach_file(self):
        """True for a fat (multi-architecture) mach-o magic."""
        return self.value in (FAT_MAGIC, FAT_CIGAM)

    def unpack(self, data):
        """Read the 4-byte magic using the host's native byte order."""
        data.set_byte_order('native')
        self.value = data.get_uint32()

    def get_byte_order(self):
        """Return the struct byte-order char implied by the magic."""
        if self.value in (MH_CIGAM, MH_CIGAM_64, FAT_CIGAM):
            # Byte-swapped magics mean the file is the opposite
            # endianness from the host.
            return swap_unpack_char()
        return '='

    def is_64_bit(self):
        return self.value in (MH_MAGIC_64, MH_CIGAM_64)
def __init__(self, debugger):
    """Bind a debugger and start with no parsed content."""
    self.debugger = debugger
    self.magic = Mach.Magic()
    self.content = None
    self.path = None
def extract(self, path, extractor):
    """Parse mach-o content for *path* from an existing extractor."""
    self.path = path
    self.unpack(extractor)
def parse(self, path):
    """Parse the mach-o file at *path* from disk.

    NOTE(review): the file handle is deliberately left open -- the
    FileExtract is stored by the content object (see Skinny.unpack's
    ``self.data = data``) and sections are read from it lazily later,
    so closing here would break those reads.  Confirm the handle's
    lifetime is acceptable before re-enabling the close.
    """
    self.path = path
    try:
        f = open(self.path, 'rb')
        file_extractor = FileExtract(f, '=')
        self.unpack(file_extractor)
        # f.close()
    except IOError as xxx_todo_changeme:
        (errno, strerror) = xxx_todo_changeme.args
        print("I/O error({0}): {1}".format(errno, strerror))
    except ValueError:
        print("Could not convert data to an integer.")
    except:
        # Report anything unexpected, then re-raise it.
        print("Unexpected error:", sys.exc_info()[0])
        raise
def compare(self, rhs):
    """Compare this file's parsed content with *rhs*'s content.

    Delegates to the content object and returns its result; the
    previous version discarded the boolean that Skinny.compare
    returns, so callers could never check the outcome.
    """
    return self.content.compare(rhs.content)
# --- thin delegation layer ---------------------------------------
# Each method below forwards to self.content, which unpack() set to
# a Mach.Skinny or Mach.Universal instance.  Calling any of them
# before a successful unpack() raises AttributeError (content None).
def dump(self, options=None):
    self.content.dump(options)

def dump_header(self, dump_description=True, options=None):
    self.content.dump_header(dump_description, options)

def dump_load_commands(self, dump_description=True, options=None):
    self.content.dump_load_commands(dump_description, options)

def dump_sections(self, dump_description=True, options=None):
    self.content.dump_sections(dump_description, options)

def dump_section_contents(self, options):
    self.content.dump_section_contents(options)

def dump_symtab(self, dump_description=True, options=None):
    self.content.dump_symtab(dump_description, options)

def dump_symbol_names_matching_regex(self, regex, file=None):
    self.content.dump_symbol_names_matching_regex(regex, file)

def description(self):
    return self.content.description()
def unpack(self, data):
    """Read the magic and build the matching content parser.

    Leaves self.content as None when the magic is unrecognized.
    """
    self.magic.unpack(data)
    if self.magic.is_skinny_mach_file():
        content = Mach.Skinny(self.path, self.debugger)
    elif self.magic.is_universal_mach_file():
        content = Mach.Universal(self.path, self.debugger)
    else:
        content = None
    self.content = content
    if content is not None:
        content.unpack(data, self.magic)
def is_valid(self):
    # True once unpack() recognized the magic and built a content parser.
    return self.content is not None
class Universal:
    """Parser for a fat (multi-architecture) mach-o file.

    Reads the big-endian fat header plus one ArchInfo record per
    architecture, then parses each contained thin (Skinny) file at
    its recorded file offset.
    """

    def __init__(self, path, debugger):
        self.path = path
        self.type = 'universal'
        self.file_off = 0       # file offset of the fat header
        self.magic = None       # Mach.Magic, set by unpack()
        self.nfat_arch = 0      # number of contained slices
        self.archs = list()     # Mach.Universal.ArchInfo per slice
        self.debugger = debugger

    def description(self):
        """Return 'fileoff: path (arch, arch, ...)'."""
        s = '%#8.8x: %s (' % (self.file_off, self.path)
        archs_string = ''
        for arch in self.archs:
            if len(archs_string):
                archs_string += ', '
            archs_string += '%s' % arch.arch
        s += archs_string
        s += ')'
        return s

    def unpack(self, data, magic=None):
        """Parse the fat header and every contained thin file."""
        self.file_off = data.tell()
        if magic is None:
            self.magic = Mach.Magic()
            self.magic.unpack(data)
        else:
            self.magic = magic
            # Caller already consumed the 4-byte magic; rewind the
            # recorded offset accordingly.
            self.file_off = self.file_off - 4
        # Universal headers are always in big endian
        data.set_byte_order('big')
        self.nfat_arch = data.get_uint32()
        # First pass: read all fat_arch records...
        for i in range(self.nfat_arch):
            self.archs.append(Mach.Universal.ArchInfo())
            self.archs[i].unpack(data)
        # ...then seek to each slice and parse it as a thin mach-o.
        for i in range(self.nfat_arch):
            self.archs[i].mach = Mach.Skinny(self.path, self.debugger)
            data.seek(self.archs[i].offset, 0)
            skinny_magic = Mach.Magic()
            skinny_magic.unpack(data)
            self.archs[i].mach.unpack(data, skinny_magic)

    def compare(self, rhs):
        # Not implemented; always reports failure.
        print('error: comparing two universal files is not supported yet')
        return False

    def dump(self, options):
        """Dump the fat header, the per-slice table, then each slice."""
        if options.dump_header:
            print()
            print("Universal Mach File: magic = %s, nfat_arch = %u" % (self.magic, self.nfat_arch))
            print()
        if self.nfat_arch > 0:
            if options.dump_header:
                # Column headers come from the first ArchInfo.
                self.archs[0].dump_header(True, options)
            for i in range(self.nfat_arch):
                self.archs[i].dump_flat(options)
            if options.dump_header:
                print()
            for i in range(self.nfat_arch):
                self.archs[i].mach.dump(options)

    def dump_header(self, dump_description=True, options=None):
        if dump_description:
            print(self.description())
        for i in range(self.nfat_arch):
            self.archs[i].mach.dump_header(True, options)
            print()

    def dump_load_commands(self, dump_description=True, options=None):
        if dump_description:
            print(self.description())
        for i in range(self.nfat_arch):
            self.archs[i].mach.dump_load_commands(True, options)
            print()

    def dump_sections(self, dump_description=True, options=None):
        if dump_description:
            print(self.description())
        for i in range(self.nfat_arch):
            self.archs[i].mach.dump_sections(True, options)
            print()

    def dump_section_contents(self, options):
        for i in range(self.nfat_arch):
            self.archs[i].mach.dump_section_contents(options)
            print()

    def dump_symtab(self, dump_description=True, options=None):
        if dump_description:
            print(self.description())
        for i in range(self.nfat_arch):
            self.archs[i].mach.dump_symtab(True, options)
            print()

    def dump_symbol_names_matching_regex(self, regex, file=None):
        for i in range(self.nfat_arch):
            self.archs[i].mach.dump_symbol_names_matching_regex(
                regex, file)

    def checksec(self):
        # Only run checksec on the slice matching the host machine.
        for i in range(self.nfat_arch):
            if self.archs[i].mach.arch.__str__() == get_host_machine():
                self.archs[i].mach.checksec()

    class ArchInfo:
        """One fat_arch record: arch, file offset/size and alignment."""

        def __init__(self):
            self.arch = Mach.Arch(0, 0)
            self.offset = 0     # file offset of this slice
            self.size = 0       # size of the slice in bytes
            self.align = 0
            self.mach = None    # Mach.Skinny, filled in by Universal.unpack

        def unpack(self, data):
            # Universal headers are always in big endian
            data.set_byte_order('big')
            self.arch.cpu, self.arch.sub, self.offset, self.size, self.align = data.get_n_uint32(
                5)

        def dump_header(self, dump_description=True, options=None):
            # Prints only the column headers for dump_flat().
            if options.verbose:
                print("CPU        SUBTYPE    OFFSET     SIZE       ALIGN")
                print("---------- ---------- ---------- ---------- ----------")
            else:
                print("ARCH       FILEOFFSET FILESIZE   ALIGN")
                print("---------- ---------- ---------- ----------")

        def dump_flat(self, options):
            # One table row per slice; raw numbers in verbose mode.
            if options.verbose:
                print("%#8.8x %#8.8x %#8.8x %#8.8x %#8.8x" % (self.arch.cpu, self.arch.sub, self.offset, self.size, self.align))
            else:
                print("%-10s %#8.8x %#8.8x %#8.8x" % (self.arch, self.offset, self.size, self.align))

        def dump(self):
            print("   cputype: %#8.8x" % self.arch.cpu)
            print("cpusubtype: %#8.8x" % self.arch.sub)
            print("    offset: %#8.8x" % self.offset)
            print("      size: %#8.8x" % self.size)
            print("     align: %#8.8x" % self.align)

        def __str__(self):
            return "Mach.Universal.ArchInfo: %#8.8x %#8.8x %#8.8x %#8.8x %#8.8x" % (
                self.arch.cpu, self.arch.sub, self.offset, self.size, self.align)

        def __repr__(self):
            return "Mach.Universal.ArchInfo: %#8.8x %#8.8x %#8.8x %#8.8x %#8.8x" % (
                self.arch.cpu, self.arch.sub, self.offset, self.size, self.align)
class Flags:
    """Decodes the mach_header 'flags' bit field into flag names."""

    # (mask, name) pairs in the order the original code rendered them.
    _FLAG_BITS = (
        (MH_NOUNDEFS, 'MH_NOUNDEFS'),
        (MH_INCRLINK, 'MH_INCRLINK'),
        (MH_DYLDLINK, 'MH_DYLDLINK'),
        (MH_BINDATLOAD, 'MH_BINDATLOAD'),
        (MH_PREBOUND, 'MH_PREBOUND'),
        (MH_SPLIT_SEGS, 'MH_SPLIT_SEGS'),
        (MH_LAZY_INIT, 'MH_LAZY_INIT'),
        (MH_TWOLEVEL, 'MH_TWOLEVEL'),
        (MH_FORCE_FLAT, 'MH_FORCE_FLAT'),
        (MH_NOMULTIDEFS, 'MH_NOMULTIDEFS'),
        (MH_NOFIXPREBINDING, 'MH_NOFIXPREBINDING'),
        (MH_PREBINDABLE, 'MH_PREBINDABLE'),
        (MH_ALLMODSBOUND, 'MH_ALLMODSBOUND'),
        (MH_SUBSECTIONS_VIA_SYMBOLS, 'MH_SUBSECTIONS_VIA_SYMBOLS'),
        (MH_CANONICAL, 'MH_CANONICAL'),
        (MH_WEAK_DEFINES, 'MH_WEAK_DEFINES'),
        (MH_BINDS_TO_WEAK, 'MH_BINDS_TO_WEAK'),
        (MH_ALLOW_STACK_EXECUTION, 'MH_ALLOW_STACK_EXECUTION'),
        (MH_ROOT_SAFE, 'MH_ROOT_SAFE'),
        (MH_SETUID_SAFE, 'MH_SETUID_SAFE'),
        (MH_NO_REEXPORTED_DYLIBS, 'MH_NO_REEXPORTED_DYLIBS'),
        (MH_PIE, 'MH_PIE'),
        (MH_DEAD_STRIPPABLE_DYLIB, 'MH_DEAD_STRIPPABLE_DYLIB'),
        (MH_HAS_TLV_DESCRIPTORS, 'MH_HAS_TLV_DESCRIPTORS'),
        (MH_NO_HEAP_EXECUTION, 'MH_NO_HEAP_EXECUTION'),
    )

    def __init__(self, b):
        self.bits = b

    def __str__(self):
        # Mirror the original output exactly: flag names joined by
        # ' | ' with a single trailing space when any flag is set.
        s = ''
        for mask, name in self._FLAG_BITS:
            if self.bits & mask:
                s += name + ' | '
        if len(s) > 0:
            s = s[0:-2]
        return s
class FileType(Enum):
    """Enum wrapper for the mach_header 'filetype' field."""

    # Name -> value mapping consumed by the project's Enum base class.
    enum = {
        'MH_OBJECT': MH_OBJECT,
        'MH_EXECUTE': MH_EXECUTE,
        'MH_FVMLIB': MH_FVMLIB,
        'MH_CORE': MH_CORE,
        'MH_PRELOAD': MH_PRELOAD,
        'MH_DYLIB': MH_DYLIB,
        'MH_DYLINKER': MH_DYLINKER,
        'MH_BUNDLE': MH_BUNDLE,
        'MH_DYLIB_STUB': MH_DYLIB_STUB,
        'MH_DSYM': MH_DSYM,
        'MH_KEXT_BUNDLE': MH_KEXT_BUNDLE
    }

    def __init__(self, initial_value=0):
        Enum.__init__(self, initial_value, self.enum)
class Skinny:
def __init__(self, path, debugger):
    """Initialize an empty thin mach-o parser for *path*."""
    self.path = path
    self.debugger = debugger
    self.type = 'skinny'
    self.data = None            # FileExtract, kept by unpack()
    self.file_off = 0
    self.magic = 0
    self.arch = Mach.Arch(0, 0)
    self.filetype = Mach.FileType(0)
    self.ncmds = 0
    self.sizeofcmds = 0
    self.flags = Mach.Flags(0)
    self.uuid = None
    self.commands = list()
    self.segments = list()
    self.symbols = list()
    self.is_encrypted = False
    # Placeholder at index 0 so real sections start at index 1
    # (compare() iterates self.sections[1:]).
    self.sections = [Mach.Section()]
def checksec(self):
    """Print a security-feature summary for this binary.

    Side effects: sets self.has_pie, self.is_uid and self.is_gid
    (has_restricted() reads the latter two).  self.is_encrypted
    defaults to False in __init__ and is only set while unpacking an
    LC_ENCRYPTION_INFO load command.
    """
    macho_stat = os.stat(self.path)
    nx_heap = self.has_nx_heap()  # NOTE(review): unused; re-queried below
    self.has_pie = bool(self.flags.bits & MH_PIE)
    self.is_uid = stat.S_ISUID & macho_stat.st_mode
    self.is_gid = stat.S_ISGID & macho_stat.st_mode
    # Symbol presence is resolved through the attached debugger.
    objc_release, __stack_chk_guard, __stack_chk_fail = self.has_arc_and_strong_stack()
    print(f"ARC : {objc_release}")
    print(f"PIE : {self.has_pie}")
    print(f"Stack Canary : {__stack_chk_guard and __stack_chk_fail}")
    print(f"Encrypted : {self.is_encrypted}")
    print(f"NX Heap : {self.has_nx_heap()}")
    print(f"NX Stack : {self.has_nx_stack()}")
    print(f"Restricted : {self.has_restricted()}")
def has_nx_heap(self):
    """Return True: the heap is treated as never executable.

    The previous conditional on MH_NO_HEAP_EXECUTION returned True on
    both branches; per the original author's note, W^X policy makes
    the heap non-executable regardless of the flag, so the dead
    conditional is collapsed to a plain True.
    """
    return True
def has_nx_stack(self):
    """True unless the header explicitly allows stack execution."""
    return not (self.flags.bits & MH_ALLOW_STACK_EXECUTION)
def has_arc_and_strong_stack(self):
    """Probe the binary's symbols via the debugger for ARC and canaries.

    Returns a (objc_release, __stack_chk_guard, __stack_chk_fail)
    boolean triple: presence of objc_release indicates ARC usage;
    presence of both stack-check symbols indicates stack canaries.
    Creates a throwaway debugger target and restores the previously
    selected one before returning.
    """
    objc_release = False
    __stack_chk_guard = False
    __stack_chk_fail = False
    # Remember the current target so it can be restored afterwards.
    selected_target = self.debugger.GetSelectedTarget()
    target = self.debugger.CreateTarget(self.path)
    for module in target.modules:
        # Only inspect the module that corresponds to this file
        # (case-insensitive path match).
        if fnmatch.fnmatch(module.file.fullpath.lower(), self.path.lower()):
            for i in module.symbols:
                if i.name == "objc_release":
                    objc_release = True
                if i.name == "__stack_chk_guard":
                    __stack_chk_guard = True
                if i.name == "__stack_chk_fail":
                    __stack_chk_fail = True
    self.debugger.DeleteTarget(target)
    self.debugger.SetSelectedTarget(selected_target) # reset back to previously selected target
    return objc_release, __stack_chk_guard, __stack_chk_fail
def has_restricted(self):
    """Check the three ways a binary can be 'restricted'.

    1. signed with an Apple root authority, 2. setuid/setgid mode
    bits, 3. a '__restrict' segment.  Returns a descriptive (truthy)
    string naming the reason, or False.

    NOTE(review): reads self.is_uid / self.is_gid which are only set
    by checksec() -- calling this directly first raises
    AttributeError; confirm intended call order.

    (Removed an unused local 'authority' left over from an earlier
    version.)
    """
    # codesign prints its diagnostics on stderr, hence .stderr here.
    codesign = run_shell_command(f"codesign -dvvvv '{self.path}'").stderr.decode()
    # restrictedByEntitlements / Apple-signed
    if codesign and "Authority=Apple Root CA" in codesign:
        for line in codesign.splitlines():
            if "Authority" in line:
                # Report the first authority line as the reason.
                return f"True ({line})"
    # restrictedBySetGUid
    if self.is_uid or self.is_gid:
        msg = "is_uid" if self.is_uid else "gid"
        return f"True ({msg})"
    # restrictedBySegment
    for seg in self.segments:
        if seg.segname.lower() == "__restrict":
            return "True (__restrict)"
    return False
def description(self):
return '%#8.8x: %s (%s)' % (self.file_off, self.path, self.arch)
def unpack(self, data, magic=None):
    """Parse the mach_header(_64) and all load commands from *data*.

    The extractor is kept in self.data so section contents can be
    read lazily later (see get_data()).  If *magic* is supplied it
    was already read by the caller, so the recorded file offset is
    rewound by the 4 bytes the magic consumed.
    """
    self.data = data
    self.file_off = data.tell()
    if magic is None:
        self.magic = Mach.Magic()
        self.magic.unpack(data)
    else:
        self.magic = magic
        # Caller already consumed the 4-byte magic.
        self.file_off = self.file_off - 4
    data.set_byte_order(self.magic.get_byte_order())
    # mach_header layout after the magic: cputype, cpusubtype,
    # filetype, ncmds, sizeofcmds, flags -- six 32-bit words.
    self.arch.cpu, self.arch.sub, self.filetype.value, self.ncmds, self.sizeofcmds, bits = data.get_n_uint32(
        6)
    self.flags.bits = bits
    if self.is_64_bit():
        data.get_uint32()  # Skip reserved word in mach_header_64
    for i in range(0, self.ncmds):
        lc = self.unpack_load_command(data)
        self.commands.append(lc)
def get_data(self):
    """Return the stored extractor with this file's byte order set,
    or None when nothing has been unpacked yet."""
    if not self.data:
        return None
    self.data.set_byte_order(self.magic.get_byte_order())
    return self.data
def unpack_load_command(self, data):
    """Unpack one load command, specializing by its command code.

    A generic Mach.LoadCommand is read first; for known command codes
    it is re-wrapped in (and re-unpacked as) the specialized class.
    skip(data) positions the extractor at the next load command
    regardless of how much of this one was consumed.

    Bug fix: the encryption branch previously read a bare ``cryptid``
    name, which is undefined at this scope and raised NameError; it
    now reads the cryptid field of the just-unpacked
    EncryptionInfoLoadCommand.
    """
    lc = Mach.LoadCommand()
    lc.unpack(self, data)
    lc_command = lc.command.get_enum_value()
    if lc_command in (LC_SEGMENT, LC_SEGMENT_64):
        lc = Mach.SegmentLoadCommand(lc)
        lc.unpack(self, data)
    elif lc_command in (LC_LOAD_DYLIB,
                        LC_ID_DYLIB,
                        LC_LOAD_WEAK_DYLIB,
                        LC_REEXPORT_DYLIB):
        lc = Mach.DylibLoadCommand(lc)
        lc.unpack(self, data)
    elif lc_command in (LC_LOAD_DYLINKER,
                        LC_SUB_FRAMEWORK,
                        LC_SUB_CLIENT,
                        LC_SUB_UMBRELLA,
                        LC_SUB_LIBRARY,
                        LC_ID_DYLINKER,
                        LC_RPATH):
        lc = Mach.LoadDYLDLoadCommand(lc)
        lc.unpack(self, data)
    elif lc_command == LC_DYLD_INFO_ONLY:
        lc = Mach.DYLDInfoOnlyLoadCommand(lc)
        lc.unpack(self, data)
    elif lc_command == LC_SYMTAB:
        lc = Mach.SymtabLoadCommand(lc)
        lc.unpack(self, data)
    elif lc_command == LC_DYSYMTAB:
        lc = Mach.DYLDSymtabLoadCommand(lc)
        lc.unpack(self, data)
    elif lc_command == LC_UUID:
        lc = Mach.UUIDLoadCommand(lc)
        lc.unpack(self, data)
    elif lc_command in (LC_CODE_SIGNATURE,
                        LC_SEGMENT_SPLIT_INFO,
                        LC_FUNCTION_STARTS):
        lc = Mach.DataBlobLoadCommand(lc)
        lc.unpack(self, data)
    elif lc_command == LC_UNIXTHREAD:
        lc = Mach.UnixThreadLoadCommand(lc)
        lc.unpack(self, data)
    elif lc_command == LC_ENCRYPTION_INFO:
        lc = Mach.EncryptionInfoLoadCommand(lc)
        lc.unpack(self, data)
        # A non-zero cryptid means this binary is encrypted.  Was a
        # bare 'cryptid' (NameError) before this fix.
        self.is_encrypted = bool(lc.cryptid)
    lc.skip(data)
    return lc
def compare(self, rhs):
    """Compare the section contents of two thin mach-o files.

    Returns True when every real section of self (the index-0
    placeholder is skipped) has a byte-identical counterpart in
    *rhs*, else False.  Progress and mismatch details are printed as
    a side effect.  Files of different container types cannot be
    compared.

    (Removed two unused length locals and a large commented-out diff
    block left over from an earlier version; behavior is unchanged.)
    """
    print("\nComparing:")
    print("a) %s %s" % (self.arch, self.path))
    print("b) %s %s" % (rhs.arch, rhs.path))
    result = True
    if self.type == rhs.type:
        # sections[0] is a placeholder; real sections start at 1.
        for lhs_section in self.sections[1:]:
            rhs_section = rhs.get_section_by_section(lhs_section)
            if rhs_section:
                print('comparing %s.%s...' % (lhs_section.segname, lhs_section.sectname), end=' ')
                sys.stdout.flush()
                lhs_data = lhs_section.get_contents(self)
                rhs_data = rhs_section.get_contents(rhs)
                if lhs_data and rhs_data:
                    if lhs_data == rhs_data:
                        print('ok')
                    else:
                        result = False
                        print('error: sections differ')
                elif lhs_data and not rhs_data:
                    print('error: section data missing from b:')
                    print('a) %s' % (lhs_section))
                    print('b) %s' % (rhs_section))
                    result = False
                elif not lhs_data and rhs_data:
                    print('error: section data missing from a:')
                    print('a) %s' % (lhs_section))
                    print('b) %s' % (rhs_section))
                    result = False
                elif lhs_section.offset or rhs_section.offset:
                    # Both sides claim file contents but neither could
                    # be read.
                    print('error: section data missing for both a and b:')
                    print('a) %s' % (lhs_section))
                    print('b) %s' % (rhs_section))
                    result = False
                else:
                    # Neither section occupies file space (e.g. zerofill).
                    print('ok')
            else:
                result = False
                print('error: section %s is missing in %s' % (lhs_section.sectname, rhs.path))
    else:
        print('error: comparing a %s mach-o file with a %s mach-o file is not supported' % (self.type, rhs.type))
        result = False
    if not result:
        print('error: mach files differ')
    return result
def dump_header(self, dump_description=True, options=None):
if options.verbose:
print("MAGIC CPU SUBTYPE FILETYPE NUM CMDS SIZE CMDS FLAGS")
print("---------- ---------- | |
)
self._Execute( 'CREATE TABLE services ( service_id INTEGER PRIMARY KEY, service_key BLOB_BYTES, service_type INTEGER, name TEXT, port INTEGER, dictionary_string TEXT );' )
self._Execute( 'CREATE TABLE accounts ( account_id INTEGER PRIMARY KEY, service_id INTEGER, account_key BLOB_BYTES, hashed_access_key BLOB_BYTES, account_type_id INTEGER, created INTEGER, expires INTEGER, dictionary_string TEXT );' )
self._Execute( 'CREATE UNIQUE INDEX accounts_account_key_index ON accounts ( account_key );' )
self._Execute( 'CREATE UNIQUE INDEX accounts_hashed_access_key_index ON accounts ( hashed_access_key );' )
self._Execute( 'CREATE TABLE account_scores ( service_id INTEGER, account_id INTEGER, score_type INTEGER, score INTEGER, PRIMARY KEY ( service_id, account_id, score_type ) );' )
self._Execute( 'CREATE TABLE account_types ( account_type_id INTEGER PRIMARY KEY, service_id INTEGER, dump TEXT );' )
self._Execute( 'CREATE TABLE analyze_timestamps ( name TEXT, timestamp INTEGER );' )
self._Execute( 'CREATE TABLE files_info ( master_hash_id INTEGER PRIMARY KEY, size INTEGER, mime INTEGER, width INTEGER, height INTEGER, duration INTEGER, num_frames INTEGER, num_words INTEGER );' )
self._Execute( 'CREATE TABLE reasons ( reason_id INTEGER PRIMARY KEY, reason TEXT );' )
self._Execute( 'CREATE UNIQUE INDEX reasons_reason_index ON reasons ( reason );' )
self._Execute( 'CREATE TABLE registration_keys ( registration_key BLOB_BYTES PRIMARY KEY, service_id INTEGER, account_type_id INTEGER, account_key BLOB_BYTES, access_key BLOB_BYTES UNIQUE, expires INTEGER );' )
self._Execute( 'CREATE TABLE sessions ( session_key BLOB_BYTES, service_id INTEGER, account_id INTEGER, expires INTEGER );' )
self._Execute( 'CREATE TABLE version ( version INTEGER, year INTEGER, month INTEGER );' )
# master
self._Execute( 'CREATE TABLE IF NOT EXISTS external_master.hashes ( master_hash_id INTEGER PRIMARY KEY, hash BLOB_BYTES UNIQUE );' )
self._Execute( 'CREATE TABLE IF NOT EXISTS external_master.tags ( master_tag_id INTEGER PRIMARY KEY, tag TEXT UNIQUE );' )
# inserts
current_time_struct = time.localtime()
( current_year, current_month ) = ( current_time_struct.tm_year, current_time_struct.tm_mon )
self._Execute( 'INSERT INTO version ( version, year, month ) VALUES ( ?, ?, ? );', ( HC.SOFTWARE_VERSION, current_year, current_month ) )
# set up server admin
admin_service = HydrusNetwork.GenerateService( HC.SERVER_ADMIN_KEY, HC.SERVER_ADMIN, 'server admin', HC.DEFAULT_SERVER_ADMIN_PORT )
self._AddService( admin_service ) # this sets up the admin account and a registration token by itself
def _DeleteOrphans( self ):
    """Planned orphan file/thumbnail cleanup -- currently a no-op stub.

    Sketch of the intended algorithm, kept from the original notes:
    """
    # make a table for files
    # make a table for thumbnails
    # populate both tables with what you have in your hdd
    # if the filename isn't even a hash, schedule it for immediate deletion instead
    # delete from the tables based on what is in current and pending repo file tables
    # delete from the file tables based on what is in update tables
    # delete whatever is left
    # might want to split this up into 256 jobs--depends on how fast its bits run
    # might also want to set server_busy, if it isn't already
    # also think about how often it runs--maybe only once a month is appropriate
    return # return to this to fix it for new system
def _DeleteRepositoryPetitions( self, service_id, subject_account_ids ):
    """Clear all outstanding pending and petitioned content rows lodged by
    the given accounts on the given repository.

    :param service_id: the repository's service id.
    :param subject_account_ids: iterable of account ids whose pending and
        petitioned files, mappings, tag parents and tag siblings are purged.
    :return: None
    """
    ( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name, ip_addresses_table_name ) = GenerateRepositoryFilesTableNames( service_id )
    ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = GenerateRepositoryMappingsTableNames( service_id )
    ( current_tag_parents_table_name, deleted_tag_parents_table_name, pending_tag_parents_table_name, petitioned_tag_parents_table_name ) = GenerateRepositoryTagParentsTableNames( service_id )
    ( current_tag_siblings_table_name, deleted_tag_siblings_table_name, pending_tag_siblings_table_name, petitioned_tag_siblings_table_name ) = GenerateRepositoryTagSiblingsTableNames( service_id )
    # every pending/petitioned table gets the same per-account purge, so do
    # them in one loop instead of eight copy-pasted statements
    table_names = (
        pending_files_table_name, petitioned_files_table_name,
        pending_mappings_table_name, petitioned_mappings_table_name,
        pending_tag_parents_table_name, petitioned_tag_parents_table_name,
        pending_tag_siblings_table_name, petitioned_tag_siblings_table_name
    )
    for table_name in table_names:
        # note: the generator must be rebuilt per table, as _ExecuteMany consumes it
        self._ExecuteMany( 'DELETE FROM ' + table_name + ' WHERE account_id = ?;', ( ( subject_account_id, ) for subject_account_id in subject_account_ids ) )
def _DeleteService( self, service_key ):
    """Remove a service and every master-table row that references it.

    :param service_key: the service's identifying key.
    :return: None

    Fix: the original resolved ``service_id`` twice (an identical second
    ``_GetServiceId`` call); the redundant lookup is removed.
    """
    service_id = self._GetServiceId( service_key )
    # must read the type before the services row is deleted below
    service_type = self._GetServiceType( service_id )
    for table_name in ( 'services', 'accounts', 'account_types', 'account_scores', 'registration_keys', 'sessions' ):
        self._Execute( 'DELETE FROM ' + table_name + ' WHERE service_id = ?;', ( service_id, ) )
    if service_type in HC.REPOSITORIES:
        # repositories own additional per-service content tables
        self._RepositoryDrop( service_id )
def _GenerateRegistrationKeysFromAccount( self, service_key, account: HydrusNetwork.Account, num, account_type_key, expires ):
    """Mint registration keys on behalf of an admin account.

    Resolves the service and account type ids and delegates to
    :meth:`_GenerateRegistrationKeys`, returning its list of new
    registration keys.
    """
    service_id = self._GetServiceId( service_key )
    account_type_id = self._GetAccountTypeId( service_id, account_type_key )
    new_keys = self._GenerateRegistrationKeys( service_id, num, account_type_id, expires )
    return new_keys
def _GenerateRegistrationKeys( self, service_id, num, account_type_id, expires, force_registration_key = None ):
    """Create ``num`` registration keys for the given service/account type.

    Only the SHA-256 digest of each registration key is stored; the
    plaintext keys are returned to the caller for distribution. A fresh
    account key and access key are pre-generated alongside each one.

    :raises HydrusExceptions.BadRequestException: if the account type is the
        null account type and a null account already exists.
    """
    account_type = self._GetAccountType( service_id, account_type_id )
    if account_type.IsNullAccount():
        existing = self._Execute( 'SELECT 1 FROM accounts WHERE account_type_id = ?;', ( account_type_id, ) ).fetchone()
        if existing is not None:
            # null account already exists
            raise HydrusExceptions.BadRequestException( 'You cannot create new null accounts!' )
    keys = []
    for i in range( num ):
        registration_key = os.urandom( HC.HYDRUS_KEY_LENGTH ) if force_registration_key is None else force_registration_key
        keys.append( ( registration_key, os.urandom( HC.HYDRUS_KEY_LENGTH ), os.urandom( HC.HYDRUS_KEY_LENGTH ) ) )
    insert_rows = []
    for ( registration_key, account_key, access_key ) in keys:
        hashed_registration_key = hashlib.sha256( registration_key ).digest()
        insert_rows.append( ( sqlite3.Binary( hashed_registration_key ), service_id, account_type_id, sqlite3.Binary( account_key ), sqlite3.Binary( access_key ), expires ) )
    self._ExecuteMany( 'INSERT INTO registration_keys ( registration_key, service_id, account_type_id, account_key, access_key, expires ) VALUES ( ?, ?, ?, ?, ?, ? );', insert_rows )
    return [ registration_key for ( registration_key, account_key, access_key ) in keys ]
def _GetAccessKey( self, service_key, registration_key ):
    """Trade a registration token for a freshly minted access key.

    Only the SHA-256 digest of the token is compared against the database,
    and a brand-new access key is written on every call (see comments).

    :param service_key: identifies the service the token belongs to.
    :param registration_key: plaintext registration token bytes.
    :raises HydrusExceptions.InsufficientCredentialsException: if the token
        is unknown to this service.
    :return: the new access key bytes.
    """
    service_id = self._GetServiceId( service_key )
    # we generate a new access_key every time this is requested so that no one with access to the registration token can peek at the access_key before the legit user fetches it for real
    # the reg_key is deleted when the last-requested access_key is used to create a session, which calls getaccountkeyfromaccesskey
    registration_key_sha256 = hashlib.sha256( registration_key ).digest()
    result = self._Execute( 'SELECT 1 FROM registration_keys WHERE service_id = ? AND registration_key = ?;', ( service_id, sqlite3.Binary( registration_key_sha256 ) ) ).fetchone()
    if result is None:
        raise HydrusExceptions.InsufficientCredentialsException( 'The service could not find that registration token in its database.' )
    new_access_key = os.urandom( HC.HYDRUS_KEY_LENGTH )
    self._Execute( 'UPDATE registration_keys SET access_key = ? WHERE service_id = ? AND registration_key = ?;', ( sqlite3.Binary( new_access_key ), service_id, sqlite3.Binary( registration_key_sha256 ) ) )
    return new_access_key
def _GetAccount( self, service_id, account_id ) -> HydrusNetwork.Account:
    """Load one account row and inflate it into an Account object.

    Raises if no row matches (fetchone() returning None fails the unpack),
    same as the original behaviour.
    """
    row = self._Execute( 'SELECT account_key, account_type_id, created, expires, dictionary_string FROM accounts WHERE service_id = ? AND account_id = ?;', ( service_id, account_id ) ).fetchone()
    ( account_key, account_type_id, created, expires, dictionary_string ) = row
    # the serialised dictionary holds the account's mutable state
    dictionary = HydrusSerialisable.CreateFromString( dictionary_string )
    account_type = self._GetAccountType( service_id, account_type_id )
    return HydrusNetwork.Account.GenerateAccountFromTuple( ( account_key, account_type, created, expires, dictionary ) )
def _GetAccountFromContent( self, service_key, content ):
service_id = self._GetServiceId( service_key )
service_type = self._GetServiceType( service_id )
content_type = content.GetContentType()
content_data = content.GetContentData()
if content_type == HC.CONTENT_TYPE_FILES:
if service_type != HC.FILE_REPOSITORY:
raise HydrusExceptions.NotFoundException( 'Only | |
# DonkiDirector/DataServer.py
# -*- coding: utf-8 -*-
import sys
import threading
import multiprocessing
import time
import traceback
import zmq
import numpy
from hdfwriter import HDFWriter
from metadatahandler import MetaDataHandler
HDFWTHREADS = 1
MAX_HDFWTHREADS = 3
HDF5FILESIZE = 15
_dbg = True
class DataServer(multiprocessing.Process):
"""
Do management of all Data for the HDF5 files.
"""
def __init__(self, daq_xml_config , data_queue, task_queue, notif_queue):
    """Set up the data-server process: queues, writer pool and metadata.

    :param daq_xml_config: DAQ XML configuration handed to MetaDataHandler.
    :param data_queue: queue of incoming (daq_key, bn_in, bn_fi, data) tuples.
    :param task_queue: queue of control commands (see _process_tasks).
    :param notif_queue: queue used to notify the parent process.

    Fix: the metadata handler construction used a bare ``except:``, which
    also swallowed SystemExit/KeyboardInterrupt; narrowed to ``Exception``
    while keeping the best-effort "run without metadata" behaviour.
    """
    multiprocessing.Process.__init__(self)
    self.data_queue = data_queue
    self.task_queue = task_queue
    self.notify_queue = notif_queue
    # main-loop flags: go_on keeps run() alive, daq_running gates data handling
    self.go_on = True
    self.daq_running = False
    # sorted backlog of data chunks waiting for an HDF writer slot
    self.data_archives = []
    self._hdf_threads = HDFWTHREADS
    #: The number of bunches to be saved for each file. This parameter
    #: may be changed afterward and will affect the next HDF5 file to
    #: be created. The Default Value is 15.
    self.file_size = HDF5FILESIZE
    #: an instance of :class:`~fermidaq.lib.metadatahandler.MetaDataHandler`
    #: responsible for managing the metadata.
    try:
        self.meta = MetaDataHandler(daq_xml_config)
    except Exception:
        # best-effort: run without metadata support if the handler cannot
        # be built (e.g. bad or missing XML configuration)
        self.meta = None
    #: an instance of :class:`~fermidaq.lib.xmlconfig.DaqXmlConfig` - has all the
    #: information necessary to know how to create the
    #: :class:`~fermidaq.lib.attribdaq.AttribDaq` and how to organize them inside
    #: the HDF5 file.
    #:
    #: List of :class:`~fermidaq.lib.bunchmanager.BunchManager.HDFs`.
    #:
    self._hdfwriters = self._init_hdfs('', '')
    self.file_prefix = ''
    self.Files_contiguous = True
    self.files_opened = 0
    self._stop_at_current_file = False
    # inclusive bunch-number range currently covered by open HDF writers
    self.allocated_bunch_range = (0, 0)
    # -1 means "no limit" on the number of files per acquisition
    self.files2save = -1
    self.first_shot_saved = 0
    self.shots_saved = 0
    # daq_key -> 0; registry of currently enabled data sources
    self._daq_list = dict()
    # protects data_ready() against concurrent access
    self.mutex = threading.Lock()
class HDFs():
    """
    Auxiliar class to help controlling the :class:`~fermidaq.lib.hdfwriter.HDFWriter`
    threads. It has the following attributes:
    * ptr2hdfwriter: instance of :class:`~fermidaq.lib.hdfwriter.HDFWriter`
    * initial_bn : the first bunch number for saving file for that instance.
    * last_bn : the last bunch number to save in the HDF5 file for that instance.
    * working : flag to indicate if it is busy or idle.
    """
    def __init__(self,pt):
        # the wrapped writer thread and its identifying key
        self.ptr2hdfwriter = pt
        self.hdf_key = pt.key
        # inclusive bunch-number range assigned to this writer (0,0 = none yet)
        self.initial_bn = 0
        self.last_bn = 0
        # True while the writer is busy saving its assigned range
        self.working = False
def run(self):
    """Process entry point: pump data from the queue into HDF writers.

    Starts the metadata handler and writer threads, then loops while
    ``go_on``: drains control tasks, ingests data tuples (bounded per
    pass), keeps the archive sorted by first bunch number, and dispatches
    archived chunks to writers via :meth:`data_ready`. On exit all writer
    threads are stopped.
    """
    try:
        if self.meta is not None:
            print "Meta starting"
            self.meta.start()
        # start the pre-built writer threads (list comp used for side effect)
        [th.ptr2hdfwriter.start() for th in self._hdfwriters]
        while self.go_on:
            self._process_tasks()
            if not self.daq_running:
                # idle: avoid a busy loop while no acquisition is running
                time.sleep(0.01)
                continue
            dataready = True
            read_loop = 0
            # bounded drain so one pass cannot starve the task queue
            while self.data_queue.qsize() > 0 and read_loop < 20000: #not self.data_queue.empty() and read_loop < 20:
                data_in = self.data_queue.get()
                self.store_data(data_in)
                read_loop += 1
                self.data_queue.task_done()
            # Sort archive on the base of bunchnumber
            self.data_archives.sort(key=self.getKey)
            #
            store_loop = 0
            # bounded dispatch of archived chunks to the HDF writers
            while len(self.data_archives) > 0 and store_loop < 10000:
                # Wait for slow data
                next_daq_key = (self.data_archives[0])[0]
                next_bn_in = (self.data_archives[0])[1]
                next_bn_fi = (self.data_archives[0])[2]
                # data_ready pops/requeues archive entries; serialise access
                self.mutex.acquire()
                self.data_ready(next_daq_key,next_bn_in,next_bn_fi)
                self.mutex.release()
                store_loop += 1
    except:
        # NOTE(review): bare except keeps the process alive on any error,
        # only logging the traceback -- confirm this is intended
        print traceback.format_exc()
    #
    for _hdf in self._hdfwriters:
        _hdf.ptr2hdfwriter.stop_thread()
def _process_tasks(self):
    """Drain the control queue and apply each command.

    Each task is a tuple whose first element names the command; extra
    elements are its arguments. Unknown commands are silently ignored.
    Errors in one task are logged and do not stop processing.
    """
    while self.task_queue.qsize() > 0:
        try:
            next_task = self.task_queue.get()
            print next_task
            if next_task[0] == 'stop':
                # shut down the whole process loop (and metadata thread)
                if self.meta is not None:
                    self.meta.go_on = False
                self.go_on = False
            elif next_task[0] == 'stop_and_clear':
                # abort acquisition: close writers, discard queued/archived data
                for _hdf in self._hdfwriters:
                    _hdf.ptr2hdfwriter._force_close_daq()
                while not self.data_queue.empty():
                    self.data_queue.get()
                del self.data_archives[:]
                if self.meta is not None:
                    # Stop metadata acquisition
                    self.meta.stop_polling()
                self.daq_running = False
                #
            elif next_task[0] == 'file_prefix':
                self.file_prefix = next_task[1]
                for _hdf in self._hdfwriters:
                    _hdf.ptr2hdfwriter.file_prefix = next_task[1]
            elif next_task[0] == 'file_path':
                self.file_path = next_task[1]
                for _hdf in self._hdfwriters:
                    _hdf.ptr2hdfwriter.file_path = next_task[1]
            elif next_task[0] == 'Files_contiguous':
                self.Files_contiguous = next_task[1]
            elif next_task[0] == 'stop_at_this_file':
                self._stop_at_current_file = True
            elif next_task[0] == 'Files_to_save':
                self.files2save = next_task[1]
            elif next_task[0] == 'File_size':
                self.file_size = next_task[1]
            elif next_task[0] == 'daq_switch_off':
                # disable data sources in the writers and drop them locally
                for _hdf in self._hdfwriters:
                    _hdf.ptr2hdfwriter.daq_switch_off(next_task[1])
                for daq_key in next_task[1]:
                    if daq_key in self._daq_list.keys():
                        self._daq_list.pop(daq_key)
            elif next_task[0] == 'daq_switch_on':
                for daq_key in next_task[1]:
                    self._daq_list[daq_key] = 0
            elif next_task[0] == 'start_daq':
                # reset per-acquisition counters and begin taking data
                self.daq_running = True
                self._stop_at_current_file = False
                self.files_saved = 0
                self.files_opened = 0
                self.allocated_bunch_range = (0,0)
                self.first_shot_saved = 0
                self.shots_saved = 0
                if self.meta is not None:
                    self.meta.start_polling()
            elif next_task[0] == 'pause_metadata':
                # NOTE(review): unlike other branches, this assumes
                # self.meta is not None -- confirm callers guarantee it
                daq_meta_key = next_task[1]
                do_pause = next_task[2]
                self.meta.meta_attribute[daq_meta_key].is_paused = do_pause
            elif next_task[0] == 'hdf_finished':
                hdf_key = next_task[1]
                full_file_path = next_task[2]
                report = next_task[3]
                self.hdf_finished(hdf_key, full_file_path, report)
            #
            self.task_queue.task_done()
        except:
            print traceback.format_exc()
def getKey(self, dataitem):
    """Sort key for an archive entry ``(daq_key, bn_in, bn_fi, data)``.

    The archive is kept ordered by each chunk's first bunch number.
    """
    first_bunch_number = dataitem[1]
    return first_bunch_number
def store_data(self,data_in):
    """Validate an incoming ``(daq_key, bn_first, bn_last, payload)`` tuple
    and append it to the internal archive.

    The two bunch numbers are normalised so bn_in <= bn_fi. Unknown daq
    keys are auto-registered (with a debug print). Only list/ndarray
    payloads are archived; a length mismatch against the bunch range is
    reported but the chunk is still stored.
    """
    try:
        daq_key = data_in[0]
        # normalise the bunch range regardless of the order supplied
        bn_in = min(data_in[1],data_in[2])
        bn_fi = max(data_in[1],data_in[2])
        if daq_key not in self._daq_list.keys():
            print "ABORRRO!",daq_key
            # auto-register the unexpected source instead of dropping it
            self._daq_list[daq_key] = 0
            #return
        if isinstance(data_in[3], list) or isinstance(data_in[3], numpy.ndarray):
            #self.data_ready(daq_key,bn_in,bn_fi,data_in[3])
            self.data_archives.append((daq_key,bn_in,bn_fi,data_in[3]))
            # payload length should equal the (inclusive) bunch range
            if len(data_in[3]) != (bn_fi - bn_in + 1):
                print "MMMMMMM.....",daq_key,bn_in,bn_fi,len(data_in[3])
    except:
        print traceback.format_exc()
def data_ready(self,daq_key,bn_in,bn_f):
"""
Receive Notification from :class:`~fermidaq.lib.attribdaq.AttribDaq`
and pass them to the :class:`~fermidaq.lib.hdfwriter.HDFWriter`.
In order to do this, it will first, look for the list of the
busy :class:`~fermidaq.lib.hdfwriter.HDFWriter` so, to see, if
one of those threads must be notified.
If none of those threads should be notified, it will pick one of
the idle threads, and pass to it the notification, just after configuring
its acquisition range (:meth:`~fermidaq.lib.hdfwriter.HDFWriter.save_conf`).
"""
try:
# first of all: we must be sure that there are hdfs allocated
# for the given range:
# so, if the last allocated bunch number is lower
# than the current given final bunch number, we should allocate
# more hdfs to store this data.
while (self.Files_contiguous or self.files_opened < 1) and \
not self._stop_at_current_file and \
self.allocated_bunch_range[1] < bn_f \
and (self.files_opened < self.files2save
or self.files2save == -1): # it will allocate up to files2save
#at this moment we do not accept that both, bn_in and bn_f is
#so great that we would have to allocate more than one hdfs
#at this single time.
#assert
#bn_f - self.allocated_bunch_range[1] < self.file_size
#new range:
if (not self.Files_contiguous) or (self.allocated_bunch_range[0] < 0):
all_bn_in = bn_in
else:
all_bn_in = self.allocated_bunch_range[1] + 1
all_bn_f = all_bn_in + self.file_size - 1
idle_hdfwriter = [hdf for hdf in self._hdfwriters
if hdf.working == False]
#check if there is a free hdfwriter
if len(idle_hdfwriter) == 0:
if len(self._hdfwriters) < MAX_HDFWTHREADS:
new_id = len(self._hdfwriters)
fpath = self.file_path
fpref = self.file_prefix
#
key='hdf%d'%(new_id)
self._hdfwriters += [self.HDFs(HDFWriter(key, self,
file_path=fpath, file_prefix=fpref))]
#
self._hdfwriters[-1].ptr2hdfwriter.start()
idle_hdfwriter = [self._hdfwriters[-1]]
else:
# NO MORE HDFs!
break
if len(idle_hdfwriter) > 0:
#get the pointer to the free hdfwriter.
free_hdfwriter = idle_hdfwriter[0]
#add one new hdfsPyTango.DevState.ON
if _dbg:
print ("""DataServer: Allocating hdfwriter %s
for range %d->%d"""% (free_hdfwriter.hdf_key,
all_bn_in, all_bn_f))
assert (all_bn_f - all_bn_in + 1) == self.file_size
free_hdfwriter.ptr2hdfwriter.file_path = self.file_path
free_hdfwriter.ptr2hdfwriter.file_prefix = self.file_prefix
free_hdfwriter.ptr2hdfwriter.save_conf(all_bn_in, all_bn_f,self.Files_contiguous)
free_hdfwriter.initial_bn = all_bn_in
free_hdfwriter.last_bn = all_bn_f
free_hdfwriter.ptr2hdfwriter.daq_switch_on(self._daq_list.keys())
free_hdfwriter.working = True
if (self.allocated_bunch_range[0] <= 0):
self.allocated_bunch_range = (all_bn_in, all_bn_f)
else:
self.allocated_bunch_range = (min(all_bn_in,self.allocated_bunch_range[0]),max(all_bn_f,self.allocated_bunch_range[1]))
print "self.data_archives",len(self.data_archives),self.data_queue.qsize()
self.files_opened += 1
#
# Extract data from internal data archive
data_in = (self.data_archives.pop(0))[3]
#
if (bn_f > self.allocated_bunch_range[1]):
if (bn_in > self.allocated_bunch_range[1]):
# chunk of data cannot be allocated at the moment, skip.
self.data_archives.append((daq_key, bn_in, bn_f,data_in))
return
# not all shots can be saved (no more HDF threads)
# postpone 'overflow' shots
last_avail_bn = self.allocated_bunch_range[1]
self.data_archives.append((daq_key, last_avail_bn+1, bn_f,data_in[-(bn_f-last_avail_bn):]))
if len(data_in[-(bn_f-last_avail_bn):]) != (bn_f- (last_avail_bn + 1) + 1):
print "UUUUUUU.....",daq_key,last_avail_bn+1,bn_f,len(data_in[-(bn_f-last_avail_bn):])
#
data_in = data_in[:-(bn_f-last_avail_bn)]
bn_f = last_avail_bn
if len(data_in) != (bn_f-bn_in+1):
print "********",daq_key,len(data_in),(bn_f-bn_in),bn_in,bn_f
#
if (bn_in < self.allocated_bunch_range[0]):
# purge too old data
if (bn_f < self.allocated_bunch_range[0]):
# chunk of data too old: forget about it
return
data_in = data_in[-(bn_f-self.allocated_bunch_range[0]+1):]
bn_in = self.allocated_bunch_range[0]
if len(data_in) != (bn_f-bn_in+1):
print "#########",daq_key,len(data_in),(bn_f-bn_in),bn_in,bn_f
#
#
# Look for all working hdfs which of them has the initial
# bunch and the final bunch in the range of the received
# bunches.
# So, if the initial bunch of the hdf writer is greater of
# the last bunch received or if the last bunch of the hdf writer
# is lower than the first bunch received, this means that the
# range of this hdfwriter is outside the range of this
# input and must be rejected.
# NOTE: working_entry[1] initial_bunch
# working_entry[2] final_bunch
#
# The rule is reject the hdf if:
# hdf.initial_bunch > bn_f or hdf.final_bunch < bn_in
#
pass2this_working = None
pass2this_working = [working_entry for working_entry in \
self._hdfwriters \
if working_entry.working == True and \
not (working_entry.initial_bn > bn_f \
or working_entry.last_bn < bn_in)]
daq_bn_f = -1
daq_bn_in = -1
last_bn_saved = -1
for hdfs_entry in pass2this_working:
hdf_key = hdfs_entry.hdf_key
hdf_bn_in = hdfs_entry.initial_bn
hdf_bn_f = | |
+ 2]]] + 1
if sum(tmpCode) != 0:
tmpCode = [i / sum(tmpCode) for i in tmpCode]
code = code + tmpCode
encodings.append(code)
return encodings
def DDE(seq):
    """Dipeptide Deviation from Expected mean (DDE) descriptor.

    For each of the 400 dipeptides the observed composition is
    standardised against a theoretical mean/variance derived from codon
    counts (61 sense codons): (Dc - Tm) / sqrt(Tv).

    :param seq: protein sequence over the standard 20-letter alphabet.
    :return: a list holding one 400-element feature vector.
    """
    AA = 'ACDEFGHIKLMNPQRSTVWY'
    # codons per amino acid in the standard genetic code
    myCodons = {
        'A': 4, 'C': 2, 'D': 2, 'E': 2, 'F': 2, 'G': 4, 'H': 2, 'I': 3,
        'K': 2, 'L': 6, 'M': 1, 'N': 2, 'P': 4, 'Q': 2, 'R': 6, 'S': 6,
        'T': 4, 'V': 4, 'W': 1, 'Y': 2
    }
    AADict = {aa: i for i, aa in enumerate(AA)}
    diPeptides = [aa1 + aa2 for aa1 in AA for aa2 in AA]
    # theoretical mean Tm per dipeptide
    myTM = [(myCodons[pair[0]] / 61) * (myCodons[pair[1]] / 61) for pair in diPeptides]
    # observed dipeptide composition Dc
    counts = [0] * 400
    for j in range(len(seq) - 1):
        counts[AADict[seq[j]] * 20 + AADict[seq[j + 1]]] += 1
    total = sum(counts)
    if total != 0:
        counts = [c / total for c in counts]
    # theoretical variance Tv, then standardise
    myTV = [tm * (1 - tm) / (len(seq) - 1) for tm in myTM]
    code = [(counts[j] - myTM[j]) / math.sqrt(myTV[j]) for j in range(400)]
    return [code]
def CalculateKSCTriad(sequence, gap, features, AADict):
    """Count g-spaced conjoint-triad features for every spacing g in 0..gap.

    For each g, residues at positions (i, i+g+1, i+2g+2) form a triad whose
    group labels (via ``AADict``) are joined into a feature key; counts are
    min-max scaled by (count - min) / max and concatenated over g.

    Fix: the original body used ``gap`` instead of the loop variable ``g``
    for the spacing, so all gap+1 passes counted the same triads. (A
    corrected duplicate definition later in this file shadowed this one;
    they now agree.)

    :raises ZeroDivisionError: if a pass counts no triads (max == 0).
    """
    res = []
    for g in range(gap + 1):
        myDict = {f: 0 for f in features}
        for i in range(len(sequence)):
            if i + g + 1 < len(sequence) and i + 2 * g + 2 < len(sequence):
                fea = AADict[sequence[i]] + '.' + AADict[sequence[i + g + 1]] + '.' + AADict[sequence[i + 2 * g + 2]]
                myDict[fea] = myDict[fea] + 1
        maxValue, minValue = max(myDict.values()), min(myDict.values())
        for f in features:
            res.append((myDict[f] - minValue) / maxValue)
    return res
def CTriad(seq):
    """Conjoint Triad (CTriad) descriptor.

    Maps the 20 amino acids into 7 physicochemical groups and counts all
    adjacent residue triads (gap 0) via ``CalculateKSCTriad``, min-max
    scaled. Commented-out header/debug code from the original was removed.

    :param seq: protein sequence; must have length >= 3.
    :return: a list holding one 343-element (7^3) vector, or 0 on a
        too-short sequence (original error contract preserved).
    """
    AAGroup = {
        'g1': 'AGV',
        'g2': 'ILFP',
        'g3': 'YMTS',
        'g4': 'HNQW',
        'g5': 'RK',
        'g6': 'DE',
        'g7': 'C'
    }
    myGroups = sorted(AAGroup.keys())
    # residue -> group label
    AADict = {}
    for g in myGroups:
        for aa in AAGroup[g]:
            AADict[aa] = g
    features = [f1 + '.' + f2 + '.' + f3 for f1 in myGroups for f2 in myGroups for f3 in myGroups]
    if len(seq) < 3:
        print('Error: for "CTriad" encoding, the input fasta sequences should be greater than 3. \n\n')
        return 0
    code = CalculateKSCTriad(seq, 0, features, AADict)
    return [code]
def CalculateKSCTriad(sequence, gap, features, AADict):
    """Count g-spaced conjoint-triad features for every spacing g in 0..gap.

    For each spacing g, residues at (i, i+g+1, i+2g+2) form a triad; the
    per-feature counts are min-max scaled as (count - min) / max and the
    gap+1 blocks are concatenated in order of g.
    """
    res = []
    seq_len = len(sequence)
    for g in range(gap + 1):
        counts = dict.fromkeys(features, 0)
        for i in range(seq_len):
            middle = i + g + 1
            third = i + 2 * g + 2
            if middle < seq_len and third < seq_len:
                key = AADict[sequence[i]] + '.' + AADict[sequence[middle]] + '.' + AADict[sequence[third]]
                counts[key] += 1
        hi, lo = max(counts.values()), min(counts.values())
        res.extend((counts[f] - lo) / hi for f in features)
    return res
def KSCTriad(seq, gap=1):
    """k-Spaced Conjoint Triad descriptor.

    Same grouping as CTriad, but counts triads at every spacing 0..gap via
    ``CalculateKSCTriad``, yielding (gap+1) * 343 features.

    :param seq: protein sequence; must have length >= 2*gap + 3.
    :return: a list holding one feature vector, or 0 on a too-short input.
    """
    AAGroup = {
        'g1': 'AGV',
        'g2': 'ILFP',
        'g3': 'YMTS',
        'g4': 'HNQW',
        'g5': 'RK',
        'g6': 'DE',
        'g7': 'C'
    }
    group_names = sorted(AAGroup.keys())
    # residue -> group label
    AADict = {aa: g for g in group_names for aa in AAGroup[g]}
    features = [f1 + '.' + f2 + '.' + f3 for f1 in group_names for f2 in group_names for f3 in group_names]
    if len(seq) < 2 * gap + 3:
        print('Error: for "KSCTriad" encoding, the input fasta sequences should be greater than (2*gap+3). \n\n')
        return 0
    return [CalculateKSCTriad(seq, gap, features, AADict)]
def GTPC(seq):
    """Grouped Tripeptide Composition descriptor.

    Maps residues into 5 physicochemical groups and returns the frequency
    of each of the 125 group triples over all adjacent residue triples.

    Fixes: the running total no longer shadows the builtin ``sum``; the
    unused ``baseNum`` local was removed.

    :param seq: protein sequence over the 20 standard amino acids.
    :return: a list holding one 125-element frequency vector (all zeros if
        the sequence has fewer than 3 residues).
    """
    group = {
        'alphaticr': 'GAVLMI',
        'aromatic': 'FYW',
        'postivecharger': 'KRH',
        'negativecharger': 'DE',
        'uncharger': 'STCPNQ'
    }
    groupKey = group.keys()
    triple = [g1 + '.' + g2 + '.' + g3 for g1 in groupKey for g2 in groupKey for g3 in groupKey]
    # residue -> group name
    index = {aa: key for key in groupKey for aa in group[key]}
    myDict = {t: 0 for t in triple}
    total = 0
    for j in range(len(seq) - 2):
        myDict[index[seq[j]] + '.' + index[seq[j + 1]] + '.' + index[seq[j + 2]]] += 1
        total += 1
    if total == 0:
        code = [0 for t in triple]
    else:
        code = [myDict[t] / total for t in triple]
    return [code]
def generateGroupPairs(groupKey):
    """Return a dict mapping every ordered 'g1.g2' group pair to a zero count."""
    return {key1 + '.' + key2: 0 for key1 in groupKey for key2 in groupKey}
def CKSAAGP(seq, gap=2):
    """Composition of k-Spaced Amino Acid Group Pairs descriptor.

    For every spacing g in 0..gap, counts pairs of residues at distance
    g+1 by physicochemical group and appends the 25 pair frequencies,
    giving (gap+1) * 25 features in one vector.

    Fixes: the amino-acid alphabet had been corrupted to the placeholder
    '<KEY>' (which filtered out every residue); restored to the 20
    standard amino acids. The running total no longer shadows builtin
    ``sum``, and the pair-count dict is built inline.

    :param seq: protein sequence; residues outside the 20-letter alphabet
        are skipped.
    :return: a list holding one feature vector, or 0 if gap < 0.
    """
    if gap < 0:
        print('Error: the gap should be equal or greater than zero' + '\n\n')
        return 0
    group = {
        'alphaticr': 'GAVLMI',
        'aromatic': 'FYW',
        'postivecharger': 'KRH',
        'negativecharger': 'DE',
        'uncharger': 'STCPNQ'
    }
    # the 20 standard amino acids (restored from corrupted '<KEY>')
    AA = 'ARNDCQEGHILKMFPSTWYV'
    groupKey = group.keys()
    # residue -> group name
    index = {aa: key for key in groupKey for aa in group[key]}
    gPairIndex = [key1 + '.' + key2 for key1 in groupKey for key2 in groupKey]
    code = []
    for g in range(gap + 1):
        gPair = {key1 + '.' + key2: 0 for key1 in groupKey for key2 in groupKey}
        total = 0
        for p1 in range(len(seq)):
            p2 = p1 + g + 1
            if p2 < len(seq) and seq[p1] in AA and seq[p2] in AA:
                gPair[index[seq[p1]] + '.' + index[seq[p2]]] += 1
                total += 1
        if total == 0:
            for gp in gPairIndex:
                code.append(0)
        else:
            for gp in gPairIndex:
                code.append(gPair[gp] / total)
    return [code]
def GAAC(seq):
    """Grouped Amino Acid Composition descriptor.

    Returns, for each of the 5 physicochemical groups, the fraction of
    residues in ``seq`` that belong to that group.

    :param seq: non-empty protein sequence (empty input divides by zero,
        as in the original).
    :return: a list holding one 5-element vector.
    """
    group = {
        'alphatic': 'GAVLMI',
        'aromatic': 'FYW',
        'postivecharge': 'KRH',
        'negativecharge': 'DE',
        'uncharge': 'STCPNQ'
    }
    residue_counts = Counter(seq)
    seq_length = len(seq)
    code = [sum(residue_counts[aa] for aa in members) / seq_length for members in group.values()]
    return [code]
def GDPC(seq):
    """Grouped Dipeptide Composition descriptor.

    Maps residues into 5 physicochemical groups and returns the frequency
    of each of the 25 ordered group pairs over all adjacent residue pairs.

    Fixes: the running total no longer shadows the builtin ``sum``; the
    unused ``baseNum`` local was removed.

    :param seq: protein sequence over the 20 standard amino acids.
    :return: a list holding one 25-element frequency vector (all zeros if
        the sequence has fewer than 2 residues).
    """
    group = {
        'alphaticr': 'GAVLMI',
        'aromatic': 'FYW',
        'postivecharger': 'KRH',
        'negativecharger': 'DE',
        'uncharger': 'STCPNQ'
    }
    groupKey = group.keys()
    dipeptide = [g1 + '.' + g2 for g1 in groupKey for g2 in groupKey]
    # residue -> group name
    index = {aa: key for key in groupKey for aa in group[key]}
    myDict = {t: 0 for t in dipeptide}
    total = 0
    for j in range(len(seq) - 1):
        myDict[index[seq[j]] + '.' + index[seq[j + 1]]] += 1
        total += 1
    if total == 0:
        code = [0 for t in dipeptide]
    else:
        code = [myDict[t] / total for t in dipeptide]
    return [code]
def AAINDEX(seq):
    """Encode a sequence with per-residue AAindex physicochemical values.

    The sequence is right-padded with '-' up to the module-level global
    ``Max_length`` (assumed >= len(seq) -- TODO confirm), so every vector
    has fixed width Max_length * n_indices; padding positions contribute
    zeros.

    NOTE(review): values are appended as strings straight from the table
    file, not cast to float -- confirm downstream code converts them.
    """
    temp = "-" * (Max_length - len(seq))
    seq += temp
    AA = 'ARNDCQEGHILKMFPSTWYV'
    # Windows-style relative path, resolved against the current working dir
    fileAAindex = "data\\AAindex1.txt"
    with open(fileAAindex) as f:
        # first line is assumed to be a header and skipped
        records = f.readlines()[1:]
    AAindex = []
    AAindexName = []
    for i in records:
        # NOTE(review): blank lines append None here; the loops below would
        # then raise -- presumably the data file contains no blank lines.
        AAindex.append(i.rstrip().split()[1:] if i.rstrip() != '' else None)
        AAindexName.append(i.rstrip().split()[0] if i.rstrip() != '' else None)
    # residue -> column position within each AAindex row
    index = {}
    for i in range(len(AA)):
        index[AA[i]] = i
    encodings = []
    code = []
    for aa in seq:
        if aa == '-':
            # padding residue: one zero per physicochemical index
            for j in AAindex:
                code.append(0)
            continue
        for j in AAindex:
            code.append(j[index[aa]])
    encodings.append(code)
    return encodings
def CTDT(seq):
group1 = {
'hydrophobicity_PRAM900101': 'RKEDQN',
'hydrophobicity_ARGP820101': 'QSTNGDE',
'hydrophobicity_ZIMJ680101': 'QNGSWTDERA',
'hydrophobicity_PONP930101': 'KPDESNQT',
'hydrophobicity_CASG920101': 'KDEQPSRNTG',
'hydrophobicity_ENGD860101': 'RDKENQHYP',
'hydrophobicity_FASG890101': 'KERSQD',
'normwaalsvolume': 'GASTPDC',
'polarity': 'LIFWCMVY',
'polarizability': 'GASDT',
'charge': 'KR',
'secondarystruct': 'EALMQKRH',
'solventaccess': 'ALFCGIVW'
}
group2 = {
'hydrophobicity_PRAM900101': 'GASTPHY',
'hydrophobicity_ARGP820101': 'RAHCKMV',
'hydrophobicity_ZIMJ680101': 'HMCKV',
'hydrophobicity_PONP930101': 'GRHA',
'hydrophobicity_CASG920101': 'AHYMLV',
'hydrophobicity_ENGD860101': 'SGTAW',
'hydrophobicity_FASG890101': 'NTPG',
'normwaalsvolume': 'NVEQIL',
'polarity': 'PATGS',
'polarizability': 'CPNVEQIL',
'charge': 'ANCQGHILMFPSTWYV',
'secondarystruct': 'VIYCWFT',
'solventaccess': 'RKQEND'
}
group3 = {
'hydrophobicity_PRAM900101': 'CLVIMFW',
'hydrophobicity_ARGP820101': 'LYPFIW',
'hydrophobicity_ZIMJ680101': 'LPFYI',
'hydrophobicity_PONP930101': 'YMFWLCVI',
'hydrophobicity_CASG920101': 'FIWC',
'hydrophobicity_ENGD860101': 'CVLIMF',
'hydrophobicity_FASG890101': 'AYHWVMFLIC',
'normwaalsvolume': 'MHKFRYW',
'polarity': 'HQRKNED',
'polarizability': 'KMHFRYW',
'charge': 'DE',
'secondarystruct': 'GNPSD',
'solventaccess': 'MSPTHY'
}
groups = [group1, group2, group3]
property = (
'hydrophobicity_PRAM900101', 'hydrophobicity_ARGP820101', 'hydrophobicity_ZIMJ680101',
'hydrophobicity_PONP930101',
'hydrophobicity_CASG920101', 'hydrophobicity_ENGD860101', 'hydrophobicity_FASG890101', 'normwaalsvolume',
'polarity', | |
# CreditTool_2.py
"""
Name: CreditTool_2.py
Author: <NAME>
Created: April 25, 2019
Revised: April 25, 2019
Version: Created using Python 2.7.10, Arc version 10.4.1
Requires: ArcGIS version 10.1 or later, Basic (ArcView) license or better
Spatial Analyst extension
The provided Map_Units feature class is used to derive the workspace.
Requires Credit_Project_Area feature class created by Credit Tool 1 unless project
proposes to remove anthropogenic features.
Copyright 2017-2020 Environmental Incentives, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Import system modules
import arcpy
import sys
import os
import gc
import hqtlib
import util
import cohqt
from arcpy.sa import Con
# When running inside ArcGIS Pro (Python 3), reload the project helper
# modules so edits to them are picked up without restarting the session.
if arcpy.ListInstallations()[0] == 'arcgispro': # switch
    import importlib
    importlib.reload(hqtlib) #ensures up-to-date hqtlib runs on arcpro
    importlib.reload(util)
    importlib.reload(cohqt)
def main():
# GET PARAMETER VALUES
Map_Units_Provided = arcpy.GetParameterAsText(0) # optional
Proposed_Modified_Features_Provided = arcpy.GetParameterAsText(1) # optional
Project_Name = arcpy.GetParameterAsText(2)
# DEFINE DIRECTORIES
# Get the pathname to this script
scriptPath = sys.path[0]
arcpy.AddMessage("Script folder: " + scriptPath)
arcpy.AddMessage("Python version: " + sys.version)
# Construct pathname to workspace
if Map_Units_Provided:
projectGDB = arcpy.Describe(Map_Units_Provided).path
elif Proposed_Modified_Features_Provided:
projectGDB = arcpy.Describe(Proposed_Modified_Features_Provided).path
else:
arcpy.AddMessage("Please provide either a Map_Units or " +
"Proposed_Modified_Features layer.")
sys.exit(0)
arcpy.AddMessage("Project geodatabase: " + projectGDB)
Project_Folder = arcpy.Describe(projectGDB).path
arcpy.AddMessage("Project folder:" + Project_Folder)
# Instantiate a cheStandard object
cheStandard = cohqt.cheStandard(projectGDB, scriptPath)
# ENVIRONMENT SETTINGS
# Set workspaces
arcpy.env.workspace = projectGDB
scratch_folder = os.path.join(
arcpy.Describe(projectGDB).path, 'scratch'
)
if arcpy.Exists(scratch_folder):
pass
else:
arcpy.CreateFolder_management( arcpy.Describe(projectGDB).path, 'scratch')
arcpy.env.scratchWorkspace = scratch_folder
# Overwrite outputs
arcpy.env.overwriteOutput = True
# DEFINE GLOBAL VARIABLES
Parameter_Values = cheStandard.ParameterValues
ConiferModifier = cheStandard.ConiferModifier
GrSG_LDI = cheStandard.GrSG_LDI
LekPresenceRaster = cheStandard.LekPresenceRaster
Lek_Distance_Modifier = cheStandard.LekDistanceModifier
SageModifier = cheStandard.SageModifier
GrSG_Habitat = cheStandard.GrSGHabitatRaster
MigrationModifier = cheStandard.MuleDeerMigrationMod
WinterModifier = cheStandard.MuleDeerWinterMod
SummerModifier = cheStandard.MuleDeerSummerMod
MuleDeer_LDI = cheStandard.MuleDeerLDI
emptyRaster = cheStandard.EmptyRaster
BWMD_Open = cheStandard.BWMD_Open
GrSG_Range = cheStandard.GrSGHabitat
Mule_Range = cheStandard.MuleDeerHabitat
cellSize = arcpy.GetRasterProperties_management(
emptyRaster, "CELLSIZEX").getOutput(0)
# Filenames for feature classes or rasters used by this script
MAP_UNITS = "Map_Units"
PROPOSED_MODIFIED_FEATURES = "Proposed_Modified_Features"
CREDIT_PROJECT_AREA = "Credit_Project_Area"
CONIFER_TREATMENT_AREA = "Conifer_Treatment_Area"
# Filenames for feature class and rasters created by this script
INDIRECT_IMPACT_AREA = "Indirect_Impact_Area"
ANALYSIS_AREA = "Analysis_Area"
MAP_UNITS_DISSOLVE = "Map_Units_Dissolve"
# GrSG Filenames
CURRENT_ANTHRO_DISTURBANCE = "GRSG_Pre_Anthro_Disturbance"
PROJECTED_ANTHRO_DISTURBANCE = "GRSG_Post_Anthro_Disturbance"
GRSG_PRE_BREEDING = "GRSG_Pre_Breeding"
GRSG_PRE_SUMMER = "GRSG_Pre_Summer"
GRSG_PRE_WINTER = "GRSG_Pre_Winter"
GRSG_POST_BREEDING = "GRSG_Post_Breeding"
GRSG_POST_SUMMER = "GRSG_Post_Summer"
GRSG_POST_WINTER = "GRSG_Post_Winter"
POST_CONIFER_MODIFIER= "Post_Conifer_Modifier"
# Mule Deer Filenames
CURRENT_ANTHRO_DISTURBANCE_MD = "MuleDeer_Pre_Anthro_Disturbance"
PROJECTED_ANTHRO_DISTURBANCE_MD = "MuleDeer_Post_Anthro_Disturbance"
MULE_PRE_SUMMER = "MuleDeer_Pre_Summer"
MULE_PRE_MIGRATION = "MuleDeer_Pre_Migration"
MULE_PRE_WINTER = "MuleDeer_Pre_Winter"
MULE_POST_SUMMER = "MuleDeer_Post_Summer"
MULE_POST_MIGRATION = "MuleDeer_Post_Migration"
MULE_POST_WINTER = "MuleDeer_Post_Winter"
# ------------------------------------------------------------------------
# FUNCTION CALLS
# Check out Spatial Analyst extension
hqtlib.CheckOutSpatialAnalyst()
# Check provided layers
if not Map_Units_Provided and not Proposed_Modified_Features_Provided:
arcpy.AddError("ERROR:: Please provide a 'Map_Units' and/or "
"'Proposed_Modified_Features' feature.")
sys.exit(0)
if not Proposed_Modified_Features_Provided:
# Ensure Proposed_Modified_Features does not exist
if arcpy.Exists("Proposed_Modified_Features"):
arcpy.AddError("ERROR:: A 'Proposed_Modified_Features' layer "
"was detected in the project's geodatabase. "
"Provide the 'Proposed_Modified_Features' layer "
"and re-run Credit Tool 2.")
sys.exit(0)
if Map_Units_Provided:
# Clear selection, if present
util.ClearSelectedFeatures(Map_Units_Provided)
# Check provided layer
feature = Map_Units_Provided
required_fields = ["Map_Unit_ID", "Map_Unit_Name"]
no_null_fields = ["Map_Unit_ID"]
expected_fcs = [CREDIT_PROJECT_AREA]
hqtlib.CheckPolygonInput(feature, required_fields, expected_fcs,
no_null_fields)
# Update Map Units layer with provided layer
provided_input = Map_Units_Provided
parameter_name = MAP_UNITS
preserve_existing = False
Map_Units = util.AdoptParameter(provided_input, parameter_name,
preserve_existing)
# Add Map Units layer to map
layerFile = cheStandard.getLayerFile("MapUnits.lyr")
util.AddToMap(Map_Units, layerFile)
# Provide location of Credit Project Area
Credit_Project_Area = CREDIT_PROJECT_AREA
if Proposed_Modified_Features_Provided:
# Clear selection, if present
util.ClearSelectedFeatures(Proposed_Modified_Features_Provided)
# Check provided layer
required_fields = ["Type", "Subtype"]
no_null_fields = required_fields
expected_fcs = None
hqtlib.CheckPolygonInput(Proposed_Modified_Features_Provided,
required_fields, expected_fcs, no_null_fields)
# Update Proposed_Modified_Features with provided layer
provided_input = Proposed_Modified_Features_Provided
parameterName = PROPOSED_MODIFIED_FEATURES
preserve_existing = False
Proposed_Modified_Features = util.AdoptParameter(
provided_input, parameterName, preserve_existing
)
# Add Proposed Modified Features layer to map
layerFile = cheStandard.getLayerFile("DebitProjectArea.lyr")
util.AddToMap(Proposed_Modified_Features, layerFile)
# Update message
arcpy.AddMessage("Creating the area of indirect benefit")
# Create Credit_Project_Area for projects that propose to modify
# anthropogenic features
# Create the Indirect_Impact_Area
in_data = Proposed_Modified_Features
out_name = INDIRECT_IMPACT_AREA
Indirect_Impact_Area = hqtlib.CreateIndirectImpactArea(
in_data, Parameter_Values, out_name
)
# Add field "Indirect"
input_feature = Indirect_Impact_Area
fieldsToAdd = ["Indirect"]
fieldTypes = ["TEXT"]
util.AddFields(input_feature, fieldsToAdd, fieldTypes)
# Update field 'Indirect' to equal 'True'
with arcpy.da.UpdateCursor(Indirect_Impact_Area,
fieldsToAdd) as cursor:
for row in cursor:
row[0] = "True"
cursor.updateRow(row)
if Map_Units_Provided:
# Merge with Credit_Project_Boundary
fileList = [Map_Units_Provided, Indirect_Impact_Area]
out_name = "in_memory/Credit_Project_Boundary"
Project_Area = arcpy.Union_analysis(fileList, out_name)
else:
Project_Area = Indirect_Impact_Area
# Eliminate areas of non-habitat to create Credit_Project_Area
out_name = CREDIT_PROJECT_AREA
habitat_bounds = cheStandard.HabitatMgmtArea
Credit_Project_Area = hqtlib.EliminateNonHabitat(
Project_Area, out_name, habitat_bounds
)
# Detect habitat types impacted directly or indirectly
is_grsg = cohqt.DetectHabitat(Credit_Project_Area, GrSG_Range)
is_mule = cohqt.DetectHabitat(Credit_Project_Area, Mule_Range)
# Update message
arcpy.AddMessage("Dissolving all multi-part map units to create "
"Map_Units_Dissolve")
# Dissolve Map Units
in_features = MAP_UNITS
allowable_fields = ["Map_Unit_ID", "Map_Unit_Name", "Indirect"]
out_name = MAP_UNITS_DISSOLVE
anthro_features = None
Map_Units_Dissolve = hqtlib.DissolveMapUnits(in_features, allowable_fields,
out_name, anthro_features)
# Update message
arcpy.AddMessage("Adding Map_Units_Dissolve to map")
# Add layer to map document
feature = Map_Units_Dissolve
layerFile = cheStandard.getLayerFile("MapUnits.lyr")
util.AddToMap(feature, layerFile, zoom_to=True)
# Update message
arcpy.AddMessage("Calculating area in acres for each map unit")
# Calculate Area
hqtlib.CalcAcres(Map_Units_Dissolve)
# Update message
arcpy.AddMessage("Adding transect field to Map Units Dissolve")
# Add transects field to map units table
fields = ["Transects"]
fieldTypes = ["SHORT"]
util.AddFields(Map_Units_Dissolve, fields, fieldTypes)
# Update message
arcpy.AddMessage("Creating Analysis Area")
# Create Analysis Area
out_name = ANALYSIS_AREA
Analysis_Area = hqtlib.CreateAnalysisArea(Credit_Project_Area,
Parameter_Values,
out_name)
# Add Analysis_Area to map
layerFile = cheStandard.getLayerFile("AnalysisArea.lyr")
util.AddToMap(Analysis_Area, layerFile, zoom_to=True)
# Set processing extent to Analysis_Area
arcpy.env.extent = ANALYSIS_AREA
### GREATER SAGE-GROUSE ANTHRO DIST & MODIFIERS ###
if is_grsg:
# Update message
arcpy.AddMessage("Calculating proportion of each map unit within 1 km "
"of a lek")
# Calculate proportion of map unit within 1 km of a lek
inZoneData = Map_Units_Dissolve
inValueRaster = cheStandard.LekPresenceRaster
zoneField = "Map_Unit_ID"
outTable = "Proportion_Lek"
hqtlib.CalcZonalStats(inZoneData, zoneField, inValueRaster, outTable)
# Join the zonal statistic to the Map Units Dissolve table
field_name = "PropLek"
hqtlib.JoinMeanToTable(inZoneData, outTable, zoneField, field_name)
# Update message
arcpy.AddMessage("Calculating proportion of each map unit in the mesic "
"precip zone")
# Calculate Proportion of each map unit in the mesic precip zone
inZoneData = Map_Units_Dissolve
inValueRaster = cheStandard.Precip
zoneField = "Map_Unit_ID"
outTable = "Proportion_Mesic"
hqtlib.CalcZonalStats(inZoneData, zoneField, inValueRaster, outTable)
# Join the zonal statistic to the Map Units Dissolve table
field_name = "PropMesic"
hqtlib.JoinMeanToTable(inZoneData, outTable, zoneField, field_name)
# Update message
arcpy.AddMessage("Calculating pre-project anthropogenic "
"disturbance modifier for greater sage-grouse")
# Calculate Current_Anthro_Disturbance
dist_field = "GrSG_Dist"
weight_field = "GrSG_Weight"
term = cheStandard.CreditTerms[0]
unique_proposed_subtypes = []
anthro_disturbance_type = "Pre"
Current_Anthro_Disturbance = cohqt.CalcAnthroDisturbance(
Parameter_Values, term, unique_proposed_subtypes,
anthro_disturbance_type, cheStandard, dist_field, weight_field,
cellSize, emptyRaster
)
Current_Anthro_Disturbance.save(CURRENT_ANTHRO_DISTURBANCE)
# Update message
arcpy.AddMessage("Current_Anthro_Disturbance Calculated")
arcpy.AddMessage("Calculating Pre-Project Habitat Modifiers for"
"Greater Sage-Grouse")
# Calculate pre-project cumulative habitat modifiers
winterHabitatPre = cohqt.calcWinterHabitatGRSG(
Current_Anthro_Disturbance,
ConiferModifier,
GrSG_LDI,
GrSG_Habitat
)
LSDMWinterPre = cohqt.applyLekUpliftModifierPre(
winterHabitatPre,
LekPresenceRaster
)
breedingHabitatPre = cohqt.calcBreedingHabitatGRSG(
Current_Anthro_Disturbance,
ConiferModifier,
GrSG_LDI,
Lek_Distance_Modifier,
GrSG_Habitat
)
LSDMBreedingPre = cohqt.applyLekUpliftModifierPre(
breedingHabitatPre,
LekPresenceRaster
)
summerHabitatPre = cohqt.calcSummerHabitatGRSG(
Current_Anthro_Disturbance,
ConiferModifier,
GrSG_LDI,
SageModifier,
GrSG_Habitat
)
LSDMSummerPre = cohqt.applyLekUpliftModifierPre(
summerHabitatPre,
LekPresenceRaster
)
seasonalHabitatRasters = [LSDMWinterPre, LSDMBreedingPre, LSDMSummerPre]
# Save outputs
# winterHabitatPre.save(GRSG_PRE_WINTER)
LSDMWinterPre.save(GRSG_PRE_WINTER)
# breedingHabitatPre.save(GRSG_PRE_BREEDING)
LSDMBreedingPre.save(GRSG_PRE_BREEDING)
# summerHabitatPre.save(GRSG_PRE_SUMMER)
LSDMSummerPre.save(GRSG_PRE_SUMMER)
# Initialize list of uplift rasters to combine for LekUpliftModifier
upliftRasters = []
if arcpy.Exists(CONIFER_TREATMENT_AREA):
# Calculate post-project conifer modifier
Conifer_Cover = cheStandard.ConiferCover
coniferModifierPost = cohqt.calcConiferPost(
CONIFER_TREATMENT_AREA, Conifer_Cover
)
coniferModifierPost.save(POST_CONIFER_MODIFIER)
# Calculate uplift from conifer removal
coniferUplift = cohqt.calcUplift(ConiferModifier, coniferModifierPost)
upliftRasters.append(coniferUplift)
else:
coniferModifierPost = ConiferModifier
if arcpy.Exists(PROPOSED_MODIFIED_FEATURES):
# Prepare proposed anthropogenic features
unique_proposed_subtypes = cohqt.convertProposedToRasterCredit(
PROPOSED_MODIFIED_FEATURES, cellSize
)
anthroPath = cheStandard.AnthroFeaturePath
cohqt.combineProposedWithCurrentCredit(anthroPath, unique_proposed_subtypes)
# Update message
arcpy.AddMessage("Calculating post-project anthropogenic "
"disturbance modifier for greater sage-grouse")
# Calculate post-project anthropogenic disturbance
term = cheStandard.CreditTerms[1]
anthro_disturbance_type = "Post"
Projected_Anthro_Disturbance = cohqt.CalcAnthroDisturbance(
Parameter_Values, term, unique_proposed_subtypes,
anthro_disturbance_type, cheStandard, dist_field, weight_field,
cellSize, emptyRaster
)
Projected_Anthro_Disturbance.save(PROJECTED_ANTHRO_DISTURBANCE)
# Update message
arcpy.AddMessage("Projected_Anthro_Disturbance Calculated")
# Calculate uplift from anthro feature removal
anthroUplift = cohqt.calcUplift(Current_Anthro_Disturbance,
Projected_Anthro_Disturbance)
upliftRasters.append(anthroUplift)
# Update message
arcpy.AddMessage("Merging | |
3, 1/2, 1/3],
feature_maps=None,
categories_arr=None,
img_size=None):
self.aspect_ratios = aspect_ratios
self.feature_maps = feature_maps
self.categories_arr = categories_arr
self.num_categories = len(self.categories_arr)
self.img_size = img_size
self.num_priors = compute_num_priors(aspect_ratios)
self.categories_index = {}
# Creacion de indices de las categorias
for i in range(len(self.categories_arr)):
self.categories_index[self.categories_arr[i]] = i
# Procesa un batch de imagenes del data set de coco para convertirlos a training data
# Argumentos:
# path_to_tfrecord: string al dataset de coco en formato tfrecord
def preprocess_tfrecord_coco(self, path_to_tfrecord, res_path):
    """Convert a COCO tfrecord dataset into SSD training examples.

    For every image, the ground-truth boxes are matched against the prior
    (default) boxes of each feature map; each match (IOU > 0.5) is encoded
    as [match_flag, one_hot_category, location_offsets], and the resulting
    (x, y) pairs are serialized to ``res_path`` as a tfrecord.

    :param path_to_tfrecord: path to the COCO dataset in tfrecord format
    :param res_path: path of the tfrecord file to write training data to
    """
    total_fmaps = len(self.feature_maps)
    dataset_tfrecord_coco = tfrecord_coco.parse_dataset(path_to_tfrecord)

    def gen_match(img_cats, img_locs, debugging=False, debug_image=None):
        # Build the y tensor for one image by matching every prior box of
        # every feature map against the image's ground-truth boxes.
        # Returns None when no prior matched any ground-truth box.
        y_true = None
        # BUG FIX: the original read the enclosing loop variable `locs`
        # here (and in the debug loop below) instead of the `img_locs`
        # parameter, so the second call per image (the expanded image)
        # matched against the wrong set of boxes.
        num_bboxes = img_locs.get_shape().as_list()[0]
        num_matches = 0
        if debugging:
            for loc in img_locs:
                draw_bbox(img=debug_image, bbox=loc)
        for f in range(total_fmaps):
            m = self.feature_maps[f][0]
            priors = PriorsBoxes(features=m, num_fmap=f + 1,
                                 total_fmaps=total_fmaps,
                                 aspect_ratios=self.aspect_ratios,
                                 img_size=self.img_size)
            feature_y = np.zeros((m, m, self.num_priors,
                                  1 + self.num_categories + 4))
            for i in range(m):
                for j in range(m):
                    for p in range(self.num_priors):
                        # BUG FIX: keep the rect-format prior in its own
                        # variable; the original overwrote `prior` with the
                        # center format after a match, corrupting the IOU
                        # test for the remaining ground-truth boxes.
                        prior_rect = bbox_center_to_rect(priors[i][j][p])
                        for b in range(num_bboxes):
                            iou = intersection_over_union(prior_rect,
                                                          img_locs[b])
                            if iou > 0.5:
                                num_matches += 1
                                match = tf.ones([1, 1])
                                # Category name -> one-hot vector
                                cat = img_cats[b].numpy().decode("UTF-8")
                                cat_one_hot = [self.categories_index[cat]]
                                cat_one_hot = tf.one_hot(cat_one_hot,
                                                         self.num_categories)
                                # Offset of the prior to the ground truth,
                                # computed in center (cx, cy, w, h) format
                                prior_center = tbbox_rect_to_center(prior_rect)
                                loc = tbbox_rect_to_center(img_locs[b])
                                diff = tf.cast(tf.abs(prior_center - loc),
                                               tf.float32)
                                diff = tf.expand_dims(diff, 0)
                                match_y = tf.concat([match, cat_one_hot, diff],
                                                    -1)
                                feature_y[i][j][p] = match_y
                                if debugging:
                                    draw_bbox(img=debug_image,
                                              bbox=prior_center,
                                              color=(255, 0, 0))
            feature_y = tf.convert_to_tensor(feature_y)
            flat_shape = [m * m, self.num_priors, 1 + self.num_categories + 4]
            if f == 0:
                y_true = tf.identity(tf.reshape(feature_y, flat_shape))
            else:
                feature_y = tf.reshape(feature_y, flat_shape)
                y_true = tf.concat([y_true, tf.identity(feature_y)], 0)
        if num_matches == 0:
            return None
        return tf.cast(y_true, tf.float32)

    writer = tf.io.TFRecordWriter(res_path)

    def write_img_to_file(x_data, y_data):
        # Serialize one (x, y) training pair into the output tfrecord.
        x_data = tf.cast(x_data, tf.float32)
        data = {
            "x": bytes_feature(tf.io.serialize_tensor(x_data)),
            "y": bytes_feature(tf.io.serialize_tensor(y_data))
        }
        example = tf.train.Example(features=tf.train.Features(feature=data))
        writer.write(example.SerializeToString())

    i = 0
    # NOTE(review): `.take(1)` processes a single example only, and the
    # cv2.imshow/cv2.waitKey calls below block on a key press — these look
    # like debugging leftovers; confirm before running batch preprocessing.
    for img_data in dataset_tfrecord_coco.take(1):
        print("Processing image {}".format(i + 1))
        i += 1
        # Decode the raw image bytes into a BGR image
        image_string = np.frombuffer(img_data["img/str"].numpy(), np.uint8)
        decoded_image = cv2.imdecode(image_string, cv2.IMREAD_COLOR)
        # Original image size and the scale factors implied by the resize
        y_, x_ = decoded_image.shape[0], decoded_image.shape[1]
        x_scalar = self.img_size / x_
        y_scalar = self.img_size / y_
        # Decode annotations and keep only the categories this model uses
        cats, locs = self.decode_bboxes(img_data["img/bboxes/category"],
                                        img_data["img/bboxes/x"],
                                        img_data["img/bboxes/y"],
                                        img_data["img/bboxes/width"],
                                        img_data["img/bboxes/height"],
                                        x_scalar, y_scalar)
        mask = self.mask_indices(img_data["img/bboxes/category"])
        cats = tf.boolean_mask(cats, mask)
        locs = tf.boolean_mask(locs, mask)
        # Data augmentation: random patch of the image
        aug_image, locs, cats = ssd_sample_patch(decoded_image, locs, cats)
        locs_cp = locs.copy()
        resized_img = cv2.resize(aug_image, (self.img_size, self.img_size))
        aug_image_cp = resized_img.copy()
        image_tensor = tf.convert_to_tensor(resized_img)
        image_tensor /= 255  # normalize to 0-1
        locs = tf.convert_to_tensor(locs)
        cats = tf.convert_to_tensor(cats)
        y = gen_match(cats, locs, debugging=True, debug_image=resized_img)
        cv2.imshow("matching strategy", resized_img)
        cv2.waitKey(0)
        # BUG FIX: `y` may be a tf.Tensor; `y != None` compares elementwise
        # instead of testing for the no-match sentinel. Use identity.
        if y is not None:
            write_img_to_file(image_tensor, y)
        # Data augmentation: expanded ("zoomed out") image.
        # NOTE(review): locs_cp holds pre-resize coordinates applied to the
        # already-resized copy — verify the intended scaling here.
        ex_img, ex_locs = ssd_expand_image(aug_image_cp, locs_cp)
        ex_img = cv2.resize(ex_img, (self.img_size, self.img_size))
        image_tensor = tf.convert_to_tensor(ex_img)
        image_tensor /= 255  # normalize to 0-1
        ex_locs = tf.convert_to_tensor(ex_locs)
        ex_y = gen_match(cats, ex_locs, debugging=True, debug_image=ex_img)
        print("Expanded image")
        cv2.imshow("matching strategy expanded", ex_img)
        cv2.waitKey(0)
        if ex_y is not None:
            write_img_to_file(image_tensor, ex_y)
    writer.close()
# proces y cambia el formato de las annotacions de las images de tfrecord
# Args:
# cats: sparse tensor de strings con las categorias
# x: sparse tensor con las coordenadas x del bbox
# y: sparse tensor con las coordenadas y del bbox
# width: sparse tensor con en ancho del bbox
# height: sparse tensor con la altura del bbox
# x_scalar: scalar horizontal que se le aplica al bbox por el resize de la img
# y_scalar: scalar vertical que se le aplica al bbox por el resize de la img
def decode_bboxes(self, cats, x, y, width, height, x_scalar, y_scalar):
    """Decode and rescale the bounding-box annotations of one image.

    :param cats: sparse tensor of category-name strings
    :param x, y: sparse tensors with the bbox top-left coordinates
    :param width, height: sparse tensors with the bbox dimensions
    :param x_scalar, y_scalar: horizontal/vertical scale factors applied
        to compensate for the image resize
    :returns: (categories tensor, [x, y, w, h] locations tensor)
    """
    names = []
    boxes = []
    for idx in cats.indices:
        row = idx[0]
        names.append(cats.values[row].numpy().decode("UTF-8"))
        boxes.append([
            x.values[row].numpy() * x_scalar,
            y.values[row].numpy() * y_scalar,
            width.values[row].numpy() * x_scalar,
            height.values[row].numpy() * y_scalar,
        ])
    return tf.convert_to_tensor(names), tf.convert_to_tensor(boxes)
# Funcion que regresa un mask booleano de los bbox que se van usar para el
# modelo, segun las categirias a clasificar
# Args:
# sparse_tensor: sparse tensor con las cadenas de las categorias
def mask_indices(self, sparse_tensor):
    """Build a boolean mask selecting the bboxes whose category is one of
    the categories this model classifies.

    :param sparse_tensor: sparse tensor of category-name strings
    :returns: list of bools, one per annotation, True when the category
        belongs to ``self.categories_arr``
    """
    keep = []
    for idx in sparse_tensor.indices:
        row = idx.numpy()[0]
        label = sparse_tensor.values[row].numpy().decode("UTF-8")
        keep.append(label in self.categories_arr)
    return keep
# Metodo que carga dataset ya preprocesado de un tfrecord
# Args:
# path_to_tfrecord: string con el path al archivo tfrecord
# Returns
# tensor_data: tensorflow Dataset object.
def SSD_load_dataset(path_to_tfrecord):
    """Load an already-preprocessed SSD training dataset from a tfrecord.

    :param path_to_tfrecord: path to the preprocessed tfrecord file
    :returns: a tensorflow Dataset yielding (x, y_true) float32 tensor pairs
    """
    feature_spec = {
        "x": tf.io.FixedLenFeature([], tf.string),
        "y": tf.io.FixedLenFeature([], tf.string)
    }

    def _decode_example(record):
        return tf.io.parse_single_example(record, feature_spec)

    def _decode_tensors(example):
        return (tf.io.parse_tensor(example["x"], tf.float32),
                tf.io.parse_tensor(example["y"], tf.float32))

    raw_data = tf.data.TFRecordDataset(path_to_tfrecord)
    return raw_data.map(_decode_example).map(_decode_tensors)
#
# Metodos para SSD data augmentation
# Metodo que calcula IOU entre 2 batchs de bboxes
# Args:
# boxA: tensor of shape [?, 4] (x, y, w, h)
# boxB: tensor of shape [?, 4] (x, y, w, h)
def iou_batch(_boxA, _boxB):
    """Compute elementwise intersection-over-union for two batches of boxes.

    :param _boxA: array of shape [?, 4] in (x, y, w, h) format
    :param _boxB: array of shape [?, 4] in (x, y, w, h) format
    :returns: tensor of IOU values, one per row
    """
    rectA = np.copy(_boxA)
    rectB = np.copy(_boxB)
    # Convert (x, y, w, h) -> (x1, y1, x2, y2)
    rectA[:, 2:] = rectA[:, :2] + _boxA[:, 2:]
    rectB[:, 2:] = rectB[:, :2] + _boxB[:, 2:]
    # Intersection rectangle corners
    xA = tf.math.maximum(rectA[:, 0], rectB[:, 0])
    yA = tf.math.maximum(rectA[:, 1], rectB[:, 1])
    xB = tf.math.minimum(rectA[:, 2], rectB[:, 2])
    yB = tf.math.minimum(rectA[:, 3], rectB[:, 3])
    # Clamp to zero when the boxes do not overlap (+1 pixel convention)
    interArea = tf.math.maximum(0, xB - xA + 1) * tf.math.maximum(0, yB - yA + 1)
    areaA = (rectA[:, 2] - rectA[:, 0] + 1) * (rectA[:, 3] - rectA[:, 1] + 1)
    areaB = (rectB[:, 2] - rectB[:, 0] + 1) * (rectB[:, 3] - rectB[:, 1] + 1)
    return interArea / (areaA + areaB - interArea)
# Metodo que genera un patch a la imagen, segun paper de ssd
# Args:
# image: tensor of shape (height, width, 3)
# locs: tensor con localizacion de bbox de shape [?, 4]
# cats: tensor con las categorias de los bbox, de shape [?]
# Returns:
# igual que args pero con el patch aplicado a la imagen
def ssd_sample_patch(image, locs, cats):
image = np.array(image)
locs = np.array(locs)
cats = np.array(cats)
sample_options = (
# Original input image
None,
# patch con min iou .1, .3, .7, .9
(0.1, None),
(0.3, None),
(0.7, None),
(0.9, None),
# random patch
(None, None)
)
height, width, _ = image.shape
while(True):
# Escoger un modo de forma aleatoria
mode = random.choice(sample_options)
if mode is None:
return image, locs, cats
min_iou, max_iou = mode
if min_iou is None:
min_iou = float("-inf")
if max_iou is None:
max_iou = float("inf")
# Maximos intentos (50)
for _ in range(50):
current_image = image
w = random.uniform(0.3*width, width)
h = random.uniform(0.3*height, height)
# aspcect ratio esta entre .5 y 2
if h / w < 0.5 or h / w > 2:
continue
left = random.uniform(0, width - w)
top = random.uniform(0, height - h)
# convert to rect
rect = np.array([int(left), int(top), int(w), int(h)])
# calcular iou
overlap = iou_batch(locs, np.array([rect]))
overlap = np.array(overlap)
# si se satisface las restricciones del iou
if overlap.min() < min_iou and max_iou > overlap.max():
continue
# Obtiene crop de la imagen
current_image = current_image[rect[1]:rect[1]+rect[3],
rect[0]:rect[0]+rect[2], :]
centers = locs[:, :2] + (locs[:, 2:] / 2.0)
# mask locs
m1 = (rect[0] < centers[:, 0]) * (rect[1] < centers[:, 1])
m2 = (rect[0]+rect[2] > centers[:, 0]) * (rect[1]+rect[3] > centers[:, 1])
mask = m1 * m2
# si se tiene boxes validas
if not mask.any():
continue
# aplica mask a bboxes y cats
current_locs = locs[mask, :].copy()
current_cats = cats[mask].copy()
# cambia dataformat para corregir coordenadas de bboxes
rect[2:] = rect[:2] + rect[2:]
current_locs[:, 2:] = current_locs[:,:2] + current_locs[:, 2:]
# should we use the box left and top corner or the crop's
current_locs[:, :2] = np.maximum(current_locs[:, :2], rect[:2])
# adjust to crop (by substracting crop's left,top)
current_locs[:, :2] -= rect[:2]
current_locs[:, 2:] = np.minimum(current_locs[:, 2:], rect[2:])
# adjust to | |
will
only replay all the events, meta data and market data sources will
need to be reloaded as well. An example input file would look like
TRADE|{"timestamp": "2016-12-01 10:00:00", "ccy": "USD", "commission": 2.5, "instrument": "CLZ16", "price": 53.46, "quantity": 100, "multiplier": 1}
TRADE|{"timestamp": "2016-12-02 10:00:00", "ccy": "USD", "commission": 2.5, "instrument": "CLZ16", "price": 55.32, "quantity": 100, "multiplier": 1}
Parameters
----------
fp: str
path to read log from
""" # NOQA
events = self._create_log_events(fp)
self.dispatch_events(events)
@staticmethod
def _create_log_events(fp):
    """Parse a blotter log file into a list of _Event objects, one per line."""
    with open(fp, 'r') as logfile:
        return [_Event.fromstring(line) for line in logfile]
def write_meta(self, fp):
    """
    Write the meta data associated with instruments in a Blotter to a file,
    one generic per line, sorted by generic name. Can be read back with
    read_meta(). Each line has the form

    {"ccy": "CAD", "margin": 0.1, "multiplier": 100, "commission": 2.5, "isFX": false}|{"CL": ["CLU16", "CLZ16"]}

    Parameters
    ----------
    fp: str
        path to write meta data
    """  # NOQA
    # Invert instrument -> generic into generic -> sorted list of instruments
    generic_to_instruments = {}
    for instrument, generic in self._instr_map.items():
        generic_to_instruments.setdefault(generic, []).append(instrument)
    for instruments in generic_to_instruments.values():
        instruments.sort()
    with open(fp, 'w') as outfile:
        for generic in sorted(self._gnrc_meta):
            meta_data_str = json.dumps(self._gnrc_meta[generic]._asdict())
            map_str = '{"' + str(generic) + '": ' + json.dumps(generic_to_instruments[generic]) + '}'  # NOQA
            outfile.write(meta_data_str + "|" + map_str + "\n")
def read_meta(self, fp):
    """
    Reconstitute the meta data of a Blotter from a file written by
    write_meta().

    Parameters
    ----------
    fp: str
        Path to file. Each line must have the format

        {"ccy": "CAD", "margin": 0.1, "multiplier": 100, "commission": 2.5, "isFX": false}|{"CL": ["CLU16", "CLZ16"]}
    """  # NOQA
    with open(fp, 'r') as infile:
        for line in infile:
            meta_str, map_str = line.split("|")
            meta_dict = json.loads(meta_str)
            mapping_dict = json.loads(map_str)
            # The single key of the mapping is the generic name
            generic = list(mapping_dict.keys())[0]
            meta_dict['generic'] = generic
            self.define_generic(**meta_dict)
            for instr in mapping_dict[generic]:
                self.map_instrument(generic, instr)
class Holdings():
"""
The Holdings class is designed to manage holdings data and PnL data. The
class stores instrument level holdings data on a per currency basis and
calculates PnL on a per currency basis given instrument prices. The class
is primarily designed to manage these aspects from within the context
of the Blotter class however can also provide this functionality stand
alone.
The main features of the Holdings class include:
- Store per currency per instrument holindgs
- Calculate per currency per instrument PnL
- Maintain interest payable cash balances per currency
- Maintain charged/payed interest per currency
- Provide functionality to sweep PnL from one currency to another
- Return historical holdings
- Return historical PnL
Calculating PnL is done on a as of current holdings basis, there is no
functionality for looking up historical holdings for calculating historic
PnL.
Note: For interest bearing instruments, when users are using the Holdings
class standalone, users are responsible for calling charge_interest() at
appropriate intervals and with appropriate interest rates to ensure that
the PnL calculations are correct. This is handled by the Blotter class.
All actions on the Holdings class must follow in time sequential order.
"""
def __init__(self):
    """Create an empty Holdings with no positions, cash, or PnL history."""
    self._position_data_per_ccy = {}  # ccy -> {instrument -> holding record}
    self._cash = {}                   # ccy -> interest-bearing cash history
    self._interest = {}               # ccy -> charged/paid interest history
    self._pnl_sweep = {}              # ccy -> swept PnL history
    self._pnl_data = {}               # ccy -> PnL history
    self._timestamp = pd.NaT          # time of the most recent action
@staticmethod
def _make_empty_holding():
    # One independent empty double-array per field of a holding record.
    field_names = ['timestamp', 'trade', 'position', 'avg_pos_price', 'fees',
                   'avg_sell_price', 'total_sell', 'avg_buy_price',
                   'total_buy']
    holding = namedtuple('holding', field_names)
    return holding(*(array('d') for _ in field_names))
@staticmethod
def _make_empty_qty():
    # Empty (timestamp, amount) time series backed by double-arrays.
    cash = namedtuple('cash', ['timestamp', 'amount'])
    return cash(timestamp=array('d'), amount=array('d'))
@staticmethod
def _make_empty_hist_pnl():
    # Empty (time, pnl) history backed by plain lists.
    pnl_hist = namedtuple('hist_pnl', ['time', 'pnl'])
    return pnl_hist(time=[], pnl=[])
@property
def timestamp(self):
    """The time of the most recent action applied to this Holdings
    (pd.NaT before any action has been recorded)."""
    return self._timestamp
def get_holdings(self):
    """
    Get the current amount of instrument holdings. This includes any
    multiplier associated with the instrument.

    Returns
    -------
    dictionary
        Dictionary with currencies as keys and pandas.Series as values,
        where each Series contains the most recent non-zero holdings for
        the instruments in that currency
    """
    current = dict()
    for ccy, ccy_pos_data in self._position_data_per_ccy.items():
        series = pd.Series(index=sorted(ccy_pos_data))
        for asset, record in ccy_pos_data.items():
            series.loc[asset] = record.position[-1]
        # drop closed positions
        series = series.loc[series != 0]
        if not series.empty:
            current[ccy] = series
    return current
def get_holdings_history(self):
    """
    Get the full history of the amount of holdings for each instrument
    traded (this includes any multiplier associated with the instrument).

    Returns
    -------
    dictionary
        Dictionary with currencies as keys and dictionaries of
        pandas.Series as values, where the keys of the nested dictionary
        are instrument names and each pandas.Series is a timeseries of
        holdings
    """
    history = dict()
    for ccy, ccy_pos_data in self._position_data_per_ccy.items():
        per_asset = dict()
        for asset, record in ccy_pos_data.items():
            index = self._to_timestamp(record.timestamp)
            per_asset[asset] = pd.Series(record.position, index=index,
                                         copy=True)
        history[ccy] = per_asset
    return history
@staticmethod
def _to_timestamp(array):
    # Convert an array of POSIX-second floats into a list of Timestamps.
    # NOTE: Timestamp.fromtimestamp interprets the floats in local time.
    return list(map(pd.Timestamp.fromtimestamp, array))
def get_assets(self):
    """
    Get the names of instruments with an open position.

    Returns
    -------
    list
        Sorted list of strings of current assets which have holdings
    """
    return sorted(
        asset
        for ccy_data in self._position_data_per_ccy.values()
        for asset, record in ccy_data.items()
        if record.position[-1] != 0
    )
def record_trade(self, timestamp, instrument, price, quantity, multiplier,
                 commission, ccy):
    """
    Record an instrument trade in Holdings. Trades must be time ordered.

    Parameters
    ----------
    timestamp: pandas.Timestamp
        Time of trade
    instrument: str
        Tradeable instrument name
    price: float
        Price of trade
    quantity: int
        Number of instruments traded.
    multiplier: int
        A number which when multiplied by the price gives the notional
        value of a contract. E.g. for trading an ES contract,
        the multipler is 50, therefore 1 ES contract with a price of 2081
        the notional value of the contract is 2081 x 50$.
    commission: float
        total commission for the trade
    ccy: str
        currency of instrument denomination

    Raises
    ------
    ValueError
        If quantity is 0 or NaN, if multiplier is not a positive integer,
        or if timestamp precedes the most recent action on this Holdings.
    """
    # reject degenerate trades up front
    if quantity == 0:
        raise ValueError("Cannot trade 0 quantity of an instrument")
    if np.isnan(quantity):
        raise ValueError("Cannot trade nan quantity of an instrument")
    if multiplier <= 0 or not isinstance(multiplier, int):
        raise ValueError("multiplier must be positive integer")
    # buys and sells maintain separate running average-price/total fields
    if quantity > 0:
        price_attr = "avg_buy_price"
        total_attr = "total_buy"
    elif quantity < 0:
        price_attr = "avg_sell_price"
        total_attr = "total_sell"
    # signed holding change: contracts times contract multiplier
    amount = quantity * multiplier
    # look up (or lazily create) the per-currency instrument records
    if ccy in self._position_data_per_ccy:
        ccy_holdings = self._position_data_per_ccy[ccy]
    else:
        ccy_holdings = {}
        self._position_data_per_ccy[ccy] = ccy_holdings
    if instrument in ccy_holdings:
        holdings = ccy_holdings[instrument]
    else:
        holdings = self._make_empty_holding()
        ccy_holdings[instrument] = holdings
    # deals with first access being non existent (defaults to 0)
    prev_hldings = self._get_last(holdings, 'position')
    avg_price = self._get_last(holdings, price_attr)
    total = self._get_last(holdings, total_attr)
    # NaT > timestamp is False, so the first ever trade always passes
    if self._timestamp > timestamp:
        raise ValueError('Operations on Holdings must follow in time'
                         ' sequential order')
    # append the trade to the instrument's time series
    holdings.timestamp.append(timestamp.timestamp())
    holdings.position.append(prev_hldings + amount)
    holdings.trade.append(amount)
    self._timestamp = timestamp
    # fees accumulate as a running total
    fees = self._get_last(holdings, "fees", default=0)
    holdings.fees.append(commission + fees)
    # update the side-specific (buy or sell) running average price and
    # cumulative traded amount
    aamnt = np.abs(amount)
    new_price = (total * avg_price + aamnt * price) / (total + aamnt)
    getattr(holdings, price_attr).append(new_price)
    getattr(holdings, total_attr).append(total + aamnt)
    # when adding to position or flipping position sign update
    # average price
    ADDING = np.sign(amount) == np.sign(prev_hldings)
    NEW_POS = np.sign(amount + prev_hldings) not in {np.sign(prev_hldings), 0}  # NOQA
    if ADDING:
        # same direction: blend the old average with the new trade price
        a_price = holdings.avg_pos_price[-1]
        new_pos_price = (a_price * prev_hldings + price * amount) / (prev_hldings + amount)  # NOQA
        holdings.avg_pos_price.append(new_pos_price)
    elif NEW_POS:
        # position flipped sign (or opened): restart at the trade price
        holdings.avg_pos_price.append(price)
    else:
        # position reduced (or closed): average position price unchanged
        holdings.avg_pos_price.append(holdings.avg_pos_price[-1])
def _get_last(self, obj, attr, default=0):
try:
value = getattr(obj, attr)[-1]
except IndexError:
value = default
return value
def update_cash(self, timestamp, ccy, quantity):
"""
Update the amount of cash in a certain type of currency, used for
charging interest on that balance.
Parameters
----------
timestamp: pandas.Timestamp
Time of trade
ccy: str
currency of cash balance
quantity: float
Amount of cash
"""
self._update_property(timestamp, ccy, quantity, '_cash')
def charge_interest(self, timestamp, ccy, quantity):
"""
Update the amount of interest charged in the account of a currency.
Parameters
----------
timestamp: pandas.Timestamp
Time of trade
ccy: str
currency of interest charge/payment
quantity: float
Amount of interest
"""
| |
/ 2.0,
65909: 1.0 / 2.0,
65910: 1.0 / 2.0,
65911: 2.0 / 3.0,
65912: 3.0 / 4.0,
69243: 1.0 / 2.0,
69244: 1.0 / 4.0,
69245: 1.0 / 3.0,
69246: 2.0 / 3.0,
69714: 1.0,
69715: 2.0,
69716: 3.0,
69717: 4.0,
69718: 5.0,
69719: 6.0,
69720: 7.0,
69721: 8.0,
69722: 9.0,
69723: 10.0,
69724: 20.0,
69725: 30.0,
69726: 40.0,
69727: 50.0,
69728: 60.0,
69729: 70.0,
69730: 80.0,
69731: 90.0,
69732: 100.0,
69733: 1000.0,
69734: 0.0,
69735: 1.0,
69736: 2.0,
69737: 3.0,
69738: 4.0,
69739: 5.0,
69740: 6.0,
69741: 7.0,
69742: 8.0,
69743: 9.0,
69872: 0.0,
69873: 1.0,
69874: 2.0,
69875: 3.0,
69876: 4.0,
69877: 5.0,
69878: 6.0,
69879: 7.0,
69880: 8.0,
69881: 9.0,
69942: 0.0,
69943: 1.0,
69944: 2.0,
69945: 3.0,
69946: 4.0,
69947: 5.0,
69948: 6.0,
69949: 7.0,
69950: 8.0,
69951: 9.0,
70096: 0.0,
70097: 1.0,
70098: 2.0,
70099: 3.0,
70100: 4.0,
70101: 5.0,
70102: 6.0,
70103: 7.0,
70104: 8.0,
70105: 9.0,
71360: 0.0,
71361: 1.0,
71362: 2.0,
71363: 3.0,
71364: 4.0,
71365: 5.0,
71366: 6.0,
71367: 7.0,
71368: 8.0,
71369: 9.0,
74802: 216000.0,
74803: 432000.0,
74838: -1.0,
74839: -1.0,
74842: 1.0 / 3.0,
74843: 2.0 / 3.0,
74844: 5.0 / 6.0,
74845: 1.0 / 3.0,
74846: 2.0 / 3.0,
74847: 1.0 / 8.0,
74848: 1.0 / 4.0,
74849: 1.0 / 6.0,
74850: 1.0 / 4.0,
}
# Code points whose numeric value was corrected in this database version;
# numeric() uses membership here to suppress the fallback to base_mod.
_numeric_corrected = {
}
def decimal(code):
    """Return the decimal-digit value of code point *code*.

    Unknown code points are looked up in the base database module unless
    they are listed as corrected in this version, in which case the
    original KeyError propagates.
    """
    try:
        return _decimal[code]
    except KeyError:
        use_base = base_mod is not None and code not in _decimal_corrected
        if not use_base:
            raise
        return base_mod._decimal[code]
def digit(code):
    """Return the digit value of code point *code*.

    Unknown code points are looked up in the base database module unless
    they are listed as corrected in this version, in which case the
    original KeyError propagates.
    """
    try:
        return _digit[code]
    except KeyError:
        use_base = base_mod is not None and code not in _digit_corrected
        if not use_base:
            raise
        return base_mod._digit[code]
def numeric(code):
    """Return the numeric value of code point *code*.

    Unknown code points are looked up in the base database module unless
    they are listed as corrected in this version, in which case the
    original KeyError propagates.
    """
    try:
        return _numeric[code]
    except KeyError:
        use_base = base_mod is not None and code not in _numeric_corrected
        if not use_base:
            raise
        return base_mod._numeric[code]
# Uppercase-mapping overrides for this database version; toupper()
# consults this table before falling back to base_mod.
_toupper = {
    613: 42893,
    614: 42922,
    1319: 1318,
    11507: 11506,
    11559: 4295,
    11565: 4301,
    42593: 42592,
    42897: 42896,
    42899: 42898,
    42913: 42912,
    42915: 42914,
    42917: 42916,
    42919: 42918,
    42921: 42920,
}
# Code points whose uppercase mapping was corrected in this version;
# membership here suppresses the fallback to base_mod in toupper().
_toupper_corrected = {
}
# Lowercase-mapping overrides for this database version; tolower()
# consults this table before falling back to base_mod.
_tolower = {
    1318: 1319,
    4295: 11559,
    4301: 11565,
    11506: 11507,
    42592: 42593,
    42893: 613,
    42896: 42897,
    42898: 42899,
    42912: 42913,
    42914: 42915,
    42916: 42917,
    42918: 42919,
    42920: 42921,
    42922: 614,
}
# Code points whose lowercase mapping was corrected in this version;
# membership here suppresses the fallback to base_mod in tolower().
_tolower_corrected = {
}
# Titlecase-mapping overrides for this database version; totitle()
# consults this table before falling back to base_mod.
_totitle = {
    613: 42893,
    614: 42922,
    1319: 1318,
    11507: 11506,
    11559: 4295,
    11565: 4301,
    42593: 42592,
    42897: 42896,
    42899: 42898,
    42913: 42912,
    42915: 42914,
    42917: 42916,
    42919: 42918,
    42921: 42920,
}
# Code points whose titlecase mapping was corrected in this version;
# membership here suppresses the fallback to base_mod in totitle().
_totitle_corrected = {
}
# Special (one-to-many) case mappings.  Each value is a triple of
# code-point lists indexed as (lower, title, upper); tolower_full,
# totitle_full and toupper_full below read indices 0, 1 and 2.
_special_casing = {
    223: ([223], [83, 115], [83, 83]),
    304: ([105, 775], [304], [304]),
    329: ([329], [700, 78], [700, 78]),
    496: ([496], [74, 780], [74, 780]),
    912: ([912], [921, 776, 769], [921, 776, 769]),
    944: ([944], [933, 776, 769], [933, 776, 769]),
    1415: ([1415], [1333, 1410], [1333, 1362]),
    7830: ([7830], [72, 817], [72, 817]),
    7831: ([7831], [84, 776], [84, 776]),
    7832: ([7832], [87, 778], [87, 778]),
    7833: ([7833], [89, 778], [89, 778]),
    7834: ([7834], [65, 702], [65, 702]),
    8016: ([8016], [933, 787], [933, 787]),
    8018: ([8018], [933, 787, 768], [933, 787, 768]),
    8020: ([8020], [933, 787, 769], [933, 787, 769]),
    8022: ([8022], [933, 787, 834], [933, 787, 834]),
    8064: ([8064], [8072], [7944, 921]),
    8065: ([8065], [8073], [7945, 921]),
    8066: ([8066], [8074], [7946, 921]),
    8067: ([8067], [8075], [7947, 921]),
    8068: ([8068], [8076], [7948, 921]),
    8069: ([8069], [8077], [7949, 921]),
    8070: ([8070], [8078], [7950, 921]),
    8071: ([8071], [8079], [7951, 921]),
    8072: ([8064], [8072], [7944, 921]),
    8073: ([8065], [8073], [7945, 921]),
    8074: ([8066], [8074], [7946, 921]),
    8075: ([8067], [8075], [7947, 921]),
    8076: ([8068], [8076], [7948, 921]),
    8077: ([8069], [8077], [7949, 921]),
    8078: ([8070], [8078], [7950, 921]),
    8079: ([8071], [8079], [7951, 921]),
    8080: ([8080], [8088], [7976, 921]),
    8081: ([8081], [8089], [7977, 921]),
    8082: ([8082], [8090], [7978, 921]),
    8083: ([8083], [8091], [7979, 921]),
    8084: ([8084], [8092], [7980, 921]),
    8085: ([8085], [8093], [7981, 921]),
    8086: ([8086], [8094], [7982, 921]),
    8087: ([8087], [8095], [7983, 921]),
    8088: ([8080], [8088], [7976, 921]),
    8089: ([8081], [8089], [7977, 921]),
    8090: ([8082], [8090], [7978, 921]),
    8091: ([8083], [8091], [7979, 921]),
    8092: ([8084], [8092], [7980, 921]),
    8093: ([8085], [8093], [7981, 921]),
    8094: ([8086], [8094], [7982, 921]),
    8095: ([8087], [8095], [7983, 921]),
    8096: ([8096], [8104], [8040, 921]),
    8097: ([8097], [8105], [8041, 921]),
    8098: ([8098], [8106], [8042, 921]),
    8099: ([8099], [8107], [8043, 921]),
    8100: ([8100], [8108], [8044, 921]),
    8101: ([8101], [8109], [8045, 921]),
    8102: ([8102], [8110], [8046, 921]),
    8103: ([8103], [8111], [8047, 921]),
    8104: ([8096], [8104], [8040, 921]),
    8105: ([8097], [8105], [8041, 921]),
    8106: ([8098], [8106], [8042, 921]),
    8107: ([8099], [8107], [8043, 921]),
    8108: ([8100], [8108], [8044, 921]),
    8109: ([8101], [8109], [8045, 921]),
    8110: ([8102], [8110], [8046, 921]),
    8111: ([8103], [8111], [8047, 921]),
    8114: ([8114], [8122, 837], [8122, 921]),
    8115: ([8115], [8124], [913, 921]),
    8116: ([8116], [902, 837], [902, 921]),
    8118: ([8118], [913, 834], [913, 834]),
    8119: ([8119], [913, 834, 837], [913, 834, 921]),
    8124: ([8115], [8124], [913, 921]),
    8130: ([8130], [8138, 837], [8138, 921]),
    8131: ([8131], [8140], [919, 921]),
    8132: ([8132], [905, 837], [905, 921]),
    8134: ([8134], [919, 834], [919, 834]),
    8135: ([8135], [919, 834, 837], [919, 834, 921]),
    8140: ([8131], [8140], [919, 921]),
    8146: ([8146], [921, 776, 768], [921, 776, 768]),
    8147: ([8147], [921, 776, 769], [921, 776, 769]),
    8150: ([8150], [921, 834], [921, 834]),
    8151: ([8151], [921, 776, 834], [921, 776, 834]),
    8162: ([8162], [933, 776, 768], [933, 776, 768]),
    8163: ([8163], [933, 776, 769], [933, 776, 769]),
    8164: ([8164], [929, 787], [929, 787]),
    8166: ([8166], [933, 834], [933, 834]),
    8167: ([8167], [933, 776, 834], [933, 776, 834]),
    8178: ([8178], [8186, 837], [8186, 921]),
    8179: ([8179], [8188], [937, 921]),
    8180: ([8180], [911, 837], [911, 921]),
    8182: ([8182], [937, 834], [937, 834]),
    8183: ([8183], [937, 834, 837], [937, 834, 921]),
    8188: ([8179], [8188], [937, 921]),
    64256: ([64256], [70, 102], [70, 70]),
    64257: ([64257], [70, 105], [70, 73]),
    64258: ([64258], [70, 108], [70, 76]),
    64259: ([64259], [70, 102, 105], [70, 70, 73]),
    64260: ([64260], [70, 102, 108], [70, 70, 76]),
    64261: ([64261], [83, 116], [83, 84]),
    64262: ([64262], [83, 116], [83, 84]),
    64275: ([64275], [1348, 1398], [1348, 1350]),
    64276: ([64276], [1348, 1381], [1348, 1333]),
    64277: ([64277], [1348, 1387], [1348, 1339]),
    64278: ([64278], [1358, 1398], [1358, 1350]),
    64279: ([64279], [1348, 1389], [1348, 1341]),
}
# Entries corrected in this database version; membership here suppresses
# the fallback to base_mod in the *_full functions.
_special_casing_corrected = {
}
def toupper(code):
    """Map *code* to its uppercase code point; identity when unmapped."""
    if code in _toupper:
        return _toupper[code]
    if base_mod is None or code in _toupper_corrected:
        return code
    return base_mod._toupper.get(code, code)
def tolower(code):
    """Map *code* to its lowercase code point; identity when unmapped."""
    if code in _tolower:
        return _tolower[code]
    if base_mod is None or code in _tolower_corrected:
        return code
    return base_mod._tolower.get(code, code)
def totitle(code):
    """Map *code* to its titlecase code point; identity when unmapped."""
    if code in _totitle:
        return _totitle[code]
    if base_mod is None or code in _totitle_corrected:
        return code
    return base_mod._totitle.get(code, code)
def toupper_full(code):
    """Full (possibly multi-code-point) uppercase mapping as a list."""
    try:
        return _special_casing[code][2]
    except KeyError:
        pass
    if base_mod is not None and code not in _special_casing_corrected:
        try:
            return base_mod._special_casing[code][2]
        except KeyError:
            pass
    return [toupper(code)]
def tolower_full(code):
    """Full (possibly multi-code-point) lowercase mapping as a list."""
    try:
        return _special_casing[code][0]
    except KeyError:
        pass
    if base_mod is not None and code not in _special_casing_corrected:
        try:
            return base_mod._special_casing[code][0]
        except KeyError:
            pass
    return [tolower(code)]
def totitle_full(code):
    """Full (possibly multi-code-point) titlecase mapping as a list."""
    try:
        return _special_casing[code][1]
    except KeyError:
        pass
    if base_mod is not None and code not in _special_casing_corrected:
        try:
            return base_mod._special_casing[code][1]
        except KeyError:
            pass
    return [totitle(code)]
# Raw decomposition strings for this database version, in UnicodeData.txt
# format: an optional compatibility <tag> followed by space-separated hex
# code points.  decomposition() consults this before falling back to
# base_mod.
_raw_decomposition = {
    8341: '<sub> 0068',
    8342: '<sub> 006B',
    8343: '<sub> 006C',
    8344: '<sub> 006D',
    8345: '<sub> 006E',
    8346: '<sub> 0070',
    8347: '<sub> 0073',
    8348: '<sub> 0074',
    43000: '<super> 0126',
    43001: '<super> 0153',
    64046: '90DE',
    64047: '96B7',
    69934: '11131 11127',
    69935: '11132 11127',
    126464: '<font> 0627',
    126465: '<font> 0628',
    126466: '<font> 062C',
    126467: '<font> 062F',
    126469: '<font> 0648',
    126470: '<font> 0632',
    126471: '<font> 062D',
    126472: '<font> 0637',
    126473: '<font> 064A',
    126474: '<font> 0643',
    126475: '<font> 0644',
    126476: '<font> 0645',
    126477: '<font> 0646',
    126478: '<font> 0633',
    126479: '<font> 0639',
    126480: '<font> 0641',
    126481: '<font> 0635',
    126482: '<font> 0642',
    126483: '<font> 0631',
    126484: '<font> 0634',
    126485: '<font> 062A',
    126486: '<font> 062B',
    126487: '<font> 062E',
    126488: '<font> 0630',
    126489: '<font> 0636',
    126490: '<font> 0638',
    126491: '<font> 063A',
    126492: '<font> 066E',
    126493: '<font> 06BA',
    126494: '<font> 06A1',
    126495: '<font> 066F',
    126497: '<font> 0628',
    126498: '<font> 062C',
    126500: '<font> 0647',
    126503: '<font> 062D',
    126505: '<font> 064A',
    126506: '<font> 0643',
    126507: '<font> 0644',
    126508: '<font> 0645',
    126509: '<font> 0646',
    126510: '<font> 0633',
    126511: '<font> 0639',
    126512: '<font> 0641',
    126513: '<font> 0635',
    126514: '<font> 0642',
    126516: '<font> 0634',
    126517: '<font> 062A',
    126518: '<font> 062B',
    126519: '<font> 062E',
    126521: '<font> 0636',
    126523: '<font> 063A',
    126530: '<font> 062C',
    126535: '<font> 062D',
    126537: '<font> 064A',
    126539: '<font> 0644',
    126541: '<font> 0646',
    126542: '<font> 0633',
    126543: '<font> 0639',
    126545: '<font> 0635',
    126546: '<font> 0642',
    126548: '<font> 0634',
    126551: '<font> 062E',
    126553: '<font> 0636',
    126555: '<font> 063A',
    126557: '<font> 06BA',
    126559: '<font> 066F',
    126561: '<font> 0628',
    126562: '<font> 062C',
    126564: '<font> 0647',
    126567: '<font> 062D',
    126568: '<font> 0637',
    126569: '<font> 064A',
    126570: '<font> 0643',
    126572: '<font> 0645',
    126573: '<font> 0646',
    126574: '<font> 0633',
    126575: '<font> 0639',
    126576: '<font> 0641',
    126577: '<font> 0635',
    126578: '<font> 0642',
    126580: '<font> 0634',
    126581: '<font> 062A',
    126582: '<font> 062B',
    126583: '<font> 062E',
    126585: '<font> 0636',
    126586: '<font> 0638',
    126587: '<font> 063A',
    126588: '<font> 066E',
    126590: '<font> 06A1',
    126592: '<font> 0627',
    126593: '<font> 0628',
    126594: '<font> 062C',
    126595: '<font> 062F',
    126596: '<font> 0647',
    126597: '<font> 0648',
    126598: '<font> 0632',
    126599: '<font> 062D',
    126600: '<font> 0637',
    126601: '<font> 064A',
    126603: '<font> 0644',
    126604: '<font> 0645',
    126605: '<font> 0646',
    126606: '<font> 0633',
    126607: '<font> 0639',
    126608: '<font> 0641',
    126609: '<font> 0635',
    126610: '<font> 0642',
    126611: '<font> 0631',
    126612: '<font> 0634',
    126613: '<font> 062A',
    126614: '<font> 062B',
    126615: '<font> 062E',
    126616: '<font> 0630',
    126617: '<font> 0636',
    126618: '<font> 0638',
    126619: '<font> 063A',
    126625: '<font> 0628',
    126626: '<font> 062C',
    126627: '<font> 062F',
    126629: '<font> 0648',
    126630: '<font> 0632',
    126631: '<font> 062D',
    126632: '<font> 0637',
    126633: '<font> 064A',
    126635: '<font> 0644',
    126636: '<font> 0645',
    126637: '<font> 0646',
    126638: '<font> 0633',
    126639: '<font> 0639',
    126640: '<font> 0641',
    126641: '<font> 0635',
    126642: '<font> 0642',
    126643: '<font> 0631',
    126644: '<font> 0634',
    126645: '<font> 062A',
    126646: '<font> 062B',
    126647: '<font> 062E',
    126648: '<font> 0630',
    126649: '<font> 0636',
    126650: '<font> 0638',
    126651: '<font> 063A',
    127280: '<square> 0041',
    127282: '<square> 0043',
    127283: '<square> 0044',
    127284: '<square> 0045',
    127285: '<square> 0046',
    127286: '<square> 0047',
    127287: '<square> 0048',
    127288: '<square> 0049',
    127289: '<square> 004A',
    127290: '<square> 004B',
    127291: '<square> 004C',
    127292: '<square> 004D',
    127294: '<square> 004F',
    127296: '<square> 0051',
    127297: '<square> 0052',
    127299: '<square> 0054',
    127300: '<square> 0055',
    127301: '<square> 0056',
    127303: '<square> 0058',
    127304: '<square> 0059',
    127305: '<square> 005A',
    127311: '<square> 0057 0043',
    127338: '<super> 004D 0043',
    127339: '<super> 004D 0044',
    127489: '<square> 30B3 30B3',
    127490: '<square> 30B5',
    127538: '<square> 7981',
    127539: '<square> 7A7A',
    127540: '<square> 5408',
    127541: '<square> 6E80',
    127542: '<square> 6709',
    127543: '<square> 6708',
    127544: '<square> 7533',
    127545: '<square> 5272',
    127546: '<square> 55B6',
    127568: '<circle> 5F97',
    127569: '<circle> 53EF',
}
# Entries corrected in this database version; membership here suppresses
# the fallback to base_mod in decomposition().
_raw_decomposition_corrected = {
}
def decomposition(code):
try:
return _raw_decomposition[code]
except KeyError:
if base_mod is not None and code | |
import collections
import copy
import enum
import functools
import json
import re
import asdl
import attr
from seq2struct import ast_util
from seq2struct.utils import registry
class HoleType(enum.Enum):
    """Kinds of template holes; interpreted in ``_template_to_constructor``."""
    ReplaceSelf = 1
    AddChild = 2
class MissingValue:
    """Marker type; carries no data or behavior."""
    pass
@attr.s
class SeqField:
    """Pairs a singular type's name with one of its sequential fields."""
    type_name = attr.ib()  # name of the constructor/product owning the field
    field = attr.ib()      # the asdl.Field object (callers pass fields with .seq set)
@registry.register('grammar', 'idiom_ast')
class IdiomAstGrammar:
    def __init__(self, base_grammar, template_file, root_type=None,
            all_sections_rewritten=False):
        """Extend ``base_grammar`` with idiom templates loaded from JSON.

        Deep-copies the base grammar's AST wrapper and, for every template
        in ``template_file``, registers new ``Template<id>`` constructors:
        as extra constructors of a sum type, as "seq fragment" constructors,
        or — for product head types — by converting the product into a
        ``<name>_plus_templates`` sum type and rewriting all occurrences.

        :param base_grammar: registry config for the underlying grammar.
        :param template_file: path to a JSON list of template dicts
            (each with at least 'idiom', 'holes', 'id').
        :param root_type: optional root type name, or a list of candidate
            names (the first one known to the AST wrapper wins).
        :param all_sections_rewritten: if true, ``parse`` applies idiom
            conversion to every section, not just 'train'.
        """
        self.base_grammar = registry.construct('grammar', base_grammar)
        self.templates = json.load(open(template_file))
        self.all_sections_rewritten = all_sections_rewritten
        self.pointers = self.base_grammar.pointers
        self.ast_wrapper = copy.deepcopy(self.base_grammar.ast_wrapper)
        self.base_ast_wrapper = self.base_grammar.ast_wrapper
        # TODO: Override root_type more intelligently
        self.root_type = self.base_grammar.root_type
        if base_grammar['name'] == 'python':
            self.root_type = 'mod'
        # Types with exactly one field, where that field is sequential.
        singular_types_with_single_seq_field = set(
            name for name, type_info in self.ast_wrapper.singular_types.items()
            if len(type_info.fields) == 1 and type_info.fields[0].seq)
        # All sequential fields, keyed as 'TypeName-fieldname'.
        seq_fields = {
            '{}-{}'.format(name, field.name): SeqField(name, field)
            for name, type_info in self.ast_wrapper.singular_types.items()
            for field in type_info.fields
            if field.seq
        }
        templates_by_head_type = collections.defaultdict(list)
        for template in self.templates:
            head_type = template['idiom'][0]
            # head_type can be one of the following:
            # 1. name of a constructor/product with a single seq field.
            # 2. name of any other constructor/product
            # 3. name of a seq field (e.g. 'Dict-keys'),
            #    when the containing constructor/product contains more than one field
            #    (not yet implemented)
            # For 1 and 3, the template should be treated as a 'seq fragment'
            # which can occur in any seq field of the corresponding sum/product type.
            # However, the NL2Code model has no such notion currently.
            if head_type in singular_types_with_single_seq_field:
                # field.type could be sum type or product type, but not constructor
                field = self.ast_wrapper.singular_types[head_type].fields[0]
                templates_by_head_type[field.type].append((template, SeqField(head_type, field)))
                templates_by_head_type[head_type].append((template, None))
            elif head_type in seq_fields:
                seq_field = seq_fields[head_type]
                templates_by_head_type[seq_field.field.type].append((template, seq_field))
            else:
                templates_by_head_type[head_type].append((template, None))
        types_to_replace = {}
        for head_type, templates in templates_by_head_type.items():
            constructors, seq_fragment_constructors = [], []
            for template, seq_field in templates:
                if seq_field:
                    if head_type in self.ast_wrapper.product_types:
                        seq_type = '{}_plus_templates'.format(head_type)
                    else:
                        seq_type = head_type
                    seq_fragment_constructors.append(
                        self._template_to_constructor(template, '_{}_seq'.format(seq_type), seq_field))
                else:
                    constructors.append(self._template_to_constructor(template, '', seq_field))
            # head type can be:
            # constructor (member of sum type)
            if head_type in self.ast_wrapper.constructors:
                assert constructors
                assert not seq_fragment_constructors
                self.ast_wrapper.add_constructors_to_sum_type(
                    self.ast_wrapper.constructor_to_sum_type[head_type],
                    constructors)
            # sum type
            elif head_type in self.ast_wrapper.sum_types:
                assert not constructors
                assert seq_fragment_constructors
                self.ast_wrapper.add_seq_fragment_type(head_type, seq_fragment_constructors)
            # product type
            elif head_type in self.ast_wrapper.product_types:
                # Replace Product with Constructor
                # - make a Constructor
                orig_prod_type = self.ast_wrapper.product_types[head_type]
                new_constructor_for_prod_type = asdl.Constructor(
                    name=head_type, fields=orig_prod_type.fields)
                # - remove Product in ast_wrapper
                self.ast_wrapper.remove_product_type(head_type)
                # Define a new sum type
                # Add the original product type and template as constructors
                name = '{}_plus_templates'.format(head_type)
                self.ast_wrapper.add_sum_type(
                    name,
                    asdl.Sum(types=constructors + [new_constructor_for_prod_type]))
                # Add seq fragment constructors
                self.ast_wrapper.add_seq_fragment_type(name, seq_fragment_constructors)
                # Replace every occurrence of the product type in the grammar
                types_to_replace[head_type] = name
            # built-in type
            elif head_type in self.ast_wrapper.primitive_types:
                raise NotImplementedError(
                    'built-in type as head type of idiom unsupported: {}'.format(head_type))
                # Define a new sum type
                # Add the original built-in type and template as constructors
                # Replace every occurrence of the product type in the grammar
            else:
                raise NotImplementedError('Unable to handle head type of idiom: {}'.format(head_type))
        # Replace occurrences of product types which have been used as idiom head types
        for constructor_or_product in self.ast_wrapper.singular_types.values():
            for field in constructor_or_product.fields:
                if field.type in types_to_replace:
                    field.type = types_to_replace[field.type]
        # Pre-expand each template constructor with HoleValuePlaceholders so
        # callers can inspect what a template looks like before filling it.
        self.templates_containing_placeholders = {}
        for name, constructor in self.ast_wrapper.singular_types.items():
            if not hasattr(constructor, 'template'):
                continue
            hole_values = {}
            for field in constructor.fields:
                hole_id = self.get_hole_id(field.name)
                placeholder = ast_util.HoleValuePlaceholder(id=hole_id, is_seq=field.seq, is_opt=field.opt)
                if field.seq:
                    hole_values[hole_id] = [placeholder]
                else:
                    hole_values[hole_id] = placeholder
            self.templates_containing_placeholders[name] = constructor.template(hole_values)
        if root_type is not None:
            if isinstance(root_type, (list, tuple)):
                for choice in root_type:
                    if (choice in self.ast_wrapper.singular_types or
                            choice in self.ast_wrapper.sum_types):
                        self.root_type = choice
                        break
            else:
                self.root_type = root_type
def parse(self, code, section):
if self.all_sections_rewritten or section == 'train':
return self.convert_idiom_ast(code, template_id=None)()
else:
return self.base_grammar.parse(code, section)
def unparse(self, tree, item):
expanded_tree = self._expand_templates(tree)
self.base_ast_wrapper.verify_ast(expanded_tree)
return self.base_grammar.unparse(expanded_tree, item)
def tokenize_field_value(self, field_value):
return self.base_grammar.tokenize_field_value(field_value)
#
#
#
@classmethod
def get_hole_id(cls, field):
m = re.match('^hole(\d+)$', field)
if not m:
raise ValueError('Unexpected field name: {}'.format(field))
return int(m.group(1))
def _expand_templates(self, tree):
if not isinstance(tree, dict):
return tree
node_type = tree['_type']
constructor = self.ast_wrapper.constructors.get(node_type)
expanded_fields = {}
for field, value in tree.items():
if field == '_type':
continue
if isinstance(value, (list, tuple)):
result = []
for item in value:
converted = self._expand_templates(item)
if isinstance(item, dict) and re.match('^Template\d+_.*_seq$', item['_type']):
# TODO: Handle seq fragment fields here
item_type_info = self.ast_wrapper.constructors[converted['_type']]
assert len(item_type_info.fields) == 1
assert item_type_info.fields[0].seq
result += converted.get(item_type_info.fields[0].name, [])
else:
result.append(converted)
expanded_fields[field] = result
else:
expanded_fields[field] = self._expand_templates(value)
if constructor is None or not hasattr(constructor, 'template'):
return {'_type': node_type, **expanded_fields}
template = constructor.template
hole_values = {}
for field, expanded_value in expanded_fields.items():
hole_id = self.get_hole_id(field)
# Do something special if we have a seq fragment
hole_values[hole_id] = expanded_value
return template(hole_values)
    def _template_to_constructor(self, template_dict, suffix, seq_field):
        """Build an ``asdl.Constructor`` named ``Template<id><suffix>``.

        Walks the template's idiom tree to locate holes, infers each hole's
        field type / seq / opt flags from the node where it occurs, and
        attaches the template-expansion function as ``constructor.template``.

        :param template_dict: one template entry ('idiom', 'holes', 'id').
        :param suffix: appended to the constructor name (e.g. '_<type>_seq').
        :param seq_field: SeqField for seq-fragment templates, else None.
        """
        hole_node_types = {}
        # Find where the holes occur
        stack = [(None, template_dict['idiom'], None)]
        while stack:
            parent, node, child_index = stack.pop()
            node_type, ref_symbols, hole_id, children = node
            if hole_id is not None:
                assert hole_id not in hole_node_types
                # node_type could be:
                # - name of field
                #   => hole type is same as field's type
                # - name of type, if it only has one child
                # - binarizer
                hyphenated_node_type = None
                unhyphenated_node_type = None
                hole_type_str = template_dict['holes'][hole_id]['type']
                if hole_type_str == 'AddChild':
                    node_type_for_field_type = node_type
                elif hole_type_str == 'ReplaceSelf':
                    # Two types of ReplaceSelf
                    # 1. Node has a hyphen: should be a repeated field
                    # 2. Node lacks a hyphen, and
                    #    2a. node is same as parent: a repeated field
                    #    2b. node is not the same as parent: an elem
                    if '-' in node_type:
                        node_type_for_field_type = node_type
                    else:
                        node_type_for_field_type = parent[0]
                        #if '-' in parent_type:
                        #    hyphenated_node_type = parent_type
                        #else:
                        #    unhyphenated_node_type = parent_type
                field_info = self._get_field_info_from_name(node_type_for_field_type)
                # Check for situations like
                # Call-args
                # |       \
                # List[0]  Call-args[1]
                #
                # Tuple
                # |         \
                # Tuple[0]   Tuple[1]
                # where hole 0 should not be a sequence.
                if field_info.seq and hole_type_str == 'ReplaceSelf' and '-' not in node_type:
                    assert child_index in (0, 1)
                    seq = child_index == 1
                else:
                    seq = field_info.seq
                hole_node_types[hole_id] = (field_info.type, seq, field_info.opt)
            stack += [(node, child, i) for i, child in enumerate(children)]
        # Create fields for the holes
        fields = []
        for hole in template_dict['holes']:
            i = hole['id']
            field_type, seq, opt = hole_node_types[i]
            field = asdl.Field(type=field_type, name='hole{}'.format(i), seq=seq, opt=opt)
            field.hole_type = HoleType[hole['type']]
            fields.append(field)
        constructor = asdl.Constructor('Template{}{}'.format(template_dict['id'], suffix), fields)
        constructor.template = self.convert_idiom_ast(template_dict['idiom'], template_id=template_dict['id'], seq_field=seq_field)
        return constructor
def _get_field_info_from_name(self, node_type):
if '-' in node_type:
type_name, field_name = node_type.split('-')
type_info = self.ast_wrapper.singular_types[type_name]
field_info, = [field for field in type_info.fields if field.name == field_name]
else:
type_info = self.ast_wrapper.singular_types[node_type]
assert len(type_info.fields) == 1
field_info = type_info.fields[0]
return field_info
@classmethod
def _node_type(cls, node):
if isinstance(node[0], dict):
if 'nt' in node[0]:
return node[0]['nt']
elif 'template_id' in node[0]:
return 'Template{}'.format(node[0]['template_id'])
else:
return node[0]
def convert_idiom_ast(self, idiom_ast, template_id=None, seq_fragment_type=None, seq_field=None):
if template_id is not None:
node_type, ref_symbols, hole, children = idiom_ast
else:
node_type, ref_symbols, children = idiom_ast
is_template_node = False
extra_types = []
if isinstance(node_type, dict):
if seq_fragment_type:
suffix = '_{}_seq'.format(seq_fragment_type)
else:
suffix = ''
if 'template_id' in node_type:
node_type = 'Template{}{}'.format(node_type['template_id'], suffix)
is_template_node = True
elif 'nt' in node_type and 'mt' in node_type:
extra_types = ['Template{}{}'.format(i, suffix) for i in node_type['mt']]
node_type = node_type['nt']
if seq_field is None:
field_infos = self.ast_wrapper.singular_types[node_type].fields
else:
field_infos = [seq_field.field]
# Each element of this list is a tuple (field, child)
# - field: asdl.Field object
# - child: an idiom_ast node
# If field.seq then child will be a binarizer node, or a template headed by a binarizer
# Otherwise, child will be a node whose type indicates the field's name (e.g. Call-func),
# and with a single child that contains the content of the field
children_to_convert = []
if is_template_node:
assert len(children) == len(field_infos)
for field, child in zip(field_infos, children):
if field.hole_type == HoleType.ReplaceSelf and field.seq:
children_to_convert.append((field, child))
else:
assert not field.seq
dummy_node = list(idiom_ast)
dummy_node[0] = '{}-{}'.format(node_type, field.name)
dummy_node[-1] = [child]
children_to_convert.append((field, dummy_node))
# else:
# raise ValueError('Unexpected hole_type: {}'.format(field.hole_type))
else:
fields_by_name = {f.name: f for f in field_infos}
if len(field_infos) == 0:
pass
| |
s = raw_input(prompt)
s = s.lower()
if s == 'quit':
return
elif s == 'f' or s in possible_positions:
if s != 'f':
cond = 1 #condition that this place is not already occupied.
for k in range(len(teachers)):
teacher = teachers[k]
if teacher.schedule[i][j] == s:
print "But one of our teachers (%s) is already assigned to class %s during session %i. Please retry." %(teacher.name, s, j+1)
cond = 0
break
if not cond:
continue
day_sched.append(s)
else:
print response
continue
j +=1
schedule.append(day_sched)
return schedule
def make_sched(schedule): #make this better, and add the music sign here as well (it conveys the current session).
    """Render the weekly schedule as a tab-separated text table (Python 2).

    A recess column 'r' is temporarily inserted into each day's list and
    removed afterwards, so ``schedule`` is left unchanged.  Free slots
    stay lowercase 'f'; class codes are uppercased.  The row for the
    current day (module-level ``today``) is marked with '<- today'.
    """
    sched = '%-9s: ' % 'Session'
    for i in range(1, 9):
        sched += '%i\t' % i
        if i == 4:
            sched += 'r\t'  # recess column between sessions 4 and 5
    sched += '\n'
    sched += '_' * 80
    sched += '\n\n'
    for d in schedule:
        d.insert(4, 'r')  # temporary recess marker; removed below
        day = days[schedule.index(d)]
        sched += '%-9s: ' % day
        for c in d:
            if c not in ['r', 'f']:
                s = c.upper()
            else:
                s = c
            sched += '%s\t' % s
        if today == day:
            sched = sched.strip('\t')
            sched += ' <- today'
        sched += '\n'
        d.remove('r')  # restore the caller's list
    sched = sched.strip()
    return sched
def make_tod_sched(teacher): #make sure that a fixed-width font is used, look for good symbols to represent pres_session
    """Build ``teacher.tod_sched``: a session-header + status display for today.

    The status line shows the teacher's position per session: 'f' is
    replaced by ``teacher.free_pos`` ('*' appended when the teacher is
    present, meaning free around there), '(proxy)' marks a slot where
    ``teacher.assigned`` matches the position.  The header line is padded
    with tabs derived from each status word's width (Python 2 integer
    division: ``length / 8 + 1`` tabs) and the control character ``\\x0e``
    marks the current session.  A free-note and/or absence status line is
    prefixed when applicable.
    """
    tod_sched = '\n%-7s:' %('Status')
    lwords = []
    free = False
    for i in range(8):
        position = teacher.today_sched[i]
        if position == 'f':
            word = teacher.free_pos
            if teacher.is_present:
                word += '*' # '*' denotes that teacher is free somewhere around here.
                free = True
        else:
            word = position.upper()
            if teacher.assigned[i] == position: #this would happen only for pres_teachers, unless someone has a numerical name.
                word += '(proxy)'
        word += ' ' #for neatness
        length = len(word)
        lwords += [length]
        tod_sched += '%s\t' % word
        if i == 3:
            # recess column after the fourth session
            r = teacher.recess_pos.upper()
            tod_sched += '%s\t' % r
            lwords.append(len(r))
    sessions = '%-7s:' %('Session')
    slist = []
    # NOTE(review): 'comp' is computed here but never used below — confirm
    # whether it was meant to feed the current-session marker logic.
    if pres_session > 3:
        comp = pres_session
    else:
        comp = pres_session + 1
    for i in range(1, 9):
        slist += [i]
        if i == 4:
            slist += ['RECESS']
    n = 1
    for ses in slist:
        length = lwords[n-1]
        tnums = length / 8 + 1  # Python 2 integer division: tab count per column
        if type(ses) != str: #check for non-recess sessions
            if ses < 5:
                snum = ses - 1
            else:
                snum = ses
            if snum == pres_session: #this never happens for 4
                ses = str(ses) + '\x0e'
        else:
            if pres_session == 4:
                ses += '\x0e'
        sessions += '%s' % ses
        sessions += '\t' * tnums
        n += 1
    free_message = "Note:'*' denotes that %s is free in, or somewhere around %s.\n\n" % (teacher.first_name,
                                                                                        teacher.free_pos)
    teacher.tod_sched = sessions + tod_sched
    if free: #happens only for pres_teachers
        teacher.tod_sched = free_message + teacher.tod_sched
    if not teacher.is_present:
        teacher.tod_sched = '('+teacher.status+')\n\n' + teacher.tod_sched
def dispClaMoni(claMoni, special=''): #special to be used in displaystats()
if len(claMoni) == 0:
print 'No class requires monitoring!'
return
print 'Classes which require%s monitoring:\n' % special
print 'Class\tSession(s)'
for i in claMoni.items():
g = i[0] #g for grade
pS = i[1] #p for position
pString = ''
for j in pS:
pString += str(j+1) + ', ' #added 1 to convert from computer 'language' to human 'language'
pString = pString.strip(', ')
print g + '\t' + pString
if len(claMoni) > 2:
print "\n(Total %i.)" % len(claMoni)
def attendance(): #the list thus formed can be used to obtain the chronological order of the coming of teachers
    """Interactively mark teachers present by reading names from stdin (Python 2).

    Reads names until 'quit' or until every teacher is present.  A matched
    teacher is moved from ``abs_teachers`` to ``pres_teachers`` (so
    ``pres_teachers`` preserves arrival order) and her/his status is
    trimmed.  Ambiguous first names and unknown names are rejected;
    retyping an already-present teacher's name is caught.  Exits the
    program when nobody signs in at all.
    """
    print "\nHello teachers, type your good names below to mark your attendance, and press enter after each:\n\
When finished, just type 'quit'.\n"
    l = len(teachers)
    count = 0
    while count < l:
        name = raw_input()
        name = name.lower()
        if name == 'quit':
            break
        elif name in same_first_names:
            print "Please provide your full name.\n"
            continue
        elif name not in names:
            print "Please type your name correctly. If this message persists, please consult the management.\n"
            continue
        c = 0
        # Reject names that were already marked present.
        for i in range(len(pres_teachers)):
            teacher = pres_teachers[i]
            if name.lower() == teacher.title_name.lower() or name == teacher.name.lower():
                print "Why are you retyping your name? Please let others type theirs, or type 'quit' if \
everyone has completed doing so.\n"
                c = 1
                break
        if c:
            continue
        for t in range(len(abs_teachers)):
            teacher = abs_teachers[t]
            if name == teacher.title_name.lower() or name == teacher.name.lower(): #the probability that first_name of a teacher is the (full) name of another teacher, is very less.
                teacher.is_present = True
                teacher.status = teacher.status[:3]
                pres_teachers.append(abs_teachers.pop(t))
                break
        print "Have a nice day, %s.\n" % teacher.title_name
        count += 1
    if count == l:
        print "Enough! It seems all are present today."
    elif count == 0:
        print "No one is present today!" #add additional options here
        print 'Exiting the shell.'
        exit()
def out(): #for quitting the shell
    """Pickle today's state as a Stats snapshot, plus the current time.

    The snapshot goes to module-level ``filename``; the timestamp goes to
    'log.pkl' (presumably so a later run can detect when data was last
    saved — confirm with the startup code, which is not in this chunk).
    """
    today_stats = Stats(teachers, abs_teachers, pres_teachers, proxy_teachers, comp_proxies, s_proxies, proxies, gen_proxies,
                        custom_proxies, removed_proxies, claMoni)
    with open(filename, 'wb') as today_file:
        pickle.dump(today_stats, today_file)
    with open('log.pkl', 'wb') as log_file:
        pickle.dump(datetime.datetime.now(), log_file)
def printItems(list): #only for teachers
if list == []:
print 'No teacher.'
return
if 'proxy' in list: #when the seeked teacher has a proxy at her/his present position.
print list[0].name + ' (proxy, in place of ' + list[2].title_name + ')'
return
else:
if len(list) > 2:
print "(Total %i.)\n" % len(list)
for i in range(len(list)):
print list[i].name
def pause():
print '_' * random.randint(20, 40)
def displayStats(d):
filename = 'History/' + d + '.pkl'
if os.path.exists(filename):
f = open(filename, 'rb')
stat = pickle.load(f)
f.close()
date_list = d.split('-')
day = days[datetime.datetime.weekday(datetime.date(int(date_list[0]), int(date_list[1]), int(date_list[2])))]
today_date_str = str(datetime.datetime.now().date())
if d == today_date_str: #to print today's date for today_stats.
w = '(%s)' % today_date_str
else:
w = ''
print '\nDay: %s %s' % (day, w)
pause()
print '\nPresent teachers:\n'
printItems(stat.pres_teachers)
pause()
print '\nAbsent Teachers:\n'
printItems(stat.abs_teachers)
pause()
print '\nProxies:\n'
printProxies(proxies)
pause()
if d != today_date_str:#to decide whether to use 'require' or 'required'.
sp = 'd'
else:
sp = ''
print
dispClaMoni(stat.clamoni, sp)
pause()
else:
print 'Record unavailable.'
#TODO: instead of computing some values (such as the current proxies) on
#demand, save them once per session and display the saved values on request.
def on_going():
global session, pres_session, free_teachers, s_proxies, comp_proxies, err_count
command_error = True
choice = raw_input(prompt)
choice = choice.lower()
mSession = (datetime.datetime.now() - start).seconds #mSession because it's mathematical
session = int(mSession) / (30 * 60)#consider recess' approximation here
ses = session
if ses == 4:
ses = 'recess'
elif pres_session < 4:
ses = session + 1
if session > 9:
out()
print "School time is over. Today's data has been saved. Thank you."
if pres_session != session: #notify the user if it feels good
pres_session = session
free_teachers = []
for teacher in pres_teachers:
teacher.update_status()
teacher.today_sched.insert(4, 'r')
if teacher.today_sched[session] == 'f':
free_teachers.append(teacher)
teacher.today_sched.remove('r')
for teacher in teachers:#if the user is interested in viewing the tod_scheds of absent teachers also.
make_tod_sched(teacher) #a function like update_tod_sched would probably be better for just updating.
out() #to save the day's data every time a session gets over, for safety.
if not choice:
command_error = False
if choice == "now proxies":
choice = 'proxies during ' + str(ses)
if "-" in choice:
choice_elements = choice.split("-")
if choice_elements[0] in same_first_names and len(choice_elements) == 2:
print "Please enter %s's full name." % choice_elements[0].capitalize()
command_error = False
else:
for teacher in teachers:
if choice_elements[0] == teacher.title_name.lower():
if choice_elements[1] == 'proxies':
tp = teacher.proxies
if len(tp) == 0:
print 'No proxies.'
else:
for i in range(len(tp)):
p = tp[i]
print proxyFormat(p)
command_error = False
else:
try:
if choice_elements[1] == 'gender':
f = "Don't you know even that? It's "
l = '.'
else:
f = l = ''
outp = f+str(getattr(teacher, choice_elements[1]))+l
print outp
command_error = False
except:
pass
break
if choice_elements[1] == 'proxies' and choice_elements[0] in ['custom', 'generated', 'removed']+possible_positions:
if choice_elements[0] == 'custom':
list = custom_proxies
elif choice_elements[0] == 'generated':
list = gen_proxies
elif choice_elements[0] == 'removed':
list = removed_proxies
else:
list = []
for proxy in proxies:
if proxy[2] == choice_elements[0]:
list.append(proxy)
if list == []:
print 'No proxies.'
else:
| |
stderr=subprocess.PIPE,
cwd=write_path,
universal_newlines=True,
)
self.watch(mproc)
os.remove(write_path + "/pfg_write_lef.tcl")
# Watch a running process, polling for output and updating the GUI message
# window as output arrives. Return only when the process has exited.
# Note that this process cannot handle stdin(), so any input to the process
# must be passed from a file.
def watch(self, process):
if process == None:
return
while True:
status = process.poll()
if status != None:
try:
outputpair = process.communicate(timeout=1)
except ValueError:
self.print("Process forced stop, status " + str(status))
else:
for line in outputpair[0].splitlines():
self.print(line)
for line in outputpair[1].splitlines():
self.print(line, file=sys.stderr)
break
else:
sresult = select.select([process.stdout, process.stderr], [], [], 0)[0]
if process.stdout in sresult:
outputline = process.stdout.readline().strip()
self.print(outputline)
elif process.stderr in sresult:
outputline = process.stderr.readline().strip()
self.print(outputline, file=sys.stderr)
else:
self.update_idletasks()
# Reimport the pad list by reading the top-level verilog netlist. Determine
# what pads are listed in the file, and check against the existing pad list.
# The verilog/ directory should have a .v file containing a module of the
# same name as self.project (ip-name). The .v filename should have the
# same name as well (but not necessarily). To do: Handle import of
# projects having a top-level schematic instead of a verilog netlist.
def vlogimport(self):
self.print("Importing verilog sources.")
# First find the process PDK name for this project. Read the nodeinfo.json
# file and find the list of I/O cell libraries.
techpath = self.techpath if self.techpath else self.projectpath
if os.path.exists(techpath + "/.config"):
config_dir = "/.config"
else:
config_dir = "/.ef-config"
if os.path.exists(techpath + config_dir):
self.ef_format = True
pdkpath = (
self.techpath
if self.techpath
else os.path.realpath(self.projectpath + config_dir + "/techdir")
)
nodeinfopath = pdkpath + config_dir + "/nodeinfo.json"
ioleflist = []
if os.path.exists(nodeinfopath):
self.print("Reading known I/O cell libraries from " + nodeinfopath)
with open(nodeinfopath, "r") as ifile:
itop = json.load(ifile)
if "iocells" in itop:
ioleflist = []
for iolib in itop["iocells"]:
if "/" in iolib:
# Entries <lib>/<cell> refer to specific files
libcell = iolib.split("/")
if self.ef_format:
iolibpath = pdkpath + "/libs.ref/lef/" + libcell[0]
else:
iolibpath = (
pdkpath + "/libs.ref/" + libcell[0] + "/lef/"
)
ioleflist.extend(
glob.glob(iolibpath + "/" + libcell[1] + ".lef")
)
else:
# All other entries refer to everything in the directory.
if self.ef_format:
iolibpath = pdkpath + "/libs.ref/lef/" + iolib
else:
iolibpath = pdkpath + "/libs.ref/" + iolib + "/lef/"
print(iolibpath)
ioleflist.extend(glob.glob(iolibpath + "/*.lef"))
else:
# Diagnostic
print("Cannot read PDK information file " + nodeinfopath)
# Fallback behavior: List everything in libs.ref/lef/ beginning with "IO"
if len(ioleflist) == 0:
if self.ef_format:
ioleflist = glob.glob(pdkpath + "/libs.ref/lef/IO*/*.lef")
else:
ioleflist = glob.glob(pdkpath + "/libs.ref/IO*/lef/*.lef")
if len(ioleflist) == 0:
self.print("Cannot find any I/O cell libraries for this technology")
return
# Read the LEF libraries to get a list of all available cells. Keep
# this list of cells in "celldefs".
celldefs = []
ioliblist = []
ioleflibs = []
for iolib in ioleflist:
iolibpath = os.path.split(iolib)[0]
iolibfile = os.path.split(iolib)[1]
ioliblist.append(os.path.split(iolibpath)[1])
celldefs.extend(self.read_lef_macros(iolibpath, iolibfile, "iolib"))
verilogcells = []
newpadlist = []
coredefs = []
corecells = []
corecelllist = []
lefprocessed = []
busrex = re.compile(".*\[[ \t]*([0-9]+)[ \t]*:[ \t]*([0-9]+)[ \t]*\]")
vlogpath = self.projectpath + "/verilog"
vlogfile = vlogpath + "/" + self.project + ".v"
if os.path.isfile(vlogfile):
with open(vlogfile, "r") as ifile:
vloglines = ifile.read().splitlines()
for vlogline in vloglines:
vlogparse = re.split("[\t ()]", vlogline)
while "" in vlogparse:
vlogparse.remove("")
if vlogparse == []:
continue
elif vlogparse[0] == "//":
continue
elif vlogparse[0] == "`include":
incpath = vlogparse[1].strip('"')
libpath = os.path.split(incpath)[0]
libname = os.path.split(libpath)[1]
libfile = os.path.split(incpath)[1]
# Read the verilog library for module names to match
# against macro names in celldefs.
modulelist = self.read_verilog_lib(incpath, vlogpath)
matching = list(
item for item in celldefs if item["name"] in modulelist
)
for imatch in matching:
verilogcells.append(imatch["name"])
leffile = imatch["iolib"]
if leffile not in ioleflibs:
ioleflibs.append(leffile)
# Read a corresponding LEF file entry for non-I/O macros, if one
# can be found (this handles files in the PDK).
if len(matching) == 0:
if libname != "":
# (NOTE: Assumes full path starting with '/')
lefpath = libpath.replace("verilog", "lef")
lefname = libfile.replace(".v", ".lef")
if not os.path.exists(lefpath + "/" + lefname):
leffiles = glob.glob(lefpath + "/*.lef")
else:
leffiles = [lefpath + "/" + lefname]
for leffile in leffiles:
if leffile in ioleflibs:
continue
elif leffile in lefprocessed:
continue
else:
lefprocessed.append(leffile)
lefname = os.path.split(leffile)[1]
newcoredefs = self.read_lef_macros(
lefpath, lefname, "celllib"
)
coredefs.extend(newcoredefs)
corecells.extend(
list(item["name"] for item in newcoredefs)
)
if leffiles == []:
maglefname = libfile.replace(".v", ".mag")
# Handle PDK files with a maglef/ view but no LEF file.
maglefpath = libpath.replace("verilog", "maglef")
if not os.path.exists(
maglefpath + "/" + maglefname
):
magleffiles = glob.glob(maglefpath + "/*.mag")
else:
magleffiles = [maglefpath + "/" + maglefname]
if magleffiles == []:
# Handle user ip/ files with a maglef/ view but
# no LEF file.
maglefpath = libpath.replace(
"verilog", "maglef"
)
designpath = os.path.split(self.projectpath)[0]
maglefpath = designpath + "/ip/" + maglefpath
if not os.path.exists(
maglefpath + "/" + maglefname
):
magleffiles = glob.glob(
maglefpath + "/*.mag"
)
else:
magleffiles = [
maglefpath + "/" + maglefname
]
for magleffile in magleffiles:
# Generate LEF file. Since PDK and ip/ entries
# are not writeable, write into the project mag/
# directory.
magpath = self.projectpath + "/mag"
magname = os.path.split(magleffile)[1]
magroot = os.path.splitext(magname)[0]
leffile = magpath + "/" + magroot + ".lef"
if not os.path.isfile(leffile):
self.write_lef_file(magleffile, magpath)
if leffile in ioleflibs:
continue
elif leffile in lefprocessed:
continue
else:
lefprocessed.append(leffile)
lefname = os.path.split(leffile)[1]
newcoredefs = self.read_lef_macros(
magpath, lefname, "celllib"
)
coredefs.extend(newcoredefs)
corecells.extend(
list(item["name"] for item in newcoredefs)
)
# LEF files generated on-the-fly are not needed
# after they have been parsed.
# os.remove(leffile)
# Check if all modules in modulelist are represented by
# corresponding LEF macros. If not, then go looking for a LEF
# file in the mag/ or maglef/ directory. Then, go looking for
# a .mag file in the mag/ or maglef/ directory, and build a
# LEF macro from it.
matching = list(
item["name"]
for item in coredefs
if item["name"] in modulelist
)
for module in modulelist:
if module not in matching:
lefpath = self.projectpath + "/lef"
magpath = self.projectpath + "/mag"
maglefpath = self.projectpath + "/mag"
lefname = libfile.replace(".v", ".lef")
# If the verilog file root name is not the same as
# the module name, then make a quick check for a
# LEF file with the same root name as the verilog.
# That indicates that the module does not exist in
# the LEF file, probably because it is a primary
# module that does not correspond to any layout.
leffile = lefpath + "/" + lefname
if os.path.exists(leffile):
self.print(
"Diagnostic: module "
+ module
+ " is not in "
+ leffile
+ " (probably a primary module)"
)
continue
leffile = magpath + "/" + lefname
istemp = False
if not os.path.exists(leffile):
magname = libfile.replace(".v", ".mag")
magfile = magpath + "/" + magname
if os.path.exists(magfile):
self.print(
"Diagnostic: Found a .mag file for "
+ module
+ " in "
+ magfile
)
self.write_lef_file(magfile)
istemp = True
else:
magleffile = maglefpath + "/" + lefname
if not os.path.exists(magleffile):
self.print(
"Diagnostic: (module "
+ module
+ ") has no LEF file "
+ leffile
+ " or "
+ magleffile
)
magleffile = maglefpath + "/" + magname
if os.path.exists(magleffile):
self.print(
"Diagnostic: Found a .mag file for "
+ module
+ " in "
+ magleffile
)
if os.access(maglefpath, os.W_OK):
self.write_lef_file(magleffile)
leffile = magleffile
istemp = True
else:
self.write_lef_file(
magleffile, magpath
)
else:
self.print(
"Did not find a file " + magfile
)
# self.print('Warning: module ' + module + ' has no LEF or .mag views')
pass
else:
self.print(
"Diagnostic: Found a LEF file for "
+ module
+ " in "
+ magleffile
)
leffile = magleffile
else:
self.print(
"Diagnostic: Found a LEF file | |
# tools/finetune_RT_NMR_iou.py (repo: tyunist/Kaggle_PKU_Baidu)
"""
Finding camera parameters R, T
Image per batch --> this is not likely to work!!!
"""
import mmcv
from mmcv import imwrite, imread
from skimage import color
import shutil
import glob
import os
import torch
import torch.nn as nn
import numpy as np
from skimage.io import imsave
import pycocotools.mask as maskUtils
import cv2
import neural_renderer as nr
from mmdet.datasets.kaggle_pku_utils import quaternion_to_euler_angle, euler_to_Rot, rot2eul
from mmdet.datasets.car_models import car_id2name
from mmdet.utils import RotationDistance, TranslationDistance
import imageio
def make_gif(filename, dir_tmp, remove_png=False):
    """Stitch the '_tmp_*.png' frames in *dir_tmp* into a GIF at *filename*.

    Frames are appended in sorted name order (the frames are written as
    '_tmp_%04d.png', so this is epoch order). When *remove_png* is True,
    each frame file is deleted right after it is appended.
    """
    with imageio.get_writer(filename, mode='I') as writer:
        # Distinct loop variable: the original rebound *filename*, shadowing
        # the GIF path parameter inside the loop.
        for frame_path in sorted(glob.glob(os.path.join(dir_tmp, '_tmp_*.png'))):
            writer.append_data(imageio.imread(frame_path))
            if remove_png:
                os.remove(frame_path)
    # No explicit writer.close(): the context manager already closes it.
class Model(nn.Module):
    """Differentiable mesh renderer wrapped as a module for pose refinement.

    Renders a car mesh under pose (R, t) with neural_renderer and compares
    the rendered silhouette against a reference mask; forward() returns the
    negative IoU as the loss, so minimizing it maximizes mask overlap.
    Depending on the fix_* flags, R, t and/or the light parameters become
    trainable nn.Parameters on the renderer; the rest are fixed buffers.

    NOTE(review): light_direction=[1, -2, 1] is a mutable default argument;
    harmless as long as callers never mutate it in place.
    """
    def __init__(self,
                 vertices,
                 faces,
                 Rotation_Matrix,
                 T,
                 euler_angle,
                 mask_full_size,
                 masked_grayscale_img,
                 camera_matrix,
                 image_size,
                 loss_thresh=0.9,
                 fix_rot=False,
                 fix_trans=False,
                 fix_light_source=True,
                 light_intensity_directional=0.1,
                 light_intensity_ambient=0.1,
                 light_direction=[1, -2, 1]
                 ):
        """Build the renderer and register mesh/pose/reference tensors.

        :param vertices: mesh vertices, numpy float array.
        :param faces: mesh face indices, numpy int array.
        :param Rotation_Matrix: initial rotation matrix (batched).
        :param T: initial translation (batched; T.shape[0] is batch size).
        :param euler_angle: euler angles matching Rotation_Matrix, kept for
            reporting the change after optimization.
        :param mask_full_size: reference (GT) binary mask, numpy array.
        :param masked_grayscale_img: grayscale image masked to the car
            (registered as a buffer; only used by external debug code here).
        :param camera_matrix: 3x3 intrinsics, numpy array.
        :param image_size: render size passed to nr.Renderer.
        :param loss_thresh: IoU threshold; stored negated because the loss
            is negative IoU.
        :param fix_rot/fix_trans/fix_light_source: when True the quantity is
            a fixed buffer, otherwise a trainable nn.Parameter.
        """
        super(Model, self).__init__()
        # Mesh tensors live on the GPU; register_buffer keeps them out of
        # the optimizer's parameter list.
        vertices = torch.from_numpy(vertices.astype(np.float32)).cuda()
        faces = torch.from_numpy(faces.astype(np.int32)).cuda()
        self.register_buffer('vertices', vertices)
        self.register_buffer('faces', faces)
        # Keep the initial pose around so callers can report the delta.
        self.translation_original = T
        self.euler_original = euler_angle
        self.fix_rot = fix_rot
        self.fix_trans = fix_trans
        self.fix_light_source = fix_light_source
        # create textures (uniform white, 1x1x1 texels per face)
        texture_size = 1
        textures = torch.ones(T.shape[0], self.faces.shape[1], texture_size, texture_size, texture_size, 3,
                              dtype=torch.float32)
        self.register_buffer('textures', textures)
        # we set the loss threshold to stop perturbation
        self.mask_full_size = mask_full_size
        self.mask_sum = self.mask_full_size.sum()
        # Negated: forward() returns -IoU, so 'loss < loss_thresh' means
        # 'IoU > threshold'.
        self.loss_thresh = -loss_thresh
        image_ref = torch.from_numpy(mask_full_size.astype(np.float32))
        self.register_buffer('image_ref', image_ref)
        masked_grayscale_img = torch.from_numpy(masked_grayscale_img.astype(np.float32))
        self.register_buffer('masked_grayscale_img', masked_grayscale_img)
        # camera parameters
        self.register_buffer('K', torch.from_numpy(camera_matrix))
        # initialise the renderer
        renderer = nr.Renderer(image_size=image_size,
                               orig_size=image_size,
                               camera_mode='projection',
                               K=camera_matrix[None, :, :])
        # Pose: buffer (frozen) or nn.Parameter (optimized) per the flags.
        if fix_rot:
            R = torch.from_numpy(np.array(Rotation_Matrix, dtype=np.float32)).cuda()
            self.register_buffer('R', R)
            renderer.R = R
        else:
            renderer.R = nn.Parameter(torch.from_numpy(np.array(Rotation_Matrix, dtype=np.float32)))
        if fix_trans:
            t = torch.from_numpy(np.array(T, dtype=np.float32)).cuda()
            self.register_buffer('t', t)
            renderer.t = t
        else:
            renderer.t = nn.Parameter(torch.from_numpy(np.array(T, dtype=np.float32)))
        # Lighting: same fixed-vs-learnable treatment.
        if fix_light_source:
            renderer.light_intensity_directional = torch.from_numpy(
                np.array(light_intensity_directional, dtype=np.float32)).cuda()
            renderer.light_intensity_ambient = torch.from_numpy(
                np.array(light_intensity_ambient, dtype=np.float32)).cuda()
            renderer.light_direction = torch.from_numpy(np.array(light_direction, dtype=np.float32)).cuda()
        else:
            renderer.light_intensity_directional = nn.Parameter(
                torch.from_numpy(np.array(light_intensity_directional, dtype=np.float32)))
            renderer.light_intensity_ambient = nn.Parameter(
                torch.from_numpy(np.array(light_intensity_ambient, dtype=np.float32)))
            renderer.light_direction = nn.Parameter(torch.from_numpy(np.array(light_direction, dtype=np.float32)))
        self.renderer = renderer
    def forward(self):
        """Render the mesh and return (negative-IoU loss, rgb render).

        The silhouette render is compared against the reference mask with a
        soft IoU; the loss is its negation so gradient descent increases
        overlap. The rgb render is returned for visualization only.
        """
        image_rgb = self.renderer(self.vertices, self.faces, self.textures, mode='rgb')
        image = self.renderer(self.vertices, self.faces, mode='silhouettes')
        # Soft intersection / union over the rendered silhouette vs mask.
        interception = torch.sum(torch.abs(image * self.image_ref[None, :, :]))
        union = torch.sum(image) + torch.sum(self.image_ref) - interception
        loss = - interception / union
        # loss = torch.sum((image - self.image_ref[None, :, :]) ** 2)
        return loss, image_rgb
def make_reference_image(filename_ref, filename_obj):
    """Render a reference image of a model to *filename_ref*.

    NOTE(review): Model.__init__ in this file takes many required
    positional arguments, so Model(filename_obj) raises TypeError at call
    time -- this helper looks like dead code carried over from the
    neural_renderer examples; nothing in the visible file calls it.
    """
    model = Model(filename_obj)
    model.cuda()
    images, _, _ = model.renderer.render(model.vertices, model.faces, torch.tanh(model.textures))
    image = images.detach().cpu().numpy()[0]
    imsave(filename_ref, image)
def get_updated_RT(vertices,
                   faces,
                   Rotation_Matrix,
                   T,
                   euler_angle,
                   mask_full_size,
                   masked_grayscale_img,
                   camera_matrix,
                   image_size,
                   loss_RT=0.1,  # Greyscale difference
                   num_epochs=50,
                   draw_flag=False,
                   output_gif=None,
                   lr=0.05,
                   fix_rot=False,
                   fix_trans=False,
                   fix_light_source=True):
    """Refine one car's pose by SGD on the negative silhouette IoU.

    Builds a Model around the mesh/pose/mask, runs up to *num_epochs* SGD
    steps, and returns the pose that achieved the best (max) IoU:
    (best_translation, best_euler_angles). Optimization stops early once
    the IoU exceeds *loss_RT* (stored negated on the model).

    When *draw_flag* is True, one annotated frame per epoch is written to
    a directory derived from *output_gif* (path minus '.gif'), progress is
    printed, and the frames are stitched into *output_gif* at the end.

    NOTE(review): with num_epochs == 0 the loop never runs and
    best_translation/best_rot_matrix are unbound -> NameError at return;
    assumed callers always pass num_epochs >= 1.
    """
    model = Model(vertices,
                  faces,
                  Rotation_Matrix,
                  T,
                  euler_angle,
                  mask_full_size,
                  masked_grayscale_img,
                  camera_matrix,
                  image_size=image_size,
                  loss_thresh=loss_RT,
                  fix_rot=fix_rot,
                  fix_trans=fix_trans,
                  fix_light_source=fix_light_source
                  )
    if draw_flag:
        # List which quantities are actually trainable this run.
        for name, param in model.named_parameters():
            if param.requires_grad:
                print(name, param.data)
    model.cuda()
    #optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    optimizer = torch.optim.SGD(model.parameters(), lr=lr)
    # optimizer_trans = torch.optim.Adam(model.renderer.t, lr=lr)
    # optimizer_eular_angle = torch.optim.Adam(model.renderer.R, lr=lr*lr_angle_ratio)
    if draw_flag:  # set up a fresh frame directory for the GIF
        if not os.path.isdir(output_gif[:-4]):
            os.mkdir(output_gif[:-4])
        else:
            # remove old files and create new empty dir
            shutil.rmtree(output_gif[:-4])
            os.mkdir(output_gif[:-4])
    # We only keep the best max IoU Result
    max_iou = -1
    for i in range(num_epochs):
        optimizer.zero_grad()
        loss, image = model()
        loss.backward()
        optimizer.step()
        # loss is -IoU, so -loss.item() is the IoU of this step.
        if -loss.item() > max_iou:
            best_translation = model.renderer.t.detach().cpu().numpy()[0]
            best_rot_matrix = model.renderer.R.detach().cpu().numpy()[0]
            max_iou = -loss.item()
        if draw_flag:  # save an annotated frame and print progress
            image = image.detach().cpu().numpy()[0].transpose(1, 2, 0)
            image = image / image.max()
            # Overlay the reference mask in the green channel.
            image[:, :, 1] += model.image_ref.detach().cpu().numpy()[0] * 0.5
            # Paste the (bottom-half) render back into a full-size canvas.
            image_all = np.zeros((2710, 3384, 3))
            image_all[1480:, :, :] = image
            image_all *= 255
            label_text = loss.detach().cpu().numpy()
            image_all = cv2.putText(image_all, '%.3f' % -label_text, (500, 500), cv2.FONT_HERSHEY_SIMPLEX, fontScale=15,
                                    color=(255,255,255), thickness=20)
            imsave(os.path.join(output_gif[:-4], '_tmp_%04d.png' % i), image_all)
            ### we print some updates (disabled debug dump below)
            if False:
                image_ref = model.masked_grayscale_img.detach().cpu().numpy()
                imwrite(image_ref * 255, '/data/Kaggle/wudi_data/NMR_images/ref.jpg')
                imwrite(image * 255, '/data/Kaggle/wudi_data/NMR_images/r1.jpg')
            print('Optimizing (loss %.4f)' % loss.data)
            updated_translation = model.renderer.t.detach().cpu().numpy()[0]
            original_translation = model.translation_original
            changed_dis = TranslationDistance(original_translation, updated_translation, abs_dist=False)
            print('Origin translation: %s - > updated tranlsation: %s. Changed distance: %.4f' % (
                np.array2string(np.array(original_translation)), np.array2string(updated_translation), changed_dis))
            if not fix_rot:
                rot_matrix = model.renderer.R.detach().cpu().numpy()[0]
                updated_euler_angle = rot2eul(rot_matrix, model.euler_original)
                changed_rot = RotationDistance(model.euler_original, updated_euler_angle)
                print('Origin eular angle: %s - > updated eular angle: %s. Changed rot: %.4f'
                      % (np.array2string(np.array(model.euler_original)), np.array2string(updated_euler_angle),
                         changed_rot))
        # Early stop: loss < -loss_RT means IoU already above the threshold.
        if loss.item() < model.loss_thresh:
            break
    if draw_flag:
        make_gif(output_gif, output_gif[:-4])
    return best_translation, rot2eul(best_rot_matrix, model.euler_original)
def finetune_RT(output,
dataset,
loss_grayscale_light=0.05,
loss_grayscale_RT=0.05,
loss_IoU=0.9,
num_epochs=50,
draw_flag=True,
lr=0.05, # lr=0.05,
conf_thresh=0.8,
tmp_save_dir='/data/Kaggle/wudi_data/tmp_output/',
fix_rot=True,
num_car_for_light_rendering=2):
"""
We first get the lighting parameters: using 2 cars gray scale,
then use grayscale loss and IoU loss to update T, and R(optional)
:param outputs:
:param dataset:
:param loss_grayscale_light:
:param loss_grayscale_RT: default: 0.05 is a good guess
:param loss_IoU:
:param num_epochs: num epochs for both lighting and R,T
:param draw_flag:
:param lr:
:param conf_thresh: confidence threshold for NMR process from bboxes, if lower, we will not process
this individual car--> because we don't care and accelerate the learning process
:param tmp_save_dir: tmp saving directory for plotting .gif images
:param fix_rot: fix rotation, if set to True, we will not learn rotation
:param fix_trans: fix translation, if set to True, we will not learn translation--> most likely we are
learning the lighting is set to True
:param fix_light_source: fix light source parameters if set to True
:param num_car_for_light_rendering: default is 2 (consume 9 Gb GPU memory),
for P100, we could use 3.
We use the closest (smallest z) for rendering
because the closer, the bigger car and more grayscale information.
:return: the modified outputs
"""
CAR_IDX = 2
output_gif = None
outputs_update = [output].copy()
camera_matrix = dataset.camera_matrix.copy()
camera_matrix[1, 2] -= 1480 # Because we have only bottom half
# First we collect all the car instances info. in an image
bboxes, segms, six_dof = output[0], output[1], output[2]
car_cls_score_pred = six_dof['car_cls_score_pred']
quaternion_pred = six_dof['quaternion_pred']
trans_pred_world = six_dof['trans_pred_world']
car_labels = np.argmax(car_cls_score_pred, axis=1)
kaggle_car_labels = [dataset.unique_car_mode[x] for x in car_labels]
car_names = [car_id2name[x].name for x in kaggle_car_labels]
euler_angles = np.array([quaternion_to_euler_angle(x) for x in quaternion_pred])
conf = output[0][CAR_IDX][:, -1] # output [0] is the bbox
conf_list = conf > conf_thresh
# We choose the closest z two cars
idx_conf = np.array([False] * len(conf)) # We choose only one car
lighting_count = 0
for close_idx in np.argsort(trans_pred_world[:, -1]):
if conf_list[close_idx]:
idx_conf[close_idx] = True
lighting_count += 1
if lighting_count >= num_car_for_light_rendering:
break
# Di Wu parrallise the code as below for one image per GPU
rgb_image = imread(output[2]['file_name'])
# convert the rgb image to grayscale
grayscale_image = color.rgb2gray(rgb_image)
vertices_img = []
max_vertices = 0
faces_img = []
# there are in total 4999-5000 faces... we choose 4999 faces, for some car, not rendering one
# face should be alright.
min_faces = 4999
Rotation_Matrix_img = []
T_img = []
euler_angles_img = []
mask_img = []
for car_idx in range(len(quaternion_pred)):
# The the HTC predicted Mask which is served as the GT Mask
segms_car = segms[CAR_IDX][car_idx]
mask = maskUtils.decode(segms_car)
# Get car mesh--> vertices and faces
car_name = car_names[car_idx]
vertices = np.array(dataset.car_model_dict[car_name]['vertices'])
vertices[:, 1] = -vertices[:, 1]
faces = np.array(dataset.car_model_dict[car_name]['faces']) - 1
# Get prediction of Rotation Matrix and Translation
ea = euler_angles[car_idx]
yaw, pitch, roll = ea[0], ea[1], ea[2]
yaw, pitch, roll = -pitch, -yaw, -roll
Rotation_Matrix = euler_to_Rot(yaw, pitch, roll).T
T = trans_pred_world[car_idx]
vertices_img.append(vertices)
max_vertices = max(vertices.shape[0], max_vertices)
faces_img.append(faces)
min_faces = min(faces.shape[0], min_faces)
Rotation_Matrix_img.append(Rotation_Matrix)
T_img.append(T)
euler_angles_img.append(np.array([yaw, pitch, roll]))
mask_img.append(mask)
Rotation_Matrix_img = np.stack(Rotation_Matrix_img)
T_img = np.stack(T_img)
euler_angles_img = np.stack(euler_angles_img)
mask_img = np.stack(mask_img)
masked_grayscale_img = mask_img[idx_conf].sum(axis=0) * grayscale_image[1480:, :]
masked_grayscale_img = masked_grayscale_img / masked_grayscale_img.max()
# For vertices and faces each car will generate different
vertices_img_all = np.zeros((len(vertices_img), max_vertices, 3))
faces_img_all = np.zeros((len(faces_img), min_faces, 3))
for i in range(len(vertices_img)):
vertices_img_all[i, :vertices_img[i].shape[0], :] = vertices_img[i]
faces_img_all[i, :, :] = faces_img[i][:min_faces, :]
if draw_flag:
output_gif = tmp_save_dir + '/' + output[2]['file_name'].split('/')[-1][:-4] + '.gif'
# Now we start to fine tune R, T
for i, true_flag in enumerate(conf_list):
if true_flag:
if draw_flag:
output_gif = tmp_save_dir + '/' + output[2]['file_name'].split('/')[-1][:-4] + '_' + str(i) + '.gif'
# Now we consider only one masked grayscale car
masked_grayscale_car = mask_img[i] * grayscale_image[1480:, :]
# masked_grayscale_car = masked_grayscale_car / masked_grayscale_car.max()
T_update, ea_update = get_updated_RT(vertices=vertices_img_all[None, i],
faces=faces_img_all[None, i],
Rotation_Matrix=Rotation_Matrix_img[None, i],
T=T_img[None, i],
euler_angle=euler_angles_img[i],
mask_full_size=mask_img[None, i],
masked_grayscale_img=masked_grayscale_car,
camera_matrix=camera_matrix,
image_size=(3384, 2710 - 1480),
loss_RT=loss_IoU,
num_epochs=num_epochs,
draw_flag=draw_flag,
output_gif=output_gif,
lr=lr,
fix_rot=fix_rot)
if fix_rot:
# we don't change the euler angle here
R_update = -euler_angles_img[i][1], -euler_angles_img[i][0], -euler_angles_img[i][2]
else:
# We need to reverse here
R_update = -ea_update[1], -ea_update[0], -ea_update[2]
# outputs_update is a list of length 0
outputs_update[0][2]['trans_pred_world'][i] = T_update
euler_angles[i] = R_update
if not fix_rot:
outputs_update[0][2]['euler_angle'] = euler_angles
if not os.path.exists(tmp_save_dir):
os.mkdir(tmp_save_dir)
output_name = tmp_save_dir | |
# reasoning_layers/mac_layer.py
import tensorflow as tf
from tensorflow.contrib.layers import fully_connected
from my.tensorflow.nn import linear_logits, get_logits, softsel
from tensorflow.python.ops import tensor_array_ops, control_flow_ops
from my.tensorflow.rnn import bidirectional_dynamic_rnn
from my.tensorflow.rnn_cell import SwitchableDropoutWrapper, AttentionCell
from tensorflow.contrib.rnn import BasicLSTMCell
from reasoning_layers.utils import biattention_layer
def dynamic_mac_rnn(cell, context, query, q_len, c_mask, q_mask, q_sub_st=None, context_st=None, query_st=None, cdoc_mask=None, candidates=None, cand_mask=None, greedy_read=False):
    """Dispatch to the flat or hierarchical MAC recurrence.

    The flat path is taken when no document-level mask (cdoc_mask) is
    supplied; otherwise *cell* must be a HierarchicalAttnMACRnn and the
    document-level states are forwarded along with the token-level inputs.
    """
    flat = cdoc_mask is None
    if flat:
        assert context_st is None
        return cell.apply(context, query, q_len, c_mask, q_mask,
                          q_sub_st=q_sub_st, candidates=candidates, cand_mask=cand_mask)
    assert context_st is not None and q_sub_st is not None
    assert isinstance(cell, HierarchicalAttnMACRnn)
    return cell.apply(context, context_st, query, query_st, q_sub_st, q_len,
                      c_mask, cdoc_mask, q_mask, candidates=candidates,
                      cand_mask=cand_mask, greedy_read=greedy_read)
class MACRnn(object):
"""
This class implements a standard MAC RNN (https://arxiv.org/abs/1803.03067) adapted for multi-hop qa.
"""
def __init__(self, batch_size, context_dim, query_dim, hidden_dim=80, num_hops=6, bidirectional_input_unit=False, prediction='span-single', \
reuse_cell=True, is_train=None, use_control_unit=True, mode="train", output_unit_type='similarity', reasoning_unit='answer_unit', \
answer_state_update_rule='mlp'):
"""
num_hops: the number of mac cell chained together, or number of reasoning steps.
bidriectional_input_unit: use bi-lstm for input unit. Default to false to save memory.
prediction: prediction layer. Could be 'span-single/dual', 'candidates'
reuse_cell: use one single cell for all reasoning steps. (not sure what Hudson and Mannning did.)
"""
self.batch_size = batch_size
self.hidden_dim = hidden_dim
self.context_dim = context_dim
self.query_dim = query_dim
self.num_hops = num_hops
self.bidirectional_input_unit = bidirectional_input_unit
self.prediction = prediction
self.reuse_cell = reuse_cell
self.is_train = is_train
self.use_control_unit = use_control_unit
self.mode = mode
self.output_unit_type = output_unit_type
self.reasoning_unit = reasoning_unit
self.answer_state_update_rule = answer_state_update_rule
self.top_attn = []
    def apply(self, context, query, q_len, c_mask, q_mask, candidates=None, cand_mask=None, q_sub_st=None):
        """Run num_hops MAC steps over one context and emit predictions.

        Encodes the query with the input unit, chains num_hops MAC cells
        (sharing variables when self.reuse_cell), then feeds the final
        memory state to the output unit. The return shape depends on
        self.prediction: candidate logits, a (start, end) pair, or a
        (logits, raw-logits) pair for 'span-single'.

        NOTE(review): q_sub_st is accepted for interface compatibility
        with the hierarchical variant but is unused here.
        """
        batch_size = self.batch_size
        hidden_dim = self.hidden_dim
        query_dim = self.query_dim
        reuse_cell = self.reuse_cell
        # Drop the singleton document axis -- assumes context/candidates
        # arrive as [batch, 1, len, dim]; TODO confirm against callers.
        context = tf.squeeze(context, axis=1)
        if candidates is not None:
            candidates = tf.squeeze(candidates, axis=1)
        # Control and memory states start at zero.
        c_state = tf.zeros((batch_size, hidden_dim))
        m_state = tf.zeros((batch_size, hidden_dim))
        with tf.variable_scope('MACRnn'):
            query, q_rep = self.MACInputUnit(query, q_len)
            c_history = []
            m_history = []
            for i in range(self.num_hops):
                if reuse_cell:
                    # One shared scope; reuse variables after the first hop.
                    scope_str = 'MACRnn-layer-%d' % 0
                    c_state, m_state = self.MACCell(i, query, q_rep, context, c_mask, q_mask, c_history, m_history, \
                                                    c_state, m_state, scope_str, reuse=(i!=0))
                else:
                    # Fresh variables per hop.
                    scope_str = 'MACRnn-layer-%d' % i
                    c_state, m_state = self.MACCell(i, query, q_rep, context, c_mask, q_mask, c_history, m_history, \
                                                    c_state, m_state, scope_str, reuse=False)
                c_history.append(c_state)
                m_history.append(m_state)
            if self.prediction == 'candidates':
                g1 = self.MACOutputUnit(m_state, context, candidates)
                return tf.expand_dims(g1, axis=1)
            elif self.prediction == 'span-dual':
                g1, g2 = self.MACOutputUnit(m_state, context)
                return tf.expand_dims(g1, axis=1), tf.expand_dims(g2, axis=1)
            else:
                assert self.prediction == 'span-single'
                g1, logits = self.MACOutputUnit(m_state, context)
                return tf.expand_dims(g1, axis=1), logits
    def MACInputUnit(self, query, query_len, reuse=False):
        """Re-encode the query with an (optionally bidirectional) LSTM.

        Inputs: encoded query tensor and its lengths.
        Outputs: the query re-encoded by another LSTM, plus the final LSTM
        state as a fixed-size representation of the whole query. In the
        bidirectional case both outputs are projected from 2*hidden_dim
        back down to hidden_dim.
        """
        with tf.variable_scope('input_unit', initializer=tf.random_uniform_initializer, reuse=reuse):
            hidden_dim = self.hidden_dim
            if self.bidirectional_input_unit is True:
                cell_fw = tf.contrib.rnn.LSTMCell(hidden_dim, state_is_tuple=True)
                cell_bw = tf.contrib.rnn.LSTMCell(hidden_dim, state_is_tuple=True)
                (encoder_outputs, (fw_st, bw_st)) = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, query, \
                    dtype=tf.float32, sequence_length=query_len, swap_memory=True)
                # Concatenate forward/backward outputs and final cell states.
                query_embed = tf.concat(axis=2, values=encoder_outputs)
                query_rep = tf.concat([fw_st.c, bw_st.c], axis=1)
                # Linear projections: 2*hidden_dim -> hidden_dim.
                W_emb = tf.get_variable('W_emb', [2*hidden_dim, hidden_dim])
                b_emb = tf.get_variable('b_emb', [hidden_dim])
                W_rep = tf.get_variable('W_rep', [2*hidden_dim, hidden_dim])
                b_rep = tf.get_variable('b_rep', [hidden_dim])
                query_embed = tf.einsum('ijk,kl->ijl', query_embed, W_emb) + b_emb
                query_rep = tf.matmul(query_rep, W_rep) + b_rep
            else:
                # Unidirectional: final cell state is the query summary.
                cell_fw = tf.contrib.rnn.LSTMCell(hidden_dim, state_is_tuple=True)
                query_embed, final_st = tf.nn.dynamic_rnn(cell_fw, query, dtype=tf.float32, \
                    sequence_length=query_len)
                query_rep = final_st.c
        return query_embed, query_rep
    def MACCell(self, layer: int, cw, q, k, c_mask, q_mask, c_history, m_history, c_state, m_state, scope_str, reuse=False):
        """One MAC reasoning step: control, read and write units.

        :param layer: 0-based hop index; hops > 0 may attend over the
            control/memory histories.
        :param cw: contextual query words; q: fixed-size query summary;
            k: the context (knowledge base).
        :param c_history/m_history: lists of prior control/memory states
            (stacked into tensors here when layer > 0).
        :returns: (new_c_state, new_m_state).

        NOTE(review): c_mask/q_mask are accepted but not applied inside the
        attention softmaxes here -- confirm padding positions are handled
        upstream.
        """
        hidden_dim = self.hidden_dim
        context_dim = self.context_dim
        query_dim = self.query_dim
        def control_unit():
            # Attend over query words conditioned on [c_state; q] to produce
            # the new control state (a query-word summary).
            with tf.variable_scope('control_unit'):
                W_cq = tf.get_variable('W_cq', [2*hidden_dim, hidden_dim])
                b_cq = tf.get_variable('b_cq', [hidden_dim])
                cq = tf.matmul(tf.concat([c_state, q], axis=1), W_cq) + b_cq
                W_ca = tf.get_variable('W_ca', [hidden_dim, 1])
                b_ca = tf.get_variable('b_ca', [1])
                # 'ik,ijk->ijk' broadcasts cq over query positions.
                ca = tf.squeeze(tf.einsum('ijk,kl->ijl', tf.einsum('ik,ijk->ijk', cq, cw), W_ca), axis=2) + b_ca
                cv = tf.nn.softmax(ca)
                return tf.einsum('ijk,ij->ik', cw, cv)
        def read_unit(new_c_state):
            """
            Attend over the context conditioned on memory and the new
            control state. Does not include the I' in the original MAC
            paper.
            """
            with tf.variable_scope('read_unit'):
                W_m = tf.get_variable('W_m', [hidden_dim, hidden_dim])
                b_m = tf.get_variable('b_m', [hidden_dim])
                W_k = tf.get_variable('W_k', [context_dim, hidden_dim])
                b_k = tf.get_variable('b_k', [hidden_dim])
                I = tf.einsum('il,ijl->ijl', tf.matmul(m_state, W_m) + b_m, tf.einsum('ijk,kl->ijl', k, W_k) + b_k) # [batch_size, context_len, hidden_dim]
                W_ra = tf.get_variable('W_ra', [hidden_dim, 1])
                b_ra = tf.get_variable('b_ra', [1])
                ra = tf.squeeze(tf.einsum('ijk,kl->ijl', tf.einsum('ik,ijk->ijk', new_c_state, I), W_ra), axis=2) + b_ra
                rv = tf.nn.softmax(ra)
                return tf.einsum('ijk,ij->ik', k, rv)
        def write_unit(r, new_c_state):
            # Fold the read vector into memory; from hop 1 on, also attend
            # over past memories, then gate old vs new memory.
            with tf.variable_scope('write_unit'):
                W_m = tf.get_variable('W_m', [context_dim + hidden_dim, hidden_dim])
                b_m = tf.get_variable('b_m', [hidden_dim])
                m_prev = tf.matmul(tf.concat([r, m_state], axis=1), W_m) + b_m
                # NOTE(review): if layer == 0 and reuse_cell is False, this
                # branch is skipped and m_prime below is unbound -> NameError
                # when the graph is built; assumed reuse_cell is True in that
                # configuration.
                if layer > 0 or self.reuse_cell:
                    W_c = tf.get_variable('W_c', [hidden_dim, 1])
                    b_c = tf.get_variable('b_c', [1])
                    #sa = tf.nn.softmax(tf.squeeze(tf.einsum('ijk,kl->ijl', tf.multiply(new_c_state, c_history), W_c), axis=2))
                    W_s = tf.get_variable('W_s', [hidden_dim, hidden_dim])
                    W_p = tf.get_variable('W_p', [hidden_dim, hidden_dim])
                    b = tf.get_variable('b', [hidden_dim])
                    if layer > 0:
                        # Self-attention over prior control states selects
                        # which past memories to blend in.
                        sa = tf.nn.softmax(tf.squeeze(tf.einsum('ijk,kl->ijl', tf.einsum('ik,ijk->ijk', new_c_state, c_history), W_c) + b_c, axis=2))
                        m_sa = tf.einsum('ijk,ij->ik', m_history, sa)
                        m_prime = tf.matmul(m_sa, W_s) + tf.matmul(m_prev, W_p) + b
                    else:
                        m_prime = tf.matmul(m_prev, W_p) + b
                # Scalar gate from the control state: keep old memory vs
                # accept the candidate m_prime.
                W_c_2 = tf.get_variable('W_c_2', [hidden_dim, 1])
                b_c_2 = tf.get_variable('b_c_2', [1])
                c_prime = tf.matmul(new_c_state, W_c_2) + b_c_2
                return tf.nn.sigmoid(c_prime) * m_state + (1 - tf.nn.sigmoid(c_prime)) * m_prime
        # Histories become [batch, layer, hidden] tensors for attention.
        if layer > 0:
            c_history = tf.stack(c_history, axis=1)
            m_history = tf.stack(m_history, axis=1)
        with tf.variable_scope(scope_str, reuse=reuse) as scope:
            new_c_state = control_unit()
            new_m_state = write_unit(read_unit(new_c_state), new_c_state)
        return new_c_state, new_m_state
def MACOutputUnit(self, m_state, context, candidates=None, query=None, reuse=False):
    """Map the final MAC memory state to answer scores.

    Behavior depends on self.prediction:
      * 'candidates': score each candidate answer; returns a single tensor g1
        whose form depends on self.output_unit_type.
      * otherwise (span prediction): returns a pair — (g1, g2) for 'span-dual',
        else (g1, ra) where ra is a per-position scalar score over the context.

    :param m_state: final memory state, [batch_size, hidden_dim].
    :param candidates: candidate representations; assumed last dim == context_dim
        (cand_dim is hard-wired to context_dim below) — TODO confirm.
    :param query: required only for output_unit_type == 'triplet-mlp'.
    :param reuse: reuse the 'output_unit' variable scope.
    """
    hidden_dim = self.hidden_dim
    context_dim = self.context_dim
    with tf.variable_scope('output_unit', reuse=reuse):
        if self.prediction == 'candidates':
            assert candidates is not None
            cand_dim = context_dim
            #cand_dim = candidates.get_shape()[-1]
            if self.output_unit_type == 'similarity':
                # Elementwise interaction between projected memory and each
                # projected candidate; g1 keeps the per-candidate feature axis.
                W_m = tf.get_variable('W_m', [hidden_dim, hidden_dim])
                b_m = tf.get_variable('b_m', [hidden_dim])
                M = tf.matmul(m_state, W_m) + b_m
                W_k = tf.get_variable('W_k', [cand_dim, hidden_dim])
                b_k = tf.get_variable('b_k', [hidden_dim])
                I = tf.einsum('ijk,kl->ijl', candidates, W_k) + b_k
                g1 = tf.einsum('ik,ijk->ijk', M, I)
            elif self.output_unit_type == 'nested-triplet-mlp':
                num_cand = tf.shape(candidates)[1]
                if self.reasoning_unit == 'bi-attn' or self.reasoning_unit == 'attention-lstm' or self.reasoning_unit == 'concat_first_sent' or self.reasoning_unit == 'concat_full_doc':
                    # These reasoning units leave m_state in candidate space, so
                    # the similarity is computed without a projection of candidates.
                    similarity = tf.einsum('ik,ijk->ijk', m_state, candidates)
                    M = tf.tile(tf.expand_dims(m_state, axis=1), [1, num_cand, 1])
                    W1 = tf.get_variable('W1', [3*cand_dim, 2*cand_dim])
                    b1 = tf.get_variable('b1', [2*cand_dim])
                    W2 = tf.get_variable('W2', [2*cand_dim, cand_dim])
                    b2 = tf.get_variable('b2', [cand_dim])
                    # Two-layer MLP over [memory; candidate; similarity] triplets.
                    concat_in = tf.concat(axis=-1, values=[tf.reshape(M, [-1, cand_dim]), tf.reshape(candidates, [-1, cand_dim]), tf.reshape(similarity, [-1, cand_dim])])
                    output = tf.matmul(tf.nn.relu(tf.matmul(concat_in, W1) + b1), W2) + b2
                else:
                    W_k = tf.get_variable('W_k', [cand_dim, hidden_dim])
                    b_k = tf.get_variable('b_k', [hidden_dim])
                    similarity = tf.einsum('ik,ijk->ijk', m_state, tf.einsum('ijk,kl->ijl', candidates, W_k)) + b_k
                    M = tf.tile(tf.expand_dims(m_state, axis=1), [1, num_cand, 1])
                    W1 = tf.get_variable('W1', [2*hidden_dim + cand_dim, hidden_dim])
                    b1 = tf.get_variable('b1', [hidden_dim])
                    W2 = tf.get_variable('W2', [hidden_dim, 40])
                    b2 = tf.get_variable('b2', [40])
                    concat_in = tf.concat(axis=-1, values=[tf.reshape(M, [-1, hidden_dim]), tf.reshape(candidates, [-1, cand_dim]), tf.reshape(similarity, [-1, hidden_dim])])
                    output = tf.matmul(tf.nn.relu(tf.matmul(concat_in, W1) + b1), W2) + b2
                # NOTE(review): the non-bi-attn branch emits width-40 rows, so this
                # reshape to context_dim only works if context_dim == 40 — verify.
                g1 = tf.reshape(output, [self.batch_size, -1, context_dim])
            elif self.output_unit_type == 'triplet-mlp':
                assert query is not None
                assert self.reasoning_unit == 'None' or self.reasoning_unit is None
                num_cand = tf.shape(candidates)[1]
                query_dim = self.query_dim
                W_q = tf.get_variable('W_q', [query_dim, hidden_dim])
                b_q = tf.get_variable('b_q', [hidden_dim])
                query = tf.matmul(query, W_q) + b_q
                query = tf.tile(tf.expand_dims(query, axis=1), [1, num_cand, 1])
                W_k = tf.get_variable('W_k', [cand_dim, hidden_dim])
                b_k = tf.get_variable('b_k', [hidden_dim])
                similarity = tf.einsum('ik,ijk->ijk', m_state, tf.einsum('ijk,kl->ijl', candidates, W_k)) + b_k
                M = tf.tile(tf.expand_dims(m_state, axis=1), [1, num_cand, 1])
                W1 = tf.get_variable('W1', [3*hidden_dim + cand_dim, hidden_dim])
                b1 = tf.get_variable('b1', [hidden_dim])
                W2 = tf.get_variable('W2', [hidden_dim, 40])
                b2 = tf.get_variable('b2', [40])
                # MLP over [query; memory; candidate; similarity] quadruplets.
                concat_in = tf.concat(axis=-1, values=[tf.reshape(query, [-1, hidden_dim]), tf.reshape(M, [-1, hidden_dim]), tf.reshape(candidates, [-1, cand_dim]), tf.reshape(similarity, [-1, hidden_dim])])
                output = tf.matmul(tf.nn.relu(tf.matmul(concat_in, W1) + b1), W2) + b2
                g1 = tf.reshape(output, [self.batch_size, -1, 40])
            else:
                raise NotImplementedError
            return g1
        else:
            # Span prediction: interact memory with every context position.
            W_m = tf.get_variable('W_m', [hidden_dim, hidden_dim])
            b_m = tf.get_variable('b_m', [hidden_dim])
            W_k = tf.get_variable('W_k', [context_dim, hidden_dim])
            b_k = tf.get_variable('b_k', [hidden_dim])
            I = tf.einsum('ijk,kl->ijl', context, W_k) + b_k
            M = tf.matmul(m_state, W_m) + b_m
            g1 = tf.einsum('ik,ijk->ijk', M, I)
            if self.prediction == 'span-dual':
                # Second interaction conditioned on [I; g1] for the span end.
                p2 = tf.concat([I, g1], axis=2)
                W_p = tf.get_variable('W_p', [2*hidden_dim, hidden_dim])
                b_p = tf.get_variable('b_p', [hidden_dim])
                I_prime = tf.einsum('ijk,kl->ijl', p2, W_p) + b_p
                g2 = tf.einsum('ik,ijk->ijk', M, I_prime)
                return g1, g2
            else:
                # Collapse each position's interaction to a scalar logit.
                W_ra = tf.get_variable('W_ra', [hidden_dim, 1])
                b_ra = tf.get_variable('b_ra', [1])
                ra = tf.squeeze(tf.einsum('ijk,kl->ijl', g1, W_ra), axis=2) + b_ra
                return g1, ra
class HierarchicalAttnMACRnn(MACRnn):
def __init__(self, batch_size, context_dim, query_dim, hidden_dim=80, num_hops=6, bidirectional_input_unit=False, prediction='candidates', input_keep_prob=0.8, reuse_cell=True, \
is_train=None, use_control_unit=True, mode="train", read_strategy='full', output_unit_type='similarity', reasoning_unit='answer_unit', \
memory_state_update_rule=None, answer_state_update_rule='mlp', attention_style='similarity', \
answer_doc_ids=None, sents_len=None, oracle=None, reinforce=False, attention_cell_dropout=False, \
read_topk_docs=0):
"""
num_hops: the number of mac cell chained together, or number of reasoning steps.
bidriectional_input_unit: use bi-lstm for input unit. | |
s = format_time("size 10, focals " + str(round(1/mediumSet[0][1])) + ", set " + str(len(mediumSet)) + ", maxcard 3, bel", time_function_cannot_be_pickled(nb_iterations, MassFunction.get_max, m.bel, 3, realSet, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 10, focals " + str(round(1/mediumSet[0][1])) + ", set " + str(len(mediumSet)) + ", maxcard 6, bel", time_function_cannot_be_pickled(nb_iterations, MassFunction.get_max, m.bel, 6, realSet, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 10, focals " + str(round(1/mediumSet[0][1])) + ", set " + str(len(mediumSet)) + ", maxcard 9, bel", time_function_cannot_be_pickled(nb_iterations, MassFunction.get_max, m.bel, 9, realSet, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 10, focals " + str(round(1/mediumSet[0][1])) + ", set " + str(len(mediumSet)) + ", maxcard 3, betP", time_function_cannot_be_pickled(nb_iterations, MassFunction.get_max, m.betP, 3, realSet, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 10, focals " + str(round(1/mediumSet[0][1])) + ", set " + str(len(mediumSet)) + ", maxcard 6, betP", time_function_cannot_be_pickled(nb_iterations, MassFunction.get_max, m.betP, 6, realSet, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 10, focals " + str(round(1/mediumSet[0][1])) + ", set " + str(len(mediumSet)) + ", maxcard 9, betP", time_function_cannot_be_pickled(nb_iterations, MassFunction.get_max, m.betP, 9, realSet, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 10, focals " + str(round(1/mediumSet[0][1])) + ", set " + str(len(mediumSet)) + ", maxcard 3, pl", time_function_cannot_be_pickled(nb_iterations, MassFunction.get_max, m.pl, 3, realSet, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 10, focals " + str(round(1/mediumSet[0][1])) + ", set " + str(len(mediumSet)) + ", maxcard 6, pl", time_function_cannot_be_pickled(nb_iterations, MassFunction.get_max, m.pl, 6, realSet, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 10, focals " + str(round(1/mediumSet[0][1])) + ", set " + str(len(mediumSet)) + ", maxcard 9, pl", time_function_cannot_be_pickled(nb_iterations, MassFunction.get_max, m.pl, 9, realSet, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
nb_iterations = 1
for bigSet in bigSets:
    # Build a mass function from the (focal element, mass) pairs:
    m = MassFunction(*bigSet)
    realSet = [x[0] for x in bigSet]
    # Common label prefix for this mass function's benchmark lines.
    label_prefix = "size 10000, focals " + str(round(1/bigSet[0][1])) + ", set " + str(len(bigSet))
    # Time get_max for every criterion x maxcard combination.
    # (Replaces 12 copy-pasted statement groups; output lines are identical.)
    for criterion_name, criterion in (("mass", m.mass), ("bel", m.bel), ("betP", m.betP), ("pl", m.pl)):
        for maxcard in (300, 600, 900):
            s = format_time(
                label_prefix + ", maxcard " + str(maxcard) + ", " + criterion_name,
                time_function_cannot_be_pickled(nb_iterations, MassFunction.get_max, criterion, maxcard, realSet, verbose=False),
                nb_iterations, timeout)
            print(s)
            f.write(s + "\n")
s = "-" * 80
print(s)
f.write(s + "\n")
########################################################################################################################################################################################################
# CHARACTERISATION: time the scalar characterisation measures on every mass function.
########################################################################################################################################################################################################
nb_iterations = 1000
def _emit_line(line):
    # Print a benchmark line and persist it to the report file.
    print(line)
    f.write(line + "\n")
_emit_line("CHARACTERISATION:")
_emit_line("- " * 40)
# One (size label, set list) pair per benchmark scale.
# BUG FIX: bigSets hold 10000-element frames (see the get_max section above)
# but were labelled "size 10" here; the label is corrected to "size 10000".
for title, method_name in (("Specificity:", "specificity"),
                           ("Non-specificity:", "non_specificity"),
                           ("Discrepancy:", "discrepancy")):
    _emit_line(title)
    for size_label, spec_sets in (("size 3", smallSets), ("size 10", mediumSets), ("size 10000", bigSets)):
        for spec in spec_sets:
            # Build a mass function:
            m = MassFunction(*spec)
            s = format_time(size_label + ", focals " + str(round(1/spec[0][1])),
                            time_function(nb_iterations, getattr(m, method_name), timeout=timeout, verbose=False),
                            nb_iterations, timeout)
            _emit_line(s)
    # The original prints a light separator after the first two measures only.
    if title != "Discrepancy:":
        _emit_line("- " * 40)
s = "-" * 80
print(s)
f.write(s + "\n")
nb_iterations = 1000
s = "DISCOUNTING:"
print(s)
f.write(s + "\n")
s = "- " * 40
print(s)
f.write(s + "\n")
s = "Discounting:"
print(s)
f.write(s + "\n")
for smallSet in smallSets:
#Build a mass function:
m = MassFunction(*smallSet)
s = format_time("size 3, focals " + str(round(1/smallSet[0][1])), time_function(nb_iterations, m.discounting, 0.1, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
for mediumSet in mediumSets:
#Build a mass function:
m = MassFunction(*mediumSet)
s = format_time("size 10, focals " + str(round(1/mediumSet[0][1])), time_function(nb_iterations, m.discounting, 0.1, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
for bigSet in bigSets:
#Build a mass function:
m = MassFunction(*bigSet)
s = format_time("size 10, focals " + str(round(1/bigSet[0][1])), time_function(nb_iterations, m.discounting, 0.1, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
########################################################################################################################################################################################################
########################################################################################################################################################################################################
########################################################################################################################################################################################################
s = "- " * 40
print(s)
f.write(s + "\n")
s = "Weakening:"
print(s)
f.write(s + "\n")
for smallSet in smallSets:
#Build a mass function:
m = MassFunction(*smallSet)
s = format_time("size 3, focals " + str(round(1/smallSet[0][1])), time_function(nb_iterations, m.weakening, 0.1, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
for mediumSet in mediumSets:
#Build a mass function:
m = MassFunction(*mediumSet)
s = format_time("size 10, focals " + str(round(1/mediumSet[0][1])), time_function(nb_iterations, m.weakening, 0.1, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
for bigSet in bigSets:
#Build a mass function:
m = MassFunction(*bigSet)
s = format_time("size 10, focals " + str(round(1/bigSet[0][1])), time_function(nb_iterations, m.weakening, 0.1, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
########################################################################################################################################################################################################
########################################################################################################################################################################################################
########################################################################################################################################################################################################
s = "-" * 80
print(s)
f.write(s + "\n")
nb_iterations = 100
s = "DISTANCES:"
print(s)
f.write(s + "\n")
s = "- " | |
0 , 384 , (3, 0, None, None) , 0 , )),
(( 'RequiredAttendees' , 'RequiredAttendees' , ), 3588, (3588, (), [ (8, 1, None, None) , ], 1 , 4 , 4 , 0 , 388 , (3, 0, None, None) , 0 , )),
(( 'Resources' , 'Resources' , ), 3586, (3586, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 392 , (3, 0, None, None) , 0 , )),
(( 'Resources' , 'Resources' , ), 3586, (3586, (), [ (8, 1, None, None) , ], 1 , 4 , 4 , 0 , 396 , (3, 0, None, None) , 0 , )),
(( 'ResponseRequested' , 'ResponseRequested' , ), 99, (99, (), [ (16395, 10, None, None) , ], 1 , 2 , 4 , 0 , 400 , (3, 0, None, None) , 0 , )),
(( 'ResponseRequested' , 'ResponseRequested' , ), 99, (99, (), [ (11, 1, None, None) , ], 1 , 4 , 4 , 0 , 404 , (3, 0, None, None) , 0 , )),
(( 'ResponseStatus' , 'ResponseStatus' , ), 33304, (33304, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 408 , (3, 0, None, None) , 0 , )),
(( 'Start' , 'Start' , ), 33293, (33293, (), [ (16391, 10, None, None) , ], 1 , 2 , 4 , 0 , 412 , (3, 0, None, None) , 0 , )),
(( 'Start' , 'Start' , ), 33293, (33293, (), [ (7, 1, None, None) , ], 1 , 4 , 4 , 0 , 416 , (3, 0, None, None) , 0 , )),
(( 'ClearRecurrencePattern' , ), 61605, (61605, (), [ ], 1 , 1 , 4 , 0 , 420 , (3, 0, None, None) , 0 , )),
(( 'ForwardAsVcal' , 'Item' , ), 62791, (62791, (), [ (16397, 10, None, "IID('{00061033-0000-0000-C000-000000000046}')") , ], 1 , 1 , 4 , 0 , 424 , (3, 0, None, None) , 0 , )),
(( 'GetRecurrencePattern' , 'RecurrencPattern' , ), 61604, (61604, (), [ (16393, 10, None, "IID('{00063044-0000-0000-C000-000000000046}')") , ], 1 , 1 , 4 , 0 , 428 , (3, 0, None, None) , 0 , )),
(( 'Respond' , 'Response' , 'fNoUI' , 'fAdditionalTextDialog' , 'ResponseItem' ,
), 62722, (62722, (), [ (3, 1, None, None) , (12, 17, None, None) , (12, 17, None, None) , (16397, 10, None, "IID('{00061036-0000-0000-C000-000000000046}')") , ], 1 , 1 , 4 , 2 , 432 , (3, 0, None, None) , 0 , )),
(( 'Send' , ), 61557, (61557, (), [ ], 1 , 1 , 4 , 0 , 436 , (3, 0, None, None) , 0 , )),
(( 'NetMeetingDocPathName' , 'NetMeetingDocPathName' , ), 33351, (33351, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 440 , (3, 0, None, None) , 0 , )),
(( 'NetMeetingDocPathName' , 'NetMeetingDocPathName' , ), 33351, (33351, (), [ (8, 1, None, None) , ], 1 , 4 , 4 , 0 , 444 , (3, 0, None, None) , 0 , )),
(( 'NetShowURL' , 'NetShowURL' , ), 33352, (33352, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 448 , (3, 0, None, None) , 0 , )),
(( 'NetShowURL' , 'NetShowURL' , ), 33352, (33352, (), [ (8, 1, None, None) , ], 1 , 4 , 4 , 0 , 452 , (3, 0, None, None) , 0 , )),
(( 'Links' , 'Links' , ), 62469, (62469, (), [ (16393, 10, None, "IID('{0006308A-0000-0000-C000-000000000046}')") , ], 1 , 2 , 4 , 0 , 456 , (3, 0, None, None) , 0 , )),
(( 'ConferenceServerAllowExternal' , 'ConferenceServerAllowExternal' , ), 33350, (33350, (), [ (16395, 10, None, None) , ], 1 , 2 , 4 , 0 , 460 , (3, 0, None, None) , 0 , )),
(( 'ConferenceServerAllowExternal' , 'ConferenceServerAllowExternal' , ), 33350, (33350, (), [ (11, 1, None, None) , ], 1 , 4 , 4 , 0 , 464 , (3, 0, None, None) , 0 , )),
(( 'ConferenceServerPassword' , 'ConferenceServerPassword' , ), 33353, (33353, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 468 , (3, 0, None, None) , 0 , )),
(( 'ConferenceServerPassword' , 'ConferenceServerPassword' , ), 33353, (33353, (), [ (8, 1, None, None) , ], 1 , 4 , 4 , 0 , 472 , (3, 0, None, None) , 0 , )),
]
# makepy-generated flag: ContactItem's vtable entries are dispatch-based.
_ContactItem_vtables_dispatch_ = 1
_ContactItem_vtables_ = [
(( 'Application' , 'Application' , ), 61440, (61440, (), [ (16393, 10, None, "IID('{00063001-0000-0000-C000-000000000046}')") , ], 1 , 2 , 4 , 0 , 28 , (3, 0, None, None) , 0 , )),
(( 'Class' , 'Class' , ), 61450, (61450, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 32 , (3, 0, None, None) , 0 , )),
(( 'Session' , 'Session' , ), 61451, (61451, (), [ (16393, 10, None, "IID('{00063002-0000-0000-C000-000000000046}')") , ], 1 , 2 , 4 , 0 , 36 , (3, 0, None, None) , 0 , )),
(( 'Parent' , 'Parent' , ), 61441, (61441, (), [ (16393, 10, None, None) , ], 1 , 2 , 4 , 0 , 40 , (3, 0, None, None) , 0 , )),
(( 'Actions' , 'Actions' , ), 63511, (63511, (), [ (16393, 10, None, "IID('{0006303E-0000-0000-C000-000000000046}')") , ], 1 , 2 , 4 , 0 , 44 , (3, 0, None, None) , 0 , )),
(( 'Attachments' , 'Attachments' , ), 63509, (63509, (), [ (16393, 10, None, "IID('{0006303C-0000-0000-C000-000000000046}')") , ], 1 , 2 , 4 , 0 , 48 , (3, 0, None, None) , 0 , )),
(( 'BillingInformation' , 'BillingInformation' , ), 34101, (34101, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 52 , (3, 0, None, None) , 0 , )),
(( 'BillingInformation' , 'BillingInformation' , ), 34101, (34101, (), [ (8, 1, None, None) , ], 1 , 4 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( 'Body' , 'Body' , ), 37120, (37120, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 60 , (3, 0, None, None) , 0 , )),
(( 'Body' , 'Body' , ), 37120, (37120, (), [ (8, 1, None, None) , ], 1 , 4 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( 'Categories' , 'Categories' , ), 36865, (36865, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 68 , (3, 0, None, None) , 0 , )),
(( 'Categories' , 'Categories' , ), 36865, (36865, (), [ (8, 1, None, None) , ], 1 , 4 , 4 , 0 , 72 , (3, 0, None, None) , 0 , )),
(( 'Companies' , 'Companies' , ), 34107, (34107, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 76 , (3, 0, None, None) , 0 , )),
(( 'Companies' , 'Companies' , ), 34107, (34107, (), [ (8, 1, None, None) , ], 1 , 4 , 4 , 0 , 80 , (3, 0, None, None) , 0 , )),
(( 'ConversationIndex' , 'ConversationIndex' , ), 113, (113, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 84 , (3, 0, None, None) , 0 , )),
(( 'ConversationTopic' , 'ConversationTopic' , ), 112, (112, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 88 , (3, 0, None, None) , 0 , )),
(( 'CreationTime' , 'CreationTime' | |
<gh_stars>1-10
#!/usr/bin/python
# ----------------------------------------------------------------------------
# Copyright 2018 Intel
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import numpy as np
import os
import argparse
import psutil
import time
import datetime
import tensorflow as tf
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model.signature_def_utils import predict_signature_def
from tensorflow.python.saved_model import tag_constants
parser = argparse.ArgumentParser(
    description="Sanity testing for 3D and 2D Convolution Models", add_help=True)

# Value-taking options: (flag, type, default, help), registered in the
# original order so --help output is unchanged.
for flag, arg_type, default, help_text in (
        ("--dim_length", int, 16, "Tensor cube length of side"),
        ("--num_channels", int, 1, "Number of channels"),
        ("--num_outputs", int, 1, "Number of outputs"),
        ("--bz", int, 1, "Batch size"),
        ("--lr", float, 0.001, "Learning rate"),
        ("--num_datapoints", int, 1024, "Number of datapoints"),
        ("--epochs", int, 3, "Number of epochs"),
        ("--intraop_threads", int, psutil.cpu_count(logical=False), "Number of intraop threads"),
        ("--interop_threads", int, 2, "Number of interop threads"),
        ("--blocktime", int, 0, "Block time for CPU threads")):
    parser.add_argument(flag, type=arg_type, default=default, help=help_text)

# Boolean switches: all store_true with a False default.
for flag, help_text in (
        ("--print_model", "Print the summary of the model layers"),
        ("--use_upsampling", "Use upsampling instead of transposed convolution"),
        ("--D2", "Use 2D model and images instead of 3D."),
        ("--single_class_output", "Use binary classifier instead of U-Net"),
        ("--mkl_verbose", "Print MKL debug statements."),
        ("--inference", "Test inference speed. Default=Test training speed"),
        ("--ngraph", "Use ngraph"),
        ("--keras_api", "Use Keras API. False=Use tf.keras"),
        ("--channels_first", "Channels first. NCHW")):
    parser.add_argument(flag, action="store_true", default=False, help=help_text)

args = parser.parse_args()
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" # Get rid of the AVX, SSE warnings
if args.mkl_verbose:
os.environ["MKL_VERBOSE"] = "1" # Print out messages from MKL operations
os.environ["MKLDNN_VERBOSE"] = "1" # Print out messages from MKL-DNN operations
os.environ["OMP_NUM_THREADS"] = str(args.intraop_threads)
os.environ["KMP_BLOCKTIME"] = str(args.blocktime)
os.environ["KMP_AFFINITY"] = "granularity=thread,compact,1,0"
print("Started script on {}".format(datetime.datetime.now()))
print("args = {}".format(args))
os.system("uname -a")
print("TensorFlow version: {}".format(tf.__version__))
if args.keras_api:
import keras as K
print("Using Keras API")
else:
from tensorflow import keras as K
print("Using tf.keras")
if args.ngraph:
print("Using nGraph")
import ngraph_bridge
if args.channels_first:
os.environ["NGRAPH_PASS_ENABLES"]="CPUReshapeSinking:1;ReshapeElimination:1"
print("Keras API version: {}".format(K.__version__))
# Define the input (tensor_shape) and label (out_shape) tensor shapes, plus the
# reduction axes (dims) used by the Dice metrics, for 2D vs 3D and
# channels-first vs channels-last layouts.
#
# BUG FIX: in the 3D branches the second assignment wrote to tensor_shape
# again instead of out_shape, so in 3D mode the input shape was clobbered with
# the output shape and out_shape was never defined.
if args.D2:  # Define shape of the tensors (2D)
    dims = (1,2)
    if args.channels_first:
        tensor_shape = (args.num_channels,
                        args.dim_length,
                        args.dim_length)
        out_shape = (args.num_outputs,
                     args.dim_length,
                     args.dim_length)
    else:
        tensor_shape = (args.dim_length,
                        args.dim_length,
                        args.num_channels)
        out_shape = (args.dim_length,
                     args.dim_length,
                     args.num_outputs)
else:  # Define shape of the tensors (3D)
    dims = (1,2,3)
    if args.channels_first:
        tensor_shape = (args.num_channels,
                        args.dim_length,
                        args.dim_length,
                        args.dim_length)
        out_shape = (args.num_outputs,
                     args.dim_length,
                     args.dim_length,
                     args.dim_length)
    else:
        tensor_shape = (args.dim_length,
                        args.dim_length,
                        args.dim_length,
                        args.num_channels)
        out_shape = (args.dim_length,
                     args.dim_length,
                     args.dim_length,
                     args.num_outputs)
# Optimize CPU threads for TensorFlow
config = tf.ConfigProto(
    inter_op_parallelism_threads=args.interop_threads,
    intra_op_parallelism_threads=args.intraop_threads)
# Configure only as much GPU memory as needed during runtime
# Default is to use the entire GPU memory
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
# Install the configured session as the default for Keras layers/models.
K.backend.set_session(sess)
def dice_coef(y_true, y_pred, axis=(1,2,3), smooth=1.0):
    """Mean Sorenson-Dice coefficient over the batch.

    Predictions are rounded to {0, 1} first, so this measures Dice on hard
    (thresholded) masks rather than soft probabilities. `smooth` stabilizes
    the ratio when a sample's masks are empty.
    """
    y_pred_hard = K.backend.round(y_pred)
    overlap = tf.reduce_sum(y_true * y_pred_hard, axis=axis)
    total = tf.reduce_sum(y_true + y_pred_hard, axis=axis)
    dice_per_sample = (tf.constant(2.) * overlap + smooth) / (total + smooth)
    return tf.reduce_mean(dice_per_sample)
def dice_coef_loss(target, prediction, axis=(1,2,3), smooth=1.0):
    """
    Sorenson Dice loss.

    Uses -log(Dice) as the loss since it is better behaved; the log also
    avoids the division, which helps prevent underflow when the numbers
    are very small. Equivalent to log(denominator) - log(numerator).
    """
    overlap = tf.reduce_sum(prediction * target, axis=axis)
    pred_sum = tf.reduce_sum(prediction, axis=axis)
    true_sum = tf.reduce_sum(target, axis=axis)
    num = tf.reduce_mean(2. * overlap + smooth)
    den = tf.reduce_mean(true_sum + pred_sum + smooth)
    return tf.log(den) - tf.log(num)
# Channel axis used when concatenating skip connections.
# BUG FIX: with channels_first (NCHW/NCDHW) the channel axis is 1, not -1.
# Previously both branches set -1, which in channels_first mode would
# concatenate along a spatial axis instead of the channel axis.
if args.channels_first:
    concat_axis = 1
    data_format = "channels_first"
else:
    concat_axis = -1
    data_format = "channels_last"
def unet3D(input_img, use_upsampling=False, n_out=1, dropout=0.2,
           print_summary = False, return_model=False):
    """
    3D U-Net model.

    Standard encoder/decoder segmentation network with skip connections.
    Uses the module-level `data_format` and `concat_axis` globals.

    :param input_img: input tensor shape (tuple, excluding batch dim)
    :param use_upsampling: if True use UpSampling3D in the decoder,
        otherwise a learned Conv3DTranspose
    :param n_out: number of output channels of the prediction map
    :param dropout: dropout rate applied in the two deepest encoder stages
    :param print_summary: print model.summary() (only when return_model)
    :param return_model: if True return (pred, model), else just pred
    """
    print("3D U-Net Segmentation")
    inputs = K.layers.Input(shape=input_img, name="Input_Image")
    # Shared conv settings; activation=None because each conv is followed
    # by BatchNorm and then an explicit ReLU.
    params = dict(kernel_size=(3, 3, 3), activation=None,
                  padding="same", data_format=data_format,
                  kernel_initializer="he_uniform")
    # --- Encoder stage 1: 32 -> 64 filters ---
    conv1 = K.layers.Conv3D(name="conv1a", filters=32, **params)(inputs)
    conv1 = K.layers.BatchNormalization()(conv1)
    conv1 = K.layers.Activation("relu")(conv1)
    conv1 = K.layers.Conv3D(name="conv1b", filters=64, **params)(conv1)
    conv1 = K.layers.BatchNormalization()(conv1)
    conv1 = K.layers.Activation("relu")(conv1)
    pool1 = K.layers.MaxPooling3D(name="pool1", pool_size=(2, 2, 2))(conv1)
    # --- Encoder stage 2: 64 -> 128 filters ---
    conv2 = K.layers.Conv3D(name="conv2a", filters=64, **params)(pool1)
    conv2 = K.layers.BatchNormalization()(conv2)
    conv2 = K.layers.Activation("relu")(conv2)
    conv2 = K.layers.Conv3D(name="conv2b", filters=128, **params)(conv2)
    conv2 = K.layers.BatchNormalization()(conv2)
    conv2 = K.layers.Activation("relu")(conv2)
    pool2 = K.layers.MaxPooling3D(name="pool2", pool_size=(2, 2, 2))(conv2)
    # --- Encoder stage 3: 128 -> 256 filters, with dropout ---
    conv3 = K.layers.Conv3D(name="conv3a", filters=128, **params)(pool2)
    conv3 = K.layers.BatchNormalization()(conv3)
    conv3 = K.layers.Activation("relu")(conv3)
    conv3 = K.layers.Dropout(dropout)(conv3)  # dropout applied early, as indicated in the paper
    conv3 = K.layers.Conv3D(name="conv3b", filters=256, **params)(conv3)
    conv3 = K.layers.BatchNormalization()(conv3)
    conv3 = K.layers.Activation("relu")(conv3)
    pool3 = K.layers.MaxPooling3D(name="pool3", pool_size=(2, 2, 2))(conv3)
    # --- Bottleneck: 256 -> 512 filters, with dropout ---
    conv4 = K.layers.Conv3D(name="conv4a", filters=256, **params)(pool3)
    conv4 = K.layers.BatchNormalization()(conv4)
    conv4 = K.layers.Activation("relu")(conv4)
    conv4 = K.layers.Dropout(dropout)(conv4)  # dropout applied early, as indicated in the paper
    conv4 = K.layers.Conv3D(name="conv4b", filters=512, **params)(conv4)
    conv4 = K.layers.BatchNormalization()(conv4)
    conv4 = K.layers.Activation("relu")(conv4)
    # --- Decoder stage 1: upsample and fuse with conv3 skip connection ---
    if use_upsampling:
        up = K.layers.UpSampling3D(name="up4", size=(2, 2, 2))(conv4)
    else:
        up = K.layers.Conv3DTranspose(name="transConv4", filters=512, data_format=data_format,
                                      kernel_size=(2, 2, 2), strides=(2, 2, 2), padding="same")(conv4)
    up4 = K.layers.concatenate([up, conv3], axis=concat_axis)
    conv5 = K.layers.Conv3D(name="conv5a", filters=256, **params)(up4)
    conv5 = K.layers.BatchNormalization()(conv5)
    conv5 = K.layers.Activation("relu")(conv5)
    conv5 = K.layers.Conv3D(name="conv5b", filters=256, **params)(conv5)
    conv5 = K.layers.BatchNormalization()(conv5)
    conv5 = K.layers.Activation("relu")(conv5)
    # --- Decoder stage 2: upsample and fuse with conv2 skip connection ---
    if use_upsampling:
        up = K.layers.UpSampling3D(name="up5", size=(2, 2, 2))(conv5)
    else:
        up = K.layers.Conv3DTranspose(name="transConv5", filters=256, data_format=data_format,
                                      kernel_size=(2, 2, 2), strides=(2, 2, 2), padding="same")(conv5)
    up5 = K.layers.concatenate([up, conv2], axis=concat_axis)
    conv6 = K.layers.Conv3D(name="conv6a", filters=128, **params)(up5)
    conv6 = K.layers.BatchNormalization()(conv6)
    conv6 = K.layers.Activation("relu")(conv6)
    conv6 = K.layers.Conv3D(name="conv6b", filters=128, **params)(conv6)
    conv6 = K.layers.BatchNormalization()(conv6)
    conv6 = K.layers.Activation("relu")(conv6)
    # --- Decoder stage 3: upsample and fuse with conv1 skip connection ---
    if use_upsampling:
        up = K.layers.UpSampling3D(name="up6", size=(2, 2, 2))(conv6)
    else:
        up = K.layers.Conv3DTranspose(name="transConv6", filters=128, data_format=data_format,
                                      kernel_size=(2, 2, 2), strides=(2, 2, 2), padding="same")(conv6)
    up6 = K.layers.concatenate([up, conv1], axis=concat_axis)
    conv7 = K.layers.Conv3D(name="conv7a", filters=64, **params)(up6)
    conv7 = K.layers.BatchNormalization()(conv7)
    conv7 = K.layers.Activation("relu")(conv7)
    conv7 = K.layers.Conv3D(name="conv7b", filters=64, **params)(conv7)
    conv7 = K.layers.BatchNormalization()(conv7)
    conv7 = K.layers.Activation("relu")(conv7)
    # 1x1x1 conv + sigmoid produces the per-voxel probability map(s).
    pred = K.layers.Conv3D(name="Prediction", filters=n_out, kernel_size=(1, 1, 1),
                           data_format=data_format, activation="sigmoid")(conv7)
    if return_model:
        model = K.models.Model(inputs=[inputs], outputs=[pred])
        if print_summary:
            print(model.summary())
        return pred, model
    else:
        return pred
def unet2D(input_tensor, use_upsampling=False,
           n_out=1, dropout=0.2, print_summary = False, return_model=False):
    """
    2D U-Net.

    Standard encoder/decoder segmentation network with skip connections.
    Uses the module-level `data_format` and `concat_axis` globals.

    BUG FIX: the decoder previously tested the global `args.use_upsampling`,
    silently ignoring the `use_upsampling` parameter; the parameter is now
    honored (callers that passed args.use_upsampling see no change).

    :param input_tensor: input tensor shape (tuple, excluding batch dim)
    :param use_upsampling: if True use UpSampling2D in the decoder,
        otherwise a learned Conv2DTranspose
    :param n_out: number of output channels of the prediction mask
    :param dropout: currently unused (dropout layers are commented out)
    :param print_summary: print model.summary() (only when return_model)
    :param return_model: if True return (pred, model), else just pred
    """
    print("2D U-Net Segmentation")
    inputs = K.layers.Input(shape=input_tensor, name="Images")
    # Convolution parameters
    params = dict(kernel_size=(3, 3), activation="relu",
                  padding="same", data_format=data_format,
                  kernel_initializer="he_uniform")
    # Transposed convolution parameters
    params_trans = dict(data_format=data_format,
                        kernel_size=(2, 2), strides=(2, 2),
                        padding="same")
    fms = 64  # base number of feature maps; doubled at each encoder stage
    # --- Encoder ---
    conv1 = K.layers.Conv2D(name="conv1a", filters=fms, **params)(inputs)
    conv1 = K.layers.Conv2D(name="conv1b", filters=fms, **params)(conv1)
    pool1 = K.layers.MaxPooling2D(name="pool1", pool_size=(2, 2))(conv1)
    conv2 = K.layers.Conv2D(name="conv2a", filters=fms*2, **params)(pool1)
    conv2 = K.layers.Conv2D(name="conv2b", filters=fms*2, **params)(conv2)
    pool2 = K.layers.MaxPooling2D(name="pool2", pool_size=(2, 2))(conv2)
    conv3 = K.layers.Conv2D(name="conv3a", filters=fms*4, **params)(pool2)
    #conv3 = K.layers.Dropout(dropout)(conv3)
    conv3 = K.layers.Conv2D(name="conv3b", filters=fms*4, **params)(conv3)
    pool3 = K.layers.MaxPooling2D(name="pool3", pool_size=(2, 2))(conv3)
    conv4 = K.layers.Conv2D(name="conv4a", filters=fms*8, **params)(pool3)
    #conv4 = K.layers.Dropout(dropout)(conv4)
    conv4 = K.layers.Conv2D(name="conv4b", filters=fms*8, **params)(conv4)
    pool4 = K.layers.MaxPooling2D(name="pool4", pool_size=(2, 2))(conv4)
    # --- Bottleneck ---
    conv5 = K.layers.Conv2D(name="conv5a", filters=fms*16, **params)(pool4)
    conv5 = K.layers.Conv2D(name="conv5b", filters=fms*16, **params)(conv5)
    # --- Decoder: each stage upsamples and fuses a skip connection ---
    if use_upsampling:
        up = K.layers.UpSampling2D(name="up6", size=(2, 2))(conv5)
    else:
        up = K.layers.Conv2DTranspose(name="transConv6", filters=fms*8,
                                      **params_trans)(conv5)
    up6 = K.layers.concatenate([up, conv4], axis=concat_axis)
    conv6 = K.layers.Conv2D(name="conv6a", filters=fms*8, **params)(up6)
    conv6 = K.layers.Conv2D(name="conv6b", filters=fms*8, **params)(conv6)
    if use_upsampling:
        up = K.layers.UpSampling2D(name="up7", size=(2, 2))(conv6)
    else:
        up = K.layers.Conv2DTranspose(name="transConv7", filters=fms*4,
                                      **params_trans)(conv6)
    up7 = K.layers.concatenate([up, conv3], axis=concat_axis)
    conv7 = K.layers.Conv2D(name="conv7a", filters=fms*4, **params)(up7)
    conv7 = K.layers.Conv2D(name="conv7b", filters=fms*4, **params)(conv7)
    if use_upsampling:
        up = K.layers.UpSampling2D(name="up8", size=(2, 2))(conv7)
    else:
        up = K.layers.Conv2DTranspose(name="transConv8", filters=fms*2,
                                      **params_trans)(conv7)
    up8 = K.layers.concatenate([up, conv2], axis=concat_axis)
    conv8 = K.layers.Conv2D(name="conv8a", filters=fms*2, **params)(up8)
    conv8 = K.layers.Conv2D(name="conv8b", filters=fms*2, **params)(conv8)
    if use_upsampling:
        up = K.layers.UpSampling2D(name="up9", size=(2, 2))(conv8)
    else:
        up = K.layers.Conv2DTranspose(name="transConv9", filters=fms,
                                      **params_trans)(conv8)
    up9 = K.layers.concatenate([up, conv1], axis=concat_axis)
    conv9 = K.layers.Conv2D(name="conv9a", filters=fms, **params)(up9)
    conv9 = K.layers.Conv2D(name="conv9b", filters=fms, **params)(conv9)
    # 1x1 conv + sigmoid produces the per-pixel probability mask(s).
    pred = K.layers.Conv2D(name="PredictionMask",
                           filters=n_out, kernel_size=(1, 1),
                           data_format=data_format,
                           activation="sigmoid")(conv9)
    if return_model:
        model = K.models.Model(inputs=[inputs], outputs=[pred])
        if print_summary:
            print(model.summary())
        return pred, model
    else:
        return pred
def conv3D(input_img, print_summary = False, dropout=0.2, n_out=1,
return_model=False):
"""
Simple 3D convolution model based on VGG-16
"""
print("3D Convolutional Binary Classifier based on VGG-16")
inputs = K.layers.Input(shape=input_img, name="Images")
params = dict(kernel_size=(3, 3, 3), activation="relu",
padding="same", data_format=data_format,
kernel_initializer="he_uniform") #RandomUniform(minval=-0.01, maxval=0.01, seed=816))
conv1 = K.layers.Conv3D(name="conv1", filters=64, **params)(inputs)
conv2 = K.layers.Conv3D(name="conv2", filters=64, **params)(conv1)
pool1 = K.layers.MaxPooling3D(name="pool1", pool_size=(2, 2, 2))(conv2)
conv3 = K.layers.Conv3D(name="conv3", filters=128, **params)(pool1)
conv4 = K.layers.Conv3D(name="conv4", filters=128, **params)(conv3)
pool2 = K.layers.MaxPooling3D(name="pool2", pool_size=(2, 2, 2))(conv4)
conv5 = K.layers.Conv3D(name="conv5", filters=256, **params)(pool2)
conv6 = K.layers.Conv3D(name="conv6", filters=256, **params)(conv5)
conv7 = K.layers.Conv3D(name="conv7", filters=256, **params)(conv6)
pool3 = K.layers.MaxPooling3D(name="pool3", pool_size=(2, 2, 2))(conv7)
conv8 = K.layers.Conv3D(name="conv8", filters=512, **params)(pool3)
conv9 = K.layers.Conv3D(name="conv9", filters=512, **params)(conv8)
conv10 = K.layers.Conv3D(name="conv10", filters=512, **params)(conv9)
pool4 = K.layers.MaxPooling3D(name="pool4", pool_size=(2, 2, 2))(conv10)
conv11 = K.layers.Conv3D(name="conv11", filters=512, **params)(pool4)
conv12 = K.layers.Conv3D(name="conv12", filters=512, **params)(conv11)
conv13 = K.layers.Conv3D(name="conv13", filters=512, **params)(conv12)
pool5 = K.layers.MaxPooling3D(name="pool5", pool_size=(2, 2, 2))(conv13)
flat = K.layers.Flatten()(pool5)
dense1 = K.layers.Dense(4096, activation="relu")(flat)
drop1 = | |
'in_downtime': {},
'service_scheduled_downtime_depth': {},
'service_acknowledged': {},
'hostregex': {},
'host_address': {},
'service_active_checks_enabled': {},
'serviceregex': {},
'service_display_name': {},
'check_command': {},
'hoststate': {},
'svcstate': {},
'svchardstate': {},
'opthostgroup': {},
'opthost_contactgroup': {},
'output': {},
'service_is_flapping': {},
'svc_last_state_change': {},
'svc_last_check': {},
'siteopt': {},
'aggr_service_used': {},
'svc_notif_number': {},
'service_staleness': {},
'host_tags': {},
'hostalias': {},
'host_favorites': {},
'service_favorites': {},
'has_performance_data' : { 'is_has_performance_data': '1' },
},
'datasource': 'services',
'description': _('A Matrix of performance data values, grouped by hosts and services'),
'group_painters': [('host', 'host', None)],
'hidden': False,
'hidebutton': False,
'icon': 'matrix',
'layout': 'matrix',
'num_columns': 12,
'painters': [('service_description', 'service', None),
('perfometer', None, None),
],
'single_infos': [],
'sorters': [('site_host', False), ('svcdescr', False)],
'title': _('Search performance data'),
'user_sortable': True,
'topic' : _("Metrics"),
'mustsearch': True,
},
#
# ____ _
# | __ ) _ _ ___(_)_ __ ___ ___ ___
# | _ \| | | / __| | '_ \ / _ \/ __/ __|
# | |_) | |_| \__ \ | | | | __/\__ \__ \
# |____/ \__,_|___/_|_| |_|\___||___/___/
#
# ___ _ _ _ _
# |_ _|_ __ | |_ ___| | (_) __ _ ___ _ __ ___ ___
# | || '_ \| __/ _ \ | | |/ _` |/ _ \ '_ \ / __/ _ \
# | || | | | || __/ | | | (_| | __/ | | | (_| __/
# |___|_| |_|\__\___|_|_|_|\__, |\___|_| |_|\___\___|
# |___/
#
# All Aggregations
'aggr_all': {
'browser_reload': 0,
'column_headers': 'pergroup',
'datasource': 'bi_aggregations',
'description': _('Displays all BI aggregations.'),
'group_painters': [('aggr_group', 'aggr_group')],
'hard_filters': [],
'hard_filtervars': [('host', ''),
('aggr_name_regex', ''),
('aggr_output', ''),
('birs0', 'on'),
('birs1', 'on'),
('birs2', 'on'),
('birs3', 'on'),
('birs-1', 'on'),
('bias0', 'on'),
('bias1', 'on'),
('bias2', 'on'),
('bias3', 'on'),
('birs-1', 'on'),
('biasn', 'on'),
('bies0', 'on'),
('bies1', 'on'),
('bies2', 'on'),
('bies3', 'on'),
('bies-1', 'on'),
],
'hidden': False,
'hide_filters': [],
'hidebutton': False,
'icon' : 'aggr',
'layout': 'table',
'linktitle': _('All Aggregations'),
'mustsearch': False,
'name': 'aggr_all',
'num_columns': 1,
'owner': 'omdadmin',
'painters': [('aggr_icons', None, ''),
('aggr_state', None, ''),
('aggr_treestate', None, ''),
('aggr_hosts', None, '')],
'play_sounds': False,
'public': False,
'show_filters': ['aggr_group',
'aggr_hosts',
'aggr_name_regex',
'aggr_state',
'aggr_output',
'aggr_assumed_state',
'aggr_effective_state'],
'sorters': [ ('aggr_group', False), ('aggr_name', False) ],
'title': _('All Aggregations'),
'topic': _('Business Intelligence')},
# All aggregations of a certain group
'aggr_group': {
'browser_reload': 0,
'column_headers': 'pergroup',
'datasource': 'bi_aggregations',
'description': _('Displays all aggregations of a certain group.'),
'group_painters': [],
'hard_filters': [],
'hard_filtervars': [('host', ''),
('aggr_name_regex', ''),
('aggr_output', ''),
('birs0', 'on'),
('birs1', 'on'),
('birs2', 'on'),
('birs3', 'on'),
('birs-1', 'on'),
('bias0', 'on'),
('bias1', 'on'),
('bias2', 'on'),
('bias3', 'on'),
('birs-1', 'on'),
('biasn', 'on'),
('bies0', 'on'),
('bies1', 'on'),
('bies2', 'on'),
('bies3', 'on'),
('bies-1', 'on'),
],
'hidden': True,
'hide_filters': [ 'aggr_group' ],
'hidebutton': False,
'icon' : 'aggr',
'layout': 'table',
'linktitle': _('Aggregation group'),
'mustsearch': False,
'name': 'aggr_group',
'num_columns': 1,
'owner': 'omdadmin',
'painters': [('aggr_icons', None, ''),
('aggr_state', None, ''),
('aggr_treestate', None, ''),
('aggr_hosts', None, '')],
'play_sounds': False,
'public': False,
'show_filters': ['aggr_hosts',
'aggr_name_regex',
'aggr_state',
'aggr_output',
'aggr_assumed_state',
'aggr_effective_state'],
'sorters': [ ('aggr_name', False) ],
'title': _('Aggregation group'),
'topic': _('Business Intelligence')},
# All host-only aggregations
'aggr_singlehosts': {
'browser_reload': 0,
'column_headers': 'off',
'datasource': 'bi_host_aggregations',
'description': _('Lists all aggregations which only rely on information of one host.'),
'group_painters': [('aggr_group', 'aggr_group')],
'hard_filters': [],
'hard_filtervars': [('is_host_scheduled_downtime_depth', '-1'),
('aggr_name_regex', ''),
('aggr_group', ''),
('birs0', 'on'),
('birs1', 'on'),
('birs2', 'on'),
('birs3', 'on'),
('birs-1', 'on'),
('bias0', 'on'),
('bias1', 'on'),
('bias2', 'on'),
('bias3', 'on'),
('birs-1', 'on'),
('biasn', 'on'),
('bies0', 'on'),
('bies1', 'on'),
('bies2', 'on'),
('bies3', 'on'),
('bies-1', 'on'),
('is_host_in_notification_period', '-1'),
('aggr_output', ''),
('site', ''),
('host', ''),
('opthostgroup', ''),
('neg_opthostgroup', ''),
],
'hidden': False,
'hide_filters': [],
'hidebutton': True,
'icon' : 'aggr',
'layout': 'table',
'linktitle': _('Host Aggregations'),
'mustsearch': False,
'name': 'aggr_singlehosts',
'num_columns': 1,
'owner': 'omdadmin',
'painters': [('aggr_icons', None, ''),
('host', 'aggr_host', ''),
('host_icons', None, ''),
('aggr_treestate', None, ''),
],
'play_sounds': False,
'public': True,
'show_filters': ['host_scheduled_downtime_depth',
'aggr_name_regex',
'aggr_group',
'aggr_state',
'host_in_notification_period',
'aggr_output',
'hoststate',
'siteopt',
'aggr_assumed_state',
'hostregex',
'opthostgroup',
'aggr_effective_state'],
'sorters': [('aggr_group', False), ('site_host', False)],
'title': _('Single-Host Aggregations'),
'topic': _('Business Intelligence')},
# Aggregations that bear the name of a host
'aggr_hostnameaggrs': {
'browser_reload': 0,
'column_headers': 'off',
'datasource': 'bi_hostname_aggregations',
'description': _('Host related aggregations'),
'group_painters': [('aggr_group', 'aggr_group')],
'hard_filters': [],
'hard_filtervars': [('is_host_scheduled_downtime_depth', '-1'),
('aggr_name_regex', ''),
('aggr_group', ''),
('birs0', 'on'),
('birs1', 'on'),
('birs2', 'on'),
('birs3', 'on'),
('birs-1', 'on'),
('bias0', 'on'),
('bias1', 'on'),
('bias2', 'on'),
('bias3', 'on'),
('birs-1', 'on'),
('biasn', 'on'),
('bies0', 'on'),
('bies1', 'on'),
('bies2', 'on'),
('bies3', 'on'),
('bies-1', 'on'),
('is_host_in_notification_period', '-1'),
('aggr_output', ''),
('site', ''),
('host', ''),
('opthostgroup', ''),
('neg_opthostgroup', ''),
],
'hidden': False,
'hide_filters': [],
'hidebutton': True,
'icon' : 'aggr',
'layout': 'table',
'linktitle': _('Host Aggregations'),
'mustsearch': False,
'num_columns': 1,
'owner': 'omdadmin',
'painters': [('aggr_icons', None, ''),
('host', 'aggr_host', ''),
('host_icons', None, ''),
('aggr_treestate', None, ''),
],
'play_sounds': False,
'public': True,
'show_filters': ['host_scheduled_downtime_depth',
'aggr_name_regex',
'aggr_group',
'aggr_state',
'host_in_notification_period',
'aggr_output',
'hoststate',
'siteopt',
'aggr_assumed_state',
'hostregex',
'opthostgroup',
'aggr_effective_state'],
'sorters': [('aggr_group', False), ('site_host', False)],
'title': _('Hostname Aggregations'),
'topic': _('Business Intelligence')},
# Single-Host Aggregations of a host
'aggr_singlehost': {'browser_reload': 0,
'column_headers': 'pergroup',
'datasource': 'bi_host_aggregations',
'description': _('A single host related aggregation'),
'group_painters': [('aggr_name', None)],
'hard_filters': [],
'hard_filtervars': [],
'hidden': True,
'hide_filters': ['siteopt', 'host'],
'hidebutton': False,
'icon' : 'aggr',
'layout': 'table',
'linktitle': _('Host Aggregations'),
'mustsearch': False,
'name': 'aggrhost',
'num_columns': 1,
'owner': 'omdadmin',
'painters': [('aggr_icons', None, ''),
('aggr_state', None, ''),
('aggr_treestate', None, '')],
'play_sounds': False,
'public': True,
'show_filters': [],
'sorters': [ ('aggr_name', False) ],
'title': _('Single-Host Aggregations of Host'),
'topic': _('Other')},
# All aggregations affected by a certain host
'aggr_host': {
'browser_reload': 0,
'column_headers': 'pergroup',
'datasource': 'bi_aggregations',
'description': _('All aggregations the given host is part of'),
'group_painters': [('aggr_group', 'aggr_group')],
'hard_filters': [],
'hard_filtervars': [('aggr_name_regex', ''),
('aggr_output', ''),
('birs0', 'on'),
('birs1', 'on'),
('birs2', 'on'),
('birs3', 'on'),
('birs-1', 'on'),
('bias0', 'on'),
('bias1', 'on'),
('bias2', 'on'),
('bias3', 'on'),
('bias-1', 'on'),
('biasn', 'on'),
('bies0', 'on'),
('bies1', 'on'),
('bies2', 'on'),
('bies3', 'on'),
('bies-1', 'on'),
],
'hidden': True,
'hide_filters': [],
'hidebutton': False,
'icon' : 'aggr',
'layout': 'table',
'linktitle': _('Aggregations'),
'mustsearch': False,
'name': 'aggr_host',
'num_columns': 1,
'owner': 'omdadmin',
'painters': [('aggr_icons', None, ''),
('aggr_state', None, ''),
('aggr_treestate', None, '')],
'play_sounds': False,
'public': False,
'show_filters': ['aggr_group',
'aggr_name_regex',
'aggr_state',
'aggr_output',
'aggr_assumed_state',
'aggr_effective_state',
'aggr_hosts'],
'sorters': [ ('aggr_name', False) ],
'title': _('Aggregations Affected by Host'),
'topic': _('Business Intelligence')},
# All aggregations affected by a certain service (one one site/host!)
'aggr_service': {
'browser_reload': 0,
'column_headers': 'pergroup',
'datasource': 'bi_aggregations',
'description': _('All aggregations affected by a certain service'),
'group_painters': [('aggr_group', 'aggr_group')],
'hard_filters': [],
'hard_filtervars': [('aggr_name_regex', ''),
('aggr_output', ''),
('birs0', 'on'),
('birs1', 'on'),
('birs2', 'on'),
('birs3', 'on'),
('birs-1', 'on'),
('bias0', 'on'),
('bias1', 'on'),
('bias2', 'on'),
('bias3', 'on'),
('bias-1', 'on'),
('biasn', 'on'),
('bies0', 'on'),
('bies1', 'on'),
('bies2', 'on'),
('bies3', 'on'),
('bies-1', 'on'),
],
'hidden': True,
'hide_filters': [],
'hidebutton': False,
'icon' : 'aggr',
'layout': 'table',
'linktitle': _('Service Aggreg.'),
'mustsearch': False,
'name': 'aggr_service',
'num_columns': 1,
'owner': 'omdadmin',
'painters': [('aggr_icons', None, ''),
('aggr_state', None, ''),
('aggr_treestate', None, '')],
'play_sounds': False,
'public': False,
'show_filters': ['aggr_group',
'aggr_name_regex',
'aggr_state',
'aggr_output',
'aggr_assumed_state',
'aggr_effective_state',
'aggr_service'],
'sorters': [ ('aggr_name', False) ],
'title': _('Aggregations Affected by Service'),
'topic': _('Business Intelligence')},
# All Aggregations that have (real) problems
'aggr_problems': {
'browser_reload': 0,
'column_headers': 'pergroup',
'datasource': 'bi_aggregations',
'description': _('All aggregations that have a non-OK state (honoring state assumptions)'),
'group_painters': [('aggr_group', 'aggr_group')],
'hard_filters': [],
'hard_filtervars': [('host', ''),
('aggr_name_regex', ''),
('aggr_output', ''),
('birs0', ''),
('birs1', 'on'),
('birs2', 'on'),
('birs3', 'on'),
('birs-1', ''),
('bias0', 'on'),
('bias1', 'on'),
('bias2', 'on'),
('bias3', 'on'),
('bias-1', ''),
('biasn', 'on'),
('bies0', 'on'),
('bies1', 'on'),
('bies2', 'on'),
('bies3', 'on'),
('bies-1', ''),
],
'hidden': False,
'hide_filters': [],
'hidebutton': False,
'icon' : 'aggr',
'layout': 'table',
'linktitle': _('Problem Aggregations'),
'mustsearch': False,
'name': 'aggr_all',
'num_columns': 1,
'owner': 'omdadmin',
'painters': [('aggr_icons', None, ''),
('aggr_state', None, ''),
('aggr_treestate', None, ''),
('aggr_hosts', None, '')],
'play_sounds': True,
'public': False,
'show_filters': ['aggr_group',
'aggr_hosts',
'aggr_name_regex',
'aggr_state',
'aggr_output',
'aggr_assumed_state',
'aggr_effective_state'],
'sorters': [ ('aggr_group', False), ('aggr_name', False)],
'title': _('Problem Aggregations'),
'topic': _('Business Intelligence')},
# All single-host aggregations with problems
'aggr_hostproblems': {
'browser_reload': 0,
'column_headers': 'off',
'datasource': 'bi_host_aggregations',
'description': _('All single-host aggregations that are in non-OK state (honoring state assumptions)'),
'group_painters': [('aggr_group', 'aggr_group')],
'hard_filters': [],
'hard_filtervars': [('is_host_scheduled_downtime_depth', '-1'),
('aggr_name_regex', ''),
('aggr_group', 'Hosts'),
('is_host_in_notification_period', '-1'),
('aggr_output', ''),
('birs0', 'on'),
('birs1', 'on'),
('birs2', 'on'),
('birs3', 'on'),
('birs-1', 'on'),
('bias0', 'on'),
('bias1', 'on'),
('bias2', 'on'),
('bias3', 'on'),
('bias-1', 'on'),
('biasn', 'on'),
('bies0', 'on'),
('bies1', 'on'),
('bies2', 'on'),
('bies3', 'on'),
('bies-1', 'on'),
('site', ''),
('host', ''),
('opthostgroup', ''),
('neg_opthostgroup', | |
where this policy or set is
attached
.. attribute:: binding
bindings list
**type**\: list of :py:class:`Binding <ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper.RoutingPolicy.Sets.Prefix.Sets_.Set.Attached.Binding>`
"""
_prefix = 'policy-repository-oper'
_revision = '2015-11-09'
def __init__(self):
    """Initialize the Attached container with an empty bindings list."""
    self.parent = None  # set by ydk when this node is attached to its parent
    self.binding = YList()  # child Binding entries (ydk list wrapper)
    self.binding.parent = self
    self.binding.name = 'binding'
class Binding(object):
    """
    One binding: an attach point at which a policy that (directly or
    indirectly) uses the object in question is applied.

    All leaf attributes default to None. Enum-typed leaves: af_name and
    neighbor_af_name (AddressFamilyEnum), saf_name (SubAddressFamilyEnum),
    direction (AttachPointDirectionEnum), group (GroupEnum). Integer
    leaves: propogate_from and propogate_to (ISIS propagate levels,
    -2147483648..2147483647). The remaining leaves are strings:
    aggregate_network_address, area_id, attach_point, attached_policy,
    group_name, instance, interface_name, neighbor_address,
    proto_instance, protocol, route_policy_name, source_protocol,
    vrf_name.
    """

    _prefix = 'policy-repository-oper'
    _revision = '2015-11-09'

    # Leaf names in model declaration order (used by __init__ and _has_data).
    _LEAF_NAMES = ('af_name', 'aggregate_network_address', 'area_id',
                   'attach_point', 'attached_policy', 'direction',
                   'group', 'group_name', 'instance', 'interface_name',
                   'neighbor_address', 'neighbor_af_name',
                   'propogate_from', 'propogate_to', 'proto_instance',
                   'protocol', 'route_policy_name', 'saf_name',
                   'source_protocol', 'vrf_name')

    def __init__(self):
        self.parent = None
        for leaf in self._LEAF_NAMES:
            setattr(self, leaf, None)

    @property
    def _common_path(self):
        # Path is relative to the parent container's path.
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path +'/Cisco-IOS-XR-policy-repository-oper:binding'

    def is_config(self):
        """Return True if this instance represents config data else returns False."""
        return False

    def _has_data(self):
        # Operational node: is_config() is False, so the guard applies.
        if not self.is_config():
            return False
        return any(getattr(self, leaf) is not None for leaf in self._LEAF_NAMES)

    @staticmethod
    def _meta_info():
        # Deferred import avoids a circular dependency with the meta module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_policy_repository_oper as meta
        return meta._meta_table['RoutingPolicy.Sets.Prefix.Sets_.Set.Attached.Binding']['meta_info']
@property
def _common_path(self):
    # Relative path: derived from the parent's path, so the parent link
    # must be established first.
    if self.parent is None:
        raise YPYModelError('parent is not set . Cannot derive path.')
    return self.parent._common_path +'/Cisco-IOS-XR-policy-repository-oper:attached'
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # Operational (read-only) model, hence always False.
    return False
def _has_data(self):
    """Return whether this node carries data (checks child bindings)."""
    # Config-only guard; is_config() is False for this operational node.
    if not self.is_config():
        return False
    if self.binding is not None:
        # Data is present iff any child binding has data.
        for child_ref in self.binding:
            if child_ref._has_data():
                return True
    return False
@staticmethod
def _meta_info():
    # Deferred import avoids a circular dependency with the meta module.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_policy_repository_oper as meta
    return meta._meta_table['RoutingPolicy.Sets.Prefix.Sets_.Set.Attached']['meta_info']
@property
def _common_path(self):
    # Absolute path keyed by set_name; the list key must be populated
    # before a path can be generated.
    if self.set_name is None:
        raise YPYModelError('Key property set_name is None')
    return '/Cisco-IOS-XR-policy-repository-oper:routing-policy/Cisco-IOS-XR-policy-repository-oper:sets/Cisco-IOS-XR-policy-repository-oper:prefix/Cisco-IOS-XR-policy-repository-oper:sets/Cisco-IOS-XR-policy-repository-oper:set[Cisco-IOS-XR-policy-repository-oper:set-name = ' + str(self.set_name) + ']'
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # Operational (read-only) model, hence always False.
    return False
def _has_data(self):
    """Return whether this set entry carries data (key or children)."""
    # Config-only guard; is_config() is False for this operational node.
    if not self.is_config():
        return False
    if self.set_name is not None:
        return True
    if self.attached is not None and self.attached._has_data():
        return True
    if self.used_by is not None and self.used_by._has_data():
        return True
    return False
@staticmethod
def _meta_info():
    # Deferred import avoids a circular dependency with the meta module.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_policy_repository_oper as meta
    return meta._meta_table['RoutingPolicy.Sets.Prefix.Sets_.Set']['meta_info']
@property
def _common_path(self):
    # Fixed absolute path: this container has no list keys.
    return '/Cisco-IOS-XR-policy-repository-oper:routing-policy/Cisco-IOS-XR-policy-repository-oper:sets/Cisco-IOS-XR-policy-repository-oper:prefix/Cisco-IOS-XR-policy-repository-oper:sets'
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # Operational (read-only) model, hence always False.
    return False
def _has_data(self):
    """Return whether any child set entry carries data."""
    # Config-only guard; is_config() is False for this operational node.
    if not self.is_config():
        return False
    if self.set is not None:
        for child_ref in self.set:
            if child_ref._has_data():
                return True
    return False
@staticmethod
def _meta_info():
    # Deferred import avoids a circular dependency with the meta module.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_policy_repository_oper as meta
    return meta._meta_table['RoutingPolicy.Sets.Prefix.Sets_']['meta_info']
class Unused(object):
    """
    All objects of a given type that are not referenced at all.

    .. attribute:: object

        Policy object names (list of str).
    """

    _prefix = 'policy-repository-oper'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        leaf_list = YLeafList()
        leaf_list.parent = self
        leaf_list.name = 'object'
        self.object = leaf_list

    @property
    def _common_path(self):
        # Fixed absolute path: this container has no list keys.
        return '/Cisco-IOS-XR-policy-repository-oper:routing-policy/Cisco-IOS-XR-policy-repository-oper:sets/Cisco-IOS-XR-policy-repository-oper:prefix/Cisco-IOS-XR-policy-repository-oper:unused'

    def is_config(self):
        """Return True if this instance represents config data else returns False."""
        return False

    def _has_data(self):
        # Operational node: is_config() is False, so the guard applies.
        if not self.is_config():
            return False
        if self.object is None:
            return False
        return any(child is not None for child in self.object)

    @staticmethod
    def _meta_info():
        # Deferred import avoids a circular dependency with the meta module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_policy_repository_oper as meta
        return meta._meta_table['RoutingPolicy.Sets.Prefix.Unused']['meta_info']
class Inactive(object):
    """
    All objects of a given type that are not attached to a protocol.

    .. attribute:: object

        Policy object names (list of str).
    """

    _prefix = 'policy-repository-oper'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        leaf_list = YLeafList()
        leaf_list.parent = self
        leaf_list.name = 'object'
        self.object = leaf_list

    @property
    def _common_path(self):
        # Fixed absolute path: this container has no list keys.
        return '/Cisco-IOS-XR-policy-repository-oper:routing-policy/Cisco-IOS-XR-policy-repository-oper:sets/Cisco-IOS-XR-policy-repository-oper:prefix/Cisco-IOS-XR-policy-repository-oper:inactive'

    def is_config(self):
        """Return True if this instance represents config data else returns False."""
        return False

    def _has_data(self):
        # Operational node: is_config() is False, so the guard applies.
        if not self.is_config():
            return False
        if self.object is None:
            return False
        return any(child is not None for child in self.object)

    @staticmethod
    def _meta_info():
        # Deferred import avoids a circular dependency with the meta module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_policy_repository_oper as meta
        return meta._meta_table['RoutingPolicy.Sets.Prefix.Inactive']['meta_info']
class Active(object):
    """
    All objects of a given type that are attached to a protocol.

    .. attribute:: object

        Policy object names (list of str).
    """

    _prefix = 'policy-repository-oper'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        leaf_list = YLeafList()
        leaf_list.parent = self
        leaf_list.name = 'object'
        self.object = leaf_list

    @property
    def _common_path(self):
        # Fixed absolute path: this container has no list keys.
        return '/Cisco-IOS-XR-policy-repository-oper:routing-policy/Cisco-IOS-XR-policy-repository-oper:sets/Cisco-IOS-XR-policy-repository-oper:prefix/Cisco-IOS-XR-policy-repository-oper:active'

    def is_config(self):
        """Return True if this instance represents config data else returns False."""
        return False

    def _has_data(self):
        # Operational node: is_config() is False, so the guard applies.
        if not self.is_config():
            return False
        if self.object is None:
            return False
        return any(child is not None for child in self.object)

    @staticmethod
    def _meta_info():
        # Deferred import avoids a circular dependency with the meta module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_policy_repository_oper as meta
        return meta._meta_table['RoutingPolicy.Sets.Prefix.Active']['meta_info']
@property
def _common_path(self):
    # Absolute, fully-prefixed XPath of the prefix-sets oper container.
    return '/Cisco-IOS-XR-policy-repository-oper:routing-policy/Cisco-IOS-XR-policy-repository-oper:sets/Cisco-IOS-XR-policy-repository-oper:prefix'
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # Generated oper-data node: this model is operational state, never config.
    return False
def _has_data(self):
if not self.is_config():
return False
if self.active is not None and self.active._has_data():
return True
if self.inactive is not None and self.inactive._has_data():
return True
if self.sets is not None and self.sets._has_data():
return True
if self.unused is not None and self.unused._has_data():
return True
return False
@staticmethod
def _meta_info():
    # Deferred import: the generated meta module is large, so it is only
    # loaded when metadata is actually requested.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_policy_repository_oper as meta
    return meta._meta_table['RoutingPolicy.Sets.Prefix']['meta_info']
class Community(object):
"""
Information about Community sets
.. attribute:: active
All objects of a given type that are attached to a protocol
**type**\: :py:class:`Active <ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper.RoutingPolicy.Sets.Community.Active>`
.. attribute:: inactive
All objects of a given type that are not attached to a protocol
**type**\: :py:class:`Inactive <ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper.RoutingPolicy.Sets.Community.Inactive>`
.. attribute:: sets
Information about individual sets
**type**\: :py:class:`Sets_ <ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper.RoutingPolicy.Sets.Community.Sets_>`
.. attribute:: unused
All objects of a given type that are not referenced at all
**type**\: :py:class:`Unused <ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper.RoutingPolicy.Sets.Community.Unused>`
"""
_prefix = 'policy-repository-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent | |
chain. {}'.format(e))
except ValidationError as e:
self.logger.debug('ValidationError error when importing chain. Error: {}'.format(e))
except ValueError as e:
self.logger.debug('ValueError error when importing chain. Error: {}'.format(e))
except Exception as e:
self.logger.error('tried to import a chain and got error {}'.format(e))
if self.raise_errors:
raise e
#
# Loops
#
async def _handle_msg_loop(self) -> None:
    """Drain the peer message queue, dispatching each entry to handle_msg().

    Runs until the service stops or the wait is cancelled.
    """
    while self.is_running:
        try:
            peer, cmd, msg = await self.wait(self.msg_queue.get())
        except OperationCancelled:
            return
        # handle_msg() pushes cpu-intensive work into sub-processes, so we
        # schedule it as a task rather than awaiting it here — that keeps
        # this loop free to keep draining the queue.
        asyncio.ensure_future(self.handle_msg(peer, cmd, msg))
async def handle_msg(self, peer: HLSPeer, cmd: protocol.Command,
                     msg: protocol._DecodedMsgType) -> None:
    """Process one decoded message from *peer*, never letting errors escape.

    Runs unsupervised via ensure_future() from _handle_msg_loop, so any
    exception raised here would otherwise be lost — hence the blanket
    handling below.
    """
    try:
        await self._handle_msg(peer, cmd, msg)
    except OperationCancelled:
        # Silently swallow OperationCancelled exceptions because we run unsupervised (i.e.
        # with ensure_future()). Our caller will also get an OperationCancelled anyway, and
        # there it will be handled.
        pass
    except Exception:
        # Log with traceback; broad catch is deliberate for an unsupervised task.
        self.logger.exception("Unexpected error when processing msg from %s", peer)
async def _handle_import_block_loop(self):
    """Consume queued new blocks and import them one at a time.

    NOTE(review): if the current sync stage is below ADDITIVE_SYNC_STAGE_ID,
    the dequeued item is silently dropped (there is no else branch below) —
    presumably intentional, since blocks are re-requested during sync;
    confirm with the sync design.
    """
    while self.is_running:
        try:
            new_block_queue_item = await self.wait(self._new_blocks_to_import.get())
        except OperationCancelled:
            break
        # `await` binds tighter than `>=`: this awaits current_sync_stage
        # (an awaitable property on consensus) and compares the result.
        if await self.consensus.current_sync_stage >= ADDITIVE_SYNC_STAGE_ID:
            self.logger.debug('found new block to import in queue. sending to handling function')
            # we await for the import block function here to make sure that we are only importing one block at a time.
            # later we will add multiprocessing with multiple instances of this object to import in parallel.
            try:
                # The lock serializes imports with any other coroutine that
                # holds importing_blocks_lock.
                async with self.importing_blocks_lock:
                    # NOTE: 'propogate_to_network' is the callee's (misspelled)
                    # keyword name; it must match handle_new_block's signature.
                    await self.handle_new_block(new_block=new_block_queue_item.new_block,
                                                peer=new_block_queue_item.peer,
                                                propogate_to_network=new_block_queue_item.propogate_to_network,
                                                from_rpc=new_block_queue_item.from_rpc)
            except OperationCancelled:
                # Silently swallow OperationCancelled exceptions because we run unsupervised (i.e.
                # with ensure_future()). Our caller will also get an OperationCancelled anyway, and
                # there it will be handled.
                pass
            except Exception:
                self.logger.exception("Unexpected error when importing block from %s", new_block_queue_item.peer)
async def sync_with_consensus_loop(self):
    """Bring a node that is out of consensus back in line with consensus.

    Nodes that are not in consensus must work out how their database differs
    and reconcile it. A node missing blocks requests them from a node in
    consensus; a node holding new blocks the consensus nodes lack sends those
    blocks out. Missing blocks are requested from a single peer; blocks that
    others need are broadcast to every peer that needs them.
    """
    # Syncing happens in stages (labelled 4 down to 2; stage 1 is fast sync,
    # stage 0 is unknown):
    #
    # Stage 4: the still-active window, which keeps filling with new blocks,
    # is synced as well as possible by nodes broadcasting new blocks to each
    # other. The hash-fragment method cannot efficiently sync this window
    # because it changes continuously.
    #
    # Stage 3 (roughly 1000 to 2000-3000 seconds ago): we diff blockchain
    # databases between nodes, add any blocks other nodes have that we lack,
    # and tell other nodes about blocks they are missing.
    #
    # Stage 2: if still unsynced, we simply adopt the blockchain database
    # currently in consensus — unlike stage 3, no missing blocks are added;
    # we go with whatever database has the most stake behind it.
    #
    # TODO: track the block requests we send to peers. Then make sure we don't
    # keep sending the same requests to a peer that isn't responding.
    self.logger.debug("additively_sync_recent_blocks_with_consensus starting")
    while self.is_operational:
        try:
            sync_parameters = await self.consensus.get_blockchain_sync_parameters()
        except NoEligiblePeers:
            self.logger.debug("No peers have the data we need to sync with. Skipping sync loop.")
            await asyncio.sleep(SYNC_WITH_CONSENSUS_LOOP_TIME_PERIOD)
            continue

        if sync_parameters is None:
            self.logger.debug("We are fully synced. Skipping sync loop and pausing before checking again.")
            await asyncio.sleep(SYNC_WITH_CONSENSUS_LOOP_TIME_PERIOD)
            continue

        if sync_parameters.sync_stage >= FULLY_SYNCED_STAGE_ID:
            self.logger.debug("We are synced up to stage 4. Skipping sync loop and pausing before checking again.")
            await asyncio.sleep(SYNC_WITH_CONSENSUS_LOOP_TIME_PERIOD)
            continue

        await self.sync_with_consensus(sync_parameters)
#
# async def _recently_imported_block_memory_expire_loop(self):
# self.logger.debug("Syncer recently imported block cache expiry loop started")
# while self.is_operational:
# expire_time = time.time()-SYNCER_CACHE_TO_PREVENT_MULTIPLE_IMPORTS_OF_SAME_BLOCKS_EXPIRE_TIME
# for block_hash, timestamp in self.recently_imported_block_hashes.copy().items():
# if timestamp < expire_time:
# del self.recently_imported_block_hashes[block_hash]
#
# await asyncio.sleep(SYNCER_RECENTLY_IMPORTED_BLOCK_MEMORY_EXPIRE_CHECK_LOOP_PERIOD)
#
# def remove_recently_imported_hashes(self, new_hashes):
# return list(set(new_hashes) - self.recently_imported_block_hashes.keys())
async def sync_block_conflict_with_consensus_loop(self):
    """Periodically reconcile conflicting blocks against consensus.

    Runs for the lifetime of the service, sleeping
    CONSENSUS_SYNC_TIME_PERIOD between passes.
    """
    self.logger.debug("sync_block_conflict_with_consensus_loop starting")
    while self.is_operational:
        await self.sync_block_conflict_with_consensus()
        await asyncio.sleep(CONSENSUS_SYNC_TIME_PERIOD)
#
# Core functionality. Methods for performing sync
#
async def sync_with_consensus(self, sync_parameters):
fragment_length = 3
sync_stage = sync_parameters.sync_stage
if sync_stage == FAST_SYNC_STAGE_ID:
await self.fast_sync_main(sync_parameters)
return
self.logger.debug("Syncing loop at sync stage {}".format(sync_stage))
additional_candidate_peers = list(sync_parameters.peers_to_sync_with)
peer_to_sync_with = additional_candidate_peers.pop()
chronological_window_timestamp = sync_parameters.timestamp_for_chronoligcal_block_window
if sync_stage <= CONSENSUS_MATCH_SYNC_STAGE_ID:
# The blocks we are downloading are in consensus. So for any conflict blocks, we are at fault.
force_replace_existing_blocks = True
else:
force_replace_existing_blocks = True
if sync_parameters.consensus_root_hash is None:
hex_consensus_root_hash = None
else:
hex_consensus_root_hash = encode_hex(sync_parameters.consensus_root_hash)
self.logger.debug("Syncing to chronological window timestamp {}. Syncing to chain head hash {}. Local root hash {}".format(chronological_window_timestamp,hex_consensus_root_hash, encode_hex(sync_parameters.local_root_hash)))
test_hist_root_hashes = self.chain_head_db.get_historical_root_hashes()[0:10]
self.logger.debug([(x[0], encode_hex(x[1])) for x in test_hist_root_hashes])
self.logger.debug([encode_hex(x) for x in self.chain_head_db.get_head_block_hashes_list(test_hist_root_hashes[0][1])])
timestamp_block_hashes = await self.chain_head_db.coro_load_chronological_block_window(chronological_window_timestamp)
#self.logger.debug("Our chronological block window has {} blocks".format(len(timestamp_block_hashes)))
if timestamp_block_hashes is None:
# we have no blocks for this window. So just request all of them automatically.
# This is the same for all versions of syncing
self.logger.debug("We have no blocks for this chronological block window. Requesting all blocks to add to our database.")
try:
fragment_bundle, peer_to_sync_with = await self.handle_getting_request_from_peers(request_function_name = "get_hash_fragments",
request_function_parameters = {'timestamp': chronological_window_timestamp},
peer = peer_to_sync_with,
additional_candidate_peers = additional_candidate_peers)
fragment_bundle = cast(HashFragmentBundle, fragment_bundle)
required_block_hashes = cast(List[Hash32], fragment_bundle.fragments)
if len(required_block_hashes) == 0:
raise SyncingError("Need to sync up to a chronological window timestamp, but there are no blocks to request. This is an empty window that cant possibly bring us to sync. 1")
# if sync_stage >= ADDITIVE_SYNC_STAGE_ID:
# required_block_hashes = self.remove_recently_imported_hashes(required_block_hashes)
peer_to_sync_with = await self.request_blocks_then_priority_import(block_hash_list = required_block_hashes,
peer = peer_to_sync_with,
additional_candidate_peers = additional_candidate_peers,
allow_import_for_expired_timestamp=True,
force_replace_existing_blocks = force_replace_existing_blocks,
allow_low_gas_block = True)
except NoCandidatePeers:
return
else:
our_block_hashes = [x[1] for x in timestamp_block_hashes]
first_try = True
try:
while True:
if first_try:
first_try = False
else:
if fragment_length > 16:
chain = self.node.get_chain()
self.logger.warning("Diff verification failed even with max fragment length. This is very unlikely to occur and something has probably gone wrong.")
self.logger.warning("Rebuilding our chronological block window from database.")
chronological_window_before=chain.chain_head_db.load_chronological_block_window(chronological_window_timestamp)
self.logger.warning(chronological_window_before)
await chain.coro_try_to_rebuild_chronological_chain_from_historical_root_hashes(sync_parameters.timestamp_for_root_hash)
chronological_window_after=chain.chain_head_db.load_chronological_block_window(chronological_window_timestamp)
self.logger.warning(chronological_window_after)
self.logger.debug("PRINTING diff_verification_block_hashes")
self.logger.debug([encode_hex(x) for x in diff_verification_block_hashes])
self.logger.debug("PRINTING their_fragment_list")
self.logger.debug([encode_hex(x) for x in their_fragment_list])
local_root_hash_from_sync_params = sync_parameters.local_root_hash
local_root_hash_from_chain_head_db = self.chain_head_db.get_historical_root_hash(sync_parameters.timestamp_for_root_hash)
consensus_root_hash_from_sync_params = sync_parameters.consensus_root_hash
self.logger.debug("local_root_hash_from_sync_params {}".format(encode_hex(local_root_hash_from_sync_params)))
self.logger.debug("local_root_hash_from_chain_head_db {}".format(encode_hex(local_root_hash_from_chain_head_db)))
self.logger.debug("consensus_root_hash_from_sync_params {}".format(encode_hex(consensus_root_hash_from_sync_params)))
break
self.logger.debug("Diff was incorrect, increasing fragment length and trying again. Fragment length {}".format(fragment_length))
fragment_length += 1
their_fragment_bundle, peer_to_sync_with = await self.handle_getting_request_from_peers(request_function_name = "get_hash_fragments",
request_function_parameters = {'timestamp': chronological_window_timestamp,
'fragment_length':fragment_length},
peer = peer_to_sync_with,
additional_candidate_peers = additional_candidate_peers)
their_fragment_bundle = cast(HashFragmentBundle, their_fragment_bundle)
their_fragment_list = their_fragment_bundle.fragments
our_fragment_list = prepare_hash_fragments(our_block_hashes, fragment_length)
hash_positions_of_theirs_that_we_need, hash_positions_of_ours_that_they_need = get_missing_hash_locations_list(
our_hash_fragments=our_fragment_list,
their_hash_fragments=their_fragment_list,
)
diff_verification_block_hashes = list(our_block_hashes)
if len(hash_positions_of_ours_that_they_need) > 0:
for idx in sorted(hash_positions_of_ours_that_they_need, key= lambda x: -x):
del(diff_verification_block_hashes[idx])
if len(hash_positions_of_ours_that_they_need) == 0 and len(hash_positions_of_theirs_that_we_need) == 0:
# This must mean that we need to increase the length of the hash fragments. If we have different chain head root hashes then we must have some difference here.
self.logger.debug("We found no differences in our chronological block windows. Continuing to increase fragment length")
continue
# now lets request the missing hashes from them, then verify that adding them to our hashes results in the correct root hash
# We must get these hashes from the same peer. If they don't respond here then we need to start over again.
if len(hash_positions_of_theirs_that_we_need) > 0:
their_fragment_bundle_we_need_to_add, peer_to_sync_with = await self.handle_getting_request_from_peers(
request_function_name = "get_hash_fragments",
request_function_parameters = {'timestamp': chronological_window_timestamp,
'only_these_indices':list(hash_positions_of_theirs_that_we_need)},
peer = peer_to_sync_with,
num_attempts_when_no_additional_peers = 3)
their_fragment_bundle_we_need_to_add = cast(HashFragmentBundle, their_fragment_bundle_we_need_to_add)
their_fragment_list_we_need_to_add = their_fragment_bundle_we_need_to_add.fragments
diff_verification_block_hashes.extend(their_fragment_list_we_need_to_add)
diff_verification_root_hash, _ = _make_trie_root_and_nodes_isometric_on_order(tuple(diff_verification_block_hashes))
if diff_verification_root_hash | |
with other 200+ tests or
any performance degradation for single/multi-process
parsing.
"""
template = """
<group name="service">
service {{ ignore }}
<group name="epipe.{{ service_id }}">
epipe {{ service_id }} customer {{ customer_id }} create
<group name="regular_sdp.{{r_spoke_sdp_id}}**">
spoke-sdp {{ r_spoke_sdp_id }}:{{vc_id }} create
no shutdown {{ state | set("enabled") }}
</group>
</group>
</group>
"""
data = """
service foo
epipe 103076 customer 160 create
description "vf=EWL:cn=TATA_COM:tl=2C02495918:st=act:"
service-mtu 1588
service-name "EPIPE service-103076 DKTN08a-D0105 (192.168.3.11)"
sap 1/2/12:20.* create
description "vf=EWL:cn=TATA_COM:tl=2C02495890:st=act:"
multi-service-site "TATA_VSNL_STRAT_A206_LAN10"
ingress
queue-override
queue 1 create
cbs default
mbs 40 kilobytes
rate 10000 cir 10000
exit
exit
exit
egress
queue-override
queue 1 create
cbs default
mbs 40 kilobytes
rate 10000 cir 10000
exit
exit
exit
accounting-policy 4
no shutdown
exit
spoke-sdp 8051:103076 create
no shutdown
exit
no shutdown
exit
epipe 103206 customer 1904 create
description "vf=1273:cn=skanska:tl=3C02407455:st=act:no='SKANSKA UK PLC Stepney Green E1 3DG'"
service-mtu 1988
service-name "EPIPE service-103206 DKTN08a-D0105 (192.168.3.11)"
sap 2/2/3:401.100 create
description "vf=1273:cn=skanska:tl=3C02407455:st=act:no='SKANSKA UK PLC Stepney Green E1 3DG'"
multi-service-site "SKANSKA_E13DG_A825_LAN1"
ingress
qos 11010
queue-override
queue 1 create
cbs default
mbs 1188 kilobytes
rate max cir 47500
exit
queue 3 create
cbs default
mbs 63 kilobytes
rate max cir 2500
exit
exit
exit
egress
qos 11010
queue-override
queue 1 create
cbs default
mbs 1188 kilobytes
rate max cir 47500
exit
queue 3 create
cbs default
mbs 63 kilobytes
rate max cir 2500
exit
exit
exit
collect-stats
accounting-policy 4
no shutdown
exit
spoke-sdp 8035:103206 create
no shutdown
exit
no shutdown
exit
"""
parser = ttp(data=data, template=template, log_level="ERROR")
parser.parse()
res = parser.result()
pprint.pprint(res)
assert res == [
[
{
"service": {
"epipe": {
"103076": {
"customer_id": "160",
"regular_sdp": {
"8051": {"state": "enabled", "vc_id": "103076"}
},
},
"103206": {
"customer_id": "1904",
"regular_sdp": {
"8035": {"state": "enabled", "vc_id": "103206"}
},
},
}
}
}
]
]
# test_github_issue_37_cleaned_up_data()
def test_github_issue_37_cleaned_data_template():
template = """
<group name="service">
service {{ ignore }}
<group name="epipe.{{ service_id }}" default="none">
epipe {{ service_id }} customer {{ customer_id }} create
description "{{ description | ORPHRASE }}"
service-mtu {{ service_mtu }}
service-name "{{ service_name | ORPHRASE }}"
<group name="endpoint" default="none">
endpoint {{ endpoint }} create
revert-time {{ revert_time }}
exit {{ _end_ }}
</group>
<group name="sap.{{ sap_id }}" default="none">
sap {{ sap_id | resub(r"\\*", "qinq") | ORPHRASE }} create
description "{{ description | ORPHRASE }}"
multi-service-site "{{ mss_name }}"
<group name="ingress">
ingress {{ _start_ }}
qos {{ sap_ingress | default("1") }}
scheduler-policy {{ scheduler_policy | default("none")}}
exit {{ _end_ }}
</group>
<group name="egress">
egress {{ _start_ }}
scheduler-policy {{ scheduler_policy | default("none") }}
qos {{ sap_egress | default("1)") }}
exit {{ _end_ }}
</group>
no shutdown {{ state | set("enabled") | default("disabled") }}
exit {{ _end_ }}
</group>
<group name="pwr_sdp.{{pwr_spoke_sdp_id}}**" default="none">
spoke-sdp {{ pwr_spoke_sdp_id }}:{{vc_id }} endpoint {{ endpoint }} create
precedence {{ precedence | default("default_precedence") }}
no shutdown {{ state | set("enabled") | default("disabled") }}
exit {{ _end_ }}
</group>
<group name="regular_sdp.{{r_spoke_sdp_id}}**" default="none">
spoke-sdp {{ r_spoke_sdp_id }}:{{vc_id }} create
no shutdown {{ state | set("enabled") | default("disabled") }}
exit {{ _end_ }}
</group>
no shutdown {{ state | set("enabled") | default("disabled") }}
exit {{ _end_ }}
</group>
exit {{ _end_ }}
</group>
"""
data = """
service foo
epipe 103076 customer 160 create
description "vf=EWL:cn=TATA_COM:tl=2C02495918:st=act:"
service-mtu 1588
service-name "EPIPE service-103076 DKTN08a-D0105 (192.168.3.11)"
sap 1/2/12:20.* create
description "vf=EWL:cn=TATA_COM:tl=2C02495890:st=act:"
multi-service-site "TATA_VSNL_STRAT_A206_LAN10"
ingress
queue-override
queue 1 create
cbs default
mbs 40 kilobytes
rate 10000 cir 10000
exit
exit
exit
egress
queue-override
queue 1 create
cbs default
mbs 40 kilobytes
rate 10000 cir 10000
exit
exit
exit
accounting-policy 4
no shutdown
exit
spoke-sdp 8051:103076 create
no shutdown
exit
no shutdown
exit
epipe 103206 customer 1904 create
description "vf=1273:cn=skanska:tl=3C02407455:st=act:no='SKANSKA UK PLC Stepney Green E1 3DG'"
service-mtu 1988
service-name "EPIPE service-103206 DKTN08a-D0105 (192.168.3.11)"
sap 2/2/3:401.100 create
description "vf=1273:cn=skanska:tl=3C02407455:st=act:no='SKANSKA UK PLC Stepney Green E1 3DG'"
multi-service-site "SKANSKA_E13DG_A825_LAN1"
ingress
qos 11010
queue-override
queue 1 create
cbs default
mbs 1188 kilobytes
rate max cir 47500
exit
queue 3 create
cbs default
mbs 63 kilobytes
rate max cir 2500
exit
exit
exit
egress
qos 11010
queue-override
queue 1 create
cbs default
mbs 1188 kilobytes
rate max cir 47500
exit
queue 3 create
cbs default
mbs 63 kilobytes
rate max cir 2500
exit
exit
exit
collect-stats
accounting-policy 4
no shutdown
exit
spoke-sdp 8035:103206 create
no shutdown
exit
no shutdown
exit
epipe 103256 customer 160 create
description "vf=EWL:cn=TATA_COMM:tl=2C02490189:st=act:"
service-mtu 1988
service-name "EPIPE service-103256 DKTN08a-D0105 (192.168.3.11)"
sap 1/2/12:15.* create
description "vf=EWL:cn=TATA_COMM:tl=2C02490171:st=act:"
multi-service-site "TATA_VSNL_STRAT_A206_LAN5"
ingress
qos 11000
queue-override
queue 1 create
cbs default
mbs 391 kilobytes
rate 100000 cir 100000
exit
exit
exit
egress
qos 11000
queue-override
queue 1 create
cbs default
mbs 391 kilobytes
rate 100000 cir 100000
exit
exit
exit
accounting-policy 4
no shutdown
exit
spoke-sdp 8139:103256 create
no shutdown
exit
no shutdown
exit
epipe 103742 customer 160 create
description "vf=EWL:cn=TATA_COM:tl=2C02410363:st=act:"
service-mtu 1588
service-name "EPIPE service-103742 DKTN08a-D0105 (192.168.3.11)"
sap 5/2/50:20.* create
description "vf=EWL:cn=TATA_COM:tl=2C02410338:st=act:"
multi-service-site "TATA_STRAT_LON_A206_LANA"
ingress
qos 11000
queue-override
queue 1 create
cbs default
mbs 32 kilobytes
rate 8000 cir 8000
exit
exit
exit
egress
qos 11000
queue-override
queue 1 create
cbs default
mbs 32 kilobytes
rate 8000 cir 8000
exit
exit
exit
accounting-policy 4
no shutdown
exit
spoke-sdp 8061:103742 create
no shutdown
exit
no shutdown
exit
epipe 55513386 customer 4 vc-switching create
description "vf=EAGG:cn=Bulldog:tl=VF"
service-mtu 1526
spoke-sdp 78:55513386 create
control-word
no shutdown
exit
spoke-sdp 8245:55513386 create
control-word
no shutdown
exit
no shutdown
exit
epipe 55517673 customer 4 create
description "vf=EAGG:cn=Bulldog:tl=2C01291821:st=act:no=NGA EPIPE#BAACTQ#VLAN 901"
service-mtu 1526
service-name "epipe service-64585 DKTN08a-D0105 (63.130.108.41)"
endpoint "SDP" create
revert-time infinite
exit
sap 2/2/3:901.* create
description "2_2_3,H0505824A,Bulldog,VLAN 901"
ingress
scheduler-policy "NGA-LLU-300M"
qos 20010
exit
egress
scheduler-policy "NGA-LLU-300M"
qos 20010
exit
no shutdown
exit
spoke-sdp 8243:55517673 endpoint "SDP" create
collect-stats
precedence 1
no shutdown
exit
spoke-sdp 8245:55517673 endpoint "SDP" create
collect-stats
precedence primary
no shutdown
exit
no shutdown
exit
"""
parser = ttp(data=data, template=template, log_level="ERROR")
parser.parse()
res = parser.result()
# pprint.pprint(res)
assert res == [
[
{
"service": {
"epipe": {
"103076": {
"customer_id": "160",
"description": "vf=EWL:cn=TATA_COM:tl=2C02495918:st=act:",
"regular_sdp": {
"8051": {"state": "enabled", "vc_id": "103076"}
},
"sap": {
"1/2/12:20.qinq": {
"description": "vf=EWL:cn=TATA_COM:tl=2C02495890:st=act:",
"egress": {
"sap_egress": "1)",
"scheduler_policy": "none",
},
"ingress": {
"sap_ingress": "1",
"scheduler_policy": "none",
},
"mss_name": "TATA_VSNL_STRAT_A206_LAN10",
"state": "enabled",
}
},
"service_mtu": "1588",
"service_name": "EPIPE service-103076 "
"DKTN08a-D0105 "
"(192.168.3.11)",
"state": "enabled",
},
"103206": {
"customer_id": "1904",
"description": "vf=1273:cn=skanska:tl=3C02407455:st=act:no='SKANSKA "
"UK PLC Stepney Green E1 "
"3DG'",
"regular_sdp": {
"8035": {"state": "enabled", "vc_id": "103206"}
},
"sap": {
"2/2/3:401.100": {
"description": "vf=1273:cn=skanska:tl=3C02407455:st=act:no='SKANSKA "
"UK "
"PLC "
"Stepney "
"Green "
"E1 "
"3DG'",
"egress": {
"sap_egress": "11010",
"scheduler_policy": "none",
},
"ingress": {
"sap_ingress": "11010",
"scheduler_policy": "none",
},
"mss_name": "SKANSKA_E13DG_A825_LAN1",
"state": "enabled",
}
},
"service_mtu": "1988",
"service_name": "EPIPE service-103206 "
"DKTN08a-D0105 "
"(192.168.3.11)",
"state": "enabled",
},
"103256": {
"customer_id": "160",
"description": "vf=EWL:cn=TATA_COMM:tl=2C02490189:st=act:",
"regular_sdp": {
"8139": {"state": "enabled", "vc_id": "103256"}
},
"sap": {
"1/2/12:15.qinq": {
"description": "vf=EWL:cn=TATA_COMM:tl=2C02490171:st=act:",
"egress": {
"sap_egress": "11000",
"scheduler_policy": "none",
},
"ingress": {
"sap_ingress": "11000",
"scheduler_policy": "none",
},
"mss_name": "TATA_VSNL_STRAT_A206_LAN5",
"state": "enabled",
}
},
"service_mtu": "1988",
"service_name": "EPIPE service-103256 "
"DKTN08a-D0105 "
"(192.168.3.11)",
"state": "enabled",
},
"103742": {
"customer_id": "160",
"description": "vf=EWL:cn=TATA_COM:tl=2C02410363:st=act:",
"regular_sdp": {
"8061": {"state": "enabled", "vc_id": "103742"}
},
"sap": {
"5/2/50:20.qinq": {
"description": "vf=EWL:cn=TATA_COM:tl=2C02410338:st=act:",
"egress": {
"sap_egress": "11000",
"scheduler_policy": "none",
},
"ingress": {
"sap_ingress": "11000",
"scheduler_policy": "none",
},
"mss_name": "TATA_STRAT_LON_A206_LANA",
"state": "enabled",
}
},
"service_mtu": "1588",
"service_name": "EPIPE service-103742 "
"DKTN08a-D0105 "
"(192.168.3.11)",
"state": "enabled",
},
"55517673": {
"customer_id": "4",
"description": "vf=EAGG:cn=Bulldog:tl=2C01291821:st=act:no=NGA "
"EPIPE#BAACTQ#VLAN 901",
"endpoint": {
"endpoint": '"SDP"',
"revert_time": "infinite",
},
"pwr_sdp": {
"8243": {
"endpoint": '"SDP"',
"precedence": "1",
"state": "enabled",
"vc_id": "55517673",
},
"8245": {
"endpoint": '"SDP"',
"precedence": "primary",
"state": "enabled",
"vc_id": "55517673",
},
},
"sap": {
"2/2/3:901.qinq": {
"description": "2_2_3,H0505824A,Bulldog,VLAN "
"901",
"egress": {
"sap_egress": "20010",
"scheduler_policy": '"NGA-LLU-300M"',
},
"ingress": {
"sap_ingress": "20010",
"scheduler_policy": '"NGA-LLU-300M"',
},
"mss_name": "none",
"state": "enabled",
}
},
"service_mtu": "1526",
"service_name": "epipe service-64585 "
"DKTN08a-D0105 "
"(192.168.3.11)",
"state": "enabled",
},
}
}
}
]
| |
Any,
NA,
DefaultDialog),
7267: (DL_TIER + 1,
OBSOLETE,
(SupervisorQuest, ToontownGlobals.CashbotMintIntC, 6),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
7268: (DL_TIER + 1,
Start,
(CogLevelQuest,
ToontownGlobals.CashbotMintIntB,
20,
11),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
7269: (DL_TIER + 1,
Start,
(MintQuest, ToontownGlobals.CashbotMintIntB, 6),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
7270: (DL_TIER + 1,
Start,
(SupervisorQuest, ToontownGlobals.CashbotMintIntB, 6),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
7500: (DL_TIER + 1,
Start,
(CogQuest,
ToontownGlobals.DonaldsDreamland,
100,
Any),
Any,
ToonHQ,
NA,
7501,
DefaultDialog),
7501: (DL_TIER + 1,
Cont,
(DeliverItemQuest, 1000),
Any,
9010,
1000,
NA,
DefaultDialog),
8101: (DL_TIER + 2,
Start,
(CogQuest,
Anywhere,
240,
Any),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8102: (DL_TIER + 2,
Start,
(CogQuest,
Anywhere,
260,
Any),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8103: (DL_TIER + 2,
Start,
(CogQuest,
Anywhere,
280,
Any),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8104: (DL_TIER + 2,
Start,
(CogQuest,
Anywhere,
320,
Any),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8105: (DL_TIER + 2,
Start,
(CogQuest,
Anywhere,
360,
Any),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8106: (DL_TIER + 2,
Start,
(CogQuest,
Anywhere,
400,
Any),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8107: (DL_TIER + 2,
Start,
(CogQuest,
Anywhere,
140,
'ym'),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8108: (DL_TIER + 2,
Start,
(CogQuest,
Anywhere,
120,
'mm'),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8109: (DL_TIER + 2,
Start,
(CogQuest,
Anywhere,
100,
'ds'),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8110: (DL_TIER + 2,
Start,
(CogQuest,
Anywhere,
100,
'hh'),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8111: (DL_TIER + 2,
Start,
(CogQuest,
Anywhere,
60,
'cr'),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8112: (DL_TIER + 2,
Start,
(CogQuest,
Anywhere,
40,
'tbc'),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8113: (DL_TIER + 2,
Start,
(CogQuest,
Anywhere,
140,
'nd'),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8114: (DL_TIER + 2,
Start,
(CogQuest,
Anywhere,
120,
'gh'),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8115: (DL_TIER + 2,
Start,
(CogQuest,
Anywhere,
100,
'ms'),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8116: (DL_TIER + 2,
Start,
(CogQuest,
Anywhere,
100,
'tf'),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8117: (DL_TIER + 2,
Start,
(CogQuest,
Anywhere,
60,
'm'),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8118: (DL_TIER + 2,
Start,
(CogQuest,
Anywhere,
40,
'mh'),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8119: (DL_TIER + 2,
Start,
(CogQuest,
Anywhere,
140,
'tw'),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8120: (DL_TIER + 2,
Start,
(CogQuest,
Anywhere,
120,
'bc'),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8121: (DL_TIER + 2,
Start,
(CogQuest,
Anywhere,
100,
'nc'),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8122: (DL_TIER + 2,
Start,
(CogQuest,
Anywhere,
100,
'mb'),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8123: (DL_TIER + 2,
Start,
(CogQuest,
Anywhere,
60,
'ls'),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8124: (DL_TIER + 2,
Start,
(CogQuest,
Anywhere,
40,
'rb'),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8125: (DL_TIER + 2,
Start,
(CogQuest,
Anywhere,
140,
'dt'),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8126: (DL_TIER + 2,
Start,
(CogQuest,
Anywhere,
120,
'ac'),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8127: (DL_TIER + 2,
Start,
(CogQuest,
Anywhere,
100,
'bs'),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8128: (DL_TIER + 2,
Start,
(CogQuest,
Anywhere,
100,
'sd'),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8129: (DL_TIER + 2,
Start,
(CogQuest,
Anywhere,
60,
'le'),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8130: (DL_TIER + 2,
Start,
(CogQuest,
Anywhere,
40,
'bw'),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8131: (DL_TIER + 2,
Start,
(CogLevelQuest,
Anywhere,
160,
9),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8132: (DL_TIER + 2,
Start,
(CogLevelQuest,
Anywhere,
200,
9),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8133: (DL_TIER + 2,
Start,
(CogLevelQuest,
Anywhere,
120,
10),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8134: (DL_TIER + 2,
Start,
(CogLevelQuest,
Anywhere,
140,
10),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8135: (DL_TIER + 2,
Start,
(CogLevelQuest,
Anywhere,
80,
11),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8136: (DL_TIER + 2,
Start,
(CogLevelQuest,
Anywhere,
100,
11),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8137: (DL_TIER + 2,
Start,
(CogLevelQuest,
Anywhere,
40,
12),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8138: (DL_TIER + 2,
Start,
(CogLevelQuest,
Anywhere,
60,
12),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8139: (DL_TIER + 2,
Start,
(CogTrackQuest,
Anywhere,
200,
'm'),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8140: (DL_TIER + 2,
Start,
(CogTrackQuest,
Anywhere,
200,
's'),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8141: (DL_TIER + 2,
Start,
(CogTrackQuest,
Anywhere,
200,
'c'),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8142: (DL_TIER + 2,
Start,
(CogTrackQuest,
Anywhere,
200,
'l'),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8143: (DL_TIER + 2,
Start,
(CogTrackQuest,
Anywhere,
250,
'm'),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8144: (DL_TIER + 2,
Start,
(CogTrackQuest,
Anywhere,
250,
's'),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8145: (DL_TIER + 2,
Start,
(CogTrackQuest,
Anywhere,
250,
'c'),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8146: (DL_TIER + 2,
Start,
(CogTrackQuest,
Anywhere,
250,
'l'),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8147: (DL_TIER + 2,
Start,
(CogTrackQuest,
Anywhere,
300,
'm'),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8148: (DL_TIER + 2,
Start,
(CogTrackQuest,
Anywhere,
300,
's'),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8149: (DL_TIER + 2,
Start,
(CogTrackQuest,
Anywhere,
300,
'c'),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8150: (DL_TIER + 2,
Start,
(CogTrackQuest,
Anywhere,
300,
'l'),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8151: (DL_TIER + 2,
Start,
(BuildingQuest,
Anywhere,
40,
Any,
2),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8152: (DL_TIER + 2,
Start,
(BuildingQuest,
Anywhere,
20,
Any,
3),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8153: (DL_TIER + 2,
Start,
(BuildingQuest,
Anywhere,
20,
Any,
4),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8154: (DL_TIER + 2,
Start,
(BuildingQuest,
Anywhere,
20,
Any,
5),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8155: (DL_TIER + 2,
Start,
(BuildingQuest,
Anywhere,
10,
'm',
5),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8156: (DL_TIER + 2,
Start,
(BuildingQuest,
Anywhere,
10,
's',
5),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8157: (DL_TIER + 2,
Start,
(BuildingQuest,
Anywhere,
10,
'c',
5),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8158: (DL_TIER + 2,
Start,
(BuildingQuest,
Anywhere,
10,
'l',
5),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8160: (DL_TIER + 2,
Start,
(CogNewbieQuest,
ToontownGlobals.ToontownCentral,
40,
Any,
NEWBIE_HP),
Any,
ToonHQ,
608,
NA,
DefaultDialog),
8161: (DL_TIER + 2,
Start,
(CogNewbieQuest,
ToontownGlobals.ToontownCentral,
40,
Any,
NEWBIE_HP),
Any,
ToonHQ,
608,
NA,
DefaultDialog),
8162: (DL_TIER + 2,
Start,
(CogNewbieQuest,
ToontownGlobals.ToontownCentral,
40,
Any,
NEWBIE_HP),
Any,
ToonHQ,
608,
NA,
DefaultDialog),
8163: (DL_TIER + 2,
Start,
(CogNewbieQuest,
ToontownGlobals.ToontownCentral,
40,
Any,
NEWBIE_HP),
Any,
ToonHQ,
608,
NA,
DefaultDialog),
8164: (DL_TIER + 2,
Start,
(BuildingNewbieQuest,
Anywhere,
1,
Any,
3,
NEWBIE_HP),
Any,
ToonHQ,
609,
NA,
DefaultDialog),
8165: (DL_TIER + 2,
Start,
(BuildingNewbieQuest,
Anywhere,
1,
Any,
3,
NEWBIE_HP),
Any,
ToonHQ,
609,
NA,
DefaultDialog),
8166: (DL_TIER + 2,
Start,
(BuildingNewbieQuest,
Anywhere,
1,
Any,
3,
NEWBIE_HP),
Any,
ToonHQ,
609,
NA,
DefaultDialog),
8167: (DL_TIER + 2,
Start,
(BuildingNewbieQuest,
Anywhere,
1,
Any,
3,
NEWBIE_HP),
Any,
ToonHQ,
609,
NA,
DefaultDialog),
8170: (DL_TIER + 2,
OBSOLETE,
(CogQuest,
ToontownGlobals.SellbotHQ,
160,
Any),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8171: (DL_TIER + 2,
OBSOLETE,
(CogQuest,
ToontownGlobals.SellbotHQ,
180,
Any),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8172: (DL_TIER + 2,
OBSOLETE,
(CogQuest,
ToontownGlobals.SellbotHQ,
200,
Any),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8173: (DL_TIER + 2,
OBSOLETE,
(CogLevelQuest,
ToontownGlobals.SellbotHQ,
100,
4),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8174: (DL_TIER + 2,
OBSOLETE,
(CogLevelQuest,
ToontownGlobals.SellbotHQ,
70,
5),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8175: (DL_TIER + 2,
OBSOLETE,
(CogLevelQuest,
ToontownGlobals.SellbotHQ,
70,
5),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8176: (DL_TIER + 2,
OBSOLETE,
(CogLevelQuest,
ToontownGlobals.SellbotFactoryInt,
60,
5),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8177: (DL_TIER + 2,
OBSOLETE,
(CogLevelQuest,
ToontownGlobals.SellbotFactoryInt,
40,
6),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8178: (DL_TIER + 2,
OBSOLETE,
(CogLevelQuest,
ToontownGlobals.SellbotFactoryInt,
40,
6),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8179: (DL_TIER + 2,
OBSOLETE,
(FactoryQuest, ToontownGlobals.SellbotHQ, 12),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8180: (DL_TIER + 2,
OBSOLETE,
(FactoryQuest, ToontownGlobals.SellbotHQ, 16),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8181: (DL_TIER + 2,
OBSOLETE,
(FactoryQuest, ToontownGlobals.SellbotHQ, 12),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8182: (DL_TIER + 2,
OBSOLETE,
(FactoryQuest, ToontownGlobals.SellbotHQ, 16),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8183: (DL_TIER + 2,
OBSOLETE,
(SkelecogQuest, ToontownGlobals.SellbotFactoryInt, 80),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8184: (DL_TIER + 2,
OBSOLETE,
(SkelecogLevelQuest,
ToontownGlobals.SellbotHQ,
40,
5),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8185: (DL_TIER + 2,
OBSOLETE,
(SkelecogLevelQuest,
ToontownGlobals.SellbotHQ,
16,
6),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8186: (DL_TIER + 2,
OBSOLETE,
(ForemanQuest, ToontownGlobals.SellbotHQ, 12),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8187: (DL_TIER + 2,
OBSOLETE,
(ForemanQuest, ToontownGlobals.SellbotHQ, 16),
Any,
ToonHQ,
Any,
NA,
DefaultDialog),
8188: (DL_TIER + 2,
OBSOLETE,
(VPQuest, ToontownGlobals.SellbotHQ, 2),
Any,
| |
<reponame>ksk5429/ksk5429.github.io
# -*- coding: utf-8 -*-
"""
Created on Thu May 28 11:59:27 2015
@author: eric
"""
import sqlite3
import json
import sys
import datetime
class ConnectDB:
    """SQLite-backed storage layer for networks, objects and streams."""
    # Connection settings; connect() only handles the 'SQLite' type and
    # opens the file '<name>.sqlite' in the working directory.
    db_details = {
        'name':'nanodb',
        'prefix':'nano_',
        'type':'SQLite'
    }
    # NOTE(review): this flag is shadowed by the debug() method defined
    # below -- inside debug(), "self.debug" resolves to the (always truthy)
    # bound method, so this switch can never disable output.  Renaming
    # either the flag or the method would make it effective.
    debug = True
    '''
    Print Messages
    '''
    def debug(self,text):
        # Print a diagnostic message.  The guard below is always true (see
        # the shadowing note above), so this currently prints unconditionally.
        if self.debug:
            print text
'''
Connect to the database
'''
def connect(self):
try:
conn = None
if 'SQLite' == self.db_details['type']:
conn = sqlite3.connect(self.db_details['name']+'.sqlite')
return [True,conn]
except:
pass
return [False,None]
def getType(self,indi_type):
# Interpret indi_type=0 as signed char
# Ref: https://docs.python.org/2/library/struct.html
c_type_options = ['b','?','c','b','B','h','H','i','I','q','Q','f','d']
c_type_details = {
'b' : {
'c_type' : 'signed char',
'python_type' : 'integer',
'standard_size' : 1
},
'?' : {
'c_type' : '_Bool',
'python_type' : 'bool',
'standard_size' : 1
},
'c' : {
'c_type' : 'char',
'python_type' : 'string',
'standard_size' : 1
},
's' : {
'c_type' : 'char[]',
'python_type' : 'string',
'standard_size' : 1
},
'B' : {
'c_type' : 'unsigned char',
'python_type' : 'integer',
'standard_size' : 1
},
'h' : {
'c_type' : 'short',
'python_type' : 'integer',
'standard_size' : 2
},
'H' : {
'c_type' : 'unsigned short',
'python_type' : 'integer',
'standard_size' : 2
},
'i' : {
'c_type' : 'int',
'python_type' : 'integer',
'standard_size' : 4
},
'I' : {
'c_type' : 'unsigned int',
'python_type' : 'integer',
'standard_size' : 4
},
'f' : {
'c_type' : 'float',
'python_type' : 'float',
'standard_size' : 4
},
'q' : {
'c_type' : 'long long',
'python_type' : 'integer',
'standard_size' : 8
},
'Q' : {
'c_type' : 'unsigned long long',
'python_type' : 'integer',
'standard_size' : 8
},
'd' : {
'c_type' : 'double',
'python_type' : 'float',
'standard_size' : 8
}
}
return c_type_details[c_type_options[indi_type]]['python_type']
'''
Create Acount, Object, and/or Stream
'''
    def create(self,network_id,create_details,unixtime=None):
        """Create a network and/or the objects and streams it describes.

        network_id -- string id of the network to create or extend.
        create_details -- nested dict: network_details, plus optional
            'objects' -> {object_id: {object_details, 'streams' -> {...}}}.
        unixtime -- creation timestamp (seconds since epoch); defaults to now.

        Returns a response dict; per-object and per-stream status messages
        are nested under response['objects'][oid]['streams'][sid]['msg'].
        Stops (returns early) at the first hard failure.
        """
        if unixtime is None:
            # seconds since the Unix epoch, in UTC
            unixtime = int((datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds())
        response = {}
        try:
            # Check network_id
            if not isinstance(network_id,basestring):
                self.debug( "network_id Should Be String" )
                response["msg"] = "network_id Should Be String"
                return response
            if "network_id" not in create_details:
                create_details["network_id"] = network_id
            # Check if network exists
            network_exists, network_details = self.networkExists(network_id)
            if not network_exists:
                self.debug( "Create Network: "+network_id )
                # Check that "network_id" and "network_details" are in create_details
                # NOTE(review): "network_id" was inserted just above, so only
                # the "network_details" half of this test can actually fail.
                if "network_id" not in create_details or "network_details" not in create_details:
                    self.debug( "Incomplete Create Details. Missing network_id and/or network_details" )
                    response["msg"] = "Incomplete Create Details. Missing network_id and/or network_details"
                    return response
                # Add empty dictionary, if necessary
                if 'objects' not in create_details or not isinstance(create_details['objects'],dict):
                    create_details['objects'] = {} # Empty object dict
                # Create network
                created = self.createNetwork(network_id,create_details,unixtime)
                # Something went wrong
                if not created:
                    self.debug( "Network "+network_id+" Not Created" )
                    response["msg"] = "Network "+network_id+" Not Created"
                    return response
                # Set new info as network info
                network_details = create_details
            elif 'objects' not in create_details or len(create_details['objects']) == 0:
                # Unnecessary network create request
                self.debug( "Network "+network_id+" Already Exists" )
                response["msg"] = "Network "+network_id+" Already Exists"
                return response
            # Check for objects
            if 'objects' in create_details and len(create_details['objects']) > 0:
                response['objects'] = {}
                for object_id in create_details['objects']:
                    response['objects'][object_id] = {}
                    # Check if object exists
                    object_exists, object_details = self.objectExists(network_id,object_id,network_details)
                    if not object_exists:
                        self.debug( "Create Object: "+object_id )
                        # Check that "object_id" and "object_details" are in create_details
                        create_details['objects'][object_id]['object_id'] = object_id
                        if "object_details" not in create_details['objects'][object_id]:
                            self.debug( "Incomplete Create Details for Object "+object_id )
                            response['objects'][object_id]["msg"] = "Incomplete Create Details for Object "+object_id
                            return response
                        # Add empty dictionary, if necessary
                        if 'streams' not in create_details['objects'][object_id] or not isinstance(create_details['objects'][object_id]['streams'],dict):
                            create_details['objects'][object_id]['streams'] = {} # Empty object dict
                        created = self.createObject(network_id,object_id,create_details['objects'][object_id],unixtime)
                        # Something went wrong
                        if not created:
                            self.debug( "Object "+object_id+" Not Created" )
                            response['objects'][object_id]["msg"] = "Object "+object_id+" Not Created"
                            return response
                        else:
                            self.debug( "Object "+object_id+" Created" )
                            response['objects'][object_id]["msg"] = "Object "+object_id+" Created"
                    elif 'streams' not in create_details['objects'][object_id] or len(create_details['objects'][object_id]['streams']) == 0:
                        # Unnecessary object create request
                        self.debug( "Object "+object_id+" Already Exists" )
                        response['objects'][object_id]["msg"] = "Object "+object_id+" Already Exists"
                    # Check for streams
                    if 'streams' in create_details['objects'][object_id] and len(create_details['objects'][object_id]['streams']) > 0:
                        response['objects'][object_id]['streams'] = {}
                        for stream_id in create_details['objects'][object_id]['streams']:
                            response['objects'][object_id]['streams'][stream_id] = {}
                            # Check if stream exists
                            stream_exists, stream_details = self.streamExists(network_id,object_id,stream_id,network_details)
                            if not stream_exists:
                                self.debug( "Create Stream: "+stream_id )
                                # Check that "stream_id" and "stream_details" are in create_details
                                create_details['objects'][object_id]['streams'][stream_id]['stream_id'] = stream_id
                                if "stream_details" not in create_details['objects'][object_id]['streams'][stream_id]:
                                    self.debug( "Incomplete Create Details for Stream "+stream_id )
                                    response['objects'][object_id]['streams'][stream_id]["msg"] = "Incomplete Create Details for Stream "+stream_id
                                    return response
                                if "indi_details" not in create_details['objects'][object_id]['streams'][stream_id]['stream_details']:
                                    self.debug( "Incomplete INDI Details for Stream "+stream_id )
                                    response['objects'][object_id]['streams'][stream_id]["msg"] = "Incomplete INDI Details for Stream "+stream_id
                                    return response
                                indi = create_details['objects'][object_id]['streams'][stream_id]['stream_details']['indi_details']
                                # All four INDI fields are required
                                if not all(k in indi for k in ("stream_type","data_type","data_length","data_unit")):
                                    self.debug( "Incomplete INDI Details for Stream "+stream_id )
                                    response['objects'][object_id]['streams'][stream_id]["msg"] = "Incomplete INDI Details for Stream "+stream_id
                                    return response
                                created = self.createStream(network_id,object_id,stream_id,create_details['objects'][object_id]['streams'][stream_id],unixtime)
                                # Something went wrong
                                if not created:
                                    self.debug( "Stream "+stream_id+" Not Created" )
                                    response['objects'][object_id]['streams'][stream_id]["msg"] = "Stream "+stream_id+" Not Created"
                                    return response
                                else:
                                    self.debug( "Stream "+stream_id+" Created" )
                                    response['objects'][object_id]['streams'][stream_id]["msg"] = "Stream "+stream_id+" Created"
                            else:
                                self.debug( "Stream "+stream_id+" Exists" )
                                response['objects'][object_id]['streams'][stream_id]["msg"] = "Stream "+stream_id+" Already Exists"
            return response
        except:
            # NOTE(review): bare except maps any failure (including typos /
            # AttributeErrors) to a generic error response; consider
            # narrowing and logging the exception.
            response["msg"] = "Error Occured"
            response["error"] = True
            self.debug( "Create Error" )
        return response
'''
Read Stream(s).
'''
    def read(self,network_id,read_details):
        """Read network/object/stream details and data points.

        read_details mirrors the create() structure: presence of
        'network_details' / 'object_details' / 'stream_details' keys requests
        those details; a 'points' key (optionally with start/end/limit)
        requests data points.  Missing items yield per-item 'Not Found'
        messages rather than aborting the whole read.

        Returns a nested response dict.
        """
        response = {}
        try:
            # Check network_id
            if not isinstance(network_id,basestring):
                self.debug( "network_id Should Be String" )
                response["msg"] = "network_id Should Be String"
                return response
            response["network_id"] = network_id
            # Check if network exists
            network_exists, network_details = self.networkExists(network_id)
            if not network_exists:
                self.debug( "Read Failed. Network "+network_id+" Not Found" )
                response["msg"] = "Read Failed. Network "+network_id+" Not Found."
                return response
            # Read network_details
            if "network_details" in read_details:
                response['network_details'] = network_details['network_details']
                self.debug( "Network "+network_id+" Details Read" )
                response["msg"] = "Network "+network_id+" Details Read"
            # Done reading
            if 'objects' not in read_details or len(read_details['objects']) == 0:
                pass
            # Check for objects
            if 'objects' in read_details and len(read_details['objects']) > 0:
                response['objects'] = {}
                for object_id in read_details['objects']:
                    response['objects'][object_id] = {}
                    # Check if object exists
                    object_exists, object_details = self.objectExists(network_id,object_id,network_details)
                    if not object_exists:
                        self.debug( "Object "+object_id+" Not Found" )
                        response['objects'][object_id]["msg"] = "Object "+object_id+" Not Found."
                    else:
                        self.debug( "Read Object: "+object_id )
                        # Check if "object_id" and "object_details" are in read_details
                        response['objects'][object_id]['object_id'] = object_id
                        if "object_details" in read_details['objects'][object_id]:
                            # Read object_details
                            response['objects'][object_id]["object_details"] = network_details['objects'][object_id]["object_details"]
                            self.debug( "Object "+object_id+" Details Read" )
                            response['objects'][object_id]["msg"] = "Object "+object_id+" Details Read"
                        # Done reading
                        if 'streams' not in read_details['objects'][object_id] or len(read_details['objects'][object_id]['streams']) == 0:
                            pass
                        # Check for streams
                        if 'streams' in read_details['objects'][object_id] and len(read_details['objects'][object_id]['streams']) > 0:
                            response['objects'][object_id]['streams'] = {}
                            for stream_id in read_details['objects'][object_id]['streams']:
                                response['objects'][object_id]['streams'][stream_id] = {}
                                # Check if stream exists
                                stream_exists, stream_details = self.streamExists(network_id,object_id,stream_id,network_details)
                                if not stream_exists:
                                    self.debug( "Stream "+stream_id+" Not Found" )
                                    response['objects'][object_id]['streams'][stream_id]["msg"] = "Stream "+stream_id+" Not Found."
                                else:
                                    # Check that "stream_id" and "stream_details" are in read_details
                                    response['objects'][object_id]['streams'][stream_id]['stream_id'] = stream_id
                                    if "stream_details" in read_details['objects'][object_id]['streams'][stream_id]:
                                        # Read stream_details
                                        response['objects'][object_id]['streams'][stream_id]['stream_details'] = network_details['objects'][object_id]['streams'][stream_id]['stream_details']
                                        self.debug( "Stream "+stream_id+" Details Read" )
                                        response['objects'][object_id]['streams'][stream_id]["msg"] = "Stream "+stream_id+" Details Read"
                                    # Done reading
                                    if 'points' not in read_details['objects'][object_id]['streams'][stream_id]:
                                        pass
                                    else:
                                        points = []
                                        read_stream_details = read_details['objects'][object_id]['streams'][stream_id]
                                        if any (k in read_stream_details for k in ("start","end","limit")):
                                            # Get records from stream db
                                            read, points = self.readStream(network_id,object_id,stream_id,read_stream_details)
                                        elif 'points' in network_details['objects'][object_id]['streams'][stream_id]:
                                            # Get recent records from network info
                                            points = network_details['objects'][object_id]['streams'][stream_id]['points']
                                        response['objects'][object_id]['streams'][stream_id]['points'] = points
                                        # For numeric streams with 2+ points, report min/max of 'value'.
                                        if len(points) > 1 and isinstance(points[0]['value'],(int,long,float)):
                                            min_val = points[0]['value']
                                            max_val = points[0]['value']
                                            for i in range(1,len(points)):
                                                if points[i]['value'] > max_val:
                                                    max_val = points[i]['value']
                                                elif points[i]['value'] < min_val:
                                                    min_val = points[i]['value']
                                            response['objects'][object_id]['streams'][stream_id]['min_value'] = min_val
                                            response['objects'][object_id]['streams'][stream_id]['max_value'] = max_val
        except:
            # NOTE(review): bare except maps any failure to a generic error.
            response["msg"] = "Error Occured"
            response["error"] = True
            self.debug( "Read Error" )
        return response
'''
Update Stream(s)
'''
def update(self,network_id,update_details,unixtime=None):
if unixtime is None:
unixtime = int((datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds())
response = {}
try:
# Check network_id
if not isinstance(network_id,basestring):
self.debug( "network_id Should Be String" )
response["msg"] = "network_id Should Be String"
return response
if "network_id" not in update_details:
update_details["network_id"] = network_id
# Check if network exists
network_exists, network_details = self.networkExists(network_id)
if not network_exists:
self.debug( "Update Failed. | |
#end for
#end if
return self
#end def set_optional
def get(self,key,value=None): # follow dict interface, no plural
if key in self:
value = self[key]
#end if
return value
#end def get
def get_optional(self,key,value=None):
if key in self:
value = self[key]
#end if
return value
#end def get_optional
    def get_required(self,key):
        # Like get(), but a missing key is a hard error reported through
        # obj.error.  NOTE(review): this assumes obj.error raises/aborts --
        # if it ever returned normally, the "return value" below would
        # raise NameError (value unbound).  Confirm obj.error's behavior.
        if key in self:
            value = self[key]
        else:
            obj.error(self,'a required key is not present\nkey required: {0}\nkeys present: {1}'.format(key,sorted(self._keys())))
        #end if
        return value
    #end def get_required
def delete(self,*keys):
nkeys = len(keys)
single = False
if nkeys==0:
keys = sorted(self._keys())
elif nkeys==1 and isinstance(keys[0],(list,tuple)):
keys = keys[0]
elif nkeys==1:
single = True
#end if
values = []
for key in keys:
values.append(self[key])
del self[key]
#end for
if single:
return values[0]
else:
return values
#end if
#end def delete
def delete_optional(self,key,value=None):
if key in self:
value = self[key]
del self[key]
#end if
return value
#end def delete_optional
    def delete_required(self,key):
        # Remove *key* and return its value; a missing key is a hard error
        # reported through obj.error.  NOTE(review): assumes obj.error
        # raises -- otherwise the final "return value" would raise
        # NameError (value unbound).  Confirm obj.error's behavior.
        if key in self:
            value = self[key]
            del self[key]
        else:
            obj.error(self,'a required key is not present\nkey required: {0}\nkeys present: {1}'.format(key,sorted(self._keys())))
        #end if
        return value
    #end def delete_required
def add(self,key,value):
self[key] = value
#end def add
def add_optional(self,key,value):
if key not in self:
self[key] = value
#end if
#end def add_optional
def transfer_from(self,other,keys=None,copy=False,overwrite=True):
if keys==None:
if isinstance(other,object_interface):
keys = other._keys()
else:
keys = other.keys()
#end if
#end if
if copy:
copier = deepcopy
else:
copier = nocopy
#end if
if overwrite:
for k in keys:
self[k]=copier(other[k])
#end for
else:
for k in keys:
if k not in self:
self[k]=copier(other[k])
#end if
#end for
#end if
#end def transfer_from
def transfer_to(self,other,keys=None,copy=False,overwrite=True):
if keys==None:
keys = self._keys()
#end if
if copy:
copier = deepcopy
else:
copier = nocopy
#end if
if overwrite:
for k in keys:
other[k]=copier(self[k])
#end for
else:
for k in keys:
if k not in self:
other[k]=copier(self[k])
#end if
#end for
#end if
#end def transfer_to
def move_from(self,other,keys=None):
if keys==None:
if isinstance(other,object_interface):
keys = other._keys()
else:
keys = other.keys()
#end if
#end if
for k in keys:
self[k]=other[k]
del other[k]
#end for
#end def move_from
def move_to(self,other,keys=None):
if keys==None:
keys = self._keys()
#end if
for k in keys:
other[k]=self[k]
del self[k]
#end for
#end def move_to
def copy_from(self,other,keys=None,deep=True):
obj.transfer_from(self,other,keys,copy=deep)
#end def copy_from
def copy_to(self,other,keys=None,deep=True):
obj.transfer_to(self,other,keys,copy=deep)
#end def copy_to
def shallow_copy(self):
new = self.__class__()
for k,v in self._items():
new[k] = v
#end for
return new
#end def shallow_copy
def inverse(self):
new = self.__class__()
for k,v in self._items():
new[v] = k
#end for
return new
#end def inverse
def path_exists(self,path):
o = self
if isinstance(path,str):
path = path.split('/')
#end if
for p in path:
if not p in o:
return False
#end if
o = o[p]
#end for
return True
#end def path_exists
def set_path(self,path,value=None):
o = self
cls = self.__class__
if isinstance(path,str):
path = path.split('/')
#end if
for p in path[0:-1]:
if not p in o:
o[p] = cls()
#end if
o = o[p]
#end for
o[path[-1]] = value
#end def set_path
def get_path(self,path,value=None):
o = self
if isinstance(path,str):
path = path.split('/')
#end if
for p in path[0:-1]:
if not p in o:
return value
#end if
o = o[p]
#end for
lp = path[-1]
if lp not in o:
return value
else:
return o[lp]
#end if
#end def get_path
    def serial(self,s=None,path=None):
        # Flatten the nested structure into a single-level obj whose keys
        # are '/'-separated paths (e.g. 'a/b/c').  *s* and *path* are only
        # supplied internally on the recursive calls; the top-level call
        # returns the accumulator, recursive calls return None.
        first = s is None
        if first:
            s = obj()
            path = ''
        #end if
        for k,v in self._items():
            p = path+str(k)
            if isinstance(v,obj):
                if len(v)==0:
                    # empty sub-object: keep it as a leaf value
                    s[p]=v
                else:
                    # recurse, extending the path prefix
                    v._serial(s,p+'/')
                #end if
            else:
                s[p]=v
            #end if
        #end for
        if first:
            return s
        #end if
    #end def serial
# access preserving functions
# list interface
def _append(self,*args,**kwargs):
obj.append(self,*args,**kwargs)
# return representations
def _list(self,*args,**kwargs):
return obj.list(self,*args,**kwargs)
def _list_optional(self,*args,**kwargs):
return obj.list_optional(self,*args,**kwargs)
def _tuple(self,*args,**kwargs):
return obj.tuple(self,*args,**kwargs)
def _dict(self,*args,**kwargs):
return obj.dict(self,*args,**kwargs)
def _to_dict(self,*args,**kwargs):
return obj.to_dict(self,*args,**kwargs)
def _obj(self,*args,**kwargs):
return obj.obj(self,*args,**kwargs)
# list extensions
def _first(self,*args,**kwargs):
return obj.first(self,*args,**kwargs)
def _last(self,*args,**kwargs):
return obj.last(self,*args,**kwargs)
def _select_random(self,*args,**kwargs):
return obj.select_random(self,*args,**kwargs)
# dict extensions
def _random_key(self,*args,**kwargs):
obj.random_key(self,*args,**kwargs)
def _set(self,*args,**kwargs):
obj.set(self,*args,**kwargs)
def _set_optional(self,*args,**kwargs):
obj.set_optional(self,*args,**kwargs)
def _get(self,*args,**kwargs):
obj.get(self,*args,**kwargs)
def _get_optional(self,*args,**kwargs):
obj.get_optional(self,*args,**kwargs)
def _get_required(self,*args,**kwargs):
obj.get_required(self,*args,**kwargs)
def _delete(self,*args,**kwargs):
obj.delete(self,*args,**kwargs)
def _delete_optional(self,*args,**kwargs):
obj.delete_optional(self,*args,**kwargs)
def _delete_required(self,*args,**kwargs):
obj.delete_required(self,*args,**kwargs)
def _add(self,*args,**kwargs):
obj.add(self,*args,**kwargs)
def _add_optional(self,*args,**kwargs):
obj.add_optional(self,*args,**kwargs)
def _transfer_from(self,*args,**kwargs):
obj.transfer_from(self,*args,**kwargs)
def _transfer_to(self,*args,**kwargs):
obj.transfer_to(self,*args,**kwargs)
def _move_from(self,*args,**kwargs):
obj.move_from(self,*args,**kwargs)
def _move_to(self,*args,**kwargs):
obj.move_to(self,*args,**kwargs)
def _copy_from(self,*args,**kwargs):
obj.copy_from(self,*args,**kwargs)
def _copy_to(self,*args,**kwargs):
obj.copy_to(self,*args,**kwargs)
def _shallow_copy(self,*args,**kwargs):
obj.shallow_copy(self,*args,**kwargs)
def _inverse(self,*args,**kwargs):
return obj.inverse(self,*args,**kwargs)
def _path_exists(self,*args,**kwargs):
obj.path_exists(self,*args,**kwargs)
def _set_path(self,*args,**kwargs):
obj.set_path(self,*args,**kwargs)
def _get_path(self,*args,**kwargs):
obj.get_path(self,*args,**kwargs)
def _serial(self,*args,**kwargs):
return obj.serial(self,*args,**kwargs)
#end class obj
######################################################################
# end from generic.py
######################################################################
######################################################################
# from superstring.py
######################################################################
import string
def contains_any(str, set):
    # Return 1 when any character of *set* occurs in *str*, else 0.
    # NOTE(review): parameter names shadow the built-in str/set types;
    # kept unchanged for call compatibility.
    for ch in set:
        if ch in str:
            return 1
        #end if
    #end for
    return 0
#end def contains_any
invalid_variable_name_chars=set('!"#$%&\'()*+,-./:;<=>?@[\\]^`{|}-\n\t ')
def valid_variable_name(s):
    # True when *s* shares no characters with the invalid set above.
    # (Approximate identifier check: leading digits are not rejected.)
    return not (set(s) & invalid_variable_name_chars)
#end def valid_variable_name
######################################################################
# end from superstring.py
######################################################################
######################################################################
# from debug.py
######################################################################
import code
import inspect
def ci(locs=None,globs=None):
    # Drop into an interactive console with access to a namespace.  When no
    # namespaces are given, grab the locals/globals of the *calling* frame
    # via introspection (f_back = one frame up), so the console sees the
    # caller's variables.
    if locs is None or globs is None:
        cur_frame = inspect.currentframe()
        caller_frame = cur_frame.f_back
        locs = caller_frame.f_locals
        globs = caller_frame.f_globals
    #end if
    # merge: locals shadow globals on name clashes
    code.interact(local=dict(globs,**locs))
#end def ci
ls = locals  # short alias for locals
gs = globals  # short alias for globals
interact = ci  # descriptive alias for the interactive-console helper
######################################################################
# end from debug.py
######################################################################
######################################################################
# from developer.py
######################################################################
class DevBase(obj):
    """Development base class: gives derived classes a standard way to
    flag base-class hooks that a subclass failed to override."""
    def not_implemented(self):
        """Abort (with traceback) noting a missing override."""
        self.error('a base class function has not been implemented',trace=True)
    #end def not_implemented
#end class DevBase
######################################################################
# end from developer.py
######################################################################
######################################################################
# from hdfreader.py
######################################################################
from numpy import array,ndarray,minimum,abs,ix_,resize
import sys
import keyword
from inspect import getmembers
import h5py
class HDFglobals(DevBase):
    # Module-wide switch: when True, HDF datasets are kept as h5py views
    # instead of being read into numpy arrays (see HDFreader.add_dataset).
    view = False
#end class HDFglobals
class HDFgroup(DevBase):
    """In-memory mirror of an HDF5 group: child groups and datasets become
    attributes, with bookkeeping dicts (_groups/_datasets) kept alongside."""
    def _escape_name(self,name):
        # Append '_' when an HDF5 member name collides with an existing
        # attribute/method name or a Python keyword (see _escape_names).
        if name in self._escape_names:
            name=name+'_'
        #end if
        return name
    #end def escape_name
    def _set_parent(self,parent):
        self._parent=parent
        return
    #end def set_parent
    def _add_dataset(self,name,dataset):
        # Record *dataset* in the dataset bookkeeping dict.
        self._datasets[name]=dataset
        return
    #end def add_dataset
    def _add_group(self,name,group):
        # Record child *group* and stamp its name on it.
        group._name=name
        self._groups[name]=group
        return
    #end def add_group
    def _contains_group(self,name):
        return name in self._groups.keys()
    #end def _contains_group
    def _contains_dataset(self,name):
        return name in self._datasets.keys()
    #end def _contains_dataset
    def _to_string(self):
        # Human-readable summary listing dataset and child-group names.
        s=''
        if len(self._datasets)>0:
            s+=' datasets:\n'
            for k,v in self._datasets.items():
                s+= ' '+k+'\n'
            #end for
        #end if
        if len(self._groups)>0:
            s+= ' groups:\n'
            for k,v in self._groups.items():
                s+= ' '+k+'\n'
            #end for
        #end if
        return s
    #end def list
    # def __str__(self):
    # return self._to_string()
    # #end def __str__
    #
    # def __repr__(self):
    # return self._to_string()
    # #end def __repr__
    def __init__(self):
        self._name=''
        self._parent=None
        self._groups={};
        self._datasets={};
        self._group_counts={}
        # NOTE(review): the None pre-assignment below is NOT dead code --
        # it makes '_escape_names' itself an instance attribute before
        # getmembers(self) runs, so the name lands in the escape set.
        self._escape_names=None
        self._escape_names=set(dict(getmembers(self)).keys()) | set(keyword.kwlist)
        return
    #end def __init__
    def _remove_hidden(self,deep=True):
        # Strip the parent link and all underscore-prefixed bookkeeping
        # attributes; with deep=True, do so recursively in child groups.
        if '_parent' in self:
            del self._parent
        #end if
        if deep:
            for name,value in self.items():
                if isinstance(value,HDFgroup):
                    value._remove_hidden()
                #end if
            #end for
        #end if
        for name in list(self.keys()):
            if name[0]=='_':
                del self[name]
            #end if
        #end for
    #end def _remove_hidden
    # read in all data views (h5py datasets) into arrays
    # useful for converting a single group read in view form to full arrays
    def read_arrays(self):
        self._remove_hidden()
        for k,v in self.items():
            if isinstance(v,HDFgroup):
                v.read_arrays()
            else:
                # materialize the h5py view as a numpy array
                self[k] = array(v)
            #end if
        #end for
    #end def read_arrays
    def get_keys(self):
        # Prefer the group bookkeeping when it is still present (i.e.
        # before _remove_hidden); otherwise fall back to plain keys.
        if '_groups' in self:
            keys = list(self._groups.keys())
        else:
            keys = list(self.keys())
        #end if
        return keys
    #end def get_keys
#end class HDFgroup
class HDFreader(DevBase):
    # h5py type strings used to classify file members; both the legacy
    # "highlevel" and the newer "_hl" module paths are accepted.
    datasets = set(["<class 'h5py.highlevel.Dataset'>","<class 'h5py._hl.dataset.Dataset'>"])
    groups = set(["<class 'h5py.highlevel.Group'>","<class 'h5py._hl.group.Group'>"])
    def __init__(self,fpath,verbose=False,view=False):
        # Load the HDF5 file at *fpath* and mirror its group/dataset tree
        # into self.obj (an HDFgroup).  When *view* is True, datasets are
        # kept as h5py views instead of being read into numpy arrays.
        HDFglobals.view = view
        if verbose:
            print(' Initializing HDFreader')
        self.fpath=fpath
        if verbose:
            print(' loading h5 file')
        try:
            self.hdf = h5py.File(fpath,'r')
        except IOError:
            # file could not be opened: leave a placeholder so the
            # traversal below is skipped via _success
            self._success = False
            self.hdf = obj(obj=obj())
        else:
            self._success = True
        #end if
        if verbose:
            print(' converting h5 file to dynamic object')
        #convert the hdf 'dict' into a dynamic object
        self.nlevels=1
        self.ilevel=0
        # Set the current hdf group
        self.obj = HDFgroup()
        self.cur=[self.obj]
        self.hcur=[self.hdf]
        if self._success:
            cur = self.cur[self.ilevel]
            hcur = self.hcur[self.ilevel]
            for kr,v in hcur.items():
                # escape names that clash with attributes/keywords
                k=cur._escape_name(kr)
                if valid_variable_name(k):
                    vtype = str(type(v))
                    if vtype in HDFreader.datasets:
                        self.add_dataset(cur,k,v)
                    elif vtype in HDFreader.groups:
                        # recursively mirrors the subtree
                        self.add_group(hcur,cur,k,v)
                    else:
                        print('hdfreader error: encountered invalid type: '+vtype)
                        sys.exit()
                    #end if
                else:
                    print('hdfreader warning: attribute '+k+' is not a valid variable name and has been ignored')
                #end if
            #end for
        #end if
        if verbose:
            print(' end HDFreader Initialization')
        return
    #end def __init__
def increment_level(self):
self.ilevel+=1
self.nlevels = max(self.ilevel+1,self.nlevels)
if self.ilevel+1==self.nlevels:
self.cur.append(None)
self.hcur.append(None)
#end if
self.pad = self.ilevel*' '
return
#end def increment_level
def decrement_level(self):
self.ilevel-=1
self.pad = self.ilevel*' '
return
#end def decrement_level
def add_dataset(self,cur,k,v):
if not HDFglobals.view:
cur[k]=array(v)
else:
cur[k] = v
#end if
cur._add_dataset(k,cur[k])
return
#end def add_dataset
def add_group(self,hcur,cur,k,v):
cur[k] = HDFgroup()
cur._add_group(k,cur[k])
cur._groups[k]._parent = cur
self.increment_level()
self.cur[self.ilevel] = cur._groups[k]
self.hcur[self.ilevel] = hcur[k]
cur = self.cur[self.ilevel]
hcur = self.hcur[self.ilevel]
for kr,v in hcur.items():
k=cur._escape_name(kr)
if valid_variable_name(k):
vtype = str(type(v))
if vtype in HDFreader.datasets:
self.add_dataset(cur,k,v)
elif vtype in HDFreader.groups:
self.add_group(hcur,cur,k,v)
# -- junk from file concatenation (original text: "|" / "Practicing") --
# abbreviate parameter names if possible (e.g. rgb = thisPracticing.rgb)
# SECURITY/HACK note: PsychoPy Builder promotes condition-file columns to bare
# locals via exec(); acceptable only because the conditions file is
# experimenter-controlled, never untrusted input.
if thisPracticing != None:
    for paramName in thisPracticing.keys():
        exec(paramName + '= thisPracticing.' + paramName)
# ------Prepare to start Routine "PractInstr"-------
t = 0
PractInstrClock.reset()  # clock
frameN = -1
continueRoutine = True
# update component parameters for each repeat
numcorrect=0
# keep track of which components have finished
PractInstrComponents = [practiceinstr]
for thisComponent in PractInstrComponents:
    if hasattr(thisComponent, 'status'):
        thisComponent.status = NOT_STARTED
# -------Start Routine "PractInstr"-------
while continueRoutine:
    # get current time
    t = PractInstrClock.getTime()
    frameN = frameN + 1  # number of completed frames (so 0 is the first frame)
    # update/draw components on each frame
    # *practiceinstr* updates
    if t >= 0.0 and practiceinstr.status == NOT_STARTED:
        # keep track of start time/frame for later
        practiceinstr.tStart = t
        practiceinstr.frameNStart = frameN  # exact frame index
        practiceinstr.setAutoDraw(True)
    # NOTE(review): pressing space only hides the text; nothing here sets the
    # component FINISHED or forces continueRoutine = False, so the loop can
    # only end once the component flips its own status (or via escape) --
    # confirm that is intended.
    if practiceinstr.status == STARTED and bool(event.getKeys('space')):
        practiceinstr.setAutoDraw(False)
    # check if all components have finished
    if not continueRoutine:  # a component has requested a forced-end of Routine
        break
    continueRoutine = False  # will revert to True if at least one component still running
    for thisComponent in PractInstrComponents:
        if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
            continueRoutine = True
            break  # at least one component has not yet finished
    # check for quit (the Esc key)
    if endExpNow or event.getKeys(keyList=["escape"]):
        core.quit()
    # refresh the screen
    if continueRoutine:  # don't flip if this routine is over or we'll get a blank screen
        win.flip()
# -------Ending Routine "PractInstr"-------
for thisComponent in PractInstrComponents:
    if hasattr(thisComponent, "setAutoDraw"):
        thisComponent.setAutoDraw(False)
# the Routine "PractInstr" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# set up handler to look after randomisation of conditions etc
PracticeTrials = data.TrialHandler(nReps=1, method='random',
    extraInfo=expInfo, originPath=-1,
    trialList=data.importConditions('demo.xls'),
    seed=None, name='PracticeTrials')
thisExp.addLoop(PracticeTrials)  # add the loop to the experiment
thisPracticeTrial = PracticeTrials.trialList[0]  # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb = thisPracticeTrial.rgb)
# SECURITY/HACK note: exec() on condition-file column names -- safe only for
# experimenter-controlled condition files.
if thisPracticeTrial != None:
    for paramName in thisPracticeTrial.keys():
        exec(paramName + '= thisPracticeTrial.' + paramName)
# run one iteration of the practice block per row of the conditions file
for thisPracticeTrial in PracticeTrials:
    currentLoop = PracticeTrials
    # abbreviate parameter names if possible (e.g. rgb = thisPracticeTrial.rgb)
    if thisPracticeTrial != None:
        for paramName in thisPracticeTrial.keys():
            exec(paramName + '= thisPracticeTrial.' + paramName)
# ------Prepare to start Routine "setimg"-------
t = 0
setimgClock.reset()  # clock
frameN = -1
continueRoutine = True
# update component parameters for each repeat
# map the (xpos, corrResp) condition columns onto the congruent/incongruent image
if (xpos == -.25 and corrResp == 1) or (xpos == .25 and corrResp == 0):
    stimimg=congr
elif (xpos == -.25 and corrResp == 0) or (xpos == .25 and corrResp == 1):
    stimimg=incongr
# NOTE(review): no else branch -- for any other xpos/corrResp combination
# stimimg silently keeps its value from the previous trial (or is undefined on
# the first); confirm the conditions file only contains these four combos.
# keep track of which components have finished
setimgComponents = []
for thisComponent in setimgComponents:
    if hasattr(thisComponent, 'status'):
        thisComponent.status = NOT_STARTED
# -------Start Routine "setimg"-------
# with no components, this loop runs a single pass and falls through
while continueRoutine:
    # get current time
    t = setimgClock.getTime()
    frameN = frameN + 1  # number of completed frames (so 0 is the first frame)
    # update/draw components on each frame
    # check if all components have finished
    if not continueRoutine:  # a component has requested a forced-end of Routine
        break
    continueRoutine = False  # will revert to True if at least one component still running
    for thisComponent in setimgComponents:
        if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
            continueRoutine = True
            break  # at least one component has not yet finished
    # check for quit (the Esc key)
    if endExpNow or event.getKeys(keyList=["escape"]):
        core.quit()
    # refresh the screen
    if continueRoutine:  # don't flip if this routine is over or we'll get a blank screen
        win.flip()
# -------Ending Routine "setimg"-------
for thisComponent in setimgComponents:
    if hasattr(thisComponent, "setAutoDraw"):
        thisComponent.setAutoDraw(False)
# the Routine "setimg" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# ------Prepare to start Routine "spatialtrial"-------
t = 0
spatialtrialClock.reset()  # clock
frameN = -1
continueRoutine = True
routineTimer.add(1.250000)  # fixed 1.25 s trial duration (non-slip timing)
# update component parameters for each repeat
stimulus.setPos((xpos,0))
stimulus.setImage(stimimg)
key_resp_2 = event.BuilderKeyResponse()
thisExp.addData('img', stimimg)
# keep track of which components have finished
spatialtrialComponents = [Fixation2, stimulus, key_resp_2]
for thisComponent in spatialtrialComponents:
    if hasattr(thisComponent, 'status'):
        thisComponent.status = NOT_STARTED
# -------Start Routine "spatialtrial"-------
while continueRoutine and routineTimer.getTime() > 0:
    # get current time
    t = spatialtrialClock.getTime()
    frameN = frameN + 1  # number of completed frames (so 0 is the first frame)
    # update/draw components on each frame
    # *Fixation2* updates: drawn for the whole 1.25 s trial
    if t >= 0.0 and Fixation2.status == NOT_STARTED:
        # keep track of start time/frame for later
        Fixation2.tStart = t
        Fixation2.frameNStart = frameN  # exact frame index
        Fixation2.setAutoDraw(True)
    frameRemains = 0.0 + 1.25- win.monitorFramePeriod * 0.75  # most of one frame period left
    if Fixation2.status == STARTED and t >= frameRemains:
        Fixation2.setAutoDraw(False)
    # *stimulus* updates: image visible from 0.5 s to 1.25 s
    if t >= .5 and stimulus.status == NOT_STARTED:
        # keep track of start time/frame for later
        stimulus.tStart = t
        stimulus.frameNStart = frameN  # exact frame index
        stimulus.setAutoDraw(True)
    frameRemains = .5 + .75- win.monitorFramePeriod * 0.75  # most of one frame period left
    if stimulus.status == STARTED and t >= frameRemains:
        stimulus.setAutoDraw(False)
    # *key_resp_2* updates: response window matches the stimulus (0.5-1.25 s)
    if t >= .5 and key_resp_2.status == NOT_STARTED:
        # keep track of start time/frame for later
        key_resp_2.tStart = t
        key_resp_2.frameNStart = frameN  # exact frame index
        key_resp_2.status = STARTED
        # keyboard checking is just starting
        win.callOnFlip(key_resp_2.clock.reset)  # t=0 on next screen flip
        event.clearEvents(eventType='keyboard')
    frameRemains = .5 + .75- win.monitorFramePeriod * 0.75  # most of one frame period left
    if key_resp_2.status == STARTED and t >= frameRemains:
        key_resp_2.status = STOPPED
    if key_resp_2.status == STARTED:
        theseKeys = event.getKeys(keyList=['1', '0'])
        # check for quit:
        # NOTE(review): getKeys was called with keyList=['1','0'], so
        # "escape" can never appear in theseKeys -- this check looks dead
        # (escape is handled by the routine-level check below); confirm.
        if "escape" in theseKeys:
            endExpNow = True
        if len(theseKeys) > 0:  # at least one key was pressed
            if key_resp_2.keys == []:  # then this was the first keypress
                key_resp_2.keys = theseKeys[0]  # just the first key pressed
                key_resp_2.rt = key_resp_2.clock.getTime()
                # was this 'correct'?
                if (key_resp_2.keys == str(corrResp)) or (key_resp_2.keys == corrResp):
                    key_resp_2.corr = 1
                else:
                    key_resp_2.corr = 0
                # a response ends the routine
                continueRoutine = False
    # check if all components have finished
    if not continueRoutine:  # a component has requested a forced-end of Routine
        break
    continueRoutine = False  # will revert to True if at least one component still running
    for thisComponent in spatialtrialComponents:
        if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
            continueRoutine = True
            break  # at least one component has not yet finished
    # check for quit (the Esc key)
    if endExpNow or event.getKeys(keyList=["escape"]):
        core.quit()
    # refresh the screen
    if continueRoutine:  # don't flip if this routine is over or we'll get a blank screen
        win.flip()
# -------Ending Routine "spatialtrial"-------
for thisComponent in spatialtrialComponents:
    if hasattr(thisComponent, "setAutoDraw"):
        thisComponent.setAutoDraw(False)
# check responses
if key_resp_2.keys in ['', [], None]:  # No response was made
    key_resp_2.keys=None
    # was no response the correct answer?!
    if str(corrResp).lower() == 'none':
        key_resp_2.corr = 1  # correct non-response
    else:
        key_resp_2.corr = 0  # failed to respond (incorrectly)
# store data for PracticeTrials (TrialHandler)
PracticeTrials.addData('key_resp_2.keys',key_resp_2.keys)
PracticeTrials.addData('key_resp_2.corr', key_resp_2.corr)
if key_resp_2.keys != None:  # we had a response
    PracticeTrials.addData('key_resp_2.rt', key_resp_2.rt)
# ------Prepare to start Routine "Practicecounter"-------
t = 0
PracticecounterClock.reset() # clock
frameN = -1
continueRoutine = True
# update component parameters for each repeat
numcorrect+=key_resp_2.corr
# keep track of which components have finished
PracticecounterComponents = []
for thisComponent in PracticecounterComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# -------Start Routine "Practicecounter"-------
while continueRoutine:
# get current time
t = PracticecounterClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in PracticecounterComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # | |
import random, time, datetime
import curses
class bcolors:
    """ANSI terminal escape sequences for coloured/styled output.

    Concatenate around text, e.g. bcolors.FAIL + msg + bcolors.ENDC;
    ENDC resets all attributes.
    """
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
class InvalidMoveException(Exception):
    """Raised when a move cannot be legally applied to a board."""
    def __init__(self, value):
        self.value = value
    def __str__(self):
        return '{!r}'.format(self.value)
class Piece:
    """A game piece.

    `blocks` is a list of (x, y) tuples giving block positions relative to
    the left-most block of the piece's top row, which is (0, 0) -- e.g. a
    horizontal three-block piece is [(0,0),(1,0),(2,0)].
    """
    def __init__(self, blocks, name, char_rep, input_position):
        self.blocks = blocks
        self.name = name
        self.char_rep = char_rep
        # (y, x) home position on the piece-picker screen
        self.input_position = input_position

    def __str__(self):
        # the offsets shift any negative coordinates into the drawable area
        xs = [b[0] for b in self.blocks]
        ys = [b[1] for b in self.blocks]
        x_off = max([0] + [-v for v in xs])
        y_off = max([0] + [-v for v in ys])
        width = x_off + max([0] + [v + 1 for v in xs])
        height = y_off + max([0] + [v + 1 for v in ys])
        mark = self.char_rep.upper()
        lines = []
        for row in range(height):
            cells = [mark if (col - x_off, row - y_off) in self.blocks else ' '
                     for col in range(width)]
            lines.append(''.join(cells) + '\n')
        return ''.join(lines)

    def show(self, win, p):
        """Draw the piece on a curses window, top-left at (y, x) = p."""
        y, x = p
        for i, line in enumerate(str(self).split('\n')):
            win.addstr(y + i, x, line)
class Move:
    """A placement of `piece` with its reference block at board cell (x, y)."""
    def __init__(self, piece, x, y):
        self.piece = piece
        self.x = x
        self.y = y

    def __str__(self):
        return "{} at {},{}".format(self.piece.name, self.x, self.y)

    def __repr__(self):
        return '\n' + self.__str__()

    def export_as_str(self):
        """Encode as 2 chars: position packed into one printable char
        (offset by 32, i.e. space) followed by the piece's character."""
        packed = 10 * self.y + self.x
        return chr(packed + 32) + self.piece.char_rep
def import_move_as_str(string):
    """Inverse of Move.export_as_str: decode a packed 2-char move string."""
    packed = ord(string[0]) - 32
    return Move(piece_dict[string[1]], packed % 10, packed // 10)
class Board:
"""A 10 x 10 list of 0's and 1's, 0 means a space is clear, 1 means it's occupied. Represents a gamestate of the 10 x 10 board
On this board, the coordinates system starts with (0,0) being the top left corner, and the first coordinate being the x, and the second being the y."""
# EMPTY_BOARD = [[0] * 10 for x in range(10)]
"""Constructs a Board. Default is empty"""
def __init__(self, matrix = None, current_pieces = [], move_str = ''):
if matrix == None:
self.matrix = [[0] * 10 for x in range(10)]
else:
self.matrix = matrix
self.last_piece = [[0] * 10 for x in range(10)]
self.current_pieces = current_pieces[:]
self.move_str = ''
def copy(self):
"""Returns a new board identical to self"""
new_matrix = []
for col in self.matrix:
new_matrix.append(col[:])
return Board(new_matrix, self.current_pieces[:], self.move_str[:])
"""Returns True if you can place Piece p at the Location loc"""
def is_valid_move(self, move):
if not move.piece in self.current_pieces:
return False
for block in move.piece.blocks:
currX, currY = block[0] + move.x, block[1] + move.y
if currX not in range(10) or currY not in range(10) or self.matrix[currX][currY] != 0:
return False
return True
def get_valid_moves(self):
out = []
for piece in self.current_pieces:
for x in range(10):
for y in range(10):
move = Move(piece, x, y)
if self.is_valid_move(move):
out.append(move)
return out
def get_valid_moves_2(self, piece):
out = []
for x in range(10):
for y in range(10):
move = Move(piece, x, y)
if self.is_valid_move(move):
out.append(move)
return out
def has_valid_moves(self):
for piece in self.current_pieces:
for x in range(10):
for y in range(10):
move = Move(piece, x, y)
if self.is_valid_move(move):
return True
return False
"""Returns a list of numbers containing the numbers of the rows that are currently full in the board"""
def get_full_rows(self):
output = []
for y in range(10):
found0 = False
for x in range(10):
if self.matrix[x][y] == 0:
found0 = True
break
if not found0:
output.append(y)
return output
"""Returns a list of numbers containing the numbers of the columns that are currently full in the board"""
def get_full_cols(self):
output = []
for x in range(10):
if 0 not in self.matrix[x]:
output.append(x)
return output
"""Places a piece p at a space loc (given by a tuple with two coordinates), updates board accordingly
Returns a tuple of two lists that contains rows and cols cleared respectively"""
def make_move(self, move):
if self.is_valid_move(move):
self.last_piece = [[0] * 10 for x in range(10)]
move_str = move.export_as_str()
# placing piece
for block in move.piece.blocks:
self.matrix[block[0] + move.x][block[1] + move.y] = 1
self.last_piece[block[0] + move.x][block[1] + move.y] = 1
# clearing appropriate rows/cols
full_rows, full_cols = self.get_full_rows(), self.get_full_cols()
for y in full_rows:
move_str += "{}y".format(y)
for x in range(10):
if not (x in full_cols):
self.matrix[x][y] -= 1
self.last_piece[x][y] = 2
for x in full_cols:
move_str += "{}x".format(x)
for y in range(10):
self.matrix[x][y] -= 1
self.last_piece[x][y] = 3
for block in move.piece.blocks:
self.last_piece[block[0] + move.x][block[1] + move.y] = 1
self.current_pieces.remove(move.piece)
self.move_str += move_str
return full_rows, full_cols
else:
print("Here is a string representing the current board:")
print(self.export_as_str())
raise InvalidMoveException('{} is not a valid move'.format(move))
"""Places a piece p at a space loc (given by a tuple with two coordinates), updates board accordingly
Returns a tuple of two lists that contains rows and cols cleared respectively"""
def force_move(self, move):
self.last_piece = [[0] * 10 for x in range(10)]
# placing piece
move_str = move.export_as_str()
for block in move.piece.blocks:
self.matrix[block[0] + move.x][block[1] + move.y] = 1
self.last_piece[block[0] + move.x][block[1] + move.y] = 1
# clearing appropriate rows/cols
full_rows, full_cols = self.get_full_rows(), self.get_full_cols()
for y in full_rows:
move_str += "{}y".format(y)
for x in range(10):
if not (x in full_cols):
self.matrix[x][y] -= 1
self.last_piece[x][y] = 2
for x in full_cols:
move_str += "{}x".format(x)
for y in range(10):
self.matrix[x][y] -= 1
self.last_piece[x][y] = 3
# self.current_pieces.remove(move.piece)
self.move_str += move_str
return full_rows, full_cols
    def draw_input(self, win):
        """Render the piece-picker screen: the full palette on top, the
        currently selected pieces below, and the [OK]/[REINICIAR] buttons."""
        curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_RED)
        curses.init_pair(2, curses.COLOR_BLACK, curses.COLOR_GREEN)
        curses.init_pair(3, curses.COLOR_WHITE, curses.COLOR_BLACK)
        win.erase()
        for piece in piece_list:
            piece.show(win, piece.input_position)
        win.addstr(19, 0, "#"*41)  # separator between palette and selection
        for i,p in enumerate(self.current_pieces):
            p.show(win, (21, i*15))
        win.addstr(26, 0, ' ')
        if len(self.current_pieces) == 3:
            # [OK] only appears once exactly 3 pieces have been selected
            win.addstr('[OK]', curses.color_pair(2))
        else:
            win.addstr(' ')
        win.addstr(' ')
        # "REINICIAR" is Spanish for "restart"; string kept verbatim since
        # refresh_pieces/interact locate the button by screen coordinates
        win.addstr('[REINICIAR]', curses.color_pair(1))
        win.addstr(' ')
        win.refresh()
def add_to_pieces(self, piece):
while len(self.current_pieces)>=3:
del self.current_pieces[0]
self.current_pieces.append(piece)
    def refresh_pieces(self, win):
        """Interactively pick the next 3 pieces (by typed character or mouse
        click on the palette); returns once 3 pieces are confirmed with
        Enter or a click on [OK]."""
        self.last_piece = [[0] * 10 for x in range(10)]
        self.current_pieces = []
        while True:
            self.draw_input(win)
            win.keypad(1)
            curses.halfdelay(1)  # getch() times out after 0.1 s
            event = win.getch()
            # Enter confirms, but only with a complete selection of 3
            if (event == curses.KEY_ENTER or event == 10 or event == 13) and len(self.current_pieces) == 3:
                return
            # typing a piece's character selects it
            for piece in piece_list:
                if event == ord(piece.char_rep):
                    self.add_to_pieces(piece)
            if event == curses.KEY_MOUSE:
                _, x, y, _, bstate = curses.getmouse()
                if bstate & curses.BUTTON1_CLICKED:
                    if y==26 and 18<=x and x<=28:
                        # [REINICIAR] button: clear the selection
                        self.current_pieces = []
                    elif y==26 and 7<=x and x<=10 and len(self.current_pieces) == 3:
                        # [OK] button
                        return
                    # clicking a palette piece's block selects that piece
                    for piece in piece_list:
                        if (x-piece.input_position[1], y-piece.input_position[0]) in piece.blocks:
                            self.add_to_pieces(piece)
def export_as_str(self):
out = ''
for x in range(10):
for y in range(10):
out += str(self.matrix[x][y])
for piece in self.current_pieces:
out += piece.char_rep
return out
def import_as_str(self, input_str):
for x in range(10):
for y in range(10):
self.matrix[x][y] = int(input_str[x*10+y])
for char in input_str[100:]:
self.current_pieces.append(piece_dict[char])
    def undo_move(self):
        "undoes the last move based on the board's move_str"
        self.last_piece = [[0] * 10 for x in range(10)]
        index = len(self.move_str) -1
        decreased = []
        last_char = self.move_str[index]
        # Walk backwards over trailing "<n>y"/"<n>x" clear records, restoring
        # each cleared row/column.  `decreased` remembers restored cells so a
        # row/col intersection (recorded in both passes but only decremented
        # once by make_move) is only incremented once here.
        while last_char == 'x' or last_char == 'y':
            if last_char == 'x':
                for y in range(10):
                    if not ((int(self.move_str[index-1]), y) in decreased):
                        decreased.append((int(self.move_str[index-1]), y))
                        self.matrix[int(self.move_str[index-1])][y] += 1
            elif last_char == 'y':
                for x in range(10):
                    if not ((x, int(self.move_str[index-1])) in decreased):
                        decreased.append((x, int(self.move_str[index-1])))
                        self.matrix[x][int(self.move_str[index-1])] += 1
            # each clear record is two characters: digit + 'x'/'y'
            index -= 2
            last_char = self.move_str[index]
        # the remaining two characters are the placement itself: lift the
        # piece off the board and return it to the player's hand
        move = import_move_as_str(self.move_str[index - 1:index + 1])
        for block in move.piece.blocks:
            self.matrix[block[0] + move.x][block[1] + move.y] = 0
        self.current_pieces.append(move.piece)
        self.move_str = self.move_str[:index - 1]
# def import_move_str(self, move_str):
# while x <
# curr_pair = move_str
def interact(self, win):
while True:
self.show(win)
win.keypad(1)
curses.halfdelay(1)
event = win.getch()
if event == curses.KEY_ENTER or event == 10 or event == 13:
return
if event == curses.KEY_MOUSE:
_, x, y, _, bstate = curses.getmouse()
xM = (x-1)/4
yM = (y-2)/2
if bstate & curses.BUTTON1_CLICKED:
if y==26 and 18<=x and x<=27 and self.move_str:
while len(self.current_pieces)<3:
self.undo_move()
self.current_pieces = []
elif y==26 and 7<=x and x<=10:
return
if 0<=xM and xM <10 and 0<=yM and yM <10:
self.matrix[xM][yM]+=1
elif bstate & curses.BUTTON3_CLICKED:
if 0<=xM and xM <10 and 0<=yM and yM <10:
self.matrix[xM][yM]-=1
def show(self, win):
win.erase()
curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_RED)
curses.init_pair(2, curses.COLOR_BLACK, curses.COLOR_GREEN)
curses.init_pair(3, curses.COLOR_WHITE, curses.COLOR_BLACK)
win.addstr(0, | |
from statsmodels.compat.python import iterkeys
from statsmodels.regression.linear_model import GLS
import numpy as np
from statsmodels.base.model import LikelihoodModelResults
from scipy import sparse
# http://www.irisa.fr/aladin/wg-statlin/WORKSHOPS/RENNES02/SLIDES/Foschi.pdf
__all__ = ['SUR', 'Sem2SLS']
#probably should have a SystemModel superclass
# TODO: does it make sense of SUR equations to have
# independent endogenous regressors? If so, then
# change docs to LHS = RHS
#TODO: make a dictionary that holds equation specific information
#rather than these cryptic lists? Slower to get a dict value?
#TODO: refine sigma definition
class SUR(object):
    """
    Seemingly Unrelated Regression

    Parameters
    ----------
    sys : list
        [endog1, exog1, endog2, exog2,...] It will be of length 2 x M,
        where M is the number of equations endog = exog.
    sigma : array_like
        M x M array where sigma[i,j] is the covariance between equation i and j
    dfk : None, 'dfk1', or 'dfk2'
        Default is None.  Correction for the degrees of freedom
        should be specified for small samples.  See the notes for more
        information.

    Attributes
    ----------
    cholsigmainv : array
        The transpose of the Cholesky decomposition of the pseudo-inverse
        of `sigma`.
    df_model : array
        Model degrees of freedom of each equation. p_{m} - 1 where p is
        the number of regressors for each equation m and one is subtracted
        for the constant.
    df_resid : array
        Residual degrees of freedom of each equation. Number of observations
        less the number of parameters.
    endog : array
        The LHS variables for each equation in the system.
        It is a M x nobs array where M is the number of equations.
    exog : array
        The RHS variable for each equation in the system.
        It is a nobs x sum(p_{m}) array. Which is just each
        RHS array stacked next to each other in columns.
    history : dict
        Contains the history of fitting the model. Probably not of interest
        if the model is fit with `igls` = False.
    iterations : int
        The number of iterations until convergence if the model is fit
        iteratively.
    nobs : float
        The number of observations of the equations.
    normalized_cov_params : array
        sum(p_{m}) x sum(p_{m}) array
        :math:`\\left[X^{T}\\left(\\Sigma^{-1}\\otimes\\boldsymbol{I}\\right)X\\right]^{-1}`
    pinv_wexog : array
        The pseudo-inverse of the `wexog`
    sigma : array
        M x M covariance matrix of the cross-equation disturbances. See notes.
    sp_exog : CSR sparse matrix
        Contains a block diagonal sparse matrix of the design so that
        exog1 ... exogM are on the diagonal.
    wendog : array
        M * nobs x 1 array of the endogenous variables whitened by
        `cholsigmainv` and stacked into a single column.
    wexog : array
        M*nobs x sum(p_{m}) array of the whitened exogenous variables.

    Notes
    -----
    All individual equations are assumed to be well-behaved, homoskedastic
    iid errors.  This is basically an extension of GLS, using sparse matrices.

    .. math:: \\Sigma=\\left[\\begin{array}{cccc}
              \\sigma_{11} & \\sigma_{12} & \\cdots & \\sigma_{1M}\\\\
              \\sigma_{21} & \\sigma_{22} & \\cdots & \\sigma_{2M}\\\\
              \\vdots & \\vdots & \\ddots & \\vdots\\\\
              \\sigma_{M1} & \\sigma_{M2} & \\cdots & \\sigma_{MM}\\end{array}\\right]

    References
    ----------
    Zellner (1962), Greene (2003)
    """
    #TODO: Does each equation need nobs to be the same?
    def __init__(self, sys, sigma=None, dfk=None):
        if len(sys) % 2 != 0:
            raise ValueError("sys must be a list of pairs of endogenous "
                             "and exogenous variables.  Got length %s"
                             % len(sys))
        if dfk:
            if dfk.lower() not in ['dfk1', 'dfk2']:
                raise ValueError("dfk option %s not understood" % (dfk))
        self._dfk = dfk
        M = len(sys[1::2])
        self._M = M
        # bug fix: np.column_stack requires a sequence, not a generator,
        # on modern numpy
        exog = np.column_stack([np.asarray(eq) for eq in sys[1::2]])
        self.exog = exog  # 2d ndarray exog is better
        # Endog, might just go ahead and reshape this?
        endog = np.asarray(sys[::2])
        self.endog = endog
        self.nobs = float(self.endog[0].shape[0])  # assumes all the same length
        nobs = int(self.nobs)  # integer copy for indexing/slicing
        # Degrees of Freedom
        ranks = [np.linalg.matrix_rank(eq) for eq in sys[1::2]]
        self.df_resid = np.asarray([self.nobs - r for r in ranks])
        self.df_model = np.asarray([r - 1 for r in ranks])
        # column offsets of each equation's regressors inside exog/sp_exog
        # (int dtype so they are usable as slice bounds)
        self._cols = np.cumsum(np.hstack((0, self.df_model + 1)))
        # "Block-diagonal" sparse matrix of exog, built as LIL
        # bug fix: slice bounds must be ints (self.nobs is a float)
        sp_exog = sparse.lil_matrix((nobs * M, int(self._cols[-1])))
        for i in range(M):
            sp_exog[i * nobs:(i + 1) * nobs,
                    int(self._cols[i]):int(self._cols[i + 1])] = sys[1::2][i]
        self.sp_exog = sp_exog.tocsr()  # cast to compressed for efficiency
        # Deal with sigma, check shape earlier if given
        if np.any(sigma):
            sigma = np.asarray(sigma)  # check shape
        elif sigma is None:
            # estimate sigma from equation-by-equation OLS residuals
            sigma = self._compute_sigma(self._ols_resids())
        self.sigma = sigma
        self.cholsigmainv = np.linalg.cholesky(np.linalg.pinv(
            self.sigma)).T
        self.initialize()

    def _ols_resids(self):
        """Stacked equation-by-equation OLS residuals, shape (M, nobs)."""
        resids = [GLS(self.endog[i],
                      self.exog[:, int(self._cols[i]):int(self._cols[i + 1])]
                      ).fit().resid
                  for i in range(self._M)]
        return np.asarray(resids).reshape(self._M, -1)

    def initialize(self):
        """Whiten the system and reset the fitting state."""
        self.wendog = self.whiten(self.endog)
        self.wexog = self.whiten(self.sp_exog)
        self.pinv_wexog = np.linalg.pinv(self.wexog)
        self.normalized_cov_params = np.dot(self.pinv_wexog,
                np.transpose(self.pinv_wexog))
        self.history = {'params': [np.inf]}
        self.iterations = 0

    def _update_history(self, params):
        self.history['params'].append(params)

    def _compute_sigma(self, resids):
        """
        Compute the M x M cross-equation covariance from `resids`
        (shape (M, nobs)) and refresh `cholsigmainv`.
        """
        M = self._M
        nobs = self.nobs
        sig = np.dot(resids, resids.T)  # faster way to do this?
        if not self._dfk:
            div = nobs
        elif self._dfk.lower() == 'dfk1':
            # bug fix: the original wrote div[i+j] into a flat length-M**2
            # array and discarded the result of reshape() (it is not
            # in-place); build the M x M matrix directly
            div = np.zeros((M, M))
            for i in range(M):
                for j in range(M):
                    div[i, j] = ((self.df_model[i] + 1) *
                                 (self.df_model[j] + 1)) ** .5
        else:  # 'dfk2' error checking is done earlier
            div = np.zeros((M, M))
            for i in range(M):
                for j in range(M):
                    # bug fix: np.max(a, b) treats b as the axis argument;
                    # the elementwise builtin max() is what is intended
                    div[i, j] = nobs - max(self.df_model[i] + 1,
                                           self.df_model[j] + 1)
        self.cholsigmainv = np.linalg.cholesky(np.linalg.pinv(sig / div)).T
        return sig / div

    def whiten(self, X):
        """
        SUR whiten method.

        Parameters
        ----------
        X : list of arrays
            Data to be whitened.

        Returns
        -------
        If X is the endogenous LHS of the system, the whitened stacked
        column ``np.dot(np.kron(cholsigmainv, I_nobs), X.reshape(-1, 1))``;
        if X is the sparse exogenous design, the dense whitened design.
        """
        nobs = int(self.nobs)  # np.eye/sparse.eye need an integer size
        if X is self.endog:  # definitely not a robust check
            return np.dot(np.kron(self.cholsigmainv, np.eye(nobs)),
                    X.reshape(-1, 1))
        elif X is self.sp_exog:
            return (sparse.kron(self.cholsigmainv,
                sparse.eye(nobs, nobs)) * X).toarray()  # *= dot until cast to array

    def fit(self, igls=False, tol=1e-5, maxiter=100):
        """
        igls : bool
            Iterate until estimates converge if sigma is None instead of
            two-step GLS, which is the default is sigma is None.
        tol : float
        maxiter : int

        Notes
        -----
        This is a naive implementation that does not exploit the block
        diagonal structure.  It should work for ill-conditioned `sigma`
        but this is untested.
        """
        if not np.any(self.sigma):
            # bug fix: _compute_sigma takes a residual matrix, not
            # (endog, exog); seed it from equation-by-equation OLS residuals
            self.sigma = self._compute_sigma(self._ols_resids())
        M = self._M
        beta = np.dot(self.pinv_wexog, self.wendog)
        self._update_history(beta)
        self.iterations += 1
        if not igls:
            sur_fit = SysResults(self, beta, self.normalized_cov_params)
            return sur_fit
        conv = self.history['params']
        while igls and (np.any(np.abs(conv[-2] - conv[-1]) > tol)) and \
                (self.iterations < maxiter):
            fittedvalues = (self.sp_exog * beta).reshape(M, -1)
            resids = self.endog - fittedvalues  # do not attach results yet
            self.sigma = self._compute_sigma(resids)  # need to attach for compute?
            self.wendog = self.whiten(self.endog)
            self.wexog = self.whiten(self.sp_exog)
            self.pinv_wexog = np.linalg.pinv(self.wexog)
            self.normalized_cov_params = np.dot(self.pinv_wexog,
                    np.transpose(self.pinv_wexog))
            beta = np.dot(self.pinv_wexog, self.wendog)
            self._update_history(beta)
            self.iterations += 1
        sur_fit = SysResults(self, beta, self.normalized_cov_params)
        return sur_fit

    def predict(self, design):
        pass
#TODO: Should just have a general 2SLS estimator to subclass
# for IV, FGLS, etc.
# Also should probably have SEM class and estimators as subclasses
class Sem2SLS(object):
"""
Two-Stage Least Squares for Simultaneous equations
Parameters
----------
sys : list
[endog1, exog1, endog2, exog2,...] It will be of length 2 x M,
where M is the number of equations endog = exog.
indep_endog : dict
A dictionary mapping the equation to the column numbers of the
the independent endogenous regressors in each equation.
It is assumed that the system is entered as broken up into
LHS and RHS. For now, the values of the dict have to be sequences.
Note that the keys for the equations should be zero-indexed.
instruments : array
Array of the exogenous independent variables.
Notes
-----
This is unfinished, and the design should be refactored.
Estimation is done by brute force and there is no exploitation of
the structure of the system.
"""
def __init__(self, sys, indep_endog=None, instruments=None):
if len(sys) % 2 != 0:
raise ValueError("sys must be a list of pairs of endogenous and \
exogenous variables. Got length %s" % len(sys))
M = len(sys[1::2])
self._M = M
# The lists are probably a bad idea
self.endog = sys[::2] # these are just list containers
self.exog = sys[1::2]
self._K = [np.linalg.matrix_rank(_) for _ in sys[1::2]]
# fullexog = np.column_stack((_ for _ in self.exog))
self.instruments = instruments
# Keep the Y_j's in a container to get IVs
| |
# connector/src/yang/connector/gnmi.py
import traceback
import os
import logging
from collections import OrderedDict, Iterable
import base64
import json
from threading import Thread, Event
from time import sleep
from datetime import datetime
from six import string_types
from xml.etree.ElementPath import xpath_tokenizer_re
from google.protobuf import json_format
from cisco_gnmi import ClientBuilder
try:
    from pyats.log.utils import banner
    from pyats.connections import BaseConnection
except ImportError:
    # Standalone without pyats install: provide minimal stand-ins so this
    # module can still be imported and used outside a pyATS environment.
    class BaseConnection:
        class dev:
            def __init__(self, dev_os):
                self.os = dev_os
        def __init__(self, device_os, **kwargs):
            self.connection_info = {'protocol': 'gnmi'}
            self._device = self.dev(device_os)
            self.connection_info.update(kwargs)
    # no-op replacement for pyats.log.utils.banner
    def banner(string):
        return string

# try to record usage statistics
#  - only internal cisco users will have stats.CesMonitor module
#  - below code does nothing for DevNet users - we DO NOT track usage stats
#    for PyPI/public/customer users
try:
    # new internal cisco-only pkg since devnet release
    from ats.cisco.stats import CesMonitor
except Exception:
    try:
        # legacy pyats version, stats was inside utils module
        from ats.utils.stats import CesMonitor
    except Exception:
        # neither variant available: disable stats entirely
        CesMonitor = None
finally:
    if CesMonitor is not None:
        # CesMonitor exists -> this is an internal cisco user
        CesMonitor(action = __name__, application='pyATS Packages').post()

# create a logger for this module
log = logging.getLogger(__name__)
class GnmiNotification(Thread):
    """Thread listening for event notifications from the device.

    Consumes a gNMI subscribe-response iterator, decodes each update with
    the caller-supplied `decode` function, and verifies it against the
    caller-supplied `verifier`/`returns`.
    """

    def __init__(self, device, response, **request):
        Thread.__init__(self)
        self.device = device
        self._stop_event = Event()  # set to ask run() to terminate
        self.log = logging.getLogger(__name__)
        self.request = request  # routed through the `request` property setter
        self.responses = response  # gNMI subscribe response iterator

    @property
    def request(self):
        # NOTE(review): nothing in this class ever assigns self._request,
        # so reading this property raises AttributeError -- confirm whether
        # the setter below was meant to store the dict as well.
        return self._request

    @request.setter
    def request(self, request={}):
        # NOTE(review): mutable default argument; harmless only while the
        # setter never mutates `request`.
        self.returns = request.get('returns')
        self.response_verify = request.get('verifier')
        self.decode_response = request.get('decode')
        self.namespace = request.get('namespace')
        # 'format' is accessed with [] -- a request without it raises KeyError
        self.sub_mode = request['format'].get('sub_mode', 'SAMPLE')
        self.encoding = request['format'].get('encoding', 'PROTO')
        self.sample_interval = request['format'].get('sample_interval', 10)
        self.stream_max = request['format'].get('stream_max', 0)
        self.time_delta = 0
        self.result = None
        self.event_triggered = False

    def process_opfields(self, response):
        """Decode one subscribe update and verify it against `returns`."""
        subscribe_resp = json_format.MessageToDict(response)
        updates = subscribe_resp['update']
        resp = self.decode_response(updates)
        if resp:
            if self.event_triggered:
                self.result = self.response_verify(resp, self.returns.copy())
        else:
            self.log.error('No values in subscribe response')
        # NOTE(review): stop() appears to run after every processed update
        # (source nesting is ambiguous), ending the subscription after the
        # first update batch -- confirm against upstream.
        self.stop()

    def run(self):
        """Check for inbound notifications."""
        t1 = datetime.now()
        self.log.info('\nSubscribe notification active\n{0}'.format(
            29 * '='
        ))
        for response in self.responses:
            if self.stopped():
                self.log.info("Terminating notification thread")
                break
            if response.HasField('sync_response'):
                self.log.info('Subscribe syncing response')
            if response.HasField('update'):
                self.log.info(
                    '\nSubscribe response:\n{0}\n{1}'.format(
                        19 * '=',
                        str(response)
                    )
                )
                self.process_opfields(response)
                self.log.info('Subscribe opfields processed')
            if self.stream_max:
                # enforce an overall wall-clock cap on the subscription
                t2 = datetime.now()
                td = t2 - t1
                self.log.info(
                    'Subscribe time {0} seconds'.format(td.seconds)
                )
                self.time_delta = td.seconds
                if td.seconds > self.stream_max:
                    self.stop()
                    break

    def stop(self):
        # signal run() to exit before handling the next response
        self._stop_event.set()

    def stopped(self):
        return self._stop_event.is_set()
class Gnmi(BaseConnection):
"""Session handling for gNMI connections.
Can be used with pyATS same as yang.connector.Netconf is used or
    can be used as a standalone module.
Methods:
--------
capabilities(): gNMI Capabilities.
set(dict): gNMI Set. Input is namespace, xpath/value pairs.
get(dict): gNMI Get mode='STATE'. Input xpath/value pairs (value optional).
get_config(dict): gNMI Get mode='CONFIG'. Input xpath/value pairs.
subscribe(dict): gNMI Subscribe. Input xpath/value pairs and format
    notify_wait(dict, callback): Notify subscribe thread that event occurred,
"callback" must be a class with passed, and failed methods and a
result class containing "code" property.
pyATS Examples:
---------------
>>> from pyats.topology import loader
>>> from yang.connector.gnmi import Gnmi
>>> testbed=loader.load('testbed_native_test.yaml')
>>> device=testbed.devices['ddmi-9500-2']
>>> device.connect(alias='gnmi', via='yang2')
>>> #####################
>>> # Set/Get example #
>>> #####################
>>> content={
    ... 'namespace': {'ios': 'http://cisco.com/ns/yang/Cisco-IOS-XE-native',
... 'ios-cdp': 'http://cisco.com/ns/yang/Cisco-IOS-XE-cdp'},
... 'nodes': [{'xpath': '/ios:native/ios:cdp/ios-cdp:holdtime',
... 'value': '10'}]
... }
>>> device.gnmi.set(content)
[]
>>> content['nodes'][0].pop('value')
>>> device.gnmi.get(content)
[{'update': [(10, '/native/cdp/holdtime')]}]
>>> #####################
>>> # Capabilities #
>>> #####################
>>> resp=device.gnmi.capabilities()
>>> resp.keys()
dict_keys(['supportedModels', 'supportedEncodings', 'gNMIVersion'])
Standalone Examples (pyATS not installed):
------------------------------------------
>>> #####################
>>> # Capabilities #
>>> #####################
>>> from yang.connector.gnmi import Gnmi
>>> kwargs={
... 'host':'172.23.167.122',
... 'port':'50051',
... 'root_certificate':'root.pem',
... 'username':'admin',
... 'password':'<PASSWORD>',
... 'ssl_name_override':'ems.cisco.com'
... }
>>> gnmi=Gnmi('iosxe', **kwargs)
>>> resp = gnmi.capabilities()
>>> #####################
>>> # Set example #
>>> #####################
>>> content={
    ... 'namespace': {'ios': 'http://cisco.com/ns/yang/Cisco-IOS-XE-native',
... 'ios-cdp': 'http://cisco.com/ns/yang/Cisco-IOS-XE-cdp'},
... 'nodes': [{'xpath': '/ios:native/ios:cdp/ios-cdp:holdtime',
... 'value': '10'}]
... }
>>> resp = gnmi.set(content)
>>> #####################
>>> # Get mode='CONFIG' #
>>> #####################
>>> content={
    ... 'namespace': {'ios': 'http://cisco.com/ns/yang/Cisco-IOS-XE-native',
... 'ios-cdp': 'http://cisco.com/ns/yang/Cisco-IOS-XE-cdp'},
... 'nodes': [{'xpath': '/ios:native/ios:cdp/ios-cdp:holdtime'}]
... }
>>> resp = gnmi.get_config({'content': content, 'returns': returns})
>>> #####################
>>> # Get mode='STATE' #
>>> #####################
>>> content={
    ... 'namespace': {'ios': 'http://cisco.com/ns/yang/Cisco-IOS-XE-native',
... 'ios-cdp': 'http://cisco.com/ns/yang/Cisco-IOS-XE-cdp'},
... 'nodes': [{'xpath': '/ios:native/ios:cdp/ios-cdp:holdtime'}]
... }
>>> resp = gnmi.get({'content': content, 'returns': returns})
>>> #####################
>>> # Subscribe example #
>>> #####################
>>> content={
... 'format': {
... 'encoding': 'JSON',
... 'request_mode': 'STREAM',
... 'sample_interval': 5,
... 'stream_max': 15,
... 'sub_mode': 'SAMPLE'},
... }
>>> # Add namespace, node xpath list similar to Get to the content
>>> # Add list of expected return values to content
>>> # Example of an expected return value:
>>> content['returns']=[{
... 'datatype': 'string',
... 'id': 0,
... 'name': 'name',
... 'op': '==',
... 'selected': True,
... 'value': 'v4acl',
... 'xpath': '/acl/acl-sets/acl-set/name'}]
>>> # Trigger an event that would kick off subscribe
>>> # Call notify_wait() passing in a callback class (see notify_wait)
"""
os_class_map = {
None: None,
"iosxr": "IOS XR",
"nxos": "NX-OS",
"iosxe": "IOS XE",
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.device = kwargs.get('device')
dev_args = self.connection_info
if dev_args.get('protocol', '') != 'gnmi':
msg = 'Invalid protocol {0}'.format(
dev_args.get('protocol', '')
)
raise TypeError(msg)
# Initialize ClientBuilder
self.client_os = self.os_class_map.get(self.device.os, None)
# ClientBuilder target is IP:Port
target = dev_args.get('host') + ':' + str(dev_args.get('port'))
builder = ClientBuilder(target).set_os(self.client_os)
# Gather certificate settings
root = dev_args.get('root_certificate')
if not root:
root = None
if root and os.path.isfile(root):
root = open(root, 'rb').read()
chain = dev_args.get('certificate_chain')
if not chain:
chain = None
if chain and os.path.isfile(chain):
chain = open(chain, 'rb').read()
private_key = dev_args.get('private_key', '')
if not private_key:
private_key = None
if private_key and os.path.isfile(private_key):
private_key = open(private_key, 'rb').read()
builder.set_secure(root, private_key, chain)
builder.set_ssl_target_override(
dev_args.get('ssl_name_override', '')
)
builder.set_call_authentication(
dev_args.get('username'),
dev_args.get('password')
)
# builder.construct() returns client and connects the channel
self.builder = builder
self.gnmi = self.builder.construct()
resp = self.capabilities()
if resp:
log.info(
'\ngNMI version: {0} supported encodings: {1}\n\n'.format(
resp.get('gNMIVersion', 'unknown'),
resp.get('supportedEncodings', 'unknown')
))
log.info(banner('gNMI CONNECTED'))
else:
log.info(banner('gNMI Capabilities not returned'))
active_notifications = {}
    @property
    def device(self):
        # Underlying pyATS device object (assigned via the setter).
        return self._device

    @device.setter
    def device(self, device):
        # NOTE(review): a falsy value (e.g. None) is silently ignored and
        # leaves any previously-set device in place -- confirm intentional.
        if device:
            self._device = device
    @property
    def connected(self):
        """Return True if session is connected."""
        # NOTE(review): this actually returns the gNMI client object (truthy
        # once constructed); it does not probe the channel's liveness.
        return self.gnmi
def path_elem_to_xpath(self, path_elem, namespace={}, opfields=[]):
"""Convert a Path structure to an Xpath."""
elems = path_elem.get('elem', [])
xpath = []
for elem in elems:
name = elem.get('name', '')
if name:
for mod in namespace.values():
name = name.replace(mod + ':', '')
xpath.append(name)
key = elem.get('key', '')
if key:
for name, value in key.items():
for mod in namespace.values():
value = str(value).replace(mod + ':', '')
opfields.append((
value,
'/' + '/'.join(xpath) + '/' + name,
))
return '/' + '/'.join(xpath)
def get_opfields(self, val, xpath_str, opfields=[]):
if isinstance(val, dict):
for name, dict_val in val.items():
opfields = self.get_opfields(
dict_val,
xpath_str + '/' + name,
opfields=opfields
)
return opfields
elif isinstance(val, list):
for item in val:
for name, dict_val in item.items():
opfields = self.get_opfields(
dict_val,
xpath_str + '/' + name,
opfields
)
return opfields
else:
xpath_list = xpath_str.split('/')
name = xpath_list.pop()
xpath_str = '/'.join(xpath_list)
opfields.append((val, xpath_str + '/' + name))
return opfields
def decode_update(self, updates=[], namespace={}):
opfields = []
for update in updates['update']:
xpath_str = self.path_elem_to_xpath(
update.get('path', {}),
namespace=namespace,
opfields=opfields
)
if not xpath_str:
log.error('Xpath not determined from response')
return []
# TODO: the val depends on the encoding type
val = update.get('val', {}).get('jsonIetfVal', '')
if not val:
val = update.get('val', {}).get('jsonVal', '')
if not val:
log.error('{0} has no values'.format(xpath_str))
return []
json_val = base64.b64decode(val).decode('utf-8')
update_val = json.loads(json_val)
if isinstance(update_val, dict):
update_val = [update_val]
elif isinstance(update_val, list):
for val_dict in update_val:
opfields = self.get_opfields(
val_dict,
xpath_str,
opfields
)
else:
# Just one value returned
opfields.append((update_val, xpath_str))
return opfields
def decode_notification(self, response, namespace):
"""Decode a response from the google.protobuf into a dict."""
resp_dict = json_format.MessageToDict(response)
notifies = resp_dict.get('notification', [])
ret_vals = []
for notify in notifies:
ret_val = {}
time_stamp = notify.get('timestamp', '')
# TODO: convert time_stamp from str nanoseconds since epoch time
# to datetime
opfields = self.decode_update(notify, namespace=namespace)
ret_val['update'] = opfields
deletes = notify.get('delete', [])
deleted = []
for delete in deletes:
xpath_str = self.path_elem_to_xpath(
delete.get('path', {}),
namespace
)
if xpath_str:
deleted.append(xpath_str)
if deleted:
ret_val['delete'] = deleted
ret_vals.append(ret_val)
return ret_vals
def decode_capabilities(self, caps={}):
return_caps = {
'version': 'unknown',
'encodings': 'unknown',
'models': []
}
if not caps:
| |
# graph/lib.py
"""
Common library for Graph Component.
"""
import logging
from typing import List, Dict, Set
from dbas.database import DBDiscussionSession
from dbas.database.discussion_model import Argument, TextVersion, Premise, Issue, User, ClickedStatement, Statement, \
SeenStatement, StatementToIssue, GraphNode
from dbas.lib import get_profile_picture
LOG = logging.getLogger(__name__)
def get_graph_nodes(issue: Issue) -> Set[GraphNode]:
    """
    Collect every enabled graph node reachable from the issue.

    :param issue: The issue whose enabled positions seed the traversal.
    :return: A set of GraphNodes which are reachable from the issue.
    """
    # set comprehension instead of set([...]); disabled positions are skipped
    positions: Set[Statement] = {position for position in issue.positions if not position.is_disabled}
    return positions.union(*[position.get_sub_tree() for position in positions])
def get_d3_data(db_issue: Issue, all_statements=None, all_arguments=None):
    """
    Given an issue, create a dictionary and return it
    :param db_issue: Current issue
    :param all_statements: Unused here (apparently kept for signature compatibility)
    :param all_arguments: Unused here (apparently kept for signature compatibility)
    :return: Tuple of (d3 graph dict from get_d3_graph, False)
    """
    return get_d3_graph(db_issue), False
def _d3_edges_from_argument(argument: Argument) -> List[Dict[str, str]]:
"""
Generates a list of d3 representations of the edges which are connected to an argument.
:param argument: The argument, which edges will be considered.
:return: A list of edges for d3, which are coming in and are going out from an argument.
"""
if argument.conclusion_uid:
out_edge = {
'id': f'edge_{argument.uid}_c{argument.conclusion_uid}',
'edge_type': 'arrow',
'color': 'green' if argument.is_supportive else 'red',
'target': f'statement_{argument.conclusion_uid}',
'source': f'argument_{argument.uid}'
}
else:
out_edge = {
'id': f'edge_{argument.uid}_a{argument.argument_uid}',
'edge_type': 'arrow',
'color': 'green' if argument.is_supportive else 'red',
'target': f'argument_{argument.argument_uid}',
'source': f'argument_{argument.uid}'
}
def premise_edge(argument: Argument, premise: Premise) -> Dict[str, str]:
"""
Generates the representation of a premise edge.
:param argument: The targeted argument node.
:param premise: The single premise which is used by the argument.
:return:
"""
return {
'id': f'edge_{argument.uid}_{premise.statement_uid}',
'edge_type': '',
'color': 'green' if argument.is_supportive else 'red',
'target': f'argument_{argument.uid}',
'source': f'statement_{premise.statement_uid}'
}
in_edges = [premise_edge(argument, premise) for premise in argument.premises]
in_edges.append(out_edge)
return in_edges
def _position_edge(position: Statement):
"""
Generates the d3 representation of an edge between a position and the issue.
:param position:
:return:
"""
return {
'id': f'edge_{position.uid}_issue',
'label': position.get_text(),
'type': 'position',
'timestamp': position.get_first_timestamp().timestamp,
'edge_type': 'arrow',
'color': 'grey',
'target': 'issue',
'source': f'statement_{position.uid}'
}
def get_d3_graph(db_issue: Issue) -> Dict:
    """
    Returns the graph structure for an issue for the frontend.

    :param db_issue: The issue from were to start the Graph
    :return: A dict with 'nodes', 'edges' and 'extras', where 'nodes' is a list of nodes (argument and statements) in the graph.
        'edges' are edges between the nodes and extras a dictionary in the form of node-id -> node (the same nodes like in 'nodes')
    """
    graph_nodes = get_graph_nodes(db_issue)

    node_dicts = [node.to_d3_dict() for node in graph_nodes]
    # add center node for issue
    node_dicts.append({'id': 'issue', 'label': db_issue.title, 'type': 'issue', 'timestamp': str(db_issue.date)})

    # edges coming in and out of every argument node
    argument_edges = []
    for node in graph_nodes:
        if isinstance(node, Argument):
            argument_edges.extend(_d3_edges_from_argument(node))
    # add edges between positions and issue
    position_edges = [_position_edge(position) for position in db_issue.positions if not position.is_disabled]

    extras = {}
    for node in graph_nodes:
        if isinstance(node, Statement):
            d3_node = node.to_d3_dict()
            extras[d3_node['id']] = d3_node

    return {
        'nodes': node_dicts,
        'edges': argument_edges + position_edges,
        'extras': extras
    }
def get_opinion_data(db_issue: Issue) -> dict:
    """
    Map each statement uid of the issue to the share of viewers that up-voted it.

    :param db_issue: Issue whose statements are evaluated.
    :return: dict of str(statement.uid) -> ratio in [0, 1]; statements
        nobody has seen yet default to 1.
    """
    statements = [el.statement_uid for el in
                  DBDiscussionSession.query(StatementToIssue).filter_by(issue_uid=db_issue.uid).all()]
    db_statements = DBDiscussionSession.query(Statement).filter(Statement.uid.in_(statements)).all()
    db_all_seen = DBDiscussionSession.query(SeenStatement)
    db_all_votes = DBDiscussionSession.query(ClickedStatement)
    ret_dict = dict()
    for statement in db_statements:
        db_seen = db_all_seen.filter_by(statement_uid=statement.uid).count()
        # '== True' is intentional: these are SQLAlchemy column comparisons,
        # not Python boolean tests.
        db_votes = db_all_votes.filter(ClickedStatement.statement_uid == statement.uid,
                                       ClickedStatement.is_up_vote == True,
                                       ClickedStatement.is_valid == True).count()
        ret_dict[str(statement.uid)] = (db_votes / db_seen) if db_seen != 0 else 1
    return ret_dict
def get_path_of_user(base_url, path, db_issue):
    """
    Extract the ordered list of step statements a user visited, collapsing
    immediately repeated entries.

    :param base_url: Prefix stripped from the raw path.
    :param path: Raw path string, optionally carrying a '?history=' trail.
    :param db_issue: Issue whose slug is stripped from the path.
    :return: List of [statement_uid, target] pairs.
    """
    LOG.debug("Path of a specific user: %s", path)
    # replace everything what we do not need
    for fragment in [base_url, '/discuss/', '/discuss', db_issue.slug, '#graph', '#']:
        path = path.replace(fragment, '')

    # split in current step and history
    if '?history=' in path:
        current, history = path.split('?history=')
        history = history.split('-') + [current]
    else:
        history = [path]
    LOG.debug("History: %s", history)

    tlist = []
    for step in history:
        step_statements = __get_statements_of_path_step(step)
        if step_statements:
            tlist += step_statements

    # drop entries that merely repeat their direct neighbour
    if len(tlist) > 1:
        ret_list = [cur for cur, nxt in zip(tlist, tlist[1:]) if cur != nxt]
        ret_list.append(tlist[-1])
    else:
        ret_list = tlist
    LOG.debug("Returning path %s", ret_list)
    return ret_list
def __get_statements_of_path_step(step):
    """
    Map one history step ('justify/...' or 'reaction/...') to statement/target pairs.

    :param step: Single slash-separated step string from the user's history.
    :return: List of [statement_uid, target] pairs, or None when nothing matched.
    """
    statements = []
    splitted = step.split('/')
    if 'justify' in step and len(splitted) > 2:
        LOG.debug("Append %s -> issue", splitted[2])
        statements.append([int(splitted[2]), 'issue'])
    # elif 'justify' in step:
    #     if len(splitted) == 4:  # statement
    #         statements.append([int(splitted[2])])
    #     else:  # argument
    #         db_arg = DBDiscussionSession.query(Argument).get(splitted[2])
    #         db_prems = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid=db_arg.premisegroup_uid)
    #         statements.append([premise.statement_uid for premise in db_prems])
    elif 'reaction' in step:
        collected_arguments = []
        db_argument = DBDiscussionSession.query(Argument).get(splitted[2])
        collected_arguments.append(db_argument)
        # walk up the undercut chain until an argument with a conclusion is found
        while db_argument.argument_uid is not None:
            db_argument = DBDiscussionSession.query(Argument).get(db_argument.argument_uid)
            if db_argument not in collected_arguments:
                collected_arguments.append(db_argument)
        target = db_argument.conclusion_uid
        # every premise of every collected argument points at that conclusion
        for arg in collected_arguments:
            db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid=arg.premisegroup_uid)
            for premise in db_premises:
                statements.append([premise.statement_uid, target])
                LOG.debug("Append %s -> %s", premise.statement_uid, target)
    return statements if len(statements) > 0 else None
def __prepare_statements_for_d3_data(db_statements: List[Statement], edge_type):
    """
    Build d3 node/edge dicts for a list of statements.

    :param db_statements: Statements to convert into d3 nodes.
    :param edge_type: Edge type used for the position -> issue edges.
    :return: Tuple (all_ids, nodes, edges, extras) where extras maps
        node id -> node dict.
    """
    LOG.debug("Enter private function to prepare statements for d3")
    all_ids = []
    nodes = []
    edges = []
    extras = {}
    for statement in db_statements:
        text = statement.get_text()
        node_dict = __get_node_dict(uid='statement_' + str(statement.uid),
                                    label=text,
                                    node_type='position' if statement.is_position else 'statement',
                                    author=__get_author_of_statement(statement.uid),
                                    editor=__get_editor_of_statement(statement.uid),
                                    timestamp=statement.get_first_timestamp().timestamp)
        extras[node_dict['id']] = node_dict
        all_ids.append('statement_' + str(statement.uid))
        nodes.append(node_dict)
        if statement.is_position:
            # positions additionally get an edge to the central issue node
            edge_dict = __get_edge_dict(uid='edge_' + str(statement.uid) + '_issue',
                                        source='statement_' + str(statement.uid),
                                        target='issue',
                                        color='grey',
                                        edge_type=edge_type)
            edges.append(edge_dict)
    return all_ids, nodes, edges, extras
def __prepare_arguments_for_d3_data(db_arguments, edge_type):
    """
    Build d3 node/edge dicts for a list of arguments.

    :param db_arguments: Arguments to convert into d3 edges (plus helper nodes).
    :param edge_type: Edge type for the edge reaching the conclusion/argument.
    :return: Tuple (all_ids, nodes, edges, extras).
        NOTE(review): extras is always returned empty here -- confirm callers
        expect that.
    """
    all_ids = []
    nodes = []
    edges = []
    extras = {}
    LOG.debug("Enter private function to prepare arguments for d3")
    # for each argument edges will be added as well as the premises
    for argument in db_arguments:
        counter = 1
        # we have an argument with:
        # 1) with one premise and no undercut is done on this argument
        # 2) with at least two premises one conclusion or an undercut is done on this argument
        # '== False' is an SQLAlchemy column comparison, not a Python bool test
        db_premises = DBDiscussionSession.query(Premise).filter(Premise.premisegroup_uid == argument.premisegroup_uid,
                                                                Premise.is_disabled == False).all()
        db_undercuts = DBDiscussionSession.query(Argument).filter_by(argument_uid=argument.uid).all()
        # target of the edge (case 1) or last edge (case 2)
        target = 'argument_' + str(argument.argument_uid)
        if argument.conclusion_uid is not None:
            target = 'statement_' + str(argument.conclusion_uid)
        if len(db_premises) == 1 and len(db_undercuts) == 0:
            __add_edge_to_dict(edges, argument, counter, db_premises[0], target, edge_type)
        else:
            __add_edge_and_node_to_dict(edges, nodes, all_ids, argument, counter, db_premises, target, edge_type)
    return all_ids, nodes, edges, extras
def __add_edge_to_dict(edges, argument, counter, premise, target, edge_type):
    """Append the single premise -> target edge for a one-premise argument.

    Mutates *edges* in place.
    """
    uid = 'edge_' + str(argument.uid) + '_' + str(counter)
    edge = __get_edge_dict(
        uid=uid,
        source='statement_' + str(premise.statement_uid),
        target=target,
        color='green' if argument.is_supportive else 'red',
        edge_type=edge_type
    )
    edges.append(edge)
def __add_edge_and_node_to_dict(edges, nodes, all_ids, argument, counter, db_premises, target, edge_type):
    """
    Add edges plus an invisible middle node for a multi-premise/undercut argument.

    Mutates edges, nodes and all_ids in place.
    :param edges: List collecting edge dicts.
    :param nodes: List collecting node dicts.
    :param all_ids: List collecting node ids.
    :param argument: Argument being rendered.
    :param counter: Starting suffix for premise edge uids (incremented locally
        only; the change is not visible to the caller).
    :param db_premises: Premises of the argument's premisegroup.
    :param target: Id of the conclusion statement or undercut argument.
    :param edge_type: Edge type for the final edge towards target.
    """
    edge_source = []
    # edge from premisegroup to the middle point
    for premise in db_premises:
        edge_dict = __get_edge_dict(uid='edge_' + str(argument.uid) + '_' + str(counter),
                                    source='statement_' + str(premise.statement_uid),
                                    target='argument_' + str(argument.uid),
                                    color='green' if argument.is_supportive else 'red',
                                    edge_type='')
        edges.append(edge_dict)
        edge_source.append('statement_' + str(premise.statement_uid))
        counter += 1
    # edge from the middle point to the conclusion/argument
    edge_dict = __get_edge_dict(uid='edge_' + str(argument.uid) + '_0',
                                source='argument_' + str(argument.uid),
                                target=target,
                                color='green' if argument.is_supportive else 'red',
                                edge_type=edge_type)
    edges.append(edge_dict)
    # add invisible point in the middle of the edge (to enable pgroups and undercuts)
    node_dict = __get_node_dict(uid='argument_' + str(argument.uid),
                                label='',
                                edge_source=edge_source,
                                edge_target=target,
                                timestamp=argument.timestamp.timestamp)
    nodes.append(node_dict)
    all_ids.append('argument_' + str(argument.uid))
def __sanity_check_of_d3_data(all_node_ids, edges_array):
    """
    Check that every edge references existing node ids.

    :param all_node_ids: Collection of valid node ids.
    :param edges_array: Edge dicts with 'source' and 'target' keys.
    :return: True when at least one edge is broken, False when all edges
        are wired to known nodes.
    """
    broken = False
    for edge in edges_array:
        missing_source = edge['source'] not in all_node_ids
        missing_target = edge['target'] not in all_node_ids
        if missing_source:
            LOG.debug("Source of %s is not valid", edge)
        if missing_target:
            LOG.debug("Target of %s is not valid", edge)
        broken = broken or missing_source or missing_target

    if broken:
        LOG.warning("At least one edge has an invalid source or target.")
        LOG.debug("List of all node ids: %s", all_node_ids)
        return True
    LOG.debug("All nodes are connected well.")
    return False
def __get_author_of_statement(uid):
    """
    Return name and profile picture of the statement's original author.

    :param uid: Statement uid.
    :return: {'name': ..., 'gravatar_url': ...} for the author of the
        first (oldest) TextVersion of the statement.
    """
    # ascending order => the first TextVersion, i.e. the original author
    db_tv = DBDiscussionSession.query(TextVersion).filter_by(statement_uid=uid).order_by(
        TextVersion.uid.asc()).first()
    db_author = DBDiscussionSession.query(User).get(db_tv.author_uid)
    gravatar = get_profile_picture(db_author, 40)
    name = db_author.global_nickname
    return {'name': name, 'gravatar_url': gravatar}
def __get_editor_of_statement(uid):
    """
    Return name and profile picture of the statement's latest editor.

    :param uid: Statement uid.
    :return: {'name': ..., 'gravatar': ...} for the author of the most
        recent TextVersion of the statement.
        NOTE(review): the key here is 'gravatar' while
        __get_author_of_statement uses 'gravatar_url' -- confirm consumers
        rely on this asymmetry before unifying.
    """
    # descending order => the newest TextVersion, i.e. the last editor
    db_statement = DBDiscussionSession.query(TextVersion).filter_by(statement_uid=uid).order_by(
        TextVersion.uid.desc()).first()
    db_editor = DBDiscussionSession.query(User).get(db_statement.author_uid)
    gravatar = get_profile_picture(db_editor, 40)
    name = db_editor.global_nickname
    return {'name': name, 'gravatar': gravatar}
def __get_node_dict(uid, label, node_type='', author=None, editor=None, edge_source=None, edge_target=None,
timestamp=''):
"""
Create node dict for D3
:param uid:
:param label:
:param node_type:
:param author:
:param editor:
:param edge_source:
:param edge_target:
:param timestamp:
:return: dict()
"""
if author is None:
author = dict()
if editor is None:
editor = dict()
return {
'id': uid,
'label': label,
| |
pageToken = _messages.StringField(2)
parent = _messages.StringField(3, required=True)
# apitools-style generated message class; field numbers mirror the proto.
class OsconfigProjectsSetIamPolicyRequest(_messages.Message):
  r"""A OsconfigProjectsSetIamPolicyRequest object.

  Fields:
    resource: REQUIRED: The resource for which the policy is being specified.
      See the operation documentation for the appropriate value for this
      field.
    setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the
      request body.
  """

  resource = _messages.StringField(1, required=True)
  setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2)
# apitools-style generated message class; field numbers mirror the proto.
class OsconfigProjectsTestIamPermissionsRequest(_messages.Message):
  r"""A OsconfigProjectsTestIamPermissionsRequest object.

  Fields:
    resource: REQUIRED: The resource for which the policy detail is being
      requested. See the operation documentation for the appropriate value for
      this field.
    testIamPermissionsRequest: A TestIamPermissionsRequest resource to be
      passed as the request body.
  """

  resource = _messages.StringField(1, required=True)
  testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2)
# apitools-style generated message class; field numbers mirror the proto.
class OsconfigProjectsZonesInstancesLookupConfigsRequest(_messages.Message):
  r"""A OsconfigProjectsZonesInstancesLookupConfigsRequest object.

  Fields:
    lookupConfigsRequest: A LookupConfigsRequest resource to be passed as the
      request body.
    resource: The resource name for the instance.
  """

  lookupConfigsRequest = _messages.MessageField('LookupConfigsRequest', 1)
  resource = _messages.StringField(2, required=True)
# apitools-style generated message class; field numbers mirror the proto.
class OsconfigProjectsZonesInstancesReportPatchJobInstanceDetailsRequest(_messages.Message):
  r"""A OsconfigProjectsZonesInstancesReportPatchJobInstanceDetailsRequest
  object.

  Fields:
    reportPatchJobInstanceDetailsRequest: A
      ReportPatchJobInstanceDetailsRequest resource to be passed as the
      request body.
    resource: The instance reporting its status in the form
      `projects/*/zones/*/instances/*`
  """

  reportPatchJobInstanceDetailsRequest = _messages.MessageField('ReportPatchJobInstanceDetailsRequest', 1)
  resource = _messages.StringField(2, required=True)
# apitools-style generated message class; field numbers mirror the proto.
class Package(_messages.Message):
  r"""Package is a reference to the actual package to be installed or removed.

  Fields:
    name: The name of the package.
  """

  name = _messages.StringField(1)
# apitools-style generated message class; field numbers mirror the proto.
class PatchConfig(_messages.Message):
  r"""Patch configuration specifications. Contains details on specifically how
  to apply the patch(es) to an instance.

  Enums:
    RebootConfigValueValuesEnum: Optional. Post-patch reboot settings.

  Fields:
    apt: Apt update settings. Use this override the default apt patch rules.
    goo: Goo update settings. Use this override the default goo patch rules.
    rebootConfig: Optional. Post-patch reboot settings.
    retryStrategy: Optional. Retry strategy can be defined to have the agent
      retry patching during the window if patching fails. If omitted, the
      agent will use its default retry strategy.
    windowsUpdate: Windows update settings. Use this override the default
      windows patch rules.
    yum: Yum update settings. Use this override the default yum patch rules.
    zypper: Zypper update settings. Use this override the default zypper patch
      rules.
  """

  class RebootConfigValueValuesEnum(_messages.Enum):
    r"""Optional. Post-patch reboot settings.

    Values:
      REBOOT_CONFIG_UNSPECIFIED: The default behavior is DEFAULT.
      DEFAULT: The agent will decide if a reboot is necessary by checking well
        known signals such as registry keys or `/var/run/reboot-required`.
      ALWAYS: Always reboot the machine after the update has completed.
      NEVER: Never reboot the machine after the update has completed.
    """
    REBOOT_CONFIG_UNSPECIFIED = 0
    DEFAULT = 1
    ALWAYS = 2
    NEVER = 3

  apt = _messages.MessageField('AptSettings', 1)
  goo = _messages.MessageField('GooSettings', 2)
  rebootConfig = _messages.EnumField('RebootConfigValueValuesEnum', 3)
  retryStrategy = _messages.MessageField('RetryStrategy', 4)
  windowsUpdate = _messages.MessageField('WindowsUpdateSettings', 5)
  yum = _messages.MessageField('YumSettings', 6)
  zypper = _messages.MessageField('ZypperSettings', 7)
# apitools-style generated message class; field numbers mirror the proto.
class PatchJob(_messages.Message):
  r"""A high level representation of a patch job that is either in progress or
  has completed. Instances' details are not included in the job. To paginate
  through instance details, use ListPatchJobInstanceDetails.

  Enums:
    StateValueValuesEnum: Output only. The current state of the PatchJob.

  Fields:
    createTime: Output only. Time this PatchJob was created.
    description: Description of the patch job. Length of the description is
      limited to 1024 characters.
    dryRun: If this patch job is a dry run, the agent will report that it has
      finished without running any updates on the VM.
    duration: Duration of the patch job. After the duration ends, the patch
      job will time out.
    errorMessage: If this patch job failed, this message will provide
      information about the failure.
    filter: Required. There must be at least one instance to patch for this
      job to succeed. This is the same filter used when listing compute
      instances.
    instanceDetailsSummary: Summary of instance details.
    name: Output only. Unique identifier for this patch job in the form
      `projects/*/patchJobs/*`
    patchConfig: Patch configuration being applied.
    percentComplete: Reflects the overall progress of the patch job in the
      range of 0.0 being no progress to 100.0 being complete.
    state: Output only. The current state of the PatchJob.
    updateTime: Output only. Last time this PatchJob was updated.
  """

  class StateValueValuesEnum(_messages.Enum):
    r"""Output only. The current state of the PatchJob.

    Values:
      STATE_UNSPECIFIED: State must be specified.
      STARTED: The patch job was successfully initiated.
      INSTANCE_LOOKUP: The patch job is looking up instances to run the patch
        on.
      PATCHING: Instances are being patched.
      SUCCEEDED: Patch job completed successfully.
      COMPLETED_WITH_ERRORS: Patch job completed but there were errors.
      CANCELED: The patch job was canceled.
      TIMED_OUT: The patch job has timed out.
    """
    STATE_UNSPECIFIED = 0
    STARTED = 1
    INSTANCE_LOOKUP = 2
    PATCHING = 3
    SUCCEEDED = 4
    COMPLETED_WITH_ERRORS = 5
    CANCELED = 6
    TIMED_OUT = 7

  createTime = _messages.StringField(1)
  description = _messages.StringField(2)
  dryRun = _messages.BooleanField(3)
  duration = _messages.StringField(4)
  errorMessage = _messages.StringField(5)
  filter = _messages.StringField(6)
  instanceDetailsSummary = _messages.MessageField('InstanceDetailsSummary', 7)
  name = _messages.StringField(8)
  patchConfig = _messages.MessageField('PatchConfig', 9)
  percentComplete = _messages.FloatField(10)
  state = _messages.EnumField('StateValueValuesEnum', 11)
  updateTime = _messages.StringField(12)
# apitools-style generated message class; field numbers mirror the proto.
class PatchJobInstanceDetails(_messages.Message):
  r"""Patch details of an instance.

  Enums:
    StateValueValuesEnum: Current state of instance patch.

  Fields:
    attemptCount: Number of times the agent attempted to apply the patch.
    failureReason: If the patch has failed, this is the reason.
    instanceSystemId: The unique, system-generated identifier for the
      instance.
    name: The instance name in the form `projects/*/zones/*/instances/*`
    state: Current state of instance patch.
  """

  class StateValueValuesEnum(_messages.Enum):
    r"""Current state of instance patch.

    Values:
      PATCH_STATE_UNSPECIFIED: Unspecified.
      PENDING: The instance has not been notified yet.
      INACTIVE: Instance is inactive and cannot be patched.
      NOTIFIED: The instance has been notified that it should patch.
      STARTED: The instance has started the patching process.
      DOWNLOADING_PATCHES: The instance is downloading patches.
      APPLYING_PATCHES: The instance is applying patches.
      REBOOTING: The instance is rebooting.
      SUCCEEDED: The instance has completed applying patches.
      SUCCEEDED_REBOOT_REQUIRED: The instance has completed applying patches
        but a reboot is required.
      FAILED: The instance has failed to apply the patch.
      ACKED: The instance acked the notification and will start shortly.
      TIMED_OUT: The instance exceeded the time out while applying the patch.
    """
    PATCH_STATE_UNSPECIFIED = 0
    PENDING = 1
    INACTIVE = 2
    NOTIFIED = 3
    STARTED = 4
    DOWNLOADING_PATCHES = 5
    APPLYING_PATCHES = 6
    REBOOTING = 7
    SUCCEEDED = 8
    SUCCEEDED_REBOOT_REQUIRED = 9
    FAILED = 10
    ACKED = 11
    TIMED_OUT = 12

  attemptCount = _messages.IntegerField(1)
  failureReason = _messages.StringField(2)
  instanceSystemId = _messages.StringField(3)
  name = _messages.StringField(4)
  state = _messages.EnumField('StateValueValuesEnum', 5)
# apitools-style generated message class; field numbers mirror the proto.
class Policy(_messages.Message):
  r"""Defines an Identity and Access Management (IAM) policy. It is used to
  specify access control policies for Cloud Platform resources. A `Policy`
  consists of a list of `bindings`. A `binding` binds a list of `members` to a
  `role`, where the members can be user accounts, Google groups, Google
  domains, and service accounts. A `role` is a named list of permissions
  defined by IAM. **JSON Example** { "bindings": [ {
  "role": "roles/owner", "members": [
  "user:<EMAIL>", "group:<EMAIL>",
  "domain:google.com", "serviceAccount:my-other-
  app@appspot.gserviceaccount.com" ] }, {
  "role": "roles/viewer", "members": ["user:<EMAIL>"]
  } ] } **YAML Example** bindings: - members: -
  user:<EMAIL> - group:<EMAIL> -
  domain:google.com - serviceAccount:my-other-
  app@appspot.gserviceaccount.com role: roles/owner - members:
  - user:<EMAIL> role: roles/viewer For a description of IAM
  and its features, see the [IAM developer's
  guide](https://cloud.google.com/iam/docs).

  Fields:
    auditConfigs: Specifies cloud audit logging configuration for this policy.
    bindings: Associates a list of `members` to a `role`. `bindings` with no
      members will result in an error.
    etag: `etag` is used for optimistic concurrency control as a way to help
      prevent simultaneous updates of a policy from overwriting each other. It
      is strongly suggested that systems make use of the `etag` in the read-
      modify-write cycle to perform policy updates in order to avoid race
      conditions: An `etag` is returned in the response to `getIamPolicy`, and
      systems are expected to put that etag in the request to `setIamPolicy`
      to ensure that their change will be applied to the same version of the
      policy. If no `etag` is provided in the call to `setIamPolicy`, then
      the existing policy is overwritten blindly.
    version: Deprecated.
  """

  auditConfigs = _messages.MessageField('AuditConfig', 1, repeated=True)
  bindings = _messages.MessageField('Binding', 2, repeated=True)
  etag = _messages.BytesField(3)
  version = _messages.IntegerField(4, variant=_messages.Variant.INT32)
class ReportPatchJobInstanceDetailsRequest(_messages.Message):
r"""Request to report the patch status for an instance.
Enums:
StateValueValuesEnum: State of current patch execution on the instance.
Fields:
attemptCount: Number of times the agent attempted to apply the patch.
failureReason: Reason | |
# Source repository: TencentYoutuResearch/PersonReID-TSF
from __future__ import print_function
import sys
import time
import os.path as osp
from PIL import Image
import cv2
import numpy as np
import random
from collections import defaultdict
from .Dataset import Dataset
from ..utils.utils import measure_time
from ..utils.re_ranking import re_ranking
from ..utils.metric import cmc, mean_ap, precision_recall, evaluate
from ..utils.dataset_utils import parse_im_name
from ..utils.distance import normalize
from ..utils.distance import compute_dist
import pickle
DEBUG = True
class TestSetAvgMARS(Dataset):
"""
Args:
extract_feat_func: a function to extract features. It takes a batch of
images and returns a batch of features.
marks: a list, each element e denoting whether the image is from
query (e == 0), or
gallery (e == 1), or
multi query (e == 2) set
"""
    def __init__(
            self,
            im_dir=None,
            im_names=None,
            marks=None,
            extract_feat_func=None,
            separate_camera_set=None,
            single_gallery_shot=None,
            first_match_break=None,
            **kwargs):
        """Build the test set, grouping images into fixed-size segments.

        Images are grouped by their (person id, camera/track) prefix; each
        group is then chopped into segments of at most ``max_n_samples``
        images, so that one dataset sample corresponds to one segment.

        Args:
            im_dir: root directory containing all images.
            im_names: list of image file names; the first two '_'-separated
                tokens of a name form the (id, camera/track) grouping key.
            marks: per-image marks aligned with ``im_names`` (0 query,
                1 gallery, 2 multi-query — see the class docstring).
            extract_feat_func: callable taking a batch of images and a sample
                mask, returning a batch of features.
            separate_camera_set: CMC evaluation option, stored for later use.
            single_gallery_shot: CMC evaluation option, stored for later use.
            first_match_break: CMC evaluation option, stored for later use.
            kwargs: forwarded to the ``Dataset`` base class.
        """
        # The im dir of all images
        self.im_dir = im_dir
        self.im_names = im_names
        self.extract_feat_func = extract_feat_func
        self.separate_camera_set = separate_camera_set
        self.single_gallery_shot = single_gallery_shot
        self.first_match_break = first_match_break
        # Segment key -> list of image names in that segment.
        self.im_dict = {}
        # Segment key -> list of marks (one per image in the segment).
        self.marks = {}
        # Maximum number of images per segment (and per returned sample).
        self.max_n_samples = 25
        '''
        self.im_names = self.im_names[0:1000]
        self.im_names += im_names[-5000:]
        marks = marks[0:1000] + marks[-5000:]
        '''
        # id_ch ("<person>_<camtrack>") -> ordered list of its segment keys.
        id_ch_segment = {}
        # self.im_names.sort()
        #self.im_names = self.im_names[0:250] + self.im_names[10000:10250] + self.im_names[-250:]
        # marks = [0] * 250 + [1] * 500#list(marks[0:250] + marks[1000:1250] + marks[-250:])
        for i, im_name in enumerate(self.im_names):
            # Group key: first two underscore-separated tokens of the name.
            id_ch = '_'.join(im_name.split('_')[0:2])
            if id_ch not in id_ch_segment:
                id_ch_segment[id_ch] = [id_ch + '_seg00000']
                self.im_dict[id_ch + '_seg00000'] = []
                self.marks[id_ch + '_seg00000'] = []
            key = id_ch_segment[id_ch][-1]
            # Current segment is full: open the next numbered segment.
            if len(self.im_dict[key]) == self.max_n_samples:
                key = id_ch + \
                    '_seg%05d' % (
                        int(key.split('_')[-1].replace('seg', '')) + 1)
                id_ch_segment[id_ch].append(key)
                self.im_dict[key] = []
                self.marks[key] = []
            self.im_dict[key].append(im_name)
            self.marks[key].append(marks[i])
        # One dataset entry per segment, in deterministic (sorted) order.
        id_list = sorted(list(self.im_dict.keys()))
        self.id_list = id_list
        self.id_ch_segment = id_ch_segment
        super(TestSetAvgMARS, self).__init__(
            dataset_size=len(self.id_list), **kwargs)
        print('Creating dataset using TestSetAvgMARS')
def set_feat_func(self, extract_feat_func):
self.extract_feat_func = extract_feat_func
def get_sample(self, ptr):
"""get one id in one cam's images to queue"""
if ptr >= len(self.id_list):
ptr = ptr % len(self.id_list)
im_names = []
id_ch = self.id_list[ptr]
im_names = self.im_dict[id_ch]
# if len(im_names) > self.max_n_samples:
# indices = random.sample(range(len(im_names)), self.max_n_samples)
# im_names = [im_names[i] for i in indices]
#print (len(im_names))
ims = np.zeros(
(self.max_n_samples, 3, self.pre_process_im.resize_h_w[0], self.pre_process_im.resize_h_w[1]))
for i, im_name in enumerate(im_names):
im_path = osp.join(self.im_dir, im_name)
im = cv2.imread(im_path)
if im is None:
print('%s img read fail' % im_path)
continue
im = im[:, :, ::-1]
im, _ = self.pre_process_im(im)
ims[i] = np.copy(im)
id = id_ch
cam = id_ch.split('_')[1][0]
track = id_ch.split('_')[1][1:]
mark = self.marks[id_ch][0]
sample_mask = np.array([1] * len(im_names) + [0]
* (self.max_n_samples - len(im_names)))
return (ims, im_names, id, cam, track, sample_mask, mark)
    def next_batch(self):
        """Fetch the next prefetched batch of segment samples.

        Returns:
            Tuple ``(ims, im_names, ids, cams, tracks, sample_masks, marks,
            epoch_done)``. Array entries are stacked along a new leading
            batch axis; when the prefetcher yields nothing, the leading
            entries are all ``None``.
        """
        # NOTE(review): this shuffles ``im_names`` but sampling is driven by
        # ``id_list`` (see ``get_sample``), so the shuffle appears to have no
        # effect on what gets sampled — confirm before relying on it.
        if self.epoch_done and self.shuffle:
            self.prng.shuffle(self.im_names)
        ims = None
        im_names = None
        ids = None
        cams = None
        tracks = None
        sample_masks = None
        marks = None
        samples, self.epoch_done = self.prefetcher.next_batch_test()
        if len(samples) > 0:
            ims_list, im_names_list, ids, cams, tracks, sample_masks, marks = zip(
                *samples)
        else:
            # Empty batch: return the None placeholders plus the epoch flag.
            return ims, im_names, ids, cams, tracks, sample_masks, marks, self.epoch_done
        # Transform the list into a numpy array with shape [N, ...]
        ims = np.stack(ims_list, axis=0)
        ids = np.array(ids)
        cams = np.array(cams)
        tracks = np.array(tracks)
        im_names = im_names_list
        sample_masks = np.array(sample_masks)
        marks = np.array(marks)
        return ims, im_names, ids, cams, tracks, sample_masks, marks, self.epoch_done
    def extract_feat(self, normalize_feat, verbose=True):
        """Extract the features of the whole image set.
        Args:
            normalize_feat: True or False, whether to normalize feature to unit length
            verbose: whether to print the progress of extracting feature
        Returns:
            feat: numpy array with shape [N, C]
            ids: numpy array with shape [N]
            cams: numpy array with shape [N]
            im_names: numpy array with shape [N]
            marks: numpy array with shape [N]
        """
        # NOTE(review): ``normalize_feat`` is never read below — per-track
        # features are always unit-normalized via ``normalize`` in the second
        # pass — confirm whether that is intended.
        feat, ids, id_ch_seg, cams, tracks, im_names, marks = [], [], [], [], [], [], []
        done = False
        step = 0
        printed = False
        st = time.time()
        last_time = time.time()
        # Pass 1: run the feature extractor batch by batch over all segments.
        while not done:
            ims_, im_names_, ids_, cams_, tracks_, samples_masks, marks_, done = self.next_batch()
            if done and ims_ is None:
                break
            feat_ = self.extract_feat_func(ims_, samples_masks)
            feat.append(feat_)
            id_ch_seg.append(ids_)
            # Segment ids look like "<person>_<camtrack>_segNNNNN"; the first
            # '_' token is the person id.
            ids.append([id_ch.split('_')[0] for id_ch in ids_])
            cams.append(cams_)
            tracks.append(tracks_)
            im_names += list(im_names_)
            step += 1
            marks.append(marks_)
            '''
            print ('ids', ids)
            print ('id_ch', id_ch_seg)
            print ('cams', cams)
            print ('tracks', tracks)
            print ('im names', im_names)
            print ('marks', marks)
            '''
            if verbose:
                # Print the progress of extracting feature
                total_batches = (self.prefetcher.dataset_size
                                 // self.prefetcher.batch_size + 1)
                if step % 20 == 0:
                    if not printed:
                        printed = True
                    else:
                        # Clean the current line
                        sys.stdout.write("\033[F\033[K")
                    print('{}/{} batches done, +{:.2f}s, total {:.2f}s'
                          .format(step, total_batches,
                                  time.time() - last_time, time.time() - st))
                    last_time = time.time()
        feat = np.vstack(feat)
        ids = np.hstack(ids)
        id_ch_seg = np.hstack(id_ch_seg)
        cams = np.hstack(cams)
        tracks = np.hstack(tracks)
        #im_names = np.hstack(im_names)
        marks = np.hstack(marks)
        # Pass 2: regroup segment features by (person, camera/track) and
        # average them into one feature per track.
        feat_dict = {}
        for i, ics in enumerate(id_ch_seg):
            id = ids[i]
            f = feat[i]
            cam = cams[i]
            im_name = im_names[i]
            # Drop the "_segNNNNN" suffix to get the track-level key.
            id_ch = '_'.join(ics.split('_')[0:2])
            mark = marks[i]
            if id_ch not in feat_dict:
                feat_dict[id_ch] = {'id': id, 'feat': [],
                                    'cam': cam, 'mark': mark, 'im_names': []}
            feat_dict[id_ch]['feat'].append(f)
            feat_dict[id_ch]['im_names'] += im_name
        feat, ids, cams, im_names, marks = [], [], [], [], []
        for key in feat_dict:
            f = feat_dict[key]['feat']
            # Average the segment features, then L2-normalize the result.
            f = np.mean(np.vstack(f), axis=0)
            f = normalize(f, axis=0)
            feat.append(f)
            ids.append(feat_dict[key]['id'])
            cams.append(feat_dict[key]['cam'])
            marks.append(feat_dict[key]['mark'])
            im_names.append(feat_dict[key]['im_names'])
        feat = np.array(feat)
        ids = np.array(ids)
        cams = np.array(cams)
        marks = np.array(marks)
        print(ids, cams, marks, im_names)
        return feat, ids, cams, im_names, marks
def eval(
self,
normalize_feat=True,
to_re_rank=False,
pool_type='average',
verbose=True,
preload_feature=False):
"""Evaluate using metric CMC and mAP.
Args:
normalize_feat: whether to normalize features before computing distance
to_re_rank: whether to also report re-ranking scores
pool_type: 'average' or 'max', only for multi-query case
verbose: whether to print the intermediate information
"""
#to_re_rank = False
if preload_feature:
feat, ids, cams, im_names, marks = pickle.load(
open('test_preload_feature.pkl'))
else:
with measure_time('Extracting feature...', verbose=verbose):
feat, ids, cams, im_names, marks = self.extract_feat(
normalize_feat, verbose)
#im_names = [x if isinstance(x, str) else x.decode('utf-8') for x in im_names]
#pickle.dump((feat, ids, cams, im_names, marks), open('test_preload_feature.pkl', 'w'))
# query, gallery, multi-query indices
'''
"""
rearrange query and gallery, use all the images of the same id and cam_id, as query, others as gallery
"""
print('ids:', ids.shape)
print('cams:', cams.shape)
feat_dim = feat.shape[1]
feat_dict = {}
for fea, id, cam in zip(feat, ids, cams):
if id not in feat_dict:
feat_dict[id] = {}
if cam not in feat_dict[id]:
feat_dict[id][cam] = fea
else:
feat_dict[id][cam] = np.vstack((feat_dict[id][cam], fea))
new_ids = [] # the rank of new person ids
new_feat_matrix = []
query_ids = [] # choose which cam_id of one person to be query
query_feats = np.array([])
gallery_feats = np.array([])
gallery_ids = []
query_cams = []
gallery_cams = []
for i, p_id in enumerate(sorted(feat_dict)):
print('p_id:', i)
new_ids.append(p_id)
new_feat_matrix.append([])
print('p_id:', i)
new_ids.append(p_id)
new_feat_matrix.append([])
for j , cam_track_id in enumerate(sorted(feat_dict[p_id])):
print('cam_track_id:', j)
#trace_feat = np.mean(feat_dict[p_id][cam_track_id], axis = 0)
trace_feat = np.copy(feat_dict[p_id][cam_track_id])
cam_id = cam_track_id[0].zfill(5)
if j == i % len(feat_dict[p_id]): #use as query
if len(query_feats) == 0:
query_feats = np.copy(trace_feat)
else:
query_feats = np.vstack((query_feats, trace_feat))
query_ids.append(p_id)
#resolve cam_id from cam_track
query_cams.append(cam_id)
else: # use as gallery
if len(gallery_feats) == 0:
gallery_feats = np.copy(trace_feat)
else:
gallery_feats = np.vstack((gallery_feats, trace_feat))
print('gallery:',gallery_feats.shape)
gallery_ids.append(p_id)
gallery_cams.append(cam_id)
if len(query_feats.shape) == 1:
query_feats = query_feats.reshape(1, query_feats.shape[0])
if len(gallery_feats.shape) == 1:
gallery_feats = gallery_feats.reshape(1, gallery_feats.shape[0])
#dist_mat = compute_dist(query_feats, gallery_feats, type = 'euclidean')
new_ids = [] # the rank of new person ids
new_feat_matrix = []
query_ids = [] # choose which cam_id of one person to be query
query_feats = np.array([])
gallery_feats = np.array([])
gallery_ids = []
query_cams = []
gallery_cams = []
for i, p_id in enumerate(sorted(feat_dict)):
print('p_id:', i)
new_ids.append(p_id)
new_feat_matrix.append([])
print('p_id:', i)
new_ids.append(p_id)
new_feat_matrix.append([])
for j , cam_track_id in enumerate(sorted(feat_dict[p_id])):
print('cam_track_id:', j)
#trace_feat = np.mean(feat_dict[p_id][cam_track_id], axis = 0)
trace_feat = np.copy(feat_dict[p_id][cam_track_id])
cam_id = cam_track_id[0].zfill(5)
if j == i % len(feat_dict[p_id]): #use as query
if len(query_feats) == 0:
query_feats = np.copy(trace_feat)
else:
query_feats = np.vstack((query_feats, trace_feat))
query_ids.append(p_id)
#resolve cam_id from cam_track
query_cams.append(cam_id)
else: # use as gallery
if len(gallery_feats) == 0:
gallery_feats = np.copy(trace_feat)
else:
gallery_feats = np.vstack((gallery_feats, trace_feat))
print('gallery:',gallery_feats.shape)
gallery_ids.append(p_id)
gallery_cams.append(cam_id)
if len(query_feats.shape) == 1:
query_feats = query_feats.reshape(1, query_feats.shape[0])
if len(gallery_feats.shape) == 1:
gallery_feats = gallery_feats.reshape(1, gallery_feats.shape[0])
#dist_mat = compute_dist(query_feats, gallery_feats, type = 'euclidean')
query_ids = np.array(query_ids)
gallery_ids = np.array(gallery_ids)
query_cams = np.array(query_cams)
gallery_cams = np.array(gallery_cams)
print('query ids', | |
import os
import shutil
import tempfile
import unittest
from pavilion import config
from pavilion.test_config import PavTest, variables
from pavilion.test_config.test import PavTestError
from pavilion.suite import Suite
class PavTestTests(unittest.TestCase):
TEST_DATA_ROOT = os.path.realpath(__file__)
TEST_DATA_ROOT = os.path.dirname(os.path.dirname(TEST_DATA_ROOT))
TEST_DATA_ROOT = os.path.join(TEST_DATA_ROOT, 'test_data')
PAV_CONFIG_PATH = os.path.join(TEST_DATA_ROOT,
'pav_config_dir',
'pavilion.yaml')
TEST_URL = 'https://github.com/lanl/Pavilion/archive/2.0.zip'
def __init__(self, *args, **kwargs):
with open(self.PAV_CONFIG_PATH) as cfg_file:
self.pav_cfg = config.PavilionConfigLoader().load(cfg_file)
self.pav_cfg.config_dirs = [os.path.join(self.TEST_DATA_ROOT,
'pav_config_dir')]
self.tmp_dir = tempfile.TemporaryDirectory()
#self.pav_cfg.working_dir = self.tmp_dir.name
self.pav_cfg.working_dir = '/tmp/{}/pav_tests/'.format(os.getlogin())
# Create the basic directories in the working directory
for path in [self.pav_cfg.working_dir,
os.path.join(self.pav_cfg.working_dir, 'builds'),
os.path.join(self.pav_cfg.working_dir, 'tests'),
os.path.join(self.pav_cfg.working_dir, 'suites'),
os.path.join(self.pav_cfg.working_dir, 'downloads')]:
if not os.path.exists(path):
os.makedirs(path, exist_ok=True)
super().__init__(*args, **kwargs)
def test_obj(self):
"""Test pavtest object initialization."""
# Initializing with a mostly blank config
config = {
# The only required param.
'name': 'blank_test'
}
# Making sure this doesn't throw errors from missing params.
PavTest(self.pav_cfg, config)
config = {
'subtest': 'st',
'name': 'test',
'build': {
'modules': ['gcc'],
'cmds': ['echo "Hello World"'],
},
'run': {
'modules': ['gcc', 'openmpi'],
'cmds': ['echo "Running dis stuff"'],
'env': {'BLARG': 'foo'},
}
}
# Make sure we can create a test from a fairly populated config.
t = PavTest(self.pav_cfg, config)
# Make sure we can recreate the object from id.
t2 = PavTest.from_id(self.pav_cfg, t.id)
# Make sure the objects are identical
# This tests the following functions
# - from_id
# - save_config, load_config
# - get_test_path
# - write_tmpl
for key in set(t.__dict__.keys()).union(t2.__dict__.keys()):
self.assertEqual(t.__dict__[key], t2.__dict__[key])
def test_setup_build_dir(self):
"""Make sure we can correctly handle all of the various archive
formats."""
base_config = {
'name': 'test',
'build': {
'modules': ['gcc'],
}
}
# Check that decompression and setup works for all accepted types.
archives = [
'src.tar.gz',
'src.xz',
# A bz2 archive
'src.extensions_dont_matter',
'src.zip',
# These archives don't have a containing directory.
'no_encaps.tgz',
'no_encaps.zip',
]
test_archives = os.path.join(self.TEST_DATA_ROOT, 'pav_config_dir',
'test_src')
original_tree = os.path.join(test_archives, 'src')
for archive in archives:
config = base_config.copy()
config['build']['source_location'] = archive
test = PavTest(self.pav_cfg, config=config)
if os.path.exists(test.build_origin):
shutil.rmtree(test.build_origin)
test._setup_build_dir(test.build_origin)
# Make sure the extracted archive is identical to the original
# (Though the containing directory will have a different name)
try:
self._cmp_tree(test.build_origin, original_tree)
except AssertionError as err:
raise AssertionError("Error extracting {}".format(archive),
*err.args)
# Check directory copying
config = base_config.copy()
config['build']['source_location'] = 'src'
test = PavTest(self.pav_cfg, config=config)
if os.path.exists(test.build_origin):
shutil.rmtree(test.build_origin)
test._setup_build_dir(test.build_origin)
self._cmp_tree(test.build_origin, original_tree)
# Test single compressed files.
files = [
'binfile.gz',
'binfile.bz2',
'binfile.xz',
]
for file in files:
config = base_config.copy()
config['build']['source_location'] = file
test = PavTest(self.pav_cfg, config=config)
if os.path.exists(test.build_origin):
shutil.rmtree(test.build_origin)
test._setup_build_dir(test.build_origin)
self._cmp_files(os.path.join(test.build_origin, 'binfile'),
os.path.join(original_tree, 'binfile'))
# Make sure extra files are getting copied over.
config = base_config.copy()
config['build']['source_location'] = 'src.tar.gz'
config['build']['extra_files'] = [
'src.tar.gz',
'src.xz',
]
test = PavTest(self.pav_cfg, config=config)
if os.path.exists(test.build_origin):
shutil.rmtree(test.build_origin)
test._setup_build_dir(test.build_origin)
for file in config['build']['extra_files']:
self._cmp_files(os.path.join(test_archives, file),
os.path.join(test.build_origin, file))
def test_src_urls(self):
base_config = {
'name': 'test',
'build': {
'modules': ['gcc'],
}
}
config = base_config.copy()
config['build']['source_location'] = self.TEST_URL
# remove existing downloads, and replace the directory.
downloads_path = os.path.join(self.pav_cfg.working_dir, 'downloads')
shutil.rmtree(downloads_path)
os.mkdir(downloads_path)
test = PavTest(self.pav_cfg, config)
if os.path.exists(test.build_origin):
shutil.rmtree(test.build_origin)
test._setup_build_dir(test.build_origin)
self._cmp_files(os.path.join(self.TEST_DATA_ROOT, '../../README.md'),
os.path.join(test.build_origin, 'README.md'))
def test_resolve_template(self):
tmpl_path = os.path.join(self.TEST_DATA_ROOT,
'resolve_template_good.tmpl')
var_man = variables.VariableSetManager()
var_man.add_var_set('sched', {
'num_nodes': '3',
'partition': 'test'
})
var_man.add_var_set('sys', {
'hostname': 'test.host.com',
'complicated': {
'a': 'yes',
'b': 'no'
}
})
script_path = tempfile.mktemp()
PavTest.resolve_template(tmpl_path, script_path, var_man)
good_path = os.path.join(self.TEST_DATA_ROOT,
'resolve_template_good.sh')
with open(script_path) as gen_script,\
open(good_path) as ver_script:
self.assertEqual(gen_script.read(), ver_script.read())
os.unlink(script_path)
for bad_tmpl in (
'resolve_template_keyerror.tmpl',
'resolve_template_bad_key.tmpl'):
script_path = tempfile.mktemp()
tmpl_path = os.path.join(self.TEST_DATA_ROOT, bad_tmpl)
with self.assertRaises(
KeyError,
msg="Error not raised on bad file '{}'".format(bad_tmpl)):
PavTest.resolve_template(tmpl_path, script_path, var_man)
if os.path.exists(script_path):
os.unlink(script_path)
script_path = tempfile.mktemp()
tmpl_path = os.path.join(self.TEST_DATA_ROOT,
'resolve_template_extra_escape.tmpl')
with self.assertRaises(
PavTestError,
msg="Error not raised on bad file '{}'".format(bad_tmpl)):
PavTest.resolve_template(tmpl_path, script_path, var_man)
if os.path.exists(script_path):
os.unlink(script_path)
def test_build(self):
"""Make sure building works."""
config1 = {
'name': 'build_test',
'build': {
'cmds': ['echo "Hello World [\x1esched.num_nodes\x1e]"'],
'source_location': 'binfile.gz',
},
}
test = PavTest(self.pav_cfg, config1)
# Test a basic build, with a gzip file and an actual build script.
self.assertTrue(test.build(), msg="Build failed")
# Make sure the build path and build origin contain softlinks to the same files.
self._cmp_tree(test.build_origin, test.build_path)
self._is_softlink_dir(test.build_path)
# We're going to time out this build on purpose, to test the code that waits for
# builds to complete.
config = {
'name': 'build_test',
'build': {
'cmds': ['sleep 10'],
'source_location': 'binfile.gz',
},
}
test = PavTest(self.pav_cfg, config)
test.BUILD_SILENT_TIMEOUT = 1
# This build should fail.
self.assertFalse(test.build(), "Build succeeded when it should have timed out.")
self.assertTrue(test.status.current().note.startswith("Build timed out"))
# Test general build failure.
config = {
'name': 'build_test',
'build': {
'cmds': ['exit 1'],
'source_location': 'binfile.gz',
},
}
# These next two test a few things:
# 1. That building, and then re-using, a build directory works.
# 2. That the test fails properly under a couple different conditions
test = PavTest(self.pav_cfg, config)
# Remove the build tree to ensure we do the build fresh.
if os.path.isdir(test.build_origin):
shutil.rmtree(test.build_origin)
# This should fail because the build exits non-zero
self.assertFalse(test.build(), "Build succeeded when it should have failed.")
self.assertTrue(test.status.current().note.startswith("Build returned a non-zero result."))
# This should fail due to a missing variable
# The build should already exist.
test2 = PavTest(self.pav_cfg, config)
self.assertFalse(test2.build(), "Build succeeded when it should have failed.")
self.assertTrue(test.status.current().note.startswith("Build returned a non-zero result."))
self.assertEqual(test.build_origin, test2.build_origin)
def test_run(self):
config1 = {
'name': 'run_test',
'run': {
'env': {
'foo': 'bar',
},
#
'cmds': ['echo "I ran, punks"'],
},
}
test = PavTest(self.pav_cfg, config1)
self.assertTrue(test.run({}), msg="Test failed to run.")
config2 = config1.copy()
config2['run']['modules'] = ['asdlfkjae', 'adjwerloijeflkasd']
test = PavTest(self.pav_cfg, config2)
self.assertFalse(
test.run({}),
msg="Test should have failed because a module couldn't be "
"loaded. {}".format(test.path))
# TODO: Make sure this is the exact reason for the failure
# (doesn't work currently).
# Make sure the test fails properly on a timeout.
config3 = {
'name': 'sleep_test',
'run': {
'cmds': ['sleep 10']
}
}
test = PavTest(self.pav_cfg, config3)
test.RUN_SILENT_TIMEOUT = 1
self.assertFalse(test.run({}),
msg="Test should have failed due to timeout. {}"
.format(test.path))
def test_suites(self):
"""Test suite creation and regeneration."""
config1 = {
'name': 'run_test',
'run': {
'env': {
'foo': 'bar',
},
#
'cmds': ['echo "I ran, punks"'],
},
}
tests = []
for i in range(3):
tests.append(PavTest(self.pav_cfg, config1))
# Make sure this doesn't explode
suite = Suite(self.pav_cfg, tests)
# Make sure we got all the tests
self.assertEqual(len(suite.tests), 3)
test_paths = [os.path.join(suite.path, p)
for p in os.listdir(suite.path)]
# And that the test paths are unique
self.assertEqual(len(set(test_paths)),
len([os.path.realpath(p) for p in test_paths]))
# Make sure the paths resolve
for link_path in test_paths:
self.assertTrue(os.path.islink(link_path))
self.assertTrue(os.path.exists(os.path.realpath(link_path)))
suite2 = Suite.from_id(self.pav_cfg, suite.id)
self.assertEqual(suite.tests.keys(), suite2.tests.keys())
self.assertEqual([t.id for t in suite.tests.values()],
[t.id for t in suite2.tests.values()])
self.assertEqual(suite.path, suite2.path)
self.assertEqual(suite.id, suite2.id)
def _is_softlink_dir(self, path):
"""Verify that a directory contains nothing but softlinks whose files exist. Directories
in a softlink dir should be real directories though."""
for base_dir, cdirs, cfiles in os.walk(path):
for cdir in cdirs:
self.assert_(os.path.isdir(os.path.join(base_dir, cdir)),
"Directory in softlink dir is a softlink (it shouldn't be).")
for file in cfiles:
file_path = os.path.join(base_dir, file)
self.assert_(os.path.islink(file_path),
"File in softlink dir '{}' is not a softlink."
.format(file_path))
target_path = os.path.realpath(file_path)
self.assert_(os.path.exists(target_path),
"Softlink target '{}' for link '{}' does not exist."
.format(target_path, file_path))
def _cmp_files(self, a_path, b_path):
"""Compare two files."""
with open(a_path, 'rb') as a_file, open(b_path, 'rb') as b_file:
self.assertEqual(a_file.read(), b_file.read(),
"File contents mismatch for {} and {}."
.format(a_path, b_path))
def _cmp_tree(self, a, b):
"""Compare two directory trees, including the contents of all the
files."""
a_walk = list(os.walk(a))
b_walk = list(os.walk(b))
# Make sure these are in the same order.
a_walk.sort()
b_walk.sort()
while a_walk and b_walk:
a_dir, a_dirs, a_files = a_walk.pop(0)
b_dir, b_dirs, b_files = b_walk.pop(0)
self.assertEqual(
sorted(a_dirs), sorted(b_dirs),
"Extracted archive subdir mismatch for '{}' {} != {}"
.format(a, a_dirs, b_dirs))
# Make sure these are in the same order.
a_files.sort()
b_files.sort()
self.assertEqual(a_files, b_files,
"Extracted archive file list mismatch. "
"{} != {}".format(a_files, b_files))
for file in a_files:
# The file names have are been verified as the same.
a_path = os.path.join(a_dir, file)
b_path = os.path.join(b_dir, file)
# We know the file exists in a, does it in b?
self.assert_(os.path.exists(b_path),
"File missing from archive b '{}'".format(b_path))
self._cmp_files(a_path, b_path)
self.assert_(not a_walk and not b_walk,
| |
there is no match between the key types supported by this index and the ones
accepted by the collection
"""
if not accepted_key_types:
return
if self.supported_key_types.isdisjoint(accepted_key_types):
raise ImplementationError(
'%s can only return keys of type %s' % (
self.__class__.__name__,
', '.join(self.supported_key_types)
)
)
    def get_filtered_keys(self, suffix, *args, **kwargs):
        """Returns the index keys to be used by the collection for the given args
        Parameters
        -----------
        suffix: str
            The suffix used in the filter that called this index
            Useful if the index supports many suffixes doing different things
        args: tuple
            All the "values" to take into account to get the indexed entries.
            In general, the real indexed value is the last entries and the previous
            ones are "additional" information, for example the sub-field name in
            case of a HashField
        kwargs: dict
            accepted_key_types: iterable
                If set, the returned key must be of one of the given redis type.
                May include: 'set', 'zset' or 'list'
                MUST be passed as a named argument
        Returns
        -------
        list of tuple
            An index may return many keys. So it's a list with, each one being
            a tuple with three entries:
            - str
                The redis key to use
            - str
                The redis type of the key
            - bool
                True if the key is a temporary key that must be deleted
                after the computation of the collection
        """
        # Abstract: concrete index classes (e.g. EqualIndex) must implement this.
        raise NotImplementedError
    def check_uniqueness(self, pk, *args):
        """For a unique index, check if the given args are not used twice
        To implement this method in subclasses, get pks for the value (via `args`)
        then call ``assert_pks_uniqueness`` (see in ``EqualIndex``)
        Parameters
        ----------
        pk: Any
            The pk of the instance for which its ok to have the value.
        args: tuple
            All the values to take into account to check the indexed entries
        Raises
        ------
        UniquenessError
            If the uniqueness is not respected.
        """
        # Abstract: concrete index classes must implement this.
        raise NotImplementedError
@property
def unique_index_name(self):
"""Get a string to describe the index in case of UniquenessError"""
return 'unique field %s.%s' % (self.model.__name__, self.field.name)
def assert_pks_uniqueness(self, pks, exclude, get_display_value):
"""Check uniqueness of pks
Parameters
-----------
pks: iterable
The pks to check for uniqueness. If more than one different,
it will raise. If only one and different than `exclude`, it will
raise too.
exclude: str
The pk that we accept to be the only one in `pks`. For example
the pk of the instance we want to check for uniqueness: we don't
want to raise if the value is the one already set for this instance
get_display_value: callable
Called to display the value in the error message.
Raises
------
UniquenessError
- If at least two different pks
- If only one pk that is not the `exclude` one
"""
pks = list(set(pks))
if len(pks) > 1:
# this may not happen !
raise UniquenessError(
"Multiple values indexed for %s: %s" % (self.unique_index_name, pks)
)
elif len(pks) == 1 and (not exclude or pks[0] != exclude):
raise UniquenessError(
'Value "%s" already indexed for %s (for instance %s)' % (
get_display_value(), self.unique_index_name, pks[0]
)
)
    def add(self, pk, *args, **kwargs):
        """Add the instance tied to the field for the given "value" (via `args`) to the index
        Parameters
        ----------
        pk : Any
            The primary key of the instance we want to add to the index
        args: tuple
            All the values to take into account to define the index entry
        kwargs: dict
            check_uniqueness: Optional[bool]
                When ``True`` (the default), if the index is unique, the uniqueness will
                be checked before indexing
                If passed, it MUST be passed as a named argument
        Raises
        ------
        UniquenessError
            If `check_uniqueness` is ``True``, the index unique, and the uniqueness not respected.
        """
        # Abstract: concrete index classes must implement this.
        raise NotImplementedError
    def remove(self, pk, *args, **kwargs):
        """Remove the instance tied to the field for the given "value" (via `args`) from the index
        Parameters
        ----------
        pk : Any
            The primary key of the instance we want to remove from the index
        args: tuple
            All the values to take into account to define the index entry
        """
        # Abstract: concrete index classes must implement this.
        raise NotImplementedError
    def get_all_storage_keys(self):
        """Returns the keys to be removed by `clear` in aggressive mode
        Returns
        -------
        set
            The set of all keys that matches the keys used by this index.
        """
        # Abstract: concrete index classes must implement this.
        raise NotImplementedError
    def clear(self, chunk_size=1000, aggressive=False):
        """Will deindex all the value for the current field
        Parameters
        ----------
        chunk_size: int
            Default to 1000, it's the number of instances to load at once if not in aggressive mode.
        aggressive: bool
            Default to ``False``. When ``False``, the actual collection of instances will
            be ran through to deindex all the values.
            But when ``True``, the database keys will be scanned to find keys that matches the
            pattern of the keys used by the index. This is a lot faster and may find forgotten keys.
            But may also find keys not related to the index.
            Should be set to ``True`` if you are not sure about the already indexed values.
        Examples
        --------
        >>> MyModel.get_field('myfield').get_index().clear()
        """
        if aggressive:
            # Low-level cleanup: delete every matching key in one pipeline.
            keys = self.get_all_storage_keys()
            with self.model.database.pipeline(transaction=False) as pipe:
                for key in keys:
                    pipe.delete(key)
                pipe.execute()
        else:
            # Walk the collection one page at a time and deindex each value.
            start = 0
            while True:
                instances = self.model.collection().sort().instances(lazy=True)[start:start + chunk_size]
                for instance in instances:
                    field = instance.get_instance_field(self.field.name)
                    value = field.proxy_get()
                    if value is not None:
                        field.deindex(value, only_index=self)
                if len(instances) < chunk_size:  # not enough data, it means we are done
                    break
                start += chunk_size
def rebuild(self, chunk_size=1000, aggressive_clear=False):
"""Rebuild the whole index for this field.
Parameters
----------
chunk_size: int
Default to 1000, it's the number of instances to load at once.
aggressive_clear: bool
Will be passed to the `aggressive` argument of the `clear` method.
If `False`, all values will be normally deindexed. If `True`, the work
will be done at low level, scanning for keys that may match the ones used by the index
Examples
--------
>>> MyModel.get_field('myfield').get_index().rebuild()
"""
self.clear(chunk_size=chunk_size, aggressive=aggressive_clear)
start = 0
while True:
instances = self.model.collection().sort().instances(lazy=True)[start:start + chunk_size]
for instance in instances:
field = instance.get_instance_field(self.field.name)
value = field.proxy_get()
if value is not None:
field.index(value, only_index=self)
if len(instances) < chunk_size: # not enough data, it means we are done
break
start += chunk_size
    @classmethod
    def _field_model_ready(cls, model, field):
        """Called when a model is ready, for each field..
        May be used by a subclass to handle indexes related but not directly tied to this field
        Parameters
        ----------
        model : type
            The model class that just became ready
        field : RedisField
            The field for which the indexes are ready
        """
        # Default implementation: nothing to do.
        pass
def _unique_key(self, prefix=None):
"""
Create a unique key.
"""
prefix_parts = [self.model._name, '__index__', self.__class__.__name__.lower()]
if prefix:
prefix_parts.append(prefix)
return unique_key(
self.connection,
prefix=make_key(*prefix_parts)
)
class EqualIndex(BaseIndex):
    """Default simple equal index."""
    # Filter suffixes this index answers: plain/eq for equality, 'in' for
    # membership in a set of values.
    handled_suffixes = {None, 'eq', 'in'}
    # This index is able to enforce uniqueness of the indexed values.
    handle_uniqueness = True
    # Index entries are stored in plain redis sets.
    supported_key_types = {'set'}
def union_filtered_in_keys(self, dest_key, *source_keys):
"""Do a union of the given `source_keys` at the redis level, into `dest_key`
Parameters
----------
dest_key : str
The key where to store the result of the union
source_keys : str
The keys to union
"""
self.connection.sunionstore(dest_key, source_keys)
    def get_filtered_keys(self, suffix, *args, **kwargs):
        """Return the set used by the index for the given "value" (`args`)
        For the parameters, see ``BaseIndex.get_filtered_keys``
        """
        self._check_key_accepted_key_types(kwargs.get('accepted_key_types'))
        # special "in" case: we get n keys and make an unionstore with them then return this key
        if suffix == 'in':
            args = list(args)
            # The last arg is the iterable of values to match.
            values = set(args.pop())
            if not values:
                return []  # no keys
            # One storage key per candidate value; the remaining args are
            # "additional" info (e.g. sub-field name).
            in_keys = [
                self.get_storage_key(transform_value=False, *(args+[value]))
                for value in values
            ]
            # Union the per-value sets into a temporary key the collection
            # must delete afterwards (hence the True flag below).
            tmp_key = self._unique_key('tmp')
            self.union_filtered_in_keys(tmp_key, *in_keys)
            return [(tmp_key, 'set', True)]
        # do not transform because we already have the value we want to look for
        return [(self.get_storage_key(transform_value=False, *args), 'set', False)]
def get_storage_key(self, *args, **kwargs):
"""Return the redis key where to store the index for the given "value" (`args`)
For this index, we store all PKs having the same value for a field in the same
set. Key has this form:
model-name:field-name:sub-field-name:normalized-value
The ':sub-field-name part' is repeated for each entry in *args that is not the final value
Parameters
-----------
kwargs: dict
transform_value: bool
Default to ``True``. Tell the call to ``normalize_value`` to transform
the value or not
args: tuple
| |
"""Contains definitions for the preactivation form of Residual Networks.
Residual networks (ResNets) were originally proposed in:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
The full preactivation 'v2' ResNet variant implemented in this module was
introduced by:
[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027
The key difference of the full preactivation 'v2' variant compared to the
'v1' variant in [1] is the use of batch normalization before every weight layer
rather than after.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import sys, os, time, pickle
from argparse import ArgumentParser
import numpy as np
import logging
# Batch-normalization hyperparameters (see batch_norm_relu below).
_BATCH_NORM_DECAY = 0.9
_BATCH_NORM_EPSILON = 1e-4
# We use a weight decay of 0.0002, which performs better than the 0.0001 that
# was originally suggested.
_WEIGHT_DECAY = 2e-4
_MOMENTUM = 0.9
# for the larger networks (n>=9), we need to adjust pythons recursion limit
sys.setrecursionlimit(10000)
# Number of images in the ImageNet (ILSVRC) training split.
num_of_train_images = 1281167
class Logger(object):
    """Bundles three file-backed loggers: free-form messages, accuracy
    stats, and loss values, each written to its own run-specific file."""

    def __init__(self, k, lr, run):
        formatter = logging.Formatter('%(message)s')
        suffix = '{}_{}_{}.txt'.format(k, lr, run)
        self.log = self._file_logger('Log Message', 'log_' + suffix, formatter)
        self.stat = self._file_logger('Log Stat', 'stat_' + suffix, formatter)
        self.loss = self._file_logger('Log Loss', 'statloss_' + suffix, formatter)

    @staticmethod
    def _file_logger(name, filename, formatter):
        # One DEBUG-level logger with a single message-only file handler.
        logger = logging.getLogger(name)
        logger.setLevel(logging.DEBUG)
        handler = logging.FileHandler(filename)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
        return logger

    def log_message(self, message):
        self.log.info(message)

    def log_stat(self, message):
        self.stat.info(message)

    def log_loss(self, message):
        self.loss.info(message)
def unpickle(file):
    """Load and return the pickled object stored in the file at path `file`.

    NOTE(review): pickle is unsafe on untrusted input; these batch files are
    assumed to come from a trusted local dataset.
    """
    with open(file, 'rb') as fo:
        data = pickle.load(fo)  # renamed from `dict`, which shadowed the builtin
    return data
# Mean image can be extracted from any training data file
def load_validation_data(data_folder, mean_image, img_size=32):
    """Load the validation split, normalize it, and reshape to NCHW.

    `mean_image` is the (already /255-scaled) training mean to subtract.
    Returns a dict with 'X_test' (float32, NCHW) and 'Y_test' (int32).
    """
    raw = unpickle(os.path.join(data_folder, 'val_data'))
    images = raw['data'] / np.float32(255)
    # Labels are indexed from 1 in the pickle files; shift to 0-based.
    labels = np.array([label - 1 for label in raw['labels']])
    # Remove the mean computed from the training data.
    images -= mean_image
    plane = img_size * img_size
    # Split the flat RGB planes, then go HWC -> CHW.
    images = np.dstack(
        (images[:, :plane], images[:, plane:2 * plane], images[:, 2 * plane:]))
    images = images.reshape(
        (images.shape[0], img_size, img_size, 3)).transpose(0, 3, 1, 2)
    return dict(
        X_test=images.astype('float32'),
        Y_test=labels.astype('int32'))
def load_databatch(data_folder, idx, img_size=32):
    """Load training batch `idx`, normalize, reshape to NCHW, and mirror.

    Returns a dict with 'X_train' (float32, originals followed by their
    horizontal mirrors), 'Y_train' (int32), and the /255-scaled 'mean' image.
    """
    raw = unpickle(os.path.join(data_folder, 'train_data_batch_') + str(idx))
    images = raw['data'] / np.float32(255)
    mean_image = raw['mean'] / np.float32(255)
    # Labels are indexed from 1 in the pickle files; shift to 0-based.
    labels = [label - 1 for label in raw['labels']]
    images -= mean_image
    plane = img_size * img_size
    # Split the flat RGB planes, then go HWC -> CHW.
    images = np.dstack(
        (images[:, :plane], images[:, plane:2 * plane], images[:, 2 * plane:]))
    images = images.reshape(
        (images.shape[0], img_size, img_size, 3)).transpose(0, 3, 1, 2)
    # Augment with horizontally mirrored copies (labels unchanged).
    mirrored = images[:, :, :, ::-1]
    X_train = np.concatenate((images, mirrored), axis=0)
    Y_train = np.concatenate((labels, labels), axis=0)
    return dict(
        X_train=X_train.astype('float32'),
        Y_train=Y_train.astype('int32'),
        mean=mean_image)
def batch_norm_relu(inputs, is_training, data_format):
    """Batch-normalize `inputs` and apply a ReLU."""
    channel_axis = 1 if data_format == 'channels_first' else 3
    # fused=True gives a significant performance boost, see
    # https://www.tensorflow.org/performance/performance_guide#common_fused_ops
    normalized = tf.layers.batch_normalization(
        inputs=inputs, axis=channel_axis,
        momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON,
        center=True, scale=True, training=is_training, fused=True)
    return tf.nn.relu(normalized)
def fixed_padding(inputs, kernel_size, data_format):
    """Pads the input along the spatial dimensions independently of input size.

    Args:
      inputs: A tensor of size [batch, channels, height_in, width_in] or
        [batch, height_in, width_in, channels] depending on data_format.
      kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
        Should be a positive integer.
      data_format: The input format ('channels_last' or 'channels_first').

    Returns:
      A tensor with the same format as the input with the data either intact
      (if kernel_size == 1) or padded (if kernel_size > 1).
    """
    pad_total = kernel_size - 1
    pad_beg = pad_total // 2
    spatial = [pad_beg, pad_total - pad_beg]
    no_pad = [0, 0]
    if data_format == 'channels_first':
        paddings = [no_pad, no_pad, spatial, spatial]
    else:
        paddings = [no_pad, spatial, spatial, no_pad]
    return tf.pad(inputs, paddings)
def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format):
    """2-D convolution, no bias, He (variance-scaling) initialization.

    NOTE(review): despite the name, this uses padding='SAME' and never calls
    `fixed_padding`; for strides > 1 SAME padding is input-size dependent --
    confirm this is intended.
    """
    return tf.layers.conv2d(
        inputs=inputs, filters=filters, kernel_size=kernel_size, strides=strides,
        padding='SAME', use_bias=False,
        kernel_initializer=tf.variance_scaling_initializer(scale=2.0, distribution='normal'),
        data_format=data_format)
def building_block(inputs, filters, is_training, projection_shortcut, strides,
                   dropoutrate, data_format):
    """Standard building block for residual networks with BN before convolutions.

    Args:
      inputs: A tensor of size [batch, channels, height_in, width_in] or
        [batch, height_in, width_in, channels] depending on data_format.
      filters: The number of filters for the convolutions.
      is_training: A Boolean for whether the model is in training or inference
        mode. Needed for batch normalization.
      projection_shortcut: The function to use for projection shortcuts (typically
        a 1x1 convolution when downsampling the input).
      strides: The block's stride. If greater than 1, this block will ultimately
        downsample the input.
      dropoutrate: Rate for the dropout layer applied between the two
        convolutions (only active when is_training is True).
      data_format: The input format ('channels_last' or 'channels_first').

    Returns:
      The output tensor of the block.
    """
    shortcut = inputs
    inputs = batch_norm_relu(inputs, is_training, data_format)

    # The projection shortcut should come after the first batch norm and ReLU
    # since it performs a 1x1 convolution.
    if projection_shortcut is not None:
        shortcut = projection_shortcut(inputs)

    inputs = conv2d_fixed_padding(
        inputs=inputs, filters=filters, kernel_size=3, strides=strides,
        data_format=data_format)

    inputs = batch_norm_relu(inputs, is_training, data_format)
    # Dropout between the two convolutions, as in wide residual networks.
    inputs = tf.layers.dropout(inputs=inputs, rate=dropoutrate, training=is_training)
    inputs = conv2d_fixed_padding(
        inputs=inputs, filters=filters, kernel_size=3, strides=1,
        data_format=data_format)

    return inputs + shortcut
def bottleneck_block(inputs, filters, is_training, projection_shortcut,
                     strides, data_format):
    """Bottleneck block variant for residual networks with BN before convolutions.

    Note: unlike `building_block`, this variant applies no dropout.

    Args:
      inputs: A tensor of size [batch, channels, height_in, width_in] or
        [batch, height_in, width_in, channels] depending on data_format.
      filters: The number of filters for the first two convolutions. Note that the
        third and final convolution will use 4 times as many filters.
      is_training: A Boolean for whether the model is in training or inference
        mode. Needed for batch normalization.
      projection_shortcut: The function to use for projection shortcuts (typically
        a 1x1 convolution when downsampling the input).
      strides: The block's stride. If greater than 1, this block will ultimately
        downsample the input.
      data_format: The input format ('channels_last' or 'channels_first').

    Returns:
      The output tensor of the block.
    """
    shortcut = inputs
    inputs = batch_norm_relu(inputs, is_training, data_format)

    # The projection shortcut should come after the first batch norm and ReLU
    # since it performs a 1x1 convolution.
    if projection_shortcut is not None:
        shortcut = projection_shortcut(inputs)

    # 1x1 reduce -> 3x3 (strided) -> 1x1 expand to 4*filters.
    inputs = conv2d_fixed_padding(
        inputs=inputs, filters=filters, kernel_size=1, strides=1,
        data_format=data_format)

    inputs = batch_norm_relu(inputs, is_training, data_format)
    inputs = conv2d_fixed_padding(
        inputs=inputs, filters=filters, kernel_size=3, strides=strides,
        data_format=data_format)

    inputs = batch_norm_relu(inputs, is_training, data_format)
    inputs = conv2d_fixed_padding(
        inputs=inputs, filters=4 * filters, kernel_size=1, strides=1,
        data_format=data_format)

    return inputs + shortcut
def block_group(inputs, filters, block_fn, blocks, strides, dropoutrate, is_training, name,
                data_format):
    """Creates one layer of blocks for the ResNet model.

    Args:
      inputs: A tensor of size [batch, channels, height_in, width_in] or
        [batch, height_in, width_in, channels] depending on data_format.
      filters: The number of filters for the first convolution of the layer.
      block_fn: The block to use within the model, either `building_block` or
        `bottleneck_block`.
      blocks: The number of blocks contained in the layer.
      strides: The stride to use for the first convolution of the layer. If
        greater than 1, this layer will ultimately downsample the input.
      dropoutrate: Dropout rate forwarded to each block (used by
        `building_block`).
      is_training: Either True or False, whether we are currently training the
        model. Needed for batch norm.
      name: A string name for the tensor output of the block layer.
      data_format: The input format ('channels_last' or 'channels_first').

    Returns:
      The output tensor of the block layer.
    """
    # Bottleneck blocks end with 4x the number of filters as they start with
    filters_out = 4 * filters if block_fn is bottleneck_block else filters

    def projection_shortcut(inputs):
        # NOTE(review): uses a Xavier initializer while conv2d_fixed_padding
        # uses He (variance scaling) -- confirm this asymmetry is intended.
        return tf.layers.conv2d(
            inputs=inputs, filters=filters_out, kernel_size=1, strides=strides,
            padding='SAME', use_bias=False,
            kernel_initializer=tf.contrib.layers.xavier_initializer(uniform=True),
            data_format=data_format)

    # Only the first block per block_layer uses projection_shortcut and strides
    inputs = block_fn(inputs, filters, is_training, projection_shortcut, strides,
                      dropoutrate, data_format)

    for _ in range(1, blocks):
        inputs = block_fn(inputs, filters, is_training, None, 1,
                          dropoutrate, data_format)

    return tf.identity(inputs, name)
# ##################### Build the neural network model #######################
def ImgNet_downsampled_WRN_v2_generator(depth=16, k=2, num_classes=1000, data_format=None, dropoutrate=0, img_size=32):
"""Generator for downsampled ImageNet WRN v2 models.
Args:
depth: A single integer for the size of the WRN model.
k: The filter multiplicative factor which determines the width of the network.
num_classes: The number of possible classes for image classification.
data_format: The input format ('channels_last', 'channels_first', or None).
If set to None, the format is dependent on whether a GPU is available.
Returns:
The model function that takes in `inputs` and `is_training` and
returns the output tensor of the WRN model.
Raises:
ValueError: If `depth` is invalid.
"""
if | |
<gh_stars>0
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name: base40.py
# Purpose: Base40/Music21 Pitch/Interval Translator
#
# Authors: <NAME>
#
# Copyright:    Copyright © 2009-2010 Michael Scott Cuthbert and the music21 Project
# License: LGPL or BSD, see license.txt
#-------------------------------------------------------------------------------
'''
Implementation of Walter Hewlett's base40 system for musical intervals.
'''
import unittest
from music21 import exceptions21
from music21 import common
from music21 import interval
from music21 import note
from music21 import pitch
# Key => Base40 pitch number
# Value => Music21 Pitch name
# Entries mapped to None are the unused slots that occur wherever two
# letter names lie a whole step apart (see base40ToPitch).
base40Equivalent = {1: 'C--',
                    2: 'C-',
                    3: 'C',
                    4: 'C#',
                    5: 'C##',
                    6: None,
                    7: 'D--',
                    8: 'D-',
                    9: 'D',
                    10: 'D#',
                    11: 'D##',
                    12: None,
                    13: 'E--',
                    14: 'E-',
                    15: 'E',
                    16: 'E#',
                    17: 'E##',
                    18: 'F--',
                    19: 'F-',
                    20: 'F',
                    21: 'F#',
                    22: 'F##',
                    23: None,
                    24: 'G--',
                    25: 'G-',
                    26: 'G',
                    27: 'G#',
                    28: 'G##',
                    29: None,
                    30: 'A--',
                    31: 'A-',
                    32: 'A',
                    33: 'A#',
                    34: 'A##',
                    35: None,
                    36: 'B--',
                    37: 'B-',
                    38: 'B',
                    39: 'B#',
                    40: 'B##'}
# Key => Music21 Pitch name
# Value => Base40 pitch number
# (inverse of base40Equivalent; the numbers skipped below are the
# unassigned None slots of base40Equivalent)
base40Representation = {'C--': 1,
                        'C-' : 2,
                        'C'  : 3,
                        'C#' : 4,
                        'C##': 5,
                        # 6 is unassigned
                        'D--': 7,
                        'D-' : 8,
                        'D'  : 9,
                        'D#' : 10,
                        'D##': 11,
                        # 12 is unassigned
                        'E--': 13,
                        'E-' : 14,
                        'E'  : 15,
                        'E#' : 16,
                        'E##': 17,
                        'F--': 18,
                        'F-' : 19,
                        'F'  : 20,
                        'F#' : 21,
                        'F##': 22,
                        # 23 is unassigned
                        'G--': 24,
                        'G-' : 25,
                        'G'  : 26,
                        'G#' : 27,
                        'G##': 28,
                        # 29 is unassigned
                        'A--': 30,
                        'A-' : 31,
                        'A'  : 32,
                        'A#' : 33,
                        'A##': 34,
                        # 35 is unassigned
                        'B--': 36,
                        'B-' : 37,
                        'B'  : 38,
                        'B#' : 39,
                        'B##': 40,
                        }
# Key => Base40 delta (difference between two Base40 pitch numbers)
# Value => Corresponding music21 Interval name
# Deltas absent from this table have no Base40 interval (see
# base40DeltaToInterval, which raises for them).
base40IntervalTable = {0: 'P1',
                       1: 'A1',
                       4: 'd2',
                       5: 'm2',
                       6: 'M2',
                       7: 'A2',
                       10: 'd3',
                       11: 'm3',
                       12: 'M3',
                       13: 'A3',
                       16: 'd4',
                       17: 'P4',
                       18: 'A4',
                       22: 'd5',
                       23: 'P5',
                       24: 'A5',
                       27: 'd6',
                       28: 'm6',
                       29: 'M6',
                       30: 'A6',
                       33: 'd7',
                       34: 'm7',
                       35: 'M7',
                       36: 'A7',
                       39: 'd8',
                       40: 'P8',  # wraps around...
                       }
def base40DeltaToInterval(delta):
    '''
    Returns a music21 Interval between two Base40 pitch numbers
    given the delta (difference) between them.

    Raises a Base40 Exception if the interval is not handled by Base40.
    Base40 can only handle major, minor, perfect, augmented,
    and diminished intervals. Although not for certain, it seems
    that the engineers that designed this system assumed that
    other intervals (doubly augmented intervals, for instance)
    would be of a very rare occurrence, and extreme intervals
    which would trigger an incorrect answer (C-- to C##, for
    instance, would return a diminished second, even though it's
    a quadruple augmented unison) just would not occur.

    >>> musedata.base40.base40DeltaToInterval(4)
    <music21.interval.Interval d2>
    >>> musedata.base40.base40DeltaToInterval(11)
    <music21.interval.Interval m3>
    >>> musedata.base40.base40DeltaToInterval(23)
    <music21.interval.Interval P5>
    >>> musedata.base40.base40DeltaToInterval(-23)
    <music21.interval.Interval P-5>
    >>> musedata.base40.base40DeltaToInterval(52)
    <music21.interval.Interval M10>
    >>> musedata.base40.base40DeltaToInterval(-52)
    <music21.interval.Interval M-10>
    >>> musedata.base40.base40DeltaToInterval(77)
    Traceback (most recent call last):
    music21.musedata.base40.Base40Exception: Interval not handled by Base40 37
    '''
    direction = 1
    if delta < 0:
        direction = -1
    # reduce to a within-one-octave delta before the table lookup
    simpleDelta = abs(delta) % 40

    try:
        simpleIntervalName = base40IntervalTable[simpleDelta]
        simpleInterval = interval.Interval(simpleIntervalName)
    except KeyError:
        raise Base40Exception('Interval not handled by Base40 ' + str(simpleDelta))

    numOctaves = abs(delta) // 40

    sgi = simpleInterval.generic  # Simple generic interval
    # Compound generic interval: re-apply the octaves and the direction
    cgi = interval.GenericInterval(direction * (sgi.value + 7 * numOctaves))
    sdi = simpleInterval.diatonic  # Simple diatonic interval

    newInterval = interval.convertSpecifier(sdi.specifier)[1] + str(cgi.value)
    return interval.Interval(newInterval)
def base40ToPitch(base40Num):
    '''
    Converts a Base40 pitch number into a music21 Pitch.
    The Base40 number is octave specific.

    Raises a Base40 Exception if the Base40 pitch number given doesn't
    have an associated pitch name. There is one unassigned number
    each time the interval between two letters is a whole step.

    >>> musedata.base40.base40ToPitch(1)
    <music21.pitch.Pitch C--1>
    >>> musedata.base40.base40ToPitch(40)
    <music21.pitch.Pitch B##1>
    >>> musedata.base40.base40ToPitch(23)
    Traceback (most recent call last):
    music21.musedata.base40.Base40Exception: Pitch name not assigned to this Base40 number 23
    >>> musedata.base40.base40ToPitch(186)
    <music21.pitch.Pitch G5>
    '''
    # Integer division is required here: the original `/ 40` produced a
    # float octave under Python 3's true division (e.g. octave 5.625 for
    # 186 instead of 5). divmod gives octave offset and 0-based table slot.
    octaveOffset, tableIndex = divmod(base40Num - 1, 40)
    pitchName = base40Equivalent[tableIndex + 1]
    if pitchName is None:
        raise Base40Exception(
            'Pitch name not assigned to this Base40 number ' + str(base40Num))
    p = pitch.Pitch()
    p.octave = octaveOffset + 1
    p.name = pitchName
    return p
def pitchToBase40(pitchToConvert):
    '''
    Converts a pitch string or a music21 Pitch into a Base40
    pitch number. The Base40 number is octave specific.

    Raises a Base40 Exception if the pitch to convert is outside the set
    of pitches that Base40 can handle; for example, half flats
    and half sharps or triple flats and triple sharps.

    >>> musedata.base40.pitchToBase40(pitch.Pitch('C--5'))
    161
    >>> musedata.base40.pitchToBase40('F##4')
    142
    >>> musedata.base40.pitchToBase40('F###4')
    Traceback (most recent call last):
    music21.musedata.base40.Base40Exception: Base40 cannot handle this pitch F###4
    '''
    if isinstance(pitchToConvert, str):
        pitchToConvert = pitch.Pitch(pitchToConvert)
    # EAFP: single dict lookup instead of `in` check plus lookup;
    # the stale commented-out ValueError fallback was removed.
    try:
        tableNum = base40Representation[pitchToConvert.name]
    except KeyError:
        raise Base40Exception('Base40 cannot handle this pitch ' +
                              pitchToConvert.nameWithOctave)
    return 40 * (pitchToConvert.octave - 1) + tableNum
def base40Interval(base40NumA, base40NumB):
    '''
    Returns a music21 Interval between two base40 pitch
    numbers, using their delta (difference) as defined
    in Base40. The interval provided is without direction.

    Raises a Base40 Exception if the delta doesn't correspond
    to an interval in Base40, or if either base40 pitch
    number doesn't correspond to a pitch name.

    >>> musedata.base40.base40Interval(163, 191)
    <music21.interval.Interval m6>

    >>> musedata.base40.base40Interval(186, 174) #Descending M3
    <music21.interval.Interval M-3>

    Base40 has limitations for intervals smaller than diminished or bigger than augmented.

    >>> musedata.base40.base40Interval(1, 5)
    Traceback (most recent call last):
    music21.musedata.base40.Base40Exception: Base40 cannot compute interval between 1 and 5.

    >>> musedata.base40.base40Interval(1, 3)
    Traceback (most recent call last):
    music21.musedata.base40.Base40Exception: Interval not handled by Base40 2

    >>> musedata.base40.base40Interval(2, 6)
    Traceback (most recent call last):
    music21.musedata.base40.Base40Exception: Pitch name not assigned to this Base40 number 6
    Interval does not exist

    >>> musedata.base40.base40Interval(12, 6)
    Traceback (most recent call last):
    music21.musedata.base40.Base40Exception: Pitch name not assigned to these Base40 numbers
    12 and 6 Interval does not exist
    '''
    # octave-reduce both numbers (to 1..40) to look up their pitch names
    pitchA = base40Equivalent[(base40NumA - 1) % 40 + 1]
    pitchB = base40Equivalent[(base40NumB - 1) % 40 + 1]
    delta = base40NumB - base40NumA
    if pitchA is None and pitchB is None:
        raise Base40Exception('Pitch name not assigned to these Base40 numbers ' +
                              str(base40NumA) + ' and ' + str(base40NumB) + ' Interval does not exist')
    elif pitchA is None:
        raise Base40Exception('Pitch name not assigned to this Base40 number ' +
                              str(base40NumA) + ' Interval does not exist')
    elif pitchB is None:
        raise Base40Exception('Pitch name not assigned to this Base40 number ' +
                              str(base40NumB) + ' Interval does not exist')
    elif delta > 3 and pitchA[0] == pitchB[0]:
        # same letter more than 3 slots apart (e.g. C-- to C##): an
        # alteration too extreme for Base40 to express -- see docstring
        raise Base40Exception('Base40 cannot compute interval between ' +
                              str(base40NumA) + ' and ' + str(base40NumB) + '.')
    return base40DeltaToInterval(delta)
def base40ActualInterval(base40NumA, base40NumB):
    '''
    Calculates a music21 Interval between two Base40 pitch
    numbers, as calculated using the music21.interval module.

    Raises a Base40 Exception if (a) Either of the Base40 pitch
    numbers does not correspond to a pitch name or (b) If
    an unusual interval is encountered that can't be handled
    by music21.

    >>> musedata.base40.base40ActualInterval(163, 191)
    <music21.interval.Interval m6>
    >>> musedata.base40.base40ActualInterval(186, 174) #Descending M3
    <music21.interval.Interval M-3>
    >>> musedata.base40.base40ActualInterval(1, 5)
    <music21.interval.Interval AAAA1>
    >>> musedata.base40.base40ActualInterval(1, 3)
    <music21.interval.Interval AA1>
    >>> musedata.base40.base40ActualInterval(2, 6)
    Traceback (most recent call last):
    music21.musedata.base40.Base40Exception: Pitch name not assigned to this Base40 number 6

    OMIT_FROM_DOCS

    >>> musedata.base40.base40ActualInterval(12, 6)
    Traceback (most recent call last):
    music21.musedata.base40.Base40Exception: Pitch name not assigned to this Base40 number 12
    '''
    # Build two Notes from the Base40 numbers; base40ToPitch raises for
    # unassigned numbers (the first argument is checked first).
    startNote = note.Note()
    startNote.pitch = base40ToPitch(base40NumA)
    endNote = note.Note()
    endNote.pitch = base40ToPitch(base40NumB)
    try:
        return interval.notesToInterval(startNote, endNote)
    except IndexError:
        raise Base40Exception('Unusual interval- Limitation of music21.interval')
def _quickEnharmonicString(nameStr, direction='up', allowDoubleAccidentals=True):
'''
Helper function for quickHigherEnharmonicString and quickLowerEnharmonicString
'''
if direction == 'up':
addNum = 4
elif direction == 'down':
addNum = -4
else:
raise Base40Exception("Not a valid direction, {}".format(direction))
enharmonics = []
if common.isNum(nameStr):
base40num = nameStr
nameStr = base40Equivalent.get(base40num, None)
if nameStr is None:
base40num = None
else:
base40num = base40Representation.get(nameStr, None)
while base40num is not None:
base40num = (base40num + addNum) % 40
if base40num == 0:
base40num = 40
base40str = base40Equivalent.get(base40num, None)
if allowDoubleAccidentals is False and base40str is not None and len(base40str) > 2:
| |
'description'
question = self._create_mc_question(description)
# link a skill to the question
skill_graph = SkillGraph.load()
skill = skill_graph.add(Skill.build(SKILL_NAME, SKILL_DESC))
question.dict[SKILLS_KEY] = [skill.id]
models.QuestionDAO.save(question)
skill_map = SkillMap.load(self.course)
questions = skill_map.get_questions_for_skill(skill)
self.assertEqual(1, len(questions))
self.assertEqual(description, questions[0].description)
# delete the skill
actions.login(ADMIN_EMAIL)
xsrf_token = crypto.XsrfTokenManager.create_xsrf_token(self.XSRF_TOKEN)
delete_url = '%s?%s' % (
self.URL,
urllib.urlencode({
'key': skill.id,
'xsrf_token': cgi.escape(xsrf_token)
}))
response = self.delete(delete_url)
self.assertEquals(200, response.status_int)
# assert question is not link to the deleted skill
question = models.QuestionDAO.load(question.id)
assert (
SKILLS_KEY not in question.dict or
not question.dict[SKILLS_KEY])
def test_get_skill_with_questions(self):
    """Get a skill mapped to two questions."""
    # Map one skill onto two freshly created questions.
    skill_graph = SkillGraph.load()
    skill = skill_graph.add(Skill.build(SKILL_NAME, SKILL_DESC))
    questions = [
        self._create_mc_question('description 1'),
        self._create_mc_question('description 2')]
    for question in questions:
        question.dict[SKILLS_KEY] = [skill.id]
    models.QuestionDAO.save_all(questions)

    # Fetch the skill list through the REST endpoint.
    actions.login(ADMIN_EMAIL)
    response = transforms.loads(self.get(self.URL).body)
    self.assertEqual(200, response['status'])

    skills = transforms.loads(response['payload'])['skills']
    self.assertEqual(1, len(skills))

    # The single skill must be linked to both questions.
    self.assertEqual(2, len(skills[0]['questions']))
def test_delete_prerequisites(self):
    """A PUT with an empty prerequisite list removes existing links."""
    graph = SkillGraph.load()
    source = graph.add(Skill.build(SKILL_NAME, SKILL_DESC))
    target = graph.add(Skill.build(SKILL_NAME_2, SKILL_DESC_2))
    graph.add_prerequisite(target.id, source.id)

    # Reload and confirm the link was persisted.
    graph = SkillGraph.load()
    self.assertEqual(1, len(graph.prerequisites(target.id)))

    # Update the target skill with no prerequisites.
    actions.login(ADMIN_EMAIL)
    xsrf_token = crypto.XsrfTokenManager.create_xsrf_token(self.XSRF_TOKEN)
    response = self._put(
        version='1',
        name=target.name,
        description=target.description,
        prerequisite_ids=[],
        xsrf_token=xsrf_token,
        key=target.id)
    self.assertEqual(200, response['status'])
    self.assertEqual('Saved.', response['message'])

    # The prerequisite link must be gone after reload.
    graph = SkillGraph.load()
    self.assertEqual(0, len(graph.prerequisites(target.id)))
class SkillMapHandlerTests(actions.TestBase):
    """Tests for the dashboard skill-map pages (skills table and graph)."""

    ADMIN_EMAIL = '<EMAIL>'
    COURSE_NAME = 'skill_map_course'
    SKILL_MAP_URL = 'modules/skill_map?action=edit_skills_table'
    GRAPH_URL = 'modules/skill_map?action=edit_dependency_graph'

    def setUp(self):
        super(SkillMapHandlerTests, self).setUp()
        self.base = '/' + self.COURSE_NAME
        context = actions.simple_add_course(
            self.COURSE_NAME, self.ADMIN_EMAIL, 'Skill Map Course')
        self.old_namespace = namespace_manager.get_namespace()
        namespace_manager.set_namespace('ns_%s' % self.COURSE_NAME)

        # Minimal course fixture: one unit containing one lesson.
        self.course = courses.Course(None, context)
        self.unit = self.course.add_unit()
        self.unit.title = 'Unit 1'
        self.lesson = self.course.add_lesson(self.unit)
        self.lesson.title = 'Lesson 1'
        self.course.save()

        actions.login(self.ADMIN_EMAIL, is_admin=True)

    def tearDown(self):
        del sites.Registry.test_overrides[sites.GCB_COURSES_CONFIG.name]
        namespace_manager.set_namespace(self.old_namespace)
        super(SkillMapHandlerTests, self).tearDown()

    def test_rejected_if_not_authorized(self):
        # Non-admin users are redirected away from the skills table.
        actions.login('<EMAIL>')
        response = self.get(self.SKILL_MAP_URL)
        self.assertEqual(302, response.status_int)

    def test_dependency_graph_tab(self):
        response = self.get(self.GRAPH_URL)
        self.assertEqual(200, response.status_int)

        dom = self.parse_html_string_to_soup(response.body)
        graph_div = dom.select('.graph')[0]
        assert len(dom.select('.gcb-button-toolbar'))

        # verify that skills is the active tab for the skills graph
        skills_tab = dom.select('a#menu-item__edit__skills_table')[0]
        assert 'gcb-active' in skills_tab.get('class')

    def test_dependency_graph(self):
        # Two skills where tgt_skill has src_skill as a prerequisite.
        skill_graph = SkillGraph.load()
        src_skill = skill_graph.add(Skill.build(SKILL_NAME, SKILL_DESC))
        tgt_skill = skill_graph.add(Skill.build(
            SKILL_NAME_2, SKILL_DESC_2,
            prerequisite_ids=[{'id': src_skill.id}]))

        response = self.get(self.GRAPH_URL)
        self.assertEqual(200, response.status_int)

        dom = self.parse_html_string_to_soup(response.body)
        graph_div = dom.select('.graph')[0]
        assert len(dom.select('.gcb-button-toolbar'))

        # The graph data is embedded in data-nodes / data-links attributes.
        nodes = json.loads(graph_div.get('data-nodes'))
        self.assertEqual(2, len(nodes))
        links = json.loads(graph_div.get('data-links'))
        self.assertEqual(1, len(links))
        link = links[0]

        # The link points from the node to its prerequisite
        # because d3 dependo library follows the discrete math convention
        # for arrow direction
        if nodes[0]['id'] == tgt_skill.name:
            self.assertEqual(1, link['source'])
            self.assertEqual(0, link['target'])
        elif nodes[0]['id'] == src_skill.name:
            self.assertEqual(0, link['source'])
            self.assertEqual(1, link['target'])
        else:
            raise Exception('Unexpected skill name.')
class StudentSkillViewWidgetTests(BaseSkillMapTests):
    """Tests for the student-facing skill panel widget on lesson pages."""

    def setUp(self):
        super(StudentSkillViewWidgetTests, self).setUp()
        actions.login(ADMIN_EMAIL)
        # Fixture: one available unit containing one available lesson.
        self.unit = self.course.add_unit()
        self.unit.title = 'Test Unit'
        self.unit.availability = courses.AVAILABILITY_AVAILABLE
        self.lesson = self.course.add_lesson(self.unit)
        self.lesson.title = 'Test Lesson'
        self.lesson.availability = courses.AVAILABILITY_AVAILABLE
        self.course.save()

    def _getSkillPanelWidget(self):
        # Fetch the lesson page and return its skill-panel div, or None
        # when the widget is not rendered.
        url = 'unit?unit=%(unit)s&lesson=%(lesson)s' % {
            'unit': self.unit.unit_id, 'lesson': self.lesson.lesson_id}
        response = self.get(url)
        dom = self.parse_html_string(response.body)
        self.assertEqual(
            'Test Lesson',
            dom.find('.//h1[@class="gcb-lesson-title"]/span').text.strip())
        return dom.find('.//div[@class="skill-panel"]')

    def test_skills_widget_supressed_by_course_settings(self):
        skill_graph = SkillGraph.load()
        sa = skill_graph.add(Skill.build('a', 'describe a'))
        sb = skill_graph.add(Skill.build('b', 'describe b'))
        self.lesson.properties[SKILLS_KEY] = [sa.id, sb.id]
        self.course.save()

        # Skill widget is not shown if supressed by course setting
        env = {'course': {'display_skill_widget': False}}
        with actions.OverriddenEnvironment(env):
            self.assertIsNone(self._getSkillPanelWidget())

        # But the skill widget *is* shown if the course setting is True or is
        # unset
        self.assertIsNotNone(self._getSkillPanelWidget())

        env = {'course': {'display_skill_widget': True}}
        with actions.OverriddenEnvironment(env):
            self.assertIsNotNone(self._getSkillPanelWidget())

    def test_no_skills_in_lesson(self):
        # No skills mapped to the lesson -> no widget at all.
        self.assertIsNone(self._getSkillPanelWidget())

    def test_skills_with_no_prerequisites_or_successors(self):
        # Expect skills shown and friendly messages for prerequ and successors
        skill_graph = SkillGraph.load()
        sa = skill_graph.add(Skill.build('a', 'describe a'))
        sb = skill_graph.add(Skill.build('b', 'describe b'))
        self.lesson.properties[SKILLS_KEY] = [sa.id, sb.id]
        self.course.save()

        widget = self._getSkillPanelWidget()
        # The widget has three children: skill list, details, controls.
        skills_div, details_div, control_div = widget.findall('./*')

        actions.assert_contains(
            'Taught in this lesson',
            skills_div.find('./span[@class="section-title"]').text)
        li_list = skills_div.findall('.//li[@class="skill unknown"]')
        self.assertEqual(2, len(li_list))
        actions.assert_contains('a', li_list[0].text)
        actions.assert_contains(
            'describe a', li_list[0].attrib['data-skill-description'])
        actions.assert_contains('b', li_list[1].text)
        actions.assert_contains(
            'describe b', li_list[1].attrib['data-skill-description'])

        details_xml = cElementTree.tostring(details_div)
        actions.assert_contains('doesn\'t have any prerequisites', details_xml)
        actions.assert_contains('isn\'t a prerequisite', details_xml)

    def test_skills_with_prerequisites_and_successors(self):
        # Create skills, a, b, c, d
        # a --> b
        # c --> d
        # Add skills {b, c} to self.lesson
        # Expect self.lesson.depends_on == {a}
        # Expect self.lesson.leads_to == {d}
        skill_graph = SkillGraph.load()

        sa = skill_graph.add(Skill.build('a', 'describe a'))
        sb = skill_graph.add(Skill.build('b', 'describe b'))
        sc = skill_graph.add(Skill.build('c', 'describe c'))
        sd = skill_graph.add(Skill.build('d', 'describe d'))

        skill_graph.add_prerequisite(sb.id, sa.id)
        skill_graph.add_prerequisite(sd.id, sc.id)
        self.lesson.properties[SKILLS_KEY] = [sb.id, sc.id]
        self.course.save()

        widget = self._getSkillPanelWidget()

        # Check that 'b' and 'c' are listed as skills in this lesson
        skills_in_lesson = widget.findall(
            './div[1]//li[@class="skill unknown"]')
        self.assertEqual(2, len(skills_in_lesson))
        actions.assert_contains('b', skills_in_lesson[0].text)
        actions.assert_contains('c', skills_in_lesson[1].text)

        # Skill 'a' is in depends_on
        depends_on = widget.findall('./div[2]/div[1]/ol/li')
        self.assertEqual(1, len(depends_on))
        self.assertEqual(str(sa.id), depends_on[0].attrib['data-skill-id'])

        # Skill 'd' is in leads_to'
        leads_to = widget.findall('./div[2]/div[2]/ol/li')
        self.assertEqual(1, len(leads_to))
        self.assertEqual(str(sd.id), leads_to[0].attrib['data-skill-id'])

        # Add skill 'a' to the lesson and check that is not in depends_on
        self.lesson.properties[SKILLS_KEY].append(sa.id)
        self.course.save()
        widget = self._getSkillPanelWidget()
        depends_on = widget.findall('./div[2]/div[1]/ol/li')
        self.assertEqual(0, len(depends_on))

        # In fact even if 'a' is also taught elsewhere, because it's taught
        # in this lesson, don't list it.
        other_lesson = self.course.add_lesson(self.unit)
        other_lesson.title = 'Other Lesson'
        other_lesson.availability = courses.AVAILABILITY_AVAILABLE
        other_lesson.properties[SKILLS_KEY] = [sa.id]
        self.course.save()
        widget = self._getSkillPanelWidget()
        depends_on = widget.findall('./div[2]/div[1]/ol/li')
        self.assertEqual(0, len(depends_on))

    def test_skill_with_multiple_follow_ons(self):
        # Set up one skill which is a prerequisite of two skills and expect it
        # to be shown only once in depends_on"
        skill_graph = SkillGraph.load()

        sa = skill_graph.add(Skill.build('a', 'common prerequisite'))
        sb = skill_graph.add(Skill.build('b', 'depends on a'))
        sc = skill_graph.add(Skill.build('c', 'also depends on a'))

        skill_graph.add_prerequisite(sb.id, sa.id)
        skill_graph.add_prerequisite(sc.id, sa.id)

        self.lesson.properties[SKILLS_KEY] = [sb.id, sc.id]
        self.course.save()

        widget = self._getSkillPanelWidget()

        # Check B and C are listed as skills in this lesson
        skills_in_lesson = widget.findall(
            './div[1]//li[@class="skill unknown"]')
        self.assertEqual(2, len(skills_in_lesson))
        actions.assert_contains('b', skills_in_lesson[0].text)
        actions.assert_contains('c', skills_in_lesson[1].text)

        # Skill A is listed exactly once in the "depends on" section
        depends_on = widget.findall('./div[2]/div[1]/ol/li')
        self.assertEqual(1, len(depends_on))
        self.assertEqual(str(sa.id), depends_on[0].attrib['data-skill-id'])

    def test_skills_cards_have_title_description_and_lesson_links(self):
        # The lesson contains Skill A which has Skill B as a follow-on. Skill B
        # is found in Lesson 2. Check that the skill card shown for Skill B in
        # Lesson 1 has correct information
        skill_graph = SkillGraph.load()

        sa = skill_graph.add(Skill.build('a', 'describe a'))
        sb = skill_graph.add(Skill.build('b', 'describe b'))
        skill_graph.add_prerequisite(sb.id, sa.id)
        self.lesson.properties[SKILLS_KEY] = [sa.id]

        lesson2 = self.course.add_lesson(self.unit)
        lesson2.title = 'Test Lesson 2'
        lesson2.availability = courses.AVAILABILITY_AVAILABLE
        lesson2.properties[SKILLS_KEY] = [sb.id]
        self.course.save()

        widget = self._getSkillPanelWidget()
        leads_to = widget.findall('./div[2]/div[2]/ol/li')
        self.assertEqual(1, len(leads_to))
        card = leads_to[0]
        name = card.find('.//div[@class="name unknown"]').text
        description = card.find(
            './/div[@class="description"]/div[@class="content"]').text
        locations = card.findall('.//ol[@class="locations"]/li/a')
        self.assertEqual('b', name.strip())
        self.assertEqual('describe b', description.strip())
        self.assertEqual(1, len(locations))
        self.assertEqual(
            '1.2 Test Lesson 2', ' '.join(locations[0].text.strip().split()))
        self.assertEqual(
            'unit?unit=%(unit)s&lesson=%(lesson)s' % {
                'unit': self.unit.unit_id, 'lesson': lesson2.lesson_id},
            locations[0].attrib['href'])

        # Next, make the lesson unavailable
        lesson2.availability = courses.AVAILABILITY_UNAVAILABLE
        self.course.save()

        # Except the subsequent skill does not show its lesson
        widget = self._getSkillPanelWidget()
        leads_to = widget.findall('./div[2]/div[2]/ol/li')
        card = leads_to[0]
        locations = card.findall('.//ol[@class="locations"]/li')
        self.assertEqual(1, len(locations))
        self.assertEqual('Not taught', locations[0].text)
class SkillMapAnalyticsTabTests(BaseSkillMapTests):
    """Tests the handlers for the tab Analytics > Skill Map."""

    TAB_URL = ('/{}/dashboard?action=analytics_skill_map'.format(
        COURSE_NAME))
    NON_ADMIN_EMAIL = '<EMAIL>'

    def test_get_tab(self):
        """Performs a get call to the tab."""
        actions.login(ADMIN_EMAIL, is_admin=True)
        response = self.get(self.TAB_URL)
        # Admins get the tab page itself.
        self.assertEqual(200, response.status_code)

    def test_get_tab_no_admin(self):
        """Non admin users should not have access."""
        actions.login(self.NON_ADMIN_EMAIL, is_admin=False)
        response = self.get(self.TAB_URL, expect_errors=True)
        # Non-admins are redirected away rather than served the tab.
        # NOTE: assertEqual replaces the deprecated assertEquals alias
        # (removed in Python 3.12); arguments normalized to
        # (expected, actual) to match the rest of this file.
        self.assertEqual(302, response.status_int)
class GenerateCompetencyHistogramsTests(BaseSkillMapTests):
    """Tests the result of the map reduce job CountSkillCompetencies."""

    def setUp(self):
        super(GenerateCompetencyHistogramsTests, self).setUp()
        # Seed three success-rate measures:
        #   student 11, skill 1 -> scores [1.0]       (competency 1)
        #   student 11, skill 2 -> scores [0.0, 1.0]  (competency 0.5)
        #   student 22, skill 1 -> scores [0.0]       (competency 0)
        scores_by_student_skill = [
            ((11, 1), [1.0]),
            ((11, 2), [0.0, 1.0]),
            ((22, 1), [0.0]),
        ]
        for (student_id, skill_id), scores in scores_by_student_skill:
            measure = competency.SuccessRateCompetencyMeasure.load(
                student_id, skill_id)
            for score in scores:
                measure.add_score(score)
            measure.save()
        actions.login(ADMIN_EMAIL, is_admin=True)

    def run_map_reduce_job(self):
        # Submit the histogram job and drain the deferred task queue so the
        # job's results are available synchronously.
        histogram_job = competency.GenerateSkillCompetencyHistograms(
            self.app_context)
        histogram_job.submit()
        self.execute_all_deferred_tasks()

    def test_map_reduce_job_output(self):
        self.run_map_reduce_job()
        finished_job = competency.GenerateSkillCompetencyHistograms(
            self.app_context).load()
        actual = sorted(
            jobs.MapReduceJob.get_results(finished_job),
            key=lambda row: row[0])
        expected = [
            [1, {'low-competency': 1, 'med-competency': 0,
                 'high-competency': 1, 'avg': 0.5}],
            [2, {'low-competency': 0, 'med-competency': 1,
                 'high-competency': 0, 'avg': 0.5}]]
        self.assertEqual(expected, sorted(actual))
class CountSkillCompletionsTests(BaseSkillMapTests):
"""Tests the result of the map reduce job CountSkillCompletions."""
    def setUp(self):
        super(CountSkillCompletionsTests, self).setUp()
        actions.login(ADMIN_EMAIL, is_admin=True)
        # Order matters: the students' progress entities refer to the
        # skills created here by id.
        self._create_skills()
        self._create_students()
def _create_students(self):
"""Creates 4 StudentPropertyEntities with partial progress."""
def mktime(str_date):
return time.mktime(time.strptime(
str_date, CountSkillCompletion.DATE_FORMAT))
self.day1 = '2015-01-01'
self.day2 = '2015-01-02'
self.day3 = '2015-01-03'
self.day4 = '2015-01-04'
c = SkillCompletionTracker.COMPLETED
p = SkillCompletionTracker.IN_PROGRESS
# progress string for students
students_progress = [
{self.skill1.id : {c: mktime(self.day2), p: mktime(self.day1)},
self.skill2.id : {c: mktime(self.day4), p: mktime(self.day1)}},
{self.skill1.id : {c: mktime(self.day2), p: mktime(self.day2)},
self.skill2.id : {p: mktime(self.day1)}},
{self.skill1.id : {c: mktime(self.day1)}},
{} # No progress
]
for index, progress in enumerate(students_progress):
student = models.Student(user_id=str(index))
student.put()
comp = models.StudentPropertyEntity.create(
student=student,
property_name=SkillCompletionTracker.PROPERTY_KEY)
comp.value = transforms.dumps(progress)
comp.put()
def _create_skills(self):
"""Creates 3 skills."""
skill_graph = SkillGraph.load()
self.skill1 = skill_graph.add(Skill.build('a', ''))
self.skill2 = skill_graph.add(Skill.build('b', ''))
| |
- MS', 'pt': 'Dourados - MS'},
'55673411':{'en': 'Dourados - MS', 'pt': 'Dourados - MS'},
'55673412':{'en': 'Douradina - MS', 'pt': 'Douradina - MS'},
'55673413':{'en': 'Panambi - MS', 'pt': 'Panambi - MS'},
'55673414':{'en': 'Vila Vargas - MS', 'pt': 'Vila Vargas - MS'},
'55673416':{'en': 'Dourados - MS', 'pt': 'Dourados - MS'},
'55673418':{'en': 'Itaum - MS', 'pt': 'Itaum - MS'},
'55673419':{'en': u('Ang\u00e9lica - MS'), 'pt': u('Ang\u00e9lica - MS')},
'5567342':{'en': 'Dourados - MS', 'pt': 'Dourados - MS'},
'55673429':{'en': u('Vila Maca\u00faba - MS'), 'pt': u('Vila Maca\u00faba - MS')},
'55673431':{'en': u('Ponta Por\u00e3 - MS'), 'pt': u('Ponta Por\u00e3 - MS')},
'55673432':{'en': u('Ponta Por\u00e3 - MS'), 'pt': u('Ponta Por\u00e3 - MS')},
'55673433':{'en': u('Ponta Por\u00e3 - MS'), 'pt': u('Ponta Por\u00e3 - MS')},
'55673434':{'en': u('Sanga Puit\u00e3 - MS'), 'pt': u('Sanga Puit\u00e3 - MS')},
'55673435':{'en': u('Ant\u00f4nio Jo\u00e3o - MS'), 'pt': u('Ant\u00f4nio Jo\u00e3o - MS')},
'55673437':{'en': u('Ponta Por\u00e3 - MS'), 'pt': u('Ponta Por\u00e3 - MS')},
'55673438':{'en': u('Laguna Carap\u00e3 - MS'), 'pt': u('Laguna Carap\u00e3 - MS')},
'55673439':{'en': 'Bela Vista - MS', 'pt': 'Bela Vista - MS'},
'55673440':{'en': 'Amandina - MS', 'pt': 'Amandina - MS'},
'55673441':{'en': 'Nova Andradina - MS', 'pt': 'Nova Andradina - MS'},
'55673442':{'en': 'Ivinhema - MS', 'pt': 'Ivinhema - MS'},
'55673443':{'en': u('Bataypor\u00e3 - MS'), 'pt': u('Bataypor\u00e3 - MS')},
'55673444':{'en': 'Taquarussu - MS', 'pt': 'Taquarussu - MS'},
'55673445':{'en': u('Anauril\u00e2ndia - MS'), 'pt': u('Anauril\u00e2ndia - MS')},
'55673446':{'en': u('Ang\u00e9lica - MS'), 'pt': u('Ang\u00e9lica - MS')},
'55673447':{'en': 'Novo Horizonte do Sul - MS', 'pt': 'Novo Horizonte do Sul - MS'},
'55673448':{'en': u('Deod\u00e1polis - MS'), 'pt': u('Deod\u00e1polis - MS')},
'55673449':{'en': 'Nova Andradina - MS', 'pt': 'Nova Andradina - MS'},
'55673451':{'en': u('Itapor\u00e3 - MS'), 'pt': u('Itapor\u00e3 - MS')},
'55673452':{'en': 'Rio Brilhante - MS', 'pt': 'Rio Brilhante - MS'},
'55673453':{'en': u('Caarap\u00f3 - MS'), 'pt': u('Caarap\u00f3 - MS')},
'55673454':{'en': 'Maracaju - MS', 'pt': 'Maracaju - MS'},
'55673455':{'en': 'Rio Brilhante - MS', 'pt': 'Rio Brilhante - MS'},
'55673456':{'en': 'Nova Alvorada do Sul - MS', 'pt': 'Nova Alvorada do Sul - MS'},
'55673457':{'en': u('Itapor\u00e3 - MS'), 'pt': u('Itapor\u00e3 - MS')},
'55673461':{'en': u('Navira\u00ed - MS'), 'pt': u('Navira\u00ed - MS')},
'55673463':{'en': 'Juti - MS', 'pt': 'Juti - MS'},
'55673465':{'en': u('Jate\u00ed - MS'), 'pt': u('Jate\u00ed - MS')},
'55673466':{'en': u('Gl\u00f3ria de Dourados - MS'), 'pt': u('Gl\u00f3ria de Dourados - MS')},
'55673467':{'en': u('F\u00e1tima do Sul - MS'), 'pt': u('F\u00e1tima do Sul - MS')},
'55673468':{'en': 'Vicentina - MS', 'pt': 'Vicentina - MS'},
'55673469':{'en': 'Culturama - MS', 'pt': 'Culturama - MS'},
'55673471':{'en': 'Iguatemi - MS', 'pt': 'Iguatemi - MS'},
'55673473':{'en': 'Eldorado - MS', 'pt': 'Eldorado - MS'},
'55673474':{'en': 'Mundo Novo - MS', 'pt': 'Mundo Novo - MS'},
'55673475':{'en': u('Japor\u00e3 - MS'), 'pt': u('Japor\u00e3 - MS')},
'55673476':{'en': u('Itaquira\u00ed - MS'), 'pt': u('Itaquira\u00ed - MS')},
'55673478':{'en': 'Tacuru - MS', 'pt': 'Tacuru - MS'},
'55673479':{'en': 'Sete Quedas - MS', 'pt': 'Sete Quedas - MS'},
'55673480':{'en': 'Paranhos - MS', 'pt': 'Paranhos - MS'},
'55673481':{'en': u('Amamba\u00ed - MS'), 'pt': u('Amamba\u00ed - MS')},
'55673483':{'en': 'Coronel Sapucaia - MS', 'pt': 'Coronel Sapucaia - MS'},
'55673484':{'en': u('Caarap\u00f3 - MS'), 'pt': u('Caarap\u00f3 - MS')},
'55673487':{'en': 'Vila Marques - MS', 'pt': 'Vila Marques - MS'},
'55673488':{'en': 'Aral Moreira - MS', 'pt': 'Aral Moreira - MS'},
'55673489':{'en': u('Ind\u00e1polis - MS'), 'pt': u('Ind\u00e1polis - MS')},
'55673495':{'en': 'Caracol - MS', 'pt': 'Caracol - MS'},
'55673496':{'en': u('Ponta Por\u00e3 - MS'), 'pt': u('Ponta Por\u00e3 - MS')},
'55673498':{'en': u('Caarap\u00f3 - MS'), 'pt': u('Caarap\u00f3 - MS')},
'55673499':{'en': 'Vila Nova Casa Verde - MS', 'pt': 'Vila Nova Casa Verde - MS'},
'55673503':{'en': u('Parana\u00edba - MS'), 'pt': u('Parana\u00edba - MS')},
'55673509':{'en': u('Tr\u00eas Lagoas - MS'), 'pt': u('Tr\u00eas Lagoas - MS')},
'55673521':{'en': u('Tr\u00eas Lagoas - MS'), 'pt': u('Tr\u00eas Lagoas - MS')},
'55673522':{'en': u('Tr\u00eas Lagoas - MS'), 'pt': u('Tr\u00eas Lagoas - MS')},
'55673524':{'en': u('Tr\u00eas Lagoas - MS'), 'pt': u('Tr\u00eas Lagoas - MS')},
'55673541':{'en': 'Bataguassu - MS', 'pt': 'Bataguassu - MS'},
'55673546':{'en': u('Brasil\u00e2ndia - MS'), 'pt': u('Brasil\u00e2ndia - MS')},
'55673547':{'en': 'Debrasa - MS', 'pt': 'Debrasa - MS'},
'55673557':{'en': u('Tr\u00eas Lagoas - MS'), 'pt': u('Tr\u00eas Lagoas - MS')},
'55673559':{'en': u('Parana\u00edba - MS'), 'pt': u('Parana\u00edba - MS')},
'55673562':{'en': u('Chapad\u00e3o do Sul - MS'), 'pt': u('Chapad\u00e3o do Sul - MS')},
'55673565':{'en': 'Aparecida do Taboado - MS', 'pt': 'Aparecida do Taboado - MS'},
'55673574':{'en': u('Inoc\u00eancia - MS'), 'pt': u('Inoc\u00eancia - MS')},
'55673579':{'en': u('Selv\u00edria - MS'), 'pt': u('Selv\u00edria - MS')},
'55673591':{'en': 'Santa Rita do Pardo - MS', 'pt': 'Santa Rita do Pardo - MS'},
'55673596':{'en': u('Cassil\u00e2ndia - MS'), 'pt': u('Cassil\u00e2ndia - MS')},
'55673665':{'en': u('\u00c1gua Clara - MS'), 'pt': u('\u00c1gua Clara - MS')},
'55673666':{'en': u('Chapad\u00e3o do Sul - MS'), 'pt': u('Chapad\u00e3o do Sul - MS')},
'55673668':{'en': u('Parana\u00edba - MS'), 'pt': u('Parana\u00edba - MS')},
'55673669':{'en': u('Parana\u00edba - MS'), 'pt': u('Parana\u00edba - MS')},
'55673671':{'en': 'Dourados - MS', 'pt': 'Dourados - MS'},
'55673672':{'en': 'Rio Brilhante - MS', 'pt': 'Rio Brilhante - MS'},
'55673673':{'en': u('Jate\u00ed - MS'), 'pt': u('Jate\u00ed - MS')},
'55673674':{'en': 'Bela Vista - MS', 'pt': 'Bela Vista - MS'},
'55673675':{'en': 'Tacuru - MS', 'pt': 'Tacuru - MS'},
'55673676':{'en': 'Nova Andradina - MS', 'pt': 'Nova Andradina - MS'},
'55673681':{'en': 'Terenos - MS', 'pt': 'Terenos - MS'},
'55673682':{'en': u('Camapu\u00e3 - MS'), 'pt': u('Camapu\u00e3 - MS')},
'55673683':{'en': 'Rio Verde de Mato Grosso - MS', 'pt': 'Rio Verde de Mato Grosso - MS'},
'55673686':{'en': 'Bonito - MS', 'pt': 'Bonito - MS'},
'55673687':{'en': 'Miranda - MS', 'pt': 'Miranda - MS'},
'55673901':{'en': 'Campo Grande - MS', 'pt': 'Campo Grande - MS'},
'55673902':{'en': 'Dourados - MS', 'pt': 'Dourados - MS'},
'55673907':{'en': u('Corumb\u00e1 - MS'), 'pt': u('Corumb\u00e1 - MS')},
'55673919':{'en': u('Tr\u00eas Lagoas - MS'), 'pt': u('Tr\u00eas Lagoas - MS')},
'55673926':{'en': u('Ponta Por\u00e3 - MS'), 'pt': u('Ponta Por\u00e3 - MS')},
'55673929':{'en': u('Tr\u00eas Lagoas - MS'), 'pt': u('Tr\u00eas Lagoas - MS')},
'55674001':{'en': 'Campo Grande - MS', 'pt': 'Campo Grande - MS'},
'55674002':{'en': 'Campo Grande - MS', 'pt': 'Campo Grande - MS'},
'55674003':{'en': 'Campo Grande - MS', 'pt': 'Campo Grande - MS'},
'55674004':{'en': 'Campo Grande - MS', 'pt': 'Campo Grande - MS'},
'55674007':{'en': 'Campo Grande - MS', 'pt': 'Campo Grande - MS'},
'55674062':{'en': 'Campo Grande - MS', 'pt': 'Campo Grande - MS'},
'5568':{'en': 'Acre', 'pt': 'Acre'},
'55682101':{'en': 'Rio Branco - AC', 'pt': 'Rio Branco - AC'},
'55682102':{'en': 'Rio Branco - AC', 'pt': 'Rio Branco - AC'},
'55682106':{'en': 'Rio Branco - AC', 'pt': 'Rio Branco - AC'},
'55683025':{'en': 'Rio Branco - AC', 'pt': 'Rio Branco - AC'},
'55683026':{'en': 'Rio Branco - AC', 'pt': 'Rio Branco - AC'},
'55683028':{'en': 'Rio Branco - AC', 'pt': 'Rio Branco - AC'},
'55683211':{'en': 'Rio Branco - AC', 'pt': 'Rio Branco - AC'},
'55683212':{'en': 'Rio Branco - AC', 'pt': 'Rio Branco - AC'},
'55683213':{'en': 'Rio Branco - AC', 'pt': 'Rio Branco - AC'},
'55683214':{'en': 'Rio Branco - AC', 'pt': 'Rio Branco - AC'},
'55683216':{'en': 'Rio Branco - AC', 'pt': 'Rio Branco - AC'},
'5568322':{'en': 'Rio Branco - AC', 'pt': 'Rio Branco - AC'},
'55683231':{'en': 'Bujari - AC', 'pt': 'Bujari - AC'},
'55683232':{'en': 'Senador Guiomard - AC', 'pt': 'Senador Guiomard - AC'},
'55683233':{'en': 'Porto Acre - AC', 'pt': 'Porto Acre - AC'},
'55683234':{'en': 'Capixaba - AC', 'pt': 'Capixaba - AC'},
'55683235':{'en': u('Acrel\u00e2ndia - AC'), 'pt': u('Acrel\u00e2ndia - AC')},
'55683237':{'en': u('Pl\u00e1cido de Castro - AC'), 'pt': u('Pl\u00e1cido de Castro - AC')},
'55683242':{'en': 'Rio Branco - AC', 'pt': 'Rio Branco - AC'},
'55683244':{'en': 'Rio Branco - AC', 'pt': 'Rio Branco - AC'},
'55683248':{'en': 'Rio Branco - AC', 'pt': 'Rio Branco - AC'},
'55683261':{'en': u('Humait\u00e1 (Pad Humait\u00e1) - AC'), 'pt': u('Humait\u00e1 (Pad Humait\u00e1) - AC')},
'55683262':{'en': 'Vila do V - AC', 'pt': 'Vila do V - AC'},
'55683267':{'en': 'Vila Campinas (Pad Peixoto) - AC', 'pt': 'Vila Campinas (Pad Peixoto) - AC'},
'55683301':{'en': 'Rio Branco - AC', 'pt': 'Rio Branco - AC'},
'55683302':{'en': 'Rio Branco - AC', 'pt': 'Rio Branco - AC'},
'55683303':{'en': 'Rio Branco - AC', 'pt': 'Rio Branco - AC'},
'55683311':{'en': 'Cruzeiro do Sul - AC', 'pt': 'Cruzeiro do Sul - AC'},
'55683322':{'en': 'Cruzeiro do Sul - AC', 'pt': 'Cruzeiro do Sul - AC'},
'55683325':{'en': 'Marechal Thaumaturgo - AC', 'pt': 'Marechal Thaumaturgo - AC'},
'55683327':{'en': 'Assis Brasil (Vila) - AC', 'pt': 'Assis Brasil (Vila) - AC'},
'55683342':{'en': 'Rodrigues Alves - | |
"""
Utility functions for generating annulus mesh between start and end loops of points.
"""
from __future__ import division
import copy
from collections.abc import Sequence
from opencmiss.utils.zinc.field import findOrCreateFieldCoordinates
from opencmiss.zinc.element import Element
from opencmiss.zinc.node import Node
from scaffoldmaker.utils import interpolation as interp
from scaffoldmaker.utils import vector
from scaffoldmaker.utils.eft_utils import remapEftNodeValueLabel, setEftScaleFactorIds
from scaffoldmaker.utils.eftfactory_bicubichermitelinear import eftfactory_bicubichermitelinear
from scaffoldmaker.utils.eftfactory_tricubichermite import eftfactory_tricubichermite
def derivativeSignsToExpressionTerms(valueLabels, signs, scaleFactorIdx=None):
    """
    Return remap expression terms for summing derivative[i] * sign[i] * scaleFactor
    :param valueLabels: List of node value labels to possibly include.
    :param signs: List of 1 (no scaling), -1 (scale by scale factor 1) or 0 (no term).
    :param scaleFactorIdx: Optional index of local scale factor to scale all non-zero terms. Default None means no
    extra scaling.
    """
    expressionTerms = []
    for i in range(len(valueLabels)):
        sign = signs[i]
        if sign not in (1, -1):
            # Zero (or any other) sign: this derivative contributes no term.
            continue
        # Sign -1 is expressed by referencing local scale factor 1, which is
        # expected to hold the negation; sign 1 needs no scale factor.
        scaleFactorIndexes = [] if sign == 1 else [1]
        if scaleFactorIdx:
            scaleFactorIndexes.append(scaleFactorIdx)
        expressionTerms.append((valueLabels[i], scaleFactorIndexes))
    return expressionTerms
def getMappedD1D2(gds, derivativesMaps):
    """
    Get vector combinations of d1In, d2In, d3In indicated by derivativesMap.
    :param gds: List of global d1, d2 and optionally d3.
    :param derivativesMaps: List over d1, d2, d3, and optionally d1b (for
    different d1 exiting global node) of list of 3 weights of gds,
    each limited to -1.0, 0.0, or 1.0.
    :return: Effective d1, d2. Where d1 is around, d2 is radial.
    """
    dslimit = len(gds)
    # d1: either the raw global d1, or a weighted combination of gds.
    if not (derivativesMaps and derivativesMaps[0]):
        d1 = gds[0]
    else:
        derivativesMap = derivativesMaps[0]
        d1 = [0.0, 0.0, 0.0]
        for ds in range(dslimit):
            if derivativesMap[ds] != 0.0:
                for c in range(3):
                    d1[c] += derivativesMap[ds] * gds[ds][c]
        if len(derivativesMaps) > 3:
            # A 4th map describes d1 exiting the other side of the node;
            # average the two mapped directions.
            derivativesMap = derivativesMaps[3]
            d1 = [0.5 * d for d in d1]
            if not derivativesMap:
                # No explicit map for the other side: default to global d1.
                for c in range(3):
                    d1[c] += 0.5 * gds[0][c]
            else:
                for ds in range(dslimit):
                    if derivativesMap[ds] != 0.0:
                        for c in range(3):
                            d1[c] += 0.5 * derivativesMap[ds] * gds[ds][c]
    # d2: either the raw global d2, or a weighted combination of gds.
    if not (derivativesMaps and derivativesMaps[1]):
        d2 = gds[1]
    else:
        derivativesMap = derivativesMaps[1]
        d2 = [0.0, 0.0, 0.0]
        for ds in range(dslimit):
            if derivativesMap[ds] != 0.0:
                for c in range(3):
                    d2[c] += derivativesMap[ds] * gds[ds][c]
    return d1, d2
def createAnnulusMesh3d(nodes, mesh, nextNodeIdentifier, nextElementIdentifier, startPointsx, startPointsd1,
startPointsd2, startPointsd3, startNodeId, startDerivativesMap, endPointsx, endPointsd1,
endPointsd2, endPointsd3, endNodeId, endDerivativesMap,
forceStartLinearXi3=False, forceMidLinearXi3=False, forceEndLinearXi3=False,
maxStartThickness=None, maxEndThickness=None, useCrossDerivatives=False,
elementsCountRadial=1, meshGroups=None, wallAnnotationGroups=None,
tracksurface=None, startProportions=None, endProportions=None,
rescaleStartDerivatives=False, rescaleEndDerivatives=False, sampleBlend=0.0):
"""
Create an annulus mesh from a loop of start points/nodes with specified derivative mappings to
a loop of end points/nodes with specified derivative mappings.
Derivative d3 is through the wall. Currently limited to single element layer through wall.
Points/nodes order cycles fastest around the annulus, then through the wall.
Note doesn't support cross derivatives.
Arrays are indexed by n3 (node through wall, size 2), n2 (node along/radial), n1 (node around, variable size)
and coordinate component c.
:param nodes: The nodeset to create nodes in.
:param mesh: The mesh to create elements in.
:param nextNodeIdentifier, nextElementIdentifier: Next identifiers to use and increment.
:param startPointsx, startPointsd1, startPointsd2, startPointsd3, endPointsx, endPointsd1, endPointsd2, endPointsd3:
List array[n3][n1][c] or start/point coordinates and derivatives. To linearise through the wall, pass None to
d3. If both ends are linear through the wall, interior points are linear through the wall.
:param startNodeId, endNodeId: List array [n3][n1] of existing node identifiers to use at start/end. Pass None for
argument if no nodes are specified at end. These arguments are 'all or nothing'.
:param startDerivativesMap, endDerivativesMap: List array[n3][n1] of mappings for d/dxi1, d/dxi2, d/dxi3 at
start/end of form:
( (1, -1, 0), (1, 0, 0), None ) where the first tuple means d/dxi1 = d/ds1 - d/ds2. Only 0, 1 and -1 may be
used.
None means use default e.g. d/dxi2 = d/ds2.
Pass None for the entire argument to use the defaults d/dxi1 = d/ds1, d/dxi2 = d/ds2, d/dxi3 = d/ds3.
Pass a 4th mapping to apply to d/dxi1 on other side of node; if not supplied first mapping applies both sides.
:param forceStartLinearXi3, forceMidLinearXi3, forceEndLinearXi3: Force start, middle or
end elements to be linear through the wall, even if d3 is supplied at either end.
Can only use forceMidLinearXi3 only if at least one end is linear in d3.
:param maxStartThickness, maxEndThickness: Optional maximum override on start/end thicknesses.
:param useCrossDerivatives: May only be True if no derivatives maps are in use.
:param elementsCountRadial: Optional number of elements in radial direction between start and end.
:param meshGroups: Optional sequence of Zinc MeshGroup for adding all new elements to, or a sequence of
length elementsCountRadial containing sequences of mesh groups to add rows of radial elements to
from start to end.
:param wallAnnotationGroups: Annotation groups for adding all new elements to a sequence
of groups to add to elements through wall.
:param tracksurface: Description for outer surface representation used for creating annulus mesh. Provides
information for creating radial nodes on annulus that sit on tracksurface. Need startProportions and endProportions
to work.
:param startProportions: Proportion around and along of startPoints on tracksurface. These vary with nodes
around as for startPoints. Values only given for tracksurface for outer layer (xi3 == 1).
:param endProportions: Proportion around and along of endPoints on track surface. These vary with nodes
around as for endPoints. Values only given for tracksurface for outer layer (xi3 == 1).
:param rescaleStartDerivatives, rescaleEndDerivatives: Optional flags to compute and multiply additional scale
factors on start, end or both radial derivatives to fit arc length, needed if derivatives are of the wrong scale
for the radial distances and the chosen elementsCountRadial. If either is True, derivatives and sampled radial
nodes are spaced for a gradual change of derivative from that at the other end. If both are True, scaling is set to
give even sampling and arclength derivatives.
:param sampleBlend: Real value varying from 0.0 to 1.0 controlling weighting of start and end
derivatives when interpolating extra points in-between, where 0.0 = sample with equal end derivatives,
and 1.0 = proportional to current magnitudes, interpolated in between.
:return: Final values of nextNodeIdentifier, nextElementIdentifier
"""
assert (elementsCountRadial >= 1), 'createAnnulusMesh3d: Invalid number of radial elements'
startLinearXi3 = (not startPointsd3) or forceStartLinearXi3
endLinearXi3 = (not endPointsd3) or forceEndLinearXi3
midLinearXi3 = (startLinearXi3 and endLinearXi3) or ((startLinearXi3 or endLinearXi3) and forceMidLinearXi3)
# get list whether each row of nodes in elements is linear in Xi3
# this is for element use; start/end nodes may have d3 even if element is linear
rowLinearXi3 = [startLinearXi3] + [midLinearXi3] * (elementsCountRadial - 1) + [endLinearXi3]
assert (not useCrossDerivatives) or ((not startDerivativesMap) and (not endDerivativesMap)), \
'createAnnulusMesh3d: Cannot use cross derivatives with derivatives map'
nodesCountWall = len(startPointsx)
assert (len(startPointsd1) == nodesCountWall) and (len(startPointsd2) == nodesCountWall) and \
(startLinearXi3 or (len(startPointsd3) == nodesCountWall)) and \
(len(endPointsx) == nodesCountWall) and (len(endPointsd1) == nodesCountWall) and \
(len(endPointsd2) == nodesCountWall) and (endLinearXi3 or (len(endPointsd3) == nodesCountWall)) and \
((startNodeId is None) or (len(startNodeId) == nodesCountWall)) and \
((endNodeId is None) or (len(endNodeId) == nodesCountWall)) and \
((startDerivativesMap is None) or (len(startDerivativesMap) == nodesCountWall)) and \
((endDerivativesMap is None) or (len(endDerivativesMap) == nodesCountWall)),\
'createAnnulusMesh3d: Mismatch in number of layers through wall'
elementsCountAround = nodesCountAround = len(startPointsx[0])
assert (nodesCountAround > 1), 'createAnnulusMesh3d: Invalid number of points/nodes around annulus'
for n3 in range(nodesCountWall):
assert (len(startPointsx[n3]) == nodesCountAround) and (len(startPointsd1[n3]) == nodesCountAround) and \
(len(startPointsd2[n3]) == nodesCountAround) and \
(startLinearXi3 or (len(startPointsd3[n3]) == nodesCountAround)) and\
(len(endPointsx[n3]) == nodesCountAround) and (len(endPointsd1[n3]) == nodesCountAround) and \
(len(endPointsd2[n3]) == nodesCountAround) and \
(endLinearXi3 or (len(endPointsd3[n3]) == nodesCountAround)) and \
((startNodeId is None) or (len(startNodeId[n3]) == nodesCountAround)) and\
((endNodeId is None) or (len(endNodeId[n3]) == nodesCountAround)) and \
((startDerivativesMap is None) or (len(startDerivativesMap[n3]) == nodesCountAround)) and \
((endDerivativesMap is None) or (len(endDerivativesMap[n3]) == nodesCountAround)), \
'createAnnulusMesh3d: Mismatch in number of points/nodes in layers through wall'
rowMeshGroups = meshGroups
if meshGroups:
assert isinstance(meshGroups, Sequence), 'createAnnulusMesh3d: Mesh groups is not a sequence'
if (len(meshGroups) == 0) or (not isinstance(meshGroups[0], Sequence)):
rowMeshGroups = [meshGroups] * elementsCountRadial
else:
assert len(meshGroups) == elementsCountRadial, | |
# pycparserext/ext_c_parser.py
from __future__ import division
import pycparser.c_parser
import pycparser.c_ast as c_ast
try:
import pycparser.ply.yacc as yacc
except ImportError:
import ply.yacc as yacc # noqa: F401
from pycparser.plyparser import parameterized, template
class CParserBase(pycparser.c_parser.CParser):
    """Base for the extended C parsers: wires in the extension lexer and
    pre-seeds the typedef-name scope before parsing.

    Subclasses are expected to provide ``lexer_class`` and
    ``initial_type_symbols`` class attributes -- TODO confirm against the
    concrete parser subclasses.
    """

    def __init__(self, **kwds):
        kwds['lexer'] = self.lexer_class
        kwds['lextab'] = 'pycparserext.lextab'
        kwds['yacctab'] = 'pycparserext.yacctab'
        pycparser.c_parser.CParser.__init__(self, **kwds)

    def parse(self, text, filename='', debuglevel=0,
            initial_type_symbols=frozenset()):
        """Parse *text* and return the AST.

        :param text: C source to parse; empty/whitespace input yields an
            empty FileAST.
        :param filename: Name reported in coordinates/diagnostics.
        :param debuglevel: Passed through to the underlying yacc parser.
        :param initial_type_symbols: Extra names to treat as typedef'd
            types in the outermost scope, in addition to the class-level
            ``initial_type_symbols``. (Default is an immutable frozenset
            to avoid the shared-mutable-default pitfall of the previous
            ``set()`` default; it is only iterated here, so behavior is
            unchanged.)
        """
        self.clex.filename = filename
        self.clex.reset_lineno()

        # _scope_stack[-1] is the current (topmost) scope.
        initial_scope = dict((tpsym, 1) for tpsym in initial_type_symbols)
        initial_scope.update(
            dict((tpsym, 1) for tpsym in self.initial_type_symbols))
        self._scope_stack = [initial_scope]

        if not text or text.isspace():
            return c_ast.FileAST([])
        else:
            return self.cparser.parse(text, lexer=self.clex, debug=debuglevel)
# {{{ ast extensions
class TypeList(c_ast.Node):
    """AST node holding an ordered list of type nodes."""

    def __init__(self, types, coord=None):
        self.types = types
        self.coord = coord

    def children(self):
        # Children are named positionally, pycparser-style: types[0], ...
        return tuple(
            ("types[%d]" % index, node)
            for index, node in enumerate(self.types or []))

    def __iter__(self):
        return iter(self.types or [])

    attr_names = ()
class AttributeSpecifier(c_ast.Node):
    """Wraps an __attribute__ expression list used as a function specifier.

    NOTE(review): unlike the other nodes here, it stores no ``coord`` --
    presumably callers never need one; confirm before adding.
    """

    def __init__(self, exprlist):
        self.exprlist = exprlist

    def children(self):
        # Deliberately a list (not a tuple), as in the original code.
        return [("exprlist", self.exprlist)]

    def __iter__(self):
        # Empty generator: yields nothing, but the unreachable "yield"
        # after "return" keeps this function a generator.
        return
        yield

    attr_names = ()
class Asm(c_ast.Node):
    """AST node for an (extended) inline assembly statement."""

    # Child attributes, in the order they appear in children()/__iter__.
    _child_attrs = (
        "template", "output_operands", "input_operands", "clobbered_regs")

    def __init__(self, asm_keyword, template, output_operands,
            input_operands, clobbered_regs, coord=None):
        self.asm_keyword = asm_keyword
        self.template = template
        self.output_operands = output_operands
        self.input_operands = input_operands
        self.clobbered_regs = clobbered_regs
        self.coord = coord

    def children(self):
        # Only attributes that are set become (name, node) children.
        return tuple(
            (name, getattr(self, name))
            for name in self._child_attrs
            if getattr(self, name) is not None)

    def __iter__(self):
        for name in self._child_attrs:
            node = getattr(self, name)
            if node is not None:
                yield node

    attr_names = ('asm_keyword',)
class PreprocessorLine(c_ast.Node):
    """A raw preprocessor line kept verbatim in the AST."""

    def __init__(self, contents, coord=None):
        self.contents = contents
        self.coord = coord

    def children(self):
        # Leaf node: the line text lives in *contents*, not in child nodes.
        return ()

    def __iter__(self):
        # Empty generator: yields nothing, but the unreachable "yield"
        # after "return" keeps this function a generator.
        return
        yield

    attr_names = ("contents",)
class TypeOfDeclaration(c_ast.Node):
    """``typeof(...)`` applied to a declaration."""

    def __init__(self, typeof_keyword, declaration, coord=None):
        self.typeof_keyword = typeof_keyword
        self.declaration = declaration
        self.coord = coord

    def children(self):
        if self.declaration is None:
            return ()
        return (("declaration", self.declaration),)

    def __iter__(self):
        if self.declaration is not None:
            yield self.declaration

    attr_names = ('typeof_keyword',)
class TypeOfExpression(c_ast.Node):
    """``typeof(...)`` applied to an expression."""

    def __init__(self, typeof_keyword, expr, coord=None):
        self.typeof_keyword = typeof_keyword
        self.expr = expr
        self.coord = coord

    def children(self):
        if self.expr is None:
            return ()
        return (("expr", self.expr),)

    def __iter__(self):
        if self.expr is not None:
            yield self.expr

    attr_names = ('typeof_keyword',)
class RangeExpression(c_ast.Node):
    """A ``first ... last`` range expression (GCC case ranges)."""

    def __init__(self, first, last, coord=None):
        self.first = first
        self.last = last
        self.coord = coord

    def children(self):
        # Only set endpoints become (name, node) children, in order.
        result = []
        for name in ("first", "last"):
            node = getattr(self, name)
            if node is not None:
                result.append((name, node))
        return tuple(result)

    def __iter__(self):
        for _name, node in self.children():
            yield node

    attr_names = ()
# These are the same as pycparser's, but it does *not* declare __slots__--
# so we can poke in attributes at our leisure.
class TypeDeclExt(c_ast.TypeDecl):
    # Identical to pycparser's TypeDecl except that it does not declare
    # __slots__, so extra attributes (e.g. asm, attributes) can be set on
    # instances after construction.
    @staticmethod
    def from_pycparser(td):
        """Copy a plain c_ast.TypeDecl into a TypeDeclExt."""
        assert isinstance(td, c_ast.TypeDecl)
        return TypeDeclExt(td.declname, td.quals, td.type, td.coord)
class ArrayDeclExt(c_ast.ArrayDecl):
    # Identical to pycparser's ArrayDecl except that it does not declare
    # __slots__, so extra attributes can be set on instances after
    # construction.
    @staticmethod
    def from_pycparser(ad):
        """Copy a plain c_ast.ArrayDecl into an ArrayDeclExt."""
        assert isinstance(ad, c_ast.ArrayDecl)
        return ArrayDeclExt(ad.type, ad.dim, ad.dim_quals, ad.coord)
def to_decl_ext(d):
    """Return the slot-free "Ext" counterpart of a TypeDecl or ArrayDecl."""
    conversions = (
        (c_ast.TypeDecl, TypeDeclExt.from_pycparser),
        (c_ast.ArrayDecl, ArrayDeclExt.from_pycparser),
    )
    for node_class, convert in conversions:
        if isinstance(d, node_class):
            return convert(d)
    raise TypeError("unexpected decl type: %s" % type(d).__name__)
class FuncDeclExt(c_ast.Node):
    """Like c_ast.FuncDecl, but also carries __attribute__ specifiers and
    an asm label."""

    # Child attributes, in the order they appear in children()/__iter__.
    _child_attrs = ("args", "type", "attributes", "asm")

    def __init__(self, args, type, attributes, asm, coord=None):
        self.args = args
        self.type = type
        self.attributes = attributes
        self.asm = asm
        self.coord = coord

    def children(self):
        # Only attributes that are set become (name, node) children.
        return tuple(
            (name, getattr(self, name))
            for name in self._child_attrs
            if getattr(self, name) is not None)

    def __iter__(self):
        for name in self._child_attrs:
            node = getattr(self, name)
            if node is not None:
                yield node

    attr_names = ()
# }}}
# {{{ attributes
class _AttributesMixin(object):
    # Grammar mixin adding GCC-style __attribute__((...)) parsing.
    # NOTE: the docstrings of the p_* methods below ARE the grammar -- PLY
    # builds its parse tables from them -- so they must not be edited.

    def p_attributes_opt_1(self, p):
        """ attributes_opt : attribute_decl attributes_opt
        """
        # Fold consecutive attribute_decls into a single ExprList.
        p[1].exprs.extend(p[2].exprs)
        p[0] = p[1]

    def p_attributes_opt_2(self, p):
        """ attributes_opt : empty
        """
        # "No attributes" is an empty ExprList (never None), so callers can
        # unconditionally inspect .exprs.
        p[0] = c_ast.ExprList([], self._coord(p.lineno(1)))

    def p_attribute_decl(self, p):
        """ attribute_decl : __ATTRIBUTE__ LPAREN LPAREN attribute_list RPAREN RPAREN
        | __ATTRIBUTE LPAREN LPAREN attribute_list RPAREN RPAREN
        """
        # The payload is the attribute_list inside the double parentheses.
        p[0] = p[4]

    def p_attribute_list_1(self, p):
        """ attribute_list : attribute
        """
        p[0] = c_ast.ExprList([p[1]], self._coord(p.lineno(1)))

    def p_attribute_list_2(self, p):
        """ attribute_list : attribute_list COMMA attribute
        """
        p[1].exprs.append(p[3])
        p[0] = p[1]

    def p_attribute_1(self, p):
        """ attribute : CONST
        """
        # 'const' is a keyword token, so it cannot be parsed as an
        # assignment_expression; map it to an ID node by hand.
        p[0] = c_ast.ID(name="const", coord=self._coord(p.lineno(1)))

    def p_attribute_3(self, p):
        """ attribute : assignment_expression
        """
        p[0] = p[1]

    def p_function_specifier_attr(self, p):
        """ function_specifier : attribute_decl
        """
        p[0] = AttributeSpecifier(p[1])
# }}}
# {{{ asm
class _AsmMixin(object):
    # Grammar mixin adding GCC-style inline assembly ("asm"/"__asm__").
    # NOTE: the docstrings of the p_* methods below ARE the grammar -- PLY
    # builds its parse tables from them -- so they must not be edited.

    def p_asm_opt_1(self, p):
        """ asm_opt : empty
        """
        p[0] = None

    def p_asm_opt_2(self, p):
        """ asm_opt : asm_no_semi
        """
        p[0] = p[1]

    def p_asm_1(self, p):
        """ asm_no_semi : asm_keyword LPAREN asm_argument_expression_list RPAREN
        """
        # asm(template) -- no operands or clobbers.
        p[0] = Asm(p[1], p[3], None, None, None, coord=self._coord(p.lineno(2)))

    def p_asm_2(self, p):
        """ asm_no_semi : asm_keyword LPAREN asm_argument_expression_list COLON \
                asm_argument_expression_list RPAREN
        """
        # asm(template : outputs)
        p[0] = Asm(p[1], p[3], p[5], None, None, coord=self._coord(p.lineno(2)))

    def p_asm_3(self, p):
        """ asm_no_semi : asm_keyword LPAREN asm_argument_expression_list COLON \
                asm_argument_expression_list COLON asm_argument_expression_list \
                RPAREN
        """
        # asm(template : outputs : inputs)
        p[0] = Asm(p[1], p[3], p[5], p[7], None, coord=self._coord(p.lineno(2)))

    def p_asm_4(self, p):
        """ asm_no_semi : asm_keyword LPAREN asm_argument_expression_list COLON \
                asm_argument_expression_list COLON asm_argument_expression_list \
                COLON asm_argument_expression_list RPAREN
        """
        # asm(template : outputs : inputs : clobbers)
        p[0] = Asm(p[1], p[3], p[5], p[7], p[9], coord=self._coord(p.lineno(2)))

    def p_asm_keyword(self, p):
        """ asm_keyword : __ASM__ asm_volatile_opt
        | __ASM asm_volatile_opt
        | ASM asm_volatile_opt
        """
        # Keep the exact keyword spelling, plus " volatile" if present.
        p[0] = p[1]
        if p[2]:
            p[0] += ' ' + p[2]

    def p_asm_volatile_opt(self, p):
        """ asm_volatile_opt : unified_volatile
        | empty
        """
        p[0] = p[1]

    def p_asm_argument_expression_list(self, p):
        """asm_argument_expression_list : argument_expression_list
        | empty
        """
        p[0] = p[1]

    def p_statement_asm(self, p):
        """ statement : asm_no_semi
        | asm_no_semi SEMI
        """
        p[0] = p[1]

    def p_asm_label_opt(self, p):
        """ asm_label_opt : asm_keyword LPAREN unified_string_literal RPAREN
        | empty
        """
        # Either an asm("label") rename after a declarator, or nothing.
        if p[1] is None:
            p[0] = None
        else:
            p[0] = Asm(p[1], p[3], None, None, None, coord=self._coord(p.lineno(2)))
# }}}
@template
class _AsmAndAttributesMixin(_AsmMixin, _AttributesMixin):
    """Combines asm-label and ``__attribute__`` support on declarators.

    NOTE: the docstrings of the ``p_*`` methods are PLY grammar rules
    and are reproduced verbatim; the method names must match the base C
    parser so these rules override it.
    """
    # {{{ /!\ names must match C parser to override
    @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID'))
    def p_xxx_declarator_1(self, p):
        """ xxx_declarator : direct_xxx_declarator asm_label_opt attributes_opt
        """
        if p[2] or p[3].exprs:
            # Wrap the declarator's type in an "ext" node that can carry
            # the asm label and/or attribute list.
            if isinstance(p[1], (c_ast.ArrayDecl, c_ast.FuncDecl)):
                decl_ext = to_decl_ext(p[1].type)
            elif isinstance(p[1], c_ast.TypeDecl):
                decl_ext = to_decl_ext(p[1])
            else:
                raise NotImplementedError(
                    "cannot attach asm or attributes to nodes of type '%s'"
                    % type(p[1]))
            if p[2]:
                decl_ext.asm = p[2]
            if p[3].exprs:
                decl_ext.attributes = p[3]
            p[1] = decl_ext
        p[0] = p[1]
    @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID'))
    def p_xxx_declarator_2(self, p):
        """ xxx_declarator : pointer direct_xxx_declarator asm_label_opt \
                attributes_opt
            | pointer attributes_opt direct_xxx_declarator \
                asm_label_opt
        """
        # The rule admits two orderings; detect which one matched: an
        # attribute ExprList has an ``exprs`` attribute, an asm label
        # node does not.
        if hasattr(p[4], "exprs"):
            attr_decl = p[4]
            asm_label = p[3]
            decl = p[2]
        else:
            attr_decl = p[2]
            asm_label = p[4]
            decl = p[3]
        if asm_label or attr_decl.exprs:
            if isinstance(decl, (c_ast.ArrayDecl, c_ast.FuncDecl)):
                decl_ext = to_decl_ext(decl.type)
            elif isinstance(decl, c_ast.TypeDecl):
                decl_ext = to_decl_ext(decl)
            else:
                # FIX: report the offending declarator node; the original
                # reported type(p[1]), which is the pointer, not the node
                # that cannot carry asm/attributes.
                raise NotImplementedError(
                    "cannot attach asm or attributes to nodes of type '%s'"
                    % type(decl))
            if asm_label:
                decl_ext.asm = asm_label
            if attr_decl.exprs:
                decl_ext.attributes = attr_decl
            # NOTE(review): this overwrites p[1] (the pointer) and then
            # passes it as the modifier below — looks suspicious, but is
            # kept as-is; confirm against upstream pycparserext.
            p[1] = decl_ext
        p[0] = self._type_modify_decl(decl, p[1])
    @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID'))
    def p_direct_xxx_declarator_6(self, p):
        """ direct_xxx_declarator : direct_xxx_declarator LPAREN parameter_type_list \
                RPAREN asm_opt attributes_opt
            | direct_xxx_declarator \
                LPAREN identifier_list_opt RPAREN \
                asm_label_opt attributes_opt
        """
        # Function declarator that can carry asm and attributes directly.
        func = FuncDeclExt(
            args=p[3],
            type=None,
            attributes=p[6],
            asm=p[5],
            coord=p[1].coord)
        p[0] = self._type_modify_decl(decl=p[1], modifier=func)
    def p_direct_abstract_declarator_6(self, p):
        """ direct_abstract_declarator : direct_abstract_declarator \
                LPAREN parameter_type_list_opt RPAREN asm_label_opt attributes_opt
        """
        func = FuncDeclExt(
            args=p[3],
            type=None,
            attributes=p[6],
            asm=p[5],
            coord=p[1].coord)
        p[0] = self._type_modify_decl(decl=p[1], modifier=func)
    # }}}
# }}}
# }}}
# {{{ gnu parser
class GnuCParser(_AsmAndAttributesMixin, CParserBase):
# TODO: __extension__
from pycparserext.ext_c_lexer import GnuCLexer as lexer_class # noqa
initial_type_symbols = set(["__builtin_va_list"])
    def p_function_specifier_gnu(self, p):
        """ function_specifier : __INLINE
            | __INLINE__
        """
        # Pass the GNU inline keyword through as a plain string specifier.
        p[0] = p[1]
    def p_type_qualifier_gnu(self, p):
        """ type_qualifier : __CONST
            | __RESTRICT
            | __RESTRICT__
            | __EXTENSION__
            | __VOLATILE
            | __VOLATILE__
        """
        # GNU double-underscore qualifiers behave like their plain
        # counterparts; keep the original spelling.
        p[0] = p[1]
def p_type_specifier_gnu_typeof_expr(self, p):
""" type_specifier : __TYPEOF__ LPAREN expression RPAREN
| TYPEOF LPAREN expression RPAREN
"""
if isinstance(p[3], c_ast.TypeDecl):
pass
p[0] = TypeOfExpression(p[1], p[3])
    def p_type_specifier_gnu_typeof_decl(self, p):
        """ type_specifier : __TYPEOF__ LPAREN parameter_declaration RPAREN
            | TYPEOF LPAREN parameter_declaration RPAREN
        """
        # typeof(<declaration>) — the argument parses as a parameter
        # declaration rather than an expression.
        p[0] = TypeOfDeclaration(p[1], p[3])
    def p_unary_operator_gnu(self, p):
        """ unary_operator : __REAL__
            | __IMAG__
        """
        # GNU complex-number component extraction operators.
        p[0] = p[1]
def p_postfix_expression_gnu_tcp(self, p):
""" postfix_expression : __BUILTIN_TYPES_COMPATIBLE_P \
LPAREN parameter_declaration COMMA parameter_declaration | |
"""
- environment with unfixed number of obstacles, env.reset(number of obstacles), maximum number == 50
- action-dim: 6 / 26 actions
- agent.reset_optimizer: learning rate decay
- reward: projection reward
- add target model, update its weights every target_update epochs
- add force reward for each step by invoking traj_mwpts function in generate_trajectory_try_2
"""
import argparse
import csv
from datetime import datetime
import logging
import math
import numpy as np
from numpy import *
import os
import random
import time
import torch
from torch import nn
from matplotlib import pyplot
from mpl_toolkits.mplot3d import Axes3D
from environment_v5 import Env
from generate_trajectory_try_2 import traj_mwpts
import pdb
# torch.manual_seed(0)
# torch.cuda.manual_seed(0)
# np.random.seed(0)
class ReplayBuffer(object):
    """Fixed-capacity FIFO replay buffer of transitions.

    Once ``capacity`` items are stored, each new item overwrites the
    oldest one in place.  (The original used ``list.pop(0)``, which is
    O(n) per insertion; the circular write index makes eviction O(1).)
    """
    def __init__(self, capacity):
        self.capacity = capacity
        self.buffer = []
        self._next_idx = 0  # slot holding the oldest item once full
    def add(self, item):
        """Store one transition, evicting the oldest when full."""
        if len(self.buffer) < self.capacity:
            self.buffer.append(item)
        else:
            self.buffer[self._next_idx] = item
        self._next_idx = (self._next_idx + 1) % self.capacity
    def sample(self, batch_size):
        """Return ``batch_size`` random transitions, transposed per field."""
        return zip(*random.sample(self.buffer, batch_size))
    def size(self):
        """Number of transitions currently stored."""
        return len(self.buffer)
class LinearNetwork(nn.Module):
    """Small fully connected Q-network: input -> 64 -> 256 -> num_actions."""
    def __init__(self, input_size, num_actions):
        """Create the three linear layers and shared ReLU.

        Layer attribute names (fc_in, fc1, fc_out, relu) are kept exactly
        as before so existing checkpoints still load via state_dict.
        """
        super(LinearNetwork, self).__init__()
        self.input_size = input_size
        self.num_actions = num_actions
        self.fc_in = nn.Linear(self.input_size, 64)
        self.relu = nn.ReLU()
        self.fc1 = nn.Linear(64, 256)
        self.fc_out = nn.Linear(256, self.num_actions)
    def forward(self, s_input):
        """Map a batch of state vectors to per-action Q-value logits."""
        hidden = self.relu(self.fc_in(s_input))
        hidden = self.relu(self.fc1(hidden))
        return self.fc_out(hidden)
class ConvNetwork(nn.Module):
    """3D-conv Q-network over a voxel grid plus location/goal vectors.

    Input is a tuple ``(state, loc, dest)``: a 3D occupancy grid and two
    3-vectors.  Output is per-action Q-value logits.

    NOTE(review): the layer sizes hard-code the spatial geometry — two
    3x3x3 convs (and the parallel 5x5x5 conv) must both collapse the grid
    to 1x1x1 so the flattened feature count is exactly 256 for ``fc1``.
    That only holds for a 5x5x5 input grid; ``input_size`` is stored but
    never used.  TODO confirm sensing_range always yields a 5x5x5 grid.
    """
    def __init__(self, input_size, num_actions):
        """Network structure is defined here
        """
        super(ConvNetwork, self).__init__()
        self.input_size = input_size
        self.num_actions = num_actions
        self.conv1 = nn.Conv3d(1, 128, 3, stride=1)
        self.conv2 = nn.Conv3d(128, 256, 3, stride=1)
        # Parallel single-conv branch, summed with the two-conv branch.
        self.conv1_1 = nn.Conv3d(1, 256, 5, stride=1)
        self.relu = nn.ReLU()
        self.fc1 = nn.Linear(256, 512)
        self.fc2 = nn.Linear(3, 32)
        self.fc3 = nn.Linear(3, 32)
        # 576 = 512 (state features) + 32 (loc) + 32 (dest) concatenated.
        self.fc4 = nn.Linear(576, 128)
        self.fc_out = nn.Linear(128, self.num_actions)
    def forward(self, s_input):
        (state, loc, dest) = s_input
        # Add a singleton channel dim: (B, D, H, W) -> (B, 1, D, H, W).
        state = state.unsqueeze(1)
        x_state_fe = self.conv1(state)
        x_state_fe = self.relu(x_state_fe)
        x_state_fe = self.conv2(x_state_fe)
        x_state_fe = self.relu(x_state_fe)
        x_state_fe_1 = self.conv1_1(state)
        x_state_fe_1 = self.relu(x_state_fe_1)
        # Residual-style sum of the two branches (shapes must match).
        x_state_fe = x_state_fe + x_state_fe_1
        x_state = self.fc1(x_state_fe.view(state.shape[0], -1))
        x_state = self.relu(x_state)
        x_loc = self.fc2(loc)
        x_loc = self.relu(x_loc)
        x_dest = self.fc3(dest)
        x_dest = self.relu(x_dest)
        x = torch.cat([x_state, x_loc, x_dest], -1)
        x = self.fc4(x)
        x = self.relu(x)
        out = self.fc_out(x)
        return out
class Agent(object):
    """DQN agent: epsilon-greedy acting plus replay-buffer learning.

    Holds an online network (``model``) and a frozen target network
    (``target_model``) whose weights are copied from the online one via
    :meth:`update_target`.
    """
    def __init__(self, args):
        """Build online/target networks, replay buffer, and optimizer.

        :param args: parsed CLI namespace; fields used here: eval,
            load_pretrained, buffer_size, batch_size, action_dim, gamma,
            lr, mode ('linear'|'conv'), state_dim or sensing_range,
            save_weights_dir.
        """
        self.is_training = not args.eval
        self.load_pretrained = args.load_pretrained
        assert args.buffer_size >= args.batch_size
        self.batch_size = args.batch_size
        self.buffer = ReplayBuffer(args.buffer_size)
        self.action_dim = args.action_dim
        self.gamma = args.gamma
        self.lr = args.lr
        self.model = None
        self.target_model = None
        if args.mode == "linear":
            self.model = LinearNetwork(args.state_dim, args.action_dim).cuda()
            self.target_model = LinearNetwork(args.state_dim, args.action_dim).cuda()
        elif args.mode == "conv":
            self.model = ConvNetwork(args.sensing_range, args.action_dim).cuda()
            self.target_model = ConvNetwork(args.sensing_range, args.action_dim).cuda()
        assert self.model is not None
        assert self.target_model is not None
        if args.load_pretrained:
            pre_weight_path = os.path.join(
                args.save_weights_dir, 'saved_weights_{}_yantao.pth.tar'.format(args.mode))
            # Alternative checkpoint names used in earlier experiments:
            # args.save_weights_dir, 'saved_weights_{}_10_0.7.pth.tar'.format(args.mode))
            # args.save_weights_dir, 'saved_weights_{}.pth.tar'.format(args.mode))
            if os.path.isfile(pre_weight_path):
                print("=> loading checkpoint '{}'".format(pre_weight_path))
                checkpoint = torch.load(pre_weight_path)
                self.model.load_state_dict(checkpoint['state_dict'])
            else:
                raise ValueError('Weight path does not exist.')
        # Target starts as an exact copy; online net trains, target is frozen.
        self.update_target()
        self.model.train()
        self.target_model.eval()
        self.reset_optimizer(self.lr)
    def print_model_weight(self):
        """Debug helper: dump every trainable parameter tensor."""
        for name, param in self.model.named_parameters():
            if param.requires_grad:
                print(name, param.data)
    def reset_optimizer(self, lr):
        """(Re)create the optimizer for ``self.model`` at learning rate ``lr``.

        NOTE(review): this reads the module-level ``args`` (set by
        argparse at script level), not the ``args`` given to ``__init__``,
        and the Adam branch is spelled 'admm' — confirm both are
        intentional.
        """
        if args.optimizer == 'admm':
            self.model_optim = torch.optim.Adam(
                self.model.parameters(), lr=lr)
        elif args.optimizer == 'sgd':
            self.model_optim = torch.optim.SGD(
                self.model.parameters(), lr=lr, momentum=0.5,
                weight_decay=args.weight_decay)
        return
    def update_target(self):
        """Copy online-network weights into the target network."""
        print("=> updating target network weights...")
        self.target_model.load_state_dict(self.model.state_dict())
    def act(self, state, epsilon=0.0, prev_is_hit_bound=False, topk_rand=1):
        """Pick an action index for ``state``, epsilon-greedily.

        With probability ``epsilon`` (training only — epsilon is forced to
        0 in eval mode) a uniformly random action is returned.  Otherwise
        one of the ``topk_rand`` highest-Q actions is chosen at random;
        after a boundary hit (``prev_is_hit_bound``) a random action is
        forced instead of the greedy one.
        """
        if not self.is_training:
            epsilon = 0.0
        if random.random() >= epsilon:
            # States may be tuples (conv mode: grid, loc, dest) or a flat
            # vector (linear mode); add a batch dim either way.
            if isinstance(state, tuple):
                state_var = []
                for temp in state:
                    state_var.append(
                        torch.tensor(
                            temp, dtype=torch.float).unsqueeze(0).cuda()
                    )
                state_var = tuple(state_var)
            else:
                state_var = torch.tensor(
                    state, dtype=torch.float).unsqueeze(0).cuda()
            # self.model.eval()
            logits = self.model(state_var).detach().cpu().numpy()
            if not prev_is_hit_bound:
                # argsort is ascending, so index from the end for top-k.
                actions_sort = np.argsort(logits[0], -1)
                rand_idx = np.random.randint(topk_rand)
                action = actions_sort[-1 * rand_idx - 1]
            else:
                action = random.randrange(self.action_dim)
        else:
            assert self.is_training == True
            action = random.randrange(self.action_dim)
        return action
    def learning(self):
        """Sample one minibatch from the buffer and do a DQN update.

        Target Q-values are bootstrapped from the frozen ``target_model``;
        the loss is the larger of the MSE and MAE between predicted and
        target Q.  Returns the scalar loss value.
        """
        data_list = self.buffer.sample(self.batch_size)
        (states_curt, action_curt, rewards_curt, states_next, is_dones) = \
            self._stack_to_numpy(data_list)
        if isinstance(states_curt, tuple):
            states_curt_var = []
            for temp in states_curt:
                states_curt_var.append(
                    torch.tensor(
                        temp, dtype=torch.float).cuda()
                )
            states_curt_var = tuple(states_curt_var)
        else:
            states_curt_var = torch.tensor(
                states_curt, dtype=torch.float).cuda()
        action_curt_var = torch.tensor(
            action_curt, dtype=torch.long).cuda()
        rewards_curt_var = torch.tensor(
            rewards_curt, dtype=torch.float).cuda()
        if isinstance(states_next, tuple):
            states_next_var = []
            for temp in states_next:
                states_next_var.append(
                    torch.tensor(
                        temp, dtype=torch.float).cuda()
                )
            states_next_var = tuple(states_next_var)
        else:
            states_next_var = torch.tensor(
                states_next, dtype=torch.float).cuda()
        is_dones_var = torch.tensor(
            is_dones, dtype=torch.float).cuda()
        # if self.is_training and not self.load_pretrained:
        if self.is_training:
            self.model.train()
        else:
            self.model.eval()
        logits_curt_var = self.model(states_curt_var)
        # Q(s, a) for the actions actually taken.
        q_value = logits_curt_var.gather(1, action_curt_var.unsqueeze(1)).squeeze(1)
        logits_next_var = self.target_model(states_next_var)
        next_q_value = logits_next_var.max(1)[0]
        # Terminal transitions (is_done == 1) drop the bootstrap term.
        expected_q_value = rewards_curt_var + \
            self.gamma * next_q_value * (1 - is_dones_var)
        loss_mse = (q_value - expected_q_value.detach()).pow(2).mean()
        loss_mae = torch.abs(q_value - expected_q_value.detach()).mean()
        loss = torch.max(loss_mse, loss_mae)
        self.model_optim.zero_grad()
        loss.backward()
        self.model_optim.step()
        return loss.detach().item()
    def _stack_to_numpy(self, data_list):
        """Stack zipped transition fields into numpy arrays.

        Fields whose elements are tuples (e.g. conv-mode states) are
        transposed into a tuple of stacked arrays, one per component;
        scalar fields become a single array.
        """
        ret = []
        for temp_data in data_list:
            if isinstance(temp_data[0], tuple):
                temp_list = []
                tuple_size = len(temp_data[0])
                for _ in range(tuple_size):
                    temp_list.append([])
                for curt_tup in temp_data:
                    for idx in range(tuple_size):
                        temp_list[idx].append(curt_tup[idx])
                temp_ret_list = []
                for temp in temp_list:
                    temp_ret_list.append(np.array(temp))
                ret.append(tuple(temp_ret_list))
            else:
                temp_np = np.array(temp_data)
                ret.append(temp_np)
        return ret
class Trainer(object):
    def __init__(self, agent, env, args):
        """Wire up the agent/environment pair and the epsilon schedule.

        :param agent: Agent providing act()/learning()/buffer.
        :param env: environment with reset()/step()/get_state().
        :param args: parsed CLI namespace (max_steps, batch_size,
            save_epochs, save_weights_dir, num_obst, epsilon settings).
        """
        self.args = args
        self.agent = agent
        self.env = env
        self.max_steps = args.max_steps
        self.batch_size = args.batch_size
        self.save_epochs = args.save_epochs
        self.save_weights_dir = args.save_weights_dir
        self.num_obst = args.num_obst
        # non-Linear epsilon decay
        epsilon_final = args.epsilon_min
        epsilon_start = args.epsilon
        epsilon_decay = args.epsilon_decay
        if args.enable_epsilon:
            # Exponential decay stepped every ``epsilon_decay`` episodes
            # (note the integer division inside exp()).
            self.epsilon_by_frame = \
                lambda frame_idx: epsilon_final + \
                (epsilon_start - epsilon_final) * math.exp(
                    -1. * (frame_idx // epsilon_decay))
        else:
            # Epsilon disabled: always act greedily.
            self.epsilon_by_frame = lambda frame_idx: 0.0
    def train(self):
        """Run the training loop indefinitely.

        Per episode: reset the env with a random obstacle count, roll out
        up to ``max_steps`` epsilon-greedy steps (optionally subtracting a
        trajectory-thrust penalty from each reward), push transitions into
        the replay buffer, do one learning step per episode, periodically
        sync the target network, and append per-episode stats to a CSV.
        Checkpoints are written every ``save_epochs`` episodes.

        NOTE(review): reads the module-level ``args`` for target_update,
        verbose and mode (not ``self.args``) — confirm intentional.
        """
        timestamp = datetime.now()
        time_str = timestamp.strftime("%H_%M_%S")
        loss_reward_filepath = os.path.join('.', 'loss_reward_{}.csv'.format(time_str))
        if os.path.exists(loss_reward_filepath):
            os.remove(loss_reward_filepath)
        lr = self.agent.lr
        episode = 0
        while True:
            episode += 1
            episode_reward = 0
            is_done = False
            is_goal = False
            prev_is_hit_bound = False
            steps = 0
            num_obst = 0
            num_outbound = 0
            epsilon = self.epsilon_by_frame(episode)
            logging.info('epsilon: {0:.04f}'.format(epsilon))
            actions = []
            loss_list = []
            rewards = []
            # Initialize environment with a different number of obstacles
            self.num_obst = random.randint(0, 10)
            self.env.reset(self.num_obst)
            state_curt = self.env.get_state()
            # state_curt[1] is the normalized drone position; scale back
            # to world units for the trajectory generator.
            segment = np.array(state_curt[1] * self.args.env_size)
            velocity_curt = np.array((0, 0, 0.001))
            acceler_curt = np.array((0, 0, 0))
            gerk_curt = np.array((0, 0, 0))
            waypoints = []
            while (not is_done) and (steps <= self.max_steps):
                action_curt = self.agent.act(state_curt, epsilon=epsilon, prev_is_hit_bound=prev_is_hit_bound, topk_rand=2)
                actions.append(action_curt)
                reward_curt, is_done, reward_info = self.env.step(action_curt)
                num_obst += int(reward_info['is_obst'])
                num_outbound += int(reward_info['is_bound'])
                prev_is_hit_bound = reward_info['is_bound']
                if reward_info['is_goal']:
                    is_goal = True
                waypoints.append(list(self.env.objs_info['drone_pos']))
                state_next = self.env.get_state()
                # calculate force reward (thrust penalty) over the
                # trajectory through all waypoints so far
                if self.args.thrust_reward:
                    segment = vstack((segment, np.array(state_next[1] * self.args.env_size)))
                    num = segment.shape[0]
                    t = np.asarray([0])
                    for i in range(num - 1):
                        t = hstack((t, 6 * (i + 1)))
                    path, f, norm_f, velocity_next, acceler_next, gerk_next = \
                        traj_mwpts(t, segment.T, np.array([velocity_curt]).T,
                            np.array([acceler_curt]).T, np.array([gerk_curt]).T)
                    # Sigmoid of mean thrust, scaled down; subtracted so
                    # high-force paths are penalized.
                    force_reward = 1 / (1 + math.exp(-1 * np.sum(norm_f)/norm_f.shape[1])) / \
                        self.args.grid_resolution / self.args.env_size
                    reward_curt -= force_reward
                self.agent.buffer.add((state_curt, action_curt, reward_curt, state_next, is_done))
                state_curt = state_next
                episode_reward += reward_curt
                rewards.append(reward_curt)
                # loss = 0.0
                # if self.agent.buffer.size() >= self.batch_size:
                #     loss = self.agent.learning()
                # loss_list.append(loss)
                steps += 1
            # One gradient update per episode (per-step updates above are
            # commented out).
            if self.agent.buffer.size() >= self.batch_size:
                loss = self.agent.learning()
                loss_list.append(loss)
            loss_avg = sum(loss_list) / max(len(loss_list), 1)
            waypoints.append(list(self.env.objs_info['drone_pos']))
            # plot_env(self.env, waypoints)
            # update target model weights
            if episode % args.target_update == 0:
                self.agent.update_target()
            if int(args.verbose) >= 2:
                print('actions: ', actions)
            logging.info('loss_avg: {0:.04f}'.format(loss_avg))
            print('episode: {0:05d}, step: {1:03d}, reward: {2:.04f}, num_obst: {3:01d}, num_outbound: {7:01d}, is_goal: {4}, start: {5}, target: {6}'.format(
                episode,
                steps,
                episode_reward,
                num_obst,
                is_goal,
                self.env.objs_info['drone_pos_start'],
                self.env.objs_info['goal'],
                num_outbound
            ))
            if episode % 100 == 0:
                print('actions: \n', actions)
            # learning decay
            # if episode % 5000 == 0:
            #     lr *= 0.8
            #     self.agent.reset_optimizer(lr)
            # plot reward and loss
            with open(loss_reward_filepath, 'a', newline='') as csvfile:
                writer = csv.writer(csvfile, delimiter=',',
                                quotechar='|', quoting=csv.QUOTE_MINIMAL)
                writer.writerow([loss_avg, episode_reward, num_obst, int(is_goal)])
            if episode % self.save_epochs == 0:
                save_dic = {
                    'args' : args,
                    'episode' : episode,
                    'state_dict' : self.agent.model.state_dict()
                }
                if not os.path.exists(self.save_weights_dir):
                    os.mkdir(self.save_weights_dir)
                torch.save(save_dic, os.path.join(
                    self.save_weights_dir, 'saved_weights_{}_yantao.pth.tar'.format(args.mode)))
def eval(self):
episode = 0
success = 0
while True:
episode += 1
episode_reward = 0
is_done = False
is_goal = False
steps = 0
num_obst = 0
actions = []
loss_list = []
rewards = []
# Intialize environment
obs_num = random.randint(0, 10)
self.env.reset(obs_num)
state_curt = self.env.get_state()
segment = np.array(state_curt[1] * self.args.env_size)
velocity_curt = np.array((0, 0, 0.001))
acceler_curt = np.array((0, 0, 0))
gerk_curt = np.array((0, 0, 0))
while (not is_done) and (steps <= self.max_steps):
epsilon = self.epsilon_by_frame(episode)
action_curt = self.agent.act(state_curt, epsilon=0.0)
actions.append(action_curt)
reward_curt, is_done, reward_info = self.env.step(action_curt)
num_obst += int(reward_info['is_obst'])
if reward_info['is_goal']:
is_goal = True
success += 1
state_next = self.env.get_state()
#calculate force reward
segment = vstack((segment, np.array(state_next[1] * self.args.env_size)))
num = segment.shape[0]
t = np.asarray([0])
for i in | |
# Copyright (c) 2014 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from testtools import TestCase
from kmip.core.attributes import CryptographicAlgorithm
from kmip.core.attributes import CryptographicLength
from kmip.core.attributes import CryptographicUsageMask
from kmip.core.attributes import UniqueIdentifier
from kmip.core.attributes import ObjectType
from kmip.core.attributes import Name
from kmip.core.enums import AttributeType
from kmip.core.enums import CryptographicAlgorithm as CryptoAlgorithmEnum
from kmip.core.enums import CryptographicUsageMask as CryptoUsageMaskEnum
from kmip.core.enums import KeyCompressionType as KeyCompressionTypeEnum
from kmip.core.enums import KeyFormatType as KeyFormatTypeEnum
from kmip.core.enums import ObjectType as ObjectTypeEnum
from kmip.core.enums import ResultReason
from kmip.core.enums import ResultStatus
from kmip.core.enums import NameType
from kmip.core.factories.attributes import AttributeFactory
from kmip.core.messages.contents import KeyCompressionType
from kmip.core.misc import KeyFormatType
from kmip.core.objects import KeyBlock
from kmip.core.objects import KeyMaterial
from kmip.core.objects import KeyValue
from kmip.core.objects import TemplateAttribute
from kmip.core.secrets import SymmetricKey
from kmip.core.server import KMIPImpl
class TestKMIPServer(TestCase):
def setUp(self):
super(TestKMIPServer, self).setUp()
self.kmip = KMIPImpl()
self.algorithm_name = CryptoAlgorithmEnum.AES
self.key_length = 256
self.key = bytearray(range(0, 32))
self.usage_mask = CryptoUsageMaskEnum.ENCRYPT.value |\
CryptoUsageMaskEnum.DECRYPT.value
    def tearDown(self):
        """Delegate cleanup to the TestCase base class."""
        super(TestKMIPServer, self).tearDown()
def test_create(self):
obj_type = ObjectType(ObjectTypeEnum.SYMMETRIC_KEY)
attributes = self._get_attrs()
template_attribute = TemplateAttribute(attributes=attributes)
res = self.kmip.create(obj_type, template_attribute)
self.assertNotEqual(None, res, 'result is None')
self.assertEqual(ResultStatus.SUCCESS, res.result_status.value,
'result status did not return success')
def test_create_no_length(self):
obj_type = ObjectType(ObjectTypeEnum.SYMMETRIC_KEY)
attributes = self._get_attrs()[0:2]
template_attribute = TemplateAttribute(attributes=attributes)
res = self.kmip.create(obj_type, template_attribute)
self.assertNotEqual(None, res, 'result is None')
attrs = res.template_attribute.attributes
self.assertEqual(ResultStatus.SUCCESS, res.result_status.value,
'result status did not return success')
self.assertTrue(self._check_attr_exists(attributes[2], attrs),
'length attribute not returned')
def test_create_no_alg(self):
obj_type = ObjectType(ObjectTypeEnum.SYMMETRIC_KEY)
attributes = [self._get_attrs()[1]]
template_attribute = TemplateAttribute(attributes=attributes)
res = self.kmip.create(obj_type, template_attribute)
self.assertNotEqual(None, res, 'result is None')
self.assertEqual(
ResultStatus.OPERATION_FAILED,
res.result_status.value,
'result status did not return failed')
def test_create_no_usage_mask(self):
obj_type = ObjectType(ObjectTypeEnum.SYMMETRIC_KEY)
attributes = [self._get_attrs()[0]]
template_attribute = TemplateAttribute(attributes=attributes)
res = self.kmip.create(obj_type, template_attribute)
self.assertNotEqual(None, res, 'result is None')
self.assertEqual(
ResultStatus.OPERATION_FAILED,
res.result_status.value,
'result status did not return failed')
def test_register(self):
obj_type = ObjectType(ObjectTypeEnum.SYMMETRIC_KEY)
key = self._get_symmetric_key()
attributes = []
template_attribute = TemplateAttribute(attributes=attributes)
res = self.kmip.register(obj_type, template_attribute, key)
self.assertNotEqual(None, res, 'result is None')
self.assertEqual(ResultStatus.SUCCESS, res.result_status.value,
'result status did not return success')
def test_register_attrs_in_key_value(self):
obj_type = ObjectType(ObjectTypeEnum.SYMMETRIC_KEY)
key = self._get_symmetric_key()
key.key_block.cryptographic_algorithm = None
key.key_block.cryptographic_length = None
key.key_block.key_value.attributes = self._get_attrs()
attributes = []
template_attribute = TemplateAttribute(attributes=attributes)
res = self.kmip.register(obj_type, template_attribute, key)
self.assertNotEqual(None, res, 'result is None')
self.assertEqual(ResultStatus.SUCCESS, res.result_status.value,
'result status did not return success')
def test_register_attrs_in_template(self):
obj_type = ObjectType(ObjectTypeEnum.SYMMETRIC_KEY)
key = self._get_symmetric_key()
key.key_block.cryptographic_algorithm = None
key.key_block.cryptographic_length = None
key.key_block.key_value.attributes = []
attributes = self._get_attrs()
template_attribute = TemplateAttribute(attributes=attributes)
res = self.kmip.register(obj_type, template_attribute, key)
self.assertNotEqual(None, res, 'result is None')
self.assertEqual(ResultStatus.SUCCESS, res.result_status.value,
'result status did not return success')
def test_register_no_alg(self):
obj_type = ObjectType(ObjectTypeEnum.SYMMETRIC_KEY)
key = self._get_symmetric_key()
key.key_block.cryptographic_algorithm = None
attributes = []
template_attribute = TemplateAttribute(attributes=attributes)
res = self.kmip.register(obj_type, template_attribute, key)
self.assertEqual(ResultStatus.OPERATION_FAILED,
res.result_status.value,
'result status did not return failed')
self.assertEqual(ResultReason.ITEM_NOT_FOUND,
res.result_reason.value,
'result reason did not match')
def test_register_alg_in_key_value_and_key_block(self):
obj_type = ObjectType(ObjectTypeEnum.SYMMETRIC_KEY)
key = self._get_symmetric_key()
key.key_block.key_value.attributes = [self._get_alg_attr()]
attributes = []
template_attribute = TemplateAttribute(attributes=attributes)
res = self.kmip.register(obj_type, template_attribute, key)
self.assertEqual(ResultStatus.OPERATION_FAILED,
res.result_status.value,
'result status did not return failed')
self.assertEqual(ResultReason.INDEX_OUT_OF_BOUNDS,
res.result_reason.value,
'result reason did not match')
def test_register_alg_in_template_and_key_block(self):
obj_type = ObjectType(ObjectTypeEnum.SYMMETRIC_KEY)
key = self._get_symmetric_key()
attributes = [self._get_alg_attr()]
template_attribute = TemplateAttribute(attributes=attributes)
res = self.kmip.register(obj_type, template_attribute, key)
self.assertEqual(ResultStatus.OPERATION_FAILED,
res.result_status.value,
'result status did not return failed')
self.assertEqual(ResultReason.INDEX_OUT_OF_BOUNDS,
res.result_reason.value,
'result reason did not match')
def test_register_alg_in_template_and_key_value(self):
obj_type = ObjectType(ObjectTypeEnum.SYMMETRIC_KEY)
key = self._get_symmetric_key()
key.key_block.cryptographic_algorithm = None
key.key_block.key_value.attributes = [self._get_alg_attr()]
attributes = [self._get_alg_attr()]
template_attribute = TemplateAttribute(attributes=attributes)
res = self.kmip.register(obj_type, template_attribute, key)
self.assertEqual(ResultStatus.OPERATION_FAILED,
res.result_status.value,
'result status did not return failed')
self.assertEqual(ResultReason.INDEX_OUT_OF_BOUNDS,
res.result_reason.value,
'result reason did not match')
    def test_register_invalid_alg(self):
        """Every algorithm the server does not support must be rejected
        with INVALID_FIELD."""
        unsupported_algs = (CryptoAlgorithmEnum.RSA,
                            CryptoAlgorithmEnum.DSA,
                            CryptoAlgorithmEnum.ECDSA,
                            CryptoAlgorithmEnum.HMAC_SHA1,
                            CryptoAlgorithmEnum.HMAC_SHA224,
                            CryptoAlgorithmEnum.HMAC_SHA256,
                            CryptoAlgorithmEnum.HMAC_SHA384,
                            CryptoAlgorithmEnum.HMAC_SHA512,
                            CryptoAlgorithmEnum.HMAC_MD5,
                            CryptoAlgorithmEnum.DH,
                            CryptoAlgorithmEnum.ECDH,
                            CryptoAlgorithmEnum.ECMQV,
                            CryptoAlgorithmEnum.BLOWFISH,
                            CryptoAlgorithmEnum.CAMELLIA,
                            CryptoAlgorithmEnum.CAST5,
                            CryptoAlgorithmEnum.IDEA,
                            CryptoAlgorithmEnum.MARS,
                            CryptoAlgorithmEnum.RC2,
                            CryptoAlgorithmEnum.RC4,
                            CryptoAlgorithmEnum.RC5,
                            CryptoAlgorithmEnum.SKIPJACK,
                            CryptoAlgorithmEnum.TWOFISH)
        for alg in unsupported_algs:
            # Fresh key/type objects per iteration so failures are isolated.
            obj_type = ObjectType(ObjectTypeEnum.SYMMETRIC_KEY)
            key = self._get_symmetric_key()
            key.key_block.cryptographic_algorithm = CryptographicAlgorithm(alg)
            attributes = []
            template_attribute = TemplateAttribute(attributes=attributes)
            res = self.kmip.register(obj_type, template_attribute, key)
            self.assertEqual(ResultStatus.OPERATION_FAILED,
                             res.result_status.value,
                             'result status did not return failed')
            self.assertEqual(ResultReason.INVALID_FIELD,
                             res.result_reason.value,
                             'result reason did not match')
def test_register_no_length(self):
obj_type = ObjectType(ObjectTypeEnum.SYMMETRIC_KEY)
key = self._get_symmetric_key()
key.key_block.cryptographic_length = None
attributes = []
template_attribute = TemplateAttribute(attributes=attributes)
res = self.kmip.register(obj_type, template_attribute, key)
self.assertEqual(ResultStatus.OPERATION_FAILED,
res.result_status.value,
'result status did not return failed')
self.assertEqual(ResultReason.ITEM_NOT_FOUND,
res.result_reason.value,
'result reason did not match')
def test_register_length_in_key_value_and_key_block(self):
obj_type = ObjectType(ObjectTypeEnum.SYMMETRIC_KEY)
key = self._get_symmetric_key()
key.key_block.key_value.attributes = [self._get_length_attr()]
attributes = []
template_attribute = TemplateAttribute(attributes=attributes)
res = self.kmip.register(obj_type, template_attribute, key)
self.assertEqual(ResultStatus.OPERATION_FAILED,
res.result_status.value,
'result status did not return failed')
self.assertEqual(ResultReason.INDEX_OUT_OF_BOUNDS,
res.result_reason.value,
'result reason did not match')
def test_register_length_in_template_and_key_block(self):
obj_type = ObjectType(ObjectTypeEnum.SYMMETRIC_KEY)
key = self._get_symmetric_key()
attributes = [self._get_length_attr()]
template_attribute = TemplateAttribute(attributes=attributes)
res = self.kmip.register(obj_type, template_attribute, key)
self.assertEqual(ResultStatus.OPERATION_FAILED,
res.result_status.value,
'result status did not return failed')
self.assertEqual(ResultReason.INDEX_OUT_OF_BOUNDS,
res.result_reason.value,
'result reason did not match')
def test_register_length_in_template_and_key_value(self):
obj_type = ObjectType(ObjectTypeEnum.SYMMETRIC_KEY)
key = self._get_symmetric_key()
key.key_block.cryptographic_length = None
key.key_block.key_value.attributes = [self._get_length_attr()]
attributes = [self._get_length_attr()]
template_attribute = TemplateAttribute(attributes=attributes)
res = self.kmip.register(obj_type, template_attribute, key)
self.assertEqual(ResultStatus.OPERATION_FAILED,
res.result_status.value,
'result status did not return failed')
self.assertEqual(ResultReason.INDEX_OUT_OF_BOUNDS,
res.result_reason.value,
'result reason did not match')
def test_register_invalid_length(self):
unsupported_lens = (-1, 0, 2048, 5, 18)
for len in unsupported_lens:
obj_type = ObjectType(ObjectTypeEnum.SYMMETRIC_KEY)
key = self._get_symmetric_key()
key.key_block.cryptographic_length = CryptographicLength(len)
attributes = []
template_attribute = TemplateAttribute(attributes=attributes)
res = self.kmip.register(obj_type, template_attribute, key)
self.assertEqual(ResultStatus.OPERATION_FAILED,
res.result_status.value,
'result status did not return failed')
self.assertEqual(ResultReason.INVALID_FIELD,
res.result_reason.value,
'result reason did not match')
def test_register_no_usage_mask(self):
obj_type = ObjectType(ObjectTypeEnum.SYMMETRIC_KEY)
key = self._get_symmetric_key()
key.key_block.key_value.attributes = []
attributes = []
template_attribute = TemplateAttribute(attributes=attributes)
res = self.kmip.register(obj_type, template_attribute, key)
self.assertEqual(ResultStatus.OPERATION_FAILED,
res.result_status.value,
'result status did not return failed')
self.assertEqual(ResultReason.ITEM_NOT_FOUND,
res.result_reason.value,
'result reason did not match')
def test_register_no_object_type(self):
obj_type = None
key = self._get_symmetric_key()
attributes = []
template_attribute = TemplateAttribute(attributes=attributes)
res = self.kmip.register(obj_type, template_attribute, key)
self.assertNotEqual(None, res, 'result is None')
self.assertEqual(ResultStatus.OPERATION_FAILED,
res.result_status.value,
'result status did not return failed')
self.assertEqual(ResultReason.ITEM_NOT_FOUND,
res.result_reason.value,
'result reason did not match')
    def test_register_unsupported_object_type(self):
        """Object types the server does not support are rejected with
        INVALID_FIELD."""
        unsupported_types = (ObjectTypeEnum.CERTIFICATE,
                             ObjectTypeEnum.PUBLIC_KEY,
                             ObjectTypeEnum.PRIVATE_KEY,
                             ObjectTypeEnum.SPLIT_KEY,
                             ObjectTypeEnum.TEMPLATE,
                             ObjectTypeEnum.SECRET_DATA,
                             ObjectTypeEnum.OPAQUE_DATA)
        for unsupported_type in unsupported_types:
            obj_type = ObjectType(unsupported_type)
            key = self._get_symmetric_key()
            attributes = []
            template_attribute = TemplateAttribute(attributes=attributes)
            res = self.kmip.register(obj_type, template_attribute, key)
            self.assertNotEqual(None, res, 'result is None')
            self.assertEqual(ResultStatus.OPERATION_FAILED,
                             res.result_status.value,
                             'result status did not return failed')
            self.assertEqual(ResultReason.INVALID_FIELD,
                             res.result_reason.value,
                             'result reason did not match')
    def test_register_object_type_mismatch(self):
        """Registering a symmetric key under a non-symmetric object type
        must fail.

        NOTE(review): this test body is byte-identical to
        ``test_register_unsupported_object_type`` above — since every
        listed type is also unsupported, the server likely rejects on
        "unsupported" before any mismatch check, so a genuine
        type-vs-payload mismatch is never exercised.  TODO: confirm
        intent and, if needed, register a non-symmetric secret instead.
        """
        unsupported_types = (ObjectTypeEnum.CERTIFICATE,
                             ObjectTypeEnum.PUBLIC_KEY,
                             ObjectTypeEnum.PRIVATE_KEY,
                             ObjectTypeEnum.SPLIT_KEY,
                             ObjectTypeEnum.TEMPLATE,
                             ObjectTypeEnum.SECRET_DATA,
                             ObjectTypeEnum.OPAQUE_DATA)
        for unsupported_type in unsupported_types:
            obj_type = ObjectType(unsupported_type)
            key = self._get_symmetric_key()
            attributes = []
            template_attribute = TemplateAttribute(attributes=attributes)
            res = self.kmip.register(obj_type, template_attribute, key)
            self.assertNotEqual(None, res, 'result is None')
            self.assertEqual(ResultStatus.OPERATION_FAILED,
                             res.result_status.value,
                             'result status did not return failed')
            self.assertEqual(ResultReason.INVALID_FIELD,
                             res.result_reason.value,
                             'result reason did not match')
def test_get(self):
uuid = self._create()
key_format_type = KeyFormatType(KeyFormatTypeEnum.RAW)
res = self.kmip.get(uuid, key_format_type)
self.assertEqual(ResultStatus.SUCCESS, res.result_status.value,
'result status did not return success')
def test_get_no_key_format_type(self):
uuid = self._create()
res = self.kmip.get(uuid, None)
self.assertEqual(ResultStatus.SUCCESS, res.result_status.value,
'result status did not return success')
def test_get_unknown(self):
uuids = ('some random string', UniqueIdentifier('no key here'))
for uuid in uuids:
key_format_type = KeyFormatType(KeyFormatTypeEnum.RAW)
res = self.kmip.get(uuid, key_format_type)
self.assertEqual(ResultStatus.OPERATION_FAILED,
res.result_status.value,
'result status did not return failed')
self.assertEqual(ResultReason.ITEM_NOT_FOUND,
res.result_reason.value,
'result reason did not match')
def test_get_no_uuid(self):
self._create()
key_format_type = KeyFormatType(KeyFormatTypeEnum.RAW)
res = self.kmip.get(None, key_format_type)
self.assertEqual(ResultStatus.OPERATION_FAILED,
res.result_status.value,
'result status did not return failed')
def test_get_with_key_compression(self):
uuid = self._create()
key_format_type = KeyFormatType(KeyFormatTypeEnum.RAW)
key_compression = KeyCompressionType(KeyCompressionTypeEnum.
EC_PUBLIC_KEY_TYPE_UNCOMPRESSED)
res = self.kmip.get(uuid, key_format_type, key_compression)
self.assertEqual(ResultStatus.OPERATION_FAILED,
res.result_status.value,
'result status did not return failed')
self.assertEqual(ResultReason.KEY_COMPRESSION_TYPE_NOT_SUPPORTED,
res.result_reason.value,
'result reason did not match')
def test_destroy(self):
uuid = self._create()
key_format_type = KeyFormatType(KeyFormatTypeEnum.RAW)
res = self.kmip.get(uuid, key_format_type)
self.assertEqual(ResultStatus.SUCCESS, res.result_status.value,
'result status did not return success')
res = self.kmip.destroy(uuid)
self.assertEqual(ResultStatus.SUCCESS, res.result_status.value,
'result status did not return success')
res = self.kmip.destroy(uuid)
self.assertEqual(ResultStatus.OPERATION_FAILED,
res.result_status.value,
'result status did not return failed')
self.assertEqual(ResultReason.ITEM_NOT_FOUND,
res.result_reason.value,
'result reason did not match')
def test_destroy_no_uuid(self):
res = self.kmip.destroy(None)
self.assertEqual(ResultStatus.OPERATION_FAILED,
res.result_status.value,
'result status did not return failed')
self.assertEqual(ResultReason.ITEM_NOT_FOUND,
res.result_reason.value,
'result reason did not match')
def test_destroy_unknown(self):
uuids = ('some random string', UniqueIdentifier('no key here'))
for uuid in uuids:
key_format_type = KeyFormatType(KeyFormatTypeEnum.RAW)
res = self.kmip.get(uuid, key_format_type)
self.assertEqual(ResultStatus.OPERATION_FAILED,
res.result_status.value,
'result status did not return failed')
res = self.kmip.destroy(uuid)
self.assertEqual(ResultStatus.OPERATION_FAILED,
res.result_status.value,
'result status did not return failed')
self.assertEqual(ResultReason.ITEM_NOT_FOUND,
res.result_reason.value,
'result reason did not match')
def _create(self):
obj_type = ObjectType(ObjectTypeEnum.SYMMETRIC_KEY)
attributes = self._get_attrs()
template_attribute = TemplateAttribute(attributes=attributes)
res | |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2015-2017 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
# # DICOM Processing Script
#
# *Summary:* Take DICOM images and process them for use in neon models.
#
# NOTES:
# + [DICOM](https://en.wikipedia.org/wiki/DICOM) stands for "Digital Imaging and COmmunication in Medicine". It is the standard for all medical imaging from MRI to CT to Ultrasound to whatever.
# + The standard was created so that devices manufacturers would have a common format for hospitals to integrate into their digital imaging systems. That's good for us because it gives us a standard way to load and process many types of medical images. If we can get a good pipeline going, then this script might be useful for any medical imaging study.
# + MRI and CT are stored as 2D slices which can be combined to form a 3D volume. So for any given patient we may have several hundred slice files that correspond to the same scan. This is different than most of our image data models because we need to incorporate all of these slices into one tensor. We definitely want to use the 3D volume because things like tumors and lesions (and other bad things that cause illness) don't limit themselves to a single 2D slice. Therefore, we've got to load this into our model as a 4D tensor (channel, height, width, depth).
# + The slice thickness varies but is typically 1 to 3 mm. It depends on the type of study (MR/CT) and the parameters set at the start of the scan. Be sure to get a good handle on the height, width, and depth parameters of the DICOM files so that you are importing consistent tensors into the data loader. This is something I need to look into because ideally we'd like to standardize the tensors (for example, 1 mm x 1 mm x 1 mm voxels or something like that).
# + The pixels are usually stored as uint16 precision (however I'm seeing several that are 32-bit too). I'm not sure if we need to change that to something with less precision. If we can keep the full 16-bit precision, then that would be preferable. There may in fact be anomalies that involve a very minor difference in the contrast between two adjacent regions. This is an open question for analysis.
# + Along with the actual pixel information, the DICOM file also contains lots of metadata, such as slice thickness, pixel resolution, image orientation, patient orientation, and type of study (i.e. MRI, CT, X-ray).
# + This assumes that your DICOM images are stored in the directory data_dir. Within data_dir there should be a separate folder for each patient's scans. The files usually end in the .dcm extension (e.g. "0a291d1b12b86213d813e3796f14b329.dcm" might be one slice for one patient). The SimpleITK library we use will load slices within a given patient's directory all at once into a 3D object.
import numpy as np
import glob, os
# ## SimpleITK for reading DICOM
#
# [SimpleITK](http://www.simpleitk.org/) is an open-source library for reading and processing 3D models. It was particularly designed for medical imaging and was built on top of the Insight Segmentation and Registration Toolkit ([ITK](https://itk.org/)) sponsored by the National Library of Medicine.
#
# What's nice about SimpleITK is that it has pre-built methods to read all of the DICOM slices into a 3D object and perform segmentation and morphological operations on that 3D object. I believe it automatically arranges the slices in the correct order. It also can handle compressed DICOM files.
import SimpleITK as sitk
#######################
# Parse the command line
from neon.util.argparser import NeonArgparser
parser = NeonArgparser(__doc__)
# We can pass the input directory and output file name from the command line
parser.add_argument('-out', '--outFilename', default='dicom_out.h5', help='Name of the output HDF5 file')
# Default input/output locations; override with --data_dir / --save_path.
parser.set_defaults(data_dir='/Volumes/data/tonyr/dicom/Lung CT/stage1')
parser.set_defaults(save_path='.')
args = parser.parse_args()
data_dir = args.data_dir  # directory holding one sub-folder of DICOM slices per patient
outFilename = args.save_path + '/' + args.outFilename  # full path of the output HDF5 file
def verbosePrint(txt):
    """Emit a progress message (thin wrapper so verbosity can be gated later)."""
    print(txt)
verbosePrint('DICOM to HDF5 converter started ... ')
patients = glob.glob(os.path.join(data_dir, '*'))  # Get the folder names for the patients
numPatients = len(patients)  # Number of patients in the directory
# Fail fast when the input directory is missing or contains no sub-folders.
if numPatients == 0:
    raise IOError('Directory ' + data_dir + ' not found or no files found in directory')
verbosePrint('Found the following subfolders with DICOMs: {}'.format(patients))
# ## Now load the entire set of DICOM images into one large HDF5 file
#
# Alternatively, we could do some other pre-processing of the images (e.g. normalizing them, aligning them, segmenting them) and then save to HDF5.
#
# However, for right now let's just load the data without any other pre-processing.
import h5py
import ntpath
# ## The 'input' expects the array to be flattened!
#
# The [HDF5Iterator](http://neon.nervanasys.com/index.html/_modules/neon/data/hdf5iterator.html#HDF5Iterator) expects to get a 2D array where the rows are each sample and the columns are each feature. So for a CxHxW set of images, the number of features should be the product of those 3 dimensions.
#
# I need to add a depth dimension (D) for the slices. So I'll have a 2D array that is # samples x (CxHxWxD).
def getImageTensor(patientDirectory, depth=128):
    """Ingest all DICOM slices for one study into a single flattened tensor.

    inputs:
        patientDirectory = directory where the DICOMs for a single patient live
        depth = number of slices (D) to keep from the volume; previously
            hard-coded to 128, now a parameter with the same default so
            other slice budgets can be used without editing this function
    outputs:
        imgTensor = a flattened numpy array (1, C*H*W*D)
        C = number of channels per pixel (1 for MR, CT, and Xray)
        H = number of pixels in height
        W = number of pixels in width
        D = number of slices kept in depth (== depth)
    """
    reader = sitk.ImageSeriesReader()  # Set up the reader object
    # Collect the names of the DICOM slice files within the directory.
    filenamesDICOM = reader.GetGDCMSeriesFileNames(patientDirectory)
    reader.SetFileNames(filenamesDICOM)
    # Execute the reader pipeline -> one 3D image object for this patient.
    patientObject = reader.Execute()
    C = patientObject.GetNumberOfComponentsPerPixel()  # one color channel for CT/MRI
    H = patientObject.GetHeight()  # Height in pixels
    W = patientObject.GetWidth()   # Width in pixels
    # NOTE: the true depth is patientObject.GetDepth(); we truncate to a fixed
    # slice count so every sample fed to the HDF5Iterator has the same shape.
    D = depth
    # Transpose the SimpleITK ndarray to the order neon expects, then flatten
    # to a single row vector (1, C*H*W*D).
    imgTensor = sitk.GetArrayFromImage(patientObject[:, :, :D]).transpose([1, 2, 0]).ravel().reshape(1, -1)
    return imgTensor, C, H, W, D
# ## Loop through the patient directory and load the DICOM tensors into HDF5 file
#
# HDF5 allows for the dataset to be dynamically updated. So this should iteratively append new DICOM tensors to the HDF5 file. Otherwise, we'd have to load all of the files into memory and quickly run out of space.
#
# **TODO (priority low):** I think there's a parallel way of writing to HDF5. I might be able to speed things up by having parallel threads to load different patients and append them to the HDF5.
with h5py.File(outFilename, 'w') as df: # Open hdf5 file for writing our DICOM dataset
for patientDirectory in patients[:1]: # Start with the first patient to set up the HDF5 dataset
patientID = ntpath.basename(patientDirectory) # Unique ID for patient
verbosePrint('({} of {}): Processing patient: {}'.format(1, numPatients, patientID))
imgTensor, original_C, original_H, original_W, original_D = getImageTensor(patientDirectory)
dset = df.create_dataset('input', data=imgTensor, maxshape=[None, original_C*original_H*original_W*original_D])
# Now iterate through the remaining patients and append their image tensors to the HDF5 dataset
for i, patientDirectory in enumerate(patients[1:]): # Now append the remaining patients
print('({} of {}): Processing patient: {}'.format(i+2, numPatients, ntpath.basename(patientDirectory)))
imgTensor, C, H, W, D = getImageTensor(patientDirectory)
# Sanity check
| |
#!/usr/bin/env python
#
import random
import numpy as np
from copy import copy
from bart_utils import empty, Tree, logsumexp, softmax, check_if_zero, get_children_id
#from itertools import izip, count
from itertools import count
class Particle(Tree):
    """A single SMC/PG particle: a Tree plus bookkeeping of how it was grown.

    The extra state records, per grow iteration, which nodes were examined,
    which particle ids this particle descends from after resampling, and the
    cached SIS weight-ratio components per split node.
    """

    def __init__(self, train_ids=np.arange(0, dtype='int'), param=empty(), settings=empty(), cache_tmp={}):
        # NOTE: cache_tmp's mutable default is only read (truthiness + pass-through),
        # never mutated here, so the shared-default pitfall does not bite.
        Tree.__init__(self, train_ids, param, settings, cache_tmp)
        self.ancestry = []              # particle ids this particle was resampled from, per iteration
        self.nodes_processed_itr = []   # per grow iteration: list of node ids examined
        self.grow_nodes_itr = []        # per grow iteration: snapshot of grow_nodes
        self.log_sis_ratio_d = {}       # node_id -> (log_sis_ratio_loglik, log_sis_ratio_prior)
        if cache_tmp:
            # Fresh (non-copied) particle: start growing from the root node.
            self.do_not_grow = False
            self.grow_nodes = [0]

    def process_node_id(self, data, param, settings, cache, node_id):
        """Propose a split at node_id; return the log SIS weight-update ratio.

        On an accepted split the children's statistics are committed and both
        children are queued in self.grow_nodes for future expansion.
        """
        if self.do_not_split[node_id]:
            log_sis_ratio = 0.0
        else:
            log_psplit = np.log(self.compute_psplit(node_id, param))
            train_ids = self.train_ids[node_id]
            left, right = get_children_id(node_id)
            if settings.verbose >= 4:
                print('train_ids for this node = %s' % train_ids)
            # Draw a split (or a "do not split" decision) from the prior proposal.
            (do_not_split_node_id, feat_id_chosen, split_chosen, idx_split_global, log_sis_ratio, logprior_nodeid, \
                train_ids_left, train_ids_right, cache_tmp, loglik_left, loglik_right) \
                = self.prior_proposal(data, param, settings, cache, node_id, train_ids, log_psplit)
            if do_not_split_node_id:
                self.do_not_split[node_id] = True
            else:
                # Commit the split: update sufficient statistics for both
                # children and queue them for growth.
                self.update_left_right_statistics(cache_tmp, node_id, logprior_nodeid, train_ids_left,\
                    train_ids_right, loglik_left, loglik_right, feat_id_chosen, split_chosen, \
                    idx_split_global, settings, param, data, cache)
                self.grow_nodes.append(left)
                self.grow_nodes.append(right)
        return (log_sis_ratio)

    def grow_next(self, data, param, settings, cache):
        """ grows just one node at a time (nodewise expansion)
            breaks after processing the first non do_not_grow node or when grow_nodes is empty
            Note that multiple nodes could be killed in a single grow_next call
        """
        # FIXME: refactor without the do_not_grow option; it made sense for SMC paper, but not for PG
        do_not_grow = True
        log_sis_ratio = 0.0
        nodes_processed = []
        if not self.grow_nodes:
            if settings.verbose >= 2:
                print('None of the leaves can be grown any further: Current ' \
                        'depth = %3d, Skipping grow_next' % self.depth)
        else:
            while True:
                # loop through current leaf nodes, process first "non do_not_grow" node and break;
                # if none of the nodes can be processed, do_not_grow = True
                remove_position = 0  # just pop the oldest node
                node_id = self.grow_nodes.pop(remove_position)
                nodes_processed.append(node_id)
                # do_not_grow stays True only if every popped node was unsplittable.
                do_not_grow = do_not_grow and self.do_not_split[node_id]
                if self.do_not_split[node_id]:
                    if settings.verbose >= 3:
                        print('Skipping split at node_id %3d' % node_id)
                    if not self.grow_nodes:
                        break
                else:
                    log_sis_ratio += self.process_node_id(data, param, settings, cache, node_id)
                    break  # you have processed a non do_not_grow node, take a break!
        self.loglik_current = self.compute_loglik()
        self.log_sis_ratio = log_sis_ratio
        self.do_not_grow = do_not_grow
        if nodes_processed:
            self.nodes_processed_itr.append(nodes_processed)

    def check_nodes_processed_itr(self, settings):
        """Sanity check: a node id must appear at most once across all iterations."""
        tmp = set([])
        for nodes in self.nodes_processed_itr:
            for node in nodes:
                if node in tmp:
                    print('node = %s present multiple times in nodes_processed_itr = %s' % \
                            (node, self.nodes_processed_itr))
                    raise Exception
                else:
                    tmp.add(node)
def update_particle_weights(particles, log_weights, settings):
    """Fold each particle's log SIS ratio into its log weight (in place).

    Returns (log_pd, ess, log_weights, weights_norm) where log_pd is the
    log marginal-likelihood estimate and ess the effective sample size as a
    fraction of the number of particles.
    """
    for idx, particle in enumerate(particles):
        if settings.verbose >= 2:
            print('pid = %5d, log_sis_ratio = %f' % (idx, particle.log_sis_ratio))
        log_weights[idx] += particle.log_sis_ratio
    weights_norm = softmax(log_weights)  # normalized weights
    ess = 1. / np.sum(weights_norm ** 2) / settings.n_particles
    log_pd = logsumexp(log_weights)
    return (log_pd, ess, log_weights, weights_norm)
def resample(particles, log_weights, settings, log_pd, ess, weights_norm, tree_pg):
    """Resample the particle population when the ESS falls below threshold.

    For PG (tree_pg set), slot 0 is reserved for the conditioned particle
    and only the remaining n-1 slots are resampled.  Returns the new
    particle list and the (possibly reset) log weights.
    """
    if ess <= settings.ess_threshold:
        if tree_pg:
            pid_list = resample_pids_basic(settings, settings.n_particles - 1, weights_norm)
            random.shuffle(pid_list)  # shuffle so that particle is assigned randomly
            pid_list.insert(0, 0)     # conditioned particle keeps slot 0
        else:
            pid_list = resample_pids_basic(settings, settings.n_particles, weights_norm)
        # After resampling, all particles carry equal weight summing to exp(log_pd).
        log_weights = np.ones(settings.n_particles) * (log_pd - np.log(settings.n_particles))
    else:
        pid_list = range(settings.n_particles)
    if settings.verbose >= 2:
        print('ess = %s, ess_threshold = %s' % (ess, settings.ess_threshold))
        print('new particle ids = ')
        print(pid_list)
    new_particles = create_new_particles(particles, pid_list, settings)
    # update ancestry
    for pid, particle in zip(pid_list, new_particles):
        particle.ancestry.append(pid)
    return (new_particles, log_weights)
def resample_pids_basic(settings, n_particles, prob):
    """Draw n_particles ancestor indices according to prob.

    settings.resample selects the scheme: 'multinomial' or 'systematic'.
    Raises ValueError for an unknown scheme (previously this fell through
    and crashed with a confusing UnboundLocalError on pid_list).
    """
    if settings.resample == 'multinomial':
        pid_list = sample_multinomial_numpy(n_particles, prob)
    elif settings.resample == 'systematic':
        pid_list = systematic_sample(n_particles, prob)
    else:
        raise ValueError('unknown resampling scheme: %r' % (settings.resample,))
    return pid_list
def sample_multinomial_numpy(n_particles, prob):
    """Multinomial resampling: draw per-id counts once, then expand each id
    `pid` into `cnt` repetitions (output is therefore sorted by pid)."""
    counts = np.random.multinomial(n_particles, prob, size=1)
    pid_list = []
    for pid, cnt in enumerate(counts.flat):
        pid_list.extend([pid] * cnt)
    return pid_list
def create_new_particles(particles, pid_list, settings):
    """ particles that occur just once after resampling are not 'copied' """
    # The first occurrence of a pid reuses the existing particle object;
    # later occurrences get a copy via copy_particle so growth of one clone
    # cannot corrupt another.  (Dropped the unused enumerate index and the
    # redundant re-adds to the allocation set.)
    list_allocated = set()
    op = []
    for pid in pid_list:
        if pid not in list_allocated:
            op.append(particles[pid])
            list_allocated.add(pid)
        else:
            op.append(copy_particle(particles[pid], settings))
    return op
def copy_particle(p, settings):
    """Clone particle p: lists and dicts are copied one level deep so that
    growing the clone does not mutate the original's containers."""
    # TODO: lots of unnecessary copying for PG; reduce memory requirement
    op = Particle()
    # lists
    op.leaf_nodes = p.leaf_nodes[:]
    op.non_leaf_nodes = p.non_leaf_nodes[:]
    op.ancestry = p.ancestry[:]
    # per-iteration lists of node ids: copy the inner lists too
    op.nodes_processed_itr = [x[:] for x in p.nodes_processed_itr]
    op.grow_nodes = p.grow_nodes[:]
    op.grow_nodes_itr = [x[:] for x in p.grow_nodes_itr]
    # dictionaries
    op.do_not_split = p.do_not_split.copy()
    op.log_sis_ratio_d = p.log_sis_ratio_d.copy()
    op.sum_y = p.sum_y.copy()
    op.sum_y2 = p.sum_y2.copy()
    op.n_points = p.n_points.copy()
    op.param_n = p.param_n.copy()
    op.train_ids = p.train_ids.copy()
    op.node_info = p.node_info.copy()
    op.loglik = p.loglik.copy()
    op.logprior = p.logprior.copy()
    # other variables
    op.depth = copy(p.depth)
    op.do_not_grow = copy(p.do_not_grow)
    op.loglik_current = copy(p.loglik_current)
    return op
def systematic_sample(n, prob):
    """Systematic re-sampling.

    One uniform offset in [0, 1/n) is shared by n evenly spaced pointers,
    so any object with probability > 1/n (better than average) is
    guaranteed to occur at least once.  See section 2.4 of 'Comparison of
    Resampling Schemes for Particle Filtering' by Douc et. al for more info.
    """
    assert(n == len(prob))
    assert(abs(np.sum(prob) - 1) < 1e-10)
    cum_prob = np.cumsum(prob)
    pointer = np.random.rand(1) / float(n)  # random offset for the first pointer
    idx = 0
    selected = []
    while True:
        # Advance to the first bin whose cumulative mass covers the pointer.
        while pointer > cum_prob[idx]:
            idx += 1
        selected.append(idx)
        pointer += 1 / float(n)
        if pointer > 1:
            break
    return selected
def init_particles(data, settings, param, cache_tmp):
    """Create the initial particle population and its log weights.

    Each particle starts as a root-only tree over all training points; its
    initial log weight is the root log likelihood minus log(n_particles).
    """
    n = settings.n_particles
    particles = [Particle(np.arange(data['n_train']), param, settings, cache_tmp)
                 for _ in range(n)]
    log_weights = np.array([p.loglik[0] for p in particles]) - np.log(n)
    return (particles, log_weights)
def grow_next_pg(p, tree_pg, itr, settings):
    """Replay iteration itr of the conditioned tree tree_pg onto particle p
    (the Particle Gibbs move for particle 0).

    Re-uses tree_pg's recorded decisions and statistics instead of proposing
    new splits; only the likelihood part of the SIS ratio is recomputed.
    """
    p.log_sis_ratio = 0.
    p.do_not_grow = False
    p.grow_nodes = []
    try:
        # Which nodes did the conditioned tree examine at this iteration?
        nodes_processed = tree_pg.nodes_processed_itr[itr]
        p.nodes_processed_itr.append(nodes_processed[:])
        # All but the last examined node were "do not split" decisions.
        for node_id in nodes_processed[:-1]:
            assert(tree_pg.do_not_split[node_id])
            p.do_not_split[node_id] = True
        node_id = nodes_processed[-1]
        if node_id in tree_pg.node_info:
            # The conditioned tree split this node: replay the split on p.
            left, right = get_children_id(node_id)
            log_sis_ratio_loglik_new = tree_pg.loglik[left] + tree_pg.loglik[right] - tree_pg.loglik[node_id]
            try:
                log_sis_ratio_loglik_old, log_sis_ratio_prior = tree_pg.log_sis_ratio_d[node_id]
            except KeyError:
                print('tree_pg: node_info = %s, log_sis_ratio_d = %s' % (tree_pg.node_info, tree_pg.log_sis_ratio_d))
                raise KeyError
            if settings.verbose >= 2:
                print('log_sis_ratio_loglik_old = %s' % log_sis_ratio_loglik_old)
                print('log_sis_ratio_loglik_new = %s' % log_sis_ratio_loglik_new)
            # Keep the cached prior component; refresh the likelihood component.
            p.log_sis_ratio = log_sis_ratio_loglik_new + log_sis_ratio_prior
            tree_pg.log_sis_ratio_d[node_id] = (log_sis_ratio_loglik_new, log_sis_ratio_prior)
            p.log_sis_ratio_d[node_id] = tree_pg.log_sis_ratio_d[node_id]
            p.non_leaf_nodes.append(node_id)
            try:
                p.leaf_nodes.remove(node_id)
            except ValueError:
                print('warning: unable to remove node_id = %s from leaf_nodes = %s' % (node_id, p.leaf_nodes))
                pass
            p.leaf_nodes.append(left)
            p.leaf_nodes.append(right)
            # copying relevant bits
            p.node_info[node_id] = tree_pg.node_info[node_id]
            p.logprior[node_id] = tree_pg.logprior[node_id]
            for node_id_child in [left, right]:
                p.do_not_split[node_id_child] = False  # can look up where node_id_child occurred in nodes_processed_itr
                p.loglik[node_id_child] = tree_pg.loglik[node_id_child]
                p.logprior[node_id_child] = tree_pg.logprior[node_id_child]
                p.train_ids[node_id_child] = tree_pg.train_ids[node_id_child]
                p.sum_y[node_id_child] = tree_pg.sum_y[node_id_child]
                p.sum_y2[node_id_child] = tree_pg.sum_y2[node_id_child]
                p.param_n[node_id_child] = tree_pg.param_n[node_id_child]
                p.n_points[node_id_child] = tree_pg.n_points[node_id_child]
        if settings.verbose >= 2:
            print('p.leaf_nodes = %s' % p.leaf_nodes)
            print('p.non_leaf_nodes = %s' % p.non_leaf_nodes)
            print('p.node_info.keys() = %s' % sorted(p.node_info.keys()))
        try:
            # Queue the nodes the conditioned tree grew at the *next* iteration.
            p.grow_nodes = tree_pg.grow_nodes_itr[itr+1]
            p.log_sis_ratio_d = tree_pg.log_sis_ratio_d
            p.depth = tree_pg.depth
        except IndexError:
            # The conditioned tree recorded no further iterations.
            p.do_not_grow = True
    except IndexError:
        # itr is beyond the conditioned tree's recorded iterations.
        p.do_not_grow = True
def run_smc(particles, data, settings, param, log_weights, cache, tree_pg=None):
if settings.verbose >= 2:
print('Conditioned tree:')
tree_pg.print_tree()
itr = 0
while True:
if settings.verbose >= 2:
print('\n')
print('*'*80)
print('Current iteration = %3d' % itr)
print('*'*80)
if itr != 0:
# no resampling required when itr == 0 since weights haven't been updated yet
if settings.verbose >= 1:
print('iteration = %3d, log p(y|x) = %.2f, ess/n_particles = %f' % (itr, log_pd, ess))
(particles, log_weights) = resample(particles, log_weights, settings, log_pd, \
ess, weights_norm, tree_pg)
for pid, p in enumerate(particles):
if settings.verbose >= 2:
print('Current particle = %3d' % pid)
print('grow_nodes = %s' % p.grow_nodes)
print('leaf_nodes = %s, non_leaf_nodes = %s' % (p.leaf_nodes, p.non_leaf_nodes))
if p.grow_nodes:
p.grow_nodes_itr.append(p.grow_nodes[:])
if tree_pg and (pid == 0):
if settings.verbose >= 2 and itr == 0:
for s in ['leaf_nodes', 'non_leaf_nodes', 'grow_nodes_itr', 'ancestry', 'nodes_processed_itr']:
print('p.%s = %s' % (s, getattr(p, s)))
grow_next_pg(p, tree_pg, itr, settings)
else:
p.grow_next(data, param, settings, cache)
p.update_depth()
if settings.verbose >= 2:
print('nodes_processed_itr for particle = %s' % p.nodes_processed_itr)
print('grow_nodes (after running grow_next) (NOT updated for conditioned tree_pg) = %s' % p.grow_nodes)
print('leaf_nodes = %s, non_leaf_nodes = %s' % (p.leaf_nodes, p.non_leaf_nodes))
print('nodes_processed_itr for particle (after running update_particle weights) = %s' % p.nodes_processed_itr)
print('checking nodes_processed_itr')
(log_pd, ess, log_weights, weights_norm) = \
update_particle_weights(particles, log_weights, settings) # in place update of log_weights
if settings.verbose >= 2:
print('log_weights = %s' % log_weights)
if check_do_not_grow(particles):
if settings.verbose >= 1:
print('None of the particles can be grown any further; breaking out')
break
itr += 1
if (settings.debug == 1) and tree_pg:
for pid, p in enumerate(particles):
if settings.verbose >=2 :
print('checking pid = %s' % pid)
p.check_nodes_processed_itr(settings)
if settings.verbose >= 2:
print('check if tree_pg did the right thing:')
print('nodes_processed_itr (orig, new):\n%s\n%s' % (tree_pg.nodes_processed_itr, particles[0].nodes_processed_itr))
print('leaf_nodes (orig, new):\n%s\n%s' % (tree_pg.leaf_nodes, particles[0].leaf_nodes))
print('non_leaf_nodes (orig, new):\n%s\n%s' % (tree_pg.non_leaf_nodes, particles[0].non_leaf_nodes))
print('grow_nodes_itr (orig, new):\n%s\n%s' % (tree_pg.grow_nodes_itr, particles[0].grow_nodes_itr))
assert particles[0].leaf_nodes | |
import json
import logging
import os
import re
import shutil
import tempfile
from datetime import timedelta
from functools import partial
from pathlib import Path
from typing import List, Dict, Optional, Any, AsyncIterator, Tuple
import pytest
from _pytest.logging import LogCaptureFixture
from aiohttp import ClientTimeout
from aiohttp.hdrs import METH_ANY
from aiohttp.test_utils import TestServer
from aiohttp.web import Request, Response, Application, route
from aiostream import stream
from aiostream.core import Stream
from pytest import fixture
from core import version
from core.cli import is_node
from core.cli.cli import CLI
from core.cli.command import HttpCommand, JqCommand
from core.cli.model import CLIDependencies, CLIContext
from core.console_renderer import ConsoleRenderer, ConsoleColorSystem
from core.db.jobdb import JobDb
from core.error import CLIParseError
from core.model.model import predefined_kinds
from core.query.model import Template, Query
from core.task.task_description import TimeTrigger, Workflow, EventTrigger
from core.task.task_handler import TaskHandler
from core.types import JsonElement, Json
from core.util import AccessJson, utc_str
from core.worker_task_queue import WorkerTask
# noinspection PyUnresolvedReferences
from tests.core.analytics import event_sender
# noinspection PyUnresolvedReferences
from tests.core.cli.cli_test import cli, cli_deps
# noinspection PyUnresolvedReferences
from tests.core.db.graphdb_test import (
filled_graph_db,
graph_db,
test_db,
local_client,
system_db,
foo_model,
foo_kinds,
)
# noinspection PyUnresolvedReferences
from tests.core.db.runningtaskdb_test import running_task_db
# noinspection PyUnresolvedReferences
from tests.core.message_bus_test import message_bus
# noinspection PyUnresolvedReferences
from tests.core.query.template_expander_test import expander
# noinspection PyUnresolvedReferences
from tests.core.task.task_handler_test import (
task_handler,
job_db,
subscription_handler,
test_workflow,
task_handler_args,
)
from tests.core.util_test import not_in_path
# noinspection PyUnresolvedReferences
from tests.core.worker_task_queue_test import worker, task_queue, performed_by, incoming_tasks
@fixture
def json_source() -> str:
    """A `json` CLI source yielding 200 objects: two copies of num = 0..99."""
    objects = [f'{{ "num": {a}, "inner": {{"num": {a%10}}}}}' for a in range(0, 100)]
    nums = ",".join(objects)
    return "json [" + nums + "," + nums + "]"
@fixture
async def echo_http_server() -> AsyncIterator[Tuple[int, List[Tuple[Request, Json]]]]:
    """HTTP test server that records every (request, parsed-json-body) pair.

    Yields (port, requests).  Paths starting with /fail answer 500, all
    other paths answer 200.  The server is closed after the test.
    """
    requests = []

    async def add_request(request: Request) -> Response:
        requests.append((request, await request.json()))
        status = 500 if request.path.startswith("/fail") else 200
        return Response(status=status)

    app = Application()
    # Catch-all route: any method, any non-empty path.
    app.add_routes([route(METH_ANY, "/{tail:.+}", add_request)])
    server = TestServer(app)
    await server.start_server()
    yield server.port, requests  # type: ignore
    await server.close()
@pytest.mark.asyncio
async def test_echo_source(cli: CLI) -> None:
    """`echo` emits its argument verbatim; no argument yields an empty string."""
    no_arg = await cli.execute_cli_command("echo", stream.list)
    assert no_arg[0] == [""]
    plain = await cli.execute_cli_command("echo this is a string", stream.list)
    assert plain[0] == ["this is a string"]
    quoted = await cli.execute_cli_command('echo "foo bla bar" ', stream.list)
    assert quoted[0] == ["foo bla bar"]
@pytest.mark.asyncio
async def test_json_source(cli: CLI) -> None:
    """`json` parses its argument and streams the parsed element(s)."""
    # object
    single = await cli.execute_cli_command('json {"a": 1}', stream.list)
    assert single[0] == [{"a": 1}]
    # array
    array = await cli.execute_cli_command('json [{"a": 1}, {"b":2}]', stream.list)
    assert array[0] == [{"a": 1}, {"b": 2}]
    # string
    string = await cli.execute_cli_command('json "foo bla bar"', stream.list)
    assert string[0] == ["foo bla bar"]
@pytest.mark.asyncio
async def test_predecessors(cli: CLI) -> None:
    """`predecessors` and its --with-origin / edge-type variants."""
    cases = [
        ("query id(4_0) | predecessors", 1),
        ("query id(4_0) | predecessors --with-origin", 2),
        ("query id(4_0) | predecessors --with-origin default", 2),
        ("query id(4_0) | predecessors delete", 0),
    ]
    for command, expected in cases:
        result = await cli.execute_cli_command(command, stream.list)
        assert len(result[0]) == expected
@pytest.mark.asyncio
async def test_ancestors(cli: CLI) -> None:
    """`ancestors` and its --with-origin / edge-type variants."""
    cases = [
        ("query id(4_0) | ancestors", 4),
        ("query id(4_0) | ancestors --with-origin", 5),
        ("query id(4_0) | ancestors --with-origin default", 5),
        ("query id(4_0) | ancestors delete", 0),
    ]
    for command, expected in cases:
        result = await cli.execute_cli_command(command, stream.list)
        assert len(result[0]) == expected
@pytest.mark.asyncio
async def test_successors(cli: CLI) -> None:
    """`successors` and its --with-origin / edge-type variants."""
    cases = [
        ("query id(4) | successors", 10),
        ("query id(4) | successors --with-origin", 11),
        ("query id(4) | successors --with-origin default", 11),
        ("query id(4) | successors delete", 0),
    ]
    for command, expected in cases:
        result = await cli.execute_cli_command(command, stream.list)
        assert len(result[0]) == expected
@pytest.mark.asyncio
async def test_descendants(cli: CLI) -> None:
    """`descendants` and its --with-origin / edge-type variants."""
    cases = [
        ("query id(4) | descendants", 10),
        ("query id(4) | descendants --with-origin", 11),
        ("query id(4) | descendants --with-origin default", 11),
        ("query id(4) | descendants delete", 0),
    ]
    for command, expected in cases:
        result = await cli.execute_cli_command(command, stream.list)
        assert len(result[0]) == expected
@pytest.mark.asyncio
async def test_query_source(cli: CLI) -> None:
    """Exercise the `query` source: plain query, template expansion,
    --include-edges, --explain, and absolute-path/aggregate syntax."""
    result = await cli.execute_cli_command('query is("foo") and some_int==0 --> identifier=~"9_"', stream.list)
    assert len(result[0]) == 10
    # Store a parameterized template, expand it, and expect the same result set.
    await cli.dependencies.template_expander.put_template(
        Template("test", 'is(foo) and some_int==0 --> identifier=~"{{fid}}"')
    )
    result2 = await cli.execute_cli_command('query expand(test, fid="9_")', stream.list)
    assert len(result2[0]) == 10
    result3 = await cli.execute_cli_command("query --include-edges is(graph_root) -[0:1]->", stream.list)
    # node: graph_root
    # node: collector
    # edge: graph_root -> collector
    # -----------------------------
    # = 3 elements
    assert len(result3[0]) == 3
    result4 = await cli.execute_cli_command("query --explain --include-edges is(graph_root) -[0:1]->", stream.list)
    assert result4[0][0]["rating"] == "simple"
    # use absolute path syntax
    result5 = await cli.execute_cli_command(
        "query aggregate(/reported.kind: sum(/reported.some_int) as si): "
        "is(foo) and not(/reported.some_int!=0) "
        "{child: --> /metadata!=null} some_int==0 "
        "with(any, --> /metadata!=null) sort /reported.name asc limit 1",
        stream.list,
    )
    assert result5 == [[{"group": {"reported.kind": "foo"}, "si": 0}]]
@pytest.mark.asyncio
async def test_sleep_source(cli: CLI) -> None:
    """`sleep` rejects non-numeric durations and emits one empty element."""
    with pytest.raises(CLIParseError):
        await cli.evaluate_cli_command("sleep forever")
    outcome = await cli.execute_cli_command("sleep 0.001; echo hello", stream.list)
    assert outcome == [[""], ["hello"]]
@pytest.mark.asyncio
async def test_count_command(cli: CLI, json_source: str) -> None:
    """`count` without/with an attribute path, including unknown attributes."""
    # count instances
    totals = await cli.execute_cli_command(f"{json_source} | count", stream.list)
    assert len(totals[0]) == 2
    assert totals[0] == ["total matched: 200", "total unmatched: 0"]
    # count attributes
    by_num = await cli.execute_cli_command(f"{json_source} | count num", stream.list)
    assert len(by_num[0]) == 102
    assert by_num[0][-2:] == ["total matched: 200", "total unmatched: 0"]
    # count attributes with path
    by_inner = await cli.execute_cli_command(f"{json_source} | count inner.num", stream.list)
    assert len(by_inner[0]) == 12
    assert by_inner[0][-2:] == ["total matched: 200", "total unmatched: 0"]
    # count unknown attributes
    unknown = await cli.execute_cli_command(f"{json_source} | count does_not_exist", stream.list)
    assert len(unknown[0]) == 2
    assert unknown[0] == ["total matched: 0", "total unmatched: 200"]
@pytest.mark.asyncio
async def test_head_command(cli: CLI) -> None:
    """`head` takes a prefix; a negative count behaves like a positive one."""
    for command, expected in [
        ("json [1,2,3,4,5] | head 2", [1, 2]),
        ("json [1,2,3,4,5] | head -2", [1, 2]),
        ("json [1,2,3,4,5] | head", [1, 2, 3, 4, 5]),
    ]:
        assert await cli.execute_cli_command(command, stream.list) == [expected]
@pytest.mark.asyncio
async def test_tail_command(cli: CLI) -> None:
    """tail keeps the last n elements; a negative n acts like n; no arg keeps all."""

    async def run(cmd: str):
        return await cli.execute_cli_command(cmd, stream.list)

    assert await run("json [1,2,3,4,5] | tail 2") == [[4, 5]]
    assert await run("json [1,2,3,4,5] | tail -2") == [[4, 5]]
    assert await run("json [1,2,3,4,5] | tail") == [[1, 2, 3, 4, 5]]
@pytest.mark.asyncio
async def test_chunk_command(cli: CLI, json_source: str) -> None:
    """chunk groups the 200 source elements into lists of the requested size."""
    res: List[List[str]] = await cli.execute_cli_command(f"{json_source} | chunk 50", stream.list)
    chunks = res[0]
    assert len(chunks) == 4  # 200 in chunks of 50
    assert all(len(chunk) == 50 for chunk in chunks)
@pytest.mark.asyncio
async def test_flatten_command(cli: CLI, json_source: str) -> None:
    """flatten undoes chunking: 4 chunks of 50 become 200 flat elements again."""
    flat = await cli.execute_cli_command(f"{json_source} | chunk 50 | flatten", stream.list)
    assert len(flat[0]) == 200
@pytest.mark.asyncio
async def test_uniq_command(cli: CLI, json_source: str) -> None:
    """uniq collapses the 200 source elements down to the 100 distinct ones."""
    distinct = await cli.execute_cli_command(f"{json_source} | uniq", stream.list)
    assert len(distinct[0]) == 100
@pytest.mark.asyncio
async def test_set_desired_command(cli: CLI) -> None:
    """set_desired merges the given key=value pairs into every element's desired section."""
    expected = {"a": "test", "b": 1, "c": True}
    res = await cli.execute_cli_command('query is("foo") | set_desired a="test" b=1 c=true', stream.list)
    assert len(res[0]) == 11
    for node in res[0]:
        assert expected.items() <= node["desired"].items()
@pytest.mark.asyncio
async def test_set_metadata_command(cli: CLI) -> None:
    """set_metadata merges the given key=value pairs into every element's metadata section."""
    expected = {"a": "test", "b": 1, "c": True}
    res = await cli.execute_cli_command('query is("foo") | set_metadata a="test" b=1 c=true', stream.list)
    assert len(res[0]) == 11
    for node in res[0]:
        assert expected.items() <= node["metadata"].items()
@pytest.mark.asyncio
async def test_clean_command(cli: CLI) -> None:
    """clean sets the cleanup marker in the desired section of every matched element."""
    marker = {"clean": True}
    res = await cli.execute_cli_command('query is("foo") | clean', stream.list)
    assert len(res[0]) == 11
    for node in res[0]:
        assert marker.items() <= node["desired"].items()
@pytest.mark.asyncio
async def test_protect_command(cli: CLI) -> None:
    """protect sets the protection marker in the metadata section of every matched element."""
    marker = {"protected": True}
    res = await cli.execute_cli_command('query is("foo") | protect', stream.list)
    assert len(res[0]) == 11
    for node in res[0]:
        assert marker.items() <= node["metadata"].items()
@pytest.mark.asyncio
async def test_list_sink(cli: CLI, cli_deps: CLIDependencies) -> None:
    """The list sink materializes the whole output stream into a single list."""
    collected = await cli.execute_cli_command("json [1,2,3]", stream.list)
    assert collected == [[1, 2, 3]]
@pytest.mark.asyncio
async def test_flat_sink(cli: CLI) -> None:
    """Several parsed command lines can be executed and concatenated into one flat stream."""
    parsed = await cli.evaluate_cli_command("json [1,2,3]; json [4,5,6]; json [7,8,9]")
    # Execute every parsed line and merge the resulting streams in order.
    streams = stream.iterate((await p.execute())[1] for p in parsed)
    merged = await stream.list(stream.concat(streams))
    assert merged == [1, 2, 3, 4, 5, 6, 7, 8, 9]
@pytest.mark.asyncio
async def test_format(cli: CLI) -> None:
# access properties by name and path
result = await cli.execute_cli_command(
'json {"a":"b", "b": {"c":"d"}} | format a:{a} b:{b.c} na:{fuerty}', stream.list
)
assert result[0] == ["a:b b:d na:null"]
# use correct type
props = dict(a="a", b=True, c=False, d=None, e=12, f=1.234)
result = await cli.execute_cli_command(f"json {json.dumps(props)}" " | format {a}:{b}:{c}:{d}:{e}:{f}", stream.list)
assert result[0] == ["a:true:false:null:12:1.234"]
# access deeply nested properties with dict and array
result = await cli.execute_cli_command(
'json {"a":{"b":{"c":{"d":[0,1,2, {"e":"f"}]}}}} | format will be an >{a.b.c.d[3].e}<',
stream.list,
)
assert result[0] == ["will be an >f<"]
# make sure any path that is not available leads to the null value
result = await cli.execute_cli_command("json {} | format {a}:{b.c.d}:{foo.bla[23].test}", | |
# rllib/policy/rnn_sequencing.py
"""RNN utils for RLlib.
The main trick here is that we add the time dimension at the last moment.
The non-LSTM layers of the model see their inputs as one flat batch. Before
the LSTM cell, we reshape the input to add the expected time dimension. During
postprocessing, we dynamically pad the experience batches so that this
reshaping is possible.
Note that this padding strategy only works out if we assume zero inputs don't
meaningfully affect the loss function. This happens to be true for all the
current algorithms: https://github.com/ray-project/ray/issues/2992
"""
import logging
import numpy as np
from typing import List, Optional
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.annotations import DeveloperAPI
from ray.rllib.utils.debug import summarize
from ray.rllib.utils.framework import try_import_tf, try_import_torch
from ray.rllib.utils.typing import TensorType, ViewRequirementsDict
from ray.util import log_once
tf1, tf, tfv = try_import_tf()
torch, _ = try_import_torch()
logger = logging.getLogger(__name__)
@DeveloperAPI
def pad_batch_to_sequences_of_same_size(
        batch: SampleBatch,
        max_seq_len: int,
        shuffle: bool = False,
        batch_divisibility_req: int = 1,
        feature_keys: Optional[List[str]] = None,
        view_requirements: Optional[ViewRequirementsDict] = None,
):
    """Applies padding to `batch` so it's choppable into same-size sequences.

    Shuffles `batch` (if desired), makes sure divisibility requirement is met,
    then pads the batch ([B, ...]) into same-size chunks ([B, ...]) w/o
    adding a time dimension (yet).
    Padding depends on episodes found in batch and `max_seq_len`.

    Note: `batch` is modified in place. Feature and state columns are
    replaced by their padded/chopped counterparts and a "seq_lens" column
    is written. Nothing is returned.

    Args:
        batch (SampleBatch): The SampleBatch object. All values in here have
            the shape [B, ...].
        max_seq_len (int): The max. sequence length to use for chopping.
        shuffle (bool): Whether to shuffle batch sequences. Shuffle may
            be done in-place. This only makes sense if you're further
            applying minibatch SGD after getting the outputs.
        batch_divisibility_req (int): The int by which the batch dimension
            must be dividable.
        feature_keys (Optional[List[str]]): An optional list of keys to apply
            sequence-chopping to. If None, use all keys in batch that are not
            "state_in/out_"-type keys.
        view_requirements (Optional[ViewRequirementsDict]): An optional
            Policy ViewRequirements dict to be able to infer whether
            e.g. dynamic max'ing should be applied over the seq_lens.
    """
    # Divisibility requirement is only checked for the single-agent case;
    # any AGENT_INDEX > 0 marks the batch as multi-agent.
    if batch_divisibility_req > 1:
        meets_divisibility_reqs = (
            len(batch[SampleBatch.CUR_OBS]) % batch_divisibility_req == 0
            # not multiagent
            and max(batch[SampleBatch.AGENT_INDEX]) == 0)
    else:
        meets_divisibility_reqs = True

    states_already_reduced_to_init = False

    # RNN/attention net case. Figure out whether we should apply dynamic
    # max'ing over the list of sequence lengths.
    if "state_in_0" in batch or "state_out_0" in batch:
        # Check, whether the state inputs have already been reduced to their
        # init values at the beginning of each max_seq_len chunk.
        # (One state row per sequence instead of one per timestep.)
        if batch.seq_lens is not None and \
                len(batch["state_in_0"]) == len(batch.seq_lens):
            states_already_reduced_to_init = True

        # RNN (or single timestep state-in): Set the max dynamically.
        if view_requirements["state_in_0"].shift_from is None:
            dynamic_max = True
        # Attention Nets (state inputs are over some range): No dynamic maxing
        # possible.
        else:
            dynamic_max = False
    # Multi-agent case.
    elif not meets_divisibility_reqs:
        max_seq_len = batch_divisibility_req
        dynamic_max = False
    # Simple case: No RNN/attention net, nor do we need to pad.
    else:
        if shuffle:
            batch.shuffle()
        return

    # RNN, attention net, or multi-agent case.
    # Collect state-in keys and (unless explicitly given) all plain feature
    # keys that should be chopped into sequences.
    state_keys = []
    feature_keys_ = feature_keys or []
    for k, v in batch.items():
        if k.startswith("state_in_"):
            state_keys.append(k)
        elif not feature_keys and not k.startswith("state_out_") and \
                k not in ["infos", "seq_lens"] and isinstance(v, np.ndarray):
            feature_keys_.append(k)

    feature_sequences, initial_states, seq_lens = \
        chop_into_sequences(
            feature_columns=[batch[k] for k in feature_keys_],
            state_columns=[batch[k] for k in state_keys],
            episode_ids=batch.get(SampleBatch.EPS_ID),
            unroll_ids=batch.get(SampleBatch.UNROLL_ID),
            agent_indices=batch.get(SampleBatch.AGENT_INDEX),
            seq_lens=getattr(batch, "seq_lens", batch.get("seq_lens")),
            max_seq_len=max_seq_len,
            dynamic_max=dynamic_max,
            states_already_reduced_to_init=states_already_reduced_to_init,
            shuffle=shuffle)

    # Write the padded columns back into the batch (in-place update).
    for i, k in enumerate(feature_keys_):
        batch[k] = feature_sequences[i]
    for i, k in enumerate(state_keys):
        batch[k] = initial_states[i]
    batch["seq_lens"] = np.array(seq_lens)

    if log_once("rnn_ma_feed_dict"):
        logger.info("Padded input for RNN/Attn.Nets/MA:\n\n{}\n".format(
            summarize({
                "features": feature_sequences,
                "initial_states": initial_states,
                "seq_lens": seq_lens,
                "max_seq_len": max_seq_len,
            })))
@DeveloperAPI
def add_time_dimension(padded_inputs: TensorType,
                       *,
                       max_seq_len: int,
                       framework: str = "tf",
                       time_major: bool = False):
    """Reshapes a padded flat batch into explicit [sequences x time] form.

    Args:
        padded_inputs (TensorType): a padded batch of sequences. That is,
            for seq_lens=[1, 2, 2], then inputs=[A, *, B, B, C, C], where
            A, B, C are sequence elements and * denotes padding.
        max_seq_len (int): The max. sequence length in padded_inputs.
        framework (str): The framework string ("tf2", "tf", "tfe", "torch").
        time_major (bool): Whether data should be returned in time-major (TxB)
            format or not (BxT).

    Returns:
        TensorType: Reshaped tensor of shape [B, T, ...] or [T, B, ...].
    """
    # Sequence lengths have to be specified for LSTM batch inputs. The
    # input batch must be padded to the max seq length given here, i.e.
    # batch_size == len(seq_lens) * max(seq_lens).
    if framework in ("tf2", "tf", "tfe"):
        assert time_major is False, "time-major not supported yet for tf!"
        # Batch size is dynamic in tf; derive the sequence count at runtime.
        num_seqs = tf.shape(padded_inputs)[0] // max_seq_len
        trailing_dims = padded_inputs.get_shape().as_list()[1:]
        return tf.reshape(padded_inputs,
                          [num_seqs, max_seq_len] + trailing_dims)

    assert framework == "torch", "`framework` must be either tf or torch!"
    num_seqs = padded_inputs.shape[0] // max_seq_len
    trailing_dims = padded_inputs.shape[1:]
    if time_major:
        target_shape = (max_seq_len, num_seqs) + trailing_dims
    else:
        target_shape = (num_seqs, max_seq_len) + trailing_dims
    return torch.reshape(padded_inputs, target_shape)
@DeveloperAPI
def chop_into_sequences(*,
                        feature_columns,
                        state_columns,
                        max_seq_len,
                        episode_ids=None,
                        unroll_ids=None,
                        agent_indices=None,
                        dynamic_max=True,
                        shuffle=False,
                        seq_lens=None,
                        states_already_reduced_to_init=False,
                        _extra_padding=0):
    """Truncate and pad experiences into fixed-length sequences.

    Args:
        feature_columns (list): List of arrays containing features.
        state_columns (list): List of arrays containing LSTM state values.
        max_seq_len (int): Max length of sequences before truncation.
        episode_ids (List[EpisodeID]): List of episode ids for each step.
        unroll_ids (List[UnrollID]): List of identifiers for the sample batch.
            This is used to make sure sequences are cut between sample batches.
        agent_indices (List[AgentID]): List of agent ids for each step. Note
            that this has to be combined with episode_ids for uniqueness.
        dynamic_max (bool): Whether to dynamically shrink the max seq len.
            For example, if max len is 20 and the actual max seq len in the
            data is 7, it will be shrunk to 7.
        shuffle (bool): Whether to shuffle the sequence outputs.
        seq_lens (Optional[np.ndarray]): Pre-computed sequence lengths. If
            None or empty, lengths are derived from episode_ids, unroll_ids
            and agent_indices.
        states_already_reduced_to_init (bool): If True, `state_columns`
            already contains one entry per sequence and is returned as-is.
        _extra_padding (int): Add extra padding to the end of sequences.

    Returns:
        f_pad (list): Padded feature columns. These will be of shape
            [NUM_SEQUENCES * MAX_SEQ_LEN, ...].
        s_init (list): Initial states for each sequence, of shape
            [NUM_SEQUENCES, ...].
        seq_lens (list): List of sequence lengths, of shape [NUM_SEQUENCES].

    Examples:
        >>> f_pad, s_init, seq_lens = chop_into_sequences(
        ...     episode_ids=[1, 1, 5, 5, 5, 5],
        ...     unroll_ids=[4, 4, 4, 4, 4, 4],
        ...     agent_indices=[0, 0, 0, 0, 0, 0],
        ...     feature_columns=[[4, 4, 8, 8, 8, 8],
        ...                      [1, 1, 0, 1, 1, 0]],
        ...     state_columns=[[4, 5, 4, 5, 5, 5]],
        ...     max_seq_len=3)
        >>> print(f_pad)
        [[4, 4, 0, 8, 8, 8, 8, 0, 0],
         [1, 1, 0, 0, 1, 1, 0, 0, 0]]
        >>> print(s_init)
        [[4, 4, 5]]
        >>> print(seq_lens)
        [2, 3, 1]
    """
    if seq_lens is None or len(seq_lens) == 0:
        # Derive sequence lengths: a new sequence starts whenever the
        # (episode, agent, unroll) combination changes or max_seq_len is hit.
        prev_id = None
        seq_lens = []
        seq_len = 0
        unique_ids = np.add(
            np.add(episode_ids, agent_indices),
            np.array(unroll_ids, dtype=np.int64) << 32)
        for uid in unique_ids:
            if (prev_id is not None and uid != prev_id) or \
                    seq_len >= max_seq_len:
                seq_lens.append(seq_len)
                seq_len = 0
            seq_len += 1
            prev_id = uid
        if seq_len:
            seq_lens.append(seq_len)
        seq_lens = np.array(seq_lens, dtype=np.int32)

    # Every timestep must be accounted for by exactly one sequence.
    assert sum(seq_lens) == len(feature_columns[0])

    # Dynamically shrink max len as needed to optimize memory usage
    if dynamic_max:
        max_seq_len = max(seq_lens) + _extra_padding

    feature_sequences = []
    for f in feature_columns:
        # Save unnecessary copy.
        if not isinstance(f, np.ndarray):
            f = np.array(f)
        length = len(seq_lens) * max_seq_len
        # BUGFIX: `np.object` (a deprecated alias of builtin `object`) was
        # removed in NumPy 1.24; compare against `object` directly, which
        # behaves identically on all NumPy versions.
        if f.dtype == object or f.dtype.type is np.str_:
            f_pad = [None] * length
        else:
            # Make sure type doesn't change.
            f_pad = np.zeros((length, ) + np.shape(f)[1:], dtype=f.dtype)
        # Copy each sequence into its zero-padded slot of size max_seq_len.
        seq_base = 0
        i = 0
        for len_ in seq_lens:
            for seq_offset in range(len_):
                f_pad[seq_base + seq_offset] = f[i]
                i += 1
            seq_base += max_seq_len
        assert i == len(f), f
        feature_sequences.append(f_pad)

    if states_already_reduced_to_init:
        initial_states = state_columns
    else:
        # Keep only the state at the first timestep of each sequence.
        initial_states = []
        for s in state_columns:
            # Skip unnecessary copy.
            if not isinstance(s, np.ndarray):
                s = np.array(s)
            s_init = []
            i = 0
            for len_ in seq_lens:
                s_init.append(s[i])
                i += len_
            initial_states.append(np.array(s_init))

    if shuffle:
        # Permute whole sequences (never individual timesteps).
        permutation = np.random.permutation(len(seq_lens))
        for i, f in enumerate(feature_sequences):
            orig_shape = f.shape
            f = np.reshape(f, (len(seq_lens), -1) + f.shape[1:])
            f = f[permutation]
            f = np.reshape(f, orig_shape)
            feature_sequences[i] = f
        for i, s in enumerate(initial_states):
            s = s[permutation]
            initial_states[i] = s
        seq_lens = seq_lens[permutation]

    return feature_sequences, initial_states, seq_lens
def timeslice_along_seq_lens_with_overlap(
sample_batch,
| |
# repository: timgates42/subversion
#!/usr/bin/env python
#
# svndumpfilter_tests.py: testing the 'svndumpfilter' tool.
#
# Subversion is a tool for revision control.
# See http://subversion.apache.org for more information.
#
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
######################################################################
# General modules
import os
import sys
import tempfile
# Our testing module
import svntest
from svntest.verify import SVNExpectedStdout, SVNExpectedStderr
# Get some helper routines
from svnadmin_tests import load_and_verify_dumpstream, load_dumpstream
from svntest.main import run_svn, run_svnadmin
# (abbreviation)
# Short module-level aliases for the svntest decorators and helper classes
# used throughout this test file.
Skip = svntest.testcase.Skip_deco
SkipUnless = svntest.testcase.SkipUnless_deco
XFail = svntest.testcase.XFail_deco
Issues = svntest.testcase.Issues_deco
Issue = svntest.testcase.Issue_deco
Wimp = svntest.testcase.Wimp_deco
Item = svntest.wc.StateItem
######################################################################
# Helper routines
def filter_and_return_output(dump, bufsize=0, *varargs):
  """Filter the array of lines passed in 'dump' and return the output
  and errput"""
  lines = [dump] if isinstance(dump, str) else dump

  # With -q/--quiet any stderr output is a real error; otherwise some
  # stderr chatter from svndumpfilter is expected.
  quiet = '-q' in varargs or '--quiet' in varargs
  expected_errput = None if quiet else svntest.verify.AnyOutput

  ## TODO: Should we handle exit_code?
  exit_code, output, errput = svntest.main.run_command_stdin(
    svntest.main.svndumpfilter_binary, expected_errput, bufsize, True,
    lines, *varargs)

  # Since we call svntest.main.run_command_stdin() in binary mode,
  # normalize the stderr line endings on Windows ourselves.
  if sys.platform == 'win32':
    errput = [line.replace('\r\n', '\n') for line in errput]

  return output, errput
######################################################################
# Tests
@Issue(2982)
def reflect_dropped_renumbered_revs(sbox):
  "reflect dropped renumbered revs in svn:mergeinfo"

  # NOTE: the one-line docstring above doubles as the test's display name
  # in the svntest harness -- keep it short.
  ## See https://issues.apache.org/jira/browse/SVN-2982. ##

  # Test svndumpfilter with include option
  sbox.build(empty=True)
  dumpfile_location = os.path.join(os.path.dirname(sys.argv[0]),
                                   'svndumpfilter_tests_data',
                                   'with_merges.dump')
  dumpfile = svntest.actions.load_dumpfile(dumpfile_location)
  # Filter the dump down to trunk+branch1, dropping empty revisions and
  # renumbering the remaining ones, then load it into the fresh repository.
  filtered_out, filtered_err = filter_and_return_output(
      dumpfile, 0, "include",
      "trunk", "branch1",
      "--skip-missing-merge-sources",
      "--drop-empty-revs",
      "--renumber-revs", "--quiet")
  load_dumpstream(sbox, filtered_out, "--ignore-uuid")

  # Verify the svn:mergeinfo properties: after dropping/renumbering, the
  # mergeinfo must refer to the *renumbered* revisions (r4-5).
  url = sbox.repo_url
  expected_output = svntest.verify.UnorderedOutput([
    url + "/trunk - /branch1:4-5\n",
    ])
  svntest.actions.run_and_verify_svn(expected_output, [],
                                     'propget', 'svn:mergeinfo', '-R',
                                     sbox.repo_url)

  # Test svndumpfilter with exclude option
  sbox.build(empty=True)
  filtered_out, filtered_err = filter_and_return_output(
      dumpfile, 0, "exclude", "branch1",
      "--skip-missing-merge-sources",
      "--drop-empty-revs",
      "--renumber-revs", "--quiet")
  load_dumpstream(sbox, filtered_out, "--ignore-uuid")

  # Verify the svn:mergeinfo properties: with branch1 excluded, trunk's
  # mergeinfo referring to it must be emptied out.
  expected_output = svntest.verify.UnorderedOutput([
    url + "/trunk - \n",
    ])
  svntest.actions.run_and_verify_svn(expected_output, [],
                                     'propget', 'svn:mergeinfo', '-R',
                                     sbox.repo_url)
@Issue(3181)
def svndumpfilter_loses_mergeinfo(sbox):
  "svndumpfilter loses mergeinfo"
  #svndumpfilter loses mergeinfo if invoked without --renumber-revs

  # NOTE: the one-line docstring above is the test's display name in the
  # svntest harness.
  ## See https://issues.apache.org/jira/browse/SVN-3181. ##

  sbox.build(empty=True)
  dumpfile_location = os.path.join(os.path.dirname(sys.argv[0]),
                                   'svndumpfilter_tests_data',
                                   'with_merges.dump')
  dumpfile = svntest.actions.load_dumpfile(dumpfile_location)
  # Filter without --renumber-revs/--drop-empty-revs; the revision numbers
  # in the dump are kept, so the loaded mergeinfo must be preserved as-is.
  filtered_out, filtered_err = filter_and_return_output(dumpfile, 0, "include",
                                                        "trunk", "branch1",
                                                        "--quiet")
  load_dumpstream(sbox, filtered_out)

  # Verify the svn:mergeinfo properties
  url = sbox.repo_url
  expected_output = svntest.verify.UnorderedOutput([
    url + "/trunk - /branch1:4-8\n",
    ])
  svntest.actions.run_and_verify_svn(expected_output, [],
                                     'propget', 'svn:mergeinfo', '-R',
                                     sbox.repo_url)
def _simple_dumpfilter_test(sbox, dumpfile, *dumpargs):
  """Run svndumpfilter with arguments DUMPARGS, taking input from DUMPFILE.
  Check that the output consists of the standard Greek tree excluding
  all paths that start with 'A/B/E', 'A/D/G' or 'A/D/H'."""
  wc_dir = sbox.wc_dir
  filtered_output, filtered_err = filter_and_return_output(dumpfile, 0,
                                                           '--quiet',
                                                           *dumpargs)

  # Load the filtered dump and set up our expectations.
  load_dumpstream(sbox, filtered_output, '--ignore-uuid')

  # Every path under these prefixes must have been filtered out
  # (children listed before their parent directories).
  dropped_paths = ['A/B/E/alpha', 'A/B/E/beta', 'A/B/E',
                   'A/D/H/chi', 'A/D/H/psi', 'A/D/H/omega', 'A/D/H',
                   'A/D/G/pi', 'A/D/G/rho', 'A/D/G/tau', 'A/D/G']

  expected_disk = svntest.main.greek_state.copy()
  for path in dropped_paths:
    expected_disk.remove(path)

  expected_output = svntest.wc.State(wc_dir, {
    'A'           : Item(status='A '),
    'A/B'         : Item(status='A '),
    'A/B/lambda'  : Item(status='A '),
    'A/B/F'       : Item(status='A '),
    'A/mu'        : Item(status='A '),
    'A/C'         : Item(status='A '),
    'A/D'         : Item(status='A '),
    'A/D/gamma'   : Item(status='A '),
    'iota'        : Item(status='A '),
    })

  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  for path in dropped_paths:
    expected_status.remove(path)

  # Check that our paths really were excluded
  svntest.actions.run_and_verify_update(wc_dir,
                                        expected_output,
                                        expected_disk,
                                        expected_status)
@Issue(2697)
def dumpfilter_with_targets(sbox):
  "svndumpfilter --targets blah"

  ## See https://issues.apache.org/jira/browse/SVN-2697. ##
  sbox.build(empty=True)

  dumpfile_location = os.path.join(os.path.dirname(sys.argv[0]),
                                   'svndumpfilter_tests_data',
                                   'greek_tree.dump')
  dumpfile = svntest.actions.load_dumpfile(dumpfile_location)

  (fd, targets_file) = tempfile.mkstemp(dir=svntest.main.temp_dir)
  try:
    # Write the targets file via a context manager so the handle is closed
    # even if a write fails (the previous open()/close() pair leaked the
    # handle on error).
    with open(targets_file, 'w') as targets:
      targets.write('/A/D/H\n')
      targets.write('/A/D/G\n')
    _simple_dumpfilter_test(sbox, dumpfile,
                            'exclude', '/A/B/E', '--targets', targets_file)
  finally:
    # mkstemp returns an already-open OS-level descriptor which the caller
    # must close; also remove the temp file regardless of the test outcome.
    os.close(fd)
    os.remove(targets_file)
def dumpfilter_with_patterns(sbox):
  "svndumpfilter --pattern PATH_PREFIX"

  sbox.build(empty=True)
  # Exclude via glob patterns instead of literal prefixes.
  data_dir = os.path.join(os.path.dirname(sys.argv[0]),
                          'svndumpfilter_tests_data')
  dumpfile = svntest.actions.load_dumpfile(os.path.join(data_dir,
                                                        'greek_tree.dump'))
  _simple_dumpfilter_test(sbox, dumpfile,
                          'exclude', '--pattern', '/A/D/[GH]*', '/A/[B]/E*')
#----------------------------------------------------------------------
# More testing for issue #3020 'Reflect dropped/renumbered revisions in
# svn:mergeinfo data during svnadmin load'
#
# Specifically, test that svndumpfilter, when used with the
# --skip-missing-merge-sources option, removes mergeinfo that refers to
# revisions that are older than the oldest revision in the dump stream.
@Issue(3020)
def filter_mergeinfo_revs_outside_of_dump_stream(sbox):
"filter mergeinfo revs outside of dump stream"
sbox.build(empty=True)
# Load a partial dump into an existing repository.
#
# Picture == 1k words:
#
# The dump file we filter in this test, 'mergeinfo_included_partial.dump', is
# a dump of r6:HEAD of the following repos:
#
# __________________________________________
# | |
# | ____________________________|_____
# | | | |
# trunk---r2---r3-----r5---r6-------r8---r9---------------> | |
# r1 | | | | | |
# initial | | | |______ | |
# import copy | copy | merge merge
# | | | merge (r5) (r8)
# | | | (r9) | |
# | | | | | |
# | | V V | |
# | | branches/B2-------r11---r12----> | |
# | | r7 |____| | |
# | | | | |
# | merge |___ | |
# | (r6) | | |
# | |_________________ | | |
# | | merge | |
# | | (r11-12) | |
# | | | | |
# V V V | |
# branches/B1-------------------r10--------r13--> | |
# r4 | |
# | V V
# branches/B1/B/E------------------------------r14---r15->
#
#
# The mergeinfo on the complete repos would look like this:
#
# Properties on 'branches/B1':
# svn:mergeinfo
# /branches/B2:11-12
# /trunk:6,9
# Properties on 'branches/B1/B/E':
# svn:mergeinfo
# /branches/B2/B/E:11-12
# /trunk/B/E:5-6,8-9
# Properties on 'branches/B2':
# svn:mergeinfo
# /trunk:9
#
  # We will run the partial dump through svndumpfilter using the
  # --skip-missing-merge-sources option, which should strip out any
  # revisions < 6.
# Then we'll load the filtered result into an empty repository. This
# should offset the incoming mergeinfo by -5. In addition, any mergeinfo
# referring to the initial revision in the dump file (r6) should be
# removed because the change it refers to (r5:6) is not wholly within the
# dumpfile. The resulting mergeinfo should look like this:
#
# Properties on 'branches/B1':
# svn:mergeinfo
# /branches/B2:6-7
# /trunk:4
# Properties on 'branches/B1/B/E':
# svn:mergeinfo
# /branches/B2/B/E:6-7
# /trunk/B/E:3-4
# Properties on 'branches/B2':
# svn:mergeinfo
# /trunk:4
partial_dump = os.path.join(os.path.dirname(sys.argv[0]),
'svndumpfilter_tests_data',
'mergeinfo_included_partial.dump')
partial_dump_contents = svntest.actions.load_dumpfile(partial_dump)
filtered_dumpfile2, filtered_out = filter_and_return_output(
partial_dump_contents,
8192, # Set a sufficiently large bufsize to avoid a deadlock
"include", "trunk", "branches",
"--skip-missing-merge-sources",
"--quiet")
load_dumpstream(sbox, filtered_dumpfile2, '--ignore-uuid')
# Check the resulting mergeinfo.
url = sbox.repo_url + "/branches"
expected_output = svntest.verify.UnorderedOutput([
url + "/B1 - /branches/B2:6-7\n",
"/trunk:4\n",
url + "/B2 - /trunk:4\n",
url + "/B1/B/E - /branches/B2/B/E:6-7\n",
"/trunk/B/E:3-4\n"])
svntest.actions.run_and_verify_svn(expected_output, [],
'propget', 'svn:mergeinfo', '-R',
sbox.repo_url)
# Blow away the current repos, create an empty one in its place, and
# then load this skeleton repos into the empty target:
#
# Projects/ (Added r1)
# README (Added r2)
# Project-X (Added r3)
# Project-Y (Added r4)
# Project-Z (Added r5)
# docs/ (Added r6)
# README (Added r6).
sbox.build(empty=True)
skeleton_location = os.path.join(os.path.dirname(sys.argv[0]),
'svnadmin_tests_data',
'skeleton_repos.dump')
skeleton_dumpfile = svntest.actions.load_dumpfile(skeleton_location)
load_dumpstream(sbox, skeleton_dumpfile, '--ignore-uuid')
partial_dump2 = os.path.join(os.path.dirname(sys.argv[0]),
'svndumpfilter_tests_data',
'mergeinfo_included_partial.dump')
partial_dump_contents2 = svntest.actions.load_dumpfile(partial_dump2)
# Now use the partial dump file we used above, but this time exclude
# the B2 branch. Load the filtered dump into the /Projects/Project-X
# subtree of the skeleton repos.
filtered_dumpfile2, filtered_err = filter_and_return_output(
partial_dump_contents2,
8192, # Set a sufficiently large bufsize to avoid a deadlock
"exclude", "branches/B2",
"--skip-missing-merge-sources",
"--drop-empty-revs",
"--renumber-revs")
# Starting with the same expectation we had when loading into an empty
# repository, adjust each revision by +6 to account for the six revision
# already present in the target repos, that gives:
#
# Properties on 'branches/B1':
# svn:mergeinfo
# /branches/B2:12-13
# /trunk:10
# Properties on 'branches/B1/B/E':
# svn:mergeinfo
# /branches/B2/B/E:12-13
# /trunk/B/E:9-10
# Properties on 'branches/B2':
# svn:mergeinfo
# /trunk:10
#
# ...But /branches/B2 | |
# repository: mbonsma/PhD-materials
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 28 14:46:00 2021
@author: madeleine
This script loads simulation files and processes them into an array,
then updates all_data.csv
Requires all_data.csv to be available in the same folder
Requires all_params.csv to be premade and available in the same folder
"""
import numpy as np
import pandas as pd
import re
from scipy import sparse
import argparse
from sim_analysis_functions import (find_nearest, load_simulation)
from spacer_model_plotting_functions import (nbi_steady_state, nvi_steady_state,
get_trajectories, interpolate_trajectories,
get_clone_sizes, get_large_trajectories,
fraction_remaining, calculate_speed,
bac_large_clone_extinction, get_bac_large_trajectories)
def phage_m_to_bac_m(nvi, nb, c0, g, f, alpha, pv, B, n_samples = 15):
    """
    Calculate bacteria m from the distribution of phage clone sizes.

    Parameters
    ----------
    nvi : scipy sparse matrix
        Time x clone matrix of phage clone sizes (row-sliceable, `.toarray()`).
    nb, c0, g, f, alpha, pv, B : float
        Simulation model parameters used to compute the per-clone growth and
        death rates (see the branching-process expressions below).
    n_samples : int
        Number of timepoints to subsample from `nvi` when pooling clone sizes.

    Returns
    -------
    float
        Expected number of surviving phage clones per sampled timepoint
        (0.0 if no clone is present at the sampled timepoints).
    """
    # Net growth (s0) and death (d0) rate of a rare phage clone.
    s0 = float(alpha*pv*nb*(B-1) - f*g*c0 - alpha*(1-pv)*nb)
    d0 = float(f*g*c0 + alpha*(1-pv)*nb)
    P0_inf = 1 - 2*s0/(B*(s0 + d0))  # extinction probability at long time, independent of nbi
    if P0_inf > 1:  # can happen if s0 comes out small and negative due to fluctuations in nb
        P0_inf = 1  # set P0 == 1
    # NOTE(review): s0 == 0 would divide by zero here, as in the original.
    N_est = (B*(s0 + d0))/(2*s0)  # clone size at which P0 ~ (1/e)

    # Get list of clone sizes by combining several timepoints.
    # BUGFIX: guard the slice step against 0, which previously raised a
    # ValueError whenever nvi had fewer rows than n_samples.
    step = max(1, int(nvi.shape[0]/n_samples))
    phage_clone_sizes = nvi[::step].toarray().flatten()
    phage_clone_sizes = np.array(phage_clone_sizes[phage_clone_sizes > 0], dtype='int')

    # BUGFIX: np.max crashes on an empty array; with no clones present the
    # expected number of surviving clones is simply 0.
    if phage_clone_sizes.size == 0:
        return 0.0

    # list of sizes from 0 to largest observed size
    clone_sizes = np.arange(0, np.max(phage_clone_sizes)+1)
    # survival probability for each clone size
    clone_size_survival = 1 - P0_inf**clone_sizes
    clone_size_survival[int(N_est):] = 1  # set Pest for larger sizes to 1

    # number of clones of size k
    counts = np.bincount(phage_clone_sizes)
    mean_m = np.sum(clone_size_survival*counts)/n_samples
    return mean_m
def simulation_stats(folder, timestamp):
# regex to match a year beginning with 20
folder_date = re.findall("20[0-9][0-9]-[0-1][0-9]-[0-3][0-9]", folder)
f, c0, g, B, R, eta, pv, alpha, e, L, mu, m_init, gen_max, max_save, theta, pop_array, \
max_m, mutation_times, all_phages = load_simulation(folder, timestamp);
t_ss = gen_max / 5 # minimun t_ss = 2000, otherwise gen_max/5
#if m_init > 1:
# continue
# check for extinction:
last_tp = pop_array[-1].toarray().flatten()
if not np.any(last_tp[:max_m+1] > 0):
return
if not np.any(last_tp[max_m+1:2*max_m+1] > 0):
return
# subsample time if necessary - makes matrix much smaller in cases where Gillespie was heavily used
# create mask for times that are not near the 0.5 save mark
# CAUTION: if the save timestep is changed, this will do weird things
timestep = 0.5
cutoff = 0.02 # increase the cutoff to keep more points, decrease it to keep fewer
mask1 = np.ma.masked_inside(pop_array[:, -1].toarray().flatten()*g*c0 % timestep, 0, cutoff).mask
new_times = (pop_array[:, -1]*g*c0)[mask1]
timediffs = new_times[1:] - new_times[:-1]
pop_array = pop_array[mask1]
# create mask for timesteps that are 0 (multi-saving)
mask2 = ~np.ma.masked_where(timediffs.toarray().flatten() == 0, timediffs.toarray().flatten()).mask
if type(mask2) != np.bool_: # if nothing is masked, mask2 will be a single value. Only mask if not.
pop_array = pop_array[1:][mask2]
#resave as sparse
pop_array = sparse.coo_matrix(pop_array)
pop_array = sparse.csr_matrix(pop_array)
t_ss_ind = find_nearest(pop_array[:,-1].toarray()*g*c0, t_ss)
if any(x in folder for x in exponential_pv_dates): # then this is a new pv sim
pv_type = 'exponential'
elif any(x in folder for x in exponential_pv_025_dates): # then this is a new pv sim with rate 0.25
pv_type = 'exponential_025'
elif any(x in folder for x in theta_pv_dates): # then this is theta function pv
pv_type = 'theta_function'
else:
pv_type = 'binary'
# doing .toarray() is slow and memory-intensive, so do it once per simulation
nbi = pop_array[t_ss_ind:, 1 : max_m + 1].toarray()
nvi = pop_array[t_ss_ind:, max_m+1 : 2*max_m + 1].toarray()
# get trajectories
# trim at max size in order to measure establishment rate properly
time_end = 500 # time in bacterial generations to run trajectories to
(nvi_trajectories, nbi_trajectories, t_trajectories, nvi_fitness, nbi_fitness,
nbi_acquisitions, phage_size_at_acquisition, trajectory_lengths,
trajectory_extinct, acquisition_rate, phage_identities) = get_trajectories(pop_array, nvi, nbi, f,
g, c0, R, eta, alpha, e, pv, B, mu, max_m, m_init, t_ss_ind,
trim_at_max_size = True, aggressive_trim_length = time_end)
# interpolate trajectories
fitness_times = np.concatenate([np.arange(0.5,6,0.5), np.arange(6,25,2),
np.arange(25, 100, 5), np.arange(100, time_end, 10)])
nvi_interp = interpolate_trajectories(nvi_trajectories, t_trajectories, fitness_times, g, c0)
mean_nvi = np.nanmean(nvi_interp, axis = 1) # conditioned on survival - nan if gone extinct
mean_phage_fitness = np.gradient(mean_nvi, fitness_times) / mean_nvi
# bacterial spacer acquisition
nbi_acquisitions = np.sort(np.array(nbi_acquisitions)[~np.isnan(nbi_acquisitions)])
try:
t = nbi_acquisitions[int(len(nbi_acquisitions)*0.9)] # time at which 90% of acquisitions have happened
t_ind = find_nearest(fitness_times, t)
fitness_at_acquisition = mean_phage_fitness[t_ind]
mean_ind = find_nearest(fitness_times, np.mean(nbi_acquisitions))
first_ind = find_nearest(fitness_times, nbi_acquisitions[0])
if t > fitness_times[-1]: # print warning that trajectories aren't long enough
print(str(timestamp) + " Longer mean trajectories needed: " + str(t) + " > " + str(fitness_times[-1]))
first_acquisition_time = nbi_acquisitions[0]
median_acquisition_time = nbi_acquisitions[int(len(nbi_acquisitions)/2) - 1]
fitness_at_mean_acquisition = mean_phage_fitness[mean_ind]
fitness_at_first_acquisition = mean_phage_fitness[first_ind]
mean_bac_acquisition_time = np.mean(nbi_acquisitions)
except IndexError: # no bacterial acquisitions
fitness_at_acquisition = np.nan
first_acquisition_time = np.nan
median_acquisition_time = np.nan
fitness_at_mean_acquisition = np.nan
fitness_at_first_acquisition = np.nan
mean_bac_acquisition_time = np.nan
# get establishment time
# calculate predicted large clone extinction
nv = np.sum(pop_array[t_ss_ind:, max_m+1 : 2*max_m + 1], axis = 1)
nb = np.sum(pop_array[t_ss_ind:, : max_m+1], axis = 1)
nb0 = pop_array[t_ss_ind:, 0]
C = pop_array[t_ss_ind:, -2]
mean_nb = np.mean(nb[::int(len(nb)/n_snapshots)])
mean_nv = np.mean(nv[::int(len(nb)/n_snapshots)])
mean_C = np.mean(C[::int(len(nb)/n_snapshots)])
mean_nb0 = np.mean(nb0[::int(len(nb)/n_snapshots)])
# get mean field predictions for clone size
nvi_ss = nvi_steady_state(mean_nb, mean_nv, mean_C, mean_nb0, f, g, c0, e, alpha, B, mu,
pv, R, eta)
nbi_ss = nbi_steady_state(mean_nb, f, g, c0, e, alpha, B, mu, pv)
# if nvi_ss is negative (happens sometimes)
while nvi_ss < 0: # recalculate means with different sampling
shift = np.random.randint(0,100)
print("negative nvi_ss: %s" %timestamp)
mean_nb = np.mean(nb[shift::int(len(nb-shift)/n_snapshots)])
mean_nv = np.mean(nv[shift::int(len(nb-shift)/n_snapshots)])
mean_C = np.mean(C[shift::int(len(nb-shift)/n_snapshots)])
mean_nb0 = np.mean(nb0[shift::int(len(nb-shift)/n_snapshots)])
nvi_ss = nvi_steady_state(mean_nb, mean_nv, mean_C, mean_nb0, f, g, c0, e, alpha, B, mu,
pv, R, eta)
# get phage clone sizes
(mean_m, mean_phage_m, mean_large_phage_m, mean_large_phage_size, Delta_bac, Delta_phage,
mean_nu, e_effective) = get_clone_sizes(pop_array, c0, e, max_m, t_ss_ind, pv_type, theta, all_phages, 1,
n_snapshots = n_snapshots)
# use simulation nbi_ss to get extinction times, same as for nvi
bac_extinction_times_large, bac_extinction_times_large_phage_present = bac_large_clone_extinction(pop_array, nbi, nvi,
max_m, nbi_ss, t_ss_ind)
# get large trajectories with size cutoff = nvi_ss
sim_length_ss = last_tp[-1]*g*c0 - t_ss
mean_lifetime_large, establishment_rate, establishment_time = get_large_trajectories(nvi_trajectories,
t_trajectories, trajectory_lengths, trajectory_extinct, nvi_ss, g, c0, sim_length_ss)
bac_establishment_rate, establishment_time_bac = get_bac_large_trajectories(nbi_trajectories,
t_trajectories, nbi_ss, g, c0, sim_length_ss)
# get spacer turnover and turnover speed
turnover_array, interp_times = fraction_remaining(pop_array, t_ss, t_ss_ind, g, c0, gen_max, max_m)
speed, start_ind = calculate_speed(turnover_array, interp_times)
F = f*g*c0
beta = mean_nb*alpha*pv
delta = F + alpha*mean_nb*(1-pv)
freq = nvi_ss / mean_nv
mean_T_backwards_nvi_ss = 2*mean_nv*freq*(1-np.log(freq))*g*c0/((B-1)**2 * beta + delta)
p = beta / (beta + delta)
predicted_establishment_fraction = (1 - (2-3*B*p + p*B**2)/(B*p*(B-1)))
nvi = pop_array[t_ss_ind:, max_m+1 : 2*max_m + 1]
rescaled_phage_m = phage_m_to_bac_m(nvi, mean_nb, c0, g, f, alpha, pv, B)
all_mutation_times = []
for times in mutation_times:
all_mutation_times += list(times)
all_mutation_times = np.sort(all_mutation_times)
all_mutation_times = all_mutation_times[all_mutation_times > 0]
all_mutation_times_ss = all_mutation_times[all_mutation_times*g*c0 > t_ss]
mutation_rate_actual = len(all_mutation_times_ss)/((all_mutation_times_ss[-1] - all_mutation_times_ss[0])*g*c0)
# add to data frame
df = pd.DataFrame()
df['C0'] = [c0]
df['mu'] = [mu]
df['eta'] = [eta]
df['e'] = [e]
df['B'] = [B]
df['m_init'] = [m_init]
df['pv_type'] = [pv_type]
df['gen_max'] = [gen_max]
df['max_save'] = [max_save]
df['theta'] = [theta]
df['t_ss'] = [t_ss]
df['mean_m'] = [mean_m]
df['mean_phage_m'] = [mean_phage_m]
df['mean_large_phage_m'] = [mean_large_phage_m]
df['mean_large_phage_size'] = [mean_large_phage_size]
df['rescaled_phage_m'] = [rescaled_phage_m]
df['timestamp'] = [timestamp]
df['folder_date'] = folder_date
df['mean_nu'] = [mean_nu]
df['mean_nb'] = [mean_nb]
df['mean_nv'] = [mean_nv]
df['mean_C'] = [mean_C]
df['Delta_bac'] = [Delta_bac]
df['Delta_phage'] = [Delta_phage]
df['e_effective'] = [e_effective]
df['fitness_discrepancy'] = [mean_phage_fitness[0]]
df['mean_size_at_acquisition'] = [np.nanmean(phage_size_at_acquisition)] # mean phage clone size at the time that a spacer is acquired, ignoring trajectories for which no spacer is acquired
df['std_size_at_acquisition'] = [np.nanstd(phage_size_at_acquisition)]# std dev phage clone size at the time that a spacer is acquired, ignoring trajectories for which no spacer is acquired
df['fitness_at_90percent_acquisition'] = [fitness_at_acquisition]
df['fitness_at_mean_acquisition'] = [fitness_at_mean_acquisition]
df['fitness_at_first_acquisition'] = [fitness_at_first_acquisition]
df['num_bac_acquisitions'] = [len(nbi_acquisitions)]
df['mean_bac_acquisition_time'] = [mean_bac_acquisition_time]
df['median_bac_acquisition_time'] = [median_acquisition_time]
df['first_bac_acquisition_time'] = [first_acquisition_time]
df['mean_large_trajectory_length_nvi_ss'] = [mean_lifetime_large]
df['mean_trajectory_length'] = [np.mean(trajectory_lengths)]
df['mean_T_backwards_nvi_ss'] = [mean_T_backwards_nvi_ss]
df['mean_bac_extinction_time'] = [np.mean(bac_extinction_times_large)*g*c0] | |
= ds[-2]
else:
vd = ds[-1]
if vd[:4] == 'olev' or vd == 'rho':
gtype = 'o'
nz = self.mcfg['nlo']
elif vd[:4] == 'alev':
nz = self.mcfg['nla']
elif vd in ['slevel']:
nz = self.mcfg['nls']
elif vd in ['snowdepth','sdepth']:
nz = 5
elif vd == 'aslevel':
nz = self.mcfg['nlas']
else:
mlg.prnt( 'Failed to parse dimensions %s: %s' % (i.label,i.dimensions) )
raise
else:
nz = i.levels
dims = set( i.dimensions.split( '|' ) )
if 'latitude' in dims and 'longitude' in dims:
if gtype == 'o':
nh = self.mcfg['nho']
self.isLatLon[i.uid] = 'o'
else:
nh = self.mcfg['nha']
self.isLatLon[i.uid] = 'a'
else:
nh = 10
self.isLatLon[i.uid] = False
self.szss[i.uid] = nh*nz
if self.isLatLon[i.uid] != False and len(dims) == 2:
self.szssSrf[i.uid] = { 'a':self.mcfg['nha']*nz, 'o':self.mcfg['nho']*nz }
for k in szr:
if self.isLatLon[i.uid] != False:
self.szgss[k][i.uid] = szr[k]*nz
else:
self.szgss[k][i.uid] = nh*nz
for i in self.dq.coll['structure'].items:
s = 1
knownAtmos = False
if i.__dict__.get('odims','') != '':
if i.odims in odsz:
sf = odsz[i.odims]
else:
## print 'SEVERE.odims.00001: no information on dimension size: %s' % i.odims
sf = 5
if type( sf ) == type( () ):
sf = sf[0]
s = s*sf
if i.odims not in ['iceband']:
knownAtmos = True
if i.spid in self.szss:
self.sz[i.uid] = self.szss[i.spid]*s
if i.uid in self.szssSrf:
if knownAtmos:
self.sz[i.uid] = self.szssSrf[i.spid]['a']*s
else:
for k in ['a','o']:
self.szSrf[i.uid][k] = self.szssSrf[i.spid][k]*s
for k in szr:
self.szg[k][i.uid] = self.szgss[k][i.spid]*s
else:
print ('WARNING: spid has no size info: %s [%s]' % (i.spid,i.uid) )
self.sz[i.uid] = 0.
for k in szr:
self.szg[k][i.uid] = 0.
def getRequestLinkByMip( self, mipSel ):
"""Return the set of request links which are associated with specified MIP"""
if type(mipSel) == type( {} ):
return self.getRequestLinkByMipObjective(self,mipSel)
if type(mipSel) == type(''):
t1 = lambda x: x == mipSel
elif type(mipSel) == type(set()):
t1 = lambda x: x in mipSel
s = set()
for i in self.dq.coll['requestLink'].items:
if t1(i.mip):
if 'requestItem' in self.dq.inx.iref_by_sect[i.uid].a:
if any( [ self.rqiExp[x][3] > 0 for x in self.dq.inx.iref_by_sect[i.uid].a['requestItem'] if x in self.rqiExp ] ):
s.add( i )
self.rqs = list( s )
return self.rqs
def getRequestLinkByMipObjective( self, mipSel ):
"""Return the set of request links which are associated with specified MIP and its objectives"""
assert type(mipSel) == type( {} ),'Argument must be a dictionary, listing objectives for each MIP'
s = set()
for i in self.dq.coll['requestLink'].items:
if i.mip in mipSel:
if len(mipSel[i.mip]) == 0:
s.add( i )
elif 'objectiveLink' in self.dq.inx.iref_by_sect[i.uid].a:
ss = set( [self.dq.inx.uid[k].label for k in self.dq.inx.iref_by_sect[i.uid].a['objectiveLink'] ] )
if any( [x in mipSel[i.mip] for x in ss] ):
s.add( i )
##
## filter requestLinks by tierMax: check to see whether they link to experiments with tier below or equal to tiermax.
##
s1 = set()
for i in s:
if 'requestItem' in self.dq.inx.iref_by_sect[i.uid].a:
if any( [ self.rqiExp[x][-1] > 0 for x in self.dq.inx.iref_by_sect[i.uid].a['requestItem'] if x in self.rqiExp ] ):
s1.add( i )
self.rqs = list( s1 )
return self.rqs
    def varGroupXexpt(self, rqList ):
        """For a list of request links, return a list of variable group IDs for each experiment.

        NOTE(review): the line that would populate the mapping is commented out
        below, so this currently resets self.cc and returns an empty
        defaultdict(list); confirm whether callers expect it to be populated.
        """
        self.cc = collections.defaultdict( list )
## dummy = {self.cc[i.expt].append(i.rlid) for i in self.dq.coll['requestItem'].items if i.rlid in {j.uid for j in rqList} }
        return self.cc
def yearsInRequest(self, rql ):
self.ntot = sum( [i.ny for i in self.dq.coll['requestItem'].items if i.rlid == rql.uid] )
return self.ntot
def rqlByExpt( self, l1, ex, pmax=2, expFullEx=False ):
"""rqlByExpt: return a set of request links for an experiment"""
##
inx = self.dq.inx
if ex != None:
exi = self.dq.inx.uid[ex]
if exi._h.label == 'experiment':
exset = set( [ex,exi.egid,exi.mip] )
else:
exset = set( self.esid_to_exptList(ex,deref=False,full=expFullEx) )
##
## rql is the set of all request links which are associated with a request item for this experiment set
##
l1p = set()
for i in l1:
if i.preset < 0 or i.preset <= pmax:
if i.esid in exset:
l1p.add(i)
else:
exset = None
l1p = l1
rql0 = set()
for i in l1p:
rql0.add(i.rlid)
rqlInv = set()
for u in rql0:
if inx.uid[u]._h.label == 'remarks':
rqlInv.add( u )
if len(rqlInv) != 0:
mlg.prnt ( 'WARNING.001.00002: %s invalid request links from request items ...' % len(rqlInv) )
rql = set()
for u in rql0:
if inx.uid[u]._h.label != 'remarks':
rql.add( u )
return rql, l1p, exset
    def varsByRql( self, rql, pmax=2, intersection=False, asDict=False):
        """The complete set of variables associated with a set of request links.

        :param rql: iterable of requestLink uids.
        :param pmax: maximum priority to include (1 is highest priority).
        :param intersection: if True, return only variables requested by every
            MIP (see NOTE below about ``imips``).
        :param asDict: if True, return a dict mapping CMORvar uid to the list
            of variable groups requesting it, instead of a plain set of uids.
        """
        inx = self.dq.inx
        # cc1: MIP name -> set of variable-group refids; when a link opts into
        # a priority ceiling the entry is a (refid, priority) tuple instead.
        cc1 = collections.defaultdict( set )
        for i in rql:
            o = inx.uid[i]
            if o.opt == 'priority':
                # 'opar' carries the priority ceiling as a string (e.g. '2.0')
                p = int( float( o.opar ) )
                assert p in [1,2,3], 'Priority incorrectly set .. %s, %s, %s' % (o.label,o.title, o.uid)
                cc1[inx.uid[i].mip].add( (inx.uid[i].refid,p) )
            else:
                cc1[inx.uid[i].mip].add( inx.uid[i].refid )
        if intersection:
            ccv = {}
            #
            # set of request variables for each MIP
            #
            for k in cc1:
                # NOTE(review): 'thisc' is computed but never used -- confirm it can be removed
                thisc = reduce( operator.or_, [set( inx.iref_by_sect[vg].a['requestVar'] ) for vg in cc1[k] ] )
                # rqvgs: variable-group refid -> set of priority ceilings;
                # an untagged group defaults to priority 3 (no restriction)
                rqvgs = collections.defaultdict( set )
                for x in cc1[k]:
                    if type(x) == type( () ):
                        rqvgs[x[0]].add( x[1] )
                    else:
                        rqvgs[x].add( 3 )
                s = set()
                for vg in rqvgs:
                    for l in inx.iref_by_sect[vg].a['requestVar']:
                        # keep a variable only if its priority clears both the
                        # caller's pmax and the group's own ceiling
                        if inx.uid[l].priority <= min(pmax,max(rqvgs[vg])):
                            s.add( inx.uid[l].vid )
                ccv[k] = s
            # NOTE(review): 'imips' is not defined in this method; presumably a
            # module- or class-level collection of all MIPs -- confirm. If some
            # MIP contributed no groups, the intersection is empty by definition.
            if len( ccv.keys() ) < len( list(imips) ):
                vars = set()
            else:
                vars = reduce( operator.and_, [ccv[k] for k in ccv] )
        else:
            # union across all MIPs: merge priority ceilings per variable group
            rqvgs = collections.defaultdict( set )
            for k in cc1:
                for x in cc1[k]:
                    if type(x) == type( () ):
                        rqvgs[x[0]].add( x[1] )
                    else:
                        rqvgs[x].add( 3 )
            ###To obtain a set of variables associated with this collection of variable groups:
            if asDict:
                vars = collections.defaultdict( list )
            else:
                vars = set()
            for vg in rqvgs:
                for l in inx.iref_by_sect[vg].a['requestVar']:
                    if inx.uid[l].priority <= min(pmax,max(rqvgs[vg])):
                        if asDict:
                            vars[inx.uid[l].vid].append( vg )
                        else:
                            vars.add(inx.uid[l].vid)
        ##col1 = reduce( operator.or_, [set( inx.iref_by_sect[vg].a['requestVar'] ) for vg in rqvg ] )
        ### filter out cases where the request does not point to a CMOR variable.
        ##vars = {vid for vid in vars if inx.uid[vid][0] == u'CMORvar'}
        # keep only entries that resolve to CMOR variables
        if asDict:
            thisvars = {}
            for vid in vars:
                if inx.uid[vid]._h.label == u'CMORvar':
                    thisvars[vid] = vars[vid]
        else:
            thisvars = set()
            for vid in vars:
                if inx.uid[vid]._h.label == u'CMORvar':
                    thisvars.add(vid)
        return thisvars
def exptYears( self, rqll, ex=None, exBlack=None):
"""Parse a set of request links, and get years requested for each (varGroup, expt, grid) tuple """
self.tsliceDict = collections.defaultdict( dict )
ccts = collections.defaultdict( dict )
ccts2 = collections.defaultdict( set )
cc = collections.defaultdict( set )
for rl in rqll:
if 'requestItem' not in self.dq.inx.iref_by_sect[rl.uid].a:
self.errorLog['WARN.001.00001'].add( 'no request items for: %s, %s' % (rl.uid, rl.title) )
##print ( 'WARN.001.00001: no request items for: %s, %s' % (rl.uid, rl.title) )
else:
##print rl.uid, rl.title, rl.grid, rl.gridreq
if self.gridPolicyForce != None:
grd = self.gridPolicyForce
elif rl.grid in ['1deg','2deg','100km']:
if rl.grid == '100km':
grd = '1deg'
else:
grd = rl.grid
else:
## note that naming of "gridreq" is unfortunate ... "No" means that native grid is required
if rl.gridreq in ['No', 'no']:
#or self.gridPolicyDefaultNative:
grd = 'native'
elif rl.gridreq in ['no*1']:
#or self.gridPolicyDefaultNative:
grd = 'native:01'
else:
##print ( 'INFO.grd.00001: defaulting to grid ..%s, %s, %s' % (rl.label,rl.title, rl.uid) )
grd = 'DEF'
for iu in self.dq.inx.iref_by_sect[rl.uid].a['requestItem']:
i = self.dq.inx.uid[iu]
##
## apply "treset" filter to request items linked to this group.
##
if self.tierMax < 0 or 'treset' not in i.__dict__ or i.treset <= self.tierMax:
if iu in self.rqiExp:
for e in self.rqiExp[iu][1]:
if (ex == None or e in ex) and (exBlack == None or e not in exBlack):
this = self.rqiExp[iu][1][e]
if this != None:
thisns = this[-3]
thisny = this[-2]
thisne = this[-1]
##cc[ (rl.refid,e,grd) ].add( filter1( thisns*thisny*thisne, i.nymax) )
cc[ (rl.refid,e,grd) ].add( thisns*thisny*thisne )
if self.rqiExp[iu][4] != None:
ccts[(rl.refid,e)][thisns*thisny*thisne] = self.rqiExp[iu][4]
ccts2[(rl.refid,e)].add( self.rqiExp[iu][4] )
ee = collections.defaultdict( dict )
revertToLast = True
ey = exYr()
if revertToLast:
for g,e,grd in cc:
ee[g][(e,grd)] = max( cc[( g,e,grd) ] )
##if (g,e) in ccts and ee[g][(e,grd)] in ccts[(g,e)]:
#
# possible corner cut here ... as max length may not include all years where | |
import os
import unittest
import logging
import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
from slicer.util import VTKObservationMixin
#
# CombineModels
#
class CombineModels(ScriptedLoadableModule):
  """Module registration class for the Combine Models module.

  Uses ScriptedLoadableModule base class, available at:
  https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
  """

  def __init__(self, parent):
    ScriptedLoadableModule.__init__(self, parent)
    self.parent.title = "Combine Models"
    self.parent.categories = ["Surface Models"]
    self.parent.dependencies = []
    self.parent.contributors = ["<NAME> (PerkLab)"]
    # BUG FIX: removed a stray closing </a> tag (leftover of an earlier HTML link)
    # that was displayed verbatim in the module help text.
    self.parent.helpText = """
This module can perform Boolean operations on model nodes (surface meshes).
"""
    # TODO: replace with organization, grant and thanks
    self.parent.acknowledgementText = """
The module uses https://github.com/zippy84/vtkbool for processing.
"""
    # Make the compiled loadable-module logic (vtkSlicerCombineModelsModuleLogicPython)
    # importable: it is deployed next to this scripted module under
    # qt-loadable-modules, inside a configuration-specific subfolder on
    # multi-configuration builds (e.g. Windows).
    import sys
    for subfolder in ['Release', 'Debug', 'RelWithDebInfo', 'MinSizeRel', '.']:
      logicPath = os.path.realpath(os.path.join(os.path.dirname(__file__), '../qt-loadable-modules/'+subfolder)).replace('\\','/')
      if os.path.exists(logicPath):
        sys.path.append(logicPath)
        break
#
# CombineModelsWidget
#
class CombineModelsWidget(ScriptedLoadableModuleWidget, VTKObservationMixin):
  """GUI widget for the Combine Models module.

  Uses ScriptedLoadableModuleWidget base class, available at:
  https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
  """
  def __init__(self, parent=None):
    """
    Called when the user opens the module the first time and the widget is initialized.
    """
    ScriptedLoadableModuleWidget.__init__(self, parent)
    VTKObservationMixin.__init__(self) # needed for parameter node observation
    self.logic = None  # CombineModelsLogic instance, created in setup()
    self._parameterNode = None  # module state node, observed for GUI sync
    self._updatingGUIFromParameterNode = False  # guard against GUI<->node update loops
  def setup(self):
    """
    Called when the user opens the module the first time and the widget is initialized.
    """
    ScriptedLoadableModuleWidget.setup(self)
    # Load widget from .ui file (created by Qt Designer).
    # Additional widgets can be instantiated manually and added to self.layout.
    uiWidget = slicer.util.loadUI(self.resourcePath('UI/CombineModels.ui'))
    self.layout.addWidget(uiWidget)
    self.ui = slicer.util.childWidgetVariables(uiWidget)
    # Set scene in MRML widgets. Make sure that in Qt designer the top-level qMRMLWidget's
    # "mrmlSceneChanged(vtkMRMLScene*)" signal is connected to each MRML widget's
    # "setMRMLScene(vtkMRMLScene*)" slot.
    uiWidget.setMRMLScene(slicer.mrmlScene)
    # Create logic class. Logic implements all computations that should be possible to run
    # in batch mode, without a graphical user interface.
    self.logic = CombineModelsLogic()
    # Connections
    # These connections ensure that we update parameter node when scene is closed
    self.addObserver(slicer.mrmlScene, slicer.mrmlScene.StartCloseEvent, self.onSceneStartClose)
    self.addObserver(slicer.mrmlScene, slicer.mrmlScene.EndCloseEvent, self.onSceneEndClose)
    # These connections ensure that whenever user changes some settings on the GUI, that is saved in the MRML scene
    # (in the selected parameter node).
    self.ui.inputModelASelector.connect("currentNodeChanged(vtkMRMLNode*)", self.updateParameterNodeFromGUI)
    self.ui.inputModelBSelector.connect("currentNodeChanged(vtkMRMLNode*)", self.updateParameterNodeFromGUI)
    self.ui.outputModelSelector.connect("currentNodeChanged(vtkMRMLNode*)", self.updateParameterNodeFromGUI)
    # NOTE(review): toggled(bool) fires for both the newly checked and the newly
    # unchecked radio button, and these lambdas ignore the boolean argument;
    # this relies on the checked button's signal being handled last -- confirm.
    self.ui.operationUnionRadioButton.connect("toggled(bool)", lambda toggled, op="union": self.operationButtonToggled(op))
    self.ui.operationIntersectionRadioButton.connect("toggled(bool)", lambda toggled, op="intersection": self.operationButtonToggled(op))
    self.ui.operationDifferenceRadioButton.connect("toggled(bool)", lambda toggled, op="difference": self.operationButtonToggled(op))
    self.ui.operationDifference2RadioButton.connect("toggled(bool)", lambda toggled, op="difference2": self.operationButtonToggled(op))
    # Buttons
    self.ui.applyButton.connect('clicked(bool)', self.onApplyButton)
    self.ui.toggleVisibilityButton.connect('clicked(bool)', self.onToggleVisibilityButton)
    # Make sure parameter node is initialized (needed for module reload)
    self.initializeParameterNode()
  def cleanup(self):
    """
    Called when the application closes and the module widget is destroyed.
    """
    self.removeObservers()
  def enter(self):
    """
    Called each time the user opens this module.
    """
    # Make sure parameter node exists and observed
    self.initializeParameterNode()
  def exit(self):
    """
    Called each time the user opens a different module.
    """
    # Do not react to parameter node changes (GUI will be updated when the user enters into the module)
    self.removeObserver(self._parameterNode, vtk.vtkCommand.ModifiedEvent, self.updateGUIFromParameterNode)
  def onSceneStartClose(self, caller, event):
    """
    Called just before the scene is closed.
    """
    # Parameter node will be reset, do not use it anymore
    self.setParameterNode(None)
  def onSceneEndClose(self, caller, event):
    """
    Called just after the scene is closed.
    """
    # If this module is shown while the scene is closed then recreate a new parameter node immediately
    if self.parent.isEntered:
      self.initializeParameterNode()
  def initializeParameterNode(self):
    """
    Ensure parameter node exists and observed.
    """
    # Parameter node stores all user choices in parameter values, node selections, etc.
    # so that when the scene is saved and reloaded, these settings are restored.
    self.setParameterNode(self.logic.getParameterNode())
  def setParameterNode(self, inputParameterNode):
    """
    Set and observe parameter node.
    Observation is needed because when the parameter node is changed then the GUI must be updated immediately.
    """
    if inputParameterNode:
      self.logic.setDefaultParameters(inputParameterNode)
    # Unobserve previously selected parameter node and add an observer to the newly selected.
    # Changes of parameter node are observed so that whenever parameters are changed by a script or any other module
    # those are reflected immediately in the GUI.
    if self._parameterNode is not None:
      self.removeObserver(self._parameterNode, vtk.vtkCommand.ModifiedEvent, self.updateGUIFromParameterNode)
    self._parameterNode = inputParameterNode
    if self._parameterNode is not None:
      self.addObserver(self._parameterNode, vtk.vtkCommand.ModifiedEvent, self.updateGUIFromParameterNode)
    # Initial GUI update
    self.updateGUIFromParameterNode()
  def updateGUIFromParameterNode(self, caller=None, event=None):
    """
    This method is called whenever parameter node is changed.
    The module GUI is updated to show the current state of the parameter node.
    """
    if self._parameterNode is None or self._updatingGUIFromParameterNode:
      return
    # Make sure GUI changes do not call updateParameterNodeFromGUI (it could cause infinite loop)
    self._updatingGUIFromParameterNode = True
    # Update node selectors and sliders
    self.ui.inputModelASelector.setCurrentNode(self._parameterNode.GetNodeReference("InputModelA"))
    self.ui.inputModelBSelector.setCurrentNode(self._parameterNode.GetNodeReference("InputModelB"))
    self.ui.outputModelSelector.setCurrentNode(self._parameterNode.GetNodeReference("OutputModel"))
    operation = self._parameterNode.GetParameter("Operation")
    self.ui.operationUnionRadioButton.checked = (operation == "union")
    self.ui.operationIntersectionRadioButton.checked = (operation == "intersection")
    self.ui.operationDifferenceRadioButton.checked = (operation == "difference")
    self.ui.operationDifference2RadioButton.checked = (operation == "difference2")
    # Update buttons states and tooltips
    if (self._parameterNode.GetNodeReference("InputModelA")
      and self._parameterNode.GetNodeReference("InputModelB")):
      self.ui.applyButton.toolTip = "Compute output model"
      self.ui.applyButton.enabled = True
    else:
      self.ui.applyButton.toolTip = "Select input model nodes"
      self.ui.applyButton.enabled = False
    self.ui.toggleVisibilityButton.enabled = (self._parameterNode.GetNodeReference("OutputModel") is not None)
    # All the GUI updates are done
    self._updatingGUIFromParameterNode = False
  def updateParameterNodeFromGUI(self, caller=None, event=None):
    """
    This method is called when the user makes any change in the GUI.
    The changes are saved into the parameter node (so that they are restored when the scene is saved and loaded).
    """
    if self._parameterNode is None or self._updatingGUIFromParameterNode:
      return
    wasModified = self._parameterNode.StartModify()  # Modify all properties in a single batch
    self._parameterNode.SetNodeReferenceID("InputModelA", self.ui.inputModelASelector.currentNodeID)
    self._parameterNode.SetNodeReferenceID("InputModelB", self.ui.inputModelBSelector.currentNodeID)
    self._parameterNode.SetNodeReferenceID("OutputModel", self.ui.outputModelSelector.currentNodeID)
    self._parameterNode.EndModify(wasModified)
  def operationButtonToggled(self, operation):
    """Store the selected Boolean operation name in the parameter node."""
    self._parameterNode.SetParameter("Operation", operation)
  def onApplyButton(self):
    """
    Run processing when user clicks "Apply" button.
    """
    try:
      qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor)
      # Add a new node for output, if no output node is selected
      if not self._parameterNode.GetNodeReference("OutputModel"):
        outputModel = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLModelNode")
        self._parameterNode.SetNodeReferenceID("OutputModel", outputModel.GetID())
      # Compute output
      self.logic.process(
        self._parameterNode.GetNodeReference("InputModelA"),
        self._parameterNode.GetNodeReference("InputModelB"),
        self._parameterNode.GetNodeReference("OutputModel"),
        self._parameterNode.GetParameter("Operation"))
    except Exception as e:
      slicer.util.errorDisplay("Failed to compute results: "+str(e))
      import traceback
      traceback.print_exc()
    finally:
      # Always restore the cursor, even when processing failed
      qt.QApplication.restoreOverrideCursor()
  def onToggleVisibilityButton(self):
    """Flip visibility between the output model and the two input models."""
    outputModel = self._parameterNode.GetNodeReference("OutputModel")
    inputModelA = self._parameterNode.GetNodeReference("InputModelA")
    inputModelB = self._parameterNode.GetNodeReference("InputModelB")
    if not outputModel:
      return
    outputModel.CreateDefaultDisplayNodes()
    # Show either the result or the inputs, never both at once
    showOutput = not outputModel.GetDisplayNode().GetVisibility()
    inputModelA.GetDisplayNode().SetVisibility(not showOutput)
    inputModelB.GetDisplayNode().SetVisibility(not showOutput)
    outputModel.GetDisplayNode().SetVisibility(showOutput)
#
# CombineModelsLogic
#
class CombineModelsLogic(ScriptedLoadableModuleLogic):
  """This class should implement all the actual
  computation done by your module. The interface
  should be such that other python code can import
  this class and make use of the functionality without
  requiring an instance of the Widget.
  Uses ScriptedLoadableModuleLogic base class, available at:
  https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
  """

  def __init__(self):
    """
    Called when the logic class is instantiated. Can be used for initializing member variables.
    """
    ScriptedLoadableModuleLogic.__init__(self)

  def setDefaultParameters(self, parameterNode):
    """
    Initialize parameter node with default settings.
    """
    if not parameterNode.GetParameter("Operation"):
      parameterNode.SetParameter("Operation", "union")

  def _polyDataConnectionInOutputSpace(self, inputModel, outputModel):
    """Return a pipeline connection producing inputModel's mesh in outputModel's coordinate space.

    If both models live under the same parent transform, the mesh is usable
    directly; otherwise it is routed through a vtkTransformPolyDataFilter that
    maps it into the output model's space. Factored out of process() to remove
    the duplicated per-input transform handling.
    """
    if inputModel.GetParentTransformNode() == outputModel.GetParentTransformNode():
      return inputModel.GetPolyDataConnection()
    transformToOutput = vtk.vtkGeneralTransform()
    slicer.vtkMRMLTransformNode.GetTransformBetweenNodes(inputModel.GetParentTransformNode(), outputModel.GetParentTransformNode(), transformToOutput)
    transformer = vtk.vtkTransformPolyDataFilter()
    transformer.SetTransform(transformToOutput)
    transformer.SetInputConnection(inputModel.GetPolyDataConnection())
    # The returned port keeps the transformer alive through the pipeline reference.
    return transformer.GetOutputPort()

  def process(self, inputModelA, inputModelB, outputModel, operation):
    """
    Run the processing algorithm.
    Can be used without GUI widget.
    :param inputModelA: first input model node
    :param inputModelB: second input model node
    :param outputModel: result model node, if empty then a new output node will be created
    :param operation: union, intersection, difference, difference2
    :raises ValueError: if a node is missing or the operation name is invalid
    """
    if not inputModelA or not inputModelB or not outputModel:
      raise ValueError("Input or output model nodes are invalid")
    import time
    startTime = time.time()
    logging.info('Processing started')
    # vtkbool is provided by the compiled loadable module; its folder is added
    # to sys.path by CombineModels.__init__.
    import vtkSlicerCombineModelsModuleLogicPython as vtkbool
    combine = vtkbool.vtkPolyDataBooleanFilter()
    if operation == 'union':
      combine.SetOperModeToUnion()
    elif operation == 'intersection':
      combine.SetOperModeToIntersection()
    elif operation == 'difference':
      combine.SetOperModeToDifference()
    elif operation == 'difference2':
      combine.SetOperModeToDifference2()
    else:
      raise ValueError("Invalid operation: "+operation)
    # Both inputs must be expressed in the output model's coordinate space.
    combine.SetInputConnection(0, self._polyDataConnectionInOutputSpace(inputModelA, outputModel))
    combine.SetInputConnection(1, self._polyDataConnectionInOutputSpace(inputModelB, outputModel))
    # These parameters might be useful to expose:
    # combine.MergeRegsOn() # default off
    # combine.DecPolysOff() # default on
    combine.Update()
    outputModel.SetAndObservePolyData(combine.GetOutput())
    outputModel.CreateDefaultDisplayNodes()
    # The filter creates a few scalars, don't show them by default, as they would be somewhat distracting
    outputModel.GetDisplayNode().SetScalarVisibility(False)
    stopTime = time.time()
    logging.info('Processing completed in {0:.2f} seconds'.format(stopTime-startTime))
#
# CombineModelsTest
#
class CombineModelsTest(ScriptedLoadableModuleTest):
"""
This is the test case for your scripted module.
Uses ScriptedLoadableModuleTest base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def setUp(self):
""" Do whatever is needed to reset the state - typically a scene clear will be enough.
"""
slicer.mrmlScene.Clear()
def runTest(self):
"""Run as few or as many tests as needed here.
"""
self.setUp()
self.test_CombineModels1()
def test_CombineModels1(self):
""" Ideally you should have several levels of tests. At the lowest level
tests should exercise the functionality of the logic with different inputs
(both valid and invalid). At higher levels your tests should emulate the
way the user would interact with your code and confirm that it still works
the way you intended.
One of the most important features of the tests is that it | |
#!/usr/bin/env python
#
# Project:
# glideinWMS
#
# File Version:
#
# Description:
#   This is the main program of the glideinFrontend
#
# Arguments:
# $1 = work_dir
#
# Author:
# <NAME>
#
import os
import sys
import fcntl
import subprocess
import traceback
import signal
import time
import string
import logging
STARTUP_DIR = sys.path[0]
sys.path.append(os.path.join(STARTUP_DIR,"../.."))
from glideinwms.lib import logSupport
from glideinwms.lib import cleanupSupport
from glideinwms.frontend import glideinFrontendPidLib
from glideinwms.frontend import glideinFrontendConfig
from glideinwms.frontend import glideinFrontendLib
from glideinwms.frontend import glideinFrontendInterface
from glideinwms.frontend import glideinFrontendMonitorAggregator
from glideinwms.frontend import glideinFrontendMonitoring
from glideinFrontendElement import glideinFrontendElement
############################################################
# KEL remove this method and just call the monitor aggregator method directly below? we don't use the results
def aggregate_stats():
    """Aggregate the per-group monitoring data into frontend totals.

    The aggregator's return value is intentionally discarded; this wrapper
    exists only to give the call a descriptive name.
    """
    glideinFrontendMonitorAggregator.aggregateStatus()
############################################################
class FailureCounter:
    """Track failure timestamps inside a sliding time window.

    Failures older than ``max_lifetime`` seconds are forgotten whenever the
    counter is updated or queried.
    """

    def __init__(self, my_name, max_lifetime):
        self.my_name = my_name
        self.max_lifetime = max_lifetime
        self.failure_times = []

    def add_failure(self, when=None):
        """Record a failure at time ``when`` (defaults to now)."""
        if when is None:
            when = time.time()
        self.clean_old()
        self.failure_times.append(when)

    def get_failures(self):
        """Return the failure times still inside the window."""
        self.clean_old()
        return self.failure_times

    def count_failures(self):
        """Return how many failures are still inside the window."""
        return len(self.get_failures())

    # INTERNAL
    def clean_old(self):
        """Drop expired records; assumes failure_times is chronologically ordered."""
        cutoff = time.time() - self.max_lifetime
        while self.failure_times and self.failure_times[0] < cutoff:
            self.failure_times.pop(0)
############################################################
def spawn_group(work_dir, group_name, action):
    """Start a glideinFrontendElement child process for one group.

    Returns the Popen object with its stdout/stderr pipes switched to
    non-blocking mode so they can be drained without stalling the parent.
    """
    global STARTUP_DIR
    cmd = [sys.executable,
           os.path.join(STARTUP_DIR, "glideinFrontendElement.py"),
           str(os.getpid()),
           work_dir,
           group_name,
           action]
    proc = subprocess.Popen(cmd, shell=False,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    # make both pipes non-blocking so later reads never stall the parent
    for pipe_fd in (proc.stdout.fileno(), proc.stderr.fileno()):
        flags = fcntl.fcntl(pipe_fd, fcntl.F_GETFL)
        fcntl.fcntl(pipe_fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
    return proc
############################################################
def poll_group_process(group_name, child):
    """Drain a child's stdout/stderr into the logs and return its exit status.

    Returns None while the child is still running, the exit code otherwise.
    """
    # stdout is logged at info level, stderr at warning level
    for stream, emit in ((child.stdout, logSupport.log.info),
                        (child.stderr, logSupport.log.warning)):
        try:
            buf = stream.read()
            if buf:
                emit("[%s]: %s" % (group_name, buf))
        except IOError:
            pass  # nothing to read right now (pipes are non-blocking)
    return child.poll()
############################################################
# return the list of (group,walltime) pairs
def spawn_iteration(work_dir, groups, max_active,
                    failure_dict, max_failures,
                    action):
    """Run one iteration of `action` over all groups, bounded in parallelism.

    :param work_dir: frontend working directory, handed to each child
    :param groups: list of group names to process
    :param max_active: maximum number of concurrently running children
    :param failure_dict: group name -> FailureCounter, updated on failures
    :param max_failures: abort once any group exceeds this many recent failures
    :param action: action string handed to glideinFrontendElement.py
    :return: list of (group, walltime) pairs, one per group
    :raise RuntimeError: if any group exceeded max_failures recent failures
    """
    childs = {}
    for group_name in groups:
        childs[group_name] = {'state': 'queued'}

    active_groups = 0
    groups_tofinish = len(groups)
    max_num_failures = 0

    logSupport.log.info("Starting iteration")
    try:
        while groups_tofinish > 0:
            done_something = False
            # reap any children that finished since the last poll
            for group_name in groups:
                if childs[group_name]['state'] == 'spawned':
                    group_rc = poll_group_process(group_name, childs[group_name]['data'])
                    if group_rc is not None:  # None means "still alive"
                        if group_rc == 0:
                            childs[group_name]['state'] = 'finished'
                        else:
                            childs[group_name]['state'] = 'failed'
                            failure_dict[group_name].add_failure()
                            num_failures = failure_dict[group_name].count_failures()
                            max_num_failures = max(max_num_failures, num_failures)
                            logSupport.log.warning("Group %s terminated with exit code %i (%i recent failure)" % (group_name, group_rc, num_failures))
                        childs[group_name]['end_time'] = time.time()
                        active_groups -= 1
                        groups_tofinish -= 1
                        done_something = True
            # spawn queued groups while there is capacity left
            for group_name in groups:
                if active_groups < max_active:
                    if childs[group_name]['state'] == 'queued':
                        childs[group_name]['data'] = spawn_group(work_dir, group_name, action)
                        childs[group_name]['state'] = 'spawned'
                        childs[group_name]['start_time'] = time.time()
                        active_groups += 1
                        done_something = True
                else:
                    break
            if done_something:
                logSupport.log.info("Active groups = %i, Groups to finish = %i" % (active_groups, groups_tofinish))
            if groups_tofinish > 0:
                time.sleep(0.01)
        logSupport.log.info("All groups finished")

        logSupport.log.info("Aggregate monitoring data")
        # KEL - can we just call the monitor aggregator method directly? see above
        aggregate_stats()

        logSupport.log.info("Cleaning logs")
        cleanupSupport.cleaners.cleanup()

        if max_num_failures > max_failures:
            logSupport.log.info("Too many group failures, aborting")
            logSupport.log.debug("Failed %i times (limit %i), aborting" % (max_num_failures, max_failures))
            # py3-compatible raise (was the py2-only `raise RuntimeError, msg`)
            raise RuntimeError("Too many group failures, aborting")
    finally:
        # cleanup at exit: if anything went wrong, hard-kill whatever still runs
        for group_name in childs.keys():
            if childs[group_name]['state'] == 'spawned':
                logSupport.log.info("Hard killing group %s" % group_name)
                try:
                    os.kill(childs[group_name]['data'].pid, signal.SIGKILL)
                except OSError:
                    pass  # ignore failed kills of non-existent processes

    # at this point, all groups have been run; report per-group walltimes
    timings = []
    for group_name in groups:
        timings.append((group_name, childs[group_name]['end_time'] - childs[group_name]['start_time']))
    return timings
############################################################
def spawn_cleanup(work_dir, groups):
    """Deadvertise the classads of every group, one child process at a time.

    Best-effort: errors are swallowed because this runs during shutdown.
    """
    for group_name in groups:
        try:
            # reuse spawn_group instead of duplicating its Popen/non-blocking
            # pipe setup verbatim (the two code paths were identical)
            child = spawn_group(work_dir, group_name, "deadvertise")
            while poll_group_process(group_name, child) is None:
                # None means "still alive"
                time.sleep(0.01)
        except Exception:
            # never fail on cleanup; narrowed from a bare `except:` so that
            # SystemExit/KeyboardInterrupt still propagate during shutdown
            pass
############################################################
def spawn(sleep_time, advertize_rate, work_dir, frontendDescript,
          groups, max_parallel_workers, restart_interval, restart_attempts):
    """Main service loop: repeatedly run all groups, honoring HA mode.

    A master runs iterations forever; a slave hibernates while the master
    frontend is alive and activates when it disappears. Exits on signal only
    (KeyboardInterrupt raised by the signal handler).
    """
    num_groups = len(groups)

    # TODO: Get the ha_check_interval from the config
    ha = glideinFrontendLib.getHASettings(frontendDescript.data)
    ha_check_interval = glideinFrontendLib.getHACheckInterval(frontendDescript.data)
    mode = glideinFrontendLib.getHAMode(frontendDescript.data)
    master_frontend_name = ''
    if mode == 'slave':
        master_frontend_name = ha.get('ha_frontends')[0].get('frontend_name')

    active = (mode == 'master')
    hibernate = shouldHibernate(frontendDescript, work_dir, ha, mode, groups)

    logSupport.log.info('Frontend started with mode = %s' % mode)
    try:
        # Service will exit on signal only.
        # This infinite loop is for the slave to go back into hibernation
        # once the master becomes alive.
        # Master never loops infinitely here, but instead it does in
        # the inner loop while(mode=='master') ...
        while 1:
            while hibernate:
                # If I am slave enter hibernation cycle while Master is alive
                logSupport.log.info('Master Frontend %s is online. Hibernating.' % master_frontend_name)
                time.sleep(ha_check_interval)
                hibernate = shouldHibernate(frontendDescript, work_dir,
                                            ha, mode, groups)

            # We broke out of hibernation cycle
            # Either Master has disappeared or I am the Master
            if mode == 'slave':
                logSupport.log.info("Master frontend %s is offline. Activating slave frontend." % master_frontend_name)
                active = True

            failure_dict = {}
            for group in groups:
                failure_dict[group] = FailureCounter(group, restart_interval)

            while ((mode == 'master') or ((mode == 'slave') and active)):
                start_time = time.time()
                timings = spawn_iteration(work_dir, groups,
                                          max_parallel_workers, failure_dict,
                                          restart_attempts, "run")
                end_time = time.time()
                elapsed_time = end_time - start_time
                if elapsed_time < sleep_time:
                    real_sleep_time = sleep_time - elapsed_time
                    logSupport.log.info("Sleep %.1f sec" % real_sleep_time)
                    time.sleep(real_sleep_time)
                else:
                    logSupport.log.info("No sleeping this loop, took %.1f sec > %.1f sec" % (elapsed_time, sleep_time))

                # order the groups by walltime, longest first
                # (py3-compatible key/reverse sort; was a py2-only cmp lambda)
                timings.sort(key=lambda pair: pair[1], reverse=True)
                # recreate the groups list, with new ordering
                groups = [el[0] for el in timings]
                assert num_groups == len(groups), "Something went wrong, number of groups changed"

                if mode == 'slave':
                    # If we are slave, check if master is back and if so
                    # deadvertise my classads and hibernate
                    hibernate = shouldHibernate(frontendDescript, work_dir,
                                                ha, mode, groups)
                    if hibernate:
                        active = False
                        logSupport.log.info("Master frontend %s is back online" % master_frontend_name)
                        logSupport.log.info("Deadvertize my ads and enter hibernation cycle")
                        spawn_cleanup(work_dir, groups)
                    else:
                        logSupport.log.info("Master frontend %s is still offline" % master_frontend_name)
    finally:
        # We have been asked to terminate
        logSupport.log.info("Deadvertize my ads")
        spawn_cleanup(work_dir, groups)
############################################################
def shouldHibernate(frontendDescript, work_dir, ha, mode, groups):
    """
    Decide whether this frontend should hibernate.

    Running in master mode never hibernates; in slave mode we hibernate
    whenever the master frontend's classads are visible in any factory
    collector.
    @rtype: bool
    @return: True if we should hibernate else False
    """
    if mode == 'slave':
        master_frontend_name = ha.get('ha_frontends')[0].get('frontend_name')

        for group in groups:
            element = glideinFrontendElement(os.getpid(), work_dir,
                                             group, "run")
            # point HTCondor at this element's configuration for the query
            os.environ['CONDOR_CONFIG'] = element.elementDescript.frontend_data['CondorConfig']
            os.environ['_CONDOR_CERTIFICATE_MAPFILE'] = element.elementDescript.element_data['MapFile']
            os.environ['X509_USER_PROXY'] = element.elementDescript.frontend_data['ClassAdProxy']

            for factory_pool in element.factory_pools:
                collector = factory_pool[0]
                if glideinFrontendInterface.findMasterFrontendClassads(collector, master_frontend_name):
                    # Master classads found in one of the collectors:
                    # clean the env and report that we should hibernate
                    clean_htcondor_env()
                    return True

    # clean the env (runs in every mode, as before)
    clean_htcondor_env()
    return False
def clean_htcondor_env():
    """Remove the HTCondor-related variables from the process environment.

    Uses a containment test instead of a truthiness test: the previous
    ``os.environ.get(v)`` check left behind variables that were set to an
    empty string.
    """
    for v in ('CONDOR_CONFIG', '_CONDOR_CERTIFICATE_MAPFILE', 'X509_USER_PROXY'):
        if v in os.environ:
            del os.environ[v]
############################################################
def spawn_removal(work_dir, frontendDescript, groups,
                  max_parallel_workers, removal_action):
    """Run a single removal iteration over all groups."""
    # one-hour failure window; abort after a single failure
    failure_dict = dict((group, FailureCounter(group, 3600)) for group in groups)
    spawn_iteration(work_dir, groups, max_parallel_workers,
                    failure_dict, 1, removal_action)
############################################################
def cleanup_environ():
    """Strip HTCondor and X509 variables from the process environment.

    Iterates over a snapshot of the keys: deleting from os.environ while
    iterating it directly raises RuntimeError on Python 3.
    """
    for val in list(os.environ.keys()):
        val_low = val.lower()
        # remove any CONDOR or X509 environment variables -
        # don't want any surprises
        if val_low.startswith(("_condor_", "x509_")):
            del os.environ[val]
############################################################
def main(work_dir, action):
    """Frontend entry point: configure logging and monitoring, then run `action`.

    :param work_dir: frontend working directory
    :param action: "run" for the service loop, or one of the remove* actions
    :raise ValueError: for an unknown action
    """
    startup_time = time.time()

    glideinFrontendConfig.frontendConfig.frontend_descript_file = os.path.join(work_dir, glideinFrontendConfig.frontendConfig.frontend_descript_file)
    frontendDescript = glideinFrontendConfig.FrontendDescript(work_dir)

    # the log dir is shared between the frontend main and the groups, so use a subdir
    logSupport.log_dir = os.path.join(frontendDescript.data['LogDir'], "frontend")

    # Configure frontend process logging
    # NOTE(review): eval() of config-file contents; the file is
    # operator-controlled, but ast.literal_eval would be the safer choice
    process_logs = eval(frontendDescript.data['ProcessLogs'])
    for plog in process_logs:
        logSupport.add_processlog_handler("frontend", logSupport.log_dir,
                                          plog['msg_types'], plog['extension'],
                                          int(float(plog['max_days'])),
                                          int(float(plog['min_days'])),
                                          int(float(plog['max_mbytes'])),
                                          int(float(plog['backup_count'])),
                                          plog['compression'])
    logSupport.log = logging.getLogger("frontend")
    logSupport.log.info("Logging initialized")
    logSupport.log.debug("Frontend startup time: %s" % str(startup_time))

    try:
        cleanup_environ()
        # we use a dedicated config... ignore the system-wide
        os.environ['CONDOR_CONFIG'] = frontendDescript.data['CondorConfig']

        sleep_time = int(frontendDescript.data['LoopDelay'])
        advertize_rate = int(frontendDescript.data['AdvertiseDelay'])
        max_parallel_workers = int(frontendDescript.data['GroupParallelWorkers'])
        restart_attempts = int(frontendDescript.data['RestartAttempts'])
        restart_interval = int(frontendDescript.data['RestartInterval'])

        # str.split instead of the py2-only string.split() module function
        groups = frontendDescript.data['Groups'].split(',')
        groups.sort()

        glideinFrontendMonitorAggregator.monitorAggregatorConfig.config_frontend(os.path.join(work_dir, "monitor"), groups)
    except:
        # logged and re-raised, so nothing is swallowed here
        logSupport.log.exception("Exception occurred configuring monitoring: ")
        raise

    glideinFrontendMonitoring.write_frontend_descript_xml(frontendDescript, os.path.join(work_dir, 'monitor/'))

    logSupport.log.info("Enabled groups: %s" % groups)

    # create lock file
    pid_obj = glideinFrontendPidLib.FrontendPidSupport(work_dir)

    # start
    try:
        pid_obj.register(action)
    except glideinFrontendPidLib.pidSupport.AlreadyRunning as err:
        # py3-compatible `except ... as` (was the py2-only comma form);
        # also fixed the "aready" typo in the log message below
        pid_obj.load_registered()
        logSupport.log.exception("Failed starting Frontend with action %s. Instance with pid %s is already running for action %s. Exception during pid registration: %s" %
                                 (action, pid_obj.mypid, str(pid_obj.action_type), err))
        raise
    try:
        try:
            if action == "run":
                spawn(sleep_time, advertize_rate, work_dir,
                      frontendDescript, groups, max_parallel_workers,
                      restart_interval, restart_attempts)
            elif action in ('removeWait', 'removeIdle', 'removeAll', 'removeWaitExcess', 'removeIdleExcess', 'removeAllExcess'):
                spawn_removal(work_dir, frontendDescript, groups,
                              max_parallel_workers, action)
            else:
                # py3-compatible raise (was `raise ValueError, msg`)
                raise ValueError("Unknown action: %s" % action)
        except KeyboardInterrupt:
            logSupport.log.info("Received signal...exit")
        except:
            logSupport.log.exception("Exception occurred trying to spawn: ")
    finally:
        pid_obj.relinquish()
############################################################
#
# S T A R T U P
#
############################################################
def termsignal(signr, frame):
    """Signal handler: translate SIGTERM/SIGQUIT into a KeyboardInterrupt.

    py3-compatible raise (was the py2-only `raise KeyboardInterrupt, msg`).
    """
    raise KeyboardInterrupt("Received signal %s" % signr)
if __name__ == '__main__':
signal.signal(signal.SIGTERM, termsignal)
signal.signal(signal.SIGQUIT, termsignal)
if len(sys.argv)==2:
action = "run"
else:
| |
<filename>Dataflow/full_executer_wordshop.py
# -*- coding: utf-8 -*-
"""Full Executer WordShop.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1kGSQWNtImJknauUN9L8ZRRwIzAdwbmo_
First, we load the pegasus paraphraser.
"""
# Commented out IPython magic to ensure Python compatibility.
!git clone https://github.com/google-research/pegasus
# %cd pegasus
!export PYTHONPATH=.
!pip3 install -r requirements.txt
!pip install transformers==3.5.0
import torch
from transformers import PegasusForConditionalGeneration, PegasusTokenizer
model_name = 'tuner007/pegasus_paraphrase'
torch_device = 'cuda' if torch.cuda.is_available() else 'cpu'
tokenizer = PegasusTokenizer.from_pretrained(model_name)
model = PegasusForConditionalGeneration.from_pretrained(model_name).to(torch_device)
def get_response(input_text, num_return_sequences, num_beams):
    """Generate paraphrases of `input_text` with the module-level Pegasus model.

    Returns a list of `num_return_sequences` decoded candidate sentences.
    """
    encoded = tokenizer.prepare_seq2seq_batch([input_text], truncation=True, padding='longest', max_length=60).to(torch_device)
    generated = model.generate(**encoded, max_length=60, num_beams=num_beams, num_return_sequences=num_return_sequences, temperature=1.5)
    return tokenizer.batch_decode(generated, skip_special_tokens=True)
import pandas as pd
import nltk
nltk.download('cmudict')
nltk.download('wordnet')
"""Next, we import the procrustean alliteration paraphraser"""
class RuleBoundsInterface:
    """Contract for rhetorical-figure generating algorithms.

    Implementations describe the properties of one rhetorical figure; together
    they form a ruleset that lets the algorithm produce results relevant to
    the user's request.
    """

    def evaluate(self, tokenlist, replacementquota):
        """Return the best application of the rule to `tokenlist` using the
        given replacement proportion; under some conditions there is no
        result to return."""
        pass
class LanguageBoundsInterface:
    """Contract describing the properties and helpers of a natural language.

    Implementations define the bounds an algorithm can work within for a
    given language context.
    """

    # ---- class-level defaults ----
    _NULL_PHENOME_INDICATOR = None  # placeholder used for an unknown phenome
    _SIMILARITY_THRESHOLD = 0.2     # minimum 0-1 score for two words to count as similar
    MULTI_TOKEN_INDICATOR = None    # corpus-specific marker for multi-word tokens
    vowelphenomes = None            # phenomes that produce vowel sounds in this language
    # ------------------------------

    def __init__(self, sensitivity):
        # per-instance similarity threshold override
        self._SIMILARITY_THRESHOLD = sensitivity

    def getphenomes(self, arg):
        """Return all phenome-lists related to the token."""
        pass

    def hypernyms(self, arg):
        """Return all hypernyms related to the token. ('context' is the representation of the phrase in collection form.)"""
        pass

    def hyponyms(self, arg):
        """Return all hyponyms related to the token. ('context' is the representation of the phrase in collection form.)"""
        pass

    def messagefail(self, input):
        """Produce this language's failure message when no value can be returned."""
        pass

    def messageonlyresult(self, arg):
        """Produce the message used when only one result was possible."""
        pass

    def messagetopresult(self, resultlen, requestedresultcount):
        """Produce this language's "top x results" message for multiple results."""
        pass

    def similarity(self, arg, arg2):
        """Return a token similarity score based on language-specific weights;
        used to pick the optimal replacement for a context."""
        pass

    def split(self, arg):
        """Return an ordered token list, split at this language's delimiters."""
        pass
from enum import Enum
from nltk import RegexpTokenizer
from nltk.corpus import cmudict
from nltk.corpus import wordnet
from nltk.wsd import lesk
class AmericanEnglishLangContext(LanguageBoundsInterface):
"""Defines the properties and implementation of standard American English."""
########## Variables ##########
_cmu = cmudict.dict() # Pretrained phenome generation model. Created outside of methods because it is used over iteration(s) and is expensive to generate; TREAT THIS VALUE AS AN IMMUTABLE.
_MULTI_TOKEN_INDICATOR = "_" # Character used to identify when a token has multiple words. This functionality is specific to a corpus. Must be changed if corpus is changed.
_NULL_PHENOME_INDICATOR = "*NONE*" # Used by algorithm to indicate if a corressponding phemone could not be found for a token
_SIMILARITY_THRESHOLD = 0.2 # The threshold that must be passed for a word to be considered similar. Scaled from 0-1.
vowelphenomes = ["AA", "AE", "AH", "AO", "AW", "AY",
"AX", "AXR", "EH", "ER", "EY", "IH",
"IX", "IY", "OW", "OY","UH", "UW", "UX"] # Contains all phenomes that produce vowel-related sounds for this language.
###############################
def _getproperformattype(self, unformattoken):
"""Used to parse through the Wordnet sysnet-token return value to retrieve only relevant sections. Currently the only returns the word.
In future implementations, this function may not be needed if the corpus has a function to return only the word as a string."""
name, junk = unformattoken.name().split(".", 1);
return name
def _getproperhandlemissingphenome(self, unknowntoken):
"""Takes a unknown-phenome (a token which could not be evaluated by CMUdict) and attempts to generate a phenome. If CMUdict or
Wordnet implementation is changed this function MUST be changed."""
finaleval = []
# After various testing, it has been determined that calculating for two letters yields the most consistent results for unknown phenomes.
tokenlen = len(unknowntoken)
if tokenlen is 0:
finaleval.append([self._NULL_PHENOME_INDICATOR])
elif tokenlen is 1:
finaleval.append([unknowntoken.upper()]) # The letter IS the phenome
else:
relevant = unknowntoken[:2] # get first two chars
finalattempt = self._cmu.get(relevant, None)
if finalattempt is None: # No possible phenome can be generated by this algorithm
finaleval.append([self._NULL_PHENOME_INDICATOR])
elif finalattempt is list:
finaleval.append(finalattempt)
else: # 'finalattempt' is guareenteed to only be of type NONE, list, or list[list].
finaleval.extend(finalattempt) # flatten list; tis step is necessary to maintain parsability
return finaleval
def _getproperhandlemultitoken(self, multitoken):
"""Takes a multi-word (a token with words seperated by '_' by Wordnet) and breaks it down into a format that can be evaluated by the CMUdict. If CMUdict or
Wordnet implementation is changed this function MUST be changed."""
finaleval = []
individualtokens = multitoken.split(self._MULTI_TOKEN_INDICATOR)
for token in individualtokens: # evaluate each token phenome indiviually; then represent multitoken for EACH phenome calculated, when returned to scanning.
phenome = self._cmu.get(token.lower(), None)
if phenome is list:
finaleval.append(phenome)
else: # 'phenome' is guareenteed to only be of type NONE, list, or list[list].
if phenome is None:
phenome = self._getproperhandlemissingphenome(token)
finaleval.extend(phenome) # flatten list; this step is necessary to maintain parsability
return finaleval
def getphenomes(self, arg):
"""Returns all phenome-lists related to the token. ('context' is the representation of the phrase in collection form.)"""
# uses CMUdict as the core processing algorithm. If CMUdict fails to find a match the function will predict a possible phenome for the token.
# This function is guareenteed to return a value.
generatephenome = self._cmu.get(arg.lower(), None) # _cmu is defined globally above in "VARIABLES" section. Treat as an immutable.
if generatephenome is None:
if arg.__contains__(self._MULTI_TOKEN_INDICATOR): # _MULTI_TOKEN_INDICATOR is defined globally above in "VARIABLES" section. Treat as an immutable.
generatephenome = self._getproperhandlemultitoken(arg)
else: # token is unknown by CMUdict
generatephenome = self._getproperhandlemissingphenome(arg)
# When multiple phenomes exist for same word, a list[list[str]] is generated
return generatephenome
def hypernyms(self, context, arg):
"""Returns all hypernyms related to the token. ('context' is the representation of the phrase in collection form.)"""
# This function assumes the use of Wordnet. If Wordnet implementation changes, this function MUST change.
eval = None
interpretation = lesk(context, arg)
if interpretation is not None:
eval = map(self._getproperformattype, interpretation.hypernyms())
return eval
def hyponyms(self, context, arg):
"""Returns all hyponyms related to the token."""
# This function assumes the use of Wordnet. If Wordnet implementation changes, this function MUST change.
eval = None
interpretation = lesk(context, arg)
if interpretation is not None:
eval = map(self._getproperformattype, interpretation.hyponyms())
return eval
def messagefail(self, input):
"""Produces the fail message to print to users in this language if the process cannot return a value."""
built = " ".join(input)
return ("Your input: '" + built + "' was not able to be parsed under the conditions you desired. Please try new conditions or try a new phrase.")
def messageonlyresult(self, arg):
"""Produces a indicator message if only one result was possible from the input parameters given."""
return ("This is the only result processed from the given input:\n" + arg)
def messagetopresult(self, resultlen, requestedresultcount):
"""Produces the top 'x' results message to users in this language if the process has multiple results."""
if resultlen < requestedresultcount:
return ("Top " + str(resultlen) + " result(s):\n")
else:
return ("Top " + str(requestedresultcount) + " result(s):\n")
def similarity(self, contextclues, arg1, arg2):
"""Returns a key-value pair for scoring similarity. [0] a bool that determines if the word is similar enough to satisfy language criteria
and the score associated with the evaluation."""
# This function assumes the use of Wordnet. If Wordnet implementation changes, this function | |
from collections import OrderedDict
import math
from auto_ml import utils
import pandas as pd
from sklearn.ensemble import GradientBoostingRegressor, GradientBoostingClassifier
from sklearn.metrics import mean_squared_error, make_scorer, brier_score_loss, accuracy_score, explained_variance_score, mean_absolute_error, median_absolute_error, r2_score, log_loss, roc_auc_score
import numpy as np
from tabulate import tabulate
# String forms of values treated as missing/invalid when scoring
# (covers str(float('nan')) == 'nan', str(float('inf')) == 'inf', etc.).
bad_vals_as_strings = {
    'nan', 'NaN', 'NAN', 'None', 'none', 'NULL', 'null', '',
    'inf', '-inf', 'np.nan', 'numpy.nan',
}
def advanced_scoring_classifiers(probas, actuals, name=None):
    """Print detailed classifier diagnostics and return the brier score loss.

    :param probas: predicted probabilities - either flat positive-class
        probabilities or nested [neg, pos] pairs per item
    :param actuals: true labels
    :param name: optional model name to print with the report
    :return: brier score loss of the predictions
    """
    # pandas Series don't play nice here. Make sure our actuals list is indeed a list
    actuals = list(actuals)
    predictions = list(probas)

    print('Here is our brier-score-loss, which is the default value we optimized for while training, and is the value returned from .score() unless you requested a custom scoring metric')
    print('It is a measure of how close the PROBABILITY predictions are.')

    if name is not None:
        print(name)

    # Sometimes we will be given "flattened" probabilities (only the probability of our positive label), while other times we might be given "nested" probabilities (probabilities of both positive and negative, in a list, for each item).
    try:
        probas = [proba[1] for proba in probas]
    except Exception:
        # already flattened; keep as-is
        pass

    brier_score = brier_score_loss(actuals, probas)
    print(format(brier_score, '.4f'))

    print('\nHere is the trained estimator\'s overall accuracy (when it predicts a label, how frequently is that the correct label?)')
    predicted_labels = [1 if pred >= 0.5 else 0 for pred in probas]
    print(format(accuracy_score(y_true=actuals, y_pred=predicted_labels) * 100, '.1f') + '%')

    print('\nHere is a confusion matrix showing predictions vs. actuals by label:')
    # it would make sense to use sklearn's confusion_matrix here but it apparently has no labels
    # took this idea instead from: http://stats.stackexchange.com/a/109015
    conf = pd.crosstab(pd.Series(actuals), pd.Series(predicted_labels), rownames=['v Actual v'], colnames=['Predicted >'], margins=True)
    print(conf)

    # Per-class predictive value, to spot mishandling of imbalanced data
    # (e.g. predicting 100% of observations as the majority class).
    print('\nHere is predictive value by class:')
    df = pd.concat([pd.Series(actuals, name='actuals'), pd.Series(predicted_labels, name='predicted')], axis=1)
    targets = list(df.predicted.unique())
    for target in targets:
        tot_count = len(df[df.predicted == target])
        true_count = len(df[(df.predicted == target) & (df.actuals == target)])
        print('Class: ', target, '=', float(true_count) / tot_count)

    # qcut is super fickle. so, try to use 10 buckets first, then 5 if that fails, then nothing
    try:
        try:
            bucket_results = pd.qcut(probas, q=10, duplicates='drop')
        except Exception:
            bucket_results = pd.qcut(probas, q=5, duplicates='drop')

        df_probas = pd.DataFrame(probas, columns=['Predicted Probability Of Bucket'])
        df_probas['Actual Probability of Bucket'] = actuals
        df_probas['Bucket Edges'] = bucket_results
        df_buckets = df_probas.groupby(df_probas['Bucket Edges'])
        print(tabulate(df_buckets.mean(), headers='keys', floatfmt='.4f', tablefmt='psql', showindex='always'))
        print('\nHere is the accuracy of our trained estimator at each level of predicted probabilities')
        print('For a verbose description of what this means, please visit the docs:')
        print('http://auto-ml.readthedocs.io/en/latest/analytics.html#interpreting-predicted-probability-buckets-for-classifiers')
    except Exception:
        # best-effort diagnostics only; skip the bucket table when qcut fails
        pass

    print('\n\n')
    return brier_score
def calculate_and_print_differences(predictions, actuals, name=None):
    """Print counts and averages of positive and negative prediction errors.

    A positive difference means prediction > actual. Exact matches are
    ignored (technically we skip cases where we are spot on).

    :param predictions: predicted values
    :param actuals: true values, aligned with predictions by index
    :param name: optional model name to print first
    """
    pos_differences = []
    neg_differences = []
    for idx, pred in enumerate(predictions):
        difference = pred - actuals[idx]
        if difference > 0:
            pos_differences.append(difference)
        elif difference < 0:
            neg_differences.append(difference)

    if name is not None:
        print(name)
    print('Count of positive differences (prediction > actual):')
    print(len(pos_differences))
    print('Count of negative differences:')
    print(len(neg_differences))
    if pos_differences:
        print('Average positive difference:')
        print(sum(pos_differences) * 1.0 / len(pos_differences))
    if neg_differences:
        print('Average negative difference:')
        print(sum(neg_differences) * 1.0 / len(neg_differences))
def advanced_scoring_regressors(predictions, actuals, verbose=2, name=None):
    """Print detailed regression diagnostics and return the RMSE.

    :param predictions: predicted values
    :param actuals: true values
    :param verbose: > 2 additionally prints per-decile statistics
    :param name: optional model name to print with the report
    :return: RMSE of the predictions
    """
    # pandas Series don't play nice here. Make sure our actuals list is indeed a list
    actuals = list(actuals)
    predictions = list(predictions)

    print('\n\n***********************************************')
    if name is not None:
        print(name)
    print('Advanced scoring metrics for the trained regression model on this particular dataset:\n')

    # 1. overall RMSE
    print('Here is the overall RMSE for these predictions:')
    rmse = mean_squared_error(actuals, predictions)**0.5
    print(rmse)

    # 2. overall avg predictions
    print('\nHere is the average of the predictions:')
    print(sum(predictions) * 1.0 / len(predictions))

    # 3. overall avg actuals
    print('\nHere is the average actual value on this validation set:')
    print(sum(actuals) * 1.0 / len(actuals))

    # 2(a). median predictions
    print('\nHere is the median prediction:')
    print(np.median(predictions))

    # 3(a). median actuals
    print('\nHere is the median actual value:')
    print(np.median(actuals))

    # 4. avg differences (not RMSE)
    print('\nHere is the mean absolute error:')
    print(mean_absolute_error(actuals, predictions))

    print('\nHere is the median absolute error (robust to outliers):')
    print(median_absolute_error(actuals, predictions))

    print('\nHere is the explained variance:')
    print(explained_variance_score(actuals, predictions))

    print('\nHere is the R-squared value:')
    print(r2_score(actuals, predictions))

    # 5. pos and neg differences
    calculate_and_print_differences(predictions=predictions, actuals=actuals, name=name)

    actuals_preds = list(zip(actuals, predictions))
    # Sort by PREDICTED value, since this is what what we will know at the time we make a prediction
    actuals_preds.sort(key=lambda pair: pair[1])
    actuals_sorted = [act for act, pred in actuals_preds]
    predictions_sorted = [pred for act, pred in actuals_preds]

    if verbose > 2:
        print('Here\'s how the trained predictor did on each successive decile (ten percent chunk) of the predictions:')
        for i in range(1, 11):
            print('\n**************')
            print('Bucket number:')
            print(i)
            # There's probably some fenceposting error here
            min_idx = int((i - 1) / 10.0 * len(actuals_sorted))
            max_idx = int(i / 10.0 * len(actuals_sorted))
            actuals_for_this_decile = actuals_sorted[min_idx:max_idx]
            predictions_for_this_decile = predictions_sorted[min_idx:max_idx]

            print('Avg predicted val in this bucket')
            print(sum(predictions_for_this_decile) * 1.0 / len(predictions_for_this_decile))
            print('Avg actual val in this bucket')
            print(sum(actuals_for_this_decile) * 1.0 / len(actuals_for_this_decile))
            print('RMSE for this bucket')
            print(mean_squared_error(actuals_for_this_decile, predictions_for_this_decile)**0.5)
            calculate_and_print_differences(predictions_for_this_decile, actuals_for_this_decile)

    print('')
    print('\n***********************************************\n\n')
    return rmse
def rmse_func(y, predictions):
    """Root-mean-squared error: square root of sklearn's mean_squared_error."""
    mse = mean_squared_error(y, predictions)
    return mse**0.5
# Mapping from user-facing scoring-method names to scorer callables;
# some metrics are reachable under more than one alias.
scoring_name_function_map = {
    'rmse': rmse_func,
    'median_absolute_error': median_absolute_error,
    'r2': r2_score,
    'r-squared': r2_score,
    'mean_absolute_error': mean_absolute_error,
    'accuracy': accuracy_score,
    'accuracy_score': accuracy_score,
    'log_loss': log_loss,
    'roc_auc': roc_auc_score,
    'brier_score_loss': brier_score_loss,
}
class RegressionScorer(object):
    """Scorer wrapper for regression estimators.

    Wraps either a named metric from scoring_name_function_map or a
    user-supplied callable, and negates the result so that "higher is
    better" for sklearn-style model selection.
    """

    def __init__(self, scoring_method=None):
        """:param scoring_method: name in scoring_name_function_map, or a
        callable(y, predictions) -> float. Defaults to 'rmse'.
        """
        if scoring_method is None:
            scoring_method = 'rmse'
        # (the original assigned self.scoring_method twice; once is enough)
        self.scoring_method = scoring_method

        if callable(scoring_method):
            self.scoring_func = scoring_method
        else:
            self.scoring_func = scoring_name_function_map[scoring_method]

    def get(self, prop_name, default=None):
        """dict-style attribute access: return the attribute or `default`."""
        try:
            return getattr(self, prop_name)
        except AttributeError:
            return default

    def score(self, estimator, X, y, took_log_of_y=False, advanced_scoring=False, verbose=2, name=None):
        """Score `estimator` on (X, y) and return the negated metric value.

        Rows whose target or prediction is NaN/inf/None are dropped (with a
        notice) if the metric raises ValueError on the full data.
        """
        X, y = utils.drop_missing_y_vals(X, y, output_column=None)

        if isinstance(estimator, GradientBoostingRegressor):
            X = X.toarray()

        predictions = estimator.predict(X)

        if took_log_of_y:
            # undo the log-transform applied during training
            for idx, val in enumerate(predictions):
                predictions[idx] = math.exp(val)

        try:
            score = self.scoring_func(y, predictions)
        except ValueError:
            # filter out rows with NaN/inf/None targets or predictions,
            # then score the remainder
            bad_val_indices = []
            for idx, val in enumerate(y):
                if str(val) in bad_vals_as_strings or str(predictions[idx]) in bad_vals_as_strings:
                    bad_val_indices.append(idx)

            predictions = [val for idx, val in enumerate(predictions) if idx not in bad_val_indices]
            y = [val for idx, val in enumerate(y) if idx not in bad_val_indices]

            print('Found ' + str(len(bad_val_indices)) + ' null or infinity values in the predicted or y values. We will ignore these, and report the score on the rest of the dataset')
            score = self.scoring_func(y, predictions)

        if advanced_scoring:  # truthiness instead of the original `== True`
            if hasattr(estimator, 'name'):
                print(estimator.name)
            advanced_scoring_regressors(predictions, y, verbose=verbose, name=name)
        # negate: lower error -> higher score for model selection
        return -1 * score
class ClassificationScorer(object):
def __init__(self, scoring_method=None):
if scoring_method is None:
scoring_method = 'brier_score_loss'
self.scoring_method = scoring_method
if callable(scoring_method):
self.scoring_func = scoring_method
else:
self.scoring_func = scoring_name_function_map[scoring_method]
def get(self, prop_name, default=None):
try:
return getattr(self, prop_name)
except AttributeError:
return default
def clean_probas(self, probas):
    """Clamp predicted probabilities into [0, 1].

    Values whose string form is in ``bad_vals_as_strings`` (NaN/inf style
    values) are replaced with 0 first. Handles both a flat list of
    probabilities and a list of per-class probability rows.
    """
    print('Warning: We have found some values in the predicted probabilities that fall outside the range {0, 1}')
    print('This is likely the result of a model being trained on too little data, or with a bad set of hyperparameters. If you get this warning while doing a hyperparameter search, for instance, you can probably safely ignore it')
    print('We will cap those values at 0 or 1 for the purposes of scoring, but you should be careful to have similar safeguards in place in prod if you use this model')
    if isinstance(probas[0], list):
        # Row-per-sample case: clamp every per-class entry independently.
        clamped_rows = []
        for row in probas:
            clamped_row = []
            for prob in row:
                if str(prob) in bad_vals_as_strings:
                    prob = 0
                clamped_row.append(max(min(prob, 1), 0))
            clamped_rows.append(clamped_row)
        return clamped_rows
    # Flat case: sanitize bad values, then clamp.
    sanitized = [0 if str(val) in bad_vals_as_strings else val for val in probas]
    return [min(max(prob, 0), 1) for prob in sanitized]
def score(self, estimator, X, y, advanced_scoring=False):
X, y = utils.drop_missing_y_vals(X, y, output_column=None)
if isinstance(estimator, GradientBoostingClassifier):
X = X.toarray()
predictions = estimator.predict_proba(X)
if self.scoring_method == 'brier_score_loss':
# At the moment, Microsoft's LightGBM returns probabilities > 1 and < 0, which can break some scoring functions. So we have to take the max of 1 and the pred, and the min of 0 and the pred.
probas = [max(min(row[1], 1), 0) for row in predictions]
predictions = probas
try:
score = self.scoring_func(y, predictions)
except ValueError as e:
bad_val_indices = []
for idx, val in enumerate(y):
| |
'string',
'name': 'string',
'description': 'string',
'dataSourceName': 'string',
'requestMappingTemplate': 'string',
'responseMappingTemplate': 'string',
'functionVersion': 'string'
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **functions** *(list) --*
A list of ``Function`` objects.
- *(dict) --*
A function is a reusable entity. Multiple functions can be used to compose the resolver logic.
- **functionId** *(string) --*
A unique ID representing the ``Function`` object.
- **functionArn** *(string) --*
The ARN of the ``Function`` object.
- **name** *(string) --*
The name of the ``Function`` object.
- **description** *(string) --*
The ``Function`` description.
- **dataSourceName** *(string) --*
The name of the ``DataSource`` .
- **requestMappingTemplate** *(string) --*
The ``Function`` request mapping template. Functions support only the 2018-05-29 version of the request mapping template.
- **responseMappingTemplate** *(string) --*
The ``Function`` response mapping template.
- **functionVersion** *(string) --*
The version of the request mapping template. Currently only the 2018-05-29 version of the template is supported.
- **NextToken** *(string) --*
A token to resume pagination.
:type apiId: string
:param apiId: **[REQUIRED]**
The GraphQL API ID.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class ListGraphqlApis(Paginator):
    # NOTE(review): appears to be a generated service stub — the docstring
    # mirrors the AWS AppSync API documentation and the body is intentionally
    # just `pass`; the real pagination is performed by the botocore Paginator
    # machinery at runtime — TODO confirm against the stub generator.
    def paginate(self, PaginationConfig: Dict = None) -> Dict:
        """
        Creates an iterator that will paginate through responses from :py:meth:`AppSync.Client.list_graphql_apis`.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/ListGraphqlApis>`_
        **Request Syntax**
        ::
        response_iterator = paginator.paginate(
        PaginationConfig={
        'MaxItems': 123,
        'PageSize': 123,
        'StartingToken': 'string'
        }
        )
        **Response Syntax**
        ::
        {
        'graphqlApis': [
        {
        'name': 'string',
        'apiId': 'string',
        'authenticationType': 'API_KEY'|'AWS_IAM'|'AMAZON_COGNITO_USER_POOLS'|'OPENID_CONNECT',
        'logConfig': {
        'fieldLogLevel': 'NONE'|'ERROR'|'ALL',
        'cloudWatchLogsRoleArn': 'string'
        },
        'userPoolConfig': {
        'userPoolId': 'string',
        'awsRegion': 'string',
        'defaultAction': 'ALLOW'|'DENY',
        'appIdClientRegex': 'string'
        },
        'openIDConnectConfig': {
        'issuer': 'string',
        'clientId': 'string',
        'iatTTL': 123,
        'authTTL': 123
        },
        'arn': 'string',
        'uris': {
        'string': 'string'
        }
        },
        ],
        'NextToken': 'string'
        }
        **Response Structure**
        - *(dict) --*
        - **graphqlApis** *(list) --*
        The ``GraphqlApi`` objects.
        - *(dict) --*
        Describes a GraphQL API.
        - **name** *(string) --*
        The API name.
        - **apiId** *(string) --*
        The API ID.
        - **authenticationType** *(string) --*
        The authentication type.
        - **logConfig** *(dict) --*
        The Amazon CloudWatch Logs configuration.
        - **fieldLogLevel** *(string) --*
        The field logging level. Values can be NONE, ERROR, or ALL.
        * **NONE** : No field-level logs are captured.
        * **ERROR** : Logs the following information only for the fields that are in error:
        * The error section in the server response.
        * Field-level errors.
        * The generated request/response functions that got resolved for error fields.
        * **ALL** : The following information is logged for all fields in the query:
        * Field-level tracing information.
        * The generated request/response functions that got resolved for each field.
        - **cloudWatchLogsRoleArn** *(string) --*
        The service role that AWS AppSync will assume to publish to Amazon CloudWatch logs in your account.
        - **userPoolConfig** *(dict) --*
        The Amazon Cognito user pool configuration.
        - **userPoolId** *(string) --*
        The user pool ID.
        - **awsRegion** *(string) --*
        The AWS Region in which the user pool was created.
        - **defaultAction** *(string) --*
        The action that you want your GraphQL API to take when a request that uses Amazon Cognito user pool authentication doesn't match the Amazon Cognito user pool configuration.
        - **appIdClientRegex** *(string) --*
        A regular expression for validating the incoming Amazon Cognito user pool app client ID.
        - **openIDConnectConfig** *(dict) --*
        The OpenID Connect configuration.
        - **issuer** *(string) --*
        The issuer for the OpenID Connect configuration. The issuer returned by discovery must exactly match the value of ``iss`` in the ID token.
        - **clientId** *(string) --*
        The client identifier of the Relying party at the OpenID identity provider. This identifier is typically obtained when the Relying party is registered with the OpenID identity provider. You can specify a regular expression so the AWS AppSync can validate against multiple client identifiers at a time.
        - **iatTTL** *(integer) --*
        The number of milliseconds a token is valid after being issued to a user.
        - **authTTL** *(integer) --*
        The number of milliseconds a token is valid after being authenticated.
        - **arn** *(string) --*
        The ARN.
        - **uris** *(dict) --*
        The URIs.
        - *(string) --*
        - *(string) --*
        - **NextToken** *(string) --*
        A token to resume pagination.
        :type PaginationConfig: dict
        :param PaginationConfig:
        A dictionary that provides parameters to control pagination.
        - **MaxItems** *(integer) --*
        The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
        - **PageSize** *(integer) --*
        The size of each page.
        - **StartingToken** *(string) --*
        A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
        :rtype: dict
        :returns:
        """
        pass
class ListResolvers(Paginator):
    # NOTE(review): appears to be a generated service stub — the docstring
    # mirrors the AWS AppSync API documentation and the body is intentionally
    # just `pass` — TODO confirm against the stub generator.
    def paginate(self, apiId: str, typeName: str, PaginationConfig: Dict = None) -> Dict:
        """
        Creates an iterator that will paginate through responses from :py:meth:`AppSync.Client.list_resolvers`.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/ListResolvers>`_
        **Request Syntax**
        ::
        response_iterator = paginator.paginate(
        apiId='string',
        typeName='string',
        PaginationConfig={
        'MaxItems': 123,
        'PageSize': 123,
        'StartingToken': 'string'
        }
        )
        **Response Syntax**
        ::
        {
        'resolvers': [
        {
        'typeName': 'string',
        'fieldName': 'string',
        'dataSourceName': 'string',
        'resolverArn': 'string',
        'requestMappingTemplate': 'string',
        'responseMappingTemplate': 'string',
        'kind': 'UNIT'|'PIPELINE',
        'pipelineConfig': {
        'functions': [
        'string',
        ]
        }
        },
        ],
        'NextToken': 'string'
        }
        **Response Structure**
        - *(dict) --*
        - **resolvers** *(list) --*
        The ``Resolver`` objects.
        - *(dict) --*
        Describes a resolver.
        - **typeName** *(string) --*
        The resolver type name.
        - **fieldName** *(string) --*
        The resolver field name.
        - **dataSourceName** *(string) --*
        The resolver data source name.
        - **resolverArn** *(string) --*
        The resolver ARN.
        - **requestMappingTemplate** *(string) --*
        The request mapping template.
        - **responseMappingTemplate** *(string) --*
        The response mapping template.
        - **kind** *(string) --*
        The resolver type.
        * **UNIT** : A UNIT resolver type. A UNIT resolver is the default resolver type. A UNIT resolver enables you to execute a GraphQL query against a single data source.
        * **PIPELINE** : A PIPELINE resolver type. A PIPELINE resolver enables you to execute a series of ``Function`` in a serial manner. You can use a pipeline resolver to execute a GraphQL query against multiple data sources.
        - **pipelineConfig** *(dict) --*
        The ``PipelineConfig`` .
        - **functions** *(list) --*
        A list of ``Function`` objects.
        - *(string) --*
        - **NextToken** *(string) --*
        A token to resume pagination.
        :type apiId: string
        :param apiId: **[REQUIRED]**
        The API ID.
        :type typeName: string
        :param typeName: **[REQUIRED]**
        The type name.
        :type PaginationConfig: dict
        :param PaginationConfig:
        A dictionary that provides parameters to control pagination.
        - **MaxItems** *(integer) --*
        The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
        - **PageSize** *(integer) --*
        The size of each page.
        - **StartingToken** *(string) --*
        A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
        :rtype: dict
        :returns:
        """
        pass
class ListResolversByFunction(Paginator):
def paginate(self, apiId: str, functionId: str, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`AppSync.Client.list_resolvers_by_function`.
See also: `AWS API | |
0, 0, 0, 0],
[1658, 1.063334, 0, 9999, -9999, 1.0, 100, 1, 1.879381, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1659, 42.758003, 0, 9999, -9999, 1.0, 100, 1, 91.77667, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1660, 81.182801, 0, 9999, -9999, 1.0, 100, 1, 186.942171, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1661, 54.425695, 0, 9999, -9999, 1.0, 100, 1, 138.604087, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1662, 1.7252, 0, 9999, -9999, 1.0, 100, 1, 3.040325, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1663, 0.88777, 0, 9999, -9999, 1.0, 100, 1, 1.600649, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1664, 0.892007, 0, 9999, -9999, 1.0, 100, 1, 1.578207, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1665, 26.879102, 0, 9999, -9999, 1.0, 100, 1, 48.659717, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1666, 1.568464, 0, 9999, -9999, 1.0, 100, 1, 2.877877, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1667, 2.930199, 0, 9999, -9999, 1.0, 100, 1, 5.227282, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1668, 2.222682, 0, 9999, -9999, 1.0, 100, 1, 3.927043, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1669, 32.479792, 0, 9999, -9999, 1.0, 100, 1, 72.677935, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1670, 57.381942, 0, 9999, -9999, 1.0, 100, 1, 111.043025, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1671, 30.998365, 0, 9999, -9999, 1.0, 100, 1, 62.404971, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1672, 5.901853, 0, 9999, -9999, 1.0, 100, 1, 10.579925, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1673, 2.372304, 0, 9999, -9999, 1.0, 100, 1, 4.091034, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1674, 20.609688, 0, 9999, -9999, 1.0, 100, 1, 47.970381, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1675, 17.173313, 0, 9999, -9999, 1.0, 100, 1, 31.233663, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1676, 4.733187, 0, 9999, -9999, 1.0, 100, 1, 83.173368, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1677, 0.748216, 0, 9999, -9999, 1.0, 100, 1, 13.887293, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1678, 35.527692, 0, 9999, -9999, 1.0, 100, 1, 226.804108, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1679, 13.18382, 0, 9999, -9999, 1.0, 100, 1, 71.380413, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1680, 10.906483, 0, 9999, -9999, 1.0, 100, 1, 52.148102, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1681, 5.842714, 0, 9999, -9999, 1.0, 100, 1, 17.30062, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1682, 12.101248, 0, 9999, -9999, 1.0, 100, 1, 39.892468, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1683, 4.466464, 0, 9999, -9999, 1.0, 100, 1, 9.189765, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1684, 3.14363, 0, 9999, -9999, 1.0, 100, 1, 40.575646, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1685, 3.384526, 0, 9999, -9999, 1.0, 100, 1, 74.922434, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1686, 4.905179, 0, 9999, -9999, 1.0, 100, 1, 81.035483, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1687, 55.528937, 0, 9999, -9999, 1.0, 100, 1, 112.01808, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1688, 9.148372, 0, 9999, -9999, 1.0, 100, 1, 18.158729, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1689, 13.639446, 0, 9999, -9999, 1.0, 100, 1, 116.696894, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1690, 21.09561, 0, 9999, -9999, 1.0, 100, 1, 116.477465, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1691, 19.637371, 0, 9999, -9999, 1.0, 100, 1, 228.38653, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1692, 2.901237, 0, 9999, -9999, 1.0, 100, 1, 26.501573, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1693, 50.543953, 0, 9999, -9999, 1.0, 100, 1, 86.236575, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1694, 18.920588, 0, 9999, -9999, 1.0, 100, 1, 53.656832, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1695, 7.719672, 0, 9999, -9999, 1.0, 100, 1, 23.132774, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1696, 30.540056, 0, 9999, -9999, 1.0, 100, 1, 53.34209, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1697, 76.168097, 0, 9999, -9999, 1.0, 100, 1, 136.821485, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1698, 12.290127, 0, 9999, -9999, 1.0, 100, 1, 25.60631, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1699, 2.114343, 0, 9999, -9999, 1.0, 100, 1, 5.356106, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1700, 21.391783, 0, 9999, -9999, 1.0, 100, 1, 55.825815, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1701, 10.832276, 0, 9999, -9999, 1.0, 100, 1, 37.297196, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1702, 2.156654, 0, 9999, -9999, 1.0, 100, 1, 25.149806, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1703, 3.858718, 0, 9999, -9999, 1.0, 100, 1, 48.587768, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1704, 10.17193, 0, 9999, -9999, 1.0, 100, 1, 127.647586, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1705, 5.26422, 0, 9999, -9999, 1.0, 100, 1, 52.051788, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1706, 0.604489, 0, 9999, -9999, 1.0, 100, 1, 6.76178, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1707, 6.403263, 0, 9999, -9999, 1.0, 100, 1, 11.7078, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1708, 12.92226, 0, 9999, -9999, 1.0, 100, 1, 26.288692, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1709, 17.033589, 0, 9999, -9999, 1.0, 100, 1, 226.257418, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1710, 49.517907, 0, 9999, -9999, 1.0, 100, 1, 183.631947, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1711, 2.441574, 0, 9999, -9999, 1.0, 100, 1, 7.213854, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1712, 6.895552, 0, 9999, -9999, 1.0, 100, 1, 75.638853, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1713, 28.927684, 0, 9999, -9999, 1.0, 100, 1, 90.775073, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1714, 16.998693, 0, 9999, -9999, 1.0, 100, 1, 42.312538, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1715, 62.747539, 0, 9999, -9999, 1.0, 100, 1, 155.279397, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1716, 97.323451, 0, 9999, -9999, 1.0, 100, 1, 156.979012, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1717, 10.583744, 0, 9999, -9999, 1.0, 100, 1, 82.928251, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1718, 170.834174, 0, 9999, -9999, 1.0, 100, 1, 301.614349, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1719, 3.601351, 0, 9999, -9999, 1.0, 100, 1, 19.488967, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1720, 5.131112, 0, 9999, -9999, 1.0, 100, 1, 54.067169, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1721, 37.327656, 0, 9999, -9999, 1.0, 100, 1, 82.151947, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1722, 9.385188, 0, 9999, -9999, 1.0, 100, 1, 21.329566, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1723, 0.050571, 0, 9999, -9999, 1.0, 100, 1, 2.855273, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1724, 0.677839, 0, 9999, -9999, 1.0, 100, 1, 36.268783, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1725, 22.953281, 0, 9999, -9999, 1.0, 100, 1, 55.750844, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1726, 15.03217, 0, 9999, -9999, 1.0, 100, 1, 84.308501, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1727, 0.192521, 0, 9999, -9999, 1.0, 100, 1, 0.456443, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1728, 28.516908, 0, 9999, -9999, 1.0, 100, 1, 65.283314, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1729, 100.473844, 0, 9999, -9999, 1.0, 100, 1, 220.758669, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1730, 4.797176, 0, 9999, -9999, 1.0, 100, 1, 51.367164, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1731, 34.263201, 0, 9999, -9999, 1.0, 100, 1, 151.90213, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1732, 155.11784, 0, 9999, -9999, 1.0, 100, 1, 383.858473, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1733, 23.326039, 0, 9999, -9999, 1.0, 100, 1, 60.655652, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1734, 30.795291, 0, 9999, -9999, 1.0, 100, 1, 77.375277, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1735, 79.949927, 0, 9999, -9999, 1.0, 100, 1, 153.887449, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1736, 32.566074, 0, 9999, -9999, 1.0, 100, 1, 89.439426, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1737, 81.938946, 0, 9999, -9999, 1.0, 100, 1, 194.473407, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1738, 47.91826, 0, 9999, -9999, 1.0, 100, 1, 116.049526, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1739, 18.885817, 0, 9999, -9999, 1.0, 100, 1, 33.525947, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1740, 37.672724, 0, 9999, -9999, 1.0, 100, 1, 66.638954, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1741, 2.695111, 0, 9999, -9999, 1.0, 100, 1, 35.869318, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1742, 1.393089, 0, 9999, -9999, 1.0, 100, 1, 25.619162, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1743, 0.154333, 0, 9999, -9999, 1.0, 100, 1, 0.986841, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1744, 0.212264, 0, 9999, -9999, 1.0, 100, 1, 3.775325, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1745, 2.093329, 0, 9999, -9999, 1.0, 100, 1, 31.215591, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1746, 26.502147, 0, 9999, -9999, 1.0, 100, 1, 172.123236, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1747, 1.53228, 0, 9999, -9999, 1.0, 100, 1, 25.963706, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1748, 25.422655, 0, 9999, -9999, 1.0, 100, 1, 67.219313, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1749, 47.531509, 0, 9999, -9999, 1.0, 100, 1, 218.703564, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1750, 11.270767, 0, 9999, -9999, 1.0, 100, 1, 22.191848, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1751, 8.41057, 0, 9999, -9999, 1.0, 100, 1, 18.416283, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1752, 52.647161, 0, 9999, -9999, 1.0, 100, 1, 136.190504, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1753, 37.833072, 0, 9999, -9999, 1.0, 100, 1, 79.270006, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1754, 183.717347, 0, 9999, -9999, 1.0, 100, 1, 408.37422, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1755, 24.049325, 0, 9999, -9999, 1.0, 100, 1, 46.277001, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1756, 48.363188, 0, 9999, -9999, 1.0, 100, 1, 93.807787, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1757, 104.993994, 0, 9999, -9999, 1.0, 100, 1, 197.08743, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1758, 154.052205, 0, 9999, -9999, 1.0, 100, 1, 311.473267, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1759, 88.42199, 0, 9999, -9999, 1.0, 100, 1, 156.546089, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1760, 60.975908, 0, 9999, -9999, 1.0, 100, 1, 114.687411, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1761, 28.606966, 0, 9999, -9999, 1.0, 100, 1, 48.443946, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1762, 56.025721, 0, 9999, -9999, 1.0, 100, 1, 107.077622, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1763, 37.224422, 0, 9999, -9999, 1.0, 100, 1, 90.136674, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1764, 9.235744, 0, 9999, -9999, 1.0, 100, 1, 21.994769, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1765, 52.402958, 0, 9999, -9999, 1.0, 100, 1, 112.249863, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1766, 51.104322, 0, 9999, -9999, 1.0, 100, 1, 99.811208, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1767, 52.541385, 0, 9999, -9999, 1.0, 100, 1, 95.5909, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1768, 85.744629, 0, 9999, -9999, 1.0, 100, 1, 159.818572, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1769, 112.347054, 0, 9999, -9999, 1.0, 100, 1, 235.581664, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1770, 259.651541, 0, 9999, -9999, 1.0, 100, 1, 479.248156, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1771, 5.109869, 0, 9999, -9999, 1.0, 100, 1, 276.640075, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1772, 126.592412, 0, 9999, -9999, 1.0, 100, 1, 272.215345, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1773, 258.328874, 0, 9999, -9999, 1.0, 100, 1, 533.823159, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1774, 33.114072, 0, 9999, -9999, 1.0, 100, 1, 88.57714, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1775, 97.439377, 0, 9999, -9999, 1.0, 100, 1, 197.787397, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1776, 57.66107, 0, 9999, -9999, 1.0, 100, 1, 111.203656, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1777, 71.274796, 0, 9999, -9999, 1.0, 100, 1, 199.457983, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1778, 44.558231, 0, 9999, -9999, 1.0, 100, 1, 80.070627, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1779, 40.606138, 0, 9999, -9999, 1.0, 100, 1, 78.485044, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1780, 49.274198, 0, 9999, -9999, 1.0, 100, 1, 97.872974, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1781, 0.280452, 0, 9999, -9999, 1.0, 100, 1, 7.067063, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1782, 0.319467, 0, 9999, -9999, 1.0, 100, 1, 9.94901, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1783, 0.345254, 0, 9999, -9999, 1.0, 100, 1, 10.739092, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1784, 66.79638, 0, 9999, -9999, 1.0, 100, 1, 240.920274, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1785, 50.360782, 0, 9999, -9999, 1.0, 100, 1, 275.41262, 0.0, 0, 0, 0, 0, 0, 0, 0, | |
"""Test hook that runs shard splits continuously."""
import copy
import random
import threading
import time
import uuid
import bson
import pymongo.errors
from buildscripts.resmokelib import errors
from buildscripts.resmokelib.testing.fixtures import interface as fixture_interface
from buildscripts.resmokelib.testing.fixtures import shard_split
from buildscripts.resmokelib.testing.fixtures.replicaset import ReplicaSetFixture
from buildscripts.resmokelib.testing.hooks import interface
class ContinuousShardSplit(interface.Hook):  # pylint: disable=too-many-instance-attributes
    """Hook that drives continuous shard split operations during a suite.

    A dedicated _ShardSplitThread is created and started in before_suite(),
    allowed to run for every test via before_test(), paused again in
    after_test(), and stopped in after_suite().
    """

    DESCRIPTION = ("Continuous shard split operations")
    IS_BACKGROUND = True
    AWAIT_REPL_TIMEOUT_MINS = ReplicaSetFixture.AWAIT_REPL_TIMEOUT_MINS

    def __init__(self, hook_logger, fixture, shell_options):
        """Set up the hook.

        :param hook_logger: the logger instance for this hook.
        :param fixture: the target ShardSplitFixture containing the donor replica set.
        :param shell_options: contains the global_vars which contains TestData.tenantIds
            to be used for shard splits.
        """
        interface.Hook.__init__(self, hook_logger, fixture, ContinuousShardSplit.DESCRIPTION)
        if not isinstance(fixture, shard_split.ShardSplitFixture):
            raise ValueError("The ContinuousShardSplit hook requires a ShardSplitFixture")
        self._shard_split_thread = None
        self._shard_split_fixture = fixture
        self._shell_options = copy.deepcopy(shell_options)

    def before_suite(self, test_report):
        """Create and start the shard split thread."""
        if not self._shard_split_fixture:
            raise ValueError("No ShardSplitFixture to run shard splits on")
        self.logger.info("Starting the shard split thread.")
        split_thread = _ShardSplitThread(self.logger, self._shard_split_fixture,
                                         self._shell_options, test_report)
        self._shard_split_thread = split_thread
        split_thread.start()

    def after_suite(self, test_report, teardown_flag=None):
        """Stop the shard split thread."""
        self.logger.info("Stopping the shard split thread.")
        self._shard_split_thread.stop()
        self.logger.info("Stopped the shard split thread.")

    def before_test(self, test, test_report):
        """Let the shard split thread run while this test executes."""
        self.logger.info("Resuming the shard split thread.")
        self._shard_split_thread.resume(test)

    def after_test(self, test, test_report):
        """Block the shard split thread until the next test starts."""
        self.logger.info("Pausing the shard split thread.")
        self._shard_split_thread.pause()
        self.logger.info("Paused the shard split thread.")
class ShardSplitLifeCycle(object):
    """Synchronizes the job thread with the shard split thread.

    The job thread alternates between mark_test_started() and
    mark_test_finished(); the shard split thread may perform splits at any
    point between those two calls. The job thread synchronizes with the split
    thread outside this object to know a split isn't currently in flight.
    """

    _TEST_STARTED_STATE = "start"
    _TEST_FINISHED_STATE = "finished"

    def __init__(self):
        """Set up the lock/condition pair and start in the 'finished' state."""
        self.__mutex = threading.Lock()
        self.__state_changed = threading.Condition(self.__mutex)
        self.test_num = 0
        self.__state = self._TEST_FINISHED_STATE
        self.__stop_requested = False

    def mark_test_started(self):
        """Record that a new test began (call from before_test()).

        Unblocks wait_for_shard_split_permitted(), which then returns True.
        """
        with self.__mutex:
            self.test_num += 1
            self.__state = self._TEST_STARTED_STATE
            self.__state_changed.notify_all()

    def mark_test_finished(self):
        """Record that the current test ended (call from after_test()).

        Subsequent wait_for_shard_split_permitted() calls block until
        mark_test_started() runs again.
        """
        with self.__mutex:
            self.__state = self._TEST_FINISHED_STATE
            self.__state_changed.notify_all()

    def is_test_finished(self):
        """Return True when no test is currently running."""
        with self.__mutex:
            return self.__state == self._TEST_FINISHED_STATE

    def stop(self):
        """Tell the shard split thread to exit (call from after_suite()).

        Causes wait_for_shard_split_permitted() to return False.
        """
        with self.__mutex:
            self.__stop_requested = True
            self.__state_changed.notify_all()

    def wait_for_shard_split_permitted(self):
        """Block until splits are permitted, or until stop() is called."""
        with self.__mutex:
            while True:
                if self.__stop_requested:
                    return False
                if self.__state == self._TEST_STARTED_STATE:
                    return True
                self.__state_changed.wait()

    def wait_for_shard_split_interval(self, timeout):
        """Block for 'timeout' seconds, waking early if notified (e.g. by stop())."""
        with self.__mutex:
            self.__state_changed.wait(timeout)

    def poll_for_idle_request(self):  # noqa: D205,D400
        """Return true if the shard split thread should continue running splits, or false
        if it should temporarily stop running splits.
        """
        with self.__mutex:
            return self.__state == self._TEST_FINISHED_STATE
class _ShardSplitOptions:
    """Bundles the parameters describing a single shard split operation."""

    def __init__(  # pylint: disable=too-many-arguments
            self, logger, shard_split_fixture, tenant_ids, recipient_tag_name, recipient_set_name):
        """Initialize the split options.

        :param logger: logger for split progress messages.
        :param shard_split_fixture: the ShardSplitFixture the split runs against.
        :param tenant_ids: tenant ids to migrate in this split.
        :param recipient_tag_name: replica set tag identifying recipient nodes.
        :param recipient_set_name: name of the recipient replica set.
        """
        self.logger = logger
        # Each options object represents one split operation, so it owns a
        # freshly generated migration id.
        self.migration_id = uuid.uuid4()
        self.shard_split_fixture = shard_split_fixture
        self.tenant_ids = tenant_ids
        self.recipient_tag_name = recipient_tag_name
        self.recipient_set_name = recipient_set_name

    def get_migration_id_as_binary(self):
        """Return the migration id as BSON Binary (subtype 4, i.e. UUID)."""
        return bson.Binary(self.migration_id.bytes, 4)

    def get_donor_rs(self):
        """Return the current donor for the split fixture."""
        return self.shard_split_fixture.get_donor_rs()

    def get_donor_name(self):
        """Return the replica set name for the donor."""
        return self.get_donor_rs().replset_name

    def get_donor_primary(self):
        """Return a connection to the donor primary.

        BUGFIX: this previously read ``self.AWAIT_REPL_TIMEOUT_MINS``, but
        _ShardSplitOptions never defines that attribute (it is defined on
        ContinuousShardSplit), so calling this method raised AttributeError.
        Reference the hook's constant instead.
        """
        return self.get_donor_rs().get_primary(
            timeout_secs=ContinuousShardSplit.AWAIT_REPL_TIMEOUT_MINS)

    def get_donor_nodes(self):
        """Return the nodes for the current shard split fixture donor."""
        return self.get_donor_rs().nodes

    def get_recipient_nodes(self):
        """Return the recipient nodes for the shard split fixture."""
        return self.shard_split_fixture.get_recipient_nodes()

    def __str__(self):
        """Return a human-readable summary of the split options."""
        opts = {
            "migration_id": self.migration_id, "tenant_ids": self.tenant_ids,
            "donor": self.get_donor_name(), "recipientSetName": self.recipient_set_name,
            "recipientTagName": self.recipient_tag_name
        }
        return str(opts)
class _ShardSplitThread(threading.Thread): # pylint: disable=too-many-instance-attributes
THREAD_NAME = "ShardSplitThread"
WAIT_SECS_RANGES = [[0.05, 0.1], [0.1, 0.5], [1, 5], [5, 15]]
POLL_INTERVAL_SECS = 0.1
NO_SUCH_MIGRATION_ERR_CODE = 327
INTERNAL_ERR_CODE = 1
def __init__(self, logger, shard_split_fixture, shell_options, test_report):
    """Initialize _ShardSplitThread.

    :param logger: logger for split progress messages.
    :param shard_split_fixture: the ShardSplitFixture to run splits against.
    :param shell_options: shell options dict; ``TestData.tenantIds`` and
        ``TestData.authOptions`` are read from its ``global_vars``.
    :param test_report: the report object the thread is associated with.
    """
    threading.Thread.__init__(self, name=self.THREAD_NAME)
    # Daemon thread: an abandoned split thread must not keep resmoke alive.
    self.daemon = True
    self.logger = logger
    self._shard_split_fixture = shard_split_fixture
    self._tenant_ids = shell_options["global_vars"]["TestData"]["tenantIds"]
    self._auth_options = shell_options["global_vars"]["TestData"]["authOptions"]
    self._test = None
    self._test_report = test_report
    self._shell_options = shell_options
    self.__lifecycle = ShardSplitLifeCycle()
    # Event set when the thread has been stopped using the 'stop()' method.
    self._is_stopped_evt = threading.Event()
    # Event set when the thread is not performing shard splits.
    self._is_idle_evt = threading.Event()
    self._is_idle_evt.set()
def run(self):
    """Execute the thread: repeatedly perform shard splits until stopped.

    Each loop iteration waits for permission from the lifecycle object,
    optionally waits a random interval, runs one split, then reconfigures
    the fixture depending on whether the split committed or aborted.
    """
    if not self._shard_split_fixture:
        self.logger.warning("No ShardSplitFixture to run shard splits on.")
        return

    split_count = 0

    try:
        while True:
            self._is_idle_evt.set()

            permitted = self.__lifecycle.wait_for_shard_split_permitted()
            if not permitted:
                break

            if split_count >= 3:  # TODO(SERVER-66045): Remove this check and run unbounded splits
                time.sleep(self.POLL_INTERVAL_SECS)
                continue

            self._is_idle_evt.clear()

            split_opts = self._create_split_opts(split_count)

            # Set up the donor for a split
            self._shard_split_fixture.add_recipient_nodes(split_opts.recipient_set_name)

            # Briefly wait to let the test run before starting the split operation, so that
            # the first split is more likely to have data to migrate.
            wait_secs = random.uniform(
                *self.WAIT_SECS_RANGES[split_count % len(self.WAIT_SECS_RANGES)])
            self.logger.info(f"Waiting for {wait_secs} seconds before starting split.")
            self.__lifecycle.wait_for_shard_split_interval(wait_secs)

            self.logger.info(f"Starting shard split: {str(split_opts)}.")
            start_time = time.time()
            is_committed = self._run_shard_split(split_opts)
            end_time = time.time()

            split_count += 1
            self.logger.info(
                f"Completed shard split {str(split_opts)} in {(end_time - start_time) * 1000} ms."
            )

            # set up the fixture for the next split operation
            if is_committed:
                self._shard_split_fixture.replace_donor_with_recipient(
                    split_opts.recipient_set_name)
            else:
                self._shard_split_fixture.remove_recipient_nodes()

            found_idle_request = self.__lifecycle.poll_for_idle_request()
            if found_idle_request:
                continue
    except Exception:  # pylint: disable=W0703
        # Proactively log the exception when it happens so it will be flushed immediately.
        self.logger.exception("Shard split thread threw exception")
        # The event should be signaled whenever the thread is not performing shard splits.
        self._is_idle_evt.set()
def stop(self):
    """Stop the thread when the suite finishes.

    Resumes the lifecycle first so a paused thread can observe the stop
    request and exit, then joins the thread.
    """
    self.__lifecycle.stop()
    self._is_stopped_evt.set()
    # Unpause to allow the thread to finish.
    self.resume(self._test)
    self.join()

def pause(self):
    """Pause the thread after a test.

    Blocks until the thread is idle, then verifies both the thread and the
    underlying fixture are still healthy.
    """
    self.__lifecycle.mark_test_finished()
    # Wait until we are no longer executing splits.
    self._is_idle_evt.wait()
    # Check if the thread is alive in case it has thrown an exception while running.
    self._check_thread()
    # Check that the fixture is still running.
    if not self._shard_split_fixture.is_running():
        raise errors.ServerFailure(
            f"ShardSplitFixture with pids {self._shard_split_fixture.pids()} expected to be running in"
            " ContinuousShardSplit, but wasn't.")
def resume(self, test):
    """Resume the thread before a test starts; remember the current test."""
    self._test = test
    self.__lifecycle.mark_test_started()

def _wait(self, timeout):
    """Block until stop() is called or *timeout* seconds elapse."""
    self._is_stopped_evt.wait(timeout)

def short_name(self):
    """Return the name of the thread."""
    return self.THREAD_NAME
def _check_thread(self):
    """Raise ServerFailure if the thread has died (e.g. threw an exception)."""
    if not self.is_alive():
        msg = "Shard split thread is not running."
        self.logger.error(msg)
        raise errors.ServerFailure(msg)

def _is_fail_point_abort_reason(self, abort_reason):
    # True when the abort was injected by the suite's failpoint rather than a
    # genuine server error: matched by error code AND the exact errmsg text.
    return abort_reason["code"] == self.INTERNAL_ERR_CODE and abort_reason[
        "errmsg"] == "simulate a shard split error"

def _create_split_opts(self, split_count):
    # Recipient sets are named rs1, rs2, ... so successive splits never collide.
    recipient_set_name = f"rs{split_count+1}"
    recipient_tag_name = "recipientNode"
    return _ShardSplitOptions(self.logger, self._shard_split_fixture, self._tenant_ids,
                              recipient_tag_name, recipient_set_name)

def _create_client(self, node):
    # Authenticated pymongo client for the given fixture node.
    return fixture_interface.authenticate(node.mongo_client(), self._auth_options)
def _run_shard_split(self, split_opts):  # noqa: D205,D400
    """Run one shard split and garbage collect it.

    Returns True iff the split committed; False if it was aborted by the
    expected failpoint. Raises ServerFailure for any other failure.
    """
    try:
        donor_client = self._create_client(split_opts.get_donor_rs())
        res = self._commit_shard_split(donor_client, split_opts)
        is_committed = res["state"] == "committed"

        # Garbage collect the split prior to throwing error to avoid conflicting operations
        # in the next test.
        if is_committed:
            # Wait for the donor/proxy to reroute at least one command before doing garbage
            # collection. Stop waiting when the test finishes.
            self._wait_for_reroute_or_test_completion(donor_client, split_opts)
        self._forget_shard_split(donor_client, split_opts)
        self._wait_for_garbage_collection(split_opts)

        if not res["ok"]:
            raise errors.ServerFailure(
                f"Shard split '{split_opts.migration_id}' on replica set "
                f"'{split_opts.get_donor_name()}' failed: {str(res)}")

        if is_committed:
            return True

        abort_reason = res["abortReason"]
        if self._is_fail_point_abort_reason(abort_reason):
            # An abort triggered by the suite's failpoint is expected, not an error.
            self.logger.info(
                f"Shard split '{split_opts.migration_id}' on replica set "
                f"'{split_opts.get_donor_name()}' aborted due to failpoint: {str(res)}.")
            return False
        raise errors.ServerFailure(
            f"Shard split '{str(split_opts.migration_id)}' with donor replica set "
            f"'{split_opts.get_donor_name()}' aborted due to an error: {str(res)}")
    except pymongo.errors.PyMongoError:
        self.logger.exception(
            f"Error running shard split '{split_opts.migration_id}' with donor primary on "
            f"replica set '{split_opts.get_donor_name()}'.")
        raise
def _commit_shard_split(self, donor_client, split_opts): # noqa: D205,D400
self.logger.info(f"Starting shard split '{split_opts.migration_id}' on replica set "
f"'{split_opts.get_donor_name()}'.")
while True:
try:
res = donor_client.admin.command({
"commitShardSplit": 1, "migrationId": split_opts.get_migration_id_as_binary(),
"tenantIds": split_opts.tenant_ids,
"recipientTagName": split_opts.recipient_tag_name, "recipientSetName":
split_opts.recipient_set_name
}, bson.codec_options.CodecOptions(uuid_representation=bson.binary.UUID_SUBTYPE))
if res["state"] == "committed":
self.logger.info(f"Shard split '{split_opts.migration_id}' on replica set "
f"'{split_opts.get_donor_name()}' has committed.")
return res
if res["state"] == "aborted":
self.logger.info(f"Shard | |
# metric_learn/mmc.py
"""Mahalanobis Metric for Clustering (MMC)"""
from __future__ import print_function, absolute_import, division
import warnings
import numpy as np
from six.moves import xrange
from sklearn.base import TransformerMixin
from sklearn.utils.validation import assert_all_finite
from sklearn.exceptions import ChangedBehaviorWarning
from .base_metric import _PairsClassifierMixin, MahalanobisMixin
from .constraints import Constraints, wrap_pairs
from ._util import components_from_metric, _initialize_metric_mahalanobis
class _BaseMMC(MahalanobisMixin):
    """Shared fitting logic for the MMC metric-learning estimators."""

    _tuple_size = 2  # constraints are pairs
def __init__(self, max_iter=100, max_proj=10000, convergence_threshold=1e-3,
             init=None, A0='deprecated', diagonal=False,
             diagonal_c=1.0, verbose=False, preprocessor=None,
             random_state=None):
    """Store hyper-parameters; no validation happens until fit.

    max_iter / max_proj bound the outer optimization loop and the inner
    constraint-projection loop respectively.  `A0` is deprecated in favor of
    `init` (see the warning in `_fit`).  When `diagonal` is True only a
    diagonal metric is learned, weighted by `diagonal_c`.
    """
    self.max_iter = max_iter
    self.max_proj = max_proj
    self.convergence_threshold = convergence_threshold
    self.init = init
    self.A0 = A0
    self.diagonal = diagonal
    self.diagonal_c = diagonal_c
    self.verbose = verbose
    self.random_state = random_state
    super(_BaseMMC, self).__init__(preprocessor)
def _fit(self, pairs, y):
    """Validate inputs, resolve the metric initialization, and dispatch to
    the diagonal or full fitting routine."""
    if self.A0 != 'deprecated':
        # `A0` is no longer consulted; only warn so old code keeps working.
        warnings.warn('"A0" parameter is not used.'
                      ' It has been deprecated in version 0.5.0 and will be'
                      'removed in 0.6.0. Use "init" instead.',
                      DeprecationWarning)
    pairs, y = self._prepare_inputs(pairs, y,
                                    type_of_inputs='tuples')
    if self.init is None:
        # TODO: replace init=None by init='auto' in v0.6.0 and remove the warning
        msg = ("Warning, no init was set (`init=None`). As of version 0.5.0, "
               "the default init will now be set to 'identity', instead of the "
               "identity divided by a scaling factor of 10. "
               "If you still want to use the same init as in previous "
               "versions, set init=np.eye(d)/10, where d is the dimension "
               "of your input space (d=pairs.shape[1]). "
               "This warning will disappear in v0.6.0, and `init` parameter's"
               " default value will be set to 'auto'.")
        warnings.warn(msg, ChangedBehaviorWarning)
        init = 'identity'
    else:
        init = self.init
    # A_ is the Mahalanobis matrix being optimized.
    self.A_ = _initialize_metric_mahalanobis(pairs, init,
                                             random_state=self.random_state,
                                             matrix_name='init')

    if self.diagonal:
        return self._fit_diag(pairs, y)
    else:
        return self._fit_full(pairs, y)
def _fit_full(self, pairs, y):
    """Learn full metric using MMC.

    Parameters
    ----------
    X : (n x d) data matrix
        each row corresponds to a single instance
    constraints : 4-tuple of arrays
        (a,b,c,d) indices into X, with (a,b) specifying similar and (c,d)
        dissimilar pairs
    """
    num_dim = pairs.shape[2]

    error2 = 1e10
    eps = 0.01  # error-bound of iterative projection on C1 and C2
    A = self.A_
    pos_pairs, neg_pairs = pairs[y == 1], pairs[y == -1]

    # Create weight vector from similar samples
    pos_diff = pos_pairs[:, 0, :] - pos_pairs[:, 1, :]
    w = np.einsum('ij,ik->jk', pos_diff, pos_diff).ravel()
    # `w` is the sum of all outer products of the rows in `pos_diff`.
    # The above `einsum` is equivalent to the much more inefficient:
    # w = np.apply_along_axis(
    #         lambda x: np.outer(x,x).ravel(),
    #         1,
    #         X[a] - X[b]
    #     ).sum(axis = 0)
    t = w.dot(A.ravel()) / 100.0

    w_norm = np.linalg.norm(w)
    w1 = w / w_norm  # make `w` a unit vector
    t1 = t / w_norm  # distance from origin to `w^T*x=t` plane

    cycle = 1
    alpha = 0.1  # initial step size along gradient
    grad1 = self._fS1(pos_pairs, A)  # gradient of similarity
    # constraint function
    grad2 = self._fD1(neg_pairs, A)  # gradient of dissimilarity
    # constraint function
    # gradient of fD1 orthogonal to fS1:
    M = self._grad_projection(grad1, grad2)
    A_old = A.copy()

    for cycle in xrange(self.max_iter):

        # projection of constraints C1 and C2
        satisfy = False

        for it in xrange(self.max_proj):

            # First constraint:
            # f(A) = \sum_{i,j \in S} d_ij' A d_ij <= t              (1)
            # (1) can be rewritten as a linear constraint: w^T x = t,
            # where x is the unrolled matrix of A,
            # w is also an unrolled matrix of W where
            # W_{kl}= \sum_{i,j \in S}d_ij^k * d_ij^l
            x0 = A.ravel()
            if w.dot(x0) <= t:
                x = x0
            else:
                x = x0 + (t1 - w1.dot(x0)) * w1
            A[:] = x.reshape(num_dim, num_dim)

            # Second constraint:
            # PSD constraint A >= 0
            # project A onto domain A>0
            l, V = np.linalg.eigh((A + A.T) / 2)
            A[:] = np.dot(V * np.maximum(0, l[None, :]), V.T)

            fDC2 = w.dot(A.ravel())
            error2 = (fDC2 - t) / t
            if error2 < eps:
                satisfy = True
                break

        # third constraint: gradient ascent
        # max: g(A) >= 1
        # here we suppose g(A) = fD(A) = \sum_{I,J \in D} sqrt(d_ij' A d_ij)
        obj_previous = self._fD(neg_pairs, A_old)  # g(A_old)
        obj = self._fD(neg_pairs, A)  # g(A)

        if satisfy and (obj > obj_previous or cycle == 0):

            # If projection of 1 and 2 is successful, and such projection
            # improves objective function, slightly increase learning rate
            # and update from the current A.
            alpha *= 1.05
            A_old[:] = A
            grad2 = self._fS1(pos_pairs, A)
            grad1 = self._fD1(neg_pairs, A)
            M = self._grad_projection(grad1, grad2)
            A += alpha * M

        else:

            # If projection of 1 and 2 failed, or obj <= obj_previous due
            # to projection of 1 and 2, shrink learning rate and re-update
            # from the previous A.
            alpha /= 2
            A[:] = A_old + alpha * M

        delta = np.linalg.norm(alpha * M) / np.linalg.norm(A_old)
        if delta < self.convergence_threshold:
            break
        if self.verbose:
            print('mmc iter: %d, conv = %f, projections = %d' %
                  (cycle, delta, it + 1))

    if delta > self.convergence_threshold:
        self.converged_ = False
        if self.verbose:
            print('mmc did not converge, conv = %f' % (delta,))
    else:
        self.converged_ = True
        if self.verbose:
            print('mmc converged at iter %d, conv = %f' % (cycle, delta))
    self.A_[:] = A_old
    self.n_iter_ = cycle

    self.components_ = components_from_metric(self.A_)
    return self

def _fit_diag(self, pairs, y):
    """Learn diagonal metric using MMC.

    Parameters
    ----------
    X : (n x d) data matrix
        each row corresponds to a single instance
    constraints : 4-tuple of arrays
        (a,b,c,d) indices into X, with (a,b) specifying similar and (c,d)
        dissimilar pairs
    """
    num_dim = pairs.shape[2]
    pos_pairs, neg_pairs = pairs[y == 1], pairs[y == -1]
    s_sum = np.sum((pos_pairs[:, 0, :] - pos_pairs[:, 1, :]) ** 2, axis=0)

    it = 0
    error = 1.0
    eps = 1e-6
    reduction = 2.0
    w = np.diag(self.A_).copy()

    while error > self.convergence_threshold and it < self.max_iter:

        fD0, fD_1st_d, fD_2nd_d = self._D_constraint(neg_pairs, w)
        obj_initial = np.dot(s_sum, w) + self.diagonal_c * fD0
        fS_1st_d = s_sum  # first derivative of the similarity constraints

        # gradient of the objective:
        gradient = fS_1st_d - self.diagonal_c * fD_1st_d
        # Hessian of the objective:
        hessian = -self.diagonal_c * fD_2nd_d + eps * np.eye(num_dim)
        step = np.dot(np.linalg.inv(hessian), gradient)

        # Newton-Rapshon update
        # search over optimal lambda
        lambd = 1  # initial step-size
        w_tmp = np.maximum(0, w - lambd * step)
        obj = (np.dot(s_sum, w_tmp) + self.diagonal_c *
               self._D_objective(neg_pairs, w_tmp))
        assert_all_finite(obj)

        # Backtracking line search: halve lambda while the objective improves.
        obj_previous = obj + 1  # just to get the while-loop started

        inner_it = 0
        while obj < obj_previous:
            obj_previous = obj
            w_previous = w_tmp.copy()
            lambd /= reduction
            w_tmp = np.maximum(0, w - lambd * step)
            obj = (np.dot(s_sum, w_tmp) + self.diagonal_c *
                   self._D_objective(neg_pairs, w_tmp))
            inner_it += 1
            assert_all_finite(obj)

        w[:] = w_previous
        error = np.abs((obj_previous - obj_initial) / obj_previous)
        if self.verbose:
            print('mmc iter: %d, conv = %f' % (it, error))
        it += 1

    self.A_ = np.diag(w)

    self.components_ = components_from_metric(self.A_)
    return self
def _fD(self, neg_pairs, A):
    r"""The value of the dissimilarity constraint function.

    f = f(\sum_{ij \in D} distance(x_i, x_j))
    i.e. distance can be L1: \sqrt{(x_i-x_j)A(x_i-x_j)'}
    """
    delta = neg_pairs[:, 0, :] - neg_pairs[:, 1, :]
    # Squared Mahalanobis distance of every dissimilar pair under metric A.
    mahal_sq = np.sum(np.dot(delta, A) * delta, axis=1)
    # Log of the summed distances; 1e-6 guards against log(0).
    return np.log(np.sum(np.sqrt(mahal_sq)) + 1e-6)
def _fD1(self, neg_pairs, A):
    r"""The gradient of the dissimilarity constraint function w.r.t. A.

    For example, let distance by L1 norm:
    f = f(\sum_{ij \in D} \sqrt{(x_i-x_j)A(x_i-x_j)'})
    df/dA_{kl} = f'* d(\sum_{ij \in D} \sqrt{(x_i-x_j)^k*(x_i-x_j)^l})/dA_{kl}

    Note that d_ij*A*d_ij' = tr(d_ij*A*d_ij') = tr(d_ij'*d_ij*A)
    so, d(d_ij*A*d_ij')/dA = d_ij'*d_ij
    df/dA = f'(\sum_{ij \in D} \sqrt{tr(d_ij'*d_ij*A)})
            * 0.5*(\sum_{ij \in D} (1/sqrt{tr(d_ij'*d_ij*A)})*(d_ij'*d_ij))
    """
    diff = neg_pairs[:, 0, :] - neg_pairs[:, 1, :]
    # outer products of all rows in `diff`
    M = np.einsum('ij,ik->ijk', diff, diff)
    # faster version of: dist = np.sqrt(np.sum(M * A[None,:,:], axis=(1,2)))
    dist = np.sqrt(np.einsum('ijk,jk', M, A))
    # faster version of: sum_deri = np.sum(M /
    #   (2 * (dist[:,None,None] + 1e-6)), axis=0)
    sum_deri = np.einsum('ijk,i->jk', M, 0.5 / (dist + 1e-6))
    sum_dist = dist.sum()
    # 1e-6 terms guard against division by zero when all distances vanish.
    return sum_deri / (sum_dist + 1e-6)
def _fS1(self, pos_pairs, A):
r"""The gradient of the similarity constraint function w.r.t. A.
f = \sum_{ij}(x_i-x_j)A(x_i-x_j)' = \sum_{ij}d_ij*A*d_ij'
df/dA = d(d_ij*A*d_ij')/dA
Note that d_ij*A*d_ij' = tr(d_ij*A*d_ij') = tr(d_ij'*d_ij*A)
so, d(d_ij*A*d_ij')/dA = d_ij'*d_ij
"""
diff = pos_pairs[:, 0, :] | |
#!/usr/bin/env python
""" Redis Enterprise Cluster log collector script.
Creates a directory with output of kubectl for
several API objects and for pods logs unless pass a -n
parameter will run on current namespace. Run with -h to see options
"""
import argparse
import json
import logging
import os
import re
import shutil
import signal
import subprocess
import sys
import tarfile
import time
from collections import OrderedDict
from multiprocessing import Process
RLEC_CONTAINER_NAME = "redis-enterprise-node"
RS_LOG_FOLDER_PATH = "/var/opt/redislabs/log"

# pylint: disable=locally-disabled, invalid-name
logger = logging.getLogger("log collector")

TIME_FORMAT = time.strftime("%Y%m%d-%H%M%S")

timeout = 180  # NOTE(review): appears unused in this file — confirm before removing

# Kubernetes API resource kinds dumped by collect_api_resources*().
API_RESOURCES = [
    "RedisEnterpriseCluster",
    "RedisEnterpriseDatabase",
    "StatefulSet",
    "Deployment",
    "Service",
    "ConfigMap",
    "Routes",
    "Ingress",
    "Role",
    "RoleBinding",
    "PersistentVolume",
    "PersistentVolumeClaim",
    "Node",
    "PodDisruptionBudget",
    "ResourceQuota",
    "Endpoints",
    "Pod",
    "NetworkPolicy",
    "CustomResourceDefinition",
    "CertificateSigningRequest",
    "ValidatingWebhookConfiguration",
    "ClusterRole",
    "ClusterRoleBinding",
    "ClusterServiceVersion",
    "Subscription",
    "Installplan",
    # BUG FIX: a missing comma here previously fused the next two entries into
    # the bogus kind "CatalogSourcePodSecurityPolicy", so neither CatalogSource
    # nor PodSecurityPolicy was ever collected.
    "CatalogSource",
    "PodSecurityPolicy",
    "ReplicaSet",
]
def make_dir(directory):
    """Create *directory* (including missing parents) if it does not exist.

    Exits the process with a non-zero status on failure, since every
    collector depends on its output directory existing.
    """
    # noinspection PyBroadException
    try:
        # makedirs(exist_ok=True) avoids the exists()/mkdir() race of the
        # previous implementation and also creates missing parent directories.
        os.makedirs(directory, exist_ok=True)
    except OSError as ex:
        logger.warning("Failed to create directory %s: %s", directory, ex)
        # BUG FIX: bare sys.exit() exited with status 0, hiding the failure
        # from calling scripts.
        sys.exit(1)
def _filter_non_existing_namespaces(namespaces):
    """Drop namespaces from the user's input that do not exist in the cluster."""
    return_code, out = run_shell_command("kubectl get ns -o=custom-columns='DATA:metadata.name' --no-headers=true")
    if return_code:
        # Could not list namespaces at all — treat none as valid.
        return []
    existing = set(out.split())
    kept = []
    for candidate in namespaces:
        if candidate in existing:
            kept.append(candidate)
        else:
            logger.warning("Namespace %s doesn't exist - Skipping", candidate)
    return kept
def _get_namespaces_to_run_on(namespace):
    """Resolve the user's namespace argument into a concrete namespace list.

    Accepts None (use the kubectl config's namespace, falling back to
    'default'), the literal string 'all' (every namespace in the cluster),
    or a comma separated list of namespace names.
    """
    def _get_namespace_from_config():
        # Fall back to the namespace of the active kubectl context.
        config_namespace = get_namespace_from_config()
        if not config_namespace:
            return ["default"]
        return [config_namespace]

    if not namespace:
        return _get_namespace_from_config()

    if namespace == 'all':
        return_code, out = run_shell_command("kubectl get ns -o=custom-columns='DATA:metadata.name' --no-headers=true")
        if return_code:
            logger.warning("Failed to parse namespace list - will use namespace from config: %s", out)
            return _get_namespace_from_config()
        return out.split()

    # comma separated string
    namespaces = namespace.split(',')
    existing_namespaces = _filter_non_existing_namespaces(namespaces)
    if not existing_namespaces:
        logger.warning("Input doesn't contain an existing namespace - will use namespace from config")
        return _get_namespace_from_config()
    return existing_namespaces
def collect_from_ns(namespace, output_dir):
    "Collect the context of a specific namespace. Typically runs in parallel processes."
    logger.info("Started collecting from namespace '%s'", namespace)
    separator = "/" if output_dir[-1] != '/' else ""
    ns_output_dir = output_dir + separator + namespace
    make_dir(ns_output_dir)

    # One sub-collector per artifact type.
    get_redis_enterprise_debug_info(namespace, ns_output_dir)
    collect_pod_rs_logs(namespace, ns_output_dir)
    collect_resources_list(namespace, ns_output_dir)
    collect_events(namespace, ns_output_dir)
    collect_api_resources(namespace, ns_output_dir)
    collect_api_resources_description(namespace, ns_output_dir)
    collect_pods_logs(namespace, ns_output_dir)
def run(namespace_input, output_dir):
    """
    Collect logs: resolve namespaces, collect each one in a separate
    process, then archive everything into a single .tar.gz.
    """
    start_time = time.time()
    namespaces = _get_namespaces_to_run_on(namespace_input)

    output_file_name = "redis_enterprise_k8s_debug_info_{}".format(TIME_FORMAT)
    if not output_dir:
        # if not specified, use cwd
        output_dir = os.getcwd()
    output_dir = os.path.join(output_dir, output_file_name)
    make_dir(output_dir)

    collect_cluster_info(output_dir)

    # One process per namespace so slow namespaces are collected concurrently.
    processes = []
    for namespace in namespaces:
        p = Process(target=collect_from_ns, args=[namespace, output_dir])
        p.start()
        processes.append(p)

    for p in processes:
        p.join()

    archive_files(output_dir, output_file_name)
    logger.info("Finished Redis Enterprise log collector")
    # NOTE(review): %d truncates the rounded value to whole minutes.
    logger.info("--- Run time: %d minutes ---", round(((time.time() - start_time) / 60), 3))
def get_non_ready_rs_pod_names(namespace):
    """Return names of RS pods whose redis-enterprise-node container is not ready."""
    rs_pods = get_pods(namespace, selector='redis.io/role=node')
    if not rs_pods:
        logger.info("Namespace '%s': cannot find redis enterprise pods", namespace)
        return []

    not_ready = []
    for pod in rs_pods:
        name = pod['metadata']['name']
        if "status" not in pod or "containerStatuses" not in pod["status"]:
            continue
        for container_status in pod["status"]["containerStatuses"]:
            if container_status['name'] == RLEC_CONTAINER_NAME \
                    and not container_status["ready"]:
                not_ready.append(name)
    return not_ready
def collect_pod_rs_logs(namespace, output_dir):
    """
    Copy the Redis Enterprise log folder and config folder out of every RS
    pod into <output_dir>/rs_pod_logs/<pod name>/.
    """
    rs_pod_logs_dir = os.path.join(output_dir, "rs_pod_logs")
    rs_pod_names = get_pod_names(namespace=namespace, selector='redis.io/role=node')
    make_dir(rs_pod_logs_dir)
    # TODO restore usage of get_non_ready_rs_pod_names once RS bug is resolved (RED-51857)  # pylint: disable=W0511
    for rs_pod_name in rs_pod_names:
        pod_log_dir = os.path.join(rs_pod_logs_dir, rs_pod_name)
        make_dir(pod_log_dir)
        # `kubectl cp` the whole RS log folder out of the pod's RLEC container.
        cmd = "kubectl -n {} cp {}:{} {} -c {}".format(namespace,
                                                       rs_pod_name,
                                                       RS_LOG_FOLDER_PATH,
                                                       pod_log_dir,
                                                       RLEC_CONTAINER_NAME)
        return_code, out = run_shell_command(cmd)
        if return_code:
            logger.warning("Failed to copy rs logs from pod "
                           "to output directory, output:%s", out)
        else:
            logger.info("Namespace '%s': "
                        "Collected rs logs from pod marked as not ready, pod name: %s", namespace, rs_pod_name)

        # Also copy the node's configuration directory.
        pod_config_dir = os.path.join(pod_log_dir, "config")
        make_dir(pod_config_dir)
        cmd = "kubectl -n {} cp {}:{} {} -c {}".format(namespace,
                                                       rs_pod_name,
                                                       "/opt/redislabs/config",
                                                       pod_config_dir,
                                                       RLEC_CONTAINER_NAME)
        return_code, out = run_shell_command(cmd)
        if return_code:
            logger.warning("Failed to copy rs config from pod "
                           "to output directory, output:%s", out)
        else:
            logger.info("Collected rs config from pod marked as not ready, pod name: %s", rs_pod_name)
def debuginfo_attempt_on_pod(namespace, output_dir, pod_name, attempt):
    """
    Execute the rladmin command to get debug info on a specific pod
    Returns: true on success, false on failure
    """
    prog = "/opt/redislabs/bin/rladmin"
    cmd = "kubectl -n {} exec {} -c {} {} cluster debug_info path /tmp" \
        .format(namespace, pod_name, RLEC_CONTAINER_NAME, prog)
    return_code, out = run_shell_command(cmd)
    # Success is detected from rladmin's output text, not from the exit code.
    if "Downloading complete" not in out:
        logger.warning("Failed running rladmin command in pod: %s (attempt %d)",
                       out.rstrip(), attempt)
        return False

    # get the debug file name
    # rladmin prints e.g. "File /tmp/debuginfo.<...>.tar.gz".
    match = re.search(r'File (/tmp/(.*\.gz))', out)
    if match:
        debug_file_path = match.group(1)
        debug_file_name = match.group(2)
        logger.info("Namespace '%s': debug info created on pod %s in path %s",
                    namespace, pod_name, debug_file_path)
    else:
        logger.warning(
            "Failed to extract debug info name from output (attempt %d for pod %s) - (%s)",
            attempt, pod_name, out)
        return False

    # copy package from RS pod
    output_path = os.path.join(output_dir, debug_file_name)
    cmd = "kubectl -n {} cp {}:{} {}".format(namespace,
                                             pod_name,
                                             debug_file_path,
                                             output_path)
    return_code, out = run_shell_command(cmd)
    if return_code:
        logger.warning("Failed to copy debug info from pod "
                       "to output directory (attempt %d for pod %s), output:%s",
                       attempt, pod_name, out)
        return False

    # all is well
    return True
def get_redis_enterprise_debug_info(namespace, output_dir):
    """
    Connects to an RS cluster node,
    creates and copies debug info package from a pod, preferably one that passes readiness probe
    """
    rs_pods = get_pods(namespace, selector='redis.io/role=node')
    if not rs_pods:
        logger.info("Namespace '%s': Cannot find redis enterprise pod", namespace)
        return

    # Prefer pods whose containers are all ready.
    pod_names = []
    for pod in rs_pods:
        if 'containerStatuses' in pod['status'] and all(
                container_status['ready'] for container_status in pod['status']['containerStatuses']):
            pod_names.append(pod['metadata']['name'])
    if not pod_names:
        logger.warning("Cannot find a ready redis enterprise pod, will use a non-ready pod")
        pod_names = [pod['metadata']['name'] for pod in rs_pods]

    logger.info("Trying to extract debug info from RS pods: {%s}", pod_names)
    # Up to 3 attempts per pod; stop at the first pod that succeeds.
    for pod_name in pod_names:
        for attempt in range(3):
            if attempt > 0:
                time.sleep(1)
            if debuginfo_attempt_on_pod(namespace,
                                        output_dir,
                                        pod_name,
                                        attempt + 1):
                logger.info("Namespace '%s': Collected Redis Enterprise cluster debug package", namespace)
                return
def collect_resources_list(namespace, output_dir):
    """Dump `kubectl get all -o wide` for the namespace into a file."""
    collect_helper(output_dir,
                   cmd=f"kubectl get all -o wide -n {namespace}",
                   file_name="resources_list",
                   resource_name="resources list",
                   namespace=namespace)

def collect_cluster_info(output_dir):
    """Dump `kubectl cluster-info` into a file."""
    collect_helper(output_dir, cmd="kubectl cluster-info",
                   file_name="cluster_info", resource_name="cluster-info")

def collect_events(namespace, output_dir):
    """Dump `kubectl get events` for the namespace into a file."""
    # events need -n parameter in kubectl
    if not namespace:
        logger.warning("Cannot collect events without namespace - "
                       "skipping events collection")
        return
    collect_helper(output_dir, cmd=f"kubectl get events -n {namespace} -o wide",
                   file_name="events", resource_name="events", namespace=namespace)
def collect_api_resources(namespace, output_dir):
    """Write one YAML file per API resource kind (`kubectl get <kind> -o yaml`)."""
    logger.info("Namespace '%s': Collecting API resources", namespace)
    collected = OrderedDict()
    for kind in API_RESOURCES:
        yaml_out = run_kubectl_get_yaml(namespace, kind)
        if yaml_out:
            collected[kind] = yaml_out
            logger.info("Namespace '%s': + Collected %s", namespace, kind)

    for kind, yaml_out in collected.items():
        dest = os.path.join(output_dir, "{}.yaml".format(kind))
        with open(dest, "w+") as file_handle:
            file_handle.write(yaml_out)
def collect_api_resources_description(namespace, output_dir):
    """
    Creates file for each of the API resources
    with the output of kubectl describe <resource>
    """
    logger.info("Namespace '%s': Collecting API resources description", namespace)
    resources_out = OrderedDict()
    for resource in API_RESOURCES:
        output = run_kubectl_describe(namespace, resource)
        if output:
            resources_out[resource] = output
            # CONSISTENCY FIX: log prefix now matches collect_api_resources
            # ("Namespace '%s': + ..." instead of "Namespace: '%s' + ...").
            logger.info("Namespace '%s': + Collected %s", namespace, resource)

    # Write each collected description to <kind>.txt in the output directory.
    for entry, out in resources_out.items():
        with open(os.path.join(output_dir,
                               "{}.txt".format(entry)), "w+") as file_handle:
            file_handle.write(out)
def collect_pods_logs(namespace, output_dir):
    """Save `kubectl logs --all-containers` output for every pod in the namespace."""
    logger.info("Namespace '%s': Collecting pods' logs:", namespace)
    logs_dir = os.path.join(output_dir, "pods")

    pod_names = get_pod_names(namespace)
    if not pod_names:
        logger.warning("Namespace '%s' Could not get pods list - "
                       "skipping pods logs collection", namespace)
        return

    make_dir(logs_dir)

    for pod_name in pod_names:
        log_path = os.path.join(logs_dir, "{}.log".format(pod_name))
        with open(log_path, "w+") as log_file:
            _, output = run_shell_command(
                "kubectl logs --all-containers=true -n {} {}".format(namespace, pod_name))
            log_file.write(output)
        logger.info("Namespace '%s': + %s", namespace, pod_name)
def archive_files(output_dir, output_dir_name):
    """Compress the collection directory into <output_dir>.tar.gz and delete it."""
    archive_path = output_dir + ".tar.gz"

    with tarfile.open(archive_path, "w|gz") as tar:
        tar.add(output_dir, arcname=output_dir_name)
    logger.info("Archived files into %s", archive_path)

    # Best effort cleanup: the archive is already safely written.
    try:
        shutil.rmtree(output_dir)
    except OSError as ex:
        logger.warning("Failed to delete directory after archiving: %s", ex)
def get_pods(namespace, selector=""):
    """Return the pod objects of *namespace*, optionally filtered by a label selector.

    Returns None when the kubectl call fails.
    """
    selector_arg = '--selector="{}"'.format(selector) if selector else ""
    cmd = 'kubectl get pod -n {} {} -o json '.format(namespace, selector_arg)
    return_code, out = run_shell_command(cmd)
    if return_code:
        logger.warning("Failed to get pods: %s", out)
        return None
    return json.loads(out)['items']
def get_pod_names(namespace, selector=""):
"""
Returns list of pod names
"""
return [pod['metadata']['name'] for pod in get_pods(namespace, | |
# fb_index_diff.py
#!/usr/bin/python
# -----------------------------------------------------------------------------------------------
# fb_index_diff.py - Functions for the 'Index Management->Index Comparison' tab.
# WRW 7 May 2022 - Move raw index .csv files to 'Raw-Index' and build in 'Raw-Index' folder name here.
# -----------------------------------------------------------------------------------------------
from pathlib import Path
import sys
import subprocess
import shutil
import fitz
import tempfile
import gzip
import os
# -----------------------------------------------------------------------------------------------
def most_common( a ):
    """Return the element of *a* that occurs most often (ties broken arbitrarily)."""
    distinct_values = set(a)
    return max(distinct_values, key=a.count)
# -----------------------------------------------------------------------------------------------
class Diff():
    """State and helpers for the 'Index Management->Index Comparison' tab."""
    # --------------------------------------------------------------
    def __init__( self ):
        self.src = None
        self.local = None
        self.canonical = None          # canonical book currently selected
        self.table_loaded = False      # guard so load_tables() runs only once
        self.title_by_row = []
        self.pdf_window = None
        self.pdf_figure = None         # id of the image drawn on the PDF graph

    def set_elements( self, dc, info_canon, table, canon_table, display_pdf ):
        """Store references to the GUI elements this tab manipulates."""
        self.dc = dc
        self.index_diff_info_canonical = info_canon
        self.index_diff_table = table
        self.index_diff_canonical_table = canon_table
        self.display_pdf = display_pdf

    # -----------------------------------

    def set_classes( self, conf, sg, fb, pdf, meta ):
        """Store references to the collaborating application objects."""
        self.conf = conf
        self.sg = sg
        self.fb = fb
        self.pdf = pdf
        self.meta = meta

    # -----------------------------------

    def set_icon( self, t ):
        # NOTE(review): stores the icon in a module-level global, not on the
        # instance — confirm BL_Icon is consumed elsewhere in this module.
        global BL_Icon
        BL_Icon = t
# -----------------------------------------------------------------------------------------------
# WRW 15 Apr 2022 - An open question if I should do this all here or in fb_pdf.py. Needs here
# are very simple. Extracted code from fb_pdf.py. /// RESUME - add error checking.
# This opens the PDF file, gets the indicated page, gets and returns page dimensions.
# saving the dlist for display in part2 after zoom is determined.
def show_pdf_part1( self, canonical, page ):
    """Open the PDF for *canonical*, cache the display list for *page* (1-based),
    and return the page's (width, height) so the caller can pick a zoom.
    """
    file = self.fb.get_file_from_canonical( canonical )
    path = Path( self.fb.Music_File_Root, file )
    self.doc = fitz.open( path )
    fitz.TOOLS.mupdf_display_errors(False)  # Suppress error messages in books with alpha-numbered front matter.
    self.dlist = self.doc[ int(page) -1 ].get_displaylist()
    fitz.TOOLS.mupdf_display_errors(True)
    self.doc.close()  # Done with doc after we get displaylist

    fitz.TOOLS.mupdf_display_errors(False)  # Suppress error messages in books with alpha-numbered front matter.
    r = self.dlist.rect
    fitz.TOOLS.mupdf_display_errors(True)

    # Page dimensions from the rect corners: height = bottom-right - top-right.
    page_height = (r.br - r.tr).y
    page_width = (r.tr - r.tl).x
    return page_width, page_height

# --------------------------------------------------
#   This adjust the image size according to zoom and shows it in graph_element

def show_pdf_part2( self, zoom, graph_element ):
    """Render the cached display list at *zoom* and draw it on *graph_element*."""
    zoom_matrix = fitz.Matrix( zoom, zoom )
    image = self.dlist.get_pixmap( alpha=False, matrix=zoom_matrix )
    # Delete the previous image before drawing the new one to avoid stacking.
    if self.pdf_figure:
        graph_element.delete_figure( self.pdf_figure )
    self.pdf_figure = graph_element.draw_image( data = image.tobytes(), location = (0, 0) )
# -----------------------------------------------------------------------------------------------
# /// RESUME - Can optimize this to eliminate multiple calls for same page, prep and show.
def get_pdf_dimensions( self, canonical, page ):
    """Return (width, height) of *page* (1-based) of the PDF for *canonical*.

    Unlike show_pdf_part1() this does not cache the display list.
    """
    file = self.fb.get_file_from_canonical( canonical )
    path = Path( self.fb.Music_File_Root, file )
    doc = fitz.open( path )
    fitz.TOOLS.mupdf_display_errors(False)  # Suppress error messages in books with alpha-numbered front matter.
    r = doc[ int(page) -1 ].get_displaylist().rect
    fitz.TOOLS.mupdf_display_errors(True)
    page_height = (r.br - r.tr).y
    page_width = (r.tr - r.tl).x
    doc.close()
    return page_width, page_height
# -----------------------------------------------------------------------------------------------
# Populate the canonical table. Can't do this at init as don't have self.fb yet.
def load_tables( self ):
    """Populate the canonical table (once). Can't do this at init as we don't
    have self.fb yet."""
    if self.table_loaded:
        return
    self.table_loaded = True
    self.canonicals_data = self.fb.get_canonicals_with_index()
    # Each table row must be a one-element list.
    self.canonicals = [ [x['canonical']] for x in self.canonicals_data ]
    # self.index_diff_canonical_table.update( values = self.canonicals )
    self.fb.safe_update( self.index_diff_canonical_table, self.canonicals )
    self.srcs = self.fb.get_srcs()

# -----------------------------------
#   WRW 16 Apr 2022 - Want to get mouse release to remove popup of PDF display of clicked title.

def do_bind( self ):
    """Attach mouse bindings to the diff table (click, right-click, release)."""
    self.index_diff_table.bind( '<Button-1>', '-Click' )
    self.index_diff_table.bind( '<Button-3>', '-Right-Click' )
    self.index_diff_table.bind( '<ButtonRelease-1>', '-Button-1-Up' )

# -----------------------------------

def set_class_config( self ):
    # Intentionally empty: this tab has no per-class configuration to apply.
    pass
# ---------------------------------------------------------------------------------------------------------
# WRW 14 Apr 2022
# Display a window of all selected titles. User selects one. All others are mapped to that one
# throught file saved here.
def do_titles_to_edit_window( self, titles_to_edit ):
    """Show a modal window of similar titles beside a PDF preview.

    WRW 14 Apr 2022.  The user selects one title; all other titles in
    *titles_to_edit* are appended to the corrections '.A.txt' file as
    mappings to the selected one, to be applied the next time the raw
    index is processed.

    :param titles_to_edit: List of title strings to reconcile.
    :return: True if a mapping was saved, False if cancelled/closed.
    """
    # Preview page: the most common page recorded for the first title.
    page = self.most_common_by_title[ titles_to_edit[0] ]
    values = [ [x] for x in titles_to_edit ]        # Each row must be in separate array.
    intro = """Select one title in table and click 'Save'.
All other titles in table will be mapped to the selected one
the next time the raw index is processed."""
    intro_text = self.sg.Text( text=intro,
        font=("Helvetica", 10 ),
        pad = ((0,0), (0, 0)),
        justification='left',
    )
    titles_table = \
        self.sg.Table(
            key='titles-table',
            headings = [ "Title" ],
            font = ("Helvetica", 11),
            values = values,
            row_height = 25,
            col_widths = [ 60 ],
            num_rows = len( titles_to_edit ),
            auto_size_columns = False,      # Must be false for col_width to work.
            justification = 'left',
            pad=[(0, 0), (10, 0)],
            select_mode = None,
            # enable_events = True,
            expand_x = True,
            expand_y = True,
            hide_vertical_scroll = True,
        )
    b1 = self.sg.Button('Save',
        key='titles-button-save',
        font=("Helvetica", 9),
        pad=((0,0),(10,10)), )
    b2 = self.sg.Button('Cancel',
        key='titles-button-cancel',
        font=("Helvetica", 9),
        pad=((10,0),(10,10)), )
    # Size the graph to a fixed height, width scaled to the page aspect ratio.
    page_width, page_height = self.show_pdf_part1( self.canonical, page )
    graph_y_size = 600
    graph_x_size = int( graph_y_size * page_width/page_height )
    zoom = graph_x_size / page_width        # Fit width. Since fitting to the exact page, fit-width and
    # zoom = graph_y_size / page_height     # fit-height should give the same zoom.
    titles_pdf_graph = \
        self.sg.Graph( (graph_x_size, graph_y_size ),
            key = 'titles-pdf-graph',
            graph_bottom_left=(0, graph_y_size),
            graph_top_right=(graph_x_size, 0),
            # background_color = GraphBackground,
            pad=((0,0),(0,0)),
        )
    table_column = \
        self.sg.Column(
            [
                [ intro_text ],
                [ titles_table ],
                [ b1, b2 ],
            ],
            vertical_alignment='Top',
            justification='left',
            element_justification='left',
            pad=((10,0),(10,0)),
        )
    graph_column = \
        self.sg.Column(
            [
                [ titles_pdf_graph],
            ],
            vertical_alignment='Top',
            justification='left',
            element_justification='right',
            pad=((20,10),(10,10)),
        )
    titles_layout = [[ table_column, graph_column ]]
    titles_window = self.sg.Window( 'Select One from Similar Titles',
        return_keyboard_events=False,
        resizable=True,
        icon=BL_Icon,
        finalize = True,
        layout=titles_layout,
        keep_on_top = True,
    )
    # Window is finalized above, so the graph element exists and can be drawn into.
    self.show_pdf_part2( zoom, titles_window[ 'titles-pdf-graph' ] )
    # ------------------------------------------
    # EVENT Loop for titles edit window.
    while True:
        event, values = titles_window.Read( )
        if event == self.sg.WINDOW_CLOSED:
            return False
        elif event == 'titles-button-cancel':
            titles_window.close()
            return False
        elif event == 'titles-button-save':
            if not values['titles-table']:
                # Nothing selected in the table; prompt and keep the window open.
                t = "Please select a title in table to map or click 'Cancel'"
                self.conf.do_popup( t )
                continue
            else:
                row = values['titles-table'][0]
                # print( "Selected:", titles_to_edit[ row ] )
                selected = titles_to_edit[ row ]
                others = [ titles_to_edit[ x ] for x in range( 0, len( titles_to_edit )) if x != row ]
                # print( "Others:", others )
                # Append "other | selected | canonical" lines; consumed when the
                # raw index is next processed.
                ofile = Path( self.conf.val( 'corrections' )).with_suffix( '.A.txt' )
                with open( ofile, "a" ) as fo:
                    for other in others:
                        print( f"{other} | {selected} | {self.canonical}", file=fo )
                titles_window.close()
                return True
# ---------------------------------------------------------------------------------------------------------
# graph_x_size = int( graph_y_size * 9.0/12.0 )
def do_pdf_popup( self, canonical, page, sheet=None ):
    """Show a borderless popup window previewing one PDF page.

    The window has no event loop of its own; it is closed by the
    'index-diff-table-Button-1-Up' event handled in process_events().

    :param canonical: Canonical book name to preview.
    :param page: 1-based PDF page number to show.
    :param sheet: Optional sheet label shown in the header.
    """
    page_width, page_height = self.show_pdf_part1( canonical, page )
    # Fixed height; width scaled to the page aspect ratio.
    graph_y_size = 600
    graph_x_size = int( graph_y_size * page_width/page_height )
    zoom = graph_x_size / page_width        # Fit width. Since fitting to the exact page, fit-width and
    # zoom = graph_y_size / page_height     # fit-height should give the same zoom.
    # Build sg.Graph() using dimensions above.
    page_number = \
        self.sg.Text( text= f"PDF Page: {page}",
            font=("Helvetica", 11 ),
            pad=((0,0), (0, 10)),
            expand_x = True,
            text_color = 'black',
            background_color = '#e0e0ff',
        )
    sheet_number = \
        self.sg.Text( text= f"Sheet: {sheet}",
            font=("Helvetica", 11 ),
            pad=((0,0), (0, 10)),
            expand_x = True,
            text_color = 'black',
            background_color = '#e0e0ff',
            justification = 'right',
        )
    pdf_graph = \
        self.sg.Graph( (graph_x_size, graph_y_size ),
            key = 'pdf-graph',
            graph_bottom_left=(0, graph_y_size),
            graph_top_right=(graph_x_size, 0),
            pad=((0,0),(0,0)),
        )
    layout = [ [page_number, sheet_number], [ pdf_graph ]]
    # Saved on self so process_events() can close it on mouse release.
    self.pdf_window = self.sg.Window( '-',
        return_keyboard_events=False,
        resizable=True,
        icon=BL_Icon,
        finalize = True,
        layout=layout,
        keep_on_top = True,
        no_titlebar = True,
        margins = (10, 10),
        background_color = '#e0e0ff',
    )
    self.show_pdf_part2( zoom, self.pdf_window[ 'pdf-graph' ] )
    # ------------------------------------------
    # No EVENT Loop for pdf popup window.
    # Return here and process 'index-diff-table-Button-1-Up' event
    # in process_events().
    return
# ---------------------------------------------------------------------------------------------------------
def process_events( self, event, values ):
# ------------------------------------------
# WRW 16 Apr 2022 - Remove pdf_window on mouse release.
if event == 'index-diff-table-Button-1-Up':
if self.pdf_window:
self.pdf_window.close()
return True
# ------------------------------------------
# Click in canonicals table. Select book to show in main table.
# Change state of show-all radio box.
if ( event == 'index-diff-canonical-table' or
event == 'index-diff-controls-1' or
event == 'index-diff-controls-2' or
event == 'index-diff-controls-3' ):
if( 'index-diff-canonical-table' in values and len( values[ 'index-diff-canonical-table' ] ) ):
self.canonical = self.canonicals[ values[ 'index-diff-canonical-table' ][0] ][0]
self.index_diff_info_canonical.update( value = self.canonical )
data = self.fb.get_diff_data( self.canonical )
show_data = False
# ------------------------------------------------
# One pass over data to build array by title and accumulate all srcs covering canonical.
# WRW 1 Apr 2022 - Add partial coverage identification. Sort titles as dicts are not | |
self._nplike
)
self._handle_error(
self._nplike[
"awkward_ListArray_num",
tonum.dtype.type,
self.starts.dtype.type,
self.stops.dtype.type,
](
tonum.data,
self.starts.data,
self.stops.data,
self.length,
)
)
return ak._v2.contents.numpyarray.NumpyArray(
tonum, None, None, self._nplike
)
else:
next = self._content.num(posaxis, depth + 1)
offsets = self._compact_offsets64(True)
return ak._v2.contents.listoffsetarray.ListOffsetArray(
offsets, next, None, self.parameters, self._nplike
)
def _offsets_and_flattened(self, axis, depth):
    """Return (offsets, flattened content) for flattening at ``axis``.

    At this depth the list structure itself is removed; deeper axes
    recurse into the content and combine the inner offsets through the
    'flatten_offsets' kernel.
    """
    posaxis = self.axis_wrap_if_negative(axis)
    if posaxis == depth:
        # Flattening at the record/outermost level is not defined.
        raise ak._v2._util.error(np.AxisError("axis=0 not allowed for flatten"))
    elif posaxis == depth + 1:
        # Flatten this list dimension: drop structure, trim content to the
        # region actually covered by the offsets.
        listoffsetarray = self.toListOffsetArray64(True)
        stop = listoffsetarray.offsets[-1]
        content = listoffsetarray.content._getitem_range(slice(0, stop))
        return (listoffsetarray.offsets, content)
    else:
        # Flatten a deeper axis, then translate the inner offsets through
        # this array's own offsets.
        inneroffsets, flattened = self._content._offsets_and_flattened(
            posaxis, depth + 1
        )
        # Empty offsets signal "no offset change at this level" to the caller.
        offsets = ak._v2.index.Index64.zeros(0, self._nplike, dtype=np.int64)
        if inneroffsets.length == 0:
            return (
                offsets,
                ListOffsetArray(
                    self._offsets, flattened, None, self._parameters, self._nplike
                ),
            )
        elif self._offsets.length == 1:
            # Zero lists: the single inner offset is the whole story.
            tooffsets = ak._v2.index.Index64([inneroffsets[0]])
            return (
                offsets,
                ListOffsetArray(
                    tooffsets, flattened, None, self._parameters, self._nplike
                ),
            )
        else:
            tooffsets = ak._v2.index.Index64.empty(
                self._offsets.length, self._nplike, dtype=np.int64
            )
            # Kernel precondition: all buffers on the same backend.
            assert (
                tooffsets.nplike is self._nplike
                and self._offsets.nplike is self._nplike
                and inneroffsets.nplike is self._nplike
            )
            # tooffsets[i] = inneroffsets[self._offsets[i]]
            self._handle_error(
                self._nplike[
                    "awkward_ListOffsetArray_flatten_offsets",
                    tooffsets.dtype.type,
                    self._offsets.dtype.type,
                    inneroffsets.dtype.type,
                ](
                    tooffsets.data,
                    self._offsets.data,
                    self._offsets.length,
                    inneroffsets.data,
                    inneroffsets.length,
                )
            )
            return (
                offsets,
                ListOffsetArray(
                    tooffsets, flattened, None, self._parameters, self._nplike
                ),
            )
def mergeable(self, other, mergebool):
    """Return True if ``other`` can be merged with this list array.

    Parameters must match; option-type and indexed wrappers are tested by
    their contents; other list-like layouts are mergeable if the contents
    are mergeable.
    """
    if not _parameters_equal(self._parameters, other._parameters):
        return False
    always_mergeable = (
        ak._v2.contents.emptyarray.EmptyArray,
        ak._v2.contents.unionarray.UnionArray,
    )
    wrapper_types = (
        ak._v2.contents.indexedarray.IndexedArray,
        ak._v2.contents.indexedoptionarray.IndexedOptionArray,
        ak._v2.contents.bytemaskedarray.ByteMaskedArray,
        ak._v2.contents.bitmaskedarray.BitMaskedArray,
        ak._v2.contents.unmaskedarray.UnmaskedArray,
    )
    list_types = (
        ak._v2.contents.regulararray.RegularArray,
        ak._v2.contents.listarray.ListArray,
        ak._v2.contents.listoffsetarray.ListOffsetArray,
    )
    if isinstance(other, always_mergeable):
        return True
    if isinstance(other, wrapper_types):
        # Look through the wrapper and test its content instead.
        return self.mergeable(other.content, mergebool)
    if isinstance(other, list_types):
        # List-like layouts merge if their contents do.
        return self._content.mergeable(other.content, mergebool)
    return False
def mergemany(self, others):
    """Concatenate this array with each layout in ``others``.

    Delegates to ListArray.mergemany by viewing this ListOffsetArray as an
    equivalent starts/stops ListArray.
    """
    if not others:
        return self
    as_listarray = ak._v2.contents.listarray.ListArray(
        self.starts, self.stops, self._content, None, self._parameters, self._nplike
    )
    return as_listarray.mergemany(others)
def fillna(self, value):
    """Return a copy whose content has missing values replaced by ``value``.

    The list structure (offsets) is unchanged; only the content is rebuilt.
    """
    filled_content = self._content.fillna(value)
    return ListOffsetArray(
        self._offsets,
        filled_content,
        self._identifier,
        self._parameters,
        self._nplike,
    )
def _localindex(self, axis, depth):
    """Return an array of per-list indices (0, 1, 2, ...) at ``axis``."""
    posaxis = self.axis_wrap_if_negative(axis)
    if posaxis == depth:
        return self._localindex_axis0()
    elif posaxis == depth + 1:
        # Index within each list: compute via the ListArray_localindex kernel.
        offsets = self._compact_offsets64(True)
        if self._nplike.known_data:
            # Total number of inner elements is the last offset.
            innerlength = offsets[offsets.length - 1]
        else:
            innerlength = ak._v2._typetracer.UnknownLength
        localindex = ak._v2.index.Index64.empty(innerlength, self._nplike)
        # Kernel precondition: all buffers on the same backend.
        assert localindex.nplike is self._nplike and offsets.nplike is self._nplike
        self._handle_error(
            self._nplike[
                "awkward_ListArray_localindex",
                localindex.dtype.type,
                offsets.dtype.type,
            ](
                localindex.data,
                offsets.data,
                offsets.length - 1,
            )
        )
        return ak._v2.contents.listoffsetarray.ListOffsetArray(
            offsets,
            ak._v2.contents.NumpyArray(localindex),
            self._identifier,
            self._parameters,
            self._nplike,
        )
    else:
        # Deeper axis: recurse into the content, keep this level's offsets.
        return ak._v2.contents.listoffsetarray.ListOffsetArray(
            self._offsets,
            self._content._localindex(posaxis, depth + 1),
            self._identifier,
            self._parameters,
            self._nplike,
        )
def numbers_to_type(self, name):
    """Return a copy whose numeric content is converted to dtype ``name``.

    Only the content changes; offsets, identifier and parameters are kept.
    """
    converted = self._content.numbers_to_type(name)
    return ak._v2.contents.listoffsetarray.ListOffsetArray(
        self._offsets,
        converted,
        self._identifier,
        self._parameters,
        self._nplike,
    )
def _is_unique(self, negaxis, starts, parents, outlength):
    """Return True if the values along ``negaxis`` are unique."""
    # An empty array of lists is trivially unique.
    if self._offsets.length - 1 == 0:
        return True
    branch, depth = self.branch_depth
    if (
        self.parameter("__array__") == "string"
        or self.parameter("__array__") == "bytestring"
    ):
        if branch or (negaxis is not None and negaxis != depth):
            raise ak._v2._util.error(
                ValueError(
                    "array with strings can only be checked on uniqueness with axis=-1"
                )
            )
        # FIXME: check validity error
        if isinstance(self._content, ak._v2.contents.NumpyArray):
            # Deduplicate the strings and compare lengths: no length change
            # means no duplicates.
            out, outoffsets = self._content._as_unique_strings(self._offsets)
            out2 = ak._v2.contents.listoffsetarray.ListOffsetArray(
                outoffsets,
                out,
                None,
                self._parameters,
                self._nplike,
            )
            return out2.length == self.length
    if negaxis is None:
        return self._content._is_unique(negaxis, starts, parents, outlength)
    if not branch and (negaxis == depth):
        # The target axis is this list dimension: descend with negaxis - 1.
        return self._content._is_unique(negaxis - 1, starts, parents, outlength)
    else:
        # Deeper axis: compute per-element parents so the content knows which
        # list each value belongs to, then recurse.
        nextparents = ak._v2.index.Index64.empty(
            self._offsets[-1] - self._offsets[0], self._nplike
        )
        # Kernel precondition: all buffers on the same backend.
        assert (
            nextparents.nplike is self._nplike
            and self._offsets.nplike is self._nplike
        )
        self._handle_error(
            self._nplike[
                "awkward_ListOffsetArray_reduce_local_nextparents_64",
                nextparents.dtype.type,
                self._offsets.dtype.type,
            ](
                nextparents.data,
                self._offsets.data,
                self._offsets.length - 1,
            )
        )
        starts = self._offsets[:-1]
        return self._content._is_unique(negaxis, starts, nextparents, outlength)
def _unique(self, negaxis, starts, parents, outlength):
    """Return a layout with duplicate values removed along ``negaxis``."""
    # Nothing to deduplicate in an empty array of lists.
    if self._offsets.length - 1 == 0:
        return self
    branch, depth = self.branch_depth
    if (
        self.parameter("__array__") == "string"
        or self.parameter("__array__") == "bytestring"
    ):
        if branch or (negaxis != depth):
            raise ak._v2._util.error(
                np.AxisError("array with strings can only be sorted with axis=-1")
            )
        # FIXME: check validity error
        if isinstance(self._content, ak._v2.contents.NumpyArray):
            # Strings: deduplicate whole strings via the specialized helper.
            out, nextoffsets = self._content._as_unique_strings(self._offsets)
            return ak._v2.contents.ListOffsetArray(
                nextoffsets,
                out,
                None,
                self._parameters,
                self._nplike,
            )
    if not branch and (negaxis == depth):
        # The target axis is this list dimension: regroup the content by
        # parent list, deduplicate one level down, then carry results back.
        if (
            self.parameter("__array__") == "string"
            or self.parameter("__array__") == "bytestring"
        ):
            raise ak._v2._util.error(
                np.AxisError("array with strings can only be sorted with axis=-1")
            )
        if self._nplike.known_shape and parents.nplike.known_shape:
            assert self._offsets.length - 1 == parents.length
        nextlen = self._offsets[-1] - self._offsets[0]
        # maxcount: length of the longest list; offsetscopy: scratch copy of offsets.
        maxcount = ak._v2.index.Index64.empty(1, self._nplike)
        offsetscopy = ak._v2.index.Index64.empty(self._offsets.length, self._nplike)
        # Kernel precondition: all buffers on the same backend.
        assert (
            maxcount.nplike is self._nplike
            and offsetscopy.nplike is self._nplike
            and self._offsets.nplike is self._nplike
        )
        self._handle_error(
            self._nplike[
                "awkward_ListOffsetArray_reduce_nonlocal_maxcount_offsetscopy_64",
                maxcount.dtype.type,
                offsetscopy.dtype.type,
                self._offsets.dtype.type,
            ](
                maxcount.data,
                offsetscopy.data,
                self._offsets.data,
                self._offsets.length - 1,
            )
        )
        distincts_length = outlength * maxcount[0]
        nextcarry = ak._v2.index.Index64.empty(nextlen, self._nplike)
        nextparents = ak._v2.index.Index64.empty(nextlen, self._nplike)
        maxnextparents = ak._v2.index.Index64.empty(1, self._nplike)
        distincts = ak._v2.index.Index64.empty(distincts_length, self._nplike)
        assert (
            nextcarry.nplike is self._nplike
            and nextparents.nplike is self._nplike
            and maxnextparents.nplike is self._nplike
            and distincts.nplike is self._nplike
            and self._offsets.nplike is self._nplike
            and offsetscopy.nplike is self._nplike
            and parents.nplike is self._nplike
        )
        # Builds the carry order that transposes list-of-lists into
        # position-major order for the recursive step.
        self._handle_error(
            self._nplike[
                "awkward_ListOffsetArray_reduce_nonlocal_preparenext_64",
                nextcarry.dtype.type,
                nextparents.dtype.type,
                maxnextparents.dtype.type,
                distincts.dtype.type,
                self._offsets.dtype.type,
                offsetscopy.dtype.type,
                parents.dtype.type,
            ](
                nextcarry.data,
                nextparents.data,
                nextlen,
                maxnextparents.data,
                distincts.data,
                distincts_length,
                offsetscopy.data,
                self._offsets.data,
                self._offsets.length - 1,
                parents.data,
                maxcount[0],
            )
        )
        nextstarts = ak._v2.index.Index64.empty(maxnextparents[0] + 1, self._nplike)
        assert (
            nextstarts.nplike is self._nplike and nextparents.nplike is self._nplike
        )
        self._handle_error(
            self._nplike[
                "awkward_ListOffsetArray_reduce_nonlocal_nextstarts_64",
                nextstarts.dtype.type,
                nextparents.dtype.type,
            ](
                nextstarts.data,
                nextparents.data,
                nextlen,
            )
        )
        # Recurse into the regrouped content with negaxis reduced by one.
        nextcontent = self._content._carry(nextcarry, False)
        outcontent = nextcontent._unique(
            negaxis - 1,
            nextstarts,
            nextparents,
            maxnextparents[0] + 1,
        )
        # Invert the carry so results return to the original list order.
        outcarry = ak._v2.index.Index64.empty(nextlen, self._nplike)
        assert outcarry.nplike is self._nplike and nextcarry.nplike is self._nplike
        self._handle_error(
            self._nplike[
                "awkward_ListOffsetArray_local_preparenext_64",
                outcarry.dtype.type,
                nextcarry.dtype.type,
            ](
                outcarry.data,
                nextcarry.data,
                nextlen,
            )
        )
        return ak._v2.contents.ListOffsetArray(
            outcontent._compact_offsets64(True),
            outcontent._content._carry(outcarry, False),
            None,
            self._parameters,
            self._nplike,
        )
    else:
        # Deeper axis: compute per-element parents and recurse on the
        # trimmed content (only the region covered by the offsets).
        nextparents = ak._v2.index.Index64.empty(
            self._offsets[-1] - self._offsets[0], self._nplike
        )
        assert (
            nextparents.nplike is self._nplike
            and self._offsets.nplike is self._nplike
        )
        self._handle_error(
            self._nplike[
                "awkward_ListOffsetArray_reduce_local_nextparents_64",
                nextparents.dtype.type,
                self._offsets.dtype.type,
            ](
                nextparents.data,
                self._offsets.data,
                self._offsets.length - 1,
            )
        )
        trimmed = self._content[self._offsets[0] : self._offsets[-1]]
        outcontent = trimmed._unique(
            negaxis,
            self._offsets[:-1],
            nextparents,
            self._offsets.length - 1,
        )
        if negaxis is None or negaxis == depth - 1:
            return outcontent
        outoffsets = self._compact_offsets64(True)
        return ak._v2.contents.ListOffsetArray(
            outoffsets,
            outcontent,
            None,
            self._parameters,
            self._nplike,
        )
def _argsort_next(
self,
negaxis,
starts,
shifts,
parents,
outlength,
ascending,
stable,
kind,
order,
):
if self._offsets.length - 1 == 0:
return ak._v2.contents.NumpyArray(
self.nplike.empty(0, np.int64), None, None, self._nplike
)
branch, depth = self.branch_depth
if (
self.parameter("__array__") == "string"
or self.parameter("__array__") == "bytestring"
):
if branch or (negaxis != depth):
raise ak._v2._util.error(
np.AxisError("array with strings can only be sorted with axis=-1")
)
# FIXME: check validity error
if isinstance(self._content, ak._v2.contents.NumpyArray):
nextcarry = ak._v2.index.Index64.empty(
self._offsets.length - 1, self._nplike
)
self_starts, self_stops = self._offsets[:-1], self._offsets[1:]
assert (
nextcarry.nplike is self._nplike
and parents.nplike is self._nplike
and self._content.nplike is self._nplike
and self_starts.nplike is self._nplike
and self_stops.nplike is self._nplike
)
self._handle_error(
self._nplike[
"awkward_ListOffsetArray_argsort_strings",
nextcarry.dtype.type,
parents.dtype.type,
self._content.dtype.type,
self_starts.dtype.type,
self_stops.dtype.type,
](
nextcarry.data,
parents.data,
parents.length,
self._content._data,
self_starts.data,
self_stops.data,
stable,
ascending,
True,
)
)
return ak._v2.contents.NumpyArray(nextcarry, None, None, self._nplike)
if not branch and (negaxis == depth):
if (
self.parameter("__array__") == "string"
or self.parameter("__array__") == "bytestring"
):
raise ak._v2._util.error(
np.AxisError("array with strings can only be sorted with axis=-1")
)
if self._nplike.known_shape and parents.nplike.known_shape:
assert self._offsets.length - 1 == parents.length
maxcount = ak._v2.index.Index64.empty(1, self._nplike)
offsetscopy = ak._v2.index.Index64.empty(self._offsets.length, self._nplike)
assert (
maxcount.nplike is self._nplike
and offsetscopy.nplike is self._nplike
and self._offsets.nplike is self._nplike
)
self._handle_error(
self._nplike[
"awkward_ListOffsetArray_reduce_nonlocal_maxcount_offsetscopy_64",
maxcount.dtype.type,
offsetscopy.dtype.type,
self._offsets.dtype.type,
](
maxcount.data,
offsetscopy.data,
self._offsets.data,
self._offsets.length - 1,
)
)
maxcount = maxcount[0]
nextlen = self._offsets[-1] - self._offsets[0]
nextcarry = ak._v2.index.Index64.empty(nextlen, self._nplike)
nextparents = ak._v2.index.Index64.empty(nextlen, self._nplike)
maxnextparents = ak._v2.index.Index64.empty(1, self._nplike)
distincts = ak._v2.index.Index64.empty(maxcount * outlength, self._nplike)
assert (
nextcarry.nplike is self._nplike
and nextparents.nplike is self._nplike
and maxnextparents.nplike is self._nplike
and distincts.nplike is self._nplike
and self._offsets.nplike is self._nplike
and offsetscopy.nplike is self._nplike
and parents.nplike is self._nplike
)
self._handle_error(
self._nplike[
"awkward_ListOffsetArray_reduce_nonlocal_preparenext_64",
nextcarry.dtype.type,
nextparents.dtype.type,
maxnextparents.dtype.type,
distincts.dtype.type,
self._offsets.dtype.type,
offsetscopy.dtype.type,
parents.dtype.type,
](
nextcarry.data,
nextparents.data,
nextlen,
maxnextparents.data,
distincts.data,
maxcount * outlength,
offsetscopy.data,
self._offsets.data,
self._offsets.length - 1,
parents.data,
maxcount,
)
)
nextstarts_length = maxnextparents[0] + 1
nextstarts = ak._v2.index.Index64.empty(
nextstarts_length, self._nplike, np.int64
)
assert (
nextstarts.nplike is self._nplike and nextparents.nplike is self._nplike
)
self._handle_error(
self._nplike[
"awkward_ListOffsetArray_reduce_nonlocal_nextstarts_64",
nextstarts.dtype.type,
nextparents.dtype.type,
](
nextstarts.data,
nextparents.data,
nextlen,
)
)
nummissing = ak._v2.index.Index64.empty(maxcount, self._nplike)
missing = ak._v2.index.Index64.empty(self._offsets[-1], self._nplike)
nextshifts = ak._v2.index.Index64.empty(nextlen, self._nplike)
assert (
nummissing.nplike is self._nplike
and missing.nplike is self._nplike
and nextshifts.nplike is self._nplike
and self._offsets.nplike is self._nplike
and starts.nplike is self._nplike
and parents.nplike is self._nplike
and nextcarry.nplike is self._nplike
)
self._handle_error(
self._nplike[
"awkward_ListOffsetArray_reduce_nonlocal_nextshifts_64",
nummissing.dtype.type,
missing.dtype.type,
nextshifts.dtype.type,
self._offsets.dtype.type,
starts.dtype.type,
parents.dtype.type,
nextcarry.dtype.type,
](
nummissing.data,
missing.data,
nextshifts.data,
self._offsets.data,
self._offsets.length - 1,
starts.data,
parents.data,
maxcount,
nextlen,
nextcarry.data,
)
)
nextcontent = self._content._carry(nextcarry, False)
outcontent = nextcontent._argsort_next(
negaxis - 1,
nextstarts,
nextshifts,
nextparents,
nextstarts_length,
ascending,
stable,
kind,
order,
)
outcarry = ak._v2.index.Index64.empty(nextlen, self._nplike)
assert outcarry.nplike is self._nplike and nextcarry.nplike is self._nplike
self._handle_error(
self._nplike[
"awkward_ListOffsetArray_local_preparenext_64",
outcarry.dtype.type,
nextcarry.dtype.type,
](
outcarry.data,
nextcarry.data,
nextlen,
| |
-1, -1),
A vertex at (1, -1, 1),
A vertex at (1, 1, -1)]
sage: P = polytopes.permutahedron(5)
sage: P.an_affine_basis()
[A vertex at (1, 2, 3, 5, 4),
A vertex at (2, 1, 3, 5, 4),
A vertex at (1, 3, 2, 5, 4),
A vertex at (4, 1, 3, 5, 2),
A vertex at (4, 2, 5, 3, 1)]
The method is not implemented for unbounded polyhedra::
sage: p = Polyhedron(vertices=[(0,0)],rays=[(1,0),(0,1)])
sage: p.an_affine_basis()
Traceback (most recent call last):
...
NotImplementedError: this function is not implemented for unbounded polyhedra
"""
if not self.is_compact():
raise NotImplementedError("this function is not implemented for unbounded polyhedra")
chain = self.a_maximal_chain()[1:] # we exclude the empty face
chain_indices = [face.ambient_V_indices() for face in chain]
basis_indices = []
# We use in the following that elements in ``chain_indices`` are sorted lists
# of V-indices.
# Thus for each two faces we can easily find the first vertex that differs.
for dim, face in enumerate(chain_indices):
if dim == 0:
# Append the vertex.
basis_indices.append(face[0])
continue
prev_face = chain_indices[dim-1]
for i in range(len(prev_face)):
if prev_face[i] != face[i]:
# We found a vertex that ``face`` has, but its facet does not.
basis_indices.append(face[i])
break
else: # no break
# ``prev_face`` contains all the same vertices as ``face`` until now.
# But ``face`` is guaranteed to contain one more vertex (at least).
basis_indices.append(face[len(prev_face)])
return [self.Vrepresentation()[i] for i in basis_indices]
def _test_an_affine_basis(self, tester=None, **options):
    """
    Run tests on the method :meth:`.an_affine_basis`

    TESTS::

        sage: polytopes.cross_polytope(3)._test_an_affine_basis()
    """
    if tester is None:
        tester = self._tester(**options)
    # The method is only implemented for compact polyhedra.
    if not self.is_compact():
        return
    basis = self.an_affine_basis()
    # Homogenize with a leading 1; an affine basis must have full rank.
    m = matrix([1] + list(v) for v in basis)
    tester.assertEqual(m.rank(), self.dim() + 1)
    for v in basis:
        tester.assertIn(v, self.vertices())
def ray_generator(self):
    """
    Return a generator for the rays of the polyhedron.

    EXAMPLES::

        sage: pi = Polyhedron(ieqs = [[1,1,0],[1,0,1]])
        sage: pir = pi.ray_generator()
        sage: [x.vector() for x in pir]
        [(1, 0), (0, 1)]
    """
    # Filter the V-representation down to its ray objects.
    yield from (v for v in self.Vrepresentation() if v.is_ray())
@cached_method
def rays(self):
    """
    Return a list of rays of the polyhedron.

    OUTPUT:

    A tuple of rays.

    EXAMPLES::

        sage: p = Polyhedron(ieqs = [[0,0,0,1],[0,0,1,0],[1,1,0,0]])
        sage: p.rays()
        (A ray in the direction (1, 0, 0),
         A ray in the direction (0, 1, 0),
         A ray in the direction (0, 0, 1))
    """
    # Materialize the generator as an immutable tuple so the result can be cached.
    return tuple(self.ray_generator())
def rays_list(self):
    """
    Return a list of rays as coefficient lists.

    .. NOTE::

        It is recommended to use :meth:`rays` or
        :meth:`ray_generator` instead to iterate over the list of
        :class:`Ray` objects.

    OUTPUT:

    A list of rays as lists of coordinates.

    EXAMPLES::

        sage: p = Polyhedron(ieqs = [[0,0,0,1],[0,0,1,0],[1,1,0,0]])
        sage: p.rays_list()
        [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
        sage: p.rays_list() == [list(r) for r in p.ray_generator()]
        True
    """
    # Convert each Ray object into a plain list of coordinates.
    return list(map(list, self.ray_generator()))
def line_generator(self):
    """
    Return a generator for the lines of the polyhedron.

    EXAMPLES::

        sage: pr = Polyhedron(rays = [[1,0],[-1,0],[0,1]], vertices = [[-1,-1]])
        sage: next(pr.line_generator()).vector()
        (1, 0)
    """
    # Filter the V-representation down to its line objects.
    yield from (v for v in self.Vrepresentation() if v.is_line())
@cached_method
def lines(self):
    """
    Return all lines of the polyhedron.

    OUTPUT:

    A tuple of lines.

    EXAMPLES::

        sage: p = Polyhedron(rays = [[1,0],[-1,0],[0,1],[1,1]], vertices = [[-2,-2],[2,3]])
        sage: p.lines()
        (A line in the direction (1, 0),)
    """
    # Materialize the generator as an immutable tuple so the result can be cached.
    return tuple(self.line_generator())
def lines_list(self):
    """
    Return a list of lines of the polyhedron.  The line data is given
    as a list of coordinates rather than as a Hrepresentation object.

    .. NOTE::

        It is recommended to use :meth:`line_generator` instead to
        iterate over the list of :class:`Line` objects.

    EXAMPLES::

        sage: p = Polyhedron(rays = [[1,0],[-1,0],[0,1],[1,1]], vertices = [[-2,-2],[2,3]])
        sage: p.lines_list()
        [[1, 0]]
        sage: p.lines_list() == [list(x) for x in p.line_generator()]
        True
    """
    # Convert each Line object into a plain list of coordinates.
    return list(map(list, self.line_generator()))
def bounded_edges(self):
    """
    Return the bounded edges (excluding rays and lines).

    OUTPUT:

    A generator for pairs of vertices, one pair per edge.

    EXAMPLES::

        sage: p = Polyhedron(vertices=[[1,0],[0,1]], rays=[[1,0],[0,1]])
        sage: [ e for e in p.bounded_edges() ]
        [(A vertex at (0, 1), A vertex at (1, 0))]
        sage: for e in p.bounded_edges(): print(e)
        (A vertex at (0, 1), A vertex at (1, 0))
    """
    vreps = self.Vrepresentation()
    # vertex_adjacency_matrix() is cached; fetch it once up front.
    adjacency = self.vertex_adjacency_matrix()
    for i, first in enumerate(vreps):
        if not first.is_vertex():
            continue
        for j in range(i + 1, len(vreps)):
            second = vreps[j]
            # Only adjacent vertex/vertex pairs span a bounded edge.
            if second.is_vertex() and adjacency[i, j] != 0:
                yield (first, second)
def Vrepresentation_space(self):
    r"""
    Return the ambient vector space.

    OUTPUT:

    A free module over the base ring of dimension :meth:`ambient_dim`.

    EXAMPLES::

        sage: poly_test = Polyhedron(vertices = [[1,0,0,0],[0,1,0,0]])
        sage: poly_test.Vrepresentation_space()
        Ambient free module of rank 4 over the principal ideal domain Integer Ring
        sage: poly_test.ambient_space() is poly_test.Vrepresentation_space()
        True
    """
    # Delegate to the parent, which owns the ambient modules.
    return self.parent().Vrepresentation_space()

# Alias; see the doctest above: ambient_space() is Vrepresentation_space().
ambient_space = Vrepresentation_space
def Hrepresentation_space(self):
    r"""
    Return the linear space containing the H-representation vectors.

    OUTPUT:

    A free module over the base ring of dimension :meth:`ambient_dim` + 1
    (one extra coordinate for the inhomogeneous term).

    EXAMPLES::

        sage: poly_test = Polyhedron(vertices = [[1,0,0,0],[0,1,0,0]])
        sage: poly_test.Hrepresentation_space()
        Ambient free module of rank 5 over the principal ideal domain Integer Ring
    """
    # Delegate to the parent, which owns the ambient modules.
    return self.parent().Hrepresentation_space()
def ambient_dim(self):
    r"""
    Return the dimension of the ambient space.

    EXAMPLES::

        sage: poly_test = Polyhedron(vertices = [[1,0,0,0],[0,1,0,0]])
        sage: poly_test.ambient_dim()
        4
    """
    # The parent knows the ambient dimension; the polyhedron just delegates.
    return self.parent().ambient_dim()
def dim(self):
    """
    Return the dimension of the polyhedron.

    OUTPUT:

    -1 if the polyhedron is empty, otherwise a non-negative integer.

    EXAMPLES::

        sage: simplex = Polyhedron(vertices = [[1,0,0,0],[0,0,0,1],[0,1,0,0],[0,0,1,0]])
        sage: simplex.dim()
        3
        sage: simplex.ambient_dim()
        4

    The empty set is a special case (:trac:`12193`)::

        sage: P1=Polyhedron(vertices=[[1,0,0],[0,1,0],[0,0,1]])
        sage: P2=Polyhedron(vertices=[[2,0,0],[0,2,0],[0,0,2]])
        sage: P12 = P1.intersection(P2)
        sage: P12
        The empty polyhedron in ZZ^3
        sage: P12.dim()
        -1
    """
    # The empty polyhedron (no V-representation) has dimension -1 by convention.
    if self.n_Vrepresentation() == 0:
        return -1
    # Each equation in the H-representation cuts one dimension off the ambient space.
    return self.ambient_dim() - self.n_equations()

dimension = dim
def is_empty(self):
    """
    Test whether the polyhedron is the empty polyhedron

    OUTPUT:

    Boolean.

    EXAMPLES::

        sage: P = Polyhedron(vertices=[[1,0,0],[0,1,0],[0,0,1]]); P
        A 2-dimensional polyhedron in ZZ^3 defined as the convex hull of 3 vertices
        sage: P.is_empty(), P.is_universe()
        (False, False)

        sage: Q = Polyhedron(vertices=()); Q
        The empty polyhedron in ZZ^0
        sage: Q.is_empty(), Q.is_universe()
        (True, False)

        sage: R = Polyhedron(lines=[(1,0),(0,1)]); R
        A 2-dimensional polyhedron in ZZ^2 defined as the convex hull of 1 vertex and 2 lines
        sage: R.is_empty(), R.is_universe()
        (False, True)
    """
    # A polyhedron is empty exactly when it has no V-representation objects.
    return not self.n_Vrepresentation()
def is_universe(self):
    """
    Test whether the polyhedron is the whole ambient space

    OUTPUT:

    Boolean.

    EXAMPLES::

        sage: P = Polyhedron(vertices=[[1,0,0],[0,1,0],[0,0,1]]); P
        A 2-dimensional polyhedron in ZZ^3 defined as the convex hull of 3 vertices
        sage: P.is_empty(), P.is_universe()
        (False, False)

        sage: Q = Polyhedron(vertices=()); Q
        The empty polyhedron in ZZ^0
        sage: Q.is_empty(), Q.is_universe()
        (True, False)

        sage: R = Polyhedron(lines=[(1,0),(0,1)]); R
        A 2-dimensional polyhedron in ZZ^2 defined as the convex hull of 1 vertex and 2 lines
        sage: R.is_empty(), R.is_universe()
        (False, True)
    """
    # The whole space is cut out by no inequalities or equations at all.
    return not self.n_Hrepresentation()
@cached_method
def vertex_adjacency_matrix(self):
"""
Return the binary matrix of vertex adjacencies.
EXAMPLES::
sage: polytopes.simplex(4).vertex_adjacency_matrix()
[0 1 1 1 1]
[1 0 1 1 1]
[1 1 0 1 1]
[1 1 1 0 1]
[1 1 1 1 0]
The rows and columns of the vertex adjacency matrix correspond
to the :meth:`Vrepresentation` objects: vertices, rays, and
lines. The `(i,j)` matrix entry equals `1` if the `i`-th and
`j`-th V-representation object are adjacent.
Two vertices are adjacent if they are the endpoints of an
edge, that is, a one-dimensional face. For unbounded polyhedra
this clearly needs to be generalized and we define two
V-representation objects (see
:mod:`sage.geometry.polyhedron.constructor`) to be adjacent if
they together generate a one-face. There are three possible
combinations:
* Two vertices can bound a finite-length edge.
* A vertex and a ray can generate a half-infinite edge
starting at the vertex and with the direction given by the
ray.
* A vertex and a line can generate an infinite edge. The
position of the vertex on the line is arbitrary in this
case, only its transverse position matters. The direction of
the edge is given by the line generator.
For example, take the half-plane::
sage: half_plane = Polyhedron(ieqs=[(0,1,0)])
sage: half_plane.Hrepresentation()
(An inequality (1, 0) x + 0 >= 0,)
Its (non-unique) V-representation consists of a vertex, a ray,
and a line. The only edge is spanned by the vertex and | |
e.g., (500, str('K')).
T_max (tuple): The maximum temperature for kinetics computations, e.g., (3000, str('K')).
T_count (int): The number of temperature points between ``T_min`` and ``T_max``.
max_job_time (float): The maximal allowed job time on the server in hours (can be fractional).
rmg_database (RMGDatabase): The RMG database object.
allow_nonisomorphic_2d (bool): Whether to optimize species even if they do not have a 3D conformer that is
isomorphic to the 2D graph representation.
memory (int): The total allocated job memory in GB (14 by default to be lower than 90% * 16 GB).
job_types (dict): A dictionary of job types to execute. Keys are job types, values are boolean.
specific_job_type (str): Specific job type to execute. Legal strings are job types (keys of job_types dict).
bath_gas (str): A bath gas. Currently used in OneDMin to calc L-J parameters.
Allowed values are He, Ne, Ar, Kr, H2, N2, O2.
keep_checks (bool): Whether to keep all Gaussian checkfiles when ARC terminates. True to keep, default is False.
dont_gen_confs (list): A list of species labels for which conformer generation should be avoided
if xyz is given.
compare_to_rmg (bool): If ``True`` data calculated from the RMG-database will be calculated and included on the
parity plot.
solvent (dict): The solvent model and solvent to use.
compute_thermo (bool): Whether to compute thermodynamic properties for converged species.
compute_rates (bool): Whether to compute rate coefficients for converged reactions.
compute_transport (bool): Whether to compute transport properties for converged species.
statmech_adapter (str): The statmech software to use.
fine_only (bool): If ``self.job_types['fine'] and not self.job_types['opt']`` ARC will not run optimization
jobs without fine=True
"""
def __init__(self, input_dict=None, project=None, arc_species_list=None, arc_rxn_list=None, level_of_theory='',
conformer_level='', composite_method='', opt_level='', freq_level='', sp_level='', scan_level='',
ts_guess_level='', irc_level='', orbitals_level='', use_bac=True, job_types=None, model_chemistry='',
job_additional_options=None, job_shortcut_keywords=None, T_min=None, T_max=None, T_count=50,
verbose=logging.INFO, project_directory=None, max_job_time=None, allow_nonisomorphic_2d=False,
job_memory=None, ess_settings=None, bath_gas=None, adaptive_levels=None, freq_scale_factor=None,
calc_freq_factor=True, n_confs=10, e_confs=5, dont_gen_confs=None, keep_checks=False,
solvation=None, compare_to_rmg=True, compute_thermo=True, compute_rates=True, compute_transport=True,
specific_job_type='', statmech_adapter='Arkane'):
self.__version__ = VERSION
self.verbose = verbose
self.output = dict()
self.running_jobs = dict()
self.lib_long_desc = ''
self.unique_species_labels = list()
self.rmg_database = rmgdb.make_rmg_database_object()
self.max_job_time = max_job_time or default_job_settings.get('job_time_limit_hrs', 120)
self.allow_nonisomorphic_2d = allow_nonisomorphic_2d
self.memory = job_memory or default_job_settings.get('job_total_memory_gb', 14)
self.ess_settings = dict()
self.calc_freq_factor = calc_freq_factor
self.keep_checks = keep_checks
self.compare_to_rmg = compare_to_rmg
if input_dict is None:
if project is None:
raise ValueError('A project name must be provided for a new project')
self.project = project
self.compute_thermo = compute_thermo
self.compute_rates = compute_rates
self.compute_transport = compute_transport
self.statmech_adapter = statmech_adapter
self.T_min = T_min
self.T_max = T_max
self.T_count = T_count
self.specific_job_type = specific_job_type
self.job_types = initialize_job_types(job_types, specific_job_type=self.specific_job_type)
self.bath_gas = bath_gas
self.solvation = solvation
self.n_confs = n_confs
self.e_confs = e_confs
self.adaptive_levels = adaptive_levels
self.project_directory = project_directory if project_directory is not None \
else os.path.join(arc_path, 'Projects', self.project)
if not os.path.exists(self.project_directory):
os.makedirs(self.project_directory)
initialize_log(log_file=os.path.join(self.project_directory, 'arc.log'), project=self.project,
project_directory=self.project_directory, verbose=self.verbose)
self.dont_gen_confs = dont_gen_confs if dont_gen_confs is not None else list()
self.t0 = time.time() # init time
self.execution_time = None
self.job_additional_options = job_additional_options if job_additional_options is not None else dict()
self.job_shortcut_keywords = job_shortcut_keywords if job_shortcut_keywords is not None else dict()
if self.job_additional_options:
logger.info(f'Use the following user-specified additional job options\n'
f'{yaml.dump(self.job_additional_options, default_flow_style=False)}')
if self.job_shortcut_keywords:
logger.info(f'Use the following user-specified additional job keywords\n'
f'{yaml.dump(self.job_shortcut_keywords, default_flow_style=False)}')
self.use_bac = use_bac
self.model_chemistry = model_chemistry
self.freq_scale_factor = freq_scale_factor
self.level_of_theory = level_of_theory
self.composite_method = composite_method
self.conformer_level = conformer_level
self.opt_level = opt_level
self.freq_level = freq_level
self.sp_level = sp_level
self.scan_level = scan_level
self.ts_guess_level = ts_guess_level
self.irc_level = irc_level
self.orbitals_level = orbitals_level
self.arc_species_list = arc_species_list if arc_species_list is not None else list()
converted_species_list = list()
indices_to_pop = []
for i, spc in enumerate(self.arc_species_list):
if isinstance(spc, Species):
if not spc.label:
raise InputError('Missing label on RMG Species object {0}'.format(spc))
indices_to_pop.append(i)
arc_spc = ARCSpecies(is_ts=False, rmg_species=spc) # assuming an RMG Species is not a TS
converted_species_list.append(arc_spc)
elif not isinstance(spc, ARCSpecies):
raise ValueError('A species should either be an `ARCSpecies` object or an RMG `Species` object.'
' Got: {0} for {1}'.format(type(spc), spc.label))
for i in reversed(range(len(self.arc_species_list))): # pop from the end, so other indices won't change
if i in indices_to_pop:
self.arc_species_list.pop(i)
self.arc_species_list.extend(converted_species_list)
if self.job_types['bde']:
self.add_hydrogen_for_bde()
self.determine_unique_species_labels()
self.arc_rxn_list = arc_rxn_list if arc_rxn_list is not None else list()
converted_rxn_list = list()
indices_to_pop = []
for i, rxn in enumerate(self.arc_rxn_list):
if isinstance(rxn, Reaction):
if not rxn.reactants or not rxn.products:
raise InputError('Missing reactants and/or products in RMG Reaction object {0}'.format(rxn))
indices_to_pop.append(i)
arc_rxn = ARCReaction(rmg_reaction=rxn)
converted_rxn_list.append(arc_rxn)
for spc in rxn.reactants + rxn.products:
if not isinstance(spc, Species):
raise InputError('All reactants and procucts of an RMG Reaction have to be RMG Species'
' objects. Got: {0} in reaction {1}'.format(type(spc), rxn))
if not spc.label:
raise InputError('Missing label on RMG Species object {0} in reaction {1}'.format(
spc, rxn))
if spc.label not in self.unique_species_labels:
# Add species participating in an RMG Reaction to arc_species_list if not already there
# We assume each species has a unique label
self.arc_species_list.append(ARCSpecies(is_ts=False, rmg_species=spc))
self.unique_species_labels.append(spc.label)
elif not isinstance(rxn, ARCReaction):
raise ValueError('A reaction should either be an `ARCReaction` object or an RMG `Reaction` object.'
' Got: {0} for {1}'.format(type(rxn), rxn.label))
for i in reversed(range(len(self.arc_rxn_list))): # pop from the end, so other indices won't change
if i in indices_to_pop:
self.arc_rxn_list.pop(i)
self.arc_rxn_list.extend(converted_rxn_list)
rxn_index = 0
for arc_rxn in self.arc_rxn_list:
arc_rxn.index = rxn_index
rxn_index += 1
else:
# ARC is run from an input or a restart file.
# Read the input_dict
self.project_directory = project_directory if project_directory is not None \
else os.path.abspath(os.path.dirname(input_dict))
self.from_dict(input_dict=input_dict, project=project, project_directory=self.project_directory)
if self.adaptive_levels is not None:
logger.info('Using the following adaptive levels of theory:\n{0}'.format(self.adaptive_levels))
if not self.ess_settings:
# don't override self.ess_settings if determined from an input dictionary
self.ess_settings = check_ess_settings(ess_settings or global_ess_settings)
if not self.ess_settings:
self.determine_ess_settings()
# Determine if fine-only behavior is requested before determining chemistry for job types
self.fine_only = False
if self.job_types['fine'] and not self.job_types['opt']:
self.fine_only = True
self.job_types['opt'] = True # Run the optimizations, self.fine_only will make sure that they are fine
# execute regardless of new job or restart job
self.determine_model_chemistry_for_job_types() # all level of theory attributes should be dict after this call
self.determine_model_chemistry()
self.scheduler = None
self.check_project_name()
self.check_freq_scaling_factor()
self.restart_dict = self.as_dict()
if not self.job_types['fine'] and not determine_model_chemistry_type(self.opt_level['method']) == 'dft':
logger.info('\n')
logger.warning('Not using a fine DFT grid for geometry optimization jobs')
logger.info('\n')
# make a backup copy of the restart file if it exists (but don't save an updated one just yet)
if os.path.isfile(os.path.join(self.project_directory, 'restart.yml')):
if not os.path.isdir(os.path.join(self.project_directory, 'log_and_restart_archive')):
os.mkdir(os.path.join(self.project_directory, 'log_and_restart_archive'))
local_time = datetime.datetime.now().strftime("%H%M%S_%b%d_%Y")
restart_backup_name = 'restart.old.' + local_time + '.yml'
shutil.copy(os.path.join(self.project_directory, 'restart.yml'),
os.path.join(self.project_directory, 'log_and_restart_archive', restart_backup_name))
def as_dict(self) -> dict:
"""
A helper function for dumping this object as a dictionary in a YAML file for restarting ARC.
"""
restart_dict = dict()
restart_dict['project'] = self.project
if not self.compute_thermo:
restart_dict['compute_thermo'] = self.compute_thermo
if not self.compute_rates:
restart_dict['compute_rates'] = self.compute_rates
if not self.compute_transport:
restart_dict['compute_transport'] = self.compute_transport
restart_dict['statmech_adapter'] = self.statmech_adapter
if self.bath_gas is not None:
restart_dict['bath_gas'] = self.bath_gas
if self.solvation is not None:
restart_dict['solvation'] = self.solvation
if self.adaptive_levels is not None:
restart_dict['adaptive_levels'] = self.adaptive_levels
restart_dict['job_types'] = self.job_types
restart_dict['use_bac'] = self.use_bac
restart_dict['model_chemistry'] = self.model_chemistry
# attributes related to job model chemistry specifications
restart_dict['composite_method'] = self.composite_method
restart_dict['conformer_level'] = self.conformer_level
restart_dict['opt_level'] = self.opt_level
restart_dict['freq_level'] = self.freq_level
restart_dict['sp_level'] = self.sp_level
restart_dict['scan_level'] = self.scan_level
restart_dict['ts_guess_level'] = self.ts_guess_level
restart_dict['irc_level'] = self.irc_level
restart_dict['orbitals_level'] = self.orbitals_level
# special treatment for level of theory to avoid conflict during restart
check_if_empty = (self.composite_method, self.opt_level['method'], self.freq_level['method'],
self.sp_level['method'])
if any(item != '' for item in check_if_empty):
self.level_of_theory = ''
restart_dict['level_of_theory'] = self.level_of_theory
if self.job_additional_options:
restart_dict['job_additional_options'] = self.job_additional_options
if self.job_shortcut_keywords:
restart_dict['job_shortcut_keywords'] = self.job_shortcut_keywords
if self.freq_scale_factor is not None:
restart_dict['freq_scale_factor'] = self.freq_scale_factor
restart_dict['calc_freq_factor'] = self.calc_freq_factor
if self.dont_gen_confs:
restart_dict['dont_gen_confs'] = self.dont_gen_confs
restart_dict['species'] = [spc.as_dict() for spc in self.arc_species_list]
restart_dict['reactions'] = [rxn.as_dict() for rxn in self.arc_rxn_list]
restart_dict['output'] = self.output # if read from_dict then it has actual values
restart_dict['running_jobs'] = self.running_jobs # if read from_dict then it has actual values
restart_dict['T_min'] = self.T_min
restart_dict['T_max'] = self.T_max
restart_dict['T_count'] = self.T_count
restart_dict['max_job_time'] = self.max_job_time
restart_dict['allow_nonisomorphic_2d'] = self.allow_nonisomorphic_2d
restart_dict['ess_settings'] = self.ess_settings
restart_dict['job_memory'] = self.memory
restart_dict['n_confs'] = self.n_confs
restart_dict['e_confs'] = self.e_confs
restart_dict['specific_job_type'] = self.specific_job_type
if self.keep_checks:
restart_dict['keep_checks'] = self.keep_checks
return restart_dict
def from_dict(self, input_dict, project=None, project_directory=None):
"""
A helper function for loading this object | |
ValueError("Axis out of bounds")
self.axis = axis
# Allow rotation of only subset of elements/slices
self.D = X.dims[0][axis]
if subset is None:
#self.subset = np.ones(self.D, dtype=bool)
self.subset = None #tuple(range(self.D))
else:
#self.subset = tuple(range(self.D))
self.subset = subset #self.subset[subset]
if axis != -1:
raise NotImplementedError("Subset indexing for non-last "
"axis not yet implemented")
## self.subset = np.zeros(self.D, dtype=bool)
## self.subset[list(subset)] = True
def nodes(self):
if self.update_alpha:
return [self.node_X, self.node_alpha]
else:
return [self.node_X]
def _full_rotation_matrix(self, R):
if self.subset is not None:
R_full = np.identity(self.D)
indices = np.ix_(self.subset, self.subset)
R_full[indices] = R
return R_full
else:
return R
    def rotate(self, R, inv=None, logdet=None, Q=None):
        """
        Apply the rotation R to the rotated node.

        :param R: The rotation matrix to apply.
        :param inv: (optional) Precomputed inverse of R.
        :param logdet: (optional) Precomputed log-determinant of R.
        :param Q: (optional) Plate rotation matrix; only applied when a plate
            axis was given to :func:`setup`.
        """
        ## R = self._full_rotation_matrix(R)
        ## if inv is not None:
        ##     inv = self._full_rotation_matrix(inv)
        self.node_X.rotate(R,
                           inv=inv,
                           logdet=logdet,
                           subset=self.subset,
                           axis=self.axis)
        if self.plate_axis is not None:
            self.node_X.rotate_plates(Q, plate_axis=self.plate_axis)
        if self.update_alpha:
            # Refresh q(alpha) so it reflects the rotated q(X).
            self.node_alpha.update()
    def setup(self, plate_axis=None):
        """
        Precompute the moments needed by the rotation cost function.

        This method should be called just before optimization.

        For efficiency, sum over axes that are not in mu, alpha nor rotation.

        If using Q, set rotate_plates to True.

        :param plate_axis: (optional int) Plate axis to rotate as well;
            normalized to a negative index internally.
        """
        # Store the original plate_axis parameter for later use in other methods
        self.plate_axis = plate_axis
        # Manipulate the plate_axis parameter to suit the needs of this method
        if plate_axis is not None:
            if not isinstance(plate_axis, int):
                raise ValueError("Plate axis must be integer")
            if plate_axis >= 0:
                plate_axis -= len(self.node_X.plates)
            if plate_axis < -len(self.node_X.plates) or plate_axis >= 0:
                raise ValueError("Axis out of bounds")
            plate_axis -= self.ndim - 1 # Why -1? Because one axis is preserved!
        # Get the mean parameter. It will not be rotated. This assumes that mu
        # and alpha are really independent.
        (alpha_mu, alpha_mu2, alpha, _) = self.node_parent.get_moments()
        (X, XX) = self.node_X.get_moments()
        #
        # Recover <mu> and <mu^2> from the joint <alpha*mu> moments.
        mu = alpha_mu / alpha
        mu2 = alpha_mu2 / alpha
        # For simplicity, force mu to have the same shape as X
        mu = mu * np.ones(self.node_X.dims[0])
        mu2 = mu2 * np.ones(self.node_X.dims[0])
        ## (mu, mumu) = gaussian.reshape_gaussian_array(self.node_mu.dims[0],
        ##                                              self.node_X.dims[0],
        ##                                              mu,
        ##                                              mumu)
        # Take diagonal of covariances to variances for axes that are not in R
        # (and move those axes to be the last)
        XX = covariance_to_variance(XX,
                                    ndim=self.ndim,
                                    covariance_axis=self.axis)
        ## mumu = covariance_to_variance(mumu,
        ##                               ndim=self.ndim,
        ##                               covariance_axis=self.axis)
        # Move axes of X and mu and compute their outer product
        X = misc.moveaxis(X, self.axis, -1)
        mu = misc.moveaxis(mu, self.axis, -1)
        mu2 = misc.moveaxis(mu2, self.axis, -1)
        Xmu = linalg.outer(X, mu, ndim=1)
        D = np.shape(X)[-1]
        # Move axes of alpha related variables
        def safe_move_axis(x):
            # Only move the axis if x actually has that many dimensions;
            # otherwise append a broadcasting axis.
            if np.ndim(x) >= -self.axis:
                return misc.moveaxis(x, self.axis, -1)
            else:
                return x[...,np.newaxis]
        if self.update_alpha:
            # q(alpha) is updated too: grab its natural parameter and the
            # moments of its Gamma prior's parents.
            a = safe_move_axis(self.node_alpha.phi[1])
            a0 = safe_move_axis(self.node_alpha.parents[0].get_moments()[0])
            b0 = safe_move_axis(self.node_alpha.parents[1].get_moments()[0])
            plates_alpha = list(self.node_alpha.plates)
        else:
            # alpha is fixed: only its expectation is needed.
            alpha = safe_move_axis(self.node_parent.get_moments()[2])
            plates_alpha = list(self.node_parent.get_shape(2))
        # Move plates of alpha for R
        if len(plates_alpha) >= -self.axis:
            plate = plates_alpha.pop(self.axis)
            plates_alpha.append(plate)
        else:
            plates_alpha.append(1)
        plates_X = list(self.node_X.get_shape(0))
        plates_X.pop(self.axis)
        def sum_to_alpha(V, ndim=2):
            # TODO/FIXME: This could be improved so that it is not required to
            # explicitly repeat to alpha plates. Multiplying by ones was just a
            # simple bug fix.
            return sum_to_plates(V * np.ones(plates_alpha[:-1]+ndim*[1]),
                                 plates_alpha[:-1],
                                 ndim=ndim,
                                 plates_from=plates_X)
        if plate_axis is not None:
            # Move plate axis just before the rotated dimensions (which are
            # last)
            def safe_move_plate_axis(x, ndim):
                # Like safe_move_axis, but for a plate axis of an array whose
                # trailing ndim axes are variable dimensions.
                if np.ndim(x)-ndim >= -plate_axis:
                    return misc.moveaxis(x,
                                         plate_axis-ndim,
                                         -ndim-1)
                else:
                    inds = (Ellipsis,None) + ndim*(slice(None),)
                    return x[inds]
            X = safe_move_plate_axis(X, 1)
            mu = safe_move_plate_axis(mu, 1)
            XX = safe_move_plate_axis(XX, 2)
            mu2 = safe_move_plate_axis(mu2, 1)
            if self.update_alpha:
                a = safe_move_plate_axis(a, 1)
                a0 = safe_move_plate_axis(a0, 1)
                b0 = safe_move_plate_axis(b0, 1)
            else:
                alpha = safe_move_plate_axis(alpha, 1)
            # Move plates of X and alpha
            plate = plates_X.pop(plate_axis)
            plates_X.append(plate)
            if len(plates_alpha) >= -plate_axis+1:
                plate = plates_alpha.pop(plate_axis-1)
            else:
                plate = 1
            plates_alpha = plates_alpha[:-1] + [plate] + plates_alpha[-1:]
            # Covariance of X: Cov[X] = <XX> - <X><X>.
            CovX = XX - linalg.outer(X, X)
            self.CovX = sum_to_plates(CovX,
                                      plates_alpha[:-2],
                                      ndim=3,
                                      plates_from=plates_X[:-1])
            # Broadcast mumu to ensure shape
            #mumu = np.ones(np.shape(XX)[-3:]) * mumu
            mu2 = mu2 * np.ones(np.shape(X)[-2:])
            self.mu2 = sum_to_alpha(mu2, ndim=1)
            if self.precompute:
                # Precompute some stuff for the gradient of plate rotation
                #
                # NOTE: These terms may require a lot of memory if alpha has the
                # same or almost the same plates as X.
                self.X_X = sum_to_plates(X[...,:,:,None,None] *
                                         X[...,None,None,:,:],
                                         plates_alpha[:-2],
                                         ndim=4,
                                         plates_from=plates_X[:-1])
                self.X_mu = sum_to_plates(X[...,:,:,None,None] *
                                          mu[...,None,None,:,:],
                                          plates_alpha[:-2],
                                          ndim=4,
                                          plates_from=plates_X[:-1])
            else:
                self.X = X
                self.mu = mu
        else:
            # Sum axes that are not in the plates of alpha
            self.XX = sum_to_alpha(XX)
            self.mu2 = sum_to_alpha(mu2, ndim=1)
            self.Xmu = sum_to_alpha(Xmu)
        if self.update_alpha:
            self.a = a
            self.a0 = a0
            self.b0 = b0
        else:
            self.alpha = alpha
        self.plates_X = plates_X
        self.plates_alpha = plates_alpha
        # Take only a subset of the matrix for rotation
        if self.subset is not None:
            if self.precompute:
                raise NotImplementedError("Precomputation not implemented when "
                                          "using a subset")
            # from X
            self.X = self.X[...,self.subset]
            self.mu2 = self.mu2[...,self.subset]
            if plate_axis is not None:
                # from CovX
                # Select the subset rows AND columns of the covariance.
                inds = []
                for i in range(np.ndim(self.CovX)-2):
                    inds.append(range(np.shape(self.CovX)[i]))
                inds.append(self.subset)
                inds.append(self.subset)
                indices = np.ix_(*inds)
                self.CovX = self.CovX[indices]
                # from mu
                self.mu = self.mu[...,self.subset]
            else:
                # from XX
                inds = []
                for i in range(np.ndim(self.XX)-2):
                    inds.append(range(np.shape(self.XX)[i]))
                inds.append(self.subset)
                inds.append(self.subset)
                indices = np.ix_(*inds)
                self.XX = self.XX[indices]
                # from Xmu
                self.Xmu = self.Xmu[...,self.subset]
            # from alpha
            # A trailing axis of length 1 is broadcast, so only slice real axes.
            if self.update_alpha:
                if np.shape(self.a)[-1] > 1:
                    self.a = self.a[...,self.subset]
                if np.shape(self.a0)[-1] > 1:
                    self.a0 = self.a0[...,self.subset]
                if np.shape(self.b0)[-1] > 1:
                    self.b0 = self.b0[...,self.subset]
            else:
                if np.shape(self.alpha)[-1] > 1:
                    self.alpha = self.alpha[...,self.subset]
            self.plates_alpha[-1] = min(self.plates_alpha[-1], len(self.subset))
            ## # from mu
            ## # from alpha
            ## alpha_mu = alpha_mu[...,self.subset]
            ## alpha_mu2 = alpha_mu2[...,self.subset]
            ## alpha = alpha[...,self.subset]
            ## dims = list(self.node_X.dims[0])
            ## dims[-1] = len(self.subset)
        ## else:
        ##     dims = list(self.node_X.dims[0])
def _compute_bound(self, R, logdet=None, inv=None, Q=None, gradient=False, terms=False):
"""
Rotate q(X) and q(alpha).
Assume:
p(X|alpha) = prod_m N(x_m|0,diag(alpha))
p(alpha) = prod_d G(a_d,b_d)
"""
## R = self._full_rotation_matrix(R)
## if inv is not None:
## inv = self._full_rotation_matrix(inv)
#
# Transform the distributions and moments
#
plates_alpha = self.plates_alpha
plates_X = self.plates_X
# Compute rotated second moment
if self.plate_axis is not None:
# The plate axis has been moved to be the last plate axis
if Q is None:
raise ValueError("Plates should be rotated but no Q give")
# Transform covariance
sumQ = np.sum(Q, axis=0)
QCovQ = sumQ[:,None,None]**2 * self.CovX
# Rotate plates
if self.precompute:
QX_QX = np.einsum('...kalb,...ik,...il->...iab', self.X_X, Q, Q)
XX = QX_QX + QCovQ
XX = sum_to_plates(XX,
plates_alpha[:-1],
ndim=2)
Xmu = np.einsum('...kaib,...ik->...iab', self.X_mu, Q)
Xmu = sum_to_plates(Xmu,
plates_alpha[:-1],
ndim=2)
else:
X = self.X
mu = self.mu
QX = np.einsum('...ik,...kj->...ij', Q, X)
XX = (sum_to_plates(QCovQ,
plates_alpha[:-1],
ndim=2) +
sum_to_plates(linalg.outer(QX, QX),
plates_alpha[:-1],
ndim=2,
plates_from=plates_X))
Xmu = sum_to_plates(linalg.outer(QX, self.mu),
plates_alpha[:-1],
ndim=2,
plates_from=plates_X)
mu2 = self.mu2
D = np.shape(XX)[-1]
logdet_Q = D * np.log(np.abs(sumQ))
else:
XX = self.XX
mu2 = self.mu2
Xmu = self.Xmu
logdet_Q = 0
# Compute transformed moments
#mu2 = np.einsum('...ii->...i', mu2)
RXmu = np.einsum('...ik,...ki->...i', R, Xmu)
RXX = np.einsum('...ik,...kj->...ij', R, XX)
RXXR = np.einsum('...ik,...ik->...i', RXX, R)
# <(X-mu) * (X-mu)'>_R
XmuXmu = (RXXR - 2*RXmu + mu2)
D = np.shape(R)[0]
# Compute q(alpha)
if self.update_alpha:
# Parameters
a0 = self.a0
b0 = self.b0
a = self.a
b = b0 + 0.5*sum_to_plates(XmuXmu,
plates_alpha,
plates_from=None,
ndim=0)
# Some expectations
alpha = a / b
logb = np.log(b)
logalpha = -logb # + const
b0_alpha = b0 * alpha
a0_logalpha = a0 * logalpha
else:
alpha = self.alpha
logalpha = 0
#
# Compute the cost
#
def sum_plates(V, *plates):
full_plates = misc.broadcasted_shape(*plates)
r = self.node_X.broadcasting_multiplier(full_plates, np.shape(V))
return r * np.sum(V)
XmuXmu_alpha = XmuXmu * alpha
if logdet is None:
logdet_R = np.linalg.slogdet(R)[1]
inv_R = np.linalg.inv(R)
else:
logdet_R = logdet
inv_R = inv
# Compute entropy H(X)
logH_X = random.gaussian_entropy(-2*sum_plates(logdet_R + logdet_Q,
plates_X),
0)
# Compute <log p(X|alpha)>
logp_X = random.gaussian_logpdf(sum_plates(XmuXmu_alpha,
plates_alpha[:-1] + [D]),
0,
0,
sum_plates(logalpha,
plates_X + [D]),
0)
if self.update_alpha:
# Compute entropy H(alpha)
# This cancels out with the log(alpha) term in log(p(alpha))
logH_alpha = 0
# Compute <log p(alpha)>
logp_alpha = random.gamma_logpdf(sum_plates(b0_alpha,
plates_alpha),
0,
sum_plates(a0_logalpha,
plates_alpha),
0,
0)
else:
logH_alpha = | |
= 'L1003'
response['message'] = '认证失败'
return response
else:
high_queue.enqueue_call(queue_target_list, args = (username_result['username'], target_list, request['description'], mysqldb,), timeout = 7200000)
response['code'] = 'L1000'
response['message'] = '请求成功'
return response
except Exception as e:
print(e)
response['code'] = 'L1001'
response['message'] = '系统异常'
return response
@app.post('/api/target/edit')
async def target_edit(request : VueRequest):
    """
    修改目标描述的接口

    :param request: The encrypted Vue request payload.
    :return: str response: 需要返回的数据
    """
    response = {'code': '', 'message': '', 'data': ''}
    try:
        payload = json.loads(rsa_crypto.decrypt(request.data))
        target = payload['target']
        description = payload['description']
        token = payload['token']
        # Resolve the token to a user before touching the database record.
        username_result = mysqldb.username({'type': 'token', 'data': token})
        if username_result == 'L1001':
            response['code'], response['message'] = 'L1001', '系统异常'
        elif username_result == None:
            response['code'], response['message'] = 'L1003', '认证失败'
        else:
            result = mysqldb.update_target_description(username_result['username'], target, description)
            if result == 'L1000':
                response['code'], response['message'] = 'L1000', '请求成功'
            else:
                response['code'], response['message'] = 'L1001', '系统异常'
        return response
    except Exception as e:
        print(e)
        response['code'] = 'L1001'
        response['message'] = '系统异常'
        return response
@app.post('/api/target/detail')
async def target_detail(request : VueRequest):
    """
    获取目标详情的接口

    :param request: The encrypted Vue request payload.
    :return str response: 需要返回的数据
    """
    response = {'code': '', 'message': '', 'data': ''}
    try:
        payload = json.loads(rsa_crypto.decrypt(request.data))
        target = payload['target']
        pagenum = payload['pagenum']
        pagesize = payload['pagesize']
        token = payload['token']
        username_result = mysqldb.username({'type': 'token', 'data': token})
        if username_result == 'L1001':
            response['code'], response['message'] = 'L1001', '系统异常'
        elif username_result == None:
            # NOTE(review): this endpoint answers 'L1004' on auth failure while
            # sibling endpoints answer 'L1003' -- confirm this is intentional.
            response['code'], response['message'] = 'L1004', '认证失败'
        else:
            sql_result = mysqldb.get_target_detail(username_result['username'], target, pagenum, pagesize)
            if sql_result == 'L1001':
                response['code'], response['message'] = 'L1001', '系统异常'
            else:
                response['code'], response['message'] = 'L1000', '请求成功'
                response['data'] = sql_result
        return response
    except Exception as e:
        print(e)
        response['code'] = 'L1001'
        response['message'] = '系统异常'
        return response
@app.post('/api/scan/set')
async def scan_set(request : VueRequest):
    """
    设置扫描选项的接口

    :param request: The encrypted Vue request payload.
    :return: str response: 需要返回的数据
    """
    response = {'code': '', 'message': '', 'data': ''}
    try:
        payload = json.loads(rsa_crypto.decrypt(request.data))
        target = payload['target']
        token = payload['token']
        # The scan options themselves arrive as a nested JSON string.
        scan_data = json.loads(payload['scan_data'])
        username_result = mysqldb.username({'type': 'token', 'data': token})
        if username_result == 'L1001':
            response['code'], response['message'] = 'L1001', '系统异常'
        elif username_result == None:
            response['code'], response['message'] = 'L1003', '认证失败'
        else:
            # NOTE: 'concurren_number' is the key spelling the frontend sends.
            result = mysqldb.scan_set(username_result['username'], target,
                                      scan_data['scanner'], scan_data['min_port'],
                                      scan_data['max_port'], scan_data['rate'],
                                      scan_data['concurren_number'])
            if result == 'L1000':
                response['code'], response['message'] = 'L1000', '请求成功'
            else:
                response['code'], response['message'] = 'L1001', '系统异常'
        return response
    except Exception as e:
        print(e)
        response['code'] = 'L1001'
        response['message'] = '系统异常'
        return response
@app.post('/api/scan/start')
async def start_scan(request : VueRequest):
    """
    开始扫描的接口

    :param request: The encrypted Vue request payload.
    :return: str response: 需要返回的数据
    """
    response = {'code': '', 'message': '', 'data': ''}
    try:
        payload = json.loads(rsa_crypto.decrypt(request.data))
        target = payload['target']
        token = payload['token']
        # Flatten the selected options: every option id, plus each child
        # vulnerability label with dashes normalized to underscores.
        option_list = []
        for raw_option in payload['scan_option']:
            option = json.loads(raw_option)
            option_list.append(str(option['id']))
            for vul_type in option.get('children', []):
                option_list.append(vul_type['label'].replace('-', '_'))
        username_result = mysqldb.username({'type': 'token', 'data': token})
        if username_result == 'L1001':
            response['code'], response['message'] = 'L1001', '系统异常'
        elif username_result == None:
            response['code'], response['message'] = 'L1003', '认证失败'
        else:
            if payload['target'] == 'all':
                target_list = mysqldb.get_scan_target(username_result['username'])
            else:
                target_list = [{'target': target}]
            # Hand the scan off to the worker queue; it may run for a long time.
            high_queue.enqueue_call(queue_scan_list, args = (username_result['username'], target_list, option_list, mysqldb,), timeout = 7200000)
            response['code'], response['message'] = 'L1000', '请求成功'
        return response
    except Exception as e:
        print(e)
        response['code'] = 'L1001'
        response['message'] = '系统异常'
        return response
@app.post('/api/scan/pause')
async def pause_scan(request : VueRequest):
    """
    暂停扫描的接口

    :param request: The encrypted Vue request payload.
    :return str response: 需要返回的数据
    """
    response = {'code': '', 'message': '', 'data': ''}
    try:
        payload = json.loads(rsa_crypto.decrypt(request.data))
        target = payload['target']
        scan_id = payload['scan_id']
        token = payload['token']
        username_result = mysqldb.username({'type': 'token', 'data': token})
        if username_result == 'L1001':
            response['code'], response['message'] = 'L1001', '系统异常'
        elif username_result == None:
            response['code'], response['message'] = 'L1003', '认证失败'
        else:
            scan_status = mysqldb.get_scan_status(username_result['username'], scan_id)
            if scan_status == '扫描中':
                # Stop the running rq job, then mark both records as paused.
                send_stop_job_command(redis_conn, md5(username_result['username'] + scan_id))
                mysqldb.update_scan_status(username_result['username'], scan_id, '暂停扫描')
                mysqldb.update_target_scan_status(username_result['username'], target, '暂停扫描')
                response['data'] = '请求正常'
            else:
                response['data'] = '目标不在扫描中,无法暂停扫描'
            response['code'] = 'L1000'
            response['message'] = '请求正常'
        return response
    except Exception as e:
        print(e)
        response['code'] = 'L1001'
        response['message'] = '系统异常'
        return response
@app.post('/api/scan/resume')
async def resume_scan(request : VueRequest):
    """
    恢复扫描的接口

    :param request: The encrypted Vue request payload.
    :return str response: 需要返回的数据
    """
    response = {'code': '', 'message': '', 'data': ''}
    try:
        payload = json.loads(rsa_crypto.decrypt(request.data))
        target = payload['target']
        scan_id = payload['scan_id']
        token = payload['token']
        username_result = mysqldb.username({'type': 'token', 'data': token})
        if username_result == 'L1001':
            response['code'], response['message'] = 'L1001', '系统异常'
        elif username_result == None:
            response['code'], response['message'] = 'L1003', '认证失败'
        else:
            scan_status = mysqldb.get_scan_status(username_result['username'], scan_id)
            if scan_status == '暂停扫描':
                # A paused job sits in the failed-job registry; requeue it.
                registry = high_queue.failed_job_registry
                registry.requeue(md5(username_result['username'] + scan_id))
                mysqldb.update_scan_status(username_result['username'], scan_id, '正在扫描')
                mysqldb.update_target_scan_status(username_result['username'], target, '正在扫描')
                response['data'] = '请求正常'
            else:
                response['data'] = '目标不处于暂停扫描状态,无法恢复扫描'
            response['code'] = 'L1000'
            response['message'] = '请求正常'
        return response
    except Exception as e:
        print(e)
        response['code'] = 'L1001'
        response['message'] = '系统异常'
        return response
@app.post('/api/scan/cancel')
async def cancel_scan(request : VueRequest):
    """
    取消扫描的接口

    Cancels a scan that is not yet finished: stops the rq job, removes it
    from the failed-job registry and marks the scan/target as cancelled.

    :param request: The encrypted Vue request payload (target, scan_id, token).
    :return str response: 需要返回的数据
    """
    try:
        response = {'code': '', 'message': '', 'data': ''}
        request = rsa_crypto.decrypt(request.data)
        request = json.loads(request)
        target = request['target']
        scan_id = request['scan_id']
        token = request['token']
        query_str = {
            'type': 'token',
            'data': token
        }
        username_result = mysqldb.username(query_str)
        if username_result == 'L1001':
            response['code'] = 'L1001'
            response['message'] = '系统异常'
            return response
        elif username_result == None:
            response['code'] = 'L1003'
            response['message'] = '认证失败'
            return response
        else:
            scan_status = mysqldb.get_scan_status(username_result['username'], scan_id)
            if scan_status == '扫描结束':
                response['data'] = '扫描已结束,无法取消'
                response['code'] = 'L1000'
                response['message'] = '扫描已结束,无法取消'
            elif scan_status == '已取消扫描':
                response['data'] = '已取消扫描,无法再次取消'
                response['code'] = 'L1000'
                response['message'] = '已取消扫描,无法再次取消'
            else:
                # Stop the running job first, then give rq a moment to move it
                # into the failed-job registry before deleting it.
                send_stop_job_command(redis_conn, md5(username_result['username'] + scan_id))
                time.sleep(0.5)
                registry = high_queue.failed_job_registry
                try:
                    registry.remove(md5(username_result['username'] + scan_id), delete_job = True)
                    mysqldb.update_scan_status(username_result['username'], scan_id, '已取消扫描')
                    mysqldb.update_target_scan_status(username_result['username'], target, '已取消扫描')
                    response['data'] = '请求正常'
                    response['code'] = 'L1000'
                    response['message'] = '请求正常'
                except Exception as e:
                    print(e)
                    response['data'] = '系统异常'
                    # BUGFIX: was the malformed code 'L10001'; every other error
                    # path in this API uses the standard system-error code 'L1001'.
                    response['code'] = 'L1001'
                    response['message'] = '系统异常'
            return response
    except Exception as e:
        print(e)
        response['code'] = 'L1001'
        response['message'] = '系统异常'
        return response
@app.post('/api/target/list')
async def target_list(request : VueRequest):
    """
    获取所有目标的接口

    :param request: The encrypted Vue request payload.
    :return: str response: 需要返回的数据
    """
    response = {'code': '', 'message': '', 'data': ''}
    try:
        payload = json.loads(rsa_crypto.decrypt(request.data))
        pagenum = payload['pagenum']
        pagesize = payload['pagesize']
        token = payload['token']
        # '全部' (all) means no filtering -- translate it to an empty filter.
        list_query = json.loads(payload['listQuery'])
        for key in ('scan_status', 'scan_schedule'):
            if list_query[key] == '全部':
                list_query[key] = ''
        username_result = mysqldb.username({'type': 'token', 'data': token})
        if username_result == 'L1001':
            response['code'], response['message'] = 'L1001', '系统异常'
        elif username_result == None:
            response['code'], response['message'] = 'L1003', '认证失败'
        else:
            sql_result = mysqldb.target_list(username_result['username'], pagenum, pagesize, list_query)
            total = sql_result['total']
            if sql_result['result'] == 'L1001':
                response['code'], response['message'] = 'L1001', '系统异常'
            else:
                response['code'], response['message'] = 'L1000', '请求成功'
                response['data'] = sql_result if total != 0 else ''
        return response
    except Exception as e:
        print(e)
        response['code'] = 'L1001'
        response['message'] = '系统异常'
        return response
@app.post('/api/scan/list')
async def scan_list(request : VueRequest):
    """
    获取所有扫描信息的接口

    :param request: The encrypted Vue request payload.
    :return str response: 需要返回的数据
    """
    response = {'code': '', 'message': '', 'data': ''}
    try:
        payload = json.loads(rsa_crypto.decrypt(request.data))
        pagenum = payload['pagenum']
        pagesize = payload['pagesize']
        token = payload['token']
        # '全部' (all) means no filtering -- translate it to an empty filter.
        list_query = json.loads(payload['listQuery'])
        for key in ('scan_status', 'scan_schedule'):
            if list_query[key] == '全部':
                list_query[key] = ''
        username_result = mysqldb.username({'type': 'token', 'data': token})
        if username_result == 'L1001':
            response['code'], response['message'] = 'L1001', '系统异常'
        elif username_result == None:
            response['code'], response['message'] = 'L1003', '认证失败'
        else:
            sql_result = mysqldb.scan_list(username_result['username'], pagenum, pagesize, list_query)
            total = sql_result['total']
            if sql_result['result'] == 'L1001':
                response['code'], response['message'] = 'L1001', '系统异常'
            else:
                response['code'], response['message'] = 'L1000', '请求成功'
                response['total'] = total
                response['data'] = sql_result if total != 0 else ''
        return response
    except Exception as e:
        print(e)
        response['code'] = 'L1001'
        response['message'] = '系统异常'
        return response
@app.post('/api/port/list')
async def port_list(request : VueRequest):
"""
获取所有端口信息的接口
:param:
:return: str response: 需要返回的数据
"""
try:
response = {'code': '', 'message': '', 'data': ''}
request = rsa_crypto.decrypt(request.data)
request = json.loads(request)
pagenum = request['pagenum']
pagesize = request['pagesize']
token = request['token']
query_str = {
'type': 'token',
'data': token
}
list_query = json.loads(request['listQuery'])
username_result = | |
= math.sqrt(count * (count + 1) * (2.0 * count + 1.0) / 24.0)
z = math.fabs(wt - mn) / se
prob = 2 * (1.0 - zprob(abs(z)))
return wt, prob
def kruskalwallish(*args):
    """
    The Kruskal-Wallis H-test is a non-parametric ANOVA for 3 or more
    groups, requiring at least 5 subjects in each group. This function
    calculates the Kruskal-Wallis H-test for 3 or more independent samples
    and returns the result.

    Usage:   lkruskalwallish(*args)   where each arg is a list of scores
    Returns: H-statistic (corrected for ties), associated p-value

    Raises ValueError when all scores are identical (tie correction is zero).
    """
    groups = list(args)
    # Bug fix: `map(len, args)` returns a one-shot iterator on Python 3, but
    # the result was indexed and summed below. Materialize it into a list
    # (also removes the dead `n = [0] * len(args)` pre-assignment and the
    # shadowing of the builtin `all`).
    counts = list(map(len, groups))
    pooled = []
    for scores in groups:
        pooled = pooled + scores
    ranked = rankdata(pooled)
    T = tiecorrect(ranked)
    # Slice the pooled ranking back into one rank list per group.
    for i in range(len(groups)):
        groups[i] = ranked[0:counts[i]]
        del ranked[0:counts[i]]
    rsums = []
    for i in range(len(groups)):
        rsums.append(sum(groups[i]) ** 2)
        rsums[i] = rsums[i] / float(counts[i])
    ssbn = sum(rsums)
    totaln = sum(counts)
    h = 12.0 / (totaln * (totaln + 1)) * ssbn - 3 * (totaln + 1)
    df = len(groups) - 1
    if T == 0:
        raise ValueError('All numbers are identical in lkruskalwallish')
    # Correct the statistic for ties.
    h = h / float(T)
    return h, chisqprob(h, df)
def friedmanchisquare(*args):
    """
    Friedman Chi-Square is a non-parametric, one-way within-subjects
    ANOVA. This function calculates the Friedman Chi-square test for repeated
    measures and returns the result, along with the associated probability
    value. It assumes 3 or more repeated measures. Only 3 levels requires a
    minimum of 10 subjects in the study. Four levels requires 5 subjects per
    level(??).

    Usage:   lfriedmanchisquare(*args)   one list of scores per condition
    Returns: chi-square statistic, associated p-value

    Raises ValueError for fewer than 3 conditions.
    """
    k = len(args)
    if k < 3:
        raise ValueError('Less than 3 levels. Friedman test not appropriate.')
    n = len(args[0])
    # Bug fix: the original `data = map(zip, tuple(args))` zipped each
    # condition with itself (one 1-tuple per score) instead of transposing
    # into one row of k scores per subject, and is a lazy iterator on
    # Python 3. Transpose properly, then rank each subject's row.
    data = [list(subject_scores) for subject_scores in zip(*args)]
    for i in range(len(data)):
        data[i] = rankdata(data[i])
    # Bug fix: the statistic must use the per-condition RANK sums; the
    # original summed the raw scores and never used the ranks it computed.
    ssbn = 0
    for j in range(k):
        ssbn = ssbn + sum(data[i][j] for i in range(n)) ** 2
    chisq = 12.0 / (k * n * (k + 1)) * ssbn - 3 * n * (k + 1)
    return chisq, chisqprob(chisq, k - 1)
####################################
#### PROBABILITY CALCULATIONS ####
####################################
def chisqprob(chisq, df):
    """
    Returns the (1-tailed) probability value associated with the provided
    chi-square value and df. Adapted from chisq.c in Gary Perlman's |Stat.

    Usage:   lchisqprob(chisq,df)
    """
    BIG = 20.0

    def _safe_exp(value):
        # exp() clamped to 0.0 for strongly negative arguments (underflow guard).
        return 0.0 if value < -BIG else math.exp(value)

    # Degenerate inputs: probability mass is the whole right tail.
    if chisq <= 0 or df < 1:
        return 1.0
    a = 0.5 * chisq
    even = (df % 2 == 0)
    if df > 1:
        y = _safe_exp(-a)
    # For odd df the series is seeded from the normal tail probability.
    s = y if even else 2.0 * zprob(-math.sqrt(chisq))
    if df <= 2:
        return s
    # Accumulate the series term by term up to (df - 1) / 2.
    chisq = 0.5 * (df - 1.0)
    z = 1.0 if even else 0.5
    if a > BIG:
        # Large argument: work in log space to avoid overflow.
        e = 0.0 if even else math.log(math.sqrt(math.pi))
        c = math.log(a)
        while z <= chisq:
            e = math.log(z) + e
            s = s + _safe_exp(c * z - a - e)
            z = z + 1.0
        return s
    e = 1.0 if even else 1.0 / math.sqrt(math.pi) / math.sqrt(a)
    c = 0.0
    while z <= chisq:
        e = e * (a / float(z))
        c = c + e
        z = z + 1.0
    return c * y + s
def erfcc(x):
    """
    Returns the complementary error function erfc(x) with fractional
    error everywhere less than 1.2e-7. Adapted from Numerical Recipies.

    Usage:   lerfcc(x)
    """
    # Chebyshev-fit coefficients, innermost first; evaluated by Horner's rule
    # exactly as the original nested expression did.
    coefficients = (-0.82215223, 1.48851587, -1.13520398, 0.27886807,
                    -0.18628806, 0.09678418, 0.37409196, 1.00002368)
    z = abs(x)
    t = 1.0 / (1.0 + 0.5 * z)
    poly = 0.17087277
    for coefficient in coefficients:
        poly = coefficient + t * poly
    ans = t * math.exp(-z * z - 1.26551223 + t * poly)
    # The fit is for non-negative arguments; use erfc(-x) = 2 - erfc(x).
    return ans if x >= 0 else 2.0 - ans
def zprob(z):
    """
    Returns the area under the normal curve 'to the left of' the given z value.
    Thus,
        for z<0, zprob(z) = 1-tail probability
        for z>0, 1.0-zprob(z) = 1-tail probability
        for any z, 2.0*(1.0-zprob(abs(z))) = 2-tail probability
    Adapted from z.c in Gary Perlman's |Stat.

    Usage:   lzprob(z)
    """
    Z_MAX = 6.0  # maximum meaningful z-value; beyond this the tail is ~0
    if z == 0.0:
        twice_area = 0.0
    else:
        y = 0.5 * math.fabs(z)
        if y >= (Z_MAX * 0.5):
            twice_area = 1.0
        elif y < 1.0:
            # Polynomial fit for small |z| (the coefficients are part of the
            # published approximation and are kept verbatim).
            w = y * y
            twice_area = ((((((((0.000124818987 * w
                - 0.001075204047) * w + 0.005198775019) * w
                - 0.019198292004) * w + 0.059054035642) * w
                - 0.151968751364) * w + 0.319152932694) * w
                - 0.531923007300) * w + 0.797884560593) * y * 2.0
        else:
            # Polynomial fit for the mid/large range, shifted by 2.
            y = y - 2.0
            twice_area = (((((((((((((-0.000045255659 * y
                + 0.000152529290) * y - 0.000019538132) * y
                - 0.000676904986) * y + 0.001390604284) * y
                - 0.000794620820) * y - 0.002034254874) * y
                + 0.006549791214) * y - 0.010557625006) * y
                + 0.011630447319) * y - 0.009279453341) * y
                + 0.005353579108) * y - 0.002141268741) * y
                + 0.000535310849) * y + 0.999936657524
    # Fold the symmetric 2|z|-interval area into a left-tail probability.
    if z > 0.0:
        return (twice_area + 1.0) * 0.5
    return (1.0 - twice_area) * 0.5
def ksprob(alam):
    """
    Computes a Kolmolgorov-Smirnov t-test significance level. Adapted from
    Numerical Recipies.

    Usage:   lksprob(alam)
    """
    exponent = -2.0 * alam * alam
    signed_fac = 2.0          # alternating-sign factor of the series
    total = 0.0
    previous_term = 0.0       # magnitude of the last accepted term
    for j in range(1, 201):
        term = signed_fac * math.exp(exponent * j * j)
        total = total + term
        # Converged when the new term is negligible relative to the last
        # term or to the accumulated sum.
        if math.fabs(term) <= (0.001 * previous_term) or math.fabs(term) < (1.0e-8 * total):
            return total
        signed_fac = -signed_fac
        previous_term = math.fabs(term)
    return 1.0  # Get here only if fails to converge; was 0.0!!
def fprob(dfnum, dfden, F):
    """
    Returns the (1-tailed) significance level (p-value) of an F
    statistic given the degrees of freedom for the numerator (dfR-dfF) and
    the degrees of freedom for the denominator (dfF).

    Usage:   lfprob(dfnum, dfden, F) where usually dfnum=dfbn, dfden=dfwn
    """
    # The F survival function reduces to an incomplete-beta evaluation.
    return betai(0.5 * dfden, 0.5 * dfnum, dfden / float(dfden + dfnum * F))
def betacf(a, b, x):
    """
    This function evaluates the continued fraction form of the incomplete
    Beta function, betai. (Adapted from: Numerical Recipies in C.)

    Usage:   lbetacf(a,b,x)

    NOTE(review): if the recurrence fails to converge within ITMAX
    iterations this function only prints a warning and implicitly returns
    None, which will break callers doing arithmetic on the result --
    confirm whether raising would be preferable; left as-is here.
    """
    ITMAX = 200     # maximum number of continued-fraction iterations
    EPS = 3.0e-7    # relative convergence tolerance
    bm = az = am = 1.0
    qab = a + b
    qap = a + 1.0
    qam = a - 1.0
    bz = 1.0 - qab * x / qap
    for i in range(ITMAX + 1):
        em = float(i + 1)
        tem = em + em
        # Even step of the recurrence.
        d = em * (b - em) * x / ((qam + tem) * (a + tem))
        ap = az + d * am
        bp = bz + d * bm
        # Odd step of the recurrence.
        d = -(a + em) * (qab + em) * x / ((qap + tem) * (a + tem))
        app = ap + d * az
        bpp = bp + d * bz
        aold = az
        # Renormalize by the denominator to keep the terms well-scaled.
        am = ap / bpp
        bm = bp / bpp
        az = app / bpp
        bz = 1.0
        # Stop when successive approximations agree to within EPS.
        if (abs(az - aold) < (EPS * abs(az))):
            return az
    print('a or b too big, or ITMAX too small in Betacf.')
def gammln(xx):
    """
    Returns the gamma function of xx.
        Gamma(z) = Integral(0,infinity) of t^(z-1)exp(-t) dt.
    (Adapted from: Numerical Recipies in C.)

    Usage:   lgammln(xx)
    """
    # Series coefficients of the approximation (Numerical Recipes values).
    series_coefficients = [76.18009173, -86.50532033, 24.01409822,
                           -1.231739516, 0.120858003e-2, -0.536382e-5]
    x = xx - 1.0
    tmp = x + 5.5
    tmp = tmp - (x + 0.5) * math.log(tmp)
    series = 1.0
    for coefficient in series_coefficients:
        x = x + 1
        series = series + coefficient / x
    return -tmp + math.log(2.50662827465 * series)
def betai(a, b, x):
"""
Returns the incomplete beta function:
I-sub-x(a,b) = 1/B(a,b)*(Integral(0,x) of t^(a-1)(1-t)^(b-1) dt)
where a,b>0 and B(a,b) = G(a)*G(b)/(G(a+b)) where G(a) is the gamma
function of a. The continued fraction formulation is implemented here,
using the betacf function. (Adapted from: Numerical Recipies in C.)
Usage: lbetai(a,b,x)
"""
if (x < 0.0 or x > 1.0):
raise ValueError('Bad x in lbetai')
if (x == 0.0 or x == 1.0):
bt = 0.0
else:
bt = math.exp(gammln(a + b) - gammln(a) - gammln(b) + a * math.log(x) + b * math.log(1.0 - x))
if (x < (a + 1.0) / (a + b + 2.0)):
return bt | |
target set, when None (default) the target_name
will be set equal to the name
>>> from amuse.datamodel import Particles
>>> from amuse.units import units
>>> particles1 = Particles(2)
>>> particles2 = particles1.copy()
>>> particles1.mass = 1 | units.m
>>> particles2.mass = 3 | units.m
>>> channel = particles1.new_channel_to(particles2)
>>> channel.copy_attribute("mass", "mass_from_p2")
>>> print particles2.mass_from_p2
[1.0, 1.0] m
>>> print particles2.mass - particles2.mass_from_p2
[2.0, 2.0] m
"""
if target_name is None:
target_name = name
self._reindex()
if len(self.keys) == 0:
return
data = self.from_particles.get_values_in_store(self.from_indices, [name,])
self.to_particles.set_values_in_store(self.to_indices, [target_name,], data)
def transform_values(self, attributes, f):
values = self.from_particles.get_values_in_store(self.from_indices, attributes)
return f(*values)
def transform(self, target, function, source):
""" Copy and transform values of one attribute from the source set to the target set.
:argument target: name of the attributes in the target set
:argument function: function used for transform, should return tuple
:argument source: name of the attribute in the source set
>>> from amuse.datamodel import Particles
>>> particles1 = Particles(3)
>>> particles2 = particles1.copy()
>>> particles1.attribute1 = 1
>>> particles1.attribute2 = 2
>>> channel = particles1.new_channel_to(particles2)
>>> channel.transform(["attribute3"], lambda x,y: (x+y,), ["attribute1","attribute2"])
>>> print particles2.attribute3
[3 3 3]
>>> channel.transform(["attribute1","attribute1b"], lambda x: (x,2*x), ["attribute1"])
>>> print particles2.attribute1b
[2 2 2]
"""
self._reindex()
if len(self.keys) == 0:
return
if function is None:
function=lambda *x : x
if not self.to_particles.can_extend_attributes():
target_attributes = self.to_particles.get_defined_settable_attribute_names()
if not set(target).issubset(set(target_attributes)):
raise Exception("trying to set unsettable attributes {0}".format(
list(set(target)-set(target_attributes))) )
converted=self.transform_values(source, function)
if len(converted) != len(target):
raise Exception("function {0} returns {1} values while target attributes are {2} of length {3}".format(
function.__name__, len(converted), target, len(target)))
self.to_particles.set_values_in_store(self.to_indices, target, converted)
class Channels(object):
    """A collection of channels that can be copied with a single call."""

    def __init__(self, channels=None):
        self._channels = []
        if channels is not None:
            self.add_channels(channels)

    def add_channel(self, channel):
        # Register one channel.
        self._channels.append(channel)

    def add_channels(self, channels):
        # Merge another Channels collection, or any iterable of channels.
        if isinstance(channels, Channels):
            self._channels.extend(channels._channels)
        else:
            for one_channel in iter(channels):
                self.add_channel(one_channel)

    def remove_channel(self, channel):
        self._channels.remove(channel)

    def copy(self):
        # Invoke copy() on every registered channel, in registration order.
        for one_channel in self._channels:
            one_channel.copy()
class ParticlesWithNamespacedAttributesView(AbstractParticleSet):
    """
    A view on prefixed attributes of a particle set.

    Attribute ``x`` of this view is stored as ``<namespace>__x`` on the
    wrapped set; keys, indices and versioning are shared with the wrapped set.
    """

    def __init__(self, particles, namespace):
        AbstractParticleSet.__init__(self, particles)
        self._private.particles = particles
        self._private.namespace = namespace

    def _as_namespaced_names(self, attributes):
        # Translate plain attribute names into their prefixed storage names.
        prefix = self._private.namespace + '__'
        return [prefix + one_name for one_name in attributes]

    def _without_namespace_prefix(self, names):
        # Keep only this namespace's names, stripped of their prefix.
        prefix = self._private.namespace + '__'
        return [one_name[len(prefix):] for one_name in names if one_name.startswith(prefix)]

    def _namespaced_view_of(self, particles):
        # Wrap another particle set in a view with the same namespace.
        return ParticlesWithNamespacedAttributesView(particles, self._private.namespace)

    def compressed(self):
        return self._namespaced_view_of(self._private.particles.compressed())

    def get_valid_particles_mask(self):
        return self._private.particles.get_valid_particles_mask()

    def __getitem__(self, index):
        selected_keys = self.get_all_keys_in_store()[index]
        if selected_keys is ma.masked:
            return None
        if hasattr(selected_keys, '__iter__'):
            return self._subset(selected_keys)
        return Particle(selected_keys, self)

    def _get_version(self):
        return self._private.particles._get_version()

    def shallow_copy(self):
        return self._namespaced_view_of(self._private.particles.shallow_copy())

    def unconverted_set(self):
        return self._private.particles

    def can_extend_attributes(self):
        return self._private.particles.can_extend_attributes()

    def add_particles_to_store(self, keys, attributes = [], values = []):
        self._private.particles.add_particles_to_store(
            keys, self._as_namespaced_names(attributes), values)

    def remove_particles_from_store(self, keys):
        self._private.particles.remove_particles_from_store(keys)

    def get_values_in_store(self, indices, attributes):
        return self._private.particles.get_values_in_store(
            indices, self._as_namespaced_names(attributes))

    def set_values_in_store(self, indices, attributes, values):
        self._private.particles.set_values_in_store(
            indices, self._as_namespaced_names(attributes), values)

    def get_attribute_names_defined_in_store(self):
        return self._without_namespace_prefix(
            self._private.particles.get_attribute_names_defined_in_store())

    def get_settable_attribute_names_defined_in_store(self):
        return self._without_namespace_prefix(
            self._private.particles.get_settable_attribute_names_defined_in_store())

    def get_all_keys_in_store(self):
        return self._private.particles.get_all_keys_in_store()

    def get_all_indices_in_store(self):
        return self._private.particles.get_all_indices_in_store()

    def get_indices_of_keys(self, keys):
        return self._private.particles.get_indices_of_keys(keys)

    def has_key_in_store(self, key):
        return self._private.particles.has_key_in_store(key)

    def as_set(self):
        return ParticlesSubset(self, self.get_all_keys_in_store())

    def get_timestamp(self):
        return self._private.particles.get_timestamp()

    def savepoint(self, timestamp=None):
        return self._namespaced_view_of(self._private.particles.savepoint(timestamp))

    def previous_state(self):
        return self._namespaced_view_of(self._private.particles.previous_state())
class DomainAttribute(DerivedAttribute):
    """
    Combine multiple attributes into the same namespace
    """

    def __init__(self, name):
        self.name = name

    def get_values_for_entities(self, instance):
        # Whole-set access: expose the namespace as a view on the set.
        return ParticlesWithNamespacedAttributesView(instance, self.name)

    def set_values_for_entities(self, instance, value):
        raise AttributeError('"{0}" is already defined as a namespace attribute, you cannot assign a value to it'.format(self.name))

    def get_value_for_entity(self, instance, particle, index):
        # Single-particle access: the same particle, bound to the namespaced view.
        namespaced_view = ParticlesWithNamespacedAttributesView(particle.particles_set, self.name)
        return Particle(
            particle.key,
            namespaced_view,
            particle._set_index,
            particle._set_version
        )

    def set_value_for_entity(self, instance, key, vector):
        raise AttributeError('"{0}" is already defined as a namespace attribute, you cannot assign a value to it'.format(self.name))
class Stars(Particles):
    # A Particles subclass that adds no behaviour of its own; presumably
    # kept as a legacy alias -- confirm before removing.
    pass
class Particle(object):
"""A physical object or a physical region simulated as a
physical object (cloud particle).
All attributes defined on a particle are specific for
that particle (for example mass or position). A particle contains
a set of attributes, some attributes are *generic* and applicable
for multiple modules. Other attributes are *specific* and are
only applicable for a single module.
"""
    __slots__ = ("key", "particles_set", "_set_index", "_set_version")
    # these are defined so that numpy conversion is way faster
    # otherwise it would go through the __getattr__ function
    # which will slow it down by a factor 3
    if compare_version_strings(numpy.__version__, '1.7.0') < 0:
        __array_interface__ = {'shape':() }
    else:
        __array_interface__ = {'shape':(),'typestr':'|O4' }
    def __len__(self):
        # A particle is scalar-like: refuse the sequence protocol.
        raise AttributeError()
    def __iter__(self):
        # A particle is scalar-like: refuse the iteration protocol.
        raise AttributeError()
    # Block the remaining numpy conversion hooks so numpy treats a Particle
    # as an opaque object rather than a sequence.
    __array_struct__ = UndefinedAttribute()
    __array__ = UndefinedAttribute()
def __init__(self, key = None, particles_set = None, set_index = None, set_version = -1, **keyword_arguments):
if particles_set is None:
if key == None:
particles_set = Particles(1)
key = particles_set.get_all_keys_in_store()[0]
else:
particles_set = Particles(1, keys = [key])
object.__setattr__(self, "key", key)
object.__setattr__(self, "particles_set", particles_set)
object.__setattr__(self, "_set_index", set_index)
object.__setattr__(self, "_set_version", set_version)
for attribute_name in keyword_arguments:
attribute_value = keyword_arguments[attribute_name]
setattr(self, attribute_name, attribute_value)
    def __getstate__(self):
        # Pickle as (key, standalone copy of the one-particle subset).
        return (self.key, self.as_set().copy())
    def __setstate__(self, key_and_set):
        key, particles_set = key_and_set
        # object.__setattr__ bypasses the overridden __setattr__, which would
        # otherwise try to write these bookkeeping fields onto the set.
        object.__setattr__(self, "key", key)
        object.__setattr__(self, "particles_set", particles_set)
        object.__setattr__(self, "_set_index", None)
        # NOTE(review): __init__ defaults _set_version to -1 but None is used
        # here; both force re-resolution on next access -- confirm intentional.
        object.__setattr__(self, "_set_version", None)
    def __setattr__(self, name_of_the_attribute, new_value_for_the_attribute):
        # All attribute writes are stored on the backing particle set, never
        # on the Particle object itself (it only caches key/index/version).
        if self._set_index is None or self._set_version != self.particles_set._get_version():
            # Cached index missing or stale (the set changed): re-resolve it.
            # object.__setattr__ avoids recursing into this method.
            object.__setattr__(self, "_set_index", self.particles_set.get_indices_of_keys([self.key])[0])
            object.__setattr__(self, "_set_version", self.particles_set._get_version())
        self.particles_set._set_value_of_attribute(
            self._set_index,
            name_of_the_attribute,
            new_value_for_the_attribute
        )
def __getattr__(self, name_of_the_attribute):
if self._set_index is None or self._set_version != self.particles_set._get_version():
object.__setattr__(self, "_set_index", self.particles_set.get_indices_of_keys([self.key])[0])
object.__setattr__(self, "_set_version", self.particles_set._get_version())
try:
return self.particles_set._get_value_of_attribute(self, self._set_index, name_of_the_attribute)
except Exception as ex:
raise AttributeError("You tried to access attribute '{0}' but this attribute is not defined for this set.".format(name_of_the_attribute, ex))
    def children(self):
        # Particles of the same set whose 'parent' attribute is this particle.
        return self.particles_set.select(lambda x : x == self, ["parent"])
    def descendents(self):
        # All transitive children: repeatedly expand the frontier and union
        # each generation into the result subset.
        result = self.children()
        stack = list(result)
        while len(stack) > 0:
            current = stack.pop()
            children = current.children()
            result = result.union(children)
            stack.extend(children)
        return result
    def add_child(self, child):
        # Establish a parent/child relation; both particles must share a set.
        if self.particles_set != child.particles_set:
            raise exceptions.AmuseException("The parent and child particles should be in the same set")
        child.parent = self
    def copy(self):
        # Copy the whole backing set and return this particle's counterpart
        # in the copy.
        return self.particles_set.copy()._get_particle(self.key)
    def empty_copy(self):
        # A copy of this particle with no attribute data: only its key plus
        # the derived-attribute definitions of the original set.
        keys = [self.key]
        result = Particles()
        result.add_particles_to_store(keys, [],[])
        object.__setattr__(result, "_derived_attributes", CompositeDictionary(self.particles_set._derived_attributes))
        return result._get_particle(self.key)
    def __add__(self, particles):
        """
        Returns a particle subset, composed of the given
        particle(s) and this particle. Attribute values are
        not stored by the subset. The subset provides a view
        on the particles.

        :parameter particles: particle(s) to be added to self.

        >>> particles = Particles(2)
        >>> particle1 = particles[0]
        >>> particle1.x = 1.0 | units.m
        >>> particle2 = particles[1]
        >>> particle2.x = 2.0 | units.m
        >>> new_set = particle1 + particle2
        >>> new_set  # doctest:+ELLIPSIS
        <amuse.datamodel.particles.ParticlesSubset object at 0x...>
        >>> print len(new_set)
        2
        >>> print new_set.x
        [1.0, 2.0] m
        """
        # Delegate to the subset implementation of addition.
        return self.as_set().__add__(particles)
    def __sub__(self, particles):
        """
        Raises an exception: cannot subtract particle(s)
        from a particle.
        """
        raise exceptions.AmuseException("Cannot subtract particle(s) from a particle.")
    def __str__(self):
        """
        Display string for a particle

        >>> p = Particle(10)
        >>> p.x = 10.2 | units.m
        >>> p.mass = 5 | units.kg
        >>> print p # doctest: +ELLIPSIS
        Particle(10, set=<...>
        , mass=5.0 kg
        , x=10.2 m)
        """
        # Refresh the cached index/version before reading attribute values.
        if self._set_index is None or self._set_version != self.particles_set._get_version():
            object.__setattr__(self, "_set_index", self.particles_set.get_indices_of_keys([self.key])[0])
            object.__setattr__(self, "_set_version", self.particles_set._get_version())
        output = 'Particle('
        output += str(self.key)
        output += ', set=<{0}>'.format(id(self.particles_set))
        # One ", name=value" line per stored attribute.
        for name, value in self.particles_set._values_of_particle(self._set_index):
            output += '\n , '
            output += name
            output += '='
            if isinstance(value, Particle):
                # Render particle-valued attributes compactly (key + set id)
                # instead of recursing into their full representation.
                output += 'Particle('
                output += str(value.key)
                output += ', set=<{0}>'.format(id(value.particles_set))
                output += ')'
            elif isinstance(value, AbstractParticleSet):
                # Render set-valued attributes as a short summary.
                output += value.__class__.__name__
                output += '(len={0}, id={1})'.format(len(value), id(value))
            else:
                output += str(value)
        output += ')'
        return output
def __dir__(self):
result = []
result.extend(dir(type(self)))
result.extend(self.particles_set._attributes_for_dir())
return result
def __eq__(self, other):
return isinstance(other, type(self)) and other.key == self.key
def __hash__(self):
return self.key.__hash__()
def __ne__(self, other):
return not (isinstance(other, type(self)) and other.key == self.key)
    def set_default(self, attribute, quantity):
        # Store the value only when the backing set does not define the
        # attribute yet.
        if not attribute in self.particles_set.get_attribute_names_defined_in_store():
            # NOTE(review): other call sites pass the resolved set index as
            # the first argument (see __setattr__); here the Particle itself
            # is passed -- confirm _set_value_of_attribute accepts that.
            self.particles_set._set_value_of_attribute(self, attribute, quantity)
    def get_timeline_of_attribute(self, attribute):
        # History of one attribute for this particle, delegated to the set.
        return self.particles_set.get_timeline_of_attribute(self.key, attribute)
    def get_timeline_of_attribute_as_vector(self, attribute):
        return self.particles_set.get_timeline_of_attribute_as_vector(self.key, attribute)
    def get_timeline_of_attributes(self, attributes):
        return self.particles_set.get_timeline_of_attributes(self.key, attributes)
def as_set(self):
"""
Returns a subset view on the set containing this particle. The
subset view includes | |
selection)
columns = sorted(index.column() for index in selection)
rowcount = rows[-1] - rows[0] + 1
colcount = columns[-1] - columns[0] + 1
table = [[''] * colcount for _ in range(rowcount)]
for index in selection:
row = index.row() - rows[0]
column = index.column() - columns[0]
table[row][column] = index.data().toString()
stream = io.BytesIO()
csv.writer(stream, delimiter='\t').writerows(table)
QtGui.qApp.clipboard().setText(stream.getvalue())
# endregion Data manipulation
# endregion UI methods
# region Field selection
    def build_field_checkboxes(self):
        # Build one checkbox per entry in the fieldManagement dict, add it to
        # the field-list pane, and keep a reference in fieldManagement so the
        # box can be toggled programmatically later. Any user toggle triggers
        # a full recalculation of the table.
        for key in self.fieldManagement:
            columnName = self.fieldManagement[key]['name']
            item = QtGui.QCheckBox()
            item.setMinimumSize(QtCore.QSize(27, 27))
            item.setMaximumSize(QtCore.QSize(self.optionWidth, 27))
            item.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred,
                                                 QtGui.QSizePolicy.MinimumExpanding))
            item.setText(columnName)
            # Two-state checkbox; all fields start unchecked.
            item.setTristate(False)
            item.setCheckState(QtCore.Qt.Unchecked)
            self.fieldManagement[key]['itemWidget'] = item
            self.fieldList.addWidget(self.fieldManagement[key]['itemWidget'])
            self.fieldManagement[key]['itemWidget'].stateChanged.connect(self.recalculate)
# region Functions
def silent_checkbox_change(self, checkbox, newstate=False):
"""
Changes value of checkbox without triggering any attached functions
:param checkbox: QCheckbox object
:param newstate: Desired state (True/False)
:return:
"""
checkbox.blockSignals(True)
if newstate:
checkbox.setCheckState(QtCore.Qt.Checked)
else:
checkbox.setCheckState(QtCore.Qt.Unchecked)
checkbox.blockSignals(False)
    def recheck_fields(self):
        """
        Makes sure all fields shown in the actual data table are checked in Select Column pane
        """
        existingHeaders = []  # Get list of headers, since they can't be pulled out of model as list (AFAIK)
        for j in xrange(self.model.columnCount()):  # for all fields available in model
            columnName = unicode(self.model.headerData(j, QtCore.Qt.Horizontal).toString())  # .replace('\n(NR)',
            # ' (NR)')
            if columnName == 'Bin':
                pass  # skip Bin, which is only added by the analysis process if binning
            else:
                # Check the matching box without firing its stateChanged slot.
                self.silent_checkbox_change(self.fieldManagement[columnName]['itemWidget'], True)
                existingHeaders.append(columnName)
        # Mark remaining fields as not visible
        # remainingHeaders = list(filter(lambda a: a not in analysis.field_list(), existingHeaders))
        for columnName in self.fieldManagement:
            if columnName in existingHeaders:
                self.fieldManagement[columnName]['visible'] = True
            else:
                self.fieldManagement[columnName]['visible'] = False
    def field_preset_select(self, pattern=None):
        """
        Apply a field-selection preset to every enabled field checkbox.

        :param pattern: 'all' checks everything, 'none' unchecks everything,
            'nr' (or any other value) derives each field's state from the
            No Response / Probe / Raw preset checkboxes.
        """
        for columnName in self.fieldManagement:
            # Only fields currently enabled in the pane are affected.
            if self.fieldManagement[columnName]['itemWidget'].isEnabled():
                if pattern == 'all':
                    self.silent_checkbox_change(self.fieldManagement[columnName]['itemWidget'], True)
                elif pattern == 'none':
                    self.silent_checkbox_change(self.fieldManagement[columnName]['itemWidget'], False)
                else:
                    # check the current column against all preset checkboxes, and if any are false, don't include it
                    # fieldName = unicode(self.fieldList.item(x).text())
                    columnNameF = self.fieldManagement[columnName]['name']
                    if pattern == 'nr':
                        # parameter specifically for setting the nr preset checkbox when new bird(s) is/are selected
                        self.silent_checkbox_change(self.fieldManagement['Trials']['itemWidget'], True)
                    nrCheck = self.noResponse_Checkbox.isChecked()
                    probeCheck = self.probe_Checkbox.isChecked()
                    rawCheck = self.raw_Checkbox.isChecked()
                    # Collect one vote per preset rule; the field stays
                    # checked only if every applicable rule votes True.
                    columnChecks = []
                    # No Response checkbox
                    if columnNameF in ["d'", 'Beta', 'S+ Rate', 'S- Rate', 'Total Corr',
                                       "Probe d'", 'Probe Beta', 'Probe S+ Rate', 'Probe S- Rate', 'Probe Tot Corr']:
                        if nrCheck is True:
                            columnChecks.append(False)
                        else:
                            columnChecks.append(True)
                    elif columnNameF in ["d' (NR)", 'Beta (NR)', 'S+ (NR) Rate', 'S- (NR) Rate', 'Total Corr (NR)',
                                         "Probe d' (NR)", 'Probe Beta (NR)', 'Probe S+ (NR) Rate', 'Probe S- (NR) Rate',
                                         'Probe Tot Corr (NR)']:
                        if nrCheck is True:
                            columnChecks.append(True)
                        else:
                            columnChecks.append(False)
                    # elif pattern == 'probe':
                    if columnNameF in ["Probe d'", 'Probe Beta', 'Probe Trials',
                                       'Probe Hit', 'Probe Miss', 'Probe Miss (NR)', 'Probe FA', 'Probe CR',
                                       'Probe CR (NR)',
                                       'Probe S+ Rate', 'Probe S- Rate', 'Probe Tot Corr',
                                       "Probe d' (NR)", 'Probe Beta (NR)',
                                       'Probe S+ (NR) Rate', 'Probe S- (NR) Rate', 'Probe Tot Corr (NR)']:
                        if probeCheck is True:
                            columnChecks.append(True)
                        else:
                            columnChecks.append(False)
                    # elif pattern == 'raw':
                    if columnNameF in ['Hit', 'Miss', 'Miss (NR)', 'FA', 'CR', 'CR (NR)', 'Probe Hit', 'Probe Miss',
                                       'Probe Miss (NR)', 'Probe FA', 'Probe CR', 'Probe CR (NR)']:
                        if rawCheck is True:
                            columnChecks.append(True)
                        else:
                            columnChecks.append(False)
                    if len(columnChecks) == 0:
                        # skip any columns that weren't affected by preset checkboxes
                        pass
                    elif all(columnChecks):
                        self.silent_checkbox_change(self.fieldManagement[columnName]['itemWidget'], True)
                    elif not all(columnChecks):
                        self.silent_checkbox_change(self.fieldManagement[columnName]['itemWidget'], False)
            else:
                pass
        # Rebuild the table once after all checkboxes are settled.
        self.recalculate()
# endregion Functions
# endregion Field Selection
# region Field grouping
    def create_grouping_checkbox(self, group_name, group_type=None):
        # Create a "group by" checkbox row for the given field and register
        # its widgets in self.groupByFields. When group_type is not None the
        # row additionally gets a spinbox so data can be grouped in chunks
        # ("Every N <group_name>").
        self.groupByFields[group_name] = {}
        groupByCheckbox = QtGui.QCheckBox(self)
        groupByCheckbox.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed))
        groupByCheckbox.setFixedHeight(27)
        groupByCheckbox.setMaximumWidth(300)
        groupByCheckbox.setObjectName(_from_utf8("groupBy{}_Checkbox".format(group_name)))
        if group_type is None:
            groupByCheckbox.setText(group_name)
            self.groupByFields[group_name]['checkbox'] = groupByCheckbox
            self.groupGrid.addRow(self.groupByFields[group_name]['checkbox'])
        else:
            # to dynamically group by a certain number of fields
            groupByCheckbox.setText("Every")
            self.groupByFields[group_name]['checkbox'] = groupByCheckbox
            rangeBox = QtGui.QSpinBox(self)
            rangeBox.setFixedHeight(27)
            rangeBox.setMaximumWidth(300)
            rangeBox.setSuffix(' ' + group_name)
            rangeBox.setMinimum(1)
            rangeBox.setMaximum(9999)
            rangeBox.setSingleStep(5)
            rangeBox.setValue(50)
            self.groupByFields[group_name]['range'] = rangeBox
            self.groupGrid.addRow(self.groupByFields[group_name]['checkbox'], self.groupByFields[group_name][
                'range'])
    def group_by(self, group='group'):
        """
        Constructs groupby parameter that pandas uses in analysis.py to group the data - there may be a better way
        to do this:
        Currently rebuilds the self.dataGroups var each time a box is checked or unchecked, which requires adding a
        new checkbox for each column that could be grouped
        Called as part of recalculate method rather than forcing a recalculation on its own
        """
        self.dataGroups = []
        if group == 'raw':
            # if "show raw trial data" is checked/unchecked by user - code should only get to this section if user
            # clicks checkbox
            if self.groupByDisable_Checkbox.isChecked():
                # Raw mode: clear every grouping checkbox and show all fields.
                for field in self.groupByFields:
                    self.silent_checkbox_change(self.groupByFields[field]['checkbox'], newstate=False)
                self.field_preset_select('all')
            else:
                # if user unchecks, check at least the first box so grouping is started
                self.silent_checkbox_change(self.groupByFields['Subject']['checkbox'], newstate=True)
                self.silent_checkbox_change(self.groupByFields['Date']['checkbox'], newstate=True)
                self.silent_checkbox_change(self.groupByFields['Block']['checkbox'], newstate=True)
            self.recalculate()
        else:
            atLeastOneCheck = False  # tracking if at least one grouping box is checked
            for field in self.groupByFields:
                if self.groupByFields[field]['checkbox'].isChecked():
                    if 'range' in self.groupByFields[field]:
                        # Chunked grouping: record the field with its chunk size.
                        fieldRange = int(self.groupByFields[field]['range'].value())
                        self.dataGroups.append([field, fieldRange])
                    else:
                        self.dataGroups.append(field)
                    atLeastOneCheck = True
            if atLeastOneCheck is True and self.groupByDisable_Checkbox.isChecked():
                # uncheck the 'show raw data' checkbox if any of the groupby checkboxes are checked
                self.silent_checkbox_change(self.groupByDisable_Checkbox, newstate=False)
        # enable/disable raw field checkboxes depending on group state
        for field in self.fieldManagement:
            fieldType = self.fieldManagement[field]['type']
            if fieldType == 'raw' or fieldType == 'index':
                # raw and index fields can't be viewed for grouped data other than as a grouping index (most are
                # text or values that can't return a single value for a group)
                if len(self.dataGroups) > 0:
                    self.fieldManagement[field]['itemWidget'].setEnabled(False)
                else:
                    self.fieldManagement[field]['itemWidget'].setEnabled(True)
            elif fieldType == 'group':
                # group fields only apply to grouped data, so if data isn't grouped these fields are useless
                if len(self.dataGroups) > 0:
                    self.fieldManagement[field]['itemWidget'].setEnabled(True)
                else:
                    self.fieldManagement[field]['itemWidget'].setEnabled(False)
# endregion
# region Filters
    def create_filter_objects(self):
        """
        Method to create the pyqt layout objects for filtering. Creates a layout for each extant field and fills it
        with checkboxes for each item in that field. Or, for fields that accept a value range, creates a
        user-enterable field and an equality field (e.g. for date)

        The created widgets are stored back into self.fieldManagement[columnName]['filter'] and appended to
        self.filterGrid.
        :return: None
        """
        for columnName in self.fieldManagement:
            if self.fieldManagement[columnName]['filter']['type'] == 'list':
                # --- 'list' filter: titled group box with Select All/None buttons above a
                # scrollable checkbox list; the checkboxes themselves are populated later
                # from the actual data values ---
                # create widget for both select all/none and field list
                parentGroupBox = QtGui.QGroupBox()
                # Stylesheet so the groupbox can have a border without giving borders to all child components
                parentGroupBox.setStyleSheet(
                    'QGroupBox {border: 1px solid gray;margin-top: 0.5em} ' +
                    'QGroupBox::title {subcontrol-origin: margin; left: 3px; padding: 0 3px 0 3px;}')
                parentGroupBox.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred,
                                                               QtGui.QSizePolicy.MinimumExpanding))
                parentGroupBox.setMaximumWidth(self.optionWidth)
                parentGroupBox.setMaximumHeight(180)
                parentGroupBox.setContentsMargins(3, 3, 3, 3)
                # Set title of groupbox
                parentGroupBox.setTitle(columnName)
                self.fieldManagement[columnName]['filter']['widget'] = parentGroupBox
                # Add sublayout for both all/none buttons and value list
                layout = QtGui.QVBoxLayout()
                layout.setSpacing(0)
                # Add widget for value list (that gets filled later)
                scrollArea = QtGui.QScrollArea()
                scrollArea.setMinimumHeight(40)
                scrollArea.setMaximumWidth(self.optionWidth)
                scrollArea.setMaximumHeight(150)
                scrollArea.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding,
                                                           QtGui.QSizePolicy.Expanding))
                scrollArea.setContentsMargins(0, 0, 0, 0)
                self.fieldManagement[columnName]['filter']['CheckBoxList'] = scrollArea
                # Add widget for select all/none
                self.fieldManagement[columnName]['filter']['selectAllNoneMenu'] = QtGui.QWidget()
                selectLayout = QtGui.QHBoxLayout()
                actionAll = QtGui.QPushButton("Select All", self)
                # b=columnName freezes the current column in the lambda (avoids the classic
                # late-binding-closure bug); the first argument absorbs clicked's bool value
                actionAll.clicked.connect(lambda _, b=columnName: self.apply_filter(
                    column_name=b, filter_value='all'))
                actionNone = QtGui.QPushButton("Select None", self)
                actionNone.clicked.connect(lambda _, b=columnName: self.apply_filter(
                    column_name=b, filter_value='none'))
                selectLayout.addWidget(actionAll)
                selectLayout.addWidget(actionNone)
                self.fieldManagement[columnName]['filter']['selectAllNoneMenu'].setLayout(selectLayout)
                layout.addWidget(self.fieldManagement[columnName]['filter']['selectAllNoneMenu'])
                layout.addWidget(self.fieldManagement[columnName]['filter']['CheckBoxList'])
                self.fieldManagement[columnName]['filter']['widget'].setLayout(layout)
                # self.fieldManagement[columnName]['filter']['CheckBoxList'].addSeparator()
                self.filterGrid.addWidget(self.fieldManagement[columnName]['filter']['widget'])
            elif self.fieldManagement[columnName]['filter']['type'] == 'range':
                # --- 'range' filter: titled group box with a comparison-operator combo box
                # and a calendar-popup date editor ---
                # create widget for both select all/none and field list
                parentGroupBox = QtGui.QGroupBox()
                # Stylesheet so the groupbox can have a border without giving borders to all child components
                parentGroupBox.setStyleSheet(
                    'QGroupBox {border: 1px solid gray;margin-top: 0.5em} ' +
                    'QGroupBox::title {subcontrol-origin: margin; left: 3px; padding: 0 3px 0 3px;}')
                parentGroupBox.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding,
                                                               QtGui.QSizePolicy.Expanding))
                parentGroupBox.setMaximumHeight(180)
                parentGroupBox.setMaximumWidth(self.optionWidth)
                parentGroupBox.setContentsMargins(3, 3, 3, 3)
                # Set title of groupbox
                parentGroupBox.setTitle(columnName)
                self.fieldManagement[columnName]['filter']['widget'] = parentGroupBox
                # Add sublayout for both all/none buttons and value list
                layout = QtGui.QHBoxLayout()
                # layout.setSpacing(0)
                # Widget for equality selection
                compareBox = QtGui.QComboBox()
                compareBox.addItems(['<', '<=', '>', '>=', '==', '!='])
                compareBox.setMaximumWidth(50)
                # NOTE(review): unlike the 'list' branch, no column_name is bound into this
                # connection — apply_filter presumably resolves the sender widget; confirm.
                compareBox.currentIndexChanged.connect(self.apply_filter)
                # Add widget for date
                dateBox = QtGui.QDateEdit()
                dateBox.setCalendarPopup(True)
                currDate = QtCore.QDate()  # currentDate is called this way to avoid PyCharm claiming parameter
                # 'self' is unfilled in currentDate()
                dateBox.setDate(currDate.currentDate())
                dateBox.setDisplayFormat('yyyy/MM/dd')
                dateBox.setMinimumWidth(110)
                dateBox.setMaximumWidth(150)
                dateBox.dateChanged.connect(self.apply_filter)
                layout.addSpacerItem(pyoperant_gui_layout.add_spacer(10))
                layout.addWidget(compareBox)
                layout.addWidget(dateBox)
                layout.addSpacerItem(pyoperant_gui_layout.add_spacer(10))
                self.fieldManagement[columnName]['filter']['widget'].setLayout(layout)
                # self.fieldManagement[columnName]['filter']['CheckBoxList'].addSeparator()
                self.filterGrid.addWidget(self.fieldManagement[columnName]['filter']['widget'])
def build_filter_value_lists(self):
"""
For each displayed field, create a value list of unique values from the extant data
Get values from model rather than table because table might be filtered and we want to see all available
fields
"""
for column in xrange(self.model.columnCount()):
if column == 'Bin':
pass # skip Bin, which | |
# Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import unittest
from collections import namedtuple
import hashlib
import time
from sawtooth_validator.database import dict_database
from sawtooth_validator.execution import context_manager
from sawtooth_validator.state.merkle import MerkleDatabase
from sawtooth_validator.state.state_delta_store import StateDeltaStore
from sawtooth_validator.protobuf.state_delta_pb2 import StateChange
from sawtooth_validator.protobuf.events_pb2 import Event
# Bundles the four address lists that characterize a transaction's declared
# (inputs/outputs) and actual (reads/writes) state access in these tests.
TestAddresses = namedtuple('TestAddresses',
                           ['inputs', 'outputs', 'reads', 'writes'])
class TestContextManager(unittest.TestCase):
    def setUp(self):
        """Create a ContextManager over fresh in-memory databases and record
        the initial (empty) merkle root that tests use as a starting state.
        """
        self.database_of_record = dict_database.DictDatabase()
        self.state_delta_store = StateDeltaStore(dict_database.DictDatabase())
        self.context_manager = context_manager.ContextManager(
            self.database_of_record, self.state_delta_store)
        self.first_state_hash = self.context_manager.get_first_root()
        # used for replicating state hash through direct merkle tree updates
        self.database_results = dict_database.DictDatabase()
    def tearDown(self):
        """Stop the ContextManager created in setUp after each test."""
        self.context_manager.stop()
def _create_address(self, value=None):
"""
Args:
value: (str)
Returns: (str) sha512 of value or random
"""
if value is None:
value = time.time().hex()
return hashlib.sha512(value.encode()).hexdigest()[:70]
def _setup_context(self):
# 1) Create transaction data
first_transaction = {'inputs': [self._create_address(a) for a in
['aaaa', 'bbbb', 'cccc']],
'outputs': [self._create_address(a) for a in
['llaa', 'aall', 'nnnn']]}
second_transaction = {
'inputs': [self._create_address(a) for a in
['aaaa', 'dddd']],
'outputs': [self._create_address(a) for a in
['zzzz', 'yyyy', 'tttt', 'qqqq']]
}
third_transaction = {
'inputs': [self._create_address(a) for a in
['eeee', 'dddd', 'ffff']],
'outputs': [self._create_address(a) for a in
['oooo', 'oozz', 'zzoo', 'ppoo', 'aeio']]
}
# 2) Create contexts based on that data
context_id_1 = self.context_manager.create_context(
state_hash=self.first_state_hash,
base_contexts=[],
inputs=first_transaction['inputs'],
outputs=first_transaction['outputs'])
context_id_2 = self.context_manager.create_context(
state_hash=self.first_state_hash,
base_contexts=[],
inputs=second_transaction['inputs'],
outputs=second_transaction['outputs'])
context_id_3 = self.context_manager.create_context(
state_hash=self.first_state_hash,
base_contexts=[],
inputs=third_transaction['inputs'],
outputs=third_transaction['outputs'])
# 3) Set addresses with values
self.context_manager.set(context_id_1, [{self._create_address(a): v}
for a, v in [('llaa', b'1'),
('aall', b'2'),
('nnnn', b'3')]])
self.context_manager.set(context_id_2, [{self._create_address(a): v}
for a, v in [('zzzz', b'9'),
('yyyy', b'11'),
('tttt', b'12'),
('qqqq', b'13')]])
self.context_manager.set(context_id_3, [{self._create_address(a): v}
for a, v in [('oooo', b'25'),
('oozz', b'26'),
('zzoo', b'27'),
('ppoo', b'28'),
('aeio', b'29')]])
# 4)
context_id = self.context_manager.create_context(
state_hash=self.first_state_hash,
base_contexts=[context_id_1, context_id_2, context_id_3],
inputs=[self._create_address(a)
for a in ['llaa', 'yyyy', 'tttt', 'zzoo']],
outputs=[self._create_address(a)
for a in ['llaa', 'yyyy', 'tttt', 'zzoo', 'aeio']])
return context_id
def _create_txn_inputs_outputs(self, start=None):
"""Create unique addresses that make up the inputs, outputs,
reads, and writes that are involved in a context.
Venn Diagram of relationship of disjoint sets that make up the
inputs, outputs, reads, and writes.
Knowledge of which disjoint set an address is a part of
may give knowledge about a test failure in the context
manager.
Inputs Outputs
+----------+--------------------------+-----------+
| | | |
| i___ |Reads io__ Writes _o__ |
| | | |
| +-----------+-----------+---------------+ |
| | | | | | | |
| | | | | | | |
| | | | | | | |
| | | | | | | |
| |i_r_ | ior_| iorw | io_w | _o_w | |
| | | | | | | |
| | | | | | | |
| | | | | | | |
| +-----------+-----------+---------------+ |
| | | |
| | | |
+----------+--------------------------+-----------+
Args:
start (int): An integer to start the sequence of integers being
hashed to addresses.
Returns (namedtuple): An object that holds inputs, outputs, reads,
and writes.
"""
if start is None:
start = 0
iorw = [self._create_address(str(i)) for i in range(start,
start + 10)]
i_r_ = [self._create_address(str(i)) for i in range(start + 10,
start + 20)]
ior_ = [self._create_address(str(i)) for i in range(start + 20,
start + 30)]
io__ = [self._create_address(str(i)) for i in range(start + 30,
start + 40)]
io_w = [self._create_address(str(i)) for i in range(start + 40,
start + 50)]
_o_w = [self._create_address(str(i)) for i in range(start + 50,
start + 60)]
_o__ = [self._create_address(str(i)) for i in range(start + 60,
start + 70)]
i___ = [self._create_address(str(i)) for i in range(start + 70,
start + 80)]
addresses = TestAddresses(
inputs=iorw + ior_ + io__ + io_w + i___,
outputs=ior_ + io__ + io_w + _o__ + _o_w,
reads=i_r_ + ior_,
writes=io_w + _o_w
)
return addresses
def test_execution_results(self):
"""Tests that get_execution_results returns the correct values."""
addr1 = self._create_address()
addr2 = self._create_address()
context_id = self.context_manager.create_context(
state_hash=self.context_manager.get_first_root(),
base_contexts=[],
inputs=[addr1, addr2],
outputs=[addr1, addr2])
sets = {addr1: b'1'}
events = [Event(
event_type=teststr,
attributes=[Event.Attribute(key=teststr, value=teststr)],
data=teststr.encode()) for teststr in ("test1", "test2")]
deletes = {addr2: None}
data = [(teststr, teststr.encode()) for teststr in ("test1", "test2")]
self.context_manager.set(context_id, [sets])
for event in events:
self.context_manager.add_execution_event(context_id, event)
self.context_manager.delete(context_id, deletes)
for datum in data:
self.context_manager.add_execution_data(
context_id, datum[0], datum[1])
results = self.context_manager.get_execution_results(context_id)
self.assertEqual(sets, results[0])
self.assertEqual(deletes, results[1])
self.assertEqual(events, results[2])
self.assertEqual(data, results[3])
def test_address_enforcement(self):
"""Tests that the ContextManager enforces address characteristics.
Notes:
1. Call get and set on the ContextManager with an address that is
under a namespace, but is an invalid address, and test that
the methods raise an AuthorizationException.
"""
# 1)
invalid_address1 = 'a' * 69 + 'n'
invalid_address2 = 'b' * 69 + 'y'
context_id1 = self.context_manager.create_context(
state_hash=self.context_manager.get_first_root(),
base_contexts=[],
inputs=['aaaaaaaa', 'bbbbbbbb'],
outputs=['aaaaaaaa', 'bbbbbbbb'])
with self.assertRaises(context_manager.AuthorizationException):
self.context_manager.get(
context_id=context_id1,
address_list=[invalid_address1, invalid_address2])
with self.assertRaises(context_manager.AuthorizationException):
self.context_manager.set(
context_id=context_id1,
address_value_list=[{invalid_address1: b'1'},
{invalid_address2: b'2'}])
def test_get_set_wrong_namespace(self):
"""Tests that getting and setting from outside the namespace will
raise a AuthorizationException.
Notes:
1. Assert that sets on a context with addresses that aren't
under an output namespace raise an AuthorizationException.
2. Assert that gets on a context with addresses that aren't under
an input namespace raise an AuthorizationException.
"""
wrong_namespace1 = self._create_address('a')[-10:]
wrong_namespace2 = '00000000'
ctx_1 = self.context_manager.create_context(
state_hash=self.context_manager.get_first_root(),
base_contexts=[],
inputs=[wrong_namespace1, wrong_namespace2],
outputs=[wrong_namespace1, wrong_namespace2])
# 1
with self.assertRaises(context_manager.AuthorizationException):
self.context_manager.set(
context_id=ctx_1,
address_value_list=[{self._create_address('a'): b'1'}])
with self.assertRaises(context_manager.AuthorizationException):
self.context_manager.set(
context_id=ctx_1,
address_value_list=[{self._create_address('c'): b'5'}])
# 2
with self.assertRaises(context_manager.AuthorizationException):
self.context_manager.get(
context_id=ctx_1,
address_list=[self._create_address('a')])
with self.assertRaises(context_manager.AuthorizationException):
self.context_manager.get(
context_id=ctx_1,
address_list=[self._create_address('c')])
def test_exception_on_invalid_input(self):
"""Tests that invalid inputs raise an exception. Tested with invalid
characters, odd number of characters, and too long namespace;
Notes:
1) Assert that inputs with a namespace with an odd number of
characters raise a CreateContextException.
2) Assert that inputs with a 71 character namespace raise a
CreateContextException.
3) Assert that inputs with a namespace with several invalid
characters raise a CreateContextException.
"""
invalid_input_output1 = '0db7e8zc' # invalid character
invalid_input_output2 = '7ef84ed' * 10 + '5' # too long, 71 chars
invalid_input_output3 = 'yy76ftoph7465873ddde389f' # invalid chars
valid_input_output1 = 'd8f533bbb74443222daad4'
valid_input_output2 = '77465847465784757848ddddddf'
state_hash = self.context_manager.get_first_root()
# 1
with self.assertRaises(context_manager.CreateContextException):
self.context_manager.create_context(
state_hash=state_hash,
base_contexts=[],
inputs=[invalid_input_output1, valid_input_output1],
outputs=[valid_input_output2])
# 2
with self.assertRaises(context_manager.CreateContextException):
self.context_manager.create_context(
state_hash=state_hash,
base_contexts=[],
inputs=[valid_input_output1, invalid_input_output2],
outputs=[valid_input_output2])
# 3
with self.assertRaises(context_manager.CreateContextException):
self.context_manager.create_context(
state_hash=state_hash,
base_contexts=[],
inputs=[invalid_input_output3, valid_input_output2],
outputs=[valid_input_output2, valid_input_output1])
def test_exception_on_invalid_output(self):
"""Tests that invalid outputs raise an exception. Tested with invalid
characters, odd number of characters, and too long namespace;
Notes:
1) Assert that outputs with a namespace with an odd number of
characters raise a CreateContextException.
2) Assert that outputs with a 71 character namespace raise a
CreateContextException.
3) Assert that outputs with a namespace with several invalid
characters raise a CreateContextException.
"""
invalid_input_output1 = '0db7e87' # Odd number of characters
invalid_input_output2 = '7ef84ed' * 10 + '5' # too long, 71 chars
invalid_input_output3 = 'yy76ftoph7465873ddde389f' # invalid chars
valid_input_output1 = 'd8f533bbb74443222daad4'
valid_input_output2 = '77465847465784757848ddddddff'
state_hash = self.context_manager.get_first_root()
# 1
with self.assertRaises(context_manager.CreateContextException):
self.context_manager.create_context(
state_hash=state_hash,
base_contexts=[],
inputs=[valid_input_output2, valid_input_output1],
outputs=[invalid_input_output1])
# 2
with self.assertRaises(context_manager.CreateContextException):
self.context_manager.create_context(
state_hash=state_hash,
base_contexts=[],
inputs=[valid_input_output1, valid_input_output2],
outputs=[invalid_input_output2])
# 3
with self.assertRaises(context_manager.CreateContextException):
self.context_manager.create_context(
state_hash=state_hash,
base_contexts=[],
inputs=[valid_input_output1, valid_input_output2],
outputs=[valid_input_output2, invalid_input_output3])
def test_namespace_gets(self):
"""Tests that gets for an address under a namespace will return the
correct value.
Notes:
1) Create ctx_1 and set 'b' to b'8'.
2) squash the previous context creating state_hash_1.
3) Create 2 contexts off of this state hash and assert
that gets on these contexts retrieve the correct
value for an address that is not fully specified in the inputs.
4) Set values to addresses in these contexts.
5) Create 1 context off of these prior 2 contexts and assert that
gets from this context retrieve the correct values for
addresses that are not fully specified in the | |
"""
NeuronBuild | <NAME> | 2013 | <EMAIL>
version date: January 26, 2020
A script to import swc files downloaded from neuromorpho.org, and create accurate
spline-based models of neuronal structure. The original swc file format is detailed here:
<NAME>, <NAME>, Pyapali, G.K, <NAME>. An on-line archive of reconstructed
hippocampal neurons. Journal of Neuroscience Methods. 84 1–2. pp 49-54. 1998
The reconstruction units are μm (micrometers).
Note: soma (cell body) definitions vary from file to file; this script assumes a three point spline
(which is very common). The soma object is disabled by default, since they rarely produce acceptable geometry.
Note: use of neuromorpho files may come with an obligation to cite the original publication.
How to use:
- add to your C4D scripts folder (on Mac OS X: Applications/MAXON/CINEMA 4D R14/library/scripts or in the user prefs folder)
- Browse and download a .swc or .swc.txt file from http://neuromorpho.org/
- Open the script manager in C4D, the script should be in the pop-up menu at the top of the window.
- In the Script manager in C4D load the NeuronBuild script and click "Execute".
- An import options dialog should appear; choose options for imported geometry, and click "Import File".
- In the open file dialog, choose the swc file and click "OK".
- A neuron should appear in your viewport.
- If all the geometry options are chosen, the geometry consists of a HyperNURBs object, which contains a Connect object, which
contains a null object, which contains the sweep objects that define the axons and dendrites.
Since the Soma (cell body) definition in the swc files is so rudimentary, you may want to
delete or hide it, and let the soma be defined by the merging dendrite roots. Within the sweep
objects are n-sided splines (named "Profile") set to 6-sides; you could search for these
objects and change the number of sides to 4 to simplify the geometry. Also in the SweepNURBs objects
are the splines that define the dendrite paths, and rail splines that define their radii.
The new volume builder and volume mesher objects allow for the creation of a single, optimized mesh. The resolution
of the mesh is set by the Voxel Size setting in the volume builder object. If the default resolution is too low
(which it almost ceratinly will be), gradually lower the Voxel Size until you have an acceptable result.
BE CAREFUL: jumping immediately to a very low voxel size may generate an enormous number of polygons, which could potentially
exhaust your system resources.
Version History
1.9 Updated for Python 3.x and Cinema4D R24 compatibility ("print" statement parentheses added).
1.8 Added options (disabled by default) to insert the model hierarchy in a Volume builder and Volume mesher object,
yielding a single mesh. Adjust the Voxel dimension in the volume builder to adjust resolution. Be careful not
to make the resolution too low, since this can potentially generate large poly counts.
Added support for other SWC entities. Removed R12, R13 compatibility.
1.7: Added support for glial processes (ID 7 in neuromorpho's version of the SWC file format).
This means that astrocytes are now supported
1.6: Incomplete conversion to Upy framework; do not use.
1.5: Stable version for C4D R13-R21
1.4: Modifications made by <NAME> on March 11, 2013
– support for Cinema 4D r12 and r13 (Oconnect and AddMultiLineEditText compatibility
– convert right handed .swc data to Cinema 4D left-handed with coordSystem test
– added safety test if user hits cancel button while in the system browser. Reports as 'Cancelled in Browser.'
This software is open-source under the MIT License.
Copyright (c) 2013-2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import c4d, os
from c4d import gui
#Welcome to the world of Python
#Create IDs for the GUI elements in the settings dialog (c4d dialogs address
#their widgets by integer ID)
DLG_GROUP_1 = 1000
DLG_GROUP_2 = 1001
TEXTBOX = 1002
CANCELBUTTON = 1003
IMPORTBUTTON = 1004
HNCHECK = 1005
CONNECTCHECK = 1006
RAILCHECK = 1007
SWEEPCHECK = 1008
PROFILETEXT = 1009
PROFILESIDES = 1010
VOLUMEBUILDERCHECK = 1011
VOLUMEMESHERCHECK = 1012
SINGLESPLINECHECK = 1013
#"left": convert the right-handed swc coordinates to Cinema 4D's left-handed system
coordsystem="left"
#running Cinema 4D version (presumably used for version-specific behavior elsewhere — confirm)
versionNumber=c4d.GetC4DVersion()
def somaMake(somaLines, neuroFile, fileName):
    """Create splines to make the cell body (soma).

    Builds a linear spline through the soma sample points and, when DoSweep
    is enabled, wraps it in a SweepNURBS with an n-sided profile and parents
    it under the null object named NullName.

    :param somaLines: parsed SWC rows for the soma; fields [2], [3], [4] are
        x, y, z and field [5] is the radius
    :param neuroFile: the full parsed SWC file (unused here; kept so the
        call signature mirrors splineMake)
    :param fileName: name of the imported file (unused here)

    NOTE(review): assumes somaLines is non-empty — sRad is only assigned
    inside the loop and is read afterwards when DoSweep is True.
    """
    #reference global variables that set model parameters
    global DoHN, DoConnect, DoRail, DoSweep, DoSingleSpline, NSides, DoVB, DoVM, NullName
    #create spline
    Spline = c4d.BaseObject(c4d.Ospline)
    #add name to spline
    Spline[c4d.ID_BASELIST_NAME] = "Soma"
    #set type to linear
    Spline[c4d.SPLINEOBJECT_TYPE] = 0
    #set number of points for spline
    Spline.ResizeObject(len(somaLines))
    for n in range(0, len(somaLines)):
        currLine = somaLines[n]
        #create the variables for positioning the points
        sx = float(currLine[2])
        sy = float(currLine[3])
        sz = float(currLine[4])
        if coordsystem=="left": #Convert right-handed swc data to C4D's left-handed system (GJ, March 11, 2013)
            sz = -sz
        sRad = float(currLine[5])
        pos = c4d.Vector(sx, sy, sz)
        Spline.SetPoint(n, pos)
    #insert the soma spline into the active document ('doc' is supplied by the C4D script environment)
    doc.InsertObject(Spline)
    Spline.Message(c4d.MSG_UPDATE) #notify C4D that the spline's points changed
    #create sweep object
    if DoSweep == True:
        Sweep = c4d.BaseObject(c4d.Osweep)
        Sweep[c4d.ID_BASELIST_NAME] = "Soma Sweep"
        Sweep[c4d.SWEEPOBJECT_CONSTANT] = False
        Sweep[c4d.SWEEPOBJECT_RAILDIRECTION] = False
        Sweep[c4d.CAP_TYPE] = 1
        Sweep.SetPhong(True, True, 80)
        Sweep.SetDeformMode(False)
        doc.InsertObject(Sweep)
        #create the profile for the sweep
        Profile = c4d.BaseObject(c4d.Osplinenside)
        Profile[c4d.ID_BASELIST_NAME] = "Profile"
        # NOTE(review): sRad is the radius of the LAST soma sample at this
        # point, so the whole profile uses that single radius — confirm intent
        Profile[c4d.PRIM_NSIDE_RADIUS] = sRad
        Profile[c4d.PRIM_NSIDE_SIDES] = NSides
        doc.InsertObject(Profile)
        Profile.Message(c4d.MSG_UPDATE) #Message Update
        Spline.InsertUnder(Sweep)
        Profile.InsertUnder(Sweep)
        Sweep.Message(c4d.MSG_UPDATE) #Message Update
    #add undo for spline creation
    doc.AddUndo(c4d.UNDOTYPE_NEW, Spline)
    #insert the spline under the null object
    if DoSweep == True:
        parent = doc.SearchObject(NullName)
        Sweep.InsertUnder(parent)
    c4d.EventAdd()
def splineMake(splineLines, neuroFile, fileName):
#Create splines to define the dendrites and axons
#reference global variables that set model parameters
global DoHN, DoConnect, DoRail, DoSweep, DoSingleSpline, NSides, DoVB, DoVM, NullName
#run through the data file, identifying contiguous spline segments. this is possible because
#the last value in the data refers to the "root" point of that branch segment
splineSep = []
for n in range(0, len(splineLines)):
currLine = splineLines[n]
sIndex = int(currLine[0])
sRoot = int(currLine[6])
if sIndex > sRoot + 1:
splineSep.append(sIndex)
#ensures that the final spline is included in the drawing
finalIndex = int(splineLines[len(splineLines)-1][0])
if finalIndex not in splineSep:
splineSep.append(finalIndex)
#now build the spline segments as separate splines
for n in range(0, len(splineSep)):
if n < (len(splineSep) - 1):
#determine the offset between segemnts; this is the number of vertices in each spline segment
offset = splineSep[n+1] - splineSep[n] + 1
#ensures that the final point is included in the drawing
if splineSep[n+1] == finalIndex:
offset = offset + 1
#special case: if the spline is not rooted, one fewer point
if int(neuroFile[splineSep[n]-1][6]) < 0:
offset = offset - 1
#create an empty spline
Spline = c4d.BaseObject(c4d.Ospline)
#allocate points for the spline
Spline.ResizeObject(offset)
#get the data line that starts the segment
splineStart = neuroFile[splineSep[n]-1]
splineType = int(splineStart[1])
#determine what type of spline it is and name it
if splineType == 2:
name = "Axon " + str(n)
elif splineType == 3:
name = "Basal Dendrite " + str(n)
elif splineType == 4:
name = "Apical Dendrite " + str(n)
elif splineType == 5:
name = "Custom " + str(n)
elif splineType == 6:
name = "Unspecified Neurites " + str(n)
elif splineType == 7:
name = "Glial Process " + str(n)
Spline[c4d.ID_BASELIST_NAME] = name
Spline[c4d.SPLINEOBJECT_TYPE] = 0
#find the root point for this segment by going back to
#the line in neuroFile that contains it
#if the point is unrooted, let it be 'its own root'
if int(splineStart[6]) >= 0:
rootLine = neuroFile[((int(splineStart[6])) - 1)]
else:
rootLine = splineStart
x, y, z = float(rootLine[2]), float(rootLine[3]), float(rootLine[4])
| |
<filename>qsiprep/workflows/dwi/base.py
"""
Orchestrating the dwi-preprocessing workflow
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autofunction:: init_dwi_preproc_wf
"""
import os
from nipype import logging
from nipype.pipeline import engine as pe
from nipype.interfaces import utility as niu
from nipype.interfaces.base import isdefined
from ...interfaces import DerivativesDataSink
from ...interfaces.reports import DiffusionSummary
from ...interfaces.confounds import DMRISummary
from ...interfaces.utils import TestInput
from ...engine import Workflow
# dwi workflows
from ..fieldmap.unwarp import init_fmap_unwarp_report_wf
from .hmc_sdc import init_qsiprep_hmcsdc_wf
from .fsl import init_fsl_hmc_wf
from .pre_hmc import init_dwi_pre_hmc_wf
from .util import _create_mem_gb, _get_wf_name
from .registration import init_b0_to_anat_registration_wf
from .confounds import init_dwi_confs_wf
# Floor for per-process memory estimates in GB — presumably used when sizing
# nipype nodes; confirm against callers.
DEFAULT_MEMORY_MIN_GB = 0.01
# Module logger under nipype's 'nipype.workflow' logging hierarchy.
LOGGER = logging.getLogger('nipype.workflow')
def init_dwi_preproc_wf(scan_groups,
output_prefix,
ignore,
b0_threshold,
motion_corr_to,
b0_to_t1w_transform,
hmc_model,
hmc_transform,
shoreline_iters,
impute_slice_threshold,
eddy_config,
reportlets_dir,
output_spaces,
output_dir,
dwi_denoise_window,
unringing_method,
dwi_no_biascorr,
no_b0_harmonization,
denoise_before_combining,
template,
omp_nthreads,
fmap_bspline,
fmap_demean,
use_syn,
force_syn,
low_mem,
sloppy,
source_file,
layout=None):
"""
This workflow controls the dwi preprocessing stages of qsiprep.
.. workflow::
:graph2use: orig
:simple_form: yes
from qsiprep.workflows.dwi.base import init_dwi_preproc_wf
wf = init_dwi_preproc_wf({'dwi_series': ['fake.nii'],
'fieldmap_info': {'suffix': None},
'dwi_series_pedir': 'j'},
output_prefix='',
ignore=[],
b0_threshold=100,
motion_corr_to='iterative',
b0_to_t1w_transform='Rigid',
hmc_model='3dSHORE',
hmc_transform='Rigid',
shoreline_iters=2,
impute_slice_threshold=0,
eddy_config=None,
reportlets_dir='.',
output_spaces=['T1w', 'template'],
dwi_denoise_window=5,
unringing_method='none',
dwi_no_biascorr=False,
no_b0_harmonization=False,
denoise_before_combining=True,
template='MNI152NLin2009cAsym',
output_dir='.',
omp_nthreads=1,
fmap_bspline=False,
fmap_demean=True,
use_syn=True,
force_syn=False,
low_mem=False,
sloppy=True,
source_file='/data/bids/sub-1/dwi/sub-1_dwi.nii.gz',
layout=None)
**Parameters**
dwi_groups : list of dicts
List of dicts grouping files by PE-dir
output_prefix : str
beginning of the output file name (eg 'sub-1_buds-j')
ignore : list
Preprocessing steps to skip (eg "fieldmaps")
b0_threshold : int
Images with b-values less than this value will be treated as a b=0 image.
freesurfer : bool
Enable FreeSurfer functional registration (bbregister) and
resampling dwi series to FreeSurfer surface meshes.
motion_corr_to : str
Motion correct using the 'first' b0 image or use an 'iterative'
method to motion correct to the midpoint of the b0 images
b0_to_t1w_transform : "Rigid" or "Affine"
Use a rigid or full affine transform for b0-T1w registration
hmc_model : 'none', '3dSHORE', 'eddy' or 'eddy_ingress'
Model used to generate target images for head motion correction. If 'none'
the transform from the nearest b0 will be used.
hmc_transform : "Rigid" or "Affine"
Type of transform used for head motion correction
impute_slice_threshold : float
Impute data in slices that are this many SDs from expected. If 0, no slices
will be imputed.
eddy_config: str
Path to a JSON file containing config options for eddy
dwi_denoise_window : int
window size in voxels for ``dwidenoise``. Must be odd. If 0, '
'``dwidwenoise`` will not be run'
unringing_method : str
algorithm to use for removing Gibbs ringing. Options: none, mrdegibbs
dwi_no_biascorr : bool
run spatial bias correction (N4) on dwi series
no_b0_harmonization : bool
skip rescaling dwi scans to have matching b=0 intensities across scans
denoise_before_combining : bool
'run ``dwidenoise`` before combining dwis. Requires ``combine_all_dwis``'
reportlets_dir : str
Directory in which to save reportlets
output_spaces : list
List of output spaces functional images are to be resampled to.
Some parts of pipeline will only be instantiated for some output s
paces.
Valid spaces:
- T1w
- template
template : str
Name of template targeted by ``template`` output space
output_dir : str
Directory in which to save derivatives
omp_nthreads : int
Maximum number of threads an individual process may use
fmap_bspline : bool
**Experimental**: Fit B-Spline field using least-squares
fmap_demean : bool
Demean voxel-shift map during unwarp
use_syn : bool
**Experimental**: Enable ANTs SyN-based susceptibility distortion
correction (SDC). If fieldmaps are present and enabled, this is not
run, by default.
force_syn : bool
**Temporary**: Always run SyN-based SDC
low_mem : bool
Write uncompressed .nii files in some cases to reduce memory usage
layout : BIDSLayout
BIDSLayout structure to enable metadata retrieval
num_dwi : int
Total number of dwi files that have been set for preprocessing
(default is 1)
sloppy : bool
Use low-quality settings for motion correction
source_file : str
The file name template used for derivatives
**Inputs**
t1_preproc
Bias-corrected structural template image
t1_brain
Skull-stripped ``t1_preproc``
t1_mask
Mask of the skull-stripped template image
t1_output_grid
Image to write out DWIs aligned to t1
t1_seg
Segmentation of preprocessed structural image, including
gray-matter (GM), white-matter (WM) and cerebrospinal fluid (CSF)
t1_tpms
List of tissue probability maps in T1w space
t1_2_mni_forward_transform
ANTs-compatible affine-and-warp transform file
t1_2_mni_reverse_transform
ANTs-compatible affine-and-warp transform file (inverse)
subjects_dir
FreeSurfer SUBJECTS_DIR
subject_id
FreeSurfer subject ID
t1_2_fsnative_forward_transform
LTA-style affine matrix translating from T1w to
FreeSurfer-conformed subject space
t1_2_fsnative_reverse_transform
LTA-style affine matrix translating from FreeSurfer-conformed
subject space to T1w
dwi_sampling_grid
A NIfTI1 file with the grid spacing and FoV to resample the DWIs
**Outputs**
dwi_t1
dwi series, resampled to T1w space
dwi_mask_t1
dwi series mask in T1w space
bvals_t1
bvalues of the dwi series
bvecs_t1
bvecs after aligning to the T1w and resampling
local_bvecs_t1
voxelwise bvecs accounting for local displacements
gradient_table_t1
MRTrix-style gradient table
dwi_mni
dwi series, resampled to template space
dwi_mask_mni
dwi series mask in template space
bvals_mni
bvalues of the dwi series
bvecs_mni
bvecs after aligning to the T1w and resampling
local_bvecs_mni
voxelwise bvecs accounting for local displacements
gradient_table_mni
MRTrix-style gradient table
confounds_file
estimated motion parameters and zipper scores
raw_qc_file
DSI Studio QC file for the raw data
raw_concatenated
concatenated raw images for a qc report
carpetplot_data
path to a file containing carpetplot data
**Subworkflows**
* :py:func:`~qsiprep.workflows.dwi.hmc.init_dwi_hmc_wf`
* :py:func:`~qsiprep.workflows.dwi.registration.init_dwi_t1_trans_wf`
* :py:func:`~qsiprep.workflows.dwi.registration.init_dwi_reg_wf`
* :py:func:`~qsiprep.workflows.dwi.confounds.init_dwi_confounds_wf`
* :py:func:`~qsiprep.workflows.dwi.resampling.init_dwi_trans_wf`
"""
# Check the inputs
if layout is not None:
all_dwis = scan_groups['dwi_series']
fieldmap_info = scan_groups['fieldmap_info']
dwi_metadata = layout.get_metadata(all_dwis[0])
else:
all_dwis = ['/fake/testing/path.nii.gz']
fieldmap_info = {'suffix': None}
dwi_metadata = {}
fieldmap_type = fieldmap_info['suffix']
doing_bidirectional_pepolar = fieldmap_type == 'rpe_series'
preprocess_rpe_series = doing_bidirectional_pepolar and hmc_model == 'eddy'
if fieldmap_type is not None:
fmap_key = "phase1" if fieldmap_type == "phase" else fieldmap_type
if fieldmap_type != "syn":
fieldmap_file = fieldmap_info[fmap_key]
# There can be a bunch of rpe series, so don't get the info yet
if fmap_key not in ('rpe_series', 'epi', 'dwi'):
fieldmap_info['metadata'] = layout.get_metadata(fieldmap_file)
mem_gb = {'filesize': 1, 'resampled': 1, 'largemem': 1}
dwi_nvols = 10
# Determine resource usage
for scan in all_dwis:
if not os.path.exists(scan):
# For docs building
continue
_dwi_nvols, _mem_gb = _create_mem_gb(scan)
dwi_nvols += _dwi_nvols
mem_gb['filesize'] += _mem_gb['filesize']
mem_gb['resampled'] += _mem_gb['resampled']
mem_gb['largemem'] += _mem_gb['largemem']
wf_name = _get_wf_name(output_prefix)
workflow = Workflow(name=wf_name)
LOGGER.log(25, ('Creating dwi processing workflow "%s" '
'to produce output %s '
'(%.2f GB / %d DWIs). '
'Memory resampled/largemem=%.2f/%.2f GB.'), wf_name,
output_prefix, mem_gb['filesize'], dwi_nvols, mem_gb['resampled'],
mem_gb['largemem'])
inputnode = pe.Node(
niu.IdentityInterface(fields=[
'dwi_files', 'sbref_file', 'subjects_dir', 'subject_id',
't1_preproc', 't1_brain', 't1_mask', 't1_seg', 't1_tpms',
't1_aseg', 't1_aparc', 't1_2_mni_forward_transform',
't1_2_mni_reverse_transform', 't1_2_fsnative_forward_transform',
't1_2_fsnative_reverse_transform', 'dwi_sampling_grid']),
name='inputnode')
outputnode = pe.Node(
niu.IdentityInterface(fields=[
'confounds', 'hmc_optimization_data', 'itk_b0_to_t1', 'noise_images', 'bias_images',
'dwi_files', 'cnr_map', 'bval_files', 'bvec_files', 'b0_ref_image', 'b0_indices',
'dwi_mask', 'hmc_xforms', 'fieldwarps', 'sbref_file', 'original_files',
'original_bvecs', 'raw_qc_file', 'coreg_score', 'raw_concatenated',
'carpetplot_data']),
name='outputnode')
workflow.__desc__ = """
Diffusion data preprocessing
: """
pre_hmc_wf = init_dwi_pre_hmc_wf(scan_groups=scan_groups,
b0_threshold=b0_threshold,
preprocess_rpe_series=preprocess_rpe_series,
dwi_denoise_window=dwi_denoise_window,
unringing_method=unringing_method,
dwi_no_biascorr=dwi_no_biascorr,
no_b0_harmonization=no_b0_harmonization,
orientation='LAS' if hmc_model == 'eddy' else 'LPS',
source_file=source_file,
low_mem=low_mem,
denoise_before_combining=denoise_before_combining,
omp_nthreads=omp_nthreads)
test_pre_hmc_connect = pe.Node(TestInput(), name='test_pre_hmc_connect')
if hmc_model in ('none', '3dSHORE'):
if not hmc_model == 'none' and shoreline_iters < 1:
raise Exception("--shoreline-iters must be > 0 when --hmc-model is " + hmc_model)
hmc_wf = init_qsiprep_hmcsdc_wf(
scan_groups=scan_groups,
source_file=source_file,
b0_threshold=b0_threshold,
hmc_transform=hmc_transform,
hmc_model=hmc_model,
hmc_align_to=motion_corr_to,
template=template,
shoreline_iters=shoreline_iters,
impute_slice_threshold=impute_slice_threshold,
omp_nthreads=omp_nthreads,
fmap_bspline=fmap_bspline,
fmap_demean=fmap_demean,
use_syn=use_syn,
force_syn=force_syn,
dwi_metadata=dwi_metadata,
sloppy=sloppy,
name="hmc_sdc_wf")
elif hmc_model == 'eddy':
hmc_wf = init_fsl_hmc_wf(
scan_groups=scan_groups,
b0_threshold=b0_threshold,
source_file=source_file,
impute_slice_threshold=impute_slice_threshold,
eddy_config=eddy_config,
mem_gb=mem_gb,
omp_nthreads=omp_nthreads,
fmap_bspline=fmap_bspline,
fmap_demean=fmap_demean,
dwi_metadata=dwi_metadata,
sloppy=sloppy,
name="hmc_sdc_wf")
workflow.connect([
(pre_hmc_wf, hmc_wf, [
('outputnode.dwi_file', 'inputnode.dwi_file'),
('outputnode.bval_file', 'inputnode.bval_file'),
('outputnode.bvec_file', 'inputnode.bvec_file'),
('outputnode.original_files', 'inputnode.original_files')]),
(inputnode, hmc_wf, [
('t1_brain', 'inputnode.t1_brain'),
('t1_mask', 'inputnode.t1_mask'),
('t1_2_mni_reverse_transform', 'inputnode.t1_2_mni_reverse_transform')]),
(pre_hmc_wf, outputnode, [
('outputnode.qc_file', 'raw_qc_file'),
('outputnode.original_files', 'original_files'),
('outputnode.bvec_file', 'original_bvecs'),
('outputnode.bias_images', 'bias_images'),
('outputnode.noise_images', 'noise_images'),
('outputnode.raw_concatenated', 'raw_concatenated')]),
(pre_hmc_wf, test_pre_hmc_connect, [('outputnode.raw_concatenated', 'test1')])
])
# calculate dwi registration to T1w
b0_coreg_wf = init_b0_to_anat_registration_wf(omp_nthreads=omp_nthreads,
mem_gb=mem_gb['resampled'],
write_report=True)
ds_report_coreg = pe.Node(
DerivativesDataSink(suffix="coreg", source_file=source_file),
name='ds_report_coreg', run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB)
# Make a fieldmap report, save the transforms. Do it here because we need wm
if fieldmap_type is not None:
fmap_unwarp_report_wf = init_fmap_unwarp_report_wf()
ds_report_sdc = pe.Node(
DerivativesDataSink(desc="sdc", suffix='b0', source_file=source_file),
name='ds_report_sdc',
mem_gb=DEFAULT_MEMORY_MIN_GB,
run_without_submitting=True)
workflow.connect([
(inputnode, fmap_unwarp_report_wf, [
('t1_seg', 'inputnode.in_seg')]),
(hmc_wf, fmap_unwarp_report_wf, [
('outputnode.pre_sdc_template', 'inputnode.in_pre'),
('outputnode.b0_template', 'inputnode.in_post')]),
(b0_coreg_wf, fmap_unwarp_report_wf, [
('outputnode.itk_b0_to_t1', 'inputnode.in_xfm')]),
(fmap_unwarp_report_wf, ds_report_sdc, [('outputnode.report', 'in_file')])
])
summary = pe.Node(
DiffusionSummary(
pe_direction=scan_groups['dwi_series_pedir'],
hmc_model=hmc_model,
b0_to_t1w_transform=b0_to_t1w_transform,
hmc_transform=hmc_transform,
impute_slice_threshold=impute_slice_threshold,
dwi_denoise_window=dwi_denoise_window,
output_spaces=output_spaces),
name='summary',
mem_gb=DEFAULT_MEMORY_MIN_GB,
run_without_submitting=True)
workflow.connect([
(inputnode, b0_coreg_wf, [
('t1_brain', 'inputnode.t1_brain'),
('t1_seg', 'inputnode.t1_seg'),
('subjects_dir', 'inputnode.subjects_dir'),
('subject_id', 'inputnode.subject_id'),
('t1_2_fsnative_reverse_transform',
'inputnode.t1_2_fsnative_reverse_transform')]),
(hmc_wf, b0_coreg_wf, [('outputnode.b0_template',
'inputnode.ref_b0_brain')]),
(hmc_wf, summary, [('outputnode.sdc_method', 'distortion_correction')]),
(b0_coreg_wf, ds_report_coreg, [('outputnode.report', 'in_file')]),
(b0_coreg_wf, outputnode, [
(('outputnode.itk_b0_to_t1', _get_first), 'itk_b0_to_t1'),
('outputnode.coreg_metric', | |
Format for report. Valid values are: `textORcsv`, `Parquet`. If `Parquet` is used, then Compression must also be `Parquet`.
"""
return pulumi.get(self, "format")
    @format.setter
    def format(self, value: Optional[pulumi.Input[str]]):
        """Set the report format (see the ``format`` getter for valid values)."""
        pulumi.set(self, "format", value)
@property
@pulumi.getter(name="refreshClosedReports")
def refresh_closed_reports(self) -> Optional[pulumi.Input[bool]]:
"""
Set to true to update your reports after they have been finalized if AWS detects charges related to previous months.
"""
return pulumi.get(self, "refresh_closed_reports")
    @refresh_closed_reports.setter
    def refresh_closed_reports(self, value: Optional[pulumi.Input[bool]]):
        """Set whether finalized reports should be refreshed on late charges."""
        pulumi.set(self, "refresh_closed_reports", value)
@property
@pulumi.getter(name="reportName")
def report_name(self) -> Optional[pulumi.Input[str]]:
"""
Unique name for the report. Must start with a number/letter and is case sensitive. Limited to 256 characters.
"""
return pulumi.get(self, "report_name")
    @report_name.setter
    def report_name(self, value: Optional[pulumi.Input[str]]):
        """Set the unique report name (see the ``report_name`` getter for constraints)."""
        pulumi.set(self, "report_name", value)
@property
@pulumi.getter(name="reportVersioning")
def report_versioning(self) -> Optional[pulumi.Input[str]]:
"""
Overwrite the previous version of each report or to deliver the report in addition to the previous versions. Valid values are: `CREATE_NEW_REPORT` and `OVERWRITE_REPORT`.
"""
return pulumi.get(self, "report_versioning")
    @report_versioning.setter
    def report_versioning(self, value: Optional[pulumi.Input[str]]):
        """Set the report versioning mode (`CREATE_NEW_REPORT` or `OVERWRITE_REPORT`)."""
        pulumi.set(self, "report_versioning", value)
@property
@pulumi.getter(name="s3Bucket")
def s3_bucket(self) -> Optional[pulumi.Input[str]]:
"""
Name of the existing S3 bucket to hold generated reports.
"""
return pulumi.get(self, "s3_bucket")
    @s3_bucket.setter
    def s3_bucket(self, value: Optional[pulumi.Input[str]]):
        """Set the destination S3 bucket name."""
        pulumi.set(self, "s3_bucket", value)
@property
@pulumi.getter(name="s3Prefix")
def s3_prefix(self) -> Optional[pulumi.Input[str]]:
"""
Report path prefix. Limited to 256 characters.
"""
return pulumi.get(self, "s3_prefix")
    @s3_prefix.setter
    def s3_prefix(self, value: Optional[pulumi.Input[str]]):
        """Set the report path prefix."""
        pulumi.set(self, "s3_prefix", value)
@property
@pulumi.getter(name="s3Region")
def s3_region(self) -> Optional[pulumi.Input[str]]:
"""
Region of the existing S3 bucket to hold generated reports.
"""
return pulumi.get(self, "s3_region")
    @s3_region.setter
    def s3_region(self, value: Optional[pulumi.Input[str]]):
        """Set the region of the destination S3 bucket."""
        pulumi.set(self, "s3_region", value)
@property
@pulumi.getter(name="timeUnit")
def time_unit(self) -> Optional[pulumi.Input[str]]:
"""
The frequency on which report data are measured and displayed. Valid values are: `HOURLY`, `DAILY`.
"""
return pulumi.get(self, "time_unit")
    @time_unit.setter
    def time_unit(self, value: Optional[pulumi.Input[str]]):
        """Set the report time granularity (`HOURLY` or `DAILY`)."""
        pulumi.set(self, "time_unit", value)
class ReportDefinition(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 additional_artifacts: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 additional_schema_elements: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 compression: Optional[pulumi.Input[str]] = None,
                 format: Optional[pulumi.Input[str]] = None,
                 refresh_closed_reports: Optional[pulumi.Input[bool]] = None,
                 report_name: Optional[pulumi.Input[str]] = None,
                 report_versioning: Optional[pulumi.Input[str]] = None,
                 s3_bucket: Optional[pulumi.Input[str]] = None,
                 s3_prefix: Optional[pulumi.Input[str]] = None,
                 s3_region: Optional[pulumi.Input[str]] = None,
                 time_unit: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Manages Cost and Usage Report Definitions.

        > *NOTE:* The AWS Cost and Usage Report service is only available in `us-east-1` currently.

        > *NOTE:* If AWS Organizations is enabled, only the master account can use this resource.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_aws as aws

        example_cur_report_definition = aws.cur.ReportDefinition("exampleCurReportDefinition",
            additional_artifacts=[
                "REDSHIFT",
                "QUICKSIGHT",
            ],
            additional_schema_elements=["RESOURCES"],
            compression="GZIP",
            format="textORcsv",
            report_name="example-cur-report-definition",
            s3_bucket="example-bucket-name",
            s3_region="us-east-1",
            time_unit="HOURLY")
        ```

        ## Import

        Report Definitions can be imported using the `report_name`, e.g.

        ```sh
         $ pulumi import aws:cur/reportDefinition:ReportDefinition example_cur_report_definition example-cur-report-definition
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] additional_artifacts: A list of additional artifacts. Valid values are: `REDSHIFT`, `QUICKSIGHT`, `ATHENA`. When ATHENA exists within additional_artifacts, no other artifact type can be declared and report_versioning must be `OVERWRITE_REPORT`.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] additional_schema_elements: A list of schema elements. Valid values are: `RESOURCES`.
        :param pulumi.Input[str] compression: Compression format for report. Valid values are: `GZIP`, `ZIP`, `Parquet`. If `Parquet` is used, then format must also be `Parquet`.
        :param pulumi.Input[str] format: Format for report. Valid values are: `textORcsv`, `Parquet`. If `Parquet` is used, then Compression must also be `Parquet`.
        :param pulumi.Input[bool] refresh_closed_reports: Set to true to update your reports after they have been finalized if AWS detects charges related to previous months.
        :param pulumi.Input[str] report_name: Unique name for the report. Must start with a number/letter and is case sensitive. Limited to 256 characters.
        :param pulumi.Input[str] report_versioning: Overwrite the previous version of each report or to deliver the report in addition to the previous versions. Valid values are: `CREATE_NEW_REPORT` and `OVERWRITE_REPORT`.
        :param pulumi.Input[str] s3_bucket: Name of the existing S3 bucket to hold generated reports.
        :param pulumi.Input[str] s3_prefix: Report path prefix. Limited to 256 characters.
        :param pulumi.Input[str] s3_region: Region of the existing S3 bucket to hold generated reports.
        :param pulumi.Input[str] time_unit: The frequency on which report data are measured and displayed. Valid values are: `HOURLY`, `DAILY`.
        """
        # Typed overload stub only: the shared implementation is _internal_init().
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: ReportDefinitionArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Manages Cost and Usage Report Definitions.

        > *NOTE:* The AWS Cost and Usage Report service is only available in `us-east-1` currently.

        > *NOTE:* If AWS Organizations is enabled, only the master account can use this resource.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_aws as aws

        example_cur_report_definition = aws.cur.ReportDefinition("exampleCurReportDefinition",
            additional_artifacts=[
                "REDSHIFT",
                "QUICKSIGHT",
            ],
            additional_schema_elements=["RESOURCES"],
            compression="GZIP",
            format="textORcsv",
            report_name="example-cur-report-definition",
            s3_bucket="example-bucket-name",
            s3_region="us-east-1",
            time_unit="HOURLY")
        ```

        ## Import

        Report Definitions can be imported using the `report_name`, e.g.

        ```sh
         $ pulumi import aws:cur/reportDefinition:ReportDefinition example_cur_report_definition example-cur-report-definition
        ```

        :param str resource_name: The name of the resource.
        :param ReportDefinitionArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        # Typed overload stub only: the shared implementation is _internal_init().
        ...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ReportDefinitionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
additional_artifacts: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
additional_schema_elements: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
compression: Optional[pulumi.Input[str]] = None,
format: Optional[pulumi.Input[str]] = None,
refresh_closed_reports: Optional[pulumi.Input[bool]] = None,
report_name: Optional[pulumi.Input[str]] = None,
report_versioning: Optional[pulumi.Input[str]] = None,
s3_bucket: Optional[pulumi.Input[str]] = None,
s3_prefix: Optional[pulumi.Input[str]] = None,
s3_region: Optional[pulumi.Input[str]] = None,
time_unit: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ReportDefinitionArgs.__new__(ReportDefinitionArgs)
__props__.__dict__["additional_artifacts"] = additional_artifacts
if additional_schema_elements is None and not opts.urn:
raise TypeError("Missing required property 'additional_schema_elements'")
__props__.__dict__["additional_schema_elements"] = additional_schema_elements
if compression is None and not opts.urn:
raise TypeError("Missing required property 'compression'")
__props__.__dict__["compression"] = compression
if format is None and not opts.urn:
raise TypeError("Missing required property 'format'")
__props__.__dict__["format"] = format
__props__.__dict__["refresh_closed_reports"] = refresh_closed_reports
if report_name is None and not opts.urn:
raise TypeError("Missing required property 'report_name'")
__props__.__dict__["report_name"] = report_name
__props__.__dict__["report_versioning"] = report_versioning
if s3_bucket is None and not opts.urn:
raise TypeError("Missing required property 's3_bucket'")
__props__.__dict__["s3_bucket"] = s3_bucket
__props__.__dict__["s3_prefix"] = s3_prefix
if s3_region is None and not opts.urn:
raise TypeError("Missing required property 's3_region'")
__props__.__dict__["s3_region"] = s3_region
if time_unit is None and not opts.urn:
raise TypeError("Missing required property 'time_unit'")
__props__.__dict__["time_unit"] = time_unit
__props__.__dict__["arn"] = None
super(ReportDefinition, __self__).__init__(
'aws:cur/reportDefinition:ReportDefinition',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
additional_artifacts: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
additional_schema_elements: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
arn: Optional[pulumi.Input[str]] = None,
compression: Optional[pulumi.Input[str]] = None,
format: Optional[pulumi.Input[str]] = None,
refresh_closed_reports: Optional[pulumi.Input[bool]] = None,
report_name: Optional[pulumi.Input[str]] = None,
report_versioning: Optional[pulumi.Input[str]] = None,
s3_bucket: Optional[pulumi.Input[str]] = None,
s3_prefix: Optional[pulumi.Input[str]] = None,
s3_region: Optional[pulumi.Input[str]] = None,
time_unit: Optional[pulumi.Input[str]] = None) -> 'ReportDefinition':
"""
Get an existing ReportDefinition resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] additional_artifacts: A list of additional artifacts. Valid values are: `REDSHIFT`, `QUICKSIGHT`, `ATHENA`. When ATHENA exists within additional_artifacts, no other artifact type can be declared and report_versioning must be `OVERWRITE_REPORT`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] additional_schema_elements: A list of schema elements. Valid values are: `RESOURCES`.
:param pulumi.Input[str] arn: The Amazon Resource Name (ARN) specifying the cur report.
:param pulumi.Input[str] compression: Compression format for report. Valid values are: `GZIP`, `ZIP`, `Parquet`. If `Parquet` is used, then format must also be `Parquet`.
:param pulumi.Input[str] format: Format for report. Valid values are: `textORcsv`, `Parquet`. If `Parquet` is used, then Compression must also be `Parquet`.
:param pulumi.Input[bool] refresh_closed_reports: Set to true to update your reports after they have been finalized if AWS detects charges related to previous months.
:param pulumi.Input[str] report_name: Unique name for the report. Must start with a number/letter and is case sensitive. Limited to 256 characters.
:param pulumi.Input[str] report_versioning: Overwrite the previous version of each report or to deliver | |
3, 111, 119, 110, 2, 1, 1))
) == (self.s, null)
    def testWithOptionalAndDefaultedIndefModeChunked(self):
        """Indefinite-length SEQUENCE with the OCTET STRING split into chunks decodes to self.s."""
        assert decoder.decode(
            ints2octs((48, 128, 5, 0, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 2, 1, 1, 0, 0))
        ) == (self.s, null)
    def testWithOptionalAndDefaultedDefModeSubst(self):
        """substrateFun substitution yields the raw inner octets instead of decoded objects."""
        assert decoder.decode(
            ints2octs((48, 18, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1)),
            substrateFun=lambda a, b, c: (b, b[c:])
        ) == (ints2octs((5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1)), str2octs(''))
    def testWithOptionalAndDefaultedIndefModeSubst(self):
        """substrateFun substitution on the indefinite-length encoding of the same value."""
        assert decoder.decode(
            ints2octs((48, 128, 5, 0, 36, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0, 2, 1, 1, 0, 0)),
            substrateFun=lambda a, b, c: (b, str2octs(''))
        ) == (ints2octs(
            (5, 0, 36, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0, 2, 1, 1, 0, 0)), str2octs(''))
    def testTagFormat(self):
        """A SEQUENCE tag without the constructed bit (0x10 rather than 0x30) must be rejected."""
        try:
            decoder.decode(
                ints2octs((16, 18, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1))
            )
        except PyAsn1Error:
            pass
        else:
            assert 0, 'wrong tagFormat worked out'
class SequenceDecoderWithSchemaTestCase(BaseTestCase):
    """Decode SEQUENCE substrates against an explicit ``asn1Spec`` schema.

    The schema declares a mandatory NULL ('place-holder'), an optional
    OCTET STRING ('first-name') and a defaulted INTEGER ('age').  Each test
    first primes ``self.s`` with the expected component values via one of the
    ``__init*`` helpers, then asserts that decoding reproduces exactly that
    value with no trailing substrate.  Test names encode the substrate form:
    Def/Indef = definite/indefinite length, Chunked = constructed
    OCTET STRING split into fragments.
    """

    def setUp(self):
        BaseTestCase.setUp(self)
        self.s = univ.Sequence(
            componentType=namedtype.NamedTypes(
                namedtype.NamedType('place-holder', univ.Null(null)),
                namedtype.OptionalNamedType('first-name', univ.OctetString()),
                namedtype.DefaultedNamedType('age', univ.Integer(33)),
            )
        )

    def __init(self):
        # Expected value: mandatory NULL only.
        self.s.clear()
        self.s.setComponentByPosition(0, univ.Null(null))

    def __initWithOptional(self):
        # Expected value: NULL + optional OCTET STRING.
        self.s.clear()
        self.s.setComponentByPosition(0, univ.Null(null))
        self.s.setComponentByPosition(1, univ.OctetString('quick brown'))

    def __initWithDefaulted(self):
        # Expected value: NULL + non-default INTEGER.
        self.s.clear()
        self.s.setComponentByPosition(0, univ.Null(null))
        self.s.setComponentByPosition(2, univ.Integer(1))

    def __initWithOptionalAndDefaulted(self):
        # Expected value: all three components present.
        self.s.clear()
        self.s.setComponentByPosition(0, univ.Null(null))
        self.s.setComponentByPosition(1, univ.OctetString('quick brown'))
        self.s.setComponentByPosition(2, univ.Integer(1))

    def testDefMode(self):
        self.__init()
        assert decoder.decode(
            ints2octs((48, 2, 5, 0)), asn1Spec=self.s
        ) == (self.s, null)

    def testIndefMode(self):
        self.__init()
        assert decoder.decode(
            ints2octs((48, 128, 5, 0, 0, 0)), asn1Spec=self.s
        ) == (self.s, null)

    def testDefModeChunked(self):
        self.__init()
        assert decoder.decode(
            ints2octs((48, 2, 5, 0)), asn1Spec=self.s
        ) == (self.s, null)

    def testIndefModeChunked(self):
        self.__init()
        assert decoder.decode(
            ints2octs((48, 128, 5, 0, 0, 0)), asn1Spec=self.s
        ) == (self.s, null)

    def testWithOptionalDefMode(self):
        self.__initWithOptional()
        assert decoder.decode(
            ints2octs((48, 15, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110)), asn1Spec=self.s
        ) == (self.s, null)

    # NOTE(review): method name is missing an 'l' ("Optiona"); left unchanged
    # because renaming would alter the public test id.
    def testWithOptionaIndefMode(self):
        self.__initWithOptional()
        assert decoder.decode(
            ints2octs((48, 128, 5, 0, 36, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0, 0, 0)),
            asn1Spec=self.s
        ) == (self.s, null)

    def testWithOptionalDefModeChunked(self):
        self.__initWithOptional()
        assert decoder.decode(
            ints2octs((48, 21, 5, 0, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110)),
            asn1Spec=self.s
        ) == (self.s, null)

    def testWithOptionalIndefModeChunked(self):
        self.__initWithOptional()
        assert decoder.decode(
            ints2octs((48, 128, 5, 0, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0,
                       0, 0, 0)),
            asn1Spec=self.s
        ) == (self.s, null)

    def testWithDefaultedDefMode(self):
        self.__initWithDefaulted()
        assert decoder.decode(
            ints2octs((48, 5, 5, 0, 2, 1, 1)), asn1Spec=self.s
        ) == (self.s, null)

    def testWithDefaultedIndefMode(self):
        self.__initWithDefaulted()
        assert decoder.decode(
            ints2octs((48, 128, 5, 0, 2, 1, 1, 0, 0)), asn1Spec=self.s
        ) == (self.s, null)

    def testWithDefaultedDefModeChunked(self):
        self.__initWithDefaulted()
        assert decoder.decode(
            ints2octs((48, 5, 5, 0, 2, 1, 1)), asn1Spec=self.s
        ) == (self.s, null)

    def testWithDefaultedIndefModeChunked(self):
        self.__initWithDefaulted()
        assert decoder.decode(
            ints2octs((48, 128, 5, 0, 2, 1, 1, 0, 0)), asn1Spec=self.s
        ) == (self.s, null)

    def testWithOptionalAndDefaultedDefMode(self):
        self.__initWithOptionalAndDefaulted()
        assert decoder.decode(
            ints2octs((48, 18, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1)),
            asn1Spec=self.s
        ) == (self.s, null)

    def testWithOptionalAndDefaultedIndefMode(self):
        self.__initWithOptionalAndDefaulted()
        assert decoder.decode(
            ints2octs((48, 128, 5, 0, 36, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0, 2, 1, 1,
                       0, 0)), asn1Spec=self.s
        ) == (self.s, null)

    def testWithOptionalAndDefaultedDefModeChunked(self):
        self.__initWithOptionalAndDefaulted()
        assert decoder.decode(
            ints2octs(
                (48, 24, 5, 0, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 2, 1, 1)),
            asn1Spec=self.s
        ) == (self.s, null)

    def testWithOptionalAndDefaultedIndefModeChunked(self):
        self.__initWithOptionalAndDefaulted()
        assert decoder.decode(
            ints2octs((48, 128, 5, 0, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0,
                       0, 2, 1, 1, 0, 0)), asn1Spec=self.s
        ) == (self.s, null)
# NOTE(review): class name presumably intended "Untagged"; left unchanged to
# preserve the public test id.
class SequenceDecoderWithUnaggedOpenTypesTestCase(BaseTestCase):
    """Decode a SEQUENCE whose second component is an untagged ANY open type.

    The open type maps id 1 -> INTEGER and id 2 -> OCTET STRING.  With
    ``decodeOpenTypes=True`` the blob is decoded through the map (unknown ids
    fall back to raw octets); without it, the blob stays as raw octets.
    """

    def setUp(self):
        openType = opentype.OpenType(
            'id',
            {1: univ.Integer(),
             2: univ.OctetString()}
        )
        self.s = univ.Sequence(
            componentType=namedtype.NamedTypes(
                namedtype.NamedType('id', univ.Integer()),
                namedtype.NamedType('blob', univ.Any(), openType=openType)
            )
        )

    def testDecodeOpenTypesChoiceOne(self):
        s, r = decoder.decode(
            ints2octs((48, 6, 2, 1, 1, 2, 1, 12)), asn1Spec=self.s,
            decodeOpenTypes=True
        )
        assert not r
        assert s[0] == 1
        assert s[1] == 12

    def testDecodeOpenTypesChoiceTwo(self):
        s, r = decoder.decode(
            ints2octs((48, 16, 2, 1, 2, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110)), asn1Spec=self.s,
            decodeOpenTypes=True
        )
        assert not r
        assert s[0] == 2
        assert s[1] == univ.OctetString('quick brown')

    def testDecodeOpenTypesUnknownType(self):
        # Blob payload is not valid for the mapped type -> decoder must raise.
        try:
            s, r = decoder.decode(
                ints2octs((48, 6, 2, 1, 2, 6, 1, 39)), asn1Spec=self.s,
                decodeOpenTypes=True
            )
        except PyAsn1Error:
            pass
        else:
            assert False, 'unknown open type tolerated'

    def testDecodeOpenTypesUnknownId(self):
        # id 3 is not in the open-type map -> blob kept as raw octets.
        s, r = decoder.decode(
            ints2octs((48, 6, 2, 1, 3, 6, 1, 39)), asn1Spec=self.s,
            decodeOpenTypes=True
        )
        assert not r
        assert s[0] == 3
        assert s[1] == univ.OctetString(hexValue='060127')

    def testDontDecodeOpenTypesChoiceOne(self):
        s, r = decoder.decode(
            ints2octs((48, 6, 2, 1, 1, 2, 1, 12)), asn1Spec=self.s
        )
        assert not r
        assert s[0] == 1
        assert s[1] == ints2octs((2, 1, 12))

    def testDontDecodeOpenTypesChoiceTwo(self):
        s, r = decoder.decode(
            ints2octs((48, 16, 2, 1, 2, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110)), asn1Spec=self.s
        )
        assert not r
        assert s[0] == 2
        assert s[1] == ints2octs((4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110))
class SequenceDecoderWithImplicitlyTaggedOpenTypesTestCase(BaseTestCase):
    """Open-type decoding where the ANY blob carries an implicit context tag [3]."""

    def setUp(self):
        openType = opentype.OpenType(
            'id',
            {1: univ.Integer(),
             2: univ.OctetString()}
        )
        self.s = univ.Sequence(
            componentType=namedtype.NamedTypes(
                namedtype.NamedType('id', univ.Integer()),
                namedtype.NamedType(
                    'blob', univ.Any().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)), openType=openType
                )
            )
        )

    def testDecodeOpenTypesChoiceOne(self):
        s, r = decoder.decode(
            ints2octs((48, 8, 2, 1, 1, 131, 3, 2, 1, 12)), asn1Spec=self.s, decodeOpenTypes=True
        )
        assert not r
        assert s[0] == 1
        assert s[1] == 12

    def testDecodeOpenTypesUnknownId(self):
        # id 3 is unmapped -> inner encoding retained as raw octets.
        s, r = decoder.decode(
            ints2octs((48, 8, 2, 1, 3, 131, 3, 2, 1, 12)), asn1Spec=self.s, decodeOpenTypes=True
        )
        assert not r
        assert s[0] == 3
        assert s[1] == univ.OctetString(hexValue='02010C')
class SequenceDecoderWithExplicitlyTaggedOpenTypesTestCase(BaseTestCase):
    """Open-type decoding where the ANY blob carries an explicit context tag [3]."""

    def setUp(self):
        openType = opentype.OpenType(
            'id',
            {1: univ.Integer(),
             2: univ.OctetString()}
        )
        self.s = univ.Sequence(
            componentType=namedtype.NamedTypes(
                namedtype.NamedType('id', univ.Integer()),
                namedtype.NamedType(
                    'blob', univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)), openType=openType
                )
            )
        )

    def testDecodeOpenTypesChoiceOne(self):
        s, r = decoder.decode(
            ints2octs((48, 8, 2, 1, 1, 163, 3, 2, 1, 12)), asn1Spec=self.s, decodeOpenTypes=True
        )
        assert not r
        assert s[0] == 1
        assert s[1] == 12

    def testDecodeOpenTypesUnknownId(self):
        # id 3 is unmapped -> inner encoding retained as raw octets.
        s, r = decoder.decode(
            ints2octs((48, 8, 2, 1, 3, 163, 3, 2, 1, 12)), asn1Spec=self.s, decodeOpenTypes=True
        )
        assert not r
        assert s[0] == 3
        assert s[1] == univ.OctetString(hexValue='02010C')
class SetDecoderTestCase(BaseTestCase):
    def setUp(self):
        """Build a three-component SET schema and populate the expected decoded value."""
        BaseTestCase.setUp(self)
        self.s = univ.Set(
            componentType=namedtype.NamedTypes(
                namedtype.NamedType('place-holder', univ.Null(null)),
                namedtype.NamedType('first-name', univ.OctetString(null)),
                namedtype.NamedType('age', univ.Integer(33))
            )
        )
        self.s.setComponentByPosition(0, univ.Null(null))
        self.s.setComponentByPosition(1, univ.OctetString('quick brown'))
        self.s.setComponentByPosition(2, univ.Integer(1))
    def testWithOptionalAndDefaultedDefMode(self):
        """Definite-length SET encoding (tag 49) decodes to self.s."""
        assert decoder.decode(
            ints2octs((49, 18, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1))
        ) == (self.s, null)
    def testWithOptionalAndDefaultedIndefMode(self):
        """Indefinite-length SET encoding decodes to self.s."""
        assert decoder.decode(
            ints2octs((49, 128, 5, 0, 36, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0, 2, 1, 1, 0, 0))
        ) == (self.s, null)
    def testWithOptionalAndDefaultedDefModeChunked(self):
        """Definite-length encoding with the OCTET STRING split into chunks."""
        assert decoder.decode(
            ints2octs(
                (49, 24, 5, 0, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 2, 1, 1))
        ) == (self.s, null)
    def testWithOptionalAndDefaultedIndefModeChunked(self):
        """Indefinite-length encoding with a chunked OCTET STRING component."""
        assert decoder.decode(
            ints2octs((49, 128, 5, 0, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 2, 1, 1, 0, 0))
        ) == (self.s, null)
    def testWithOptionalAndDefaultedDefModeSubst(self):
        """substrateFun intercepts decoding and hands back the raw V part of the TLV."""
        assert decoder.decode(
            ints2octs((49, 18, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1)),
            substrateFun=lambda a, b, c: (b, b[c:])
        ) == (ints2octs((5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1)), str2octs(''))
def testWithOptionalAndDefaultedIndefModeSubst(self):
assert decoder.decode(
ints2octs((49, 128, 5, 0, 36, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, | |
Nmap As Root:
\033[31m>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
\033[31m#\033[37m nmap --interactive
\033[31m#\033[37m !sh
""")
#----------------------------------------------------------------------------------------------------------------------
def secure_con():
    """Open a TLS-secured TCP connection to a user-supplied host on port 443.

    Prompts for a hostname, wraps a socket with certificate and hostname
    verification enabled, and reports whether the handshake succeeded.
    """
    import socket, ssl
    # PROTOCOL_TLS_CLIENT enables CERT_REQUIRED and check_hostname by default
    # and negotiates the best mutually supported TLS version; the original
    # PROTOCOL_TLSv1 is deprecated and insecure.
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    context.load_default_certs()
    # Cipher preferences go through set_ciphers() with an OpenSSL cipher
    # string. The original passed undefined TLS_RSA_* names as a wrap_socket
    # keyword, which raised NameError (silently swallowed by a bare except).
    context.set_ciphers("AES256-SHA:AES128-SHA")
    host = input("\033[37mEnter server to connect \033[5;31m>\033[0;37m ")
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        ssl_sock = context.wrap_socket(s, server_hostname=host)
        ssl_sock.connect((host, 443))
        print("\033[32mConnection established!!")
    except (ssl.SSLError, OSError):
        # Narrow exception types instead of a bare except that hid real bugs.
        print("\033[31mServer unreachable or does not support the specific encryption")
#----------------------------------------------------------------------------------------------------------------------
def about_us():
    """Print the group's self-description banner (text reproduced verbatim)."""
    print("""
    = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
     Black Lotus is a Greek Black Hat group that is capable of protecting Greece
    when it comes to cyberwar. Our Target scope does not stop there, we find and attack
    illegal activity such as Child Pornography, Drugs, Human Trafficking and more.
    We might be pirates but we respect human rights.
    With black lotus these have come to an end:
    + 600 Child Pornography Websites
    + Human Trafficking
    + More than 100 pedophiles
    If you are a criminal, you are already our target.
    This Toolkit was designed first for the team of Black Lotus
    Use it with caution
    = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
    """)
#----------------------------------------------------------------------------------------------------------------------
def crack_emailxl():
    """Run the craxl.sh helper script from the /Black-Lotus directory.

    The original issued ``os.system("cd /Black-Lotus")`` as a separate call,
    but every os.system() spawns its own shell, so the directory change never
    affected the following commands. Chain the three steps in one shell
    invocation so the ``cd`` actually applies.
    """
    os.system("cd /Black-Lotus && sudo chmod +x craxl.sh && sudo ./craxl.sh")
#----------------------------------------------------------------------------------------------------------------------
def meterpreter():
print("\033[0;37m ")
os.system('cls||clear')
print("""
od+-
-dMm//o:
`h+:yds`:` .-o+:-.
-o/hNN/` +/-:`..`:s/:/+.
`-///::ddydMm+.::/o/:. ` .-/-s.
`ohhhyso+//-/mNo-./+s/s/- ` ` +.
-ydoy+::/:-/-+oNh/:.o/:+:. yy.` `.
-/.dsy. `+s//++/:+h-.+o/://``dN// :-
-`-ymh/ `/+s-+os:.yy+-..--..:hm+/-s.
:`-:+yys.`:o+.-/-:ydyo/+-.`o:od//y/ `
-//:.--hd+``-` `///yh/.-`s--:oyoos/ohoss-
.d/`.:sd:/o`.:`..oy++syyhmNNdmdd+oy:sNd.
-///:o+yo.-++oshhddNNNNMNNmydmhh/::oNN-
`:o+/+syyymNNMMNmmmdddhsyyys+oy--+yy-
./sydhmNmNNNNNmNmmmddhyyyss/h/ `.`
.oyhhhdhddNNNmmNNNmmmmds+:+do
....yyddmhdmmmdmNNmmmmmh::oy+
`.` -yssyy++hmyhdmhsshhho/oh/
:/. `` `osomo:---`.+:yy`
+-. \033[5;31m*\033[0;37m `+oh/y. :sh+
`+. ``oy//. ...`oss+ho `yNss` .`-o+`
```..ym/+/:.` `.-/syyys+//.`.yhhdhdhhNNdh` `/dy+so+-`.:`
`ho/dy+/ooohhys+-` -...:::+...`yNdyhNmmmms::oydyy/ ../++-ss-
.yy:-` `./ydmdo-://::..`.-..-ddmhddshdhdhy+- ` :dy
./+. .-:-.` ..:+.:ssssddNd:ymdyo- ./+
`:-+/soymhdddy/..
---/o+yysdsoo.``
`.:++/ ````..`.:yhhhy+-`
`.:+shyyo:. .:ohhmdhs/.`
--`--.:/+osyyyo:-` `./sdmdhhs/:.``
-+++hhhy+/.` ./ohmmmdhoys`
.++s+`` \033[5;31mThe Jes7er\033[0;37m `-+s:+m/
.-.. :-
\033[37mx[ \033[31mBlack Lotus v2-dev- \033[37m ]\033[31m
\033[37m x --x[ \033[31mBlackhat Hacker\033[37m toolkit ]\033[31m
\033[37mx x --x[ \033[31m72\033[37m custom scripts ]\033[31m
\033[37m For help, type 'help' in the meterpreter\033[37m""")
metcon = True
while metcon:
a = input("\033[31mC:\Black Lotus \033[5;31m|>\033[0;37m ")
if a =="help":
os.system('cls||clear')
print("""\033[37m
\033[31mBlack Lotus Cyber Lab\033[37m
=============
\033[31mCORE COMMANDS \033[37m
=============
# Command Description
# ------- ---------
# clear Clear terminal
# exit/quit exit the console
# cd navigate through directories
# pwd print working directory
# ls list computer files
# mkdir make a new directory
# touch make a new file
# about-us Get to know Black Lotus Better
==================================
\033[31mUpcoming tools (not supported now) \033[37m
==================================
# Command Description
# ------- -----------
# ddos denial of service attacks
# drone Hack drones ...
===========
\033[31mNETWORK LAB\033[37m
===========
# Command Description
# ------- -----------
# wifi Automated Wifi cracking
# db-nmap use nmap to get sensitive information about target network
# db-netcat a computer networking utility for reading from and
|_writing to network connections via TCP/UDP
# observer Advanced Network SCanner
# blindmaiden Automated custom scan builder
# wireshark analyze and capture packets
# etherape Monitor network traffic
# listener make a custom listener for incoming connections
# atom Wifi Network Killer
# wipp Wireless Network Mapping ( Geolocate Routers )
# postman send and recieve files via tcp
# rtsp View rtsp ip cameras with megalodon's rtsp viewer
# sniper Reveal ip details(GEO location, provider, country etc)
# fpoint Create a Fake Wifi access point to capture sensitive info
# telnet Connect to a computer in your network
# dns-resolver Find dns record from various websites or hostnames
# reaper Automatic smb scanner/attacker
===========
\033[31mMALWARE LAB\033[37m
===========
# Command Description
# ------- -----------
# viking Black Lotus advanced malware development toolkit
# ransom Create a ransomware to deploy on the enemy machine
|_Cause your enemies must pay!!
# voodoo Manipulate the target computer using a revershell
# shells A list of shells to use on the enemy computer
=================
\033[31mPASSWORD CRACKING \033[37m
=================
# Command Description
# ------- -----------
# hash-lab encrypt/decrypt text or passwords
# zip-crack crack zip files
# pdf-crack crack pdf files
# password Custom length password generator
# craxl Email Cracking Automation Tool Using Hydra
=================
\033[31mINVESTIGATION LAB\033[37m
=================
# Command Description
# ------- -----------
# people find information about a person
# phone find information about a phone number
# email-va find if an email is Valid
# photo find information about a photo
# shodan search for public vulnerable servers, IoT devices
|_ power plants, security cams
# meta-scraper Scrape hidden files in target domain
# steg Use steganography to hide data in pictures
# face Search all over the web to find persons face
# google-maps translate addresses directly to geographic coordinates
==================
\033[31mDATABASE ASSEGMENT\033[37m
==================
# Command Description
# ------- -----------
# sql-injection Black Lotus SQL Injection Scanner
# database (upcoming) Create a MySQL Database
# db-sqlmap Use sqlmap for advanced SQL Injection
===========
\033[31mWEB HACKING\033[37m
===========
# Command Description
# ------- -----------
# emails Extract email addresses
# xss XSS vulnerability detection in web pages
# subdomain Scans web page for subdomains
# links Extract all internal/external website links
=================
\033[31mVULN RESEARCH LAB\033[37m
=================
# Command Description
# ------- -----------
# exploitdb Find details about an exploit using exploitdb
# cve Get information about a CVE Vulnerability
===============
\033[31mANONYMITY TOOLS \033[37m
===============
# Command Description
# ------- -----------
# tor-start Hide your IP via Tor Relays (3 Tor Relays)
# tor-stop Stop hiding via Tor Relays
# mac Change your MAC Address (Once you restart your
|_computer mac goes back to normal.)
# vpn Connect from custom VPN file
# ghost Become invisible using Black Lotus way of protection
|_(You might have slower internet connection)
# secure connect to servers using advanced encryption
============
\033[31mSECURITY LAB\033[37m
============
# Command Description
# ------- -----------
# arp-detector Detect ARP Spoof Attack via perform passive monitoring
# firewall Black Lotus Firewall Panel to utilise linux
====================
\033[31mSYSTEM UTILITY TOOLS \033[37m
====================
# Command Description
# ------- -----------
# host Basics about host machine ( Ip address, hostname, MAC, etc)
# diagnostics Black Lotus Advanced Computer/Network Diagnostics Panel
# text-editor Black Lotus offers integrated text editor for our hackers!
# webcam Custom webcam opener (not capturing image/video)
# browser Black Lotus custom lightweight web-browser app
# update-system Check for upgrades for your linux machine
# evolution Evolve Black Lotus to it's last version
=============
\033[31mDARKNET TOOLS (Not supported yet)\033[37m
=============
# Command Description
# ------- -----------
# exitmap Analyse tor exit nodes
# trevor TrevorC2 is a client/server model for masking command and control
|_through a normally browsable website
# koadic Windows post-exploitation rootkit
\033[37m----------------------------------------------------------------------------------------------------------------------------------
\033[5;31m|> Help menu \033[0;37m
\033[37m----------------------------------------------------------------------------------------------------------------------------------
""")
elif a =="host":
host_details()
elif a =="craxl":
crack_emailxl()
elif a =="about-us":
about_us()
elif a =="reaper":
reaper()
elif a =="cve":
cve_search()
elif a =="secure":
secure_con()
elif a =="dns-resolver":
dns_resolver()
elif a =="telnet":
telnet_connect()
elif a =="observer":
observer()
elif a =="arp-detector":
detect_arp()
elif a =="blindmaiden":
blindmaiden_automated_scanner()
#elif a =="koadic":
# koadic_darknet()
elif a =="face":
photo_ai()
elif a =="google-maps":
google_maps()
# elif a =="trevor":
# trevor_darknet()
elif a =="shells":
shell_lists()
elif a =="password":
meg_password_generator()
elif a =="steg":
steganography_meg()
elif a =="fpoint":
fake_wifi_access_point()
elif a =="sql-injection":
meg_sql_scan()
elif a =="update-system":
updatesystem()
elif a =="sniper":
sniper()
elif a=="wipp":
wipp()
elif a =="exploitdb":
exploitdb()
elif a=="webcam":
webcam_opener()
elif a=="browser":
carnival_webbrowser()
elif a=="diagnostics":
computer_diagnostics()
elif a=="meta-scraper":
meta_scraper()
elif a=="firewall":
firewall_utilis()
elif a=="postman":
postman()
elif a=="text-editor":
text_editor()
elif a=="rtsp":
rtsp_camera()
elif a=="email-va":
email_va()
elif a =="shodan":
shodan()
elif a =="exit":
os.system('cls||clear')
sys.exit()
elif a =="quit":
os.system('cls||clear')
sys.exit()
elif a =="clear":
os.system('cls||clear')
elif a =="cd":
os.chdir(input("Enter path: "))
elif a =="pwd":
path = str(os.getcwdb())
path2 = path.strip("b'")
print(path2)
elif a =="ls":
print(os.listdir(os.getcwd()))
elif a =="mkdir":
b = input("Enter the path needed for the new directory: ")
c = input("Enter new directory name: ")
path = os.path.join(b, c)
os.mkdir(path)
print("New directory ", c)
elif a =="touch":
file_name | |
"3585": 4620,
"3586": 4401,
"3587": 4767,
"2762": 5370,
"5009": 3031,
"2760": 5377,
"2765": 5365,
"999": 12925,
"4620": 3598,
"4428": 3217,
"905": 14420,
"908": 14615,
"4421": 3367,
"1847": 7709,
"1846": 8092,
"4424": 8311,
"254b": 140530,
"254a": 147896,
"4959": 1406,
"2165": 7175,
"4953": 2932,
"4950": 3748,
"2166": 6742,
"4956": 3638,
"4954": 1424,
"4955": 5024,
"2617": 5635,
"2614": 5636,
"2618": 5854,
"1906": 8565,
"3296": 4947,
"3297": 4632,
"854": 15761,
"857": 15440,
"852": 15876,
"3455": 4739,
"3458": 4446,
"858": 38275,
"1184": 10726,
"740": 18808,
"742": 19271,
"743": 19069,
"747": 40301,
"4811": 1890,
"3062": 5595,
"821": 16629,
"3069": 4913,
"2944": 5081,
"2942": 5083,
"1051": 12951,
"1052": 13779,
"1054": 12136,
"1056": 12112,
"1699": 8016,
"1278": 10043,
"4311": 3919,
"4312": 3552,
"4313": 3775,
"4316": 3343,
"4317": 4566,
"4318": 3676,
"613": 46387,
"610": 27396,
"611": 29372,
"1274": 14136,
"1275": 10059,
"1276": 11782,
"1277": 10050,
"3118": 4846,
"3117": 4997,
"3115": 4950,
"3111": 4862,
"3110": 5063,
"1309": 9875,
"4228": 3613,
"4229": 3512,
"4224": 4981,
"4225": 3513,
"496": 72198,
"497": 48227,
"4220": 3526,
"4221": 3524,
"4222": 3877,
"4223": 3797,
"24": 2129002,
"25": 2014290,
"27": 2067343,
"20": 2339421,
"21": 2330070,
"23": 2146757,
"28": 1986795,
"29": 1960306,
"4586": 3831,
"5028": 1213,
"5035": 4240,
"3939": 3916,
"3937": 4452,
"3933": 5003,
"4138": 3640,
"4139": 3638,
"4130": 3646,
"4132": 3646,
"4135": 3841,
"3841": 4016,
"3843": 4014,
"3842": 4390,
"3845": 4608,
"3844": 4665,
"3847": 4207,
"3846": 4010,
"3848": 4007,
"4040": 3763,
"4042": 3759,
"4048": 4571,
"7": 3840293,
"257b": 120786,
"257a": 165696,
"2288": 6275,
"3664": 4219,
"3666": 4314,
"3663": 4221,
"2280": 6285,
"2285": 6959,
"2287": 6940,
"4549": 3558,
"4548": 4075,
"974": 13217,
"4539": 3547,
"971": 13292,
"4826": 1838,
"2378": 6067,
"4927": 1522,
"2370": 6088,
"2371": 6083,
"3593": 4296,
"3590": 4989,
"5038": 1176,
"5039": 1168,
"5036": 1186,
"3599": 4392,
"3598": 6003,
"5032": 2399,
"5033": 2699,
"5030": 1481,
"5031": 1208,
"4435": 3861,
"1877": 8545,
"1874": 7502,
"4439": 3573,
"4945": 1448,
"4944": 1451,
"4947": 3357,
"4946": 1446,
"4940": 1465,
"4943": 1452,
"970": 13679,
"4949": 3640,
"4948": 1442,
"979": 13174,
"182": 535838,
"180": 563604,
"181": 520570,
"186": 494299,
"187": 487559,
"184": 494107,
"185": 502018,
"2110": 6789,
"2111": 6682,
"188": 532638,
"2113": 9127,
"2116": 6662,
"2117": 6662,
"2621": 6012,
"2625": 5620,
"5100": 1000,
"1912": 7356,
"1915": 7247,
"868": 15627,
"3449": 4554,
"3445": 4928,
"3444": 4460,
"862": 17427,
"865": 18092,
"3440": 4736,
"866": 18152,
"2027": 7064,
"2020": 7128,
"2021": 7142,
"2750": 5388,
"2023": 6869,
"887": 16666,
"889": 15023,
"4590": 2825,
"44c": 194500,
"104b": 564646,
"104a": 383072,
"775": 19522,
"776": 19641,
"771": 17792,
"70c": 287558,
"772": 17791,
"779": 17656,
"778": 17767,
"77": 1171582,
"76": 1152060,
"75": 1159690,
"74": 1242895,
"73": 1227457,
"72": 1279555,
"71": 1266760,
"3057": 4928,
"3054": 4936,
"2971": 5286,
"2976": 5038,
"79": 1123783,
"2975": 5039,
"1043": 12272,
"1042": 13250,
"1044": 43879,
"1683": 8326,
"1685": 9900,
"2981": 7583,
"3724": 4639,
"4300": 4022,
"4304": 3358,
"608": 28955,
"3023": 4974,
"1261": 10166,
"663": 25379,
"1264": 10148,
"3129": 4836,
"3121": 4845,
"1468": 9396,
"8": 3478782,
"1316": 9980,
"1315": 10061,
"1314": 9849,
"1313": 10339,
"1319": 40465,
"4239": 3612,
"4231": 4007,
"4230": 3986,
"319": 174329,
"318": 162397,
"3940": 4373,
"311": 171438,
"310": 172423,
"317": 190570,
"316": 271155,
"315": 171956,
"314": 173024,
"4129": 3648,
"4127": 4055,
"4126": 3757,
"4125": 4237,
"2755": 5606,
"97c": 216725,
"97b": 558433,
"440": 74820,
"97a": 217123,
"445": 76340,
"444": 64634,
"4057": 4048,
"4054": 3974,
"4051": 3911,
"630": 26420,
"3610": 4276,
"3617": 4273,
"48b": 679687,
"2292": 6366,
"2297": 6252,
"631": 25661,
"2568": 5713,
"2368": 6103,
"2364": 6810,
"5029": 1213,
"2586": 5691,
"2582": 7226,
"5021": 1243,
"5020": 1245,
"5022": 2062,
"5024": 1234,
"5027": 1214,
"5026": 1223,
"1862": 7430,
"3729": 4758,
"4446": 3643,
"1869": 8776,
"3726": 4151,
"3727": 4148,
"4686": 4731,
"4931": 1506,
"966": 13321,
"4933": 1583,
"4682": 3290,
"4683": 3098,
"4680": 3294,
"4681": 4366,
"4938": 1472,
"4689": 3228,
"2108": 6690,
"32": 1881789,
"2103": 7571,
"2102": 7638,
"2100": 6705,
"31": 1921653,
"2639": 5792,
"2635": 5597,
"2631": 5710,
"3431": 4478,
"3090": 4887,
"871": 15580,
"2741": 5859,
"4593": 3839,
"2743": 5395,
"2745": 5770,
"4597": 3491,
"2747": 5835,
"4845": 1751,
"4846": 1742,
"4599": 3124,
"4842": 1767,
"2038": 6843,
"896": 14815,
"897": 15528,
"898": 14724,
"899": 14619,
"649": 27672,
"3544": 4516,
"3543": 4832,
"2965": 5054,
"1789": 7692,
"768": 43371,
"769": 17839,
"762": 18149,
"1783": 7979,
"766": 18098,
"765": 22506,
"3040": 4958,
"3047": 4947,
"3046": 4948,
"1535": 10659,
"1070": 12362,
"1674": 8533,
"1675": 8085,
"1672": 8088,
"1096": 11721,
"679": 22368,
"4339": 3300,
"4336": 3634,
"4334": 4120,
"4335": 3806,
"4332": 3311,
"1099": 13921,
"4330": 4125,
"3131": 4835,
"3132": 5028,
"1419": 9567,
"1411": 9735,
"1416": 9763,
"1322": 12471,
"1324": 10719,
"1328": 11558,
"1329": 9777,
"4201": 3846,
"770": 17821,
"1257": 11260,
"1254": 10210,
"1520": 8859,
"1253": 38928,
"1522": 8855,
"1529": 8725,
"1258": 10169,
"308": 203699,
"309": 191298,
"3959": 4402,
"300": 226933,
"3952": 3888,
"303": 205006,
"304": 183046,
"3956": 3985,
"3955": 4310,
"307": 192848,
"4116": 5059,
"125b": 167710,
"125a": 636078,
"5011": 1271,
"4899": 3260,
"4063": 4839,
"3055": 4933,
"4064": 3739,
"4068": 3734,
"4069": 4772,
"5018": 4081,
"5019": 1249,
"4639": 3255,
"78": 1166408,
"3600": 4291,
"3606": 6422,
"3608": 4277,
"4896": 1598,
"2358": 6110,
"2350": 6945,
"4453": 3054,
"4451": 4021,
"4455": 3047,
"3738": 4141,
"3737": 4141,
"3736": 4514,
"3735": 4244,
"1897": 8067,
"1891": 7329,
"3730": 4714,
"4923": 2877,
"4690": 2417,
"4693": 4948,
"4692": 2406,
"4695": 3522,
"4926": 3162,
"4697": 3963,
"4698": 3483,
"954": 42072,
"957": 13816,
"2139": 7070,
"477": 54721,
"2648": 5692,
"2649": 5561, | |
import csv
import json
from io import open
from torch import LongTensor
import re
import random
# Token-normalisation patterns used by _clean_text().
# detect <TIME>
# e.g. "9a" / "10p": digits followed by an am/pm initial
pattern_time1 = re.compile(r"[0-9]+[ap]")
# e.g. "10;30", "10.30", "10h30"
pattern_time2 = re.compile(r"[0-9]+[;.h][0-9]+")
# NOTE(review): matches one of "a"/"p", a dot, then "a"/"m" (e.g. "a.m",
# "p.a") -- presumably intended to catch "a.m"/"p.m" tokens; confirm intent.
pattern_time3 = re.compile(r"[ap][.][am]")
# Year-like integers 2000-2019 are treated as time expressions.
pattern_time4 = range(2000, 2020)
# pattern_time5: token.isdigit() and len(token) == 3
# Thai variants: digits adjacent to Thai-script characters (U+0E00-U+0E7F).
pattern_time_th1 = re.compile(r"[\u0E00-\u0E7F]+[0-9]+")
pattern_time_th2 = re.compile(r"[0-9]+[.]*[0-9]*[\u0E00-\u0E7F]+")
pattern_time_th3 = re.compile(r"[0-9]+[.][0-9]+")
# detect <LAST> (durations: "30min", "2h", "45sec")
pattern_last1 = re.compile(r"[0-9]+min")
pattern_last2 = re.compile(r"[0-9]+h")
pattern_last3 = re.compile(r"[0-9]+sec")
# detect <DATE> (ordinals: "1st", "2nd", "3rd", "4th", ...)
pattern_date1 = re.compile(r"[0-9]+st")
pattern_date2 = re.compile(r"[0-9]+nd")
pattern_date3 = re.compile(r"[0-9]+rd")
pattern_date4 = re.compile(r"[0-9]+th")
# English clitics stripped from tokens during cleaning.
remove_list = ["'s", "'ll", "'ve", "'d", "'m"]
class AugmentedList:
    """List wrapper that serves items in fixed-size batches, wrapping around
    (and optionally reshuffling) at each epoch boundary."""

    def __init__(self, items, shuffle_between_epoch=False):
        self.items = items
        self.cur_idx = 0  # index of the next item to serve
        self.shuffle_between_epoch = shuffle_between_epoch
        if shuffle_between_epoch:
            random.shuffle(self.items)

    def next_items(self, batch_size):
        """Return the next `batch_size` items, wrapping past the end of the
        list; the wrapped-around part begins a new epoch."""
        if self.cur_idx == 0 and self.shuffle_between_epoch:
            random.shuffle(self.items)
        items = self.items
        start_idx = self.cur_idx
        end_idx = start_idx + batch_size
        if end_idx <= self.size:
            self.cur_idx = end_idx % self.size
            return items[start_idx:end_idx]
        # Wrap-around: finish this epoch, then draw the remainder from the
        # front of the list.
        first_part = items[start_idx:self.size]
        remain_size = batch_size - (self.size - start_idx)
        second_part = items[0:remain_size]
        self.cur_idx = remain_size
        # Slices are already fresh lists; concatenate directly instead of the
        # original's redundant element-by-element copy.
        returned_batch = first_part + second_part
        if self.shuffle_between_epoch:
            random.shuffle(self.items)
        return returned_batch

    @property
    def size(self):
        return len(self.items)
def _clean_text(token_list, lang):
    """
    Applying the same pre-processing on NLU as in the latest AAAI 2020 publication
    taken from https://github.com/zliucr/mixed-language-training

    Replaces time/duration/date/zip-code/number tokens with placeholder tags
    and strips common English clitics.

    :param token_list: list of token strings
    :param lang: language code ("en", "es", "th", ...)
    :return: cleaned token list, same length as token_list
    """
    token_list_clean = []
    for token in token_list:
        new_token = token
        # detect <TIME>
        # FIX: the original tested `token in pattern_time4`, comparing a str
        # against range(2000, 2020) of ints -- always False, so year tokens
        # were never tagged. Convert the digit token before the test.
        if lang != "th" and (bool(re.match(pattern_time1, token)) or bool(re.match(pattern_time2, token))
                             or bool(re.match(pattern_time3, token))
                             or (token.isdigit() and int(token) in pattern_time4)
                             or (token.isdigit() and len(token) == 3)):
            new_token = "<TIME>"
            token_list_clean.append(new_token)
            continue
        if lang == "th" and (bool(re.match(pattern_time_th1, token)) or bool(re.match(pattern_time_th2, token))
                             or bool(re.match(pattern_time_th3, token))):
            new_token = "<TIME>"
            token_list_clean.append(new_token)
            continue
        # detect <LAST> (durations such as "30min", "2h", "45sec")
        if lang == "en" and (bool(re.match(pattern_last1, token)) or bool(re.match(pattern_last2, token))
                             or bool(re.match(pattern_last3, token))):
            new_token = "<LAST>"
            token_list_clean.append(new_token)
            continue
        # detect <DATE> (ordinals such as "1st", "2nd", "3rd", "4th")
        if lang == "en" and (bool(re.match(pattern_date1, token)) or bool(re.match(pattern_date2, token))
                             or bool(re.match(pattern_date3, token)) or bool(re.match(pattern_date4, token))):
            new_token = "<DATE>"
            token_list_clean.append(new_token)
            continue
        # detect <LOCATION>: 5-digit tokens are treated as zip codes
        if lang != "th" and (token.isdigit() and len(token) == 5):
            new_token = "<LOCATION>"
            token_list_clean.append(new_token)
            continue
        # detect <NUMBER>: any remaining all-digit token
        if token.isdigit():
            new_token = "<NUMBER>"
            token_list_clean.append(new_token)
            continue
        # expand English negative contractions ("don't" -> "not")
        if lang == "en" and ("n't" in token):
            new_token = "not"
            token_list_clean.append(new_token)
            continue
        # strip the first matching English clitic ('s, 'll, 've, 'd, 'm)
        if lang == "en":
            for item in remove_list:
                if item in token:
                    new_token = token.replace(item, "")
                    break
        token_list_clean.append(new_token)
    assert len(token_list_clean) == len(token_list)
    return token_list_clean
def _parse_tsv(data_path, tokenizer, lang, intent_set=None, slot_set=None):
    """
    Taken from https://github.com/zliucr/mixed-language-training
    Input:
        data_path: the path of data
        tokenizer: sub-word tokenizer exposing .tokenize(str) -> list[str]
        lang: language code, forwarded to _clean_text
        intent_set: set of intent (empty if it is train data)
        slot_set: set of slot type (empty if it is train data)
    Output:
        process_egs: [(raw_text, sub_tokens, intent, sub_slots), ...]
        intent_set: set of intent
        slot_set: set of slot type
    """
    # FIX: avoid mutable default arguments -- the original []/["O", "X"]
    # defaults were shared and mutated across calls.
    if intent_set is None:
        intent_set = []
    if slot_set is None:
        slot_set = ["O", "X"]
    slot_type_list = ["alarm", "datetime", "location", "reminder", "weather"]
    process_egs = []
    with open(data_path) as tsv_file:
        reader = csv.reader(tsv_file, delimiter="\t")
        for i, line in enumerate(reader):
            intent = line[0]
            if intent not in intent_set:
                intent_set.append(intent)
            slot_splits = line[1].split(",")
            slot_line = []
            slot_flag = True
            if line[1] != '':
                for item in slot_splits:
                    item_splits = item.split(":")
                    assert len(item_splits) == 3
                    slot_item = {"start": item_splits[0], "end": item_splits[1], "slot": item_splits[2]}
                    flag = False
                    for slot_type in slot_type_list:
                        if slot_type in slot_item["slot"]:
                            flag = True
                    if not flag:
                        slot_flag = False
                        break
                    slot_line.append(slot_item)
            if not slot_flag:
                # skip utterances whose slot labels fall outside the known domains
                continue
            token_part = json.loads(line[4])
            tokens = _clean_text(token_part["tokenizations"][0]["tokens"], lang)
            tokenSpans = token_part["tokenizations"][0]["tokenSpans"]
            # Build one BIO label per token from the character-span annotations.
            slots = []
            for tokenspan in tokenSpans:
                nolabel = True
                for slot_item in slot_line:
                    start = tokenspan["start"]
                    if int(start) == int(slot_item["start"]):
                        nolabel = False
                        slot_ = "B-" + slot_item["slot"]
                        slots.append(slot_)
                        if slot_ not in slot_set:
                            slot_set.append(slot_)
                        break
                    if int(slot_item["start"]) < int(start) < int(slot_item["end"]):
                        nolabel = False
                        slot_ = "I-" + slot_item["slot"]
                        slots.append(slot_)
                        if slot_ not in slot_set:
                            slot_set.append(slot_)
                        break
                if nolabel:
                    slots.append("O")
            assert len(slots) == len(tokens)
            # Expand word-level labels to sub-word level; only the first piece
            # of each word keeps the real label, the rest get 'X'.
            sub_tokens = ['[CLS]']
            sub_slots = ['X']
            for j, token in enumerate(tokens):
                sub_sub_tokens = tokenizer.tokenize(token)
                sub_tokens += sub_sub_tokens
                for k, sub_token in enumerate(sub_sub_tokens):
                    if k == 0:
                        sub_slots.append(slots[j])
                    else:
                        sub_slots.append('X')
            # FIX: the original appended the typo '[SEP' (missing bracket);
            # _parse_json appends the correct '[SEP]'.
            sub_tokens += ['[SEP]']
            sub_slots.append('X')
            assert len(sub_slots) == len(sub_tokens)
            process_egs.append((' '.join(tokens), sub_tokens, intent, sub_slots))
    return process_egs, intent_set, slot_set
def _parse_json(data_path, tokenizer, intent_set=[]):
process_egs = []
with open(data_path) as fp:
for entry in json.load(fp):
intent = entry['intent']
if intent not in intent_set:
intent_set.append(intent)
words = entry['text'].lower().strip().split(' ')
if len(words) >= 3 and words[-2].endswith('?'):
words[-2] = words[-2][:-1]
tokenized_words = ['[CLS]'] + tokenizer.tokenize(' '.join(words)) + ['[SEP]']
process_egs.append((''.join(words), list(tokenized_words), intent))
return process_egs, intent_set
def _parse_mtop(data_path, tokenizer, intent_set=[], slot_set=["O", "X"]):
process_egs = []
with open(data_path) as tsv_file:
reader = csv.reader(tsv_file, delimiter="\t")
for i, line in enumerate(reader):
domain = line[3]
intent = domain+":"+line[0].split(":")[1]
if intent not in intent_set:
intent_set.append(intent)
slot_splits = line[1].split(",")
slot_line = []
if line[1] != '':
for item in slot_splits:
item_splits = item.split(":")
assert len(item_splits) == 4
slot_item = {"start": item_splits[0], "end": item_splits[1], "slot": item_splits[3]}
slot_line.append(slot_item)
token_part = json.loads(line[6])
tokens = token_part["tokens"]
tokenSpans = token_part["tokenSpans"]
slots = []
for tokenspan in tokenSpans:
nolabel = True
for slot_item in slot_line:
start = tokenspan["start"]
if int(start) == int(slot_item["start"]):
nolabel = False
slot_ = "B-" + slot_item["slot"]
slots.append(slot_)
if slot_ not in slot_set:
slot_set.append(slot_)
break
if int(slot_item["start"]) < int(start) < int(slot_item["end"]):
nolabel = False
slot_ = "I-" + slot_item["slot"]
slots.append(slot_)
if slot_ not in slot_set:
slot_set.append(slot_)
break
if nolabel:
slots.append("O")
assert len(slots) == len(tokens)
sub_tokens = ['[CLS]']
sub_slots = ['X']
for j, token in enumerate(tokens):
sub_sub_tokens = tokenizer.tokenize(token)
sub_tokens += sub_sub_tokens
for k, sub_token in enumerate(sub_sub_tokens):
if k == 0:
sub_slots.append(slots[j])
else:
sub_slots.append('X')
sub_tokens += ['[SEP']
sub_slots.append('X')
assert len(sub_slots) == len(sub_tokens)
process_egs.append((' '.join(tokens), sub_tokens, intent, sub_slots))
return process_egs, intent_set, slot_set
class Dataset:
""" """
def __init__(self, use_non_overlap, tokenizer, data_format, use_slots, train_fpaths, spt_paths, qry_paths,
val_paths, tune_paths, test_paths, portion_l, intent_types=[], slot_types=["O", "X"]):
self.tokenizer = tokenizer
self.use_slots = use_slots
self.data_format = data_format
self.intent_types = intent_types
self.slot_types = slot_types
# Train set
print("Train set ...")
train_set = self.read_split(train_fpaths)
self.train_size = len(train_set)
self.train = AugmentedList(train_set, shuffle_between_epoch=True)
# Support set
print("Support set ...")
spt_set = self.read_split(spt_paths)
self.spt_size = len(spt_set)
self.spt = AugmentedList(spt_set, shuffle_between_epoch=True)
if use_non_overlap:
# Query and Tune sets
print("Query and Tune set ...")
self.qry = {}
self.qry_size = {}
self.tune = {}
self.tune_size = {}
for lang in qry_paths:
qry_set, tune_set = self.read_split_qry_tune({lang: qry_paths[lang]}, portions=portion_l)
self.qry_size.update({lang: len(qry_set)})
self.tune_size.update({lang: len(tune_set)})
self.qry.update({lang: AugmentedList(qry_set, shuffle_between_epoch=True)})
self.tune.update({lang: AugmentedList(tune_set, shuffle_between_epoch=True)})
else:
# Query set
print("Query set ...")
self.qry = {}
self.qry_size = {}
for lang in qry_paths:
qry_set = self.read_split({lang: qry_paths[lang]}, portions=portion_l)
self.qry_size.update({lang: len(qry_set)})
self.qry.update({lang: AugmentedList(qry_set, shuffle_between_epoch=True)})
# Tune set
print("Tune set ...")
self.tune = {}
self.tune_size = {}
for lang in tune_paths:
tune_set = self.read_split({lang: tune_paths[lang]}, portions=portion_l)
self.tune_size.update({lang: len(tune_set)})
self.tune.update({lang: AugmentedList(tune_set, shuffle_between_epoch=True)})
print("Validation set ...")
self.val = {}
self.val_size = {}
for lang in val_paths:
val_set = self.read_split({lang: val_paths[lang]}, portions={})
self.val_size.update({lang: len(val_set)})
self.val.update({lang: AugmentedList(val_set, shuffle_between_epoch=True)})
print("Test set ...")
self.test = {}
self.test_size = {}
for lang in test_paths:
print("Reading language ", lang)
test_set = self.read_split({lang: test_paths[lang]}, portions={})
self.test_size.update({lang: len(test_set)})
self.test.update({lang: AugmentedList(test_set, shuffle_between_epoch=True)})
self.intent_types.sort()
if use_slots:
self.slot_types.sort()
def read_split(self, fpaths, portions={}):
"""
:param fpaths:
:return:
"""
if len(portions) == 0:
for lang in fpaths:
portions.update({lang: 1})
intent_set = self.intent_types
slot_set = self.slot_types
process_egs_shuffled = []
for lang in fpaths:
if self.data_format == "tsv":
process_egs, intent_set, slot_set = _parse_tsv(fpaths[lang],
self.tokenizer,
lang,
intent_set,
slot_set)
elif self.data_format == "json":
process_egs, intent_set = _parse_json(fpaths[lang],
self.tokenizer,
intent_set)
else:
process_egs, intent_set, slot_set = _parse_mtop(fpaths[lang],
self.tokenizer,
intent_set,
slot_set)
process_egs_shuffled.extend(random.sample(process_egs, k=int(portions[lang]*len(process_egs))))
self.intent_types = intent_set
if self.use_slots:
self.slot_types = slot_set
return process_egs_shuffled
def read_split_qry_tune(self, fpaths, portions={}):
"""
:param fpaths:
:return:
"""
if len(portions) == 0:
for lang in fpaths:
portions.update({lang: 1})
intent_set = self.intent_types
slot_set = self.slot_types
process_egs_shuffled_qry = []
process_egs_shuffled_tune = []
for lang in fpaths:
if self.data_format == "tsv":
process_egs, intent_set, slot_set = _parse_tsv(fpaths[lang],
self.tokenizer,
lang,
intent_set,
slot_set)
elif self.data_format == "json":
process_egs, intent_set = _parse_json(fpaths[lang],
self.tokenizer,
intent_set)
else:
process_egs, intent_set, slot_set = _parse_mtop(fpaths[lang],
self.tokenizer,
intent_set,
slot_set)
process_egs_shuffled = random.sample(process_egs, k=int(portions[lang]*len(process_egs)))
random.shuffle(process_egs_shuffled)
qry_size = int(len(process_egs_shuffled)*0.75)
process_egs_shuffled_qry.extend(process_egs_shuffled[:qry_size])
process_egs_shuffled_tune.extend(process_egs_shuffled[qry_size:])
self.intent_types = intent_set
if self.use_slots:
self.slot_types = slot_set
return process_egs_shuffled_qry, process_egs_shuffled_tune
def next_batch(self, batch_size, data_split, dev_langs):
"""
Usual next batch mechanism for pre-training base model
:param batch_size:
:param data_split: train or test
:return:
"""
examples = []
if len(dev_langs) != 0:
for lang in dev_langs:
examples.extend(data_split[lang].next_items(batch_size))
else:
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.