| content (string, lengths 1–1.04M) | input_ids (list, lengths 1–774k) | ratio_char_token (float64, 0.38–22.9) | token_count (int64, 1–774k) |
|---|---|---|---|
# Generated by the protocol buffer compiler. DO NOT EDIT!
from google.protobuf import descriptor
from google.protobuf import message
from google.protobuf import reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
DESCRIPTOR = descriptor.FileDescriptor(
name='plaso/proto/plaso_storage.proto',
package='plaso_storage',
serialized_pb='\n\x1fplaso/proto/plaso_storage.proto\x12\rplaso_storage\"\xbd\x01\n\tAttribute\x12\x0b\n\x03key\x18\x01 \x02(\t\x12\x0e\n\x06string\x18\x02 \x01(\t\x12\x0f\n\x07integer\x18\x03 \x01(\x03\x12#\n\x05\x61rray\x18\x04 \x01(\x0b\x32\x14.plaso_storage.Array\x12!\n\x04\x64ict\x18\x05 \x01(\x0b\x32\x13.plaso_storage.Dict\x12\x0f\n\x07\x62oolean\x18\x06 \x01(\x08\x12\x0c\n\x04\x64\x61ta\x18\x07 \x01(\x0c\x12\r\n\x05\x66loat\x18\x08 \x01(\x02\x12\x0c\n\x04none\x18\t \x01(\x08\"4\n\x04\x44ict\x12,\n\nattributes\x18\x01 \x03(\x0b\x32\x18.plaso_storage.Attribute\"\xac\x01\n\x05Value\x12\x0f\n\x07integer\x18\x01 \x01(\x03\x12\x0e\n\x06string\x18\x02 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x03 \x01(\x0c\x12#\n\x05\x61rray\x18\x04 \x01(\x0b\x32\x14.plaso_storage.Array\x12!\n\x04\x64ict\x18\x05 \x01(\x0b\x32\x13.plaso_storage.Dict\x12\x0f\n\x07\x62oolean\x18\x06 \x01(\x08\x12\r\n\x05\x66loat\x18\x07 \x01(\x02\x12\x0c\n\x04none\x18\x08 \x01(\x08\"-\n\x05\x41rray\x12$\n\x06values\x18\x01 \x03(\x0b\x32\x14.plaso_storage.Value\"\xfa\x08\n\x0b\x45ventObject\x12\x11\n\ttimestamp\x18\x01 \x01(\x03\x12\x16\n\x0etimestamp_desc\x18\x02 \x01(\t\x12\x11\n\tdata_type\x18\x03 \x02(\t\x12,\n\nattributes\x18\x04 \x03(\x0b\x32\x18.plaso_storage.Attribute\x12\x10\n\x08timezone\x18\x05 \x01(\t\x12\x10\n\x08\x66ilename\x18\x06 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x07 \x01(\t\x12\x10\n\x08pathspec\x18\x08 \x01(\x0c\x12\x0e\n\x06offset\x18\t \x01(\x03\x12\x14\n\x0cstore_number\x18\n \x01(\x03\x12\x13\n\x0bstore_index\x18\x0b \x01(\x03\x12(\n\x03tag\x18\x0c \x01(\x0b\x32\x1b.plaso_storage.EventTagging\x12<\n\x0csource_short\x18\r \x01(\x0e\x32&.plaso_storage.EventObject.SourceShort\x12\x13\n\x0bsource_long\x18\x0e \x01(\t\x12\x0e\n\x06parser\x18\x0f \x01(\t\x12\r\n\x05inode\x18\x10 \x01(\x03\x12\x10\n\x08hostname\x18\x11 \x01(\t\x12\x0e\n\x06plugin\x18\x12 \x01(\t\x12\x1a\n\x12registry_file_type\x18\x13 \x01(\t\x12\x11\n\tallocated\x18\x14 \x01(\x08\x12\x0f\n\x07\x66s_type\x18\x15 \x01(\t\x12\x11\n\trecovered\x18\x16 \x01(\x08\x12\x15\n\rrecord_number\x18\x17 \x01(\x03\x12\x13\n\x0bsource_name\x18\x18 \x01(\t\x12\x15\n\rcomputer_name\x18\x19 \x01(\t\x12\x18\n\x10\x65vent_identifier\x18\x1a \x01(\x03\x12\x13\n\x0b\x65vent_level\x18\x1b \x01(\x03\x12\x12\n\nxml_string\x18\x1c \x01(\t\x12%\n\x07strings\x18\x1d \x01(\x0b\x32\x14.plaso_storage.Array\x12\x10\n\x08username\x18\x1e \x01(\t\x12\x10\n\x08user_sid\x18\x1f \x01(\t\x12\x18\n\x10\x63\x61\x63hed_file_size\x18 \x01(\x03\x12\x16\n\x0enumber_of_hits\x18! \x01(\x03\x12\x1d\n\x15\x63\x61\x63he_directory_index\x18\" \x01(\x03\x12\r\n\x05title\x18# \x01(\t\x12%\n\x08metadata\x18$ \x01(\x0b\x32\x13.plaso_storage.Dict\x12\x0b\n\x03url\x18% \x01(\t\x12\x0f\n\x07keyname\x18& \x01(\t\x12%\n\x08regvalue\x18\' \x01(\x0b\x32\x13.plaso_storage.Dict\x12\x0c\n\x04text\x18( \x01(\t\x12\x0c\n\x04uuid\x18) \x01(\t\"\xad\x01\n\x0bSourceShort\x12\x06\n\x02\x41V\x10\x01\x12\x08\n\x04\x42\x41\x43K\x10\x02\x12\x07\n\x03\x45VT\x10\x03\x12\x08\n\x04\x45XIF\x10\x04\x12\x08\n\x04\x46ILE\x10\x05\x12\x07\n\x03LOG\x10\x06\x12\x07\n\x03LNK\x10\x07\x12\x07\n\x03LSO\x10\x08\x12\x08\n\x04META\x10\t\x12\t\n\x05PLIST\x10\n\x12\x07\n\x03RAM\x10\x0b\x12\n\n\x06RECBIN\x10\x0c\x12\x07\n\x03REG\x10\r\x12\x0b\n\x07WEBHIST\x10\x0e\x12\x0b\n\x07TORRENT\x10\x0f\x12\x07\n\x03JOB\x10\x10\"\xb2\x01\n\x0c\x45ventTagging\x12\x14\n\x0cstore_number\x18\x01 \x01(\x03\x12\x13\n\x0bstore_index\x18\x02 \x01(\x03\x12\x0f\n\x07\x63omment\x18\x03 \x01(\t\x12\r\n\x05\x63olor\x18\x04 \x01(\t\x12-\n\x04tags\x18\x05 \x03(\x0b\x32\x1f.plaso_storage.EventTagging.Tag\x12\x12\n\nevent_uuid\x18\x06 \x01(\t\x1a\x14\n\x03Tag\x12\r\n\x05value\x18\x01 \x02(\t\"\xfc\x01\n\nEventGroup\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x17\n\x0f\x66irst_timestamp\x18\x03 \x01(\x03\x12\x16\n\x0elast_timestamp\x18\x04 \x01(\x03\x12\r\n\x05\x63olor\x18\x05 \x01(\t\x12\x10\n\x08\x63\x61tegory\x18\x06 \x01(\t\x12:\n\x06\x65vents\x18\x07 \x03(\x0b\x32*.plaso_storage.EventGroup.EventDescription\x1a=\n\x10\x45ventDescription\x12\x14\n\x0cstore_number\x18\x01 \x02(\x03\x12\x13\n\x0bstore_index\x18\x02 \x02(\x03\"\xed\x01\n\nPreProcess\x12\x33\n\x16\x63ollection_information\x18\x01 \x01(\x0b\x32\x13.plaso_storage.Dict\x12$\n\x07\x63ounter\x18\x02 \x01(\x0b\x32\x13.plaso_storage.Dict\x12)\n\x0bstore_range\x18\x03 \x01(\x0b\x32\x14.plaso_storage.Array\x12,\n\nattributes\x18\x04 \x03(\x0b\x32\x18.plaso_storage.Attribute\x12+\n\x0eplugin_counter\x18\x05 \x01(\x0b\x32\x13.plaso_storage.Dict\"\xc7\x01\n\x0e\x41nalysisReport\x12\x13\n\x0bplugin_name\x18\x01 \x01(\t\x12\x15\n\rtime_compiled\x18\x02 \x01(\x03\x12\x0c\n\x04text\x18\x03 \x01(\t\x12\x0e\n\x06images\x18\x04 \x03(\x0c\x12(\n\x0breport_dict\x18\x05 \x01(\x0b\x32\x13.plaso_storage.Dict\x12*\n\x0creport_array\x18\x06 \x01(\x0b\x32\x14.plaso_storage.Array\x12\x15\n\rfilter_string\x18\x07 \x01(\t')
_EVENTOBJECT_SOURCESHORT = descriptor.EnumDescriptor(
name='SourceShort',
full_name='plaso_storage.EventObject.SourceShort',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='AV', index=0, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='BACK', index=1, number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='EVT', index=2, number=3,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='EXIF', index=3, number=4,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='FILE', index=4, number=5,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='LOG', index=5, number=6,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='LNK', index=6, number=7,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='LSO', index=7, number=8,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='META', index=8, number=9,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='PLIST', index=9, number=10,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='RAM', index=10, number=11,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='RECBIN', index=11, number=12,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='REG', index=12, number=13,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='WEBHIST', index=13, number=14,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TORRENT', index=14, number=15,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='JOB', index=15, number=16,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1492,
serialized_end=1665,
)
_ATTRIBUTE = descriptor.Descriptor(
name='Attribute',
full_name='plaso_storage.Attribute',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='key', full_name='plaso_storage.Attribute.key', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='string', full_name='plaso_storage.Attribute.string', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='integer', full_name='plaso_storage.Attribute.integer', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='array', full_name='plaso_storage.Attribute.array', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='dict', full_name='plaso_storage.Attribute.dict', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='boolean', full_name='plaso_storage.Attribute.boolean', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='data', full_name='plaso_storage.Attribute.data', index=6,
number=7, type=12, cpp_type=9, label=1,
has_default_value=False, default_value="",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='float', full_name='plaso_storage.Attribute.float', index=7,
number=8, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='none', full_name='plaso_storage.Attribute.none', index=8,
number=9, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=51,
serialized_end=240,
)
_DICT = descriptor.Descriptor(
name='Dict',
full_name='plaso_storage.Dict',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='attributes', full_name='plaso_storage.Dict.attributes', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=242,
serialized_end=294,
)
_VALUE = descriptor.Descriptor(
name='Value',
full_name='plaso_storage.Value',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='integer', full_name='plaso_storage.Value.integer', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='string', full_name='plaso_storage.Value.string', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='data', full_name='plaso_storage.Value.data', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value="",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='array', full_name='plaso_storage.Value.array', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='dict', full_name='plaso_storage.Value.dict', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='boolean', full_name='plaso_storage.Value.boolean', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='float', full_name='plaso_storage.Value.float', index=6,
number=7, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='none', full_name='plaso_storage.Value.none', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=297,
serialized_end=469,
)
_ARRAY = descriptor.Descriptor(
name='Array',
full_name='plaso_storage.Array',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='values', full_name='plaso_storage.Array.values', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=471,
serialized_end=516,
)
_EVENTOBJECT = descriptor.Descriptor(
name='EventObject',
full_name='plaso_storage.EventObject',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='timestamp', full_name='plaso_storage.EventObject.timestamp', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='timestamp_desc', full_name='plaso_storage.EventObject.timestamp_desc', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='data_type', full_name='plaso_storage.EventObject.data_type', index=2,
number=3, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='attributes', full_name='plaso_storage.EventObject.attributes', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='timezone', full_name='plaso_storage.EventObject.timezone', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='filename', full_name='plaso_storage.EventObject.filename', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='display_name', full_name='plaso_storage.EventObject.display_name', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='pathspec', full_name='plaso_storage.EventObject.pathspec', index=7,
number=8, type=12, cpp_type=9, label=1,
has_default_value=False, default_value="",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='offset', full_name='plaso_storage.EventObject.offset', index=8,
number=9, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='store_number', full_name='plaso_storage.EventObject.store_number', index=9,
number=10, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='store_index', full_name='plaso_storage.EventObject.store_index', index=10,
number=11, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='tag', full_name='plaso_storage.EventObject.tag', index=11,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='source_short', full_name='plaso_storage.EventObject.source_short', index=12,
number=13, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='source_long', full_name='plaso_storage.EventObject.source_long', index=13,
number=14, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='parser', full_name='plaso_storage.EventObject.parser', index=14,
number=15, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='inode', full_name='plaso_storage.EventObject.inode', index=15,
number=16, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='hostname', full_name='plaso_storage.EventObject.hostname', index=16,
number=17, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='plugin', full_name='plaso_storage.EventObject.plugin', index=17,
number=18, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='registry_file_type', full_name='plaso_storage.EventObject.registry_file_type', index=18,
number=19, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='allocated', full_name='plaso_storage.EventObject.allocated', index=19,
number=20, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='fs_type', full_name='plaso_storage.EventObject.fs_type', index=20,
number=21, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='recovered', full_name='plaso_storage.EventObject.recovered', index=21,
number=22, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='record_number', full_name='plaso_storage.EventObject.record_number', index=22,
number=23, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='source_name', full_name='plaso_storage.EventObject.source_name', index=23,
number=24, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='computer_name', full_name='plaso_storage.EventObject.computer_name', index=24,
number=25, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='event_identifier', full_name='plaso_storage.EventObject.event_identifier', index=25,
number=26, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='event_level', full_name='plaso_storage.EventObject.event_level', index=26,
number=27, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='xml_string', full_name='plaso_storage.EventObject.xml_string', index=27,
number=28, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='strings', full_name='plaso_storage.EventObject.strings', index=28,
number=29, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='username', full_name='plaso_storage.EventObject.username', index=29,
number=30, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='user_sid', full_name='plaso_storage.EventObject.user_sid', index=30,
number=31, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='cached_file_size', full_name='plaso_storage.EventObject.cached_file_size', index=31,
number=32, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='number_of_hits', full_name='plaso_storage.EventObject.number_of_hits', index=32,
number=33, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='cache_directory_index', full_name='plaso_storage.EventObject.cache_directory_index', index=33,
number=34, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='title', full_name='plaso_storage.EventObject.title', index=34,
number=35, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='metadata', full_name='plaso_storage.EventObject.metadata', index=35,
number=36, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='url', full_name='plaso_storage.EventObject.url', index=36,
number=37, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='keyname', full_name='plaso_storage.EventObject.keyname', index=37,
number=38, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='regvalue', full_name='plaso_storage.EventObject.regvalue', index=38,
number=39, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='text', full_name='plaso_storage.EventObject.text', index=39,
number=40, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='uuid', full_name='plaso_storage.EventObject.uuid', index=40,
number=41, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_EVENTOBJECT_SOURCESHORT,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=519,
serialized_end=1665,
)
_EVENTTAGGING_TAG = descriptor.Descriptor(
name='Tag',
full_name='plaso_storage.EventTagging.Tag',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='value', full_name='plaso_storage.EventTagging.Tag.value', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1826,
serialized_end=1846,
)
_EVENTTAGGING = descriptor.Descriptor(
name='EventTagging',
full_name='plaso_storage.EventTagging',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='store_number', full_name='plaso_storage.EventTagging.store_number', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='store_index', full_name='plaso_storage.EventTagging.store_index', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='comment', full_name='plaso_storage.EventTagging.comment', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='color', full_name='plaso_storage.EventTagging.color', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='tags', full_name='plaso_storage.EventTagging.tags', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='event_uuid', full_name='plaso_storage.EventTagging.event_uuid', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_EVENTTAGGING_TAG, ],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1668,
serialized_end=1846,
)
_EVENTGROUP_EVENTDESCRIPTION = descriptor.Descriptor(
name='EventDescription',
full_name='plaso_storage.EventGroup.EventDescription',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='store_number', full_name='plaso_storage.EventGroup.EventDescription.store_number', index=0,
number=1, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='store_index', full_name='plaso_storage.EventGroup.EventDescription.store_index', index=1,
number=2, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2040,
serialized_end=2101,
)
_EVENTGROUP = descriptor.Descriptor(
name='EventGroup',
full_name='plaso_storage.EventGroup',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='name', full_name='plaso_storage.EventGroup.name', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='description', full_name='plaso_storage.EventGroup.description', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='first_timestamp', full_name='plaso_storage.EventGroup.first_timestamp', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='last_timestamp', full_name='plaso_storage.EventGroup.last_timestamp', index=3,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='color', full_name='plaso_storage.EventGroup.color', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='category', full_name='plaso_storage.EventGroup.category', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='events', full_name='plaso_storage.EventGroup.events', index=6,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_EVENTGROUP_EVENTDESCRIPTION, ],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1849,
serialized_end=2101,
)
_PREPROCESS = descriptor.Descriptor(
name='PreProcess',
full_name='plaso_storage.PreProcess',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='collection_information', full_name='plaso_storage.PreProcess.collection_information', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='counter', full_name='plaso_storage.PreProcess.counter', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='store_range', full_name='plaso_storage.PreProcess.store_range', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='attributes', full_name='plaso_storage.PreProcess.attributes', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='plugin_counter', full_name='plaso_storage.PreProcess.plugin_counter', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2104,
serialized_end=2341,
)
_ANALYSISREPORT = descriptor.Descriptor(
name='AnalysisReport',
full_name='plaso_storage.AnalysisReport',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='plugin_name', full_name='plaso_storage.AnalysisReport.plugin_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='time_compiled', full_name='plaso_storage.AnalysisReport.time_compiled', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='text', full_name='plaso_storage.AnalysisReport.text', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='images', full_name='plaso_storage.AnalysisReport.images', index=3,
number=4, type=12, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='report_dict', full_name='plaso_storage.AnalysisReport.report_dict', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='report_array', full_name='plaso_storage.AnalysisReport.report_array', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='filter_string', full_name='plaso_storage.AnalysisReport.filter_string', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2344,
serialized_end=2543,
)
_ATTRIBUTE.fields_by_name['array'].message_type = _ARRAY
_ATTRIBUTE.fields_by_name['dict'].message_type = _DICT
_DICT.fields_by_name['attributes'].message_type = _ATTRIBUTE
_VALUE.fields_by_name['array'].message_type = _ARRAY
_VALUE.fields_by_name['dict'].message_type = _DICT
_ARRAY.fields_by_name['values'].message_type = _VALUE
_EVENTOBJECT.fields_by_name['attributes'].message_type = _ATTRIBUTE
_EVENTOBJECT.fields_by_name['tag'].message_type = _EVENTTAGGING
_EVENTOBJECT.fields_by_name['source_short'].enum_type = _EVENTOBJECT_SOURCESHORT
_EVENTOBJECT.fields_by_name['strings'].message_type = _ARRAY
_EVENTOBJECT.fields_by_name['metadata'].message_type = _DICT
_EVENTOBJECT.fields_by_name['regvalue'].message_type = _DICT
_EVENTOBJECT_SOURCESHORT.containing_type = _EVENTOBJECT;
_EVENTTAGGING_TAG.containing_type = _EVENTTAGGING;
_EVENTTAGGING.fields_by_name['tags'].message_type = _EVENTTAGGING_TAG
_EVENTGROUP_EVENTDESCRIPTION.containing_type = _EVENTGROUP;
_EVENTGROUP.fields_by_name['events'].message_type = _EVENTGROUP_EVENTDESCRIPTION
_PREPROCESS.fields_by_name['collection_information'].message_type = _DICT
_PREPROCESS.fields_by_name['counter'].message_type = _DICT
_PREPROCESS.fields_by_name['store_range'].message_type = _ARRAY
_PREPROCESS.fields_by_name['attributes'].message_type = _ATTRIBUTE
_PREPROCESS.fields_by_name['plugin_counter'].message_type = _DICT
_ANALYSISREPORT.fields_by_name['report_dict'].message_type = _DICT
_ANALYSISREPORT.fields_by_name['report_array'].message_type = _ARRAY
DESCRIPTOR.message_types_by_name['Attribute'] = _ATTRIBUTE
DESCRIPTOR.message_types_by_name['Dict'] = _DICT
DESCRIPTOR.message_types_by_name['Value'] = _VALUE
DESCRIPTOR.message_types_by_name['Array'] = _ARRAY
DESCRIPTOR.message_types_by_name['EventObject'] = _EVENTOBJECT
DESCRIPTOR.message_types_by_name['EventTagging'] = _EVENTTAGGING
DESCRIPTOR.message_types_by_name['EventGroup'] = _EVENTGROUP
DESCRIPTOR.message_types_by_name['PreProcess'] = _PREPROCESS
DESCRIPTOR.message_types_by_name['AnalysisReport'] = _ANALYSISREPORT
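# The descriptor wiring above is normally followed by the generated message
# classes. The definitions below follow the standard old-style protoc output
# pattern (protobuf 2.x, Python 2) for the descriptors defined in this file;
# they are a hedged reconstruction of that pattern, not verbatim compiler
# output.

class Attribute(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _ATTRIBUTE

  # @@protoc_insertion_point(class_scope:plaso_storage.Attribute)

class Dict(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _DICT

  # @@protoc_insertion_point(class_scope:plaso_storage.Dict)

class Value(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _VALUE

  # @@protoc_insertion_point(class_scope:plaso_storage.Value)

class Array(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _ARRAY

  # @@protoc_insertion_point(class_scope:plaso_storage.Array)

class EventObject(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _EVENTOBJECT

  # @@protoc_insertion_point(class_scope:plaso_storage.EventObject)

class EventTagging(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType

  class Tag(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _EVENTTAGGING_TAG

    # @@protoc_insertion_point(class_scope:plaso_storage.EventTagging.Tag)
  DESCRIPTOR = _EVENTTAGGING

  # @@protoc_insertion_point(class_scope:plaso_storage.EventTagging)

class EventGroup(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType

  class EventDescription(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _EVENTGROUP_EVENTDESCRIPTION

    # @@protoc_insertion_point(class_scope:plaso_storage.EventGroup.EventDescription)
  DESCRIPTOR = _EVENTGROUP

  # @@protoc_insertion_point(class_scope:plaso_storage.EventGroup)

class PreProcess(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _PREPROCESS

  # @@protoc_insertion_point(class_scope:plaso_storage.PreProcess)

class AnalysisReport(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _ANALYSISREPORT

  # @@protoc_insertion_point(class_scope:plaso_storage.AnalysisReport)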
# @@protoc_insertion_point(module_scope)
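# Example round-trip with the reconstructed classes above (illustrative
# sketch only; the field values are invented):
#
#   event = EventObject()
#   event.data_type = 'windows:registry:key_value'  # required field
#   event.timestamp = 1325376000000000
#   blob = event.SerializeToString()
#   parsed = EventObject()
#   parsed.ParseFromString(blob)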
| [
2,
2980,
515,
416,
262,
8435,
11876,
17050,
13,
220,
8410,
5626,
48483,
0,
198,
198,
6738,
23645,
13,
11235,
672,
3046,
1330,
43087,
198,
6738,
23645,
13,
11235,
672,
3046,
1330,
3275,
198,
6738,
23645,
13,
11235,
672,
3046,
1330,
145... | 2.306651 | 19,051 |
from django.db import models
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
628
] | 3.75 | 8 |
import psycopg
import elasticsearch
from datetime import datetime
from elasticsearch import helpers, Elasticsearch

start = datetime.now()

# Connect to Postgres (psycopg 3) and Elasticsearch. The psycopg2-era
# "extras" module does not exist in psycopg 3 and was not used here.
postgres_client = psycopg.connect("postgres://username:password@127.0.0.1")
elasticsearch_client = Elasticsearch("http://username:password@127.0.0.1:9200")

cursor = postgres_client.cursor()
elasticsearch_client.indices.create(index='index_name')

_id = 0
cursor.execute("""SELECT * FROM table_name""")

# Stream rows out of Postgres in fixed-size batches and bulk-index each batch.
BATCH_SIZE = 10000
many = cursor.fetchmany(BATCH_SIZE)
while many:
    package = [{
        '_index': 'index_name',
        '_id': (_id := _id + 1),
        '_source': {
            'name': name,
            'description': description
        }
    } for name, description in many]
    helpers.bulk(elasticsearch_client, package, max_retries=10)
    many = cursor.fetchmany(BATCH_SIZE)

print(elasticsearch_client.count(index='index_name'))
elasticsearch_client.indices.delete(index='index_name')
cursor.close()
postgres_client.close()
print(datetime.now() - start) | [
11748,
17331,
22163,
70,
198,
11748,
27468,
12947,
198,
11748,
17331,
22163,
70,
13,
2302,
8847,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
27468,
12947,
1330,
49385,
11,
48567,
12947,
198,
198,
9688,
796,
4818,
8079,
13,
2197,
... | 2.796512 | 344 |
import pandas as pd
import os
import logging
from pathlib import Path
import sys
#
# Configure the path
#FILE_PATH = Path(__file__).resolve().parent
FILE_PATH = '/home/emi/unipd/Sartori_CBSD/project/cbsdproject'
DATA_PATH = FILE_PATH + '/data'
DATA_FILE = DATA_PATH + '/tweets_cleaned.csv'
UTILS_PATH = FILE_PATH + '/utils'
MODELS_PATH = FILE_PATH + '/models'
#DATA_PATH = FILE_PATH / 'data'
#DATA_FILE = DATA_PATH / 'tweets_cleaned.csv'
#UTILS_PATH = FILE_PATH / 'utils'
sys.path.append(str(UTILS_PATH))
import clustering_embeddings
CLASSIFIER_FILE = 'classifier_umlfit_parties_exported.pkl'
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('Clustering Embeddings')
df = pd.read_csv(DATA_FILE, nrows=300)
party = 1
df_party = df[df['party']==party]
OUTPUT_EMBEDDINGS_FILE = f'umlfit_embeddings_party{party}.pkl'
clustering_embeddings.get_save_embeddings(CLASSIFIER_FILE, df_party, 'full_text', embeddings_filename=OUTPUT_EMBEDDINGS_FILE)
party = 1
OUTPUT_EMBEDDINGS_FILE = f'umlfit_embeddings_party{party}.pkl'
OUTPUT_KMEANS_EMBEDDINGS_FILE = f'labels_kmeanstfidf_party{party}.pkl'
clustering_embeddings.get_clusters(OUTPUT_EMBEDDINGS_FILE, OUTPUT_KMEANS_EMBEDDINGS_FILE)
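# Never called here; a hedged sketch of what get_clusters above is assumed to
# do (the real implementation lives in utils/clustering_embeddings.py and is
# not shown in this file):
def _kmeans_sketch(embeddings_file, labels_file, n_clusters=8):
    """Load the pickled embedding matrix, cluster it with k-means, and pickle
    the labels. n_clusters=8 is an illustrative guess, not the project's
    actual setting."""
    import pickle
    from sklearn.cluster import KMeans
    with open(embeddings_file, 'rb') as fh:
        embeddings = pickle.load(fh)  # expected shape: (n_tweets, dim)
    labels = KMeans(n_clusters=n_clusters).fit_predict(embeddings)
    with open(labels_file, 'wb') as fh:
        pickle.dump(labels, fh)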
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
28686,
198,
11748,
18931,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
25064,
198,
2,
198,
2,
17056,
495,
262,
3108,
198,
2,
25664,
62,
34219,
796,
10644,
7,
834,
7753,
834,
737,
411,
... | 2.421687 | 498 |
from datetime import datetime, timedelta
from django.db import models
from django.utils.timezone import utc
from django.contrib.auth.models import AbstractUser
from django.conf import settings
| [
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
26791,
13,
2435,
11340,
1330,
3384,
66,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,... | 3.648148 | 54 |
#!/usr/bin/env python3
import os
import sys
import logging
import csv
import argparse
from signal import signal, SIGPIPE, SIG_DFL
logger = logging.getLogger()
signal(SIGPIPE, SIG_DFL)
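# FILTER_MAP below expects four predicates and a main() driver. The versions
# here are hedged sketches: each predicate is assumed to take one
# csv.DictReader row (a dict) and return True when the row should be kept,
# and the column names ('state', 'title', 'group', 'categories') are
# illustrative assumptions, not a known schema.
def unidentified_states(row):
    """Keep rows whose assumed 'state' column is empty/unidentified."""
    return not row.get('state', '').strip()

def no_title(row):
    """Keep rows whose assumed 'title' column is empty."""
    return not row.get('title', '').strip()

def no_group(row):
    """Keep rows whose assumed 'group' column is empty."""
    return not row.get('group', '').strip()

def multiple_categories(row):
    """Keep rows whose assumed comma-separated 'categories' column lists
    more than one value."""
    return len([c for c in row.get('categories', '').split(',') if c.strip()]) > 1

def main(args):
    """Sketch of the driver: read the CSV, apply the chosen filter (if any),
    and write the requested columns to stdout."""
    reader = csv.DictReader(args.infile)
    rows = reader if args.filter is None else filter(FILTER_MAP[args.filter], reader)
    writer = csv.DictWriter(sys.stdout, fieldnames=args.columns or reader.fieldnames,
                            extrasaction='ignore')
    writer.writeheader()
    for row in rows:
        writer.writerow(row)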
FILTER_MAP = {
'ufo-states': unidentified_states,
'no-title': no_title,
'no-group': no_group,
'multi-cat': multiple_categories
}
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Filters a CSV file using a custom set of predefined filters')
parser.add_argument('infile', nargs='?',
type=argparse.FileType('r'), default=sys.stdin,
help='Path to the CSV file to search on')
parser.add_argument('--columns', type=str, nargs='+',
help='Column names to output')
parser.add_argument('--filter', type=str,
choices=sorted(FILTER_MAP.keys()),
help='Specify a predefined filter to run on the CSV')
args = parser.parse_args()
main(args)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
18931,
198,
11748,
269,
21370,
198,
11748,
1822,
29572,
198,
6738,
6737,
1330,
6737,
11,
33993,
47,
4061,
36,
11,
33993,
62,
35,
3... | 2.302752 | 436 |
#-*- coding: utf-8 -*-
'''
evaluate AHDE
'''
import csv
from AHDE_Model import *
from AHDE_process_data import *
from AHDE_evaluation import *
import os
import time
import argparse
from random import shuffle
from params import Params
if __name__ == '__main__':
p = argparse.ArgumentParser()
p.add_argument('--model_path', type=str, default="")
p.add_argument('--batch_size', type=int, default=256)
p.add_argument('--encoder_size', type=int, default=80)
p.add_argument('--context_size', type=int, default=10)
p.add_argument('--encoderR_size', type=int, default=80)
# siamese RNN
p.add_argument('--num_layer', type=int, default=2)
p.add_argument('--hidden_dim', type=int, default=300)
# context RNN
p.add_argument('--num_layer_con', type=int, default=2)
p.add_argument('--hidden_dim_con', type=int, default=300)
p.add_argument('--embed_size', type=int, default=200)
p.add_argument('--num_train_steps', type=int, default=10000)
p.add_argument('--lr', type=float, default=1e-1)
p.add_argument('--valid_freq', type=int, default=500)
p.add_argument('--is_save', type=int, default=0)
p.add_argument('--graph_prefix', type=str, default="default")
p.add_argument('--is_test', type=int, default=0)
p.add_argument('--use_glove', type=int, default=0)
p.add_argument('--fix_embed', type=int, default=0)
# latent topic
p.add_argument('--memory_dim', type=int, default=32)
p.add_argument('--topic_size', type=int, default=0)
args = p.parse_args()
main(
model_path=args.model_path,
batch_size=args.batch_size,
encoder_size=args.encoder_size,
context_size=args.context_size,
encoderR_size=args.encoderR_size,
num_layer=args.num_layer,
hidden_dim=args.hidden_dim,
num_layer_con=args.num_layer_con,
hidden_dim_con=args.hidden_dim_con,
embed_size=args.embed_size,
num_train_steps=args.num_train_steps,
lr=args.lr,
valid_freq=args.valid_freq,
is_save=args.is_save,
is_test=args.is_test,
use_glove=args.use_glove,
fix_embed=args.fix_embed,
memory_dim=args.memory_dim,
topic_size=args.topic_size
) | [
2,
12,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
7061,
6,
198,
49786,
28159,
7206,
198,
7061,
6,
198,
11748,
269,
21370,
198,
6738,
28159,
7206,
62,
17633,
1330,
1635,
198,
6738,
28159,
7206,
62,
14681,
62,
7890,
... | 2.189215 | 1,057 |
# -*- coding: utf-8 -*-
from app import *
from functools import wraps
from datetime import datetime
# Decorators
# ----------------------------------------------------------------------------------------------------------------------
# TODO: Do decorators have to appear in every .py file, or can they be factored out into one central place?
def login_erforderlich(f):
    """This decorator checks whether the user is logged in; if not, it redirects to the login page."""
    # Hedged reconstruction of the wrapper body: the 'user_id' session key and
    # the 'login' endpoint name are assumptions, and session/redirect/url_for
    # are expected to arrive via "from app import *".
    @wraps(f)
    def decorated_function(*args, **kwargs):
        if session.get('user_id') is None:
            return redirect(url_for('login'))
        return f(*args, **kwargs)
    return decorated_function

def admin_erforderlich(f):
    """This decorator checks whether the user status is less than two; otherwise it redirects to the start page."""
    # Hedged reconstruction as above; the 'user_status' session key and the
    # 'index' endpoint name are assumptions.
    @wraps(f)
    def decorated_function(*args, **kwargs):
        if session.get('user_status', 99) >= 2:
            return redirect(url_for('index'))
        return f(*args, **kwargs)
    return decorated_function
# ----------------------------------------------------------------------------------------------------------------------
# Standard query method for executing database queries
# User methods
def get_user_id(username):
"""Convenience method to look up the id for a username."""
r = query_db('select user_id from user where user_name = ?',
[username], one=True)
return r[0] if r else None
def get_user_name(userid):
"""Convenience method to look up the name for a userid."""
rv = query_db('select user_name from user where user_id = ?',
[userid], one=True)
return rv[0] if rv else None
# User routing
@app.route('/userlist')
def user_list():
""" Gibt eine Liste von allen Benutzern aus die registriert sind"""
rv = query_db('select user_name from user')
if rv is None:
abort(404)
return render_template('userlist.htm', users=query_db('''select user_name, user_id, user_email, user_land,
user_status, user_points from user order by user_points desc'''))
@app.route('/userinfo')
@login_erforderlich
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
598,
1330,
1635,
198,
6738,
1257,
310,
10141,
1330,
27521,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
2,
4280,
273,
2024,
198,
2,
16529,
3880,
19351,
438,
... | 2.774242 | 660 |
from .basic_led_strip_proxy import BasicLedStripProxy
| [
6738,
764,
35487,
62,
992,
62,
36311,
62,
36436,
1330,
14392,
42416,
1273,
5528,
44148,
198
] | 3.375 | 16 |
import image_sectioner
import video_capture
import video_feed_test
import pyueye_main
import video_capture_with_IDS
if __name__=="__main__":
main() | [
11748,
2939,
62,
5458,
263,
201,
198,
11748,
2008,
62,
27144,
495,
201,
198,
11748,
2008,
62,
12363,
62,
9288,
201,
198,
11748,
12972,
518,
5948,
62,
12417,
201,
198,
11748,
2008,
62,
27144,
495,
62,
4480,
62,
14255,
201,
198,
201,
... | 2.65 | 60 |
import unittest
| [
11748,
555,
715,
395,
628
] | 3.4 | 5 |
from cuschess.logic import *
import pygame
import time
LIGHTPINK = "#FFC0CB"
PINK = "#FF69B4"
RED = "#FF0000"
WHITE = "#FFFFFF"
BLACK = "#000000"
| [
198,
198,
6738,
269,
385,
2395,
824,
13,
6404,
291,
1330,
1635,
198,
11748,
12972,
6057,
198,
11748,
640,
198,
198,
43,
9947,
47,
17248,
796,
25113,
37,
4851,
15,
23199,
1,
198,
47,
17248,
796,
25113,
5777,
3388,
33,
19,
1,
198,
2... | 2.307692 | 65 |
import argparse
import os
import numpy as np
import autosklearn
import autosklearn.data
import autosklearn.data.data_manager
import autosklearn.models.evaluator
from ParamSklearn.classification import ParamSklearnClassifier
parser = argparse.ArgumentParser()
parser.add_argument('input')
parser.add_argument('output')
args = parser.parse_args()
input = args.input
dataset = 'madeline'
output = args.output
D = autosklearn.data.data_manager.DataManager(dataset, input)
X = D.data['X_train']
y = D.data['Y_train']
X_valid = D.data['X_valid']
X_test = D.data['X_test']
# Subset of features found with RFE. Feature with least importance in sklearn
# RF removed. Afterwards, trained RF on remaining features with 5CV. In the
# end, choose feature set with lowest error
features = [52, 70, 74, 83, 85, 135, 162, 183, 184, 185, 191, 197, 232, 237,
239, 252]
X = X[:, features]
X_valid = X_valid[:, features]
X_test = X_test[:, features]
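# Never called here; a hedged sketch of the RFE loop described in the comment
# above (n_estimators and the accuracy-based CV score are illustrative
# assumptions, not the settings of the original run):
def _rfe_sketch(X, y):
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.model_selection import cross_val_score
    remaining = list(range(X.shape[1]))
    best_error, best_features = float('inf'), list(remaining)
    while len(remaining) > 1:
        rf = RandomForestClassifier(n_estimators=100)
        # 5-fold CV error of an RF trained on the currently remaining features.
        error = 1.0 - cross_val_score(rf, X[:, remaining], y, cv=5).mean()
        if error < best_error:
            best_error, best_features = error, list(remaining)
        # Drop the feature the fitted RF ranks least important.
        rf.fit(X[:, remaining], y)
        remaining.pop(int(np.argmin(rf.feature_importances_)))
    return best_features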
# Weights of the ensemble members as determined by Ensemble Selection
weights = np.array([0.100000, 0.080000, 0.080000, 0.060000, 0.060000,
0.060000, 0.060000, 0.040000, 0.040000, 0.040000,
0.040000, 0.040000, 0.020000, 0.020000, 0.020000,
0.020000, 0.020000, 0.020000, 0.020000, 0.020000,
0.020000, 0.020000, 0.020000, 0.020000, 0.020000,
0.020000, 0.020000])
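# Never called here; a hedged sketch of how Ensemble Selection weights like
# these are typically applied at prediction time (the script's real
# combination happens after the member models below are fitted):
def _weighted_ensemble_sketch(member_probas, weights):
    """member_probas: one (n_samples, n_classes) probability array per member;
    returns their weighted sum, from which argmax gives the ensemble label."""
    return np.sum([w * p for w, p in zip(weights, member_probas)], axis=0)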
# Ensemble members found by SMAC
configurations = [
{'balancing:strategy': 'weighting',
'classifier': 'k_nearest_neighbors',
'imputation:strategy': 'median',
'k_nearest_neighbors:algorithm': 'auto',
'k_nearest_neighbors:leaf_size': '30.0',
'k_nearest_neighbors:n_neighbors': '4.0',
'k_nearest_neighbors:p': '1.0',
'k_nearest_neighbors:weights': 'distance',
'preprocessor': 'select_rates',
'rescaling:strategy': 'standard',
'select_rates:alpha': '0.124513266268',
'select_rates:mode': 'fdr',
'select_rates:score_func': 'f_classif'},
{'balancing:strategy': 'weighting',
'classifier': 'qda',
'imputation:strategy': 'mean',
'kitchen_sinks:gamma': '0.802981892271',
'kitchen_sinks:n_components': '704.0',
'preprocessor': 'kitchen_sinks',
'qda:reg_param': '7.66537661987',
'qda:tol': '0.000779904033875',
'rescaling:strategy': 'standard'},
{'balancing:strategy': 'none',
'classifier': 'qda',
'imputation:strategy': 'mean',
'kitchen_sinks:gamma': '0.658527701661',
'kitchen_sinks:n_components': '499.0',
'preprocessor': 'kitchen_sinks',
'qda:reg_param': '4.13193776587',
'qda:tol': '0.0026677961139',
'rescaling:strategy': 'standard'},
{'balancing:strategy': 'none',
'classifier': 'qda',
'imputation:strategy': 'mean',
'kitchen_sinks:gamma': '0.658527701661',
'kitchen_sinks:n_components': '498.0',
'preprocessor': 'kitchen_sinks',
'qda:reg_param': '7.39545021165',
'qda:tol': '0.00116251661342',
'rescaling:strategy': 'standard'},
{'balancing:strategy': 'none',
'classifier': 'qda',
'imputation:strategy': 'mean',
'kitchen_sinks:gamma': '0.758771699267',
'kitchen_sinks:n_components': '794.0',
'preprocessor': 'kitchen_sinks',
'qda:reg_param': '4.57263430441',
'qda:tol': '0.00284918317943',
'rescaling:strategy': 'standard'},
{'balancing:strategy': 'none',
'classifier': 'k_nearest_neighbors',
'imputation:strategy': 'most_frequent',
'k_nearest_neighbors:algorithm': 'auto',
'k_nearest_neighbors:leaf_size': '30.0',
'k_nearest_neighbors:n_neighbors': '5.0',
'k_nearest_neighbors:p': '1.0',
'k_nearest_neighbors:weights': 'distance',
'preprocessor': 'select_rates',
'rescaling:strategy': 'min/max',
'select_rates:alpha': '0.0683198728939',
'select_rates:mode': 'fdr',
'select_rates:score_func': 'f_classif'},
{'balancing:strategy': 'none',
'classifier': 'qda',
'imputation:strategy': 'mean',
'kitchen_sinks:gamma': '0.773869494191',
'kitchen_sinks:n_components': '608.0',
'preprocessor': 'kitchen_sinks',
'qda:reg_param': '5.34388968302',
'qda:tol': '0.000118437687463',
'rescaling:strategy': 'standard'},
{'balancing:strategy': 'weighting',
'classifier': 'k_nearest_neighbors',
'imputation:strategy': 'mean',
'k_nearest_neighbors:algorithm': 'auto',
'k_nearest_neighbors:leaf_size': '30.0',
'k_nearest_neighbors:n_neighbors': '4.0',
'k_nearest_neighbors:p': '1.0',
'k_nearest_neighbors:weights': 'distance',
'preprocessor': 'select_rates',
'rescaling:strategy': 'min/max',
'select_rates:alpha': '0.0953909302386',
'select_rates:mode': 'fdr',
'select_rates:score_func': 'chi2'},
{'balancing:strategy': 'none',
'classifier': 'qda',
'imputation:strategy': 'mean',
'kitchen_sinks:gamma': '0.722743897655',
'kitchen_sinks:n_components': '952.0',
'preprocessor': 'kitchen_sinks',
'qda:reg_param': '3.61200930387',
'qda:tol': '0.000911935213882',
'rescaling:strategy': 'standard'},
{'balancing:strategy': 'weighting',
'classifier': 'k_nearest_neighbors',
'imputation:strategy': 'most_frequent',
'k_nearest_neighbors:algorithm': 'auto',
'k_nearest_neighbors:leaf_size': '30.0',
'k_nearest_neighbors:n_neighbors': '3.0',
'k_nearest_neighbors:p': '2.0',
'k_nearest_neighbors:weights': 'distance',
'preprocessor': 'select_rates',
'rescaling:strategy': 'standard',
'select_rates:alpha': '0.12499749257',
'select_rates:mode': 'fdr',
'select_rates:score_func': 'f_classif'},
{'balancing:strategy': 'none',
'classifier': 'qda',
'imputation:strategy': 'most_frequent',
'kitchen_sinks:gamma': '0.521009778754',
'kitchen_sinks:n_components': '581.0',
'preprocessor': 'kitchen_sinks',
'qda:reg_param': '0.570532656005',
'qda:tol': '0.00759604479274',
'rescaling:strategy': 'standard'},
{'balancing:strategy': 'none',
'classifier': 'qda',
'imputation:strategy': 'median',
'kitchen_sinks:gamma': '0.736334496442',
'kitchen_sinks:n_components': '590.0',
'preprocessor': 'kitchen_sinks',
'qda:reg_param': '8.78913455152',
'qda:tol': '0.0417125881025',
'rescaling:strategy': 'standard'},
{'balancing:strategy': 'weighting',
'classifier': 'k_nearest_neighbors',
'imputation:strategy': 'median',
'k_nearest_neighbors:algorithm': 'auto',
'k_nearest_neighbors:leaf_size': '30.0',
'k_nearest_neighbors:n_neighbors': '10.0',
'k_nearest_neighbors:p': '2.0',
'k_nearest_neighbors:weights': 'distance',
'preprocessor': 'select_rates',
'rescaling:strategy': 'min/max',
'select_rates:alpha': '0.065583595323',
'select_rates:mode': 'fdr',
'select_rates:score_func': 'f_classif'},
{'balancing:strategy': 'none',
'classifier': 'qda',
'imputation:strategy': 'mean',
'kitchen_sinks:gamma': '0.725282605688',
'kitchen_sinks:n_components': '591.0',
'preprocessor': 'kitchen_sinks',
'qda:reg_param': '4.32023431675',
'qda:tol': '2.95483713232e-05',
'rescaling:strategy': 'standard'},
{'balancing:strategy': 'none',
'classifier': 'qda',
'imputation:strategy': 'mean',
'kitchen_sinks:gamma': '0.686955501206',
'kitchen_sinks:n_components': '646.0',
'preprocessor': 'kitchen_sinks',
'qda:reg_param': '9.58493774318',
'qda:tol': '0.00612419830773',
'rescaling:strategy': 'standard'},
{'balancing:strategy': 'none',
'classifier': 'k_nearest_neighbors',
'imputation:strategy': 'median',
'k_nearest_neighbors:algorithm': 'auto',
'k_nearest_neighbors:leaf_size': '30.0',
'k_nearest_neighbors:n_neighbors': '6.0',
'k_nearest_neighbors:p': '2.0',
'k_nearest_neighbors:weights': 'distance',
'preprocessor': 'select_rates',
'rescaling:strategy': 'min/max',
'select_rates:alpha': '0.276130352686',
'select_rates:mode': 'fdr',
'select_rates:score_func': 'f_classif'},
{'balancing:strategy': 'none',
'classifier': 'qda',
'imputation:strategy': 'most_frequent',
'kitchen_sinks:gamma': '0.549862378472',
'kitchen_sinks:n_components': '591.0',
'preprocessor': 'kitchen_sinks',
'qda:reg_param': '1.11536443906',
'qda:tol': '4.98941924261e-05',
'rescaling:strategy': 'standard'},
{'balancing:strategy': 'none',
'classifier': 'qda',
'imputation:strategy': 'median',
'kitchen_sinks:gamma': '0.551878628115',
'kitchen_sinks:n_components': '913.0',
'preprocessor': 'kitchen_sinks',
'qda:reg_param': '2.80643663684',
'qda:tol': '0.0030955537468',
'rescaling:strategy': 'standard'},
{'balancing:strategy': 'none',
'classifier': 'qda',
'imputation:strategy': 'mean',
'kitchen_sinks:gamma': '0.797948222068',
'kitchen_sinks:n_components': '856.0',
'preprocessor': 'kitchen_sinks',
'qda:reg_param': '0.753439507859',
'qda:tol': '0.000179635997544',
'rescaling:strategy': 'standard'},
{'balancing:strategy': 'weighting',
'classifier': 'k_nearest_neighbors',
'imputation:strategy': 'median',
'k_nearest_neighbors:algorithm': 'auto',
'k_nearest_neighbors:leaf_size': '30.0',
'k_nearest_neighbors:n_neighbors': '6.0',
'k_nearest_neighbors:p': '2.0',
'k_nearest_neighbors:weights': 'distance',
'preprocessor': 'select_rates',
'rescaling:strategy': 'standard',
'select_rates:alpha': '0.121674691962',
'select_rates:mode': 'fdr',
'select_rates:score_func': 'f_classif'},
{'balancing:strategy': 'none',
'classifier': 'qda',
'imputation:strategy': 'median',
'kitchen_sinks:gamma': '0.870787144807',
'kitchen_sinks:n_components': '591.0',
'preprocessor': 'kitchen_sinks',
'qda:reg_param': '3.25265485261',
'qda:tol': '0.000232802336471',
'rescaling:strategy': 'standard'},
{'balancing:strategy': 'none',
'classifier': 'qda',
'imputation:strategy': 'mean',
'kitchen_sinks:gamma': '0.725282605688',
'kitchen_sinks:n_components': '469.0',
'preprocessor': 'kitchen_sinks',
'qda:reg_param': '4.32023431675',
'qda:tol': '6.11461737038e-05',
'rescaling:strategy': 'standard'},
{'balancing:strategy': 'none',
'classifier': 'qda',
'imputation:strategy': 'mean',
'kitchen_sinks:gamma': '0.742290491524',
'kitchen_sinks:n_components': '699.0',
'preprocessor': 'kitchen_sinks',
'qda:reg_param': '1.80605719583',
'qda:tol': '0.00759903394814',
'rescaling:strategy': 'standard'},
{'balancing:strategy': 'weighting',
'classifier': 'k_nearest_neighbors',
'imputation:strategy': 'mean',
'k_nearest_neighbors:algorithm': 'auto',
'k_nearest_neighbors:leaf_size': '30.0',
'k_nearest_neighbors:n_neighbors': '4.0',
'k_nearest_neighbors:p': '2.0',
'k_nearest_neighbors:weights': 'distance',
'preprocessor': 'select_rates',
'rescaling:strategy': 'min/max',
'select_rates:alpha': '0.0556366440458',
'select_rates:mode': 'fdr',
'select_rates:score_func': 'f_classif'},
{'balancing:strategy': 'none',
'classifier': 'qda',
'imputation:strategy': 'mean',
'kitchen_sinks:gamma': '0.69436212216',
'kitchen_sinks:n_components': '477.0',
'preprocessor': 'kitchen_sinks',
'qda:reg_param': '7.19343875838',
'qda:tol': '0.00130430743783',
'rescaling:strategy': 'standard'},
{'balancing:strategy': 'weighting',
'classifier': 'k_nearest_neighbors',
'imputation:strategy': 'median',
'k_nearest_neighbors:algorithm': 'auto',
'k_nearest_neighbors:leaf_size': '30.0',
'k_nearest_neighbors:n_neighbors': '8.0',
'k_nearest_neighbors:p': '1.0',
'k_nearest_neighbors:weights': 'distance',
'preprocessor': 'select_rates',
'rescaling:strategy': 'standard',
'select_rates:alpha': '0.0962781949808',
'select_rates:mode': 'fdr',
'select_rates:score_func': 'f_classif'},
{'balancing:strategy': 'none',
'classifier': 'qda',
'imputation:strategy': 'mean',
'kitchen_sinks:gamma': '0.680526800011',
'kitchen_sinks:n_components': '627.0',
'preprocessor': 'kitchen_sinks',
'qda:reg_param': '3.3758872613',
'qda:tol': '0.0025551077682',
'rescaling:strategy': 'standard'},
]
classifiers = []
predictions_valid = []
predictions_test = []
# Make predictions and weight them
for weight, configuration in zip(weights, configurations):
for param in configuration:
try:
configuration[param] = int(configuration[param])
except Exception:
try:
configuration[param] = float(configuration[param])
except Exception:
pass
classifier = ParamSklearnClassifier(configuration, 1)
classifiers.append(classifier)
try:
classifier.fit(X.copy(), y.copy())
predictions_valid.append(
classifier.predict_proba(X_valid.copy()) * weight)
predictions_test.append(
classifier.predict_proba(X_test.copy()) * weight)
except Exception as e:
        print(e)
        print(configuration)
# Output the predictions
for name, predictions in [('valid', predictions_valid),
('test', predictions_test)]:
predictions = np.array(predictions)
predictions = np.sum(predictions, axis=0)
predictions = predictions[:, 1].reshape((-1, 1))
filepath = os.path.join(output, '%s_%s_000.predict' % (dataset, name))
np.savetxt(filepath, predictions, delimiter=' ') | [
11748,
1822,
29572,
198,
11748,
28686,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
11748,
44619,
74,
35720,
198,
11748,
44619,
74,
35720,
13,
7890,
198,
11748,
44619,
74,
35720,
13,
7890,
13,
7890,
62,
37153,
198,
11748,
44619,
7... | 2.147119 | 6,403 |
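# A minimal sketch of the weighted soft vote performed above: each member's
# predicted class probabilities are scaled by its ensemble weight and summed,
# and column 1 (the positive class) is what gets written to disk. The numbers
# below are made-up placeholders.
import numpy as np

example_weights = [0.5, 0.3, 0.2]
example_probas = [
    np.array([[0.9, 0.1], [0.2, 0.8]]),  # member 1: rows are samples
    np.array([[0.6, 0.4], [0.4, 0.6]]),  # member 2
    np.array([[0.7, 0.3], [0.1, 0.9]]),  # member 3
]
ensemble = np.sum([w * p for w, p in zip(example_weights, example_probas)], axis=0)
print(ensemble[:, 1].reshape((-1, 1)))  # positive-class scores, one column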
print('Logging in...')
a = 0
b = 1
c = 2
d = 3
| [
4798,
10786,
163,
247,
119,
165,
247,
228,
40792,
986,
11537,
198,
64,
796,
657,
198,
65,
796,
352,
198,
66,
796,
362,
198,
67,
796,
513,
198
] | 1.428571 | 28 |
import os
import torch
from torch.utils.tensorboard.writer import SummaryWriter
from detection.src.loaders.data_manager import DetectionSetDataManager
from detection.src.yolo_maml import YOLOMAML
from utils import configs
from utils.io_utils import set_and_print_random_seed
from detection.src.yolov3.model import Darknet
from detection.src.yolov3.utils.parse_config import parse_data_config
class YOLOMAMLTraining():
"""
This step handles the training of the algorithm on the base dataset
"""
def __init__(
self,
dataset_config='yolov3/config/black.data',
model_config='yolov3/config/yolov3.cfg',
pretrained_weights=None,
n_way=5,
n_shot=5,
n_query=16,
optimizer='Adam',
learning_rate=0.001,
approx=True,
task_update_num=3,
print_freq=100,
validation_freq=1000,
n_epoch=100,
n_episode=100,
objectness_threshold=0.8,
nms_threshold=0.4,
iou_threshold=0.2,
image_size=416,
random_seed=None,
output_dir=configs.save_dir,
):
"""
Args:
dataset_config (str): path to data config file
model_config (str): path to model definition file
pretrained_weights (str): path to a file containing pretrained weights for the model
n_way (int): number of labels in a detection task
n_shot (int): number of support data in each class in an episode
n_query (int): number of query data in each class in an episode
optimizer (str): must be a valid class of torch.optim (Adam, SGD, ...)
learning_rate (float): learning rate fed to the optimizer
approx (bool): whether to use an approximation of the meta-backpropagation
task_update_num (int): number of updates inside each episode
print_freq (int): inside an epoch, print status update every print_freq episodes
validation_freq (int): inside an epoch, frequency with which we evaluate the model on the validation set
n_epoch (int): number of meta-training epochs
n_episode (int): number of episodes per epoch during meta-training
objectness_threshold (float): at evaluation time, only keep boxes with objectness above this threshold
nms_threshold (float): threshold for non maximum suppression, at evaluation time
iou_threshold (float): threshold for intersection over union
image_size (int): size of images (square)
            random_seed (int): seed for random instantiations; if none is provided, a seed is randomly defined
output_dir (str): path to experiments output directory
"""
self.dataset_config = dataset_config
self.model_config = model_config
self.pretrained_weights = pretrained_weights
self.n_way = n_way
self.n_shot = n_shot
self.n_query = n_query
self.optimizer = optimizer
self.learning_rate = learning_rate
self.approx = approx
self.task_update_num = task_update_num
self.print_freq = print_freq
self.validation_freq = validation_freq
self.n_epoch = n_epoch
self.n_episode = n_episode
self.objectness_threshold = objectness_threshold
self.nms_threshold = nms_threshold
self.iou_threshold = iou_threshold
self.image_size = image_size
self.random_seed = random_seed
self.checkpoint_dir = output_dir
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.writer = SummaryWriter(log_dir=output_dir)
def apply(self):
"""
Execute the YOLOMAMLTraining step
Returns:
            dict: a dictionary containing the whole state of the model that gave the highest validation accuracy
"""
set_and_print_random_seed(self.random_seed, True, self.checkpoint_dir)
data_config = parse_data_config(self.dataset_config)
train_path = data_config["train"]
train_dict_path = data_config.get("train_dict_path", None)
valid_path = data_config.get("valid", None)
valid_dict_path = data_config.get("valid_dict_path", None)
base_loader = self._get_data_loader(train_path, train_dict_path)
val_loader = self._get_data_loader(valid_path, valid_dict_path)
model = self._get_model()
return self._train(base_loader, val_loader, model)
def _train(self, base_loader, val_loader, model):
"""
Trains the model on the base set
Args:
base_loader (torch.utils.data.DataLoader): data loader for base set
val_loader (torch.utils.data.DataLoader): data loader for validation set
model (YOLOMAML): neural network model to train
Returns:
            dict: a dictionary containing the whole state of the model that gave the highest validation accuracy
"""
optimizer = self._get_optimizer(model)
for epoch in range(self.n_epoch):
loss_dict = model.train_loop(base_loader, optimizer)
self.plot_tensorboard(loss_dict, epoch)
if epoch % self.print_freq == 0:
print(
'Epoch {epoch}/{n_epochs} | Loss {loss}'.format(
epoch=epoch,
n_epochs=self.n_epoch,
loss=loss_dict['query_total_loss'],
)
)
if epoch % self.validation_freq == self.validation_freq - 1:
precision, recall, average_precision, f1, ap_class = model.eval_loop(val_loader)
self.writer.add_scalar('precision', precision.mean(), epoch)
self.writer.add_scalar('recall', recall.mean(), epoch)
self.writer.add_scalar('mAP', average_precision.mean(), epoch)
self.writer.add_scalar('F1', f1.mean(), epoch)
self.writer.close()
model.base_model.save_darknet_weights(os.path.join(self.checkpoint_dir, 'final.weights'))
return {'epoch': self.n_epoch, 'state': model.state_dict()}
def _get_optimizer(self, model):
"""
Get the optimizer from string self.optimizer
Args:
model (torch.nn.Module): the model to be trained
Returns: a torch.optim.Optimizer object parameterized with model parameters
"""
assert hasattr(torch.optim, self.optimizer), "The optimization method is not a torch.optim object"
optimizer = getattr(torch.optim, self.optimizer)(model.parameters(), lr=self.learning_rate)
return optimizer
def _get_data_loader(self, path_to_data_file, path_to_images_per_label):
"""
Args:
path_to_data_file (str): path to file containing paths to images
path_to_images_per_label (str): path to pickle file containing the dictionary of images per label
Returns:
torch.utils.data.DataLoader: samples data in the shape of a detection task
"""
data_manager = DetectionSetDataManager(self.n_way, self.n_shot, self.n_query, self.n_episode, self.image_size)
return data_manager.get_data_loader(path_to_data_file, path_to_images_per_label)
def _get_model(self):
"""
Returns:
YOLOMAML: meta-model
"""
base_model = Darknet(self.model_config, self.image_size, self.pretrained_weights)
model = YOLOMAML(
base_model,
self.n_way,
self.n_shot,
self.n_query,
self.image_size,
approx=self.approx,
task_update_num=self.task_update_num,
train_lr=self.learning_rate,
objectness_threshold=self.objectness_threshold,
nms_threshold=self.nms_threshold,
iou_threshold=self.iou_threshold,
device=self.device,
)
return model
def plot_tensorboard(self, loss_dict, epoch):
"""
Writes into summary the values present in loss_dict
Args:
loss_dict (dict): contains the different parts of the average loss on one epoch. Each key describes
a part of the loss (ex: query_classification_loss) and each value is a 0-dim tensor. This dictionary is
        required to contain the keys 'support_total_loss' and 'query_total_loss', which contain, respectively,
        the total loss on the support set and the total meta-loss on the query set
epoch (int): global step value in the summary
Returns:
"""
for key, value in loss_dict.items():
self.writer.add_scalar(key, value, epoch)
return
| [
11748,
28686,
628,
198,
11748,
28034,
198,
6738,
28034,
13,
26791,
13,
83,
22854,
3526,
13,
16002,
1330,
21293,
34379,
198,
198,
6738,
13326,
13,
10677,
13,
2220,
364,
13,
7890,
62,
37153,
1330,
46254,
7248,
6601,
13511,
198,
6738,
1332... | 2.29392 | 3,865 |
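# A minimal usage sketch for the YOLOMAMLTraining step defined above. The two
# config paths are the constructor defaults; 'output/yolomaml' is a
# placeholder output directory, not a path from the original project.
step = YOLOMAMLTraining(
    dataset_config='yolov3/config/black.data',
    model_config='yolov3/config/yolov3.cfg',
    n_way=5,
    n_shot=5,
    n_query=16,
    n_epoch=100,
    output_dir='output/yolomaml',
)
final_state = step.apply()  # {'epoch': ..., 'state': model.state_dict()}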
from contextlib import contextmanager
import pytest
from typing_extensions import Annotated, Protocol, runtime_checkable
from antidote._internal.utils import enforce_subclass_if_possible, enforce_type_if_possible
@contextmanager
def does_not_raise():
    yield

does_raise = pytest.raises(TypeError, match="(?i).*(isinstance|subclass|implement).*")

# The dummy classes and test functions below are reconstructed so that the
# dangling decorators and parametrized cases are runnable; their minimal
# bodies are assumptions, not the original source.
class DummyProtocol(Protocol):
    def dummy(self) -> None:
        ...

@runtime_checkable
class DummyRuntimeProtocol(Protocol):
    def dummy(self) -> None:
        ...

class ValidDummy:
    def dummy(self) -> None:
        ...

class InvalidDummy:
    pass

class SubDummy(ValidDummy):
    pass

@pytest.mark.parametrize(
    "expectation, obj, tpe",
    [
        (does_raise, object(), int),
        (does_not_raise(), object(), Annotated[ValidDummy, object()]),
        (does_not_raise(), 1, int),
        (does_not_raise(), 1, DummyProtocol),
        (does_raise, 1, DummyRuntimeProtocol),
        (does_not_raise(), InvalidDummy(), DummyProtocol),
        (does_raise, InvalidDummy(), DummyRuntimeProtocol),
        (does_not_raise(), ValidDummy(), DummyProtocol),
        (does_not_raise(), ValidDummy(), DummyRuntimeProtocol),
        (does_not_raise(), SubDummy(), DummyProtocol),
        (does_not_raise(), SubDummy(), DummyRuntimeProtocol),
        (does_not_raise(), SubDummy(), ValidDummy),
        (does_raise, InvalidDummy(), ValidDummy),
    ],
)
def test_enforce_type_if_possible(expectation, obj, tpe):
    with expectation:
        enforce_type_if_possible(obj, tpe)

@pytest.mark.parametrize(
    "expectation, sub, tpe",
    [
        (does_not_raise(), ValidDummy, DummyProtocol),
        (does_not_raise(), ValidDummy, DummyRuntimeProtocol),
        (does_not_raise(), InvalidDummy, DummyProtocol),
        (does_raise, InvalidDummy, DummyRuntimeProtocol),
        (does_raise, InvalidDummy, ValidDummy),
        (does_not_raise(), SubDummy, ValidDummy),
        (does_not_raise(), 1, 1),
        (does_not_raise(), 1, int),
        (does_not_raise(), int, 1),
    ],
)
def test_enforce_subclass_if_possible(expectation, sub, tpe):
    with expectation:
        enforce_subclass_if_possible(sub, tpe)
| [
6738,
4732,
8019,
1330,
4732,
37153,
198,
198,
11748,
12972,
9288,
198,
6738,
19720,
62,
2302,
5736,
1330,
1052,
1662,
515,
11,
20497,
11,
19124,
62,
9122,
540,
198,
198,
6738,
50131,
13557,
32538,
13,
26791,
1330,
4605,
62,
7266,
4871,... | 2.400888 | 676 |
import numpy as np
import time
from abc import abstractmethod
| [
11748,
299,
32152,
355,
45941,
198,
11748,
640,
198,
198,
6738,
450,
66,
1330,
12531,
24396,
198
] | 3.705882 | 17 |
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import os
import sys
import django
from scrapy import signals
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
sys.path.append(os.path.join(BASE_DIR, 'store'))
os.environ['DJANGO_SETTINGS_MODULE'] = 'store.settings'
django.setup()
from goods.tasks import save_goods_to_db
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
2896,
500,
534,
2378,
31108,
994,
198,
2,
198,
2,
2094,
470,
6044,
284,
751,
534,
11523,
284,
262,
7283,
3620,
62,
47,
4061,
3698,
1268,
1546,
4634,
198,
2,
... | 2.601093 | 183 |
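# A hedged sketch of the pipeline class this module builds toward: the class
# name and the assumption that save_goods_to_db is a Celery task exposing a
# .delay() method are illustrations, not the project's actual code.
class StoreGoodsPipeline(object):
    def process_item(self, item, spider):
        # Hand the scraped item off to the Django side asynchronously.
        save_goods_to_db.delay(dict(item))
        return item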
from app import app
from flask import jsonify,render_template,json, request
from services import LinksServices
@app.route("/links",methods=['GET'])
@app.route("/link/getTopLinks",methods=['GET'])
@app.route("/link/getTotalLinks",methods=['GET'])
@app.route("/link/getMentionHistory",methods=['GET']) | [
6738,
598,
1330,
598,
198,
6738,
42903,
1330,
33918,
1958,
11,
13287,
62,
28243,
11,
17752,
11,
2581,
198,
6738,
2594,
1330,
21691,
31007,
198,
198,
31,
1324,
13,
38629,
7203,
14,
28751,
1600,
24396,
82,
28,
17816,
18851,
6,
12962,
19... | 2.885714 | 105 |
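# The stacked @app.route decorators above need view functions, which are not
# shown in this excerpt. A hedged sketch of one such view follows; the route
# path, function name, and the LinksServices.getTopLinks() call are
# illustrative assumptions about the service interface.
@app.route("/link/example", methods=['GET'])
def example_link_view():
    top_links = LinksServices.getTopLinks()
    return jsonify(top_links)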
from utils_rouge import rouge_eval, rouge_log
rouge_ref_dir = './output/reference'
rouge_dec_dir = './output/decoded'
print("Decoder has finished reading dataset for single_pass.")
print("Now starting ROUGE eval...")
results_dict = rouge_eval(rouge_ref_dir, rouge_dec_dir)
rouge_log(results_dict, './output')
| [
6738,
3384,
4487,
62,
472,
469,
1330,
13805,
469,
62,
18206,
11,
13805,
469,
62,
6404,
628,
198,
472,
469,
62,
5420,
62,
15908,
796,
705,
19571,
22915,
14,
35790,
6,
198,
472,
469,
62,
12501,
62,
15908,
796,
705,
19571,
22915,
14,
... | 2.827273 | 110 |
from cached_property import cached_property
from sqlalchemy import text
from sqlalchemy import orm
from sqlalchemy.orm import selectinload
from sqlalchemy.sql.expression import func
from sqlalchemy.orm import deferred
from sqlalchemy.orm import foreign, remote
from collections import defaultdict
from time import sleep
import datetime
import json
from app import db
from util import normalize_title_like_sql
# alter table recordthresher_record add column started_label text;
# alter table recordthresher_record add column started datetime;
# alter table recordthresher_record add column finished datetime;
# alter table recordthresher_record add column work_id bigint
work_type_strings = """
lookup_string,work_type,doc_type
conference,proceedings,Conference
proceedings-series,proceedings-series,
journal-volume,journal-volume,
book-series,book-series,
dataset,dataset,Dataset
info:eu-repo/semantics/report,report,
guideline,other,
preprint,posted-content,Repository
report,report,
thesis/dissertation,dissertation,Thesis
corrected and republished article,journal-article,Journal
observational study,journal-article,Journal
systematic review,journal-article,Journal
info:eu-repo/semantics/workingpaper,report,
video-audio media,other,
english abstract,other,
personal narrative,other,
info:eu-repo/semantics/other,other,
letter,posted-content,Repository
proceedings,proceedings,Conference
technical report,report,
peer-review,peer-review,
program document,other,
info:eu-repo/semantics/doctoralthesis,dissertation,Thesis
info:eu-repo/semantics/masterthesis,dissertation,Thesis
other,other,
book,book,Book
dissertation,dissertation,Thesis
info:eu-repo/semantics/article,journal-article,Journal
monograph,monograph,Book
proceedings-article,proceedings-article,Conference
data,dataset,Dataset
info:eu-repo/semantics/conferencepaper,proceedings,Conference
info:eu-repo/semantics/patent,other,
info:eu-repo/semantics/preprint,posted-content,Repository
journal,journal,
practice guideline,other,
book-set,book-set,
grant,grant,
congress,other,
info:eu-repo/semantics/conferenceobject,proceedings-article,Conference
journal article: accepted manuscript,journal-article,Journal
report-series,report-series,
news,posted-content,Repository
reference-entry,reference-entry,
book-part,book-part,
clinical trial,journal-article,Journal
editorial,journal-article,Journal
info:eu-repo/semantics/book,book,Book
journal article: publisher's accepted manuscript,journal-article,Journal
posted-content,posted-content,Repository
published erratum,other,
reference-book,reference-book,
retraction of publication,other,
standard,standard,
info:eu-repo/semantics/bookpart,book-chapter,BookChapter
journal-issue,journal-issue,
book-chapter,book-chapter,BookChapter
interview,other,
introductory journal article,journal-article,Journal
historical article,journal-article,Journal
journal article,journal-article,Journal
journal-article,journal-article,Journal
meta-analysis,journal-article,Journal
article,journal-article,Journal
case reports,journal-article,Journal
component,component,
info:eu-repo/semantics/lecture,other,
journal article: published article,journal-article,Journal
patient education handout,other,
"""
work_type_lines = work_type_strings.split("\n")
work_type_lookup = dict()
for line in work_type_lines:
if line:
(lookup, work_type, doc_type) = line.split(",")
work_type_lookup[lookup.strip()] = {"work_type": work_type if work_type else None,
"doc_type": doc_type if doc_type else None}
| [
6738,
39986,
62,
26745,
1330,
39986,
62,
26745,
198,
6738,
44161,
282,
26599,
1330,
2420,
198,
6738,
44161,
282,
26599,
1330,
393,
76,
198,
6738,
44161,
282,
26599,
13,
579,
1330,
2922,
259,
2220,
198,
6738,
44161,
282,
26599,
13,
25410... | 2.95843 | 1,299 |
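# The loop above turns the CSV block into a dictionary keyed by the raw type
# string; empty CSV fields become None. For example:
print(work_type_lookup["journal-article"])
# {'work_type': 'journal-article', 'doc_type': 'Journal'}
print(work_type_lookup["journal"])
# {'work_type': 'journal', 'doc_type': None}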
NAME='ldap'
CFLAGS = []
LDFLAGS = []
LIBS = ['-lldap']
GCC_LIST = ['ldap']
| [
198,
20608,
11639,
335,
499,
6,
198,
34,
38948,
50,
796,
17635,
198,
11163,
38948,
50,
796,
17635,
198,
31271,
4462,
796,
685,
29001,
297,
67,
499,
20520,
198,
198,
38,
4093,
62,
45849,
796,
37250,
335,
499,
20520,
198
] | 1.925 | 40 |
import argparse
import wcxf
import sys
import logging
import os
import yaml
import pylha
| [
11748,
1822,
29572,
198,
11748,
266,
66,
26152,
198,
11748,
25064,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
331,
43695,
198,
11748,
279,
2645,
3099,
628,
628,
628,
628,
628
] | 3.16129 | 31 |
# -*- coding: UTF-8 -*-
from django.http import HttpResponseRedirect
from datetime import datetime
from django.core.urlresolvers import reverse
from django.core.paginator import Paginator
from meregistro.shortcuts import my_render
from apps.seguridad.decorators import login_required, credential_required
from apps.seguridad.models import Usuario, Perfil
from apps.registro.models.Establecimiento import Establecimiento
from apps.registro.models.ExtensionAulica import ExtensionAulica
from apps.registro.models.EstadoExtensionAulica import EstadoExtensionAulica
from apps.registro.models.ExtensionAulicaMatricula import ExtensionAulicaMatricula
from apps.registro.forms.ExtensionAulicaMatriculaForm import ExtensionAulicaMatriculaForm
from apps.registro.forms.ExtensionAulicaFormFilters import ExtensionAulicaFormFilters
from apps.registro.forms.ExtensionAulicaMatriculaFormFilters import ExtensionAulicaMatriculaFormFilters
from apps.backend.models import ConfiguracionSolapasExtensionAulica
from apps.registro.forms.VerificacionDatosExtensionAulicaForm import VerificacionDatosExtensionAulicaForm
ITEMS_PER_PAGE = 50
@login_required
def __extension_aulica_dentro_del_ambito(request, extension_aulica):
"""
    Is the extension_aulica within the scope (ámbito)?
"""
try:
extension_aulica = ExtensionAulica.objects.get(id=extension_aulica.id, ambito__path__istartswith=request.get_perfil().ambito.path)
    except ExtensionAulica.DoesNotExist:
return False
return True
@login_required
@login_required
@credential_required('reg_extension_aulica_consulta')
def build_query(filters, page, request):
"""
    Builds the search query from the filters.
"""
return filters.buildQuery().order_by('establecimiento__nombre', 'cue').filter(ambito__path__istartswith=request.get_perfil().ambito.path)
@login_required
@credential_required('reg_extension_aulica_consulta')
def build_query_matricula(filters, page, request):
"""
    Builds the search query from the filters.
"""
return filters.buildQuery().order_by('anio')
@login_required
@credential_required('reg_extension_aulica_modificar')
@login_required
@credential_required('reg_extension_aulica_modificar')
def edit(request, matricula_id):
"""
    Edits the data of an enrollment record (matrícula).
"""
matricula = ExtensionAulicaMatricula.objects.get(pk=matricula_id)
extension_aulica = __get_extension_aulica(request, matricula.extension_aulica_id)
if request.method == 'POST':
form = ExtensionAulicaMatriculaForm(request.POST, instance=matricula, extension_aulica=extension_aulica)
if form.is_valid():
matricula = form.save(commit=False)
matricula.set_formacion_continua()
matricula.set_formacion_docente()
matricula.save()
request.set_flash('success', 'Datos actualizados correctamente.')
return HttpResponseRedirect(reverse('extensionAulicaMatriculaIndexExtensionAulica', args=[matricula.extension_aulica_id]))
else:
request.set_flash('warning', 'Ocurrió un error actualizando los datos.')
else:
form = ExtensionAulicaMatriculaForm(instance=matricula, extension_aulica=extension_aulica)
return my_render(request, 'registro/extension_aulica/matricula/edit.html', {
'form': form,
'matricula': matricula,
'extension_aulica': extension_aulica,
})
@login_required
@credential_required('reg_extension_aulica_modificar')
| [
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
7738,
1060,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
42625,
14208,
13,
7295,
13,
6371,
411,
349,
690,
... | 2.66062 | 1,323 |
"""
Load a model
Derek van Tilborg, Eindhoven University of Technology, March 2022
"""
from MoleculeACE.benchmark.utils.const import Algorithms
from .model import Model
def load_model(data, algorithm: Algorithms, model_file: str):
""" Train a machine learning model
Args:
data: MoleculeACE.benchmark.Data object containing train x and y data
algorithm: MoleculeACE.benchmark.utils.Algorithms object - algorithm to use
        model_file: string path to the model file. All models use pickle files except CNN, MLP, and LSTM, which use .h5
Returns: MoleculeACE.benchmark.Model
"""
model = Model(data, algorithm=algorithm, descriptor=data.descriptor)
model.load_model(model_file)
return model | [
37811,
198,
8912,
257,
2746,
198,
35,
18238,
5719,
40422,
23297,
11,
412,
521,
8873,
574,
2059,
286,
8987,
11,
2805,
33160,
198,
37811,
198,
198,
6738,
25726,
23172,
11598,
13,
26968,
4102,
13,
26791,
13,
9979,
1330,
978,
7727,
907,
1... | 3.096234 | 239 |
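# A hedged usage sketch for load_model above: `data` is assumed to be a
# prepared MoleculeACE.benchmark.Data object (per the docstring), and the
# algorithm member and pickle filename are placeholders.
model = load_model(data, algorithm=Algorithms.RF, model_file='rf_model.pkl')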
import pickle
import pathlib as pl
from typing import List
# XMC imports
from xmc.tools import dynamicImport, splitOneListIntoTwo
from xmc.methodDefs_xmcAlgorithm import checkInitialisation
# Import ExaQUte API
from exaqute import get_value_from_remote
class XMCAlgorithm:
"""
This top-level class handles the overall algorithm: initialisation as well as everything
related to error, tolerance and iterations. It also possesses the necessary methods and
attributes to create new types of algorithms. However, the export of results is to be
handled outside.
This documentation is not yet complete.
* Methods
- runXMC: run an algorithm with generic structure. It is the method to call to run the
algorithm.
- runAsynchronousXMC: run an algorithm with generic structure exploiting the asynchronous
framework. It is the method to call to run the asynchronous algorithm.
* Attributes
- estimatorsForHierarchy: List[List]. This is a list of instructions for the indexwise
estimations to be sent to the hierarchy optimiser. estimatorsForHierarchy[0] contains
instructions for the first estimation to be sent (estimatorsForHierarchy[1] the second,
etc.). estimatorsForHierarchy[0][0] is the index of the estimator concerned, as ordered in
MonteCarloIndex.qoiEstimator; estimatorsForHierarchy[0][1] is the list of arguments to be
passed to the value method of this estimator. These values are used in
XMCAlgorithm.optimalHierarchy and eventually result in a call of the form
StatisticalEstimator.value(estimatorsForHierarchy[0], *estimatorsForHierarchy[1]).
"""
def hierarchy(self):
"""
Returns current hierarchy of the MC estimator.
"""
return self.monteCarloSampler.hierarchy()
def splitTolerance(self, splittingParameter):
"""
Method that interfaces with the MultiCriterion class to apply tolerance
splitting
"""
self.stoppingCriterion.splitTolerance(splittingParameter)
def optimalHierarchy(self):
"""
Method that interfaces with the HierarchyOptimiser class to compute
the optimal hierarchy
"""
input_dict = self.hierarchyOptimiser.inputDictionaryTemplate()
# No optimisation at first iteration
if self.iterationCounter < 1:
newHierarchy = self.hierarchyOptimiser.defaultHierarchy
splittingParameter = input_dict.get("splittingParameter", None)
return newHierarchy, splittingParameter
# Else, assemble data for hierarchy optimiser
# Random variables of interest
# Indexwise estimations
if self.estimatorsForHierarchy:
input_dict["estimations"] = [
get_value_from_remote(self.indexEstimation(c[0], c[1]))
for c in self.estimatorsForHierarchy
]
# Predictors
if self.predictorsForHierarchy:
input_dict["models"] = []
input_dict["parametersForModel"] = []
for coord in self.predictorsForHierarchy:
input_dict["models"].append(self.predictor(coord)._valueForParameters)
# TODO This should get self.predictor(coord).oldParameters
# and default to self.predictor(coord).parameters if they are None
params = get_value_from_remote(self.predictor(coord).parameters)
input_dict["parametersForModel"].append(params)
# Sample cost
# Indexwise estimation
if self.costEstimatorForHierarchy is not None:
input_dict["costEstimations"] = self.indexCostEstimation(
self.costEstimatorForHierarchy
)
# Predictor
if self.costPredictor() is not None:
input_dict["costModel"] = self.costPredictor()._valueForParameters
# TODO This should get self.costPredictor().oldParameters
# and default to self.costPredictor().parameters if they are None
input_dict["costParameters"] = get_value_from_remote(
self.costPredictor().parameters
)
# Error parameters
# TODO - Triple dereference below!! Add method to get errorEstimator parameters
# or errorEstimator objects themselves from monteCarloSampler
if self.errorParametersForHierarchy is not None:
input_dict["errorParameters"] = [
self.monteCarloSampler.errorEstimators[c].parameters
for c in self.errorParametersForHierarchy
]
# Miscellaneous parameters
input_dict["newSampleNumber"] = 25 # TODO configurable, not hard-coded
input_dict["oldHierarchy"] = self.hierarchy()
input_dict["defaultHierarchy"] = self.hierarchyOptimiser.defaultHierarchy
# Synchronisation
input_dict = get_value_from_remote(input_dict)
# Compute new hierarchy
newHierarchy = self.hierarchyOptimiser.optimalHierarchy(input_dict)
splittingParameter = input_dict.get("splittingParameter", None)
return newHierarchy, splittingParameter
def updateHierarchy(self, newHierarchy):
"""
Method that interfaces with the monteCarloSample class to execute
a given hierarchy
"""
# TODO could be confused with optimalHierarchy. Rename updateSampler or updateSamplerHierarchy?
self.monteCarloSampler.update(newHierarchy)
def estimation(self, assemblerCoordinates=None):
"""
Method that calls the estimation method of monteCarloSampler
"""
return self.monteCarloSampler.estimation(assemblerCoordinates)
def errorEstimation(self, errorEstimatorCoordinates=None):
"""
Method that calls the errorEstimation method of monteCarloSampler
"""
return self.monteCarloSampler.errorEstimation(errorEstimatorCoordinates)
def updateHierarchySpace(self, *args):
"""
Method that interfaces with the HierarchyOptimiser class to compute
the hierarchy space in which to search for the optimal hierarchy
"""
self.hierarchyOptimiser.updateHierarchySpace(args)
def stoppingCriterionFlag(self, currentCost=None):
"""
Call stoppingCriterion.flag with the proper arguments and return its output
(a.k.a flag).
Input argument: currentCost is an indication of the cost the algorithm has entailed
so far; we usually use the number of iterations.
        Output argument: criterion flag structure as defined in the MultiCriterion class.
"""
# Get errors required for stopping criterion
errors = self.errorEstimation(self.errorsForStoppingCriterion)
# Set up dictionary required for stoppingCriterion.flag
input_dictionary = {}
for i in range(len(errors)):
input_dictionary["error" + str(i)] = get_value_from_remote(errors[i])
input_dictionary["hierarchy"] = self.hierarchy()
input_dictionary["algorithmCost"] = currentCost
# Compute flag from dictionary and return
flag = self.stoppingCriterion.flag(input_dictionary)
return flag
def runXMC(self):
"""
Run an algorithm with generic structure.
"""
self.checkInitialisation(self)
# Iteration Loop will start here
flag = self.stoppingCriterion.flagStructure()
self.iterationCounter = 0
# print("Beginning Iterations for tolerance ", self.tolerances(self.errorsForStoppingCriterion)) #TODO not very robust
while not flag["stop"]:
# TODO Mostly outdated. Must be thoroughly checked.
newHierarchy, splittingParameter = self.optimalHierarchy()
self.splitTolerance(splittingParameter)
self.updateHierarchy(newHierarchy)
# synchronization point needed to launch new tasks if convergence is false
# put the synchronization point as in the end as possible
# TODO: remove from here the synchronization point to more hidden places
flag = self.stoppingCriterionFlag(self.iterationCounter)
flag = get_value_from_remote(flag)
# TODO Display selection is mostly guesswork here (very fragile)
errors = get_value_from_remote(
self.errorEstimation(self.errorsForStoppingCriterion)
)
dErrors = " ".join(["{err:.3e}".format(err=float(error)) for error in errors])
dHierarchy = " ".join([str(i[1]) for i in self.hierarchy()])
dTol = "None"
tols = self.tolerances(splittingParameter)
if tols:
dTol = " ".join(["{t:.3e}".format(t=tol) for tol in tols])
print(
f"Iteration — {self.iterationCounter}",
f"Tolerances — {dTol}",
f"Errors — {dErrors}",
f"Hierarchy — {dHierarchy}",
sep="\t",
)
if flag["updateTolerance"]:
self.updateTolerance()
if flag["updateIndexSpace"]:
self.updateHierarchySpace()
self.iterationCounter += 1
#### DATA DUMP ##########
if self.isDataDumped is True:
pathObject = pl.Path(self.outputFolderPath)
pathObject.mkdir(parents=True, exist_ok=True)
filename = (
self.outputFolderPath
+ "/iteration_"
+ str(self.iterationCounter)
+ ".pickle"
)
output_file = open(filename, "wb")
output_dict = {}
hier = self.hierarchy()
output_dict["predictedHierarchy"] = newHierarchy
output_dict["hierarchy"] = hier
if len(self.predictorsForHierarchy) != 0:
qoip = self.predictor()
costp = self.costPredictor()
output_dict["biasParameters"] = qoip[0].parameters
output_dict["varParameters"] = qoip[1].parameters
output_dict["costParameters"] = costp.parameters
output_dict["indexwiseBias"] = self.indexEstimation(0, [1, True, False])
errs = self.indexEstimation(0, [1, True, True])
levels, samples = splitOneListIntoTwo(hier)
output_dict["indexwiseVar"] = [errs[i] * samples[i] for i in range(len(errs))]
output_dict["indexwiseCost"] = self.indexCostEstimation([1, True, False])
hier = newHierarchy
levels, samples = splitOneListIntoTwo(hier)
costs = self.indexCostEstimation([1, True, False])
total_times = [sample * cost for sample, cost in zip(samples, costs)]
output_dict["totalTime"] = sum(total_times)
pickle.dump(output_dict, output_file)
# TODO - Debug statement. Remove for PYCOMPSS tests
displayEstimation = get_value_from_remote(self.estimation())
displayEstimation = " ".join(["{e:.3e}".format(e=est) for est in displayEstimation])
displayError = get_value_from_remote(
self.errorEstimation(self.errorsForStoppingCriterion)
)
displayError = " ".join(["{e:.3e}".format(e=error) for error in displayError])
displayCost = get_value_from_remote(self.indexCostEstimation([1, True, False]))
displayCost = " ".join(["{c:.3e}".format(c=cost) for cost in displayCost])
print(
f"Estimations — {displayEstimation}",
f"Final errors — {displayError}",
f"Levelwise mean costs — {displayCost}",
sep="\n",
)
####################################################################################################
###################################### ASYNCHRONOUS FRAMEWORK ######################################
####################################################################################################
def asynchronousFinalizeIteration(self):
"""
        Method finalizing an iteration of the asynchronous framework. It synchronizes and calls all relevant methods of a single batch, the first available, before estimating convergence.
"""
continue_iterating = True
for batch in range(self.monteCarloSampler.numberBatches):
if (
self.monteCarloSampler.batchesLaunched[batch] is True
and self.monteCarloSampler.batchesExecutionFinished[batch] is True
and self.monteCarloSampler.batchesAnalysisFinished[batch] is True
and self.monteCarloSampler.batchesConvergenceFinished[batch] is not True
and continue_iterating is True
):
continue_iterating = False
self.monteCarloSampler.asynchronousFinalize(batch)
flag = self.stoppingCriterionFlag(self.iterationCounter)
self.monteCarloSampler.batchesConvergenceFinished[batch] = True
break
        # display iteration information
errors = get_value_from_remote(self.errorEstimation(self.errorsForStoppingCriterion))
dTol = "None"
tols = self.tolerances()
if tols:
dTol = " ".join(["{t:.3e}".format(t=tol) for tol in tols])
print(
"Iteration ",
self.iterationCounter,
"\tTolerance - ",
dTol,
"\tError - ",
["%.3e" % err for err in errors],
"\tHierarchy - ",
self.hierarchy(),
)
# update tolerance and hierarchy space if required
if flag["updateTolerance"]:
self.updateTolerance()
if flag["updateIndexSpace"]:
self.updateHierarchySpace()
# update iteration counter
self.iterationCounter += 1
return flag
def runAsynchronousXMC(self):
"""
Run algorithm with asynchronous framework.
"""
self.checkInitialisation(self)
# set maximum number of iteration variable
self.monteCarloSampler.maximumNumberIterations = self.stoppingCriterion.tolerances(
[self.positionMaxNumberIterationsCriterion]
)[0]
# Iteration loop will start here
flag = self.stoppingCriterion.flagStructure()
self.iterationCounter = 0
while not flag["stop"]:
# estimate splitting parameter
newHierarchy, splittingParameter = self.optimalHierarchy()
self.splitTolerance(splittingParameter)
# launch asynchronous monteCarloSampler update method
self.monteCarloSampler.asynchronousUpdate(newHierarchy)
# finalize phase of the iteration and return flag
flag = self.asynchronousFinalizeIteration()
        # display final results
displayEstimation = self.estimation()
displayError = self.errorEstimation(self.errorsForStoppingCriterion)
displayCost = self.indexCostEstimation([1, True, False])
print(
f"Estimations — {displayEstimation}",
f"Final errors — {displayError}",
f"Levelwise mean costs — {displayCost}",
sep="\n",
)
| [
11748,
2298,
293,
198,
11748,
3108,
8019,
355,
458,
198,
198,
6738,
19720,
1330,
7343,
198,
198,
2,
1395,
9655,
17944,
198,
6738,
2124,
23209,
13,
31391,
1330,
8925,
20939,
11,
6626,
3198,
8053,
5317,
78,
7571,
198,
6738,
2124,
23209,
... | 2.403819 | 6,389 |
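# A hedged illustration of the estimatorsForHierarchy format documented in
# the class docstring above: each entry pairs an estimator index with the
# argument list for that estimator's value() method. The argument values are
# modelled on the indexEstimation calls visible in runXMC's data dump.
example_estimators_for_hierarchy = [
    [0, [1, True, False]],  # estimator 0, args used for the indexwise bias
    [0, [1, True, True]],   # estimator 0, args used for the indexwise variance
]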
#!/usr/bin/python
## Massimiliano Patacchiola, Plymouth University 2016
import numpy as np
import os
import time
from scipy import spatial
#Informant reputation is evaluated considering: agent_action, agent_confidence, informant_action
#If the confidence of the agent is high and the action suggested is different from the action taken
#then the informant is evaluated as unreliable and a counter is incremented.
#The reputation counter is kept separate from the Cost function. Harris et al. have shown
#that 3-year-old children can estimate the reliability of the informant but they cannot estimate the
#cost of following the informant suggestion. This is in accordance with our model where the two
#entities are separated.
#
#Intrinsic environment: evaluates the cost of taking an action
#Trusting an unreliable informant has a cost, because the child will store an information which is not useful.
#This mechanism can be considered part of a planning module (e.g. prefrontal cortex) with inibitory projections.
#The cost function C can be defined as a function that takes as input: current_state, agent_action, agent_confidence, informant_action, informant_reputation.
#The cost function evaluates what's the cost of having taken an action in state S given the informant advice .
#The output of the function C is a real number representing the COST of taking that action given the informant suggestion.
#This table can be represented through a table or can be approximated through a function approximator (e.g. neural network)
#
#The actor architecture is a table of state-action pairs.
#When the child has to give a label for an object the policy must be used and not the utility table.
#The most common associated label to a visual object can be estimated setting the SOM action node to ACCEPT and then
#computing the activation of the vocabulary unit. The argmax is the value we want.
def softmax(x):
'''Compute softmax values of array x.
@param x the input array
@return the softmax array
'''
return np.exp(x - np.max(x)) / np.sum(np.exp(x - np.max(x)))
def training(dataset, actor_matrix, critic_vector, informant_vector, tot_images, tot_labels, tot_actions, verbose=False):
'''Train the actor using Intrinsic Motivated Reinforcement Learning
General Algorithm Description:
    1- The agent is presented with an object and a label (current state).
    2- An informant suggests a possible action (accept or reject the label).
    3- The agent takes an action (via softmax) based on its current state-action table
4- (External) New state and reward obtained from the environment
5- (Intrinsic) The informant_reputation is updated through MLE: agent_action, agent_confidence, informant_action, reward
6- (Intrinsic) The Cost is estimated: current_state, agent_action, agent_confidence, informant_action, informant_reputation
7- The utility table is updated using: previous_state, current_state, cost, reward
8- The actor table is updated using the delta from the critic
'''
#Hyper-Parameters
reward = 0
    gamma = 1.0 #no gamma used in this example
learning_rate = 0.1
counter = 1
for episode in dataset:
        #1- The agent is presented with an object and a label (current state).
image = episode[0] #image of the object
label = episode[1] #label given by the informant
informant_index = episode[2] #a integer representing the informant
informant_action = episode[3] #0=reject, 1=accept
        #3- The agent takes an action (via softmax) based on its current state-action table
#[0=cup, 1=book, 2=ball]
col = (image * tot_images) + label
action_array = actor_matrix[:, col]
action_distribution = softmax(action_array)
child_action = np.random.choice(tot_actions, 1, p=action_distribution) #select the action through softmax
#4- (External) New state and reward obtained from the environment
u_t = critic_vector[0, col] #previous state
        #New state is estimated; in this simple case nothing happens
#because the next state is terminal
u_t1 = u_t
#5- (Intrinsic) The informant_reputation is updated: agent_action, agent_confidence, informant_action, reward
#informant_vector: 0=unreliable, 1=reliable
#do_actions_agree: False, True
#Estimating child_confidence
#distance = scipy.spatial.distance.correlation(action_distribution[0], action_distribution[1])
#child_confidence_distribution = [np.min(action_distribution), np.max(action_distribution)] #non-knowledgeable, knowledgeable
distance = np.absolute(action_distribution[0] - action_distribution[1])
child_confidence_distribution = [1-distance, distance]
child_confidence = np.random.choice(2, 1, p=child_confidence_distribution)
#if(distance == 0):
#child_confidence=0
#child_confidence_distribution = [1, 0]
#else:
#child_confidence=1
#child_confidence_distribution = [0, 1]
#Check if child and informant agree
if(child_action == informant_action): do_actions_agree = True
else: do_actions_agree = False
#Increment the counter in the informant_vector.
        #Here we update the counter distribution only if
#the child is confident, because it is only in that
#case that the child can say if the informant is
#reliable or not.
if(do_actions_agree==False and child_confidence==1):
informant_vector[informant_index][0] += 1 #unreliable
elif(do_actions_agree==True and child_confidence==1):
informant_vector[informant_index][1] += 1 #reliable
elif(do_actions_agree==False and child_confidence==0):
#When child is not confident cannot update the table
informant_vector[informant_index][1] += 0 #reliable
informant_vector[informant_index][0] += 0 #unreliable
elif(do_actions_agree==True and child_confidence==0):
#When child is not confident cannot update the table
informant_vector[informant_index][1] += 0 #reliable
informant_vector[informant_index][0] += 0 #unreliable
else:
raise ValueError("ERROR: anomaly in the IF condition for informant_vector update")
#Using the informant_vector given as input it estimates the reputation of the informant
informant_reputation_distribution = np.true_divide(informant_vector[informant_index], np.sum(informant_vector[informant_index]))
informant_reputation = np.random.choice(2, 1, p=informant_reputation_distribution)
#informant_reputation = np.argmax(informant_reputation_distribution)
#6- (Intrinsic) The Cost is estimated: current_state, agent_action, agent_confidence, informant_action, informant_reputation
#child_confidence: 0=non-knowledgeable, 1=knowledgeable
#informant_reputation: 0=non-knowledgeable, 1=knowledgeable
#action: 0=reject, 1=accept
#informant_action: 0=reject, 1=accept
if(child_confidence==1 and informant_reputation==1 and child_action==1 and informant_action==1): cost = -1.0 # (knowledge, knowledge, accept, accept) = reinforce
elif(child_confidence==1 and informant_reputation==1 and child_action==0 and informant_action==1): cost = +0.5 # (knowledge, knowledge, reject, accept) = slight punish
elif(child_confidence==1 and informant_reputation==1 and child_action==1 and informant_action==0): cost = +0.5 # (knowledge, knowledge, accept, reject) = slight punish
elif(child_confidence==1 and informant_reputation==1 and child_action==0 and informant_action==0): cost = -1.0 # (knowledge, knowledge, reject, reject) = reinforce
elif(child_confidence==0 and informant_reputation==1 and child_action==1 and informant_action==1): cost = -1.0 # (non-knowledge, knowledge, accept, accept) = reinforce
elif(child_confidence==0 and informant_reputation==1 and child_action==0 and informant_action==0): cost = -1.0 # (non-knowledge, knowledge, reject, reject) = reinforce
elif(child_confidence==0 and informant_reputation==1 and child_action==0 and informant_action==1): cost = +1.0 # (non-knowledge, knowledge, reject, accept) = punish
elif(child_confidence==0 and informant_reputation==1 and child_action==1 and informant_action==0): cost = +1.0 # (non-knowledge, knowledge, accept, reject) = punish
elif(child_confidence==1 and informant_reputation==0 and child_action==1 and informant_action==1): cost = 0.0 # (knowledge, non-knowledge, accept, accept) =
elif(child_confidence==1 and informant_reputation==0 and child_action==0 and informant_action==1): cost = 0.0 # (knowledge, non-knowledge, reject, accept) =
elif(child_confidence==1 and informant_reputation==0 and child_action==1 and informant_action==0): cost = 0.0 # (knowledge, non-knowledge, accept, reject) =
elif(child_confidence==1 and informant_reputation==0 and child_action==0 and informant_action==0): cost = 0.0 # (knowledge, non-knowledge, reject, reject) =
elif(child_confidence==0 and informant_reputation==0 and child_action==1 and informant_action==1): cost = 0.0 # (non-knowledge, non-knowledge, accept, accept) = zero_cost
elif(child_confidence==0 and informant_reputation==0 and child_action==0 and informant_action==1): cost = 0.0 # (non-knowledge, non-knowledge, reject, accept) = zero_cost
elif(child_confidence==0 and informant_reputation==0 and child_action==1 and informant_action==0): cost = 0.0 # (non-knowledge, non-knowledge, accept, reject) = zero_cost
elif(child_confidence==0 and informant_reputation==0 and child_action==0 and informant_action==0): cost = 0.0 # (non-knowledge, non-knowledge, reject, reject) = zero_cost
else: raise ValueError("ERROR: the Bayesian Networks input values are out of range")
        #7- The utility table is updated using: previous_state, current_state, cost, reward
        #Updating the critic using temporal-difference learning.
        #In this simple case there is no u_t1 state:
        #the current state is considered terminal.
        #We can drop the term (gamma*u_t1) - u_t and consider
        #only (reward - cost) as the utility of the state (see Russell and Norvig).
delta = (reward - cost) + (gamma*u_t1) - u_t
critic_vector[0, col] += learning_rate*delta
#8- The actor table is updated using the delta from the critic
#Update the ACTOR using the delta
actor_matrix[child_action, col] += learning_rate*delta #the current action
actor_matrix[1-child_action, col] -= learning_rate*delta #the opposite action
if(verbose==True):
print("")
print("===========================")
print("Episode: " + str(counter))
print("Image: " + str(image) + "; Label: " + str(label))
print("Child action distribution: " + str(action_distribution))
print("Child action: " + str(child_action))
print("Child knowledge distribution: " + str(child_confidence_distribution))
print("Child knowledge: " + str(child_confidence))
print("Informant index: " + str(informant_index))
print("Informant action: " + str(informant_action))
print("Informant knowledge: " + str(informant_reputation))
print("Informant knowledge distribution: " + str(informant_reputation_distribution))
print("Cost: " + str(cost))
print("")
print("critic vector: " + str(critic_vector))
print("")
print("actor_matrix: " + str(actor_matrix))
print("")
print("informant_vector: " + str(informant_vector))
counter += 1
return actor_matrix, critic_vector, informant_vector
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
2235,
5674,
26641,
10115,
3208,
330,
354,
30292,
11,
42125,
2059,
1584,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
220,
198,
11748,
640,
198,
6738,
629,
541,
88,
1330,
2... | 3.013628 | 3,889 |
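# A small worked example of the softmax and confidence heuristic above: a
# large gap between the two action preferences gives a peaked distribution,
# and |p[0] - p[1]| is then close to 1 (a "knowledgeable" child).
import numpy as np

confident = softmax(np.array([2.0, 0.0]))    # ~[0.88, 0.12]
uncertain = softmax(np.array([0.5, 0.5]))    # [0.5, 0.5]
print(abs(confident[0] - confident[1]))      # ~0.76 -> high confidence
print(abs(uncertain[0] - uncertain[1]))      # 0.0  -> no confidence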
import seaborn as sns
def deorganize(data, x=None, y=None, hue=None, size=None, style=None, **kwargs):
"""Make some chaos
The idea is that we want to use as many plotting dimensions
(e.g. marker color, marker shape, etc.) as possible when
we plot our data. So given a multidimensional dataset, this
function assigns fields (column names of the dataset) to
these plotting parameters. The plotting dimensions `row`
and `col` can only aid in organizing the plot, which
would defeat the purpose, so we decline to assign fields
to these plotting dimensions, choosing instead to cram as
many dimensions as possible into a single matplotlib axis.
Parameters
----------
data : `pd.DataFrame`
Input dataset
x, y : str, str
Names of variables in `data`
hue : str
Column name in `data` that will group data by color
size : str
Column name in `data` that will group data by marker size
style : str
Column name in `data` that will group data by marker style
kwargs : dict
Other keyword arguments that are passed through to some underlying
plotting function
Returns
-------
data : `pd.DataFrame`
(Unchanged) dataset
kwargs : dict
Dictionary of plotting parameters to pass through to some underlying
plotting function
"""
# Get data types of input dataset
dtypes = data.dtypes
# Remove used dimensions from dtypes
dtypes = dtypes.drop([x, y, hue, size, style], errors='ignore')
# Split dtypes into ordinal (object) and numerical (int or float)
fields_O = dtypes[dtypes.values == 'O'].index
fields_N = dtypes[dtypes.values != 'O'].index
# (Re)Assign parameters
# ---------------------
# Assign x if not provided
if x is None:
# Choose first field in the dataset with a numerical dtype
# (if possible)
if len(fields_N):
x = fields_N[0]
# Remove field (now that it has been assigned)
fields_N = fields_N.drop(x)
dtypes = dtypes.drop(x)
# Otherwise, just choose the first field in the dataset
# irrespective of dtype
else:
x = dtypes.index[0]
# Remove field (now that it has been assigned)
fields_O = fields_O.drop(x)
dtypes = dtypes.drop(x)
# Same thing for `y`
if y is None:
if len(fields_N):
y = fields_N[0]
fields_N = fields_N.drop(y)
dtypes = dtypes.drop(y)
else:
y = dtypes.index[0]
fields_O = fields_O.drop(y)
dtypes = dtypes.drop(y)
# Assign `hue` to the first available ordinal field
if hue is None:
if len(fields_O):
hue = fields_O[0]
fields_O = fields_O.drop(hue)
dtypes = dtypes.drop(hue)
# Settle for a numerical field if no ordinal fields available
else:
hue = fields_N[0]
fields_N = fields_N.drop(hue)
dtypes = dtypes.drop(hue)
# Same thing for size
if size is None:
if len(fields_O):
size = fields_O[0]
fields_O = fields_O.drop(size)
dtypes = dtypes.drop(size)
else:
size = fields_N[0]
fields_N = fields_N.drop(size)
dtypes = dtypes.drop(size)
# Same thing for style
if style is None:
if len(fields_O):
style = fields_O[0]
fields_O = fields_O.drop(style)
dtypes = dtypes.drop(style)
else:
style = fields_N[0]
fields_N = fields_N.drop(style)
dtypes = dtypes.drop(style)
# Redefine kwargs
kwargs['x'] = x
kwargs['y'] = y
kwargs['hue'] = hue
kwargs['size'] = size
kwargs['style'] = style
return data, kwargs
def plot(data, **kwargs):
"""Make an Aditi plot!"""
# Deorganize
data, kwargs = deorganize(data, **kwargs)
# Make call to seaborn relplot
g = sns.relplot(data=data, legend=False, **kwargs)
# Axis formatting
g.set(xticklabels=[])
g.set(xlabel=None)
g.set(ylabel=None)
return g
| [
11748,
384,
397,
1211,
355,
3013,
82,
201,
198,
201,
198,
201,
198,
4299,
390,
9971,
1096,
7,
7890,
11,
2124,
28,
14202,
11,
331,
28,
14202,
11,
37409,
28,
14202,
11,
2546,
28,
14202,
11,
3918,
28,
14202,
11,
12429,
46265,
22046,
... | 2.185836 | 1,991 |
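# A hedged usage sketch for plot() above on a small synthetic DataFrame; the
# column names are placeholders. With the assignment logic in deorganize(),
# the two object columns land on hue and size and the leftover numerical
# column lands on style.
import pandas as pd

df = pd.DataFrame({
    'height': [1.0, 2.0, 3.0, 4.0],   # -> x
    'weight': [10.0, 8.0, 6.0, 4.0],  # -> y
    'species': ['a', 'a', 'b', 'b'],  # -> hue
    'site': ['x', 'y', 'x', 'y'],     # -> size
    'age': [1, 2, 3, 4],              # -> style (numerical fallback)
})
g = plot(df)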
from flask import Blueprint, make_response, request, send_file
from flask import current_app as app
from flask_cors import CORS
from queue import Empty
from kombu import Queue
import json
import sys
from . import tasks, celery
from .project import Project
widget_bp = Blueprint('widget_bp', __name__)
CORS(widget_bp)
def compose_response(obj, code):
"""
Utility method to create a response
:param obj:
The object to respond with
:param code:
The HTTP code to send
:return:
Returns the constructed response
"""
response = make_response(obj, code)
return response
@widget_bp.route('/download/', methods=['POST'])
def download_example():
"""
The route for a download request. This constructs a Project, zips it up, and responds with the zipfile
:return:
The constructed response for the zipfile blob
"""
data = request.get_json()
app.logger.debug(data)
project = Project(data['files'])
zipfile = project.zip()
archive = data['name'] + '.zip'
app.logger.debug(f"Sending zipped file {archive} size={sys.getsizeof(zipfile)}")
response = make_response(zipfile)
# We need to mess with the header here because we are sending file attachments in CORS
response.headers['Access-Control-Expose-Headers'] = 'Content-Disposition'
response.headers['Access-Control-Allow-Headers'] = 'Content-Disposition'
response.headers.set('Content-Type', 'application/zip')
response.headers.set('Content-Disposition', 'attachment', filename=archive)
return response
@widget_bp.route('/run_program/', methods=['POST'])
def run_program():
"""
The route for a run program request. This kicks off a celery task and responds back with the task id
:return:
The constructed response with the task id
"""
data = request.get_json()
app.logger.debug(data)
# Push the code to the container in Celery task
task = tasks.run_program.apply_async(kwargs={'data': data})
app.logger.debug(f'Starting Celery task with id={task.id}')
return compose_response({'identifier': task.id, 'message': "Pending"}, 200)
@widget_bp.route('/check_output/', methods=['POST'])
def check_run():
"""
The route for a check program request. The user should have a task id supplied by the run request. We expect that
as part of the incoming data and use it to query the message queues associated with that task id
:return:
        The constructed response with the output from the task, the task status, and completion status
"""
error_code = 200
data = request.get_json()
app.logger.debug(data)
identifier = data['identifier']
task = tasks.run_program.AsyncResult(identifier)
# Create a connection to the message queue associated with the task id
# This is how we receive intermediate results from the task during run
output = []
with celery.connection_or_acquire() as conn:
queue = conn.SimpleBuffer(data['identifier'])
while True:
try:
msg = queue.get(block=False)
app.logger.debug(f"Reading {msg.body} from mq")
output.append(msg.body)
# msg.ack()
except Empty:
break
queue.close()
app.logger.debug(f"output {output}")
response = {'output': [json.loads(l) for l in output],
'status': 0,
'completed': False,
'message': task.state}
app.logger.debug(f'Checking Celery task with id={identifier}')
app.logger.debug(f"Task state {task.state}")
if task.failed():
result = task.get()
app.logger.error(f'Task id={task.id} failed. Response={task.info}')
error_code = 500
if task.ready():
app.logger.debug(f"Task info {task.info}")
result = task.get()
elapsed = result["elapsed"]
app.logger.debug(f"Task took {elapsed}s.")
response['completed'] = True
response['status'] = result["status"]
app.logger.debug(f'Responding with response={response} and code={error_code}')
return compose_response(response, error_code)
| [
6738,
42903,
1330,
39932,
11,
787,
62,
26209,
11,
2581,
11,
3758,
62,
7753,
198,
6738,
42903,
1330,
1459,
62,
1324,
355,
598,
198,
6738,
42903,
62,
66,
669,
1330,
327,
20673,
198,
198,
6738,
16834,
1330,
33523,
198,
6738,
479,
2381,
... | 2.712338 | 1,540 |
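# A hedged client-side sketch of the run/poll protocol implemented above:
# POST the program, then poll with the returned task id until 'completed' is
# true. The base URL and the 'data' payload shape are placeholders.
import time
import requests

base = 'http://localhost:5000'
task = requests.post(base + '/run_program/', json={'code': 'print(1)'}).json()
while True:
    status = requests.post(base + '/check_output/',
                           json={'identifier': task['identifier']}).json()
    if status['completed']:
        break
    time.sleep(1)
print(status['output'])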
import logging
import sys
import gym
logger = logging.getLogger(__name__)
root_logger = logging.getLogger()
requests_logger = logging.getLogger('requests')
# Set up the default handler
formatter = logging.Formatter('[%(asctime)s] %(message)s')
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(formatter)
# We need to take in the gym logger explicitly since this is called
# at initialization time.
def undo_logger_setup():
    """Undoes the automatic logging setup done by OpenAI Gym. You should call
    this function if you want to manually configure logging
    yourself. Typical usage would involve putting something like the
    following at the top of your script:

    gym.undo_logger_setup()
    logger = logging.getLogger()
    logger.addHandler(logging.StreamHandler(sys.stderr))
    """
    root_logger.removeHandler(handler)
    gym.logger.setLevel(logging.NOTSET)
    requests_logger.setLevel(logging.NOTSET)
| [
11748,
18931,
198,
11748,
25064,
198,
198,
11748,
11550,
198,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
198,
198,
15763,
62,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
3419,
198,
8897,
3558,
62,
640... | 3.125828 | 302 |
from django.db import models
import re
# Create your models here.
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
11748,
302,
198,
198,
2,
13610,
534,
4981,
994,
13,
198,
220,
220,
220,
220,
198,
220,
220,
220,
220
] | 2.714286 | 28 |
""" rotational bond/torsion info for specific reaction classes
"""
from automol.par import ReactionClass
import automol.zmat
from automol.reac._util import hydrogen_abstraction_atom_keys
from automol.reac._util import substitution_atom_keys
# Bimolecular reactions
# 1. Hydrogen abstractions
def hydrogen_abstraction_linear_atom_keys(rxn, zma=None):
    """ Obtain the linear atom keys for a hydrogen abstraction

    :param rxn: a Reaction object
    :param zma: a z-matrix; if passed in, the linear atoms will be determined
        from this; otherwise they will be determined heuristically from the
        reaction object
    :returns: the keys of the linear atoms in the graph
    :rtype: tuple[int]
    """
    tsg = rxn.forward_ts_graph
    if zma is not None:
        lin_keys = list(automol.zmat.linear_atom_keys(zma))
    else:
        lin_keys = list(automol.graph.linear_atom_keys(tsg))

    _, hyd_key, _ = hydrogen_abstraction_atom_keys(rxn)
    lin_keys.append(hyd_key)
    lin_keys = tuple(sorted(set(lin_keys)))
    return lin_keys


# 4. Substitution
def substitution_linear_atom_keys(rxn, zma=None):
    """ Obtain the linear atom keys for a substitution

    :param rxn: a Reaction object
    :param zma: a z-matrix; if passed in, the linear atoms will be determined
        from this; otherwise they will be determined heuristically from the
        reaction object
    :returns: the keys of the linear atoms in the graph
    :rtype: tuple[int]
    """
    tsg = rxn.forward_ts_graph
    if zma is not None:
        lin_keys = list(automol.zmat.linear_atom_keys(zma))
    else:
        lin_keys = list(automol.graph.linear_atom_keys(tsg))

    _, tra_key, _ = substitution_atom_keys(rxn)
    lin_keys.append(tra_key)
    lin_keys = tuple(sorted(set(lin_keys)))
    return lin_keys


def linear_atom_keys(rxn, zma=None):
    """ Obtain the linear atom keys

    :param rxn: a hydrogen migration Reaction object
    :param zma: a z-matrix; if passed in, the linear atoms will be determined
        from this; otherwise they will be determined heuristically from the
        reaction object
    :returns: the keys of the linear atoms in the graph
    :rtype: tuple[int]
    """
    function_dct = {
        # unimolecular
        ReactionClass.HYDROGEN_MIGRATION: _default,
        ReactionClass.BETA_SCISSION: _default,
        ReactionClass.RING_FORM_SCISSION: _default,
        ReactionClass.ELIMINATION: _default,
        # bimolecular
        ReactionClass.HYDROGEN_ABSTRACTION:
            hydrogen_abstraction_linear_atom_keys,
        ReactionClass.ADDITION: _default,
        ReactionClass.INSERTION: _default,
        ReactionClass.SUBSTITUTION: substitution_linear_atom_keys,
    }

    fun_ = function_dct[rxn.class_]
    ret = fun_(rxn, zma=zma)
    return ret


def rotational_bond_keys(rxn, zma=None):
    """ Obtain the rotational bond keys

    :param rxn: a hydrogen migration Reaction object
    :param zma: a z-matrix; if passed in, the linear atoms will be determined
        from this; otherwise they will be determined heuristically from the
        reaction object
    :returns: the keys of the rotational bonds in the graph
    :rtype: tuple[frozenset[int]]
    """
    tsg = rxn.forward_ts_graph
    lin_keys = linear_atom_keys(rxn, zma=zma)
    bnd_keys = automol.graph.rotational_bond_keys(tsg, lin_keys=lin_keys)
    return bnd_keys


def rotational_groups(rxn, key1, key2, dummy=False):
    """ Obtain the rotational groups for a given rotational axis

    :param rxn: a hydrogen migration Reaction object
    :param key1: the first atom key of the rotational axis
    :param key2: the second atom key of the rotational axis
    :returns: the rotational groups on either side of the axis
    :rtype: (tuple[int], tuple[int])
    """
    tsg = rxn.forward_ts_graph
    grps = automol.graph.rotational_groups(tsg, key1, key2, dummy=dummy)
    return grps


def rotational_symmetry_number(rxn, key1, key2, zma=None):
    """ Obtain the rotational symmetry number for a given rotational axis

    :param rxn: a hydrogen migration Reaction object
    :param zma: a z-matrix; if passed in, the linear atoms will be determined
        from this; otherwise they will be determined heuristically from the
        reaction object
    :returns: the rotational symmetry number of the axis
    :rtype: int
    """
    lin_keys = linear_atom_keys(rxn, zma=zma)
    tsg = rxn.forward_ts_graph
    sym_num = automol.graph.rotational_symmetry_number(tsg, key1, key2,
                                                       lin_keys=lin_keys)
    return sym_num
| [
37811,
5724,
864,
6314,
14,
83,
669,
295,
7508,
329,
2176,
6317,
6097,
198,
37811,
198,
6738,
3557,
349,
13,
1845,
1330,
39912,
9487,
198,
11748,
3557,
349,
13,
89,
6759,
198,
6738,
3557,
349,
13,
260,
330,
13557,
22602,
1330,
17669,
... | 2.60123 | 1,788 |
import logging
from typing import Dict
from makeMKV.model.enum.item_attribute_id import ItemAttributeId
from makeMKV.model.enum.item_info import ItemInfo
from makeMKV.model.enum.stream_type import StreamType
from makeMKV.model.stream import Stream, VideoStream, SubtitleStream, AudioStream
logger = logging.getLogger(__name__)
| [
11748,
18931,
198,
6738,
19720,
1330,
360,
713,
198,
198,
6738,
787,
33907,
53,
13,
19849,
13,
44709,
13,
9186,
62,
42348,
62,
312,
1330,
9097,
33682,
7390,
198,
6738,
787,
33907,
53,
13,
19849,
13,
44709,
13,
9186,
62,
10951,
1330,
... | 3.402062 | 97 |
import os
import time
import numpy as np
import tensorflow as tf
from operator import mul
from functools import reduce
from pathlib import Path
from datetime import datetime
from tqdm import tqdm
from PIL import Image
from . import data_utils
from .. import settings
tf.compat.v1.enable_eager_execution()
@tf.function
@tf.function
# region Model hyperparameters
window_size = settings.WINDOW_SIZE
image_dims = [settings.CHUNK_SIZE, settings.N_MELS]
input_shape = [*image_dims, window_size]
latent_dims = settings.LATENT_DIMS
num_conv = 2
num_filters = 32
max_filters = 64
kernel_size = 3
# endregion
# region Training hyperparameters
num_epochs = settings.EPOCHS
batch_size = settings.BATCH_SIZE
# endregion
# region Model definition
inputs = tf.keras.layers.Input(shape=input_shape, name="encoder_input")
x = inputs
for i in range(num_conv):
    x = tf.keras.layers.Conv2D(
        filters=min(num_filters * (i + 1), max_filters),
        kernel_size=kernel_size,
        activation="relu",
        strides=2,
        padding="same",
        activity_regularizer=tf.keras.regularizers.l1(0.01),
    )(x)
latent_shape = x.shape
x = tf.keras.layers.Flatten()(x)
z_mean = tf.keras.layers.Dense(latent_dims, name="z_mean")(x)
z_log_var = tf.keras.layers.Dense(latent_dims, name="z_log_var")(x)
z = tf.keras.layers.Lambda(reparameterize, output_shape=[latent_dims], name="z")(
    [z_mean, z_log_var]
)
encoder = tf.keras.Model(inputs, [z_mean, z_log_var, z], name="encoder")
encoder.summary()
latent_inputs = tf.keras.layers.Input(shape=(latent_dims,), name="z_sampled")
x = tf.keras.layers.Dense(reduce(mul, latent_shape[1:]), activation="relu")(
    latent_inputs
)
x = tf.keras.layers.Reshape(latent_shape[1:])(x)
for i in range(num_conv):
    x = tf.keras.layers.Conv2DTranspose(
        filters=min(num_filters * (num_conv - i), max_filters),
        kernel_size=kernel_size,
        strides=2,
        activation="relu",
        padding="same",
        activity_regularizer=tf.keras.regularizers.l1(0.01),
    )(x)
reconstructed = tf.keras.layers.Conv2DTranspose(
    filters=window_size, kernel_size=3, strides=1, padding="SAME", activation="sigmoid"
)(x)
decoder = tf.keras.Model(latent_inputs, reconstructed, name="decoder")
decoder.summary()
outputs = decoder(encoder(inputs)[2])
vae = tf.keras.Model(inputs, outputs, name="vae")
vae.compile(
    optimizer=tf.keras.optimizers.Adam(1e-4),
    loss=vae_loss(z_mean, z_log_var, image_dims),
    experimental_run_tf_function=False,
)
vae.summary()
# endregion

# region Train and evaluate
train_dataset, test_dataset = data_utils.load_numpy_dataset(
    settings.TRAIN_DATA_DIR, return_tuples=True
)

start = time.time()
num_samples = 2000
with tqdm(train_dataset.take(num_samples), total=num_samples) as pbar:
    for i, element in enumerate(pbar):
        # pbar.write(f"{i + 1}: {element[0].shape}")
        pass
print("----------------FINISHED----------------")
print(time.time() - start)
# if Path(settings.MODEL_WEIGHTS).is_file():
# vae.load_weights(settings.MODEL_WEIGHTS)
# vae.fit(train_dataset, epochs=num_epochs, validation_data=(test_dataset, None))
# endregion
# optimizer = tf.keras.optimizers.Adam(1e-4)
# model = CVAE(num_conv=4)
# model.compile(optimizer=optimizer)
# if os.path.exists(settings.MODEL_WEIGHTS):
# print(f"Loading weights from '{settings.MODEL_WEIGHTS}'")
# model.load_weights(settings.MODEL_WEIGHTS)
# num_train = num_test = 0
# generation_vector = tf.random.normal(shape=[settings.EXAMPLES_TO_GENERATE, model.latent_dims])
# visualiziation_output_dir = os.path.join(settings.OUTPUT_DIR, 'progress')
# visualize_model_outputs(model, 0, generation_vector, visualiziation_output_dir)
#
# for epoch in range(1, settings.EPOCHS + 1):
#     start = time.time()
#     print(f"Training | Epoch {epoch} / {settings.EPOCHS}...")
#     for train_x in tqdm(train_dataset, total=num_train or None):
#         compute_apply_gradients(model, train_x, optimizer)
#         if epoch == 1:
#             num_train += 1
#     print(f"Finished Train Step | Epoch {epoch} Train Step took {time.time() - start:.2f} seconds")
#
#     if epoch % 1 == 0:
#         # Evaluate Model
#         print(f"Evaluation | Epoch {epoch}...")
#         loss = tf.keras.metrics.Mean()
#         for test_x in tqdm(test_dataset, total=num_test):
#             loss(compute_loss(model, test_x))
#             if epoch == 1:
#                 num_test += 1
#         elbo = -loss.result()
#         print(f"Epoch {epoch} took {time.time() - start:.2f} seconds | Test Set ELBO: {elbo}")
#         # Save Model Weights
#         os.makedirs(os.path.dirname(settings.MODEL_WEIGHTS), exist_ok=True)  # Create dir if it doesn't exist
#         model.save_weights(settings.MODEL_WEIGHTS)
#         # Save Generated Samples
#         visualize_model_outputs(model, epoch, generation_vector, visualiziation_output_dir)
| [
11748,
28686,
198,
11748,
640,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
10088,
1330,
35971,
198,
6738,
1257,
310,
10141,
1330,
4646,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
4818,
807... | 2.353705 | 2,078 |
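The row above applies `reparameterize` and `vae_loss`, but their definitions fall outside the excerpt (only two orphaned `@tf.function` decorators remain where they were). A standard formulation of the two, which is what the surrounding code appears to expect, is sketched below as an assumption, not the author's verbatim code.

import tensorflow as tf

def reparameterize(args):
    # Standard VAE reparameterization trick: z = mean + exp(log_var / 2) * eps
    z_mean, z_log_var = args
    eps = tf.random.normal(shape=tf.shape(z_mean))
    return z_mean + tf.exp(0.5 * z_log_var) * eps

def vae_loss(z_mean, z_log_var, image_dims):
    # Returns a Keras-style loss: reconstruction term plus KL divergence.
    def loss(y_true, y_pred):
        rec = tf.reduce_mean(
            tf.keras.losses.binary_crossentropy(
                tf.reshape(y_true, [-1]), tf.reshape(y_pred, [-1])))
        rec *= image_dims[0] * image_dims[1]
        kl = -0.5 * tf.reduce_mean(
            1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var))
        return rec + kl
    return loss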
import numpy as np
import pandas as pd
import os
import re
from os.path import join
from os import path, makedirs, rename
from tqdm import tqdm
#save_data_to_folders("../input")
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
28686,
198,
11748,
302,
198,
6738,
28686,
13,
6978,
1330,
4654,
198,
6738,
28686,
1330,
3108,
11,
285,
4335,
17062,
11,
36265,
198,
6738,
256,
80,
36020,
... | 2.919355 | 62 |
#! /usr/bin/env python
# Thomas Nagy, 2011 (ita)
"""
Create _moc.cpp files
The builds are 30-40% faster when .moc files are included,
so you should NOT use this tool. If you really,
really want it:

def configure(conf):
    conf.load('compiler_cxx qt4')
    conf.load('slow_qt4')
See playground/slow_qt/wscript for a complete example.
"""
from waflib.TaskGen import extension
from waflib import Task
import waflib.Tools.qt4
import waflib.Tools.cxx
@extension(*waflib.Tools.qt4.EXT_QT4)
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
5658,
15196,
88,
11,
2813,
357,
5350,
8,
198,
198,
37811,
198,
16447,
4808,
76,
420,
13,
20322,
3696,
198,
198,
464,
12188,
389,
1542,
12,
1821,
4,
5443,
618,
764,
76,
420,
... | 2.754286 | 175 |
from .descriptor import EasyRepr
__all__ = ["easyrepr"]
def easyrepr(wrapped=None, **kwargs):
"""Decorator for an automatic `__repr__` method.
:param wrapped: the function to wrap
See `.descriptor.EasyRepr` for a full description of the accepted
keyword parameters.
This decorator wraps a function (which is available as `__wrapped__`). The
wrapped function should return a description of the attributes that should
be included in the repr.
>>> class UseEasyRepr:
... def __init__(self, foo, bar):
... self.foo = foo
... self.bar = bar
... @easyrepr
... def __repr__(self):
... ...
...
>>> x = UseEasyRepr(1, 2)
>>> repr(x)
'UseEasyRepr(foo=1, bar=2)'
This function may be called with all arguments up-front (wrapped function
and keyword arguments) ::
easyrepr(fn, style="<>")
or the wrapped function may be provided in a second call ::
easyrepr(style="<>")(fn)
to make it easier to use this function as a decorator.
"""
if wrapped is None:
return _easyrepr
return _easyrepr(wrapped)
| [
6738,
764,
20147,
1968,
273,
1330,
16789,
6207,
81,
628,
198,
834,
439,
834,
796,
14631,
38171,
260,
1050,
8973,
628,
198,
4299,
2562,
260,
1050,
7,
29988,
1496,
28,
14202,
11,
12429,
46265,
22046,
2599,
198,
220,
220,
220,
37227,
107... | 2.61036 | 444 |
# Generated by Django 2.1 on 2019-05-07 15:15
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
319,
13130,
12,
2713,
12,
2998,
1315,
25,
1314,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.966667 | 30 |
"""Verify the functionality of isilon_hadoop_tools.cli."""
from __future__ import absolute_import
from __future__ import unicode_literals
try:
from unittest.mock import Mock # Python 3
except ImportError:
from mock import Mock # Python 2
import pytest
from isilon_hadoop_tools import IsilonHadoopToolError, cli
def test_catches(exception):
    """Ensure cli.catches detects the desired exception."""
    assert cli.catches(exception)(Mock(side_effect=exception))() == 1


def test_not_catches(exception):
    """Ensure cli.catches does not catch undesirable exceptions."""
    with pytest.raises(exception):
        cli.catches(())(Mock(side_effect=exception))()


@pytest.mark.parametrize(
    'error, classinfo',
    [
        (cli.CLIError, IsilonHadoopToolError),
        (cli.HintedError, cli.CLIError),
    ],
)
def test_errors_cli(error, classinfo):
    """Ensure that exception types remain consistent."""
    assert issubclass(error, IsilonHadoopToolError)
    assert issubclass(error, cli.CLIError)
    assert issubclass(error, classinfo)
| [
37811,
13414,
1958,
262,
11244,
286,
318,
33576,
62,
71,
4533,
404,
62,
31391,
13,
44506,
526,
15931,
628,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
... | 2.712468 | 393 |
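Both `test_catches` and `test_not_catches` in the row above take an `exception` argument, but the fixture or parametrization that supplies it is not shown in this excerpt. A plausible sketch, assuming the suite exercises a handful of exception types:

import pytest

# Hypothetical parametrized fixture; the real project may define this differently.
@pytest.fixture(params=[KeyboardInterrupt, ValueError, RuntimeError])
def exception(request):
    return request.param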
from unittest import TestCase
from CTCI.Ch2_Linked_Lists.common.SinglyLinkedList import Empty
from CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex1 import RemoveDupsSinglyLinkedList
| [
6738,
555,
715,
395,
1330,
6208,
20448,
198,
6738,
327,
4825,
40,
13,
1925,
17,
62,
11280,
276,
62,
43,
1023,
13,
11321,
13,
50,
4420,
11280,
276,
8053,
1330,
33523,
198,
6738,
327,
4825,
40,
13,
1925,
17,
62,
11280,
276,
62,
43,
... | 2.632353 | 68 |
from cloudmesh.pi.board.monitor import Monitor
| [
6738,
6279,
76,
5069,
13,
14415,
13,
3526,
13,
41143,
1330,
18289,
628
] | 3.692308 | 13 |
"""Leetcode 647. Palindromic Substrings
Medium
URL: https://leetcode.com/problems/palindromic-substrings/
Given a string, your task is to count how many palindromic substrings in this string.
The substrings with different start indexes or end indexes are counted as different
substrings even they consist of same characters.
Example 1:
Input: "abc"
Output: 3
Explanation: Three palindromic strings: "a", "b", "c".
Example 2:
Input: "aaa"
Output: 6
Explanation: Six palindromic strings: "a", "a", "a", "aa", "aa", "aaa".
"""
if __name__ == '__main__':
    main()
| [
37811,
3123,
316,
8189,
718,
2857,
13,
3175,
521,
398,
291,
3834,
37336,
198,
31205,
198,
198,
21886,
25,
3740,
1378,
293,
316,
8189,
13,
785,
14,
1676,
22143,
14,
18596,
521,
398,
291,
12,
7266,
37336,
14,
198,
198,
15056,
257,
473... | 3 | 190 |
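The row references a `main()` that is not included in the excerpt. A sketch of the usual expand-around-center solution for LeetCode 647 (an assumption, not the row's original code): every palindrome has one of 2n - 1 centers (a character or a gap between characters), and expanding outward from each center counts one palindromic substring per successful expansion.

def count_palindromic_substrings(s):
    # Expand around each of the 2n - 1 centers, counting every match.
    n, count = len(s), 0
    for center in range(2 * n - 1):
        left, right = center // 2, center // 2 + center % 2
        while left >= 0 and right < n and s[left] == s[right]:
            count += 1
            left -= 1
            right += 1
    return count

def main():
    assert count_palindromic_substrings("abc") == 3  # "a", "b", "c"
    assert count_palindromic_substrings("aaa") == 6  # "a" x3, "aa" x2, "aaa"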
import os
import numpy as np
from keras.preprocessing.image import load_img
if __name__ == '__main__':
    img = load_img('../dataset/validation/0000000.jpg', target_size=(1280, 720))
    img.show() | [
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
41927,
292,
13,
3866,
36948,
13,
9060,
1330,
3440,
62,
9600,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
33705,
796,
3440,
6... | 2.753425 | 73 |
import pyglet
import Block
import BlockArray
import random
#magic numbers
blockSize = 24
pointSize = 6
halfBorder = 1.0
height = 24
width = 36
#ugly global variables for visual effects
hoverGroup = []
#initialize the most important data structure
blocks = BlockArray.BlockArray(width, height)
#initialize the graphics
window = pyglet.window.Window(width*blockSize, height*blockSize)
pyglet.gl.glPointSize(pointSize)
triField = pyglet.graphics.vertex_list(blocks.count * 6, 'v2f', 'c4B')
triField.vertices = [0.0] * (blocks.count * 12)
triField.colors = [0xFF] * (blocks.count * 24)
pointField1 = pyglet.graphics.vertex_list(blocks.count, 'v2f', 'c4B')
pointField1.vertices = [0.0] * (blocks.count * 2)
pointField1.colors = [0xFF] * (blocks.count * 4)
pointField2 = pyglet.graphics.vertex_list(blocks.count, 'v2f', 'c4B')
pointField2.vertices = [0.0] * (blocks.count * 2)
pointField2.colors = [0xFF] * (blocks.count * 4)
for i in range(blocks.count):
    x = blocks.getX(i) * float(blockSize)
    y = blocks.getY(i) * float(blockSize)
    triField.vertices[(i*12)+0] = x + halfBorder
    triField.vertices[(i*12)+1] = y + halfBorder
    triField.vertices[(i*12)+2] = x + float(blockSize) - halfBorder
    triField.vertices[(i*12)+3] = y + float(blockSize) - halfBorder
    triField.vertices[(i*12)+4] = x + float(blockSize) - halfBorder
    triField.vertices[(i*12)+5] = y + halfBorder
    triField.vertices[(i*12)+6] = x + halfBorder
    triField.vertices[(i*12)+7] = y + halfBorder
    triField.vertices[(i*12)+8] = x + float(blockSize) - halfBorder
    triField.vertices[(i*12)+9] = y + float(blockSize) - halfBorder
    triField.vertices[(i*12)+10] = x + halfBorder
    triField.vertices[(i*12)+11] = y + float(blockSize) - halfBorder
    pointField1.vertices[(i*2)] = x + (pointSize/2.0) + halfBorder
    pointField1.vertices[(i*2)+1] = y + float(blockSize) - (pointSize / 2.0) - halfBorder
    pointField2.vertices[(i*2)] = x + float(blockSize) - (pointSize / 2.0) - halfBorder
    pointField2.vertices[(i*2)+1] = y + (pointSize/2.0) + halfBorder
@window.event
@window.event
@window.event
@window.event
@window.event
@window.event
pyglet.app.run()
| [
11748,
12972,
70,
1616,
198,
11748,
9726,
198,
11748,
9726,
19182,
198,
11748,
4738,
628,
198,
198,
2,
32707,
3146,
198,
9967,
10699,
796,
1987,
198,
4122,
10699,
796,
718,
198,
13959,
34189,
796,
352,
13,
15,
198,
17015,
796,
1987,
1... | 2.519481 | 847 |
# Generated by Django 3.2.5 on 2021-07-13 06:42
import cloudinary.models
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
20,
319,
33448,
12,
2998,
12,
1485,
9130,
25,
3682,
198,
198,
11748,
6279,
3219,
13,
27530,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 3 | 36 |
import unittest
import json
import os
from dacbench.benchmarks import OneLLBenchmark
from dacbench.envs import OneLLEnv
# TestOneLLBenchmark().test_get_env()
# TestOneLLBenchmark().test_scenarios()
# TestOneLLBenchmark().test_read_instances()
# TestOneLLBenchmark().test_save_conf()
| [
11748,
555,
715,
395,
198,
11748,
33918,
198,
11748,
28686,
198,
198,
6738,
288,
330,
26968,
13,
26968,
14306,
1330,
1881,
3069,
44199,
4102,
198,
6738,
288,
330,
26968,
13,
268,
14259,
1330,
1881,
43,
2538,
48005,
628,
198,
198,
2,
6... | 2.958763 | 97 |
from json import load, dump
from pathlib import Path
from shutil import rmtree
from unittest.mock import Mock
from pytest import fixture
from pc_spec.data import save_store, load_store
@fixture
@fixture
@fixture
@fixture
@fixture
@fixture
@fixture
@fixture
@fixture
@fixture
@fixture
@fixture
| [
6738,
33918,
1330,
3440,
11,
10285,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
4423,
346,
1330,
374,
16762,
631,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
44123,
198,
198,
6738,
12972,
9288,
1330,
29220,
198,
198,
6738,
40653,
... | 2.771186 | 118 |
from marshmallow import Schema, fields, validate
from schema.rating_movie import RatingMovieSchema
from schema.role import RoleSchema
| [
6738,
22397,
42725,
1330,
10011,
2611,
11,
7032,
11,
26571,
198,
198,
6738,
32815,
13,
8821,
62,
41364,
1330,
12028,
25097,
27054,
2611,
198,
6738,
32815,
13,
18090,
1330,
20934,
27054,
2611,
628
] | 4.121212 | 33 |
"""Test Blend modes."""
import unittest
from coloraide import Color
from . import util
# Colors that produce pretty distinct results
REDISH = '#fc3d99'
BLUISH = '#07c7ed'
YELLOWISH = '#f5d311'
class TestBlendModes(util.ColorAsserts, unittest.TestCase):
"""Test blend modes."""
def test_alpha(self):
"""Test normal blend mode with source alpha."""
self.assertColorEqual(Color('blue').compose('color(srgb 1 0 0 / 0.5)', blend='normal'), Color('blue'))
self.assertColorEqual(
Color('color(srgb 0 0 1 / 0.5)').compose('color(srgb 1 0 0)', blend='normal'),
Color('color(srgb 0.5 0 0.5)')
)
self.assertColorEqual(
Color('color(srgb 0 0 1 / 0.5)').compose('color(srgb 1 0 0 / 0.5)', blend='normal'),
Color('color(srgb 0.25 0 0.5 / 0.75)')
)
def test_normal(self):
"""Test normal."""
self.assertColorEqual(Color(REDISH).compose('black', blend='normal'), Color(REDISH))
self.assertColorEqual(Color(REDISH).compose('white', blend='normal'), Color(REDISH))
self.assertColorEqual(Color(REDISH).compose(YELLOWISH, blend='normal'), Color(REDISH))
self.assertColorEqual(Color(BLUISH).compose(REDISH, blend='normal'), Color(BLUISH))
self.assertColorEqual(Color(BLUISH).compose(YELLOWISH, blend='normal'), Color(BLUISH))
def test_multiply(self):
"""Test multiply."""
self.assertColorEqual(Color(REDISH).compose('black', blend='multiply'), Color('black'))
self.assertColorEqual(Color(REDISH).compose('white', blend='multiply'), Color(REDISH))
self.assertColorEqual(Color(REDISH).compose(YELLOWISH, blend='multiply'), Color('rgb(242.12 50.475 10.2)'))
self.assertColorEqual(Color(BLUISH).compose(REDISH, blend='multiply'), Color('rgb(6.9176 47.604 142.2)'))
self.assertColorEqual(Color(BLUISH).compose(YELLOWISH, blend='multiply'), Color('rgb(6.7255 164.66 15.8)'))
def test_screen(self):
"""Test screen."""
self.assertColorEqual(Color(REDISH).compose('black', blend='screen'), Color(REDISH))
self.assertColorEqual(Color(REDISH).compose('white', blend='screen'), Color('white'))
self.assertColorEqual(Color(REDISH).compose(YELLOWISH, blend='screen'), Color('rgb(254.88 221.53 159.8)'))
self.assertColorEqual(Color(BLUISH).compose(REDISH, blend='screen'), Color('rgb(252.08 212.4 247.8)'))
self.assertColorEqual(Color(BLUISH).compose(YELLOWISH, blend='screen'), Color('rgb(245.27 245.34 238.2)'))
def test_overlay(self):
"""Test overlay."""
self.assertColorEqual(Color(REDISH).compose('black', blend='overlay'), Color('black'))
self.assertColorEqual(Color(REDISH).compose('white', blend='overlay'), Color('white'))
self.assertColorEqual(Color(REDISH).compose(YELLOWISH, blend='overlay'), Color('rgb(254.76 188.05 20.4)'))
self.assertColorEqual(Color(BLUISH).compose(REDISH, blend='overlay'), Color('rgb(249.16 95.208 240.6)'))
self.assertColorEqual(Color(BLUISH).compose(YELLOWISH, blend='overlay'), Color('rgb(235.55 235.67 31.6)'))
def test_darken(self):
"""Test darken."""
self.assertColorEqual(Color(REDISH).compose('black', blend='darken'), Color('black'))
self.assertColorEqual(Color(REDISH).compose('white', blend='darken'), Color('rgb(252 61 153)'))
self.assertColorEqual(Color(REDISH).compose(YELLOWISH, blend='darken'), Color('rgb(245 61 17)'))
self.assertColorEqual(Color(BLUISH).compose(REDISH, blend='darken'), Color('rgb(7 61 153)'))
self.assertColorEqual(Color(BLUISH).compose(YELLOWISH, blend='darken'), Color('rgb(7 199 17)'))
def test_lighten(self):
"""Test lighten."""
self.assertColorEqual(Color(REDISH).compose('black', blend='lighten'), Color('rgb(252 61 153)'))
self.assertColorEqual(Color(REDISH).compose('white', blend='lighten'), Color('white'))
self.assertColorEqual(Color(REDISH).compose(YELLOWISH, blend='lighten'), Color('rgb(252 211 153)'))
self.assertColorEqual(Color(BLUISH).compose(REDISH, blend='lighten'), Color('rgb(252 199 237)'))
self.assertColorEqual(Color(BLUISH).compose(YELLOWISH, blend='lighten'), Color('rgb(245 211 237)'))
def test_color_dodge(self):
"""Test color dodge."""
self.assertColorEqual(Color(REDISH).compose('black', blend='color-dodge'), Color('black'))
self.assertColorEqual(Color(REDISH).compose('white', blend='color-dodge'), Color('white'))
self.assertColorEqual(Color(REDISH).compose(YELLOWISH, blend='color-dodge'), Color('rgb(255 255 42.5)'))
self.assertColorEqual(Color(BLUISH).compose(REDISH, blend='color-dodge'), Color('rgb(255 255 255)'))
self.assertColorEqual(Color(BLUISH).compose(YELLOWISH, blend='color-dodge'), Color('rgb(251.92 255 240.83)'))
# If source channel is 1 resultant channel will be 1
self.assertColorEqual(Color('white').compose(REDISH, blend='color-dodge'), Color('white'))
def test_color_burn(self):
"""Test color burn."""
self.assertColorEqual(Color(REDISH).compose('black', blend='color-burn'), Color('black'))
self.assertColorEqual(Color(REDISH).compose('white', blend='color-burn'), Color('white'))
self.assertColorEqual(Color(REDISH).compose(YELLOWISH, blend='color-burn'), Color('rgb(244.88 71.066 0)'))
self.assertColorEqual(Color(BLUISH).compose(REDISH, blend='color-burn'), Color('rgb(145.71 6.407 145.25)'))
self.assertColorEqual(Color(BLUISH).compose(YELLOWISH, blend='color-burn'), Color('rgb(0 198.62 0)'))
# If source is channel is 0, resultant channel will be 0
self.assertColorEqual(Color('black').compose(REDISH, blend='color-burn'), Color('black'))
def test_difference(self):
"""Test difference."""
self.assertColorEqual(Color(REDISH).compose('black', blend='difference'), Color('rgb(252 61 153)'))
self.assertColorEqual(Color(REDISH).compose('white', blend='difference'), Color('rgb(3 194 102)'))
self.assertColorEqual(Color(REDISH).compose(YELLOWISH, blend='difference'), Color('rgb(7 150 136)'))
self.assertColorEqual(Color(BLUISH).compose(REDISH, blend='difference'), Color('rgb(245 138 84)'))
self.assertColorEqual(Color(BLUISH).compose(YELLOWISH, blend='difference'), Color('rgb(238 12 220)'))
def test_exclusion(self):
"""Test exclusion."""
self.assertColorEqual(Color(REDISH).compose('black', blend='exclusion'), Color('rgb(252 61 153)'))
self.assertColorEqual(Color(REDISH).compose('white', blend='exclusion'), Color('rgb(3 194 102)'))
self.assertColorEqual(Color(REDISH).compose(YELLOWISH, blend='exclusion'), Color('rgb(12.765 171.05 149.6)'))
self.assertColorEqual(Color(BLUISH).compose(REDISH, blend='exclusion'), Color('rgb(245.16 164.79 105.6)'))
self.assertColorEqual(Color(BLUISH).compose(YELLOWISH, blend='exclusion'), Color('rgb(238.55 80.675 222.4)'))
def test_color_hard_light(self):
"""Test hard light."""
self.assertColorEqual(Color(REDISH).compose('black', blend='hard-light'), Color('rgb(249 0 51)'))
self.assertColorEqual(Color(REDISH).compose('white', blend='hard-light'), Color('rgb(255 122 255)'))
self.assertColorEqual(Color(REDISH).compose(YELLOWISH, blend='hard-light'), Color('rgb(254.76 100.95 64.6)'))
self.assertColorEqual(Color(BLUISH).compose(REDISH, blend='hard-light'), Color('rgb(13.835 169.79 240.6)'))
self.assertColorEqual(Color(BLUISH).compose(YELLOWISH, blend='hard-light'), Color('rgb(13.451 235.67 221.4)'))
def test_color_soft_light(self):
"""Test soft light."""
self.assertColorEqual(Color(REDISH).compose('black', blend='soft-light'), Color('black'))
self.assertColorEqual(Color(REDISH).compose('white', blend='soft-light'), Color('white'))
self.assertColorEqual(
Color(REDISH).compose(YELLOWISH, blend='soft-light'),
Color('rgb(249.83 192.01 24.722)')
)
self.assertColorEqual(Color(BLUISH).compose(REDISH, blend='soft-light'), Color('rgb(249.2 96.747 191.24)'))
self.assertColorEqual(
Color(BLUISH).compose(YELLOWISH, blend='soft-light'),
Color('rgb(235.92 222.75 50.158)')
)
def test_hue(self):
"""Test hue."""
self.assertColorEqual(Color(REDISH).compose('black', blend='hue'), Color('black'))
self.assertColorEqual(Color(REDISH).compose('white', blend='hue'), Color('white'))
self.assertColorEqual(Color(REDISH).compose(YELLOWISH, blend='hue'), Color('rgb(255 168.4 210.11)'))
self.assertColorEqual(Color(BLUISH).compose(REDISH, blend='hue'), Color('rgb(13.23 172.67 204.23)'))
self.assertColorEqual(Color(BLUISH).compose(YELLOWISH, blend='hue'), Color('rgb(113.71 231.66 255)'))
# sRGB must be forced
self.assertColorEqual(
Color(BLUISH).compose(YELLOWISH, blend='hue', space="display-p3"),
Color(BLUISH).compose(YELLOWISH, blend='hue', space="srgb")
)
def test_saturation(self):
"""Test hue."""
self.assertColorEqual(Color(REDISH).compose('black', blend='saturation'), Color('black'))
self.assertColorEqual(Color(REDISH).compose('white', blend='saturation'), Color('white'))
self.assertColorEqual(
Color(REDISH).compose(YELLOWISH, blend='saturation'),
Color('rgb(237.54 209.06 46.543)')
)
self.assertColorEqual(Color(BLUISH).compose(REDISH, blend='saturation'), Color('rgb(255 59.357 153.59)'))
self.assertColorEqual(Color(BLUISH).compose(YELLOWISH, blend='saturation'), Color('rgb(245.4 211.1 15.403)'))
# sRGB must be forced
self.assertColorEqual(
Color(BLUISH).compose(YELLOWISH, blend='saturation', space="display-p3"),
Color(BLUISH).compose(YELLOWISH, blend='saturation', space="srgb")
)
def test_luminosity(self):
"""Test luminosity."""
self.assertColorEqual(Color(REDISH).compose('black', blend='luminosity'), Color('rgb(128.6 128.6 128.6)'))
self.assertColorEqual(Color(REDISH).compose('white', blend='luminosity'), Color('rgb(128.6 128.6 128.6)'))
self.assertColorEqual(Color(REDISH).compose(YELLOWISH, blend='luminosity'), Color('rgb(161.06 137.04 0)'))
self.assertColorEqual(Color(BLUISH).compose(REDISH, blend='luminosity'), Color('rgb(255 86.175 167.49)'))
self.assertColorEqual(Color(BLUISH).compose(YELLOWISH, blend='luminosity'), Color('rgb(182.76 155.5 0)'))
# sRGB must be forced
self.assertColorEqual(
Color(BLUISH).compose(YELLOWISH, blend='luminosity', space="display-p3"),
Color(BLUISH).compose(YELLOWISH, blend='luminosity', space="srgb")
)
def test_color(self):
"""Test color."""
self.assertColorEqual(Color(REDISH).compose('black', blend='color'), Color('black'))
self.assertColorEqual(Color(REDISH).compose('white', blend='color'), Color('white'))
self.assertColorEqual(Color(REDISH).compose(YELLOWISH, blend='color'), Color('rgb(255 168.4 210.11)'))
self.assertColorEqual(Color(BLUISH).compose(REDISH, blend='color'), Color('rgb(0 177.73 212.9)'))
self.assertColorEqual(Color(BLUISH).compose(YELLOWISH, blend='color'), Color('rgb(113.71 231.66 255)'))
# sRGB must be forced
self.assertColorEqual(
Color(BLUISH).compose(YELLOWISH, blend='color', space="display-p3"),
Color(BLUISH).compose(YELLOWISH, blend='color', space="srgb")
)
| [
37811,
14402,
41198,
12881,
526,
15931,
198,
11748,
555,
715,
395,
198,
6738,
3124,
64,
485,
1330,
5315,
198,
6738,
764,
1330,
7736,
198,
198,
2,
29792,
326,
4439,
2495,
7310,
2482,
198,
22083,
18422,
796,
705,
2,
16072,
18,
67,
2079,... | 2.394801 | 4,886 |
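The expected values in test_multiply follow directly from the separable multiply formula Cr = Cb x Cs applied per channel in [0, 1]. A quick check of the REDISH-over-YELLOWISH expectation (a verification sketch, not part of the test suite):

REDISH = (252, 61, 153)      # '#fc3d99', the source
YELLOWISH = (245, 211, 17)   # '#f5d311', the backdrop

# multiply: normalize, take the product, scale back to 0-255
blended = [s * b / 255 for s, b in zip(REDISH, YELLOWISH)]
print(blended)  # [242.117..., 50.474..., 10.2] -> matches 'rgb(242.12 50.475 10.2)'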
import time
import pandas as pd
from brainflow.board_shim import (
    BoardIds,
    BoardShim,
    BrainFlowInputParams,
    BrainFlowError,
)
from timeflux.core.node import Node
class BrainFlow(Node):
    """Driver for BrainFlow.

    This plugin provides a unified interface for all boards supported by BrainFlow.

    Attributes:
        o (Port): Default output, provides DataFrame.

    Args:
        board (string|int): The board ID.
            Allowed values: numeric ID or name (e.g. ``synthetic``, ``cyton_wifi``,
            ``brainbit``, etc.).
        channels (list): The EEG channel labels.
            If not set, incrementing numbers will be used.
        command (string): Send a command to the board.
            Use it carefully and only if you understand what you are doing.
        debug (boolean): Print debug messages.
        **kwargs: The parameters specific for each board.
            Allowed arguments: ``serial_port``, ``mac_address``, ``ip_address``,
            ``ip_port``, ``ip_protocol``, ``serial_number``, ``other_info``.

    .. seealso::

        List of `supported boards <https://brainflow.readthedocs.io/en/stable/SupportedBoards.html>`_.

    Example:
        .. literalinclude:: /../examples/synthetic.yaml
           :language: yaml
    """
| [
11748,
640,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
3632,
11125,
13,
3526,
62,
1477,
320,
1330,
357,
198,
220,
220,
220,
5926,
7390,
82,
11,
198,
220,
220,
220,
5926,
2484,
320,
11,
198,
220,
220,
220,
14842,
37535,
20560,
... | 2.565737 | 502 |
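For context, the BrainFlow calls a driver like the one above wraps look roughly as follows. This is a generic BrainFlow usage sketch against the synthetic board, not the Timeflux node's actual update loop.

from brainflow.board_shim import BoardIds, BoardShim, BrainFlowInputParams

params = BrainFlowInputParams()  # serial_port, ip_address, etc. can stay empty for synthetic
board = BoardShim(BoardIds.SYNTHETIC_BOARD.value, params)
board.prepare_session()
board.start_stream()
# ... let some samples accumulate, then drain the ring buffer ...
data = board.get_board_data()  # channels x samples numpy array
board.stop_stream()
board.release_session()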
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import plotly.io as pio
from IMLearn.learners.regressors.polynomial_fitting import PolynomialFitting
from IMLearn.utils import utils
pio.templates.default = "simple_white"
def load_data(filename: str) -> pd.DataFrame:
"""
Load city daily temperature dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (Temp)
"""
df = pd.read_csv(filename, parse_dates=["Date"]).dropna().drop_duplicates()
df['DayOfYear'] = df['Date'].dt.dayofyear
df = df.loc[df['Temp'] < 60]
df = df.loc[df['Temp'] > -60]
df = df.loc[df['Day'] >= 1]
df = df.loc[df['Day'] <= 31]
return df
if __name__ == '__main__':
np.random.seed(0)
# Question 1 - Load and preprocessing of city temperature dataset
data = load_data("C:\CS\IML\IML.HUJI\datasets\City_Temperature.csv")
# Question 2 - Exploring data for specific country
# raise NotImplementedError()
# question2(data)
# Question 3 - Exploring differences between countries
#question3(data)
# Question 4 - Fitting model for different values of `k`
question4(data)
# Question 5 - Evaluating fitted model on different countries
# raise NotImplementedError()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
7110,
306,
13,
42712,
355,
279,
87,
198,
11748,
7110,
306,
13,
34960,
62,
48205,
355,
467,
198,
11748,
7110,
306,
13,
952,
355,
279,
952,
198,
198,
673... | 2.766798 | 506 |
from more_itertools import one
from .._utils import names_and_abbrevs
from ..unit import CURRENT, Dimension, LENGTH, MASS, TEMPERATURE, TIME, Unit
base_unit_map = {
    (names, abbrevs): Unit(dim, name=one(abbrevs))
    for (names, abbrevs), dim in {
        names_and_abbrevs(item): {val: 1} if isinstance(val, Dimension) else val
        for item, val in {
            (('meter', 'metre'), 'm'): LENGTH, 'second': TIME,
            ('kilogram', 'kg'): MASS, 'Ampere': CURRENT, 'Kelvin': TEMPERATURE,
        }.items()
    }.items()
}
| [
6738,
517,
62,
270,
861,
10141,
1330,
530,
198,
198,
6738,
11485,
62,
26791,
1330,
3891,
62,
392,
62,
397,
4679,
14259,
198,
6738,
11485,
20850,
1330,
327,
39237,
11,
34024,
11,
406,
49494,
11,
337,
10705,
11,
309,
3620,
18973,
40086,... | 2.303419 | 234 |
# Copyright 2019 - Nokia Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cliff import lister
from vitrageclient.common import utils
class ServiceList(lister.Lister):
    """List all services"""
| [
2,
220,
15069,
13130,
532,
26182,
10501,
198,
2,
198,
2,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
... | 3.720207 | 193 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import time
from redis_rate_limit import RateLimit, RateLimiter, TooManyRequests
if __name__ == '__main__':
    unittest.main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
555,
715,
395,
198,
11748,
640,
198,
6738,
2266,
271,
62,
4873,
62,
32374,
1330,
14806,
39184,
11,
14806,
19352,... | 2.608108 | 74 |
import os
from conans import ConanFile, CMake, AutoToolsBuildEnvironment, tools
from conans.util import files
| [
11748,
28686,
198,
6738,
369,
504,
1330,
31634,
8979,
11,
327,
12050,
11,
11160,
33637,
15580,
31441,
11,
4899,
198,
6738,
369,
504,
13,
22602,
1330,
3696,
628
] | 3.964286 | 28 |
import itertools as it
import networkx as nx
import numpy as np
from wepy.analysis.parents import DISCONTINUITY_VALUE, \
    parent_panel, net_parent_table, \
    ancestors, sliding_window
| [
11748,
340,
861,
10141,
355,
340,
198,
198,
11748,
3127,
87,
355,
299,
87,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
356,
9078,
13,
20930,
13,
23743,
1330,
13954,
37815,
1268,
52,
9050,
62,
39488,
11,
3467,
198,
220,
220,
... | 2.016129 | 124 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import time
import string
import itertools
from elasticsearch import Elasticsearch, helpers
INDEX = 'passwd'
INDEX_PREFIX = 'pwd_'
DOC_TYPE = 'account'
INDEX_CFG = {
    "settings": {
        "index": {
            #"number_of_shards": 8,
            "refresh_interval": -1,
            "number_of_replicas": 0
        },
        "analysis": {
            "filter": {
                "tld_filter": {
                    "type": "pattern_capture",
                    "preserve_original": False,
                    "patterns": ["\\.([^\\.]+?)$"]
                }
            },
            "analyzer": {
                "lc_analyzer": {
                    "type": "custom",
                    "tokenizer": "keyword",
                    "filter": ["lowercase"]
                },
                "user_analyzer": {
                    "type": "custom",
                    "tokenizer": "user_tokenizer",
                    "filter": ["lowercase"]
                },
                "domain_analyzer": {
                    "type": "custom",
                    "tokenizer": "domain_tokenizer",
                    "filter": ["lowercase"]
                },
                "domain_notld_analyzer": {
                    "type": "custom",
                    "tokenizer": "domain_notld_tokenizer",
                    "filter": ["lowercase"]
                },
                "tld_analyzer": {
                    "type": "custom",
                    "tokenizer": "tld_tokenizer",
                    "filter": ["lowercase"]
                }
            },
            "tokenizer": {
                "user_tokenizer": {
                    "type": "pattern",
                    "pattern": "(.+?)@",
                    "group": 1
                },
                "domain_tokenizer": {
                    "type": "pattern",
                    "pattern": "@(.+)",
                    "group": 1
                },
                "domain_notld_tokenizer": {
                    "type": "pattern",
                    "pattern": "@(.+)\\.",
                    "group": 1
                },
                "tld_tokenizer": {
                    "type": "pattern",
                    "pattern": "\\.([^\\.]+?)$",
                    "group": 1
                }
            },
            "normalizer": {
                "lc_normalizer": {
                    "type": "custom",
                    "char_filter": [],
                    "filter": ["lowercase"]
                }
            }
        }
    },
    "mappings": {
        DOC_TYPE: {
            "properties": {
                "email": {
                    "type": "text",
                    "analyzer": "simple",
                    "fields": {
                        "raw": {
                            "type": "keyword",
                            "normalizer": "lc_normalizer"
                        }
                    }
                },
                "username": {
                    "type": "text",
                    "analyzer": "simple",
                    "fields": {
                        "raw": {
                            "type": "keyword",
                            "normalizer": "lc_normalizer"
                        }
                    }
                },
                "domain": {
                    "type": "keyword",
                    "normalizer": "lc_normalizer"
                },
                "domain_notld": {
                    "type": "keyword",
                    "normalizer": "lc_normalizer"
                },
                "tld": {
                    "type": "keyword",
                    "normalizer": "lc_normalizer"
                },
                "password": {
                    "type": "text",
                    "analyzer": "simple",
                    "fields": {
                        "raw": {
                            "type": "keyword"
                        }
                    }
                },
                "password_length": {
                    "type": "short"
                },
                "source": {
                    "type": "short"
                }
            }
        }
    }
}


if __name__ == '__main__':
    run(sys.argv[1])
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
25064,
198,
11748,
640,
198,
11748,
4731,
198,
11748,
340,
861,
10141,
198,
6738,
27468,
12947,
1330,
4... | 1.46049 | 2,936 |
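The `run(sys.argv[1])` entry point referenced above is not included in the excerpt. Given the `helpers` import and the mapping, it presumably streams bulk-index actions into Elasticsearch. The sketch below assumes a plain `email:password` line format for the input file, which is a guess, as is indexing everything into the single INDEX.

def iter_actions(path, source_id=0):
    # Yield one bulk action per 'email:password' line (assumed input format).
    with open(path, errors='ignore') as fh:
        for line in fh:
            email, _, password = line.rstrip('\n').partition(':')
            yield {
                '_index': INDEX,
                '_type': DOC_TYPE,
                '_source': {
                    'email': email,
                    'password': password,
                    'password_length': len(password),
                    'source': source_id,
                },
            }

def run(path):
    es = Elasticsearch()
    helpers.bulk(es, iter_actions(path))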
import seaborn as sns
import matplotlib.pyplot as plt
def plot_boxplot_best_framework_designs(data, plot_file_name=False, latex_font=True):
"""
Parameters
----------
data: Data for plot
plot_file_name: Optional name for plot
latex_font: Whether latex font should be used
Returns
-------
"""
if latex_font:
# Use LaTex Font
plt.rc('text', usetex=True)
plt.rc('font', family='serif', size=15)
fontsize = 15
params = {'axes.labelsize': fontsize, 'axes.titlesize': fontsize, 'legend.fontsize': fontsize,
'xtick.labelsize': fontsize, 'ytick.labelsize': fontsize}
plt.rcParams.update(params)
plt.style.use('ggplot')
plt.tight_layout()
# Create Plot
ax = sns.boxplot(x="Metric name", y="Metric value", hue="Framework design", data=data)
# plt.title("Performance of different framework designs")
plt.xlabel("Metric name", fontsize=fontsize)
plt.ylabel("Metric value", fontsize=fontsize)
plt.legend(fontsize=fontsize)
if plot_file_name:
plt.savefig("Plots/" + str(plot_file_name))
plt.show()
| [
11748,
384,
397,
1211,
355,
3013,
82,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
628,
198,
4299,
7110,
62,
3524,
29487,
62,
13466,
62,
30604,
62,
26124,
82,
7,
7890,
11,
7110,
62,
7753,
62,
3672,
28,
25101,
11,
... | 2.413276 | 467 |
# Generated by Django 2.2.7 on 2019-12-13 03:23
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
22,
319,
13130,
12,
1065,
12,
1485,
7643,
25,
1954,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
# classic implementation of Singleton Design pattern
# main method
if __name__ == "__main__":

    # create object of Singleton class
    obj = Singleton()
    print(obj)

    # pick the instance of the class
    obj = Singleton.getInstance()
    print(obj)
| [
2,
6833,
7822,
286,
5573,
10565,
8495,
3912,
198,
198,
2,
1388,
2446,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
628,
197,
2,
2251,
2134,
286,
5573,
10565,
5016,
198,
197,
26801,
796,
5573,
10565,
3419,
198,
197,
... | 3.347222 | 72 |
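The `Singleton` class itself is not part of the excerpt above. A minimal classic implementation that matches both usages shown (`Singleton()` and `Singleton.getInstance()` returning the same object) would look like this; it is a sketch, not the row's original definition.

class Singleton:
    __instance = None

    def __new__(cls):
        # Always hand back the single shared instance.
        if cls.__instance is None:
            cls.__instance = super().__new__(cls)
        return cls.__instance

    @staticmethod
    def getInstance():
        # Create on first access, then reuse.
        return Singleton()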
#! /usr/bin/env python
from xml.dom import minidom
from ctypes import c_longlong
from mathml import cut_nomeaning_text, parse_file
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
35555,
13,
3438,
1330,
949,
312,
296,
198,
6738,
269,
19199,
1330,
269,
62,
6511,
6511,
198,
6738,
10688,
4029,
1330,
2005,
62,
77,
462,
7574,
62,
5239,
11,
21136,
62,
... | 2.933333 | 45 |
import re
import csv
import StringIO
import datetime
from billy.scrape.bills import BillScraper, Bill
from billy.scrape.votes import Vote
import lxml.html
import scrapelib
| [
11748,
302,
198,
11748,
269,
21370,
198,
11748,
10903,
9399,
198,
11748,
4818,
8079,
198,
198,
6738,
2855,
88,
13,
1416,
13484,
13,
65,
2171,
1330,
3941,
3351,
38545,
11,
3941,
198,
6738,
2855,
88,
13,
1416,
13484,
13,
29307,
1330,
19... | 3.122807 | 57 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import string
import time
import keystoneauth1
from keystoneauth1 import discover
from openstack import _log
from openstack import exceptions
def urljoin(*args):
    """A custom version of urljoin that simply joins strings into a path.

    The real urljoin takes into account web semantics like when joining a url
    like /path this should be joined to http://host/path as it is an anchored
    link. We generally won't care about that in client.
    """
    return '/'.join(str(a or '').strip('/') for a in args)


def iterate_timeout(timeout, message, wait=2):
    """Iterate and raise an exception on timeout.

    This is a generator that will continually yield and sleep for
    wait seconds, and if the timeout is reached, will raise an exception
    with <message>.
    """
    log = _log.setup_logging('openstack.iterate_timeout')

    try:
        # None as a wait winds up flowing well in the per-resource cache
        # flow. We could spread this logic around to all of the calling
        # points, but just having this treat None as "I don't have a value"
        # seems friendlier
        if wait is None:
            wait = 2
        elif wait == 0:
            # wait should be < timeout, unless timeout is None
            wait = 0.1 if timeout is None else min(0.1, timeout)
        wait = float(wait)
    except ValueError:
        raise exceptions.SDKException(
            "Wait value must be an int or float value. {wait} given"
            " instead".format(wait=wait))

    start = time.time()
    count = 0
    while (timeout is None) or (time.time() < start + timeout):
        count += 1
        yield count
        log.debug('Waiting %s seconds', wait)
        time.sleep(wait)
    raise exceptions.ResourceTimeout(message)


def get_string_format_keys(fmt_string, old_style=True):
    """Gets a list of required keys from a format string

    Required mostly for parsing base_path urls for required keys, which
    use the old style string formatting.
    """
    if old_style:
        a = AccessSaver()
        fmt_string % a
        return a.keys
    else:
        keys = []
        for t in string.Formatter().parse(fmt_string):
            if t[1] is not None:
                keys.append(t[1])
        return keys


def supports_microversion(adapter, microversion):
    """Determine if the given adapter supports the given microversion.

    Checks the min and max microversion asserted by the service and checks
    to make sure that ``min <= microversion <= max``.

    :param adapter:
        :class:`~keystoneauth1.adapter.Adapter` instance.
    :param str microversion:
        String containing the desired microversion.
    :returns: True if the service supports the microversion.
    :rtype: bool
    """
    endpoint_data = adapter.get_endpoint_data()
    if (endpoint_data.min_microversion
            and endpoint_data.max_microversion
            and discover.version_between(
                endpoint_data.min_microversion,
                endpoint_data.max_microversion,
                microversion)):
        return True
    return False


def pick_microversion(session, required):
    """Get a new microversion if it is higher than session's default.

    :param session: The session to use for making this request.
    :type session: :class:`~keystoneauth1.adapter.Adapter`
    :param required: Version that is required for an action.
    :type required: String or tuple or None.
    :return: ``required`` as a string if the ``session``'s default is too low,
        the ``session``'s default otherwise. Returns ``None`` if both
        are ``None``.
    :raises: TypeError if ``required`` is invalid.
    """
    if required is not None:
        required = discover.normalize_version_number(required)

    if session.default_microversion is not None:
        default = discover.normalize_version_number(
            session.default_microversion)

        if required is None:
            required = default
        else:
            required = (default if discover.version_match(required, default)
                        else required)

    if required is not None:
        return discover.version_to_string(required)


def maximum_supported_microversion(adapter, client_maximum):
    """Determine the maximum microversion supported by both client and server.

    :param adapter: :class:`~keystoneauth1.adapter.Adapter` instance.
    :param client_maximum: Maximum microversion supported by the client.
        If ``None``, ``None`` is returned.

    :returns: the maximum supported microversion as string or ``None``.
    """
    if client_maximum is None:
        return None

    # NOTE(dtantsur): if we cannot determine supported microversions, fall back
    # to the default one.
    try:
        endpoint_data = adapter.get_endpoint_data()
    except keystoneauth1.exceptions.discovery.DiscoveryFailure:
        endpoint_data = None

    if endpoint_data is None:
        log = _log.setup_logging('openstack')
        log.warning('Cannot determine endpoint data for service %s',
                    adapter.service_type or adapter.service_name)
        return None

    if not endpoint_data.max_microversion:
        return None

    client_max = discover.normalize_version_number(client_maximum)
    server_max = discover.normalize_version_number(
        endpoint_data.max_microversion)

    if endpoint_data.min_microversion:
        server_min = discover.normalize_version_number(
            endpoint_data.min_microversion)
        if client_max < server_min:
            # NOTE(dtantsur): we may want to raise in this case, but this keeps
            # the current behavior intact.
            return None

    result = min(client_max, server_max)
    return discover.version_to_string(result)
| [
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
921,
743,
7330,
198,
2,
257,
4866,
286,
262,
13789,
379,
198,
2,... | 2.798937 | 2,258 |
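`get_string_format_keys` above relies on an `AccessSaver` helper that is not shown in this excerpt. Its behavior can be inferred from the `fmt_string % a` / `a.keys` usage: old-style `%(name)s` formatting calls `__getitem__` for every named key, so a recorder like the following (a reconstruction, not necessarily the upstream definition) suffices.

class AccessSaver:
    """Records each key requested during %-style string formatting."""

    def __init__(self):
        self.keys = []

    def __getitem__(self, key):
        self.keys.append(key)
        return ''  # any string keeps the '%(key)s' substitution happy

# get_string_format_keys('/servers/%(server_id)s/ips') -> ['server_id']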
from django.contrib import admin
try:
    from django.conf.urls import patterns, include
except ImportError:  # DROP_WITH_DJANGO13 pragma: no cover
    from django.conf.urls.defaults import patterns, include

# DROP_WITH_DJANGO16
admin.autodiscover()

urlpatterns = patterns('',
    (r'^admin/', include(admin.site.urls)),
)
# for shell & runserver: Django 1.3 and 1.4 don't need this, but 1.5 does
# it will only work if DEBUG is True
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
urlpatterns += staticfiles_urlpatterns()
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
28311,
25,
198,
220,
220,
220,
422,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
7572,
11,
2291,
198,
16341,
17267,
12331,
25,
220,
1303,
10560,
3185,
62,
54,
10554,
62,
35028,... | 2.889474 | 190 |
#!/usr/bin/env python
#-*- encoding: utf8 -*-
import warnings
warnings.filterwarnings('ignore', category=RuntimeWarning)
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem.lancaster import LancasterStemmer
from nltk import classify, pos_tag, word_tokenize
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.models import Sequential, load_model
from keras.layers import Dense, LSTM, Bidirectional, Embedding, Dropout
from keras.callbacks import ModelCheckpoint
from sklearn.model_selection import train_test_split
from keras.callbacks import EarlyStopping
from sklearn.metrics import classification_report, confusion_matrix
from mind_msgs.msg import EntitiesIndex, Reply, ReplyAnalyzed
import rospy
import rospkg
import os
import numpy as np
import pandas as pd
import nltk
import re
from sklearn.preprocessing import OneHotEncoder
import matplotlib.pyplot as plt
# def predictions(text):
#     clean = re.sub(r'[^ a-z A-Z 0-9]', " ", text)
#     test_word = word_tokenize(clean)
#     test_word = [w.lower() for w in test_word]
#     test_ls = word_tokenizer.texts_to_sequences(test_word)
#     print(test_word)
#     # Check for unknown words
#     if [] in test_ls:
#         test_ls = list(filter(None, test_ls))
#     test_ls = np.array(test_ls).reshape(1, len(test_ls))
#     x = padding_doc(test_ls, max_length)
#     print(x.shape)
#     # pred = model.predict_proba(x)
#     pred = model.predict_classes(x)
#     return pred


if __name__ == '__main__':
    rospy.init_node('tag_generator', anonymous=False)
    m = TagGenerator()
    rospy.spin()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
12,
9,
12,
21004,
25,
3384,
69,
23,
532,
9,
12,
198,
198,
11748,
14601,
198,
40539,
654,
13,
24455,
40539,
654,
10786,
46430,
3256,
6536,
28,
41006,
20361,
8,
198,
198,
6738,
29... | 2.681818 | 638 |
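The commented-out `predictions` helper above calls `padding_doc` and `word_tokenizer`, which are defined elsewhere in the original script. Given the `pad_sequences` and `Tokenizer` imports in the row, they are presumably along these lines (an assumption, not the row's code):

word_tokenizer = Tokenizer()  # fit on the training vocabulary elsewhere

def padding_doc(encoded_doc, max_length):
    # Right-pad each encoded sequence so every input matches the model's length.
    return pad_sequences(encoded_doc, maxlen=max_length, padding="post")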
import logging, os
logging.basicConfig(level=os.environ.get("LOGLEVEL","INFO"))
log = logging.getLogger(__name__)
import base64, json, pickle, sys
from ._version import __version__
from .choice import Choice,TextInput,PointBuy,AssignAbstractGear
from .dietype import DieType
from .sheetmaker import SheetMaker
from .func import getModel
from .options import trees as TREES
class CharacterSheet:
    """ A class to represent an EoS character sheet

    ***

    Attributes
    ----------
    filled: bool
        Whether the character sheet has all its valid options
        filled out in self.data. If False, cannot save or flush.
        Set to True when character is loaded or when choices
        are made through an Interface object.
    options: list
        List of all creation trees. Each item is a Choice
        object.
    data: list
        List of all user selections for given character
    choice_names: dict
        Names of user selections to be displayed in upper
        right box of character sheet
    qualities: dict
        Dictionary of quality names and values
    skills: dict
        Dictionary of skill names, levels, and linked
        qualities
    combat_stats: dict
        Dictionary of Speed, AV, Toughness, and
        Shooting / Fighting Dice
    trivia: list
        List of trained trivia
    traits: list
        List of traits
    weapons: list
        List of weapons
    gear: list
    money: int
    _abstract_potions: dict
        Tracks number of each level the character is
        owed.
    _abstract_weapons: list
        List base weapons owned (before modifications).
    _abstract_modifications: dict
        Tracks number of each level the character is
        owed.
    _abstract_ammunition: dict
        Tracks number of each level the character is
        owed.
    _abstract_grenades: dict
        Tracks number of each level the character is
        owed.
    _abstract_kits: dict
        Tracks number of each level the character is
        owed.

    Methods
    -------
    load: bool
        Read pickled data from text file
    save: bool
        Write pickled data to text file
    apply: bool
        Apply a Choice to the character
        sheet.
    flush
    output: bool
        Print the character sheet to a
        beautiful pdf file.
    edit: bool
        TODO
    """

    def load(self, file_path) -> bool:
        """Read data from text file"""
        file_path = file_path.strip("'").strip('"')
        with open(file_path, 'rb') as rf:
            loadedPickle = pickle.loads(rf.read())
        if self.__version__ != loadedPickle['__version__']:
            log.warning(f"Loaded character created in version {loadedPickle['__version__']}, you are running version {self.__version__}. Potential compatibility issues.")
        self.treePath = loadedPickle['treePath']
        # load text input stuff
        self.data.append(TextInput(name="Name", value=loadedPickle['name']))
        self.data.append(TextInput(name="Motivation", value=loadedPickle['motivation']))
        # run trees
        treePath = self.treePath
        try:
            for t in TREES:
                if t.name in ["Skills", "Trivia", "Name", "Motivation", "Assign Abstract Gear"]:
                    log.debug(f"Skipping {t.name}")
                else:
                    treePath = autoTree(t, treePath)
        except:
            log.exception("Loaded tree path incompatible with options.trees")
            return False
        # load skills
        skills = PointBuy(name="Skills", max_level=3, starting_level=0, categories=getModel('model_skills.json'), starting_points=5, points_per_level={1: 0, 2: 1, 3: 3}, root_id=6)
        skills.categories = loadedPickle['skills']
        self.data.append(skills)
        # load trivia
        trivia = PointBuy(name="Trivia", max_level=1, starting_level=0, point_per_level={0: 0, 1: 1}, categories=getModel('model_trivia.json'), root_id=9)
        trivia.categories = loadedPickle['trivia']
        self.data.append(trivia)
        # load gear assignments
        ## TODO
        assign_abstract_gear = AssignAbstractGear(name="Assign Abstract Gear")
        assign_abstract_gear.assign(self)
        assign_abstract_gear.gear += loadedPickle['assigned_gear']
        for weapon_pickle in loadedPickle['weapon_pickles']:
            assign_abstract_gear.weapons.append(pickle.loads(weapon_pickle))
        self.data.append(assign_abstract_gear)
        # flush and return
        self.filled = True
        self.flush()
        return True

    def save(self, file_path) -> bool:
        """Write data to text file"""
        if not self.filled:
            log.warning("Cannot save incomplete character")
            return False
        # create dictionary of values
        outDict = {}
        outDict['__version__'] = self.__version__
        outDict['treePath'] = self.treePath
        outDict['name'] = self.choice_names["Name"]
        outDict['motivation'] = self.choice_names["Motivation"]
        for node in self.data:
            if node.name == "Skills":
                # skills
                outDict["skills"] = dict(node.categories)
            elif node.name == "Trivia":
                # trivia
                outDict["trivia"] = dict(node.categories)
            elif node.name == "Assign Abstract Gear":
                # non-weapon gear
                outDict["assigned_gear"] = node.gear
                # modded weapons
                outDict["weapon_pickles"] = []
                for weapon in self.weapons:
                    outDict["weapon_pickles"].append(pickle.dumps(weapon))
        with open(file_path, 'wb') as wf:
            # wf.write('{')
            # # version
            # wf.write(f'"__version__":"{self.__version__}"')
            # # treePath
            # wf.write(',"treePath":')
            # wf.write(json.dumps(self.treePath))
            # # text fields
            # wf.write(f',"name":"{self.choice_names["Name"]}"')
            # wf.write(f',"motivation":"{self.choice_names["Motivation"]}"')
            # for node in self.data:
            #     if node.name == "Skills":
            #         # skills
            #         wf.write(f',"skills":{json.dumps(node.categories)}')
            #     elif node.name == "Trivia":
            #         # trivia
            #         wf.write(f',"trivia":{json.dumps(node.categories)}')
            #     elif node.name == "Assign Abstract Gear":
            #         # non-weapon gear
            #         wf.write(f',"assigned_gear":{json.dumps(node.gear)}')
            #         # modded weapons
            #         wf.write(f',"weapon_pickles":["')
            #         wf.write('","'.join(["weapon_pickled="+str(base64.b64encode(pickle.dumps(weapon))) for weapon in self.weapons]))
            #         wf.write(f'"]')
            # wf.write('}')
            pickle.dump(outDict, wf)
        return True

    def apply(self, option) -> bool:
        """Apply a choice to the character sheet"""
        try:
            option.implement(self)
        except:
            log.exception("Failed to apply")
            return False
        return True

    def flush(self) -> bool:
        """Reset character and apply all current choices"""
        if not self.filled:
            log.warning("Character data incomplete, cannot flush")
            return False
        self.loadBlank()
        for c in self.data:
            self.apply(c)
        return True

    def output(self, pdf_path) -> bool:
        """Print the character sheet to a beautiful PDF file

        ***

        Parameters
        ----------
        pdf_path: str
            Path to output file. Must end in .pdf extension.
            If file exists, it will be overwritten.
        """
        if not self.flush():
            log.warning("Failed to flush, cannot output sheet")
            return False
        maker = SheetMaker()
        try:
            maker.read(self)
            maker.make(pdf_path)
            return True
        except:
            log.exception("Failed to output")
            return False

    def edit(self) -> bool:
        """TODO"""
        return False | [
11748,
18931,
11,
28686,
198,
6404,
2667,
13,
35487,
16934,
7,
5715,
28,
418,
13,
268,
2268,
13,
1136,
7203,
25294,
2538,
18697,
2430,
10778,
48774,
198,
6404,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
198,
198,
11748,
... | 2.688969 | 2,511 |
from pydantic import BaseModel, Field
from pydantic.networks import EmailStr
| [
6738,
279,
5173,
5109,
1330,
7308,
17633,
11,
7663,
198,
6738,
279,
5173,
5109,
13,
3262,
5225,
1330,
9570,
13290,
628
] | 3.714286 | 21 |
#!/usr/bin/python3
# python
import http.client
import httplib2
import os
import random
import sys
import time
import webbrowser
import threading
import pprint
# google
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from googleapiclient.http import MediaFileUpload
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.file import Storage
from oauth2client.tools import argparser, run_flow
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
2,
21015,
198,
11748,
2638,
13,
16366,
198,
11748,
1841,
489,
571,
17,
198,
11748,
28686,
198,
11748,
4738,
198,
11748,
25064,
198,
11748,
640,
198,
11748,
3992,
40259,
198,
11748,
4... | 3.442748 | 131 |
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# There's no good way in cosmos to create a tag
# based on output or to conditionally run a process
# This is a wrapper to run the umi utilities.
#
# Run a program (SeqPrep) for a directory in batches
# This is a helper script for Martin Aryee's
# scripts to demultiplex Illumina sequencing
# reads with sample specific and molecule
# specific tags.
# https://github.com/aryeelab/umi/wiki
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
__author__ = 'Allison MacLeay'
import sys
import os
import argparse
import time
# -----------------------------------------
# Get bash command for specified
# file name prefix
# -----------------------------------------
# -----------------------------------------
# Return all unique file prefixes
# -----------------------------------------
# -----------------------------------------
# Delay completion of script until all
# files are written
# -----------------------------------------
# -----------------------------------------
# LSF utilities
# -----------------------------------------
# -----------------------------------------
# MAIN
# run a program (SeqPrep) for all files in a directory
# that have the same prefix
#-----------------------------------------
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Run command for removing adapter sequecnes in batches of similarly prefixed names.")
parser.add_argument('--dir', default='.', help='directory containing output of umi demultiplex')
parser.add_argument('--script', default='./SeqPrep',
help='SeqPrep absolute path. default is SeqPrep in current directory')
parser.add_argument('--a1', required=True, help='Adapter 1')
parser.add_argument('--a2', required=True, help='Adapter 2')
parser.add_argument('--out', default='tagout', help='directory to deposit output files')
parser.add_argument('--log', default='batch_log', help='directory to deposit bsub log files')
parser.add_argument('--bsub_off', action='store_true', help='turn bsub off to test on systems without lsf')
#parser.add_argument('--undet', action='store_false', help='include reads less than parameter set by min reads. Default will skip files named undetermined')
args = parser.parse_args()
p = {}
lsf_group = ''
lsf_group_cmd = ''
if hasattr(args, 'dir'):
p['path'] = args.dir
if hasattr(args, 'out'):
p['out'] = args.out
os.system('mkdir -p ' + args.out)
if hasattr(args, 'log'):
os.system('mkdir -p ' + args.log)
os.system('ls ' + p['path'] + ' >> ' + args.log + '/ls_inputdir.txt')
if hasattr(args, 'a1'):
p['a1'] = args.a1
if hasattr(args, 'a2'):
p['a2'] = args.a2
f = get_names(args.dir)
if len(f) < 1:
print "Error: No file prefixes were found in " + args.dir + "."
count_lsf = 0
if not args.bsub_off:
lsf_group = get_group_id("/demux")
lsf_group_cmd = ' -g ' + lsf_group
for tag in f:
if (tag.find('undetermined') > -1 ):
# skip undetermined for now
cmd = 'echo skipping undetermined files'
elif (args.bsub_off):
cmd = get_cmd(tag, args.script, p)
else:
cmd = 'bsub -q medium -u am282 -o ' + os.path.join(args.log, 'lsf_out.log') + ' -e ' + os.path.join(
args.log, 'lsf_err.log') + lsf_group_cmd + ' ' + get_cmd(tag, args.script, p)
# Keep track of lsf job for listener
count_lsf = count_lsf + 1
print('batch process running command:\n' + cmd)
os.system(cmd)
if (count_lsf > 0):
if lsf_group != '':
check_done(lsf_group, count_lsf)
print('batch_process done')
| [
2,
220,
27156,
27156,
27156,
15116,
198,
2,
1318,
338,
645,
922,
835,
287,
39385,
284,
2251,
257,
7621,
198,
2,
1912,
319,
5072,
393,
284,
4006,
453,
1057,
257,
1429,
198,
2,
770,
318,
257,
29908,
284,
1057,
262,
334,
11632,
20081,
... | 2.829993 | 1,347 |
import os
from time import sleep

# Minimal assumption: file_name() and to_pct() are called below but their
# definitions were missing from this sample; these are plausible stand-ins.
def file_name():
    """Return this script's own file name."""
    return os.path.basename(__file__)

def to_pct(part, whole):
    """Return an integer percentage of part out of whole."""
    return int(round(part / whole * 100))
sleep(0.5)
targets=os.listdir('./')
targets.remove(file_name())
# remove __init__.py
if('__init__.py' in targets):
targets.remove('__init__.py')
# remove everything that isn't a python file
for i in list(targets):
if('.py' != i[-3:]):
targets.remove(i)
with open('./__init__.py','w') as f:
f.write('# this file makes all functions in this directory available as a package.\n\n')
count = len(targets)*1.0
while(len(targets)):
sleep(0.1)
i = targets.pop(0)
print('{:>3}% - {}'.format(
to_pct((count-len(targets)),count),
i))
f.write('from %s import *\n'%(i[:-3]))
print('')
print('Done!')
| [
11748,
28686,
198,
6738,
640,
1330,
3993,
198,
198,
42832,
7,
15,
13,
20,
8,
198,
198,
83,
853,
1039,
28,
418,
13,
4868,
15908,
7,
4458,
14,
11537,
198,
198,
83,
853,
1039,
13,
28956,
7,
7753,
62,
3672,
28955,
198,
198,
2,
4781,... | 2.261324 | 287 |
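For a directory containing a.py and b.py next to the script, the generator above would emit an __init__.py roughly like this (module names illustrative):

# this file makes all functions in this directory available as a package.

from a import *
from b import *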
"""Docstring."""
from typing import Optional, Union
from .sub import subfoo # NOQA
class Baz:
"""Baz test class."""
bute = 1
class Foo:
"""Foo test class."""
attr: str = 'test'
type_attr = Baz
def meth(self) -> Baz:
"""Test method."""
def selfref(self) -> "Foo":
"""Return self."""
def __call__(self) -> Baz:
"""Test call."""
def bar() -> Foo:
"""bar test function."""
def optional() -> Optional[Foo]:
"""Return optional type."""
def optional_manual() -> Union[None, Foo]:
"""Return manually constructed optional type."""
def optional_counter() -> Union[Foo, Baz]:
"""Failing case for incorrect optional type handling."""
def compile():
"""Shadows built in compile function."""
class Child(Foo):
"""Foo child class."""
| [
37811,
23579,
8841,
526,
15931,
198,
6738,
19720,
1330,
32233,
11,
4479,
198,
198,
6738,
764,
7266,
1330,
850,
21943,
220,
1303,
8005,
48,
32,
628,
198,
4871,
47099,
25,
198,
220,
220,
220,
37227,
33,
1031,
1332,
1398,
526,
15931,
628... | 2.74 | 300 |
import secrets
import random
import string
| [
11748,
13141,
198,
11748,
4738,
198,
11748,
4731,
628
] | 4.888889 | 9 |
# This Python file uses the following encoding: utf-8
import hashlib
from typing import List
from aiogram.types import InlineQuery, \
InputTextMessageContent, InlineQueryResultArticle
from core import dp, bot, lazy_get_text, cb as bank_api, crypto_price, Button
@dp.inline_handler()
async def inline_echo(inline_query: InlineQuery) -> InlineQueryResultArticle:
"""
:param inline_query:
:return:
"""
text = inline_query.query
res = await bank_api.build_list_coin()
crypto = await crypto_price.coin_list()
result_id: str = hashlib.sha256(text.encode()).hexdigest()
result_list: List[InlineQueryResultArticle] = []
if text in res.keys():
input_content = InputTextMessageContent(lazy_get_text(
"""название {name}
стоимость {name} {valvue}₽
дата {date}
""").format(name=text, valvue=res[text]["valvue"],
date=bank_api.date))
item = InlineQueryResultArticle(
id=result_id,
title=lazy_get_text('{name} {valvue}').format(name=text, valvue=res[text]["valvue"]),
input_message_content=input_content
)
elif text in crypto:
id_coin = crypto[text]["id"]
price = (await crypto_price.simple_price(ids=id_coin, vs_currestring="rub"))[id_coin]["rub"]
input_content = InputTextMessageContent(
lazy_get_text("""название {name}\nстоимость {name} {valvue}₽\nдата {date} """
).format(name=text, valvue=price, date=bank_api.date)
)
item = InlineQueryResultArticle(
id=result_id,
title=lazy_get_text('{name} {price}').format(name=text, price=price),
input_message_content=input_content
)
elif text == "rub":
id_coin = crypto["btc"]["id"]
price = (await crypto_price.simple_price(ids=id_coin, vs_currestring="rub"))[id_coin]["rub"]
input_content = InputTextMessageContent(
lazy_get_text("""название {name}\nстоимость 1 {name} \n{btc} btc\n$ {usd} \nдата {date} """
).format(name=text, btc=(1 / price), usd=(1 / res["USD"]["valvue"]), date=bank_api.date)
)
item = InlineQueryResultArticle(
id=result_id,
title=lazy_get_text('{name} {price} btc ').format(name=text, price=(1 / price)),
input_message_content=input_content
)
else:
input_content = InputTextMessageContent(
lazy_get_text("нет такой валюты\nдоступные {name}").format(name=list(res.keys())))
result_id: str = hashlib.md5(text.encode()).hexdigest()
item = InlineQueryResultArticle(
id=result_id,
title=lazy_get_text("нет такой валюты"),
input_message_content=input_content
)
result_list.append(item)
return await bot.answer_inline_query(inline_query.id, results=result_list, cache_time=1)
| [
2,
770,
11361,
2393,
3544,
262,
1708,
21004,
25,
3384,
69,
12,
23,
198,
11748,
12234,
8019,
198,
6738,
19720,
1330,
7343,
198,
198,
6738,
257,
72,
21857,
13,
19199,
1330,
554,
1370,
20746,
11,
3467,
198,
220,
220,
220,
23412,
8206,
... | 2.036177 | 1,465 |
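The handler above derives every InlineQueryResultArticle id from the query text, so repeated queries produce stable ids. That detail in isolation:

import hashlib

def result_id_for(query_text: str) -> str:
    # sha256 hexdigest is 64 characters, within Telegram's 64-byte id limit
    return hashlib.sha256(query_text.encode()).hexdigest()

assert result_id_for('btc') == result_id_for('btc')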
# coding: utf-8
"""
Spinnaker API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import spinnaker_python_client
from spinnaker_python_client.api.cluster_controller_api import ClusterControllerApi  # noqa: E501
from spinnaker_python_client.rest import ApiException
class TestClusterControllerApi(unittest.TestCase):
"""ClusterControllerApi unit test stubs"""
def test_get_cluster_load_balancers_using_get(self):
"""Test case for get_cluster_load_balancers_using_get
Retrieve a cluster's loadbalancers # noqa: E501
"""
pass
def test_get_clusters_using_get(self):
"""Test case for get_clusters_using_get
Retrieve a cluster's details # noqa: E501
"""
pass
def test_get_clusters_using_get1(self):
"""Test case for get_clusters_using_get1
Retrieve a list of clusters for an account # noqa: E501
"""
pass
def test_get_clusters_using_get2(self):
"""Test case for get_clusters_using_get2
Retrieve a list of cluster names for an application, grouped by account # noqa: E501
"""
pass
def test_get_scaling_activities_using_get(self):
"""Test case for get_scaling_activities_using_get
Retrieve a list of scaling activities for a server group # noqa: E501
"""
pass
def test_get_server_groups_using_get(self):
"""Test case for get_server_groups_using_get
Retrieve a server group's details # noqa: E501
"""
pass
def test_get_server_groups_using_get1(self):
"""Test case for get_server_groups_using_get1
Retrieve a list of server groups for a cluster # noqa: E501
"""
pass
def test_get_target_server_group_using_get(self):
"""Test case for get_target_server_group_using_get
Retrieve a server group that matches a target coordinate (e.g., newest, ancestor) relative to a cluster # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
37811,
198,
220,
220,
220,
1338,
3732,
3110,
7824,
628,
220,
220,
220,
1400,
6764,
2810,
357,
27568,
416,
2451,
7928,
6127,
5235,
3740,
1378,
12567,
13,
785,
14,
2032,
7928,
12,
15042,
14,
... | 2.507088 | 917 |
from abc import ABCMeta, abstractproperty, abstractmethod
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from sklearn.model_selection import train_test_split
from ..dataset.utils import _get_nunique
class BaseParadigm(metaclass=ABCMeta):
"""
Base Paradigm.
"""
@abstractproperty
def scoring(self):
'''
Property that defines scoring metric (e.g. ROC-AUC or accuracy
or f-score), given as a sklearn-compatible string or a compatible
sklearn scorer.
'''
pass
@abstractproperty
def datasets(self):
'''Property that defines the list of compatible datasets
'''
pass
@abstractmethod
def is_valid(self, dataset):
"""Verify the dataset is compatible with the paradigm.
This method is called to verify dataset is compatible with the
paradigm.
This method should raise an error if the dataset is not compatible
with the paradigm. This is for example the case if the
dataset is an Movielens dataset for DIN paradigm, or if the
dataset does not contain any of the required feature.
Parameters
----------
dataset : dataset instance
The dataset to verify.
"""
pass
@abstractmethod
def make_feature_cols(self, dataset, embedding_params):
'''Return deepctr.feature_column.
Parameters
---------
dataset : dataset instance.
a dataset instance.
embedding_params : dict
dict containing embedding params for creating feature columns
i.e. {embedding_dim: 8}
Returns
------
dnn_features : list
list of feature_column instance for dnn inputs.
linear_features : list
list of feature_column instance for linear inputs.
'''
pass
def _prepare_process(self, dataset):
"""Prepare processing of raw files
This function allows setting parameters of the paradigm class prior to
the preprocessing (process_raw). It does nothing by default and can be
overloaded if needed.
Parameters
----------
dataset : dataset instance
The dataset corresponding to the raw file, mainly used to access
dataset specific information.
"""
pass
def _data_munging(self, raw, dataset):
"""
Fill in missing values.
Parameters
----------
raw: DataFrame instance
the raw data.
dataset : dataset instance
The dataset corresponding to the raw file, mainly used to access
dataset specific information.
Returns
-------
metadata: pd.DataFrame
A dataframe containing the metadata.
"""
# fill nan
raw = self._default_filling_rule(raw, dataset)
return raw
def _feature_transform(self, raw, dataset):
"""
Label encoding for sparse features, and do simple transformation for
dense features
"""
for feat in dataset.sparse_features:
lbe = LabelEncoder()
raw[feat] = lbe.fit_transform(raw[feat])
dataset.nunique = _get_nunique(dataset, raw)
mms = MinMaxScaler(feature_range=(0, 1))
raw[dataset.dense_features] = mms.fit_transform(
raw[dataset.dense_features])
return raw
def _process_raw(self, raw, dataset):
"""
This function applies the preprocessing and returns a dataframe.
Data is a dataframe with as many rows as the length of the data
and labels.
"""
raw = self._data_munging(raw, dataset)
raw = self._feature_transform(raw, dataset)
return raw
def get_data(self, dataset):
"""
Return data of the dataset.
Parameters
----------
dataset:
dataset instance.
Returns
-------
train : pd.DataFrame
DataFrame containing train data.
test : pd.DataFrame
DataFrame containing test data.
"""
if not self.is_valid(dataset):
message = "Dataset {} is not valid for paradigm".format(
dataset.code)
raise AssertionError(message)
# TODO generator case
raw = dataset.get_data()
self._prepare_process(dataset)
raw = self._process_raw(raw, dataset)
train_data, test_data = train_test_split(raw,
test_size=dataset.test_size,
random_state=dataset.random)
return train_data, test_data
| [
6738,
450,
66,
1330,
9738,
48526,
11,
12531,
26745,
11,
12531,
24396,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
1341,
35720,
13,
3866,
36948,
1330,
36052,
27195,
12342,
11,
1855,
11518,
33... | 2.334463 | 2,066 |
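_feature_transform above label-encodes the sparse columns and min-max scales the dense ones. The same preprocessing on a toy frame (column names are illustrative):

import pandas as pd
from sklearn.preprocessing import LabelEncoder, MinMaxScaler

df = pd.DataFrame({'user_id': ['a', 'b', 'a'], 'price': [3.0, 9.0, 6.0]})
df['user_id'] = LabelEncoder().fit_transform(df['user_id'])  # -> 0, 1, 0
df[['price']] = MinMaxScaler(feature_range=(0, 1)).fit_transform(df[['price']])
print(df)  # price becomes 0.0, 1.0, 0.5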
from flask_assistant import logger
from flask import json, Response, make_response
from xml.etree import ElementTree
class _Response(object):
"""docstring for _Response"""
class event(_Response):
"""Triggers an event to invoke it's respective intent.
When an event is triggered, speech, displayText and services' data will be ignored.
"""
| [
6738,
42903,
62,
562,
10167,
1330,
49706,
198,
6738,
42903,
1330,
33918,
11,
18261,
11,
787,
62,
26209,
198,
6738,
35555,
13,
316,
631,
1330,
11703,
27660,
628,
198,
4871,
4808,
31077,
7,
15252,
2599,
198,
220,
220,
220,
37227,
15390,
... | 3.63 | 100 |
"""
Application: HealthNet
File: /patientRegistration/forms.py
Authors:
- Nathan Stevens
- Phillip Bedward
- Daniel Herzig
- George Herde
- Samuel Launt
Description:
- This file contains all forms for Patient Registration.
"""
from django import forms
from django.apps import apps
from django.contrib.auth.models import User
from django.forms.widgets import NumberInput
from django.forms.extras.widgets import SelectDateWidget
from django.core.exceptions import ValidationError
from base.models import Address, Person, Insurance, Doctor, Nurse, Admin, Hospital, EmergencyContact
from datetime import date
"""
Forms for registering users
"""
class UserForm(forms.ModelForm):
"""
@class: UserForm
@description: When a Patient is registering, they also register as a User.
"""
first_name = forms.CharField(required=True, label='First Name:',
widget=forms.TextInput(attrs={'class': 'form-control'}), max_length=50)
last_name = forms.CharField(required=True, label='Last Name:',
widget=forms.TextInput(attrs={'class': 'form-control'}), max_length=50)
email = forms.EmailField(required=True, label='Email:',
widget=forms.TextInput(attrs={'class': 'form-control'}), max_length=100)
username = forms.CharField(required=True, label='Username:',
help_text='Required. Between 5 and 30 characters. Letters, digits and @/./+/-/_ only.',
widget=forms.TextInput(attrs={'class': 'form-control'}), max_length=30, min_length=5)
password = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'form-control'}), label='Password:')
confirmP = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'form-control'}), label='Confirm Password:')
class PersonRegistrationForm(forms.ModelForm):
"""
@class: PersonRegistrationForm
@description: A Patient's information is linked to the Person model. When a Patient registers,
they provide information for the Person model.
"""
birthday = forms.DateField(widget=SelectDateWidget(years={1950, 1951, 1952, 1953, 1954, 1955, 1956,
1957, 1958, 1959, 1960, 1961, 1962, 1963,
1964, 1965, 1966, 1967, 1968, 1969, 1970,
1971, 1972, 1973, 1974, 1975, 1976, 1977,
1978, 1979, 1980, 1981, 1982, 1983, 1984,
1985, 1986, 1987, 1988, 1989, 1990, 1991,
1992, 1993, 1994, 1995, 1996, 1997, 1998,
1999, 2000, 2001, 2002, 2003, 2004, 2005,
2006, 2007, 2008, 2009, 2010, 2011, 2012,
2013, 2014, 2015}),
label='Birthday:', required=True)
ssn = forms.IntegerField(widget=forms.TextInput(attrs={'class': 'form-control'}), label='SSN:', required=True,
max_value=1000000000, min_value=100000000)
phoneNumber = forms.IntegerField(widget=forms.TextInput(attrs={'class': 'form-control'}), label='Phone Number:',
required=True, min_value=100000000, max_value=9999999999)
class InsuranceForm(forms.ModelForm):
"""
@class: InsuranceForm
@description: When a Patient Registers they must provide Insurance Information.
"""
name = forms.CharField(label='Name:', required=True,
widget=forms.TextInput(attrs={'class': 'form-control'}), max_length=100)
policyNumber = forms.IntegerField(label='Policy Number:', required=True, max_value=999999999, min_value=1,
widget=forms.TextInput(attrs={'class': 'form-control'}))
class AddressForm(forms.ModelForm):
"""
@class: AddressForm
@description: the Address of the Patient
"""
state = forms.CharField(required=True, label='State:',
widget=forms.TextInput(attrs={'class': 'form-control'}))
street = forms.CharField(required=True, label='Street:',
widget=forms.TextInput(attrs={'class': 'form-control'}), max_length=100)
city = forms.CharField(required=True, label='City:',
widget=forms.TextInput(attrs={'class': 'form-control'}), max_length=100)
zip = forms.CharField(required=True, label='Zip:',
widget=forms.TextInput(attrs={'class': 'form-control'}))
class EmergencyContactForm(forms.ModelForm):
"""
@class: EmergencyContact
@description: The EmergencyContact for the Patient
"""
firstName = forms.CharField(required=True, label='First Name:',
widget=forms.TextInput(attrs={'class': 'form-control'}), max_length=50)
lastName = forms.CharField(required=True, label='Last Name:',
widget=forms.TextInput(attrs={'class': 'form-control'}), max_length=50)
emergencyNumber = forms.IntegerField(widget=forms.TextInput(attrs={'class': 'form-control'}),
label='Emergency Phone Number:', required=True,
min_value=1000000000, max_value=10000000000)
| [
37811,
198,
220,
220,
220,
15678,
25,
3893,
7934,
198,
220,
220,
220,
9220,
25,
1220,
26029,
47133,
14,
23914,
13,
9078,
198,
220,
220,
220,
46665,
25,
198,
220,
220,
220,
220,
220,
220,
220,
532,
18106,
20019,
198,
220,
220,
220,
... | 2.160626 | 2,621 |
import math
| [
11748,
10688,
628,
628,
628,
198
] | 3 | 6 |
#!/usr/bin/env python3
import json
import argparse
from typing import List, NamedTuple

# Minimal stand-ins: parse_args() and extract_tags() were missing from this sample.
def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', required=True)
    parser.add_argument('--output', required=True)
    return parser.parse_args()

def extract_tags(line: str) -> str:
    return line  # placeholder: the original tag-extraction logic is unknown

if __name__ == '__main__':
args = parse_args()
with open(args.input, 'r') as fi, open(args.output, 'w') as fo:
for line in fi:
fo.write(extract_tags(line.strip()))
fo.write('\n')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
33918,
198,
11748,
1822,
29572,
198,
6738,
19720,
1330,
7343,
11,
34441,
51,
29291,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
2... | 2.258993 | 139 |
import torch
import torch.nn as nn
import torch.nn.functional as F
# Minimal stand-in: the Siamese definition was missing from this sample.
class Siamese(nn.Module):
    def __init__(self):
        super().__init__()
        # shared encoder applied to both inputs (twin branches)
        self.encoder = nn.Sequential(nn.Linear(28 * 28, 64), nn.ReLU())

    def forward(self, x1, x2):
        return F.pairwise_distance(self.encoder(x1), self.encoder(x2))

# for test
if __name__ == "__main__":
net = Siamese()
print(net)
print(list(net.parameters()))
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
628,
198,
198,
2,
329,
1332,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
2010,
796,
1... | 2.565217 | 69 |
# -*- coding: utf-8 -*-
import os
from pysilcam.config import load_config, PySilcamSettings, load_camera_config
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
28686,
198,
6738,
279,
893,
346,
20991,
13,
11250,
1330,
3440,
62,
11250,
11,
9485,
15086,
20991,
26232,
11,
3440,
62,
25695,
62,
11250,
628,
628
] | 2.804878 | 41 |
import sys
in_str = sys.stdin.read().replace('/', '//')
res = eval(in_str)
print(res)
| [
11748,
25064,
198,
198,
259,
62,
2536,
796,
25064,
13,
19282,
259,
13,
961,
22446,
33491,
10786,
14,
3256,
705,
1003,
11537,
198,
411,
796,
5418,
7,
259,
62,
2536,
8,
198,
4798,
7,
411,
8,
198
] | 2.351351 | 37 |
from zeep import Client, exceptions as zeep_exceptions
from payit import (
Gateway,
Transaction,
Redirection,
GatewayNetworkError,
TransactionError,
TransactionAlreadyPaidError,
)
class ParsianGateway(Gateway):
"""
Parsian Bank Gateway (PECCO)
Home: https://pec.ir
Documentation: https://pgw.pec.ir/IPG/NewIPGDocument.pdf
"""
__gateway_name__ = "parsian"
__gateway_unit__ = "IRR"
__config_params__ = ["pin", "callback_url", "proxies"]
_server_url_request = (
"https://pec.shaparak.ir" "/NewIPGServices/Sale/SaleService.asmx?WSDL"
)
_server_url_verify = (
"https://pec.shaparak.ir"
"/NewIPGServices/Confirm/ConfirmService.asmx?WSDL"
)
_response_message_map = {
"-32768": "UnknownError",
"-1552": "PaymentRequestIsNotEligibleToReversal",
"-1551": "PaymentRequestIsAlreadyReversed",
"-1550": "PaymentRequestStatusIsNotReversible",
"-1549": "MaxAllowedTimeToReversalHasExceeded",
"-1548": "BillPaymentRequestServiceFailed",
"-1540": "InvalidConfirmRequestService",
"-1536": "TopupChargeServiceTopupChargeRequestFailed",
"-1533": "PaymentIsAlreadyConfirmed",
"-1532": "MerchantHasConfirmedPaymentRequest",
"-1531": "CannotConfirmNonSuccessfulPayment",
"-1530": "MerchantConfirmPaymentRequestAccessViolated",
"-1528": "ConfirmPaymentRequestInfoNotFound",
"-1527": "CallSalePaymentRequestServiceFailed",
"-1507": "ReversalCompleted",
"-1505": "PaymentConfirmRequested",
"-138": "CanceledByUser",
"-132": "InvalidMinimumPaymentAmount",
"-131": "InvalidToken",
"-130": "TokenIsExpired",
"-128": "InvalidIpAddressFormat",
"-127": "InvalidMerchantIp",
"-126": "InvalidMerchantPin",
"-121": "InvalidStringIsNumeric",
"-120": "InvalidLength",
"-119": "InvalidOrganizationId",
"-118": "ValueIsNotNumeric",
"-117": "LengthIsLessOfMinimum",
"-116": "LengthIsMoreOfMaximum",
"-115": "InvalidPayId",
"-114": "InvalidBillId",
"-113": "ValueIsNull",
"-112": "OrderIdDuplicated",
"-111": "InvalidMerchantMaxTransAmount",
"-108": "ReverseIsNotEnabled",
"-107": "AdviceIsNotEnabled",
"-106": "ChargeIsNotEnabled",
"-105": "TopupIsNotEnabled",
"-104": "BillIsNotEnabled",
"-103": "SaleIsNotEnabled",
"-102": "ReverseSuccessful",
"-101": "MerchantAuthenticationFailed",
"-100": "MerchantIsNotActive",
"-1": "Server Error",
"0": "Successful",
"1": "Refer To Card Issuer Decline",
"2": "Refer To Card Issuer Special Conditions",
"3": "Invalid Merchant",
"5": "Do Not Honour",
"6": "Error",
"8": "Honour With Identification",
"9": "Request In-progress",
"10": "Approved For Partial Amount",
"12": "Invalid Transaction",
"13": "Invalid Amount",
"14": "Invalid Card Number",
"15": "No Such Issuer",
"17": "Customer Cancellation",
"20": "Invalid Response",
"21": "No Action Taken",
"22": "Suspected Malfunction",
"30": "Format Error",
"31": "Bank Not Supported By Switch",
"32": "Completed Partially",
"33": "Expired Card Pick Up",
"38": "Allowable PIN Tries Exceeded Pick Up",
"39": "No Credit Account",
"40": "Requested Function is not supported",
"41": "Lost Card",
"43": "Stolen Card",
"45": "Bill Can not Be Payed",
"51": "No Sufficient Funds",
"54": "Expired Account",
"55": "Incorrect PIN",
"56": "No Card Record",
"57": "Transaction Not Permitted To CardHolder",
"58": "Transaction Not Permitted To Terminal",
"59": "Suspected Fraud-Decline",
"61": "Exceeds Withdrawal Amount Limit",
"62": "Restricted Card-Decline",
"63": "Security Violation",
"65": "Exceeds Withdrawal Frequency Limit",
"68": "Response Received Too Late",
"69": "Allowable Number Of PIN Tries Exceeded",
"75": "PIN Reties Exceeds-Slm",
"78": "Deactivated Card-Slm",
"79": "Invalid Amount-Slm",
"80": "Transaction Denied-Slm",
"81": "Cancelled Card-Slm",
"83": "Host Refuse-Slm",
"84": "Issuer Down-Slm",
"91": "Issuer Or Switch Is Inoperative",
"92": "Not Found for Routing",
"93": "Cannot Be Completed",
}
| [
6738,
41271,
538,
1330,
20985,
11,
13269,
355,
41271,
538,
62,
1069,
11755,
198,
198,
6738,
1414,
270,
1330,
357,
198,
220,
220,
220,
29916,
11,
198,
220,
220,
220,
45389,
11,
198,
220,
220,
220,
2297,
4154,
11,
198,
220,
220,
220,
... | 2.250122 | 2,051 |
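_response_message_map above is a plain code-to-text table; a gateway wrapper only needs a lookup with a fallback (the helper name is hypothetical):

def describe_status(code, message_map):
    # unknown codes fall back to the gateway's generic entry
    return message_map.get(str(code), message_map['-32768'])

# e.g. describe_status(0, ParsianGateway._response_message_map) -> 'Successful'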
# -*- coding: utf-8 -*-
from .rte_dataset import RTEAutoInferenceDataset, RTEAutoInferenceReverseDataset, RTEAutoInferenceSignalDataset, \
RTET5InferenceDataset, RTET5InferenceReverseDataset, RTET5InferenceSignalDataset
from .mnli_dataset import MNLIAutoInferenceDataset, MNLIAutoInferenceReverseDataset, MNLIAutoInferenceSignalDataset, \
MNLIT5InferenceDataset, MNLIT5InferenceReverseDataset, MNLIT5InferenceSignalDataset
from .qnli_dataset import QNLIAutoInferenceDataset, QNLIAutoInferenceReverseDataset, QNLIAutoInferenceSignalDataset, \
QNLIT5InferenceDataset, QNLIT5InferenceReverseDataset, QNLIT5InferenceSignalDataset
from .qqp_dataset import QQPAutoInferenceDataset, QQPAutoInferenceReverseDataset, QQPAutoInferenceSignalDataset, \
QQPT5InferenceDataset, QQPT5InferenceReverseDataset, QQPT5InferenceSignalDataset
from .mrpc_dataset import MRPCAutoInferenceDataset, MRPCAutoInferenceReverseDataset, MRPCAutoInferenceSignalDataset, \
MRPCT5InferenceDataset, MRPCT5InferenceReverseDataset, MRPCT5InferenceSignalDataset
from .klue_nli_dataset import KlueNLIAutoInferenceDataset, KlueNLIAutoInferenceReverseDataset, \
KlueNLIAutoInferenceSignalDataset
from .klue_sts_dataset import KlueSTSAutoInferenceDataset, KlueSTSAutoInferenceReverseDataset, \
KlueSTSAutoInferenceSignalDataset
from .kornli_dataset import KorNLIAutoInferenceDataset, KorNLIAutoInferenceReverseDataset, \
KorNLIAutoInferenceSignalDataset
# with-paraphrase dataset
from .qnli_dataset import QNLIAutoParaInferenceDataset, QNLIAutoParaInferenceReverseDataset, \
QNLIAutoParaInferenceSignalDataset | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
764,
81,
660,
62,
19608,
292,
316,
1330,
371,
9328,
27722,
818,
4288,
27354,
292,
316,
11,
371,
9328,
27722,
818,
4288,
49,
964,
325,
27354,
292,
316,
11,
371,
... | 2.473035 | 649 |
# Copyright (c) 2019 Altinity LTD
#
# This product is licensed to you under the
# Apache License, Version 2.0 (the "License").
# You may not use this product except in compliance with the License.
#
# This product may include a number of subcomponents with
# separate copyright notices and license terms. Your use of the source
# code for the these subcomponents is subject to the terms and
# conditions of the subcomponent's license, as noted in the LICENSE file.
"""
Altinity Datasets Utility
"""
import sys
from setuptools import setup, find_packages
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
with open('README.md', 'r') as readme_file:
long_description = readme_file.read()
setup(
name="altinity_datasets",
version="0.1.2",
description="Altinity Datasets for ClickHouse",
long_description=long_description,
long_description_content_type='text/markdown',
license="Apache 2.0",
author="R Hodges",
author_email="info@altinity.com",
url='https://github.com/Altinity/altinity-datasets',
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
],
install_requires=[
'click>=6.7',
'clickhouse-driver>=0.0.18',
'PyYAML>=3.13'
],
packages=find_packages(),
include_package_data=True,
entry_points = {
'console_scripts': ['ad-cli=altinity_datasets.ad_cli:ad_cli']
}
)
| [
2,
15069,
357,
66,
8,
13130,
12344,
6269,
42513,
198,
2,
198,
2,
770,
1720,
318,
11971,
284,
345,
739,
262,
198,
2,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
11074,
198,
2,
921,
743,
407,
779,
428,
1720,
2845,
... | 2.829966 | 594 |
import numpy as np
from numpy.matlib import repmat
from numpy import zeros, eye, ones, matrix
from numpy import cos, sin, arccos, sqrt, pi, arctan2
from panda3d.core import *
from direct.gui.DirectGui import *
from utils.ArgsPack import ArgsPack
class KinematicModel:
"""This is the base class for all robot dynamic models.
We assume the models are all in the form:
:math:`X' = A * X + B * u`
:math:`\dot X = fx + fu * u`
Because
:math:`X' = X + \dot X * dT`
Then
:math:`fx = (A - I) / dT`
:math:`fu = B / dT`
We just need to specify A and B to define different dynamic models.
There are two major phases in the control loop, update and move. In the update phase, the robot updates its information based on the environment; in the move phase, the robot executes the control input.
"""
def __init__(self, init_state, agent, dT, auto, is_2D=False):
"""This function initilize the robot.
Args:
init_state (list): the init state of the robot, for example [x, y, vx, vy]
agent (MobileAgent()): the algorithm that controls this robot.
dT (float): the separation between two control outputs
auto (bool): whether this robot is autonomous; if not, it is controlled by user input like the mouse.
is_2D (bool): whether this model is a 2D model, which means it can only move on the ground plane.
"""
self.control_noise = 0.02 # noise scale
self.safe_dis = 1
self.map_size = 10 # map boundary size
self.fraction = 0.2 # velocity decrease rate per dT
self.disk_radius = 0.4 # radius of disk
self.measure_noise = 0.02 # noise scale
self.auto=False # whether it is controlled by a human
self.RLS_cache = dict() # RLS cache
self.init_state = np.array(init_state)
self.set_saturation()
self.dT = dT
self.agent = agent
self.auto = auto
self.is_2D = is_2D
goals = np.stack([np.random.rand(100)* self.map_size/2 - self.map_size / 4,
np.random.rand(100)* self.map_size/2 - self.map_size / 4,
np.random.rand(100)* self.map_size/4 + self.map_size / 4,
zeros(100),
zeros(100),
zeros(100)], axis=0 )
self.reset(dT, goals)
self.get_closest_X(np.vstack([10,10,10,0,0,0]))
def reset(self, dT, goals):
"""This function reset the robot state to initial, and set the goals to given goals. This function is useful when the user need to make sure all the robot are tested under the same goal sequence,
Args:
dT (float): the separation between two control outputs
goals (ndarray): n*6 array of goal specification. [x y z 0 0 0]
"""
self.dT = dT
self.set_goals(goals)
self.init_x(self.init_state)
self.x_his = repmat(self.x, 1, 50)
self.n = np.shape(self.x)[0]
self.H = matrix(eye(self.n))
self.kalman_P = matrix(eye(self.n)) * (self.measure_noise**2)
self.x_est = self.observe(self.x)
self.m = matrix(zeros((6,1)))
self.m_his = repmat(self.m, 1, 50)
self.x_pred = zeros((self.n,1))
self.trace = repmat(self.get_P(), 1, 100)
self.goal_achieved = 0
self.time = 0
self.last_collision_time = 0
self.score = dict()
self.score['collision_cnt'] = 0
self.score['safety'] = 0
self.score['nearest_dis'] = 1e9
self.score['efficiency'] = 0
self.predictability = 0
self.get_closest_X(np.vstack([10,10,10,0,0,0]))
def get_PV(self):
"""This function return the cartesian position and velocity of the robot,
Returns:
PV (ndarray): 6*1 array. [x y z vx vy vz]
"""
return np.vstack([self.get_P(), self.get_V()])
def fx(self):
"""
This function calculates fx from A.
Because
X' = X + dot_X * dT
Then
fx = (A - I) / dT
"""
return (self.A() - np.eye(np.shape(self.x)[0])) / self.dT * self.x
def fu(self):
"""
This function calculates fu from B.
Because
X' = X + dot_X * dT
Then
fu = B / dT
"""
return self.B() / self.dT
def filt_u(self, u):
"""return the saturated control input based the given reference control input
Args:
u (ndarray): reference control input
Returns:
u (ndarray): saturated control input
"""
u = np.minimum(u, self.max_u)
u = np.maximum(u, self.min_u)
return u
def filt_x(self, x):
"""return the saturated robot state based the given reference state
Args:
x (ndarray): reference state
Returns:
x (ndarray): saturated state
"""
x = np.minimum(x, self.max_x)
x = np.maximum(x, self.min_x)
return x
def update_score(self, obstacle):
"""Update the scores of the robot based on the relative postion and relative velocity to the obstacle. The scores are used to draw roc curves and generate statistical restuls.
Args:
obstacle (KinematicModel()): the obstacle
"""
dm = obstacle.m - self.m
dp = (obstacle.m - self.m)[[0,1,2],0]
dv = (obstacle.m - self.m)[[3,4,5],0]
dis = np.linalg.norm(dp)
v_op = np.asscalar(dv.T * dp / dis)
if dis < self.safe_dis:
if self.time - self.last_collision_time > 5:
self.score['collision_cnt'] = self.score['collision_cnt'] + 1
self.last_collision_time = self.time
if v_op < 0 and dis < 2*self.safe_dis:
self.score['safety'] = self.score['safety'] + min(0, np.log(dis / (2 * self.safe_dis) + 1e-20)) * abs(v_op);
# self.score['safety'] = self.score['safety'] + min(2 * self.safe_dis, dis);
self.score['nearest_dis'] = min(self.score['nearest_dis'], dis)
self.score['efficiency'] = self.goal_achieved
def update(self, obstacle):
"""Update phase. 1. update score, 2. update goal, 3. update self state estimation, 4. update the nearest point on self to obstacle, 5. calculate control input, 6. update historical trajectory.
Args:
obstacle (KinematicModel()): the obstacle
"""
self.time = self.time + 1
self.update_score(obstacle)
self.update_goal()
self.kalman_estimate_state()
self.update_m(obstacle.m)
self.calc_control(obstacle)
self.update_trace()
def update_trace(self):
"""
update trace of end effector
"""
self.trace = np.concatenate([self.trace[:,1:], self.get_P()],axis=1)
def update_m(self, Mh):
"""Update the nearest cartesian point on self to obstacle.
Args:
Mh (ndarray): 6*1 array, Cartesian position and velocity of the obstacle.
"""
self.m = self.get_closest_X(Mh)
def kalman_estimate_state(self):
"""
Use a Kalman filter to update the self state estimation.
"""
dT = self.dT
A = self.A()
B = self.B()
Q = B * B.T * (self.control_noise)**2 # adopt max_a / 2 as sigma, because 95 percent of values lie in mu-2*sigma to mu+2*sigma
R = matrix(eye(self.n)) * (self.measure_noise**2)
I = matrix(eye(self.n))
P = self.kalman_P
H = self.H
x_pred = A * self.x_est + B * self.u
P = A * P * A.T + Q
z = self.observe(self.x)
y = z - self.H * x_pred
S = R + H * P * H.T
K = P * H.T * np.linalg.inv(S)
x_est = x_pred + K * y
P = (I - K*H) * P * (I - K*H).T + K * R * K.T
self.kalman_P = P
self.x_est = self.filt_x(x_est) # \hat x(k|k)
self.x_pred = self.filt_x(A * self.x_est + B * self.u) # \hat x(k+1|k)
return x_est
def calc_control(self, obstacle):
"""
Generate control input by the agent.
Args:
obstacle (KinematicModel()): the obstacle
"""
dT = self.dT
goal = self.goal
fx = self.fx()
fu = self.fu()
Xr = self.x_est
Xh = obstacle.x_est
Mr = self.m
Mh = obstacle.m
dot_Xr = self.dot_X()
dot_Xh = obstacle.dot_X()
p_Mr_p_Xr = self.p_M_p_X()
p_Mh_p_Xh = obstacle.p_M_p_X()
u0 = self.u_ref()
min_u = self.min_u
max_u = self.max_u
self.u = self.agent.calc_control_input(dT, goal, fx, fu, Xr, Xh, dot_Xr, dot_Xh, Mr, Mh, p_Mr_p_Xr, p_Mh_p_Xh, u0, min_u, max_u)
self.u = self.filt_u(self.u)
def dot_X(self):
"""
First order estimation of dot_X using current state and last state.
"""
return (self.x - self.x_his[:,-2]) / self.dT
# return (self.x_pred - self.x_est) / self.dT
def move(self):
"""
Move phase. A random disturbance is added to the control input.
"""
self.x = self.A() * self.x + self.B() * (self.u + np.random.randn(np.shape(self.u)[0],1) * self.control_noise)
self.x = self.filt_x(self.x)
self.x_his = np.concatenate([self.x_his[:,1:], self.x],axis=1)
self.m_his = np.concatenate([self.m_his[:,1:], self.m], axis=1)
# The following functions are required to be filled in for new models.
def init_x(self, init_state):
"""
init state x
"""
pass
def set_saturation(self):
"""
Set min and max cut off for state x and control u.
"""
pass
def get_P(self):
"""
Return position in the Cartesian space.
"""
pass
def get_V(self):
"""
Return velocity in the Cartesian space.
"""
pass
def set_P(self, p):
"""
Set position in the Cartesian space.
Args:
p (ndarray): position
"""
pass
def set_V(self, v):
"""
Set velocity in the Cartesian space
Args:
v (ndarray): velocity
"""
pass
def A(self):
"""
Transition matrix A as explained in the class definition.
"""
pass
def B(self):
"""
Transition matrix B as explained in the class definition.
"""
pass
def get_closest_X(self, Mh):
"""
Update the corresponding state of the nearest cartesian point on self to obstacle.
Args:
Mh (ndarray): 6*1 array, Cartesian position and velocity of the obstacle.
"""
pass
def p_M_p_X(self): # p closest point p X
""" dM / dX, the derivative of the nearest cartesian point to robot state.
"""
pass
def estimate_state(self):
"""
State estimator caller.
"""
pass
def u_ref(self):
"""
Reference control input.
"""
pass
############## Graphics ##############
def add_sphere(self, pos, color, scale=0.5, render_node=None):
"""
Add a sphere model into the scene.
Args:
pos: position to place the sphere
color: color of the sphere
scale: scale to zoom the sphere
"""
if render_node is None:
render_node = self.render
ret = loader.loadModel("resource/planet_sphere")
ret.reparentTo(render_node)
ret.setTransparency(TransparencyAttrib.MAlpha)
ret.setColor(color[0], color[1], color[2], color[3])
ret.setScale(scale)
ret.setPos(pos[0], pos[1], pos[2])
return ret;
def draw_trace(self):
"""
Show the trace of the end effector.
"""
if hasattr(self, 'trace_line_handle'):
self.trace_line_handle.removeNode()
segs = LineSegs( )
segs.setThickness( 5.0 )
segs.setColor(self.color[0], self.color[1], self.color[2], 0)
p_from = LVector3f(self.trace[0,0], self.trace[1,0], self.trace[2,0])
segs.moveTo( p_from )
for i in range(np.shape(self.trace)[1]):
p_to = LVector3f(self.trace[0,i], self.trace[1,i], self.trace[2,i])
segs.setColor(self.color[0], self.color[1], self.color[2], 0)
segs.drawTo( p_to )
trace_line = segs.create( )
self.trace_line_handle = self.render.attachNewNode(trace_line);
# def draw_arrow(self, p_from, p_to, color):
# segs = LineSegs( )
# segs.setThickness( 20.0 )
# segs.setColor( color )
# segs.moveTo( p_from )
# segs.drawTo( p_to )
# arrow = segs.create( )
# self.render.attachNewNode(arrow)
# return segs
def draw_movement(self, X, u):
"""
For debug use.
Show the velocity vector and control vector.
"""
p_from = LVector3f(X[0], X[1], X[2]);
v_to = p_from + LVector3f(X[3], X[4], X[5]);
u_to = p_from + LVector3f(u[0], u[1], u[2]);
u_color = Vec4(0.2, 0.8, 0.2, 0.5);
v_color = Vec4(0.8, 0.2, 0.8, 0.5);
return [self.draw_arrow(p_from, v_to, v_color), self.draw_arrow(p_from, u_to, u_color)];
def move_seg(self, vdata, p_from, vec):
"""
Move a segment line to a new position.
Args:
vdata: segment line handle
p_from: new start point
vec: the segment line vector
"""
p_from = LVector3f(p_from[0], p_from[1], p_from[2])
p_to = p_from + LVector3f(vec[0], vec[1], vec[2])
vdata.setVertex(0, p_from)
vdata.setVertex(1, p_to)
# The following functions are required to be filled in for new models.
def load_model(self, render, loader, color=[0.1, 0.5, 0.8, 1], scale=0.5):
"""
Load the 3d model to be shown in the GUI
Args:
render : panda3d component
loader : panda3d component
color (list): RGB and alpha
scale (float): scale to zoom the loaded 3d model.
"""
self.color = color
self.render = render
def redraw_model(self):
"""
Refresh the position of the robot model and goal model in the GUI.
"""
pass
def model_auxiliary(self):
"""
This function is for debug use.
"""
pass | [
11748,
299,
32152,
355,
45941,
198,
6738,
299,
32152,
13,
6759,
8019,
1330,
1128,
6759,
198,
6738,
299,
32152,
1330,
1976,
27498,
11,
4151,
11,
3392,
11,
17593,
198,
6738,
299,
32152,
1330,
8615,
11,
7813,
11,
610,
535,
418,
11,
19862... | 2.037696 | 7,189 |
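The class docstring above fixes the convention fx = (A - I) / dT and fu = B / dT. A quick numeric check on a 1D double integrator (position/velocity state; the matrices are assumptions for illustration, not taken from the sample):

import numpy as np

dT = 0.1
A = np.matrix([[1.0, dT], [0.0, 1.0]])   # discrete transition: x' = A x + B u
B = np.matrix([[0.5 * dT ** 2], [dT]])
x = np.matrix([[0.0], [1.0]])            # position 0, velocity 1

fx = (A - np.eye(2)) / dT * x            # continuous drift: [[1.0], [0.0]]
fu = B / dT                              # control effectiveness: [[0.05], [1.0]]
print(fx.T, fu.T)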
""" A collection of utility methods
:Authors: Sana dev team
:Version: 1.1
"""
import os, sys, traceback
import time
import logging
import cjson
from django.conf import settings
LOGGING_ENABLED = 'LOGGING_ENABLE'
LOGGING_START = 'LOGGING_START_TIME'
def trace(f):
    """Decorator to add traces to a method.
    """
    # Minimal assumption: the wrapper body was missing from this sample.
    def new_f(*args, **kwargs):
        logging.debug('Entering %s', f.func_name)
        return f(*args, **kwargs)
    new_f.func_name = f.func_name
    return new_f
def log_traceback(logging):
"""Prints the traceback for the most recently caught exception to the log
and returns a nicely formatted message.
"""
et, val, tb = sys.exc_info()
trace = traceback.format_tb(tb)
stack = traceback.extract_tb(tb)
for item in stack:
logging.error(traceback.format_tb(item))
mod = stack[0]
return "Exception : %s %s %s" % (et, val, trace[0])
def flush(flushable):
""" Removes data stored for a model instance cached in this servers data
stores
flushable => a instance of a class which provides a flush method
"""
flush_setting = 'FLUSH_'+flushable.__class__.__name__.upper()
if getattr(settings, flush_setting):
flushable.flush()
def mark(module, line,*args):
""" in code tracing util for debugging """
print('Mark %s.%s: %s' % (module, line, args))
| [
37811,
317,
4947,
286,
10361,
5050,
198,
198,
25,
30515,
669,
25,
44500,
1614,
1074,
198,
25,
14815,
25,
352,
13,
16,
198,
37811,
198,
198,
11748,
28686,
11,
25064,
11,
12854,
1891,
198,
11748,
640,
198,
11748,
18931,
198,
11748,
269,... | 2.610766 | 483 |
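With the wrapper body restored, the trace decorator is applied in the usual way (this sample is Python 2-era code, hence the func_name attribute):

@trace
def handle_request(request):
    return 'ok'

handle_request('ping')  # logs entry at debug level, then returns 'ok'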
from app.models.user import User
from app.models.role import Role
from app.core.security import get_password_hash
| [
6738,
598,
13,
27530,
13,
7220,
1330,
11787,
198,
6738,
598,
13,
27530,
13,
18090,
1330,
20934,
198,
6738,
598,
13,
7295,
13,
12961,
1330,
651,
62,
28712,
62,
17831,
628,
628,
628,
628,
628,
628,
628,
628,
198
] | 3.333333 | 39 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2017 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this will implement noderange grammar
import confluent.exceptions as exc
import codecs
import struct
import eventlet.green.socket as socket
import eventlet.support.greendns
getaddrinfo = eventlet.support.greendns.getaddrinfo
# TODO(jjohnson2): have a method to arbitrate setting methods, to aid
# in correct matching of net.* based on parameters, mainly for pxe
# The scheme for pxe:
# For one: the candidate net.* should have pxe set to true, to help
# disambiguate from interfaces meant for bmc access
# bmc relies upon hardwaremanagement.manager, plus we don't collect
# that mac address
# the ip as reported by recvmsg to match the subnet of that net.* interface
# if switch and port available, that should match.
def get_nic_config(configmanager, node, ip=None, mac=None):
"""Fetch network configuration parameters for a nic
For a given node and interface, find and retrieve the pertinent network
configuration data. The desired configuration can be searched
either by ip or by mac.
:param configmanager: The relevant confluent.config.ConfigManager
instance.
:param node: The name of the node
:param ip: An IP address on the intended subnet
:param mac: The mac address of the interface
:returns: A dict of parameters, 'ipv4_gateway', ....
"""
# ip parameter *could* be the result of recvmsg with cmsg to tell
# pxe *our* ip address, or it could be the desired ip address
#TODO(jjohnson2): ip address, prefix length, mac address,
# join a bond/bridge, vlan configs, etc.
# also other nic criteria, physical location, driver and index...
nodenetattribs = configmanager.get_node_attributes(
node, 'net*.ipv4_gateway').get(node, {})
cfgdata = {
'ipv4_gateway': None,
'prefix': None,
}
if ip is not None:
prefixlen = get_prefix_len_for_ip(ip)
cfgdata['prefix'] = prefixlen
for setting in nodenetattribs:
gw = nodenetattribs[setting].get('value', None)
if gw is None or not gw:
continue
if ip_on_same_subnet(ip, gw, prefixlen):
cfgdata['ipv4_gateway'] = gw
break
return cfgdata
def addresses_match(addr1, addr2):
"""Check two network addresses for similarity
Is it zero-padded in one place, not zero-padded in another? Is one place by name and another by IP?
Is one context getting a normal IPv4 address and another getting IPv4 in IPv6 notation?
This function examines the two given names, performing the required changes to compare them for equivalency
:param addr1:
:param addr2:
:return: True if the given addresses refer to the same thing
"""
for addrinfo in socket.getaddrinfo(addr1, 0, 0, socket.SOCK_STREAM):
rootaddr1 = socket.inet_pton(addrinfo[0], addrinfo[4][0])
if addrinfo[0] == socket.AF_INET6 and rootaddr1[:12] == b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff':
# normalize to standard IPv4
rootaddr1 = rootaddr1[-4:]
for otherinfo in socket.getaddrinfo(addr2, 0, 0, socket.SOCK_STREAM):
otheraddr = socket.inet_pton(otherinfo[0], otherinfo[4][0])
if otherinfo[0] == socket.AF_INET6 and otheraddr[:12] == b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff':
otheraddr = otheraddr[-4:]
if otheraddr == rootaddr1:
return True
return False
| [
2,
43907,
25,
7400,
11338,
28,
19,
6482,
10394,
28,
19,
2705,
8658,
11338,
28,
19,
198,
198,
2,
15069,
2177,
40269,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
... | 2.757432 | 1,480 |
# !/usr/bin/env python
# -*- coding: UTF-8 -*-
from flask import Flask
app=Flask(__name__)
# app.config.from_pyfile('config.ini')
# app.config.from_envvar('FLASKCONFIG')
@app.route('/')
def index():
    # Minimal assumption: the view body was missing from this sample.
    return 'Hello, World!'
if __name__ == '__main__':
print(app.url_map)
app.run(host="0.0.0.0", port=5000, debug = True)
| [
2,
5145,
14,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
198,
6738,
42903,
1330,
46947,
198,
1324,
28,
7414,
2093,
7,
834,
3672,
834,
8,
198,
2,
598,
13,
11250,
13,
6738,
6... | 2.246154 | 130 |
# -*- coding: utf-8 -*-
import csv
def convert_empty_to_none(val):
'''Converts empty or "None" strings to None Types
Arguments:
val: The field to be converted
Returns:
The passed value if the value is not an empty string or
'None', ``None`` otherwise.
'''
return val if val not in ['', 'None'] else None
def extract(file_target, first_row_headers=[]):
'''Pulls csv data out of a file target.
Arguments:
file_target: a file object
Keyword Arguments:
first_row_headers: An optional list of headers that can
be used as the keys in the returned DictReader
Returns:
A :py:class:`~csv.DictReader` object.
'''
data = []
with open(file_target, 'rU') as f:
fieldnames = first_row_headers if len(first_row_headers) > 0 else None
reader = csv.DictReader(f, fieldnames=fieldnames)
for row in reader:
data.append(row)
return data
def determine_company_contact(row):
'''Convert an input row of data to a dict of company contact fields.
Arguments:
row: An input row of data from an input spreadsheet
Returns:
A dict object which can be used to create a new
:py:class:`~purchasing.data.companies.CompanyContact`
object
'''
try:
first_name, last_name = row.get('CONTACT').split()
except:
first_name, last_name = None, None
try:
tmp = row.get('ADDRESS2')
city = tmp.split(',')[0]
state, zip_code = tmp.split(',')[1].split()
if '-' in zip_code:
zip_code = zip_code.split('-')[0]
except:
city, state, zip_code = None, None, None
_first_name = convert_empty_to_none(first_name)
_last_name = convert_empty_to_none(last_name)
_addr1 = convert_empty_to_none(row.get('ADDRESS1'))
_city = convert_empty_to_none(city)
_state = convert_empty_to_none(state)
_zip_code = convert_empty_to_none(zip_code)
_phone_number = convert_empty_to_none(row.get('PHONE #'))
_fax_number = convert_empty_to_none(row.get('FAX #'))
_email = convert_empty_to_none(row.get('E-MAIL ADDRESS'))
if any(
(_first_name, _last_name, _addr1, _city, _state, _zip_code, _phone_number, _fax_number, _email)
):
return (dict(
first_name=_first_name, last_name=_last_name,
addr1=_addr1, city=_city, state=_state,
zip_code=_zip_code, phone_number=_phone_number,
fax_number=_fax_number, email=_email
))
return None
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
269,
21370,
198,
198,
4299,
10385,
62,
28920,
62,
1462,
62,
23108,
7,
2100,
2599,
198,
220,
220,
220,
705,
7061,
3103,
24040,
6565,
393,
366,
14202,
1,
130... | 2.275023 | 1,109 |
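extract above wraps csv.DictReader: when first_row_headers is supplied those become the row keys, otherwise the file's first row does. A short usage sketch (file name and headers hypothetical):

rows = extract('vendors.csv', first_row_headers=['CONTACT', 'ADDRESS1', 'ADDRESS2'])
contacts = [c for c in (determine_company_contact(r) for r in rows) if c]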
import argparse
| [
198,
11748,
1822,
29572,
628
] | 3.6 | 5 |
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
from numpy import get_include as numpy_include
import os
import platform
cython_extra_compile_args = ['-O3', '-g', '-I' + numpy_include(), '-ffast-math']
is_mac = platform.system() == 'Darwin'
if is_mac:
cython_extra_compile_args += ['-stdlib=libc++']
kreg_cython = cythonize(Extension(name='PCAfold.kernel_regression',
sources=[os.path.join('PCAfold', 'kernel_regression_cython.pyx')],
extra_compile_args=cython_extra_compile_args,
language='c++'))
setup(name='PCAfold',
version='1.0.0',
license='MIT',
description='PCAfold is a Python software for generating, improving and analyzing PCA-derived low-dimensional manifolds',
author='Elizabeth Armstrong, Kamila Zdybal',
packages=['PCAfold'],
ext_modules=kreg_cython)
| [
6738,
1233,
26791,
13,
7295,
1330,
9058,
198,
6738,
1233,
26791,
13,
2302,
3004,
1330,
27995,
198,
6738,
327,
7535,
13,
15580,
1330,
3075,
400,
261,
1096,
198,
6738,
299,
32152,
1330,
651,
62,
17256,
355,
299,
32152,
62,
17256,
198,
1... | 2.29717 | 424 |
import os
import logging
import discord
from discord.ext import slash
client = slash.SlashBot(
# Pass help_command=None if the bot only uses slash commands
command_prefix='/', description='', help_command=None,
debug_guild=int(os.environ.get('DISCORD_DEBUG_GUILD', 0)) or None
)
@client.slash_cmd()
async def hello(ctx: slash.Context):
"""Hello World!"""
await ctx.respond('Hello World!', flags=slash.MessageFlags.EPHEMERAL,
rtype=slash.InteractionResponseType.ChannelMessage)
@client.slash_group()
async def say(ctx: slash.Context):
"""Send a message in the bot's name."""
print('Options:', ctx.options)
@say.check
async def say_check(ctx: slash.Context):
    # Minimal assumption: the check body was missing from this sample.
    return True
emote_opt = slash.Option(
description='Message to send', required=True,
choices=['Hello World!', 'This is a premade message.',
slash.Choice('This will not say what this says.', 'See?')]
)
@say.slash_cmd()
async def emote(ctx: slash.Context, choice: emote_opt):
"""Send a premade message."""
await ctx.respond(choice, allowed_mentions=discord.AllowedMentions.none(),
# sends a message without showing the command invocation
rtype=slash.InteractionResponseType.ChannelMessageWithSource)
msg_opt = slash.Option(
description='Message to send', required=True)
@say.slash_cmd()
async def repeat(ctx: slash.Context, message: msg_opt):
"""Make the bot repeat your message."""
await ctx.respond(message, allowed_mentions=discord.AllowedMentions.none(),
# sends a message, showing command invocation
rtype=slash.InteractionResponseType.ChannelMessageWithSource)
@client.slash_cmd()
async def stop(ctx: slash.Context):
"""Stop the bot."""
await ctx.respond(rtype=slash.InteractionResponseType.Acknowledge)
await client.close()
@stop.check
async def stop_check(ctx: slash.Context):
    # Minimal assumption: the check body was missing from this sample.
    return True
# show extension logs
logger = logging.getLogger('discord.ext.slash')
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())
token = os.environ['DISCORD_TOKEN'].strip()
try:
client.run(token)
finally:
print('Goodbye.')
| [
11748,
28686,
198,
11748,
18931,
198,
11748,
36446,
198,
6738,
36446,
13,
2302,
1330,
24632,
198,
198,
16366,
796,
24632,
13,
11122,
1077,
20630,
7,
198,
220,
220,
220,
1303,
6251,
1037,
62,
21812,
28,
14202,
611,
262,
10214,
691,
3544,... | 2.643939 | 792 |
from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from ...sell_marketing.api.ad_api import AdApi
from ...sell_marketing.api.ad_report_api import AdReportApi
from ...sell_marketing.api.ad_report_metadata_api import AdReportMetadataApi
from ...sell_marketing.api.ad_report_task_api import AdReportTaskApi
from ...sell_marketing.api.campaign_api import CampaignApi
from ...sell_marketing.api.item_price_markdown_api import ItemPriceMarkdownApi
from ...sell_marketing.api.item_promotion_api import ItemPromotionApi
from ...sell_marketing.api.promotion_api import PromotionApi
from ...sell_marketing.api.promotion_report_api import PromotionReportApi
from ...sell_marketing.api.promotion_summary_report_api import PromotionSummaryReportApi
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
2,
781,
539,
23,
25,
645,
20402,
198,
198,
2,
1330,
2471,
271,
656,
40391,
5301,
198,
6738,
2644,
7255,
62,
10728,
278,
13,
15042,
13,
324,
62,
15042,
1330,
1215,
32,
14415,
... | 3.243697 | 238 |
# _*_ coding: utf-8 _*_
"""cmdb URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from app01 import views
import xadmin
urlpatterns = [
url(r'^$', views.asset),
url(r'^login/$', views.loginview),
url(r'^logout/$', views.logoutview),
# admin site
url(r'^adminn/', xadmin.site.urls),
# update a single asset
url(r'^getone/$', views.getOne),
# update all assets
url(r'^getall/$', views.getAll),
# asset search
url(r'^search/asset/$', views.search_asset),
# host search
url(r'^search/host/$', views.search_host),
# delete an asset
url(r'^delasset/$', views.delasset),
# delete a host
url(r'^delhost/$', views.delhost),
# "host list" menu
url(r'^host/$', views.host),
# download the template
url(r'^download/template/$', views.download_template),
# download the exported host list
url(r'^download/host/$', views.download_host),
# download the exported asset list
url(r'^download/asset/$', views.download_asset),
# upload a host template
url(r'^upload/$', views.upload),
# add hosts from a template
url(r'^addhost/template/$', views.template_add),
# add a host manually
url(r'^addhost/manual/$', views.manual_add),
# check host status
url(r'^chkhost/$', views.check_host),
# change a host password
url(r'^pwd/update/$', views.UpdatePwd),
# export the host list
url(r'^export/host/$', views.export_host),
# export the asset list
url(r'^export/asset/$', views.export_asset),
# progress of bulk asset import
url(r'^percentage/asset/$', views.getAll_percentage),
# progress of bulk host import
url(r'^percentage/host/$', views.template_add_percentage),
]
| [
2,
4808,
9,
62,
19617,
25,
3384,
69,
12,
23,
4808,
9,
62,
198,
37811,
28758,
65,
10289,
28373,
198,
198,
464,
4600,
6371,
33279,
82,
63,
1351,
11926,
32336,
284,
5009,
13,
1114,
517,
1321,
3387,
766,
25,
198,
220,
220,
220,
3740,
... | 1.791155 | 1,221 |
from typing import List, Union
from pytest import raises # type: ignore
from graphql.error import GraphQLError, format_error
from graphql.language import Node, Source
| [
6738,
19720,
1330,
7343,
11,
4479,
198,
198,
6738,
12972,
9288,
1330,
12073,
220,
1303,
2099,
25,
8856,
198,
198,
6738,
4823,
13976,
13,
18224,
1330,
29681,
48,
2538,
81,
1472,
11,
5794,
62,
18224,
198,
6738,
4823,
13976,
13,
16129,
1... | 3.717391 | 46 |
import setuptools
with open('README.md', 'r') as fh:
long_description = fh.read()
setuptools.setup(
name='ufcpy',
version='2.0.1',
author='youngtrep',
author_email='youngtrep.business@gmail.com',
description='A fast and easy way to access the UFC roster',
long_description=long_description,
url='https://github.com/YoungTrep/ufcpy',
packages=setuptools.find_packages(),
install_requires=[
'beautifulsoup4',
'urllib3'
],
license='MIT',
keywords=['ufc', 'mma', 'mixed martial arts', 'fighting', 'fighters', 'ufc-api', 'mma-api'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Utilities',
'Topic :: Software Development :: Libraries :: Python Modules'
],
python_requires='>=3.6'
)
| [
11748,
900,
37623,
10141,
198,
198,
4480,
1280,
10786,
15675,
11682,
13,
9132,
3256,
705,
81,
11537,
355,
277,
71,
25,
198,
220,
220,
220,
890,
62,
11213,
796,
277,
71,
13,
961,
3419,
198,
198,
2617,
37623,
10141,
13,
40406,
7,
198,... | 2.545662 | 438 |
import pytest
from fondat.string import Template
pytestmark = pytest.mark.asyncio
| [
11748,
12972,
9288,
198,
198,
6738,
16245,
265,
13,
8841,
1330,
37350,
628,
198,
9078,
9288,
4102,
796,
12972,
9288,
13,
4102,
13,
292,
13361,
952,
628,
628,
628,
628
] | 3.066667 | 30 |
"""Network Architectures"""
from typing import Callable, List
import torch
from torch import nn
from torch.nn import functional as F
class SmallNetwork(nn.Module):
"""
Network used in the experiments on MNIST and Fashion MNIST.
"""
class BigNetwork(nn.Module):
"""
Network used in the experiments on CIFAR-10
Code adopted from: https://github.com/ftramer/Handcrafted-DP/blob/main/models.py
"""
| [
37811,
26245,
17340,
942,
37811,
198,
198,
6738,
19720,
1330,
4889,
540,
11,
7343,
198,
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
198,
6738,
28034,
13,
20471,
1330,
10345,
355,
376,
628,
198,
4871,
10452,
26245,
7,
20471,
13,
26796... | 3.162963 | 135 |