id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
3298309 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
######################## write mindrecord example ########################
Write mindrecord by data dictionary:
python writer.py --mindrecord_script /YourScriptPath ...
"""
import argparse
import mindspore.dataset as ds
# Read a MindRecord file and dump every record to stdout, then report the count.
arg_parser = argparse.ArgumentParser(description='Mind record reader')
arg_parser.add_argument('--path', type=str,
                        default="/tmp/cora/mindrecord/cora_mr",
                        help='data file')
cli_args = arg_parser.parse_args()

# Open the MindRecord dataset at the requested path.
reader = ds.MindDataset(cli_args.path)

record_count = 0
# Iterate one epoch, yielding each record as a dict of numpy arrays.
for record in reader.create_dict_iterator(output_numpy=True, num_epochs=1):
    print(record)
    record_count += 1
print("Total items # is {}".format(record_count))
| StarcoderdataPython |
1613109 | #!/usr/bin/env python
#
# @file createNewPackage.py
# @brief Create package object to pass to functions
# @author <NAME>
#
import sys
def createSed():
    """Build the package description passed to the SED-ML code generator.

    Returns:
        dict: a package record with keys
            'name'     -- package name ('Sed'),
            'elements' -- one dict per SED-ML class, each with 'name' and
                          'typecode', plus 'isSedListOf': True for classes
                          that also get a SedListOf container,
            'plugins'  -- plugin records; here only the SedDocument plugin
                          listing the elements it aggregates,
            'number'   -- package enumeration offset (1000).
    """
    # Fix: the original wrapped every literal in a redundant dict({...})
    # call; a plain dict literal is equivalent and idiomatic.
    m0 = {'name': 'SedDocument', 'typecode': 'SEDML_DOCUMENT'}
    m1 = {'name': 'Model', 'typecode': 'SEDML_MODEL', 'isSedListOf': True}
    c1 = {'name': 'Change', 'typecode': 'SEDML_CHANGE', 'isSedListOf': True}
    c2 = {'name': 'NewXML', 'typecode': 'SEDML_CHANGE_NEW_XML'}
    c3 = {'name': 'AddXML', 'typecode': 'SEDML_CHANGE_ADD_XML'}
    c4 = {'name': 'ChangeAttribute', 'typecode': 'SEDML_CHANGE_ATTRIBUTE'}
    c5 = {'name': 'ComputeChange', 'typecode': 'SEDML_CHANGE_COMPUTECHANGES'}
    v1 = {'name': 'Variable', 'typecode': 'SEDML_VARIABLE'}
    p1 = {'name': 'Parameter', 'typecode': 'SEDML_PARAMETER'}
    s1 = {'name': 'Simulation', 'typecode': 'SEDML_SIMULATION', 'isSedListOf': True}
    s2 = {'name': 'UniformTimeCourse', 'typecode': 'SEDML_SIMULATION_UNIFORM_TIMECOURSE'}
    s3 = {'name': 'Algorithm', 'typecode': 'SEDML_SIMULATION_ALGORITHM'}
    t1 = {'name': 'Task', 'typecode': 'SEDML_TASK', 'isSedListOf': True}
    d1 = {'name': 'DataGenerator', 'typecode': 'SEDML_DATAGENERATOR', 'isSedListOf': True}
    o1 = {'name': 'Output', 'typecode': 'SEDML_OUTPUT', 'isSedListOf': True}
    o2 = {'name': 'Plot2D', 'typecode': 'SEDML_OUTPUT_PLOT2D'}
    o3 = {'name': 'Plot3D', 'typecode': 'SEDML_OUTPUT_PLOT3D'}
    o4 = {'name': 'Curve', 'typecode': 'SEDML_OUTPUT_CURVE'}
    o5 = {'name': 'Surface', 'typecode': 'SEDML_OUTPUT_SURFACE'}
    o6 = {'name': 'DataSet', 'typecode': 'SEDML_OUTPUT_DATASET'}
    o7 = {'name': 'Report', 'typecode': 'SEDML_OUTPUT_REPORT'}
    elem = [m0, m1, c1, c2, c3, c4, c5, v1, p1, s1, s2, s3,
            t1, d1, o1, o2, o3, o4, o5, o6, o7]
    # The SedDocument plugin aggregates the top-level list-of containers.
    doc_elem = [m1, c1, s1, t1, d1, o1]
    doc_plug = {'sbase': 'SedDocument', 'extension': doc_elem}
    plug = [doc_plug]
    package = {'name': 'Sed', 'elements': elem, 'plugins': plug, 'number': 1000}
    return package
| StarcoderdataPython |
1780896 | <filename>src/annalist_root/annalist/tests/entity_testsitedata.py
"""
This module contains definitions intended to reflect the site-wide data
used by all Annalist installations.
Test cases should use values returned by this module so that additions to
the site data can be updated here, in just one place.
"""
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "<NAME> (<EMAIL>)"
__copyright__ = "Copyright 2014, <NAME>"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import os
from annalist import layout
from annalist.util import valid_id, split_type_entity_id
from annalist.views.form_utils.fieldchoice import FieldChoice
from .entity_testentitydata import entity_url
# -----------------------------------------------------------------------------
#
# ----- Field choice generation support functions
#
# -----------------------------------------------------------------------------
# Convert list of ids and labels into list of field choices
def make_field_choices(options):
    """Convert (id, label) pairs into FieldChoice values (value == name == id)."""
    choices = []
    for value, label in options:
        choices.append(FieldChoice(value, value, label))
    return choices
def no_selection(label):
    """Return a single-element choice list representing 'no selection'."""
    empty_choice = FieldChoice("", "", label)
    return [empty_choice]
def id_from_field_choice(fc):
    """Return the bare entity id of field choice *fc*, with any type prefix dropped."""
    # split_type_entity_id returns (type_id, entity_id); keep only the entity id.
    return split_type_entity_id(fc.id)[1]
def add_link_to_field_choice(fc, coll_id, default_type_id=None):
    """Return *fc* with a link to its entity URL in collection *coll_id*.

    If the choice id carries no explicit type prefix, *default_type_id*
    is used as the entity's type.
    """
    type_id, entity_id = split_type_entity_id(fc.id, default_type_id=default_type_id)
    link = entity_url(coll_id, type_id, entity_id)
    return fc.add_link(link)
# -----------------------------------------------------------------------------
#
# ----- Site-wide entities defined
#
# -----------------------------------------------------------------------------
# ----- Types -----
site_types = (
[ FieldChoice("_type/_initial_values")
, FieldChoice("_type/_coll", label="Collection" )
, FieldChoice("_type/_enum_list_type", label="List display type" )
, FieldChoice("_type/_enum_render_type", label="Field render type" )
, FieldChoice("_type/_enum_value_mode", label="Field value mode" )
, FieldChoice("_type/_enum_value_type", label="Field value type" )
, FieldChoice("_type/_field", label="Field" )
, FieldChoice("_type/_group", label="Field group" )
, FieldChoice("_type/_info", label="General information" )
, FieldChoice("_type/_list", label="List" )
, FieldChoice("_type/_type", label="Type" )
, FieldChoice("_type/_user", label="User permissions" )
, FieldChoice("_type/_view", label="View" )
, FieldChoice("_type/_vocab", label="Namespace" )
, FieldChoice("_type/Default_type", label="Default record" )
])
def get_site_types_sorted():
    # Site-defined types in their listed (sorted) order, skipping the
    # "_initial_values" sentinel that occupies slot 0 of the table.
    return site_types[1:]
def get_site_types_linked(coll_id):
    # Same choices as get_site_types_sorted, but with each one linked to its
    # entity URL within collection `coll_id` (default type id "_type").
    return (
        [ add_link_to_field_choice(fc, coll_id, "_type")
            for fc in get_site_types_sorted()
        ])
def get_site_types():
    # Set of bare type ids (the "_type/" prefix stripped from each choice id).
    return set( ( id_from_field_choice(fc) for fc in get_site_types_sorted() ) )
site_bib_types = (
site_types[0:1]+
sorted(site_types[1:] +
[ FieldChoice("_type/BibEntry_type", label="Bibliographic record" )
, FieldChoice("_type/Enum_bib_type", label="Bibliographic entry type" )
])
)
def get_site_bib_types_sorted():
return site_bib_types[1:]
def get_site_bib_types_linked(coll_id):
return (
[ add_link_to_field_choice(fc, coll_id, "_type")
for fc in get_site_bib_types_sorted()
])
def get_site_bib_types():
return set( ( id_from_field_choice(fc) for fc in get_site_bib_types_sorted() ) )
site_schema_types = (
site_types[0:1]+
sorted(site_types[1:] +
[ FieldChoice("_type/Class", label="Class" )
, FieldChoice("_type/Datatype", label="Datatype" )
, FieldChoice("_type/Property", label="Property" )
])
)
def get_site_schema_types_sorted():
return site_schema_types[1:]
def get_site_schema_types_linked(coll_id):
return (
[ add_link_to_field_choice(fc, coll_id, "_type")
for fc in get_site_schema_types_sorted()
])
def get_site_schema_types():
return set( ( id_from_field_choice(fc) for fc in get_site_schema_types_sorted() ) )
# ----- Lists -----
site_lists = (
[ FieldChoice("_list/_initial_values")
, FieldChoice("_list/Default_list", label="List entities")
, FieldChoice("_list/Default_list_all", label="List entities with type information")
, FieldChoice("_list/Enum_list_all", label="List enumeration values and types")
, FieldChoice("_list/Field_group_list", label="Field groups")
, FieldChoice("_list/Field_list", label="Field definitions")
, FieldChoice("_list/Info_list", label="General information")
, FieldChoice("_list/List_list", label="List definitions")
, FieldChoice("_list/Type_list", label="Entity types")
, FieldChoice("_list/User_list", label="User permissions")
, FieldChoice("_list/View_list", label="View definitions")
, FieldChoice("_list/Vocab_list", label="Vocabulary namespaces")
])
def get_site_lists_sorted():
return site_lists[1:]
def get_site_lists_linked(coll_id):
return (
[ add_link_to_field_choice(fc, coll_id, "_list")
for fc in get_site_lists_sorted()
])
def get_site_lists():
return set( ( id_from_field_choice(fc) for fc in get_site_lists_sorted() ) )
site_bib_lists = (
site_lists[0:1] +
sorted(site_lists[1:] +
[ FieldChoice("_list/BibEntry_list", label="List bibliographic entries")
])
)
def get_site_bib_lists_sorted():
return site_bib_lists[1:]
def get_site_bib_lists_linked(coll_id):
return (
[ add_link_to_field_choice(fc, coll_id, "_list")
for fc in get_site_bib_lists_sorted()
])
def get_site_bib_lists():
return set( ( id_from_field_choice(fc) for fc in get_site_bib_lists_sorted() ) )
site_schema_lists = (
site_lists[0:1] +
sorted(site_lists[1:] +
[ FieldChoice("_list/Classes", label="Classes")
, FieldChoice("_list/Properties", label="Properties")
])
)
def get_site_schema_lists_sorted():
return site_schema_lists[1:]
def get_site_schema_lists_linked(coll_id):
return (
[ add_link_to_field_choice(fc, coll_id, "_list")
for fc in get_site_schema_lists_sorted()
])
def get_site_schema_lists():
return set( ( id_from_field_choice(fc) for fc in get_site_schema_lists_sorted() ) )
# ----- List types -----
site_list_types = (
[ FieldChoice("_enum_list_type/_initial_values")
, FieldChoice("_enum_list_type/Grid", label="Grid display")
, FieldChoice("_enum_list_type/List", label="List display")
])
def get_site_list_types_sorted():
return site_list_types[1:]
def get_site_list_types_linked(coll_id):
return (
[ add_link_to_field_choice(fc, coll_id, layout.ENUM_LIST_TYPE_ID)
for fc in get_site_list_types_sorted()
])
def get_site_list_types():
return set( ( fc.id for fc in get_site_list_types_sorted() ) )
# ----- Views -----
site_views = (
[ FieldChoice("_view/_initial_values")
, FieldChoice("_view/Collection_view", label="Collection metadata" )
, FieldChoice("_view/Default_view", label="Default record view" )
, FieldChoice("_view/Enum_view", label="Enumerated value view" )
, FieldChoice("_view/Field_group_view", label="Field group definition" )
, FieldChoice("_view/Field_view", label="Field definition" )
, FieldChoice("_view/Info_view", label="General information" )
, FieldChoice("_view/List_view", label="List definition" )
, FieldChoice("_view/Type_view", label="Type definition" )
, FieldChoice("_view/User_view", label="User permissions" )
, FieldChoice("_view/View_view", label="View definition" )
, FieldChoice("_view/Vocab_view", label="Vocabulary namespace" )
])
def get_site_views_sorted():
return site_views[1:]
def get_site_views_linked(coll_id):
return (
[ add_link_to_field_choice(fc, coll_id, "_view")
for fc in get_site_views_sorted()
])
def get_site_views():
return set( ( id_from_field_choice(fc) for fc in get_site_views_sorted() ) )
site_bib_views = (
site_views[0:1] +
sorted(site_views[1:] +
[ FieldChoice("_view/BibEntry_view", label="Bibliographic metadata" )
])
)
def get_site_bib_views_sorted():
return site_bib_views[1:]
def get_site_bib_views_linked(coll_id):
return (
[ add_link_to_field_choice(fc, coll_id, "_view")
for fc in get_site_bib_views_sorted()
])
def get_site_bib_views():
return set( ( id_from_field_choice(fc) for fc in get_site_bib_views_sorted() ) )
# ----- Field groups -----
site_field_groups = (
[ FieldChoice("_group/_initial_values")
])
def get_site_field_groups_sorted():
return site_field_groups[1:]
def get_site_field_groups_linked(coll_id):
return (
[ add_link_to_field_choice(fc, coll_id, "_group")
for fc in get_site_field_groups_sorted()
])
def get_site_field_groups():
return set( ( id_from_field_choice(fc) for fc in get_site_field_groups_sorted() ) )
# ----- Fields -----
site_defined_entity_fields = (
[ FieldChoice("_field/_initial_values")
, FieldChoice("_field/Entity_comment", label="Comment" )
, FieldChoice("_field/Entity_id", label="Id" )
, FieldChoice("_field/Entity_label", label="Label" )
, FieldChoice("_field/Entity_ref", label="Entity ref" )
, FieldChoice("_field/Entity_see_also", label="See also" )
, FieldChoice("_field/Entity_see_also_r", label="See also" )
, FieldChoice("_field/Entity_type", label="Type" )
, FieldChoice("_field/Entity_uri", label="Entity URI" )
])
site_default_entity_fields = (
[ fc
for fc in site_defined_entity_fields
if fc.id != "_field/Entity_see_also"
])
site_enum_fields = (
[ FieldChoice("_field/_initial_values")
, FieldChoice("_field/Enum_uri", label="Value URI" )
])
site_coll_fields = (
[ FieldChoice("_field/_initial_values")
, FieldChoice("_field/Coll_comment", label="Collection metadata" )
, FieldChoice("_field/Coll_default_list_id", label="Default list" )
, FieldChoice("_field/Coll_default_view_entity", label="Default view entity" )
, FieldChoice("_field/Coll_default_view_id", label="Default view" )
, FieldChoice("_field/Coll_default_view_type", label="Default view type" )
, FieldChoice("_field/Coll_parent", label="Parent" )
, FieldChoice("_field/Coll_software_version", label="S/W version" )
])
site_bibentry_fields = (
[ FieldChoice("_field/_initial_values")
, FieldChoice("_field/Bib_address", label="Address" )
, FieldChoice("_field/Bib_alternate", label="Alternate name" )
, FieldChoice("_field/Bib_authors", label="Author(s)" )
, FieldChoice("_field/Bib_bookentry", label="Book content" )
, FieldChoice("_field/Bib_booktitle", label="Book title" )
, FieldChoice("_field/Bib_chapter", label="Chapter" )
, FieldChoice("_field/Bib_description", label="Description" )
, FieldChoice("_field/Bib_edition", label="Edition" )
, FieldChoice("_field/Bib_editors", label="Editor(s)" )
, FieldChoice("_field/Bib_eprint", label="ePrint" )
, FieldChoice("_field/Bib_firstname", label="First name" )
, FieldChoice("_field/Bib_howpublished", label="How published" )
, FieldChoice("_field/Bib_id", label="Id" )
, FieldChoice("_field/Bib_idanchor", label="Anchor" )
, FieldChoice("_field/Bib_identifiers", label="Identifiers" )
, FieldChoice("_field/Bib_idtype", label="Identifier type" )
, FieldChoice("_field/Bib_institution", label="Institution" )
, FieldChoice("_field/Bib_journal", label="Journal" )
, FieldChoice("_field/Bib_jurisdiction", label="Jurisdiction" )
, FieldChoice("_field/Bib_lastname", label="Last name" )
, FieldChoice("_field/Bib_license", label="License" )
, FieldChoice("_field/Bib_month", label="Month" )
, FieldChoice("_field/Bib_name", label="Name" )
, FieldChoice("_field/Bib_note", label="Note" )
, FieldChoice("_field/Bib_number", label="Issue number" )
, FieldChoice("_field/Bib_organization", label="Organization" )
, FieldChoice("_field/Bib_pages", label="Pages" )
, FieldChoice("_field/Bib_publication_details", label="Publication details" )
, FieldChoice("_field/Bib_publisher", label="Publisher" )
, FieldChoice("_field/Bib_school", label="School" )
, FieldChoice("_field/Bib_shortcode", label="Short code" )
, FieldChoice("_field/Bib_title", label="Title" )
, FieldChoice("_field/Bib_type", label="Publication type" )
, FieldChoice("_field/Bib_url", label="URL" )
, FieldChoice("_field/Bib_volume", label="Volume" )
, FieldChoice("_field/Bib_year", label="Year" )
])
site_defined_field_fields = (
[ FieldChoice("_field/_initial_values")
, FieldChoice("_field/Field_default", label="Default value" )
, FieldChoice("_field/Field_entity_type", label="Entity type" )
, FieldChoice("_field/Field_fieldref", label="Refer to field" )
, FieldChoice("_field/Field_fields", label="Subfields" )
, FieldChoice("_field/Field_groupref", label="Field group" )
, FieldChoice("_field/Field_help", label="Help" )
, FieldChoice("_field/Field_id", label="Field Id" )
, FieldChoice("_field/Field_label", label="Field label" )
, FieldChoice("_field/Field_missing", label="(Missing)" )
, FieldChoice("_field/Field_placeholder", label="Placeholder" )
, FieldChoice("_field/Field_placement", label="Position/size" )
, FieldChoice("_field/Field_property", label="Property URI" )
, FieldChoice("_field/Field_render_type", label="Render type" )
, FieldChoice("_field/Field_repeat_label_add", label="Add value label" )
, FieldChoice("_field/Field_repeat_label_delete", label="Remove value label" )
, FieldChoice("_field/Field_restrict", label="Value restriction" )
, FieldChoice("_field/Field_superproperty_uri", label="Superproperty URI" )
, FieldChoice("_field/Field_superproperty_uris", label="Superproperty URIs" )
, FieldChoice("_field/Field_tooltip", label="Tooltip" )
, FieldChoice("_field/Field_typeref", label="Refer to type" )
, FieldChoice("_field/Field_value_mode", label="Value mode" )
, FieldChoice("_field/Field_value_type", label="Value type" )
])
site_field_fields = (
[ fc for fc in site_defined_field_fields if
fc.id not in {"_field/Field_superproperty_uri"} ]
)
site_field_subfield_fields = (
[ FieldChoice("_field/_initial_values")
, FieldChoice("_field/Field_subfield_placement", label="Subfield Pos/size" )
, FieldChoice("_field/Field_subfield_property", label="Subfield URI" )
, FieldChoice("_field/Field_subfield_sel", label="Subfield ref" )
])
site_field_all_fields = (
[ FieldChoice("_field/_initial_values")]
+ sorted(site_defined_field_fields[1:] + site_field_subfield_fields[1:])
)
site_defined_group_fields = (
[ FieldChoice("_field/_initial_values")
, FieldChoice("_field/Group_comment", label="Help" )
, FieldChoice("_field/Group_entity_type", label="Group entity type" )
, FieldChoice("_field/Group_field_placement", label="Position/size" )
, FieldChoice("_field/Group_field_property", label="Property URI" )
, FieldChoice("_field/Group_field_sel", label="Field ref" )
, FieldChoice("_field/Group_fields", label="Fields" )
, FieldChoice("_field/Group_id", label="Group Id" )
, FieldChoice("_field/Group_label", label="Label" )
])
site_group_fields = (
[ fc
for fc in site_defined_group_fields
if fc.id not in
[ "_field/Group_field_placement"
, "_field/Group_field_property"
, "_field/Group_field_sel"
]
])
site_group_field_group_fields = (
[ FieldChoice("_field/_initial_values")
, FieldChoice("_field/Group_field_placement", label="Position/size" )
, FieldChoice("_field/Group_field_property", label="Property URI" )
, FieldChoice("_field/Group_field_sel", label="Field ref" )
])
site_defined_list_fields = (
[ FieldChoice("_field/_initial_values")
, FieldChoice("_field/List_choice", label="List view" )
, FieldChoice("_field/List_comment", label="Help" )
, FieldChoice("_field/List_default_type", label="Default type" )
, FieldChoice("_field/List_default_view", label="Default view" )
, FieldChoice("_field/List_entity_selector", label="Selector" )
, FieldChoice("_field/List_entity_type", label="List entity type" )
, FieldChoice("_field/List_field_placement", label="Position/size" )
, FieldChoice("_field/List_field_property", label="Property URI" )
, FieldChoice("_field/List_field_sel", label="Field ref" )
, FieldChoice("_field/List_fields", label="Fields" )
, FieldChoice("_field/List_id", label="List Id" )
, FieldChoice("_field/List_label", label="Label" )
, FieldChoice("_field/List_type", label="List display type" )
])
site_list_fields = (
[ fc
for fc in site_defined_list_fields
if fc.id not in
[ "_field/List_choice"
, "_field/List_field_placement"
, "_field/List_field_property"
, "_field/List_field_sel"
]
])
site_list_field_group_fields = (
[ FieldChoice("_field/_initial_values")
, FieldChoice("_field/List_field_placement", label="Position/size" )
, FieldChoice("_field/List_field_property", label="Property URI" )
, FieldChoice("_field/List_field_sel", label="Field ref" )
])
site_defined_type_fields = (
[ FieldChoice("_field/_initial_values")
, FieldChoice("_field/Type_alias_source", label="Field alias value" )
, FieldChoice("_field/Type_alias_target", label="Field alias name" )
, FieldChoice("_field/Type_aliases", label="Field aliases" )
, FieldChoice("_field/Type_comment", label="Comment" )
, FieldChoice("_field/Type_id", label="Type Id" )
, FieldChoice("_field/Type_label", label="Label" )
, FieldChoice("_field/Type_list", label="Default list" )
, FieldChoice("_field/Type_prefix", label="Namespace prefix" )
, FieldChoice("_field/Type_supertype_uri", label="Supertype URI" )
, FieldChoice("_field/Type_supertype_uris", label="Supertype URIs" )
, FieldChoice("_field/Type_uri", label="Type URI" )
, FieldChoice("_field/Type_view", label="Default view" )
])
site_type_fields = (
[ fc for fc in site_defined_type_fields if
fc.id not in {"_field/Type_alias_source", "_field/Type_alias_target", "_field/Type_supertype_uri"} ]
)
# site_type_supertype_uris_fields = (
# [ FieldChoice("_field/_initial_values")
# , FieldChoice("_field/Type_supertype_uri", label="Supertype URI" )
# , FieldChoice("_field/Type_supertype_uris", label="Supertype URIs" )
# ])
# site_type_aliases_fields = (
# [ FieldChoice("_field/_initial_values")
# , FieldChoice("_field/Type_alias_source", label="Type alias source" )
# , FieldChoice("_field/Type_alias_target", label="Type alias target" )
# ])
site_user_fields = (
[ FieldChoice("_field/_initial_values")
, FieldChoice("_field/User_description", label="Description" )
, FieldChoice("_field/User_id", label="User Id" )
, FieldChoice("_field/User_name", label="User name" )
, FieldChoice("_field/User_permissions", label="Permissions" )
, FieldChoice("_field/User_uri", label="User URI" )
])
site_defined_view_fields = (
[ FieldChoice("_field/_initial_values")
, FieldChoice("_field/View_choice", label="Choose view" )
, FieldChoice("_field/View_comment", label="Help" )
, FieldChoice("_field/View_edit_view", label="Editable view?" )
, FieldChoice("_field/View_entity_type", label="View entity type" )
, FieldChoice("_field/View_field_placement", label="Position/size" )
, FieldChoice("_field/View_field_property", label="Property URI" )
, FieldChoice("_field/View_field_sel", label="Field ref" )
, FieldChoice("_field/View_fields", label="Fields" )
, FieldChoice("_field/View_id", label="View Id" )
, FieldChoice("_field/View_label", label="Label" )
])
site_view_fields = (
[ fc
for fc in site_defined_view_fields
if fc.id not in
[ "_field/View_choice"
, "_field/View_field_placement"
, "_field/View_field_property"
, "_field/View_field_sel"
]
])
site_view_field_group_fields = (
[ FieldChoice("_field/_initial_values")
, FieldChoice("_field/View_field_placement", label="Position/size" )
, FieldChoice("_field/View_field_property", label="Property URI" )
, FieldChoice("_field/View_field_sel", label="Field ref" )
])
site_vocab_fields = (
[ FieldChoice("_field/_initial_values")
, FieldChoice("_field/Vocab_id", label="Prefix" )
, FieldChoice("_field/Vocab_uri", label="Vocabulary URI" )
])
site_fields = (
[ FieldChoice("_field/_initial_values")] +
# site_bibentry_fields[1:] +
site_coll_fields[1:] +
site_defined_entity_fields[1:] +
site_enum_fields[1:] +
site_field_all_fields[1:] +
site_defined_group_fields[1:] +
site_defined_list_fields[1:] +
site_defined_type_fields[1:] +
site_user_fields[1:] +
site_defined_view_fields[1:] +
site_vocab_fields[1:] +
[])
def get_site_fields_sorted():
return site_fields[1:]
def get_site_fields():
return set( ( id_from_field_choice(fc) for fc in get_site_fields_sorted() ) )
def get_site_default_entity_fields_sorted():
return site_default_entity_fields[1:]
def get_site_default_entity_fields_linked(coll_id):
return (
[ add_link_to_field_choice(fc, coll_id, "_field")
for fc in get_site_default_entity_fields_sorted()
])
def get_site_default_entity_fields():
    """Return the set of bare field ids shown on the default entity view.

    Fix: this function was defined twice, identically, in the original
    source; the redundant second definition (which silently shadowed the
    first) has been removed.
    """
    return set( ( id_from_field_choice(fc) for fc in get_site_default_entity_fields_sorted() ) )
def get_site_view_fields_sorted():
return site_default_entity_fields[1:] + site_view_fields[1:]
def get_site_view_fields():
return set( ( id_from_field_choice(fc) for fc in get_site_view_fields_sorted() ) )
def get_site_field_fields_sorted():
return site_default_entity_fields[1:] + site_field_fields[1:]
def get_site_field_fields():
return set( ( id_from_field_choice(fc) for fc in get_site_field_fields_sorted() ) )
def get_site_group_fields_sorted():
return site_default_entity_fields[1:] + site_group_fields[1:]
def get_site_group_fields():
return set( ( id_from_field_choice(fc) for fc in get_site_group_fields_sorted() ) )
def get_site_list_fields_sorted():
return site_default_entity_fields[1:] + site_list_fields[1:]
def get_site_list_fields():
return set( ( id_from_field_choice(fc) for fc in get_site_list_fields_sorted() ) )
def get_site_type_fields_sorted():
return site_default_entity_fields[1:] + site_type_fields[1:]
def get_site_type_fields():
return set( ( id_from_field_choice(fc) for fc in get_site_type_fields_sorted() ) )
def get_site_user_fields_sorted():
return site_default_entity_fields[1:] + site_user_fields[1:]
def get_site_user_fields():
return set( ( id_from_field_choice(fc) for fc in get_site_user_fields_sorted() ) )
def get_site_vocab_fields_sorted():
return (
site_default_entity_fields[1:] +
site_vocab_fields[1:]
)
def get_site_vocab_fields():
return set( ( id_from_field_choice(fc) for fc in get_site_vocab_fields_sorted() ) )
def get_site_bibentry_fields_sorted():
return site_bibentry_fields[1:] + site_default_entity_fields[1:]
def get_site_bibentry_fields():
return set( ( id_from_field_choice(fc) for fc in get_site_bibentry_fields_sorted() ) )
# ----- Field render types -----
site_field_types = (
[ FieldChoice("_enum_render_type/_initial_values")
, FieldChoice("_enum_render_type/CheckBox", label="CheckBox" )
, FieldChoice("_enum_render_type/Codearea", label="Monospace text" )
, FieldChoice("_enum_render_type/EntityId", label="Entity Id" )
, FieldChoice("_enum_render_type/EntityRef", label="Local entity ref" )
, FieldChoice("_enum_render_type/EntityTypeId", label="Entity type Id" )
, FieldChoice("_enum_render_type/Enum", label="Required/new entity reference")
, FieldChoice("_enum_render_type/Enum_choice", label="Required entity reference" )
, FieldChoice("_enum_render_type/Enum_choice_opt", label="Optional entity reference" )
, FieldChoice("_enum_render_type/Enum_optional", label="Optional/new entity reference")
, FieldChoice("_enum_render_type/FileUpload", label="File upload" )
, FieldChoice("_enum_render_type/Group_Seq", label="Field group sequence" )
, FieldChoice("_enum_render_type/Group_Seq_Row", label="Field group sequence as table")
, FieldChoice("_enum_render_type/Group_Set", label="Field group set" )
, FieldChoice("_enum_render_type/Group_Set_Row", label="Field group set as table" )
, FieldChoice("_enum_render_type/Identifier", label="Identifier" )
, FieldChoice("_enum_render_type/LangText", label="Language text" )
, FieldChoice("_enum_render_type/Markdown", label="Markdown rich text" )
, FieldChoice("_enum_render_type/Placement", label="Position/size" )
, FieldChoice("_enum_render_type/RefAudio", label="Audio clip reference" )
, FieldChoice("_enum_render_type/RefImage", label="Image reference" )
, FieldChoice("_enum_render_type/RefMultifield", label="Fields of referenced entity" )
, FieldChoice("_enum_render_type/RepeatGroup", label="Repeating field group "+
"(@@use Group_Seq 'Field group sequence')")
, FieldChoice("_enum_render_type/RepeatGroupRow", label="Repeating fields as table "+
"(@@use Group_Seq_Row 'Field group sequence as table')")
, FieldChoice("_enum_render_type/ShowMarkdown", label="Display Markdown rich text" )
, FieldChoice("_enum_render_type/Showtext", label="Display text" )
, FieldChoice("_enum_render_type/Text", label="Short text" )
, FieldChoice("_enum_render_type/Textarea", label="Multiline text" )
, FieldChoice("_enum_render_type/TokenSet", label="Space-separated tokens" )
, FieldChoice("_enum_render_type/URIImport", label="Web import" )
, FieldChoice("_enum_render_type/URILink", label="Web link" )
, FieldChoice("_enum_render_type/View_choice", label="Choose view" )
])
def get_site_field_types_sorted():
return site_field_types[1:]
def get_site_field_types_linked(coll_id):
return (
[ add_link_to_field_choice(fc, coll_id, "_enum_render_type")
for fc in get_site_field_types_sorted()
])
def get_site_field_types():
return set( ( id_from_field_choice(fc) for fc in get_site_field_types_sorted() ) )
# ----- General information records -----
site_infos = (
[ FieldChoice("_info/_initial_values")
, FieldChoice("_info/About", label="About this site")
, FieldChoice("_info/About_initial", label="About this site")
, FieldChoice("_info/Contact", label="Contact information")
, FieldChoice("_info/Contact_initial", label="Contact information")
, FieldChoice("_info/Sitemap", label="Navigating this site")
, FieldChoice("_info/Sitemap_initial", label="Navigating this site")
])
def get_site_infos_sorted():
return site_infos[1:]
def get_site_infos_linked(coll_id):
return (
[ add_link_to_field_choice(fc, coll_id, layout.ENUM_LIST_TYPE_ID)
for fc in get_site_infos_sorted()
])
def get_site_infos():
return set( ( fc.id for fc in get_site_infos_sorted() ) )
# ----- Users -----
site_users = (
[ FieldChoice("_user/_default_user_perms", label="Default permissions")
, FieldChoice("_user/_unknown_user_perms", label="Unknown user")
])
def get_site_users_sorted():
return site_users
def get_site_users_linked(coll_id):
return (
[ add_link_to_field_choice(fc, coll_id, "_vocab")
for fc in get_site_users_sorted()
])
def get_site_users():
return set( ( id_from_field_choice(fc) for fc in get_site_users_sorted() ) )
# ----- Vocabulary namespaces -----
site_vocabs = (
[ FieldChoice("_vocab/_initial_values")
, FieldChoice("_vocab/annal", label="Vocabulary namespace for Annalist-defined terms")
, FieldChoice("_vocab/owl", label="OWL ontology namespace")
, FieldChoice("_vocab/rdf", label="RDF core namespace")
, FieldChoice("_vocab/rdfs", label="RDF schema namespace")
, FieldChoice("_vocab/xsd", label="XML Schema datatypes namespace")
])
def get_site_vocabs_sorted():
return site_vocabs[1:]
def get_site_vocabs_linked(coll_id):
return (
[ add_link_to_field_choice(fc, coll_id, "_vocab")
for fc in get_site_vocabs_sorted()
])
def get_site_vocabs():
return set( ( id_from_field_choice(fc) for fc in get_site_vocabs_sorted() ) )
# ----- Field value mode types -----
site_value_modes = (
[ FieldChoice("_enum_value_mode/_initial_values")
, FieldChoice("_enum_value_mode/Value_direct", label="Direct value" )
, FieldChoice("_enum_value_mode/Value_entity", label="Entity reference" )
, FieldChoice("_enum_value_mode/Value_field", label="Field reference" )
, FieldChoice("_enum_value_mode/Value_import", label="Import from web" )
, FieldChoice("_enum_value_mode/Value_upload", label="File upload" )
])
def get_site_value_modes_sorted():
return site_value_modes[1:]
def get_site_value_modes_linked(coll_id):
return (
[ add_link_to_field_choice(fc, coll_id, "_enum_value_mode")
for fc in get_site_value_modes_sorted()
])
def get_site_value_modes():
return set( ( id_from_field_choice(fc) for fc in get_site_value_modes_sorted() ) )
# ----- Field value types -----
site_value_types = (
[ FieldChoice("_enum_value_type/_initial_values")
, FieldChoice("_enum_value_type/Longtext", label="annal:Longtext" )
, FieldChoice("_enum_value_type/Text", label="annal:Text" )
])
def get_site_value_types_sorted():
return site_value_types[1:]
def get_site_value_types_linked(coll_id):
return (
[ add_link_to_field_choice(fc, coll_id, "_enum_value_type")
for fc in get_site_value_types_sorted()
])
def get_site_value_types():
return set( ( id_from_field_choice(fc) for fc in get_site_value_types_sorted() ) )
# ----- All site entities (including test collection data) -----

# Fixture record types defined in the test collection "testcoll".
test_types = (
    [ FieldChoice("_type/testtype", label="RecordType testcoll/_type/testtype")
    , FieldChoice("_type/testtype2", label="RecordType testcoll/_type/testtype2")
    ])

def get_test_types_sorted():
    # No placeholder entry here, so the full list is returned unchanged.
    return test_types

# Fixture users defined in the test collection.
test_users = (
    [ FieldChoice("_user/testuser", label="Test User")
    ])

def get_test_users_sorted():
    return test_users

# Fixture entity records defined in the test collection.
test_entities = (
    [ FieldChoice("testtype/entity1", label="Entity testcoll/testtype/entity1")
    , FieldChoice("testtype/entity2", label="Entity testcoll/testtype/entity2")
    , FieldChoice("testtype/entity3", label="Entity testcoll/testtype/entity3")
    , FieldChoice("testtype2/entity4", label="Entity testcoll/testtype2/entity4")
    ])

def get_test_entities_sorted():
    return test_entities
# Combined list of all site and test-collection entity choices.
# NOTE(review): the concatenation order is part of the fixture contract
# relied on by callers of get_site_entities_sorted() -- do not reorder.
site_entities = (
    get_site_list_types_sorted() +
    get_site_field_types_sorted() + # @@TODO: change to render_types
    get_site_value_modes_sorted() +
    get_site_value_types_sorted() +
    get_site_fields_sorted() +
    get_site_field_groups_sorted() +
    get_site_infos_sorted() +
    get_site_lists_sorted() +
    get_site_types_sorted() +
    get_test_types_sorted() +
    get_site_users_sorted() +
    get_test_users_sorted() +
    get_site_views_sorted() +
    get_site_vocabs_sorted() +
    get_test_entities_sorted()
    )

def get_site_entities_sorted():
    # All site/test entity choices in fixture order.
    return site_entities

def get_site_entities():
    # Set of identifiers for all site/test entities.
    return set( ( id_from_field_choice(fc) for fc in get_site_entities_sorted() ) )
# End.
| StarcoderdataPython |
1655642 | <gh_stars>10-100
import threading
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib.patches as patches
from matplotlib.collections import PatchCollection
import numpy as np
import time
from pylab import *
import simulator
import Util
from geometry_msgs.msg import Pose, Twist
from simulator_util import DraggablePoint
# def remove_image(img):
# # type: (mpimg.AxesImage) -> None
# img.remove()
# img.figure.canvas.draw()
#
#
# def draw_rectangles(fig, obstacles, origin, resolution, colors):
# ax = fig.add_subplot(111, aspect='equal')
# patchs = []
# for index, elem in np.ndenumerate(obstacles):
# if elem != 0:
# pose = tuple(np.array(origin) + np.array(index)*resolution)
# patchs.append(patches.Rectangle(pose, resolution, resolution, color=(0,0,0)))
# pc = PatchCollection(patchs)
# ax.add_collection(pc)
# return ax
#
#
# def remove_plot(fig, ax):
# Util.tic()
# fig.delaxes(ax)
# ax.figure.canvas.draw()
# print Util.toc()
#
#
# def create_robot(sim, pose):
# # type: (simulator.Simulator, list) -> None
# pose_obj = Pose()
# pose_obj.position.x = pose[0]
# pose_obj.position.y = pose[1]
# sim.create_robot("a", pose_obj)
#
#
# def get_points(occ_grid, origin, resolution):
# points_x = []
# points_y = []
# origin = np.array(origin)
# for index, elem in np.ndenumerate(occ_grid):
# if elem != 0:
# points_x.append(origin + np.array(index[0])*resolution)
# points_y.append(origin + np.array(index[1])*resolution)
#
# return points_x, points_y
# The original code loaded a background image from an absolute path that
# only exists on the author's machine; the load is commented out, but the
# variable it produced (`image`) was still referenced below, raising a
# NameError.  Display the random test image actually created instead.
# image = plt.imread("/home/lady/Pictures/hex1.jpg")
randimg = np.random.rand(100, 100)
img = plt.imshow(randimg, extent=[4, 16, 5, 25], zorder=0, interpolation='nearest')

sim = simulator.Simulator()

# Robot "0": green, starting at (5, 5).
pose1 = Pose()
pose1.position.x = 5
pose1.position.y = 5
sim.create_robot("0", pose1, [0, 255, 0])

# Robot "1": red, starting at (6, 8).
pose2 = Pose()
pose2.position.x = 6
pose2.position.y = 8
sim.create_robot("1", pose2, [255, 0, 0])

# Build an RGBA overlay whose alpha channel is a random 0/1 occupancy mask.
ones_matrix = np.zeros((40, 40, 4))
rand_matrix = np.around(np.random.rand(40, 40))
ones_matrix[:, :, 3] = rand_matrix
sim.plot_image(ones_matrix, [0, 20, 0, 20])

plt.draw()
plt.show()
# fig = plt.figure(1)
# sim = simulator.Simulator()
# rand_matrix = np.round(np.random.rand(80, 80))
# Util.tic()
# ax = draw_rectangles(fig, rand_matrix, [0, 0], 0.25)
# print Util.toc()
# threading.Timer(5, create_robot, [sim, [5, 6]]).start()
# show()
| StarcoderdataPython |
105968 | import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn.functional as F
from torch import nn
import os
import matplotlib.pyplot as plt
import sys
import numpy as np
sys.path.append('./')
from args import opt
sys.path.append('./lib/')
from dataset import FruitFlyNeuronDataset
from cv2transforms import composed_transforms, RandomExtract
from utils import show_img, show_patches, initialize_weights
# Patch-extraction configuration taken from command-line options.
patch_h = int(opt.patch_height)
patch_w = int(opt.patch_width)
N_patches = int(opt.N_subimgs)
root_dir = opt.root_dir
# Map each data subdirectory name to its full path under root_dir.
training_dir = {_: os.path.join(root_dir, _)
                for _ in ['images', '1st_manual', 'mask']}
compose = composed_transforms()

# imgs_dataset = FruitFlyNeuronDataset(root_dir=training_dir, transforms=compose)
# comp_imgs_dataset = FruitFlyNeuronDataset(root_dir=training_dir)
# for i in range(len(imgs_dataset)):
#     show_img(imgs_dataset[i])
#     show_img(comp_imgs_dataset[i])
#     if i == 3:
#         plt.show()
#         break

training_dataset = FruitFlyNeuronDataset(
    root_dir=training_dir, transforms=compose)
# Load all images/masks into dense arrays.
# NOTE(review): assumes exactly 20 images of size 584x565 -- confirm this
# matches len(training_dataset) and the dataset's image dimensions.
full_imgs = np.empty((20, 584, 565))
full_masks = np.empty((20, 584, 565))
for i in range(len(training_dataset)):
    full_imgs[i] = training_dataset[i]['images']
    full_masks[i] = training_dataset[i]['mask']
# Add a trailing channel axis, then reorder to NCHW layout.
full_imgs = np.reshape(full_imgs, (20, 584, 565, 1)).transpose((0, 3, 1, 2))
full_masks = np.reshape(full_masks, (20, 584, 565, 1)).transpose((0, 3, 1, 2))
# Randomly extract N_patches patch/mask pairs and display them.
rx = RandomExtract(patch_h=patch_h, patch_w=patch_w, N_patches=N_patches)
patches, patches_masks = rx(full_imgs=full_imgs, full_masks=full_masks)
show_patches(patches, patches_masks)
class _EncoderBlock(nn.Module):
    """U-Net encoder stage: two unpadded 3x3 conv+BN+ReLU layers,
    optional dropout, then a 2x2 max-pool downsample."""

    def __init__(self, in_channels, out_channels, dropout=False):
        super(_EncoderBlock, self).__init__()
        head = [
            nn.Conv2d(in_channels, out_channels, kernel_size=3),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=3),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        ]
        # Dropout (when requested) sits between the conv stack and the pool,
        # matching the original layer ordering.
        tail = [nn.Dropout()] if dropout else []
        tail.append(nn.MaxPool2d(kernel_size=2, stride=2))
        self.encode = nn.Sequential(*(head + tail))

    def forward(self, x):
        return self.encode(x)
class _DecoderBlock(nn.Module):
    """U-Net decoder stage: two unpadded 3x3 conv+BN+ReLU layers followed
    by a 2x2 stride-2 transposed-conv upsample."""

    def __init__(self, in_channels, middle_channels, out_channels):
        super(_DecoderBlock, self).__init__()
        stages = []
        stages.append(nn.Conv2d(in_channels, middle_channels, kernel_size=3))
        stages.append(nn.BatchNorm2d(middle_channels))
        stages.append(nn.ReLU(inplace=True))
        stages.append(nn.Conv2d(middle_channels, middle_channels, kernel_size=3))
        stages.append(nn.BatchNorm2d(middle_channels))
        stages.append(nn.ReLU(inplace=True))
        stages.append(nn.ConvTranspose2d(middle_channels, out_channels,
                                         kernel_size=2, stride=2))
        self.decode = nn.Sequential(*stages)

    def forward(self, x):
        return self.decode(x)
# U-Net model (Ronneberger et al., 2015).  The unpadded 3x3 convolutions
# shrink feature maps, so skip connections are resized bilinearly before
# being concatenated with the decoder features.
class UNet(nn.Module):
    def __init__(self, num_classes):
        """Build a U-Net mapping 3-channel inputs to `num_classes` score maps."""
        super(UNet, self).__init__()
        self.enc1 = _EncoderBlock(3, 64)
        self.enc2 = _EncoderBlock(64, 128)
        self.enc3 = _EncoderBlock(128, 256)
        self.enc4 = _EncoderBlock(256, 512, dropout=True)
        self.center = _DecoderBlock(512, 1024, 512)
        self.dec4 = _DecoderBlock(1024, 512, 256)
        self.dec3 = _DecoderBlock(512, 256, 128)
        self.dec2 = _DecoderBlock(256, 128, 64)
        self.dec1 = nn.Sequential(
            nn.Conv2d(128, 64, kernel_size=3),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.Conv2d(64, 64, kernel_size=3),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
        )
        # 1x1 conv produces per-pixel class scores.
        self.final = nn.Conv2d(64, num_classes, kernel_size=1)
        initialize_weights(self)

    def forward(self, x):
        enc1 = self.enc1(x)
        enc2 = self.enc2(enc1)
        enc3 = self.enc3(enc2)
        enc4 = self.enc4(enc3)
        center = self.center(enc4)
        # F.upsample is a deprecated alias of F.interpolate; align_corners=False
        # is the library default, stated explicitly to silence the warning.
        dec4 = self.dec4(torch.cat(
            [center, F.interpolate(enc4, center.size()[2:], mode='bilinear', align_corners=False)], 1))
        dec3 = self.dec3(torch.cat(
            [dec4, F.interpolate(enc3, dec4.size()[2:], mode='bilinear', align_corners=False)], 1))
        dec2 = self.dec2(torch.cat(
            [dec3, F.interpolate(enc2, dec3.size()[2:], mode='bilinear', align_corners=False)], 1))
        dec1 = self.dec1(torch.cat(
            [dec2, F.interpolate(enc1, dec2.size()[2:], mode='bilinear', align_corners=False)], 1))
        final = self.final(dec1)
        # Resize the logits back to the input's spatial size.
        return F.interpolate(final, x.size()[2:], mode='bilinear', align_corners=False)
# Smoke check: build a 10-class U-Net and print its layer structure.
net = UNet(10)
print(net)
| StarcoderdataPython |
1712985 | <filename>my_university_api/application/api/mongodb/routes.py
# This file contain all routes of mongodb
####################################################################
# import
####################################################################
from flask_restx import Resource, reqparse, fields # to use Resource, that expose http request method
from application.api.mongodb.mongo_functions import *
from application.api.mongodb.models import *
@mongodb.route("/send_message")
class insertMessage(Resource):
@mongodb.expect(send_message_model)
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('id_conversation', type=str, help='id_conversation')
parser.add_argument('matricola_mittente', type=str, help='matricola_mittente')
parser.add_argument('matricola_destinatario', type=str, help='matricola_destinatario')
parser.add_argument('messaggio', type=str, help='messaggio')
args = parser.parse_args(strict=True)
send_message(args['id_conversation'], args['matricola_mittente'], args['matricola_destinatario'], args['messaggio'])
return args, 201
@mongodb.route("/create_new_conversation")
class createConversation(Resource):
@mongodb.expect(conversation_model)
@mongodb.marshal_with(id_conversation_model)
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('matricola1', type=str, help='matricola1')
parser.add_argument('matricola2', type=str, help='matricola2')
args = parser.parse_args(strict=True)
return create_conversation(args['matricola1'], args['matricola2']), 201
@mongodb.route('/get_all_conversations')
class GetAllConversations(Resource):
    @mongodb.expect(freshman_model)
    @mongodb.marshal_with(full_conversation_model)
    def post(self):
        """Return every conversation involving the given student number."""
        rp = reqparse.RequestParser()
        rp.add_argument('matricola', type=str, help='mat')
        payload = rp.parse_args(strict=True)
        return get_all_conversations(payload['matricola']), 201
@mongodb.route("/insert_discipline_color")
class insertDisciplineColor(Resource):
@mongodb.expect(discipline_color_model)
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('codice_corso', type=str, help='codice_corso')
parser.add_argument('codice_disciplina', type=str, help='codice_disciplina')
parser.add_argument('colore_esadecimale', type=str, help='colore_esadecimale')
args = parser.parse_args(strict=True)
return set_teach_color(args['codice_corso'], args['codice_disciplina'], args['colore_esadecimale']), 201
@mongodb.route('/get_all_colors')
class GetAllColors(Resource):
    @mongodb.marshal_with(discipline_color_model)
    def post(self):
        # Return every stored discipline/colour mapping.
        return get_all_teach_colors(), 201
3210610 | <gh_stars>0
from abc import ABC, abstractmethod
class Parser(ABC):
    """Abstract base class for training-data parsers.

    Concrete subclasses must provide a factory method and loaders for the
    training data and its specification; delimiter bookkeeping is shared.
    (Dead `pass` statements after real bodies were removed; behaviour is
    unchanged.)
    """

    def __init__(self):
        # Maps a delimiter's purpose (e.g. "field", "record") to the
        # delimiter string itself.
        self.delimiters = {}

    @classmethod
    @abstractmethod
    def getParserInstance(cls):
        """Return a parser instance (factory method)."""

    @abstractmethod
    def setDelimitersUsingDictionary(self, delimitersPurposeMappedToDelimiter: dict):
        """Replace the delimiter mapping; subclasses may extend via super()."""
        self.delimiters = delimitersPurposeMappedToDelimiter

    @abstractmethod
    def loadTrainingDataAndSpecificationFromDictionaryOfFiles(self, filesPurposeMappedToPath: dict):
        """Load training data and its specification from the given files."""

    @abstractmethod
    def loadTrainingDataSpecificationFromFile(self, filesAndPurposeMappedToPath: dict):
        """Load only the training-data specification from the given files."""
3276101 | <gh_stars>0
from flask import Flask, jsonify, request, abort, session, redirect, url_for, escape, send_file
from flask_cors import CORS
import os
import bcrypt
from pymongo import MongoClient
from bson.objectid import ObjectId
import bson.errors
from jsonschema import validate, exceptions
import datetime
import pandas as pd
import dbworker
import mailsane
from schemaprovider import SchemaFactory
import spreadSheetHandler
from xlrd import open_workbook, XLRDError
import reportgen
from dateutil.parser import parse
import sys
import config
# Start the app and setup the static directory for the html, css, and js files.
STATIC_FOLDER = config.STATIC_FOLDER
app = Flask(__name__, static_url_path='', static_folder=STATIC_FOLDER)
CORS(app)

# DO NOT SHOW THIS PUBLICLY. THIS SHOULD BE HIDDEN IF CODE
# IS MADE PUBLIC
# THIS IS USED FOR THE SESSION COOKIE ENCRYPTION
# NOTE(review): the value comes from config.py -- ensure a real secret is
# never committed to version control.
app.secret_key = config.SECRET_KEY

# Turn this to False when properly deploying to make sure that all
# debugging routes are shut off.
ENABLE_DEBUG_ROUTES = config.ENABLE_DEBUG_ROUTES
@app.route('/favicon.ico')
def favicon():
    # Serve the favicon from the configured static folder.
    return app.send_static_file('favicon.ico')
@app.route('/api/authenticate', methods=['POST'])
@app.route('/authenticate', methods=['POST'])
def authenticate():
    """Log a user in.

    Expects JSON {email, password}; on success stores the normalized email
    in the session and returns {userType, success: True}.
    """
    body = request.json
    if body is None or 'email' not in body or 'password' not in body:
        abort(400)
    email = mailsane.normalize(body['email'])
    if email.error:
        abort(400)
    if not dbworker.validateCredentials(str(email), body['password']):
        abort(401)
    session['email'] = str(email)
    return jsonify({'userType': dbworker.getUserType(str(email)), 'success': True})
@app.route('/api/logout')
@app.route('/logout')
def logout():
    # End the current session and redirect back to the index page.
    if 'email' not in session:
        abort(400) # Bad request: nobody is logged in
    session.pop('email', None)
    return redirect(url_for('index',_external=True,_scheme='https'))
@app.route('/api/updatepassword', methods=['POST'])
@app.route('/updatepassword', methods=['POST'])
def updatePassword():
    """Set a user's password.

    Expects JSON {email, password}.  Allowed for admins, or for a logged-in
    user changing their own password (the behaviour the original comment
    described but the code did not implement).
    """
    if request.json is None:
        abort(400)
    for x in ['email', 'password']:
        if x not in request.json:
            abort(400)
    # Guard against anonymous callers: the original indexed session['email']
    # directly, which raised KeyError (HTTP 500) when no one was logged in.
    if 'email' not in session:
        abort(401)
    emailSess = mailsane.normalize(session['email'])
    if emailSess.error:
        abort(400)
    email = mailsane.normalize(request.json['email'])
    if email.error:
        abort(400)
    # Admins may change anyone's password; other users only their own.
    if not dbworker.validateAccess(dbworker.userTypeMap['admin']) and str(emailSess) != str(email):
        abort(401)
    if dbworker.getUser(str(email)) is None:
        abort(404)
    dbworker.setPassword(str(email), request.json['password'])
    return jsonify({'success': True})
@app.route('/api/admin/getclasses')
def getAllClasses():
    """
    Returns a list of class ids from the database
    """
    # Admin-only: every class in the database, regardless of membership.
    if not dbworker.validateAccess(dbworker.userTypeMap['admin']):
        abort(401)
    return jsonify({'classList' : dbworker.getAllClasses()})
@app.route('/api/getclasses')
@app.route('/getclasses')
def getClasses():
    """Return the class ids associated with the logged-in user."""
    # session.get covers both "key missing" and "value is None".
    if session.get('email') is None:
        abort(401)
    normalized = mailsane.normalize(session['email'])
    if normalized.error:
        abort(400)
    return jsonify({'classList': dbworker.getClasses(str(normalized)), 'success': True})
@app.route('/api/getactiveclasses')
@app.route('/getactiveclasses')
def getActiveClasses():
    """Return the ids of the logged-in user's classes that are still ongoing."""
    if session.get('email') is None:
        abort(401)
    normalized = mailsane.normalize(session['email'])
    if normalized.error:
        abort(400)
    active = dbworker.getClasses(str(normalized), filt={'ongoing': True})
    return jsonify({'classList': active, 'success': True})
@app.route('/api/whoami', methods=['GET'])
@app.route('/whoami', methods=['GET'])
def getFullName():
    """Return the logged-in user's first/last name, or None/None when anonymous."""
    user = dbworker.getCurrentUser()
    found = user is not None
    return jsonify({
        'firstName': user['firstName'] if found else None,
        'lastName': user['lastName'] if found else None,
        'success': found,
    })
@app.route('/api/setupstudentdashboard', methods=['GET'])
@app.route('/setupstudentdashboard', methods=['GET'])
def getStudentDashboardInfo():
    """Assemble the data needed by the student dashboard.

    Returns the student's name plus their ongoing classes; each class is
    annotated with its report's nextCourse/marks when a report exists.
    """
    if 'email' not in session or session['email'] is None:
        abort(401)
    email = mailsane.normalize(session['email'])
    if email.error:
        abort(400)
    studentDashboardDict = {}
    classes = dbworker.getClasses(str(email), filt={'ongoing': True})
    thisStudent = dbworker.getCurrentUser()
    studentDashboardDict['firstName'] = thisStudent['firstName'][:]
    studentDashboardDict['lastName'] = thisStudent['lastName'][:]
    studentDashboardDict['Classes'] = list(classes['student'])
    # The original code indexed the pymongo Collection object directly
    # (classReports[r]) and called .size(), neither of which exists on a
    # Collection, so this crashed whenever a class was present.  Query the
    # collection per class instead.
    reports = dbworker.mclient[dbworker.database]['reports']
    for c in studentDashboardDict['Classes']:
        # NOTE(review): assumes class dicts carry their report's classId
        # under the key 'id' -- confirm against dbworker.getClasses.
        report = reports.find_one({'classId': c['id']})
        if report is not None:
            c['nextCourse'] = report['nextCourse']
            c['marks'] = report['marks']
    return jsonify(studentDashboardDict)
@app.route('/api/setmarkingsection', methods=['POST', 'PATCH'])
def setMarkingSection():
    """Set the weight/index of a marking section for a class.

    Expects JSON {classId, sectionTitle, weightInfo: {weight, index}} and
    overwrites any existing values.  Caller must be an admin or the class
    instructor.  Returns {success: True}.
    """
    if request.json is None or 'classId' not in request.json or 'sectionTitle' not in request.json or 'weightInfo' not in request.json:
        abort(400)
    for x in ['weight', 'index']:
        if x not in request.json['weightInfo']:
            abort(400)
    if 'email' not in session or session['email'] is None:
        abort(401)
    email = mailsane.normalize(session['email'])
    if email.error:
        abort(400)
    try:
        validate(instance=request.json, schema=SchemaFactory.set_marking)
    except exceptions.ValidationError:
        abort(400)
    # A malformed id previously raised bson.errors.InvalidId (HTTP 500);
    # report it as a client error, matching getCriteria/getClass.
    try:
        convClassId = ObjectId(request.json['classId'])
    except bson.errors.InvalidId:
        abort(400)
    if not dbworker.validateAccess(dbworker.userTypeMap['admin']) and not dbworker.isClassInstructor(str(email), convClassId):
        abort(401)
    dbworker.addMarkingSection(convClassId, request.json['sectionTitle'], request.json['weightInfo'])
    return jsonify({'success': True})
@app.route('/api/deletemarkingsection', methods=['PATCH', 'DELETE'])
def deleteMarkingSection():
    """Delete a marking section (weights and marks) from a class.

    Expects JSON {classId, sectionTitle}.  Caller must be an admin or the
    class instructor.  Returns {success: True}.
    """
    if 'email' not in session or session['email'] is None:
        abort(401)
    email = mailsane.normalize(session['email'])
    if email.error:
        abort(400)
    if request.json is None:
        abort(400)
    for x in ['classId', 'sectionTitle']:
        if x not in request.json:
            abort(400)
    # Malformed ids previously escaped as bson.errors.InvalidId (HTTP 500).
    try:
        convClassId = ObjectId(request.json['classId'])
    except bson.errors.InvalidId:
        abort(400)
    if not dbworker.validateAccess(dbworker.userTypeMap['admin']) and not dbworker.isClassInstructor(str(email), convClassId):
        abort(401)
    dbworker.deleteMarkingSection(convClassId, request.json['sectionTitle'])
    return jsonify({'success': True})
@app.route('/api/admin/updatecourseinfo', methods=['POST'])
def changeCourseInfo():
    """Admin route: update a class's ongoing status and title.

    Expects JSON {classId, status, newTitle}.  Returns {success: True}.
    """
    if 'email' not in session or session['email'] is None:
        abort(403)
    # This lives under /api/admin/ but only checked that *someone* was
    # logged in; require actual admin rights like the other admin routes.
    if not dbworker.validateAccess(dbworker.userTypeMap['admin']):
        abort(403)
    if request.json is None or 'classId' not in request.json or 'status' not in request.json or 'newTitle' not in request.json:
        abort(400)
    # Malformed ids previously surfaced as bson.errors.InvalidId (HTTP 500).
    try:
        convClassId = ObjectId(request.json['classId'])
    except bson.errors.InvalidId:
        abort(400)
    changes = {'ongoing': request.json['status'], 'courseTitle': request.json['newTitle']}
    dbworker.updateClassInfo(convClassId, changes)
    return jsonify({'success': True})
@app.route('/api/updatecourseinfo', methods=['POST', 'PATCH'])
def updateCourseInfo():
    """Update a class's ongoing status, title, and optionally its semester.

    Expects JSON {classId, status, newTitle[, semesterInfo]}.  Caller must
    be an admin or the class instructor.  Returns {success: True}.
    """
    if 'email' not in session or session['email'] is None:
        abort(403)
    email = mailsane.normalize(session['email'])
    if email.error:
        abort(400)
    if request.json is None or 'classId' not in request.json or 'status' not in request.json or 'newTitle' not in request.json:
        abort(400)
    # Malformed ids previously surfaced as bson.errors.InvalidId (HTTP 500).
    try:
        convClassId = ObjectId(request.json['classId'])
    except bson.errors.InvalidId:
        abort(400)
    if not dbworker.validateAccess(dbworker.userTypeMap['admin']) and not dbworker.isClassInstructor(str(email), convClassId):
        abort(401)
    try:
        validate(instance=request.json, schema=SchemaFactory.update_CI)
    except exceptions.ValidationError:
        abort(400)
    changes = {'ongoing': request.json['status'], 'courseTitle': request.json['newTitle']}
    if 'semesterInfo' in request.json:
        changes['semester'] = request.json['semesterInfo']
    dbworker.updateClassInfo(convClassId, changes)
    return jsonify({'success': True})
@app.route('/api/class/<string:class_id>/marking', methods=['GET'])
def getCriteria(class_id):
    """Return the course title and marking sections for a class."""
    if not dbworker.validateAccessList([dbworker.userTypeMap['admin'], dbworker.userTypeMap['instructor']]):
        abort(401)
    # Only the ObjectId conversion can raise InvalidId, so the guard is
    # narrowed to the lookup.
    try:
        cl = dbworker.getClass(ObjectId(class_id))
    except bson.errors.InvalidId:
        abort(400)
    if cl is None:
        abort(404)
    payload = {
        'courseTitle': cl['courseTitle'],
        'markingSections': cl['markingSections'],
        '_id': str(cl['_id']),
    }
    return jsonify({'result': payload, 'success': True})
@app.route('/api/getclass', methods=['POST'])
def getClass():
    """Return the full class record for JSON {_id}, with _id stringified.

    Response shape: {result: <class dict>, success: True}.
    """
    if request.json is None or '_id' not in request.json:
        abort(400)
    if not dbworker.validateAccessList([dbworker.userTypeMap['admin'], dbworker.userTypeMap['instructor']]):
        abort(401)
    try:
        validate(instance=request.json, schema=SchemaFactory.get_class)
    except exceptions.ValidationError:
        abort(400)
    try:
        record = dbworker.getClass(ObjectId(request.json['_id']))
    except bson.errors.InvalidId:
        abort(400)
    if record is None:
        abort(404)
    record['_id'] = str(record['_id'])
    return jsonify({'result': record, 'success': True})
@app.route('/api/mymarks/')
def getMyMarks():
    """
    Gets a student's marks
    If the logged in user is not a student, then it will return a 403
    Returned structure is {marks : {}, success : Boolean}
    The keys for marks and markingSections will be class _ids
    """
    if not dbworker.validateAccess(dbworker.userTypeMap['student']):
        abort(403)
    # NOTE(review): assumes validateAccess guarantees session['email']
    # exists -- otherwise this raises KeyError; confirm.
    email = mailsane.normalize(session['email'])
    if email.error:
        abort(400)
    marks = dbworker.getReports({'studentEmail' : str(email)})
    classList = []
    marksDict = {}
    for m in marks:
        # This is to hide the internal report _ids
        m.pop('_id', None)
        tmpId = m['classId']
        m.pop('classId', None) # This has to be done as ObjectIds not serializable
        m.pop('studentEmail', None)
        classList.append(tmpId)
        marksDict[str(tmpId)] = m
    # Fetch weight/index metadata for every class the student has a report in.
    markingSections = dbworker.getMarkingSectionInformation(filt={'_id' : {'$in' : classList}})
    for cl in classList:
        stredCl = str(cl)
        tmp = {}
        # Merge each class's marking-section metadata with the student's
        # marks, producing {sectionTitle: {weight, index, mark}} per class.
        for sectionTitle in markingSections[stredCl]:
            tmp[sectionTitle] = {}
            tmp[sectionTitle]['weight'] = markingSections[stredCl][sectionTitle]['weight']
            tmp[sectionTitle]['index'] = markingSections[stredCl][sectionTitle]['index']
            if sectionTitle in marksDict[stredCl]['marks']:
                # This is to handle the case where a 'None' mark exists
                tmp[sectionTitle]['mark'] = marksDict[stredCl]['marks'][sectionTitle]
            else:
                tmp[sectionTitle]['mark'] = None
        marksDict[stredCl]['marks'] = tmp
    return jsonify({'marks' : marksDict, 'success' : True})
@app.route('/api/updatereport', methods=['PUT', 'POST'])
def updateReport():
    """Create or update a student's report for a class.

    Expects JSON {classId, email[, mark, comments, nextCourse]}.
    Caller must be an admin or instructor.  Returns {success: True}.
    """
    if request.json is None:
        abort(400)
    try:
        validate(instance=request.json, schema=SchemaFactory.report_update)
    except exceptions.ValidationError:
        abort(400)
    if not dbworker.validateAccessList([dbworker.userTypeMap['admin'], dbworker.userTypeMap['instructor']]):
        abort(403)
    studentEmail = mailsane.normalize(request.json['email'])
    if studentEmail.error:
        abort(400)
    # Malformed ids previously surfaced as bson.errors.InvalidId (HTTP 500).
    try:
        convClassId = ObjectId(request.json['classId'])
    except bson.errors.InvalidId:
        abort(400)
    dbworker.updateReport(convClassId,
                          str(studentEmail),
                          mark={} if 'mark' not in request.json else request.json['mark'],
                          comments='' if 'comments' not in request.json else request.json['comments'],
                          nextCourse='' if 'nextCourse' not in request.json else request.json['nextCourse'])
    return jsonify({'success': True})
@app.route('/api/checkemail')
def checkEmail():
    """
    Takes in a json of the form {'email' : email_address}
    Returns a json of the form {'message' : error_message, 'valid' : Boolean}
    'message' will refer to the specific reason an email address is invalid
    """
    # NOTE(review): this route only accepts GET (the default) yet expects a
    # JSON body -- confirm clients actually send one, or add methods=['POST'].
    if request.json is None or 'email' not in request.json:
        abort(400)
    # Use the verification library to check that it is a valid email
    address = mailsane.normalize(request.json['email'])
    if address.error:
        return jsonify({'message' : str(address), 'valid' : False})
    if dbworker.getUser(str(address)) is None:
        return jsonify({'message' : 'Email address not found', 'valid' : False})
    return jsonify({'message' : None, 'valid' : True})
@app.route('/api/loghours', methods=['POST', 'PUT'])
def logHours():
    """Record an hours entry timestamped with the current server time.

    Expects JSON {email, purpose, paid, hours}; `hours` arrives as a string
    and must parse to a positive float.  Returns {dateTime: <timestamp used>}.
    """
    valid_access = [dbworker.userTypeMap['admin'], dbworker.userTypeMap['instructor'], dbworker.userTypeMap['volunteer']]
    if not dbworker.validateAccessList(valid_access):
        abort(403)
    if request.json is None:
        abort(400)
    for x in ['email', 'purpose', 'paid', 'hours']:
        if x not in request.json:
            abort(400)
    email = mailsane.normalize(request.json['email'])
    if email.error:
        abort(400)
    # Narrowed from a bare `except:` (which also swallowed SystemExit and
    # KeyboardInterrupt); only conversion failures should become a 400.
    try:
        hours = float(request.json['hours'])
    except (TypeError, ValueError):
        abort(400)
    if hours <= 0:
        abort(400)
    date = datetime.datetime.now()
    dbworker.addHoursLog(str(email), request.json['purpose'], request.json['paid'], date, hours)
    return jsonify({'dateTime': date})
@app.route('/api/admin/genhours', methods=['POST', 'PUT'])
def genHours():
    """Record an hours entry with a caller-supplied timestamp.

    Expects JSON {purpose, paid, hours, dateTime[, email]}; dateTime must be
    "%Y-%m-%dT%H:%M:%S.%fZ".  Defaults to the session user when no email is
    given.  Returns {success: True}.
    """
    valid_access = [dbworker.userTypeMap['admin'], dbworker.userTypeMap['instructor'], dbworker.userTypeMap['volunteer']]
    if not dbworker.validateAccessList(valid_access):
        abort(403)
    if request.json is None:
        abort(400)
    for x in ['purpose', 'paid', 'hours', 'dateTime']:
        if x not in request.json:
            abort(400)
    # An unparseable timestamp previously raised out of the handler (HTTP 500).
    try:
        correctedTime = datetime.datetime.strptime(request.json['dateTime'], "%Y-%m-%dT%H:%M:%S.%fZ")
    except (TypeError, ValueError):
        abort(400)
    email = session['email']
    if 'email' in request.json:
        email = mailsane.normalize(request.json['email'])
        if email.error:
            abort(400)
    # Narrowed from a bare `except:`; only conversion failures mean bad input.
    try:
        hours = float(request.json['hours'])
    except (TypeError, ValueError):
        abort(400)
    if hours <= 0:
        abort(400)
    dbworker.addHoursLog(str(email), request.json['purpose'], request.json['paid'], correctedTime, hours)
    return jsonify({'success': True})
@app.route('/api/admin/edithours', methods=['PATCH'])
def editHours():
    """Admin-only: edit an existing hours log.

    Expects JSON {currentId, newAttributes}; any attribute except _id may be
    changed, and dateTime values are parsed from ISO-8601 strings
    ("%Y-%m-%dT%H:%M:%S.%fZ").  Returns {success: True}.
    """
    if not dbworker.validateAccess(dbworker.userTypeMap['admin']):
        abort(403)
    if request.json is None or 'currentId' not in request.json or 'newAttributes' not in request.json:
        abort(400)
    # Malformed ids previously surfaced as bson.errors.InvalidId (HTTP 500).
    try:
        convClassId = ObjectId(request.json['currentId'])
    except bson.errors.InvalidId:
        abort(400)
    # Reject empty edits and attempts to rewrite the immutable _id.
    if request.json['newAttributes'] == {} or '_id' in request.json['newAttributes']:
        abort(400)
    try:
        validate(instance=request.json, schema=SchemaFactory.edit_hours)
    except exceptions.ValidationError:
        abort(400)
    if 'dateTime' in request.json['newAttributes']:
        # Convert dateTime from string to datetime object.  Narrowed from a
        # bare `except:` so only genuine parse failures become a 400.
        try:
            correctedTime = datetime.datetime.strptime(request.json['newAttributes']['dateTime'], "%Y-%m-%dT%H:%M:%S.%fZ")
        except (TypeError, ValueError):
            abort(400)
        correctedDict = {}
        for x in request.json['newAttributes']:
            if x == 'dateTime':
                correctedDict['dateTime'] = correctedTime
            else:
                correctedDict[x] = request.json['newAttributes'][x]
        dbworker.editHour(convClassId, correctedDict)
    else:
        dbworker.editHour(convClassId, request.json['newAttributes'])
    return jsonify({'success': True})
@app.route('/api/admin/deletehour', methods=['POST', 'DELETE'])
def deleteHour():
    """Admin-only: delete the hours log with the given id.

    Expects JSON {id}.  Responds 409 when the database delete fails.
    """
    if not dbworker.validateAccess(dbworker.userTypeMap['admin']):
        abort(403)
    if request.json is None or 'id' not in request.json:
        abort(400)
    # Malformed ids previously surfaced as bson.errors.InvalidId (HTTP 500).
    try:
        convClassId = ObjectId(request.json['id'])
    except bson.errors.InvalidId:
        abort(400)
    if not dbworker.deleteHour(convClassId):
        abort(409)
    return jsonify({'success': True})
@app.route('/api/gethours/', methods=['GET'])
@app.route('/api/hours/', methods=['GET'])
def getHours():
    """List hour logs for the session user, or (admins only) for ?user=<email>."""
    allowed = [dbworker.userTypeMap['admin'],
               dbworker.userTypeMap['instructor'],
               dbworker.userTypeMap['volunteer']]
    if not dbworker.validateAccessList(allowed):
        abort(403)
    requested = request.args.get('user', default=None, type=str)
    if requested is None:
        email = session.get('email')
        if email is None:
            abort(500)
    else:
        # Only admins may inspect another user's hours.
        if not dbworker.validateAccessList([dbworker.userTypeMap['admin']]):
            abort(403)
        email = mailsane.normalize(requested)
        if email.error:
            abort(400)
    cursor = dbworker.getHours(
        filt={"email": str(email)},
        projection={'_id': 1, 'dateTime': 1, 'purpose': 1, 'hours': 1, 'paid': 1})
    logs = []
    for doc in cursor:
        doc['_id'] = str(doc['_id'])
        logs.append(doc)
    return jsonify({"hours": logs})
@app.route('/api/report/', methods=['POST'])
def getReport():
    """
    Return a PDF containing all worked/volunteer hours
    """
    report_params = request.json
    if report_params is None:
        abort(400)
    # Default to the caller's own report when no email is supplied.
    if 'email' not in report_params:
        report_params['email'] = session['email']
    try:
        validate(instance=report_params, schema=SchemaFactory.report_hours)
    except exceptions.ValidationError:
        abort(400)
    email = mailsane.normalize(report_params['email'])
    if email.error:
        abort(400)
    if not dbworker.validateAccessList([dbworker.userTypeMap['admin']]) and str(email) != session['email']:
        # Allows admins to see everyones reports, users to see their own
        abort(403)
    paid_hrs = None
    filt = {"email": str(email)}
    proj = {'_id': 0, 'hours': 1}
    if 'paid' in request.json:
        filt['paid'] = True if request.json['paid'] else False
        paid_hrs = False if request.json['paid'] == 0 else True
    # Convert date ranges into datetime objects and insert into filter
    # Note: to enforce a specific date/time pattern you can also use strptime method:
    # datetime.datetime.strptime(request.json['startRange'], '%Y-%m-%d') (complete pattern: "%Y-%m-%dT%H:%M:%S.%fZ")
    if 'startRange' in report_params and 'endRange' in report_params:
        start_time_stamp = parse(report_params['startRange'])
        end_time_stamp = parse(report_params['endRange'])
        filt["dateTime"] = {'$gte': start_time_stamp, '$lte': end_time_stamp}
    elif 'startRange' in report_params:
        start_time_stamp = parse(report_params['startRange'])
        filt["dateTime"] = {'$gte': start_time_stamp}
    elif 'endRange' in report_params:
        end_time_stamp = parse(report_params['endRange'])
        filt["dateTime"] = {'$lte': end_time_stamp}
    hours = dbworker.getHours(filt=filt, projection=proj)
    hours_list = []
    for doc in hours:
        hours_list.append(float(doc["hours"]))
    file_name = reportgen.hours(email, hours_list, paid_hrs)
    # Once generated, report PDFs are currently stored in the 'app' folder of docker container
    resp_file = send_file(file_name, attachment_filename=file_name)
    # NOTE(review): the PDF is deleted *before* the response is returned;
    # confirm send_file has fully buffered the file on all deployments,
    # otherwise the response body may be lost.
    if os.path.exists("app/" + file_name):
        os.remove("app/" + file_name)
    return resp_file
    # NOTE(review): unreachable -- every path above either returns or aborts.
    abort(500)
@app.route('/api/report/<string:class_id>/<string:email>', methods=['GET'])
def getStudentReport(class_id, email):
    """Return the report of student <email> for class <class_id>.

    Caller must be an admin or instructor; 404 when the user or class does
    not exist, 400 when the user is not a student or has no report.
    """
    email = mailsane.normalize(email)
    if email.error:
        abort(400)
    if not dbworker.validateAccessList([dbworker.userTypeMap['admin'], dbworker.userTypeMap['instructor']]):
        abort(403)
    # A malformed URL id previously raised bson.errors.InvalidId (HTTP 500).
    try:
        convClassId = ObjectId(class_id)
    except bson.errors.InvalidId:
        abort(400)
    # Verify both the student and the class exist before querying reports.
    us = dbworker.getUser(str(email))
    cl = dbworker.getClass(convClassId)
    if us is None or cl is None:
        abort(404)
    if us['userType'] != dbworker.userTypeMap['student']:
        abort(400)
    filt = {"classId": convClassId, "studentEmail": str(email)}
    proj = {'_id': 0}
    report = dbworker.getStudentReport(filt=filt, proj=proj)
    if report is None:
        abort(400)
    # ObjectId is not JSON-serializable; stringify before responding.
    report['classId'] = str(report['classId'])
    return jsonify({"report": report})
@app.route('/api/admin/getusers')
def getUsers():
    """Return every user's email, name and type (admins/instructors only)."""
    if not dbworker.validateAccessList([dbworker.userTypeMap['admin'], dbworker.userTypeMap['instructor']]):
        abort(403)
    cursor = dbworker.getUsers(
        projection={'_id': 0, 'email': 1, 'firstName': 1, 'lastName': 1, 'userType': 1})
    # Materialize the cursor so jsonify can serialize it.
    return jsonify({'result': list(cursor), 'success': True})
@app.route('/api/getuser', methods=['POST'])
@app.route('/api/admin/getuser', methods=['POST'])
def getUser():
    """Return a user's record (minus _id/password) plus a computed 'age'.

    Expects JSON {email}.  Usable by admins and instructors.
    """
    if not dbworker.validateAccessList([dbworker.userTypeMap['admin'], dbworker.userTypeMap['instructor']]):
        abort(403)
    if request.json is None or 'email' not in request.json:
        abort(400)
    email = mailsane.normalize(request.json['email'])
    if email.error:
        abort(400)
    u = dbworker.getUser(str(email))
    if u is None:
        # Was abort(405) (Method Not Allowed); a missing resource is a 404,
        # consistent with getStudentReport and getCriteria.
        abort(404)
    u.pop('password')
    u.pop('_id')
    now = datetime.datetime.now()
    bday = now
    if 'birthday' in u:
        bday = u['birthday']
    # Approximate age in whole years (31536000 s = 365 days; ignores leap years).
    delta = now - bday
    u['age'] = int(delta.total_seconds() / 31536000)
    return jsonify({'result': u, 'success': True})
@app.route('/api/admin/edituser', methods=['PATCH'])
def editUser():
    """
    Edit an existing user's attributes.

    Expects JSON {'currentEmail': <email>, 'newAttributes': {...}}.
    Any schema-approved attribute except 'email' and '_id' may be
    changed; 'birthday' must be an ISO-8601 timestamp and 'password'
    is re-hashed via dbworker.setPassword rather than stored verbatim.

    Responses:
        200 {'success': True}; 400 bad payload; 403 non-admin;
        404 unknown user.
    """
    # SECURITY: the previous revision wrote str(request.json) to stderr,
    # which leaked plaintext passwords whenever one was being changed.
    # Never log this payload.
    if not dbworker.validateAccess(dbworker.userTypeMap['admin']):
        abort(403)
    if request.json is None or 'currentEmail' not in request.json or 'newAttributes' not in request.json:
        abort(400)
    email = mailsane.normalize(request.json['currentEmail'])
    if email.error:
        abort(400)
    if dbworker.getUser(str(email)) is None:
        abort(404)
    newAttributes = request.json['newAttributes']
    if newAttributes == {} or 'email' in newAttributes or '_id' in newAttributes:
        # No changes requested, or an attempt to change immutable fields.
        abort(400)
    # Reject any attribute the schema does not know about.
    try:
        validate(instance=request.json, schema=SchemaFactory.edit_user)
    except exceptions.ValidationError:
        abort(400)
    if 'birthday' in newAttributes or 'password' in newAttributes:
        # Convert birthday from an ISO-8601 string to a datetime object.
        correctedTime = None
        try:
            if 'birthday' in newAttributes:
                correctedTime = datetime.datetime.strptime(newAttributes['birthday'], "%Y-%m-%dT%H:%M:%S.%fZ")
        except (ValueError, TypeError):
            # Malformed timestamp -- client error, not a server crash.
            abort(400)
        correctedDict = {}
        for key in newAttributes:
            if key == 'birthday':
                correctedDict['birthday'] = correctedTime
            elif key == 'password':
                # Hash-and-store via the dedicated helper; keep the raw
                # password out of the generic attribute update.
                dbworker.setPassword(str(email), newAttributes['password'])
            else:
                correctedDict[key] = newAttributes[key]
        dbworker.editUser(str(email), correctedDict)
    else:
        dbworker.editUser(str(email), newAttributes)
    return jsonify({'success': True})
@app.route('/api/admin/createcourse', methods=['POST'])
def createCourse():
    """
    Create a new course.

    Expects JSON {'courseTitle': <str>} with an optional 'semester'.
    Returns {'_id': <new class id as a string>, 'success': True};
    403 for non-admins, 400 when the title is missing.
    """
    if not dbworker.validateAccess(dbworker.userTypeMap['admin']):
        abort(403)
    if request.json is None or 'courseTitle' not in request.json:
        abort(400)
    semester = request.json.get('semester', "")
    # The docstring has always promised the new id in the response, but the
    # previous revision discarded createClass's return value. Assumes
    # dbworker.createClass returns the new class id -- TODO confirm.
    newId = dbworker.createClass(request.json['courseTitle'], [], [], [], semester)
    return jsonify({'_id': str(newId), 'success': True})
@app.route('/api/admin/addstudent', methods=['POST'])
def addStudent():
    """
    Enrol a student in a class.

    Expects JSON {'email': <student email>, 'classId': <class id hex>}.

    Responses:
        200 {'success': <bool>} reporting the outcome of the update.
        400 for a malformed payload/id or a non-student target user.
        403 when the caller is not an admin.
        404 when the user or the class does not exist.
    """
    if not dbworker.validateAccess(dbworker.userTypeMap['admin']):
        abort(403)
    if request.json is None or 'email' not in request.json or 'classId' not in request.json:
        abort(400)
    # Validate the payload shape BEFORE consuming its values: previously
    # ObjectId() ran first, so a malformed classId raised uncaught (500)
    # instead of producing the intended 400.
    try:
        validate(instance=request.json, schema=SchemaFactory.move_user)
    except exceptions.ValidationError:
        abort(400)
    email = mailsane.normalize(request.json['email'])
    if email.error:
        abort(400)
    from bson.errors import InvalidId  # bson already a dependency via ObjectId
    try:
        convClassId = ObjectId(request.json['classId'])
    except (InvalidId, TypeError):
        abort(400)
    us = dbworker.getUser(str(email))
    cl = dbworker.getClass(convClassId)
    if us is None or cl is None:
        abort(404)
    # Only student accounts may be enrolled as students.
    if us['userType'] != dbworker.userTypeMap['student']:
        abort(400)
    return jsonify({'success': dbworker.addStudent(convClassId, str(email))})
@app.route('/api/admin/addinstructor', methods=['POST'])
def addInstructor():
    """
    Attach an instructor to a class.

    Expects JSON {'email': <user email>, 'classId': <class id hex>}.

    Responses:
        200 {'success': <bool>} reporting the outcome of the update.
        400 for a malformed payload/id, or a target account that is not
            staff-type (admin/instructor/volunteer).
        403 when the caller is not an admin.
        404 when the user or the class does not exist.
    """
    if not dbworker.validateAccess(dbworker.userTypeMap['admin']):
        abort(403)
    if request.json is None or 'email' not in request.json or 'classId' not in request.json:
        abort(400)
    # Validate the payload shape before consuming its values so a malformed
    # classId is rejected with a 400 instead of crashing ObjectId() below.
    try:
        validate(instance=request.json, schema=SchemaFactory.move_user)
    except exceptions.ValidationError:
        abort(400)
    email = mailsane.normalize(request.json['email'])
    if email.error:
        abort(400)
    from bson.errors import InvalidId  # bson already a dependency via ObjectId
    try:
        convClassId = ObjectId(request.json['classId'])
    except (InvalidId, TypeError):
        abort(400)
    us = dbworker.getUser(str(email))
    cl = dbworker.getClass(convClassId)
    if us is None or cl is None:
        abort(404)
    if us['userType'] not in [dbworker.userTypeMap['admin'], dbworker.userTypeMap['instructor'], dbworker.userTypeMap['volunteer']]:
        abort(400)
    return jsonify({'success': dbworker.addInstructor(convClassId, str(email))})
@app.route('/api/admin/removeinstructor', methods=['POST', 'DELETE'])
def removeInstructor():
    """
    Detach an instructor from a class.

    Expects JSON {'email': <user email>, 'classId': <class id hex>}.

    Responses:
        200 {'success': <bool>} reporting the outcome of the update.
        400 for a malformed payload/id, or a target account that is not
            staff-type (admin/instructor/volunteer).
        403 when the caller is not an admin.
        404 when the user or the class does not exist.
    """
    if not dbworker.validateAccess(dbworker.userTypeMap['admin']):
        abort(403)
    if request.json is None or 'email' not in request.json or 'classId' not in request.json:
        abort(400)
    # Validate the payload shape before consuming its values so a malformed
    # classId is rejected with a 400 instead of crashing ObjectId() below.
    try:
        validate(instance=request.json, schema=SchemaFactory.move_user)
    except exceptions.ValidationError:
        abort(400)
    email = mailsane.normalize(request.json['email'])
    if email.error:
        abort(400)
    from bson.errors import InvalidId  # bson already a dependency via ObjectId
    try:
        convClassId = ObjectId(request.json['classId'])
    except (InvalidId, TypeError):
        abort(400)
    us = dbworker.getUser(str(email))
    cl = dbworker.getClass(convClassId)
    if us is None or cl is None:
        abort(404)
    if us['userType'] not in [dbworker.userTypeMap['admin'], dbworker.userTypeMap['instructor'], dbworker.userTypeMap['volunteer']]:
        abort(400)
    return jsonify({'success': dbworker.removeInstructor(convClassId, str(email))})
@app.route('/api/admin/removestudent', methods=['POST', 'DELETE'])
def removeStudent():
    """
    Unenrol a student from a class.

    Expects JSON {'email': <student email>, 'classId': <class id hex>}.

    Responses:
        200 {'success': <bool>} reporting the outcome of the update.
        400 for a malformed payload/id or a non-student target user.
        403 when the caller is not an admin.
        404 when the user or the class does not exist.
    """
    if not dbworker.validateAccess(dbworker.userTypeMap['admin']):
        abort(403)
    if request.json is None or 'email' not in request.json or 'classId' not in request.json:
        abort(400)
    # Validate the payload shape before consuming its values so a malformed
    # classId is rejected with a 400 instead of crashing ObjectId() below.
    try:
        validate(instance=request.json, schema=SchemaFactory.move_user)
    except exceptions.ValidationError:
        abort(400)
    email = mailsane.normalize(request.json['email'])
    if email.error:
        abort(400)
    from bson.errors import InvalidId  # bson already a dependency via ObjectId
    try:
        convClassId = ObjectId(request.json['classId'])
    except (InvalidId, TypeError):
        abort(400)
    us = dbworker.getUser(str(email))
    cl = dbworker.getClass(convClassId)
    if us is None or cl is None:
        abort(404)
    # Only student accounts can be unenrolled as students.
    if us['userType'] != dbworker.userTypeMap['student']:
        abort(400)
    return jsonify({'success': dbworker.removeStudent(convClassId, str(email))})
@app.route('/api/admin/addvolunteer', methods=['POST'])
def addVolunteer():
    """
    Attach a volunteer to a class.

    Expects JSON {'email': <user email>, 'classId': <class id hex>}.

    Responses:
        200 {'success': <bool>} reporting the outcome of the update.
        400 for a malformed payload/id, or a target account that is not
            staff-type (admin/instructor/volunteer). The old inline
            comment claimed non-volunteers were allowed; the check has
            always said otherwise.
        403 when the caller is not an admin.
        404 when the user or the class does not exist.
    """
    if not dbworker.validateAccess(dbworker.userTypeMap['admin']):
        abort(403)
    if request.json is None or 'email' not in request.json or 'classId' not in request.json:
        abort(400)
    # Validate the payload shape before consuming its values so a malformed
    # classId is rejected with a 400 instead of crashing ObjectId() below.
    try:
        validate(instance=request.json, schema=SchemaFactory.move_user)
    except exceptions.ValidationError:
        abort(400)
    email = mailsane.normalize(request.json['email'])
    if email.error:
        abort(400)
    from bson.errors import InvalidId  # bson already a dependency via ObjectId
    try:
        convClassId = ObjectId(request.json['classId'])
    except (InvalidId, TypeError):
        abort(400)
    us = dbworker.getUser(str(email))
    cl = dbworker.getClass(convClassId)
    if us is None or cl is None:
        abort(404)
    if us['userType'] not in [dbworker.userTypeMap['admin'], dbworker.userTypeMap['instructor'], dbworker.userTypeMap['volunteer']]:
        abort(400)
    return jsonify({'success': dbworker.addVolunteer(convClassId, str(email))})
@app.route('/api/admin/removevolunteer', methods=['POST', 'DELETE'])
def removeVolunteer():
    """
    Detach a volunteer from a class.

    Expects JSON {'email': <user email>, 'classId': <class id hex>}.

    Responses:
        200 {'success': <bool>} reporting the outcome of the update.
        400 for a malformed payload/id, or a target account that is not
            staff-type (admin/instructor/volunteer).
        403 when the caller is not an admin.
        404 when the user or the class does not exist.
    """
    if not dbworker.validateAccess(dbworker.userTypeMap['admin']):
        abort(403)
    if request.json is None or 'email' not in request.json or 'classId' not in request.json:
        abort(400)
    # Validate the payload shape before consuming its values so a malformed
    # classId is rejected with a 400 instead of crashing ObjectId() below.
    try:
        validate(instance=request.json, schema=SchemaFactory.move_user)
    except exceptions.ValidationError:
        abort(400)
    email = mailsane.normalize(request.json['email'])
    if email.error:
        abort(400)
    from bson.errors import InvalidId  # bson already a dependency via ObjectId
    try:
        convClassId = ObjectId(request.json['classId'])
    except (InvalidId, TypeError):
        abort(400)
    us = dbworker.getUser(str(email))
    cl = dbworker.getClass(convClassId)
    if us is None or cl is None:
        abort(404)
    if us['userType'] not in [dbworker.userTypeMap['admin'], dbworker.userTypeMap['instructor'], dbworker.userTypeMap['volunteer']]:
        abort(400)
    return jsonify({'success': dbworker.removeVolunteer(convClassId, str(email))})
@app.route('/api/admin/createuser', methods=['POST'])
def createUser():
    """
    Create a new user account.

    Expects JSON with: email, password, userType, firstName, lastName,
    phoneNumber, birthday (YYYY-MM-DD), parentEmail and parentName.

    Responses:
        200 {'success': True}.
        400 for a missing field, invalid email, schema violation,
            duplicate email, or an unparsable birthday.
        403 when the caller is not an admin.
    """
    if not dbworker.validateAccess(dbworker.userTypeMap['admin']):
        abort(403)
    if request.json is None:
        abort(400)
    required = ['email', 'password', 'userType', 'firstName', 'lastName',
                'phoneNumber', 'birthday', 'parentEmail', 'parentName']
    for field in required:
        if field not in request.json:
            abort(400)
    email = mailsane.normalize(request.json['email'])
    if email.error:
        abort(400)
    parentEmail = mailsane.normalize(request.json['parentEmail'])
    if parentEmail.error:
        abort(400)
    try:
        validate(instance=request.json, schema=SchemaFactory.create_user)
    except exceptions.ValidationError:
        abort(400)
    # Refuse duplicate accounts.
    if dbworker.getUser(str(email)) is not None:
        abort(400)
    # Parse the birthday up front: previously a bad date string raised an
    # uncaught ValueError inside the createUser call (a 500, not a 400).
    try:
        birthday = datetime.datetime.strptime(request.json['birthday'], '%Y-%m-%d')
    except (ValueError, TypeError):
        abort(400)
    dbworker.createUser(str(email), str(parentEmail),
                        request.json['firstName'], request.json['lastName'],
                        request.json['password'], request.json['userType'],
                        request.json['phoneNumber'], birthday,
                        request.json['parentName'])
    return jsonify({'success': True})
@app.route('/api/admin/uploadSpreadSheet', methods=['POST'])
def handleSpreadSheet():
    """
    Import users from an uploaded spreadsheet (admin only).

    Expects a multipart upload under the 'file' key. Returns the rows
    that could not be assigned; 400 for an unreadable workbook.
    """
    if not dbworker.validateAccess(dbworker.userTypeMap['admin']):
        abort(403)
    if request.files is None or 'file' not in request.files:
        abort(400)
    uploaded = request.files['file']
    try:
        handler = spreadSheetHandler.SheetHandler(uploaded)
        return jsonify(handler.assignSpreadSheetUsers())
    except XLRDError:
        abort(400)
# NOTE(review): possibly a leftover debug route (two URL rules, one unprefixed).
@app.route('/api/getClasses/<email>', methods=['GET'])
@app.route('/getClasses/<email>', methods=['GET'])
def getUserClasses(email):
    """
    List the classes the given user belongs to, split by role.

    Returns {'instructor': [...], 'student': [...]} where each entry is
    {'id', 'name', 'ongoing'}. Accessible to admins, instructors and
    students.
    """
    if not dbworker.validateAccessList([dbworker.userTypeMap['admin'], dbworker.userTypeMap['instructor'], dbworker.userTypeMap['student']]):
        abort(403)
    email = mailsane.normalize(email)
    if email.error:
        abort(400)
    collection = dbworker.mclient[dbworker.database]['classes']
    classes = {'instructor': [], 'student': []}
    # One query per role; a class can legitimately appear under both keys.
    for role, field in (('instructor', 'instructors'), ('student', 'students')):
        for doc in collection.find({field: str(email)}):
            classes[role].append({"id": str(doc['_id']),
                                  "name": doc['courseTitle'],
                                  "ongoing": doc['ongoing']})
    return jsonify(classes)
# Debug routes are below, do not rely on these for any expected behaviour
@app.route('/salt')
def getASalt():
    """Debug: return the repr of a freshly generated bcrypt salt."""
    if not ENABLE_DEBUG_ROUTES:
        abort(404)
    salt = bcrypt.gensalt()
    return str(salt)
@app.route('/forcelogin/<int:userid>')
def forcelogin(userid):
    """Debug: force the session 'email' to <userid> to exercise sessions."""
    if not ENABLE_DEBUG_ROUTES:
        abort(404)
    session['email'] = str(userid)
    return redirect(url_for('index', _external=True, _scheme='https'))
@app.route('/checklogin')
def checklogin():
    """Debug: report which email, if any, the current session holds."""
    if not ENABLE_DEBUG_ROUTES:
        abort(404)
    if 'email' not in session:
        return "Not logged in"
    return "Logged in as " + session['email']
@app.route('/addjunk')
def addjunk():
    """Debug: insert a timestamped document into the 'junk' collection."""
    if not ENABLE_DEBUG_ROUTES:
        abort(404)
    junk = dbworker.mclient[dbworker.database]['junk']
    junk.insert_one({"datetime": datetime.datetime.now()})
    return "Junk added"
@app.route('/seejunk')
def seejunk():
    """Debug: dump every document in the 'junk' collection as text."""
    if not ENABLE_DEBUG_ROUTES:
        abort(404)
    docs = dbworker.mclient[dbworker.database]['junk'].find()
    # Same output as the old += loop: each doc's repr followed by a space.
    return "".join(str(doc) + " " for doc in docs)
@app.route('/clearjunk')
def clearjunk():
    """Debug: delete every document in the 'junk' collection."""
    if not ENABLE_DEBUG_ROUTES:
        abort(404)
    # Collection.remove() was deprecated and then removed from PyMongo;
    # delete_many({}) is the supported way to empty a collection.
    dbworker.mclient[dbworker.database]['junk'].delete_many({})
    return "Cleared!"
@app.route('/addsampleuser/<username>')
def addSampleUser(username):
    """Debug: create a throwaway <username>@mcode.club user with fixed data."""
    if not ENABLE_DEBUG_ROUTES:
        abort(404)
    email = mailsane.normalize(username + '@mcode.club')
    if email.error:
        abort(400)
    # Check for duplicates against the NORMALIZED address: the previous
    # revision checked the raw concatenation, which can differ from what
    # createUser actually stores and so let duplicates slip through.
    if dbworker.getUser(str(email)) is not None:
        abort(400)
    dbworker.createUser(str(email), str(email), 'Sample', 'User',
                        'I love rock and roll', 1, '647-111-1111',
                        datetime.datetime.strptime('1970-01-01', '%Y-%m-%d'),
                        'Parent Name')
    return username
@app.route('/showusers')
def showAllUsersDebug():
    """Debug: dump every user document as text."""
    if not ENABLE_DEBUG_ROUTES:
        abort(404)
    users = dbworker.mclient[dbworker.database]['users'].find()
    # Same output as the old += loop: each doc's repr followed by a space.
    return "".join(str(doc) + " " for doc in users)
@app.route('/dumpsession')
def dumpSession():
    """Debug: expose the session variables stored in the cookie."""
    if not ENABLE_DEBUG_ROUTES:
        abort(404)
    payload = {'sessionVars': str(session)}
    return jsonify(payload)
@app.route('/fixreports')
def fixReportIssues():
    """Debug: create any missing empty report documents."""
    if not ENABLE_DEBUG_ROUTES:
        abort(404)
    result = dbworker.addMissingEmptyReports()
    return jsonify({'result': result})
@app.route('/deleteorphans')
def deleteOrphansDebug():
    """Debug: delete reports orphaned by class (not user) deletion."""
    if not ENABLE_DEBUG_ROUTES:
        abort(404)
    dbworker.clearOrphanedReports()
    return jsonify({'success': True})
@app.route('/testFile', methods=['POST'])
def handleSPreadSheetDebug():
    """
    Debug: import users from an uploaded spreadsheet, without auth.

    SECURITY: this was the only debug route missing the
    ENABLE_DEBUG_ROUTES guard, leaving an unauthenticated endpoint that
    creates/assigns users in production. Guard it like its siblings.
    """
    if not ENABLE_DEBUG_ROUTES:
        abort(404)
    if request.files is None or 'file' not in request.files:
        abort(400)
    sheetFile = request.files['file']
    try:
        sheetHandler = spreadSheetHandler.SheetHandler(sheetFile)
        failures = sheetHandler.assignSpreadSheetUsers()
        return jsonify(failures)
    except XLRDError:
        abort(400)
# This blocks off routes like /a/.../.../.........
# This is used to allow the React app to have routes that won't throw a 404
@app.route('/a')
@app.route('/a/')
@app.route('/a/<path:path>')
@app.route('/s')
@app.route('/s/')
@app.route('/s/<path:path>')
@app.route('/t')
@app.route('/t/')
@app.route('/t/<path:path>')
@app.route('/v')
@app.route('/v/')
@app.route('/v/<path:path>')
@app.route('/')
def index(path='/'):
    """Serve the React single-page app for every client-side route prefix."""
    return app.send_static_file('index.html')
if __name__ == "__main__":
    # Only for debugging while developing
    # Binds on all interfaces; PORT env var overrides the default port 80.
    # NOTE(review): os.environ.get returns a *string* when PORT is set --
    # confirm Flask/Werkzeug coerces it before relying on this in prod.
    app.run(host='0.0.0.0', debug=False, port=os.environ.get('PORT', 80))
| StarcoderdataPython |
3225915 | <reponame>pulumi/pulumi-alicloud
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['IndustrialPidLoopArgs', 'IndustrialPidLoop']
@pulumi.input_type
class IndustrialPidLoopArgs:
    # NOTE: generated by the Pulumi Terraform Bridge (tfgen) -- regenerate
    # from the provider schema rather than editing by hand.
    def __init__(__self__, *,
                 pid_loop_configuration: pulumi.Input[str],
                 pid_loop_dcs_type: pulumi.Input[str],
                 pid_loop_is_crucial: pulumi.Input[bool],
                 pid_loop_name: pulumi.Input[str],
                 pid_loop_type: pulumi.Input[str],
                 pid_project_id: pulumi.Input[str],
                 pid_loop_desc: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a IndustrialPidLoop resource.
        :param pulumi.Input[str] pid_loop_configuration: The Pid Loop Configuration.
        :param pulumi.Input[str] pid_loop_dcs_type: The dcs type of Pid Loop. Valid values: `standard`.
        :param pulumi.Input[bool] pid_loop_is_crucial: Whether is crucial Pid Loop.
        :param pulumi.Input[str] pid_loop_name: The name of Pid Loop.
        :param pulumi.Input[str] pid_loop_type: The type of Pid Loop. Valid values: `0`, `1`, `2`, `3`, `4`, `5`.
        :param pulumi.Input[str] pid_project_id: The pid project id.
        :param pulumi.Input[str] pid_loop_desc: The desc of Pid Loop.
        """
        pulumi.set(__self__, "pid_loop_configuration", pid_loop_configuration)
        pulumi.set(__self__, "pid_loop_dcs_type", pid_loop_dcs_type)
        pulumi.set(__self__, "pid_loop_is_crucial", pid_loop_is_crucial)
        pulumi.set(__self__, "pid_loop_name", pid_loop_name)
        pulumi.set(__self__, "pid_loop_type", pid_loop_type)
        pulumi.set(__self__, "pid_project_id", pid_project_id)
        # Only the optional description is set conditionally.
        if pid_loop_desc is not None:
            pulumi.set(__self__, "pid_loop_desc", pid_loop_desc)

    @property
    @pulumi.getter(name="pidLoopConfiguration")
    def pid_loop_configuration(self) -> pulumi.Input[str]:
        """
        The Pid Loop Configuration.
        """
        return pulumi.get(self, "pid_loop_configuration")

    @pid_loop_configuration.setter
    def pid_loop_configuration(self, value: pulumi.Input[str]):
        pulumi.set(self, "pid_loop_configuration", value)

    @property
    @pulumi.getter(name="pidLoopDcsType")
    def pid_loop_dcs_type(self) -> pulumi.Input[str]:
        """
        The dcs type of Pid Loop. Valid values: `standard`.
        """
        return pulumi.get(self, "pid_loop_dcs_type")

    @pid_loop_dcs_type.setter
    def pid_loop_dcs_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "pid_loop_dcs_type", value)

    @property
    @pulumi.getter(name="pidLoopIsCrucial")
    def pid_loop_is_crucial(self) -> pulumi.Input[bool]:
        """
        Whether is crucial Pid Loop.
        """
        return pulumi.get(self, "pid_loop_is_crucial")

    @pid_loop_is_crucial.setter
    def pid_loop_is_crucial(self, value: pulumi.Input[bool]):
        pulumi.set(self, "pid_loop_is_crucial", value)

    @property
    @pulumi.getter(name="pidLoopName")
    def pid_loop_name(self) -> pulumi.Input[str]:
        """
        The name of Pid Loop.
        """
        return pulumi.get(self, "pid_loop_name")

    @pid_loop_name.setter
    def pid_loop_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "pid_loop_name", value)

    @property
    @pulumi.getter(name="pidLoopType")
    def pid_loop_type(self) -> pulumi.Input[str]:
        """
        The type of Pid Loop. Valid values: `0`, `1`, `2`, `3`, `4`, `5`.
        """
        return pulumi.get(self, "pid_loop_type")

    @pid_loop_type.setter
    def pid_loop_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "pid_loop_type", value)

    @property
    @pulumi.getter(name="pidProjectId")
    def pid_project_id(self) -> pulumi.Input[str]:
        """
        The pid project id.
        """
        return pulumi.get(self, "pid_project_id")

    @pid_project_id.setter
    def pid_project_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "pid_project_id", value)

    @property
    @pulumi.getter(name="pidLoopDesc")
    def pid_loop_desc(self) -> Optional[pulumi.Input[str]]:
        """
        The desc of Pid Loop.
        """
        return pulumi.get(self, "pid_loop_desc")

    @pid_loop_desc.setter
    def pid_loop_desc(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "pid_loop_desc", value)
@pulumi.input_type
class _IndustrialPidLoopState:
    # NOTE: generated by the Pulumi Terraform Bridge (tfgen). Every field is
    # optional because this type is used for lookups/filters, not creation.
    def __init__(__self__, *,
                 pid_loop_configuration: Optional[pulumi.Input[str]] = None,
                 pid_loop_dcs_type: Optional[pulumi.Input[str]] = None,
                 pid_loop_desc: Optional[pulumi.Input[str]] = None,
                 pid_loop_is_crucial: Optional[pulumi.Input[bool]] = None,
                 pid_loop_name: Optional[pulumi.Input[str]] = None,
                 pid_loop_type: Optional[pulumi.Input[str]] = None,
                 pid_project_id: Optional[pulumi.Input[str]] = None,
                 status: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering IndustrialPidLoop resources.
        :param pulumi.Input[str] pid_loop_configuration: The Pid Loop Configuration.
        :param pulumi.Input[str] pid_loop_dcs_type: The dcs type of Pid Loop. Valid values: `standard`.
        :param pulumi.Input[str] pid_loop_desc: The desc of Pid Loop.
        :param pulumi.Input[bool] pid_loop_is_crucial: Whether is crucial Pid Loop.
        :param pulumi.Input[str] pid_loop_name: The name of Pid Loop.
        :param pulumi.Input[str] pid_loop_type: The type of Pid Loop. Valid values: `0`, `1`, `2`, `3`, `4`, `5`.
        :param pulumi.Input[str] pid_project_id: The pid project id.
        :param pulumi.Input[str] status: The status of Pid Loop.
        """
        # Only set the attributes the caller actually supplied.
        if pid_loop_configuration is not None:
            pulumi.set(__self__, "pid_loop_configuration", pid_loop_configuration)
        if pid_loop_dcs_type is not None:
            pulumi.set(__self__, "pid_loop_dcs_type", pid_loop_dcs_type)
        if pid_loop_desc is not None:
            pulumi.set(__self__, "pid_loop_desc", pid_loop_desc)
        if pid_loop_is_crucial is not None:
            pulumi.set(__self__, "pid_loop_is_crucial", pid_loop_is_crucial)
        if pid_loop_name is not None:
            pulumi.set(__self__, "pid_loop_name", pid_loop_name)
        if pid_loop_type is not None:
            pulumi.set(__self__, "pid_loop_type", pid_loop_type)
        if pid_project_id is not None:
            pulumi.set(__self__, "pid_project_id", pid_project_id)
        if status is not None:
            pulumi.set(__self__, "status", status)

    @property
    @pulumi.getter(name="pidLoopConfiguration")
    def pid_loop_configuration(self) -> Optional[pulumi.Input[str]]:
        """
        The Pid Loop Configuration.
        """
        return pulumi.get(self, "pid_loop_configuration")

    @pid_loop_configuration.setter
    def pid_loop_configuration(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "pid_loop_configuration", value)

    @property
    @pulumi.getter(name="pidLoopDcsType")
    def pid_loop_dcs_type(self) -> Optional[pulumi.Input[str]]:
        """
        The dcs type of Pid Loop. Valid values: `standard`.
        """
        return pulumi.get(self, "pid_loop_dcs_type")

    @pid_loop_dcs_type.setter
    def pid_loop_dcs_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "pid_loop_dcs_type", value)

    @property
    @pulumi.getter(name="pidLoopDesc")
    def pid_loop_desc(self) -> Optional[pulumi.Input[str]]:
        """
        The desc of Pid Loop.
        """
        return pulumi.get(self, "pid_loop_desc")

    @pid_loop_desc.setter
    def pid_loop_desc(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "pid_loop_desc", value)

    @property
    @pulumi.getter(name="pidLoopIsCrucial")
    def pid_loop_is_crucial(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether is crucial Pid Loop.
        """
        return pulumi.get(self, "pid_loop_is_crucial")

    @pid_loop_is_crucial.setter
    def pid_loop_is_crucial(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "pid_loop_is_crucial", value)

    @property
    @pulumi.getter(name="pidLoopName")
    def pid_loop_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of Pid Loop.
        """
        return pulumi.get(self, "pid_loop_name")

    @pid_loop_name.setter
    def pid_loop_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "pid_loop_name", value)

    @property
    @pulumi.getter(name="pidLoopType")
    def pid_loop_type(self) -> Optional[pulumi.Input[str]]:
        """
        The type of Pid Loop. Valid values: `0`, `1`, `2`, `3`, `4`, `5`.
        """
        return pulumi.get(self, "pid_loop_type")

    @pid_loop_type.setter
    def pid_loop_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "pid_loop_type", value)

    @property
    @pulumi.getter(name="pidProjectId")
    def pid_project_id(self) -> Optional[pulumi.Input[str]]:
        """
        The pid project id.
        """
        return pulumi.get(self, "pid_project_id")

    @pid_project_id.setter
    def pid_project_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "pid_project_id", value)

    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input[str]]:
        """
        The status of Pid Loop.
        """
        return pulumi.get(self, "status")

    @status.setter
    def status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "status", value)
class IndustrialPidLoop(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
pid_loop_configuration: Optional[pulumi.Input[str]] = None,
pid_loop_dcs_type: Optional[pulumi.Input[str]] = None,
pid_loop_desc: Optional[pulumi.Input[str]] = None,
pid_loop_is_crucial: Optional[pulumi.Input[bool]] = None,
pid_loop_name: Optional[pulumi.Input[str]] = None,
pid_loop_type: Optional[pulumi.Input[str]] = None,
pid_project_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a Brain Industrial Pid Loop resource.
> **NOTE:** Available in v1.117.0+.
## Example Usage
Basic Usage
```python
import pulumi
import pulumi_alicloud as alicloud
example = alicloud.brain.IndustrialPidLoop("example",
pid_loop_configuration="YourLoopConfiguration",
pid_loop_dcs_type="standard",
pid_loop_is_crucial=True,
pid_loop_name="tf-testAcc",
pid_loop_type="0",
pid_project_id="856c6b8f-ca63-40a4-xxxx-xxxx")
```
## Import
Brain Industrial Pid Loop can be imported using the id, e.g.
```sh
$ pulumi import alicloud:brain/industrialPidLoop:IndustrialPidLoop example <id>
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] pid_loop_configuration: The Pid Loop Configuration.
:param pulumi.Input[str] pid_loop_dcs_type: The dcs type of Pid Loop. Valid values: `standard`.
:param pulumi.Input[str] pid_loop_desc: The desc of Pid Loop.
:param pulumi.Input[bool] pid_loop_is_crucial: Whether is crucial Pid Loop.
:param pulumi.Input[str] pid_loop_name: The name of Pid Loop.
:param pulumi.Input[str] pid_loop_type: The type of Pid Loop. Valid values: `0`, `1`, `2`, `3`, `4`, `5`.
:param pulumi.Input[str] pid_project_id: The pid project id.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: IndustrialPidLoopArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a Brain Industrial Pid Loop resource.
> **NOTE:** Available in v1.117.0+.
## Example Usage
Basic Usage
```python
import pulumi
import pulumi_alicloud as alicloud
example = alicloud.brain.IndustrialPidLoop("example",
pid_loop_configuration="YourLoopConfiguration",
pid_loop_dcs_type="standard",
pid_loop_is_crucial=True,
pid_loop_name="tf-testAcc",
pid_loop_type="0",
pid_project_id="856c6b8f-ca63-40a4-xxxx-xxxx")
```
## Import
Brain Industrial Pid Loop can be imported using the id, e.g.
```sh
$ pulumi import alicloud:brain/industrialPidLoop:IndustrialPidLoop example <id>
```
:param str resource_name: The name of the resource.
:param IndustrialPidLoopArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(IndustrialPidLoopArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
pid_loop_configuration: Optional[pulumi.Input[str]] = None,
pid_loop_dcs_type: Optional[pulumi.Input[str]] = None,
pid_loop_desc: Optional[pulumi.Input[str]] = None,
pid_loop_is_crucial: Optional[pulumi.Input[bool]] = None,
pid_loop_name: Optional[pulumi.Input[str]] = None,
pid_loop_type: Optional[pulumi.Input[str]] = None,
pid_project_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = IndustrialPidLoopArgs.__new__(IndustrialPidLoopArgs)
if pid_loop_configuration is None and not opts.urn:
raise TypeError("Missing required property 'pid_loop_configuration'")
__props__.__dict__["pid_loop_configuration"] = pid_loop_configuration
if pid_loop_dcs_type is None and not opts.urn:
raise TypeError("Missing required property 'pid_loop_dcs_type'")
__props__.__dict__["pid_loop_dcs_type"] = pid_loop_dcs_type
__props__.__dict__["pid_loop_desc"] = pid_loop_desc
if pid_loop_is_crucial is None and not opts.urn:
raise TypeError("Missing required property 'pid_loop_is_crucial'")
__props__.__dict__["pid_loop_is_crucial"] = pid_loop_is_crucial
if pid_loop_name is None and not opts.urn:
raise TypeError("Missing required property 'pid_loop_name'")
__props__.__dict__["pid_loop_name"] = pid_loop_name
if pid_loop_type is None and not opts.urn:
raise TypeError("Missing required property 'pid_loop_type'")
__props__.__dict__["pid_loop_type"] = pid_loop_type
if pid_project_id is None and not opts.urn:
raise TypeError("Missing required property 'pid_project_id'")
__props__.__dict__["pid_project_id"] = pid_project_id
__props__.__dict__["status"] = None
super(IndustrialPidLoop, __self__).__init__(
'alicloud:brain/industrialPidLoop:IndustrialPidLoop',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
pid_loop_configuration: Optional[pulumi.Input[str]] = None,
pid_loop_dcs_type: Optional[pulumi.Input[str]] = None,
pid_loop_desc: Optional[pulumi.Input[str]] = None,
pid_loop_is_crucial: Optional[pulumi.Input[bool]] = None,
pid_loop_name: Optional[pulumi.Input[str]] = None,
pid_loop_type: Optional[pulumi.Input[str]] = None,
pid_project_id: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None) -> 'IndustrialPidLoop':
"""
Get an existing IndustrialPidLoop resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] pid_loop_configuration: The Pid Loop Configuration.
:param pulumi.Input[str] pid_loop_dcs_type: The dcs type of Pid Loop. Valid values: `standard`.
:param pulumi.Input[str] pid_loop_desc: The desc of Pid Loop.
:param pulumi.Input[bool] pid_loop_is_crucial: Whether is crucial Pid Loop.
:param pulumi.Input[str] pid_loop_name: The name of Pid Loop.
:param pulumi.Input[str] pid_loop_type: The type of Pid Loop. Valid values: `0`, `1`, `2`, `3`, `4`, `5`.
:param pulumi.Input[str] pid_project_id: The pid project id.
:param pulumi.Input[str] status: The status of Pid Loop.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _IndustrialPidLoopState.__new__(_IndustrialPidLoopState)
__props__.__dict__["pid_loop_configuration"] = pid_loop_configuration
__props__.__dict__["pid_loop_dcs_type"] = pid_loop_dcs_type
__props__.__dict__["pid_loop_desc"] = pid_loop_desc
__props__.__dict__["pid_loop_is_crucial"] = pid_loop_is_crucial
__props__.__dict__["pid_loop_name"] = pid_loop_name
__props__.__dict__["pid_loop_type"] = pid_loop_type
__props__.__dict__["pid_project_id"] = pid_project_id
__props__.__dict__["status"] = status
return IndustrialPidLoop(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="pidLoopConfiguration")
def pid_loop_configuration(self) -> pulumi.Output[str]:
"""
The Pid Loop Configuration.
"""
return pulumi.get(self, "pid_loop_configuration")
@property
@pulumi.getter(name="pidLoopDcsType")
def pid_loop_dcs_type(self) -> pulumi.Output[str]:
"""
The dcs type of Pid Loop. Valid values: `standard`.
"""
return pulumi.get(self, "pid_loop_dcs_type")
@property
@pulumi.getter(name="pidLoopDesc")
def pid_loop_desc(self) -> pulumi.Output[Optional[str]]:
"""
The desc of Pid Loop.
"""
return pulumi.get(self, "pid_loop_desc")
@property
@pulumi.getter(name="pidLoopIsCrucial")
def pid_loop_is_crucial(self) -> pulumi.Output[bool]:
"""
Whether is crucial Pid Loop.
"""
return pulumi.get(self, "pid_loop_is_crucial")
@property
@pulumi.getter(name="pidLoopName")
def pid_loop_name(self) -> pulumi.Output[str]:
"""
The name of Pid Loop.
"""
return pulumi.get(self, "pid_loop_name")
@property
@pulumi.getter(name="pidLoopType")
def pid_loop_type(self) -> pulumi.Output[str]:
"""
The type of Pid Loop. Valid values: `0`, `1`, `2`, `3`, `4`, `5`.
"""
return pulumi.get(self, "pid_loop_type")
@property
@pulumi.getter(name="pidProjectId")
def pid_project_id(self) -> pulumi.Output[str]:
"""
The pid project id.
"""
return pulumi.get(self, "pid_project_id")
    @property
    @pulumi.getter
    def status(self) -> pulumi.Output[str]:
        """
        The status of the PID loop.
        """
        return pulumi.get(self, "status")
| StarcoderdataPython |
3371368 | <reponame>AI4SIM/model-collection
"""This module proposes a test suite for the inferer module."""
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
import unittest
import h5py
import numpy
import pickle
import torch
import torch.nn as nn
import torch_geometric as pyg
from models import LitGIN
from inferer import Inferer, InferencePthGnn
CURRENT_DIR = Path(__file__).parent.absolute()
TEST_DATA_PATH = os.path.join(CURRENT_DIR, "test_data")
class TestInferer(unittest.TestCase):
    """Inferer test suite (base class behavior: paths, NotImplementedError stubs, predict)."""
    def setUp(self) -> None:
        """Init the inferer object used."""
        # Persist a tiny real torch model so Inferer can load it from disk.
        self.model_pt = os.path.join(TEST_DATA_PATH, "test_model.pt")
        model = nn.Linear(in_features=5, out_features=2)
        torch.save(model, self.model_pt)
        self.inferer = Inferer(model_path=self.model_pt, data_path="", wkd=TEST_DATA_PATH)
    def test_data_processed_path(self) -> None:
        """Test the data_processed_path property returns the proper path of saved data."""
        # The property is expected to swap the original extension for ".data".
        self.inferer.data_path = os.path.join(TEST_DATA_PATH, "test_data.origin")
        saved_data = self.inferer.data_processed_path
        self.assertEqual(saved_data, os.path.join(TEST_DATA_PATH, "test_data.data"))
    def test_load_data_not_implemented(self) -> None:
        """Test the load_data method raises a NotImplementedError."""
        self.assertRaises(NotImplementedError,
                          self.inferer.load_data)
    def test_preprocess_not_implemented(self) -> None:
        """Test the preprocess method raises a NotImplementedError."""
        self.assertRaises(NotImplementedError,
                          self.inferer.preprocess)
    def test_predict(self) -> None:
        """Test the predict method returns a torch.Tensor."""
        # Shape (5,) matches the in_features of the model saved in setUp.
        self.inferer.data = torch.testing.make_tensor((5,), device='cpu', dtype=torch.float32)
        preds = self.inferer.predict()
        self.assertIsInstance(preds, torch.Tensor)
    def tearDown(self) -> None:
        """Clean up the test artifacts."""
        os.remove(self.model_pt)
class TestInferencePthGnn(unittest.TestCase):
    """InferencePthGnn test suite (HDF5 loading, graph creation, prediction)."""
    @classmethod
    def setUpClass(cls) -> None:
        """Init the global data used."""
        # One shared HDF5 fixture with the three datasets the inferer reads.
        cls.infer_data = os.path.join(TEST_DATA_PATH, 'test_infer_data.h5')
        with h5py.File(cls.infer_data, 'w') as file:
            file['/c_filt'] = numpy.random.rand(42, 7, 66)
            file['/c_grad_filt'] = numpy.random.rand(42, 7, 66)
            file['/c_filt_grad'] = numpy.random.rand(42, 7, 66)
    def setUp(self) -> None:
        """Init the inferer object used."""
        self.model_ckpt = os.path.join(TEST_DATA_PATH, "test_model.ckpt")
        self.inferer = InferencePthGnn(model_path=self.model_ckpt,
                                       data_path=self.infer_data,
                                       model_class=LitGIN,
                                       wkd=TEST_DATA_PATH)
    def test_load_data(self) -> None:
        """Test the load_data method returns an array from an hdf5 file."""
        data = self.inferer.load_data()
        self.assertIsInstance(data, numpy.ndarray)
        with h5py.File(self.infer_data, 'r') as file:
            expected_array = file['/c_filt'][:]
        numpy.testing.assert_equal(data, expected_array)
    def test_load_y_dns(self) -> None:
        """Test the load_y_dns method returns an array from an hdf5 file."""
        data = self.inferer.load_y_dns()
        self.assertIsInstance(data, numpy.ndarray)
        with h5py.File(self.infer_data, 'r') as file:
            expected_array = file['/c_grad_filt'][:]
        numpy.testing.assert_equal(data, expected_array)
    def test_load_y_les(self) -> None:
        """Test the load_y_les method returns an array from an hdf5 file."""
        data = self.inferer.load_y_les()
        self.assertIsInstance(data, numpy.ndarray)
        with h5py.File(self.infer_data, 'r') as file:
            expected_array = file['/c_filt_grad'][:]
        numpy.testing.assert_equal(data, expected_array)
    def test_create_graph(self) -> None:
        """Test the _create_graph method creates a data graph from input features."""
        fake_data = torch.testing.make_tensor((2, 3, 4), device="cpu", dtype=torch.float32)
        self.inferer._create_graph(fake_data)
        self.assertIsInstance(self.inferer.data, pyg.data.Data)
        # Node features are the input flattened to one scalar per node.
        torch.testing.assert_close(fake_data.reshape(-1, 1), self.inferer.data.x)
    def test_create_graph_save(self) -> None:
        """Test the _create_graph method creates a data graph from input features and save it."""
        patch_data_file = os.path.join(TEST_DATA_PATH, 'test_save_file.data')
        self.inferer.data_path = os.path.join(TEST_DATA_PATH, 'test_save_file.h5')
        fake_data = torch.testing.make_tensor((2, 3, 4), device="cpu", dtype=torch.float32)
        self.inferer._create_graph(fake_data, save=True)
        self.assertTrue(Path(patch_data_file).resolve().is_file())
        os.remove(patch_data_file)
    def test_create_graph_existing(self) -> None:
        """Test the _create_graph method creates a data graph from data loaded from a previously
        saved file.
        """
        # create a fake previously-saved data file
        saved_data = torch.testing.make_tensor((1, 2, 3), device="cpu", dtype=torch.float32)
        with open(self.inferer.data_processed_path, 'wb') as file:
            pickle.dump(saved_data, file)
        # create graph on same data -> no re-creation, just load the file
        self.inferer._create_graph(saved_data)
        torch.testing.assert_close(saved_data, self.inferer.data)
        os.remove(self.inferer.data_processed_path)
    def test_predict(self) -> None:
        """Test the predict method returns a torch.Tensor."""
        self.inferer.preprocess()
        preds = self.inferer.predict()
        self.assertIsInstance(preds, torch.Tensor)
    @classmethod
    def tearDownClass(cls) -> None:
        """Clean the test artifacts."""
        os.remove(cls.infer_data)
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
3270437 | #This file will generate an html file with pothole coodinates
#Here will define the templet of stsarting portion of the HTML file
head = """
<html>
<head>
<meta name="viewport" content="initial-scale=1.0, user-scalable=no" />
<meta http-equiv="content-type" content="text/html; charset=UTF-8" />
<title>Google Maps - gmplot</title>
<script type="text/javascript" src="https://maps.googleapis.com/maps/api/js?libraries=visualization"></script>
<script type="text/javascript">
function initialize() {
var map = new google.maps.Map(document.getElementById("map_canvas"), {
zoom: 12,
center: new google.maps.LatLng(28.393737899999994, 77.03196649)
});"""
# Template pieces for placing one mark at a specific location; this snippet is
# emitted into the HTML once per pothole location.
def markers(marks, lat, long, linker):
    """Build the JavaScript snippets that draw one pothole marker.

    Args:
        marks: JavaScript identifier to use for the circle variable.
        lat: latitude of the pothole.
        long: longitude of the pothole.
        linker: image file name opened when the marker is clicked
            (relative to the ``pot_holes_detected/`` directory).

    Returns:
        Tuple of five strings: the circle declaration split in three parts
        (so the caller can interleave writes), the click-listener prefix,
        and the listener body.
    """
    # f-strings replace the original printf-style formatting; ``{lat:f}``
    # renders exactly like ``"%f" % lat`` (six decimal places).
    marker1 = f"const {marks} = new google.maps.Circle({{strokeColor: '#FF0000',strokeOpacity: 1.0, strokeWeight: 1,fillColor: '#FF0000', fillOpacity: 0.3, map: map, center:"
    marker2 = f"new google.maps.LatLng({lat:f},"
    marker3 = f"{long:f}), radius: 0.5}});"
    link = f"{marks}.addListener"
    link1 = f'("click", () => {{window.open("pot_holes_detected/{linker}");}});'
    return marker1, marker2, marker3, link, link1
#This section is templet of last potion of HTML file , it also contains layout of the webpage
tail = """
}
</script>
</head>
<h1> This Map hilights pot holes on your Roads</h1>
<h5> Click on the red dots to see the pot hole </h5>
<h5> Source code:https://github.com/manish-sin/pothole_heatmap</h5>
<body style="margin:0px; padding:0px;" onload="initialize()">
<div id="map_canvas" style="width: 100%; height: 84.5%;" />
</body>
</html>"""
import pandas as pd
# loading the final_df.csv for plotting
core_df = pd.read_csv("final_df.csv")
# setting the image names as index for simplified iteration
core_df=core_df.set_index("Image")
# this variable is used to give each point a unique value
i=0
# creating the HTML file
with open('pothole_map.html', 'w') as f:
    f.write(head)
    space = """
    """
    for img in core_df.index:
        row = core_df.loc[img]
        print(row)
        marker = "marker%i"%(i)
        # NOTE(review): row[3]/row[4] are positional column accesses --
        # presumably latitude and longitude; verify against the column
        # order of final_df.csv.
        marker1, marker2, marker3, link, link1 =markers(marker, row[3], row[4], img)
        i=i+1
        f.write(space)
        f.write(marker1)
        f.write(marker2)
        f.write(marker3)
        f.write(space)
        f.write(link)
        print(link1)
        f.write(link1)
        f.write(space)
f.write(tail) | StarcoderdataPython |
69900 | """Views related to the statistics module"""
from datetime import date, timedelta
# pylint: disable=redefined-builtin
from requests.exceptions import ConnectionError, InvalidURL
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _
from django.views.generic import TemplateView
from .matomo_api_manager import MatomoApiManager
from ...decorators import region_permission_required
from ...models import Region
@method_decorator(login_required, name="dispatch")
@method_decorator(region_permission_required, name="dispatch")
class AnalyticsView(TemplateView):
    """
    Renders the statistics page ("Statistiken"): fetches per-language
    visitor counts from Matomo for a date range and hands them to the
    template together with a CSV export string.
    """
    template_name = "statistics/statistics_dashboard.html"
    base_context = {"current_menu_item": "statistics"}
    @staticmethod
    def prepare_csv(languages, hits, dates):
        """
        Build a CSV string from the API hits: one header row
        ("date,<lang>,...") followed by one row per date, each row
        terminated with ';' (no newlines).
        """
        csv_row = "date"
        csv_raw = ""
        for l_value in languages:
            csv_row += "," + l_value[1]
        csv_raw += csv_row + ";"
        for date_index, _ in enumerate(dates):
            csv_row = ""
            csv_row += str(dates[date_index]) + ","
            # hits[idy] is [language label, color, per-date counts].
            for idy in range(0, len(languages)):
                csv_row += str(hits[idy][2][date_index])
                if idy < (len(languages) - 1):
                    csv_row += ","
            csv_row += ";"
            csv_raw += str(csv_row)
        return csv_raw
    # pylint: disable=too-many-locals
    def get(self, request, *args, **kwargs):
        """Handle GET: validate dates, query Matomo per language, render."""
        region_slug = kwargs.get("region_slug")
        region = Region.get_current_region(request)
        # Default window: the last 30 days up to today.
        start_date = request.GET.get(
            "start_date", str(date.today() - timedelta(days=30))
        )
        end_date = request.GET.get("end_date", str(date.today()))
        if (start_date == "") or (end_date == ""):
            messages.error(request, _("Please enter a correct start and enddate"))
            return redirect("statistics", region_slug=region_slug)
        # [language code, label, chart color] triples.
        languages = [
            ["de", "Deutsch", "#7e1e9c"],
            ["en", "Englisch", "#15b01a"],
            ["ar", "Arabisch", "#0343df"],
        ]
        api_man = MatomoApiManager(
            matomo_url=region.matomo_url,
            matomo_api_key=region.matomo_token,
            ssl_verify=True,
        )
        response_dates = []
        response_hits = []
        for lang in languages:
            try:
                # NOTE(review): region_id is hard-coded to "2" -- confirm
                # whether this should come from the current region.
                api_hits = api_man.get_visitors_per_timerange(
                    date_string=start_date + "," + end_date,
                    region_id="2",
                    period=request.GET.get("peri", "day"),
                    lang=lang[0],
                )
                temp_hits = []
            except ConnectionError:
                messages.error(
                    request, _("Connection to Matomo could not be established")
                )
                return redirect("dashboard", region_slug=region_slug)
            except InvalidURL:
                messages.error(
                    request,
                    _(
                        "The url you have entered is invalid. Please check the corresponding settings."
                    ),
                )
                return redirect("dashboard", region_slug=region_slug)
            except TypeError:
                messages.error(
                    request,
                    _(
                        "There was an error during the establishment of a connection. Please check the region and the entered key."
                    ),
                )
                return redirect("dashboard", region_slug=region_slug)
            for single_day in api_hits:
                temp_hits.append(single_day[1])
            response_hits.append([lang[1], lang[2], temp_hits])
            # NOTE(review): response_dates is extended once per language,
            # so each date appears len(languages) times -- looks
            # unintentional; confirm whether it should be filled only once.
            for single_day in api_hits:
                response_dates.append(single_day[0])
        return render(
            request,
            self.template_name,
            {
                **self.base_context,
                "csv": self.prepare_csv(languages, response_hits, response_dates),
                "dates": response_dates,
                "hits": response_hits,
            },
        )
| StarcoderdataPython |
82427 | import os
import argparse
from multiprocessing import Lock, Process, Queue
import subprocess
import traceback
from ytauth import YtAuth
from ytchat import YtChat
from twitchchat import TwitchChat
SECRET_FILE = 'client_secret.json'
def start_yt(prefix, vid_id, tts_queue, ytauth):
    """Run the YouTube chat bot (blocking) for live video *vid_id*.

    The bot is given *prefix* and *tts_queue* -- presumably it queues TTS
    commands for matching chat messages; see YtChat for the details.
    """
    # TODO: Search for YouTube events and allow user to pick
    ytchat = YtChat(secret_file=SECRET_FILE,
                    vid_id=vid_id, prefix=prefix,
                    ytauth=ytauth,
                    tts_queue=tts_queue)
    ytchat.run()
def start_twitch(prefix, tts_queue):
    """Run the Twitch chat bot (blocking).

    Credentials come from the TOKEN and CHANNEL environment variables.
    """
    bot = TwitchChat(
        # set up the bot
        token=os.environ['TOKEN'],
        prefix=prefix,
        tts_queue=tts_queue,
        initial_channels=[os.environ['CHANNEL']]
    )
    bot.run()
def send_say_requests(tts_queue):
    """Consume commands from *tts_queue* forever and run each one.

    Each item is passed to subprocess.run (presumably an argv list --
    confirm against the queue producers). Any exception is printed and
    swallowed so one bad command cannot kill the TTS loop.
    """
    while True:
        try:
            cmd = tts_queue.get(block=True)
            subprocess.run(cmd)
        except Exception:
            traceback.print_exc()
            print("Ignoring exception for tts commands...")
def main():
    """Start the YouTube and Twitch bots in child processes and pump TTS.

    Configuration comes from environment variables: BOT_PREFIX and VID_ID
    here, plus TOKEN/CHANNEL read inside start_twitch.
    """
    prefix = os.environ['BOT_PREFIX']
    vid_id = os.environ['VID_ID']
    tts_queue = Queue()
    auth = YtAuth(SECRET_FILE, vid_id)
    p_yt = Process(target=start_yt, args=(prefix, vid_id, tts_queue, auth))
    p_tw = Process(target=start_twitch, args=(prefix, tts_queue))
    p_yt.start()
    p_tw.start()
    # Send TTS requests. This call loops forever, so the joins and the
    # final print below are unreachable in normal operation.
    send_say_requests(tts_queue)
    p_yt.join()
    p_tw.join()
    print("End.")
if __name__ == "__main__":
main()
| StarcoderdataPython |
92092 | <reponame>divmain/lightyear
import re
from collections import OrderedDict
from parsimonious.grammar import Grammar
from parsimonious.exceptions import IncompleteParseError
from .errors import IndentError, LyError, LySyntaxError, UnsupportedCommaNesting
from .globals import BLK_OPEN, BLK_CLOSE, INDENT_SIZE, COMMENT_OPEN, COMMENT_CLOSE
from .ly_types import RuleBlock, UnpackMe, RootBlock, IgnoreMe, ParentReference
from .vendor import vendorize_css, vendorize_tree
ly_grammar = ""
funcmap = {}
defer_children_eval = []
### GRAMMAR HANDLING ###
class GDef(object):
    '''
    Decorator for defining LightYear syntax.

    Instantiating GDef appends *ruletxt* to the module-level grammar
    string and records every rule name it defines; calling the resulting
    object on a function registers that function in ``funcmap`` as the
    evaluator for each recorded rule name.
    '''
    def __init__(self, ruletxt, defer=False):
        global ly_grammar
        # Accumulate the rule text into the single module-wide grammar.
        ly_grammar += ruletxt + '\n'
        self.rulenames = []
        for line in ruletxt.split('\n'):
            line = line.strip()
            if line:
                # Rule name is everything left of the first '='.
                name = line.split('=')[0].strip()
                self.rulenames.append(name)
                if defer:
                    defer_children_eval.append(name)
    def __call__(self, f):
        # NOTE(review): no return value, so the decorated name is bound to
        # None in its module -- presumably intentional since all lookups go
        # through funcmap; confirm.
        for name in self.rulenames:
            funcmap[name] = f
### LIGHTYEAR PARSER ###
class LY(object):
    '''
    Parses LightYear code and generates CSS as output.

    Typical usage: ``ly = LY(); ly.eval(src); css = ly.css()``.
    '''
    # Compiled grammar is cached on the class after the first instantiation.
    grammar = None
    def __init__(self, env=None, debug=False, path=None, vendorize=False, vendor_targets=None):
        if not self.grammar:
            self.__class__.grammar = Grammar(ly_grammar)['ltree']
        self.env = env or {}
        self.debug = debug
        self.path = path
        self.vendorize = vendorize
        self.vendor_targets = vendor_targets
    def eval(self, ly_code):
        '''
        Accept a string containing LightYear code as input, and recursively
        evaluate the root node.
        '''
        lines = ly_code.split('\n')
        lines = tokenize_whitespace(lines)
        lines = tokenize_comments(lines)
        ly_code = '\n'.join(lines)
        # The boolean debug flag is replaced with a DebugGenerator bound to
        # the tokenized source (still truthy), or False when disabled.
        self.debug = DebugGenerator(ly_code) if self.debug else False
        try:
            node = self.grammar.parse(ly_code)
        except IncompleteParseError as e:
            raise LySyntaxError(e.pos, ly_code)
        self.ltree = self._evalnode(node)
        self.flatten()
        if self.vendorize == 'offline':
            vendorize_tree(self.ltree, offline=True, targets=self.vendor_targets)
        if self.vendorize == 'online':
            vendorize_tree(self.ltree, offline=False, targets=self.vendor_targets)
    def _evalnode(self, node):
        '''
        Evaluate a Parsimonious node.

        Rules listed in ``defer_children_eval`` receive the raw node;
        all others receive their already-evaluated children.
        '''
        fn = funcmap.get(node.expr_name, lambda env, node, children: children)
        if node.expr_name in defer_children_eval:
            return fn(self.env, node)
        children = [self._evalnode(child) for child in node]
        # Mixins return lists that need to be unpacked.
        for i, child in enumerate(children):
            if isinstance(child, UnpackMe):
                for packed_child in reversed(child):
                    children.insert(i+1, packed_child)
        return fn(self.env, node, children)
    def flatten(self):
        '''
        Flatten all nested rules and convert parent references
        to standard selectors. Execute only after LightYear
        code evaluation.
        '''
        # Iterating in reverse lets us replace children with IgnoreMe while
        # appending hoisted blocks right after the current element.
        for i, element in enumerate(self.ltree):
            if isinstance(element, RuleBlock):
                for j, child_element in reversed(list(enumerate(element.block))):
                    # Move nested RuleBlock objects to ltree and modify selectors.
                    if isinstance(child_element, RuleBlock):
                        if len(child_element.selectors) > 1 and len(element.selectors) > 1:
                            raise UnsupportedCommaNesting()
                        elif len(child_element.selectors) > 1:
                            child_element.selectors = [
                                element.selectors[0] + ' ' + child_sel
                                for child_sel in child_element.selectors]
                        else:
                            child_element.selectors = [
                                parent_sel + ' ' + child_element.selectors[0]
                                for parent_sel in element.selectors]
                        self.ltree.insert(i+1, child_element)
                        element.block[j] = IgnoreMe()
                    # Find parent selectors and convert to standard RuleBlocks.
                    elif isinstance(child_element, ParentReference):
                        ps_rule_block = child_element.rule_block
                        if not ps_rule_block.tag:
                            ps_rule_block.tag = element.tag
                        if len(ps_rule_block.selectors) > 1:
                            new_selectors = (
                                child_element.selectors[:-1] +
                                [child_element.selectors[-1] + ps_rule_block.selectors[0]] +
                                ps_rule_block.selectors[1:]
                            )
                        else:
                            new_selectors = (
                                element.selectors[:-1] +
                                [element.selectors[-1] + ps_rule_block.selectors[0]]
                            )
                        new_block = RuleBlock(
                            tag=ps_rule_block.tag,
                            selectors=new_selectors,
                            block=ps_rule_block.block,
                            index=ps_rule_block.index)
                        self.ltree.insert(i+1, new_block)
                        element.block[j] = IgnoreMe()
    def reduce(self):
        '''
        Consolidate rules with identical selectors into single rules.

        Later duplicates win for properties; blocks with equal selectors
        are merged (OrderedDict preserves first-seen ordering).
        '''
        # Reduce blocks.
        ltree_reduced = OrderedDict()
        non_block_count = 0
        for element in self.ltree:
            if hasattr(element, 'selectors'):
                hash_ = repr(element.selectors)
            elif hasattr(element, 'text'):
                hash_ = element.text
            else:
                hash_ = non_block_count
                non_block_count += 1
            if hasattr(element, 'block'):
                if hash_ in ltree_reduced:
                    ltree_reduced[hash_].block += element.block
                else:
                    ltree_reduced[hash_] = element
            else:
                ltree_reduced[hash_] = element
        ltree_reduced = [ltree_reduced[k] for k in ltree_reduced]
        # Reduce properties.
        for element in ltree_reduced:
            non_property_count = 0
            if hasattr(element, 'block'):
                block_reduced = OrderedDict()
                for child in element.block:
                    if hasattr(child, 'prop'):
                        block_reduced[child.prop] = child
                    else:
                        block_reduced[non_property_count] = child
                        non_property_count += 1
                element.block = [block_reduced[k] for k in block_reduced]
        self.ltree = ltree_reduced
    def css(self):
        '''
        Output minified CSS. Should not be run until LightYear code is
        evaluated and the resulting structure flattened.
        '''
        root_blocks = []
        for e in self.ltree:
            if isinstance(e, RootBlock):
                root_blocks.append(e)
        if not root_blocks:
            # No explicit root: emit everything under an empty prefix.
            root_blocks.append(RootBlock(tag_name=None, prefix=''))
        output = ''
        for root_block in root_blocks:
            output += root_block.prefix
            if root_block.prefix:
                output += '{'
            output += ''.join(e.css(tag=root_block.tag_name, debug=self.debug)
                              if hasattr(e, 'css')
                              else ''
                              for e in self.ltree)
            if root_block.prefix:
                output += '}'
        if self.vendorize == 'prefixr':
            return vendorize_css(output)
        return output
    def pretty_css(self):
        '''
        Output prettified CSS.
        '''
        if self.vendorize == 'prefixr':
            raise LyError('Unable to prettify prefixr.com CSS')
        def inside(index, chars):
            # True when the next structural char after *index* closes a
            # declaration (';' or '}') rather than opening a block ('{').
            j = index
            try:
                while True:
                    j += 1
                    if chars[j] == '}' or chars[j] == ';':
                        return True
                    elif chars[j] == '{':
                        return False
            except IndexError:
                return False
        css_chars = list(self.css())
        # Insert spaces and newlines.
        # ``skip`` suppresses re-processing a char the loop just inserted.
        skip = False
        for i, c in enumerate(css_chars):
            this_two = ''.join(css_chars[i:i+2]) if len(css_chars) > i+1 else None
            next_two = ''.join(css_chars[i+1:i+3]) if len(css_chars) > i+2 else None
            third = css_chars[i+2] if len(css_chars) > i+2 else None
            if c == ';' and not next_two == '/*':
                css_chars.insert(i+1, '\n')
            elif this_two == '/*':
                if skip:
                    skip = False
                    continue
                css_chars.insert(i, ' ')
                skip = True
            elif c == ':' and inside(i, css_chars):
                css_chars.insert(i+1, ' ')
            elif c == '{':
                if skip:
                    skip = False
                    continue
                css_chars.insert(i+1, '\n')
                css_chars.insert(i, ' ')
                skip = True
            elif c == '}':
                css_chars.insert(i+1, '\n')
            elif this_two == '*/' and not third == '{':
                css_chars.insert(i+2, '\n')
            elif c == ',':
                if not css_chars[i+1] == ' ':
                    css_chars[i] = ', '
                if css_chars[i-1] == ' ':
                    css_chars[i-1] = ''
        # Insert Indentation
        dent = 0
        tab = ' '
        # NOTE(review): ``next`` shadows the builtin here; harmless in this
        # scope but worth renaming if this method is ever touched again.
        for i, c in enumerate(css_chars):
            next = css_chars[i+1] if len(css_chars) > i+1 else None
            if next == '}':
                dent -= 1
            if c == '{':
                dent += 1
            elif c == '\n':
                css_chars.insert(i+1, tab*dent)
                if next == '}':
                    dent += 1
        return ''.join(css_chars)
# Import LightYear grammar after LY class definition.
from . import lang
### PRE-PEG TOKENIZATION ###
def tokenize_whitespace(lines):
    """
    For each line, identify the current level of indentation and compare
    it against the indentation of the previous line, inserting BLK_OPEN
    or BLK_CLOSE tokens as appropriate (one BLK_CLOSE per level dropped).

    Yields transformed lines. Raises IndentError when the first line is
    indented or a line jumps in by more than one level at once.
    """
    firstline = True
    prevdent = 0
    for lnum, line in enumerate(lines):
        line = line.expandtabs(INDENT_SIZE)
        # Don't allow empty lines to affect tracking of indentation.
        stripped = line.strip()
        if stripped == '' or stripped[:2] == '//':
            yield line
            continue
        # Check for indentation on the first line.
        if firstline:
            if line[0] == " ":
                raise IndentError(lnum)
            firstline = False
        leading_spaces = re.match('[ ]*', line).group()
        curdent = len(leading_spaces) // INDENT_SIZE
        if curdent == prevdent:
            yield line
        elif curdent == prevdent + 1:
            yield BLK_OPEN + line
        elif curdent < prevdent:
            yield BLK_CLOSE * (prevdent - curdent) + line
        else:
            # Indented by more than one level at once.
            raise IndentError(lnum)
        prevdent = curdent
    # Handle indented last line.
    yield BLK_CLOSE * prevdent
def tokenize_comments(lines):
    '''
    Identify and tokenize comments.

    The first ``//`` on a line that is not inside a quoted string opens a
    comment: the rest of the line is wrapped between COMMENT_OPEN and
    COMMENT_CLOSE tokens. Lines are yielded back (possibly unchanged).
    '''
    for line in lines:
        for possible in (x.start(0) for x in re.finditer('//', line)):
            if not _isquoted(line, possible):
                line = line[:possible] + COMMENT_OPEN + line[possible:] + COMMENT_CLOSE
                break
        yield line
def _isquoted(line, pos):
'''
Return boolean value indicating whether the character at position
pos resides within a quote.
'''
DQUO = False
SQUO = False
for i in range(0, pos):
if not DQUO and not SQUO:
if line[i] == '"':
DQUO = True
elif line[i] == "'":
SQUO = True
elif DQUO:
if line[i] == '"':
DQUO = False
elif SQUO:
if line[i] == "'":
SQUO = False
return (DQUO or SQUO)
### DEBUG ###
class DebugGenerator():
    """Produce per-line debug comments for generated CSS."""

    def __init__(self, ly_code):
        self.ly_code = ly_code

    def line_number(self, index):
        """Return the 1-based source line number of character *index*."""
        return self.ly_code[:index].count('\n') + 1

    def line_number_comment(self, index):
        """Return a CSS comment tagging the source line of *index*.

        The special index ``'generated'`` marks output with no direct
        source line.
        """
        if index == 'generated':
            return '/*GENERATED*/'
        return '/*line{}*/'.format(self.line_number(index))

    def __bool__(self):
        # Instances act as a truthy "debug enabled" flag (``if self.debug:``).
        # The original defined Python 2's ``__nonzero__``, which Python 3
        # never calls; ``__bool__`` is the Python 3 protocol. Behavior is
        # unchanged (instances were truthy by default anyway).
        return True

    # Backwards-compatible alias for any Python 2 caller.
    __nonzero__ = __bool__
| StarcoderdataPython |
1766349 | <reponame>ujjwalsb/liveProject
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Initial migration for the form_designer app: creates the Form,
    # FormField and FormSubmission models and the (form, name) uniqueness
    # constraint on FormField. Do not edit once applied in production.
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Form',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),
                ('title', models.CharField(verbose_name='title', max_length=100)),
                ('config_json', models.TextField(verbose_name='config', blank=True)),
            ],
            options={
                'verbose_name': 'form',
                'verbose_name_plural': 'forms',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='FormField',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),
                ('ordering', models.IntegerField(default=0, verbose_name='ordering')),
                ('title', models.CharField(verbose_name='title', max_length=100)),
                ('name', models.CharField(verbose_name='name', max_length=100)),
                ('type', models.CharField(verbose_name='type', choices=[('text', 'text'), ('email', 'e-mail address'), ('longtext', 'long text'), ('checkbox', 'checkbox'), ('select', 'select'), ('radio', 'radio'), ('multiple-select', 'multiple select'), ('hidden', 'hidden')], max_length=20)),
                ('choices', models.CharField(verbose_name='choices', help_text='Comma-separated', blank=True, max_length=1024)),
                ('help_text', models.CharField(verbose_name='help text', help_text='Optional extra explanatory text beside the field', blank=True, max_length=1024)),
                ('default_value', models.CharField(verbose_name='default value', help_text='Optional default value of the field', blank=True, max_length=255)),
                ('is_required', models.BooleanField(default=True, verbose_name='is required')),
                ('form', models.ForeignKey(related_name='fields', verbose_name='form', to='form_designer.Form', on_delete=models.CASCADE)),
            ],
            options={
                'ordering': ['ordering', 'id'],
                'verbose_name': 'form field',
                'verbose_name_plural': 'form fields',
            },
            bases=(models.Model,),
        ),
        migrations.AlterUniqueTogether(
            name='formfield',
            unique_together=set([('form', 'name')]),
        ),
        migrations.CreateModel(
            name='FormSubmission',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),
                ('submitted', models.DateTimeField(auto_now_add=True)),
                ('data', models.TextField()),
                ('path', models.CharField(max_length=255)),
                ('form', models.ForeignKey(related_name='submissions', verbose_name='form', to='form_designer.Form', on_delete=models.CASCADE)),
            ],
            options={
                'ordering': ('-submitted',),
                'verbose_name': 'form submission',
                'verbose_name_plural': 'form submissions',
            },
            bases=(models.Model,),
        ),
    ]
| StarcoderdataPython |
62870 | from telethon import events
import asyncio
from userbot.utils import admin_cmd
@borg.on(admin_cmd("mc"))
async def _(event):
    """Handle the ``.mc`` command: edit the triggering message through a
    short five-frame text animation, one frame every 0.3 seconds."""
    if event.fwd_from:
        # Ignore forwarded messages.
        return
    animation_interval = 0.3
    animation_ttl = range(0, 5)
    await event.edit("mein")
    animation_chars = [
        "madarchod",
        "Hu jo ",
        "Isme",
        "Aya",
        "**Mein madarchod hu jo isme aya**"
    ]
    for i in animation_ttl:
        await asyncio.sleep(animation_interval)
        await event.edit(animation_chars[i %5 ])
| StarcoderdataPython |
3284177 | <reponame>2dongheee/Udacity-CarND-Behavioral-Cloning-P3<gh_stars>1-10
from utils import *
from model import load_model
import cv2
import numpy as np
import pandas as pd
batch_size = 32
data_frame = pd.read_csv('data/driving_log.csv', usecols=[0, 1, 2, 3])
data_frame = data_frame.sample(frac=1).reset_index(drop=True)
num_rows_training = int(data_frame.shape[0]*0.9)
training_data = data_frame.loc[0:num_rows_training-1]
validation_data = data_frame.loc[num_rows_training:]
training_generator = get_data_generator(training_data, batch_size=batch_size)
validation_data_generator = get_data_generator(validation_data, batch_size=batch_size)
model = load_model()
samples_per_epoch = (20000//batch_size)*batch_size
model.fit_generator(training_generator, validation_data=validation_data_generator,
samples_per_epoch=samples_per_epoch, nb_epoch=3, nb_val_samples=3000)
print("Saving model weights and configuration file.")
model.save_weights('./model.h5') # always save your weights after training or during training
with open('./model.json', 'w') as outfile:
outfile.write(model.to_json()) | StarcoderdataPython |
3369760 | import math
import random
import numpy as np
N_CLASSES = 10
N_AMOSTRAS = 100
TEMPO_ENTRE_CHEGADA = 5
TEMPO_ENTRE_SERVICO = 5
TAMANHO_MAX_FILA = 10
def gera_amostras_exponencial(tam, param_lambda):
    """Return ``tam`` samples from an exponential distribution.

    Each sample is drawn with ``numpy.random.exponential`` using
    ``param_lambda`` as the scale parameter and truncated to an int with
    ``math.floor`` (same as the original append loop, now a comprehension).
    """
    return [math.floor(np.random.exponential(param_lambda)) for _ in range(tam)]
def gera_valor_aleatorio(p_classe, classes):
    """Draw a random value according to the empirical class distribution.

    A single uniform draw walks the class probabilities in ``p_classe``;
    the value is then chosen uniformly from the samples stored in the
    matching ``classes`` bucket. Falls back to the last bucket when
    floating-point remainder is left after the walk.
    """
    remaining = random.uniform(0, 1)
    for indice, probabilidade in enumerate(p_classe):
        remaining -= probabilidade
        if remaining <= 0:
            return random.choice(classes[indice])
    return random.choice(classes[-1])
# Build an empirical (histogram-based) distribution from the given samples.
def gera_funcao_distribuicao_acumulada(amostras = [], n_classes = 1):
    """Partition *amostras* into *n_classes* equal-width buckets.

    Returns ``(p_classe, classes)``: the per-bucket probability mass and
    the list of samples that fell into each bucket.

    NOTE(review): mutable default argument ``amostras=[]`` (benign here,
    the list is never mutated, and the default would crash on max([])).
    NOTE(review): every bucket is seeded with a placeholder 0 sample that
    ``random.choice`` can later return -- confirm this is intended.
    NOTE(review): ``intervalo_classe`` is 0 when all samples are equal,
    which makes the division below raise ZeroDivisionError.
    """
    n_amostras = len(amostras)
    valor_max = max(amostras)
    valor_min = min(amostras)
    amplitude = valor_max - valor_min
    intervalo_classe = amplitude / n_classes
    classes = [[0] for i in range(0, n_classes)]
    p_classe = [0 for i in range(0, n_classes)]
    indice = 0
    for amostra in amostras:
        indice = math.floor(amostra / intervalo_classe)
        # The maximum sample belongs to the last bucket.
        if(amostra == valor_max):
            indice = n_classes - 1
        classes[indice].append(amostra)
        p_classe[indice] += (1 / n_amostras)
    return p_classe, classes
p_classe_TEC = []
classes_TEC = []
def proximo_TEC(deterministico):
    """Next inter-arrival time: the fixed TEC constant when deterministic,
    otherwise a draw from the empirical TEC distribution."""
    return TEMPO_ENTRE_CHEGADA if deterministico else gera_valor_aleatorio(p_classe_TEC, classes_TEC)
p_classe_TES = []
classes_TES = []
def proximo_TES(deterministico):
    """Next service time: the fixed TES constant when deterministic,
    otherwise a draw from the empirical TES distribution."""
    return TEMPO_ENTRE_SERVICO if deterministico else gera_valor_aleatorio(p_classe_TES, classes_TES)
def simulacao(tempo_maximo, tec_deterministico, tes_deterministico, fila_finita):
    """Run the single-server discrete-event queue simulation.

    Args:
        tempo_maximo: simulated-time horizon for the event loop.
        tec_deterministico: if True, inter-arrival times are the fixed
            TEMPO_ENTRE_CHEGADA constant; otherwise they are drawn from an
            empirical exponential distribution built here.
        tes_deterministico: same switch for the service times.
        fila_finita: if True, arrivals that find TAMANHO_MAX_FILA
            customers waiting are lost.

    Prints the event trace and the final summary statistics.
    """
    global p_classe_TEC, classes_TEC
    global p_classe_TES, classes_TES
    # Build the empirical distributions only when sampling is requested.
    if not tec_deterministico:
        amostras_TEC = gera_amostras_exponencial(N_AMOSTRAS, TEMPO_ENTRE_CHEGADA)
        p_classe_TEC, classes_TEC = gera_funcao_distribuicao_acumulada(amostras_TEC, N_CLASSES)
    if not tes_deterministico:
        amostras_TES = gera_amostras_exponencial(N_AMOSTRAS, TEMPO_ENTRE_SERVICO)
        p_classe_TES, classes_TES = gera_funcao_distribuicao_acumulada(amostras_TES, N_CLASSES)
    # TR: clock, ES: server-busy flag, TF: queue length,
    # HC: next arrival time, HS: next departure time (99999 == "never").
    TR, ES, TF, HC, HS = 0, 0, 0, 0, 99999
    cliente_saindo = 0
    clientes = 0
    interacoes = 0
    sum_entidade_fila = 0
    tempo_fila, tempo_ocupado, tempo_sistema = 0, 0, 0
    tr_anterior, tf_anterior, es_anterior = 0, 0, 0
    print("\n\nEvento", "Cliente", "TR", "ES", "TF", "HC", "HS")
    print("Inicio", "-", TR, ES, TF, HC, HS)
    while TR < tempo_maximo:
        if HC < HS:  # arrival event
            TR = HC
            # Simplified from ``not fila_finita or (fila_finita and ...)``;
            # the inner ``fila_finita and`` was redundant.
            if not fila_finita or TF < TAMANHO_MAX_FILA:
                clientes += 1
                print("Chegada", clientes, tr_anterior, ES, TF, HC, HS)
                if ES == 0:
                    ES = 1
                    HS = TR + proximo_TES(tes_deterministico)
                else:
                    TF += 1
            # Bug fix: the next arrival must follow the TEC switch; the
            # original mistakenly passed tes_deterministico here.
            HC = TR + proximo_TEC(tec_deterministico)
        else:  # departure event
            cliente_saindo += 1
            print("Saida", cliente_saindo, tr_anterior, ES, TF, HC, HS)
            TR = HS
            if TF > 0:
                TF -= 1
                HS = TR + proximo_TES(tes_deterministico)
            else:
                ES = 0
                HS = 99999
        # Update running statistics.
        interacoes += 1
        sum_entidade_fila += TF  # mean number of entities in the queue
        if ES == 1:
            tempo_ocupado += (TR - tr_anterior)  # server utilisation
        tempo_fila += (TR - tr_anterior)*tf_anterior
        tempo_sistema += (TR - tr_anterior)*(tf_anterior + es_anterior)
        tr_anterior = TR
        tf_anterior = TF
        es_anterior = ES
    a = sum_entidade_fila / interacoes
    b = tempo_ocupado / TR
    c = tempo_fila / clientes
    d = tempo_sistema / clientes
    print("\n\nNúmero Médio de Entidades nas Filas: ", a)
    print("Taxa Média de Ocupação dos Servidores: ", b)
    print("Tempo Médio de uma Entidade na Fila: ", c)
    print("Tempo Médio no Sistema: ", d)
def main():
    """Interactive driver: read TEC/TES configuration from stdin, run the
    simulation for 100 time units, and loop until the user declines.

    NOTE(review): the validation insists on TEC < TES for a stationary
    state, yet a stable queue normally needs the mean inter-arrival time
    to exceed the mean service time (TEC > TES) -- confirm the intended
    direction of this check.
    """
    global TEMPO_ENTRE_CHEGADA, TEMPO_ENTRE_SERVICO, TAMANHO_MAX_FILA
    continuar = True
    while(continuar):
        flag = True
        while(flag) :
            tec_deterministico = input("TEC é deterministico (S/N)? ") in ("S", "s")
            if tec_deterministico :
                TEMPO_ENTRE_CHEGADA = int(input("Insira o valor de TEC: "))
            else :
                TEMPO_ENTRE_CHEGADA = int(input("Defina o valor de lambda para TEC: "))
            tes_deterministico = input("TES é deterministico (S/N)? ") in ("S", "s")
            if tes_deterministico :
                TEMPO_ENTRE_SERVICO = int(input("Insira o valor de TES: "))
            else :
                TEMPO_ENTRE_SERVICO = int(input("Defina o valor de lambda para TES: "))
            if(TEMPO_ENTRE_CHEGADA < TEMPO_ENTRE_SERVICO):
                flag = False
            else:
                print("É necessário que o valor TEC seja MENOR que o valor TES (estado estacionário)")
        fila_finita = input("Fila é finita (S/N)? ") in ("S", "s")
        if fila_finita:
            TAMANHO_MAX_FILA = int(input("Tamanho máximo da fila: "))
        simulacao(100, tec_deterministico, tes_deterministico, fila_finita)
        continuar = input("\n\nDeseja continuar com outra simulacao(S/N)? ") in ("S", "s")
# Run the interactive loop on import/execution of this script.
main()
| StarcoderdataPython |
3200927 | import os
import mysql.connector as mariadb
def createDBConnection():
    """Open a new MariaDB connection configured from environment variables.

    Reads DBHOST, DBUSER, DBPWD and DBNAME, falling back to the
    placeholder string '0' when a variable is unset (matching the
    original behavior).

    Returns:
        An open mysql.connector connection object.
    """
    # os.getenv already returns a str (or the str default), so the
    # original str(...) wrappers were redundant and have been removed.
    return mariadb.connect(
        host=os.getenv('DBHOST', '0'),
        user=os.getenv('DBUSER', '0'),
        password=os.getenv('DBPWD', '0'),
        database=os.getenv('DBNAME', '0'),
    )
def getProcedure(procName):
    """Execute the stored procedure named *procName* and commit.

    Opens a fresh connection, invokes the procedure with a buffered
    cursor, then commits and closes the connection.
    """
    mydb = createDBConnection()
    cursor = mydb.cursor(buffered=True)
    # Bug fix: the procedure name was hard-coded to 'test', silently
    # ignoring the procName argument.
    cursor.callproc(procName)
    commitDBConnection(mydb)
def commitDBConnection(database):
    """Commit any pending transaction on *database*, then close it."""
    database.commit()
    database.close()
| StarcoderdataPython |
1675260 | # -*- coding: utf-8 -*-
"""
Presenters for API data.
"""
import collections
import copy
from pyramid import security
class AnnotationBasePresenter(object):
    """Shared presentation logic for annotation serialisations.

    Wraps an ``annotation_resource`` (which carries the annotation model
    object plus its hypermedia links) and exposes the fields common to
    every output format.
    """

    def __init__(self, annotation_resource):
        self.annotation_resource = annotation_resource
        self.annotation = annotation_resource.annotation

    @property
    def created(self):
        """ISO 8601 creation timestamp, or None when unset."""
        created = self.annotation.created
        return utc_iso8601(created) if created else None

    @property
    def updated(self):
        """ISO 8601 update timestamp, or None when unset."""
        updated = self.annotation.updated
        return utc_iso8601(updated) if updated else None

    @property
    def links(self):
        """A dictionary of named hypermedia links for this annotation."""
        return self.annotation_resource.links

    @property
    def text(self):
        """The annotation body text, defaulting to the empty string."""
        return self.annotation.text or ''

    @property
    def tags(self):
        """The annotation tags, defaulting to an empty list."""
        return self.annotation.tags or []

    @property
    def target(self):
        """Single-element target list: source URI plus optional selectors."""
        target = {'source': self.annotation.target_uri}
        selectors = self.annotation.target_selectors
        if selectors:
            target['selector'] = selectors
        return [target]
class AnnotationJSONPresenter(AnnotationBasePresenter):
    """Present an annotation in the JSON format returned by API requests."""

    def asdict(self):
        """Return the annotation as a JSON-serialisable dict."""
        docpresenter = DocumentJSONPresenter(self.annotation.document)
        base = {
            'id': self.annotation.id,
            'created': self.created,
            'updated': self.updated,
            'user': self.annotation.userid,
            'uri': self.annotation.target_uri,
            'text': self.text,
            'tags': self.tags,
            'group': self.annotation.groupid,
            'permissions': self.permissions,
            'target': self.target,
            'document': docpresenter.asdict(),
            'links': self.links,
        }
        # Only annotations with references (i.e. replies) carry the key.
        if self.annotation.references:
            base['references'] = self.annotation.references
        # Client-supplied extra fields are included, but the keys set above
        # always win (update() is applied after the copy).
        annotation = copy.copy(self.annotation.extra) or {}
        annotation.update(base)
        return annotation

    @property
    def permissions(self):
        """
        Return a permissions dict for the given annotation.

        Converts our simple internal annotation storage format into the legacy
        complex permissions dict format that is still used in some places.
        """
        read = self.annotation.userid
        if self.annotation.shared:
            read = 'group:{}'.format(self.annotation.groupid)
            # World-readable annotations are reported with the special
            # '__world__' pseudo-group in the legacy format.
            principals = security.principals_allowed_by_permission(
                self.annotation_resource, 'read')
            if security.Everyone in principals:
                read = 'group:__world__'
        return {'read': [read],
                'admin': [self.annotation.userid],
                'update': [self.annotation.userid],
                'delete': [self.annotation.userid]}
class AnnotationSearchIndexPresenter(AnnotationBasePresenter):
    """Present an annotation in the JSON format used in the search index."""

    def __init__(self, annotation):
        # NOTE: unlike the base class, this presenter is constructed from a
        # bare annotation (no resource wrapper), so `annotation_resource` is
        # never set and the inherited `links` property is overridden below.
        self.annotation = annotation

    def asdict(self):
        """Return the annotation as a dict suitable for search indexing."""
        docpresenter = DocumentSearchIndexPresenter(self.annotation.document)
        result = {
            'id': self.annotation.id,
            'created': self.created,
            'updated': self.updated,
            'user': self.annotation.userid,
            # *_raw duplicates presumably map to unanalyzed index fields for
            # exact matching — TODO confirm against the index mapping.
            'user_raw': self.annotation.userid,
            'uri': self.annotation.target_uri,
            'text': self.text,
            'tags': self.tags,
            'tags_raw': self.tags,
            'group': self.annotation.groupid,
            'shared': self.annotation.shared,
            'target': self.target,
            'document': docpresenter.asdict(),
        }
        # The normalized URI is indexed as the target's scope.
        result['target'][0]['scope'] = [self.annotation.target_uri_normalized]
        if self.annotation.references:
            result['references'] = self.annotation.references
        return result

    @property
    def links(self):
        # The search index presenter has no need to generate links, and so the
        # `links_service` parameter has been removed from the constructor.
        raise NotImplementedError("search index presenter doesn't have links")
class AnnotationJSONLDPresenter(AnnotationBasePresenter):
    """
    Presenter for annotations that renders a JSON-LD format compatible with the
    draft Web Annotation Data Model, as defined at:

        https://www.w3.org/TR/annotation-model/
    """

    CONTEXT_URL = 'http://www.w3.org/ns/anno.jsonld'

    def asdict(self):
        """Return the annotation as a Web Annotation JSON-LD dict."""
        return {
            '@context': self.CONTEXT_URL,
            'type': 'Annotation',
            'id': self.id,
            'created': self.created,
            'modified': self.updated,
            'creator': self.annotation.userid,
            'body': self.bodies,
            'target': self.target,
        }

    @property
    def id(self):
        """The annotation's dereferenceable JSON-LD identifier URL."""
        return self.annotation_resource.link('jsonld_id')

    @property
    def bodies(self):
        """The markdown text body followed by one tagging body per tag."""
        text_body = {
            'type': 'TextualBody',
            'text': self.text,
            'format': 'text/markdown',
        }
        tag_bodies = [
            {
                'type': 'TextualBody',
                'text': tag,
                'purpose': 'tagging',
            }
            for tag in self.tags
        ]
        return [text_body] + tag_bodies
class DocumentJSONPresenter(object):
    """Render a document as the small dict embedded in annotation JSON."""

    def __init__(self, document):
        self.document = document

    def asdict(self):
        """Return {'title': [title]} when a title is available, else {}."""
        doc = self.document
        if not doc:
            return {}
        title = doc.title
        return {'title': [title]} if title else {}
class DocumentSearchIndexPresenter(object):
    """Render a document as the dict stored in the search index."""

    def __init__(self, document):
        self.document = document

    def asdict(self):
        """Return the document's title and web URI, omitting missing fields."""
        doc = self.document
        result = {}
        if doc:
            if doc.title:
                result['title'] = [doc.title]
            if doc.web_uri:
                result['web_uri'] = doc.web_uri
        return result
def utc_iso8601(datetime):
return datetime.strftime('%Y-%m-%dT%H:%M:%S.%f+00:00')
def deep_merge_dict(a, b):
    """Recursively merge dict *b* into dict *a* (in place).

    Nested mappings are merged key by key; any non-mapping value in *b*
    overwrites the corresponding value in *a*.
    """
    for k, v in b.items():
        # Bug fix: collections.Mapping was a deprecated alias removed in
        # Python 3.10; the ABC lives in collections.abc.
        if isinstance(v, collections.abc.Mapping):
            if k not in a or not isinstance(a[k], dict):
                a[k] = dict()
            deep_merge_dict(a[k], v)
        else:
            a[k] = v
def _json_link(request, annotation):
    """Return the API URL for *annotation* (the default 'json' link type)."""
    return request.route_url('api.annotation', id=annotation.id)
def _jsonld_id_link(request, annotation):
    """Return the HTML page URL used as the JSON-LD "id" for *annotation*."""
    return request.route_url('annotation', id=annotation.id)
def _permissions(annotation):
    """
    Return a permissions dict for the given annotation.

    Converts our simple internal annotation storage format into the legacy
    complex permissions dict format that is still used in some places.
    """
    userid = annotation.userid
    if annotation.shared:
        read_principal = 'group:{}'.format(annotation.groupid)
    else:
        read_principal = userid
    return {'read': [read_principal],
            'admin': [userid],
            'update': [userid],
            'delete': [userid]}
def includeme(config):
    """Pyramid setup hook: register this module's annotation link generators."""
    # Add a default 'json' link type
    config.add_annotation_link_generator('json', _json_link)
    # Add a 'jsonld_id' link type for generating the "id" field for JSON-LD
    # annotations. This is hidden, and so not rendered in the annotation's
    # "links" field.
    config.add_annotation_link_generator('jsonld_id',
                                         _jsonld_id_link,
                                         hidden=True)
| StarcoderdataPython |
24912 | """
Tests for Day 22
"""
from day22.module import part_1, part_2, \
FULL_INPUT_FILE, TEST_INPUT_FILE_1, TEST_INPUT_FILE_2, TEST_INPUT_FILE_3
def test_part_1_1():
    """Part 1 against the first sample input."""
    assert part_1(TEST_INPUT_FILE_1) == 39
def test_part_1_2():
    """Part 1 against the second sample input."""
    assert part_1(TEST_INPUT_FILE_2) == 590784
def test_part_1_3():
    """Part 1 against the third sample input."""
    assert part_1(TEST_INPUT_FILE_3) == 474140
def test_part_1_full():
    """Part 1 against the full puzzle input."""
    assert part_1(FULL_INPUT_FILE) == 546724
def test_part_2():
    """Part 2 against the third sample input."""
    assert part_2(TEST_INPUT_FILE_3) == 2758514936282235
def test_part_2_full():
    """Part 2 against the full puzzle input."""
    assert part_2(FULL_INPUT_FILE) == 1346544039176841
| StarcoderdataPython |
7284 | <reponame>maya2250/nova<gh_stars>0
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.policies import base
# Convenience aliases: default check string and policy-name templates used by
# the rule definitions below.
RULE_AOO = base.RULE_ADMIN_OR_OWNER
SERVERS = 'os_compute_api:servers:%s'
NETWORK_ATTACH_EXTERNAL = 'network:attach_external_network'
ZERO_DISK_FLAVOR = SERVERS % 'create:zero_disk_flavor'
REQUESTED_DESTINATION = 'compute:servers:create:requested_destination'
CROSS_CELL_RESIZE = 'compute:servers:resize:cross_cell'
# Policy rules for the servers API. Each DocumentedRuleDefault pairs a policy
# name with its default check string, an operator-facing description, and the
# list of API operations the policy guards.
rules = [
    policy.DocumentedRuleDefault(
        SERVERS % 'index',
        RULE_AOO,
        "List all servers",
        [
            {
                'method': 'GET',
                'path': '/servers'
            }
        ]),
    policy.DocumentedRuleDefault(
        SERVERS % 'detail',
        RULE_AOO,
        "List all servers with detailed information",
        [
            {
                'method': 'GET',
                'path': '/servers/detail'
            }
        ]),
    policy.DocumentedRuleDefault(
        SERVERS % 'index:get_all_tenants',
        base.RULE_ADMIN_API,
        "List all servers for all projects",
        [
            {
                'method': 'GET',
                'path': '/servers'
            }
        ]),
    policy.DocumentedRuleDefault(
        SERVERS % 'detail:get_all_tenants',
        base.RULE_ADMIN_API,
        "List all servers with detailed information for all projects",
        [
            {
                'method': 'GET',
                'path': '/servers/detail'
            }
        ]),
    policy.DocumentedRuleDefault(
        SERVERS % 'allow_all_filters',
        base.RULE_ADMIN_API,
        "Allow all filters when listing servers",
        [
            {
                'method': 'GET',
                'path': '/servers'
            },
            {
                'method': 'GET',
                'path': '/servers/detail'
            }
        ]),
    policy.DocumentedRuleDefault(
        SERVERS % 'show',
        RULE_AOO,
        "Show a server",
        [
            {
                'method': 'GET',
                'path': '/servers/{server_id}'
            }
        ]),
    # the details in host_status are pretty sensitive, only admins
    # should do that by default.
    policy.DocumentedRuleDefault(
        SERVERS % 'show:host_status',
        base.RULE_ADMIN_API,
        """
Show a server with additional host status information.

This means host_status will be shown irrespective of status value. If showing
only host_status UNKNOWN is desired, use the
``os_compute_api:servers:show:host_status:unknown-only`` policy rule.

Microversion 2.75 added the ``host_status`` attribute in the
``PUT /servers/{server_id}`` and ``POST /servers/{server_id}/action (rebuild)``
API responses which are also controlled by this policy rule, like the
``GET /servers*`` APIs.
""",
        [
            {
                'method': 'GET',
                'path': '/servers/{server_id}'
            },
            {
                'method': 'GET',
                'path': '/servers/detail'
            },
            {
                'method': 'PUT',
                'path': '/servers/{server_id}'
            },
            {
                'method': 'POST',
                'path': '/servers/{server_id}/action (rebuild)'
            }
        ]),
    policy.DocumentedRuleDefault(
        SERVERS % 'show:host_status:unknown-only',
        base.RULE_ADMIN_API,
        """
Show a server with additional host status information, only if host status is
UNKNOWN.

This policy rule will only be enforced when the
``os_compute_api:servers:show:host_status`` policy rule does not pass for the
request. An example policy configuration could be where the
``os_compute_api:servers:show:host_status`` rule is set to allow admin-only and
the ``os_compute_api:servers:show:host_status:unknown-only`` rule is set to
allow everyone.
""",
        [
            {
                'method': 'GET',
                'path': '/servers/{server_id}'
            },
            {
                'method': 'GET',
                'path': '/servers/detail'
            }
        ]),
    policy.DocumentedRuleDefault(
        SERVERS % 'create',
        RULE_AOO,
        "Create a server",
        [
            {
                'method': 'POST',
                'path': '/servers'
            }
        ]),
    policy.DocumentedRuleDefault(
        SERVERS % 'create:forced_host',
        base.RULE_ADMIN_API,
        """
Create a server on the specified host and/or node.

In this case, the server is forced to launch on the specified
host and/or node by bypassing the scheduler filters unlike the
``compute:servers:create:requested_destination`` rule.
""",
        [
            {
                'method': 'POST',
                'path': '/servers'
            }
        ]),
    policy.DocumentedRuleDefault(
        REQUESTED_DESTINATION,
        base.RULE_ADMIN_API,
        """
Create a server on the requested compute service host and/or
hypervisor_hostname.

In this case, the requested host and/or hypervisor_hostname is
validated by the scheduler filters unlike the
``os_compute_api:servers:create:forced_host`` rule.
""",
        [
            {
                'method': 'POST',
                'path': '/servers'
            }
        ]),
    policy.DocumentedRuleDefault(
        SERVERS % 'create:attach_volume',
        RULE_AOO,
        "Create a server with the requested volume attached to it",
        [
            {
                'method': 'POST',
                'path': '/servers'
            }
        ]),
    policy.DocumentedRuleDefault(
        SERVERS % 'create:attach_network',
        RULE_AOO,
        "Create a server with the requested network attached to it",
        [
            {
                'method': 'POST',
                'path': '/servers'
            }
        ]),
    policy.DocumentedRuleDefault(
        SERVERS % 'create:trusted_certs',
        RULE_AOO,
        "Create a server with trusted image certificate IDs",
        [
            {
                'method': 'POST',
                'path': '/servers'
            }
        ]),
    policy.DocumentedRuleDefault(
        ZERO_DISK_FLAVOR,
        base.RULE_ADMIN_API,
        """
This rule controls the compute API validation behavior of creating a server
with a flavor that has 0 disk, indicating the server should be volume-backed.

For a flavor with disk=0, the root disk will be set to exactly the size of the
image used to deploy the instance. However, in this case the filter_scheduler
cannot select the compute host based on the virtual image size. Therefore, 0
should only be used for volume booted instances or for testing purposes.

WARNING: It is a potential security exposure to enable this policy rule
if users can upload their own images since repeated attempts to
create a disk=0 flavor instance with a large image can exhaust
the local disk of the compute (or shared storage cluster). See bug
https://bugs.launchpad.net/nova/+bug/1739646 for details.
""",
        [
            {
                'method': 'POST',
                'path': '/servers'
            }
        ]),
    policy.DocumentedRuleDefault(
        NETWORK_ATTACH_EXTERNAL,
        'is_admin:True',
        "Attach an unshared external network to a server",
        [
            # Create a server with a requested network or port.
            {
                'method': 'POST',
                'path': '/servers'
            },
            # Attach a network or port to an existing server.
            {
                'method': 'POST',
                'path': '/servers/{server_id}/os-interface'
            }
        ]),
    policy.DocumentedRuleDefault(
        SERVERS % 'delete',
        RULE_AOO,
        "Delete a server",
        [
            {
                'method': 'DELETE',
                'path': '/servers/{server_id}'
            }
        ]),
    policy.DocumentedRuleDefault(
        SERVERS % 'update',
        RULE_AOO,
        "Update a server",
        [
            {
                'method': 'PUT',
                'path': '/servers/{server_id}'
            }
        ]),
    policy.DocumentedRuleDefault(
        SERVERS % 'confirm_resize',
        RULE_AOO,
        "Confirm a server resize",
        [
            {
                'method': 'POST',
                'path': '/servers/{server_id}/action (confirmResize)'
            }
        ]),
    policy.DocumentedRuleDefault(
        SERVERS % 'revert_resize',
        RULE_AOO,
        "Revert a server resize",
        [
            {
                'method': 'POST',
                'path': '/servers/{server_id}/action (revertResize)'
            }
        ]),
    policy.DocumentedRuleDefault(
        SERVERS % 'reboot',
        RULE_AOO,
        "Reboot a server",
        [
            {
                'method': 'POST',
                'path': '/servers/{server_id}/action (reboot)'
            }
        ]),
    policy.DocumentedRuleDefault(
        SERVERS % 'resize',
        RULE_AOO,
        "Resize a server",
        [
            {
                'method': 'POST',
                'path': '/servers/{server_id}/action (resize)'
            }
        ]),
    policy.DocumentedRuleDefault(
        CROSS_CELL_RESIZE,
        base.RULE_NOBODY,
        "Resize a server across cells. By default, this is disabled for all "
        "users and recommended to be tested in a deployment for admin users "
        "before opening it up to non-admin users. Resizing within a cell is "
        "the default preferred behavior even if this is enabled. ",
        [
            {
                'method': 'POST',
                'path': '/servers/{server_id}/action (resize)'
            }
        ]),
    policy.DocumentedRuleDefault(
        SERVERS % 'rebuild',
        RULE_AOO,
        "Rebuild a server",
        [
            {
                'method': 'POST',
                'path': '/servers/{server_id}/action (rebuild)'
            }
        ]),
    policy.DocumentedRuleDefault(
        SERVERS % 'rebuild:trusted_certs',
        RULE_AOO,
        "Rebuild a server with trusted image certificate IDs",
        [
            {
                'method': 'POST',
                'path': '/servers/{server_id}/action (rebuild)'
            }
        ]),
    policy.DocumentedRuleDefault(
        SERVERS % 'create_image',
        RULE_AOO,
        "Create an image from a server",
        [
            {
                'method': 'POST',
                'path': '/servers/{server_id}/action (createImage)'
            }
        ]),
    policy.DocumentedRuleDefault(
        SERVERS % 'create_image:allow_volume_backed',
        RULE_AOO,
        "Create an image from a volume backed server",
        [
            {
                'method': 'POST',
                'path': '/servers/{server_id}/action (createImage)'
            }
        ]),
    policy.DocumentedRuleDefault(
        SERVERS % 'start',
        RULE_AOO,
        "Start a server",
        [
            {
                'method': 'POST',
                'path': '/servers/{server_id}/action (os-start)'
            }
        ]),
    policy.DocumentedRuleDefault(
        SERVERS % 'stop',
        RULE_AOO,
        "Stop a server",
        [
            {
                'method': 'POST',
                'path': '/servers/{server_id}/action (os-stop)'
            }
        ]),
    policy.DocumentedRuleDefault(
        SERVERS % 'trigger_crash_dump',
        RULE_AOO,
        "Trigger crash dump in a server",
        [
            {
                'method': 'POST',
                'path': '/servers/{server_id}/action (trigger_crash_dump)'
            }
        ]),
]
def list_rules():
    """Entry point used by oslo.policy to enumerate this module's defaults."""
    return rules
| StarcoderdataPython |
154259 | from .client import client_decorator
class Webapps:
    """Wrapper for the ``/webapps`` collection endpoints.

    Method bodies are docstrings only; presumably ``@client_decorator``
    installs the actual HTTP call using the ``op``/``name``/``path``
    arguments — TODO confirm against ``client_decorator``'s implementation.
    """

    def __init__(self, client):
        # API client used by the decorated methods.
        self.client = client

    @client_decorator(op="webapps")
    def get(self):
        "List all webapps"

    @client_decorator(op="webapps")
    def post(self, domain_name, python_version):
        """
        Create a new webapp with manual configuration.
        Use (for example) "python36" to specify Python 3.6.
        """
class DomaiName:
    """Wrapper for the ``/webapps/{domain_name}`` endpoints.

    NOTE(review): the class name looks like a typo for ``DomainName``;
    renaming would break existing importers, so it is left unchanged.
    """

    def __init__(self, client, domain_name):
        self.client = client
        # Domain whose webapp these calls operate on; interpolated into the
        # request path by @client_decorator.
        self.domain_name = domain_name

    @client_decorator(op="webapps", name="{self.domain_name}")
    def get(self):
        "Return information about a web app's configuration"

    @client_decorator(op="webapps", name="{self.domain_name}")
    def put(
        self, python_version, source_directory, virtualenv_path, force_https
    ):
        "Modify configuration of a web app. (NB a reload is usually required to apply changes)."

    @client_decorator(op="webapps", name="{self.domain_name}")
    def patch(
        self, python_version, source_directory, virtualenv_path, force_https
    ):
        "Modify configuration of a web app. (NB a reload is usually required to apply changes)."

    @client_decorator(op="webapps", name="{self.domain_name}")
    def delete(self):
        """
        Delete the webapp. This will take the site offline.
        Config is backed up in /var/www, and your code is not touched.
        """
class Reload:
    """Wrapper for the ``/webapps/{domain_name}/reload`` endpoint."""

    def __init__(self, client, domain_name):
        self.client = client
        self.domain_name = domain_name

    @client_decorator(op="webapps", name="{self.domain_name}", path="reload")
    def post(self):
        "Reload the webapp to reflect changes to configuration and/or source code on disk."
class Ssl:
    """Wrapper for the ``/webapps/{domain_name}/ssl`` endpoints."""

    def __init__(self, client, domain_name):
        self.client = client
        self.domain_name = domain_name

    @client_decorator(op="webapps", name="{self.domain_name}", path="ssl")
    def get(self):
        """
        Get and set TLS/HTTPS info. POST parameters to the right are incorrect,
        use `cert` and `private_key` when posting.
        """

    # NOTE(review): this signature mirrors the webapp config methods and, per
    # the docstring below, does not match what the endpoint actually expects
    # (`cert` and `private_key`) — verify against the API before relying on it.
    @client_decorator(op="webapps", name="{self.domain_name}", path="ssl")
    def post(
        self, python_version, source_directory, virtualenv_path, force_https
    ):
        """
        Get and set TLS/HTTPS info. POST parameters to the right are incorrect,
        use `cert` and `private_key` when posting.
        """

    @client_decorator(op="webapps", name="{self.domain_name}", path="ssl")
    def delete(self):
        """
        Get and set TLS/HTTPS info. POST parameters to the right are incorrect,
        use `cert` and `private_key` when posting.
        """
class StaticFiles:
    """Wrapper for the ``/webapps/{domain_name}/static_files`` endpoints."""

    def __init__(self, client, domain_name):
        self.client = client
        self.domain_name = domain_name

    @client_decorator(
        op="webapps", name="{self.domain_name}", path="static_files"
    )
    def get(self):
        "List all the static files mappings for a domain."

    @client_decorator(
        op="webapps", name="{self.domain_name}", path="static_files"
    )
    def post(self, url, path):
        "Create a new static files mapping. (webapp restart required)"
class StaticFilesId:
    """Wrapper for the ``/webapps/{domain_name}/static_files/{id}`` endpoints."""

    def __init__(self, client, domain_name, id):
        # NOTE(review): `id` shadows the builtin; kept because it is part of
        # the public constructor signature.
        self.client = client
        self.domain_name = domain_name
        self.id = id

    @client_decorator(
        op="webapps", name="{self.domain_name}", path="static_files/{self.id}"
    )
    def get(self):
        "Get URL and path of a particular mapping."

    @client_decorator(
        op="webapps", name="{self.domain_name}", path="static_files/{self.id}"
    )
    def put(self, url, path):
        "Modify a static files mapping. (webapp restart required)"

    @client_decorator(
        op="webapps", name="{self.domain_name}", path="static_files/{self.id}"
    )
    def patch(self, url, path):
        "Modify a static files mapping. (webapp restart required)"

    @client_decorator(
        op="webapps", name="{self.domain_name}", path="static_files/{self.id}"
    )
    def delete(self):
        "Remove a static files mapping. (webapp restart required)"
| StarcoderdataPython |
3288236 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# pylint: disable=unused-import,g-bad-import-order
"""## Activation Functions.
The activation ops provide different types of nonlinearities for use in neural
networks. These include smooth nonlinearities (`sigmoid`, `tanh`, `elu`,
`softplus`, and `softsign`), continuous but not everywhere differentiable
functions (`relu`, `relu6`, `crelu` and `relu_x`), and random regularization
(`dropout`).
All activation ops apply componentwise, and produce a tensor of the same
shape as the input tensor.
@@relu
@@relu6
@@crelu
@@elu
@@softplus
@@softsign
@@dropout
@@bias_add
@@sigmoid
@@tanh
## Convolution
The convolution ops sweep a 2-D filter over a batch of images, applying the
filter to each window of each image of the appropriate size. The different
ops trade off between generic vs. specific filters:
* `conv2d`: Arbitrary filters that can mix channels together.
* `depthwise_conv2d`: Filters that operate on each channel independently.
* `separable_conv2d`: A depthwise spatial filter followed by a pointwise filter.
Note that although these ops are called "convolution", they are strictly
speaking "cross-correlation" since the filter is combined with an input window
without reversing the filter. For details, see [the properties of
cross-correlation](https://en.wikipedia.org/wiki/Cross-correlation#Properties).
The filter is applied to image patches of the same size as the filter and
strided according to the `strides` argument. `strides = [1, 1, 1, 1]` applies
the filter to a patch at every offset, `strides = [1, 2, 2, 1]` applies the
filter to every other image patch in each dimension, etc.
Ignoring channels for the moment, and assume that the 4-D `input` has shape
`[batch, in_height, in_width, ...]` and the 4-D `filter` has shape
`[filter_height, filter_width, ...]`, then the spatial semantics of the
convolution ops are as follows: first, according to the padding scheme chosen
as `'SAME'` or `'VALID'`, the output size and the padding pixels are computed.
For the `'SAME'` padding, the output height and width are computed as:
out_height = ceil(float(in_height) / float(strides[1]))
out_width = ceil(float(in_width) / float(strides[2]))
and the padding on the top and left are computed as:
pad_along_height = ((out_height - 1) * strides[1] +
filter_height - in_height)
pad_along_width = ((out_width - 1) * strides[2] +
filter_width - in_width)
pad_top = pad_along_height / 2
pad_left = pad_along_width / 2
Note that the division by 2 means that there might be cases when the padding on
both sides (top vs bottom, right vs left) are off by one. In this case, the
bottom and right sides always get the one additional padded pixel. For example,
when `pad_along_height` is 5, we pad 2 pixels at the top and 3 pixels at the
bottom. Note that this is different from existing libraries such as cuDNN and
Caffe, which explicitly specify the number of padded pixels and always pad the
same number of pixels on both sides.
For the `'VALID`' padding, the output height and width are computed as:
out_height = ceil(float(in_height - filter_height + 1) / float(strides[1]))
out_width = ceil(float(in_width - filter_width + 1) / float(strides[2]))
and the padding values are always zero. The output is then computed as
output[b, i, j, :] =
sum_{di, dj} input[b, strides[1] * i + di - pad_top,
strides[2] * j + dj - pad_left, ...] *
filter[di, dj, ...]
where any value outside the original input image region are considered zero (
i.e. we pad zero values around the border of the image).
Since `input` is 4-D, each `input[b, i, j, :]` is a vector. For `conv2d`, these
vectors are multiplied by the `filter[di, dj, :, :]` matrices to produce new
vectors. For `depthwise_conv_2d`, each scalar component `input[b, i, j, k]`
is multiplied by a vector `filter[di, dj, k]`, and all the vectors are
concatenated.
@@convolution
@@conv2d
@@depthwise_conv2d
@@depthwise_conv2d_native
@@separable_conv2d
@@atrous_conv2d
@@conv2d_transpose
@@conv1d
@@conv3d
@@conv3d_transpose
@@conv2d_backprop_filter
@@conv2d_backprop_input
@@conv3d_backprop_filter_v2
@@depthwise_conv2d_native_backprop_filter
@@depthwise_conv2d_native_backprop_input
## Pooling
The pooling ops sweep a rectangular window over the input tensor, computing a
reduction operation for each window (average, max, or max with argmax). Each
pooling op uses rectangular windows of size `ksize` separated by offset
`strides`. For example, if `strides` is all ones every window is used, if
`strides` is all twos every other window is used in each dimension, etc.
In detail, the output is
output[i] = reduce(value[strides * i:strides * i + ksize])
where the indices also take into consideration the padding values. Please refer
to the `Convolution` section for details about the padding calculation.
@@avg_pool
@@max_pool
@@max_pool_with_argmax
@@avg_pool3d
@@max_pool3d
@@fractional_avg_pool
@@fractional_max_pool
@@pool
## Morphological filtering
Morphological operators are non-linear filters used in image processing.
[Greyscale morphological dilation
](https://en.wikipedia.org/wiki/Dilation_(morphology))
is the max-sum counterpart of standard sum-product convolution:
output[b, y, x, c] =
max_{dy, dx} input[b,
strides[1] * y + rates[1] * dy,
strides[2] * x + rates[2] * dx,
c] +
filter[dy, dx, c]
The `filter` is usually called structuring function. Max-pooling is a special
case of greyscale morphological dilation when the filter assumes all-zero
values (a.k.a. flat structuring function).
[Greyscale morphological erosion
](https://en.wikipedia.org/wiki/Erosion_(morphology))
is the min-sum counterpart of standard sum-product convolution:
output[b, y, x, c] =
min_{dy, dx} input[b,
strides[1] * y - rates[1] * dy,
strides[2] * x - rates[2] * dx,
c] -
filter[dy, dx, c]
Dilation and erosion are dual to each other. The dilation of the input signal
`f` by the structuring signal `g` is equal to the negation of the erosion of
`-f` by the reflected `g`, and vice versa.
Striding and padding is carried out in exactly the same way as in standard
convolution. Please refer to the `Convolution` section for details.
@@dilation2d
@@erosion2d
@@with_space_to_batch
## Normalization
Normalization is useful to prevent neurons from saturating when inputs may
have varying scale, and to aid generalization.
@@l2_normalize
@@local_response_normalization
@@sufficient_statistics
@@normalize_moments
@@moments
@@weighted_moments
@@fused_batch_norm
@@batch_normalization
@@batch_norm_with_global_normalization
## Losses
The loss ops measure error between two tensors, or between a tensor and zero.
These can be used for measuring accuracy of a network in a regression task
or for regularization purposes (weight decay).
@@l2_loss
@@log_poisson_loss
## Classification
TensorFlow provides several operations that help you perform classification.
@@sigmoid_cross_entropy_with_logits
@@softmax
@@log_softmax
@@softmax_cross_entropy_with_logits
@@sparse_softmax_cross_entropy_with_logits
@@weighted_cross_entropy_with_logits
## Embeddings
TensorFlow provides library support for looking up values in embedding
tensors.
@@embedding_lookup
@@embedding_lookup_sparse
## Recurrent Neural Networks
TensorFlow provides a number of methods for constructing Recurrent
Neural Networks. Most accept an `RNNCell`-subclassed object
(see the documentation for `tf.nn.rnn_cell`).
@@dynamic_rnn
@@rnn
@@state_saving_rnn
@@bidirectional_dynamic_rnn
@@bidirectional_rnn
@@raw_rnn
## Connectionist Temporal Classification (CTC)
@@ctc_loss
@@ctc_greedy_decoder
@@ctc_beam_search_decoder
## Evaluation
The evaluation ops are useful for measuring the performance of a network.
They are typically used at evaluation time.
@@top_k
@@in_top_k
## Candidate Sampling
Do you want to train a multiclass or multilabel model with thousands
or millions of output classes (for example, a language model with a
large vocabulary)? Training with a full Softmax is slow in this case,
since all of the classes are evaluated for every training example.
Candidate Sampling training algorithms can speed up your step times by
only considering a small randomly-chosen subset of contrastive classes
(called candidates) for each batch of training examples.
See our
[Candidate Sampling Algorithms Reference](../../extras/candidate_sampling.pdf)
### Sampled Loss Functions
TensorFlow provides the following sampled loss functions for faster training.
@@nce_loss
@@sampled_softmax_loss
### Candidate Samplers
TensorFlow provides the following samplers for randomly sampling candidate
classes when using one of the sampled loss functions above.
@@uniform_candidate_sampler
@@log_uniform_candidate_sampler
@@learned_unigram_candidate_sampler
@@fixed_unigram_candidate_sampler
### Miscellaneous candidate sampling utilities
@@compute_accidental_hits
### Quantization ops
@@quantized_conv2d
@@quantized_relu_x
@@quantized_max_pool
@@quantized_avg_pool
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys as _sys
# pylint: disable=unused-import
from tensorflow.python.ops import ctc_ops as _ctc_ops
from tensorflow.python.ops import embedding_ops as _embedding_ops
from tensorflow.python.ops import nn_grad as _nn_grad
from tensorflow.python.ops import nn_ops as _nn_ops
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import seq2seq
from tensorflow.python.ops.math_ops import sigmoid
from tensorflow.python.ops.math_ops import tanh
# pylint: enable=unused-import
from tensorflow.python.util.all_util import remove_undocumented
# Bring more nn-associated functionality into this package.
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.ctc_ops import *
from tensorflow.python.ops.nn_impl import *
from tensorflow.python.ops.nn_ops import *
from tensorflow.python.ops.candidate_sampling_ops import *
from tensorflow.python.ops.embedding_ops import *
from tensorflow.python.ops.rnn import *
# pylint: enable=wildcard-import
# TODO(cwhipkey): sigmoid and tanh should not be exposed from tf.nn.
# Names exported from this module even though they are not documented in the
# module docstring above; every other undocumented public name is stripped by
# remove_undocumented() below.
_allowed_symbols = [
    "zero_fraction",  # documented in training.py
    # Modules whitelisted for reference through tf.nn.
    # TODO(cwhipkey): migrate callers to use the submodule directly.
    "rnn_cell",
    "seq2seq",
    # Symbols whitelisted for export without documentation.
    # TODO(cwhipkey): review these and move to contrib or expose through
    # documentation.
    "all_candidate_sampler",  # Excluded in gen_docs_combined.
    "lrn",  # Excluded in gen_docs_combined.
    "relu_layer",  # Excluded in gen_docs_combined.
    "xw_plus_b",  # Excluded in gen_docs_combined.
]

# Remove any public name not mentioned in the docstring and not whitelisted
# above, across this module and the listed submodules.
remove_undocumented(__name__, _allowed_symbols,
                    [_sys.modules[__name__], _ctc_ops, _nn_ops, _nn_grad,
                     rnn_cell, seq2seq])
| StarcoderdataPython |
1775643 | <reponame>flyliu2017/bert<filename>sequential_tag_processor.py
import os
import csv
import tensorflow as tf
import tokenization
from data_processor import DataProcessor, InputExample, InputFeatures
from model_fn import create_sequential_tagging_model
class SequentialTagProcessor(DataProcessor):
    """Base class for data converters for sequential tagging data sets.

    Each token position gets a "0"/"1" label; positions covered by a target
    phrase are marked 1 (see create_label_features).
    """

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    @property
    def name_to_features(self):
        # TFRecord schema: per-position label ids (one per token) instead of a
        # single scalar label as in sequence classification.
        return {
            "input_ids": tf.FixedLenFeature([self.max_seq_length], tf.int64),
            "input_mask": tf.FixedLenFeature([self.max_seq_length], tf.int64),
            "segment_ids": tf.FixedLenFeature([self.max_seq_length], tf.int64),
            "label_ids": tf.FixedLenFeature([self.max_seq_length], tf.int64),
            "is_real_example": tf.FixedLenFeature([], tf.int64),
        }

    @property
    def padding_input_features(self):
        # All-zero features used to pad a batch up to a fixed batch size.
        return InputFeatures(
            input_ids=[0] * self.max_seq_length,
            input_mask=[0] * self.max_seq_length,
            segment_ids=[0] * self.max_seq_length,
            label_id=[0] * self.max_seq_length,
            is_real_example=False)

    @property
    def create_model(self):
        # Model-building function consumed by the estimator's model_fn.
        return create_sequential_tagging_model

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Reads a tab separated value file."""
        with tf.gfile.Open(input_file, "r") as f:
            reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
            lines = []
            for line in reader:
                lines.append(line)
            return lines

    def create_label_features(self, example, tokens):
        """Return a 0/1 label vector of length max_seq_length for *tokens*.

        Each label phrase in example.label is tokenized and located as a
        contiguous span in *tokens*; the matching span is marked with 1s.

        Raises:
            ValueError: if a label phrase does not occur in *tokens*.
        """
        label = example.label
        if not isinstance(label, list):
            label = [label]
        label_tokens = [self.tokenizer.tokenize(l) for l in label]
        label_id = [0] * self.max_seq_length
        # NOTE(review): the loop variable shadows the outer `label`.
        for label in label_tokens:
            length = len(label)
            for i in range(len(tokens) - length + 1):
                if tokens[i:i + length] == label:
                    start = i
                    end = i + length
                    label_id[start:end] = [1] * (end - start)
                    break
            else:
                # for/else: no break above means the phrase was never found.
                raise ValueError("can't find phrase in text.")
        return label_id
class ExtractPhrasesProcessor(SequentialTagProcessor):
    """Reads '<text_a> | <text_b>' inputs and tags the target phrase span."""
    def get_train_examples(self):
        """See base class."""
        return self._create_examples("train")
    def get_dev_examples(self):
        """See base class."""
        return self._create_examples("eval")
    def get_test_examples(self):
        """See base class."""
        return self._create_examples("test")
    def _create_examples(self, set_type):
        """Build InputExamples from the <set_type>_xs/_ys converted_tag files."""
        xs_path = os.path.join(self.data_dir, '{}_xs_converted_tag.txt'.format(set_type))
        ys_path = os.path.join(self.data_dir, '{}_ys_converted_tag.txt'.format(set_type))
        with open(xs_path, 'r', encoding='utf8') as f:
            txts = f.read().splitlines()
        with open(ys_path, 'r', encoding='utf8') as f:
            labels = f.read().splitlines()
        examples = []
        for idx, (txt, label) in enumerate(zip(txts, labels)):
            guid = "%s-%s" % (set_type, idx)
            text_a, text_b = txt.split(' | ')
            # Only the first label segment before ' | ' is the target phrase.
            label = label.split(' | ')[0]
            examples.append(
                InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples
class ExtractPhrasesFromSegmentedInputProcessor(ExtractPhrasesProcessor):
    """Like ExtractPhrasesProcessor, but character-segments text_a and the label."""
    def _create_examples(self, set_type):
        """Build InputExamples with text_a and label split into space-separated characters."""
        xs_path = os.path.join(self.data_dir, '{}_xs_converted_tag.txt'.format(set_type))
        ys_path = os.path.join(self.data_dir, '{}_ys_converted_tag.txt'.format(set_type))
        with open(xs_path, 'r', encoding='utf8') as f:
            txts = f.read().splitlines()
        with open(ys_path, 'r', encoding='utf8') as f:
            labels = f.read().splitlines()
        examples = []
        for idx, (txt, label) in enumerate(zip(txts, labels)):
            guid = "%s-%s" % (set_type, idx)
            text_a, text_b = txt.split(' | ')
            # Space-separate every character so the tokenizer sees them individually.
            text_a = ' '.join(text_a)
            label = ' '.join(label.split(' | ')[0])
            examples.append(
                InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples
class ExtractPhrasesTagPrefixedProcessor(ExtractPhrasesFromSegmentedInputProcessor):
    """Reads tag-prefixed inputs ('*_xs_prefix.txt'); character-segments text_b and label."""
    def _create_examples(self, set_type):
        """Build InputExamples from the prefix-style xs file and converted_tag ys file."""
        xs_path = os.path.join(self.data_dir, '{}_xs_prefix.txt'.format(set_type))
        ys_path = os.path.join(self.data_dir, '{}_ys_converted_tag.txt'.format(set_type))
        with open(xs_path, 'r', encoding='utf8') as f:
            txts = f.read().splitlines()
        with open(ys_path, 'r', encoding='utf8') as f:
            labels = f.read().splitlines()
        examples = []
        for idx, (txt, label) in enumerate(zip(txts, labels)):
            guid = "%s-%s" % (set_type, idx)
            text_a, text_b = txt.split(' | ')
            # Here text_b (not text_a) carries the raw text to segment per character.
            text_b = ' '.join(text_b)
            label = ' '.join(label.split(' | ')[0])
            examples.append(
                InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples
class ExtractAllPhrasesProcessor(ExtractPhrasesFromSegmentedInputProcessor):
    """Tags every phrase listed in the multitags files (label is a list of phrases)."""
    def _create_examples(self, set_type):
        """Build single-segment InputExamples whose label is a list of all phrases."""
        xs_path = os.path.join(self.data_dir, '{}_xs_multitags'.format(set_type))
        ys_path = os.path.join(self.data_dir, '{}_ys_multitags'.format(set_type))
        with open(xs_path, 'r', encoding='utf8') as f:
            txts = f.read().splitlines()
        with open(ys_path, 'r', encoding='utf8') as f:
            labels = f.read().splitlines()
        examples = []
        for idx, (txt, label) in enumerate(zip(txts, labels)):
            guid = "%s-%s" % (set_type, idx)
            # Keep only the text segment and space-separate its characters.
            text_a = ' '.join(txt.split(' | ')[0])
            # Every ' | '-separated phrase becomes one character-segmented label.
            label = [' '.join(phrase) for phrase in label.split(' | ')]
            examples.append(
                InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
        return examples
# === Django report search tests ===
from django.contrib.auth.models import User
from django.db.models import Q
from django.test import TestCase, Client, RequestFactory
from ..models import Report
# Create your tests here.
class SearchTest(TestCase):
    """Word-based search over Report.report_content with a registered user."""
    def setUp(self):
        self.client = Client()
        self.client.post('/report/user_register/',
                         {'username': 'john',
                          'password1': '<PASSWORD>',
                          'password2': '<PASSWORD>'})
        self.client.login(username='john', password='<PASSWORD>')
        request_factory = RequestFactory()
        self.request = request_factory.get('/report/mypage/')
        self.request.user = User.objects.get(pk=1)
        # Two reports: one whose content matches "test", one matching "search".
        for title, content in (('test title', 'test'), ('search test', 'search')):
            self.client.post('/report/report_entries/',
                             {'report_author': self.request.user,
                              'report_title': title,
                              'report_content': content})
    def _hit_count(self, query):
        # One content-contains filter per whitespace-separated word; results
        # are concatenated, so a report can be counted once per matching word.
        hits = []
        for word in query.split():
            hits += Report.objects.filter(Q(report_content__contains=word))
        return len(hits)
    def test_search_one_word(self):
        self.assertEqual(self._hit_count('search'), 1)
    def test_search_many_words(self):
        self.assertEqual(self._hit_count('test search'), 2)
    def test_search_no_hit_word(self):
        self.assertEqual(self._hit_count('python'), 0)
class SearchReportsTest(TestCase):
    """Search behaviour when several reports match the same word."""
    # status_code = 302: created new score.
    # status_code = 200: not create new score.
    def setUp(self):
        self.client = Client()
        self.client.post('/report/user_register/',
                         {'username': 'john',
                          'password1': '<PASSWORD>',
                          'password2': '<PASSWORD>'})
        self.client.login(username='john', password='<PASSWORD>')
        request_factory = RequestFactory()
        self.request = request_factory.get('/report/mypage/')
        self.request.user = User.objects.get(pk=1)
        # Three reports; two of them contain the word "search".
        fixtures = (
            ('test title', 'test'),
            ('search test', 'search'),
            ('search ', 'This is search '),
        )
        for title, content in fixtures:
            self.client.post('/report/report_entries/',
                             {'report_author': self.request.user,
                              'report_title': title,
                              'report_content': content})
    def test_search(self):
        found = []
        for word in 'search'.split():
            found += Report.objects.filter(Q(report_content__contains=word))
        self.assertEqual(len(found), 2)
# === COVAREP .mat -> pickle conversion script (kazuki-cho/data_maintenance_tools) ===
import os
import sys
import glob
import pickle
import numpy as np
import scipy.io as sio
# Directory containing one COVAREP .mat file per segment.
COVAREP_DIR = 'segmented_covarep/'
# Destination pickle mapping segment name -> {'features', 'intervals'}.
OUTPUT = 'cmumosi_audio_noalign.pkl'
def main():
data = {}
mat_files = glob.glob(COVAREP_DIR + '*.mat')
for mat_file in mat_files:
segment_name = os.path.splitext(os.path.basename(mat_file))[0]
mat_content = sio.loadmat(mat_file)
intervals = []
data[segment_name] = {}
for i in range(len(mat_content['features'])):
start = float(i) / 100
end = float(i + 1) / 100
intervals = np.array([start, end])
data[segment_name]['features'] = mat_content['features']
data[segment_name]['intervals'] = intervals
with open(OUTPUT, mode='wb') as f:
pickle.dump(data, f)
# Script entry point. (A dataset-concatenation artifact that was fused onto
# this line — trailing "| StarcoderdataPython |" — made it a syntax error;
# removed.)
if __name__ == '__main__':
    main()
"""
Finetune model.
"""
import argparse
from lib.LayoutLM import LayoutLM
from lib.LayoutLMv2 import LayoutLMv2
from lib.path_utils import existing_directory
def get_parser() -> argparse.ArgumentParser:
    """Build the command-line parser for the finetuning script."""
    parser = argparse.ArgumentParser()
    # Positional arguments: which model variant, then which pipeline stage.
    parser.add_argument(
        "model",
        choices=["finetuned_lmv1", "finetuned_lmv2"],
        type=str,
        help="The type of model to finetune on",
    )
    parser.add_argument(
        "task",
        choices=["process_image", "get_encodings", "finetune"],
        type=str,
        help="Specific task - preprocessing or training",
    )
    # Directory options all validate their value with existing_directory.
    directory_options = (
        (("-i", "--images-dir"),
         "Path to directory containing raw images.", "datasets/"),
        (("-e", "--embeddings-dir"),
         "Path to directory where hidden states will be pickled and stored.", "embeddings/"),
        (("-en", "--encodings-dir"),
         "Path to directory where intermediate tokenized encodings will be pickled and stored.", "data/encodings/"),
        (("-m", "--models-dir"),
         "Path to directory where models will be stored.", "models/"),
    )
    for flags, help_text, default in directory_options:
        parser.add_argument(*flags, type=existing_directory, help=help_text, default=default)
    parser.add_argument(
        "-n",
        "--epochs",
        type=int,
        help="Number of training epochs.",
        default=5,
    )
    return parser
def main(args: argparse.Namespace):
    """Dispatch the requested preprocessing or finetuning task.

    Only the LayoutLM v2 path is active; the v1 branch is a deliberate no-op
    (its pipeline was disabled in the original source).
    """
    input_dir = args.images_dir
    output_dir = args.embeddings_dir  # currently unused by the active branches
    if args.model == "finetuned_lmv1":
        # LayoutLM v1 pipeline is disabled. The original (commented-out) steps
        # were: instantiate LayoutLM, process_images(in, out), get_encodings,
        # then fine_tune with a local model save path.
        pass
    elif args.model == "finetuned_lmv2":
        model = LayoutLMv2()
        if args.task == "get_encodings":
            enc_path = args.encodings_dir / "layoutlmv2_ft_encodings.pkl"
            labels_path = args.encodings_dir / "layoutlmv2_ft_labels_dict.json"
            hidden_state = model.get_outputs(
                input_dir,
                labels=True,
                lhs=False,
                outpath=enc_path,
                dict_path=labels_path,
                file_type="tif",
            )
            print(hidden_state)
        elif args.task == "finetune":
            enc_path = args.encodings_dir / "layoutlmv2_ft_encodings.pkl"
            labels_path = args.encodings_dir / "layoutlmv2_ft_labels_dict.json"
            model.fine_tune(enc_path, labels_path, args.models_dir,
                            num_train_epochs=args.epochs)
# Script entry point: parse CLI arguments and run the selected task.
if __name__ == '__main__':
    parser = get_parser()
    args = parser.parse_args()
    main(args)
# === chado FeatureClient (chakin) ===
"""
Contains possible interactions with the Chado Features
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import csv
import hashlib
import operator
import re
import time
from functools import reduce
from BCBio import GFF
from Bio import Seq, SeqIO
from Bio.SeqFeature import FeatureLocation, SeqFeature
import chado
from chado.client import Client
from chakin.io import warn
from future import standard_library
standard_library.install_aliases()
class FeatureClient(Client):
"""
Access to the chado features
"""
    def __init__(self, engine, metadata, session, ci):
        # Initialise the per-instance caches first: other methods on this
        # client assume the cache attributes exist before any query runs.
        self._reset_cache()
        Client.__init__(self, engine, metadata, session, ci)
def get_features(self, organism_id=None, analysis_id=None, name=None, uniquename=None):
"""
Get all or some features
:type organism_id: int
:param organism_id: organism_id filter
:type analysis_id: int
:param analysis_id: analysis_id filter
:type name: str
:param name: name filter
:type uniquename: str
:param uniquename: uniquename filter
:rtype: list of dict
:return: Features information
"""
res = self.session.query(self.model.feature, self.model.analysisfeature.analysis_id)
if organism_id:
res = res.filter_by(organism_id=organism_id)
if name:
res = res.filter_by(name=name)
if uniquename:
res = res.filter_by(uniquename=uniquename)
res = res.join(self.model.analysisfeature, self.model.analysisfeature.feature_id == self.model.feature.feature_id)
if analysis_id:
res = res.filter(self.model.analysisfeature.analysis_id == analysis_id)
data = []
for feat in res:
data.append({
'feature_id': feat.feature.feature_id,
'dbxref_id': feat.feature.dbxref_id,
'organism_id': feat.feature.organism_id,
'analysis_id': feat.analysis_id,
'name': feat.feature.name,
'uniquename': feat.feature.uniquename,
'residues': feat.feature.residues,
'seqlen': feat.feature.seqlen,
'md5checksum': feat.feature.md5checksum,
'type_id': feat.feature.type_id,
'is_analysis': feat.feature.is_analysis,
'is_obsolete': feat.feature.is_obsolete,
'timeaccessioned': str(feat.feature.timeaccessioned),
'timelastmodified': str(feat.feature.timelastmodified),
})
return data
def get_feature_cvterms(self, feature_id):
"""
Get cvterms associated with a feature
:type feature_id: int
:param feature_id: Id of the feature
:rtype: list
:return: Feature cvterms
"""
res = self.session.query(self.model.feature_cvterm, self.model.cvterm, self.model.cv, self.model.db, self.model.dbxref) \
.join(self.model.cvterm, self.model.feature_cvterm.cvterm_id == self.model.cvterm.cvterm_id) \
.join(self.model.cv, self.model.cvterm.cv_id == self.model.cv.cv_id) \
.join(self.model.dbxref, self.model.cvterm.dbxref_id == self.model.dbxref.dbxref_id) \
.join(self.model.db, self.model.dbxref.db_id == self.model.db.db_id) \
.filter(self.model.feature_cvterm.feature_id == feature_id)
data = []
for term in res:
data.append({
'cvterm_id': term.cvterm.cvterm_id,
'cvterm_name': term.cvterm.name,
'cvterm_definition': term.cvterm.definition,
'rank': term.feature_cvterm.rank,
'cv_name': term.cv.name,
'cv_definition': term.cv.definition,
'db_name': term.db.name,
'db_description': term.db.description,
'dbxref_accession': term.dbxref.accession,
'dbxref_description': term.dbxref.description,
})
return data
def get_feature_analyses(self, feature_id):
"""
Get analyses associated with a feature
:type feature_id: int
:param feature_id: Id of the feature
:rtype: list
:return: Feature analyses
"""
res = self.session.query(self.model.analysisfeature, self.model.analysis, self.model.analysisfeatureprop, self.model.cvterm, self.model.cv, self.model.db, self.model.dbxref) \
.join(self.model.analysis, self.model.analysisfeature.analysis_id == self.model.analysis.analysis_id) \
.outerjoin(self.model.analysisfeatureprop, self.model.analysisfeature.analysisfeature_id == self.model.analysisfeatureprop.analysisfeature_id) \
.outerjoin(self.model.cvterm, self.model.analysisfeatureprop.type_id == self.model.cvterm.cvterm_id) \
.outerjoin(self.model.cv, self.model.cvterm.cv_id == self.model.cv.cv_id) \
.outerjoin(self.model.dbxref, self.model.cvterm.dbxref_id == self.model.dbxref.dbxref_id) \
.outerjoin(self.model.db, self.model.dbxref.db_id == self.model.db.db_id) \
.filter(self.model.analysisfeature.feature_id == feature_id)
data = {}
for an in res:
if an.analysis.analysis_id in data:
data[an.analysis.analysis_id]['analysisfeatureprop'].append({
'value': an.analysisfeatureprop.value,
'rank': an.analysisfeatureprop.rank,
'type_id': an.analysisfeatureprop.type_id,
'cvterm_name': an.cvterm.name,
'cvterm_definition': an.cvterm.definition,
'cv_name': an.cv.name,
'cv_definition': an.cv.definition,
'db_name': an.db.name,
'db_description': an.db.description,
'dbxref_accession': an.dbxref.accession,
'dbxref_description': an.dbxref.description,
})
else:
data[an.analysis.analysis_id] = {
'analysis_id': an.analysis.analysis_id,
'rawscore': an.analysisfeature.rawscore,
'normscore': an.analysisfeature.normscore,
'significance': an.analysisfeature.significance,
'identity': an.analysisfeature.identity,
}
data[an.analysis.analysis_id]['analysisfeatureprop'] = []
if an.analysisfeatureprop:
data[an.analysis.analysis_id]['analysisfeatureprop'].append({
'value': an.analysisfeatureprop.value,
'rank': an.analysisfeatureprop.rank,
'type_id': an.analysisfeatureprop.type_id,
'cvterm_name': an.cvterm.name,
'cvterm_definition': an.cvterm.definition,
'cv_name': an.cv.name,
'cv_definition': an.cv.definition,
'db_name': an.db.name,
'db_description': an.db.description,
'dbxref_accession': an.dbxref.accession,
'dbxref_description': an.dbxref.description,
})
return list(data.values())
    def delete_features(self, organism_id=None, analysis_id=None, name=None, uniquename=None):
        """
        Delete all or some features

        :type organism_id: int
        :param organism_id: organism_id filter
        :type analysis_id: int
        :param analysis_id: analysis_id filter
        :type name: str
        :param name: name filter
        :type uniquename: str
        :param uniquename: uniquename filter
        :rtype: int
        :return: Number of deleted features
        """
        # check if the organism exists
        res = self.session.query(self.model.feature)
        if organism_id:
            res = res.filter_by(organism_id=organism_id)
        if name:
            res = res.filter_by(name=name)
        if uniquename:
            res = res.filter_by(uniquename=uniquename)
        if analysis_id:
            # NOTE(review): this references analysisfeature without an explicit
            # join; bulk delete() with a cross-table filter relies on the ORM
            # building an implicit correlation — confirm it behaves as intended.
            res = res.filter(
                self.model.analysisfeature.feature_id == self.model.feature.feature_id,
                self.model.analysisfeature.analysis_id == analysis_id
            )
        # Bulk delete; synchronize_session=False skips in-session bookkeeping.
        res = res.delete(synchronize_session=False)
        self.session.commit()
        self._reset_cache()
        return res
    def load_fasta(self, fasta, organism_id, sequence_type="contig", analysis_id=None,
                   re_name=None, re_uniquename=None, match_on_name=False, update=False, db=None,
                   re_db_accession=None, rel_type=None, re_parent=None, parent_type=None):
        """
        Load features from a fasta file

        :type fasta: str
        :param fasta: Path to the Fasta file to load
        :type organism_id: int
        :param organism_id: Organism ID
        :type sequence_type: str
        :param sequence_type: Sequence type
        :type analysis_id: int
        :param analysis_id: Analysis ID
        :type re_name: str
        :param re_name: Regular expression to extract the feature name from the fasta sequence id (first capturing group will be used).
        :type re_uniquename: str
        :param re_uniquename: Regular expression to extract the feature uniquename from the fasta sequence id (first capturing group will be used).
        :type match_on_name: bool
        :param match_on_name: Match existing features using their name instead of their uniquename
        :type update: bool
        :param update: Update existing feature with new sequence instead of throwing an error
        :type db: int
        :param db: External database to cross reference to.
        :type re_db_accession: str
        :param re_db_accession: Regular expression to extract an external database accession from the fasta sequence id (first capturing group will be used).
        :type rel_type: str
        :param rel_type: Relation type to parent feature ('part_of' or 'derives_from').
        :type re_parent: str
        :param re_parent: Regular expression to extract parent uniquename from the fasta sequence id (first capturing group will be used).
        :type parent_type: str
        :param parent_type: Sequence type of the parent feature
        :rtype: dict
        :return: Dict with 'inserted' and 'updated' sequence counts
        """
        # --- Argument validation -------------------------------------------
        if rel_type and rel_type not in ['part_of', 'derives_from']:
            raise Exception('Unsupported parent relation type (--rel_type)')
        if (db or re_db_accession) and not (db and re_db_accession):
            raise Exception('--db and --re_db_accession should both be specified')
        # NOTE(review): the message below repeats '--rel_type'; it presumably
        # meant to mention '--parent_type' as the third flag.
        if (rel_type or re_parent or parent_type) and not (rel_type and re_parent and parent_type):
            raise Exception('--rel_type, --re_parent and --rel_type should all be specified')
        if analysis_id and len(self.ci.analysis.get_analyses(analysis_id=analysis_id)) != 1:
            raise Exception("Could not find analysis with id '{}'".format(analysis_id))
        if len(self.ci.organism.get_organisms(organism_id=organism_id)) != 1:
            raise Exception("Could not find organism with id '{}'".format(organism_id))
        seqterm = self.ci.get_cvterm_id(sequence_type, 'sequence')
        # Cache all possibly existing features
        existing = self.session.query(self.model.feature.feature_id, self.model.feature.name, self.model.feature.uniquename) \
            .filter_by(organism_id=organism_id, type_id=seqterm) \
            .all()
        if match_on_name:
            existing = {ex.name: ex.feature_id for ex in existing}
        else:
            existing = {ex.uniquename: ex.feature_id for ex in existing}
        # Cache all possible parent features
        if parent_type:
            parentterm = self.ci.get_cvterm_id(parent_type, 'sequence')
            relterm = self.ci.get_cvterm_id(rel_type, 'sequence')
            existing_parent = self.session.query(self.model.feature.feature_id, self.model.feature.uniquename) \
                .filter_by(organism_id=organism_id, type_id=parentterm) \
                .all()
            existing_parent = {ex.uniquename: ex.feature_id for ex in existing_parent}
        # Cache all existing dbxref
        existing_dbxref = self.session.query(self.model.dbxref.accession, self.model.dbxref.dbxref_id) \
            .filter_by(db_id=db) \
            .all()
        existing_dbxref = {ex.accession: ex.dbxref_id for ex in existing_dbxref}
        count_ins = 0
        count_up = 0
        # --- Main loop: one pass over the fasta records --------------------
        for seq_record in SeqIO.parse(fasta, "fasta"):
            # Prepare the dbxref stuff if needed
            dbxref_id = None
            if db and re_db_accession:
                accession = seq_record.id
                re_res = re.search(re_db_accession, accession)
                if re_res:
                    accession = re_res.group(1)
                if accession in existing_dbxref:
                    dbxref_id = existing_dbxref[accession]
                else:
                    # flush+refresh so the new dbxref gets its generated id now
                    dbx = self.model.dbxref()
                    dbx.db_id = db
                    dbx.accession = accession
                    self.session.add(dbx)
                    self.session.flush()
                    self.session.refresh(dbx)
                    dbxref_id = dbx.dbxref_id
                    existing_dbxref[accession] = dbx.dbxref_id
            # Compute md5 checksum
            md5 = hashlib.md5()
            md5.update(str(seq_record.seq).encode('utf-8'))
            md5 = md5.hexdigest()
            # Determine identifiers
            name_ok = seq_record.id
            uname_ok = seq_record.id
            if re_name:
                re_res = re.search(re_name, name_ok)
                if re_res:
                    name_ok = re_res.group(1)
            if re_uniquename:
                re_res = re.search(re_uniquename, uname_ok)
                if re_res:
                    uname_ok = re_res.group(1)
            identifier = name_ok if match_on_name else uname_ok
            # Insert or update
            if identifier not in existing:
                feat = self.model.feature()
                if dbxref_id:
                    feat.dbxref_id = dbxref_id
                feat.organism_id = organism_id
                feat.name = name_ok
                feat.uniquename = uname_ok
                feat.residues = str(seq_record.seq)
                feat.seqlen = len(seq_record)
                feat.md5checksum = md5
                feat.type_id = seqterm
                self.session.add(feat)
                # Add link to analysis
                if analysis_id:
                    afeat = self.model.analysisfeature()
                    afeat.feature = feat
                    afeat.analysis_id = analysis_id
                    self.session.add(afeat)
                # NOTE(review): feat has not been flushed yet, so feature_id is
                # presumably still None here; the cached value is only used by
                # the update branch below — confirm same-run updates work.
                existing[identifier] = feat.feature_id
                # Create relationship if needed
                if parent_type:
                    # We won't touch relationship for updated seqs (cause I'm lazy + I don't want to break an already loaded feature)
                    parent_ok = seq_record.id
                    if re_parent:
                        re_res = re.search(re_parent, parent_ok)
                        if re_res:
                            parent_ok = re_res.group(1)
                    if parent_ok in existing_parent:
                        featr = self.model.feature_relationship()
                        featr.subject = feat
                        featr.object_id = existing_parent[parent_ok]
                        featr.type_id = relterm
                        self.session.add(featr)
                    else:
                        raise Exception("Could not find a parent feature with uniquename '{}' ('{}').".format(parent_ok, seq_record.id))
                count_ins += 1
            elif update:
                self.session.query(self.model.feature). \
                    filter_by(feature_id=existing[identifier]). \
                    update({
                        'residues': str(seq_record.seq),
                        'seqlen': len(seq_record),
                        'md5checksum': md5
                    })
                count_up += 1
            else:
                raise Exception("Found an existing feature with '{}': '{}' ('{}'). Use --update to update its sequence.".format('name' if match_on_name else 'uniquename', identifier, seq_record.id))
        self.session.commit()
        self._reset_cache()
        return {'inserted': count_ins, 'updated': count_up}
    def load_featureprops(self, tab_file, analysis_id, organism_id, prop_type, feature_type=None, match_on_name=False):
        """
        Load feature properties from a tabular file (Column1: feature name or uniquename, Column2: property value)

        :type tab_file: str
        :param tab_file: Path to the tabular file to load
        :type analysis_id: int
        :param analysis_id: Analysis ID
        :type organism_id: int
        :param organism_id: Organism ID
        :type prop_type: str
        :param prop_type: Type of the feature property (cvterm will be created if it doesn't exist)
        :type feature_type: str
        :param feature_type: Type of the target features in sequence ontology (will speed up loading if specified)
        :type match_on_name: bool
        :param match_on_name: Match features using their name instead of their uniquename
        :rtype: dict
        :return: Number of inserted featureprop
        """
        # Validate that the referenced analysis and organism exist.
        if len(self.ci.analysis.get_analyses(analysis_id=analysis_id)) != 1:
            raise Exception("Could not find analysis with id '{}'".format(analysis_id))
        if len(self.ci.organism.get_organisms(organism_id=organism_id)) != 1:
            raise Exception("Could not find organism with id '{}'".format(organism_id))
        # Cache all existing features
        existing = self.session.query(self.model.feature.feature_id, self.model.feature.name, self.model.feature.uniquename) \
            .filter_by(organism_id=organism_id)
        if feature_type:
            seqterm = self.ci.get_cvterm_id(feature_type, 'sequence')
            existing = existing.filter(self.model.feature.type_id == seqterm)
        existing = existing.all()
        if match_on_name:
            existing = {ex.name: ex.feature_id for ex in existing}
        else:
            existing = {ex.uniquename: ex.feature_id for ex in existing}
        count_ins = 0
        with open(tab_file, 'r') as tsvin:
            # str("\t") is a Python 2/3 compatibility leftover (native str type).
            tsvin = csv.reader(tsvin, delimiter=str("\t"))
            for row in tsvin:
                if len(row) != 2:
                    raise Exception("Malformed input tabular file '{}' (should be a 2-column tab delimited file)".format(tab_file))
                # Insert or update
                if row[0] not in existing:
                    matchon = 'name' if match_on_name else 'uniquename'
                    raise Exception("Could not find a feature with {} '{}'".format(matchon, row[0]))
                # Empty property values are silently skipped.
                if row[1]:
                    self._add_featureprop(organism_id, existing[row[0]], prop_type, row[1])
                    count_ins += 1
        self.session.commit()
        return {'inserted': count_ins}
    def load_gff(self, gff, analysis_id, organism_id, landmark_type=None, re_protein=None, re_protein_capture="^(.*?)$", fasta=None, no_seq_compute=False, quiet=False, add_only=False, protein_id_attr=None):
        """
        Load features from a gff file

        :type gff: str
        :param gff: Path to the GFF file to load
        :type analysis_id: int
        :param analysis_id: Analysis ID
        :type organism_id: int
        :param organism_id: Organism ID
        :type landmark_type: str
        :param landmark_type: Type of the landmarks (will speed up loading if provided, e.g. contig, should be a term of the Sequence ontology)
        :type re_protein: str
        :param re_protein: Replacement string for the protein name using capturing groups defined by --re_protein_capture
        :type re_protein_capture: str
        :param re_protein_capture: Regular expression to capture groups in mRNA name to use in --re_protein (e.g. "^(.*?)-R([A-Z]+)$", default="^(.*?)$")
        :type protein_id_attr: str
        :param protein_id_attr: Attribute containing the protein uniquename. It is searched at the mRNA level, and if not found at CDS level.
        :type fasta: str
        :param fasta: Path to a Fasta containing sequences for some features. When creating a feature, if its sequence is in this fasta file it will be loaded. Otherwise for mRNA and polypeptides it will be computed from the genome sequence (if available), otherwise it will be left empty.
        :type no_seq_compute: bool
        :param no_seq_compute: Disable the computation of mRNA and polypeptides sequences based on genome sequence and positions.
        :type quiet: bool
        :param quiet: Hide progress information
        :type add_only: bool
        :param add_only: Use this flag if you're not updating existing features, but just adding new features to the selected analysis and organism. It will speedup loading, and reduce memory usage, but might produce errors in case of already existing feature.
        :rtype: dict
        :return: Dict with the number of inserted top-level features
        """
        # --- Validation -----------------------------------------------------
        if len(self.ci.analysis.get_analyses(analysis_id=analysis_id)) != 1:
            raise Exception("Could not find analysis with id '{}'".format(analysis_id))
        if len(self.ci.organism.get_organisms(organism_id=organism_id)) != 1:
            raise Exception("Could not find organism with id '{}'".format(organism_id))
        if protein_id_attr and re_protein:
            raise Exception("--protein_id_attr and --re_protein cannot be used at the same time.")
        self.cache_existing = not add_only
        # Get possible landmarks
        landmarks = self.session.query(self.model.feature.name, self.model.feature.uniquename, self.model.feature.feature_id, self.model.feature.type_id, self.model.feature.organism_id) \
            .filter_by(organism_id=organism_id)
        if landmark_type:
            # Filter by landmark type if provided (else we look for all features)
            landmark_type_id = self.ci.get_cvterm_id(landmark_type, 'sequence')
            landmarks = landmarks.filter(self.model.feature.type_id == landmark_type_id)
        # Map both name and uniquename to lists of candidate feature_ids.
        self._landmark_cache = {}
        for lm in landmarks:
            if lm.name not in self._landmark_cache:
                self._landmark_cache[lm.name] = []
            if lm.feature_id not in self._landmark_cache[lm.name]:
                self._landmark_cache[lm.name].append(lm.feature_id)  # There may be multiple landmarks with the same name
            # Also look for uniquename
            if lm.uniquename not in self._landmark_cache:
                self._landmark_cache[lm.uniquename] = []
            if lm.feature_id not in self._landmark_cache[lm.uniquename]:
                self._landmark_cache[lm.uniquename].append(lm.feature_id)
        # First pass over the GFF: collect the seq ids and feature types used.
        examiner = GFF.GFFExaminer()
        gff_handle = open(gff)
        gff_limits = examiner.available_limits(gff_handle)
        gff_handle.close()
        # Check that we have all the cvterms in the db
        self._blacklisted_cvterms = []
        for feat_type in gff_limits['gff_type']:
            type_to_check = feat_type[0]
            # Be tolerant for proteins (shameless hard coding)
            if type_to_check == 'protein':
                type_to_check = 'polypeptide'
            # Will raise an exception if not present + keep value in cache
            try:
                self.ci.get_cvterm_id(type_to_check, 'sequence', True)
            except chado.RecordNotFoundError:
                if type_to_check not in self._blacklisted_cvterms:
                    warn("WARNING: will skip features of unknown type: %s", type_to_check)
                    self._blacklisted_cvterms.append(type_to_check)
        # Read optional fasta file
        self._fasta_sequence_cache = {}
        if fasta:
            for record in SeqIO.parse(fasta, "fasta"):
                self._fasta_sequence_cache[record.id] = str(record.seq)
        # Check that all landmarks are there
        for seq_id in gff_limits['gff_id']:
            seq_id = seq_id[0]
            if seq_id not in self._landmark_cache:
                if landmark_type:
                    # Landmark does not exist yet, but we know how to create it
                    lm = SeqFeature(FeatureLocation(0, 1), type=landmark_type, qualifiers={'ID': [seq_id], 'Name': [seq_id]})
                    if seq_id in self._fasta_sequence_cache:
                        added_feat = self._add_feature_with_attr(None, lm, analysis_id, organism_id, have_loc=False, residues=self._fasta_sequence_cache[seq_id])
                    else:
                        added_feat = self._add_feature_with_attr(None, lm, analysis_id, organism_id, have_loc=False)
                    self._landmark_cache[seq_id] = [added_feat['feature_id']]
                else:
                    raise Exception("Could not find landmark named '{}', add --landmark_type to create it".format(seq_id))
            elif len(self._landmark_cache[seq_id]) > 1:
                raise Exception("Found {} landmarks with same name '{}'".format(len(self._landmark_cache[seq_id]), seq_id))
        count_ins = 0
        # --- Second pass: load the features ---------------------------------
        for rec in GFF.parse(gff):
            # Preload landmark seq to compute some seqs on it
            # We compare to ????... as the gff parser will populate rec.seq with a fake sequence based on the size from "sequence-region" header
            if not no_seq_compute:
                if rec.id in self._fasta_sequence_cache:
                    rec.seq = Seq.Seq(self._fasta_sequence_cache[rec.id])
                    del self._fasta_sequence_cache[rec.id]  # Save a little memory
                elif len(rec.seq) == 0 or str(rec.seq)[0:10] == "??????????":
                    # Fall back to the landmark sequence already stored in chado.
                    seq_res = self.session.query(self.model.feature.residues) \
                        .filter(self.model.feature.uniquename == rec.id)
                    if landmark_type:
                        seq_res = seq_res.filter(self.model.feature.type_id == landmark_type_id)
                    seq_res = seq_res.all()
                    if len(seq_res) == 1 and seq_res[0].residues:
                        rec.seq = Seq.Seq(seq_res[0].residues)
            # Set a custom attr to store the chado feature_id
            rec._chado_feature_id = self._landmark_cache[rec.id][0]
            if not quiet:
                print("Loading features on {}".format(rec.id))
            for f in rec.features:
                self._load_gff_feature_with_children(rec, f, analysis_id, organism_id, re_protein_capture, re_protein, protein_id_attr, no_seq_compute=no_seq_compute)
                count_ins += 1
                if not quiet:
                    print("Inserted feature #{}".format(count_ins))
        self._update_rel_ranks()
        self.session.commit()
        self._reset_cache()
        return {'inserted': count_ins}
def _load_gff_feature_with_children(self, rec, f, analysis_id, organism_id, re_protein_capture, re_protein, protein_id_attr, parent=None, no_seq_compute=False):
    """Recursively insert a GFF feature and all of its sub-features.

    For mRNA features the spliced transcript sequence is computed from the
    exon (or, failing that, CDS) intervals, and a polypeptide feature is
    synthesised when the mRNA has CDS segments but no explicit polypeptide
    child.

    :param rec: current GFF record (landmark); rec._chado_feature_id must
        already be set by the caller
    :param f: the SeqFeature to load
    :param parent: chado feature_id of the parent feature, if any
    :param no_seq_compute: skip sequence computation (used when the
        landmark sequence is unavailable)
    """
    # Be tolerant for proteins (shameless hard coding)
    if f.type == 'protein':
        f.type = 'polypeptide'
    if f.type in self._blacklisted_cvterms:
        if 'ID' in f.qualifiers and len(f.qualifiers['ID']) > 1:
            # NOTE(review): '> 1' only matches multi-valued ID lists; a
            # feature with exactly one ID falls through to the generic
            # message even though ID[0] is available -- looks like it
            # should be '> 0', confirm.
            warn("WARNING: skipping feature %s of unknown type %s" % (f.qualifiers['ID'][0], f.type))
        else:
            warn("WARNING: skipping feature of unknown type %s" % (f.type))
        return
    full_transcript_seq = None
    if f.type == 'mRNA':
        seq_exons = []
        seq_cds = []
        min_cds = None
        max_cds = None
        detected_protein_id = None
        if protein_id_attr:
            # Protein id may be carried on the mRNA itself...
            if protein_id_attr in f.qualifiers and f.qualifiers[protein_id_attr]:
                detected_protein_id = f.qualifiers[protein_id_attr][0]
        # To compute mRNA and polypeptide
        for subrna in f.sub_features:
            if subrna.type == 'CDS':
                seq_cds.append(rec.seq[subrna.location.nofuzzy_start:subrna.location.nofuzzy_end])
                # Track the CDS envelope; used later for the polypeptide span.
                if min_cds is None or subrna.location.start < min_cds:
                    min_cds = subrna.location.start
                if max_cds is None or subrna.location.end > max_cds:
                    max_cds = subrna.location.end
                if protein_id_attr and not detected_protein_id:
                    # ...or on one of its CDS children.
                    if protein_id_attr in subrna.qualifiers and subrna.qualifiers[protein_id_attr]:
                        detected_protein_id = subrna.qualifiers[protein_id_attr][0]
            if subrna.type == 'exon':
                seq_exons.append(rec.seq[subrna.location.nofuzzy_start:subrna.location.nofuzzy_end])
        # "??????????" marks the fake sequence the GFF parser builds from a
        # sequence-region header; in that case there is nothing to splice.
        if not no_seq_compute and len(rec.seq) > 0 and str(rec.seq)[0:10] != "??????????":
            # Prefer exon-based splicing, fall back to CDS concatenation.
            if seq_exons:
                full_transcript_seq = reduce(operator.add, seq_exons)
            elif seq_cds:
                full_transcript_seq = reduce(operator.add, seq_cds)
            # NOTE(review): if the mRNA has neither exon nor CDS children
            # this is None and reverse_complement() would raise -- confirm
            # such records cannot occur upstream.
            if f.strand == -1:
                full_transcript_seq = full_transcript_seq.reverse_complement()
    if full_transcript_seq is not None:
        added_feat = self._add_feature_with_attr(rec, f, analysis_id, organism_id, residues=str(full_transcript_seq), parent=parent)
    else:
        added_feat = self._add_feature_with_attr(rec, f, analysis_id, organism_id, parent=parent)
    mrna_has_polypeptide = False
    for subf in f.sub_features:
        self._load_gff_feature_with_children(rec, subf, analysis_id, organism_id, re_protein_capture, re_protein, protein_id_attr, parent=added_feat['feature_id'], no_seq_compute=no_seq_compute)
        if f.type == 'mRNA':
            mrna_has_polypeptide = mrna_has_polypeptide or (subf.type == 'polypeptide')
    # Create a polypeptide feature
    if f.type == 'mRNA' and not mrna_has_polypeptide and min_cds is not None and max_cds is not None:
        # Pick the polypeptide uniquename: regex rule > attribute > suffix.
        if re_protein:
            pep_uname = re.sub(re_protein_capture, re_protein, added_feat['uniquename'])
        elif detected_protein_id:
            pep_uname = detected_protein_id
        else:
            pep_uname = added_feat['uniquename'] + '-protein'
        polypeptide = SeqFeature(FeatureLocation(min_cds, max_cds), type="polypeptide", strand=f.location.strand, qualifiers={'ID': [pep_uname], 'Name': [added_feat['name']]})
        if 'source' in subrna.qualifiers:
            # NOTE(review): `subrna` is whatever sub-feature the loop above
            # saw last; safe here (min_cds implies at least one iteration)
            # but the chosen 'source' is arbitrary -- confirm intended.
            polypeptide.qualifiers['source'] = subrna.qualifiers['source']
        protein_seq = None
        if not no_seq_compute and len(rec.seq) > 0 and str(rec.seq)[0:10] != "??????????":
            full_cds_seq = reduce(operator.add, seq_cds)
            if f.strand == -1:
                full_cds_seq = full_cds_seq.reverse_complement()
            protein_seq = str(full_cds_seq.translate())
        self._add_feature_with_attr(rec, polypeptide, analysis_id, organism_id, residues=protein_seq, parent=added_feat['feature_id'], parent_rel='derives_from')
def _add_feature_with_attr(self, rec, f, analysis_id, organism_id, residues=None, dbxref_id=None, is_analysis=None, is_obsolete=None, parent=None, parent_rel='part_of', have_loc=True):
    """Insert (or reuse) a chado feature for SeqFeature *f* and attach all
    of its standard GFF annotations: location, aliases, dbxrefs,
    featureprops, parent relationship and Target alignments.

    :param rec: GFF record providing the landmark (rec._chado_feature_id);
        only dereferenced when have_loc is True
    :param residues: explicit sequence for the feature, if known (may be
        overridden by a sequence found in the FASTA cache)
    :param have_loc: whether a featureloc on the landmark should be created
    :return: dict with 'feature_id', 'name' and 'uniquename' keys
    """
    # Prepare name and uniquename
    if 'ID' in f.qualifiers:
        f_uname = f.qualifiers['ID'][0]
    elif 'Name' in f.qualifiers:
        f_uname = f.qualifiers['Name'][0]
    elif have_loc:
        # No identifier at all: forge a unique one from time + coordinates.
        f_uname = "{}-{}-{}:{}..{}".format(time.time(), f.type, rec.id, f.location.start, f.location.end)
    else:
        f_uname = "{}-{}".format(time.time(), f.type)
    if 'Name' in f.qualifiers:
        f_name = f.qualifiers['Name'][0]
    else:
        f_name = f_uname
    feat_term = self.ci.get_cvterm_id(f.type, 'sequence', True)
    # Fill the existing feature cache if not already done
    self._init_feature_cache(organism_id)
    self._init_featureloc_cache(organism_id)
    # See if we have a sequence to load from fasta file
    if f_uname in self._fasta_sequence_cache:
        residues = self._fasta_sequence_cache[f_uname]
    # Features are unique on (uniquename, organism, type).
    feat_uid = (f_uname, organism_id, feat_term)
    if feat_uid in self._feature_cache:
        # Feature already present: reuse it, stacking any new location as
        # an additional featureloc rank.
        feat_id = self._feature_cache[feat_uid]['feature_id']
        rank = 0
        if feat_id in self._featureloc_cache:
            rank = len(self._featureloc_cache[feat_id])
    else:
        md5checksum = None
        seqlen = None
        if residues is not None:
            seqlen = len(residues)
            # Compute md5 checksum
            md5checksum = hashlib.md5()
            md5checksum.update(str(residues).encode('utf-8'))
            md5checksum = md5checksum.hexdigest()
        else:
            # We need an md5 even if empty...
            md5checksum = hashlib.md5()
            md5checksum.update("".encode('utf-8'))
            md5checksum = md5checksum.hexdigest()
        score = None
        if 'score' in f.qualifiers and f.qualifiers['score']:
            score = f.qualifiers['score'][0]
        feat = self._add_feature(analysis_id, organism_id, f_uname, feat_term, name=f_name, residues=residues, seqlen=seqlen, md5checksum=md5checksum, dbxref_id=dbxref_id, is_analysis=is_analysis, is_obsolete=is_obsolete, score=score)
        feat_id = feat.feature_id
        rank = 0
        self._feature_cache[feat_uid] = {'feature_id': feat_id, 'name': feat.name, 'uniquename': feat.uniquename}
    if have_loc:
        self._add_featureloc(rec._chado_feature_id, f, feat_id, rank)
    self._load_feat_alias(f, feat_id)
    self._load_feat_dbxref(f, feat_id)
    if 'Gap' in f.qualifiers:
        for gap in f.qualifiers['Gap']:
            self._add_featureprop(organism_id, feat_id, 'Gap', gap)
    if 'Note' in f.qualifiers:
        for gap in f.qualifiers['Note']:
            self._add_featureprop(organism_id, feat_id, 'Note', gap)
    # GFF attributes handled specifically above/below (or deliberately
    # ignored) rather than stored as generic featureprops.
    special_qualifiers = [
        'ID',
        'Name',
        'Alias',
        'Parent',
        'Target',
        'Derives_from',
        'Dbxref',
        'Ontology_term',
        'Is_circular',
        'target_organism',
        'target_type',
        'organism',
        'Gap',
        'Note',
        'source',
        'phase',
        'score'
    ]
    for qual in f.qualifiers:
        if qual not in special_qualifiers:
            for gap in f.qualifiers[qual]:
                self._add_featureprop(organism_id, feat_id, qual, gap)
    if parent:
        self._set_feature_parent(feat_id, parent, parent_rel)
    if 'Target' in f.qualifiers:
        for target in f.qualifiers['Target']:
            self._add_target(feat_id, target)
    if 'Derives_from' in f.qualifiers:
        for parent in f.qualifiers['Derives_from']:
            # Resolve the referenced uniquename to a chado feature_id by
            # scanning the cache (keys are (uniquename, organism, type)).
            for x in self._feature_cache:
                if x[0] == parent:
                    parent = self._feature_cache[x]['feature_id']
                    self._set_feature_parent(feat_id, parent, 'derives_from')
                    break
    return {'feature_id': feat_id, 'name': f_name, 'uniquename': f_uname}
def _add_featureloc(self, src, f, feat, rank=0):
phase = None
if 'phase' in f.qualifiers:
phase = f.qualifiers['phase'][0]
self._do_add_featureloc(src, feat, rank, f.location.start, f.location.end, f.location.strand, phase)
def _do_add_featureloc(self, src, feat, rank, start, end, strand, phase=None):
loc_hash = (src, start, end, strand)
if feat not in self._featureloc_cache or loc_hash not in self._featureloc_cache[feat]:
feat_loc = self.model.featureloc()
feat_loc.feature_id = feat
feat_loc.srcfeature_id = src
feat_loc.fmin = start
feat_loc.fmax = end
feat_loc.strand = strand
feat_loc.rank = rank
if phase is not None:
feat_loc.phase = phase
self.session.add(feat_loc)
self.session.flush()
self.session.refresh(feat_loc)
if feat not in self._featureloc_cache:
self._featureloc_cache[feat] = []
self._featureloc_cache[feat].append(loc_hash)
def _load_feat_alias(self, f, feat):
    """Store each GFF 'Alias' qualifier as an exact synonym of *feat*.

    Synonym rows are created on demand and linked through feature_synonym
    using the 'null' publication; both steps are de-duplicated via the
    synonym and feature_synonym caches.
    """
    if 'Alias' in f.qualifiers:
        exactterm = self.ci.get_cvterm_id('exact', 'synonym_type')
        pub_id = self.ci.get_pub_id('null')
        self._init_synonym_cache()
        self._init_featsyn_cache()
        for alias in f.qualifiers['Alias']:
            if alias not in self._synonym_cache:
                # First time we meet this alias: create the synonym row.
                syn = self.model.synonym()
                syn.name = alias
                syn.type_id = exactterm
                syn.synonym_sgml = ''
                self.session.add(syn)
                self.session.flush()
                self.session.refresh(syn)
                self._synonym_cache[alias] = syn.synonym_id
            if feat not in self._featsyn_cache or self._synonym_cache[alias] not in self._featsyn_cache[feat]:
                # Link feature <-> synonym unless they are already linked.
                syn2feat = self.model.feature_synonym()
                syn2feat.synonym_id = self._synonym_cache[alias]
                syn2feat.feature_id = feat
                syn2feat.pub_id = pub_id
                self.session.add(syn2feat)
                if feat not in self._featsyn_cache:
                    self._featsyn_cache[feat] = []
                self._featsyn_cache[feat].append(self._synonym_cache[alias])
def _add_featureprop(self, organism_id, feat, prop, value):
try:
propterm = self.ci.get_cvterm_id(prop, 'feature_property')
except chado.RecordNotFoundError:
propterm = self.ci.create_cvterm(prop, 'feature_property', 'internal')
cache_hash = (feat, propterm)
rank = 0
self._init_featureprop_cache(organism_id)
if cache_hash in self._featureprop_cache:
if value in self._featureprop_cache[cache_hash]:
# Don't add two times the same featureprop
return
rank = len(self._featureprop_cache[cache_hash])
prop = self.model.featureprop()
prop.type_id = propterm
prop.feature_id = feat
prop.value = value
prop.rank = rank
self.session.add(prop)
if cache_hash not in self._featureprop_cache:
self._featureprop_cache[cache_hash] = []
self._featureprop_cache[cache_hash].append(value)
def _load_feat_dbxref(self, f, feat):
self._init_db_cache()
self._init_xref_cache()
self._init_featxref_cache()
self._init_featcvterm_cache()
if 'Dbxref' in f.qualifiers:
for xref in f.qualifiers['Dbxref']:
self._add_feat_dbxref(feat, xref)
if 'source' in f.qualifiers:
for source in f.qualifiers['source']:
self._add_feat_dbxref(feat, 'GFF_source:{}'.format(source))
if 'Ontology_term' in f.qualifiers:
for term in f.qualifiers['Ontology_term']:
self._add_feat_cvterm(feat, term)
def _add_feat_dbxref(self, feat, xref):
xref = xref.split(':')
if len(xref) != 2:
return
xref_db = xref[0]
xref_acc = xref[1]
if xref_db not in self._db_cache:
db = self.model.db()
db.name = xref_db
db.description = 'Added automatically by the GFF loader'
self.session.add(db)
self.session.flush()
self.session.refresh(db)
self._db_cache[xref_db] = db.db_id
if (xref_db, xref_acc) not in self._xref_cache:
dbxref = self.model.dbxref()
dbxref.db_id = self._db_cache[xref_db]
dbxref.accession = xref_acc
dbxref.version = ''
self.session.add(dbxref)
self.session.flush()
self.session.refresh(dbxref)
self._xref_cache[(xref_db, xref_acc)] = dbxref.dbxref_id
if feat not in self._featxref_cache or self._xref_cache[(xref_db, xref_acc)] not in self._featxref_cache[feat]:
dbx2feat = self.model.feature_dbxref()
dbx2feat.dbxref_id = self._xref_cache[(xref_db, xref_acc)]
dbx2feat.feature_id = feat
self.session.add(dbx2feat)
if feat not in self._featxref_cache:
self._featxref_cache[feat] = []
self._featxref_cache[feat].append(self._xref_cache[(xref_db, xref_acc)])
def _add_target(self, feat, target_str):
    """Create an extra featureloc (next free rank) from a GFF 'Target'
    attribute, locating *feat* on the named target feature.

    Expected format: "<target_id> <start> <end> [<strand>]".
    Malformed or unresolvable values are warned about and skipped.
    """
    target = target_str.split(' ')
    if len(target) != 3 and len(target) != 4:
        warn('Malformed Target value: {}, skipping'.format(target_str))
        return
    strand = 1
    if len(target) == 4:
        if target[3] == '+':
            strand = 1
        elif target[3] == '-':
            strand = -1
        else:
            warn('Malformed Target value (bad strand): {}, skipping'.format(target_str))
            return
    landmark_str = target[0]
    landmark = None
    # NOTE(review): GFF Target coordinates are 1-based inclusive while
    # chado featureloc uses 0-based interbase; no conversion happens here
    # -- confirm this matches the rest of the loader.
    start = int(target[1])
    end = int(target[2])
    # Stack this location after any locations the feature already has.
    rank = 0
    if feat in self._featureloc_cache:
        rank = len(self._featureloc_cache[feat])
    # Resolve the target name against features loaded so far; cache keys
    # are (uniquename, organism, type) tuples.
    for x in self._feature_cache:
        if x[0] == landmark_str:
            landmark = self._feature_cache[x]['feature_id']
            break
    if landmark is None:
        warn('Malformed Target value (unknown target): {}, skipping'.format(target_str))
        return
    self._do_add_featureloc(landmark, feat, rank, start, end, strand)
def _set_feature_parent(self, feat, parent, parent_rel='part_of'):
partofterm = self.ci.get_cvterm_id('part_of', 'sequence', True)
reltypeterm = self.ci.get_cvterm_id(parent_rel, 'sequence', True)
self._init_featrel_cache()
if parent not in self._featrel_cache or (feat, reltypeterm) not in self._featrel_cache[parent]:
rel = self.model.feature_relationship()
rel.subject_id = feat
rel.object_id = parent
rel.type_id = reltypeterm
self.session.add(rel)
if parent not in self._featrel_cache:
self._featrel_cache[parent] = []
self._featrel_cache[parent].append((feat, reltypeterm))
if reltypeterm == partofterm:
if self._featured_dirty_rels is None:
self._featured_dirty_rels = []
if parent not in self._featured_dirty_rels:
self._featured_dirty_rels.append(parent)
def _update_rel_ranks(self):
    """
    Updates the rank columns in feature_relationship table based on the order of child features

    Only do this for part_of relationships
    """
    if self._featured_dirty_rels:
        partofterm = self.ci.get_cvterm_id('part_of', 'sequence', True)
        for parent in self._featured_dirty_rels:
            if parent in self._featureloc_cache:
                # Landmark (srcfeature) of the parent's first location;
                # children are ranked only on that same landmark.
                parent_src = self._featureloc_cache[parent][0][0]
                children = self._featrel_cache[parent]
                children_locs = []
                for x in children:
                    # x is a (child_feature_id, rel_type_id) tuple.
                    if x[1] == partofterm and x[0] in self._featureloc_cache:
                        for y in self._featureloc_cache[x[0]]:
                            # y is (src, start, end, strand); keep the child's
                            # start coordinate for ordering.
                            if parent_src == y[0]:
                                children_locs.append((x[0], y[1]))
                # Order children by start position, then write back ranks.
                children_locs = sorted(children_locs, key=lambda x: x[1])
                for rank, child in enumerate(children_locs):
                    self.session.query(self.model.feature_relationship) \
                        .filter(
                            self.model.feature_relationship.object_id == parent,
                            self.model.feature_relationship.subject_id == child[0],
                            self.model.feature_relationship.type_id == partofterm) \
                        .update({"rank": rank})
def _add_feature(self, analysis_id, organism_id, uniquename, type_id, name=None, residues=None, seqlen=None, md5checksum=None, dbxref_id=None, is_analysis=None, is_obsolete=None, score=None):
feat = self.model.feature()
feat.organism_id = organism_id
feat.uniquename = uniquename
feat.type_id = type_id
if name is not None:
feat.name = name
if residues is not None:
feat.residues = residues
if seqlen is not None:
feat.seqlen = seqlen
if md5checksum is not None:
feat.md5checksum = md5checksum
if is_analysis is not None:
feat.is_analysis = is_analysis
if is_obsolete is not None:
feat.is_obsolete = is_obsolete
self.session.add(feat)
self.session.flush()
self.session.refresh(feat)
afeat = self.model.analysisfeature()
afeat.feature = feat
afeat.analysis_id = analysis_id
if score:
afeat.significance = score
self.session.add(afeat)
return feat
def load_go(self, input, organism_id, analysis_id, query_type='polypeptide', match_on_name=False,
            name_column=2, go_column=5, re_name=None, skip_missing=False):
    """Deprecated: GO annotation loading moved to the chado/chakin
    `load go` command.

    The full parameter list is kept so existing callers fail with a clear
    message instead of a TypeError.

    :raises Exception: always, pointing at the replacement command
    """
    raise Exception("This function has been renamed. Please use chado/chakin load load_go instead")
| StarcoderdataPython |
from django.contrib import messages
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.shortcuts import get_object_or_404, render
from django.utils.encoding import force_str
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.utils.translation import gettext as _
from django.views.generic import (
    CreateView,
    DetailView,
    FormView,
    ListView,
    TemplateView,
    View,
)
from django.views.generic.edit import CreateView  # noqa: F811 -- kept from original

from .tokens import account_verifier
from .models import User, Player, Team, Challenge, Category, Submition
from .mixins import IsNotInTeamMixin, IsInTeamMixin, IsVerifiedMixin
"""############ GENERAL VIEWS ############"""
class UserCreateView(SuccessMessageMixin, CreateView):
    """Sign-up view backed by Django's stock UserCreationForm."""

    form_class = UserCreationForm
    template_name = "user_new.html"
class IndexView(TemplateView):
    """Static landing page."""

    template_name = "index.html"
"""############ PLAYER VIEWS ############"""
class PlayerDetailView(DetailView):
    """Get access to the details of a Player."""

    model = Player
    template_name = "player_profile.html"

    def get_object(self):
        # The URL captures a username; 404 when no matching player exists.
        username = self.kwargs["username"]
        return get_object_or_404(Player, user__username=username)
class PlayerProfileView(TemplateView):
    """Get access to the details of the CURRENT player."""

    # NOTE(review): this renders "player_detail.html" while PlayerDetailView
    # renders "player_profile.html" -- the two template names look swapped;
    # confirm against the templates directory before changing.
    template_name = "player_detail.html"
class PlayerListView(ListView):
    """Listing of every registered player."""

    model = Player
    template_name = "player_list.html"
class PlayerVerifyAccountView(View):
    """Confirm a player's e-mail address from a uid/token activation link.

    GET /<uidb64>/<token>/ decodes the user id, validates the token and
    marks the related profile as e-mail-confirmed. Any malformed or forged
    link renders the invalid-token page with a 409 status.
    """

    def get(self, request, uidb64, token):
        # Decode the uid defensively: any malformed value maps to "no user".
        try:
            uid = force_str(urlsafe_base64_decode(uidb64))
            user = User.objects.get(pk=uid)
        except (TypeError, ValueError, OverflowError, User.DoesNotExist):
            user = None
        if user is not None and account_verifier.check_token(user, token):
            user.profile.email_confirmed = True
            # Bug fix: the confirmed flag lives on the related profile
            # object, so the PROFILE must be saved -- the original called
            # user.save(), which silently discarded the change.
            user.profile.save()
            messages.success(
                request=request,
                message=_("Your account has been verified successfuly!"),
            )
            return render(
                request=request,
                template_name="users/activation_success.html",
                status=200,
            )
        else:
            # invalid link
            return render(
                request=request, template_name="users/invalid_token.html", status=409
            )
############ TEAM VIEWS ############
class TeamDetailView(DetailView):
    """Detail page for a single team."""

    model = Team
    # Bug fix: the attribute was misspelled `tempate_name`, so Django
    # ignored it and fell back to the default "<app>/team_detail.html".
    template_name = "team_details.html"
class TeamListView(ListView):
    """Listing of all teams."""

    model = Team
    template_name = "team_list.html"
class TeamCreateView(LoginRequiredMixin, IsVerifiedMixin, IsNotInTeamMixin, CreateView):
    # Create a new team; restricted to verified, logged-in users who are
    # not already in a team (enforced by the mixins).
    # NOTE(review): CreateView normally requires `fields` or `form_class`;
    # without one Django raises ImproperlyConfigured at request time --
    # confirm a form is wired up elsewhere or add `fields`.
    model = Team
    template_name = "team_new.html"
class TeamJoinView(LoginRequiredMixin, IsVerifiedMixin, IsNotInTeamMixin, FormView):
    """Placeholder: joining an existing team is not implemented yet."""
############ CHALLENGE VIEWS ############
class ChallengeListView(LoginRequiredMixin, IsVerifiedMixin, IsInTeamMixin, ListView):
    """List the challenge categories for logged-in, verified team members.

    Although named Challenge*, it actually lists Category objects (each
    category groups its challenges), as the original comment noted.
    """

    # Bug fix: the class previously inherited only from mixins and had no
    # actual view base class, so it could not serve requests; ListView
    # supplies get() and the rendering machinery.
    model = Category
    template_name = "challenge_list.html"
class ChallengeSubmitionView(LoginRequiredMixin, IsVerifiedMixin, IsInTeamMixin, View):
    """Placeholder: challenge flag submission is not implemented yet."""
| StarcoderdataPython |
# pylint: skip-file
# -*- coding: utf-8 -*-
# Module: LibraryExporter
# Created on: 13.01.2017
import os
import re
import time
import threading
import xbmc
import xbmcgui
import xbmcvfs
import requests
from utils import noop
from KodiHelper import KodiHelper
try:
import cPickle as pickle
except:
import pickle
class Library(object):
    """Exports Netflix shows & movies to a local library folder.

    Writes .strm stub files (one per movie / episode) that Kodi's library
    scanner can pick up, caches raw Netflix metadata & artwork as pickles
    on disk, and tracks everything exported in a small pickled database
    (lib.ndb).
    """

    series_label = 'shows'
    """str: Label to identify shows"""

    movies_label = 'movies'
    """str: Label to identify movies"""

    metadata_label = 'metadata'
    """str: Label to identify metadata"""

    imagecache_label = 'imagecache'
    """str: Label to identify imagecache"""

    db_filename = 'lib.ndb'
    """str: (File)Name of the store for the database dump that contains
    all shows/movies added to the library"""
def __init__(self, root_folder, library_settings, log_fn=noop):
    """
    Takes the instances & configuration options needed to drive the plugin

    Parameters
    ----------
    root_folder : :obj:`str`
        Plugin data folder (also holds the library DB file)

    library_settings : :obj:`dict` of :obj:`str`
        Library settings: the 'enablelibraryfolder' flag and the
        'customlibraryfolder' path

    log_fn : :obj:`fn`
        optional log function
    """
    enable_custom_folder = library_settings['enablelibraryfolder']
    self.kodi_helper = KodiHelper()
    self.base_data_path = root_folder
    self.enable_custom_library_folder = enable_custom_folder
    self.custom_library_folder = library_settings['customlibraryfolder']
    self.db_filepath = os.path.join(self.base_data_path, self.db_filename)
    self.log = log_fn

    # check for local library folder & set up the paths
    # (the setting is stored as the string 'true'/'false', not a bool)
    if self.enable_custom_library_folder != 'true':
        lib_path = self.base_data_path
    else:
        lib_path = self.custom_library_folder
    self.movie_path = os.path.join(lib_path, self.movies_label)
    self.tvshow_path = os.path.join(lib_path, self.series_label)
    self.metadata_path = os.path.join(lib_path, self.metadata_label)
    self.imagecache_path = os.path.join(lib_path, self.imagecache_label)

    # check if we need to setup the base folder structure & do so if needed
    self.setup_local_netflix_library(source={
        self.movies_label: self.movie_path,
        self.series_label: self.tvshow_path,
        self.metadata_label: self.metadata_path,
        self.imagecache_label: self.imagecache_path
    })

    # load the local db
    self.db = self._load_local_db(filename=self.db_filepath)
def setup_local_netflix_library(self, source):
    """Create any missing base folders for the exported library.

    Parameters
    ----------
    source : :obj:`dict` of :obj:`str`
        Mapping of label -> directory path to ensure
    """
    for label, folder in source.items():
        checked = self.kodi_helper.check_folder_path(folder)
        if not xbmcvfs.exists(path=checked):
            xbmcvfs.mkdir(folder)
def write_strm_file(self, path, url, title_player):
    """Write a .strm stub (m3u style) pointing Kodi at the plugin URL.

    Parameters
    ----------
    path : :obj:`str`
        Destination file path
    url : :obj:`str`
        Stream url
    title_player : :obj:`str`
        Fallback title for the m3u EXTINF entry
    """
    self.log('Writing {}'.format(path.decode('ascii', 'ignore')))
    handle = xbmcvfs.File(path, 'w')
    handle.write('#EXTINF:-1,' + title_player.encode('utf-8') + '\n')
    handle.write(url)
    handle.close()
    self.log('Successfully wrote {}'.format(path.decode('ascii', 'ignore')))
def write_metadata_file(self, video_id, content):
    """Cache raw Netflix metadata for *video_id* as a pickle on disk.

    Does nothing when a cache file for the id already exists.
    """
    target = os.path.join(self.metadata_path, video_id + '.meta')
    if xbmcvfs.exists(target):
        return  # already cached
    handle = xbmcvfs.File(target, 'wb')
    pickle.dump(content, handle)
    handle.close()
def read_metadata_file(self, video_id):
    """Return the cached Netflix metadata for *video_id*.

    Returns None when no cache file exists for that id.
    """
    source = os.path.join(self.metadata_path, str(video_id) + '.meta')
    if not xbmcvfs.exists(source):
        return
    handle = xbmcvfs.File(source, 'rb')
    raw = handle.read()
    handle.close()
    return pickle.loads(raw)
def read_artdata_file(self, video_id):
    """Return the cached Netflix artwork data for *video_id*.

    Returns None when no cache file exists for that id.
    """
    source = os.path.join(self.metadata_path, str(video_id) + '.art')
    if not xbmcvfs.exists(source):
        return
    handle = xbmcvfs.File(source, 'rb')
    raw = handle.read()
    handle.close()
    return pickle.loads(raw)
def write_artdata_file(self, video_id, content):
    """Cache raw Netflix artwork data for *video_id* as a pickle on disk.

    Does nothing when a cache file for the id already exists.
    """
    target = os.path.join(self.metadata_path, video_id + '.art')
    if xbmcvfs.exists(target):
        return  # already cached
    handle = xbmcvfs.File(target, 'wb')
    pickle.dump(content, handle)
    handle.close()
def _load_local_db(self, filename):
"""Loads the local db file and parses it, creates one if not existent
Parameters
----------
filename : :obj:`str`
Filepath of db file
Returns
-------
:obj:`dict`
Parsed contents of the db file
"""
# if the db doesn't exist, create it
if not os.path.isfile(filename):
data = {self.movies_label: {}, self.series_label: {}}
self.log('Setup local library DB')
self._update_local_db(filename=filename, db=data)
return data
with open(filename) as f:
data = pickle.load(f)
if data:
return data
else:
return {}
def _update_local_db(self, filename, db):
"""Updates the local db file with new data
Parameters
----------
filename : :obj:`str`
Filepath of db file
db : :obj:`dict`
Database contents
Returns
-------
bool
Update has been successfully executed
"""
if not os.path.isdir(os.path.dirname(filename)):
return False
with open(filename, 'w') as f:
f.truncate()
pickle.dump(db, f)
return True
def movie_exists(self, title, year):
    """Checks if a movie is already present in the local DB.

    The lookup key is '<sanitized title> (<year>)'.

    Returns
    -------
    bool
        Movie exists in DB
    """
    sanitized = re.sub(r'[?|$|!|:|#]', r'', title)
    key = '%s (%d)' % (sanitized, year)
    return key in self.db[self.movies_label]
def show_exists(self, title):
    """Checks if a show is present in the local DB.

    Returns
    -------
    bool
        Show exists in DB
    """
    key = '%s' % (re.sub(r'[?|$|!|:|#]', r'', title))
    return key in self.db[self.series_label]
def season_exists(self, title, season):
    """Checks if a season of a show is present in the local DB.

    Returns
    -------
    bool
        Season of show exists in DB
    """
    sanitized = re.sub(r'[?|$|!|:|#]', r'', title)
    if not self.show_exists(sanitized):
        return False
    return season in self.db[self.series_label][sanitized]['seasons']
def episode_exists(self, title, season, episode):
    """Checks if an episode of a show is present in the local DB.

    Episodes are keyed as 'SxxEyy' within the show's entry.

    Returns
    -------
    bool
        Episode of show exists in DB
    """
    sanitized = re.sub(r'[?|$|!|:|#]', r'', title)
    if not self.show_exists(sanitized):
        return False
    key = 'S%02dE%02d' % (season, episode)
    return key in self.db[self.series_label][sanitized]['episodes']
def add_movie(self, title, alt_title, year, video_id, build_url):
    """Adds a movie to the local db, generates & persists the strm file

    Parameters
    ----------
    title : :obj:`str`
        Title of the movie
    alt_title : :obj:`str`
        Alternative title given by the user (used as the folder name)
    year : :obj:`int`
        Release year of the movie
    video_id : :obj:`str`
        ID of the video to be played
    build_url : :obj:`fn`
        Function to generate the stream url
    """
    title = re.sub(r'[?|$|!|:|#]', r'', title)
    movie_meta = '%s (%d)' % (title, year)
    folder = re.sub(r'[?|$|!|:|#]', r'', alt_title)
    dirname = self.kodi_helper.check_folder_path(
        path=os.path.join(self.movie_path, folder))
    filename = os.path.join(dirname, movie_meta + '.strm')
    progress = xbmcgui.DialogProgress()
    progress.create(self.kodi_helper.get_local_string(650), movie_meta)

    if xbmcvfs.exists(filename):
        # NOTE(review): this early return leaves the progress dialog open
        # (progress.close() below is never reached) -- confirm/fix.
        return

    if not xbmcvfs.exists(dirname):
        xbmcvfs.mkdirs(dirname)
    if self.movie_exists(title=title, year=year) is False:
        progress.update(50)
        time.sleep(0.5)
        self.db[self.movies_label][movie_meta] = {'alt_title': alt_title}
        self._update_local_db(filename=self.db_filepath, db=self.db)
    url = build_url({'action': 'play_video', 'video_id': video_id})
    self.write_strm_file(path=filename, url=url, title_player=movie_meta)
    progress.update(100)
    time.sleep(1)
    progress.close()
def add_show(self, netflix_id, title, alt_title, episodes, build_url,
             in_background=False):
    """Adds a show to the local db, generates & persists the strm files

    Note: Can also used to store complete seasons or single episodes,
    it all depends on what is present in the episodes dictionary

    Parameters
    ----------
    netflix_id : :obj:`str`
        Netflix id of the show (backfilled into older DB entries)
    title : :obj:`str`
        Title of the show
    alt_title : :obj:`str`
        Alternative title given by the user (used as the folder name)
    episodes : :obj:`dict` of :obj:`dict`
        Episodes that need to be added
    build_url : :obj:`fn`
        Function to generate the stream url
    in_background : :obj:`bool`
        Suppress the UI progress dialog & show a notification at the end

    Returns
    -------
    False when there was nothing to export, the show folder path otherwise
    """
    title = re.sub(r'[?|$|!|:|#]', r'', title)
    show_meta = '%s' % (title)
    folder = re.sub(r'[?|$|!|:|#]', r'', alt_title.encode('utf-8'))
    show_dir = self.kodi_helper.check_folder_path(
        path=os.path.join(self.tvshow_path, folder))
    progress = self._create_progress_dialog(in_background)
    progress.create(self.kodi_helper.get_local_string(650), show_meta)

    if not xbmcvfs.exists(show_dir):
        self.log('Created show folder {}'.format(show_dir))
        xbmcvfs.mkdirs(show_dir)

    if self.show_exists(title) is False:
        self.log('Show does not exists, adding entry to internal library')
        self.db[self.series_label][show_meta] = {
            'netflix_id': netflix_id,
            'seasons': [],
            'episodes': [],
            'alt_title': alt_title}
    else:
        self.log('Show is present in internal library: {}'
                 .format(self.db[self.series_label][show_meta]))
        # Older DB entries may predate the netflix_id field: backfill it.
        if 'netflix_id' not in self.db[self.series_label][show_meta]:
            self.db[self.series_label][show_meta]['netflix_id'] = netflix_id
            self._update_local_db(filename=self.db_filepath, db=self.db)
            self.log('Added missing netflix_id={} for {} to internal library.'
                     .format(netflix_id, title.encode('utf-8')),
                     xbmc.LOGNOTICE)

    # Only export episodes that are not already in the library.
    episodes = [episode for episode in episodes
                if not self.episode_exists(title, episode['season'],
                                           episode['episode'])]
    self.log('Episodes to export: {}'.format(episodes))

    if len(episodes) == 0:
        self.log('No episodes to export, exiting')
        return False

    # Advance the progress bar evenly across the episodes to write.
    step = round(100.0 / len(episodes), 1)
    percent = step
    for episode in episodes:
        desc = self.kodi_helper.get_local_string(20373) + ': '
        desc += str(episode.get('season'))
        long_desc = self.kodi_helper.get_local_string(20359) + ': '
        long_desc += str(episode.get('episode'))
        progress.update(
            percent=int(percent),
            line1=show_meta,
            line2=desc,
            line3=long_desc)
        self._add_episode(
            show_dir=show_dir,
            title=title,
            season=episode.get('season'),
            episode=episode.get('episode'),
            video_id=episode.get('id'),
            build_url=build_url)
        percent += step
        time.sleep(0.05)

    # Persist the DB once after all episodes were added.
    self._update_local_db(filename=self.db_filepath, db=self.db)
    time.sleep(1)
    progress.close()
    if in_background:
        self.kodi_helper.dialogs.show_episodes_added_notify(
            title, len(episodes), self.kodi_helper.icon)
    return show_dir
def _create_progress_dialog(self, is_noop):
if is_noop:
class NoopDialog():
def create(self, title, subtitle):
return noop()
def update(self, **kwargs):
return noop()
def close(self):
return noop()
return NoopDialog()
return xbmcgui.DialogProgress()
def _add_episode(self, title, show_dir, season, episode, video_id, build_url):
    """
    Adds a single episode to the local DB,
    generates & persists the strm file

    The in-memory DB is updated here; persisting it to disk is left to
    the caller (add_show).

    Parameters
    ----------
    title : :obj:`str`
        Title of the show
    show_dir : :obj:`str`
        Directory that holds the stream files for that show
    season : :obj:`int`
        Season sequence number
    episode : :obj:`int`
        Episode sequence number
    video_id : :obj:`str`
        ID of the video to be played
    build_url : :obj:`fn`
        Function to generate the stream url
    """
    season = int(season)
    episode = int(episode)
    title = re.sub(r'[?|$|!|:|#]', r'', title)
    self.log('Adding S{}E{} (id={}) of {} (dest={})'
             .format(season, episode, video_id, title.encode('utf-8'),
                     show_dir))

    # add season
    if self.season_exists(title=title, season=season) is False:
        self.log(
            'Season {} does not exist, adding entry to internal library.'
            .format(season))
        self.db[self.series_label][title]['seasons'].append(season)

    # add episode (keyed as SxxEyy)
    episode_meta = 'S%02dE%02d' % (season, episode)
    episode_exists = self.episode_exists(
        title=title,
        season=season,
        episode=episode)
    if episode_exists is False:
        self.log(
            'S{}E{} does not exist, adding entry to internal library.'
            .format(season, episode))
        self.db[self.series_label][title]['episodes'].append(episode_meta)

    # create strm file (skipped when it is already on disk)
    filename = episode_meta + '.strm'
    filepath = os.path.join(show_dir, filename)
    if xbmcvfs.exists(filepath):
        self.log('strm file {} already exists, not writing it'
                 .format(filepath))
        return
    url = build_url({'action': 'play_video', 'video_id': video_id})
    self.write_strm_file(
        path=filepath,
        url=url,
        title_player=title + ' - ' + episode_meta)
def remove_movie(self, title, year):
    """Removes the DB entry & the strm file for the movie given

    Parameters
    ----------
    title : :obj:`str`
        Title of the movie
    year : :obj:`int`
        Release year of the movie

    Returns
    -------
    bool
        True when the movie folder existed and was removed, False otherwise
    """
    title = re.sub(r'[?|$|!|:|#]', r'', title)
    movie_meta = '%s (%d)' % (title, year)
    folder = re.sub(
        pattern=r'[?|$|!|:|#]',
        repl=r'',
        string=self.db[self.movies_label][movie_meta]['alt_title'])
    progress = xbmcgui.DialogProgress()
    progress.create(self.kodi_helper.get_local_string(1210), movie_meta)
    progress.update(50)
    time.sleep(0.5)
    del self.db[self.movies_label][movie_meta]
    self._update_local_db(filename=self.db_filepath, db=self.db)
    dirname = self.kodi_helper.check_folder_path(
        path=os.path.join(self.movie_path, folder))
    filename = os.path.join(self.movie_path, folder, movie_meta + '.strm')
    removed = False
    if xbmcvfs.exists(dirname):
        xbmcvfs.delete(filename)
        xbmcvfs.rmdir(dirname)
        removed = True
    # Bug fix: the original returned before its trailing
    # time.sleep(1)/progress.close() lines (dead code), so the progress
    # dialog was never closed.
    time.sleep(1)
    progress.close()
    return removed
def remove_show(self, title):
"""Removes the DB entry & the strm files for the show given
Parameters
----------
title : :obj:`str`
Title of the show
Returns
-------
bool
Delete successfull
"""
title = re.sub(r'[?|$|!|:|#]', r'', title)
label = self.series_label
rep_str = self.db[label][title]['alt_title'].encode('utf-8')
folder = re.sub(
pattern=r'[?|$|!|:|#]',
repl=r'',
string=rep_str)
progress = xbmcgui.DialogProgress()
progress.create(self.kodi_helper.get_local_string(1210), title)
time.sleep(0.5)
del self.db[self.series_label][title]
self._update_local_db(filename=self.db_filepath, db=self.db)
show_dir = self.kodi_helper.check_folder_path(
path=os.path.join(self.tvshow_path, folder))
if xbmcvfs.exists(show_dir):
show_files = xbmcvfs.listdir(show_dir)[1]
episode_count_total = len(show_files)
step = round(100.0 / episode_count_total, 1)
percent = 100 - step
for filename in show_files:
progress.update(int(percent))
xbmcvfs.delete(os.path.join(show_dir, filename))
percent = percent - step
time.sleep(0.05)
xbmcvfs.rmdir(show_dir)
return True
return False
time.sleep(1)
progress.close()
def remove_season(self, title, season):
"""Removes the DB entry & the strm files for a season of a show given
Parameters
----------
title : :obj:`str`
Title of the show
season : :obj:`int`
Season sequence number
Returns
-------
bool
Delete successfull
"""
title = re.sub(r'[?|$|!|:|#]', r'', title.encode('utf-8'))
season = int(season)
season_list = []
episodes_list = []
show_meta = '%s' % (title)
for season_entry in self.db[self.series_label][show_meta]['seasons']:
if season_entry != season:
season_list.append(season_entry)
self.db[self.series_label][show_meta]['seasons'] = season_list
alt_title = self.db[self.series_label][show_meta]['alt_title']
show_dir = self.kodi_helper.check_folder_path(
path=os.path.join(self.tvshow_path, alt_title))
if xbmcvfs.exists(show_dir):
show_files = [f for f in xbmcvfs.listdir(show_dir) if xbmcvfs.exists(os.path.join(show_dir, f))]
for filename in show_files:
if 'S%02dE' % (season) in filename:
xbmcvfs.delete(os.path.join(show_dir, filename))
else:
episodes_list.append(filename.replace('.strm', ''))
self.db[self.series_label][show_meta]['episodes'] = episodes_list
self._update_local_db(filename=self.db_filepath, db=self.db)
return True
def remove_episode(self, title, season, episode):
"""Removes the DB entry & the strm files for an episode of a show given
Parameters
----------
title : :obj:`str`
Title of the show
season : :obj:`int`
Season sequence number
episode : :obj:`int`
Episode sequence number
Returns
-------
bool
Delete successfull
"""
title = re.sub(r'[?|$|!|:|#]', r'', title.encode('utf-8'))
episodes_list = []
show_meta = '%s' % (title)
episode_meta = 'S%02dE%02d' % (season, episode)
alt_title = self.db[self.series_label][show_meta]['alt_title']
show_dir = self.kodi_helper.check_folder_path(
path=os.path.join(self.tvshow_path, alt_title))
if xbmcvfs.exists(os.path.join(show_dir, episode_meta + '.strm')):
xbmcvfs.delete(os.path.join(show_dir, episode_meta + '.strm'))
for episode_entry in self.db[self.series_label][show_meta]['episodes']:
if episode_meta != episode_entry:
episodes_list.append(episode_entry)
self.db[self.series_label][show_meta]['episodes'] = episodes_list
self._update_local_db(filename=self.db_filepath, db=self.db)
return True
def list_exported_media(self):
"""Return List of exported movies
Returns
-------
obj:`dict`
Contents of export folder
"""
movies = (['', ''])
shows = (['', ''])
movie_path = self.movie_path
tvshow_path = self.tvshow_path
if xbmcvfs.exists(self.kodi_helper.check_folder_path(movie_path)):
movies = xbmcvfs.listdir(movie_path)
if xbmcvfs.exists(self.kodi_helper.check_folder_path(tvshow_path)):
shows = xbmcvfs.listdir(tvshow_path)
return movies + shows
    def list_exported_shows(self):
        """Return the exported-shows section of the internal library DB.

        Returns
        -------
        obj:`dict`
            Mapping of show title -> {'seasons', 'episodes', 'alt_title'}
            (structure as built by updatedb_from_exported / add_show).
        """
        return self.db[self.series_label]
    def get_exported_movie_year(self, title):
        """Return year of given exported movie

        Returns
        -------
        obj:`int`
            year of given movie
        """
        year = '0000'
        folder = self.kodi_helper.check_folder_path(
            path=os.path.join(self.movie_path, title))
        if xbmcvfs.exists(folder):
            # xbmcvfs.listdir returns (dirs, files); the year is scraped out
            # of the stringified file list by splitting on the first "(...)".
            # NOTE(review): assumes the first file name contains exactly one
            # "(YYYY)" token -- verify against the export naming scheme.
            file = xbmcvfs.listdir(folder)
            year = str(file[1]).split('(', 1)[1].split(')', 1)[0]
        return int(year)
    def updatedb_from_exported(self):
        """Adds movies and shows from exported media to the local db

        Walks the exported movie and TV-show folders and registers any
        entry missing from the internal library DB.  Python-2 code: uses
        the ``unicode`` builtin and byte-string ``decode``.

        Returns
        -------
        bool
            Process finished
        """
        tv_show_path = self.tvshow_path
        db_filepath = self.db_filepath
        if xbmcvfs.exists(self.kodi_helper.check_folder_path(self.movie_path)):
            movies = xbmcvfs.listdir(self.movie_path)
            # movies[0] is the subdirectory list -- one folder per movie.
            for video in movies[0]:
                folder = os.path.join(self.movie_path, video)
                file = xbmcvfs.listdir(folder)
                # Year is scraped from the stringified file list: "Title (YYYY).strm".
                # NOTE(review): assumes at least one "(YYYY)" token exists.
                year = int(str(file[1]).split("(", 1)[1].split(")", 1)[0])
                alt_title = unicode(video.decode('utf-8'))
                title = unicode(video.decode('utf-8'))
                movie_meta = '%s (%d)' % (title, year)
                if self.movie_exists(title=title, year=year) is False:
                    self.db[self.movies_label][movie_meta] = {
                        'alt_title': alt_title}
                    self._update_local_db(filename=db_filepath, db=self.db)
        if xbmcvfs.exists(self.kodi_helper.check_folder_path(tv_show_path)):
            shows = xbmcvfs.listdir(tv_show_path)
            # shows[0] is the subdirectory list -- one folder per show.
            for video in shows[0]:
                show_dir = os.path.join(tv_show_path, video)
                title = unicode(video.decode('utf-8'))
                alt_title = unicode(video.decode('utf-8'))
                show_meta = '%s' % (title)
                if self.show_exists(title) is False:
                    self.db[self.series_label][show_meta] = {
                        'seasons': [],
                        'episodes': [],
                        'alt_title': alt_title}
                    # episodes[1] is the file list: "SxxEyy.strm" entries.
                    episodes = xbmcvfs.listdir(show_dir)
                    for episode in episodes[1]:
                        file = str(episode).split(".")[0]
                        season = int(str(file).split("S")[1].split("E")[0])
                        episode = int(str(file).split("E")[1])
                        episode_meta = 'S%02dE%02d' % (season, episode)
                        episode_exists = self.episode_exists(
                            title=title,
                            season=season,
                            episode=episode)
                        if episode_exists is False:
                            self.db[self.series_label][title]['episodes'].append(episode_meta)
        self._update_local_db(
            filename=self.db_filepath,
            db=self.db)
        return True
    def download_image_file(self, title, url):
        """Writes thumb image which is shown in exported

        Parameters
        ----------
        title : :obj:`str`
            Filename based on title

        url : :obj:`str`
            Image url

        Returns
        -------
        bool
            Download triggered
        """
        title = re.sub(r'[?|$|!|:|#]', r'', title)
        imgfile = title + '.jpg'
        file = os.path.join(self.imagecache_path, imgfile)
        folder_movies = self.kodi_helper.check_folder_path(
            path=os.path.join(self.movie_path, title))
        folder_tvshows = self.kodi_helper.check_folder_path(
            path=os.path.join(self.tvshow_path, title))
        file_exists = xbmcvfs.exists(file)
        folder_exists = xbmcvfs.exists(folder_movies)
        tv_shows_folder = xbmcvfs.exists(folder_tvshows)
        # Only fetch when not cached yet and the title was actually exported
        # as either a movie or a show.
        if not file_exists and (folder_exists or tv_shows_folder):
            # Download in the background so the caller is not blocked.
            thread = threading.Thread(target=self.fetch_url, args=(url, file))
            thread.start()
        # NOTE(review): returns True even when no download was started.
        return True
def fetch_url(self, url, file):
f = xbmcvfs.File(file, 'wb')
f.write(requests.get(url).content)
f.write(url)
f.close()
def get_previewimage(self, title):
"""Load thumb image which is shown in exported
Parameters
----------
title : :obj:`str`
Filename based on title
url : :obj:`str`
Image url
Returns
-------
obj:`int`
image of given title if exists
"""
title = re.sub(r'[?|$|!|:|#]', r'', title)
imgfile = title + '.jpg'
file = os.path.join(self.imagecache_path, imgfile)
if xbmcvfs.exists(file):
return file
return self.kodi_helper.default_fanart
| StarcoderdataPython |
1690874 | #!python3
# -*- coding:utf-8 -*-
# author: difosschan
#
__all__ = (
'run_shell',
'get_file_info',
'using'
)
import os
from typing import *
## Improved from <https://github.com/difosschan/difoss-pybase.git>
from subprocess import Popen, PIPE
import platform
# @return return_code, stdout, stderr
def run_shell(cmd, cwd=None, callback_after_run: Callable[[str, str, int, str, str], Any] = None) -> Tuple[int, str, str]:
    """Run *cmd* through the shell and capture its exit code and output.

    Fix: the original called ``p.wait()`` before ``p.communicate()``;
    with ``stdout=PIPE``/``stderr=PIPE`` that can deadlock once a pipe
    buffer fills (documented in the ``subprocess`` module).
    ``communicate()`` both drains the pipes and waits for exit.

    :param cmd: shell command line (executed with ``shell=True``)
    :param cwd: optional working directory for the child process
    :param callback_after_run: optional hook called as
        ``callback(cmd, cwd, code, stdout, stderr)`` after completion
    :return: ``(return_code, stdout, stderr)`` with trailing newlines stripped
    """
    close_fds = not (platform.system() == 'Windows')  # WTF: close_fds=True NOT supported on windows.
    p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE, close_fds=close_fds, cwd=cwd)
    stdout, stderr = p.communicate()
    stdout = str(stdout, 'utf-8').rstrip('\n')
    stderr = str(stderr, 'utf-8').rstrip('\n')
    code = p.returncode
    if callback_after_run:
        callback_after_run(cmd, cwd, code, stdout, stderr)
    return (code, stdout, stderr)
def get_file_info(fullname):
    """Split *fullname* into its directory, base name and extension.

    :return: dict with keys ``short_name`` (base name without extension),
        ``extension`` (including the leading dot, or ``''``) and ``path``
        (the directory part, or ``''``).
    """
    directory, base_name = os.path.split(fullname)
    stem, extension = os.path.splitext(base_name)
    return {'short_name': stem, 'extension': extension, 'path': directory}
try:
    import resource

    def using():
        """Return this process's resource usage (Unix only).

        Keys: ``user_time`` / ``sys_time`` in seconds and ``mem(mb)``.
        NOTE(review): ru_maxrss is kilobytes on Linux but *bytes* on
        macOS, so the MB conversion is only correct on Linux -- confirm
        for other platforms.
        """
        usage = resource.getrusage(resource.RUSAGE_SELF)
        return {
            'user_time': usage[0],
            'sys_time': usage[1],
            'mem(mb)': usage[2] / 1024.0,
        }
except ImportError:
    # `resource` is Unix-only (missing on Windows): degrade to an empty
    # report instead of swallowing arbitrary errors with a bare except.
    def using():
        """Fallback when the `resource` module is unavailable."""
        return {}
| StarcoderdataPython |
3319704 | <filename>test/acoustics/3d/acoustics.py
#!/usr/bin/env python
# encoding: utf-8
import numpy as np
def acoustics3D(iplot=False,htmlplot=False,use_petsc=False,outdir='./_output',solver_type='classic',test='hom'):
    """
    Example python script for solving the 3d acoustics equations.

    Builds a pyclaw (or petclaw) model of 3D linear acoustics, runs it to
    t = 2.0 and returns either an L1 self-difference of the pressure field
    (test='hom') or the final pressure field itself (test='het').
    """
    # Select the serial (pyclaw) or PETSc-parallel (petclaw) backend.
    if use_petsc:
        import petclaw as pyclaw
    else:
        import pyclaw

    if solver_type=='classic':
        solver=pyclaw.ClawSolver3D()
    else:
        raise Exception('Unrecognized solver_type.')

    # Periodic boundary conditions on all six faces by default.
    solver.bc_lower[0]=pyclaw.BC.periodic
    solver.bc_upper[0]=pyclaw.BC.periodic
    solver.bc_lower[1]=pyclaw.BC.periodic
    solver.bc_upper[1]=pyclaw.BC.periodic
    solver.bc_lower[2]=pyclaw.BC.periodic
    solver.bc_upper[2]=pyclaw.BC.periodic

    solver.aux_bc_lower[0]=pyclaw.BC.periodic
    solver.aux_bc_upper[0]=pyclaw.BC.periodic
    solver.aux_bc_lower[1]=pyclaw.BC.periodic
    solver.aux_bc_upper[1]=pyclaw.BC.periodic
    solver.aux_bc_lower[2]=pyclaw.BC.periodic
    solver.aux_bc_upper[2]=pyclaw.BC.periodic

    # Two test setups: homogeneous 1D-like pulse vs. heterogeneous sphere.
    if test=='hom':
        solver.dim_split=True
        mx=256; my=4; mz=4
        zr = 1.0  # Impedance in right half
        cr = 1.0  # Sound speed in right half
    elif test=='het':
        solver.dim_split=False
        # Reflecting walls on the lower faces for the heterogeneous case.
        solver.bc_lower[0] =pyclaw.BC.reflecting
        solver.bc_lower[1] =pyclaw.BC.reflecting
        solver.bc_lower[2] =pyclaw.BC.reflecting
        solver.aux_bc_lower[0]=pyclaw.BC.reflecting
        solver.aux_bc_lower[1]=pyclaw.BC.reflecting
        solver.aux_bc_lower[2]=pyclaw.BC.reflecting
        mx=30; my=30; mz=30
        zr = 2.0  # Impedance in right half
        cr = 2.0  # Sound speed in right half

    solver.mwaves = 2
    solver.limiters = pyclaw.limiters.tvd.MC

    # Initialize grid
    x = pyclaw.Dimension('x',-1.0,1.0,mx)
    y = pyclaw.Dimension('y',-1.0,1.0,my)
    z = pyclaw.Dimension('z',-1.0,1.0,mz)
    grid = pyclaw.Grid([x,y,z])

    meqn = 4
    maux = 2 # density, sound speed
    state = pyclaw.State(grid,meqn,maux)

    zl = 1.0 # Impedance in left half
    cl = 1.0 # Sound speed in left half

    grid.compute_c_center()
    X,Y,Z = grid._c_center

    # Piecewise-constant medium split at x = 0.
    state.aux[0,:,:,:] = zl*(X<0.) + zr*(X>=0.) # Impedance
    state.aux[1,:,:,:] = cl*(X<0.) + cr*(X>=0.) # Sound speed

    # Initial pressure pulse centered at (x0, y0, z0).
    x0 = -0.5; y0 = 0.; z0 = 0.
    if test=='hom':
        r = np.sqrt((X-x0)**2)
        width=0.2
        state.q[0,:,:,:] = (np.abs(r)<=width)*(1.+np.cos(np.pi*(r)/width))
    elif test=='het':
        r = np.sqrt((X-x0)**2 + (Y-y0)**2 + (Z-z0)**2)
        width=0.1
        state.q[0,:,:,:] = (np.abs(r-0.3)<=width)*(1.+np.cos(np.pi*(r-0.3)/width))
    # Velocities start at rest.
    state.q[1,:,:,:] = 0.
    state.q[2,:,:,:] = 0.
    state.q[3,:,:,:] = 0.

    claw = pyclaw.Controller()
    claw.keep_copy = True
    claw.solution = pyclaw.Solution(state)
    claw.solver = solver
    claw.outdir=outdir

    # Solve
    claw.tfinal = 2.0
    status = claw.run()

    if htmlplot: pyclaw.plot.html_plot(outdir=outdir,format=claw.output_format)
    if iplot: pyclaw.plot.interactive_plot(outdir=outdir,format=claw.output_format)

    #if use_petsc:
    #    pinitial=claw.frames[0].state.gqVec.getArray().reshape([state.meqn,grid.ng[0],grid.ng[1],grid.ng[2]],order='F')[0,:,:,mz/2]
    #    pfinal=claw.frames[10].state.gqVec.getArray().reshape([state.meqn,grid.ng[0],grid.ng[1],grid.ng[2]],order='F')[0,:,:,mz/2]
    #else:
    #    pinitial=claw.frames[0].state.q[0,:,:,mz/2]
    #    pfinal=claw.frames[10].state.q[0,:,:,mz/2]
    #import matplotlib.pyplot as plt
    #for i in range(claw.nout):
    #    plt.pcolor(claw.frames[i].state.q[0,:,:,mz/2])
    #    plt.figure()
    #plt.show()

    # Flatten pressure snapshots at the start, middle and end of the run.
    if use_petsc:
        pinitial=claw.frames[0].state.gqVec.getArray().reshape([state.meqn,grid.ng[0],grid.ng[1],grid.ng[2]],order='F')[0,:,:,:].reshape(-1)
        pmiddle=claw.frames[claw.nout/2].state.gqVec.getArray().reshape([state.meqn,grid.ng[0],grid.ng[1],grid.ng[2]],order='F')[0,:,:,:].reshape(-1)
        pfinal=claw.frames[claw.nout].state.gqVec.getArray().reshape([state.meqn,grid.ng[0],grid.ng[1],grid.ng[2]])[0,:,:,:].reshape(-1)
    else:
        pinitial=claw.frames[0].state.q[0,:,:,:].reshape(-1)
        pmiddle =claw.frames[3].state.q[0,:,:,:].reshape(-1)
        pfinal  =claw.frames[claw.nout].state.q[0,:,:,:].reshape(-1)

    # Cell-volume-weighted L1 differences against the initial state.
    final_difference =np.prod(grid.d)*np.linalg.norm(pfinal-pinitial,ord=1)
    middle_difference=np.prod(grid.d)*np.linalg.norm(pmiddle-pinitial,ord=1)

    if test=='hom':
        return final_difference
    elif test=='het':
        return pfinal
if __name__=="__main__":
    # Script entry point: let pyclaw parse CLI keyword args (iplot=1, ...)
    # and dispatch them to acoustics3D.
    import sys
    from pyclaw.util import run_app_from_main
    output = run_app_from_main(acoustics3D)
| StarcoderdataPython |
96969 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# *********************************************************#
# @@ScriptName: mongo.py
# @@Author: Fang.Li<<EMAIL>>
# @@Create Date: 2013-07-10 10:16:52
# @@Modify Date: 2014-03-13 18:44:40
# @@Function:
# *********************************************************#
import sys
import time
import urllib2
try:
from libs import minjson
except ImportError:
import minjson
def printStats(metric, ts, value, tag=""):
    # Emit one OpenTSDB-style metric line: "mongo.<metric> <ts> <value> <tag>".
    # Silently skipped when value is None.  Python-2 code (print statement).
    if value is not None:
        print "mongo.%s %i %s %s" % (metric.lower(), int(ts), str(value), str(tag))
def memItems(s):
    """Return True when *s* names one of the mem-section fields we report."""
    return s in ("resident", "virtual", "mapped", "mappedWithJournal")
def getServerStatus():
    # Query mongod's built-in HTTP status endpoint on localhost and return
    # the parsed "serverStatus" section.  Python-2 code (urllib2, minjson).
    raw = urllib2.urlopen("http://127.0.0.1:28017/_status").read()
    return minjson.read(raw)["serverStatus"]
def doData():
    # Collect one snapshot of mongod metrics and print them to stdout.
    # Python-2 code (dict.iteritems, print statement via printStats).
    ts = time.time()
    try:
        ss = getServerStatus()
    except:
        # mongod unreachable: exit quietly rather than dump a traceback.
        # NOTE(review): bare except also hides unrelated errors.
        sys.exit()
    # Get opcounters
    for k, v in ss["opcounters"].iteritems():
        printStats('opcounters.' + str(k), ts, str(v), "cf_datatype=counter")
    # Get Memory loc
    for k, v in ss["mem"].iteritems():
        if memItems(k):
            printStats("mem." + str(k), ts, str(v), "cf_datatype=gauge")
    # Print current connections
    printStats("connections.count", ts, str(ss["connections"]["current"]), "cf_datatype=gauge")
if __name__ == "__main__":
    # Script entry point: emit one metrics snapshot and exit.
    doData()
| StarcoderdataPython |
10553 | <reponame>dongyan1024/overtime<filename>tfl_data.py<gh_stars>1-10
import overtime as ot
# Five-minute sample times spanning a one-hour afternoon window.
times = ['14:00','14:05', '14:10', '14:15', '14:20', '14:25', '14:30', '14:35', '14:40', '14:45', '14:50', '14:55']
# Build an overtime temporal-network input from four London Underground
# lines, in both directions, at the times above.
tfl_data = ot.TflInput(['victoria', 'central', 'bakerloo', 'piccadilly'], ['inbound', 'outbound'], times)
| StarcoderdataPython |
3317183 | <gh_stars>10-100
import struct
import typing
import collections.abc
from .etc import TypeSignature, EncodeError
# Global dispatch table: Python type -> encoder function.
encoders = {}


def register(input_type: collections.abc.Sequence) -> typing.Callable:
    """
    simple decorator for type dispatch

    Registers the decorated encoder under every type in *input_type*,
    skipping registration when *input_type* itself already appears as a
    key in the table.
    """
    def decorator(function):
        if input_type not in encoders:
            for registered_type in input_type:
                encoders[registered_type] = function
        return function
    return decorator
def encode_element_name(name: str) -> bytes:
    """Encode an element name as a NUL-terminated UTF-8 cstring.

    Accepts str or bytes; raises EncodeError when the name contains NUL.
    """
    raw = name.encode("utf-8") if isinstance(name, str) else name
    if b"\x00" in raw:
        raise EncodeError("null contained in name")
    return raw + b"\x00"
def encode_element(name: str, element) -> bytes:
    """Serialize one element by dispatching on type(element)."""
    handler = encoders.get(type(element))
    if handler is None:
        raise EncodeError(f"No encoder for : {type(element)}")
    return handler(name, element)
def encode_document(document):
    """Serialize a mapping as a top-level BSON document:
    int32 total length + concatenated elements + trailing NUL byte.
    """
    payload = b"".join(encode_element(key, document[key]) for key in document.keys())
    return struct.pack(f"<i{len(payload)}sb", len(payload) + 5, payload, 0)
@register((str,))
def encode_string(name: str, value: str) -> bytes:
    """Encode a str as a BSON string element:
    int32 byte length (including NUL) + UTF-8 bytes + NUL.
    """
    payload = value.encode("utf-8")
    packed = struct.pack(f"<i{len(payload)}sb", len(payload) + 1, payload, 0)
    return TypeSignature.string + encode_element_name(name) + packed
@register((bool,))
def encode_bool(name: str, value: bool) -> bytes:
    """Encode a bool as a single-byte BSON boolean element."""
    flag = struct.pack("<b", 1 if value else 0)
    return TypeSignature.bool + encode_element_name(name) + flag
@register((type(None),))
def encode_null(name: str, value: None) -> bytes:
    """Encode a BSON null element (type byte + name only, no payload).

    Fix: previously registered under the *value* ``None``, but
    encode_element dispatches on ``type(element)`` (NoneType), so the
    encoder was never found for None values.
    """
    return TypeSignature.null + encode_element_name(name)
@register((int,))
def encode_int(name: str, value: int) -> bytes:
    """Encode an int as the smallest fitting BSON integer element
    (int32, int64, or uint64 for large positive values).

    Fix: the upper bounds were off by one -- the original accepted
    ``value == 1 << 31`` / ``1 << 63``, which overflow struct's "<i" /
    "<q" formats at pack time.
    """
    if -1 << 31 <= value <= (1 << 31) - 1:
        return TypeSignature.int32 + encode_element_name(name) + struct.pack("<i", value)
    elif -1 << 63 <= value <= (1 << 63) - 1:
        return TypeSignature.int64 + encode_element_name(name) + struct.pack("<q", value)
    elif 0 <= value <= (1 << 64) - 1:
        return TypeSignature.uint64 + encode_element_name(name) + struct.pack("<Q", value)
    else:
        raise EncodeError("bson only support (-1 << 63) ~ (1 << 64) - 1 (int32, int64, uint64)")
@register((float,))
def encode_double(name: str, value: float) -> bytes:
    """Encode a float as a BSON double element (little-endian IEEE-754)."""
    payload = struct.pack("<d", value)
    return TypeSignature.double + encode_element_name(name) + payload
@register((bytes,))
def encode_binary(name: str, value: bytes) -> bytes:
    """Encode bytes as a BSON binary element: int32 length + subtype 0x00 + data."""
    header = struct.pack("<ib", len(value), 0)
    return TypeSignature.binary + encode_element_name(name) + header + value
@register((list, tuple))
def encode_list(name: str, value: (set, list)) -> bytes:
    """Encode a sequence as a BSON array (a document keyed "0", "1", ...).

    NOTE(review): the annotation says (set, list) but the decorator
    registers list and tuple -- the annotation looks stale.
    """
    payload = b"".join(
        encode_element(str(index), item) for index, item in enumerate(value))
    return TypeSignature.array + encode_element_name(name) + struct.pack(
        f"<i{len(payload)}sb", len(payload) + 5, payload, 0)
@register((dict,))
def encode_dict(name: str, value: dict) -> bytes:
    """Encode a mapping as an embedded BSON document; non-str/bytes keys
    are stringified for the wire format.

    Fix: the original re-indexed ``value[key]`` *after* replacing a
    non-string key with ``str(key)``, raising KeyError for every
    non-string key.  The item is now captured before the key is rewritten.
    """
    buffer = bytearray()
    for key, item in value.items():
        if type(key) not in (bytes, str):
            key = str(key)
        buffer.extend(encode_element(key, item))
    return TypeSignature.document + encode_element_name(name) + struct.pack(
        f"<i{len(buffer)}sb", len(buffer) + 5, buffer, 0)
| StarcoderdataPython |
3347692 | <gh_stars>1-10
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: gdf
import flatbuffers
class gdf_column(object):
    # Auto-generated FlatBuffers accessor for the `gdf_column` table in the
    # `gdf` namespace; regenerate from the schema instead of hand-editing.
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsgdf_column(cls, buf, offset):
        # Wrap a serialized buffer: read the root table offset and position
        # a fresh accessor at it.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = gdf_column()
        x.Init(buf, n + offset)
        return x

    # gdf_column
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # gdf_column
    def Data(self):
        # Optional nested cudaIpcMemHandle_t table (vtable slot 4);
        # returns None when the field is absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            x = self._tab.Indirect(o + self._tab.Pos)
            from .cudaIpcMemHandle_t import cudaIpcMemHandle_t
            obj = cudaIpcMemHandle_t()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None

    # gdf_column
    def Valid(self):
        # Optional nested cudaIpcMemHandle_t table for the validity mask
        # (vtable slot 6); returns None when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            x = self._tab.Indirect(o + self._tab.Pos)
            from .cudaIpcMemHandle_t import cudaIpcMemHandle_t
            obj = cudaIpcMemHandle_t()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None

    # gdf_column
    def Size(self):
        # uint16 scalar (vtable slot 8); defaults to 0 when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Uint16Flags, o + self._tab.Pos)
        return 0

    # gdf_column
    def Dtype(self):
        # int8 scalar (vtable slot 10); defaults to 0 when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
        return 0

    # gdf_column
    def DtypeInfo(self):
        # Optional nested gdf_dtype_extra_info table (vtable slot 12);
        # returns None when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            x = self._tab.Indirect(o + self._tab.Pos)
            from .gdf_dtype_extra_info import gdf_dtype_extra_info
            obj = gdf_dtype_extra_info()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None
# Generated builder helpers for serializing a gdf_column table: call
# Start, then Add* for each present field, then End (returns the offset).
def gdf_columnStart(builder): builder.StartObject(5)
def gdf_columnAddData(builder, data): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(data), 0)
def gdf_columnAddValid(builder, valid): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(valid), 0)
def gdf_columnAddSize(builder, size): builder.PrependUint16Slot(2, size, 0)
def gdf_columnAddDtype(builder, dtype): builder.PrependInt8Slot(3, dtype, 0)
def gdf_columnAddDtypeInfo(builder, dtypeInfo): builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(dtypeInfo), 0)
def gdf_columnEnd(builder): return builder.EndObject()
| StarcoderdataPython |
1600117 | <reponame>y0-causal-inference/y0
# -*- coding: utf-8 -*-
"""A parser for causaleffect probability expressions based on :mod:`pyparsing`."""
from pyparsing import (
Group,
Optional,
ParseResults,
Suppress,
Word,
alphanums,
alphas,
delimitedList,
nums,
)
from ..craig.utils import _make_probability, _make_q, _unpack
from ...dsl import Variable
def _make_variable(_s, _l, tokens: ParseResults) -> Variable:
    """pyparsing parse action: build a Variable from a parsed name,
    folding an optional numeric subscript into ``name_subscript``.
    """
    variable_name = tokens["name"]
    if "subscript" in tokens:
        variable_name = variable_name + "_" + tokens["subscript"]
    return Variable(name=variable_name)
# Grammar: a variable is an alphanumeric word with an optional "_{digits}"
# subscript, e.g. "X_{1}" -> Variable("X_1").
subscript = Suppress("_{") + Word(nums)("subscript") + Suppress("}")
variable_pe = Word(alphas, alphanums)("name") + Optional(subscript)
variable_pe.setParseAction(_make_variable)
variable_pe.setName("variable")

# Comma-separated variable list; _unpack flattens each pyparsing Group.
variables_pe = delimitedList(Group(variable_pe).setParseAction(_unpack))

_children_pe = Group(variables_pe).setResultsName("children")
_parents_pe = Group(Optional(Suppress("|") + variables_pe)).setResultsName("parents")

# Conditional probability expression: P(children | parents), parents optional.
probability_pe = Suppress("P(") + _children_pe + _parents_pe + Suppress(")")
probability_pe.setParseAction(_make_probability)
probability_pe.setName("probability")

# Q-factor expression in causaleffect's LaTeX-like syntax: Q[\{codomain\}](domain).
qfactor_pe = (
    Suppress("Q[\\{")
    + Group(variables_pe).setResultsName("codomain")
    + Suppress("\\}](")
    + Group(variables_pe).setResultsName("domain")
    + Suppress(")")
)
qfactor_pe.setParseAction(_make_q)
qfactor_pe.setName("qfactor")
| StarcoderdataPython |
4838912 | <filename>inversion/DataIO.py
import json
import os
import random
import warnings
import numpy as np
from inversion.optimizators.optimizations import optimizers_dict
from objects.seismic.seismogram import Seismogram, Trace
from obspy_edited import segy
# Every rock-physics parameter the inversion carries: bulk/shear moduli and
# densities for matrix (m), shale (s) and fluid (f), the porosities
# phi / phi_s, and layer thickness h.
parameters_invers_1 = [
    'Km',
    'Gm',
    'rho_m',
    'Ks',
    'Gs',
    'rho_s',
    'Kf',
    'rho_f',
    'phi',
    'phi_s',
    'h'
]

# Component names as they appear in the JSON input files.
parameters_components_1 = [
    'matrix',
    'shale',
    'fluid'
]

# Elastic properties read per component from the JSON input files.
parameters_elastic_1 = [
    'K',
    'G',
    'rho',
    'volume'
]

# Maps (component, elastic property) -> flat parameter name above.  Note:
# 'matrix' has no 'volume' entry and 'fluid' has no 'G' entry by design.
keys_1 = {
    'matrix': {
        'K': 'Km',
        'G': 'Gm',
        'rho': 'rho_m',
    },
    'shale': {
        'K': 'Ks',
        'G': 'Gs',
        'rho': 'rho_s',
        'volume': 'phi_s'
    },
    'fluid': {
        'K': 'Kf',
        'rho': 'rho_f',
        'volume': 'phi'
    }
}
def get_starter_dict():
    """Return a fresh accumulator: one independent empty list per
    inversion parameter name in parameters_invers_1.
    """
    starter = {}
    for parameter_name in parameters_invers_1:
        starter[parameter_name] = []
    return starter
def read_input_file(file_name):
    """Read a JSON model description and assemble the inversion setup.

    Returns a 4-tuple:
      - nlayers: number of layers in the model
      - params_all_dict: dict of parameter name -> per-layer value list
        (optimized parameters get a random start inside their bounds)
      - params_to_optimize: list of {param_name: layer_index} markers
      - bounds: numpy array of [min, max] rows, one per optimized parameter
    """
    with open(file_name, 'r') as f:
        input = json.load(f)
    nlayers = len(input['model']['layers'])
    params_all_dict = get_starter_dict()
    params_all_dict['nlayers'] = nlayers
    params_to_optimize = []
    bounds_to_optimize = []
    for layer in input['model']['layers']:
        params_all_dict['h'].append(layer['H'])
        for pc in parameters_components_1:
            for pe in parameters_elastic_1:
                # 'matrix' has no volume entry; 'fluid' has no shear modulus.
                if (pc == 'matrix' and pe == 'volume') or (pc == 'fluid' and pe == 'G'):
                    continue
                key = keys_1[pc][pe]
                if layer[pc][pe]['optimize']:
                    params_to_optimize.append({key: layer['index']})
                    bound_min = layer[pc][pe]['min']
                    bound_max = layer[pc][pe]['max']
                    bounds_to_optimize.append(np.array([bound_min, bound_max]))
                    # Optimized parameters start from a random point in-bounds.
                    params_all_dict[key].append(random.uniform(bound_min, bound_max))
                else:
                    params_all_dict[key].append(layer[pc][pe]['value'])
    # The last layer carries no thickness -- presumably a half-space;
    # TODO(review): confirm against the model convention.
    del(params_all_dict['h'][-1])
    return nlayers, params_all_dict, params_to_optimize, np.array(bounds_to_optimize)
def read_input_fp_file(model_folder):
    """Read <model_folder>/input/input_fp.json (forward-problem config).

    Unlike read_input_file, parameter values are always taken from the
    file ('value'), never randomized; bounds are still collected for the
    parameters flagged for optimization.  Also returns the observation
    settings block.
    """
    file_name = os.path.join(model_folder, 'input', 'input_fp.json')
    with open(file_name, 'r') as f:
        input = json.load(f)
    nlayers = len(input['model']['layers'])
    params_all_dict = get_starter_dict()
    params_all_dict['nlayers'] = nlayers
    params_to_optimize = []
    bounds_to_optimize = []
    for layer in input['model']['layers']:
        params_all_dict['h'].append(layer['H'])
        for pc in parameters_components_1:
            for pe in parameters_elastic_1:
                # 'matrix' has no volume entry; 'fluid' has no shear modulus.
                if (pc == 'matrix' and pe == 'volume') or (pc == 'fluid' and pe == 'G'):
                    continue
                key = keys_1[pc][pe]
                params_all_dict[key].append(layer[pc][pe]['value'])
                if layer[pc][pe]['optimize']:
                    params_to_optimize.append({key: layer['index']})
                    bounds_to_optimize.append(np.array([layer[pc][pe]['min'], layer[pc][pe]['max']]))
    # The last layer carries no thickness (see read_input_file).
    del(params_all_dict['h'][-1])
    observation_params = input['observation']
    return nlayers, params_all_dict, params_to_optimize, np.array(bounds_to_optimize), observation_params
def create_start_stop_indexes(indexes_params, x_arr, dt):
    """Convert window parameters into per-trace start/stop sample indexes.

    For type 'from_model' the two-way travel time of a horizontal
    reflector at depth h with velocity v, sqrt(x^2 + 4*h^2) / v, is
    divided by the sample interval dt and truncated to integers.
    Returns None for any other 'type'.
    """
    if indexes_params['type'] != 'from_model':
        return None
    # TODO: valid only when the boundary is horizontal!
    def travel_time(window):
        return np.sqrt(x_arr ** 2 + 4 * window['h'] * window['h']) / window['v']
    start_indexes = (travel_time(indexes_params['values']['start']) / dt).astype(int)
    stop_indexes = (travel_time(indexes_params['values']['stop']) / dt).astype(int)
    return start_indexes, stop_indexes
def read_input_ip_file(model_folder, x_arr, dt):
    """Read <model_folder>/input/input_ip.json (inverse-problem config).

    Returns (observed seismogram, target error, instantiated optimizers,
    start indexes, stop indexes).
    """
    input_file_name = os.path.join(model_folder, 'input', 'input_ip.json')
    with open(input_file_name, 'r') as f:
        input = json.load(f)
    input = input['inversion_params']
    seismogram = read_segy(os.path.join(model_folder, 'input', input['segy_observed']))
    err = input['error']
    # Instantiate only the optimizers flagged "use" in the config.
    optimizers = [optimizers_dict[opt['name']](**opt['params']) for opt in input['optimizers'] if opt['use']]
    start_indexes, stop_indexes = create_start_stop_indexes(input['seismic_indexes'], x_arr, dt)
    return seismogram, err, optimizers, start_indexes, stop_indexes
def read_inversion_result_file(result_folder):
    """Parse <result_folder>/model_values.txt back into inversion results.

    Each data line looks like ``Km_true[0] = 1.0, Km_inversed[0] = 2.0``;
    the last two lines (duration and error summary) are skipped.

    Fix/cleanup: the original assigned a throwaway ``rr`` three times and
    re-split the same line repeatedly; the parsing is now done once per
    line with the same semantics.

    :return: (params_optimized, vals) -- list of {param_name: layer_index}
        and the matching list of inverted values.
    """
    file_name = os.path.join(result_folder, 'model_values.txt')
    with open(file_name, 'r') as f:
        rows = f.readlines()
    params_optimized = []
    vals = []
    for row in rows[:-2]:
        # Right-hand segment after ", ": "Km_inversed[0] = 2.0"
        segment = row.split(', ')[1]
        lhs = segment.split(' = ')[0]
        param = lhs.split('[')[0][:-9]          # strip trailing "_inversed"
        index = float(lhs.split('[')[1][:-1])   # text between '[' and ']'
        value = float(segment.split(' = ')[1])
        params_optimized.append({param: index})
        vals.append(value)
    return params_optimized, vals
def get_results_files_list(folder):
    """Return the sorted numeric suffixes of result_<N> entries in *folder*,
    skipping any entry whose name mentions 'average'.
    """
    numbers = []
    for entry in os.listdir(folder):
        lowered = entry.lower()
        if 'result' in lowered and 'average' not in lowered:
            numbers.append(int(entry.split('_')[-1]))
    return sorted(numbers)
def create_res_folder(folder):
    """Create the next <folder>/output/result_<N> directory and return N.

    N is one past the highest existing result number (1 when none exist).
    """
    base_folder = os.path.join(folder, 'output')
    existing_numbers = get_results_files_list(base_folder)
    current_res_number = existing_numbers[-1] + 1 if existing_numbers else 1
    res_folder = os.path.join(base_folder, 'result_{}'.format(current_res_number))
    if not os.path.exists(res_folder):
        os.makedirs(res_folder)
    return current_res_number
def write_output_file(model_folder, params_all_, inversed_model, params_to_optimize, inverse_duration=None, res_folder_postfix=None):
    """Write a model_values.txt report comparing true vs inverted values.

    One line per optimized parameter, optionally the inversion duration,
    and the average relative error over parameters with nonzero true value.
    """
    base_folder = os.path.join(model_folder, "output")
    # NOTE(review): debug print left in -- consider logging instead.
    print(inversed_model)
    res_folder = os.path.join(base_folder, 'result_{}'.format(res_folder_postfix))
    if not os.path.exists(res_folder):
        os.makedirs(res_folder)
    file_name = os.path.join(res_folder, 'model_values.txt')
    rows = []
    errs = []
    for m, p in zip(inversed_model, params_to_optimize):
        # Each marker is a single-entry dict: {param_name: layer_index}.
        key = list(p.keys())[0]
        val = list(p.values())[0]
        true_val = params_all_[key][val]
        if true_val != 0:
            # Relative error is only defined for nonzero true values.
            errs.append(abs((true_val - m) / true_val))
        rows.append('{}_true[{}] = {}, {}_inversed[{}] = {}\n'.format(key, val, true_val,
                                                                      key, val, m))
    if inverse_duration is not None:
        rows.append('inversion duration: {} min\n'.format(inverse_duration))
    # NOTE(review): np.average([]) yields nan when every true value was 0.
    err_average = np.average(errs)
    rows.append('Difference between true values and inverted values: {}\n'.format(err_average))
    with open(file_name, 'w') as f:
        f.writelines(rows)
def write_segy(seismogram, filename):
    """Write *seismogram* to *filename* by cloning headers from a template.

    NOTE(review): depends on the hard-coded template "../Files/example.sgy"
    (relative to the working directory) and creates/removes a temporary
    file "tmp_segy.sgy" in the working directory.
    """
    segy_obj = segy._read_segy("../Files/example.sgy")
    tmp_name = "tmp_segy.sgy"
    ntraces = seismogram.ntraces
    # Duplicate the template's first trace so the file has the right count,
    # then round-trip through disk to materialize the copies.
    segy_obj.traces = [segy_obj.traces[0]] * ntraces
    segy_obj.write(tmp_name)
    segy_obj = segy._read_segy(tmp_name)
    segy_obj.binary_file_header.unassigned_1 = b""
    segy_obj.binary_file_header.unassigned_2 = b""
    segy_obj.binary_file_header.number_of_samples_per_data_trace = -1
    segy_obj.binary_file_header.number_of_data_traces_per_ensemble = -1
    segy_obj.binary_file_header.sample_interval_in_microseconds = int(seismogram.traces[0].dt)
    # TODO: fill the sou_x and rec_x headers properly
    for i in range(ntraces):
        segy_obj.traces[i].data = np.array(seismogram.traces[i].values, dtype=np.float32)
        segy_obj.traces[i].header.source_coordinate_x = 0
        segy_obj.traces[i].header.source_coordinate_y = 0
        segy_obj.traces[i].header.ensemble_number = int(i)
        segy_obj.traces[i].header.original_field_record_number = int(i)
        segy_obj.traces[i].header.energy_source_point_number = int(i)
        # Offset scaled by 1000 on write; read_segy applies the inverse 0.001.
        segy_obj.traces[i].header.distance_from_center_of_the_source_point_to_the_center_of_the_receiver_group = \
            int(seismogram.traces[i].offset*1000)
        segy_obj.traces[i].header.number_of_samples_in_this_trace = len(seismogram.traces[i].values)
        segy_obj.traces[i].header.sample_interval_in_ms_for_this_trace = int(seismogram.traces[i].dt * 1000)
        # segy_obj.traces[i]
    os.remove(tmp_name)
    segy_obj.write(filename)
def read_segy(filename):
    """Read a SEG-Y file into a Seismogram of Trace objects.

    The sample interval and offset headers are multiplied by 0.001,
    undoing the *1000 scaling applied in write_segy.
    """
    segy_obj = segy._read_segy(filename)
    warnings.warn("Offset value is multiplying with 0.001!!!")
    traces = [Trace(trace.data, trace.header.sample_interval_in_ms_for_this_trace * 0.001,
                    trace.header.distance_from_center_of_the_source_point_to_the_center_of_the_receiver_group * 0.001)
              for trace in segy_obj.traces]
    return Seismogram(traces)
| StarcoderdataPython |
1730441 | #!/usr/bin/env python
# Name: utils.py
# Time:9/27/16 2:57 PM
# Author:luo1fly
from xml.etree.ElementTree import ElementTree, ElementTree, Element, SubElement, tostring
from PIL import Image
import subprocess
import os
import time
# import custom modules above
# Web root under which per-SKU image folders live.
ROOT_PATH = '/www/htdocs'


def generate_xml_by_sku(sku_lst):
    """Build an XML document describing the image files of each SKU.

    For every SKU, files matching images/<d1>/<d2>/<d3>/sku_<sku>_* under
    ROOT_PATH are listed (via the shell, version-sorted) and each image's
    name, dimensions, mtime and size are recorded.  Returns the serialized
    XML as bytes.
    """
    sku_info = ElementTree()
    sku_img = Element('sku_img', {'version': '1.0'})
    sku_info._setroot(sku_img)
    # Three mirror hosts / link prefixes emitted verbatim.
    SubElement(sku_img, 'host').text = 'img.xxx.com'
    SubElement(sku_img, 'host').text = 'img.xxx.com'
    SubElement(sku_img, 'host').text = 'img.xxx.com'
    SubElement(sku_img, 'linkprefix').text = 'productimages'
    SubElement(sku_img, 'linkprefix').text = 'productImages'
    SubElement(sku_img, 'linkprefix').text = 'ProductImages'
    for s in sku_lst:
        sku = Element('sku')
        sku_img.append(sku)
        SubElement(sku, 'no').text = s
        # First three digits of the SKU shard the directory layout.
        # NOTE(review): the SKU is interpolated into a shell=True command --
        # untrusted SKU strings would allow shell injection.
        cmd = 'ls -v %s/images/%s/%s/%s/sku_%s_*' % (ROOT_PATH, s[0], s[1], s[2], s)
        try:
            b = subprocess.check_output(cmd, shell=True)
        except subprocess.CalledProcessError as e:
            # No matching files (ls failed): skip this SKU.
            continue
        path_lst = b.decode('utf8').split('\n')
        print(path_lst)
        for path in path_lst:
            if path:
                try:
                    im = Image.open(path)
                except FileNotFoundError as e:
                    continue
                img = Element('img')
                sku.append(img)
                SubElement(img, 'name').text = os.path.basename(path)
                # sku_123796_1.jpg
                width, height = im.size
                SubElement(img, 'width').text = str(width)
                # 600
                SubElement(img, 'height').text = str(height)
                # 600
                tm = os.stat(path).st_mtime
                mtm = time.gmtime(tm)
                str_utc = time.strftime('%Y-%m-%d %H:%M:%S', mtm)
                SubElement(img, 'last-modified').text = str_utc
                # 2016-09-22 08:50:27
                size = os.stat(path).st_size
                SubElement(img, 'size').text = str(size)
                # 27625
    return tostring(sku_img)
| StarcoderdataPython |
3237004 | <reponame>hazelcast-incubator/hazelcast-python-client
__author__ = 'jonathanbrodie'
import ctypes
'''
Helper module to be used with codecs to make sure Python stuff gets explicitly converted to certain data types
'''
def encodeboolean(bool):
    """Encode a boolean (or small int) as a single native-endian byte;
    False encodes as 0, any other value is passed to c_uint8 directly.

    NOTE(review): the parameter shadows the builtin ``bool``; kept for
    call-compatibility.
    """
    raw = 0 if bool is False else bool
    return bytearray(ctypes.c_uint8(raw))
# Fixed-width numeric encoders: each wraps the value in the matching
# ctypes type and returns its raw native-endian bytes (little-endian on
# the usual x86/ARM targets) as a bytearray.
def encodeuint8(target):
    return bytearray(ctypes.c_uint8(target))


def encodeuint16(target):
    return bytearray(ctypes.c_uint16(target))


def encodeuint32(target):
    return bytearray(ctypes.c_uint32(target))


def encodeuint64(target):
    return bytearray(ctypes.c_uint64(target))


def encodeint8(target):
    return bytearray(ctypes.c_int8(target))


def encodeint16(target):
    return bytearray(ctypes.c_int16(target))


def encodeint32(target):
    return bytearray(ctypes.c_int32(target))


def encodeint64(target):
    return bytearray(ctypes.c_int64(target))


def encodefloat(target):
    # 32-bit IEEE-754 single precision.
    return bytearray(ctypes.c_float(target))


def encodedouble(target):
    # 64-bit IEEE-754 double precision.
    return bytearray(ctypes.c_double(target))
def encodestring(string):
    """Encode *string* as UTF-8 bytes preceded by a uint32 byte-length prefix."""
    encoded = string.encode("UTF8")
    return encodeuint32(len(encoded)) + encoded
def encodebytes(bytes):
    """Prefix *bytes* with a uint32 length header.

    NOTE(review): the parameter shadows the builtin ``bytes``; kept for
    call-compatibility.
    """
    return encodeuint32(len(bytes)) + bytes
| StarcoderdataPython |
156658 | <gh_stars>0
def diff_tests(input_files):
    """Define diff-based golden tests for the //main:as-tree tool.

    For every file in *input_files* this macro creates:
      * a genrule producing ``<file>.actual`` from the tool's output,
      * an sh_test (``test_<file>``) diffing ``<file>.actual`` vs ``<file>.exp``,
      * a manual sh_test (``update_<file>``) that overwrites the ``.exp`` file.
    It also declares two suites: ``:test`` (all diff tests) and ``:update``
    (all manual update targets).
    """
    tests = []
    updates = []
    for input_file in input_files:
        # Run the tool over the input and capture its stdout as <file>.actual.
        genrule_name = "gen_{}.actual".format(input_file)
        actual_file = "{}.actual".format(input_file)
        native.genrule(
            name = genrule_name,
            srcs = [input_file],
            outs = [actual_file],
            tools = ["//main:as-tree"],
            cmd = "$(location //main:as-tree) $(location {input_file}) > $(location {actual_file})".format(
                input_file = input_file,
                actual_file = actual_file
            ),
            testonly = True,
            # This is manual to avoid being caught with `//...`
            tags = ["manual"],
        )
        # Golden-file comparison: expected (.exp) vs freshly generated (.actual).
        test_name = "test_{}".format(input_file)
        exp_file = "{}.exp".format(input_file)
        native.sh_test(
            name = test_name,
            srcs = ["diff_one.sh"],
            args = [
                "$(location {})".format(exp_file),
                "$(location {})".format(actual_file),
            ],
            data = [
                exp_file,
                actual_file,
            ],
            size = "small",
            tags = [],
        )
        # Note the reversed argument order: here .actual overwrites .exp.
        update_name = "update_{}".format(input_file)
        native.sh_test(
            name = update_name,
            srcs = ["update_one.sh"],
            args = [
                "$(location {})".format(actual_file),
                "$(location {})".format(exp_file),
            ],
            data = [
                actual_file,
                exp_file,
            ],
            size = "small",
            tags = [
                # Avoid being caught with `//...`
                "manual",
                # Forces the test to be run locally, without sandboxing
                "local",
                # Unconditionally run this rule, and don't run in the sandbox
                "external",
            ],
        )
        tests.append(test_name)
        updates.append(update_name)
    native.test_suite(
        name = "test",
        tests = tests,
    )
    native.test_suite(
        name = "update",
        tests = updates,
        tags = ["manual"],
    )
| StarcoderdataPython |
151521 |
import scipy.misc as misc
import torch
import copy
import torchvision.models as models
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
# FCN Net model class for semantic segmentation
##############################################This is a standart FCN with the lat layer split into prediction of binary map for every class########################################################################333
class Net(nn.Module):
    """PSP-style fully convolutional segmentation net with per-class binary heads.

    A pretrained ResNet-101 encoder feeds a Pyramid Scene Parsing (PSP)
    module; encoder features are merged back in via skip connections during
    upsampling, and one 2-channel (background/foreground) conv head is
    created per category in ``CatDict``.
    """

    def __init__(self, CatDict):
        """Build encoder, PSP module, skip/upsample layers and output heads.

        CatDict: mapping whose keys name the segmentation categories; one
        output head is created per key (iteration order defines nothing
        beyond the dict of heads).
        """
        super(Net, self).__init__()
        # ---------------Load pretrained ResNet-101 encoder (the original
        # comment said "Resnet 50", but the code loads resnet101)----------
        self.Encoder = models.resnet101(pretrained=True)
        # ---------------Pyramid Scene Parsing (PSP) layer: process the
        # deepest feature map at several scales in parallel-----------------
        self.PSPScales = [1, 1 / 2, 1 / 4, 1 / 8]
        self.PSPLayers = nn.ModuleList()  # Layers for decoder
        for Ps in self.PSPScales:
            self.PSPLayers.append(nn.Sequential(
                nn.Conv2d(2048, 1024, stride=1, kernel_size=3, padding=1, bias=True)))
        # Squeeze the concatenated 4x1024 PSP outputs down to 512 channels.
        self.PSPSqueeze = nn.Sequential(
            nn.Conv2d(4096, 512, stride=1, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(512),
            nn.ReLU(),
            nn.Conv2d(512, 512, stride=1, kernel_size=3, padding=0, bias=False),
            nn.BatchNorm2d(512),
            nn.ReLU()
        )
        # ------------------Skip connection layers for upsampling-----------
        self.SkipConnections = nn.ModuleList()
        self.SkipConnections.append(nn.Sequential(
            nn.Conv2d(1024, 512, stride=1, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(512),
            nn.ReLU()))
        self.SkipConnections.append(nn.Sequential(
            nn.Conv2d(512, 256, stride=1, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU()))
        self.SkipConnections.append(nn.Sequential(
            nn.Conv2d(256, 256, stride=1, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU()))
        # ------------------Squeeze applied to (concat of upsampled + skip
        # connection features)----------------------------------------------
        self.SqueezeUpsample = nn.ModuleList()
        self.SqueezeUpsample.append(nn.Sequential(
            nn.Conv2d(1024, 512, stride=1, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(512),
            nn.ReLU()))
        self.SqueezeUpsample.append(nn.Sequential(
            nn.Conv2d(256 + 512, 256, stride=1, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU()))
        self.SqueezeUpsample.append(nn.Sequential(
            nn.Conv2d(256 + 256, 256, stride=1, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU()))
        # ----------------Final per-category prediction heads (2 channels:
        # background/foreground binary map)---------------------------------
        self.OutLayersList = nn.ModuleList()
        self.OutLayersDict = {}
        for nm in CatDict:
            self.OutLayersDict[nm] = nn.Conv2d(256, 2, stride=1, kernel_size=3, padding=1, bias=False)
            self.OutLayersList.append(self.OutLayersDict[nm])

    def forward(self, Images, UseGPU=True, TrainMode=True, FreezeBatchNormStatistics=False):
        """Run segmentation on a batch of images.

        Images: numpy array of shape (batch, height, width, 3) -- the two
            transposes below convert NHWC to NCHW.  Channel order is
            presumably RGB (mean constants are named RGBMean) -- TODO
            confirm against callers.
        Returns (OutProbDict, OutLbDict): per-category softmax probability
        maps and per-pixel argmax label maps at the input resolution.
        """
        # ImageNet-style per-channel mean and a coarse shared std.
        RGBMean = [123.68, 116.779, 103.939]
        RGBStd = [65, 65, 65]
        if TrainMode:
            tp = torch.FloatTensor
        else:
            # Inference path converts the whole net to half precision.
            self.half()
            tp = torch.HalfTensor
        InpImages = torch.autograd.Variable(torch.from_numpy(Images.astype(float)), requires_grad=False).transpose(2, 3).transpose(1, 2).type(tp)
        if FreezeBatchNormStatistics:
            self.eval()
        # ---------------Move to GPU or CPU---------------------------------
        if UseGPU:
            InpImages = InpImages.cuda()
            self.cuda()
        else:
            self = self.cpu()
            self.float()
            InpImages = InpImages.type(torch.float).cpu()
        # ----------------Normalize image values----------------------------
        for i in range(len(RGBMean)): InpImages[:, i, :, :] = (InpImages[:, i, :, :] - RGBMean[i]) / RGBStd[i]
        x = InpImages
        SkipConFeatures = []  # Feature maps of encoder layers used for skip connections
        # ---------------Run encoder----------------------------------------
        x = self.Encoder.conv1(x)
        x = self.Encoder.bn1(x)
        x = self.Encoder.relu(x)
        x = self.Encoder.maxpool(x)
        x = self.Encoder.layer1(x)
        SkipConFeatures.append(x)
        x = self.Encoder.layer2(x)
        SkipConFeatures.append(x)
        x = self.Encoder.layer3(x)
        SkipConFeatures.append(x)
        x = self.Encoder.layer4(x)
        # ------------------PSP: rescale, convolve, resize back, concat-----
        PSPSize = (x.shape[2], x.shape[3])  # Size of the original feature map
        PSPFeatures = []
        for i, PSPLayer in enumerate(self.PSPLayers):
            # np.int was removed in NumPy 1.24; the builtin int is equivalent.
            NewSize = (np.array(PSPSize) * self.PSPScales[i]).astype(int)
            if NewSize[0] < 1: NewSize[0] = 1
            if NewSize[1] < 1: NewSize[1] = 1
            y = nn.functional.interpolate(x, tuple(NewSize), mode='bilinear')
            y = PSPLayer(y)
            y = nn.functional.interpolate(y, PSPSize, mode='bilinear')
            PSPFeatures.append(y)
        x = torch.cat(PSPFeatures, dim=1)
        x = self.PSPSqueeze(x)
        # ----------------Upsample and merge with encoder skip features
        # (deepest encoder feature first)-----------------------------------
        for i in range(len(self.SkipConnections)):
            sp = (SkipConFeatures[-1 - i].shape[2], SkipConFeatures[-1 - i].shape[3])
            x = nn.functional.interpolate(x, size=sp, mode='bilinear')
            x = torch.cat((self.SkipConnections[i](SkipConFeatures[-1 - i]), x), dim=1)
            x = self.SqueezeUpsample[i](x)
        # ---------------Per-category binary prediction---------------------
        self.OutProbDict = {}
        self.OutLbDict = {}
        for nm in self.OutLayersDict:
            l = self.OutLayersDict[nm](x)
            l = nn.functional.interpolate(l, size=InpImages.shape[2:4], mode='bilinear')  # Resize to original image size
            Prob = F.softmax(l, dim=1)  # Class probability per pixel
            _, Labels = l.max(1)  # Label per pixel
            self.OutProbDict[nm] = Prob
            self.OutLbDict[nm] = Labels
        return self.OutProbDict, self.OutLbDict
| StarcoderdataPython |
1623556 | import taco.common.exceptions
class AutoScalerWrapperException(taco.common.exceptions.DataDictException):
    """Base class for all auto-scaler wrapper errors in this module."""
    pass
# --- Table ---
class TargetRegistrationException(AutoScalerWrapperException):
    """Raised when registering an auto-scaling target fails."""

    def __init__(self, service_namespace, resource_id, exc):
        details = {
            'resource_id': resource_id,
            'service_namespace': service_namespace
        }
        super().__init__('Failed to register auto scaler',
                         data_dict=details, exc=exc)
class PutPolicyException(AutoScalerWrapperException):
    """Raised when putting a scaling policy fails."""

    def __init__(self, service_namespace, resource_id, exc):
        # The message previously duplicated TargetRegistrationException's
        # 'Failed to register auto scaler' (copy-paste); name the actual
        # operation instead.
        super().__init__('Failed to put scaling policy',
                         data_dict={
                             'resource_id': resource_id,
                             'service_namespace': service_namespace
                         }, exc=exc)
| StarcoderdataPython |
3378094 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
    # Introduces CodingAssignmentActivity: an audit log of view/modify
    # actions on coding assignments, capturing the assignment state at the
    # time of the action.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('coding', '0007_change_assignment_tag_ordering'),
    ]
    operations = [
        migrations.CreateModel(
            name='CodingAssignmentActivity',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('action_time', models.DateTimeField(default=django.utils.timezone.now, db_index=True)),
                ('action', models.CharField(max_length=10, choices=[(b'view', b'View'), (b'modify', b'Modify')])),
                ('state', models.CharField(max_length=10, choices=[(b'NS', b'Not started'), (b'ST', b'Started'), (b'FI', b'Finished')])),
                ('actor', models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
                ('assignment', models.ForeignKey(to='coding.CodingAssignment', on_delete=models.CASCADE)),
            ],
            options={
                'ordering': (b'action_time',),
                'verbose_name_plural': b'coding assignment activities',
            },
            bases=(models.Model,),
        ),
    ]
| StarcoderdataPython |
131459 | <reponame>flyinactor91/AVWX-Engine<gh_stars>10-100
"""
"""
# pylint: skip-file
# mypy: ignore-errors
# stdlib
from datetime import date
from typing import List
# module
from avwx.current.base import Reports
from avwx.parsing import sanitization
from avwx.structs import AirepData
def parse(report: str, issued: date = None) -> AirepData:
    """Sanitize an AIREP report string.

    Field extraction is not implemented yet, so this currently always
    returns None after sanitizing.  The leftover debug ``print`` calls were
    removed so the library does not write to stdout.
    """
    if not report:
        return None
    clean = sanitization.sanitize_report_string(report)
    wxdata = sanitization.sanitize_report_list(clean.split())
    # Scaffolding for the eventual AirepData construction.
    wxresp = {"raw": report, "sanitized": " ".join(wxdata)}
    # TODO: build and return an AirepData instance from wxdata / wxresp.
    return None
class Aireps(Reports):
    """Class to handle aircraft report data"""

    data: List[AirepData] = None

    @staticmethod
    def _report_filter(reports: List[str]) -> List[str]:
        """Removes PIREPs before updating raw_reports"""
        kept = [raw for raw in reports if raw.startswith("ARP")]
        return kept

    async def _post_update(self):
        self.data = []
        for raw_report in self.raw:
            parse(raw_report, self.issued)

    def _post_parse(self):
        self.data = []
        for raw_report in self.raw:
            parse(raw_report, self.issued)
| StarcoderdataPython |
3280695 | <gh_stars>1-10
# Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
''' espnet utils'''
import json
from collections import OrderedDict
import numpy as np
from absl import logging
from espnet.utils.cli_readers import file_reader_helper
from espnet.utils.cli_writers import file_writer_helper
from espnet.utils.training.batchfy import make_batchset as make_batchset_espnet
from espnet.utils.io_utils import LoadInputsAndTargets
from delta import utils
TASK_SET = {'asr': 'asr', 'tts': 'tts'}
#pylint: disable=too-many-locals
#pylint: disable=too-many-arguments
def make_batchset(task,
                  data,
                  batch_size,
                  max_length_in,
                  max_length_out,
                  num_batches=0,
                  batch_sort_key='shuffle',
                  min_batch_size=1,
                  shortest_first=False,
                  batch_bins=0,
                  batch_frames_in=0,
                  batch_frames_out=0,
                  batch_frames_inout=0,
                  batch_strategy='auto'):
  """Make batch set from json dictionary
  if utts have "category" value,
  >>> data = {'utt1': {'category': 'A', 'input': ...},
  ... 'utt2': {'category': 'B', 'input': ...},
  ... 'utt3': {'category': 'B', 'input': ...},
  ... 'utt4': {'category': 'A', 'input': ...}}
  >>> make_batchset(data, batchsize=2, ...)
  [[('utt1', ...), ('utt4', ...)], [('utt2', ...), ('utt3': ...)]]
  Note that if any utts doesn't have "category",
  perform as same as batchfy_by_{batch_strategy}
  :param string task: task type, which in [asr, tts]
  :param Dict[str, Dict[str, Any]] data: dictionary loaded from data.json
  :param int batch_size: maximum number of sequences in a minibatch.
  :param int batch_bins: maximum number of bins (frames x dim) in a minibatch.
  :param int batch_frames_in: maximum number of input frames in a minibatch.
  :param int batch_frames_out: maximum number of output frames in a minibatch.
  :param int batch_frames_inout: maximum number of input+output frames in a minibatch.
  :param str batch_strategy: strategy to count maximum size of batch [auto, seq, bin, frame].
      "auto" : automatically detect make_batch strategy by finding enabled args
      "seq" : create the minibatch that has the maximum number of seqs under batch_size.
      "bin" : create the minibatch that has the maximum number of bins under batch_bins,
          where the "bin" means the number of frames x dim.
      "frame": create the minibatch that has the maximum number of input, output and input+output
          frames under batch_frames_in, batch_frames_out and batch_frames_inout, respectively
  :param int max_length_in: maximum length of input to decide adaptive batch size
  :param int max_length_out: maximum length of output to decide adaptive batch size
  :param int num_batches: # number of batches to use (for debug)
  :param int min_batch_size: minimum batch size (for multi-gpu)
  :param bool shortest_first: Sort from batch with shortest samples to longest if true,
      otherwise reverse
  :param str batch_sort_key: how to sort data before creating minibatches [input, output, shuffle]
  :return: List[List[Tuple[str, dict]]] list of batches
  Reference: https://github.com/espnet/espnet/pull/759/files
             https://github.com/espnet/espnet/commit/dc0a0d3cfc271af945804f391e81cd5824b08725
             https://github.com/espnet/espnet/commit/73018318a65d18cf2e644a45aa725323c9e4a0e6
  """
  assert task in TASK_SET
  # swap_io: if True, use "input" as output and "output" as input in the
  # `data` dict -- this lets the same json layout serve both ASR and TTS.
  swap_io = task == TASK_SET['tts']
  minibatches = make_batchset_espnet(
      data,
      batch_size=batch_size,
      max_length_in=max_length_in,
      max_length_out=max_length_out,
      num_batches=num_batches,
      min_batch_size=min_batch_size,
      shortest_first=shortest_first,
      batch_sort_key=batch_sort_key,
      swap_io=swap_io,
      count=batch_strategy,
      batch_bins=batch_bins,
      batch_frames_in=batch_frames_in,
      batch_frames_out=batch_frames_out,
      batch_frames_inout=batch_frames_inout)
  # for debugging
  if num_batches > 0:
    minibatches = minibatches[:num_batches]
  # Fixed log typo ("minibaches") and use lazy %-formatting.
  logging.info('# minibatches: %d', len(minibatches))
  return minibatches
#pylint: disable=too-many-locals
def get_batches(config, mode):
  ''' make batches of metas and get dataset size

  :param dict config: full experiment config (data/task/solver sections)
  :param str mode: one of utils.TRAIN / utils.EVAL / utils.INFER
  :return: dict with 'data' (list of minibatches of (uttid, meta) tuples)
      and 'n_utts' (total number of utterances)
  '''
  assert mode in (utils.TRAIN, utils.EVAL, utils.INFER)
  # read meta of json
  logging.info("load json data")
  json_path = config['data'][mode]['paths']
  assert len(json_path) == 1
  #pylint: disable=invalid-name
  with open(json_path[0], 'r', encoding='utf-8') as f:
    metas_raw = json.load(f)['utts']
  # sort by utts id
  metas = OrderedDict(sorted(metas_raw.items(), key=lambda t: t[0]))
  # dataset size
  utts = len(metas.keys())
  logging.info('# utts: ' + str(utts))
  # make batchset
  use_sortagrad = config['data']['task']['sortagrad']
  task = config['data']['task']['type']
  assert task in list(TASK_SET.keys())
  # using same json for asr and tts task: for TTS, the roles of src and tgt
  # config sections are swapped.
  if task == TASK_SET['asr']:
    src = 'src'
    tgt = 'tgt'
  elif task == TASK_SET['tts']:
    src = 'tgt'
    tgt = 'src'
  else:
    raise ValueError("task type must int : {} get : {}".format(
        list(TASK_SET.keys()), task))
  maxlen_src = config['data']['task'][src]['max_len']
  maxlen_tgt = config['data']['task'][tgt]['max_len']
  batch_sort_key = config['data']['task']['batch_sort_key']
  num_batches = config['data']['task']['num_batches']
  _, ngpu = utils.gpu_device_names()
  # The global batch size is split evenly across available GPUs.
  global_batch_size = config['solver']['optimizer']['batch_size']
  batch_size = utils.per_device_batch_size(global_batch_size, ngpu)
  batch_bins = config['solver']['optimizer']['batch_bins']
  batch_frames_in = config['solver']['optimizer']['batch_frames_in']
  batch_frames_out = config['solver']['optimizer']['batch_frames_out']
  batch_frames_inout = config['solver']['optimizer']['batch_frames_inout']
  batch_strategy = config['solver']['optimizer']['batch_strategy']
  # min_batch_size >= ngpu so every device gets at least one sample.
  minibatches = make_batchset(
      task=task,
      data=metas,
      batch_size=batch_size,
      max_length_in=maxlen_src,
      max_length_out=maxlen_tgt,
      num_batches=num_batches,
      batch_sort_key=batch_sort_key,
      min_batch_size=ngpu if ngpu else 1,
      shortest_first=use_sortagrad,
      batch_bins=batch_bins,
      batch_frames_in=batch_frames_in,
      batch_frames_out=batch_frames_out,
      batch_frames_inout=batch_frames_inout,
      batch_strategy=batch_strategy)
  return {'data': minibatches, 'n_utts': utts}
class Converter:
  '''Base batch converter for Kaldi-style features.

  :param int subsampling_factor: The subsampling factor
  :param object preprocess_conf: The preprocessing config
  '''

  def __init__(self, config):
    self._config = config
    self.subsampling_factor = None
    self.preprocess_conf = None
    # Identity by default; subclasses install a real loader.
    self.load_inputs_and_targets = lambda x: x

  @property
  def config(self):
    '''The raw configuration this converter was built from.'''
    return self._config

  def transform(self, item):
    '''Load inputs and outputs for *item* via the installed loader.'''
    return self.load_inputs_and_targets(item)

  def __call__(self, batch):
    '''Transform a batch.

    param: batch, list of (uttid, {'input': [{...}], 'output': [{...}]})
    '''
    (xs, ys), uttid_list = self.transform(batch)
    # Optionally subsample the input frames.
    factor = self.subsampling_factor
    if factor > 1:
      xs = [x[::factor, :] for x in xs]
    # Lengths of input and output sequences.
    ilens = [x.shape[0] for x in xs]
    olens = [y.shape[0] for y in ys]
    return xs, ilens, ys, olens, uttid_list
class ASRConverter(Converter):
  ''' ASR preprocess: loads features/targets for a batch via espnet's
  LoadInputsAndTargets, configured from the 'src' task section. '''
  def __init__(self, config):
    super().__init__(config)
    taskconf = self.config['data']['task']
    assert taskconf['type'] == TASK_SET['asr']
    self.subsampling_factor = taskconf['src']['subsampling_factor']
    self.preprocess_conf = taskconf['src']['preprocess_conf']
    # mode: asr or tts
    self.load_inputs_and_targets = LoadInputsAndTargets(
        mode=taskconf['type'],
        load_output=True,
        preprocess_conf=self.preprocess_conf)
  #pylint: disable=arguments-differ
  #pylint: disable=too-many-branches
  def transform(self, batch):
    """Function to load inputs, targets and uttid from list of dicts
    :param List[Tuple[str, dict]] batch: list of dict which is subset of
        loaded data.json
    :return: list of input token id sequences [(L_1), (L_2), ..., (L_B)]
    :return: list of input feature sequences
        [(T_1, D), (T_2, D), ..., (T_B, D)]
    :rtype: list of float ndarray
    :return: list of target token id sequences [(L_1), (L_2), ..., (L_B)]
    :rtype: list of int ndarray
    Reference: Espnet source code, /espnet/utils/io_utils.py
    https://github.com/espnet/espnet/blob/master/espnet/utils/io_utils.py
    """
    x_feats_dict = OrderedDict()  # OrderedDict[str, List[np.ndarray]]
    y_feats_dict = OrderedDict()  # OrderedDict[str, List[np.ndarray]]
    uttid_list = []  # List[str]
    mode = self.load_inputs_and_targets.mode
    # Collect input/output features per utterance, keyed by feature name.
    for uttid, info in batch:
      uttid_list.append(uttid)
      if self.load_inputs_and_targets.load_input:
        # Note(kamo): This for-loop is for multiple inputs
        for idx, inp in enumerate(info['input']):
          # {"input":
          #     [{"feat": "some/path.h5:F01_050C0101_PED_REAL",
          #       "filetype": "hdf5",
          #       "name": "input1", ...}], ...}
          #pylint: disable=protected-access
          x_data = self.load_inputs_and_targets._get_from_loader(
              filepath=inp['feat'], filetype=inp.get('filetype', 'mat'))
          x_feats_dict.setdefault(inp['name'], []).append(x_data)
      elif mode == 'tts' and self.load_inputs_and_targets.use_speaker_embedding:
        # TTS with speaker embedding: only input index 1 is loaded when
        # multiple inputs are present (espnet convention).
        for idx, inp in enumerate(info['input']):
          if idx != 1 and len(info['input']) > 1:
            x_data = None
          else:
            x_data = self.load_inputs_and_targets._get_from_loader(  #pylint: disable=protected-access
                filepath=inp['feat'],
                filetype=inp.get('filetype', 'mat'))
          x_feats_dict.setdefault(inp['name'], []).append(x_data)
      if self.load_inputs_and_targets.load_output:
        for idx, inp in enumerate(info['output']):
          if 'tokenid' in inp:
            # ======= Legacy format for output =======
            # {"output": [{"tokenid": "1 2 3 4"}])
            x_data = np.fromiter(
                map(int, inp['tokenid'].split()), dtype=np.int64)
          else:
            # ======= New format =======
            # {"input":
            #     [{"feat": "some/path.h5:F01_050C0101_PED_REAL",
            #       "filetype": "hdf5",
            #       "name": "target1", ...}], ...}
            x_data = self.load_inputs_and_targets._get_from_loader(  #pylint: disable=protected-access
                filepath=inp['feat'],
                filetype=inp.get('filetype', 'mat'))
          y_feats_dict.setdefault(inp['name'], []).append(x_data)
    # Assemble the per-name feature lists into a batch (espnet internals).
    if self.load_inputs_and_targets.mode == 'asr':
      #pylint: disable=protected-access
      return_batch, uttid_list = self.load_inputs_and_targets._create_batch_asr(
          x_feats_dict, y_feats_dict, uttid_list)
    elif self.load_inputs_and_targets.mode == 'tts':
      _, info = batch[0]
      # eos symbol id is derived from the output dimension of the first utt.
      eos = int(info['output'][0]['shape'][1]) - 1
      #pylint: disable=protected-access
      return_batch, uttid_list = self.load_inputs_and_targets._create_batch_tts(
          x_feats_dict, y_feats_dict, uttid_list, eos)
    else:
      raise NotImplementedError
    if self.load_inputs_and_targets.preprocessing is not None:
      # Apply pre-processing only to input1 feature, now
      if 'input1' in return_batch:
        return_batch['input1'] = \
            self.load_inputs_and_targets.preprocessing(return_batch['input1'], uttid_list,
                                                       **self.load_inputs_and_targets.preprocess_args)
    # Doesn't return the names now.
    return tuple(return_batch.values()), uttid_list
| StarcoderdataPython |
1680241 | """Module that contains helpers related to pagination in the REST API."""
from flask_restplus import fields
from medtagger.api import api
# Reusable response-model fields describing pagination metadata; merge into
# endpoint models via **PAGINATION.
PAGINATION = {
    'pagination': fields.Nested(api.model('Pagination', {
        'page': fields.Integer(description='Page number'),
        'per_page': fields.Integer(description='Number of entries per page'),
        # Fixed typo in user-facing description ("numer" -> "number").
        'total': fields.Integer(description='Total number of entries'),
    })),
}
| StarcoderdataPython |
import json
import os
import urllib

from bson import ObjectId
from better_profanity import profanity
from dotenv import load_dotenv
from flask import Flask, jsonify, request, Response
from flask_cors import CORS
from pymongo import MongoClient, ReturnDocument
# Load environment variables (e.g. MONGO_PASSWORD) from a local .env file.
load_dotenv()
# Flask application instance; CORS is enabled for all routes so a browser
# front-end served from another origin can call this API.
app = Flask(__name__)
CORS(app)
"""
Flask is a web framework for Python that allows us to run simple web servers.
We can make use of flasks's routing to create an API that allows us to talk to MongoDB
using only HTTP requests.
Let's define a few "routes", or URL patterns, that allow us to get, modify, and delete messages.
Based on the HTTP Request method and the route, we define actions that our API can take to alter data
for us.
"""
# We are using Flask to write our API. Flask is a Python web framework that allows us to
# associate python methods with url endpooints. When a user hits a route
# that matches a url pattern, the corresponding python method is called.
@app.route("/welcome")
def get_text():
# This is a basic method that returns the text "Hello, world!" when
# a user goes to http://<our-domain>/welcome
return "Hello, world!"
@app.route("/welcome/<text>")
def get_custom_text(text):
    """Echo back the path segment captured after /welcome/."""
    echoed = text
    return echoed
"""
Now, we can move on to the guts of our API: actually interacting with our database.
"""
# Connection string for the MongoDB Atlas cluster.  The password comes from
# the MONGO_PASSWORD environment variable (loaded from .env above) and is
# URL-quoted so special characters survive inclusion in the URI.
CONN_STR = f"mongodb+srv://mongouser:{urllib.parse.quote_plus(os.environ['MONGO_PASSWORD'])}@cluster0.4xnby.mongodb.net/postit?retryWrites=true&w=majority"
# Client object that talks to the cluster described by CONN_STR.
client = MongoClient(CONN_STR)
# Handle to the "postit" database, used for all reads and writes below.
db = client["postit"]
# Name of the collection holding message documents.
collection = "messages"
# To interact with our messages, let's define a "/messages"
# route. A "GET" request to this endpoint will return all messages in the database.
# A "POST" request will insert a new message.
@app.route("/messages", methods=["GET", "POST"])
def get_messages():
    """List all messages (GET) or insert a new, censored message (POST).

    POST expects a JSON body with "author" and "content" keys; both values
    are run through the profanity filter before insertion.
    """
    if request.method == "GET":
        # list() instead of a pass-through comprehension (same result).
        return jsonify(list(db[collection].find()))
    elif request.method == "POST":
        data = request.json
        # Only the expected fields are taken from the request body.
        new_message = censor_values(
            {"author": data["author"], "content": data["content"]}
        )
        db[collection].insert_one(new_message)
        # Return the newly inserted message (now including its _id).
        return jsonify(new_message)
    return Response("Invalid method", status=400)
# Now, let's say we want to interact with a single message.
# Let's define the URL route "/messages/<id>" that will
# allow us to interact with messages with a particular id.
@app.route("/messages/<id>", methods=["GET", "PUT", "DELETE"])
def get_message(id):
    """Fetch (GET), update (PUT), or delete (DELETE) one message by ObjectId."""
    # First, try to find a message with this ID.
    message = db[collection].find_one({"_id": ObjectId(id)})
    if not message:
        return Response("404 Not Found", status=404)
    if request.method == "GET":
        return message
    elif request.method == "PUT":
        new_message_fields = censor_values(request.json)
        # return_document=AFTER so the response reflects the applied changes;
        # pymongo's default (BEFORE) would return the pre-update document,
        # contradicting the variable name and the intent of the endpoint.
        updated = db[collection].find_one_and_update(
            {"_id": ObjectId(id)},
            {"$set": new_message_fields},
            return_document=ReturnDocument.AFTER,
        )
        return jsonify(updated)
    elif request.method == "DELETE":
        deleted = db[collection].find_one_and_delete({"_id": ObjectId(id)})
        return jsonify(deleted)
    return Response("Invalid method", status=400)
# # # #
# < Ignore this section >
# # # #
class JSONEncoder(json.JSONEncoder):
    """JSON encoder that serializes MongoDB ObjectId values as strings."""

    def default(self, o):
        if isinstance(o, ObjectId):
            return str(o)
        return super().default(o)
# Register the custom encoder so jsonify/Flask responses can emit ObjectId.
app.json_encoder = JSONEncoder
def censor_values(message):
    """Censor profanity in a string or in every value of a dict.

    Unknown types are returned unchanged with a warning.  Uses isinstance
    instead of ``type(x) == T`` so subclasses of str/dict are handled too.
    """
    if isinstance(message, str):
        return profanity.censor(message)
    elif isinstance(message, dict):
        return {k: profanity.censor(v) for k, v in message.items()}
    else:
        # Not sure how to censor this object.
        print(f"[ WARNING ]: Cannot censor object of type {type(message)}")
        return message
# # # #
# < Ignore this section >
# # # #
# Start the development server on port 5000 when run as a script
# (not when imported as a module).
if __name__ == "__main__":
    app.run(port=5000)
| StarcoderdataPython |
111650 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.27 on 2020-01-23 07:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds five nullable fields to agggovernancedashboard: ANC-today,
    # ANM/MPW presence, ASHA presence, child immunization flags, and the
    # date of the past month's VHSND.
    dependencies = [
        ('icds_reports', '0163_update_agg_awc_monthly_view'),
    ]
    operations = [
        migrations.AddField(
            model_name='agggovernancedashboard',
            name='anc_today',
            field=models.NullBooleanField(),
        ),
        migrations.AddField(
            model_name='agggovernancedashboard',
            name='anm_mpw_present',
            field=models.NullBooleanField(),
        ),
        migrations.AddField(
            model_name='agggovernancedashboard',
            name='asha_present',
            field=models.NullBooleanField(),
        ),
        migrations.AddField(
            model_name='agggovernancedashboard',
            name='child_immu',
            field=models.NullBooleanField(),
        ),
        migrations.AddField(
            model_name='agggovernancedashboard',
            name='vhsnd_date_past_month',
            field=models.DateField(null=True),
        )
    ]
| StarcoderdataPython |
3295657 | # MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
import frappe.sessions
from response import build_response,report_error
import api_handler
import json
from rest_api_methods import create_customer
def handle():
    # Entry point for this XML request handler: set response status fields,
    # build the XML response, and record the request as a Scheduler Task so
    # it can be processed asynchronously.  (Python 2 syntax.)
    try:
        # NOTE(review): "02"/"S" look like protocol status codes consumed by
        # the XML response builder -- confirm their meaning with callers.
        frappe.response['X_ERROR_CODE'] = "02"
        frappe.response['X_ERROR_DESC'] = "S"
        response = build_response("xml")
        # Persist the request for later background processing.
        create_scheduler_task(frappe.local.form_dict.cmd, frappe.local.form_dict.data)
        return response
    except Exception, e:
        import traceback
        print traceback.format_exc()
        # Any failure is surfaced as a generic administrator-facing error.
        raise Exception("Error while processing request, Please contact Administrator")
def create_scheduler_task(method_name, request_data):
    """Persist a 'Scheduler Task' document recording the requested call."""
    task = frappe.new_doc('Scheduler Task')
    task.method_name = method_name
    # Human-readable action label, e.g. "create_customer" -> "Create Customer".
    task.action = method_name.title().replace("_", " ")
    task.request_data = json.dumps(request_data, indent=4)
    task.save(ignore_permissions=True)
| StarcoderdataPython |
1720841 | <filename>mogan/scheduler/filter_scheduler.py
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
Leverages nova/scheduler/filter_scheduler.py
'''
"""The FilterScheduler is for creating servers.
You can customize this scheduler by specifying your own node Filters and
Weighing Functions.
"""
import itertools
from oslo_config import cfg
from oslo_log import log as logging
from mogan.common import exception
from mogan.common.i18n import _
from mogan.common import utils
from mogan import objects
from mogan.scheduler import client
from mogan.scheduler import driver
from mogan.scheduler import utils as sched_utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class FilterScheduler(driver.Scheduler):
    """Scheduler that can be used for filtering and weighing."""

    def __init__(self, *args, **kwargs):
        super(FilterScheduler, self).__init__(*args, **kwargs)
        self.max_attempts = self._max_attempts()
        self.reportclient = client.SchedulerClient().reportclient

    def _max_attempts(self):
        """Return the configured maximum scheduling attempts (>= 1)."""
        max_attempts = CONF.scheduler.scheduler_max_attempts
        if max_attempts < 1:
            raise exception.InvalidParameterValue(
                err=_("Invalid value for 'scheduler_max_attempts', "
                      "must be >=1"))
        return max_attempts

    def _log_server_error(self, server_id, retry):
        """Log requests with exceptions from previous server operations."""
        exc = retry.pop('exc', None)  # string-ified exception from server
        if not exc:
            return  # no exception info from a previous attempt, skip

        nodes = retry.get('nodes', None)
        if not nodes:
            return  # no previously attempted nodes, skip

        last_node = nodes[-1]
        LOG.error("Error scheduling %(server_id)s from last node: "
                  "%(last_node)s : %(exc)s",
                  {'server_id': server_id,
                   'last_node': last_node,
                   'exc': exc})

    def _populate_retry(self, filter_properties, request_spec):
        """Populate filter properties with history of retries for request.

        If maximum retries is exceeded, raise NoValidNode.
        """
        max_attempts = self.max_attempts
        retry = filter_properties.pop('retry', {})

        if max_attempts == 1:
            # re-scheduling is disabled.
            return

        # Bug fix: record this attempt and store the history back into
        # filter_properties so later re-schedules can see it.  The previous
        # code never initialized 'num_attempts', so the check below raised
        # KeyError on the first retry-enabled request.
        if retry:
            retry['num_attempts'] += 1
        else:
            retry = {
                'num_attempts': 1,
                'nodes': []  # list of nodes tried so far
            }
        filter_properties['retry'] = retry

        server_id = request_spec.get('server_ids')[0]
        self._log_server_error(server_id, retry)

        if retry['num_attempts'] > max_attempts:
            raise exception.NoValidNode(
                _("Exceeded max scheduling attempts %(max_attempts)d "
                  "for server %(server_id)s") %
                {'max_attempts': max_attempts,
                 'server_id': server_id})

    @staticmethod
    def _get_res_cls_filters(request_spec):
        """Map the flavor's resources to {resource_class_name: int_amount}."""
        flavor_dict = request_spec['flavor']
        resources = dict([(sched_utils.ensure_resource_class_name(res[0]),
                           int(res[1]))
                          for res in flavor_dict['resources'].items()])
        return resources

    @staticmethod
    def _get_res_aggregates_filters(context, request_spec):
        """Build aggregate filters from the flavor and availability zone.

        Returns a list of AggregateList objects (one per metadata
        key/value), or None when some key/value matches no aggregate,
        which must fail the scheduling.
        """
        flavor_dict = request_spec['flavor']
        resource_aggregates = flavor_dict.get('resource_aggregates', {})
        # Bug fix: on Python 3, dict.items() returns a view object that has
        # no append(); materialize it as a list before extending it.
        resource_aggregates_items = list(resource_aggregates.items())
        # Add availability_zone aggregate
        if request_spec['availability_zone']:
            resource_aggregates_items.append(
                ('availability_zone', request_spec['availability_zone']))
        filters = []
        for key, value in resource_aggregates_items:
            aggregates = objects.AggregateList.get_by_metadata(
                context, key, value)
            if not aggregates:
                # if no aggregates match with the key/value,
                # fail the scheduling.
                return None
            filters.append(aggregates)
        return filters

    @staticmethod
    def _get_server_group_obj(context, request_spec):
        """Load the ServerGroup named in scheduler hints, or None if absent."""
        server_group = request_spec.get('scheduler_hints', {}).get('group')
        if not server_group:
            return
        server_group = objects.ServerGroup.get_by_uuid(context, server_group)
        return server_group

    def _get_nodes_of_aggregates(self, aggregates):
        """Return uuids of resource providers that are members of the
        given aggregates (empty list for an empty input)."""
        if not aggregates:
            return []
        agg_uuids = [agg.uuid for agg in aggregates]
        query_filters = {'member_of': 'in:' + ','.join(agg_uuids)}
        rps = self.reportclient.get_filtered_resource_providers(query_filters)
        return [rp['uuid'] for rp in rps]

    def _get_filtered_affzs_nodes(self, context, server_group, filtered_nodes,
                                  num_servers):
        """Get the filtered affinity zone and nodes.

        If affinity specified in request, this method will return a tuple
        including filtered affinity zone and filtered nodes. e.g.
        [(zone1, [node-1, node-2, node-3])].
        If anti-affinity specified, this will return a list of tuples of
        affinity zone and nodes list. e.g.
        [(zone1, node-1]), (zone2, node-2), (zone3, node-3)]

        NOTE(review): returns None implicitly when the group has neither
        'affinity' nor 'anti-affinity' policy -- confirm callers never pass
        such a group.
        """
        def _log_and_raise_error(policy):
            LOG.error("No enough nodes filtered, request %(num_svr)s "
                      "server(s) with server group %(group)s with %(policy)s "
                      "policy specified.",
                      {"num_svr": num_servers, "group": server_group.name,
                       "policy": policy})
            msg = (_("No enough nodes filtered, request %(num_svr)s server(s) "
                     "with %(policy)s policy specified.") %
                   {"num_svr": num_servers, "policy": policy})
            raise exception.NoValidNode(msg)

        if 'affinity' in server_group.policies:
            selected_affz = None
            if server_group.members:
                # Reuse the affinity zone already assigned to a member.
                for member in server_group.members:
                    server = objects.Server.get(context, member)
                    if server.affinity_zone:
                        selected_affz = server.affinity_zone
                        break
            if selected_affz:
                aggs = objects.AggregateList.get_by_metadata(
                    context, 'affinity_zone', selected_affz)
                affz_nodes = self._get_nodes_of_aggregates(aggs)
                selected_nodes = list(set(filtered_nodes) & set(affz_nodes))
                if len(selected_nodes) < num_servers:
                    _log_and_raise_error('affinity')
                return selected_affz, selected_nodes[:num_servers]

        # Group all aggregates by their 'affinity_zone' metadata value
        # (groupby requires the list to be sorted by the same key).
        all_aggs = objects.AggregateList.get_by_metadata_key(
            context, 'affinity_zone')
        all_aggs = sorted(all_aggs, key=lambda a: a.metadata.get(
            'affinity_zone'))
        grouped_aggs = itertools.groupby(
            all_aggs, lambda a: a.metadata.get('affinity_zone'))
        if 'affinity' in server_group.policies:
            # Pick the first zone that can hold all requested servers.
            for affz, aggs in grouped_aggs:
                affz_nodes = self._get_nodes_of_aggregates(aggs)
                affz_nodes = list(set(filtered_nodes) & set(affz_nodes))
                if len(affz_nodes) >= num_servers:
                    return affz, affz_nodes[:num_servers]
            _log_and_raise_error('affinity')
        elif 'anti-affinity' in server_group.policies:
            # Zones already occupied by group members must be skipped.
            affinity_zones = []
            for member in server_group.members:
                server = objects.Server.get(context, member)
                affinity_zone = server.affinity_zone
                affinity_zones.append(affinity_zone)
            selected_affz_nodes = []
            for affz, aggs in grouped_aggs:
                if affz in affinity_zones:
                    continue
                affz_nodes = self._get_nodes_of_aggregates(aggs)
                affz_nodes = list(set(filtered_nodes) & set(affz_nodes))
                if affz_nodes:
                    selected_affz_nodes.append((affz, affz_nodes[0]))
                if len(selected_affz_nodes) >= num_servers:
                    return selected_affz_nodes
            _log_and_raise_error('anti-affinity')

    def _consume_per_server(self, context, request_spec, node, server_id,
                            affinity_zone=None):
        """Claim resources on `node` for one server via placement, and tag
        the server with its affinity zone when one was chosen."""
        server_obj = objects.Server.get(context, server_id)
        if affinity_zone:
            server_obj.affinity_zone = affinity_zone
            server_obj.save(context)
        alloc_data = self._get_res_cls_filters(request_spec)
        self.reportclient.put_allocations(
            node, server_obj.uuid, alloc_data,
            server_obj.project_id, server_obj.user_id)

    def _consume_nodes_with_server_group(self, context, request_spec,
                                         filtered_affzs_nodes, server_group):
        """Claim resources according to the group's (anti-)affinity policy
        and return the list of destination nodes."""
        if 'affinity' in server_group.policies:
            affinity_zone, dest_nodes = filtered_affzs_nodes
            for server_id, node in zip(request_spec['server_ids'],
                                       dest_nodes):
                self._consume_per_server(
                    context, request_spec, node, server_id, affinity_zone)
            return dest_nodes
        elif 'anti-affinity' in server_group.policies:
            dest_nodes = []
            for server_id, affz_node in zip(request_spec['server_ids'],
                                            filtered_affzs_nodes):
                affinity_zone, node = affz_node
                dest_nodes.append(node)
                self._consume_per_server(
                    context, request_spec, node, server_id, affinity_zone)
            return dest_nodes

    def _get_filtered_nodes(self, context, request_spec):
        """Return uuids of nodes satisfying the resource-class filters and,
        when present, the intersection of all aggregate filters."""
        resources_filter = self._get_res_cls_filters(request_spec)
        aggs_filters = self._get_res_aggregates_filters(context, request_spec)
        # None indicates no matching aggregates
        if aggs_filters is None:
            return []
        if aggs_filters:
            filtered_nodes = set()
            for agg_filter in aggs_filters:
                filtered_rps = set(self._get_nodes_of_aggregates(agg_filter))
                if not filtered_rps:
                    # if got empty, just break here.
                    return []
                if not filtered_nodes:
                    # initialize the filtered_nodes
                    filtered_nodes = filtered_rps
                else:
                    filtered_nodes &= filtered_rps
                    if not filtered_nodes:
                        # if got empty, just break here.
                        return []
        else:
            # No aggregate constraints: filter by resources alone.
            query_filters = {'resources': resources_filter}
            filtered_nodes = self.reportclient.\
                get_filtered_resource_providers(query_filters)
            return [node['uuid'] for node in filtered_nodes]
        return list(filtered_nodes)

    def schedule(self, context, request_spec, filter_properties=None):
        """Select and claim destination nodes for the requested servers.

        NOTE(review): assumes filter_properties is a dict; a literal None
        would crash in _populate_retry -- confirm callers always pass one.
        """
        # TODO(zhenguo): Scheduler API is inherently multi-threaded as every
        # incoming RPC message will be dispatched in it's own green thread.
        # So we add a syncronized here to make sure the shared node states
        # consistent, but lock the whole schedule process is not a good choice,
        # we need to improve this.
        @utils.synchronized('schedule')
        def _schedule(self, context, request_spec, filter_properties):
            self._populate_retry(filter_properties, request_spec)
            filtered_nodes = self._get_filtered_nodes(context, request_spec)
            if not filtered_nodes:
                LOG.warning('No filtered nodes found for server '
                            'with properties: %s',
                            request_spec.get('flavor'))
                raise exception.NoValidNode(
                    _("No filtered nodes available"))
            dest_nodes = self._choose_nodes(filtered_nodes, request_spec)
            server_group = self._get_server_group_obj(context, request_spec)
            if not server_group:
                for server_id, node in zip(request_spec['server_ids'],
                                           dest_nodes):
                    self._consume_per_server(context, request_spec, node,
                                             server_id)
                return dest_nodes
            else:
                filtered_affzs_nodes = self._get_filtered_affzs_nodes(
                    context, server_group, filtered_nodes,
                    request_spec['num_servers'])
                return self._consume_nodes_with_server_group(
                    context, request_spec, filtered_affzs_nodes, server_group)

        return _schedule(self, context, request_spec, filter_properties)

    def _choose_nodes(self, filtered_nodes, request_spec):
        """Take the first num_servers filtered nodes, or raise NoValidNode
        when fewer nodes remain than servers requested."""
        num_servers = request_spec['num_servers']
        if num_servers > len(filtered_nodes):
            msg = 'Not enough nodes found for servers, request ' \
                  'servers: %s, but only available nodes: %s' \
                  % (str(num_servers), str(len(filtered_nodes)))
            raise exception.NoValidNode(_("Choose Node: %s") % msg)

        return filtered_nodes[:num_servers]
| StarcoderdataPython |
1628924 | <filename>tools/BuiltinFilesToC.py
import os
def every_file_in_dir(dir):
    """Return the paths of all files found under *dir*, recursively."""
    result = []
    for root, dirs, files in os.walk(dir):
        for file in files:
            result.append(os.path.join(root, file))
    return result


def builtin_files_to_c(localDir, hostedPrefix):
    """Generate C++ source embedding every file found under *localDir*.

    The generated translation unit defines circa::find_builtin_file(),
    which maps a hosted filename (hostedPrefix + path relative to
    localDir) to the embedded file contents, or NULL when the name does
    not correspond to an embedded file.

    Returns:
        The generated source code as a single string.
    """
    out = []
    out.append("// This file was autogenerated")
    out.append("")
    out.append("#include <cstring>")
    out.append("")
    out.append("namespace circa {")
    out.append("")
    out.append("const char* find_builtin_file(const char* filename) {")
    out.append("")
    out.append(" if (strncmp(filename, \""+hostedPrefix+"\", "+str(len(hostedPrefix))+") != 0)")
    out.append(" return NULL;")
    out.append("")

    def escape_line(line):
        # Escape characters that are significant inside a C string literal.
        escaped = []
        for c in line:
            if c == '"':
                escaped.append('\\"')
            elif c == '\\':
                escaped.append('\\\\')
            else:
                escaped.append(c)
        return "".join(escaped)

    for filename in every_file_in_dir(localDir):
        localFilename = os.path.relpath(filename, localDir)
        prefixedName = hostedPrefix+localFilename
        out.append(' if (strncmp(filename, "'+prefixedName+'", '+str(len(prefixedName))+') == 0) return')
        # Bug fix: use a context manager so every embedded file is closed
        # (the original leaked one file handle per embedded file).
        with open(filename) as source:
            for line in source:
                # Bug fix: only strip a trailing newline when present.
                # Unconditionally slicing off the last character corrupted
                # the final line of files without a trailing newline.
                if line.endswith("\n"):
                    line = line[:-1]
                out.append(' "' + escape_line(line) + '\\n"')
        out.append(" ;")
        out.append("")

    out.append("")
    out.append(" return NULL;")
    out.append("}")
    out.append('')
    out.append("} // namespace circa")
    out.append("")
    return "\n".join(out)
| StarcoderdataPython |
3259030 | r"""
Extending `py-fmas` by `optfrog` spectrograms
=============================================
This examples shows how to use the `py-fmas` library code in conjunction
with the optFrog spectrogram tool.
In particular, the example details a numerical experiment performing pulse
propagation in terms of the simplified forward model for the analytic signal
including the Raman effect [1]. Here, the model is used to perform simulations
on supercontinuum generation for an instance of a highly nonlinear photonic
crystal fiber (PCF) with an anomalous group-velocity dispersion regime [2].
The example also includes data postprocessing by calculating an analytic signal
spectrum with optimized time-frequency resolution using the `optfrog`-tool [3].
An example that shows how to use the simple `py-fmas` native spectrogram is
shown under the link below:
:ref:`sphx_glr_auto_tutorials_specific_g_spectrogram.py`
Note:
* For this exampe to run, the optfrog tool needs to be installed [3].
* The `py-fmas` package includes a simple spectrogram in module `tools`.
The `optfrog` Python package however includes extended functionality by
allowing a user to calculate spectrograms with optimized time and
frequency resolution.
References:
[1] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, G.
Steinmeyer, Rogue wave formation by accelerated solitons at an optical
event horizon, Appl. Phys. B 115 (2014) 343,
http://dx.doi.org/10.1007/s00340-013-5609-9
[2] <NAME>, <NAME>, <NAME>,
Supercontinuum generation in photonic crystal fiber,
Rev. Mod. Phys. 78 (2006) 1135,
http://dx.doi.org/10.1103/RevModPhys.78.1135
[3] <NAME>, <NAME>, <NAME>, <NAME>,
OptFROG — Analytic signal spectrograms with optimized time–frequency resolution,
SoftwareX 10 (2019) 100275,
https://doi.org/10.1016/j.softx.2019.100275,
code repository: https://github.com/ElsevierSoftwareX/SOFTX_2019_130.
.. codeauthor:: <NAME> <<EMAIL>>
"""
import fmas
import numpy as np
from fmas.models import CustomModelPCF
from fmas.solver import IFM_RK4IP
from fmas.analytic_signal import AS
from fmas.grid import Grid
from fmas.tools import plot_spectrogram
from optfrog import optFrog
def main():
    """Run the supercontinuum experiment and show an optFrog spectrogram."""
    # Computational domain.
    t_max = 3500.        # (fs)
    t_num = 2**14        # (-)
    z_max = 0.10*1e6     # (micron)
    z_num = 8000         # (-)
    z_skip = 10          # (-)
    # Initial condition: hyperbolic-secant pulse.
    peak_power = 1e4     # (W)
    duration = 28.4      # (fs)
    center_freq = 2.2559  # (rad/fs)

    def initial_field(t):
        return np.real(np.sqrt(peak_power)/np.cosh(t/duration)
                       * np.exp(-1j*center_freq*t))

    # Initialization stage: grid, PCF model, and z-propagation solver.
    grid = Grid(t_max=t_max, t_num=t_num, z_max=z_max, z_num=z_num)
    model = CustomModelPCF(w=grid.w)
    solver = IFM_RK4IP(model.Lw, model.Nw, user_action=model.claw)
    solver.set_initial_condition(grid.w, AS(initial_field(grid.t)).w_rep)

    # Run the simulation.
    solver.propagate(z_range=z_max, n_steps=z_num, n_skip=z_skip)

    # Postprocessing: optFrog spectrogram of the field at distance z_probe.
    z_probe = 0.08e6  # (micron)
    idx_z_probe = np.argmin(np.abs(solver.z - z_probe))
    field_t = solver.utz[idx_z_probe]

    def window_fun(s0):
        # Normalized Gaussian window of width s0 for signal localization.
        return lambda t: np.exp(-t**2/2/s0/s0)/np.sqrt(2.*np.pi)/s0

    trace = optFrog(
        grid.t,       # temporal grid
        field_t,      # analytic signal
        window_fun,   # window function
        tLim=(-500.0, 3200.0, 10),  # (tmin, fs) (tmax, fs) (nskip)
        wLim=(0.9, 4.1, 3)          # (wmin, fs) (wmax, fs) (nskip)
    )
    # Show the optimized time-frequency trace.
    plot_spectrogram(trace.tau, trace.w, trace.P)


if __name__=='__main__':
    main()
| StarcoderdataPython |
99892 | <reponame>jingwangian/aiven
import psutil
import json
import logging
import psycopg2
from datetime import datetime
from dataclasses import dataclass
logger = logging.getLogger(__name__)
@dataclass
class CPUMetric:
    """One CPU-times sample (percentages) collected for a machine."""

    # Metric identifier, also the name of the backing database table.
    name = 'cpu_metric'

    machine_id: int
    user: float
    nice: float
    system: float
    idle: float
    created_at: str

    @classmethod
    def load_from_string(cls, str_value: str):
        '''Return a CPUMetric instance
        '''
        return cls(**json.loads(str_value))

    def dump_to_string(self):
        """Serialize this metric to its JSON string representation."""
        payload = {
            "machine_id": self.machine_id,
            "user": self.user,
            "nice": self.nice,
            "system": self.system,
            "idle": self.idle,
            "created_at": self.created_at
        }
        return json.dumps(payload)

    def save_to_db(self, connection):
        """Insert this metric as one row of the cpu_metric table."""
        insert_str = f'INSERT INTO {self.name} \
            (machine_id, "user",nice,system,idle,created_at) \
            values (%s, %s,%s,%s,%s,%s)'
        with connection.cursor() as cursor:
            try:
                cursor.execute(insert_str,
                               (self.machine_id, self.user, self.nice,
                                self.system, self.idle, self.created_at))
                logger.info(f"inserted one metric into table: {self.name}")
            except psycopg2.Error as err:
                logger.error('Failed to insert data into table: %s', err)
        connection.commit()

    @classmethod
    def create_table(cls, connection):
        """Create the cpu_metric table if it does not already exist."""
        ddl = '''create table if not exists cpu_metric (
            machine_id int not null,
            "user" float,
            nice float,
            system float,
            idle float,
            created_at TIMESTAMPTZ not null);'''
        with connection.cursor() as cursor:
            try:
                cursor.execute(ddl)
                logger.info(f"Create table {cls.name} successfully")
            except psycopg2.Error as err:
                logger.error(f'Failed to created table {cls.name} : %s', err)
        connection.commit()
@dataclass
class MemMetric:
    """One virtual-memory usage sample (bytes / percent) for a machine."""

    # Metric identifier, also the name of the backing database table.
    name = 'mem_metric'

    machine_id: int
    total: int
    available: int
    percent: float
    used: int
    free: int
    created_at: str

    @classmethod
    def load_from_string(cls, str_value: str):
        '''Return a MemMetric instance
        '''
        return cls(**json.loads(str_value))

    def dump_to_string(self):
        """Serialize this metric to its JSON string representation."""
        payload = {
            "machine_id": self.machine_id,
            "total": self.total,
            "available": self.available,
            "percent": self.percent,
            "used": self.used,
            "free": self.free,
            "created_at": self.created_at
        }
        return json.dumps(payload)

    def save_to_db(self, connection):
        """Insert this metric as one row of the mem_metric table."""
        insert_str = f'INSERT INTO {self.name} \
            (machine_id,total,available,percent,used,free,created_at) \
            values (%s,%s,%s,%s,%s,%s,%s)'
        with connection.cursor() as cursor:
            try:
                cursor.execute(insert_str,
                               (self.machine_id, self.total, self.available,
                                self.percent, self.used, self.free,
                                self.created_at))
                logger.info(f"inserted one metric into table: {self.name}")
            except psycopg2.Error as err:
                logger.error('Failed to insert data into table: %s', err)
        connection.commit()

    @classmethod
    def create_table(cls, connection):
        """Create the mem_metric table if it does not already exist."""
        ddl = '''create table if not exists mem_metric(
            machine_id int not null,
            total bigint,
            available bigint,
            percent float,
            used bigint,
            free bigint,
            created_at TIMESTAMPTZ not null);'''
        with connection.cursor() as cursor:
            try:
                cursor.execute(ddl)
                logger.info(f"Create table {cls.name} successfully")
            except psycopg2.Error as err:
                logger.error(f'Failed to created table {cls.name} : %s', err)
        connection.commit()
@dataclass
class DiskMetric:
    """One disk usage sample (bytes / percent) for a machine."""

    # Metric identifier, also the name of the backing database table.
    name = 'disk_metric'

    machine_id: int
    total: int
    used: int
    free: int
    percent: float
    created_at: str

    @classmethod
    def load_from_string(cls, str_value: str):
        '''Return a DiskMetric instance
        '''
        return cls(**json.loads(str_value))

    def dump_to_string(self):
        """Serialize this metric to its JSON string representation."""
        payload = {
            "machine_id": self.machine_id,
            "total": self.total,
            "used": self.used,
            "free": self.free,
            "percent": self.percent,
            "created_at": self.created_at
        }
        return json.dumps(payload)

    def save_to_db(self, connection):
        """Insert this metric as one row of the disk_metric table."""
        insert_str = f'INSERT INTO {self.name} \
            (machine_id,total,used,free,percent,created_at) \
            values (%s,%s,%s,%s,%s,%s)'
        with connection.cursor() as cursor:
            try:
                cursor.execute(insert_str,
                               (self.machine_id, self.total, self.used,
                                self.free, self.percent, self.created_at))
                logger.info(f"inserted one metric into table: {self.name}")
            except psycopg2.Error as err:
                logger.error('Failed to insert data into table: %s', err)
        connection.commit()

    @classmethod
    def create_table(cls, connection):
        """Create the disk_metric table if it does not already exist."""
        # NOTE(review): unlike cpu_metric/mem_metric, this DDL declares an
        # extra 'time' column (defaulted) and machine_id lacks 'not null' --
        # confirm whether this asymmetry is intentional before changing it.
        ddl = '''create table if not exists disk_metric(
            time TIMESTAMPTZ default CURRENT_TIMESTAMP,
            machine_id int,
            total bigint,
            used bigint,
            free bigint,
            percent float,
            created_at TIMESTAMPTZ not null);'''
        with connection.cursor() as cursor:
            try:
                cursor.execute(ddl)
                logger.info(f"Create table {cls.name} successfully")
            except psycopg2.Error as err:
                logger.error(f'Failed to created table {cls.name} : %s', err)
        connection.commit()
class SystemMonitor:
    """Collects CPU, memory, and disk usage snapshots for one machine."""

    def __init__(self, machine_id):
        '''Init a system monitor instance
        '''
        self.machine_id = machine_id

    def get_cpu_percent(self) -> CPUMetric:
        """Return the current CPU time percentages as a CPUMetric."""
        cpu = psutil.cpu_times_percent()
        return CPUMetric(self.machine_id,
                         cpu.user,
                         cpu.nice,
                         cpu.system,
                         cpu.idle,
                         datetime.now().isoformat())

    def get_memory_usage(self) -> MemMetric:
        """Return the current virtual-memory usage as a MemMetric."""
        mem = psutil.virtual_memory()
        return MemMetric(machine_id=self.machine_id,
                         total=mem.total,
                         available=mem.available,
                         percent=mem.percent,
                         used=mem.used,
                         free=mem.free,
                         created_at=datetime.now().isoformat())

    def get_disk_usage(self) -> DiskMetric:
        """Return the current root-filesystem usage as a DiskMetric."""
        disk = psutil.disk_usage('/')
        return DiskMetric(machine_id=self.machine_id,
                          total=disk.total,
                          used=disk.used,
                          free=disk.free,
                          # Bug fix: 'percent' was previously filled with
                          # disk.free (bytes); use the usage percentage.
                          percent=disk.percent,
                          created_at=datetime.now().isoformat())
def get_metrics():
    """Return all metric classes handled by the system monitor."""
    metric_classes = (CPUMetric, MemMetric, DiskMetric)
    return list(metric_classes)
| StarcoderdataPython |
165498 | #TODO: Write and run tests of regression and classification | StarcoderdataPython |
1673417 | import os
import time
import shutil
from collections import deque
from datetime import timedelta, datetime
from math import floor
import numpy as np
import scipy.misc
import tensorflow as tf
TF_VERSION = list(map(int, tf.__version__.split('.')[:2]))
class DenseNet:
# -------------------------------------------------------------------------
# --------------------------- CLASS INITIALIZER ---------------------------
# -------------------------------------------------------------------------
    def __init__(self, data_provider, growth_rate, layer_num_list,
                 keep_prob, num_inter_threads, num_intra_threads,
                 weight_decay, nesterov_momentum, model_type, dataset,
                 should_self_construct, should_change_lr,
                 self_constructing_var, self_constr_rlr, block_count,
                 layer_cs, asc_thresh, patience_param,
                 std_tolerance, std_window, preserve_transition_l,
                 expansion_rate, dkCS_softening, dkCS_std_window,
                 dkCS_stl_thresh, usefulness_thresh, uselessness_thresh,
                 auto_usefulness_thresh, auto_uselessness_thresh,
                 m_asc_thresh, m_patience_param, impr_thresh, complementarity,
                 should_save_logs, should_save_ft_logs, ft_period,
                 ft_comma, ft_decimal, ft_filters, ft_kernels,
                 ft_cross_entropies,
                 should_save_model, should_save_images,
                 renew_logs=False,
                 reduction=1.0,
                 bc_mode=False,
                 **kwargs):
        """
        Class to implement DenseNet networks as defined in this paper:
        https://arxiv.org/pdf/1611.05552.pdf

        Args:
            data_provider: data provider object for the required data set;
            growth_rate: `int`, number of convolutions in a new dense layer;
            layer_num_list: `str`, list of number of layers in each block,
                separated by commas (e.g. '12,12,12');
            keep_prob: `float`, keep probability for dropout. If keep_prob = 1
                dropout will be disabled;
            weight_decay: `float`, weight decay for L2 loss, paper = 1e-4;
            nesterov_momentum: `float`, momentum for Nesterov optimizer;
            model_type: `str`, model type name ('DenseNet' or 'DenseNet-BC'),
                should we use bottleneck layers and compression or not;
            dataset: `str`, dataset name;
            should_self_construct: `bool`, should use self-constructing or not;
            should_change_lr: `bool`, should change the learning rate or not;
            self_constructing_var: `int`, variant of the self-constructing
                algorithm to be used, if the int does not identify any variant
                the most recent (default) variant is used;
            self_constr_rlr: `int`, learning rate reduction variant to be used
                with the self-constructing algorithm, if the int does not
                identify any variant the most recent (default) variant is used;
            block_count: `int`, maximum number of blocks to self-construct;
            layer_cs: `str`, 'layer CS', preferred interpretation of CS values
                when evaluating layers (using 'relevance' or 'spread');
            asc_thresh: `int`, ascension threshold for self-constructing;
            patience_param: `int`, patience parameter for self-constructing;
            std_tolerance: `int`, std tolerance for self-constructing;
            std_window: `int`, std window for self-constructing;
            preserve_transition_l: `bool`, should preserve transition to
                classes after layer additions or not;
            expansion_rate: `int`, rate at which new convolutions are added
                together during the self-construction of a dense layer;
            dkCS_softening: `int`, memory window for each kernel's CS during
                kernel-level self-constructing (to soften the derivate);
            dkCS_std_window: `int`, std window for each kernel's CS derivate
                during kernel-level self-constructing;
            dkCS_stl_thresh: `float`, settling threshold for each kernel's CS
                derivate during kernel-level self-constructing;
            usefulness_thresh: `float`, usefulness threshold for kernels during
                kernel-level self-constructing;
            uselessness_thresh: `float`, uselessness threshold for kernels
                during kernel-level self-constructing;
            auto_usefulness_thresh: `float`, usefulness threshold as a fraction
                between 0 and 1 (used for automatic usefulness threshold).
            auto_uselessness_thresh: `float`, uselessness threshold as a
                fraction between 0 and 1 (used for automatic uselessness
                threshold).
            m_asc_thresh: `int`, micro-ascension threshold for kernel-level
                self-constructing;
            m_patience_param: `int`, micro-patience parameter for kernel-level
                self-constructing;
            impr_thresh: `int`, improvement threshold used during kernel-level
                self-constructing;
            complementarity: `bool`, whether or not to use complementarity when
                adding new kernels during kernel-level self-constructing.
            should_save_logs: `bool`, should tensorflow logs be saved or not;
            should_save_ft_logs: `bool`, should feature logs be saved or not;
            ft_period: `int`, number of epochs between two measurements of
                feature values (e.g. accuracy, loss, weight mean and std);
            ft_comma: `str`, 'comma' separator in the CSV feature logs;
            ft_decimal: `str`, 'decimal' separator in the CSV feature logs;
            ft_filters: `bool`, should check filter features or not;
            ft_kernels: `bool`, should check kernel features or not;
            ft_cross_entropies: `bool`, should measure cross-entropies for
                each individual layer in the last block or not;
            should_save_model: `bool`, should the model be saved or not;
            should_save_images: `bool`, should images be saved or not;
            renew_logs: `bool`, remove previous logs for current model;
            reduction: `float`, reduction (theta) at transition layers for
                DenseNets with compression (DenseNet-BC);
            bc_mode: `bool`, boolean equivalent of model_type, should we use
                bottleneck layers and compression (DenseNet-BC) or not.
        """
        # Main DenseNet and DenseNet-BC parameters.
        # creation_time stamps this run; it is baked into run_identifier.
        self.creation_time = datetime.now().strftime("%Y_%m_%d_%H%M%S")
        self.data_provider = data_provider
        self.data_shape = data_provider.data_shape
        self.n_classes = data_provider.n_classes
        self.growth_rate = growth_rate
        self.num_inter_threads = num_inter_threads
        self.num_intra_threads = num_intra_threads
        # Number of outputs (feature maps) produced by the initial convolution
        # (2*k, same value as in the original Torch code).
        self.first_output_features = growth_rate * 2
        self.layer_num_list = list(map(int, layer_num_list.split(',')))
        self.total_blocks = len(self.layer_num_list)
        self.bc_mode = bc_mode
        self.reduction = reduction
        print("Build %s model with %d blocks, "
              "The number of layers in each block is:" % (
                  model_type, self.total_blocks))
        if not bc_mode:
            print('\n'.join('Block %d: %d composite layers.' % (
                k, self.layer_num_list[k]) for k in range(len(
                    self.layer_num_list))))
        if bc_mode:
            print('\n'.join('Block %d: %d bottleneck layers and %d composite'
                            'layers.' % (k, self.layer_num_list[k],
                                         self.layer_num_list[k])
                            for k in range(len(self.layer_num_list))))
        print("Reduction at transition layers: %.1f" % self.reduction)
        self.keep_prob = keep_prob
        self.weight_decay = weight_decay
        self.nesterov_momentum = nesterov_momentum
        self.model_type = model_type
        self.dataset_name = dataset
        self.should_self_construct = should_self_construct
        # has_micro_algo is refined below once the variant is known
        # (only variants >= 4 include the kernel-level ("micro") algorithm).
        self.has_micro_algo = should_self_construct
        self.preserve_transition_l = should_self_construct
        self.should_change_lr = should_change_lr
        # At least one block must always be built.
        self.block_count = max(1, block_count)
        self.layer_cs = layer_cs
        # Manage self construction only when self-constructing.
        if should_self_construct:
            # Choice of the self-constructing algorithm variant.
            if self_constructing_var == 0:
                self.self_constructing_step = self.self_constructing_var0
            elif self_constructing_var == 1:
                self.self_constructing_step = self.self_constructing_var1
            elif self_constructing_var == 2:
                self.self_constructing_step = self.self_constructing_var2
            elif self_constructing_var == 3:
                self.self_constructing_step = self.self_constructing_var3
            elif self_constructing_var == 4:
                self.self_constructing_step = self.self_constructing_var4
            elif self_constructing_var == 5:
                self.self_constructing_step = self.self_constructing_var5
            # elif self_constructing_var >= 6:
            #     self.self_constructing_step = self.self_constructing_minimal
            else:
                # Unknown id: fall back to the most recent (default) variant.
                self_constructing_var = 5
                self.self_constructing_step = self.self_constructing_var5
            self.has_micro_algo = self_constructing_var >= 4
            # Choice of the self-constructing learning rate reduction variant.
            if self_constr_rlr == 0:
                self.self_constr_rlr = self.self_constr_rlr0
            else:
                self.self_constr_rlr = self.self_constr_rlr1
            # Self-construction parameters.
            self.asc_thresh = asc_thresh
            self.patience_param = patience_param
            self.patience_cntdwn = patience_param
            self.std_tolerance = std_tolerance
            self.std_window = std_window
            self.preserve_transition_l = preserve_transition_l
            self.pruned_varnames = []
            # Accuracy FIFO list, only used in variants #2 and #3.
            if self_constructing_var == 2 or self_constructing_var == 3:
                self.accuracy_FIFO = deque(maxlen=self.std_window)
            # Micro self-construction parameters.
            if self.has_micro_algo:
                self.expansion_rate = expansion_rate
                self.dkCS_softening = dkCS_softening+1 # actual num of elems
                self.dkCS_std_window = dkCS_std_window
                self.dkCS_stl_thresh = dkCS_stl_thresh
                self.usefulness_thresh = usefulness_thresh
                self.uselessness_thresh = uselessness_thresh
                self.auto_usefulness_thresh = auto_usefulness_thresh
                self.auto_uselessness_thresh = auto_uselessness_thresh
                # self.alt_uselessness_thresh = 0.25
                self.m_asc_thresh = m_asc_thresh
                self.m_patience_param = m_patience_param
                self.m_patience_cntdwn = m_patience_param
                self.impr_thresh = impr_thresh
                self.complementarity = complementarity
        # Data saving parameters.
        self.should_save_logs = should_save_logs
        self.should_save_ft_logs = should_save_ft_logs
        self.ft_period = ft_period
        self.ftc = ft_comma
        self.ftd = ft_decimal
        # Filter-level stats are disabled when kernel-level stats are
        # collected -- presumably redundant in that case; TODO confirm.
        self.ft_filters = ft_filters and not ft_kernels
        self.ft_kernels = ft_kernels
        self.ft_cross_entropies = ft_cross_entropies
        self.should_save_model = should_save_model
        self.should_save_images = should_save_images
        self.renew_logs = renew_logs
        self.batches_step = 0
        # Build the graph and start the TF session.
        self._define_inputs()
        self._build_graph()
        self._initialize_session()
        self._count_trainable_params_in_use()
# -------------------------------------------------------------------------
# ------------------------ SAVING AND LOADING DATA ------------------------
# -------------------------------------------------------------------------
def update_paths(self):
"""
Update all paths for saving data to their proper values.
This is used after the graph is modified (new block or layer).
This is also used after an AttributeError when calling these paths.
"""
save_path = 'saves/%s' % self.model_identifier
if self.should_save_model:
os.makedirs(save_path, exist_ok=True)
save_path = '%s/%s' % (save_path, 'model.chkpt')
self._save_path = save_path
logs_path = 'logs/%s' % self.model_identifier
if self.should_save_logs:
if self.renew_logs:
shutil.rmtree(logs_path, ignore_errors=True)
os.makedirs(logs_path, exist_ok=True)
self._logs_path = logs_path
ft_logs_path = 'ft_logs/%s' % self.run_identifier
if self.should_save_ft_logs:
os.makedirs('ft_logs/', exist_ok=True)
self._ft_logs_path = ft_logs_path
images_path = 'images/%s' % self.run_identifier
if self.should_save_images:
os.makedirs(images_path, exist_ok=True)
self._images_path = images_path
return save_path, logs_path, ft_logs_path, images_path
@property
def model_identifier(self):
"""
Returns an identifier `str` for the current DenseNet model.
It gives the model's type ('DenseNet' or 'DenseNet-BC'),
its growth rate k, the number of layers in each block,
and the dataset that was used.
"""
return "{}_dataset_{}_growth_rate={}_layer_num_list={}".format(
self.model_type, self.dataset_name, self.growth_rate, ",".join(map(
str, self.layer_num_list)))
@property
def run_identifier(self):
"""
Returns an identifier `str` for the current execution of the algorithm.
It gives the model's type ('DenseNet' or 'DenseNet-BC'),
its growth rate k, the dataset that was used,
and the date and hour at which the execution started.
"""
return "{}_{}_dataset_{}_growth_rate={}".format(
self.model_type, self.creation_time, self.dataset_name,
self.growth_rate)
@property
def save_path(self):
"""
Returns a path where the saver should save the current model.
"""
try:
save_path = self._save_path
except AttributeError:
save_path = self.update_paths()[0]
return save_path
@property
def logs_path(self):
"""
Returns a path where the logs for the current model should be written.
"""
try:
logs_path = self._logs_path
except AttributeError:
logs_path = self.update_paths()[1]
return logs_path
@property
def ft_logs_path(self):
"""
Returns a path where the evolution of features in the current execution
should be recorded.
"""
try:
ft_logs_path = self._ft_logs_path
except AttributeError:
ft_logs_path = self.update_paths()[2]
return ft_logs_path
@property
def images_path(self):
"""
Returns a path where images from the current execution should be saved.
"""
try:
images_path = self._images_path
except AttributeError:
images_path = self.update_paths()[3]
return images_path
    def save_model(self, global_step=None):
        """
        Saves the current trained model at self.save_path, using the saver.
        Args:
            global_step: `int` or None, appended by the saver to the
                checkpoint filename to number successive saves.
        """
        self.saver.save(self.sess, self.save_path, global_step=global_step)
def load_model(self):
"""
Loads a saved model to use (instead of a new one) using the saver.
This is a previously trained and saved model using the model_type
('DenseNet' or 'DenseNet-BC'), growth rate, layers in each block,
and dataset that was specified in the program arguments.
"""
try:
self.saver.restore(self.sess, self.save_path)
except Exception as e:
raise IOError("Failed to to load model "
"from save path: %s" % self.save_path)
self.saver.restore(self.sess, self.save_path)
print("Successfully load model from save path: %s" % self.save_path)
def log_loss_accuracy(self, loss, accuracy, epoch, prefix,
should_print=True):
"""
Writes a log of the current mean loss (cross_entropy) and accuracy.
Args:
loss: `float`, loss (cross_entropy) for the current log;
accuracy: `float`, accuracy for the current log;
epoch: `int`, current training epoch (or batch);
prefix: `str`, is this log for a batch ('per_batch'), a
training epoch ('train') or a validation epoch ('valid');
should_print: `bool`, should we print this log on console or not.
"""
if should_print:
print("mean cross_entropy: %f, mean accuracy: %f" % (
loss, accuracy))
summary = tf.Summary(value=[
tf.Summary.Value(
tag='loss_%s' % prefix, simple_value=float(loss)),
tf.Summary.Value(
tag='accuracy_%s' % prefix, simple_value=float(accuracy))
])
self.summary_writer.add_summary(summary, epoch)
def ft_log_filters(self, b, cs_table_ls, lcs_dst, lcs_src):
"""
Write a feature log with data concerning filters: the CS of every
connection in a given block, the 'layer CS' (relevance or spread) for
destinations and sources for all layers in the same block.
Args:
b: `int`, identifier number for the block;
cs_table_ls: `list` of `list` of `float`, the table of CS for each
connection to a layer l from a previous layer s;
lcs_dst: `list` of `float`, 'layer CS' for destinations
for all layers in the block;
lcs_src: `list` of `float`, 'layer CS' for sources
for all layers in the block.
"""
# printing and saving the data to feature logs
for l in range(self.layer_num_list[b]):
# 'layer CS' for destinations of l-1
print(' - %s for destinations = %f' % (
self.layer_cs.capitalize(), lcs_dst[l]))
# destination layer CS (sent from l-1 towards d)
for d in range(l, self.layer_num_list[b]):
print(' - Towards layer %d: CS = %f' % (
d, cs_table_ls[d][l]))
# /max(fwd[l] for fwd in cs_table_ls if len(fwd) > l)
print('\n* Block %d filter %d:' % (b, l))
# source layer CS (received at l from s)
for s in range(len(cs_table_ls[l])):
print(' - From layer %d: CS = %f' % (
s, cs_table_ls[l][s])) # /max(cs_table_ls[l])))
# 'layer CS' for sources of l
print(' - %s for sources = %f' % (
self.layer_cs.capitalize(), lcs_src[l]))
if self.should_save_ft_logs:
# write all of the above in the feature log
self.feature_writer.write(('%s\"%f\"' % (self.ftc, lcs_dst[l])
).replace(".", self.ftd))
self.feature_writer.write('%s\"\"' % self.ftc)
for d in range(l, self.layer_num_list[b]):
self.feature_writer.write((
'%s\"%f\"' % (self.ftc, cs_table_ls[d][l])).replace(
".", self.ftd))
self.feature_writer.write('%s\"\"' % self.ftc)
for s in range(len(cs_table_ls[l])):
self.feature_writer.write((
'%s\"%f\"' % (self.ftc, cs_table_ls[l][s])).replace(
".", self.ftd))
self.feature_writer.write('%s\"\"' % self.ftc)
self.feature_writer.write(('%s\"%f\"' % (self.ftc, lcs_src[l])
).replace(".", self.ftd))
# -------------------------------------------------------------------------
# ----------------------- PROCESSING FEATURE VALUES -----------------------
# -------------------------------------------------------------------------
def get_cs_list(self, f_image, f_num):
"""
Get the list of connection strengths (CS) for all connections to a
given filter layer.
The CS of a connection is equal to the mean of its associated absolute
kernel weights (sum divided by num of weights).
Args:
f_image: `np.ndarray`, an array representation of the filter;
f_num: `int`, identifier for the filter within the block.
"""
# split kernels by groups, depending on which connection they belong to
# for this, use filter numbering (different in BC mode!)
splitting_guide = []
for i in range(int(f_num/(1+int(self.bc_mode))), 0, -1):
splitting_guide.append(f_image.shape[0] - i*self.growth_rate)
if len(splitting_guide) > 0:
f_split_image = np.split(f_image, splitting_guide)
else:
f_split_image = [f_image]
# calculate CS (means of abs weights) by groups of kernels
cs_list = []
for split in range(len(f_split_image)):
cs_list.append(np.mean(np.abs(f_split_image[split])))
return cs_list
def get_relev_dst(self, b, cs_table_ls, tresh_fract=0.67):
"""
Get the relevance for destinations for all layers (filters) in a block.
The relevance for destinations of a layer l expresses the portion of
the connections sent from l-1 that are 'relevant enough' for their
destination layers to receive information through them.
For each connection from l-1 to a future layer d, add +1/n_connections
if the connection's CS is >= tresh_fract * the max CS out of all
connections received by d.
N.B.: For l=0, the preceding l-1 is the output from the previous block.
Args:
b: `int`, identifier number for the block;
cs_table_ls: `list` of `list` of `float`, the table of CS for each
connection to a layer l from a previous layer s;
tresh_fract: `float`, the fraction of a layer's max CS that a CS
is compared to to be considered 'relevant enough'.
"""
relev_dst = []
max_cs = 0 # the max CS for each future layer
for l in range(self.layer_num_list[b]):
relev_dst.append(0)
for d in range(l, self.layer_num_list[b]):
max_cs = max(cs_table_ls[d])
relev_dst[l] += int(cs_table_ls[d][l]/max_cs >= tresh_fract)
# normalised: 0 = no relevant connections, 1 = all relevant
relev_dst[l] /= self.layer_num_list[b] - l
return relev_dst
def get_relev_src(self, b, cs_table_ls, tresh_fract=0.67):
"""
Get the relevance for sources for all layers (filters) in a block.
The relevance for sources of a layer l expresses the portion of the
connections received by l that are 'relevant enough' for their source
layers to send information through them.
For each connection from a past layer s-1 to l, add +1/n_connections
if the connection's CS is >= tresh_fract * the max CS out of all
connections sent from s-1.
N.B.: For s=0, the preceding s-1 is the output from the previous block.
Args:
b: `int`, identifier number for the block;
cs_table_ls: `list` of `list` of `float`, the table of CS for each
connection to a layer l from a previous layer s;
tresh_fract: `float`, the fraction of a layer's max CS that a CS
is compared to to be considered 'relevant enough'.
"""
relev_src = []
max_cs = 0 # the max CS for each past layer
for l in range(self.layer_num_list[b]):
relev_src.append(0)
for s in range(len(cs_table_ls[l])):
max_cs = max(fwd[s] for fwd in cs_table_ls[s:])
relev_src[l] += int(cs_table_ls[l][s]/max_cs >= tresh_fract)
# normalised: 0 = no relevant connections, 1 = all relevant
relev_src[l] /= l+1
return relev_src
def get_spread_emi(self, b, cs_table_ls, tresh_fract=0.67):
"""
Get the spread of emission for all layers (filters) in a block.
The spread of emission of a layer l expresses the portion of the
connections sent from l-1 that are 'relevant enough' for l-1 to send
(emit) information through them.
For each connection from l-1 to a future layer d, add +1/n_connections
if the connection's CS is >= tresh_fract * the max CS out of all
connections sent from l-1.
N.B.: For l=0, the preceding l-1 is the output from the previous block.
Args:
b: `int`, identifier number for the block;
cs_table_ls: `list` of `list` of `float`, the table of CS for each
connection to a layer l from a previous layer s;
tresh_fract: `float`, the fraction of a layer's max CS that a CS
is compared to to be considered 'relevant enough'.
"""
spread_emi = []
max_cs = 0 # the max CS for each future layer
for l in range(self.layer_num_list[b]):
spread_emi.append(0)
max_cs = max(fwd[l] for fwd in cs_table_ls[l:])
for d in range(l, self.layer_num_list[b]):
spread_emi[l] += int(cs_table_ls[d][l]/max_cs >= tresh_fract)
# normalised: 0 = no relevant connections, 1 = all relevant
spread_emi[l] /= self.layer_num_list[b] - l
return spread_emi
def get_spread_rec(self, b, cs_table_ls, tresh_fract=0.67):
"""
Get the spread of reception for all layers (filters) in a block.
The spread of reception of a layer l expresses the portion of the
connections received by l that are 'relevant enough' for l to receive
information through them.
For each connection from a past layer s-1 to l, add +1/n_connections
if the connection's CS is >= tresh_fract * the max CS out of all
connections received by l.
N.B.: For s=0, the preceding s-1 is the output from the previous block.
Args:
b: `int`, identifier number for the block;
cs_table_ls: `list` of `list` of `float`, the table of CS for each
connection to a layer l from a previous layer s;
tresh_fract: `float`, the fraction of a layer's max CS that a CS
is compared to to be considered 'relevant enough'.
"""
spread_rec = []
max_cs = 0 # the max CS for each past layer
for l in range(self.layer_num_list[b]):
spread_rec.append(0)
max_cs = max(cs_table_ls[l])
for s in range(len(cs_table_ls[l])):
spread_rec[l] += int(cs_table_ls[l][s]/max_cs >= tresh_fract)
# normalised: 0 = no relevant connections, 1 = all relevant
spread_rec[l] /= l+1
return spread_rec
    def process_filter(self, filter, block_num, filter_num, epoch):
        """
        Processes a given convolution filter's kernel weights; if image
        saving is enabled, also saves a PNG representation of the filter.
        Returns a list with the connection strengths (CS) for connections
        between any given layer l and each past layer s.
        N.B.: the parameter name `filter` shadows the Python builtin; it is
        kept as-is so existing keyword callers are not broken.
        Args:
            filter: tensor, the filter whose kernel weights are processed;
            block_num: `int`, identifier number for the filter's block;
            filter_num: `int`, identifier for the filter within the block;
            epoch: `int`, current training epoch (or batch).
        """
        # get an array representation of the filter, then get its dimensions
        f_image = self.sess.run(filter)
        f_d = filter.get_shape().as_list()
        # reverse the axis order, then swap the two leading axes
        f_image = f_image.transpose()
        f_image = np.moveaxis(f_image, [0, 1], [1, 0])
        # calculate connection strength for all connections
        cs_list = self.get_cs_list(f_image, filter_num)
        if self.should_save_images:
            # properly place the kernels to save the filter as an image
            f_image = np.moveaxis(f_image, [1, 2], [0, 1])
            f_image = np.resize(f_image, (f_d[1]*f_d[3], f_d[0]*f_d[2]))
            # save the image in the proper file
            im_filepath = './%s/block_%d_filter_%d' % (
                self.images_path, block_num, filter_num)
            os.makedirs(im_filepath, exist_ok=True)
            im_filepath += '/epoch_%d.png' % epoch
            # NOTE(review): scipy.misc.imsave was removed in SciPy >= 1.3;
            # consider imageio.imwrite if SciPy is ever upgraded.
            scipy.misc.imsave(im_filepath, f_image)
        return cs_list
def process_kernel(self, kernel):
"""
Process a given kernel's weights, returns the connection strength (CS)
for that kernel (the mean of its absolute weights).
Args:
kernel: tensor, the kernel whose weights are processed.
"""
# get an array representation of the kernel
k_image = self.sess.run(kernel)
# calculate its connection strength and return it
k_cs = np.mean(np.abs(k_image))
return k_cs
def process_block_filters(self, b, epoch):
"""
Process a given block's filters. Return values for features related to
the filters' kernel weights: connection strengths, 'layer CS' for
destinations, and 'layer CS' for sources. The 'layer CS' can be either
relevance or spread, depending on what is required by the algorithm.
Args:
b: `int`, identifier number for the block;
epoch: `int`, current training epoch (or batch).
"""
cs_table_ls = []
# process each filter separately (except BC bottlenecks),
# get the conection strength between each layer l and any past layer s
for f in range(len(self.filter_ref_list[b+1])):
if not self.bc_mode or not f % 2:
cs_table_ls.append(self.process_filter(
self.filter_ref_list[b+1][f], b, f, epoch))
# if the required 'layer CS' is relevance
if self.layer_cs == 'relevance':
# relevance for destinations: what portion of all the connections
# sent from a layer l-1 are relevant for their destination layers?
lcs_dst = self.get_relev_dst(b, cs_table_ls)
# relevance for sources: what portion of all the connections
# received by a layer l are relevant for their source layers?
lcs_src = self.get_relev_src(b, cs_table_ls)
# else (if the required 'layer CS' is spread)
else:
# spread of emission (for destinations): what portion of all the
# connections sent from a layer l-1 are relevant for l-1?
lcs_dst = self.get_spread_emi(b, cs_table_ls)
# spread of reception (for sources): what portion of all the
# connections received by a layer l are relevant for l?
lcs_src = self.get_spread_rec(b, cs_table_ls)
return(cs_table_ls, lcs_dst, lcs_src)
def process_layer_kernels(self, b, l, epoch):
"""
Process a given layer's kernels. Return the connection strenght value
for each kernel in the layer.
Args:
b: `int`, identifier number for the block;
l: `int`, identifier number for the layer inside the block;
epoch: `int`, current training epoch (or batch).
"""
cs_table_kernels = []
# get the conection strength for each kernel in layer l of block b
for k in range(len(self.kernels_ref_list[b][l])):
cs_table_kernels.append(
self.process_kernel(self.kernels_ref_list[b][l][k]))
return cs_table_kernels
# -------------------------------------------------------------------------
# ---------------------- DEFINING INPUT PLACEHOLDERS ----------------------
# -------------------------------------------------------------------------
def _define_inputs(self):
"""
Defines some imput placeholder tensors:
images, labels, learning_rate, is_training.
"""
shape = [None]
shape.extend(self.data_shape)
self.images = tf.placeholder(
tf.float32,
shape=shape,
name='input_images')
self.labels = tf.placeholder(
tf.float32,
shape=[None, self.n_classes],
name='labels')
self.learning_rate = tf.placeholder(
tf.float32,
shape=[],
name='learning_rate')
self.is_training = tf.placeholder(tf.bool, shape=[])
# -------------------------------------------------------------------------
# ---------------------- BUILDING THE DENSENET GRAPH ----------------------
# -------------------------------------------------------------------------
# SIMPLEST OPERATIONS -----------------------------------------------------
# -------------------------------------------------------------------------
def weight_variable_msra(self, shape, name):
"""
Creates weights for a fully-connected layer, using an initialization
method which does not scale the variance.
Args:
shape: `list` of `int`, shape of the weight matrix;
name: `str`, a name for identifying the weight matrix.
"""
# print("CREATING WEIGHT VARIABLE: " + name)
# print(shape)
return tf.get_variable(
name=name,
shape=shape,
initializer=tf.contrib.layers.variance_scaling_initializer())
def avg_pool(self, _input, k):
"""
Performs average pooling on a given input (_input),
within square kernels of side k and stride k.
Args:
_input: tensor, the operation's input;
k: `int`, the size and stride for the kernels.
"""
ksize = [1, k, k, 1]
strides = [1, k, k, 1]
padding = 'VALID'
output = tf.nn.avg_pool(_input, ksize, strides, padding)
return output
def batch_norm(self, _input, scope='BatchNorm'):
"""
Performs batch normalisation on a given input (_input).
Args:
_input: tensor, the operation's input.
scope: `str`, a variable scope for the operation.
"""
output = tf.contrib.layers.batch_norm(
_input, scale=True, is_training=self.is_training,
updates_collections=None, scope=scope)
return output
def conv2d(self, _input, out_features, kernel_size,
strides=[1, 1, 1, 1], padding='SAME'):
"""
Creates a 2d convolutional filter layer (applies a certain number of
kernels on some input features to obtain output features).
Returns the output of the layer and a reference to its filter.
Args:
_input: tensor, the operation's input;
out_features: `int`, number of feature maps at the output;
kernel_size: `int`, size of the square kernels (their side);
strides: `list` of `int`, strides in each direction for kernels;
padding: `str`, should we use padding ('SAME') or not ('VALID').
"""
in_features = int(_input.get_shape()[-1])
filter_ref = self.weight_variable_msra(
[kernel_size, kernel_size, in_features, out_features],
name='filter')
output = tf.nn.conv2d(_input, filter_ref, strides, padding)
return output, filter_ref
def conv2d_with_kernels(self, _input, out_features, kernel_size,
strides=[1, 1, 1, 1], padding='SAME'):
"""
Creates a 2d convolutional filter layer, by producing a list of 3d
kernels and then stacking them together to create the filter.
Returns the output of the layer and a reference to its convolutional
filter, as well as the newly generated list of kernels.
Args:
_input: tensor, the operation's input;
out_features: `int`, number of feature maps at the output;
kernel_size: `int`, size of the square kernels (their side);
strides: `list` of `int`, strides in each direction for kernels;
padding: `str`, should we use padding ('SAME') or not ('VALID').
"""
in_features = int(_input.get_shape()[-1])
# First create a list with the 3d kernels (easily modifiable):
kernels = []
for o in range(out_features):
kernels.append(self.weight_variable_msra(
[kernel_size, kernel_size, in_features], name='kernel'+str(o)))
# The kernels are stacked together so as to create a 4d filter
# (dimension 3 = output features).
filter_ref = tf.stack(kernels, axis=3, name='filter')
# Using the filter, the convolution is defined.
output = tf.nn.conv2d(_input, filter_ref, strides, padding)
return output, filter_ref, kernels
def conv2d_with_given_kernels(self, _input, kernels,
strides=[1, 1, 1, 1], padding='SAME'):
"""
Creates a 2d convolutional filter layer, by using a given list of 3d
kernels to create a filter (stacking them together).
Returns the output of the layer and a reference to its filter.
Args:
_input: tensor, the operation's input;
kernels: `list` of tensors, contains each of the kernels from which
the convolution will be built;
strides: `list` of `int`, strides in each direction for kernels;
padding: `str`, should we use padding ('SAME') or not ('VALID').
"""
# The kernels are stacked together so as to create a 4d filter.
# Using the same name = good idea?
filter_ref = tf.stack(kernels, axis=3, name='filter')
output = tf.nn.conv2d(_input, filter_ref, strides, padding)
return output, filter_ref
def dropout(self, _input):
"""
If the given keep_prob is not 1 AND if the graph is being trained,
performs a random dropout operation on a given input (_input).
The dropout probability is the keep_prob parameter.
Args:
_input: tensor, the operation's input.
"""
if self.keep_prob < 1:
output = tf.cond(
self.is_training,
lambda: tf.nn.dropout(_input, self.keep_prob),
lambda: _input
)
else:
output = _input
return output
# SIMPLEST OPERATIONS (FULLY CONNECTED) -----------------------------------
# -------------------------------------------------------------------------
def weight_variable_xavier(self, shape, name):
"""
Creates weights for a fully-connected layer, using the Xavier
initializer (keeps gradient scale roughly the same in all layers).
Args:
shape: `list` of `int`, shape of the weight matrix;
name: `str`, a name for identifying the weight matrix.
"""
return tf.get_variable(
name,
shape=shape,
initializer=tf.contrib.layers.xavier_initializer())
def bias_variable(self, shape, name='bias'):
"""
Creates bias terms for a fully-connected layer, initialized to 0.0.
Args:
shape: `list` of `int`, shape of the bias matrix;
name: `str`, a name for identifying the bias matrix.
"""
initial = tf.constant(0.0, shape=shape)
return tf.get_variable(name, initializer=initial)
# COMPOSITE FUNCTION + BOTTLENECK -----------------------------------------
# -------------------------------------------------------------------------
def composite_function(self, _input, out_features, kernel_size=3):
"""
Composite function H_l([x_0, ..., x_l-1]) for a dense layer.
Takes a concatenation of previous outputs and performs:
- batch normalisation;
- ReLU activation function;
- 2d convolution, with required kernel size (side);
- dropout, if required (training the graph and keep_prob not set to 1).
Returns the output tensor and a reference to the 2d convolution filter,
as well as a list of the kernels in that filter, and the input tensor
for the 2d convolution.
Args:
_input: tensor, the operation's input;
out_features: `int`, number of feature maps at the output;
kernel_size: `int`, size of the square kernels (their side).
"""
with tf.variable_scope("composite_function"):
# batch normalisation
in_cv = self.batch_norm(_input)
# ReLU activation function
in_cv = tf.nn.relu(in_cv)
# 2d convolution
output, filter_ref, kernels = self.conv2d_with_kernels(
in_cv, out_features=out_features, kernel_size=kernel_size)
# dropout (if the graph is being trained and keep_prob is not 1)
output = self.dropout(output)
return output, filter_ref, kernels, in_cv
def reconstruct_composite_function(self, in_cv, kernels):
"""
Reconstruct the output of the composite function H_l([x_0, ..., x_l-1])
for a dense layer, given the convolution's input and its kernels.
Args:
in_cv: tensor, the input of the convolution;
kernels: `list` of tensors, the kernels for the convolution.
"""
# 2d convolution
output, filter_ref = self.conv2d_with_given_kernels(
in_cv, kernels)
# dropout
output = self.dropout(output)
return output, filter_ref
def bottleneck(self, _input, out_features):
"""
Bottleneck function, used before the composite function H_l in the
dense layers of DenseNet-BC.
Takes a concatenation of previous outputs and performs:
- batch normalisation,
- ReLU activation function,
- 2d convolution, with kernel size 1 (produces 4x the features of H_l),
- dropout, if required (training the graph and keep_prob not set to 1).
Returns the output tensor and a reference to the 2d convolution kernel.
Args:
_input: tensor, the operation's input;
out_features: `int`, number of feature maps at the output of H_l;
kernel_size: `int`, size of the square kernels (their side).
"""
with tf.variable_scope("bottleneck"):
# batch normalisation
output = self.batch_norm(_input)
# ReLU activation function
output = tf.nn.relu(output)
inter_features = out_features * 4
# 2d convolution (produces intermediate features)
output, filter_ref = self.conv2d(
output, out_features=inter_features, kernel_size=1,
padding='VALID')
# dropout (if the graph is being trained and keep_prob is not 1)
output = self.dropout(output)
return output, filter_ref
# BLOCKS AND THEIR INTERNAL LAYERS ----------------------------------------
# -------------------------------------------------------------------------
def add_new_kernels_to_layer(self, _input, in_cv, layer, kernel_num,
complementarity=True, kernel_size=3):
"""
Adds new convolution kernels to a layer within a block:
creates the kernels, reconstructs the composite function, and
concatenates outputs to ensure the DenseNet paradigm.
If required, uses a complementarity mechanism to initialise the new
kernels: the sign configuration is the opposite of that of the kernels
with lowest CS, unless that configuration is already taken (in which
case it must be differnet, but close to the opposite).
Returns the layer's new output tensor.
N.B.: This function is meant to be used ONLY in self-constructing mode
(i.e. when should_self_construct is true).
Args:
_input: tensor, the layer's input;
in_cv: tensor, the input for the layer's convolution;
layer: `int`, identifier number for this layer (within a block);
kernel_num: `int`, number of new (square) kernels to be added;
complementarity: `bool`, whether the complementarity mechanism
should be used to initialise new kernels or not;
kernel_size: `int`, size of the kernels (their side).
"""
with tf.variable_scope("layer_%d" % layer):
with tf.variable_scope("composite_function"):
# if using the complementarity mechanism
if complementarity:
# get the sign distribution of all kernels in the layer
kernel_signs = []
for old_kernel in self.kernels_ref_list[-1][-1]:
kernel_signs.append(
np.sign(self.sess.run(old_kernel)))
# get the ids of the kernels with lowest CS
compl_kernels = sorted(
range(len(self.kCS_FIFO)),
key=lambda i: self.kCS_FIFO[i][-1])[:kernel_num]
# create and initialise kernel_num new kernels
in_features = int(in_cv.get_shape()[-1])
for new_k in range(kernel_num):
self.kernel_name_counter += 1
self.kernels_ref_list[-1][-1].append(
self.weight_variable_msra(
[kernel_size, kernel_size, in_features],
name='kernel'+str(self.kernel_name_counter)))
self.sess.run(tf.variables_initializer(
[self.kernels_ref_list[-1][-1][-1]]))
# if complementarity, make each new kernel complementary to
# one of the previously identified low-CS kernels
if complementarity:
# get the abs value contents of the new kernel
new_k_image = self.sess.run(
self.kernels_ref_list[-1][-1][-1])
new_k_image = np.absolute(new_k_image)
# sign distribution = opposite to the low-CS kernel
new_k_signs = -1*kernel_signs[compl_kernels[new_k]]
# check if sign distribution already exists
new_k_signs_try = new_k_signs
sign_distr_exists = True
patience = kernel_size*kernel_size*in_features
while sign_distr_exists and patience:
# compare with each of the distributions
sign_distr_exists = False
for sign_distr in kernel_signs:
sign_distr_exists = sign_distr_exists and (
new_k_signs == sign_distr).all()
# if so, switch one of the signs randomly
if sign_distr_exists:
new_k_signs_try = np.copy(new_k_signs)
new_k_signs_try[
np.random.randint(kernel_size)][
np.random.randint(kernel_size)][
np.random.randint(in_features)
] *= -1
patience -= 1
# finally, apply the sign distr and add it to the list
new_k_image = np.multiply(new_k_image, new_k_signs_try)
kernel_signs.append(new_k_signs_try)
# assign the new weight values to the kernel
self.sess.run(self.kernels_ref_list[-1][-1][-1].assign(
new_k_image))
# reconstruct the composite function from the current kernels
comp_out, filter_ref = self.reconstruct_composite_function(
in_cv, self.kernels_ref_list[-1][-1])
# save a reference to the composite function's filter
self.filter_ref_list[-1][-1] = filter_ref
# concatenate output with layer input to ensure DenseNet paradigm
if TF_VERSION[0] >= 1 and TF_VERSION[1] >= 0:
output = tf.concat(axis=3, values=(_input, comp_out))
else:
output = tf.concat(3, (_input, comp_out))
# Keep track of kernel CS.
self.kCS_FIFO.extend([
deque(maxlen=self.dkCS_softening) for i in range(kernel_num)])
self.dkCS_FIFO.extend([
deque(maxlen=self.dkCS_std_window) for i in range(kernel_num)])
return output
    def remove_kernels_from_layer(self, _input, in_cv, layer,
                                  kernels_to_prune):
        """
        Removes specific convolution kernels in a layer within a block:
        removes the kernels from the list, reconstructs the composite function,
        and concatenates outputs to ensure the DenseNet paradigm.
        Returns the layer's new output tensor.
        N.B.: This function is meant to be used ONLY in self-constructing mode
        (i.e. when should_self_construct is true).
        Args:
            _input: tensor, the layer's input;
            in_cv: tensor, the input for the layer's convolution;
            layer: `int`, identifier number for this layer (within a block);
            kernels_to_prune: `list` of `int`, the specific kernels to remove.
        """
        with tf.variable_scope("layer_%d" % layer):
            with tf.variable_scope("composite_function"):
                # remove the kernels specified in kernels_to_prune
                # NOTE(review): in_features is computed but never used here
                in_features = int(in_cv.get_shape()[-1])
                print("\nPre-pruning kernels_ref_list length: %d" % len(
                    self.kernels_ref_list[-1][-1]))
                for i in reversed(kernels_to_prune):
                    # iterate backwards so that kernel ids remain meaningful
                    self.pruned_varnames.append(
                        self.kernels_ref_list[-1][-1][i].name)
                    del self.kernels_ref_list[-1][-1][i]
                for elem in self.pruned_varnames:
                    print(elem)
                print("Post-pruning kernels_ref_list length: %d\n" % len(
                    self.kernels_ref_list[-1][-1]))
                # reconstruct the composite function from the current kernels
                comp_out, filter_ref = self.reconstruct_composite_function(
                    in_cv, self.kernels_ref_list[-1][-1])
                # save a reference to the composite function's filter
                self.filter_ref_list[-1][-1] = filter_ref
            # concatenate output with layer input to ensure DenseNet paradigm
            if TF_VERSION[0] >= 1 and TF_VERSION[1] >= 0:
                output = tf.concat(axis=3, values=(_input, comp_out))
            else:
                output = tf.concat(3, (_input, comp_out))
            # Drop the CS histories of the pruned kernels, backwards so the
            # remaining indices stay aligned with kernels_ref_list.
            for i in reversed(kernels_to_prune):
                del self.kCS_FIFO[i], self.dkCS_FIFO[i]
        return output
    def add_internal_layer(self, _input, layer, growth_rate):
        """
        Adds a new convolutional (dense) layer within a block.
        This layer will perform the composite function H_l([x_0, ..., x_l-1])
        to obtain its output x_l (preceded by a 1x1 bottleneck in BC mode).
        It will then concatenate x_l with the layer's input: all the outputs of
        the previous layers, resulting in [x_0, ..., x_l-1, x_l].
        Returns the layer's output, as well as the input of its conv2d.
        Args:
            _input: tensor, the operation's input;
            layer: `int`, identifier number for this layer (within a block);
            growth_rate: `int`, number of new convolutions per dense layer.
        """
        with tf.variable_scope("layer_%d" % layer):
            # use the composite function H_l (3x3 kernel conv)
            if not self.bc_mode:
                comp_out, filter_ref, kernels, in_cv = self.composite_function(
                    _input, out_features=growth_rate, kernel_size=3)
            # in DenseNet-BC mode, add a bottleneck layer before H_l (1x1 conv)
            elif self.bc_mode:
                bottleneck_out, filter_ref = self.bottleneck(
                    _input, out_features=growth_rate)
                # in BC mode the bottleneck filter is also tracked, so filter
                # references alternate bottleneck/composite in the list
                if self.ft_filters or self.should_self_construct:
                    self.filter_ref_list[-1].append(filter_ref)
                comp_out, filter_ref, kernels, in_cv = self.composite_function(
                    bottleneck_out, out_features=growth_rate, kernel_size=3)
            # save a reference to the composite function's filter
            if self.ft_filters or self.should_self_construct:
                self.filter_ref_list[-1].append(filter_ref)
            if self.ft_kernels or self.should_self_construct:
                # kernel names restart from the initial growth_rate kernels
                self.kernel_name_counter = growth_rate-1
                self.kernels_ref_list[-1].append(kernels)
            # concatenate output of H_l with layer input (all previous outputs)
            if TF_VERSION[0] >= 1 and TF_VERSION[1] >= 0:
                output = tf.concat(axis=3, values=(_input, comp_out))
            else:
                output = tf.concat(3, (_input, comp_out))
        # If self-constructing at kernel level, keep track of kernel CS:
        # one fresh history queue per initial kernel of this layer.
        if self.has_micro_algo:
            self.kCS_FIFO = [
                deque(maxlen=self.dkCS_softening) for i in range(growth_rate)]
            self.dkCS_FIFO = [
                deque(maxlen=self.dkCS_std_window) for i in range(growth_rate)]
        return output, in_cv
    def add_block(self, _input, block, growth_rate, layers_in_block, is_last):
        """
        Adds a new block containing several convolutional (dense) layers.
        These are connected together following a DenseNet architecture,
        as defined in the paper.
        Returns the block's output, as well as the inputs to the last layer
        and to its conv2d.
        Args:
            _input: tensor, the operation's input;
            block: `int`, identifier number for this block;
            growth_rate: `int`, number of new convolutions per dense layer;
            layers_in_block: `int`, number of dense layers in this block;
            is_last: `bool`, is this the last block in the network or not.
        """
        # start fresh per-block sublists of filter/kernel references
        if self.ft_filters or self.should_self_construct:
            self.filter_ref_list.append([])
        if self.ft_kernels or self.should_self_construct:
            self.kernels_ref_list.append([])
        if is_last:
            self.cross_entropy = []
        with tf.variable_scope("Block_%d" % block) as self.current_block:
            output = _input
            for layer in range(layers_in_block):
                # The inputs of the last layer and its conv2d must be saved
                # (useful for self-construction kernel by kernel)
                input_lt_lay = output
                output, input_lt_cnv = self.add_internal_layer(
                    input_lt_lay, layer, growth_rate)
                if self.ft_cross_entropies and is_last:
                    # Save the cross-entropy for all layers except the last one
                    # (it is always saved as part of the end-graph operations)
                    if layer != layers_in_block-1:
                        _, cross_entropy = self.cross_entropy_loss(
                            output, self.labels, block, layer,
                            preserve_transition=self.preserve_transition_l)
                        self.cross_entropy.append(cross_entropy)
        return output, input_lt_lay, input_lt_cnv
# TRANSITION LAYERS -------------------------------------------------------
# -------------------------------------------------------------------------
    def transition_layer(self, _input, block):
        """
        Adds a new transition layer after a block. This layer's inputs are the
        concatenated feature maps of each layer in the block.
        The layer first runs the composite function with kernel size 1:
        - In DenseNet mode, it produces as many feature maps as the input had.
        - In DenseNet-BC mode, it produces reduction (theta) times as many,
          compressing the output.
        Afterwards, an average pooling operation (of size 2) is carried to
        change the output's size.
        Args:
            _input: tensor, the operation's input;
            block: `int`, identifier number for the previous block.
        Returns:
            The transition layer's output tensor.
        """
        with tf.variable_scope("Transition_after_block_%d" % block):
            # add feature map compression in DenseNet-BC mode
            out_features = int(int(_input.get_shape()[-1]) * self.reduction)
            # use the composite function H_l (1x1 kernel conv)
            output, filter_ref, kernels, in_cv = self.composite_function(
                _input, out_features=out_features, kernel_size=1)
            # save a reference to the composite function's filter
            # (appended to the lists opened by add_block for this block)
            if self.ft_filters or self.should_self_construct:
                self.filter_ref_list[-1].append(filter_ref)
            if self.ft_kernels or self.should_self_construct:
                self.kernels_ref_list[-1].append(kernels)
            # use average pooling to reduce feature map size
            output = self.avg_pool(output, k=2)
        return output
    def transition_layer_to_classes(self, _input, block, layer):
        """
        Adds the transition layer after the last block. This layer outputs the
        estimated probabilities by classes.
        It performs:
        - batch normalisation,
        - ReLU activation function,
        - wider-than-normal average pooling,
        - reshaping the output into a 1d tensor,
        - fully-connected layer (matrix multiplication, weights and biases).
        Args:
            _input: tensor, the operation's input;
            block: `int`, identifier number for the last block;
            layer: `int`, identifier number for the last layer in that block.
        Returns:
            The logits tensor (pre-softmax class scores).
        """
        # Number of incoming feature maps; also the number of FC weight rows.
        self.features_total = int(_input.get_shape()[-1])
        var_scope = "Transition_to_FC_block_%d" % block
        FC_name = "FC_block_%d" % block
        if not self.preserve_transition_l:
            # Without transition preservation, each layer gets its own scope
            # and FC variables (earlier transitions are discarded).
            var_scope += "_layer_%d" % layer
            FC_name += "_layer_%d" % layer
        with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):
            # Batch normalisation (counter restarts at 0 for a new transition).
            self.batch_norm_counter = 0
            output = self.batch_norm(
                _input, scope='BatchNorm_'+str(self.batch_norm_counter))
            # ReLU activation function.
            output = tf.nn.relu(output)
            # Wide average pooling (kernel spans the whole feature map side).
            last_pool_kernel = int(output.get_shape()[-2])
            output = self.avg_pool(output, k=last_pool_kernel)
            # Reshaping the output into 1d.
            output = tf.reshape(output, [-1, self.features_total])
            # FC (fully-connected) layer. The weight matrix is stored as a
            # list of per-feature rows, so individual rows can later be
            # appended (new kernels) or deleted (pruned kernels).
            self.FC_W = []
            for i in range(self.features_total):
                self.FC_W.append(self.weight_variable_xavier(
                    [self.n_classes], name=FC_name+("_W%d" % i)))
            self.FC_W_counter = self.features_total-1
            self.FC_bias = self.bias_variable(
                [self.n_classes], name=FC_name+"_bias")
            stacked_FC_W = tf.stack(self.FC_W, axis=0)
            logits = tf.matmul(output, stacked_FC_W) + self.FC_bias
        return logits
    def reconstruct_transition_to_classes(self, _input, block, layer):
        """
        Reconstruct the transition layer to classes after adding a new kernel
        or layer in the last block (in such a case, the transition layer must
        remain mostly unchanged except for the new weights).
        Assumes new features are appended AFTER the pre-existing ones in the
        input tensor's channel dimension (old batch-norm params and FC rows
        are copied into the leading positions).
        Args:
            _input: tensor, the operation's input;
            block: `int`, identifier number for the last block;
            layer: `int`, identifier number for the last layer in that block.
        Returns:
            The logits tensor (pre-softmax class scores).
        """
        new_features_total = int(_input.get_shape()[-1])
        var_scope = "Transition_to_FC_block_%d" % block
        FC_name = "FC_block_%d" % block
        if not self.preserve_transition_l:
            var_scope += "_layer_%d" % layer
            FC_name += "_layer_%d" % layer
        with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):
            # The batch norm contains beta and gamma params for each kernel,
            # we first copy the param values from old kernels.
            beta_values = self.sess.run(tf.get_variable(
                "BatchNorm_"+str(self.batch_norm_counter)+"/beta",
                [self.features_total]))
            gamma_values = self.sess.run(tf.get_variable(
                "BatchNorm_"+str(self.batch_norm_counter)+"/gamma",
                [self.features_total]))
            # Then we create a new batch norm and initialize its params.
            self.batch_norm_counter += 1
            output = self.batch_norm(
                _input, scope='BatchNorm_'+str(self.batch_norm_counter))
            new_beta = tf.get_variable(
                "BatchNorm_"+str(self.batch_norm_counter)+"/beta",
                [new_features_total])
            new_gamma = tf.get_variable(
                "BatchNorm_"+str(self.batch_norm_counter)+"/gamma",
                [new_features_total])
            self.sess.run(tf.variables_initializer([new_beta, new_gamma]))
            # For these params, we copy the old param values, and leave
            # the remaining new values for the new kernels.
            new_beta_values = self.sess.run(new_beta)
            new_gamma_values = self.sess.run(new_gamma)
            difference = new_features_total-self.features_total
            new_beta_values[:-difference] = beta_values
            new_gamma_values[:-difference] = gamma_values
            # Then we assign the modified values to reconstruct the batch norm.
            self.sess.run(new_beta.assign(new_beta_values))
            self.sess.run(new_gamma.assign(new_gamma_values))
            self.features_total = new_features_total
            # ReLU, average pooling, and reshaping into 1d
            # these do not contain any trainable params, so they are rewritten.
            output = tf.nn.relu(output)
            last_pool_kernel = int(output.get_shape()[-2])
            output = self.avg_pool(output, k=last_pool_kernel)
            features_total = int(output.get_shape()[-1])
            output = tf.reshape(output, [-1, features_total])
            # For the FC layer: add new weights, keep biases and old weights.
            # FC_W_counter keeps variable names unique across reconstructions.
            for i in range(len(self.FC_W), features_total):
                self.FC_W_counter += 1
                self.FC_W.append(self.weight_variable_xavier(
                    [self.n_classes],
                    name=FC_name+("_W%d" % self.FC_W_counter)))
            stacked_FC_W = tf.stack(self.FC_W, axis=0)
            logits = tf.matmul(output, stacked_FC_W) + self.FC_bias
        return logits
def reconstruct_transition_to_classes_post_pruning(self, _input,
block, layer,
kernels_to_prune):
"""
Reconstruct the transition layer to classes after pruning kernels in
the last layer of the last block (in such a case, the transition layer
must remain mostly unchanged and unused weights must be removed).
Args:
_input: tensor, the operation's input;
block: `int`, identifier number for the last block;
layer: `int`, identifier number for the last layer in that block;
kernels_to_prune: `list` of `int`, gives the specific kernels that
were pruned in the last layer.
"""
new_features_total = int(_input.get_shape()[-1])
var_scope = "Transition_to_FC_block_%d" % block
if not self.preserve_transition_l:
var_scope += "_layer_%d" % layer
with tf.variable_scope(var_scope, reuse=tf.AUTO_REUSE):
# Copy the batch norm beta and gamma param values from old kernels.
beta_values = self.sess.run(tf.get_variable(
"BatchNorm_"+str(self.batch_norm_counter)+"/beta",
[self.features_total]))
gamma_values = self.sess.run(tf.get_variable(
"BatchNorm_"+str(self.batch_norm_counter)+"/gamma",
[self.features_total]))
# Create a new batch norm and get its param variables.
self.batch_norm_counter += 1
output = self.batch_norm(
_input, scope='BatchNorm_'+str(self.batch_norm_counter))
new_beta = tf.get_variable(
"BatchNorm_"+str(self.batch_norm_counter)+"/beta",
[new_features_total])
new_gamma = tf.get_variable(
"BatchNorm_"+str(self.batch_norm_counter)+"/gamma",
[new_features_total])
# self.sess.run(tf.variables_initializer([new_beta, new_gamma]))
# Copy the param values corresponding to the remaining kernels.
prepruning_kernel_count = len(self.kernels_ref_list[block][-1])
prepruning_kernel_count += len(kernels_to_prune)
difference = self.features_total - prepruning_kernel_count
new_beta_values = beta_values[:difference]
new_gamma_values = gamma_values[:difference]
for k in range(prepruning_kernel_count):
if k not in kernels_to_prune:
new_beta_values = np.append(
new_beta_values, beta_values[k+difference])
new_gamma_values = np.append(
new_gamma_values, gamma_values[k+difference])
print(new_features_total)
print(len(new_beta_values))
print("%d (difference) = %d (new_features_total) - %d (current_kernel_count)" % (
difference, new_features_total, prepruning_kernel_count-len(kernels_to_prune)))
print("%d (old difference) = %d (features_total) - %d (prepruning_kernel_count)" % (
difference, self.features_total, prepruning_kernel_count))
# Assign those param values to reconstruct the batch norm.
self.sess.run(new_beta.assign(new_beta_values))
self.sess.run(new_gamma.assign(new_gamma_values))
self.features_total = new_features_total
# Rewrite: ReLU, average pooling, and reshaping into 1d.
output = tf.nn.relu(output)
last_pool_kernel = int(output.get_shape()[-2])
output = self.avg_pool(output, k=last_pool_kernel)
features_total = int(output.get_shape()[-1])
output = tf.reshape(output, [-1, features_total])
# For the FC layer: remove weights for unpruned kernels, keep all else.
for i in reversed(kernels_to_prune):
self.pruned_varnames.append(self.FC_W[difference+i].name)
del self.FC_W[difference+i]
stacked_FC_W = tf.stack(self.FC_W, axis=0)
logits = tf.matmul(output, stacked_FC_W) + self.FC_bias
return logits
# END GRAPH OPERATIONS ----------------------------------------------------
# -------------------------------------------------------------------------
def cross_entropy_loss(self, _input, labels, block, layer,
preserve_transition=False, kernels_to_prune=None):
"""
Takes an input and adds a transition layer to obtain predictions for
classes. Then calculates the cross-entropy loss for that input with
respect to expected labels. Returns the prediction tensor and the
calculated cross-entropy.
Args:
_input: tensor, the operation's input;
labels: tensor, the expected labels (classes) for the data;
block: `int`, identifier number for the last block;
layer: `int`, identifier number for the last layer in that block;
preserve_transition: `bool`, whether or not to preserve the
transition to classes (if yes, adapts the previous transition,
otherwise creates a new one).
kernels_to_prune: `list` of `int` or None, identifiers of recently
pruned kernels (used after kernel-level pruning, otherwise its
value is None).
"""
# add the FC transition layer to the classes (+ softmax)
if preserve_transition:
# the reconstruction depends on the las self-constructing action
if kernels_to_prune is None:
logits = self.reconstruct_transition_to_classes(
_input, block, layer)
else:
logits = self.reconstruct_transition_to_classes_post_pruning(
_input, block, layer, kernels_to_prune)
else:
logits = self.transition_layer_to_classes(_input, block, layer)
prediction = tf.nn.softmax(logits)
# set the calculation for the losses (cross_entropy and l2_loss)
if TF_VERSION[0] >= 1 and TF_VERSION[1] >= 5:
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits,
labels=labels))
else:
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits=logits,
labels=labels))
return prediction, cross_entropy
    def _define_end_graph_operations(self, preserve_transition=False,
                                     kernels_to_prune=None):
        """
        Adds the last layer on top of the (editable portion of the) graph.
        Then defines the operations for cross-entropy, the training step,
        and the accuracy.
        Args:
            preserve_transition: `bool`, whether or not to preserve the
                transition to classes (if yes, adapts the previous transition,
                otherwise creates a new one);
            kernels_to_prune: `list` of `int` or None, identifiers of recently
                pruned kernels (used after kernel-level pruning, otherwise its
                value is None).
        """
        # obtain the predicted logits, set the calculation for the losses
        # (cross_entropy and l2_loss)
        prediction, cross_entropy = self.cross_entropy_loss(
            self.output, self.labels, self.total_blocks-1,
            self.layer_num_list[-1]-1, preserve_transition, kernels_to_prune)
        self.cross_entropy.append(cross_entropy)
        # L2 regularization and training only cover variables currently
        # 'in use' (discarded transitions and pruned elements are excluded).
        var_list = self.get_variables_in_use()
        l2_loss = tf.add_n(
            [tf.nn.l2_loss(var) for var in var_list])
        # set the optimizer and define the training step
        optimizer = tf.train.MomentumOptimizer(
            self.learning_rate, self.nesterov_momentum, use_nesterov=True)
        self.train_step = optimizer.minimize(
            cross_entropy + l2_loss * self.weight_decay, var_list=var_list)
        # set the calculation for the accuracy
        correct_prediction = tf.equal(
            tf.argmax(prediction, 1),
            tf.argmax(self.labels, 1))
        self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# MAIN GRAPH BUILDING FUNCTIONS -------------------------------------------
# -------------------------------------------------------------------------
    def _new_kernels_to_last_layer(self, kernel_num, complementarity=True):
        """
        Add new convolution kernels to the current last (composite) layer.
        The number of kernels to be added is given by the kernel_num parameter.
        Args:
            kernel_num: `int`, number of new kernels (i.e. convolutions) to add
                to the last layer (usually the specified expansion rate);
            complementarity: `bool`, whether a complementarity mechanism
                should be used to initialise new kernels or not
                (see add_new_kernels_to_layer for more on this).
        """
        # Safely access the current block's variable scope.
        with tf.variable_scope(self.current_block,
                               auxiliary_name_scope=False) as cblock_scope:
            with tf.name_scope(cblock_scope.original_name_scope):
                # Add the kernel and save the new relevant inputs and outputs.
                self.output = self.add_new_kernels_to_layer(
                    self.input_lt_lay, self.input_lt_cnv,
                    self.layer_num_list[-1]-1, kernel_num,
                    complementarity=complementarity)
        # Delete the last cross-entropy from the list, we will recreate it.
        del self.cross_entropy[-1]
        print("ADDED %d NEW KERNEL(S) TO LAYER #%d (BLOCK #%d)! "
              "It now has got %d kernels." %
              (kernel_num, self.layer_num_list[-1]-1, self.total_blocks-1,
               len(self.kernels_ref_list[-1][-1])))
        # Rebuild the end-graph ops, keeping the existing transition weights.
        self._define_end_graph_operations(preserve_transition=True)
        self._initialize_uninitialized_variables()
        self._count_trainable_params_in_use()
    def _prune_kernels_in_last_layer(self, kernels_to_prune):
        """
        Prune some convolution kernels in the current last (composite) layer.
        The specific kernels to be pruned are given as a list (the
        kernels_to_prune parameter).
        Args:
            kernels_to_prune: `list` of `int`, gives the specific kernels (i.e.
                convolutions) to prune in the last layer.
        """
        # Safely access the current block's variable scope.
        with tf.variable_scope(self.current_block,
                               auxiliary_name_scope=False) as cblock_scope:
            with tf.name_scope(cblock_scope.original_name_scope):
                # Remove the kernels, save the new relevant inputs and outputs.
                self.output = self.remove_kernels_from_layer(
                    self.input_lt_lay, self.input_lt_cnv,
                    self.layer_num_list[-1]-1, kernels_to_prune)
        # Delete the last cross-entropy from the list, we will recreate it.
        del self.cross_entropy[-1]
        print("PRUNED KERNELS (%s) IN LAYER #%d (BLOCK #%d)! "
              "It now has got %d kernels." %
              (', '.join(map(str, kernels_to_prune)),
               self.layer_num_list[-1]-1, self.total_blocks-1,
               len(self.kernels_ref_list[-1][-1])))
        # Register which kernels were pruned in the ft-logs.
        if self.should_save_ft_logs:
            self.feature_writer.write(
                '\"Pruned: {}\"\n'.format(kernels_to_prune))
        # Rebuild the end-graph ops, dropping the pruned kernels' weights.
        self._define_end_graph_operations(preserve_transition=True,
                                          kernels_to_prune=kernels_to_prune)
        self._initialize_uninitialized_variables()
        self._count_trainable_params_in_use()
    def _new_layer(self, growth_rate):
        """
        Add a new layer at the end of the current last block.
        In DenseNet-BC mode, two layers (bottleneck and composite/convolution)
        will be added instead of just one.
        Args:
            growth_rate: `int`, number of kernels (i.e. convolutions) in the
                new composite layer (usually the specified growth rate).
        """
        # Safely access the current block's variable scope.
        with tf.variable_scope(self.current_block,
                               auxiliary_name_scope=False) as cblock_scope:
            with tf.name_scope(cblock_scope.original_name_scope):
                # Add the layer and save the new relevant inputs and outputs.
                self.input_lt_lay = self.output
                self.output, self.input_lt_cnv = self.add_internal_layer(
                    self.input_lt_lay, self.layer_num_list[-1], growth_rate)
        self.layer_num_list[-1] += 1
        # Refresh cross-entropy list if not measuring layer cross-entropies.
        if not self.ft_cross_entropies:
            self.cross_entropy = []
        if not self.bc_mode:
            print("ADDED A NEW LAYER (%d kernels) to the last block (#%d)! "
                  "It now has got %d layers." %
                  (growth_rate, self.total_blocks-1, self.layer_num_list[-1]))
        if self.bc_mode:
            print("ADDED A NEW BOTTLENECK AND A NEW COMPOSITE LAYER "
                  "(%d kernels) to the last block (#%d)! "
                  "It now has got %d bottleneck and %d composite layers." %
                  (growth_rate, self.total_blocks-1,
                   self.layer_num_list[-1], self.layer_num_list[-1]))
        # Update save/log paths (they encode the architecture), then rebuild
        # the end-graph ops and initialize only the newly created variables.
        self.update_paths()
        self._define_end_graph_operations(
            preserve_transition=self.preserve_transition_l)
        self._initialize_uninitialized_variables()
        self._count_trainable_params_in_use()
def _new_block(self):
"""
Add a transition layer, and a new block (with one layer) at the end
of the current last block.
In DenseNet-BC mode, the new module will begin with two layers
(bottleneck and composite/convolution) instead of just one.
"""
# The input of the last block is useful if the block must be ditched.
self.input_lt_blc = self.transition_layer(
self.output, self.total_blocks-1)
# The inputs of the last layer and conv are for kernel-wise self-const.
self.output, self.input_lt_lay, self.input_lt_cnv = self.add_block(
self.input_lt_blc, self.total_blocks, self.growth_rate, 1, True)
self.layer_num_list.append(1)
self.total_blocks += 1
print("ADDED A NEW BLOCK (#%d), "
"The number of layers in each block is now:" %
(self.total_blocks-1))
if not self.bc_mode:
print('\n'.join('Block %d: %d composite layers.' % (
k, self.layer_num_list[k]) for k in range(len(
self.layer_num_list))))
if self.bc_mode:
print('\n'.join('Block %d: %d bottleneck layers and %d composite'
'layers.' % (k, self.layer_num_list[k],
self.layer_num_list[k])
for k in range(len(self.layer_num_list))))
self.update_paths()
self._define_end_graph_operations()
self._initialize_uninitialized_variables()
self._count_trainable_params_in_use()
    def _build_graph(self):
        """
        Builds the graph and defines the operations for:
        cross-entropy (also l2_loss and a momentum optimizer),
        training step (minimize momentum optimizer using l2_loss + cross-entr),
        accuracy (reduce mean).
        """
        growth_rate = self.growth_rate
        layers_in_each_block = self.layer_num_list
        self.output = self.images
        # first add a 3x3 convolution layer with first_output_features outputs
        with tf.variable_scope("Initial_convolution"):
            self.input_lt_blc, filter_ref = self.conv2d(
                self.output, out_features=self.first_output_features,
                kernel_size=3)
        # Reference lists feed feature logging and self-construction metrics;
        # the initial conv's filter seeds the filter list.
        if self.ft_filters or self.should_self_construct:
            self.filter_ref_list = [[filter_ref]]
        if self.ft_kernels or self.should_self_construct:
            self.kernels_ref_list = []
        # then add the required blocks (and save the relevant inputs)
        for block in range(self.total_blocks):
            self.output, self.input_lt_lay, self.input_lt_cnv = self.add_block(
                self.input_lt_blc, block, growth_rate,
                layers_in_each_block[block], block == self.total_blocks - 1)
            # all blocks except the last have transition layers
            if block != self.total_blocks - 1:
                self.input_lt_blc = self.transition_layer(self.output, block)
        # finally wire up loss, train step, and accuracy on top of the blocks
        self._define_end_graph_operations()
# -------------------------------------------------------------------------
# ------------------ INITIALIZING THE TENSORFLOW SESSION ------------------
# -------------------------------------------------------------------------
def _initialize_uninitialized_variables(self):
"""
Finds the references to all uninitialized variables, then tells
TensorFlow to initialize these variables.
"""
# get a set with all the names of uninitialized variables
uninit_varnames = list(map(str, self.sess.run(
tf.report_uninitialized_variables())))
uninit_vars = []
# for every variable, check if its name is in the uninitialized set
for var in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):
varname = 'b\'' + var.name.split(':')[0] + '\''
if varname in uninit_varnames:
uninit_vars.append(var)
# initialize all the new variables
self.sess.run(tf.variables_initializer(uninit_vars))
def _initialize_all_variables(self):
"""
Tells TensorFlow to initialize all variables, using the proper method
for the TensorFlow version.
"""
if TF_VERSION[0] >= 0 and TF_VERSION[1] >= 10:
self.sess.run(tf.global_variables_initializer())
else:
self.sess.run(tf.initialize_all_variables())
def _initialize_session(self):
"""
Starts a TensorFlow session with the correct configuration.
Then tells TensorFlow to initialize all variables, create a saver
and a log file writer.
"""
config = tf.ConfigProto()
# specify the CPU inter and intra threads used by MKL
config.intra_op_parallelism_threads = self.num_intra_threads
config.inter_op_parallelism_threads = self.num_inter_threads
# restrict model GPU memory utilization to the minimum required
config.gpu_options.allow_growth = True
self.sess = tf.Session(config=config)
# initialize variables, create saver, create log file writers
self._initialize_all_variables()
self.saver = tf.train.Saver()
if self.should_save_logs:
if TF_VERSION[0] >= 0 and TF_VERSION[1] >= 10:
logswriter = tf.summary.FileWriter
else:
logswriter = tf.train.SummaryWriter
self.summary_writer = logswriter(self.logs_path)
if self.should_save_ft_logs:
self.feature_writer = open('./%s.csv' % self.ft_logs_path, "w")
# -------------------------------------------------------------------------
# ------------------- COUNTING ALL TRAINABLE PARAMETERS -------------------
# -------------------------------------------------------------------------
def _count_trainable_params(self):
"""
Uses TensorFlow commands to count the number of trainable parameters
in the graph (sum of the multiplied dimensions of each TF variable).
Then prints the number of parameters.
"""
total_parameters = 0
# print("Variable names:")
for variable in tf.trainable_variables():
# print(variable.name)
shape = variable.get_shape()
variable_parameters = 1
for dim in shape:
variable_parameters *= dim.value
total_parameters += variable_parameters
print("Total trainable params: %.1fk" % (total_parameters / 1e3))
    def _count_trainable_params_in_use(self, write_to_ft_logs=False):
        """
        Uses TensorFlow commands to count the total number of trainable
        parameters in the graph, as well as the number of parameters that are
        currently 'in use'. This refers specifically to: the multiplied
        dimensions of each TF variable that is not a discarded transition to
        classes or batch normalization, or a pruned element.
        The method prints not only the number of parameters, but also the
        number of parameters in the convolutional and fully connected parts
        of the TensorFlow graph.
        Args:
            write_to_ft_logs: `bool`, if feature logs are being written,
                whether or not to write the parameter counts to those logs;
        """
        total_parameters = 0
        conv_params_in_use = 0
        fc_params_in_use = 0
        # Name prefixes identifying the CURRENT transition-to-classes and FC
        # variables (must mirror the naming in transition_layer_to_classes).
        fc_name = 'FC_'
        t2fc_name = 'Transition_to_FC_'
        suffix = 'block_%d' % (self.total_blocks-1)
        if not self.preserve_transition_l:
            suffix += '_layer_%d' % (self.layer_num_list[-1]-1)
        true_fc_name = fc_name + suffix
        true_t2fc_name = t2fc_name + suffix
        true_t2fc_name += '/BatchNorm_%d' % self.batch_norm_counter
        # print("Variable names:")
        for variable in tf.trainable_variables():
            # print(variable.name)
            shape = variable.get_shape()
            variable_parameters = 1
            for dim in shape:
                variable_parameters *= dim.value
            # Add all identified parameters to total_parameters.
            total_parameters += variable_parameters
            # From here onwards only consider non-pruned parameters.
            if not (self.should_self_construct and
                    variable.name in self.pruned_varnames):
                # Add params from the current FC layer to fc_params_in_use.
                if variable.name.startswith(true_fc_name):
                    fc_params_in_use += variable_parameters
                # Add params from the current batchnorm to conv_params_in_use.
                elif variable.name.startswith(true_t2fc_name):
                    conv_params_in_use += variable_parameters
                # Add params not in a rejected batchnorm or FC layer (to conv).
                elif (not variable.name.startswith(fc_name) and
                        not variable.name.startswith(t2fc_name)):
                    conv_params_in_use += variable_parameters
        # Add together the two counts for parameters 'in use'.
        total_parameters_in_use = conv_params_in_use + fc_params_in_use
        print("Total trainable params: %.1fk" % (total_parameters / 1e3))
        print("Total params in use: %.1fk" % (total_parameters_in_use / 1e3))
        print("\tConvolutional: %.1fk" % (conv_params_in_use / 1e3))
        print("\tFully Connected: %.1fk" % (fc_params_in_use / 1e3))
        if self.should_save_ft_logs and write_to_ft_logs:
            self.feature_writer.write("\nTotal trainable params: %.1fk\n" % (
                total_parameters / 1e3))
            self.feature_writer.write("Total params in use: %.1fk\n" % (
                total_parameters_in_use / 1e3))
            self.feature_writer.write("Convolutional: %.1fk\n" % (
                conv_params_in_use / 1e3))
            self.feature_writer.write("Fully Connected: %.1fk\n" % (
                fc_params_in_use / 1e3))
def get_variables_in_use(self):
"""
Get a list of the trainable variables in the graph that are currently
'in use' (all variables except those in discarded transitions to
classes or batch normalizations, or in pruned elements).
"""
vars_in_use = []
fc_name = 'FC_'
t2fc_name = 'Transition_to_FC_'
suffix = 'block_%d' % (self.total_blocks-1)
if not self.preserve_transition_l:
suffix += '_layer_%d' % (self.layer_num_list[-1]-1)
true_fc_name = fc_name + suffix
true_t2fc_name = t2fc_name + suffix
true_t2fc_name += '/BatchNorm_%d' % self.batch_norm_counter
for variable in tf.trainable_variables():
# From here onwards only consider non-pruned parameters.
if not (self.should_self_construct and
variable.name in self.pruned_varnames):
# Add variables from the current FC layer.
if variable.name.startswith(true_fc_name):
vars_in_use.append(variable)
# Add variables from the current batchnorm.
elif variable.name.startswith(true_t2fc_name):
vars_in_use.append(variable)
# Add variables not in a rejected batchnorm or FC layer.
elif (not variable.name.startswith(fc_name) and
not variable.name.startswith(t2fc_name)):
vars_in_use.append(variable)
# print("Variables in use:")
# for var in vars_in_use:
# print(var.name)
# if len(self.pruned_varnames) != 0:
# print("Pruned variable names:")
# for varname in self.pruned_varnames:
# print(varname)
return vars_in_use
# -------------------------------------------------------------------------
# -------------------- TRAINING AND TESTING THE MODEL ---------------------
# -------------------------------------------------------------------------
    def print_pertinent_features(self, loss, accuracy, epoch, validation_set):
        """
        Prints on console the current values of pertinent features.
        The loss and accuracy are those on the validation set if such a set is
        being used, otherwise they are those on the training set.
        If feature logs are being saved, this function saves feature values.
        If images are being saved, it also saves filter features as images.
        Args:
            loss: `list` of `float` (if validation_set == True, else `float`),
                loss (cross_entropy) for this epoch, in some cases (as `list`
                of `float`) contains several loss values, each corresponding to
                each internal layer of the last block;
            accuracy: `float`, accuracy for this epoch;
            epoch: `int`, current training epoch;
            validation_set: `bool`, whether a validation set is used or not.
        """
        # print the current accuracy
        print("Current accuracy = %f" % accuracy)
        if validation_set:
            # print a cross-entropy value for each layer, if calculating them
            if self.ft_cross_entropies:
                print("Cross-entropy per layer in block #%d:" % (
                    self.total_blocks-1))
                for l in range(len(loss)):
                    print("* Layer #%d: cross-entropy = %f" % (l, loss[l]))
            # else print only the current validation cross-entropy
            else:
                print("Current cross-entropy = %f" % loss[-1])
        else:
            print("Current cross-entropy = %f" % loss)
        if self.should_save_ft_logs:
            # save the previously printed feature values
            # (self.ftc and self.ftd act as the CSV column and decimal
            # separators respectively, judging by their use here —
            # NOTE(review): confirm where they are configured)
            self.feature_writer.write(("\"%d\"%s\"%f\"%s" % (
                epoch, self.ftc, accuracy, self.ftc)).replace(".", self.ftd))
            if validation_set:
                for l in range(len(loss)):
                    self.feature_writer.write(("\"%f\"%s" % (loss[l], self.ftc)
                                               ).replace(".", self.ftd))
            else:
                self.feature_writer.write(("\"%f\"%s" % (loss, self.ftc)
                                           ).replace(".", self.ftd))
            self.feature_writer.write('\"\"')
        if self.ft_filters:
            # process filters, sometimes save their state as images
            print('-' * 40 + "\nProcessing filters:")
            print('\n* Global input data (post-processed):')
            for b in range(0, self.total_blocks):
                cs, lcs_dst, lcs_src = self.process_block_filters(b, epoch)
                self.ft_log_filters(b, cs, lcs_dst, lcs_src)
        elif self.ft_kernels:
            # process kernels instead (per-kernel connection strengths)
            print('-' * 40 + "\nProcessing kernels:")
            for b in range(0, self.total_blocks):
                for l in range(0, self.layer_num_list[b]):
                    print('\n* Block %d filter %d:' % (b, l))
                    cs = self.process_layer_kernels(b, l, epoch)
                    for k in range(len(cs)):
                        print('  - Kernel %d: CS = %f' % (k, cs[k]))
                    if self.should_save_ft_logs:
                        for k in range(len(cs)):
                            self.feature_writer.write((
                                '%s\"%f\"' % (self.ftc, cs[k])).replace(
                                    ".", self.ftd))
                        self.feature_writer.write('%s\"\"' % self.ftc)
            print('-' * 40)
        if self.should_save_ft_logs:
            # terminate this epoch's CSV row
            self.feature_writer.write('\n')
# SELF-CONSTRUCTING ALGORITHM VARIANTS ------------------------------------
# -------------------------------------------------------------------------
def self_constructing_var0(self, epoch, accuracy):
"""
A step of the self-constructing algorithm (variant #0) for one
training epoch.
Adds new layers to the last block depending on parameters.
Returns True if training should continue, False otherwise.
This algorithm consists in a succession of two stages:
- Ascension: add one layer every asc_thresh training epochs, break the
loop (end the stage) when a layer settles (its layer cs for sources
is == 1).
- Improvement: end the stage when a total of max_n_ep epochs have
elapsed (since the addition of the last block).
Args:
epoch: `int`, current training epoch (since adding the last block);
accuracy: `float`, accuracy for this epoch (here unused).
"""
continue_training = True
cs, lcs_dst, lcs_src = self.process_block_filters(
self.total_blocks-1, epoch)
# calculate number of settled layers (layers with lcs_src == 1)
settled_layers = 0
for src in range(1, len(lcs_src)):
if lcs_src[src] >= 1:
settled_layers += 1
# stage #0 = ascension stage
if self.algorithm_stage == 0:
if settled_layers > 0:
self.settled_layers_ceil = settled_layers
self.algorithm_stage += 1
elif (epoch-1) % self.asc_thresh == 0:
self._new_layer(self.growth_rate)
# stage #1 = improvement stage
if self.algorithm_stage == 1:
if epoch >= self.max_n_ep:
# stop algorithm and reset everything
continue_training = False
self.algorithm_stage = 0
return continue_training
def self_constructing_var1(self, epoch, accuracy):
"""
A step of the self-constructing algorithm (variant #1) for one
training epoch.
Adds new layers to the last block depending on parameters.
Returns True if training should continue, False otherwise.
This algorithm consists in a succession of two stages:
- Ascension: add one layer every asc_thresh training epochs, break the
loop (end the stage) when a layer settles (its layer cs for sources
is == 1).
- Improvement: end the stage when a total of max_n_ep epochs have
elapsed (since the addition of the last block);
if another layer settles, add a layer and restart the countdown.
Args:
epoch: `int`, current training epoch (since adding the last block);
accuracy: `float`, accuracy for this epoch (here unused).
"""
continue_training = True
cs, lcs_dst, lcs_src = self.process_block_filters(
self.total_blocks-1, epoch)
# calculate number of settled layers (layers with lcs_src == 1)
settled_layers = 0
for src in range(1, len(lcs_src)):
if lcs_src[src] >= 1:
settled_layers += 1
# stage #0 = ascension stage
if self.algorithm_stage == 0:
if settled_layers > 0:
self.settled_layers_ceil = settled_layers
self.algorithm_stage += 1
elif (epoch-1) % self.asc_thresh == 0:
self._new_layer(self.growth_rate)
# stage #1 = improvement stage
if self.algorithm_stage == 1:
if epoch >= self.max_n_ep:
# stop algorithm and reset everything
continue_training = False
self.algorithm_stage = 0
elif settled_layers > self.settled_layers_ceil:
self.settled_layers_ceil = settled_layers
self._new_layer(self.growth_rate)
return continue_training
def self_constructing_var2(self, epoch, accuracy):
"""
A step of the self-constructing algorithm (variant #2) for one
training epoch.
Adds new layers to the last block depending on parameters.
Returns True if training should continue, False otherwise.
This algorithm consists in a succession of two stages:
- Ascension: add one layer every asc_thresh training epochs, break the
loop (end the stage) when a layer settles (its layer cs for sources
is == 1), or after std_window epochs or more if accuracy hasn't
changed much.
- Improvement: countdown of patience_param epochs until the stage ends;
if another layer settles, add a layer and restart the countdown.
Args:
epoch: `int`, current training epoch (since adding the last block);
accuracy: `float`, accuracy for this epoch.
"""
continue_training = True
cs, lcs_dst, lcs_src = self.process_block_filters(
self.total_blocks-1, epoch)
# calculate number of settled layers (layers with lcs_src == 1)
settled_layers = 0
for src in range(1, len(lcs_src)):
if lcs_src[src] >= 1:
settled_layers += 1
# stage #0 = ascension stage
if self.algorithm_stage == 0:
# the ascension stage uses a FIFO list of past accuracies.
self.accuracy_FIFO.append(accuracy)
# after std_window ascension stage epochs, end the stage if the
# accuracy didn't change much in a while.
if (len(self.accuracy_FIFO) == self.std_window and
np.std(self.accuracy_FIFO) < self.std_tolerance):
self.algorithm_stage += 1
# max_n_ep is used to estimate completion time
self.max_n_ep = epoch + self.patience_param + 1
# else follow the usual protocol based on settled layers.
else:
if settled_layers > 0 and self.layer_num_list[-1] > 2:
self.settled_layers_ceil = settled_layers
self.algorithm_stage += 1
# max_n_ep is used to estimate completion time
self.max_n_ep = epoch + self.patience_param + 1
elif (epoch-1) % self.asc_thresh == 0:
self._new_layer(self.growth_rate)
# stage #1 = improvement stage
if self.algorithm_stage == 1:
if self.patience_cntdwn <= 0:
# stop algorithm and reset everything
continue_training = False
self.algorithm_stage = 0
self.patience_cntdwn = self.patience_param
elif settled_layers > self.settled_layers_ceil:
# if a layer settles, add a layer and restart the countdown
self.settled_layers_ceil = settled_layers
self._new_layer(self.growth_rate)
self.patience_cntdwn = self.patience_param
# max_n_ep is used to estimate completion time
self.max_n_ep = epoch + self.patience_param + 1
else:
self.patience_cntdwn -= 1
return continue_training
def self_constructing_var3(self, epoch, accuracy):
"""
A step of the self-constructing algorithm (variant #3) for one
training epoch.
Adds new layers to the last block depending on parameters.
Returns True if training should continue, False otherwise.
This algorithm consists in a succession of two stages:
- Ascension: add one layer every asc_thresh training epochs, break the
loop (end the stage) after std_window epochs or more if accuracy
hasn't changed much.
- Improvement: countdown of patience_param epochs until the stage ends;
if another layer settles, add a layer and restart the countdown.
Args:
epoch: `int`, current training epoch (since adding the last block);
accuracy: `float`, accuracy for this epoch.
"""
continue_training = True
cs, lcs_dst, lcs_src = self.process_block_filters(
self.total_blocks-1, epoch)
# calculate number of settled layers (layers with lcs_src == 1)
settled_layers = 0
for src in range(1, len(lcs_src)):
if lcs_src[src] >= 1:
settled_layers += 1
# stage #0 = ascension stage
if self.algorithm_stage == 0:
# the ascension stage uses a FIFO list of past accuracies.
self.accuracy_FIFO.append(accuracy)
# after std_window ascension stage epochs, end the stage if the
# accuracy didn't change much in a while.
if (len(self.accuracy_FIFO) == self.std_window and
np.std(self.accuracy_FIFO) < self.std_tolerance):
self.algorithm_stage += 1
# max_n_ep is used to estimate completion time
self.max_n_ep = epoch + self.patience_param + 1
elif (epoch-1) % self.asc_thresh == 0:
self._new_layer(self.growth_rate)
# stage #1 = improvement stage
if self.algorithm_stage == 1:
if self.patience_cntdwn <= 0:
# stop algorithm and reset everything
continue_training = False
self.algorithm_stage = 0
self.patience_cntdwn = self.patience_param
elif settled_layers > self.settled_layers_ceil:
# if a layer settles, add a layer and restart the countdown
self.settled_layers_ceil = settled_layers
self._new_layer(self.growth_rate)
self.patience_cntdwn = self.patience_param
# max_n_ep is used to estimate completion time
self.max_n_ep = epoch + self.patience_param + 1
else:
self.patience_cntdwn -= 1
return continue_training
    def self_constructing_var4(self, epoch, accuracy):
        """
        A step of the self-constructing algorithm (variant #4) for one
        training epoch.
        Builds new layers in the last block depending on parameters.
        Returns True if training should continue, False otherwise.
        This algorithm consists of a macro-algorithm, which adds layers to
        the last block, and a micro-algorithm, which builds those layers
        kernel by kernel.
        - The macro-algorithm adds a new layer with one kernel and runs the
          micro-algorithm to build it (i.e. to add/prune kernels in it), then
          checks if the accuracy has improved significantly since the previous
          layer addition. If so it adds a new layer, else the algorithm ends.
          (NOTE(review): the code currently deviates from this description --
          see the note near the end of the method.)
        - The micro-algorithm consists of a succession of four stages:
          - Ascension: add one kernel every m_asc_thresh training epochs,
            break the loop (end the stage) when one of the kernels settles
            (its CS has remained stable for a certain number of epochs).
          - Improvement: countdown of m_patience_param epochs until the stage
            ends; if another kernel settles AND if it is useful (CS above
            usefulness_thresh, set by the user), add a kernel and restart the
            countdown.
          - Pruning: first wait until all kernels have settled (useful or
            not), then save the current accuracy and prune all useless
            kernels (CS below uselessness_thresh, set by the user) to end the
            stage.
          - Recovery: wait for one last countdown of m_patience_param epochs
            (optionally resetting the learning rate to its initial value and
            reducing it according to rlr0); after this countdown wait until
            reaching pre-pruning accuracy, then end the stage.
        Args:
            epoch: `int`, current training epoch (since adding the last block);
            accuracy: `float`, accuracy for this epoch.
        """
        continue_training = True
        settled_kernels_count = 0
        useful_kernels_count = 0
        useless_kernels_list = []
        # Update the kernel CS lists, count settled and useful kernels
        cs_kernels = self.process_layer_kernels(
            self.total_blocks-1, self.layer_num_list[-1]-1, epoch)
        for k in range(len(self.kCS_FIFO)):
            self.kCS_FIFO[k].append(cs_kernels[k])
            # once the kCS FIFO is full, record a smoothed derivative of
            # the kernel's CS over the window
            if len(self.kCS_FIFO[k]) == self.dkCS_softening:
                self.dkCS_FIFO[k].append(
                    (self.kCS_FIFO[k][-1] - self.kCS_FIFO[k][0])/(
                        self.dkCS_softening-1))
            # Settled = the smoothed kCS derivative stayed close to 0
            # (both its mean and std within dkCS_stl_thresh) over the last
            # dkCS_std_window epochs
            if ((len(self.dkCS_FIFO[k]) == self.dkCS_std_window) and (
                    np.abs(np.mean(self.dkCS_FIFO[k])
                           ) <= self.dkCS_stl_thresh) and
                    (np.abs(np.std(self.dkCS_FIFO[k])
                            ) <= self.dkCS_stl_thresh)):
                settled_kernels_count += 1
                # Useful = settled, and kCS above the usefulness thresh
                if np.mean(self.kCS_FIFO[k]) >= self.usefulness_thresh:
                    useful_kernels_count += 1
                # Useless = settled, and kCS below the uselessness thresh
                if np.mean(self.kCS_FIFO[k]) <= self.uselessness_thresh:
                    useless_kernels_list.append(k)
        # stage #0 = ascension stage (currently does nothing)
        if self.algorithm_stage == 0:
            self.algorithm_stage += 1
        # stage #1 = improvement stage
        if self.algorithm_stage == 1:
            # micro-stage #0 = micro-ascension stage
            if self.micro_stage == 0:
                if settled_kernels_count >= 1:
                    # end stage when one or various kernels have settled
                    self.useful_kernels_ceil = useful_kernels_count
                    self.micro_stage += 1
                    # max_n_ep is used to estimate completion time
                    self.max_n_ep = epoch + 2*(self.m_patience_param + 1)
                elif (epoch-1) % self.m_asc_thresh == 0:
                    self._new_kernels_to_last_layer(
                        self.expansion_rate,
                        complementarity=self.complementarity)
            # micro-stage #1 = micro-improvement stage
            if self.micro_stage == 1:
                if self.m_patience_cntdwn <= 0:
                    # at the end of the patience countdown, end stage
                    self.micro_stage += 1
                elif useful_kernels_count > self.useful_kernels_ceil:
                    # if a new kernel is useful, add a kernel and restart ctdwn
                    self.useful_kernels_ceil = useful_kernels_count
                    self._new_kernels_to_last_layer(
                        self.expansion_rate,
                        complementarity=self.complementarity)
                    self.m_patience_cntdwn = self.m_patience_param
                    self.max_n_ep = epoch + 2*(self.m_patience_param + 1)
                else:
                    # patience countdown progress
                    self.m_patience_cntdwn -= 1
            # micro-stage #2 = micro-pruning stage
            if self.micro_stage == 2:
                # wait until all kernels have settled (bound to happen?)
                if settled_kernels_count == len(self.kCS_FIFO):
                    # save the accuracy, prune useless kernels and end stage
                    self.accuracy_pre_pruning = accuracy
                    self._prune_kernels_in_last_layer(useless_kernels_list)
                    self.micro_stage += 1
                    # run one last patience countdown for recovery
                    self.m_patience_cntdwn = self.m_patience_param
                    self.max_n_ep = epoch + self.m_patience_param + 1
            # micro-stage #3 = micro-recovery stage (accessed in next epoch)
            elif self.micro_stage == 3:
                # patience countdown to ensure recovery
                if self.m_patience_cntdwn > 0:
                    self.m_patience_cntdwn -= 1
                # wait until reaching pre-pruning accuracy
                elif accuracy >= self.accuracy_pre_pruning:
                    # prune again if there are useless kernels, else end stage
                    # if len(useless_kernels_list) >= 1:
                    #     self.micro_stage = 2
                    # else:
                    self.micro_stage += 1
                    # self.algorithm_stage = 2
            # at the end of the micro-algorithm, try to add a new layer
            if self.micro_stage == 4:
                # reset everything for the micro-algorithm
                self.micro_stage = 0
                self.useful_kernels_ceil = 0
                self.m_patience_cntdwn = self.m_patience_param
                self.accuracy_pre_pruning = 0
                # check if the accuracy has improved since the last layer
                # if so, add a layer, else end the improvement stage
                if abs(accuracy-self.accuracy_last_layer) >= self.impr_thresh:
                    self.accuracy_last_layer = accuracy
                    self._new_layer(self.growth_rate)
                    # different uselessness threshold for new layers
                    # self.uselessness_thresh = self.alt_uselessness_thresh
                    # alt. number of kernels = half the previous
                    # layer's number if during the ascension stage.
                    # self._new_layer(floor(
                    #     len(self.kernels_ref_list[-1][-1])/2))
                # else:
                    # NOTE(review): with the `else:` above commented out,
                    # this increment still belongs to the `if` branch: the
                    # macro-stage advances (ending the algorithm) right
                    # after a new layer is added, and never advances when
                    # the accuracy did not improve -- the opposite of what
                    # the docstring (and var5) describe. Confirm this
                    # toggle is intentional.
                    self.algorithm_stage += 1
        # stage #2 (nothing yet, stop the algorithm and reset everything)
        if self.algorithm_stage == 2:
            continue_training = False
            self.algorithm_stage = 0
            self.patience_cntdwn = self.patience_param
            self.accuracy_last_layer = 0
        return continue_training
    def self_constructing_var5(self, epoch, accuracy):
        """
        A step of the self-constructing algorithm (variant #5) for one
        training epoch.
        Builds new layers in the last block depending on parameters.
        Returns True if training should continue, False otherwise.
        This algorithm consists of a macro-algorithm, which adds layers to
        the last block, and a micro-algorithm, which builds those layers
        kernel by kernel.
        - The macro-algorithm adds a new layer with one kernel and runs the
          micro-algorithm to build it (i.e. to add/prune kernels in it), then
          checks if the accuracy has improved significantly since the previous
          layer addition. If so it adds a new layer, else the algorithm ends.
        - The micro-algorithm consists of a succession of three stages:
          - Improvement: countdown of m_patience_param epochs; if the number
            of useful kernels (CS above usefulness_thresh, automatically set)
            is above the latest max number of useful kernels, add a kernel
            and restart the countdown; if the countdown ends, wait until all
            kernels have settled and end the stage.
          - Pruning: save the current accuracy and prune all useless kernels
            (CS below uselessness_thresh, automatically set) to end the
            stage.
          - Recovery: wait for one last countdown of m_patience_param epochs
            (optionally resetting the learning rate to its initial value and
            reducing it according to rlr0); after this countdown wait until
            reaching pre-pruning accuracy, then if there are any new useless
            kernels wait for all kernels to settle and return to pruning,
            else end the stage.
        Args:
            epoch: `int`, current training epoch (since adding the last block);
            accuracy: `float`, accuracy for this epoch.
        """
        continue_training = True
        settled_kernels_count = 0
        useful_kernels_count = 0
        useless_kernels_list = []
        kCS_settled = []
        # Update the kernel CS lists, count settled kernels
        cs_kernels = self.process_layer_kernels(
            self.total_blocks-1, self.layer_num_list[-1]-1, epoch)
        for k in range(len(self.kCS_FIFO)):
            self.kCS_FIFO[k].append(cs_kernels[k])
            # once the kCS FIFO is full, record a smoothed derivative of
            # the kernel's CS over the window
            if len(self.kCS_FIFO[k]) == self.dkCS_softening:
                self.dkCS_FIFO[k].append(
                    (self.kCS_FIFO[k][-1] - self.kCS_FIFO[k][0])/(
                        self.dkCS_softening-1))
            # Settled = the smoothed kCS derivative stayed close to 0
            # (both its mean and std within dkCS_stl_thresh) over the last
            # dkCS_std_window epochs
            if ((len(self.dkCS_FIFO[k]) == self.dkCS_std_window) and (
                    np.abs(np.mean(self.dkCS_FIFO[k])
                           ) <= self.dkCS_stl_thresh) and
                    (np.abs(np.std(self.dkCS_FIFO[k])
                            ) <= self.dkCS_stl_thresh)):
                settled_kernels_count += 1
                if self.micro_stage == 1:
                    kCS_settled.append(self.kCS_FIFO[k][-1])
        # If half of the original kernels have settled
        if settled_kernels_count >= 0.5*self.growth_rate:
            # During impr. stage, calculate usefulness and uselessness thresh
            # (outside that stage, the last computed thresholds are reused)
            if self.micro_stage == 1:
                self.usefulness_thresh = min(kCS_settled) + (
                    max(kCS_settled) - min(kCS_settled)
                    )*self.auto_usefulness_thresh
                self.uselessness_thresh = min(kCS_settled) + (
                    max(kCS_settled) - min(kCS_settled)
                    )*self.auto_uselessness_thresh
            # Detect and count useful and useless kernels
            for k in range(len(self.kCS_FIFO)):
                # Useful = kCS above the usefulness thresh
                if np.mean(self.kCS_FIFO[k]) >= self.usefulness_thresh:
                    useful_kernels_count += 1
                # Useless = kCS below the uselessness thresh
                if np.mean(self.kCS_FIFO[k]) <= self.uselessness_thresh:
                    useless_kernels_list.append(k)
        # stage #0 = ascension stage (currently does nothing)
        if self.algorithm_stage == 0:
            self.algorithm_stage += 1
        # stage #1 = improvement stage
        if self.algorithm_stage == 1:
            # micro-stage #0 = just some settings for the next (actual) stage
            if self.micro_stage == 0:
                self.micro_stage += 1
                # max_n_ep is used to estimate completion time
                self.max_n_ep = epoch + 2*(self.m_patience_param + 1)
            # micro-stage #1 = micro-improvement stage
            if self.micro_stage == 1:
                if self.m_patience_cntdwn <= 0:
                    # at the end of the patience countdown, end stage when all
                    # the kernels have settled
                    if settled_kernels_count == len(self.kCS_FIFO):
                        self.micro_stage += 1
                elif useful_kernels_count > self.useful_kernels_ceil:
                    # if the number of useful kernels is above the latest max,
                    # add a kernel and restart ctdwn
                    self.useful_kernels_ceil = useful_kernels_count
                    self._new_kernels_to_last_layer(
                        self.expansion_rate,
                        complementarity=self.complementarity)
                    self.m_patience_cntdwn = self.m_patience_param
                    self.max_n_ep = epoch + 2*(self.m_patience_param + 1)
                else:
                    # patience countdown progress
                    self.m_patience_cntdwn -= 1
            # micro-stage #2 = micro-pruning stage
            if self.micro_stage == 2:
                # save the accuracy, prune useless kernels and end stage
                self.accuracy_pre_pruning = accuracy
                self._prune_kernels_in_last_layer(useless_kernels_list)
                self.micro_stage += 1
                # run one last patience countdown for recovery
                self.m_patience_cntdwn = self.m_patience_param
                self.max_n_ep = epoch + self.m_patience_param + 1
            # micro-stage #3 = micro-recovery stage (accessed in next epoch)
            elif self.micro_stage == 3:
                # patience countdown to ensure recovery
                if self.m_patience_cntdwn > 0:
                    self.m_patience_cntdwn -= 1
                # wait until reaching pre-pruning accuracy
                elif accuracy >= self.accuracy_pre_pruning:
                    # prune again if there are useless kernels, else end stage
                    if len(useless_kernels_list) >= 1:
                        # but first, wait for all kernels to settle
                        if settled_kernels_count == len(self.kCS_FIFO):
                            self.micro_stage = 2
                    else:
                        self.micro_stage += 1
            # at the end of the micro-algorithm, try to add a new layer
            if self.micro_stage == 4:
                # reset everything for the micro-algorithm
                self.micro_stage = 0
                self.useful_kernels_ceil = 0
                self.m_patience_cntdwn = self.m_patience_param
                self.accuracy_pre_pruning = 0
                # check if the accuracy has improved since the last layer
                # if so, add a layer, else end the improvement stage
                if abs(accuracy-self.accuracy_last_layer) >= self.impr_thresh:
                    self.accuracy_last_layer = accuracy
                    self._new_layer(self.growth_rate)
                    # alt. number of kernels = half the previous
                    # layer's number if during the ascension stage.
                    # self._new_layer(floor(
                    #     len(self.kernels_ref_list[-1][-1])/2))
                else:
                    self.algorithm_stage += 1
        # stage #2 (nothing yet, stop the algorithm and reset everything)
        if self.algorithm_stage == 2:
            continue_training = False
            self.algorithm_stage = 0
            self.patience_cntdwn = self.patience_param
            self.accuracy_last_layer = 0
        return continue_training
def self_constructing_minimal(self, epoch, accuracy):
"""
A step of the self-constructing algorithm (minimal kernel-by-kernel
variant) for one training epoch.
Builds new layers in the last block depending on parameters.
Returns True if training should continue, False otherwise.
The algorithm is meant to be run with an initial architecture of 1
layer with 1 kernel. It only adds an additional kernel to the
layer after m_asc_thresh epochs, then ends after performing a patience
countdown (to further train the network). This is meant to represent a
"minimal" kernel-level self-construction.
Args:
epoch: `int`, current training epoch (since adding the last block);
accuracy: `float`, accuracy for this epoch.
"""
continue_training = True
# Update the kernel CS lists
cs_kernels = self.process_layer_kernels(
self.total_blocks-1, self.layer_num_list[-1]-1, epoch)
for k in range(len(self.kCS_FIFO)):
self.kCS_FIFO[k].append(cs_kernels[k])
# stage #0 = ascension stage (currently does nothing)
if self.algorithm_stage == 0:
self.algorithm_stage += 1
# stage #1 = improvement stage
if self.algorithm_stage == 1:
# micro-stage #0 = minimal micro-ascension stage
if self.micro_stage == 0:
if len(self.kernels_ref_list[-1][-1]) >= 3:
# end stage when there are at least two kernels.
self.micro_stage += 1
# max_n_ep is used to estimate completion time
self.max_n_ep = epoch + self.m_patience_param + 1
elif (epoch-1) % self.m_asc_thresh == 0:
self._new_kernels_to_last_layer(
self.expansion_rate,
complementarity=self.complementarity)
# micro-stage #1 = patience countdown
if self.micro_stage == 1:
if self.m_patience_cntdwn <= 0:
# reset everything for the micro-algorithm
self.micro_stage = 0
self.useful_kernels_ceil = 0
self.m_patience_cntdwn = self.m_patience_param
# at the end of the patience countdown, end macro-stage
self.algorithm_stage += 1
else:
# patience countdown progress
self.m_patience_cntdwn -= 1
# stage #2 (nothing yet, stop the algorithm and reset everything)
if self.algorithm_stage == 2:
continue_training = False
self.algorithm_stage = 0
self.patience_cntdwn = self.patience_param
self.accuracy_last_layer = 0
return continue_training
# LEARNING RATE REDUCTION VARIANTS (FOR SELF CONSTRUCTING) ----------------
# -------------------------------------------------------------------------
def self_constr_rlr0(self, learning_rate, initial_lr, rlr_1, rlr_2):
"""
An optional learning rate reduction (Reduce LR #0) to be performed
after a step of the self-constructing algorithm (based on the patience
countdown, so it only works with variant #2 onwards).
Returns the new learning rate value.
Whenever the countdown reaches an epoch that corresponds to a given
fraction of the patience parameter (the patience_param multiplied by
1-rlr_1 or 1-rlr_2), the current learning rate is divided by 10.
If at any point the countdown is reset, the current learning rate
returns to its initial value.
Args:
learning_rate: `int`, the current learning rate value.
initial_lr: the initial value for the learning rate.
rlr_1: the fraction of epochs through the countdown at which
the learning rate must be reduced (/10) for the first time.
rlr_2: the fraction of epochs through the countdown at which
the learning rate must be reduced (/10) for the second time.
"""
if not self.has_micro_algo:
patience_cntdwn = self.patience_cntdwn
patience_param = self.patience_param
else:
patience_cntdwn = self.m_patience_cntdwn
patience_param = self.m_patience_param
if (patience_cntdwn == int(patience_param * (1-rlr_1))):
learning_rate = learning_rate / 10
elif (patience_cntdwn == int(patience_param * (1-rlr_2))):
learning_rate = learning_rate / 10
elif (patience_cntdwn == patience_param):
learning_rate = initial_lr
return learning_rate
def self_constr_rlr1(self, learning_rate, initial_lr, rlr_1, rlr_2):
"""
An optional learning rate reduction (Reduce LR #1) to be performed
after a step of the self-constructing algorithm (based on the patience
countdown, so it only works with variant #2 onwards).
Returns the new learning rate value.
The initial learning rate value is initial_lr.
The first time that the countdown reaches an epoch that corresponds to
patience_param * (1 - rlr_1), the learning rate becomes initial_lr/10.
The first time that the countdown reaches an epoch that corresponds to
patience_param * (1 - rlr_2), the learning rate becomes initial_lr/100.
Args:
learning_rate: `int`, the current learning rate value.
initial_lr: the initial value for the learning rate.
rlr_1: the fraction of epochs through the countdown at which
the learning rate must be reduced (/10) for the first time.
rlr_2: the fraction of epochs through the countdown at which
the learning rate must be reduced (/10) for the second time.
"""
if not self.has_micro_algo:
patience_cntdwn = self.patience_cntdwn
patience_param = self.patience_param
else:
patience_cntdwn = self.m_patience_cntdwn
patience_param = self.m_patience_param
if (patience_cntdwn == int(patience_param * (1-rlr_1))):
learning_rate = min(learning_rate, initial_lr / 10)
elif (patience_cntdwn == int(patience_param * (1-rlr_2))):
# learning_rate = min(learning_rate, initial_lr / 100)
learning_rate = initial_lr / 100 # min is unnecessary here
return learning_rate
# MAIN TRAINING AND TESTING FUNCTIONS -------------------------------------
# -------------------------------------------------------------------------
def train_one_epoch(self, data, batch_size, learning_rate):
"""
Trains the model for one epoch using data from the proper training set.
Args:
data: training data yielded by the dataset's data provider;
batch_size: `int`, number of examples in a training batch;
learning_rate: `int`, learning rate for the optimizer.
"""
num_examples = data.num_examples
total_loss = []
total_accuracy = []
# save each training batch's loss and accuracy
for i in range(num_examples // batch_size):
batch = data.next_batch(batch_size)
images, labels = batch
feed_dict = {
self.images: images,
self.labels: labels,
self.learning_rate: learning_rate,
self.is_training: True,
}
fetches = [self.train_step, self.cross_entropy[-1], self.accuracy]
result = self.sess.run(fetches, feed_dict=feed_dict)
_, loss, accuracy = result
total_loss.append(loss)
total_accuracy.append(accuracy)
if self.should_save_logs:
self.batches_step += 1
self.log_loss_accuracy(
loss, accuracy, self.batches_step, prefix='per_batch',
should_print=False)
# use the saved data to calculate the mean loss and accuracy
mean_loss = np.mean(total_loss)
mean_accuracy = np.mean(total_accuracy)
return mean_loss, mean_accuracy
def test(self, data, batch_size):
"""
Tests the model using the proper testing set.
Args:
data: testing data yielded by the dataset's data provider;
batch_size: `int`, number of examples in a testing batch.
"""
num_examples = data.num_examples
total_loss = []
for l in range(len(self.cross_entropy)):
total_loss.append([])
total_accuracy = []
# save each testing batch's loss and accuracy
for i in range(num_examples // batch_size):
batch = data.next_batch(batch_size)
feed_dict = {
self.images: batch[0],
self.labels: batch[1],
self.is_training: False,
}
loss = self.sess.run(self.cross_entropy, feed_dict=feed_dict)
accuracy = self.sess.run(self.accuracy, feed_dict=feed_dict)
for j in range(len(loss)):
total_loss[j].append(loss[j])
total_accuracy.append(accuracy)
# use the saved data to calculate the mean loss and accuracy
mean_loss = []
for loss_list in total_loss:
mean_loss.append(np.mean(loss_list))
mean_accuracy = np.mean(total_accuracy)
return mean_loss, mean_accuracy
    def train_all_epochs(self, train_params):
        """
        Trains the model for a certain number of epochs, using parameters
        specified in the train_params argument.
        Args (in train_params):
            batch_size: `int`, number of examples in a training batch;
            max_n_ep: `int`, maximum number of training epochs to run;
            initial_learning_rate: `int`, initial learning rate for optimizer;
            reduce_lr_1: `float`, if not self-constructing the network,
                first fraction of max_n_ep after which the current
                learning rate is divided by 10 (initial_learning_rate/10);
            reduce_lr_2: `float`, if not self-constructing the network,
                second fraction of max_n_ep after which the current
                learning rate is divided by 10 (initial_learning_rate/100);
            validation_set: `bool`, should a validation set be used or not;
            validation_split: `float` or None;
                `float`: chunk of the training set used as the validation set;
                None: use the testing set as the validation set;
            shuffle: `str` or None, or `bool`;
                `str` or None: used with CIFAR datasets, should we shuffle the
                data only before training ('once_prior_train'), on every
                epoch ('every_epoch') or not at all (None);
                `bool`: used with SVHN, should we shuffle the data or not;
            normalisation: `str` or None;
                None: don't use any normalisation for pixels;
                'divide_255': divide all pixels by 255;
                'divide_256': divide all pixels by 256;
                'by_chanels': subtract the mean of the pixel's chanel and
                divide the result by the channel's standard deviation.
        """
        self.max_n_ep = train_params['max_n_ep']
        initial_lr = train_params['initial_learning_rate']
        learning_rate = train_params['initial_learning_rate']
        batch_size = train_params['batch_size']
        rlr_1 = train_params['reduce_lr_1']
        rlr_2 = train_params['reduce_lr_2']
        validation_set = train_params.get('validation_set', False)
        total_start_time = time.time()
        epoch = 1  # current training epoch
        epoch_last_b = 0  # epoch at which the last block was added
        # NOTE(review): epoch_last_b is never reassigned in this loop, so
        # the relative epoch (epoch - epoch_last_b) handed to the
        # self-constructing step is never reset after _new_block() --
        # confirm this is intended (e.g. for single-block runs).
        while True:
            # only print epoch name on certain epochs
            if (epoch-1) % self.ft_period == 0:
                print('\n', '-'*30, "Train epoch: %d" % epoch, '-'*30, '\n')
            start_time = time.time()
            # if not self-constructing, may reduce learning rate at some epochs
            if not self.should_self_construct and self.should_change_lr:
                if (epoch == int(self.max_n_ep * rlr_1)) or (
                        epoch == int(self.max_n_ep * rlr_2)):
                    learning_rate = learning_rate / 10
                    print("Learning rate has been divided by 10, new lr = %f" %
                          learning_rate)
            # training step for one epoch
            print("Training...", end=' ')
            loss, acc = self.train_one_epoch(
                self.data_provider.train, batch_size, learning_rate)
            # save logs
            if self.should_save_logs:
                self.log_loss_accuracy(loss, acc, epoch, prefix='train')
            # validation step after the epoch
            if validation_set:
                print("Validation...")
                loss, acc = self.test(
                    self.data_provider.validation, batch_size)
                # save logs
                if self.should_save_logs:
                    self.log_loss_accuracy(loss[-1], acc, epoch,
                                           prefix='valid')
            # save feature logs (on certain epochs)
            if (epoch-1) % self.ft_period == 0:
                self.print_pertinent_features(loss, acc, epoch, validation_set)
            # save model if required
            if self.should_save_model:
                self.save_model()
            # step of the self-constructing algorithm
            if self.should_self_construct:
                if epoch - epoch_last_b != 1:
                    # can break here if self-constructing algorithm is over
                    if not self.self_constructing_step(
                            epoch - epoch_last_b, acc):
                        # add another block if block_count not yet exceeded
                        if self.total_blocks < self.block_count:
                            self._new_block()
                        else:
                            break
                    # optional learning rate reduction for self-constructing
                    if self.should_change_lr:
                        if self.has_micro_algo and self.micro_stage == 3:
                            # micro-recovery uses rlr0 for proper recovery
                            learning_rate = self.self_constr_rlr0(
                                learning_rate, initial_lr, rlr_1, rlr_2)
                        else:
                            # self_constr_rlr is presumably bound elsewhere
                            # to self_constr_rlr0/rlr1 -- TODO confirm
                            learning_rate = self.self_constr_rlr(
                                learning_rate, initial_lr, rlr_1, rlr_2)
                # if this is a new block, reset the algorithm's variables
                else:
                    self.settled_layers_ceil = 0  # highest num of settled lay
                    self.algorithm_stage = 0  # start with ascension stage
                    self.patience_cntdwn = self.patience_param
                    if self.has_micro_algo:
                        self.useful_kernels_ceil = 0  # highest n of settled k
                        self.micro_stage = 0  # kernel-level stages (if needed)
                        self.m_patience_cntdwn = self.m_patience_param
                        self.accuracy_pre_pruning = 0
                    self.accuracy_last_layer = 0
            # measure training time for this epoch
            time_per_epoch = time.time() - start_time
            seconds_left = int((self.max_n_ep - epoch) * time_per_epoch)
            print("Time per epoch: %s, Est. complete (%d epochs) in: %s" % (
                str(timedelta(seconds=time_per_epoch)),
                self.max_n_ep,
                str(timedelta(seconds=seconds_left))))
            # increase epoch, break at max_n_ep if not self-constructing
            epoch += 1
            if not self.should_self_construct and epoch >= self.max_n_ep+1:
                break
        # measure total training time
        total_training_time = time.time() - total_start_time
        print("\nTOTAL TRAINING TIME: %s\n" % str(timedelta(
            seconds=total_training_time)))
        if self.should_save_ft_logs:
            self.feature_writer.write("\nTOTAL TRAINING TIME: %s\n" % str(
                timedelta(seconds=total_training_time)))
            self._count_trainable_params_in_use(write_to_ft_logs=True)
| StarcoderdataPython |
# Compute and report the area and perimeter of a rectangle.
length = 2
breadth = 5
area = length * breadth
perimeter = 2 * (length + breadth)
# f-strings replace the mixed str()+concatenation style of the original
print(f"area is {area}")
print(f"perimeter is {perimeter}")
| StarcoderdataPython |
3334900 | <reponame>jlangdev/falconpy
"""
_______ __ _______ __ __ __
| _ .----.-----.--.--.--.--| | _ | |_.----|__| |--.-----.
|. 1___| _| _ | | | | _ | 1___| _| _| | <| -__|
|. |___|__| |_____|________|_____|____ |____|__| |__|__|__|_____|
|: 1 | |: 1 |
|::.. . | CROWDSTRIKE FALCON |::.. . | FalconPy
`-------' `-------'
OAuth2 API - Customer SDK
incidents - CrowdStrike Falcon Incidents API interface class
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <https://unlicense.org>
"""
from ._util import force_default, process_service_request
from ._service_class import ServiceClass
from ._endpoint._incidents import _incidents_endpoints as Endpoints
class Incidents(ServiceClass):
    """Interface to the CrowdStrike Falcon Incidents API collection.

    The only requirement to instantiate an instance of this class is a
    valid token provided by the Falcon API SDK OAuth2 class, an existing
    instance of the authentication class as an object, or a valid set of
    credentials.
    """

    def _request_with_body(self, operation_id: str, body: dict) -> dict:
        # Shared forwarder for the operations that accept only a JSON body.
        return process_service_request(
            calling_object=self,
            endpoints=Endpoints,
            operation_id=operation_id,
            body=body,
        )

    @force_default(defaults=["parameters"], default_types=["dict"])
    def crowdscore(self: object, parameters: dict = None, **kwargs) -> dict:
        """Query environment wide CrowdScore and return the entity data."""
        # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/incidents/CrowdScore
        return process_service_request(
            calling_object=self,
            endpoints=Endpoints,
            operation_id="CrowdScore",
            keywords=kwargs,
            params=parameters,
        )

    def get_behaviors(self: object, body: dict) -> dict:
        """Get details on behaviors by providing behavior IDs."""
        # [POST] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/incidents/GetBehaviors
        return self._request_with_body("GetBehaviors", body)

    def perform_incident_action(self: object, body: dict) -> dict:
        """Perform a set of actions on one or more incidents, such as
        adding tags or comments or updating the incident name or description.
        """
        # [POST] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/incidents/PerformIncidentAction
        return self._request_with_body("PerformIncidentAction", body)

    def get_incidents(self: object, body: dict) -> dict:
        """Get details on incidents by providing incident IDs."""
        # [POST] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/incidents/GetIncidents
        return self._request_with_body("GetIncidents", body)

    @force_default(defaults=["parameters"], default_types=["dict"])
    def query_behaviors(self: object, parameters: dict = None, **kwargs) -> dict:
        """Search for behaviors by providing an FQL filter, sorting, and paging details."""
        # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/incidents/QueryBehaviors
        return process_service_request(
            calling_object=self,
            endpoints=Endpoints,
            operation_id="QueryBehaviors",
            keywords=kwargs,
            params=parameters,
        )

    @force_default(defaults=["parameters"], default_types=["dict"])
    def query_incidents(self: object, parameters: dict = None, **kwargs) -> dict:
        """Search for incidents by providing an FQL filter, sorting, and paging details."""
        # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/incidents/QueryIncidents
        return process_service_request(
            calling_object=self,
            endpoints=Endpoints,
            operation_id="QueryIncidents",
            keywords=kwargs,
            params=parameters,
        )

    # These method names align to the operation IDs in the API but
    # do not conform to snake_case / PEP8 and are defined here for
    # backwards compatibility / ease of use purposes
    CrowdScore = crowdscore
    GetBehaviors = get_behaviors
    PerformIncidentAction = perform_incident_action
    GetIncidents = get_incidents
    QueryBehaviors = query_behaviors
    QueryIncidents = query_incidents
| StarcoderdataPython |
4804475 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 <NAME>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
import itertools
import string
from morphforge.simulation.base.core.cell import Cell
from morphforgecontrib.tags import StdTagFunctors
from morphforge.core.objectnumberer import ObjectLabeller
from morphforge.core.misc import is_iterable
class NeuronPopulation(object):
    """A container for a set of neurons built from a common functor.

    Each neuron is created by calling ``neuron_functor`` and is given a
    name derived from the population name plus its index; the population
    also maintains a reverse cell -> index lookup table.
    """

    def __init__(
        self,
        sim,
        neuron_functor,
        n,
        pop_name=None,
        name_tmpl_str=None,
        user_tags=None,
    ):
        """Create ``n`` neurons in simulation ``sim``.

        ``neuron_functor`` is called as
        ``neuron_functor(sim=..., name=..., cell_tags=...)`` for each index.
        ``name_tmpl_str`` is a ``string.Template`` pattern with an ``$i``
        placeholder; it defaults to '<pop_name>_$i'.
        """
        # Copy so a caller-supplied tag list is never mutated
        # (the original implementation extended the caller's list in place).
        user_tags = list(user_tags) if user_tags else []
        if pop_name:
            user_tags.extend(pop_name.split('_'))

        if pop_name is None:
            pop_name = ObjectLabeller.get_next_unamed_object_name(NeuronPopulation, prefix='NrnPop', num_fmt_string='%d')
        self.pop_name = pop_name

        if name_tmpl_str is None:
            name_tmpl_str = '%s_$i' % self.pop_name
        name_tmpl = string.Template(name_tmpl_str)

        self.sim = sim

        # Create the neurons. NOTE: use a dedicated variable ('nrn') for the
        # new cell -- the original code rebound the parameter 'n' here.
        self._nrns = []
        for i in range(n):
            cell_name = name_tmpl.substitute({'i': i})
            cell_tags = user_tags + ['Index%d' % i]
            nrn = neuron_functor(sim=sim, name=cell_name,
                                 cell_tags=cell_tags)
            nrn.population = self
            self._nrns.append(nrn)

        self._cell_to_index_lut = self._build_cell_to_index_lut()

    def __len__(self):
        """Number of neurons in the population."""
        return len(self._nrns)

    def __getitem__(self, i):
        """Return the neuron at index ``i``."""
        return self._nrns[i]

    def __iter__(self):
        """Iterate over the neurons in creation order."""
        return iter(self._nrns)

    @property
    def cell_types(self):
        # Placeholder: cell-type introspection is not implemented.
        return set(['<Unknown>'])

    def record(
        self,
        cell,
        location_func=None,
        what=None,
        user_tags=None,
        user_tag_functors=None,
        **kwargs
    ):
        """Attach a recorder to ``cell`` (a neuron or its integer index).

        ``location_func`` maps a cell to the location to record from
        (default: the soma); ``what`` defaults to the membrane voltage.
        Returns the record object created by the simulation.
        """
        # Indexable by index of cell reference
        if isinstance(cell, int):
            cell = self[cell]
        assert cell in self._nrns

        what = what or Cell.Recordables.MembraneVoltage
        user_tags = user_tags or []
        user_tag_functors = user_tag_functors \
            or StdTagFunctors.get_record_functors_neuron()
        location_func = location_func or (lambda cell: cell.soma)

        # Evaluate the location functor once and reuse the result (the
        # original called it a second time in the sim.record call below).
        cell_location = location_func(cell)
        kw_utf = {'cell_location': cell_location,
                  'neuron_population': self, 'neuron': cell}
        functor_tags = list(itertools.chain(*[utf(**kw_utf) for utf in
                            user_tag_functors]))
        r = self.sim.record(cell, cell_location=cell_location,
                            what=what, user_tags=user_tags
                            + functor_tags, **kwargs)
        return r

    def record_all(self, **kwargs):
        # Deliberately disabled: kept only to give a clear error message.
        assert False, "Method renamed to 'record_from_all"
        return [self.record(cell, **kwargs) for cell in self._nrns]

    def record_from_all(self, **kwargs):
        """Attach a recorder to every neuron; returns the list of records."""
        return [self.record(cell, **kwargs) for cell in self._nrns]

    def for_each(self, func):
        """Apply ``func(cell=...)`` to every neuron and collect the results."""
        return [func(cell=nrn) for nrn in self._nrns]

    def _build_cell_to_index_lut(self):
        # Reverse lookup table: neuron object -> its index in the population.
        return dict([(cell, index) for (index, cell) in enumerate(self._nrns)])

    def index(self, cell):
        """Return the index of ``cell`` within the population."""
        return self._cell_to_index_lut[cell]
class SynapsePopulation(object):
    """A synapse population is a container for a set of synapses. It does not do anything special,
    except add methods that make it easier to handle the synapse population"""

    def __init__(
        self,
        sim,
        synapses,
        synapse_pop_name=None,
        user_tags=None,
    ):
        # Some functions return lists of synapses; so we
        # reduce the input down to a flat list (None entries are dropped):
        self.synapses = []
        for s in synapses:
            if s is None:
                continue
            elif is_iterable(s):
                self.synapses.extend(s)
            else:
                #print 'Not Iterable:', s
                self.synapses.append(s)

        # Claim ownership: each synapse may belong to at most one population.
        for s in self.synapses:
            #print s, type(s)
            assert s.population is None
            s.population = self

        self.sim = sim
        # Auto-generate a name if none was given.
        self.synapse_pop_name=synapse_pop_name if synapse_pop_name is not None else ObjectLabeller.get_next_unamed_object_name(SynapsePopulation, prefix="SynPop")
        # NOTE(review): this assignment has no effect -- 'user_tags' is a
        # local that is never stored or used afterwards; looks like dead code.
        user_tags = user_tags or []

    @property
    def synapse_types(self):
        # Placeholder: synapse-type introspection is not implemented.
        return set(['UnknownType'])

    def record(self, synapse, what, user_tags=None, user_tag_functors=None, **kwargs):
        """Attach a recorder to ``synapse`` (a synapse object or its integer
        index in this population) and return the created record object.

        Tags produced by the (default or supplied) tag functors are appended
        to ``user_tags`` before being passed to the simulation.
        """
        if isinstance(synapse, int):
            synapse = self[synapse]
        assert synapse in self.synapses
        user_tags = user_tags or []
        user_tag_functors = user_tag_functors \
            or StdTagFunctors.get_record_functors_synapse()
        kw_utf = {'synapse': synapse, 'synapse_population': self}
        functor_tags = list(itertools.chain(*[utf(**kw_utf) for utf in
                            user_tag_functors]))
        return self.sim.record(synapse, what=what, user_tags=user_tags
                               + functor_tags, **kwargs)

    def record_all(self, **kwargs):
        # Deliberately disabled: kept only to give a clear error message.
        assert False, "method renamed to 'record_from_all'"
        return [self.record(syn, **kwargs) for syn in self.synapses]

    def record_from_all(self, **kwargs):
        """Attach a recorder to every synapse; returns the list of records."""
        return [self.record(syn, **kwargs) for syn in self.synapses]

    def __len__(self):
        return len(self.synapses)

    def __getitem__(self, i):
        return self.synapses[i]

    def __iter__(self):
        return iter(self.synapses)

    def where_presynaptic(self, cell=None):
        # Synapses whose presynaptic cell is `cell` (returned as a plain list).
        return [syn for syn in self.synapses
                if syn.get_presynaptic_cell() == cell]

    def where_postsynaptic(self, cell=None):
        # Synapses whose postsynaptic cell is `cell` (returned as a plain list).
        return [syn for syn in self.synapses
                if syn.get_postsynaptic_cell() == cell]

    def get_where_presynaptic(self, cell=None):
        # Disabled: would re-wrap synapses that already belong to this
        # population, violating the one-population-per-synapse assertion.
        assert False
        return SynapsePopulation(sim=self.sim,
                                 synapse_pop_name=self.synapse_pop_name,
                                 synapses=[syn for syn in self.synapses
                                           if syn.get_presynaptic_cell() == cell])

    def get_where_postsynaptic(self, cell=None):
        # Disabled: see get_where_presynaptic.
        assert False
        return SynapsePopulation(sim=self.sim,
                                 synapse_pop_name=self.synapse_pop_name,
                                 synapses=[syn for syn in self.synapses
                                           if syn.get_postsynaptic_cell()
                                           == cell])

    @property
    def presynaptic_population(self):
        """The unique NeuronPopulation on the presynaptic side, or None.

        Asserts if synapses originate from more than one population.
        """
        pre_pops = set([])
        for syn in self.synapses:
            pre = syn.get_presynaptic_cell()
            if pre and pre.population:
                pre_pops.add(pre.population)
        if not pre_pops:
            return None
        else:
            assert len(pre_pops) == 1
            return list(pre_pops)[0]

    @property
    def postsynaptic_population(self):
        """The unique NeuronPopulation on the postsynaptic side, or None.

        Asserts if synapses terminate on more than one population.
        """
        post_pops = set([])
        for syn in self.synapses:
            post = syn.get_postsynaptic_cell()
            if post and post.population:
                post_pops.add(post.population)
        if not post_pops:
            return None
        else:
            assert len(post_pops) == 1
            return list(post_pops)[0]

    @property
    def presynaptic_times(self):
        # Not implemented.
        assert False
class Connectors(object):
    """Factory classmethods that wire populations together and return the
    resulting SynapsePopulation."""

    @classmethod
    def all_to_all(
        cls,
        sim,
        presynaptic_population,
        postsynaptic_population,
        connect_functor,
        synapse_pop_name=None,
    ):
        """Connect every presynaptic cell to every postsynaptic cell
        (self-connections excluded) using ``connect_functor``."""
        pre_post_it = itertools.product(presynaptic_population,
                                        postsynaptic_population)
        synapses = [connect_functor(sim=sim, presynaptic=pre,
                    postsynaptic=post) for (pre, post) in pre_post_it
                    if pre != post]
        return SynapsePopulation(sim=sim, synapses=synapses,
                                 synapse_pop_name=synapse_pop_name)

    @classmethod
    def times_to_all(
        cls,
        sim,
        syncronous_times,
        postsynaptic_population,
        connect_functor,
        synapse_pop_name=None,
    ):
        """Drive every postsynaptic cell with the same list of spike times."""
        synapses = [connect_functor(sim=sim, postsynaptic=post,
                    times=syncronous_times) for post in
                    postsynaptic_population]
        return SynapsePopulation(sim=sim, synapses=synapses,
                                 synapse_pop_name=synapse_pop_name)

    @classmethod
    def all_to_all_template(
        cls,
        sim,
        presynaptic_population,
        postsynaptic_population,
        post_synaptic_template,
        pconnection=1.0,
        synapse_pop_name=None,
        presynaptic_location_functor=None,
        postsynaptic_location_functor=None,
        presynaptic_kwargs=None,
        postsynaptic_kwargs=None,
    ):
        """Randomly connect the two populations with probability
        ``pconnection`` per (pre, post) pair, instantiating the postsynaptic
        mechanism from ``post_synaptic_template``.

        NOTE(review): connectivity uses np.random without seeding, so the
        wiring differs between runs unless the caller seeds NumPy.
        NOTE(review): the function-level 'import *' below is only legal in
        Python 2; this module appears to be Python 2 code.
        """
        #TODO: presynaptic_location_functor, postsynaptic_location_functor are not handled properly!
        assert presynaptic_location_functor == None
        assert postsynaptic_location_functor == None

        if presynaptic_kwargs is None:
            presynaptic_kwargs = {}
        if postsynaptic_kwargs is None:
            postsynaptic_kwargs = {}

        env = sim.environment
        import numpy as np
        from morphforgecontrib.stdimports import *

        # Lets build a connectivity matrix:
        npre = len(presynaptic_population)
        npost = len(postsynaptic_population)
        connectivity = np.random.rand(npre, npost) < pconnection
        # Only build trigger/mechanism objects for cells that are actually
        # used by at least one connection:
        needs_presynaptic = np.any(connectivity, axis=1)
        needs_postsynaptic = np.any(connectivity, axis=0)

        # OK, so lets make the presynaptic objects:
        presynaptic_objects = {}
        for i in range(npre):
            if needs_presynaptic[i]:
                pre_cell = presynaptic_population[i]
                pre_cell_loc = presynaptic_location_functor(pre_cell) if presynaptic_location_functor else pre_cell.soma
                presynaptic_objects[i] = env.SynapticTrigger( SynapticTriggerByVoltageThreshold, cell_location=pre_cell_loc,**presynaptic_kwargs )

        # And lets make the post-synaptic objects:
        postsynaptic_objects = {}
        for i in range(npost):
            if needs_postsynaptic[i]:
                post_cell = postsynaptic_population[i]
                post_cell_loc = postsynaptic_location_functor(post_cell) if postsynaptic_location_functor else post_cell.soma
                postsynaptic_objects[i] = post_synaptic_template.instantiate(cell_location=post_cell_loc, **postsynaptic_kwargs)

        # And let connect them up, according to our connectivty matrix:
        # NOTE(review): the pre_index==post_index skip only makes sense when
        # the two populations are the same population -- confirm intent.
        synapses = []
        for (pre_index,post_index), are_connected in np.ndenumerate(connectivity):
            if not are_connected or pre_index==post_index:
                continue

            # Connecting:
            syn = sim.create_synapse( trigger = presynaptic_objects[pre_index], postsynaptic_mech=postsynaptic_objects[post_index] )
            synapses.append(syn)

        return SynapsePopulation(sim=sim, synapses=synapses,
                                 synapse_pop_name=synapse_pop_name)

        #assert False
        #pre_post_it = itertools.product(presynaptic_population, postsynaptic_population)
        #synapses = [connect_functor(sim=sim, presynaptic=pre, postsynaptic=post) for (pre, post) in pre_post_it if pre != post]
        #return SynapsePopulation(sim=sim, synapses=synapses, synapse_pop_name=synapse_pop_name)
| StarcoderdataPython |
1773347 | <gh_stars>0
#!/usr/bin/env python3
import re
import os
import argparse
from glob import glob
from utils import sync
from bmt import Bmt
class Iozone(Bmt):
    """IOZONE filesystem benchmark wrapper.

    Runs sequential write (-i 0), sequential read (-i 1) and random
    read/write (-i 2, direct I/O, results in OPS) across the allocated
    hosts, then collects the 'Children see throughput' figures.
    """

    def __init__(self, size='64M', record='1M', threads=4, **kwargs):
        """size: file size per thread; record: record size;
        threads: number of threads per node."""
        super().__init__(**kwargs)

        self.name = 'IOZONE'
        self.bin = 'iozone'

        self.header = ['Node', 'Thread', 'Size', 'Record', 'Write(MB/s)', 'Read(MB/s)', 'R_Write(OPS)', 'R_Read(OPS)']
        # Accumulates the four throughput figures for one run() invocation.
        self.bandwidth = []
        self.size = size
        self.record = record
        self.threads = threads

        self.getopt()

    def build(self):
        """Download and compile iozone 3.491 unless the binary already exists."""
        if os.path.exists(self.bin):
            return

        self.buildcmd += [
            f'wget http://www.iozone.org/src/current/iozone3_491.tgz -O {self.builddir}/iozone3_491.tgz',
            f'cd {self.builddir}; tar xf iozone3_491.tgz',
            f'cd {self.builddir}/iozone3_491/src/current; make linux',
            f'cp {self.builddir}/iozone3_491/src/current/iozone {self.bindir}']

        super().build()

    def run(self):
        """Run the three test modes, record the results, and clean up."""
        self.mkoutdir()

        self.write_hostfile()

        option = (
            f'-s {self.size} '                        # file size per threads
            f'-r {self.record} '                      # record size
            f'-+m {self.hostfile} '                   # hostfile: <hostname> <outdir> <iozone bin>
            f'-t {str(self.threads*len(self.host))} ' # total number of threads
             '-c '                                    # includes close in timing calculation
             '-e '                                    # incldues flush in timing calculation
             '-w '                                    # keep temporary files for read test
             '-+n')                                   # skip retests

        self.bandwidth = []

        # write
        self.output = f'iozone-i0-n{self.nodes}-t{self.threads}-s{self.size}-r{self.record}.out'
        self.runcmd = f'RSH=ssh {self.bin} -i 0 {option}'

        sync(self.host)
        super().run(1)

        # read
        self.output = f'iozone-i1-n{self.nodes}-t{self.threads}-s{self.size}-r{self.record}.out'
        self.runcmd = f'RSH=ssh {self.bin} -i 1 {option}'

        sync(self.host)
        super().run(1)

        # random read/write
        # -I: Use direct IO
        # -O: Return result in OPS
        self.output = f'iozone-i2-n{self.nodes}-t{self.threads}-s{self.size}-r{self.record}.out'
        self.runcmd = f'RSH=ssh {self.bin} -i 2 -I -O {option}'

        sync(self.host)
        super().run(1)

        self.result.append([self.nodes, self.threads, self.size, self.record] + self.bandwidth)

        self.clean()

    def write_hostfile(self):
        """Write the -+m hostfile: one '<host> <outdir> <bin>' line per thread.

        NOTE: the original had a dead 'outdir = os.path' assignment here;
        removed (it bound the module itself and was never used).
        """
        with open(self.hostfile, 'w') as fh:
            for host in self.host:
                for _ in range(self.threads):
                    fh.write(f'{host} {self.outdir} {self.bin}\n')

    def parse(self):
        """Extract the aggregate 'Children see throughput' values (MB/s)."""
        with open(self.output, 'r') as output_fh:
            for line in output_fh:
                if re.search('Children see throughput', line):
                    result, unit = line.split()[-2:]
                    if unit == 'kB/sec':
                        self.bandwidth.append(float(result)/1024)
                    else:
                        self.bandwidth.append(float(result))

    def clean(self):
        """Remove the DUMMY work files left behind by -w."""
        for io_file in sorted(glob(f'{self.outdir}/*DUMMY*')):
            os.remove(io_file)

    def getopt(self):
        """Parse command-line options into self.args (a plain dict)."""
        parser = argparse.ArgumentParser(
            usage           = '%(prog)s -s 1G -r 1M -t 8',
            description     = 'IOZONE Benchmark',
            formatter_class = argparse.RawDescriptionHelpFormatter,
            add_help        = False )

        opt = parser.add_argument_group(
            title='Optional arguments',
            description=(
                '-h, --help           show this help message and exit\n'
                '-v, --version        show program\'s version number and exit\n'
                '-s, --size           file size/threads\n'
                '-r, --record         record size\n'
                '-n, --nodes          number of nodes\n'
                '-t, --threads        number of threads per node\n' ))

        opt.add_argument('-h', '--help'   , action='help'            , help=argparse.SUPPRESS)
        opt.add_argument('-v', '--version', action='version',
                                            version='%(prog)s '+self.version, help=argparse.SUPPRESS)
        opt.add_argument('-s', '--size'   , type=str, metavar=''     , help=argparse.SUPPRESS)
        opt.add_argument('-r', '--record' , type=str, metavar=''     , help=argparse.SUPPRESS)
        opt.add_argument('-n', '--nodes'  , type=int, metavar=''     , help=argparse.SUPPRESS)
        opt.add_argument('-t', '--threads', type=int, metavar=''     , help=argparse.SUPPRESS)

        self.args = vars(parser.parse_args())
| StarcoderdataPython |
98430 | import pytest
from django.core.cache import cache
from rest_framework.test import APIClient
from environments.identities.models import Identity
from environments.identities.traits.models import Trait
from environments.models import Environment
from features.feature_types import MULTIVARIATE
from features.models import Feature
from features.multivariate.models import MultivariateFeatureOption
from features.value_types import STRING
from organisations.models import Organisation, OrganisationRole
from projects.models import Project
from segments.models import EQUAL, Condition, Segment, SegmentRule
from users.models import FFAdminUser
# Trait key/value shared by the `trait` and `identity_matching_segment`
# fixtures below.
trait_key = "key1"
trait_value = "value1"
@pytest.fixture()
def admin_client(admin_user):
    """A DRF API client pre-authenticated as the admin user."""
    api_client = APIClient()
    api_client.force_authenticate(user=admin_user)
    return api_client
@pytest.fixture()
def organisation(db, admin_user):
    """An organisation with the admin user attached as an ADMIN member."""
    org = Organisation.objects.create(name="Test Org")
    admin_user.add_organisation(org, role=OrganisationRole.ADMIN)
    return org
@pytest.fixture()
def project(organisation):
    """A project belonging to the test organisation."""
    return Project.objects.create(name="Test Project", organisation=organisation)
@pytest.fixture()
def environment(project):
    """An environment belonging to the test project."""
    return Environment.objects.create(name="Test Environment", project=project)
@pytest.fixture()
def identity(environment):
    """An identity registered in the test environment."""
    return Identity.objects.create(identifier="test_identity", environment=environment)
@pytest.fixture()
def trait(identity):
    """A string trait (module-level trait_key/trait_value) on the identity."""
    return Trait.objects.create(
        identity=identity, trait_key=trait_key, string_value=trait_value
    )
@pytest.fixture()
def multivariate_feature(project):
    """A multivariate feature with three string options (30/30/40% split)."""
    feature = Feature.objects.create(
        name="feature", project=project, type=MULTIVARIATE, initial_value="control"
    )

    allocations = (30, 30, 40)
    for percentage_allocation in allocations:
        MultivariateFeatureOption.objects.create(
            feature=feature,
            default_percentage_allocation=percentage_allocation,
            type=STRING,
            string_value=(
                f"multivariate option for {percentage_allocation}% of users."
            ),
        )

    return feature
@pytest.fixture()
def identity_matching_segment(project, trait):
    """A segment with a single ALL rule whose condition matches the
    `trait` fixture's key/value, so the `identity` fixture is a member."""
    segment = Segment.objects.create(name="Matching segment", project=project)
    matching_rule = SegmentRule.objects.create(
        segment=segment, type=SegmentRule.ALL_RULE
    )
    Condition.objects.create(
        rule=matching_rule,
        property=trait.trait_key,
        operator=EQUAL,
        value=trait.trait_value,
    )
    return segment
@pytest.fixture()
def api_client():
    """An unauthenticated DRF API client."""
    return APIClient()
@pytest.fixture()
def feature(project, environment):
    """A plain feature on the test project.

    NOTE(review): the `environment` argument is unused here but forces the
    environment fixture to exist before the feature -- confirm intent.
    """
    return Feature.objects.create(name="Test Feature1", project=project)
@pytest.fixture()
def user_password():
    """A random password string generated by the user manager."""
    return FFAdminUser.objects.make_random_password()
@pytest.fixture()
def reset_cache():
    """Clear the Django cache before and after the test.

    https://groups.google.com/g/django-developers/c/zlaPsP13dUY
    TL;DR: Use this if your test interacts with cache since django
    does not clear cache after every test.
    """
    cache.clear()
    yield
    cache.clear()
| StarcoderdataPython |
51493 | <reponame>mesnardo/cuIBM
"""
Calls VisIt in batch mode to generate .png files of the 2D field contour.
cli: visit -nowin -cli -s plotField2dVisIt.py <arguments>
"""
import os
import sys
import math
import argparse
sys.path.append(os.environ['SNAKE'])
from snake import miscellaneous
def parse_command_line():
    """
    Parses the command-line.

    Returns the argparse namespace with the simulation directory, body name,
    field to plot, contour range, state limits, view box, and image width.

    Fix: the --field choices previously listed 'x-velocity' twice and
    omitted 'y-velocity', which plot_field_contours supports.
    """
    print('[info] parsing the command-line ...'),
    # create the parser
    formatter_class = argparse.ArgumentDefaultsHelpFormatter
    parser = argparse.ArgumentParser(description='Plots the vorticity field '
                                                 'with VisIt',
                                     formatter_class=formatter_class)
    # fill the parser with arguments
    parser.add_argument('--directory',
                        dest='directory',
                        type=str,
                        default=os.getcwd(),
                        help='directory of the IBAMR simulation')
    parser.add_argument('--solution-folder',
                        dest='solution_folder',
                        type=str,
                        default='numericalSolution',
                        help='name of folder containing the solution in time')
    parser.add_argument('--body',
                        dest='body',
                        type=str,
                        default=None,
                        help='name of the body file (without the .vertex '
                             'extension)')
    parser.add_argument('--field',
                        dest='field_name',
                        type=str,
                        choices=['vorticity', 'pressure',
                                 'x-velocity', 'y-velocity',
                                 'velocity-magnitude'],
                        help='name of the field to plot')
    parser.add_argument('--range',
                        dest='field_range',
                        type=float, nargs=2,
                        default=(-1.0, 1.0),
                        metavar=('min', 'max'),
                        help='Range of the field to plot')
    parser.add_argument('--states',
                        dest='states',
                        type=int, nargs=3,
                        default=(0, 2**10000, 1),
                        metavar=('min', 'max', 'increment'),
                        help='steps to plot')
    parser.add_argument('--view',
                        dest='view',
                        type=float,
                        nargs=4,
                        default=(-2.0, -2.0, 2.0, 2.0),
                        metavar=('x-bl', 'y-bl', 'x-tr', 'y-tr'),
                        help='bottom-left coordinates followed by top-right '
                             'coordinates of the view to display')
    parser.add_argument('--width',
                        dest='width',
                        type=int,
                        default=800,
                        help='figure width in pixels')
    # parse given options file
    parser.add_argument('--options',
                        type=open,
                        action=miscellaneous.ReadOptionsFromFile,
                        help='path of the file with options to parse')
    print('done')
    return parser.parse_args()
def check_version():
    """
    Check the VisIt version and prints warning if the version has not been
    tested with this script.
    """
    script_version = '2.8.2'
    tested_versions = ['2.8.2', '2.10.2', '2.12.1']
    current_version = Version()
    print('VisIt version: {}\n'.format(current_version))
    # Nothing to warn about when the running version was tested.
    if current_version in tested_versions:
        return
    for warning in ('[warning] You are using VisIt-{}'.format(current_version),
                    '[warning] This script was created with '
                    'VisIt-{}.'.format(script_version),
                    '[warning] This script was tested with versions: '
                    '{}.'.format(tested_versions),
                    '[warning] It may not work as expected'):
        print(warning)
def plot_field_contours(field_name, field_range,
                        body=None,
                        directory=os.getcwd(),
                        solution_folder='numericalSolution',
                        states=(0, 2**10000, 1),
                        view=(-2.0, -2.0, 2.0, 2.0),
                        width=800):
    """
    Plots the contour of a given field using VisIt.

    Fixes over the previous revision: the state loop now uses the local
    ``states`` parameter (it previously read the module-level ``args``,
    which only exists when run as a script), and ``states`` is copied to a
    list so the upper bound can be clamped even with the tuple default.

    Parameters
    ----------
    field_name: string
      Name of field to plot;
      choices: vorticity, pressure, velocity-magnitude, x-velocity, y-velocity.
    field_range: 2-tuple of floats
      Range of the field to plot (min, max).
    body: string, optional
      Name of the immersed body;
      default: None.
    directory: string, optional
      Directory of the IBAMR simulation;
      default: current directory (evaluated once, at definition time).
    solution_folder: string, optional
      Relative path of the folder containing the numerical solution;
      default: 'numericalSolution'.
    states: 3-tuple of integers, optional
      Limits of index of the states to plot followed by the increment;
      default: (0, 2**10000, 1).
    view: 4-tuple of floats, optional
      Bottom-left and top-right coordinates of the view to display;
      default: (-2.0, -2.0, 2.0, 2.0).
    width: integer, optional
      Width (in pixels) of the figure;
      default: 800.
    """
    # Per-field VisIt variable names and color-table settings.
    info = {}
    info['vorticity'] = {'variable': 'Omega',
                         'color-table': 'RdBu',
                         'invert-color-table': 1}
    info['pressure'] = {'variable': 'P',
                        'color-table': 'hot',
                        'invert-color-table': 0}
    info['velocity-magnitude'] = {'variable': 'U_magnitude',
                                  'color-table': 'RdBu',
                                  'invert-color-table': 1}
    info['x-velocity'] = {'variable': 'U_x',
                          'color-table': 'RdBu',
                          'invert-color-table': 1}
    info['y-velocity'] = {'variable': 'U_y',
                          'color-table': 'RdBu',
                          'invert-color-table': 1}

    # define dimensions of domain to plot (height keeps the view's aspect ratio)
    height = int(math.floor(width * (view[3] - view[1]) / (view[2] - view[0])))

    # create images directory
    view_string = '{:.2f}_{:.2f}_{:.2f}_{:.2f}'.format(*view)
    images_directory = os.path.join(directory,
                                    'images',
                                    '_'.join([field_name, view_string]))
    if not os.path.isdir(images_directory):
        print('[info] creating images directory {} ...'.format(images_directory))
        os.makedirs(images_directory)

    ShowAllWindows()

    # display body
    if body:
        OpenDatabase(GetLocalHostName() + ':' + os.path.join(directory,
                                                             solution_folder,
                                                             'lag_data.visit'), 0)
        AddPlot('Mesh', body + '_vertices', 1, 1)
        DrawPlots()
        MeshAtts = MeshAttributes()
        MeshAtts.legendFlag = 0
        MeshAtts.lineStyle = MeshAtts.SOLID  # SOLID, DASH, DOT, DOTDASH
        MeshAtts.lineWidth = 0
        MeshAtts.meshColor = (0, 0, 0, 255)
        # Attributes below only exist in some VisIt versions.
        try:
            MeshAtts.outlineOnlyFlag = 0
            MeshAtts.errorTolerance = 0.01
        except:
            pass
        MeshAtts.meshColorSource = MeshAtts.Foreground  # Foreground, MeshCustom
        MeshAtts.opaqueColorSource = MeshAtts.Background  # Background, OpaqueCustom
        MeshAtts.opaqueMode = MeshAtts.Auto  # Auto, On, Off
        MeshAtts.pointSize = 0.05
        MeshAtts.opaqueColor = (255, 255, 255, 255)
        # MeshAtts.smoothingLevel = MeshAtts.None # None, Fast, High
        MeshAtts.pointSizeVarEnabled = 0
        MeshAtts.pointSizeVar = 'default'
        MeshAtts.pointType = MeshAtts.Point  # Box, Axis, Icosahedron, Octahedron, Tetrahedron, SphereGeometry, Point, Sphere
        MeshAtts.showInternal = 0
        MeshAtts.pointSizePixels = 2
        MeshAtts.opacity = 1
        SetPlotOptions(MeshAtts)

    # display vorticity field
    OpenDatabase(GetLocalHostName() + ':' + os.path.join(directory,
                                                         solution_folder,
                                                         'dumps.visit'), 0)
    HideActivePlots()
    AddPlot('Pseudocolor', info[field_name]['variable'], 1, 1)
    DrawPlots()
    PseudocolorAtts = PseudocolorAttributes()
    PseudocolorAtts.scaling = PseudocolorAtts.Linear  # Linear, Log, Skew
    PseudocolorAtts.skewFactor = 1
    PseudocolorAtts.limitsMode = PseudocolorAtts.OriginalData  # OriginalData, CurrentPlot
    PseudocolorAtts.minFlag = 1
    PseudocolorAtts.min = field_range[0]
    PseudocolorAtts.maxFlag = 1
    PseudocolorAtts.max = field_range[1]
    PseudocolorAtts.centering = PseudocolorAtts.Natural  # Natural, Nodal, Zonal
    PseudocolorAtts.colorTableName = info[field_name]['color-table']
    PseudocolorAtts.invertColorTable = info[field_name]['invert-color-table']
    PseudocolorAtts.opacityType = PseudocolorAtts.FullyOpaque  # ColorTable, FullyOpaque, Constant, Ramp, VariableRange
    PseudocolorAtts.opacityVariable = ''
    PseudocolorAtts.opacity = 1
    PseudocolorAtts.opacityVarMin = 0
    PseudocolorAtts.opacityVarMax = 1
    PseudocolorAtts.opacityVarMinFlag = 0
    PseudocolorAtts.opacityVarMaxFlag = 0
    PseudocolorAtts.pointSize = 0.05
    PseudocolorAtts.pointType = PseudocolorAtts.Point  # Box, Axis, Icosahedron, Octahedron, Tetrahedron, SphereGeometry, Point, Sphere
    PseudocolorAtts.pointSizeVarEnabled = 0
    PseudocolorAtts.pointSizeVar = 'default'
    PseudocolorAtts.pointSizePixels = 2
    PseudocolorAtts.lineType = PseudocolorAtts.Line  # Line, Tube, Ribbon
    PseudocolorAtts.lineStyle = PseudocolorAtts.SOLID  # SOLID, DASH, DOT, DOTDASH
    PseudocolorAtts.lineWidth = 0
    # The tube/endpoint attribute names changed across VisIt versions.
    if Version() in ['2.8.2', '2.10.2']:
        PseudocolorAtts.tubeDisplayDensity = 10
    elif Version() in ['2.12.1']:
        PseudocolorAtts.tubeResolution = 10
    else:
        PseudocolorAtts.tubeDisplayDensity = 10
    PseudocolorAtts.tubeRadiusSizeType = PseudocolorAtts.FractionOfBBox  # Absolute, FractionOfBBox
    PseudocolorAtts.tubeRadiusAbsolute = 0.125
    PseudocolorAtts.tubeRadiusBBox = 0.005
    if Version() in ['2.8.2', '2.10.2']:
        PseudocolorAtts.varyTubeRadius = 0
        PseudocolorAtts.varyTubeRadiusVariable = ''
        PseudocolorAtts.varyTubeRadiusFactor = 10
        # PseudocolorAtts.endPointType = PseudocolorAtts.None
        PseudocolorAtts.endPointStyle = PseudocolorAtts.Spheres
    elif Version() in ['2.12.1']:
        PseudocolorAtts.tubeRadiusVarEnabled = 0
        PseudocolorAtts.tubeRadiusVar = ''
        PseudocolorAtts.tubeRadiusVarRatio = 10
        # PseudocolorAtts.tailStyle = PseudocolorAtts.None
        # PseudocolorAtts.headStyle = PseudocolorAtts.None
    else:
        PseudocolorAtts.varyTubeRadius = 0
        PseudocolorAtts.varyTubeRadiusVariable = ''
        PseudocolorAtts.varyTubeRadiusFactor = 10
        # PseudocolorAtts.endPointType = PseudocolorAtts.None
        PseudocolorAtts.endPointStyle = PseudocolorAtts.Spheres
    PseudocolorAtts.endPointRadiusSizeType = PseudocolorAtts.FractionOfBBox
    PseudocolorAtts.endPointRadiusAbsolute = 1
    PseudocolorAtts.endPointRadiusBBox = 0.005
    PseudocolorAtts.endPointRatio = 2
    PseudocolorAtts.renderSurfaces = 1
    PseudocolorAtts.renderWireframe = 0
    PseudocolorAtts.renderPoints = 0
    PseudocolorAtts.smoothingLevel = 0
    PseudocolorAtts.legendFlag = 1
    PseudocolorAtts.lightingFlag = 1
    SetPlotOptions(PseudocolorAtts)

    # colorbar of pseudocolor plot
    # NOTE(review): GetPlots(2) assumes a specific plot list layout -- confirm
    # it is still the pseudocolor plot when no body mesh is displayed.
    legend = GetAnnotationObject(GetPlotList().GetPlots(2).plotName)
    legend.xScale = 1.5
    legend.yScale = 0.5
    legend.numberFormat = '%# -9.2g'
    legend.orientation = legend.HorizontalBottom
    legend.managePosition = 0
    legend.position = (0.10, 0.10)
    legend.fontFamily = legend.Courier
    legend.fontBold = 1
    legend.fontHeight = 0.1
    legend.drawMinMax = 0
    legend.drawTitle = 0
    print('[info] legend settings:')
    print(legend)

    # set up view
    View2DAtts = View2DAttributes()
    View2DAtts.windowCoords = (view[0], view[2], view[1], view[3])
    View2DAtts.viewportCoords = (0, 1, 0, 1)
    View2DAtts.fullFrameActivationMode = View2DAtts.Auto  # On, Off, Auto
    View2DAtts.fullFrameAutoThreshold = 100
    View2DAtts.xScale = View2DAtts.LINEAR  # LINEAR, LOG
    View2DAtts.yScale = View2DAtts.LINEAR  # LINEAR, LOG
    View2DAtts.windowValid = 1
    print('[info] view settings:')
    print(View2DAtts)
    SetView2D(View2DAtts)

    # Logging for SetAnnotationObjectOptions is not implemented yet.
    AnnotationAtts = AnnotationAttributes()
    AnnotationAtts.axes2D.visible = 1
    AnnotationAtts.axes2D.autoSetTicks = 1
    AnnotationAtts.axes2D.autoSetScaling = 1
    AnnotationAtts.axes2D.lineWidth = 0
    AnnotationAtts.axes2D.tickLocation = AnnotationAtts.axes2D.Inside  # Inside, Outside, Both
    AnnotationAtts.axes2D.tickAxes = AnnotationAtts.axes2D.BottomLeft  # Off, Bottom, Left, BottomLeft, All
    # x-axis
    AnnotationAtts.axes2D.xAxis.title.visible = 0  # hide x-axis title
    AnnotationAtts.axes2D.xAxis.label.visible = 0  # hide x-axis label
    AnnotationAtts.axes2D.xAxis.tickMarks.visible = 0  # hide x-axis tick marks
    AnnotationAtts.axes2D.xAxis.grid = 0  # no grid
    # y-axis
    AnnotationAtts.axes2D.yAxis.title.visible = 0  # hide y-axis title
    AnnotationAtts.axes2D.yAxis.label.visible = 0  # hide y-axis label
    AnnotationAtts.axes2D.yAxis.tickMarks.visible = 0  # hide y-axis tick marks
    AnnotationAtts.axes2D.yAxis.grid = 0  # no grid
    AnnotationAtts.userInfoFlag = 0  # hide text with user's name
    # settings for legend
    AnnotationAtts.databaseInfoFlag = 0
    AnnotationAtts.timeInfoFlag = 0
    AnnotationAtts.legendInfoFlag = 1
    AnnotationAtts.backgroundColor = (255, 255, 255, 255)
    AnnotationAtts.foregroundColor = (0, 0, 0, 255)
    AnnotationAtts.gradientBackgroundStyle = AnnotationAtts.Radial  # TopToBottom, BottomToTop, LeftToRight, RightToLeft, Radial
    AnnotationAtts.gradientColor1 = (0, 0, 255, 255)
    AnnotationAtts.gradientColor2 = (0, 0, 0, 255)
    AnnotationAtts.backgroundMode = AnnotationAtts.Solid  # Solid, Gradient, Image, ImageSphere
    AnnotationAtts.backgroundImage = ''
    AnnotationAtts.imageRepeatX = 1
    AnnotationAtts.imageRepeatY = 1
    AnnotationAtts.axesArray.visible = 0
    SetAnnotationAttributes(AnnotationAtts)
    print('[info] annotation settings:')
    print(AnnotationAtts)

    SetActiveWindow(1)

    # create time-annotation
    time_annotation = CreateAnnotationObject('Text2D')
    time_annotation.position = (0.05, 0.90)
    time_annotation.fontFamily = 1
    time_annotation.fontBold = 0
    time_annotation.height = 0.05
    print('[info] time-annotation:')
    print(time_annotation)

    # check number of states available; work on a mutable copy so the tuple
    # default can be clamped safely
    states = list(states)
    if states[1] > TimeSliderGetNStates():
        print('[warning] maximum number of states available is '
              '{}'.format(TimeSliderGetNStates()))
        print('[warning] setting new final state ...')
        states[1] = TimeSliderGetNStates()

    # loop over saved time-steps (uses the local `states`, not the global
    # `args`, so the function is usable as a library call)
    for state in xrange(states[0], states[1], states[2]):
        SetTimeSliderState(state)
        time = float(Query('Time')[:-1].split()[-1])
        print('\n[state {}] time: {} - creating and saving the field ...'
              ''.format(state, time))
        time_annotation.text = 'Time: {0:.3f}'.format(time)

        RenderingAtts = RenderingAttributes()
        RenderingAtts.antialiasing = 0
        RenderingAtts.multiresolutionMode = 0
        RenderingAtts.multiresolutionCellSize = 0.002
        RenderingAtts.geometryRepresentation = RenderingAtts.Surfaces  # Surfaces, Wireframe, Points
        RenderingAtts.displayListMode = RenderingAtts.Auto  # Never, Always, Auto
        RenderingAtts.stereoRendering = 0
        RenderingAtts.stereoType = RenderingAtts.CrystalEyes  # RedBlue, Interlaced, CrystalEyes, RedGreen
        RenderingAtts.notifyForEachRender = 0
        RenderingAtts.scalableActivationMode = RenderingAtts.Auto  # Never, Always, Auto
        RenderingAtts.scalableAutoThreshold = 2000000
        RenderingAtts.specularFlag = 0
        RenderingAtts.specularCoeff = 0.6
        RenderingAtts.specularPower = 10
        RenderingAtts.specularColor = (255, 255, 255, 255)
        RenderingAtts.doShadowing = 0
        RenderingAtts.shadowStrength = 0.5
        RenderingAtts.doDepthCueing = 0
        RenderingAtts.depthCueingAutomatic = 1
        RenderingAtts.startCuePoint = (-10, 0, 0)
        RenderingAtts.endCuePoint = (10, 0, 0)
        RenderingAtts.compressionActivationMode = RenderingAtts.Never  # Never, Always, Auto
        RenderingAtts.colorTexturingFlag = 1
        RenderingAtts.compactDomainsActivationMode = RenderingAtts.Never  # Never, Always, Auto
        RenderingAtts.compactDomainsAutoThreshold = 256
        SetRenderingAttributes(RenderingAtts)

        SaveWindowAtts = SaveWindowAttributes()
        SaveWindowAtts.outputToCurrentDirectory = 0
        SaveWindowAtts.outputDirectory = images_directory
        SaveWindowAtts.fileName = '{}{:0>7}'.format(field_name, state)
        SaveWindowAtts.family = 0
        SaveWindowAtts.format = SaveWindowAtts.PNG  # BMP, CURVE, JPEG, OBJ, PNG, POSTSCRIPT, POVRAY, PPM, RGB, STL, TIFF, ULTRA, VTK, PLY
        SaveWindowAtts.width = width
        SaveWindowAtts.height = height
        SaveWindowAtts.screenCapture = 0
        SaveWindowAtts.saveTiled = 0
        SaveWindowAtts.quality = 100
        SaveWindowAtts.progressive = 0
        SaveWindowAtts.binary = 0
        SaveWindowAtts.stereo = 0
        SaveWindowAtts.compression = SaveWindowAtts.PackBits  # None, PackBits, Jpeg, Deflate
        SaveWindowAtts.forceMerge = 0
        SaveWindowAtts.resConstraint = SaveWindowAtts.NoConstraint  # NoConstraint, EqualWidthHeight, ScreenProportions
        SaveWindowAtts.advancedMultiWindowSave = 0
        SetSaveWindowAttributes(SaveWindowAtts)
        SaveWindow()
def main(args):
    """Drive the contour-plotting pipeline from parsed CLI arguments."""
    check_version()
    plot_options = dict(directory=args.directory,
                        body=args.body,
                        solution_folder=args.solution_folder,
                        states=args.states,
                        view=args.view,
                        width=args.width)
    plot_field_contours(args.field_name, args.field_range, **plot_options)
    # VisIt drops a session log in the working directory; remove it.
    os.remove('visitlog.py')
# Script entry point: parse command-line options, run the pipeline,
# and exit with status 0 on success.
if __name__ == '__main__':
    args = parse_command_line()
    main(args)
    sys.exit(0)
| StarcoderdataPython |
1797901 | import logging
from maxosc.sender import Sender
class OscLogHandler(logging.Handler):
    """Logging handler that forwards formatted records over an OSC sender."""

    def __init__(self, sender: Sender, log_level: int = logging.INFO,
                 log_format: str = '%(levelname)s %(message)s'):
        super().__init__()
        self.sender: Sender = sender
        self.setFormatter(logging.Formatter(log_format))
        self.setLevel(log_level)

    def emit(self, record: logging.LogRecord):
        """Format *record* and ship it through the sender's warning channel."""
        message = self.format(record)
        self.sender.send_warning(message)
| StarcoderdataPython |
3235168 | import pygame, sys, math, random, time
from pygame.locals import*
pygame.init()

# Frame-rate cap for the main loop.
FPS = 25
fpsClock = pygame.time.Clock()

#set up window
windowX = 1080
windowY = 720
DISPLAYSURF = pygame.display.set_mode((windowX,windowY))
pygame.display.set_caption('graphs')
font = pygame.font.SysFont('consolas',20)

#set up colors
BLACK = (  0,   0,   0)
WHITE = (255, 255, 255)

# Shared mutable state: pixel coordinates of the current curve, and a
# one-element list holding the formula string rendered on screen.
points = []
formula = ['']
def graphs(mode = 0):
    """Fill the global ``points`` list with one preset polar curve.

    Each curve is sampled at integer angles 0..999 (degrees) and centred
    on the window.  Modes: 0 spiral, 1 sqrt-spiral, 2 quadratic spiral,
    3 r = 400*cos(t), 4 rose ("flower"), 5 heart, 6 slow sine rose,
    7 modulated rose.
    """
    for angle in range(0,1000,1):
        x,y = 0,0
        if mode == 0:
            # Archimedean spiral: radius grows linearly with the angle.
            x = int(windowX/2 + math.radians(angle)*math.cos(math.radians(angle)))
            y = int(windowY/2 - math.radians(angle)*math.sin(math.radians(angle)))
        if mode == 1:
            # Radius grows with the square root of the angle.
            x = int(windowX/2 + math.radians(math.sqrt(angle*50000))*math.cos(math.radians(angle)))
            y = int(windowY/2 - math.radians(math.sqrt(angle*50000))*math.sin(math.radians(angle)))
        if mode == 2:
            # Radius grows quadratically with the angle.
            x = int(windowX/2 + math.radians(angle*angle/500)*math.cos(math.radians(angle)))
            y = int(windowY/2 - math.radians(angle*angle/500)*math.sin(math.radians(angle)))
        if mode == 3:
            x = int(windowX/2 + 400*math.cos(math.radians(angle))*math.cos(math.radians(angle)))
            y = int(windowY/2 - 400*math.cos(math.radians(angle))*math.sin(math.radians(angle)))
        if mode == 4: # flower
            petals = 6 # even numbers produce doubled petals
            # NOTE(review): '2+200*cos(...)*cos(...)' adds 2 AFTER the product;
            # '(2+200*cos(...))*cos(...)' may have been intended -- confirm.
            x = int(windowX/2 + 2+200*math.cos(math.radians(angle*petals))*math.cos(math.radians(angle)))
            y = int(windowY/2 - 2+200*math.cos(math.radians(angle*petals))*math.sin(math.radians(angle)))
        if mode == 5: # heart (classic parametric heart curve, scaled)
            multiplier = 10
            x = int(windowX/2 + multiplier* 16*(math.sin(math.radians(angle))**3 ))
            y = int(windowY/2 + multiplier* -13*(math.cos(math.radians(angle))) -\
                multiplier* -5*(math.cos(math.radians(2*angle))) -\
                multiplier* -2*(math.cos(math.radians(3*angle))) -\
                multiplier* -1*(math.cos(math.radians(4*angle))))
        if mode == 6:
            x = int(windowX/2 + 300*math.sin(math.radians(angle/20))*math.cos(math.radians(angle)))
            y = int(windowY/2 - 300*math.sin(math.radians(angle/20))*math.sin(math.radians(angle)))
        if mode == 7:
            multiplier = 300
            x = int(windowX/2 + (100 - multiplier*math.sin(math.radians(2*angle))*math.cos(math.radians(1*angle))) *math.cos(math.radians(angle)))
            y = int(windowY/2 - (100 - multiplier*math.sin(math.radians(2*angle))*math.cos(math.radians(1*angle))) *math.sin(math.radians(angle)))
        points.append([x,y])
def dynaGraph(a, mul, cosang, cosdiv, sinang, sindiv):
    """Append the polar curve r(t) = a - mul*sin(sinang*t/sindiv)*cos(cosang*t/cosdiv)
    to the global ``points`` list and publish its formula in ``formula[0]``.

    Angles are in degrees, sampled 0..4995 in steps of 5.  Divisors of 0
    are treated as 1 to avoid ZeroDivisionError.
    Fixes vs. original: removes an unused local (`multiplier`), hoists the
    loop-invariant divisor guards and formula string out of the loop, and
    computes the radius once per sample instead of twice.
    """
    sdiv = sindiv if sindiv != 0 else 1
    cdiv = cosdiv if cosdiv != 0 else 1
    # The on-screen formula does not depend on the angle, so set it once.
    formula[0] = 'r = '+str(a)+' - ' +str(mul)+'cos('+str(cosang)+'t/'+str(cdiv)+')sin('+str(sinang)+'t/'+str(sdiv)+') [z:help]'
    for angle in range(0, 5000, 5):
        # Polar radius for this angle; reused for both x and y.
        r = a - mul*math.sin(math.radians(sinang*angle/sdiv))*math.cos(math.radians(cosang*angle/cdiv))
        x = int(windowX/2 + r*math.cos(math.radians(angle)))
        y = int(windowY/2 - r*math.sin(math.radians(angle)))
        points.append([x, y])
def drawPoints():
    """Draw the current curve as one open polyline in a random colour.

    Fix: the original redrew the ENTIRE polyline once per point (O(n^2)
    draw calls); each pass fully overdrew the previous one, so only the
    last random colour was ever visible.  A single draw with one random
    colour produces the same frame far faster.
    """
    if len(points) > 2:
        color = [random.randint(50, 200) for _ in range(3)]
        pygame.draw.lines(DISPLAYSURF, color, False, points, 2)
#graphs(2)
# Curve parameters [a, mul, cosang, cosdiv, sinang, sindiv], editable live
# by holding A/S/D/F/G/H and scrolling the mouse wheel.
dyna = [50,50,2,2,2,2]
dynaGraph(dyna[0],dyna[1],dyna[2], dyna[3],dyna[4], dyna[5])
angle = 0  # only used by the commented-out experiment below
while True: ##main game loop
    keys = pygame.key.get_pressed()
##    if angle < 10000: angle+=20
##    else:
##        angle = 0
##        points = []
    #get mouse position
    #mposX, mposY = pygame.mouse.get_pos()[0], pygame.mouse.get_pos()[1]
##    x = int(windowX/2 + 300*math.sin(math.radians(angle/20))*math.cos(math.radians(angle)))
##    y = int(windowY/2 - 300*math.sin(math.radians(angle/20))*math.sin(math.radians(angle)))
##    points.append([x,y])
    #time.sleep(0.2)
    DISPLAYSURF.fill(BLACK)
    # Holding Z shows the help line.
    if keys[pygame.K_z]:
        helps = 'press and hold A,S,D,F,G,H and MOUSEWHEEL to change numbers :)'
        DISPLAYSURF.blit(font.render(helps,1,(150,150,150)),(0,20))
    drawPoints()
    DISPLAYSURF.blit(font.render(formula[0],1,WHITE),(0,0))
    for event in pygame.event.get():
        if event.type == MOUSEBUTTONDOWN:
            # Wheel up (button 4): increase whichever parameter's key is held,
            # then rebuild the curve from scratch.
            if event.button == 4:
                if keys[pygame.K_a]:
                    points = []
                    dyna[0] += 10
                    dynaGraph(dyna[0],dyna[1],dyna[2], dyna[3],dyna[4], dyna[5])
                if keys[pygame.K_s]:
                    points = []
                    dyna[1] += 10
                    dynaGraph(dyna[0],dyna[1],dyna[2], dyna[3],dyna[4], dyna[5])
                if keys[pygame.K_d]:
                    points = []
                    dyna[2] += 1
                    dynaGraph(dyna[0],dyna[1],dyna[2], dyna[3],dyna[4], dyna[5])
                if keys[pygame.K_f]:
                    points = []
                    dyna[3] += 1
                    dynaGraph(dyna[0],dyna[1],dyna[2], dyna[3],dyna[4], dyna[5])
                if keys[pygame.K_g]:
                    points = []
                    dyna[4] += 1
                    dynaGraph(dyna[0],dyna[1],dyna[2], dyna[3],dyna[4], dyna[5])
                if keys[pygame.K_h]:
                    points = []
                    dyna[5] += 1
                    dynaGraph(dyna[0],dyna[1],dyna[2], dyna[3],dyna[4], dyna[5])
            # Wheel down (button 5): decrease the held parameter.
            if event.button == 5:
                if keys[pygame.K_a]:
                    points = []
                    dyna[0] -= 10
                    dynaGraph(dyna[0],dyna[1],dyna[2], dyna[3],dyna[4], dyna[5])
                if keys[pygame.K_s]:
                    points = []
                    dyna[1] -= 10
                    dynaGraph(dyna[0],dyna[1],dyna[2], dyna[3],dyna[4], dyna[5])
                if keys[pygame.K_d]:
                    points = []
                    dyna[2] -= 1
                    dynaGraph(dyna[0],dyna[1],dyna[2], dyna[3],dyna[4], dyna[5])
                if keys[pygame.K_f]:
                    points = []
                    dyna[3] -= 1
                    dynaGraph(dyna[0],dyna[1],dyna[2], dyna[3],dyna[4], dyna[5])
                if keys[pygame.K_g]:
                    points = []
                    dyna[4] -= 1
                    dynaGraph(dyna[0],dyna[1],dyna[2], dyna[3],dyna[4], dyna[5])
                if keys[pygame.K_h]:
                    points = []
                    dyna[5] -= 1
                    dynaGraph(dyna[0],dyna[1],dyna[2], dyna[3],dyna[4], dyna[5])
        # C resets every parameter to its default and redraws.
        if event.type == KEYDOWN and event.key == K_c:
            dyna[0] = 50
            dyna[1] = 50
            dyna[2] = 1
            dyna[3] = 1
            dyna[4] = 1
            dyna[5] = 1
            points = []
            dynaGraph(dyna[0],dyna[1],dyna[2], dyna[3],dyna[4], dyna[5])
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
    pygame.display.update()
    fpsClock.tick(FPS)
| StarcoderdataPython |
1689463 | import unittest
import torch
import torch_testing as tt
from all import nn
from all.core import StateArray
from all.approximation import VNetwork, FeatureNetwork
from all.memory import NStepAdvantageBuffer
class NStepAdvantageBufferTest(unittest.TestCase):
    """Unit tests for NStepAdvantageBuffer's n-step advantage computation."""

    def setUp(self):
        # Fixed seed so the randomly initialised networks are reproducible.
        torch.manual_seed(1)
        self.features = FeatureNetwork(nn.Linear(1, 2), None)
        self.v = VNetwork(nn.Linear(2, 1), None)

    def _compute_expected_advantages(self, states, returns, next_states, lengths):
        """Reference advantage: R + gamma^len * V(s') - V(s) with gamma=0.5."""
        return (
            returns
            + (0.5 ** lengths.float()) * self.v.eval(self.features.eval(next_states))
            - self.v.eval(self.features.eval(states))
        )

    def test_rollout(self):
        """A plain 2-step rollout with no episode terminations."""
        buffer = NStepAdvantageBuffer(self.v, self.features, 2, 3, discount_factor=0.5)
        actions = torch.ones((3))
        states = StateArray(torch.arange(0, 12).unsqueeze(1).float(), (12,))
        buffer.store(states[0:3], actions, torch.zeros(3))
        buffer.store(states[3:6], actions, torch.ones(3))
        states, _, advantages = buffer.advantages(states[6:9])

        expected_states = StateArray(torch.arange(0, 6).unsqueeze(1).float(), (6,))
        expected_next_states = StateArray(
            torch.cat((torch.arange(6, 9), torch.arange(6, 9))).unsqueeze(1).float(), (6,)
        )
        expected_returns = torch.tensor([
            0.5, 0.5, 0.5,
            1, 1, 1
        ]).float()
        expected_lengths = torch.tensor([
            2., 2, 2,
            1, 1, 1
        ])
        self.assert_states_equal(states, expected_states)
        tt.assert_allclose(advantages, self._compute_expected_advantages(
            expected_states, expected_returns, expected_next_states, expected_lengths
        ))

    def test_rollout_with_dones(self):
        """Rollout where episodes terminate mid-buffer, truncating the n-step returns."""
        buffer = NStepAdvantageBuffer(self.v, self.features, 3, 3, discount_factor=0.5)
        done = torch.tensor([False] * 12)
        done[5] = True
        done[7] = True
        done[9] = True
        states = StateArray(torch.arange(0, 12).unsqueeze(1).float(), (12,), done=done)
        actions = torch.ones((3))
        buffer.store(states[0:3], actions, torch.zeros(3))
        buffer.store(states[3:6], actions, torch.ones(3))
        buffer.store(states[6:9], actions, 2 * torch.ones(3))
        states, actions, advantages = buffer.advantages(states[9:12])

        expected_states = StateArray(torch.arange(0, 9).unsqueeze(1).float(), (9,), done=done[0:9])
        # Bootstrap states: terminal ones are marked not-done in next_done.
        expected_next_done = torch.tensor([True] * 9)
        expected_next_done[5] = False
        expected_next_done[7] = False
        expected_next_done[8] = False
        expected_next_states = StateArray(torch.tensor([
            9, 7, 5,
            9, 7, 11,
            9, 10, 11
        ]).unsqueeze(1).float(), (9,), done=expected_next_done)
        expected_returns = torch.tensor([
            1, 0.5, 0,
            2, 1, 2,
            2, 2, 2
        ]).float()
        expected_lengths = torch.tensor([
            3, 2, 1,
            2, 1, 2,
            1, 1, 1
        ]).float()
        self.assert_states_equal(states, expected_states)
        tt.assert_allclose(advantages, self._compute_expected_advantages(
            expected_states, expected_returns, expected_next_states, expected_lengths
        ))

    def test_multi_rollout(self):
        """Two consecutive rollouts through the same buffer reuse it correctly."""
        buffer = NStepAdvantageBuffer(self.v, self.features, 2, 2, discount_factor=0.5)
        raw_states = StateArray(torch.arange(0, 12).unsqueeze(1).float(), (12,))
        actions = torch.ones((2))
        buffer.store(raw_states[0:2], actions, torch.ones(2))
        buffer.store(raw_states[2:4], actions, torch.ones(2))
        states, actions, advantages = buffer.advantages(raw_states[4:6])
        expected_states = StateArray(torch.arange(0, 4).unsqueeze(1).float(), (4,))
        expected_returns = torch.tensor([1.5, 1.5, 1, 1])
        expected_next_states = StateArray(torch.tensor([4., 5, 4, 5]).unsqueeze(1), (4,))
        expected_lengths = torch.tensor([2., 2, 1, 1])
        self.assert_states_equal(states, expected_states)
        tt.assert_allclose(advantages, self._compute_expected_advantages(
            expected_states,
            expected_returns,
            expected_next_states,
            expected_lengths
        ))

        buffer.store(raw_states[4:6], actions, torch.ones(2))
        buffer.store(raw_states[6:8], actions, torch.ones(2))
        states, actions, advantages = buffer.advantages(raw_states[8:10])
        expected_states = StateArray(torch.arange(4, 8).unsqueeze(1).float(), (4,))
        self.assert_states_equal(states, expected_states)
        tt.assert_allclose(advantages, self._compute_expected_advantages(
            expected_states,
            torch.tensor([1.5, 1.5, 1, 1]),
            StateArray(torch.tensor([8, 9, 8, 9]).unsqueeze(1).float(), (4,)),
            torch.tensor([2., 2, 1, 1])
        ))

    def assert_array_equal(self, actual, expected):
        """Element-wise equality with a readable failure message."""
        for i, exp in enumerate(expected):
            self.assertEqual(actual[i], exp, msg=(
                ("\nactual: %s\nexpected: %s") % (actual, expected)))

    def assert_states_equal(self, actual, expected):
        """Compare StateArrays by observation (approx) and mask (exact)."""
        tt.assert_almost_equal(actual.observation, expected.observation)
        tt.assert_equal(actual.mask, expected.mask)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
4820223 | '''METHOD TO FIND THE TEMPLATE ON THE LARGE IMAGE'''
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt

# Load the source image in grayscale and carve a region out of it to use
# as the template -- so a near-perfect match is guaranteed to exist.
img = cv.imread('bird.jpg',0) #'''IMAGE'''
frame = img[345:678,266:466] #'''TEMPLATE'''
h,w = frame.shape

# Normalised cross-correlation: res[y, x] is the match score with the
# template's top-left corner placed at (x, y).
res = cv.matchTemplate(img,frame,cv.TM_CCOEFF_NORMED)
loc = np.where(res>=0.97) # around 0.8 there will be a point which is brightest point in the image (and it will give the coordinates of that (the upper left corner))
for pt in zip(*loc[::-1]): # ::-1 is used to reverse the (row, col) values to get (x, y)
    # Outline every location that scored above the threshold.
    cv.rectangle(img,pt,(pt[0]+w,pt[1]+h),(0,0,255),1)

cv.imshow('img',img)
cv.imshow('frame',frame)
cv.waitKey(0)
29477 | #!/cm/shared/languages/python-3.3.2/bin/python
# submit script for submission of mizuRoute simualtions
# <NAME> Oct 29 2019
#
# call this script from 'run_mizuRoute_templated_mswep050calib.py which creates a qsub job to submit to the HPC queue
# This script is actually called from 'call_pythonscript.sh' (which is needed to load modules before calling the script)
import os,glob,subprocess,sys,shutil,multiprocessing
import datetime
def call_subproc(cmd, logfile):
    """Run *cmd*, writing its combined stdout/stderr to *logfile*.

    The log file is opened in a ``with`` block so the handle is always
    closed (the original version leaked the file object passed to
    ``stdout``).  Returns the subprocess return code so callers can
    check for failure; existing callers that ignore it are unaffected.
    """
    with open(logfile, 'w') as log:
        return subprocess.call(cmd, stdout=log, stderr=subprocess.STDOUT)
# Print wall-clock start time for the batch of runs.
print('Starting:',datetime.datetime.now())

# Job configuration arrives via environment variables set by the
# submitting qsub wrapper script.
control_files = os.environ['CONTROL_FLIST'].split(':')
logdir = os.environ['LOGDIR']
ncpus = int(os.environ['NCPUS'])
mizuexe = os.environ['MIZU_EXE']

print('running simulations',len(control_files))
print(os.environ['CONTROL_FLIST'])

# One worker process per allotted CPU; each mizuRoute simulation is
# dispatched asynchronously to the pool.
pool = multiprocessing.Pool(processes=ncpus)
for control_file in control_files:
    # Todo, could add check if this simulation has already been run
    fname = os.path.basename(control_file)
    # Simulation name = filename minus an 8-char prefix and 4-char
    # extension -- assumes a fixed control-file naming scheme; TODO confirm.
    sim_name =fname[8:-4]
    logfile = os.path.join(logdir,sim_name+'.log')
    cmd = ['time',mizuexe,control_file]
    print('command',cmd)
    print('log',logfile)
    #ret = pool.apply_async(subprocess.call,cmd,{'stdout':open(logfile,'w') ,'stderr':subprocess.STDOUT})
    #subprocess.call(cmd,stdout=open(logfile,'w'),stderr=subprocess.STDOUT)
    ret = pool.apply_async(call_subproc,[cmd,logfile])

# Block until every queued simulation has finished.
pool.close()
pool.join()
print('Finished:',datetime.datetime.now())
| StarcoderdataPython |
3342672 | from __future__ import division
import itertools as it
import os
import utils as ut
keys = "<KEY> Hs-Mm Hs-Nv Hs-Sp Hs-Sc Hs_uni-Ce_uni Ce-Dm Ce-Mm Ce-Nv Ce-Sp Sp-Dm Sp-Mm Sp-Nv Mm-Dm Dm-Nv Hs-Xl Hs-X2 Hs-X3 Hs-Pf Hs-Tg".split()
def odict(from_sp, to_sp):
    """
    Load the ortholog mapping between two species as a dict, eg:
    {HsProt1: set([CeProtA, CeProtB,...]), ...}

    Returns None when from_sp == to_sp (no mapping is needed).
    """
    if from_sp == to_sp:
        return None
    # load_ogroups resolves the table filename (and validates the species
    # pair) itself, so the redundant orth_fname() call the original made
    # here -- whose results were never used -- has been removed.
    ogroups = load_ogroups(from_sp, to_sp)
    # ogroups come back already ordered (from_sp, to_sp) -- change of
    # 10/30/2013 -- so no swap is required.
    return _ogroups_to_odict(ogroups, swap_order=False)
def orth_fname(from_sp, to_sp):
    """Return (filename, swap_order) for the inparanoid table of a species pair.

    swap_order is True when the table is stored under the reversed key,
    i.e. the caller must flip the two species columns when reading it.
    """
    forward = from_sp + '-' + to_sp
    reverse = to_sp + '-' + from_sp
    if forward in keys:
        key, swap_order = forward, False
    elif reverse in keys:
        key, swap_order = reverse, True
    else:
        assert False, "Orthogroup key %s not in keys list" % reverse
    fname = ut.proj_path('convert_orth', 'table.' + key)
    return fname, swap_order
def odict_1to1(from_sp, to_sp):
    """
    Flat dict restricted to strictly one-to-one orthologs: each source id
    maps to exactly one target id, and that target maps back to exactly
    one source id.
    """
    forward = odict(from_sp, to_sp)
    backward = odict(to_sp, from_sp)
    result = {}
    for src, targets in forward.items():
        if len(targets) != 1:
            continue
        target = list(targets)[0]
        if len(backward[target]) == 1:
            result[src] = target
    return result
def convert_dict_single(fromtype, totype):
    """
    Single-step id conversion dict.
    totype: must be Sp (eg 'Hs') or Sp_seqdb.
    Returns None if conversion is unnecessary or not found.
    """
    if '_' in totype:
        # Drop the seqdb suffix when it is the default one for that species.
        tosp, toseqdb = totype.split('_')
        if toseqdb == ut.config()[tosp + '_default']:
            totype = tosp
    if fromtype == totype:
        return None
    if len(fromtype) == 2 and len(totype) == 2:
        # Two bare species codes: plain orthology table.
        return odict(fromtype, totype)
    return custom_conversion(fromtype, totype)
def convert_dict(fromtype, totype):
    """
    Conversion dict from fromtype ids to totype ids.
    Tries a direct single-step conversion first; failing that, routes
    through the target species' bare code and composes the two steps.
    Returns None if conversion is unnecessary or not found.
    """
    direct = convert_dict_single(fromtype, totype)
    if direct:
        return direct
    step1 = convert_dict_single(fromtype, totype[:2])
    step2 = convert_dict_single(totype[:2], totype)
    if step1 and step2:
        return ut.compose_dict_sets(step1, step2)
    return None
def all_odicts(sp, sps):
    """Map each other species in *sps* to its ortholog dict from *sp*."""
    return {other: odict(sp, other) for other in sps if sp != other}
def all_ogroup_sizes(fromsp, tosps):
    """For each target species, map fromsp ids to their orthogroup size."""
    return {othersp: ogroup_size_dict(od)
            for othersp, od in all_odicts(fromsp, tosps).items()}
def ogroup_size_dict(odict):
    """
    Given an odict of fromid: set(toids), return a dict of fromid ->
    size of the from-species side of its orthogroup.
    """
    inverse = ut.dict_inverse_sets(odict)
    sizes = {}
    for fromid, toids in odict.items():
        # Orthogroups are cohesive, so any single member identifies the
        # whole group -- the first one is as good as any.
        representative = list(toids)[0]
        sizes[fromid] = len(inverse[representative])
    return sizes
def custom_conversion(fromtype, totype):
    """
    Look for a hand-made conversion table data/convert/<from>2<to>.tab.
    Return its dict-of-sets, or None when the file does not exist.
    """
    fpath = ut.proj_path('convert', "%s2%s.tab" % (fromtype, totype))
    return ut.load_dict_sets(fpath) if os.path.exists(fpath) else None
def _ogroups_to_odict(ogroups, swap_order=False):
"""
From a list of orthogroups, return a dict from sp1 prots to a set of sp2
prots. We want a dictionary from the first species in the file to the second,
unless swap_order is True.
"""
sp1col = 1 if swap_order else 0
sp2col = 0 if swap_order else 1
orthdict = dict([(p1,set([p2 for p2 in og[sp2col]])) for og in ogroups for
p1 in og[sp1col]])
return orthdict
def load_ogroups(from_sp, to_sp, fname=None):
    """
    Load an inparanoid table.Sp1-Sp2 file into a list of orthogroups,
    where each orthogroup is a tuple of (proteins in from_sp, proteins in
    to_sp).  Eg: [([HsProtA, HsProtB,..],[CeProtA,CeProtB,..]), ...]
    When *fname* is given explicitly, no column swapping is applied.
    """
    if fname is None:
        fname, swap_order = orth_fname(from_sp, to_sp)
    else:
        swap_order = False
    # Columns 2 and 3 hold the two species' members in the order of the
    # table filename; swap when the file is keyed the other way round.
    from_ind, to_ind = (3, 2) if swap_order else (2, 3)
    ogroups = []
    for row in ut.load_tab_file(fname):
        # Protein ids alternate with confidence scores, hence the [::2].
        ogroups.append((row[from_ind].split()[::2], row[to_ind].split()[::2]))
    return ogroups[1:]  # drop the header row
def orth_pairs(p, od):
    """
    p: a ppi pair of ids
    od: an orth.odict; None means same species, so the pair passes through.
    Returns an iterable of all cross-species ortholog pairs, or [] when
    either id is missing from the mapping.
    """
    if od is None:
        return [p]
    a, b = p[0], p[1]
    if a in od and b in od:
        return it.product(od[a], od[b])
    return []
| StarcoderdataPython |
152930 | <gh_stars>0
from __future__ import annotations
import logging
import math
from typing import Union, Optional
class Node:
    """Base node of a snailfish-number binary tree.

    Subclasses are Literal (leaf integer) and Pair (internal node).
    Operations that only make sense on one subclass raise
    NotImplementedError here -- a subclass of Exception, so any existing
    broad handlers still catch it (the original raised bare Exception).
    """

    def __init__(self):
        self.parent: Union[Node, None] = None
        self.left: Union[Node, None] = None
        self.right: Union[Node, None] = None

    def explode(self, depth=0) -> bool:
        """Try to explode a deeply nested pair; the base node never does."""
        return False

    def split(self) -> bool:
        """Try to split one oversized literal, preferring the left subtree."""
        return self.left.split() or self.right.split()

    def reduce(self):
        """Repeatedly apply explode then split until neither rule fires."""
        logging.debug("Starting: %s", self)
        while self.explode() or self.split():
            logging.debug("After: %s", self)
        logging.debug("Finished: %s", self)

    def all_literals(self) -> list[Literal]:
        """Left-to-right list of leaf nodes under this node."""
        return []

    def replace(self, old_node, new_node):
        """Swap a direct child; only meaningful on Pair."""
        raise NotImplementedError("Not implemented")

    def top(self):
        """Return the root of the tree containing this node."""
        if self.parent is None:
            return self
        return self.parent.top()

    def make_pair(self, left: Node, right: Node) -> Node:
        raise NotImplementedError("Not implemented")

    def add(self, tree: Node) -> Node:
        """Snailfish addition: pair the two trees, then reduce the result."""
        pair = Pair(self, tree)
        pair.reduce()
        return pair

    def magnitude(self) -> int:
        raise NotImplementedError("Not implemented")
class Literal(Node):
    """Leaf node holding a plain integer value."""

    def __init__(self, value: int):
        super().__init__()
        self.value = value

    def __str__(self):
        return str(self.value)

    def all_literals(self) -> list[Literal]:
        return [self]

    def split(self) -> bool:
        """Split a value >= 10 into a pair of its floor/ceil halves."""
        if self.value < 10:
            return False
        logging.debug("Need to split %d", self.value)
        half = self.value / 2
        replacement = self.parent.make_pair(Literal(math.floor(half)),
                                            Literal(math.ceil(half)))
        self.parent.replace(self, replacement)
        return True

    def magnitude(self) -> int:
        return self.value
class Pair(Node):
    """Internal node: a snailfish pair [left, right]."""

    def __init__(self, left: Optional[Node] = None, right: Optional[Node] = None):
        super().__init__()
        self.left = left
        if left is not None:
            left.parent = self
        self.right = right
        if right is not None:
            right.parent = self

    def __str__(self):
        return f"[{str(self.left)},{str(self.right)}]"

    def explode(self, depth=0) -> bool:
        """Explode the leftmost pair nested at depth 4; return True if one fired."""
        if depth < 4:
            return self.left.explode(depth+1) or self.right.explode(depth+1)
        if not isinstance(self.left, Literal) or not isinstance(self.right, Literal):
            raise Exception("Trying to explode with non-literals")
        # Flatten the whole tree's leaves left-to-right so the linear
        # neighbours of this pair's two literals can be located by index.
        all_literals = self.top().all_literals()
        # Add to the left
        left_index = all_literals.index(self.left)
        if left_index > 0:
            left_neighbor = all_literals[left_index - 1]
            left_neighbor.value += self.left.value
        # Add to the right
        right_index = all_literals.index(self.right)
        if right_index < len(all_literals) - 1:
            right_neighbor = all_literals[right_index + 1]
            right_neighbor.value += self.right.value
        # The exploded pair itself is replaced by the literal 0.
        self.parent.replace(self, Literal(0))
        return True

    def replace(self, old_node, new_node):
        """Replace the direct child *old_node* with *new_node*, re-parenting it."""
        if self.left == old_node:
            self.left = new_node
            new_node.parent = self
        elif self.right == old_node:
            self.right = new_node
            new_node.parent = self
        else:
            raise Exception("replace on non-existent old_node")

    def all_literals(self) -> list[Literal]:
        return self.left.all_literals() + self.right.all_literals()

    def make_pair(self, left: Node, right: Node) -> Node:
        pair = Pair(left, right)
        return pair

    def magnitude(self) -> int:
        # Snailfish magnitude: 3 * left + 2 * right, recursively.
        return 3 * self.left.magnitude() + 2 * self.right.magnitude()

    @staticmethod
    def parse(snum) -> Node:
        """Build a tree from a nested list (or its string repr) of ints."""
        if isinstance(snum, str):
            # NOTE(review): eval on the input string -- fine for trusted
            # puzzle input, unsafe for untrusted data.
            snum = eval(snum)
        if isinstance(snum, int):
            return Literal(snum)
        pair = Pair()
        pair.left = Pair.parse(snum[0])
        pair.left.parent = pair
        pair.right = Pair.parse(snum[1])
        pair.right.parent = pair
        return pair
def parse_snailfish(number: str):
    """Parse a snailfish-number string like "[[1,2],3]" into a node tree.

    Fixes the original, which built the tree but never returned it (the
    function always yielded None).  Also uses ast.literal_eval instead of
    eval: it accepts exactly the nested-list literals this format uses,
    without executing arbitrary expressions from the input string.
    """
    import ast
    num_list = ast.literal_eval(number)
    return Pair.parse(num_list)
| StarcoderdataPython |
1654537 | <filename>bleson/beacons/mesh.py
from bleson.core.roles import Advertiser
from bleson.core.types import Advertisement
from bleson.interfaces.adapter import Adapter
from bleson.logger import log
class MeshBeacon(Advertiser):
    """BLE advertiser broadcasting a mesh packet laid out [length][type][payload].

    The length byte counts the type byte plus the payload, so the payload
    must be at most 254 bytes to fit in a single length byte.
    """

    def __init__(self, adapter, beacon_type: bytes, data: bytes):
        super().__init__(adapter)
        self.advertisement = Advertisement()
        # Only the first byte of beacon_type goes into the packet.
        self.beacon_type = beacon_type[0:1]
        self.data = data
        # len() already returns int, so the original int() wrapper was
        # redundant; +1 accounts for the type byte.
        self.len = (len(data) + 1).to_bytes(1, 'big')
        self.advertisement.raw_data = self.mesh_packet()
        # Use the library logger rather than the stray debug print()
        # the original left in.
        log.debug(f"Beacon Adv raw data = {self.advertisement.raw_data}")

    def mesh_packet(self):
        """Return the raw advertisement bytes: length + type + payload."""
        return self.len + self.beacon_type + self.data
| StarcoderdataPython |
# Raw binary fixture (format unknown from here): among header-looking
# bytes it embeds the filename "test1.txt" and the payload text
# "Live free or die hard\r\n", followed by trailing bytes that look like
# structure/checksum data.  NOTE(review): presumably the expected output
# of a packer/archiver under test -- confirm against the consuming code.
out_data = b"\x1c\x91\x73\x94\xba\xfb\x3c\x30" + \
           b"\x3c\x30\xac\x26\x62\x09\x74\x65" + \
           b"\x73\x74\x31\x2e\x74\x78\x74\x5e" + \
           b"\xc5\xf7\x32\x4c\x69\x76\x65\x20" + \
           b"\x66\x72\x65\x65\x20\x6f\x72\x20" + \
           b"\x64\x69\x65\x20\x68\x61\x72\x64" + \
           b"\x0d\x0a\xd3\x14\x7c\x2e\x86\x89" + \
           b"\x7a\x42\x58\xed\x06\x53\x9f\x15" + \
           b"\xcc\xca\x7e\x7b\x37\x28\x5f\x3c"
3337365 | import os
import sys
import argparse
import importlib
import numpy as np
import tensorflow as tf
import pdb
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '..'))
sys.path.append(os.path.join(BASE_DIR, '../utils'))
import provider
import provider_riconv
import tf_util
import pdb
import time
import scipy
import re
import pickle
import gc
# ---- Command-line configuration -------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--model', default='rinet', help='Model name')
# NOTE(review): required=True makes this default dead -- argparse ignores
# defaults on required options.
parser.add_argument('--load_dir', required=True, default='rinet')
parser.add_argument('--num_point', type=int, default=1024, help='Point Number [256/512/1024/2048] [default: 1024]')
parser.add_argument('--max_epoch', type=int, default=251, help='Epoch to run [default: 251]')
# NOTE(review): help text says default 32 but the actual default is 4.
parser.add_argument('--batch_size', type=int, default=4, help='Batch Size during training [default: 32]')
parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]')
parser.add_argument('--momentum', type=float, default=0.9, help='Initial learning rate [default: 0.9]')
parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]')
parser.add_argument('--decay_step', type=int, default=200000, help='Decay step for lr decay [default: 200000]')
parser.add_argument('--decay_rate', type=float, default=0.7, help='Decay rate for lr decay [default: 0.7]')
parser.add_argument('--rotation', action='store_true', help='Whether to apply rotation during training [default: False]')
parser.add_argument('--finetune', action='store_true', help='Whether to finetune [default: False]')
parser.add_argument('--checkpoint', default='log/model.ckpt', help='Checkpoint directory to finetune [default: log/model.ckpt]')
# NOTE(review): defaults here (256 and 64) disagree with the help text.
parser.add_argument('--num_pool', type=int, default=256, help='Number of pooling [default: 64]')
parser.add_argument('--pool_knn1', type=int, default=64, help='Number of neighbors for lf [default: 128]')
parser.add_argument('--num_votes', type=int, default=12)
parser.add_argument('--so3', action='store_true', default=True, help='Whether training in SO3 setting')
parser.add_argument('--azi', action='store_true', help='Whether training in azimuthal rotation')
FLAGS = parser.parse_args()

# ---- Module-level constants derived from the flags ------------------------
LOAD_DIR = FLAGS.load_dir
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
MAX_EPOCH = FLAGS.max_epoch
BASE_LEARNING_RATE = FLAGS.learning_rate
GPU_INDEX = FLAGS.gpu
MOMENTUM = FLAGS.momentum
OPTIMIZER = FLAGS.optimizer
DECAY_STEP = FLAGS.decay_step
DECAY_RATE = FLAGS.decay_rate
FINETUNE = FLAGS.finetune
CHECKPOINT = FLAGS.checkpoint
NUM_VOTES = FLAGS.num_votes

# Import the model module from the checkpoint directory so evaluation uses
# the exact model definition the run was trained with.
sys.path.append(os.path.join(BASE_DIR, FLAGS.load_dir))
MODEL = importlib.import_module(FLAGS.model)
MODEL_FILE = os.path.join(BASE_DIR, 'models', FLAGS.model+'.py')
print(MODEL_FILE)
LOG_FOUT = open(os.path.join(LOAD_DIR, 'log_test.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')

MAX_NUM_POINT = 2048
NUM_CLASSES = 40

# Batch-norm decay schedule parameters.
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99

# Regularization-weight decay schedule parameters.
BASE_REG_WEIGHT = 0.001
REG_WEIGHT_DECAY_RATE = 0.5
REG_WEIGHT_DECAY_STEP = float(DECAY_STEP)

# ModelNet40 official train/test split
TRAIN_FILES = provider.getDataFiles(
    os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/train_files.txt'))
TEST_FILES = provider.getDataFiles(
    os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/test_files.txt'))

# Load data beforehand
KEYS = ['data', 'label']
TRAIN_DATA, TRAIN_LABEL = \
    zip(*[provider.loadDataFile_with_keys(fn, KEYS) for fn in TRAIN_FILES])
TEST_DATA, TEST_LABEL = \
    zip(*[provider.loadDataFile_with_keys(fn, KEYS) for fn in TEST_FILES])

# concatenate batches
TRAIN_DATA = np.concatenate(TRAIN_DATA, axis=0)
TRAIN_LABEL = np.squeeze(np.concatenate(TRAIN_LABEL, axis=0))
TEST_DATA = np.concatenate(TEST_DATA, axis=0)
TEST_LABEL = np.squeeze(np.concatenate(TEST_LABEL, axis=0))
def log_string(out_str):
    """Append *out_str* to the run's log file (flushed immediately) and echo it."""
    line = out_str + '\n'
    LOG_FOUT.write(line)
    LOG_FOUT.flush()
    print(out_str)
def get_learning_rate(batch):
    """Staircase-decayed learning-rate tensor for the current global step."""
    lr = tf.train.exponential_decay(
        BASE_LEARNING_RATE,   # base learning rate
        batch * BATCH_SIZE,   # samples seen so far
        DECAY_STEP,
        DECAY_RATE,
        staircase=True)
    # Never let the rate decay all the way to zero.
    return tf.maximum(lr, 0.00001)
def get_bn_decay(batch):
    """Batch-norm decay schedule: momentum decays, so decay grows toward the clip."""
    momentum = tf.train.exponential_decay(
        BN_INIT_DECAY,
        batch * BATCH_SIZE,
        BN_DECAY_DECAY_STEP,
        BN_DECAY_DECAY_RATE,
        staircase=True)
    return tf.minimum(BN_DECAY_CLIP, 1 - momentum)
def get_reg_weight(batch):
    """Smoothly decayed regularization weight, floored at 1e-5."""
    weight = tf.train.exponential_decay(
        BASE_REG_WEIGHT,
        batch * BATCH_SIZE,
        REG_WEIGHT_DECAY_STEP,
        REG_WEIGHT_DECAY_RATE,
        staircase=False)
    return tf.maximum(weight, 0.00001)
def atoi(text):
    """Convert a digit-only chunk to int; leave anything else as a string."""
    if text.isdigit():
        return int(text)
    return text
def natural_keys(text):
    """Sort key for natural ordering: digit runs compare numerically.

    E.g. sorted(['f10', 'f2'], key=natural_keys) -> ['f2', 'f10'].
    """
    parts = re.split(r'(\d+)', text)
    return [int(part) if part.isdigit() else part for part in parts]
def train():
    """Build the TF1 evaluation graph, restore the checkpoint from LOAD_DIR,
    and run voted evaluation over the test split.

    Despite its name this function only evaluates: the optimizer/train_op
    are constructed but never run -- NOTE(review): confirm that is intended.
    """
    with tf.Graph().as_default():
        with tf.device('/gpu:'+str(GPU_INDEX)):
            pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, 1024)
            # NOTE(review): input_graph is created but never fed or used below.
            input_graph = tf.placeholder(tf.float32, shape = (BATCH_SIZE, NUM_POINT, NUM_POINT))
            is_training_pl = tf.placeholder(tf.bool, shape=())
            # Scalar hyper-parameter placeholders passed through to the model.
            flag_pl = tf.placeholder(tf.int32, shape=())
            flag1 = tf.placeholder(tf.int32, shape=())
            flag2 = tf.placeholder(tf.int32, shape=())
            flag3 = tf.placeholder(tf.int32, shape=())
            dilation = tf.placeholder(tf.int32, shape=())
            gcn1 = tf.placeholder(tf.int32, shape=())
            gcn2 = tf.placeholder(tf.int32, shape=())
            gcn3 = tf.placeholder(tf.int32, shape=())
            print(is_training_pl)

            # Note the global_step=batch parameter to minimize.
            # That tells the optimizer to helpfully increment the 'batch' parameter for you every time it trains.
            batch = tf.Variable(0, trainable=False)
            bn_decay = get_bn_decay(batch)
            tf.summary.scalar('bn_decay', bn_decay)

            # Get model and loss
            pred, end_points = MODEL.get_model(pointclouds_pl, FLAGS.num_pool, FLAGS.pool_knn1,
                is_training_pl, bn_decay=bn_decay, flag=flag_pl, flag2=flag2, flag3=flag3,gcn1=gcn1, gcn2=gcn2, gcn3=gcn3, dilation=dilation)
            reg_weight = get_reg_weight(batch)
            loss = MODEL.get_loss(pred, labels_pl, end_points)
            tf.summary.scalar('loss', loss)

            correct = tf.equal(tf.argmax(pred, 1), tf.to_int64(labels_pl))
            accuracy = tf.reduce_sum(tf.cast(correct, tf.float32)) / float(BATCH_SIZE)
            tf.summary.scalar('accuracy', accuracy)

            # Get training operator
            learning_rate = get_learning_rate(batch)
            tf.summary.scalar('learning_rate', learning_rate)
            if OPTIMIZER == 'momentum':
                optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)
            elif OPTIMIZER == 'adam':
                optimizer = tf.train.AdamOptimizer(learning_rate)

            if FINETUNE:
                """THIS IS NOT WORKING CURRENTLY"""
                # Scale gradients of pre-trained variables by 0.1; train the
                # final fully-connected layers at the full rate.
                finetune_var_names = ['fc1', 'fc2', 'fc3']
                finetuning_vars = [v for v in tf.trainable_variables() if v.name.split('/')[0] in finetune_var_names]
                orig_vars = [v for v in tf.trainable_variables() if v.name.split('/')[0] not in finetune_var_names]
                gvs = optimizer.compute_gradients(loss, [orig_vars, finetuning_vars])
                scaled_gvs = [(grad * 0.1, var) for (grad, var) in gvs[:len(orig_vars)]] + gvs[len(orig_vars):]
                train_op = optimizer.apply_gradients(scaled_gvs, global_step=batch)
            else:
                gvs = optimizer.compute_gradients(loss)
                train_op = optimizer.apply_gradients(gvs, global_step=batch)

            # Add ops to save and restore all the variables.
            saver = tf.train.Saver()

        # Load parameters before finetuning
        if FINETUNE:
            variables_to_restore = [v for v in tf.all_variables() if 'rel' not in v.name.split('/')[0]]
            variables_to_restore = [v for v in variables_to_restore if not v.name == 'batch']
            pre_saver = tf.train.Saver(variables_to_restore)

        # Create a session
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)
        # Restore all weights from the checkpoint under LOAD_DIR.
        saver.restore(sess, LOAD_DIR + '/model.ckpt')

        # Add summary writers
        #merged = tf.merge_all_summaries()
        merged = tf.summary.merge_all()
        test_writer = tf.summary.FileWriter(os.path.join(LOAD_DIR, 'test'))

        # Init variables
        # NOTE(review): the init op is built but sess.run(init) is never
        # called; all variables come from saver.restore() above.
        init = tf.global_variables_initializer()

        ops = {'pointclouds_pl': pointclouds_pl,
               'labels_pl': labels_pl,
               'is_training_pl': is_training_pl,
               'pred': pred,
               'loss': loss,
               'gvs': gvs,
               'train_op': train_op,
               'merged': merged,
               'step': batch,
               'flag': flag_pl,
               'flag2': flag2,
               'flag3': flag3,
               'dilation' : dilation,
               'gcn1' : gcn1,
               'gcn2' : gcn2,
               'gcn3' : gcn3
               }

        acc, cls_avg = eval_one_epoch(sess, ops, test_writer, NUM_VOTES)
        print('Overall accuracy: ', acc)
def eval_one_epoch(sess, ops, test_writer, num_votes):
    """ ops: dict mapping from string to tf ops """
    # Run one full pass over the test set. Every batch is evaluated
    # NUM_VOTES times, each vote on a freshly point-shuffled and
    # SO(3)-rotated copy of the clouds; the per-class logits are summed
    # across votes and the argmax of the sum is the voted prediction.
    # Returns (overall_accuracy, mean_per_class_accuracy).
    is_training = False
    current_data = TEST_DATA[:, 0:NUM_POINT, :]
    current_label = TEST_LABEL
    file_size = current_data.shape[0]
    num_batches = file_size // BATCH_SIZE
    pred_conf = np.zeros((0, 40))
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    # NOTE(review): this handle is never closed and `label_to_class` is not
    # used below -- presumably kept for debugging; confirm before removing.
    shape_txt = open('data/modelnet40_ply_hdf5_2048/shape_names.txt', 'r')
    label_to_class = shape_txt.read().split('\n')
    # Fixed network hyper-parameters fed through the placeholders in `ops`.
    flag1 = 64
    flag2 = 32
    flag3 = 16
    gcn1 = 16
    gcn2 = 8
    gcn3 = 4
    log_string('----------------')
    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1 ) * BATCH_SIZE
        # Logits accumulated over the voting rounds for this batch.
        batch_pred_sum = np.zeros((BATCH_SIZE, NUM_CLASSES))
        for vote_idx in range(NUM_VOTES):
            # Each vote sees a fresh random point order and random rotation.
            shuffle = np.arange(NUM_POINT)
            np.random.shuffle(shuffle)
            rot_data = provider_riconv.so3_rotate(current_data[start_idx:end_idx, shuffle, :])
            feed_dict = {
                ops['pointclouds_pl']: rot_data,
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['is_training_pl']: is_training,
                ops['flag'] : flag1,
                ops['flag2'] : flag2,
                ops['flag3'] : flag3,
                ops['dilation'] : 3,
                ops['gcn1'] : gcn1,
                ops['gcn2'] : gcn2,
                ops['gcn3'] : gcn3
            }
            summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
                ops['loss'], ops['pred']], feed_dict=feed_dict)
            batch_pred_sum += pred_val
        # Voted class per sample in this batch.
        pred_conf = np.argmax(batch_pred_sum, 1)
        test_writer.add_summary(summary, step)
        correct = np.sum(pred_conf == current_label[start_idx:end_idx])
        total_correct += correct
        total_seen += BATCH_SIZE
        loss_sum += (loss_val*BATCH_SIZE)
        for i in range(start_idx, end_idx):
            l = current_label[i]
            total_seen_class[l] += 1
            total_correct_class[l] += (pred_conf[i-start_idx] == l)
    # Handle remaining
    # The tail that does not fill a whole batch is zero-padded up to
    # BATCH_SIZE; only the first end_idx-start_idx rows are scored.
    if file_size - num_batches * BATCH_SIZE > 0:
        start_idx = num_batches * BATCH_SIZE
        end_idx = file_size
        input_data = np.zeros((BATCH_SIZE, 1024, 3))
        input_label = np.zeros(BATCH_SIZE)
        input_label[0:end_idx-start_idx] = current_label[start_idx:end_idx]
        batch_pred_sum = np.zeros((BATCH_SIZE, NUM_CLASSES))
        for vote_idx in range(NUM_VOTES):
            # NOTE(review): `shuffle` is computed but, unlike the main loop
            # above, it is not applied to the point order here -- confirm
            # whether that asymmetry is intended.
            shuffle = np.arange(NUM_POINT)
            np.random.shuffle(shuffle)
            input_data[0:end_idx - start_idx, ...] = provider_riconv.so3_rotate(current_data[start_idx:end_idx, 0:NUM_POINT, :])
            feed_dict = {
                ops['pointclouds_pl']: input_data,
                ops['labels_pl']: input_label,
                ops['is_training_pl']: is_training,
                ops['flag'] : flag1,
                ops['flag2'] : flag2,
                ops['flag3'] : flag3,
                ops['dilation'] : 3,
                ops['gcn1'] : gcn1,
                ops['gcn2'] : gcn2,
                ops['gcn3'] : gcn3
            }
            summary, step, loss_val, pred_val= sess.run([ops['merged'], ops['step'],
                ops['loss'], ops['pred']], feed_dict=feed_dict)
            batch_pred_sum += pred_val
        pred_conf = np.argmax(batch_pred_sum, 1)
        test_writer.add_summary(summary, step)
        pred_val = np.argmax(pred_val, axis=1)
        correct = np.sum(pred_conf[0:end_idx-start_idx] == current_label[start_idx:end_idx])
        total_correct += correct
        total_seen += end_idx - start_idx
        loss_sum += (loss_val * (end_idx - start_idx))
        for i in range(start_idx, end_idx):
            l = current_label[i]
            total_seen_class[l] += 1
            total_correct_class[l] += (pred_conf[i - start_idx] == l)
    return (total_correct / float(total_seen)), np.mean(np.array(total_correct_class) / np.array(total_seen_class, dtype=np.float))
if __name__ == "__main__":
    # Script entry point: time the full run, then close the log file.
    start_time = time.time()
    train()
    # NOTE(review): start_time/end_time are measured but never reported.
    end_time = time.time()
    LOG_FOUT.close()
| StarcoderdataPython |
1697106 | import urllib2
from config import *
# Power off a BMC by hitting its web endpoint once (Python 2 / urllib2).
# `url`, `username` and `password` come from the star-import of config above.
# Authentication is HTTP Digest; the opener is installed globally so the
# final urlopen() call is authenticated.
#off bmc
p = urllib2.HTTPPasswordMgrWithDefaultRealm()
p.add_password(None, url, username, password)
handler = urllib2.HTTPDigestAuthHandler(p)
opener = urllib2.build_opener(handler)
urllib2.install_opener(opener)
# Single GET request triggers the power-off action; the body is discarded.
page = urllib2.urlopen(url).read()
#time.sleep(1)
#page = urllib2.urlopen(url).read()
| StarcoderdataPython |
198035 | <gh_stars>1-10
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
###############################################################################
#234567890123456789012345678901234567890123456789012345678901234567890123456789
#--------1---------2---------3---------4---------5---------6---------7---------
# ##### BEGIN COPYRIGHT BLOCK #####
#
# initial script copyright (c)2011-2013 <NAME>
#
# ##### END COPYRIGHT BLOCK #####
from struct import (
pack,
unpack,
)
from sys import (
exc_info,
)
from codecs import (
register_error,
)
###############################################################################
#
# MilkShape 3D 1.8.5 File Format Specification
#
# all specifications were taken from SDK 1.8.5
#
# some additional specifications were taken from
# MilkShape 3D Viewer v2.0 (Nov 06 2007) - msMesh.h
#
###############################################################################
#
# sizes
#
class Ms3dSpec:
    """Constants of the MilkShape 3D 1.8.5 binary format: hard limits,
    flag bits, string-codec settings and default values used throughout
    this module."""
    ###########################################################################
    #
    # max values
    #
    MAX_VERTICES = 65534 # 0..65533; note: (65534???, 65535???)
    MAX_TRIANGLES = 65534 # 0..65533; note: (65534???, 65535???)
    MAX_GROUPS = 255 # 1..255; note: (0 default group)
    MAX_MATERIALS = 128 # 0..127; note: (-1 no material)
    MAX_JOINTS = 128 # 0..127; note: (-1 no joint)
    MAX_SMOOTH_GROUP = 32 # 0..32; note: (0 no smoothing group)
    MAX_TEXTURE_FILENAME_SIZE = 128
    ###########################################################################
    #
    # flags
    #
    FLAG_NONE = 0
    FLAG_SELECTED = 1
    FLAG_HIDDEN = 2
    FLAG_SELECTED2 = 4
    FLAG_DIRTY = 8
    FLAG_ISKEY = 16 # additional spec from [2]
    FLAG_NEWLYCREATED = 32 # additional spec from [2]
    FLAG_MARKED = 64 # additional spec from [2]
    # Material texture-mode bits (stored in Ms3dMaterial.mode).
    FLAG_TEXTURE_NONE = 0x00
    FLAG_TEXTURE_COMBINE_ALPHA = 0x20
    FLAG_TEXTURE_HAS_ALPHA = 0x40
    FLAG_TEXTURE_SPHERE_MAP = 0x80
    MODE_TRANSPARENCY_SIMPLE = 0
    MODE_TRANSPARENCY_DEPTH_BUFFERED_WITH_ALPHA_REF = 1
    MODE_TRANSPARENCY_DEPTH_SORTED_TRIANGLES = 2
    ###########################################################################
    #
    # values
    #
    HEADER = "MS3D000000"
    ## TEST_STR = 'START@€@µ@²@³@©@®@¶@ÿ@A@END.bmp'
    ## TEST_RAW = b'START@\x80@\xb5@\xb2@\xb3@\xa9@\xae@\xb6@\xff@A@END.bmp\x00'
    ##
    # Name of the custom codecs error handler registered by Ms3dIo.
    STRING_MS3D_REPLACE = 'use_ms3d_replace'
    STRING_ENCODING = "ascii" # wrong encoding (too limited), but there is an UnicodeEncodeError issue, that prevent using the correct one for the moment
    ##STRING_ENCODING = "cp437" # US, wrong encoding and shows UnicodeEncodeError
    ##STRING_ENCODING = "cp858" # Europe + €, wrong encoding and shows UnicodeEncodeError
    ##STRING_ENCODING = "cp1252" # WIN EU, this would be the better codepage, but shows UnicodeEncodeError, on print on system console and writing to file
    STRING_ERROR = STRING_MS3D_REPLACE
    ##STRING_ERROR = 'replace'
    ##STRING_ERROR = 'ignore'
    ##STRING_ERROR = 'surrogateescape'
    STRING_TERMINATION = b'\x00'
    STRING_REPLACE = u'_'
    ###########################################################################
    #
    # min, max, default values
    #
    NONE_VERTEX_BONE_ID = -1
    NONE_GROUP_MATERIAL_INDEX = -1
    DEFAULT_HEADER = HEADER
    DEFAULT_HEADER_VERSION = 4
    DEFAULT_VERTEX_BONE_ID = NONE_VERTEX_BONE_ID
    DEFAULT_TRIANGLE_SMOOTHING_GROUP = 0
    DEFAULT_TRIANGLE_GROUP = 0
    DEFAULT_MATERIAL_MODE = FLAG_TEXTURE_NONE
    DEFAULT_GROUP_MATERIAL_INDEX = NONE_GROUP_MATERIAL_INDEX
    DEFAULT_MODEL_JOINT_SIZE = 1.0
    DEFAULT_MODEL_TRANSPARENCY_MODE = MODE_TRANSPARENCY_SIMPLE
    DEFAULT_MODEL_ANIMATION_FPS = 25.0
    DEFAULT_MODEL_SUB_VERSION_COMMENTS = 1
    DEFAULT_MODEL_SUB_VERSION_VERTEX_EXTRA = 2
    DEFAULT_MODEL_SUB_VERSION_JOINT_EXTRA = 1
    DEFAULT_MODEL_SUB_VERSION_MODEL_EXTRA = 1
    DEFAULT_FLAGS = FLAG_NONE
    MAX_MATERIAL_SHININESS = 128
    # blender default / OpenGL default
    DEFAULT_MATERIAL_AMBIENT = (0.2, 0.2, 0.2, 1.0)
    DEFAULT_MATERIAL_DIFFUSE = (0.8, 0.8, 0.8, 1.0)
    DEFAULT_MATERIAL_SPECULAR = (1.0, 1.0, 1.0, 1.0)
    DEFAULT_MATERIAL_EMISSIVE = (0.0, 0.0, 0.0, 1.0)
    DEFAULT_MATERIAL_SHININESS = 12.5
    DEFAULT_JOINT_COLOR = (0.8, 0.8, 0.8)
###############################################################################
#
# helper class for basic raw io
#
class Ms3dIo:
    """Little-endian binary primitives for reading and writing MS3D files.

    Every reader raises EOFError when the stream yields no bytes at all;
    a *short* (truncated) read surfaces as struct.error from unpack,
    unchanged from the historical behaviour.
    """

    # sizes for IO
    SIZE_BYTE = 1
    SIZE_SBYTE = 1
    SIZE_WORD = 2
    SIZE_DWORD = 4
    SIZE_FLOAT = 4
    LENGTH_ID = 10
    LENGTH_NAME = 32
    LENGTH_FILENAME = 128
    # Digits used by the __repr__ implementations of the record classes.
    PRECISION = 4

    @staticmethod
    def read_byte(raw_io):
        """ read a single byte from raw_io """
        buffer = raw_io.read(Ms3dIo.SIZE_BYTE)
        if not buffer:
            raise EOFError()
        return unpack('<B', buffer)[0]

    @staticmethod
    def write_byte(raw_io, value):
        """ write a single byte to raw_io """
        raw_io.write(pack('<B', value))

    @staticmethod
    def read_sbyte(raw_io):
        """ read a single signed byte from raw_io """
        buffer = raw_io.read(Ms3dIo.SIZE_BYTE)
        if not buffer:
            raise EOFError()
        return unpack('<b', buffer)[0]

    @staticmethod
    def write_sbyte(raw_io, value):
        """ write a single signed byte to raw_io """
        raw_io.write(pack('<b', value))

    @staticmethod
    def read_word(raw_io):
        """ read a word (16 bit unsigned) from raw_io """
        buffer = raw_io.read(Ms3dIo.SIZE_WORD)
        if not buffer:
            raise EOFError()
        return unpack('<H', buffer)[0]

    @staticmethod
    def write_word(raw_io, value):
        """ write a word (16 bit unsigned) to raw_io """
        raw_io.write(pack('<H', value))

    @staticmethod
    def read_dword(raw_io):
        """ read a double word (32 bit unsigned) from raw_io """
        buffer = raw_io.read(Ms3dIo.SIZE_DWORD)
        if not buffer:
            raise EOFError()
        return unpack('<I', buffer)[0]

    @staticmethod
    def write_dword(raw_io, value):
        """ write a double word (32 bit unsigned) to raw_io """
        raw_io.write(pack('<I', value))

    @staticmethod
    def read_float(raw_io):
        """ read a float (32 bit) from raw_io """
        buffer = raw_io.read(Ms3dIo.SIZE_FLOAT)
        if not buffer:
            raise EOFError()
        return unpack('<f', buffer)[0]

    @staticmethod
    def write_float(raw_io, value):
        """ write a float (32 bit) to raw_io """
        raw_io.write(pack('<f', value))

    @staticmethod
    def read_array(raw_io, itemReader, count):
        """ read an array[count] of objects from raw_io, by using a itemReader.

        Returns a tuple (immutable), unlike read_array2 which returns a list.
        """
        value = []
        for i in range(count):
            value.append(itemReader(raw_io))
        return tuple(value)

    @staticmethod
    def write_array(raw_io, itemWriter, count, value):
        """ write an array[count] of objects to raw_io, by using a itemWriter """
        for i in range(count):
            itemWriter(raw_io, value[i])

    @staticmethod
    def read_array2(raw_io, itemReader, count, count2):
        """ read an array[count][count2] of objects from raw_io,
            by using a itemReader.

        Returns a list of tuples (kept for backward compatibility with the
        original asymmetry to read_array).
        """
        value = []
        for i in range(count):
            itemValue = Ms3dIo.read_array(raw_io, itemReader, count2)
            value.append(tuple(itemValue))
        return value

    @staticmethod
    def write_array2(raw_io, itemWriter, count, count2, value):
        """ write an array[count][count2] of objects to raw_io,
            by using a itemWriter """
        for i in range(count):
            Ms3dIo.write_array(raw_io, itemWriter, count2, value[i])

    @staticmethod
    def ms3d_replace(exc):
        """codecs error handler (PEP 293, http://www.python.org/dev/peps/pep-0293/).

        Replaces characters the MS3D codec cannot handle with
        Ms3dSpec.STRING_REPLACE and resumes after the offending span.
        Raises TypeError for exception types the handler does not support,
        as the codecs error-handler protocol requires.
        """
        if isinstance(exc, UnicodeEncodeError):
            return ((exc.end - exc.start) * Ms3dSpec.STRING_REPLACE, exc.end)
        elif isinstance(exc, UnicodeDecodeError):
            return (Ms3dSpec.STRING_REPLACE, exc.end)
        elif isinstance(exc, UnicodeTranslateError):
            return ((exc.end - exc.start) * Ms3dSpec.STRING_REPLACE, exc.end)
        else:
            # BUGFIX: `exc` is an exception *instance*; the class name lives
            # on its type. The previous `exc.__name__` raised AttributeError
            # instead of the TypeError the protocol expects.
            raise TypeError("can't handle %s" % exc.__class__.__name__)

    @staticmethod
    def read_string(raw_io, length):
        """ read a string of a specific length from raw_io
        (zero-terminated within its fixed-size field) """
        buffer = raw_io.read(length)
        if not buffer:
            raise EOFError()
        # Cut at the first NUL terminator; if none, take the whole field.
        eol = buffer.find(Ms3dSpec.STRING_TERMINATION)
        if eol < 0:
            eol = len(buffer)
        register_error(Ms3dSpec.STRING_MS3D_REPLACE, Ms3dIo.ms3d_replace)
        return buffer[:eol].decode(encoding=Ms3dSpec.STRING_ENCODING, errors=Ms3dSpec.STRING_ERROR)

    @staticmethod
    def write_string(raw_io, length, value):
        """ write a string into a zero-padded field of `length` bytes """
        register_error(Ms3dSpec.STRING_MS3D_REPLACE, Ms3dIo.ms3d_replace)
        buffer = value.encode(encoding=Ms3dSpec.STRING_ENCODING, errors=Ms3dSpec.STRING_ERROR)
        if not buffer:
            buffer = bytes()
        # pack() zero-pads (or truncates) the encoded bytes to `length`.
        raw_io.write(pack('<{}s'.format(length), buffer))
###############################################################################
#
# multi complex types
#
###############################################################################
class Ms3dHeader:
    """The MS3D file header: magic id string plus format version number."""

    __slots__ = ('id', 'version',)

    def __init__(self,
            default_id=Ms3dSpec.DEFAULT_HEADER,
            default_version=Ms3dSpec.DEFAULT_HEADER_VERSION):
        self.id = default_id
        self.version = default_version

    def __repr__(self):
        return "\n<id='{}', version={}>".format(self.id, self.version)

    def __hash__(self):
        # XOR of the field hashes (kept identical to the historical value).
        return hash(self.id) ^ hash(self.version)

    def __eq__(self, other):
        # Comparing against None is always False.
        if other is None:
            return False
        return self.id == other.id and self.version == other.version

    def read(self, raw_io):
        """Deserialize the header from `raw_io`; returns self."""
        self.id = Ms3dIo.read_string(raw_io, Ms3dIo.LENGTH_ID)
        self.version = Ms3dIo.read_dword(raw_io)
        return self

    def write(self, raw_io):
        """Serialize the header to `raw_io`."""
        Ms3dIo.write_string(raw_io, Ms3dIo.LENGTH_ID, self.id)
        Ms3dIo.write_dword(raw_io, self.version)
class HeaderError(Exception):
    """Raised (by callers of Ms3dHeader) for an invalid MS3D file header."""
    pass
###############################################################################
class Ms3dVertex:
    """A single MS3D vertex: position, primary bone id and reference count.

    __slots__ is deliberately not declared so importer/exporter code can
    attach extra attributes at runtime (original author's remark).
    """

    def __init__(self,
            default_flags=Ms3dSpec.DEFAULT_FLAGS,
            default_vertex=(0.0, 0.0, 0.0),
            default_bone_id=Ms3dSpec.DEFAULT_VERTEX_BONE_ID,
            default_reference_count=0,
            default_vertex_ex_object=None,
            ):
        self.flags = default_flags
        self._vertex = default_vertex
        self.bone_id = default_bone_id
        self.reference_count = default_reference_count
        # Sub-version 2 is the default extended-vertex layout
        # (Ms3dSpec.DEFAULT_MODEL_SUB_VERSION_VERTEX_EXTRA = 2).
        if default_vertex_ex_object is None:
            default_vertex_ex_object = Ms3dVertexEx2()
        self._vertex_ex_object = default_vertex_ex_object

    def __repr__(self):
        return ("\n<flags={}, vertex=({:.{p}f}, {:.{p}f}, {:.{p}f}), "
                "bone_id={}, reference_count={}>").format(
                self.flags,
                *self._vertex,
                self.bone_id,
                self.reference_count,
                p=Ms3dIo.PRECISION)

    def __hash__(self):
        # Identity is position-only; flags/bone_id/reference_count are
        # intentionally ignored (kept from the original implementation).
        return hash(self.vertex)

    def __eq__(self, other):
        return self.vertex == other.vertex

    @property
    def vertex(self):
        return self._vertex

    @property
    def vertex_ex_object(self):
        return self._vertex_ex_object

    def read(self, raw_io):
        """Deserialize one vertex record from `raw_io`; returns self."""
        self.flags = Ms3dIo.read_byte(raw_io)
        self._vertex = Ms3dIo.read_array(raw_io, Ms3dIo.read_float, 3)
        self.bone_id = Ms3dIo.read_sbyte(raw_io)
        self.reference_count = Ms3dIo.read_byte(raw_io)
        return self

    def write(self, raw_io):
        """Serialize one vertex record to `raw_io` (same field order as read)."""
        Ms3dIo.write_byte(raw_io, self.flags)
        Ms3dIo.write_array(raw_io, Ms3dIo.write_float, 3, self.vertex)
        Ms3dIo.write_sbyte(raw_io, self.bone_id)
        Ms3dIo.write_byte(raw_io, self.reference_count)
###############################################################################
class Ms3dTriangle:
    """ Ms3dTriangle """
    # A single MS3D triangle: three vertex indices with per-corner normals
    # and UV coordinates (s/t), plus smoothing group and owning group index.
    """
    __slots__ was taking out,
    to be able to inject additional attributes during runtime
    __slots__ = (
            'flags',
            'smoothing_group',
            'group_index',
            '_vertex_indices',
            '_vertex_normals',
            '_s',
            '_t',
            )
    """

    def __init__(
            self,
            default_flags=Ms3dSpec.DEFAULT_FLAGS,
            default_vertex_indices=(0, 0, 0),
            default_vertex_normals=(
                    (0.0, 0.0, 0.0),
                    (0.0, 0.0, 0.0),
                    (0.0, 0.0, 0.0)),
            default_s=(0.0, 0.0, 0.0),
            default_t=(0.0, 0.0, 0.0),
            default_smoothing_group=Ms3dSpec.DEFAULT_TRIANGLE_SMOOTHING_GROUP,
            default_group_index=Ms3dSpec.DEFAULT_TRIANGLE_GROUP
            ):
        self.flags = default_flags
        self._vertex_indices = default_vertex_indices
        self._vertex_normals = default_vertex_normals
        self._s = default_s
        self._t = default_t
        self.smoothing_group = default_smoothing_group
        self.group_index = default_group_index

    def __repr__(self):
        # Debug representation; floats printed with Ms3dIo.PRECISION digits.
        return "\n<flags={}, vertex_indices={}, vertex_normals=(({:.{p}f}, "\
                "{:.{p}f}, {:.{p}f}), ({:.{p}f}, {:.{p}f}, {:.{p}f}), ({:.{p}f}, "\
                "{:.{p}f}, {:.{p}f})), s=({:.{p}f}, {:.{p}f}, {:.{p}f}), "\
                "t=({:.{p}f}, {:.{p}f}, {:.{p}f}), smoothing_group={}, "\
                "group_index={}>".format(
                self.flags,
                self.vertex_indices,
                self.vertex_normals[0][0],
                self.vertex_normals[0][1],
                self.vertex_normals[0][2],
                self.vertex_normals[1][0],
                self.vertex_normals[1][1],
                self.vertex_normals[1][2],
                self.vertex_normals[2][0],
                self.vertex_normals[2][1],
                self.vertex_normals[2][2],
                self.s[0],
                self.s[1],
                self.s[2],
                self.t[0],
                self.t[1],
                self.t[2],
                self.smoothing_group,
                self.group_index,
                p=Ms3dIo.PRECISION
                )

    @property
    def vertex_indices(self):
        return self._vertex_indices

    @property
    def vertex_normals(self):
        return self._vertex_normals

    @property
    def s(self):
        return self._s

    @property
    def t(self):
        return self._t

    def read(self, raw_io):
        # Deserialize one triangle record from raw_io; returns self.
        self.flags = Ms3dIo.read_word(raw_io)
        self._vertex_indices = Ms3dIo.read_array(raw_io, Ms3dIo.read_word, 3)
        self._vertex_normals = Ms3dIo.read_array2(raw_io, Ms3dIo.read_float, 3, 3)
        self._s = Ms3dIo.read_array(raw_io, Ms3dIo.read_float, 3)
        self._t = Ms3dIo.read_array(raw_io, Ms3dIo.read_float, 3)
        self.smoothing_group = Ms3dIo.read_byte(raw_io)
        self.group_index = Ms3dIo.read_byte(raw_io)
        return self

    def write(self, raw_io):
        # Serialize one triangle record to raw_io (same field order as read).
        Ms3dIo.write_word(raw_io, self.flags)
        Ms3dIo.write_array(raw_io, Ms3dIo.write_word, 3, self.vertex_indices)
        Ms3dIo.write_array2(raw_io, Ms3dIo.write_float, 3, 3, self.vertex_normals)
        Ms3dIo.write_array(raw_io, Ms3dIo.write_float, 3, self.s)
        Ms3dIo.write_array(raw_io, Ms3dIo.write_float, 3, self.t)
        Ms3dIo.write_byte(raw_io, self.smoothing_group)
        Ms3dIo.write_byte(raw_io, self.group_index)
###############################################################################
class Ms3dGroup:
    """An MS3D group (mesh): a named list of triangle indices plus a
    material slot and an optional per-group comment.

    __slots__ is deliberately not declared so extra attributes can be
    attached at runtime.
    """

    def __init__(self,
            default_flags=Ms3dSpec.DEFAULT_FLAGS,
            default_name="",
            default_triangle_indices=None,
            default_material_index=Ms3dSpec.DEFAULT_GROUP_MATERIAL_INDEX,
            default_comment_object=None,
            ):
        self.flags = default_flags
        self.name = default_name if default_name is not None else ""
        self._triangle_indices = (default_triangle_indices
                if default_triangle_indices is not None else [])
        self.material_index = default_material_index
        if default_comment_object is None:
            default_comment_object = Ms3dCommentEx()
        self._comment_object = default_comment_object

    def __repr__(self):
        return ("\n<flags={}, name='{}', number_triangles={}, "
                "triangle_indices={}, material_index={}>").format(
                self.flags,
                self.name,
                self.number_triangles,
                self.triangle_indices,
                self.material_index)

    @property
    def number_triangles(self):
        # Derived from the index list; 0 when unset.
        if self._triangle_indices is None:
            return 0
        return len(self._triangle_indices)

    @property
    def triangle_indices(self):
        return self._triangle_indices

    @property
    def comment_object(self):
        return self._comment_object

    def read(self, raw_io):
        """Deserialize one group record from `raw_io`; returns self."""
        self.flags = Ms3dIo.read_byte(raw_io)
        self.name = Ms3dIo.read_string(raw_io, Ms3dIo.LENGTH_NAME)
        count = Ms3dIo.read_word(raw_io)
        self._triangle_indices = Ms3dIo.read_array(raw_io, Ms3dIo.read_word, count)
        self.material_index = Ms3dIo.read_sbyte(raw_io)
        return self

    def write(self, raw_io):
        """Serialize one group record to `raw_io` (count-prefixed indices)."""
        Ms3dIo.write_byte(raw_io, self.flags)
        Ms3dIo.write_string(raw_io, Ms3dIo.LENGTH_NAME, self.name)
        Ms3dIo.write_word(raw_io, self.number_triangles)
        Ms3dIo.write_array(raw_io, Ms3dIo.write_word, self.number_triangles,
                self.triangle_indices)
        Ms3dIo.write_sbyte(raw_io, self.material_index)
###############################################################################
class Ms3dMaterial:
    """An MS3D material: name, four RGBA colour channels, shininess,
    transparency, texture mode and texture/alphamap file names.

    __slots__ is deliberately not declared so extra attributes can be
    attached at runtime.
    """

    def __init__(
            self,
            default_name="",
            default_ambient=None,
            default_diffuse=None,
            default_specular=None,
            default_emissive=None,
            default_shininess=Ms3dSpec.DEFAULT_MATERIAL_SHININESS,
            default_transparency=0.0,
            default_mode=Ms3dSpec.DEFAULT_MATERIAL_MODE,
            default_texture="",
            default_alphamap="",
            default_comment_object=None, # Ms3dComment
            ):
        # BUGFIX: the colour defaults used to be `list(...)` expressions in
        # the signature. Defaults are evaluated once at definition time, so
        # every default-constructed material shared the *same* four list
        # objects -- mutating one material's colours silently changed the
        # defaults of all others. A fresh list is now built per instance.
        if default_ambient is None:
            default_ambient = list(Ms3dSpec.DEFAULT_MATERIAL_AMBIENT)
        if default_diffuse is None:
            default_diffuse = list(Ms3dSpec.DEFAULT_MATERIAL_DIFFUSE)
        if default_specular is None:
            default_specular = list(Ms3dSpec.DEFAULT_MATERIAL_SPECULAR)
        if default_emissive is None:
            default_emissive = list(Ms3dSpec.DEFAULT_MATERIAL_EMISSIVE)
        if (default_name is None):
            default_name = ""
        if (default_texture is None):
            default_texture = ""
        if (default_alphamap is None):
            default_alphamap = ""
        self.name = default_name
        self._ambient = default_ambient
        self._diffuse = default_diffuse
        self._specular = default_specular
        self._emissive = default_emissive
        self.shininess = default_shininess
        self.transparency = default_transparency
        self.mode = default_mode
        self.texture = default_texture
        self.alphamap = default_alphamap
        if default_comment_object is None:
            default_comment_object = Ms3dCommentEx()
        self._comment_object = default_comment_object # Ms3dComment

    def __repr__(self):
        # Debug representation; floats printed with Ms3dIo.PRECISION digits.
        return "\n<name='{}', ambient=({:.{p}f}, {:.{p}f}, {:.{p}f}, {:.{p}f}), "\
                "diffuse=({:.{p}f}, {:.{p}f}, {:.{p}f}, {:.{p}f}), specular=("\
                "{:.{p}f}, {:.{p}f}, {:.{p}f}, {:.{p}f}), emissive=({:.{p}f}, "\
                "{:.{p}f}, {:.{p}f}, {:.{p}f}), shininess={:.{p}f}, transparency="\
                "{:.{p}f}, mode={}, texture='{}', alphamap='{}'>".format(
                self.name,
                self.ambient[0],
                self.ambient[1],
                self.ambient[2],
                self.ambient[3],
                self.diffuse[0],
                self.diffuse[1],
                self.diffuse[2],
                self.diffuse[3],
                self.specular[0],
                self.specular[1],
                self.specular[2],
                self.specular[3],
                self.emissive[0],
                self.emissive[1],
                self.emissive[2],
                self.emissive[3],
                self.shininess,
                self.transparency,
                self.mode,
                self.texture,
                self.alphamap,
                p=Ms3dIo.PRECISION
                )

    def __hash__(self):
        # BUGFIX: the colour channels may be stored as (mutable, unhashable)
        # lists -- hashing such an instance used to raise TypeError. The
        # channels are converted to tuples first; for instances whose
        # channels are already tuples (after read()) the value is unchanged,
        # because hash(tuple(t)) == hash(t).
        return (hash(self.name)
                ^ hash(tuple(self.ambient))
                ^ hash(tuple(self.diffuse))
                ^ hash(tuple(self.specular))
                ^ hash(tuple(self.emissive))
                ^ hash(self.shininess)
                ^ hash(self.transparency)
                ^ hash(self.mode)
                ^ hash(self.texture)
                ^ hash(self.alphamap)
                )

    def __eq__(self, other):
        # NOTE(review): a list channel never equals a tuple channel even with
        # equal values -- materials compare unequal across the default/read
        # representations; preserved as-is.
        return ((self.name == other.name)
                and (self.ambient == other.ambient)
                and (self.diffuse == other.diffuse)
                and (self.specular == other.specular)
                and (self.emissive == other.emissive)
                and (self.shininess == other.shininess)
                and (self.transparency == other.transparency)
                and (self.mode == other.mode)
                #and (self.texture == other.texture)
                #and (self.alphamap == other.alphamap)
                )

    @property
    def ambient(self):
        return self._ambient

    @property
    def diffuse(self):
        return self._diffuse

    @property
    def specular(self):
        return self._specular

    @property
    def emissive(self):
        return self._emissive

    @property
    def comment_object(self):
        return self._comment_object

    def read(self, raw_io):
        """Deserialize one material record from `raw_io`; returns self."""
        self.name = Ms3dIo.read_string(raw_io, Ms3dIo.LENGTH_NAME)
        self._ambient = Ms3dIo.read_array(raw_io, Ms3dIo.read_float, 4)
        self._diffuse = Ms3dIo.read_array(raw_io, Ms3dIo.read_float, 4)
        self._specular = Ms3dIo.read_array(raw_io, Ms3dIo.read_float, 4)
        self._emissive = Ms3dIo.read_array(raw_io, Ms3dIo.read_float, 4)
        self.shininess = Ms3dIo.read_float(raw_io)
        self.transparency = Ms3dIo.read_float(raw_io)
        self.mode = Ms3dIo.read_byte(raw_io)
        self.texture = Ms3dIo.read_string(raw_io, Ms3dIo.LENGTH_FILENAME)
        self.alphamap = Ms3dIo.read_string(raw_io, Ms3dIo.LENGTH_FILENAME)
        return self

    def write(self, raw_io):
        """Serialize one material record to `raw_io` (same field order as read)."""
        Ms3dIo.write_string(raw_io, Ms3dIo.LENGTH_NAME, self.name)
        Ms3dIo.write_array(raw_io, Ms3dIo.write_float, 4, self.ambient)
        Ms3dIo.write_array(raw_io, Ms3dIo.write_float, 4, self.diffuse)
        Ms3dIo.write_array(raw_io, Ms3dIo.write_float, 4, self.specular)
        Ms3dIo.write_array(raw_io, Ms3dIo.write_float, 4, self.emissive)
        Ms3dIo.write_float(raw_io, self.shininess)
        Ms3dIo.write_float(raw_io, self.transparency)
        Ms3dIo.write_byte(raw_io, self.mode)
        Ms3dIo.write_string(raw_io, Ms3dIo.LENGTH_FILENAME, self.texture)
        Ms3dIo.write_string(raw_io, Ms3dIo.LENGTH_FILENAME, self.alphamap)
###############################################################################
class Ms3dRotationKeyframe:
    """A (time, euler-rotation) sample of a joint's rotation channel."""

    __slots__ = ('time', '_rotation',)

    def __init__(self, default_time=0.0, default_rotation=(0.0, 0.0, 0.0)):
        self.time = default_time
        self._rotation = default_rotation

    def __repr__(self):
        fmt = "\n<time={:.{p}f}, rotation=({:.{p}f}, {:.{p}f}, {:.{p}f})>"
        return fmt.format(self.time, *self._rotation, p=Ms3dIo.PRECISION)

    @property
    def rotation(self):
        return self._rotation

    def read(self, raw_io):
        """Deserialize one keyframe (time + 3 floats); returns self."""
        self.time = Ms3dIo.read_float(raw_io)
        self._rotation = Ms3dIo.read_array(raw_io, Ms3dIo.read_float, 3)
        return self

    def write(self, raw_io):
        """Serialize one keyframe (time + 3 floats)."""
        Ms3dIo.write_float(raw_io, self.time)
        Ms3dIo.write_array(raw_io, Ms3dIo.write_float, 3, self.rotation)
###############################################################################
class Ms3dTranslationKeyframe:
    """A (time, position) sample of a joint's translation channel."""

    __slots__ = ('time', '_position',)

    def __init__(self, default_time=0.0, default_position=(0.0, 0.0, 0.0)):
        self.time = default_time
        self._position = default_position

    def __repr__(self):
        fmt = "\n<time={:.{p}f}, position=({:.{p}f}, {:.{p}f}, {:.{p}f})>"
        return fmt.format(self.time, *self._position, p=Ms3dIo.PRECISION)

    @property
    def position(self):
        return self._position

    def read(self, raw_io):
        """Deserialize one keyframe (time + 3 floats); returns self."""
        self.time = Ms3dIo.read_float(raw_io)
        self._position = Ms3dIo.read_array(raw_io, Ms3dIo.read_float, 3)
        return self

    def write(self, raw_io):
        """Serialize one keyframe (time + 3 floats)."""
        Ms3dIo.write_float(raw_io, self.time)
        Ms3dIo.write_array(raw_io, Ms3dIo.write_float, 3, self.position)
###############################################################################
class Ms3dJoint:
    """ Ms3dJoint """
    # A skeleton joint: name, parent link, rest rotation/position, and the
    # rotation/translation keyframe lists, plus extended data and comment.
    """
    __slots__ was taking out,
    to be able to inject additional attributes during runtime
    __slots__ = (
            'flags',
            'name',
            'parent_name',
            '_rotation',
            '_position',
            '_rotation_keyframes',
            '_translation_keyframes',
            '_joint_ex_object', # Ms3dJointEx
            '_comment_object', # Ms3dComment
            )
    """

    def __init__(
            self,
            default_flags=Ms3dSpec.DEFAULT_FLAGS,
            default_name="",
            default_parent_name="",
            default_rotation=(0.0, 0.0, 0.0),
            default_position=(0.0, 0.0, 0.0),
            default_rotation_keyframes=None,
            default_translation_keyframes=None,
            default_joint_ex_object=None, # Ms3dJointEx
            default_comment_object=None, # Ms3dComment
            ):
        # None-safe defaults: fresh empty lists/objects per instance.
        if (default_name is None):
            default_name = ""
        if (default_parent_name is None):
            default_parent_name = ""
        if (default_rotation_keyframes is None):
            default_rotation_keyframes = [] #Ms3dRotationKeyframe()
        if (default_translation_keyframes is None):
            default_translation_keyframes = [] #Ms3dTranslationKeyframe()
        self.flags = default_flags
        self.name = default_name
        self.parent_name = default_parent_name
        self._rotation = default_rotation
        self._position = default_position
        self._rotation_keyframes = default_rotation_keyframes
        self._translation_keyframes = default_translation_keyframes
        if default_comment_object is None:
            default_comment_object = Ms3dCommentEx()
        self._comment_object = default_comment_object # Ms3dComment
        if default_joint_ex_object is None:
            default_joint_ex_object = Ms3dJointEx()
        self._joint_ex_object = default_joint_ex_object # Ms3dJointEx

    def __repr__(self):
        # Debug representation; floats printed with Ms3dIo.PRECISION digits.
        return "\n<flags={}, name='{}', parent_name='{}', rotation=({:.{p}f}, "\
                "{:.{p}f}, {:.{p}f}), position=({:.{p}f}, {:.{p}f}, {:.{p}f}), "\
                "number_rotation_keyframes={}, number_translation_keyframes={},"\
                " rotation_key_frames={}, translation_key_frames={}>".format(
                self.flags,
                self.name,
                self.parent_name,
                self.rotation[0],
                self.rotation[1],
                self.rotation[2],
                self.position[0],
                self.position[1],
                self.position[2],
                self.number_rotation_keyframes,
                self.number_translation_keyframes,
                self.rotation_key_frames,
                self.translation_key_frames,
                p=Ms3dIo.PRECISION
                )

    @property
    def rotation(self):
        return self._rotation

    @property
    def position(self):
        return self._position

    @property
    def number_rotation_keyframes(self):
        # Derived from the keyframe list; 0 when unset.
        if self.rotation_key_frames is None:
            return 0
        return len(self.rotation_key_frames)

    @property
    def number_translation_keyframes(self):
        # Derived from the keyframe list; 0 when unset.
        if self.translation_key_frames is None:
            return 0
        return len(self.translation_key_frames)

    @property
    def rotation_key_frames(self):
        return self._rotation_keyframes

    @property
    def translation_key_frames(self):
        return self._translation_keyframes

    @property
    def joint_ex_object(self):
        return self._joint_ex_object

    @property
    def comment_object(self):
        return self._comment_object

    def read(self, raw_io):
        # Deserialize one joint record, including both count-prefixed
        # keyframe lists; returns self.
        self.flags = Ms3dIo.read_byte(raw_io)
        self.name = Ms3dIo.read_string(raw_io, Ms3dIo.LENGTH_NAME)
        self.parent_name = Ms3dIo.read_string(raw_io, Ms3dIo.LENGTH_NAME)
        self._rotation = Ms3dIo.read_array(raw_io, Ms3dIo.read_float, 3)
        self._position = Ms3dIo.read_array(raw_io, Ms3dIo.read_float, 3)
        _number_rotation_keyframes = Ms3dIo.read_word(raw_io)
        _number_translation_keyframes = Ms3dIo.read_word(raw_io)
        self._rotation_keyframes = []
        for i in range(_number_rotation_keyframes):
            self.rotation_key_frames.append(Ms3dRotationKeyframe().read(raw_io))
        self._translation_keyframes = []
        for i in range(_number_translation_keyframes):
            self.translation_key_frames.append(
                    Ms3dTranslationKeyframe().read(raw_io))
        return self

    def write(self, raw_io):
        # Serialize one joint record (same field order as read).
        Ms3dIo.write_byte(raw_io, self.flags)
        Ms3dIo.write_string(raw_io, Ms3dIo.LENGTH_NAME, self.name)
        Ms3dIo.write_string(raw_io, Ms3dIo.LENGTH_NAME, self.parent_name)
        Ms3dIo.write_array(raw_io, Ms3dIo.write_float, 3, self.rotation)
        Ms3dIo.write_array(raw_io, Ms3dIo.write_float, 3, self.position)
        Ms3dIo.write_word(raw_io, self.number_rotation_keyframes)
        Ms3dIo.write_word(raw_io, self.number_translation_keyframes)
        for i in range(self.number_rotation_keyframes):
            self.rotation_key_frames[i].write(raw_io)
        for i in range(self.number_translation_keyframes):
            self.translation_key_frames[i].write(raw_io)
###############################################################################
class Ms3dCommentEx:
    """A comment attached to a specific item (group/material/joint),
    addressed by its index."""

    __slots__ = ('index', 'comment',)

    def __init__(self, default_index=0, default_comment=""):
        self.index = default_index
        self.comment = default_comment if default_comment is not None else ""

    def __repr__(self):
        return "\n<index={}, comment_length={}, comment='{}'>".format(
                self.index, self.comment_length, self.comment)

    @property
    def comment_length(self):
        # None-safe character count; an unset comment counts as empty.
        return len(self.comment) if self.comment is not None else 0

    def read(self, raw_io):
        """Deserialize index + length-prefixed comment; returns self."""
        self.index = Ms3dIo.read_dword(raw_io)
        length = Ms3dIo.read_dword(raw_io)
        self.comment = Ms3dIo.read_string(raw_io, length)
        return self

    def write(self, raw_io):
        """Serialize index + length-prefixed comment."""
        Ms3dIo.write_dword(raw_io, self.index)
        Ms3dIo.write_dword(raw_io, self.comment_length)
        Ms3dIo.write_string(raw_io, self.comment_length, self.comment)
###############################################################################
class Ms3dComment:
    """The model-level (global) comment block of an MS3D file."""

    __slots__ = ('comment',)

    def __init__(self, default_comment=""):
        self.comment = default_comment if default_comment is not None else ""

    def __repr__(self):
        return "\n<comment_length={}, comment='{}'>".format(
                self.comment_length, self.comment)

    @property
    def comment_length(self):
        # None-safe character count; an unset comment counts as empty.
        return len(self.comment) if self.comment is not None else 0

    def read(self, raw_io):
        """Deserialize the length-prefixed comment; returns self."""
        length = Ms3dIo.read_dword(raw_io)
        self.comment = Ms3dIo.read_string(raw_io, length)
        return self

    def write(self, raw_io):
        """Serialize the comment as a length-prefixed string."""
        Ms3dIo.write_dword(raw_io, self.comment_length)
        Ms3dIo.write_string(raw_io, self.comment_length, self.comment)
###############################################################################
class Ms3dVertexEx1:
    """Extended per-vertex data, sub-version 1.

    Holds up to three additional bone indices (besides the primary bone on
    Ms3dVertex) and their integer weights in the range 0..100.
    """
    __slots__ = (
        '_bone_ids',
        '_weights',
        )

    def __init__(
            self,
            default_bone_ids=(
                Ms3dSpec.DEFAULT_VERTEX_BONE_ID,
                Ms3dSpec.DEFAULT_VERTEX_BONE_ID,
                Ms3dSpec.DEFAULT_VERTEX_BONE_ID),
            default_weights=(100, 0, 0)
            ):
        self._bone_ids = default_bone_ids
        self._weights = default_weights

    def __repr__(self):
        return "\n<bone_ids={}, weights={}>".format(
            self.bone_ids,
            self.weights
            )

    @property
    def bone_ids(self):
        # Indices of the additional joints; -1 presumably marks an unused
        # slot (see the struct comments on Ms3dVertexEx3) -- TODO confirm.
        return self._bone_ids

    @property
    def weights(self):
        # Raw integer weights exactly as stored in the file.
        return self._weights

    @property
    def weight_bone_id(self):
        # NOTE(review): returns the whole weights sequence when any weight
        # is set, but the scalar 100 otherwise -- the mixed return type
        # looks suspicious; confirm against callers before changing.
        if self._weights[0] or self._weights[1] or self._weights[2]:
            return self._weights
        return 100

    @property
    def weight_bone_id0(self):
        # First stored weight; 0 when all stored weights are zero.
        if self._weights[0] or self._weights[1] or self._weights[2]:
            return self._weights[0]
        return 0

    @property
    def weight_bone_id1(self):
        # Second stored weight; 0 when all stored weights are zero.
        if self._weights[0] or self._weights[1] or self._weights[2]:
            return self._weights[1]
        return 0

    @property
    def weight_bone_id2(self):
        # Remainder so that all four weights sum to 100.
        if self._weights[0] or self._weights[1] or self._weights[2]:
            return 100 - (self._weights[0] + self._weights[1] \
                    + self._weights[2])
        return 0

    def read(self, raw_io):
        """Fill this record from *raw_io*; returns self for chaining."""
        self._bone_ids = Ms3dIo.read_array(raw_io, Ms3dIo.read_sbyte, 3)
        self._weights = Ms3dIo.read_array(raw_io, Ms3dIo.read_byte, 3)
        return self

    def write(self, raw_io):
        """Serialize this record to *raw_io*."""
        Ms3dIo.write_array(raw_io, Ms3dIo.write_sbyte, 3, self.bone_ids)
        Ms3dIo.write_array(raw_io, Ms3dIo.write_byte, 3, self.weights)
###############################################################################
class Ms3dVertexEx2:
    """Extended per-vertex data, sub-version 2.

    Same as Ms3dVertexEx1 plus an opaque per-vertex ``extra`` dword
    (usable as color or anything else, per the MS3D format).
    """
    __slots__ = (
        'extra',
        '_bone_ids',
        '_weights',
        )

    def __init__(
            self,
            default_bone_ids=(
                Ms3dSpec.DEFAULT_VERTEX_BONE_ID,
                Ms3dSpec.DEFAULT_VERTEX_BONE_ID,
                Ms3dSpec.DEFAULT_VERTEX_BONE_ID),
            default_weights=(100, 0, 0),
            default_extra=0
            ):
        self._bone_ids = default_bone_ids
        self._weights = default_weights
        self.extra = default_extra

    def __repr__(self):
        return "\n<bone_ids={}, weights={}, extra={}>".format(
            self.bone_ids,
            self.weights,
            self.extra
            )

    @property
    def bone_ids(self):
        # Indices of the additional joints.
        return self._bone_ids

    @property
    def weights(self):
        # Raw integer weights exactly as stored in the file.
        return self._weights

    @property
    def weight_bone_id(self):
        # NOTE(review): returns the whole weights sequence when any weight
        # is set, but the scalar 100 otherwise -- the mixed return type
        # looks suspicious; confirm against callers before changing.
        if self._weights[0] or self._weights[1] or self._weights[2]:
            return self._weights
        return 100

    @property
    def weight_bone_id0(self):
        # First stored weight; 0 when all stored weights are zero.
        if self._weights[0] or self._weights[1] or self._weights[2]:
            return self._weights[0]
        return 0

    @property
    def weight_bone_id1(self):
        # Second stored weight; 0 when all stored weights are zero.
        if self._weights[0] or self._weights[1] or self._weights[2]:
            return self._weights[1]
        return 0

    @property
    def weight_bone_id2(self):
        # Remainder so that all four weights sum to 100.
        if self._weights[0] or self._weights[1] or self._weights[2]:
            return 100 - (self._weights[0] + self._weights[1] \
                    + self._weights[2])
        return 0

    def read(self, raw_io):
        """Fill this record from *raw_io*; returns self for chaining."""
        self._bone_ids = Ms3dIo.read_array(raw_io, Ms3dIo.read_sbyte, 3)
        self._weights = Ms3dIo.read_array(raw_io, Ms3dIo.read_byte, 3)
        self.extra = Ms3dIo.read_dword(raw_io)
        return self

    def write(self, raw_io):
        """Serialize this record to *raw_io*."""
        Ms3dIo.write_array(raw_io, Ms3dIo.write_sbyte, 3, self.bone_ids)
        Ms3dIo.write_array(raw_io, Ms3dIo.write_byte, 3, self.weights)
        Ms3dIo.write_dword(raw_io, self.extra)
###############################################################################
class Ms3dVertexEx3:
    """Extended per-vertex data, sub-version 3 (same layout as sub-version 2).

    Original C struct documentation from the MS3D SDK:
    """
    #char bone_ids[3]; // index of joint or -1, if -1, then that weight is
    #                  // ignored, since subVersion 1
    #byte weights[3]; // vertex weight ranging from 0 - 100, last weight is
    #                 // computed by 1.0 - sum(all weights), since subVersion 1
    #// weight[0] is the weight for bone_id in Ms3dVertex
    #// weight[1] is the weight for bone_ids[0]
    #// weight[2] is the weight for bone_ids[1]
    #// 1.0f - weight[0] - weight[1] - weight[2] is the weight for bone_ids[2]
    #unsigned int extra; // vertex extra, which can be used as color or
    #                    // anything else, since subVersion 2
    __slots__ = (
        'extra',
        '_bone_ids',
        '_weights',
        )

    def __init__(
            self,
            default_bone_ids=(
                Ms3dSpec.DEFAULT_VERTEX_BONE_ID,
                Ms3dSpec.DEFAULT_VERTEX_BONE_ID,
                Ms3dSpec.DEFAULT_VERTEX_BONE_ID),
            default_weights=(100, 0, 0),
            default_extra=0
            ):
        self._bone_ids = default_bone_ids
        self._weights = default_weights
        self.extra = default_extra

    def __repr__(self):
        return "\n<bone_ids={}, weights={}, extra={}>".format(
            self.bone_ids,
            self.weights,
            self.extra
            )

    @property
    def bone_ids(self):
        # Indices of the additional joints; -1 marks an ignored weight
        # (see struct comments above).
        return self._bone_ids

    @property
    def weights(self):
        # Raw integer weights exactly as stored in the file.
        return self._weights

    @property
    def weight_bone_id(self):
        # NOTE(review): returns the whole weights sequence when any weight
        # is set, but the scalar 100 otherwise -- the mixed return type
        # looks suspicious; confirm against callers before changing.
        if self._weights[0] or self._weights[1] or self._weights[2]:
            return self._weights
        return 100

    @property
    def weight_bone_id0(self):
        # First stored weight; 0 when all stored weights are zero.
        if self._weights[0] or self._weights[1] or self._weights[2]:
            return self._weights[0]
        return 0

    @property
    def weight_bone_id1(self):
        # Second stored weight; 0 when all stored weights are zero.
        if self._weights[0] or self._weights[1] or self._weights[2]:
            return self._weights[1]
        return 0

    @property
    def weight_bone_id2(self):
        # Remainder so that all four weights sum to 100 (matches the
        # "1.0f - w0 - w1 - w2" rule in the struct comments above).
        if self._weights[0] or self._weights[1] or self._weights[2]:
            return 100 - (self._weights[0] + self._weights[1] \
                    + self._weights[2])
        return 0

    def read(self, raw_io):
        """Fill this record from *raw_io*; returns self for chaining."""
        self._bone_ids = Ms3dIo.read_array(raw_io, Ms3dIo.read_sbyte, 3)
        self._weights = Ms3dIo.read_array(raw_io, Ms3dIo.read_byte, 3)
        self.extra = Ms3dIo.read_dword(raw_io)
        return self

    def write(self, raw_io):
        """Serialize this record to *raw_io*."""
        Ms3dIo.write_array(raw_io, Ms3dIo.write_sbyte, 3, self.bone_ids)
        Ms3dIo.write_array(raw_io, Ms3dIo.write_byte, 3, self.weights)
        Ms3dIo.write_dword(raw_io, self.extra)
###############################################################################
class Ms3dJointEx:
    """Extended joint data of an MS3D file: the joint's RGB color."""

    __slots__ = (
        '_color',
        )

    def __init__(self, default_color=Ms3dSpec.DEFAULT_JOINT_COLOR):
        self._color = default_color

    def __repr__(self):
        red, green, blue = self.color[0], self.color[1], self.color[2]
        return "\n<color=({:.{p}f}, {:.{p}f}, {:.{p}f})>".format(
            red, green, blue, p=Ms3dIo.PRECISION)

    @property
    def color(self):
        """The joint color as an (r, g, b) float triple."""
        return self._color

    def read(self, raw_io):
        """Fill this record from *raw_io*; returns self for chaining."""
        self._color = Ms3dIo.read_array(raw_io, Ms3dIo.read_float, 3)
        return self

    def write(self, raw_io):
        """Serialize this record to *raw_io*."""
        Ms3dIo.write_array(raw_io, Ms3dIo.write_float, 3, self.color)
###############################################################################
class Ms3dModelEx:
    """Extended model settings: joint display size, transparency mode
    and alpha reference value."""

    __slots__ = (
        'joint_size',
        'transparency_mode',
        'alpha_ref',
        )

    def __init__(
            self,
            default_joint_size=Ms3dSpec.DEFAULT_MODEL_JOINT_SIZE,
            default_transparency_mode=Ms3dSpec.DEFAULT_MODEL_TRANSPARENCY_MODE,
            default_alpha_ref=0.0):
        self.joint_size = default_joint_size
        self.transparency_mode = default_transparency_mode
        self.alpha_ref = default_alpha_ref

    def __repr__(self):
        return "\n<joint_size={:.{p}f}, transparency_mode={}, alpha_ref={:.{p}f}>".format(
            self.joint_size,
            self.transparency_mode,
            self.alpha_ref,
            p=Ms3dIo.PRECISION)

    def read(self, raw_io):
        """Fill this record from *raw_io*; returns self for chaining."""
        self.joint_size = Ms3dIo.read_float(raw_io)
        self.transparency_mode = Ms3dIo.read_dword(raw_io)
        self.alpha_ref = Ms3dIo.read_float(raw_io)
        return self

    def write(self, raw_io):
        """Serialize this record to *raw_io*."""
        Ms3dIo.write_float(raw_io, self.joint_size)
        Ms3dIo.write_dword(raw_io, self.transparency_mode)
        Ms3dIo.write_float(raw_io, self.alpha_ref)
###############################################################################
#
# file format
#
###############################################################################
class Ms3dModel:
    """Complete in-memory representation of an MS3D (MilkShape 3D) file.

    Holds the file header, the geometry sections (vertices, triangles,
    groups, materials), the animation joints and every optional trailing
    section (comments, vertex/joint/model extra data).  ``read()`` and
    ``write()`` (de)serialize the whole file through an Ms3dIo raw stream.
    """

    # NOTE(review): spelled '__slot__' (not '__slots__') in the original,
    # so this is an ordinary class attribute with no slots effect and
    # instances keep a __dict__.  Left unchanged because enabling slots
    # could break unseen callers that attach extra attributes.
    __slot__ = (
        'header',
        'animation_fps',
        'current_time',
        'number_total_frames',
        'sub_version_comments',
        'sub_version_vertex_extra',
        'sub_version_joint_extra',
        'sub_version_model_extra',
        'name',
        '_vertices',
        '_triangles',
        '_groups',
        '_materials',
        '_joints',
        '_has_model_comment',
        '_comment_object',  # Ms3dComment
        '_model_ex_object',  # Ms3dModelEx
        )

    def __init__(self, default_name=""):
        """Create an empty model with default sub-version markers."""
        if (default_name is None):
            default_name = ""

        self.name = default_name
        self.animation_fps = Ms3dSpec.DEFAULT_MODEL_ANIMATION_FPS
        self.current_time = 0.0
        self.number_total_frames = 0
        self.sub_version_comments \
                = Ms3dSpec.DEFAULT_MODEL_SUB_VERSION_COMMENTS
        self.sub_version_vertex_extra \
                = Ms3dSpec.DEFAULT_MODEL_SUB_VERSION_VERTEX_EXTRA
        self.sub_version_joint_extra \
                = Ms3dSpec.DEFAULT_MODEL_SUB_VERSION_JOINT_EXTRA
        self.sub_version_model_extra \
                = Ms3dSpec.DEFAULT_MODEL_SUB_VERSION_MODEL_EXTRA

        self._vertices = []  # Ms3dVertex
        self._triangles = []  # Ms3dTriangle
        self._groups = []  # Ms3dGroup
        self._materials = []  # Ms3dMaterial
        self._joints = []  # Ms3dJoint

        self.header = Ms3dHeader()
        self._model_ex_object = Ms3dModelEx()
        self._comment_object = None  # Ms3dComment

    @property
    def number_vertices(self):
        if self.vertices is None:
            return 0
        return len(self.vertices)

    @property
    def vertices(self):
        return self._vertices

    @property
    def number_triangles(self):
        if self.triangles is None:
            return 0
        return len(self.triangles)

    @property
    def triangles(self):
        return self._triangles

    @property
    def number_groups(self):
        if self.groups is None:
            return 0
        return len(self.groups)

    @property
    def groups(self):
        return self._groups

    @property
    def number_materials(self):
        if self.materials is None:
            return 0
        return len(self.materials)

    @property
    def materials(self):
        return self._materials

    @property
    def number_joints(self):
        if self.joints is None:
            return 0
        return len(self.joints)

    @property
    def joints(self):
        return self._joints

    @property
    def number_group_comments(self):
        """Count of groups that carry a non-empty comment."""
        if self.groups is None:
            return 0
        number = 0
        for item in self.groups:
            if item.comment_object is not None and item.comment_object.comment:
                number += 1
        return number

    @property
    def group_comments(self):
        """The groups that carry a non-empty comment (None when no groups)."""
        if self.groups is None:
            return None
        items = []
        for item in self.groups:
            if item.comment_object is not None and item.comment_object.comment:
                items.append(item)
        return items

    @property
    def number_material_comments(self):
        """Count of materials that carry a non-empty comment."""
        if self.materials is None:
            return 0
        number = 0
        for item in self.materials:
            if item.comment_object is not None and item.comment_object.comment:
                number += 1
        return number

    @property
    def material_comments(self):
        """The materials that carry a non-empty comment (None when no materials)."""
        if self.materials is None:
            return None
        items = []
        for item in self.materials:
            if item.comment_object is not None and item.comment_object.comment:
                items.append(item)
        return items

    @property
    def number_joint_comments(self):
        """Count of joints that carry a non-empty comment."""
        if self.joints is None:
            return 0
        number = 0
        for item in self.joints:
            if item.comment_object is not None and item.comment_object.comment:
                number += 1
        return number

    @property
    def joint_comments(self):
        """The joints that carry a non-empty comment (None when no joints)."""
        if self.joints is None:
            return None
        items = []
        for item in self.joints:
            if item.comment_object is not None and item.comment_object.comment:
                items.append(item)
        return items

    @property
    def has_model_comment(self):
        """1 when a non-empty model comment exists, else 0 (file flag)."""
        if self.comment_object is not None and self.comment_object.comment:
            return 1
        return 0

    @property
    def comment_object(self):
        return self._comment_object

    @property
    def vertex_ex(self):
        """Per-vertex extra records, or None when the sub-version is 0."""
        if not self.sub_version_vertex_extra:
            return None
        return [item.vertex_ex_object for item in self.vertices]

    @property
    def joint_ex(self):
        """Per-joint extra records, or None when the sub-version is 0."""
        if not self.sub_version_joint_extra:
            return None
        return [item.joint_ex_object for item in self.joints]

    @property
    def model_ex_object(self):
        """The model extra record, or None when the sub-version is 0."""
        if not self.sub_version_model_extra:
            return None
        return self._model_ex_object

    def print_internal(self):
        """Dump the full internal state to stdout for debugging."""
        print()
        print("##############################################################")
        print("## the internal data of Ms3dModel object...")
        print("##")

        print("header={}".format(self.header))

        print("number_vertices={}".format(self.number_vertices))
        print("vertices=[", end="")
        if self.vertices:
            for obj in self.vertices:
                print("{}".format(obj), end="")
        print("]")

        print("number_triangles={}".format(self.number_triangles))
        print("triangles=[", end="")
        if self.triangles:
            for obj in self.triangles:
                print("{}".format(obj), end="")
        print("]")

        print("number_groups={}".format(self.number_groups))
        print("groups=[", end="")
        if self.groups:
            for obj in self.groups:
                print("{}".format(obj), end="")
        print("]")

        print("number_materials={}".format(self.number_materials))
        print("materials=[", end="")
        if self.materials:
            for obj in self.materials:
                print("{}".format(obj), end="")
        print("]")

        print("animation_fps={}".format(self.animation_fps))
        print("current_time={}".format(self.current_time))
        print("number_total_frames={}".format(self.number_total_frames))

        print("number_joints={}".format(self.number_joints))
        print("joints=[", end="")
        if self.joints:
            for obj in self.joints:
                print("{}".format(obj), end="")
        print("]")

        print("sub_version_comments={}".format(self.sub_version_comments))

        print("number_group_comments={}".format(self.number_group_comments))
        print("group_comments=[", end="")
        if self.group_comments:
            for obj in self.group_comments:
                print("{}".format(obj.comment_object), end="")
        print("]")

        print("number_material_comments={}".format(
                self.number_material_comments))
        print("material_comments=[", end="")
        if self.material_comments:
            for obj in self.material_comments:
                print("{}".format(obj.comment_object), end="")
        print("]")

        print("number_joint_comments={}".format(self.number_joint_comments))
        print("joint_comments=[", end="")
        if self.joint_comments:
            for obj in self.joint_comments:
                print("{}".format(obj.comment_object), end="")
        print("]")

        print("has_model_comment={}".format(self.has_model_comment))
        print("model_comment={}".format(self.comment_object))

        print("sub_version_vertex_extra={}".format(
                self.sub_version_vertex_extra))
        print("vertex_ex=[", end="")
        if self.vertex_ex:
            for obj in self.vertex_ex:
                print("{}".format(obj), end="")
        print("]")

        print("sub_version_joint_extra={}".format(
                self.sub_version_joint_extra))
        print("joint_ex=[", end="")
        if self.joint_ex:
            for obj in self.joint_ex:
                print("{}".format(obj), end="")
        print("]")

        print("sub_version_model_extra={}".format(
                self.sub_version_model_extra))
        print("model_ex={}".format(self.model_ex_object))

        print("##")
        print("## ...end")
        print("##############################################################")
        print()

    def read(self, raw_io):
        """Deserialize the whole model from *raw_io*.

        The mandatory sections (header, geometry, materials, timing) are
        read first; everything after that is optional MS3D tail data and
        is read best-effort inside a try block.  Partially read optional
        sections are discarded so the model stays self-consistent.

        Returns the accumulated warning text ("" when clean).
        Raises Ms3dHeader.HeaderError when the file header is invalid.
        """
        debug_out = []

        self.header.read(raw_io)
        if (self.header != Ms3dHeader()):
            debug_out.append("\nwarning, invalid file header\n")
            raise Ms3dHeader.HeaderError

        _number_vertices = Ms3dIo.read_word(raw_io)
        if (_number_vertices > Ms3dSpec.MAX_VERTICES):
            debug_out.append("\nwarning, invalid count: number_vertices: {}\n".format(
                    _number_vertices))
        self._vertices = []
        for i in range(_number_vertices):
            self.vertices.append(Ms3dVertex().read(raw_io))

        _number_triangles = Ms3dIo.read_word(raw_io)
        if (_number_triangles > Ms3dSpec.MAX_TRIANGLES):
            debug_out.append("\nwarning, invalid count: number_triangles: {}\n".format(
                    _number_triangles))
        self._triangles = []
        for i in range(_number_triangles):
            self.triangles.append(Ms3dTriangle().read(raw_io))

        _number_groups = Ms3dIo.read_word(raw_io)
        if (_number_groups > Ms3dSpec.MAX_GROUPS):
            debug_out.append("\nwarning, invalid count: number_groups: {}\n".format(
                    _number_groups))
        self._groups = []
        for i in range(_number_groups):
            self.groups.append(Ms3dGroup().read(raw_io))

        _number_materials = Ms3dIo.read_word(raw_io)
        if (_number_materials > Ms3dSpec.MAX_MATERIALS):
            debug_out.append("\nwarning, invalid count: number_materials: {}\n".format(
                    _number_materials))
        self._materials = []
        for i in range(_number_materials):
            self.materials.append(Ms3dMaterial().read(raw_io))

        self.animation_fps = Ms3dIo.read_float(raw_io)
        self.current_time = Ms3dIo.read_float(raw_io)
        self.number_total_frames = Ms3dIo.read_dword(raw_io)

        # Tracks which optional sections were fully read, so partial data
        # can be rolled back consistently after the try block.
        _progress = set()

        try:
            # optional data
            # doesn't matter if doesn't existing.
            _number_joints = Ms3dIo.read_word(raw_io)
            _progress.add('NUMBER_JOINTS')
            if (_number_joints > Ms3dSpec.MAX_JOINTS):
                debug_out.append("\nwarning, invalid count: number_joints: {}\n".format(
                        _number_joints))
            self._joints = []
            for i in range(_number_joints):
                self.joints.append(Ms3dJoint().read(raw_io))
            _progress.add('JOINTS')

            self.sub_version_comments = Ms3dIo.read_dword(raw_io)
            _progress.add('SUB_VERSION_COMMENTS')
            _number_group_comments = Ms3dIo.read_dword(raw_io)
            _progress.add('NUMBER_GROUP_COMMENTS')
            if (_number_group_comments > Ms3dSpec.MAX_GROUPS):
                debug_out.append("\nwarning, invalid count:"\
                        " number_group_comments: {}\n".format(
                        _number_group_comments))
            if _number_group_comments > _number_groups:
                debug_out.append("\nwarning, invalid count:"\
                        " number_group_comments: {}, number_groups: {}\n".format(
                        _number_group_comments, _number_groups))
            for i in range(_number_group_comments):
                item = Ms3dCommentEx().read(raw_io)
                if item.index >= 0 and item.index < _number_groups:
                    self.groups[item.index]._comment_object = item
                else:
                    debug_out.append("\nwarning, invalid index:"\
                            " group_index: {}, number_groups: {}\n".format(
                            item.index, _number_groups))
            _progress.add('GROUP_COMMENTS')

            _number_material_comments = Ms3dIo.read_dword(raw_io)
            _progress.add('NUMBER_MATERIAL_COMMENTS')
            if (_number_material_comments > Ms3dSpec.MAX_MATERIALS):
                debug_out.append("\nwarning, invalid count:"\
                        " number_material_comments: {}\n".format(
                        _number_material_comments))
            if _number_material_comments > _number_materials:
                debug_out.append("\nwarning, invalid count:"\
                        " number_material_comments:"\
                        " {}, number_materials: {}\n".format(
                        _number_material_comments, _number_materials))
            for i in range(_number_material_comments):
                item = Ms3dCommentEx().read(raw_io)
                if item.index >= 0 and item.index < _number_materials:
                    self.materials[item.index]._comment_object = item
                else:
                    debug_out.append("\nwarning, invalid index:"\
                            " material_index: {}, number_materials:"\
                            " {}\n".format(item.index, _number_materials))
            _progress.add('MATERIAL_COMMENTS')

            _number_joint_comments = Ms3dIo.read_dword(raw_io)
            _progress.add('NUMBER_JOINT_COMMENTS')
            if (_number_joint_comments > Ms3dSpec.MAX_JOINTS):
                debug_out.append("\nwarning, invalid count:"\
                        " number_joint_comments: {}\n".format(
                        _number_joint_comments))
            if _number_joint_comments > _number_joints:
                debug_out.append("\nwarning, invalid count:"\
                        " number_joint_comments: {}, number_joints: {}\n".format(
                        _number_joint_comments, _number_joints))
            for i in range(_number_joint_comments):
                item = Ms3dCommentEx().read(raw_io)
                if item.index >= 0 and item.index < _number_joints:
                    self.joints[item.index]._comment_object = item
                else:
                    debug_out.append("\nwarning, invalid index:"\
                            " joint_index: {}, number_joints: {}\n".format(
                            item.index, _number_joints))
            _progress.add('JOINT_COMMENTS')

            _has_model_comment = Ms3dIo.read_dword(raw_io)
            _progress.add('HAS_MODEL_COMMENTS')
            if (_has_model_comment != 0):
                self._comment_object = Ms3dComment().read(raw_io)
            else:
                self._comment_object = None
            _progress.add('MODEL_COMMENTS')

            self.sub_version_vertex_extra = Ms3dIo.read_dword(raw_io)
            _progress.add('SUB_VERSION_VERTEX_EXTRA')
            if self.sub_version_vertex_extra > 0:
                for i in range(_number_vertices):
                    if self.sub_version_vertex_extra == 1:
                        item = Ms3dVertexEx1()
                    elif self.sub_version_vertex_extra == 2:
                        item = Ms3dVertexEx2()
                    elif self.sub_version_vertex_extra == 3:
                        item = Ms3dVertexEx3()
                    else:
                        # Fix: this branch used to reference the bare name
                        # 'sub_version_vertex_extra' and raised NameError.
                        debug_out.append("\nwarning, invalid version:"\
                                " sub_version_vertex_extra: {}\n".format(
                                self.sub_version_vertex_extra))
                        continue
                    self.vertices[i]._vertex_ex_object = item.read(raw_io)
            _progress.add('VERTEX_EXTRA')

            self.sub_version_joint_extra = Ms3dIo.read_dword(raw_io)
            _progress.add('SUB_VERSION_JOINT_EXTRA')
            if self.sub_version_joint_extra > 0:
                for i in range(_number_joints):
                    self.joints[i]._joint_ex_object = Ms3dJointEx().read(raw_io)
            _progress.add('JOINT_EXTRA')

            self.sub_version_model_extra = Ms3dIo.read_dword(raw_io)
            _progress.add('SUB_VERSION_MODEL_EXTRA')
            if self.sub_version_model_extra > 0:
                self._model_ex_object.read(raw_io)
            _progress.add('MODEL_EXTRA')

        except EOFError:
            # Reached the end of the optional data; not an error.
            debug_out.append("Ms3dModel.read - optional data read: {}\n".format(_progress))
        except Exception:
            err_type, err_value, err_traceback = exc_info()
            debug_out.append("Ms3dModel.read - exception in optional try block,"
                    " _progress={0}\n type: '{1}'\n value: '{2}'\n".format(
                    _progress, err_type, err_value, err_traceback))

        # try best to continue far as possible:
        # discard partially read optional sections and fall back to defaults.
        if 'JOINTS' not in _progress:
            _number_joints = 0
            self._joints = []

        if 'GROUP_COMMENTS' not in _progress:
            self.sub_version_comments = 0
            _number_group_comments = 0

        if 'MATERIAL_COMMENTS' not in _progress:
            _number_material_comments = 0

        if 'JOINT_COMMENTS' not in _progress:
            _number_joint_comments = 0

        if 'MODEL_COMMENTS' not in _progress:
            _has_model_comment = 0
            self._comment_object = None  # Ms3dComment

        if 'VERTEX_EXTRA' not in _progress:
            self.sub_version_vertex_extra = 0

        if 'JOINT_EXTRA' not in _progress:
            self.sub_version_joint_extra = 0

        if 'MODEL_EXTRA' not in _progress:
            self.sub_version_model_extra = 0
            self._model_ex_object = Ms3dModelEx()

        return "".join(debug_out)

    def write(self, raw_io):
        """Serialize the whole model to *raw_io*.

        The sections after the key frame data are optional and written
        best-effort inside a try block.  Returns the accumulated warning
        text ("" when clean).
        """
        debug_out = []

        self.header.write(raw_io)

        Ms3dIo.write_word(raw_io, self.number_vertices)
        for i in range(self.number_vertices):
            self.vertices[i].write(raw_io)

        Ms3dIo.write_word(raw_io, self.number_triangles)
        for i in range(self.number_triangles):
            self.triangles[i].write(raw_io)

        Ms3dIo.write_word(raw_io, self.number_groups)
        for i in range(self.number_groups):
            self.groups[i].write(raw_io)

        Ms3dIo.write_word(raw_io, self.number_materials)
        for i in range(self.number_materials):
            self.materials[i].write(raw_io)

        Ms3dIo.write_float(raw_io, self.animation_fps)
        Ms3dIo.write_float(raw_io, self.current_time)
        Ms3dIo.write_dword(raw_io, self.number_total_frames)

        try:
            # optional part
            # doesn't matter if it doesn't complete.
            Ms3dIo.write_word(raw_io, self.number_joints)
            for i in range(self.number_joints):
                self.joints[i].write(raw_io)

            Ms3dIo.write_dword(raw_io, self.sub_version_comments)

            Ms3dIo.write_dword(raw_io, self.number_group_comments)
            for i in range(self.number_group_comments):
                self.group_comments[i].comment_object.write(raw_io)

            Ms3dIo.write_dword(raw_io, self.number_material_comments)
            for i in range(self.number_material_comments):
                self.material_comments[i].comment_object.write(raw_io)

            Ms3dIo.write_dword(raw_io, self.number_joint_comments)
            for i in range(self.number_joint_comments):
                self.joint_comments[i].comment_object.write(raw_io)

            Ms3dIo.write_dword(raw_io, self.has_model_comment)
            if (self.has_model_comment != 0):
                self.comment_object.write(raw_io)

            Ms3dIo.write_dword(raw_io, self.sub_version_vertex_extra)
            if (self.sub_version_vertex_extra in {1, 2, 3}):
                for i in range(self.number_vertices):
                    self.vertex_ex[i].write(raw_io)

            # NOTE(review): when sub_version_joint_extra is 0 (and joints
            # exist) joint_ex is None and the subscript below raises; the
            # except swallows it and truncates the optional tail -- this
            # matches the original behavior and is kept as-is.
            Ms3dIo.write_dword(raw_io, self.sub_version_joint_extra)
            for i in range(self.number_joints):
                self.joint_ex[i].write(raw_io)

            Ms3dIo.write_dword(raw_io, self.sub_version_model_extra)
            self.model_ex_object.write(raw_io)

        except Exception:
            err_type, err_value, err_traceback = exc_info()
            debug_out.append("Ms3dModel.write - exception in optional try block"
                    "\n type: '{0}'\n value: '{1}'\n".format(
                    err_type, err_value, err_traceback))

        return "".join(debug_out)

    def is_valid(self):
        """Check the section counts against the MS3D format limits.

        Returns (valid, report_text) where valid is False when any count
        exceeds its limit.
        """
        valid = True
        result = []

        format1 = "\n  number of {0}: {1}"
        format2 = "  limit exceeded! (limit is {0})"

        result.append("MS3D statistics:")
        result.append(format1.format("vertices ........",
                self.number_vertices))
        if (self.number_vertices > Ms3dSpec.MAX_VERTICES):
            result.append(format2.format(Ms3dSpec.MAX_VERTICES))
            valid = False

        result.append(format1.format("triangles .......",
                self.number_triangles))
        if (self.number_triangles > Ms3dSpec.MAX_TRIANGLES):
            result.append(format2.format(Ms3dSpec.MAX_TRIANGLES))
            valid = False

        result.append(format1.format("groups ..........",
                self.number_groups))
        if (self.number_groups > Ms3dSpec.MAX_GROUPS):
            result.append(format2.format(Ms3dSpec.MAX_GROUPS))
            valid = False

        result.append(format1.format("materials .......",
                self.number_materials))
        if (self.number_materials > Ms3dSpec.MAX_MATERIALS):
            result.append(format2.format(Ms3dSpec.MAX_MATERIALS))
            valid = False

        result.append(format1.format("joints ..........",
                self.number_joints))
        if (self.number_joints > Ms3dSpec.MAX_JOINTS):
            result.append(format2.format(Ms3dSpec.MAX_JOINTS))
            valid = False

        result.append(format1.format("model comments ..",
                self.has_model_comment))
        result.append(format1.format("group comments ..",
                self.number_group_comments))
        result.append(format1.format("material comments",
                self.number_material_comments))
        result.append(format1.format("joint comments ..",
                self.number_joint_comments))

        #if (not valid):
        #    result.append("\n\nthe data may be corrupted.")

        return (valid, ("".join(result)))
###############################################################################
#234567890123456789012345678901234567890123456789012345678901234567890123456789
#--------1---------2---------3---------4---------5---------6---------7---------
# ##### END OF FILE #####
| StarcoderdataPython |
import csv
import json
import re
import sys, os
import requests
from biorun import utils
from biorun.libs import placlib as plac
# Module-level logger shared with the rest of biorun.
logger = utils.logger
# SRA/ENA run accessions, e.g. SRR5260547 (also ERR and DRR prefixes).
SRR = re.compile(r'(ERR|SRR|DRR)\d+')
# BioProject accessions, e.g. PRJNA374918.
PRJ = re.compile(r'PRJ([A-Z])+\d+')
# GenBank accessions, e.g. NC_045512: letters, an optional underscore
# (present only on RefSeq ids), digits, and an optional ".version" suffix.
GBK = re.compile(r'(?P<letters>[a-zA-Z]+)(?P<under>_?)(?P<digits>\d+)(\.(?P<version>\d+))?')
# ENA Portal API endpoints.
ENA_API = "https://www.ebi.ac.uk/ena/portal/api"
ENA_FIELDS = f"{ENA_API}/returnFields"
ENA_REPORT = f"{ENA_API}/filereport"
# The NCBI assembly summary file: remote URL, then the bare file name,
# immediately rebound to its absolute location in biorun's cache directory.
ASSEMBLY_SUMMARY_URL = 'https://ftp.ncbi.nlm.nih.gov/genomes/ASSEMBLY_REPORTS/assembly_summary_genbank.txt'
ASSEMBLY_SUMMARY_PATH = "assembly_summary_genbank.txt"
ASSEMBLY_SUMMARY_PATH = utils.cache_path(ASSEMBLY_SUMMARY_PATH)
def match_srr(text):
    """Return True when *text* contains an SRA/ENA run accession (SRR/ERR/DRR)."""
    return SRR.search(text) is not None
# Documentation at https://www.ebi.ac.uk/ena/portal/api
def get_ena_fields(db='ena'):
    """Return the sorted list of valid ENA report fields for database *db*."""
    params = dict(dataPortal=db, result='read_run')
    rows = get_request(url=ENA_FIELDS, params=params, sep="\t")
    return sorted(row['columnId'] for row in rows)
def get_ncbi(text, db="protein", format="json"):
    """Fetch NCBI eutils esummary records for the accession(s) in *text*.

    Returns (records, None); records is a list of dicts with keys sorted
    alphabetically, or a single error record when nothing was found.
    """
    # Bulky service fields we never show.
    drops = "statistics properties oslt".split()

    url = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi'
    params = dict(db=db, format=format, id=text)
    stream = get_request(url, params=params, bulk=True)
    logger.info(f"esummary ncbi db={db} for {text}")

    payload = json.loads(stream).get("result", {})

    collect = []
    for uid, entry in payload.items():
        # The 'uids' key lists ids, it is not a record itself.
        if uid == 'uids':
            continue
        for field in drops:
            entry.pop(field, None)
        collect.append({key: entry[key] for key in sorted(entry)})

    if not collect:
        collect = [dict(error="invalid genbank id", db=db, value=f"{text}")]

    return collect, None
def get_srr(text, all=False, sep=None):
    """Query the ENA filereport endpoint for run data on *text*.

    Returns (stream, None); the stream yields report lines, or dict rows
    when *sep* is given.  With all=True every known ENA field is requested.
    """
    logger.info(f"searching ENA for {text}")

    if all:
        fields = get_ena_fields()
    else:
        # A curated subset of the most useful report columns.
        fields = [
            'run_accession',
            "sample_accession",
            'first_public',
            'country',
            'sample_alias',
            'fastq_bytes',
            'read_count',
            'library_name',
            "library_strategy",
            "library_source",
            'library_layout',
            'instrument_platform', 'instrument_model',
            'study_title',
            'fastq_ftp',
        ]

    params = dict(
        accession=text,
        fields=",".join(fields),
        result='read_run',
    )

    return get_request(ENA_REPORT, params=params, sep=sep), None
def match_bioproject(text):
    """Return True when *text* contains a BioProject accession (PRJ...)."""
    return PRJ.search(text) is not None
def match_ncbi_assembly(text):
    """Return True for NCBI RefSeq assembly accessions (GCF_...)."""
    parts = text.split("_")
    return len(parts) > 1 and parts[0] == 'GCF'
def parse_genbank(text):
    """Split *text* into (letters, digits, underscore, version) via GBK.

    All four items are empty strings when nothing matches.  Note that on a
    match without a '.N' suffix the version comes back as None.
    """
    m = GBK.search(text)
    if m is None:
        return '', '', '', ''
    return m.group("letters"), m.group("digits"), m.group("under"), m.group("version")
#
# https://www.ncbi.nlm.nih.gov/genbank/acc_prefix/
#
# Valid (letter-count, digit-count) combinations for plain (non-RefSeq)
# GenBank accession numbers.
# Nucleotide accessions:
VALID_NUC = {
    (1, 5), (2, 5), (2, 6), (3, 8), (4, 8)
}
# Protein accessions:
VALID_PROT = {
    (3, 5), (3, 7)
}
def update_assembly_stats():
    """
    Downloads the latest assembly summary file
    """
    # Fetches the GenBank assembly summary into biorun's cache directory,
    # where search_assemblies() expects to find it.
    utils.download(url=ASSEMBLY_SUMMARY_URL, fname=ASSEMBLY_SUMMARY_PATH)
def search_assemblies(word):
    """Case-insensitively search the cached NCBI assembly summary for *word*.

    Returns (rows, warning): rows is a list of dicts keyed by the summary's
    column names; warning is always None.  Exits the program when the
    summary file has not been downloaded yet (bio --download).
    """
    headers = ["assembly_accession", "bioproject", "biosample", "wgs_master", "refseq_category",
               "taxid", "species_taxid", "organism_name", "infraspecific_name", "isolate", "version_status",
               "assembly_level", "release_type", "genome_rep", "seq_rel_date", "asm_name", "submitter",
               "gbrs_paired_asm", "paired_asm_comp", "ftp_path", "excluded_from_refseq",
               # Fix: these used to be two adjacent string literals that
               # Python silently concatenated into one bogus header name.
               "relation_to_type_material", "asm_not_live_date",
               ]

    if not os.path.isfile(ASSEMBLY_SUMMARY_PATH):
        utils.error(f"Data not found: {ASSEMBLY_SUMMARY_PATH} ", stop=False)
        utils.error("Run: bio --download ")
        sys.exit()

    patt = re.compile(word, flags=re.IGNORECASE)

    coll = []
    # Context manager guarantees the summary file is closed.
    with open(ASSEMBLY_SUMMARY_PATH, encoding="utf-8") as fp:
        # Skip the leading '#' comment lines and any blank lines.
        lines = filter(lambda x: not x.startswith("#"), fp)
        lines = filter(lambda x: x.strip(), lines)
        for line in lines:
            if patt.search(line):
                elems = line.split("\t")
                data = dict(zip(headers, elems))
                coll.append(data)

    return coll, None
def match_genbank_nucleotide(text):
    """Return True when *text* looks like an NCBI nucleotide accession."""
    code, digits, refseq, version = parse_genbank(text)
    if refseq:
        # RefSeq nucleotide prefixes (underscore form, e.g. NC_045512).
        return code in ["AC", "NC", "NG", "NT", "NW", "NZ", "NM", "XM", "XR", "NR"]
    # Plain GenBank id: validate the letter/digit length combination.
    return (len(code), len(digits)) in VALID_NUC
def match_genbank_protein(text):
    """Return True when *text* looks like an NCBI protein accession."""
    code, digits, refseq, version = parse_genbank(text)
    if refseq:
        # RefSeq protein prefixes (underscore form, e.g. NP_001191).
        return code in ["AP", "NP", "YP", "XP", "WP"]
    # Plain GenBank id: validate the letter/digit length combination.
    return (len(code), len(digits)) in VALID_PROT
def match_mygene(text):
    """Return True for mygene-style queries of the form 'scope:value'."""
    # Exactly one colon separates the scope from the value.
    return len(text.split(":")) == 2
def dictreader(stream, sep=None):
    """
    Function to wrap a stream into a DictReader.
    """
    # Callers only invoke this with an explicit separator (see get_request,
    # which wraps the stream only when sep is set).
    return csv.DictReader(stream, delimiter=sep)
def get_request(url, params=None, sep=None, bulk=False):
    """Perform an HTTP GET and return the response content.

    With bulk=True the whole body is returned as text; otherwise a line
    iterator is returned, wrapped in a csv.DictReader when *sep* is given.
    Any failure (HTTP error or otherwise) is routed to utils.error.
    """
    # Fix: the default used to be the mutable literal {}.
    params = {} if params is None else params
    try:
        r = requests.get(url, params=params)
        if not r.ok:
            # Raises requests.HTTPError for 4xx/5xx status codes; a dead
            # sys.exit() that followed this call has been removed.
            r.raise_for_status()
        if bulk:
            return r.text
        stream = r.iter_lines(decode_unicode=True)
        stream = dictreader(stream, sep=sep) if sep else stream
        return stream
    except Exception as exc:
        utils.error(f"Error for {url}, {params}: {exc}")
def search_mygene(query, fields, species='', scopes='', limit=5):
    """Query the mygene.info service and return (hits, warning).

    Each hit is augmented with a 'taxname' resolved through biorun's
    taxonomy data; the service-internal '_id'/'_score' fields are dropped.
    The warning is a text line when more results exist than were returned,
    otherwise None.
    """
    # Imported lazily: mygene is an optional third-party dependency.
    import mygene
    from biorun import taxon

    logger.info(f"searching mygene for {query}")
    client = mygene.MyGeneInfo()
    data = client.query(query, fields=fields, scopes=scopes, species=species, size=limit)
    total = data.get('total', 0)
    hits = data.get('hits', [])
    # names maps taxid -> list of names; [0] is presumably the scientific
    # name -- confirm against taxon.get_data.  graph is unused here.
    names, graph = taxon.get_data(strict=False)
    # Fill in the taxonomy name; drop fields we don't want to show.
    for hit in hits:
        del hit['_id']
        del hit['_score']
        hit['taxname'] = names.get(hit.get('taxid'), [''])[0]
    # Warn when the service has more results than the requested limit.
    if len(hits) < total:
        warn = f'# showing {len(hits)} out of {total} results.'
    else:
        warn = None
    return hits, warn
def dispatch(word, all=False, fields='', limit=5, species='', scopes=''):
    """Route *word* to the matching accession/search handler.

    Tried in order: SRA runs and BioProjects, GenBank nucleotides, GenBank
    proteins, mygene queries; anything else searches the assembly summary.
    Returns the handler's (values, warning) pair.
    """
    if match_srr(word) or match_bioproject(word):
        return get_srr(word, all=all, sep="\t")
    if match_genbank_nucleotide(word):
        return get_ncbi(word, db="nuccore")
    if match_genbank_protein(word):
        return get_ncbi(word, db="protein")
    if match_mygene(word):
        query_fields = ",".join(['symbol', 'name', 'taxid', fields])
        return search_mygene(word, fields=query_fields, limit=limit, species=species, scopes=scopes)
    return search_assemblies(word)
@plac.flg('csv_', "produce comma separated output")
@plac.flg('tab', "produce tab separated output")
@plac.flg('all', "get all possible fields")
@plac.flg('header', "show headers", abbrev="H")
@plac.opt('limit', "download limit", abbrev='l')
@plac.opt('fields', "fields", abbrev='f')
@plac.opt('species', "species", abbrev='s')
@plac.opt('scopes', "scopes", abbrev='S')
@plac.pos('query', "query terms")
@plac.flg('update', "download the latest assembly summary")
def run(all=False, csv_=False, tab=False, header=False, species='', scopes='symbol', update=False, limit=5,
        fields='refseq', *words):
    """CLI entry point: search ENA/NCBI/mygene/assemblies for each word.

    Results are printed as JSON by default, or as CSV/TSV when the
    corresponding flag is set; collected warnings go to stderr at the end.
    """
    if update:
        update_assembly_stats()
        return

    # Output separator: CSV wins over TSV; None means JSON output.
    sep = None
    sep = "," if csv_ else sep
    sep = "\t" if tab else sep

    collect = []
    warns = []
    for word in words:
        values, warn = dispatch(word, all=all, limit=limit, fields=fields, species=species, scopes=scopes)
        collect.extend(values)
        warns.append(warn)

    if sep:
        # Fix: guard against an empty result set; collect[0] used to raise
        # an IndexError when no query produced any rows.
        wrt = csv.writer(sys.stdout, delimiter=sep)
        if collect:
            if header:
                wrt.writerow(collect[0].keys())
            wrt.writerows(x.values() for x in collect)
    else:
        text = json.dumps(collect, indent=4)
        print(text)

    # Show collected warnings at the end where it is not so easy to miss.
    warns = filter(None, warns)
    for warn in warns:
        print(warn, file=sys.stderr)
# SRR5260547
if __name__ == '__main__':
    # plac builds the argument parser from run()'s decorators and signature.
    plac.call(run)
| StarcoderdataPython |
# Three character NHGIS codes to postal abbreviations.
# Covers the 50 states, the District of Columbia ('110') and
# Puerto Rico ('720'). The original literal listed '720' twice; the
# duplicate has been removed and entries are ordered by code.
state_codes = {
    '010': 'AL',
    '020': 'AK',
    '040': 'AZ',
    '050': 'AR',
    '060': 'CA',
    '080': 'CO',
    '090': 'CT',
    '100': 'DE',
    '110': 'DC',
    '120': 'FL',
    '130': 'GA',
    '150': 'HI',
    '160': 'ID',
    '170': 'IL',
    '180': 'IN',
    '190': 'IA',
    '200': 'KS',
    '210': 'KY',
    '220': 'LA',
    '230': 'ME',
    '240': 'MD',
    '250': 'MA',
    '260': 'MI',
    '270': 'MN',
    '280': 'MS',
    '290': 'MO',
    '300': 'MT',
    '310': 'NE',
    '320': 'NV',
    '330': 'NH',
    '340': 'NJ',
    '350': 'NM',
    '360': 'NY',
    '370': 'NC',
    '380': 'ND',
    '390': 'OH',
    '400': 'OK',
    '410': 'OR',
    '420': 'PA',
    '440': 'RI',
    '450': 'SC',
    '460': 'SD',
    '470': 'TN',
    '480': 'TX',
    '490': 'UT',
    '500': 'VT',
    '510': 'VA',
    '530': 'WA',
    '540': 'WV',
    '550': 'WI',
    '560': 'WY',
    '720': 'PR',
}
| StarcoderdataPython |
1635945 | <reponame>dshea89/luminoth
import numpy as np
import tensorflow.compat.v1 as tf
from luminoth.utils.bbox_overlap import bbox_overlap_tf, bbox_overlap
class BBoxOverlapTest(tf.test.TestCase):
    """Tests for bbox_overlap
    bbox_overlap has a TensorFlow and a Numpy implementation.
    We test both at the same time by getting both values and making sure they
    are both equal before doing any assertions.
    """

    def tearDown(self):
        # Drop all ops added during a test so graphs do not accumulate.
        tf.reset_default_graph()

    def _get_iou(self, bbox1_val, bbox2_val):
        """Get IoU for two sets of bounding boxes.
        It also checks that both implementations return the same before
        returning.
        Args:
            bbox1_val: Array of shape (total_bbox1, 4).
            bbox2_val: Array of shape (total_bbox2, 4).
        Returns:
            iou: Array of shape (total_bbox1, total_bbox2)
        """
        bbox1 = tf.placeholder(tf.float32, (None, 4))
        bbox2 = tf.placeholder(tf.float32, (None, 4))
        iou = bbox_overlap_tf(bbox1, bbox2)
        with self.test_session() as sess:
            iou_val_tf = sess.run(iou, feed_dict={
                bbox1: np.array(bbox1_val),
                bbox2: np.array(bbox2_val),
            })
        # The Numpy implementation must agree with the TensorFlow one.
        iou_val_np = bbox_overlap(np.array(bbox1_val), np.array(bbox2_val))
        self.assertAllClose(iou_val_np, iou_val_tf)
        return iou_val_tf

    def testNoOverlap(self):
        # Single box test
        iou = self._get_iou([[0, 0, 10, 10]], [[11, 11, 20, 20]])
        self.assertAllEqual(iou, [[0.]])
        # Multiple boxes.
        iou = self._get_iou(
            [[0, 0, 10, 10], [5, 5, 10, 10]],
            [[11, 11, 20, 20], [15, 15, 20, 20]]
        )
        self.assertAllEqual(iou, [[0., 0.], [0., 0.]])

    def testAllOverlap(self):
        # Equal boxes
        iou = self._get_iou([[0, 0, 10, 10]], [[0, 0, 10, 10]])
        self.assertAllEqual(iou, [[1.]])
        # Crossed equal boxes.
        iou = self._get_iou(
            [[0, 0, 10, 10], [11, 11, 20, 20]],
            [[0, 0, 10, 10], [11, 11, 20, 20]]
        )
        # We should get an identity matrix.
        self.assertAllEqual(iou, [[1., 0.], [0., 1.]])

    def testInvalidBoxes(self):
        # Degenerate or inverted boxes should produce zero overlap.
        # Zero area, bbox1 has x_min == x_max
        iou = self._get_iou([[10, 0, 10, 10]], [[0, 0, 10, 10]])
        # self.assertAllEqual(iou, [[0.]]) TODO: Fails
        # Negative area, bbox1 has x_min > x_max (only by one)
        iou = self._get_iou([[10, 0, 9, 10]], [[0, 0, 10, 10]])
        self.assertAllEqual(iou, [[0.]])
        # Negative area, bbox1 has x_min > x_max
        iou = self._get_iou([[10, 0, 7, 10]], [[0, 0, 10, 10]])
        self.assertAllEqual(iou, [[0.]])
        # Negative area in both cases, both boxes equal but negative
        iou = self._get_iou([[10, 0, 7, 10]], [[10, 0, 7, 10]])
        self.assertAllEqual(iou, [[0.]])
if __name__ == '__main__':
    # Run all TestCase classes in this module under the TF test runner.
    tf.test.main()
| StarcoderdataPython |
3326550 | import os,sys,time
from unittest_find import unittest
import array
import numpy as np
import chroma.api as api
api.use_cuda()
#api.use_opencl()
from chroma.sim import Simulation
from chroma.event import Photons
import chroma.event as event
from chroma.geometry import Surface
from chroma.uboone.uboonedet import ubooneDet
from chroma.uboone.daq_uboone import GPUDaqUBooNE
try:
    import ROOT as rt
    from rootpy.tree import Tree, TreeModel, FloatCol, IntCol, FloatArrayCol
    from rootpy.io import root_open
    has_root = True
except:
    # NOTE(review): the flag is assigned but the raise makes a missing ROOT
    # installation fatal anyway, so has_root is effectively always True here.
    has_root = False
    raise ValueError("No ROOT")
# CHANNEL INFORMATION
# Digitizer configuration: TDC ticks per waveform, nanoseconds per tick,
# and the number of readout channels written to the output tree.
GPUDaqUBooNE.NTDC = 10000
GPUDaqUBooNE.NS_PER_TDC = 1.0
NCHANNELS = 1000
if has_root:
    # rootpy TreeModel declarations describing the branch layout of the
    # output ROOT trees.

    class PhotonData( TreeModel ):
        # Final photon position plus one flag per photon-history bit.
        end_x = FloatCol()
        end_y = FloatCol()
        end_z = FloatCol()
        reflect_diffuse = IntCol()
        reflect_specular = IntCol()
        bulk_scatter = IntCol()
        bulk_absorb = IntCol()
        surface_detect = IntCol()
        surface_absorb = IntCol()
        surface_reemit = IntCol()

        def reset(self):
            # Zero all history flags before filling the next photon.
            self.reflect_diffuse = 0
            self.reflect_specular = 0
            self.bulk_scatter = 0
            self.bulk_absorb = 0
            self.surface_detect = 0
            self.surface_absorb = 0
            self.surface_reemit = 0

    class OpDet( TreeModel ):
        # Per-event, per-channel digitized waveform and summary charge/time.
        eventid = IntCol()
        id = IntCol()
        NTDC = IntCol()
        NS_PER_TDC = FloatCol()
        adc = FloatArrayCol(GPUDaqUBooNE.NTDC)
        q = FloatCol()
        t = FloatCol()

    class OpMap( TreeModel ):
        # Optical-detector geometry map: channel id and detector center.
        opid = IntCol()
        x = FloatCol()
        y = FloatCol()
        z = FloatCol()
# LAr1-ND wireplane described as a chroma Surface: three wire planes with
# the given pitch and wire diameter; photons may pass through (transmissive).
lar1nd_wireplane = Surface( 'lar1nd_wireplane' )
lar1nd_wireplane.nplanes = 3.0
lar1nd_wireplane.wire_pitch = 0.3
lar1nd_wireplane.wire_diameter = 0.015
lar1nd_wireplane.transmissive = 1
lar1nd_wireplane.model = Surface.SURFACE_WIREPLANE
def add_wireplane_surface( solid ):
    # function detector class will use to add a wireplane surface to the geometry
    # LAr1ND has two drift regions, so we need two planes
    # set surface for triangles on x=-2023.25 and x=2023.25 planes
    for n,triangle in enumerate(solid.mesh.triangles):
        #print [ solid.mesh.vertices[x] for x in triangle ] # for debug
        nxplane = 0
        for ivert in triangle:
            # Count how many of this triangle's vertices sit on either anode plane.
            if solid.mesh.vertices[ivert,0]==-2023.25 or solid.mesh.vertices[ivert,0]==2023.25:
                nxplane += 1
        # if all three vertices have the correct x value, we say we have the right triangle
        if nxplane==3:
            print [ solid.mesh.vertices[x] for x in triangle ]
            solid.surface[ n ] = lar1nd_wireplane
    # Refresh the cached set of distinct surfaces after the assignment above.
    solid.unique_surfaces = np.unique( solid.surface )
class TestUbooneDetector(unittest.TestCase):
    """Python 2 integration test: builds the LAr1-ND geometry, fires photon
    bombs through the chroma GPU simulation and writes ROOT trees."""

    def setUp(self):
        # Geometry without wires; the wireplane surface is added via callback.
        daefile = "dae/lar1nd_lightguides_nowires_chroma.dae" # without wires
        #daefile = "dae/lar1nd_chroma.dae" # with wires
        self.geo = ubooneDet( daefile, detector_volumes=["vollightguidedetector"],
                              wireplane_volumes=[('volTPCPlaneVert_PV0x7fdcd2728c70',add_wireplane_surface)],
                              acrylic_detect=True, acrylic_wls=False,
                              read_bvh_cache=True, cache_dir="./lar1nd_cache",
                              dump_node_info=False )
        self.sim = Simulation(self.geo, geant4_processes=0, nthreads_per_block=192, max_blocks=1024, user_daq=GPUDaqUBooNE )

    @unittest.skip('skipping testDet')
    def testDet(self):
        # Run only one photon at a time
        nphotons = 100000
        # All photons start at the origin pointing along +z, with random
        # transverse polarization.
        pos = np.tile([0,0,0], (nphotons,1)).astype(np.float32)
        dir = np.tile([0,0,1], (nphotons,1)).astype(np.float32)
        pol = np.zeros_like(pos)
        phi = np.random.uniform(0, 2*np.pi, nphotons).astype(np.float32)
        pol[:,0] = np.cos(phi)
        pol[:,1] = np.sin(phi)
        t = np.zeros(nphotons, dtype=np.float32) + 100.0 # Avoid negative photon times
        wavelengths = np.empty(nphotons, np.float32)
        wavelengths.fill(128.0)
        photons = Photons(pos=pos, dir=dir, pol=pol, t=t, wavelengths=wavelengths)
        hit_charges = []
        for ev in self.sim.simulate( (photons for i in xrange(1)), keep_photons_end=True, keep_photons_beg=False, ):
            ev.photons_end.dump_history()
            lht = ev.photons_end[0].last_hit_triangles
            print "LHT: ",lht

    def testPhotonBomb(self):
        # Run only one photon at a time
        nphotons_test = 256*1000
        #nphotons = 7200000
        if has_root:
            # Output trees: per-photon endpoints, per-channel waveforms and
            # the optical-detector position map.
            root_file = root_open("output_test_lar1nd_scanx_high_stats.root", "recreate")
            root_tree = Tree("PhotonData", model=PhotonData )
            root_tree.reset()
            root_channels = Tree("OpDet", model=OpDet )
            root_opmap = Tree("OpMap", model=OpMap)
        # Map contiguous channel indices onto the solids flagged as detectors.
        channelmap = self.sim.gpu_geometry.solid_id_to_channel_index_gpu.get()
        channels = np.argwhere(channelmap>-1)
        nchannels = NCHANNELS
        channeldict = dict( zip( range(0,nchannels), channels.ravel().tolist() ) )
        # NOTE(review): root_opmap is only defined when has_root is True -
        # confirm this test is meant to require ROOT.
        for ich in range(0,nchannels):
            root_opmap.opid = ich
            solid = self.sim.detector.solids[channeldict[ich]]
            # Detector position recorded as the mesh vertex centroid.
            root_opmap.x = np.sum( solid.mesh.vertices[:,0] )/len( solid.mesh.vertices )
            root_opmap.y = np.sum( solid.mesh.vertices[:,1] )/len( solid.mesh.vertices )
            root_opmap.z = np.sum( solid.mesh.vertices[:,2] )/len( solid.mesh.vertices )
            root_opmap.fill()
        root_opmap.write()
        # Scan the photon-bomb source along z; event 101 is a low-statistics
        # reference at z=0.
        for eventid in xrange(0,102):
            print "Event: ",eventid
            if eventid<101:
                nphotons = nphotons_test*20
                z = -200 + 4*eventid
            else:
                # reference
                nphotons = nphotons_test
                z = 0
            t_photon_start = time.time()
            # Isotropic directions via uniform cos(theta) and phi.
            dphi = np.random.uniform(0,2.0*np.pi, nphotons)
            dcos = np.random.uniform(-1.0, 1.0, nphotons)
            dir = np.array( zip( np.sqrt(1-dcos[:]*dcos[:])*np.cos(dphi[:]), np.sqrt(1-dcos[:]*dcos[:])*np.sin(dphi[:]), dcos[:] ), dtype=np.float32 )
            pos = np.tile([-1000+z,0,0], (nphotons,1)).astype(np.float32)
            pol = np.zeros_like(pos)
            phi = np.random.uniform(0, 2*np.pi, nphotons).astype(np.float32)
            pol[:,0] = np.cos(phi)
            pol[:,1] = np.sin(phi)
            t = np.zeros(nphotons, dtype=np.float32) + 100.0 # Avoid negative photon times
            wavelengths = np.empty(nphotons, np.float32)
            wavelengths.fill(128.0)
            photons = Photons(pos=pos, dir=dir, pol=pol, t=t, wavelengths=wavelengths)
            t_end_start = time.time()
            print "define photon time: ",t_end_start-t_photon_start,"sec"
            for ev in self.sim.simulate( (photons for i in xrange(1)), keep_photons_end=True, keep_photons_beg=False, ):
                #ev.photons_end.dump_history()
                #print ev.channels.t
                if ( eventid%10==0 ):
                    print "Event: ",eventid
                    #print "nchannels: ",len(ev.channels.hit)
                    #print nhits
                    #print ev.channels.q
                    #print ev.channels.t
                if False:
                    # Fill Tree
                    #print "save info for ",len(ev.photons_end)
                    for photon in ev.photons_end:
                        root_tree.end_x = photon.pos[0]
                        root_tree.end_y = photon.pos[1]
                        root_tree.end_z = photon.pos[2]
                        # Each flag records whether that history bit is set.
                        root_tree.reflect_diffuse = int( event.REFLECT_DIFFUSE & photon.flags )
                        root_tree.reflect_specular = int( event.REFLECT_SPECULAR & photon.flags )
                        root_tree.bulk_scatter = int( event.RAYLEIGH_SCATTER & photon.flags )
                        root_tree.bulk_absorb = int( event.BULK_ABSORB & photon.flags )
                        root_tree.surface_detect = int( event.SURFACE_DETECT & photon.flags )
                        root_tree.surface_absorb = int( event.SURFACE_ABSORB & photon.flags )
                        root_tree.surface_reemit = int( event.SURFACE_REEMIT & photon.flags )
                        root_tree.fill()
                if True:
                    root_channels.eventid = eventid
                    t_root_start = time.time()
                    for ichannel in xrange(0,NCHANNELS):
                        root_channels.id = ichannel
                        root_channels.NTDC = GPUDaqUBooNE.NTDC
                        root_channels.NS_PER_TDC = GPUDaqUBooNE.NS_PER_TDC
                        # Slice this channel's waveform out of the flat DAQ buffer.
                        channeladc = ev.channels.q[ GPUDaqUBooNE.NTDC*ichannel: (ichannel+1)*GPUDaqUBooNE.NTDC]
                        root_channels.adc[:] = array.array( 'f', channeladc[:].ravel().tolist() )[:]
                        root_channels.q = np.sum( channeladc )
                        #if root_channels.q>0:
                        #    print channeladc[ np.where( channeladc>0.0 ) ]
                        root_channels.t = ev.channels.t[ichannel]
                        root_channels.fill()
                    t_root_end = time.time()
                    print "ROOT Fill time: ",t_root_end-t_root_start," sec"
        if has_root:
            root_tree.write()
            root_channels.write()
if __name__ == "__main__":
    # Run the unittest test runner on this module.
    unittest.main()
    pass
| StarcoderdataPython |
32971 | <reponame>ondewo/ondewo-csi-client-python<filename>examples/multi_client_example.py
#!/usr/bin/env python
# coding: utf-8
#
# Copyright 2021 ONDEWO GmbH
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ondewo.nlu.agent_pb2 as agent
import ondewo.s2t.speech_to_text_pb2 as s2t
import ondewo.t2s.text_to_speech_pb2 as t2s
from ondewo.nlu.client import Client as NluClient
from ondewo.nlu.client_config import ClientConfig as NluClientConfig
from ondewo.s2t.client.client import Client as S2tClient
from ondewo.t2s.client.client import Client as T2sClient
from ondewo.csi.client.client import Client as CsiClient
from ondewo.csi.client.client_config import ClientConfig
# Load the shared gRPC connection settings; the NLU client uses its own
# config type parsed from the same file.
with open("csi.json") as fi:
    config = ClientConfig.from_json(fi.read())
with open("csi.json") as fi:
    nlu_config = NluClientConfig.from_json(fi.read())
# One client per ONDEWO service: CSI, speech-to-text, text-to-speech, NLU.
csi_client = CsiClient(config=config)
s2t_client = S2tClient(config=config)
t2s_client = T2sClient(config=config)
nlu_client = NluClient(config=nlu_config)
# Query each service for its available pipelines/agents and print the ids.
s2t_pipelines = s2t_client.services.speech_to_text.list_s2t_pipelines(request=s2t.ListS2tPipelinesRequest())
t2s_pipelines = t2s_client.services.text_to_speech.list_t2s_pipelines(request=t2s.ListT2sPipelinesRequest())
print(f"Speech to text pipelines: {[pipeline.id for pipeline in s2t_pipelines.pipeline_configs]}")
print(f"Text to speech pipelines: {[pipeline.id for pipeline in t2s_pipelines.pipelines]}")
agents = nlu_client.services.agents.list_agents(request=agent.ListAgentsRequest())
# NOTE: the comprehension variable ``agent`` shadows the ``agent_pb2`` module
# alias only inside the f-string; comprehension scope does not leak out.
print(f"Nlu agents: {[agent.agent.parent for agent in agents.agents_with_owners]}")
| StarcoderdataPython |
1600988 | # -*- coding: utf-8 -*-
import requests
from urllib.parse import urljoin
def make_url(base, part):
    """Join *part* onto *base* and guarantee the result ends with a slash.

    SWAPI endpoints require the trailing slash for consistent URLs.
    """
    joined = urljoin(base, part)
    return joined if joined.endswith('/') else joined + '/'
class Person:
    """Star Wars character resource backed by the SWAPI people endpoint."""

    resource_url = 'https://swapi.co/api/people/'

    def __init__(self, data=None):
        """Populate attributes from a SWAPI ``people`` JSON payload."""
        for key in ('name', 'height', 'mass', 'hair_color', 'eye_color',
                    'skin_color', 'birth_year', 'gender', 'url'):
            setattr(self, key, data[key])
        # Related-resource URL lists are kept private; only derived
        # information (e.g. main_character) is exposed.
        self._films = data["films"]
        self._species = data["species"]
        self._vehicles = data["vehicles"]
        self._starships = data["starships"]

    @property
    def main_character(self):
        """A character appearing in more than three films counts as 'main'."""
        return len(self._films) > 3

    @classmethod
    def get(cls, id):
        """Fetch person *id* from the API; return ``None`` when missing."""
        url = make_url(cls.resource_url, str(id))
        response = requests.get(url)
        if response.ok:
            return cls(data=response.json())
        if response.status_code == 404:
            return None
class Film:
    """Star Wars film resource backed by the SWAPI films endpoint."""

    resource_url = 'https://swapi.co/api/films/'

    def __init__(self, data=None):
        """Populate attributes from a SWAPI ``films`` JSON payload."""
        for key in ('title', 'episode_id', 'opening_crawl', 'director',
                    'release_date', 'characters', 'planets', 'starships',
                    'vehicles', 'species', 'created', 'edited', 'url'):
            setattr(self, key, data[key])
        # The API packs every producer into one comma separated string;
        # expose them as a clean list instead.
        self.producers = [name.strip() for name in data["producer"].split(",")]

    @classmethod
    def get(cls, id):
        """Fetch film *id* from the API; return its model when the call succeeds."""
        url = make_url(cls.resource_url, str(id))
        response = requests.get(url)
        if response.ok:
            return cls(data=response.json())
class Starship:
    """Placeholder for the SWAPI starships resource (not implemented yet)."""
    pass


class Vehicle:
    """Placeholder for the SWAPI vehicles resource (not implemented yet)."""
    pass


class Species:
    """Placeholder for the SWAPI species resource (not implemented yet)."""
    pass


class Planet:
    """Placeholder for the SWAPI planets resource (not implemented yet)."""
    pass
3393607 | <reponame>bintulab/storm-analysis
#!/usr/bin/env python
"""
Simple Python interface to fft_fit.c.
Hazen 10/17
"""
import ctypes
import numpy
from numpy.ctypeslib import ndpointer
import os
import sys
import storm_analysis.sa_library.ia_utilities_c as utilC
import storm_analysis.sa_library.loadclib as loadclib
import storm_analysis.sa_library.dao_fit_c as daoFitC
def loadFFTFitC():
    """Load the compiled fft_fit library and declare ctypes signatures for
    every C function used by the Python wrapper below."""
    fft_fit = loadclib.loadCLibrary("fft_fit")

    # From sa_library/multi_fit.c
    fft_fit.mFitGetFitImage.argtypes = [ctypes.c_void_p,
                                        ndpointer(dtype=numpy.float64)]
    fft_fit.mFitGetNError.argtypes = [ctypes.c_void_p]
    fft_fit.mFitGetNError.restype = ctypes.c_int
    fft_fit.mFitGetPeakPropertyDouble.argtypes = [ctypes.c_void_p,
                                                  ndpointer(dtype=numpy.float64),
                                                  ctypes.c_char_p]
    fft_fit.mFitGetPeakPropertyInt.argtypes = [ctypes.c_void_p,
                                               ndpointer(dtype=numpy.int32),
                                               ctypes.c_char_p]
    fft_fit.mFitGetResidual.argtypes = [ctypes.c_void_p,
                                        ndpointer(dtype=numpy.float64)]
    fft_fit.mFitGetUnconverged.argtypes = [ctypes.c_void_p]
    fft_fit.mFitGetUnconverged.restype = ctypes.c_int
    fft_fit.mFitIterateLM.argtypes = [ctypes.c_void_p]
    fft_fit.mFitNewBackground.argtypes = [ctypes.c_void_p,
                                          ndpointer(dtype=numpy.float64)]
    fft_fit.mFitNewImage.argtypes = [ctypes.c_void_p,
                                     ndpointer(dtype=numpy.float64)]
    fft_fit.mFitRemoveErrorPeaks.argtypes = [ctypes.c_void_p]
    fft_fit.mFitRemoveRunningPeaks.argtypes = [ctypes.c_void_p]
    fft_fit.mFitSetPeakStatus.argtypes = [ctypes.c_void_p,
                                          ndpointer(dtype=numpy.int32)]

    # From psf_fft/fft_fit.c
    fft_fit.ftFitCleanup.argtypes = [ctypes.c_void_p]
    fft_fit.ftFitInitialize.argtypes = [ctypes.c_void_p,
                                        ndpointer(dtype=numpy.float64),
                                        ndpointer(dtype=numpy.float64),
                                        ctypes.c_double,
                                        ctypes.c_int,
                                        ctypes.c_int]
    fft_fit.ftFitInitialize.restype = ctypes.POINTER(daoFitC.fitData)
    fft_fit.ftFitNewPeaks.argtypes = [ctypes.c_void_p,
                                      ndpointer(dtype=numpy.float64),
                                      ctypes.c_char_p,
                                      ctypes.c_int]

    return fft_fit
#
# Classes.
#
class CFFTFit(daoFitC.MultiFitterArbitraryPSF):
    """Python wrapper around the fft_fit C library for peak fitting with an
    FFT based PSF model."""

    def __init__(self, psf_fn = None, **kwds):
        super(CFFTFit, self).__init__(**kwds)
        # psf_fn provides the PSF representation (C pointer, size, z scaling).
        self.psf_fn = psf_fn
        self.clib = loadFFTFitC()

    def cleanup(self, spacing = " ", verbose = True):
        """Release the C fitter state after the base-class cleanup."""
        super(CFFTFit, self).cleanup(spacing = spacing,
                                     verbose = verbose)
        if self.mfit is not None:
            self.clib.ftFitCleanup(self.mfit)
            self.mfit = None
        self.psf_fft = None

    def getSize(self):
        # Delegate to the PSF function object.
        return self.psf_fn.getSize()

    def initializeC(self, image):
        """
        This initializes the C fitting library.
        """
        super(CFFTFit, self).initializeC(image)
        self.mfit = self.clib.ftFitInitialize(self.psf_fn.getCPointer(),
                                              self.rqe,
                                              self.scmos_cal,
                                              self.default_tol,
                                              self.scmos_cal.shape[1],
                                              self.scmos_cal.shape[0])

    def newPeaks(self, peaks, peaks_type):
        """
        Pass new peaks to the C library.
        """
        c_peaks = self.formatPeaks(peaks, peaks_type)
        self.clib.ftFitNewPeaks(self.mfit,
                                c_peaks,
                                ctypes.c_char_p(peaks_type.encode()),
                                c_peaks.shape[0])

    def rescaleZ(self, z):
        # Convert z from fitting units back to real units via the PSF object.
        return self.psf_fn.rescaleZ(z)
| StarcoderdataPython |
172460 | import os
import pytest
from pathlib import Path
from pydantic import ValidationError
from BALSAMIC.utils.models import (
VCFAttributes, VarCallerFilter, QCModel, VarcallerAttribute, AnalysisModel,
SampleInstanceModel, ReferenceUrlsModel, ReferenceMeta, UMIworkflowConfig,
UMIParamsCommon, UMIParamsUMIextract, UMIParamsConsensuscall,
UMIParamsTNscope, UMIParamsVardict, UMIParamsVEP)
def test_referencemeta():
    """test ReferenceMeta for correctly building model"""
    # GIVEN a reference definition with two hg19 reference files
    raw_meta = {
        "basedir": "basedir",
        "reference_genome": {
            "url": "gs://some_path/b37/human_g1k_v37.fasta.gz",
            "file_type": "fasta",
            "gzip": True,
            "genome_version": "hg19",
            "output_file": "genome.fa",
            "output_path": "genome",
        },
        "dbsnp": {
            "url": "gs://some_path/b37/dbsnp_138.b37.vcf.gz",
            "file_type": "fasta",
            "gzip": True,
            "genome_version": "hg19",
            "output_file": "dbsnp.vcf",
        },
    }
    # WHEN building the model
    meta = ReferenceMeta.parse_obj(raw_meta)
    # THEN attributes mirror the input and output paths are joined to basedir
    assert meta.reference_genome.genome_version == "hg19"
    assert meta.dbsnp.genome_version == "hg19"
    assert meta.reference_genome.get_output_file == "basedir/genome/genome.fa"
def test_referenceurlsmodel_build_model():
    """test ReferenceUrlsModel for correctly building the model"""
    # GIVEN a valid reference-file definition
    out_file = "some_random_file"
    out_path = "some_path"
    expected_path = Path(out_path, out_file).as_posix()
    raw = {
        "url": "gs://domain/file_name",
        "file_type": "fasta",
        "gzip": True,
        "genome_version": "hg19",
        "output_file": out_file,
        "output_path": out_path,
    }
    # WHEN building the model
    model = ReferenceUrlsModel.parse_obj(raw)
    # THEN the URL scheme is parsed and the output path is assembled
    assert model.url.scheme == "gs"
    assert model.get_output_file == expected_path
def test_referenceurlsmodel_validate_file_type():
    """test ReferenceUrlsModel for validating file type"""
    # GIVEN a reference model with an unsupported file type
    dummy_output_file = "some_random_file"
    dummy_output_path = "some_path"
    dummy_reference = {
        "url": "gs://domain/file_name",
        "file_type": "wrong_type",
        "gzip": True,
        "genome_version": "hg19",
        "output_file": dummy_output_file,
        "output_path": dummy_output_path,
    }
    # WHEN building the model
    # THEN validation fails. Only the failing call lives inside the raises
    # block; the message assertion runs after it (an assert inside the block
    # would never execute) and compares against str(excinfo.value).
    with pytest.raises(ValidationError) as excinfo:
        ReferenceUrlsModel.parse_obj(dummy_reference)
    assert "not a valid reference file format" in str(excinfo.value)
def test_referenceurlsmodel_write_md5(tmp_path_factory):
    """test ReferenceUrlsModel for writing md5 of the output file"""
    # GIVEN a reference model whose output file exists with random content
    out_file = "some_random_file"
    out_dir = tmp_path_factory.mktemp("some_path")
    Path(out_dir, out_file).write_bytes(os.urandom(8196))
    expected_md5 = Path(out_dir, out_file + ".md5")
    raw = {
        "url": "gs://domain/file_name",
        "file_type": "fasta",
        "gzip": True,
        "genome_version": "hg19",
        "output_file": out_file,
        "output_path": out_dir.as_posix(),
    }
    # WHEN building the model and requesting the md5
    model = ReferenceUrlsModel.parse_obj(raw)
    model.write_md5
    # THEN the md5 sidecar file exists on disk
    assert expected_md5.is_file()
def test_referenceurlsmodel_write_md5_no_output_file(tmp_path_factory):
    """test ReferenceUrlsModel for failing to write md5 if outputfile doesn't exist"""
    # GIVEN a reference model whose output file was never created
    dummy_output_file = "some_random_file"
    dummy_output_path = tmp_path_factory.mktemp("some_path")
    dummy_reference = {
        "url": "gs://domain/file_name",
        "file_type": "fasta",
        "gzip": True,
        "genome_version": "hg19",
        "output_file": dummy_output_file,
        "output_path": dummy_output_path.as_posix(),
    }
    # WHEN building the model
    built_model = ReferenceUrlsModel.parse_obj(dummy_reference)
    # THEN requesting the md5 raises. The message assertion runs after the
    # raises block (inside it, it would never execute) and compares against
    # the rendered exception string.
    with pytest.raises(FileNotFoundError) as excinfo:
        built_model.write_md5
    assert "file does not exist" in str(excinfo.value)
def test_referenceurlsmodel_validate_genome_version():
    """test ReferenceUrlsModel for validating genome version"""
    # GIVEN a reference model with an unsupported genome version
    dummy_output_file = "some_random_file"
    dummy_output_path = "some_path"
    dummy_reference = {
        "url": "gs://domain/file_name",
        "file_type": "fasta",
        "gzip": True,
        "genome_version": "wrong_genome",
        "output_file": dummy_output_file,
        "output_path": dummy_output_path,
    }
    # WHEN building the model
    # THEN validation fails; check the message outside the raises block and
    # against str(excinfo.value) so the assertion actually runs.
    with pytest.raises(ValidationError) as excinfo:
        ReferenceUrlsModel.parse_obj(dummy_reference)
    assert "not a valid genome version" in str(excinfo.value)
def test_vcfattributes():
    """test VCFAttributes model for correct validation"""
    # GIVEN a raw VCF attribute definition
    raw_attribute = {
        "tag_value": 5.0,
        "filter_name": "dummy_filter_name",
        "field": "INFO"
    }
    # WHEN building the model
    attribute = VCFAttributes(**raw_attribute)
    # THEN every value is retrievable unchanged
    assert attribute.tag_value == 5.0
    assert attribute.field == "INFO"
    assert attribute.filter_name == "dummy_filter_name"
def test_varcallerfilter():
    """test required VarCallerFilters for being set correctly"""
    # GIVEN a raw variant-caller filter definition
    raw_filter = {
        "AD": {
            "tag_value": 5.0,
            "filter_name": "dummy_alt_depth",
            "field": "INFO"
        },
        "DP": {
            "tag_value": 100.0,
            "filter_name": "dummy_depth",
            "field": "INFO"
        },
        "pop_freq": {
            "tag_value": 0.005,
            "filter_name": "dummy_pop_freq",
            "field": "INFO"
        },
        "varcaller_name": "dummy_varcaller",
        "filter_type": "dummy_ffpe_filter",
        "analysis_type": "dummy_tumor_only",
        "description": "dummy description of this filter",
    }
    # WHEN building the model
    vc_filter = VarCallerFilter(**raw_filter)
    # THEN the required thresholds and metadata are populated
    assert vc_filter.AD.tag_value == 5.0
    assert vc_filter.DP.tag_value == 100.0
    assert vc_filter.analysis_type == "dummy_tumor_only"
def test_qc_model():
    """QCModel builds successfully from a valid argument dictionary."""
    args = {"umi_trim": True, "min_seq_length": 25, "umi_trim_length": 5}
    assert QCModel.parse_obj(args)
def test_varcaller_attribute():
    """test VarcallerAttribute for valid and invalid arguments"""
    # GIVEN valid input arguments
    valid_args = {"mutation": "somatic", "type": "SNV"}
    # THEN we can successully create a config dict
    assert VarcallerAttribute.parse_obj(valid_args)
    # GIVEN invalid input arguments
    invalid_args = {"mutation": "strange", "type": "unacceptable"}
    # THEN validation fails; assert the message after the raises block and
    # against str(excinfo.value) so the check actually executes.
    with pytest.raises(ValueError) as excinfo:
        VarcallerAttribute.parse_obj(invalid_args)
    assert "not a valid argument" in str(excinfo.value)
def test_analysis_model():
    """test AnalysisModel for valid and invalid arguments"""
    # GIVEN valid input arguments
    valid_args = {
        "case_id": "case_id",
        "analysis_type": "paired",
        "sequencing_type": "targeted",
        "analysis_dir": "tests/test_data",
        "umiworkflow": "true"
    }
    # THEN we can successully create a config dict
    assert AnalysisModel.parse_obj(valid_args)
    # GIVEN invalid input arguments
    invalid_args = {
        "case_id": "case_id",
        "analysis_type": "odd",
        "sequencing_type": "wrong",
        "analysis_dir": "tests/test_data",
    }
    # THEN validation fails; assert the message after the raises block and
    # against str(excinfo.value) so the check actually executes.
    with pytest.raises(ValueError) as excinfo:
        AnalysisModel.parse_obj(invalid_args)
    assert "not supported" in str(excinfo.value)
def test_sample_instance_model():
    """test SampleInstanceModel for valid and invalid arguments"""
    # GIVEN valid input arguments
    valid_args = {"file_prefix": "S2_R", "type": "normal", "sample_name": "S2"}
    # THEN we can successully create a config dict
    assert SampleInstanceModel.parse_obj(valid_args)
    # GIVEN invalid input arguments
    invalid_args = {
        "file_prefix": "S2_R",
        "type": "fungal",
    }
    # THEN validation fails; assert the message after the raises block and
    # against str(excinfo.value) so the check actually executes.
    with pytest.raises(ValueError) as excinfo:
        SampleInstanceModel.parse_obj(invalid_args)
    assert "not supported" in str(excinfo.value)
def test_umiparams_common():
    """test UMIParamsCommon model for correct validation"""
    # GIVEN the shared UMI workflow parameters, WHEN building the model
    params = UMIParamsCommon(
        align_header="test_header_name",
        align_intbases=100,
        filter_tumor_af=0.01,
    )
    # THEN each value round-trips unchanged
    assert params.align_header == "test_header_name"
    assert params.filter_tumor_af == 0.01
    assert params.align_intbases == 100
def test_umiparams_umiextract():
    """test UMIParamsUMIextract model for correct validation"""
    # GIVEN umiextract params, WHEN building the model
    params = UMIParamsUMIextract(read_structure="['mode', 'r1,r2']")
    # THEN the read structure is stored unchanged
    assert params.read_structure == "['mode', 'r1,r2']"
def test_umiparams_consensuscall():
    """test UMIParamsConsensuscall model for correct validation"""
    # GIVEN consensuscall params, WHEN building the model
    params = UMIParamsConsensuscall(
        align_format="BAM",
        filter_minreads="6,3,3",
        tag="XZ",
    )
    # THEN each value round-trips unchanged
    assert params.align_format == "BAM"
    assert params.filter_minreads == "6,3,3"
    assert params.tag == "XZ"
def test_umiparams_tnscope():
    """test UMIParamsTNscope model for correct validation"""
    # GIVEN tnscope params, WHEN building the model
    params = UMIParamsTNscope(
        algo="algoname",
        min_tumorLOD=6,
        error_rate=5,
        prunefactor=3,
        disable_detect="abc",
    )
    # THEN each value round-trips unchanged
    assert params.algo == "algoname"
    assert params.min_tumorLOD == 6
    assert params.error_rate == 5
    assert params.prunefactor == 3
    assert params.disable_detect == "abc"
def test_umiparams_vardict():
    """test UMIParamsVardict model for correct validation"""
    # GIVEN vardict params, WHEN building the model
    params = UMIParamsVardict(vardict_filters="-a 1 -b 2 -c 5")
    # THEN the filter string is stored unchanged
    assert params.vardict_filters == "-a 1 -b 2 -c 5"
def test_umiparams_vep():
    """test UMIParamsVEP model for correct validation"""
    # GIVEN VEP params, WHEN building the model
    params = UMIParamsVEP(vep_filters="all defaults params")
    # THEN the filter string is stored unchanged
    assert params.vep_filters == "all defaults params"
| StarcoderdataPython |
123154 | <reponame>ZhuofanXie/Copulas
from functools import partial
import numpy as np
from scipy.optimize import brentq
from scipy.special import ndtr
from scipy.stats import gaussian_kde
from copulas import EPSILON, scalarize, store_args
from copulas.univariate.base import BoundedType, ParametricType, ScipyModel
class GaussianKDE(ScipyModel):
"""A wrapper for gaussian Kernel density estimation implemented
in scipy.stats toolbox. gaussian_kde is slower than statsmodels
but allows more flexibility.
When a sample_size is provided the fit method will sample the
data, and mask the real information. Also, ensure the number of
entries will be always the value of sample_size.
Args:
sample_size(int): amount of parameters to sample
"""
PARAMETRIC = ParametricType.NON_PARAMETRIC
BOUNDED = BoundedType.UNBOUNDED
MODEL_CLASS = gaussian_kde
@store_args
def __init__(self, sample_size=None, random_seed=None, bw_method=None, weights=None):
self.random_seed = random_seed
self._sample_size = sample_size
self.bw_method = bw_method
self.weights = weights
def _get_model(self):
dataset = self._params['dataset']
self._sample_size = self._sample_size or len(dataset)
return gaussian_kde(dataset, bw_method=self.bw_method, weights=self.weights)
def _get_bounds(self):
X = self._params['dataset']
lower = np.min(X) - (5 * np.std(X))
upper = np.max(X) + (5 * np.std(X))
return lower, upper
def probability_density(self, X):
"""Compute the probability density for each point in X.
Arguments:
X (numpy.ndarray):
Values for which the probability density will be computed.
It must have shape (n, 1).
Returns:
numpy.ndarray:
Probability density values for points in X.
Raises:
NotFittedError:
if the model is not fitted.
"""
self.check_fit()
return self._model.evaluate(X)
def sample(self, n_samples=1):
"""Sample values from this model.
Argument:
n_samples (int):
Number of values to sample
Returns:
numpy.ndarray:
Array of shape (n_samples, 1) with values randomly
sampled from this model distribution.
Raises:
NotFittedError:
if the model is not fitted.
"""
self.check_fit()
return self._model.resample(size=n_samples)[0]
def cumulative_distribution(self, X):
"""Compute the cumulative distribution value for each point in X.
Arguments:
X (numpy.ndarray):
Values for which the cumulative distribution will be computed.
It must have shape (n, 1).
Returns:
numpy.ndarray:
Cumulative distribution values for points in X.
Raises:
NotFittedError:
if the model is not fitted.
"""
self.check_fit()
stdev = np.sqrt(self._model.covariance[0, 0])
lower = ndtr((self._get_bounds()[0] - self._model.dataset) / stdev)[0]
uppers = np.vstack([ndtr((x - self._model.dataset) / stdev)[0] for x in X])
return (uppers - lower).dot(self._model.weights)
def _brentq_cdf(self, value):
"""Helper function to compute percent_point.
As scipy.stats.gaussian_kde doesn't provide this functionality out of the box we need
to make a numerical approach:
- First we scalarize and bound cumulative_distribution.
- Then we define a function `f(x) = cdf(x) - value`, where value is the given argument.
- As value will be called from ppf we can assume value = cdf(z) for some z that is the
value we are searching for. Therefore the zeros of the function will be x such that:
cdf(x) - cdf(z) = 0 => (becasue cdf is monotonous and continous) x = z
Args:
value (float):
cdf value, that is, in [0,1]
Returns:
callable:
function whose zero is the ppf of value.
"""
# The decorator expects an instance method, but usually are decorated before being bounded
bound_cdf = partial(scalarize(GaussianKDE.cumulative_distribution), self)
def f(x):
return bound_cdf(x) - value
return f
def percent_point(self, U):
    """Compute the inverse cumulative distribution value for each point in U.

    Arguments:
        U (numpy.ndarray):
            Values for which the cumulative distribution will be computed.
            It must have shape (n, 1) and values must be in [0,1].

    Returns:
        numpy.ndarray:
            Inverse cumulative distribution values for points in U.

    Raises:
        NotFittedError:
            if the model is not fitted.
        ValueError:
            if U has more than 2 dimensions or contains values
            outside [0.0, 1.0].
    """
    self.check_fit()
    # Array inputs are handled by recursing element-wise: each scalar
    # entry goes through the scalar root-finding path below.
    if isinstance(U, np.ndarray):
        if len(U.shape) == 1:
            U = U.reshape([-1, 1])
        if len(U.shape) == 2:
            return np.fromiter(
                (self.percent_point(u[0]) for u in U),
                np.dtype('float64')
            )
        else:
            raise ValueError('Arrays of dimensionality higher than 2 are not supported.')
    if np.any(U > 1.0) or np.any(U < 0.0):
        raise ValueError("Expected values in range [0.0, 1.0].")
    # Values within EPSILON of 0 or 1 map straight to -inf/+inf; brentq
    # would not converge there because the CDF only reaches 0/1 at the
    # open ends of its support.
    is_one = U >= 1.0 - EPSILON
    is_zero = U <= EPSILON
    is_valid = not (is_zero or is_one)
    lower, upper = self._get_bounds()
    # NOTE(review): U is expected to be a numpy scalar here (the recursion
    # passes u[0]); a plain Python float has no .shape and would fail —
    # confirm callers never pass a bare float.
    X = np.zeros(U.shape)
    X[is_one] = float("inf")
    X[is_zero] = float("-inf")
    # Invert the CDF numerically: find x in [lower, upper] with cdf(x) == U.
    X[is_valid] = brentq(self._brentq_cdf(U[is_valid]), lower, upper)
    return X
def _fit_constant(self, X):
sample_size = self._sample_size or len(X)
constant = np.unique(X)[0]
self._params = {
'dataset': [constant] * sample_size,
}
def _fit(self, X):
if self._sample_size:
X = gaussian_kde(X, bw_method=self.bw_method,
weights=self.weights).resample(self._sample_size)
self._params = {
'dataset': X.tolist()
}
def _is_constant(self):
return len(np.unique(self._params['dataset'])) == 1
| StarcoderdataPython |
1776971 | from textblob import TextBlob
from textblob.sentiments import NaiveBayesAnalyzer
def analyze(query):
    """Run TextBlob sentiment analysis over the text in ``query``.

    Args:
        query (dict): must contain a 'text' key with the text to analyze.

    Returns:
        dict: with two keys:
            - 'sentiments': per-sentence list of dicts with the sentence
              string and TextBlob's (polarity, subjectivity) pair.
            - 'general_score': dict with 'polarity' and 'subjectivity'
              averaged over all sentences (both 0 when there are none).
    """
    sentiments = []
    text = query['text']
    general_score = {
        "polarity": 0,
        "subjectivity": 0
    }
    blob = TextBlob(text)
    for sentence in blob.sentences:
        sentiments.append({
            "sentence": str(sentence),
            "sentiment": sentence.sentiment
        })
        general_score["polarity"] += sentence.sentiment[0]
        general_score["subjectivity"] += sentence.sentiment[1]
    # Guard against empty/whitespace-only text: the previous unconditional
    # division raised ZeroDivisionError when there were no sentences.
    if blob.sentences:
        general_score["polarity"] = general_score["polarity"] / len(blob.sentences)
        general_score["subjectivity"] = general_score["subjectivity"] / len(blob.sentences)
    result = {
        "sentiments": sentiments,
        "general_score": general_score
    }
    return result
| StarcoderdataPython |
1642160 | from os import path
from unittest.mock import MagicMock
from piu.cli import validate_ssh_key, check_ssh_key
def test_validate_ssh_fallback(monkeypatch):
    """The fallback key is used when neither option nor config supply one."""
    exit_spy = MagicMock()
    monkeypatch.setattr("sys.exit", exit_spy)

    here = path.abspath(path.dirname(__file__))
    fallback_path = path.join(here, "resources/id_rsa_fallback.pub")

    assert validate_ssh_key("", "", fallback_path, False) == fallback_path
    exit_spy.assert_not_called()
def test_validate_ssh_valid_input(monkeypatch):
    """An explicitly passed key wins over both config and fallback keys."""
    exit_spy = MagicMock()
    monkeypatch.setattr("sys.exit", exit_spy)

    here = path.abspath(path.dirname(__file__))
    option_path = path.join(here, "resources/id_rsa_option.pub")
    config_path = path.join(here, "resources/id_rsa_config.pub")
    fallback_path = path.join(here, "resources/id_rsa_fallback.pub")

    assert validate_ssh_key(option_path, config_path, fallback_path, False) == option_path
    exit_spy.assert_not_called()
def test_validate_ssh_valid_config(monkeypatch):
    """Without an explicit option, the configured key wins over the fallback."""
    exit_spy = MagicMock()
    monkeypatch.setattr("sys.exit", exit_spy)

    here = path.abspath(path.dirname(__file__))
    config_path = path.join(here, "resources/id_rsa_config.pub")
    fallback_path = path.join(here, "resources/id_rsa_fallback.pub")

    assert validate_ssh_key("", config_path, fallback_path, False) == config_path
    exit_spy.assert_not_called()
def test_validate_ssh_valid_error(monkeypatch):
    """With no key available at all, the CLI exits with status 1."""
    exit_spy = MagicMock()
    monkeypatch.setattr("sys.exit", exit_spy)

    assert validate_ssh_key("", "", "", False) == ""
    exit_spy.assert_called_with(1)
def test_validate_ssh_valid_no_error_interactive(monkeypatch):
    """In interactive mode a missing key must not abort the process."""
    exit_spy = MagicMock()
    monkeypatch.setattr("sys.exit", exit_spy)

    assert validate_ssh_key("", "", "", True) == ""
    exit_spy.assert_not_called()
def test_malformed_ssh_key(monkeypatch):
    """A syntactically invalid public key file is rejected by check_ssh_key."""
    here = path.abspath(path.dirname(__file__))
    assert not check_ssh_key(path.join(here, "resources/id_rsa_malformed.pub"))
| StarcoderdataPython |
159222 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
''' system_constants.py: defines constants for generic purposes and system config'''
##### Generic constants #####
# Byte-size multipliers.
KB = 1024
MB = 1024 * KB
GB = 1024 * MB
# Time-unit conversion factors.
MS_TO_SEC = 0.001
SEC_TO_NS = 1000000000
NS_TO_MS = 0.000001
# Default resource allocations, in bytes.
RAM_FOR_STMGR = 1 * GB
DEFAULT_RAM_FOR_INSTANCE = 1 * GB
DEFAULT_DISK_PADDING_PER_CONTAINER = 12 * GB
####################################################################################################
############################# Constants for System configuration ##################################
####################################################################################################
# The relative path to the logging directory
HERON_LOGGING_DIRECTORY = 'heron.logging.directory'
# The maximum log file size in MB
HERON_LOGGING_MAXIMUM_SIZE_MB = "heron.logging.maximum.size.mb"
# The maximum number of log files
HERON_LOGGING_MAXIMUM_FILES = "heron.logging.maximum.files"
# The threshold level to log error
HERON_LOGGING_ERR_THRESHOLD = "heron.logging.err.threshold"
# The interval in seconds to get and reset the system metrics.
HERON_METRICS_EXPORT_INTERVAL_SEC = "heron.metrics.export.interval.sec"
####################################################################################################
############################### System config: Instance related ####################################
####################################################################################################
# The queue capacity (num of items) in bolt for buffer packets to read from stream manager
INSTANCE_INTERNAL_BOLT_READ_QUEUE_CAPACITY = "heron.instance.internal.bolt.read.queue.capacity"
# The queue capacity (num of items) in bolt for buffer packets to write to stream manager
INSTANCE_INTERNAL_BOLT_WRITE_QUEUE_CAPACITY = "heron.instance.internal.bolt.write.queue.capacity"
# The queue capacity (num of items) in spout for buffer packets to read from stream manager
INSTANCE_INTERNAL_SPOUT_READ_QUEUE_CAPACITY = "heron.instance.internal.spout.read.queue.capacity"
# The queue capacity (num of items) in spout for buffer packets to write to stream manager
INSTANCE_INTERNAL_SPOUT_WRITE_QUEUE_CAPACITY = "heron.instance.internal.spout.write.queue.capacity"
# The queue capacity (num of items) for metrics packets to write to metrics manager
INSTANCE_INTERNAL_METRICS_WRITE_QUEUE_CAPACITY = \
    "heron.instance.internal.metrics.write.queue.capacity"
# Time based, the maximum batch time in ms for instance to read from stream manager per attempt
INSTANCE_NETWORK_READ_BATCH_TIME_MS = "heron.instance.network.read.batch.time.ms"
# Size based, the maximum batch size in bytes to read from stream manager
INSTANCE_NETWORK_READ_BATCH_SIZE_BYTES = "heron.instance.network.read.batch.size.bytes"
# Time based, the maximum batch time in ms for instance to write to stream manager per attempt
INSTANCE_NETWORK_WRITE_BATCH_TIME_MS = "heron.instance.network.write.batch.time.ms"
# Size based, the maximum batch size in bytes to write to stream manager
INSTANCE_NETWORK_WRITE_BATCH_SIZE_BYTES = "heron.instance.network.write.batch.size.bytes"
# The maximum socket's received buffer size in bytes of instance's network options
INSTANCE_NETWORK_OPTIONS_SOCKET_RECEIVED_BUFFER_SIZE_BYTES = \
    "heron.instance.network.options.socket.received.buffer.size.bytes"
# The maximum socket's send buffer size in bytes
INSTANCE_NETWORK_OPTIONS_SOCKET_SEND_BUFFER_SIZE_BYTES = \
    "heron.instance.network.options.socket.send.buffer.size.bytes"
# The maximum # of data tuple to batch in a HeronDataTupleSet protobuf
INSTANCE_SET_DATA_TUPLE_CAPACITY = "heron.instance.set.data.tuple.capacity"
# The maximum size in bytes of data tuple to batch in a HeronDataTupleSet protobuf
INSTANCE_SET_DATA_TUPLE_SIZE_BYTES = "heron.instance.set.data.tuple.size.bytes"
# The maximum # of control tuple to batch in a HeronControlTupleSet protobuf
INSTANCE_SET_CONTROL_TUPLE_CAPACITY = "heron.instance.set.control.tuple.capacity"
# The maximum time in ms for an spout to do acknowledgement per attempt
INSTANCE_ACK_BATCH_TIME_MS = "heron.instance.ack.batch.time.ms"
# The maximum time in ms for an spout instance to emit tuples per attempt
INSTANCE_EMIT_BATCH_TIME_MS = "heron.instance.emit.batch.time.ms"
# The maximum batch size in bytes for an spout instance to emit tuples per attempt
INSTANCE_EMIT_BATCH_SIZE_BYTES = "heron.instance.emit.batch.size.bytes"
# The maximum time in ms for an bolt instance to execute tuples per attempt
INSTANCE_EXECUTE_BATCH_TIME_MS = "heron.instance.execute.batch.time.ms"
# The maximum batch size in bytes for an bolt instance to execute tuples per attempt
INSTANCE_EXECUTE_BATCH_SIZE_BYTES = "heron.instance.execute.batch.size.bytes"
# The time to wait before the instance exits forcibly when uncaught exception happens
INSTANCE_FORCE_EXIT_TIMEOUT_MS = "heron.instance.force.exit.timeout.ms"
# Interval in seconds to reconnect to the stream manager
INSTANCE_RECONNECT_STREAMMGR_INTERVAL_SEC = "heron.instance.reconnect.streammgr.interval.sec"
# Interval in seconds to reconnect to the metrics manager
INSTANCE_RECONNECT_METRICSMGR_INTERVAL_SEC = "heron.instance.reconnect.metricsmgr.interval.sec"
# Interval in seconds to sample a system metric, for instance, JVM used memory.
INSTANCE_METRICS_SYSTEM_SAMPLE_INTERVAL_SEC = "heron.instance.metrics.system.sample.interval.sec"
# The lookForTimeout Interval in spout instance will be timeoutInSeconds / NBUCKETS
# For instance, if a tuple's timeout is 30s, and NBUCKETS is 10
# The spout instance will check whether there are timeout tuples every 3 seconds
INSTANCE_ACKNOWLEDGEMENT_NBUCKETS = "heron.instance.acknowledgement.nbuckets"
# The expected size on read queue in bolt
INSTANCE_TUNING_EXPECTED_BOLT_READ_QUEUE_SIZE \
    = "heron.instance.tuning.expected.bolt.read.queue.size"
# The expected size on write queue in bolt
INSTANCE_TUNING_EXPECTED_BOLT_WRITE_QUEUE_SIZE \
    = "heron.instance.tuning.expected.bolt.write.queue.size"
# The expected size on read queue in spout
INSTANCE_TUNING_EXPECTED_SPOUT_READ_QUEUE_SIZE \
    = "heron.instance.tuning.expected.spout.read.queue.size"
# The expected size on write queue in spout
INSTANCE_TUNING_EXPECTED_SPOUT_WRITE_QUEUE_SIZE \
    = "heron.instance.tuning.expected.spout.write.queue.size"
# The expected size on metrics write queue
INSTANCE_TUNING_EXPECTED_METRICS_WRITE_QUEUE_SIZE \
    = "heron.instance.tuning.expected.metrics.write.queue.size"
# During dynamically tuning, the weight of new sample size to calculate
# the available capacity of queue.
# We use Exponential moving average: En = (1-w) * En-1 + w * An
# http://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
INSTANCE_TUNING_CURRENT_SAMPLE_WEIGHT = "heron.instance.tuning.current.sample.weight"
# Interval in ms to tune the size of in & out data queue in instance
INSTANCE_TUNING_INTERVAL_MS = "heron.instance.tuning.interval.ms"
####################################################################################################
########################### System config: Metrics Manager related #################################
####################################################################################################
# Time based, the maximum batch time in ms for instance to read from socket per attempt
METRICSMGR_NETWORK_READ_BATCH_TIME_MS = "heron.metricsmgr.network.read.batch.time.ms"
# Size based, the maximum batch size in bytes to read from socket
METRICSMGR_NETWORK_READ_BATCH_SIZE_BYTES = "heron.metricsmgr.network.read.batch.size.bytes"
# Time based, the maximum batch time in ms to write to socket
METRICSMGR_NETWORK_WRITE_BATCH_TIME_MS = "heron.metricsmgr.network.write.batch.time.ms"
# Size based, the maximum batch size in bytes to write to socket
METRICSMGR_NETWORK_WRITE_BATCH_SIZE_BYTES = "heron.metricsmgr.network.write.batch.size.bytes"
# The maximum socket's received buffer size in bytes
METRICSMGR_NETWORK_OPTIONS_SOCKET_RECEIVED_BUFFER_SIZE_BYTES \
    = "heron.metricsmgr.network.options.socket.received.buffer.size.bytes"
# The maximum socket's send buffer size in bytes
METRICSMGR_NETWORK_OPTIONS_SOCKET_SEND_BUFFER_SIZE_BYTES \
    = "heron.metricsmgr.network.options.socket.send.buffer.size.bytes"
| StarcoderdataPython |
1737082 | <reponame>dssantos/Cloud-Energy-Saver<gh_stars>0
#coding: utf-8
import subprocess, mac
from subprocess import Popen, PIPE, STDOUT
def wake(host):
    """Send a Wake-on-LAN packet (via etherwake) to every MAC known for host.

    Args:
        host: hostname/IP passed to mac.get() to resolve its MAC addresses.

    Raises:
        subprocess.CalledProcessError: if etherwake exits with a non-zero status.
    """
    mac_list = mac.get(host)
    for mac_address in mac_list:
        # List form instead of shell=True string interpolation: a malicious
        # MAC string can no longer inject extra shell commands.
        subprocess.check_output(["sudo", "etherwake", "-i", "eno1", mac_address])
def shutdown(host):
    """Power off a remote machine by running `sudo shutdown now` over SSH
    and print whatever the command wrote to stdout/stderr.

    NOTE(review): `host` is interpolated straight into a shell string and
    executed with shell=True -- an attacker-controlled host value could
    inject extra commands. Consider an argument list instead; confirm
    where `host` comes from before relying on this.
    """
    command = "ssh user@%s 'sudo shutdown now'" %host
    # stderr is merged into stdout (STDOUT) so a single read captures both.
    p = Popen(command, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True) # Runs command and store STDOUT
    output = p.stdout.read()
    # Python 2 print statement: this module targets Python 2.
    print output
| StarcoderdataPython |
3219048 | import numpy as np
from ..normalizations import minmax_normalization
from ..distance_metrics import euclidean
from .mcda_method import MCDA_method
class TOPSIS(MCDA_method):
    def __init__(self, normalization_method = minmax_normalization, distance_metric = euclidean):
        """
        Create a TOPSIS method object with a chosen normalization method
        `normalization_method` and distance metric `distance_metric`.

        Parameters
        -----------
            normalization_method : function
                method for decision matrix normalization chosen from `normalizations`
            distance_metric : function
                method for calculating the distance between two vectors
        """
        self.normalization_method = normalization_method
        self.distance_metric = distance_metric

    def __call__(self, matrix, weights, types):
        """
        Score alternatives from decision matrix `matrix` (m alternatives in rows,
        n criteria in columns) using criteria `weights` and criteria `types`.

        Parameters
        ----------
            matrix : ndarray
                Decision matrix with m alternatives in rows and n criteria in columns.
            weights: ndarray
                Vector with criteria weights. Sum of weights must be equal to 1.
            types: ndarray
                Vector with criteria types. Profit criteria are represented by 1 and cost by -1.

        Returns
        -------
            ndarray
                Vector with preference values of each alternative. The best
                alternative has the highest preference value.

        Examples
        ---------
        >>> topsis = TOPSIS(normalization_method = minmax_normalization, distance_metric = euclidean)
        >>> pref = topsis(matrix, weights, types)
        >>> rank = rank_preferences(pref, reverse = True)
        """
        TOPSIS._verify_input_data(matrix, weights, types)
        return TOPSIS._topsis(matrix, weights, types, self.normalization_method, self.distance_metric)

    @staticmethod
    def _topsis(matrix, weights, types, normalization_method, distance_metric):
        # Normalize the decision matrix and apply the criteria weights.
        weighted = normalization_method(matrix, types) * weights

        # Ideal (PIS) and anti-ideal (NIS) reference points, taken
        # criterion-wise over all alternatives.
        ideal = weighted.max(axis=0)
        anti_ideal = weighted.min(axis=0)

        # Distance of every alternative to both reference points, using the
        # chosen `distance_metric` from `distance_metrics`.
        dist_to_ideal = np.array([distance_metric(row, ideal) for row in weighted])
        dist_to_anti = np.array([distance_metric(row, anti_ideal) for row in weighted])

        # Relative closeness to the ideal solution: higher is better.
        return dist_to_anti / (dist_to_anti + dist_to_ideal)
1658828 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved
# ============================================================================
""" Abstract Writer """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABC, abstractmethod
from typing import Tuple, Dict, List
import numpy
from xai.data import explorer
from xai.formatter.report.section import OverviewSection, DetailSection
################################################################################
### Writer Visitor
################################################################################
class Writer(ABC):
"""
The Strategy interface declares operations common to all
supported report output.
"""
def __init__(self, *values) -> None:
"""
Abstract Writer
"""
self._values = values
@property
def values(self):
"""Returns keyword-ed variable."""
return self._values
def __str__(self):
return 'Writer:(' + str(self.values) + ')'
@abstractmethod
def out(self):
"""
Output Report
"""
pass
@abstractmethod
def build(self, title: str, overview: OverviewSection,
detail: DetailSection, *, content_table=False):
"""
Build Report
Args:
title(str): header title
overview(OverviewSection): Cover Section of report
detail(DetailSection): Details Section of report
content_table (bool): is content table enabled
default False
"""
pass
################################################################################
### Base Section
################################################################################
@abstractmethod
def add_new_page(self):
"""
Add new page
"""
pass
@abstractmethod
def draw_header(self, text: str, level: int, *, link=None):
"""
Draw Header
Args:
text(str): header text in the report
level(int): header level
link: header link
"""
pass
@abstractmethod
def draw_title(self, text: str, level: int, *, link=None):
"""
Draw Title
Args:
text(str): title in the report
level(int): title type (section or paragraph)
link: title link
"""
pass
@abstractmethod
def draw_paragraph(self, text: str):
"""
Draw Paragraph
Args:
text(str): html text to render in the report
"""
pass
################################################################################
### Basic/Reusable Section
################################################################################
@abstractmethod
def draw_basic_key_value_pairs(self, notes: str, *,
info: list):
"""
Draw key-value pairs information to the report
Args:
notes(str): Explain the block
info (list): list of tuple / list of (list of tuple))
multi-level rendering, e.g. to display `model_info`
"""
pass
@abstractmethod
def draw_basic_table(self, notes: str, *,
table_header: list, table_data: list,
col_width: list):
"""
Draw table to the report
Args:
notes(str): Explain the block
table_header (list): list of str
table_data (list): list of str
col_width: list of float,
default: None (evenly divided for the whole page width)
"""
pass
@abstractmethod
def draw_basic_images_with_grid_spec(self, notes: str, *,
image_list, grid_spec):
"""
Draw image blocks with formatted grid specification
Args
notes(str): Explain the block
image_list (list): the list of image_paths
grid_spec (dict): indicate image size and position
- key: image_name, or index if image_set is a list
- value: (x,y,w,h) position and weight/height of image,
with left top corner of the block as (0,0), unit in mm
"""
pass
################################################################################
### Summary Section
################################################################################
@abstractmethod
def draw_training_time(self, notes: str, *, timing: List[Tuple[str, int]]):
"""
Draw information of timing to the report
Args:
notes(str): Explain the block
timing (:obj:`list` of :obj:`tuple`): list of tuple
(name, time in second)
"""
pass
@abstractmethod
def draw_data_set_summary(self, notes: str, *,
data_summary: List[Tuple[str, int]]):
"""
Draw information of dataset summary to the report
Args:
notes(str): Explain the block
data_summary (:obj:`list` of :obj:`tuple`): list of tuple
(dataset_name, dataset_sample_number)
"""
pass
@abstractmethod
def draw_evaluation_result_summary(self, notes: str, *,
evaluation_result: dict):
"""
Draw information of training performance to the result
Args:
evaluation_result (dict): evaluation metric
- key: metric_name
- value: metric_value: single float value for average/overall metric,
list for class metrics
sample input 1: {'precision': 0.5}, report value directly
sample input 2: {'precision': {'class':[0.5,0.4,0.3],'average':0.5}},
report "average" value
sample input 3: {'precision': {'class':[0.5,0.4,0.3]},
report unweighted average for "class" value
notes (str, Optional): explain the block
"""
pass
@abstractmethod
def draw_model_info_summary(self, notes: str, *, model_info: list):
"""
Draw information of model info to the result
Args:
model_info (:obj:`list` of :obj:
`tuple`, Optional): list of tuple (model info attribute, model info value).
Default information include `use case name`, `version`, `use case team`.
notes (str, Optional): explain the block
"""
pass
################################################################################
### Data Section
################################################################################
@abstractmethod
def draw_data_missing_value(self, notes: str, *, missing_count: dict,
total_count: dict, ratio=False):
"""
Draw Missing Data Value Summary Table
Args:
notes(str): Explain the block
missing_count(dict): Missing Count
total_count(list): Total Count
ratio(bool): True if `missing_value` is the percentage
"""
pass
@abstractmethod
def draw_data_set_distribution(self, notes: str, *,
data_set_distribution: Tuple[str, explorer.CategoricalStats],
max_class_shown=20):
"""
Draw information of distribution on data set
Args:
notes(str): Explain the block
data_set_distribution (tuple: (str,dict)):
- tuple[0] str: label/split name
- tuple[1] CategoricalStats object: `frequency_count` attribute
key - class_name/split_name,
value - class_count/split_count
max_class_shown (int, Optional): maximum number of classes shown
in the figure, default is 20
notes (str, Optional):
explain the block
"""
pass
@abstractmethod
def draw_data_attributes(self, notes: str, *, data_attribute: Dict):
"""
Draw information of data attribute for data fields to the report
Args:
notes(str): Explain the block
data_attribute (:dict of :dict):
-key: data field name
-value: attributes (dict)
- key: attribute name
- value: attribute value
"""
pass
@abstractmethod
def draw_categorical_field_distribution(self, notes: str, *,
field_name: str,
field_distribution: Dict[str,
explorer.CategoricalStats],
max_values_display=20,
colors=None):
"""
Draw information of field value distribution for categorical type to
the report.
Args:
notes(str): Explain the block
field_name (str): data field name
field_distribution (:dict of :CategoricalStats):
-key: label_name
-value: CategoricalStats object
max_values_display (int): maximum number of values displayed
colors (list): the list of color code for rendering different class
"""
pass
@abstractmethod
def draw_numeric_field_distribution(self, notes: str, *,
field_name: str,
field_distribution: Dict[str,
explorer.NumericalStats],
force_no_log=False,
x_limit=False,
colors=None):
"""
Draw information of field value distribution for numerical type to
the report.
Args:
notes(str): Explain the block
field_name (str): data field name
field_distribution (:dict of :NumericalStats):
-key: label_name
-value: NumericalStats object
force_no_log (bool): whether to change y-scale to logrithmic
scale for a more balanced view
x_limit (list:): whether x-axis only display the required percentile range.
If True, field_distribution should have a
key "x_limit" and value of [x_min, x_max].
colors (list): the list of color code for rendering different class
"""
pass
@abstractmethod
def draw_text_field_distribution(self, notes: str, *,
field_name: str,
field_distribution: Dict[str,explorer.TextStats]):
"""
Draw information of field value distribution for text type to the
report.
Args:
notes(str): Explain the block
field_name (str): data field name
field_distribution (:dict of :TextStats):
-key: label_name
-value: TextStats object
"""
pass
@abstractmethod
def draw_datetime_field_distribution(self, notes: str, *,
field_name: str,
field_distribution: Dict[str,
explorer.DatetimeStats]):
"""
Draw information of field value distribution for datetime type to the
report.
Args:
notes(str): Explain the block
field_name (str): data field name
field_distribution (:dict of :dict):
-key: label_name
-value (:dict of :DatetimeStats):
Note that in order to render it in 2D diagram, the resolution has to be ['YEAR','MONTH'].
- 1st level key: year_X(int)
- 1st level value:
- 2nd level key: month_X(int)
- 2nd level value: count of sample in month_X of year_X
"""
pass
################################################################################
### Feature Section
################################################################################
@abstractmethod
def draw_feature_importance(self, notes: str, *,
importance_ranking: List[List],
importance_threshold: float,
maximum_number_feature=20):
"""
Add information of feature importance to the report.
Args:
notes(str): Explain the block
importance_ranking(:list of :list): a list of 2-item lists,
item[0]: score, item[1] feature_name
importance_threshold(float): threshold for displaying the feature
name and score in tables
maximum_number_feature(int): maximum number of features shown in bar-chart diagram
"""
pass
################################################################################
### Feature Shap Values
################################################################################
@abstractmethod
def draw_feature_shap_values(self, notes: str, *, mode: str,
feature_shap_values: List[Tuple[str, List]],
class_id: int,
train_data: numpy.ndarray = None):
"""
Add information of feature shap values to the report.
Args:
notes(str): Explain the block
mode (str): Model Model - classification/regression model
feature_shap_values(:list of :tuple): a list of 2-item tuple,
item[0]: feature name, item[1] shap values on each training samples
class_id(int): the class id for visualization.
train_data(numpy.dnarray): Optional, training data, row is for samples, column is for features.
"""
pass
################################################################################
### Training Section
################################################################################
@abstractmethod
def draw_hyperparameter_tuning(self, notes: str, *,
history: dict, best_idx: str,
search_space=None, benchmark_metric=None,
benchmark_threshold=None,
non_hyperopt_score=None):
"""
Add information of hyperparameter tuning to the report.
Args:
notes(str): Explain the block
history(:dict of dict): a dict of training log dict.
key: iteration index
value: hyperparameter tuning information
Each dict has two keys:
- params: a dict of which key is the parameter name
and value is parameter value
- val_scores: a dict of which key is the metric name
and value is metric value
best_idx(str):
- the best idx based on benchmark metric, corresponding the `history` dict key
search_space(:dict): parameter name and the search space for each parameter
benchmark_metric(:str): the metric used for benchmarking during hyperparameter tuning
benchmark_threshold(:float, Optional): the benchmarking threshold to accept the training
non_hyperopt_score(:float, Optional): the training metric without hyperparameter tuning
"""
pass
@abstractmethod
def draw_learning_curve(self, notes: str, *,
history: dict, best_idx: str,
benchmark_metric=None, benchmark_threshold=None,
training_params=None):
"""
Add information of learning curve to report.
Args:
notes(str): Explain the block
history(:dict of dict): a dict of training log dict.
key: epoch index
value: learning epoch information
Each dict has two keys:
- params: a dict of params on current epochs (Optional)
- val_scores: a dict of which key is the metric name
and value is metric value
best_idx(str):
- the best epoch based on benchmark metric, corresponding the `history` dict key
benchmark_metric(:str): the metric used for benchmarking during learning
benchmark_threshold(:float, Optional): the benchmarking threshold to accept the training
training_params(:dict): a dict of which key is training parameter name and
value is training parameter value
"""
pass
################################################################################
### Interpreter Section
################################################################################
@abstractmethod
def draw_model_interpreter(self, notes: str, *,
mode: str, class_stats: dict,
total_count: int, stats_type: str,
k:int, top: int=15):
"""
Add model interpreter for classification
Args:
mode (str): Model Model - classification/regression model
class_stats (dict): A dictionary maps the label to its aggregated statistics
total_count (int): The total number of explanations to generate the statistics
stats_type (str): The defined stats_type for statistical analysis
k (int): The k value of the defined stats_type
top (int): the number of top explanation to display
notes(str): text to explain the block
"""
pass
@abstractmethod
def draw_error_analysis(self, notes: str, *, mode: str, error_stats: dict,
stats_type: str, k: int, top: int=15):
"""
Add error analysis for classification
Args:
mode (str): Model Model - classification/regression model
error_stats (dict): A dictionary maps the label to its aggregated statistics
stats_type (str): The defined stats_type for statistical analysis
k (int): The k value of the defined stats_type
top (int): the number of top explanation to display
notes(str): text to explain the block
"""
pass
################################################################################
### Evaluation Section
################################################################################
@abstractmethod
def draw_multi_class_evaluation_metric_results(self, notes: str, *,
metric_tuple):
"""
Add information about metric results for multi-class evaluation
Args:
notes(str): Explain the block
*metric_tuple(tuple): (evaluation_header, evaluation_metric_dict)
- evaluation_header(str): a header for current evaluation,
can be split or round number.
- evaluation_metric_dict(dict): key-value pair for metric
- key: metric name
- value: metric dict. The dict should either
(1) have a `class` keyword, with key-value pair of class name
and corresponding values, or
(2) have a `average` keyword to show a macro-average metric.
"""
pass
@abstractmethod
def draw_binary_class_evaluation_metric_results(self, notes: str, *,
metric_tuple: tuple,
aggregated=True):
"""
Add information about metric results for binary-class evaluation
Args:
notes(str): Explain the block
metric_tuple(tuple): (evaluation_header, evaluation_metric_dict)
- evaluation_header(str): a header for current evaluation,
can be split or round number.
- evaluation_metric_dict(dict): key-value pair for metric
- key: metric name
- value: metric value
aggregated(bool): whether to aggregate multiple result tables into one
"""
pass
@abstractmethod
def draw_confusion_matrix_results(self, notes: str, *,
confusion_matrix_tuple: tuple):
"""
Add information about confusion matrix to report
Args:
notes(str): Explain the block
confusion_matrix_tuple(tuple): (confusion_matrix_header, confusion_matrix_dict)
- confusion_matrix_header(str): a header for confusion_matrix,
can be split or round number.
- confusion_matrix_dict(dict):
- `labels`(:list of :str): label of classes
- `values`(:list of :list): 2D list for confusion matrix value,
row for predicted, column for true.
"""
pass
    @abstractmethod
    def draw_multi_class_confidence_distribution(self, notes: str, *,
                                                 visual_result_tuple: tuple,
                                                 max_num_classes=9):
        """
        Add information about the multi-class confidence distribution to the
        report.

        Args:
            notes(str): text to explain the block
            visual_result_tuple(tuple): (visual_result_header, visual_result_dict)
                - visual_result_header(str): a header for the distribution,
                  can be a split or round number.
                - visual_result_dict(dict): key-value pairs
                    - key(str): the predicted class
                    - value(dict): result dict
                        - `gt` (:list of :str): ground-truth class label for
                          all samples
                        - `values` (:list of :float): probability for all
                          samples
            max_num_classes(int, Optional): maximum number of classes
                displayed in each graph, default 9
        """
        pass
    @abstractmethod
    def draw_binary_class_confidence_distribution(self, notes: str, *,
                                                  visual_result_tuple: tuple):
        """
        Add information about the binary-class confidence distribution to the
        report.

        Args:
            notes(str): text to explain the block
            visual_result_tuple(tuple): (visual_result_header, visual_result_dict)
                - visual_result_header(str): a header for the distribution,
                  can be a split or round number.
                - visual_result_dict(dict): key-value pairs
                    - `gt` (:list of :str): ground-truth class label for all
                      samples
                    - `probability` (:list of :list): 2D list (N samples * 2)
                      giving the probability distribution of each sample
        """
        pass
    @abstractmethod
    def draw_binary_class_reliability_diagram(self, notes: str, *,
                                              visual_result_tuple: tuple):
        """
        Add reliability-diagram (calibration) information to the report.

        Args:
            notes(str): text to explain the block
            visual_result_tuple(tuple): (visual_result_header, visual_result_dict)
                - visual_result_header(str): a header for the diagram,
                  can be a split or round number.
                - visual_result_dict(dict): key-value pairs
                    - `gt` (:list of :str): ground-truth class label for all
                      samples
                    - `probability` (:list of :list): 2D list (N samples * 2)
                      giving the probability distribution of each sample
        """
        pass
4831101 |
#Ref: <NAME>
"""
Code tested on Tensorflow: 2.2.0
Keras: 2.4.3
dataset: https://finance.yahoo.com/quote/GE/history/
Also try S&P: https://finance.yahoo.com/quote/%5EGSPC/history?p=%5EGSPC
"""
import numpy as np
from keras.models import Sequential
from keras.layers import LSTM
from keras.layers import Dense, Dropout
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.preprocessing import StandardScaler
import seaborn as sns
#from datetime import datetime
df = pd.read_csv('data/GE.csv')

# Keep the dates separate: they are only needed for plotting, not training.
train_dates = pd.to_datetime(df['Date'])

# Training variables: the five numeric columns following 'Date'.
cols = list(df)[1:6]
df_for_training = df[cols].astype(float)

# LSTM uses sigmoid and tanh, which are sensitive to magnitude, so the
# features are standardized (zero mean, unit variance) before training.
scaler = StandardScaler()
scaler = scaler.fit(df_for_training)
df_for_training_scaled = scaler.transform(df_for_training)

# LSTM networks require input shaped (n_samples, timesteps, n_features).
# Each sample is a sliding window of `n_past` days used to predict the
# first feature ('Open') `n_future` days ahead.
trainX = []
trainY = []

n_future = 1   # number of days to predict into the future
n_past = 14    # number of past days used to predict the future

for i in range(n_past, len(df_for_training_scaled) - n_future + 1):
    trainX.append(df_for_training_scaled[i - n_past:i, 0:df_for_training.shape[1]])
    trainY.append(df_for_training_scaled[i + n_future - 1:i + n_future, 0])

trainX, trainY = np.array(trainX), np.array(trainY)

print('trainX shape == {}.'.format(trainX.shape))
print('trainY shape == {}.'.format(trainY.shape))

# Define the stacked-LSTM regression model.
model = Sequential()
model.add(LSTM(64, activation='relu', input_shape=(trainX.shape[1], trainX.shape[2]), return_sequences=True))
model.add(LSTM(32, activation='relu', return_sequences=False))
model.add(Dropout(0.2))
model.add(Dense(trainY.shape[1]))

model.compile(optimizer='adam', loss='mse')
model.summary()

# Fit the model, holding out 10% of the windows for validation.
history = model.fit(trainX, trainY, epochs=10, batch_size=16, validation_split=0.1, verbose=1)

# Forecasting: start with the last day in the training data and predict forward.
n_future = 90  # redefined to extend prediction beyond the original horizon
forecast_period_dates = pd.date_range(list(train_dates)[-1], periods=n_future, freq='1d').tolist()
forecast = model.predict(trainX[-n_future:])

# Inverse-transform back to the original price scale. The scaler was fit on
# 5 columns, so the single predicted column is tiled to 5 columns first and
# only the first column of the inverse transform is kept.
forecast_copies = np.repeat(forecast, df_for_training.shape[1], axis=-1)
y_pred_future = scaler.inverse_transform(forecast_copies)[:, 0]

# Convert pandas timestamps to plain dates for the forecast frame.
forecast_dates = [time_i.date() for time_i in forecast_period_dates]

df_forecast = pd.DataFrame({'Date': np.array(forecast_dates), 'Open': y_pred_future})
df_forecast['Date'] = pd.to_datetime(df_forecast['Date'])

# Fix: take an explicit copy so the assignments below operate on an
# independent frame instead of a view (avoids SettingWithCopyWarning and
# potential silent no-ops).
original = df[['Date', 'Open']].copy()
original['Date'] = pd.to_datetime(original['Date'])
original = original.loc[original['Date'] >= '2020-5-1']

# Fix: pass x/y as keyword arguments -- extra positional arguments to
# seaborn plotting functions were deprecated and later removed.
sns.lineplot(x=original['Date'], y=original['Open'])
sns.lineplot(x=df_forecast['Date'], y=df_forecast['Open'])
plt.show()
| StarcoderdataPython |
1706311 | <gh_stars>100-1000
""" This file defines general utility functions and classes. """
import math
import numpy as np
# import sys
#
# sys.path.append('/home/rkojcev/ros_python3/devel/lib')
import PyKDL as kdl
class BundleType():
    """
    Bundle a dictionary of fields into object attributes, similar to a
    record or a mutable namedtuple. The attribute set is frozen after
    construction: existing fields may be reassigned, new ones may not.
    """
    def __init__(self, variables):
        # Go through object.__setattr__ so our own frozen __setattr__
        # does not reject the initial assignments.
        for name in variables:
            object.__setattr__(self, name, variables[name])

    def __setattr__(self, key, value):
        # Only attributes created at construction time may be rebound.
        if hasattr(self, key):
            object.__setattr__(self, key, value)
        else:
            raise AttributeError("%r has no attribute %s" % (self, key))
def checkShape(value, expectedShape, name=''):
    """
    Raise ValueError when ``value.shape`` differs from ``expectedShape``.

    Args:
        value: Matrix (or array) whose shape is checked.
        expectedShape: A tuple or list of integers.
        name: An optional name to include in the exception message.
    """
    if tuple(expectedShape) != value.shape:
        raise ValueError('Shape mismatch %s: Expected %s, got %s' %
                         (name, str(expectedShape), str(value.shape)))
def finiteDifferences(func, inputs, funcOutputShape=(), epsilon=1e-5):
    """
    Compute gradients via central finite differences:
    derivative = (func(x+epsilon) - func(x-epsilon)) / (2*epsilon)

    Args:
        func: Function to differentiate; inputs and outputs may have
            arbitrary dimension.
        inputs: Array value at which the gradient is evaluated.
        funcOutputShape: Shape of func's output. Default is the empty
            tuple, which works for scalar-valued functions.
        epsilon: Step size used for the finite differences.

    Returns:
        Gradient of each output dimension of func with respect to each
        dimension of inputs.
    """
    gradient = np.zeros(inputs.shape + funcOutputShape)
    for idx in np.ndindex(*inputs.shape):
        # Perturb one coordinate in the positive direction.
        perturbed = np.copy(inputs)
        perturbed[idx] += epsilon
        plus = func(perturbed)
        assert plus.shape == funcOutputShape
        # ...and in the negative direction.
        perturbed = np.copy(inputs)
        perturbed[idx] -= epsilon
        minus = func(perturbed)
        assert minus.shape == funcOutputShape
        gradient[idx] += (plus - minus) / (2 * epsilon)
    return gradient
def approxEqual(a01, b01, threshold=1e-5):
    """
    Check whether two numbers (or arrays, element-wise) agree within an
    absolute threshold.

    Returns:
        True if every element of ``|a01 - b01|`` is below ``threshold``.
    """
    difference = np.abs(a01 - b01)
    return np.all(difference < threshold)
def extractCondition(hyperparams, m01):
    """
    Pull the hyperparameters relevant to the specified condition and
    return them as a new dictionary. List-valued entries are indexed by
    the condition; scalar entries are passed through unchanged.
    """
    condition = {}
    for var, val in hyperparams.items():
        condition[var] = val[m01] if isinstance(val, list) else val
    return condition
def getEePoints(offsets, eePos, eeRot):
    """
    Compute the end-effector points from a position, a rotation matrix
    and per-point offsets.

    Args:
        offsets: N x 3 array, one row per end-effector point.
        eePos: 1 x 3 array, end-effector position.
        eeRot: 3 x 3 rotation matrix of the end effector.

    Returns:
        3 x N array of end-effector points.
    """
    rotated = np.dot(eeRot, offsets.T)
    return np.asarray(rotated + eePos.T)
def getPosition(tf1, target, source, time):
    """
    Use tf to return the position of ``target`` relative to ``source``
    at ``time``.

    Args:
        tf1: Object implementing the TransformListener interface.
        target: Valid label of the target link.
        source: Valid label of the source link.
        time: Time in TF's time structure (secs and nsecs).
    """
    # lookupTransform returns (translation, rotation); only the
    # translation is needed here.
    translation, _ = tf1.lookupTransform(source, target, time)
    return np.asarray(translation)
def getRotationMatrix(angle, direction, point=None):
    """Return matrix to rotate about axis defined by point and direction.

    ``direction`` may be a list or array; it is copied and normalized, so
    the caller's data is never modified and non-unit axes are handled
    correctly.

    >>> rot = rotation_matrix(math.pi/2, [0, 0, 1], [1, 0, 0])
    >>> np.allclose(np.dot(R, [0, 0, 0, 1]), [1, -1, 0, 1])
    True
    >>> angle = (random.random() - 0.5) * (2*math.pi)
    >>> direc = np.random.random(3) - 0.5
    >>> point = np.random.random(3) - 0.5
    >>> R0 = rotation_matrix(angle, direc, point)
    >>> R1 = rotation_matrix(angle-2*math.pi, direc, point)
    >>> is_same_transform(R0, R1)
    True
    >>> R0 = rotation_matrix(angle, direc, point)
    >>> R1 = rotation_matrix(-angle, -direc, point)
    >>> is_same_transform(R0, R1)
    True
    >>> I = np.identity(4, np.float64)
    >>> np.allclose(I, rotation_matrix(math.pi*2, direc))
    True
    >>> np.allclose(2, np.trace(rotation_matrix(math.pi/2,
    ...                                         direc, point)))
    True
    """
    sina = math.sin(angle)
    cosa = math.cos(angle)
    # Fix: work on a normalized float copy. The Rodrigues formula below is
    # only valid for a unit axis, and the in-place scaling further down
    # must not clobber the caller's array (the original mutated it and
    # crashed on plain-list input).
    direction = np.array(direction[:3], dtype=np.float64, copy=True)
    direction /= math.sqrt(np.dot(direction, direction))
    # rotation matrix around unit vector
    rot = np.diag([cosa, cosa, cosa])
    rot += np.outer(direction, direction) * (1.0 - cosa)
    direction *= sina
    rot += np.array([[0.0, -direction[2], direction[1]],
                     [direction[2], 0.0, -direction[0]],
                     [-direction[1], direction[0], 0.0]])
    matrix = np.identity(4)
    matrix[:3, :3] = rot
    if point is not None:
        # rotation not around origin
        point = np.array(point[:3], dtype=np.float64, copy=False)
        matrix[:3, 3] = point - np.dot(rot, point)
    return matrix
def rotationFromMatrix(matrix):
    """Return rotation angle and axis from rotation matrix.

    >>> angle = (random.random() - 0.5) * (2*math.pi)
    >>> direc = np.random.random(3) - 0.5
    >>> point = np.random.random(3) - 0.5
    >>> R0 = rotation_matrix(angle, direc, point)
    >>> angle, direc, point = rotationFromMatrix(R0)
    >>> R1 = rotation_matrix(angle, direc, point)
    >>> is_same_transform(R0, R1)
    True
    """
    rot = np.array(matrix, dtype=np.float64, copy=False)
    r33 = rot[:3, :3]
    # direction: unit eigenvector of r33 corresponding to eigenvalue of 1
    w00, w01 = np.linalg.eig(r33.T)
    i = np.where(abs(np.real(w00) - 1.0) < 1e-8)[0]
    # Fix: `if not i:` truth-tested a numpy index array, which raises
    # "truth value is ambiguous" for multiple matches and wrongly fired
    # when the only match sat at index 0. Test the length instead.
    if len(i) == 0:
        raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
    direction = np.real(w01[:, i[-1]]).squeeze()
    # point: unit eigenvector of rot corresponding to eigenvalue of 1
    w00, q00 = np.linalg.eig(rot)
    i = np.where(abs(np.real(w00) - 1.0) < 1e-8)[0]
    if len(i) == 0:
        raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
    point = np.real(q00[:, i[-1]]).squeeze()
    point /= point[3]
    # rotation angle depending on direction
    cosa = (np.trace(r33) - 1.0) / 2.0
    if abs(direction[2]) > 1e-8:
        sina = (rot[1, 0] + (cosa-1.0)*direction[0]*direction[1]) / direction[2]
    elif abs(direction[1]) > 1e-8:
        sina = (rot[0, 2] + (cosa-1.0)*direction[0]*direction[2]) / direction[1]
    else:
        sina = (rot[2, 1] + (cosa-1.0)*direction[1]*direction[2]) / direction[0]
    angle = math.atan2(sina, cosa)
    return angle, direction, point
def quaternionFromMatrix(matrix, isprecise=False):
    """Return quaternion from rotation matrix.

    The result uses (x, y, z, w) ordering. If isprecise is True, the input
    matrix is assumed to be a precise rotation matrix and a faster
    branch-based algorithm is used; otherwise a robust eigenvector method
    is applied.

    >>> q00 = quaternionFromMatrix(np.identity(4), True)
    >>> np.allclose(q00, [1, 0, 0, 0])
    True
    >>> q00 = quaternionFromMatrix(np.diag([1, -1, -1, 1]))
    >>> np.allclose(q00, [0, 1, 0, 0]) or np.allclose(q00, [0, -1, 0, 0])
    True
    >>> R = rotation_matrix(0.123, (1, 2, 3))
    >>> q00 = quaternionFromMatrix(R, True)
    >>> np.allclose(q00, [0.9981095, 0.0164262, 0.0328524, 0.0492786])
    True
    >>> R = [[-0.545, 0.797, 0.260, 0], [0.733, 0.603, -0.313, 0],
    ...      [-0.407, 0.021, -0.913, 0], [0, 0, 0, 1]]
    >>> q00 = quaternionFromMatrix(R)
    >>> np.allclose(q00, [0.19069, 0.43736, 0.87485, -0.083611])
    True
    >>> R = [[0.395, 0.362, 0.843, 0], [-0.626, 0.796, -0.056, 0],
    ...      [-0.677, -0.498, 0.529, 0], [0, 0, 0, 1]]
    >>> q00 = quaternionFromMatrix(R)
    >>> np.allclose(q00, [0.82336615, -0.13610694, 0.46344705, -0.29792603])
    True
    >>> R = random_rotation_matrix()
    >>> q00 = quaternionFromMatrix(R)
    >>> is_same_transform(R, quaternion_matrix(q00))
    True
    >>> is_same_quaternion(quaternionFromMatrix(R, isprecise=False),
    ...                    quaternionFromMatrix(R, isprecise=True))
    True
    >>> R = euler_matrix(0.0, 0.0, np.pi/2.0)
    >>> is_same_quaternion(quaternionFromMatrix(R, isprecise=False),
    ...                    quaternionFromMatrix(R, isprecise=True))
    True
    """
    matrix = np.array(matrix, dtype=np.float64, copy=False)[:4, :4]
    if isprecise:
        # Fast path: pick the numerically largest quaternion component
        # from the trace, then derive the others from matrix differences.
        q00 = np.empty((4, ))
        t00 = np.trace(matrix)
        if t00 > matrix[3, 3]:
            q00[0] = t00
            q00[3] = matrix[1, 0] - matrix[0, 1]
            q00[2] = matrix[0, 2] - matrix[2, 0]
            q00[1] = matrix[2, 1] - matrix[1, 2]
        else:
            # Trace is small: branch on the dominant diagonal element.
            i, j, k = 0, 1, 2
            if matrix[1, 1] > matrix[0, 0]:
                i, j, k = 1, 2, 0
            if matrix[2, 2] > matrix[i, i]:
                i, j, k = 2, 0, 1
            t00 = matrix[i, i] - (matrix[j, j] + matrix[k, k]) + matrix[3, 3]
            q00[i] = t00
            q00[j] = matrix[i, j] + matrix[j, i]
            q00[k] = matrix[k, i] + matrix[i, k]
            q00[3] = matrix[k, j] - matrix[j, k]
            q00 = q00[[3, 0, 1, 2]]
        q00 *= 0.5 / math.sqrt(t00 * matrix[3, 3])
    else:
        m00 = matrix[0, 0]
        m01 = matrix[0, 1]
        m02 = matrix[0, 2]
        m10 = matrix[1, 0]
        m11 = matrix[1, 1]
        m12 = matrix[1, 2]
        m20 = matrix[2, 0]
        m21 = matrix[2, 1]
        m22 = matrix[2, 2]
        # symmetric matrix k00
        k00 = np.array([[m00-m11-m22, 0.0, 0.0, 0.0],
                        [m01+m10, m11-m00-m22, 0.0, 0.0],
                        [m02+m20, m12+m21, m22-m00-m11, 0.0],
                        [m21-m12, m02-m20, m10-m01, m00+m11+m22]])
        k00 /= 3.0
        # quaternion is eigenvector of k00 that corresponds to largest eigenvalue
        w00, v00 = np.linalg.eigh(k00)
        q00 = v00[[3, 0, 1, 2], np.argmax(w00)]
    # Canonicalize the sign so the scalar part is non-negative.
    if q00[0] < 0.0:
        np.negative(q00, q00)
    # exchange (w, x, y, z) to (x, y, z, w)
    qNew = np.empty(4)
    qNew[:3] = q00[1:]
    qNew[3] = q00[0]
    return qNew
def jointListToKdl(q00):
    """Convert a list (or numpy matrix) of joint values to a KDL JntArray.

    None is passed through unchanged.
    """
    if q00 is None:
        return None
    # Degenerate column matrices are flattened to a plain list first.
    if isinstance(q00, np.matrix) and q00.shape[1] == 0:
        q00 = q00.T.tolist()[0]
    result = kdl.JntArray(len(q00))
    for index in range(len(q00)):
        result[index] = q00[index]
    return result
def jointKdlToList(q00):
    """Convert a KDL JntArray to a plain Python list (None passes through)."""
    if q00 is None:
        return None
    values = []
    for i in range(q00.rows()):
        values.append(q00[i])
    return values
def forwardKinematics(robotChain, linkNames, q00, baseLink='base', endLink='ee_link'):
    """
    Perform forward kinematics.

    Args:
        robotChain: robot's chain object
        linkNames: list of robot link names
        q00: list of joint positions
        baseLink: name of the link regarded as origin
        endLink: name of the link regarded as target

    Returns:
        translation vector (1 x 3) and rotation matrix (3 x 3) from
        endLink to baseLink
    """
    baseTrans = doKdlFk(robotChain, q00, linkNames.index(baseLink))
    if baseTrans is None:
        # NOTE(review): on FK failure this only prints and then proceeds,
        # so np.linalg.inv(None) below raises anyway -- consider raising
        # an explicit error here instead.
        print("FK KDL failure on base transformation.")
    endTrans = doKdlFk(robotChain, q00, linkNames.index(endLink))
    if endTrans is None:
        print("FK KDL failure on end transformation.")
    # Relative pose: inv(base) * end expresses the end link in the base frame.
    pose = np.dot(np.linalg.inv(baseTrans), endTrans)
    pos = pose[:3, 3].reshape(1, 3)
    rot = pose[:3, :3]
    return pos, rot
def doKdlFk(robotChain, q00, linkNumber):
    """
    Run the KDL recursive FK solver up to ``linkNumber``.

    Args:
        robotChain: KDL chain of the robot.
        q00: list of joint positions.
        linkNumber: index of the segment up to which FK is computed.

    Returns:
        4 x 4 homogeneous transform of that link, or None when the KDL
        solver reports failure (negative status).
    """
    endeffecFrame = kdl.Frame()
    fkKdl = kdl.ChainFkSolverPos_recursive(robotChain)
    kinematicsStatus = fkKdl.JntToCart(jointListToKdl(q00),
                                       endeffecFrame,
                                       linkNumber)
    if kinematicsStatus >= 0:
        p00 = endeffecFrame.p
        matrix = endeffecFrame.M
        # Assemble a homogeneous matrix from KDL's rotation and position.
        return np.array([[matrix[0, 0], matrix[0, 1], matrix[0, 2], p00.x()],
                         [matrix[1, 0], matrix[1, 1], matrix[1, 2], p00.y()],
                         [matrix[2, 0], matrix[2, 1], matrix[2, 2], p00.z()],
                         [0, 0, 0, 1]])
    else:
        return None
def inverseKinematics(robotChain, pos, rot, qGuess=None, minJoints=None, maxJoints=None):
    """
    Perform inverse kinematics with KDL's Newton-Raphson solver with
    joint limits.

    Args:
        robotChain: robot's chain object
        pos: 1 x 3 or 3 x 1 array of the end effector position.
        rot: 3 x 3 array of the end effector rotation
        qGuess: guess values for the joint positions
        minJoints: minimum value of the position for each joint
        maxJoints: maximum value of the position for each joint

    Returns:
        list of joint positions, or None when no solution is found
    """
    posKdl = kdl.Vector(pos[0], pos[1], pos[2])
    rotKdl = kdl.Rotation(rot[0, 0], rot[0, 1], rot[0, 2],
                          rot[1, 0], rot[1, 1], rot[1, 2],
                          rot[2, 0], rot[2, 1], rot[2, 2])
    frameKdl = kdl.Frame(rotKdl, posKdl)
    numJoints = robotChain.getNrOfJoints()
    # Default joint limits: +/- pi for every joint.
    minJoints = -np.pi * np.ones(numJoints) if minJoints is None else minJoints
    maxJoints = np.pi * np.ones(numJoints) if maxJoints is None else maxJoints
    minsKdl = jointListToKdl(minJoints)
    maxsKdl = jointListToKdl(maxJoints)
    # Position IK is built from an FK solver and a velocity (pinv) IK solver.
    fkKdl = kdl.ChainFkSolverPos_recursive(robotChain)
    ikVKdl = kdl.ChainIkSolverVel_pinv(robotChain)
    ikPKdl = kdl.ChainIkSolverPos_NR_JL(robotChain, minsKdl, maxsKdl,
                                        fkKdl, ikVKdl)
    if qGuess is None:
        # use the midpoint of the joint limits as the guess
        lowerLim = np.where(np.isfinite(minJoints), minJoints, 0.)
        upperLim = np.where(np.isfinite(maxJoints), maxJoints, 0.)
        qGuess = (lowerLim + upperLim) / 2.0
        qGuess = np.where(np.isnan(qGuess), [0.]*len(qGuess), qGuess)
    qKdl = kdl.JntArray(numJoints)
    qGuessKdl = jointListToKdl(qGuess)
    # Non-negative status means the solver converged to a solution.
    if ikPKdl.CartToJnt(qGuessKdl, frameKdl, qKdl) >= 0:
        return jointKdlToList(qKdl)
    else:
        return None
| StarcoderdataPython |
4823088 | <gh_stars>1-10
from utils import tab_printer
from gpn import GPNTrainer
from parser import parameter_parser
from fine_tune import finetune_GPN
import networkx as nx
def main():
    """
    Parse command-line parameters, read the data, then fit and score a
    GPN model.
    """
    args = parameter_parser()
    tab_printer(args)
    trainer = GPNTrainer(args)
    # trainer.fit()
    # Score the prediction and learning ability of the model.
    trainer.score()
    # Other evaluations, disabled by default:
    #   trainer.score2()  -- scoring on the subgraph test set
    #   trainer.score3()  -- scoring on generalization ability
    # Fine-tuning for downstream tasks:
    #   model = finetune_GPN(args, trainer.number_of_labels)
    #   model.finetune()


if __name__ == "__main__":
    main()
| StarcoderdataPython |
82632 | # -*- coding: utf-8 -*-
'''
Created on 1983. 08. 09.
@author: <NAME>, CMBU Specialist in Korea, VMware [<EMAIL>]
'''
name = 'Project' # custom resource name
sdk = 'vra' # imported SDK at common directory
# Per-operation inputs: each CRUD action receives a VraManager constant.
inputs = {
    'create': {
        'VraManager': 'constant'
    },
    'read': {
    },
    'update': {
        'VraManager': 'constant'
    },
    'delete': {
        'VraManager': 'constant'
    }
}
# JSON-schema style property definitions for the Project custom resource.
properties = {
    'name': {
        'type': 'string',
        'title': 'name',
        'description': 'Unique name of project'
    },
    'description': {
        'type': 'string',
        'title': 'description',
        'default': '',
        'description': 'Project descriptions'
    },
    'sharedResources': {
        'type': 'boolean',
        'title': 'sharedResources',
        'default': True,
        'description': 'Deployments are shared between all users in the project'
    },
    'administrators': {
        'type': 'array',
        'title': 'administrators',
        'default': [],
        'items': {
            'type': 'string'
        },
        'description': 'Accounts of administrator user'
    },
    'members': {
        'type': 'array',
        'title': 'members',
        'default': [],
        'items': {
            'type': 'string'
        },
        'description': 'Accounts of member user'
    },
    'viewers': {
        'type': 'array',
        'title': 'viewers',
        'default': [],
        'items': {
            'type': 'string'
        },
        'description': 'Accounts of viewer user'
    },
    'zones': {
        'type': 'array',
        # Fixed: title was 'viewers', a copy-paste slip from the entry above.
        'title': 'zones',
        'default': [],
        'items': {
            'type': 'string'
        },
        'description': 'Specify the zones ID that can be used when users provision deployments in this project'
    },
    'placementPolicy': {
        'type': 'string',
        'title': 'placementPolicy',
        'default': 'default',
        'enum': [
            'default',
            'spread'
        ],
        'description': 'Specify the placement policy that will be applied when selecting a cloud zone for provisioning'
    },
    'customProperties': {
        'type': 'object',
        'title': 'customProperties',
        'default': {},
        'description': 'Specify the custom properties that should be added to all requests in this project'
    },
    'machineNamingTemplate': {
        'type': 'string',
        'title': 'machineNamingTemplate',
        'default': '',
        'description': 'Specify the naming template to be used for machines, networks, security groups and disks provisioned in this project'
    },
    'operationTimeout': {
        'type': 'integer',
        'title': 'operationTimeout',
        'default': 0,
        'description': 'Request timeout seconds'
    }
}
3392830 | <filename>tests/conftest.py
import pytest
import sys
import logging
from grpc_server.common.utils import (get_current_time)
# Make project-root modules importable when pytest runs from the repo root.
sys.path.append('.')
@pytest.fixture(scope='session')
def snappiserver():
    """Start a mock Snappi server once per test session.

    The server handle is stashed on the ``pytest`` module namespace so
    tests can reach it without importing the server module directly.
    """
    from .snappiserver import SnappiServer
    pytest.snappiserver = SnappiServer().start()
    yield
@pytest.fixture(scope='session')
def serverlogfile():
    """Create a timestamped server log file and yield the configured logger.

    The file name embeds the current time so each session writes a fresh log.
    """
    from grpc_server.common.utils import init_logging
    log_level = logging.INFO
    log_file = 'test-'+str(get_current_time())+'.log'
    # NOTE(review): 'confest' below looks like a typo for 'conftest' --
    # confirm against init_logging's expectations before changing it.
    serverlogfile = init_logging('unit', 'confest', log_file, log_level, False)
    yield serverlogfile
| StarcoderdataPython |
1699460 | <reponame>senadkurtisi/ml-tracking-ops<filename>setup.py
import os
import io
from setuptools import setup
def read(path, encoding="utf-8"):
    """Read and return the text of *path*, resolved relative to this file."""
    full_path = os.path.join(os.path.dirname(__file__), path)
    with io.open(full_path, encoding=encoding) as handle:
        return handle.read()
# Package metadata.
NAME = "ml_tracking_ops"
AUTHOR = "<NAME>"
VERSION = "0.0.1"
LICENCE = "MIT"
PACKAGES = ["ml_tracking_ops", 'ml_tracking_ops.experiment', 'ml_tracking_ops.ml_tracking_ops']
DESCRIPTION = 'ML-Ops-Tracking: An ML Ops library which enables tracking and visualizing machine learning experiments '
LONG_DESCRIPTION = read("README.md")
LONG_DESCRIPTION_CONTENT_TYPE = "text/markdown"

ENTRY_POINTS = {
    "console_scripts": ['ml-tracking-ops = ml_tracking_ops.main:main']
}

INSTALL_REQUIREMENTS = [
    "flask>=1.1.2",
    "numpy>=1.21.2",
    "watchdog>=2.1.6"
]

setup(
    name=NAME,
    author=AUTHOR,
    # Fixed: the setuptools keyword is `license`; `licence=` was silently
    # ignored as an unknown distribution option.
    license=LICENCE,
    version=VERSION,
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    long_description_content_type=LONG_DESCRIPTION_CONTENT_TYPE,
    packages=PACKAGES,
    entry_points=ENTRY_POINTS,
    include_package_data=True,
    zip_safe=False,
    install_requires=INSTALL_REQUIREMENTS,
    classifiers=[
        "Development Status :: 3 - Alpha",
        # Fixed: a missing comma here concatenated the next two classifiers
        # into one invalid string.
        "Environment :: Console",
        "Operating System :: Microsoft :: Windows",
        "Intended Audience :: Science/Research",
        "Natural Language :: English",
    ],
)
| StarcoderdataPython |
142317 | from unittest import TestCase
from datetime import datetime
import django
from django.urls import reverse
from django.utils import timezone
from faker import Faker
from rest_framework.test import APIClient
from rest_framework import status
from core.models import WatchListModel
from watchlist_app.api.serializers import WatchListSerializer
from watchlist_app.tests.factories import WatchListFactory, StreamPlatformFactory
# Resolved once at import time; the movie-list route takes no arguments.
MOVIES_URL = reverse('movie_list')
def movie_url_pk(pk):
    """Return the movie-detail URL for the given primary key."""
    detail_kwargs = {'pk': pk}
    return reverse('movie_detail', kwargs=detail_kwargs)
def sample_stream_platform(user, name='Main course'):
    # NOTE(review): `user` and `name` are accepted but ignored -- the factory
    # generates its own data. Confirm callers do not rely on these values.
    return StreamPlatformFactory()
def valid_watch_list(stream_platform_id):
    """Build a valid POST payload for the watch-list create endpoint."""
    return {
        'title': faker.company(),
        'platform': stream_platform_id,
        'storyline': faker.sentence(),
        'website': faker.url(),
        'active': faker.boolean(),
        # NOTE(review): a 'created' timestamp was experimented with in
        # several datetime formats and left out -- confirm the serializer
        # sets it automatically.
    }
# Module-wide Faker instance shared by the payload helpers above.
faker = Faker()
class MoviesApiTests(TestCase):
    """Integration tests for the movie (watch-list) API endpoints."""

    def setUp(self):
        self.client = APIClient()
        self.stream_platform = StreamPlatformFactory()
        self.valid_watch_list = valid_watch_list(self.stream_platform.id)
        self.invalid_watch_list = {
            'title': '',
        }

    def test_retrieve_movies(self):
        """Listing returns all stored watch-list entries."""
        WatchListFactory(platform=self.stream_platform)
        WatchListFactory(platform=self.stream_platform)

        res = self.client.get(MOVIES_URL)

        movies = WatchListModel.objects.all()
        serializer = WatchListSerializer(movies, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)

    def test_create_movie_successful(self):
        """A valid payload creates a matching database row."""
        self.client.post(MOVIES_URL, self.valid_watch_list)

        exists = WatchListModel.objects.filter(
            **self.valid_watch_list
        ).exists()
        self.assertTrue(exists)

    def test_create_movie_invalid(self):
        """An invalid payload is rejected with 400."""
        res = self.client.post(MOVIES_URL, self.invalid_watch_list)

        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_valid_update_movie(self):
        """Updating with validated data succeeds."""
        movie = WatchListFactory(platform=self.stream_platform)
        response = self.client.put(
            movie_url_pk(movie.pk),
            data=self.valid_watch_list
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_invalid_update_movie(self):
        """Updating with invalid data is rejected with 400."""
        movie = WatchListFactory(platform=self.stream_platform)
        response = self.client.put(
            movie_url_pk(movie.pk),
            data=self.invalid_watch_list
        )
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_delete_movie(self):
        """Deleting an entry returns 204."""
        movie = WatchListFactory(platform=self.stream_platform)
        response = self.client.delete(movie_url_pk(movie.pk))
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
| StarcoderdataPython |
4820501 | # def number_increment(numbers):
# def increase():
# nonlocal numbers
# numbers = [x + 1 for x in numbers]
#
# def get():
# return numbers
#
# return {
# 'increase': increase,
# 'get': get,
# }
#
# operations = number_increment([1, 2, 3])
# operations['increase']()
# print(operations['get']())
# operations['increase']()
# operations['increase']()
# operations['increase']()
# print(operations['get']())
def number_increment(numbers):
    """Return a new list with every element of *numbers* increased by one.

    The input sequence is left unmodified.
    """
    # The original wrapped this in a needless inner closure that was built
    # and immediately called; a direct comprehension is equivalent.
    return [x + 1 for x in numbers]
| StarcoderdataPython |
1709341 | import os
import numpy as np
from array import array
from sklearn.metrics import mean_absolute_error
from skmultiflow.data import RegressionGenerator
from skmultiflow.trees import HoeffdingTreeRegressor
from difflib import SequenceMatcher
def test_hoeffding_tree_regressor():
    """Prequential check of HoeffdingTreeRegressor with mean leaf prediction:
    predictions, MAE, info string and model description must match the
    recorded fixtures (random_state pins the stream)."""
    stream = RegressionGenerator(n_samples=500, n_features=20, n_informative=15, random_state=1)

    learner = HoeffdingTreeRegressor(leaf_prediction='mean')

    cnt = 0
    max_samples = 500
    y_pred = array('d')
    y_true = array('d')
    wait_samples = 10

    while cnt < max_samples:
        X, y = stream.next_sample()
        # Test every n samples
        if (cnt % wait_samples == 0) and (cnt != 0):
            y_pred.append(learner.predict(X)[0])
            y_true.append(y[0])
        learner.partial_fit(X, y)
        cnt += 1

    expected_predictions = array('d', [102.38946041769101, 55.6584574987656, 5.746076599168373, 17.11797209372667,
                                       2.566888222752787, 9.188247802192826, 17.87894804676911, 15.940629626883966,
                                       8.981172175448485, 13.152624115190092, 11.106058099429399, 6.473195313058236,
                                       4.723621479590173, 13.825568609556493, 8.698873073880696, 1.6452441811010252,
                                       5.123496188584294, 6.34387187194982, 5.9977733790395105, 6.874251577667707,
                                       4.605348088338317, 8.20112636572672, 9.032631648758098, 4.428189978974459,
                                       4.249801041367518, 9.983272668044492, 12.859518508979734, 11.741395774380285,
                                       11.230028410261868, 9.126921979081521, 9.132146661688296, 7.750655625124709,
                                       6.445145118245414, 5.760928671876355, 4.041291302080659, 3.591837600560529,
                                       0.7640424010500604, 0.1738639840537784, 2.2068337802212286, -81.05302946841077,
                                       96.17757415335177, -77.35894903819677, 95.85568683733698, 99.1981674250886,
                                       99.89327888035015, 101.66673013734784, -79.1904234513751, -80.42952143783687,
                                       100.63954789983896])
    assert np.allclose(y_pred, expected_predictions)

    error = mean_absolute_error(y_true, y_pred)
    expected_error = 143.11351404083086
    assert np.isclose(error, expected_error)

    expected_info = "HoeffdingTreeRegressor(binary_split=False, grace_period=200, leaf_prediction='mean', " \
                    "learning_ratio_const=True, learning_ratio_decay=0.001, learning_ratio_perceptron=0.02, " \
                    "max_byte_size=33554432, memory_estimate_period=1000000, nb_threshold=0, no_preprune=False, " \
                    "nominal_attributes=None, random_state=None, remove_poor_atts=False, split_confidence=1e-07, " \
                    "stop_mem_management=False, tie_threshold=0.05)"
    # Normalize whitespace before comparing the info string.
    info = " ".join([line.strip() for line in learner.get_info().split()])
    assert info == expected_info

    assert isinstance(learner.get_model_description(), type(''))
    assert type(learner.predict(X)) == np.ndarray
def test_hoeffding_tree_regressor_perceptron():
    """Prequential check of HoeffdingTreeRegressor with perceptron leaves:
    MAE, info string and output types must match the recorded fixtures."""
    stream = RegressionGenerator(n_samples=500, n_features=20, n_informative=15, random_state=1)

    learner = HoeffdingTreeRegressor(leaf_prediction='perceptron', random_state=1)

    cnt = 0
    max_samples = 500
    y_pred = array('d')
    y_true = array('d')
    wait_samples = 10

    while cnt < max_samples:
        X, y = stream.next_sample()
        # Test every n samples
        if (cnt % wait_samples == 0) and (cnt != 0):
            y_pred.append(learner.predict(X)[0])
            y_true.append(y[0])
        learner.partial_fit(X, y)
        cnt += 1

    expected_predictions = array('d', [525.7553636732247, 352.8160300365902, 224.80744320456478,
                                       193.72837054292074, 132.6059603765031, 117.06974933197759,
                                       114.53342429855932, 89.37195405567235, 57.85335051891305,
                                       60.00883955911155, 47.263185779784266, 25.17616431074491,
                                       17.43259526890146, 47.33468996498019, 22.83975208548138,
                                       -7.659282840823236, 8.564101665071064, 14.61585289361161,
                                       11.560941733770441, 13.70120291865976, 1.1938438210799651,
                                       19.01970713481836, 21.23459424444584, -5.667473522309328,
                                       -5.203149619381393, 28.726275200889173, 41.03406433337882,
                                       27.950322712127267, 21.267116786963925, 5.53344652490152,
                                       6.753264259267268, -2.3288137435962213, -10.492766334689875,
                                       -11.19641058176631, -20.134685945295644, -19.36581990084085,
                                       -38.26894947177957, -34.90246284430353, -11.019543212232008,
                                       -22.016714766708127, -18.710456277443544, -20.5568019328217,
                                       -2.636583876625667, 24.787714491718187, 29.325261678088406,
                                       45.31267371823666, -48.271054430207776, -59.7649172085901,
                                       48.22724814037523])
    # NOTE(review): the element-wise prediction assertion is disabled; only
    # the aggregate MAE is checked below. Confirm whether the fixture above
    # is still expected to match exactly.
    # assert np.allclose(y_pred, expected_predictions)

    error = mean_absolute_error(y_true, y_pred)
    expected_error = 152.12931270533377
    assert np.isclose(error, expected_error)

    expected_info = "HoeffdingTreeRegressor(binary_split=False, grace_period=200, leaf_prediction='perceptron', " \
                    "learning_ratio_const=True, learning_ratio_decay=0.001, learning_ratio_perceptron=0.02, " \
                    "max_byte_size=33554432, memory_estimate_period=1000000, nb_threshold=0, no_preprune=False, " \
                    "nominal_attributes=None, random_state=1, remove_poor_atts=False, split_confidence=1e-07, " \
                    "stop_mem_management=False, tie_threshold=0.05)"
    # Normalize whitespace before comparing the info string.
    info = " ".join([line.strip() for line in learner.get_info().split()])
    assert info == expected_info

    assert isinstance(learner.get_model_description(), type(''))
    assert type(learner.predict(X)) == np.ndarray
def test_hoeffding_tree_regressor_coverage(test_path):
    """Coverage corner cases: nominal attributes, a misspelled
    leaf_prediction and an invalid split_criterion must all be handled
    without crashing."""
    # Cover nominal attribute observer
    test_file = os.path.join(test_path, 'regression_data.npz')
    data = np.load(test_file)
    X = data['X']
    y = data['y']

    # Typo in leaf prediction: the learner should fall back to a default.
    learner = HoeffdingTreeRegressor(
        leaf_prediction='percptron', nominal_attributes=list(range(3))
    )
    # Invalid split_criterion: also expected to be handled gracefully.
    learner.split_criterion = 'VR'
    learner.partial_fit(X, y)

    assert learner._estimator_type == 'regressor'
def test_hoeffding_tree_regressor_model_description():
    """Fit a mean-leaf tree on synthetic data and compare its textual
    description against a known-good snapshot (fuzzy match)."""
    generator = RegressionGenerator(
        n_samples=500, n_features=20, n_informative=15, random_state=1
    )
    model = HoeffdingTreeRegressor(leaf_prediction='mean')
    n_samples = 500
    X, y = generator.next_sample(n_samples)
    model.partial_fit(X, y)
    expected_description = "if Attribute 6 <= 0.1394515530995348:\n" \
                           " Leaf = Statistics {0: 276.0000, 1: -21537.4157, 2: 11399392.2187}\n" \
                           "if Attribute 6 > 0.1394515530995348:\n" \
                           " Leaf = Statistics {0: 224.0000, 1: 22964.8868, 2: 10433581.2534}\n"
    similarity = SequenceMatcher(
        None, expected_description, model.get_model_description()
    ).ratio()
    assert similarity > 0.9
def test_hoeffding_tree_regressor_categorical_features(test_path):
    """Fit on a purely nominal stream and compare the resulting split
    description against a known-good snapshot (fuzzy match)."""
    raw = np.load(os.path.join(test_path, 'ht_categorical_features_testcase.npy'))
    # Keep columns 0-6, dropping the class value in column 7
    # (np.arange(7) selects the same columns as np.delete(np.arange(8), 7))
    raw = raw[:, np.arange(7)]
    # Drop the last remaining column (used only for multi-target regression)
    raw = raw[:, :-1]
    X, y = raw[:, :-1], raw[:, -1]
    model = HoeffdingTreeRegressor(nominal_attributes=list(range(7)))
    model.partial_fit(X, y)
    expected_description = "if Attribute 4 = 0.0:\n" \
                           " Leaf = Statistics {0: 606.0000, 1: 1212.0000, 2: 3626.0000}\n" \
                           "if Attribute 4 = 1.0:\n" \
                           " Leaf = Statistics {0: 551.0000, 1: 1128.0000, 2: 3400.0000}\n" \
                           "if Attribute 4 = 2.0:\n" \
                           " Leaf = Statistics {0: 566.0000, 1: 1139.0000, 2: 3423.0000}\n" \
                           "if Attribute 4 = 3.0:\n" \
                           " Leaf = Statistics {0: 577.0000, 1: 1138.0000, 2: 3374.0000}\n" \
                           "if Attribute 4 = 4.0:\n" \
                           " Leaf = Statistics {0: 620.0000, 1: 1233.0000, 2: 3725.0000}\n" \
                           "if Attribute 4 = -3.0:\n" \
                           " Leaf = Statistics {0: 80.0000, 1: 163.0000, 2: 483.0000}\n"
    similarity = SequenceMatcher(
        None, expected_description, model.get_model_description()
    ).ratio()
    assert similarity > 0.9
| StarcoderdataPython |
3254881 | import os
import pandas as pd
import sandy
__author__ = "<NAME>"

# Public API exported by `from <module> import *`.
__all__ = [
    "ELEMENTS",
    "METASTATES",
    "NATURAL_ABUNDANCE",
    "abundance_per_element",
    "expand_za",
    # The original list repeated "expand_za" here; the second entry was
    # most likely meant to be the ZAM variant defined below.
    "expand_zam",
    "za2latex",
    "zam2latex",
]

# Display floats of pandas objects in scientific notation with 5 decimals.
pd.options.display.float_format = '{:.5e}'.format
# Chemical element symbols keyed by atomic number Z (1-118).
# Z = 112-118 now use the permanent IUPAC symbols (Cn, Nh, Fl, Mc, Lv,
# Ts, Og); the original table used obsolete provisional placeholders and
# had an erroneous 'UUp' for Z = 118 (duplicating Z = 115's symbol).
ELEMENTS = {
    1: 'H',
    2: 'He',
    3: 'Li',
    4: 'Be',
    5: 'B',
    6: 'C',
    7: 'N',
    8: 'O',
    9: 'F',
    10: 'Ne',
    11: 'Na',
    12: 'Mg',
    13: 'Al',
    14: 'Si',
    15: 'P',
    16: 'S',
    17: 'Cl',
    18: 'Ar',
    19: 'K',
    20: 'Ca',
    21: 'Sc',
    22: 'Ti',
    23: 'V',
    24: 'Cr',
    25: 'Mn',
    26: 'Fe',
    27: 'Co',
    28: 'Ni',
    29: 'Cu',
    30: 'Zn',
    31: 'Ga',
    32: 'Ge',
    33: 'As',
    34: 'Se',
    35: 'Br',
    36: 'Kr',
    37: 'Rb',
    38: 'Sr',
    39: 'Y',
    40: 'Zr',
    41: 'Nb',
    42: 'Mo',
    43: 'Tc',
    44: 'Ru',
    45: 'Rh',
    46: 'Pd',
    47: 'Ag',
    48: 'Cd',
    49: 'In',
    50: 'Sn',
    51: 'Sb',
    52: 'Te',
    53: 'I',
    54: 'Xe',
    55: 'Cs',
    56: 'Ba',
    57: 'La',
    58: 'Ce',
    59: 'Pr',
    60: 'Nd',
    61: 'Pm',
    62: 'Sm',
    63: 'Eu',
    64: 'Gd',
    65: 'Tb',
    66: 'Dy',
    67: 'Ho',
    68: 'Er',
    69: 'Tm',
    70: 'Yb',
    71: 'Lu',
    72: 'Hf',
    73: 'Ta',
    74: 'W',
    75: 'Re',
    76: 'Os',
    77: 'Ir',
    78: 'Pt',
    79: 'Au',
    80: 'Hg',
    81: 'Tl',
    82: 'Pb',
    83: 'Bi',
    84: 'Po',
    85: 'At',
    86: 'Rn',
    87: 'Fr',
    88: 'Ra',
    89: 'Ac',
    90: 'Th',
    91: 'Pa',
    92: 'U',
    93: 'Np',
    94: 'Pu',
    95: 'Am',
    96: 'Cm',
    97: 'Bk',
    98: 'Cf',
    99: 'Es',
    100: 'Fm',
    101: 'Md',
    102: 'No',
    103: 'Lr',
    104: 'Rf',
    105: 'Db',
    106: 'Sg',
    107: 'Bh',
    108: 'Hs',
    109: 'Mt',
    110: 'Ds',
    111: 'Rg',
    112: 'Cn',
    113: 'Nh',
    114: 'Fl',
    115: 'Mc',
    116: 'Lv',
    117: 'Ts',
    118: 'Og',
}
# One-letter labels for nuclear energy states, keyed by the metastable
# index M of a ZAM identifier: 0 is the ground state ("g"); 1-3 are
# successive isomeric states ("m", "n", "o").
METASTATES = {
    0: "g",
    1: "m",
    2: "n",
    3: "o",
}
# Natural isotopic abundances as atom fractions, keyed by ZAM identifier
# (Z*10000 + A*10 + M; all entries are ground states, M = 0).  For a
# given element the listed fractions sum to 1; see `abundance_per_element`
# for a view of this table grouped by atomic number.
NATURAL_ABUNDANCE = {
    10010: 0.99984426,
    10020: 0.00015574,
    20030: 2e-06,
    20040: 0.999998,
    30060: 0.07589,
    30070: 0.92411,
    40090: 1.0,
    50100: 0.1982,
    50110: 0.8018,
    60120: 0.988922,
    60130: 0.011078,
    70140: 0.996337,
    70150: 0.003663,
    80160: 0.9976206,
    80170: 0.000379,
    80180: 0.0020004,
    90190: 1.0,
    100200: 0.9048,
    100210: 0.0027,
    100220: 0.0925,
    110230: 1.0,
    120240: 0.78951,
    120250: 0.1002,
    120260: 0.11029,
    130270: 1.0,
    140280: 0.9222968,
    140290: 0.0468316,
    140300: 0.0308716,
    150310: 1.0,
    160320: 0.9504074,
    160330: 0.0074869,
    160340: 0.0419599,
    160360: 0.0001458,
    170350: 0.757647,
    170370: 0.242353,
    180360: 0.003336,
    180380: 0.000629,
    180400: 0.996035,
    190390: 0.932581,
    190400: 0.000117,
    190410: 0.067302,
    200400: 0.96941,
    200420: 0.00647,
    200430: 0.00135,
    200440: 0.02086,
    200460: 4e-05,
    200480: 0.00187,
    210450: 1.0,
    220460: 0.0825,
    220470: 0.0744,
    220480: 0.7372,
    220490: 0.0541,
    220500: 0.0518,
    230500: 0.0025,
    230510: 0.9975,
    240500: 0.04345,
    240520: 0.83789,
    240530: 0.09501,
    240540: 0.02365,
    250550: 1.0,
    260540: 0.05845,
    260560: 0.91754,
    260570: 0.02119,
    260580: 0.00282,
    270590: 1.0,
    280580: 0.680769,
    280600: 0.262231,
    280610: 0.011399,
    280620: 0.036345,
    280640: 0.009256,
    290630: 0.6915,
    290650: 0.3085,
    300640: 0.4917,
    300660: 0.2773,
    300670: 0.0404,
    300680: 0.1845,
    300700: 0.0061,
    310690: 0.60108,
    310710: 0.39892,
    320700: 0.2052,
    320720: 0.2745,
    320730: 0.0776,
    320740: 0.3652,
    320760: 0.0775,
    330750: 1.0,
    340740: 0.0086,
    340760: 0.0923,
    340770: 0.076,
    340780: 0.2369,
    340800: 0.498,
    340820: 0.0882,
    350790: 0.50686,
    350810: 0.49314,
    360780: 0.00355,
    360800: 0.02286,
    360820: 0.11593,
    360830: 0.115,
    360840: 0.56987,
    360860: 0.17279,
    370850: 0.7217,
    370870: 0.2783,
    380840: 0.0056,
    380860: 0.0986,
    380870: 0.07,
    380880: 0.8258,
    390890: 1.0,
    400900: 0.5145,
    400910: 0.1122,
    400920: 0.1715,
    400940: 0.1738,
    400960: 0.028,
    410930: 1.0,
    420920: 0.14649,
    420940: 0.09187,
    420950: 0.15873,
    420960: 0.16673,
    420970: 0.09582,
    420980: 0.24292,
    421000: 0.09744,
    440960: 0.0554,
    440980: 0.0187,
    440990: 0.1276,
    441000: 0.126,
    441010: 0.1706,
    441020: 0.3155,
    441040: 0.1862,
    451030: 1.0,
    461020: 0.0102,
    461040: 0.1114,
    461050: 0.2233,
    461060: 0.2733,
    461080: 0.2646,
    461100: 0.1172,
    471070: 0.51839,
    471090: 0.48161,
    481060: 0.01245,
    481080: 0.00888,
    481100: 0.1247,
    481110: 0.12795,
    481120: 0.24109,
    481130: 0.12227,
    481140: 0.28754,
    481160: 0.07512,
    491130: 0.04281,
    491150: 0.95719,
    501120: 0.0097,
    501140: 0.0066,
    501150: 0.0034,
    501160: 0.1454,
    501170: 0.0768,
    501180: 0.2422,
    501190: 0.0859,
    501200: 0.3258,
    501220: 0.0463,
    501240: 0.0579,
    511210: 0.5721,
    511230: 0.4279,
    521200: 0.0009,
    521220: 0.0255,
    521230: 0.0089,
    521240: 0.0474,
    521250: 0.0707,
    521260: 0.1884,
    521280: 0.3174,
    521300: 0.3408,
    531270: 1.0,
    541240: 0.00095,
    541260: 0.00089,
    541280: 0.0191,
    541290: 0.26401,
    541300: 0.04071,
    541310: 0.21232,
    541320: 0.26909,
    541340: 0.10436,
    541360: 0.08857,
    551330: 1.0,
    561300: 0.0011,
    561320: 0.001,
    561340: 0.0242,
    561350: 0.0659,
    561360: 0.0785,
    561370: 0.1123,
    561380: 0.717,
    571380: 0.0008881,
    571390: 0.9991119,
    581360: 0.00186,
    581380: 0.00251,
    581400: 0.88449,
    581420: 0.11114,
    591410: 1.0,
    601420: 0.27153,
    601430: 0.12173,
    601440: 0.23798,
    601450: 0.08293,
    601460: 0.17189,
    601480: 0.05756,
    601500: 0.05638,
    621440: 0.0308,
    621470: 0.15,
    621480: 0.1125,
    621490: 0.1382,
    621500: 0.0737,
    621520: 0.2674,
    621540: 0.2274,
    631510: 0.4781,
    631530: 0.5219,
    641520: 0.002,
    641540: 0.0218,
    641550: 0.148,
    641560: 0.2047,
    641570: 0.1565,
    641580: 0.2484,
    641600: 0.2186,
    651590: 1.0,
    661560: 0.00056,
    661580: 0.00095,
    661600: 0.02329,
    661610: 0.18889,
    661620: 0.25475,
    661630: 0.24896,
    661640: 0.2826,
    671650: 1.0,
    681620: 0.00139,
    681640: 0.01601,
    681660: 0.33503,
    681670: 0.22869,
    681680: 0.26978,
    681700: 0.1491,
    691690: 1.0,
    701680: 0.00123,
    701700: 0.02982,
    701710: 0.14086,
    701720: 0.21686,
    701730: 0.16103,
    701740: 0.32025,
    701760: 0.12995,
    711750: 0.97401,
    711760: 0.02599,
    721740: 0.0016,
    721760: 0.0526,
    721770: 0.186,
    721780: 0.2728,
    721790: 0.1362,
    721800: 0.3508,
    731800: 0.0001201,
    731810: 0.9998799,
    741800: 0.0012,
    741820: 0.265,
    741830: 0.1431,
    741840: 0.3064,
    741860: 0.2843,
    751850: 0.374,
    751870: 0.626,
    761840: 0.0002,
    761860: 0.0159,
    761870: 0.0196,
    761880: 0.1324,
    761890: 0.1615,
    761900: 0.2626,
    761920: 0.4078,
    771910: 0.373,
    771930: 0.627,
    781900: 0.00012,
    781920: 0.00782,
    781940: 0.32864,
    781950: 0.33775,
    781960: 0.25211,
    781980: 0.07356,
    791970: 1.0,
    801960: 0.0015,
    801980: 0.1004,
    801990: 0.1694,
    802000: 0.2314,
    802010: 0.1317,
    802020: 0.2974,
    802040: 0.0682,
    812030: 0.29524,
    812050: 0.70476,
    822040: 0.014,
    822060: 0.241,
    822070: 0.221,
    822080: 0.524,
    832090: 1.0,
    902300: 0.0002,
    902320: 0.9998,
    912310: 1.0,
    922340: 5.4e-05,
    922350: 0.007204,
    922380: 0.992742,
}
def abundance_per_element():
    """
    Group `NATURAL_ABUNDANCE` by atomic number.

    Returns
    -------
    `dict`
        mapping Z -> {zam: abundance} for every naturally occurring isotope
    """
    grouped = {}
    for zam, fraction in NATURAL_ABUNDANCE.items():
        z, _, _ = expand_zam(zam)
        grouped.setdefault(z, {})[zam] = fraction
    return grouped
def expand_za(za, method="nndc", meta=0):
    """
    Split a ZA identifier into its (Z, A, M) components.

    With ``method="nndc"`` the metastable flag is assumed packed into the
    mass number as ``A + 300 + M*100``; only M = 1 can be recovered
    unambiguously that way, so M is set to 1 whenever the packed mass
    number is >= 300.  With any other method, M is taken from *meta*.
    """
    z = int(za // 1000)
    a = int(za) - z * 1000
    if method == "nndc":
        if a >= 300:
            m = 1
            a -= 400  # undo the A + 300 + m*100 packing for m = 1
        else:
            m = 0
    else:
        m = int(meta)
    return z, a, m
def get_za(z, a, m, method="nndc"):
    """
    Pack (Z, A, M) into a ZA identifier; returns the tuple ``(za, m)``.

    With ``method="nndc"`` a non-ground state is encoded by adding
    ``300 + M*100`` to the mass number; otherwise M is left out of ZA.
    """
    za = z * 1000 + a
    if method == "nndc" and m != 0:
        za += 300 + 100 * m
    return int(za), m
def expand_zam(zam):
    """Split a ZAM identifier (``Z*10000 + A*10 + M``) into (Z, A, M)."""
    z, rest = divmod(int(zam), 10000)
    a, m = divmod(rest, 10)
    return z, a, m
def get_zam(z, a, m):
    """Pack (Z, A, M) into a single ZAM identifier ``Z*10000 + A*10 + M``."""
    return int(10000 * z + 10 * a + m)
def za2zam(za, method="nndc", meta=0):
    """Convert a ZA identifier into the corresponding ZAM identifier."""
    z, a, m = expand_za(za, method=method, meta=meta)
    return get_zam(z, a, m)
def zam2za(zam, method="nndc"):
    """Convert a ZAM identifier into ``(za, m)``."""
    return get_za(*expand_zam(zam), method=method)
def z2sym(z):
    """Return the chemical symbol for atomic number *z*
    (raises `KeyError` if *z* is not in `ELEMENTS`)."""
    return ELEMENTS[z]
def za2latex(za):
    """Render a ZA identifier as a LaTeX nuclide label,
    e.g. ``92235 -> '$^{235}$U'`` (the metastable flag is ignored)."""
    z, a, _ = expand_za(za)
    return "$^{" + f"{a}" + "}$" + z2sym(z)
def zam2latex(zam):
    """
    Render a ZAM identifier as a LaTeX nuclide label.

    Parameters
    ----------
    zam : `int`
        nuclide ZAM indicator, e.g. 952421 for Am-242m

    Returns
    -------
    `str`
        LaTeX string such as ``'$^{242m}$Am'``
    """
    z, a, m = expand_zam(zam)
    # Delegate the isomer letter to `get_meta_letter` so every state in
    # METASTATES is covered: the previous inline if/elif handled m=1 ('m')
    # and m=2 ('n') but silently dropped m=3 ('o').
    meta = get_meta_letter(m, skip_ground=True)
    string = "$^{" + f"{a}{meta}" + "}$"
    sym = z2sym(z)
    string += f"{sym}"
    return string
def zam2nuclide(zam, atomic_number=False, sep=""):
    """
    Turn a ZAM identifier into a nuclide name, e.g. ``922350 -> 'U235'``
    and ``952421 -> 'Am242m'``.

    Parameters
    ----------
    zam : `int`
        nuclide ZAM indicator
    atomic_number : `bool`, optional, default is `False`
        prepend the atomic number Z to the name
    sep : `str`, optional, default is `''`
        separator inserted between Z (if present), the element symbol
        and the mass number

    Returns
    -------
    `str`
        nuclide name built from symbol, mass number and isomer letter

    Examples
    --------
    >>> zam2nuclide(922350)
    'U235'

    >>> zam2nuclide(922350, atomic_number=True, sep="-")
    '92-U-235'

    >>> zam2nuclide(952421)
    'Am242m'

    >>> zam2nuclide(952421, atomic_number=True, sep="_")
    '95_Am_242m'

    >>> zam2nuclide(952422)
    'Am242n'
    """
    z, a, m = expand_zam(zam)
    pieces = [z2sym(z), sep, str(a), get_meta_letter(m, skip_ground=True)]
    if atomic_number:
        pieces = [str(z), sep] + pieces
    return "".join(pieces)
def get_meta_letter(m, skip_ground=False):
    """Return the isomer letter for metastable index *m*; with
    *skip_ground* true the ground state maps to ``''`` instead of ``'g'``.
    Raises `KeyError` for indices not listed in `METASTATES`."""
    if skip_ground and m == 0:
        return ""
    return METASTATES[m]
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.