import six
import unittest
import os
from parserutils.collections import wrap_value
from parserutils.elements import element_exists, element_to_dict, element_to_string
from parserutils.elements import clear_element, get_element_text, get_elements, get_remote_element
from parserutils.elements import insert_element, remove_element, remove_element_attributes, set_element_attributes
from gis_metadata.arcgis_metadata_parser import ArcGISParser, ARCGIS_NODES, ARCGIS_ROOTS
from gis_metadata.fgdc_metadata_parser import FgdcParser, FGDC_ROOT
from gis_metadata.iso_metadata_parser import IsoParser, ISO_ROOTS, _iso_tag_formats
from gis_metadata.metadata_parser import MetadataParser, get_metadata_parser, get_parsed_content
from gis_metadata.exceptions import ConfigurationError, InvalidContent, NoContent, ValidationError
from gis_metadata.utils import format_xpaths, get_complex_definitions, get_default_for_complex, get_supported_props
from gis_metadata.utils import DATE_TYPE, DATE_VALUES
from gis_metadata.utils import DATE_TYPE_SINGLE, DATE_TYPE_RANGE, DATE_TYPE_MISSING, DATE_TYPE_MULTIPLE
from gis_metadata.utils import ATTRIBUTES, CONTACTS, DIGITAL_FORMS, PROCESS_STEPS
from gis_metadata.utils import BOUNDING_BOX, DATES, LARGER_WORKS, RASTER_INFO
from gis_metadata.utils import KEYWORDS_PLACE, KEYWORDS_STRATUM, KEYWORDS_TEMPORAL, KEYWORDS_THEME
from gis_metadata.utils import ParserProperty
iteritems = getattr(six, 'iteritems')
StringIO = getattr(six, 'StringIO')
KEYWORD_PROPS = (KEYWORDS_PLACE, KEYWORDS_STRATUM, KEYWORDS_TEMPORAL, KEYWORDS_THEME)
TEST_TEMPLATE_VALUES = {
'dist_contact_org': 'ORG',
'dist_contact_person': 'PERSON',
'dist_address_type': 'PHYSICAL ADDRESS',
'dist_address': 'ADDRESS LOCATION',
'dist_city': 'CITY',
'dist_state': 'STATE',
'dist_postal': '12345',
'dist_country': 'USA',
'dist_phone': '123-456-7890',
'dist_email': 'EMAIL@DOMAIN.COM',
}
TEST_METADATA_VALUES = {
'abstract': 'Test Abstract',
'attribute_accuracy': 'Test Attribute Accuracy',
'attributes': [{
'definition': 'Attributes Definition 1',
'label': 'Attributes Label 1',
'aliases': 'Attributes Alias 1',
'definition_source': 'Attributes Definition Source 1'
}, {
'definition': 'Attributes Definition 2',
'label': 'Attributes Label 2',
'aliases': 'Attributes Alias 2',
'definition_source': 'Attributes Definition Source 2'
}, {
'definition': 'Attributes Definition 3',
'label': 'Attributes Label 3',
'aliases': 'Attributes Alias 3',
'definition_source': 'Attributes Definition Source 3'
}],
'bounding_box': {
'east': '179.99999999998656',
'north': '87.81211601444309',
'west': '-179.99999999998656',
'south': '-86.78249642712764'
},
'contacts': [{
'name': 'Contact Name 1', 'email': 'Contact Email 1',
'position': 'Contact Position 1', 'organization': 'Contact Organization 1'
}, {
'name': 'Contact Name 2', 'email': 'Contact Email 2',
'position': 'Contact Position 2', 'organization': 'Contact Organization 2'
}],
'dataset_completeness': 'Test Dataset Completeness',
'data_credits': 'Test Data Credits',
'dates': {'type': 'multiple', 'values': ['Multiple Date 1', 'Multiple Date 2', 'Multiple Date 3']},
'digital_forms': [{
'access_desc': 'Digital Form Access Description 1',
'version': 'Digital Form Version 1',
'specification': 'Digital Form Specification 1',
'access_instrs': 'Digital Form Access Instructions 1',
'name': 'Digital Form Name 1',
'network_resource': 'Digital Form Resource 1',
'content': 'Digital Form Content 1',
'decompression': 'Digital Form Decompression 1'
}, {
'access_desc': 'Digital Form Access Description 2',
'version': 'Digital Form Version 2',
'specification': 'Digital Form Specification 2',
'access_instrs': 'Digital Form Access Instructions 2',
'name': 'Digital Form Name 2',
'network_resource': 'Digital Form Resource 2',
'content': 'Digital Form Content 2',
'decompression': 'Digital Form Decompression 2'
}],
'dist_address': 'Test Distribution Address',
'dist_address_type': 'Test Distribution Address Type',
'dist_city': 'Test Distribution City',
'dist_contact_org': 'Test Distribution Org',
'dist_contact_person': 'Test Distribution Person',
'dist_country': 'US',
'dist_email': 'Test Distribution Email',
'dist_liability': 'Test Distribution Liability',
'dist_phone': 'Test Distribution Phone',
'dist_postal': '12345',
'dist_state': 'OR',
'larger_works': {
'publish_place': 'Larger Works Place',
'publish_info': 'Larger Works Info',
'other_citation': 'Larger Works Other Citation',
'online_linkage': 'http://test.largerworks.online.linkage.com',
'publish_date': 'Larger Works Date',
'title': 'Larger Works Title',
'edition': 'Larger Works Edition',
'origin': ['Larger Works Originator']
},
'raster_info': {
'dimensions': 'Test # Dimensions',
'row_count': 'Test Row Count',
'column_count': 'Test Column Count',
'vertical_count': 'Test Vertical Count',
'x_resolution': 'Test X Resolution',
'y_resolution': 'Test Y Resolution',
},
'online_linkages': 'http://test.onlinelinkages.org',
'originators': 'Test Originators',
'other_citation_info': 'Test Other Citation Info',
'place_keywords': ['Oregon', 'Washington'],
'process_steps': [{
'sources': ['Process Step Sources 1.1', 'Process Step Sources 1.2'],
'description': 'Process Step Description 1',
'date': 'Process Step Date 1'
}, {
'sources': [],
'description': 'Process Step Description 2',
'date': ''
}, {
'sources': [], 'description': '', 'date': 'Process Step Date 3'
}, {
'sources': ['Process Step Sources 4.1', 'Process Step Sources 4.2'],
'description': 'Process Step Description 4',
'date': ''
}],
'processing_fees': 'Test Processing Fees',
'processing_instrs': 'Test Processing Instructions',
'purpose': 'Test Purpose',
'publish_date': 'Test Publish Date',
'resource_desc': 'Test Resource Description',
'stratum_keywords': ['Layer One', 'Layer Two'],
'supplementary_info': 'Test Supplementary Info',
'tech_prerequisites': 'Test Technical Prerequisites',
'temporal_keywords': ['Now', 'Later'],
'thematic_keywords': ['Ecoregion', 'Risk', 'Threat', 'Habitat'],
'title': 'Test Title',
'use_constraints': 'Test Use Constraints'
}
class MetadataParserTestCase(unittest.TestCase):
valid_complex_values = ('one', ['before', 'after'], ['first', 'next', 'last'])
def setUp(self):
sep = os.path.sep
dir_name = os.path.dirname(os.path.abspath(__file__))
self.data_dir = sep.join((dir_name, 'data'))
self.arcgis_file = sep.join((self.data_dir, 'arcgis_metadata.xml'))
self.fgdc_file = sep.join((self.data_dir, 'fgdc_metadata.xml'))
self.iso_file = sep.join((self.data_dir, 'iso_metadata.xml'))
# Initialize metadata files
self.arcgis_metadata = open(self.arcgis_file)
self.fgdc_metadata = open(self.fgdc_file)
self.iso_metadata = open(self.iso_file)
self.metadata_files = (self.arcgis_metadata, self.fgdc_metadata, self.iso_metadata)
# Define test file paths
self.test_arcgis_file_path = '/'.join((self.data_dir, 'test_arcgis.xml'))
self.test_fgdc_file_path = '/'.join((self.data_dir, 'test_fgdc.xml'))
self.test_iso_file_path = '/'.join((self.data_dir, 'test_iso.xml'))
self.test_file_paths = (self.test_arcgis_file_path, self.test_fgdc_file_path, self.test_iso_file_path)
def assert_equal_for(self, parser_type, prop, value, target):
self.assertEqual(
value, target,
'Parser property "{0}.{1}" does not equal target:{2}'.format(
parser_type, prop, '\n\tparsed: "{0}" ({1})\n\texpected: "{2}" ({3})'.format(
value, type(value).__name__, target, type(target).__name__
)
)
)
def assert_reparsed_complex_for(self, parser, prop, value, target):
setattr(parser, prop, value)
parser_type = type(parser)
parser_name = parser_type.__name__
reparsed = getattr(parser_type(parser.serialize()), prop)
if prop in get_complex_definitions():
target = get_default_for_complex(prop, target)
if isinstance(reparsed, dict):
# Reparsed is a dict: compare each value with corresponding in target
for key, val in iteritems(reparsed):
self.assert_equal_for(
parser_name, '{0}.{1}'.format(prop, key), val, target.get(key, u'')
)
elif len(reparsed) <= 1:
# Reparsed is empty or a single-item list: do a single value comparison
self.assert_equal_for(parser_name, prop, reparsed, target)
else:
# Reparsed is a multiple-item list: compare each value with corresponding in target
for idx, value in enumerate(reparsed):
if not isinstance(value, dict):
self.assert_equal_for(parser_name, '{0}[{1}]'.format(prop, idx), value, target[idx])
else:
for key, val in iteritems(value):
self.assert_equal_for(
parser_name, '{0}.{1}'.format(prop, key), val, target[idx].get(key, u'')
)
def assert_reparsed_simple_for(self, parser, props, value=None, target=None):
use_props = isinstance(props, dict)
for prop in props:
if use_props:
value = props[prop]
setattr(parser, prop, value)
parser_type = type(parser)
parser_name = parser_type.__name__
reparsed = parser_type(parser.serialize())
for prop in props:
if use_props:
target = props[prop]
self.assert_equal_for(parser_name, prop, getattr(reparsed, prop), target)
def assert_parser_conversion(self, content_parser, target_parser, comparison_type=''):
converted = content_parser.convert_to(target_parser)
self.assert_valid_parser(content_parser)
self.assert_valid_parser(target_parser)
self.assertFalse(
converted is target_parser,
'{0} conversion is returning the original {0} instance'.format(type(converted).__name__)
)
for prop in get_supported_props():
self.assertEqual(
getattr(content_parser, prop), getattr(converted, prop),
'{0} {1}conversion does not equal original {2} content for {3}'.format(
type(converted).__name__, comparison_type, type(content_parser).__name__, prop
)
)
def assert_parsers_are_equal(self, parser_tgt, parser_val):
parser_type = type(parser_tgt).__name__
self.assert_valid_parser(parser_tgt)
self.assert_valid_parser(parser_val)
for prop in get_supported_props():
self.assert_equal_for(parser_type, prop, getattr(parser_val, prop), getattr(parser_tgt, prop))
def assert_parser_after_write(self, parser_type, in_file, out_file_path, use_template=False):
parser = parser_type(in_file, out_file_path)
complex_defs = get_complex_definitions()
# Update each value and read the file in again
for prop in get_supported_props():
if prop in (ATTRIBUTES, CONTACTS, DIGITAL_FORMS, PROCESS_STEPS):
value = [
{}.fromkeys(complex_defs[prop], 'test'),
{}.fromkeys(complex_defs[prop], prop)
]
elif prop in (BOUNDING_BOX, LARGER_WORKS, RASTER_INFO):
value = {}.fromkeys(complex_defs[prop], 'test ' + prop)
elif prop == DATES:
value = {DATE_TYPE: DATE_TYPE_RANGE, DATE_VALUES: ['test', prop]}
elif prop in KEYWORD_PROPS:
value = ['test', prop]
else:
value = 'test ' + prop
if prop in get_complex_definitions():
value = get_default_for_complex(prop, value)
setattr(parser, prop, value)
parser.write(use_template=use_template)
with open(out_file_path) as out_file:
self.assert_parsers_are_equal(parser, parser_type(out_file))
def assert_valid_parser(self, parser):
parser_type = type(parser.validate()).__name__
self.assertIsNotNone(parser._xml_root, '{0} root not set'.format(parser_type))
self.assertIsNotNone(parser._xml_tree)
self.assertEqual(parser._xml_tree.getroot().tag, parser._xml_root)
def assert_validates_for(self, parser, prop, invalid):
valid = getattr(parser, prop)
setattr(parser, prop, invalid)
try:
parser.validate()
except Exception as e:
# Not using self.assertRaises to customize the failure message
self.assertEqual(type(e), ValidationError, (
'Property "{0}.{1}" does not raise ParserError for value: "{2}" ({3})'.format(
type(parser).__name__, prop, invalid, type(invalid).__name__
)
))
finally:
setattr(parser, prop, valid) # Reset value for next test
def tearDown(self):
for metadata_file in self.metadata_files:
metadata_file.close()
for test_file_path in self.test_file_paths:
if os.path.exists(test_file_path):
os.remove(test_file_path)
class MetadataParserTemplateTests(MetadataParserTestCase):
def assert_template_after_write(self, parser_type, out_file_path):
parser = parser_type(out_file_or_path=out_file_path)
# Reverse each value and read the file in again
for prop, val in iteritems(TEST_TEMPLATE_VALUES):
setattr(parser, prop, val[::-1])
parser.write()
with open(out_file_path) as out_file:
self.assert_parsers_are_equal(parser, parser_type(out_file))
def assert_valid_template(self, parser, root):
parser_type = type(parser.validate()).__name__
self.assertIsNotNone(parser._xml_root, '{0} root not set'.format(parser_type))
self.assertEqual(parser._xml_root, root)
self.assertIsNotNone(parser._xml_tree)
self.assertEqual(parser._xml_tree.getroot().tag, parser._xml_root)
for prop, val in iteritems(TEST_TEMPLATE_VALUES):
parsed_val = getattr(parser, prop)
self.assertEqual(parsed_val, val, (
'{0} property {1}, "{2}", does not equal "{3}"'.format(parser_type, prop, parsed_val, val)
))
def test_arcgis_template_values(self):
arcgis_template = ArcGISParser(**TEST_TEMPLATE_VALUES)
self.assert_valid_template(arcgis_template, root='metadata')
self.assert_reparsed_simple_for(arcgis_template, TEST_TEMPLATE_VALUES)
def test_fgdc_template_values(self):
fgdc_template = FgdcParser(**TEST_TEMPLATE_VALUES)
self.assert_valid_template(fgdc_template, root='metadata')
self.assert_reparsed_simple_for(fgdc_template, TEST_TEMPLATE_VALUES)
def test_iso_template_values(self):
iso_template = IsoParser(**TEST_TEMPLATE_VALUES)
self.assert_valid_template(iso_template, root='MD_Metadata')
self.assert_reparsed_simple_for(iso_template, TEST_TEMPLATE_VALUES)
def test_template_conversion(self):
arcgis_template = ArcGISParser()
fgdc_template = FgdcParser()
iso_template = IsoParser()
self.assert_parser_conversion(arcgis_template, fgdc_template, 'template')
self.assert_parser_conversion(arcgis_template, iso_template, 'template')
self.assert_parser_conversion(fgdc_template, iso_template, 'template')
self.assert_parser_conversion(fgdc_template, arcgis_template, 'template')
self.assert_parser_conversion(iso_template, fgdc_template, 'template')
self.assert_parser_conversion(iso_template, arcgis_template, 'template')
def test_template_conversion_bad_roots(self):
bad_root_format = 'Bad root test failed for {0} with {1}'
for bad_root in (None, u'', StringIO(u''), {}, '<?xml version="1.0" encoding="UTF-8"?>\n'):
with self.assertRaises(NoContent, msg=bad_root_format.format('get_parsed_content', bad_root)):
get_parsed_content(bad_root)
with self.assertRaises(NoContent, msg=bad_root_format.format('get_parsed_content', bad_root)):
get_metadata_parser(bad_root)
if bad_root is not None:
with self.assertRaises(NoContent, msg=bad_root_format.format('ArcGISParser', bad_root)):
ArcGISParser(bad_root)
with self.assertRaises(NoContent, msg=bad_root_format.format('FgdcParser', bad_root)):
FgdcParser(bad_root)
with self.assertRaises(NoContent, msg=bad_root_format.format('IsoParser', bad_root)):
IsoParser(bad_root)
for bad_root in (u'NOT XML', u'<badRoot/>', u'<badRoot>invalid</badRoot>'):
with self.assertRaises(InvalidContent, msg=bad_root_format.format('get_parsed_content', bad_root)):
get_parsed_content(bad_root)
with self.assertRaises(InvalidContent, msg=bad_root_format.format('get_parsed_content', bad_root)):
get_metadata_parser(bad_root)
with self.assertRaises(InvalidContent, msg=bad_root_format.format('ArcGISParser', bad_root)):
ArcGISParser(bad_root)
with self.assertRaises(InvalidContent, msg=bad_root_format.format('FgdcParser', bad_root)):
FgdcParser(bad_root)
with self.assertRaises(InvalidContent, msg=bad_root_format.format('IsoParser', bad_root)):
IsoParser(bad_root)
with self.assertRaises(InvalidContent, msg=bad_root_format.format('get_parsed_content', bad_root)):
IsoParser(FGDC_ROOT.join(('<', '></', '>')))
for iso_root in ISO_ROOTS:
with self.assertRaises(InvalidContent, msg=bad_root_format.format('get_parsed_content', bad_root)):
ArcGISParser(iso_root.join(('<', '></', '>')))
with self.assertRaises(InvalidContent, msg=bad_root_format.format('get_parsed_content', bad_root)):
FgdcParser(iso_root.join(('<', '></', '>')))
for arcgis_root in ARCGIS_ROOTS:
with self.assertRaises(InvalidContent, msg=bad_root_format.format('get_parsed_content', bad_root)):
IsoParser(arcgis_root.join(('<', '></', '>')))
if arcgis_root != FGDC_ROOT:
with self.assertRaises(InvalidContent, msg=bad_root_format.format('get_parsed_content', bad_root)):
FgdcParser(arcgis_root.join(('<', '></', '>')))
def test_template_conversion_from_dict(self):
for arcgis_root in ARCGIS_ROOTS:
for arcgis_node in ARCGIS_NODES:
data = {'name': arcgis_root, 'children': [{'name': arcgis_node}]}
self.assert_parser_conversion(
FgdcParser(), get_metadata_parser(data), 'dict-based template'
)
self.assert_parser_conversion(
IsoParser(), get_metadata_parser(data), 'dict-based template'
)
self.assert_parser_conversion(
ArcGISParser(), get_metadata_parser({'name': FGDC_ROOT}), 'dict-based template'
)
self.assert_parser_conversion(
IsoParser(), get_metadata_parser({'name': FGDC_ROOT}), 'dict-based template'
)
for iso_root in ISO_ROOTS:
self.assert_parser_conversion(
ArcGISParser(), get_metadata_parser({'name': iso_root}), 'dict-based template'
)
self.assert_parser_conversion(
FgdcParser(), get_metadata_parser({'name': iso_root}), 'dict-based template'
)
def test_template_conversion_from_str(self):
for arcgis_root in ARCGIS_ROOTS:
for arcgis_node in ARCGIS_NODES:
data = arcgis_node.join(('<', '></', '>'))
data = arcgis_root.join(('<', '>{0}</', '>')).format(data)
self.assert_parser_conversion(
FgdcParser(), get_metadata_parser(data), 'dict-based template'
)
self.assert_parser_conversion(
IsoParser(), get_metadata_parser(data), 'dict-based template'
)
self.assert_parser_conversion(
ArcGISParser(), get_metadata_parser(FGDC_ROOT.join(('<', '></', '>'))), 'str-based template'
)
self.assert_parser_conversion(
IsoParser(), get_metadata_parser(FGDC_ROOT.join(('<', '></', '>'))), 'str-based template'
)
for iso_root in ISO_ROOTS:
self.assert_parser_conversion(
ArcGISParser(), get_metadata_parser(iso_root.join(('<', '></', '>'))), 'str-based template'
)
self.assert_parser_conversion(
FgdcParser(), get_metadata_parser(iso_root.join(('<', '></', '>'))), 'str-based template'
)
def test_template_conversion_from_type(self):
self.assert_parser_conversion(
ArcGISParser(), get_metadata_parser(FgdcParser), 'type-based template'
)
self.assert_parser_conversion(
ArcGISParser(), get_metadata_parser(IsoParser), 'type-based template'
)
self.assert_parser_conversion(
IsoParser(), get_metadata_parser(ArcGISParser), 'type-based template'
)
self.assert_parser_conversion(
IsoParser(), get_metadata_parser(FgdcParser), 'type-based template'
)
self.assert_parser_conversion(
FgdcParser(), get_metadata_parser(ArcGISParser), 'type-based template'
)
self.assert_parser_conversion(
FgdcParser(), get_metadata_parser(IsoParser), 'type-based template'
)
def test_write_template(self):
self.assert_template_after_write(ArcGISParser, self.test_arcgis_file_path)
self.assert_template_after_write(FgdcParser, self.test_fgdc_file_path)
self.assert_template_after_write(IsoParser, self.test_iso_file_path)
class MetadataParserTests(MetadataParserTestCase):
def test_custom_parser(self):
""" Covers support for custom parsers """
target_values = {
'metadata_contacts': [{
'name': 'Custom Contact Name', 'email': 'Custom Contact Email', 'phone': 'Custom Contact Phone',
'position': 'Custom Contact Position', 'organization': 'Custom Contact Organization'
}],
'metadata_language': ['eng', 'esp']
}
custom_parser = CustomIsoParser(self.iso_metadata)
for prop in target_values:
self.assertEqual(getattr(custom_parser, prop), target_values[prop], 'Custom parser values were not parsed')
complex_val = {
'name': 'Changed Contact Name', 'email': 'Changed Contact Email', 'phone': 'Changed Contact Phone',
'position': 'Changed Contact Position', 'organization': 'Changed Contact Organization'
}
self.assert_reparsed_complex_for(custom_parser, 'metadata_contacts', complex_val, [complex_val])
self.assert_reparsed_simple_for(custom_parser, ['metadata_language'], ['en', 'es'], ['en', 'es'])
# Test conversion with custom props
converted_parser = custom_parser.convert_to(CustomIsoParser)
self.assert_parsers_are_equal(custom_parser, converted_parser)
self.assertEqual(converted_parser.metadata_contacts, custom_parser.metadata_contacts)
self.assertEqual(converted_parser.metadata_language, custom_parser.metadata_language)
# Test conversion that must ignore custom props
agis_parser = custom_parser.convert_to(ArcGISParser)
fgdc_parser = custom_parser.convert_to(FgdcParser)
self.assert_parsers_are_equal(agis_parser, fgdc_parser)
# Test invalid custom complex structure value
metadata_contacts = custom_parser.metadata_contacts
custom_parser.metadata_contacts = u'None'
with self.assertRaises(ValidationError):
custom_parser.validate()
custom_parser.metadata_contacts = metadata_contacts
# Test invalid custom simple value
metadata_language = custom_parser.metadata_language
custom_parser.metadata_language = {}
with self.assertRaises(ValidationError):
custom_parser.validate()
custom_parser.metadata_language = metadata_language
def test_generic_parser(self):
""" Covers code that enforces certain behaviors for custom parsers """
parser = MetadataParser()
prop_get = '{0}'.format
prop_set = '{xpaths}'.format
with self.assertRaises(ConfigurationError):
# Un-callable property parser (no xpath)
ParserProperty(None, None)
with self.assertRaises(ConfigurationError):
# Un-callable property parser (no xpath)
ParserProperty(None, prop_set)
with self.assertRaises(ConfigurationError):
# Un-callable property updater
ParserProperty(prop_get, None)
parser_prop = ParserProperty(None, prop_set, 'path')
with self.assertRaises(ConfigurationError):
# Un-callable property parser with xpath
parser_prop.get_prop('prop')
parser_prop = ParserProperty(prop_get, prop_set, 'path')
self.assertEqual(parser_prop.get_prop('prop'), 'prop')
self.assertEqual(parser_prop.set_prop(), 'path')
self.assertEqual(parser_prop.set_prop(xpaths='diff'), 'path')
data_map_1 = parser._data_map
parser._init_data_map()
data_map_2 = parser._data_map
self.assertIs(data_map_1, data_map_2, 'Data map was reinitialized after instantiation')
with self.assertRaises(IOError):
parser.write()
def test_specific_parsers(self):
""" Ensures code enforces certain behaviors for existing parsers """
for parser_type in (ArcGISParser, FgdcParser, IsoParser):
parser = parser_type()
data_map_1 = parser._data_map
parser._init_data_map()
data_map_2 = parser._data_map
self.assertIs(data_map_1, data_map_2, 'Data map was reinitialized after instantiation')
with self.assertRaises(IOError):
parser.write()
with self.assertRaises(ValidationError):
parser._data_map.clear()
parser.validate()
def test_arcgis_parser(self):
""" Tests behavior unique to the ArcGIS parser """
# Test dates structure defaults
# Remove multiple dates to ensure range is queried
arcgis_element = get_remote_element(self.arcgis_file)
remove_element(arcgis_element, 'dataIdInfo/dataExt/tempEle/TempExtent/exTemp/TM_Instant', True)
arcgis_parser = ArcGISParser(element_to_string(arcgis_element))
# Assert that ArcGIS-specific keywords are read in correctly
self.assertEqual(arcgis_parser.discipline_keywords, ['ArcGIS Discipline One', 'ArcGIS Discipline Two'])
self.assertEqual(arcgis_parser.other_keywords, ['ArcGIS Other One', 'ArcGIS Other Two'])
self.assertEqual(arcgis_parser.product_keywords, ['ArcGIS Product One', 'ArcGIS Product Two'])
self.assertEqual(arcgis_parser.search_keywords, ['ArcGIS Search One', 'ArcGIS Search Two'])
self.assertEqual(arcgis_parser.topic_category_keywords, ['ArcGIS Topical One', 'ArcGIS Topical Two'])
# Assert that the backup dates are read in successfully
self.assertEqual(arcgis_parser.dates, {'type': 'range', 'values': ['Date Range Start', 'Date Range End']})
# Remove one of the date range values and assert that only the end date is read in as a single
remove_element(arcgis_element, 'dataIdInfo/dataExt/tempEle/TempExtent/exTemp/TM_Period/tmBegin', True)
arcgis_parser = ArcGISParser(element_to_string(arcgis_element))
self.assertEqual(arcgis_parser.dates, {'type': 'single', 'values': ['Date Range End']})
# Remove the last of the date range values and assert that no dates are read in
remove_element(arcgis_element, 'dataIdInfo/dataExt/tempEle/TempExtent/exTemp/TM_Period', True)
arcgis_parser = ArcGISParser(element_to_string(arcgis_element))
self.assertEqual(arcgis_parser.dates, {})
# Insert a single date value and assert that only it is read in
single_path = 'dataIdInfo/dataExt/tempEle/TempExtent/exTemp/TM_Instant/tmPosition'
single_text = 'Single Date'
insert_element(arcgis_element, 0, single_path, single_text)
arcgis_parser = ArcGISParser(element_to_string(arcgis_element))
self.assertEqual(arcgis_parser.dates, {'type': 'single', 'values': [single_text]})
def test_fgdc_parser(self):
""" Tests behavior unique to the FGDC parser """
# Test dates structure defaults
# Remove multiple dates to ensure range is queried
fgdc_element = get_remote_element(self.fgdc_file)
remove_element(fgdc_element, 'idinfo/timeperd/timeinfo/mdattim', True)
# Assert that the backup dates are read in successfully
fgdc_parser = FgdcParser(element_to_string(fgdc_element))
self.assertEqual(fgdc_parser.dates, {'type': 'range', 'values': ['Date Range Start', 'Date Range End']})
# Test contact data structure defaults
contacts_def = get_complex_definitions()[CONTACTS]
# Remove the contact organization completely
fgdc_element = get_remote_element(self.fgdc_file)
for contact_element in get_elements(fgdc_element, 'idinfo/ptcontac'):
if element_exists(contact_element, 'cntinfo/cntorgp'):
clear_element(contact_element)
# Assert that the contact organization has been read in
fgdc_parser = FgdcParser(element_to_string(fgdc_element))
for key in contacts_def:
for contact in fgdc_parser.contacts:
self.assertIsNotNone(contact[key], 'Failed to read contact.{0}'.format(key))
# Remove the contact person completely
fgdc_element = get_remote_element(self.fgdc_file)
for contact_element in get_elements(fgdc_element, 'idinfo/ptcontac'):
if element_exists(contact_element, 'cntinfo/cntperp'):
clear_element(contact_element)
# Assert that the contact organization has been read in
fgdc_parser = FgdcParser(element_to_string(fgdc_element))
for key in contacts_def:
for contact in fgdc_parser.contacts:
self.assertIsNotNone(contact[key], 'Failed to read updated contact.{0}'.format(key))
def test_iso_parser(self):
""" Tests behavior unique to the ISO parser """
# Remove the attribute details href attribute
iso_element = get_remote_element(self.iso_file)
for citation_element in get_elements(iso_element, _iso_tag_formats['_attr_citation']):
removed = remove_element_attributes(citation_element, 'href')
# Assert that the href attribute was removed and a different one was read in
iso_parser = IsoParser(element_to_string(iso_element))
attribute_href = iso_parser._attr_details_file_url
self.assertIsNotNone(removed, 'ISO file URL was not removed')
self.assertIsNotNone(attribute_href, 'ISO href attribute was not read in')
self.assertNotEqual(attribute_href, removed, 'ISO href attribute is the same as the one removed')
# Remove the attribute details linkage attribute
iso_element = get_remote_element(self.iso_file)
for linkage_element in get_elements(iso_element, _iso_tag_formats['_attr_contact_url']):
removed = get_element_text(linkage_element)
clear_element(linkage_element)
# Assert that the linkage URL was removed and a different one was read in
iso_parser = IsoParser(element_to_string(iso_element))
linkage_url = iso_parser._attr_details_file_url
self.assertIsNotNone(removed, 'ISO linkage URL was not removed')
self.assertIsNotNone(linkage_url, 'ISO linkage URL was not read in')
self.assertNotEqual(linkage_url, removed, 'ISO file URL is the same as the one removed')
# Change the href attribute so that it is invalid
for citation_element in get_elements(iso_element, _iso_tag_formats['_attr_citation']):
removed = set_element_attributes(citation_element, href='neither url nor file')
# Assert that the href attribute was removed and a different one was read in
iso_parser = IsoParser(element_to_string(iso_element))
attributes = iso_parser.attributes
self.assertIsNone(iso_parser._attr_details_file_url, 'Invalid URL stored with parser')
self.assertEqual(
attributes, TEST_METADATA_VALUES[ATTRIBUTES], 'Invalid parsed attributes: {0}'.format(attributes)
)
def test_parser_values(self):
""" Tests that parsers are populated with the expected values """
arcgis_element = get_remote_element(self.arcgis_file)
arcgis_parser = ArcGISParser(element_to_string(arcgis_element))
arcgis_new = ArcGISParser(**TEST_METADATA_VALUES)
# Test that the two ArcGIS parsers have the same data given the same input file
self.assert_parsers_are_equal(arcgis_parser, arcgis_new)
fgdc_element = get_remote_element(self.fgdc_file)
fgdc_parser = FgdcParser(element_to_string(fgdc_element))
fgdc_new = FgdcParser(**TEST_METADATA_VALUES)
# Test that the two FGDC parsers have the same data given the same input file
self.assert_parsers_are_equal(fgdc_parser, fgdc_new)
iso_element = get_remote_element(self.iso_file)
remove_element(iso_element, _iso_tag_formats['_attr_citation'], True)
iso_parser = IsoParser(element_to_string(iso_element))
iso_new = IsoParser(**TEST_METADATA_VALUES)
# Test that the two ISO parsers have the same data given the same input file
self.assert_parsers_are_equal(iso_parser, iso_new)
# Test that all distinct parsers have the same data given equivalent input files
self.assert_parsers_are_equal(arcgis_parser, fgdc_parser)
self.assert_parsers_are_equal(fgdc_parser, iso_parser)
self.assert_parsers_are_equal(iso_parser, arcgis_parser)
# Test that each parser's values correspond to the target values
for parser in (arcgis_parser, fgdc_parser, iso_parser):
parser_type = type(parser)
for prop, target in TEST_METADATA_VALUES.items():
self.assert_equal_for(parser_type, prop, getattr(parser, prop), target)
def test_parser_conversion(self):
arcgis_parser = ArcGISParser(self.arcgis_metadata)
fgdc_parser = FgdcParser(self.fgdc_metadata)
iso_parser = IsoParser(self.iso_metadata)
self.assert_parser_conversion(arcgis_parser, fgdc_parser, 'file')
self.assert_parser_conversion(arcgis_parser, iso_parser, 'file')
self.assert_parser_conversion(fgdc_parser, arcgis_parser, 'file')
self.assert_parser_conversion(fgdc_parser, iso_parser, 'file')
self.assert_parser_conversion(iso_parser, arcgis_parser, 'file')
self.assert_parser_conversion(iso_parser, fgdc_parser, 'file')
def test_conversion_from_dict(self):
arcgis_parser = ArcGISParser(self.arcgis_metadata)
fgdc_parser = FgdcParser(self.fgdc_metadata)
iso_parser = IsoParser(self.iso_metadata)
self.assert_parser_conversion(
arcgis_parser, get_metadata_parser(element_to_dict(fgdc_parser._xml_tree, recurse=True)), 'dict-based'
)
self.assert_parser_conversion(
arcgis_parser, get_metadata_parser(element_to_dict(iso_parser._xml_tree, recurse=True)), 'dict-based'
)
self.assert_parser_conversion(
fgdc_parser, get_metadata_parser(element_to_dict(arcgis_parser._xml_tree, recurse=True)), 'dict-based'
)
self.assert_parser_conversion(
fgdc_parser, get_metadata_parser(element_to_dict(iso_parser._xml_tree, recurse=True)), 'dict-based'
)
self.assert_parser_conversion(
iso_parser, get_metadata_parser(element_to_dict(arcgis_parser._xml_tree, recurse=True)), 'dict-based'
)
self.assert_parser_conversion(
iso_parser, get_metadata_parser(element_to_dict(fgdc_parser._xml_tree, recurse=True)), 'dict-based'
)
def test_conversion_from_str(self):
arcgis_parser = ArcGISParser(self.arcgis_metadata)
fgdc_parser = FgdcParser(self.fgdc_metadata)
iso_parser = IsoParser(self.iso_metadata)
self.assert_parser_conversion(
arcgis_parser, get_metadata_parser(fgdc_parser.serialize()), 'str-based'
)
self.assert_parser_conversion(
arcgis_parser, get_metadata_parser(iso_parser.serialize()), 'str-based'
)
self.assert_parser_conversion(
fgdc_parser, get_metadata_parser(arcgis_parser.serialize()), 'str-based'
)
self.assert_parser_conversion(
fgdc_parser, get_metadata_parser(iso_parser.serialize()), 'str-based'
)
self.assert_parser_conversion(
iso_parser, get_metadata_parser(arcgis_parser.serialize()), 'str-based'
)
self.assert_parser_conversion(
iso_parser, get_metadata_parser(fgdc_parser.serialize()), 'str-based'
)
def test_reparse_complex_lists(self):
complex_defs = get_complex_definitions()
complex_lists = (ATTRIBUTES, CONTACTS, DIGITAL_FORMS)
arcgis_parser = ArcGISParser(self.arcgis_metadata)
fgdc_parser = FgdcParser(self.fgdc_metadata)
iso_parser = IsoParser(self.iso_metadata)
for parser in (arcgis_parser, fgdc_parser, iso_parser):
# Test reparsed empty complex lists
for prop in complex_lists:
for empty in (None, [], [{}], [{}.fromkeys(complex_defs[prop], u'')]):
self.assert_reparsed_complex_for(parser, prop, empty, [])
# Test reparsed valid complex lists (strings and lists for each property in each struct)
for prop in complex_lists:
complex_list = []
for val in self.valid_complex_values:
# Test with single unwrapped value
next_complex = {}.fromkeys(complex_defs[prop], val)
self.assert_reparsed_complex_for(parser, prop, next_complex, wrap_value(next_complex))
# Test with accumulated list of values
complex_list.append({}.fromkeys(complex_defs[prop], val))
self.assert_reparsed_complex_for(parser, prop, complex_list, wrap_value(complex_list))
def test_reparse_complex_structs(self):
complex_defs = get_complex_definitions()
complex_structs = (BOUNDING_BOX, LARGER_WORKS, RASTER_INFO)
arcgis_parser = ArcGISParser(self.arcgis_metadata)
fgdc_parser = FgdcParser(self.fgdc_metadata)
iso_parser = IsoParser(self.iso_metadata)
for parser in (arcgis_parser, fgdc_parser, iso_parser):
# Test reparsed empty complex structures
for prop in complex_structs:
for empty in (None, {}, {}.fromkeys(complex_defs[prop], u'')):
self.assert_reparsed_complex_for(parser, prop, empty, {})
# Test reparsed valid complex structures
for prop in complex_structs:
for val in self.valid_complex_values:
complex_struct = {}.fromkeys(complex_defs[prop], val)
self.assert_reparsed_complex_for(parser, prop, complex_struct, complex_struct)
def test_reparse_dates(self):
valid_values = (
(DATE_TYPE_SINGLE, ['one']),
(DATE_TYPE_RANGE, ['before', 'after']),
(DATE_TYPE_MULTIPLE, ['first', 'next', 'last'])
)
arcgis_parser = ArcGISParser(self.arcgis_metadata)
fgdc_parser = FgdcParser(self.fgdc_metadata)
iso_parser = IsoParser(self.iso_metadata)
for parser in (arcgis_parser, fgdc_parser, iso_parser):
# Test reparsed empty dates
for empty in (None, {}, {DATE_TYPE: u'', DATE_VALUES: []}):
self.assert_reparsed_complex_for(parser, DATES, empty, {})
# Test reparsed valid dates
for val in valid_values:
complex_struct = {DATE_TYPE: val[0], DATE_VALUES: val[1]}
self.assert_reparsed_complex_for(
parser, DATES, complex_struct, complex_struct
)
def test_reparse_keywords(self):
arcgis_parser = ArcGISParser(self.arcgis_metadata)
fgdc_parser = FgdcParser(self.fgdc_metadata)
iso_parser = IsoParser(self.iso_metadata)
for parser in (arcgis_parser, fgdc_parser, iso_parser):
# Test reparsed empty keywords
for keywords in ('', u'', []):
for keyword_prop in KEYWORD_PROPS:
self.assert_reparsed_complex_for(parser, keyword_prop, keywords, [])
# Test reparsed valid keywords
for keywords in ('keyword', ['keyword', 'list']):
for keyword_prop in KEYWORD_PROPS:
self.assert_reparsed_complex_for(parser, keyword_prop, keywords, wrap_value(keywords))
def test_reparse_process_steps(self):
proc_step_def = get_complex_definitions()[PROCESS_STEPS]
arcgis_parser = ArcGISParser(self.arcgis_metadata)
fgdc_parser = FgdcParser(self.fgdc_metadata)
iso_parser = IsoParser(self.iso_metadata)
for parser in (arcgis_parser, fgdc_parser, iso_parser):
# Test reparsed empty process steps
for empty in (None, [], [{}], [{}.fromkeys(proc_step_def, u'')]):
self.assert_reparsed_complex_for(parser, PROCESS_STEPS, empty, [])
complex_list = []
# Test reparsed valid process steps
for val in self.valid_complex_values:
complex_struct = {}.fromkeys(proc_step_def, val)
# Process steps must have a single string value for all but sources
complex_struct.update({
k: ', '.join(wrap_value(v)) for k, v in iteritems(complex_struct) if k != 'sources'
})
complex_list.append(complex_struct)
self.assert_reparsed_complex_for(parser, PROCESS_STEPS, complex_list, complex_list)
def test_reparse_simple_values(self):
complex_props = set(get_complex_definitions().keys())
required_props = set(get_supported_props())
simple_props = required_props.difference(complex_props)
simple_props = simple_props.difference(KEYWORD_PROPS)
simple_empty_vals = ('', u'', [])
simple_valid_vals = (u'value', [u'item', u'list'])
arcgis_parser = ArcGISParser(self.arcgis_metadata)
fgdc_parser = FgdcParser(self.fgdc_metadata)
iso_parser = IsoParser(self.iso_metadata)
for parser in (arcgis_parser, fgdc_parser, iso_parser):
# Test reparsed empty values
for val in simple_empty_vals:
self.assert_reparsed_simple_for(parser, simple_props, val, u'')
# Test reparsed valid values
for val in simple_valid_vals:
self.assert_reparsed_simple_for(parser, simple_props, val, val)
def test_validate_complex_lists(self):
complex_props = (ATTRIBUTES, CONTACTS, DIGITAL_FORMS, PROCESS_STEPS)
invalid_values = ('', u'', {'x': 'xxx'}, [{'x': 'xxx'}], set(), tuple())
for parser in (ArcGISParser().validate(), FgdcParser().validate(), IsoParser().validate()):
for prop in complex_props:
for invalid in invalid_values:
self.assert_validates_for(parser, prop, invalid)
def test_validate_complex_structs(self):
complex_props = (BOUNDING_BOX, DATES, LARGER_WORKS, RASTER_INFO)
invalid_values = ('', u'', {'x': 'xxx'}, list(), set(), tuple())
for parser in (ArcGISParser().validate(), FgdcParser().validate(), IsoParser().validate()):
for prop in complex_props:
for invalid in invalid_values:
self.assert_validates_for(parser, prop, invalid)
def test_validate_dates(self):
invalid_values = (
(DATE_TYPE_MISSING, ['present']),
(DATE_TYPE_MULTIPLE, ['single']),
(DATE_TYPE_MULTIPLE, ['first', 'last']),
(DATE_TYPE_RANGE, []),
(DATE_TYPE_RANGE, ['just one']),
(DATE_TYPE_SINGLE, []),
(DATE_TYPE_SINGLE, ['one', 'two']),
('unknown', ['unknown'])
)
arcgis_parser = ArcGISParser(self.arcgis_metadata)
fgdc_parser = FgdcParser(self.fgdc_metadata)
iso_parser = IsoParser(self.iso_metadata)
for parser in (arcgis_parser, fgdc_parser, iso_parser):
for val in invalid_values:
self.assert_validates_for(parser, DATES, {DATE_TYPE: val[0], DATE_VALUES: val[1]})
def test_validate_simple_values(self):
complex_props = set(get_complex_definitions().keys())
simple_props = set(get_supported_props()).difference(complex_props)
invalid_values = (None, [None], dict(), [dict()], set(), [set()], tuple(), [tuple()])
for parser in (ArcGISParser().validate(), FgdcParser().validate(), IsoParser().validate()):
for prop in simple_props:
for invalid in invalid_values:
self.assert_validates_for(parser, prop, invalid)
def test_write_values(self):
self.assert_parser_after_write(ArcGISParser, self.arcgis_metadata, self.test_arcgis_file_path)
self.assert_parser_after_write(FgdcParser, self.fgdc_metadata, self.test_fgdc_file_path)
self.assert_parser_after_write(IsoParser, self.iso_metadata, self.test_iso_file_path)
def test_write_values_to_template(self):
self.assert_parser_after_write(ArcGISParser, self.arcgis_metadata, self.test_arcgis_file_path, True)
self.assert_parser_after_write(FgdcParser, self.fgdc_metadata, self.test_fgdc_file_path, True)
self.assert_parser_after_write(IsoParser, self.iso_metadata, self.test_iso_file_path, True)
class CustomIsoParser(IsoParser):
def _init_data_map(self):
super(CustomIsoParser, self)._init_data_map()
# Basic property: text or list (with backup location referencing codeListValue attribute)
lang_prop = 'metadata_language'
self._data_map[lang_prop] = 'language/CharacterString' # Parse from here if present
self._data_map['_' + lang_prop] = 'language/LanguageCode/@codeListValue' # Otherwise, try from here
# Complex structure (reuse of contacts structure plus phone)
# Define some basic variables
ct_prop = 'metadata_contacts'
ct_xpath = 'contact/CI_ResponsibleParty/{ct_path}'
        ct_definition = get_complex_definitions()[CONTACTS]
        ct_definition['phone'] = '{phone}'
        # Reuse the CONTACTS structure to specify locations per prop (adapted only slightly from the parent)
        self._data_structures[ct_prop] = format_xpaths(
            ct_definition,
name=ct_xpath.format(ct_path='individualName/CharacterString'),
organization=ct_xpath.format(ct_path='organisationName/CharacterString'),
position=ct_xpath.format(ct_path='positionName/CharacterString'),
phone=ct_xpath.format(
ct_path='contactInfo/CI_Contact/phone/CI_Telephone/voice/CharacterString'
),
email=ct_xpath.format(
ct_path='contactInfo/CI_Contact/address/CI_Address/electronicMailAddress/CharacterString'
)
)
# Set the root and add getter/setter (parser/updater) to the data map
self._data_map['_{prop}_root'.format(prop=ct_prop)] = 'contact'
self._data_map[ct_prop] = ParserProperty(self._parse_complex_list, self._update_complex_list)
# And finally, let the parent validation logic know about the two new custom properties
self._metadata_props.add(lang_prop)
self._metadata_props.add(ct_prop)
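# Illustrative helper (not part of the original tests): once registered in _init_data_map,
# the two custom properties above read like any built-in parser property. The file path
# is a placeholder supplied by the caller.
def example_custom_parser_usage(iso_file_path):
    with open(iso_file_path) as iso_file:
        parser = CustomIsoParser(iso_file)
        # Simple property with a code-list backup location, and a complex list rooted at 'contact'
        return parser.metadata_language, parser.metadata_contacts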
|
import socket
soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
soc.connect((socket.gethostname(), 2905))  # connect to the time server on port 2905
recmsg = soc.recv(1024)                    # read up to 1024 bytes from the server
soc.close()
print("The time received from the server is %s" % recmsg.decode("ascii"))
|
import yfinance as yf   # finance data from the Yahoo Finance API
import streamlit as st  # streamlit for building a data-driven web app
import pandas as pd     # pandas for data manipulation
# st.write can read markdown style and pass the string to the web app
st.write("""
## Simple streamlit app
* Show the stock closing price and volume of Google.
""")
#hardcoding the google stock ticker
ticker_symbol = 'GOOGL'
#get the ticker data
ticker_data = yf.Ticker(ticker_symbol)
# create a dataframe of daily price history between the start and end dates
ticker_df = ticker_data.history(period='1d', start='2010-5-31', end='2020-5-31')
#create the two line charts of Close and Volume
st.line_chart(ticker_df['Close'])
st.line_chart(ticker_df['Volume'])
|
# -*- coding: utf-8 -*-
import sys
import logging
import json
from decimal import Decimal
from quant.core.Spider import *
class StockSpider(SpiderEngine):
    '''
    Update index data
    '''
def __init__(self):
SpiderEngine.__init__(self)
self.today = sys.argv[2]
def run(self):
self.tools.setup_logging(sys.argv[1], True, True)
#self.get_xueqiu_base(1)
#sys.exit()
logging.debug('Start Base Stock=====Days:%s ' % sys.argv[2])
day_list = self.mysql.getRecord("SELECT * FROM `s_stock_list` WHERE `run_market` =0 OR all_market =0")
for i in range(0, len(day_list)):
self.get_xueqiu_base(day_list[i])
    def get_level_2(self):
        # Ten-level (Level-2) order book quotes
        # https://app.leverfun.com/timelyInfo/timelyOrderForm?stockCode=300190
        pass
def get_xueqiu_base(self, data):
#s_code = 'SH600180'
#print data
s_code = data['s_code'].upper()
self.curl_get('https://xueqiu.com/8205215793')
url = "https://xueqiu.com/v4/stock/quote.json?code=%s&_=1423121365509" % s_code
#url = "https://www.baidu.com"
_data = self.curl_get(url)
re = json.loads(_data)
        # Float (circulating) market cap
a = int(Decimal(re[s_code]['float_shares']) * data['close'])
        # Total market cap
b = int(Decimal(re[s_code]['totalShares']) * data['close'])
self.mysql.dbQuery("update s_stock_list set run_market=%s,all_market=%s where id=%s" % (a, b, data['id']))
|
from .base import ApiBase
import logging
from ..internal.utils import PaginatedSearch
logger = logging.getLogger(__name__)
class ExpenseReports(ApiBase):
"""
ExpenseReports are not directly searchable - only via as employees
"""
def __init__(self, ns_client):
ApiBase.__init__(self, ns_client=ns_client, type_name='ExpenseReport')
def get_all_generator(self):
record_type_search_field = self.ns_client.SearchStringField(searchValue='ExpenseReport', operator='contains')
basic_search = self.ns_client.basic_search_factory('Employee', recordType=record_type_search_field)
paginated_search = PaginatedSearch(client=self.ns_client,
type_name='Employee',
basic_search=basic_search,
pageSize=20)
return self._paginated_search_to_generator(paginated_search=paginated_search)
|
import pytest
from ..__main__ import birthday_cake_candles
def test_case_1():
result = birthday_cake_candles(4, [3, 2, 1, 3])
assert result == 2
def test_more_age():
result = birthday_cake_candles(2, [3, 3, 3, 3, 3])
assert result == 2
def test_not_as_many_as_age():
result = birthday_cake_candles(10, [3, 3, 3, 3, 3])
assert result == 5
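# The implementation under test lives in __main__ and is not shown here. A minimal
# reference sketch consistent with the three expectations above (hypothetical -- the
# real module may differ): return the number of tallest candles, capped at the age.
def _birthday_cake_candles_reference(age, candles):
    tallest = candles.count(max(candles))  # how many candles share the maximum height
    return min(age, tallest)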
|
import math
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.contrib.layers.python.layers import batch_norm
def batchnorm(inputT, is_training=False, scope=None):
# return inputT
# Note: is_training is tf.placeholder(tf.bool) type
is_training = tf.get_collection('istrainvar')[0]
return tf.cond(is_training,
lambda: batch_norm(inputT, is_training=True,
center=True, scale=True, decay=0.9, updates_collections=None, scope=scope),
lambda: batch_norm(inputT, is_training=False,
center=True, scale=True, decay=0.9, updates_collections=None, scope=scope,
reuse=True))
def stability_loss(inp, sample_size):
# if randvec is not None:
# o1 = tf.gather(inp,randvec[0:sample_size])
# me1, var1 = tf.nn.moments(o1,0)
# o2 = tf.gather(inp,randvec[sample_size:2*sample_size])
# me2, var2 = tf.nn.moments(o2,0)
# shape = var1.get_shape()
## eps1 = tf.get_variable("var1", (shape[0]), tf.float32, tf.constant_initializer(0.1))
# else:
me1, var1 = tf.nn.moments(inp[0:sample_size,:],0)
me2, var2 = tf.nn.moments(inp[sample_size:2*sample_size,:],0)
shape = var1.get_shape()
eps1 = tf.get_variable("var2", (shape[0]), tf.float32, tf.constant_initializer(0.1))
var1 = tf.abs(var1)
var2 = tf.abs(var2)
tf.add_to_collection('l2_norm',(tf.reduce_mean(tf.square(1 - (var1)/(var2+eps1)))))
def linear(input_, output_size, sample_size, eps, scope=None, bn = False, activation = None, hidden = True):
shape = input_.get_shape().as_list()
with tf.variable_scope(scope or "Linear"):
matrix = tf.get_variable(scope + "Matrix", [shape[1], output_size], tf.float32,
tf.random_normal_initializer(stddev=1/np.sqrt(shape[1])))
# we = tf.reshape(matrix,(shape[1]*output_size,1))
# randvec = tf.cast(tf.random_uniform((2*sample_size,1),0,shape[1]*output_size),tf.int32)
# stability_loss(we,sample_size,eps,randvec)
tf.add_to_collection('l2_loss',(tf.nn.l2_loss(matrix)))
bias = tf.get_variable(scope + "bias", [output_size],
initializer=tf.constant_initializer(0.0))
output = tf.matmul(input_, matrix) + bias
if bn:
output = batchnorm(output, scope = scope)
if hidden:
stability_loss(output,sample_size)
if activation:
output = activation(output)
# if hidden:
# stability_loss(output,sample_size,eps)
# if activation:
# output = activation(output)
return output
def conic_architecture(inp, init_dim, num_of_labels, activation, depth, sample_size, eps, isTrain_node, keep = 1, bn_ = 0, conic = 0):
bn = bn_==1
factor = (np.power(float(num_of_labels)/float(init_dim),1/float(depth)))
tmp = linear(inp, init_dim, sample_size, eps, scope='init', bn = bn, activation = activation, hidden = False)
# tmp = tf.cond(isTrain_node, lambda: tf.nn.dropout(tmp, keep), lambda: tmp)
dim = init_dim
for i in range(depth - 1):
if conic==1:
dim = (dim*factor).astype('int')
tmp = linear(tmp, dim, sample_size, eps, scope='layer_'+str(i), bn = bn, activation = activation)
# tmp = tf.cond(isTrain_node, lambda: tf.nn.dropout(tmp, keep), lambda: tmp)
tmp = tf.cond(isTrain_node, lambda: tf.nn.dropout(tmp, keep), lambda: tmp)
out = linear(tmp, num_of_labels, sample_size, eps, scope='out', bn = False, hidden = False)
return out
def conic_architecture_selu(inp, init_dim, num_of_labels, activation, depth, sample_size, eps, isTrain_node, keep = 1, bn_ = 0, conic = 0):
bn = bn_==1
factor = (np.power(float(num_of_labels)/float(init_dim),1/float(depth)))
tmp = linear(inp, init_dim, sample_size, eps, scope='init', bn = bn, activation = activation, hidden = False)
tmp = tf.cond(isTrain_node, lambda: tf.contrib.nn.alpha_dropout(tmp, keep), lambda: tmp)
dim = init_dim
for i in range(depth - 1):
if conic==1:
dim = (dim*factor).astype('int')
tmp = linear(tmp, dim, sample_size, eps, scope='layer_'+str(i), bn = bn, activation = activation)
tmp = tf.cond(isTrain_node, lambda: tf.contrib.nn.alpha_dropout(tmp, keep), lambda: tmp)
out = linear(tmp, num_of_labels, sample_size, eps, scope='out', bn = False, hidden = False)
return out
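# A hedged wiring sketch for the legacy TF1 graph builders above (shapes and
# hyperparameters here are placeholders, not values from the original project). The
# boolean training flag drives the dropout cond branches; batchnorm() additionally
# looks it up in the 'istrainvar' collection when batch norm is enabled.
def _example_build_graph():
    x = tf.placeholder(tf.float32, [None, 784])
    is_train = tf.placeholder(tf.bool)
    tf.add_to_collection('istrainvar', is_train)
    return conic_architecture(x, init_dim=512, num_of_labels=10, activation=tf.nn.relu,
                              depth=3, sample_size=16, eps=0.1, isTrain_node=is_train)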
|
import requests
import os
from redis.exceptions import RedisError
from redis_util.redis_connection import ping_redis
from elasticsearch import ElasticsearchException
from elasticsearch_util.elasticsearch_connection import get_es_health
def parse_args(sub_parser):
subparser = sub_parser.add_parser("status", help="Report on the status of the application")
# register the function to be executed when command "status" is called
subparser.set_defaults(func=print_status)
def print_status(arg):
""" Prints the status of the aladdin-demo pod and the redis pod """
print_aladdin_demo_server_status()
print_redis_status()
print_elasticsearch_status()
def print_aladdin_demo_server_status():
print("pinging aladdin-demo-server ...")
    # These environment variables are provided by kubernetes and are how we discover the
    # address of the aladdin-demo server service
project_name = os.environ["PROJECT_NAME"].upper().replace("-", "_")
host = os.environ[project_name + "_SERVER_SERVICE_HOST"]
port = os.environ[project_name + "_SERVER_SERVICE_PORT"]
url = "http://{}:{}/ping".format(host, port)
try:
r = requests.get(url)
if r.status_code == 200:
print("aladdin demo server endpoint ping successful")
else:
print("aladdin demo server endpoint ping returned with status code {}".format(r.status_code))
except requests.exceptions.ConnectionError as e:
print("aladdin demo endpoint connection error: {}".format(e))
def print_redis_status():
# TODO have this ping external redis when that gets added
print("pinging redis ...")
if os.environ["REDIS_CREATE"] == "false":
print("redis creation flag set to false, no other redis connection available at this time")
return
try:
status = ping_redis()
print("redis connection ping successful {}".format(status))
except RedisError as e:
print("redis connection error: {}".format(e))
def print_elasticsearch_status():
print("getting elasticsearch health ...")
if os.environ["ELASTICSEARCH_CREATE"] == "false":
print("elasticsearch creation flag set to false, no other elasticsearch connection available at this time")
return
try:
status = get_es_health()
print("elasticsearch health retrieved: {}".format(status))
except ElasticsearchException as e:
print("encountered elasticsearch error: {}".format(e))
|
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, min, max
# Create the Spark Session object
spark = (SparkSession.builder.appName("DeltaExercise")
.getOrCreate()
)
# Read the data
enem = (
spark.read.format("csv")
.option("inferSchema", True)
.option("header", True)
.option("delimiter", ";")
.load("s3://igti-bootcamp-ed-2021-468906988086/raw/")
)
# Write the table to the staging zone in Delta format
print("Writing delta table...")
(
enem
.write
.mode("overwrite")
.format("delta")
.partitionBy("year")
.save("s3://datalake-bruno-468906988086-igti-edc-tf/staging-zone/censo")
)
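# A quick read-back check of the Delta table written above (illustrative only; it assumes
# the same S3 path and a Spark session with the Delta Lake package on the classpath).
censo = (
    spark.read.format("delta")
    .load("s3://datalake-bruno-468906988086-igti-edc-tf/staging-zone/censo")
)
censo.select(min(col("year")), max(col("year"))).show()  # uses the imported col/min/max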
|
# open(fileName, 'wb') as f:
# while f:
"""
二进制文件的存储(音频,视频......)
1. 获取到下载文件的url 二进制方式下载
2.使用urllib模块的urlretrieve()可以进行音频文件下载
也支持远程下载到本地
urlretrieve(url, filename=None, reporthook=None, data=None)
url: 文件地址
filename: 存储路径/文件名
reporthook: 回调函数,连接上服务器时或相应数据下载完成后触发该函数
一般用来显示当前下载进度
data: (filename, headers)元祖
"""
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
from urllib import request
import requests
import os
from lxml import etree
headers = {
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 "
                  "(KHTML, like Gecko) Chrome/78.0.3904.87 Safari/537.36"}
url = 'https://www.ivsky.com/tupian/ziranfengguang/'
# Callback function
def Schedule(blocknum, blocksize, totalsize):
"""
显示下载进度
:param blocknum: 已下载数据块
:param blocksizi: 数据块大小
:param totalsize: 文件大小
:return: None
"""
per = 100.0 * blocknum * blocksize / totalsize
if per > 100:
per = 100
    print('Current download progress: {}'.format(per))
rsp = requests.get(url, headers=headers)
# print(rsp.text)
html = etree.HTML(rsp.text)
# Find all image links
img_url = html.xpath('//div[@class="il_img"]//img/@src')
print(img_url)
for img in img_url:
root_dir = 'img'
if not os.path.exists(root_dir):
os.mkdir(root_dir)
filename = img.split('/')[-1]
# print(filename)
dir = "./" + root_dir + "/" + filename
# print(dir)
"""
获取到页面中封面图的url与实际不符,列封面中的url为:
//img.ivsky.com/img/tupian/li/201904/22/lantian_baiyun-002.jpg
而实际的地址为:
//img.ivsky.com/img/tupian/li/201904/22/lantian_baiyun-002.jpg
则所有获取到的地址均需要手动拼接图片的完整url。
"""
request.urlretrieve("https:" + img, dir, Schedule) |
# A simple lambda translator and evaluator
# - Converts a lambda-expression into SKI-expression (translate)
# and then reduce it using a combinator graph reduction engine (evaluate)
# - Lambda to SKI-expression conversion is done according to bracket abstraction
from functools import reduce
# Abstract Syntax for Lambda Expressions
_Lam_ = 'Lam'
_App_ = 'App'
def Lam(vars, exp): # Associates right
if type(vars) == str:
vars = [vars]
return reduce(lambda w, a: (_Lam_, a, w), vars[::-1], exp)
def App(*exps): # Associates left
return reduce(lambda w, a: (_App_, w, a), exps[1:], exps[0])
def Var(name):
return name
def is_lambda(exp):
return type(exp) == tuple and exp[0] == _Lam_
def is_application(exp):
return type(exp) == tuple and exp[0] == _App_
def is_variable(exp):
return type(exp) == str
def ppLamExp(exp): # Pretty-printer
if is_lambda(exp):
return f"(\\{''.join(exp[1])}.{ppLamExp(exp[2])})"
elif is_application(exp):
exp_1 = ppLamExp(exp[1])
if exp_1[0] == '(' and exp_1[-1] == ')':
exp_1 = exp_1[1:-1]
return f"({exp_1} {ppLamExp(exp[2])})"
elif is_variable(exp):
return exp
# Translate a lambda-expression into an SKI-expression.
# Implements so-called the bracket abstraction algorithm
def translate(exp):
if is_variable(exp):
return exp # Rule 1
if is_application(exp): # Rule 2
return App(translate(exp[1]), translate(exp[2]))
    # The only remaining case: exp is a lambda abstraction
    var, eps = exp[1], exp[2]
if var not in free_vars(eps):
return App("K", translate(eps)) # Rule 3
if is_variable(eps) and eps == var:
return "I" # == App("S", "K", "K") # Rule 4
if is_lambda(eps) and var in free_vars(eps[2]):
return translate(Lam(var, translate(eps))) # Rule 5
if is_application(eps): # Rule 6
return App("S", translate(Lam(var, eps[1])), translate(Lam(var, eps[2])))
def free_vars(exp):
def collect(vs, exp):
if is_variable(exp):
return [] if exp in vs else [exp]
if is_application(exp):
return collect(vs, exp[1]) + collect(vs, exp[2])
if is_lambda(exp):
return collect([exp[1]] + vs, exp[2])
return set(collect([], exp))
def ppSkiExp(ski): # Pretty-printer
# Optimize S(KK)I which is equivalent to K
ppExp = ppLamExp(ski).replace(" ", "").replace("(S(KK)I)", "K")
if ppExp[0] == '(' and ppExp[-1] == ')':
ppExp = ppExp[1:-1]
return ppExp
# Naive evaluator for SKI-expressions
def evaluate(ski):
if is_variable(ski):
return ski
if is_application(ski):
ski1, ski2 = ski[1], ski[2]
# App(I, x)
if ski1 == 'I':
return evaluate(ski2)
# App((K x) y)
if is_application(ski1) and ski1[1] == 'K':
return evaluate(ski1[2])
# (((S x) y) z)
if is_application(ski1) and is_application(ski1[1]) and ski1[1][1] == 'S':
return evaluate(App(App(ski1[1][2], ski2), App(ski1[2], ski2)))
# All else
red1 = evaluate(ski1)
red2 = evaluate(ski2)
if red1 == ski1 and red2 == ski2:
return ski
else:
return evaluate(App(red1, red2))
return ski
# Examples
true = Lam(["x", "y"], "x")
false = Lam(["x", "y"], "y")
zero = Lam(["f", "x"], "x")
one = Lam(["f", "x"], App("f", "x"))
two = Lam(["f", "x"], App("f", App("f", "x")))
add = Lam(["m", "n", "f", "x"], App("m", "f", App("n", "f", "x")))
three = App(add, one, two)
succ = Lam(["n", "f", "x"], App("f", App("n", "f", "x")))
pred = Lam(["n", "f", "x"], App("n", Lam(["g", "h"], App("h", App("g", "f"))),
Lam(["u"], "x"), Lam(["u"], "u")))
mul = Lam(["m", "n", "f"], App("m", App("n", "f")))
square = Lam("n", App(mul, "n", "n"))
is0 = Lam("n", App("n", Lam("x", false), true))
# For Church numeral evaluation
# e.g. eval('s(s(s(s(s(s(s(s(s(s(s(sz)))))))))))') evaluates to 12
s = lambda n: n+1
z = 0
sz = s(z)
# A sequence of steps to calculate a lambda expression
def eval_lambda(demo_name, exp):
ski = translate(exp) # Convert to an SKI-expression
print(demo_name, "=", ppSkiExp(ski), end=" ==> ")
eval_on_sz = ppSkiExp(evaluate(App(ski, "s", "z"))) # Evaluate using "s" "z" as arguments
print(eval_on_sz, "which evaluates to", eval(eval_on_sz))
print()
eval_lambda("add(one, two)", App(add, one, two))
eval_lambda("mul(two, three)", App(mul, two, three))
eval_lambda("square(three)", App(square, three))
eval_lambda("mul(add(one, two), square(three))",
App(mul, App(add, one, two), App(square, three)))
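# Quick sanity checks of translate()/ppSkiExp() on the Church booleans above (added for
# illustration; the outputs follow from the bracket-abstraction rules).
print(ppSkiExp(translate(true)))    # prints "K"  -- \x.\y.x is exactly the K combinator
print(ppSkiExp(translate(false)))   # prints "KI" -- \x.\y.y becomes K applied to I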
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class CreateSignatureRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'cas', '2018-08-13', 'CreateSignature','cas_esign_fdd')
def get_Quantity(self):
return self.get_query_params().get('Quantity')
def set_Quantity(self,Quantity):
self.add_query_param('Quantity',Quantity)
def get_HandSignImg(self):
return self.get_query_params().get('HandSignImg')
def set_HandSignImg(self,HandSignImg):
self.add_query_param('HandSignImg',HandSignImg)
def get_DocId(self):
return self.get_query_params().get('DocId')
def set_DocId(self,DocId):
self.add_query_param('DocId',DocId)
def get_CustomApi(self):
return self.get_query_params().get('CustomApi')
def set_CustomApi(self,CustomApi):
self.add_query_param('CustomApi',CustomApi)
def get_PositionPage(self):
return self.get_query_params().get('PositionPage')
def set_PositionPage(self,PositionPage):
self.add_query_param('PositionPage',PositionPage)
def get_DocTitle(self):
return self.get_query_params().get('DocTitle')
def set_DocTitle(self,DocTitle):
self.add_query_param('DocTitle',DocTitle)
def get_PositionX(self):
return self.get_query_params().get('PositionX')
def set_PositionX(self,PositionX):
self.add_query_param('PositionX',PositionX)
def get_PositionY(self):
return self.get_query_params().get('PositionY')
def set_PositionY(self,PositionY):
self.add_query_param('PositionY',PositionY)
def get_SourceIp(self):
return self.get_query_params().get('SourceIp')
def set_SourceIp(self,SourceIp):
self.add_query_param('SourceIp',SourceIp)
def get_PeopleId(self):
return self.get_query_params().get('PeopleId')
def set_PeopleId(self,PeopleId):
self.add_query_param('PeopleId',PeopleId)
def get_PositionType(self):
return self.get_query_params().get('PositionType')
def set_PositionType(self,PositionType):
self.add_query_param('PositionType',PositionType)
def get_SignKeyword(self):
return self.get_query_params().get('SignKeyword')
def set_SignKeyword(self,SignKeyword):
self.add_query_param('SignKeyword',SignKeyword)
def get_NotifyUrl(self):
return self.get_query_params().get('NotifyUrl')
def set_NotifyUrl(self,NotifyUrl):
self.add_query_param('NotifyUrl',NotifyUrl)
def get_Validity(self):
return self.get_query_params().get('Validity')
def set_Validity(self,Validity):
self.add_query_param('Validity',Validity)
def get_ReturnUrl(self):
return self.get_query_params().get('ReturnUrl')
def set_ReturnUrl(self,ReturnUrl):
self.add_query_param('ReturnUrl',ReturnUrl)
def get_Lang(self):
return self.get_query_params().get('Lang')
def set_Lang(self,Lang):
self.add_query_param('Lang',Lang)
def get_KeywordStrategy(self):
return self.get_query_params().get('KeywordStrategy')
def set_KeywordStrategy(self,KeywordStrategy):
self.add_query_param('KeywordStrategy',KeywordStrategy) |
""" Day 9
- add relative mode - 2
- add rel_base
- add opcode 9 - jump rel_base
"""
import common
from computer import Computer
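# Illustrative sketch only (not the Computer class imported above): in an
# Intcode interpreter, "relative mode" (parameter mode 2) reads memory at
# rel_base + param, and opcode 9 adjusts rel_base by its single parameter.
def _relative_mode_sketch(memory, rel_base, mode, param):
    if mode == 0:       # position mode: param is an absolute address
        return memory[param]
    if mode == 1:       # immediate mode: param is the value itself
        return param
    return memory[rel_base + param]  # relative mode: offset from rel_base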
##########
if __name__ == "__main__":
program = [int(n) for n in common.listify_input_string('09-input.txt')]
# Part 1 -- answer 2662308295
comp = Computer()
comp.load_program(program=program)
print(f"\nPart 1: {comp.run_program(inputs=[1])}")
# Part 2 -- answer 63441
comp = Computer()
comp.load_program(program=program)
print(f"\nPart 2: {comp.run_program(inputs=[2])}") |
hours_worked = float(input("Hours worked: "))
ot_hours = hours_worked - 40
straight_pay = 15.00 #this is a constant.
overtime_rate = straight_pay * 1.5
after_tax = 0.75 #keep 75% of gross pay (25% withheld for taxes, etc.)
if ot_hours > 0:
net_pay = ((straight_pay * 40) + (ot_hours * overtime_rate)) * after_tax
else:
net_pay = (straight_pay * hours_worked) * after_tax
print('$%.2f' % net_pay )
|
import numpy as np
import pandas as pd
import os
def get_AIS_csv_data(f):
if not os.path.exists(f):
from import_csv import get_csv_data
get_csv_data()
df = pd.read_csv(f,
usecols=[0, 1, 2, 3],
parse_dates=['BaseDateTime'],
engine='python')
return df
def partition_AIS(df, sz):
dfs = []
sz = int(sz)
lon_max = df['LON'].max()
lon_min = df['LON'].min()
lon_stp = (lon_max - lon_min)/sz
lat_max = df['LAT'].max()
lat_min = df['LAT'].min()
lat_stp = (lat_max - lat_min)/sz
del lon_max, lat_max
for i in range(sz):
lon = lon_min + lon_stp * i
for j in range(sz):
lat = lat_min + lat_stp * j
dft = df[
(df.LON >= lon) & (df.LON < lon + lon_stp) &
(df.LAT >= lat) & (df.LAT < lat + lat_stp)
]
if dft.empty:
continue
dfs.append(dft)
            print('Partition %d is done' % (sz * i + j + 1))
return dfs
def write_to_file(s, dfs):
    i = 1
    root = './AIS_cell'
    # Make sure the output directory exists before writing partition files
    os.makedirs(root, exist_ok=True)
    for df in dfs:
        f = 'AIS_' + s + '_part_' + str(i) + '.txt'
        file_path = root + os.sep + f
        df.to_csv(file_path, mode='w', index=False)
        i += 1
s = input('Input month length:')
f = 'AIS_cvs_file_' + s + '.txt'
sz = input('Input partition size:')
df = get_AIS_csv_data(f)
del f
write_to_file(s, partition_AIS(df, sz))
|
# -*- coding: utf-8 -*-
import logging
from flask import current_app as app
from flask import request
from agroutils.restful.resource import patch_response_data, ok_response, error_response, Resource as AgroutilResource
from agroutils.session.auth import AuthenticationError
from eventtracker.utils.client import current_user
from agroutils.exceptions.error_handler import ErrorHandler
from eventtracker.utils.logger import send_exception_mail
from eventtracker.service_api_handlers.get_template_handler import get_template
crash_logger = logging.getLogger('crash')
class Template(AgroutilResource):
    @ErrorHandler("GET Template", app=app, exception_mailer=send_exception_mail)
def get(self, template_id=None):
app.logger.info("GET {}".format(self.__class__.__name__))
params = request.args.to_dict()
template = get_template(params)
return ok_response(template)
get.authenticated = False
|
#!/usr/bin/env python
# encoding: utf-8
import ConfigParser
from wxpy import *
import logging
import time
import smtplib
import time
import os
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
logging.basicConfig(filename='/tmp/sendmail.log', level=logging.DEBUG)
cf = ConfigParser.ConfigParser()
cf.read('wechat.conf')
mail_host = cf.get('email', 'host')
mail_user = cf.get('email', 'user')
mail_pass = cf.get('email', 'pass')
mail_port = cf.get('email', 'port')
to = cf.get('email', 'to')
group_name = cf.get('wechat', 'group_name')
path = cf.get('wechat', 'path')
sender = mail_user
day = time.strftime("%Y-%m-%d")
def sendmail(receivers, message):
smtpObj = smtplib.SMTP()
smtpObj.connect(mail_host, mail_port)
smtpObj.ehlo()
smtpObj.starttls()
smtpObj.login(mail_user, mail_pass)
smtpObj.sendmail(sender, receivers, message.as_string())
smtpObj.close()
def get_subject():
    subject = u'WeChat group [%s] chat log %s' % (group_name.decode('utf-8'), day)
return subject
def get_message(subject):
message = MIMEMultipart()
message['Subject'] = Header(subject, 'utf-8')
message['From'] = Header('<' + sender + '>', 'utf-8')
file_name = ('%s.%s.txt' % (group_name, day))
file_ab_path = os.path.join(path, file_name)
if not os.path.exists(file_ab_path):
        logging.warning('%s does not exist!' % file_ab_path)
return None
att = MIMEText(open(file_ab_path,'rb').read(),'base64','utf-8')
att["Content-Type"] = 'application/octet-stream'
att["Content-Disposition"] = 'attachment; filename=%s' % (file_name)
message.attach(att)
return message
def main():
receivers = to.split(' ')
subject = get_subject()
message = get_message(subject)
if message is not None:
logging.info(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
sendmail(receivers, message)
if __name__ == '__main__':
main()
|
import sqlite3
import time
############### Settings ####################
#DB Name
DB_NAME = "mandal_sensor.db"
#SQL File with Table Schema and Initialization Data
SQL_File_Name = "voltage_current.sql"
##############################################
#Connect or Create DB File
conn = sqlite3.connect(DB_NAME)
#Execute each SQL statement from the schema file (assumes one statement per line)
with open(SQL_File_Name, 'r') as SchemaFile:
for query in SchemaFile:
if query:
time.sleep(2) # need to update the timing as well
print("Executing script {}".format(query))
conn.execute(query)
print("All done")
conn.commit()
# Check the table
cursor = conn.execute("SELECT * from voltage_current")
for row in cursor:
print("id = ", row[0])
print("voltage = ", row[1])
print("current = ", row[2])
print("date = ", row[3])
print("*" * 10)
print()
conn.close()
|
dagen = "ma", "di", "wo", "do", "vr", "za", "zo"
vraag = input("Which day of the week do you choose? {} ".format(dagen))
i=0
while i < dagen.index(vraag)+1:
print(dagen[i])
i+=1 |
import thread, time, datetime, csv
import requests, json
session = requests.Session()
global totalBadRequests
totalBadRequests = 0
def input_thread(L):
raw_input()
L.append(None)
def do_rec():
global totalBadRequests
L = []
thread.start_new_thread(input_thread, (L,))
bad_requests = 0
good_requests = 0
while len(L) == 0 and bad_requests <= tolerance:
time.sleep(sleeptime)
r = session.get("https://api.twitch.tv/kraken/streams/" + streamer)
if r.status_code != requests.codes.ok:
print "!!!! Request Failed !!!! \a"
totalBadRequests += 1
bad_requests += 1
good_requests = 0
else:
good_requests += 1
if(good_requests >= 5):
bad_requests = 0
good_requests = 0
jsonObject = r.json()
if jsonObject is None or jsonObject['stream'] is None:
print "!!!! Stream is offline !!!!"
break
num_viewers = jsonObject['stream']['viewers']
num_followers = jsonObject['stream']['channel']['followers']
num_views = jsonObject['stream']['channel']['views']
currTime = datetime.datetime.time(datetime.datetime.now())
str_game = jsonObject['stream']['game']
if str_game is None:
str_game = "No Game"
print "Health Check: ", num_viewers, num_followers, num_views, datetime.datetime.time(datetime.datetime.now())
currRowToWrite = [num_viewers, num_followers, num_views, currTime, str_game.encode('utf8')]
wr.writerow(currRowToWrite)
streamer = raw_input("Please enter the streamer: ")
sleeptime = float(raw_input("Please input sleep time: "))
tolerance = int(raw_input("Please input bad request tolerance: "))
filename = streamer + "_" + str(datetime.datetime.now().time()).replace(":", "_").replace(".", "_") + ".csv"
resultFile = open(filename, "a")
wr = csv.writer(resultFile, delimiter=",")
rowToWrite = ["Viewers","Followers","Views","Timestamp","Game"]
wr.writerow(rowToWrite)
print "<Press \"Enter\" to stop>"
do_rec()
print "Closing csv and session"
session.close()
resultFile.close()
print "Total Bad Requests: ", totalBadRequests |
from game import Game
alpha = 0.60
gamma = 0.75
epsilon = 0.01
results = Game(alpha=alpha, gamma=gamma, epsilon=epsilon).run(
epochs=10, episodes=100)
with open(f'results_a{alpha}_{gamma}.md', 'w') as file:
file.write(''.join(results))
|
import math, collections, copy
from nltk.corpus import brown
BACKOFF_COEFFICIENT = .9
DISCOUNT = .35
STRIP_CHARS = "<>.\",?! "
class KneserBigramModel:
"""Kneser-Ney Backoff language model - Implements the Kneser-Ney model
with bigrams and backoffs to laplace unigram if the given bigram does
not exist in the training corpus."""
def __init__(self):
"""Initialize your data structures in the constructor."""
self.bigramCounts = collections.defaultdict(lambda : 0)
self.unigramCounts = collections.defaultdict(lambda : 1)
self.continuationCounts = collections.defaultdict(lambda: 0)
self.followingCounts = collections.defaultdict(lambda: 0)
self.total = 1
print "Training Language Model..."
self.train(brown.sents())
print "--Training Complete--"
def train(self, corpus):
""" Takes a corpus and trains your language model.
Compute any counts or other corpus statistics in this function.
"""
# TODO your code here
# Tip: To get words from the corpus, try
# for sentence in corpus.corpus:
# for datum in sentence.data:
# word = datum.word
for sentence in corpus:
previousWord = ""
for word in sentence:
word = word.strip(STRIP_CHARS)
word = word.lower()
currentWord = word
self.unigramCounts[currentWord] += 1
self.total += 1
if previousWord != "":
bigram = (previousWord, currentWord)
if bigram not in self.bigramCounts:
self.continuationCounts[currentWord] += 1
self.followingCounts[previousWord] += 1
self.bigramCounts[bigram] += 1
previousWord = currentWord
self.total += len(self.unigramCounts)
def score(self, sentence):
""" Takes a list of strings as argument and returns the log-probability of the
sentence using your language model. Use whatever data you computed in train() here.
"""
# TODO your code here
score = 0.0
previousWord = ""
newSentence = []
for word in sentence:
newSentence += word.split()
        for currentWord in newSentence:
currentWord = currentWord.strip(STRIP_CHARS)
currentWord = currentWord.lower()
if previousWord != "":
bigram = (previousWord, currentWord)
bigramCount = self.bigramCounts[bigram]
if bigramCount > 0:
score += math.log(max(self.bigramCounts[bigram] - DISCOUNT, 0)*len(self.bigramCounts) + DISCOUNT*self.followingCounts[previousWord]*self.continuationCounts[currentWord])
# Subtraction by 1 removes the add one count from the laplace
# smoothing
score -= math.log((self.unigramCounts[previousWord] -1) * len(self.bigramCounts))
else:
count = self.unigramCounts[currentWord]
score += math.log(count * BACKOFF_COEFFICIENT)
score -= math.log(self.total)
previousWord = currentWord
return -score
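if __name__ == '__main__':
    # Minimal usage sketch: the constructor above already trains on the Brown
    # corpus, and score() as written returns a negated log-probability, so
    # lower values mean the sentence is more probable under the model.
    model = KneserBigramModel()
    print model.score(["the", "quick", "brown", "fox"])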
|
33. Write a program to input all sides of a triangle and check whether the triangle is valid or not.
SOL:
side1=int(input('Enter 1st side of the triangle:'))
side2=int(input('Enter 2nd side of the triangle:'))
side3=int(input('Enter 3rd side of the triangle:'))
# A triangle is valid only if the sum of every pair of sides exceeds the third side
if (side1+side2)>side3 and (side2+side3)>side1 and (side1+side3)>side2:
    print("Triangle is valid")
else:
    print("Triangle is not valid")
|
#!/usr/bin/env python3
from datetime import datetime
from random import randint
from dateutil.relativedelta import relativedelta
# relativedelta comes from the third-party date handling library python-dateutil
# pip3 install python-dateutil
class Date:
"""
随机日期时间:
时间戳: 从1970年1月1号0点0分0秒到现在有多少秒(格林)
"""
CMD = {'y': 'years', 'm': 'months', 'd': 'days',
'H': 'hours', 'M': 'minutes', 'S': 'seconds', 'w': 'weeks'}
def __init__(self, range_str=None):
if range_str is None:
self.start = self.end = datetime.now()
else:
self.start, self.end = self.parse(range_str)
def rand_dt(self, start=None, end=None):
start = start or self.start
end = end or self.end
m = randint(int(start.timestamp()), int(end.timestamp()))
return datetime.fromtimestamp(m)
def rand(self, start=None, end=None, format="%F %T"):
return self.rand_dt(start, end).strftime(format)
@staticmethod
def parse(range_str):
"""
解析字符串生成起始日期和结束日期
2y 2年内(从2年前到现在的范围)
-2y 2年内(从2年前到现在的范围)
2m 2个月内
2d 2天内
2H 2时内
2M 2分钟内
2S 2秒内
2w 2周内
+2y 2年内(从现在到2年后的范围)
-5:-3y 5年前到3年前的范围
-5:3y 5年前到3年后的范围
3:5y 3年后到5年后的范围
"""
range_str = range_str.strip()
name = Date.CMD.get(range_str[-1], None)
if name is None:
            raise Exception("Invalid range string format")
range_str = range_str[:-1]
now = datetime.now()
if ':' in range_str:
s, e = range_str.split(':')
return (now + relativedelta(**{name: int(s)}),
now + relativedelta(**{name: int(e)}))
range_str = range_str if range_str[0] in '+-' else '-' + range_str
newTime = now + relativedelta(**{name: int(range_str)})
if now > newTime:
return newTime, now
return now, newTime
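if __name__ == '__main__':
    # Minimal usage sketch: Date("2y") draws a random moment from the last two
    # years, while Date("-5:-3y") draws one from five to three years ago.
    print(Date('2y').rand())
    print(Date('-5:-3y').rand(format='%Y-%m-%d'))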
|
from abc import abstractmethod
from typing import overload, Sequence, Generic, MutableSequence, Iterable, TypeVar
_T = TypeVar('_T')
class people(Sequence[_T], Generic[_T]):
@abstractmethod
def insert(self, index: int, object: _T) -> None: ...
@overload
@abstractmethod
def __getitem__(self, i: int) -> _T: ...
@overload
@abstractmethod
def __getitem__(self, s: slice) -> MutableSequence[_T]: ...
@overload
@abstractmethod
def __setitem__(self, i: int, o: _T) -> None: ...
@overload
@abstractmethod
def __setitem__(self, s: slice, o: Iterable[_T]) -> None: ...
@overload
@abstractmethod
def __delitem__(self, i: int) -> None: ...
@overload
@abstractmethod
def __delitem__(self, i: slice) -> None: ...
# Mixin methods
def append(self, object: _T) -> None: ...
def clear(self) -> None: ...
def extend(self, iterable: Iterable[_T]) -> None: ...
def reverse(self) -> None: ...
def pop(self, index: int = ...) -> _T: ...
def remove(self, object: _T) -> None: ...
def __iadd__(self, x: Iterable[_T]) -> MutableSequence[_T]: ... |
from cycloauth.storage import BaseStorage, BaseToken, BaseConsumer, key_secret_generator
from txmongo import MongoConnectionPool
from cyclone.web import HTTPError
from twisted.internet import defer
class MongoToken(BaseToken):
m_id = None
def to_dict(self):
ret = {
'key': self.key,
'secret': self.secret,
'callback': self.__dict__.get('callback', None),
'callback_confirmed': self.__dict__.get('callback_confirmed', False),
'verifier': self.__dict__.get('verifier', None)
}
if self.m_id:
ret['_id'] = self.m_id
return ret
@classmethod
def from_dict(cls, d):
if not d: return None
ret = cls(key=d['key'], secret=d['secret'])
if d.get('callback', None):
ret.set_callback(d['callback'])
ret.m_id = d.get('_id', None)
if d.get('verifier', None):
ret.set_verifier(d['verifier'])
return ret
class MongoConsumer(BaseConsumer):
m_id = None
def to_dict(self):
ret = {
'key': self.key,
'secret': self.secret,
'callback': self.callback
}
if self.m_id:
ret['_id'] = self.m_id
return ret
@classmethod
def from_dict(cls, d):
if not d: return None
ret = cls(key=d['key'], secret=d['secret'])
ret.callback = d.get('callback', None)
ret.m_id = d.get('_id', None)
return ret
class MongoDBStorage(BaseStorage):
"implements an storage mechanism for MongoDB"
request_token_factory = MongoToken
access_token_factory = MongoToken
consumer_factory = MongoConsumer
def __init__(self, settings):
self.settings = settings
self.access_token_collection = settings.get('oauth_access_token_collection', 'oauth_access_tokens')
self.consumer_collection = settings.get('oauth_consumer_collection', 'oauth_consumers')
        self.request_token_collection = settings.get('oauth_request_token_collection', 'oauth_request_tokens')
self.ensured_indexes = {}
@defer.inlineCallbacks
def add_consumer(self, key=None, secret=None, **kwargs):
yield self.mongo_ensure(self.consumer_collection, {'key': 1})
ks = yield key_secret_generator(self, 'get_consumer', key, secret)
ret = yield self.save_consumer(self.consumer_factory(key=ks['key'], secret=ks['secret'], **kwargs))
defer.returnValue(ret)
@defer.inlineCallbacks
def save_consumer(self, consumer):
yield self.mongo_ensure(self.consumer_collection, {'key': 1})
r = consumer.to_dict()
yield self.mongo_save(self.consumer_collection, r)
defer.returnValue(MongoConsumer.from_dict(r))
@defer.inlineCallbacks
def get_consumer(self, key):
yield self.mongo_ensure(self.consumer_collection, {'key': 1})
r = yield self.mongo_find_one_or_none(self.consumer_collection, {'key': key})
defer.returnValue(MongoConsumer.from_dict(r) if r else None)
@defer.inlineCallbacks
def remove_consumer(self, key):
yield self.mongo_ensure(self.consumer_collection, {'key': 1})
yield self.mongo_remove(self.consumer_collection, {'key': key})
defer.returnValue(True)
@defer.inlineCallbacks
def add_request_token(self, key=None, secret=None, **kwargs):
        yield self.mongo_ensure(self.request_token_collection, {'key': 1})
ks = yield key_secret_generator(self, 'get_request_token', key, secret)
ret = yield self.save_request_token(self.request_token_factory(key=ks['key'], secret=ks['secret'], **kwargs))
defer.returnValue(ret)
@defer.inlineCallbacks
def save_request_token(self, token):
yield self.mongo_ensure(self.request_token_collection, {'key': 1})
r = token.to_dict()
yield self.mongo_save(self.request_token_collection, r)
defer.returnValue(MongoToken.from_dict(r) if r else None)
@defer.inlineCallbacks
def get_request_token(self, key):
yield self.mongo_ensure(self.request_token_collection, {'key': 1})
r = yield self.mongo_find_one_or_none(self.request_token_collection, {'key': key})
defer.returnValue(MongoToken.from_dict(r))
@defer.inlineCallbacks
def remove_request_token(self, key):
yield self.mongo_ensure(self.request_token_collection, {'key': 1})
yield self.mongo_remove(self.request_token_collection, {'key': key})
defer.returnValue(True)
@defer.inlineCallbacks
def add_access_token(self, key=None, secret=None, **kwargs):
        yield self.mongo_ensure(self.access_token_collection, {'key': 1})
ks = yield key_secret_generator(self, 'get_access_token', key, secret)
ret = yield self.save_access_token(self.access_token_factory(key=ks['key'], secret=ks['secret'], **kwargs))
defer.returnValue(ret)
@defer.inlineCallbacks
def save_access_token(self, token):
yield self.mongo_ensure(self.access_token_collection, {'key': 1})
r = token.to_dict()
        yield self.mongo_save(self.access_token_collection, r)
defer.returnValue(MongoToken.from_dict(r))
@defer.inlineCallbacks
def get_access_token(self, key):
yield self.mongo_ensure(self.access_token_collection, {'key': 1})
r = yield self.mongo_find_one_or_none(self.access_token_collection, {'key': key})
defer.returnValue(MongoToken.from_dict(r) if r else None)
@defer.inlineCallbacks
def remove_access_token(self, key):
yield self.mongo_ensure(self.access_token_collection, {'key': 1})
        yield self.mongo_remove(self.access_token_collection, {'key': key})
defer.returnValue(True)
@property
@defer.inlineCallbacks
def db(self):
if not getattr(self, '_db', None):
pool = yield self.pool
self._db = getattr(pool, self.settings.get('oauth_mongo_database', 'oauth'))
defer.returnValue(self._db)
@property
@defer.inlineCallbacks
def pool(self):
if not getattr(self, '_pool', None):
mongo = yield MongoConnectionPool(
host=self.settings.get('oauth_mongo_host', '127.0.0.1'),
port=self.settings.get('oauth_mongo_port', 27017),
reconnect=self.settings.get('oauth_mongo_reconnect', True),
pool_size=self.settings.get('oauth_mongo_pool_size', 5))
self._pool = mongo
defer.returnValue(self._pool)
@defer.inlineCallbacks
def mongo_find_one_or_none(self, collection, query):
col = getattr((yield self.db), collection)
r = yield col.find_one(query)
if not r:
r = None
defer.returnValue(r)
@defer.inlineCallbacks
def mongo_insert(self, collection, *args, **kwargs):
col = getattr((yield self.db), collection)
r = yield col.insert(*args, **kwargs)
defer.returnValue(r)
@defer.inlineCallbacks
def mongo_save(self, collection, *args, **kwargs):
col = getattr((yield self.db), collection)
r = yield col.save(*args, **kwargs)
defer.returnValue(r)
@defer.inlineCallbacks
def mongo_remove(self, collection, *args, **kwargs):
col = getattr((yield self.db), collection)
r = yield col.remove(*args, **kwargs)
defer.returnValue(r)
@defer.inlineCallbacks
def mongo_ensure(self, collection, index):
        k = collection + ''.join(index.keys())
        if k in self.ensured_indexes:
            defer.returnValue(True)
        else:
            col = getattr((yield self.db), collection)
            yield col.ensureIndex(index)
            # Remember that this index has been ensured so it is skipped next time
            self.ensured_indexes[k] = True
            defer.returnValue(True)
|
# Generated by Django 3.1.1 on 2020-09-13 12:19
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('restauracja_app', '0004_typ'),
]
operations = [
migrations.RenameModel(
old_name='Typ',
new_name='Type',
),
migrations.AddField(
model_name='restauracja',
name='type',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='restauracja_app.type'),
),
]
|
import random
import os
from art import logo, vs
from game_data import data
clear = lambda: os.system('cls')
def choose_person(data):
accountA = random.choice(data)
accountB = random.choice(data)
while accountA == accountB:
accountB = random.choice(data)
print(accountA)
print(accountB)
# personA =
def format_data(account):
account_name = account["name"]
account_follower_count = account["follower_count"]
account_description = account["description"]
account_country = account["country"]
return f"{account_name}, a {account_description}, from {account_country}"
def check_answer(guess, a_followers, b_followers):
"""Take the ussers guess and follower counts and return if guess is right"""
if a_followers > b_followers:
return guess == "A"
else:
return guess == "B"
print(logo)
score = 0
game_should_continue = True
accountB = random.choice(data)
while game_should_continue:
accountA = accountB
accountB = random.choice(data)
while accountA == accountB:
accountB = random.choice(data)
print(f"Compare A: {format_data(accountA)}.")
print(vs)
print(f"Compare B: {format_data(accountB)}.")
guess = input("Who do you think has more followers?: 'A' or 'B': ").upper()
a_follower_count = accountA["follower_count"]
b_follower_count = accountB["follower_count"]
is_correct = check_answer(guess, a_follower_count, b_follower_count)
if is_correct:
score += 1
clear()
print(f"You're right. Current score = {score} \n")
else:
game_should_continue = False
print(f"You are wrong. The final score is = {score}")
play_again = input("Do you want to play again? 'yes' or 'no' ").lower()
if play_again == "yes":
score = 0
clear()
game_should_continue = True
# play_again = input("Do you want to play again? 'yes' or 'no' ").lower()
# if play_again == "yes":
# clear()
# game_should_continue = True
|
from random import randint
import random
class Playground(object):
size = (20, 20)
rows = size[0]
cols = size[1]
length = rows * cols
def __init__(self, size=size):
self.dim = size
self.borders = [(-1, i) for i in range(self.dim[1])] # top
self.borders += [(size[0], i) for i in range(self.dim[1])] # bottom
self.borders += [(i, -1) for i in range(self.dim[0])] # left
self.borders += [(i, size[0]) for i in range(self.dim[0])] # right
def convert_index(self, ind):
rows = ind // self.dim[0]
cols = ind % self.dim[1]
return rows, cols
def get_index(self, pos):
rows = pos[0]
cols = pos[1]
return rows * self.cols + cols
class Food:
def __init__(self):
self.position = Position((0, 0))
self.rows = Playground.size[0]
self.cols = Playground.size[1]
def change_pos(self, snake_body):
n = Playground.size[0]
m = Playground.size[1]
choices_set = []
[[choices_set.append((i, j)) for j in range(m)] for i in range(n)]
snake_set = [seg.get_pos() for seg in snake_body]
[choices_set.remove(seg) for seg in snake_set]
self.position = Position(random.choice(choices_set))
def get_pos(self):
return self.position.pos
class Direction:
up = (-1, 0)
dn = (1, 0)
lt = (0, -1)
rt = (0, 1)
get_direction = {'up': up, 'dn': dn, 'lt': lt, 'rt': rt}
def __init__(self, direc='up'):
self.dir = Direction.get_direction[direc]
def __add__(self, other):
return self.dir[0] + other.dir[0], self.dir[1] + other.dir[1]
def get_dir(self):
return self.dir
def is_zero(self):
return self.dir[0] == 0 and self.dir[1] == 0
class Position:
def __init__(self, origin=(0, 0)):
self.pos = origin
def __add__(self, other):
y = self.pos[0] + other.pos[0]
x = self.pos[1] + other.pos[1]
return Position(origin=(y, x))
def __sub__(self, other):
y = self.pos[0] - other.pos[0]
x = self.pos[1] - other.pos[1]
return Position(origin=(y, x))
def __truediv__(self, other: int):
y = self.pos[0] // other
x = self.pos[1] // other
return Position(origin=(y, x))
def get_pos(self):
return self.pos
class Snake(object):
self_collision = "Injuring itself"
wall_collision = "Wall collision"
def __init__(self, length=4, position=Playground.size):
self.__position = Position(position) / 2
self.__speed = Direction()
self.__length = length
self.__body = [self.__position + Position((i, 0)) for i in range(self.__length)]
self.__pg = Playground()
self.__borders = self.__pg.borders
self.__food = Food()
self.__food.change_pos(self.get_body())
def __change_food_pos(self):
self.__food.change_pos(self.get_body())
def __move_body(self): # TODO: it must be a private method
self.__body.insert(0, self.__position)
if self.__position.get_pos() == self.__food.get_pos():
self.__change_food_pos()
return
self.__body.pop()
def __is_injuring_itself(self, new_position):
segments = [segment.pos for segment in self.__body]
if new_position.get_pos() in segments:
return True
return False
def __is_colliding_wall(self, new_position):
if new_position.get_pos() in self.__borders:
return True
return False
def get_body(self):
return self.__body
def get_body_list(self):
return [segment.get_pos() for segment in self.get_body()]
def get_speed(self):
return self.__speed.dir
def get_position(self):
return self.__position.pos
def get_allowed_space(self):
return self.__pg.rows, self.__pg.rows
def get_seen_food_pos(self):
return self.__food.get_pos()
def turn(self, new_dir):
new_speed = self.__speed + Direction(direc=new_dir)
        # Ignore turns that would reverse the snake straight back onto itself
        if any(new_speed):
self.__speed = Direction(direc=new_dir)
def move(self):
new_position = self.__position + Position(self.__speed.get_dir())
if self.__is_injuring_itself(new_position):
return Snake.self_collision
if self.__is_colliding_wall(new_position):
return Snake.wall_collision
self.__position = new_position
self.__move_body()
def cli(self):
command = input("Input your command (h/j/k/l) or (q for quitting):")
if command == "h":
self.turn("lt")
elif command == "j":
self.turn("dn")
elif command == "k":
self.turn("up")
elif command == "l":
self.turn("rt")
elif command == "q":
return False
else:
# for the future
pass
return True
|
class Variables:
"""
Methods for Part A.
Contains the methods required to complete Part A.
Author: Connor McDermid
Date: 2019.10.15
"""
def get_name(self):
name = input("Please input your name: ")
return name
def get_age(self):
age = int(input("Please input your age: "))
return age
def get_last_birthday(self):
lastbirthday = int(input("Please input the year of your last birthday: "))
return lastbirthday
def __main__(self):
usrname = self.get_name()
usrage = self.get_age()
usrbirth = self.get_last_birthday()
print("{} is {} years old.".format(usrname, usrage))
print("Their last birthday was in {}".format(usrbirth))
print("Because of this, we can determine that they were born in {}".format(usrbirth - usrage))
return
def __init__(self):
pass
class Math:
"""
Methods for Part B.
Contains the methods required to complete Part B.
Author: Connor McDermid
Date: 2019.10.15
"""
def get_x(self):
return int(input("Please enter an integer: "))
def get_y(self):
return int(input("Please enter another integer: "))
def get_z(self):
return int(input("Please enter a third integer: "))
def product(self, x, y, z):
if z is None:
return x * y
else:
return x * y * z
def modulo(self, x, y):
return x % y
def __main__(self):
usrx = self.get_x()
usry = self.get_y()
usrz = self.get_z()
print("You entered {}, {}, and {}".format(usrx, usry, usrz))
print("The product of {} and {} is {}".format(usrx, usry, self.product(usrx, usry, None)))
print("The product of all three is {}".format(self.product(usrx, usry, usrz)))
print("{} mod {} is {}".format(usrx, usry, self.modulo(usrx, usry)))
print("{} mod {} is {}".format(usrx, usrz, self.modulo(usrx, usrz)))
print("{} mod {} is {}".format(usry, usrz, self.modulo(usry, usrz)))
def __init__(self):
pass
class Divider:
def divide(self, x, y):
return x / y
def parse_input(self):
usr = input("Please enter a fraction: ")
nums = usr.split("/")
return nums
def __main__(self):
usr = self.parse_input()
print("That fraction is equal to {}".format(self.divide(int(usr[0]), int(usr[1]))))
def __init__(self):
pass |
import numpy as np
import cv2
from graphics import *
class Postprocessor:
def postprocess(self, image):
#image = cv2.blur(image, (3,3))
_, image = cv2.threshold(image, 230, 255, cv2.THRESH_TOZERO)
if np.random.randint(0, 2) == 0:
image = cv2.erode(image, (1,1), iterations=np.random.randint(1, 5))
else:
image = cv2.erode(image, (3, 3), iterations=np.random.randint(1, 3))
pads = np.random.randint(10, 50, 4)
image = pad_image(image, pads[0], pads[1], pads[2], pads[3])
#image = cv2.adaptiveThreshold(image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 5, 0)
#image = cv2.GaussianBlur(image, (1, 1), 0)
#image = cv2.bilateralFilter(image, 9, 75, 75)
return image
|
# REFERENCE: https://github.com/seungeunrho/minimalRL
import gym
import collections
import random
import torch
import torch.nn as nn
import numpy as np
import math
import torch.nn.functional as F
import torch.optim as optim
from collections import namedtuple
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#Hyperparameters
learning_rate = 0.0005
gamma = 0.98
buffer_limit = 50000
batch_size = 32
class ReplayBuffer():
def __init__(self):
self.buffer = collections.deque(maxlen=buffer_limit)
def put(self, transition):
self.buffer.append(transition)
def sample(self, n):
mini_batch = random.sample(self.buffer, n)
s_lst, a_lst, r_lst, s_prime_lst, done_mask_lst = [], [], [], [], []
for transition in mini_batch:
s, a, r, s_prime, done_mask = transition
s_lst.append(s)
a_lst.append([a])
r_lst.append([r])
s_prime_lst.append(s_prime)
done_mask_lst.append([done_mask])
#print(goal_lst)
#print(goal_lst)
return torch.stack(s_lst), torch.tensor(a_lst), \
torch.tensor(r_lst), torch.stack(s_prime_lst), \
torch.tensor(done_mask_lst)
def size(self):
return len(self.buffer)
class Qnet(nn.Module):
def __init__(self):
super(Qnet, self).__init__()
self.conv1 = torch.nn.Conv2d(1, 5, kernel_size=3, stride=1, padding=1)
self.pool = torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
self.fc1 = nn.Linear(5*20*16, 256)
self.fc2 = nn.Linear(256, 3)
def forward(self, x):
x = F.relu(self.conv1(x))
x = self.pool(x)
x = x.view(-1, 5 * 20 *16)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
def sample_action(self, obs, epsilon):
#print('obs',obs)
out = self.forward(obs.unsqueeze(0).unsqueeze(0).float())
coin = random.random()
if coin < epsilon:
return random.randint(0,2)
else :
return out.argmax().item()
def train(q, q_target, memory, optimizer):
#for i in range(10):
s,a,r,s_prime,done_mask = memory.sample(batch_size)
r = r.to(device)
a = a.to(device)
done_mask = done_mask.to(device)
#print(s_prime)
#print(goal)
q_out = q(s.unsqueeze(1).float())
q_a = q_out.gather(1,a)
max_q_prime = q_target(s_prime.unsqueeze(1).float()).to(dtype=torch.float).max(1)[0].unsqueeze(1)
target = r + gamma * max_q_prime * done_mask
loss = F.smooth_l1_loss(q_a, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
def ner_cube(n_episodes_, batch_size_, buf_size_):
global buffer_limit
buffer_limit = buf_size_
global batch_size
batch_size = batch_size_
HindsightTransition = namedtuple('HindsightTransition', ('state', 'action', 'next_state', 'reward'))
env = gym.make('CubeCrashSparse-v0')
q = Qnet().to(device)
q_target = Qnet().to(device)
q_target.load_state_dict(q.state_dict())
memory = ReplayBuffer()
n_success = 0
print_interval = 100
score = 0.0
optimizer = optim.Adam(q.parameters(), lr=learning_rate)
succes_rate = []
for n_epi in range(n_episodes_):
epsilon = max(0.01, 0.08 - 0.01*(n_epi/200)) #Linear annealing from 8% to 1%
s = torch.tensor(np.mean(env.reset(),axis = 2)).to(device)
transitions = []
for t in range(600):
a = q.sample_action(s, epsilon)
s_prime, r, done, info = env.step(a)
s_prime = torch.tensor(np.mean(s_prime,axis = 2)).to(device)
done_mask = 0.0 if done else 1.0
memory.put((s,a,r/1.0,s_prime, done_mask))
transitions.append(HindsightTransition(s, a, s_prime, r))
s = s_prime.clone()
score += r
if done:
if r == 1:
n_success += 1
break
if memory.size()>2000:
train(q, q_target, memory, optimizer)
if n_epi%print_interval==0 and n_epi!=0:
q_target.load_state_dict(q.state_dict())
succes_rate.append(n_success/print_interval)
print('# of episode : {}, avg score : {:.1f}, success rate : {:.1f}%, buffer size : {}'.format(n_epi, score/print_interval , n_success/print_interval * 100, memory.size()))
n_success = 0.0
score = 0.0
env.close()
return succes_rate
|
import pytest
from expects import *
from os import walk
from pmp.experiments import Experiment, ExperimentConfig
from pmp.experiments.election_config import ElectionConfig
from pmp.rules import Bloc
@pytest.fixture
def experiment_config(approval_profile):
config = ExperimentConfig()
config.add_candidates(approval_profile.candidates)
config.add_voters(approval_profile.preferences)
return config
@pytest.fixture
def experiment(experiment_config):
experiment = Experiment(experiment_config)
return experiment
def generated_files(path):
"""Helper returning files generated by an experiment"""
for _, dirs, files in walk(path):
if len(dirs) > 0:
return []
return files
def test_run_experiment_set_election_precedence(experiment, tmpdir):
experiment.set_generated_dir_path(tmpdir)
experiment.set_election(Bloc(), 2)
experiment.set_result_filename('bloc')
experiment.run(n=1, log_on=False, save_win=True, split_dirs=False)
files = generated_files(tmpdir)
expect(len(files)).to(equal(1))
election_id = files[0].split('_')[0]
expect(election_id).to(equal('bloc'))
def test_run_experiment_add_election_precedence(experiment, tmpdir):
experiment.set_generated_dir_path(tmpdir)
experiment.set_election(Bloc(), 2)
experiment.set_result_filename('bloc')
experiment.add_election(Bloc(), 1, 'other')
experiment.run(n=1, log_on=False, save_win=True, split_dirs=False)
files = generated_files(tmpdir)
expect(len(files)).to(equal(1))
election_id = files[0].split('_')[0]
expect(election_id).to(equal('other'))
def test_run_experiment_elect_configs_precedence(experiment, tmpdir):
experiment.set_generated_dir_path(tmpdir)
experiment.set_election(Bloc(), 2)
experiment.set_result_filename('bloc')
experiment.add_election(Bloc(), 1, 'other')
election_configs = [ElectionConfig(Bloc(), 1, 'moreOther')]
experiment.run(n=1, log_on=False, save_win=True, elect_configs=election_configs, split_dirs=False)
files = generated_files(tmpdir)
expect(len(files)).to(equal(1))
election_id = files[0].split('_')[0]
expect(election_id).to(equal('moreOther'))
def test_inout_files(experiment):
expect(experiment._Experiment__generate_inout).to(be_false)
experiment.set_inout_filename('inout_fname')
expect(experiment._Experiment__generate_inout).to(be_true)
|
"""Analyse tweets"""
import webhandler
import numpy as np
def edit_dist(a,b):
# hack to avoid recognising these as same
if a == 'Entertainment' and b == 'Oystertainment':
return 10
elif a == 'Oystertainment' and b == 'Entertainment':
return 10
sol=np.zeros((len(a),len(b)))
for i in range(len(a)):
for j in range(len(b)):
add=0
if a[i]==b[j]:
add += 1
if i==0 or j==0:
sol[i][j]=add
if i>0:
sol[i][j]=max(sol[i][j],sol[i-1][j])
if j>0:
sol[i][j]=max(sol[i][j],sol[i][j-1])
else:
if add:
sol[i][j]=sol[i-1][j-1]+1
else:
sol[i][j]=max(sol[i][j-1],sol[i-1][j])
# return (sol[len(a)-1][len(b)-1] - abs(len(a) - len(b))) / len(b)
return (len(a) + len(b) - 2 * sol[len(a) - 1][len(b) - 1]) / 2
def clean(word):
return word.replace(',', '').replace('.', '')
class SentimentAnalyser(object):
def __init__(self):
self.negative_words = webhandler.get_negative_words()
self.neutral_words = webhandler.get_neutral_words()
self.positive_words = webhandler.get_positive_words()
self.companies = webhandler.get_company_info()
self.comparison_words = ['better', 'worse', 'prefer']
self.company_names = [c.name for c in self.companies]
def splitt(self, tweet):
subject1 = None
subject2 = None
negated = False
for word in tweet.split(" "):
if word.upper() == "WORSE":
negated = not negated
if word.lower() in ['not', 'don\'t']:
negated = not negated
cs = self.tweet_subjects(tweet)
for cand in cs:
if subject1 is None:
subject1 = cand
else:
subject2 = cand
        # Order the subjects by their position in the tweet; guard against a
        # missing second subject, which would make find() raise a TypeError
        if subject1 is not None and subject2 is not None:
            if tweet.find(subject1) > tweet.find(subject2):
                subject1, subject2 = subject2, subject1
if negated:
return [(subject1, -1), (subject2, 1)]
else:
return [(subject1, 1), (subject2, -1)]
def analyse_tweet(self, tweet, multi=False, verbose=False):
"""Analyse a tweet, extracting the subject and sentiment"""
sentiment = 0
# subject = self.tweet_subject(tweet)
subjects = self.tweet_subjects(tweet)
subject = subjects[0] if len(subjects) > 0 else "NONE"
negated = False
for word in tweet.split(" "):
if word in self.comparison_words:
return self.splitt(tweet)
for word in tweet.split(" "):
if clean(word) in self.positive_words:
sentiment = sentiment + 1
if clean(word) in self.negative_words:
sentiment = sentiment - 1
if clean(word).lower() in ['not', 'don\'t']:
negated = not negated
if verbose:
print(word, sentiment)
if sentiment < 0:
sentiment = -1
elif sentiment > 0:
sentiment = 1
if negated:
if sentiment != 0:
sentiment = -sentiment
else:
sentiment = -1
if verbose:
print(tweet, subjects, sentiment)
if multi:
return [(s, sentiment) for s in subjects]
else:
return [(subject, sentiment)]
def tweet_subject(self, tweet):
best = None
best_my = None
best_score = -1
for name in self.company_names:
candidates = tweet.split(' ')
if ' ' in name:
candidates = [a + ' ' + b for a, b in zip(candidates, candidates[1:])]
for c in candidates:
score = edit_dist(c, name)
if score > best_score:
best_score = score
best = name
best_my = c
# print(best, best_my, best_score)
return best
def remove_stop(self, words):
res = []
stop_words = ['i', 'is', 'that', 'ok', 'good', 'okay', 'bad',
'cool', 'not', 'do', 'to', 'worse', 'than',
'am', 'My', 'my', 'a', 'had', 'and', 'so',
'are', 'about', 'don\'t']
for word in words:
if not word.lower() in stop_words:
res.append(word)
return res
def tweet_contains(self, tweet, obj):
candidates = self.remove_stop(tweet.split(' '))
if ' ' in obj:
candidates = [a + ' ' + b for a, b in zip(candidates, candidates[1:])]
for c in candidates:
if edit_dist(c, obj) < (3 if (len(c) < 7 or len(obj) < 7) else 3.1):
if (len(obj) > 4 and len(c) > 4) or c == obj:
if c != obj:
print(c, obj, edit_dist(c, obj))
return True
return False
def tweet_subjects(self, tweet):
res = []
for company in self.companies:
words = [company.name] + [p.name for p in company.products]
recognised = False
for word in words:
recognised = recognised or self.tweet_contains(tweet, word)
if recognised:
res.append(company.name)
return res
|
employee_dict = {
'employee':{
'first_name':'John', 'last_name':'Doe', 'email':'john_doe@fake.com', 'phone':'123456789'
},
'devices': [
{'model':'HP Laptop', 'serial':'123', 'asset_tag':'456'},
{'model':'Surface Tablet', 'serial':'789', 'asset_tag':'000'}
]
}
# employee_dict = {
# 'employee':{
# 'first_name':'', 'last_name':'', 'email':'', 'phone':''
# },
# 'devices': []
# }
# employee_dict['employee']['first_name'] = employee[0]
# employee_dict['employee']['last_name'] = employee[1]
# employee_dict['devices'].append({'model': employee[2], 'serial': employee[3], 'asset_tag': employee[4]})
# employee_entry(cursor, employee_dict)
# employee_lookup(cursor, employee_dict)
# device_entry(cursor, employee_dict)
# device_lookup(cursor, employee_dict)
# device_checkin(cursor, employee_dict) |
# coding: utf-8
import os
import sys
import random
import math
import json
ROOT_DIR = os.getcwd()
with open('C0026_2.json', 'r') as infile:
    json_decode = json.load(infile)
    for key in json_decode:
        print("key: ", key)
        print("value: ", json_decode[key])
        if key == "x":
            print("value: ", json_decode[key])
            print("add to value : ", 10 + json_decode[key]) |
from __future__ import division, print_function, absolute_import
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.contrib.layers.python.layers import initializers
import math
def get_num_params():
from functools import reduce
from operator import mul
num_params = 0
print('All trainable variable: ')
for variable in tf.trainable_variables():
print('-- ', variable.name)
shape = variable.get_shape()
num_params += reduce(mul, [dim.value for dim in shape], 1)
return num_params
def compute_error(real,fake):
return tf.reduce_mean(tf.abs(real-fake), reduction_indices=[0,1,2,3])
class Generator(object):
def __init__(self, nin, nout_ch, first_layer_ch, bottleneck_sp=8, bottleneck_ch=256, res_block_num=4):
self.nin = nin # assuming nin is 'NHWC' format
self.nout_ch = nout_ch
self.first_layer_ch = first_layer_ch
self.bottleneck_sp = bottleneck_sp
self.bottleneck_ch = bottleneck_ch
self.res_block_num = res_block_num
self.nin_sp = nin.get_shape().as_list()[1]
self.nin_ch = nin.get_shape().as_list()[3]
self.encoder_layer_num = int(math.log(self.nin_sp/bottleneck_sp, 2)) + 1
self.decoder_layer_num = self.encoder_layer_num
self.layers = []
self._build_net()
def get_network_output(self):
return self.layers[-1]
def _build_net(self):
print('Constructing generator with resolution of %dx%d' % (self.nin_sp,self.nin_sp))
self.layers = []
with tf.variable_scope('encoder_in'):
net = slim.conv2d(self.nin, self.first_layer_ch, [1,1], stride=1,
padding='SAME',
weights_initializer=initializers.xavier_initializer_conv2d(),
weights_regularizer=None,
rate=1, normalizer_fn=slim.batch_norm, activation_fn=tf.nn.leaky_relu,
scope='conv0')
self.layers.append(net)
print('-- Layer %d: ' % len(self.layers), 'encoder_in ', self.layers[-1].get_shape().as_list())
for i in range(1, self.encoder_layer_num, 1):
sp = self.layers[-1].get_shape().as_list()[-2]
with tf.variable_scope('encoder_%dx%d' % (sp, sp)):
net = slim.conv2d(self.layers[-1], min(self.first_layer_ch*(2**i), self.bottleneck_ch), [4,4],
stride=2, padding='SAME',
weights_initializer=initializers.xavier_initializer_conv2d(),
weights_regularizer=None,
rate=1, normalizer_fn=slim.batch_norm, activation_fn=tf.nn.leaky_relu,
scope='conv0')
self.layers.append(net)
print('-- Layer %d: ' % len(self.layers), 'encoder_%dx%d ' % (sp, sp), self.layers[-1].get_shape().as_list())
for i in range(self.res_block_num):
with tf.variable_scope('residual_block_%d' % i):
net = slim.conv2d(self.layers[-1], self.bottleneck_ch, [3,3], stride=1, padding='SAME',
weights_initializer=initializers.xavier_initializer_conv2d(),
weights_regularizer=None,
rate=1, normalizer_fn=None, activation_fn=tf.nn.leaky_relu,
scope='conv0')
net = tf.add(net, self.layers[-1])
self.layers.append(net)
print('-- Layer %d: ' % len(self.layers), 'residual_block_%d ' % i, self.layers[-1].get_shape().as_list())
for i in range(self.decoder_layer_num-1, 0, -1):
sp = self.layers[-1].get_shape().as_list()[-2]
with tf.variable_scope('decoder_%dx%d' % (sp*2, sp*2)):
net = tf.image.resize_bilinear(self.layers[-1], (sp*2, sp*2), align_corners=True)
net = slim.conv2d(net, min(self.first_layer_ch*(2**i), self.bottleneck_ch), [3,3],
stride=1, padding='SAME',
weights_initializer=initializers.xavier_initializer_conv2d(),
weights_regularizer=None,
rate=1, normalizer_fn=slim.batch_norm, activation_fn=tf.nn.relu,
scope='conv0')
net = tf.concat([net, self.layers[i-1], tf.image.resize_area(self.nin, (sp*2,sp*2), align_corners=False)], axis=3)
net = slim.conv2d(net, min(self.first_layer_ch*(2**i), self.bottleneck_ch), [3,3],
stride=1, padding='SAME',
weights_initializer=initializers.xavier_initializer_conv2d(),
weights_regularizer=None,
rate=1, normalizer_fn=slim.batch_norm, activation_fn=tf.nn.relu,
scope='conv1')
self.layers.append(net)
print('-- Layer %d: ' % len(self.layers), 'decoder_%dx%d ' % (sp*2, sp*2), self.layers[-1].get_shape().as_list())
with tf.variable_scope('decoder_out'):
net = slim.conv2d(self.layers[-1], self.nout_ch, [1,1], stride=1, padding='SAME',
weights_initializer=initializers.xavier_initializer_conv2d(),
rate=1, activation_fn=tf.nn.sigmoid, scope='conv0')
self.layers.append(net)
print('-- Layer %d: ' % len(self.layers), 'decoder_out ', self.layers[-1].get_shape().as_list())
class Discriminator(object):
def __init__(self, nin, cond, first_layer_ch, disc_patch_res=32):
self.nin = nin
self.cond = cond
self.first_layer_ch = first_layer_ch
self.nin_sp = nin.get_shape().as_list()[1]
self.nin_ch = nin.get_shape().as_list()[3]
self.cond_sp = cond.get_shape().as_list()[1]
self.cond_ch = cond.get_shape().as_list()[3]
# self.first_layer_ch = self.nin_ch+self.cond_ch
self.disc_patch_res = disc_patch_res
self.encoder_layer_num = int(math.log(self.nin_sp / disc_patch_res, 2))+1
self.layers = []
self._build_net()
def get_network_output(self):
return self.layers[-1]
def _build_net(self):
print('Constructing discriminator with resolution of %dx%d' % (self.nin_sp,self.nin_sp))
self.layers = []
nin = tf.concat([self.nin, self.cond], axis=3)
with tf.variable_scope('encoder_in'):
net = slim.conv2d(nin, self.first_layer_ch, [1,1], stride=1,
padding='SAME',
weights_initializer=tf.random_normal_initializer(0, 0.02),
weights_regularizer=slim.l2_regularizer(0.0001),
rate=1, normalizer_fn=None, activation_fn=tf.nn.leaky_relu,
scope='conv0')
self.layers.append(net)
print('-- Layer %d: ' % len(self.layers), 'encoder_in ', self.layers[-1].get_shape().as_list())
for i in range(1, self.encoder_layer_num, 1):
sp = self.layers[-1].get_shape().as_list()[-2]
with tf.variable_scope('encoder_%dx%d' % (sp, sp)):
net = slim.conv2d(self.layers[-1], self.first_layer_ch*(2**i), [4,4],
stride=2, padding='SAME',
weights_initializer=tf.random_normal_initializer(0, 0.02),
weights_regularizer=slim.l2_regularizer(0.0001),
rate=1, normalizer_fn=slim.batch_norm, activation_fn=tf.nn.leaky_relu,
scope='conv0')
self.layers.append(net)
print('-- Layer %d: ' % len(self.layers), 'encoder_%dx%d ' % (sp, sp), self.layers[-1].get_shape().as_list())
with tf.variable_scope('encoder_out'):
net = slim.conv2d(self.layers[-1], 1, [1,1], stride=1, padding='SAME',
weights_initializer=tf.random_normal_initializer(0, 0.02),
weights_regularizer=slim.l2_regularizer(0.0001),
rate=1, normalizer_fn=None, activation_fn=tf.nn.sigmoid,
scope='conv0')
self.layers.append(net)
print('-- Layer %d: ' % len(self.layers), 'encoder_out ', self.layers[-1].get_shape().as_list())
|
import math
import os
import random
import re
import sys
def countBinaryOnes(n):
# Converting number to binary
# Python's bin functions returns binary starting with '0b'
# [2:] removes the first two characters of the string
binary_n = str(bin(n))[2:]
countOnes = 0
consecOnes = 0
for i in range(len(binary_n)):
if binary_n[i] == '0':
countOnes = 0
else:
countOnes += 1
consecOnes = max(consecOnes, countOnes)
print(consecOnes)
if __name__ == '__main__':
n = int(input())
countBinaryOnes(n)
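# For example, n = 13 is '1101' in binary (bin(13) == '0b1101'), so the longest
# run of consecutive ones is 2 and countBinaryOnes(13) prints 2.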
|
#!/usr/bin/env python3
#
# The Qubes OS Project, http://www.qubes-os.org
#
# Copyright (C) 2017 Bahtiar `kalkin-` Gadimov <bahtiar@gadimov.de>
# Copyright (C) 2017 itinerarium <code@0n0e.com>
# Copyright (C) 2016 Jean-Philippe Ouellet <jpo@vt.edu>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# pylint: disable=import-error
''' Sends notifications via Gio.Notification when something is Copy-Pasted
via Qubes RPC '''
# pylint: disable=invalid-name,wrong-import-position
import asyncio
import math
import os
import time
import gi
gi.require_version('Gtk', '3.0') # isort:skip
from gi.repository import Gtk, Gio # isort:skip
import gbulb
import pyinotify
gbulb.install()
class EventHandler(pyinotify.ProcessEvent):
# pylint: disable=arguments-differ
def my_init(self, loop=None, gtk_app=None):
''' This method is called from ProcessEvent.__init__(). '''
self.notification_id = "org.qubes.qui.clipboard"
self.gtk_app = gtk_app
self._copy()
self.loop = loop if loop else asyncio.get_event_loop()
def _copy(self, vmname: str = None):
''' Sends Copy notification via Gio.Notification
'''
if vmname is None:
with open(FROM, 'r') as vm_from_file:
vmname = vm_from_file.readline().strip('\n')
size = clipboard_formatted_size()
body = "Qubes Clipboard fetched from VM: <b>'{0}'</b>\n" \
"Copied <b>{1}</b> to the clipboard.\n" \
"<small>Press Ctrl-Shift-v to copy this clipboard into dest" \
" VM's clipboard.</small>".format(vmname, size)
self._notify(body)
def _paste(self):
''' Sends Paste notification via Gio.Notification.
'''
body = "Qubes Clipboard has been copied to the VM and wiped.<i/>\n" \
"<small>Trigger a paste operation (e.g. Ctrl-v) to insert " \
"it into an application.</small>"
self._notify(body)
def _notify(self, body):
# pylint: disable=attribute-defined-outside-init
notification = Gio.Notification.new("Qubes Clipboard")
notification.set_body(body)
notification.set_priority(Gio.NotificationPriority.NORMAL)
self.gtk_app.send_notification(self.notification_id, notification)
def process_IN_CLOSE_WRITE(self, _):
''' Reacts to modifications of the FROM file '''
with open(FROM, 'r') as vm_from_file:
vmname = vm_from_file.readline().strip('\n')
if vmname == "":
self._paste()
else:
self._copy(vmname=vmname)
def process_IN_MOVE_SELF(self, _):
''' Stop loop if file is moved '''
self.loop.stop()
def process_IN_DELETE(self, _):
''' Stop loop if file is deleted '''
self.loop.stop()
def clipboard_formatted_size() -> str:
units = ['B', 'KiB', 'MiB', 'GiB']
try:
file_size = os.path.getsize(DATA)
except OSError:
return '? bytes'
else:
if file_size == 1:
formatted_bytes = '1 byte'
else:
formatted_bytes = str(file_size) + ' bytes'
if file_size > 0:
magnitude = min(
int(math.log(file_size) / math.log(2) * 0.1), len(units) - 1)
if magnitude > 0:
return '%s (%.1f %s)' % (formatted_bytes,
file_size / (2.0**(10 * magnitude)),
units[magnitude])
return '%s' % (formatted_bytes)
DATA = "/var/run/qubes/qubes-clipboard.bin"
FROM = "/var/run/qubes/qubes-clipboard.bin.source"
class NotificationApp(Gtk.Application):
def __init__(self, **properties):
super().__init__(**properties)
self.set_application_id("org.qubes.qui.clipboard")
self.register() # register Gtk Application
def main():
loop = asyncio.get_event_loop()
mask = pyinotify.ALL_EVENTS
gtk_app = NotificationApp()
wm = pyinotify.WatchManager()
while True:
if not os.path.exists(DATA):
time.sleep(0.5)
else:
wm.add_watch(FROM, mask)
handler = EventHandler(loop=loop, gtk_app=gtk_app)
pyinotify.AsyncioNotifier(wm, loop, default_proc_fun=handler)
loop.run_forever()
if __name__ == '__main__':
main()
|
def ghj(y):
if y > 0 :
return y
else:
return y**2
print(ghj(99-588))
if 14 <= 18:
pass
def pto():
pass |
#!/usr/bin/env python3
import sys
import pycalculix as pyc
# Model of a pinned plate with 3 pins
# one will be force, the others will be fixed
proj_name = 'pinned-plate'
model = pyc.FeaModel(proj_name)
model.set_units('in')
# set whether or not to show gui plots
show_gui = True
if '-nogui' in sys.argv:
show_gui = False
# set element shape
eshape = 'quad'
if '-tri' in sys.argv:
eshape = 'tri'
# pin locations
pin1 = [0, 0]
pin2 = [pin1[0], 4]
pin3 = [4, 8]
# dimensions for drawing parts
pinhole_rad = 0.25
pin_rad = pinhole_rad - .015
width_plate = 1
near_hole_th = 0.5*width_plate
left_y = pin1[1] - near_hole_th
left_x = pin1[0] - near_hole_th
pinhole_dist = pin2[1] - pin1[1]
# make main part
part = pyc.Part(model)
part.goto(left_x, left_y)
part.draw_line_ax(pinhole_dist + width_plate)
part.draw_line_rad(width_plate)
part.draw_line_ax(-width_plate)
part.draw_line_ax(-(pinhole_dist-width_plate))
part.draw_line_rad(pin3[0] - pin1[0] - width_plate)
part.draw_line_ax(pin3[1] - pin1[0])
part.draw_line_rad(width_plate)
part.draw_line_ax(-width_plate)
part.draw_line_ax(-(pin3[1] - pin1[1]))
part.draw_line_to(left_x, left_y)
fillet_arcs = part.fillet_all(near_hole_th)
# make pinholes
holes = []
hole_arcs = []
for pin_point in [pin1, pin2, pin3]:
hole = part.draw_hole(pin_point[0], pin_point[1], pinhole_rad)
hole_arcs += hole
holes.append(hole)
# make pins
pin_parts = []
pins = []
pin_arcs = []
for pin_point in [pin1, pin2, pin3]:
pin = pyc.Part(model)
arcs = pin.draw_circle(pin_point[0], pin_point[1], pin_rad)
#pin.chunk(exclude_convex=False)
pins.append(arcs)
pin_parts.append(pin)
pin_arcs += arcs
# set divisions on pin holes and pins
all_arcs = fillet_arcs + hole_arcs + pin_arcs
model.set_ediv(all_arcs, 10)
# plot model
model.plot_areas(proj_name+'_prechunk_areas', label=False,
display=show_gui)
part.chunk('ext')
model.plot_areas(proj_name+'_areas', label=False, display=show_gui)
model.plot_parts(proj_name+'_parts', display=show_gui)
model.plot_points(proj_name+'_points', display=show_gui)
model.plot_lines(proj_name+'_lines', label=False, display=show_gui)
# set loads and constraints
pin_vert_pts = model.get_items(['P40','P42', 'P45', 'P47'])
pin_horiz_pts = model.get_items(['P38','P41', 'P43', 'P46'])
top_pin_vert_pts = model.get_items(['P50', 'P52'])
top_pin_pt = model.get_item('P50')
model.set_constr('fix',pin_horiz_pts,'x')
model.set_constr('fix',pin_vert_pts,'y')
model.set_constr('fix',top_pin_vert_pts,'y')
model.set_load('force',top_pin_pt,-500,'x')
model.set_gravity(9.81, [part] + pin_parts)
# set part material
mat = pyc.Material('steel')
youngs = 210*(10**9)
mat.set_mech_props(7800, youngs, 0.3)
model.set_matl(mat, pin_parts)
model.set_matl(mat, part)
# set contact
factor = 5 # can be between 5 and 50
kval = youngs*factor
for (pin, hole) in zip(pins, holes):
model.set_contact_linear(pin, hole, kval, True)
# mesh the model
model.set_eshape(eshape, 2)
model.set_etype('plstress', part, 0.1)
model.set_etype('plstress', pin_parts, 0.1)
model.mesh(0.5, 'gmsh')
model.plot_elements(proj_name+'_elem', display=show_gui)
model.plot_constraints(proj_name+'_constr', display=show_gui)
# make and solve the model
prob = pyc.Problem(model, 'struct')
prob.solve()
# Plot results
fields = 'Sx,Sy,S1,S2,S3,Seqv,ux,uy,utot' # store the fields to plot
fields = fields.split(',')
for field in fields:
fname = proj_name+'_'+field
prob.rfile.nplot(field, fname, display=False)
model.view.select(part)
model.view.allsel_under('parts')
for field in fields:
fname = proj_name+'_PART_'+field
prob.rfile.nplot(field, fname, display=False)
|
import uuid
from django.db import models
from django.contrib.auth.models import User
class Movie(models.Model):
uuid = models.UUIDField(primary_key=True, editable=True)
title = models.CharField(max_length=256)
description = models.TextField()
genres = models.CharField(max_length=256, null=True, blank=True)
def __str__(self):
return str(self.uuid)
class Meta:
verbose_name = 'Movie'
verbose_name_plural = 'Movies'
class Collection(models.Model):
uuid = models.UUIDField(
primary_key=True, editable=False, default=uuid.uuid4)
title = models.CharField(max_length=256, unique=True)
description = models.TextField()
movies = models.ManyToManyField(Movie)
def __str__(self):
return str(self.uuid)
class Meta:
verbose_name = 'Collection'
verbose_name_plural = 'Collections'
class UserCollection(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
collections = models.ManyToManyField(Collection)
def __str__(self):
return str(self.user.username)
class Meta:
verbose_name = 'UserCollection'
verbose_name_plural = 'UserCollections'
|
# coding: utf-8
# In[94]:
graph = {
'A':{'B':5, 'C':1},
'B':{'A':5, 'C':2, 'D':1},
'C':{'A':1, 'B':2, 'D':4, 'E':8},
'D':{'B':1, 'C':4, 'E':3, 'F':6},
'E':{'C':8, 'D':3},
'F':{'D':6}
}
# In[104]:
import heapq
parent_dict = {}
start_node = 'A'
visited = set()
pqueue = []
# each queue entry is (distance_from_start, node, parent_node)
heapq.heappush(pqueue, (0, start_node, 'N/A'))
while len(pqueue) > 0:
    distance, current_node, parent_node = heapq.heappop(pqueue)
    print((distance, current_node))
    if current_node not in visited:
        parent_dict[current_node] = (distance, parent_node)
        visited.add(current_node)
        for neighbor, weight in graph[current_node].items():
            if neighbor not in visited:
                # push the cumulative distance from the start node (Dijkstra),
                # not just the weight of the single edge
                heapq.heappush(pqueue, (distance + weight, neighbor, current_node))
# In[105]:
parent_dict
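# In[ ]:
# A small added sketch (not one of the original notebook cells): walk parent_dict
# backwards to recover the shortest path from start_node to a target node,
# assuming the target was reached by the search above.
def reconstruct_path(parent_dict, target):
    path = [target]
    while parent_dict[path[-1]][1] != 'N/A':
        path.append(parent_dict[path[-1]][1])
    return list(reversed(path))
print(reconstruct_path(parent_dict, 'F'))  # e.g. ['A', 'C', 'B', 'D', 'F']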
|
from wpilib.command import TimedCommand
class AutoBallIntake(TimedCommand):
'''
ball intake command that can be used for autonomous.
'''
def __init__(self, motorspeed, timeoutInSeconds):
super().__init__("AutoBallIntake", timeoutInSeconds)
self.requires(self.getRobot().doublemotor)
self.motorspeed = motorspeed
def initialize(self):
self.getRobot().doublemotor.setdoublemotor(self.motorspeed)
def end(self):
self.getRobot().doublemotor.setdoublemotor(0) |
# Problem description
# Given an integer array numbers, complete the solution function so that it returns,
# in ascending order, every number that can be made by adding two values taken from
# different indices of numbers.
# Constraints
# The length of numbers is between 2 and 100.
# Every number in numbers is between 0 and 100.
# Example input/output
# numbers         result
# [2,1,3,4,1]     [2,3,4,5,6,7]
# [5,0,2,7]       [2,5,7,9,12]
# Explanation of example #1
# 2 = 1 + 1 (numbers contains two 1s).
# 3 = 2 + 1.
# 4 = 1 + 3.
# 5 = 1 + 4 = 2 + 3.
# 6 = 2 + 4.
# 7 = 3 + 4.
# Therefore [2,3,4,5,6,7] should be returned.
# Explanation of example #2
# 2 = 0 + 2.
# 5 = 5 + 0.
# 7 = 0 + 7 = 5 + 2.
# 9 = 2 + 7.
# 12 = 5 + 7.
# Therefore [2,5,7,9,12] should be returned.
# Approach: take every sum of two elements from the list, drop duplicates, and sort
# (a set-based alternative sketch is added after the solution below).
def solution(numbers):
answer = []
for i in range(len(numbers)):
for j in range(i+1,len(numbers)):
if (numbers[j] + numbers[i]) not in answer:
answer.append(numbers[j] + numbers[i])
answer.sort()
return answer
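# An added alternative sketch of the same idea (not part of the original solution):
# collect every pairwise sum in a set so duplicates disappear automatically, then
# sort once at the end. The helper name solution_with_set is ours.
from itertools import combinations
def solution_with_set(numbers):
    return sorted({a + b for a, b in combinations(numbers, 2)})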
print(solution([3,2,5])) |
from django.urls import path
from orders.views import OrdertListView,OrderUpdateView,OrderDetailView
app_name = 'products'
urlpatterns = [
path('', OrdertListView.as_view(), name='index'),
path('update-order/<int:pk>/', OrderUpdateView.as_view(), name='update_order'),
path('detail/<int:pk>/', OrderDetailView.as_view(), name='detail'),
]
|
import data_utils
from config.Deconfig import Deconfig
import pickle
ll = data_utils.get_some_captions(5000)
max_len = 0  # renamed so the built-in max() is not shadowed
for l in ll:
    h = l.split()
    if max_len < len(h):
        max_len = len(h)
print(max_len)
|
from __future__ import annotations
from typing import Sequence, Optional, Any
import numpy as np
import tensorflow as tf
from sknlp.vocab import Vocab
from .nlp_dataset import NLPDataset
from .bert_mixin import BertDatasetMixin
from .utils import serialize_example
class ClassificationDataset(NLPDataset):
def __init__(
self,
vocab: Vocab,
labels: Sequence[str],
segmenter: Optional[str] = None,
X: Optional[Sequence[Any]] = None,
y: Optional[Sequence[Any]] = None,
csv_file: Optional[str] = None,
in_memory: bool = True,
has_label: bool = True,
is_multilabel: bool = False,
is_pair_text: bool = False,
max_length: Optional[int] = None,
text_dtype: tf.DType = tf.int64,
label_dtype: tf.DType = tf.float32,
**kwargs,
):
self.labels = list(labels)
self.is_pair_text = is_pair_text
self.is_multilabel = is_multilabel
column_dtypes = ["str", "str"]
if self.is_pair_text:
column_dtypes.append("str")
self.label2idx = dict(zip(labels, range(len(labels))))
super().__init__(
vocab,
segmenter=segmenter,
X=X,
y=y,
csv_file=csv_file,
in_memory=in_memory,
has_label=has_label,
max_length=max_length,
na_value="NULL",
column_dtypes=column_dtypes,
text_dtype=text_dtype,
label_dtype=label_dtype,
**kwargs,
)
@property
def y(self) -> list[str] | list[list[str]]:
if not self.has_label:
return []
return [
data[-1].decode("UTF-8").split("|")
if self.is_multilabel
else data[-1].decode("UTF-8")
for data in self._original_dataset.as_numpy_iterator()
]
@property
def batch_padding_shapes(self) -> list[tuple]:
shapes = [(None,), (None,)]
return shapes[: None if self.has_label else -1]
def _format_y(self, y: Sequence[Any]) -> list[Sequence[Any]]:
if isinstance(y[0], (list, tuple)):
y = ["|".join(map(str, yi)) for yi in y]
return [y]
def py_label_binarizer(self, labels: list[str]) -> np.ndarray:
label2idx = self.label2idx
res = np.zeros(len(label2idx), dtype=np.float32)
res[[label2idx[label] for label in labels if label in label2idx]] = 1
return res
def py_label_transform(self, label: tf.Tensor) -> np.ndarray:
_label = super().py_label_transform(label)
if self.is_multilabel:
labels = _label.split("|")
else:
labels = [_label]
return self.py_label_binarizer(labels)
def to_tfrecord(self, filename: str) -> None:
def func(text: np.ndarray, label: np.ndarray):
return tf.reshape(
serialize_example(
(self._text_transform(text), self._label_transform(label)),
("tensor", "tensor"),
),
(),
)
tf_writer = tf.data.experimental.TFRecordWriter(filename)
tf_writer.write(
self._dataset.map(
lambda t, l: tf.py_function(func, inp=[t, l], Tout=tf.string),
num_parallel_calls=tf.data.experimental.AUTOTUNE,
)
)
@classmethod
def from_tfrecord(cls, filename: str) -> tf.data.Dataset:
def func(record: tf.Tensor):
parsed_record = tf.io.parse_single_example(
record,
{
"feature0": tf.io.FixedLenFeature([], tf.string, default_value=""),
"feature1": tf.io.FixedLenFeature([], tf.string, default_value=""),
},
)
return (
tf.io.parse_tensor(parsed_record["feature0"], tf.int32),
tf.io.parse_tensor(parsed_record["feature1"], tf.float32),
)
return tf.data.TFRecordDataset(filename).map(func)
class BertClassificationDataset(BertDatasetMixin, ClassificationDataset):
pass |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, timedelta
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp import SUPERUSER_ID
class event_type(osv.osv):
""" Event Type """
_name = 'event.type'
_description = __doc__
_columns = {
'name': fields.char('Event Type', size=64, required=True),
'default_reply_to': fields.char('Default Reply-To', size=64,help="The email address of the organizer which is put in the 'Reply-To' of all emails sent automatically at event or registrations confirmation. You can also put your email address of your mail gateway if you use one." ),
'default_email_event': fields.many2one('email.template','Event Confirmation Email', help="It will select this default confirmation event mail value when you choose this event"),
'default_email_registration': fields.many2one('email.template','Registration Confirmation Email', help="It will select this default confirmation registration mail value when you choose this event"),
'default_registration_min': fields.integer('Default Minimum Registration', help="It will select this default minimum value when you choose this event"),
'default_registration_max': fields.integer('Default Maximum Registration', help="It will select this default maximum value when you choose this event"),
}
_defaults = {
'default_registration_min': 0,
'default_registration_max': 0,
}
class event_event(osv.osv):
"""Event"""
_name = 'event.event'
_description = __doc__
_order = 'date_begin'
_inherit = ['mail.thread', 'ir.needaction_mixin']
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
if isinstance(ids, (long, int)):
ids = [ids]
res = []
for record in self.browse(cr, uid, ids, context=context):
date = record.date_begin.split(" ")[0]
date_end = record.date_end.split(" ")[0]
if date != date_end:
date += ' - ' + date_end
display_name = record.name + ' (' + date + ')'
res.append((record['id'], display_name))
return res
def copy(self, cr, uid, id, default=None, context=None):
""" Reset the state and the registrations while copying an event
"""
if not default:
default = {}
default.update({
'state': 'draft',
'registration_ids': False,
})
return super(event_event, self).copy(cr, uid, id, default=default, context=context)
def button_draft(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'draft'}, context=context)
def button_cancel(self, cr, uid, ids, context=None):
registration = self.pool.get('event.registration')
reg_ids = registration.search(cr, uid, [('event_id','in',ids)], context=context)
for event_reg in registration.browse(cr,uid,reg_ids,context=context):
if event_reg.state == 'done':
raise osv.except_osv(_('Error!'),_("You have already set a registration for this event as 'Attended'. Please reset it to draft if you want to cancel this event.") )
registration.write(cr, uid, reg_ids, {'state': 'cancel'}, context=context)
return self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
def button_done(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'done'}, context=context)
def check_registration_limits(self, cr, uid, ids, context=None):
for self.event in self.browse(cr, uid, ids, context=context):
total_confirmed = self.event.register_current
if total_confirmed < self.event.register_min or total_confirmed > self.event.register_max and self.event.register_max!=0:
raise osv.except_osv(_('Error!'),_("The total of confirmed registration for the event '%s' does not meet the expected minimum/maximum. Please reconsider those limits before going further.") % (self.event.name))
def check_registration_limits_before(self, cr, uid, ids, no_of_registration, context=None):
for event in self.browse(cr, uid, ids, context=context):
available_seats = event.register_avail
if available_seats and no_of_registration > available_seats:
raise osv.except_osv(_('Warning!'),_("Only %d Seats are Available!") % (available_seats))
elif available_seats == 0:
raise osv.except_osv(_('Warning!'),_("No Tickets Available!"))
def confirm_event(self, cr, uid, ids, context=None):
register_pool = self.pool.get('event.registration')
if self.event.email_confirmation_id:
#send reminder that will confirm the event for all the people that were already confirmed
reg_ids = register_pool.search(cr, uid, [
('event_id', '=', self.event.id),
('state', 'not in', ['draft', 'cancel'])], context=context)
register_pool.mail_user_confirm(cr, uid, reg_ids)
return self.write(cr, uid, ids, {'state': 'confirm'}, context=context)
def button_confirm(self, cr, uid, ids, context=None):
""" Confirm Event and send confirmation email to all register peoples
"""
if isinstance(ids, (int, long)):
ids = [ids]
self.check_registration_limits(cr, uid, ids, context=context)
return self.confirm_event(cr, uid, ids, context=context)
def _get_register(self, cr, uid, ids, fields, args, context=None):
"""Get Confirm or uncofirm register value.
@param ids: List of Event registration type's id
@param fields: List of function fields(register_current and register_prospect).
@param context: A standard dictionary for contextual values
@return: Dictionary of function fields value.
"""
res = {}
for event in self.browse(cr, uid, ids, context=context):
res[event.id] = {}
reg_open = reg_done = reg_draft =0
for registration in event.registration_ids:
if registration.state == 'open':
reg_open += registration.nb_register
elif registration.state == 'done':
reg_done += registration.nb_register
elif registration.state == 'draft':
reg_draft += registration.nb_register
for field in fields:
number = 0
if field == 'register_current':
number = reg_open
elif field == 'register_attended':
number = reg_done
elif field == 'register_prospect':
number = reg_draft
elif field == 'register_avail':
                    #the number of tickets is unlimited if the event.register_max field is not set.
                    #In that case we arbitrarily set it to 9999; it is used in the kanban view to special-case the display of the 'subscribe' button
number = event.register_max - reg_open if event.register_max != 0 else 9999
res[event.id][field] = number
return res
def _subscribe_fnc(self, cr, uid, ids, fields, args, context=None):
"""This functional fields compute if the current user (uid) is already subscribed or not to the event passed in parameter (ids)
"""
register_pool = self.pool.get('event.registration')
res = {}
for event in self.browse(cr, uid, ids, context=context):
res[event.id] = False
curr_reg_id = register_pool.search(cr, uid, [('user_id', '=', uid), ('event_id', '=' ,event.id)])
if curr_reg_id:
for reg in register_pool.browse(cr, uid, curr_reg_id, context=context):
if reg.state in ('open','done'):
res[event.id]= True
continue
return res
def _get_visibility_selection(self, cr, uid, context=None):
return [('public', 'All Users'),
('employees', 'Employees Only')]
# Lambda indirection method to avoid passing a copy of the overridable method when declaring the field
_visibility_selection = lambda self, *args, **kwargs: self._get_visibility_selection(*args, **kwargs)
_columns = {
'name': fields.char('Name', size=64, required=True, translate=True, readonly=False, states={'done': [('readonly', True)]}),
'user_id': fields.many2one('res.users', 'Responsible User', readonly=False, states={'done': [('readonly', True)]}),
'type': fields.many2one('event.type', 'Type of Event', readonly=False, states={'done': [('readonly', True)]}),
        'register_max': fields.integer('Maximum Registrations', help="You can define a maximum registration level for each event. If you have too many registrations you will not be able to confirm your event. (put 0 to ignore this rule)", readonly=True, states={'draft': [('readonly', False)]}),
        'register_min': fields.integer('Minimum Registrations', help="You can define a minimum registration level for each event. If you do not have enough registrations you will not be able to confirm your event. (put 0 to ignore this rule)", readonly=True, states={'draft': [('readonly', False)]}),
'register_current': fields.function(_get_register, string='Confirmed Registrations', multi='register_numbers'),
'register_avail': fields.function(_get_register, string='Available Registrations', multi='register_numbers',type='integer'),
'register_prospect': fields.function(_get_register, string='Unconfirmed Registrations', multi='register_numbers'),
'register_attended': fields.function(_get_register, string='# of Participations', multi='register_numbers'),
'registration_ids': fields.one2many('event.registration', 'event_id', 'Registrations', readonly=False, states={'done': [('readonly', True)]}),
'date_begin': fields.datetime('Start Date', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'date_end': fields.datetime('End Date', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'state': fields.selection([
('draft', 'Unconfirmed'),
('cancel', 'Cancelled'),
('confirm', 'Confirmed'),
('done', 'Done')],
'Status', readonly=True, required=True,
track_visibility='onchange',
            help='If the event is created, the status is \'Draft\'. If the event is confirmed for the particular dates the status is set to \'Confirmed\'. If the event is over, the status is set to \'Done\'. If the event is cancelled the status is set to \'Cancelled\'.'),
'email_registration_id' : fields.many2one('email.template','Registration Confirmation Email', help='This field contains the template of the mail that will be automatically sent each time a registration for this event is confirmed.'),
'email_confirmation_id' : fields.many2one('email.template','Event Confirmation Email', help="If you set an email template, each participant will receive this email announcing the confirmation of the event."),
'reply_to': fields.char('Reply-To Email', size=64, readonly=False, states={'done': [('readonly', True)]}, help="The email address of the organizer is likely to be put here, with the effect to be in the 'Reply-To' of the mails sent automatically at event or registrations confirmation. You can also put the email address of your mail gateway if you use one."),
'main_speaker_id': fields.many2one('res.partner','Main Speaker', readonly=False, states={'done': [('readonly', True)]}, help="Speaker who will be giving speech at the event."),
'address_id': fields.many2one('res.partner','Location Address', readonly=False, states={'done': [('readonly', True)]}),
'street': fields.related('address_id','street',type='char',string='Street'),
'street2': fields.related('address_id','street2',type='char',string='Street2'),
'state_id': fields.related('address_id','state_id',type='many2one', relation="res.country.state", string='State'),
'zip': fields.related('address_id','zip',type='char',string='zip'),
'city': fields.related('address_id','city',type='char',string='city'),
'speaker_confirmed': fields.boolean('Speaker Confirmed', readonly=False, states={'done': [('readonly', True)]}),
'country_id': fields.related('address_id', 'country_id',
type='many2one', relation='res.country', string='Country', readonly=False, states={'done': [('readonly', True)]}),
'note': fields.text('Description', readonly=False, states={'done': [('readonly', True)]}),
'company_id': fields.many2one('res.company', 'Company', required=False, change_default=True, readonly=False, states={'done': [('readonly', True)]}),
'is_subscribed' : fields.function(_subscribe_fnc, type="boolean", string='Subscribed'),
'visibility': fields.selection(_visibility_selection, 'Privacy / Visibility',
select=True, required=True),
}
_defaults = {
'state': 'draft',
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'event.event', context=c),
'user_id': lambda obj, cr, uid, context: uid,
'visibility': 'employees',
}
def subscribe_to_event(self, cr, uid, ids, context=None):
register_pool = self.pool.get('event.registration')
user_pool = self.pool.get('res.users')
num_of_seats = int(context.get('ticket', 1))
self.check_registration_limits_before(cr, uid, ids, num_of_seats, context=context)
user = user_pool.browse(cr, uid, uid, context=context)
curr_reg_ids = register_pool.search(cr, uid, [('user_id', '=', user.id), ('event_id', '=' , ids[0])])
#the subscription is done with SUPERUSER_ID because in case we share the kanban view, we want anyone to be able to subscribe
if not curr_reg_ids:
curr_reg_ids = [register_pool.create(cr, SUPERUSER_ID, {'event_id': ids[0] ,'email': user.email, 'name':user.name, 'user_id': user.id, 'nb_register': num_of_seats})]
else:
register_pool.write(cr, uid, curr_reg_ids, {'nb_register': num_of_seats}, context=context)
return register_pool.confirm_registration(cr, SUPERUSER_ID, curr_reg_ids, context=context)
def unsubscribe_to_event(self, cr, uid, ids, context=None):
register_pool = self.pool.get('event.registration')
#the unsubscription is done with SUPERUSER_ID because in case we share the kanban view, we want anyone to be able to unsubscribe
curr_reg_ids = register_pool.search(cr, SUPERUSER_ID, [('user_id', '=', uid), ('event_id', '=', ids[0])])
return register_pool.button_reg_cancel(cr, SUPERUSER_ID, curr_reg_ids, context=context)
def _check_closing_date(self, cr, uid, ids, context=None):
for event in self.browse(cr, uid, ids, context=context):
if event.date_end < event.date_begin:
return False
return True
_constraints = [
(_check_closing_date, 'Error ! Closing Date cannot be set before Beginning Date.', ['date_end']),
]
def onchange_event_type(self, cr, uid, ids, type_event, context=None):
if type_event:
type_info = self.pool.get('event.type').browse(cr,uid,type_event,context)
dic ={
'reply_to': type_info.default_reply_to,
'email_registration_id': type_info.default_email_registration.id,
'email_confirmation_id': type_info.default_email_event.id,
'register_min': type_info.default_registration_min,
'register_max': type_info.default_registration_max,
}
return {'value': dic}
def on_change_address_id(self, cr, uid, ids, address_id, context=None):
values = {}
if not address_id:
return values
address = self.pool.get('res.partner').browse(cr, uid, address_id, context=context)
values.update({
'street' : address.street,
'street2' : address.street2,
'city' : address.city,
'country_id' : address.country_id and address.country_id.id or False,
'state_id' : address.state_id and address.state_id.id or False,
'zip' : address.zip,
})
return {'value' : values}
def onchange_start_date(self, cr, uid, ids, date_begin=False, date_end=False, context=None):
res = {'value':{}}
if date_end:
return res
if date_begin and isinstance(date_begin, str):
date_begin = datetime.strptime(date_begin, "%Y-%m-%d %H:%M:%S")
date_end = date_begin + timedelta(hours=1)
res['value'] = {'date_end': date_end.strftime("%Y-%m-%d %H:%M:%S")}
return res
class event_registration(osv.osv):
"""Event Registration"""
_name= 'event.registration'
_description = __doc__
_inherit = ['mail.thread', 'ir.needaction_mixin']
_columns = {
'id': fields.integer('ID'),
'origin': fields.char('Source Document', size=124,readonly=True,help="Reference of the sales order which created the registration"),
'nb_register': fields.integer('Number of Participants', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'event_id': fields.many2one('event.event', 'Event', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'partner_id': fields.many2one('res.partner', 'Partner', states={'done': [('readonly', True)]}),
'create_date': fields.datetime('Creation Date' , readonly=True),
'date_closed': fields.datetime('Attended Date', readonly=True),
'date_open': fields.datetime('Registration Date', readonly=True),
'reply_to': fields.related('event_id','reply_to',string='Reply-to Email', type='char', size=128, readonly=True,),
'log_ids': fields.one2many('mail.message', 'res_id', 'Logs', domain=[('model','=',_name)]),
'event_end_date': fields.related('event_id','date_end', type='datetime', string="Event End Date", readonly=True),
'event_begin_date': fields.related('event_id', 'date_begin', type='datetime', string="Event Start Date", readonly=True),
'user_id': fields.many2one('res.users', 'User', states={'done': [('readonly', True)]}),
'company_id': fields.related('event_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True, states={'draft':[('readonly',False)]}),
'state': fields.selection([('draft', 'Unconfirmed'),
('cancel', 'Cancelled'),
('open', 'Confirmed'),
('done', 'Attended')], 'Status',
track_visibility='onchange',
size=16, readonly=True),
'email': fields.char('Email', size=64),
'phone': fields.char('Phone', size=64),
'name': fields.char('Name', size=128, select=True),
}
_defaults = {
'nb_register': 1,
'state': 'draft',
}
_order = 'name, create_date desc'
def do_draft(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'draft'}, context=context)
def confirm_registration(self, cr, uid, ids, context=None):
for reg in self.browse(cr, uid, ids, context=context or {}):
self.pool.get('event.event').message_post(cr, uid, [reg.event_id.id], body=_('New registration confirmed: %s.') % (reg.name or '', ),subtype="event.mt_event_registration", context=context)
return self.write(cr, uid, ids, {'state': 'open'}, context=context)
def registration_open(self, cr, uid, ids, context=None):
""" Open Registration
"""
event_obj = self.pool.get('event.event')
for register in self.browse(cr, uid, ids, context=context):
event_id = register.event_id.id
no_of_registration = register.nb_register
event_obj.check_registration_limits_before(cr, uid, [event_id], no_of_registration, context=context)
res = self.confirm_registration(cr, uid, ids, context=context)
self.mail_user(cr, uid, ids, context=context)
return res
def button_reg_close(self, cr, uid, ids, context=None):
""" Close Registration
"""
if context is None:
context = {}
today = fields.datetime.now()
for registration in self.browse(cr, uid, ids, context=context):
if today >= registration.event_id.date_begin:
values = {'state': 'done', 'date_closed': today}
self.write(cr, uid, ids, values)
else:
raise osv.except_osv(_('Error!'), _("You must wait for the starting day of the event to do this action."))
return True
def button_reg_cancel(self, cr, uid, ids, context=None, *args):
return self.write(cr, uid, ids, {'state': 'cancel'})
def mail_user(self, cr, uid, ids, context=None):
"""
Send email to user with email_template when registration is done
"""
for registration in self.browse(cr, uid, ids, context=context):
if registration.event_id.state == 'confirm' and registration.event_id.email_confirmation_id.id:
self.mail_user_confirm(cr, uid, ids, context=context)
else:
template_id = registration.event_id.email_registration_id.id
if template_id:
mail_message = self.pool.get('email.template').send_mail(cr,uid,template_id,registration.id)
return True
def mail_user_confirm(self, cr, uid, ids, context=None):
"""
Send email to user when the event is confirmed
"""
for registration in self.browse(cr, uid, ids, context=context):
template_id = registration.event_id.email_confirmation_id.id
if template_id:
mail_message = self.pool.get('email.template').send_mail(cr,uid,template_id,registration.id)
return True
def onchange_contact_id(self, cr, uid, ids, contact, partner, context=None):
if not contact:
return {}
addr_obj = self.pool.get('res.partner')
contact_id = addr_obj.browse(cr, uid, contact, context=context)
return {'value': {
'email':contact_id.email,
'name':contact_id.name,
'phone':contact_id.phone,
}}
def onchange_partner_id(self, cr, uid, ids, part, context=None):
res_obj = self.pool.get('res.partner')
data = {}
if not part:
return {'value': data}
addr = res_obj.address_get(cr, uid, [part]).get('default', False)
if addr:
d = self.onchange_contact_id(cr, uid, ids, addr, part, context)
data.update(d['value'])
return {'value': data}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
from yumi.modulecontext import ModuleContext
from yumi.modulebase import Module
from yumi.api import init_global
class Initializer(Module):
@staticmethod
def do(args: list, context: ModuleContext):
return init_global(context.working_dir)
Modules = {
"init": Initializer
} |
from eod import tm_utils, trmm
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose
import xarray as xr
import unittest
from utils import u_arrays as ua
class TestTMUtils(unittest.TestCase):
def test_minute_delta(self):
tt = [59, 0, 15, 13, 9, 2, 31, 27, 43, 45]
result = [1, 0, 0, 2, 6, -2, -1, 3, 2, 0]
for min, res in zip(tt, result):
assert tm_utils.minute_delta(min, 15) == res
def test_ll_to_MSG(self):
zp = 1856
# 1 x/y ca 0.025 lon/lat = ca. 3km
test = [(0,0), (-0.01, 0), (-0.025, 0), (0,0.025), (0,-0.1)]
result = [(zp, zp), (zp, zp), (zp+1, zp), (zp,zp+1), (zp,zp-4)]
for t, r in zip(test, result):
dir = tm_utils.ll_toMSG(t[0], t[1])
assert (dir['x'], dir['y']) == r
def test_ll_to_MSG_TRMM(self):
test_dir = '/users/global/cornkle/data/pythonWorkspace/proj_CEH/eod/tests/test_files/trmm'
obj = trmm.ReadWA(test_dir)
dat = obj.get_data(obj.fpaths[0], cut=[3,4])
lon = dat['lon'].values
lat = dat['lat'].values
dir = tm_utils.ll_toMSG(lon, lat)
assert np.unique(ua.unique_of_pair(dir['x'],dir['y'])).size == lon.size
# old: this is because the TRMM distance is sometimes 3km due to lacking precision (just two decimal places, thanks Chris!)
# new lat lon with more precision: works!
def test_kernel_no_zero(self):
dat = np.array([[1,4,3,6,4,0, 0, 0], ] * 4)
xx = [0,2,3,5,6,7]
yy = [2,3,3,3,2,1]
res = [1,3,6,4,False,False]
for x, y,r in zip(xx, yy,res):
nb = tm_utils.kernel_no_zero(dat,x,y)
assert nb == r
def test_cut_kernel(self):
# ATTENTION, DOES NOT TEST BOUNDARIES
dat = np.array([[1,4,3,6,4,0, 0, 0], ] * 4)
xx = [2,3]
yy = [2,1]
res = [np.array([[4,3,6], ] * 3), np.array([[3,6,4], ] * 3) ]
for x, y,r in zip(xx, yy,res):
nb = tm_utils.cut_kernel(dat, x, y, 1)
assert_array_equal(nb, r)
|
import apiclient.discovery
import googleapiclient.http
import httplib2
import oauth2client.client
import oauth2client.file
import oauth2client.tools
import threading
from . import util, config
_api_name = 'youtube'
_api_version = 'v3'
_auth_scope = 'https://www.googleapis.com/auth/youtube.readonly'
_client_secrets_path_key = config.Key('client_secrets_path', str, 'client_secrets.json')
_oauth2_token_path_key = config.Key('oauth2_token_path', str, 'oauth2_token.json')
def _get_authenticated_service(api_name, api_version, auth_scope, client_secrets_path, oauth2_token_path):
flow = oauth2client.client.flow_from_clientsecrets(
client_secrets_path,
scope=auth_scope)
storage = oauth2client.file.Storage(oauth2_token_path)
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = oauth2client.tools.run_flow(flow, storage, oauth2client.tools.argparser.parse_args(['--noauth_local_webserver']))
return apiclient.discovery.build(api_name, api_version,
http=credentials.authorize(httplib2.Http()))
class _Item:
def __init__(self, items):
self._items = items
def __getattr__(self, name):
item = self._items.get(name)
if item is None:
raise AttributeError('No item named {}.'.format(name))
return item
@classmethod
def wrap_json(cls, value):
if isinstance(value, dict):
return cls({k: cls.wrap_json(v) for k, v in value.items()})
else:
return value
class YouTube:
_max_results_per_request = 50
def __init__(self, service):
self._service = service
self._service_lock = threading.Lock()
def get_channel_by_id_or_username(self, channel_id_or_username, part):
channel = self.get_channels(channel_id_or_username, part)
if channel is None:
items = self._get(
self._service.channels(),
part=part,
forUsername=channel_id_or_username)
if items:
item, *rest = items
if rest:
raise ValueError(
'Multiple channels found with username {}.',
channel_id_or_username)
return item
else:
return None
else:
return channel
def get_channels(self, channel_id, part):
return self._get(self._service.channels(), part, id=channel_id)
def get_playlists(self, playlist_id, part):
return self._get(self._service.playlists(), part, id=playlist_id)
def get_playlist_items(self, playlist_id, part):
return self._get(self._service.playlistItems(), part, playlistId=playlist_id)
def get_channel_videos(self, channelId, part, order='date', max_results=None):
return self._get(
self._service.search(),
part,
channelId=channelId,
order=order,
type='video',
max_results=max_results)
def get_videos(self, video_id, part):
return self._get(self._service.videos(), part, id=video_id)
@classmethod
def get_authenticated_instance(cls, settings: config.Configuration):
service = _get_authenticated_service(
_api_name,
_api_version,
_auth_scope,
settings.get(_client_secrets_path_key),
settings.get(_oauth2_token_path_key))
return cls(service)
def _get(self, resource, part, *, id=None, max_results=None, **kwargs):
assert id is None or max_results is None
if isinstance(part, list):
part = ','.join(part)
with self._service_lock:
if id is None:
items = self._get_raw(resource, part, max_results, **kwargs)
else:
if isinstance(id, list):
items = [
j
for i in range(0, len(id), self._max_results_per_request)
for j in self._get_raw(
resource,
part,
id=','.join(id[i:i + self._max_results_per_request]),
**kwargs)]
else:
items = self._get_raw(resource, part, id=id, **kwargs)
if isinstance(id, list) or id is None:
return items
else:
if items:
item, *rest = items
if rest:
raise ValueError('Multiple items were returned.')
return item
else:
return None
@classmethod
def _get_raw(cls, resource, part, max_results=None, **kwargs):
if max_results is not None and max_results < cls._max_results_per_request:
max_results_per_request = max_results
else:
max_results_per_request = cls._max_results_per_request
results = []
request = resource.list(part=part, maxResults=max_results_per_request, **kwargs)
while request and (max_results is None or len(results) < max_results):
util.log('Requesting {} ...', request.uri)
try:
response = request.execute()
except googleapiclient.http.HttpError as e:
# The HttpError class is currently broken and does not decode
# the received data before parsing it.
if isinstance(e.content, bytes):
e.content = e.content.decode()
raise
results.extend(map(_Item.wrap_json, response.get('items', [])))
request = resource.list_next(request, response)
return results[:max_results]
|
"""
Author: Reinaldo Mateus R J, Test version: 0.1
"""
import time
from selenium.webdriver.support.ui import WebDriverWait # available since 2.4.0
from settings.variables_test import *
from classes import common_functions
class project_test(common_functions.functions,object):
def setProject(self,test_number, menu_name, project_name, prefix,driver):
self.driver = driver
wait = WebDriverWait(self.driver, 90)
self.driver.implicitly_wait(DELAY_HIGH)
try:
print ("Open page: " +str(BASE_URL + MAIN_PAGE))
self.driver.get(BASE_URL + MAIN_PAGE)
self.FindFrame(self.driver)
print ("Find menu_name and click and button: " +str(menu_name))
menu_name_test = wait.until(lambda driver: self.driver.find_element_by_link_text(menu_name))
menu_name_test.click()
button_create = wait.until(lambda driver: driver.find_element_by_id("create"))
button_create.click()
self.driver.implicitly_wait(DELAY_HIGH)
self.driver.find_element_by_name("tprojectName").clear()
self.driver.find_element_by_name("tprojectName").send_keys(project_name)
self.driver.find_element_by_name("tcasePrefix").clear()
self.driver.find_element_by_name("tcasePrefix").send_keys(prefix)
confirm_button = wait.until(lambda driver: driver.find_element_by_xpath("/html/body/div/div/form/table/tbody/tr[15]/td/div/input[3]"))
confirm_button.click()
# compare second word with user_type
if self.FindElementXpath("/html/body/div[1]/p[1]",driver):
result = self.driver.find_element_by_xpath("/html/body/div[1]/p[1]").text
print ("The project already exists: " +str(result.encode('UTF-8')))
return False
else:
return True
except:
self.PrintException()
print ("Failed class project_test, Test: ",test_number, menu_name, project_name, prefix)
return False
|
from datetime import datetime, timedelta
from pyrogram import Client, filters
import config
from vk_bot import VKBot
tg_client = Client("kispython-translator", api_id=config.TG_API_ID, api_hash=config.TG_API_HASH)
def make_message_header(author, msg_time, edit=False):
""" Создание заголовка сообщения для VK. """
emoji = '🐍'
    msg_time = datetime.utcfromtimestamp(msg_time) + timedelta(hours=3)  # shift to Moscow time (UTC+3) for display
msg_time = msg_time.strftime("%H:%M:%S")
if not edit:
return f'{emoji} {author} ({msg_time})\n\n'
else:
        return f'{emoji} {author} (edited at {msg_time})\n\n'
def get_message_for_vk(message, edit=False, media=False):
""" Создание сообщения для VK. """
author = message.author_signature
message_text = message.text if not media else message.caption
if not edit:
msg_time = message.date
message_header = make_message_header(author, msg_time)
else:
msg_time = message.edit_date
message_header = make_message_header(author, msg_time, edit=True)
message = message_header + message_text if message_text is not None else message_header
return message
@tg_client.on_message(filters.chat(chats=config.TG_CHAT_ID) & filters.text)
def message_handler(client, message):
""" Обработчик текстовых сообщений в канале. """
if message.edit_date is not None:
msg = get_message_for_vk(message, edit=True)
else:
msg = get_message_for_vk(message)
vk_bot.send_text_message(msg)
@tg_client.on_message(filters.chat(chats=config.TG_CHAT_ID) & (filters.photo | filters.document))
def media_message_handler(client, message):
""" Обработчик сообщений с медиа в канале. """
if message.edit_date is not None:
msg = get_message_for_vk(message, edit=True, media=True)
else:
msg = get_message_for_vk(message, media=True)
    # If the message contains a photo
if message.photo:
file_path = message.download()
vk_bot.send_message_with_photo(msg, file_path)
    # If the message contains a document
elif message.document:
file_name = message.document.file_name
file_path = message.download()
vk_bot.send_message_with_document(msg, file_path, file_name)
if __name__ == '__main__':
vk_bot = VKBot()
tg_client.run()
|
import math
class Math:
def __init__(self, eq):
self.equation = eq
self.equation_list = None
def print_equation(self):
print(self.equation)
def remove_spaces(self):
self.equation = self.equation.replace(" ", "")
def split_equation(self):
self.equation_list = self.equation.split(" ")
    def is_operator(self, char):
        # compare by value, not identity
        if char == "+" or char == "-" or char == "*" or char == "/":
            return True
        else:
            return False
def remove_spaces(self):
counter = 0
eq_list = []
#equation = ""
length = len(self.equation)
for char in self.equation:
eq_list.append(char)
while eq_list.count(" ") is not 0:
if counter is length:
counter = 0
if eq_list[counter] is " ":
eq_list.pop(counter )
length -= 1
counter += 1
self.equation = ""
for char in eq_list:
self.equation += char
#return equation
    def check_num(self, num):
        if num == "(" or num == ")" or num == "+" or num == "-" or num == "*" or num == "/" or num == "^":
            return False
        for n in range(0, 10):
            if num == str(n) or num == ".":
                return True
        return False
def prep_equation(self):
eq_list = []
#equation = ""
#print(self.equation)
for char in self.equation:
eq_list.append(char)
counter = 0
#eq_list[counter] is not " " and eq_list[counter + 1] is not " " and
# and self.check_num(eq_list[counter]) is False and self.check_num(eq_list[counter + 1]) is not False
#print(self.check_num((".")))
#print(self.check_num("1"))
        while counter != len(eq_list) - 1:
            if eq_list[counter] != " " and eq_list[counter + 1] != " ":
                if self.check_num(eq_list[counter]) and self.check_num(eq_list[counter + 1]):
pass
else:
eq_list.insert(counter + 1, " ")
counter += 1
self.equation = ""
for char in eq_list:
self.equation += char
#print(self.equation)
#return equation
def find_operators(self, equation_list):
add = []
sub = []
mul = []
div = []
left_par = []
right_par = []
pows = []
operator_count = 0
counter = 0
        for char in equation_list:
            if char == "+":
                add.append(counter)
                operator_count += 1
            elif char == "-":
                sub.append(counter)
                operator_count += 1
            elif char == "*":
                mul.append(counter)
                operator_count += 1
            elif char == "/":
                div.append(counter)
                operator_count += 1
            elif char == "(":
                left_par.append(counter)
            elif char == ")":
                right_par.append(counter)
            elif char == "^":
                pows.append(counter)
                operator_count += 1
            counter += 1
#print(add)
#print(sub)
#print(mul)
#print(div)
#print(left_par)
#print(right_par)
#print(equation_list)
return add, sub, mul, div, left_par, right_par, pows, operator_count
#def find_par(self):
def solve_equation(self, equation):
add, sub, mul, div, left_par, right_par, pows, operator_count = self.find_operators(equation)
#print(add)
#print(sub)
#print(mul)
#print(div)
#print(left_par)
#print(right_par)
#print(operator_count)
solved = self.solve(add, sub, mul, div, pows, operator_count, equation)
return solved
def solve(self, add, sub, mul, div, pows, operator_count, equation):
#print(equation)
counter = 0
        while operator_count != 0:
            if len(pows) != 0:
                char = "^"
                counter = pows.pop(0)
            elif len(mul) != 0:
                char = "*"
                counter = mul.pop(0)
            elif len(div) != 0:
                char = "/"
                counter = div.pop(0)
            elif len(add) != 0:
                char = "+"
                counter = add.pop(0)
            elif len(sub) != 0:
                char = "-"
                counter = sub.pop(0)
#print(char)
x = equation[counter - 1]
y = equation[counter + 1]
equation.pop(counter - 1)
equation.pop(counter - 1)
equation.pop(counter - 1)
if char is "+":
equation.insert(counter -1, float(x) + float(y))
elif char is "-":
equation.insert(counter -1, float(x) - float(y))
elif char is "*":
equation.insert(counter -1, float(x) * float(y))
elif char is "/":
equation.insert(counter -1, float(x) / float(y))
elif char is "^":
equation.insert(counter - 1, pow(float(x), float(y)))
while len(add) is not 0:
add.pop(0)
while len(sub) is not 0:
sub.pop(0)
while len(mul) is not 0:
mul.pop(0)
while len(div) is not 0:
div.pop(0)
while len(pows) is not 0:
pows.pop(0)
operator_count -= 1
add, sub, mul, div, left_par, right_par, pows, operator_count = self.find_operators(equation)
return equation[0]
def parse_equation(self):
sub_eq = []
in_par = False
try:
self.remove_spaces()
self.prep_equation()
self.split_equation()
#print(self.equation_list)
add, sub, mul, div, left_par, right_par, pows, operator_count = self.find_operators(self.equation_list)
            while len(left_par) != 0:
                for char in self.equation_list:
                    if char == ")":
                        in_par = False
                        break
                    if in_par:
                        sub_eq.append(char)
                    if char == "(":
                        in_par = True
solved_sub = self.solve_equation(sub_eq)
sub_eq = []
#print(solved_sub)
placed = left_par_pos = left_par.pop(0)
right_par_pos = right_par.pop(0)
while left_par_pos < right_par_pos + 1:
#print(self.equation_list)
self.equation_list.pop(placed)
left_par_pos += 1
self.equation_list.insert(placed, solved_sub)
#print(self.equation_list)
                while len(left_par) != 0:
                    left_par.pop(0)
                while len(right_par) != 0:
                    right_par.pop(0)
                add, sub, mul, div, left_par, right_par, pows, operator_count = self.find_operators(self.equation_list)
#print(self.equation_list)
value = self.solve_equation(self.equation_list)
if isinstance(value, (int, float)):
return(value)
#print(equation)
# return equation
except Exception as error:
#print("Please check your equation.")
pass
def main():
eq = "2.4 + 2.6"
#eq = remove_spaces(eq, len(eq))
#equation = prep_equation(eq)
math = Math(eq)
value = math.parse_equation()
print(value)
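    # Added usage example: a parenthesised sub-expression is solved first, then the rest.
    print(Math("(2 + 3) * 4").parse_equation())  # expected: 20.0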
if __name__ == "__main__":
main()
|
import json
from home_run.models import ScikitLearnModel
# Create the flask app
from flask import Flask, request
app = Flask(__name__)
from sklearn.externals import joblib
# Load in options form disk
with open('options.json') as fp:
options = json.load(fp)
# Instantiate a HRModel for sklearn type
ml = ScikitLearnModel(**options)
@app.route("/", methods=["POST"])
def slash_post():
body = request.get_json()
return json.dumps(ml.run(body))
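# An added, hedged usage sketch: with the server running, a client could POST the
# model inputs as JSON to "/". The exact payload shape depends on the model wrapped
# by options.json, so the feature values below are placeholders.
#
#   import requests
#   print(requests.post("http://localhost:5000/", json=[[5.1, 3.5, 1.4, 0.2]]).json())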
if __name__ == "__main__":
app.run(debug=True) |
#coding:utf-8
'''
Created on 2016-1-4
@author:
description:
'''
from subject.models import Activity, User
import time
from django.utils import timezone
def select_activity(req):
dao = Activity.objects.order_by("-id")[:req]
rsp = []
for v in dao:
rsp.append('\t'.join([timezone.localtime(v.time).strftime('%Y-%m-%d %H:%M:%S'), v.content]))
return rsp
def is_activity(req):
if Activity.objects.filter(content=req):
return True
return False
class activityDao():
def __init__(self,req):
if req.has_key("userid"):
self.us = User.objects.get(id=req["userid"])
def add_a_activity(self,realcontent):
realtime = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
Activity(userid=self.us,content=self.us.username+realcontent,time=realtime).save()
return
|
class Solution(object):
def threeSum(self, nums):
if not nums:
return []
if len(set(nums))==1 and nums[0]==0 and len(nums)>2:
return [[0,0,0]]
output=[]
target=0
nums.sort()
for i in range(len(nums)-1):
if nums[i]==nums[i-1]:
continue
target=nums[i]*(-1)
s,e=i+1,len(nums)-1
while(s<e):
if (nums[s]+nums[e])==target:
output.append([nums[i],nums[s],nums[e]])
s+=1
while s<e and nums[s]==nums[s-1]:
s+=1
elif nums[s]+nums[e]<target:
s+=1
else:
e-=1
return output
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
|
import numpy as np
from scipy.signal import correlate
import matplotlib.pyplot as plt
# load datasets
w = 50  # oscillation frequency (Hz)
period = 1.0 / w  # period of oscillations (seconds)
t_min = 0
t_max = 0.1
t_step = 0.001
t = np.arange(t_min, t_max, t_step, dtype=float)  # np.float was removed in recent NumPy
fai = np.pi / 4
A = np.sin(2 * np.pi * w * t)
B = np.sin(2 * np.pi * w * t + fai)
# B = np.roll(A, 2)
nsamples = A.size
plt.plot(range(nsamples), A)
plt.plot(range(nsamples), B)
# plt.show()
xcorr = correlate(A, B)
print(xcorr.shape)
xcorr_pos = np.argmax(xcorr)
print(xcorr_pos)
plt.plot(range(2*nsamples-1), xcorr)
# plt.show()
print('t:', xcorr_pos*t_step)
print('fai:', xcorr_pos*t_step * 2 * np.pi * w)
# The peak of the cross-correlation gives the shift between the two signals
# The xcorr array goes from -nsamples to nsamples
dt = np.linspace(-t[-1], t[-1], 2*nsamples-1)
recovered_time_shift = dt[xcorr.argmax()]
print(recovered_time_shift)
# force the phase shift to be in [-pi:pi]
recovered_phase_shift = 2*np.pi*(((0.5 + recovered_time_shift/period) % 1.0) - 0.5)
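# (added note) ((0.5 + x) % 1.0) - 0.5 wraps a shift x, measured in fractions of a
# period, into [-0.5, 0.5); multiplying by 2*pi then yields a phase in [-pi, pi).
# For example, a shift of 0.9 periods wraps to -0.1 periods, i.e. a phase of -0.2*pi.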
relative_error = (recovered_phase_shift - fai)/(2*np.pi)
print("Original phase shift: %.2f pi" % (fai/np.pi))
print("Recovered phase shift: %.2f pi" % (recovered_phase_shift/np.pi))
print("Relative error: %.4f" % (relative_error))
|
import postfix_conf
import re
class SpaceConf(postfix_conf.PostfixConf):
def parse_row(self, row):
items = re.split('\s+', row)
row = Row()
row.key = items[0]
row.items = items[1:]
return row
def append_kv(self, key, value):
r = Row()
r.key = key
r.items = [value]
self.vals.append(r)
class Row:
def __init__(self):
self.key = None
self.items = []
def __str__(self):
if self.key is None:
return ''
return "%s\t%s\n" % (self.key, ' '.join(self.items))
|
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib import auth
from django.contrib.auth.decorators import login_required
from sign.models import Event, Guest
from django.db.models import Q
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from PIL import Image, ImageDraw, ImageFont
import io, random
# Create your views here.
def get_valid_img(request):
def get_random_color():
return random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)
img_obj = Image.new('RGB', (220, 35), get_random_color())
draw_obj = ImageDraw.Draw(img_obj)
font_obj = ImageFont.truetype('C:\\Windows\\Fonts\\simsun.ttc', 28)
tmp_list = []
for i in range(5):
u = chr(random.randint(65, 90))
l = chr(random.randint(97, 122))
n = str(random.randint(0, 9))
tmp = random.choice([u, l, n])
tmp_list.append(tmp)
draw_obj.text((20 + 40 * i, 0), tmp, fill=get_random_color(), font=font_obj)
request.session['valid_code'] = ''.join(tmp_list)
width = 200
height = 35
for i in range(5):
x1 = random.randint(0, width)
x2 = random.randint(0, width)
y1 = random.randint(0, height)
y2 = random.randint(0, height)
        draw_obj.line((x1, y1, x2, y2), fill=get_random_color())
for i in range(40):
draw_obj.point((random.randint(0, width), random.randint(0, height)), fill=get_random_color())
x = random.randint(0, width)
y = random.randint(0, height)
draw_obj.arc((x, y, x+4, y+4), 0, 90, fill=get_random_color())
io_obj = io.BytesIO()
img_obj.save(io_obj, 'png')
data = io_obj.getvalue()
return HttpResponse(data)
def index(request):
return render(request, 'index.html')
def login_action(request):
if request.method == "POST":
username = request.POST.get('username', '')
password = request.POST.get('password', '')
check_code = request.POST.get('checkcode')
session_code = request.session['valid_code']
if check_code.strip().lower() != session_code.lower():
return render(request, 'index.html', {'error': 'code error'})
user = auth.authenticate(username=username, password=password)
if user is not None:
auth.login(request, user)
request.session['user'] = username
response = HttpResponseRedirect('/event_manage/')
return response
# if username == 'admin' and password == 'admin123':
# response = HttpResponseRedirect('/event_manage/')
# # response.set_cookie('user', username, 3600 )
# request.session['user'] = username
# return response
else:
return render(request, 'index.html', {'error': 'username or password error!'})
else:
return render(request, 'index.html', {'error': 'username or password error!'})
@login_required
def event_manage(request):
# username = request.COOKIES.get('user', '')
event_list = Event.objects.all()
username = request.session.get('user', '')
return render(request, "event_manage.html", {"user":username,
"events": event_list})
@login_required
def search_name(request):
username = request.session.get('user', '')
search_name = request.GET.get('name', '')
event_list = Event.objects.filter(name__contains=search_name)
return render(request, "event_manage.html", {"user": username,
"events": event_list})
@login_required
def guest_manage(request):
username = request.session.get('user', '')
guest_list = Guest.objects.all()
paginator = Paginator(guest_list, 2)
page = request.GET.get('page')
try:
contacts = paginator.page(page)
except PageNotAnInteger:
contacts = paginator.page(1)
except EmptyPage:
contacts = paginator.page(paginator.num_pages)
return render(request, "guest_manage.html", {"user": username,
"guests": contacts})
@login_required
def search_phone(request):
username = request.session.get('user', '')
search_phone = request.GET.get('phone', '')
guest_list = Guest.objects.filter(Q(realname__contains=search_phone) | Q(phone__contains=search_phone))
paginator = Paginator(guest_list, 2)
page = request.GET.get('page')
try:
contacts = paginator.page(page)
except PageNotAnInteger:
contacts = paginator.page(1)
except EmptyPage:
contacts = paginator.page(paginator.num_pages)
return render(request, "guest_manage.html", {"user": username,
"guests": contacts})
@login_required
def logout(request):
auth.logout(request)
response = HttpResponseRedirect('/index/')
return response
@login_required
def sign_index(request, event_id):
event = get_object_or_404(Event, id=event_id)
return render(request, 'sign_index.html', {'event': event})
@login_required
def sign_index_action(request, event_id):
event = get_object_or_404(Event, id=event_id)
phone = request.POST.get('phone', '')
result = Guest.objects.filter(phone=phone)
if not result:
return render(request, 'sign_index.html', {'event': event,
'hint': 'phone error.'})
result = Guest.objects.filter(phone=phone, event_id=event_id)
if not result:
return render(request, 'sign_index.html', {'event': event,
'hint': 'event id or phone error.'})
result = Guest.objects.get(phone=phone, event_id=event_id)
if result.sign:
return render(request, 'sign_index.html', {'event': event,
'hint': 'user has sign in.'})
else:
Guest.objects.filter(phone=phone, event_id=event_id).update(sign = '1')
return render(request, 'sign_index.html', {'event': event,
'hint': 'sign in success!',
'guest': result})
|
from django.shortcuts import render, redirect
from django.contrib.auth.hashers import check_password
from store.models.customer import Customer
from django.views import View
from store.models.product import Product
from store.models.orders import Order
from store.middlewares.auth import auth_middleware
class OrderView(View):
def get(self , request,pk=None ):
if pk is not None:
orders=Order.objects.get(pk=pk)
orders.delete()
return redirect('orders')
customer = request.session.get('customer')
orders = Order.get_orders_by_customer(customer)
print(orders)
return render(request , 'orders.html' , {'orders' : orders})
|
import hashlib
import logging
import requests
import time
from irodsManager.irodsUtils import get_bag_generator, bag_generator_faker, ExporterClient, ExporterState as Status
from http import HTTPStatus
from multiprocessing import Pool
from xml.etree import ElementTree
logger = logging.getLogger('iRODS to Dataverse')
class EasyClient(ExporterClient):
"""Easy client to import bagged collection
"""
def __init__(self, host, user, pwd, token, irodsclient):
"""
:param host: String IP of the EASY's host
:param user: String user name
:param pwd: String user password
:param irodsclient: irodsClient object - client to iRODS database user
"""
self.host = host
self.user = user
self.pwd = pwd
self.token = token
self.irods_client = irodsclient
self.collection = irodsclient.coll
self.session = irodsclient.session
self.rulemanager = irodsclient.rulemanager
self.imetadata = irodsclient.imetadata
self.dataset_status = None
self.dataset_url = None
self.dataset_deposit_url = f"{self.host}/sword2/collection/1"
self.dataset_pid = None
self.last_export = None
self.pool = None
self.result = None
# self.bag_md5 = None
self.upload_success = {}
self.deletion = False
self.restrict = False
self.restrict_list = []
self.zip_name = "debug_archive.zip"
def post_it(self):
logger.info(f"{'--':<10}Prepare bag")
self.irods_client.update_metadata_state('create-exporter', 'prepare-bag')
self.pool = Pool(processes=1)
self.result = self.pool.apply_async(self.run_checksum, [self.collection.path])
irods_md5 = hashlib.md5()
bag_size = bag_generator_faker(self.irods_client, self.upload_success, irods_md5)
md5_hexdigest = irods_md5.hexdigest()
logger.info(f"{'--':<20}Stream predicted size: {bag_size}")
logger.info(f"{'--':<20}iRODS buffer MD5: {md5_hexdigest}")
self.irods_client.update_metadata_state('prepare-bag', Status.VALIDATE_CHECKSUM.value)
self.pool.close()
self.pool.join()
chksums = self.result.get()
count = 0
# validated = False
for k in self.upload_success.keys():
if self.upload_success[k] == chksums[k]:
self.upload_success.update({k: True})
count += 1
if count == len(self.upload_success):
# validated = True
logger.info(f"{'--':<20}iRODS & buffer SHA-256 checksum: validated")
self.irods_client.update_metadata_state(Status.VALIDATE_CHECKSUM.value, 'prepare-bag')
else:
logger.error(f"{'--':<20}SHA-256 checksum: failed")
self.irods_client.update_metadata_state(Status.VALIDATE_UPLOAD.value, Status.UPLOAD_CORRUPTED.value)
# print(validated)
self.irods_client.update_metadata_state('prepare-bag', 'zip-bag')
self.upload_success = {}
bag_md5 = hashlib.md5()
bag_iterator = get_bag_generator(self.irods_client, self.upload_success, bag_md5, bag_size)
logger.info(f"{'--':<10}Upload bag")
self.irods_client.update_metadata_state('zip-bag', 'upload-bag')
resp = requests.post(
self.dataset_deposit_url,
data=bag_iterator,
auth=(self.user, self.pwd),
headers={
"X-Authorization": self.token,
"Content-Disposition": "filename=debug_archive00.zip",
"Content-MD5": f"{md5_hexdigest}",
"In-Progress": "false",
"Packaging": "http://purl.org/net/sword/package/SimpleZip",
"Content-Type": "application/octet-stream"
},
)
logger.info(f"{'--':<20}Bag buffer MD5: {bag_md5.hexdigest()}")
if resp.status_code == HTTPStatus.CREATED:
logger.debug(f"{'--':<30}{resp.content.decode('utf-8')}")
self.check_status(resp.content.decode('utf-8'))
else:
logger.error(f"{'--':<30}status_code: {resp.status_code}")
logger.error(f"{'--':<30}{resp.content.decode('utf-8')}")
def check_status(self, content):
logger.info(f"{'--':<20}Check deposit status")
ElementTree.register_namespace("atom", "http://www.w3.org/2005/Atom")
ElementTree.register_namespace("terms", "http://purl.org/net/sword/terms/")
root = ElementTree.fromstring(content)
href = root.find("./{http://www.w3.org/2005/Atom}link/[@rel='http://purl.org/net/sword/terms/statement']").get(
'href')
previous_term = "UPLOADED"
self.irods_client.update_metadata_state('upload-bag', previous_term)
while True:
resp = requests.get(href,
auth=(self.user, self.pwd),
headers={"X-Authorization": self.token}
)
if resp.status_code != HTTPStatus.OK:
content = resp.content.decode("utf-8")
logger.debug(f"{'--':<30}{resp.status_code}")
logger.debug(f"{'--':<30}{content}")
break
content = resp.content.decode("utf-8")
root = ElementTree.fromstring(content)
category = root.find("./{http://www.w3.org/2005/Atom}category")
term_refreshed = category.get('term')
if term_refreshed == "INVALID" or term_refreshed == "REJECTED" or term_refreshed == "FAILED":
logger.error(f"{'--':<30}state: {term_refreshed}")
logger.error(f"{'--':<30}state description: {category.text}")
self.irods_client.update_metadata_state(previous_term, term_refreshed)
break
elif term_refreshed == "ARCHIVED":
logger.info(f"{'--':<30}state: {term_refreshed}")
logger.info(f"{'--':<30}state description: {category.text}")
self.dataset_pid = category.text
self.irods_client.update_metadata_state(previous_term, term_refreshed)
break
else:
logger.info(f"{'--':<30}state: {term_refreshed}")
logger.info(f"{'--':<30}state description: {category.text}")
if previous_term != term_refreshed:
self.irods_client.update_metadata_state(previous_term, term_refreshed)
previous_term = term_refreshed
time.sleep(15)
def final_report(self):
logger.info("Report final progress")
self.irods_client.add_metadata('externalPID', self.dataset_pid, "Easy")
self.irods_client.update_metadata_state('ARCHIVED', 'exported')
time.sleep(5)
self.irods_client.remove_metadata('exporterState', 'exported')
logger.info("Upload Done")
|
from flask_restplus import Resource
import app.main.service.user_service as user_service
from app.main.response.user_response import UserResponse
from app.main.controller.rest_plus_api import Api
api = Api.api
_userResponseType = UserResponse.user
@api.route('/<id>')
@api.doc(params={'id': "The user's heatmap id"})
class UserList(Resource):
@api.doc('Returns the base map of the user')
@api.marshal_with(_userResponseType)
def get(self, id):
"""Returns the specified user"""
return user_service.getUserById(id)
|
import socket,sys
client = socket.socket()
client.connect(('localhost',9999))
while True:
msg = input(">>>>").strip()
if len(msg)==0:
continue
client.send(msg.encode('utf-8'))
res_return_size = client.recv(1024)  # size of the command result to be received
print('size of the incoming result:', res_return_size)
total_res_size = int(res_return_size)
print('total data size:', total_res_size)
client.send('ready to receive, please send'.encode('utf-8'))
received_size = 0  # bytes received so far
cmd_res = b''
f = open('test_copy.html','wb')  # store the received data for the later verification
while received_size != total_res_size:
data = client.recv(1024)
received_size += len(data)  # note: each recv may return fewer than 1024 bytes
cmd_res += data
else:
print("finished receiving data:", received_size)
#print(cmd_res.decode())
f.write(cmd_res)  # write the received data to disk for verification
f.close()
#print(data.decode())
client.close()
|
import torch
import torchvision
from torch.utils.data import Dataset, DataLoader
from PIL import Image
class FacesDataset(Dataset):
def __init__(self, root_dir, size, n, total):
self.root_dir = root_dir
self.size = size
self.n_celebs = n
self.total = total
def __len__(self):
return self.n_celebs*self.size
def __getitem__(self, idx):
if self.root_dir == "train/":
label = idx//self.size + 1
num = idx%self.size + 1
else:
label = idx//self.size + 1
num = idx%self.size + self.total + 1 -self.size
img = Image.open(self.root_dir+"s"+str(label)+"/"+str(num)+".png")
trans = torchvision.transforms.ToTensor()
sample = (trans(img),torch.tensor(label-1))
return sample
def getFacesDataset(frac,n,total):
train = FacesDataset(root_dir="train/",size=frac,n=n, total=total)
test = FacesDataset(root_dir="test/",size=total-frac,n=n,total=total)
return {'train': train, 'eval': test}
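# A minimal usage sketch (not part of the original file); the values for frac, n and
# total are illustrative and assume images laid out as train/s<label>/<num>.png:
# datasets = getFacesDataset(frac=8, n=40, total=10)
# train_loader = DataLoader(datasets['train'], batch_size=16, shuffle=True)
# images, labels = next(iter(train_loader))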
|
from telegram import Update
from telegram.ext import Updater, CommandHandler, CallbackContext
def start(update: Update, context: CallbackContext) -> None:
update.message.reply_text(f'Hello {update.effective_user.first_name}')
updater = Updater('1766280930:AAHtKtuWbjSx_vpQmO1UNR4g_QkBqR6zd6I')
updater.dispatcher.add_handler(CommandHandler('start', start))
updater.start_polling()
updater.idle()
|
# On judge sites like Programmers you have to work inside def solution, so setting up global variables is awkward.
def solution(n,m, g):
# variable and input-data section
graph=[]
n,m= n,m
for _ in range(n):
graph.append(g[_])
print(graph[_])
# Define the core algorithm as an inner function: a nested function like this can access the outer function's variables.
def dfs(x, y):
if (x <= -1) or (x >= n) or (y <= -1) or (y >= m):
return False
if graph[x][y] == 0:
# mark this node as visited
graph[x][y] =1
# recurse into all four neighbours: up, down, left, right
dfs(x - 1, y) # these calls do nothing but recursively mark visited cells in graph
dfs(x, y - 1)
dfs(x + 1, y)
dfs(x, y + 1)
return True
"""
True 를 하는 이유: graph[x][y] == 0이어서. 오직 해당 그래프 값이 0일 때 True를 리턴한다. 이 경우
상하좌우로 dfs가 퍼져나가면서 1을 퍼트린다. 결국 하나의 좌표가 True를 리턴하지만,
다음 실행에서는 인접한 0의 값 좌표 들은 True가 되지 못한다.
"""
return False
result=0
for i in range(n):
for j in range(m):
# run dfs from the current position
if dfs(i,j) ==True:
result +=1
return result
graph2 =[
[0,0,1,1,0],
[0,0,0,1,1],
[1,1,1,1,1],
[0,0,0,0,0]
]
print(solution(len(graph2),len(graph2[0]),graph2))
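# For graph2 above, solution counts the 4-connected regions of 0s, so the final print
# outputs 3: the block in the upper-left corner, the single 0 in the upper-right corner,
# and the bottom row.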
|
#!/usr/bin/env python3
#this script defines the variables; they can't be changed while the script runs
#unless you edit their values in the script itself
name=("Nathalia")
origin=("Brazil")
color=("green")
activity=("sports")
animal=("dog")
breed=("corgi")
dog_name=("Noel")
print("My name is", name, ".", "I'm from", origin,".", "My favorite color is", color, ".", "I love practicing",\
activity, "with my friends.", "My favorite animal is my", animal,",", dog_name, ".", "My", animal, dog_name,\
"is a", breed, ".")
|
import spacy
import warnings
from medspacy.common.regex_matcher import RegexMatcher
nlp = spacy.load("en_core_web_sm")
class TestTargetMatcher:
def test_initiate(self):
assert RegexMatcher(nlp.vocab)
def test_add(self):
matcher = RegexMatcher(nlp.vocab)
matcher.add("my_rule", ["my_pattern"])
assert matcher._patterns
def test_basic_match(self):
matcher = RegexMatcher(nlp.vocab)
matcher.add("CONDITION", ["pulmonary embolisms?"])
doc = nlp("Past Medical History: Pulmonary embolism")
matches = matcher(doc)
assert matches
match_id, start, end = matches[0]
assert (start, end) == (4, 6)
assert nlp.vocab.strings[match_id] == "CONDITION"
span = doc[start:end]
assert span.text == "Pulmonary embolism"
def test_resolve_default(self):
matcher = RegexMatcher(nlp.vocab)
matcher.add("ENTITY", ["ICE: Rad"])
doc = nlp("SERVICE: Radiology")
matches = matcher(doc)
assert matches
_, start, end = matches[0]
span = doc[start:end]
assert span.text == "SERVICE: Radiology"
def test_resolve_start_right(self):
matcher = RegexMatcher(nlp.vocab, resolve_start="right")
matcher.add("ENTITY", ["ICE: Rad"])
doc = nlp("SERVICE: Radiology")
matches = matcher(doc)
assert matches
_, start, end = matches[0]
span = doc[start:end]
assert span.text == ": Radiology"
def test_resolve_end_left(self):
matcher = RegexMatcher(nlp.vocab, resolve_end="left")
matcher.add("ENTITY", ["ICE: Rad"])
doc = nlp("SERVICE: Radiology")
matches = matcher(doc)
assert matches
_, start, end = matches[0]
span = doc[start:end]
assert span.text == "SERVICE:"
def test_resolve_inward(self):
matcher = RegexMatcher(nlp.vocab, resolve_start="right", resolve_end="left")
matcher.add("ENTITY", ["ICE: Rad"])
doc = nlp("SERVICE: Radiology")
matches = matcher(doc)
assert matches
_, start, end = matches[0]
span = doc[start:end]
assert span.text == ":"
def test_resolve_single_matched_token(self):
matcher = RegexMatcher(nlp.vocab, resolve_start="left", resolve_end="right")
matcher.add("ENTITY", ["ICE"])
doc = nlp("SERVICE: Radiology")
matches = matcher(doc)
assert matches
_, start, end = matches[0]
span = doc[start:end]
assert span.text == "SERVICE"
def test_resolve_inward_single_matched_token_is_none(self):
matcher = RegexMatcher(nlp.vocab, resolve_start="right", resolve_end="left")
matcher.add("ENTITY", ["ICE"])
doc = nlp("SERVICE: Radiology")
matches = matcher(doc)
assert matches == []
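# Summary of the behaviour exercised above: when a regex match starts or ends in the middle
# of a token, RegexMatcher resolves the span outward by default (whole tokens are kept);
# resolve_start="right" / resolve_end="left" shrink the span inward instead, and if shrinking
# inward leaves no complete token the match is dropped.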
|
#!/usr/bin/python
import socket
import sys
import os
from subprocess import Popen
socket_address = ('localhost', 8888)
#TODO get feedback
def sendMessage(message):
# Create a TCP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect the socket to the port where the server is listening
print 'connecting to %s, %s' % socket_address
try:
sock.connect(socket_address)
except socket.error, msg:
print msg
sys.exit(1)
try:
# Send data
print 'sending "%s"' % message
sock.sendall(message)
s = sock.recv(1024)
while s != "":
print s
s = sock.recv(1024)
finally:
sock.close()
def handle(command):
#TODO: check if client already started
#TODO: figure out why you have to press enter every time for new line
if command == "start":
from daemon import asynch_start
asynch_start()
else:
sendMessage(command)
def main():
handle(sys.argv[1])
if __name__ == "__main__":
main()
|
# Generated by Django 2.0.7 on 2019-09-19 15:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('paintings', '0016_auto_20190919_0221'),
]
operations = [
migrations.AddField(
model_name='painting',
name='series',
field=models.CharField(blank=True, default='', max_length=255, null=True),
),
]
|
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
from collections import Counter
import numpy as np
import nltk
from nltk.corpus import stopwords
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from nltk.stem import PorterStemmer
#nltk.download('vader_lexicon')
import sklearn
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from xgboost import XGBClassifier
transcripts = "ted-talks/transcripts.csv"
main = "ted-talks/ted_main.csv"
class SourceData():
def __init__(self,transcripts_path, main_path):
self.t = transcripts_path
self.m = main_path
self.df_t = pd.read_csv(self.t)
self.df_m = pd.read_csv(self.m)
self.df = self._merge_frames()
def _merge_frames(self):
df_t = self.df_t.copy()
df_t.drop_duplicates(inplace=True)
#join tables on 'url'
df = pd.merge(self.df_m,df_t,how='inner',on='url')
return df
class SentimentCalculator():
def __init__(self,df):
self.df = df
self._new_words_dict = {'unconvincing':'unconvinced',
'ingenious':'brilliant',
'persuasive':'convincing',
'longwinded':'boring',
'informative':'enlightening',
'jaw-dropping':'astounding'
}
self.talk_scores = self._get_all_talk_scores()
def _str_to_dic_list(self,string_dic_list):
s = string_dic_list
d_list = eval(s)
return d_list
def _words_not_in_vader_lexicon(self):
'''
assumes df has series called 'ratings' and within that
series is lists of dictionaries as strings
'''
df = self.df
def get_rating_name_set(dic_list):
rating_names = [d['name'].lower() for d in dic_list]
return list(set(rating_names))
def get_series_rating_name_set(rating_series):
rs = rating_series
names_all = []
for ratings_list in rs:
d_list = self._str_to_dic_list(ratings_list)
names = get_rating_name_set(d_list)
names_all.extend(names)
return list(set(names_all))
sid = SentimentIntensityAnalyzer()
names_list = get_series_rating_name_set(df['ratings'])
bad_names = [n for n in names_list if n not in sid.lexicon.keys()]
return bad_names
def _get_talk_score(self, ratings_df):
new_words = self._new_words_dict
sid = SentimentIntensityAnalyzer()
r_df = ratings_df
r_df['name'] = r_df['name'].str.lower()
r_df.replace({'name':new_words}, inplace=True)
r_df['base_sentiment_score'] = r_df['name'].map(sid.lexicon)
score = np.average(r_df['base_sentiment_score'], weights=r_df['count'])
return score
def _get_all_talk_scores(self):
scores = []
for talk_ratings in self.df['ratings']:
talk = pd.DataFrame(self._str_to_dic_list(talk_ratings))
score = self._get_talk_score(talk)
scores.append(score)
return scores
class Tags():
def __init__(self,df):
self.df = df.copy()
self.all_tags = self._get_tag_set()
self.all_tag_dummies = self._get_tag_dummies_df()
self.filterd_tag_dummies = self._filter_df_by_correlation()
def _str_to_list(self, string_list):
s = string_list
e_list = eval(s)
return e_list
def _get_tag_set(self):
tags_list = []
series = self.df['tags'].copy()
for tag_list in series:
lst = self._str_to_list(tag_list)
tags_list.extend(lst)
tag_set = set(tags_list)
return list(tag_set)
def _get_tag_dummies_df(self):
s = self.df['tags'].apply(self._str_to_list)
tag_dummies_df = pd.get_dummies(s.apply(pd.Series).stack()).sum(level=0)
tag_dummies_df['sentiment_score'] = self.df['sentiment_score']
return tag_dummies_df
def get_tag_score_correlation(self):
tag_corr= self.all_tag_dummies.corr()['sentiment_score'].sort_values(ascending = False)
tag_corr= tag_corr.drop(index='sentiment_score')
return tag_corr
def _filter_df_by_correlation(self,min=-0.04,max=0.04):
tag_corr = self.get_tag_score_correlation()
tag_corr_filtered = tag_corr[(tag_corr>=max) | (tag_corr<=min)]
cols = list(tag_corr_filtered.index)
cols.append('sentiment_score')
filtered_df= self.all_tag_dummies[cols].copy()
return filtered_df
class Transcripts():
def __init__(self,df):
self.df = df
self.words1 = self._distill_text_to_words()
self._more_stops= self._get_stop_words_by_freq(self.words1)
self.words2 = self._distill_to_words_with_more_stops()
self.stems = self._get_stems()
self.all_stem_dummies = self._get_all_stem_dummies()
self.filtered_stem_dummies = self._get_filtered_dummies()
def _distill_text_to_words(self, more_stop_words=[]):
stoplist = stopwords.words('english')
stoplist.extend(more_stop_words)
def distill(series):
text = series.split()
text = [s.lower() for s in text]
text = [s for s in text if '(' not in s]
text = [s.strip('?') for s in text]
text = [s for s in text if s.isalpha()]
text = list(set(text).difference(set(stoplist)))
return text
words = self.df['transcript'].map(distill)
return words
def _get_stop_words_by_freq(self,
words_Series,
min_count = 25,
max_count=1000):
'''
returns stop words that are outside of min, max
'''
def get_all_word_counts(pandas_Series):
s = pandas_Series
combined = sum(s,[])
fdist = nltk.FreqDist(combined)
counts = fdist.most_common()
return counts
def get_words_list(counts):
words = [c[0] for c in counts]
return words
def slice_word_counts(counts, min_count, max_count):
sliced = [c[0] for c in counts if c[1]>=min_count and c[1]<=max_count]
#sliced = [c[0] for c in counts if c[1]<=max_count]
return sliced
s = words_Series
counts = get_all_word_counts(s)
words = get_words_list(counts)
words_slice = slice_word_counts(counts,min_count,max_count)
stop_words = list(set(words).difference(set(words_slice)))
return stop_words
def _distill_to_words_with_more_stops(self):
stops=self._more_stops
words= self._distill_text_to_words(stops)
return words
def _get_stems(self):
ps = PorterStemmer()
def get_stems_set(word_list):
wl = word_list
stems = [ps.stem(w) for w in wl]
stems_list_set = list(set(stems))
return stems_list_set
stems = self.words2.map(get_stems_set)
return stems
def _get_all_stem_dummies(self):
dummies = pd.get_dummies(self.stems.apply(pd.Series).stack()).sum(level=0)
dummies = dummies.merge(self.df['sentiment_score'],
left_index=True,
right_index=True)
return dummies
def _get_filtered_dummies(self, corr_inner_low = -.055,
corr_inner_high = 0.055):
dummies = self.all_stem_dummies
#filter low correlation stems (columns)
corr = dummies.corr()['sentiment_score'].sort_values(ascending = False)
corr = corr.drop(index='sentiment_score')
corr_filtered = corr[(corr>=corr_inner_high) | (corr<=corr_inner_low)]
cols = list(corr_filtered.index)
cols.append('sentiment_score')
dummies = dummies[cols].copy()
return dummies
class FinalCombinedData():
def __init__(self,df):
self.tags = Tags(df)
self.t = Transcripts(df)
self.dummies = self._combine_dummies()
def _combine_dummies(self):
stem_dummies = self.t.filtered_stem_dummies.drop('sentiment_score',
axis=1
)
A = set(list(stem_dummies.columns))
B = set(list(self.tags.filterd_tag_dummies.columns))
common = list(A.intersection(B))
stem_dummies = stem_dummies.drop(columns = common, axis=1)
dummies = stem_dummies.merge(self.tags.filterd_tag_dummies,
left_index=True,
right_index=True
)
dummies = dummies[(dummies['sentiment_score']>0) & (dummies['sentiment_score']<=2.5)].copy()
return dummies
def print_probability_of_guessing_label_by_chance(label_set):
c = Counter(label_set)
print('Test label Counts:')
print(c)
print('')
print('Probabilities of guessing by chance:')
for k in c.keys():
p = c[k]/len(label_set)
p=round(p,2)
print(''.join([k,': ',str(p)]))
#*************************** Main ******************************************
#clean and combine source data
source = SourceData(transcripts, main)
df = source.df
#calculate and add talk sentiment score column
sent_calc = SentimentCalculator(df)
df['sentiment_score'] = sent_calc.talk_scores
'''
get dummies table, consisting of score-correlated talk 'tag' and
word-stem dummies
'''
combo = FinalCombinedData(df)
dummies = combo.dummies
#label bins sized to distribute labels somewhat evenly
dummies['label'] = pd.cut(dummies['sentiment_score'],
[0,1.8,2.1,2.5],
labels=['C','B','A'])
dummies = dummies[~dummies['label'].isna()]
dummies_df = dummies.drop(['sentiment_score'], axis=1)
#split into features(X) and targets (y)
X = dummies_df.drop(['label'], axis=1)
y = dummies_df['label']
#build training and test pairs
X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(X,
y,
test_size = 0.1,
random_state=100)
print('######################################################################')
print('#################### Neural Net Classifier ############################')
print('')
#neural net classifier
mlp = MLPClassifier(max_iter=10000)
#iterate through model parameter combinations
nn_parameter_space = {'hidden_layer_sizes': [(300,200,100,50,10)],
'activation': ['relu'],
'solver': ['adam','lbfgs'],
'alpha': [0.001,0.0001],
}
clf = GridSearchCV(mlp,
nn_parameter_space,
n_jobs=-1,
cv=3,
verbose=0
)
clf.fit(X_train, y_train)
print('')
print('Best estimator data...')
print('')
print(clf.best_estimator_)
print('')
y_pred = clf.predict(X_test)
print('############### Neural Net Results on the test set:')
print(classification_report(y_test, y_pred))
print('')
print('############### Probability of guessing test label by chance:')
print_probability_of_guessing_label_by_chance(y_test)
print('')
print('################ End Neural Net Classifier ############################')
print('######################################################################')
print('')
print('')
print('######################################################################')
print('#################### XGBoost Classifier ###############################')
print('')
#XGboost classifier
xgb_model = XGBClassifier()
xgb_parameter_space = {'learning_rate':[0.001,0.01,0.05],
'n_estimators':[100],
'max_depth':[2,3,6],
'objective':['multi:softmax'],
'num_class':[3]
}
xgb_clf = GridSearchCV(xgb_model,
xgb_parameter_space,
n_jobs=-1,
cv=3,
verbose=0
)
xgb_clf.fit(X_train,y_train)
print('')
print('Best estimator data...')
print('')
print(xgb_clf.best_estimator_)
print('')
y_pred = xgb_clf.predict(X_test)
print('####################### XGB Results on the test set:')
print(classification_report(y_test, y_pred))
print('')
print('############### Probability of guessing test label by chance:')
print_probability_of_guessing_label_by_chance(y_test)
print('')
print('################ End XGBoost Classifier ###############################')
print('######################################################################') |
from django.shortcuts import render
from django.http import HttpResponse
from rest_framework.views import APIView
from rest_framework.response import Response
from django.contrib.auth.models import User, Group
from rest_framework.generics import (
RetrieveUpdateDestroyAPIView,
CreateAPIView
)
from .models import UserProfile, HomeDetails, HomeRentDetails
from .serializer import (
ProfileSerializer, HomeDetailSerializer,
HomeRentSerializer
)
def index(request):
return HttpResponse("<h1>Welcome to home rental site</h1>")
class CreateUserProfileView(APIView):
""" for create user profile of home owner and renter
"""
def post(self, request, format=None):
try:
user_obj = User.objects.create_user(
first_name=request.data.get('first_name', ''),
last_name=request.data.get('last_name', ''),
username=request.data['username'],
password=request.data['password'],
email=request.data.get('email', '')
)
except Exception as e:
result = {'status': '0', 'error': str(e)}
return Response(result)
if user_obj:
# it contains user type home owner or renter
user_type = request.data.get('type')
g, created = Group.objects.get_or_create(
name=user_type)
user_obj.groups.add(g)
prof_obj = UserProfile.objects.create(
user=user_obj,
phone=request.data.get('phone',''),
address=request.data.get('address','')
)
result = {'status': '1', 'msg': 'Created Successfully'}
return Response(result)
class UserProfileView(RetrieveUpdateDestroyAPIView):
lookup_field = 'pk'
serializer_class = ProfileSerializer
queryset = UserProfile.objects.all()
class HomeViewSerializerView(CreateAPIView):
serializer_class = HomeDetailSerializer
queryset = HomeDetails.objects.all()
def perform_create(self, serializer):
serializer.save(owner=self.request.user)
class HomeReadUpdateDeleteView(RetrieveUpdateDestroyAPIView):
lookup_field = 'pk'
serializer_class = HomeDetailSerializer
queryset = HomeDetails.objects.all()
class CreateHomeRentAPIView(APIView):
def post(self, request, format=None):
try:
data = request.data
home_id = data.get('home_id')
home_obj = HomeDetails.objects.get(pk=home_id)
renter_id = data.get('renter_id')
print('...', renter_id)
renter_obj = UserProfile.objects.get(pk=renter_id)
rental_obj = HomeRentDetails.objects.create(
home=home_obj,
renter=renter_obj,
start_date=data.get('start'),
end_date=data.get('end'),
description=data.get('desc')
)
return Response({'status': '1', 'msg': 'created successfully'})
except Exception as e:
return Response({'status': '0', 'error': str(e)})
class HomeRentReadUpdateDelete(RetrieveUpdateDestroyAPIView):
lookup_field = 'pk'
serializer_class = HomeRentSerializer
queryset = HomeRentDetails.objects.all()
|
import rdkit
from rdkit import Chem
from rdkit.Chem import rdmolops
import numpy
import chainer.datasets as D
import chainer.datasets.tuple_dataset as Tuple
filename_train = 'tox21_10k_data_all.sdf'
filename_val = 'tox21_10k_challenge_test.sdf'
label_names = ['NR-AR', 'NR-AR-LBD', 'NR-AhR', 'NR-Aromatase', 'NR-ER',
'NR-ER-LBD', 'NR-PPAR-gamma', 'SR-ARE', 'SR-ATAD5',
'SR-HSE', 'SR-MMP', 'SR-p53']
MAX_NUMBER_ATOM = 140
def construct_edge_matrix(mol):
if mol is None:
return None
N = mol.GetNumAtoms()
size = MAX_NUMBER_ATOM
adjs = numpy.zeros((4, size, size), dtype=numpy.float32)
for i in range(N):
for j in range(N):
bond = mol.GetBondBetweenAtoms(i, j) # type: Chem.Bond
if bond is not None:
bondType = str(bond.GetBondType())
if bondType == 'SINGLE':
adjs[0, i, j] = 1.0
elif bondType == 'DOUBLE':
adjs[1, i, j] = 1.0
elif bondType == 'TRIPLE':
adjs[2, i, j] = 1.0
elif bondType == 'AROMATIC':
adjs[3, i, j] = 1.0
else:
print("[ERROR] Unknown bond type", bondType)
assert False # Should not come here
return adjs
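# construct_edge_matrix above returns a (4, MAX_NUMBER_ATOM, MAX_NUMBER_ATOM) float32 array
# with one zero-padded adjacency channel per bond type (single, double, triple, aromatic),
# or None when the molecule could not be parsed.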
def preprocessor(mol_supplier, label_names):
descriptors = []
labels = []
count = 0
for mol in mol_supplier:
if mol is None:
continue
label = []
for task in label_names:
if mol.HasProp(task):
label.append(int(mol.GetProp(task)))
else:
label.append(-1)
#adj = rdmolops.GetAdjacencyMatrix(mol)
adj = construct_edge_matrix(mol)
atom_list = [a.GetSymbol() for a in mol.GetAtoms()]
labels.append(label)
descriptors.append((adj, atom_list))
if count == 10000:
break
count += 1
labels = numpy.array(labels, dtype=numpy.int32)
return descriptors, labels
def getAtom2id(train, val):
max_atom = 0
for data in [train, val]:
for d in data:
adj = d[0][0]
atom_list = d[0][1]
max_atom = max(max_atom, len(atom_list))
assert max_atom <= MAX_NUMBER_ATOM
# Construct atom2id dictionary
atom2id = {'empty': 0}
atoms = [d[0][1] for d in train] + [d[0][1] for d in val]
atoms = sum(atoms, [])
for a in atoms:
if a not in atom2id:
atom2id[a] = len(atom2id)
train = convert_dataset(train, atom2id)
val = convert_dataset(val, atom2id)
return train, val, atom2id
def get_tox21():
molSupplier_train = Chem.SDMolSupplier(filename_train)
molSupplier_val = Chem.SDMolSupplier(filename_val)
descriptors_train, label_train = preprocessor(molSupplier_train, label_names)
descriptors_val, label_val = preprocessor(molSupplier_val, label_names)
return Tuple.TupleDataset(descriptors_train, label_train), Tuple.TupleDataset(descriptors_val, label_val)
def convert_dataset(dataset, atom2id):
ret = []
for d in dataset:
(adj, atom_list), label = d
# 0 padding for adj matrix
#s0, s1 = adj.shape
#adj = adj + numpy.eye(s0)
#adj_array = numpy.zeros((MAX_NUMBER_ATOM, MAX_NUMBER_ATOM),
# dtype=numpy.float32)
#adj_array[:s0, :s1] = adj.astype(numpy.float32)
# print('adj_array', adj_array)
# 0 padding for atom_list
atom_list = [atom2id[a] for a in atom_list]
n_atom = len(atom_list)
atom_array = numpy.zeros((MAX_NUMBER_ATOM,), dtype=numpy.int32)
atom_array[:n_atom] = numpy.array(atom_list)
ret.append((adj, atom_array, label))
return ret
def load_one_task(task, filename, atom2id):
molSupplier = Chem.SDMolSupplier(filename)
descriptors = []
labels = []
count = 0
for mol in molSupplier:
if mol is None:
continue
label = []
for _ in label_names:
if mol.HasProp(task):
label.append(int(mol.GetProp(task)))
else:
label.append(-1)
# adj = rdmolops.GetAdjacencyMatrix(mol)
adj = construct_edge_matrix(mol)
atom_list = [a.GetSymbol() for a in mol.GetAtoms()]
labels.append(label)
descriptors.append((adj, atom_list))
labels = numpy.array(labels, dtype=numpy.int32)
dataset = Tuple.TupleDataset(descriptors, labels)
dataset = convert_dataset(dataset, atom2id)
return dataset
def make_dataset():
train, test = get_tox21()
train, test, atom2id = getAtom2id(train, test)
train, val = D.split_dataset(train, int(0.9 * len(train)))
print("size of train set:", len(train))
print("size of val set:", len(val))
print('size of test set:', len(test))
return train, val, test, atom2id
make_dataset() |
########################################################
# desc : deep neural network model for CTR prediction.
#
# @author : qiangz2012@yeah.net
#
########################################################
import tensorflow as tf
import numpy as np
class Dnn_Mlp(object):
def __init__(self, epoch, batch_size, \
lr, optimizer_type, l2_reg, \
hidden_units, output_units, feat_size, \
activation, loss_type):
self.epoch = epoch
self.batch_size = batch_size
self.lr = lr
self.optimizer_type = optimizer_type
self.l2_reg = l2_reg
self.hidden_units = hidden_units
self.output_units = output_units
self.feat_size = feat_size
self.activation = activation
self.loss_type = loss_type
self._init_graph()
init = tf.global_variables_initializer()
self.sess = self._init_session()
self.sess.run(init)
# self.saver = tf.train.Saver()
# self.my_test()
def my_test(self):
print self.epoch
print self.batch_size
print self.lr
print self.optimizer_type
print self.l2_reg
print self.hidden_units
print self.output_units
print self.feat_size
print self.activation
print self.loss_type
def _init_graph(self):
#self.graph = tf.Graph()
#with self.graph.as_default():
# have existing a default Graph in context, now we will
# add some edges or nodes into it
self.X = tf.placeholder(tf.float32,shape=[None,self.feat_size])
self.Y = tf.placeholder(tf.float32,shape=[None,1])
self.weights = self._init_weight()
# ---------------- model ---------------------------
# input -> hidden
hidden = self._add_neuron_layer(self.X,self.weights["layer_0"],self.weights["bias_0"],"hidden_layer",self.activation)
# hidden -> output
output = self._add_neuron_layer(hidden,self.weights["layer_1"],self.weights["bias_1"],"output_layer",self.activation)
# output -> label
y = self._add_neuron_layer(output,self.weights["layer_2"],self.weights["bias_2"],"pCTR","sigmoid")
# --------------- train ---------------------------
# loss
if self.loss_type == "logloss":
self.loss = tf.losses.log_loss(self.Y,y)
else: # mse
self.loss = tf.losses.l2_loss( tf.subtract(self.Y,y) )
# L2
if self.l2_reg > 0.0:
self.loss += tf.contrib.layers.l2_regularizer(self.l2_reg)( self.weights["layer_2"] )
self.loss += tf.contrib.layers.l2_regularizer(self.l2_reg)( self.weights["layer_1"] )
self.loss += tf.contrib.layers.l2_regularizer(self.l2_reg)( self.weights["layer_0"] )
# optimizer
if self.optimizer_type == "adam":
self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr,beta1=0.9,beta2=0.999,epsilon=1e-8)
elif self.optimizer_type == "adagrad":
self.optimizer = tf.train.AdagradOptimizer(learning_rate=self.lr,initial_accumulator_value=1e-8)
else: # gd
self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.lr)
# train
self.train_op = self.optimizer.minimize(self.loss)
def _init_weight(self):
weights = dict()
# W1: input -> hidden
in_size = self.feat_size
hidden_size = self.hidden_units
glorot = np.sqrt( 2.0 / (in_size + hidden_size) )
weights["layer_0"] = tf.Variable( np.random.normal(loc=0.0,scale=glorot,size=(in_size,hidden_size)).astype(np.float32) )
weights["bias_0"] = tf.Variable( np.random.normal(loc=0.0,scale=glorot,size=(1,hidden_size)).astype(np.float32) )
# W2: hidden -> output
out_size = self.output_units
glorot = np.sqrt( 2.0 / (hidden_size + out_size) )
weights["layer_1"] = tf.Variable( np.random.normal(loc=0.0,scale=glorot,size=(hidden_size,out_size)).astype(np.float32) )
weights["bias_1"] = tf.Variable( np.random.normal(loc=0.0,scale=glorot,size=(1,out_size)).astype(np.float32) )
# W3: concat
label_size = 1
glorot = np.sqrt( 2.0 / (out_size + label_size) )
weights["layer_2"] = tf.Variable( np.random.normal(loc=0.0,scale=glorot,size=(out_size,1)).astype(np.float32) )
weights["bias_2"] = tf.Variable( tf.constant(0.01), dtype=np.float32 )
return weights
def _add_neuron_layer(self,X,W,b,name,activation=None):
with tf.name_scope(name):
Z = tf.matmul(X,W)+b
if self.activation == "relu":
return tf.nn.relu(Z)
elif self.activation == "sigmoid":
return tf.sigmoid(Z)
elif self.activation == "tanh":
return tf.tanh(Z)
else:
return Z
def _init_session(self):
# config_ = tf.ConfigProto(device_count={"gpu": 0})
# config_.gpu_options.allow_growth = True
# return tf.Session(config=config_)
return tf.Session()
def _train(self, X_reader, Y_reader):
# train
for epoch in range(self.epoch):
X_batch = X_reader._next_batch( self.batch_size, self.feat_size )
Y_batch = Y_reader._next_batch( self.batch_size, 1 )
loss,opt = self.sess.run( (self.loss, self.train_op), feed_dict={self.X:X_batch,self.Y:Y_batch} )
print "epoch=%d, loss=%f" % (epoch,loss)
# save model
#self.saver.save(self.sess,"mlp_ctr.model")
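# A minimal construction sketch (not in the original file); the reader objects are
# assumptions - anything exposing _next_batch(batch_size, width) and returning numpy
# arrays of shape (batch_size, width) would work with _train above:
# model = Dnn_Mlp(epoch=10, batch_size=128, lr=0.01, optimizer_type="adam",
#                 l2_reg=0.0, hidden_units=64, output_units=32, feat_size=100,
#                 activation="relu", loss_type="logloss")
# model._train(X_reader, Y_reader)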
|
import sys
def hash():
args = sys.argv
arg_length = len(args)-1
if arg_length == 1:
if args[1] == "-h" or args[1] == "-help" or args[1] == "--help":
print("detect-a-hash: detect.py <hash>")
else:
hash_length = (len(args[1]))
if hash_length == 32:
print("detect-a-hash: The hash provided appears to be a MD5 hash.")
elif hash_length == 40:
print("detect-a-hash: The hash provided appears to be a SHA-1 hash.")
elif hash_length == 64:
print("detect-a-hash: The hash provided appears to be a SHA-256 hash.")
else:
print("detect-a-hash: detect.py <hash>")
hash()
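# Note (added): detection is purely by hex-digest length - 32 characters for MD5,
# 40 for SHA-1, 64 for SHA-256 - so any other string of the same length is reported
# the same way.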
|
'''
@author: Kaiwen Luo (k0l06rk)
this file holds the code for the main/experiments of tote merge.
'''
import random
import time
from MaxToteUtilAlgos.runnableHelpers import ReadSimulation
from MaxToteUtilAlgos.runnableHelpers import partition
from MaxToteUtilAlgos.runnableHelpers import Box
from MaxToteUtilAlgos.tote_merge.tote_merge import ToteMerger
import multiprocessing as mp
import pandas as pd
# TOTE INFO - 2 Types
TOTE_INFO = {"w": 23.6, "l": 15.9, "h": 12.2, "weight": 55}
# TOTE_INFO = { "w":25.6, "l":17.7, "h":12.8, "weight":55 }
tote = Box(TOTE_INFO["w"], TOTE_INFO["l"], TOTE_INFO["h"], 0, TOTE_INFO["weight"])
# Experiment Settings
## NUM_INPUTS: Total number of input totes
## INTERVALS: Increment of number of totes in experiments
NUM_INPUTS = 2000
INTERVALS = 200
# Method Selection - 2 types of tote merge
method = "MAX_UTIL"
# method = "IN_ORDER"
# Multiprocessing or not
# NUM_PROCS = mp.cpu_count()
BATCH_SIZE = 500
NUM_PROCS = None
# Output Summary
summary = {"num_totes": [], "run_time": [], "num_save": [], "avg_util_before_3d": [],
"avg_util_after_3d": [], "avg_util_before_2d": [], "avg_util_after_2d": []}
# Helper function to choose tote merge method.
def merge(totes,method):
Merger = ToteMerger(totes)
if method == "IN_ORDER":
result_totes = Merger.merge_in_order()
elif method == "MAX_UTIL":
result_totes = Merger.merge_max_util()
else:
raise TypeError("Method can be only chosen from IN_ORDER or MAX_UTIL")
return result_totes
if __name__ == "__main__":
# Read input: if a .xlsx is given, it is first converted to a .pkl file for later use
# totes = ReadSimulation("./data/NJ51_Simulation_Data_Bin_Packing.xlsx",tote)
totes = ReadSimulation("./data/raw_totes_NJ51.pkl")
# Experiment start
for i in range(1200, NUM_INPUTS + INTERVALS, INTERVALS):
tic = time.time()
# Random sample input totes
random.seed(i)
sub_totes = random.sample(totes, i)
# Merging
if not NUM_PROCS:
result_totes = merge(sub_totes,method)
else:
totes_batch = partition(sub_totes,round(len(sub_totes)/BATCH_SIZE))
pool = mp.Pool(NUM_PROCS)
msgs = zip(totes_batch,len(totes_batch)*[method])
result = pool.starmap_async(merge,msgs)
result_totes = result.get()
pool.close()
pool.join()
result_totes = [item for batch in result_totes for item in batch]
# Result collection
num_save = i - len(result_totes)
util_before_2d = sum([x.util2d for x in sub_totes]) / i
util_after_2d = sum([x.util2d for x in result_totes]) / len(result_totes)
util_before_3d = sum([x.util3d for x in sub_totes]) / i
util_after_3d = sum([x.util3d for x in result_totes]) / len(result_totes)
run_time = time.time() - tic
print("Run time: ", time.time() - tic)
print("num_save: ", num_save)
print("avg_util_before_3d: ", util_before_3d)
print("avg_util_after_3d: ", util_after_3d)
print("avg_util_before_2d: ", util_before_2d)
print("avg_util_after_2d: ", util_after_2d)
print("Complete run", i)
summary["num_totes"].append(i)
summary["run_time"].append(time.time() - tic)
summary["num_save"].append(num_save)
summary["avg_util_before_3d"].append(util_before_3d)
summary["avg_util_after_3d"].append(util_after_3d)
summary["avg_util_before_2d"].append(util_before_2d)
summary["avg_util_after_2d"].append(util_after_2d)
result = pd.DataFrame.from_dict(summary)
if NUM_PROCS:
result.to_csv(f"./result/{method}_summary_i{NUM_INPUTS}_t{INTERVALS}_b{BATCH_SIZE}.csv", index=False)
else:
result.to_csv(f"./result/{method}_summary_i{NUM_INPUTS}_t{INTERVALS}_b0.csv", index=False)
print("complete")
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Telenav, Inc. All rights reserved.
import md5, os, re, subprocess, sys, urllib
import cacheutils
def parseurl(cmd):
print 'cmd:', cmd
cmd = urllib.unquote(cmd)
print 'ucmd:', cmd
# mo = re.search(r'(http[s]://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)(.*)', cmd)
mo = re.search(r'(https?://(?:[-.a-zA-Z0-9]+))(?::[0-9a-fA-F]+)?/(.*)', cmd)
if None != mo:
print 'len:', len(mo.groups())
print mo.groups()
print 'q:', urllib.quote(mo.groups()[1])
def query(cmd):
''' run cmd to send query and get response
'''
print 'cmd:', urllib.unquote(cmd)
proc = subprocess.Popen(cmd + ' --compressed', stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
(output, err) = proc.communicate()
if 0 != proc.returncode:
# curl return error
print 'ret = ' + str(proc.returncode)
print 'output : ' + output
print 'err : ' + err
print cmd + ' --compressed'
exit(0)
return output
# def cachedQueryFilename(cmd, outdir):
# ''' get filename for the query [cmd]
# '''
# fn = md5.new(cmd).hexdigest()
# fn = os.path.normpath(os.path.join(outdir, 'cache_%s' % fn))
# return fn
#
#
def cachedQuery(cmd, outdir):
''' if no cache, run http(s) query and store the response to cache
'''
# parseurl(cmd)
plainCmd = urllib.unquote(cmd)
# 1. check if we can find the response in cache or not
ret = cacheutils.retrieve(plainCmd, outdir)
if ret:
return ret
output = query(cmd)
cacheutils.store(plainCmd, outdir, output)
# print '\noutput:\n',output
return output
def removeCachedQuery(cmd, outdir):
''' remove cache for query : cmd
'''
plainCmd = urllib.unquote(cmd)
cacheutils.clean(plainCmd, outdir)
if __name__ == '__main__':
parseurl(sys.argv[1])
|
import couchbase
from couchbase.bucket import Bucket
import json
from random import randint
import requests
import threading
import time
SLEEP_DURATION = 3
v8_debug_endpoint = "http://localhost:6061/v8debug/"
conn_str = "couchbase://localhost/default"
cb = Bucket(conn_str)
def populate_one_doc():
ssn1 = str(randint(100, 999))
ssn2 = str(randint(10, 99))
ssn3 = str(randint(1000, 9999))
ssn = ssn1 + "_" + ssn2 + "_" + ssn3
key = "v8_debug_test_" + ssn
credit_score = randint(600, 800)
credit_card_count = randint(1, 5)
total_credit_limit = randint(10000, 100000)
credit_limit_used = int((randint(0, 9) / 10.0) * total_credit_limit)
missed_emi_payments = randint(0, 12)
value = {'ssn': ssn, 'credit_score': credit_score,
'credit_card_count': credit_card_count,
'total_credit_limit': total_credit_limit,
'credit_limit_used': credit_limit_used,
'missed_emi_payments': missed_emi_payments}
try:
cb.upsert(key, value, ttl=0, format=couchbase.FMT_JSON)
except:
print "Upsert failed for doc id: ", key
def fire_continue_request(seq):
command = dict()
command["seq"] = seq
command["type"] = "request"
command["command"] = "continue"
command["arguments"] = dict()
command["arguments"]["stepaction"] = "next"
command["arguments"]["stepcount"] = 1 # default is 1
command_to_fire = json.dumps(command)
query_params = {"command": "continue", "appname": "credit_score"}
r = requests.post(v8_debug_endpoint, data=command_to_fire,
params=query_params)
print "Continue call: ", r.text
def fire_evaluate_request(seq):
command = dict()
command["seq"] = seq
command["type"] = "request"
command["command"] = "evaluate"
command["arguments"] = dict()
command["arguments"]["global"] = True
# command["arguments"]["expression"] = "JSON.stringify(process.version)"
# command["arguments"]["expression"] = "100 + 300"
# command["arguments"]["expression"] = "JSON.stringify(updated_doc)"
# command["arguments"]["expression"] = "updated_doc.toString()"
# command["arguments"]["expression"] = "JSON.stringify(updated_doc)"
command["arguments"]["expression"] = "v1"
command["arguments"]["global"] = True
# command["arguments"]["additional_context"] = list()
# context = dict()
# context["name"] = "string"
# context["handle"] = 11
# command["arguments"]["additional_context"].append(context)
# command["arguments"]["disable_break"] = True
command_to_fire = json.dumps(command)
print command_to_fire
query_params = {"command": "evaluate", "appname": "credit_score"}
r = requests.post(v8_debug_endpoint, data=command_to_fire,
params=query_params)
print "Evaluate call: ", r.text
def fire_lookup_request(seq):
command = dict()
command["seq"] = seq
command["type"] = "request"
command["command"] = "lookup"
command["arguments"] = dict()
command["arguments"]["handles"] = list()
command["arguments"]["handles"].append("meta")
command["arguments"]["includeSource"] = True
command_to_fire = json.dumps(command)
query_params = {"command": "lookup", "appname": "credit_score"}
r = requests.post(v8_debug_endpoint, data=command_to_fire,
params=query_params)
print "Lookup call: ", r.text
def fire_backtrace_request(seq):
command = dict()
command["seq"] = seq
command["type"] = "request"
command["command"] = "backtrace"
command["arguments"] = dict()
command["arguments"]["fromFrame"] = 0
command["arguments"]["toFrame"] = 1
command["arguments"]["bottom"] = True
command_to_fire = json.dumps(command)
query_params = {"command": "backtrace", "appname": "credit_score"}
r = requests.post(v8_debug_endpoint, data=command_to_fire,
params=query_params)
print "Backtrace call: ", r.text
def fire_frame_request(seq):
command = dict()
command["seq"] = seq
command["type"] = "request"
command["command"] = "frame"
command["arguments"] = dict()
command["arguments"]["number"] = 1
command_to_fire = json.dumps(command)
query_params = {"command": "frame", "appname": "credit_score"}
r = requests.post(v8_debug_endpoint, data=command_to_fire,
params=query_params)
print "Frame call: ", r.text
def fire_source_request(seq):
command = dict()
command["seq"] = seq
command["type"] = "request"
command["command"] = "source"
command["arguments"] = dict()
command["arguments"]["frame"] = 2
command["arguments"]["fromLine"] = 10
command["arguments"]["toLine"] = 20
command_to_fire = json.dumps(command)
query_params = {"command": "source", "appname": "credit_score"}
r = requests.post(v8_debug_endpoint, data=command_to_fire,
params=query_params)
print "Source call: ", r.text
def fire_setbreakpoint_request(seq):
command = dict()
command["seq"] = seq
command["type"] = "request"
command["command"] = "setbreakpoint"
command["arguments"] = dict()
command["arguments"]["type"] = "function"
command["arguments"]["target"] = "OnUpdate"
command["arguments"]["line"] = 1
command_to_fire = json.dumps(command)
query_params = {"command": "setbreakpoint", "appname": "credit_score"}
r = requests.post(v8_debug_endpoint, data=command_to_fire,
params=query_params)
print "setbreakpoint: ", r.text
def fire_clearbreakpoint_request(seq):
command = dict()
command["seq"] = seq
command["type"] = "request"
command["command"] = "clearbreakpoint"
command["arguments"] = dict()
command["arguments"]["type"] = "function"
command["arguments"]["breakpoint"] = 1 # no. of breakpoints to clear
command_to_fire = json.dumps(command)
query_params = {"command": "clearbreakpoint", "appname": "credit_score"}
r = requests.post(v8_debug_endpoint, data=command_to_fire,
params=query_params)
print "clearbreakpoint call: ", r.text
def fire_listbreakpoints_request(seq):
command = dict()
command["seq"] = seq
command["type"] = "request"
command["command"] = "listbreakpoints"
command_to_fire = json.dumps(command)
query_params = {"command": "listbreakpoints", "appname": "credit_score"}
r = requests.post(v8_debug_endpoint, data=command_to_fire,
params=query_params)
print "listbreakpoints call: ", r.text
def cb_store():
# debugging cycle being followed is:
# setbreakpoint -> 3 continue -> clearbreakpoint
# -> 2 continue -> listbreakpoints
populate_one_doc()
time.sleep(2)
populate_one_doc()
for i in xrange(3):
time.sleep(SLEEP_DURATION)
populate_one_doc()
time.sleep(SLEEP_DURATION)
populate_one_doc()
for i in xrange(2):
time.sleep(SLEEP_DURATION)
populate_one_doc()
time.sleep(SLEEP_DURATION)
populate_one_doc()
def test_eval():
populate_one_doc()
time.sleep(2)
populate_one_doc()
for i in xrange(6):
time.sleep(SLEEP_DURATION)
populate_one_doc()
time.sleep(SLEEP_DURATION)
for i in xrange(10):
populate_one_doc()
time.sleep(2)
def main():
seq = randint(100, 1000)
# fire_continue_request(seq)
# fire_evaluate_request(seq + 1)
# fire_lookup_request(seq + 2)
# fire_backtrace_request(seq + 3)
# fire_frame_request(seq + 4)
# fire_source_request(seq + 5)
# fire_setbreakpoint_request(seq + 6)
# fire_clearbreakpoint_request(seq + 7)
# fire_listbreakpoints_request(seq + 8)
""""t = threading.Thread(target=test_eval)
t.start()
fire_setbreakpoint_request(seq + 6)
time.sleep(SLEEP_DURATION)
for i in xrange(6):
fire_continue_request(seq)
time.sleep(SLEEP_DURATION)
fire_evaluate_request(seq + 1)
time.sleep(SLEEP_DURATION)
t.join()"""
t = threading.Thread(target=cb_store)
t.start()
fire_evaluate_request(seq + 1)
fire_setbreakpoint_request(seq + 6)
time.sleep(SLEEP_DURATION)
for i in xrange(3):
fire_continue_request(seq)
time.sleep(SLEEP_DURATION)
fire_clearbreakpoint_request(seq + 7)
time.sleep(SLEEP_DURATION)
for i in xrange(2):
fire_continue_request(seq)
time.sleep(SLEEP_DURATION)
fire_listbreakpoints_request(seq + 8)
time.sleep(SLEEP_DURATION)
t.join()
if __name__ == "__main__":
main()
|
""" This simulates when multiple balls in a container where collisions are perfectly elastic"""
from ball import Ball
import pygame
from pygame import Vector2
from itertools import combinations
from random import randint, randrange
""" Create a pygame window """
pygame.init()
width, height = 400, 400
win = pygame.display.set_mode((width, height))
winRect = win.get_rect()
clock = pygame.time.Clock()
""" Setting radius and mass of all balls same"""
radius = 10
mass = 1
max_speed = 10
num_balls = 40
""" Create balls at random locations with random velocities """
balls = [Ball(Vector2(randint(radius, width-radius), randint(radius, height-radius)),
Vector2(randrange(max_speed), randrange(max_speed)), winRect, radius=radius, mass=mass) for _ in range(num_balls)]
""" Colors to use for the balls """
colors = [(0, 0, 0), (255, 0, 0), (0, 255, 0), (0, 0, 255), (0, 255, 255)]
Run = True
while Run:
clock.tick(60)
for event in pygame.event.get():
if event.type == pygame.QUIT:
Run = False
""" Fill the background """
win.fill((255, 255, 255))
""" Draw each of the ball """
for index, ball in enumerate(balls):
pygame.draw.circle(win, colors[index%len(colors)], (int(ball.position.x), int(ball.position.y)), int(ball.radius))
for _ in range(10):
""" Update velocities on collision """
for ball1, ball2 in combinations(balls, 2):
if ball1.check_collision(ball2):
ball1.collide(ball2)
""" Update positions of balls """
for ball in balls:
ball.update(0.1)
# print("Total kinetic energy:", sum(ball.kinetic_energy() for ball in balls))
pygame.display.update()
|
# -*-coding:utf-8-*-
'''
Tip 6_2. Converting a number written in base n to decimal
'''
print(int("30", 20))
|
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import render, redirect, reverse
from django.http import HttpResponse, JsonResponse
from django.core import serializers
from django.views.generic import ListView, CreateView, UpdateView, DeleteView, TemplateView, FormView
from .models import *
from directorio.forms import *
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
from django.forms.models import model_to_dict
import json
from django.core.serializers.json import DjangoJSONEncoder
from django.db.models import *
from django.contrib import messages
import calendar
import datetime
from django.contrib.auth.hashers import check_password
from django.contrib.auth.models import User
from django.contrib.auth.views import PasswordChangeView
from django.contrib.auth.forms import UserCreationForm, UserChangeForm, PasswordChangeForm
from django.contrib.auth import update_session_auth_hash
class Round(Func):
function = 'ROUND'
arity = 2
def cambio_pass(request):
if request.method == 'POST':
form = PasswordChangeForm(request.user, request.POST)
if form.is_valid():
user = form.save()
update_session_auth_hash(request, user)
return render(request, 'cambio_pass.html',context={'cambiado':'Password Cambiado'})
#messages.success(request,'Contra cambiada')
#return redirect('logout')
else:
return render(request, 'cambio_pass.html', context={'error':'Corrija los datos', 'form':form})
#messages.error(request, 'Corrija el error')
else:
form = PasswordChangeForm(request.user)
return render(request, 'cambio_pass.html', {'form': form})
def login(request):
return render(request, template_name= 'login.html', context={'login': login})
def index(request):
return render(request, template_name= 'index.html', context={'index': index})
def base_usuarios(request):
return render(request, template_name= 'base_usuarios.html')
def graficas_municipales(request):
ahora = datetime.datetime.now()
meses = ("Enero", "Febrero", "Marzo", "Abril", "Mayo", "Junio", "Julio", "Agosto", "Septiembre", "Octubre", "Noviembre", "Diciembre")
mes = meses[ahora.month -1]
condicion = Q(municipio__usuario__id=request.user.id,atendido='si')
if request.user.groups.filter(name ='Instructores').exists():
pedidos = pedido.objects.filter(condicion).order_by('date__month').values('date__month').annotate(recaudacion=Sum('sub_total'))
return HttpResponse(json.dumps(list(pedidos), cls=DjangoJSONEncoder))
else:
pedidos = pedido.objects.filter(atendido='si').order_by('date__month').values('date__month').annotate(recaudacion=Sum('sub_total'))
return HttpResponse(json.dumps(list(pedidos), cls=DjangoJSONEncoder))
def graficas_provinciales(request):
ahora = datetime.datetime.now()
mes=ahora.month
condicion = Q(provincia__usuario__id=request.user.id,atendido='si')
if request.user.groups.filter(name ='Directores').exists():
pedidos = pedido.objects.filter(condicion,date__month=mes).order_by('date__month','municipio').values('municipio__municipio').annotate(recaudacion=Sum('sub_total'))
return HttpResponse(json.dumps(list(pedidos), cls=DjangoJSONEncoder))
else:
pedidos = pedido.objects.filter(atendido='si',date__month=mes).order_by('date__month','municipio').values('municipio__municipio','date__month').annotate(recaudacion=Sum('sub_total'))
return HttpResponse(json.dumps(list(pedidos), cls=DjangoJSONEncoder))
def list_servicios(request):
servicios = servicio.objects.all()
return render(request, template_name= 'list_servicios.html', context={'servicios': servicios})
def instructores(request):
return render(request, template_name= 'instructores.html')
def por_atender(request):
#condicion = Q(municipio__usuario__id=request.user.id,atendido='no')
if request.user.groups.filter(name ='Instructores').exists():
datos=list(pedido.objects.filter(municipio__usuario__id=request.user.id,atendido='no').order_by('date','carnet').values('carnet','date','nombre','apellido1','apellido2').annotate(total=Sum('sub_total'),contratos=Count('id')))
return HttpResponse(json.dumps(datos,cls=DjangoJSONEncoder))
else:
datos=list(pedido.objects.filter(atendido='no').order_by('date', 'carnet').values('carnet','date','nombre','apellido1','apellido2').annotate(total=Sum('sub_total'),contratos=Count('id')))
return HttpResponse(json.dumps(datos,cls=DjangoJSONEncoder))
def clientes_atendidos(request):
return render(request, template_name= 'clientes_atendidos.html')
def atendidos(request):
#condicion = Q(municipio__usuario__id=request.user.id,atendido='si')
if request.user.groups.filter(name ='Instructores').exists():
datos=list(pedido.objects.filter(municipio__usuario__id=request.user.id,atendido='si').order_by('date','carnet').values('carnet','date','nombre','apellido1','apellido2').annotate(total=Sum('sub_total'),contratos=Count('id')))
return HttpResponse(json.dumps(datos,cls=DjangoJSONEncoder))
else:
datos=list(pedido.objects.filter(atendido='si').order_by('date', 'carnet').values('carnet','date','nombre','apellido1','apellido2').annotate(total=Sum('sub_total'),contratos=Count('id')))
return HttpResponse(json.dumps(datos,cls=DjangoJSONEncoder))
def contador_atendidos(request):
if request.user.groups.filter(name ='Instructores').exists():
pedidos = pedido.objects.filter(municipio__usuario__id=request.user.id,atendido='si').order_by('date','carnet').values('carnet','nombre').annotate(contratos=Count('id')).count()
return HttpResponse(json.dumps(pedidos, cls=DjangoJSONEncoder))
else:
pedidos = pedido.objects.filter(atendido='si').order_by('date','carnet').values('carnet','nombre').annotate(contratos=Count('id')).count()
return HttpResponse(json.dumps(pedidos, cls=DjangoJSONEncoder))
def buscador(request, fecha1,fecha2):
condicion1 = Q(municipio__usuario__id=request.user.id,atendido='si')
if request.user.groups.filter(name = 'Instructores').exists():
datos = list(pedido.objects.filter(condicion1, date__range=[fecha1,fecha2]).order_by('date','carnet').values('carnet','nombre','apellido1','apellido2', 'date').annotate(total=Sum('sub_total'),contratos=Count('id')))
return HttpResponse(json.dumps(datos,cls=DjangoJSONEncoder))
else:
datos = list(pedido.objects.filter(atendido='si', date__range=[fecha1,fecha2]).order_by('date','carnet').values('carnet','nombre','apellido1','apellido2', 'date').annotate(total=Sum('sub_total'),contratos=Count('id')))
return HttpResponse(json.dumps(datos,cls=DjangoJSONEncoder))
def atender_usuario(request,carnet,date):
# .update() persists the change directly; no model-level save() is needed here
pedido.objects.filter(carnet=carnet,date=date).update(atendido='si')
return render(request, template_name= 'instructores.html', context={'atendido': 'Se atendio el usuario'})
def municipio_provincia(request, id_provincia):
prov = provincia.objects.filter(id=id_provincia).select_related('municipio').values('municipio__municipio','municipio__id')
return HttpResponse(json.dumps(list(prov), cls=DjangoJSONEncoder))
def procesar(request):
form = pedidoForm(request.POST)
#serv= servicio.objects.filter(temporal='si')
return render(request, template_name= 'procesar.html', context= {'form':form})
def pedidoCreate(request):
if request.method == 'POST':
form = pedidoForm(request.POST)
data=list(json.loads(request.POST['servicios']))
lis=[]
for x in data:
p= pedido()
p.nombre=request.POST['nombre']
p.carnet=request.POST['carnet']
p.apellido1=request.POST['apellido1']
p.apellido2=request.POST['apellido2']
p.provincia=provincia.objects.get(id=request.POST['provincia'])
p.municipio=municipio.objects.get(id=request.POST['municipio'])
p.direccion=request.POST['direccion']
p.servicio=servicio.objects.get(id=x['id_servicio'])
p.sub_total=x['subtotal']
p.cantidad=x['cantidad']
p.atendido='no'
p.save()
return redirect('list_servicios')
else:
form = pedidoForm()
return render(request, 'procesar.html', {'form':form})
def detalles_modal_atendidos(request, carnet, date):
datos=list(pedido.objects.filter(carnet=carnet,date=date,atendido='si').order_by('date','carnet').values('carnet','nombre','apellido1','apellido2','direccion').annotate(total=Sum('sub_total'),cantidad=Sum('cantidad')))
return HttpResponse(json.dumps(datos,cls=DjangoJSONEncoder))
def detalles_modal_atender(request, carnet, date):
datos=list(pedido.objects.filter(carnet=carnet,date=date,atendido='no').order_by('date','carnet').values('carnet','nombre','apellido1','apellido2','direccion').annotate(total=Sum('sub_total'),cantidad=Sum('cantidad')))
return HttpResponse(json.dumps(datos,cls=DjangoJSONEncoder))
def detalles_modal_servicio(request, carnet, date):
datos = pedido.objects.filter(carnet=carnet,date=date,atendido='si').order_by('date','carnet').values('servicio_id','cantidad').annotate(total=Sum('sub_total'))
lis=[]
for x in datos:
ser = servicio.objects.get(id=x['servicio_id'])
data=dict()
data=model_to_dict(ser)
lis.append(data)
return HttpResponse(json.dumps(lis,cls=DjangoJSONEncoder))
def detalles_modal_servicio_atender(request, carnet, date):
datos = pedido.objects.filter(carnet=carnet,date=date,atendido='no').order_by('date','carnet').values('servicio_id','cantidad').annotate(total=Sum('sub_total'))
lis=[]
for x in datos:
ser = servicio.objects.get(id=x['servicio_id'])
data=dict()
data=model_to_dict(ser)
lis.append(data)
return HttpResponse(json.dumps(lis,cls=DjangoJSONEncoder))
def cantidades(request, carnet, date):
datos = pedido.objects.filter(carnet=carnet,date=date,atendido='si').order_by('date','carnet').values('servicio_id','cantidad').annotate(total=Sum('sub_total'))
p= []
for x in datos:
p.append(x['cantidad'])
return HttpResponse(json.dumps(p,cls=DjangoJSONEncoder))
def cantidades_atender(request, carnet, date):
datos = pedido.objects.filter(carnet=carnet,date=date,atendido='no').order_by('date','carnet').values('servicio_id','cantidad').annotate(total=Sum('sub_total'))
p= []
for x in datos:
p.append(x['cantidad'])
return HttpResponse(json.dumps(p,cls=DjangoJSONEncoder))
def contador_atendidos_municipios(request):
pedidos = pedido.objects.filter(provincia__usuario__id=request.user.id,atendido='si').order_by('date','carnet').values('carnet','nombre','municipio').annotate(contratos=Count('id'))
return HttpResponse(pedidos)
def agregar_carrito(request, id):
ser = servicio.objects.get(id=id)
data=dict()
data['servicio']=model_to_dict(ser)
return JsonResponse(data)
def cargar_servicios(request, servicios):
servicios=servicios.split(',')
data=list(servicio.objects.filter(id__in=servicios).values('id','nomb_servicio','descripcion','precio'))
# id__in because 'servicios' is a list
return HttpResponse(json.dumps(data,cls=DjangoJSONEncoder))
def cargar_envio(request, servicios):
servicios=servicios.split(',')
data=list(servicio.objects.all().values('id','nomb_servicio','descripcion','precio'))
# model instances are not JSON serializable; use .values() as in cargar_servicios above
return HttpResponse(json.dumps(data,cls=DjangoJSONEncoder))
def contador_atender(request):
if request.user.groups.filter(name ='Instructores').exists():
pedidos = pedido.objects.filter(municipio__usuario__id=request.user.id,atendido='no').order_by('date','carnet').values('carnet','nombre').annotate(contratos=Count('id')).count()
return HttpResponse(json.dumps(pedidos, cls=DjangoJSONEncoder))
else:
pedidos = pedido.objects.filter(atendido='no').order_by('date','carnet').values('carnet','nombre').annotate(contratos=Count('id')).count()
return HttpResponse(json.dumps(pedidos, cls=DjangoJSONEncoder))
|
import torch
import spacy
from spacy.lang.en.stop_words import STOP_WORDS
from fuzzywuzzy import fuzz
import json, re, random, os
from copy import deepcopy
from tqdm import tqdm
flatten = lambda l: [item for sublist in l for item in sublist]
nlp = spacy.load('en_core_web_sm')
def remove_stop(tokens):
    # tokens: list of str; drop English stop words
    output = [word for word in tokens if word not in STOP_WORDS]
    return output
def remove_th(tokens):
    # strip the "th" ordinal suffix from numeric tokens such as "4th"
    pattern = re.compile(r'[0-9]+th($|\s)')
    output = [word.strip('th') if re.match(pattern, word) else word for word in tokens]
    return output
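# Enumerate every contiguous n-gram of the token list, returning (text, start, end) spans for slot matching.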
def get_ngram(tokens):
ngram = []
for i in range(1, len(tokens) + 1):
for s in range(len(tokens) - i + 1):
ngram.append((" ".join(tokens[s: s + i]), s, i + s))
return ngram
def kvret2kvret_star(config):
origin_file_set = ['kvret/train',
'kvret/dev',
'kvret/test']
file_set = ['kvret_star/train',
'kvret_star/dev',
'kvret_star/test']
def sessions_write(sessions,f_w):
for session in sessions:
f_w.writelines(session)
f_w.write('\n')
for origin_file_name,file_name in zip(origin_file_set,file_set):
f_r = open('data/' + origin_file_name + '.iob', 'r', encoding='utf-8').readlines()
f_w = open('data/' + file_name + '.iob', 'w', encoding='utf-8')
sessions=[]
current_session = []
for line in f_r:
if line=='\n':
sessions.append(current_session)
current_session = []
if len(sessions) > 1:
sessions_write(sessions,f_w)
sessions=[]
elif random.Random(24022019).randint(1,9) < 6:
continue
else:
sessions_write(sessions, f_w)
sessions = []
else:
current_session.append(line)
def json2iob_kvret():
file_set = ['train',
'dev',
'test']
for file_name in file_set:
f_r = open('data/kvret/' + file_name + '.json', 'r', encoding='utf-8')
f_w = open('data/kvret/' + file_name + '.iob', 'w', encoding='utf-8')
json_data = json.load(f_r)
for dialogue in json_data:
session_intent = dialogue['scenario']['task']['intent']
driver = ''
for turn in dialogue['dialogue']:
speaker = turn['turn']
if speaker == 'driver':
driver = turn['data']['utterance']
continue
else:
if '|||' not in driver and driver!='':
end = turn['data']['end_dialogue']
intent = 'thanks' if end else session_intent
slots = turn['data']['slots']
driver_seg = [token.text for token in nlp.tokenizer(driver) if not token.is_space]
driver_seg_lower = [token.lower() for token in driver_seg]
driver_seg_lower_rs = [token.lower().strip('s') for token in driver_seg]
driver_seg_lower_rs_th = remove_th(driver_seg_lower_rs)
driver_len = len(driver_seg_lower)
driver_iob = ['O' for i in driver_seg_lower]
for slot, value in slots.items():
flag_find = False
flag_exist = False
value_seg_lower = [token.text.lower().strip('s') for token in
nlp.tokenizer(value.strip().replace('.', ''))]
value_seg_lower = remove_th(value_seg_lower)
value_len = len(value_seg_lower)
# match exactly
for i in range(driver_len):
if (i + value_len <= driver_len) and (driver_seg_lower_rs_th[i:i + value_len] == value_seg_lower):
driver_iob[i] = 'B-' + slot
for j in range(1, value_len):
driver_iob[i + j] = 'I-' + slot
flag_find = True
break
if driver_seg_lower_rs_th[i] in value_seg_lower and driver_seg_lower[i] not in STOP_WORDS:
flag_exist = True
if flag_exist and not flag_find:
# remove stop word in slot_value
n_gram_candidate = get_ngram(driver_seg_lower_rs_th)
n_gram_candidate = sorted(n_gram_candidate, key=lambda x: (fuzz.token_sort_ratio(x[0], value_seg_lower),-len(x[0].split())),
reverse=True)
top = n_gram_candidate[0]
for i in range(top[1], top[2]):
if i == top[1]:
driver_iob[i] = 'B-' + slot
else:
driver_iob[i] = 'I-' + slot
#print('{}\t{}'.format(value,' '.join(driver_seg[top[1]:top[2]])))
driver = ' '.join(driver_seg) + '|||' + ' '.join(driver_iob) + '|||' + intent
f_w.write(driver + '\n')
assistant = turn['data']['utterance']
f_w.write(assistant + '\n')
f_w.write('\n')
f_w.close()
def json2iob_m2m():
file_set = ['sim-M/dev',
'sim-M/train',
'sim-M/test',
'sim-R/dev',
'sim-R/train',
'sim-R/test'
]
for file_name in file_set:
f_r = open('data/m2m/' + file_name + '.json', 'r', encoding='utf-8')
f_w = open('data/m2m/' + file_name + '.iob', 'w', encoding='utf-8')
json_data = json.load(f_r)
for dialogue in json_data:
user_intent = ''
for i, turn in enumerate(dialogue['turns']):
if i == 0:
user_intent = turn['user_intents'][0]
user_act = [act['type'] for act in turn['user_acts']]
user_tokens = turn['user_utterance']['tokens']
slots = turn['user_utterance']['slots']
user_iob = ['O' for token in user_tokens]
for slot in slots:
start = slot['start']
end = slot['exclusive_end']
slot_name = slot['slot']
user_iob[start] = 'B-'+slot_name
for j in range(start+1,end):
user_iob[j] = 'I-'+slot_name
if i != 0:
sys = turn['system_utterance']['text']
f_w.write(sys + '\n')
user = ' '.join(user_tokens)+'|||'+' '.join(user_iob)+'|||'+user_intent+'|||'+' '.join(user_act)
f_w.write(user + '\n')
f_w.write('\n')
f_w.close()
for file_name in ['train.iob','test.iob','dev.iob']:
os.system('cat data/m2m/sim-R/'+ file_name+' data/m2m/sim-M/'+file_name+' > data/m2m/'+file_name)
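# Build word/slot/intent vocabularies from an .iob file; words that occur only once are left to map to <unk>.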
def build_vocab(path,user_only=False):
print('building dictionary first...')
data = open(path,"r",encoding="utf-8").readlines()
p_data = []
bot = []
for d in data:
if d=="\n":
bot=[]
continue
dd = d.replace("\n","").split("|||")
if len(dd)==1:
if user_only:
pass
else:
bot = dd[0].split()
else:
user = dd[0].split()
tag = dd[1].split()
intent = dd[2]
p_data.append([bot,user,tag,intent])
bots, currents, slots, intents = list(zip(*p_data))
    vocab_freq = {}
    for word in flatten(currents + bots):
        vocab_freq[word] = vocab_freq.get(word, 0) + 1
    # keep only words seen more than once; anything rarer falls back to <unk>
    vocab = [v for v, f in vocab_freq.items() if f > 1]
slot_vocab = list(set(flatten(slots)))
intent_vocab = list(set(intents))
for rand_vocab in [vocab,slot_vocab,intent_vocab]:
rand_vocab.sort()
word2index={"<pad>" : 0, "<unk>" : 1, "<null>" : 2, "<s>" : 3, "</s>" : 4}
for vo in vocab:
if word2index.get(vo)==None:
word2index[vo] = len(word2index)
slot2index={"<pad>" : 0}
for vo in slot_vocab:
if slot2index.get(vo)==None:
slot2index[vo] = len(slot2index)
intent2index={}
for vo in intent_vocab:
if intent2index.get(vo)==None:
intent2index[vo] = len(intent2index)
return [word2index,slot2index,intent2index]
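# Convert an .iob file into tensors: dialogue history, current utterance, slot tags and intent index (plus LM candidates when slm is enabled).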
def prepare_dataset(path,config,built_vocab,user_only=False):
slm = config.slm_weight>0
data = open(path,"r",encoding="utf-8").readlines()
p_data = []
c_data = []
history=[["<null>"]]
for d in data:
if d=="\n":
if slm:
temp = deepcopy(history)
for i in range(1,len(history)):
c_data.append([temp[:i],temp[i:]])
history=[["<null>"]]
continue
dd = d.replace("\n","").split("|||")
if len(dd)==1:
if user_only:
pass
else:
bot = dd[0].split()
history.append(bot)
else:
user = dd[0].split()
tag = dd[1].split()
intent = dd[2]
temp = deepcopy(history)
p_data.append([temp,user,tag,intent])
history.append(user)
word2index, slot2index, intent2index = built_vocab
for t in tqdm(p_data):
for i,history in enumerate(t[0]):
t[0][i] = prepare_sequence(history, word2index).view(1, -1)
t[1] = prepare_sequence(t[1], word2index).view(1, -1)
t[2] = prepare_sequence(t[2], slot2index).view(1, -1)
t[3] = torch.LongTensor([intent2index[t[3]]]).view(1,-1)
if slm:
for t in tqdm(c_data):
for i, history in enumerate(t[0]):
t[0][i] = prepare_sequence(history, word2index).view(1, -1)
for i, candidate in enumerate(t[1]):
t[1][i] = prepare_sequence(candidate, word2index).view(1, -1)
t.append(torch.LongTensor([1]+[0 for i in range(i-1)]).view(1, -1))
else:
c_data = p_data
return p_data,c_data
def prepare_sequence(seq, to_index):
idxs = list(map(lambda w: to_index[w] if to_index.get(w) is not None else to_index["<unk>"], seq))
return torch.LongTensor(idxs)
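# Yield mini-batches of size batch_size; the trailing, possibly smaller, batch is yielded as well.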
def data_loader(train_data,batch_size,shuffle=False):
if shuffle: random.Random(24022019).shuffle(train_data)
sindex = 0
eindex = batch_size
while eindex < len(train_data):
batch = train_data[sindex: eindex]
temp = eindex
eindex = eindex + batch_size
sindex = temp
yield batch
if eindex >= len(train_data):
batch = train_data[sindex:]
yield batch
def pad_to_batch_slm(batch, w_to_ix): # for bAbI dataset
history, candidate, label = list(zip(*batch))
max_history = max([len(h) for h in history])
max_len = max([h.size(1) for h in flatten(history)])
max_candidate = max([len(h) for h in candidate])
max_len_candidate = max([h.size(1) for h in flatten(candidate)])
historys, candidates, labels = [], [], []
for i in range(len(batch)):
history_p_t = []
for j in range(len(history[i])):
if history[i][j].size(1) < max_len:
history_p_t.append(torch.cat([history[i][j], torch.LongTensor(
[w_to_ix['<pad>']] * (max_len - history[i][j].size(1))).view(1, -1)], 1))
else:
history_p_t.append(history[i][j])
while len(history_p_t) < max_history:
history_p_t.append(torch.LongTensor([w_to_ix['<pad>']] * max_len).view(1, -1))
history_p_t = torch.cat(history_p_t)
historys.append(history_p_t)
candidate_p_t = []
for j in range(len(candidate[i])):
if candidate[i][j].size(1) < max_len_candidate:
candidate_p_t.append(torch.cat([candidate[i][j], torch.LongTensor(
[w_to_ix['<pad>']] * (max_len_candidate - candidate[i][j].size(1))).view(1, -1)], 1))
else:
candidate_p_t.append(candidate[i][j])
while len(candidate_p_t) < max_candidate:
candidate_p_t.append(torch.LongTensor([w_to_ix['<pad>']] * max_len_candidate).view(1, -1))
candidate_p_t = torch.cat(candidate_p_t)
candidates.append(candidate_p_t)
if label[i].size(1) < max_candidate:
labels.append(torch.cat(
[label[i], torch.LongTensor([0] * (max_candidate - label[i].size(1))).view(1, -1)], 1))
else:
labels.append(label[i])
labels = torch.cat(labels)
return historys, candidates, labels
def pad_to_batch(batch, w_to_ix,s_to_ix): # for bAbI dataset
history,current,slot,intent = list(zip(*batch))
max_history = max([len(h) for h in history])
max_len = max([h.size(1) for h in flatten(history)])
max_current = max([c.size(1) for c in current])
max_slot = max([s.size(1) for s in slot])
historys, currents, slots = [], [], []
for i in range(len(batch)):
history_p_t = []
for j in range(len(history[i])):
if history[i][j].size(1) < max_len:
history_p_t.append(torch.cat([history[i][j], torch.LongTensor([w_to_ix['<pad>']] * (max_len - history[i][j].size(1))).view(1, -1)], 1))
else:
history_p_t.append(history[i][j])
while len(history_p_t) < max_history:
history_p_t.append(torch.LongTensor([w_to_ix['<pad>']] * max_len).view(1, -1))
history_p_t = torch.cat(history_p_t)
historys.append(history_p_t)
if current[i].size(1) < max_current:
currents.append(torch.cat([current[i], torch.LongTensor([w_to_ix['<pad>']] * (max_current - current[i].size(1))).view(1, -1)], 1))
else:
currents.append(current[i])
if slot[i].size(1) < max_slot:
slots.append(torch.cat([slot[i], torch.LongTensor([s_to_ix['<pad>']] * (max_slot - slot[i].size(1))).view(1, -1)], 1))
else:
slots.append(slot[i])
currents = torch.cat(currents)
slots = torch.cat(slots)
intents = torch.cat(intent)
return historys, currents, slots, intents
def pad_to_history(history, x_to_ix): # this is for inference
max_x = max([len(s) for s in history])
x_p = []
for i in range(len(history)):
h = prepare_sequence(history[i],x_to_ix).unsqueeze(0)
if len(history[i]) < max_x:
x_p.append(torch.cat([h,torch.LongTensor([x_to_ix['<pad>']] * (max_x - h.size(1))).view(1, -1)], 1))
else:
x_p.append(h)
history = torch.cat(x_p)
return [history]
|
from agents.simpleAgent import simpleAgent
from agents.markovAgent import markovAgent
from json import dump,load
from heuristics import *
class markovSimpleAgent(markovAgent,simpleAgent):
'''Not implemented'''
pass
|
"""
Divides the dataset_readers into training, development, and test split.
"""
import random
from pathlib import Path
from absl import app, flags, logging
from tqdm import tqdm
FLAGS = flags.FLAGS
flags.DEFINE_string('input_path', default=None,
help='Path to the input data')
flags.DEFINE_string('save_dir', default=None,
help='Directory to save the splits')
flags.DEFINE_integer('num_dev_data', default=10000,
help='The size of the development set')
flags.DEFINE_integer('num_test_data', default=10000,
help='The size of the test set')
random.seed(1234)
def main(argv):
# Count number of data
num_data = 0
with open(FLAGS.input_path, 'r') as f:
for line in tqdm(f, desc='Counting data'):
num_data += 1
num_dev_data = FLAGS.num_dev_data
num_test_data = FLAGS.num_test_data
num_train_data = num_data - num_dev_data - num_test_data
logging.info(f'# training samples: {num_train_data}')
logging.info(f'# development samples: {num_dev_data}')
logging.info(f'# test samples: {num_test_data}')
indices = list(range(num_data))
random.shuffle(indices)
train_indices = set(indices[:num_train_data])
dev_indices = set(indices[num_train_data:-num_test_data])
test_indices = set(indices[-num_test_data:])
assert len(train_indices) == num_train_data
assert len(dev_indices) == num_dev_data
assert len(test_indices) == num_test_data
save_dir = Path(FLAGS.save_dir)
save_dir.mkdir()
train_file = open(save_dir / 'train.jsonl', 'w')
dev_file = open(save_dir / 'dev.jsonl', 'w')
test_file = open(save_dir / 'test.jsonl', 'w')
with open(FLAGS.input_path, 'r') as f:
for line_num, line in tqdm(enumerate(f), desc='Splitting dataset_readers'):
if line_num in train_indices:
file_to_write = train_file
elif line_num in dev_indices:
file_to_write = dev_file
else:
file_to_write = test_file
file_to_write.write(line)
if __name__ == '__main__':
flags.mark_flags_as_required(['input_path', 'save_dir'])
app.run(main)
|
import random as rd
import time
import cv2
from detection.opencv_dnn.detector import Detector
from tracking.deep_sort import generate_detections as gdet
from tracking.deep_sort import nn_matching, preprocessing
from tracking.deep_sort.detection import Detection
from tracking.deep_sort.tracker import Tracker
from utils import check_position, draw, mouse_event
from face_recog.face_detection.face_detector import get_detector, face_detect
import imutils
import numpy as np
# Fixed colour palette used to draw tracked bounding boxes
COLOR = [(255, 255, 0), (204, 0, 153), (26, 209, 255), (71, 107, 107)]
print(COLOR)
cap = cv2.VideoCapture("videos/01.mp4")
weights = "models/yolo/weights/yolov4_tiny.weights"
config = "models/yolo/configs/yolov4_tiny.cfg"
classes = "models/yolo/classes.txt"
detector = Detector(weights, config, gpu=False, classes_name=classes)
detector.init_yolo()
print("===============================================================")
MAX_COSINE_DISTANCE = 0.3
nn_budget = None
model_filename = "models/deepsort_model/mars-small128.pb"
encoder = gdet.create_box_encoder(model_filename, batch_size=1)
metric = nn_matching.NearestNeighborDistanceMetric("cosine", MAX_COSINE_DISTANCE, nn_budget)
tracker = Tracker(metric)
# Define face detection model
face_detector = get_detector(hog=True)
fourcc = cv2.VideoWriter_fourcc(*"XVID")
w = int(cap.get(3))
h = int(cap.get(4))
fourcc = cv2.VideoWriter_fourcc(*"XVID")
out = cv2.VideoWriter("Test_Result.avi", fourcc, 30, (w, h))
cv2.namedWindow("Test")
cv2.setMouseCallback("Test", mouse_event)
points = [(264, 295), (405, 286), (430, 359), (254, 373)]
while cap.isOpened():
_, frame = cap.read()
if not _:
break
frame = cv2.resize(frame, (640, 480))
start = time.time()
classes, scores, bb_list = detector.detect(frame=frame, confidence_threshold=0.4, nms_threshold=0.4)
s1 = time.time()
# print("Detection time: {}".format(s1 - start))
# after detection we will check position of object
bb_check = dict()
check_insides = []
for box in bb_list:
check, center_point = check_position(box, points)
check_insides.append(check)
features = encoder(frame, boxes=bb_list)
detections = [ Detection(bbox, confidence, cls, feature, check_inside)
for bbox, confidence, cls, feature, check_inside in zip(
bb_list, scores, classes, features, check_insides)]
tracker.predict()
tracker.update(detections)
s2 = time.time()
# print("Tracking time: {}".format(s2-s1))
for track in tracker.tracks:
if not track.is_confirmed() or track.time_since_update > 1:
continue
bbox = track.to_tlbr()
        cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),
                      COLOR[track.track_id % len(COLOR)], 2)
try:
obj_img = frame[int(bbox[1]):int(bbox[3]), int(bbox[0]):int(bbox[2])]
scale_factor = 450 / obj_img.shape[1]
obj_img = imutils.resize(obj_img, width=450)
s3 = time.time()
face_boxes = face_detect(obj_img, face_detector)
# print("Face detection time: {}".format(time.time() - s3))
for box in face_boxes:
box = np.array(box) * (1/scale_factor)
x_min, x_max, y_min, y_max = box
x_min_real = int(x_min + bbox[0])
y_min_real = int(y_min + bbox[1])
x_max_real = int(x_max + bbox[0])
y_max_real = int(y_max + bbox[1])
# cv2.rectangle(obj_img, (int(x_min), int(y_min)), (int(x_max), int(y_max)), (0, 255, 0), 3)
cv2.rectangle(frame, (x_min_real, y_min_real), (x_max_real, y_max_real), (0, 255, 0), 2)
        except Exception as exc:
            print("Error when cropping object image:", exc)
# cv2.imshow("Obj ID {}".format(str(track.track_id)), obj_img)
# cv2.putText(frame, "ID: " + str(track.track_id), (int(bbox[0]), int(bbox[1])), 0, 1, (0, 0, 255), 2)
# for det in detections:
# bbox = det.to_tlbr()
# score = "%.2f" % round(det.confidence * 100, 2) + "%"
# cv2.rectangle(frame,(int(bbox[0]), int(bbox[1])),(int(bbox[2]), int(bbox[3])),(0, 0, 255),2)
end = time.time()
# fps_label = "FPS: %.2f" % (1 / (end - start))
# cv2.putText(frame, fps_label, (0, 25), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)
frame = draw(frame, points)
cv2.imshow("Test", frame)
out.write(frame)
if cv2.waitKey(1) & 0xFF == 32:
break
if cv2.waitKey(1) == ord("p"):
print("Pause")
cv2.waitKey(0)
cv2.destroyAllWindows()
out.release()
|
#!/usr/bin/python
################### PDF uncertainty calculator #################
## Edited by Raymond
## Edited on 4/16/14
## A better version!
import re
import os
import math
import ROOT
import numpy
from ROOT import *
from array import array
from optparse import OptionParser
# MC truth of fitted vars
afb_mc = '0.036'
rqqbar_mc = '0.075'
rbck_mc = '0.33'
delta_mc = '0'
nevents = '2M'
# Declare global vars
num_pdf = 53
Rqqbar= num_pdf*[0.0]
Rbck= num_pdf*[0.0]
delta= num_pdf*[0.0]
Afb= num_pdf*[0.0]
pdf = 0
Rqqbar_err=Rbck_err=delta_err=Afb_err=0
# rqq4_0=rqq5_0=rbck4_0=rbck5_0=del4_0=del5_0=xi4_0=xi5_0=afb_0=None
output = open('PDF_uncertainty.txt','w')
# COMMAND LINE OPTIONS
parser = OptionParser()
parser.add_option('--plot', metavar='F', type='string', action='store',
default='no',
dest='make_plots',
help='') ## Sets which files to run on
(options, args) = parser.parse_args()
def main():
# global num_pdf,Rqqbar_4jet,Rbck_4jet,Rqqbar_5jet,Rbck_5jet,delta_4jet,delta_5jet,Afb
# Parse fit results and write into files
parsing()
# Calculate PDF systematics
output.write('Number of Events: '+nevents+'\n')
# Prepare for inputs into error function
list_results = []
list_results.append(Afb)
list_results.append(Rqqbar)
list_results.append(Rbck)
list_results.append(delta)
list_para_names = []
list_para_names.append('Afb ')
list_para_names.append('Rqqbar')
list_para_names.append('Rbck ')
list_para_names.append('delta ')
list_stat_err = []
list_stat_err.append(Afb_err)
list_stat_err.append(Rqqbar_err)
list_stat_err.append(Rbck_err)
list_stat_err.append(delta_err)
list_mc_truth = []
list_mc_truth.append(afb_mc)
list_mc_truth.append(rqqbar_mc)
list_mc_truth.append(rbck_mc)
list_mc_truth.append(delta_mc)
# Run error functions
for i in range(len(list_results)):
get_errors(list_results[i],list_para_names[i],list_stat_err[i],list_mc_truth[i])
# Make plots of vars
if options.make_plots == 'yes':
print 'Making plots...'
for i in range(len(list_results)):
plot(list_results[i],list_para_names[i])
else:
print 'No plots are made!'
output.close()
########################################### Parse the output txt and write into lists of vars ############################
def parsing():
global num_pdf,Rqqbar,Rbck,delta,Afb
global Rqqbar_err,Rbck_err,delta_err,Afb_err
# Open and read the list of result.txt
f = open("result_list.txt")
result_list = list(f)
result_list = [word.strip() for word in result_list] #word.strip() get rid of \n in this line
f.close()
# Loop over result.txt
for fname in result_list:
# Read in results
f = open(fname)
result = list(f)
result = [word.strip() for word in result]
# Find the pdf_index
pdf_index1 = re.search(r'^\D*(\d+)\D*',fname)
if pdf_index1 :
pdf = int(pdf_index1.group(1))
# Find the numbers
numbers = result[0].split(',')
Rqqbar[pdf] = numbers[1]
Rbck[pdf] = numbers[3]
delta[pdf] = numbers[7]
Afb[pdf] = numbers[9]
# Find errors for only nominal PDF
if pdf == 0:
Rqqbar_err = numbers[2]
Rbck_err = numbers[4]
delta_err = numbers[8]
Afb_err = numbers[10]
# Validation
print str(Rqqbar[pdf])+','+str(Rbck[pdf])+','+str(delta[pdf])+','+str(Afb[pdf])
else :
print 'wrong file name! will stop.'
break
f.close()
def get_errors(var,var_name,stat_err,mc_truth): # var_name here should be the string of nominal var results,e.g. rqq4_0
global output
mid = float(var[0])
sum_up = 0.
sum_down = 0.
for i in range (1,num_pdf):
if i%2 == 1:
# print str(i)
up = float(var[i])
down = float(var[i+1])
temp_up = max_up(up,down,mid)
temp_down = max_down(up,down,mid)
sum_up += pow(temp_up,2)
sum_down += pow(temp_down,2)
error_up = math.sqrt(sum_up)
error_down = math.sqrt(sum_down)
#Print out result and write into files
error_up = format(error_up,'.4f')
error_down = format(error_down,'.4f')
printout = var_name+' = '+str(var[0])+' +/- '+str(stat_err)+' (statistics) +/- '+ str(error_up)+'/' +str(error_down)+' (PDF systematics)'+' | MC truth '+str(mc_truth)
#convert the array into float type
var_float = []
for j in range(len(var)):
var_float.append(float(var[j]))
printout += ' stdev '+format((numpy.std(var_float)),'.4f')
output.write(printout+'\n')
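# Asymmetric PDF errors: for each paired error set keep only the excursion above (max_up) or below (max_down) the nominal value.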
def max_up(up,down,mid):
return max(up-mid,down-mid,0)
def max_down(up,down,mid):
return max(mid-up,mid-down,0)
def plot(var,var_name):
fig_name = var_name+'.eps'
dim = len(var)
# Make x and y array for TGraph
y = array('f',dim*[0.0])
index = array('f',dim*[0])
for i in range(dim):
index[i] = i
y[i] = float(var[i])
# Make a reference formula
nominal = TF1('func1',var[0],0,dim)
# Set canvas and graph
c1 = TCanvas('c1','var vs pdf_member',200,10,700,500)
c1.SetGrid()
gr1 = TGraph(dim,index,y)
gr1.SetMarkerStyle( 21 )
gr1.SetTitle(var_name)
gr1.GetXaxis().SetTitle('pdf index')
gr1.GetYaxis().SetTitle(var_name)
gr1.Draw('ACP')
# Draw reference line
nominal.Draw('same')
c1.Update()
# Save to eps
c1.SaveAs(fig_name)
# #Set up output file
# outputname = "fit_result.root"
# f = TFile( outputname, "Recreate" )
# f.cd()
# c1.Write()
# f.Write()
# f.Close()
main()
|
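# SPICE self-training configuration for STL-10: checkpoint/embedding paths, cluster-head settings, data pipelines and solver.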
model_name = "spice_self"
pre_model = "./results/stl10/moco/checkpoint_0999.pth.tar"
embedding = "./results/stl10/embedding/feas_moco_512_l2.npy"
resume = "./results/stl10/{}/checkpoint_last.pth.tar".format(model_name)
model_type = "clusterresnet"
num_head = 10
num_workers = 4
device_id = 0
num_train = 5
num_cluster = 10
batch_size = 5000
target_sub_batch_size = 100
train_sub_batch_size = 128
batch_size_test = 100
num_trans_aug = 1
num_repeat = 8
fea_dim = 512
att_conv_dim = num_cluster
att_size = 7
center_ratio = 0.5
sim_center_ratio = 0.9
epochs = 100
world_size = 1
workers = 4
rank = 0
dist_url = 'tcp://localhost:10001'
dist_backend = "nccl"
seed = None
gpu = None
multiprocessing_distributed = True
start_epoch = 0
print_freq = 1
test_freq = 1
eval_ent = False
eval_ent_weight = 0
data_train = dict(
type="stl10_emb",
root_folder="./datasets/stl10",
embedding=embedding,
split="train+test",
ims_per_batch=batch_size,
shuffle=True,
aspect_ratio_grouping=False,
train=True,
show=False,
trans1=dict(
aug_type="weak",
crop_size=96,
normalize=dict(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
),
trans2=dict(
aug_type="scan",
crop_size=96,
normalize=dict(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
num_strong_augs=4,
cutout_kwargs=dict(n_holes=1,
length=32,
random=True)
),
)
data_test = dict(
type="stl10_emb",
root_folder="./datasets/stl10",
embedding=embedding,
split="train+test",
shuffle=False,
ims_per_batch=50,
aspect_ratio_grouping=False,
train=False,
show=False,
trans1=dict(
aug_type="test",
normalize=dict(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
),
trans2=dict(
aug_type="test",
normalize=dict(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
),
)
model = dict(
feature=dict(
type=model_type,
num_classes=num_cluster,
in_channels=3,
in_size=96,
batchnorm_track=True,
test=False,
feature_only=True
),
head=dict(type="sem_multi",
multi_heads=[dict(classifier=dict(type="mlp", num_neurons=[fea_dim, fea_dim, num_cluster], last_activation="softmax"),
feature_conv=None,
num_cluster=num_cluster,
loss_weight=dict(loss_cls=1),
iter_start=epochs,
iter_up=epochs,
iter_down=epochs,
iter_end=epochs,
ratio_start=1.0,
ratio_end=1.0,
center_ratio=center_ratio,
)]*num_head,
),
model_type="moco",
pretrained=pre_model,
freeze_conv=True,
)
solver = dict(
type="adam",
base_lr=0.005,
bias_lr_factor=1,
weight_decay=0,
weight_decay_bias=0,
target_sub_batch_size=target_sub_batch_size,
batch_size=batch_size,
train_sub_batch_size=train_sub_batch_size,
num_repeat=num_repeat,
)
results = dict(
output_dir="./results/stl10/{}".format(model_name),
)
|
import pandas as pd
def download_scalabrino():
df = pd.read_csv("https://dibt.unimol.it/reports/clap/downloads/rq3-manually-classified-implemented-reviews.csv")
df = df.rename(columns = {"body": "text", "category": "label"})
df["app"] = df["App-name"]
df["sublabel"] = df["rating"]
return df
|
import os
def add_supported_project(project_name):
if not __project_folder_has_dir(project_name):
raise Exception("Could not find Project with name {} in Projects folder. \n Will not add it to supported projects!".format(project_name))
__write_to_supported_projects(project_name)
print("Added {} to list of supprted Projects.".format(project_name))
return
def __write_to_supported_projects(project_name):
    supported_projects = open('/Users/lucas.reich/Bash-Functions/project_functions/projects.txt', 'a')
    supported_projects.write(project_name + '\n')
    supported_projects.close()
    return
def __project_folder_has_dir(dir):
projects_path = __get_projects_path()
project_dir_src = projects_path + "/" + dir
return os.path.exists(project_dir_src)
def __get_projects_path():
return '/Users/lucas.reich/PhpstormProjects'
|
import cv2
import os
import numpy as np
from PIL import Image
from keras.models import load_model
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
font = cv2.FONT_HERSHEY_SIMPLEX
def generate_gesture(ges_name, num_train_samples, save=True):
cam = cv2.VideoCapture(0)
x, y, w, h = (160, 100, 300, 300)
clf = load_model('trained.h5')
# clf = load_model('trained2.h5')
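    # Map predicted class indices to the gesture labels shown on screen.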
mapper = {5: "One!", 3: "Five!", 4: "Three!", 7: "Two!", 6:"Two!"}
while True:
ret, frame = cam.read() # frame is unaltered webcam feed
grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # convert webcam feed to grayscale
_, thresh = cv2.threshold(grey, 0, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU) # apply some filtering
cv2.rectangle(frame, (x, y), (x+w, y+h), (255,0,0), 2)
ROI = thresh[y: y+h, x: x+w]
cv2.imshow('thresh', ROI)
cv2.waitKey(1)
# print(ROI.shape)
im_in = cv2.imwrite('predictor.jpg', ROI)
im_in = load_img('predictor.jpg')
im_in = im_in.resize((28, 28), Image.NEAREST)
im_in = img_to_array(im_in)
im_in = np.expand_dims(im_in, axis=0)
pred = clf.predict_classes(im_in)[0]
cv2.putText(frame, mapper.get(pred, 'not sure'), (174, 35), font, 0.8, (0, 0, 0), 2, cv2.LINE_AA)
# cv2.putText(frame, str(pred), (230, 200), font, 0.8, (0, 0, 0), 2, cv2.LINE_AA)
cv2.imshow('Input', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
break
cam.release()
cv2.destroyAllWindows()
generate_gesture('five', 500, save=True) |
import FWCore.ParameterSet.Config as cms
####### Ecal Iso
hltEgammaEcalPFClusterIso = cms.EDProducer( "EgammaHLTEcalPFClusterIsolationProducer",
energyEndcap = cms.double( 0.0 ),
effectiveAreaBarrel = cms.double( 0.149 ),
etaStripBarrel = cms.double( 0.01 ),
rhoProducer = cms.InputTag( "hltFixedGridRhoFastjetAllCaloForMuons" ),
pfClusterProducer = cms.InputTag( "hltParticleFlowClusterECALL1Seeded" ),
etaStripEndcap = cms.double( 0.015 ),
drVetoBarrel = cms.double( 0.0 ),
drMax = cms.double( 0.3 ),
doRhoCorrection = cms.bool( False ),
energyBarrel = cms.double( 0.0 ),
effectiveAreaEndcap = cms.double( 0.097 ),
drVetoEndcap = cms.double( 0.0 ),
recoEcalCandidateProducer = cms.InputTag( "hltEgammaCandidates" ),
rhoMax = cms.double( 9.9999999E7 ),
rhoScale = cms.double( 1.0 )
)
####### HCAL Iso
hltEgammaHcalPFClusterIso = cms.EDProducer( "EgammaHLTHcalPFClusterIsolationProducer",
energyEndcap = cms.double( 0.0 ),
useHF = cms.bool( False ),
effectiveAreaBarrel = cms.double( 0.06 ),
etaStripBarrel = cms.double( 0.0 ),
pfClusterProducerHFHAD = cms.InputTag( "hltParticleFlowClusterHFHADForEgamma" ),
rhoProducer = cms.InputTag( "hltFixedGridRhoFastjetAllCaloForMuons" ),
etaStripEndcap = cms.double( 0.0 ),
drVetoBarrel = cms.double( 0.0 ),
pfClusterProducerHCAL = cms.InputTag( "hltParticleFlowClusterHCALForEgamma" ),
drMax = cms.double( 0.3 ),
doRhoCorrection = cms.bool( False ),
energyBarrel = cms.double( 0.0 ),
effectiveAreaEndcap = cms.double( 0.089 ),
drVetoEndcap = cms.double( 0.0 ),
recoEcalCandidateProducer = cms.InputTag( "hltEgammaCandidates" ),
rhoMax = cms.double( 9.9999999E7 ),
pfClusterProducerHFEM = cms.InputTag( "hltParticleFlowClusterHFEMForEgamma" ),
rhoScale = cms.double( 1.0 )
)
########## Tracker iso
hltL1SeededEgammaRegionalPixelSeedGenerator = cms.EDProducer( "EgammaHLTRegionalPixelSeedGeneratorProducers",
deltaPhiRegion = cms.double( 0.3 ),
vertexZ = cms.double( 0.0 ),
originHalfLength = cms.double( 15.0 ),
BSProducer = cms.InputTag( "hltOnlineBeamSpot" ),
UseZInVertex = cms.bool( False ),
OrderedHitsFactoryPSet = cms.PSet(
maxElement = cms.uint32( 0 ),
ComponentName = cms.string( "StandardHitPairGenerator" ),
SeedingLayers = cms.string( "hltESPPixelLayerPairs" )
),
deltaEtaRegion = cms.double( 0.3 ),
ptMin = cms.double( 1.5 ),
TTRHBuilder = cms.string( "WithTrackAngle" ),
candTag = cms.InputTag( "hltL1SeededRecoEcalCandidate" ),
candTagEle = cms.InputTag( "pixelMatchElectrons" ),
originRadius = cms.double( 0.02 )
)
hltL1SeededEgammaRegionalCkfTrackCandidates = cms.EDProducer( "CkfTrackCandidateMaker",
src = cms.InputTag( "hltL1SeededEgammaRegionalPixelSeedGenerator" ),
maxSeedsBeforeCleaning = cms.uint32( 1000 ),
TransientInitialStateEstimatorParameters = cms.PSet(
propagatorAlongTISE = cms.string( "PropagatorWithMaterial" ),
numberMeasurementsForFit = cms.int32( 4 ),
propagatorOppositeTISE = cms.string( "PropagatorWithMaterialOpposite" )
),
TrajectoryCleaner = cms.string( "hltESPTrajectoryCleanerBySharedHits" ),
cleanTrajectoryAfterInOut = cms.bool( False ),
useHitsSplitting = cms.bool( False ),
RedundantSeedCleaner = cms.string( "CachingSeedCleanerBySharedInput" ),
doSeedingRegionRebuilding = cms.bool( False ),
maxNSeeds = cms.uint32( 100000 ),
NavigationSchool = cms.string( "SimpleNavigationSchool" ),
TrajectoryBuilder = cms.string( "hltESPCkfTrajectoryBuilder" )
)
hltL1SeededEgammaRegionalCTFFinalFitWithMaterial = cms.EDProducer( "TrackProducer",
src = cms.InputTag( "hltL1SeededEgammaRegionalCkfTrackCandidates" ),
clusterRemovalInfo = cms.InputTag( "" ),
beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
Fitter = cms.string( "hltESPKFFittingSmoother" ),
useHitsSplitting = cms.bool( False ),
MeasurementTracker = cms.string( "" ),
AlgorithmName = cms.string( "undefAlgorithm" ),
alias = cms.untracked.string( "hltEgammaRegionalCTFFinalFitWithMaterial" ),
NavigationSchool = cms.string( "" ),
TrajectoryInEvent = cms.bool( False ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
GeometricInnerState = cms.bool( True ),
Propagator = cms.string( "PropagatorWithMaterial" )
)
HLTL1SeededEgammaRegionalRecoTrackerSequence = cms.Sequence( hltL1SeededEgammaRegionalPixelSeedGenerator +
hltL1SeededEgammaRegionalCkfTrackCandidates +
hltL1SeededEgammaRegionalCTFFinalFitWithMaterial )
hltEleAnyL1SeededElectronTrackIso = cms.EDProducer( "EgammaHLTElectronTrackIsolationProducers",
egTrkIsoStripEndcap = cms.double( 0.03 ),
electronProducer = cms.InputTag( "hltEleAnyPixelMatchElectronsL1Seeded" ),
egTrkIsoZSpan = cms.double( 0.15 ),
useGsfTrack = cms.bool( False ),
useSCRefs = cms.bool( False ),
egTrkIsoConeSize = cms.double( 0.3 ),
trackProducer = cms.InputTag( "hltL1SeededEgammaRegionalCTFFinalFitWithMaterial" ),
egTrkIsoStripBarrel = cms.double( 0.03 ),
egTrkIsoVetoConeSizeBarrel = cms.double( 0.03 ),
egTrkIsoVetoConeSize = cms.double( 0.03 ),
egTrkIsoRSpan = cms.double( 999999.0 ),
egTrkIsoVetoConeSizeEndcap = cms.double( 0.03 ),
recoEcalCandidateProducer = cms.InputTag( "" ),
beamSpotProducer = cms.InputTag( "hltOnlineBeamSpot" ),
egTrkIsoPtMin = cms.double( 1.0 ),
egCheckForOtherEleInCone = cms.untracked.bool( False )
)
hltGsfEleAnyL1SeededElectronTrackIso = cms.EDProducer( "EgammaHLTElectronTrackIsolationProducers",
egTrkIsoStripEndcap = cms.double( 0.03 ),
electronProducer = cms.InputTag( "hltL1SeededGsfElectrons" ),
egTrkIsoZSpan = cms.double( 0.15 ),
useGsfTrack = cms.bool( True ),
useSCRefs = cms.bool( False ),
egTrkIsoConeSize = cms.double( 0.3 ),
trackProducer = cms.InputTag( "hltL1SeededEgammaRegionalCTFFinalFitWithMaterial" ),
egTrkIsoStripBarrel = cms.double( 0.03 ),
egTrkIsoVetoConeSizeBarrel = cms.double( 0.03 ),
egTrkIsoVetoConeSize = cms.double( 0.03 ),
egTrkIsoRSpan = cms.double( 999999.0 ),
egTrkIsoVetoConeSizeEndcap = cms.double( 0.03 ),
recoEcalCandidateProducer = cms.InputTag( "hltL1SeededRecoEcalCandidate" ),
beamSpotProducer = cms.InputTag( "hltOnlineBeamSpot" ),
egTrkIsoPtMin = cms.double( 1.0 ),
egCheckForOtherEleInCone = cms.untracked.bool( False )
)
### tracker iso with for gsf electrons
### for the whole ECAL
hltActivityPhotonEcalIso = cms.EDProducer( "EgammaHLTEcalRecIsolationProducer",
etMinEndcap = cms.double( 0.11 ),
tryBoth = cms.bool( True ),
ecalBarrelRecHitProducer = cms.InputTag( "hltEcalRecHitAll" ),
rhoMax = cms.double( 9.9999999E7 ),
useNumCrystals = cms.bool( True ),
etMinBarrel = cms.double( -9999.0 ),
doRhoCorrection = cms.bool( False ),
eMinEndcap = cms.double( -9999.0 ),
intRadiusEndcap = cms.double( 3.0 ),
jurassicWidth = cms.double( 3.0 ),
useIsolEt = cms.bool( True ),
ecalBarrelRecHitCollection = cms.InputTag( "EcalRecHitsEB" ),
recoEcalCandidateProducer = cms.InputTag( "hltRecoEcalSuperClusterActivityCandidate" ),
eMinBarrel = cms.double( 0.095 ),
effectiveAreaEndcap = cms.double( 0.046 ),
ecalEndcapRecHitProducer = cms.InputTag( "hltEcalRecHitAll" ),
extRadius = cms.double( 0.3 ),
intRadiusBarrel = cms.double( 3.0 ),
subtract = cms.bool( False ),
rhoScale = cms.double( 1.0 ),
effectiveAreaBarrel = cms.double( 0.101 ),
ecalEndcapRecHitCollection = cms.InputTag( "EcalRecHitsEE" ),
rhoProducer = cms.InputTag( 'hltKT6CaloJets','rho' ))
hltActivityPhotonEcalIsoRhoCorr = cms.EDProducer( "EgammaHLTEcalRecIsolationProducer",
etMinEndcap = cms.double( 0.11 ),
tryBoth = cms.bool( True ),
ecalBarrelRecHitProducer = cms.InputTag( "hltEcalRecHitAll" ),
rhoMax = cms.double( 9.9999999E7 ),
useNumCrystals = cms.bool( True ),
etMinBarrel = cms.double( -9999.0 ),
doRhoCorrection = cms.bool( True ),
eMinEndcap = cms.double( -9999.0 ),
intRadiusEndcap = cms.double( 3.0 ),
jurassicWidth = cms.double( 3.0 ),
useIsolEt = cms.bool( True ),
ecalBarrelRecHitCollection = cms.InputTag( "EcalRecHitsEB" ),
recoEcalCandidateProducer = cms.InputTag( "hltRecoEcalSuperClusterActivityCandidate" ),
eMinBarrel = cms.double( 0.095 ),
effectiveAreaEndcap = cms.double( 0.046 ),
ecalEndcapRecHitProducer = cms.InputTag( "hltEcalRecHitAll" ),
extRadius = cms.double( 0.3 ),
intRadiusBarrel = cms.double( 3.0 ),
subtract = cms.bool( False ),
rhoScale = cms.double( 1.0 ),
effectiveAreaBarrel = cms.double( 0.101 ),
ecalEndcapRecHitCollection = cms.InputTag( "EcalRecHitsEE" ),
rhoProducer = cms.InputTag( 'hltKT6CaloJets','rho' ))
hltActivityPhotonHcalForHE = cms.EDProducer( "EgammaHLTHcalIsolationProducersRegional",
eMinHE = cms.double( 0.8 ),
hbheRecHitProducer = cms.InputTag( "hltHbhereco" ),
effectiveAreaBarrel = cms.double( 0.105 ),
outerCone = cms.double( 0.14 ),
eMinHB = cms.double( 0.7 ),
innerCone = cms.double( 0.0 ),
etMinHE = cms.double( -1.0 ),
etMinHB = cms.double( -1.0 ),
rhoProducer = cms.InputTag( 'hltKT6CaloJets','rho' ),
depth = cms.int32( -1 ),
doRhoCorrection = cms.bool( False ),
effectiveAreaEndcap = cms.double( 0.17 ),
recoEcalCandidateProducer = cms.InputTag( "hltRecoEcalSuperClusterActivityCandidate" ),
rhoMax = cms.double( 9.9999999E7 ),
rhoScale = cms.double( 1.0 ),
doEtSum = cms.bool( False )
)
hltActivityPhotonHcalIso = cms.EDProducer( "EgammaHLTHcalIsolationProducersRegional",
eMinHE = cms.double( 0.8 ),
hbheRecHitProducer = cms.InputTag( "hltHbhereco" ),
effectiveAreaBarrel = cms.double( 0.105 ),
outerCone = cms.double( 0.29 ),
eMinHB = cms.double( 0.7 ),
innerCone = cms.double( 0.16 ),
etMinHE = cms.double( -1.0 ),
etMinHB = cms.double( -1.0 ),
rhoProducer = cms.InputTag( 'hltKT6CaloJets','rho' ),
depth = cms.int32( -1 ),
doRhoCorrection = cms.bool( False ),
effectiveAreaEndcap = cms.double( 0.17 ),
recoEcalCandidateProducer = cms.InputTag( "hltRecoEcalSuperClusterActivityCandidate" ),
rhoMax = cms.double( 9.9999999E7 ),
rhoScale = cms.double( 1.0 ),
doEtSum = cms.bool( True ))
hltActivityPhotonHcalIsoRhoCorr = cms.EDProducer( "EgammaHLTHcalIsolationProducersRegional",
eMinHE = cms.double( 0.8 ),
hbheRecHitProducer = cms.InputTag( "hltHbhereco" ),
effectiveAreaBarrel = cms.double( 0.105 ),
outerCone = cms.double( 0.29 ),
eMinHB = cms.double( 0.7 ),
innerCone = cms.double( 0.16 ),
etMinHE = cms.double( -1.0 ),
etMinHB = cms.double( -1.0 ),
rhoProducer = cms.InputTag( 'hltKT6CaloJets','rho' ),
depth = cms.int32( -1 ),
doRhoCorrection = cms.bool( True ),
effectiveAreaEndcap = cms.double( 0.17 ),
recoEcalCandidateProducer = cms.InputTag( "hltRecoEcalSuperClusterActivityCandidate" ),
rhoMax = cms.double( 9.9999999E7 ),
rhoScale = cms.double( 1.0 ),
doEtSum = cms.bool( True )
)
########## Tracker iso
hltEgammaEleGsfTrackIso = cms.EDProducer( "EgammaHLTElectronTrackIsolationProducers",
egTrkIsoStripEndcap = cms.double( 0.03 ),
egTrkIsoVetoConeSizeBarrel = cms.double( 0.03 ),
useGsfTrack = cms.bool( True ),
useSCRefs = cms.bool( True ),
trackProducer = cms.InputTag( "hltIter2MergedForElectrons" ),
egTrkIsoStripBarrel = cms.double( 0.03 ),
electronProducer = cms.InputTag( "hltEgammaGsfElectrons" ),
egTrkIsoConeSize = cms.double( 0.3 ),
egTrkIsoRSpan = cms.double( 999999.0 ),
egTrkIsoVetoConeSizeEndcap = cms.double( 0.03 ),
recoEcalCandidateProducer = cms.InputTag( "hltEgammaCandidates" ),
beamSpotProducer = cms.InputTag( "hltOnlineBeamSpot" ),
egTrkIsoPtMin = cms.double( 1.0 ),
egTrkIsoZSpan = cms.double( 0.15 )
)
|
from src.MOOA.NSGA_II import nsga_ii
from src.ea_nas.moo_operators import architectural as moo
from src.ea_nas.evolutionary_operations.mutation_for_operators import mutate
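# One generation per step: mutate selected parents, deduplicate, then keep the best population_size individuals via NSGA-II.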
def optimize(population, selection, steps, config):
for i in range(steps):
# Preparation:
children = []
# Mutation:
for selected in selection(population, config.population_size):
mutated = mutate(selected)
if mutated:
children += [mutated]
# Training networks:
children = list(set(children)) # Preventing inbreeding
# Elitism:
population += children
population = nsga_ii(
population,
moo.architecture_objectives(),
moo.architecture_domination_operator(
moo.architecture_objectives()
),
config,
force_moo=True
)
keep = len(population) - config.population_size
population, removed = population[keep:], population[:keep]
avg_size = sum([len(x.children) for x in population]) / len(population)
print(f" - Progress: {int(i/steps*100)} %. Average size: {avg_size} ops, best size: {len(population[-1].children)}, worst size: {len(population[0].children)}", end="\r")
print(f" - Architectures Optimized! Average size: {avg_size} ops ")
return population
|
import requests
import urllib.parse
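# Query Instagram's public top-search endpoint for each name and keep the first suggested username.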
def get_usernames_from_names(name_file):
usernames = []
with open(name_file, mode='r', encoding='utf-8') as names_file:
for line in names_file:
query = line.strip()
query_encoded = urllib.parse.quote(query)
search_url = f'https://www.instagram.com/web/search/topsearch/?query={query_encoded}'
response = requests.get(search_url)
data = response.json()
if data and 'users' in data and data['users']:
username = data['users'][0]['user']['username']
usernames.append(username)
return usernames
def write_usernames_to_file(usernames):
with open('usernames.txt', 'x') as usernames_file:
usernames_file.write('\n'.join(usernames))
def read_usernames_from_file(username_file):
usernames = []
with open(username_file, 'r') as usernames_file:
for username in usernames_file:
usernames.append(username.strip())
return usernames
|
from distutils.core import setup
setup(name='vacationlibrery',
version='1.1',
author='Mateusz Wojcik',
author_email='226611@student.pwr.edu.pl',
url='https://github.com/BombaGR',
packages=['myPackage'],
)
|
# standard library
import json
import argparse
# third-party libraries
import torch
# project libraries
from decoder import GreedyDecoder
from test import evaluate
from data.data_loader import AudioDataLoader, SpectrogramDataset
from model import DeepSpeech
parser = argparse.ArgumentParser(description="evaluates model and shows model contents")
parser.add_argument('--model-path', default='', type=str, help='path to model to load')
if __name__ == "__main__":
args = parser.parse_args()
model_path = args.model_path
package = torch.load(model_path, map_location=lambda storage, loc: storage)
model = DeepSpeech.load_model_package(package)
print(f"package keys: {[key for key in package.keys()]}")
show_keys = ['version', 'hidden_size', 'hidden_layers', 'rnn_type', 'audio_conf', 'labels',
'bidirectional', 'amp', 'epoch', 'loss_results', 'cer_results', 'wer_results']
for key in package.keys():
if key in show_keys:
print(f"key: {key}, contents: {package.get(key)}")
print(f"model: {model}")
|
class employee:
def __init__(self,name,dob):
self.name = name
self.dob = dob
self.salary = 50
def salary_review(self,review):
self.salary = self.salary*(1+review*0.01)
emp1 = employee("Nguyen Van A", 15022019)
print(emp1.name,emp1.dob,emp1.salary)
emp1.salary_review(50)
print(emp1.name,emp1.dob,emp1.salary) |
# Compute the average of the numbers in sample.txt
f = open("sample.txt", 'r', encoding='UTF-8')
data = f.readlines()
f.close()
total = 0
for i in range(len(data)):
    total += int(data[i])
# print(total)
if len(data) > 0:
    average = total / len(data)
    # print("average = ", average)
    with open('result.txt', 'w', encoding='UTF-8') as f:
        f.write(str(average))
|
#main code to do classification
import torch, torchvision
from torchvision import datasets, models, transforms
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import time
from torchsummary import summary
import numpy as np
import matplotlib.pyplot as plt
import os
from PIL import Image
image_transforms = {
'test': transforms.Compose([
transforms.Resize(size=256),
transforms.CenterCrop(size=224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
])
}
idx_to_class={0: 'bedroom', 1: 'diningroom', 2: 'kitchen', 3: 'livingroom'}
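# Classify a single image and print the top-3 room predictions with their scores.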
def predict(model, test_image_name):
transform = image_transforms['test']
test_image = Image.open(test_image_name)
plt.imshow(test_image)
test_image_tensor = transform(test_image)
test_image_tensor = test_image_tensor.view(1, 3, 224, 224)
with torch.no_grad():
model.eval()
# Model outputs log probabilities
out = model(test_image_tensor)
ps = torch.exp(out)
topk, topclass = ps.topk(3, dim=1)
result=dict()
for i in range(3):
print("Predcition", i+1, ":", idx_to_class[topclass.cpu().numpy()[0][i]], ", Score: ", topk.cpu().numpy()[0][i])
result[idx_to_class[topclass.cpu().numpy()[0][i]]]=topk.cpu().numpy()[0][i]
return result
# dataset = 'allrooms'
# model = torch.load('_model_24.pt')
# result=predict(model, 'images.jpeg')
# for i in result:
# if result[i]==max(result.values()):
# print(i)
# break
#
|