hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fc898bad2efa60cee2c4ef24696c39c99d84411b
| 8,634
|
py
|
Python
|
dashboard/tests/test_inventory.py
|
vishalvvr/transtats
|
ec71f40b338cab36eb907f6faba262dfeb858b80
|
[
"Apache-2.0"
] | null | null | null |
dashboard/tests/test_inventory.py
|
vishalvvr/transtats
|
ec71f40b338cab36eb907f6faba262dfeb858b80
|
[
"Apache-2.0"
] | null | null | null |
dashboard/tests/test_inventory.py
|
vishalvvr/transtats
|
ec71f40b338cab36eb907f6faba262dfeb858b80
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mock import patch
from fixture import DjangoFixture
from fixture.style import NamedDataStyle
from fixture.django_testcase import FixtureTestCase
from dashboard.managers.inventory import InventoryManager
from dashboard.models import Product
from dashboard.tests.testdata.db_fixtures import (
LanguageData, LanguageSetData, PlatformData, ProductData, ReleaseData
)
db_fixture = DjangoFixture(style=NamedDataStyle())
class InventoryManagerTest(FixtureTestCase):
inventory_manager = InventoryManager()
fixture = db_fixture
datasets = [LanguageData, LanguageSetData, PlatformData, ProductData, ReleaseData]
def test_get_locales(self):
"""
Test get_locales
"""
japanese_locale = self.inventory_manager.get_locales(pick_locales=['ja_JP'])
self.assertEqual(len(japanese_locale), 1)
self.assertEqual(japanese_locale[0].lang_name, 'Japanese')
self.assertEqual(japanese_locale[0].locale_alias, 'ja')
self.assertEqual(japanese_locale[0].locale_script, 'Hani')
def test_get_active_locales_count(self):
"""
Test get_active_locales_count
"""
active_locales = self.inventory_manager.get_active_locales_count()
self.assertEqual(active_locales, 3)
def test_get_locale_alias(self):
"""
Test get_locale_alias
"""
locale_alias = self.inventory_manager.get_locale_alias('fr_FR')
self.assertEqual(locale_alias, 'fr')
locale_alias = self.inventory_manager.get_locale_alias('de_DE')
self.assertEqual(locale_alias, 'de_DE')
def test_get_alias_locale(self):
"""
Test get_alias_locale
"""
alias_locale = self.inventory_manager.get_alias_locale('fr')
self.assertEqual(alias_locale, 'fr_FR')
alias_locale = self.inventory_manager.get_alias_locale('de_DE')
self.assertEqual(alias_locale, 'de_DE')
def test_get_locales_set(self):
"""
Test get_locales_set
"""
active_locales, inactive_locales, aliases = \
self.inventory_manager.get_locales_set()
self.assertEqual(len(active_locales), 3)
self.assertEqual(len(inactive_locales), 1)
self.assertEqual(len(aliases), 4)
def test_get_locale_lang_tuple(self):
"""
Test get_locale_lang_tuple
"""
ru_tuple = ('ru_RU', 'Russian')
fr_tuple = ('fr_FR', 'French')
locale_lang_tuple = self.inventory_manager.get_locale_lang_tuple()
self.assertEqual(len(locale_lang_tuple), 3)
locale_lang_tuple = self.inventory_manager.get_locale_lang_tuple(locales=['fr_FR', 'ru_RU'])
self.assertEqual(len(locale_lang_tuple), 2)
self.assertTupleEqual(locale_lang_tuple[0], ru_tuple)
self.assertTupleEqual(locale_lang_tuple[1], fr_tuple)
def test_get_langset(self):
"""
Test get_get_langset
"""
lang_set = self.inventory_manager.get_langset(langset_slug='custom-set')
self.assertEqual(lang_set.lang_set_name, 'Custom Set')
self.assertEqual(lang_set.lang_set_color, 'Peru')
def test_get_langsets(self):
"""
Test get_langsets
"""
lang_sets = self.inventory_manager.get_langsets(
fields=['lang_set_name', 'locale_ids']
)
self.assertEqual(len(lang_sets), 2)
self.assertNotIn('lang_set_color', vars(lang_sets[0]))
self.assertListEqual(lang_sets[0].locale_ids, ['fr_FR', 'ja_JP'])
def test_get_locale_groups(self):
"""
Test get_locale_groups
"""
locale_groups = self.inventory_manager.get_locale_groups('ja_JP')
self.assertDictEqual(locale_groups, {'ja_JP': ['custom-set', 'f27-set']})
def test_get_all_locales_groups(self):
"""
Test get_all_locales_groups
"""
groups_of_all_locales = self.inventory_manager.get_all_locales_groups()
self.assertDictEqual(groups_of_all_locales,
{'ja_JP': ['custom-set', 'f27-set'], 'fr_FR': ['custom-set', 'f27-set'],
'ru_RU': ['f27-set'], 'ko_KR': []})
def test_get_translation_platforms(self):
"""
Test get_translation_platforms
"""
transplatforms = self.inventory_manager.get_translation_platforms(engine='zanata')
self.assertEqual(transplatforms[1].api_url, 'https://translate.zanata.org')
self.assertEqual(transplatforms[1].platform_slug, 'ZNTAPUB')
def test_get_ci_platforms(self):
"""
Test get_translation_platforms
"""
ciplatforms = self.inventory_manager.get_translation_platforms(ci=True)
self.assertEqual(ciplatforms[0].api_url, 'https://cloud.memsource.com/web')
self.assertEqual(ciplatforms[0].platform_slug, 'MSRCPUB')
def test_get_transplatforms_set(self):
"""
Test get_transplatforms_set
"""
active_platforms, inactive_platforms = self.inventory_manager.get_transplatforms_set()
self.assertEqual(len(active_platforms), 3)
self.assertEqual(len(inactive_platforms), 0)
def test_get_engine_from_slug(self):
"""
Test get_engine_from_slug
"""
platform_engine = self.inventory_manager.get_engine_from_slug(
PlatformData.platform_zanata_fedora.platform_slug
)
self.assertEqual(platform_engine, 'zanata')
platform_engine = self.inventory_manager.get_engine_from_slug(
PlatformData.platform_memsource_cloud.platform_slug
)
self.assertEqual(platform_engine, 'memsource')
def test_get_transplatform_slug_url(self):
"""
test get_transplatform_slug_url
"""
slug_url_tuple = self.inventory_manager.get_transplatform_slug_url()
self.assertTupleEqual(slug_url_tuple, (('MSRCPUB', 'https://cloud.memsource.com/web'),
('ZNTAFED', 'https://fedora.zanata.org'),
('ZNTAPUB', 'https://translate.zanata.org')))
def test_get_relbranch_locales(self):
"""
Test get_relbranch_locales
"""
relbranch_locales = self.inventory_manager.get_relbranch_locales("nonexisting-relbranch")
self.assertFalse(relbranch_locales)
relbranch_locales = self.inventory_manager.get_relbranch_locales('fedora-27')
self.assertListEqual(relbranch_locales, ['ja_JP', 'fr_FR', 'ru_RU'])
def test_get_release_streams(self):
"""
Test get_release_streams
"""
relstream_fedora = Product.objects.get(product_name='Fedora')
relstream_rhel = Product.objects.get(product_name='RHEL')
release_streams = self.inventory_manager.get_release_streams()
self.assertEqual(len(release_streams), 2)
self.assertIn(relstream_fedora, release_streams)
self.assertIn(relstream_rhel, release_streams)
release_streams = self.inventory_manager.get_release_streams(stream_slug='RHEL')
self.assertEqual(len(release_streams), 1)
self.assertIn(relstream_rhel, release_streams)
release_streams = self.inventory_manager.get_release_streams(only_active=True)
self.assertEqual(len(release_streams), 1)
self.assertIn(relstream_fedora, release_streams)
def test_get_relstream_slug_name(self):
"""
Test get_relstream_slug_name
"""
relstream_slug_name_tuple = self.inventory_manager.get_relstream_slug_name()
self.assertEqual(len(relstream_slug_name_tuple), 1)
self.assertTupleEqual(relstream_slug_name_tuple[0], ('fedora', 'Fedora'))
def test_get_relstream_build_tags(self):
"""
Test get_relstream_build_tags
"""
tags = self.inventory_manager.get_relstream_build_tags(stream_slug='fedora')
self.assertIsInstance(tags, dict)
self.assertDictEqual(tags, {'fedora': ['f28', 'f29', 'rawhide']})
| 38.717489
| 101
| 0.67848
| 1,005
| 8,634
| 5.494527
| 0.18607
| 0.048171
| 0.094169
| 0.108294
| 0.476096
| 0.262405
| 0.174574
| 0.174574
| 0.118979
| 0.103223
| 0
| 0.007115
| 0.21867
| 8,634
| 222
| 102
| 38.891892
| 0.811444
| 0.122307
| 0
| 0.067227
| 0
| 0
| 0.075768
| 0.002947
| 0
| 0
| 0
| 0
| 0.378151
| 1
| 0.159664
| false
| 0
| 0.058824
| 0
| 0.252101
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc8994bb32a375675e5f3f534446419ae71a9b08
| 9,221
|
py
|
Python
|
web_console_v2/api/fedlearner_webconsole/rpc/server.py
|
nolanliou/fedlearner
|
54127c465b3b5d77ae41b823e42efbc1b707e826
|
[
"Apache-2.0"
] | null | null | null |
web_console_v2/api/fedlearner_webconsole/rpc/server.py
|
nolanliou/fedlearner
|
54127c465b3b5d77ae41b823e42efbc1b707e826
|
[
"Apache-2.0"
] | null | null | null |
web_console_v2/api/fedlearner_webconsole/rpc/server.py
|
nolanliou/fedlearner
|
54127c465b3b5d77ae41b823e42efbc1b707e826
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
# pylint: disable=broad-except, cyclic-import
import logging
import threading
from concurrent import futures
import grpc
from fedlearner_webconsole.proto import (
service_pb2, service_pb2_grpc,
common_pb2
)
from fedlearner_webconsole.db import db
from fedlearner_webconsole.project.models import Project
from fedlearner_webconsole.workflow.models import (
Workflow, WorkflowState, TransactionState
)
from fedlearner_webconsole.exceptions import (
UnauthorizedException
)
class RPCServerServicer(service_pb2_grpc.WebConsoleV2ServiceServicer):
def __init__(self, server):
self._server = server
def CheckConnection(self, request, context):
try:
return self._server.check_connection(request)
except UnauthorizedException as e:
return service_pb2.CheckConnectionResponse(
status=common_pb2.Status(
code=common_pb2.STATUS_UNAUTHORIZED,
msg=repr(e)))
except Exception as e:
logging.error('CheckConnection rpc server error: %s', repr(e))
return service_pb2.CheckConnectionResponse(
status=common_pb2.Status(
code=common_pb2.STATUS_UNKNOWN_ERROR,
msg=repr(e)))
def UpdateWorkflowState(self, request, context):
try:
return self._server.update_workflow_state(request)
except UnauthorizedException as e:
return service_pb2.UpdateWorkflowStateResponse(
status=common_pb2.Status(
code=common_pb2.STATUS_UNAUTHORIZED,
msg=repr(e)))
except Exception as e:
logging.error('UpdateWorkflowState rpc server error: %s', repr(e))
return service_pb2.UpdateWorkflowStateResponse(
status=common_pb2.Status(
code=common_pb2.STATUS_UNKNOWN_ERROR,
msg=repr(e)))
def GetWorkflow(self, request, context):
try:
return self._server.get_workflow(request)
except UnauthorizedException as e:
return service_pb2.GetWorkflowResponse(
status=common_pb2.Status(
code=common_pb2.STATUS_UNAUTHORIZED,
msg=repr(e)))
except Exception as e:
logging.error('GetWorkflow rpc server error: %s', repr(e))
return service_pb2.GetWorkflowResponse(
status=common_pb2.Status(
code=common_pb2.STATUS_UNKNOWN_ERROR,
msg=repr(e)))
class RpcServer(object):
def __init__(self):
self._lock = threading.Lock()
self._started = False
self._server = None
self._app = None
def start(self, app):
assert not self._started, "Already started"
self._app = app
listen_port = app.config.get('GRPC_LISTEN_PORT', 1999)
with self._lock:
self._server = grpc.server(
futures.ThreadPoolExecutor(max_workers=10))
service_pb2_grpc.add_WebConsoleV2ServiceServicer_to_server(
RPCServerServicer(self), self._server)
self._server.add_insecure_port('[::]:%d' % listen_port)
self._server.start()
self._started = True
def stop(self):
if not self._started:
return
with self._lock:
self._server.stop(None).wait()
del self._server
self._started = False
def check_auth_info(self, auth_info):
logging.debug('auth_info: %s', auth_info)
project = Project.query.filter_by(
name=auth_info.project_name).first()
if project is None:
raise UnauthorizedException('Invalid project')
project_config = project.get_config()
# TODO: fix token verification
# if project_config.token != auth_info.auth_token:
# raise UnauthorizedException('Invalid token')
if project_config.domain_name != auth_info.target_domain:
raise UnauthorizedException('Invalid domain')
source_party = None
for party in project_config.participants:
if party.domain_name == auth_info.source_domain:
source_party = party
if source_party is None:
raise UnauthorizedException('Invalid domain')
return project, source_party
def check_connection(self, request):
with self._app.app_context():
_, party = self.check_auth_info(request.auth_info)
logging.debug(
'received check_connection from %s', party.domain_name)
return service_pb2.CheckConnectionResponse(
status=common_pb2.Status(
code=common_pb2.STATUS_SUCCESS))
def update_workflow_state(self, request):
with self._app.app_context():
project, party = self.check_auth_info(request.auth_info)
logging.debug(
'received update_workflow_state from %s: %s',
party.domain_name, request)
name = request.workflow_name
state = WorkflowState(request.state)
target_state = WorkflowState(request.target_state)
transaction_state = TransactionState(request.transaction_state)
workflow = Workflow.query.filter_by(
name=request.workflow_name,
project_id=project.id).first()
if workflow is None:
assert state == WorkflowState.NEW
assert target_state == WorkflowState.READY
workflow = Workflow(
name=name,
project_id=project.id,
state=state, target_state=target_state,
transaction_state=transaction_state)
db.session.add(workflow)
db.session.commit()
db.session.refresh(workflow)
workflow.update_state(
state, target_state, transaction_state)
db.session.commit()
return service_pb2.UpdateWorkflowStateResponse(
status=common_pb2.Status(
code=common_pb2.STATUS_SUCCESS),
transaction_state=workflow.transaction_state.value)
def _filter_workflow(self, workflow, modes):
# filter peer-readable and peer-writable variables
if workflow is None:
return
var_list = [
i for i in workflow.variables if i.access_mode in modes]
workflow.ClearField('variables')
for i in var_list:
workflow.variables.append(i)
for job_def in workflow.job_definitions:
var_list = [
i for i in job_def.variables if i.access_mode in modes]
job_def.ClearField('variables')
for i in var_list:
job_def.variables.append(i)
def get_workflow(self, request):
with self._app.app_context():
project, party = self.check_auth_info(request.auth_info)
workflow = Workflow.query.filter_by(
name=request.workflow_name,
project_id=project.id).first()
assert workflow is not None
config = workflow.get_config()
self._filter_workflow(
config,
[
common_pb2.Variable.PEER_READABLE,
common_pb2.Variable.PEER_WRITABLE
])
# job details
jobs = [service_pb2.JobDetail(
name=job.name, state=job.get_state_for_front())
for job in workflow.get_jobs()]
# fork info
forked_from = ''
if workflow.forked_from:
forked_from = Workflow.query.get(workflow.forked_from).name
return service_pb2.GetWorkflowResponse(
name=request.workflow_name,
status=common_pb2.Status(
code=common_pb2.STATUS_SUCCESS),
config=config,
jobs=jobs,
state=workflow.state.value,
target_state=workflow.target_state.value,
transaction_state=workflow.transaction_state.value,
forkable=workflow.forkable,
forked_from=forked_from,
reuse_job_names=workflow.get_reuse_job_names(),
peer_reuse_job_names=workflow.get_peer_reuse_job_names(),
fork_proposal_config=workflow.get_fork_proposal_config()
)
rpc_server = RpcServer()
| 39.74569
| 78
| 0.615335
| 980
| 9,221
| 5.564286
| 0.209184
| 0.03466
| 0.049514
| 0.03466
| 0.391711
| 0.338529
| 0.31689
| 0.268293
| 0.248487
| 0.226848
| 0
| 0.008184
| 0.310921
| 9,221
| 231
| 79
| 39.917749
| 0.850016
| 0.090771
| 0
| 0.371728
| 0
| 0
| 0.035279
| 0.002511
| 0
| 0
| 0
| 0.004329
| 0.020942
| 1
| 0.062827
| false
| 0
| 0.04712
| 0
| 0.198953
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc8d8f892ae337d196878475837e1f9cc4ae1047
| 956
|
py
|
Python
|
chapter15/async_aiohttp.py
|
haru-256/ExpertPython3_Source
|
5ef412ef217c6078248ff9546e23ed9b69aadcff
|
[
"MIT"
] | 9
|
2021-07-30T07:57:55.000Z
|
2021-12-30T12:38:21.000Z
|
chapter15/async_aiohttp.py
|
haru-256/ExpertPython3_Source
|
5ef412ef217c6078248ff9546e23ed9b69aadcff
|
[
"MIT"
] | null | null | null |
chapter15/async_aiohttp.py
|
haru-256/ExpertPython3_Source
|
5ef412ef217c6078248ff9546e23ed9b69aadcff
|
[
"MIT"
] | 2
|
2021-09-05T11:39:50.000Z
|
2021-09-17T05:27:37.000Z
|
"""
「非同期プログラミング」の節で登場するサンプルコード
aiohttpを使って非同期にHTTPのリクエストを送信する方法
"""
import asyncio
import time
import aiohttp
from asyncrates import get_rates
SYMBOLS = ('USD', 'EUR', 'PLN', 'NOK', 'CZK')
BASES = ('USD', 'EUR', 'PLN', 'NOK', 'CZK')
async def fetch_rates(session, place):
return await get_rates(session, place)
async def present_result(result):
base, rates = (await result)
rates_line = ", ".join(
[f"{rates[symbol]:7.03} {symbol}" for symbol in SYMBOLS]
)
print(f"1 {base} = {rates_line}")
async def main():
async with aiohttp.ClientSession() as session:
await asyncio.wait([
asyncio.create_task(present_result(fetch_rates(session, base)))
for base in BASES
])
if __name__ == "__main__":
started = time.time()
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
elapsed = time.time() - started
print()
print(f"経過時間: {elapsed:.2f}s")
| 20.782609
| 75
| 0.641213
| 118
| 956
| 5.016949
| 0.483051
| 0.040541
| 0.030405
| 0.040541
| 0.050676
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006658
| 0.214435
| 956
| 45
| 76
| 21.244444
| 0.781625
| 0.061715
| 0
| 0
| 0
| 0
| 0.126126
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.148148
| 0
| 0.185185
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc8f2034e17191e49d05923f9d20c0bfa677c7bb
| 4,433
|
py
|
Python
|
experiments/nginx/run.py
|
OleksiiOleksenko/intel_mpx_explained
|
dd6da57e0fcf22df358d1a742079b414620a7c88
|
[
"MIT"
] | 15
|
2017-02-08T04:02:50.000Z
|
2021-02-20T16:47:25.000Z
|
experiments/nginx/run.py
|
OleksiiOleksenko/intel_mpx_explained
|
dd6da57e0fcf22df358d1a742079b414620a7c88
|
[
"MIT"
] | 1
|
2020-02-01T00:29:32.000Z
|
2020-02-04T14:25:57.000Z
|
experiments/nginx/run.py
|
OleksiiOleksenko/intel_mpx_explained
|
dd6da57e0fcf22df358d1a742079b414620a7c88
|
[
"MIT"
] | 3
|
2017-02-08T04:02:51.000Z
|
2018-03-30T07:58:45.000Z
|
#!/usr/bin/env python
from __future__ import print_function
import logging
import os
import signal
from time import sleep
from subprocess import Popen, PIPE
import socket
from core.common_functions import *
from core.run import Runner
class NginxPerf(Runner):
"""
Runs Nginx
"""
name = "nginx"
exp_name = "nginx"
bench_suite = False
benchmarks = {"nginx": ""}
test_benchmarks = {"nginx": ""}
client_numbers = [1, 5, 9, 13, 17, 21, 25, 29]
ab = "ab"
duration = 20 # in seconds
requests_num = 1000000 # some huge number so we always take 20 seconds
def __init__(self, *args, **kwargs):
super(NginxPerf, self).__init__(*args, **kwargs)
if self.config.input_type == "test":
self.client_numbers = (1,)
def per_benchmark_action(self, type_, benchmark, args):
self.log_build(type_, benchmark)
build_path = "/".join([self.dirs["build"], type_])
self.current_exe = build_path + '/sbin/' + benchmark
build_benchmark(
b=benchmark,
t=type_,
makefile=self.dirs['bench_src'],
build_path=build_path
)
# generate an input file
with open(build_path + "/html/index.html", "w") as f:
f.write("<html><body><h1>It works!</h1>")
random_text = my_check_output("lorem -p 10")
f.write(random_text)
f.write("</body></html>")
# config Nginx
replace_in_file(build_path + "/conf/nginx.conf", "listen 80;", "listen 8080;", ignoreifcontains=True)
replace_in_file(build_path + "/conf/nginx.conf", "worker_processes 1;", "worker_processes auto;", ignoreifcontains=True)
def per_thread_action(self, type_, benchmark, args, thread_num):
servercmd = "{action} {exe} -g \"daemon off;\"".format(
action=self.action,
exe=self.current_exe,
)
logging.debug("Server command: %s" % servercmd)
# by default start client on local machine
if env.get("CLIENT_MACHINE"):
ssh = "ssh %s" % env["CLIENT_MACHINE"]
logging.debug("Using remote client: %s" % env["CLIENT_MACHINE"])
else:
ssh = ""
logging.debug("Using local client (use CLIENT_MACHINE env var to specify remote client)")
myip = [l for l in ([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")][:1], [[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) if l][0][0]
with open(self.dirs["log_file"], "a") as f:
for client_number in self.client_numbers:
# start server
my_check_output("pkill -9 nginx > /dev/null || true") # for sanity
sleep(1)
server = Popen(servercmd, shell=True, stdout=PIPE, stderr=PIPE, preexec_fn=os.setsid)
sleep(1)
# start client (possibly on another machine)
msg = self.run_message.format(input=client_number, **locals())
self.log_run(msg)
f.write("[run] " + msg + "\n")
out = my_check_output("{ssh} {ab} -k -t {duration} -n {requests_num} -c {client_number} http://{myip}:8080/".format(
ab=self.ab,
duration=self.duration,
requests_num=self.requests_num,
**locals()
))
f.write("===== client =====\n")
f.write(out)
# log and stop server
f.write("===== return code is %s =====\n" % str(server.poll()))
try:
os.killpg(server.pid, signal.SIGINT)
except:
pass
f.write("===== stdout =====\n")
for line in server.stdout:
f.write(line.decode('utf-8'))
f.write("===== stderr =====\n")
for line in server.stderr:
f.write(line.decode('utf-8'))
sleep(1)
def set_logging(self):
self.num_benchmarks = len(self.benchmarks) * len(self.types) * self.num_runs * len(self.client_numbers)
logging.info("Total runs: %d" % self.num_benchmarks)
def main(benchmark_name=None):
runner = NginxPerf()
runner.main()
| 36.336066
| 268
| 0.551771
| 538
| 4,433
| 4.395911
| 0.386617
| 0.027907
| 0.021564
| 0.01945
| 0.082875
| 0.046512
| 0.029598
| 0.029598
| 0
| 0
| 0
| 0.020222
| 0.308369
| 4,433
| 121
| 269
| 36.636364
| 0.751142
| 0.056846
| 0
| 0.056818
| 0
| 0.011364
| 0.161213
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.056818
| false
| 0.011364
| 0.102273
| 0
| 0.272727
| 0.011364
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc9074fa7981c7335753a01097902625525ccf9a
| 17,367
|
py
|
Python
|
seqparse/test/test_seqparse.py
|
hoafaloaf/seqparse
|
1d2446070c5627a5cb880d00ef327b892b4dedef
|
[
"MIT"
] | 1
|
2021-06-08T17:24:41.000Z
|
2021-06-08T17:24:41.000Z
|
seqparse/test/test_seqparse.py
|
hoafaloaf/seqparse
|
1d2446070c5627a5cb880d00ef327b892b4dedef
|
[
"MIT"
] | null | null | null |
seqparse/test/test_seqparse.py
|
hoafaloaf/seqparse
|
1d2446070c5627a5cb880d00ef327b892b4dedef
|
[
"MIT"
] | 1
|
2021-10-05T15:44:07.000Z
|
2021-10-05T15:44:07.000Z
|
"""Test file sequence discovery on disk."""
# "Future" Libraries
from __future__ import print_function
# Standard Libraries
import os
import unittest
# Third Party Libraries
import mock
from builtins import range
from future.utils import lrange
from . import (DirEntry, generate_entries, initialise_mock_scandir_data,
mock_scandir_deep)
from .. import (__version__, get_parser, get_sequence, get_version, invert,
validate_frame_sequence)
from ..sequences import FileSequence, FrameChunk, FrameSequence
###############################################################################
# class: TestSeqparseModule
class TestSeqparseModule(unittest.TestCase):
"""Test file discovery on the seqparse module."""
_test_ext = "exr"
_test_file_name = "TEST_DIR"
_test_root = "test_dir"
_singletons = ["singleton0.jpg", "singleton1.jpg"]
def setUp(self):
"""Set up the test case."""
pass
@mock.patch("seqparse.seqparse.scandir")
def test_singletons(self, mock_api_call):
"""Seqparse: Test file singleton discovery from disk location."""
# Expected outputs ...
output = [os.path.join(self._test_root, x) for x in self._singletons]
entries = list()
for file_name in output:
entries.append(DirEntry(file_name))
mock_api_call.return_value = iter(entries)
parser = get_parser()
parser.scan_path(self._test_root)
file_names = parser.singletons
self.assertIn(self._test_root, file_names)
self.assertEqual(self._test_root, file_names[self._test_root].path)
self.assertEqual(len(file_names), 1)
self.assertEqual(
len(file_names[self._test_root]), len(self._singletons))
self.assertEqual(
sorted(self._singletons), sorted(file_names[self._test_root]))
# Check parser output ...
self.assertEqual(sorted(map(str, parser.output())), output)
# Test seqs_only option ...
self.assertEqual(sorted(parser.output(seqs_only=True)), [])
@mock.patch("seqparse.seqparse.scandir")
def test_single_padded_file(self, mock_api_call):
"""Seqparse: Test single padded file sequence discovery."""
frames = {4: [1]}
# Expected outputs ...
frame_seq_output = "0001"
file_seq_output = ".".join(
(self._test_file_name, frame_seq_output, self._test_ext))
final_output = os.path.join(self._test_root, file_seq_output)
input_entries = generate_entries(
ext=self._test_ext,
frames=frames,
name=self._test_file_name,
root=self._test_root)
mock_api_call.return_value = iter(input_entries)
parser = get_parser()
parser.scan_path(self._test_root)
data = parser.sequences
test_output = list(parser.output())
self.assertEqual(len(test_output), 1)
self.assertEqual(str(test_output[0]), final_output)
# Check the structure of the sequences property.
self.assertIn(self._test_root, data)
self.assertEqual(len(data), 1)
self.assertIn(self._test_file_name, data[self._test_root])
self.assertEqual(len(data[self._test_root]), 1)
# Now check the file sequence itself.
file_seq = data[self._test_root][self._test_file_name]
test_output = list(file_seq.output())
self.assertEqual(len(test_output), 1)
self.assertEqual(str(test_output[0]), final_output)
self.assertIn(self._test_ext, file_seq)
self.assertEqual(len(file_seq), 1)
self.assertTrue(4 in file_seq[self._test_ext])
self.assertEqual(len(file_seq[self._test_ext]), 1)
# And finally, the file sequence.
file_seq = file_seq[self._test_ext][4]
self.assertEqual(len(file_seq), len(frames[4]))
self.assertEqual(str(file_seq), final_output)
@mock.patch("seqparse.seqparse.scandir")
def test_simple_sequence(self, mock_api_call):
"""Seqparse: Test simple file sequence discovery."""
frames = {4: [0, 1, 2, 3, 4]}
# Expected outputs ...
frame_seq_output = "0000-0004"
file_seq_output = ".".join(
(self._test_file_name, frame_seq_output, self._test_ext))
final_output = os.path.join(self._test_root, file_seq_output)
input_entries = generate_entries(
ext=self._test_ext,
frames=frames,
name=self._test_file_name,
root=self._test_root)
mock_api_call.return_value = iter(input_entries)
parser = get_parser()
parser.scan_path(self._test_root)
data = parser.sequences
test_output = list(parser.output())
self.assertEqual(len(test_output), 1)
self.assertEqual(str(test_output[0]), final_output)
# Check the structure of the sequences property.
self.assertIn(self._test_root, data)
self.assertEqual(len(data), 1)
self.assertIn(self._test_file_name, data[self._test_root])
self.assertEqual(len(data[self._test_root]), 1)
# Now check the file sequence itself.
file_seq = data[self._test_root][self._test_file_name]
test_output = list(file_seq.output())
self.assertEqual(len(test_output), 1)
self.assertEqual(str(test_output[0]), final_output)
self.assertIn(self._test_ext, file_seq)
self.assertEqual(len(file_seq), 1)
self.assertTrue(4 in file_seq[self._test_ext])
self.assertEqual(len(file_seq[self._test_ext]), 1)
# And finally, the file sequence.
file_seq = file_seq[self._test_ext][4]
self.assertEqual(len(file_seq), len(frames[4]))
self.assertEqual(str(file_seq), final_output)
@mock.patch("seqparse.seqparse.scandir")
def test_complex_sequence(self, mock_api_call):
"""Seqparse: Test complex file sequence discovery."""
frames = {
1: [5, 6, 7, 8, 114, 199, 2000],
3: [8, 9, 10, 12],
4: [0, 1, 2, 3, 4, 5, 6, 8, 10, 12, 101]
}
input_entries = generate_entries(
ext=self._test_ext,
frames=frames,
name=self._test_file_name,
root=self._test_root)
# Expected output frame sequences. Note how frames 114, 199 move to the
# "pad 3" group and 2000 moves to the "pad 4" group!
output_seqs = {
1: "5-8",
3: "008-010,012,114,199",
4: "0000-0006,0008-0012x2,0101,2000"
}
# Expected final output (where "/" is os.sep):
# test_dir/TEST_DIR.5-8.exr
# test_dir/TEST_DIR.008-010,012,114,199.exr
# test_dir/TEST_DIR.0000-0006,0008-0012x2,0101,2000.exr
mock_api_call.return_value = iter(input_entries)
parser = get_parser()
parser.scan_path(self._test_root)
final_output = list()
for pad, seq_frames in sorted(output_seqs.items()):
bits = (self._test_file_name, seq_frames, self._test_ext)
output_seqs[pad] = os.path.join(self._test_root, ".".join(bits))
final_output.append(output_seqs[pad])
data = parser.sequences
# Check the structure of the sequences property.
self.assertIn(self._test_root, data)
self.assertEqual(len(data), 1)
self.assertIn(self._test_file_name, data[self._test_root])
self.assertEqual(len(data[self._test_root]), 1)
# Now check the file sequence itself.
file_seq = data[self._test_root][self._test_file_name]
test_output = list(file_seq.output())
self.assertEqual(len(test_output), 3)
self.assertEqual(list(map(str, test_output)), final_output)
self.assertIn(self._test_ext, file_seq)
self.assertEqual(len(file_seq), 1)
self.assertEqual(set(file_seq[self._test_ext]), set(output_seqs))
# And finally, the file sequences.
for pad in sorted(output_seqs):
self.assertEqual(output_seqs[pad],
str(file_seq[self._test_ext][pad]))
@mock.patch("seqparse.seqparse.scandir")
def test_nested_sequences(self, mock_api_call):
"""Seqparse: Test file sequence discovery in nested directories."""
mock_api_call.side_effect = mock_scandir_deep
print("\n\n SEQUENCES\n ---------")
initialise_mock_scandir_data(self._test_root)
parser = get_parser()
parser.scan_path(self._test_root)
for seq in parser.output():
print(" ", seq)
print("\n MAX LEVELS\n ----------")
for max_levels in range(-1, 4):
initialise_mock_scandir_data(self._test_root)
parser = get_parser()
parser.scan_path(self._test_root, max_levels=max_levels)
expected_seqs = max_levels + 2
if max_levels == -1:
expected_seqs = 5
seqs = list(parser.output())
blurb = " o max_levels == {:d}: {:d} ({:d} expected) entries"
print(blurb.format(max_levels, len(seqs), expected_seqs))
for seq in seqs:
print(" -", seq)
self.assertEqual(len(seqs), expected_seqs)
print("\n MIN LEVELS\n ----------")
for min_levels in range(-1, 4):
initialise_mock_scandir_data(self._test_root)
parser = get_parser()
parser.scan_path(self._test_root, min_levels=min_levels)
expected_seqs = 3 - min_levels
if min_levels == -1:
expected_seqs = 5
seqs = list(parser.output())
blurb = " o min_levels == {:d}: {:d} ({:d} expected) entries"
print(blurb.format(min_levels, len(seqs), expected_seqs))
for seq in seqs:
print(" -", seq)
self.assertEqual(len(seqs), expected_seqs)
print("")
def test_valid_frame_sequences(self):
"""Seqparse: Test validity of simple frame ranges."""
good_frame_seqs = [
"0001", ",0001", "0001,", "0001-0001", "0001-0001x0",
"0001-0003x3", "0001,0003", "0001,,0003", "0001-0010",
"0001-0010x0", "0001-0011x2", "0001-0012x2", "0001-0005,0007-0010",
"0001-0005x2,0007-0010", "0001-0005,0007-0011x2",
"0001-0005,0006,0008-0012x2", "0001,0003-0007,0009-0015x2",
"3,1,5,7", "01-05,03-07"
]
bad_frame_seqs = [
"-0001", "0001-", "0001x2", "x2", "0001,0003x2", "0001-0005x",
"0010-0001", "x", ",", ",,", ""
]
print("\n\n GOOD SEQUENCES\n --------------")
for frame_seq in good_frame_seqs:
output = validate_frame_sequence(frame_seq)
print(' o {!r} --> {!r}'.format(frame_seq, output))
self.assertTrue(output)
print("\n BAD SEQUENCES\n -------------")
for frame_seq in bad_frame_seqs:
print(' o {!r}'.format(frame_seq))
self.assertFalse(validate_frame_sequence(frame_seq))
print("")
def test_add_file_sequence(self):
"""Seqparse: Test file sequence addition via seqparse.add_file."""
input_file = ".".join((self._test_file_name, "0005", self._test_ext))
input_file = os.path.join(self._test_root, input_file)
# Expected outputs ...
input_frame_seq = "0000-0004"
output_frame_seq = "0000-0005"
input_file_seq = ".".join(
(self._test_file_name, input_frame_seq, self._test_ext))
input_file_seq = os.path.join(self._test_root, input_file_seq)
output_file_seq = ".".join(
(self._test_file_name, output_frame_seq, self._test_ext))
output_file_seq = os.path.join(self._test_root, output_file_seq)
print("\n\n INPUT FILES\n -----------")
print(" o", input_file_seq)
print(" o", input_file)
parser = get_parser()
parser.add_file(input_file_seq)
parser.add_file(input_file)
output = list(parser.output())
print("\n OUTPUT FILES\n ------------")
for line in output:
print(" o", line)
print("\n EXPECTED OUTPUT\n ---------------")
print(" o", output_file_seq)
print("")
self.assertEqual(len(output), 1)
self.assertEqual(str(output[0]), output_file_seq)
input_frame_seq = "0000-0002,,0003-0005"
input_file_seq = ".".join(
(self._test_file_name, input_frame_seq, self._test_ext))
input_file_seq = os.path.join(self._test_root, input_file_seq)
print("\n INPUT FILES\n -----------")
print(" o", input_file_seq)
print(" o", input_file)
parser = get_parser()
parser.add_file(input_file_seq)
parser.add_file(input_file)
output = list(parser.output())
print("\n OUTPUT FILES\n ------------")
for line in output:
print(" o", line)
print("\n EXPECTED OUTPUT\n ---------------")
print(" o", output_file_seq)
print("")
self.assertEqual(len(output), 1)
self.assertEqual(str(output[0]), output_file_seq)
@mock.patch("seqparse.seqparse.scandir")
def test_inversion(self, mock_api_call):
"""Seqparse: Test usage of the "missing" option in Seqparse.output."""
file_path = os.path.join(self._test_root, self._test_file_name)
chunk_in = FrameChunk(first=1, last=11, step=2, pad=4)
fseq = FileSequence(
name=file_path, ext=self._test_ext, frames=chunk_in)
input_entries = [DirEntry(x) for x in fseq]
mock_api_call.return_value = input_entries
chunk_out = FrameChunk(first=2, last=10, step=2, pad=4)
expected = FileSequence(
name=file_path, ext=self._test_ext, frames=chunk_out)
parser = get_parser()
parser.scan_path(self._test_root)
inverted = list(parser.output(missing=True))
self.assertEqual(len(inverted), 1)
print("\n\n SEQUENCE\n --------")
print(" input files: ", fseq)
print(" expected files:", expected)
print(" inverted files:", inverted[0])
self.assertEqual(str(inverted[0]), str(expected))
fseq = FileSequence(
name=file_path, ext=self._test_ext, frames=[1, 2, 3, 4, 6], pad=4)
input_entries = [DirEntry(x) for x in fseq]
mock_api_call.return_value = input_entries
expected = FileSequence(
name=file_path, ext=self._test_ext, frames=[5], pad=4)
parser = get_parser()
parser.scan_path(self._test_root)
inverted = list(parser.output(missing=True))
self.assertEqual(len(inverted), 1)
print("\n\n SEQUENCE\n --------")
print(" input files: ", fseq)
print(" expected files:", expected)
print(" inverted files:", inverted[0])
self.assertEqual(str(inverted[0]), str(expected))
@mock.patch("seqparse.seqparse.scandir")
def test_scan_options(self, mock_api_call):
"""Seqparse: Make sure scan_options works as expected."""
frames = {4: (1, 2, 3, 4, 6)}
input_entries = generate_entries(
name="test", ext="py", frames=frames, root=self._test_root)
input_entries.extend(
generate_entries(
name=".test", ext="py", frames=frames, root=self._test_root))
input_entries.append(
DirEntry(os.path.join(self._test_root, "pony.py")))
mock_api_call.return_value = input_entries
parser = get_parser()
parser.scan_options["stat"] = True
parser.scan_path(self._test_root)
output = list(parser.output())
expected = [
os.path.join(self._test_root, "test.0001-0004,0006.py"),
os.path.join(self._test_root, "pony.py")
]
self.assertEqual(len(output), 2)
self.assertEqual(list(map(str, output)), expected)
self.assertEqual(output[0].ctime, 1490908340)
self.assertEqual(output[0].mtime, 1490908305)
self.assertEqual(output[0].size, 36520)
parser = get_parser()
parser.scan_options["all"] = True
parser.scan_path(self._test_root)
output = list(parser.output())
expected = [
os.path.join(self._test_root, ".test.0001-0004,0006.py"),
os.path.join(self._test_root, "test.0001-0004,0006.py"),
os.path.join(self._test_root, "pony.py")
]
self.assertEqual(len(output), 3)
self.assertEqual(list(map(str, output)), expected)
def test_api_calls(self):
"""Seqparse: Test API calls at root of module."""
chunk = FrameChunk(first=1, last=7, step=2, pad=4)
seq = get_sequence(lrange(1, 8, 2), pad=4)
self.assertTrue(isinstance(seq, FrameSequence))
self.assertEqual(str(seq), "0001-0007x2")
expected = FrameChunk(first=2, last=6, step=2, pad=4)
inverted = invert(chunk)
self.assertEqual(str(inverted), str(expected))
inverted = invert(seq)
self.assertEqual(str(inverted), str(expected))
with self.assertRaises(TypeError):
invert(get_parser())
self.assertEqual(get_version(), __version__)
| 35.442857
| 79
| 0.604653
| 2,188
| 17,367
| 4.543876
| 0.102834
| 0.074834
| 0.061557
| 0.027359
| 0.684872
| 0.640113
| 0.594548
| 0.536814
| 0.527962
| 0.519916
| 0
| 0.045518
| 0.259976
| 17,367
| 489
| 80
| 35.515337
| 0.728058
| 0.086313
| 0
| 0.553892
| 0
| 0
| 0.090515
| 0.023377
| 0
| 0
| 0
| 0
| 0.209581
| 1
| 0.032934
| false
| 0.002994
| 0.026946
| 0
| 0.07485
| 0.116766
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc918b97de90432887a40a5ee37a55b41149a19b
| 12,666
|
py
|
Python
|
deliveroo_scraping.py
|
ragreener1/deliveroo-scraping
|
c8e3de2503a6198734904fb937a77dd38ef05581
|
[
"MIT"
] | null | null | null |
deliveroo_scraping.py
|
ragreener1/deliveroo-scraping
|
c8e3de2503a6198734904fb937a77dd38ef05581
|
[
"MIT"
] | null | null | null |
deliveroo_scraping.py
|
ragreener1/deliveroo-scraping
|
c8e3de2503a6198734904fb937a77dd38ef05581
|
[
"MIT"
] | 1
|
2021-03-16T16:43:34.000Z
|
2021-03-16T16:43:34.000Z
|
import urllib.request
import pandas as pd
import sqlite3
import re
from bs4 import BeautifulSoup
# Parameters
postcodes_list = ["W1F7EY"]
db_name = "scraped.db"
# This is so that Deliveroo think the scraper is Google Chrome
# as opposed to a web scraper
hdr = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11' +
'(KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*' +
';q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive'}
def process_menu(doc, url, tags_df, tag_type, restaurants, restaurants_to_tags,
menu_sections, menu_items):
# This function processes the menu
# This gets the restaurant_name by finding the <h1> tag with the CSS class
# restaurant_name
restaurant_name = doc.find("h1", class_="restaurant__name", text=True).text
# This gets the deliveroo_name by selecting the appropriate part from the
# URL
# This will fail on restaurants not in London
deliveroo_name = re.findall(
'(?<=https://deliveroo.co.uk/menu/london/)(.*)(?=\\?postcode=)',
url)[0]
# This adds this to the restaurants dataframe
# This isn't very efficient, if you were wanting to scrape large numbers
# you wouldn't want to use .append
restaurants = restaurants.append(
{"name": restaurant_name, "deliveroo_name": deliveroo_name},
ignore_index=True)
# This gets the restaurant_id by finding the index of what as inserted
# Again this isn't very efficient
restaurant_id = restaurants[
(restaurants == [restaurant_name, deliveroo_name]).all(
axis=1)].index[0]
restaurant_tags = []
# Deal with tags
# Start by finding all <small> tags with the CSS class tag
for tag in doc.find_all("small", class_="tag"):
# The second element of the <small> CSS class is the type of the tag
# this could be locale or food etc.
tagtype = tag['class'][1]
# The name of the tag is what is inside the <small>
name = tag.text
# See if the tagtype exists in the tag_type dataframe
type_matches = tag_type[(tag_type == [tagtype]).all(axis=1)]
# If it doesn't
if len(type_matches) == 0:
# Add it (again not very efficient)
tag_type = tag_type.append({"name": tagtype}, ignore_index=True)
# Update the matches
type_matches = tag_type[(tag_type == [tagtype]).all(axis=1)]
# See if the tag already exists in the tags_df dataframe
matches = tags_df[
(tags_df == [name, type_matches.index[0]]).all(axis=1)]
# If it doesn't
if len(matches) == 0:
# Add it
entry = {"name": name, "type": type_matches.index[0]}
tags_df = tags_df.append(entry, ignore_index=True)
matches = tags_df[(tags_df == [name, type_matches.index[0]]).all(
axis=1)]
# Add the tag to a list of tags for that restaurant
restaurant_tags.append(matches.index[0])
# For each tag
for tag in restaurant_tags:
# Add this to restaurants_to_tags df
restaurants_to_tags = restaurants_to_tags.append(
{"restaurant_id": restaurant_id, "tag_id": tag}, ignore_index=True)
# For each category (in the menu, e.g. Sides, Mains, Desserts, Drinks -
# different for every restaurant though!) process the menu items
# This is found by looking for <div> tags with the CSS class
# menu-index-page__menu-category
categories = doc.find_all("div", class_="menu-index-page__menu-category")
for category in categories:
# the category name is inside the h3 inside the div
category_name = category.h3.text
# Add the category to the menu_sections data frame. Again this isn't
# efficient.
menu_sections = menu_sections.append(
{"restaurant_id": restaurant_id, "name": category_name},
ignore_index=True)
# Get the id in the menu_sections data frame
category_id = menu_sections[
(menu_sections == [restaurant_id, category_name]).all(
axis=1)].index[0]
# Get each of the items in that category
category_items = []
# For each menu item. Found by looking for <div> inside the category
# with the CSS class menu-index-page__item_content
items_html = category.find_all("div",
class_="menu-index-page__item-content")
for menu_item in items_html:
# The name is the <h6> with the CSS class
# menu-index-page__item-title
item_name = \
menu_item.find("h6", class_="menu-index-page__item-title").text
# The price is the <span> with the CSS class
# menu-index-page__item-price. The £ symbol is dropped, it is then
# converted to a floating-point number (decimal), multiplied by 100
# so that it is in pence. It is then converted to an integer.
#
# https://stackoverflow.com/questions/3730019/why-not-use-double-or-float-to-represent-currency
price_as_text = \
menu_item.find("span", class_="menu-index-page__item-price")\
.text[1:]
price_as_float = float(price_as_text)
item_price = int(price_as_float * 100)
# If an item is popular it has a <span> with the CSS class
# menu-index-page__item-popular
# So this tries to find it, if it exists is_item_popular = True,
# False otherwise.
is_item_popular = menu_item.find(
"span", class_="menu-index-page__item-popular") is not None
# Add this menu_item to category_items
category_items.append(
{"menu_section_id": category_id,
"name": item_name,
"price_in_pence": item_price,
"is_popular": is_item_popular}
)
# Add all the menu items in that category to the menu_items data frame,
# this is more efficient than doing this one at a time
menu_items = menu_items.append(category_items, ignore_index=True)
# Return the updated dataframes
return (tags_df, tag_type, restaurants, restaurants_to_tags, menu_sections,
menu_items)
def get_restaurant_and_process_menu(url, tags_df, tag_type, restaurants,
restaurants_to_tags, menu_sections,
menu_items, restaurants_to_locs,
postcodes):
# This functions gets the restaurant and then processes its menu if it
# hasn't been processed before
# Get the deliveroo name from the url
deliveroo_name = re.findall(
'(?<=https://deliveroo.co.uk/menu/london/)(.*)(?=\\?postcode=)',
url)[0]
# If this restaurant hasn't been seen before
if deliveroo_name not in restaurants['deliveroo_name']:
# Get the webpage
request = urllib.request.Request(url, headers=hdr)
page = urllib.request.urlopen(request)
soup = BeautifulSoup(page)
# Try and process the menu, if it doesn't work handle it nicely
try:
(tags_df, tag_type, restaurants, restaurants_to_tags,
menu_sections, menu_items) = process_menu(soup, url, tags_df,
tag_type, restaurants,
restaurants_to_tags,
menu_sections,
menu_items)
except Exception:
print(f"Fail on {url}")
# Get the postcode from the URL
postcode = re.findall('(?<=\\?postcode=)(.)*', url)[0]
# Find where it is in the postcodes data frame
postcodes_index = (postcodes['post_code'] == postcode).index[0]
# Find the restaurants id in the restaurants dataframe using the deliveroo
# name
restaurant_index = \
(restaurants['deliveroo_name'] == deliveroo_name).index[0]
# Add an entry to restaurants_to_locs saying that this restaurant is
# available at this location
restaurants_to_locs = restaurants_to_locs.append(
{"restaurant_id": restaurant_index, "loc_id": postcodes_index},
ignore_index=True)
# Return the amended dataframes
return (tags_df, tag_type, restaurants, restaurants_to_tags, menu_sections,
menu_items, restaurants_to_locs)
def process_restaurants_for_postcode(postcode, tags_df, tag_type, restaurants,
restaurants_to_tags, menu_sections,
menu_items, restaurants_to_locs,
postcodes):
# This function processes the restaurants for the postcodes
# Add the postcode to the URL - it doesn't matter that it says camden, it
# will update as appropriate.
url = "https://deliveroo.co.uk/restaurants/london/camden" \
f"?postcode={postcode}&sort=time"
# Create the HTTP request
request = urllib.request.Request(url, headers=hdr)
# Get the page
page = urllib.request.urlopen(request)
soup = BeautifulSoup(page)
# For every link in the page
for i, link in enumerate(soup.find_all("a")):
print(i)
# Get the destination of the link
destination = link.get("href")
# If it's to a menu, get the restaurant and process the menu
if "/menu" in destination:
(tags_df, tag_type, restaurants, restaurants_to_tags,
menu_sections, menu_items, restaurants_to_locs) = \
get_restaurant_and_process_menu(
"https://deliveroo.co.uk" + destination, tags_df, tag_type,
restaurants, restaurants_to_tags, menu_sections,
menu_items, restaurants_to_locs, postcodes)
# Return the amended dataframes
return (tags_df, tag_type, restaurants, restaurants_to_tags, menu_sections,
menu_items, restaurants_to_locs)
def process_all_restaurants(postcodes, db_name):
# This function processes all of the postcodes
# Create the dataframes
tags_df = pd.DataFrame({"name": [], "type": []})\
.astype({"name": "str", "type": "int32"})
tag_type = pd.DataFrame({"name": []})
restaurants = pd.DataFrame({"name": [], "deliveroo_name": []})\
.astype({"name": "str", "deliveroo_name": "str"})
restaurants_to_tags = pd.DataFrame({"restaurant_id": [], "tag_id": []})\
.astype({"restaurant_id": "int64", "tag_id": "int64"})
menu_sections = pd.DataFrame({"restaurant_id": [], "name": []})\
.astype({"restaurant_id": "int64", "name": "str"})
menu_items = pd.DataFrame(
{"menu_section_id": [],
"name": [],
"price_in_pence": [],
"is_popular": []}).astype(
{"menu_section_id": "int64",
"name": "str",
"price_in_pence": "int64",
"is_popular": "bool"})
restaurants_to_locs = pd.DataFrame({"restaurant_id": [], "loc_id": []})\
.astype({"restaurant_id": "int64", "loc_id": "int64"})
for post_code in postcodes['post_code']:
(tags_df, tag_type, restaurants, restaurants_to_tags, menu_sections,
menu_items, restaurants_to_locs) =\
process_restaurants_for_postcode(post_code, tags_df, tag_type,
restaurants, restaurants_to_tags,
menu_sections, menu_items,
restaurants_to_locs, postcodes)
# Write to db
cnx = sqlite3.connect(db_name)
postcodes.to_sql("POSTCODES", cnx, index_label="id")
restaurants.to_sql("RESTAURANTS", cnx, index_label="id")
restaurants_to_locs.to_sql("RESTAURANTS_AVAILABLE", cnx, index_label="id")
menu_items.to_sql("MENU_ITEMS", cnx, index_label="id")
menu_sections.to_sql("MENU_SECTIONS", cnx, index_label="id")
tags_df.to_sql("CATEGORIES", cnx, index_label="id")
tag_type.to_sql("CATEGORY_TYPES", cnx, index_label="id")
restaurants_to_tags.to_sql("RESTAURANT_CATEGORIES", cnx, index_label="id")
cnx.close()
if __name__ == "__main__":
postcodes_df = pd.DataFrame({
'post_code': postcodes_list
})
process_all_restaurants(postcodes_df, db_name)
| 41.527869
| 107
| 0.614243
| 1,611
| 12,666
| 4.617629
| 0.175667
| 0.054174
| 0.038849
| 0.020971
| 0.352063
| 0.282162
| 0.252857
| 0.230811
| 0.208227
| 0.184299
| 0
| 0.011449
| 0.282804
| 12,666
| 304
| 108
| 41.664474
| 0.807354
| 0.265435
| 0
| 0.2
| 0
| 0.005882
| 0.15116
| 0.034917
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023529
| false
| 0
| 0.029412
| 0
| 0.070588
| 0.011765
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc91ff3844434233ea3b10e4ec6bd4eb042adfbd
| 2,455
|
py
|
Python
|
DataWrangling/TTNData2Gsheet_Auto.py
|
diliprk/SmartCityVisualization
|
618cd433c2f6bb55042c643ccaef12b5814ccb77
|
[
"MIT"
] | null | null | null |
DataWrangling/TTNData2Gsheet_Auto.py
|
diliprk/SmartCityVisualization
|
618cd433c2f6bb55042c643ccaef12b5814ccb77
|
[
"MIT"
] | null | null | null |
DataWrangling/TTNData2Gsheet_Auto.py
|
diliprk/SmartCityVisualization
|
618cd433c2f6bb55042c643ccaef12b5814ccb77
|
[
"MIT"
] | null | null | null |
#### Reading Data from The Things Network Data and Automatically Storing it to a Google Spreadsheet
# Author: Dilip Rajkumar
# Email: d.rajkumar@hbksaar.de
# Date: 19/01/2018
# Revision: version#1
# License: MIT License
import pandas as pd
import requests
from df2gspread import df2gspread as d2g
import time
## Set Initial Time Duration in mins to query TTN Data:
time_duration = 5
# Insert spreadsheet file id of Google Spreadsheet
spreadsheet = '1ftXlebCTDp5tTxvlm5K3Sv1oNttDHR7s1xTi-i-ZR_o' ## Google SpreadSheet Title: TTN_Live_DataLogger
# Insert Sheet Name
wks_name = 'Sheet1'
def queryttndata(time_duration):
'''
This function queries data from TTN Swagger API based on a time duration which is given as an input
'''
headers = {'Accept': 'application/json','Authorization': 'key ttn-account-v2.P4kRaEqenNGbIdFSgSLDJGMav5K9YrekkMm_F1lOVrw'}
## Set query duration in minutes
querytime = str(time_duration) + 'm'
params = (('last', querytime),)
response = requests.get('https://vehiclecounter.data.thethingsnetwork.org/api/v2/query', headers=headers, params=params).json()
df_raw = pd.DataFrame.from_dict(response)
return df_raw
def cleandf(df):
'''
In this function we pass as input the raw dataframe from TTN in JSON format to clean and optimize the data.
This function is customized and unique to every dataset
'''
df.rename(columns={'time': 'TTNTimeStamp'}, inplace=True)
df['TTNTimeStamp'] = pd.to_datetime(df['TTNTimeStamp'])
df['TTNTimeStamp'] = df['TTNTimeStamp'] + pd.Timedelta(hours=1) ## Offset Time by 1 hour to fix TimeZone Error of Swagger API TimeStamps
df['TTNTimeStamp'] = df['TTNTimeStamp'].values.astype('datetime64[s]')
drop_cols = ['raw','device_id']
df = df.drop(drop_cols, 1)
df.reset_index()
df = df.reindex(['TTNTimeStamp','Count'], axis=1)
print("Latest Data:")
print(df.tail(1),'\n')
return df
while True:
#begin your infinite loop
df_raw = queryttndata(time_duration)
df_clean = cleandf(df_raw)
d2g.upload(df_clean, spreadsheet,wks_name,col_names=True,clean=True) # Write dataframe to Google Spreadsheet
df_clean.to_csv('TTN_VehicleCountData.csv', date_format="%d/%m/%Y %H:%M:%S",index=True) # Save DataFrame locally
time.sleep(60) # Call function every 60 seconds
time_duration += 1 ## Increment query duration by 1 mins at the end of every function call
| 41.610169
| 140
| 0.714053
| 341
| 2,455
| 5.058651
| 0.483871
| 0.048696
| 0.027826
| 0.048696
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020287
| 0.176782
| 2,455
| 59
| 141
| 41.610169
| 0.833251
| 0.375967
| 0
| 0
| 0
| 0
| 0.271117
| 0.085831
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060606
| false
| 0
| 0.121212
| 0
| 0.242424
| 0.060606
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc92c4f874ac07a82bec9855ab06be0ea305a134
| 4,323
|
py
|
Python
|
alleycat/reactive/property.py
|
mysticfall/alleycat-reactive
|
69ff2f283627a6c613b084677be707234b29164c
|
[
"MIT"
] | 14
|
2020-07-13T08:15:27.000Z
|
2021-02-17T21:22:22.000Z
|
alleycat/reactive/property.py
|
mysticfall/alleycat-reactive
|
69ff2f283627a6c613b084677be707234b29164c
|
[
"MIT"
] | 4
|
2020-08-18T18:50:00.000Z
|
2021-12-04T07:09:12.000Z
|
alleycat/reactive/property.py
|
mysticfall/alleycat-reactive
|
69ff2f283627a6c613b084677be707234b29164c
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from typing import TypeVar, Generic, Callable, Optional, Any, cast, Tuple
import rx
from returns import pipeline
from returns.functions import identity
from returns.maybe import Maybe, Nothing
from rx import Observable
from rx.subject import BehaviorSubject
from . import ReactiveValue, ReactiveView
from .value import Modifier
T = TypeVar("T")
class ReactiveProperty(Generic[T], ReactiveValue[T]):
def __init__(
self,
init_value: Maybe[T] = Nothing,
read_only=False,
modifier: Callable[[Any], Modifier] = lambda _: identity,
validator: Callable[[Any, T], T] = lambda _, v: v) -> None:
super().__init__(read_only)
self._init_value = init_value
self._modifier = modifier
self._validator = validator
@property
def init_value(self) -> Maybe[T]:
return self._init_value
@property
def validator(self) -> Callable[[T, Any], T]:
return self._validator
@property
def modifier(self) -> Callable[[Any], Modifier]:
return self._modifier
def as_view(self) -> ReactiveView[T]:
return ReactiveView(self.context, self.read_only)
def pipe(self, modifiers: Callable[[Any], Tuple[Modifier, ...]]) -> ReactiveProperty:
def stack(obj: Any):
# FIXME: Not sure why both PyCharm and Mypy fails to resolve pipeline.pipe(). Should investigate later.
# noinspection PyUnresolvedReferences
return pipeline.pipe(*([self.modifier(obj)] + list(modifiers(obj)))) # type:ignore
return ReactiveProperty(self.init_value, self.read_only, stack, self.validator)
def validate(self, validator: Callable[[Any, T], T]) -> ReactiveProperty[T]:
if validator is None:
raise ValueError("Argument 'modifier' is required.")
def validate(obj: Any, v: T) -> T:
return validator(obj, self.validator(obj, v))
return ReactiveProperty(self.init_value, self.read_only, self.modifier, validate)
class PropertyData(ReactiveValue.Data[T]):
def __init__(
self,
name: str,
init_value: Maybe[T],
modifier: Modifier,
validator: Callable[[T], T]):
assert name is not None
assert init_value is not None
assert modifier is not None
assert validator is not None
self._validator = validator
self._property: Optional[BehaviorSubject] = None
obs: Observable
if init_value != Nothing:
self._property = BehaviorSubject(init_value.map(validator).unwrap())
obs = self._property
else:
obs = rx.empty()
super().__init__(name, obs, modifier)
# Must override to appease Mypy... I hate Python.
@property
def value(self) -> T:
return super().value
@value.setter
def value(self, value: T):
self._check_disposed()
if self.initialized:
assert self._property is not None
self._property.on_next(self.validator(value))
else:
self._property = BehaviorSubject(self.validator(value))
self.observable = self._property
@property
def validator(self) -> Callable[[T], T]:
return self._validator
def dispose(self) -> None:
assert self._property is not None
self._check_disposed()
self._property.on_completed()
super().dispose()
def _create_data(self, obj: Any) -> PropertyData:
assert obj is not None
assert self.name is not None
def validate(v: T) -> T:
return self.validator(obj, v)
return self.PropertyData(self.name, self.init_value, self.modifier(obj), validate)
def _get_data(self, obj: Any) -> PropertyData:
assert obj is not None
return cast(ReactiveProperty.PropertyData, super()._get_data(obj))
def _set_value(self, obj: Any, data: ReactiveValue.Data, value: Any) -> None:
assert obj is not None
assert isinstance(data, ReactiveProperty.PropertyData)
data.value = value
| 30.443662
| 115
| 0.609762
| 483
| 4,323
| 5.310559
| 0.213251
| 0.042105
| 0.035088
| 0.02924
| 0.176218
| 0.132554
| 0.095127
| 0.070955
| 0.034308
| 0.034308
| 0
| 0
| 0.294471
| 4,323
| 141
| 116
| 30.659574
| 0.840984
| 0.04557
| 0
| 0.229167
| 0
| 0
| 0.008008
| 0
| 0
| 0
| 0
| 0.007092
| 0.114583
| 1
| 0.1875
| false
| 0
| 0.104167
| 0.09375
| 0.447917
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc9452d9c2ee3ebb7757e04a3373872166150cb1
| 3,700
|
py
|
Python
|
utils/mask/converter.py
|
csgcmai/cvat
|
074500de7bf638fdf66f3874b80df9e87d58a746
|
[
"MIT"
] | 4
|
2019-01-12T07:32:48.000Z
|
2019-08-01T12:11:33.000Z
|
utils/mask/converter.py
|
csgcmai/cvat
|
074500de7bf638fdf66f3874b80df9e87d58a746
|
[
"MIT"
] | 12
|
2019-08-06T02:45:31.000Z
|
2022-02-10T00:16:32.000Z
|
utils/mask/converter.py
|
csgcmai/cvat
|
074500de7bf638fdf66f3874b80df9e87d58a746
|
[
"MIT"
] | 11
|
2018-11-04T19:04:59.000Z
|
2018-12-02T13:30:22.000Z
|
#!/usr/bin/env python
#
# Copyright (C) 2018 Intel Corporation
#
# SPDX-License-Identifier: MIT
from __future__ import absolute_import, division, print_function
import argparse
import os
import glog as log
import numpy as np
import cv2
from lxml import etree
from tqdm import tqdm
def parse_args():
"""Parse arguments of command line"""
parser = argparse.ArgumentParser(
fromfile_prefix_chars='@',
description='Convert CVAT XML annotations to masks'
)
parser.add_argument(
'--cvat-xml', metavar='FILE', required=True,
help='input file with CVAT annotation in xml format'
)
parser.add_argument(
'--background-color', metavar='COLOR_BGR', default="0,0,0",
help='specify background color (by default: 0,0,0)'
)
parser.add_argument(
'--label-color', metavar='LABEL:COLOR_BGR', action='append',
default=[],
help="specify a label's color (e.g. 255 or 255,0,0). The color will " +
"be interpreted in accordance with the mask format."
)
parser.add_argument(
'--mask-bitness', type=int, choices=[8, 24], default=8,
help='choose bitness for masks'
)
parser.add_argument(
'--output-dir', metavar='DIRECTORY', required=True,
help='directory for output masks'
)
return parser.parse_args()
def parse_anno_file(cvat_xml):
root = etree.parse(cvat_xml).getroot()
anno = []
for image_tag in root.iter('image'):
image = {}
for key, value in image_tag.items():
image[key] = value
image['shapes'] = []
for poly_tag in image_tag.iter('polygon'):
polygon = {'type': 'polygon'}
for key, value in poly_tag.items():
polygon[key] = value
image['shapes'].append(polygon)
for box_tag in image_tag.iter('box'):
box = {'type': 'box'}
for key, value in box_tag.items():
box[key] = value
box['points'] = "{0},{1};{2},{1};{2},{3};{0},{3}".format(
box['xtl'], box['ytl'], box['xbr'], box['ybr'])
image['shapes'].append(box)
image['shapes'].sort(key=lambda x: int(x.get('z_order', 0)))
anno.append(image)
return anno
def create_mask_file(mask_path, width, height, bitness, color_map, background, shapes):
mask = np.zeros((height, width, bitness // 8), dtype=np.uint8)
for shape in shapes:
color = color_map.get(shape['label'], background)
points = [tuple(map(float, p.split(','))) for p in shape['points'].split(';')]
points = np.array([(int(p[0]), int(p[1])) for p in points])
mask = cv2.fillPoly(mask, [points], color=color)
cv2.imwrite(mask_path, mask)
def to_scalar(str, dim):
scalar = list(map(int, str.split(',')))
if len(scalar) < dim:
scalar.extend([scalar[-1]] * dim)
return tuple(scalar[0:dim])
def main():
args = parse_args()
anno = parse_anno_file(args.cvat_xml)
color_map = {}
dim = args.mask_bitness // 8
for item in args.label_color:
label, color = item.split(':')
color_map[label] = to_scalar(color, dim)
background = to_scalar(args.background_color, dim)
for image in tqdm(anno, desc='Generate masks'):
mask_path = os.path.join(args.output_dir, os.path.splitext(image['name'])[0] + '.png')
mask_dir = os.path.dirname(mask_path)
if mask_dir:
os.makedirs(mask_dir, exist_ok=True)
create_mask_file(mask_path, int(image['width']), int(image['height']),
args.mask_bitness, color_map, background, image['shapes'])
if __name__ == "__main__":
main()
| 31.355932
| 94
| 0.603784
| 495
| 3,700
| 4.375758
| 0.319192
| 0.022161
| 0.039243
| 0.018006
| 0.036011
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015048
| 0.245676
| 3,700
| 117
| 95
| 31.623932
| 0.761018
| 0.031892
| 0
| 0.055556
| 0
| 0.011111
| 0.162094
| 0.008679
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.088889
| 0
| 0.177778
| 0.011111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc95ea517dd66fa33c7319705ecac90ffc97a9fe
| 2,099
|
py
|
Python
|
examples/plot_afq_callosal.py
|
gkiar/pyAFQ
|
fb6985c2a9715a378e1ca94dc89f6bc966c60ab5
|
[
"BSD-2-Clause"
] | null | null | null |
examples/plot_afq_callosal.py
|
gkiar/pyAFQ
|
fb6985c2a9715a378e1ca94dc89f6bc966c60ab5
|
[
"BSD-2-Clause"
] | null | null | null |
examples/plot_afq_callosal.py
|
gkiar/pyAFQ
|
fb6985c2a9715a378e1ca94dc89f6bc966c60ab5
|
[
"BSD-2-Clause"
] | null | null | null |
"""
==========================
Callosal bundles using AFQ API
==========================
An example using the AFQ API to find callosal bundles using the templates from:
http://hdl.handle.net/1773/34926
"""
import os.path as op
import plotly
from AFQ import api
from AFQ.mask import RoiMask
import AFQ.data as afd
##########################################################################
# Get some example data
# ---------------------
#
# Retrieves `Stanford HARDI dataset <https://purl.stanford.edu/ng782rw8378>`_.
#
afd.organize_stanford_data(clear_previous_afq=True)
##########################################################################
# Set tractography parameters (optional)
# ---------------------
# We make this tracking_params which we will pass to the AFQ object
# which specifies that we want 100,000 seeds randomly distributed
# in the ROIs of every bundle.
#
# We only do this to make this example faster and consume less space.
tracking_params = dict(seed_mask=RoiMask(),
n_seeds=10000,
random_seeds=True,
rng_seed=42)
##########################################################################
# Initialize an AFQ object:
# -------------------------
#
# We specify bundle_info as the default bundles list (api.BUNDLES) plus the
# callosal bundle list. This tells the AFQ object to use bundles from both
# the standard and callosal templates.
myafq = api.AFQ(bids_path=op.join(afd.afq_home,
'stanford_hardi'),
dmriprep='vistasoft',
bundle_info=api.BUNDLES + api.CALLOSUM_BUNDLES,
tracking_params=tracking_params)
##########################################################################
# Visualizing bundles and tract profiles:
# ---------------------------------------
# This would run the script and visualize the bundles using the plotly
# interactive visualization, which should automatically open in a
# new browser window.
bundle_html = myafq.viz_bundles(export=True, n_points=50)
plotly.io.show(bundle_html[0])
| 34.409836
| 79
| 0.552644
| 232
| 2,099
| 4.900862
| 0.530172
| 0.049252
| 0.03518
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018486
| 0.175322
| 2,099
| 60
| 80
| 34.983333
| 0.638359
| 0.512625
| 0
| 0
| 0
| 0
| 0.032999
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.294118
| 0
| 0.294118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc991b843e076978c3f0a3ec13e52528bd5dcb1e
| 2,157
|
py
|
Python
|
parsy-backend/flaskApp/assignment/views.py
|
dstambler17/Parsy.io
|
14c4905809f79f191efbbbdfbd0e8d9e838478e7
|
[
"MIT"
] | null | null | null |
parsy-backend/flaskApp/assignment/views.py
|
dstambler17/Parsy.io
|
14c4905809f79f191efbbbdfbd0e8d9e838478e7
|
[
"MIT"
] | null | null | null |
parsy-backend/flaskApp/assignment/views.py
|
dstambler17/Parsy.io
|
14c4905809f79f191efbbbdfbd0e8d9e838478e7
|
[
"MIT"
] | null | null | null |
import sys
from flask import Blueprint, request, jsonify
from flaskApp import db
from flaskApp.assignment.utils import *
from flaskApp.error.error_handlers import *
import json
from flaskApp.helpers import getAssignmentData
assignment = Blueprint('assignment', __name__)
@assignment.route('/restoreAssignment/<calID>/<courseID>', methods=['POST'])
def restore_assignment(calID, courseID):
try:
DbAssignmentUtils.restore_all_original_assignment(calID, courseID)
return jsonify({"restore" : "success"}), 201
except (NotFound) as e:
return jsonify(e.body), e.status_code
@assignment.route('/getAssignment/<calID>/<courseID>/<assignment>', methods=['GET'])
def get_assignment_details(calID, courseID, assignment):
try:
res = DbAssignmentUtils.get_assignment_slot_details(calID, courseID, assignment)
return jsonify(res), 200
except (NotFound) as e:
return jsonify(e.body), e.status_code
@assignment.route('/deleteAssignment/<calID>/<courseID>', methods=['DELETE'])
def delete_assignment(calID, courseID):
try:
request_body = json.loads(request.get_data())
DbAssignmentUtils.delete_assignment_slot(calID, courseID, request_body)
return jsonify({}), 204
except (NotFound, BadRequest) as e:
return jsonify(e.body), e.status_code
@assignment.route('/addAssignment/<calID>/<courseID>', methods=['POST'])
def add_assignment(calID, courseID):
try:
request_body = json.loads(request.get_data())
res = DbAssignmentUtils.add_Assignment_slot(calID, courseID, request_body)
return jsonify(res), 201
except (NotFound, BadRequest, ValidationFailed) as e:
return jsonify(e.body), e.status_code
'''Test method, keep just in case. Will prob be moved to seperate API designed to
interact with just the MySQL database that the data pipeline will drop stuff into'''
@assignment.route('/getAssignmentTest/<courseID>', methods=['GET'])
def get_session_assignment(courseID):
try:
result = getAssignmentData(courseID)
return jsonify(result)
except (NotFound) as e:
return jsonify(e.body), e.status_code
| 39.944444
| 88
| 0.72369
| 257
| 2,157
| 5.945525
| 0.315175
| 0.102094
| 0.02945
| 0.052356
| 0.342277
| 0.306937
| 0.306937
| 0.306937
| 0.240183
| 0.219241
| 0
| 0.006637
| 0.161799
| 2,157
| 53
| 89
| 40.698113
| 0.838496
| 0
| 0
| 0.333333
| 0
| 0
| 0.113009
| 0.090909
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.155556
| 0
| 0.488889
| 0.044444
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc998d9236fe5233cce504679e6058e6b30ca879
| 1,411
|
py
|
Python
|
Apache Spark with Python - Big Data with PySpark and Spark/6-PairRDD/filter/AirportsNotInUsa.py
|
jrderek/Big_Data_Engineering_Portfolio
|
bf7a5efb24f2c6e860e5ead544dadc08f791814e
|
[
"MIT"
] | null | null | null |
Apache Spark with Python - Big Data with PySpark and Spark/6-PairRDD/filter/AirportsNotInUsa.py
|
jrderek/Big_Data_Engineering_Portfolio
|
bf7a5efb24f2c6e860e5ead544dadc08f791814e
|
[
"MIT"
] | null | null | null |
Apache Spark with Python - Big Data with PySpark and Spark/6-PairRDD/filter/AirportsNotInUsa.py
|
jrderek/Big_Data_Engineering_Portfolio
|
bf7a5efb24f2c6e860e5ead544dadc08f791814e
|
[
"MIT"
] | null | null | null |
import sys
sys.path.insert(0, '.')
from pyspark import SparkContext, SparkConf
from commons.Utils import Utils
if __name__ == "__main__":
'''
Create a Spark program to read the airport data from in/airports.text;
generate a pair RDD with airport name being the key and country name being the value.
Then remove all the airports which are located in United States and output the pair RDD to out/airports_not_in_usa_pair_rdd.text
Each row of the input file contains the following columns:
Airport ID, Name of airport, Main city served by airport, Country where airport is located,
IATA/FAA code, ICAO Code, Latitude, Longitude, Altitude, Timezone, DST, Timezone in Olson format
Sample output:
("Kamloops", "Canada")
("Wewak Intl", "Papua New Guinea")
...
'''
conf = SparkConf().setAppName("airports").setMaster("local[*]")
sc = SparkContext(conf=conf)
airportsRDD = sc.textFile("inputs/airports.text")
airportPairRDD = airportsRDD.map(lambda line:
(Utils.COMMA_DELIMITER.split(line)[1],
Utils.COMMA_DELIMITER.split(line)[3]))
airportsNotInUSA = airportPairRDD.filter(
lambda keyValue: keyValue[1] != "\"United States\"")
airportsNotInUSA.saveAsTextFile(
"outputs/airports_not_in_usa_pair_rdd.text")
| 35.275
| 133
| 0.659816
| 173
| 1,411
| 5.265896
| 0.589595
| 0.030735
| 0.026345
| 0.035126
| 0.120746
| 0.059276
| 0.059276
| 0
| 0
| 0
| 0
| 0.003777
| 0.249468
| 1,411
| 39
| 134
| 36.179487
| 0.856468
| 0
| 0
| 0
| 0
| 0
| 0.123229
| 0.058074
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc99f86b822d06474cc1888107ef3e865f27a1cd
| 16,668
|
py
|
Python
|
linux/keyman-config/keyman_config/keyboard_details.py
|
srl295/keyman
|
4dfd0f71f3f4ccf81d1badbd824900deee1bb6d1
|
[
"MIT"
] | null | null | null |
linux/keyman-config/keyman_config/keyboard_details.py
|
srl295/keyman
|
4dfd0f71f3f4ccf81d1badbd824900deee1bb6d1
|
[
"MIT"
] | null | null | null |
linux/keyman-config/keyman_config/keyboard_details.py
|
srl295/keyman
|
4dfd0f71f3f4ccf81d1badbd824900deee1bb6d1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# Keyboard details window
import logging
import json
from os import path
import qrcode
import tempfile
import gi
from gi.repository import Gtk
from keyman_config import KeymanComUrl, _, secure_lookup
from keyman_config.accelerators import init_accel
from keyman_config.kmpmetadata import parsemetadata
gi.require_version('Gtk', '3.0')
# basics: keyboard name, package version, description
# other things: filename (of kmx), ,
# OSK availability, documentation availability, package copyright
# also: supported languages, fonts
# from kmx?: keyboard version, encoding, layout type
# there is data in kmp.inf/kmp.json
# there is possibly data in kbid.json (downloaded from api)
class KeyboardDetailsView(Gtk.Dialog):
# TODO Display all the information that is available
# especially what is displayed for Keyman on Windows
# TODO clean up file once have what we want
def __init__(self, parent, kmp):
# kmp has name, version, packageID, area
if "keyboard" in kmp["name"].lower():
wintitle = kmp["name"]
else:
wintitle = _("{name} keyboard").format(name=kmp["name"])
Gtk.Dialog.__init__(self, wintitle, parent)
init_accel(self)
self.set_border_width(6)
packageDir = path.join(kmp['areapath'], kmp['packageID'])
kmp_json = path.join(packageDir, "kmp.json")
info, system, options, keyboards, files = parsemetadata(kmp_json)
if info is None:
# Dialog when invalid metadata
self.add_button(_("_Close"), Gtk.ResponseType.CLOSE)
grid = Gtk.Grid()
self.get_content_area().pack_start(grid, True, True, 12)
lbl_invalid_metadata = Gtk.Label()
lbl_invalid_metadata.set_text(_("ERROR: Keyboard metadata is damaged.\nPlease \"Uninstall\" and then \"Install\" the keyboard."))
lbl_invalid_metadata.set_halign(Gtk.Align.END)
grid.add(lbl_invalid_metadata)
self.resize(700, 200)
self.show_all()
return
kbdata = None
jsonfile = path.join(packageDir, kmp['packageID'] + ".json")
if path.isfile(jsonfile):
try:
with open(jsonfile, "r") as read_file:
kbdata = json.load(read_file)
except Exception as e:
logging.warning('Exception %s reading %s %s', type(e), jsonfile, e.args)
grid = Gtk.Grid()
# grid.set_column_homogeneous(True)
# kbdatapath = path.join("/usr/local/share/keyman", kmp["id"], kmp["id"] + ".json")
# Package info
lbl_pkg_name = Gtk.Label()
lbl_pkg_name.set_text(_("Package name: "))
lbl_pkg_name.set_halign(Gtk.Align.END)
grid.add(lbl_pkg_name)
prevlabel = lbl_pkg_name
label = Gtk.Label()
if secure_lookup(info, 'name', 'description'):
label.set_text(secure_lookup(info, 'name', 'description'))
label.set_halign(Gtk.Align.START)
label.set_selectable(True)
grid.attach_next_to(label, lbl_pkg_name, Gtk.PositionType.RIGHT, 1, 1)
lbl_pkg_id = Gtk.Label()
lbl_pkg_id.set_text(_("Package id: "))
lbl_pkg_id.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_pkg_id, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
prevlabel = lbl_pkg_id
label = Gtk.Label()
if secure_lookup(kmp, 'packageID'):
label.set_text(kmp['packageID'])
label.set_halign(Gtk.Align.START)
label.set_selectable(True)
grid.attach_next_to(label, lbl_pkg_id, Gtk.PositionType.RIGHT, 1, 1)
lbl_pkg_vrs = Gtk.Label()
lbl_pkg_vrs.set_text(_("Package version: "))
lbl_pkg_vrs.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_pkg_vrs, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
prevlabel = lbl_pkg_vrs
label = Gtk.Label()
if secure_lookup(info, 'version', 'description'):
label.set_text(secure_lookup(info, 'version', 'description'))
label.set_halign(Gtk.Align.START)
label.set_selectable(True)
grid.attach_next_to(label, lbl_pkg_vrs, Gtk.PositionType.RIGHT, 1, 1)
if secure_lookup(kbdata, 'description'):
lbl_pkg_desc = Gtk.Label()
lbl_pkg_desc.set_text(_("Package description: "))
lbl_pkg_desc.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_pkg_desc, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
prevlabel = lbl_pkg_desc
label = Gtk.Label()
label.set_text(kbdata['description'])
label.set_halign(Gtk.Align.START)
label.set_selectable(True)
label.set_line_wrap(80)
grid.attach_next_to(label, lbl_pkg_desc, Gtk.PositionType.RIGHT, 1, 1)
if secure_lookup(info, "author"):
lbl_pkg_auth = Gtk.Label()
lbl_pkg_auth.set_text(_("Package author: "))
lbl_pkg_auth.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_pkg_auth, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
prevlabel = lbl_pkg_auth
label = Gtk.Label()
if secure_lookup(info, 'author', 'description'):
label.set_text(secure_lookup(info, 'author', 'description'))
label.set_halign(Gtk.Align.START)
label.set_selectable(True)
grid.attach_next_to(label, lbl_pkg_auth, Gtk.PositionType.RIGHT, 1, 1)
if secure_lookup(info, "copyright"):
lbl_pkg_cpy = Gtk.Label()
lbl_pkg_cpy.set_text(_("Package copyright: "))
lbl_pkg_cpy.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_pkg_cpy, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
prevlabel = lbl_pkg_cpy
label = Gtk.Label()
if secure_lookup(info, 'copyright', 'description'):
label.set_text(secure_lookup(info, 'copyright', 'description'))
label.set_halign(Gtk.Align.START)
label.set_selectable(True)
grid.attach_next_to(label, lbl_pkg_cpy, Gtk.PositionType.RIGHT, 1, 1)
# Padding and full width horizontal divider
lbl_pad = Gtk.Label()
lbl_pad.set_text("")
lbl_pad.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_pad, prevlabel, Gtk.PositionType.BOTTOM, 2, 1)
prevlabel = lbl_pad
divider_pkg = Gtk.HSeparator()
grid.attach_next_to(divider_pkg, prevlabel, Gtk.PositionType.BOTTOM, 2, 1)
prevlabel = divider_pkg
# Keyboard info for each keyboard
if keyboards:
for kbd in keyboards:
kbdata = None
jsonfile = path.join(packageDir, kbd['id'] + ".json")
if path.isfile(jsonfile):
try:
with open(jsonfile, "r") as read_file:
kbdata = json.load(read_file)
except Exception as e:
logging.warning('Exception %s reading %s %s', type(e), jsonfile, e.args)
# start with padding
lbl_pad = Gtk.Label()
lbl_pad.set_text("")
lbl_pad.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_pad, prevlabel, Gtk.PositionType.BOTTOM, 2, 1)
prevlabel = lbl_pad
# show the icon somewhere
lbl_kbd_file = Gtk.Label()
lbl_kbd_file.set_text(_("Keyboard filename: "))
lbl_kbd_file.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_kbd_file, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
prevlabel = lbl_kbd_file
label = Gtk.Label()
label.set_text(path.join(packageDir, kbd['id'] + ".kmx"))
label.set_halign(Gtk.Align.START)
label.set_selectable(True)
grid.attach_next_to(label, lbl_kbd_file, Gtk.PositionType.RIGHT, 1, 1)
if kbdata and secure_lookup(kbdata, 'id') != secure_lookup(kmp, 'packageID'):
lbl_kbd_name = Gtk.Label()
lbl_kbd_name.set_text(_("Keyboard name: "))
lbl_kbd_name.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_kbd_name, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
prevlabel = lbl_kbd_name
label = Gtk.Label()
if secure_lookup(kbdata, 'name'):
label.set_text(kbdata['name'])
label.set_halign(Gtk.Align.START)
label.set_selectable(True)
grid.attach_next_to(label, lbl_kbd_name, Gtk.PositionType.RIGHT, 1, 1)
lbl_kbd_id = Gtk.Label()
lbl_kbd_id.set_text(_("Keyboard id: "))
lbl_kbd_id.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_kbd_id, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
prevlabel = lbl_kbd_id
label = Gtk.Label()
if secure_lookup(kbdata, 'id'):
label.set_text(kbdata['id'])
label.set_halign(Gtk.Align.START)
label.set_selectable(True)
grid.attach_next_to(label, lbl_kbd_id, Gtk.PositionType.RIGHT, 1, 1)
lbl_kbd_vrs = Gtk.Label()
lbl_kbd_vrs.set_text(_("Keyboard version: "))
lbl_kbd_vrs.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_kbd_vrs, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
prevlabel = lbl_kbd_vrs
label = Gtk.Label()
if secure_lookup(kbdata, 'version'):
label.set_text(kbdata['version'])
label.set_halign(Gtk.Align.START)
label.set_selectable(True)
grid.attach_next_to(label, lbl_kbd_vrs, Gtk.PositionType.RIGHT, 1, 1)
if secure_lookup(info, "author"):
lbl_kbd_auth = Gtk.Label()
lbl_kbd_auth.set_text(_("Keyboard author: "))
lbl_kbd_auth.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_kbd_auth, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
prevlabel = lbl_kbd_auth
label = Gtk.Label()
if secure_lookup(kbdata, 'authorName'):
label.set_text(kbdata['authorName'])
label.set_halign(Gtk.Align.START)
label.set_selectable(True)
grid.attach_next_to(label, lbl_kbd_auth, Gtk.PositionType.RIGHT, 1, 1)
lbl_kbd_lic = Gtk.Label()
lbl_kbd_lic.set_text(_("Keyboard license: "))
lbl_kbd_lic.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_kbd_lic, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
prevlabel = lbl_kbd_lic
label = Gtk.Label()
if secure_lookup(kbdata, 'license'):
label.set_text(kbdata['license'])
label.set_halign(Gtk.Align.START)
label.set_selectable(True)
grid.attach_next_to(label, lbl_kbd_lic, Gtk.PositionType.RIGHT, 1, 1)
lbl_kbd_desc = Gtk.Label()
lbl_kbd_desc.set_text(_("Keyboard description: "))
lbl_kbd_desc.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_kbd_desc, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
prevlabel = lbl_kbd_desc
label = Gtk.Label()
if secure_lookup(kbdata, 'description'):
label.set_text(kbdata['description'])
label.set_halign(Gtk.Align.START)
label.set_selectable(True)
label.set_line_wrap(80)
grid.attach_next_to(label, lbl_kbd_desc, Gtk.PositionType.RIGHT, 1, 1)
# Padding and full width horizontal divider
lbl_pad = Gtk.Label()
lbl_pad.set_text("")
lbl_pad.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_pad, prevlabel, Gtk.PositionType.BOTTOM, 2, 1)
prevlabel = lbl_pad
divider_pkg = Gtk.HSeparator()
grid.attach_next_to(divider_pkg, prevlabel, Gtk.PositionType.BOTTOM, 2, 1)
# label7 = Gtk.Label()
# label7.set_text(_("On Screen Keyboard: "))
# label7.set_halign(Gtk.Align.END)
# grid.attach_next_to(label7, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
# prevlabel = label7
# # label = Gtk.Label()
# # label.set_text(secure_lookup(info, 'version', 'description'))
# # label.set_halign(Gtk.Align.START)
# # label.set_selectable(True)
# # grid.attach_next_to(label, label7, Gtk.PositionType.RIGHT, 1, 1)
# label8 = Gtk.Label()
# label8.set_text(_("Documentation: "))
# label8.set_halign(Gtk.Align.END)
# grid.attach_next_to(label8, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
# prevlabel = label8
# #TODO need to know which area keyboard is installed in to show this
# # label = Gtk.Label()
# # welcome_file = path.join("/usr/local/share/doc/keyman", kmp["id"], "welcome.htm")
# # if path.isfile(welcome_file):
# # label.set_text(_("Installed"))
# # else:
# # label.set_text(_("Not installed"))
# # label.set_halign(Gtk.Align.START)
# # label.set_selectable(True)
# # grid.attach_next_to(label, label8, Gtk.PositionType.RIGHT, 1, 1)
# label9 = Gtk.Label()
# # stored in kmx
# label9.set_text(_("Message: "))
# label9.set_halign(Gtk.Align.END)
# grid.attach_next_to(label9, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
# prevlabel = label9
# label = Gtk.Label()
# label.set_line_wrap(True)
# label.set_text(
# "This keyboard is distributed under the MIT license (MIT) as described somewhere")
# #label.set_text(kmp["description"])
# label.set_halign(Gtk.Align.START)
# label.set_selectable(True)
# grid.attach_next_to(label, label9, Gtk.PositionType.RIGHT, 1, 1)
# Add an entire row of padding
lbl_pad = Gtk.Label()
lbl_pad.set_text("")
lbl_pad.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_pad, prevlabel, Gtk.PositionType.BOTTOM, 2, 1)
prevlabel = lbl_pad
# If it doesn't exist, generate QR code to share keyboard package
path_qr = path.join(tempfile.gettempdir(), kmp['packageID'] + '_qrcode.png')
url = KeymanComUrl + "/go/keyboard/" + kmp['packageID'] + "/share"
if not path.isfile(path_qr):
qr = qrcode.QRCode(
version=1,
error_correction=qrcode.constants.ERROR_CORRECT_H,
box_size=4,
border=4)
qr.add_data(url)
qr.make(fit=True)
img = qr.make_image()
img.save(path_qr)
# Display QR Code, spanning 2 columns so it will be centered
image = Gtk.Image()
image.set_from_file(path_qr)
grid.attach_next_to(image, prevlabel, Gtk.PositionType.BOTTOM, 2, 1)
lbl_share_kbd = Gtk.Label()
lbl_share_kbd.set_markup(_("Scan this code to load this keyboard\non another device or <a href='{uri}'>share online</a>").format(uri=url))
lbl_share_kbd.set_halign(Gtk.Align.CENTER)
lbl_share_kbd.set_line_wrap(True)
grid.attach_next_to(lbl_share_kbd, image, Gtk.PositionType.BOTTOM, 2, 1)
prevlabel = lbl_share_kbd
self.add_button(_("_Close"), Gtk.ResponseType.CLOSE)
self.get_content_area().pack_start(grid, True, True, 12)
self.resize(800, 450)
self.show_all()
| 45.917355
| 146
| 0.567075
| 1,964
| 16,668
| 4.568737
| 0.131874
| 0.047253
| 0.060849
| 0.069542
| 0.606152
| 0.581968
| 0.560236
| 0.450463
| 0.443776
| 0.356848
| 0
| 0.011093
| 0.329374
| 16,668
| 362
| 147
| 46.044199
| 0.791644
| 0.151548
| 0
| 0.370518
| 0
| 0.003984
| 0.064718
| 0
| 0
| 0
| 0
| 0.002762
| 0
| 1
| 0.003984
| false
| 0
| 0.039841
| 0
| 0.051793
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc9e17c4eb3df368c94743c5840b9e4da127a474
| 11,837
|
py
|
Python
|
codalab/lib/path_util.py
|
kl-chou/codalab-worksheets
|
101d1d9f86d3f7b8dae3b4fc3e2335fcf8d7c3d7
|
[
"Apache-2.0"
] | 236
|
2015-12-29T22:50:03.000Z
|
2022-03-28T21:12:34.000Z
|
codalab/lib/path_util.py
|
kl-chou/codalab-worksheets
|
101d1d9f86d3f7b8dae3b4fc3e2335fcf8d7c3d7
|
[
"Apache-2.0"
] | 2,628
|
2015-12-27T09:45:13.000Z
|
2022-03-30T16:18:25.000Z
|
codalab/lib/path_util.py
|
kl-chou/codalab-worksheets
|
101d1d9f86d3f7b8dae3b4fc3e2335fcf8d7c3d7
|
[
"Apache-2.0"
] | 87
|
2015-12-30T01:36:46.000Z
|
2022-03-08T15:21:30.000Z
|
"""
path_util contains helpers for working with local filesystem paths.
There are a few classes of methods provided here:
Functions to normalize paths and check that they are in normal form:
normalize, check_isvalid, check_isdir, check_isfile, path_is_url
Functions to list directories and to deal with subpaths of paths:
safe_join, get_relative_path, ls, recursive_ls
Functions to read files to compute hashes, write results to stdout, etc:
getmtime, get_size, hash_directory, hash_file_contents
Functions that modify that filesystem in controlled ways:
copy, make_directory, set_write_permissions, rename, remove
"""
import errno
import hashlib
import itertools
import os
import shutil
import subprocess
import sys
from typing import Optional
from codalab.common import precondition, UsageError, parse_linked_bundle_url
from codalab.lib import file_util
from codalab.worker.file_util import get_path_size
# Block sizes and canonical strings used when hashing files.
BLOCK_SIZE = 0x40000
FILE_PREFIX = 'file'
LINK_PREFIX = 'link'
def path_error(message, path):
"""
Raised when a user-supplied path causes an exception.
"""
return UsageError(message + ': ' + path)
################################################################################
# Functions to normalize paths and check that they are in normal form.
################################################################################
def normalize(path):
"""
Return the absolute path of the location specified by the given path.
This path is returned in a "canonical form", without ~'s, .'s, ..'s.
"""
if path == '-':
return '/dev/stdin'
elif path_is_url(path):
return path
else:
return os.path.abspath(os.path.expanduser(path))
def check_isvalid(path, fn_name):
"""
Raise a PreconditionViolation if the path is not absolute or normalized.
Raise a UsageError if the file at that path does not exist.
"""
precondition(os.path.isabs(path), '%s got relative path: %s' % (fn_name, path))
# Broken symbolic links are valid paths, so we use lexists instead of exists.
if not os.path.lexists(path):
raise path_error('%s got non-existent path:' % (fn_name,), path)
def check_isdir(path, fn_name):
"""
Check that the path is valid, then raise UsageError if the path is a file.
"""
check_isvalid(path, fn_name)
if not os.path.isdir(path):
raise path_error('%s got non-directory:' % (fn_name,), path)
def check_isfile(path, fn_name):
"""
Check that the path is valid, then raise UsageError if the path is a file.
"""
check_isvalid(path, fn_name)
if os.path.isdir(path):
raise path_error('%s got directory:' % (fn_name,), path)
def path_is_url(path):
if isinstance(path, str):
for prefix in ['http', 'https', 'ftp']:
if path.startswith(prefix + '://'):
return True
return False
################################################################################
# Functions to list directories and to deal with subpaths of paths.
################################################################################
def safe_join(*paths):
"""
Join a sequence of paths but filter out any that are empty. Used for targets.
Note that os.path.join has this functionality EXCEPT at the end of the list,
which causes problems when a target subpath is empty.
"""
return os.path.join(*[_f for _f in paths if _f])
def get_relative_path(root, path):
"""
Return the relative path from root to path, which should be nested under root.
"""
precondition(path.startswith(root), '%s is not under %s' % (path, root))
return path[len(root) :]
def ls(path):
"""
Return a (list of directories, list of files) in the given directory.
"""
check_isdir(path, 'ls')
(directories, files) = ([], [])
for file_name in os.listdir(path):
if os.path.isfile(os.path.join(path, file_name)):
files.append(file_name)
else:
directories.append(file_name)
return (directories, files)
def recursive_ls(path):
"""
Return a (list of directories, list of files) in the given directory and
all of its nested subdirectories. All paths returned are absolute.
Symlinks are returned in the list of files, even if they point to directories.
This makes it possible to distinguish between real and symlinked directories
when computing the hash of a directory. This function will NOT descend into
symlinked directories.
"""
check_isdir(path, 'recursive_ls')
(directories, files) = ([], [])
for (root, _, file_names) in os.walk(path):
assert os.path.isabs(root), 'Got relative root in os.walk: %s' % (root,)
directories.append(root)
for file_name in file_names:
files.append(os.path.join(root, file_name))
# os.walk ignores symlinks to directories, but we should count them as files.
# However, we can't used the followlinks parameter, because a) we don't want
# to descend into directories and b) we could end up in an infinite loop if
# we were to pass that flag. Instead, we handle symlinks here:
for subpath in os.listdir(root):
full_subpath = os.path.join(root, subpath)
if os.path.islink(full_subpath) and os.path.isdir(full_subpath):
files.append(full_subpath)
return (directories, files)
################################################################################
# Functions to read files to compute hashes, write results to stdout, etc.
################################################################################
def getmtime(path):
"""
Like os.path.getmtime, but does not follow symlinks.
"""
return os.lstat(path).st_mtime
def get_size(path, dirs_and_files=None):
"""
Get the size (in bytes) of the file or directory at or under the given path.
Does not include symlinked files and directories.
"""
if parse_linked_bundle_url(path).uses_beam:
return get_path_size(path)
if os.path.islink(path) or not os.path.isdir(path):
return os.lstat(path).st_size
dirs_and_files = dirs_and_files or recursive_ls(path)
return sum(os.lstat(path).st_size for path in itertools.chain(*dirs_and_files))
def hash_directory(path, dirs_and_files=None):
"""
Return the hash of the contents of the folder at the given path.
This hash is independent of the path itself - if you were to move the
directory and call get_hash again, you would get the same result.
"""
if parse_linked_bundle_url(path).uses_beam:
# On Azure Blob Storage, we just use the directory size for the hashed contents.
return get_size(path)
(directories, files) = dirs_and_files or recursive_ls(path)
# Sort and then hash all directories and then compute a hash of the hashes.
# This two-level hash is necessary so that the overall hash is unambiguous -
# if we updated directory_hash with the directory names themselves, then
# we'd be hashing the concatenation of these names, which could be generated
# in multiple ways.
directory_hash = hashlib.sha1()
for directory in sorted(directories):
relative_path = get_relative_path(path, directory)
directory_hash.update(hashlib.sha1(relative_path.encode()).hexdigest().encode())
# Use a similar two-level hashing scheme for all files, but incorporate a
# hash of both the file name and contents.
file_hash = hashlib.sha1()
for file_name in sorted(files):
relative_path = get_relative_path(path, file_name)
file_hash.update(hashlib.sha1(relative_path.encode()).hexdigest().encode())
file_hash.update(hash_file_contents(file_name).encode())
# Return a hash of the two hashes.
overall_hash = hashlib.sha1(directory_hash.hexdigest().encode())
overall_hash.update(file_hash.hexdigest().encode())
return overall_hash.hexdigest()
def hash_file_contents(path):
"""
Return the hash of the file's contents, read in blocks of size BLOCK_SIZE.
"""
message = 'hash_file called with relative path: %s' % (path,)
precondition(os.path.isabs(path), message)
if os.path.islink(path):
contents_hash = hashlib.sha1(LINK_PREFIX.encode())
contents_hash.update(os.readlink(path).encode())
else:
contents_hash = hashlib.sha1(FILE_PREFIX.encode())
with open(path, 'rb') as file_handle:
while True:
data = file_handle.read(BLOCK_SIZE)
if not data:
break
contents_hash.update(data)
return contents_hash.hexdigest()
################################################################################
# Functions that modify that filesystem in controlled ways.
################################################################################
def copy(source_path: str, dest_path: str, follow_symlinks: Optional[bool] = False):
"""
Copy |source_path| to |dest_path|.
Assume dest_path doesn't exist.
|follow_symlinks|: whether to follow symlinks
Note: this only works in Linux.
"""
if os.path.exists(dest_path):
raise path_error('already exists', dest_path)
if source_path == '/dev/stdin':
with open(dest_path, 'wb') as dest:
file_util.copy(
sys.stdin,
dest,
autoflush=False,
print_status='Copying %s to %s' % (source_path, dest_path),
)
else:
if not follow_symlinks and os.path.islink(source_path):
raise path_error('not following symlinks', source_path)
if not os.path.exists(source_path):
raise path_error('does not exist', source_path)
command = [
'rsync',
'-pr%s' % ('L' if follow_symlinks else 'l'),
source_path
+ ('/' if not os.path.islink(source_path) and os.path.isdir(source_path) else ''),
dest_path,
]
if subprocess.call(command) != 0:
raise path_error('Unable to copy %s to' % source_path, dest_path)
def make_directory(path):
"""
Create the directory at the given path.
"""
try:
os.mkdir(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
check_isdir(path, 'make_directory')
def set_write_permissions(path):
# Recursively give give write permissions to |path|, so that we can operate
# on it.
if not os.path.islink(path): # Don't need write permissions if symlink
subprocess.call(['chmod', '-R', 'u+w', path])
def rename(old_path, new_path):
# Allow write permissions, or else the move will fail.
set_write_permissions(old_path)
subprocess.call(['mv', old_path, new_path])
def remove(path):
"""
Remove the given path, whether it is a directory, file, or link.
"""
if parse_linked_bundle_url(path).uses_beam:
from apache_beam.io.filesystems import FileSystems
if not FileSystems.exists(path):
FileSystems.delete([path])
return
check_isvalid(path, 'remove')
set_write_permissions(path) # Allow permissions
if os.path.islink(path):
os.unlink(path)
elif os.path.isdir(path):
try:
shutil.rmtree(path)
except shutil.Error:
pass
else:
os.remove(path)
if os.path.exists(path):
print('Failed to remove %s' % path)
def soft_link(source, path):
"""
Create a symbolic link to source at path. This is basically the same as doing "ln -s $source $path"
"""
check_isvalid(source, 'soft_link')
os.symlink(source, path)
| 35.334328
| 103
| 0.630396
| 1,591
| 11,837
| 4.567568
| 0.212445
| 0.023944
| 0.008807
| 0.014862
| 0.22843
| 0.167057
| 0.152745
| 0.148892
| 0.112013
| 0.088069
| 0
| 0.001529
| 0.226662
| 11,837
| 334
| 104
| 35.44012
| 0.792331
| 0.345696
| 0
| 0.107143
| 0
| 0
| 0.059032
| 0
| 0
| 0
| 0.001036
| 0
| 0.005952
| 1
| 0.119048
| false
| 0.005952
| 0.071429
| 0
| 0.297619
| 0.011905
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc9e3eb40f996353595bada2ec265eba3f86bf6f
| 25,948
|
py
|
Python
|
statsmodels/regression/tests/test_glsar_gretl.py
|
aliavni/statsmodels
|
ef5d57a8d45de76a895e9401705280d558d688ad
|
[
"BSD-3-Clause"
] | 1
|
2022-01-24T15:17:37.000Z
|
2022-01-24T15:17:37.000Z
|
statsmodels/regression/tests/test_glsar_gretl.py
|
aliavni/statsmodels
|
ef5d57a8d45de76a895e9401705280d558d688ad
|
[
"BSD-3-Clause"
] | null | null | null |
statsmodels/regression/tests/test_glsar_gretl.py
|
aliavni/statsmodels
|
ef5d57a8d45de76a895e9401705280d558d688ad
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Tests of GLSAR and diagnostics against Gretl
Created on Thu Feb 02 21:15:47 2012
Author: Josef Perktold
License: BSD-3
"""
import os
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal,
assert_allclose, assert_array_less)
from statsmodels.regression.linear_model import OLS, GLSAR
from statsmodels.tools.tools import add_constant
from statsmodels.datasets import macrodata
import statsmodels.stats.sandwich_covariance as sw
import statsmodels.stats.diagnostic as smsdia
import statsmodels.stats.outliers_influence as oi
def compare_ftest(contrast_res, other, decimal=(5,4)):
assert_almost_equal(contrast_res.fvalue, other[0], decimal=decimal[0])
assert_almost_equal(contrast_res.pvalue, other[1], decimal=decimal[1])
assert_equal(contrast_res.df_num, other[2])
assert_equal(contrast_res.df_denom, other[3])
assert_equal("f", other[4])
class TestGLSARGretl:
def test_all(self):
d = macrodata.load_pandas().data
#import datasetswsm.greene as g
#d = g.load('5-1')
#growth rates
gs_l_realinv = 400 * np.diff(np.log(d['realinv'].values))
gs_l_realgdp = 400 * np.diff(np.log(d['realgdp'].values))
#simple diff, not growthrate, I want heteroscedasticity later for testing
endogd = np.diff(d['realinv'])
exogd = add_constant(np.c_[np.diff(d['realgdp'].values), d['realint'][:-1].values])
endogg = gs_l_realinv
exogg = add_constant(np.c_[gs_l_realgdp, d['realint'][:-1].values])
res_ols = OLS(endogg, exogg).fit()
#print res_ols.params
mod_g1 = GLSAR(endogg, exogg, rho=-0.108136)
res_g1 = mod_g1.fit()
#print res_g1.params
mod_g2 = GLSAR(endogg, exogg, rho=-0.108136) #-0.1335859) from R
res_g2 = mod_g2.iterative_fit(maxiter=5)
#print res_g2.params
rho = -0.108136
# coefficient std. error t-ratio p-value 95% CONFIDENCE INTERVAL
partable = np.array([
[-9.50990, 0.990456, -9.602, 3.65e-018, -11.4631, -7.55670], # ***
[ 4.37040, 0.208146, 21.00, 2.93e-052, 3.95993, 4.78086], # ***
[-0.579253, 0.268009, -2.161, 0.0319, -1.10777, -0.0507346]]) # **
#Statistics based on the rho-differenced data:
result_gretl_g1 = dict(
endog_mean = ("Mean dependent var", 3.113973),
endog_std = ("S.D. dependent var", 18.67447),
ssr = ("Sum squared resid", 22530.90),
mse_resid_sqrt = ("S.E. of regression", 10.66735),
rsquared = ("R-squared", 0.676973),
rsquared_adj = ("Adjusted R-squared", 0.673710),
fvalue = ("F(2, 198)", 221.0475),
f_pvalue = ("P-value(F)", 3.56e-51),
resid_acf1 = ("rho", -0.003481),
dw = ("Durbin-Watson", 1.993858))
#fstatistic, p-value, df1, df2
reset_2_3 = [5.219019, 0.00619, 2, 197, "f"]
reset_2 = [7.268492, 0.00762, 1, 198, "f"]
reset_3 = [5.248951, 0.023, 1, 198, "f"]
#LM-statistic, p-value, df
arch_4 = [7.30776, 0.120491, 4, "chi2"]
#multicollinearity
vif = [1.002, 1.002]
cond_1norm = 6862.0664
determinant = 1.0296049e+009
reciprocal_condition_number = 0.013819244
#Chi-square(2): test-statistic, pvalue, df
normality = [20.2792, 3.94837e-005, 2]
#tests
res = res_g1 #with rho from Gretl
#basic
assert_almost_equal(res.params, partable[:,0], 4)
assert_almost_equal(res.bse, partable[:,1], 6)
assert_almost_equal(res.tvalues, partable[:,2], 2)
assert_almost_equal(res.ssr, result_gretl_g1['ssr'][1], decimal=2)
#assert_almost_equal(res.llf, result_gretl_g1['llf'][1], decimal=7) #not in gretl
#assert_almost_equal(res.rsquared, result_gretl_g1['rsquared'][1], decimal=7) #FAIL
#assert_almost_equal(res.rsquared_adj, result_gretl_g1['rsquared_adj'][1], decimal=7) #FAIL
assert_almost_equal(np.sqrt(res.mse_resid), result_gretl_g1['mse_resid_sqrt'][1], decimal=5)
assert_almost_equal(res.fvalue, result_gretl_g1['fvalue'][1], decimal=4)
assert_allclose(res.f_pvalue,
result_gretl_g1['f_pvalue'][1],
rtol=1e-2)
#assert_almost_equal(res.durbin_watson, result_gretl_g1['dw'][1], decimal=7) #TODO
#arch
#sm_arch = smsdia.acorr_lm(res.wresid**2, maxlag=4, autolag=None)
sm_arch = smsdia.het_arch(res.wresid, nlags=4)
assert_almost_equal(sm_arch[0], arch_4[0], decimal=4)
assert_almost_equal(sm_arch[1], arch_4[1], decimal=6)
#tests
res = res_g2 #with estimated rho
#estimated lag coefficient
assert_almost_equal(res.model.rho, rho, decimal=3)
#basic
assert_almost_equal(res.params, partable[:,0], 4)
assert_almost_equal(res.bse, partable[:,1], 3)
assert_almost_equal(res.tvalues, partable[:,2], 2)
assert_almost_equal(res.ssr, result_gretl_g1['ssr'][1], decimal=2)
#assert_almost_equal(res.llf, result_gretl_g1['llf'][1], decimal=7) #not in gretl
#assert_almost_equal(res.rsquared, result_gretl_g1['rsquared'][1], decimal=7) #FAIL
#assert_almost_equal(res.rsquared_adj, result_gretl_g1['rsquared_adj'][1], decimal=7) #FAIL
assert_almost_equal(np.sqrt(res.mse_resid), result_gretl_g1['mse_resid_sqrt'][1], decimal=5)
assert_almost_equal(res.fvalue, result_gretl_g1['fvalue'][1], decimal=0)
assert_almost_equal(res.f_pvalue, result_gretl_g1['f_pvalue'][1], decimal=6)
#assert_almost_equal(res.durbin_watson, result_gretl_g1['dw'][1], decimal=7) #TODO
c = oi.reset_ramsey(res, degree=2)
compare_ftest(c, reset_2, decimal=(2,4))
c = oi.reset_ramsey(res, degree=3)
compare_ftest(c, reset_2_3, decimal=(2,4))
#arch
#sm_arch = smsdia.acorr_lm(res.wresid**2, maxlag=4, autolag=None)
sm_arch = smsdia.het_arch(res.wresid, nlags=4)
assert_almost_equal(sm_arch[0], arch_4[0], decimal=1)
assert_almost_equal(sm_arch[1], arch_4[1], decimal=2)
'''
Performing iterative calculation of rho...
ITER RHO ESS
1 -0.10734 22530.9
2 -0.10814 22530.9
Model 4: Cochrane-Orcutt, using observations 1959:3-2009:3 (T = 201)
Dependent variable: ds_l_realinv
rho = -0.108136
coefficient std. error t-ratio p-value
-------------------------------------------------------------
const -9.50990 0.990456 -9.602 3.65e-018 ***
ds_l_realgdp 4.37040 0.208146 21.00 2.93e-052 ***
realint_1 -0.579253 0.268009 -2.161 0.0319 **
Statistics based on the rho-differenced data:
Mean dependent var 3.113973 S.D. dependent var 18.67447
Sum squared resid 22530.90 S.E. of regression 10.66735
R-squared 0.676973 Adjusted R-squared 0.673710
F(2, 198) 221.0475 P-value(F) 3.56e-51
rho -0.003481 Durbin-Watson 1.993858
'''
'''
RESET test for specification (squares and cubes)
Test statistic: F = 5.219019,
with p-value = P(F(2,197) > 5.21902) = 0.00619
RESET test for specification (squares only)
Test statistic: F = 7.268492,
with p-value = P(F(1,198) > 7.26849) = 0.00762
RESET test for specification (cubes only)
Test statistic: F = 5.248951,
with p-value = P(F(1,198) > 5.24895) = 0.023:
'''
'''
Test for ARCH of order 4
coefficient std. error t-ratio p-value
--------------------------------------------------------
alpha(0) 97.0386 20.3234 4.775 3.56e-06 ***
alpha(1) 0.176114 0.0714698 2.464 0.0146 **
alpha(2) -0.0488339 0.0724981 -0.6736 0.5014
alpha(3) -0.0705413 0.0737058 -0.9571 0.3397
alpha(4) 0.0384531 0.0725763 0.5298 0.5968
Null hypothesis: no ARCH effect is present
Test statistic: LM = 7.30776
with p-value = P(Chi-square(4) > 7.30776) = 0.120491:
'''
'''
Variance Inflation Factors
Minimum possible value = 1.0
Values > 10.0 may indicate a collinearity problem
ds_l_realgdp 1.002
realint_1 1.002
VIF(j) = 1/(1 - R(j)^2), where R(j) is the multiple correlation coefficient
between variable j and the other independent variables
Properties of matrix X'X:
1-norm = 6862.0664
Determinant = 1.0296049e+009
Reciprocal condition number = 0.013819244
'''
'''
Test for ARCH of order 4 -
Null hypothesis: no ARCH effect is present
Test statistic: LM = 7.30776
with p-value = P(Chi-square(4) > 7.30776) = 0.120491
Test of common factor restriction -
Null hypothesis: restriction is acceptable
Test statistic: F(2, 195) = 0.426391
with p-value = P(F(2, 195) > 0.426391) = 0.653468
Test for normality of residual -
Null hypothesis: error is normally distributed
Test statistic: Chi-square(2) = 20.2792
with p-value = 3.94837e-005:
'''
#no idea what this is
'''
Augmented regression for common factor test
OLS, using observations 1959:3-2009:3 (T = 201)
Dependent variable: ds_l_realinv
coefficient std. error t-ratio p-value
---------------------------------------------------------------
const -10.9481 1.35807 -8.062 7.44e-014 ***
ds_l_realgdp 4.28893 0.229459 18.69 2.40e-045 ***
realint_1 -0.662644 0.334872 -1.979 0.0492 **
ds_l_realinv_1 -0.108892 0.0715042 -1.523 0.1294
ds_l_realgdp_1 0.660443 0.390372 1.692 0.0923 *
realint_2 0.0769695 0.341527 0.2254 0.8219
Sum of squared residuals = 22432.8
Test of common factor restriction
Test statistic: F(2, 195) = 0.426391, with p-value = 0.653468
'''
################ with OLS, HAC errors
#Model 5: OLS, using observations 1959:2-2009:3 (T = 202)
#Dependent variable: ds_l_realinv
#HAC standard errors, bandwidth 4 (Bartlett kernel)
#coefficient std. error t-ratio p-value 95% CONFIDENCE INTERVAL
#for confidence interval t(199, 0.025) = 1.972
partable = np.array([
[-9.48167, 1.17709, -8.055, 7.17e-014, -11.8029, -7.16049], # ***
[4.37422, 0.328787, 13.30, 2.62e-029, 3.72587, 5.02258], #***
[-0.613997, 0.293619, -2.091, 0.0378, -1.19300, -0.0349939]]) # **
result_gretl_g1 = dict(
endog_mean = ("Mean dependent var", 3.257395),
endog_std = ("S.D. dependent var", 18.73915),
ssr = ("Sum squared resid", 22799.68),
mse_resid_sqrt = ("S.E. of regression", 10.70380),
rsquared = ("R-squared", 0.676978),
rsquared_adj = ("Adjusted R-squared", 0.673731),
fvalue = ("F(2, 199)", 90.79971),
f_pvalue = ("P-value(F)", 9.53e-29),
llf = ("Log-likelihood", -763.9752),
aic = ("Akaike criterion", 1533.950),
bic = ("Schwarz criterion", 1543.875),
hqic = ("Hannan-Quinn", 1537.966),
resid_acf1 = ("rho", -0.107341),
dw = ("Durbin-Watson", 2.213805))
linear_logs = [1.68351, 0.430953, 2, "chi2"]
#for logs: dropping 70 nan or incomplete observations, T=133
#(res_ols.model.exog <=0).any(1).sum() = 69 ?not 70
linear_squares = [7.52477, 0.0232283, 2, "chi2"]
#Autocorrelation, Breusch-Godfrey test for autocorrelation up to order 4
lm_acorr4 = [1.17928, 0.321197, 4, 195, "F"]
lm2_acorr4 = [4.771043, 0.312, 4, "chi2"]
acorr_ljungbox4 = [5.23587, 0.264, 4, "chi2"]
#break
cusum_Harvey_Collier = [0.494432, 0.621549, 198, "t"] #stats.t.sf(0.494432, 198)*2
#see cusum results in files
break_qlr = [3.01985, 0.1, 3, 196, "maxF"] #TODO check this, max at 2001:4
break_chow = [13.1897, 0.00424384, 3, "chi2"] # break at 1984:1
arch_4 = [3.43473, 0.487871, 4, "chi2"]
normality = [23.962, 0.00001, 2, "chi2"]
het_white = [33.503723, 0.000003, 5, "chi2"]
het_breusch_pagan = [1.302014, 0.521520, 2, "chi2"] #TODO: not available
het_breusch_pagan_konker = [0.709924, 0.701200, 2, "chi2"]
reset_2_3 = [5.219019, 0.00619, 2, 197, "f"]
reset_2 = [7.268492, 0.00762, 1, 198, "f"]
reset_3 = [5.248951, 0.023, 1, 198, "f"] #not available
cond_1norm = 5984.0525
determinant = 7.1087467e+008
reciprocal_condition_number = 0.013826504
vif = [1.001, 1.001]
names = 'date residual leverage influence DFFITS'.split()
cur_dir = os.path.abspath(os.path.dirname(__file__))
fpath = os.path.join(cur_dir, 'results/leverage_influence_ols_nostars.txt')
lev = np.genfromtxt(fpath, skip_header=3, skip_footer=1,
converters={0:lambda s: s})
#either numpy 1.6 or python 3.2 changed behavior
if np.isnan(lev[-1]['f1']):
lev = np.genfromtxt(fpath, skip_header=3, skip_footer=2,
converters={0:lambda s: s})
lev.dtype.names = names
res = res_ols #for easier copying
cov_hac = sw.cov_hac_simple(res, nlags=4, use_correction=False)
bse_hac = sw.se_cov(cov_hac)
assert_almost_equal(res.params, partable[:,0], 5)
assert_almost_equal(bse_hac, partable[:,1], 5)
#TODO
assert_almost_equal(res.ssr, result_gretl_g1['ssr'][1], decimal=2)
assert_almost_equal(res.llf, result_gretl_g1['llf'][1], decimal=4) #not in gretl
assert_almost_equal(res.rsquared, result_gretl_g1['rsquared'][1], decimal=6) #FAIL
assert_almost_equal(res.rsquared_adj, result_gretl_g1['rsquared_adj'][1], decimal=6) #FAIL
assert_almost_equal(np.sqrt(res.mse_resid), result_gretl_g1['mse_resid_sqrt'][1], decimal=5)
#f-value is based on cov_hac I guess
#res2 = res.get_robustcov_results(cov_type='HC1')
# TODO: fvalue differs from Gretl, trying any of the HCx
#assert_almost_equal(res2.fvalue, result_gretl_g1['fvalue'][1], decimal=0) #FAIL
#assert_approx_equal(res.f_pvalue, result_gretl_g1['f_pvalue'][1], significant=1) #FAIL
#assert_almost_equal(res.durbin_watson, result_gretl_g1['dw'][1], decimal=7) #TODO
c = oi.reset_ramsey(res, degree=2)
compare_ftest(c, reset_2, decimal=(6,5))
c = oi.reset_ramsey(res, degree=3)
compare_ftest(c, reset_2_3, decimal=(6,5))
linear_sq = smsdia.linear_lm(res.resid, res.model.exog)
assert_almost_equal(linear_sq[0], linear_squares[0], decimal=6)
assert_almost_equal(linear_sq[1], linear_squares[1], decimal=7)
hbpk = smsdia.het_breuschpagan(res.resid, res.model.exog)
assert_almost_equal(hbpk[0], het_breusch_pagan_konker[0], decimal=6)
assert_almost_equal(hbpk[1], het_breusch_pagan_konker[1], decimal=6)
hw = smsdia.het_white(res.resid, res.model.exog)
assert_almost_equal(hw[:2], het_white[:2], 6)
#arch
#sm_arch = smsdia.acorr_lm(res.resid**2, maxlag=4, autolag=None)
sm_arch = smsdia.het_arch(res.resid, nlags=4)
assert_almost_equal(sm_arch[0], arch_4[0], decimal=5)
assert_almost_equal(sm_arch[1], arch_4[1], decimal=6)
vif2 = [oi.variance_inflation_factor(res.model.exog, k) for k in [1,2]]
infl = oi.OLSInfluence(res_ols)
#print np.max(np.abs(lev['DFFITS'] - infl.dffits[0]))
#print np.max(np.abs(lev['leverage'] - infl.hat_matrix_diag))
#print np.max(np.abs(lev['influence'] - infl.influence)) #just added this based on Gretl
#just rough test, low decimal in Gretl output,
assert_almost_equal(lev['residual'], res.resid, decimal=3)
assert_almost_equal(lev['DFFITS'], infl.dffits[0], decimal=3)
assert_almost_equal(lev['leverage'], infl.hat_matrix_diag, decimal=3)
assert_almost_equal(lev['influence'], infl.influence, decimal=4)
def test_GLSARlag():
#test that results for lag>1 is close to lag=1, and smaller ssr
from statsmodels.datasets import macrodata
d2 = macrodata.load_pandas().data
g_gdp = 400*np.diff(np.log(d2['realgdp'].values))
g_inv = 400*np.diff(np.log(d2['realinv'].values))
exogg = add_constant(np.c_[g_gdp, d2['realint'][:-1].values], prepend=False)
mod1 = GLSAR(g_inv, exogg, 1)
res1 = mod1.iterative_fit(5)
mod4 = GLSAR(g_inv, exogg, 4)
res4 = mod4.iterative_fit(10)
assert_array_less(np.abs(res1.params / res4.params - 1), 0.03)
assert_array_less(res4.ssr, res1.ssr)
assert_array_less(np.abs(res4.bse / res1.bse) - 1, 0.015)
assert_array_less(np.abs((res4.fittedvalues / res1.fittedvalues - 1).mean()),
0.015)
assert_equal(len(mod4.rho), 4)
if __name__ == '__main__':
t = TestGLSARGretl()
t.test_all()
'''
Model 5: OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: ds_l_realinv
HAC standard errors, bandwidth 4 (Bartlett kernel)
coefficient std. error t-ratio p-value
-------------------------------------------------------------
const -9.48167 1.17709 -8.055 7.17e-014 ***
ds_l_realgdp 4.37422 0.328787 13.30 2.62e-029 ***
realint_1 -0.613997 0.293619 -2.091 0.0378 **
Mean dependent var 3.257395 S.D. dependent var 18.73915
Sum squared resid 22799.68 S.E. of regression 10.70380
R-squared 0.676978 Adjusted R-squared 0.673731
F(2, 199) 90.79971 P-value(F) 9.53e-29
Log-likelihood -763.9752 Akaike criterion 1533.950
Schwarz criterion 1543.875 Hannan-Quinn 1537.966
rho -0.107341 Durbin-Watson 2.213805
QLR test for structural break -
Null hypothesis: no structural break
Test statistic: max F(3, 196) = 3.01985 at observation 2001:4
(10 percent critical value = 4.09)
Non-linearity test (logs) -
Null hypothesis: relationship is linear
Test statistic: LM = 1.68351
with p-value = P(Chi-square(2) > 1.68351) = 0.430953
Non-linearity test (squares) -
Null hypothesis: relationship is linear
Test statistic: LM = 7.52477
with p-value = P(Chi-square(2) > 7.52477) = 0.0232283
LM test for autocorrelation up to order 4 -
Null hypothesis: no autocorrelation
Test statistic: LMF = 1.17928
with p-value = P(F(4,195) > 1.17928) = 0.321197
CUSUM test for parameter stability -
Null hypothesis: no change in parameters
Test statistic: Harvey-Collier t(198) = 0.494432
with p-value = P(t(198) > 0.494432) = 0.621549
Chow test for structural break at observation 1984:1 -
Null hypothesis: no structural break
Asymptotic test statistic: Chi-square(3) = 13.1897
with p-value = 0.00424384
Test for ARCH of order 4 -
Null hypothesis: no ARCH effect is present
Test statistic: LM = 3.43473
with p-value = P(Chi-square(4) > 3.43473) = 0.487871:
#ANOVA
Analysis of Variance:
Sum of squares df Mean square
Regression 47782.7 2 23891.3
Residual 22799.7 199 114.571
Total 70582.3 201 351.156
R^2 = 47782.7 / 70582.3 = 0.676978
F(2, 199) = 23891.3 / 114.571 = 208.528 [p-value 1.47e-049]
#LM-test autocorrelation
Breusch-Godfrey test for autocorrelation up to order 4
OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: uhat
coefficient std. error t-ratio p-value
------------------------------------------------------------
const 0.0640964 1.06719 0.06006 0.9522
ds_l_realgdp -0.0456010 0.217377 -0.2098 0.8341
realint_1 0.0511769 0.293136 0.1746 0.8616
uhat_1 -0.104707 0.0719948 -1.454 0.1475
uhat_2 -0.00898483 0.0742817 -0.1210 0.9039
uhat_3 0.0837332 0.0735015 1.139 0.2560
uhat_4 -0.0636242 0.0737363 -0.8629 0.3893
Unadjusted R-squared = 0.023619
Test statistic: LMF = 1.179281,
with p-value = P(F(4,195) > 1.17928) = 0.321
Alternative statistic: TR^2 = 4.771043,
with p-value = P(Chi-square(4) > 4.77104) = 0.312
Ljung-Box Q' = 5.23587,
with p-value = P(Chi-square(4) > 5.23587) = 0.264:
RESET test for specification (squares and cubes)
Test statistic: F = 5.219019,
with p-value = P(F(2,197) > 5.21902) = 0.00619
RESET test for specification (squares only)
Test statistic: F = 7.268492,
with p-value = P(F(1,198) > 7.26849) = 0.00762
RESET test for specification (cubes only)
Test statistic: F = 5.248951,
with p-value = P(F(1,198) > 5.24895) = 0.023
#heteroscedasticity White
White's test for heteroskedasticity
OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: uhat^2
coefficient std. error t-ratio p-value
-------------------------------------------------------------
const 104.920 21.5848 4.861 2.39e-06 ***
ds_l_realgdp -29.7040 6.24983 -4.753 3.88e-06 ***
realint_1 -6.93102 6.95607 -0.9964 0.3203
sq_ds_l_realg 4.12054 0.684920 6.016 8.62e-09 ***
X2_X3 2.89685 1.38571 2.091 0.0379 **
sq_realint_1 0.662135 1.10919 0.5970 0.5512
Unadjusted R-squared = 0.165860
Test statistic: TR^2 = 33.503723,
with p-value = P(Chi-square(5) > 33.503723) = 0.000003:
#heteroscedasticity Breusch-Pagan (original)
Breusch-Pagan test for heteroskedasticity
OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: scaled uhat^2
coefficient std. error t-ratio p-value
-------------------------------------------------------------
const 1.09468 0.192281 5.693 4.43e-08 ***
ds_l_realgdp -0.0323119 0.0386353 -0.8363 0.4040
realint_1 0.00410778 0.0512274 0.08019 0.9362
Explained sum of squares = 2.60403
Test statistic: LM = 1.302014,
with p-value = P(Chi-square(2) > 1.302014) = 0.521520
#heteroscedasticity Breusch-Pagan Koenker
Breusch-Pagan test for heteroskedasticity
OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: scaled uhat^2 (Koenker robust variant)
coefficient std. error t-ratio p-value
------------------------------------------------------------
const 10.6870 21.7027 0.4924 0.6230
ds_l_realgdp -3.64704 4.36075 -0.8363 0.4040
realint_1 0.463643 5.78202 0.08019 0.9362
Explained sum of squares = 33174.2
Test statistic: LM = 0.709924,
with p-value = P(Chi-square(2) > 0.709924) = 0.701200
########## forecast
#forecast mean y
For 95% confidence intervals, t(199, 0.025) = 1.972
Obs ds_l_realinv prediction std. error 95% interval
2008:3 -7.134492 -17.177905 2.946312 -22.987904 - -11.367905
2008:4 -27.665860 -36.294434 3.036851 -42.282972 - -30.305896
2009:1 -70.239280 -44.018178 4.007017 -51.919841 - -36.116516
2009:2 -27.024588 -12.284842 1.427414 -15.099640 - -9.470044
2009:3 8.078897 4.483669 1.315876 1.888819 - 7.078520
Forecast evaluation statistics
Mean Error -3.7387
Mean Squared Error 218.61
Root Mean Squared Error 14.785
Mean Absolute Error 12.646
Mean Percentage Error -7.1173
Mean Absolute Percentage Error -43.867
Theil's U 0.4365
Bias proportion, UM 0.06394
Regression proportion, UR 0.13557
Disturbance proportion, UD 0.80049
#forecast actual y
For 95% confidence intervals, t(199, 0.025) = 1.972
Obs ds_l_realinv prediction std. error 95% interval
2008:3 -7.134492 -17.177905 11.101892 -39.070353 - 4.714544
2008:4 -27.665860 -36.294434 11.126262 -58.234939 - -14.353928
2009:1 -70.239280 -44.018178 11.429236 -66.556135 - -21.480222
2009:2 -27.024588 -12.284842 10.798554 -33.579120 - 9.009436
2009:3 8.078897 4.483669 10.784377 -16.782652 - 25.749991
Forecast evaluation statistics
Mean Error -3.7387
Mean Squared Error 218.61
Root Mean Squared Error 14.785
Mean Absolute Error 12.646
Mean Percentage Error -7.1173
Mean Absolute Percentage Error -43.867
Theil's U 0.4365
Bias proportion, UM 0.06394
Regression proportion, UR 0.13557
Disturbance proportion, UD 0.80049
'''
| 40.1051
| 100
| 0.581509
| 3,662
| 25,948
| 4.001092
| 0.216821
| 0.040131
| 0.056852
| 0.03549
| 0.557603
| 0.497475
| 0.440622
| 0.4138
| 0.377218
| 0.350669
| 0
| 0.194039
| 0.283606
| 25,948
| 646
| 101
| 40.167183
| 0.594169
| 0.113535
| 0
| 0.175824
| 0
| 0
| 0.064351
| 0.003911
| 0
| 0
| 0
| 0.003096
| 0.269231
| 1
| 0.016484
| false
| 0
| 0.054945
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fc9fa91745bf91e0ba0d83de869daec634544f40
| 377
|
py
|
Python
|
pondus/backends/__init__.py
|
enicklas/pondus
|
c94edce0351697c96f2ad046e8f602448d2e0df0
|
[
"MIT"
] | 1
|
2021-12-20T18:18:52.000Z
|
2021-12-20T18:18:52.000Z
|
pondus/backends/__init__.py
|
enicklas/pondus
|
c94edce0351697c96f2ad046e8f602448d2e0df0
|
[
"MIT"
] | null | null | null |
pondus/backends/__init__.py
|
enicklas/pondus
|
c94edce0351697c96f2ad046e8f602448d2e0df0
|
[
"MIT"
] | 2
|
2021-12-20T18:18:57.000Z
|
2022-01-11T10:28:22.000Z
|
# -*- coding: UTF-8 -*-
"""
This file is part of Pondus, a personal weight manager.
Copyright (C) 2011 Eike Nicklas <eike@ephys.de>
This program is free software licensed under the MIT license. For details
see LICENSE or http://www.opensource.org/licenses/mit-license.php
"""
__all__ = ['csv_backend', 'sportstracker_backend', 'xml_backend',
'xml_backend_old']
| 29
| 73
| 0.713528
| 54
| 377
| 4.814815
| 0.814815
| 0.076923
| 0.130769
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015823
| 0.161804
| 377
| 12
| 74
| 31.416667
| 0.806962
| 0.710875
| 0
| 0
| 0
| 0
| 0.574257
| 0.207921
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fca036e2b91d7c4599177ff08add5e2065d64d53
| 1,053
|
py
|
Python
|
setup.py
|
specialprocedures/chpy
|
3bbe66da96abe95653722682754b4d48f9c8eba1
|
[
"MIT"
] | null | null | null |
setup.py
|
specialprocedures/chpy
|
3bbe66da96abe95653722682754b4d48f9c8eba1
|
[
"MIT"
] | null | null | null |
setup.py
|
specialprocedures/chpy
|
3bbe66da96abe95653722682754b4d48f9c8eba1
|
[
"MIT"
] | null | null | null |
import pathlib
from setuptools import find_packages, setup
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text()
# This call to setup() does all the work
setup(
name="chpy",
version="0.1.1",
description="Build networks from the Companies House API",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/specialprocedures/chpy",
author="Ian Goodrich",
# author_email="office@realpython.com",
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
],
packages=find_packages(exclude=["collections", "time", "math", "re", "os"]),
include_package_data=True,
# install_requires=["networkx", "pandas", "progressbar", "fuzzywuzzy",
# "os", "requests", "math", "time", "collections", "re"]
)
| 31.909091
| 81
| 0.633428
| 118
| 1,053
| 5.525424
| 0.661017
| 0.03681
| 0.076687
| 0.079755
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007326
| 0.222222
| 1,053
| 32
| 82
| 32.90625
| 0.788767
| 0.266857
| 0
| 0
| 0
| 0
| 0.358799
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.095238
| 0
| 0.095238
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fca03cd82b40377a907a5c97cfd27492d8e5ee1d
| 2,275
|
py
|
Python
|
src/sentry/eventtypes/error.py
|
boblail/sentry
|
71127331e58791d4651e480b65dd66f06cadc1c8
|
[
"BSD-3-Clause"
] | null | null | null |
src/sentry/eventtypes/error.py
|
boblail/sentry
|
71127331e58791d4651e480b65dd66f06cadc1c8
|
[
"BSD-3-Clause"
] | null | null | null |
src/sentry/eventtypes/error.py
|
boblail/sentry
|
71127331e58791d4651e480b65dd66f06cadc1c8
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
import six
from sentry.utils.safe import get_path, trim
from sentry.utils.strings import truncatechars
from .base import BaseEvent
def get_crash_location(exception, platform=None):
default = None
for frame in reversed(get_path(exception, 'stacktrace', 'frames', filter=True) or ()):
fn = frame.get('filename') or frame.get('abs_path')
if fn:
func = frame.get('function')
if func is not None:
from sentry.interfaces.stacktrace import trim_function_name
func = trim_function_name(func, frame.get('platform') or platform)
if frame.get('in_app'):
return fn, func
if default is None:
default = fn, func
return default
class ErrorEvent(BaseEvent):
key = 'error'
def has_metadata(self, data):
exception = get_path(data, 'exception', 'values', -1)
return exception and any(v is not None for v in six.itervalues(exception))
def get_metadata(self, data):
exception = get_path(data, 'exception', 'values', -1)
if not exception:
return {}
loc = get_crash_location(exception, data.get('platform'))
rv = {
'value': trim(get_path(exception, 'value', default=''), 1024),
}
# If the exception mechanism indicates a synthetic exception we do not
# want to record the type and value into the metadata.
if not get_path(exception, 'mechanism', 'synthetic'):
rv['type'] = trim(get_path(exception, 'type', default='Error'), 128)
# Attach crash location if available
if loc is not None:
fn, func = loc
if fn:
rv['filename'] = fn
if func:
rv['function'] = func
return rv
def get_title(self, metadata):
ty = metadata.get('type')
if ty is None:
return metadata.get('function') or '<unknown>'
if not metadata.get('value'):
return ty
return u'{}: {}'.format(
ty,
truncatechars(metadata['value'].splitlines()[0], 100),
)
def get_location(self, metadata):
return metadata.get('filename')
| 31.597222
| 90
| 0.586813
| 273
| 2,275
| 4.794872
| 0.307692
| 0.037433
| 0.048892
| 0.038197
| 0.07945
| 0.07945
| 0.07945
| 0.07945
| 0.07945
| 0.07945
| 0
| 0.008238
| 0.306374
| 2,275
| 71
| 91
| 32.042254
| 0.821293
| 0.068571
| 0
| 0.075472
| 0
| 0
| 0.09409
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09434
| false
| 0
| 0.113208
| 0.018868
| 0.415094
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fca0ce34b50df500e879e8841c0ceca83a278655
| 8,333
|
py
|
Python
|
capirca/lib/ipset.py
|
google-admin/capirca
|
8c9e66456fedb3c0fc1c641dbefc41793e5c68d5
|
[
"Apache-2.0"
] | 604
|
2015-08-08T22:44:25.000Z
|
2022-03-30T11:51:23.000Z
|
capirca/lib/ipset.py
|
google-admin/capirca
|
8c9e66456fedb3c0fc1c641dbefc41793e5c68d5
|
[
"Apache-2.0"
] | 213
|
2015-08-04T20:11:22.000Z
|
2022-03-30T18:08:15.000Z
|
capirca/lib/ipset.py
|
google-admin/capirca
|
8c9e66456fedb3c0fc1c641dbefc41793e5c68d5
|
[
"Apache-2.0"
] | 207
|
2015-08-07T10:55:00.000Z
|
2022-03-02T17:07:34.000Z
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Ipset iptables generator. This is a subclass of Iptables generator.
ipset is a system inside the Linux kernel, which can very efficiently store
and match IPv4 and IPv6 addresses. This can be used to dramatically increase
performace of iptables firewall.
"""
import string
from capirca.lib import iptables
from capirca.lib import nacaddr
class Error(Exception):
"""Base error class."""
class Term(iptables.Term):
"""Single Ipset term representation."""
_PLATFORM = 'ipset'
_SET_MAX_LENGTH = 31
_POSTJUMP_FORMAT = None
_PREJUMP_FORMAT = None
_TERM_FORMAT = None
_COMMENT_FORMAT = string.Template(
'-A $filter -m comment --comment "$comment"')
_FILTER_TOP_FORMAT = string.Template('-A $filter')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# This stores tuples of set name and set contents, keyed by direction.
# For example:
# { 'src': ('set_name', [ipaddr object, ipaddr object]),
# 'dst': ('set_name', [ipaddr object, ipaddr object]) }
self.addr_sets = {}
def _CalculateAddresses(self, src_addr_list, src_addr_exclude_list,
dst_addr_list, dst_addr_exclude_list):
"""Calculates source and destination address list for a term.
Since ipset is very efficient at matching large number of
addresses, we never return any exclude addresses. Instead
least positive match is calculated for both source and destination
addresses.
For source and destination address list, three cases are possible.
First case is when there are no addresses. In that case we return
_all_ips.
Second case is when there is strictly one address. In that case,
we optimize by not generating a set, and it's then the only
element of returned set.
Third case is when there are more than one address in a set.
In that case we generate a set and also return _all_ips. Note the
difference to the first case where no set is actually generated.
Args:
src_addr_list: source address list of the term.
src_addr_exclude_list: source address exclude list of the term.
dst_addr_list: destination address list of the term.
dst_addr_exclude_list: destination address exclude list of the term.
Returns:
tuple containing source address list, source address exclude list,
destination address list, destination address exclude list in
that order.
"""
target_af = self.AF_MAP[self.af]
src_addr_list = self._CalculateAddrList(src_addr_list,
src_addr_exclude_list, target_af,
'src')
dst_addr_list = self._CalculateAddrList(dst_addr_list,
dst_addr_exclude_list, target_af,
'dst')
return (src_addr_list, [], dst_addr_list, [])
def _CalculateAddrList(self, addr_list, addr_exclude_list,
target_af, direction):
"""Calculates and stores address list for target AF and direction.
Args:
addr_list: address list.
addr_exclude_list: address exclude list of the term.
target_af: target address family.
direction: direction in which address list will be used.
Returns:
calculated address list.
"""
if not addr_list:
addr_list = [self._all_ips]
addr_list = [addr for addr in addr_list if addr.version == target_af]
if addr_exclude_list:
addr_exclude_list = [addr_exclude for addr_exclude in addr_exclude_list if
addr_exclude.version == target_af]
addr_list = nacaddr.ExcludeAddrs(addr_list, addr_exclude_list)
if len(addr_list) > 1:
set_name = self._GenerateSetName(self.term.name, direction)
self.addr_sets[direction] = (set_name, addr_list)
addr_list = [self._all_ips]
return addr_list
def _GenerateAddressStatement(self, src_addr, dst_addr):
"""Returns the address section of an individual iptables rule.
See _CalculateAddresses documentation. Three cases are possible here,
and they map directly to cases in _CalculateAddresses.
First, there can be no addresses for a direction (value is _all_ips then)
In that case we return empty string.
Second there can be stricly one address. In that case we return single
address match (-s or -d).
Third case, is when the value is _all_ips but also the set for particular
direction is present. That's when we return a set match.
Args:
src_addr: ipaddr address or network object with source
address of the rule.
dst_addr: ipaddr address or network object with destination
address of the rule.
Returns:
tuple containing source and destination address statement, in
that order.
"""
src_addr_stmt = ''
dst_addr_stmt = ''
if src_addr and dst_addr:
if src_addr == self._all_ips:
if 'src' in self.addr_sets:
src_addr_stmt = ('-m set --match-set %s src' %
self.addr_sets['src'][0])
else:
src_addr_stmt = '-s %s/%d' % (src_addr.network_address,
src_addr.prefixlen)
if dst_addr == self._all_ips:
if 'dst' in self.addr_sets:
dst_addr_stmt = ('-m set --match-set %s dst' %
self.addr_sets['dst'][0])
else:
dst_addr_stmt = '-d %s/%d' % (dst_addr.network_address,
dst_addr.prefixlen)
return (src_addr_stmt, dst_addr_stmt)
def _GenerateSetName(self, term_name, suffix):
if self.af == 'inet6':
suffix += '-v6'
if len(term_name) + len(suffix) + 1 > self._SET_MAX_LENGTH:
set_name_max_lenth = self._SET_MAX_LENGTH - len(suffix) - 1
term_name = term_name[:set_name_max_lenth]
return '%s-%s' % (term_name, suffix)
class Ipset(iptables.Iptables):
"""Ipset generator."""
_PLATFORM = 'ipset'
_SET_TYPE = 'hash:net'
SUFFIX = '.ips'
_TERM = Term
_MARKER_BEGIN = '# begin:ipset-rules'
_MARKER_END = '# end:ipset-rules'
_GOOD_OPTIONS = ['nostate', 'abbreviateterms', 'truncateterms', 'noverbose',
'exists']
# TODO(vklimovs): some not trivial processing is happening inside this
# __str__, replace with explicit method
def __str__(self):
# Actual rendering happens in __str__, so it has to be called
# before we do set specific part.
iptables_output = super().__str__()
output = []
output.append(self._MARKER_BEGIN)
for (_, _, _, _, terms) in self.iptables_policies:
for term in terms:
output.extend(self._GenerateSetConfig(term))
output.append(self._MARKER_END)
output.append(iptables_output)
return '\n'.join(output)
def _GenerateSetConfig(self, term):
"""Generates set configuration for supplied term.
Args:
term: input term.
Returns:
string that is configuration of supplied term.
"""
output = []
c_str = 'create'
a_str = 'add'
if 'exists' in self.filter_options:
c_str = c_str + ' -exist'
a_str = a_str + ' -exist'
for direction in sorted(term.addr_sets, reverse=True):
set_name, addr_list = term.addr_sets[direction]
set_hashsize = 1 << len(addr_list).bit_length()
set_maxelem = set_hashsize
output.append('%s %s %s family %s hashsize %i maxelem %i' %
(c_str,
set_name,
self._SET_TYPE,
term.af,
set_hashsize,
set_maxelem))
for address in addr_list:
output.append('%s %s %s' % (a_str, set_name, address))
return output
| 36.709251
| 80
| 0.657626
| 1,121
| 8,333
| 4.672614
| 0.251561
| 0.038183
| 0.034364
| 0.011455
| 0.194731
| 0.103093
| 0.053837
| 0
| 0
| 0
| 0
| 0.003251
| 0.26173
| 8,333
| 226
| 81
| 36.871681
| 0.848179
| 0.436817
| 0
| 0.075472
| 0
| 0
| 0.076435
| 0
| 0
| 0
| 0
| 0.004425
| 0
| 1
| 0.066038
| false
| 0
| 0.028302
| 0
| 0.311321
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fca0fd3742b37f4cd3594891d70e849a0cb23d56
| 5,580
|
py
|
Python
|
straxen/analyses/records_matrix.py
|
zhut19/straxen
|
20dea986790ef168ba7052d652a7aa19ab836943
|
[
"BSD-3-Clause"
] | 14
|
2019-06-06T21:38:05.000Z
|
2022-02-16T16:35:16.000Z
|
straxen/analyses/records_matrix.py
|
zhut19/straxen
|
20dea986790ef168ba7052d652a7aa19ab836943
|
[
"BSD-3-Clause"
] | 613
|
2018-10-04T09:15:55.000Z
|
2022-03-31T10:48:04.000Z
|
straxen/analyses/records_matrix.py
|
ahiguera-mx/straxen
|
25b92dd4f18b51700e6df83b230e58ec3bbb7163
|
[
"BSD-3-Clause"
] | 48
|
2019-02-01T12:40:25.000Z
|
2022-02-28T16:59:18.000Z
|
import warnings
import numba
import numpy as np
import strax
import straxen
DEFAULT_MAX_SAMPLES = 20_000
@straxen.mini_analysis(requires=('records',),
warn_beyond_sec=10,
default_time_selection='touching')
def records_matrix(records, time_range, seconds_range, config, to_pe,
max_samples=DEFAULT_MAX_SAMPLES,
ignore_max_sample_warning=False):
"""Return (wv_matrix, times, pms)
- wv_matrix: (n_samples, n_pmt) array with per-PMT waveform intensity in PE/ns
- times: time labels in seconds (corr. to rows)
- pmts: PMT numbers (corr. to columns)
Both times and pmts have one extra element.
:param max_samples: Maximum number of time samples. If window and dt
conspire to exceed this, waveforms will be downsampled.
:param ignore_max_sample_warning: If True, suppress warning when this happens.
Example:
wvm, ts, ys = st.records_matrix(run_id, seconds_range=(1., 1.00001))
plt.pcolormesh(ts, ys, wvm.T,
norm=matplotlib.colors.LogNorm())
plt.colorbar(label='Intensity [PE / ns]')
"""
if len(records):
dt = records[0]['dt']
samples_per_record = len(records[0]['data'])
else:
# Defaults here do not matter, nothing will be plotted anyway
dt = 10, 110
record_duration = samples_per_record * dt
window = time_range[1] - time_range[0]
if window / dt > max_samples:
with np.errstate(divide='ignore', invalid='ignore'):
# Downsample. New dt must be
# a) multiple of old dt
dts = np.arange(0, record_duration + dt, dt).astype(np.int)
# b) divisor of record duration
dts = dts[record_duration / dts % 1 == 0]
# c) total samples < max_samples
dts = dts[window / dts < max_samples]
if len(dts):
# Pick lowest dt that satisfies criteria
dt = dts.min()
else:
# Records will be downsampled to single points
dt = max(record_duration, window // max_samples)
if not ignore_max_sample_warning:
warnings.warn(f"Matrix would exceed max_samples {max_samples}, "
f"downsampling to dt = {dt} ns.")
wvm = _records_to_matrix(
records,
t0=time_range[0],
n_channels=config['n_tpc_pmts'],
dt=dt,
window=window)
wvm = wvm.astype(np.float32) * to_pe.reshape(1, -1) / dt
# Note + 1, so data for sample 0 will range from 0-1 in plot
ts = (np.arange(wvm.shape[0] + 1) * dt / int(1e9) + seconds_range[0])
ys = np.arange(wvm.shape[1] + 1)
return wvm, ts, ys
@straxen.mini_analysis(requires=('raw_records',),
warn_beyond_sec=3e-3,
default_time_selection='touching')
def raw_records_matrix(context, run_id, raw_records, time_range,
ignore_max_sample_warning=False,
max_samples=DEFAULT_MAX_SAMPLES,
**kwargs):
# Convert raw to records. We may not be able to baseline correctly
# at the start of the range due to missing zeroth fragments
records = strax.raw_to_records(raw_records)
strax.baseline(records, allow_sloppy_chunking=True)
strax.zero_out_of_bounds(records)
return context.records_matrix(run_id=run_id,
records=records,
time_range=time_range,
max_samples=max_samples,
ignore_max_sample_warning=ignore_max_sample_warning,
**kwargs)
@numba.njit
def _records_to_matrix(records, t0, window, n_channels, dt=10):
n_samples = (window // dt) + 1
# Use 32-bit integers, so downsampling saturated samples doesn't
# cause wraparounds
# TODO: amplitude bit shift!
y = np.zeros((n_samples, n_channels),
dtype=np.int32)
if not len(records):
return y
samples_per_record = len(records[0]['data'])
for r in records:
if r['channel'] > n_channels:
continue
if dt >= samples_per_record * r['dt']:
# Downsample to single sample -> store area
idx = (r['time'] - t0) // dt
if idx >= len(y):
print(len(y), idx)
raise IndexError('Despite n_samples = window // dt + 1, our '
'idx is too high?!')
y[idx, r['channel']] += r['area']
continue
# Assume out-of-bounds data has been zeroed, so we do not
# need to do r['data'][:r['length']] here.
# This simplifies downsampling.
w = r['data'].astype(np.int32)
if dt > r['dt']:
# Downsample
duration = samples_per_record * r['dt']
assert duration % dt == 0, "Cannot downsample fractionally"
# .astype here keeps numba happy ... ??
w = w.reshape(duration // dt, -1).sum(axis=1).astype(np.int32)
elif dt < r['dt']:
raise ValueError("Upsampling not yet implemented")
(r_start, r_end), (y_start, y_end) = strax.overlap_indices(
r['time'] // dt, len(w),
t0 // dt, n_samples)
# += is paranoid, data in individual channels should not overlap
# but... https://github.com/AxFoundation/strax/issues/119
y[y_start:y_end, r['channel']] += w[r_start:r_end]
return y
| 37.702703
| 86
| 0.577061
| 714
| 5,580
| 4.341737
| 0.333333
| 0.045161
| 0.029032
| 0.042581
| 0.123871
| 0.040645
| 0.02
| 0
| 0
| 0
| 0
| 0.017669
| 0.32043
| 5,580
| 147
| 87
| 37.959184
| 0.799842
| 0.278136
| 0
| 0.136364
| 0
| 0
| 0.077488
| 0
| 0
| 0
| 0
| 0.006803
| 0.011364
| 1
| 0.034091
| false
| 0
| 0.056818
| 0
| 0.136364
| 0.011364
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fca3b5ff0625e2c4785f87d64f205243acd800d3
| 4,311
|
py
|
Python
|
reviewboard/webapi/resources/change.py
|
mnoorenberghe/reviewboard
|
b8ba9d662c250cb5ec704a50f619adbf3be8cbf0
|
[
"MIT"
] | null | null | null |
reviewboard/webapi/resources/change.py
|
mnoorenberghe/reviewboard
|
b8ba9d662c250cb5ec704a50f619adbf3be8cbf0
|
[
"MIT"
] | null | null | null |
reviewboard/webapi/resources/change.py
|
mnoorenberghe/reviewboard
|
b8ba9d662c250cb5ec704a50f619adbf3be8cbf0
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
from django.utils import six
from djblets.util.decorators import augment_method_from
from reviewboard.changedescs.models import ChangeDescription
from reviewboard.reviews.fields import get_review_request_field
from reviewboard.webapi.base import WebAPIResource
from reviewboard.webapi.decorators import webapi_check_local_site
from reviewboard.webapi.mixins import MarkdownFieldsMixin
from reviewboard.webapi.resources import resources
class ChangeResource(MarkdownFieldsMixin, WebAPIResource):
"""Provides information on a change made to a public review request.
A change includes, optionally, text entered by the user describing the
change, and also includes a list of fields that were changed on the
review request.
The list of fields changed are in ``fields_changed``. The keys are the
names of the fields, and the values are details on that particular
change to the field.
For ``summary``, ``description``, ``testing_done`` and ``branch`` fields,
the following detail keys will be available:
* ``old``: The old value of the field.
* ``new``: The new value of the field.
For ``diff`` fields:
* ``added``: The diff that was added.
For ``bugs_closed`` fields:
* ``old``: A list of old bugs.
* ``new``: A list of new bugs.
* ``removed``: A list of bugs that were removed, if any.
* ``added``: A list of bugs that were added, if any.
For ``file_attachments``, ``screenshots``, ``target_people`` and
``target_groups`` fields:
* ``old``: A list of old items.
* ``new``: A list of new items.
* ``removed``: A list of items that were removed, if any.
* ``added``: A list of items that were added, if any.
For ``screenshot_captions`` and ``file_captions`` fields:
* ``old``: The old caption.
* ``new``: The new caption.
* ``screenshot``: The screenshot that was updated.
"""
added_in = '1.6'
model = ChangeDescription
name = 'change'
fields = {
'id': {
'type': int,
'description': 'The numeric ID of the change description.',
},
'fields_changed': {
'type': dict,
'description': 'The fields that were changed.',
},
'text': {
'type': six.text_type,
'description': 'The description of the change written by the '
'submitter.',
'supports_text_types': True,
},
'text_type': {
'type': MarkdownFieldsMixin.TEXT_TYPES,
'description': 'The mode for the text field.',
'added_in': '2.0',
},
'timestamp': {
'type': six.text_type,
'description': 'The date and time that the change was made '
'(in YYYY-MM-DD HH:MM:SS format).',
},
}
uri_object_key = 'change_id'
model_parent_key = 'review_request'
allowed_methods = ('GET',)
mimetype_list_resource_name = 'review-request-changes'
mimetype_item_resource_name = 'review-request-change'
def serialize_fields_changed_field(self, obj, **kwargs):
review_request = obj.review_request.get()
fields_changed = {}
for field_name, data in six.iteritems(obj.fields_changed):
field_cls = get_review_request_field(field_name)
field = field_cls(review_request)
fields_changed[field.field_id] = field.serialize_change_entry(obj)
return fields_changed
def has_access_permissions(self, request, obj, *args, **kwargs):
return obj.review_request.get().is_accessible_by(request.user)
def get_queryset(self, request, *args, **kwargs):
review_request = resources.review_request.get_object(
request, *args, **kwargs)
return review_request.changedescs.filter(public=True)
@webapi_check_local_site
@augment_method_from(WebAPIResource)
def get_list(self, *args, **kwargs):
"""Returns a list of changes made on a review request."""
pass
@webapi_check_local_site
@augment_method_from(WebAPIResource)
def get(self, *args, **kwargs):
"""Returns the information on a change to a review request."""
pass
change_resource = ChangeResource()
| 33.944882
| 78
| 0.64579
| 530
| 4,311
| 5.092453
| 0.288679
| 0.077066
| 0.025936
| 0.02223
| 0.13857
| 0.128937
| 0.06595
| 0.06595
| 0.06595
| 0.042238
| 0
| 0.001235
| 0.248434
| 4,311
| 126
| 79
| 34.214286
| 0.83179
| 0.321967
| 0
| 0.119403
| 0
| 0
| 0.160702
| 0.01539
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074627
| false
| 0.029851
| 0.134328
| 0.014925
| 0.402985
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fca4787d63d5c744297f12e8eaf44573826eecbb
| 1,990
|
py
|
Python
|
controllers/notes/NewNote.py
|
heminsatya/free_notes
|
88272a34c48e60d1a82e28b0b2d56883fa724bb3
|
[
"MIT"
] | null | null | null |
controllers/notes/NewNote.py
|
heminsatya/free_notes
|
88272a34c48e60d1a82e28b0b2d56883fa724bb3
|
[
"MIT"
] | null | null | null |
controllers/notes/NewNote.py
|
heminsatya/free_notes
|
88272a34c48e60d1a82e28b0b2d56883fa724bb3
|
[
"MIT"
] | null | null | null |
# Dependencies
from aurora import Controller, View, Forms
from models import Users, Notes
from aurora.security import login_required, get_session
from flask import request
from datetime import datetime
# The controller class
class NewNote(Controller):
# POST Method
@login_required(app='users')
def post(self):
# The required models
user = Users().read(where={'username':get_session('user')}).first()
notes = Notes()
# Form data
data = request.form
form = Forms(data)
# Valid form data
if form.validate():
# Collect form inputs
title = data.get('title')
content = data.get('content')
# Required fields
if not title or not content:
return {
'error': '<i class="fas fa-exclamation-triangle mr-1"></i> Form data is invalid!',
}, 400
# Everything is fine
# Insert new note into the database
data = {
'user_id': user['id'],
'title': title,
'content': content,
# 'date': datetime.now().strftime("%m-%d-%Y")
}
notes.create(data=data)
# Return the result
return {
'success': '<i class="fas fa-check-circle mr-1"></i> The new note created successfully!',
}, 200
# Invalid form data
else:
# Return the result
return {
'error': '<i class="fas fa-exclamation-triangle mr-1"></i> Form data is invalid!',
}, 400
# GET Method
@login_required(app='users')
def get(self):
# The required models
user = Users().read(where={'username':get_session('user')}).first()
notes = Notes().read(where={'user_id':user['id']}, order_by={'id':'DESC'}).all()
form = Forms()
return View('create', user=user, form=form)
| 29.264706
| 105
| 0.529648
| 221
| 1,990
| 4.728507
| 0.371041
| 0.038278
| 0.025837
| 0.031579
| 0.327273
| 0.327273
| 0.269856
| 0.269856
| 0.269856
| 0.269856
| 0
| 0.009224
| 0.346231
| 1,990
| 67
| 106
| 29.701493
| 0.794005
| 0.155276
| 0
| 0.289474
| 0
| 0.078947
| 0.192423
| 0.027661
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.131579
| 0
| 0.315789
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fca52f1665be31c3ef3732af8d77d4f70b20bc49
| 2,762
|
py
|
Python
|
EDA-&-Data-Preprocessing/code.py
|
udayraj-gupta/ga-learner-dsmp-repo
|
90b16345fb3fd4f6f4f201012995eea7ff1e73e9
|
[
"MIT"
] | null | null | null |
EDA-&-Data-Preprocessing/code.py
|
udayraj-gupta/ga-learner-dsmp-repo
|
90b16345fb3fd4f6f4f201012995eea7ff1e73e9
|
[
"MIT"
] | null | null | null |
EDA-&-Data-Preprocessing/code.py
|
udayraj-gupta/ga-learner-dsmp-repo
|
90b16345fb3fd4f6f4f201012995eea7ff1e73e9
|
[
"MIT"
] | null | null | null |
# --------------
#Importing header files
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
#Code starts here
data = pd.read_csv(path)
data['Rating'].hist()
data = data[data['Rating']<=5]
data['Rating'].hist()
#Code ends here
# --------------
# code starts here
total_null = data.isnull().sum()
percent_null = (total_null/data.isnull().count())*100
missing_data = pd.concat([total_null,percent_null],axis=1,keys=['Total','Percentage'])
print(missing_data)
data = data.dropna()
total_null_1 = data.isnull().sum()
percent_null_1 = (total_null_1/data.isnull().count())*100
missing_data_1 = pd.concat([total_null_1,percent_null_1],axis=1,keys=['Total','Percentage'])
print(missing_data_1)
# code ends here
# --------------
#Code starts here
a = sns.catplot(x='Category',y='Rating',data=data, kind="box", height = 10)
a.set_xticklabels(rotation=90)
a.set_titles('Rating vs Category [BoxPlot]')
#Code ends here
# --------------
#Importing header files
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
#Code starts here
le = LabelEncoder()
#data['Installs'] = data['Installs'].str.replace(',','').str.replace('+','')
data['Installs'] = data['Installs'].apply(lambda x : x.replace(',','')).apply(lambda x : x.replace('+',''))
data['Installs'] =data['Installs'].astype(int)
print(data['Installs'])
data['Installs'] = le.fit_transform(data['Installs'])
a = sns.regplot(x="Installs", y="Rating" , data=data)
a.set_title('Rating vs Installs [RegPlot]')
#Code ends here
# --------------
#Code starts here
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
import seaborn as sns
#Code starts here
d=data['Price'].value_counts()
print(d)
data['Price']=data['Price'].apply(lambda x : x.replace('$',''))
d=data['Price'].value_counts()
print(d)
data['Price']=data['Price'].astype(float)
#le=LabelEncoder()
#data['Installs'] = le.fit_transform(data['Installs'])
y=sns.regplot(data=data,x='Price',y='Rating')
y.set_title('Rating vs Installs [RegPlot]')
#Code ends here
# --------------
#Code starts here
data['Genres']=data['Genres'].str.split(';').str[0]
#print(data['Genres'])
df=data[['Genres','Rating']]
gr_mean=df.groupby(['Genres'],as_index=False).mean()
gr_mean=gr_mean.sort_values(by=['Rating'])
gr_mean=pd.DataFrame(gr_mean)
print(gr_mean)#,gr_mean[-1,:])
#Code ends heree
# --------------
#Code starts here
import seaborn as sns
data['Last Updated'] = pd.to_datetime(data['Last Updated'])
print(data['Last Updated'].max())
max_date=data['Last Updated'].max()
data['Last Updated Days']=max_date-data['Last Updated']
data['Last Updated Days']=data['Last Updated Days'].dt.days
sns.regplot(data=data,x='Last Updated Days',y='Rating').set_title('Rating vs Last Updated [RegPlot]')
#Code ends here
| 24.442478
| 107
| 0.685373
| 403
| 2,762
| 4.590571
| 0.243176
| 0.071351
| 0.060541
| 0.034595
| 0.49027
| 0.347027
| 0.229189
| 0.153514
| 0.11027
| 0.11027
| 0
| 0.008775
| 0.092324
| 2,762
| 112
| 108
| 24.660714
| 0.729158
| 0.202028
| 0
| 0.22
| 0
| 0
| 0.214549
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.14
| 0
| 0.14
| 0.14
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fca54a750bbd17151c5163b01b4d00722314de04
| 4,008
|
py
|
Python
|
openpnm/algorithms/ChargeConservation.py
|
rguan-uoft/OpenPNM
|
b3873d35270b0acaad019264368d0055c677d159
|
[
"MIT"
] | 1
|
2020-02-06T19:21:20.000Z
|
2020-02-06T19:21:20.000Z
|
openpnm/algorithms/ChargeConservation.py
|
ChahatAggarwal/OpenPNM
|
b3873d35270b0acaad019264368d0055c677d159
|
[
"MIT"
] | null | null | null |
openpnm/algorithms/ChargeConservation.py
|
ChahatAggarwal/OpenPNM
|
b3873d35270b0acaad019264368d0055c677d159
|
[
"MIT"
] | null | null | null |
import numpy as np
from openpnm.algorithms import ReactiveTransport
from openpnm.models.physics import generic_source_term as gst
from openpnm.utils import logging
logger = logging.getLogger(__name__)
class ChargeConservation(ReactiveTransport):
r"""
A class to enforce charge conservation in ionic transport simulations.
Parameters
----------
network : OpenPNM Network object
The network on which this algorithm operates
project : OpenPNM Project object
Either a network or a project must be specified
name : string, optional
A unique name to give the object for easier identification. If not
given, one is generated.
"""
def __init__(self, settings={}, phase=None, **kwargs):
def_set = {'phase': None,
'quantity': 'pore.potential',
'conductance': 'throat.ionic_conductance',
'charge_conservation': 'electroneutrality',
'gui': {'setup': {'phase': None,
'quantity': '',
'conductance': '',
'charge_conservation': ''},
'set_rate_BC': {'pores': None,
'values': None},
'set_value_BC': {'pores': None,
'values': None},
'set_source': {'pores': None,
'propname': ''}
}
}
super().__init__(**kwargs)
self.settings.update(def_set)
self.settings.update(settings)
if phase is not None:
self.setup(phase=phase)
def setup(self, phase=None, quantity='', conductance='',
charge_conservation=None, **kwargs):
r"""
This method takes several arguments that are essential to running the
algorithm and adds them to the settings.
Parameters
----------
phase : OpenPNM Phase object
The phase on which the algorithm is to be run.
quantity : string
(default is ``'pore.mole_fraction'``) The name of the physical
quantity to be calculated.
conductance : string
(default is ``'throat.diffusive_conductance'``) The name of the
pore-scale transport conductance values. These are typically
calculated by a model attached to a *Physics* object associated
with the given *Phase*.
charge_conservation : string
The assumption adopted to enforce charge conservation when
performing ions transport simulations (default is
"electroneutrality").
Notes
-----
Any additional arguments are added to the ``settings`` dictionary of
the object.
"""
if phase:
self.settings['phase'] = phase.name
if quantity:
self.settings['quantity'] = quantity
if conductance:
self.settings['conductance'] = conductance
if charge_conservation:
self.settings['charge_conservation'] = charge_conservation
super().setup(**kwargs)
def _charge_conservation_eq_source_term(self, e_alg):
# Source term for Poisson or charge conservation (electroneutrality) eq
phase = self.project.phases()[self.settings['phase']]
Ps = (self['pore.all'] * np.isnan(self['pore.bc_value']) *
np.isnan(self['pore.bc_rate']))
mod = gst.charge_conservation
phys = self.project.find_physics(phase=phase)
phys[0].add_model(propname='pore.charge_conservation', model=mod,
phase=phase, p_alg=self, e_alg=e_alg,
assumption=self.settings['charge_conservation'])
self.set_source(propname='pore.charge_conservation', pores=Ps)
| 39.683168
| 79
| 0.560629
| 396
| 4,008
| 5.550505
| 0.338384
| 0.122839
| 0.023203
| 0.024568
| 0.079163
| 0.063694
| 0
| 0
| 0
| 0
| 0
| 0.000382
| 0.347305
| 4,008
| 100
| 80
| 40.08
| 0.839832
| 0.326597
| 0
| 0.039216
| 0
| 0
| 0.148676
| 0.029328
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.078431
| 0
| 0.156863
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fca7c37bc274f186e6ec2be9680fe84ac2ca179f
| 2,318
|
py
|
Python
|
jno/commands/upload.py
|
Kosinkadink/jno
|
773806dd737c1ef0b0a89a7e4086da9c2c1260c1
|
[
"MIT"
] | 1
|
2017-03-07T20:15:44.000Z
|
2017-03-07T20:15:44.000Z
|
jno/commands/upload.py
|
Kosinkadink/jno
|
773806dd737c1ef0b0a89a7e4086da9c2c1260c1
|
[
"MIT"
] | null | null | null |
jno/commands/upload.py
|
Kosinkadink/jno
|
773806dd737c1ef0b0a89a7e4086da9c2c1260c1
|
[
"MIT"
] | null | null | null |
from jno.util import interpret_configs
from jno.util import run_arduino_process
from jno.util import create_build_directory
from jno.util import get_common_parameters
from jno.util import verify_arduino_dir
from jno.util import verify_and_get_port
from jno.util import JnoException
from jno.commands.command import Command
import getopt
from colorama import Fore
class Upload(Command):
help_name = "Upload"
help_usage = "jno upload [-b, --board=] boardname [-p, --ports=] port [-v, --verbose]"
help_description = "Runs build and uploads to board. Without arguments, uses board/port defined locally/globally. " \
"If port is not defined, uses first available port. With -v, more info will be displayed during upload."
def run(self,argv,location):
jno_dict = interpret_configs()
verify_arduino_dir(jno_dict)
create_build_directory(jno_dict)
arg_list = self.perform_upload(argv,jno_dict)
run_arduino_process(arg_list)
# Create argument list for arduino build
def perform_upload(self,argv,jno_dict):
# assemble command query
# GOAL: <arduino exec> --upload <script> --board <board> --port <serial>
arg_list = [jno_dict["EXEC_SCRIPT"]]
# add common params - set pref
arg_list.extend(get_common_parameters(jno_dict))
# add upload params
arg_list.append("--upload")
arg_list.append(jno_dict["SKETCH_INO"])
try:
opts,args = getopt.getopt(argv, 'b:p:v',['board=','port=','verbose'])
except getopt.GetoptError as e:
raise JnoException(str(e))
for opt, arg in opts:
if opt in ("-b","--board"):
jno_dict["board"] = arg.strip()
elif opt in ("-p","--port"):
jno_dict["port"] = arg.strip()
elif opt in ("-v","--verbose"):
arg_list.append("--verbose")
# verify port or get first available
port = verify_and_get_port(jno_dict["port"])
if not port:
if jno_dict["port"] == "DEFAULT":
raise JnoException("no ports available")
raise JnoException("port does not exist: {}".format(jno_dict["port"]))
else:
if jno_dict["port"] == "DEFAULT":
print("{1}No port provided, using available port {0}{2}".format(port,Fore.YELLOW,Fore.RESET))
# add board params
arg_list.append("--board")
arg_list.append(self.formatBoard(jno_dict["board"],jno_dict))
# add port params
arg_list.append("--port")
arg_list.append(port)
return arg_list
| 34.597015
| 118
| 0.718723
| 346
| 2,318
| 4.644509
| 0.32659
| 0.069695
| 0.047915
| 0.074051
| 0.074673
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001521
| 0.148835
| 2,318
| 66
| 119
| 35.121212
| 0.812975
| 0.10742
| 0
| 0.04
| 0
| 0.04
| 0.246602
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.2
| 0
| 0.34
| 0.02
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fca94cd64ba4f1d65d89141aae52e93be1d8a3f6
| 11,440
|
py
|
Python
|
modelling/inference_multi_attribute.py
|
rizwan09/hydra-sum
|
42088dde4e2b109fdb222ad4c329ca7bbfe9db2f
|
[
"BSD-3-Clause"
] | 5
|
2021-11-12T12:03:47.000Z
|
2022-02-09T11:07:23.000Z
|
modelling/inference_multi_attribute.py
|
rizwan09/hydra-sum
|
42088dde4e2b109fdb222ad4c329ca7bbfe9db2f
|
[
"BSD-3-Clause"
] | null | null | null |
modelling/inference_multi_attribute.py
|
rizwan09/hydra-sum
|
42088dde4e2b109fdb222ad4c329ca7bbfe9db2f
|
[
"BSD-3-Clause"
] | 1
|
2021-10-22T04:20:34.000Z
|
2021-10-22T04:20:34.000Z
|
import argparse
import json
import logging
import os
import torch
from transformers.file_utils import ModelOutput
from typing import Dict, Optional, Tuple
from torch.utils.data import DataLoader, SequentialSampler
from transformers.modeling_outputs import Seq2SeqLMOutput
import train_seq2seq_utils
import single_head_utils
import multi_head_utils
from torch import nn
from generation_utils_multi_attribute import GenerationMixinCustomCombined
from transformers import (
PreTrainedModel,
PreTrainedTokenizer,
BartConfig,
BartTokenizer
)
logger = logging.getLogger(__name__)
MODEL_CLASSES = {"bart_mult_heads_2": (BartConfig,
multi_head_utils.ConditionalGenerationCustomBartMultHeads,
BartTokenizer),
}
class Seq2SeqLMOutput(ModelOutput):
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
past_key_values_1: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
past_key_values_2: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
class BartModelCombined(GenerationMixinCustomCombined, nn.Module):
def __init__(self, model1, model2, config: BartConfig):
super().__init__()
self.model1 = model1
self.model2 = model2
self.config = config
self.device = model2.device
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
encoder_outputs_1=None,
encoder_outputs_2=None,
past_key_values_1=None,
past_key_values_2=None,
inputs_embeds=None,
use_cache=None,
output_attentions=False,
output_hidden_states=False,
return_dict=None,
use_mixed=False,
use_head_1=0,
use_head_2=0,
gate_prob=0.5,
):
args1 = {'input_ids': input_ids,
'attention_mask': attention_mask,
'decoder_input_ids': decoder_input_ids,
'decoder_attention_mask': decoder_attention_mask,
'head_mask': head_mask,
'decoder_head_mask': decoder_head_mask,
'cross_attn_head_mask': cross_attn_head_mask,
'encoder_outputs': encoder_outputs_1,
'past_key_values': past_key_values_1,
'inputs_embeds': inputs_embeds,
'use_cache': use_cache,
'output_attentions': False,
'output_hidden_states': False,
'return_dict': None,
'use_mixed': False,
'use_head': use_head_1,
}
out1 = self.model1(**args1)
softmax_0 = torch.exp(out1.logits)
args2 = {'input_ids': input_ids,
'attention_mask': attention_mask,
'decoder_input_ids': decoder_input_ids,
'decoder_attention_mask': decoder_attention_mask,
'head_mask': head_mask,
'decoder_head_mask': decoder_head_mask,
'cross_attn_head_mask': cross_attn_head_mask,
'encoder_outputs': encoder_outputs_2,
'past_key_values': past_key_values_2,
'inputs_embeds': inputs_embeds,
'use_cache': use_cache,
'output_attentions': output_attentions,
'output_hidden_states': output_hidden_states,
'return_dict': None,
'use_mixed': False,
'use_head': use_head_2,
}
out2 = self.model2(**args2)
softmax_1 = torch.exp(out2.logits)
softmax_0 = softmax_0 * gate_prob
softmax_1 = softmax_1 * (1 - gate_prob)
lm_logits = torch.log(softmax_0 + softmax_1)
return_output = Seq2SeqLMOutput(
logits=lm_logits,
past_key_values_1=out1.past_key_values,
past_key_values_2=out2.past_key_values)
return return_output
# unchanged
def prepare_inputs_for_generation(
self,
decoder_input_ids,
past_1=None,
past_2=None,
attention_mask=None,
head_mask=None,
use_cache=None,
encoder_outputs_1=None,
encoder_outputs_2=None,
**kwargs
):
# cut decoder_input_ids if past is used
if past_1 is not None and past_2 is not None:
decoder_input_ids = decoder_input_ids[:, -1:]
return {
"input_ids": None, # encoder_outputs is defined. input_ids not needed
"encoder_outputs_1": encoder_outputs_1,
"encoder_outputs_2": encoder_outputs_2,
"past_key_values_1": past_1,
"past_key_values_2": past_2,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"head_mask": head_mask,
"use_cache": use_cache, # change this to avoid caching (presumably for debugging)
}
def load_model(path):
args = json.load(open(path))
config_class, model_class = BartConfig, multi_head_utils.ConditionalGenerationCustomBartMultHeads
config = config_class.from_pretrained(args['path'])
model = model_class.from_pretrained(
args['path'],
from_tf=bool(".ckpt" in args['path']),
config=config)
return model, args, config
def evaluate(args, eval_dataset, model: PreTrainedModel, args1, args2, tokenizer: PreTrainedTokenizer,
suffix="") -> Dict:
eval_output_dir = args.output_dir
if not os.path.exists(eval_output_dir):
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Eval!
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
if args.generate:
f_out = open(os.path.join(eval_output_dir, 'test_out%s.txt' % suffix), 'w')
print(eval_output_dir)
k = 0
with torch.no_grad():
model.eval()
for batch in eval_dataloader:
batch = tuple(t.to(args.device) for t in batch)
input_ids, input_attention_mask, decoder_ids = batch[0], batch[1], batch[2]
for j in range(input_ids.shape[0]):
gold = tokenizer.decode(decoder_ids[j], skip_special_tokens=True)
input = tokenizer.decode(input_ids[j], skip_special_tokens=True)
input_args = {'input_ids': input_ids[j].unsqueeze(0),
'attention_mask': input_attention_mask[j].unsqueeze(0), 'num_beams': 6,
'length_penalty': 2, 'no_repeat_ngram_size': 3, 'max_length': 200, 'min_length': 12,
'top_k': 30, 'top_p': 0.5, 'do_sample': True,
'decoder_start_token_id': tokenizer.bos_token_id, 'num_return_sequences': 1,
'gate_prob': args.gate_probability, 'use_head_1': args1['use_head'],
'use_head_2': args2['use_head']}
gen = model.generate(**input_args)
gen = [tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=True) for g in
gen]
# gen = gen[0]
print(gen[0].strip())
f_out.write(input + '\n')
f_out.write(gold + '\n')
for g in gen:
f_out.write(g.strip() + '\n')
f_out.write('\n')
k += 1
if k > 1000:
break
f_out.close()
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_type",
default=None,
type=str,
help="base model, used to load tokenizer",
)
parser.add_argument(
"--model_1_config",
default=None,
type=str,
help="Path to model 1 config",
)
parser.add_argument(
"--model_2_config",
default=None,
type=str,
required=True,
help="Path to model 2 config",
)
parser.add_argument(
"--test_data_file",
default=None,
type=str,
required=True,
help="Evaluation data file to evaluate the perplexity on (a text file).",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
# Other parameters
parser.add_argument(
"--max_seq_length",
default=1024,
type=int,
help="The maximum total input sequence length after tokenization.",
)
parser.add_argument(
"--max_decoder_length",
default=128,
type=int,
help="The maximum total decoder sequence length after tokenization.",
)
parser.add_argument("--per_gpu_eval_batch_size", default=32, type=int, help="Batch size evaluation.", )
parser.add_argument("--gpu_device", type=int, default=0, help="gpu device")
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached data sets", )
# custom flags
parser.add_argument("--generate", action="store_true", help="Generate summaries for dev set", )
parser.add_argument("--dump_posteriors", action="store_true", help="Dump posterior probs at intermediate steps", )
parser.add_argument("--gate_probability", type=float, default=None, help="gate prob")
args = parser.parse_args()
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
args.n_gpu = 1
device = torch.device("cuda", args.gpu_device)
args.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
filename=os.path.join(args.output_dir, 'model.log')
)
# Set seed
model1, args1, config = load_model(args.model_1_config)
model1.to(args.device)
model2, args2, _ = load_model(args.model_2_config)
model2.to(args.device)
f_out = open(os.path.join(args.output_dir, 'model_configs.json'), 'w')
json.dump(args1, f_out)
f_out.write('\n')
json.dump(args2, f_out)
f_out.write('\n')
json.dump({'gate_prob': args.gate_probability}, f_out)
f_out.write('\n')
f_out.close()
tokenizer = BartTokenizer.from_pretrained('facebook/bart-large')
model = BartModelCombined(model1, model2, config)
eval_dataset = train_seq2seq_utils.load_and_cache_examples(args, tokenizer, 'test')
evaluate(args, eval_dataset, model, args1, args2, tokenizer, 'final')
logger.info("Training/evaluation parameters %s", args)
if __name__ == "__main__":
main()
| 34.878049
| 118
| 0.600699
| 1,328
| 11,440
| 4.866717
| 0.205572
| 0.02847
| 0.02816
| 0.020424
| 0.33715
| 0.25762
| 0.191242
| 0.134303
| 0.127495
| 0.114498
| 0
| 0.016457
| 0.298864
| 11,440
| 327
| 119
| 34.984709
| 0.789303
| 0.021329
| 0
| 0.23221
| 0
| 0
| 0.161123
| 0.008137
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022472
| false
| 0
| 0.05618
| 0
| 0.11236
| 0.007491
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fca96624113002ffa1bf51ca5fff111307a9a56b
| 2,199
|
py
|
Python
|
stp_core/common/logging/handlers.py
|
andkononykhin/plenum
|
28dc1719f4b7e80d31dafbadb38cfec4da949886
|
[
"Apache-2.0"
] | 148
|
2017-07-11T19:05:25.000Z
|
2022-03-16T21:31:20.000Z
|
stp_core/common/logging/handlers.py
|
andkononykhin/plenum
|
28dc1719f4b7e80d31dafbadb38cfec4da949886
|
[
"Apache-2.0"
] | 561
|
2017-06-29T17:59:56.000Z
|
2022-03-09T15:47:14.000Z
|
stp_core/common/logging/handlers.py
|
andkononykhin/plenum
|
28dc1719f4b7e80d31dafbadb38cfec4da949886
|
[
"Apache-2.0"
] | 378
|
2017-06-29T17:45:27.000Z
|
2022-03-26T07:27:59.000Z
|
import logging
class CallbackHandler(logging.Handler):
def __init__(self, typestr, default_tags, callback, override_tags):
"""
Initialize the handler.
"""
super().__init__()
self.callback = callback
self.tags = default_tags
self.update_tags(override_tags or {})
self.typestr = typestr
def update_tags(self, override_tags):
self.tags.update(override_tags)
def emit(self, record):
"""
Passes the log record back to the CLI for rendering
"""
should_cb = None
attr_val = None
if hasattr(record, self.typestr):
attr_val = getattr(record, self.typestr)
should_cb = bool(attr_val)
if should_cb is None and record.levelno >= logging.INFO:
should_cb = True
if hasattr(record, 'tags'):
for t in record.tags:
if t in self.tags:
if self.tags[t]:
should_cb = True
continue
else:
should_cb = False
break
if should_cb:
self.callback(record, attr_val)
class CliHandler(CallbackHandler):
def __init__(self, callback, override_tags=None):
default_tags = {
"add_replica": True
}
super().__init__(typestr="cli",
default_tags=default_tags,
callback=callback,
override_tags=override_tags)
class DemoHandler(CallbackHandler):
def __init__(self, callback, override_tags=None):
default_tags = {
"add_replica": True
}
super().__init__(typestr="demo",
default_tags=default_tags,
callback=callback,
override_tags=override_tags)
class TestingHandler(logging.Handler):
def __init__(self, tester):
"""
Initialize the handler.
"""
super().__init__()
self.tester = tester
def emit(self, record):
"""
Captures a record.
"""
self.tester(record)
| 28.558442
| 71
| 0.530241
| 215
| 2,199
| 5.12093
| 0.260465
| 0.108992
| 0.090827
| 0.038147
| 0.392371
| 0.346957
| 0.287012
| 0.287012
| 0.287012
| 0.287012
| 0
| 0
| 0.383811
| 2,199
| 76
| 72
| 28.934211
| 0.812546
| 0.053661
| 0
| 0.339623
| 0
| 0
| 0.0166
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.132075
| false
| 0
| 0.018868
| 0
| 0.226415
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d78de49bd48a1e6e3f364af456fa6175d8f4166
| 10,534
|
py
|
Python
|
tools/python/utils/config_parser.py
|
hanhan9449/mace
|
63feaf5055bab6a081d36edfab8f963a624899aa
|
[
"Apache-2.0"
] | 1
|
2020-09-07T02:40:28.000Z
|
2020-09-07T02:40:28.000Z
|
tools/python/utils/config_parser.py
|
hanhan9449/mace
|
63feaf5055bab6a081d36edfab8f963a624899aa
|
[
"Apache-2.0"
] | null | null | null |
tools/python/utils/config_parser.py
|
hanhan9449/mace
|
63feaf5055bab6a081d36edfab8f963a624899aa
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The MACE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import os
import copy
import yaml
from enum import Enum
from utils.util import mace_check
from utils.util import MaceLogger
from py_proto import mace_pb2
CPP_KEYWORDS = [
'alignas', 'alignof', 'and', 'and_eq', 'asm', 'atomic_cancel',
'atomic_commit', 'atomic_noexcept', 'auto', 'bitand', 'bitor',
'bool', 'break', 'case', 'catch', 'char', 'char16_t', 'char32_t',
'class', 'compl', 'concept', 'const', 'constexpr', 'const_cast',
'continue', 'co_await', 'co_return', 'co_yield', 'decltype', 'default',
'delete', 'do', 'double', 'dynamic_cast', 'else', 'enum', 'explicit',
'export', 'extern', 'false', 'float', 'for', 'friend', 'goto', 'if',
'import', 'inline', 'int', 'long', 'module', 'mutable', 'namespace',
'new', 'noexcept', 'not', 'not_eq', 'nullptr', 'operator', 'or', 'or_eq',
'private', 'protected', 'public', 'register', 'reinterpret_cast',
'requires', 'return', 'short', 'signed', 'sizeof', 'static',
'static_assert', 'static_cast', 'struct', 'switch', 'synchronized',
'template', 'this', 'thread_local', 'throw', 'true', 'try', 'typedef',
'typeid', 'typename', 'union', 'unsigned', 'using', 'virtual', 'void',
'volatile', 'wchar_t', 'while', 'xor', 'xor_eq', 'override', 'final',
'transaction_safe', 'transaction_safe_dynamic', 'if', 'elif', 'else',
'endif', 'defined', 'ifdef', 'ifndef', 'define', 'undef', 'include',
'line', 'error', 'pragma',
]
def sanitize_load(s):
# do not let yaml parse ON/OFF to boolean
for w in ["ON", "OFF", "on", "off"]:
s = re.sub(r":\s+" + w + "$", r": '" + w + "'", s)
# sub ${} to env value
s = re.sub(r"\${(\w+)}", lambda x: os.environ[x.group(1)], s)
return yaml.load(s)
def parse(path):
with open(path) as f:
config = sanitize_load(f.read())
return config
def parse_device_info(path):
conf = parse(path)
return conf["devices"]
class ModelKeys(object):
platform = "platform"
runtime = "runtime"
models = 'models'
graph_optimize_options = "graph_optimize_options"
input_tensors = "input_tensors"
input_shapes = "input_shapes"
input_data_types = "input_data_types"
input_data_formats = "input_data_formats"
input_ranges = "input_ranges"
output_tensors = "output_tensors"
output_shapes = "output_shapes"
output_data_types = "output_data_types"
output_data_formats = "output_data_formats"
check_tensors = "check_tensors"
check_shapes = "check_shapes"
model_file_path = "model_file_path"
model_sha256_checksum = "model_sha256_checksum"
weight_file_path = "weight_file_path"
weight_sha256_checksum = "weight_sha256_checksum"
quantize_range_file = "quantize_range_file"
quantize = "quantize"
quantize_schema = "quantize_schema"
quantize_large_weights = "quantize_large_weights"
quantize_stat = "quantize_stat"
change_concat_ranges = "change_concat_ranges"
winograd = "winograd"
cl_mem_type = "cl_mem_type"
data_type = "data_type"
subgraphs = "subgraphs"
validation_inputs_data = "validation_inputs_data"
class DataFormat(Enum):
NONE = 0
NHWC = 1
NCHW = 2
HWIO = 100
OIHW = 101
HWOI = 102
OHWI = 103
AUTO = 1000
def parse_data_format(str):
str = str.upper()
mace_check(str in [e.name for e in DataFormat],
"unknown data format %s" % str)
return DataFormat[str]
class DeviceType(Enum):
CPU = 0
GPU = 2
HEXAGON = 3
HTA = 4
APU = 5
CPU_GPU = 100
DEVICE_MAP = {
"cpu": DeviceType.CPU,
"gpu": DeviceType.GPU,
"hexagon": DeviceType.HEXAGON,
"dsp": DeviceType.HEXAGON,
"hta": DeviceType.HTA,
"apu": DeviceType.APU,
"cpu+gpu": DeviceType.CPU_GPU
}
def parse_device_type(str):
mace_check(str in DEVICE_MAP, "unknown device %s" % str)
return DEVICE_MAP[str]
class Platform(Enum):
TENSORFLOW = 0
CAFFE = 1
ONNX = 2
MEGENGINE = 3
def parse_platform(str):
str = str.upper()
mace_check(str in [e.name for e in Platform],
"unknown platform %s" % str)
return Platform[str]
DATA_TYPE_MAP = {
'float32': mace_pb2.DT_FLOAT,
'int32': mace_pb2.DT_INT32,
}
def parse_data_type(str):
if str == "float32":
return mace_pb2.DT_FLOAT
elif str == "int32":
return mace_pb2.DT_INT32
else:
mace_check(False, "data type %s not supported" % str)
def parse_internal_data_type(str):
if str == 'fp32_fp32':
return mace_pb2.DT_FLOAT
elif str == 'bf16_fp32':
return mace_pb2.DT_BFLOAT16
else:
return mace_pb2.DT_HALF
def to_list(x):
if isinstance(x, list):
return x
else:
return [x]
def parse_int_array(xs):
if len(xs) is 0:
return [1]
return [int(x) for x in xs.split(",")]
def parse_float_array(xs):
return [float(x) for x in xs.split(",")]
def normalize_model_config(conf):
conf = copy.deepcopy(conf)
if ModelKeys.subgraphs in conf:
subgraph = conf[ModelKeys.subgraphs][0]
del conf[ModelKeys.subgraphs]
conf.update(subgraph)
conf[ModelKeys.platform] = parse_platform(conf[ModelKeys.platform])
conf[ModelKeys.runtime] = parse_device_type(conf[ModelKeys.runtime])
if ModelKeys.quantize in conf and conf[ModelKeys.quantize] == 1:
conf[ModelKeys.data_type] = mace_pb2.DT_FLOAT
else:
if ModelKeys.data_type in conf:
conf[ModelKeys.data_type] = parse_internal_data_type(
conf[ModelKeys.data_type])
else:
conf[ModelKeys.data_type] = mace_pb2.DT_HALF
# parse input
conf[ModelKeys.input_tensors] = to_list(conf[ModelKeys.input_tensors])
conf[ModelKeys.input_tensors] = [str(i) for i in
conf[ModelKeys.input_tensors]]
input_count = len(conf[ModelKeys.input_tensors])
conf[ModelKeys.input_shapes] = [parse_int_array(shape) for shape in
to_list(conf[ModelKeys.input_shapes])]
mace_check(
len(conf[ModelKeys.input_shapes]) == input_count,
"input node count and shape count do not match")
input_data_types = [parse_data_type(dt) for dt in
to_list(conf.get(ModelKeys.input_data_types,
["float32"]))]
if len(input_data_types) == 1 and input_count > 1:
input_data_types = [input_data_types[0]] * input_count
mace_check(len(input_data_types) == input_count,
"the number of input_data_types should be "
"the same as input tensors")
conf[ModelKeys.input_data_types] = input_data_types
input_data_formats = [parse_data_format(df) for df in
to_list(conf.get(ModelKeys.input_data_formats,
["NHWC"]))]
if len(input_data_formats) == 1 and input_count > 1:
input_data_formats = [input_data_formats[0]] * input_count
mace_check(len(input_data_formats) == input_count,
"the number of input_data_formats should be "
"the same as input tensors")
conf[ModelKeys.input_data_formats] = input_data_formats
input_ranges = [parse_float_array(r) for r in
to_list(conf.get(ModelKeys.input_ranges,
["-1.0,1.0"]))]
if len(input_ranges) == 1 and input_count > 1:
input_ranges = [input_ranges[0]] * input_count
mace_check(len(input_ranges) == input_count,
"the number of input_ranges should be "
"the same as input tensors")
conf[ModelKeys.input_ranges] = input_ranges
# parse output
conf[ModelKeys.output_tensors] = to_list(conf[ModelKeys.output_tensors])
conf[ModelKeys.output_tensors] = [str(i) for i in
conf[ModelKeys.output_tensors]]
output_count = len(conf[ModelKeys.output_tensors])
conf[ModelKeys.output_shapes] = [parse_int_array(shape) for shape in
to_list(conf[ModelKeys.output_shapes])]
mace_check(len(conf[ModelKeys.output_tensors]) == output_count,
"output node count and shape count do not match")
output_data_types = [parse_data_type(dt) for dt in
to_list(conf.get(ModelKeys.output_data_types,
["float32"]))]
if len(output_data_types) == 1 and output_count > 1:
output_data_types = [output_data_types[0]] * output_count
mace_check(len(output_data_types) == output_count,
"the number of output_data_types should be "
"the same as output tensors")
conf[ModelKeys.output_data_types] = output_data_types
output_data_formats = [parse_data_format(df) for df in
to_list(conf.get(ModelKeys.output_data_formats,
["NHWC"]))]
if len(output_data_formats) == 1 and output_count > 1:
output_data_formats = [output_data_formats[0]] * output_count
mace_check(len(output_data_formats) == output_count,
"the number of output_data_formats should be "
"the same as output tensors")
conf[ModelKeys.output_data_formats] = output_data_formats
if ModelKeys.check_tensors in conf:
conf[ModelKeys.check_tensors] = to_list(conf[ModelKeys.check_tensors])
conf[ModelKeys.check_shapes] = [parse_int_array(shape) for shape in
to_list(conf[ModelKeys.check_shapes])]
mace_check(len(conf[ModelKeys.check_tensors]) == len(
conf[ModelKeys.check_shapes]),
"check tensors count and shape count do not match.")
MaceLogger.summary(conf)
return conf
| 34.424837
| 78
| 0.638599
| 1,359
| 10,534
| 4.701987
| 0.231788
| 0.077308
| 0.0241
| 0.015023
| 0.371831
| 0.341002
| 0.291862
| 0.189045
| 0.13349
| 0.111424
| 0
| 0.01448
| 0.24606
| 10,534
| 305
| 79
| 34.537705
| 0.790103
| 0.062749
| 0
| 0.077253
| 0
| 0
| 0.189143
| 0.013496
| 0
| 0
| 0
| 0
| 0.004292
| 1
| 0.051502
| false
| 0
| 0.051502
| 0.004292
| 0.399142
| 0.004292
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d7980863ca4f07807819ec6305da79cbf107a53
| 14,852
|
py
|
Python
|
main_cross_testing_iseg.py
|
sami-ets/DeepNormalize
|
5ed53280d98a201d45bb9973e79736136273eaea
|
[
"MIT"
] | 1
|
2020-05-21T20:52:48.000Z
|
2020-05-21T20:52:48.000Z
|
main_cross_testing_iseg.py
|
sami-ets/DeepNormalize
|
5ed53280d98a201d45bb9973e79736136273eaea
|
[
"MIT"
] | null | null | null |
main_cross_testing_iseg.py
|
sami-ets/DeepNormalize
|
5ed53280d98a201d45bb9973e79736136273eaea
|
[
"MIT"
] | 1
|
2020-05-21T20:52:54.000Z
|
2020-05-21T20:52:54.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2019 Pierre-Luc Delisle. All Rights Reserved.
#
# Licensed under the MIT License;
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/MIT
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import logging
import multiprocessing
import numpy as np
import os
import random
import torch
import torch.backends.cudnn as cudnn
from kerosene.configs.configs import RunConfiguration, DatasetConfiguration
from kerosene.configs.parsers import YamlConfigurationParser
from kerosene.loggers.visdom import PlotType, PlotFrequency
from kerosene.loggers.visdom.config import VisdomConfiguration
from kerosene.loggers.visdom.visdom import VisdomLogger, VisdomData
from kerosene.training.trainers import ModelTrainerFactory
from samitorch.inputs.utils import augmented_sample_collate
from torch.utils.data import DataLoader
from torch.utils.data.dataloader import DataLoader
from deepNormalize.config.parsers import ArgsParserFactory, ArgsParserType
from deepNormalize.factories.customModelFactory import CustomModelFactory
from deepNormalize.factories.customTrainerFactory import TrainerFactory
from deepNormalize.inputs.datasets import iSEGSliceDatasetFactory, MRBrainSSliceDatasetFactory, ABIDESliceDatasetFactory
from deepNormalize.nn.criterions import CustomCriterionFactory
from deepNormalize.utils.constants import *
from deepNormalize.utils.image_slicer import ImageReconstructor
cudnn.benchmark = True
cudnn.enabled = True
np.random.seed(42)
random.seed(42)
if __name__ == '__main__':
# Basic settings
logging.basicConfig(level=logging.INFO)
torch.set_num_threads(multiprocessing.cpu_count())
torch.set_num_interop_threads(multiprocessing.cpu_count())
args = ArgsParserFactory.create_parser(ArgsParserType.MODEL_TRAINING).parse_args()
# Create configurations.
run_config = RunConfiguration(use_amp=args.use_amp, local_rank=args.local_rank, amp_opt_level=args.amp_opt_level)
model_trainer_configs, training_config = YamlConfigurationParser.parse(args.config_file)
if not isinstance(model_trainer_configs, list):
model_trainer_configs = [model_trainer_configs]
dataset_configs = YamlConfigurationParser.parse_section(args.config_file, "dataset")
dataset_configs = {k: DatasetConfiguration(v) for k, v, in dataset_configs.items()}
data_augmentation_config = YamlConfigurationParser.parse_section(args.config_file, "data_augmentation")
config_html = [training_config.to_html(), list(map(lambda config: config.to_html(), dataset_configs.values())),
list(map(lambda config: config.to_html(), model_trainer_configs))]
# Prepare the data.
train_datasets = list()
valid_datasets = list()
test_datasets = list()
reconstruction_datasets = list()
iSEG_train = None
iSEG_CSV = None
MRBrainS_train = None
MRBrainS_CSV = None
ABIDE_train = None
ABIDE_CSV = None
iSEG_augmentation_strategy = None
MRBrainS_augmentation_strategy = None
ABIDE_augmentation_strategy = None
# Initialize the model trainers
model_trainer_factory = ModelTrainerFactory(model_factory=CustomModelFactory(),
criterion_factory=CustomCriterionFactory())
model_trainers = model_trainer_factory.create(model_trainer_configs)
if not isinstance(model_trainers, list):
model_trainers = [model_trainers]
# Create datasets
if dataset_configs.get("iSEG", None) is not None:
iSEG_train, iSEG_valid, iSEG_test, iSEG_reconstruction = iSEGSliceDatasetFactory.create_train_valid_test(
source_dir=dataset_configs["iSEG"].path,
modalities=dataset_configs["iSEG"].modalities,
dataset_id=ISEG_ID,
test_size=dataset_configs["iSEG"].validation_split,
max_subjects=dataset_configs["iSEG"].max_subjects,
max_num_patches=dataset_configs["iSEG"].max_num_patches,
augment=dataset_configs["iSEG"].augment,
patch_size=dataset_configs["iSEG"].patch_size,
step=dataset_configs["iSEG"].step,
test_patch_size=dataset_configs["iSEG"].test_patch_size,
test_step=dataset_configs["iSEG"].test_step,
data_augmentation_config=data_augmentation_config)
train_datasets.append(iSEG_train)
valid_datasets.append(iSEG_valid)
reconstruction_datasets.append(iSEG_reconstruction)
if dataset_configs.get("MRBrainS", None) is not None:
MRBrainS_train, MRBrainS_valid, MRBrainS_test, MRBrainS_reconstruction = MRBrainSSliceDatasetFactory.create_train_valid_test(
source_dir=dataset_configs["MRBrainS"].path,
modalities=dataset_configs["MRBrainS"].modalities,
dataset_id=MRBRAINS_ID,
test_size=dataset_configs["MRBrainS"].validation_split,
max_subjects=dataset_configs["MRBrainS"].max_subjects,
max_num_patches=dataset_configs["MRBrainS"].max_num_patches,
augment=dataset_configs["MRBrainS"].augment,
patch_size=dataset_configs["MRBrainS"].patch_size,
step=dataset_configs["MRBrainS"].step,
test_patch_size=dataset_configs["MRBrainS"].test_patch_size,
test_step=dataset_configs["MRBrainS"].test_step,
data_augmentation_config=data_augmentation_config)
test_datasets.append(MRBrainS_test)
reconstruction_datasets.append(MRBrainS_reconstruction)
if dataset_configs.get("ABIDE", None) is not None:
ABIDE_train, ABIDE_valid, ABIDE_test, ABIDE_reconstruction = ABIDESliceDatasetFactory.create_train_valid_test(
source_dir=dataset_configs["ABIDE"].path,
modalities=dataset_configs["ABIDE"].modalities,
dataset_id=ABIDE_ID,
sites=dataset_configs["ABIDE"].sites,
max_subjects=dataset_configs["ABIDE"].max_subjects,
test_size=dataset_configs["ABIDE"].validation_split,
max_num_patches=dataset_configs["ABIDE"].max_num_patches,
augment=dataset_configs["ABIDE"].augment,
patch_size=dataset_configs["ABIDE"].patch_size,
step=dataset_configs["ABIDE"].step,
test_patch_size=dataset_configs["ABIDE"].test_patch_size,
test_step=dataset_configs["ABIDE"].test_step,
data_augmentation_config=data_augmentation_config)
train_datasets.append(ABIDE_train)
valid_datasets.append(ABIDE_valid)
test_datasets.append(ABIDE_test)
reconstruction_datasets.append(ABIDE_reconstruction)
if len(list(dataset_configs.keys())) == 2:
segmentation_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]],
patch_size=dataset_configs["iSEG"].test_patch_size,
reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs["iSEG"].test_step,
models=[model_trainers[0]],
segment=True,
batch_size=8)
input_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]],
patch_size=dataset_configs["iSEG"].test_patch_size,
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
batch_size=50)
gt_reconstructor = ImageReconstructor(
[iSEG_reconstruction._target_images[0], MRBrainS_reconstruction._target_images[0]],
patch_size=dataset_configs["iSEG"].test_patch_size,
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
is_ground_truth=True,
batch_size=50)
if dataset_configs["iSEG"].augment:
augmented_input_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]],
patch_size=dataset_configs["iSEG"].test_patch_size,
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
batch_size=50,
alpha=data_augmentation_config["test"]["bias_field"]["alpha"][0],
prob_bias=data_augmentation_config["test"]["bias_field"]["prob_bias"],
snr=data_augmentation_config["test"]["noise"]["snr"],
prob_noise=data_augmentation_config["test"]["noise"]["prob_noise"])
else:
augmented_input_reconstructor = None
augmented_normalized_input_reconstructor = None
else:
segmentation_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0],
ABIDE_reconstruction._source_images[0]],
patch_size=(1, 32, 32, 32),
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
models=[model_trainers[0]],
normalize_and_segment=True,
batch_size=4)
input_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0],
ABIDE_reconstruction._source_images[0]],
patch_size=(1, 32, 32, 32),
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
batch_size=50)
gt_reconstructor = ImageReconstructor(
[iSEG_reconstruction._target_images[0], MRBrainS_reconstruction._target_images[0],
ABIDE_reconstruction._target_images[0]],
patch_size=(1, 32, 32, 32),
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
batch_size=50,
is_ground_truth=True)
if dataset_configs["iSEG"].augment:
augmented_input_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0],
ABIDE_reconstruction._source_images[0]],
patch_size=(1, 32, 32, 32),
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
batch_size=50,
alpha=data_augmentation_config["test"]["bias_field"]["alpha"][0],
prob_bias=data_augmentation_config["test"]["bias_field"]["prob_bias"],
snr=data_augmentation_config["test"]["noise"]["snr"],
prob_noise=data_augmentation_config["test"]["noise"]["prob_noise"])
else:
augmented_input_reconstructor = None
augmented_normalized_input_reconstructor = None
# Concat datasets.
if len(dataset_configs) > 1:
train_dataset = torch.utils.data.ConcatDataset(train_datasets)
valid_dataset = torch.utils.data.ConcatDataset(valid_datasets)
test_dataset = torch.utils.data.ConcatDataset(test_datasets)
else:
train_dataset = train_datasets[0]
valid_dataset = valid_datasets[0]
test_dataset = test_datasets[0]
# Create loaders.
dataloaders = list(map(lambda dataset: DataLoader(dataset,
training_config.batch_size,
sampler=None,
shuffle=True,
num_workers=args.num_workers,
collate_fn=augmented_sample_collate,
drop_last=True,
pin_memory=True),
[train_dataset, valid_dataset, test_dataset]))
# Initialize the loggers.
visdom_config = VisdomConfiguration.from_yml(args.config_file, "visdom")
exp = args.config_file.split("/")[-3:]
if visdom_config.save_destination is not None:
save_folder = visdom_config.save_destination + os.path.join(exp[0], exp[1],
os.path.basename(
os.path.normpath(visdom_config.env)))
else:
save_folder = "saves/{}".format(os.path.basename(os.path.normpath(visdom_config.env)))
[os.makedirs("{}/{}".format(save_folder, model), exist_ok=True)
for model in
["Discriminator", "Generator", "Segmenter"]]
visdom_logger = VisdomLogger(visdom_config)
visdom_logger(VisdomData("Experiment", "Experiment Config", PlotType.TEXT_PLOT, PlotFrequency.EVERY_EPOCH, None,
config_html))
visdom_logger(VisdomData("Experiment", "Patch count", PlotType.BAR_PLOT, PlotFrequency.EVERY_EPOCH,
x=[len(iSEG_train) if iSEG_train is not None else 0,
len(MRBrainS_train) if MRBrainS_train is not None else 0,
len(ABIDE_train) if ABIDE_train is not None else 0],
y=["iSEG", "MRBrainS", "ABIDE"], params={"opts": {"title": "Patch count"}}))
trainer = TrainerFactory(training_config.trainer).create(training_config,
model_trainers,
dataloaders,
reconstruction_datasets,
None,
input_reconstructor,
segmentation_reconstructor,
augmented_input_reconstructor,
None,
gt_reconstructor,
run_config,
dataset_configs,
save_folder,
visdom_logger)
trainer.train(training_config.nb_epochs)
| 52.295775
| 133
| 0.637961
| 1,509
| 14,852
| 5.963552
| 0.178926
| 0.085565
| 0.048005
| 0.045005
| 0.439604
| 0.383154
| 0.339482
| 0.309034
| 0.284476
| 0.274919
| 0
| 0.01535
| 0.271883
| 14,852
| 283
| 134
| 52.480565
| 0.816812
| 0.05373
| 0
| 0.299578
| 0
| 0
| 0.040057
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.097046
| 0
| 0.097046
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d799babef8aac803fb3da8b5588e54e7c3ffd6d
| 10,137
|
py
|
Python
|
docs/10.level3_demo_streaming/pc_server/server.py
|
FaBoPlatform/RobotCarAI
|
c89d3330a2beda0f253733d3252b2b035b153b6b
|
[
"Apache-2.0"
] | 10
|
2017-12-27T20:51:26.000Z
|
2020-05-27T05:29:13.000Z
|
docs/10.level3_demo_streaming/pc_server/server.py
|
FaBoPlatform/RobotCarAI
|
c89d3330a2beda0f253733d3252b2b035b153b6b
|
[
"Apache-2.0"
] | null | null | null |
docs/10.level3_demo_streaming/pc_server/server.py
|
FaBoPlatform/RobotCarAI
|
c89d3330a2beda0f253733d3252b2b035b153b6b
|
[
"Apache-2.0"
] | 3
|
2017-12-27T20:51:30.000Z
|
2019-03-15T02:49:25.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ClientからOpenCV画像データを受け取り、ライン検出して制御命令を送る
# Server: Jetson TX2
# Client: Jetson TX2/Raspberry Pi3 Docker
# 1. FFMPEG UDP StreamingをClientで実行する。AWS向け10FPS,Jetson TX2向け1FPS
# 2. Serverを起動する
# 3. Clientを起動する
# コード修正
# lib/camera.py: vid = cv2.VideoCapture()を環境に合わせて修正する必要がある
# lib/object_detection.py: /home/ubuntu/notebooks/github/SSD-Tensorflow/ を環境に合わせて修正する必要がある
'''
Python 3.6
送信するmessageは.encode('ascii')や.encode('utf-8')等でエンコードする必要がる
ここではClientから送られてくるOpenCV BGR画像データが'ascii'に変換されているので'ascii'で統一している
'''
print("wait. launching...")
import socket, select
import time
import cv2
import numpy as np
import time
import os
import sys
import logging
import threading
import numpy as np
from lib.functions import *
from lib.object_detection import ObjectDetection
from lib.opencv_lane_detection import LaneDetection
from lib.webcam import WebcamVideoStream
# ログ設定
logging.basicConfig(level=logging.DEBUG,
format='[%(levelname)s] time:%(created).8f pid:%(process)d pn:%(processName)-10s tid:%(thread)d tn:%(threadName)-10s fn:%(funcName)-10s %(message)s',
)
# 解析、送信スレッド動作フラグ
is_analyze_running = False
sock = None
out = None
# IPM変換後の画像におけるx,yメートル(黒い部分も含む)
X_METER=1.5
Y_METER=1
# ライン検出クラス
ld = None
# 物体検出クラス
od = None
def do_analyze():
global is_analyze_running
global sock
global out
global X_METER
global Y_METER
global ld
global od
# 映像を保存するかどうか
IS_SAVE = True
OUTPUT_DIR ='./'
OUTPUT_FILENAME = 'received.avi'
HANDLE_ANGLE = 42
frame_counter = 0
fourcc = None
control = None
roi_vertices = None
ipm_vertices = None
speed = None
# 映像準備
camera = WebcamVideoStream()
cols,rows,fps,fourcc = camera.init_webcam()
camera.start()
fps = 1
if IS_SAVE:
out = cv2.VideoWriter(os.path.join(OUTPUT_DIR, OUTPUT_FILENAME), int(fourcc), fps, (int(cols), int(rows)))
########################################
# ライン検出準備
########################################
ld = LaneDetection(X_METER,Y_METER,cols=cols,rows=rows)
while is_analyze_running:
frame_start_time = time.time()
#time.sleep(0.2)
########################################
# 映像取得
########################################
cv_bgr = camera.read()
frame_counter += 1
########################################
# 物体認識
########################################
# avi動画に保存する
if IS_SAVE:
out.write(cv_bgr)
rclasses,rscores,rbboxes = od.get_detection(cv_bgr)
print(rclasses,rscores,rbboxes)
if len(rclasses) > 0:
prediction_class = np.min(rclasses)
if prediction_class == 1:
# 止まれを検出した
is_need_header_receive = True
control='0,0,'
sock.sendall(("CONTROL,"+ control).encode('ascii'))
continue
elif prediction_class == 2:
# 10を検出した
speed = 40
elif prediction_class == 3:
# 20を検出した
speed = 50
elif prediction_class == 4:
# 30を検出した
speed = 60
else:
# 物体検出無し
if speed is None:
speed = 40
handle_angle = 0
########################################
# ライン検出
########################################
ld.cv_bgr = cv_bgr
# ラインを検出する
try:
tilt1_deg,tilt2_deg,angle1_deg,angle2_deg,curve1_r,curve2_r, \
meters_from_center = ld.lane_detection()
except:
# ライン検出失敗
is_need_header_receive = True
control='0,0,'
sock.sendall(("CONTROL,"+ control).encode('ascii'))
continue
########################################
# 速度調整を行う
########################################
#if np.abs(angle2_deg) > np.abs(angle1_deg):
# speed = 50
#else:
# speed = 60
'''
左右について
tilt_deg: -が右、+が左
angle_deg: +が右、-が左
meters_from_center: -が右にいる、+が左にいる
handle_angle: +が右、-が左
'''
########################################
# ハンドル角調整を行う
########################################
handle_angle = -1*tilt1_deg
if meters_from_center >= 0:
# 左にいる
if np.abs(meters_from_center)*100 > 20:
# とても離れて左にいる:右に全開で曲がる
handle_angle=HANDLE_ANGLE
elif np.abs(meters_from_center)*100 > 10:
if tilt2_deg > 0 :
# 離れて左いる、奥は左カーブ:右に少し曲がる
handle_angle=HANDLE_ANGLE/2
else:
# 離れて左いる、奥は右カーブ:右に全開で曲がる
handle_angle=HANDLE_ANGLE
else:
# 右にいる
if np.abs(meters_from_center)*100 > 20:
# とても離れて右にいる:左に全開で曲がる
handle_angle=-1*HANDLE_ANGLE
elif np.abs(meters_from_center)*100 > 10:
if tilt2_deg < 0 :
# 離れて右いる、奥は右カーブ:左に少し曲がる
handle_angle=-1*HANDLE_ANGLE/2
else:
# 離れて右いる、奥は左カーブ、左に全開で曲がる
handle_angle=-1*HANDLE_ANGLE
# 動作可能な角度内に調整する
if handle_angle > HANDLE_ANGLE:
handle_angle = HANDLE_ANGLE
if handle_angle < -1*HANDLE_ANGLE:
handle_angle = -1*HANDLE_ANGLE
# 車両制御送信
control=str(speed)+','+str(handle_angle)+','
print("speed={},handle_angle={},CONTROL,{}".format(speed,handle_angle,control))
sock.sendall(("CONTROL,"+ control).encode('ascii'))
frame_end_time = time.time()
print("FPS={}".format(round(1/(frame_end_time-frame_start_time),2)))
def main():
global is_analyze_running
global sock
global out
global ld
global od
# 通信設定
HOST = '192.168.0.77' # Server IP Address
PORT = 6666 # Server TCP Port
#HOST = 'a32158c3da9f' # AWS Docker
#PORT = 8091 # AWS TCP Port
#HOST = '2204f9b0e871' # PC Docker
#PORT = 8091 # PC TCP Port
########################################
# 通信準備
########################################
connected_clients_sockets = []
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind((HOST, PORT))
server_socket.listen(10)
connected_clients_sockets.append(server_socket)
# Headerの受信が必要かどうか。Headerを受信したら、encode('ascii')を通さずに受信データを解析する
is_need_header_receive = True
########################################
# 物体認識準備
########################################
od = ObjectDetection()
print("Server start")
try:
while True:
########################################
# 受信待ち
########################################
read_sockets, write_sockets, error_sockets = select.select(connected_clients_sockets, [], [])
for sock in read_sockets:
if sock == server_socket:
sockfd, client_address = server_socket.accept()
connected_clients_sockets.append(sockfd)
else:
# ClientがServerにHeaderを送る時は4096 Byte以下にすること
packet = sock.recv(4096)
print(type(packet))
#
if is_need_header_receive:
print('header')
packet = packet.decode('ascii')
txt = str(packet)
if packet:
print('packet True')
if packet == 'START':
is_analyze_running = True
t = threading.Thread(target=do_analyze)
t.start()
elif packet.startswith('BYE'):
print('got BYE')
is_need_header_receive = True
is_analyze_running = False
sock.shutdown(socket.SHUT_RDWR)
sock.close()
connected_clients_sockets.remove(sock)
if out is not None:
out.release()
else:
print('client disconnect')
is_need_header_receive = True
is_analyze_running = False
sock.shutdown(socket.SHUT_RDWR)
sock.close()
connected_clients_sockets.remove(sock)
if out is not None:
out.release()
if not is_need_header_receive:
# ここには来ない
print('body')
if packet:
print('packet True')
is_need_header_receive = True
else:
print('data finished')
is_need_header_receive = True
is_analyze_running = False
sock.shutdown(socket.SHUT_RDWR)
sock.close()
connected_clients_sockets.remove(sock)
if out is not None:
out.release()
except:
import traceback
traceback.print_exc()
finally:
is_need_header_receive = True
is_analyze_running = False
sock.shutdown(socket.SHUT_RDWR)
sock.close()
connected_clients_sockets.remove(sock)
server_socket.close()
if out is not None:
out.release()
if __name__ == '__main__':
main()
print("end server")
| 32.594855
| 169
| 0.487126
| 948
| 10,137
| 5.006329
| 0.313291
| 0.062579
| 0.025284
| 0.040034
| 0.278129
| 0.228614
| 0.201011
| 0.201011
| 0.183312
| 0.163506
| 0
| 0.023891
| 0.364112
| 10,137
| 310
| 170
| 32.7
| 0.71238
| 0.124297
| 0
| 0.427835
| 0
| 0.005155
| 0.04855
| 0.007062
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010309
| false
| 0
| 0.07732
| 0
| 0.087629
| 0.07732
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d79ef8df5146da3723b7ca2139e31f87b3fe948
| 5,498
|
py
|
Python
|
client.py
|
zackorndorff/revsync
|
17255aebd281edffb3f3330c21cda00039bc51a3
|
[
"MIT"
] | 94
|
2017-05-13T05:39:06.000Z
|
2022-01-11T18:14:54.000Z
|
client.py
|
zackorndorff/revsync
|
17255aebd281edffb3f3330c21cda00039bc51a3
|
[
"MIT"
] | 5
|
2020-06-11T19:09:43.000Z
|
2021-05-01T05:01:55.000Z
|
client.py
|
zackorndorff/revsync
|
17255aebd281edffb3f3330c21cda00039bc51a3
|
[
"MIT"
] | 25
|
2017-05-13T18:15:23.000Z
|
2022-02-03T22:32:41.000Z
|
from collections import defaultdict
import json
import re
import redis
import threading
import time
import traceback
import uuid
import base64
import binascii
TTL = 2
hash_keys = ('cmd', 'user')
cmd_hash_keys = {
'comment': ('addr',),
'extra_comment': ('addr',),
'area_comment': ('addr',),
'rename': ('addr',),
'stackvar_renamed': ('addr', 'offset', 'name',),
'struc_created': ('struc_name', 'is_union',),
'struc_deleted': ('struc_name',),
'struc_renamed': ('old_name', 'new_name',),
'struc_member_created': ('struc_name', 'offset', 'member_name', 'size', 'flag',),
'struc_member_deleted': ('struc_name', 'offset',),
'struc_member_renamed': ('struc_name', 'offset', 'member_name',),
'struc_member_changed': ('struc_name', 'offset', 'size',),
}
key_dec = {
'c': 'cmd',
'a': 'addr',
'u': 'user',
't': 'text',
'i': 'uuid',
'b': 'blocks'
}
key_enc = dict((v, k) for k, v in key_dec.items())
nick_filter = re.compile(r'[^a-zA-Z0-9_\-]')
def decode(data):
d = json.loads(data)
return dict((key_dec.get(k, k), v) for k, v in d.items())
def dtokey(d):
return tuple(((k, v) for k, v in sorted(d.items()) if k not in ('user', 'ts', 'uuid')))
def remove_ttl(a):
now = time.time()
return [d for d in a if now - d[0] < TTL]
class Client:
def __init__(self, host, port, nick, password=None):
self.r = redis.StrictRedis(host=host, port=port, password=password, socket_connect_timeout=5)
self.r.info()
self.nick = nick_filter.sub('_', nick)
self.ps = {}
self.nolock = threading.Lock()
self.nosend = defaultdict(list)
self.uuid = str(base64.b64encode(binascii.unhexlify(uuid.uuid4().hex)).decode('ascii'))
def debounce(self, no, data):
dkey = dtokey(data)
now = time.time()
with self.nolock:
for data in no:
ts = data[0]
key = data[1:]
if dkey == key and now - ts < TTL:
no.remove(data)
return True
return False
def _sub_thread(self, ps, cb, key):
for item in ps.listen():
try:
if item['type'] == 'message':
data = decode(item['data'])
if 'user' in data:
data['user'] = nick_filter.sub('_', data['user'])
# reject our own messages
if data.get('uuid') == self.uuid:
continue
with self.nolock:
self.nosend[key] = remove_ttl(self.nosend[key])
self.nosend[key].append((time.time(),) + dtokey(data))
cb(key, data)
elif item['type'] == 'subscribe':
decoded = []
for data in self.r.lrange(key, 0, -1):
try:
decoded.append(decode(data))
except Exception:
print('error decoding history', data)
traceback.print_exc()
state = []
dedup = set()
for data in reversed(decoded):
cmd = data.get('cmd')
if cmd:
keys = hash_keys + cmd_hash_keys.get(cmd, ())
hashkey = tuple([str(data.get(k)) for k in keys])
if all(hashkey):
if hashkey in dedup:
continue
dedup.add(hashkey)
state.append(data)
for data in reversed(state):
try:
with self.nolock:
self.nosend[key].append((time.time(),) + dtokey(data))
cb(key, data, replay=True)
except Exception:
print('error replaying history', data)
traceback.print_exc()
else:
print('unknown redis push', item)
except Exception:
print('error processing item', item)
traceback.print_exc()
def join(self, key, cb):
ps = self.r.pubsub()
ps.subscribe(key)
t = threading.Thread(target=self._sub_thread, args=(ps, cb, key))
t.daemon = True
t.start()
self.ps[key] = ps
self.publish(key, {'cmd': 'join'}, perm=False)
def leave(self, key):
ps = self.ps.pop(key, None)
if ps:
ps.unsubscribe(key)
def publish(self, key, data, perm=True, send_uuid=True):
if self.debounce(self.nosend[key], data):
return
data['user'] = self.nick
data['ts'] = self.r.time()[0]
if send_uuid:
data['uuid'] = self.uuid
data = dict((key_enc.get(k, k), v) for k, v in data.items())
data = json.dumps(data, separators=(',', ':'), sort_keys=True)
if perm:
self.r.rpush(key, data)
self.r.publish(key, data)
def push(self, key, data, send_uuid=True):
if send_uuid:
data['uuid'] = self.uuid
data = dict((key_enc.get(k, k), v) for k, v in data.items())
data = json.dumps(data, separators=(',', ':'), sort_keys=True)
self.r.lpush(key, data)
| 34.797468
| 101
| 0.483812
| 634
| 5,498
| 4.094637
| 0.263407
| 0.006934
| 0.00963
| 0.013482
| 0.178351
| 0.137519
| 0.118259
| 0.118259
| 0.113251
| 0.113251
| 0
| 0.004939
| 0.373954
| 5,498
| 157
| 102
| 35.019108
| 0.749274
| 0.004183
| 0
| 0.185714
| 0
| 0
| 0.10433
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0.014286
| 0.071429
| 0.007143
| 0.192857
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d7c0c4ed976d906f1c11a70f28b08240f91a61e
| 1,109
|
py
|
Python
|
ontask/condition/urls.py
|
pinheiroo27/ontask_b
|
23fee8caf4e1c5694a710a77f3004ca5d9effeac
|
[
"MIT"
] | 33
|
2017-12-02T04:09:24.000Z
|
2021-11-07T08:41:57.000Z
|
ontask/condition/urls.py
|
pinheiroo27/ontask_b
|
23fee8caf4e1c5694a710a77f3004ca5d9effeac
|
[
"MIT"
] | 189
|
2017-11-16T04:06:29.000Z
|
2022-03-11T23:35:59.000Z
|
ontask/condition/urls.py
|
pinheiroo27/ontask_b
|
23fee8caf4e1c5694a710a77f3004ca5d9effeac
|
[
"MIT"
] | 30
|
2017-11-30T03:35:44.000Z
|
2022-01-31T03:08:08.000Z
|
# -*- coding: utf-8 -*-
"""URLs to manipulate columns."""
from django.urls import path
from ontask.condition import views
app_name = 'condition'
urlpatterns = [
#
# FILTERS
#
path(
'<int:pk>/create_filter/',
views.FilterCreateView.as_view(),
name='create_filter'),
path('<int:pk>/edit_filter/', views.edit_filter, name='edit_filter'),
path('<int:pk>/delete_filter/', views.delete_filter, name='delete_filter'),
#
# CONDITIONS
#
path(
'<int:pk>/create_condition/',
views.ConditionCreateView.as_view(),
name='create_condition'),
path(
'<int:pk>/edit_condition/',
views.edit_condition,
name='edit_condition'),
path(
'<int:pk>/delete_condition/',
views.delete_condition,
name='delete_condition'),
# Clone the condition
path(
'<int:pk>/clone_condition/',
views.clone_condition,
name='clone_condition'),
path(
'<int:pk>/<int:action_pk>/clone_condition/',
views.clone_condition,
name='clone_condition'),
]
| 23.595745
| 79
| 0.598738
| 119
| 1,109
| 5.361345
| 0.268908
| 0.087774
| 0.112853
| 0.112853
| 0.166144
| 0.166144
| 0.166144
| 0.166144
| 0.166144
| 0
| 0
| 0.0012
| 0.248873
| 1,109
| 46
| 80
| 24.108696
| 0.764706
| 0.080252
| 0
| 0.322581
| 0
| 0
| 0.328699
| 0.207547
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.064516
| 0
| 0.064516
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d7c6de5b4a074a83b3f637a428933b749ec22a8
| 13,437
|
py
|
Python
|
VideoClassification/SegmentLevelClassifier/model.py
|
googleinterns/via-content-understanding
|
ca12ebe6aa4da16224a8ca86dc45aaaaa7cfda09
|
[
"Apache-2.0"
] | 1
|
2020-05-22T14:51:28.000Z
|
2020-05-22T14:51:28.000Z
|
VideoClassification/SegmentLevelClassifier/model.py
|
googleinterns/via-content-understanding
|
ca12ebe6aa4da16224a8ca86dc45aaaaa7cfda09
|
[
"Apache-2.0"
] | 4
|
2020-05-31T21:57:44.000Z
|
2020-07-23T23:32:52.000Z
|
VideoClassification/SegmentLevelClassifier/model.py
|
googleinterns/via-content-understanding
|
ca12ebe6aa4da16224a8ca86dc45aaaaa7cfda09
|
[
"Apache-2.0"
] | 1
|
2020-05-19T17:28:10.000Z
|
2020-05-19T17:28:10.000Z
|
"""Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Defines the architecture of the Video Classifier.
"""
import math
import tensorflow as tf
class NetVLAD(tf.keras.layers.Layer):
"""Applies NetVLAD to the input.
Args:
num_clusters: The number of clusters to use.
input_shape: 3D tensor denoting the input shape of the NetVLAD layer.
Input Shape:
3D tensor with shape: `(batch_size, time, feature_dim)`.
Output shape:
2D tensor with shape: `(batch_size, feature_dim * num_clusters)`.
"""
def __init__(self, num_clusters, input_shape, **kwargs):
super().__init__(**kwargs)
if num_clusters <= 0:
raise ValueError("`num_clusters` must be greater than 1: %i" % num_clusters)
self.num_clusters = num_clusters
feature_dim = input_shape[-1]
if not isinstance(feature_dim, int):
feature_dim = feature_dim.value
self.fc = tf.keras.layers.Dense(
units=self.num_clusters,
activation=tf.nn.softmax,
kernel_regularizer=tf.keras.regularizers.l2(1e-5),
name="vlad_fc" + str(num_clusters)
)
self.cluster_centers = self.add_weight(
shape=(1, feature_dim, self.num_clusters),
initializer=tf.keras.initializers.TruncatedNormal(
stddev=1.0 / math.sqrt(feature_dim)
),
trainable=True,
name="cluster_centers" + str(num_clusters)
)
self.feature_dim = feature_dim
self.max_frames = input_shape[-2]
def call(self, frames):
"""Apply the NetVLAD module to the given frames.
Args:
frames: A tensor with shape [batch_size, max_frames, feature_dim].
Returns:
vlad_out: A tensor with shape [batch_size, feature_dim * num_clusters].
Raises:
ValueError: If the `feature_dim` of input is not defined.
"""
feature_dim = self.feature_dim
max_frames = self.max_frames
frames = tf.reshape(frames, (-1, feature_dim))
activation = self.fc(frames)
activation = tf.reshape(activation, (-1, max_frames, self.num_clusters))
activation_sum = tf.math.reduce_sum(activation, axis=-2, keepdims=True)
cluster_activation = activation_sum * self.cluster_centers
frames = tf.reshape(frames, (-1, max_frames, feature_dim))
activation = tf.transpose(
tf.matmul(tf.transpose(activation, perm=(0, 2, 1)), frames), perm=(0, 2, 1)
)
vlad_out = activation - cluster_activation
vlad_out = tf.nn.l2_normalize(vlad_out, 1)
vlad_out = tf.reshape(vlad_out, (-1, feature_dim * self.num_clusters))
vlad_out = tf.nn.l2_normalize(vlad_out, 1)
return vlad_out
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape).as_list()
return tf.TensorShape([input_shape[0], input_shape[-1] * self.num_clusters])
def get_config(self):
config = {"num_clusters": self.num_clusters}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
class ContextGating(tf.keras.layers.Layer):
"""Implements the Context Gating Layer from https://arxiv.org/abs/1706.06905
Input shape:
2D tensor with shape: `(batch_size, feature_dim)`.
Output shape:
2D tensor with shape: `(batch_size, feature_dim)`.
"""
def __init__(self, input_shape, **kwargs):
super(ContextGating, self).__init__(**kwargs)
feature_dim = input_shape[-1]
if not isinstance(feature_dim, int):
feature_dim = feature_dim.value
self.fc = tf.keras.layers.Dense(
units=feature_dim,
activation=tf.nn.sigmoid,
kernel_regularizer=tf.keras.regularizers.l2(1e-5),
)
def call(self, model_input):
"""Apply the ContextGating module to the given input.
Args:
model_input: A tensor with shape [batch_size, feature_dim].
Returns:
A tensor with shape [batch_size, feature_dim].
Raises:
ValueError: If the `feature_dim` of model_input is not defined.
"""
model_input.shape.assert_has_rank(2)
feature_dim = model_input.shape.as_list()[-1]
if feature_dim is None:
raise ValueError("Last dimension must be defined.")
context_gate = self.fc(model_input)
output = tf.math.multiply(context_gate, model_input)
return output
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super().get_config()
return dict(list(base_config.items()))
class MOELogistic(tf.keras.layers.Layer):
"""Implements a Mixture of Logistic Experts classifier.
Input shape:
2D tensor with shape: `(batch_size, feature_dim)`.
Output shape:
2D tensor with shape: `(batch_size, num_classes)`.
"""
def __init__(self, input_shape, num_classes, num_mixtures, **kwargs):
super(MOELogistic, self).__init__(**kwargs)
self.num_classes = num_classes
self.num_mixtures = num_mixtures
self.gate_fc = tf.keras.layers.Dense(
units=num_classes*(num_mixtures+1),
kernel_regularizer=tf.keras.regularizers.l2(1e-6),
)
self.expert_fc = tf.keras.layers.Dense(
units=num_classes*num_mixtures,
kernel_regularizer=tf.keras.regularizers.l2(1e-6),
)
def call(self, input):
"""Apply the MoE algorithm to the given input.
Args:
input: A tensor with shape [batch_size, feature_dim].
Returns:
A tensor with shape [batch_size, num_classes].
Raises:
ValueError: If the `feature_dim` of input is not defined.
"""
gate_activations = self.gate_fc(input)
expert_activations = self.expert_fc(input)
#Calculate the distribution across mixtures
gate_dist = tf.nn.softmax(tf.reshape(gate_activations, [-1, self.num_mixtures+1]))
expert_dist = tf.nn.sigmoid(tf.reshape(expert_activations, [-1, self.num_mixtures]))
probs = tf.reduce_sum(tf.math.multiply(gate_dist[:,:self.num_mixtures], expert_dist),1)
probs = tf.reshape(probs, [-1, self.num_classes])
return probs
def compute_output_shape(self, input_shape):
return (input_shape[0], self.num_classes)
def get_config(self):
base_config = super().get_config()
config = base_config.update({'number of classes': self.num_classes, 'number of mixtures': self.num_mixtures})
return config
class VideoClassifier:
"""The Video Classifier model, implemented according to the winning model from the Youtube-8M Challenge.
The model can be found here: https://arxiv.org/pdf/1706.06905.pdf
Arguments:
num_clusters: the number of clusters to be used for NetVLAD. The audio clusters will be num_clusters/2.
video_input_shape: shape of the input video features. Shape of [batch_size, num_samples, video_feature_dim].
audio_input_shape: shape fo the input audio features. Shape of [batch_size, num_samples, audio_feature_dim].
Raises:
ValueError: If num_clusters is not divisible by 2.
ValueError: If the batch sizes of the audio_input_shape and video_input_shape do not match.
ValueError: If the number of samples of the audio_input_shape and video_input_shape do not match.
"""
def __init__(self, num_clusters, video_input_shape, audio_input_shape, num_classes, num_mixtures, fc_units, iterations, **kwargs):
super(VideoClassifier, self).__init__(**kwargs)
if num_clusters % 2 != 0:
raise ValueError("num_clusters must be divisible by 2.")
batch_size = video_input_shape[0]
if audio_input_shape[0] != batch_size:
raise ValueError("audio_input_shape[0] must equal video_input_shape[0]. Batch sizes must equal.")
if audio_input_shape[1] != video_input_shape[1]:
raise ValueError("audio_input_shape[1] must equal video_input_shape[1]. Number of samples must equal.")
self.num_frames = video_input_shape[1]
self.num_classes = num_classes
self.num_mixtures = num_mixtures
self.iterations = iterations
self.video_feature_dim = video_input_shape[2]
self.video_vlad = NetVLAD(num_clusters, input_shape=video_input_shape, name="video_vlad")
self.audio_vlad = NetVLAD(num_clusters//2, input_shape=audio_input_shape, name="audio_vlad")
#Relu6 is used as it is employed in the paper.
self.fc = tf.keras.layers.Dense(
units=fc_units,
activation=tf.nn.relu6,
kernel_regularizer=tf.keras.regularizers.l2(1e-5),
name="main_fc"
)
self.first_cg = ContextGating(input_shape=(batch_size, fc_units), name="first_cg")
self.moe = MOELogistic(input_shape=self.first_cg.compute_output_shape((batch_size, fc_units)), num_classes=self.num_classes, num_mixtures=self.num_mixtures, name="moe")
self.second_cg = ContextGating(input_shape=self.moe.compute_output_shape((batch_size, fc_units)), name="second_cg")
def build_model(self, input_shape, batch_size):
"""Perform one forward pass of the model.
Args:
model_input: input features of shape [batch_size, max_frames, video_feature_dim + audio_feature_dim].
Returns:
A tensor with shape [batch_size, num_classes].
"""
model_input = tf.keras.layers.Input(shape=input_shape, batch_size=batch_size)
video_input = model_input[:,:,:self.video_feature_dim]
audio_input = model_input[:,:,self.video_feature_dim:]
video_vlad_out = self.video_vlad(video_input)
audio_vlad_out = self.audio_vlad(audio_input)
vlad_out = tf.concat([video_vlad_out, audio_vlad_out], axis=1)
fc_out = self.fc(vlad_out)
cg_out = self.first_cg(fc_out)
moe_out = self.moe(cg_out)
final_out = self.second_cg(moe_out)
final_model = tf.keras.models.Model(inputs=model_input, outputs=final_out)
return final_model
class SegmentClassifier:
"""The Segment Classifier model, implemented according to the winning model from the Youtube-8M Challenge.
The model can be found here: https://arxiv.org/abs/1911.08548
Arguments:
num_clusters: the number of clusters to be used for NetVLAD. The audio clusters will be num_clusters/2.
video_input_shape: shape of the input video features. Shape of [batch_size, num_samples, video_feature_dim].
audio_input_shape: shape fo the input audio features. Shape of [batch_size, num_samples, audio_feature_dim].
Raises:
ValueError: If num_clusters is not divisible by 2.
ValueError: If the batch sizes of the audio_input_shape and video_input_shape do not match.
ValueError: If the number of samples of the audio_input_shape and video_input_shape do not match.
"""
def __init__(self, num_clusters, video_input_shape, audio_input_shape, num_classes, fc_units, **kwargs):
super(SegmentClassifier, self).__init__(**kwargs)
if num_clusters % 2 != 0:
raise ValueError("num_clusters must be divisible by 2.")
batch_size = video_input_shape[0]
if audio_input_shape[0] != batch_size:
raise ValueError("audio_input_shape[0] must equal video_input_shape[0]. Batch sizes must equal.")
if audio_input_shape[1] != video_input_shape[1]:
raise ValueError("audio_input_shape[1] must equal video_input_shape[1]. Number of samples must equal.")
self.num_frames = video_input_shape[1]
self.num_classes = num_classes
self.video_feature_dim = video_input_shape[2]
self.video_vlad = NetVLAD(num_clusters, input_shape=video_input_shape, name="video_vlad")
self.audio_vlad = NetVLAD(num_clusters//2, input_shape=audio_input_shape, name="audio_vlad")
#Relu6 is used as it is employed in the paper.
self.fc = tf.keras.layers.Dense(
units=fc_units,
activation=tf.nn.relu6,
kernel_regularizer=tf.keras.regularizers.l2(1e-5),
name="main_fc"
)
self.first_cg = ContextGating(input_shape=(batch_size, fc_units), name="first_cg")
self.fc2 = tf.keras.layers.Dense(
units=1,
activation=tf.keras.activations.sigmoid,
kernel_regularizer=tf.keras.regularizers.l2(1e-5),
name="main_fc2"
)
def build_model(self, input_shape, second_input_shape, batch_size):
"""Perform one forward pass of the model.
Args:
input_shape: input shape for video features. Shape is of the form: [max_frames, video_feature_dim + audio_feature_dim].
second_input_shape: input shape of new class specific features. Shape is of the form [num_new_features]
Returns:
A tensor with shape [batch_size, num_classes].
"""
model_input = tf.keras.layers.Input(shape=input_shape, batch_size=batch_size)
model_input2 = tf.keras.layers.Input(shape=second_input_shape, batch_size=batch_size)
video_input = model_input[:,:,:self.video_feature_dim]
audio_input = model_input[:,:,self.video_feature_dim:]
video_vlad_out = self.video_vlad(video_input)
audio_vlad_out = self.audio_vlad(audio_input)
vlad_out = tf.concat([video_vlad_out, audio_vlad_out], axis=1)
vlad_out = tf.concat([vlad_out, model_input2], axis=1)
fc_out = self.fc(vlad_out)
cg_out = self.first_cg(fc_out)
final_out = self.fc2(cg_out)
final_model = tf.keras.models.Model(inputs=[model_input, model_input2], outputs=final_out)
return final_model
| 39.520588
| 172
| 0.723599
| 1,975
| 13,437
| 4.666835
| 0.126582
| 0.090051
| 0.036454
| 0.030379
| 0.668113
| 0.622328
| 0.59336
| 0.566996
| 0.549528
| 0.520017
| 0
| 0.013082
| 0.175113
| 13,437
| 340
| 173
| 39.520588
| 0.818477
| 0.316961
| 0
| 0.442623
| 0
| 0
| 0.070079
| 0.009449
| 0
| 0
| 0
| 0
| 0.005464
| 1
| 0.087432
| false
| 0
| 0.010929
| 0.010929
| 0.185792
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d7cd2c4d715d5bc952ad3e374f1d2268aa25788
| 310
|
py
|
Python
|
ivy/functional/backends/jax/old/math.py
|
faruq2021/ivy
|
1b24beadbd673d6a9dd504e037c68547e5640627
|
[
"Apache-2.0"
] | null | null | null |
ivy/functional/backends/jax/old/math.py
|
faruq2021/ivy
|
1b24beadbd673d6a9dd504e037c68547e5640627
|
[
"Apache-2.0"
] | null | null | null |
ivy/functional/backends/jax/old/math.py
|
faruq2021/ivy
|
1b24beadbd673d6a9dd504e037c68547e5640627
|
[
"Apache-2.0"
] | null | null | null |
"""
Collection of Jax math functions, wrapped to fit Ivy syntax and signature.
"""
# global
import jax as _jax
import jax.numpy as _jnp
tan = _jnp.tan
acos = _jnp.arccos
atan = _jnp.arctan
atan2 = _jnp.arctan2
cosh = _jnp.cosh
atanh = _jnp.arctanh
log = _jnp.log
exp = _jnp.exp
erf = _jax.scipy.special.erf
| 16.315789
| 74
| 0.729032
| 51
| 310
| 4.215686
| 0.627451
| 0.083721
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007813
| 0.174194
| 310
| 18
| 75
| 17.222222
| 0.832031
| 0.264516
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.181818
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d7d4f2cec08d8e71851176546c2923392d1f51a
| 7,675
|
py
|
Python
|
test/tst_vlen.py
|
timgates42/netcdf4-python
|
d8b1cb11454f9beec674a29904c91f48db608c2c
|
[
"MIT"
] | 574
|
2015-01-16T02:21:19.000Z
|
2022-03-27T14:05:55.000Z
|
test/tst_vlen.py
|
timgates42/netcdf4-python
|
d8b1cb11454f9beec674a29904c91f48db608c2c
|
[
"MIT"
] | 681
|
2015-01-02T20:26:17.000Z
|
2022-03-24T00:59:15.000Z
|
test/tst_vlen.py
|
timgates42/netcdf4-python
|
d8b1cb11454f9beec674a29904c91f48db608c2c
|
[
"MIT"
] | 257
|
2015-01-20T16:42:17.000Z
|
2022-03-29T03:49:37.000Z
|
import sys
import unittest
import os
import tempfile
from netCDF4 import Dataset
import numpy as np
from numpy.testing import assert_array_equal
FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
VL_NAME = 'vlen_type'
VL_BASETYPE = np.int16
DIM1_NAME = 'lon'
DIM2_NAME = 'lat'
nlons = 5; nlats = 5
VAR1_NAME = 'ragged'
VAR2_NAME = 'strings'
VAR3_NAME = 'strings_alt'
VAR4_NAME = 'string_scalar'
VAR5_NAME = 'vlen_scalar'
data = np.empty(nlats*nlons,object)
datas = np.empty(nlats*nlons,object)
nn = 0
for n in range(nlats*nlons):
nn = nn + 1
data[n] = np.arange(nn,dtype=VL_BASETYPE)
datas[n] = ''.join([chr(i) for i in range(97,97+nn+1)])
data = np.reshape(data,(nlats,nlons))
datas = np.reshape(datas,(nlats,nlons))
class VariablesTestCase(unittest.TestCase):
def setUp(self):
self.file = FILE_NAME
f = Dataset(self.file,'w')
vlen_t = f.createVLType(VL_BASETYPE, VL_NAME)
f.createDimension(DIM1_NAME,nlons)
f.createDimension(DIM2_NAME,nlats)
ragged = f.createVariable(VAR1_NAME, vlen_t,\
(DIM2_NAME,DIM1_NAME))
strings = f.createVariable(VAR2_NAME, str,
(DIM2_NAME,DIM1_NAME))
strings_alt = f.createVariable(VAR3_NAME, datas.astype(str).dtype,
(DIM2_NAME, DIM1_NAME))
string_scalar = f.createVariable(VAR4_NAME,str,())
vlen_scalar = f.createVariable(VAR5_NAME,vlen_t,())
ragged[:] = data
ragged[-1,-1] = data[-1,-1]
strings[:] = datas
strings[-2,-2] = datas[-2,-2]
strings_alt[:] = datas.astype(str)
string_scalar[...] = 'foo' #issue458
vlen_scalar[...] = np.array([1,2,3],np.int16)
f.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
def runTest(self):
"""testing vlen variables"""
f = Dataset(self.file, 'r')
v = f.variables[VAR1_NAME]
vs = f.variables[VAR2_NAME]
vs_alt = f.variables[VAR3_NAME]
assert list(f.vltypes.keys()) == [VL_NAME]
assert f.vltypes[VL_NAME].dtype == VL_BASETYPE
assert f.variables['string_scalar'][...] == 'foo'
assert_array_equal(f.variables['vlen_scalar'][...],np.array([1,2,3],np.int16))
data2 = v[:]
data2s = vs[:]
for i in range(nlons):
for j in range(nlats):
assert_array_equal(data2[j,i], data[j,i])
assert datas[j,i] == data2s[j,i]
assert_array_equal(datas, vs_alt[:])
f.close()
class TestInvalidDataType(unittest.TestCase):
def runTest(self):
f = Dataset(FILE_NAME, 'w', format='NETCDF3_CLASSIC')
f.createDimension('x', 1)
# using assertRaisesRegext as a context manager
# only works with python >= 2.7 (issue #497)
#with self.assertRaisesRegexp(ValueError, 'strings are only supported'):
# f.createVariable('foo', str, ('x',))
try:
f.createVariable('foo', str, ('x',))
except ValueError:
pass
f.close()
os.remove(FILE_NAME)
class TestScalarVlenString(unittest.TestCase):
# issue 333
def runTest(self):
f = Dataset(FILE_NAME, 'w', format='NETCDF4')
teststring = f.createVariable('teststring', str)
stringout = "yyyymmdd_hhmmss"
teststring[()] = stringout
f.close()
f = Dataset(FILE_NAME)
assert f.variables['teststring'][:] == stringout
f.close()
os.remove(FILE_NAME)
class TestIntegerIndex(unittest.TestCase):
# issue 526
def runTest(self):
strtest = Dataset(FILE_NAME, 'w', format='NETCDF4')
strtest.createDimension('tenstrings', 10)
strtest.createVariable('tenstrings', str, ['tenstrings'])
strtest['tenstrings'][np.int32(5)] = 'asdf'
strtest['tenstrings'][6.0] = 'asdf'
strtest.close()
f = Dataset(FILE_NAME)
assert f.variables['tenstrings'][np.int32(5)] == 'asdf'
assert f.variables['tenstrings'][6.0] == 'asdf'
f.close()
os.remove(FILE_NAME)
class TestObjectArrayIndexing(unittest.TestCase):
def setUp(self):
self.file = FILE_NAME
f = Dataset(self.file,'w')
vlen_t = f.createVLType(VL_BASETYPE, VL_NAME)
f.createDimension(DIM1_NAME,nlons)
f.createDimension(DIM2_NAME,nlats)
strings_alt = f.createVariable(VAR3_NAME, datas.astype(str).dtype,
(DIM2_NAME, DIM1_NAME))
strings_alt[:] = datas.astype(str)
f.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
def runTest(self):
"""testing vlen variables"""
f = Dataset(self.file, 'r')
vs_alt = f.variables[VAR3_NAME]
unicode_strings = vs_alt[:]
fancy_indexed = unicode_strings[0][[1,2,4]]
assert fancy_indexed[0] == 'abc'
assert fancy_indexed[1] == 'abcd'
assert fancy_indexed[2] == 'abcdef'
f.close()
class VlenAppendTestCase(unittest.TestCase):
def setUp(self):
import netCDF4
if netCDF4.__netcdf4libversion__ < "4.4.1":
self.skip = True
try:
self.skipTest("This test requires NetCDF 4.4.1 or later.")
except AttributeError:
# workaround for Python 2.6 (skipTest(reason) is new
# in Python 2.7)
pass
else:
self.skip = False
self.file = FILE_NAME
f = Dataset(self.file, 'w')
vlen_type = f.createVLType(np.float64, 'vltest')
f.createDimension('x', None)
v = f.createVariable('vl', vlen_type, 'x')
w = f.createVariable('vl2', np.float64, 'x')
f.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
def runTest(self):
"""testing appending to vlen variables (issue #527)."""
# workaround for Python 2.6
if self.skip:
return
f = Dataset(self.file, 'a')
w = f.variables["vl2"]
v = f.variables["vl"]
w[0:3] = np.arange(3, dtype=np.float64)
v[0] # sometimes crashes
v[0].tolist() # sometimes crashes
v[0].size # BOOM!
f.close()
class Vlen_ScaledInts(unittest.TestCase):
def setUp(self):
self.file = FILE_NAME
nc = Dataset(self.file, 'w')
vlen_type = nc.createVLType(np.uint8, 'vltest')
nc.createDimension('x', None)
v = nc.createVariable('vl', vlen_type, 'x')
v.scale_factor = 1./254.
v.missing_value=np.array(255,np.uint8)
# random lengths between 1 and 1000
ilen = np.random.randint(1,1000,size=100)
n = 0
for nlen in ilen:
data = np.random.uniform(low=0.0, high=1.0, size=nlen)
v[n] = data
if n==99: self.data = data
n += 1
nc.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
def runTest(self):
"""testing packing float vlens as scaled integers (issue #1003)."""
nc = Dataset(self.file)
data = nc['vl'][-1]
# check max error of compression
err = np.abs(data - self.data)
assert(err.max() < nc['vl'].scale_factor)
# turn off auto-scaling
nc.set_auto_maskandscale(False)
data = nc['vl'][-1]
assert(data[-1] == np.around(self.data[-1]/nc['vl'].scale_factor))
nc.close()
if __name__ == '__main__':
unittest.main()
| 33.515284
| 86
| 0.580456
| 961
| 7,675
| 4.51821
| 0.224766
| 0.02948
| 0.027637
| 0.02211
| 0.377706
| 0.305159
| 0.273837
| 0.255182
| 0.238139
| 0.198526
| 0
| 0.030915
| 0.283518
| 7,675
| 228
| 87
| 33.662281
| 0.758683
| 0.093029
| 0
| 0.360215
| 0
| 0
| 0.052746
| 0
| 0
| 0
| 0
| 0
| 0.086022
| 1
| 0.080645
| false
| 0.010753
| 0.043011
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d7e5adfacc7c05430120bb4ddda519a5d8edcca
| 9,774
|
py
|
Python
|
env.py
|
DGarciaMedina/PiArmDiego
|
cb4664796aa99b0717145f9e4889bfba5190059f
|
[
"MIT"
] | null | null | null |
env.py
|
DGarciaMedina/PiArmDiego
|
cb4664796aa99b0717145f9e4889bfba5190059f
|
[
"MIT"
] | null | null | null |
env.py
|
DGarciaMedina/PiArmDiego
|
cb4664796aa99b0717145f9e4889bfba5190059f
|
[
"MIT"
] | null | null | null |
import piarm
import time
import numpy as np
import cv2
import random
class MyArm2D:
def __init__(self, move_robot = False):
self.move_robot = move_robot
if self.move_robot:
self.robot = piarm.PiArm()
self.open_connection()
self.DEFAULT = [500, 500, 500, 500, 500, 500]
self.num_members = 3
self.adjustable_joints = [3,4,5]
self.initial_height = 73 # height in mm of motor 5's axle
self.lengths = {
"h_0": 73,
"a": 97.5,
"b": 96,
"c": 160
}
self.base_width = 110
self.base_height = 45
# All the angles are with respect to the vertical
self.max_angles = [90 for _ in range(self.num_members)]
self.min_angles = [-90 for _ in range(self.num_members)]
self.min_angles[0] = 0 # To prevent it from hitting the base of the arm
self.angles = 90*np.ones(self.num_members) # angles of motor 3, 4 and 5 ranging between
# min_angle and max_angle
self.member_thickness = 30
self.img_width = 1000
self.x_offset = int(self.img_width/2)
self.y_offset = self.lengths["h_0"]
self.img_height = int(sum(list(self.lengths.values())) + self.y_offset + 20)
self.img = np.zeros((self.img_height, self.img_width, 3))
self.timestep = 0
self.max_timestep = 200
# This is to check that all the joints (except for the last one) is above
# the ground
self.min_joint_heights = [20, 20, 10]
self.goal_coords = [None, None]
self.update_goal_coords()
self.joint_positions = [[0,0] for i in range(self.num_members + 1)]
self.update_positions()
self.distance2goal = None
self.update_distance_2_goal()
def __del__(self):
print("Closing connection...")
if self.move_robot:
self.close_connection()
def open_connection(self):
if self.robot.alive:
raise Exception("Robot is already switched on")
self.robot.connect("/dev/ttyS0")
if self.robot.alive:
print("Success connecting to robot")
return True
else:
print("Failed to connect to robot")
return False
def move_to_default_pos(self):
if self.robot.alive:
for ID in range(1, 7):
self.robot.servoWrite(ID, int(self.DEFAULT[ID - 1]), 500)
return True
else:
return False
def move_to_pos(self):
# First, convert the angles in degrees between -90º and +90º
# to angles between 125 and 875
# 90 -> 500
# 0 -> 125
angles_deg = self.angles - 90
angles_deg[2] -= angles_deg[1]
angles_deg[1] -= angles_deg[0]
angles_piarm = [int(500 + (375/90)*angle_deg) for angle_deg in angles_deg]
angles_piarm[0] = 1000 - angles_piarm[0]
angles_piarm[1] = 1000 - angles_piarm[1]
print("Angles in degrees: ", angles_deg)
print("Moving arms with angles: ", angles_piarm)
if self.robot.alive:
for ID in range(3, 6):
self.robot.servoWrite(8 - ID, int(angles_piarm[ID - 3]), 500)
time.sleep(1)
return True
else:
return False
def close_connection(self):
if not self.robot.alive:
raise Exception("Robot is already switched off")
self.robot.disconnect()
if not self.robot.alive:
print("Success disconnecting from robot")
return True
else:
print("Failed to disconnect from robot")
return False
def update_goal_coords(self):
max_length = sum(list(self.lengths.values())[1:])
r = random.uniform(0.8*max_length,max_length)
theta = random.uniform(-np.pi/4, np.pi/2)
x = r * np.sin(theta)
y = r * np.cos(theta)
self.goal_coords = [int(x), int(y)]
def update_distance_2_goal(self):
gripper_pos = self.joint_positions[-1]
self.distance2goal = np.sqrt(sum([(gripper_pos[i] - self.goal_coords[i])**2 for i in range(2)]))
def update_positions(self):
"""
Positions are with respect to the origin (0,0), right underneath
motor 5. It is positive if it is away from the origin.
"""
self.joint_positions[0] = [0, self.lengths["h_0"]]
self.joint_positions[1] = [
self.joint_positions[0][0] + self.lengths["a"] * np.sin(np.deg2rad(self.angles[0])),
self.joint_positions[0][1] + self.lengths["a"] * np.cos(np.deg2rad(self.angles[0]))
]
self.joint_positions[2] = [
self.joint_positions[1][0] + self.lengths["b"] * np.sin(np.deg2rad(self.angles[1])),
self.joint_positions[1][1] + self.lengths["b"] * np.cos(np.deg2rad(self.angles[1]))
]
self.joint_positions[3] = [
self.joint_positions[2][0] + self.lengths["c"] * np.sin(np.deg2rad(self.angles[2])),
self.joint_positions[2][1] + self.lengths["c"] * np.cos(np.deg2rad(self.angles[2]))
]
# Convert to integers
self.joint_positions = [[int(x[0]),int(x[1])] for x in self.joint_positions]
def move_arm(self, actions):
"""
The inputs are the new set of angles [theta0, theta1, theta2]
"""
for i, action in enumerate(actions):
self.angles[i:] += action
for member_index in range(1,self.num_members):
self.max_angles[member_index] = self.angles[member_index - 1] + 90
self.min_angles[member_index] = self.angles[member_index - 1] - 90
self.update_positions()
self.update_distance_2_goal()
def render(self):
self.img = np.zeros((self.img_height, self.img_width, 3))
# Render the floor
self.img = cv2.rectangle(self.img, (0,0), (self.img_width, self.y_offset), (0,255,0), -1)
# Render the base of the arm
self.img = cv2.rectangle(self.img,
(int(self.x_offset - self.base_width/2), self.y_offset),
(int(self.x_offset - self.base_width/2 + self.base_width), self.y_offset + self.base_height),
(0, 165, 255),
-1)
goal_x, goal_y = self.goal_coords
self.img = cv2.circle(self.img, (goal_x + self.x_offset, goal_y + self.y_offset), int(self.member_thickness/2), (128, 0, 128), 5)
for member_id in range(self.num_members):
first_joint = self.joint_positions[member_id].copy()
second_joint = self.joint_positions[member_id + 1].copy()
first_joint[0] += self.x_offset
first_joint[1] += self.y_offset
second_joint[0] += self.x_offset
second_joint[1] += self.y_offset
self.img = cv2.line(self.img, tuple(first_joint), tuple(second_joint), (255,0,0), self.member_thickness)
self.img = cv2.circle(self.img, tuple(first_joint), int(self.member_thickness/2), (255,255,0), -1)
# Flip image upside down
self.img = cv2.flip(self.img, 0)
self.img = cv2.putText(self.img,
"Distance: " + str(round(self.distance2goal,2)),
(10, 30),
cv2.FONT_HERSHEY_SIMPLEX,
1,
(255,255,255),
2)
cv2.imshow("Arm", self.img)
cv2.moveWindow("Arm",20,50)
def reset(self):
self.angles = 90*np.ones(self.num_members)
self.update_positions()
self.img = np.zeros((self.img_height, self.img_width, 3))
self.timestep = 0
self.update_goal_coords()
self.render()
if self.move_robot:
self.move_to_default_pos()
def check_arm_angles(self):
for member_index in range(self.num_members):
if self.angles[member_index] < self.min_angles[member_index]:
return False
if self.angles[member_index] > self.max_angles[member_index]:
return False
return True
def check_arm_positions(self):
for joint_index in range(1,len(self.joint_positions)):
member_pos = self.joint_positions[joint_index][1]
min_height = self.min_joint_heights[joint_index-1]
if member_pos < min_height:
return False
return True
def get_reward(self, forbidden_action):
if forbidden_action:
reward_scaling_factor = 2
else:
reward_scaling_factor = 1
return - self.distance2goal * reward_scaling_factor
def step(self, actions):
self.move_arm(actions)
forbidden_action = False
okay_angles = self.check_arm_angles()
okay_positions = self.check_arm_positions()
if not okay_angles:
print("An angle threshold was exceeded")
self.move_arm(-actions)
forbidden_action = True
if not okay_positions:
print("A position threshold was exqqceeded")
self.move_arm(-actions)
forbidden_action = True
self.render()
if self.move_robot:
self.move_to_pos()
r = self.get_reward(forbidden_action)
self.timestep += 1
is_done = self.timestep >= self.max_timestep
return self.angles, r, is_done
| 32.151316
| 137
| 0.563024
| 1,274
| 9,774
| 4.144427
| 0.17033
| 0.037121
| 0.061364
| 0.021591
| 0.429545
| 0.281629
| 0.195833
| 0.157955
| 0.106439
| 0.062121
| 0
| 0.043498
| 0.327297
| 9,774
| 304
| 138
| 32.151316
| 0.759544
| 0.067015
| 0
| 0.24
| 0
| 0
| 0.0385
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0
| 0.025
| 0
| 0.185
| 0.045
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d7e67724340bd64fb013a94027277d42f8215af
| 10,105
|
py
|
Python
|
src/python/src/grpc/_adapter/_links_test.py
|
jonywtf/grpc
|
124f3c5a4b65bb88f13be7c68482eb83d945ad02
|
[
"BSD-3-Clause"
] | 1
|
2022-01-14T04:25:01.000Z
|
2022-01-14T04:25:01.000Z
|
src/python/src/grpc/_adapter/_links_test.py
|
jonywtf/grpc
|
124f3c5a4b65bb88f13be7c68482eb83d945ad02
|
[
"BSD-3-Clause"
] | null | null | null |
src/python/src/grpc/_adapter/_links_test.py
|
jonywtf/grpc
|
124f3c5a4b65bb88f13be7c68482eb83d945ad02
|
[
"BSD-3-Clause"
] | 1
|
2022-01-14T04:25:02.000Z
|
2022-01-14T04:25:02.000Z
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test of the GRPC-backed ForeLink and RearLink."""
import threading
import unittest
from grpc._adapter import _proto_scenarios
from grpc._adapter import _test_links
from grpc._adapter import fore
from grpc._adapter import rear
from grpc.framework.base import interfaces
from grpc.framework.base.packets import packets as tickets
from grpc.framework.foundation import logging_pool
_IDENTITY = lambda x: x
_TIMEOUT = 2
class RoundTripTest(unittest.TestCase):
def setUp(self):
self.fore_link_pool = logging_pool.pool(80)
self.rear_link_pool = logging_pool.pool(80)
def tearDown(self):
self.rear_link_pool.shutdown(wait=True)
self.fore_link_pool.shutdown(wait=True)
def testZeroMessageRoundTrip(self):
test_operation_id = object()
test_method = 'test method'
test_fore_link = _test_links.ForeLink(None, None)
def rear_action(front_to_back_ticket, fore_link):
if front_to_back_ticket.kind in (
tickets.Kind.COMPLETION, tickets.Kind.ENTIRE):
back_to_front_ticket = tickets.BackToFrontPacket(
front_to_back_ticket.operation_id, 0, tickets.Kind.COMPLETION, None)
fore_link.accept_back_to_front_ticket(back_to_front_ticket)
test_rear_link = _test_links.RearLink(rear_action, None)
fore_link = fore.ForeLink(
self.fore_link_pool, {test_method: None}, {test_method: None}, None, ())
fore_link.join_rear_link(test_rear_link)
test_rear_link.join_fore_link(fore_link)
fore_link.start()
port = fore_link.port()
rear_link = rear.RearLink(
'localhost', port, self.rear_link_pool, {test_method: None},
{test_method: None}, False, None, None, None)
rear_link.join_fore_link(test_fore_link)
test_fore_link.join_rear_link(rear_link)
rear_link.start()
front_to_back_ticket = tickets.FrontToBackPacket(
test_operation_id, 0, tickets.Kind.ENTIRE, test_method,
interfaces.ServicedSubscription.Kind.FULL, None, None, _TIMEOUT)
rear_link.accept_front_to_back_ticket(front_to_back_ticket)
with test_fore_link.condition:
while (not test_fore_link.tickets or
test_fore_link.tickets[-1].kind is tickets.Kind.CONTINUATION):
test_fore_link.condition.wait()
rear_link.stop()
fore_link.stop()
with test_fore_link.condition:
self.assertIs(test_fore_link.tickets[-1].kind, tickets.Kind.COMPLETION)
def testEntireRoundTrip(self):
test_operation_id = object()
test_method = 'test method'
test_front_to_back_datum = b'\x07'
test_back_to_front_datum = b'\x08'
test_fore_link = _test_links.ForeLink(None, None)
rear_sequence_number = [0]
def rear_action(front_to_back_ticket, fore_link):
if front_to_back_ticket.payload is None:
payload = None
else:
payload = test_back_to_front_datum
terminal = front_to_back_ticket.kind in (
tickets.Kind.COMPLETION, tickets.Kind.ENTIRE)
if payload is not None or terminal:
back_to_front_ticket = tickets.BackToFrontPacket(
front_to_back_ticket.operation_id, rear_sequence_number[0],
tickets.Kind.COMPLETION if terminal else tickets.Kind.CONTINUATION,
payload)
rear_sequence_number[0] += 1
fore_link.accept_back_to_front_ticket(back_to_front_ticket)
test_rear_link = _test_links.RearLink(rear_action, None)
fore_link = fore.ForeLink(
self.fore_link_pool, {test_method: _IDENTITY},
{test_method: _IDENTITY}, None, ())
fore_link.join_rear_link(test_rear_link)
test_rear_link.join_fore_link(fore_link)
fore_link.start()
port = fore_link.port()
rear_link = rear.RearLink(
'localhost', port, self.rear_link_pool, {test_method: _IDENTITY},
{test_method: _IDENTITY}, False, None, None, None)
rear_link.join_fore_link(test_fore_link)
test_fore_link.join_rear_link(rear_link)
rear_link.start()
front_to_back_ticket = tickets.FrontToBackPacket(
test_operation_id, 0, tickets.Kind.ENTIRE, test_method,
interfaces.ServicedSubscription.Kind.FULL, None,
test_front_to_back_datum, _TIMEOUT)
rear_link.accept_front_to_back_ticket(front_to_back_ticket)
with test_fore_link.condition:
while (not test_fore_link.tickets or
test_fore_link.tickets[-1].kind is not tickets.Kind.COMPLETION):
test_fore_link.condition.wait()
rear_link.stop()
fore_link.stop()
with test_rear_link.condition:
front_to_back_payloads = tuple(
ticket.payload for ticket in test_rear_link.tickets
if ticket.payload is not None)
with test_fore_link.condition:
back_to_front_payloads = tuple(
ticket.payload for ticket in test_fore_link.tickets
if ticket.payload is not None)
self.assertTupleEqual((test_front_to_back_datum,), front_to_back_payloads)
self.assertTupleEqual((test_back_to_front_datum,), back_to_front_payloads)
def _perform_scenario_test(self, scenario):
test_operation_id = object()
test_method = scenario.method()
test_fore_link = _test_links.ForeLink(None, None)
rear_lock = threading.Lock()
rear_sequence_number = [0]
def rear_action(front_to_back_ticket, fore_link):
with rear_lock:
if front_to_back_ticket.payload is not None:
response = scenario.response_for_request(front_to_back_ticket.payload)
else:
response = None
terminal = front_to_back_ticket.kind in (
tickets.Kind.COMPLETION, tickets.Kind.ENTIRE)
if response is not None or terminal:
back_to_front_ticket = tickets.BackToFrontPacket(
front_to_back_ticket.operation_id, rear_sequence_number[0],
tickets.Kind.COMPLETION if terminal else tickets.Kind.CONTINUATION,
response)
rear_sequence_number[0] += 1
fore_link.accept_back_to_front_ticket(back_to_front_ticket)
test_rear_link = _test_links.RearLink(rear_action, None)
fore_link = fore.ForeLink(
self.fore_link_pool, {test_method: scenario.deserialize_request},
{test_method: scenario.serialize_response}, None, ())
fore_link.join_rear_link(test_rear_link)
test_rear_link.join_fore_link(fore_link)
fore_link.start()
port = fore_link.port()
rear_link = rear.RearLink(
'localhost', port, self.rear_link_pool,
{test_method: scenario.serialize_request},
{test_method: scenario.deserialize_response}, False, None, None, None)
rear_link.join_fore_link(test_fore_link)
test_fore_link.join_rear_link(rear_link)
rear_link.start()
commencement_ticket = tickets.FrontToBackPacket(
test_operation_id, 0, tickets.Kind.COMMENCEMENT, test_method,
interfaces.ServicedSubscription.Kind.FULL, None, None, _TIMEOUT)
fore_sequence_number = 1
rear_link.accept_front_to_back_ticket(commencement_ticket)
for request in scenario.requests():
continuation_ticket = tickets.FrontToBackPacket(
test_operation_id, fore_sequence_number, tickets.Kind.CONTINUATION,
None, None, None, request, None)
fore_sequence_number += 1
rear_link.accept_front_to_back_ticket(continuation_ticket)
completion_ticket = tickets.FrontToBackPacket(
test_operation_id, fore_sequence_number, tickets.Kind.COMPLETION, None,
None, None, None, None)
fore_sequence_number += 1
rear_link.accept_front_to_back_ticket(completion_ticket)
with test_fore_link.condition:
while (not test_fore_link.tickets or
test_fore_link.tickets[-1].kind is not tickets.Kind.COMPLETION):
test_fore_link.condition.wait()
rear_link.stop()
fore_link.stop()
with test_rear_link.condition:
requests = tuple(
ticket.payload for ticket in test_rear_link.tickets
if ticket.payload is not None)
with test_fore_link.condition:
responses = tuple(
ticket.payload for ticket in test_fore_link.tickets
if ticket.payload is not None)
self.assertTrue(scenario.verify_requests(requests))
self.assertTrue(scenario.verify_responses(responses))
def testEmptyScenario(self):
self._perform_scenario_test(_proto_scenarios.EmptyScenario())
def testBidirectionallyUnaryScenario(self):
self._perform_scenario_test(_proto_scenarios.BidirectionallyUnaryScenario())
def testBidirectionallyStreamingScenario(self):
self._perform_scenario_test(
_proto_scenarios.BidirectionallyStreamingScenario())
if __name__ == '__main__':
unittest.main()
| 40.42
| 80
| 0.740129
| 1,358
| 10,105
| 5.182622
| 0.164212
| 0.070475
| 0.046036
| 0.050725
| 0.656579
| 0.615658
| 0.600739
| 0.578005
| 0.563228
| 0.528986
| 0
| 0.003881
| 0.183968
| 10,105
| 249
| 81
| 40.582329
| 0.849624
| 0.150322
| 0
| 0.543011
| 0
| 0
| 0.0076
| 0
| 0
| 0
| 0
| 0
| 0.026882
| 1
| 0.05914
| false
| 0
| 0.048387
| 0
| 0.112903
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d7f67f5f4955fd254f6885d027ea817e96c1e91
| 620
|
py
|
Python
|
tests/_test_progress_board.py
|
stjordanis/Hyperactive
|
5acf247d8023ff6761593b9d0954bdd912d20aed
|
[
"MIT"
] | 382
|
2019-07-16T13:30:15.000Z
|
2022-03-30T22:29:07.000Z
|
tests/_test_progress_board.py
|
stjordanis/Hyperactive
|
5acf247d8023ff6761593b9d0954bdd912d20aed
|
[
"MIT"
] | 46
|
2019-08-27T18:07:47.000Z
|
2022-03-16T16:28:10.000Z
|
tests/_test_progress_board.py
|
stjordanis/Hyperactive
|
5acf247d8023ff6761593b9d0954bdd912d20aed
|
[
"MIT"
] | 35
|
2019-08-03T00:51:09.000Z
|
2021-12-03T19:06:07.000Z
|
import os, glob
import subprocess
from subprocess import DEVNULL, STDOUT
abspath = os.path.abspath(__file__)
dir_ = os.path.dirname(abspath)
files = glob.glob(dir_ + "/_progress_board_tests/_test_progress_board_*.py")
for file_path in files:
file_name = str(file_path.rsplit("/", maxsplit=1)[1])
try:
print("\033[0;33;40m Testing", file_name, end="...\r")
subprocess.check_call(["pytest", file_path], stdout=DEVNULL, stderr=STDOUT)
except subprocess.CalledProcessError:
print("\033[0;31;40m Error in", file_name)
else:
print("\033[0;32;40m", file_name, "is correct")
| 29.52381
| 83
| 0.68871
| 88
| 620
| 4.613636
| 0.522727
| 0.078818
| 0.066502
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.050193
| 0.164516
| 620
| 20
| 84
| 31
| 0.733591
| 0
| 0
| 0
| 0
| 0
| 0.203226
| 0.077419
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d7f92bfcfd4bf66faaae6ec55c26244db6de591
| 8,695
|
py
|
Python
|
pages/forest_pages.py
|
jhalljhall/beiwe-backend
|
06d28926a2830c7ad53c32ec41ff49320932aeed
|
[
"BSD-3-Clause"
] | 1
|
2022-03-09T03:20:37.000Z
|
2022-03-09T03:20:37.000Z
|
pages/forest_pages.py
|
jhalljhall/beiwe-backend
|
06d28926a2830c7ad53c32ec41ff49320932aeed
|
[
"BSD-3-Clause"
] | null | null | null |
pages/forest_pages.py
|
jhalljhall/beiwe-backend
|
06d28926a2830c7ad53c32ec41ff49320932aeed
|
[
"BSD-3-Clause"
] | null | null | null |
import csv
import datetime
from collections import defaultdict
from django.contrib import messages
from django.http.response import FileResponse
from django.shortcuts import redirect, render
from django.utils import timezone
from django.views.decorators.http import require_GET, require_http_methods, require_POST
from authentication.admin_authentication import (authenticate_admin,
authenticate_researcher_study_access, forest_enabled)
from constants.data_access_api_constants import CHUNK_FIELDS
from constants.forest_constants import ForestTaskStatus, ForestTree
from database.data_access_models import ChunkRegistry
from database.study_models import Study
from database.tableau_api_models import ForestTask
from database.user_models import Participant
from forms.django_forms import CreateTasksForm
from libs.http_utils import easy_url
from libs.internal_types import ParticipantQuerySet, ResearcherRequest
from libs.streaming_zip import zip_generator
from libs.utils.date_utils import daterange
from middleware.abort_middleware import abort
from serializers.forest_serializers import ForestTaskCsvSerializer, ForestTaskSerializer
@require_GET
@authenticate_researcher_study_access
@forest_enabled
def analysis_progress(request: ResearcherRequest, study_id=None):
study: Study = Study.objects.get(pk=study_id)
participants: ParticipantQuerySet = Participant.objects.filter(study=study_id)
# generate chart of study analysis progress logs
trackers = ForestTask.objects.filter(participant__in=participants).order_by("created_on")
start_date = (study.get_earliest_data_time_bin() or study.created_on).date()
end_date = (study.get_latest_data_time_bin() or timezone.now()).date()
# this code simultaneously builds up the chart of most recent forest results for date ranges
# by participant and tree, and tracks the metadata
params = dict()
results = defaultdict(lambda: "--")
tracker: ForestTask
for tracker in trackers:
for date in daterange(tracker.data_date_start, tracker.data_date_end, inclusive=True):
results[(tracker.participant_id, tracker.forest_tree, date)] = tracker.status
if tracker.status == tracker.status.success:
params[(tracker.participant_id, tracker.forest_tree, date)] = tracker.forest_param_id
else:
params[(tracker.participant_id, tracker.forest_tree, date)] = None
# generate the date range for charting
dates = list(daterange(start_date, end_date, inclusive=True))
chart = []
for participant in participants:
for tree in ForestTree.values():
row = [participant.patient_id, tree] + \
[results[(participant.id, tree, date)] for date in dates]
chart.append(row)
# ensure that within each tree, only a single set of param values are used (only the most recent runs
# are considered, and unsuccessful runs are assumed to invalidate old runs, clearing params)
params_conflict = False
for tree in set([k[1] for k in params.keys()]):
if len(set([m for k, m in params.items() if m is not None and k[1] == tree])) > 1:
params_conflict = True
break
return render(
request,
'forest/analysis_progress.html',
context=dict(
study=study,
chart_columns=["participant", "tree"] + dates,
status_choices=ForestTaskStatus,
params_conflict=params_conflict,
start_date=start_date,
end_date=end_date,
chart=chart # this uses the jinja safe filter and should never involve user input
)
)
@require_http_methods(['GET', 'POST'])
@authenticate_admin
@forest_enabled
def create_tasks(request: ResearcherRequest, study_id=None):
# Only a SITE admin can queue forest tasks
if not request.session_researcher.site_admin:
return abort(403)
try:
study = Study.objects.get(pk=study_id)
except Study.DoesNotExist:
return abort(404)
# FIXME: remove this double endpoint pattern, it is bad.
if request.method == "GET":
return render_create_tasks(request, study)
form = CreateTasksForm(data=request.POST, study=study)
if not form.is_valid():
error_messages = [
f'"{field}": {message}'
for field, messages in form.errors.items()
for message in messages
]
error_messages_string = "\n".join(error_messages)
messages.warning(request, f"Errors:\n\n{error_messages_string}")
return render_create_tasks(request, study)
form.save()
messages.success(request, "Forest tasks successfully queued!")
return redirect(easy_url("forest_pages.task_log", study_id=study_id))
@require_GET
@authenticate_researcher_study_access
@forest_enabled
def task_log(request: ResearcherRequest, study_id=None):
study = Study.objects.get(pk=study_id)
forest_tasks = ForestTask.objects.filter(participant__study_id=study_id).order_by("-created_on")
return render(
request,
"forest/task_log.html",
context=dict(
study=study,
is_site_admin=request.session_researcher.site_admin,
status_choices=ForestTaskStatus,
forest_log=ForestTaskSerializer(forest_tasks, many=True).data,
)
)
@require_GET
@authenticate_admin
def download_task_log(request: ResearcherRequest):
forest_tasks = ForestTask.objects.order_by("created_on")
return FileResponse(
stream_forest_task_log_csv(forest_tasks),
content_type="text/csv",
filename=f"forest_task_log_{timezone.now().isoformat()}.csv",
as_attachment=True,
)
@require_POST
@authenticate_admin
@forest_enabled
def cancel_task(request: ResearcherRequest, study_id, forest_task_external_id):
if not request.session_researcher.site_admin:
return abort(403)
number_updated = \
ForestTask.objects.filter(
external_id=forest_task_external_id, status=ForestTaskStatus.queued
).update(
status=ForestTaskStatus.cancelled,
stacktrace=f"Canceled by {request.session_researcher.username} on {datetime.date.today()}",
)
if number_updated > 0:
messages.success(request, "Forest task successfully cancelled.")
else:
messages.warning(request, "Sorry, we were unable to find or cancel this Forest task.")
return redirect(easy_url("forest_pages.task_log", study_id=study_id))
@require_GET
@authenticate_admin
@forest_enabled
def download_task_data(request: ResearcherRequest, study_id, forest_task_external_id):
try:
tracker: ForestTask = ForestTask.objects.get(
external_id=forest_task_external_id, participant__study_id=study_id
)
except ForestTask.DoesNotExist:
return abort(404)
chunks = ChunkRegistry.objects.filter(participant=tracker.participant).values(*CHUNK_FIELDS)
f = FileResponse(
zip_generator(chunks),
content_type="zip",
as_attachment=True,
filename=f"{tracker.get_slug()}.zip",
)
f.set_headers(None)
return f
def stream_forest_task_log_csv(forest_tasks):
buffer = CSVBuffer()
writer = csv.DictWriter(buffer, fieldnames=ForestTaskCsvSerializer.Meta.fields)
writer.writeheader()
yield buffer.read()
for forest_task in forest_tasks:
writer.writerow(ForestTaskCsvSerializer(forest_task).data)
yield buffer.read()
def render_create_tasks(request: ResearcherRequest, study: Study):
participants = Participant.objects.filter(study=study)
try:
start_date = ChunkRegistry.objects.filter(participant__in=participants).earliest("time_bin")
end_date = ChunkRegistry.objects.filter(participant__in=participants).latest("time_bin")
start_date = start_date.time_bin.date()
end_date = end_date.time_bin.date()
except ChunkRegistry.DoesNotExist:
start_date = study.created_on.date()
end_date = timezone.now().date()
return render(
request,
"forest/create_tasks.html",
context=dict(
study=study,
participants=list(
study.participants.order_by("patient_id").values_list("patient_id", flat=True)
),
trees=ForestTree.choices(),
start_date=start_date.strftime('%Y-%m-%d'),
end_date=end_date.strftime('%Y-%m-%d')
)
)
class CSVBuffer:
line = ""
def read(self):
return self.line
def write(self, line):
self.line = line
| 36.533613
| 105
| 0.702588
| 1,045
| 8,695
| 5.629665
| 0.240191
| 0.020228
| 0.014958
| 0.026347
| 0.293728
| 0.211967
| 0.172871
| 0.102159
| 0.059493
| 0.043175
| 0
| 0.002332
| 0.210926
| 8,695
| 237
| 106
| 36.687764
| 0.855123
| 0.066475
| 0
| 0.260417
| 0
| 0
| 0.069693
| 0.032194
| 0
| 0
| 0
| 0.004219
| 0
| 1
| 0.052083
| false
| 0
| 0.114583
| 0.005208
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d7fb293f532babd1479825528080b1664689540
| 6,138
|
py
|
Python
|
data_scout/transformations/math_custom.py
|
janthiemen/data_scout
|
6366eedfb20ed429bc96100de4dd2c7409e5dd88
|
[
"MIT"
] | null | null | null |
data_scout/transformations/math_custom.py
|
janthiemen/data_scout
|
6366eedfb20ed429bc96100de4dd2c7409e5dd88
|
[
"MIT"
] | null | null | null |
data_scout/transformations/math_custom.py
|
janthiemen/data_scout
|
6366eedfb20ed429bc96100de4dd2c7409e5dd88
|
[
"MIT"
] | null | null | null |
from __future__ import division
from .transformation import Transformation
from pyparsing import (Literal, CaselessLiteral, Word, Combine, Group, Optional,
ZeroOrMore, Forward, nums, alphas, oneOf)
import math
import re
import operator
__author__ = 'Paul McGuire'
__version__ = '$Revision: 0.0 $'
__date__ = '$Date: 2009-03-20 $'
__source__ = '''http://pyparsing.wikispaces.com/file/view/fourFn.py
http://pyparsing.wikispaces.com/message/view/home/15549426
'''
__note__ = '''
All I've done is rewrap Paul McGuire's fourFn.py as a class, so I can use it
more easily in other places.
'''
class Custom(Transformation):
"""
Most of this code comes from the fourFn.py pyparsing example
"""
title = "Custom equation"
key = "Math equation"
fields = {
"equation": {"name": "Equation", "type": "string", "help": "The equation to evaluate. Column values should be entered as {COLUMN NAME}",
"required": True, "input": "text", "default": ""},
"output": {"name": "Output column", "type": "string", "input": "text", "required": True,
"help": "The name of the (newly created) column that contains the results", "default": ""},
}
def __init__(self, arguments: dict, sample_size: int, example: dict = None):
"""
Initialize the transformation with the given parameters.
expop :: '^'
multop :: '*' | '/'
addop :: '+' | '-'
integer :: ['+' | '-'] '0'..'9'+
atom :: PI | E | real | fn '(' expr ')' | '(' expr ')'
factor :: atom [ expop factor ]*
term :: factor [ multop factor ]*
expr :: term [ addop term ]*
Arguments:
arguments {dict} -- The arguments
"""
super().__init__(arguments, sample_size, example)
self.equation = arguments["equation"]
self.output = arguments["output"]
point = Literal(".")
e = CaselessLiteral("E")
fnumber = Combine(Word("+-" + nums, nums) +
Optional(point + Optional(Word(nums))) +
Optional(e + Word("+-" + nums, nums)))
ident = Word(alphas, alphas + nums + "_$")
plus = Literal("+")
minus = Literal("-")
mult = Literal("*")
div = Literal("/")
mod = Literal("%")
lpar = Literal("(").suppress()
rpar = Literal(")").suppress()
addop = plus | minus
multop = mult | div | mod
expop = Literal("^")
pi = CaselessLiteral("PI")
expr = Forward()
atom = ((Optional(oneOf("- +")) +
(ident + lpar + expr + rpar | pi | e | fnumber).setParseAction(self.push_first))
| Optional(oneOf("- +")) + Group(lpar + expr + rpar)
).setParseAction(self.push_u_minus)
# by defining exponentiation as "atom [ ^ factor ]..." instead of
# "atom [ ^ atom ]...", we get right-to-left exponents, instead of left-to-right
# that is, 2^3^2 = 2^(3^2), not (2^3)^2.
factor = Forward()
factor << atom + \
ZeroOrMore((expop + factor).setParseAction(self.push_first))
term = factor + \
ZeroOrMore((multop + factor).setParseAction(self.push_first))
expr << term + \
ZeroOrMore((addop + term).setParseAction(self.push_first))
# addop_term = ( addop + term ).setParseAction( self.push_first )
# general_term = term + ZeroOrMore( addop_term ) | OneOrMore( addop_term)
# expr << general_term
self.bnf = expr
# map operator symbols to corresponding arithmetic operations
epsilon = 1e-12
self.opn = {"+": operator.add,
"-": operator.sub,
"*": operator.mul,
"%": operator.mod,
"/": operator.truediv,
"^": operator.pow}
self.expr_stack = None
self.fn = {"sin": math.sin,
"sinh": math.sinh,
"cos": math.cos,
"cosh": math.cosh,
"tan": math.tan,
"tanh": math.tanh,
"exp": math.exp,
"sqrt": math.sqrt,
"radians": math.radians,
"degrees": math.degrees,
"sign": lambda x: 0 if x == 0 else x / abs(x),
"log": math.log10,
"ln": math.log,
"abs": abs,
"trunc": lambda a: int(a),
"round": round,
"floor": math.floor,
"ceil": math.ceil,
"sgn": lambda a: abs(a) > epsilon and cmp(a, 0) or 0}
def push_first(self, strg, loc, toks):
self.expr_stack.append(toks[0])
def push_u_minus(self, strg, loc, toks):
if toks and toks[0] == '-':
self.expr_stack.append('unary -')
def evaluate_stack(self, s):
op = s.pop()
if op == 'unary -':
return -self.evaluate_stack(s)
if op in "+-*/^%":
op2 = self.evaluate_stack(s)
op1 = self.evaluate_stack(s)
return self.opn[op](op1, op2)
elif op == "PI":
return math.pi # 3.1415926535
elif op == "E":
return math.e # 2.718281828
elif op in self.fn:
return self.fn[op](self.evaluate_stack(s))
elif op[0].isalpha():
return 0
else:
return float(op)
def eval(self, num_string, parse_all=True):
self.expr_stack = []
results = self.bnf.parseString(num_string, parse_all)
val = self.evaluate_stack(self.expr_stack[:])
return val
def __call__(self, row, index: int):
"""This class is called on each row.
Arguments:
row {dict} -- The complete row
Returns:
dict -- The row, including the extra output column
"""
row[self.output] = self.eval(re.sub(r'{(\w+)}', lambda x: str(row.get(x.group(1), 0)), self.equation))
return row, index
| 37.2
| 144
| 0.515477
| 664
| 6,138
| 4.661145
| 0.343373
| 0.017448
| 0.042649
| 0.043619
| 0.044588
| 0.023263
| 0
| 0
| 0
| 0
| 0
| 0.017024
| 0.339687
| 6,138
| 164
| 145
| 37.426829
| 0.746607
| 0.161453
| 0
| 0.016949
| 0
| 0.008475
| 0.140933
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.050847
| false
| 0
| 0.050847
| 0
| 0.211864
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d8266dee4d58a4d6dfa39e78c02bb8ed2be2717
| 6,261
|
py
|
Python
|
project/cloudmesh-storage/cloudmesh/vdir/api/manager.py
|
cybertraining-dsc/fa19-516-171
|
1dba8cde09f7b05c80557ea7ae462161c590568b
|
[
"Apache-2.0"
] | null | null | null |
project/cloudmesh-storage/cloudmesh/vdir/api/manager.py
|
cybertraining-dsc/fa19-516-171
|
1dba8cde09f7b05c80557ea7ae462161c590568b
|
[
"Apache-2.0"
] | 2
|
2019-12-02T03:11:42.000Z
|
2021-02-08T20:37:15.000Z
|
project/cloudmesh-storage/cloudmesh/vdir/api/manager.py
|
cybertraining-dsc/fa19-516-171
|
1dba8cde09f7b05c80557ea7ae462161c590568b
|
[
"Apache-2.0"
] | 2
|
2019-09-10T00:56:11.000Z
|
2020-05-05T02:54:31.000Z
|
#
# this manager stores directly into the db wit Database update
from cloudmesh.mongo.DataBaseDecorator import DatabaseUpdate
from cloudmesh.mongo.CmDatabase import CmDatabase
from cloudmesh.common.console import Console
from cloudmesh.storage.Provider import Provider
import os
from datetime import datetime
class Vdir(object):
def __init__(self):
self.cm = CmDatabase()
self.col = self.cm.db['local-vdir']
self.directory = 'vdir'
def cd(self, dirname=None):
try:
if dirname is None:
if self.directory == 'vdir':
Console.error("Root directory reached.")
else:
cwd = self.col.find_one({'type': 'directory', 'cm.name': self.directory})
self.directory = cwd['parent']
pwd = self.col.find_one({'type': 'directory', 'cm.name': self.directory})
return pwd
else:
directory = self.col.find_one({'type': 'directory', 'cm.name': dirname})
if directory['parent'] == self.directory:
self.directory = dirname
pwd = self.col.find_one({'type': 'directory', 'cm.name': self.directory})
return pwd
else:
Console.error('Directory does not exist at this location.')
except Exception as e:
print(e)
@DatabaseUpdate()
def mkdir(self, dirname):
try:
directory = self.col.find_one({"cm.name": dirname, 'type': 'directory'})
if directory is None:
dir_dict = dict()
dir_dict['cm'] = {
'name': dirname,
'kind': 'vdir',
'cloud': 'local'
}
dir_dict['type'] = 'directory'
dir_dict['parent'] = self.directory
dir_dict['cm']['created'] = datetime.utcnow()
dir_dict['cm']['modified'] = datetime.utcnow()
return dir_dict
else:
Console.error("Directory with that name exists.")
except Exception as e:
print(e)
def ls(self, directory=None):
try:
dash = '-' * 40
if directory is not None:
cloudmesh = self.col.find({'$or': [{'vdirectory': directory}, {'parent': directory}]})
count = self.col.count_documents({'$or': [{'vdirectory': directory}, {'parent': directory}]})
else:
cloudmesh = self.col.find({'$or': [{'vdirectory': self.directory}, {'parent': self.directory}]})
count = self.col.count_documents({'$or': [{'vdirectory': self.directory}, {'parent': self.directory}]})
locations = "{:<20} {:>}".format("Name", "Location") + "\n" + dash + "\n"
for i in range(0, count):
entry = cloudmesh[i]
if entry['type'] == 'fileendpoint':
location = entry['provider'] + ":" + entry['cloud_directory'] + "/" + entry['filename']
else:
if self.directory == '':
location = 'Vdir'
else:
location = self.directory
locations += "{:<20} {:>}".format(entry['cm']['name'], location) + "\n"
print(locations)
return locations
except Exception as e:
print(e)
@DatabaseUpdate()
def add(self, endpoint, dir_and_name):
try:
dirname = os.path.dirname(dir_and_name).split('/')[-1]
if dirname == '':
dirname = 'vdir'
directory = 'vdir'
else:
directory = self.col.find_one({"cm.name": dirname, 'type': 'directory'})
filename = os.path.basename(dir_and_name)
file = self.col.find_one({"cm.name": filename, 'type': 'fileendpoint'})
if directory is not None and file is None:
file_dict = dict()
file_dict['cm'] = {
'name': filename,
'kind': 'vdir',
'cloud': 'local'
}
file_dict['type'] = 'fileendpoint'
file_dict['vdirectory'] = dirname
file_dict['cloud_directory'] = os.path.dirname(endpoint).split(':')[1]
file_dict['filename'] = os.path.basename(endpoint)
file_dict['provider'] = os.path.dirname(endpoint).split(':')[0]
file_dict['cm']['created'] = datetime.utcnow()
file_dict['cm']['modified'] = datetime.utcnow()
return file_dict
elif directory is None:
Console.error("Virtual directory not found.")
elif file is not None:
print(file)
Console.error("File with that name already exists.")
except Exception as e:
print(e)
def get(self, name, destination=None):
try:
doc = self.col.find_one({'cm.name': name, 'type': 'fileendpoint'})
if doc is not None:
self.col.update_one({'cm.name': name, 'type': 'fileendpoint'},
{'$set': {'modified': datetime.utcnow()}})
service = doc['provider']
source = os.path.join(doc['cloud_directory'], doc['filename'])
print(source)
if destination is None:
destination = '~/.cloudmesh/vdir'
p = Provider(service)
file = p.get(source, destination, False)
return file
else:
Console.error("File not found.")
except Exception as e:
print(e)
def delete(self, dir_or_name):
try:
result = self.col.find_one({'cm.name': dir_or_name})
self.col.delete_one({'cm.name': dir_or_name})
return result
except Exception as e:
print(e)
def status(self, dir_or_name):
try:
result = self.col.find_one({'cm.name': dir_or_name})
return result
except Exception as e:
print(e)
| 40.921569
| 119
| 0.50008
| 629
| 6,261
| 4.891892
| 0.18283
| 0.038674
| 0.042899
| 0.045499
| 0.41794
| 0.342541
| 0.270393
| 0.238869
| 0.149496
| 0.149496
| 0
| 0.002528
| 0.368152
| 6,261
| 152
| 120
| 41.190789
| 0.775278
| 0.009583
| 0
| 0.330935
| 0
| 0
| 0.132946
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057554
| false
| 0
| 0.043165
| 0
| 0.165468
| 0.071942
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d830daa46eeb8bf91aea2f52e4b5f3c2d74b15e
| 2,605
|
py
|
Python
|
redash/query_runner/influx_db.py
|
cjpit/redash
|
27aafdb07e3a427da8f88d55a0c0d7cc64379da2
|
[
"BSD-2-Clause"
] | 1
|
2018-09-13T13:50:17.000Z
|
2018-09-13T13:50:17.000Z
|
redash/query_runner/influx_db.py
|
cjpit/redash
|
27aafdb07e3a427da8f88d55a0c0d7cc64379da2
|
[
"BSD-2-Clause"
] | null | null | null |
redash/query_runner/influx_db.py
|
cjpit/redash
|
27aafdb07e3a427da8f88d55a0c0d7cc64379da2
|
[
"BSD-2-Clause"
] | 1
|
2018-10-25T12:09:32.000Z
|
2018-10-25T12:09:32.000Z
|
import json
import logging
from redash.query_runner import *
from redash.utils import JSONEncoder
logger = logging.getLogger(__name__)
try:
from influxdb import InfluxDBClusterClient
enabled = True
except ImportError:
enabled = False
def _transform_result(results):
result_columns = []
result_rows = []
for result in results:
for series in result.raw.get('series', []):
for column in series['columns']:
if column not in result_columns:
result_columns.append(column)
tags = series.get('tags', {})
for key in tags.keys():
if key not in result_columns:
result_columns.append(key)
for result in results:
for series in result.raw.get('series', []):
for point in series['values']:
result_row = {}
for column in result_columns:
tags = series.get('tags', {})
if column in tags:
result_row[column] = tags[column]
elif column in series['columns']:
index = series['columns'].index(column)
value = point[index]
result_row[column] = value
result_rows.append(result_row)
return json.dumps({
"columns": [{'name': c} for c in result_columns],
"rows": result_rows
}, cls=JSONEncoder)
class InfluxDB(BaseQueryRunner):
noop_query = "show measurements limit 1"
@classmethod
def configuration_schema(cls):
return {
'type': 'object',
'properties': {
'url': {
'type': 'string'
}
},
'required': ['url']
}
@classmethod
def enabled(cls):
return enabled
@classmethod
def annotate_query(cls):
return False
@classmethod
def type(cls):
return "influxdb"
def run_query(self, query, user):
client = InfluxDBClusterClient.from_DSN(self.configuration['url'])
logger.debug("influxdb url: %s", self.configuration['url'])
logger.debug("influxdb got query: %s", query)
try:
results = client.query(query)
if not isinstance(results, list):
results = [results]
json_data = _transform_result(results)
error = None
except Exception as ex:
json_data = None
error = ex.message
return json_data, error
register(InfluxDB)
| 26.313131
| 74
| 0.542035
| 261
| 2,605
| 5.291188
| 0.318008
| 0.065894
| 0.043447
| 0.026068
| 0.182476
| 0.182476
| 0.125996
| 0.072411
| 0.072411
| 0.072411
| 0
| 0.000602
| 0.362764
| 2,605
| 98
| 75
| 26.581633
| 0.831325
| 0
| 0
| 0.157895
| 0
| 0
| 0.07025
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078947
| false
| 0
| 0.078947
| 0.052632
| 0.263158
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d83ad6cff3db1b6e4ae4b1be6ce413b57641a09
| 9,537
|
py
|
Python
|
ics/mergeGatingSets.py
|
victorfica/utils
|
b61935a860838a0e70afde7c9ecf2c68f51a2c4b
|
[
"MIT"
] | 5
|
2015-12-16T01:23:07.000Z
|
2020-04-27T11:41:43.000Z
|
ics/mergeGatingSets.py
|
victorfica/utils
|
b61935a860838a0e70afde7c9ecf2c68f51a2c4b
|
[
"MIT"
] | 1
|
2021-05-06T23:47:20.000Z
|
2021-05-06T23:48:33.000Z
|
ics/mergeGatingSets.py
|
victorfica/utils
|
b61935a860838a0e70afde7c9ecf2c68f51a2c4b
|
[
"MIT"
] | 6
|
2016-04-29T14:04:22.000Z
|
2021-05-06T23:49:34.000Z
|
#!/usr/bin/env python
"""
Usage examples:
python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function functions --ncpus 4 --out functions_extract.csv
sbatch -n 1 -t 3-0 -c 4 -o functions_slurm.txt --wrap="python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function functions --ncpus 4 --out functions_extract.csv"
sbatch -n 1 -t 3-0 -c 4 -o functions_markers_slurm.txt --wrap="python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function functions_markers --ncpus 4 --out functions_markers_extract.csv"
sbatch -n 1 -t 3-0 -c 4 -o functions_markers_sparse_slurm_gby.txt --wrap="python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function sparse_functions --ncpus 4 --subsets /home/agartlan/gitrepo/utils/ics/allcombs_subsets.csv --out functions_markers_sparse_24Jul2018_gby.csv"
sbatch -n 1 -t 3-0 -c 4 -o cell_functions_slurm.txt --wrap="python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function cell_functions --ncpus 4 --out cell_functions_22Aug2018.feather --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv"
python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function cell_functions --ncpus 3 --out cell_functions_extract.csv --testbatch --testsamples --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv
python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function sparse_functions --ncpus 3 --out sparse_functions_extract_23Aug2018.csv --testbatch --testsamples --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv
python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function bool_functions --ncpus 6 --out bool_functions_extract_05May2020.csv --testbatch --testsamples --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv
To delete all tmp files use:
find . -name \merged_tmp*.feather -type f -delete
"""
def mergeBatches(dataFolder, extractionFunc, extractionKwargs, ncpus, testsamples, testbatch, outFile, metaCols=None, filters=None, useFeather=False):
out = []
batchList = [opj(dataFolder, bf) for bf in os.listdir(dataFolder) if os.path.isdir(opj(dataFolder, bf))]
if testbatch:
batchList = batchList[:1]
matchStr = 'gs_*.feather'
if ncpus > 1 and _PARMAP:
res = parmap.map(mergeSamples,
batchList,
extractionFunc,
extractionKwargs,
matchStr,
testsamples,
metaCols,
filters,
pool=Pool(processes=ncpus))
else:
if _PARMAP:
res = parmap.map(mergeSamples,
batchList,
extractionFunc,
extractionKwargs,
matchStr,
testsamples,
metaCols,
filters,
parallel=False)
else:
func = partial(mergeSamples,
extractionFunc=extractionFunc,
extractionKwargs=extractionKwargs,
matchStr=matchStr,
test=testsamples,
metaCols=metaCols,
filters=filters)
res = list(map(func, batchList))
outFilename = mergeFeathers(res, outFile, writeCSV=1 - int(useFeather))
return outFilename
def testMatching(dataFolder):
out = []
for bf in os.listdir(dataFolder):
batchFolder = opj(dataFolder, bf)
if os.path.isdir(opj(dataFolder, bf)):
featherLU = matchSamples(batchFolder, test=False)
tmp = pd.Series(featherLU).to_frame()
tmp.loc[:, 'batch'] = bf
tmp.loc[:, 'batch_folder'] = opj(dataFolder, bf)
out.append(tmp)
return pd.concat(out, axis=0)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Extract features and merge batches into one CSV.')
parser.add_argument('--folder', type=str,
help='Data folder containing all batch folders.',
default='/fh/fast/gilbert_p/grp/hvtn602_compass/tmpdata')
parser.add_argument('--function', type=str,
help='Name of extraction to apply ("functions")',
default='functions')
parser.add_argument('--subsets', type=str,
help='Filename listing subsets for analysis.',
default='/home/agartlan/gitrepo/utils/ics/sample_subsets2.csv')
parser.add_argument('--out', type=str,
help='Output filename for CSV.',
default='merged_out.csv')
parser.add_argument('--ncpus', type=int,
help='Number of CPUs/cores to use for parallelization.',
default=1)
parser.add_argument('--testsamples', action='store_true', help='Only process two samples from each batch.')
parser.add_argument('--testbatch', action='store_true', help='Only process twp samples from one batch.')
parser.add_argument('--matchingonly', action='store_true', help='Only perform sample matching, to validate metadata.')
parser.add_argument('--feather', action='store_true', help='Store as feather as oposed to CSV')
parser.add_argument('--utils', default='/home/agartlan/gitrepo/utils', help='Location of agartland/utils repo from public github.com')
args = parser.parse_args()
try:
import parmap
from multiprocessing import Pool
_PARMAP = True
except:
_PARMAP = False
print('Could not find package "parmap", parallelization not enabled.')
import itertools
import pandas as pd
import numpy as np
from os.path import join as opj
import os
from functools import partial
import time
import sys
import feather
"""Make sure the utils are on path before importing"""
sys.path.append(args.utils)
# from ics import extractFunctionsGBY, extractFunctionsMarkersGBY, parseSubsets, mergeSamples, matchSamples
from ics import *
if args.matchingonly:
metaDf = testMatching(args.folder)
metaDf.to_csv(opj(args.folder, 'metamatch_' + args.out))
print('Wrote matching metadata to %s.' % opj(args.folder, 'metamatch_' + args.out))
else:
subsets, markers, functions, exclude = parseSubsets(args.subsets)
features = {'sparse_functions':(extractFunctionsGBY, dict(subsets=subsets,
functions=functions,
mincells=5)),
'bool_functions':(extractFunctionsGBY, dict(subsets=subsets,
functions=functions,
mincells=0)),
'functions_markers':(extractFunctionsMarkersGBY, dict(subsets=subsets,
functions=functions,
markers=markers,
compressions=[('ALL', 2),
(['IFNg','IL2', 'TNFa'], 2)])),
'functions':(extractFunctionsGBY, dict(subsets=subsets,
functions=functions,
compressions=[('ALL', 1),
('ALL', 2),
(['IFNg','IL2', 'TNFa'], 1),
(['IFNg','IL2', 'TNFa'], 2),
(['IFNg','IL2'], 1)])),
'cell_functions':(extractRawFunctions, dict(subsets=subsets, functions=functions, downsample=1))}
extractionFunc, extractionKwargs = features[args.function]
if args.testbatch:
print('Test: processing samples from one batch')
if args.testsamples:
print('Test: processing two samples per batch')
outFile = opj(args.folder, args.out)
if args.feather:
outFile = outFile.replace('.csv', '.feather')
wrote = mergeBatches(args.folder,
extractionFunc=extractionFunc,
extractionKwargs=extractionKwargs,
testsamples=args.testsamples,
testbatch=args.testbatch,
outFile=outFile,
metaCols=['PTID', 'VISITNO', 'Global.Spec.Id', 'TESTDT', 'STIM'],
filters={'STIM':['negctrl', 'TB WCL', 'BCG-Pasteur', 'Ag85B', 'TB 10.4'], 'VISITNO':[2, 6, 7, 10, 11, 12]},
useFeather=int(args.feather),
ncpus=args.ncpus)
if wrote == outFile:
print('Wrote extracted data to %s.' % outFile)
else:
print('Error writing file to disk: %s' % wrote)
| 52.401099
| 284
| 0.554996
| 923
| 9,537
| 5.63922
| 0.265439
| 0.034582
| 0.054755
| 0.069164
| 0.377714
| 0.34121
| 0.302017
| 0.278963
| 0.251297
| 0.251297
| 0
| 0.014208
| 0.34319
| 9,537
| 182
| 285
| 52.401099
| 0.816731
| 0.209919
| 0
| 0.217391
| 0
| 0
| 0.165529
| 0.016861
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014493
| false
| 0.007246
| 0.094203
| 0
| 0.123188
| 0.043478
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d85c596f37801463f956fbf7ef5af170636decb
| 1,000
|
py
|
Python
|
setup.py
|
uuosio/uuosio.gscdk
|
a2e364d4499c1372567aa5933e2d8e02340a8385
|
[
"BSD-3-Clause"
] | 6
|
2021-09-03T09:02:39.000Z
|
2022-01-12T06:31:09.000Z
|
setup.py
|
uuosio/uuosio.gscdk
|
a2e364d4499c1372567aa5933e2d8e02340a8385
|
[
"BSD-3-Clause"
] | 1
|
2021-11-01T16:46:09.000Z
|
2021-11-04T12:51:45.000Z
|
setup.py
|
uuosio/uuosio.gscdk
|
a2e364d4499c1372567aa5933e2d8e02340a8385
|
[
"BSD-3-Clause"
] | 2
|
2021-11-10T01:56:15.000Z
|
2022-01-13T14:27:31.000Z
|
import os
import shutil
import setuptools
# from skbuild import setup
from distutils.core import setup
from distutils.sysconfig import get_python_lib
import glob
# if os.path.exists('pysrc/tinygo'):
# shutil.rmtree('pysrc/tinygo')
# shutil.copytree('tinygo/build/release/tinygo', 'pysrc/tinygo')
release_files = []
for root, dirs, files in os.walk("pysrc/tinygo"):
for f in files:
release_files.append(os.path.join(root.replace('pysrc/', ''), f))
# print(release_files)
setup(
name="gscdk",
version="0.3.5",
description="Go Smart Contract Development Kit",
author='The UUOSIO Team',
license="BSD-3-Clause",
url="https://github.com/uuosio/uuosio.gscdk",
packages=['gscdk'],
package_dir={'gscdk': 'pysrc'},
package_data={
# "": ["*"],
'gscdk': release_files,
},
setup_requires=['wheel']
# scripts=['compiler/build/release/tinygo/bin/eosio-go'],
# install_requires=[
# ],
# include_package_data=True
)
| 24.390244
| 77
| 0.658
| 126
| 1,000
| 5.126984
| 0.555556
| 0.068111
| 0.04644
| 0.074303
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004884
| 0.181
| 1,000
| 40
| 78
| 25
| 0.783883
| 0.3
| 0
| 0
| 0
| 0
| 0.219477
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d88a4a57aa7fe412e25b74cc37254832f74121b
| 1,113
|
py
|
Python
|
treenode/debug.py
|
domlysi/django-treenode
|
86e7c76e2b2d60c071cfce6ad1493b2b51f2d304
|
[
"MIT"
] | null | null | null |
treenode/debug.py
|
domlysi/django-treenode
|
86e7c76e2b2d60c071cfce6ad1493b2b51f2d304
|
[
"MIT"
] | null | null | null |
treenode/debug.py
|
domlysi/django-treenode
|
86e7c76e2b2d60c071cfce6ad1493b2b51f2d304
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.conf import settings
from django.db import connection
import logging
import timeit
logger = logging.getLogger(__name__)
class debug_performance(object):
def __init__(self, message_prefix=''):
super(debug_performance, self).__init__()
self.__message_prefix = message_prefix
@staticmethod
def _get_queries():
return len(connection.queries)
@staticmethod
def _get_timer():
return timeit.default_timer()
def __enter__(self):
self.__init_queries = debug_performance._get_queries()
self.__init_timer = debug_performance._get_timer()
return None
def __exit__(self, type_, value, traceback):
queries = (debug_performance._get_queries() - self.__init_queries)
timer = (debug_performance._get_timer() - self.__init_timer)
if settings.DEBUG:
message = '\r%sexecuted %s %s in %ss.' % (
self.__message_prefix,
queries,
'query' if queries == 1 else 'queries',
timer, )
print(message)
| 26.5
| 74
| 0.637017
| 120
| 1,113
| 5.408333
| 0.4
| 0.14792
| 0.117103
| 0.064715
| 0.215716
| 0.126348
| 0.126348
| 0
| 0
| 0
| 0
| 0.002445
| 0.265049
| 1,113
| 41
| 75
| 27.146341
| 0.790954
| 0.018868
| 0
| 0.068966
| 0
| 0
| 0.034862
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.172414
| false
| 0
| 0.137931
| 0.068966
| 0.448276
| 0.034483
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d8a580c6383e0fa95c751c549eb6cc5184e491f
| 3,705
|
py
|
Python
|
String_tool.py
|
vibhorvk/BlendString
|
3bf62083716b3b1f4976abeb3528771eeb79e2cf
|
[
"MIT"
] | null | null | null |
String_tool.py
|
vibhorvk/BlendString
|
3bf62083716b3b1f4976abeb3528771eeb79e2cf
|
[
"MIT"
] | null | null | null |
String_tool.py
|
vibhorvk/BlendString
|
3bf62083716b3b1f4976abeb3528771eeb79e2cf
|
[
"MIT"
] | null | null | null |
bl_info = {
"name": "STRING",
"blender": (2, 80, 0),
"category": "Object",
'Author' : 'Vibhor Gupta'
}
import bpy
import bmesh
class STRING(bpy.types.Operator):
"""My Object Moving Script""" # Use this as a tooltip for menu items and buttons.
bl_idname = "object.stringtool_ot" # Unique identifier for buttons and menu items to reference.
bl_label = "String" # Display name in the interface.
bl_options = {'REGISTER', 'UNDO'} # Enable undo for the operator.
bdepth: bpy.props.FloatProperty(name = "String Thickness", min = 0.1, max = 5, precision = 2 )
def execute(self, context):
# The original script
####################
#to create an edge between two given objects
def Edgify(ob1,ob2):
loc1 = ob1.location
loc2 = ob2.location
verts = [loc1,loc2]
bpy.ops.mesh.primitive_plane_add(location = (0,0,0))
bpy.ops.object.editmode_toggle()
bpy.ops.mesh.select_all(action='SELECT')
bpy.ops.mesh.delete(type='VERT')
#creating the vertices using the current mesh data into bmesh
pipe = bpy.context.object.data
bm = bmesh.new()
for v in verts:
bm.verts.new(v)
bpy.ops.object.editmode_toggle()
bm.to_mesh(pipe)
bm.free()
bpy.ops.object.editmode_toggle()
bpy.ops.mesh.select_all(action='SELECT')
bpy.ops.mesh.edge_face_add()
bpy.ops.object.editmode_toggle()
def string(olist):
edges = []
l = len(olist)
for x in range(l):
for y in range(l):
if y != x and x < y :
Edgify(olist[x], olist[y])
edges.append(bpy.context.active_object)
return edges
def piper(xlist):
bpy.ops.object.select_all(action='DESELECT')
for x in xlist:
x.select_set(True)
bpy.ops.object.join()
bpy.ops.object.convert(target='CURVE')
def check(olist):
if len(olist) == 0:
self.report({'INFO'},'NONE SELECTED OBJECTS')
return 0
else:
return 1
oblist = bpy.context.selected_objects
Edgelist = string(oblist)
piper(Edgelist)
actob = bpy.context.active_object
actob.data.bevel_depth = self.bdepth
bpy.ops.object.shade_smooth()
########################
return {'FINISHED'} # Lets Blender know the operator finished successfully.
class STRING_PT(bpy.types.Panel):
bl_idname = "object_stringtool_pt"
bl_label = "String"
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
bl_category = "newprop"
def draw(self, context):
# You can set the property values that should be used when the user
# presses the button in the UI.
layout = self.layout
props = layout.operator('object.stringtool_ot')
def register():
bpy.utils.register_class(STRING)
def unregister():
bpy.utils.unregister_class(STRING)
# This allows you to run the script directly from Blender's Text editor
# to test the add-on without having to install it.
if __name__ == "__main__":
register()
| 29.879032
| 107
| 0.515789
| 413
| 3,705
| 4.51816
| 0.423729
| 0.041801
| 0.051447
| 0.042872
| 0.099678
| 0.071811
| 0.071811
| 0.071811
| 0.071811
| 0.071811
| 0
| 0.010022
| 0.380567
| 3,705
| 124
| 108
| 29.879032
| 0.80305
| 0.161673
| 0
| 0.105263
| 0
| 0
| 0.080397
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.026316
| 0
| 0.328947
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d8ccdbb74a4a8ff8a56e579b885b0bbd0743a4f
| 7,666
|
py
|
Python
|
awx/plugins/library/scan_services.py
|
Avinesh/awx
|
6310a2edd890d6062a9f6bcdeb2b46c4b876c2bf
|
[
"Apache-2.0"
] | 17
|
2021-04-03T01:40:17.000Z
|
2022-03-03T11:45:20.000Z
|
awx/plugins/library/scan_services.py
|
Avinesh/awx
|
6310a2edd890d6062a9f6bcdeb2b46c4b876c2bf
|
[
"Apache-2.0"
] | 24
|
2021-05-18T21:13:35.000Z
|
2022-03-29T10:23:52.000Z
|
awx/plugins/library/scan_services.py
|
Avinesh/awx
|
6310a2edd890d6062a9f6bcdeb2b46c4b876c2bf
|
[
"Apache-2.0"
] | 14
|
2021-04-06T20:05:41.000Z
|
2022-03-24T14:16:03.000Z
|
#!/usr/bin/env python
import re
from ansible.module_utils.basic import * # noqa
DOCUMENTATION = '''
---
module: scan_services
short_description: Return service state information as fact data
description:
- Return service state information as fact data for various service management utilities
version_added: "1.9"
options:
requirements: [ ]
author: Matthew Jones
'''
EXAMPLES = '''
- monit: scan_services
# Example fact output:
# host | success >> {
# "ansible_facts": {
# "services": {
# "network": {
# "source": "sysv",
# "state": "running",
# "name": "network"
# },
# "arp-ethers.service": {
# "source": "systemd",
# "state": "stopped",
# "name": "arp-ethers.service"
# }
# }
# }
'''
class BaseService(object):
def __init__(self, module):
self.module = module
self.incomplete_warning = False
class ServiceScanService(BaseService):
def gather_services(self):
services = {}
service_path = self.module.get_bin_path("service")
if service_path is None:
return None
initctl_path = self.module.get_bin_path("initctl")
chkconfig_path = self.module.get_bin_path("chkconfig")
# sysvinit
if service_path is not None and chkconfig_path is None:
rc, stdout, stderr = self.module.run_command("%s --status-all 2>&1 | grep -E \"\\[ (\\+|\\-) \\]\"" % service_path, use_unsafe_shell=True)
for line in stdout.split("\n"):
line_data = line.split()
if len(line_data) < 4:
continue # Skipping because we expected more data
service_name = " ".join(line_data[3:])
if line_data[1] == "+":
service_state = "running"
else:
service_state = "stopped"
services[service_name] = {"name": service_name, "state": service_state, "source": "sysv"}
# Upstart
if initctl_path is not None and chkconfig_path is None:
p = re.compile(r'^\s?(?P<name>.*)\s(?P<goal>\w+)\/(?P<state>\w+)(\,\sprocess\s(?P<pid>[0-9]+))?\s*$')
rc, stdout, stderr = self.module.run_command("%s list" % initctl_path)
real_stdout = stdout.replace("\r","")
for line in real_stdout.split("\n"):
m = p.match(line)
if not m:
continue
service_name = m.group('name')
service_goal = m.group('goal')
service_state = m.group('state')
if m.group('pid'):
pid = m.group('pid')
else:
pid = None # NOQA
payload = {"name": service_name, "state": service_state, "goal": service_goal, "source": "upstart"}
services[service_name] = payload
# RH sysvinit
elif chkconfig_path is not None:
#print '%s --status-all | grep -E "is (running|stopped)"' % service_path
p = re.compile(
r'(?P<service>.*?)\s+[0-9]:(?P<rl0>on|off)\s+[0-9]:(?P<rl1>on|off)\s+[0-9]:(?P<rl2>on|off)\s+'
r'[0-9]:(?P<rl3>on|off)\s+[0-9]:(?P<rl4>on|off)\s+[0-9]:(?P<rl5>on|off)\s+[0-9]:(?P<rl6>on|off)')
rc, stdout, stderr = self.module.run_command('%s' % chkconfig_path, use_unsafe_shell=True)
# Check for special cases where stdout does not fit pattern
match_any = False
for line in stdout.split('\n'):
if p.match(line):
match_any = True
if not match_any:
p_simple = re.compile(r'(?P<service>.*?)\s+(?P<rl0>on|off)')
match_any = False
for line in stdout.split('\n'):
if p_simple.match(line):
match_any = True
if match_any:
# Try extra flags " -l --allservices" needed for SLES11
rc, stdout, stderr = self.module.run_command('%s -l --allservices' % chkconfig_path, use_unsafe_shell=True)
elif '--list' in stderr:
# Extra flag needed for RHEL5
rc, stdout, stderr = self.module.run_command('%s --list' % chkconfig_path, use_unsafe_shell=True)
for line in stdout.split('\n'):
m = p.match(line)
if m:
service_name = m.group('service')
service_state = 'stopped'
if m.group('rl3') == 'on':
rc, stdout, stderr = self.module.run_command('%s %s status' % (service_path, service_name), use_unsafe_shell=True)
service_state = rc
if rc in (0,):
service_state = 'running'
#elif rc in (1,3):
else:
if 'root' in stderr or 'permission' in stderr.lower() or 'not in sudoers' in stderr.lower():
self.incomplete_warning = True
continue
else:
service_state = 'stopped'
service_data = {"name": service_name, "state": service_state, "source": "sysv"}
services[service_name] = service_data
return services
class SystemctlScanService(BaseService):
def systemd_enabled(self):
# Check if init is the systemd command, using comm as cmdline could be symlink
try:
f = open('/proc/1/comm', 'r')
except IOError:
# If comm doesn't exist, old kernel, no systemd
return False
for line in f:
if 'systemd' in line:
return True
return False
def gather_services(self):
services = {}
if not self.systemd_enabled():
return None
systemctl_path = self.module.get_bin_path("systemctl", opt_dirs=["/usr/bin", "/usr/local/bin"])
if systemctl_path is None:
return None
rc, stdout, stderr = self.module.run_command("%s list-unit-files --type=service | tail -n +2 | head -n -2" % systemctl_path, use_unsafe_shell=True)
for line in stdout.split("\n"):
line_data = line.split()
if len(line_data) != 2:
continue
if line_data[1] == "enabled":
state_val = "running"
else:
state_val = "stopped"
services[line_data[0]] = {"name": line_data[0], "state": state_val, "source": "systemd"}
return services
def main():
module = AnsibleModule(argument_spec = dict()) # noqa
service_modules = (ServiceScanService, SystemctlScanService)
all_services = {}
incomplete_warning = False
for svc_module in service_modules:
svcmod = svc_module(module)
svc = svcmod.gather_services()
if svc is not None:
all_services.update(svc)
if svcmod.incomplete_warning:
incomplete_warning = True
if len(all_services) == 0:
results = dict(skipped=True, msg="Failed to find any services. Sometimes this is due to insufficient privileges.")
else:
results = dict(ansible_facts=dict(services=all_services))
if incomplete_warning:
results['msg'] = "WARNING: Could not find status for all services. Sometimes this is due to insufficient privileges."
module.exit_json(**results)
main()
| 40.136126
| 155
| 0.530655
| 878
| 7,666
| 4.490888
| 0.235763
| 0.03297
| 0.024854
| 0.031955
| 0.339336
| 0.315496
| 0.23231
| 0.23231
| 0.124017
| 0.0667
| 0
| 0.009185
| 0.346726
| 7,666
| 190
| 156
| 40.347368
| 0.778155
| 0.059223
| 0
| 0.257862
| 0
| 0.025157
| 0.229262
| 0.044602
| 0
| 0
| 0
| 0
| 0
| 1
| 0.031447
| false
| 0
| 0.012579
| 0
| 0.113208
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d8e9f525045331a16efdc9df5a7b8042480b89c
| 1,118
|
py
|
Python
|
cppgym/ToyText/BlackJack.py
|
anhydrous99/cppgym
|
0b1009a74faebfe5a31bcfd6a86c74cf13464d56
|
[
"MIT"
] | null | null | null |
cppgym/ToyText/BlackJack.py
|
anhydrous99/cppgym
|
0b1009a74faebfe5a31bcfd6a86c74cf13464d56
|
[
"MIT"
] | 1
|
2021-01-03T10:21:36.000Z
|
2021-01-26T03:59:07.000Z
|
cppgym/ToyText/BlackJack.py
|
anhydrous99/cppgym
|
0b1009a74faebfe5a31bcfd6a86c74cf13464d56
|
[
"MIT"
] | null | null | null |
from .._BlackJack import BlackJackCPP
import gym
import ctypes
import numpy as np
from gym import spaces
class BlackJack(gym.Env):
def __init__(self, natural=False):
self.env = BlackJackCPP(natural)
self.action_space = spaces.Discrete(2)
self.observation_space = spaces.Tuple((
spaces.Discrete(32),
spaces.Discrete(11),
spaces.Discrete(2)
))
self.state = None
self.natural = natural
def seed(self, seed=None):
if seed is None:
return [self.env.get_seed()]
else:
if not isinstance(seed, ctypes.c_uint32):
seed = ctypes.c_uint32(seed).value
self.env.set_seed(seed)
return [seed]
def step(self, action):
assert self.action_space.contains(action)
state, reward, done = self.env.step(action)
self.state = np.array(state)
return self.state, reward, done, {}
def render(self, mode='human'):
return None
def reset(self):
self.state = np.array(self.env.reset())
return self.state
| 26.619048
| 53
| 0.593918
| 137
| 1,118
| 4.759124
| 0.357664
| 0.053681
| 0.046012
| 0.058282
| 0.058282
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012804
| 0.301431
| 1,118
| 41
| 54
| 27.268293
| 0.822023
| 0
| 0
| 0
| 0
| 0
| 0.004472
| 0
| 0
| 0
| 0
| 0
| 0.029412
| 1
| 0.147059
| false
| 0
| 0.147059
| 0.029412
| 0.470588
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d8f21c23f88e0ce2aa150c385f666597b203749
| 5,827
|
py
|
Python
|
sdks/python/apache_beam/runners/direct/consumer_tracking_pipeline_visitor_test.py
|
ajothomas/beam
|
4774c1caf3dac3b6a7dd161f82559a26fa380920
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 5
|
2019-07-27T11:54:33.000Z
|
2021-06-06T11:53:36.000Z
|
sdks/python/apache_beam/runners/direct/consumer_tracking_pipeline_visitor_test.py
|
ajothomas/beam
|
4774c1caf3dac3b6a7dd161f82559a26fa380920
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 12
|
2019-04-15T15:27:23.000Z
|
2019-07-01T18:13:10.000Z
|
sdks/python/apache_beam/runners/direct/consumer_tracking_pipeline_visitor_test.py
|
ajothomas/beam
|
4774c1caf3dac3b6a7dd161f82559a26fa380920
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2021-06-03T19:54:48.000Z
|
2021-06-03T19:54:48.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for consumer_tracking_pipeline_visitor."""
# pytype: skip-file
import logging
import unittest
import apache_beam as beam
from apache_beam import pvalue
from apache_beam.pipeline import Pipeline
from apache_beam.pvalue import AsList
from apache_beam.runners.direct import DirectRunner
from apache_beam.runners.direct.consumer_tracking_pipeline_visitor import ConsumerTrackingPipelineVisitor
from apache_beam.transforms import CoGroupByKey
from apache_beam.transforms import Create
from apache_beam.transforms import DoFn
from apache_beam.transforms import FlatMap
from apache_beam.transforms import Flatten
from apache_beam.transforms import ParDo
# Disable frequent lint warning due to pipe operator for chaining transforms.
# pylint: disable=expression-not-assigned
# pylint: disable=pointless-statement
class ConsumerTrackingPipelineVisitorTest(unittest.TestCase):
def setUp(self):
self.pipeline = Pipeline(DirectRunner())
self.visitor = ConsumerTrackingPipelineVisitor()
def test_root_transforms(self):
root_read = beam.Impulse()
root_flatten = Flatten(pipeline=self.pipeline)
pbegin = pvalue.PBegin(self.pipeline)
pcoll_read = pbegin | 'read' >> root_read
pcoll_read | FlatMap(lambda x: x)
[] | 'flatten' >> root_flatten
self.pipeline.visit(self.visitor)
root_transforms = [t.transform for t in self.visitor.root_transforms]
self.assertCountEqual(root_transforms, [root_read, root_flatten])
pbegin_consumers = [
c.transform for c in self.visitor.value_to_consumers[pbegin]
]
self.assertCountEqual(pbegin_consumers, [root_read])
self.assertEqual(len(self.visitor.step_names), 3)
def test_side_inputs(self):
class SplitNumbersFn(DoFn):
def process(self, element):
if element < 0:
yield pvalue.TaggedOutput('tag_negative', element)
else:
yield element
class ProcessNumbersFn(DoFn):
def process(self, element, negatives):
yield element
def _process_numbers(pcoll, negatives):
first_output = (
pcoll
| 'process numbers step 1' >> ParDo(ProcessNumbersFn(), negatives))
second_output = (
first_output
| 'process numbers step 2' >> ParDo(ProcessNumbersFn(), negatives))
output_pc = ((first_output, second_output)
| 'flatten results' >> beam.Flatten())
return output_pc
root_read = beam.Impulse()
result = (
self.pipeline
| 'read' >> root_read
| ParDo(SplitNumbersFn()).with_outputs('tag_negative', main='positive'))
positive, negative = result
_process_numbers(positive, AsList(negative))
self.pipeline.visit(self.visitor)
root_transforms = [t.transform for t in self.visitor.root_transforms]
self.assertEqual(root_transforms, [root_read])
self.assertEqual(len(self.visitor.step_names), 5)
self.assertEqual(len(self.visitor.views), 1)
self.assertTrue(isinstance(self.visitor.views[0], pvalue.AsList))
def test_co_group_by_key(self):
emails = self.pipeline | 'email' >> Create([('joe', 'joe@example.com')])
phones = self.pipeline | 'phone' >> Create([('mary', '111-222-3333')])
{'emails': emails, 'phones': phones} | CoGroupByKey()
self.pipeline.visit(self.visitor)
root_transforms = [t.transform for t in self.visitor.root_transforms]
self.assertEqual(len(root_transforms), 2)
self.assertGreater(
len(self.visitor.step_names), 3) # 2 creates + expanded CoGBK
self.assertEqual(len(self.visitor.views), 0)
def test_visitor_not_sorted(self):
p = Pipeline()
# pylint: disable=expression-not-assigned
from apache_beam.testing.test_stream import TestStream
p | TestStream().add_elements(['']) | beam.Map(lambda _: _)
original_graph = p.to_runner_api(return_context=False)
out_of_order_graph = p.to_runner_api(return_context=False)
root_id = out_of_order_graph.root_transform_ids[0]
root = out_of_order_graph.components.transforms[root_id]
tmp = root.subtransforms[0]
root.subtransforms[0] = root.subtransforms[1]
root.subtransforms[1] = tmp
p = beam.Pipeline().from_runner_api(
out_of_order_graph, runner='BundleBasedDirectRunner', options=None)
v_out_of_order = ConsumerTrackingPipelineVisitor()
p.visit(v_out_of_order)
p = beam.Pipeline().from_runner_api(
original_graph, runner='BundleBasedDirectRunner', options=None)
v_original = ConsumerTrackingPipelineVisitor()
p.visit(v_original)
# Convert to string to assert they are equal.
out_of_order_labels = {
str(k): [str(t) for t in v_out_of_order.value_to_consumers[k]]
for k in v_out_of_order.value_to_consumers
}
original_labels = {
str(k): [str(t) for t in v_original.value_to_consumers[k]]
for k in v_original.value_to_consumers
}
self.assertDictEqual(out_of_order_labels, original_labels)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
unittest.main()
| 35.530488
| 105
| 0.727476
| 748
| 5,827
| 5.474599
| 0.296791
| 0.037607
| 0.041026
| 0.035165
| 0.304029
| 0.205128
| 0.139438
| 0.139438
| 0.101832
| 0.069109
| 0
| 0.006255
| 0.176935
| 5,827
| 163
| 106
| 35.748466
| 0.847581
| 0.185172
| 0
| 0.11215
| 0
| 0
| 0.045763
| 0.009746
| 0
| 0
| 0
| 0
| 0.102804
| 1
| 0.074766
| false
| 0
| 0.140187
| 0
| 0.252336
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d90667a56cb7c978d1072b2a27a14dbab5c4dfc
| 3,798
|
py
|
Python
|
agents/EWPublisherAgent.py
|
marc4gov/tokenspice2
|
1993383674f35b20e11e54606b3dac8e4c05c0f9
|
[
"Apache-2.0"
] | 1
|
2021-01-12T08:06:21.000Z
|
2021-01-12T08:06:21.000Z
|
agents/EWPublisherAgent.py
|
marc4gov/tokenspice2
|
1993383674f35b20e11e54606b3dac8e4c05c0f9
|
[
"Apache-2.0"
] | null | null | null |
agents/EWPublisherAgent.py
|
marc4gov/tokenspice2
|
1993383674f35b20e11e54606b3dac8e4c05c0f9
|
[
"Apache-2.0"
] | null | null | null |
import logging
log = logging.getLogger('marketagents')
from enforce_typing import enforce_types # type: ignore[import]
import random
from agents.PublisherAgent import PublisherAgent
from agents.PoolAgent import PoolAgent
from util import constants
from util.constants import POOL_WEIGHT_DT, POOL_WEIGHT_OCEAN
from web3engine import bfactory, bpool, datatoken, dtfactory, globaltokens
from web3tools.web3util import toBase18
@enforce_types
class EWPublisherAgent(PublisherAgent):
def __init__(self, name: str, USD: float, OCEAN: float):
super().__init__(name, USD, OCEAN)
self._s_since_create = 0
self._s_between_create = 7 * constants.S_PER_DAY #magic number
self._s_since_unstake = 0
self._s_between_unstake = 3 * constants.S_PER_DAY #magic number
def takeStep(self, state) -> None:
self._s_since_create += state.ss.time_step
self._s_since_unstake += state.ss.time_step
if self._doCreatePool():
self._s_since_create = 0
self._createPoolAgent(state)
if self._doUnstakeOCEAN(state):
self._s_since_unstake = 0
self._unstakeOCEANsomewhere(state)
def _doCreatePool(self) -> bool:
if self.OCEAN() < 200.0: #magic number
return False
return self._s_since_create >= self._s_between_create
def _createPoolAgent(self, state) -> PoolAgent:
assert self.OCEAN() > 0.0, "should not call if no OCEAN"
wallet = self._wallet._web3wallet
OCEAN = globaltokens.OCEANtoken()
#name
pool_i = len(state.agents.filterToPool())
dt_name = f'DT{pool_i}'
pool_agent_name = f'pool{pool_i}'
#new DT
DT = self._createDatatoken(dt_name, mint_amt=1000.0) #magic number
#new pool
pool_address = bfactory.BFactory().newBPool(from_wallet=wallet)
pool = bpool.BPool(pool_address)
#bind tokens & add initial liquidity
OCEAN_bind_amt = self.OCEAN() #magic number: use all the OCEAN
DT_bind_amt = 20.0 #magic number
DT.approve(pool.address, toBase18(DT_bind_amt), from_wallet=wallet)
OCEAN.approve(pool.address, toBase18(OCEAN_bind_amt),from_wallet=wallet)
pool.bind(DT.address, toBase18(DT_bind_amt),
toBase18(POOL_WEIGHT_DT), from_wallet=wallet)
pool.bind(OCEAN.address, toBase18(OCEAN_bind_amt),
toBase18(POOL_WEIGHT_OCEAN), from_wallet=wallet)
pool.finalize(from_wallet=wallet)
#create agent
pool_agent = PoolAgent(pool_agent_name, pool)
state.addAgent(pool_agent)
return pool_agent
def _doUnstakeOCEAN(self, state) -> bool:
if not state.agents.filterByNonzeroStake(self):
return False
return self._s_since_unstake >= self._s_between_unstake
def _unstakeOCEANsomewhere(self, state):
"""Choose what pool to unstake and by how much. Then do the action."""
pool_agents = state.agents.filterByNonzeroStake(self)
pool_agent = random.choice(list(pool_agents.values()))
BPT = self.BPT(pool_agent.pool)
BPT_unstake = 0.10 * BPT #magic number
self.unstakeOCEAN(BPT_unstake, pool_agent.pool)
def _createDatatoken(self,dt_name:str,mint_amt:float)-> datatoken.Datatoken:
"""Create datatoken contract and mint DTs to self."""
wallet = self._wallet._web3wallet
DT_address = dtfactory.DTFactory().createToken(
'', dt_name, dt_name, toBase18(mint_amt), from_wallet=wallet)
DT = datatoken.Datatoken(DT_address)
DT.mint(wallet.address, toBase18(mint_amt), from_wallet=wallet)
return DT
| 37.98
| 80
| 0.662191
| 466
| 3,798
| 5.128755
| 0.255365
| 0.025105
| 0.033473
| 0.026778
| 0.191632
| 0.107113
| 0
| 0
| 0
| 0
| 0
| 0.016152
| 0.250132
| 3,798
| 99
| 81
| 38.363636
| 0.823034
| 0.079516
| 0
| 0.114286
| 0
| 0
| 0.017564
| 0
| 0
| 0
| 0
| 0
| 0.014286
| 1
| 0.1
| false
| 0
| 0.128571
| 0
| 0.328571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d930df4a535163668fcaae7a75a25d2de903db1
| 13,123
|
py
|
Python
|
tests/test_optimizers_v2/test_optimizers_v2.py
|
OverLordGoldDragon/dummy
|
5192b91c57721f37b906f670ad954a46f98bf5b5
|
[
"MIT"
] | null | null | null |
tests/test_optimizers_v2/test_optimizers_v2.py
|
OverLordGoldDragon/dummy
|
5192b91c57721f37b906f670ad954a46f98bf5b5
|
[
"MIT"
] | null | null | null |
tests/test_optimizers_v2/test_optimizers_v2.py
|
OverLordGoldDragon/dummy
|
5192b91c57721f37b906f670ad954a46f98bf5b5
|
[
"MIT"
] | null | null | null |
import os
import tempfile
import numpy as np
import tensorflow as tf
from time import time
from termcolor import cprint
from unittest import TestCase
from .. import K
from .. import Input, Dense, GRU, Bidirectional, Embedding
from .. import Model, load_model
from .. import l2
from .. import maxnorm
from .. import Adam, Nadam, SGD
from .. import AdamW, NadamW, SGDW
from .. import get_weight_decays, fill_dict_in_order, reset_seeds, K_eval
print("TF version: %s" % tf.__version__)
tf_eager = bool(os.environ["TF_EAGER"] == "True")
if tf_eager:
print("TF running eagerly")
else:
tf.compat.v1.disable_eager_execution()
print("TF running in graph mode")
class TestOptimizers(TestCase):
def test_all(self): # Save/Load, Warm Restarts (w/ cosine annealing)
for optimizer_name in ['AdamW', 'NadamW', 'SGDW']:
cprint("<< TESTING {} OPTIMIZER >>".format(optimizer_name), 'blue')
reset_seeds()
num_batches, num_epochs = 25, 4
batch_size, timesteps, num_channels = 16, 8, 4
batch_shape = (batch_size, timesteps, num_channels)
total_iterations = num_batches # due to warm restarts
self.model = self._make_model(batch_shape, total_iterations)
optimizer = self._make_optimizer(optimizer_name, self.model,
total_iterations)
self.model.compile(optimizer, loss='binary_crossentropy')
self.assertTrue(self._valid_weight_decays(self.model))
self.model._make_train_function() # else K.eval before train may fail
X, Y = self._make_data(num_batches, *batch_shape)
self.eta_history = [] # for stop-introspection
self.t_cur_history = [] # for stop-introspection
for epoch in range(num_epochs):
for batch_num in range(num_batches):
self.t_cur_history += [K_eval(self.model.optimizer.t_cur, K)]
self.eta_history += [K_eval(self.model.optimizer.eta_t, K)]
self.model.train_on_batch(X[batch_num], Y[batch_num])
self.eta_history += [K_eval(self.model.optimizer.eta_t, K)]
self.eta_history.pop(-(1 + int(tf_eager)))
K.set_value(self.model.optimizer.t_cur, 0)
self.assertTrue(self._valid_cosine_annealing(self.eta_history,
total_iterations, num_epochs))
self._test_save_load(self.model, X, optimizer_name, optimizer)
# cleanup
del self.model, optimizer
reset_seeds(reset_graph_with_backend=K)
cprint("\n<< {} MAIN TEST PASSED >>\n".format(optimizer_name), 'green')
cprint("\n<< ALL MAIN TESTS PASSED >>\n", 'green')
def test_misc(self): # tests of non-main features to improve coverage
for optimizer_name in ['AdamW', 'NadamW', 'SGDW']:
cprint("<< TESTING {} OPTIMIZER >>".format(optimizer_name), 'blue')
reset_seeds()
optimizer_kw = {'total_iterations': 0, 'decay': 1e-3,
'amsgrad': optimizer_name == 'AdamW',
'nesterov': optimizer_name == 'SGDW'}
num_batches = 4
batch_size, timesteps = 16, 8
batch_shape = (batch_size, timesteps)
embed_input_dim = 5
total_iterations = 0
self.model = self._make_model(batch_shape, total_iterations,
embed_input_dim=embed_input_dim,
dense_constraint=1, l2_reg=1e-4,
bidirectional=False, sparse=True)
optimizer = self._make_optimizer(optimizer_name, self.model,
**optimizer_kw)
self.model.compile(optimizer, loss='sparse_categorical_crossentropy')
X, Y = self._make_data(num_batches, *batch_shape,
embed_input_dim=embed_input_dim, sparse=True)
for batch_num in range(num_batches):
self.model.train_on_batch(X[batch_num], Y[batch_num])
self._test_save_load(self.model, X, optimizer_name, optimizer)
# cleanup
del self.model, optimizer
reset_seeds(reset_graph_with_backend=K)
cprint("\n<< {} MISC TEST PASSED >>\n".format(optimizer_name), 'green')
cprint("\n<< ALL MISC TESTS PASSED >>\n", 'green')
def test_control(self): # tests losses against original optimizers'
for optimizer_name in ['AdamW', 'NadamW', 'SGDW']:
cprint("<< TESTING {} OPTIMIZER >>".format(optimizer_name), 'blue')
pass_txt = "Control Test Passed"
if optimizer_name == 'AdamW':
for amsgrad in [True, False]:
self._test_control(optimizer_name, amsgrad=amsgrad)
print("\n>> AdamW amsgrad={} {}".format(amsgrad, pass_txt))
elif optimizer_name == 'NadamW':
self._test_control(optimizer_name)
elif optimizer_name == 'SGDW':
for nesterov in [True, False]:
self._test_control(optimizer_name, nesterov=nesterov)
print("\n>> SGDW nesterov={} {}".format(nesterov, pass_txt))
o_name = optimizer_name
cprint("\n<< {} {} >>\n".format(o_name, pass_txt.upper()), 'green')
cprint("\n<< ALL CONTROL TESTS PASSED >>\n", 'green')
def _test_control(self, optimizer_name, amsgrad=False, nesterov=False):
optimizer_kw = dict(total_iterations=0, decay=1e-3,
amsgrad=amsgrad, nesterov=nesterov,
control_mode=True)
num_batches = 100
batch_size, timesteps = 16, 32
batch_shape = (batch_size, timesteps)
embed_input_dim = 5
total_iterations = 0
model_kw = dict(batch_shape=batch_shape, dense_constraint=1,
total_iterations=total_iterations,
embed_input_dim=embed_input_dim, l2_reg=0,
bidirectional=False, sparse=True)
loss_name = 'sparse_categorical_crossentropy'
reset_seeds(verbose=0)
X, Y = self._make_data(num_batches, *batch_shape,
embed_input_dim=embed_input_dim, sparse=True)
reset_seeds(reset_graph_with_backend=K, verbose=0)
self.model_custom = self._make_model(**model_kw)
optimizer_custom = self._make_optimizer(optimizer_name,
self.model_custom,
**optimizer_kw)
self.model_custom.compile(optimizer_custom, loss=loss_name)
self.loss_custom = [] # for introspection
t0 = time()
for batch_num in range(num_batches):
self.loss_custom += [self.model_custom.train_on_batch(
X[batch_num], Y[batch_num])]
print("model_custom -- %s batches -- time: %.2f sec" % (num_batches,
time() - t0))
reset_seeds(reset_graph_with_backend=K, verbose=0)
self.model_control = self._make_model(**model_kw)
optimizer_control = self._make_optimizer(optimizer_name[:-1],
self.model_control,
**optimizer_kw)
self.model_control.compile(optimizer_control, loss=loss_name)
self.loss_control = [] # for introspection
t0 = time()
for batch_num in range(num_batches):
self.loss_control += [self.model_control.train_on_batch(
X[batch_num], Y[batch_num])]
print("model_control -- %s batches -- time: %.2f sec" % (num_batches,
time() - t0))
loss_diff = np.abs(np.array(self.loss_custom) -
np.array(self.loss_control))
print("%s max loss diff: %e" % (optimizer_name, np.max(loss_diff)))
self.assertTrue(np.allclose(self.loss_custom, self.loss_control,
rtol=0, atol=1e-3))
# cleanup
del self.model_custom, self.model_control
del optimizer_custom, optimizer_control
reset_seeds(reset_graph_with_backend=K, verbose=0)
def _test_save_load(self, model, X, optimizer_name, optimizer):
saved_model_preds = model.predict(X[0])
saved_model_weights = K.batch_get_value(model.trainable_weights)
saved_optim_weights = K.batch_get_value(model.optimizer.weights)
test_name = 'test__%f{}.h5'.format(np.random.random())
modelpath = os.path.join(tempfile.gettempdir(), test_name)
model.save(modelpath)
del model
model = load_model(modelpath, custom_objects={optimizer_name: optimizer})
loaded_model_preds = model.predict(X[0])
loaded_model_weights = K.batch_get_value(model.trainable_weights)
loaded_optim_weights = K.batch_get_value(model.optimizer.weights)
self.assertTrue(np.allclose(saved_model_preds, loaded_model_preds,
rtol=0, atol=1e-8))
for smw, lmw in zip(saved_model_weights, loaded_model_weights):
self.assertTrue(np.allclose(smw, lmw, rtol=0, atol=1e-8))
for sow, low in zip(saved_optim_weights, loaded_optim_weights):
self.assertTrue(np.allclose(sow, low, rtol=0, atol=1e-8))
@staticmethod
def _make_data(num_batches, batch_size, timesteps, num_channels=None,
embed_input_dim=None, sparse=False):
if sparse:
X = np.random.randint(0, embed_input_dim,
(num_batches, batch_size, timesteps))
else:
X = np.random.randn(num_batches, batch_size, timesteps, num_channels)
Y = np.random.randint(0, 2, (num_batches, batch_size))
return X, Y
@staticmethod
def _make_model(batch_shape, total_iterations, l2_reg=0, bidirectional=True,
dense_constraint=None, embed_input_dim=None, sparse=False):
if dense_constraint is not None:
dense_constraint = maxnorm(dense_constraint)
ipt = Input(batch_shape=batch_shape)
if sparse:
x = Embedding(embed_input_dim, embed_input_dim*3 + 1,
mask_zero=True)(ipt)
else:
x = ipt
gru = GRU(4, recurrent_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg))
if bidirectional:
x = Bidirectional(gru)(x)
else:
x = gru(x)
x = Dense(2, kernel_regularizer=l2(l2_reg),
kernel_constraint=dense_constraint)(x)
if sparse:
out = Dense(2, activation='softmax')(x)
else:
out = Dense(1, activation='sigmoid')(x)
return Model(ipt, out)
@staticmethod
def _make_optimizer(optimizer_name, model, total_iterations, decay=0,
amsgrad=False, nesterov=False, control_mode=False):
optimizer_dict = {'AdamW': AdamW, 'NadamW': NadamW, 'SGDW': SGDW,
'Adam': Adam, 'Nadam': Nadam, 'SGD': SGD}
optimizer = optimizer_dict[optimizer_name]
optimizer_kw = {}
if 'Adam' in optimizer_name:
optimizer_kw = {'amsgrad': amsgrad}
elif 'SGD' in optimizer_name:
optimizer_kw = {'nesterov': nesterov, 'momentum': .9}
if 'Nadam' not in optimizer_name:
optimizer_kw.update({'decay': decay})
if not control_mode:
wd_dict = get_weight_decays(model)
l2_extra = [2e-5]*(len(wd_dict) - 3)
wd = fill_dict_in_order(wd_dict, [1e-5, 1e-5, 1e-6] + l2_extra)
lr_m = {'gru': 0.5}
use_cosine_annealing = True
else:
wd, lr_m = None, None
use_cosine_annealing = False
if not any([optimizer_name == name for name in ('Adam', 'Nadam', 'SGD')]):
return optimizer(lr=1e-4, weight_decays=wd, lr_multipliers=lr_m,
use_cosine_annealing=use_cosine_annealing, t_cur=0,
total_iterations=total_iterations, **optimizer_kw)
else:
return optimizer(lr=1e-4, **optimizer_kw)
@staticmethod
def _valid_weight_decays(model):
weight_decays = get_weight_decays(model)
trues = 0
for wd in weight_decays.values():
trues += (wd != 0)
return (trues == 0)
@staticmethod
def _valid_cosine_annealing(eta_history, total_iterations, num_epochs):
eta_history_simul = []
for epoch in range(num_epochs):
for iteration in range(0, total_iterations):
eta_history_simul.append(0.5 * (
1 + np.cos(np.pi*iteration / total_iterations)))
return np.allclose(eta_history, eta_history_simul, rtol=0, atol=2e-7)
| 44.334459
| 83
| 0.586375
| 1,542
| 13,123
| 4.714008
| 0.152399
| 0.059018
| 0.026826
| 0.017884
| 0.446416
| 0.377356
| 0.333608
| 0.301967
| 0.241574
| 0.177466
| 0
| 0.012393
| 0.311362
| 13,123
| 295
| 84
| 44.484746
| 0.791966
| 0.022556
| 0
| 0.265306
| 0
| 0
| 0.064617
| 0.004838
| 0
| 0
| 0
| 0
| 0.02449
| 1
| 0.040816
| false
| 0.036735
| 0.061224
| 0
| 0.130612
| 0.073469
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d942ae26a14855b18361770889fe0b68867154b
| 1,433
|
py
|
Python
|
sympy/tensor/tests/test_functions.py
|
iamabhishek0/sympy
|
c461bd1ff9d178d1012b04fd0bf37ee3abb02cdd
|
[
"BSD-3-Clause"
] | 8,323
|
2015-01-02T15:51:43.000Z
|
2022-03-31T13:13:19.000Z
|
sympy/tensor/tests/test_functions.py
|
iamabhishek0/sympy
|
c461bd1ff9d178d1012b04fd0bf37ee3abb02cdd
|
[
"BSD-3-Clause"
] | 15,102
|
2015-01-01T01:33:17.000Z
|
2022-03-31T22:53:13.000Z
|
sympy/tensor/tests/test_functions.py
|
iamabhishek0/sympy
|
c461bd1ff9d178d1012b04fd0bf37ee3abb02cdd
|
[
"BSD-3-Clause"
] | 4,490
|
2015-01-01T17:48:07.000Z
|
2022-03-31T17:24:05.000Z
|
from sympy.tensor.functions import TensorProduct
from sympy import MatrixSymbol, Matrix, Array
from sympy.abc import x, y, z
from sympy.abc import i, j, k, l
A = MatrixSymbol("A", 3, 3)
B = MatrixSymbol("B", 3, 3)
C = MatrixSymbol("C", 3, 3)
def test_TensorProduct_construction():
assert TensorProduct(3, 4) == 12
assert isinstance(TensorProduct(A, A), TensorProduct)
expr = TensorProduct(TensorProduct(x, y), z)
assert expr == x*y*z
expr = TensorProduct(TensorProduct(A, B), C)
assert expr == TensorProduct(A, B, C)
expr = TensorProduct(Matrix.eye(2), [[0, -1], [1, 0]])
assert expr == Array([
[
[[0, -1], [1, 0]],
[[0, 0], [0, 0]]
],
[
[[0, 0], [0, 0]],
[[0, -1], [1, 0]]
]
])
def test_TensorProduct_shape():
expr = TensorProduct(3, 4, evaluate=False)
assert expr.shape == ()
assert expr.rank() == 0
expr = TensorProduct([1, 2], [x, y], evaluate=False)
assert expr.shape == (2, 2)
assert expr.rank() == 2
expr = TensorProduct(expr, expr, evaluate=False)
assert expr.shape == (2, 2, 2, 2)
assert expr.rank() == 4
expr = TensorProduct(Matrix.eye(2), [[0, -1], [1, 0]], evaluate=False)
assert expr.shape == (2, 2, 2, 2)
assert expr.rank() == 4
def test_TensorProduct_getitem():
expr = TensorProduct(A, B)
assert expr[i, j, k, l] == A[i, j]*B[k, l]
| 25.589286
| 74
| 0.567341
| 201
| 1,433
| 4.014925
| 0.189055
| 0.148699
| 0.02974
| 0.034696
| 0.307311
| 0.242875
| 0.242875
| 0.2057
| 0.2057
| 0.116481
| 0
| 0.050704
| 0.256804
| 1,433
| 55
| 75
| 26.054545
| 0.707042
| 0
| 0
| 0.097561
| 0
| 0
| 0.002094
| 0
| 0
| 0
| 0
| 0
| 0.341463
| 1
| 0.073171
| false
| 0
| 0.097561
| 0
| 0.170732
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d94582a86f1cb5da7910dcf3af0df5fef4be108
| 898
|
py
|
Python
|
app/views.py
|
Kgermando/sem
|
c76e97e1d526d4e92a925adb6bceee426f999655
|
[
"Apache-2.0"
] | null | null | null |
app/views.py
|
Kgermando/sem
|
c76e97e1d526d4e92a925adb6bceee426f999655
|
[
"Apache-2.0"
] | null | null | null |
app/views.py
|
Kgermando/sem
|
c76e97e1d526d4e92a925adb6bceee426f999655
|
[
"Apache-2.0"
] | null | null | null |
from django.shortcuts import render
# Create your views here.
class MultipleProxyMiddleware:
FORWARDED_FOR_FIELDS = [
'HTTP_X_FORWARDED_FOR',
'HTTP_X_FORWARDED_HOST',
'HTTP_X_FORWARDED_SERVER',
]
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
"""
Rewrites the proxy headers so that only the most
recent proxy is used.
"""
for field in self.FORWARDED_FOR_FIELDS:
if field in request.META:
if ',' in request.META[field]:
parts = request.META[field].split(',')
request.META[field] = parts[-1].strip()
return self.get_response(request)
def index(request):
context = {
}
template_name = 'pages/app/index.html'
return render(request, template_name, context)
| 26.411765
| 59
| 0.604677
| 102
| 898
| 5.058824
| 0.509804
| 0.085271
| 0.081395
| 0.081395
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001597
| 0.302895
| 898
| 33
| 60
| 27.212121
| 0.822684
| 0.105791
| 0
| 0
| 0
| 0
| 0.111543
| 0.057069
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.047619
| 0
| 0.380952
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d968294f2b19ad9cf4d5cc885fbe7be0f0e3330
| 15,289
|
py
|
Python
|
byol_train.py
|
fjbriones/deep-text-recognition-benchmark
|
c85d12aa56495fe221656bac4c8cb159a28456b1
|
[
"Apache-2.0"
] | null | null | null |
byol_train.py
|
fjbriones/deep-text-recognition-benchmark
|
c85d12aa56495fe221656bac4c8cb159a28456b1
|
[
"Apache-2.0"
] | null | null | null |
byol_train.py
|
fjbriones/deep-text-recognition-benchmark
|
c85d12aa56495fe221656bac4c8cb159a28456b1
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
import time
import random
import string
import argparse
import torch
import torch.backends.cudnn as cudnn
import torch.nn.init as init
import torch.optim as optim
import torch.utils.data
import numpy as np
from utils import CTCLabelConverter, CTCLabelConverterForBaiduWarpctc, AttnLabelConverter, Averager
from simclr_dataset import hierarchical_dataset, AlignCollate, Batch_Balanced_Dataset
from simclr_model import FeaturesModel as Model
from test import validation
from byol_pytorch import BYOL
from imgaug import augmenters as iaa
import imgaug as ia
from tqdm import tqdm
import matplotlib.pyplot as plt
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def train(opt):
""" dataset preparation """
if not opt.data_filtering_off:
print('Filtering the images containing characters which are not in opt.character')
print('Filtering the images whose label is longer than opt.batch_max_length')
# see https://github.com/clovaai/deep-text-recognition-benchmark/blob/6593928855fb7abb999a99f428b3e4477d4ae356/dataset.py#L130
opt.select_data = opt.select_data.split('-')
opt.batch_ratio = opt.batch_ratio.split('-')
train_dataset = Batch_Balanced_Dataset(opt)
log = open(f'./saved_models/{opt.exp_name}/log_dataset.txt', 'a')
ia.seed(1)
image_transforms = iaa.Sequential([iaa.SomeOf((1, 5),
[iaa.LinearContrast((0.5, 1.0)),
iaa.GaussianBlur((0.5, 1.5)),
iaa.Crop(percent=((0, 0.4),(0, 0),(0, 0.4),(0, 0.0)), keep_size=True),
iaa.Crop(percent=((0, 0.0),(0, 0.02),(0, 0),(0, 0.02)), keep_size=True),
iaa.Sharpen(alpha=(0.0, 0.5), lightness=(0.0, 0.5)),
iaa.PiecewiseAffine(scale=(0.02, 0.03), mode='edge'),
iaa.PerspectiveTransform(scale=(0.01, 0.02))],
random_order=True)])
AlignCollate_valid = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD, image_transforms=image_transforms)
valid_dataset, valid_dataset_log = hierarchical_dataset(root=opt.valid_data, opt=opt)
valid_loader = torch.utils.data.DataLoader(
valid_dataset, batch_size=opt.batch_size,
shuffle=True, # 'True' to check training progress with validation function.
num_workers=int(opt.workers),
collate_fn=AlignCollate_valid, pin_memory=True)
log.write(valid_dataset_log)
print('-' * 80)
log.write('-' * 80 + '\n')
log.close()
if opt.rgb:
opt.input_channel = 3
model = Model(opt)
print('model input parameters', opt.imgH, opt.imgW, opt.num_fiducial, opt.input_channel, opt.output_channel,
opt.hidden_size, opt.batch_max_length, opt.Transformation, opt.FeatureExtraction,
opt.SequenceModeling)
# weight initialization
for name, param in model.named_parameters():
if 'localization_fc2' in name:
print(f'Skip {name} as it is already initialized')
continue
try:
if 'bias' in name:
init.constant_(param, 0.0)
elif 'weight' in name:
init.kaiming_normal_(param)
except Exception as e: # for batchnorm.
if 'weight' in name:
param.data.fill_(1)
continue
# data parallel for multi-GPU
model = torch.nn.DataParallel(model).to(device)
model.train()
if opt.saved_model != '':
print(f'loading pretrained model from {opt.saved_model}')
if opt.FT:
model.load_state_dict(torch.load(opt.saved_model), strict=False)
else:
model.load_state_dict(torch.load(opt.saved_model))
print("Model:")
print(model)
image_transforms = iaa.Sequential([iaa.SomeOf((1, 5),
[iaa.LinearContrast((0.5, 1.0)),
iaa.GaussianBlur((0.5, 1.5)),
iaa.Crop(percent=((0, 0.4),(0, 0),(0, 0.4),(0, 0.0)), keep_size=True),
iaa.Crop(percent=((0, 0.0),(0, 0.02),(0, 0),(0, 0.02)), keep_size=True),
iaa.Sharpen(alpha=(0.0, 0.5), lightness=(0.0, 0.5)),
iaa.PiecewiseAffine(scale=(0.02, 0.03), mode='edge'),
iaa.PerspectiveTransform(scale=(0.01, 0.02))],
random_order=True)])
byol_learner = BYOL(
model,
image_size=(32,100),
hidden_layer=-1,
channels=1,
augment_fn=image_transforms,
augmented=True)
print(byol_learner)
# filter that only require gradient decent
filtered_parameters = []
params_num = []
for p in filter(lambda p: p.requires_grad, byol_learner.parameters()):
filtered_parameters.append(p)
params_num.append(np.prod(p.size()))
print('Trainable params num : ', sum(params_num))
# setup optimizer
if opt.optimizer == 'adam':
optimizer = optim.Adam(filtered_parameters, lr=opt.lr, betas=(opt.beta1, 0.999))
elif opt.optimizer == 'adadelta':
optimizer = optim.Adadelta(filtered_parameters, lr=opt.lr, rho=opt.rho, eps=opt.eps, weight_decay=opt.weight_decay)
elif opt.optimizer == 'sgd':
optimizer = optim.SGD(filtered_parameters, lr=opt.lr, momentum=opt.momentum, weight_decay=opt.weight_decay, nesterov=opt.nesterov)
else:
raise Exception('Unknown optimizer')
print("Optimizer:")
print(optimizer)
""" final options """
# print(opt)
with open(f'./saved_models/{opt.exp_name}/opt.txt', 'a') as opt_file:
opt_log = '------------ Options -------------\n'
args = vars(opt)
for k, v in args.items():
opt_log += f'{str(k)}: {str(v)}\n'
opt_log += '---------------------------------------\n'
print(opt_log)
opt_file.write(opt_log)
""" start training """
start_iter = 0
if opt.saved_model != '':
try:
start_iter = int(opt.saved_model.split('_')[-1].split('.')[0])
print(f'continue to train, start_iter: {start_iter}')
except:
pass
#LR Scheduler:
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[int(0.6*opt.num_iter), int(0.8*opt.num_iter)], last_epoch=start_iter-1, gamma=0.1)
best_loss = None
iteration = start_iter
print(device)
loss_avg = Averager()
valid_loss_avg = Averager()
# kl_loss_avg = Averager()
# kl_loss = torch.nn.KLDivLoss()
epoch = 0
while(True):
# train part
for i in tqdm(range(opt.valInterval)):
image_tensors, _ = train_dataset.get_batch()
image = image_tensors.to(device)
optimizer.zero_grad()
loss = byol_learner(image)
loss.backward()
if opt.grad_clip:
torch.nn.utils.clip_grad_norm_(byol_learner.parameters(), opt.grad_clip)
optimizer.step()
scheduler.step()
byol_learner.update_moving_average()
loss_avg.add(loss)
if iteration==0:
print("Epoch {:06d} Loss: {:.04f}".format(iteration, loss_avg.val()))
iteration += 1
byol_learner.eval()
model.eval()
with torch.no_grad():
for image_tensors, _ in valid_loader:
image = image_tensors.to(device)
val_loss = byol_learner(image)
valid_loss_avg.add(val_loss)
# features = model(image)
# features = features.view(-1, 26, features.shape[1])
# kl_div = kl_loss(features[:int(features.shape[0]/2)], features[int(features.shape[0]/2):])
# kl_loss_avg.add(kl_div)
model.train()
byol_learner.train()
with open(f'./saved_models/{opt.exp_name}/log_train.txt', 'a') as log:
log.write("Iteration {:06d} Loss: {:.06f} Val loss: {:06f}".format(iteration, loss_avg.val(), valid_loss_avg.val()) + '\n')
print("Iteration {:06d} Loss: {:.06f} Val loss: {:06f}".format(iteration, loss_avg.val(), valid_loss_avg.val()))
if best_loss is None:
best_loss = valid_loss_avg.val()
torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth')
elif best_loss > valid_loss_avg.val():
best_loss = valid_loss_avg.val()
torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth')
scheduler.step()
loss_avg.reset()
valid_loss_avg.reset()
if epoch % 5 == 0:
torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth')
if (iteration + 1) >= opt.num_iter:
print('end the training')
torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth')
sys.exit()
epoch +=1
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--exp_name', help='Where to store logs and models')
parser.add_argument('--train_data', required=True, help='path to training dataset')
parser.add_argument('--valid_data', required=True, help='path to validation dataset')
parser.add_argument('--manualSeed', type=int, default=1111, help='for random seed setting')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)
parser.add_argument('--batch_size', type=int, default=192, help='input batch size')
parser.add_argument('--num_iter', type=int, default=300000, help='number of iterations to train for')
parser.add_argument('--valInterval', type=int, default=2000, help='Interval between each validation')
parser.add_argument('--saved_model', default='', help="path to model to continue training")
parser.add_argument('--FT', action='store_true', help='whether to do fine-tuning')
parser.add_argument('--optimizer', type=str, choices=['adam', 'adadelta', 'sgd'], help="Optimizer")
parser.add_argument('--lr', type=float, default=1, help='learning rate, default=1.0 for Adadelta')
parser.add_argument('--beta1', type=float, default=0.9, help='beta1 for adam. default=0.9')
parser.add_argument('--rho', type=float, default=0.95, help='decay rate rho for Adadelta. default=0.95')
parser.add_argument('--eps', type=float, default=1e-8, help='eps for Adadelta. default=1e-8')
parser.add_argument('--nesterov', action='store_true', help='Use Nesterov momentum for SGD')
parser.add_argument('--momentum', type=float, default=0.9, help='Momentum for SGD')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping value. default=5')
parser.add_argument('--baiduCTC', action='store_true', help='for data_filtering_off mode')
""" Data processing """
parser.add_argument('--select_data', type=str, default='MJ-ST',
help='select training data (default is MJ-ST, which means MJ and ST used as training data)')
parser.add_argument('--batch_ratio', type=str, default='0.5-0.5',
help='assign ratio for each selected data in the batch')
parser.add_argument('--total_data_usage_ratio', type=str, default='1.0',
help='total data usage ratio, this ratio is multiplied to total number of data.')
parser.add_argument('--batch_max_length', type=int, default=25, help='maximum-label-length')
parser.add_argument('--imgH', type=int, default=32, help='the height of the input image')
parser.add_argument('--imgW', type=int, default=100, help='the width of the input image')
parser.add_argument('--rgb', action='store_true', help='use rgb input')
parser.add_argument('--character', type=str,
default='0123456789abcdefghijklmnopqrstuvwxyz', help='character label')
parser.add_argument('--sensitive', action='store_true', help='for sensitive character mode')
parser.add_argument('--PAD', action='store_true', help='whether to keep ratio then pad for image resize')
parser.add_argument('--data_filtering_off', action='store_true', help='for data_filtering_off mode')
""" Model Architecture """
parser.add_argument('--Transformation', type=str, required=True, help='Transformation stage. None|TPS')
parser.add_argument('--FeatureExtraction', type=str, required=True,
help='FeatureExtraction stage. VGG|RCNN|ResNet')
parser.add_argument('--SequenceModeling', type=str, required=True, help='SequenceModeling stage. None|BiLSTM')
parser.add_argument('--num_fiducial', type=int, default=20, help='number of fiducial points of TPS-STN')
parser.add_argument('--input_channel', type=int, default=1,
help='the number of input channel of Feature extractor')
parser.add_argument('--output_channel', type=int, default=512,
help='the number of output channel of Feature extractor')
parser.add_argument('--hidden_size', type=int, default=256, help='the size of the LSTM hidden state')
parser.add_argument('--weight_decay', type=float, default=10e-4, help='Weight decay')
parser.add_argument('--FinalLayer', action='store_true', help='Use a nonlinear projection head during training')
parser.add_argument('--final_feature', type=int, default=256, help='the size of the output of the final layer')
opt = parser.parse_args()
if not opt.exp_name:
opt.exp_name = f'{opt.Transformation}-{opt.FeatureExtraction}-{opt.SequenceModeling}-BYOL'
opt.exp_name += f'-Seed{opt.manualSeed}'
# print(opt.exp_name)
os.makedirs(f'./saved_models/{opt.exp_name}', exist_ok=True)
""" vocab / character number configuration """
if opt.sensitive:
# opt.character += 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
opt.character = string.printable[:-6] # same with ASTER setting (use 94 char).
""" Seed and GPU setting """
# print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
np.random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
torch.cuda.manual_seed(opt.manualSeed)
cudnn.benchmark = True
cudnn.deterministic = True
opt.num_gpu = torch.cuda.device_count()
# print('device count', opt.num_gpu)
if opt.num_gpu > 1:
print('------ Use multi-GPU setting ------')
print('if you stuck too long time with multi-GPU setting, try to set --workers 0')
# check multi-GPU issue https://github.com/clovaai/deep-text-recognition-benchmark/issues/1
opt.workers = opt.workers * opt.num_gpu
opt.batch_size = opt.batch_size * opt.num_gpu
""" previous version
print('To equlize batch stats to 1-GPU setting, the batch_size is multiplied with num_gpu and multiplied batch_size is ', opt.batch_size)
opt.batch_size = opt.batch_size * opt.num_gpu
print('To equalize the number of epochs to 1-GPU setting, num_iter is divided with num_gpu by default.')
If you dont care about it, just commnet out these line.)
opt.num_iter = int(opt.num_iter / opt.num_gpu)
"""
train(opt)
| 45.502976
| 152
| 0.636013
| 2,028
| 15,289
| 4.643984
| 0.204635
| 0.038225
| 0.072202
| 0.012742
| 0.267042
| 0.21735
| 0.172436
| 0.155447
| 0.135273
| 0.103844
| 0
| 0.024724
| 0.224868
| 15,289
| 335
| 153
| 45.638806
| 0.769977
| 0.057427
| 0
| 0.153226
| 0
| 0
| 0.228783
| 0.040433
| 0
| 0
| 0
| 0
| 0
| 1
| 0.004032
| false
| 0.004032
| 0.084677
| 0
| 0.08871
| 0.084677
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d9879f28b9c13e35ac740f4cb5632764b5c35dd
| 2,426
|
py
|
Python
|
pipescaler/core/stage.py
|
KarlTDebiec/PipeScaler
|
b990ece8f3dd2c3506c226ed871871997fc57beb
|
[
"BSD-3-Clause"
] | 1
|
2022-02-07T03:47:53.000Z
|
2022-02-07T03:47:53.000Z
|
pipescaler/core/stage.py
|
KarlTDebiec/PipeScaler
|
b990ece8f3dd2c3506c226ed871871997fc57beb
|
[
"BSD-3-Clause"
] | 49
|
2022-01-17T15:16:22.000Z
|
2022-03-28T03:00:39.000Z
|
pipescaler/core/stage.py
|
KarlTDebiec/PipeScaler
|
b990ece8f3dd2c3506c226ed871871997fc57beb
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# pipescaler/core/stage.py
#
# Copyright (C) 2020-2021 Karl T Debiec
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license.
from __future__ import annotations
from abc import ABC, abstractmethod
from importlib.util import module_from_spec, spec_from_file_location
from typing import Any, List, Optional
from pipescaler.common import validate_input_path
def initialize_stage(stage_name, stage_conf, modules):
# Get stage's class name
stage_cls_name = next(iter(stage_conf)) # get first key
# Get stage's configuration
stage_args = stage_conf.get(stage_cls_name)
if stage_args is None:
stage_args = {}
# Get stage's class
stage_cls = None
for module in modules:
try:
stage_cls = getattr(module, stage_cls_name)
except AttributeError:
continue
if stage_cls is None:
if "infile" in stage_args:
module_infile = validate_input_path(stage_args.pop("infile"))
spec = spec_from_file_location(stage_cls_name, module_infile)
module = module_from_spec(spec)
spec.loader.exec_module(module)
stage_cls = getattr(module, stage_cls_name)
else:
raise KeyError(f"Class '{stage_cls_name}' not found")
return stage_cls(name=stage_name, **stage_args)
class Stage(ABC):
"""Base class for stages."""
trim_suffixes = None
extension = "png"
def __init__(
self, name: Optional[str] = None, desc: Optional[str] = None, **kwargs: Any
) -> None:
"""
Validates and stores static configuration.
Arguments:
name (Optional[str]): Name of stage
desc (Optional[str]): Description of stage
kwargs (Any): Additional keyword arguments
"""
if name is not None:
self.name = name
else:
self.name = self.__class__.__name__
if desc is not None:
self.desc = desc
else:
self.desc = self.name
def __repr__(self) -> str:
return self.desc
def __str__(self) -> str:
return self.name
@property
@abstractmethod
def inlets(self) -> List[str]:
raise NotImplementedError()
@property
@abstractmethod
def outlets(self) -> List[str]:
raise NotImplementedError()
| 27.568182
| 83
| 0.633965
| 301
| 2,426
| 4.873754
| 0.365449
| 0.059986
| 0.05726
| 0.02454
| 0.125426
| 0.04499
| 0.04499
| 0
| 0
| 0
| 0
| 0.004592
| 0.281946
| 2,426
| 87
| 84
| 27.885057
| 0.837543
| 0.201566
| 0
| 0.211538
| 0
| 0
| 0.026273
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.115385
| false
| 0
| 0.096154
| 0.038462
| 0.326923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d987ab5c44436d3b47a96128fa67f2ac2b9fa8d
| 670
|
py
|
Python
|
leetcode/970_powerful_integers/970_powerful_integers.py
|
ryangillard/misc
|
d1f9919400636e6b988fa933493b94829a73331e
|
[
"Apache-2.0"
] | null | null | null |
leetcode/970_powerful_integers/970_powerful_integers.py
|
ryangillard/misc
|
d1f9919400636e6b988fa933493b94829a73331e
|
[
"Apache-2.0"
] | null | null | null |
leetcode/970_powerful_integers/970_powerful_integers.py
|
ryangillard/misc
|
d1f9919400636e6b988fa933493b94829a73331e
|
[
"Apache-2.0"
] | null | null | null |
class Solution(object):
def powerfulIntegers(self, x, y, bound):
"""
:type x: int
:type y: int
:type bound: int
:rtype: List[int]
"""
# Find max exponent
base = max(x, y) if x == 1 or y == 1 else min(x, y)
exponent = 1
if base != 1:
while base ** exponent <= bound:
exponent += 1
# Brute force all of the exponent trials
hashset = set()
for i in range(exponent):
for j in range(exponent):
z = x ** i + y ** j
if z <= bound:
hashset.add(z)
return list(hashset)
| 26.8
| 59
| 0.444776
| 82
| 670
| 3.634146
| 0.47561
| 0.020134
| 0.100671
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013514
| 0.447761
| 670
| 25
| 60
| 26.8
| 0.791892
| 0.176119
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0
| 0
| 0.214286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d9a08394431a2356f36800cd3badfa0bed3c07f
| 7,730
|
py
|
Python
|
qiskit_metal/qlibrary/lumped/cap_n_interdigital.py
|
wdczdj/qiskit-metal
|
c77805f66da60021ef8d10d668715c1dc2ebcd1d
|
[
"Apache-2.0"
] | null | null | null |
qiskit_metal/qlibrary/lumped/cap_n_interdigital.py
|
wdczdj/qiskit-metal
|
c77805f66da60021ef8d10d668715c1dc2ebcd1d
|
[
"Apache-2.0"
] | null | null | null |
qiskit_metal/qlibrary/lumped/cap_n_interdigital.py
|
wdczdj/qiskit-metal
|
c77805f66da60021ef8d10d668715c1dc2ebcd1d
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
from qiskit_metal import draw, Dict
from qiskit_metal.qlibrary.core import QComponent
import numpy as np
class CapNInterdigital(QComponent):
"""Generates a two pin (+) structure comprised of a north CPW transmission
line, and a south transmission line, coupled together via a finger
capacitor. Such a structure can be used, as an example, for generating CPW
resonators. (0,0) represents the center position of the component. Setting
finger length to 0 gives a simple gap capacitor. The width of the gap
capacitor is found via.
(cap_width * finger_count + * cap_gap * (finger_count-1)).
Inherits QComponent class.
::
(0,0) N
+ ^
| |
|
|
--|-----|--
| | | | |
|-----|-----|
|
|
|
|
+
Options:
* north_width: '10um' -- The width of the 'north' portion of the CPW transmission line
* north_gap: '6um' -- The dielectric gap of the 'north' portion of the CPW transmission line
* south_width: '10um' -- The width of the 'south' portion of the CPW transmission line
* south_gap: '6um' -- The dielectric gap of the 'south' portion of the CPW transmission line
(also for the capacitor gap to ground)
* cap_width: '10um' -- The width of the finger capacitor metal (and islands)
* cap_gap: '6um' -- The width of dielectric for the capacitive coupling/fingers
* cap_gap_ground: '6um' -- Width of the dielectric between the capacitor and ground
* finger_length: '20um' -- The depth of the finger islands of the capacitor
* finger_count: '5' -- Number of fingers in the capacitor
* cap_distance: '50um' -- Distance of the north point of the capacitor from the north pin
* pos_x/_y: '0um' -- The x/y position of the north pin
* rotation: '0' -- The direction of the transmission line. 0 degrees is -y, following a
counter-clockwise rotation (eg. 90 is +x)
* chip: 'main' -- The chip the capacitor should be on.
* layer: '1' -- Layer the capacitor is on.
"""
component_metadata = Dict(short_name='cpw',
_qgeometry_table_poly='True',
_qgeometry_table_path='True')
"""Component metadata"""
#Currently setting the primary CPW length based on the coupling_length
#May want it to be it's own value that the user can control?
default_options = Dict(north_width='10um',
north_gap='6um',
south_width='10um',
south_gap='6um',
cap_width='10um',
cap_gap='6um',
cap_gap_ground='6um',
finger_length='20um',
finger_count='5',
cap_distance='50um',
pos_x='0um',
pos_y='0um',
orientation='0',
chip='main',
layer='1')
"""Default connector options"""
def make(self):
"""Build the component."""
p = self.p
N = int(p.finger_count)
#Finger Capacitor
cap_box = draw.rectangle(N * p.cap_width + (N - 1) * p.cap_gap,
p.cap_gap + 2 * p.cap_width + p.finger_length,
0, 0)
make_cut_list = []
make_cut_list.append([0, (p.finger_length) / 2])
make_cut_list.append([(p.cap_width) + (p.cap_gap / 2),
(p.finger_length) / 2])
flip = -1
for i in range(1, N):
make_cut_list.append([
i * (p.cap_width) + (2 * i - 1) * (p.cap_gap / 2),
flip * (p.finger_length) / 2
])
make_cut_list.append([
(i + 1) * (p.cap_width) + (2 * i + 1) * (p.cap_gap / 2),
flip * (p.finger_length) / 2
])
flip = flip * -1
cap_cut = draw.LineString(make_cut_list).buffer(p.cap_gap / 2,
cap_style=2,
join_style=2)
cap_cut = draw.translate(cap_cut,
-(N * p.cap_width + (N - 1) * p.cap_gap) / 2,
0)
cap_body = draw.subtract(cap_box, cap_cut)
cap_body = draw.translate(
cap_body, 0, -p.cap_distance -
(p.cap_gap + 2 * p.cap_width + p.finger_length) / 2)
cap_etch = draw.rectangle(
N * p.cap_width + (N - 1) * p.cap_gap + 2 * p.cap_gap_ground,
p.cap_gap + 2 * p.cap_width + p.finger_length +
2 * p.cap_gap_ground, 0, -p.cap_distance -
(p.cap_gap + 2 * p.cap_width + p.finger_length) / 2)
#CPW
north_cpw = draw.LineString([[0, 0], [0, -p.cap_distance]])
south_cpw = draw.LineString(
[[
0, -p.cap_distance -
(p.cap_gap + 2 * p.cap_width + p.finger_length)
],
[
0, -2 * p.cap_distance -
(p.cap_gap + 2 * p.cap_width + p.finger_length)
]])
#Rotate and Translate
c_items = [north_cpw, south_cpw, cap_body, cap_etch]
c_items = draw.rotate(c_items, p.orientation, origin=(0, 0))
c_items = draw.translate(c_items, p.pos_x, p.pos_y)
[north_cpw, south_cpw, cap_body, cap_etch] = c_items
#Add to qgeometry tables
self.add_qgeometry('path', {'north_cpw': north_cpw},
width=p.north_width,
layer=p.layer)
self.add_qgeometry('path', {'north_cpw_sub': north_cpw},
width=p.north_width + 2 * p.north_gap,
layer=p.layer,
subtract=True)
self.add_qgeometry('path', {'south_cpw': south_cpw},
width=p.south_width,
layer=p.layer)
self.add_qgeometry('path', {'south_cpw_sub': south_cpw},
width=p.south_width + 2 * p.south_gap,
layer=p.layer,
subtract=True)
self.add_qgeometry('poly', {'cap_body': cap_body}, layer=p.layer)
self.add_qgeometry('poly', {'cap_etch': cap_etch},
layer=p.layer,
subtract=True)
#Add pins
north_pin_list = north_cpw.coords
south_pin_list = south_cpw.coords
self.add_pin('north_end',
points=np.array(north_pin_list[::-1]),
width=p.north_width,
input_as_norm=True)
self.add_pin('south_end',
points=np.array(south_pin_list),
width=p.south_width,
input_as_norm=True)
| 41.336898
| 100
| 0.51022
| 940
| 7,730
| 4.009574
| 0.218085
| 0.033961
| 0.027859
| 0.025471
| 0.336429
| 0.297161
| 0.236402
| 0.222075
| 0.190236
| 0.124436
| 0
| 0.022847
| 0.388486
| 7,730
| 186
| 101
| 41.55914
| 0.774487
| 0.346831
| 0
| 0.24
| 0
| 0
| 0.032862
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.01
| false
| 0
| 0.03
| 0
| 0.07
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d9cfb201fcd48e23406da7a37202a4d1d0051f3
| 1,758
|
py
|
Python
|
Medium/102_2.py
|
Hellofafar/Leetcode
|
7a459e9742958e63be8886874904e5ab2489411a
|
[
"CNRI-Python"
] | 6
|
2017-09-25T18:05:50.000Z
|
2019-03-27T00:23:15.000Z
|
Medium/102_2.py
|
Hellofafar/Leetcode
|
7a459e9742958e63be8886874904e5ab2489411a
|
[
"CNRI-Python"
] | 1
|
2017-10-29T12:04:41.000Z
|
2018-08-16T18:00:37.000Z
|
Medium/102_2.py
|
Hellofafar/Leetcode
|
7a459e9742958e63be8886874904e5ab2489411a
|
[
"CNRI-Python"
] | null | null | null |
# ------------------------------
# Binary Tree Level Order Traversal
#
# Description:
# Given a binary tree, return the level order traversal of its nodes' values. (ie, from
# left to right, level by level).
#
# For example:
# Given binary tree [3,9,20,null,null,15,7],
# 3
# / \
# 9 20
# / \
# 15 7
# return its level order traversal as:
# [
# [3],
# [9,20],
# [15,7]
# ]
#
# Version: 2.0
# 11/11/19 by Jianfa
# ------------------------------
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def levelOrder(self, root: TreeNode) -> List[List[int]]:
if not root:
return []
# BFS
res = []
queue = [root]
while queue:
temp = [] # values of this level of nodes
children = [] # next level of nodes
for node in queue:
temp.append(node.val)
if node.left:
children.append(node.left)
if node.right:
children.append(node.right)
res.append(temp[:]) # actually here can be res.append(temp), res will not change as temp changes
queue = children[:] # here must be children[:] otherwise queue will change as children changes
return res
# Used for testing
if __name__ == "__main__":
test = Solution()
# ------------------------------
# Summary:
# Similar BFS solution but use a little more spaces.
# On 102.py, using list.pop(0) actually takes O(n) time because it needs to remap the index
# of values. Use collections.deque instead.
#
# O(N) time O(N) space
| 26.636364
| 108
| 0.527873
| 219
| 1,758
| 4.182648
| 0.461187
| 0.043668
| 0.062227
| 0.0131
| 0.015284
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027523
| 0.317975
| 1,758
| 66
| 109
| 26.636364
| 0.736447
| 0.591013
| 0
| 0
| 0
| 0
| 0.011834
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d9e54e4e20b43e465756409507c8caedb39d5b5
| 7,969
|
py
|
Python
|
mturk/comparison_among_different_models/sample_from_models_for_comparison.py
|
qiaone/GIF
|
2c551e844748c72395fc91fb080c7a2f9c8d5285
|
[
"MIT"
] | 322
|
2020-08-28T22:23:09.000Z
|
2022-03-25T09:42:12.000Z
|
mturk/comparison_among_different_models/sample_from_models_for_comparison.py
|
qiaone/GIF
|
2c551e844748c72395fc91fb080c7a2f9c8d5285
|
[
"MIT"
] | 25
|
2020-11-03T02:03:51.000Z
|
2022-03-18T13:06:42.000Z
|
mturk/comparison_among_different_models/sample_from_models_for_comparison.py
|
qiaone/GIF
|
2c551e844748c72395fc91fb080c7a2f9c8d5285
|
[
"MIT"
] | 59
|
2020-08-28T23:32:08.000Z
|
2022-03-30T03:29:35.000Z
|
import sys
sys.path.append('../../')
import constants as cnst
import os
os.environ['PYTHONHASHSEED'] = '2'
import tqdm
from model.stg2_generator import StyledGenerator
import numpy as np
from my_utils.visualize_flame_overlay import OverLayViz
from my_utils.flm_dynamic_fit_overlay import camera_ringnetpp
from my_utils.generate_gif import generate_from_flame_sequence
from my_utils.generic_utils import save_set_of_images
from my_utils import compute_fid
import constants
from dataset_loaders import fast_image_reshape
import torch
from my_utils import generic_utils
from my_utils.eye_centering import position_to_given_location
def ge_gen_in(flm_params, textured_rndr, norm_map, normal_map_cond, texture_cond):
if normal_map_cond and texture_cond:
return torch.cat((textured_rndr, norm_map), dim=1)
elif normal_map_cond:
return norm_map
elif texture_cond:
return textured_rndr
else:
return flm_params
# General settings
save_images = True
code_size = 236
use_inst_norm = True
core_tensor_res = 4
resolution = 256
alpha = 1
step_max = int(np.log2(resolution) - 2)
root_out_dir = f'{cnst.output_root}sample/'
num_smpl_to_eval_on = 1000
use_styled_conv_stylegan2 = True
flength = 5000
cam_t = np.array([0., 0., 0])
camera_params = camera_ringnetpp((512, 512), trans=cam_t, focal=flength)
run_ids_1 = [29, ] # with sqrt(2)
# run_ids_1 = [7, 24, 8, 3]
# run_ids_1 = [7, 8, 3]
settings_for_runs = \
{24: {'name': 'vector_cond', 'model_idx': '216000_1', 'normal_maps_as_cond': False,
'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False},
29: {'name': 'full_model', 'model_idx': '294000_1', 'normal_maps_as_cond': True,
'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': True},
7: {'name': 'flm_rndr_tex_interp', 'model_idx': '051000_1', 'normal_maps_as_cond': False,
'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False},
3: {'name': 'norm_mp_tex_interp', 'model_idx': '203000_1', 'normal_maps_as_cond': True,
'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False},
8: {'name': 'norm_map_rend_flm_no_tex_interp', 'model_idx': '009000_1', 'normal_maps_as_cond': True,
'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False},}
overlay_visualizer = OverLayViz()
# overlay_visualizer.setup_renderer(mesh_file=None)
flm_params = np.zeros((num_smpl_to_eval_on, code_size)).astype('float32')
fl_param_dict = np.load(cnst.all_flame_params_file, allow_pickle=True).item()
for i, key in enumerate(fl_param_dict):
flame_param = fl_param_dict[key]
flame_param = np.hstack((flame_param['shape'], flame_param['exp'], flame_param['pose'], flame_param['cam'],
flame_param['tex'], flame_param['lit'].flatten()))
# tz = camera_params['f'][0] / (camera_params['c'][0] * flame_param[:, 156:157])
# flame_param[:, 156:159] = np.concatenate((flame_param[:, 157:], tz), axis=1)
# import ipdb; ipdb.set_trace()
flm_params[i, :] = flame_param.astype('float32')
if i == num_smpl_to_eval_on - 1:
break
batch_size = 64
flame_decoder = overlay_visualizer.deca.flame.eval()
for run_idx in run_ids_1:
# import ipdb; ipdb.set_trace()
generator_1 = torch.nn.DataParallel(
StyledGenerator(embedding_vocab_size=69158,
rendered_flame_ascondition=settings_for_runs[run_idx]['rendered_flame_as_condition'],
normal_maps_as_cond=settings_for_runs[run_idx]['normal_maps_as_cond'],
core_tensor_res=core_tensor_res,
w_truncation_factor=1.0,
apply_sqrt2_fac_in_eq_lin=settings_for_runs[run_idx]['apply_sqrt2_fac_in_eq_lin'],
n_mlp=8)).cuda()
model_idx = settings_for_runs[run_idx]['model_idx']
ckpt1 = torch.load(f'{cnst.output_root}checkpoint/{run_idx}/{model_idx}.model')
generator_1.load_state_dict(ckpt1['generator_running'])
generator_1 = generator_1.eval()
# images = np.zeros((num_smpl_to_eval_on, 3, resolution, resolution)).astype('float32')
pbar = tqdm.tqdm(range(0, num_smpl_to_eval_on, batch_size))
pbar.set_description('Generating_images')
flame_mesh_imgs = None
mdl_id = 'mdl2_'
if settings_for_runs[run_idx]['name'] == 'full_model':
mdl_id = 'mdl1_'
for batch_idx in pbar:
flm_batch = flm_params[batch_idx:batch_idx+batch_size, :]
flm_batch = torch.from_numpy(flm_batch).cuda()
flm_batch = position_to_given_location(flame_decoder, flm_batch)
batch_size_true = flm_batch.shape[0]
if settings_for_runs[run_idx]['normal_maps_as_cond'] or \
settings_for_runs[run_idx]['rendered_flame_as_condition']:
cam = flm_batch[:, constants.DECA_IDX['cam'][0]:constants.DECA_IDX['cam'][1]:]
shape = flm_batch[:, constants.INDICES['SHAPE'][0]:constants.INDICES['SHAPE'][1]]
exp = flm_batch[:, constants.INDICES['EXP'][0]:constants.INDICES['EXP'][1]]
pose = flm_batch[:, constants.INDICES['POSE'][0]:constants.INDICES['POSE'][1]]
# import ipdb; ipdb.set_trace()
light_code = \
flm_batch[:, constants.DECA_IDX['lit'][0]:constants.DECA_IDX['lit'][1]:].view((batch_size_true, 9, 3))
texture_code = flm_batch[:, constants.DECA_IDX['tex'][0]:constants.DECA_IDX['tex'][1]:]
norma_map_img, _, _, _, rend_flm = \
overlay_visualizer.get_rendered_mesh(flame_params=(shape, exp, pose, light_code, texture_code),
camera_params=cam)
rend_flm = torch.clamp(rend_flm, 0, 1) * 2 - 1
norma_map_img = torch.clamp(norma_map_img, 0, 1) * 2 - 1
rend_flm = fast_image_reshape(rend_flm, height_out=256, width_out=256, mode='bilinear')
norma_map_img = fast_image_reshape(norma_map_img, height_out=256, width_out=256, mode='bilinear')
else:
rend_flm = None
norma_map_img = None
gen_1_in = ge_gen_in(flm_batch, rend_flm, norma_map_img, settings_for_runs[run_idx]['normal_maps_as_cond'],
settings_for_runs[run_idx]['rendered_flame_as_condition'])
# torch.manual_seed(2)
identity_embeddings = torch.randint(low=0, high=69158, size=(gen_1_in.shape[0], ), dtype=torch.long,
device='cuda')
mdl_1_gen_images = generic_utils.get_images_from_flame_params(
flame_params=gen_1_in.cpu().numpy(), pose=None,
model=generator_1,
step=step_max, alpha=alpha,
input_indices=identity_embeddings.cpu().numpy())
# import ipdb; ipdb.set_trace()
images = torch.clamp(mdl_1_gen_images, -1, 1).cpu().numpy()
flame_mesh_imgs = torch.clamp(rend_flm, -1, 1).cpu().numpy()
save_path_current_id = os.path.join(root_out_dir, 'inter_model_comparison', settings_for_runs[run_idx]['name'])
save_set_of_images(path=save_path_current_id, prefix=f'{mdl_id}_{batch_idx}',
images=(images + 1) / 2, show_prog_bar=True)
#save flam rndr
save_path_current_id_flm_rndr = os.path.join(root_out_dir, 'inter_model_comparison',
settings_for_runs[run_idx]['name'])
save_set_of_images(path=save_path_current_id_flm_rndr, prefix=f'mesh_{batch_idx}',
images=(flame_mesh_imgs + 1) / 2, show_prog_bar=True)
# save_set_of_images(path=save_path_this_expt, prefix='mesh_', images=((norma_map_img + 1) / 2).cpu().numpy())
# save_set_of_images(path=save_path_this_expt, prefix='mdl1_', images=((mdl_1_gen_images + 1) / 2).cpu().numpy())
# save_set_of_images(path=save_path_this_expt, prefix='mdl2_', images=((mdl_2_gen_images + 1) / 2).cpu().numpy())
| 47.718563
| 119
| 0.676371
| 1,157
| 7,969
| 4.239412
| 0.215212
| 0.015902
| 0.036697
| 0.040367
| 0.312334
| 0.281549
| 0.229969
| 0.213252
| 0.198981
| 0.145566
| 0
| 0.032455
| 0.199649
| 7,969
| 167
| 120
| 47.718563
| 0.736595
| 0.107542
| 0
| 0.032
| 0
| 0
| 0.150247
| 0.073573
| 0
| 0
| 0
| 0
| 0
| 1
| 0.008
| false
| 0
| 0.128
| 0
| 0.168
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5da2dcdc36e76038fe4e53bad3d9602bb03e2dea
| 38,961
|
py
|
Python
|
Packs/mnemonicMDR/Integrations/ArgusManagedDefence/ArgusManagedDefence.py
|
matan-xmcyber/content
|
7f02301c140b35956af3cd20cb8dfc64f34afb3e
|
[
"MIT"
] | null | null | null |
Packs/mnemonicMDR/Integrations/ArgusManagedDefence/ArgusManagedDefence.py
|
matan-xmcyber/content
|
7f02301c140b35956af3cd20cb8dfc64f34afb3e
|
[
"MIT"
] | 3
|
2019-12-13T13:27:20.000Z
|
2020-01-01T14:27:45.000Z
|
Packs/mnemonicMDR/Integrations/ArgusManagedDefence/ArgusManagedDefence.py
|
matan-xmcyber/content
|
7f02301c140b35956af3cd20cb8dfc64f34afb3e
|
[
"MIT"
] | null | null | null |
import demistomock as demisto
from CommonServerPython import *
""" IMPORTS """
import json
import urllib3
import dateparser
import traceback
from typing import Any, Dict, List, Union
import logging
from argus_api import session as argus_session
from argus_api.api.currentuser.v1.user import get_current_user
from argus_api.api.cases.v2.case import (
add_case_tag,
add_comment,
advanced_case_search,
close_case,
create_case,
delete_case,
delete_comment,
download_attachment,
edit_comment,
get_attachment,
get_case_metadata_by_id,
list_case_attachments,
list_case_tags,
list_case_comments,
remove_case_tag_by_id,
remove_case_tag_by_key_value,
update_case,
)
from argus_api.api.events.v1 import get_event_by_path
from argus_api.api.events.v1.case.case import get_events_for_case
from argus_api.api.events.v1.aggregated import (
find_aggregated_events,
list_aggregated_events,
)
from argus_api.api.events.v1.payload import get_payload
from argus_api.api.events.v1.pcap import get_pcap
from argus_api.api.events.v1.nids import find_n_i_d_s_events, list_n_i_d_s_events
from argus_api.api.pdns.v3.search import search_records
from argus_api.api.reputation.v1.observation import (
fetch_observations_for_domain,
fetch_observations_for_i_p,
)
# Disable insecure warnings
urllib3.disable_warnings()
""" CONSTANTS """
DATE_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
PRETTY_DATE_FORMAT = "%b %d, %Y, %H:%M:%S"
FETCH_TAG = demisto.params().get("fetch_tag")
""" HELPER FUNCTIONS """
def set_argus_settings(
api_key: str, base_url: str = None, proxies: dict = None, verify: bool = None
):
argus_session.api_key = api_key
argus_session.base_url = base_url
argus_session.proxies = proxies
argus_session.verify = verify
def argus_priority_to_demisto_severity(priority: str) -> int:
mapping = {"low": 1, "medium": 2, "high": 3, "critical": 4}
return mapping.get(priority, 0)
def argus_status_to_demisto_status(status: str) -> int:
mapping = {
"pendingCustomer": 0,
"pendingSoc": 0,
"pendingVendor": 0,
"pendingClose": 0,
"workingSoc": 1,
"workingCustomer": 1,
"closed": 2,
}
return mapping.get(status, 0)
def build_argus_priority_from_min_severity(min_severity: str) -> List[str]:
severities = ["low", "medium", "high", "critical"]
min_severity_list = []
for severity in severities:
if argus_priority_to_demisto_severity(
min_severity.lower()
) <= argus_priority_to_demisto_severity(severity):
min_severity_list.append(severity)
return min_severity_list
def parse_first_fetch(first_fetch: Any) -> Any:
if isinstance(first_fetch, str):
if first_fetch[0] != "-":
first_fetch = f"-{first_fetch}"
return first_fetch
def build_tags_from_list(lst: list) -> List[Dict]:
if not lst:
return []
if len(lst) % 2 != 0:
return []
tags = []
for i in range(0, len(lst), 2):
tags.append({"key": lst[i], "value": lst[i + 1]})
return tags
def str_to_dict(string: str) -> dict:
if not string:
return {}
lst = argToList(string)
if len(lst) % 2 != 0:
return {}
return {lst[i]: lst[i + 1] for i in range(0, len(lst), 2)}
def date_time_to_epoch_milliseconds(date_time: Union[datetime, str] = None) -> int:
if isinstance(date_time, datetime):
return int(date_time.timestamp() * 1000)
if isinstance(date_time, str):
return date_time_to_epoch_milliseconds(dateparser.parse(date_time))
return int(datetime.now().timestamp() * 1000)
def pretty_print_date(date_time: Union[datetime, str] = None) -> str:
if isinstance(date_time, datetime):
return date_time.strftime(PRETTY_DATE_FORMAT)
if isinstance(date_time, str):
return pretty_print_date(dateparser.parse(date_time))
return datetime.now().strftime(PRETTY_DATE_FORMAT)
def pretty_print_case_metadata(result: dict, title: str = None) -> str:
data = result["data"]
string = title if title else f"# #{data['id']}: {data['subject']}\n"
string += "_Priority: {}, status: {}, last updated: {}_\n".format(
data["priority"], data["status"], pretty_print_date(data["lastUpdatedTime"])
)
string += "Reported by {} at {}\n\n".format(
data["publishedByUser"]["name"], pretty_print_date(data["publishedTime"])
)
string += data["description"]
return string
def pretty_print_comment(comment: dict, title: str = None) -> str:
string = title if title else ""
string += f"#### *{comment['addedByUser']['userName']} - {pretty_print_date(comment['addedTime'])}*\n"
string += (
f"_Last updated {pretty_print_date(comment['lastUpdatedTime'])}_\n"
if comment["lastUpdatedTime"]
else ""
)
string += f"{comment['comment']}\n\n"
string += f"_id: {comment['id']}_\n"
string += f"_Flags: {str(comment['flags'])}_\n" if comment["flags"] else ""
string += "* * *\n"
return string
def pretty_print_comments(comments: list, title: str = None) -> str:
string = title if title else ""
for comment in comments:
string += pretty_print_comment(comment)
return string
def pretty_print_events(result: dict, title: str = None) -> str:
string = title if title else ""
string += "_Count: {}, showing {} events, from {} to {}_\n".format(
result["count"], result["size"], result["offset"], result["limit"]
)
string += tableToMarkdown("Events", result["data"])
return string
""" COMMAND FUNCTIONS """
def test_module_command() -> str:
response = get_current_user()
if response["responseCode"] == 200:
return "ok"
return (
f"Unable to communicate with Argus API {response['responseCode']}, {response}"
)
def fetch_incidents(
last_run: dict, first_fetch_period: str, limit: int = 25, min_severity: str = "low"
):
start_timestamp = last_run.get("start_time", None) if last_run else None
# noinspection PyTypeChecker
result = advanced_case_search(
startTimestamp=start_timestamp if start_timestamp else first_fetch_period,
endTimestamp="now",
limit=limit,
sortBy=["createdTimestamp"],
priority=build_argus_priority_from_min_severity(min_severity),
subCriteria=[
{"exclude": True, "status": ["closed"]},
],
timeFieldStrategy=["createdTimestamp"],
)
incidents = []
for case in result["data"]:
incidents.append(
{
"name": f"#{case['id']}: {case['subject']}",
"occurred": case["createdTime"],
"severity": argus_priority_to_demisto_severity(case["priority"]),
"status": argus_status_to_demisto_status(case["status"]),
"details": case["description"],
"customFields": {
"argus_id": str(case["id"]),
"type": case["type"],
"category": case["category"]["name"] if case["category"] else None,
"service": case["service"]["name"],
"lastUpdatedTime": case["lastUpdatedTime"],
"createdTimestamp": case["createdTimestamp"],
"customer": case["customer"]["shortName"],
},
"rawJSON": json.dumps(case),
}
)
if result["data"]:
last_run["start_time"] = str(result["data"][-1]["createdTimestamp"] + 1)
return last_run, incidents
def add_case_tag_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
key = args.get("key", None)
value = args.get("value", None)
if not case_id:
raise ValueError("case_id not specified")
if not key:
raise ValueError("key not specified")
if not value:
raise ValueError("value not specified")
tag = {"key": key, "value": value}
result = add_case_tag(caseID=case_id, tags=tag)
headers = ["key", "value", "addedTime"]
readable_output = tableToMarkdown(
f"#{case_id}: Tags", result["data"], headers=headers
)
return CommandResults(
readable_output=readable_output,
outputs_prefix="Argus.Tags",
outputs=result,
raw_response=result,
)
def add_comment_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
comment = args.get("comment", None)
if not case_id:
raise ValueError("case_id not specified")
if not comment:
raise ValueError("comment not specified")
result = add_comment(
caseID=case_id,
comment=comment,
asReplyTo=args.get("as_reply_to", None),
internal=args.get("internal", None),
originEmailAddress=args.get("origin_email_address", None),
associatedAttachmentID=args.get("associated_attachment_id", None),
)
return CommandResults(
readable_output=pretty_print_comment(
result["data"], f"# #{case_id}: Added comment\n"
),
outputs_prefix="Argus.Comment",
outputs=result,
raw_response=result,
)
def advanced_case_search_command(args: Dict[str, Any]) -> CommandResults:
# noinspection PyTypeChecker
result = advanced_case_search(
startTimestamp=args.get("start_timestamp", None),
endTimestamp=args.get("end_timestamp", None),
limit=args.get("limit", None),
offset=args.get("offset", None),
includeDeleted=args.get("include_deleted", None),
subCriteria=argToList(args.get("sub_criteria", None)),
exclude=args.get("exclude", None),
required=args.get("required", None),
customerID=argToList(args.get("customer_id", None)),
caseID=argToList(args.get("case_id", None)),
customer=argToList(args.get("customer", None)),
type=argToList(args.get("case_type", None)),
service=argToList(args.get("service", None)),
category=argToList(args.get("category", None)),
status=argToList(args.get("status", None)),
priority=argToList(args.get("priority", None)),
assetID=argToList(args.get("asset_id", None)),
tag=argToList(args.get("tag", None)),
workflow=argToList(args.get("workflow", None)),
field=argToList(args.get("field", None)),
keywords=argToList(args.get("keywords", None)),
timeFieldStrategy=argToList(args.get("time_field_strategy", None)),
timeMatchStrategy=args.get("time_match_strategy", None),
keywordFieldStrategy=argToList(args.get("keyword_field_strategy", None)),
keywordMatchStrategy=args.get("keyword_match_strategy", None),
user=argToList(args.get("user", None)),
userFieldStrategy=argToList(args.get("user_field_strategy", None)),
userAssigned=args.get("user_assigned", None),
techAssigned=args.get("tech_assigned", None),
includeWorkflows=args.get("include_workflows", None),
includeDescription=args.get("include_description", None),
accessMode=argToList(args.get("access_mode", None)),
explicitAccess=argToList(args.get("explicit_access", None)),
sortBy=argToList(args.get("sort_by", None)),
includeFlags=argToList(args.get("include_flags", None)),
excludeFlags=argToList(args.get("exclude_flags", None)),
)
readable_output = f"Advanced Case Search: {result['count']} result(s)\n"
readable_output += tableToMarkdown(
"Output not suitable for playground", result["data"]
)
return CommandResults(
readable_output=readable_output,
outputs_prefix="Argus.Cases",
outputs=result,
raw_response=result,
)
def close_case_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
if not case_id:
raise ValueError("case_id not specified")
result = close_case(
caseID=case_id,
comment=args.get("comment", None),
)
readable_output = f"# #{case_id}: close case\n"
readable_output += (
f"_Status: {result['data']['status']}, at: {result['data']['closedTime']}_"
)
return CommandResults(
readable_output=readable_output,
outputs_prefix="Argus.Case",
outputs=result,
raw_response=result,
)
def create_case_command(args: Dict[str, Any]) -> CommandResults:
subject = args.get("subject", None)
description = args.get("description", None)
service = args.get("service", None)
case_type = args.get("type", None)
tags = args.get("tags", None)
if not subject:
raise ValueError("subject not specified")
if not description:
raise ValueError("description not specified")
if not service:
raise ValueError("service not specified")
if not case_type:
raise ValueError("case_type not specified")
if tags:
tags = str(tags).split(",")
if len(tags) % 2 != 0:
raise ValueError("tags list must be of even number", tags)
tags = build_tags_from_list(tags)
result = create_case(
customer=args.get("customer", None),
service=service,
category=args.get("category", None),
type=case_type,
status=args.get("status", None),
tags=tags,
subject=subject,
description=description,
customerReference=args.get("customer_reference", None),
priority=args.get("priority", None),
accessMode=args.get("access_mode", None),
originEmailAddress=args.get("origin_email_address", None),
publish=args.get("publish", None),
defaultWatchers=args.get("default_watchers", None),
)
return CommandResults(
readable_output=pretty_print_case_metadata(result),
outputs_prefix="Argus.Case",
outputs=result,
raw_response=result,
)
def delete_case_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
if not case_id:
raise ValueError("case id not specified")
result = delete_case(caseID=case_id)
return CommandResults(
readable_output=pretty_print_case_metadata(result, "Case deleted"),
outputs_prefix="Argus.Case",
outputs=result,
raw_response=result,
)
def delete_comment_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
comment_id = args.get("comment_id", None)
if not case_id:
raise ValueError("case id not specified")
if not comment_id:
raise ValueError("comment id not specified")
result = delete_comment(caseID=case_id, commentID=comment_id)
return CommandResults(
readable_output=pretty_print_comment(
result["data"], f"# #{case_id}: Deleted comment\n"
),
outputs_prefix="Argus.Comment",
outputs=result,
raw_response=result,
)
def download_attachment_command(args: Dict[str, Any]) -> Any:
case_id = args.get("case_id", None)
attachment_id = args.get("attachment_id", None)
if not case_id:
raise ValueError("case id not specified")
if not attachment_id:
raise ValueError("attachment id not specified")
result = download_attachment(caseID=case_id, attachmentID=attachment_id)
return fileResult(attachment_id, result.content)
def edit_comment_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
comment_id = args.get("comment_id", None)
comment = args.get("comment", None)
if not case_id:
raise ValueError("case id not specified")
if not comment_id:
raise ValueError("comment id not specified")
if not comment:
raise ValueError("comment not specified")
result = edit_comment(caseID=case_id, commentID=comment_id, comment=comment)
return CommandResults(
readable_output=pretty_print_comment(
result["data"], f"# #{case_id}: Updated comment\n"
),
outputs_prefix="Argus.Comment",
outputs=result,
raw_response=result,
)
def get_attachment_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
attachment_id = args.get("attachment_id", None)
if not case_id:
raise ValueError("case id not specified")
if not attachment_id:
raise ValueError("attachment id not specified")
result = get_attachment(caseID=case_id, attachmentID=attachment_id)
readable_output = f"# #{case_id}: attachment metadata\n"
readable_output += f"#### *{result['data']['addedByUser']['userName']} - {result['data']['addedTime']}*\n"
readable_output += f"{result['data']['name']} ({result['data']['mimeType']}, {result['data']['size']} bytes)\n\n"
readable_output += f"_id: {result['data']['id']}_\n"
return CommandResults(
readable_output=readable_output,
outputs_prefix="Argus.Attachments",
outputs=result,
raw_response=result,
)
def get_case_metadata_by_id_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
if not case_id:
raise ValueError("case id not specified")
result = get_case_metadata_by_id(
id=case_id, skipRedirect=args.get("skip_redirect", None)
)
return CommandResults(
readable_output=pretty_print_case_metadata(result),
outputs_prefix="Argus.Case",
outputs=result,
raw_response=result,
)
def list_case_attachments_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
if not case_id:
raise ValueError("case_id not specified")
result = list_case_attachments(
caseID=case_id, limit=args.get("limit", None), offset=args.get("offset", None)
)
readable_output = f"# #{case_id}: Case attachments\n"
for attachment in result["data"]:
readable_output += f"#### *{attachment['addedByUser']['userName']} - {attachment['addedTime']}*\n"
readable_output += f"{attachment['name']} ({attachment['mimeType']}, {attachment['size']} kb)\n\n"
readable_output += f"_id: {attachment['id']}_\n"
readable_output += "* * *\n"
return CommandResults(
readable_output=readable_output,
outputs_prefix="Argus.Attachments",
outputs=result,
raw_response=result,
)
def list_case_tags_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
if not case_id:
raise ValueError("case_id not specified")
result = list_case_tags(
caseID=case_id, limit=args.get("limit", None), offset=args.get("offset", None)
)
headers = ["key", "value", "addedTime", "id"]
readable_output = tableToMarkdown(
f"#{case_id}: Tags", result["data"], headers=headers
)
return CommandResults(
readable_output=readable_output,
outputs_prefix="Argus.Tags",
outputs=result,
raw_response=result,
)
def list_case_comments_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
sort_by = args.get("sort_by", None)
if not case_id:
raise ValueError("case_id not specified")
if sort_by:
sort_by = ["addedTimestamp"] if sort_by == "ascending" else ["-addedTimestamp"]
result = list_case_comments(
caseID=case_id,
beforeComment=args.get("before_comment", None),
afterComment=args.get("after_comment", None),
offset=args.get("offset", None),
limit=args.get("limit", None),
sortBy=sort_by,
)
return CommandResults(
readable_output=pretty_print_comments(
result["data"], f"# #{case_id}: Comments\n"
),
outputs_prefix="Argus.Comments",
outputs=result,
raw_response=result,
)
def remove_case_tag_by_id_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
tag_id = args.get("tag_id", None)
if not case_id:
raise ValueError("case id not specified")
if not tag_id:
raise ValueError("tag id not specified")
result = remove_case_tag_by_id(caseID=case_id, tagID=tag_id)
headers = ["key", "value", "addedTime", "id", "flags"]
readable_output = tableToMarkdown(
f"#{case_id}: Delete tags", result["data"], headers=headers
)
return CommandResults(
readable_output=readable_output,
outputs_prefix="Argus.Tags",
outputs=result,
raw_response=result,
)
def remove_case_tag_by_key_value_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
key = args.get("key", None)
value = args.get("value", None)
if not case_id:
raise ValueError("case id not specified")
if not key:
raise ValueError("key not specified")
if not value:
raise ValueError("value not specified")
result = remove_case_tag_by_key_value(caseID=case_id, tagKey=key, tagValue=value)
headers = ["key", "value", "addedTime", "id", "flags"]
readable_output = tableToMarkdown(
f"#{case_id}: Delete tags", result["data"], headers=headers
)
return CommandResults(
readable_output=readable_output,
outputs_prefix="Argus.Tags",
outputs=result,
raw_response=result,
)
def update_case_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
if not case_id:
raise ValueError("case id not specified")
result = update_case(
id=case_id,
subject=args.get("subject", None),
description=args.get("description", None),
status=args.get("status", None),
priority=args.get("priority", None),
category=args.get("category", None),
reporter=args.get("reporter", None),
assignedUser=args.get("assigned_user", None),
assignedTech=args.get("assigned_tech", None),
customerReference=args.get("customer_reference", None),
comment=args.get("comment", None),
originEmailAddress=args.get("origin_email_address", None),
hasEvents=args.get("has_events", None),
internalComment=args.get("internal_comment", None),
)
return CommandResults(
readable_output=pretty_print_case_metadata(result),
outputs_prefix="Argus.Case",
outputs=result,
raw_response=result,
)
def get_event_command(args: Dict[str, Any]) -> CommandResults:
event_type = args.get("type", None)
timestamp = args.get("timestamp", None)
customer_id = args.get("customer_id", None)
event_id = args.get("event_id", None)
if not event_type:
raise ValueError("event type not specified")
if not timestamp:
raise ValueError("timestamp not specified")
if not customer_id:
raise ValueError("customer id not specified")
if not event_id:
raise ValueError("event id not specified")
result = get_event_by_path(
type=event_type, timestamp=timestamp, customerID=customer_id, eventID=event_id
)
return CommandResults(
readable_output=tableToMarkdown(f"Event: {event_id}", result["data"]),
outputs_prefix="Argus.Event",
outputs=result,
raw_response=result,
)
def get_events_for_case_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
if not case_id:
raise ValueError("case id not specified")
result = get_events_for_case(
caseID=case_id, limit=args.get("limit", None), offset=args.get("offset", None)
)
return CommandResults(
readable_output=pretty_print_events(
dict(result), f"# #{case_id}: Associated Events\n"
),
outputs_prefix="Argus.Events",
outputs=result,
raw_response=result,
)
def find_aggregated_events_command(args: Dict[str, Any]) -> CommandResults:
# noinspection PyTypeChecker
result = find_aggregated_events(
skipFutureEvents=args.get("skip_future_events", None),
exclude=args.get("exclude", None),
locationID=argToList(args.get("location_id", None)),
severity=argToList(args.get("severity", None)),
customer=argToList(args.get("customer", None)),
alarmID=argToList(args.get("alarm_id", None)),
attackCategoryID=argToList(args.get("attack_category_id", None)),
sourceGeoCountry=argToList(args.get("source_geo_country", None)),
destinationGeoCountry=argToList(args.get("destination_geo_country", None)),
geoCountry=argToList(args.get("geo_country", None)),
properties=str_to_dict(args.get("properties", None)),
exactMatchProperties=args.get("exact_match_properties", None),
subCriteria=argToList(args.get("sub_criteria", None)),
signature=argToList(args.get("signature", None)),
lastUpdatedTimestamp=args.get("last_updated_timestamp", None),
indexStartTime=args.get("index_start_time", None),
indexEndTime=args.get("index_end_time", None),
destinationIP=argToList(args.get("destination_ip", None)),
sourceIP=argToList(args.get("source_ip", None)),
ip=argToList(args.get("ip", None)),
destinationPort=argToList(args.get("destination_port", None)),
sourcePort=argToList(args.get("source_port", None)),
port=argToList(args.get("port", None)),
minSeverity=args.get("min_severity", None),
maxSeverity=args.get("max_severity", None),
limit=args.get("limit", 25),
offset=args.get("offset", None),
includeDeleted=args.get("include_deleted", None),
minCount=args.get("min_count", None),
associatedCaseID=argToList(args.get("associated_case_id", None)),
sourceIPMinBits=args.get("source_ip_min_bits", None),
destinationIPMinBits=args.get("destination_ip_min_bits", None),
startTimestamp=args.get("start_timestamp", "-24hours"),
endTimestamp=args.get("end_timestamp", "now"),
sortBy=argToList(args.get("sort_by", None)),
includeFlags=argToList(args.get("include_flags", None)),
excludeFlags=argToList(args.get("exclude_flags", None)),
)
return CommandResults(
readable_output=pretty_print_events(dict(result), "# Find events\n"),
outputs_prefix="Argus.Events",
outputs=result,
raw_response=result,
)
def list_aggregated_events_command(args: Dict[str, Any]) -> CommandResults:
result = list_aggregated_events(
customerID=args.get("customer_id", None),
signature=args.get("signature", None),
ip=args.get("ip", None),
startTimestamp=args.get("start_timestamp", None),
endTimestamp=args.get("end_timestamp", None),
limit=args.get("limit", None),
offset=args.get("offset", None),
)
return CommandResults(
readable_output=pretty_print_events(dict(result), "# List Events\n"),
outputs_prefix="Argus.Events",
outputs=result,
raw_response=result,
)
def get_payload_command(args: Dict[str, Any]) -> CommandResults:
event_type = args.get("type", None)
timestamp = args.get("timestamp", None)
customer_id = args.get("customer_id", None)
event_id = args.get("event_id", None)
if not event_type:
raise ValueError("event type not specified")
if not timestamp:
raise ValueError("timestamp not specified")
if not customer_id:
raise ValueError("customer id not specified")
if not event_id:
raise ValueError("event id not specified")
result = get_payload(
type=event_type, timestamp=timestamp, customerID=customer_id, eventID=event_id
)
readable_output = "# Event payload\n"
readable_output += f"Event: {event_id}, type: {result['data']['type']}\n"
readable_output += result["data"]["payload"]
return CommandResults(
readable_output=readable_output,
outputs_prefix="Argus.Payload",
outputs=result,
raw_response=result,
)
def get_pcap_command(args: Dict[str, Any]) -> Any:
event_type = args.get("type", None)
timestamp = args.get("timestamp", None)
customer_id = args.get("customer_id", None)
event_id = args.get("event_id", None)
if not event_type:
raise ValueError("event type not specified")
if not timestamp:
raise ValueError("timestamp not specified")
if not customer_id:
raise ValueError("customer id not specified")
if not event_id:
raise ValueError("event id not specified")
result = get_pcap(
type=event_type, timestamp=timestamp, customerID=customer_id, eventID=event_id
)
return fileResult(f"{event_id}_pcap", result.content)
def find_nids_events_command(args: Dict[str, Any]) -> CommandResults:
# noinspection PyTypeChecker
result = find_n_i_d_s_events(
skipFutureEvents=args.get("skip_future_events", None),
exclude=args.get("exclude", None),
eventIdentifier=argToList(args.get("event_identifier", None)),
locationID=argToList(args.get("location_id", None)),
severity=argToList(args.get("severity", None)),
customer=argToList(args.get("customer", None)),
alarmID=argToList(args.get("alarm_id", None)),
attackCategoryID=argToList(args.get("attack_category_id", None)),
sourceGeoCountry=argToList(args.get("source_geo_country", None)),
destinationGeoCountry=argToList(args.get("destination_geo_country", None)),
geoCountry=argToList(args.get("geo_country", None)),
properties=str_to_dict(args.get("properties", None)),
exactMatchProperties=args.get("exact_match_properties", None),
sensorID=argToList(args.get("sensor_id", None)),
subCriteria=argToList(args.get("sub_criteria", None)),
signature=argToList(args.get("signature", None)),
lastUpdatedTimestamp=args.get("last_updated_timestamp", None),
indexStartTime=args.get("index_start_time", None),
indexEndTime=args.get("index_end_time", None),
destinationIP=argToList(args.get("destination_ip", None)),
sourceIP=argToList(args.get("source_ip", None)),
ip=argToList(args.get("ip", None)),
destinationPort=argToList(args.get("destination_port", None)),
sourcePort=argToList(args.get("source_port", None)),
port=argToList(args.get("port", None)),
minSeverity=args.get("min_severity", None),
maxSeverity=args.get("max_severity", None),
limit=args.get("limit", 25),
offset=args.get("offset", None),
includeDeleted=args.get("include_deleted", None),
startTimestamp=args.get("start_timestamp", "-24hours"),
endTimestamp=args.get("end_timestamp", "now"),
sortBy=argToList(args.get("sort_by", None)),
includeFlags=argToList(args.get("include_flags", None)),
excludeFlags=argToList(args.get("exclude_flags", None)),
)
return CommandResults(
readable_output=pretty_print_events(dict(result), "# Find NIDS Events\n"),
outputs_prefix="Argus.NIDS",
outputs=result,
raw_response=result,
)
def list_nids_events_command(args: Dict[str, Any]) -> CommandResults:
result = list_n_i_d_s_events(
customerID=args.get("customer_id", None),
signature=args.get("signature", None),
ip=args.get("ip", None),
startTimestamp=args.get("start_timestamp", None),
endTimestamp=args.get("end_timestamp", None),
limit=args.get("limit", None),
offset=args.get("offset", None),
)
return CommandResults(
readable_output=pretty_print_events(dict(result), "# List NIDS Events\n"),
outputs_prefix="Argus.NIDS",
outputs=result,
raw_response=result,
)
def search_records_command(args: Dict[str, Any]) -> CommandResults:
query = args.get("query", None)
if not query:
raise ValueError("query not specified")
# noinspection PyTypeChecker
result = search_records(
query=query,
aggregateResult=args.get("aggregate_result", None),
includeAnonymousResults=args.get("include_anonymous_results", None),
rrClass=argToList(args.get("rr_class", None)),
rrType=argToList(args.get("rr_type", None)),
customerID=argToList(args.get("customer_id", None)),
tlp=argToList((args.get("tlp", None))),
limit=args.get("limit", 25),
offset=args.get("offset", None),
)
return CommandResults(
readable_output=tableToMarkdown("PDNS records", result["data"]),
outputs_prefix="Argus.PDNS",
outputs=result,
raw_response=result,
)
def fetch_observations_for_domain_command(args: Dict[str, Any]) -> CommandResults:
fqdn = args.get("fqdn", None)
if not fqdn:
raise ValueError("fqdn not specified")
result = fetch_observations_for_domain(fqdn=fqdn)
return CommandResults(
readable_output=tableToMarkdown(
f'Domain observations for "{fqdn}"', result["data"]
),
outputs_prefix="Argus.ObservationsDomain",
outputs=result,
raw_response=result,
)
def fetch_observations_for_i_p_command(args: Dict[str, Any]) -> CommandResults:
ip = args.get("ip", None)
if not ip:
raise ValueError("ip not specified")
result = fetch_observations_for_i_p(ip=ip)
return CommandResults(
readable_output=tableToMarkdown(f'IP observations for "{ip}"', result["data"]),
outputs_prefix="Argus.ObservationsIP",
outputs=result,
raw_response=result,
)
""" MAIN FUNCTION """
def main() -> None:
logging.getLogger("argus_cli").setLevel("WARNING")
first_fetch_period = parse_first_fetch(
demisto.params().get("first_fetch", "-1 day")
)
set_argus_settings(
demisto.params().get("api_key"),
demisto.params().get("api_url"),
handle_proxy(),
demisto.params().get("insecure", None),
)
demisto.debug(f"Command being called is {demisto.command()}")
try:
if demisto.command() == "test-module":
# This is the call made when pressing the integration Test button.
return_results(test_module_command())
elif demisto.command() == "fetch-incidents":
# Set and define the fetch incidents command to run after activated via integration settings.
next_run, incidents = fetch_incidents(
last_run=demisto.getLastRun(),
first_fetch_period=first_fetch_period,
limit=demisto.params().get("max_fetch", 25),
min_severity=demisto.params().get("min_severity", "low"),
)
demisto.setLastRun(next_run)
demisto.incidents(incidents)
elif demisto.command() == "argus-add-case-tag":
return_results(add_case_tag_command(demisto.args()))
elif demisto.command() == "argus-add-comment":
return_results(add_comment_command(demisto.args()))
elif demisto.command() == "argus-advanced-case-search":
return_results(advanced_case_search_command(demisto.args()))
elif demisto.command() == "argus-close-case":
return_results(close_case_command(demisto.args()))
elif demisto.command() == "argus-create-case":
return_results(create_case_command(demisto.args()))
elif demisto.command() == "argus-delete-case":
return_results(delete_case_command(demisto.args()))
elif demisto.command() == "argus-delete-comment":
return_results(delete_comment_command(demisto.args()))
elif demisto.command() == "argus-download-attachment":
return_results(download_attachment_command(demisto.args()))
elif demisto.command() == "argus-edit-comment":
return_results(edit_comment_command(demisto.args()))
elif demisto.command() == "argus-get-attachment":
return_results(get_attachment_command(demisto.args()))
elif demisto.command() == "argus-get-case-metadata-by-id":
return_results(get_case_metadata_by_id_command(demisto.args()))
elif demisto.command() == "argus-list-case-attachments":
return_results(list_case_attachments_command(demisto.args()))
elif demisto.command() == "argus-list-case-tags":
return_results(list_case_tags_command(demisto.args()))
elif demisto.command() == "argus-list-case-comments":
return_results(list_case_comments_command(demisto.args()))
elif demisto.command() == "argus-remove-case-tag-by-id":
return_results(remove_case_tag_by_id_command(demisto.args()))
elif demisto.command() == "argus-remove-case-tag-by-key-value":
return_results(remove_case_tag_by_key_value_command(demisto.args()))
elif demisto.command() == "argus-update-case":
return_results(update_case_command(demisto.args()))
elif demisto.command() == "argus-get-event":
return_results(get_event_command(demisto.args()))
elif demisto.command() == "argus-get-events-for-case":
return_results(get_events_for_case_command(demisto.args()))
elif demisto.command() == "argus-find-aggregated-events":
return_results(find_aggregated_events_command(demisto.args()))
elif demisto.command() == "argus-list-aggregated-events":
return_results(list_aggregated_events_command(demisto.args()))
elif demisto.command() == "argus-get-payload":
return_results(get_payload_command(demisto.args()))
elif demisto.command() == "argus-get-pcap":
return_results(get_pcap_command(demisto.args()))
elif demisto.command() == "argus-find-nids-events":
return_results(find_nids_events_command(demisto.args()))
elif demisto.command() == "argus-list-nids-events":
return_results(list_nids_events_command(demisto.args()))
elif demisto.command() == "argus-pdns-search-records":
return_results(search_records_command(demisto.args()))
elif demisto.command() == "argus-fetch-observations-for-domain":
return_results(fetch_observations_for_domain_command(demisto.args()))
elif demisto.command() == "argus-fetch-observations-for-ip":
return_results(fetch_observations_for_i_p_command(demisto.args()))
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(
f"Failed to execute {demisto.command()} command.\nError:\n{str(e)}"
)
""" ENTRY POINT """
if __name__ in ("__main__", "__builtin__", "builtins"):
main()
| 35.711274
| 117
| 0.65273
| 4,600
| 38,961
| 5.320652
| 0.088043
| 0.061777
| 0.044454
| 0.020592
| 0.636568
| 0.589663
| 0.534178
| 0.506149
| 0.465659
| 0.421982
| 0
| 0.002158
| 0.214856
| 38,961
| 1,090
| 118
| 35.744037
| 0.797941
| 0.009497
| 0
| 0.431306
| 0
| 0.001126
| 0.181596
| 0.033646
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04955
| false
| 0
| 0.021396
| 0
| 0.128378
| 0.027027
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5da38e943cdd95f554ae0517d32417a9a5d31b05
| 699
|
py
|
Python
|
scripts/examples/tools/capturebat.py
|
fortinet/ips-bph-framework
|
145e14cced2181f388ade07d78b4f0e9452143dd
|
[
"Apache-2.0"
] | 21
|
2019-10-24T04:59:52.000Z
|
2021-05-11T12:47:17.000Z
|
scripts/examples/tools/capturebat.py
|
fortinet/ips-bph-framework
|
145e14cced2181f388ade07d78b4f0e9452143dd
|
[
"Apache-2.0"
] | null | null | null |
scripts/examples/tools/capturebat.py
|
fortinet/ips-bph-framework
|
145e14cced2181f388ade07d78b4f0e9452143dd
|
[
"Apache-2.0"
] | 9
|
2019-10-26T16:56:08.000Z
|
2021-03-15T14:10:21.000Z
|
# Tool Imports
from bph.tools.windows.capturebat import BphCaptureBat as CaptureBat
# Core Imports
from bph.core.server.template import BphTemplateServer as TemplateServer
from bph.core.sample import BphSample as Sample
from bph.core.sample import BphLabFile as LabFile
from bph.core.session import BphSession as Session
session = Session(project_name='blackhat_arsenal_2019')
session.start()
templateserver = TemplateServer()
templateserver.start()
capturebat = CaptureBat()
capturebat.cleanup()
capturebat.execute()
capturebat.start()
capturebat.execute(delay=15)
capturebat.stop()
capturebat.execute()
capturebat.collect()
capturebat.execute()
capturebat.files()
| 24.964286
| 73
| 0.786838
| 81
| 699
| 6.753086
| 0.419753
| 0.063985
| 0.080439
| 0.062157
| 0.084095
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009836
| 0.127325
| 699
| 28
| 74
| 24.964286
| 0.886885
| 0.035765
| 0
| 0.157895
| 0
| 0
| 0.032558
| 0.032558
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.263158
| 0
| 0.263158
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5da5c1523876b5ad6f15a38ad4bcfea7774fd3c9
| 3,083
|
py
|
Python
|
tests/mrp/test_mrp_auth.py
|
evanreichard/pyatv
|
d41bd749bbf8f8a9365e7fd36c1164543e334565
|
[
"MIT"
] | null | null | null |
tests/mrp/test_mrp_auth.py
|
evanreichard/pyatv
|
d41bd749bbf8f8a9365e7fd36c1164543e334565
|
[
"MIT"
] | 1
|
2020-06-13T15:14:47.000Z
|
2020-06-13T15:14:47.000Z
|
tests/mrp/test_mrp_auth.py
|
evanreichard/pyatv
|
d41bd749bbf8f8a9365e7fd36c1164543e334565
|
[
"MIT"
] | null | null | null |
"""Functional authentication tests with fake MRP Apple TV."""
import inspect
from aiohttp.test_utils import AioHTTPTestCase, unittest_run_loop
import pyatv
from pyatv import exceptions
from pyatv.const import Protocol
from pyatv.conf import MrpService, AppleTV
from pyatv.mrp.server_auth import PIN_CODE, CLIENT_IDENTIFIER, CLIENT_CREDENTIALS
from tests.fake_device import FakeAppleTV
class MrpAuthFunctionalTest(AioHTTPTestCase):
def setUp(self):
AioHTTPTestCase.setUp(self)
self.service = MrpService(
CLIENT_IDENTIFIER, self.fake_atv.get_port(Protocol.MRP)
)
self.conf = AppleTV("127.0.0.1", "Apple TV")
self.conf.add_service(self.service)
async def tearDownAsync(self):
if inspect.iscoroutinefunction(self.handle.close):
await self.handle.close()
else:
self.handle.close()
await super().tearDownAsync()
async def get_application(self, loop=None):
self.fake_atv = FakeAppleTV(self.loop)
self.state, self.usecase = self.fake_atv.add_service(Protocol.MRP)
return self.fake_atv.app
@unittest_run_loop
async def test_pairing_with_device(self):
self.handle = await pyatv.pair(self.conf, Protocol.MRP, self.loop)
self.assertIsNone(self.service.credentials)
self.assertTrue(self.handle.device_provides_pin)
await self.handle.begin()
self.handle.pin(PIN_CODE)
await self.handle.finish()
self.assertTrue(self.handle.has_paired)
self.assertTrue(self.state.has_paired)
self.assertIsNotNone(self.service.credentials)
@unittest_run_loop
async def test_pairing_with_existing_credentials(self):
self.service.credentials = CLIENT_CREDENTIALS
self.handle = await pyatv.pair(self.conf, Protocol.MRP, self.loop)
self.assertFalse(self.handle.has_paired)
self.assertIsNotNone(self.service.credentials)
self.assertTrue(self.handle.device_provides_pin)
await self.handle.begin()
self.handle.pin(PIN_CODE)
await self.handle.finish()
self.assertTrue(self.handle.has_paired)
self.assertTrue(self.state.has_paired)
self.assertIsNotNone(self.service.credentials)
@unittest_run_loop
async def test_pairing_with_bad_pin(self):
self.handle = await pyatv.pair(self.conf, Protocol.MRP, self.loop)
self.assertIsNone(self.service.credentials)
self.assertTrue(self.handle.device_provides_pin)
await self.handle.begin()
self.handle.pin(PIN_CODE + 1)
with self.assertRaises(exceptions.PairingError):
await self.handle.finish()
self.assertFalse(self.handle.has_paired)
self.assertFalse(self.state.has_paired)
self.assertIsNone(self.service.credentials)
@unittest_run_loop
async def test_authentication(self):
self.service.credentials = CLIENT_CREDENTIALS
self.handle = await pyatv.connect(self.conf, self.loop)
self.assertTrue(self.state.has_authenticated)
| 32.114583
| 81
| 0.705157
| 376
| 3,083
| 5.632979
| 0.210106
| 0.108593
| 0.083097
| 0.056657
| 0.565156
| 0.526912
| 0.526912
| 0.485836
| 0.467894
| 0.444759
| 0
| 0.002848
| 0.202725
| 3,083
| 95
| 82
| 32.452632
| 0.858828
| 0.01784
| 0
| 0.477612
| 0
| 0
| 0.005625
| 0
| 0
| 0
| 0
| 0
| 0.268657
| 1
| 0.014925
| false
| 0
| 0.119403
| 0
| 0.164179
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5da817172273224f419b42630ca0117dc06b1363
| 5,158
|
py
|
Python
|
curvpack/utils.py
|
AbhilashReddyM/curvpack
|
74351624ec9ec50ec4445c7be85a48a4eabb029a
|
[
"BSD-3-Clause"
] | 8
|
2019-04-30T19:31:57.000Z
|
2022-02-25T14:50:56.000Z
|
curvpack/utils.py
|
AbhilashReddyM/curvpack
|
74351624ec9ec50ec4445c7be85a48a4eabb029a
|
[
"BSD-3-Clause"
] | 1
|
2019-06-14T06:32:40.000Z
|
2019-06-14T18:26:01.000Z
|
curvpack/utils.py
|
AbhilashReddyM/curvpack
|
74351624ec9ec50ec4445c7be85a48a4eabb029a
|
[
"BSD-3-Clause"
] | 3
|
2020-04-18T10:13:55.000Z
|
2022-02-02T03:53:04.000Z
|
import numpy as np
# The first two functions are modified from MNE surface project. LIcense follows
# This software is OSI Certified Open Source Software. OSI Certified is a certification mark of the Open Source Initiative.
#
# Copyright (c) 2011-2019, authors of MNE-Python. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# Neither the names of MNE-Python authors nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# This software is provided by the copyright holders and contributors "as is" and any express or implied warranties, including, but not limited to, the implied warranties of merchantability and fitness for a particular purpose are disclaimed. In no event shall the copyright owner or contributors be liable for any direct, indirect, incidental, special, exemplary, or consequential damages (including, but not limited to, procurement of substitute goods or services; loss of use, data, or profits; or business interruption) however caused and on any theory of liability, whether in contract, strict liability, or tort (including negligence or otherwise) arising in any way out of the use of this software, even if advised of the possibility of such damage.
def triangle_neighbors(tris, npts):
"""Efficiently compute vertex neighboring triangles.
Returns the triangles in the 1-ring of a given vertex
"""
# this code replaces the following, but is faster (vectorized):
#
# this['neighbor_tri'] = [list() for _ in xrange(this['np'])]
# for p in xrange(this['ntri']):
# verts = this['tris'][p]
# this['neighbor_tri'][verts[0]].append(p)
# this['neighbor_tri'][verts[1]].append(p)
# this['neighbor_tri'][verts[2]].append(p)
# this['neighbor_tri'] = [np.array(nb, int) for nb in this['neighbor_tri']]
#
verts = tris.ravel()
counts = np.bincount(verts, minlength=npts)
reord = np.argsort(verts)
tri_idx = np.unravel_index(reord, (len(tris), 3))[0]
idx = np.cumsum(np.r_[0, counts])
# the sort below slows it down a bit, but is needed for equivalence
neighbor_tri = np.array([np.sort(tri_idx[v1:v2])
for v1, v2 in zip(idx[:-1], idx[1:])])
return neighbor_tri
def get_surf_neighbors(tris,neighbor_tri, k):
"""Get vertices of 1-ring
"""
verts = tris[neighbor_tri[k]]
verts = np.setdiff1d(verts, [k], assume_unique=False)
nneighbors = len(verts)
return verts
def GetVertexNormals(vertices,faces,FaceNormals,e0,e1,e2):
"""
INPUT:
Vertices : vertices
Faces : vertex connectivity
FaceNormals : Outer Normal per face, having magnitude equal to area of face
e0,e1,e2 : edge vectors
OUTPUT:
VertNormals : Unit normal at the vertex
"""
VertNormals =np.zeros(vertices.shape)
#edge lengths
de0=np.sqrt(e0[:,0]**2+e0[:,1]**2+e0[:,2]**2)
de1=np.sqrt(e1[:,0]**2+e1[:,1]**2+e1[:,2]**2)
de2=np.sqrt(e2[:,0]**2+e2[:,1]**2+e2[:,2]**2)
L2=np.c_[de0**2,de1**2,de2**2]
#Calculate weights according to N.Max [1999] for normals
wfv1=FaceNormals/(L2[:,1]*L2[:,2])[:,np.newaxis]
wfv2=FaceNormals/(L2[:,2]*L2[:,0])[:,np.newaxis]
wfv3=FaceNormals/(L2[:,0]*L2[:,1])[:,np.newaxis]
# #Calculate the weights according to MWA for normals
# wfv1=FaceNormals*np.arcsin(2*Af/(de1*de2))[:,np.newaxis]
# wfv2=FaceNormals*np.arcsin(2*Af/(de2*de0))[:,np.newaxis]
# wfv3=FaceNormals*np.arcsin(2*Af/(de0*de1))[:,np.newaxis]
verts=faces.T[0]
for j in [0,1,2]:
VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv1[:,j])
verts=faces.T[1]
for j in [0,1,2]:
VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv2[:,j])
verts=faces.T[2]
for j in [0,1,2]:
VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv3[:,j])
VertNormals=normr(VertNormals)
return VertNormals
def fastcross(x, y):
"""Compute cross product between list of 3D vectors
Input
x : Mx3 array
y : Mx3 array
Output
z : Mx3 array Cross product of x and y.
"""
if max([x.shape[0], y.shape[0]]) >= 500:
return np.c_[x[:, 1] * y[:, 2] - x[:, 2] * y[:, 1],
x[:, 2] * y[:, 0] - x[:, 0] * y[:, 2],
x[:, 0] * y[:, 1] - x[:, 1] * y[:, 0]]
else:
return np.cross(x, y)
def normr(vec):
"""
Normalizes an array of vectors. e.g. to convert a np array of vectors to unit vectors
"""
return vec/np.sqrt((vec**2).sum(axis=1))[:,np.newaxis]
| 44.852174
| 757
| 0.65917
| 773
| 5,158
| 4.371281
| 0.347995
| 0.032554
| 0.026635
| 0.018941
| 0.16129
| 0.114827
| 0.098846
| 0.098846
| 0.098846
| 0.098846
| 0
| 0.033782
| 0.208026
| 5,158
| 114
| 758
| 45.245614
| 0.79339
| 0.594029
| 0
| 0.068182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.113636
| false
| 0
| 0.022727
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5da84607f0ca3d7ead02486a100adef7e245823f
| 14,805
|
py
|
Python
|
tests/unit/core/streams/test_stream_zero.py
|
tethys-platform/tethys
|
c27daf5a832b05f9d771b04355001c331bc08766
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2020-05-20T19:03:14.000Z
|
2020-06-03T20:43:34.000Z
|
tests/unit/core/streams/test_stream_zero.py
|
tethys-platform/tethys
|
c27daf5a832b05f9d771b04355001c331bc08766
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
tests/unit/core/streams/test_stream_zero.py
|
tethys-platform/tethys
|
c27daf5a832b05f9d771b04355001c331bc08766
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Konstruktor, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import platform
import time
from unittest import mock
from unittest.mock import patch, call
from pytest import fixture
from tethys.core.pipes.pipe_zero import ZeroPipe
from tethys.core.sessions.sess_zero import ZeroSession
from tethys.core.stations.station_zero import ZeroStation
from tethys.core.streams.stream_zero import ZeroStream
from tethys.core.transports.transport_zero import ZeroTransport
class MockTransport(ZeroTransport):
def __init__(self):
pass
connect = mock.MagicMock()
disconnect = mock.MagicMock()
class MockSession(ZeroSession):
closing_mode = None
def __init__(self):
self._closed = False
@property
def closed(self):
return self._closed
class MockStation(ZeroStation):
def __init__(self):
pass
class TestZeroStream:
@staticmethod
def teardown_method():
MockTransport.connect.reset_mock()
MockTransport.disconnect.reset_mock()
@fixture
def pipe(self):
pipe = mock.MagicMock(spec=ZeroPipe)
return pipe
@fixture
def session(self):
session = MockSession()
return session
@fixture
def transport(self):
return MockTransport()
@fixture
def station(self):
return MockStation()
@fixture
def stream(self, pipe, session, transport):
return ZeroStream(pipe, session, transport)
# init
def test_init_with_transport_cb(self, pipe, session, transport):
def get_transport(_):
return transport
get_transport = mock.MagicMock(side_effect=get_transport)
stream = ZeroStream(pipe, session, get_transport)
assert stream.transport == transport
# conn context
def test_new_connection_context(self, stream):
with stream.connection_context():
MockTransport.connect.assert_called_once_with(stream)
MockTransport.disconnect.assert_not_called()
MockTransport.disconnect.assert_called_once_with(stream)
def test_old_connection_context(self, stream):
MockTransport._connections[stream.id] = stream
with stream.connection_context():
MockTransport.connect.assert_not_called()
MockTransport.disconnect.assert_not_called()
# heartbeat
def test_heartbeat_fail_delay(self, stream):
assert stream.heartbeat_fail_delay == stream.DEFAULT_HEARTBEAT_FAIL_DELAY
stream.station = mock.MagicMock(spec=ZeroStation)
stream.station.heartbeat_fail_delay = 0
assert stream.heartbeat_fail_delay == stream.DEFAULT_HEARTBEAT_FAIL_DELAY
stream.station.heartbeat_fail_delay = 12345
assert stream.heartbeat_fail_delay == 12345
def test_busy_false(self, stream):
stream.refresh = mock.MagicMock()
stream.station = mock.MagicMock(spec=ZeroStation)
stream.station.heartbeat_fail_delay = 1
stream.heartbeat_ts = time.time() - 10
assert stream.is_busy is False
assert stream.refresh.call_count == 1
def test_busy_true(self, stream):
stream.refresh = mock.MagicMock()
stream.station = mock.MagicMock(spec=ZeroStation)
stream.station.heartbeat_fail_delay = 1000
stream.heartbeat_ts = time.time()
assert stream.is_busy is True
assert stream.refresh.call_count == 1
def test_heartbeat(self, stream):
stream.save = mock.MagicMock()
with patch("time.time", lambda: 12345):
stream.heartbeat()
assert stream.heartbeat_ts == 12345
stream.save.assert_called_once_with(save_dependency=False)
# open
def test_open(self, stream):
stream.save = mock.MagicMock()
stream.closed = True
assert stream.open() is stream
assert stream.closed is False
stream.save.assert_called_once_with(save_dependency=False)
def test_open_no_commit(self, stream):
stream.save = mock.MagicMock()
stream.closed = True
assert stream.open(save=False) is stream
assert stream.closed is False
stream.save.assert_not_called()
# close
def test_close(self, stream):
stream.save = mock.MagicMock()
assert stream.close() is stream
assert stream.closed is True
stream.save.assert_called_once_with(save_dependency=False)
def test_close_no_commit(self, stream):
stream.save = mock.MagicMock()
assert stream.close(save=False) is stream
assert stream.closed is True
stream.save.assert_not_called()
# read
def test_read(self, stream):
data = ["packet", 0, {}, "", None] + [None, "packet"] * 5
result_data = list(filter(lambda x: x is not None, data))
iter_data = iter(data)
def recv_cb(*_, **__):
try:
return next(iter_data)
except StopIteration:
return ...
connection_context = mock.MagicMock()
stream.connection_context = mock.MagicMock(
side_effect=lambda: connection_context
)
stream.transport.recv = mock.MagicMock(side_effect=recv_cb)
result = []
for item in stream.read(test_kw=1):
if item is ...:
break
result.append(item)
if platform.python_implementation().lower() == "pypy":
gc.collect()
assert result == result_data
assert stream.connection_context.call_count == 1
assert connection_context.__enter__.call_count == 1
assert connection_context.__exit__.call_count == 1
stream.transport.recv.assert_has_calls(
[call(stream, wait_timeout=None, test_kw=1) for _ in data]
)
def test_read_n_packets(self, stream):
iter_data = iter([None, "packet"] + ["packet"] * 10)
def recv_cb(*_, **__):
try:
return next(iter_data)
except StopIteration:
return ...
connection_context = mock.MagicMock()
stream.connection_context = mock.MagicMock(
side_effect=lambda: connection_context
)
stream.transport.recv = mock.MagicMock(side_effect=recv_cb)
result = []
for item in stream.read(count=5, test_kw=1):
if item is ...:
break
result.append(item)
assert result == ["packet"] * 5
assert stream.connection_context.call_count == 1
assert connection_context.__enter__.call_count == 1
assert connection_context.__exit__.call_count == 1
stream.transport.recv.assert_has_calls(
[call(stream, wait_timeout=None, test_kw=1) for _ in range(6)]
)
def test_read_while_stream_open(self, stream):
iter_data = iter(range(10))
def recv_cb(*_, **__):
try:
return next(iter_data)
except StopIteration:
return ...
connection_context = mock.MagicMock()
stream.connection_context = mock.MagicMock(
side_effect=lambda: connection_context
)
stream.transport.recv = mock.MagicMock(side_effect=recv_cb)
result = []
for item in stream.read(test_kw=1):
if item == 4:
stream.closed = True
if item is ...:
break
result.append(item)
assert result == list(range(5))
assert stream.connection_context.call_count == 1
assert connection_context.__enter__.call_count == 1
assert connection_context.__exit__.call_count == 1
stream.transport.recv.assert_has_calls(
[call(stream, wait_timeout=None, test_kw=1) for _ in range(5)]
)
def test_read_while_sess_open(self, stream):
stream.session._closed = True
iter_data = iter([0, 1, 2, 3, None, 4])
def recv_cb(*_, **__):
try:
return next(iter_data)
except StopIteration:
return ...
connection_context = mock.MagicMock()
stream.connection_context = mock.MagicMock(
side_effect=lambda: connection_context
)
stream.transport.recv = mock.MagicMock(side_effect=recv_cb)
result = []
for item in stream.read(test_kw=1):
if item is ...:
break
result.append(item)
assert result == list(range(4))
assert stream.connection_context.call_count == 1
assert connection_context.__enter__.call_count == 1
assert connection_context.__exit__.call_count == 1
stream.transport.recv.assert_has_calls(
[call(stream, wait_timeout=None, test_kw=1) for _ in range(5)]
)
def test_read_when_station_changed(self, stream, station):
iter_data = iter(range(10))
def recv_cb(*_, **__):
try:
return next(iter_data)
except StopIteration:
return ...
connection_context = mock.MagicMock()
stream.connection_context = mock.MagicMock(
side_effect=lambda: connection_context
)
stream.transport.recv = mock.MagicMock(side_effect=recv_cb)
result = []
for item in stream.read(test_kw=1):
if item == 4:
stream.station = station
if item is ...:
break
result.append(item)
assert result == list(range(5))
assert stream.connection_context.call_count == 1
assert connection_context.__enter__.call_count == 1
assert connection_context.__exit__.call_count == 1
stream.transport.recv.assert_has_calls(
[call(stream, wait_timeout=None, test_kw=1) for _ in range(5)]
)
def test_read_none(self, stream):
iter_data = iter([None, "packet"] + ["packet"] * 10)
def recv_cb(*_, **__):
try:
return next(iter_data)
except StopIteration:
return ...
connection_context = mock.MagicMock()
stream.connection_context = mock.MagicMock(
side_effect=lambda: connection_context
)
stream.transport.recv = mock.MagicMock(side_effect=recv_cb)
result = []
for item in stream.read(wait_timeout=1, test_kw=1):
if item is ...:
break
result.append(item)
assert result == []
assert stream.connection_context.call_count == 1
assert connection_context.__enter__.call_count == 1
assert connection_context.__exit__.call_count == 1
stream.transport.recv.assert_called_once_with(stream, wait_timeout=1, test_kw=1)
# write
def test_write(self, stream):
connection_context = mock.MagicMock()
stream.connection_context = mock.MagicMock(
side_effect=lambda: connection_context
)
stream.transport.send = mock.MagicMock()
stream.write("packet", test_kw=1)
stream.transport.send.assert_called_once_with(stream, "packet", test_kw=1)
assert stream.connection_context.call_count == 1
assert connection_context.__enter__.call_count == 1
assert connection_context.__exit__.call_count == 1
def test_write_many(self, stream):
connection_context = mock.MagicMock()
stream.connection_context = mock.MagicMock(
side_effect=lambda: connection_context
)
stream.transport.send = mock.MagicMock()
stream.write("packet", many=True, test_kw=1)
stream.transport.send.assert_has_calls(
[call(stream, i, test_kw=1) for i in "packet"]
)
assert stream.connection_context.call_count == 1
assert connection_context.__enter__.call_count == 1
assert connection_context.__exit__.call_count == 1
def test_write_when_closed(self, stream):
connection_context = mock.MagicMock()
stream.connection_context = mock.MagicMock(
side_effect=lambda: connection_context
)
stream.transport.send = mock.MagicMock()
stream.closed = True
stream.write("packet", test_kw=1)
stream.transport.send.assert_not_called()
stream.connection_context.assert_not_called()
connection_context.__enter__.assert_not_called()
connection_context.__exit__.assert_not_called()
def test_write_out(self, stream):
connection_context = mock.MagicMock()
stream.connection_context = mock.MagicMock(
side_effect=lambda: connection_context
)
stream.transport.send = mock.MagicMock()
stream.closed = True
stream.pipe.node_b = "<out>"
stream.write("packet", test_kw=1)
stream.transport.send.assert_called_once_with(stream, "packet", test_kw=1)
assert stream.connection_context.call_count == 1
assert connection_context.__enter__.call_count == 1
assert connection_context.__exit__.call_count == 1
# ack
def test_ack(self, stream):
stream.transport.ack = mock.MagicMock()
stream.ack("message", test_kw=1)
stream.transport.ack.assert_called_once_with(stream, "message", test_kw=1)
def test_ack_closed(self, stream):
stream.closed = True
stream.transport.ack = mock.MagicMock()
stream.ack("message", test_kw=1)
stream.transport.ack.assert_not_called()
# redirect
def test_redirect(self, stream, station):
station.save = mock.MagicMock()
station.stream_lock_ttl = 0
stream.save = mock.MagicMock()
stream.redirect_to(station)
assert stream.station == station
station.save.assert_called_once_with(save_dependency=False)
stream.save.assert_called_once_with(save_dependency=False)
# open/close context
def test_context(self, stream):
stream.open = mock.MagicMock()
stream.close = mock.MagicMock()
with stream:
stream.open.assert_called_once_with(save=False)
stream.close.assert_not_called()
stream.close.assert_called_once_with(save=False)
| 30.779626
| 88
| 0.640662
| 1,710
| 14,805
| 5.277193
| 0.119883
| 0.120567
| 0.032137
| 0.066489
| 0.687722
| 0.644614
| 0.622008
| 0.618462
| 0.589428
| 0.578347
| 0
| 0.010725
| 0.269436
| 14,805
| 480
| 89
| 30.84375
| 0.823595
| 0.044377
| 0
| 0.578635
| 0
| 0
| 0.008709
| 0
| 0
| 0
| 0
| 0
| 0.240356
| 1
| 0.124629
| false
| 0.005935
| 0.032641
| 0.014837
| 0.234421
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5da9e91d7d69e5378260fe7c404a58e9aa312b9e
| 3,101
|
py
|
Python
|
cornflow/tests/unit/test_dags.py
|
pchtsp/corn
|
2811ad400f3f3681a159984eabf4fee1fc99b433
|
[
"MIT"
] | 5
|
2021-11-24T02:43:22.000Z
|
2021-12-10T09:28:32.000Z
|
cornflow/tests/unit/test_dags.py
|
pchtsp/corn
|
2811ad400f3f3681a159984eabf4fee1fc99b433
|
[
"MIT"
] | 125
|
2021-09-01T12:06:48.000Z
|
2022-03-30T11:32:57.000Z
|
cornflow/tests/unit/test_dags.py
|
pchtsp/corn
|
2811ad400f3f3681a159984eabf4fee1fc99b433
|
[
"MIT"
] | 1
|
2022-03-23T17:57:59.000Z
|
2022-03-23T17:57:59.000Z
|
"""
Unit test for the DAG endpoints
"""
# Import from libraries
import json
# Import from internal modules
from cornflow.shared.const import EXEC_STATE_CORRECT, EXEC_STATE_MANUAL
from cornflow.tests.const import (
DAG_URL,
EXECUTION_URL_NORUN,
CASE_PATH,
INSTANCE_URL,
)
from cornflow.tests.unit.test_executions import TestExecutionsDetailEndpointMock
class TestDagEndpoint(TestExecutionsDetailEndpointMock):
def test_manual_dag_service_user(self):
with open(CASE_PATH) as f:
payload = json.load(f)
data = dict(
data=payload["data"],
state=EXEC_STATE_MANUAL,
)
payload_to_send = {**self.payload, **data}
token = self.create_service_user()
self.items_to_check = [
"config",
"name",
"description",
"schema",
"instance_id",
"state",
]
idx = self.create_new_row(
url=DAG_URL,
model=self.model,
payload=payload_to_send,
check_payload=True,
token=token,
)
def test_manual_dag_planner_user(self):
with open(CASE_PATH) as f:
payload = json.load(f)
data = dict(
data=payload["data"],
state=EXEC_STATE_MANUAL,
)
payload_to_send = {**self.payload, **data}
token = self.create_planner()
self.items_to_check = [
"config",
"name",
"description",
"schema",
"instance_id",
"state",
]
idx = self.create_new_row(
url=DAG_URL,
model=self.model,
payload=payload_to_send,
check_payload=True,
token=token,
)
class TestDagDetailEndpoint(TestExecutionsDetailEndpointMock):
def test_put_dag(self):
idx = self.create_new_row(EXECUTION_URL_NORUN, self.model, self.payload)
with open(CASE_PATH) as f:
payload = json.load(f)
data = dict(
data=payload["data"],
state=EXEC_STATE_CORRECT,
)
payload_to_check = {**self.payload, **data}
token = self.create_service_user()
data = self.update_row(
url=DAG_URL + idx + "/",
payload_to_check=payload_to_check,
change=data,
token=token,
check_payload=False,
)
def test_get_dag(self):
idx = self.create_new_row(EXECUTION_URL_NORUN, self.model, self.payload)
token = self.create_service_user()
data = self.get_one_row(
url=DAG_URL + idx + "/",
token=token,
check_payload=False,
payload=self.payload,
)
instance_data = self.get_one_row(
url=INSTANCE_URL + self.payload["instance_id"] + "/data/",
payload=dict(),
check_payload=False,
)
self.assertEqual(data["data"], instance_data["data"])
self.assertEqual(data["config"], self.payload["config"])
return
| 27.936937
| 80
| 0.563689
| 330
| 3,101
| 5.030303
| 0.206061
| 0.053012
| 0.031325
| 0.038554
| 0.573494
| 0.526506
| 0.507229
| 0.481928
| 0.450602
| 0.450602
| 0
| 0
| 0.335698
| 3,101
| 110
| 81
| 28.190909
| 0.805825
| 0.026766
| 0
| 0.580645
| 0
| 0
| 0.04553
| 0
| 0
| 0
| 0
| 0
| 0.021505
| 1
| 0.043011
| false
| 0
| 0.043011
| 0
| 0.11828
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5daa62bfbc58bf60d68725712bf46468f85577d3
| 10,673
|
py
|
Python
|
nets/resnet.py
|
xwshi/faster-rcnn-keras
|
bfd99e3d0e786ada75a212c007111364b2c86312
|
[
"MIT"
] | null | null | null |
nets/resnet.py
|
xwshi/faster-rcnn-keras
|
bfd99e3d0e786ada75a212c007111364b2c86312
|
[
"MIT"
] | null | null | null |
nets/resnet.py
|
xwshi/faster-rcnn-keras
|
bfd99e3d0e786ada75a212c007111364b2c86312
|
[
"MIT"
] | null | null | null |
#-------------------------------------------------------------#
# ResNet50的网络部分
#-------------------------------------------------------------#
import keras.backend as K
from keras import backend as K
from keras import initializers, layers, regularizers
from keras.engine import InputSpec, Layer
from keras.initializers import random_normal
from keras.layers import (Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed,
ZeroPadding2D)
class BatchNormalization(Layer):
def __init__(self, epsilon=1e-3, axis=-1,
weights=None, beta_init='zero', gamma_init='one',
gamma_regularizer=None, beta_regularizer=None, **kwargs):
self.supports_masking = True
self.beta_init = initializers.get(beta_init)
self.gamma_init = initializers.get(gamma_init)
self.epsilon = epsilon
self.axis = axis
self.gamma_regularizer = regularizers.get(gamma_regularizer)
self.beta_regularizer = regularizers.get(beta_regularizer)
self.initial_weights = weights
super(BatchNormalization, self).__init__(**kwargs)
def build(self, input_shape):
self.input_spec = [InputSpec(shape=input_shape)]
shape = (input_shape[self.axis],)
self.gamma = self.add_weight(shape,
initializer=self.gamma_init,
regularizer=self.gamma_regularizer,
name='{}_gamma'.format(self.name),
trainable=False)
self.beta = self.add_weight(shape,
initializer=self.beta_init,
regularizer=self.beta_regularizer,
name='{}_beta'.format(self.name),
trainable=False)
self.running_mean = self.add_weight(shape, initializer='zero',
name='{}_running_mean'.format(self.name),
trainable=False)
self.running_std = self.add_weight(shape, initializer='one',
name='{}_running_std'.format(self.name),
trainable=False)
if self.initial_weights is not None:
self.set_weights(self.initial_weights)
del self.initial_weights
self.built = True
def call(self, x, mask=None):
assert self.built, 'Layer must be built before being called'
input_shape = K.int_shape(x)
reduction_axes = list(range(len(input_shape)))
del reduction_axes[self.axis]
broadcast_shape = [1] * len(input_shape)
broadcast_shape[self.axis] = input_shape[self.axis]
if sorted(reduction_axes) == range(K.ndim(x))[:-1]:
x_normed = K.batch_normalization(
x, self.running_mean, self.running_std,
self.beta, self.gamma,
epsilon=self.epsilon)
else:
broadcast_running_mean = K.reshape(self.running_mean, broadcast_shape)
broadcast_running_std = K.reshape(self.running_std, broadcast_shape)
broadcast_beta = K.reshape(self.beta, broadcast_shape)
broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
x_normed = K.batch_normalization(
x, broadcast_running_mean, broadcast_running_std,
broadcast_beta, broadcast_gamma,
epsilon=self.epsilon)
return x_normed
def get_config(self):
config = {'epsilon': self.epsilon,
'axis': self.axis,
'gamma_regularizer': self.gamma_regularizer.get_config() if self.gamma_regularizer else None,
'beta_regularizer': self.beta_regularizer.get_config() if self.beta_regularizer else None}
base_config = super(BatchNormalization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def identity_block(input_tensor, kernel_size, filters, stage, block):
filters1, filters2, filters3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = Conv2D(filters1, (1, 1), kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '2a')(input_tensor)
x = BatchNormalization(name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = Conv2D(filters2, kernel_size, padding='same', kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '2b')(x)
x = BatchNormalization(name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = Conv2D(filters3, (1, 1), kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '2c')(x)
x = BatchNormalization(name=bn_name_base + '2c')(x)
x = layers.add([x, input_tensor])
x = Activation('relu')(x)
return x
def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):
filters1, filters2, filters3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = Conv2D(filters1, (1, 1), strides=strides, kernel_initializer=random_normal(stddev=0.02),
name=conv_name_base + '2a')(input_tensor)
x = BatchNormalization(name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = Conv2D(filters2, kernel_size, padding='same', kernel_initializer=random_normal(stddev=0.02),
name=conv_name_base + '2b')(x)
x = BatchNormalization(name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = Conv2D(filters3, (1, 1), kernel_initializer=random_normal(stddev=0.02),
name=conv_name_base + '2c')(x)
x = BatchNormalization(name=bn_name_base + '2c')(x)
shortcut = Conv2D(filters3, (1, 1), strides=strides, kernel_initializer=random_normal(stddev=0.02),
name=conv_name_base + '1')(input_tensor)
shortcut = BatchNormalization(name=bn_name_base + '1')(shortcut)
x = layers.add([x, shortcut])
x = Activation('relu')(x)
return x
def ResNet50(inputs):
#-----------------------------------#
# 假设输入进来的图片是600,600,3
#-----------------------------------#
img_input = inputs
# 600,600,3 -> 300,300,64
x = ZeroPadding2D((3, 3))(img_input)
x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x)
x = BatchNormalization(name='bn_conv1')(x)
x = Activation('relu')(x)
# 300,300,64 -> 150,150,64
x = MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
# 150,150,64 -> 150,150,256
x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
# 150,150,256 -> 75,75,512
x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
# 75,75,512 -> 38,38,1024
x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')
# 最终获得一个38,38,1024的共享特征层
return x
def identity_block_td(input_tensor, kernel_size, filters, stage, block):
nb_filter1, nb_filter2, nb_filter3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = TimeDistributed(Conv2D(nb_filter1, (1, 1), kernel_initializer='normal'), name=conv_name_base + '2a')(input_tensor)
x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = TimeDistributed(Conv2D(nb_filter2, (kernel_size, kernel_size), kernel_initializer='normal',padding='same'), name=conv_name_base + '2b')(x)
x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = TimeDistributed(Conv2D(nb_filter3, (1, 1), kernel_initializer='normal'), name=conv_name_base + '2c')(x)
x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2c')(x)
x = Add()([x, input_tensor])
x = Activation('relu')(x)
return x
def conv_block_td(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):
nb_filter1, nb_filter2, nb_filter3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = TimeDistributed(Conv2D(nb_filter1, (1, 1), strides=strides, kernel_initializer='normal'), name=conv_name_base + '2a')(input_tensor)
x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = TimeDistributed(Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same', kernel_initializer='normal'), name=conv_name_base + '2b')(x)
x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = TimeDistributed(Conv2D(nb_filter3, (1, 1), kernel_initializer='normal'), name=conv_name_base + '2c')(x)
x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2c')(x)
shortcut = TimeDistributed(Conv2D(nb_filter3, (1, 1), strides=strides, kernel_initializer='normal'), name=conv_name_base + '1')(input_tensor)
shortcut = TimeDistributed(BatchNormalization(), name=bn_name_base + '1')(shortcut)
x = Add()([x, shortcut])
x = Activation('relu')(x)
return x
def classifier_layers(x):
# num_rois, 14, 14, 1024 -> num_rois, 7, 7, 2048
x = conv_block_td(x, 3, [512, 512, 2048], stage=5, block='a', strides=(2, 2))
# num_rois, 7, 7, 2048 -> num_rois, 7, 7, 2048
x = identity_block_td(x, 3, [512, 512, 2048], stage=5, block='b')
# num_rois, 7, 7, 2048 -> num_rois, 7, 7, 2048
x = identity_block_td(x, 3, [512, 512, 2048], stage=5, block='c')
# num_rois, 7, 7, 2048 -> num_rois, 1, 1, 2048
x = TimeDistributed(AveragePooling2D((7, 7)), name='avg_pool')(x)
return x
| 45.417021
| 148
| 0.599082
| 1,345
| 10,673
| 4.554647
| 0.117472
| 0.047013
| 0.03526
| 0.036565
| 0.629122
| 0.597127
| 0.552726
| 0.531832
| 0.501632
| 0.480901
| 0
| 0.058831
| 0.246697
| 10,673
| 234
| 149
| 45.611111
| 0.703109
| 0.052375
| 0
| 0.317365
| 0
| 0
| 0.042596
| 0
| 0
| 0
| 0
| 0
| 0.005988
| 1
| 0.05988
| false
| 0
| 0.035928
| 0
| 0.149701
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5daa836b2adc23c7d1169d134b820b47732f82c0
| 1,309
|
py
|
Python
|
app/api/deps.py
|
congdh/fastapi-realworld
|
42c8630aedf594b69bc96a327b04dfe636a785fe
|
[
"MIT"
] | null | null | null |
app/api/deps.py
|
congdh/fastapi-realworld
|
42c8630aedf594b69bc96a327b04dfe636a785fe
|
[
"MIT"
] | null | null | null |
app/api/deps.py
|
congdh/fastapi-realworld
|
42c8630aedf594b69bc96a327b04dfe636a785fe
|
[
"MIT"
] | null | null | null |
from typing import Generator
from fastapi import Depends, HTTPException
from fastapi.security import APIKeyHeader
from sqlalchemy.orm import Session
from starlette import status
from app import crud, models
from app.core import security
from app.db.session import SessionLocal
JWT_TOKEN_PREFIX = "Token" # noqa: S105
def get_db() -> Generator:
db = SessionLocal()
try:
yield db
finally:
db.close()
def authrization_heder_token(
api_key: str = Depends(APIKeyHeader(name="Authorization")),
) -> str:
try:
token_prefix, token = api_key.split(" ")
except ValueError:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
detail="unsupported authorization type",
)
if token_prefix != JWT_TOKEN_PREFIX:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
detail="unsupported authorization type",
)
return token
async def get_current_user(
token: str = Depends(authrization_heder_token), db: Session = Depends(get_db)
) -> models.User:
user_id = security.get_user_id_from_token(token=token)
user = crud.user.get_user_by_id(db, int(user_id))
if not user:
raise HTTPException(status_code=404, detail="User not found")
return user
| 26.714286
| 81
| 0.694423
| 163
| 1,309
| 5.380368
| 0.368098
| 0.050171
| 0.082098
| 0.095781
| 0.191562
| 0.191562
| 0.191562
| 0.191562
| 0.191562
| 0.191562
| 0
| 0.011823
| 0.224599
| 1,309
| 48
| 82
| 27.270833
| 0.852217
| 0.007639
| 0
| 0.205128
| 0
| 0
| 0.071704
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051282
| false
| 0
| 0.205128
| 0
| 0.307692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5daaea4f5cbe880e71d3bbf0f6ec12e332c717ab
| 2,343
|
py
|
Python
|
src/raiden_libs/contract_info.py
|
netcriptus/raiden-services
|
3955d91852c616f6ba0a3a979757edbd852b2c6d
|
[
"MIT"
] | 13
|
2019-02-07T23:23:33.000Z
|
2021-07-03T16:00:53.000Z
|
src/raiden_libs/contract_info.py
|
netcriptus/raiden-services
|
3955d91852c616f6ba0a3a979757edbd852b2c6d
|
[
"MIT"
] | 1,095
|
2019-01-21T09:30:57.000Z
|
2022-03-25T05:13:30.000Z
|
src/raiden_libs/contract_info.py
|
netcriptus/raiden-services
|
3955d91852c616f6ba0a3a979757edbd852b2c6d
|
[
"MIT"
] | 18
|
2019-01-21T09:17:19.000Z
|
2022-02-23T15:53:17.000Z
|
import sys
from typing import Dict, List, Tuple
import structlog
from eth_utils import to_canonical_address
from raiden.utils.typing import Address, BlockNumber, ChainID, Optional
from raiden_contracts.contract_manager import (
ContractDevEnvironment,
ContractManager,
contracts_precompiled_path,
get_contracts_deployment_info,
)
log = structlog.get_logger(__name__)
CONTRACT_MANAGER = ContractManager(contracts_precompiled_path())
def get_contract_addresses_and_start_block(
chain_id: ChainID,
contracts: List[str],
address_overwrites: Dict[str, Address],
development_environment: ContractDevEnvironment = ContractDevEnvironment.DEMO,
contracts_version: Optional[str] = None,
) -> Tuple[Dict[str, Address], BlockNumber]:
"""Returns contract addresses and start query block for a given chain and contracts version.
The default contracts can be overwritten by the additional parameters.
Args:
chain_id: The chain id to look for deployed contracts.
contracts: The list of contracts which should be considered
address_overwrites: Dict of addresses which should be used instead of
the ones in the requested deployment.
contracts_version: The version of the contracts to use.
Returns: A dictionary with the contract addresses and start block for the given information
"""
contract_data = get_contracts_deployment_info(
chain_id=chain_id,
version=contracts_version,
development_environment=development_environment,
)
if not contract_data:
log.error(
"No deployed contracts were found at the default registry",
contracts_version=contracts_version,
)
sys.exit(1)
# Get deployed addresses for those contracts which have no overwrites
addresses = {
c: (
address_overwrites.get(c)
or to_canonical_address(contract_data["contracts"][c]["address"])
)
for c in contracts
}
# Set start block to zero if any contract addresses are overwritten
if any(address_overwrites.values()):
start_block = BlockNumber(0)
else:
start_block = BlockNumber(
max(0, min(contract_data["contracts"][c]["block_number"] for c in contracts))
)
return addresses, start_block
| 33.956522
| 96
| 0.714042
| 278
| 2,343
| 5.830935
| 0.356115
| 0.037014
| 0.037014
| 0.046268
| 0.037014
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001651
| 0.224499
| 2,343
| 68
| 97
| 34.455882
| 0.890479
| 0.298335
| 0
| 0
| 0
| 0
| 0.058125
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022222
| false
| 0
| 0.133333
| 0
| 0.177778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dab624f1bba960c93bdbcfc0dd2115a637b7aae
| 8,749
|
py
|
Python
|
meta_dataset/models/functional_classifiers.py
|
letyrodridc/meta-dataset
|
d868ea1c767cce46fa6723f6f77c29552754fcc9
|
[
"Apache-2.0"
] | null | null | null |
meta_dataset/models/functional_classifiers.py
|
letyrodridc/meta-dataset
|
d868ea1c767cce46fa6723f6f77c29552754fcc9
|
[
"Apache-2.0"
] | null | null | null |
meta_dataset/models/functional_classifiers.py
|
letyrodridc/meta-dataset
|
d868ea1c767cce46fa6723f6f77c29552754fcc9
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2022 The Meta-Dataset Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2,python3
"""Classifier-related code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gin.tf
from meta_dataset.models import functional_backbones
import tensorflow.compat.v1 as tf
def linear_classifier_forward_pass(embeddings, w_fc, b_fc, cosine_classifier,
cosine_logits_multiplier, use_weight_norm):
"""Passes embeddings through the linear layer defined by w_fc and b_fc.
Args:
embeddings: A Tensor of size [batch size, embedding dim].
w_fc: A Tensor of size [embedding dim, num outputs].
b_fc: Either None, or a Tensor of size [num outputs] or []. If
cosine_classifier is False, it can not be None.
cosine_classifier: A bool. If true, a cosine classifier is used which does
not require the bias b_fc.
cosine_logits_multiplier: A float. Only used if cosine_classifier is True,
and multiplies the resulting logits.
use_weight_norm: A bool. Whether weight norm was used. If so, then if using
cosine classifier, normalize only the embeddings but not the weights.
Returns:
logits: A Tensor of size [batch size, num outputs].
"""
if cosine_classifier:
# Each column of the weight matrix may be interpreted as a class
# representation (of the same dimenionality as the embedding space). The
# logit for an embedding vector belonging to that class is the cosine
# similarity between that embedding and that class representation.
embeddings = tf.nn.l2_normalize(embeddings, axis=1, epsilon=1e-3)
if not use_weight_norm:
# Only normalize the weights if weight norm was not used.
w_fc = tf.nn.l2_normalize(w_fc, axis=0, epsilon=1e-3)
logits = tf.matmul(embeddings, w_fc)
# Scale the logits as passing numbers in [-1, 1] to softmax is not very
# expressive.
logits *= cosine_logits_multiplier
else:
assert b_fc is not None
logits = tf.matmul(embeddings, w_fc) + b_fc
return logits
@gin.configurable
def linear_classifier(embeddings, num_classes, cosine_classifier,
cosine_logits_multiplier, use_weight_norm, weight_decay):
"""Forward pass through a linear classifier, or possibly a cosine classifier.
Args:
embeddings: A Tensor of size [batch size, embedding dim].
num_classes: An integer; the dimension of the classification.
cosine_classifier: A bool. If true, a cosine classifier is used, which does
not require a bias.
cosine_logits_multiplier: A float. Only used if cosine_classifier is True,
and multiplies the resulting logits.
use_weight_norm: A bool. Whether weight norm was used. If so, then if using
cosine classifier, normalize only the embeddings but not the weights.
weight_decay: A float; the scalar multiple on the L2 regularization of the
weight matrix.
Returns:
logits: A Tensor of size [batch size, num outputs].
"""
embedding_dims = embeddings.get_shape().as_list()[-1]
if use_weight_norm:
# A variable to keep track of whether the initialization has already
# happened.
data_dependent_init_done = tf.get_variable(
'data_dependent_init_done',
initializer=0,
dtype=tf.int32,
trainable=False)
w_fc = tf.get_variable(
'w_fc', [embedding_dims, num_classes],
initializer=tf.random_normal_initializer(0, 0.05),
trainable=True)
# This init is temporary as it needs to be done in a data-dependent way.
# It will be overwritten during the first forward pass through this layer.
g = tf.get_variable(
'g',
dtype=tf.float32,
initializer=tf.ones([num_classes]),
trainable=True)
b_fc = None
if not cosine_classifier:
# Also initialize a bias.
b_fc = tf.get_variable(
'b_fc', initializer=tf.zeros([num_classes]), trainable=True)
def _do_data_dependent_init():
"""Returns ops for the data-dependent init of g and maybe b_fc."""
w_fc_normalized = tf.nn.l2_normalize(w_fc.read_value(), [0])
output_init = tf.matmul(embeddings, w_fc_normalized)
mean_init, var_init = tf.nn.moments(output_init, [0])
# Data-dependent init values.
g_init_value = 1. / tf.sqrt(var_init + 1e-10)
ops = [tf.assign(g, g_init_value)]
if not cosine_classifier:
# Also initialize a bias in a data-dependent way.
b_fc_init_value = -mean_init * g_init_value
ops.append(tf.assign(b_fc, b_fc_init_value))
# Mark that the data-dependent initialization is done to prevent it from
# happening again in the future.
ops.append(tf.assign(data_dependent_init_done, 1))
return tf.group(*ops)
# Possibly perform data-dependent init (if it hasn't been done already).
init_op = tf.cond(
tf.equal(data_dependent_init_done, 0), _do_data_dependent_init,
tf.no_op)
with tf.control_dependencies([init_op]):
# Apply weight normalization.
w_fc *= g / tf.sqrt(tf.reduce_sum(tf.square(w_fc), [0]))
# Forward pass through the layer defined by w_fc and b_fc.
logits = linear_classifier_forward_pass(embeddings, w_fc, b_fc,
cosine_classifier,
cosine_logits_multiplier, True)
else:
# No weight norm.
w_fc = functional_backbones.weight_variable([embedding_dims, num_classes],
weight_decay=weight_decay)
b_fc = None
if not cosine_classifier:
# Also initialize a bias.
b_fc = functional_backbones.bias_variable([num_classes])
# Forward pass through the layer defined by w_fc and b_fc.
logits = linear_classifier_forward_pass(embeddings, w_fc, b_fc,
cosine_classifier,
cosine_logits_multiplier, False)
return logits
@gin.configurable
def separate_head_linear_classifier(embeddings, num_classes, dataset_idx,
start_idx, cosine_classifier,
cosine_logits_multiplier, learnable_scale,
weight_decay):
"""A linear classifier with num_sets heads, for different datasets.
Args:
embeddings: A Tensor of size [batch size, embedding dim].
num_classes: A list of integers; the dimension of the classifier layers of
the different heads.
dataset_idx: An int Tensor. The index of the dataset head to use.
start_idx: An int Tensor. The index of the first class of the given dataset.
cosine_classifier: A bool. If true, a cosine classifier is used, which does
not require a bias.
cosine_logits_multiplier: A float. Only used if cosine_classifier is True,
and multiplies the resulting logits.
learnable_scale: A bool. Whether to make the cosine_logits_multiplier a
learnable parameter. Only applies if cosine_classifier is True.
weight_decay: A float; the scalar multiple on the L2 regularization of the
weight matrix.
Returns:
logits: A Tensor of size [batch size, num outputs].
"""
if not cosine_classifier:
raise NotImplementedError('`separate_head_linear_classifier` currently '
'only supports `cosine_classifier` True.')
if learnable_scale:
cosine_logits_multiplier = tf.get_variable(
'cosine_scale',
initializer=cosine_logits_multiplier,
dtype=tf.float32,
trainable=True)
embedding_dims = embeddings.get_shape().as_list()[-1]
w_fc = functional_backbones.weight_variable(
[embedding_dims, sum(num_classes)], weight_decay=weight_decay)
# Select the output "head" to use in the forward pass.
dataset_num_classes = tf.gather(num_classes, dataset_idx)
w_fc = w_fc[:, start_idx:start_idx + dataset_num_classes]
logits = linear_classifier_forward_pass(embeddings, w_fc, None,
cosine_classifier,
cosine_logits_multiplier, False)
return logits
| 41.861244
| 80
| 0.686821
| 1,215
| 8,749
| 4.748971
| 0.225514
| 0.072097
| 0.049567
| 0.018024
| 0.429983
| 0.387695
| 0.361005
| 0.361005
| 0.279029
| 0.279029
| 0
| 0.007115
| 0.244942
| 8,749
| 208
| 81
| 42.0625
| 0.866334
| 0.481426
| 0
| 0.290323
| 0
| 0
| 0.029224
| 0.013014
| 0
| 0
| 0
| 0
| 0.010753
| 1
| 0.043011
| false
| 0.043011
| 0.064516
| 0
| 0.150538
| 0.010753
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dacac5c524e8494c9a0a1e27e5a00cc81bbbd7d
| 13,499
|
py
|
Python
|
app.py
|
Shrinidhi-C/Context-Based-Question-Answering
|
f2e0bbc03003aae65f4cabddecd5cd9fcdbfb333
|
[
"Apache-2.0"
] | 16
|
2021-03-09T17:00:27.000Z
|
2022-01-07T15:49:46.000Z
|
app.py
|
Shrinidhi-C/Context-Based-Question-Answering
|
f2e0bbc03003aae65f4cabddecd5cd9fcdbfb333
|
[
"Apache-2.0"
] | 1
|
2021-06-03T13:01:41.000Z
|
2021-06-03T13:01:41.000Z
|
app.py
|
Karthik-Bhaskar/Context-Based-Question-Answering
|
f2e0bbc03003aae65f4cabddecd5cd9fcdbfb333
|
[
"Apache-2.0"
] | 7
|
2021-03-10T11:33:18.000Z
|
2022-01-07T17:48:17.000Z
|
import os
import threading
import shutil
from datetime import timedelta, datetime
from flask import Flask, render_template, request, session, jsonify, url_for, redirect
from haystack.document_store.elasticsearch import *
from haystack.preprocessor.utils import convert_files_to_dicts
from haystack.preprocessor.cleaning import clean_wiki_text
from haystack import Finder
from haystack.retriever.sparse import ElasticsearchRetriever
from haystack.reader.transformers import TransformersReader
from elasticsearch import Elasticsearch
es = (
Elasticsearch()
) # Replace with Elasticsearch(["http://elasticsearch:9200/"], verify_certs=True) to build docker image
session_time = 60 # Session Timeout in Minutes
app = Flask(__name__)
app.secret_key = "cbqa_123"
app.permanent_session_lifetime = timedelta(minutes=session_time)
user_id = 0 # User ID to keep track w.r.t sessions and context data
current_users = dict() # Used to store user id with time of login
user_doc_store = dict() # Document store object of the user id
user_settings = dict() # User settings for GPU and Pre-trained models choice
# Handles pre-processing the context and uploads the pre-processed context to Elasticsearch
# Each user is assigned with a separate Elasticsearch index starting with "user_{user_id}"
# Documents & textual context are deleted from them temp folder named with user_id under users dir after uploading to Es
def pre_process(user_id_key):
uploads_dir = "users/" + str(user_id_key) + "/uploads/"
try:
es_result = es.search(
index="user_" + str(user_id_key), body={"query": {"match_all": {}}}
)
no_docs = len(es_result["hits"]["hits"])
except Exception as e:
print(e)
print("\n no documents in es")
processed = convert_files_to_dicts(
dir_path=uploads_dir, clean_func=clean_wiki_text, split_paragraphs=True
)
for doc in range(len(processed)):
try:
# print("\n Checking for duplicate docs ..")
add_doc = True
for each_doc in range(no_docs):
doc_text = es_result["hits"]["hits"][each_doc]["_source"]["text"]
doc_name = es_result["hits"]["hits"][each_doc]["_source"]["name"]
doc_id = es_result["hits"]["hits"][each_doc]["_id"]
if (
processed[doc]["meta"]["name"] == "context_file.txt"
and doc_name == "context_file.txt"
):
# print("Deleting context file to update with new changes ..")
es.delete(
index="user_" + str(user_id_key), doc_type="_doc", id=doc_id
)
if processed[doc]["text"] == doc_text:
# print("\n There is a duplicate, So this document is not added ..")
add_doc = False
os.remove(uploads_dir + str(processed[doc]["meta"]["name"]))
break
if add_doc:
# print("\n No duplicates found, so adding this to es..")
processed_lst = [processed[doc]]
user_doc_store[user_id_key].write_documents(processed_lst)
os.remove(uploads_dir + str(processed[doc]["meta"]["name"]))
except Exception as e:
print(e)
# print("\n no documents in es")
processed_lst = [processed[doc]]
user_doc_store[user_id_key].write_documents(processed_lst)
os.remove(uploads_dir + str(processed[doc]["meta"]["name"]))
# Handles setting up reader and retriever
def set_finder(user_id_key):
if user_settings[user_id_key]["model"] == "roberta":
model_path = (
"deepset/roberta-base-squad2" # Path of the models hosted in Hugging Face
)
elif user_settings[user_id_key]["model"] == "bert":
model_path = "deepset/bert-large-uncased-whole-word-masking-squad2"
elif user_settings[user_id_key]["model"] == "distilbert":
model_path = "distilbert-base-uncased-distilled-squad"
else:
model_path = "illuin/camembert-base-fquad"
retriever = ElasticsearchRetriever(document_store=user_doc_store[user_id_key])
if user_settings[user_id_key]["gpu"] == "on":
try:
reader = TransformersReader(
model_name_or_path=model_path, tokenizer=model_path, use_gpu=0
)
except Exception as e:
print(e)
print("GPU not available. Inferencing on CPU")
reader = TransformersReader(
model_name_or_path=model_path, tokenizer=model_path, use_gpu=-1
)
else:
reader = TransformersReader(
model_name_or_path=model_path, tokenizer=model_path, use_gpu=-1
)
finder = Finder(reader, retriever)
return finder
# Handles deletion of context data completely from the server after the session time ends and deletes user id from dict
def user_session_timer():
global current_users, session_time
seconds_in_day = 24 * 60 * 60
print("\n User tracker thread started @ ", datetime.now())
while True:
for user_id_key in current_users.copy():
current_time = datetime.now()
user_time = current_users[user_id_key]
difference = current_time - user_time
time_diff = divmod(
difference.days * seconds_in_day + difference.seconds, 60
)
if time_diff[0] >= session_time:
try:
del current_users[user_id_key]
del user_doc_store[user_id_key]
del user_settings[user_id_key]
shutil.rmtree("users/" + str(user_id_key))
es.indices.delete(
index="user_" + str(user_id_key), ignore=[400, 404]
)
except OSError as e:
print("Error: %s - %s." % (e.filename, e.strerror))
# print("\n Deleted user:", user_id_key, " @", datetime.now())
session_timer = threading.Thread(target=user_session_timer)
session_timer.start()
# Handles users w.r.t new session or already in session
@app.route("/")
def home():
global user_id, current_users, session_time
logging.info(
"User connected at "
+ str(datetime.now())
+ " with IP: "
+ str(request.environ["REMOTE_ADDR"])
)
if "user" in session and session["user"] in current_users:
user_id = session["user"]
logged_on = current_users[user_id]
current_time = datetime.now()
diff_min_sec = (
int(datetime.strftime(current_time, "%M"))
- int(datetime.strftime(logged_on, "%M"))
) * 60
diff_sec = int(datetime.strftime(current_time, "%S")) - int(
datetime.strftime(logged_on, "%S")
)
diff_time = diff_min_sec + diff_sec
time_left = (
session_time * 60
) - diff_time # For session timeout on client side
return render_template("index.html", time_left=time_left)
else:
session.permanent = True
current_time = datetime.now()
user_id += 1
current_users[user_id] = current_time
session["user"] = user_id
# print(current_users)
if not os.path.exists("users/"): # Creating user temp dir for uploading context
os.makedirs("users/" + str(user_id))
os.makedirs("users/" + str(user_id) + "/uploads")
else:
os.makedirs("users/" + str(user_id))
os.makedirs("users/" + str(user_id) + "/uploads")
user_doc_store[user_id] = ElasticsearchDocumentStore(
host="localhost", index="user_" + str(user_id)
) # Change host = "elasticsearch" to build docker image
user_settings[user_id] = {
"gpu": "off",
"model": "roberta",
} # Initial user settings
logged_on = current_users[user_id]
current_time = datetime.now()
diff_min_sec = (
int(datetime.strftime(current_time, "%M"))
- int(datetime.strftime(logged_on, "%M"))
) * 60
diff_sec = int(datetime.strftime(current_time, "%S")) - int(
datetime.strftime(logged_on, "%S")
)
diff_time = diff_min_sec + diff_sec
time_left = (
session_time * 60
) - diff_time # For session timeout on client side
return render_template("index.html", time_left=time_left)
# Handles context documents uploads
@app.route("/upload_file", methods=["GET", "POST"])
def upload_file():
global current_users
if "user" in session:
user_id_key = session["user"]
if user_id_key in current_users:
for f in request.files.getlist("file"):
f.save(
os.path.join("users/" + str(user_id_key) + "/uploads", f.filename)
)
pre_process(user_id_key)
return render_template("index.html")
else:
return redirect(url_for("session_timeout"))
else:
return redirect(url_for("session_timeout"))
# Handles context added through the textbox
@app.route("/context", methods=["POST"])
def context():
if "user" in session:
user_id_key = session["user"]
if user_id_key in current_users:
text_context = request.form["context"]
context_file = open(
"users/" + str(user_id_key) + "/uploads/context_file.txt", "w"
)
context_file.write(text_context)
context_file.close()
pre_process(user_id_key)
return jsonify({"output": "" + text_context})
else:
return render_template("session_out.html")
else:
return redirect(url_for("session_timeout"))
# Provides extracted answers for the posted question
@app.route("/question", methods=["POST"])
def question():
if "user" in session:
user_id_key = session["user"]
if user_id_key in current_users:
query_question = request.form["question"]
es_stats = es.indices.stats(index="user_" + str(user_id_key))
user_index_size = es_stats["_all"]["primaries"]["store"]["size_in_bytes"]
if (
user_index_size == 208
): # To check if index in Es is empty. 208 bytes is default index size without docs
return jsonify({"error": "add context"})
finder = set_finder(user_id_key)
answers_dict = finder.get_answers(
question=query_question, top_k_retriever=5, top_k_reader=5
)
unique_answers = list()
output = list()
if len(answers_dict["answers"]) > 0:
for i in range(len(answers_dict["answers"])):
if (
answers_dict["answers"][i]["answer"] is not None
and answers_dict["answers"][i]["answer"] not in unique_answers
):
temp_dict = answers_dict["answers"][i]
remove = (
"score",
"probability",
"offset_start",
"offset_end",
"document_id",
)
unique_answers.append(temp_dict["answer"])
if temp_dict["meta"]["name"] == "context_file.txt":
temp_dict["meta"]["name"] = "Textual Context"
temp_dict["meta"] = temp_dict["meta"]["name"]
output.append(temp_dict)
for key in remove:
if key in temp_dict:
del temp_dict[key]
else:
output = [
{"answer": "No Answers found ..", "context": " ", "meta": " "},
]
return jsonify({"output": output})
else:
return render_template("session_out.html")
# Handles GPU setting changes.
@app.route("/gpu", methods=["POST"])
def gpu():
if "user" in session:
user_id_key = session["user"]
if user_id_key in current_users:
if user_settings[user_id_key]["gpu"] == "on":
user_settings[user_id_key]["gpu"] = "off"
else:
user_settings[user_id_key]["gpu"] = "on"
return jsonify({"output": "gpu status changed"})
# Handles pre-trained model choice setting changes.
@app.route("/models", methods=["POST"])
def models():
if "user" in session:
user_id_key = session["user"]
if user_id_key in current_users:
user_settings[user_id_key]["model"] = request.form["model"]
return jsonify({"output": "model changed"})
# Handles session timeout redirection
@app.route("/session_timeout")
def session_timeout():
return render_template("session_out.html")
# Handles removing of session identifier from session dict, This works only when app tab is open until session completes
@app.route("/session_out", methods=["POST"])
def session_out():
session.pop("user", None)
return redirect(url_for("session_timeout"))
# Comment the below block in case of building a docker image or running on WSGI server like gunicorn
if __name__ == "__main__":
app.run(host="0.0.0.0")
| 38.132768
| 120
| 0.588414
| 1,613
| 13,499
| 4.694358
| 0.201488
| 0.048336
| 0.047544
| 0.023772
| 0.384971
| 0.336371
| 0.288695
| 0.23785
| 0.222398
| 0.208531
| 0
| 0.005516
| 0.301652
| 13,499
| 353
| 121
| 38.240793
| 0.797709
| 0.142974
| 0
| 0.345196
| 0
| 0
| 0.114271
| 0.014739
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039146
| false
| 0
| 0.042705
| 0.003559
| 0.13879
| 0.024911
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dad62563785343452980a6c164a9cfda04650c2
| 7,486
|
py
|
Python
|
timevortex/utils/filestorage.py
|
timevortexproject/timevortex
|
2bc1a50b255524af8582e6624dee280d64d3c9f3
|
[
"MIT"
] | null | null | null |
timevortex/utils/filestorage.py
|
timevortexproject/timevortex
|
2bc1a50b255524af8582e6624dee280d64d3c9f3
|
[
"MIT"
] | null | null | null |
timevortex/utils/filestorage.py
|
timevortexproject/timevortex
|
2bc1a50b255524af8582e6624dee280d64d3c9f3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# -*- coding: utf8 -*-
# -*- Mode: Python; py-indent-offset: 4 -*-
"""File storage adapter for timevortex project"""
import os
from os import listdir, makedirs
from os.path import isfile, join, exists
from time import tzname
from datetime import datetime
import pytz
import dateutil.parser
from django.conf import settings
from django.utils import timezone
from timevortex.utils.globals import LOGGER, KEY_ERROR, KEY_SITE_ID, KEY_VARIABLE_ID, KEY_VALUE, KEY_DATE
from timevortex.utils.globals import KEY_DST_TIMEZONE, KEY_NON_DST_TIMEZONE, SYSTEM_SITE_ID
SETTINGS_FILE_STORAGE_FOLDER = "SETTINGS_FILE_STORAGE_FOLDER"
SETTINGS_DEFAULT_FILE_STORAGE_FOLDER = "/tmp/data/"
def get_lines_number(file_path):
"""Get lines number
"""
return sum(1 for line in open(file_path))
def get_series_per_file(site_folder, file_prefix):
"""Get series per file
"""
series = {}
for filename in listdir(site_folder):
is_file = isfile(join(site_folder, filename))
if is_file and file_prefix in filename:
complete_filename = "%s/%s" % (site_folder, filename)
with open(complete_filename, "r") as filed:
temp_series = filed.readlines()
for line in temp_series:
array_line = line.split("\t")
if len(array_line) >= 2:
series[array_line[1]] = array_line[0]
return series
def get_last_file_name(site_folder, file_prefix):
"""Get last filename
"""
old_date = None
last_filename = ""
for new_filename in listdir(site_folder):
is_file = isfile(join(site_folder, new_filename))
if is_file and file_prefix in new_filename:
old_date, last_filename = update_last_file_name(file_prefix, old_date, last_filename, new_filename)
return last_filename
def update_last_file_name(file_prefix, old_date, last_filename, new_filename):
"""Update last file name
"""
try:
new_date = new_filename.replace(file_prefix, "")
new_date = datetime.strptime(new_date, "%Y-%m-%d")
if old_date is None or new_date > old_date:
return new_date, new_filename
except ValueError:
LOGGER.error("Not right file")
return old_date, last_filename
class FileStorage(object):
"""Class that help us to store and load data over several file"""
def __init__(self, folder_path):
"""Constructor"""
self.folder_path = folder_path
if not exists(self.folder_path):
makedirs(self.folder_path)
def insert_series(self, series):
"""Insert series in DB
:param series: Representation of a series
:type series: dict.
"""
self.insert(series)
def insert(self, message):
"""Insert data in file"""
file_folder = "%s/%s" % (self.folder_path, message[KEY_SITE_ID])
file_date = timezone.localtime(
dateutil.parser.parse(message[KEY_DATE]).replace(tzinfo=pytz.UTC)).strftime("%Y-%m-%d")
if not exists(file_folder):
makedirs(file_folder)
raw_file = "%s/%s.tsv.%s" % (
file_folder, message[KEY_VARIABLE_ID], file_date)
extracted = open(raw_file, "a+")
extracted.write("%s\t%s\t%s\t%s\n" % (
message[KEY_VALUE],
message[KEY_DATE],
message[KEY_DST_TIMEZONE],
message[KEY_NON_DST_TIMEZONE]))
extracted.close()
def insert_error(self, message):
"""Function that store error in errors collection and in log
:param message: Error to insert in DB
:type message: str.
"""
LOGGER.error(message)
message[KEY_VARIABLE_ID] = KEY_ERROR
self.insert(message)
def store_error(self, error):
"""Function that create valid error message
:param error: Mal formed message
:type error: str.
"""
message = {
KEY_VALUE: error,
KEY_VARIABLE_ID: KEY_ERROR,
KEY_SITE_ID: SYSTEM_SITE_ID,
KEY_DATE: datetime.utcnow().isoformat('T'),
KEY_DST_TIMEZONE: tzname[1],
KEY_NON_DST_TIMEZONE: tzname[0]
}
LOGGER.error(error)
self.insert(message)
def get_series(self, site_id, variable_id):
"""Retrieve all series for a variable_id in site_id
"""
element = variable_id
file_prefix = "%s.tsv." % element
site_folder = "%s/%s" % (self.folder_path, site_id)
if exists(site_folder):
series = get_series_per_file(site_folder, file_prefix)
else:
series = {}
return series
def get_last_series(self, site_id, variable_id):
"""Retrieve last value of variable_id in site_id
"""
element = variable_id
file_prefix = "%s.tsv." % element
site_folder = "%s/%s" % (self.folder_path, site_id)
if exists(site_folder):
last_filename = get_last_file_name(site_folder, file_prefix)
last_filename = "%s/%s" % (site_folder, last_filename)
try:
with open(last_filename, "rb") as filed2:
for last in filed2:
pass
except IsADirectoryError:
return None
LOGGER.debug(last) # pylint: disable=I0011,W0631
last = last.decode("utf-8").replace("\n", "") # pylint: disable=I0011,W0631
return {
KEY_VARIABLE_ID: element,
KEY_SITE_ID: site_id,
KEY_VALUE: last.split("\t")[0],
KEY_DATE: last.split("\t")[1],
KEY_DST_TIMEZONE: last.split("\t")[2],
KEY_NON_DST_TIMEZONE: last.split("\t")[3]
}
return None
def get_last_error(self, site_id):
"""Retrieve last error of a site_id file storage
"""
return self.get_last_series(site_id, KEY_ERROR)
def get_number_of_error(self, site_id, day_date):
"""This method retrieve number of error published for a day_date
"""
element = KEY_ERROR
site_folder = "%s/%s" % (self.folder_path, site_id)
filename = "%s.tsv.%s" % (element, day_date)
file_path = "%s/%s" % (site_folder, filename)
if exists(site_folder) and exists(file_path):
return get_lines_number(file_path)
return 0
def get_number_of_series(self, site_id, day_date):
"""This method retrieve number of series published for a day_date
"""
site_folder = "%s/%s" % (self.folder_path, site_id)
series = []
if exists(site_folder):
for filename in listdir(site_folder):
if "%s.tsv" % KEY_ERROR not in filename and day_date in filename:
file_path = "%s/%s" % (site_folder, filename)
var_id = filename.replace(".tsv.%s" % day_date, "")
series_numbers = get_lines_number(file_path)
series.append([var_id, series_numbers])
return series
def set_data_location(self, folder_path):
"""Set data folder space"""
self.folder_path = folder_path
def get_sites_list(self):
"""Get sites list"""
return os.listdir(self.folder_path)
FILE_STORAGE_SPACE = FileStorage(getattr(settings, SETTINGS_FILE_STORAGE_FOLDER, SETTINGS_DEFAULT_FILE_STORAGE_FOLDER))
| 34.497696
| 119
| 0.612209
| 960
| 7,486
| 4.50625
| 0.183333
| 0.048544
| 0.038835
| 0.01387
| 0.37055
| 0.249884
| 0.237171
| 0.208507
| 0.16135
| 0.120203
| 0
| 0.006171
| 0.2856
| 7,486
| 216
| 120
| 34.657407
| 0.80273
| 0.134919
| 0
| 0.198582
| 0
| 0
| 0.031837
| 0.004457
| 0
| 0
| 0
| 0
| 0
| 1
| 0.113475
| false
| 0.007092
| 0.078014
| 0
| 0.297872
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5daed49fa4c053c06f93d18e081f06b652a982e8
| 4,078
|
py
|
Python
|
main_tg.py
|
olegush/quiz-bot
|
ae370d42f32c42b290a507924a801c63901d5148
|
[
"MIT"
] | null | null | null |
main_tg.py
|
olegush/quiz-bot
|
ae370d42f32c42b290a507924a801c63901d5148
|
[
"MIT"
] | null | null | null |
main_tg.py
|
olegush/quiz-bot
|
ae370d42f32c42b290a507924a801c63901d5148
|
[
"MIT"
] | null | null | null |
import os
import logging
import logging.config
from functools import partial
from dotenv import load_dotenv
from telegram import Bot, ReplyKeyboardMarkup, ReplyKeyboardRemove
from telegram.ext import (Updater, CommandHandler, MessageHandler,
RegexHandler, ConversationHandler, Filters)
from redis import Redis
from tg_logging import create_logger
from quiz_tools import get_question_and_answer, format_answer, format_question
QUESTION, ATTEMPT = range(2)
def main():
class LoggerTelegramBot(logging.Handler):
def emit(self, record):
log_entry = self.format(record)
bot.send_message(chat_id=chat_id_tg_admin, text=log_entry)
dictLogConfig = {
'version': 1,
'handlers': {
'handler': {
'()': LoggerTelegramBot,
'formatter': 'formatter'
}
},
'loggers': {
'tg_logger': {
'handlers': ['handler'],
'level': 'INFO'
}
},
'formatters': {
'formatter': {
'format': '%(asctime)s - %(levelname)s - %(message)s'
}
}
}
load_dotenv()
chat_id_tg_admin = os.getenv('CHAT_ID_TG_ADMIN')
bot = Bot(token=os.getenv('TOKEN_TG'))
logging.config.dictConfig(dictLogConfig)
logger = logging.getLogger('tg_logger')
handler = LoggerTelegramBot()
logger.addHandler(handler)
rediser = Redis(
host=os.getenv('REDIS_HOST'),
port=os.getenv('REDIS_PORT'),
db=0,
password=os.getenv('REDIS_PWD'))
updater = Updater(token_tg)
dp = updater.dispatcher
logger.info(dp)
conv_handler = ConversationHandler(
entry_points=[CommandHandler('start', start)],
states={
QUESTION: [
RegexHandler('^Выход$', do_exit),
MessageHandler(Filters.text, partial(handle_new_question, rediser))],
ATTEMPT: [
RegexHandler('^Выход$', do_exit),
RegexHandler('^(Новый вопрос|Другой вопрос)$', partial(handle_new_question, rediser)),
RegexHandler('^Показать ответ$', partial(display_answer, rediser)),
MessageHandler(Filters.text, partial(handle_attempt, rediser))],
},
fallbacks=[CommandHandler('cancel', do_exit)]
)
dp.add_handler(conv_handler)
updater.start_polling()
updater.idle()
def do_reply(update, text, keyboard=None):
if keyboard is None:
markup = ReplyKeyboardRemove()
return update.message.reply_text(text, reply_markup=markup)
markup = ReplyKeyboardMarkup(keyboard, resize_keyboard=True)
return update.message.reply_text(text, reply_markup=markup)
def start(bot, update):
do_reply(update, 'Привет знатоку в чате-викторине! Начинаем?', [['Да!']])
return QUESTION
def handle_new_question(rediser, bot, update):
new_question, new_answer = get_question_and_answer()
chat_id = update.message.chat_id
rediser.set(chat_id, new_answer)
do_reply(update, format_question(new_question))
return ATTEMPT
def display_answer(rediser, bot, update):
chat_id = update.message.chat_id
answer = rediser.get(chat_id).decode()
do_reply(update, answer, [['Новый вопрос', 'Выход']])
return QUESTION
def handle_attempt(rediser, bot, update):
chat_id = update.message.chat_id
attempt = update.message.text.strip().lower()
answer = rediser.get(chat_id).decode()
if attempt == format_answer(answer):
text = 'Правильно! \n\n {}'.format(answer)
reply_keyboard = [['Новый вопрос', 'Выход']]
else:
text = 'Неверно! Попробуйте еще раз.'
reply_keyboard = [['Показать ответ', 'Другой вопрос', 'Выход']]
do_reply(update, text, reply_keyboard)
return ATTEMPT
def do_exit(bot, update):
text = 'До скорой встречи! Желаете начать заново? Жмите /start'
do_reply(update, text)
return ConversationHandler.END
if __name__ == '__main__':
main()
| 31.612403
| 102
| 0.632908
| 439
| 4,078
| 5.676538
| 0.302961
| 0.0313
| 0.0313
| 0.01565
| 0.154896
| 0.104735
| 0.072231
| 0.072231
| 0.072231
| 0
| 0
| 0.000984
| 0.252084
| 4,078
| 128
| 103
| 31.859375
| 0.816066
| 0
| 0
| 0.121495
| 0
| 0
| 0.122609
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074766
| false
| 0.009346
| 0.093458
| 0
| 0.242991
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5daf4ad3d9f3b39d8355c443ca683a3b5708554c
| 3,082
|
py
|
Python
|
tf_fourier_features/fourier_features_mlp.py
|
titu1994/tf_fourier_features
|
3aead078ae79a278b9975e21f44560a7f51e3f31
|
[
"MIT"
] | 37
|
2020-06-20T21:39:30.000Z
|
2021-11-08T09:31:22.000Z
|
tf_fourier_features/fourier_features_mlp.py
|
titu1994/tf_fourier_features
|
3aead078ae79a278b9975e21f44560a7f51e3f31
|
[
"MIT"
] | null | null | null |
tf_fourier_features/fourier_features_mlp.py
|
titu1994/tf_fourier_features
|
3aead078ae79a278b9975e21f44560a7f51e3f31
|
[
"MIT"
] | 5
|
2020-06-22T10:24:11.000Z
|
2021-09-10T10:40:08.000Z
|
import tensorflow as tf
from typing import Optional
from tf_fourier_features import fourier_features
class FourierFeatureMLP(tf.keras.Model):
def __init__(self, units: int, final_units: int, gaussian_projection: Optional[int],
activation: str = 'relu',
final_activation: str = "linear",
num_layers: int = 1,
gaussian_scale: float = 1.0,
use_bias: bool = True, **kwargs):
"""
Fourier Feature Projection model from the paper
[Fourier Features Let Networks Learn High Frequency Functions in Low Dimensional Domains](https://people.eecs.berkeley.edu/~bmild/fourfeat/).
Used to create a multi-layer MLP with optional FourierFeatureProjection layer.
Args:
units: Number of hidden units in the intermediate layers.
final_units: Number of hidden units in the final layer.
activation: Activation in the hidden layers.
final_activation: Activation function of the final layer.
num_layers: Number of layers in the network.
gaussian_projection: Projection dimension for the gaussian kernel in fourier feature
projection layer. Can be None, negative or positive integer.
If None, then fourier feature map layer is not used.
If <=0, uses identity matrix (basic projection) without gaussian kernel.
If >=1, uses gaussian projection matrix of specified dim.
gaussian_scale: Scale of the gaussian kernel in fourier feature projection layer.
Note: If the scale is too small, convergence will slow down and obtain poor results.
If the scale is too large (>50), convergence will be fast but results will be grainy.
Try grid search for scales in the range [10 - 50].
use_bias: Boolean whether to use bias or not.
# References:
- [Fourier Features Let Networks Learn High Frequency Functions in Low Dimensional Domains](https://people.eecs.berkeley.edu/~bmild/fourfeat/)
"""
super().__init__(**kwargs)
layers = []
if gaussian_projection is not None:
layers.append(fourier_features.FourierFeatureProjection(
gaussian_projection=gaussian_projection,
gaussian_scale=gaussian_scale,
**kwargs
))
for _ in range(num_layers - 1):
layers.append(tf.keras.layers.Dense(units, activation=activation, use_bias=use_bias,
bias_initializer='he_uniform', **kwargs))
self.network = tf.keras.Sequential(layers)
self.final_dense = tf.keras.layers.Dense(final_units, activation=final_activation,
use_bias=use_bias, bias_initializer='he_uniform', **kwargs)
def call(self, inputs, training=None, mask=None):
features = self.network(inputs)
output = self.final_dense(features)
return output
| 48.15625
| 156
| 0.633679
| 357
| 3,082
| 5.35014
| 0.369748
| 0.025654
| 0.037696
| 0.027225
| 0.273298
| 0.257592
| 0.257592
| 0.227225
| 0.176963
| 0.176963
| 0
| 0.005553
| 0.298832
| 3,082
| 63
| 157
| 48.920635
| 0.878297
| 0.476963
| 0
| 0
| 0
| 0
| 0.021023
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.107143
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5db3e4eb84b3d9fc5559048f6229e0e36618f2f4
| 1,545
|
py
|
Python
|
parsing_documents.py
|
leylafenix/belief-network-irs
|
9094e4cde738bd93ed1747dc958b5acb0e0fa684
|
[
"MIT"
] | null | null | null |
parsing_documents.py
|
leylafenix/belief-network-irs
|
9094e4cde738bd93ed1747dc958b5acb0e0fa684
|
[
"MIT"
] | null | null | null |
parsing_documents.py
|
leylafenix/belief-network-irs
|
9094e4cde738bd93ed1747dc958b5acb0e0fa684
|
[
"MIT"
] | null | null | null |
__author__ = 'Jose Gabriel'
import os
import pprint
def read_block(f):
s = ""
line = f.readline()
while line and not line.startswith("."):
s += line
line = f.readline()
return s, line
def read_doc(f):
doc = {"title": "", "authors": "", "content": ""}
line = f.readline()
while line and not line.startswith(".I"):
if line.startswith(".T"):
doc["title"], line = read_block(f)
elif line.startswith(".A"):
doc["authors"], line = read_block(f)
elif line.startswith(".W"):
doc["content"], line = read_block(f)
else:
_, line = read_block(f)
return doc, line
def create_doc(data, out_folder, name):
with open(out_folder + os.sep + name, 'w') as f:
f.write(data["title"] + "\n")
f.write(data["content"] + "\n")
f.write(data["authors"])
def parse_all(s, out_folder):
with open(s) as f:
line = f.readline() # .I
while line:
doc_name = "d%03d.txt" % (int(line.strip().split()[-1]))
doc, line = read_doc(f)
create_doc(doc, out_folder, doc_name)
# print("**********************************")
if __name__ == '__main__':
s = "adi" + os.sep + "ADI.ALL"
out_folder = "test_index"
try: # averiguar como preguntar si una carpeta o fichero existe en python
os.mkdir(out_folder)
except FileExistsError:
pass
parse_all(s, out_folder)
| 26.186441
| 79
| 0.514563
| 193
| 1,545
| 3.943005
| 0.362694
| 0.082786
| 0.065703
| 0.073587
| 0.241787
| 0.194481
| 0.194481
| 0.110381
| 0.110381
| 0
| 0
| 0.002841
| 0.316505
| 1,545
| 58
| 80
| 26.637931
| 0.717803
| 0.073139
| 0
| 0.093023
| 0
| 0
| 0.087591
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.093023
| false
| 0.023256
| 0.046512
| 0
| 0.186047
| 0.023256
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5db634e6fdac00dd4f3ce30f7fe7fbdaae184512
| 6,924
|
py
|
Python
|
recipes/libstudxml/all/conanfile.py
|
rockandsalt/conan-center-index
|
d739adcec3e4dd4c250eff559ceb738e420673dd
|
[
"MIT"
] | 562
|
2019-09-04T12:23:43.000Z
|
2022-03-29T16:41:43.000Z
|
recipes/libstudxml/all/conanfile.py
|
rockandsalt/conan-center-index
|
d739adcec3e4dd4c250eff559ceb738e420673dd
|
[
"MIT"
] | 9,799
|
2019-09-04T12:02:11.000Z
|
2022-03-31T23:55:45.000Z
|
recipes/libstudxml/all/conanfile.py
|
rockandsalt/conan-center-index
|
d739adcec3e4dd4c250eff559ceb738e420673dd
|
[
"MIT"
] | 1,126
|
2019-09-04T11:57:46.000Z
|
2022-03-31T16:43:38.000Z
|
from conans import ConanFile, AutoToolsBuildEnvironment, MSBuild, tools
from conans.errors import ConanInvalidConfiguration
import os
import shutil
required_conan_version = ">=1.33.0"
class LibStudXmlConan(ConanFile):
name = "libstudxml"
description = "A streaming XML pull parser and streaming XML serializer implementation for modern, standard C++."
topics = ("xml", "xml-parser", "serialization")
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://www.codesynthesis.com/projects/libstudxml/"
license = "MIT"
settings = "os", "compiler", "build_type", "arch"
exports_sources = "patches/*"
options = {
"shared": [True, False],
"fPIC": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
}
_autotools = None
@property
def _source_subfolder(self):
return "source_subfolder"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
del self.options.fPIC
def requirements(self):
self.requires("expat/2.4.1")
def validate(self):
if self.settings.compiler == "Visual Studio":
if tools.Version(self.settings.compiler.version) < "9":
raise ConanInvalidConfiguration("Visual Studio {} is not supported.".format(self.settings.compiler.version))
@property
def _settings_build(self):
return getattr(self, "settings_build", self.settings)
def build_requirements(self):
if self.settings.compiler != "Visual Studio":
self.build_requires("gnu-config/cci.20201022")
self.build_requires("libtool/2.4.6")
if self._settings_build.os == "Windows" and not tools.get_env("CONAN_BASH_PATH"):
self.build_requires("msys2/cci.latest")
def source(self):
tools.get(**self.conan_data["sources"][self.version],
destination=self._source_subfolder, strip_root=True)
def _configure_autotools(self):
if not self._autotools:
args = ["--with-external-expat"]
if self.options.shared:
args.extend(["--enable-shared", "--disable-static"])
else:
args.extend(["--disable-shared", "--enable-static"])
self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
self._autotools.configure(configure_dir=self._source_subfolder, args=args)
return self._autotools
def _build_vs(self):
vc_ver = int(tools.Version(self.settings.compiler.version).major)
sln_path = None
def get_sln_path():
return os.path.join(self._source_subfolder, "libstudxml-vc{}.sln".format(vc_ver))
sln_path = get_sln_path()
while not os.path.exists(sln_path):
vc_ver -= 1
sln_path = get_sln_path()
proj_path = os.path.join(self._source_subfolder, "xml", "libstudxml-vc{}.vcxproj".format(vc_ver))
if not self.options.shared:
tools.replace_in_file(proj_path, "DynamicLibrary", "StaticLibrary")
tools.replace_in_file(proj_path, "LIBSTUDXML_DYNAMIC_LIB", "LIBSTUDXML_STATIC_LIB")
msbuild = MSBuild(self)
msbuild.build(sln_path, platforms={"x86": "Win32"})
@property
def _user_info_build(self):
return getattr(self, "user_info_build", self.deps_user_info)
def _build_autotools(self):
shutil.copy(self._user_info_build["gnu-config"].CONFIG_SUB,
os.path.join(self._source_subfolder, "config", "config.sub"))
shutil.copy(self._user_info_build["gnu-config"].CONFIG_GUESS,
os.path.join(self._source_subfolder, "config", "config.guess"))
if self.settings.compiler.get_safe("libcxx") == "libc++":
# libc++ includes a file called 'version', and since libstudxml adds source_subfolder as an
# include dir, libc++ ends up including their 'version' file instead, causing a compile error
tools.remove_files_by_mask(self._source_subfolder, "version")
with tools.chdir(self._source_subfolder):
self.run("{} -fiv".format(tools.get_env("AUTORECONF")), win_bash=tools.os_info.is_windows)
autotools = self._configure_autotools()
autotools.make()
def build(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
if self.settings.compiler == "Visual Studio":
self._build_vs()
else:
self._build_autotools()
def package(self):
self.copy(pattern="LICENSE", dst="licenses", src=self._source_subfolder)
if self.settings.compiler == "Visual Studio":
self.copy("xml/value-traits", dst="include", src=self._source_subfolder)
self.copy("xml/serializer", dst="include", src=self._source_subfolder)
self.copy("xml/qname", dst="include", src=self._source_subfolder)
self.copy("xml/parser", dst="include", src=self._source_subfolder)
self.copy("xml/forward", dst="include", src=self._source_subfolder)
self.copy("xml/exception", dst="include", src=self._source_subfolder)
self.copy("xml/content", dst="include", src=self._source_subfolder)
self.copy("xml/*.ixx", dst="include", src=self._source_subfolder)
self.copy("xml/*.txx", dst="include", src=self._source_subfolder)
self.copy("xml/*.hxx", dst="include", src=self._source_subfolder)
self.copy("xml/*.h", dst="include", src=self._source_subfolder)
suffix = ""
if self.settings.arch == "x86_64":
suffix = "64"
if self.options.shared:
self.copy("*.lib", dst="lib", src=os.path.join(self._source_subfolder, "lib" + suffix))
self.copy("*.dll", dst="bin", src=os.path.join(self._source_subfolder, "bin" + suffix))
else:
self.copy("*.lib", dst="lib", src=os.path.join(self._source_subfolder, "bin" + suffix))
else:
autotools = self._configure_autotools()
autotools.install()
tools.remove_files_by_mask(os.path.join(self.package_folder, "lib"), "libstudxml.la")
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
tools.rmdir(os.path.join(self.package_folder, "share"))
def package_info(self):
self.cpp_info.libs = tools.collect_libs(self)
self.cpp_info.names["pkg_config"] = "libstudxml"
# If built with makefile, static library mechanism is provided by their buildsystem already
if self.settings.compiler == "Visual Studio" and not self.options.shared:
self.cpp_info.defines = ["LIBSTUDXML_STATIC_LIB=1"]
| 41.963636
| 124
| 0.633449
| 827
| 6,924
| 5.112455
| 0.252721
| 0.092242
| 0.103359
| 0.062441
| 0.374882
| 0.315279
| 0.255203
| 0.215705
| 0.158467
| 0.036897
| 0
| 0.00602
| 0.232236
| 6,924
| 164
| 125
| 42.219512
| 0.789315
| 0.039139
| 0
| 0.147287
| 0
| 0
| 0.175515
| 0.020003
| 0
| 0
| 0
| 0
| 0
| 1
| 0.124031
| false
| 0
| 0.031008
| 0.031008
| 0.286822
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5db692792275a8f7aff10d7781c4cef5d88900db
| 6,263
|
py
|
Python
|
dataset/WebCariA.py
|
KeleiHe/DAAN
|
04e153c55f8d63e824adbee828e524573afe6a1c
|
[
"Apache-2.0"
] | 9
|
2020-07-24T03:32:17.000Z
|
2022-03-25T12:01:24.000Z
|
dataset/WebCariA.py
|
KeleiHe/DAAN
|
04e153c55f8d63e824adbee828e524573afe6a1c
|
[
"Apache-2.0"
] | 1
|
2020-10-14T17:22:43.000Z
|
2020-10-14T17:22:43.000Z
|
dataset/WebCariA.py
|
KeleiHe/DAAN
|
04e153c55f8d63e824adbee828e524573afe6a1c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Wen Ji & Kelei He (hkl@nju.edu.cn)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
class WebCariA:
def __init__(self, dataType, modelType, parse, des_attri=None):
self.dir_path = "/data/jw/dataset/" + str(parse)
self.dataType = dataType
self.parse = parse
self.des_attri = des_attri
if self.dataType == 'train':
if self.parse == 'Caricature':
self.subPath = 'CariTrain'
elif self.parse == 'Photo':
self.subPath = 'PhotoTrain'
else:
self.subPath = 'WebCariTrain'
elif self.dataType == 'val':
if self.parse == 'Caricature':
self.subPath = 'CariVal'
elif self.parse == 'Photo':
self.subPath = 'PhotoVal'
else:
self.subPath = 'WebCariVal'
elif self.dataType == 'test':
if self.parse == 'Caricature':
self.subPath = 'CariTest'
elif self.parse == 'Photo':
self.subPath = 'PhotoTest'
else:
self.subPath = 'WebCariTest'
elif self.dataType == 'all_data':
if self.parse == 'Caricature':
self.subPath = 'all_cari_data'
elif self.parse == 'Photo':
self.subPath = 'all_photo_data'
else:
self.subPath = 'all_WebCari_data'
else:
print("Caricature error, please select a dataType from: train, val, github")
exit(1)
self.modelType = modelType
self.dir_path = os.path.join(self.dir_path, self.subPath)
self.attributes = ['Women',
'Asian',
'White',
'Black',
'Youth',
'Middle',
'Old',
'Wrinkle',
'MakeUp',
'Bald',
'LargeForehead',
'RoundFace',
'DiamondFace',
'OvalFace',
'SquareShapeFace',
'NarrowEye',
'SleepyEye',
'SlantEye',
'SharpEye',
'FlabbyEye',
'BigEye',
'SmallEye',
'UnderEyePuffiness',
'BigNose',
'SmallNose',
'HighNose',
'FlatNose',
'HookNose',
'WideNose',
'NarrowNose',
'Toothy',
'Smile',
'BigMouth',
'SmallMouth',
'ThickLips',
'ThinLips',
'DoubleChin',
'ArchedEyebrows',
'FlatEyebrow',
'SlantedEyebrows',
'UpsideDownSlantedEyebrows',
'BushyEyebrows',
'ThickEyebrows',
'ThinEyebrows',
'Mustache',
'Goatee',
'Whiskers',
'OtherBeard&NoBeard',
'HighCheekbones',
'SquareJaw']
self.names, self.annas, self.visuals, self.num_attribute = self.getImgNameAndAnnas()
print(parse+"dataset, images: ", len(self.names), " type for: ", self.dataType, " num_attribute: ",
self.num_attribute)
def getImgNameAndAnnas(self):
names = []
annas = []
visuals = []
file = self.subPath+".txt"
file_v = self.subPath+"_V.txt"
fileList = open(os.path.join(self.dir_path, file)).readlines()
fileVList = open((os.path.join(self.dir_path, file_v))).readlines()
if self.modelType == 'seperate':
num_attribute = 1
attribute = self.des_attri
print("des_attribute", attribute)
if attribute not in self.attributes:
print("error: ", attribute, "is not in this dataset, please write a correct attribute in param")
exit(1)
for line in fileList:
names.append(line[0])
attributes = line[1::]
index = self.attributes.index(attribute)
annas.append([int(attributes[index])])
for line in fileVList:
attributes_v = line[1::]
index = self.attributes.index(attribute)
visuals.append([int(attributes_v[index])])
else:
for line in fileList:
names.append(line[0])
annas.append([int(x) for x in line[1::]])
for line in fileVList:
visuals.append([int(x) for x in line[1::]])
self.attributes = self.attributes
num_attribute = len(self.attributes)
return names, annas, visuals, num_attribute
def getPath(self, name):
name = name.replace(' ', '_')
name = name.replace('._', '_')
name = name.replace('-', '_')
name = name + ".jpg"
return name
| 38.900621
| 113
| 0.440683
| 510
| 6,263
| 5.341176
| 0.390196
| 0.060573
| 0.020191
| 0.030837
| 0.20558
| 0.20558
| 0.108297
| 0.080396
| 0
| 0
| 0
| 0.005055
| 0.463037
| 6,263
| 160
| 114
| 39.14375
| 0.804936
| 0.095481
| 0
| 0.181818
| 0
| 0
| 0.166302
| 0.004559
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022727
| false
| 0
| 0.007576
| 0
| 0.05303
| 0.030303
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5db7554e0b55f70192702d11bfb40d5a1d8f2459
| 4,081
|
py
|
Python
|
moonworm/crawler/state/json_state.py
|
zomglings/moonworm
|
930e60199629b6a04adecc7f9ff9450e51bb4640
|
[
"Apache-2.0"
] | 10
|
2021-12-08T22:35:58.000Z
|
2022-03-30T07:38:12.000Z
|
moonworm/crawler/state/json_state.py
|
zomglings/moonworm
|
930e60199629b6a04adecc7f9ff9450e51bb4640
|
[
"Apache-2.0"
] | 29
|
2021-11-04T12:30:31.000Z
|
2022-03-03T21:29:08.000Z
|
moonworm/crawler/state/json_state.py
|
zomglings/moonworm
|
930e60199629b6a04adecc7f9ff9450e51bb4640
|
[
"Apache-2.0"
] | 5
|
2021-11-06T02:25:09.000Z
|
2022-02-15T03:09:26.000Z
|
import datetime
import json
import time
from typing import Optional
from web3.datastructures import AttributeDict
from .event_scanner_state import EventScannerState
class JSONifiedState(EventScannerState):
"""Store the state of scanned blocks and all events.
All state is an in-memory dict.
Simple load/store massive JSON on start up.
"""
def __init__(self):
self.state = None
self.fname = "test-state.json"
# How many second ago we saved the JSON file
self.last_save = 0
def reset(self):
"""Create initial state of nothing scanned."""
self.state = {
"last_scanned_block": 0,
"blocks": {},
}
def restore(self):
"""Restore the last scan state from a file."""
try:
self.state = json.load(open(self.fname, "rt"))
print(
f"Restored the state, previously {self.state['last_scanned_block']} blocks have been scanned"
)
except (IOError, json.decoder.JSONDecodeError):
print("State starting from scratch")
self.reset()
def save(self):
"""Save everything we have scanned so far in a file."""
with open(self.fname, "wt") as f:
json.dump(self.state, f)
self.last_save = time.time()
#
# EventScannerState methods implemented below
#
def get_last_scanned_block(self):
"""The number of the last block we have stored."""
return self.state["last_scanned_block"]
def delete_data(self, since_block):
"""Remove potentially reorganised blocks from the scan data."""
for block_num in range(since_block, self.get_last_scanned_block()):
if block_num in self.state["blocks"]:
del self.state["blocks"][block_num]
def start_chunk(self, block_number, chunk_size):
pass
def end_chunk(self, block_number):
"""Save at the end of each block, so we can resume in the case of a crash or CTRL+C"""
# Next time the scanner is started we will resume from this block
self.state["last_scanned_block"] = block_number
# Save the database file for every minute
if time.time() - self.last_save > 60:
self.save()
def process_event(
self, block_when: Optional[datetime.datetime], event: AttributeDict
) -> str:
"""Record a ERC-20 transfer in our database."""
# Events are keyed by their transaction hash and log index
# One transaction may contain multiple events
# and each one of those gets their own log index
# event_name = event.event # "Transfer"
log_index = event.logIndex # Log index within the block
# transaction_index = event.transactionIndex # Transaction index within the block
txhash = event.transactionHash.hex() # Transaction hash
block_number = event.blockNumber
# Convert ERC-20 Transfer event to our internal format
args = event["args"]
transfer = {
"from": args["from"],
"to": args.to,
"value": args.value,
}
if block_when is not None:
transfer["timestamp"] = block_when.isoformat()
# Create empty dict as the block that contains all transactions by txhash
if block_number not in self.state["blocks"]:
self.state["blocks"][block_number] = {}
block = self.state["blocks"][block_number]
if txhash not in block:
# We have not yet recorded any transfers in this transaction
# (One transaction may contain multiple events if executed by a smart contract).
# Create a tx entry that contains all events by a log index
self.state["blocks"][block_number][txhash] = {}
# Record ERC-20 transfer in our database
self.state["blocks"][block_number][txhash][log_index] = transfer
# Return a pointer that allows us to look up this event later if needed
return f"{block_number}-{txhash}-{log_index}"
| 35.486957
| 109
| 0.626072
| 524
| 4,081
| 4.778626
| 0.341603
| 0.050319
| 0.041933
| 0.039936
| 0.150559
| 0.076677
| 0
| 0
| 0
| 0
| 0
| 0.003771
| 0.285224
| 4,081
| 114
| 110
| 35.798246
| 0.854645
| 0.346729
| 0
| 0
| 0
| 0
| 0.116171
| 0.026631
| 0
| 0
| 0
| 0
| 0
| 1
| 0.145161
| false
| 0.016129
| 0.096774
| 0
| 0.290323
| 0.032258
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5db81c5e24b93ba19d16beaadd48634b1c9fd58a
| 4,934
|
py
|
Python
|
npbench/benchmarks/nbody/nbody_dace.py
|
frahlg/npbench
|
1bc4d9e2e22f3ca67fa2bc7f40e2e751a9c8dd26
|
[
"BSD-3-Clause"
] | 27
|
2021-05-10T11:49:13.000Z
|
2022-03-22T18:07:19.000Z
|
npbench/benchmarks/nbody/nbody_dace.py
|
frahlg/npbench
|
1bc4d9e2e22f3ca67fa2bc7f40e2e751a9c8dd26
|
[
"BSD-3-Clause"
] | 3
|
2021-12-01T13:03:17.000Z
|
2022-03-17T10:53:00.000Z
|
npbench/benchmarks/nbody/nbody_dace.py
|
frahlg/npbench
|
1bc4d9e2e22f3ca67fa2bc7f40e2e751a9c8dd26
|
[
"BSD-3-Clause"
] | 7
|
2021-06-24T03:40:25.000Z
|
2022-01-26T09:04:33.000Z
|
# Adapted from https://github.com/pmocz/nbody-python/blob/master/nbody.py
# TODO: Add GPL-3.0 License
import numpy as np
import dace as dc
"""
Create Your Own N-body Simulation (With Python)
Philip Mocz (2020) Princeton Univeristy, @PMocz
Simulate orbits of stars interacting due to gravity
Code calculates pairwise forces according to Newton's Law of Gravity
"""
N, Nt = (dc.symbol(s, dtype=dc.int64) for s in ('N', 'Nt'))
# @dc.program
# def hstack(out: dc.float64[N, 3], a: dc.float64[N],
# b: dc.float64[N], c: dc.float64[N]):
# out[:, 0] = a
# out[:, 1] = b
# out[:, 2] = c
@dc.program
def getAcc(pos: dc.float64[N, 3], mass: dc.float64[N], G: dc.float64,
softening: dc.float64):
"""
Calculate the acceleration on each particle due to Newton's Law
pos is an N x 3 matrix of positions
mass is an N x 1 vector of masses
G is Newton's Gravitational constant
softening is the softening length
a is N x 3 matrix of accelerations
"""
# positions r = [x,y,z] for all particles
x = pos[:, 0:1]
y = pos[:, 1:2]
z = pos[:, 2:3]
# matrix that stores all pairwise particle separations: r_j - r_i
# dx = x.T - x
# dy = y.T - y
# dz = z.T - z
# dx = np.transpose(x) - x
# dy = np.transpose(y) - y
# dz = np.transpose(z) - z
dx = np.add.outer(-x, x)
dy = np.add.outer(-y, y)
dz = np.add.outer(-z, z)
# matrix that stores 1/r^3 for all particle pairwise particle separations
inv_r3 = (dx**2 + dy**2 + dz**2 + softening**2)
# inv_r3[inv_r3>0] = inv_r3[inv_r3>0]**(-1.5)
I = inv_r3 > 0
np.power(inv_r3, -1.5, out=inv_r3, where=I)
ax = G * (dx * inv_r3) @ mass
ay = G * (dy * inv_r3) @ mass
az = G * (dz * inv_r3) @ mass
# pack together the acceleration components
# a = np.hstack((ax,ay,az))
a = np.ndarray((N, 3), dtype=np.float64)
# hstack(a, ax, ay, az)
a[:, 0] = ax
a[:, 1] = ay
a[:, 2] = az
return a
@dc.program
def getEnergy(pos: dc.float64[N, 3], vel: dc.float64[N, 3],
mass: dc.float64[N], G: dc.float64):
"""
Get kinetic energy (KE) and potential energy (PE) of simulation
pos is N x 3 matrix of positions
vel is N x 3 matrix of velocities
mass is an N x 1 vector of masses
G is Newton's Gravitational constant
KE is the kinetic energy of the system
PE is the potential energy of the system
"""
# Kinetic Energy:
# KE = 0.5 * np.sum(np.sum( mass * vel**2 ))
# KE = 0.5 * np.sum( mass * vel**2 )
KE = 0.5 * np.sum(np.reshape(mass, (N, 1)) * vel**2)
# Potential Energy:
# positions r = [x,y,z] for all particles
x = pos[:, 0:1]
y = pos[:, 1:2]
z = pos[:, 2:3]
# matrix that stores all pairwise particle separations: r_j - r_i
# dx = x.T - x
# dy = y.T - y
# dz = z.T - z
# dx = np.transpose(x) - x
# dy = np.transpose(y) - y
# dz = np.transpose(z) - z
dx = np.add.outer(-x, x)
dy = np.add.outer(-y, y)
dz = np.add.outer(-z, z)
# matrix that stores 1/r for all particle pairwise particle separations
inv_r = np.sqrt(dx**2 + dy**2 + dz**2)
# inv_r[inv_r>0] = 1.0/inv_r[inv_r>0]
I = inv_r > 0
np.divide(1.0, inv_r, out=inv_r, where=I)
# sum over upper triangle, to count each interaction only once
# PE = G * np.sum(np.sum(np.triu(-(mass*mass.T)*inv_r,1)))
# PE = G * np.sum(np.triu(-(mass*mass.T)*inv_r,1))
tmp = -np.multiply.outer(mass, mass) * inv_r
PE = 0.0
for j in range(N):
for k in range(j + 1, N):
PE += tmp[j, k]
PE *= G
return KE, PE
@dc.program
def nbody(mass: dc.float64[N], pos: dc.float64[N, 3], vel: dc.float64[N, 3],
dt: dc.float64, G: dc.float64, softening: dc.float64):
# Convert to Center-of-Mass frame
# vel -= np.mean(mass * vel, axis=0) / np.mean(mass)
# vel -= np.mean(np.reshape(mass, (N, 1)) * vel, axis=0) / np.mean(mass)
# tmp = np.divide(np.mean(np.reshape(mass, (N, 1)) * vel, axis=0), np.mean(mass))
np.subtract(vel,
np.mean(np.reshape(mass,
(N, 1)) * vel, axis=0) / np.mean(mass),
out=vel)
# calculate initial gravitational accelerations
acc = getAcc(pos, mass, G, softening)
# calculate initial energy of system
KE = np.ndarray(Nt + 1, dtype=np.float64)
PE = np.ndarray(Nt + 1, dtype=np.float64)
KE[0], PE[0] = getEnergy(pos, vel, mass, G)
t = 0.0
# Simulation Main Loop
for i in range(Nt):
# (1/2) kick
vel += acc * dt / 2.0
# drift
pos += vel * dt
# update accelerations
acc[:] = getAcc(pos, mass, G, softening)
# (1/2) kick
vel += acc * dt / 2.0
# update time
t += dt
# get energy of system
KE[i + 1], PE[i + 1] = getEnergy(pos, vel, mass, G)
return KE, PE
| 29.195266
| 85
| 0.561005
| 843
| 4,934
| 3.252669
| 0.189798
| 0.059081
| 0.043764
| 0.02407
| 0.507659
| 0.473377
| 0.408826
| 0.329322
| 0.317651
| 0.317651
| 0
| 0.043676
| 0.285367
| 4,934
| 168
| 86
| 29.369048
| 0.733976
| 0.452574
| 0
| 0.301587
| 0
| 0
| 0.001276
| 0
| 0
| 0
| 0
| 0.005952
| 0
| 1
| 0.047619
| false
| 0
| 0.031746
| 0
| 0.126984
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5db8b350508cfde3359da0d0ee1d9036c8e97549
| 817
|
py
|
Python
|
application/__init__.py
|
Healthy-Kokoro/Hiroshima
|
87c6c533f97f55ceb33553a2409076bcd21a36d2
|
[
"MIT"
] | null | null | null |
application/__init__.py
|
Healthy-Kokoro/Hiroshima
|
87c6c533f97f55ceb33553a2409076bcd21a36d2
|
[
"MIT"
] | null | null | null |
application/__init__.py
|
Healthy-Kokoro/Hiroshima
|
87c6c533f97f55ceb33553a2409076bcd21a36d2
|
[
"MIT"
] | null | null | null |
# Third-party imports
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
configurations = {
'development': 'configurations.DevelopmentConfiguration',
'testing': 'configurations.TestingConfiguration',
'staging': 'configurations.StagingConfiguration',
'production': 'configurations.ProductionConfiguration'
}
database = SQLAlchemy()
def create_application(configuration):
application = Flask(__name__, instance_relative_config=True)
application.config.from_object(configurations[configuration])
application.config.from_pyfile('configuration.py', silent=True)
database.init_app(application)
from application.init.views import blueprint
application.register_blueprint(blueprint)
from application.metadata.views import blueprint
application.register_blueprint(blueprint)
return application
| 30.259259
| 64
| 0.831089
| 78
| 817
| 8.538462
| 0.474359
| 0.027027
| 0.063063
| 0.093093
| 0.171171
| 0.171171
| 0.171171
| 0
| 0
| 0
| 0
| 0
| 0.082007
| 817
| 26
| 65
| 31.423077
| 0.888
| 0.023256
| 0
| 0.105263
| 0
| 0
| 0.248744
| 0.184673
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.210526
| 0
| 0.315789
| 0.210526
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5db8ca5f5d703991674fff33fa5c1ac47210c351
| 692
|
py
|
Python
|
lesson5/exceptions_except.py
|
drednout/letspython
|
9747442d63873b5f71e2c15ed5528bd98ad5ac31
|
[
"BSD-2-Clause"
] | 1
|
2015-11-26T15:53:58.000Z
|
2015-11-26T15:53:58.000Z
|
lesson5/exceptions_except.py
|
drednout/letspython
|
9747442d63873b5f71e2c15ed5528bd98ad5ac31
|
[
"BSD-2-Clause"
] | null | null | null |
lesson5/exceptions_except.py
|
drednout/letspython
|
9747442d63873b5f71e2c15ed5528bd98ad5ac31
|
[
"BSD-2-Clause"
] | null | null | null |
def take_beer(fridge, number=1):
if "beer" not in fridge:
raise Exception("No beer at all:(")
if number > fridge["beer"]:
raise Exception("Not enough beer:(")
fridge["beer"] -= number
if __name__ == "__main__":
fridge = {
"beer": 2,
"milk": 1,
"meat": 3,
}
print("I wanna drink 1 bottle of beer...")
take_beer(fridge)
print("Oooh, great!")
print("I wanna drink 2 bottle of beer...")
try:
take_beer(fridge, 2)
except Exception as e:
print("Error: {}. Let's continue".format(e))
print("Fallback. Try to take 1 bottle of beer...")
take_beer(fridge, 1)
print("Oooh, awesome!")
| 22.322581
| 54
| 0.559249
| 93
| 692
| 4.032258
| 0.44086
| 0.133333
| 0.149333
| 0.085333
| 0.144
| 0.144
| 0.144
| 0
| 0
| 0
| 0
| 0.018182
| 0.284682
| 692
| 30
| 55
| 23.066667
| 0.739394
| 0
| 0
| 0
| 0
| 0
| 0.323188
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0
| 0
| 0.043478
| 0.26087
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5db9f201356818f114d992f32b2d46869da4d326
| 23,877
|
py
|
Python
|
synapse/storage/data_stores/state/store.py
|
juhovan/synapse
|
57feeab364325374b14ff67ac97c288983cc5cde
|
[
"Apache-2.0"
] | 1
|
2020-07-12T00:18:52.000Z
|
2020-07-12T00:18:52.000Z
|
synapse/storage/data_stores/state/store.py
|
juhovan/synapse
|
57feeab364325374b14ff67ac97c288983cc5cde
|
[
"Apache-2.0"
] | null | null | null |
synapse/storage/data_stores/state/store.py
|
juhovan/synapse
|
57feeab364325374b14ff67ac97c288983cc5cde
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from collections import namedtuple
from typing import Dict, Iterable, List, Set, Tuple
from twisted.internet import defer
from synapse.api.constants import EventTypes
from synapse.storage._base import SQLBaseStore
from synapse.storage.data_stores.state.bg_updates import StateBackgroundUpdateStore
from synapse.storage.database import Database
from synapse.storage.state import StateFilter
from synapse.types import StateMap
from synapse.util.caches.descriptors import cached
from synapse.util.caches.dictionary_cache import DictionaryCache
logger = logging.getLogger(__name__)
MAX_STATE_DELTA_HOPS = 100
class _GetStateGroupDelta(
namedtuple("_GetStateGroupDelta", ("prev_group", "delta_ids"))
):
"""Return type of get_state_group_delta that implements __len__, which lets
us use the itrable flag when caching
"""
__slots__ = []
def __len__(self):
return len(self.delta_ids) if self.delta_ids else 0
class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
"""A data store for fetching/storing state groups.
"""
def __init__(self, database: Database, db_conn, hs):
super(StateGroupDataStore, self).__init__(database, db_conn, hs)
# Originally the state store used a single DictionaryCache to cache the
# event IDs for the state types in a given state group to avoid hammering
# on the state_group* tables.
#
# The point of using a DictionaryCache is that it can cache a subset
# of the state events for a given state group (i.e. a subset of the keys for a
# given dict which is an entry in the cache for a given state group ID).
#
# However, this poses problems when performing complicated queries
# on the store - for instance: "give me all the state for this group, but
# limit members to this subset of users", as DictionaryCache's API isn't
# rich enough to say "please cache any of these fields, apart from this subset".
# This is problematic when lazy loading members, which requires this behaviour,
# as without it the cache has no choice but to speculatively load all
# state events for the group, which negates the efficiency being sought.
#
# Rather than overcomplicating DictionaryCache's API, we instead split the
# state_group_cache into two halves - one for tracking non-member events,
# and the other for tracking member_events. This means that lazy loading
# queries can be made in a cache-friendly manner by querying both caches
# separately and then merging the result. So for the example above, you
# would query the members cache for a specific subset of state keys
# (which DictionaryCache will handle efficiently and fine) and the non-members
# cache for all state (which DictionaryCache will similarly handle fine)
# and then just merge the results together.
#
# We size the non-members cache to be smaller than the members cache as the
# vast majority of state in Matrix (today) is member events.
self._state_group_cache = DictionaryCache(
"*stateGroupCache*",
# TODO: this hasn't been tuned yet
50000,
)
self._state_group_members_cache = DictionaryCache(
"*stateGroupMembersCache*", 500000,
)
@cached(max_entries=10000, iterable=True)
def get_state_group_delta(self, state_group):
"""Given a state group try to return a previous group and a delta between
the old and the new.
Returns:
(prev_group, delta_ids), where both may be None.
"""
def _get_state_group_delta_txn(txn):
prev_group = self.db.simple_select_one_onecol_txn(
txn,
table="state_group_edges",
keyvalues={"state_group": state_group},
retcol="prev_state_group",
allow_none=True,
)
if not prev_group:
return _GetStateGroupDelta(None, None)
delta_ids = self.db.simple_select_list_txn(
txn,
table="state_groups_state",
keyvalues={"state_group": state_group},
retcols=("type", "state_key", "event_id"),
)
return _GetStateGroupDelta(
prev_group,
{(row["type"], row["state_key"]): row["event_id"] for row in delta_ids},
)
return self.db.runInteraction(
"get_state_group_delta", _get_state_group_delta_txn
)
@defer.inlineCallbacks
def _get_state_groups_from_groups(
self, groups: List[int], state_filter: StateFilter
):
"""Returns the state groups for a given set of groups from the
database, filtering on types of state events.
Args:
groups: list of state group IDs to query
state_filter: The state filter used to fetch state
from the database.
Returns:
Deferred[Dict[int, StateMap[str]]]: Dict of state group to state map.
"""
results = {}
chunks = [groups[i : i + 100] for i in range(0, len(groups), 100)]
for chunk in chunks:
res = yield self.db.runInteraction(
"_get_state_groups_from_groups",
self._get_state_groups_from_groups_txn,
chunk,
state_filter,
)
results.update(res)
return results
def _get_state_for_group_using_cache(self, cache, group, state_filter):
"""Checks if group is in cache. See `_get_state_for_groups`
Args:
cache(DictionaryCache): the state group cache to use
group(int): The state group to lookup
state_filter (StateFilter): The state filter used to fetch state
from the database.
Returns 2-tuple (`state_dict`, `got_all`).
`got_all` is a bool indicating if we successfully retrieved all
requests state from the cache, if False we need to query the DB for the
missing state.
"""
is_all, known_absent, state_dict_ids = cache.get(group)
if is_all or state_filter.is_full():
# Either we have everything or want everything, either way
# `is_all` tells us whether we've gotten everything.
return state_filter.filter_state(state_dict_ids), is_all
# tracks whether any of our requested types are missing from the cache
missing_types = False
if state_filter.has_wildcards():
# We don't know if we fetched all the state keys for the types in
# the filter that are wildcards, so we have to assume that we may
# have missed some.
missing_types = True
else:
# There aren't any wild cards, so `concrete_types()` returns the
# complete list of event types we're wanting.
for key in state_filter.concrete_types():
if key not in state_dict_ids and key not in known_absent:
missing_types = True
break
return state_filter.filter_state(state_dict_ids), not missing_types
@defer.inlineCallbacks
def _get_state_for_groups(
self, groups: Iterable[int], state_filter: StateFilter = StateFilter.all()
):
"""Gets the state at each of a list of state groups, optionally
filtering by type/state_key
Args:
groups: list of state groups for which we want
to get the state.
state_filter: The state filter used to fetch state
from the database.
Returns:
Deferred[Dict[int, StateMap[str]]]: Dict of state group to state map.
"""
member_filter, non_member_filter = state_filter.get_member_split()
# Now we look them up in the member and non-member caches
(
non_member_state,
incomplete_groups_nm,
) = yield self._get_state_for_groups_using_cache(
groups, self._state_group_cache, state_filter=non_member_filter
)
(
member_state,
incomplete_groups_m,
) = yield self._get_state_for_groups_using_cache(
groups, self._state_group_members_cache, state_filter=member_filter
)
state = dict(non_member_state)
for group in groups:
state[group].update(member_state[group])
# Now fetch any missing groups from the database
incomplete_groups = incomplete_groups_m | incomplete_groups_nm
if not incomplete_groups:
return state
cache_sequence_nm = self._state_group_cache.sequence
cache_sequence_m = self._state_group_members_cache.sequence
# Help the cache hit ratio by expanding the filter a bit
db_state_filter = state_filter.return_expanded()
group_to_state_dict = yield self._get_state_groups_from_groups(
list(incomplete_groups), state_filter=db_state_filter
)
# Now lets update the caches
self._insert_into_cache(
group_to_state_dict,
db_state_filter,
cache_seq_num_members=cache_sequence_m,
cache_seq_num_non_members=cache_sequence_nm,
)
# And finally update the result dict, by filtering out any extra
# stuff we pulled out of the database.
for group, group_state_dict in group_to_state_dict.items():
# We just replace any existing entries, as we will have loaded
# everything we need from the database anyway.
state[group] = state_filter.filter_state(group_state_dict)
return state
def _get_state_for_groups_using_cache(
self, groups: Iterable[int], cache: DictionaryCache, state_filter: StateFilter
) -> Tuple[Dict[int, StateMap[str]], Set[int]]:
"""Gets the state at each of a list of state groups, optionally
filtering by type/state_key, querying from a specific cache.
Args:
groups: list of state groups for which we want to get the state.
cache: the cache of group ids to state dicts which
we will pass through - either the normal state cache or the
specific members state cache.
state_filter: The state filter used to fetch state from the
database.
Returns:
Tuple of dict of state_group_id to state map of entries in the
cache, and the state group ids either missing from the cache or
incomplete.
"""
results = {}
incomplete_groups = set()
for group in set(groups):
state_dict_ids, got_all = self._get_state_for_group_using_cache(
cache, group, state_filter
)
results[group] = state_dict_ids
if not got_all:
incomplete_groups.add(group)
return results, incomplete_groups
def _insert_into_cache(
self,
group_to_state_dict,
state_filter,
cache_seq_num_members,
cache_seq_num_non_members,
):
"""Inserts results from querying the database into the relevant cache.
Args:
group_to_state_dict (dict): The new entries pulled from database.
Map from state group to state dict
state_filter (StateFilter): The state filter used to fetch state
from the database.
cache_seq_num_members (int): Sequence number of member cache since
last lookup in cache
cache_seq_num_non_members (int): Sequence number of member cache since
last lookup in cache
"""
# We need to work out which types we've fetched from the DB for the
# member vs non-member caches. This should be as accurate as possible,
# but can be an underestimate (e.g. when we have wild cards)
member_filter, non_member_filter = state_filter.get_member_split()
if member_filter.is_full():
# We fetched all member events
member_types = None
else:
# `concrete_types()` will only return a subset when there are wild
# cards in the filter, but that's fine.
member_types = member_filter.concrete_types()
if non_member_filter.is_full():
# We fetched all non member events
non_member_types = None
else:
non_member_types = non_member_filter.concrete_types()
for group, group_state_dict in group_to_state_dict.items():
state_dict_members = {}
state_dict_non_members = {}
for k, v in group_state_dict.items():
if k[0] == EventTypes.Member:
state_dict_members[k] = v
else:
state_dict_non_members[k] = v
self._state_group_members_cache.update(
cache_seq_num_members,
key=group,
value=state_dict_members,
fetched_keys=member_types,
)
self._state_group_cache.update(
cache_seq_num_non_members,
key=group,
value=state_dict_non_members,
fetched_keys=non_member_types,
)
def store_state_group(
self, event_id, room_id, prev_group, delta_ids, current_state_ids
):
"""Store a new set of state, returning a newly assigned state group.
Args:
event_id (str): The event ID for which the state was calculated
room_id (str)
prev_group (int|None): A previous state group for the room, optional.
delta_ids (dict|None): The delta between state at `prev_group` and
`current_state_ids`, if `prev_group` was given. Same format as
`current_state_ids`.
current_state_ids (dict): The state to store. Map of (type, state_key)
to event_id.
Returns:
Deferred[int]: The state group ID
"""
def _store_state_group_txn(txn):
if current_state_ids is None:
# AFAIK, this can never happen
raise Exception("current_state_ids cannot be None")
state_group = self.database_engine.get_next_state_group_id(txn)
self.db.simple_insert_txn(
txn,
table="state_groups",
values={"id": state_group, "room_id": room_id, "event_id": event_id},
)
# We persist as a delta if we can, while also ensuring the chain
# of deltas isn't tooo long, as otherwise read performance degrades.
if prev_group:
is_in_db = self.db.simple_select_one_onecol_txn(
txn,
table="state_groups",
keyvalues={"id": prev_group},
retcol="id",
allow_none=True,
)
if not is_in_db:
raise Exception(
"Trying to persist state with unpersisted prev_group: %r"
% (prev_group,)
)
potential_hops = self._count_state_group_hops_txn(txn, prev_group)
if prev_group and potential_hops < MAX_STATE_DELTA_HOPS:
self.db.simple_insert_txn(
txn,
table="state_group_edges",
values={"state_group": state_group, "prev_state_group": prev_group},
)
self.db.simple_insert_many_txn(
txn,
table="state_groups_state",
values=[
{
"state_group": state_group,
"room_id": room_id,
"type": key[0],
"state_key": key[1],
"event_id": state_id,
}
for key, state_id in delta_ids.items()
],
)
else:
self.db.simple_insert_many_txn(
txn,
table="state_groups_state",
values=[
{
"state_group": state_group,
"room_id": room_id,
"type": key[0],
"state_key": key[1],
"event_id": state_id,
}
for key, state_id in current_state_ids.items()
],
)
# Prefill the state group caches with this group.
# It's fine to use the sequence like this as the state group map
# is immutable. (If the map wasn't immutable then this prefill could
# race with another update)
current_member_state_ids = {
s: ev
for (s, ev) in current_state_ids.items()
if s[0] == EventTypes.Member
}
txn.call_after(
self._state_group_members_cache.update,
self._state_group_members_cache.sequence,
key=state_group,
value=dict(current_member_state_ids),
)
current_non_member_state_ids = {
s: ev
for (s, ev) in current_state_ids.items()
if s[0] != EventTypes.Member
}
txn.call_after(
self._state_group_cache.update,
self._state_group_cache.sequence,
key=state_group,
value=dict(current_non_member_state_ids),
)
return state_group
return self.db.runInteraction("store_state_group", _store_state_group_txn)
def purge_unreferenced_state_groups(
self, room_id: str, state_groups_to_delete
) -> defer.Deferred:
"""Deletes no longer referenced state groups and de-deltas any state
groups that reference them.
Args:
room_id: The room the state groups belong to (must all be in the
same room).
state_groups_to_delete (Collection[int]): Set of all state groups
to delete.
"""
return self.db.runInteraction(
"purge_unreferenced_state_groups",
self._purge_unreferenced_state_groups,
room_id,
state_groups_to_delete,
)
def _purge_unreferenced_state_groups(self, txn, room_id, state_groups_to_delete):
logger.info(
"[purge] found %i state groups to delete", len(state_groups_to_delete)
)
rows = self.db.simple_select_many_txn(
txn,
table="state_group_edges",
column="prev_state_group",
iterable=state_groups_to_delete,
keyvalues={},
retcols=("state_group",),
)
remaining_state_groups = {
row["state_group"]
for row in rows
if row["state_group"] not in state_groups_to_delete
}
logger.info(
"[purge] de-delta-ing %i remaining state groups",
len(remaining_state_groups),
)
# Now we turn the state groups that reference to-be-deleted state
# groups to non delta versions.
for sg in remaining_state_groups:
logger.info("[purge] de-delta-ing remaining state group %s", sg)
curr_state = self._get_state_groups_from_groups_txn(txn, [sg])
curr_state = curr_state[sg]
self.db.simple_delete_txn(
txn, table="state_groups_state", keyvalues={"state_group": sg}
)
self.db.simple_delete_txn(
txn, table="state_group_edges", keyvalues={"state_group": sg}
)
self.db.simple_insert_many_txn(
txn,
table="state_groups_state",
values=[
{
"state_group": sg,
"room_id": room_id,
"type": key[0],
"state_key": key[1],
"event_id": state_id,
}
for key, state_id in curr_state.items()
],
)
logger.info("[purge] removing redundant state groups")
txn.executemany(
"DELETE FROM state_groups_state WHERE state_group = ?",
((sg,) for sg in state_groups_to_delete),
)
txn.executemany(
"DELETE FROM state_groups WHERE id = ?",
((sg,) for sg in state_groups_to_delete),
)
@defer.inlineCallbacks
def get_previous_state_groups(self, state_groups):
"""Fetch the previous groups of the given state groups.
Args:
state_groups (Iterable[int])
Returns:
Deferred[dict[int, int]]: mapping from state group to previous
state group.
"""
rows = yield self.db.simple_select_many_batch(
table="state_group_edges",
column="prev_state_group",
iterable=state_groups,
keyvalues={},
retcols=("prev_state_group", "state_group"),
desc="get_previous_state_groups",
)
return {row["state_group"]: row["prev_state_group"] for row in rows}
def purge_room_state(self, room_id, state_groups_to_delete):
"""Deletes all record of a room from state tables
Args:
room_id (str):
state_groups_to_delete (list[int]): State groups to delete
"""
return self.db.runInteraction(
"purge_room_state",
self._purge_room_state_txn,
room_id,
state_groups_to_delete,
)
def _purge_room_state_txn(self, txn, room_id, state_groups_to_delete):
# first we have to delete the state groups states
logger.info("[purge] removing %s from state_groups_state", room_id)
self.db.simple_delete_many_txn(
txn,
table="state_groups_state",
column="state_group",
iterable=state_groups_to_delete,
keyvalues={},
)
# ... and the state group edges
logger.info("[purge] removing %s from state_group_edges", room_id)
self.db.simple_delete_many_txn(
txn,
table="state_group_edges",
column="state_group",
iterable=state_groups_to_delete,
keyvalues={},
)
# ... and the state groups
logger.info("[purge] removing %s from state_groups", room_id)
self.db.simple_delete_many_txn(
txn,
table="state_groups",
column="id",
iterable=state_groups_to_delete,
keyvalues={},
)
| 37.307813
| 88
| 0.590568
| 2,885
| 23,877
| 4.638128
| 0.161872
| 0.06726
| 0.019431
| 0.026979
| 0.3496
| 0.298408
| 0.249533
| 0.224273
| 0.184441
| 0.161572
| 0
| 0.003186
| 0.342631
| 23,877
| 639
| 89
| 37.366197
| 0.849325
| 0.320895
| 0
| 0.354571
| 0
| 0
| 0.087112
| 0.008445
| 0
| 0
| 0
| 0.001565
| 0
| 1
| 0.044321
| false
| 0
| 0.033241
| 0.00277
| 0.127424
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dba8f581c63a89cafcdb31c2be81f0648adb964
| 1,422
|
py
|
Python
|
mnist/convolutional.py
|
Colins-Ford/mnist-webapp
|
20e9b6f5520d5bda957d9501347f787450555db8
|
[
"Apache-2.0"
] | null | null | null |
mnist/convolutional.py
|
Colins-Ford/mnist-webapp
|
20e9b6f5520d5bda957d9501347f787450555db8
|
[
"Apache-2.0"
] | null | null | null |
mnist/convolutional.py
|
Colins-Ford/mnist-webapp
|
20e9b6f5520d5bda957d9501347f787450555db8
|
[
"Apache-2.0"
] | null | null | null |
import os
from mnist import model
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
data = input_data.read_data_sets("data/dataset/", one_hot=True)
# model
with tf.variable_scope("convolutional"):
x = tf.placeholder(tf.float32, [None, 784])
keep_prob = tf.placeholder(tf.float32)
y, variables = model.convolutional(x, keep_prob)
# train
y_ = tf.placeholder(tf.float32, [None, 10])
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver(variables)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(20000):
batch = data.train.next_batch(50)
if i % 100 == 0:
train_accuracy = accuracy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})
print("step %d, training accuracy %g" % (i, train_accuracy))
sess.run(train_step, feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
print(sess.run(accuracy, feed_dict={x: data.test.images, y_: data.test.labels, keep_prob: 1.0}))
path = saver.save(
sess, os.path.join(os.path.dirname(__file__), 'data', 'convolutional.ckpt'),
write_meta_graph=False, write_state=False)
print("Saved:", path)
| 37.421053
| 100
| 0.69339
| 218
| 1,422
| 4.33945
| 0.417431
| 0.042283
| 0.047569
| 0.069767
| 0.118393
| 0.063425
| 0.063425
| 0.063425
| 0.063425
| 0.063425
| 0
| 0.031746
| 0.158228
| 1,422
| 37
| 101
| 38.432432
| 0.758563
| 0.007736
| 0
| 0
| 0
| 0
| 0.058949
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0.107143
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dbd482917f27cdd677d99ffd355bb76525f3a13
| 4,110
|
py
|
Python
|
tools/test_net_batch.py
|
abhirevan/pedestrian-detector
|
f4fa4cd59315ea515ace3c529b716ff3173e2205
|
[
"BSD-2-Clause"
] | null | null | null |
tools/test_net_batch.py
|
abhirevan/pedestrian-detector
|
f4fa4cd59315ea515ace3c529b716ff3173e2205
|
[
"BSD-2-Clause"
] | null | null | null |
tools/test_net_batch.py
|
abhirevan/pedestrian-detector
|
f4fa4cd59315ea515ace3c529b716ff3173e2205
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Test a Fast R-CNN network on an image database."""
import _init_paths
from fast_rcnn.test import test_net
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list
from datasets.factory import get_imdb
import caffe
import argparse
import pprint
import time, os, sys
import pandas as pd
def splitall(path):
allparts = []
while 1:
parts = os.path.split(path)
if parts[0] == path: # sentinel for absolute paths
allparts.insert(0, parts[0])
break
elif parts[1] == path: # sentinel for relative paths
allparts.insert(0, parts[1])
break
else:
path = parts[0]
allparts.insert(0, parts[1])
return allparts
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Test a Fast R-CNN network pipeline')
parser.add_argument('--gpu', dest='gpu_id', help='GPU id to use',
default=0, type=int, required=True)
parser.add_argument('--dir', dest='dir',
help='Directory of the model files',
default="", type=str, required=True)
parser.add_argument('--models', dest='model_files',
help='Text file with names of models',
default=None, type=str, required=True)
parser.add_argument('--prototxt', dest='prototxt',
help='prototxt', default=None, type=str, required=True)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to test',
default='ped_test_small', type=str, required=True)
parser.add_argument('--cfg', dest='cfg_file',
help='cfg',
default='experiments/cfgs/faster_rcnn_end2end.yml', type=str)
parser.add_argument('--res', dest='res_file',
help='result file',
default='', type=str, required=True)
args = parser.parse_args()
return args
def run_test_net(gpu_id, caffemodel, prototxt, imdb_name, cfg_file):
if cfg_file is not None:
cfg_from_file(cfg_file)
cfg.GPU_ID = gpu_id
print('Using config:')
pprint.pprint(cfg)
while not os.path.exists(caffemodel):
print('Waiting for {} to exist...'.format(caffemodel))
time.sleep(10)
caffe.set_mode_gpu()
caffe.set_device(gpu_id)
net = caffe.Net(prototxt, caffemodel, caffe.TEST)
net.name = os.path.splitext(os.path.basename(caffemodel))[0]
imdb = get_imdb(imdb_name)
if not cfg.TEST.HAS_RPN:
imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
n, _ = os.path.splitext(args.caffemodel)
paths = splitall(n)
proposal_prefix = paths[-1]
return test_net(net, imdb, max_per_image=100, vis=False, proposal_prefix=proposal_prefix)
def run_test_nets(gpu_id, dir, model_files, prototxt, imdb_name, cfg_file, res_file):
models = [line.rstrip('\n') for line in open(os.path.join(dir, model_files))]
df_results = pd.DataFrame()
for model in models:
results = run_test_net(gpu_id, os.path.join(dir, model), prototxt, imdb_name, cfg_file)
for result in results:
result['file'] = model
df_results = df_results.append(results, ignore_index=True)
df_results.to_csv(os.path.join(dir, res_file))
if __name__ == '__main__':
# args = parse_args()
gpu_id = 0
# dir = '/home/abhijitcbim/git/pedestrian-detector/output/faster_rcnn_end2end/train/backup'
# model_files = 'test.txt'
args = parse_args()
print('Called with args:')
print(args)
run_test_nets(args.gpu_id, args.dir, args.model_files, args.prototxt, args.imdb_name, args.cfg_file, args.res_file)
# run_test_net(gpu_id,caffemodel, prototxt, imdb_name, cfg_file)
| 33.145161
| 119
| 0.615572
| 540
| 4,110
| 4.496296
| 0.309259
| 0.022652
| 0.049012
| 0.043245
| 0.219522
| 0.124382
| 0.107908
| 0.078254
| 0.078254
| 0.039539
| 0
| 0.008021
| 0.241606
| 4,110
| 123
| 120
| 33.414634
| 0.770934
| 0.141119
| 0
| 0.049383
| 0
| 0
| 0.103803
| 0.011438
| 0
| 0
| 0
| 0
| 0
| 1
| 0.049383
| false
| 0
| 0.111111
| 0
| 0.197531
| 0.074074
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dbebf189d084ec54743890289ba79eb7c5bba5c
| 5,831
|
py
|
Python
|
yolox/data/datasets/mot.py
|
ldelzott/ByteTrack
|
5f8ab49a913a551d041918607a0bd2473602ad39
|
[
"MIT"
] | null | null | null |
yolox/data/datasets/mot.py
|
ldelzott/ByteTrack
|
5f8ab49a913a551d041918607a0bd2473602ad39
|
[
"MIT"
] | null | null | null |
yolox/data/datasets/mot.py
|
ldelzott/ByteTrack
|
5f8ab49a913a551d041918607a0bd2473602ad39
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
from pycocotools.coco import COCO
import os
from ..dataloading import get_yolox_datadir
from .datasets_wrapper import Dataset
class MOTDataset(Dataset):
"""
COCO dataset class.
"""
def __init__( # This function is called in the exps yolox_x_mot17_half.py in this way: dataset = MOTDataset(
# data_dir=os.path.join(get_yolox_datadir(), "mot"),
# json_file=self.train_ann,
# name='train',
# img_size=self.input_size,
# preproc=TrainTransform(rgb_means=(0.485, 0.456, 0.406),
# std=(0.229, 0.224, 0.225),
# max_labels=500,),)
self,
data_dir=None,
json_file="train_half.json",
name="train",
img_size=(608, 1088),
preproc=None,
):
"""
COCO dataset initialization. Annotation data are read into memory by COCO API.
Args:
data_dir (str): dataset root directory
json_file (str): COCO json file name
name (str): COCO data name (e.g. 'train2017' or 'val2017')
img_size (int): target image size after pre-processing
preproc: data augmentation strategy
"""
super().__init__(img_size)
if data_dir is None:
data_dir = os.path.join(get_yolox_datadir(), "mot")
self.data_dir = data_dir
self.json_file = json_file
self.coco = COCO(os.path.join(self.data_dir, "annotations", self.json_file))
self.ids = self.coco.getImgIds()
self.class_ids = sorted(self.coco.getCatIds())
cats = self.coco.loadCats(self.coco.getCatIds())
self._classes = tuple([c["name"] for c in cats])
self.annotations = self._load_coco_annotations()
self.name = name
self.img_size = img_size
self.preproc = preproc
def __len__(self):
return len(self.ids)
def _load_coco_annotations(self):
return [self.load_anno_from_ids(_ids) for _ids in self.ids]
def load_anno_from_ids(self, id_):
im_ann = self.coco.loadImgs(id_)[0]
width = im_ann["width"]
height = im_ann["height"]
#frame_id = im_ann["frame_id"] : the default value '1' avoid to break augmentation & evaluation processes
frame_id = 1
#video_id = im_ann["video_id"] : the default value '1' avoid to break augmentation & evaluation processes
video_id = 1
anno_ids = self.coco.getAnnIds(imgIds=[int(id_)], iscrowd=False)
annotations = self.coco.loadAnns(anno_ids)
objs = []
for obj in annotations:
x1 = obj["bbox"][0]
y1 = obj["bbox"][1]
x2 = x1 + obj["bbox"][2]
y2 = y1 + obj["bbox"][3]
if obj["area"] > 0 and x2 >= x1 and y2 >= y1:
obj["clean_bbox"] = [x1, y1, x2, y2]
objs.append(obj)
num_objs = len(objs)
res = np.zeros((num_objs, 6))
for ix, obj in enumerate(objs):
cls = self.class_ids.index(obj["category_id"])
res[ix, 0:4] = obj["clean_bbox"]
res[ix, 4] = cls
#res[ix, 5] = obj["track_id"] # See comment line 66; same comment for the default value 1
res[ix, 5] = 1
file_name = im_ann["file_name"] if "file_name" in im_ann else "{:012}".format(id_) + ".jpg"
img_info = (height, width, frame_id, video_id, file_name)
del im_ann, annotations
return (res, img_info, file_name)
def load_anno(self, index):
return self.annotations[index][0]
def pull_item(self, index):
id_ = self.ids[index]
res, img_info, file_name = self.annotations[index]
# load image and preprocess
img_file = os.path.join(
self.data_dir, self.name, file_name
)
img = cv2.imread(img_file)
assert img is not None
return img, res.copy(), img_info, np.array([id_])
@Dataset.resize_getitem
def __getitem__(self, index):
"""
One image / label pair for the given index is picked up and pre-processed.
Args:
index (int): data index
Returns:
img (numpy.ndarray): pre-processed image
padded_labels (torch.Tensor): pre-processed label data.
The shape is :math:`[max_labels, 5]`.
each label consists of [class, xc, yc, w, h]:
class (float): class index.
xc, yc (float) : center of bbox whose values range from 0 to 1.
w, h (float) : size of bbox whose values range from 0 to 1.
info_img : tuple of h, w, nh, nw, dx, dy.
h, w (int): original shape of the image
nh, nw (int): shape of the resized image without padding
dx, dy (int): pad size
img_id (int): same as the input index. Used for evaluation.
"""
img, target, img_info, img_id = self.pull_item(index)
if self.preproc is not None:
img, target = self.preproc(img, target, self.input_dim)
return img, target, img_info, img_id
| 40.776224
| 165
| 0.510547
| 703
| 5,831
| 4.056899
| 0.297297
| 0.02209
| 0.014025
| 0.01683
| 0.130435
| 0.117812
| 0.088359
| 0.088359
| 0.088359
| 0.042777
| 0
| 0.025099
| 0.391871
| 5,831
| 142
| 166
| 41.06338
| 0.779188
| 0.300463
| 0
| 0
| 0
| 0
| 0.033359
| 0
| 0
| 0
| 0
| 0
| 0.012821
| 1
| 0.089744
| false
| 0
| 0.076923
| 0.038462
| 0.25641
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dbf449975065338e5216b26f0b50de7db0d2cd0
| 4,740
|
py
|
Python
|
src/poetry/console/commands/remove.py
|
pkoch/poetry
|
d22c5a7187d8b5a30196a7df58111b3c90be7d22
|
[
"MIT"
] | null | null | null |
src/poetry/console/commands/remove.py
|
pkoch/poetry
|
d22c5a7187d8b5a30196a7df58111b3c90be7d22
|
[
"MIT"
] | null | null | null |
src/poetry/console/commands/remove.py
|
pkoch/poetry
|
d22c5a7187d8b5a30196a7df58111b3c90be7d22
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from typing import Any
from cleo.helpers import argument
from cleo.helpers import option
from tomlkit.toml_document import TOMLDocument
try:
from poetry.core.packages.dependency_group import MAIN_GROUP
except ImportError:
MAIN_GROUP = "default"
from poetry.console.commands.installer_command import InstallerCommand
class RemoveCommand(InstallerCommand):
name = "remove"
description = "Removes a package from the project dependencies."
arguments = [argument("packages", "The packages to remove.", multiple=True)]
options = [
option("group", "G", "The group to remove the dependency from.", flag=False),
option("dev", "D", "Remove a package from the development dependencies."),
option(
"dry-run",
None,
"Output the operations but do not execute anything "
"(implicitly enables --verbose).",
),
]
help = """The <info>remove</info> command removes a package from the current
list of installed packages
<info>poetry remove</info>"""
loggers = ["poetry.repositories.pypi_repository", "poetry.inspection.info"]
def handle(self) -> int:
packages = self.argument("packages")
if self.option("dev"):
self.line_error(
"<warning>The --dev option is deprecated, "
"use the `--group dev` notation instead.</warning>"
)
group = "dev"
else:
group = self.option("group", self.default_group)
content: dict[str, Any] = self.poetry.file.read()
poetry_content = content["tool"]["poetry"]
if group is None:
removed = []
group_sections = [
(group_name, group_section.get("dependencies", {}))
for group_name, group_section in poetry_content.get("group", {}).items()
]
for group_name, section in [
(MAIN_GROUP, poetry_content["dependencies"])
] + group_sections:
removed += self._remove_packages(packages, section, group_name)
if group_name != MAIN_GROUP:
if not section:
del poetry_content["group"][group_name]
else:
poetry_content["group"][group_name]["dependencies"] = section
elif group == "dev" and "dev-dependencies" in poetry_content:
# We need to account for the old `dev-dependencies` section
removed = self._remove_packages(
packages, poetry_content["dev-dependencies"], "dev"
)
if not poetry_content["dev-dependencies"]:
del poetry_content["dev-dependencies"]
else:
removed = self._remove_packages(
packages, poetry_content["group"][group].get("dependencies", {}), group
)
if not poetry_content["group"][group]:
del poetry_content["group"][group]
if "group" in poetry_content and not poetry_content["group"]:
del poetry_content["group"]
removed_set = set(removed)
not_found = set(packages).difference(removed_set)
if not_found:
raise ValueError(
"The following packages were not found: " + ", ".join(sorted(not_found))
)
# Refresh the locker
self.poetry.set_locker(
self.poetry.locker.__class__(self.poetry.locker.lock.path, poetry_content)
)
self._installer.set_locker(self.poetry.locker)
# Update packages
self._installer.use_executor(
self.poetry.config.get("experimental.new-installer", False)
)
self._installer.dry_run(self.option("dry-run", False))
self._installer.verbose(self._io.is_verbose())
self._installer.update(True)
self._installer.whitelist(removed_set)
status = self._installer.run()
if not self.option("dry-run") and status == 0:
assert isinstance(content, TOMLDocument)
self.poetry.file.write(content)
return status
def _remove_packages(
self, packages: list[str], section: dict[str, Any], group_name: str
) -> list[str]:
removed = []
group = self.poetry.package.dependency_group(group_name)
section_keys = list(section.keys())
for package in packages:
for existing_package in section_keys:
if existing_package.lower() == package.lower():
del section[existing_package]
removed.append(package)
group.remove_dependency(package)
return removed
| 34.347826
| 88
| 0.59789
| 503
| 4,740
| 5.473161
| 0.276342
| 0.075554
| 0.045768
| 0.041773
| 0.120959
| 0.033418
| 0.033418
| 0
| 0
| 0
| 0
| 0.0003
| 0.29789
| 4,740
| 137
| 89
| 34.59854
| 0.826923
| 0.019409
| 0
| 0.067308
| 0
| 0
| 0.177003
| 0.017873
| 0
| 0
| 0
| 0
| 0.009615
| 1
| 0.019231
| false
| 0
| 0.076923
| 0
| 0.182692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dbff166d1570c685dadc7e901e806b3102dde0f
| 3,316
|
py
|
Python
|
orrinjelo/aoc2021/day_11.py
|
orrinjelo/AdventOfCode2021
|
6fce5c48ec3dc602b393824f592a5c6db2a8b66f
|
[
"MIT"
] | null | null | null |
orrinjelo/aoc2021/day_11.py
|
orrinjelo/AdventOfCode2021
|
6fce5c48ec3dc602b393824f592a5c6db2a8b66f
|
[
"MIT"
] | null | null | null |
orrinjelo/aoc2021/day_11.py
|
orrinjelo/AdventOfCode2021
|
6fce5c48ec3dc602b393824f592a5c6db2a8b66f
|
[
"MIT"
] | null | null | null |
from orrinjelo.utils.decorators import timeit
import numpy as np
def parse(lines):
return np.array([[int(c) for c in line.strip()] for line in lines])
visited = []
def flash(a, x, y):
global visited
if (x,y) in visited:
return
for dx in range(-1,2):
for dy in range(-1,2):
if dx == 0 and dy == 0:
continue
if x+dx < 0 or x+dx >= a.shape[0]:
continue
if y+dy < 0 or y+dy >= a.shape[1]:
continue
a[x+dx, y+dy] += 1
visited.append((x,y))
if a[x+dx, y+dy] > 9:
flash(a, x+dx, y+dy)
def progress(a):
global visited
a += 1
x,y = np.where(a > 9)
visited = []
for i in range(len(x)):
flash(a,x[i],y[i])
count = np.sum(a > 9)
# print('a:\n', a)
a[a > 9] = 0
return a, count
@timeit("Day 11 Part 1")
def part1(input_str, use_rust=False):
octomap = parse(input_str)
total_count = 0
for i in range(100):
octomap, count = progress(octomap)
total_count += count
return total_count
@timeit("Day 11 Part 2")
def part2(input_str, use_rust=False):
octomap = parse(input_str)
step = 0
while True:
step += 1
octomap, count = progress(octomap)
if count == octomap.shape[0]*octomap.shape[1]:
break
return step
# = Test ================================================
inputlist = [
'5483143223',
'2745854711',
'5264556173',
'6141336146',
'6357385478',
'4167524645',
'2176841721',
'6882881134',
'4846848554',
'5283751526',
]
def test_part1():
# import matplotlib.pyplot as plt
# plt.imshow(parse(inputlist))
# plt.show()
assert part1(inputlist) == 1656
def test_part2():
assert part2(inputlist) == 195
import pygame
import sys
def plot(input_str):
# octomap = parse(input_str)
octomap = np.random.randint(0,9,(100,100))
pygame.init()
clock = pygame.time.Clock()
scale = 5
screen = pygame.display.set_mode((octomap.shape[0]*scale,octomap.shape[1]*scale))
surface = pygame.Surface((octomap.shape[0]*scale,octomap.shape[1]*scale))
frame = 0
history = []
for i in range(500):
print('Generating frame #', i)
octomap, _ = progress(octomap)
history.append(np.copy(octomap))
input()
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit(); sys.exit();
# erase the screen
screen.fill((255,0,0))
try:
octomap = history[frame]
except:
frame = 0
for i in range(octomap.shape[0]):
for j in range(octomap.shape[1]):
if octomap[i,j] == 0:
brightness = 255
else:
brightness = int(255.0 * octomap[i,j]/10.0)
print(i*scale, j*scale, brightness)
pygame.draw.rect(
screen,
(brightness,brightness,brightness),
pygame.Rect(i*scale, j*scale, scale, scale)
)
pygame.display.update()
# surface.blit(screen, (0,0))
clock.tick(30)
frame += 1
| 22.557823
| 85
| 0.518697
| 425
| 3,316
| 4.011765
| 0.294118
| 0.056305
| 0.014076
| 0.025806
| 0.139003
| 0.08915
| 0.08915
| 0.08915
| 0.046921
| 0
| 0
| 0.084015
| 0.335947
| 3,316
| 147
| 86
| 22.557823
| 0.690282
| 0.065139
| 0
| 0.142857
| 0
| 0
| 0.046572
| 0
| 0
| 0
| 0
| 0
| 0.019048
| 1
| 0.07619
| false
| 0
| 0.038095
| 0.009524
| 0.161905
| 0.019048
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dc0c299dbdb6b798fc1619ba108af859bcce78e
| 3,329
|
py
|
Python
|
services/train/single.py
|
paper2code/torch2vec-restful-service
|
6c4412d84d067268bf988b1f31cef716a2ed23a5
|
[
"MIT"
] | 2
|
2020-09-13T18:08:52.000Z
|
2020-09-19T05:26:50.000Z
|
services/train/single.py
|
paper2code/torch2vec-restful-service
|
6c4412d84d067268bf988b1f31cef716a2ed23a5
|
[
"MIT"
] | null | null | null |
services/train/single.py
|
paper2code/torch2vec-restful-service
|
6c4412d84d067268bf988b1f31cef716a2ed23a5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 26 19:15:34 2020
@author: deviantpadam
"""
import pandas as pd
import numpy as np
import concurrent.futures
import os
import tqdm
from collections import Counter
from torch2vec.data import DataPreparation
from torch2vec.torch2vec import DM
# train = pd.read_csv('/home/deviantpadam/Downloads/example.csv',delimiter='\t')
# train = pd.read_csv('/home/deviantpadam/Downloads/example (1).csv')
train = pd.read_csv('../data/suggest_dump.txt',delimiter='\t')
def cleaner(train):
sub=(train['subjects'].str.lower()).str.split(',',expand=True)
sub.drop([2,3],axis=1,inplace=True)
sub.columns = ['subject1','subject2']
sub.fillna('none',inplace=True)
tasks = (train['tasks'].str.lower()).str.split(',',expand=True)[0]
tasks.fillna('none',inplace=True)
tasks.name = 'task'
train = pd.concat([train,sub,tasks],axis=1).drop(['subjects','tasks'],axis=1)
train.fillna('none',inplace=True)
return train
train = cleaner(train)
corpus = train['authors']+' '+train['title']+' '+train['summary']+' '+train['subject1']+' '+train['subject2']+' '+train['task']
corpus.name = 'text'
corpus = pd.concat([train['subject1'],train['subject2'],train['task'],corpus],axis=1)
def phraser(corpus,workers=-1):
if workers==-1:
workers = os.cpu_count()
chunks = np.array_split(corpus,workers)
with concurrent.futures.ProcessPoolExecutor(workers) as executor:
result = np.concatenate(list(tqdm.tqdm(executor.map(_add_bigrams,chunks),total=workers,desc='Phrasing using {} cores'.format(workers))),axis=0)
executor.shutdown(wait=True)
# result = _add_bigrams(data)
global bigrams
del bigrams
return pd.DataFrame({'text':np.array(result)})['text']
def _add_bigrams(text):
for idx in range(len(text)):
length=len(text[idx])-1
word_count=0
while word_count<length:
if text[idx][word_count]+' '+text[idx][word_count+1] in bigrams:
text[idx][word_count] = text[idx][word_count]+' '+text[idx][word_count+1]
text[idx].remove(text[idx][word_count+1])
length = len(text[idx])-1
# print(cor[i][j]+' '+cor[i][j+1])
word_count+=1
return text
def _get_bigrams(corpus,min_count):
text = np.copy(corpus)
vocab = [word for sen in text for word in sen]
ngram = [(i,j) for i,j in zip(vocab[:-1],vocab[1:])]
freq = Counter(ngram)
filterbi = [bigram for bigram in freq.most_common() if bigram[1]>min_count]
bigrams = [" ".join(bigram[0]) for bigram in filterbi]
return bigrams
data = DataPreparation(corpus.reset_index(),f_size=3)
data.tokenize()
bigrams = _get_bigrams(data.corpus.values,min_count=700)
data.corpus = phraser(data.corpus.values)
bigrams = _get_bigrams(data.corpus.values,min_count=500)
data.corpus = phraser(data.corpus.values)
data.vocab_builder()
doc, context, target_noise_ids = data.get_data(window_size=5,num_noise_words=10)
model = DM(vec_dim=100,num_docs=len(data),num_words=data.vocab_size).cuda()
num_workers = os.cpu_count()
model.fit(doc_ids=doc,context=context,target_noise_ids=target_noise_ids,epochs=20,batch_size=8000,num_workers=num_workers)
model.save_model(data.document_ids,data.args,file_name='weights')
| 36.988889
| 151
| 0.681586
| 484
| 3,329
| 4.568182
| 0.340909
| 0.028494
| 0.029851
| 0.043419
| 0.252827
| 0.206242
| 0.152872
| 0.115785
| 0.029851
| 0
| 0
| 0.023001
| 0.151096
| 3,329
| 89
| 152
| 37.404494
| 0.759377
| 0.097927
| 0
| 0.061538
| 0
| 0
| 0.065217
| 0.008027
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061538
| false
| 0
| 0.123077
| 0
| 0.246154
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dc0fa811b71f512df88503ac7e13855083e0792
| 8,399
|
py
|
Python
|
tests/sources/test_document_oereblex.py
|
geo-bl-ch/pyramid_oereb
|
767375a4adda4589e12c4257377fc30258cdfcb3
|
[
"BSD-2-Clause"
] | null | null | null |
tests/sources/test_document_oereblex.py
|
geo-bl-ch/pyramid_oereb
|
767375a4adda4589e12c4257377fc30258cdfcb3
|
[
"BSD-2-Clause"
] | null | null | null |
tests/sources/test_document_oereblex.py
|
geo-bl-ch/pyramid_oereb
|
767375a4adda4589e12c4257377fc30258cdfcb3
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import datetime
import pytest
import requests_mock
from geolink_formatter.entity import Document, File
from requests.auth import HTTPBasicAuth
from pyramid_oereb.contrib.sources.document import OEREBlexSource
from pyramid_oereb.lib.records.documents import DocumentRecord, LegalProvisionRecord
from pyramid_oereb.lib.records.office import OfficeRecord
from tests.mockrequest import MockParameter
@pytest.mark.parametrize('valid,cfg', [
(True, {
'host': 'http://oereblex.example.com',
'language': 'de',
'canton': 'BL'
}),
(False, {
'language': 'de',
'canton': 'BL'
}),
(False, {
'host': 'http://oereblex.example.com',
'language': 'german',
'canton': 'BL'
}),
(False, {
'host': 'http://oereblex.example.com',
'language': 'de'
})
])
def test_init(valid, cfg):
if valid:
assert isinstance(OEREBlexSource(**cfg), OEREBlexSource)
else:
with pytest.raises(AssertionError):
OEREBlexSource(**cfg)
@pytest.mark.parametrize('key,language,result', [
('official_title', None, None),
('municipality', None, 'Liestal'),
('municipality', 'de', {'de': 'Liestal'})
])
def test_get_mapped_value(key, language, result):
file_ = File('Test', '/api/attachments/1', 'main')
document = Document(id='test', title='Test', category='main', doctype='decree', files=[file_],
enactment_date=datetime.date.today(), subtype='Liestal', authority='Office')
source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL',
mapping={'municipality': 'subtype'})
assert source._get_mapped_value(document, key, language=language) == result
@pytest.mark.parametrize('i,document', [
(1, Document(
id='doc1',
title='Document 1',
category='main',
doctype='edict',
authority='Office',
files=[File('File 1', '/api/attachments/1', 'main')],
enactment_date=datetime.date.today()
)),
(2, Document(
id='doc2',
title='Document 2',
category='main',
doctype='decree',
authority='Office',
files=[
File('File 2', '/api/attachments/2', 'main'),
File('File 3', '/api/attachments/3', 'additional')
],
enactment_date=datetime.date.today()
)),
(3, Document(
id='doc1',
title='Document 1',
category='main',
doctype='invalid',
authority='Office',
files=[File('File 1', '/api/attachments/1', 'main')],
enactment_date=datetime.date.today()
)),
(4, Document(
id='doc1',
title='Document 1',
category='main',
doctype='decree',
authority='Office',
files=[],
enactment_date=datetime.date.today()
))
])
def test_get_document_records(i, document):
language = 'de'
source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL')
references = [
Document(
id='ref',
title='Reference',
category='related',
doctype='edict',
authority='Office',
files=[File('Reference file', '/api/attachments/4', 'main')],
enactment_date=datetime.date.today()
)
]
if i == 3:
with pytest.raises(TypeError):
source._get_document_records(document, language, references)
elif i == 4:
assert source._get_document_records(document, language, references) == []
else:
records = source._get_document_records(document, language, references)
assert len(records) == i
for idx, record in enumerate(records):
if i == 1:
assert isinstance(record, DocumentRecord)
elif i == 2:
assert isinstance(record, LegalProvisionRecord)
assert record.title == {'de': 'Document {0}'.format(i)}
assert record.published_from == datetime.date.today()
assert record.canton == 'BL'
assert record.text_at_web == {'de': '/api/attachments/{fid}'.format(fid=i + idx)}
assert len(record.references) == 1
reference = record.references[0]
assert isinstance(reference, DocumentRecord)
assert reference.title == {'de': 'Reference'}
assert reference.canton == 'BL'
assert reference.text_at_web == {'de': '/api/attachments/4'}
def test_read():
with requests_mock.mock() as m:
with open('./tests/resources/geolink_v1.1.1.xml', 'rb') as f:
m.get('http://oereblex.example.com/api/geolinks/100.xml', content=f.read())
source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL')
source.read(MockParameter(), 100)
assert len(source.records) == 2
document = source.records[0]
assert isinstance(document, DocumentRecord)
assert isinstance(document.responsible_office, OfficeRecord)
assert document.responsible_office.name == {'de': 'Landeskanzlei'}
assert document.canton == 'BL'
assert document.text_at_web == {
'de': 'http://oereblex.example.com/api/attachments/313'
}
assert len(document.references) == 5
def test_read_related_decree_as_main():
with requests_mock.mock() as m:
with open('./tests/resources/geolink_v1.1.1.xml', 'rb') as f:
m.get('http://oereblex.example.com/api/geolinks/100.xml', content=f.read())
source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL',
related_decree_as_main=True)
source.read(MockParameter(), 100)
assert len(source.records) == 3
document = source.records[0]
assert isinstance(document, DocumentRecord)
assert isinstance(document.responsible_office, OfficeRecord)
assert document.responsible_office.name == {'de': 'Landeskanzlei'}
assert document.canton == 'BL'
assert document.text_at_web == {
'de': 'http://oereblex.example.com/api/attachments/313'
}
assert len(document.references) == 4
def test_read_with_version_in_url():
with requests_mock.mock() as m:
with open('./tests/resources/geolink_v1.1.1.xml', 'rb') as f:
m.get('http://oereblex.example.com/api/1.1.1/geolinks/100.xml', content=f.read())
source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL',
pass_version=True)
source.read(MockParameter(), 100)
assert len(source.records) == 2
def test_read_with_specified_version():
with requests_mock.mock() as m:
with open('./tests/resources/geolink_v1.0.0.xml', 'rb') as f:
m.get('http://oereblex.example.com/api/1.0.0/geolinks/100.xml', content=f.read())
source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL',
pass_version=True, version='1.0.0')
source.read(MockParameter(), 100)
assert len(source.records) == 2
def test_read_with_specified_language():
with requests_mock.mock() as m:
with open('./tests/resources/geolink_v1.1.1.xml', 'rb') as f:
m.get('http://oereblex.example.com/api/geolinks/100.xml?locale=fr', content=f.read())
source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL')
params = MockParameter()
params.set_language('fr')
source.read(params, 100)
assert len(source.records) == 2
document = source.records[0]
assert document.responsible_office.name == {'fr': 'Landeskanzlei'}
assert document.text_at_web == {
'fr': 'http://oereblex.example.com/api/attachments/313'
}
def test_authentication():
auth = {
'username': 'test',
'password': 'test'
}
source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', auth=auth)
assert isinstance(source._auth, HTTPBasicAuth)
def test_get_document_title():
document = Document([], id='1', title='Test')
result = {'de': 'Test'}
assert OEREBlexSource._get_document_title(document, File(), 'de') == result
| 37.328889
| 102
| 0.604239
| 933
| 8,399
| 5.340836
| 0.148982
| 0.045756
| 0.072446
| 0.083885
| 0.613486
| 0.564319
| 0.539233
| 0.486655
| 0.486655
| 0.415613
| 0
| 0.016502
| 0.24241
| 8,399
| 224
| 103
| 37.495536
| 0.76662
| 0.0025
| 0
| 0.452261
| 0
| 0.01005
| 0.196991
| 0.024117
| 0
| 0
| 0
| 0
| 0.18593
| 1
| 0.050251
| false
| 0.015075
| 0.045226
| 0
| 0.095477
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dc1f18fa3f023890a5249859cd11435ad90ffca
| 1,088
|
py
|
Python
|
trainNN/run_bichrom.py
|
yztxwd/Bichrom
|
3939b8e52816a02b34122feef27c8e0a06e31d8e
|
[
"MIT"
] | 3
|
2021-02-09T14:07:48.000Z
|
2021-06-21T18:31:54.000Z
|
trainNN/run_bichrom.py
|
yztxwd/Bichrom
|
3939b8e52816a02b34122feef27c8e0a06e31d8e
|
[
"MIT"
] | 5
|
2021-02-05T03:46:37.000Z
|
2022-03-16T16:34:41.000Z
|
trainNN/run_bichrom.py
|
yztxwd/Bichrom
|
3939b8e52816a02b34122feef27c8e0a06e31d8e
|
[
"MIT"
] | 4
|
2021-01-09T19:59:51.000Z
|
2021-11-12T21:08:40.000Z
|
import argparse
import yaml
from subprocess import call
from train import train_bichrom
if __name__ == '__main__':
# parsing
parser = argparse.ArgumentParser(description='Train and Evaluate Bichrom')
parser.add_argument('-training_schema_yaml', required=True,
help='YAML file with paths to train, test and val data')
parser.add_argument('-len', help='Size of genomic windows',
required=True, type=int)
parser.add_argument('-outdir', required=True, help='Output directory')
parser.add_argument('-nbins', type=int, required=True, help='Number of bins')
args = parser.parse_args()
# load the yaml file with input data paths:
with open(args.training_schema_yaml, 'r') as f:
try:
data_paths = yaml.safe_load(f)
except yaml.YAMLError as exc:
print(exc)
# create the output directory:
outdir = args.outdir
call(['mkdir', outdir])
train_bichrom(data_paths=data_paths, outdir=outdir, seq_len=args.len,
bin_size=int(args.len/args.nbins))
| 38.857143
| 81
| 0.662684
| 142
| 1,088
| 4.901408
| 0.443662
| 0.051724
| 0.097701
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.232537
| 1,088
| 28
| 82
| 38.857143
| 0.833533
| 0.071691
| 0
| 0
| 0
| 0
| 0.177756
| 0.020854
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.181818
| 0
| 0.181818
| 0.045455
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dc26a359dc3c1de1c2351ad0bab013c4dbc10a0
| 3,687
|
py
|
Python
|
setup.py
|
Fronius-SED/rapidyaml
|
20d44ff0c43085d08cb17f37fd6b0b305938a3ea
|
[
"MIT"
] | null | null | null |
setup.py
|
Fronius-SED/rapidyaml
|
20d44ff0c43085d08cb17f37fd6b0b305938a3ea
|
[
"MIT"
] | null | null | null |
setup.py
|
Fronius-SED/rapidyaml
|
20d44ff0c43085d08cb17f37fd6b0b305938a3ea
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: MIT
import os
import shutil
import sys
from pathlib import Path
from distutils import log
from setuptools import setup
from setuptools.command.sdist import sdist as SdistCommand
from cmake_build_extension import BuildExtension, CMakeExtension
TOP_DIR = (Path(__file__).parent).resolve()
# Where the Python library is actually found.
PYTHON_DIR = "api/python"
setup_kw = {}
# Read in the package version when not in a git repository.
VERSION_FILE = os.path.join(PYTHON_DIR, 'ryml', 'version.py')
if not (TOP_DIR / '.git').exists() and os.path.exists(VERSION_FILE):
exec(open(VERSION_FILE).read())
setup_kw['version'] = version
else:
setup_kw['use_scm_version']= {
"version_scheme": "post-release",
"local_scheme": "no-local-version",
"write_to": VERSION_FILE,
}
# Read in the module description from the README.md file.
README_FILE = TOP_DIR / "README.md"
if README_FILE.exists():
with open(TOP_DIR / "README.md", "r") as fh:
setup_kw['long_description'] = fh.read()
setup_kw['long_description_content_type'] = "text/markdown"
# define a CMake package
cmake_args = dict(
name='ryml.ryml',
install_prefix='',
source_dir='',
cmake_component='python',
cmake_configure_options=[
"-DRYML_BUILD_API:BOOL=ON",
# Force cmake to use the Python interpreter we are currently using to
# run setup.py
"-DPython3_EXECUTABLE:FILEPATH="+sys.executable,
],
)
try:
ext = CMakeExtension(**cmake_args)
except TypeError:
del cmake_args['cmake_component']
ext = CMakeExtension(**cmake_args)
# If the CMakeExtension doesn't support `cmake_component` then we have to
# do some manual cleanup.
_BuildExtension=BuildExtension
class BuildExtension(_BuildExtension):
def build_extension(self, ext):
_BuildExtension.build_extension(self, ext)
ext_dir = Path(self.get_ext_fullpath(ext.name)).parent.absolute()
cmake_install_prefix = ext_dir / ext.install_prefix
assert cmake_install_prefix.exists(), cmake_install_prefix
try:
lib_path = cmake_install_prefix / "lib"
assert lib_path.exists(), lib_path
log.info("Removing everything under: %s", lib_path)
shutil.rmtree(lib_path)
inc_path = cmake_install_prefix / "include"
assert inc_path.exists(), inc_path
log.info("Removing everything under: %s", inc_path)
shutil.rmtree(inc_path)
# Windows only
cm_path = cmake_install_prefix / "cmake"
if cm_path.exists():
log.info("Removing everything under: %s", cm_path)
shutil.rmtree(cm_path)
except:
log.info('Found following installed files:')
for f in cmake_install_prefix.rglob("*"):
log.info(' - %s', f)
raise
setup(
# Package human readable information
name='rapidyaml',
#author='Joao Paulo Magalhaes',
description='Rapid YAML - a library to parse and emit YAML, and do it fast.',
url='https://github.com/biojppm/rapidyaml',
license='MIT',
license_files=['LICENSE.txt'],
# Package contents control
cmdclass={
"build_ext": BuildExtension,
},
package_dir={"": PYTHON_DIR},
packages=['ryml'],
ext_modules=[ext],
include_package_data=True,
# Requirements
python_requires=">=3.7",
setup_requires=['setuptools_scm'],
# Extra arguments
**setup_kw,
)
| 32.342105
| 81
| 0.641714
| 448
| 3,687
| 5.069196
| 0.401786
| 0.051519
| 0.055482
| 0.029062
| 0.044474
| 0.044474
| 0.030823
| 0
| 0
| 0
| 0
| 0.001804
| 0.24844
| 3,687
| 113
| 82
| 32.628319
| 0.817755
| 0.152427
| 0
| 0.048193
| 0
| 0
| 0.182111
| 0.026705
| 0
| 0
| 0
| 0
| 0.036145
| 1
| 0.012048
| false
| 0
| 0.096386
| 0
| 0.120482
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dc297d8b74fa875a21b1642232b22d90653124f
| 5,193
|
py
|
Python
|
litex_boards/targets/digilent_arty_z7.py
|
machdyne/litex-boards
|
2311db18f8c92f80f03226fa984e6110caf25b88
|
[
"BSD-2-Clause"
] | null | null | null |
litex_boards/targets/digilent_arty_z7.py
|
machdyne/litex-boards
|
2311db18f8c92f80f03226fa984e6110caf25b88
|
[
"BSD-2-Clause"
] | null | null | null |
litex_boards/targets/digilent_arty_z7.py
|
machdyne/litex-boards
|
2311db18f8c92f80f03226fa984e6110caf25b88
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python3
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2021 Gwenhael Goavec-Merou <gwenhael.goavec-merou@trabucayre.com>
# SPDX-License-Identifier: BSD-2-Clause
import argparse
import subprocess
from migen import *
from litex_boards.platforms import digilent_arty_z7
from litex.build import tools
from litex.build.xilinx import common as xil_common
from litex.build.xilinx.vivado import vivado_build_args, vivado_build_argdict
from litex.soc.interconnect import axi
from litex.soc.interconnect import wishbone
from litex.soc.cores.clock import *
from litex.soc.integration.soc_core import *
from litex.soc.integration.soc import SoCRegion
from litex.soc.integration.builder import *
from litex.soc.cores.led import LedChaser
# CRG ----------------------------------------------------------------------------------------------
class _CRG(Module):
def __init__(self, platform, sys_clk_freq, use_ps7_clk=False):
self.rst = Signal()
self.clock_domains.cd_sys = ClockDomain()
# # #
if use_ps7_clk:
self.comb += ClockSignal("sys").eq(ClockSignal("ps7"))
self.comb += ResetSignal("sys").eq(ResetSignal("ps7") | self.rst)
else:
# Clk.
clk125 = platform.request("clk125")
# PLL.
self.submodules.pll = pll = S7PLL(speedgrade=-1)
self.comb += pll.reset.eq(self.rst)
pll.register_clkin(clk125, 125e6)
pll.create_clkout(self.cd_sys, sys_clk_freq)
# Ignore sys_clk to pll.clkin path created by SoC's rst.
platform.add_false_path_constraints(self.cd_sys.clk, pll.clkin)
# BaseSoC ------------------------------------------------------------------------------------------
class BaseSoC(SoCCore):
def __init__(self, variant="z7-20", toolchain="vivado", sys_clk_freq=int(125e6),
with_led_chaser=True, **kwargs):
platform = digilent_arty_z7.Platform(variant=variant, toolchain=toolchain)
if kwargs.get("cpu_type", None) == "zynq7000":
kwargs['integrated_sram_size'] = 0
kwargs['with_uart'] = False
self.mem_map = {
'csr': 0x4000_0000, # Zynq GP0 default
}
# SoCCore ----------------------------------------------------------------------------------
SoCCore.__init__(self, platform, sys_clk_freq,
ident = "LiteX SoC on Arty Z7",
**kwargs)
# Zynq7000 Integration ---------------------------------------------------------------------
if kwargs.get("cpu_type", None) == "zynq7000":
assert toolchain == "vivado", ' not tested / specific vivado cmds'
preset_name = "arty_z7_20.tcl" if variant == "z7-20" else "arty_z7_10.tcl"
os.system("wget http://kmf2.trabucayre.com/" + preset_name)
self.cpu.set_ps7(preset=preset_name)
# Connect AXI GP0 to the SoC
wb_gp0 = wishbone.Interface()
self.submodules += axi.AXI2Wishbone(
axi = self.cpu.add_axi_gp_master(),
wishbone = wb_gp0,
base_address = self.mem_map['csr'])
self.add_wb_master(wb_gp0)
use_ps7_clk = True
else:
use_ps7_clk = False
# CRG --------------------------------------------------------------------------------------
self.submodules.crg = _CRG(platform, sys_clk_freq, use_ps7_clk)
# Leds -------------------------------------------------------------------------------------
if with_led_chaser:
self.submodules.leds = LedChaser(
pads = platform.request_all("user_led"),
sys_clk_freq = sys_clk_freq)
# Build --------------------------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(description="LiteX SoC on Arty Z7")
parser.add_argument("--toolchain", default="vivado", help="FPGA toolchain (vivado, symbiflow or yosys+nextpnr).")
parser.add_argument("--build", action="store_true", help="Build bitstream.")
parser.add_argument("--load", action="store_true", help="Load bitstream.")
parser.add_argument("--variant", default="z7-20", help="Board variant (z7-20 or z7-10).")
parser.add_argument("--sys-clk-freq", default=125e6, help="System clock frequency.")
builder_args(parser)
soc_core_args(parser)
vivado_build_args(parser)
parser.set_defaults(cpu_type="zynq7000")
args = parser.parse_args()
soc = BaseSoC(
variant = args.variant,
toolchain = args.toolchain,
sys_clk_freq=int(float(args.sys_clk_freq)),
**soc_core_argdict(args)
)
builder = Builder(soc, **builder_argdict(args))
builder_kwargs = vivado_build_argdict(args) if args.toolchain == "vivado" else {}
builder.build(**builder_kwargs, run=args.build)
if args.load:
prog = soc.platform.create_programmer()
prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + ".bit"))
if __name__ == "__main__":
main()
| 38.753731
| 123
| 0.561333
| 577
| 5,193
| 4.830156
| 0.320624
| 0.025834
| 0.035881
| 0.019376
| 0.109078
| 0.076067
| 0.040904
| 0
| 0
| 0
| 0
| 0.023521
| 0.222222
| 5,193
| 133
| 124
| 39.045113
| 0.666502
| 0.182168
| 0
| 0.046512
| 0
| 0
| 0.115949
| 0
| 0
| 0
| 0
| 0
| 0.011628
| 1
| 0.034884
| false
| 0
| 0.162791
| 0
| 0.22093
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dc2ee5e1dddc721c798287488da3cf41eba8ae1
| 6,899
|
py
|
Python
|
goose/parsers.py
|
allmalaysianews/article-extractor
|
8d0ff3ed01258d0fad56fc22d2c1852e603096b4
|
[
"Apache-2.0"
] | null | null | null |
goose/parsers.py
|
allmalaysianews/article-extractor
|
8d0ff3ed01258d0fad56fc22d2c1852e603096b4
|
[
"Apache-2.0"
] | null | null | null |
goose/parsers.py
|
allmalaysianews/article-extractor
|
8d0ff3ed01258d0fad56fc22d2c1852e603096b4
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""\
This is a python port of "Goose" orignialy licensed to Gravity.com
under one or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.
Python port was written by Xavier Grangier for Recrutae
Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import lxml.html as lxmlhtml
from lxml.html import soupparser
from lxml import etree
from copy import deepcopy
from goose.text import innerTrim
from goose.text import encodeValue
class Parser(object):
@classmethod
def xpath_re(self, node, expression):
regexp_namespace = "http://exslt.org/regular-expressions"
items = node.xpath(expression, namespaces={'re': regexp_namespace})
return items
@classmethod
def drop_tag(self, nodes):
if isinstance(nodes, list):
for node in nodes:
node.drop_tag()
else:
nodes.drop_tag()
@classmethod
def css_select(self, node, selector):
return node.cssselect(selector)
@classmethod
def fromstring(self, html):
html = encodeValue(html)
self.doc = lxmlhtml.fromstring(html)
return self.doc
@classmethod
def nodeToString(self, node):
return etree.tostring(node)
@classmethod
def replaceTag(self, node, tag):
node.tag = tag
@classmethod
def stripTags(self, node, *tags):
etree.strip_tags(node, *tags)
@classmethod
def getElementById(self, node, idd):
selector = '//*[@id="%s"]' % idd
elems = node.xpath(selector)
if elems:
return elems[0]
return None
@classmethod
def getElementsByTag(self, node, tag=None, attr=None, value=None, childs=False):
NS = "http://exslt.org/regular-expressions"
# selector = tag or '*'
selector = 'descendant-or-self::%s' % (tag or '*')
if attr and value:
selector = '%s[re:test(@%s, "%s", "i")]' % (selector, attr, value)
elems = node.xpath(selector, namespaces={"re": NS})
# remove the root node
# if we have a selection tag
if node in elems and (tag or childs):
elems.remove(node)
return elems
@classmethod
def appendChild(self, node, child):
node.append(child)
@classmethod
def childNodes(self, node):
return list(node)
@classmethod
def childNodesWithText(self, node):
root = node
# create the first text node
# if we have some text in the node
if root.text:
t = lxmlhtml.HtmlElement()
t.text = root.text
t.tag = 'text'
root.text = None
root.insert(0, t)
# loop childs
for c, n in enumerate(list(root)):
idx = root.index(n)
# don't process texts nodes
if n.tag == 'text':
continue
# create a text node for tail
if n.tail:
t = self.createElement(tag='text', text=n.tail, tail=None)
root.insert(idx + 1, t)
return list(root)
@classmethod
def textToPara(self, text):
return self.fromstring(text)
@classmethod
def getChildren(self, node):
return node.getchildren()
@classmethod
def getElementsByTags(self, node, tags):
selector = ','.join(tags)
elems = self.css_select(node, selector)
# remove the root node
# if we have a selection tag
if node in elems:
elems.remove(node)
return elems
@classmethod
def createElement(self, tag='p', text=None, tail=None):
t = lxmlhtml.HtmlElement()
t.tag = tag
t.text = text
t.tail = tail
return t
@classmethod
def getComments(self, node):
return node.xpath('//comment()')
@classmethod
def getParent(self, node):
return node.getparent()
@classmethod
def remove(self, node):
parent = node.getparent()
if parent is not None:
if node.tail:
prev = node.getprevious()
if prev is None:
if not parent.text:
parent.text = ''
parent.text += u' ' + node.tail
else:
if not prev.tail:
prev.tail = ''
prev.tail += u' ' + node.tail
node.clear()
parent.remove(node)
@classmethod
def getTag(self, node):
return node.tag
@classmethod
def getText(self, node):
txts = [i for i in node.itertext()]
return innerTrim(u' '.join(txts).strip())
@classmethod
def previousSiblings(self, node):
nodes = []
for c, n in enumerate(node.itersiblings(preceding=True)):
nodes.append(n)
return nodes
@classmethod
def previousSibling(self, node):
nodes = []
for c, n in enumerate(node.itersiblings(preceding=True)):
nodes.append(n)
if c == 0:
break
return nodes[0] if nodes else None
@classmethod
def nextSibling(self, node):
nodes = []
for c, n in enumerate(node.itersiblings(preceding=False)):
nodes.append(n)
if c == 0:
break
return nodes[0] if nodes else None
@classmethod
def isTextNode(self, node):
return True if node.tag == 'text' else False
@classmethod
def getAttribute(self, node, attr=None):
if attr:
return node.attrib.get(attr, None)
return attr
@classmethod
def delAttribute(self, node, attr=None):
if attr:
_attr = node.attrib.get(attr, None)
if _attr:
del node.attrib[attr]
@classmethod
def setAttribute(self, node, attr=None, value=None):
if attr and value:
node.set(attr, value)
@classmethod
def outerHtml(self, node):
e0 = node
if e0.tail:
e0 = deepcopy(e0)
e0.tail = None
return self.nodeToString(e0)
class ParserSoup(Parser):
@classmethod
def fromstring(self, html):
html = encodeValue(html)
self.doc = soupparser.fromstring(html)
return self.doc
| 28.159184
| 84
| 0.584433
| 824
| 6,899
| 4.879854
| 0.256068
| 0.104452
| 0.024372
| 0.006963
| 0.203929
| 0.161154
| 0.150211
| 0.130316
| 0.130316
| 0.130316
| 0
| 0.003838
| 0.320191
| 6,899
| 244
| 85
| 28.27459
| 0.853518
| 0.159009
| 0
| 0.348315
| 0
| 0
| 0.029569
| 0.003804
| 0
| 0
| 0
| 0
| 0
| 1
| 0.168539
| false
| 0
| 0.033708
| 0.050562
| 0.353933
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dc3595f9215ec36727d03f139d3d859982ac98f
| 2,352
|
py
|
Python
|
src/infrastructure/database/postgres/sqlhandler.py
|
SoyBeansLab/daizu-online-judge-backend
|
873f81fdad2f216e28b83341a6d88b0e21078d6e
|
[
"MIT"
] | 7
|
2019-05-14T08:40:35.000Z
|
2019-08-20T08:15:21.000Z
|
src/infrastructure/database/postgres/sqlhandler.py
|
SoyBeansLab/daizu-online-judge-backend
|
873f81fdad2f216e28b83341a6d88b0e21078d6e
|
[
"MIT"
] | 76
|
2019-05-14T08:56:40.000Z
|
2020-10-18T16:25:33.000Z
|
src/infrastructure/database/postgres/sqlhandler.py
|
SoyBeansLab/daizu-online-judge-backend
|
873f81fdad2f216e28b83341a6d88b0e21078d6e
|
[
"MIT"
] | 3
|
2019-12-12T01:44:31.000Z
|
2020-11-22T03:24:40.000Z
|
from logging import getLogger
import os
from typing import List, Union
import psycopg2
from interface.database.sqlhandler import Cursor as AbsCursor
from interface.database.sqlhandler import Result as AbsResult
from interface.database.sqlhandler import SqlHandler as AbsSqlHandler
from exceptions.waf import SqlTransactionException
logger = getLogger("daizu").getChild("infrastracture.SqlHandler")
class Result(AbsResult):
def __init__(self, rowid: int):
self.last_insertid = rowid
def lastrowid(self) -> int:
return self.last_insertid
class Cursor(AbsCursor):
def __init__(self, cursor):
self.cursor = cursor
def fetch_all(self):
return self.cursor
def fetch_one(self):
if len(self.cursor) == 0:
return []
return self.cursor[0]
class SqlHandler(AbsSqlHandler):
def __init__(self):
# 環境から取るようにする
self.host = os.getenv("DAIZU_DATABASE_HOST", "localhost")
self.dbname = os.getenv("DAIZU_DATABASE_NAME", "doj")
self.user = os.getenv("DAIZU_DATABASE_USERNAME", "daizu")
self.password = os.getenv("DAIZU_DATABASE_PASSWORD", "soybeanslab")
try:
self.connection = psycopg2.connect(
host=self.host,
dbname=self.dbname,
user=self.user,
password=self.password,
)
except psycopg2.OperationalError as err:
raise err
# self.cursor = self.connection.cursor()
def execute(self, query: str, *args) -> Result:
try:
with self.connection.cursor() as cursor:
cursor.execute(query, args)
lastrowid = cursor.lastrowid
self.connection.commit()
except psycopg2.errors.InFailedSqlTransaction as e:
logger.error(e)
self.connection.rollback()
raise SqlTransactionException()
return lastrowid
def query(self, query: str, *args) -> Cursor:
try:
with self.connection.cursor() as cursor:
cursor.execute(query, *args)
data = cursor.fetchall()
except psycopg2.errors.InFailedSqlTransaction as e:
logger.error(e)
self.connection.rollback()
raise SqlTransactionException()
return Cursor(data)
| 29.4
| 75
| 0.627551
| 245
| 2,352
| 5.926531
| 0.293878
| 0.067493
| 0.035813
| 0.057851
| 0.310606
| 0.23416
| 0.23416
| 0.23416
| 0.23416
| 0.23416
| 0
| 0.004149
| 0.282738
| 2,352
| 79
| 76
| 29.772152
| 0.85655
| 0.021259
| 0
| 0.220339
| 0
| 0
| 0.061766
| 0.030883
| 0
| 0
| 0
| 0
| 0
| 1
| 0.135593
| false
| 0.033898
| 0.135593
| 0.033898
| 0.423729
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dc369e87fe800f32ded0cd2dc49e361f6723160
| 1,001
|
py
|
Python
|
virtualisation/wrapper/parser/xmlparser.py
|
CityPulse/CP_Resourcemanagement
|
aa670fa89d5e086a98ade3ccc152518be55abf2e
|
[
"MIT"
] | 2
|
2016-11-03T14:57:45.000Z
|
2019-05-13T13:21:08.000Z
|
virtualisation/wrapper/parser/xmlparser.py
|
CityPulse/CP_Resourcemanagement
|
aa670fa89d5e086a98ade3ccc152518be55abf2e
|
[
"MIT"
] | null | null | null |
virtualisation/wrapper/parser/xmlparser.py
|
CityPulse/CP_Resourcemanagement
|
aa670fa89d5e086a98ade3ccc152518be55abf2e
|
[
"MIT"
] | 1
|
2020-07-23T11:27:15.000Z
|
2020-07-23T11:27:15.000Z
|
from virtualisation.clock.abstractclock import AbstractClock
__author__ = 'Marten Fischer (m.fischer@hs-osnabrueck.de)'
from virtualisation.wrapper.parser.abstractparser import AbstractParser
from virtualisation.misc.jsonobject import JSONObject as JOb
import datetime as dt
class XMLParser(AbstractParser):
"""
Maps a list of values read by a CSVReader with a given naming list
"""
def __init__(self, wrapper):
super(XMLParser, self).__init__(wrapper)
self.timestampcell = -1
if self.wrapper.getSensorDescription().isTimestampedStream():
try:
self.timestampcell = -1
self.timestampformat = self.wrapper.getSensorDescription().timestamp.format
except ValueError:
self.timestampcell = -1
def parse(self, data, clock):
raise Exception("not implemented yet!")
if not data: # nothing received or nothing in the history -> nothing to parse
return None
| 31.28125
| 91
| 0.682318
| 108
| 1,001
| 6.212963
| 0.601852
| 0.080477
| 0.080477
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003953
| 0.241758
| 1,001
| 31
| 92
| 32.290323
| 0.880105
| 0.12987
| 0
| 0.157895
| 0
| 0
| 0.073857
| 0.032825
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.210526
| 0
| 0.421053
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dc48b0f27c0b76d7893695e9d44f12dbfa7a376
| 19,404
|
py
|
Python
|
plaso/formatters/interface.py
|
jonathan-greig/plaso
|
b88a6e54c06a162295d09b016bddbfbfe7ca9070
|
[
"Apache-2.0"
] | 1,253
|
2015-01-02T13:58:02.000Z
|
2022-03-31T08:43:39.000Z
|
plaso/formatters/interface.py
|
jonathan-greig/plaso
|
b88a6e54c06a162295d09b016bddbfbfe7ca9070
|
[
"Apache-2.0"
] | 3,388
|
2015-01-02T11:17:58.000Z
|
2022-03-30T10:21:45.000Z
|
plaso/formatters/interface.py
|
jonathan-greig/plaso
|
b88a6e54c06a162295d09b016bddbfbfe7ca9070
|
[
"Apache-2.0"
] | 376
|
2015-01-20T07:04:54.000Z
|
2022-03-04T23:53:00.000Z
|
# -*- coding: utf-8 -*-
"""This file contains the event formatters interface classes.
The l2t_csv and other formats are dependent on a message field,
referred to as description_long and description_short in l2t_csv.
Plaso no longer stores these field explicitly.
A formatter, with a format string definition, is used to convert
the event object values into a formatted string that is similar
to the description_long and description_short field.
"""
import abc
import re
from plaso.formatters import logger
class EventFormatterHelper(object):
"""Base class of helper for formatting event data."""
@abc.abstractmethod
def FormatEventValues(self, event_values):
"""Formats event values using the helper.
Args:
event_values (dict[str, object]): event values.
"""
class BooleanEventFormatterHelper(EventFormatterHelper):
"""Helper for formatting boolean event data.
Attributes:
input_attribute (str): name of the attribute that contains the boolean
input value.
output_attribute (str): name of the attribute where the boolean output
value should be stored.
value_if_false (str): output value if the boolean input value is False.
value_if_true (str): output value if the boolean input value is True.
"""
def __init__(
self, input_attribute=None, output_attribute=None, value_if_false=None,
value_if_true=None):
"""Initialized a helper for formatting boolean event data.
Args:
input_attribute (Optional[str]): name of the attribute that contains
the boolean input value.
output_attribute (Optional[str]): name of the attribute where the
boolean output value should be stored.
value_if_false (str): output value if the boolean input value is False.
value_if_true (str): output value if the boolean input value is True.
"""
super(BooleanEventFormatterHelper, self).__init__()
self.input_attribute = input_attribute
self.output_attribute = output_attribute
self.value_if_false = value_if_false
self.value_if_true = value_if_true
def FormatEventValues(self, event_values):
"""Formats event values using the helper.
Args:
event_values (dict[str, object]): event values.
"""
input_value = event_values.get(self.input_attribute, None)
if input_value:
output_value = self.value_if_true
else:
output_value = self.value_if_false
event_values[self.output_attribute] = output_value
class CustomEventFormatterHelper(EventFormatterHelper):
"""Base class for a helper for custom formatting of event data."""
DATA_TYPE = ''
IDENTIFIER = ''
@abc.abstractmethod
def FormatEventValues(self, event_values):
"""Formats event values using the helper.
Args:
event_values (dict[str, object]): event values.
"""
class EnumerationEventFormatterHelper(EventFormatterHelper):
"""Helper for formatting enumeration event data.
Attributes:
default (str): default value.
input_attribute (str): name of the attribute that contains the enumeration
input value.
output_attribute (str): name of the attribute where the enumeration output
value should be stored.
values (dict[str, str]): mapping of enumeration input and output values.
"""
def __init__(
self, default=None, input_attribute=None, output_attribute=None,
values=None):
"""Initialized a helper for formatting enumeration event data.
Args:
default (Optional[str]): default value.
input_attribute (Optional[str]): name of the attribute that contains
the enumeration input value.
output_attribute (Optional[str]): name of the attribute where the
enumeration output value should be stored.
values (Optional[dict[str, str]]): mapping of enumeration input and
output values.
"""
super(EnumerationEventFormatterHelper, self).__init__()
self.default = default
self.input_attribute = input_attribute
self.output_attribute = output_attribute
self.values = values or {}
def FormatEventValues(self, event_values):
"""Formats event values using the helper.
If default value is None and there is no corresponding enumeration value
then the original value is used.
Args:
event_values (dict[str, object]): event values.
"""
input_value = event_values.get(self.input_attribute, None)
if input_value is not None:
default_value = self.default
if default_value is None:
default_value = input_value
event_values[self.output_attribute] = self.values.get(
input_value, default_value)
class FlagsEventFormatterHelper(EventFormatterHelper):
"""Helper for formatting flags event data.
Attributes:
input_attribute (str): name of the attribute that contains the flags
input value.
output_attribute (str): name of the attribute where the flags output
value should be stored.
values (dict[str, str]): mapping of flags input and output values.
"""
def __init__(
self, input_attribute=None, output_attribute=None, values=None):
"""Initialized a helper for formatting flags event data.
Args:
input_attribute (Optional[str]): name of the attribute that contains
the flags input value.
output_attribute (Optional[str]): name of the attribute where the
flags output value should be stored.
values (Optional[dict[str, str]]): mapping of flags input and output
values.
"""
super(FlagsEventFormatterHelper, self).__init__()
self.input_attribute = input_attribute
self.output_attribute = output_attribute
self.values = values or {}
def FormatEventValues(self, event_values):
"""Formats event values using the helper.
Args:
event_values (dict[str, object]): event values.
"""
input_value = event_values.get(self.input_attribute, None)
if input_value is None:
return
output_values = []
for flag, mapped_value in self.values.items():
if flag & input_value:
output_values.append(mapped_value)
event_values[self.output_attribute] = ', '.join(output_values)
class EventFormatter(object):
"""Base class to format event values.
Attributes:
custom_helpers (list[str]): identifiers of custom event formatter helpers.
helpers (list[EventFormatterHelper]): event formatter helpers.
"""
# The format string can be defined as:
# {name}, {name:format}, {name!conversion}, {name!conversion:format}
_FORMAT_STRING_ATTRIBUTE_NAME_RE = re.compile(
'{([a-z][a-zA-Z0-9_]*)[!]?[^:}]*[:]?[^}]*}')
def __init__(self, data_type='internal'):
"""Initializes an event formatter.
Args:
data_type (Optional[str]): unique identifier for the event data supported
by the formatter.
"""
super(EventFormatter, self).__init__()
self._data_type = data_type
self._format_string_attribute_names = None
self.custom_helpers = []
self.helpers = []
@property
def data_type(self):
"""str: unique identifier for the event data supported by the formatter."""
return self._data_type.lower()
def _FormatMessage(self, format_string, event_values):
"""Determines the formatted message.
Args:
format_string (str): message format string.
event_values (dict[str, object]): event values.
Returns:
str: formatted message.
"""
try:
message_string = format_string.format(**event_values)
except KeyError as exception:
data_type = event_values.get('data_type', 'N/A')
display_name = event_values.get('display_name', 'N/A')
event_identifier = event_values.get('uuid', 'N/A')
parser_chain = event_values.get('parser', 'N/A')
error_message = (
'unable to format string: "{0:s}" missing required event '
'value: {1!s}').format(format_string, exception)
error_message = (
'Event: {0:s} data type: {1:s} display name: {2:s} '
'parser chain: {3:s} with error: {4:s}').format(
event_identifier, data_type, display_name, parser_chain,
error_message)
logger.error(error_message)
attribute_values = []
for attribute, value in event_values.items():
attribute_values.append('{0:s}: {1!s}'.format(attribute, value))
message_string = ' '.join(attribute_values)
except UnicodeDecodeError as exception:
data_type = event_values.get('data_type', 'N/A')
display_name = event_values.get('display_name', 'N/A')
event_identifier = event_values.get('uuid', 'N/A')
parser_chain = event_values.get('parser', 'N/A')
error_message = 'Unicode decode error: {0!s}'.format(exception)
error_message = (
'Event: {0:s} data type: {1:s} display name: {2:s} '
'parser chain: {3:s} with error: {4:s}').format(
event_identifier, data_type, display_name, parser_chain,
error_message)
logger.error(error_message)
message_string = ''
# Strip carriage return and linefeed form the message strings.
# Using replace function here because it is faster than re.sub() or
# string.strip().
return message_string.replace('\r', '').replace('\n', '')
def FormatEventValues(self, event_values):
"""Formats event values using the helpers.
Args:
event_values (dict[str, object]): event values.
"""
for helper in self.helpers:
helper.FormatEventValues(event_values)
@abc.abstractmethod
def GetFormatStringAttributeNames(self):
"""Retrieves the attribute names in the format string.
Returns:
set(str): attribute names.
"""
# pylint: disable=unused-argument
def AddCustomHelper(
self, identifier, input_attribute=None, output_attribute=None):
"""Adds a custom event formatter helper.
Args:
identifier (str): identifier.
input_attribute (Optional[str]): name of the attribute that contains
the input value.
output_attribute (Optional[str]): name of the attribute where the
output value should be stored.
"""
self.custom_helpers.append(identifier)
def AddHelper(self, helper):
"""Adds an event formatter helper.
Args:
helper (EventFormatterHelper): event formatter helper to add.
"""
self.helpers.append(helper)
@abc.abstractmethod
def GetMessage(self, event_values):
"""Determines the message.
Args:
event_values (dict[str, object]): event values.
Returns:
str: message.
"""
@abc.abstractmethod
def GetMessageShort(self, event_values):
"""Determines the short message.
Args:
event_values (dict[str, object]): event values.
Returns:
str: short message.
"""
class BasicEventFormatter(EventFormatter):
"""Format event values using a message format string.
Attributes:
custom_helpers (list[str]): identifiers of custom event formatter helpers.
helpers (list[EventFormatterHelper]): event formatter helpers.
"""
def __init__(
self, data_type='basic', format_string=None, format_string_short=None):
"""Initializes a basic event formatter.
The syntax of the format strings is similar to that of format() where
the place holder for a certain event object attribute is defined as
{attribute_name}.
Args:
data_type (Optional[str]): unique identifier for the event data supported
by the formatter.
format_string (Optional[str]): (long) message format string.
format_string_short (Optional[str]): short message format string.
"""
super(BasicEventFormatter, self).__init__(data_type=data_type)
self._format_string_attribute_names = None
self._format_string = format_string
self._format_string_short = format_string_short
def GetFormatStringAttributeNames(self):
"""Retrieves the attribute names in the format string.
Returns:
set(str): attribute names.
"""
if self._format_string_attribute_names is None:
self._format_string_attribute_names = (
self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall(
self._format_string))
return set(self._format_string_attribute_names)
def GetMessage(self, event_values):
"""Determines the message.
Args:
event_values (dict[str, object]): event values.
Returns:
str: message.
"""
return self._FormatMessage(self._format_string, event_values)
def GetMessageShort(self, event_values):
"""Determines the short message.
Args:
event_values (dict[str, object]): event values.
Returns:
str: short message.
"""
if self._format_string_short:
format_string = self._format_string_short
else:
format_string = self._format_string
short_message_string = self._FormatMessage(format_string, event_values)
# Truncate the short message string if necessary.
if len(short_message_string) > 80:
short_message_string = '{0:s}...'.format(short_message_string[:77])
return short_message_string
class ConditionalEventFormatter(EventFormatter):
"""Conditionally format event values using format string pieces."""
_DEFAULT_FORMAT_STRING_SEPARATOR = ' '
def __init__(
self, data_type='conditional', format_string_pieces=None,
format_string_separator=None, format_string_short_pieces=None):
"""Initializes a conditional event formatter.
The syntax of the format strings pieces is similar to of the basic event
formatter (BasicEventFormatter). Every format string piece should contain
at maximum one unique attribute name. Format string pieces without an
attribute name are supported.
Args:
data_type (Optional[str]): unique identifier for the event data supported
by the formatter.
format_string_pieces (Optional[list[str]]): (long) message format string
pieces.
format_string_separator (Optional[str]): string by which separate format
string pieces should be joined.
format_string_short_pieces (Optional[list[str]]): short message format
string pieces.
"""
if format_string_separator is None:
format_string_separator = self._DEFAULT_FORMAT_STRING_SEPARATOR
super(ConditionalEventFormatter, self).__init__(data_type=data_type)
self._format_string_pieces = format_string_pieces or []
self._format_string_pieces_map = []
self._format_string_separator = format_string_separator
self._format_string_short_pieces = format_string_short_pieces or []
self._format_string_short_pieces_map = []
def _CreateFormatStringMap(
self, format_string_pieces, format_string_pieces_map):
"""Creates a format string map.
The format string pieces map is a list containing the attribute name
per format string piece. E.g. ["Description: {description}"] would be
mapped to: [0] = "description". If the string piece does not contain
an attribute name it is treated as text that does not needs formatting.
Args:
format_string_pieces (list[str]): format string pieces.
format_string_pieces_map (list[str]): format string pieces map.
Raises:
RuntimeError: when an invalid format string piece is encountered.
"""
for format_string_piece in format_string_pieces:
attribute_names = self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall(
format_string_piece)
if len(set(attribute_names)) > 1:
raise RuntimeError((
'Invalid format string piece: [{0:s}] contains more than 1 '
'attribute name.').format(format_string_piece))
if not attribute_names:
# The text format string piece is stored as an empty map entry to keep
# the index in the map equal to the format string pieces.
attribute_name = ''
else:
attribute_name = attribute_names[0]
format_string_pieces_map.append(attribute_name)
def _CreateFormatStringMaps(self):
"""Creates the format string maps.
Maps are built of the string pieces and their corresponding attribute
name to optimize conditional string formatting.
Raises:
RuntimeError: when an invalid format string piece is encountered.
"""
self._format_string_pieces_map = []
self._CreateFormatStringMap(
self._format_string_pieces, self._format_string_pieces_map)
self._format_string_short_pieces_map = []
self._CreateFormatStringMap(
self._format_string_short_pieces, self._format_string_short_pieces_map)
def _ConditionalFormatMessage(
self, format_string_pieces, format_string_pieces_map, event_values):
"""Determines the conditional formatted message.
Args:
format_string_pieces (dict[str, str]): format string pieces.
format_string_pieces_map (list[int, str]): format string pieces map.
event_values (dict[str, object]): event values.
Returns:
str: conditional formatted message.
Raises:
RuntimeError: when an invalid format string piece is encountered.
"""
string_pieces = []
for map_index, attribute_name in enumerate(format_string_pieces_map):
if not attribute_name or event_values.get(
attribute_name, None) is not None:
string_pieces.append(format_string_pieces[map_index])
format_string = self._format_string_separator.join(string_pieces)
return self._FormatMessage(format_string, event_values)
def GetFormatStringAttributeNames(self):
"""Retrieves the attribute names in the format string.
Returns:
set(str): attribute names.
"""
if self._format_string_attribute_names is None:
self._format_string_attribute_names = []
for format_string_piece in self._format_string_pieces:
attribute_names = self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall(
format_string_piece)
if attribute_names:
self._format_string_attribute_names.extend(attribute_names)
return set(self._format_string_attribute_names)
def GetMessage(self, event_values):
"""Determines the message.
Args:
event_values (dict[str, object]): event values.
Returns:
str: message.
"""
if not self._format_string_pieces_map:
self._CreateFormatStringMaps()
return self._ConditionalFormatMessage(
self._format_string_pieces, self._format_string_pieces_map,
event_values)
def GetMessageShort(self, event_values):
"""Determines the short message.
Args:
event_values (dict[str, object]): event values.
Returns:
str: short message.
"""
if not self._format_string_pieces_map:
self._CreateFormatStringMaps()
if (self._format_string_short_pieces and
self._format_string_short_pieces != ['']):
format_string_pieces = self._format_string_short_pieces
format_string_pieces_map = self._format_string_short_pieces_map
else:
format_string_pieces = self._format_string_pieces
format_string_pieces_map = self._format_string_pieces_map
short_message_string = self._ConditionalFormatMessage(
format_string_pieces, format_string_pieces_map, event_values)
# Truncate the short message string if necessary.
if len(short_message_string) > 80:
short_message_string = '{0:s}...'.format(short_message_string[:77])
return short_message_string
| 33.112628
| 79
| 0.704958
| 2,393
| 19,404
| 5.479315
| 0.103218
| 0.113484
| 0.054912
| 0.032032
| 0.655201
| 0.598078
| 0.551175
| 0.533405
| 0.507474
| 0.47018
| 0
| 0.002283
| 0.209751
| 19,404
| 585
| 80
| 33.169231
| 0.852811
| 0.434034
| 0
| 0.399083
| 0
| 0.009174
| 0.052444
| 0.004065
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12844
| false
| 0
| 0.013761
| 0
| 0.243119
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dc534af6da39531ee8b4ae7b4baf8841a23115e
| 1,608
|
py
|
Python
|
Annotated_video/test/Annotatedvideo_worm.py
|
Rukaume/LRCN
|
0d1928cc72544f59a4335fea7febc561d3dfc118
|
[
"MIT"
] | 1
|
2020-11-07T05:57:32.000Z
|
2020-11-07T05:57:32.000Z
|
Annotated_video/test/Annotatedvideo_worm.py
|
Rukaume/LRCN
|
0d1928cc72544f59a4335fea7febc561d3dfc118
|
[
"MIT"
] | 1
|
2020-11-07T00:30:22.000Z
|
2021-01-26T02:22:16.000Z
|
Annotated_video/test/Annotatedvideo_worm.py
|
Rukaume/LRCN
|
0d1928cc72544f59a4335fea7febc561d3dfc118
|
[
"MIT"
] | 1
|
2020-11-07T05:57:52.000Z
|
2020-11-07T05:57:52.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 4 22:27:11 2020
@author: Miyazaki
"""
imdir = "C:/Users/Miyazaki/Desktop/hayashi_lab/20200527_lethargus_analysis/renamed_pillar_chamber-N2/chamber3"
resultdir= "C:/Users/Miyazaki/Desktop/hayashi_lab/20200527_lethargus_analysis/renamed_pillar_chamber-N2/result0918.csv"
import os, cv2, shutil
from tqdm import tqdm
import pandas as pd
os.chdir(imdir)
os.makedirs("../annotatedimages", exist_ok = True)
imlist = os.listdir("./")
imlist = [i for i in imlist if os.path.splitext(i)[1] == '.jpg' \
or os.path.splitext(i)[1] == '.png']
imlist.sort()
result = pd.read_csv(resultdir)
font = cv2.FONT_HERSHEY_SIMPLEX
for i in tqdm(range(len(imlist))):
if int(result.loc[i]) == 0:
tempim = cv2.imread(imlist[i])
tempim = cv2.putText(tempim,'quiescent',(10,500), font, 1,(255,0,0),2,cv2.LINE_AA)
cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim)
elif int(result.loc[i]) == 1:
tempim = cv2.imread(imlist[i])
tempim = cv2.putText(tempim,'dwell',(10,500), font, 1,(0,255,0),2,cv2.LINE_AA)
cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim)
elif int(result.loc[i]) == 2:
tempim = cv2.imread(imlist[i])
tempim = cv2.putText(tempim,'forward',(10,500), font, 1,(0,0,255),2,cv2.LINE_AA)
cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim)
elif int(result.loc[i]) == 3:
tempim = cv2.imread(imlist[i])
tempim = cv2.putText(tempim,'backward',(10,500), font, 1,(100,100,0),2,cv2.LINE_AA)
cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim)
else:
pass
| 36.545455
| 119
| 0.670398
| 246
| 1,608
| 4.308943
| 0.361789
| 0.059434
| 0.098113
| 0.049057
| 0.618868
| 0.567925
| 0.567925
| 0.567925
| 0.567925
| 0.401887
| 0
| 0.07808
| 0.131841
| 1,608
| 43
| 120
| 37.395349
| 0.681232
| 0.047886
| 0
| 0.25
| 0
| 0
| 0.22784
| 0.190414
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.03125
| 0.09375
| 0
| 0.09375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dc7dadf50bfb05ab92b9d5e96fde0df19295e15
| 4,996
|
py
|
Python
|
services/IAm.py
|
matteobjornsson/serverless-rock-paper-scissors
|
32b6f11644c59dc3bb159ee9e1118fed26a3983d
|
[
"MIT"
] | null | null | null |
services/IAm.py
|
matteobjornsson/serverless-rock-paper-scissors
|
32b6f11644c59dc3bb159ee9e1118fed26a3983d
|
[
"MIT"
] | null | null | null |
services/IAm.py
|
matteobjornsson/serverless-rock-paper-scissors
|
32b6f11644c59dc3bb159ee9e1118fed26a3983d
|
[
"MIT"
] | 1
|
2021-04-20T23:55:37.000Z
|
2021-04-20T23:55:37.000Z
|
#
# Created on Thu Apr 22 2021
# Matteo Bjornsson
#
import boto3
from botocore.exceptions import ClientError
import logging
logging.basicConfig(filename="rps.log", level=logging.INFO)
iam_resource = boto3.resource("iam")
sts_client = boto3.client("sts")
def create_role(
iam_role_name: str, assume_role_policy_json: str, policy_arns: list
) -> iam_resource.Role:
"""
Create an IAM role with a given policy.
:param assume_role_policy_json: A json string that represents the assume
role policy defining what resources are allowed to assume the role.
:param policy_arns: a list of strings representing existing policy arns to
also attach to the role
:return: IAM role object
This method was adapted from the create_iam_role_for_lambda() method found here:
https://docs.aws.amazon.com/code-samples/latest/catalog/python-lambda-boto_client_examples-lambda_basics.py.html
"""
try:
role = iam_resource.create_role(
RoleName=iam_role_name,
AssumeRolePolicyDocument=assume_role_policy_json,
)
# wait for the creation to complete
iam_resource.meta.client.get_waiter("role_exists").wait(RoleName=iam_role_name)
# attach the additional supplied policies
for arn in policy_arns:
role.attach_policy(PolicyArn=arn)
except ClientError as error:
if error.response["Error"]["Code"] == "EntityAlreadyExists":
role = iam_resource.Role(iam_role_name)
logging.warning("The role %s already exists. Using it.", iam_role_name)
return role
else:
logging.error(error.response["Error"]["Message"])
logging.exception(
"Couldn't create role %s or attach policy %s.",
iam_role_name,
str(policy_arns),
)
raise
else:
logging.info("Created IAM role %s.", role.name)
logging.info("Attached policies %s to role %s.", policy_arns, role.name)
return role
def create_policy(policy_name: str, policy_json: str) -> iam_resource.Policy:
"""
Create an IAM policy of given name and json description.
Policies define permissions in AWS and can be associated with IAM roles.
:param policy_json: just be a valid policy json string
:return: IAM Policy object
"""
try:
policy = iam_resource.create_policy(
PolicyName=policy_name, PolicyDocument=policy_json
)
except ClientError as error:
if error.response["Error"]["Code"] == "EntityAlreadyExists":
policy = get_policy_by_name(policy_name)
logging.warning("The policy %s already exists. Using it.", policy.arn)
return policy
else:
logging.error(error.response["Error"]["Message"])
logging.exception("Couldn't create policy %s", policy_name)
raise
else:
logging.info("Created Policy '%s'", policy_name)
return policy
def get_policy_by_name(policy_name: str) -> iam_resource.Policy:
"""
Get an existing policy by name.
:return: IAM Policy object
"""
# sts provides the account number of the current credentials
account_id = sts_client.get_caller_identity()["Account"]
# policy arns consist of an account id and policy name
policy_arn = f"arn:aws:iam::{account_id}:policy/{policy_name}"
# policies are created in the Python SDK via their arn
policy = iam_resource.Policy(policy_arn)
return policy
def delete_role(iam_role) -> dict:
"""
Delete a role.
:param iam_role: this parameter is an IAM role object, such as returned
by create_role()
"""
try:
# remove all policies before deleting role
for policy in iam_role.attached_policies.all():
policy.detach_role(RoleName=iam_role.name)
response = iam_role.delete()
except ClientError as error:
logging.error(error.response["Error"]["Message"])
logging.error("Couldn't delete role %s", iam_role.name)
else:
logging.info("Deleted role '%s'", iam_role.name)
return response
def delete_policy(iam_policy) -> dict:
"""
Delete a role.
:param iam_policy: this parameter is an IAM policy object, such as returned
by create_policy()
"""
try:
response = iam_policy.delete()
except ClientError as error:
logging.error(error.response["Error"]["Message"])
logging.error("Couldn't delete policy %s", iam_policy.arn)
else:
logging.info("Deleted policy '%s'", iam_policy.arn)
return response
if __name__ == "__main__":
# brief functionality test with delete() cleanup at end
policy_json_file = "./policy/lambda_policy.json"
with open(policy_json_file) as file:
policy_json = file.read()
policy_name = "test_policy"
policy = create_policy(policy_name, policy_json)
print("new policy arn: ", policy.arn)
policy.delete()
| 34.937063
| 116
| 0.664532
| 650
| 4,996
| 4.938462
| 0.255385
| 0.039252
| 0.030841
| 0.029907
| 0.268536
| 0.190031
| 0.142679
| 0.142679
| 0.142679
| 0.142679
| 0
| 0.002377
| 0.242194
| 4,996
| 142
| 117
| 35.183099
| 0.845483
| 0.273018
| 0
| 0.345238
| 0
| 0
| 0.155766
| 0.020941
| 0
| 0
| 0
| 0
| 0
| 1
| 0.059524
| false
| 0
| 0.035714
| 0
| 0.178571
| 0.011905
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dcbed3a8321b9c6b63677f0f51fde0daacfda04
| 21,917
|
py
|
Python
|
audiomate/annotations/label_list.py
|
CostanzoPablo/audiomate
|
080402eadaa81f77f64c8680510a2de64bc18e74
|
[
"MIT"
] | 133
|
2018-05-18T13:54:10.000Z
|
2022-02-15T02:14:20.000Z
|
audiomate/annotations/label_list.py
|
CostanzoPablo/audiomate
|
080402eadaa81f77f64c8680510a2de64bc18e74
|
[
"MIT"
] | 68
|
2018-06-03T16:42:09.000Z
|
2021-01-29T10:58:30.000Z
|
audiomate/annotations/label_list.py
|
CostanzoPablo/audiomate
|
080402eadaa81f77f64c8680510a2de64bc18e74
|
[
"MIT"
] | 37
|
2018-11-02T02:40:29.000Z
|
2021-11-30T07:44:50.000Z
|
import collections
import copy
import intervaltree
from .label import Label
class LabelList:
"""
Represents a list of labels which describe an utterance.
An utterance can have multiple label-lists.
Args:
idx (str): An unique identifier for the label-list
within a corpus for one utterance.
labels (list): The list containing the
:py:class:`audiomate.annotations.Label`.
Attributes:
utterance (Utterance): The utterance this label-list is belonging to.
label_tree (IntervalTree): The interval-tree storing the labels.
Example:
>>> label_list = LabelList(idx='transcription', labels=[
>>> Label('this', 0, 2),
>>> Label('is', 2, 4),
>>> Label('timmy', 4, 8)
>>> ])
"""
__slots__ = ['idx', 'label_tree', 'utterance']
def __init__(self, idx='default', labels=None):
self.idx = idx
self.utterance = None
self.label_tree = intervaltree.IntervalTree()
if labels is not None:
self.update(labels)
def __eq__(self, other):
data_this = (self.idx, self.label_tree)
data_other = (other.idx, other.label_tree)
return data_this == data_other
def __iter__(self):
for interval in self.label_tree:
yield interval.data
def __len__(self):
return self.label_tree.__len__()
def __copy__(self):
# utterance is ignored intentionally,
# since it is kind of a weak ref
return LabelList(
idx=self.idx,
labels=[iv.data for iv in self.label_tree]
)
def __deepcopy__(self, memo):
# utterance is ignored intentionally,
# since it is kind of a weak ref
return LabelList(
idx=self.idx,
labels=copy.deepcopy([iv.data for iv in self.label_tree], memo)
)
@property
def labels(self):
""" Return list of labels. """
return list(self)
@property
def start(self):
""" Return start of the earliest starting label (lower bound). """
return self.label_tree.begin()
@property
def end(self):
""" Return end of the lastly ending label (upper bound). """
return self.label_tree.end()
@property
def total_length(self):
"""
Return the cumulative length of all labels
(Number of characters).
"""
return sum(label.length for label in self.labels)
#
# Alteration
#
def add(self, label):
"""
Add a label to the end of the list.
Args:
label (Label): The label to add.
"""
label.label_list = self
self.label_tree.addi(label.start, label.end, label)
def addl(self, value, start=0.0, end=float('inf')):
""" Shortcut for ``add(Label(value, start, end))``. """
self.add(Label(value, start=start, end=end))
def update(self, labels):
"""
Add a list of labels to the end of the list.
Args:
labels (list): Labels to add.
"""
ivs = []
for label in labels:
label.label_list = self
ivs.append(intervaltree.Interval(label.start, label.end, label))
self.label_tree.update(ivs)
def apply(self, fn):
"""
Apply the given function `fn` to every label in this label list.
`fn` is a function of one argument that receives the current label
which can then be edited in place.
Args:
fn (func): Function to apply to every label
Example:
>>> ll = LabelList(labels=[
... Label('a_label', 1.0, 2.0),
... Label('another_label', 2.0, 3.0)
... ])
>>> def shift_labels(label):
... label.start += 1.0
... label.end += 1.0
...
>>> ll.apply(shift_labels)
>>> ll.labels
[Label(a_label, 2.0, 3.0), Label(another_label, 3.0, 4.0)]
"""
for label in self.labels:
fn(label)
def merge_overlaps(self, threshold=0.0):
"""
Merge overlapping labels with the same value.
Two labels are considered overlapping,
if ``l2.start - l1.end < threshold``.
Args:
threshold (float): Maximal distance between two labels
to be considered as overlapping.
(default: 0.0)
Example:
>>> ll = LabelList(labels=[
... Label('a_label', 1.0, 2.0),
... Label('a_label', 1.5, 2.7),
... Label('b_label', 1.0, 2.0),
... ])
>>> ll.merge_overlapping_labels()
>>> ll.labels
[
Label('a_label', 1.0, 2.7),
Label('b_label', 1.0, 2.0),
]
"""
updated_labels = []
all_intervals = self.label_tree.copy()
# recursivly find a group of overlapping labels with the same value
def recursive_overlaps(interval):
range_start = interval.begin - threshold
range_end = interval.end + threshold
direct_overlaps = all_intervals.overlap(range_start, range_end)
all_overlaps = [interval]
all_intervals.discard(interval)
for overlap in direct_overlaps:
if overlap.data.value == interval.data.value:
all_overlaps.extend(recursive_overlaps(overlap))
return all_overlaps
# For every remaining interval
# - Find overlapping intervals recursively
# - Remove them
# - Create a concatenated new label
while not all_intervals.is_empty():
next_interval = list(all_intervals)[0]
overlapping = recursive_overlaps(next_interval)
ov_start = float('inf')
ov_end = 0.0
ov_value = next_interval.data.value
for overlap in overlapping:
ov_start = min(ov_start, overlap.begin)
ov_end = max(ov_end, overlap.end)
all_intervals.discard(overlap)
updated_labels.append(Label(
ov_value,
ov_start,
ov_end
))
# Replace the old labels with the updated ones
self.label_tree.clear()
self.update(updated_labels)
#
# Statistics
#
def label_total_duration(self):
"""
Return for each distinct label value the total duration of
all occurrences.
Returns:
dict: A dictionary containing for every label-value (key)
the total duration in seconds (value).
Example:
>>> ll = LabelList(labels=[
>>> Label('a', 3, 5),
>>> Label('b', 5, 8),
>>> Label('a', 8, 10),
>>> Label('b', 10, 14),
>>> Label('a', 15, 18.5)
>>> ])
>>> ll.label_total_duration()
{'a': 7.5 'b': 7.0}
"""
durations = collections.defaultdict(float)
for label in self:
durations[label.value] += label.duration
return durations
def label_values(self):
"""
Return a list of all occuring label values.
Returns:
list: Lexicographically sorted list (str) of label values.
Example:
>>> ll = LabelList(labels=[
>>> Label('a', 3.2, 4.5),
>>> Label('b', 5.1, 8.9),
>>> Label('c', 7.2, 10.5),
>>> Label('d', 10.5, 14),
>>> Label('d', 15, 18)
>>> ])
>>> ll.label_values()
['a', 'b', 'c', 'd']
"""
all_labels = {l.value for l in self}
return sorted(all_labels)
def label_count(self):
"""
Return for each label the number of occurrences within the list.
Returns:
dict: A dictionary containing for every label-value (key)
the number of occurrences (value).
Example:
>>> ll = LabelList(labels=[
>>> Label('a', 3.2, 4.5),
>>> Label('b', 5.1, 8.9),
>>> Label('a', 7.2, 10.5),
>>> Label('b', 10.5, 14),
>>> Label('a', 15, 18)
>>> ])
>>> ll.label_count()
{'a': 3 'b': 2}
"""
occurrences = collections.defaultdict(int)
for label in self:
occurrences[label.value] += 1
return occurrences
def all_tokens(self, delimiter=' '):
"""
Return a list of all tokens occurring in the label-list.
Args:
delimiter (str): The delimiter used to split labels into tokens.
See :meth:`audiomate.annotations.Label.tokenized`
Returns:
:class:`set`: A set of distinct tokens.
"""
tokens = set()
for label in self:
tokens = tokens.union(set(label.tokenized(delimiter=delimiter)))
return tokens
#
# Query Label Values
#
def join(self, delimiter=' ', overlap_threshold=0.1):
"""
Return a string with all labels concatenated together.
The order of the labels is defined by the start of the label.
If the overlapping between two labels is greater than
``overlap_threshold``, an Exception is thrown.
Args:
delimiter (str): A string to join two consecutive labels.
overlap_threshold (float): Maximum overlap between two
consecutive labels.
Returns:
str: A string with all labels concatenated together.
Example:
>>> ll = LabelList(idx='some', labels=[
>>> Label('a', start=0, end=4),
>>> Label('b', start=3.95, end=6.0),
>>> Label('c', start=7.0, end=10.2),
>>> Label('d', start=10.3, end=14.0)
>>> ])
>>> ll.join(' - ')
'a - b - c - d'
"""
sorted_by_start = sorted(self.labels)
concat_values = []
last_label_end = None
for label in sorted_by_start:
if last_label_end is None or (last_label_end - label.start < overlap_threshold and last_label_end > 0):
concat_values.append(label.value)
last_label_end = label.end
else:
raise ValueError('Labels overlap, not able to define the correct order')
return delimiter.join(concat_values)
def tokenized(self, delimiter=' ', overlap_threshold=0.1):
"""
Return a ordered list of tokens based on all labels.
Joins all token from all labels (``label.tokenized()```).
If the overlapping between two labels is greater than
``overlap_threshold``, an Exception is thrown.
Args:
delimiter (str): The delimiter used to split labels into tokens.
(default: space)
overlap_threshold (float): Maximum overlap between two
consecutive labels.
Returns:
str: A list containing tokens of all labels ordered according
to the label order.
Example:
>>> ll = LabelList(idx='some', labels=[
>>> Label('a d q', start=0, end=4),
>>> Label('b', start=3.95, end=6.0),
>>> Label('c a', start=7.0, end=10.2),
>>> Label('f g', start=10.3, end=14.0)
>>> ])
>>> ll.tokenized(delimiter=' ', overlap_threshold=0.1)
['a', 'd', 'q', 'b', 'c', 'a', 'f', 'g']
"""
sorted_by_start = sorted(self.labels)
tokens = []
last_label_end = None
for label in sorted_by_start:
if last_label_end is None or (last_label_end - label.start < overlap_threshold and last_label_end > 0):
tokens.extend(label.tokenized(delimiter=delimiter))
last_label_end = label.end
else:
raise ValueError('Labels overlap, not able to define the correct order')
return tokens
#
# Restructuring
#
def separated(self):
"""
Create a separate Label-List for every distinct label-value.
Returns:
dict: A dictionary with distinct label-values as keys. Every value
is a LabelList containing only labels with the same value.
Example:
>>> ll = LabelList(idx='some', labels=[
>>> Label('a', start=0, end=4),
>>> Label('b', start=3.95, end=6.0),
>>> Label('a', start=7.0, end=10.2),
>>> Label('b', start=10.3, end=14.0)
>>> ])
>>> s = ll.separate()
>>> s['a'].labels
[Label('a', start=0, end=4), Label('a', start=7.0, end=10.2)]
>>> s['b'].labels
[Label('b', start=3.95, end=6.0), Label('b', start=10.3, end=14.0)]
"""
separated_lls = collections.defaultdict(LabelList)
for label in self.labels:
separated_lls[label.value].add(label)
for ll in separated_lls.values():
ll.idx = self.idx
return separated_lls
def labels_in_range(self, start, end, fully_included=False):
"""
Return a list of labels, that are within the given range.
Also labels that only overlap are included.
Args:
start(float): Start-time in seconds.
end(float): End-time in seconds.
fully_included(bool): If ``True``, only labels fully included
in the range are returned. Otherwise
also overlapping ones are returned.
(default ``False``)
Returns:
list: List of labels in the range.
Example:
>>> ll = LabelList(labels=[
>>> Label('a', 3.2, 4.5),
>>> Label('b', 5.1, 8.9),
>>> Label('c', 7.2, 10.5),
>>> Label('d', 10.5, 14)
>>>])
>>> ll.labels_in_range(6.2, 10.1)
[Label('b', 5.1, 8.9), Label('c', 7.2, 10.5)]
"""
if fully_included:
intervals = self.label_tree.envelop(start, end)
else:
intervals = self.label_tree.overlap(start, end)
return [iv.data for iv in intervals]
def ranges(self, yield_ranges_without_labels=False, include_labels=None):
"""
Generate all ranges of the label-list. A range is defined
as a part of the label-list for which the same labels are defined.
Args:
yield_ranges_without_labels(bool): If True also yields ranges for
which no labels are defined.
include_labels(list): If not empty, only the label values in
the list will be considered.
Returns:
generator: A generator which yields one range
(tuple start/end/list-of-labels) at a time.
Example:
>>> ll = LabelList(labels=[
>>> Label('a', 3.2, 4.5),
>>> Label('b', 5.1, 8.9),
>>> Label('c', 7.2, 10.5),
>>> Label('d', 10.5, 14)
>>>])
>>> ranges = ll.ranges()
>>> next(ranges)
(3.2, 4.5, [ < audiomate.annotations.Label at 0x1090527c8 > ])
>>> next(ranges)
(4.5, 5.1, [])
>>> next(ranges)
(5.1, 7.2, [ < audiomate.annotations.label.Label at 0x1090484c8 > ])
"""
tree_copy = self.label_tree.copy()
# Remove labels not included
if include_labels is not None:
for iv in list(tree_copy):
if iv.data.value not in include_labels:
tree_copy.remove(iv)
def reduce(x, y):
x.append(y)
return x
# Split labels when overlapping and merge equal ranges to a list of labels
tree_copy.split_overlaps()
tree_copy.merge_equals(data_reducer=reduce, data_initializer=[])
intervals = sorted(tree_copy)
last_end = intervals[0].begin
# yield range by range
for iv in intervals:
# yield an empty range if necessary
if yield_ranges_without_labels and iv.begin > last_end:
yield (last_end, iv.begin, [])
yield (iv.begin, iv.end, iv.data)
last_end = iv.end
def split(self, cutting_points, shift_times=False, overlap=0.0):
"""
Split the label-list into x parts and return them as new label-lists.
x is defined by the number of cutting-points
(``x == len(cutting_points) + 1``).
The result is a list of label-lists corresponding to each part.
Label-list 0 contains labels between ``0`` and ``cutting_points[0]``.
Label-list 1 contains labels between ``cutting_points[0]`` and
``cutting_points[1]``. And so on.
Args:
cutting_points(list): List of floats defining the points in seconds,
where the label-list is splitted.
shift_times(bool): If True, start and end-time are shifted in
splitted label-lists. So the start is relative
to the cutting point and not to the beginning
of the original label-list.
overlap(float): Amount of overlap in seconds. This amount is
subtracted from a start-cutting-point, and added
to a end-cutting-point.
Returns:
list: A list of of: class: `audiomate.annotations.LabelList`.
Example:
>>> ll = LabelList(labels=[
>>> Label('a', 0, 5),
>>> Label('b', 5, 10),
>>> Label('c', 11, 15),
>>>])
>>>
>>> res = ll.split([4.1, 8.9, 12.0])
>>> len(res)
4
>>> res[0].labels
[Label('a', 0.0, 4.1)]
>>> res[1].labels
[
Label('a', 4.1, 5.0),
Label('b', 5.0, 8.9)
]
>>> res[2].labels
[
Label('b', 8.9, 10.0),
Label('c', 11.0, 12.0)
]
>>> res[3].labels
[Label('c', 12.0, 15.0)]
If ``shift_times = True``, the times are adjusted to be relative
to the cutting-points for every label-list but the first.
>>> ll = LabelList(labels=[
>>> Label('a', 0, 5),
>>> Label('b', 5, 10),
>>>])
>>>
>>> res = ll.split([4.6])
>>> len(res)
4
>>> res[0].labels
[Label('a', 0.0, 4.6)]
>>> res[1].labels
[
Label('a', 0.0, 0.4),
Label('b', 0.4, 5.4)
]
"""
if len(cutting_points) == 0:
raise ValueError('At least one cutting-point is needed!')
# we have to loop in sorted order
cutting_points = sorted(cutting_points)
splits = []
iv_start = 0.0
for i in range(len(cutting_points) + 1):
if i < len(cutting_points):
iv_end = cutting_points[i]
else:
iv_end = float('inf')
# get all intervals intersecting range
intervals = self.label_tree.overlap(
iv_start - overlap,
iv_end + overlap
)
cp_splits = LabelList(idx=self.idx)
# Extract labels from intervals with updated times
for iv in intervals:
label = copy.deepcopy(iv.data)
label.start = max(0, iv_start - overlap, label.start)
label.end = min(iv_end + overlap, label.end)
if shift_times:
orig_start = max(0, iv_start - overlap)
label.start -= orig_start
label.end -= orig_start
cp_splits.add(label)
splits.append(cp_splits)
iv_start = iv_end
return splits
#
# Convenience Constructors
#
@classmethod
def create_single(cls, value, idx='default'):
"""
Create a label-list with a single label
containing the given value.
"""
return LabelList(idx=idx, labels=[
Label(value=value)
])
@classmethod
def with_label_values(cls, values, idx='default'):
"""
Create a new label-list containing labels with the given values.
All labels will have default start/end values of 0 and ``inf``.
Args:
values(list): List of values(str) that should be created and
appended to the label-list.
idx(str): The idx of the label-list.
Returns:
(LabelList): New label-list.
Example:
>>> ll = LabelList.with_label_values(['a', 'x', 'z'], idx='letters')
>>> ll.idx
'letters'
>>> ll.labels
[
Label('a', 0, inf),
Label('x', 0, inf),
Label('z', 0, inf),
]
"""
ll = LabelList(idx=idx)
for label_value in values:
ll.add(Label(label_value))
return ll
| 31.580692
| 115
| 0.505087
| 2,551
| 21,917
| 4.246962
| 0.128969
| 0.028429
| 0.022152
| 0.018276
| 0.272568
| 0.236847
| 0.221709
| 0.204264
| 0.169097
| 0.163744
| 0
| 0.027696
| 0.380572
| 21,917
| 693
| 116
| 31.626263
| 0.77033
| 0.50673
| 0
| 0.187817
| 0
| 0
| 0.02376
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.147208
| false
| 0
| 0.020305
| 0.015228
| 0.284264
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dcd80b6fc81b5df240724a020510872ace9a270
| 791
|
py
|
Python
|
examples/single_run/ocaes_single_run.py
|
EnergyModels/OCAES
|
d848d9fa621767e036824110de87450d524b7687
|
[
"MIT"
] | null | null | null |
examples/single_run/ocaes_single_run.py
|
EnergyModels/OCAES
|
d848d9fa621767e036824110de87450d524b7687
|
[
"MIT"
] | null | null | null |
examples/single_run/ocaes_single_run.py
|
EnergyModels/OCAES
|
d848d9fa621767e036824110de87450d524b7687
|
[
"MIT"
] | null | null | null |
import pandas as pd
from OCAES import ocaes
# ----------------------
# create and run model
# ----------------------
data = pd.read_csv('timeseries_inputs_2019.csv')
inputs = ocaes.get_default_inputs()
# inputs['C_well'] = 5000.0
# inputs['X_well'] = 50.0
# inputs['L_well'] = 50.0
# inputs['X_cmp'] = 0
# inputs['X_exp'] = 0
model = ocaes(data, inputs)
df, s = model.get_full_results()
revenue, LCOE, COVE, avoided_emissions = model.post_process(s)
s['revenue'] = revenue
s['LCOE'] = LCOE
s['COVE'] = COVE
s['avoided_emissions'] = avoided_emissions
df.to_csv('results_timeseries.csv')
s.to_csv('results_values.csv')
print(model.calculate_LCOE(s))
# ----------------------
# create plots using built-in functions
# ----------------------
model.plot_overview()
model.plot_power_energy()
| 23.969697
| 62
| 0.637168
| 111
| 791
| 4.324324
| 0.459459
| 0.058333
| 0.05
| 0.054167
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024217
| 0.112516
| 791
| 32
| 63
| 24.71875
| 0.659544
| 0.333755
| 0
| 0
| 0
| 0
| 0.190291
| 0.093204
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0.0625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dcda9f4b87b5d8b72500b6efa77e38a5d14806f
| 1,438
|
py
|
Python
|
tests/transformations/local_storage_test.py
|
am-ivanov/dace
|
c35f0b3cecc04a2c9fb668bd42a72045891e7a42
|
[
"BSD-3-Clause"
] | 1
|
2021-09-13T06:36:18.000Z
|
2021-09-13T06:36:18.000Z
|
tests/transformations/local_storage_test.py
|
1C4nfaN/dace
|
4d65e0951c112160fe783766404a806b6043b521
|
[
"BSD-3-Clause"
] | null | null | null |
tests/transformations/local_storage_test.py
|
1C4nfaN/dace
|
4d65e0951c112160fe783766404a806b6043b521
|
[
"BSD-3-Clause"
] | null | null | null |
import unittest
import dace
import numpy as np
from dace.transformation.dataflow import MapTiling, OutLocalStorage
N = dace.symbol('N')
@dace.program
def arange():
out = np.ndarray([N], np.int32)
for i in dace.map[0:N]:
with dace.tasklet:
o >> out[i]
o = i
return out
class LocalStorageTests(unittest.TestCase):
def test_even(self):
sdfg = arange.to_sdfg()
sdfg.apply_transformations([MapTiling, OutLocalStorage],
options=[{
'tile_sizes': [8]
}, {}])
self.assertTrue(
np.array_equal(sdfg(N=16), np.arange(16, dtype=np.int32)))
def test_uneven(self):
# For testing uneven decomposition, use longer buffer and ensure
# it's not filled over
output = np.ones(20, np.int32)
sdfg = arange.to_sdfg()
sdfg.apply_transformations([MapTiling, OutLocalStorage],
options=[{
'tile_sizes': [5]
}, {}])
dace.propagate_memlets_sdfg(sdfg)
sdfg(N=16, __return=output)
self.assertTrue(
np.array_equal(output[:16], np.arange(16, dtype=np.int32)))
self.assertTrue(np.array_equal(output[16:], np.ones(4, np.int32)))
if __name__ == '__main__':
unittest.main()
| 30.595745
| 74
| 0.535466
| 158
| 1,438
| 4.727848
| 0.443038
| 0.046854
| 0.064257
| 0.084337
| 0.404284
| 0.369478
| 0.369478
| 0.310576
| 0.21419
| 0.21419
| 0
| 0.029947
| 0.349791
| 1,438
| 46
| 75
| 31.26087
| 0.768984
| 0.057719
| 0
| 0.277778
| 0
| 0
| 0.02145
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 1
| 0.083333
| false
| 0
| 0.111111
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|