hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
370bb4fe068e3ba3982bd58f8f7bad72c2b4ecd4 | 257 | py | Python | demos/floc_sample/createSiteList.py | apakrash/floc_simulator | 6276d9c94e953fb704ca687e7234658a699a434c | [
"MIT"
] | null | null | null | demos/floc_sample/createSiteList.py | apakrash/floc_simulator | 6276d9c94e953fb704ca687e7234658a699a434c | [
"MIT"
] | null | null | null | demos/floc_sample/createSiteList.py | apakrash/floc_simulator | 6276d9c94e953fb704ca687e7234658a699a434c | [
"MIT"
] | null | null | null | import json
siteList = []
for i in range(1,20):
siteList.append(input(f'Enter site {i}: '))
print(siteList)
#print(siteList)
with open('host_list.json', 'w', encoding='utf-8') as f:
json.dump(siteList, f, ensure_ascii=False, indent=4) | 21.416667 | 57 | 0.645914 | 40 | 257 | 4.1 | 0.75 | 0.158537 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02381 | 0.182879 | 257 | 12 | 58 | 21.416667 | 0.757143 | 0.058366 | 0 | 0 | 0 | 0 | 0.155844 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3711dfaf9aadca685cb6c92e813e94fd2708d1c2 | 4,354 | py | Python | tests/core/contracts/test_contract_constructor_encoding.py | geofferyj/web3.py | b406663f59d5b228487951f019eaf45b3eb9413b | [
"MIT"
] | 3,041 | 2017-11-22T16:23:46.000Z | 2022-03-31T15:19:39.000Z | tests/core/contracts/test_contract_constructor_encoding.py | geofferyj/web3.py | b406663f59d5b228487951f019eaf45b3eb9413b | [
"MIT"
] | 1,506 | 2017-11-22T15:44:34.000Z | 2022-03-31T18:40:05.000Z | tests/core/contracts/test_contract_constructor_encoding.py | geofferyj/web3.py | b406663f59d5b228487951f019eaf45b3eb9413b | [
"MIT"
] | 1,095 | 2017-11-22T18:20:22.000Z | 2022-03-31T13:05:31.000Z | import pytest
from eth_utils import (
encode_hex,
remove_0x_prefix,
)
def test_contract_constructor_abi_encoding_with_no_constructor_fn(MathContract, MATH_CODE):
deploy_data = MathContract._encode_constructor_data()
assert deploy_data == MATH_CODE
def test_contract_constructor_abi_encoding_with_constructor_with_no_args(SimpleConstructorContract,
SIMPLE_CONSTRUCTOR_CODE):
deploy_data = SimpleConstructorContract._encode_constructor_data()
assert deploy_data == SIMPLE_CONSTRUCTOR_CODE
@pytest.mark.parametrize(
'args,kwargs',
(
(None, 'kwarg-is-ignored'),
('arg-is-ignored', None),
),
)
def test_contract_error_if_additional_args_are_supplied_with_no_constructor_fn(MathContract,
args, kwargs):
with pytest.raises(TypeError, match="Constructor args"):
MathContract._encode_constructor_data(args, kwargs)
@pytest.mark.parametrize(
'arguments',
(
[],
[1234],
['abcd', 1234], # wrong order
[1234, 'abcd', 'extra-arg'], # extra arguments
[-1234, 'abcd'], # wrong types
['1234', 'abcd'], # wrong types
),
)
def test_error_if_invalid_arguments_supplied(WithConstructorArgumentsContract, arguments):
with pytest.raises(TypeError):
WithConstructorArgumentsContract._encode_constructor_data(arguments)
@pytest.mark.parametrize(
'bytes_arg',
(
b'abcd',
'0x61626364',
),
)
def test_contract_constructor_encoding_encoding(web3, WithConstructorArgumentsContract, bytes_arg):
deploy_data = WithConstructorArgumentsContract._encode_constructor_data([1234, bytes_arg])
encoded_args = '0x00000000000000000000000000000000000000000000000000000000000004d26162636400000000000000000000000000000000000000000000000000000000' # noqa: E501
expected_ending = encode_hex(web3.codec.encode_abi(['uint256', 'bytes32'], [1234, b'abcd']))
assert expected_ending == encoded_args
assert deploy_data.endswith(remove_0x_prefix(expected_ending))
def test_contract_constructor_encoding_encoding_warning(web3, WithConstructorArgumentsContract):
with pytest.warns(
DeprecationWarning,
match='in v6 it will be invalid to pass a hex string without the \"0x\" prefix'
):
deploy_data = WithConstructorArgumentsContract._encode_constructor_data([1234, '61626364'])
encoded_args = '0x00000000000000000000000000000000000000000000000000000000000004d26162636400000000000000000000000000000000000000000000000000000000' # noqa: E501
expected_ending = encode_hex(
web3.codec.encode_abi(['uint256', 'bytes32'], [1234, b'abcd'])
)
assert expected_ending == encoded_args
assert deploy_data.endswith(remove_0x_prefix(expected_ending))
@pytest.mark.parametrize(
'bytes_arg,encoded_args',
(
('0x' + '00' * 32, '0x00000000000000000000000000000000000000000000000000000000000004d20000000000000000000000000000000000000000000000000000000000000000'), # noqa: E501
(b'1' * 32, '0x00000000000000000000000000000000000000000000000000000000000004d23131313131313131313131313131313131313131313131313131313131313131'), # noqa: E501
),
)
def test_contract_constructor_encoding_encoding_strict(
w3_strict_abi,
WithConstructorArgumentsContractStrict,
encoded_args,
bytes_arg):
deploy_data = WithConstructorArgumentsContractStrict._encode_constructor_data([1234, bytes_arg])
expected_ending = encode_hex(
w3_strict_abi.codec.encode_abi(['uint256', 'bytes32'], [1234, bytes_arg])
)
assert expected_ending == encoded_args
assert deploy_data.endswith(remove_0x_prefix(expected_ending))
@pytest.mark.parametrize(
'bytes_arg',
(
b'abcd',
'0x61626364',
'',
'61626364',
),
)
def test_contract_constructor_encoding_encoding_strict_errors(
w3_strict_abi,
WithConstructorArgumentsContractStrict,
bytes_arg):
with pytest.raises(
TypeError,
match="One or more arguments could not be encoded to the necessary ABI type."
):
WithConstructorArgumentsContractStrict._encode_constructor_data([1234, bytes_arg])
| 36.588235 | 175 | 0.714515 | 393 | 4,354 | 7.557252 | 0.249364 | 0.03367 | 0.056566 | 0.052525 | 0.525926 | 0.486195 | 0.419529 | 0.266667 | 0.246801 | 0.246801 | 0 | 0.184536 | 0.200965 | 4,354 | 118 | 176 | 36.898305 | 0.669158 | 0.021819 | 0 | 0.346939 | 0 | 0 | 0.210353 | 0.127529 | 0 | 0 | 0.127059 | 0 | 0.081633 | 1 | 0.081633 | false | 0.010204 | 0.020408 | 0 | 0.102041 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
371439527f71f5b5ea0e623382624347691d2251 | 832 | py | Python | Pasture_Growth_Modelling/initialisation_support/define_reseed_params.py | Komanawa-Solutions-Ltd/SLMACC-2020-CSRA | 914b6912c5f5b522107aa9406fb3d823e61c2ebe | [
"Apache-2.0"
] | null | null | null | Pasture_Growth_Modelling/initialisation_support/define_reseed_params.py | Komanawa-Solutions-Ltd/SLMACC-2020-CSRA | 914b6912c5f5b522107aa9406fb3d823e61c2ebe | [
"Apache-2.0"
] | null | null | null | Pasture_Growth_Modelling/initialisation_support/define_reseed_params.py | Komanawa-Solutions-Ltd/SLMACC-2020-CSRA | 914b6912c5f5b522107aa9406fb3d823e61c2ebe | [
"Apache-2.0"
] | null | null | null | """
Author: Matt Hanson
Created: 18/12/2020 9:30 AM
"""
from Pasture_Growth_Modelling.initialisation_support.inital_long_term_runs import run_past_basgra_irrigated, \
run_past_basgra_dryland, plot_multiple_results
import pandas as pd
import numpy as np
if __name__ == '__main__':
reseed=True
irr_ox = run_past_basgra_irrigated(site='oxford',reseed=reseed)
eyrewell = run_past_basgra_irrigated(reseed=reseed)
dry_ox = run_past_basgra_dryland(site='oxford', reseed=reseed)
plot_multiple_results({'irr_ox':irr_ox, 'irr_eyre':eyrewell, 'dry_ox':dry_ox},
out_vars=['BASAL'], rolling=30, main_kwargs={'alpha': 0.2}, label_main=False,
label_rolling=True, show=True)
t = dry_ox
t.loc[t.doy == 152].BASAL.describe(percentiles=np.arange(5, 55, 5) / 100) | 41.6 | 110 | 0.706731 | 121 | 832 | 4.487603 | 0.553719 | 0.064457 | 0.119705 | 0.121547 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.036496 | 0.176683 | 832 | 20 | 111 | 41.6 | 0.756204 | 0.05649 | 0 | 0 | 0 | 0 | 0.064516 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.214286 | 0 | 0.214286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3714939ff710373dc09f1b61b849805c96854964 | 5,570 | py | Python | system_maintenance/admin.py | mfcovington/django-system-maintenance | f28c5480ddfc862e7114846ae452ec73ed747029 | [
"BSD-3-Clause"
] | 2 | 2018-12-08T01:11:17.000Z | 2018-12-14T22:54:04.000Z | system_maintenance/admin.py | mfcovington/django-system-maintenance | f28c5480ddfc862e7114846ae452ec73ed747029 | [
"BSD-3-Clause"
] | null | null | null | system_maintenance/admin.py | mfcovington/django-system-maintenance | f28c5480ddfc862e7114846ae452ec73ed747029 | [
"BSD-3-Clause"
] | 1 | 2020-11-21T18:25:50.000Z | 2020-11-21T18:25:50.000Z | from django import forms
from django.contrib import admin
from .models import (DocumentationRecord, Hardware, MaintenanceRecord,
MaintenanceRecordRelationship, MaintenanceType, Software, SysAdmin, System)
class ReferencingRecordInline(admin.TabularInline):
model = MaintenanceRecordRelationship
fk_name = 'referencing_record'
class ReferencedRecordInline(admin.TabularInline):
model = MaintenanceRecordRelationship
fk_name = 'referenced_record'
@admin.register(Hardware)
class HardwareAdmin(admin.ModelAdmin):
search_fields = ['name']
class DocumentationRecordAdminForm(forms.ModelForm):
maintenance_records = forms.ModelMultipleChoiceField(
MaintenanceRecord.objects.all(),
widget=admin.widgets.FilteredSelectMultiple(
'MaintenanceRecord', False),
required=False,
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.instance.pk:
self.initial['maintenance_records'] = \
self.instance.maintenance_records.values_list('pk', flat=True)
def save(self, *args, **kwargs):
instance = super().save(*args, **kwargs)
if instance.pk:
instance.maintenance_records.clear()
instance.maintenance_records.add(
*self.cleaned_data['maintenance_records'])
return instance
@admin.register(DocumentationRecord)
class DocumentationRecordAdmin(admin.ModelAdmin):
form = DocumentationRecordAdminForm
fieldset_basic = ('Basic', {
'fields': [
'title',
'maintenance_type'
],
})
fieldset_description = ('Documentation', {
'fields': [
'documentation',
'documentation_markup_type',
],
})
fieldset_maintenance_records = ('Related Maintenance Records', {
'fields': [
'maintenance_records',
],
})
fieldset_timestamps = ('Timestamps', {
'classes': ['collapse'],
'fields': [
'created_at',
'updated_at',
],
})
fieldsets = [
fieldset_basic,
fieldset_description,
fieldset_maintenance_records,
fieldset_timestamps,
]
list_display = [
'title',
'maintenance_type',
'created_at',
'updated_at',
]
list_filter = [
'maintenance_type',
]
readonly_fields = [
'created_at',
'updated_at',
]
save_on_top = True
search_fields = [
'title',
'documentation',
]
@admin.register(MaintenanceRecordRelationship)
class MaintenanceRecordRelationshipAdmin(admin.ModelAdmin):
list_display = [
'__str__',
'referencing_record',
'referenced_record',
]
search_fields = [
'referencing_record__description',
'referencing_record__procedure',
'referencing_record__problems',
'referencing_record__system__name',
'referencing_record__hardware__name',
'referencing_record__software__name',
'referencing_record__maintenance_type__maintenance_type',
'referenced_record__description',
'referenced_record__procedure',
'referenced_record__problems',
'referenced_record__system__name',
'referenced_record__hardware__name',
'referenced_record__software__name',
'referenced_record__maintenance_type__maintenance_type',
]
@admin.register(MaintenanceRecord)
class MaintenanceRecordAdmin(admin.ModelAdmin):
fieldset_basic = ('Basic', {
'fields': [
'system',
'sys_admin',
'maintenance_type',
'hardware',
'software',
'datetime',
'status',
],
})
fieldset_description = ('Description', {
'fields': [
'description',
'description_markup_type',
],
})
fieldset_procedure = ('Procedure', {
'fields': [
'procedure',
'procedure_markup_type',
],
})
fieldset_problems = ('Problems', {
'fields': [
'problems',
'problems_markup_type',
],
})
fieldset_documentation = ('Documentation Records', {
'fields': [
'documentation_records',
],
})
fieldsets = [
fieldset_basic,
fieldset_description,
fieldset_procedure,
fieldset_problems,
fieldset_documentation,
]
filter_horizontal = [
'hardware',
'software',
'documentation_records',
]
inlines = [
ReferencingRecordInline,
ReferencedRecordInline,
]
list_display = [
'__str__',
'system',
'datetime',
'maintenance_type',
'sys_admin',
'status',
]
list_filter = [
'status',
'system',
'maintenance_type',
'hardware',
'software',
'sys_admin',
]
save_on_top = True
search_fields = [
'description',
'procedure',
'problems',
]
@admin.register(MaintenanceType)
class MaintenanceTypeAdmin(admin.ModelAdmin):
search_fields = [
'maintenance_type',
'description',
]
@admin.register(Software)
class SoftwareAdmin(admin.ModelAdmin):
search_fields = ['name']
@admin.register(System)
class SystemAdmin(admin.ModelAdmin):
search_fields = [
'name',
'description',
]
admin.site.register(SysAdmin)
| 22.191235 | 79 | 0.597846 | 410 | 5,570 | 7.739024 | 0.239024 | 0.052001 | 0.026473 | 0.034037 | 0.150331 | 0.083202 | 0 | 0 | 0 | 0 | 0 | 0 | 0.29623 | 5,570 | 250 | 80 | 22.28 | 0.809439 | 0 | 0 | 0.494898 | 0 | 0 | 0.247038 | 0.105566 | 0 | 0 | 0 | 0 | 0 | 1 | 0.010204 | false | 0 | 0.015306 | 0 | 0.255102 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
37192bf521db3c020565d1e29a16401738175408 | 1,190 | py | Python | bozkurt-port-scanner V1.0.py | B3zkurt/bozkurt-port-scanner | 5a1f485ab042abc71d5307e433d845c7adbcf168 | [
"BSL-1.0"
] | null | null | null | bozkurt-port-scanner V1.0.py | B3zkurt/bozkurt-port-scanner | 5a1f485ab042abc71d5307e433d845c7adbcf168 | [
"BSL-1.0"
] | null | null | null | bozkurt-port-scanner V1.0.py | B3zkurt/bozkurt-port-scanner | 5a1f485ab042abc71d5307e433d845c7adbcf168 | [
"BSL-1.0"
] | 1 | 2022-02-11T18:24:13.000Z | 2022-02-11T18:24:13.000Z | from socket import *
def banner():
print("""
###########################################################
#------------------PORT SCAN V 1.0-----------------------#
#--------------------Coder By Bozkurt-------------------#
###########################################################
""")
def scan():
if __name__ == '__main__':
hedef=input("Hedef: ")
hedef_ip=gethostbyname(hedef)
print("taranıyor:",hedef_ip)
for i in range(6555):#Mevcut 6555 ayarında tüm portları teker teker tarayacaktır.
#Değiştirmek için taranmasını istediğiniz port aralığını yazınız örneğin 50.porttan 60.portun arasındaki
#Portları tarayacaksanız for i in range(50,60) şeklinde düzenleyebilirsiniz.
baglantı=socket(AF_INET,SOCK_STREAM)
port_tarama=baglantı.connect_ex((hedef_ip,i))
if(port_tarama==0):
print("Açık port: %d " %(i))
baglantı.close()
input("tarama sonlandırıldı.")
banner()
scan()
| 36.060606 | 107 | 0.428571 | 96 | 1,190 | 5.145833 | 0.625 | 0.04251 | 0.024292 | 0.044534 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02381 | 0.329412 | 1,190 | 32 | 108 | 37.1875 | 0.595238 | 0.20084 | 0 | 0.090909 | 0 | 0 | 0.328603 | 0.241266 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.045455 | 0 | 0.136364 | 0.136364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
371a9f6e54991beac7038e5bc052194b2305b7a7 | 5,413 | py | Python | ner/evaluation/evaluator.py | BonnierNews/lukas-ner-model | 1f7f688f9b0f1e7b7cb66c42f188358d27a0be09 | [
"MIT"
] | null | null | null | ner/evaluation/evaluator.py | BonnierNews/lukas-ner-model | 1f7f688f9b0f1e7b7cb66c42f188358d27a0be09 | [
"MIT"
] | null | null | null | ner/evaluation/evaluator.py | BonnierNews/lukas-ner-model | 1f7f688f9b0f1e7b7cb66c42f188358d27a0be09 | [
"MIT"
] | null | null | null | from string import punctuation
import random
import jsonlines
import regex
class Evaluator:
"""Class for evaluating NER models on two IOB tagged datasets: SUC 3.0 and Web News 2012, both from Språkbanken."""
def __init__(self, suc):
self.suc = suc
if suc:
self.path = "data/input/suc_3.0_iob.txt"
self.no_chunk = "O"
self.desired = ["B-PRS", "I-PRS", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
else:
self.path = "data/input/ner_corpus.txt"
self.no_chunk = "0"
self.desired = ["PER", "ORG", "LOC"]
def _format_sentences(self, corpus):
"""Returns a list of only sentences."""
corpus = corpus.split("\n\n")
sentences = []
for sentence in corpus:
# sentence = regex.sub("\t.*\n(?=\p{P})", "", sentence)
sentence = regex.sub("\t.*?\n", " ", sentence)
sentence = regex.split("\t.*", sentence)[0]
# sentence = regex.sub('" (?=.*")', ' "', sentence)
sentences += [sentence]
return sentences[:-1]
def _format_tags(self, entities):
"""Returns a list of only tags"""
new_l = [-1]
new_l += [i for i, val in enumerate(entities) if val == "\n"]
grouped = []
for i in range(1, len(new_l)):
sen_ent = [
entities[j].split("\t") for j in range(new_l[i - 1] + 1, new_l[i])
]
ent_dict = [{"word": e[0], "entity": e[1].strip()} for e in sen_ent]
grouped += [ent_dict]
return grouped
def load_corpus(self):
"""Loads the NER-tagged corpus and returns the sentences and tags."""
with open(self.path) as f:
lines = f.readlines()
f.seek(0)
corpus = f.read()
entities = [line for line in lines if not line.endswith(f"\t{self.no_chunk}\n")]
sentences = self._format_sentences(corpus)
tags = self._format_tags(entities)
return sentences, tags
def _filter_tags(self, tags):
"""Filters out unwanted IOB tags."""
filtered = []
for ts in tags:
filt = [t for t in ts if t["entity"] in self.desired]
filtered += [filt] if filt else [[]]
return filtered
def prepare_for_evaluation(self, sentences, tags, sample_size):
"""Prepare dataset for evaluation."""
if sample_size < 1.0:
no_sentences = len(sentences)
eval_size = int(no_sentences * sample_size)
random.seed(1234567890)
eval_inds = random.sample(range(0, no_sentences - 1), eval_size)
sentences = [sentences[i] for i in eval_inds]
tags = [tags[i] for i in eval_inds]
filtered = self._filter_tags(tags)
return sentences, filtered
@staticmethod
def get_results(path):
"""Returns previouslt saved evaluation results."""
with jsonlines.open(path, "r") as reader:
results = [obj for obj in reader]
return results
# f = found, g = golden standard, w = words, e = entities, i = index, d = dictionary, t = tag
def _evaluate_typewise(self, f_w, f_e, g_w, g_e, d, t):
"""Evaluates model performance for one entity type."""
n_i = [i for i, x in enumerate(f_e) if x == t]
g_i = [i for i, x in enumerate(g_e) if x == t]
f_w = [f_w[i].split() for i in n_i]
f_w = [w for ws in f_w for w in ws if w not in set(punctuation)]
g_w = [g_w[i] for i in g_i]
true_positives = []
all_positives = f_w
relevant = g_w.copy()
for w in f_w:
if w in g_w:
true_positives += [w]
del g_w[g_w.index(w)]
d["tp"] += true_positives
d["ap"] += all_positives
d["rel"] += relevant
return d
def evaluate(self, entities, tags, min_thresh):
"""Performs evaluation."""
# True positives, all positives, relevant
per = {"tp": [], "ap": [], "rel": []}
org = {"tp": [], "ap": [], "rel": []}
loc = {"tp": [], "ap": [], "rel": []}
no_selected = 0
for i, ents in enumerate(entities):
found_w = [e["word"] for e in ents if e["score"] >= min_thresh]
found_e = [e["entity"] for e in ents if e["score"] >= min_thresh]
gold_w = [e["word"] for e in tags[i]]
no_selected += len(found_w)
if self.suc:
gold_e = [e["entity"][2:] for e in tags[i]]
gold_e = ["PER" if e == "PRS" else e for e in gold_e]
else:
gold_e = [e["entity"] for e in tags[i]]
per = self._evaluate_typewise(found_w, found_e, gold_w, gold_e, per, "PER")
org = self._evaluate_typewise(found_w, found_e, gold_w, gold_e, org, "ORG")
loc = self._evaluate_typewise(found_w, found_e, gold_w, gold_e, loc, "LOC")
return per, org, loc, no_selected
@staticmethod
def calculate_metrics(res, tag):
"""Calculates and returns precision, recall and F1 score."""
precision = len(res["tp"]) / len(res["ap"])
recall = len(res["tp"]) / len(res["rel"])
f1 = 2 * precision * recall / (precision + recall)
print(f"{tag}: precision = {precision}, recall = {recall}, f1 = {f1}")
return precision, recall, f1
| 33.621118 | 119 | 0.53741 | 742 | 5,413 | 3.774933 | 0.221024 | 0.012853 | 0.014995 | 0.007497 | 0.145305 | 0.102106 | 0.077115 | 0.064263 | 0.064263 | 0.044984 | 0 | 0.010881 | 0.320894 | 5,413 | 160 | 120 | 33.83125 | 0.751088 | 0.130242 | 0 | 0.037383 | 0 | 0 | 0.063601 | 0.010958 | 0 | 0 | 0 | 0 | 0 | 1 | 0.093458 | false | 0 | 0.037383 | 0 | 0.224299 | 0.009346 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
371b4de36b52d84cac233275c0bbe8ca788c6d87 | 825 | py | Python | files/sources_raw/multiwordnet/jpn/jpn2tab.py | rvjansen/flp | 989b6c0781b231e4cf2d4c1c0300e20f3f2f9aa6 | [
"CC0-1.0"
] | 1 | 2022-02-20T10:00:28.000Z | 2022-02-20T10:00:28.000Z | files/sources_raw/multiwordnet/jpn/jpn2tab.py | rvjansen/flp | 989b6c0781b231e4cf2d4c1c0300e20f3f2f9aa6 | [
"CC0-1.0"
] | 8 | 2022-01-10T01:35:58.000Z | 2022-01-16T02:49:41.000Z | files/sources_raw/multiwordnet/jpn/jpn2tab.py | rvjansen/flp | 989b6c0781b231e4cf2d4c1c0300e20f3f2f9aa6 | [
"CC0-1.0"
] | null | null | null | #!/usr/share/python
# -*- encoding: utf-8 -*-
#
# Extract synset-word pairs from the Japanese Wordnet
#
import sys
import codecs
#import re
wndata="/home/bond/svn/wnja-code/tab/"
wnname = "Japanese Wordnet"
wnlang = "jpn"
wnurl = "http://nlpwww.nict.go.jp/wn-ja/"
wnlicense = "wordnet"
#
# header
#
outfile = "wn-data-%s.tab" % wnlang
o = codecs.open(outfile, "w", "utf-8" )
o.write("# %s\t%s\t%s\t%s \n" % (wnname, wnlang, wnurl, wnlicense))
#
# Data is in the file wnjpn-all.tab
# offset<TAB>Japanese<TAB>Status
f = codecs.open(wndata + "wnjpn-all.tab", "rb", "utf-8" )
sysnset = str()
lemma = str()
for l in f:
(synset, lemma, status) = l.strip().split("\t")
if status in ['hand', 'mlsn', 'mono', 'multi' ]:
o.write("%s\tlemma\t%s\n" % (synset, lemma))
##print "%s\t%s\n" % (synset, lemma)
| 21.710526 | 67 | 0.613333 | 131 | 825 | 3.862595 | 0.534351 | 0.019763 | 0.023715 | 0.01581 | 0.06917 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004386 | 0.170909 | 825 | 37 | 68 | 22.297297 | 0.73538 | 0.253333 | 0 | 0 | 0 | 0 | 0.297342 | 0.048173 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.117647 | 0 | 0.117647 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
371c0cd9f1541bc9d1d7743f22dc97957977dc77 | 5,701 | py | Python | util/loadtester/scripts/mnist_rest_locust.py | lorello/seldon-core | 9509d41856071c72c419f7ec805c39be38a41249 | [
"Apache-2.0"
] | 1 | 2019-09-06T14:06:50.000Z | 2019-09-06T14:06:50.000Z | util/loadtester/scripts/mnist_rest_locust.py | lorello/seldon-core | 9509d41856071c72c419f7ec805c39be38a41249 | [
"Apache-2.0"
] | null | null | null | util/loadtester/scripts/mnist_rest_locust.py | lorello/seldon-core | 9509d41856071c72c419f7ec805c39be38a41249 | [
"Apache-2.0"
] | null | null | null | from locust.stats import RequestStats
from locust import HttpLocust, TaskSet, task, events
import os
import sys, getopt, argparse
from random import randint,random
import json
from locust.events import EventHook
import requests
import re
import time
import resource
import socket
import signal
from socket import error as socket_error
import errno
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
def connect_to_master(host,port):
success = False
while not success:
s = socket.socket(
socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(1)
try:
s.connect((host, int(port)))
s.shutdown(socket.SHUT_RD)
print("Connected to master")
success = True
except socket.error as serr:
print("Connection failed - sleeping")
time.sleep(1)
def parse_arguments():
parser = argparse.ArgumentParser(prog='locust')
parser.add_argument('--host')
parser.add_argument('--master-host',default="127.0.0.1")
parser.add_argument('--master-port',default="5557")
parser.add_argument('--clients',default=1, type=int)
parser.add_argument('--hatch-rate',default=1, type=int)
parser.add_argument('--master', action='store_true')
parser.add_argument('--slave', action='store_true')
args, unknown = parser.parse_known_args()
#args = parser.parse_args()
opts = vars(args)
print(args)
if args.slave:
print("Sleeping 10 secs hack")
time.sleep(10)
connect_to_master(args.master_host,args.master_port)
return args.host, args.clients, args.hatch_rate
HOST, MAX_USERS_NUMBER, USERS_PER_SECOND = parse_arguments()
rsrc = resource.RLIMIT_NOFILE
soft, hard = resource.getrlimit(rsrc)
#resource.setrlimit(rsrc, (65535, hard)) #limit to one kilobyte
soft, hard = resource.getrlimit(rsrc)
def getEnviron(key,default):
if key in os.environ:
return os.environ[key]
else:
return default
class SeldonJsLocust(TaskSet):
    """Locust task set exercising a Seldon prediction API with MNIST digits.

    Per-user flow: read configuration from the environment, obtain an OAuth
    token (when enabled), then repeatedly POST one MNIST image to the
    predictions endpoint and optionally send a reward-feedback request based
    on whether the model's prediction matched the true label.
    """

    def get_token(self):
        """Fetch an OAuth access token via the client-credentials grant and
        cache it on self.access_token; exits the whole process on failure."""
        print("Getting access token with key "+self.oauth_key+" and secret "+self.oauth_secret)
        r = self.client.request("POST","/oauth/token",headers={"Accept":"application/json"},data={"grant_type":"client_credentials"},auth=(self.oauth_key,self.oauth_secret))
        if r.status_code == 200:
            j = json.loads(r.content)
            self.access_token = j["access_token"]
            print("got access token "+self.access_token)
        else:
            print("failed to get access token")
            print(r.status_code)
            sys.exit(1)

    def on_start(self):
        """Per-user setup: configuration, optional OAuth token, MNIST data."""
        print("on_start")
        self.oauth_enabled = getEnviron('OAUTH_ENABLED',"true")
        self.oauth_key = getEnviron('OAUTH_KEY',"key")
        self.oauth_secret = getEnviron('OAUTH_SECRET',"secret")
        self.data_size = int(getEnviron('DATA_SIZE',"1"))
        self.send_feedback = int(getEnviron('SEND_FEEDBACK',"1"))
        if self.oauth_enabled == "true":
            self.get_token()
        else:
            self.access_token = "NONE"
        self.mnist = input_data.read_data_sets("MNIST_data/", one_hot = True)

    def sendFeedback(self,request,response,reward):
        """POST a {request, response, reward} triple to the feedback endpoint.

        On a 401 the cached token is refreshed (when OAuth is enabled) so a
        later attempt can succeed; other failures log the headers and raise.
        """
        j = {"request":request,"response":response,"reward":reward}
        jStr = json.dumps(j)
        r = self.client.request("POST","/api/v0.1/feedback",headers={"Content-Type":"application/json","Accept":"application/json","Authorization":"Bearer "+self.access_token},name="feedback",data=jStr)
        if not r.status_code == 200:
            print("Failed feedback request "+str(r.status_code))
            if r.status_code == 401:
                if self.oauth_enabled == "true":
                    self.get_token()
            else:
                print(r.headers)
                r.raise_for_status()

    @task
    def getPrediction(self):
        """Send one MNIST image for prediction; when feedback is enabled,
        report reward 1.0/0.0 according to prediction correctness."""
        batch_xs, batch_ys = self.mnist.train.next_batch(1)
        data = batch_xs[0].reshape((1,784))
        data = np.around(data,decimals=2)
        # NOTE(review): `features` is computed but unused since the "names"
        # field was commented out of the request payload below.
        features = ["X"+str(i+1) for i in range (0,self.data_size)]
        #request = {"data":{"names":features,"ndarray":data.tolist()}}
        request = {"data":{"ndarray":data.tolist()}}
        jStr = json.dumps(request)
        r = self.client.request("POST","/api/v0.1/predictions",headers={"Content-Type":"application/json","Accept":"application/json","Authorization":"Bearer "+self.access_token},name="predictions",data=jStr)
        if r.status_code == 200:
            if self.send_feedback == 1:
                response = r.json()
                # assumes response meta carries routing info for an
                # "eg-router" component -- TODO confirm deployment graph
                route = response.get("meta").get("routing").get("eg-router")
                proba = response["data"]["ndarray"][0]
                predicted = proba.index(max(proba))
                correct = np.argmax(batch_ys[0])
                # NOTE(review): the body is decoded a second time here even
                # though r.json() above already parsed it.
                j = json.loads(r.content)
                if predicted == correct:
                    self.sendFeedback(request,j,1.0)
                    print("Correct!")
                else:
                    self.sendFeedback(request,j,0.0)
                    print("Incorrect!")
        else:
            print("Failed prediction request "+str(r.status_code))
            if r.status_code == 401:
                if self.oauth_enabled == "true":
                    self.get_token()
            else:
                print(r.headers)
                r.raise_for_status()
class WebsiteUser(HttpLocust):
    """Locust user definition wiring SeldonJsLocust with wait-time bounds
    (milliseconds) configurable through MIN_WAIT / MAX_WAIT env vars."""
    task_set = SeldonJsLocust
    min_wait=int(getEnviron('MIN_WAIT',"900")) # Min time between requests of each user
    max_wait=int(getEnviron('MAX_WAIT',"1100")) # Max time between requests of each user
    stop_timeout= 1000000 # Stopping time
| 36.544872 | 208 | 0.619891 | 708 | 5,701 | 4.861582 | 0.299435 | 0.026148 | 0.025567 | 0.015108 | 0.219059 | 0.17606 | 0.15921 | 0.140616 | 0.124346 | 0.113306 | 0 | 0.017527 | 0.24943 | 5,701 | 155 | 209 | 36.780645 | 0.786866 | 0.042098 | 0 | 0.193798 | 0 | 0 | 0.147706 | 0.003853 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054264 | false | 0 | 0.131783 | 0 | 0.255814 | 0.116279 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
371d6857f471bf5c4303b94e13030507ce47c281 | 8,475 | py | Python | tests/587/test_engine_integration.py | CSUN-COMP587-F18/keras | 5beed1cae876f570f92ee49b532aef2f97176bb2 | [
"MIT"
] | null | null | null | tests/587/test_engine_integration.py | CSUN-COMP587-F18/keras | 5beed1cae876f570f92ee49b532aef2f97176bb2 | [
"MIT"
] | 10 | 2018-09-27T23:03:18.000Z | 2018-12-05T23:32:33.000Z | tests/587/test_engine_integration.py | CSUN-COMP587-F18/keras | 5beed1cae876f570f92ee49b532aef2f97176bb2 | [
"MIT"
] | null | null | null | import threading
import pytest
import numpy as np
import pandas as pd
import sys
from keras import losses
from keras.engine import Input
from keras.engine.training import Model
from keras.engine import training_utils
from keras.layers import Dense, Dropout
from keras.utils.generic_utils import slice_arrays
from keras.models import Sequential
from keras.utils import Sequence
class RandSequence(Sequence):
    """Keras Sequence yielding random two-input / two-output batches.

    Each item is ``([in1 (batch,3), in2 (batch,3)],
    [out1 (batch,4), out2 (batch,3)])`` of uniform random floats; every
    requested index is recorded in ``self.logs`` for later inspection.
    """

    def __init__(self, batchSize, sequenceLength=12):
        self.batchSize = batchSize
        self.sequenceLength = sequenceLength
        self.logs = []  # indices requested via __getitem__, in order

    def __len__(self):
        return self.sequenceLength

    def __getitem__(self, idx):
        self.logs.append(idx)
        rand = np.random.random
        inputs = [rand((self.batchSize, 3)), rand((self.batchSize, 3))]
        targets = [rand((self.batchSize, 4)), rand((self.batchSize, 3))]
        return (inputs, targets)

    def on_epoch_end(self):
        # Nothing to reshuffle -- data is freshly random on every access.
        pass
def test_length_consistency():
    """check_array_length_consistency accepts equal-length (or None) arrays
    and raises ValueError when lengths or None-ness disagree."""
    # All-None and matching-first-dimension inputs must pass silently.
    training_utils.check_array_length_consistency(None, None, None)
    a_np = np.random.random((4, 3, 3))
    training_utils.check_array_length_consistency(a_np, a_np, a_np)
    training_utils.check_array_length_consistency(
        [a_np, a_np], [a_np, a_np], [a_np, a_np])
    training_utils.check_array_length_consistency([None], [None], [None])
    b_np = np.random.random((3, 4))
    # Mixing arrays with None, or arrays of different lengths, must raise.
    with pytest.raises(ValueError):
        training_utils.check_array_length_consistency(a_np, None, None)
    with pytest.raises(ValueError):
        training_utils.check_array_length_consistency(a_np, a_np, None)
    with pytest.raises(ValueError):
        training_utils.check_array_length_consistency([a_np], [None], None)
    with pytest.raises(ValueError):
        training_utils.check_array_length_consistency([a_np], [b_np], None)
    with pytest.raises(ValueError):
        training_utils.check_array_length_consistency([a_np], None, [b_np])
class threadsafe_iter:
    """Serialize access to a wrapped iterator with a mutex.

    Several threads may share one wrapped iterator; the lock guarantees
    each underlying item is handed out exactly once.
    """

    def __init__(self, it):
        self.it = it
        self.lock = threading.Lock()

    def __iter__(self):
        return self

    def next(self):
        """Fetch the next item while holding the lock (Python-2-style name)."""
        with self.lock:
            item = next(self.it)
        return item

    def __next__(self):
        """Python-3 iterator protocol hook; defers to next()."""
        return self.next()
def threadsafe_generator(f):
    """Decorator: make the generator produced by *f* safe to share across
    threads by wrapping each returned iterator in threadsafe_iter."""
    def wrapper(*args, **kwargs):
        return threadsafe_iter(f(*args, **kwargs))
    return wrapper
def testslice_arrays():
    """slice_arrays tolerates ndarray, list-with-Nones, [None] and plain None
    inputs for each combination of start/stop arguments (smoke test only)."""
    inputA = np.random.random((10, 3))
    slice_arrays(None)
    slice_arrays(inputA, 0)
    slice_arrays(inputA, 0, 1)
    slice_arrays(inputA, stop=2)
    # A heterogeneous list containing Nones.
    inputA = [None, [1, 1], None, [1, 1]]
    slice_arrays(inputA, 0)
    slice_arrays(inputA, 0, 1)
    slice_arrays(inputA, stop=2)
    # A single-element list of None.
    inputA = [None]
    slice_arrays(inputA, 0)
    slice_arrays(inputA, 0, 1)
    slice_arrays(inputA, stop=2)
    # None itself.
    inputA = None
    slice_arrays(inputA, 0)
    slice_arrays(inputA, 0, 1)
    slice_arrays(inputA, stop=2)
@pytest.mark.skipif(sys.version_info < (3,),
                    reason='Cannot catch warnings in python 2')
def test_warnings():
    """fit_generator must warn when a plain generator is used with process
    workers, and must stay silent when a Sequence is used instead."""
    a = Input(shape=(3,), name='inputA')
    b = Input(shape=(3,), name='inputB')
    a_2 = Dense(4, name='dense_1')(a)
    dp = Dropout(0.5, name='dropout')
    b_2 = dp(b)
    model = Model([a, b], [a_2, b_2])
    optimizer = 'rmsprop'
    loss = 'mse'
    loss_weights = [1., 0.5]
    model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
                  sample_weight_mode=None)

    # Infinite generator of random batches matching the model's two
    # inputs/outputs; wrapped so worker threads can share it.
    @threadsafe_generator
    def gen_data(batch_sz):
        while True:
            yield ([np.random.random((batch_sz, 3)),
                    np.random.random((batch_sz, 3))],
                   [np.random.random((batch_sz, 4)),
                    np.random.random((batch_sz, 3))])

    # Plain generator + multiprocessing => a 'Sequence' warning is expected.
    with pytest.warns(Warning) as w:
        out = model.fit_generator(gen_data(4),
                                  steps_per_epoch=10,
                                  use_multiprocessing=True,
                                  workers=2)
    warning_raised = any(['Sequence' in str(w_.message) for w_ in w])
    assert warning_raised, 'No warning raised when using generator with processes.'

    # A real Sequence must not trigger that warning.
    with pytest.warns(None) as w:
        out = model.fit_generator(RandSequence(3),
                                  steps_per_epoch=4,
                                  use_multiprocessing=True,
                                  workers=2)
    assert all(['Sequence' not in str(w_.message) for w_ in w]), (
        'A warning was raised for Sequence.')
def test_with_list_as_targets():
    """train_on_batch accepts a plain Python list as the target array."""
    model = Sequential()
    model.add(Dense(1, input_dim=3, trainable=False))
    model.compile('rmsprop', 'mse')
    x = np.random.random((2, 3))
    y = [0, 1]
    model.train_on_batch(x, y)
def test_check_not_failing():
    """Compatible loss/target shape combinations must pass without raising."""
    a = np.random.random((2, 1, 3))
    training_utils.check_loss_and_target_compatibility(
        [a], [losses.categorical_crossentropy], [a.shape])
    # An unknown (None) middle dimension is also accepted.
    training_utils.check_loss_and_target_compatibility(
        [a], [losses.categorical_crossentropy], [(2, None, 3)])
def test_check_last_is_one():
    """A target whose last axis is 1 must be rejected for
    categorical_crossentropy with a helpful message."""
    a = np.random.random((2, 3, 1))
    with pytest.raises(ValueError) as exc:
        training_utils.check_loss_and_target_compatibility(
            [a], [losses.categorical_crossentropy], [a.shape])
    # str(exc) stringifies the ExceptionInfo object (a file/line repr on
    # pytest >= 5.0), not the exception message; the message lives on
    # exc.value.
    assert 'You are passing a target array' in str(exc.value)
def test_check_bad_shape():
    """Mismatched target vs. output shapes must be rejected."""
    a = np.random.random((2, 3, 5))
    with pytest.raises(ValueError) as exc:
        training_utils.check_loss_and_target_compatibility(
            [a], [losses.categorical_crossentropy], [(2, 3, 6)])
    # Match the message on exc.value; str(exc) on an ExceptionInfo no
    # longer contains the exception text on pytest >= 5.0.
    assert 'targets to have the same shape' in str(exc.value)
def test_pd_df():  # testing dataframes via pandas
    """All public Model entry points (fit / predict / evaluate and their
    *_on_batch variants) must accept pandas DataFrames passed as a single
    value, a list, or a name-keyed dict."""
    inputA = Input(shape=(3,), name='inputA')
    inputB = Input(shape=(3,), name='inputB')
    x = Dense(4, name='dense_1')(inputA)
    # NOTE(review): 'desne_2' looks like a typo for 'dense_2'; it is only a
    # layer name so behavior is unaffected.
    y = Dense(3, name='desne_2')(inputB)
    # model1: single input/output; model2: two inputs/outputs.
    model1 = Model(inputs=inputA, outputs=x)
    model2 = Model(inputs=[inputA, inputB], outputs=[x, y])
    optimizer = 'rmsprop'
    loss = 'mse'
    model1.compile(optimizer=optimizer, loss=loss)
    model2.compile(optimizer=optimizer, loss=loss)
    input_a_df = None  # placeholder comment anchor; see DataFrames below
    inputA_df = pd.DataFrame(np.random.random((10, 3)))
    inputB_df = pd.DataFrame(np.random.random((10, 3)))
    outputA_df = pd.DataFrame(np.random.random((10, 4)))
    outputB_df = pd.DataFrame(np.random.random((10, 3)))
    # fit: bare DataFrame, list, and dict forms.
    model1.fit(inputA_df,
               outputA_df)
    model2.fit([inputA_df, inputB_df],
               [outputA_df, outputB_df])
    model1.fit([inputA_df],
               [outputA_df])
    model1.fit({'inputA': inputA_df},
               outputA_df)
    model2.fit({'inputA': inputA_df, 'inputB': inputB_df},
               [outputA_df, outputB_df])
    # predict: same three calling conventions.
    model1.predict(inputA_df)
    model2.predict([inputA_df, inputB_df])
    model1.predict([inputA_df])
    model1.predict({'inputA': inputA_df})
    model2.predict({'inputA': inputA_df, 'inputB': inputB_df})
    # predict_on_batch.
    model1.predict_on_batch(inputA_df)
    model2.predict_on_batch([inputA_df, inputB_df])
    model1.predict_on_batch([inputA_df])
    model1.predict_on_batch({'inputA': inputA_df})
    model2.predict_on_batch({'inputA': inputA_df, 'inputB': inputB_df})
    # evaluate.
    model1.evaluate(inputA_df,
                    outputA_df)
    model2.evaluate([inputA_df, inputB_df],
                    [outputA_df, outputB_df])
    model1.evaluate([inputA_df],
                    [outputA_df])
    model1.evaluate({'inputA': inputA_df},
                    outputA_df)
    model2.evaluate({'inputA': inputA_df, 'inputB': inputB_df},
                    [outputA_df, outputB_df])
    # train_on_batch.
    model1.train_on_batch(inputA_df,
                          outputA_df)
    model2.train_on_batch([inputA_df, inputB_df],
                          [outputA_df, outputB_df])
    model1.train_on_batch([inputA_df],
                          [outputA_df])
    model1.train_on_batch({'inputA': inputA_df},
                          outputA_df)
    model2.train_on_batch({'inputA': inputA_df, 'inputB': inputB_df},
                          [outputA_df, outputB_df])
    # test_on_batch.
    model1.test_on_batch(inputA_df,
                         outputA_df)
    model2.test_on_batch([inputA_df, inputB_df],
                         [outputA_df, outputB_df])
    model1.test_on_batch([inputA_df],
                         [outputA_df])
    model1.test_on_batch({'inputA': inputA_df},
                         outputA_df)
    model2.test_on_batch({'inputA': inputA_df, 'inputB': inputB_df},
                         [outputA_df, outputB_df])
# Allow running this test module directly, outside a pytest invocation.
if __name__ == '__main__':
    pytest.main([__file__])
| 32.224335 | 83 | 0.623717 | 1,099 | 8,475 | 4.538672 | 0.158326 | 0.049719 | 0.044106 | 0.040898 | 0.624699 | 0.545309 | 0.497795 | 0.445469 | 0.392743 | 0.348637 | 0 | 0.020833 | 0.252389 | 8,475 | 262 | 84 | 32.347328 | 0.766414 | 0.003422 | 0 | 0.257282 | 0 | 0 | 0.046779 | 0 | 0 | 0 | 0 | 0 | 0.019417 | 1 | 0.092233 | false | 0.009709 | 0.063107 | 0.019417 | 0.199029 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3720c3899d53ebbf6a2778fd6e3ce1432176eae2 | 17,466 | py | Python | entrypoint.py | lucteo/action-cxx-toolkit | f181ba4199669f9a4ca17c7786499e33d2313d13 | [
"MIT"
] | 1 | 2020-09-27T11:22:13.000Z | 2020-09-27T11:22:13.000Z | entrypoint.py | lucteo/action-cxx-toolkit | f181ba4199669f9a4ca17c7786499e33d2313d13 | [
"MIT"
] | null | null | null | entrypoint.py | lucteo/action-cxx-toolkit | f181ba4199669f9a4ca17c7786499e33d2313d13 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os, sys, subprocess
from dataclasses import dataclass
# Input variables:
# INPUT_CHECKS
# INPUT_DEPENDENCIES
# INPUT_DIRECTORY
# INPUT_BUILDDIR
# INPUT_CC
# INPUT_CFLAGS
# INPUT_CXXFLAGS
# INPUT_CONANFLAGS
# INPUT_CMAKEFLAGS
# INPUT_CTESTFLAGS
# INPUT_MAKEFLAGS
# INPUT_IWYUFLAGS
# INPUT_CPPCHECKFLAGS
# INPUT_CLANGTIDYFLAGS
# INPUT_CLANGFORMATDIRS
# INPUT_PREBUILD_COMMAND
# INPUT_BUILD_COMMAND
# INPUT_POSTBUILD_COMMAND
# INPUT_TEST_COMMAND
# The complete set of check names accepted via INPUT_CHECKS (the open-ended
# 'sanitize=<name>' family is validated separately by prefix).
valid_checks = [
    'build',
    'warnings',
    'install',
    'test',
    'clang-format',
    'clang-tidy',
    'cppcheck',
    'iwyu',
    'sanitize',
    'coverage=codecov',
    'coverage=lcov',
]

# The source directory
srcDir = os.getcwd()

# The list of checks that need to be run here (see valid_checks above)
checks = []

# The test command generated depending on the build
auto_test_cmd = ''
class colors:
    ''' The colors to be used by this script; Unix colors '''
    # ANSI SGR escape sequences; CLEAR resets all attributes.
    RED = '\033[0;31m'
    GREEN = '\033[0;32m'
    ORANGE = '\033[0;33m'
    BLUE = '\033[0;34m'
    PURPLE = '\033[0;35m'
    CYAN = '\033[0;36m'
    LIGHTGRAY = '\033[0;37m'
    DARKGRAY = '\033[1;30m'
    LIGHTRED = '\033[1;31m'
    LIGHTGREEN = '\033[1;32m'
    YELLOW = '\033[1;33m'
    LIGHTBLUE = '\033[1;34m'
    LIGHTPURPLE = '\033[1;35m'
    LIGHTCYAN = '\033[1;36m'
    WHITE = '\033[1;37m'
    CLEAR = '\033[0m'
def error(text):
    """Print *text* as a highlighted error message and abort the script
    with exit status 1."""
    message = f'\n{colors.LIGHTRED}ERROR: {text}{colors.CLEAR}\n'
    print(message)
    sys.exit(1)
def warning(text):
    """Print *text* as a highlighted warning; execution continues."""
    message = f'{colors.YELLOW}WARNING: {text}{colors.CLEAR}'
    print(message)
def yesno(boolVal):
    """Render a truthy/falsy value as the string 'yes' or 'no'."""
    if boolVal:
        return 'yes'
    return 'no'
def param(name, default=None):
    """Read environment variable *name*, falling back to *default* when it
    is unset OR set to an empty string (falsy values are treated as absent,
    matching the original truthiness check)."""
    value = os.environ.get(name)
    return value or default
@dataclass
class HeaderPrint:
    ''' Callable that prints a header when called '''
    text: str  # header text, rendered with a '==' prefix in cyan

    def __call__(self):
        print(f'\n{colors.LIGHTCYAN}== {self.text}{colors.CLEAR}')
@dataclass
class PropertyPrint:
    ''' Callable that prints the value of a property when called '''
    key: str  # property name (printed emphasized, white)
    val: str  # property value (interpolated via f-string)

    def __call__(self):
        print(f'{colors.WHITE}{self.key}:{colors.CLEAR} {self.val}')
@dataclass
class RegularPrint:
    ''' Callable that prints a regular text when called '''
    text: str  # printed verbatim, no coloring

    def __call__(self):
        print(self.text)
@dataclass
class Command:
    ''' Callable that executes a bash command; fails if the command fails. '''
    cmd: str              # shell command line; falsy means "no-op"
    verbose: bool = True  # echo the command (prefixed '+ ') before running

    def __call__(self):
        if self.cmd:
            if self.verbose:
                print(f'+ {self.cmd}')
            # check=True raises CalledProcessError on a non-zero exit, which
            # main() catches to abort the whole action. shell=True is
            # deliberate: commands are composed by this script, not taken
            # from untrusted input.
            subprocess.run(self.cmd, shell=True, check=True)
@dataclass
class ChDir:
    ''' Callable that changes the current directory '''
    dirName: str  # target directory; chdir is process-wide and sticky

    def __call__(self):
        print(f'+ cd {self.dirName}')
        os.chdir(self.dirName)
@dataclass
class CmdList:
    """Ordered collection of callables executed in sequence.

    Falsy entries (e.g. None placeholders) are tolerated inside ``cmds``
    and skipped at run time; ``add`` likewise drops falsy commands
    instead of storing them.
    """

    cmds: list  # the queued callables (may contain falsy placeholders)

    def __call__(self):
        """Invoke every truthy command, in insertion order."""
        for step in self.cmds:
            if not step:
                continue
            step()

    def add(self, cmd):
        """Append *cmd* to the queue unless it is falsy."""
        if cmd:
            self.cmds.append(cmd)
def configure_compiler_options():
    ''' Detect the compiler version we need to use, and get the required environment variables.
    Returns a pair between compiler+version and the command to set proper environment.
    '''
    compilerVer = param('INPUT_CC', 'gcc')
    # Map each supported C compiler name to its matching C++ driver.
    compilers_map = {
        'gcc': 'g++',
        'gcc-7': 'g++-7',
        'gcc-8': 'g++-8',
        'gcc-9': 'g++-9',
        'gcc-10': 'g++-10',
        'gcc-11': 'g++-11',
        'clang': 'clang++',
        'clang-7': 'clang++-7',
        'clang-8': 'clang++-8',
        'clang-9': 'clang++-9',
        'clang-10': 'clang++-10',
        'clang-11': 'clang++-11',
        'clang-12': 'clang++-12',
        'clang-13': 'clang++-13',
    }
    if compilerVer not in compilers_map.keys():
        error(f'Invalid compiler supplied: {compilerVer}')
    cc = f'/usr/bin/{compilerVer}'
    cxx = f'/usr/bin/{compilers_map[compilerVer]}'
    # Print and sanity-run both compilers (--version fails fast if missing).
    PropertyPrint('C Compiler to be used', cc)()
    Command(f'{cc} --version')()
    PropertyPrint('C++ Compiler to be used', cxx)()
    Command(f'{cxx} --version')()
    # Build the compilation environment flags
    envSetCmd = f'export CC={cc} CXX={cxx}'
    PropertyPrint('Environment flags to be used', envSetCmd)()
    # Update the alternatives, to ensure we are pointing to the right version
    # Needed mostly for the clang tools
    if compilerVer != 'gcc' and compilerVer != 'clang':
        # Map a versioned compiler name back to its alternatives group.
        base_compilers_map = {
            'gcc': 'gcc',
            'gcc-7': 'gcc',
            'gcc-8': 'gcc',
            'gcc-9': 'gcc',
            'gcc-10': 'gcc',
            'gcc-11': 'gcc',
            'clang': 'clang',
            'clang-7': 'clang',
            'clang-8': 'clang',
            'clang-9': 'clang',
            'clang-10': 'clang',
            'clang-11': 'clang',
            'clang-12': 'clang',
            'clang-13': 'clang',
        }
        baseComp = base_compilers_map[compilerVer]
        Command(f'update-alternatives --set {baseComp} /usr/bin/{compilerVer}')()
    return (compilerVer, envSetCmd)
def configure_conan(compilerVer, envFlags, buildType = 'Release'):
    ''' Configure the build system with conan; returns a command object '''
    global srcDir
    # Split the given compiler string into base compiler name and the version
    p = compilerVer.split('-')
    compiler = p[0]
    # Check the flags that we need to add to the conan command, based on the compiler version
    conan_extra_flags = param('INPUT_CONANFLAGS', '')
    conan_extra_flags += f' -s compiler={compiler}'
    if compilerVer == 'clang-7':
        # This version is spelled '7.0' rather than '7' in the settings.
        conan_extra_flags += f' -s compiler.version=7.0'
    elif len(p) > 1:
        ver = p[1]
        conan_extra_flags += f' -s compiler.version={ver}'
    # Pick the conventional C++ standard library per compiler unless the
    # caller already chose one via INPUT_CONANFLAGS.
    if 'compiler.libcxx' not in conan_extra_flags:
        if compiler == 'gcc':
            conan_extra_flags += ' -s compiler.libcxx=libstdc++11'
        elif compiler == 'clang':
            conan_extra_flags += ' -s compiler.libcxx=libc++'
    # Generate the command
    conan_command = f'{envFlags} && conan install "{srcDir}" --build=missing -s build_type={buildType} {conan_extra_flags}'
    PropertyPrint('Conan command', conan_command)()
    return Command(conan_command)
def get_santizier_flags():
    """Build the extra compiler flags for any requested 'sanitize=*' checks.

    Reads the module-global ``checks`` list; returns '' when no sanitizer
    was requested. (The function name keeps its historical misspelling
    because callers use it.)
    """
    selected = ''.join(
        f' -f{check}' for check in checks if check.startswith('sanitize='))
    if not selected:
        return ''
    # Frame pointers are kept so sanitizer stack traces stay readable.
    return f' -fno-omit-frame-pointer {selected}'
def configure_cmake_build(compilerVer, envSetCmd, hasConan):
    ''' Configures the cmake build. Returns a command object to be run to build with cmake '''
    global srcDir
    global checks
    global auto_test_cmd
    buildCmds = CmdList([])
    # Setup build and install directories
    srcDir = os.getcwd()
    buildDir = param('INPUT_BUILDDIR', '/tmp/build')
    installDir = '/tmp/install'
    PropertyPrint('Build directory', buildDir)()
    PropertyPrint('Install directory', installDir)()
    buildCmds.add(Command(f'mkdir -p {buildDir}'))
    buildCmds.add(Command(f'mkdir {installDir}'))
    buildCmds.add(ChDir(buildDir))
    cmake_flags = param('INPUT_CMAKEFLAGS', '')
    if cmake_flags:
        PropertyPrint('CMake flags', cmake_flags)()
    # Handle the checks that apply at the build step
    cmake_post_build_cmd = ''  # NOTE(review): assigned but never used
    other_cmake_flags = ''
    cmake_cc_flags = ''
    if 'install' in checks:
        other_cmake_flags += f' -DCMAKE_INSTALL_PREFIX={installDir}'
    if 'warnings' in checks:
        cmake_cc_flags += ' -Wall -Werror'
    # Sanitizer flags apply regardless of the 'warnings' check.
    cmake_cc_flags += get_santizier_flags()
    # Static-analysis tools need the compilation database.
    if 'clang-tidy' in checks or 'cppcheck' in checks or 'iwyu' in checks:
        other_cmake_flags += ' -DCMAKE_EXPORT_COMPILE_COMMANDS=1'
    if 'coverage=codecov' in checks or 'coverage=lcov' in checks:
        cmake_cc_flags += ' --coverage'
    # Add the C and C++ flags
    cflags = param('INPUT_CFLAGS', '') + ' ' + cmake_cc_flags
    if cflags:
        other_cmake_flags += f' -DCMAKE_C_FLAGS="{cflags}"'
    cxxflags = param('INPUT_CXXFLAGS', '') + ' ' + cmake_cc_flags
    if cxxflags:
        other_cmake_flags += f' -DCMAKE_CXX_FLAGS="{cxxflags}"'
    # If we have conan, add a conan command first
    if hasConan:
        buildCmds.add(configure_conan(compilerVer, envSetCmd))
    # Generate the actual commands to be run based on the above flags
    cmake_command = f'{envSetCmd} && cmake {cmake_flags} {other_cmake_flags} "{srcDir}"'
    make_params = param('INPUT_MAKEFLAGS', '')
    make_command = f'cmake --build . -v {make_params}'
    PropertyPrint('Configure command', cmake_command)()
    PropertyPrint('Build command', make_command)()
    buildCmds.add(Command(cmake_command))
    if 'build' in checks:
        buildCmds.add(Command(make_command))
    if 'install' in checks:
        install_command = f'cmake --install .'
        PropertyPrint('CMake install command', install_command)()
        buildCmds.add(HeaderPrint('Installing'))
        buildCmds.add(Command(install_command))
    if 'iwyu' in checks:
        PropertyPrint('Running iwyu', yesno(True))()
        flags = param('INPUT_IWYUFLAGS', '')
        buildCmds.add(Command(f'iwyu_tool -p . -- {flags} | tee iwyu_results.txt'))
        # Fail when iwyu suggested any '- #include' removal.
        buildCmds.add(Command('! grep -e "- #include" iwyu_results.txt'))
    if 'cppcheck' in checks:
        PropertyPrint('Running cppcheck', yesno(True))()
        flags = param('INPUT_CPPCHECKFLAGS', '--enable=style,performance,portability')
        flags += " --template='CPPCHECK_REPORT: {file}:{line},{severity},{id},{message}'"
        buildCmds.add(Command(f'cppcheck --project=compile_commands.json {flags} 2>&1 | tee cppcheck_results.txt'))
        # Fail when any tagged report line was emitted.
        buildCmds.add(Command('! grep -e "CPPCHECK_REPORT:" cppcheck_results.txt'))
    if 'clang-tidy' in checks:
        PropertyPrint('Running clang-tidy', yesno(True))()
        flags = param('INPUT_CLANGTIDYFLAGS', '')
        # Copy the project's .clang-tidy config into the build tree, if any.
        buildCmds.add(Command(f'if [ -f "{srcDir}/.clang-tidy" ]; then cp --verbose "{srcDir}/.clang-tidy" {buildDir}; fi'))
        buildCmds.add(Command(f'/usr/lib/llvm-13/bin/run-clang-tidy -p . {flags}'))
    # Generate a test command to be used later
    ctest_flags = param('INPUT_CTESTFLAGS', '')
    auto_test_cmd = f'ctest --verbose {ctest_flags} .'
    return buildCmds
def configure_make_build(compilerVer, envSetCmd, hasConan):
    ''' Configures the make build. Returns a command object to be run to build with make '''
    global checks
    global auto_test_cmd
    buildCmds = CmdList([])
    if hasConan:
        buildCmds.add(configure_conan(compilerVer, envSetCmd))
    # Depending on checks, check if we can add C or C++ flags
    make_cc_flags = ''
    if 'warnings' in checks:
        make_cc_flags += ' -Wall -Werror'
    # Sanitizer flags apply regardless of the 'warnings' check.
    make_cc_flags += get_santizier_flags()
    # Add the C and C++ flags
    make_params = param('INPUT_MAKEFLAGS', '')
    cflags = param('INPUT_CFLAGS', '') + ' ' + make_cc_flags
    if cflags:
        make_params += f' CFLAGS="{cflags}"'
    cxxflags = param('INPUT_CXXFLAGS', '') + ' ' + make_cc_flags
    if cxxflags:
        make_params += f' CXXFLAGS="{cxxflags}"'
    make_command = f'{envSetCmd} && make {make_params}'
    PropertyPrint('Build command', make_command)()
    buildCmds.add(Command(make_command))
    if 'install' in checks:
        install_command = f'make install'
        PropertyPrint('Install command', install_command)()
        buildCmds.add(HeaderPrint('Installing'))
        buildCmds.add(Command(install_command))
    # Generate a test command to be used later
    auto_test_cmd = f'make test'
    return buildCmds
def auto_build_phase():
    ''' Configures and runs the build phase (automatic mode). '''
    global checks
    global auto_test_cmd
    # Nothing to do unless some check needs a compiled/configured tree.
    if 'build' not in checks and 'clang-tidy' not in checks and 'cppcheck' not in checks and 'iwyu' not in checks:
        return
    HeaderPrint('Auto-determining build commands')()
    # Probe for the supported build-system marker files.
    hasConan = os.path.isfile('conanfile.txt') or os.path.isfile('conanfile.py')
    hasCmake = os.path.isfile('CMakeLists.txt')
    hasMake = os.path.isfile('Makefile')
    PropertyPrint('Has Conan', yesno(hasConan))()
    PropertyPrint('Has Cmake', yesno(hasCmake))()
    PropertyPrint('Has Make', yesno(hasMake))()
    if not hasCmake and not hasMake:
        error('Cannot autodetect build system. Provide the build command manually')
    # Determine the compiler
    (compilerVer, envSetCmd) = configure_compiler_options()
    buildCmds = CmdList([])
    buildCmds.add(HeaderPrint('Building the software'))
    # CMake takes precedence when both build files are present.
    if hasCmake:
        buildCmds.add(configure_cmake_build(compilerVer, envSetCmd, hasConan))
    elif hasMake:
        buildCmds.add(configure_make_build(compilerVer, envSetCmd, hasConan))
    # Run the build commands
    buildCmds()
def auto_test_phase():
    ''' Configures and runs the test phase (automatic mode). '''
    global srcDir
    global checks
    global auto_test_cmd
    toRun = CmdList([])
    if 'test' in checks:
        toRun.add(HeaderPrint('Running tests'))
        # auto_test_cmd was prepared by the build phase (ctest or make test).
        toRun.add(Command(auto_test_cmd))
    # Post-test actions?
    if 'coverage=codecov' in checks or 'coverage=lcov' in checks:
        toRun.add(HeaderPrint('Gathering test coverage info'))
    if 'coverage=codecov' in checks:
        # Upload via the codecov bash uploader, rooted at the source dir.
        toRun.add(Command(f'bash -c "bash <(curl -s https://codecov.io/bash) -R {srcDir}"'))
    if 'coverage=lcov' in checks:
        # Capture coverage and copy the report back into the source tree.
        toRun.add(Command('lcov -c -d . -o lcov.info'))
        toRun.add(Command(f'cp lcov.info {srcDir}/'))
    if 'clang-format' in checks:
        # Dry-run clang-format over the configured directories; --Werror
        # turns any needed reformatting into a failure.
        dirs = param('INPUT_CLANGFORMATDIRS', '.').split()
        dirs = map(lambda d: f'"{srcDir}/{d}"', dirs)
        dirsStr = ' '.join(dirs)
        toRun.add(Command(f'find {dirsStr} \\( -name "*.[ch]" -o -name "*.cc" -o -name "*.cpp" -o -name "*.hpp" \\) -exec clang-format --Werror --dry-run {{}} +'))
    # Run the test commands
    toRun()
def configure_dependencies():
    """Build a command list installing the apt packages named in
    INPUT_DEPENDENCIES; returns None when none are requested."""
    deps = param('INPUT_DEPENDENCIES')
    if deps:
        return CmdList([
            PropertyPrint('Packages to install', deps),
            Command(f'apt-get -y update ; apt-get install --no-install-recommends -y {deps}'),
            RegularPrint(''),
        ])
    return None
def configure_changedir():
    """Build a command list that switches into INPUT_DIRECTORY before the
    build runs; returns None when no directory was requested."""
    targetdir = param('INPUT_DIRECTORY')
    if targetdir:
        return CmdList([
            PropertyPrint('Target directory', targetdir),
            ChDir(targetdir),
        ])
    return None
def check_override_phase(paramName, printText, defaultCmd = None):
    """Resolve one pipeline phase: the user-supplied shell command from env
    var *paramName* when present (announced with *printText*), otherwise
    *defaultCmd* (which may be None, meaning 'skip this phase')."""
    customCmd = param(paramName)
    if not customCmd:
        return defaultCmd
    return CmdList([
        HeaderPrint(printText),
        Command(customCmd),
    ])
def get_checks():
    ''' Parses and verifies the list of checks we need to apply. '''
    checks = param('INPUT_CHECKS', '').split()
    PropertyPrint('Given checks', checks)()
    for c in checks:
        if c not in valid_checks and not c.startswith('sanitize='):
            # NOTE(review): invalid names are warned about but left in the
            # list; they simply never match any later phase.
            warning(f"Check '{c}' is not valid; ignored")
    if not checks:
        checks += ['build', 'test']
        PropertyPrint('Implicitly adding default checks', checks)()
    # Do we need to add extra checks?
    extra_checks = []
    # These checks cannot run without a build, so imply 'build'.
    if 'build' not in checks:
        needs_build = ['install', 'test', 'warnings', 'coverage=codecov', 'coverage=lcov']
        for c in checks:
            if c in needs_build or c.startswith('sanitize='):
                extra_checks.append('build')
                break
    # Coverage and sanitizers only make sense when tests run, imply 'test'.
    if 'test' not in checks:
        needs_test = ['coverage=codecov', 'coverage=lcov']
        for c in checks:
            if c in needs_test or c.startswith('sanitize='):
                extra_checks.append('test')
                break
    if 'coverage=codecov' in checks:
        # The codecov uploader relies on GitHub Actions env vars.
        if not param('GITHUB_SHA'):
            warning('Could not find GITHUB_SHA environment variable. Is the environment set correctly? (expected env vars: GITHUB_ACTION, GITHUB_REF, GITHUB_REPOSITORY, GITHUB_HEAD_REF, GITHUB_SHA, GITHUB_RUN_ID)')
    if extra_checks:
        PropertyPrint('Adding implicit checks', extra_checks)()
        checks += extra_checks
    return checks
def main():
    """Entry point: resolve the requested checks, assemble the pipeline
    phases (each optionally overridden by an INPUT_*_COMMAND env var), and
    run them; exits 1 when any shell command fails."""
    # Configure and build the phases with what we need to run
    HeaderPrint('Configuring')()
    global checks
    checks = get_checks()
    try:
        phases = CmdList([])
        phases.add(configure_dependencies())
        phases.add(configure_changedir())
        phases.add(check_override_phase('INPUT_PREBUILD_COMMAND', 'Running custom pre-build command'))
        phases.add(check_override_phase('INPUT_BUILD_COMMAND', 'Running custom build command', auto_build_phase))
        phases.add(check_override_phase('INPUT_POSTBUILD_COMMAND', 'Running custom post-build command'))
        phases.add(check_override_phase('INPUT_TEST_COMMAND', 'Running custom test command', auto_test_phase))
        # Actually run everything
        phases()
    except subprocess.CalledProcessError as e:
        print(e)  # Don't print the whole stack
        sys.exit(1)


if __name__ == '__main__':
    main()
37225c968e5f417234da031f05ff2c4ef3bc4d89 | 3,579 | py | Python | mycv/utils/pbar_utils.py | duanzhiihao/mycv | 184b52f7a5c1b6f603122d4f4050952b65ba0ead | [
"MIT"
] | null | null | null | mycv/utils/pbar_utils.py | duanzhiihao/mycv | 184b52f7a5c1b6f603122d4f4050952b65ba0ead | [
"MIT"
] | null | null | null | mycv/utils/pbar_utils.py | duanzhiihao/mycv | 184b52f7a5c1b6f603122d4f4050952b65ba0ead | [
"MIT"
] | null | null | null | from collections import OrderedDict
from mycv.utils.general import ANSI
class SimpleTable(OrderedDict):
    """Ordered key->value table rendered as fixed-width ASCII header/body
    rows, intended for tqdm progress-bar descriptions.

    Column widths only ever grow (tracked per key), so successive renders
    stay aligned as values change.
    """

    def __init__(self, init_keys=[]):
        # NOTE(review): mutable default argument; harmless here because the
        # list is never mutated, but a tuple default would be safer.
        super().__init__()
        # initialization: assign None to initial keys
        for key in init_keys:
            if not isinstance(key, str):
                ANSI.warningstr(f'Progress bar logger key: {key} is not a string')
            self[key] = None
        # Minimum rendered width of each column, keyed like the table.
        self._str_lengths = {k: 10 for k,v in self.items()}

    def _update_length(self, key, length):
        """Grow (never shrink) the stored column width for *key*; return
        the effective width."""
        old = self._str_lengths.get(key, 0)
        if length <= old:
            return old
        else:
            self._str_lengths[key] = length
            return length

    def update(self):
        """ Update the string lengths, and return header and body
        Returns:
            (str, str): color-coded header row and plain body row.
        """
        # NOTE(review): shadows OrderedDict.update() with an incompatible
        # zero-argument signature; callers must use item assignment, never
        # the mapping update() API.
        header = []
        body = []
        for k,v in self.items():
            # convert any object to string
            key = self.obj_to_str(k)
            val = self.obj_to_str(v)
            # get str length (padded by 2, floored by the stored width)
            str_len = max(len(key), len(val)) + 2
            str_len = self._update_length(k, str_len)
            # make header and body string (center-aligned, '|' separated)
            keystr = f'{key:^{str_len}}|'
            valstr = f'{val:^{str_len}}|'
            header.append(keystr)
            body.append(valstr)
        header = ''.join(header)
        header = ANSI.headerstr(header)
        body = ''.join(body)
        return header, body

    def get_header(self):
        """Render only the header row, using the current stored widths."""
        header = []
        body = []
        for k in self.keys():
            key = self.obj_to_str(k)
            str_len = self._str_lengths[k]
            keystr = f'{key:^{str_len}}|'
            header.append(keystr)
        header = ''.join(header)
        header = ANSI.headerstr(header)
        return header

    def get_body(self):
        """Render only the body row, using the current stored widths."""
        body = []
        for k,v in self.items():
            val = self.obj_to_str(v)
            str_len = self._str_lengths[k]
            valstr = f'{val:^{str_len}}|'
            body.append(valstr)
        body = ''.join(body)
        return body

    @staticmethod
    def obj_to_str(obj, digits=4):
        """Format *obj* for display: floats to *digits* significant figures,
        lists/tuples element-wise (3 sig. figs.), everything else via str()."""
        if isinstance(obj, str):
            return obj
        elif isinstance(obj, float) or hasattr(obj, 'float'):
            # hasattr(obj, 'float') presumably catches tensor-like scalars
            # exposing a .float() method -- TODO confirm intended types.
            obj = float(obj)
            return f'{obj:.{digits}g}'
        elif isinstance(obj, list):
            strings = [SimpleTable.obj_to_str(item, 3) for item in obj]
            return '[' + ', '.join(strings) + ']'
        elif isinstance(obj, tuple):
            strings = [SimpleTable.obj_to_str(item, 3) for item in obj]
            return '(' + ', '.join(strings) + ')'
        else:
            return str(obj)
def main():
    """Ad-hoc visual demo: drive a SimpleTable under a tqdm progress bar
    with changing random values."""
    import random
    import time
    from tqdm import tqdm
    plogger = SimpleTable()
    plogger['epoch'] = 2
    plogger['rand'] = random.randint(0, 255)
    plogger['losses'] = [1.2, 123.3, 4, 0., 0.]
    plogger['msg'] = 'afsad'
    plogger['msg2'] = 'afsad'
    title, body = plogger.update()
    print(title)
    print(body)
    pbar = tqdm(range(100))
    for i in pbar:
        time.sleep(0.64)
        plogger['epoch'] = i
        plogger['rand'] = random.randint(0, 1000) / 1000.0
        plogger['losses'] = [random.randint(0, 1000) / 1000.0 for _ in range(5)]
        plogger['msg'] = 'x' * random.randint(0, 10)
        new_title, body = plogger.update()
        # Re-print the header only when a grown column width changed it.
        if new_title != title:
            print('', new_title)
            title = new_title
        pbar.set_description(body)
    debug = 1  # breakpoint anchor, intentionally unused


if __name__ == '__main__':
    main()
| 29.825 | 82 | 0.531713 | 438 | 3,579 | 4.200913 | 0.253425 | 0.029348 | 0.030435 | 0.026087 | 0.296739 | 0.222826 | 0.131522 | 0.065217 | 0.065217 | 0.065217 | 0 | 0.022439 | 0.340039 | 3,579 | 119 | 83 | 30.07563 | 0.756562 | 0.057558 | 0 | 0.322917 | 0 | 0 | 0.060515 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.072917 | false | 0 | 0.052083 | 0 | 0.239583 | 0.03125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
372305c2d1fb8224de3b77aaf1f47a6d32e75ba4 | 2,939 | py | Python | example_scripts/tidetable_tutorial.py | British-Oceanographic-Data-Centre/COAsT | 4d3d57c9afb61a92063b665626c1828dd2998d2b | [
"MIT"
] | 8 | 2020-09-10T13:40:07.000Z | 2022-03-10T22:52:44.000Z | example_scripts/tidetable_tutorial.py | British-Oceanographic-Data-Centre/COAsT | 4d3d57c9afb61a92063b665626c1828dd2998d2b | [
"MIT"
] | 294 | 2020-05-11T12:17:17.000Z | 2022-03-31T22:07:52.000Z | example_scripts/tidetable_tutorial.py | British-Oceanographic-Data-Centre/COAsT | 4d3d57c9afb61a92063b665626c1828dd2998d2b | [
"MIT"
] | 4 | 2020-05-28T10:43:56.000Z | 2021-09-07T10:40:09.000Z | #!/usr/bin/env python3
"""
Tutorial for processing tabulated tide gauge data
Created on Mon Oct 12 22:30:21 2020
You might scrape tidal highs and lows from a website such as
<a title="NTSLF tidal predictions"
href="https://www.ntslf.org/tides/tidepred?port=Liverpool">
<img alt="NTSLF tidal predictions"
src="https://www.ntslf.org/files/ntslf_php/plottide.php?port=Liverpool" height="200" width="290" /></a>
and format them into a csv file:
LIVERPOOL (GLADSTONE DOCK) TZ: UT(GMT)/BST Units: METRES Datum: Chart Datum
01/10/2020 06:29 1.65
01/10/2020 11:54 9.01
01/10/2020 18:36 1.87
...
The following demonstration would allow you to pass these data.
@author: jeff
"""
import coast
import numpy as np
#%% Load and plot High and Low Water data
print("load and plot HLW data")
filnam = "example_files/Gladstone_2020-10_HLW.txt"

# Set the start and end dates
date_start = np.datetime64("2020-10-12 23:59")
date_end = np.datetime64("2020-10-14 00:01")

# Initiate a TideGauge object, if a filename is passed it assumes it is a GESLA type object
tg = coast.Tidegauge()
# specify the data read as a High Low Water dataset
tg.dataset = tg.read_hlw_to_xarray(filnam, date_start, date_end)

# Show dataset. If timezone is specified then it is presented as requested, otherwise uses UTC
print("Try the TideGauge.show() method:")
tg.show(timezone="Europe/London")

# Do a basic plot of these points
tg.dataset.plot.scatter(x="time", y="sea_level")

#%%
# There is a method to locate HLW events around an approximate date and time
# First state the time of interest
time_guess = np.datetime64("2020-10-13 12:48")

# Then recover all the HLW events in a +/- window, of specified size (iteger hrs)
# The default winsize = 2 (hrs)
HLW = tg.get_tide_table_times(np.datetime64("2020-10-13 12:48"), method="window", winsize=24)

# Alternatively recover the closest HLW event to the input timestamp
HLW = tg.get_tide_table_times(np.datetime64("2020-10-13 12:48"), method="nearest_1")

# Or the nearest two events to the input timestamp
HLW = tg.get_tide_table_times(np.datetime64("2020-10-13 12:48"), method="nearest_2")

# Extract the Low Tide value (argmin/argmax over the recovered events)
print("Try the TideGauge.get_tidetabletimes() methods:")
print("LT:", HLW[HLW.argmin()].values, "m at", HLW[HLW.argmin()].time.values)

# Extract the High Tide value
print("HT:", HLW[HLW.argmax()].values, "m at", HLW[HLW.argmax()].time.values)

# Or use the the nearest High Tide method to get High Tide
HT = tg.get_tide_table_times(np.datetime64("2020-10-13 12:48"), method="nearest_HW")
print("HT:", HT.values, "m at", HT.time.values)

# The get_tidetabletimes() method can take extra paremeters such as a window
# size, an integer number of hours to seek either side of the guess.
HLW = tg.get_tide_table_times(np.datetime64("2020-10-13 12:48"), winsize=2, method="nearest_1")
HLW = tg.get_tide_table_times(np.datetime64("2020-10-13 12:48"), winsize=1, method="nearest_1")
| 37.202532 | 103 | 0.737326 | 513 | 2,939 | 4.150097 | 0.409357 | 0.028182 | 0.067637 | 0.076092 | 0.199155 | 0.185063 | 0.185063 | 0.173791 | 0.173791 | 0.173791 | 0 | 0.080868 | 0.137462 | 2,939 | 78 | 104 | 37.679487 | 0.758974 | 0.548146 | 0 | 0 | 0 | 0 | 0.294163 | 0.052995 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.090909 | 0.272727 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3724a438c48806b5fed578c995c9254f9535f7f6 | 1,373 | py | Python | ai_challenge/env/utils.py | village-people/flying-pig | c86b589aadb02dbfd42a917a388c2b8488ecd338 | [
"MIT"
] | 72 | 2017-05-22T18:22:58.000Z | 2019-03-11T22:49:00.000Z | ai_challenge/env/utils.py | village-people/flying-pig | c86b589aadb02dbfd42a917a388c2b8488ecd338 | [
"MIT"
] | 1 | 2017-06-09T05:26:43.000Z | 2017-08-22T11:41:41.000Z | ai_challenge/env/utils.py | village-people/flying-pig | c86b589aadb02dbfd42a917a388c2b8488ecd338 | [
"MIT"
] | 3 | 2017-05-25T08:49:27.000Z | 2019-09-09T18:41:54.000Z | # Village People, 2017
from argparse import ArgumentParser
def get_args():
    """Parse command-line options for the PigChase experiment runner."""
    parser = ArgumentParser('PigChaseExperiment')
    # Which baseline agent to evaluate.
    parser.add_argument('-t', '--type', type=str, default='random',
                        choices=['dqn', 'empathetic', 'astar', 'random'],
                        help='The type of baseline to run.')
    parser.add_argument('-e', '--epochs', type=int, default=5,
                        help='Number of epochs to run.')
    parser.add_argument('-es', '--epoch_steps', type=int, default=100,
                        help='Max no of steps per epoch.')
    # Remaining positional arguments are the Minecraft client endpoints.
    parser.add_argument('endpoints', nargs='*',
                        default=['127.0.0.1:10000', '127.0.0.1:10001'],
                        help='Minecraft client endpoints (ip(:port)?)+')
    return parser.parse_args()
def parse_clients_args(endpoints):
    """Split each 'ip:port' endpoint string into its components.

    :param endpoints: iterable of endpoint strings (or stringifiable objects)
    :return: list of lists, e.g. ['1.2.3.4:10000'] -> [['1.2.3.4', '10000']]
    """
    return [str(endpoint).split(':') for endpoint in endpoints]
def visualize_training(visualizer, step, rewards, tag='Training'):
    """Record per-episode reward statistics with the given visualizer.

    Logs the reward sum, max, min and the action count (len(rewards) - 1)
    under the '<tag>/...' namespace at the given step.
    """
    entries = (
        ('%s/reward per episode' % tag, sum(rewards)),
        ('%s/max.reward' % tag, max(rewards)),
        ('%s/min.reward' % tag, min(rewards)),
        ('%s/actions per episode' % tag, len(rewards) - 1),
    )
    for label, value in entries:
        visualizer.add_entry(step, label, value)
| 41.606061 | 80 | 0.611071 | 173 | 1,373 | 4.763006 | 0.445087 | 0.033981 | 0.072816 | 0.106796 | 0.18568 | 0.109223 | 0 | 0 | 0 | 0 | 0 | 0.029384 | 0.23161 | 1,373 | 32 | 81 | 42.90625 | 0.751659 | 0.085943 | 0 | 0 | 0 | 0 | 0.259169 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.047619 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3725a03ff33ceb614141f34ea4cafeace14f8296 | 721 | py | Python | practice/leetcode/lc0238_product_excpt_self.py | deehzee/dsalgo | 025bf292e5a2c4e079cecb0c284ab6aeae9a07f4 | [
"MIT"
] | null | null | null | practice/leetcode/lc0238_product_excpt_self.py | deehzee/dsalgo | 025bf292e5a2c4e079cecb0c284ab6aeae9a07f4 | [
"MIT"
] | null | null | null | practice/leetcode/lc0238_product_excpt_self.py | deehzee/dsalgo | 025bf292e5a2c4e079cecb0c284ab6aeae9a07f4 | [
"MIT"
def product_except_self(A):
    '''
    238. Product of Array Except Self
    =================================
    Given an array A of n integers (n > 1), return an array where the i-th
    element is the product of all but the i-th element of A.

    Restrictions: no division; constant extra space beyond the output.

    >>> product_except_self([1, 2, 3, 4])
    [24, 12, 8, 6]
    '''
    n = len(A)
    out = [1] * n
    # Right-to-left sweep: out[k] becomes the product of A[k+1:].
    suffix = 1
    for i in range(n - 1, 0, -1):
        suffix *= A[i]
        out[i - 1] = suffix
    # Left-to-right sweep: fold in the product of A[:k].
    prefix = 1
    for i in range(1, n):
        prefix *= A[i - 1]
        out[i] *= prefix
    return out
| 24.033333 | 74 | 0.482663 | 111 | 721 | 3.099099 | 0.432432 | 0.046512 | 0.043605 | 0.075581 | 0.093023 | 0.093023 | 0.093023 | 0 | 0 | 0 | 0 | 0.047809 | 0.303745 | 721 | 29 | 75 | 24.862069 | 0.63745 | 0.570042 | 0 | 0.222222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
372749ced3100fd6460b5bb6d9752ed56e2a92b1 | 5,952 | py | Python | eventtools/tests/models/occurrence.py | atkinson/glamkit-eventtools | ed0b06c22ca3390506f4fac327493f871b0b4520 | [
"BSD-3-Clause"
] | 1 | 2015-11-05T10:26:49.000Z | 2015-11-05T10:26:49.000Z | eventtools/tests/models/occurrence.py | atkinson/glamkit-eventtools | ed0b06c22ca3390506f4fac327493f871b0b4520 | [
"BSD-3-Clause"
] | null | null | null | eventtools/tests/models/occurrence.py | atkinson/glamkit-eventtools | ed0b06c22ca3390506f4fac327493f871b0b4520 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8“ -*-
from django.test import TestCase
from eventtools.tests._inject_app import TestCaseWithApp as AppTestCase
from eventtools.tests.eventtools_testapp.models import *
from datetime import date, time, datetime, timedelta
from eventtools.tests._fixture import bigfixture, reload_films
from eventtools.utils import datetimeify
from dateutil.relativedelta import relativedelta
class TestOccurrences(AppTestCase):
    """
    Occurrences must have a start datetime and an end datetime. (We might
    have to make a widget to support entry of all-day events).

    Clamping rules exercised by these tests:
    - If start.time is omitted, it is set to time.min.
    - If end is omitted, end.date = start.date, then the time rule below applies.
    - If end.time is omitted it is set to start.time, unless start.time is
      time.min, in which case end.time is set to time.max.
    - If an occurrence's times are min and max, then it is an all-day event.
    - End datetime must be >= start datetime.
    """

    def test_occurrence_create(self):
        # Fixture: one event plus the date/time/datetime values used below.
        e = ExampleEvent.eventobjects.create(name="event with occurrences")
        d1 = date(2010,1,1)
        d2 = date(2010,1,2)
        d1min = datetimeify(d1, clamp='min')
        d1max = datetimeify(d1, clamp='max')
        d2min = datetimeify(d2, clamp='min')
        d2max = datetimeify(d2, clamp='max')
        t1 = time(9,00)
        t2 = time(10,00)
        dt1 = datetime.combine(d1, t1)
        dt2 = datetime.combine(d2, t2)
        # Full datetimes are stored verbatim; a missing end copies the start.
        o = e.occurrences.create(start=dt1, end=dt2)
        self.ae(o.start, dt1)
        self.ae(o.end, dt2)
        o = e.occurrences.create(start=dt1)
        self.ae(o.start, dt1)
        self.ae(o.end, dt1)
        # A start at time.min expands the end to time.max (all-day event).
        o = e.occurrences.create(start=d1min)
        self.ae(o.start, d1min)
        self.ae(o.end, d1max)
        # Bare dates are clamped: start to time.min, end to time.max.
        o = e.occurrences.create(start=d1)
        self.ae(o.start, d1min)
        self.ae(o.end, d1max)
        o = e.occurrences.create(start=d1, end=d2)
        self.ae(o.start, d1min)
        self.ae(o.end, d2max)
        # Mixed date/datetime combinations.
        o = e.occurrences.create(start=dt1, end=d2)
        self.ae(o.start, dt1)
        self.ae(o.end, d2max)
        o = e.occurrences.create(start=d1, end=dt2)
        self.ae(o.start, d1min)
        self.ae(o.end, dt2)
        # A missing start date is rejected.
        self.assertRaises(TypeError, e.occurrences.create, **{'end':dt1})
        self.assertRaises(TypeError, e.occurrences.create, **{'end':d1})
        # A bare time is not a valid start value.
        self.assertRaises(TypeError, e.occurrences.create, **{'start':t1})
        self.assertRaises(TypeError, e.occurrences.create, **{'start':t1, 'end':d1})
        self.assertRaises(TypeError, e.occurrences.create, **{'start':t1, 'end':dt1})
        # A bare time is not a valid end value either.
        self.assertRaises(TypeError, e.occurrences.create, **{'end':t1})
        self.assertRaises(TypeError, e.occurrences.create, **{'start':d1, 'end':t1})
        self.assertRaises(TypeError, e.occurrences.create, **{'start':dt1, 'end':t2})
        # A start date later than the end date is rejected.
        self.assertRaises(AttributeError, e.occurrences.create, **{'start':dt2, 'end':dt1})
        self.assertRaises(AttributeError, e.occurrences.create, **{'start':d2, 'end':dt1})
        self.assertRaises(AttributeError, e.occurrences.create, **{'start':d2, 'end':d1})

    def test_occurrence_properties(self):
        """
        Occurrences have a duration.
        Occurrences have a robot description.
        Occurrences that are currently taking place return true for now_on.
        Occurrences that finish in the past return True for has_finished.
        We can find out how long we have to wait until an occurrence starts.
        We can find out how long it has been since an occurrence finished.
        """
        e = ExampleEvent.eventobjects.create(name="event with occurrences")
        # o spans 2010-01-01 09:00 -> 2010-01-02 10:00 (entirely in the past);
        # o2 spans 10 minutes either side of "now" (currently running).
        now = datetime.now()
        earlier = now - timedelta(seconds=600)
        later = now + timedelta(seconds=600)
        d1 = date(2010,1,1)
        d2 = date(2010,1,2)
        t1 = time(9,00)
        t2 = time(10,00)
        dt1 = datetime.combine(d1, t1)
        dt2 = datetime.combine(d2, t2)
        o = e.occurrences.create(start=dt1, end=dt2)
        o2 = e.occurrences.create(start=earlier, end=later)
        self.ae(o.duration, timedelta(days=1, seconds=3600))
        self.ae(o.relative_duration, relativedelta(days=1, hours=1))
        self.ae(o.timespan_description(), "1 January 2010, 9am until 10am on 2 January 2010")
        self.ae(o.has_finished, True)
        self.ae(o.has_started, True)
        self.ae(o.now_on, False)
        self.ae(o2.has_finished, False)
        self.ae(o2.has_started, True)
        self.ae(o2.now_on, True)
        # time_to_go() is negative for past occurrences and None while running.
        self.assertTrue(o.time_to_go() < timedelta(0))
        self.ae(o2.time_to_go(), None)
        self.assertTrue(o.relative_time_to_go().years < 0)
        self.ae(o2.relative_time_to_go(), None)
"""
TODO
Occurrences know if they are the opening or closing occurrences for their event.
You can filter an Occurrence queryset to show only those occurrences that are opening or closing.
The custom admin occurrence view lists the occurrences of an event and all its children. Each occurrence shows which event it is linked to.
The custom admin view can be used to assign a different event to an occurrence. The drop-down list only shows the given event and its children.
Warning
The “delete selected objects” action uses QuerySet.delete() for efficiency reasons, which has an important caveat: your model’s delete() method will not be called.
If you wish to override this behavior, simply write a custom action which accomplishes deletion in your preferred manner – for example, by calling Model.delete() for each of the selected items.
For more background on bulk deletion, see the documentation on object deletion.
"""
| 38.901961 | 193 | 0.649362 | 832 | 5,952 | 4.611779 | 0.270433 | 0.039093 | 0.093823 | 0.101903 | 0.383633 | 0.353922 | 0.326297 | 0.242377 | 0.183477 | 0.094605 | 0 | 0.032337 | 0.241431 | 5,952 | 152 | 194 | 39.157895 | 0.817276 | 0.16297 | 0 | 0.371795 | 0 | 0 | 0.044309 | 0 | 0 | 0 | 0 | 0.006579 | 0.166667 | 1 | 0.025641 | false | 0 | 0.089744 | 0 | 0.128205 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
372a1246ed0dcfcdbea326ad609c7b48827fe89f | 6,231 | py | Python | src/data.py | gkovacs81/argus_server | 97ebf705ed3e61a69bd561faf495e2c19bda510d | [
"MIT"
] | null | null | null | src/data.py | gkovacs81/argus_server | 97ebf705ed3e61a69bd561faf495e2c19bda510d | [
"MIT"
] | 3 | 2021-06-02T04:07:35.000Z | 2021-12-27T20:21:46.000Z | src/data.py | gkovacs81/argus_server | 97ebf705ed3e61a69bd561faf495e2c19bda510d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import argparse
from sqlalchemy.exc import ProgrammingError
from monitoring.constants import ROLE_ADMIN, ROLE_USER
from models import Keypad, KeypadType, Sensor, SensorType, User, Zone
from monitoring.database import Session
from models import metadata
# The four sensor categories referenced (by list index) from the fixture
# functions below.
SENSOR_TYPES = [
    SensorType(1, name="Motion", description="Detect motion"),
    SensorType(2, name="Tamper", description="Detect sabotage"),
    SensorType(3, name="Open", description="Detect opening"),
    SensorType(4, name="Break", description="Detect glass break"),
]
# Single module-level database session shared by all helpers in this script.
session = Session()
def cleanup():
    """Delete all rows from every table, skipping tables that do not exist."""
    print("Clean up database...")
    # Reversed sort order clears child tables before their parents,
    # so foreign-key constraints are not violated.
    for tbl in reversed(metadata.sorted_tables):
        print(" - Clear table %s" % tbl)
        try:
            session.execute(tbl.delete())
            session.commit()
        except ProgrammingError:
            # Table missing (e.g. fresh database) - undo and carry on.
            session.rollback()
    print("Database is empty")
def env_prod():
    """Create the minimal production fixtures: one admin user, the sensor
    and keypad types, and a single DSC keypad."""
    admin_user = User(name="Administrator", role=ROLE_ADMIN, access_code="1234")
    admin_user.add_registration_code("ABCD1234")
    session.add(admin_user)
    print(" - Created admin user")
    session.add_all(SENSOR_TYPES)
    print(" - Created sensor types")
    dsc_type = KeypadType(1, "DSC", "DSC keybus (DSC PC-1555RKZ)")
    wiegand_type = KeypadType(2, "WIEGAND", "Wiegand keypad")
    session.add_all([dsc_type, wiegand_type])
    print(" - Created keypad types")
    session.add_all([Keypad(keypad_type=dsc_type)])
    print(" - Created keypads")
    session.commit()
def env_live_01():
    """Create the 'live_01' fixture environment: two users, six zones,
    eight sensors wired to those zones, and one DSC keypad."""
    session.add_all(
        [
            User(name="Administrator", role=ROLE_ADMIN, access_code="1234"),
            User(name="Chuck.Norris", role=ROLE_USER, access_code="1111"),
        ]
    )
    print(" - Created users")
    # Zones differ in which arming modes alert and with what delay.
    z1 = Zone(name="No delay", description="Alert with no delay")
    z2 = Zone(name="Away delayed", away_alert_delay=20, description="Alert delayed when armed AWAY")
    z3 = Zone(name="Stay delayed", stay_alert_delay=20, description="Alert delayed when armed STAY")
    z4 = Zone(name="Stay", stay_alert_delay=None, description="No alert when armed STAY")
    z5 = Zone(name="Away/Stay delayed", away_alert_delay=40, stay_alert_delay=20, description="Alert delayed when armed AWAY/STAY")
    z6 = Zone(name="Tamper", disarmed_delay=0, away_alert_delay=None, stay_alert_delay=None, description="Sabotage alert")
    session.add_all([z1, z2, z3, z4, z5, z6])
    print(" - Created zones")
    session.add_all(SENSOR_TYPES)
    print(" - Created sensor types")
    # One sensor per input channel; sensor_type indexes SENSOR_TYPES
    # (0=Motion, 1=Tamper, 2=Open, 3=Break).
    s1 = Sensor(channel=0, sensor_type=SENSOR_TYPES[0], zone=z5, description="Garage")
    s2 = Sensor(channel=1, sensor_type=SENSOR_TYPES[0], zone=z5, description="Hall")
    s3 = Sensor(channel=2, sensor_type=SENSOR_TYPES[2], zone=z5, description="Front door")
    s4 = Sensor(channel=3, sensor_type=SENSOR_TYPES[0], zone=z3, description="Kitchen")
    s5 = Sensor(channel=4, sensor_type=SENSOR_TYPES[0], zone=z1, description="Living room")
    s6 = Sensor(channel=5, sensor_type=SENSOR_TYPES[0], zone=z4, description="Children's room")
    s7 = Sensor(channel=6, sensor_type=SENSOR_TYPES[0], zone=z4, description="Bedroom")
    s8 = Sensor(channel=7, sensor_type=SENSOR_TYPES[1], zone=z6, description="Tamper")
    session.add_all([s1, s2, s3, s4, s5, s6, s7, s8])
    print(" - Created sensors")
    kt1 = KeypadType(1, "DSC", "DSC keybus (DSC PC-1555RKZ)")
    session.add_all([kt1])
    print(" - Created keypad types")
    k1 = Keypad(keypad_type=kt1)
    session.add_all([k1])
    print(" - Created keypads")
    session.commit()
def env_test_01():
    """Create the 'test_01' fixture environment: two users, five zones,
    three sensors, both keypad types and one DSC keypad."""
    admin_user = User(name="Administrator", role=ROLE_ADMIN, access_code="1234")
    admin_user.add_registration_code("1234")
    session.add_all([admin_user, User(name="Chuck Norris", role=ROLE_USER, access_code="1111")])
    print(" - Created users")
    # Zones differ in which arming modes alert and with what delay.
    zone_no_delay = Zone(name="No delay", description="Alert with no delay")
    zone_tamper = Zone(name="Tamper", disarmed_delay=0, away_alert_delay=None, stay_alert_delay=None, description="Sabotage alert")
    zone_away_stay = Zone(name="Away/stay delayed", away_alert_delay=5, stay_alert_delay=5, description="Alert delayed when armed AWAY or STAY")
    zone_stay_delayed = Zone(name="Stay delayed", stay_alert_delay=5, description="Alert delayed when armed STAY")
    zone_stay = Zone(name="Stay", stay_alert_delay=None, description="No alert when armed STAY")
    session.add_all([zone_no_delay, zone_tamper, zone_away_stay, zone_stay_delayed, zone_stay])
    print(" - Created zones")
    session.add_all(SENSOR_TYPES)
    print(" - Created sensor types")
    session.add_all([
        Sensor(channel=0, sensor_type=SENSOR_TYPES[0], zone=zone_away_stay, description="Garage"),
        Sensor(channel=1, sensor_type=SENSOR_TYPES[2], zone=zone_stay, description="Test room"),
        Sensor(channel=2, sensor_type=SENSOR_TYPES[1], zone=zone_tamper, description="Tamper"),
    ])
    print(" - Created sensors")
    dsc_type = KeypadType(1, "DSC", "DSC keybus (DSC PC-1555RKZ)")
    wiegand_type = KeypadType(2, "WIEGAND", "Wiegand keypad")
    session.add_all([dsc_type, wiegand_type])
    print(" - Created keypad types")
    session.add_all([Keypad(keypad_type=dsc_type)])
    print(" - Created keypads")
    session.commit()
def env_admin_registration():
    """Re-issue a registration code and reset the access code for the
    first admin user in an existing database."""
    admin_user = session.query(User).filter(User.role == ROLE_ADMIN).first()
    code = admin_user.add_registration_code("ABCD")
    print("Code: ", code)
    # NOTE(review): plain SQLAlchemy model instances expose no .update();
    # presumably the User model defines one - verify, otherwise assign
    # admin_user.access_code directly.
    admin_user.update({"access_code": "1234"})
    print("Password: ", "1234")
    session.commit()
    print("Admin registration added and password changed")
def main():
    """Command-line entry point: optionally wipe the database and/or
    create one of the predefined fixture environments."""
    # Discover the available environments from the env_* functions in this
    # module. (Fix: the old code iterated globals().items() and never used
    # the values - iterating the keys alone is sufficient.)
    environments = [
        name.replace("env_", "")
        for name in globals()
        if name.startswith("env_")
    ]
    parser = argparse.ArgumentParser()
    parser.add_argument("-d", "--delete", action="store_true", help="Delete database content")
    parser.add_argument("-c", "--create", metavar="environment",
                        help="Create database content (environments: {})".format(", ".join(environments)))
    args = parser.parse_args()
    if args.delete:
        cleanup()
    if args.create:
        # Dispatch to the matching env_<name>() fixture function.
        create_method = globals()["env_" + args.create]
        print("Creating '%s' environment..." % args.create)
        create_method()
        print("Environment created")


if __name__ == "__main__":
    main()
| 36.652941 | 132 | 0.67437 | 820 | 6,231 | 4.970732 | 0.204878 | 0.048577 | 0.047841 | 0.056673 | 0.609912 | 0.601079 | 0.58317 | 0.573847 | 0.465653 | 0.382974 | 0 | 0.033281 | 0.180228 | 6,231 | 169 | 133 | 36.869822 | 0.764683 | 0.00321 | 0 | 0.3125 | 0 | 0 | 0.225282 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046875 | false | 0.015625 | 0.046875 | 0 | 0.09375 | 0.1875 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
372abdbaa3a72f1cf8f94702fe632481d4798115 | 1,595 | py | Python | src/actionFactory.py | koleberd/goaldependency | b4572758c405c90b33408497429531db82f5d22b | [
"FTL",
"CNRI-Python",
"CECILL-B",
"AAL"
] | 1 | 2018-01-10T21:41:27.000Z | 2018-01-10T21:41:27.000Z | src/actionFactory.py | koleberd/goaldependency | b4572758c405c90b33408497429531db82f5d22b | [
"FTL",
"CNRI-Python",
"CECILL-B",
"AAL"
] | null | null | null | src/actionFactory.py | koleberd/goaldependency | b4572758c405c90b33408497429531db82f5d22b | [
"FTL",
"CNRI-Python",
"CECILL-B",
"AAL"
] | null | null | null | from action import *
from playerState import *
import gameController
import os.path
import json
###
#parses json containing known actions
#produces actions that return a specified PS
###
class ActionFactory:
    """Builds Action objects from the JSON action-memory file and serves
    the ones whose result matches a requested player state."""

    def __init__(self, costs):
        """Load json/actionMemory.json and parse every stored action.

        costs -- mapping from function spec string to its cost override.
        """
        with open('json/actionMemory.json') as jsfl:
            actMemory = json.load(jsfl)
        self.costs = costs
        self.actionMemory = []
        for key in actMemory:
            self.actionMemory.append(self.parseActionJSON(actMemory[key]))

    # parses json containing known actions
    def parseActionJSON(self, obj):
        """Build one Action from its JSON description.

        Fix: removed the dead assignment `cst = obj['cost']` - it was
        unconditionally overwritten by both branches below, so the stored
        cost field was never used.
        """
        psp = PlayerState.parsePlayerStateJSON(obj['prereq'])
        psr = PlayerState.parsePlayerStateJSON(obj['result'])
        # Cost comes from the configured cost table, falling back to 1.
        if obj['function'] in self.costs.keys():
            cst = self.costs[obj['function']]
        else:
            cst = 1
        # obj['function'] is "name:arg1,arg2,..."; defer execution to the
        # game controller at call time.
        func = lambda gs: gameController.executeFunction(obj['function'].split(":")[0], gs, obj['function'].split(":")[1].split(','))
        nm = obj['function']
        return Action(psp, psr, cst, func, nm)

    # produces actions that return a specified PS
    def getActions(self, ps):
        """Return the known actions whose result state is parallel to ps."""
        ret = []
        for act in self.actionMemory:
            if act.ps_res.isParallel(ps):
                ret.append(act)
        return ret
'''
def scaleCosts(self,scalars):
res = {}
if scalars == None:
return res
for act in self.actionMemory:
ps = act.ps_res
if ps.inFrontOf in scalars.keys():
res[act] = scalars[ps.inFrontOf]
return res
'''
| 28.482143 | 131 | 0.592476 | 180 | 1,595 | 5.216667 | 0.361111 | 0.058573 | 0.042599 | 0.053248 | 0.198083 | 0.078807 | 0.078807 | 0 | 0 | 0 | 0 | 0.002655 | 0.291536 | 1,595 | 55 | 132 | 29 | 0.828319 | 0.09906 | 0 | 0 | 0 | 0 | 0.071745 | 0.019486 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.166667 | 0 | 0.366667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
372e1a3008c4b808410b6df31c41c5b99e78576d | 2,904 | py | Python | seeq/models/lattice_plot.py | juanjosegarciaripoll/seeq | 3554550c3348fbaae398737cf4ae5510a34d6665 | [
"MIT"
] | 13 | 2019-07-31T17:00:37.000Z | 2021-02-02T18:34:34.000Z | seeq/models/lattice_plot.py | Ydeh22/seeq | 3554550c3348fbaae398737cf4ae5510a34d6665 | [
"MIT"
] | null | null | null | seeq/models/lattice_plot.py | Ydeh22/seeq | 3554550c3348fbaae398737cf4ae5510a34d6665 | [
"MIT"
] | 5 | 2019-11-04T17:54:40.000Z | 2022-02-16T23:04:56.000Z |
import matplotlib.collections
import matplotlib.pyplot as plt
import matplotlib.tri as tri
import scipy.sparse as sp
import numpy as np
def plot_lattice(L, ax=None, dot='.'):
    """Plot a 2D or 3D representation of the lattice on the given
    axis, or create one if none is given.

    Parameters
    ----------
    L   -- A Regular3DLattice() object
    ax  -- Axis to plot on. If None, create a new one.
    dot -- Symbol to plot on the vertices of the lattice.

    Returns
    -------
    ax  -- Axis on which the figure is plot.
    """
    if ax is None:
        if L.dimension <= 2:
            fig, ax = plt.subplots()
        else:
            import mpl_toolkits.mplot3d
            fig, ax = plt.subplots(subplot_kw={'projection':'3d'})
    #
    # Plot all the connections (nonzero couplings of the Hamiltonian)
    #
    H = L.hamiltonian()
    coord = L.coord
    aux = sp.coo_matrix(H)
    if L.dimension == 3:
        import mpl_toolkits.mplot3d.art3d
        lines = [(coord[a,:], coord[b,:])
                 for (a,b) in zip(aux.row, aux.col)
                 if H[a,b] != 0]
        lc = mpl_toolkits.mplot3d.art3d.Line3DCollection(lines, linewidths=0.2)
    else:
        import matplotlib.collections
        lines = [(coord[a,0:2], coord[b,0:2])
                 for (a,b) in zip(aux.row, aux.col)
                 if H[a,b] != 0]
        lc = matplotlib.collections.LineCollection(lines, linewidths=0.2)
    ax.add_collection(lc)
    #
    # Plot all the vertices that are connected to at least one other
    #
    ndx, _ = np.nonzero(np.sum(np.abs(H), 1))
    points = coord[ndx,:]
    # Bug fix: the `dot` argument was previously ignored ('.' hard-coded).
    if L.dimension == 3:
        ax.plot(points[:,0], points[:,1], points[:,2], dot)
    else:
        ax.plot(points[:,0], points[:,1], dot)
    return ax
def plot_field2d(lattice, field, ax=None, x=None, y=None,
                 Lx=100, Ly=100, σ=1/2.0, cmap='Greys'):
    """Plot a scalar field living on a 2D lattice as a Gaussian-smoothed
    density image.

    Parameters
    ----------
    lattice -- A Regular3DLattice() object (provides coord and X/Y bounds).
    field -- One weight per lattice site, in the order of lattice.coord.
    ax -- Axis to plot on. If None, create a new one.
    x, y -- Optional grid coordinates; computed from the lattice bounds
            (padded by σ) when omitted.
    Lx, Ly -- Grid resolution used when x/y are computed here.
    σ -- Width of the Gaussian blob placed on each site.
    cmap -- Matplotlib colormap name.

    Returns
    -------
    ax -- Axis on which the figure is plot.
    dty -- The (Ly, Lx) density array that was drawn.
    """
    if ax is None:
        fig, ax = plt.subplots()
    if x is None:
        x = np.linspace(lattice.Xmin-σ, lattice.Xmax+σ, Lx)
    else:
        Lx = len(x)
    if y is None:
        y = np.linspace(lattice.Ymin-σ, lattice.Ymax+σ, Ly)
    else:
        Ly = len(y)
    extent = [np.min(x), np.max(x), np.min(y), np.max(y)]
    # Bug fix: a hand-computed aspect ratio was assigned and then
    # immediately overwritten; 'auto' is the value actually used.
    aspect = 'auto'
    x = np.reshape(x, (1,Lx))
    y = np.reshape(y, (Ly,1))
    dty = np.zeros((Ly, Lx))
    # Accumulate a Gaussian of weight n centred on each site (X, Y).
    for (n, (X, Y, Z)) in zip(field, lattice.coord):
        dty += n * np.exp(-((x-X)**2+(y-Y)**2)/σ**2)
    ax.imshow(dty, extent=extent, aspect=aspect, interpolation='none',
              origin='lower', cmap=cmap)
    return ax, dty
| 31.225806 | 79 | 0.551653 | 435 | 2,904 | 3.662069 | 0.285057 | 0.040176 | 0.020088 | 0.030132 | 0.25361 | 0.25361 | 0.2285 | 0.2285 | 0.2285 | 0.2285 | 0 | 0.02489 | 0.294421 | 2,904 | 92 | 80 | 31.565217 | 0.752562 | 0.231405 | 0 | 0.293103 | 0 | 0 | 0.015559 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.137931 | 0 | 0.206897 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
372f233c0bcb9fb29c757ffe4ac5e45a5fb9ba0e | 10,696 | py | Python | nl2codemodel/src/nltocode/eval/score.py | SmartDataAnalytics/codeCAI | 18a31541506d89b72cd07e498ef43dfa657877f8 | [
"MIT"
] | null | null | null | nl2codemodel/src/nltocode/eval/score.py | SmartDataAnalytics/codeCAI | 18a31541506d89b72cd07e498ef43dfa657877f8 | [
"MIT"
] | null | null | null | nl2codemodel/src/nltocode/eval/score.py | SmartDataAnalytics/codeCAI | 18a31541506d89b72cd07e498ef43dfa657877f8 | [
"MIT"
] | null | null | null | import argparse
import logging as log
import re
from collections import defaultdict
from difflib import SequenceMatcher
from os.path import commonprefix
import pandas as pd
import torch
from torch.nn.functional import nll_loss, log_softmax
from nltocode.datamodule import NL2CodeTrainDataModule
from nltocode.grammar.grammargraphloader import GrammarGraphLoader
from nltocode.grammar.grammargraphvisitor import AbstractGrammarGraphVisitor
from nltocode.nl2code import load_checkpoint
from nltocode.preprocessing.filehandling import save_json
def score(model_path, train_data_args, inference_args, output_filename):
    """Score the validation split of a trained NL2Code model and write
    per-example prefix/diff statistics to a JSON file.

    Parameters
    ----------
    model_path : checkpoint path passed to load_checkpoint().
    train_data_args : kwargs for NL2CodeTrainDataModule; its validation
        dataloader is the data being scored.
    inference_args : beam-search/test settings; must contain
        'grammar_graph_file'.
    output_filename : path of the JSON file written at the end.

    Fixes: removed an unused loop counter (`i`) and dead commented-out code.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print('Using device:', device)
    print()
    if device.type == 'cuda':
        current_device = torch.cuda.current_device()
        print('Current device: ', current_device)
        print('Current device name: ', torch.cuda.get_device_name(current_device))
        print('Memory Usage:')
        print('Allocated:', round(torch.cuda.memory_allocated(0) / 1024 ** 3, 1), 'GB')
        print('Cached: ', round(torch.cuda.memory_reserved(0) / 1024 ** 3, 1), 'GB')
    system = load_checkpoint(model_path, test_args=inference_args).to(device)
    model = system.model
    data_module = NL2CodeTrainDataModule(
        **train_data_args,
        max_path_depth=model.max_path_depth,
        path_multiple=model.path_multiple,
        max_charseq_len=model.max_charseq_len if model.withcharemb else None,
    )
    data_module.setup('fit')
    system.setup('test')
    system.eval()
    dataloader = data_module.val_dataloader()
    grammar_graph_file = inference_args.get('grammar_graph_file')
    grammar_graph = GrammarGraphLoader(grammar_graph_file).load_graph()
    grammar_graph_visitor = AbstractGrammarGraphVisitor(grammar_graph)
    outputs = []
    with torch.no_grad():
        for batch in dataloader:
            nl_seqs, _, ast_seqs, _ = batch
            beamsearch_predictions = system.beamsearch.perform_beam_search(nl_seqs.to(device))
            # Prefix agreement and opcode-level diff against the gold AST.
            prefix_datapoint = comp_longest_prefix(system, beamsearch_predictions, ast_seqs)
            diff_analysis_res = perf_diff_analysis(beamsearch_predictions, grammar_graph_visitor, ast_seqs)
            outputs.append({**prefix_datapoint, **diff_analysis_res})
    out_df = pd.DataFrame(outputs)
    save_json(out_df, output_filename)
def comp_longest_prefix(system, beamsearch_predictions, ast_seqs):
    """Compare the top beam-search prediction against the gold AST sequence.

    Reports common-prefix lengths both for the raw sequences and for
    sequences with source-copy tokens collapsed (see filter_ast_seq)."""
    gold_seq = ast_seqs.view(-1).tolist()
    ltgt = system.model.vocabtgt_size - system.model.vocabsrc_size
    gold_filtered = filter_ast_seq(gold_seq, ltgt)
    if beamsearch_predictions:
        pred_seq, score, code_snippet = beamsearch_predictions[0]
    else:
        pred_seq, score, code_snippet = [], float('inf'), ''
    pred_filtered = filter_ast_seq(pred_seq, ltgt)
    raw_prefix = commonprefix((gold_seq, pred_seq))
    filtered_prefix = commonprefix((gold_filtered, pred_filtered))
    return {
        'replaced_common_prefix_len': len(filtered_prefix),
        'replaced_filtered_expected_ast_seq_len': len(gold_filtered),
        'replaced_filtered_beamsearch_ast_seq_len': len(pred_filtered),
        'common_prefix_len': len(raw_prefix),
        'expected_ast_seq_len': len(gold_seq),
        'beamsearch_ast_seq_len': len(pred_seq),
        'expected_ast_seq': gold_seq,
        'beamsearch_ast_seq': pred_seq,
        'replaced_filtered_expected_ast_seq': gold_filtered,
        'replaced_filtered_beamsearch_ast_seq': pred_filtered,
    }
def perf_diff_analysis(beamsearch_predictions, grammar_graph_visitor, ast_seqs):
    """Diff the decoded gold AST sequence against the decoded top beam-search
    prediction and group the SequenceMatcher opcodes by tag."""
    ast_seq_exp_enc = ast_seqs.view(-1).tolist()
    # Fall back to an empty prediction when beam search produced no result.
    if len(beamsearch_predictions) == 0:
        ast_seq_pred_enc, score, code_snippet = [], float('inf'), ''
    else:
        ast_seq_pred_enc, score, code_snippet = beamsearch_predictions[0]
    ast_seq_exp_dec, ast_seq_exp_type = decode_ast_seq_as_char(ast_seq_exp_enc, grammar_graph_visitor)
    ast_seq_pred_dec, ast_seq_pred_type = decode_ast_seq_as_char(ast_seq_pred_enc, grammar_graph_visitor)
    s = SequenceMatcher(None, ast_seq_exp_dec, ast_seq_pred_dec)
    # Bucket each opcode ('equal', 'replace', 'delete', 'insert') with its
    # index spans and the decoded labels/types those spans cover.
    diff_analysis = defaultdict(list)
    for tag, i1, i2, j1, j2 in s.get_opcodes():
        diff_analysis[str(tag)].append(
            {'pos_exp_i1': i1,
             'pos_exp_i2': i2,
             'pos_pred_j1': j1,
             'pos_pred_j2': j2,
             'val_exp': ast_seq_exp_dec[i1:i2],
             'val_pred': ast_seq_pred_dec[j1:j2],
             'type_exp': ast_seq_exp_type[i1:i2],
             'type_pred': ast_seq_pred_type[j1:j2]
             })
    return {
        'diff_analysis_res': diff_analysis,
        'ast_seq_exp_dec': ast_seq_exp_dec,
        'ast_seq_exp_type': ast_seq_exp_type,
        'ast_seq_pred_dec': ast_seq_pred_dec,
        'ast_seq_pred_type': ast_seq_pred_type
    }
def comp_probs(model, nl_seqs, char_seqs, ast_seqs, edge_order_seqs):
    """Teacher-forced scoring: run the model on the gold sequence and
    report the scores of both the gold tokens and the argmax tokens."""
    log_probs = predict(model, nl_seqs, char_seqs, ast_seqs, edge_order_seqs)
    # Targets are the gold tokens shifted by one (next-token prediction).
    gold_targets = ast_seqs[1:, :].view(-1)
    gold_scores = compute_score(log_probs, gold_targets, model.vocab_pad_id)
    argmax_tokens = torch.argmax(log_probs, dim=-1)
    argmax_scores = torch.max(log_probs, -1).values
    return {
        'target_expected_scores_sum': gold_scores.sum().item(),
        'target_predicted_scores_sum': argmax_scores.sum().item(),
        'nl_length': nl_seqs.size(0),
        'target_ast_seq_length': ast_seqs.size(0),
        'target_expected_scores': gold_scores.tolist(),
        'target_predicted_scores': argmax_scores.tolist(),
        'target_predicted_ast_seq': argmax_tokens.tolist(),
    }
def decode_ast_seq_as_char(ast_seq_enc, grammar_graph_visitor):
    """Decode an encoded AST token sequence into two parallel lists:
    node labels (with '#strliteral'/'#literal' markers stripped) and
    node types."""
    marker_pattern = r'|'.join(map(re.escape, ['#strliteral', '#literal']))
    labels = []
    types = []
    for token in ast_seq_enc:
        raw_label = grammar_graph_visitor.get_node_label(token)
        labels.append(re.sub(marker_pattern, '', raw_label))
        types.append(grammar_graph_visitor.get_node_type(token))
    return labels, types
def decode_ast_seq(ast_seq_enc, grammar_graph_visitor):
    """Decode an encoded AST sequence, merging each run of consecutive
    'strliteral' tokens into a single concatenated string token.

    Returns (decoded_labels, node_types), two parallel lists.

    Bug fix: the original indexed one element past the end of the sequence
    whenever it ended with a 'strliteral' run, raising IndexError; the inner
    loop is now bounds-checked and a trailing run is emitted correctly.
    """
    marker_pattern = r'|'.join(map(re.escape, ['#strliteral', '#literal']))
    ast_seq_dec = []
    ast_seq_type = []
    n = len(ast_seq_enc)
    i = 0
    while i < n:
        # Collect a (possibly empty) run of consecutive strliteral tokens.
        strlit_run = []
        label = None
        token_type = None
        while i < n:
            token = ast_seq_enc[i]
            label = re.sub(marker_pattern, '', grammar_graph_visitor.get_node_label(token))
            token_type = grammar_graph_visitor.get_node_type(token)
            if token_type != 'strliteral':
                break
            strlit_run.append(label)
            i += 1
        if strlit_run:
            ast_seq_dec.append(''.join(strlit_run))
            ast_seq_type.append('strliteral')
        if i < n:
            # Emit the non-strliteral token that terminated the run.
            ast_seq_dec.append(label)
            ast_seq_type.append(token_type)
            i += 1
    return ast_seq_dec, ast_seq_type
def filter_ast_seq(ast_seq_list, tgt_vocab_size):
    """Map every token id above tgt_vocab_size to the single sentinel id
    tgt_vocab_size + 1, then collapse runs of consecutive sentinels into
    one occurrence."""
    sentinel = tgt_vocab_size + 1
    result = []
    for vocab_id in ast_seq_list:
        mapped = sentinel if vocab_id > tgt_vocab_size else vocab_id
        # Skip a sentinel that directly follows another sentinel.
        if mapped == sentinel and result and result[-1] == sentinel:
            continue
        result.append(mapped)
    return result
def compute_score(log_probs, sequence, pad_id):
    """Per-token log-probability of `sequence` under `log_probs`;
    positions equal to pad_id score 0."""
    # nll_loss negates the selected log-prob, so negate back to get scores.
    token_losses = nll_loss(log_probs, sequence, ignore_index=pad_id, reduction='none')
    return (-token_losses).view(-1)
def predict(model, nl_seqs, char_seqs, ast_seqs, paths_seqs):
    """Run the model teacher-forced (inputs shifted by one position) and
    return per-position log-probabilities flattened to (positions, vocab)."""
    print("shapes: ast_seq %s, paths %s" % (ast_seqs.shape, paths_seqs.shape))
    print("ast_seq %s" % ast_seqs.view(-1).tolist())
    print("paths %s" % paths_seqs.squeeze(1).tolist())
    # Feed everything except the last token; the model predicts the next one.
    raw_logits = model(nl_seqs, char_seqs, ast_seqs[:-1, :], paths_seqs[:-1, :])
    vocab_size = raw_logits.shape[-1]
    flat_logits = raw_logits.view(-1, vocab_size)
    return log_softmax(flat_logits, 1)
def get_args():
    """Parse command-line arguments for the scoring script.

    fromfile_prefix_chars='@' lets callers pass an argument file as @file.
    """
    arg_parser = argparse.ArgumentParser("Estimate sequence scores", fromfile_prefix_chars='@')
    # Dataset parameters
    arg_parser.add_argument("--preproc-data-path", type=str)
    arg_parser.add_argument("--model-path", type=str)
    arg_parser.add_argument("--grammar-graph-file", type=str)
    arg_parser.add_argument("--output-file-path", type=str)
    # Beam-search parameters
    arg_parser.add_argument("--num-beams", type=int, default=10)
    arg_parser.add_argument("--max-beam-length", type=int, default=1000)
    arg_parser.add_argument("--disable-decoder-constraint-mask", action='store_true')
    return arg_parser.parse_args()
def main():
    """Entry point: parse CLI arguments and score the validation split."""
    # Wide print options so full tensors show up in debug logs.
    torch.set_printoptions(precision=3, threshold=10240, linewidth=100000)
    args = get_args()
    print("Score args:", vars(args))
    log.basicConfig(level='DEBUG')
    # Beam-search / inference settings forwarded to load_checkpoint().
    inference_args = {
        'grammar_graph_file': args.grammar_graph_file,
        'target_language': 'python',
        'num_beams': args.num_beams,
        'disable_decoder_constraint_mask': args.disable_decoder_constraint_mask,
        'max_beam_length': args.max_beam_length,
        'max_num_predicted_results': 1,
        'beam_search_mode': 'full',
        'keep_invalid_beamsearch_results': False,
        'validate_parsability': False
    }
    # train_split=0.0 / val_split=1.0: score the entire dataset as validation.
    train_data_args = {
        'batch_size': 1,
        'num_dataloader_workers': 1,
        'train_valid_data_path': args.preproc_data_path,
        'train_split': 0.0,
        'val_split': 1.0,
        'max_src_sentence_length': 10000,
        'max_tgt_sentence_length': 10000,
    }
    score(args.model_path, train_data_args, inference_args, args.output_file_path)


if __name__ == '__main__':
    main()
| 39.036496 | 117 | 0.703628 | 1,441 | 10,696 | 4.800139 | 0.172797 | 0.081538 | 0.03036 | 0.023421 | 0.388897 | 0.321093 | 0.244326 | 0.211219 | 0.160619 | 0.155414 | 0 | 0.01178 | 0.190445 | 10,696 | 273 | 118 | 39.179487 | 0.787042 | 0.024776 | 0 | 0.152381 | 0 | 0 | 0.127649 | 0.052556 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052381 | false | 0 | 0.066667 | 0.004762 | 0.161905 | 0.057143 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
37319bdaa284f723a5abf8c32c3ba570386d947b | 5,943 | py | Python | cv/yolov3/yolov3_gpu/src/detection.py | hellowaywewe/deeplearning | fcd12cb2cc46f17725b139571891b9c212e999d5 | [
"Apache-2.0"
] | null | null | null | cv/yolov3/yolov3_gpu/src/detection.py | hellowaywewe/deeplearning | fcd12cb2cc46f17725b139571891b9c212e999d5 | [
"Apache-2.0"
] | null | null | null | cv/yolov3/yolov3_gpu/src/detection.py | hellowaywewe/deeplearning | fcd12cb2cc46f17725b139571891b9c212e999d5 | [
"Apache-2.0"
] | null | null | null | from collections import defaultdict
import numpy as np
import sys
import cv2
import pandas as pd
# Class labels for the wildlife detector; the model's integer class id is an
# index into this list.
label_list = [
    'Bird_spp',
    'Blue_sheep',
    'Glovers_pika',
    'Gray_wolf',
    'Himalaya_marmot',
    'Red_fox',
    'Snow_leopard',
    'Tibetan_snowcock',
    'Upland_Buzzard',
    'White-lipped_deer'
]


class DetectionEngine:
    """Detection engine.

    Accumulates raw per-class detections via :meth:`detect` and reduces them
    with per-class non-maximum suppression via :meth:`do_nms_for_results`.
    """

    def __init__(self, args):
        # `args` must provide `ignore_threshold` (minimum confidence to keep
        # a box) and `nms_thresh` (IoU threshold used for suppression).
        self.ignore_threshold = args.ignore_threshold
        self.labels = label_list
        self.num_classes = len(self.labels)
        # class id (stringified) -> list of [x, y, w, h, confidence]
        self.results = defaultdict(list)
        self.det_boxes = []
        self.nms_thresh = args.nms_thresh

    def do_nms_for_results(self):
        """Run NMS per class over accumulated results and fill det_boxes."""
        for clsi in self.results:
            dets = np.array(self.results[clsi])
            keep_index = self._nms(dets, self.nms_thresh)
            keep_box = [{'category_id': self.labels[int(clsi)],
                         'bbox': list(dets[i][:4].astype(float)),
                         'score': dets[i][4].astype(float)}
                        for i in keep_index]
            self.det_boxes.extend(keep_box)

    def _nms(self, predicts, threshold):
        """Greedy non-maximum suppression.

        Args:
            predicts (np.ndarray): N x 5 array of [x, y, w, h, score].
            threshold (float): IoU above which a lower-scored box is dropped.

        Returns:
            list: indices of the kept boxes, highest score first.
        """
        # convert xywh -> xmin ymin xmax ymax
        x1 = predicts[:, 0]
        y1 = predicts[:, 1]
        x2 = x1 + predicts[:, 2]
        y2 = y1 + predicts[:, 3]
        scores = predicts[:, 4]

        # +1 follows the classic pixel-inclusive area convention.
        areas = (x2 - x1 + 1) * (y2 - y1 + 1)
        order = scores.argsort()[::-1]

        reserved_boxes = []
        while order.size > 0:
            i = order[0]
            reserved_boxes.append(i)
            # Intersection of the current best box with all remaining boxes.
            max_x1 = np.maximum(x1[i], x1[order[1:]])
            max_y1 = np.maximum(y1[i], y1[order[1:]])
            min_x2 = np.minimum(x2[i], x2[order[1:]])
            min_y2 = np.minimum(y2[i], y2[order[1:]])

            intersect_w = np.maximum(0.0, min_x2 - max_x1 + 1)
            intersect_h = np.maximum(0.0, min_y2 - max_y1 + 1)
            intersect_area = intersect_w * intersect_h

            ovr = intersect_area / (areas[i] + areas[order[1:]] - intersect_area)
            # Keep only boxes that do not overlap the current box too much.
            indexs = np.where(ovr <= threshold)[0]
            order = order[indexs + 1]
        return reserved_boxes

    def detect(self, outputs, batch, image_shape, config=None):
        """Decode raw network outputs into per-class candidate boxes.

        Args:
            outputs: list of head outputs shaped [B, gx, gy, anchors, 5 + C]
                with box coordinates normalized to [0, 1].
            batch (int): number of images in the batch.
            image_shape (tuple): (width, height) of the original image.
            config: object providing ``num_classes``.
        """
        for batch_id in range(batch):
            for out_item in outputs:
                # [gx, gy, anchors, 5 + C] slice for this image.
                out_item_single = out_item[batch_id, :]
                ori_w, ori_h = image_shape
                # Scale normalized center/size back to pixel units, flatten.
                x = (out_item_single[..., 0] * ori_w).reshape(-1)
                y = (out_item_single[..., 1] * ori_h).reshape(-1)
                w = (out_item_single[..., 2] * ori_w).reshape(-1)
                h = (out_item_single[..., 3] * ori_h).reshape(-1)
                conf = out_item_single[..., 4:5].reshape(-1)
                cls_emb = out_item_single[..., 5:].reshape(-1, config.num_classes)
                cls_argmax = np.argmax(cls_emb, axis=-1).reshape(-1)

                x_top_left = x - w / 2.
                y_top_left = y - h / 2.
                # Select each row's argmax class score. Previously this built
                # an all-False mask via np.random.random(...) > sys.maxsize
                # and filled it in a Python loop -- equivalent but wasteful
                # and obscure; an explicit boolean mask is clearer/faster.
                flag = np.zeros(cls_emb.shape, dtype=bool)
                flag[np.arange(flag.shape[0]), cls_argmax] = True
                confidence = cls_emb[flag] * conf

                for x_lefti, y_lefti, wi, hi, confi, clsi in zip(
                        x_top_left, y_top_left, w, h, confidence, cls_argmax):
                    if confi < self.ignore_threshold:
                        continue
                    # Clamp boxes to the image bounds.
                    x_lefti = max(0, x_lefti)
                    y_lefti = max(0, y_lefti)
                    wi = min(wi, ori_w)
                    hi = min(hi, ori_h)
                    # transform catId to match coco (stringified class id)
                    coco_clsi = str(clsi)
                    self.results[coco_clsi].append(
                        [x_lefti, y_lefti, wi, hi, confi])

    def draw_boxes_in_image(self, img, img_name):
        """Draw det_boxes onto ``img``; return (img, list of result tuples).

        Each result tuple is (numeric image id, image name, category, score).
        """
        res_list = []
        for i in range(len(self.det_boxes)):
            x = int(self.det_boxes[i]['bbox'][0])
            y = int(self.det_boxes[i]['bbox'][1])
            w = int(self.det_boxes[i]['bbox'][2])
            h = int(self.det_boxes[i]['bbox'][3])
            cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 3)
            score = round(self.det_boxes[i]['score'], 3)
            text = self.det_boxes[i]['category_id'] + ', ' + str(score)
            cv2.putText(img, text, (x, y), cv2.FONT_HERSHEY_PLAIN, 3, (0, 0, 255), 3)
            res = (int(img_name.split(".")[0]), img_name,
                   self.det_boxes[i]['category_id'], score)
            res_list.append(res)
        return img, res_list
| 38.341935 | 119 | 0.502272 | 792 | 5,943 | 3.57197 | 0.238636 | 0.042064 | 0.07211 | 0.059738 | 0.245316 | 0.219512 | 0.201485 | 0.168257 | 0.168257 | 0.168257 | 0 | 0.038713 | 0.356722 | 5,943 | 154 | 120 | 38.590909 | 0.701282 | 0.142521 | 0 | 0 | 0 | 0 | 0.036788 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044248 | false | 0 | 0.044248 | 0 | 0.115044 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e92b47f2d6ffa829108d4338b27ad0da0819ace | 7,348 | py | Python | RSNA Pneumonia Detection Challenge/Retina_net model.py | YakinRubaiat/Kaggle-competition | 3c9cc9f2ef180bbde2a17c1332c0d13eca0e4a37 | [
"Apache-2.0"
] | null | null | null | RSNA Pneumonia Detection Challenge/Retina_net model.py | YakinRubaiat/Kaggle-competition | 3c9cc9f2ef180bbde2a17c1332c0d13eca0e4a37 | [
"Apache-2.0"
] | null | null | null | RSNA Pneumonia Detection Challenge/Retina_net model.py | YakinRubaiat/Kaggle-competition | 3c9cc9f2ef180bbde2a17c1332c0d13eca0e4a37 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
import pandas as pd
import numpy as np
import scipy.misc
import pydicom
import glob
import sys
import os
import pandas as pd
import base64
from IPython.display import HTML
# In[ ]:
from scipy.ndimage.interpolation import zoom
# In[ ]:
get_ipython().system('git clone https://github.com/fizyr/keras-retinanet')
os.chdir("keras-retinanet")
get_ipython().system('python setup.py build_ext --inplace')
# In[ ]:
DATA_DIR = "/kaggle/input/"
ROOT_DIR = "/kaggle/working/"
# In[ ]:
train_pngs_dir = os.path.join(DATA_DIR, "rsna-pneu-train-png/stage_1_train_pngs/orig/")
test_dicoms_dir = os.path.join(DATA_DIR, "rsna-pneumonia-detection-challenge/stage_1_test_images/")
# In[ ]:
bbox_info = pd.read_csv(os.path.join(DATA_DIR, "rsna-pneumonia-detection-challenge/stage_1_train_labels.csv"))
detailed_class_info = pd.read_csv(os.path.join(DATA_DIR, "rsna-pneumonia-detection-challenge/stage_1_detailed_class_info.csv"))
detailed_class_info = detailed_class_info.drop_duplicates()
# In[ ]:
positives = detailed_class_info
positives.head()
# In[ ]:
cash_class = positives
cash_class.head()
# In[ ]:
positives = positives.merge(bbox_info, on="patientId")
positives = positives[["patientId", "x", "y", "width", "height"]]
positives = positives.merge(cash_class, on="patientId")
# In[ ]:
#positives[positives["class"]=='No Lung Opacity / Not Normal']
#positives["class"] = np.where(positives["class"]=='No Lung Opacity / Not Normal', 'Normal','')
conditions = [
positives["class"]=='No Lung Opacity / Not Normal',
positives["class"]=='Normal',
positives["class"]=='Lung Opacity'
]
choices = ['','','Lung Opacity']
positives["class"] = np.select(conditions, choices)
positives.head()
# In[ ]:
positives["patientId"] = [os.path.join(train_pngs_dir, "{}.png".format(_)) for _ in positives.patientId]
positives["x1"] = positives["x"]
positives["y1"] = positives["y"]
positives["x2"] = positives["x"] + positives["width"]
positives["y2"] = positives["y"] + positives["height"]
positives["Target"] = positives["class"]
del positives["x"], positives["y"], positives["width"], positives["height"]
# In[ ]:
del positives['class']
# In[ ]:
positives.head()
# In[ ]:
annotations = positives
# In[ ]:
annotations = annotations.fillna(88888)
annotations["x1"] = annotations.x1.astype("int32").astype("str")
annotations["y1"] = annotations.y1.astype("int32").astype("str")
annotations["x2"] = annotations.x2.astype("int32").astype("str")
annotations["y2"] = annotations.y2.astype("int32").astype("str")
annotations = annotations.replace({"88888": ""})
annotations = annotations[["patientId", "x1", "y1", "x2", "y2", "Target"]]
annotations.to_csv(os.path.join(ROOT_DIR, "annotations.csv"), index=False, header=False)
# In[ ]:
annotations.head()
# In[ ]:
classes_file = pd.DataFrame({"class": ["Lung Opacity"], "id": [0]})
classes_file.to_csv(os.path.join(ROOT_DIR, "classes.csv"), index=False, header=False)
# In[ ]:
classes_file.head()
# In[ ]:
annotations.shape
# In[ ]:
get_ipython().system('python /kaggle/working/keras-retinanet/keras_retinanet/bin/train.py --backbone "resnet50" --image-min-side 608 --image-max-side 608 --batch-size 8 --epochs 1 --steps 3623 --no-snapshots csv /kaggle/working/annotations.csv /kaggle/working/classes.csv')
# In[ ]:
model.save("model.h5")
# In[ ]:
get_ipython().system('python /kaggle/working/keras-retinanet/keras_retinanet/bin/convert_model.py /kaggle/working/keras-retinanet/snapshots/resnet50_csv_1.h5 /kaggle/working/keras-retinanet/converted_model.h5 ')
# In[ ]:
from keras_retinanet.models import load_model
retinanet = load_model(os.path.join(ROOT_DIR, "keras-retinanet/converted_model.h5"),
backbone_name="resnet50")
# In[ ]:
def preprocess_input(x):
    """Cast an image array to float32 and subtract the per-channel
    ImageNet BGR means (Caffe-style preprocessing)."""
    x = x.astype("float32")
    for channel, mean in enumerate((103.939, 116.779, 123.680)):
        x[..., channel] -= mean
    return x
# In[ ]:
test_dicoms = glob.glob(os.path.join(test_dicoms_dir, "*.dcm"))
test_patient_ids = [_.split("/")[-1].split(".")[0] for _ in test_dicoms]
test_predictions = []
for i, dcm_file in enumerate(test_dicoms):
sys.stdout.write("Predicting images: {}/{} ...\r".format(i+1, len(test_dicoms)))
sys.stdout.flush()
# Load DICOM and extract pixel array
dcm = pydicom.read_file(dcm_file)
arr = dcm.pixel_array
# Make 3-channel image
img = np.zeros((arr.shape[0], arr.shape[1], 3))
for channel in range(img.shape[-1]):
img[..., channel] = arr
# Resize
# Change image size if necessary!
scale_factor = 256. / img.shape[0]
img = zoom(img, [scale_factor, scale_factor, 1], order=1, prefilter=False)
# Preprocess with ImageNet mean subtraction
img = preprocess_input(img)
prediction = retinanet.predict_on_batch(np.expand_dims(img, axis=0))
test_predictions.append(prediction)
# In[ ]:
test_pred_df = pd.DataFrame()
for i, pred in enumerate(test_predictions):
# Take top 5
# Should already be sorted in descending order by score
bboxes = pred[0][0][:5]
scores = pred[1][0][:5]
# -1 will be predicted if nothing is detected
detected = scores > -1
if np.sum(detected) == 0:
continue
else:
bboxes = bboxes[detected]
bboxes = [box / scale_factor for box in bboxes]
scores = scores[detected]
individual_pred_df = pd.DataFrame()
for j, each_box in enumerate(bboxes):
# RetinaNet output is [x1, y1, x2, y2]
tmp_df = pd.DataFrame({"patientId": [test_patient_ids[i]],
"x": [each_box[0]],
"y": [each_box[1]],
"w": [each_box[2]-each_box[0]],
"h": [each_box[3]-each_box[1]],
"score": [scores[j]]})
individual_pred_df = individual_pred_df.append(tmp_df)
test_pred_df = test_pred_df.append(individual_pred_df)
test_pred_df.head()
# In[ ]:
threshold = 0.50
list_of_pids = []
list_of_preds = []
for pid in np.unique(test_pred_df.patientId):
tmp_df = test_pred_df[test_pred_df.patientId == pid]
tmp_df = tmp_df[tmp_df.score >= threshold]
# Skip if empty
if len(tmp_df) == 0:
continue
predictionString = " ".join(["{} {} {} {} {}".format(row.score, row.x, row.y, row.w, row.h) for rownum, row in tmp_df.iterrows()])
list_of_preds.append(predictionString)
list_of_pids.append(pid)
positives = pd.DataFrame({"patientId": list_of_pids,
"PredictionString": list_of_preds})
negatives = pd.DataFrame({"patientId": list(set(test_patient_ids) - set(list_of_pids)),
"PredictionString": [""] * (len(test_patient_ids)-len(list_of_pids))})
submission = positives.append(negatives)
# In[ ]:
def create_download_link(df, title="Download CSV file", filename="RSNA_DataSet.csv"):
    """Return an IPython HTML anchor that downloads *df* as a CSV.

    The CSV is embedded in the link itself as a base64 data URI, so it works
    inside a sandboxed Kaggle/Jupyter notebook without writing a file.

    :param df: DataFrame to export.
    :param title: visible link text.
    :param filename: file name suggested to the browser for the download.
    :return: IPython.display.HTML object.
    """
    csv = df.to_csv(index=0)
    b64 = base64.b64encode(csv.encode())
    payload = b64.decode()
    # Bug fix: the download attribute previously contained the literal text
    # "(unknown)", so the `filename` argument was silently ignored; embed it
    # as a real format placeholder.
    html = '<a download="{filename}" href="data:text/csv;base64,{payload}" target="_blank">{title}</a>'
    html = html.format(payload=payload, title=title, filename=filename)
    return HTML(html)
# In[ ]:


# Inspect the first 100 box predictions (notebook display only).
test_pred_df.head(100)
| 24.331126 | 274 | 0.651606 | 978 | 7,348 | 4.733129 | 0.263804 | 0.015554 | 0.019443 | 0.012962 | 0.201771 | 0.136747 | 0.125513 | 0.102398 | 0.091596 | 0.069994 | 0 | 0.023461 | 0.18209 | 7,348 | 301 | 275 | 24.41196 | 0.746755 | 0.094311 | 0 | 0.052632 | 0 | 0.022556 | 0.210104 | 0.096506 | 0 | 0 | 0 | 0 | 0 | 1 | 0.015038 | false | 0 | 0.090226 | 0 | 0.120301 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e92f90fb3e8266f62a37e3918ac6d4ec8f50ca0 | 369 | py | Python | qt-python-ui/qt-python.py | mortengunnufsen/examples-Qt | 83e1b4c394863335852acc445fc0cf687ee7f9b6 | [
"BSD-3-Clause"
] | 2 | 2019-12-25T16:24:43.000Z | 2022-01-16T14:43:00.000Z | qt-python-ui/qt-python.py | mortengunnufsen/examples-Qt | 83e1b4c394863335852acc445fc0cf687ee7f9b6 | [
"BSD-3-Clause"
] | null | null | null | qt-python-ui/qt-python.py | mortengunnufsen/examples-Qt | 83e1b4c394863335852acc445fc0cf687ee7f9b6 | [
"BSD-3-Clause"
] | 5 | 2020-02-25T09:04:39.000Z | 2020-12-28T21:14:00.000Z | #!/usr/bin/env python
import sys
from qtpy import QtWidgets
def window():
    """Create and show a minimal Qt window containing a greeting label,
    then enter the Qt event loop until the window is closed."""
    app = QtWidgets.QApplication(sys.argv)
    main_widget = QtWidgets.QWidget()
    label = QtWidgets.QLabel(main_widget)
    label.setText("Hello World!")
    main_widget.setGeometry(100, 100, 200, 50)
    label.move(50, 20)
    main_widget.setWindowTitle("hello")
    main_widget.show()
    # Propagate Qt's exit code to the shell.
    sys.exit(app.exec_())


if __name__ == '__main__':
    window()
2e9316c66aa980440c5346bc184412999eb69eb1 | 7,321 | py | Python | core/admin.py | StateArchivesOfNorthCarolina/ratom_server | 7fc2c179f1baee85c4b95cdf64a2d5d690d2482e | [
"MIT"
] | 1 | 2020-02-24T03:47:17.000Z | 2020-02-24T03:47:17.000Z | core/admin.py | StateArchivesOfNorthCarolina/ratom_server | 7fc2c179f1baee85c4b95cdf64a2d5d690d2482e | [
"MIT"
] | 21 | 2020-01-20T13:29:51.000Z | 2022-03-12T00:11:12.000Z | core/admin.py | StateArchivesOfNorthCarolina/ratom_server | 7fc2c179f1baee85c4b95cdf64a2d5d690d2482e | [
"MIT"
] | 3 | 2020-08-27T09:20:34.000Z | 2022-01-24T06:20:14.000Z | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.utils.html import format_html
from django.db.models import Count
from django.template.defaultfilters import filesizeformat
from django.utils.translation import gettext_lazy as _
from core import models as ratom
@admin.register(ratom.Account)
class AccountAdmin(admin.ModelAdmin):
    """Read-mostly admin for Accounts: no bulk actions, no deletion."""

    actions = None  # hide the actions dropdown (incl. bulk delete)
    list_display = ("title",)

    def has_delete_permission(self, request, obj=None):
        # Accounts must never be deleted through the admin UI.
        return False
@admin.register(ratom.User)
class CustomUserAdmin(UserAdmin):
    """Admin for the custom email-based User model.

    Mirrors Django's stock UserAdmin but drops the username field: users are
    identified, searched, and ordered by email.
    """

    model = ratom.User
    list_display = (
        "pk",
        "email",
        "is_staff",
        "is_active",
        "is_superuser",
    )
    list_filter = (
        "is_staff",
        "is_active",
        "is_superuser",
    )
    # Field layout for the change form.
    fieldsets = (
        (None, {"fields": ("email", "password")}),
        (
            _("Permissions"),
            {
                "fields": (
                    "is_active",
                    "is_staff",
                    "is_superuser",
                    "groups",
                    "user_permissions",
                ),
            },
        ),
        (_("Important dates"), {"fields": ("last_login", "date_joined")}),
    )
    # Field layout for the add form ("wide" is a stock admin CSS class).
    add_fieldsets = (
        (
            None,
            {
                "classes": ("wide",),
                "fields": (
                    "email",
                    "password1",
                    "password2",
                    "is_active",
                    "is_staff",
                    "is_superuser",
                ),
            },
        ),
    )
    search_fields = ("email",)
    ordering = ("email",)
@admin.register(ratom.Message)
class MessageAdmin(admin.ModelAdmin):
    """Read-oriented admin for Messages with a rendered audit-change history."""

    # Audit fields whose changes carry no extra information in the rendered
    # history table (date_processed duplicates the row timestamp; updated_by
    # is shown in its own column), so they are skipped in the diff display.
    IGNORE_CHANGES = ["date_processed", "updated_by"]
    list_display = (
        "pk",
        "source_id",
        "msg_to",
        "msg_from",
        "sent_date",
        "subject",
        "account",
    )
    # All message content is immutable through the admin.
    readonly_fields = (
        "get_history",
        "inserted_on",
        "directory",
        "source_id",
        "sent_date",
        "subject",
        "msg_to",
        "msg_from",
        "msg_cc",
        "msg_bcc",
        "body",
        "errors",
        "account",
        "file",
        "headers",
    )
    list_filter = ("sent_date", "account")
    search_fields = ("body", "source_id")
    date_hierarchy = "sent_date"
    raw_id_fields = ("audit", "file")
    ordering = ("-sent_date",)
    fieldsets = (
        ("Message Metadata", {"fields": ("account", "file", "inserted_on")}),
        ("Headers", {"classes": ("collapse",), "fields": ("headers",),}),
        (
            "Message",
            {
                "classes": ("collapse",),
                "fields": (
                    "directory",
                    "source_id",
                    "sent_date",
                    "subject",
                    "msg_to",
                    "msg_from",
                    "msg_cc",
                    "msg_bcc",
                    "body",
                ),
            },
        ),
        ("Errors", {"classes": ("collapse",), "fields": ("errors",)}),
        ("Message History", {"fields": ("audit", "get_history",)}),
    )

    def get_history(self, instance):
        """
        Returns an html representation of the MessageAudit's history. Not
        every change is important: date_processed is a change but doesn't
        give any more information than the Date and Time column reveals.

        :param instance: Message (its audit's history is rendered)
        :return: SafeString
        """
        histories = instance.audit.history.all().order_by("history_date")
        history_line = "<table><tr><th>Date and Time</th><th>Field</th><th>Changed From</th><th>Changed To</th><th>User</th></tr>"
        # Pairs come back newest-first; diff each record against its
        # predecessor and emit one row per interesting field change.
        for new_record, old_record in self._get_history_pairs(histories):
            if new_record:
                delta = new_record.diff_against(old_record)
                for change in delta.changes:
                    if change.field not in self.IGNORE_CHANGES:
                        history_line += (
                            f"<tr>"
                            f"<td>{new_record.history_date.strftime('%Y-%m-%d %H:%M:%S')}</td>"
                            f"<td>{change.field}</td>"
                            f"<td>{change.old}</td>"
                            f"<td>{change.new}</td>"
                            f"<td>{ratom.User.objects.filter(pk=new_record.updated_by_id).first()}"
                            f"</tr>"
                        )
        # The oldest entry corresponds to the original import.
        first = histories.first()
        history_line += (
            f"<tr>"
            f"<td>{first.history_date.strftime('%Y-%m-%d %H:%M:%S')}</td>"
            f"<td colspan=3>Message Imported</td>"
            f"</tr>"
        )
        history_line += "</table>"
        # NOTE(review): history_line is passed to format_html as the format
        # string itself; a field value containing '{' or '}' would raise --
        # consider building with format_html_join instead.
        return format_html(history_line)

    def _get_history_pairs(self, histories):
        """
        Yields pairs of histories. This function assumes a queryset of
        histories with the oldest entry first in the set.

        :param histories: queryset ordered oldest-first
        :return: tuple(MessageAuditHistory, MessageAuditHistory)
        """
        new = None
        old = None
        hist_list = list(histories)
        # A single-entry history has no predecessor to diff against; the
        # caller skips (None, x) pairs.
        if len(hist_list) == 1:
            yield None, hist_list.pop()
        # pop() takes from the end, so pairs are yielded newest-first as
        # (newer, immediately-older); `old` is reused as the next `new`.
        while hist_list:
            new = old
            if not new:
                new = hist_list.pop()
            old = hist_list.pop()
            yield new, old
@admin.register(ratom.MessageAudit)
class MessageAuditAdmin(admin.ModelAdmin):
    """Admin for per-message audit state (processing / record flags)."""

    list_display = (
        "pk",
        "message",
        "processed",
        "is_record",
        "date_processed",
        "updated_by",
    )
    # Avoid one extra query per row when rendering the message column.
    list_select_related = ("message",)
    list_filter = ("is_record", "processed", "message__account")
    search_fields = ("message__body", "pk")
    date_hierarchy = "date_processed"
    ordering = ("-pk",)
    fieldsets = (
        ("Status", {"fields": (("processed", "is_record", "needs_redaction"),)},),
        ("Restrictions", {"fields": ("is_restricted", "restricted_until")}),
        ("Labels", {"fields": ("labels",)}),
    )
@admin.register(ratom.File)
class FileAdmin(admin.ModelAdmin):
    """Admin for imported files, annotated with the live message count."""

    list_display = (
        "pk",
        "account",
        "filename",
        "reported_total_messages",
        "imported_total_messages",
        "message_errors",
        "total_file_size",
        "date_imported",
        "import_status",
    )
    # Bug fix: readonly_fields was assigned twice; the second assignment
    # ("unique_paths",) silently replaced the first ("account",), leaving
    # the account editable. Merge both into a single tuple.
    readonly_fields = ("account", "unique_paths")
    list_filter = ("import_status", "account")
    ordering = ("-date_imported",)
    search_fields = ("filename", "pk")

    def get_queryset(self, request):
        """Annotate each file with its distinct message count for display."""
        queryset = super().get_queryset(request)
        queryset = queryset.annotate(_message_count=Count("message", distinct=True))
        return queryset

    def imported_total_messages(self, obj):
        """Number of messages actually imported (from the annotation)."""
        return obj._message_count

    def message_errors(self, obj):
        """Count of import errors, or empty string when there were none."""
        return len(obj.errors) if obj.errors else ""

    def total_file_size(self, obj):
        """Human-readable file size via Django's filesizeformat."""
        return filesizeformat(obj.file_size)
@admin.register(ratom.Label)
class LabelAdmin(admin.ModelAdmin):
    """Simple admin for Labels, grouped and ordered by type then name."""

    list_display = ("pk", "type", "name")
    list_filter = ("type",)
    search_fields = ("type", "name")
    ordering = ("type", "name")
| 29.639676 | 130 | 0.511406 | 703 | 7,321 | 5.116643 | 0.283073 | 0.005838 | 0.030025 | 0.010008 | 0.128996 | 0.091187 | 0.052822 | 0.052822 | 0.052822 | 0.052822 | 0 | 0.000839 | 0.348996 | 7,321 | 246 | 131 | 29.760163 | 0.753882 | 0.059964 | 0 | 0.312796 | 0 | 0.014218 | 0.242809 | 0.048237 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033175 | false | 0.014218 | 0.07109 | 0.018957 | 0.327014 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e97acf2470e0d21cbc675a63fe3d4483f62b6c4 | 682 | py | Python | appengine/findit/services/test/build_url_test.py | xinghun61/infra | b5d4783f99461438ca9e6a477535617fadab6ba3 | [
"BSD-3-Clause"
] | 2 | 2021-04-13T21:22:18.000Z | 2021-09-07T02:11:57.000Z | appengine/findit/services/test/build_url_test.py | asdfghjjklllllaaa/infra | 8f63af54e46194cd29291813f2790ff6e986804d | [
"BSD-3-Clause"
] | 21 | 2020-09-06T02:41:05.000Z | 2022-03-02T04:40:01.000Z | appengine/findit/services/test/build_url_test.py | xinghun61/infra | b5d4783f99461438ca9e6a477535617fadab6ba3 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from services import build_url
from waterfall.test.wf_testcase import WaterfallTestCase
class FlakeReportUtilTest(WaterfallTestCase):
def testCreateBuildUrl(self):
master_name = 'master_name'
builder_name = 'builder_name'
build_number = 321
build_link = build_url.CreateBuildUrl(master_name, builder_name,
build_number)
self.assertIn(master_name, build_link)
self.assertIn(builder_name, build_link)
self.assertIn(str(build_number), build_link)
| 34.1 | 72 | 0.73607 | 87 | 682 | 5.563218 | 0.574713 | 0.082645 | 0.092975 | 0.086777 | 0.210744 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012868 | 0.202346 | 682 | 19 | 73 | 35.894737 | 0.876838 | 0.227273 | 0 | 0 | 0 | 0 | 0.043977 | 0 | 0 | 0 | 0 | 0 | 0.25 | 1 | 0.083333 | false | 0 | 0.166667 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e9aa47756ac21e3388a034195d347bd40bff78f | 3,615 | py | Python | deepjet_geometric/datasets/djc_v2_dataset.py | lgray/deepjet-geometric | 2efe7ef80fecb470d3ef55a4ec67bbb19b45a846 | [
"MIT"
] | null | null | null | deepjet_geometric/datasets/djc_v2_dataset.py | lgray/deepjet-geometric | 2efe7ef80fecb470d3ef55a4ec67bbb19b45a846 | [
"MIT"
] | null | null | null | deepjet_geometric/datasets/djc_v2_dataset.py | lgray/deepjet-geometric | 2efe7ef80fecb470d3ef55a4ec67bbb19b45a846 | [
"MIT"
] | 3 | 2021-02-10T13:31:54.000Z | 2022-03-24T17:24:05.000Z | import os.path as osp
import glob
import h5py
import numpy as np
from tqdm import tqdm
import torch
from torch_geometric.data import Data
from torch_geometric.data import Dataset
class DeepJetCoreV2(Dataset):
    r'''HDF5-backed graph dataset in the old DeepJetCore format.

    Per-event arrays stored in each ``*.z`` HDF5 raw file:

    input z0: (FatJat basic stats, 1)
        'fj_pt',
        'fj_eta',
        'fj_sdmass',
        'fj_n_sdsubjets',
        'fj_doubleb',
        'fj_tau21',
        'fj_tau32',
        'npv',
        'npfcands',
        'ntracks',
        'nsv'
    input x0: (FatJet info, 1) (also input z1???)
    input x1: (PFCandidates, max 100)
    input x2: (Tracks, max 60)
    input x3: (SVs, max 5)

    # truth categories are QCD=0 / Hbb=1
    '''

    # xrootd location of the raw files; see download() below.
    url = 'root://cmseos.fnal.gov//store/group/lpccoffea/lgray/old_format'

    def __init__(self, root, transform=None):
        super(DeepJetCoreV2, self).__init__(root, transform)
        # strides[i] = cumulative number of events before raw file i.
        self.strides = [0]
        self.calculate_offsets()

    def calculate_offsets(self):
        """Read the event count ('n') from every raw file and build the
        cumulative index used to map a global idx to (file, local idx)."""
        for path in self.raw_paths:
            with h5py.File(path, 'r') as f:
                self.strides.append(f['n'][()][0])
        self.strides = np.cumsum(self.strides)

    def download(self):
        # Files must be fetched manually via xrootd; no automatic download.
        raise RuntimeError(
            'Dataset not found. Please download it from {} and move all '
            '*.z files to {}'.format(self.url, self.raw_dir))

    def len(self):
        # Total number of events across all raw files.
        return self.strides[-1]

    @property
    def raw_file_names(self):
        # Sorted so that strides and file order stay consistent across runs.
        raw_files = sorted(glob.glob(osp.join(self.raw_dir, '*.z')))
        return raw_files

    @property
    def processed_file_names(self):
        # No on-disk preprocessing; events are decoded on the fly in get().
        return []

    def get(self, idx):
        """Build a torch_geometric Data object for global event index idx."""
        # NOTE(review): with the default side='left', idx == 0 gives
        # file_idx == -1 (clamped to 0) and idx_in_file == -1, i.e. the last
        # event of file 0 -- indices appear rotated by one within each file.
        # Looks like an off-by-one; confirm intent before changing.
        file_idx = np.searchsorted(self.strides, idx) - 1
        idx_in_file = idx - self.strides[max(0, file_idx)] - 1
        if file_idx >= self.strides.size:
            raise Exception(f'{idx} is beyond the end of the event list {self.strides[-1]}')
        # No precomputed graph edges; downstream models build their own.
        edge_index = torch.empty((2,0), dtype=torch.long)
        with h5py.File(self.raw_paths[file_idx]) as f:
            x_jet = np.squeeze(f['x0'][idx_in_file])
            # Arrays are zero-padded: count the non-empty rows, but keep at
            # least one all-zero row so the tensors are never empty.
            Npfc = np.any(f['x1'][idx_in_file] != 0.0, axis=-1).sum()
            if Npfc > 0:
                x_pfc = f['x1'][idx_in_file,:Npfc,:]
            else:
                Npfc = 1
                x_pfc = np.zeros((1,10), dtype=np.float32)
            Ntrack = np.any(f['x2'][idx_in_file] != 0.0, axis=-1).sum()
            if Ntrack > 0:
                x_track = f['x2'][idx_in_file,:Ntrack,:]
            else:
                Ntrack = 1
                x_track = np.zeros((1,30), dtype=np.float32)
            Nsv = np.any(f['x3'][idx_in_file] != 0.0, axis=-1).sum()
            if Nsv > 0:
                x_sv = f['x3'][idx_in_file,:Nsv,:]
            else:
                Nsv = 1
                x_sv = np.zeros((1,14), dtype=np.float32)

            # convert to torch
            x_jet = torch.from_numpy(x_jet)[None]
            x_pfc = torch.from_numpy(x_pfc)
            x_track = torch.from_numpy(x_track)
            x_sv = torch.from_numpy(x_sv)

            # convert to non-onehot categories
            y = torch.from_numpy(f['y0'][idx_in_file])
            y = torch.argmax(y)

            # "z0" is the basic jet observables pt, eta, phi
            # store this as the usual x
            x = torch.from_numpy(f['z0'][idx_in_file][None])

            return Data(x=x, edge_index=edge_index, y=y,
                        x_jet=x_jet, x_pfc=x_pfc, x_track=x_track, x_sv=x_sv)
| 30.897436 | 92 | 0.517289 | 487 | 3,615 | 3.661191 | 0.316222 | 0.028043 | 0.050477 | 0.033651 | 0.09198 | 0.035334 | 0.035334 | 0.035334 | 0.035334 | 0 | 0 | 0.031801 | 0.356293 | 3,615 | 116 | 93 | 31.163793 | 0.734422 | 0.137483 | 0 | 0.073529 | 0 | 0 | 0.074617 | 0.021124 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102941 | false | 0 | 0.117647 | 0.029412 | 0.308824 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e9ab8241a8266c7db8bfa58384994c04aa38a39 | 1,427 | py | Python | heat_integrationtests/functional/test_versionnegotiation.py | noironetworks/heat | 7cdadf1155f4d94cf8f967635b98e4012a7acfb7 | [
"Apache-2.0"
] | 265 | 2015-01-02T09:33:22.000Z | 2022-03-26T23:19:54.000Z | heat_integrationtests/functional/test_versionnegotiation.py | noironetworks/heat | 7cdadf1155f4d94cf8f967635b98e4012a7acfb7 | [
"Apache-2.0"
] | 8 | 2015-09-01T15:43:19.000Z | 2021-12-14T05:18:23.000Z | heat_integrationtests/functional/test_versionnegotiation.py | noironetworks/heat | 7cdadf1155f4d94cf8f967635b98e4012a7acfb7 | [
"Apache-2.0"
] | 295 | 2015-01-06T07:00:40.000Z | 2021-09-06T08:05:06.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import requests
from heat_integrationtests.functional import functional_base
# Template for the expected version-discovery body; the None "href" is a
# placeholder that is replaced with the real API root at test time.
expected_version_dict = {
    "versions": [
        {"links": [{"href": None, "rel": "self"}],
         "status": "CURRENT", "id": "v1.0"}
    ]
}
class VersionNegotiationTestCase(functional_base.FunctionalTestsBase):
    """Checks the unauthenticated version-negotiation endpoint."""

    def test_authless_version_negotiation(self):
        """A GET on the API root must return 300 with the version list."""
        from copy import deepcopy

        # NOTE(pas-ha): this will grab the public endpoint by default
        heat_url = self.identity_client.get_endpoint_url(
            'orchestration', region=self.conf.region)
        heat_api_root = heat_url.split('/v1')[0]
        # Fill the href on a copy instead of mutating the module-level
        # template, so repeated runs see pristine data.
        expected = deepcopy(expected_version_dict)
        expected['versions'][0]['links'][0]['href'] = heat_api_root + '/v1/'
        r = requests.get(heat_api_root)
        self.assertEqual(300, r.status_code, 'got response %s' % r.text)
        self.assertEqual(expected, r.json())
| 38.567568 | 78 | 0.685354 | 188 | 1,427 | 5.074468 | 0.595745 | 0.062893 | 0.059748 | 0.033543 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.0124 | 0.20883 | 1,427 | 36 | 79 | 39.638889 | 0.832595 | 0.424667 | 0 | 0 | 0 | 0 | 0.118012 | 0 | 0 | 0 | 0 | 0 | 0.111111 | 1 | 0.055556 | false | 0 | 0.111111 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e9cb527ff1f857656488e0800350e5fc6febf44 | 13,024 | py | Python | mmcls/models/backbones/t2t_vit.py | kivanctezoren/mmclassification | 5c73d4b29f61c47d379bbec4621a465099e64bd7 | [
"Apache-2.0"
] | 1 | 2021-05-26T08:16:01.000Z | 2021-05-26T08:16:01.000Z | mmcls/models/backbones/t2t_vit.py | kivanctezoren/mmclassification | 5c73d4b29f61c47d379bbec4621a465099e64bd7 | [
"Apache-2.0"
] | null | null | null | mmcls/models/backbones/t2t_vit.py | kivanctezoren/mmclassification | 5c73d4b29f61c47d379bbec4621a465099e64bd7 | [
"Apache-2.0"
] | 1 | 2021-12-11T07:52:59.000Z | 2021-12-11T07:52:59.000Z | # Copyright (c) OpenMMLab. All rights reserved.
from copy import deepcopy
from typing import Sequence
import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import build_norm_layer
from mmcv.cnn.bricks.transformer import FFN
from mmcv.cnn.utils.weight_init import trunc_normal_
from mmcv.runner.base_module import BaseModule, ModuleList
from ..builder import BACKBONES
from ..utils import MultiheadAttention
from .base_backbone import BaseBackbone
class T2TTransformerLayer(BaseModule):
"""Transformer Layer for T2T_ViT.
Comparing with :obj:`TransformerEncoderLayer` in ViT, it supports
different ``input_dims`` and ``embed_dims``.
Args:
embed_dims (int): The feature dimension.
num_heads (int): Parallel attention heads.
feedforward_channels (int): The hidden dimension for FFNs
input_dims (int, optional): The input token dimension.
Defaults to None.
drop_rate (float): Probability of an element to be zeroed
after the feed forward layer. Defaults to 0.
attn_drop_rate (float): The drop out rate for attention output weights.
Defaults to 0.
drop_path_rate (float): Stochastic depth rate. Defaults to 0.
num_fcs (int): The number of fully-connected layers for FFNs.
Defaults to 2.
qkv_bias (bool): enable bias for qkv if True. Defaults to True.
qk_scale (float, optional): Override default qk scale of
``(input_dims // num_heads) ** -0.5`` if set. Defaults to None.
act_cfg (dict): The activation config for FFNs.
Defaluts to ``dict(type='GELU')``.
norm_cfg (dict): Config dict for normalization layer.
Defaults to ``dict(type='LN')``.
init_cfg (dict, optional): Initialization config dict.
Defaults to None.
Notes:
In general, ``qk_scale`` should be ``head_dims ** -0.5``, i.e.
``(embed_dims // num_heads) ** -0.5``. However, in the official
code, it uses ``(input_dims // num_heads) ** -0.5``, so here we
keep the same with the official implementation.
"""
def __init__(self,
embed_dims,
num_heads,
feedforward_channels,
input_dims=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
num_fcs=2,
qkv_bias=False,
qk_scale=None,
act_cfg=dict(type='GELU'),
norm_cfg=dict(type='LN'),
init_cfg=None):
super(T2TTransformerLayer, self).__init__(init_cfg=init_cfg)
self.v_shortcut = True if input_dims is not None else False
input_dims = input_dims or embed_dims
self.norm1_name, norm1 = build_norm_layer(
norm_cfg, input_dims, postfix=1)
self.add_module(self.norm1_name, norm1)
self.attn = MultiheadAttention(
input_dims=input_dims,
embed_dims=embed_dims,
num_heads=num_heads,
attn_drop=attn_drop_rate,
proj_drop=drop_rate,
dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate),
qkv_bias=qkv_bias,
qk_scale=qk_scale or (input_dims // num_heads)**-0.5,
v_shortcut=self.v_shortcut)
self.norm2_name, norm2 = build_norm_layer(
norm_cfg, embed_dims, postfix=2)
self.add_module(self.norm2_name, norm2)
self.ffn = FFN(
embed_dims=embed_dims,
feedforward_channels=feedforward_channels,
num_fcs=num_fcs,
ffn_drop=drop_rate,
dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate),
act_cfg=act_cfg)
@property
def norm1(self):
return getattr(self, self.norm1_name)
@property
def norm2(self):
    """The second normalization layer, looked up by its registered name."""
    layer_name = self.norm2_name
    return getattr(self, layer_name)
def forward(self, x):
    """Attention (with optional value shortcut) followed by the FFN."""
    attn_out = self.attn(self.norm1(x))
    # With the value shortcut the attention output replaces ``x``
    # outright; otherwise a standard residual connection is used.
    x = attn_out if self.v_shortcut else x + attn_out
    return self.ffn(self.norm2(x), identity=x)
class T2TModule(BaseModule):
    """Tokens-to-Token module.

    Progressively aggregates neighbouring tokens with overlapping "soft
    splits" (``nn.Unfold``) interleaved with lightweight transformer
    layers, modelling local image structure while shrinking the token
    sequence length step by step.

    Args:
        img_size (int): Input image size.
        in_channels (int): Number of input channels.
        embed_dims (int): Embedding dimension of the output tokens.
        token_dims (int): Token dimension inside the T2T attention
            layers. Usually a small value (32 or 64) to reduce MACs.
        use_performer (bool): If True, use Performer self-attention
            instead of regular self-attention. Not implemented.
            Defaults to False.
        init_cfg (dict, optional): The extra config for initialization.
            Defaults to None.
    """

    def __init__(self,
                 img_size=224,
                 in_channels=3,
                 embed_dims=384,
                 token_dims=64,
                 use_performer=False,
                 init_cfg=None):
        super(T2TModule, self).__init__(init_cfg)

        self.embed_dims = embed_dims

        # Three overlapping soft splits: (kernel, stride, padding) each.
        for idx, (kernel, stride, pad) in enumerate([(7, 4, 2),
                                                     (3, 2, 1),
                                                     (3, 2, 1)]):
            unfold = nn.Unfold(kernel_size=(kernel, kernel),
                               stride=(stride, stride),
                               padding=(pad, pad))
            self.add_module(f'soft_split{idx}', unfold)

        if use_performer:
            raise NotImplementedError("Performer hasn't been implemented.")

        # Regular self-attention between the soft splits.
        self.attention1 = T2TTransformerLayer(
            input_dims=in_channels * 7 * 7,
            embed_dims=token_dims,
            num_heads=1,
            feedforward_channels=token_dims)
        self.attention2 = T2TTransformerLayer(
            input_dims=token_dims * 3 * 3,
            embed_dims=token_dims,
            num_heads=1,
            feedforward_channels=token_dims)
        self.project = nn.Linear(token_dims * 3 * 3, embed_dims)

        # Three soft splits with strides 4, 2 and 2 -> 16x reduction per side.
        self.num_patches = (img_size // (4 * 2 * 2))**2

    def forward(self, x):
        # Initial soft split of the raw image into overlapping patches.
        tokens = self.soft_split0(x).transpose(1, 2)

        for idx in (1, 2):
            # Re-structurization: attend, then fold back to a square map.
            tokens = getattr(self, f'attention{idx}')(tokens).transpose(1, 2)
            batch, channels, num_tokens = tokens.shape
            side = int(np.sqrt(num_tokens))
            tokens = tokens.reshape(batch, channels, side, side)
            # Next soft split.
            tokens = getattr(self, f'soft_split{idx}')(tokens).transpose(1, 2)

        # Project to the final embedding dimension.
        return self.project(tokens)
def get_sinusoid_encoding(n_position, embed_dims):
    """Generate a sinusoid position encoding table.

    Sinusoid encoding is a kind of relative position encoding introduced
    in `Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_.
    Even dimensions hold ``sin`` terms, odd dimensions hold ``cos`` terms
    of ``position / 10000^(2*(i//2)/embed_dims)``.

    Args:
        n_position (int): The length of the input token sequence.
        embed_dims (int): The position embedding dimension.

    Returns:
        :obj:`torch.FloatTensor`: The sinusoid encoding table of shape
        ``(1, n_position, embed_dims)``.
    """
    # Vectorized replacement of the original per-position Python loop:
    # broadcasting (n_position, 1) / (1, embed_dims) builds the full
    # angle table in one numpy operation.
    positions = np.arange(n_position, dtype=float)[:, None]
    dim_pairs = 2 * (np.arange(embed_dims) // 2)
    sinusoid_table = positions / np.power(10000, dim_pairs / embed_dims)
    sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2])  # dim 2i
    sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2])  # dim 2i+1
    return torch.FloatTensor(sinusoid_table).unsqueeze(0)
@BACKBONES.register_module()
class T2T_ViT(BaseBackbone):
    """Tokens-to-Token Vision Transformer (T2T-ViT).

    A PyTorch implementation of `Tokens-to-Token ViT: Training Vision
    Transformers from Scratch on ImageNet
    <https://arxiv.org/abs/2101.11986>`_

    Args:
        img_size (int): Input image size.
        in_channels (int): Number of input channels.
        embed_dims (int): Embedding dimension.
        t2t_cfg (dict): Extra config of Tokens-to-Token module.
            Defaults to an empty dict.
        drop_rate (float): Dropout rate after position embedding.
            Defaults to 0.
        num_layers (int): Num of transformer layers in encoder.
            Defaults to 14.
        out_indices (Sequence | int): Output from which stages.
            Defaults to -1, means the last stage.
        layer_cfgs (Sequence | dict): Configs of each transformer layer in
            encoder. Defaults to an empty dict.
        drop_path_rate (float): stochastic depth rate. Defaults to 0.
        norm_cfg (dict): Config dict for normalization layer. Defaults to
            ``dict(type='LN')``.
        final_norm (bool): Whether to add an additional layer to normalize
            the final feature map. Defaults to True.
        output_cls_token (bool): Whether to output the cls_token.
            Defaults to True.
        init_cfg (dict, optional): The Config for initialization.
            Defaults to None.
    """

    def __init__(self,
                 img_size=224,
                 in_channels=3,
                 embed_dims=384,
                 t2t_cfg=dict(),
                 drop_rate=0.,
                 num_layers=14,
                 out_indices=-1,
                 layer_cfgs=dict(),
                 drop_path_rate=0.,
                 norm_cfg=dict(type='LN'),
                 final_norm=True,
                 output_cls_token=True,
                 init_cfg=None):
        super(T2T_ViT, self).__init__(init_cfg)

        # Token-to-Token Module
        self.tokens_to_token = T2TModule(
            img_size=img_size,
            in_channels=in_channels,
            embed_dims=embed_dims,
            **t2t_cfg)
        num_patches = self.tokens_to_token.num_patches

        # Class token
        self.output_cls_token = output_cls_token
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dims))

        # Fixed (non-learnable) sinusoid position embedding; registered as
        # a buffer so it moves with the module but is not trained.
        sinusoid_table = get_sinusoid_encoding(num_patches + 1, embed_dims)
        self.register_buffer('pos_embed', sinusoid_table)
        self.drop_after_pos = nn.Dropout(p=drop_rate)

        if isinstance(out_indices, int):
            out_indices = [out_indices]
        assert isinstance(out_indices, Sequence), \
            f'"out_indices" must be a sequence or int, ' \
            f'get {type(out_indices)} instead.'
        # Copy to a mutable list: callers may legally pass a tuple (it is a
        # Sequence), and the normalization below assigns by index, which
        # would raise TypeError on a tuple.
        out_indices = list(out_indices)
        for i, index in enumerate(out_indices):
            if index < 0:
                out_indices[i] = num_layers + index
            assert out_indices[i] >= 0, f'Invalid out_indices {index}'
        self.out_indices = out_indices

        # Stochastic depth decay rule: linearly increasing drop path rate.
        dpr = list(np.linspace(0, drop_path_rate, num_layers))
        self.encoder = ModuleList()
        for i in range(num_layers):
            if isinstance(layer_cfgs, Sequence):
                layer_cfg = layer_cfgs[i]
            else:
                layer_cfg = deepcopy(layer_cfgs)
            # Per-layer config overrides the defaults below.
            layer_cfg = {
                'embed_dims': embed_dims,
                'num_heads': 6,
                'feedforward_channels': 3 * embed_dims,
                'drop_path_rate': dpr[i],
                'qkv_bias': False,
                'norm_cfg': norm_cfg,
                **layer_cfg
            }
            self.encoder.append(T2TTransformerLayer(**layer_cfg))

        self.final_norm = final_norm
        if final_norm:
            self.norm = build_norm_layer(norm_cfg, embed_dims)[1]
        else:
            self.norm = nn.Identity()

    def init_weights(self):
        """Initialize the class token unless loading pretrained weights."""
        super().init_weights()

        if (isinstance(self.init_cfg, dict)
                and self.init_cfg['type'] == 'Pretrained'):
            # Suppress custom init if use pretrained model.
            return

        trunc_normal_(self.cls_token, std=.02)

    def forward(self, x):
        B = x.shape[0]
        x = self.tokens_to_token(x)
        num_patches = self.tokens_to_token.num_patches
        patch_resolution = [int(np.sqrt(num_patches))] * 2

        # Prepend the class token and add the fixed position embedding.
        cls_tokens = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        x = x + self.pos_embed
        x = self.drop_after_pos(x)

        outs = []
        for i, layer in enumerate(self.encoder):
            x = layer(x)

            if i == len(self.encoder) - 1 and self.final_norm:
                x = self.norm(x)

            if i in self.out_indices:
                B, _, C = x.shape
                # Split the sequence back into a spatial patch map and the
                # class token.
                patch_token = x[:, 1:].reshape(B, *patch_resolution, C)
                patch_token = patch_token.permute(0, 3, 1, 2)
                cls_token = x[:, 0]
                if self.output_cls_token:
                    out = [patch_token, cls_token]
                else:
                    out = patch_token
                outs.append(out)

        return tuple(outs)
| 35.391304 | 79 | 0.589527 | 1,653 | 13,024 | 4.436177 | 0.196612 | 0.038047 | 0.014728 | 0.012273 | 0.191872 | 0.162962 | 0.127642 | 0.11946 | 0.109369 | 0.109369 | 0 | 0.021434 | 0.315802 | 13,024 | 367 | 80 | 35.487738 | 0.801481 | 0.313268 | 0 | 0.201878 | 0 | 0 | 0.032705 | 0 | 0 | 0 | 0 | 0 | 0.00939 | 1 | 0.051643 | false | 0 | 0.056338 | 0.014085 | 0.159624 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2e9e32236f3aacd45c54c96674594afc7e16ce46 | 2,313 | py | Python | geopyspark/tests/geotrellis/rddwrapper_test.py | geotrellis/geotrellis-python | 97bcb17a56ed4b4059e2f0dbab97706562cac692 | [
"Apache-2.0"
] | 182 | 2017-01-26T18:11:57.000Z | 2021-09-07T07:39:52.000Z | geopyspark/tests/geotrellis/rddwrapper_test.py | geotrellis/geotrellis-python | 97bcb17a56ed4b4059e2f0dbab97706562cac692 | [
"Apache-2.0"
] | 428 | 2016-12-21T14:53:43.000Z | 2019-07-14T09:33:54.000Z | geopyspark/tests/geotrellis/rddwrapper_test.py | geotrellis/geotrellis-python | 97bcb17a56ed4b4059e2f0dbab97706562cac692 | [
"Apache-2.0"
] | 66 | 2017-01-05T19:16:15.000Z | 2022-02-23T12:49:50.000Z | import os
import sys
import numpy as np
import pytest
import unittest
from geopyspark.geotrellis import Tile
from geopyspark.geotrellis.layer import RasterLayer
from geopyspark.geotrellis.constants import LayerType
from geopyspark.tests.base_test_class import BaseTestClass
from pyspark.storagelevel import StorageLevel
class LayerWrapperTest(BaseTestClass):
    """Tests for RDD-wrapper behavior (persist/cache/misc) of RasterLayer."""

    @pytest.fixture(autouse=True)
    def tearDown(self):
        yield
        BaseTestClass.pysc._gateway.close()

    def _build_raster_layer(self):
        # Single-tile spatial RasterLayer fixture; this identical setup was
        # previously duplicated verbatim in all three tests.
        arr = np.array([[[1, 1, 1, 1]],
                        [[2, 2, 2, 2]],
                        [[3, 3, 3, 3]],
                        [[4, 4, 4, 4]]], dtype=int)
        tile = Tile(arr, 'INT', -500)
        rdd = BaseTestClass.pysc.parallelize([(self.projected_extent, tile)])
        return RasterLayer.from_numpy_rdd(LayerType.SPATIAL, rdd)

    def test_persist(self):
        raster_rdd = self._build_raster_layer()

        self.assertEqual(raster_rdd.is_cached, False)
        raster_rdd.persist(StorageLevel.MEMORY_ONLY)
        self.assertEqual(raster_rdd.is_cached, True)
        raster_rdd.unpersist()
        self.assertEqual(raster_rdd.is_cached, False)

    def test_cache(self):
        raster_rdd = self._build_raster_layer()

        self.assertEqual(raster_rdd.is_cached, False)
        raster_rdd.cache()
        self.assertEqual(raster_rdd.is_cached, True)

    def test_miscellaneous(self):
        raster_rdd = self._build_raster_layer()

        self.assertEqual(raster_rdd.count(), 1)
        self.assertTrue(raster_rdd.getNumPartitions() >= 1)
        self.assertTrue(len(raster_rdd.wrapped_rdds()) >= 1)
        self.assertEqual(str(raster_rdd), repr(raster_rdd))
# Allow running this test module directly with the unittest runner.
if __name__ == "__main__":
    unittest.main()
| 33.521739 | 77 | 0.596628 | 278 | 2,313 | 4.798561 | 0.251799 | 0.107946 | 0.013493 | 0.107946 | 0.528486 | 0.528486 | 0.528486 | 0.446777 | 0.446777 | 0.446777 | 0 | 0.035693 | 0.273238 | 2,313 | 68 | 78 | 34.014706 | 0.757882 | 0 | 0 | 0.490566 | 0 | 0 | 0.00735 | 0 | 0 | 0 | 0 | 0 | 0.169811 | 1 | 0.075472 | false | 0 | 0.188679 | 0 | 0.283019 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ea297ebd14c9f18c3e486da0ab8842cf15efdc3 | 1,476 | py | Python | core/anchor.py | calmisential/TensorFlow2.0_SSD | 737a631608a8c2d3f2213f9139c1c0af0063f1d1 | [
"MIT"
] | 90 | 2019-10-05T13:19:22.000Z | 2022-03-09T22:49:23.000Z | core/anchor.py | calmisential/TensorFlow2.0_SSD | 737a631608a8c2d3f2213f9139c1c0af0063f1d1 | [
"MIT"
] | 17 | 2019-10-28T11:13:05.000Z | 2022-02-22T12:33:23.000Z | core/anchor.py | calmisential/TensorFlow2.0_SSD | 737a631608a8c2d3f2213f9139c1c0af0063f1d1 | [
"MIT"
] | 54 | 2019-09-26T06:47:39.000Z | 2022-03-25T15:36:23.000Z | import numpy as np
import math
from configuration import IMAGE_HEIGHT, IMAGE_WIDTH, ASPECT_RATIOS, FEATURE_MAPS, \
DEFAULT_BOXES_SIZES, DOWNSAMPLING_RATIOS
from itertools import product
class DefaultBoxes:
    """Generate SSD-style default (prior) boxes for every feature map.

    All box parameters are taken from the global configuration module;
    coordinates are normalized to [0, 1] relative to the (square) image.
    """

    def __init__(self):
        # Only square inputs are supported.
        assert IMAGE_HEIGHT == IMAGE_WIDTH
        self.image_size = IMAGE_HEIGHT
        self.num_priors = len(ASPECT_RATIOS)
        self.feature_maps = FEATURE_MAPS
        self.default_boxes_sizes = DEFAULT_BOXES_SIZES
        self.aspect_ratios = ASPECT_RATIOS
        self.steps = DOWNSAMPLING_RATIOS

    def generate_boxes(self):
        rows = []
        for level, fmap in enumerate(self.feature_maps):
            cells = self.image_size / self.steps[level]
            # Two square boxes per location: the small scale and the
            # geometric mean of the small and large scales.
            small = self.default_boxes_sizes[level][0] / self.image_size
            big = math.sqrt(self.default_boxes_sizes[level][0]
                            * self.default_boxes_sizes[level][1]) / self.image_size
            for row, col in product(range(fmap[0]), repeat=2):
                # Box center, normalized to the image.
                cx = (col + 0.5) / cells
                cy = (row + 0.5) / cells
                rows.append([cx, cy, small, small])
                rows.append([cx, cy, big, big])
                # One rectangular box per configured aspect ratio.
                for ratio in self.aspect_ratios[level]:
                    root = math.sqrt(ratio)
                    rows.append([cx, cy, small * root, small / root])
        boxes = np.array(rows).reshape((-1, 4))
        return np.clip(boxes, a_min=0, a_max=1)
| 37.846154 | 116 | 0.603659 | 205 | 1,476 | 4.058537 | 0.302439 | 0.086538 | 0.122596 | 0.100962 | 0.194712 | 0.144231 | 0.120192 | 0 | 0 | 0 | 0 | 0.012548 | 0.298103 | 1,476 | 38 | 117 | 38.842105 | 0.790541 | 0.011518 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.033333 | 1 | 0.066667 | false | 0 | 0.133333 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ea338d3af063a419c39ebbdc6516e42c2946316 | 1,185 | py | Python | tests/test_complex.py | chenzhaiyu/abspy | c03d26e4ddb81d17467573ca2f399a9cb608a4be | [
"MIT"
] | 5 | 2022-01-14T17:15:35.000Z | 2022-03-11T09:51:49.000Z | tests/test_complex.py | chenzhaiyu/abspy | c03d26e4ddb81d17467573ca2f399a9cb608a4be | [
"MIT"
] | 2 | 2022-01-18T15:17:54.000Z | 2022-01-18T15:19:25.000Z | tests/test_complex.py | chenzhaiyu/abspy | c03d26e4ddb81d17467573ca2f399a9cb608a4be | [
"MIT"
] | 1 | 2022-01-15T08:12:08.000Z | 2022-01-15T08:12:08.000Z | from pathlib import Path
import numpy as np
from abspy import CellComplex
dir_tests = Path(__file__).parent
def example_cell_complex():
    """Simple CellComplex construction from specified planes and bounds."""
    # Two planes, each with its bounding box, and the overall initial bound.
    planes = np.array([[0, 1, 0, -50], [0, 0, 1, -50]])
    bounds = np.array([[[0, 50, 0], [100, 50, 100]],
                       [[0, 0, 50], [100, 50, 50]]])
    initial_bound = [[0, 0, 0], [100, 100, 100]]

    complex_ = CellComplex(planes, bounds, initial_bound=initial_bound,
                           build_graph=True)

    # Prioritise certain (vertical by default) planes, then build.
    complex_.prioritise_planes()
    complex_.construct()
    complex_.print_info()

    # Visualisation only works if a trimesh installation is found.
    complex_.visualise()

    # Persist the result as OBJ and PLM files.
    complex_.save_obj(dir_tests / 'test_output' / 'cells.obj', use_mtl=True)
    complex_.save_plm(dir_tests / 'test_output' / 'cells.plm')
# Run the demo when executed as a script.
if __name__ == '__main__':
    example_cell_complex()
| 26.931818 | 93 | 0.678481 | 163 | 1,185 | 4.705521 | 0.404908 | 0.172099 | 0.046936 | 0.062581 | 0.059974 | 0 | 0 | 0 | 0 | 0 | 0 | 0.047872 | 0.206751 | 1,185 | 43 | 94 | 27.55814 | 0.768085 | 0.297046 | 0 | 0 | 0 | 0 | 0.059333 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.176471 | 0 | 0.235294 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ea3aab54423adec72cba91766a80d7a10a99caf | 2,068 | py | Python | unicrypto/backends/pure/external/AES/ccm.py | ac-rn/unicrypto | bb3205612d2d7943817d55aa93de67806ceabf0d | [
"MIT"
] | 1 | 2022-03-15T23:23:03.000Z | 2022-03-15T23:23:03.000Z | unicrypto/backends/pure/external/AES/ccm.py | ac-rn/unicrypto | bb3205612d2d7943817d55aa93de67806ceabf0d | [
"MIT"
] | null | null | null | unicrypto/backends/pure/external/AES/ccm.py | ac-rn/unicrypto | bb3205612d2d7943817d55aa93de67806ceabf0d | [
"MIT"
] | null | null | null | from Cryptodome.Cipher import AES
import struct
# https://tools.ietf.org/html/rfc3610
def aesCCMEncrypt(plaintext, aad, key, nonce, macLen):
    """AES-CCM authenticated encryption (RFC 3610).

    Args:
        plaintext (bytes): data to encrypt and authenticate.
        aad (bytes): additional data that is authenticated but not encrypted.
        key (bytes): AES key.
        nonce (bytes): nonce of 7..13 bytes.
        macLen (int): authentication tag length; even, in 4..16.

    Returns:
        tuple: ``(ciphertext, tag)`` where ``tag`` is ``macLen`` bytes.

    Raises:
        ValueError: if ``macLen`` or the nonce length is out of range.
    """
    blockSize = 16  # AES block size
    if macLen not in (4, 6, 8, 10, 12, 14, 16):
        raise ValueError("Parameter 'mac_len' must be even and in the range 4..16 (not %d)" % macLen)
    if not (nonce and 7 <= len(nonce) <= 13):
        raise ValueError("Length of parameter 'nonce' must be in the range 7..13 bytes")

    # Encryption in CTR mode; the first keystream block (S_0) is reserved
    # for masking the MAC, the rest encrypts the plaintext.
    q = 15 - len(nonce)
    cipher = AES.new(key=key, mode=AES.MODE_CTR, nonce=struct.pack("B", q - 1) + nonce)
    s0 = cipher.encrypt(b'\x00'*16)  # S_0, XORed into the MAC below
    c = cipher.encrypt(plaintext)

    # CBC-MAC over B_0 || encoded AAD || padded plaintext (RFC 3610 sec. 2.2).
    pLen = len(plaintext)
    aadLen = len(aad)
    mac = AES.new(key=key, mode=AES.MODE_CBC, iv=b'\x00'*blockSize)
    flags = (64 * (aadLen > 0) + 8 * ((macLen - 2) // 2) + (q - 1))
    b0 = struct.pack("B", flags) + nonce + pLen.to_bytes(q, 'big')

    # AAD length encoding per RFC 3610: 2, 6 (0xFFFE prefix) or 10
    # (0xFFFF prefix) bytes depending on magnitude; omitted when no AAD.
    assocLenEncoded = b''
    if aadLen > 0:
        if aadLen < (2 ** 16 - 2 ** 8):
            encsize = 2
        elif aadLen < (2 ** 32):
            assocLenEncoded = b'\xFF\xFE'
            encsize = 4
        else:
            assocLenEncoded = b'\xFF\xFF'
            encsize = 8
        assocLenEncoded += aadLen.to_bytes(encsize, 'big')

    # Zero-pad each section to the block boundary. ``(-n) % blockSize`` is
    # zero for block-aligned data: the previous code computed
    # ``blockSize - (n % blockSize)`` which appended a spurious all-zero
    # block whenever n was already a multiple of 16 (including the empty
    # AAD case), yielding a MAC that disagrees with RFC 3610.
    aadPadded = assocLenEncoded + aad
    aadPadded += b'\x00' * ((-len(aadPadded)) % blockSize)
    ptxtPadded = plaintext + b'\x00' * ((-pLen) % blockSize)

    macData = b0 + aadPadded + ptxtPadded
    t = mac.encrypt(macData)[-16:]
    # Tag = CBC-MAC XOR S_0, truncated to the requested length (the
    # previous code always returned all 16 bytes regardless of macLen).
    tag = bytes(a ^ b for a, b in zip(t, s0))[:macLen]
    return (c, tag)
if __name__ == '__main__':
    import os

    # Manual check: compare this implementation against the library's CCM.
    key = b'\xff' * 16
    nonce = os.urandom(11)
    header = b'\xFF' * 40
    message = b'\xAA' * 1024

    reference = AES.new(key, AES.MODE_CCM, nonce)
    reference.update(header)
    expected_ct = reference.encrypt(message)
    expected_tag = reference.digest()
    print(expected_ct.hex())
    print(expected_tag.hex())

    ct, tag = aesCCMEncrypt(message, header, key, nonce, 16)
    print('other')
    print(ct.hex())
    print(tag.hex())

    if ct == expected_ct:
        print('Encryption OK!')
    else:
        print('Encryption fail')
| 25.85 | 95 | 0.640232 | 317 | 2,068 | 4.113565 | 0.353312 | 0.01227 | 0.029908 | 0.018405 | 0.035276 | 0.035276 | 0.035276 | 0 | 0 | 0 | 0 | 0.046567 | 0.190039 | 2,068 | 79 | 96 | 26.177215 | 0.73194 | 0.033366 | 0 | 0.035088 | 0 | 0 | 0.140492 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017544 | false | 0 | 0.052632 | 0 | 0.087719 | 0.157895 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ea49685661b1bbd513afcd73c45bc80909614c1 | 1,878 | py | Python | PyGenetic/mutation.py | Omair-R/PyGenetic | 27c97e0958b65b6163a6fb9e9e5089eb2dc7dae2 | [
"MIT"
] | 17 | 2021-09-08T08:21:03.000Z | 2022-03-24T16:57:02.000Z | PyGenetic/mutation.py | Omair-R/PyGenetic | 27c97e0958b65b6163a6fb9e9e5089eb2dc7dae2 | [
"MIT"
] | 1 | 2021-12-16T15:40:16.000Z | 2021-12-16T15:40:16.000Z | PyGenetic/mutation.py | Omair-R/PyGenetic | 27c97e0958b65b6163a6fb9e9e5089eb2dc7dae2 | [
"MIT"
] | 6 | 2021-12-16T14:55:15.000Z | 2022-01-11T18:05:35.000Z | import numpy as np
class MutationDecidor:
    """Apply a configurable, in-place mutation operator to chromosomes.

    Args:
        mutation_type (str): one of ``"random_resetting"``, ``"swap"`` or
            ``"bit_flip"``. Any other value raises ``KeyError`` at
            construction time.
        n_genes (int): number of genes per chromosome.
        mutation_propability (float): chance in [0, 1] that ``run``
            actually mutates the chromosome.
        low_boundery (float): lower bound for random-resetting values.
        high_boundery (float): upper bound for random-resetting values.
    """

    def __init__(self,
                 mutation_type,
                 n_genes,
                 mutation_propability,
                 low_boundery=0,
                 high_boundery=0):
        self.n_genes = n_genes
        self.mutation_propability = mutation_propability
        self.low_boundery = low_boundery
        self.high_boundery = high_boundery

        # Dispatch table; an unknown mutation_type fails fast here
        # (KeyError) instead of on the first run().
        self.mutation_types_dict: dict = {
            "random_resetting": self.random_resetting,
            "swap": self.swap,
            "bit_flip": self.bit_flip
        }
        self.mutation_function = self.mutation_types_dict[mutation_type]

    def run(self, chromosome: np.ndarray) -> None:
        """Apply the configured mutation to ``chromosome`` in place."""
        return self.mutation_function(chromosome)

    def random_resetting(self, chromosome: np.ndarray) -> None:
        """Overwrite one random gene with a uniform value in the bounds."""
        if self.chance_gen():
            mutation_point = np.random.choice(range(self.n_genes))
            chromosome[mutation_point] = np.random.uniform(
                self.low_boundery, self.high_boundery)

    def swap(self, chromosome: np.ndarray) -> None:
        """Exchange the values of two distinct random genes."""
        if self.chance_gen():
            first, second = np.random.choice(range(self.n_genes),
                                             size=2,
                                             replace=False)
            # RHS scalars are read before assignment, so this swap is safe.
            chromosome[first], chromosome[second] = (chromosome[second],
                                                     chromosome[first])

    def bit_flip(self, chromosome: np.ndarray) -> None:
        """Set one random gene to 0 if it equals 1, otherwise to 1."""
        if self.chance_gen():
            mutation_point = np.random.choice(range(self.n_genes))
            chromosome[
                mutation_point] = 0 if chromosome[mutation_point] == 1 else 1

    def chance_gen(self) -> bool:
        """Return True with probability ``mutation_propability``."""
        return bool(self.mutation_propability > np.random.uniform(0, 1))
| 34.145455 | 77 | 0.583067 | 205 | 1,878 | 5.097561 | 0.229268 | 0.080383 | 0.038278 | 0.080383 | 0.421053 | 0.345455 | 0.345455 | 0.24689 | 0.24689 | 0.24689 | 0 | 0.009509 | 0.328009 | 1,878 | 54 | 78 | 34.777778 | 0.818542 | 0 | 0 | 0.121951 | 0 | 0 | 0.014909 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.146341 | false | 0 | 0.02439 | 0.02439 | 0.243902 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ea4c520dd53a2a52f11b5e7a9e632d6c39aba9d | 3,480 | py | Python | tools/eval.py | jiaruixu/Detectron | baa3392405402438f8c339727aa9c0704dc9fa72 | [
"Apache-2.0"
] | null | null | null | tools/eval.py | jiaruixu/Detectron | baa3392405402438f8c339727aa9c0704dc9fa72 | [
"Apache-2.0"
] | null | null | null | tools/eval.py | jiaruixu/Detectron | baa3392405402438f8c339727aa9c0704dc9fa72 | [
"Apache-2.0"
] | null | null | null | """Evaluate a network with Detectron."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import cv2  # NOQA (Must import before importing caffe2 due to bug in cv2)
import os
import pprint
import re
import sys
import time

from caffe2.python import workspace

from detectron.core.config import assert_and_infer_cfg
from detectron.core.config import cfg
from detectron.core.config import merge_cfg_from_file
from detectron.core.config import merge_cfg_from_list
from detectron.core.test_engine import run_inference
from detectron.utils.logging import setup_logging
import detectron.utils.c2 as c2_utils
c2_utils.import_detectron_ops()
# OpenCL may be enabled by default in OpenCV3; disable it because it's not
# thread safe and causes unwanted GPU memory allocations.
cv2.ocl.setUseOpenCL(False)
def parse_args():
    """Parse command-line options; print help and exit if none are given."""
    parser = argparse.ArgumentParser(description='Test a Fast R-CNN network')
    parser.add_argument('--cfg',
                        dest='cfg_file',
                        help='optional config file',
                        default=None,
                        type=str)
    parser.add_argument('--wait',
                        dest='wait',
                        help='wait until net file exists',
                        default=True,
                        type=bool)
    parser.add_argument('--vis',
                        dest='vis',
                        help='visualize detections',
                        action='store_true')
    parser.add_argument('--multi-gpu-testing',
                        dest='multi_gpu_testing',
                        help='using cfg.NUM_GPUS for inference',
                        action='store_true')
    parser.add_argument('--range',
                        dest='range',
                        help='start (inclusive) and end (exclusive) indices',
                        default=None,
                        type=int,
                        nargs=2)
    # Everything left over is forwarded as raw config overrides.
    parser.add_argument('opts',
                        help='See detectron/core/config.py for all options',
                        default=None,
                        nargs=argparse.REMAINDER)

    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
if __name__ == '__main__':
    workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
    logger = setup_logging(__name__)
    args = parse_args()
    logger.info('Called with args:')
    logger.info(args)
    if args.cfg_file is not None:
        merge_cfg_from_file(args.cfg_file)
    if args.opts is not None:
        merge_cfg_from_list(args.opts)
    assert_and_infer_cfg()
    logger.info('Testing with config:')
    logger.info(pprint.pformat(cfg))

    # output_dir = get_output_dir(cfg.TRAIN.DATASETS, training=True)
    output_dir = '/mnt/fcav/self_training/object_detection/upperbound1/train/voc_GTA_caronly_train:cityscapes_caronly_train:voc_GTA_caronly_val/generalized_rcnn'

    # Pick the checkpoint with the *highest* iteration number. The previous
    # loop kept whichever matching file os.listdir happened to return last
    # (arbitrary order) and raised NameError when no checkpoint existed.
    checkpoint_iter = -1
    resume_weights_file = None
    for f in os.listdir(output_dir):
        iter_string = re.findall(r'(?<=model_iter)\d+(?=\.pkl)', f)
        if iter_string and int(iter_string[0]) > checkpoint_iter:
            checkpoint_iter = int(iter_string[0])
            resume_weights_file = f
    if resume_weights_file is None:
        raise IOError(
            'No checkpoint (model_iter*.pkl) found in {}'.format(output_dir))
    start_iter = checkpoint_iter + 1

    # Override the initialization weights with the found checkpoint
    weights_file = os.path.join(output_dir, resume_weights_file)
    logger.info(
        '========> Resuming from checkpoint {} at start iter {}'.
        format(weights_file, start_iter)
    )
    run_inference(
        weights_file,
        ind_range=args.range,
        multi_gpu_testing=args.multi_gpu_testing,
        check_expected_results=True,
    )
| 31.071429 | 161 | 0.675862 | 453 | 3,480 | 4.933775 | 0.403974 | 0.034899 | 0.045638 | 0.041163 | 0.112752 | 0.099776 | 0.036689 | 0.036689 | 0 | 0 | 0 | 0.007061 | 0.226724 | 3,480 | 111 | 162 | 31.351351 | 0.823486 | 0.100575 | 0 | 0.094737 | 0 | 0 | 0.195255 | 0.061879 | 0 | 0 | 0 | 0 | 0.021053 | 1 | 0.010526 | false | 0 | 0.2 | 0 | 0.221053 | 0.042105 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ea5aaa2c98d99adc13ed905c0058e7f02231385 | 2,847 | py | Python | u24_lymphocyte/third_party/treeano/nodes/tests/containers_test.py | ALSM-PhD/quip_classification | 7347bfaa5cf11ae2d7a528fbcc43322a12c795d3 | [
"BSD-3-Clause"
] | 45 | 2015-04-26T04:45:51.000Z | 2022-01-24T15:03:55.000Z | u24_lymphocyte/third_party/treeano/nodes/tests/containers_test.py | ALSM-PhD/quip_classification | 7347bfaa5cf11ae2d7a528fbcc43322a12c795d3 | [
"BSD-3-Clause"
] | 8 | 2018-07-20T20:54:51.000Z | 2020-06-12T05:36:04.000Z | u24_lymphocyte/third_party/treeano/nodes/tests/containers_test.py | ALSM-PhD/quip_classification | 7347bfaa5cf11ae2d7a528fbcc43322a12c795d3 | [
"BSD-3-Clause"
] | 22 | 2018-05-21T23:57:20.000Z | 2022-02-21T00:48:32.000Z | import nose.tools as nt
import numpy as np
import theano
import treeano.nodes as tn
fX = theano.config.floatX
def test_sequential_node_serialization():
tn.check_serialization(tn.SequentialNode("a", []))
tn.check_serialization(tn.SequentialNode(
"a",
[tn.SequentialNode("b", []),
tn.SequentialNode("c", [])]))
def test_auxiliary_node_serialization():
tn.check_serialization(tn.AuxiliaryNode("a", tn.IdentityNode("b")))
def test_graph_node_serialization():
tn.check_serialization(tn.GraphNode("a", [[tn.IdentityNode("b")],
[{"from": "b", "to": "a"}]]))
@nt.raises(AssertionError)
def test_container_node_raises():
network = tn.SequentialNode(
"s",
[tn.ContainerNode("c", []),
tn.IdentityNode("i")
]).network()
fn = network.function([], ["i"])
fn()
def test_auxiliary_node():
network = tn.SequentialNode(
"s",
[tn.InputNode("i", shape=()),
tn.AuxiliaryNode("a", tn.MultiplyConstantNode("m", value=2))]
).network()
fn = network.function(["i"], ["s", "a", "m"])
np.testing.assert_equal(np.array(fn(3.2)),
np.array([3.2, 3.2, 6.4], dtype=fX))
def test_graph_node():
network = tn.GraphNode(
"g1",
[[tn.InputNode("i", shape=()),
tn.GraphNode("g2",
[(tn.MultiplyConstantNode("m1", value=2),
tn.AddConstantNode("a1", value=2)),
[{"to": "a1"},
{"from": "a1", "to": "m1"},
{"from": "m1", "to_key": "foo"}]],
output_key="foo")],
[{"from": "i", "to": "g2"},
{"from": "g2", "to_key": "bar"}]],
output_key="bar"
).network()
fn = network.function(["i"], ["a1", "m1", "g1", "g2"])
nt.assert_equal([5, 10, 10, 10], fn(3))
def test_graph_node_no_output_key():
network = tn.SequentialNode(
"s",
[tn.InputNode("i", shape=()),
tn.GraphNode("g",
[(tn.MultiplyConstantNode("m1", value=2),
tn.AddConstantNode("a1", value=2)),
[{"to": "a1"},
{"from": "a1", "to": "m1"},
{"from": "m1", "to_key": "foo"}]])]
).network()
fn = network.function(["i"], ["s"])
nt.assert_equal([3], fn(3))
def test_graph_node_no_input():
network = tn.GraphNode(
"g",
[(tn.InputNode("i", shape=()),
tn.MultiplyConstantNode("m1", value=2),
tn.AddConstantNode("a1", value=2)),
[{"from": "i", "to": "a1"},
{"from": "a1", "to": "m1"},
{"from": "m1"}]]
).network()
fn = network.function(["i"], ["g"])
nt.assert_equal([10], fn(3))
| 29.968421 | 75 | 0.488936 | 312 | 2,847 | 4.339744 | 0.217949 | 0.041359 | 0.059084 | 0.088626 | 0.551699 | 0.464549 | 0.328656 | 0.241507 | 0.226736 | 0.16322 | 0 | 0.027136 | 0.301019 | 2,847 | 94 | 76 | 30.287234 | 0.653266 | 0 | 0 | 0.315789 | 0 | 0 | 0.061819 | 0 | 0 | 0 | 0 | 0 | 0.065789 | 1 | 0.105263 | false | 0 | 0.052632 | 0 | 0.157895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ea5e98a4fc2b8a1c6d0d0f71589f165127c59dd | 3,231 | py | Python | pupa/migrations/0001_initial.py | datamade/pupa | 7c7d2937dfa0c8347e47661a6ed42fd28a9e16d4 | [
"BSD-3-Clause"
] | 3 | 2015-11-21T10:39:44.000Z | 2019-11-17T16:34:53.000Z | pupa/migrations/0001_initial.py | datamade/pupa | 7c7d2937dfa0c8347e47661a6ed42fd28a9e16d4 | [
"BSD-3-Clause"
] | 1 | 2015-11-23T19:43:48.000Z | 2015-11-23T19:45:06.000Z | pupa/migrations/0001_initial.py | datamade/pupa | 7c7d2937dfa0c8347e47661a6ed42fd28a9e16d4 | [
"BSD-3-Clause"
] | 5 | 2015-11-22T09:23:14.000Z | 2019-11-17T16:34:57.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial schema for pupa's scrape/import run-reporting tables."""

    dependencies = [
        ('core', '0001_initial'),
        ('legislative', '0001_initial'),
    ]

    operations = [
        # Per-object-type insert/update/noop counters for one import run.
        migrations.CreateModel(
            name='ImportObjects',
            fields=[
                ('id', models.AutoField(primary_key=True, auto_created=True, serialize=False, verbose_name='ID')),
                ('object_type', models.CharField(max_length=20, choices=[('jurisdiction', 'Jurisdiction'), ('person', 'Person'), ('organization', 'Organization'), ('post', 'Post'), ('membership', 'Membership'), ('bill', 'Bill'), ('vote_event', 'VoteEvent'), ('event', 'Event')])),
                ('insert_count', models.PositiveIntegerField()),
                ('update_count', models.PositiveIntegerField()),
                ('noop_count', models.PositiveIntegerField()),
                ('start_time', models.DateTimeField()),
                ('end_time', models.DateTimeField()),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # One row per pupa run against a jurisdiction.
        migrations.CreateModel(
            name='RunPlan',
            fields=[
                ('id', models.AutoField(primary_key=True, auto_created=True, serialize=False, verbose_name='ID')),
                ('success', models.BooleanField(default=True)),
                ('jurisdiction', models.ForeignKey(to='core.Jurisdiction')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Link ImportObjects rows back to the run they belong to.
        migrations.AddField(
            model_name='importobjects',
            name='report',
            field=models.ForeignKey(to='pupa.RunPlan'),
            preserve_default=True,
        ),
        # Per-object-type counts produced by one scraper invocation.
        migrations.CreateModel(
            name='ScrapeObjects',
            fields=[
                ('id', models.AutoField(primary_key=True, auto_created=True, serialize=False, verbose_name='ID')),
                ('object_type', models.CharField(max_length=20, choices=[('jurisdiction', 'Jurisdiction'), ('person', 'Person'), ('organization', 'Organization'), ('post', 'Post'), ('membership', 'Membership'), ('bill', 'Bill'), ('vote_event', 'VoteEvent'), ('event', 'Event')])),
                ('count', models.PositiveIntegerField()),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Metadata (scraper name, args, timing) for one scraper invocation.
        migrations.CreateModel(
            name='ScrapeReport',
            fields=[
                ('id', models.AutoField(primary_key=True, auto_created=True, serialize=False, verbose_name='ID')),
                ('scraper', models.CharField(max_length=300)),
                ('args', models.CharField(max_length=300)),
                ('start_time', models.DateTimeField()),
                ('end_time', models.DateTimeField()),
                ('plan', models.ForeignKey(to='pupa.RunPlan')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Link ScrapeObjects rows back to their report.
        migrations.AddField(
            model_name='scrapeobjects',
            name='report',
            field=models.ForeignKey(to='pupa.ScrapeReport'),
            preserve_default=True,
        ),
    ]
| 40.898734 | 280 | 0.535747 | 260 | 3,231 | 6.511538 | 0.303846 | 0.049616 | 0.059067 | 0.054341 | 0.660366 | 0.607206 | 0.607206 | 0.506793 | 0.383934 | 0.383934 | 0 | 0.008422 | 0.301764 | 3,231 | 78 | 281 | 41.423077 | 0.742021 | 0.0065 | 0 | 0.583333 | 0 | 0 | 0.179863 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.055556 | 0 | 0.097222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ea60c9a91259a0b51917497b57172f651d30c70 | 1,112 | py | Python | models/estudiante.py | javierdaza/directorio_estudiantes | 13f5963cccec278dec4fde7332a4491d9220c059 | [
"MIT"
] | null | null | null | models/estudiante.py | javierdaza/directorio_estudiantes | 13f5963cccec278dec4fde7332a4491d9220c059 | [
"MIT"
] | null | null | null | models/estudiante.py | javierdaza/directorio_estudiantes | 13f5963cccec278dec4fde7332a4491d9220c059 | [
"MIT"
] | null | null | null | from urllib.parse import urlparse
import datetime
from decouple import config
from peewee import *
database_proxy = DatabaseProxy()
production = config('PRODUCTION', cast=bool)
class BaseModel(Model):
    """Base peewee model binding all subclasses to the proxied database."""

    class Meta:
        # Resolved at import time via database_proxy.initialize() below.
        database = database_proxy
class Estudiante(BaseModel):
    # Peewee model for one student directory entry.
    nombre_usuario = CharField(max_length=255, unique=True)  # unique handle
    puntaje = IntegerField(default=0)  # score, starts at zero
    fecha_creacion = DateTimeField(default=datetime.datetime.now)  # set at insert time
    numero_telefono = CharField(max_length=255)  # phone number
    email = CharField(max_length=255)
    foto = CharField(max_length=255)  # presumably a photo URL/path — TODO confirm
    motivacion = TextField()  # free-form motivation text
# Select the concrete backend at import time and bind it to the proxy.
# Postgres is used only for a production Heroku deployment; every other
# configuration (non-Heroku production or development) uses SQLite.
# The original duplicated the SQLite branch in two places; collapsed here.
if production and config('HEROKU', cast=int) == 1:
    # Heroku exposes the connection info as a single DATABASE_URL.
    url = urlparse(config('DATABASE_URL'))
    database = PostgresqlDatabase(
        database=url.path[1:],  # strip the leading '/' from the URL path
        user=url.username,
        password=url.password,
        host=url.hostname,
        port=url.port,
        autorollback=True  # roll back automatically after failed queries
    )
else:
    database = SqliteDatabase(config('DATABASE_URL'))
database_proxy.initialize(database)
| 24.173913 | 65 | 0.678058 | 119 | 1,112 | 6.226891 | 0.495798 | 0.064777 | 0.097166 | 0.11336 | 0.116059 | 0.116059 | 0 | 0 | 0 | 0 | 0 | 0.017381 | 0.223921 | 1,112 | 45 | 66 | 24.711111 | 0.841251 | 0 | 0 | 0.121212 | 0 | 0 | 0.046763 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.030303 | 0.121212 | 0 | 0.424242 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ea6798fa3e1352a78ff1f506bcd6a8b03ba29cd | 2,213 | py | Python | draftplot_PWx.py | erwinlambert/alphabeta | 04dd03de3521556067c997c2deaf0d6560124b7b | [
"BSD-3-Clause"
] | null | null | null | draftplot_PWx.py | erwinlambert/alphabeta | 04dd03de3521556067c997c2deaf0d6560124b7b | [
"BSD-3-Clause"
] | null | null | null | draftplot_PWx.py | erwinlambert/alphabeta | 04dd03de3521556067c997c2deaf0d6560124b7b | [
"BSD-3-Clause"
] | null | null | null | from scipy.integrate import odeint
import os
import matplotlib as mpl
import numpy as np
import matplotlib.pyplot as plt
import sys
hstep = .01
sstep = .01
hmin = 0.
hmax = 2.
smin = 0.
smax = 3.
rmin = .01
rmax = 3.
rstep = .1
rr = np.arange(rmin,rmax+rstep,rstep)
LL3 = 1.2
LL4 = 0.22
dx = .01
s0 = .01
h0 = .01
y0 = np.array([h0,s0])
x = np.arange(0,1+dx,dx)
PSI = np.zeros((len(rr),len(x)))
S = PSI.copy()
Msg = PSI.copy()
def dydx(y, x, r):
    """Right-hand side of the (h, s) ODE system, in odeint's signature.

    y is the state vector (h, s); x is the along-boundary coordinate
    (unused — the system is autonomous); r is the runoff parameter.
    Returns d[h, s]/dx as a numpy array.
    """
    h = y[0]
    s = y[1]
    h_rate = LL3 * h + LL4 / (s * h ** 2.) - r / (2. * s ** 2. * h)
    s_rate = -LL3 * s - LL4 / (h ** 3.) + r / (s * h ** 2.)
    return np.array([h_rate, s_rate])
#Define plot
# Global matplotlib styling for the three-panel figure.
mpl.rcParams['xtick.labelsize'] = 12
mpl.rcParams['ytick.labelsize'] = 12
mpl.rcParams['lines.linewidth'] = 1.
mpl.rcParams['axes.labelsize'] = 16
mpl.rcParams['axes.titlesize'] = 16
mpl.rcParams['legend.fontsize'] = 8
mpl.rcParams['figure.subplot.hspace'] = .2
mpl.rcParams['figure.subplot.wspace'] = .2
mpl.rcParams['figure.subplot.top'] = .9
mpl.rcParams['figure.subplot.bottom'] = .15
mpl.rcParams['figure.subplot.left'] = .05
mpl.rcParams['figure.subplot.right'] = .99
mpl.rcParams['figure.figsize'] = 12,4
mpl.rcParams['font.family'] ='serif'
mpl.rcParams['font.serif'] = 'palatino'
mpl.rcParams['text.usetex'] = True
mpl.rcParams['patch.linewidth'] = 0
#red = np.array([215,45,38])/255. #Red color
blu = np.array([66,118,180])/255. #Blue color
#pur = np.array([119,43,133])/255. #Purple
cmap = plt.get_cmap('gist_stern_r')
cmap2 = plt.get_cmap('inferno_r')
fig,ax = plt.subplots(1,3)
# Integrate the ODE once per runoff value and store the derived fields.
for i,r in enumerate(rr):
    y = odeint(dydx,y0,x,args=(r,))
    h = y[:,0]
    s = y[:,1]
    PSI[i,:] = s*h**2  # volume transport
    S[i,:] = s         # salinity contrast
    Msg[i,:] = LL3*s*h**2+LL4/h  # formation term
N = 30  # number of contour levels
ax[0].contourf(x,rr,PSI,N,cmap=cmap)
ax[1].contourf(x,rr,S,N,cmap=cmap)
ax[2].contourf(x,rr,np.log(Msg),N,cmap=cmap)
ax[0].set_title('PW Volume transport')
ax[1].set_title('Salinity contrast')
ax[2].set_title('PW formation (log)')
for AX in ax:
    AX.set_xlim([0,1])
    AX.set_ylim([0,rmax])
    AX.set_xlabel('Distance along boundary')
ax[0].set_ylabel('Runoff')
fname = '../figures/draftplot_PWx.png'
plt.savefig(fname,dpi=300)
# Open the saved figure in an image viewer (Linux 'eog').
os.system('eog '+fname)
| 21.485437 | 50 | 0.619973 | 383 | 2,213 | 3.548303 | 0.391645 | 0.137601 | 0.087564 | 0.10596 | 0.036792 | 0 | 0 | 0 | 0 | 0 | 0 | 0.066012 | 0.171713 | 2,213 | 102 | 51 | 21.696078 | 0.675396 | 0.048351 | 0 | 0 | 0 | 0 | 0.199142 | 0.043354 | 0 | 0 | 0 | 0 | 0 | 1 | 0.013514 | false | 0 | 0.081081 | 0 | 0.108108 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ea6f2f3bf56a4b8ce0430cc6d5843b95908a1ac | 4,405 | py | Python | solid-pancake/solid_pancake_cli.py | sharyar/solid-pancake | 26eb519fbfcb5f195527cda06ff4a93e7c0018a7 | [
"MIT"
] | null | null | null | solid-pancake/solid_pancake_cli.py | sharyar/solid-pancake | 26eb519fbfcb5f195527cda06ff4a93e7c0018a7 | [
"MIT"
] | null | null | null | solid-pancake/solid_pancake_cli.py | sharyar/solid-pancake | 26eb519fbfcb5f195527cda06ff4a93e7c0018a7 | [
"MIT"
] | null | null | null | # CLI application for sentiment analysis of a user's tweets
# Load constants
# Check if saved model exists, if it does not, ask user if ok to train
# Save model for vectorizer as well
# Get user's twitter address
# Ask if he wants a word cloud too
# Start analyzing - Show animation for loading?
# display graph of sentiment analysis
# Import libraries needed:
import pandas as pd
import sys
import os
import time
import nltk
from nltk.corpus import stopwords
import twitter_helper_functions
from dotenv import load_dotenv
import twitter
nltk.download('stopwords')
# Result unused — presumably warms/verifies the stopwords corpus; TODO confirm.
stopwords.words('english')
# Constants:
TWITTER_DF_FILEPATH = '../twitter.csv'
# Declare variables used in the program
NLP_model = None
twitter_id = None
retrieved_tweets = None
twitter_api = None
WELCOME_MESSAGE = '''
Welcome to Solid-Pancake - A Twitter Sentiment Analysis Program
This is the command line version of the tool. A web-based version is
currently under development.
By: Sharyar Memon
Please select from one of the following options:
1. Do sentiment analysis on your tweets!
2. Admin Access
3. Display Word Cloud
4. Run model on saved tweets!
Q. Exit
'''
ADMIN_MESSAGE = '''
***************************
You are in the Admin Panel. Please select one of the following options:
1. Train Model from Default Dataset and save model
2. Specify New Dataset and Train - Must have a specific format (Check Docs)
3. Load saved-model!
Q. Go back to main menu.
'''
# NOTE(review): f-string is evaluated once at import time, so {twitter_id}
# is frozen at None here — it will not reflect later assignments.
USER_MESSAGE = f'''
***************************
You are in the USer Panel. Please select one of the following options:
1. Provide your twitter id: {twitter_id}
2. Generate Word Cloud from tweets
3. Do Sentiment Analysis and display a distribution of your tweet sentiments.
4. Admin Panel
Q. Quit Application
'''
EXIT_MESSAGE = '''
***************************
Thanks for trying my application! To reach out to me with suggestions/ideas/bugs, create an issue on the
github repo. I am a student at the UofA and I am always trying to learn more!
Press any key to exit!
'''
ABOUT_MESSAGE = '''
#TODO
'''
def _train_from_csv(csv_path):
    """Load a labeled tweets CSV ('tweet'/'label' columns) and train
    (and save) the NLP model."""
    tweets_df = pd.read_csv(csv_path, header=0, index_col=0, engine='c')
    return twitter_helper_functions.train_and_save_model(tweets_df['tweet'],
                                                         tweets_df['label'])


def admin_panel():
    """Show the admin menu and return a trained or loaded NLP model.

    Returns None when the user picks an unrecognized option.
    """
    user_option_ = str(input(ADMIN_MESSAGE))
    nlp_model_ = None
    if user_option_ == '1':
        # Retrain from the bundled default dataset.
        nlp_model_ = _train_from_csv(TWITTER_DF_FILEPATH)
    elif user_option_ == '2':
        # Retrain from a user-supplied dataset with the same schema.
        twitter_df_path = input('Provide a path to the new training data set')
        nlp_model_ = _train_from_csv(twitter_df_path)
    elif user_option_ == '3':
        nlp_model_ = twitter_helper_functions.load_model()
    return nlp_model_
def initialize_twitter_api():
    """Build a twitter.Api client from credentials in the environment.

    Credentials are loaded from a .env file via python-dotenv.

    Returns:
        twitter.Api: configured client, or None when the credentials
        cannot be read.
    """
    load_dotenv()
    try:
        consumer_key = os.getenv('CONSUMER_KEY')
        consumer_secret = os.getenv('CONSUMER_SECRET')
        access_token_key = os.getenv('ACCESS_TOKEN_KEY')
        access_token_secret = os.getenv('ACCESS_TOKEN_SECRET')
    except Exception as e:
        # The original fell through after the except and hit a NameError
        # on the (never-assigned) client variable; fail soft instead.
        print('This exception occurred', e)
        return None
    return twitter.Api(consumer_key=consumer_key,
                       consumer_secret=consumer_secret,
                       access_token_key=access_token_key,
                       access_token_secret=access_token_secret)
if __name__ == '__main__':
    twitter_api = initialize_twitter_api()
    # Menu loop: runs until the user selects 'q'.
    while True:
        starting_option = str(input(WELCOME_MESSAGE))
        if starting_option == '1':
            # Fetch and normalize the user's tweets for later options.
            twitter_id = str(input('Provide your user id below with @ symbol (i.e @sharyar) :\n'))
            retrieved_tweets = twitter_helper_functions.get_tweets(api=twitter_api, screen_name=twitter_id)
            retrieved_tweets = twitter_helper_functions.convert_tweets_to_list(retrieved_tweets)
        elif starting_option == '2':
            NLP_model = admin_panel()
        elif starting_option == '3':
            # NOTE(review): retrieved_tweets is None until option 1 has run.
            twitter_helper_functions.generate_word_cloud(retrieved_tweets, stopwords=stopwords.words('english'))
        elif starting_option == '4':
            # NOTE(review): likewise requires options 1 and 2 to have run first.
            twitter_helper_functions.analyze_and_visualize_tweets(retrieved_tweets, NLP_model)
        elif starting_option.lower() == 'q':
            print(EXIT_MESSAGE)
            time.sleep(3)
            sys.exit()
| 32.873134 | 112 | 0.695119 | 610 | 4,405 | 4.785246 | 0.337705 | 0.035629 | 0.060295 | 0.017472 | 0.206578 | 0.151764 | 0.12196 | 0.104145 | 0.104145 | 0.075368 | 0 | 0.006596 | 0.2084 | 4,405 | 133 | 113 | 33.120301 | 0.830513 | 0.08899 | 0 | 0.1 | 0 | 0.01 | 0.354911 | 0.025994 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02 | false | 0 | 0.09 | 0 | 0.13 | 0.02 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ea8c2bf1fba5f3a643f0cdd98c6a5c896e24557 | 1,465 | py | Python | vsutillib/vsutillib-misc/vsutillib/misc/strFormatTimeDelta.py | akai10tsuki/vsutillib | 6d623171cc2a5c66a94fb508bfc312abeab49ff2 | [
"MIT"
] | null | null | null | vsutillib/vsutillib-misc/vsutillib/misc/strFormatTimeDelta.py | akai10tsuki/vsutillib | 6d623171cc2a5c66a94fb508bfc312abeab49ff2 | [
"MIT"
] | null | null | null | vsutillib/vsutillib-misc/vsutillib/misc/strFormatTimeDelta.py | akai10tsuki/vsutillib | 6d623171cc2a5c66a94fb508bfc312abeab49ff2 | [
"MIT"
] | null | null | null | """
format timedelta object
"""
from string import Formatter
def strFormatTimeDelta(tDelta, fmt=None):
    """
    Format a timedelta as a string.

    When ``fmt`` is given, any of the template fields {W}, {D}, {H},
    {M}, {S} (weeks/days/hours/minutes/seconds) present in it are
    filled in.  When ``fmt`` is None, a string such as
    '1 hours 2 minutes 3 seconds' is built, starting at the first
    non-zero unit.

    Args:
        tDelta (timedelta): timedelta object to format
        fmt (str, optional): string formatter. Defaults to None.

    Returns:
        str: formatted timedelta string
    """
    strFormatter = Formatter()
    divisor = {'W': 604800, 'D': 86400, 'H': 3600, 'M': 60, 'S': 1}
    labels = {'W': "weeks", 'D': "days", 'H': "hours", 'M': "minutes", 'S': "seconds"}
    remainder = int(tDelta.total_seconds())

    if fmt is None:
        # Build "N unit" pieces, skipping leading zero-valued units.
        pieces = []
        for key in divisor:
            quotient, remainder = divmod(remainder, divisor[key])
            if pieces or quotient > 0:
                pieces.append(str(quotient) + " " + labels[key])
        return " ".join(pieces)

    # Only compute the units the template actually references.
    keywords = [field for _, field, _, _ in strFormatter.parse(fmt)]
    args = {}
    for key in divisor:
        if key in keywords:
            args[key], remainder = divmod(remainder, divisor[key])
    return strFormatter.format(fmt, **args)
| 29.3 | 86 | 0.579522 | 174 | 1,465 | 4.873563 | 0.45977 | 0.023585 | 0.024764 | 0.025943 | 0.141509 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019531 | 0.301024 | 1,465 | 49 | 87 | 29.897959 | 0.808594 | 0.254608 | 0 | 0.08 | 0 | 0 | 0.039385 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.04 | 0 | 0.16 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ea9a3d5c1e2aaee9a2edabc627b486bd2b84f2b | 5,091 | py | Python | gatelfpytorchjson/modules/TextClassBiLstmCnnSingle.py | GateNLP/gate-lf-pytorch-wrapper | a08e180e56062dc03247ff0eb2414c0179ef7863 | [
"Apache-2.0"
] | 1 | 2018-05-13T06:26:23.000Z | 2018-05-13T06:26:23.000Z | gatelfpytorchjson/modules/TextClassBiLstmCnnSingle.py | GateNLP/gate-lf-pytorch-wrapper | a08e180e56062dc03247ff0eb2414c0179ef7863 | [
"Apache-2.0"
] | 46 | 2018-04-27T19:13:11.000Z | 2019-06-10T20:45:27.000Z | gatelfpytorchjson/modules/TextClassBiLstmCnnSingle.py | GateNLP/gate-lf-pytorch-wrapper | a08e180e56062dc03247ff0eb2414c0179ef7863 | [
"Apache-2.0"
] | 2 | 2019-06-01T11:44:08.000Z | 2019-06-18T03:27:19.000Z | from gatelfpytorchjson import CustomModule
from gatelfpytorchjson import EmbeddingsModule
import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
import logging
# Module-level logger: DEBUG level, emitted to stderr with timestamps.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
streamhandler = logging.StreamHandler(stream=sys.stderr)
formatter = logging.Formatter(
    '%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
streamhandler.setFormatter(formatter)
logger.addHandler(streamhandler)
class TextClassBiLstmCnnSingle(CustomModule):
    """Sentence classifier: embedding -> BiLSTM -> five parallel Conv2d
    banks (kernel heights 2..6) with BatchNorm -> max-over-time pooling
    -> two fully connected layers -> log-softmax over classes.
    """

    def __init__(self, dataset, config=None, maxSentLen=100, kernel_dim=64,
                 lstm_dim=64, dropout=0.2, bn_momentum=0.2):
        """Build the network.

        dataset: project object providing get_info()["nClasses"] and the
            vocab of its first index-list feature.
        maxSentLen: inputs are truncated/zero-padded to this length.
        """
        # Avoid the shared mutable default argument ({}); callers that
        # pass nothing still get an empty dict as before.
        if config is None:
            config = {}
        super().__init__(config=config)
        self.maxSentLen = maxSentLen
        self.n_classes = dataset.get_info()["nClasses"]
        feature = dataset.get_indexlist_features()[0]
        vocab = feature.vocab
        vocab_size = vocab.n
        logger.debug("Initializing module TextClassCNNsingle for classes: %s and vocab %s" %
                     (self.n_classes, vocab_size, ))
        self.embedding = EmbeddingsModule(vocab)
        embedding_dim = self.embedding.emb_dims
        # Bidirectional LSTM doubles the feature width seen by the convs.
        self.lstm1 = nn.LSTM(embedding_dim, lstm_dim, batch_first=True, bidirectional=True)
        self.conv2 = nn.Conv2d(1, kernel_dim, (2, lstm_dim*2))
        self.conv2_bn = nn.BatchNorm1d(kernel_dim, momentum=bn_momentum)
        self.conv3 = nn.Conv2d(1, kernel_dim, (3, lstm_dim*2))
        self.conv3_bn = nn.BatchNorm1d(kernel_dim, momentum=bn_momentum)
        self.conv4 = nn.Conv2d(1, kernel_dim, (4, lstm_dim*2))
        self.conv4_bn = nn.BatchNorm1d(kernel_dim, momentum=bn_momentum)
        self.conv5 = nn.Conv2d(1, kernel_dim, (5, lstm_dim*2))
        self.conv5_bn = nn.BatchNorm1d(kernel_dim, momentum=bn_momentum)
        self.conv6 = nn.Conv2d(1, kernel_dim, (6, lstm_dim*2))
        self.conv6_bn = nn.BatchNorm1d(kernel_dim, momentum=bn_momentum)
        # One pooled value per conv bank -> kernel_dim*5 features.
        self.fc_hidden1 = nn.Linear(kernel_dim*5, 16)
        self.fc = nn.Linear(16, self.n_classes)
        self.dropout = nn.Dropout(dropout)
        self.logsoftmax = torch.nn.LogSoftmax(dim=1)

    def forward(self, batch):
        """Classify a batch; batch[0] is a list of index sequences.

        Returns log-probabilities of shape (batchsize, n_classes).
        """
        batch = torch.LongTensor(batch[0])  # (batchsize, sentence length)
        sent_len = batch.shape[1]
        batchsize = batch.shape[0]
        # Truncate or zero-pad every sentence to exactly maxSentLen.
        if sent_len > self.maxSentLen:
            batch = batch[:, :self.maxSentLen]
        elif sent_len < self.maxSentLen:
            zero_pad = torch.zeros(batchsize, self.maxSentLen-sent_len, dtype=torch.long)
            batch = torch.cat((batch, zero_pad), dim=1)
        if self.on_cuda():
            # type() already returns a CUDA tensor; the original's extra
            # bare `batch.cuda()` discarded its result (Tensor.cuda is
            # not in-place) and has been removed as dead code.
            batch = batch.type(torch.cuda.LongTensor)
        embedded = self.embedding(batch)   # (batchsize, seq, embedding dim)
        lstmed, _ = self.lstm1(embedded)   # (batchsize, seq, lstm_dim*2)
        lstmed = lstmed.unsqueeze(1)       # add channel dim for Conv2d
        # Each conv spans the full feature width, so squeeze(3) drops it.
        conved_2 = F.relu(self.conv2(lstmed)).squeeze(3)
        conved_2 = self.conv2_bn(conved_2)
        conved_3 = F.relu(self.conv3(lstmed)).squeeze(3)
        conved_3 = self.conv3_bn(conved_3)
        conved_4 = F.relu(self.conv4(lstmed)).squeeze(3)
        conved_4 = self.conv4_bn(conved_4)
        conved_5 = F.relu(self.conv5(lstmed)).squeeze(3)
        conved_5 = self.conv5_bn(conved_5)
        conved_6 = F.relu(self.conv6(lstmed)).squeeze(3)
        conved_6 = self.conv6_bn(conved_6)
        # Max-over-time pooling: one feature vector per conv bank.
        pooled_2 = F.max_pool1d(conved_2, conved_2.shape[2]).squeeze(2)
        pooled_3 = F.max_pool1d(conved_3, conved_3.shape[2]).squeeze(2)
        pooled_4 = F.max_pool1d(conved_4, conved_4.shape[2]).squeeze(2)
        pooled_5 = F.max_pool1d(conved_5, conved_5.shape[2]).squeeze(2)
        pooled_6 = F.max_pool1d(conved_6, conved_6.shape[2]).squeeze(2)
        concat = torch.cat((pooled_2, pooled_3, pooled_4, pooled_5, pooled_6), dim=1)
        concat = self.dropout(concat)
        hidden1 = F.leaky_relu(self.fc_hidden1(concat))
        out = self.fc(hidden1)
        out = self.logsoftmax(out)
        return out

    def get_lossfunction(self, config=None):
        """Negative log-likelihood over the log-softmax output."""
        # IMPORTANT: for the target indices, we use -1 for padding by default!
        return torch.nn.NLLLoss(ignore_index=-1)

    def get_optimizer(self, config=None):
        """Adam over the trainable parameters only."""
        parms = filter(lambda p: p.requires_grad, self.parameters())
        optimizer = torch.optim.Adam(parms, lr=0.015, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
        return optimizer
| 41.390244 | 117 | 0.659006 | 701 | 5,091 | 4.611983 | 0.24679 | 0.036189 | 0.013919 | 0.023198 | 0.180019 | 0.127436 | 0.118157 | 0.097123 | 0.097123 | 0.025982 | 0 | 0.042116 | 0.21646 | 5,091 | 122 | 118 | 41.729508 | 0.768363 | 0.140051 | 0 | 0 | 0 | 0 | 0.028919 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048193 | false | 0 | 0.084337 | 0.012048 | 0.180723 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2eaa782a586f2d229922037eef6508d342caa110 | 10,279 | py | Python | plot_functions.py | Noahs-ARK/idea_relations | b84427ede926f4272a1f60a3554d9674d70582cc | [
"MIT"
] | 29 | 2017-04-27T18:08:38.000Z | 2020-11-14T06:54:37.000Z | plot_functions.py | Noahs-ARK/idea_relations | b84427ede926f4272a1f60a3554d9674d70582cc | [
"MIT"
] | 1 | 2017-06-16T19:47:52.000Z | 2017-06-16T19:47:52.000Z | plot_functions.py | Noahs-ARK/idea_relations | b84427ede926f4272a1f60a3554d9674d70582cc | [
"MIT"
] | 8 | 2017-04-27T20:14:28.000Z | 2019-03-18T16:50:55.000Z | """Adapt plot functions with seaborn to get more beautiful plots."""
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import collections
import itertools
import numpy as np
import scipy.stats as ss
from distutils.spawn import find_executable
# Only enable LaTeX text rendering when a latex binary is available.
if find_executable('latex'):
    HAS_LATEX = True
else:
    HAS_LATEX = False
import matplotlib
# Headless environments (no DISPLAY) need the non-interactive Agg backend,
# selected before pyplot is imported.
if "DISPLAY" not in os.environ:
    matplotlib.use("Agg")
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle, Arrow
import matplotlib.dates as mdates
from pandas import DataFrame
LINEWIDTH = 5
# Global rc overrides applied via seaborn below.
FONT_ENV = {
    "lines.linewidth": LINEWIDTH,
    "lines.markersize":20,
    "ps.useafm": HAS_LATEX,
    "pdf.use14corefonts" : HAS_LATEX,
    "text.usetex": HAS_LATEX,
}
sns.set(font_scale=3.0, rc=FONT_ENV)
FIG_SIZE = (8, 7)
logging.basicConfig(level=logging.INFO)
# xkcd-style color names usable with sns.xkcd_palette / xkcd_rgb.
COLOR_NAMES = ["cerulean", "light red", "seafoam", "dark orange",
               "burgundy", "dark magenta", "midnight blue", "light violet",
               "ocean blue", "bluish purple", "pinkish", "pale orange",
               "aqua green", "pumpkin", "chocolate", "pine green"]
def start_plotting(fig_size, fig_pos, style="white", rc=None, despine=False):
    """Create a figure and one axes under a temporary seaborn style.

    fig_pos is either falsy (use a full-figure subplot) or an explicit
    [left, bottom, width, height] rectangle.  Returns (fig, ax).
    """
    with sns.axes_style(style, rc):
        fig = plt.figure(figsize=fig_size)
        ax = fig.add_axes(fig_pos) if fig_pos else fig.add_subplot(111)
        if despine:
            sns.despine(left=True)
    return fig, ax
def frange(x, y, step):
    """Yield x, x+step, x+2*step, ... while the value stays below y.

    Float-friendly analogue of range(); the endpoint y is exclusive.
    """
    value = x
    while value < y:
        yield value
        value += step
def end_plotting(fig, ax, title=None, xlabel=None,
                 ylabel=None, xlim=None, ylim=None, filename=None,
                 xticklabel=None, xlabel_rotation=None,
                 yticklabel=None, ylabel_rotation=None, label_text=None,
                 xtickgap=None):
    '''A set of common operations after plotting: titles, axis labels,
    tick positions/labels, limits and free-floating text.

    xticklabel/yticklabel are (positions, labels) pairs; xtickgap sets
    evenly spaced ticks instead; label_text is a list of (x, y, text).
    filename is accepted but not used here (saving is done by savefig).
    '''
    if title:
        ax.set_title(title)
    if xlabel:
        ax.set_xlabel(xlabel)
    if xticklabel:
        # Explicit tick positions and labels, optionally rotated.
        ax.set_xticks(xticklabel[0])
        ax.set_xticklabels(xticklabel[1], rotation=xlabel_rotation)
    elif xtickgap:
        xticks = ax.get_xticks()
        ax.set_xticks(list(frange(min(xticks), max(xticks) + 0.001, xtickgap)))
    else:
        # Freeze the auto-chosen ticks and label them with their values.
        ax.set_xticks(ax.get_xticks())
        ax.set_xticklabels(ax.get_xticks())
    if yticklabel:
        ax.set_yticks(yticklabel[0])
        ax.set_yticklabels(yticklabel[1], rotation=ylabel_rotation)
    else:
        ax.set_yticks(ax.get_yticks())
        ax.set_yticklabels(ax.get_yticks())
    if ylabel:
        ax.set_ylabel(ylabel)
    if xlim:
        ax.set_xlim(xlim)
    if ylim:
        ax.set_ylim(ylim)
    if label_text:
        for x, y, t in label_text:
            ax.text(x, y, t)
def savefig(fig, filename):
    """Write *fig* to *filename*, then close the current pyplot figure."""
    fig.savefig(filename)
    plt.close()
def ax_plot_lines(ax, xs, ys, colors, shapes, linestyles,
                  errorbar=False, linewidth=LINEWIDTH):
    """Plot one line per (x, y) pair on *ax* and return the artists.

    When *errorbar* is true each entry of y is a list of samples per x
    position; the mean is plotted with standard-error bars.
    """
    artists = []
    for xvals, yvals, color, marker, style in zip(xs, ys, colors, shapes,
                                                  linestyles):
        if errorbar:
            # yvals is a list of sample lists in this case.
            means = [np.mean(samples) for samples in yvals]
            errors = [ss.sem(samples) for samples in yvals]
            artist = ax.errorbar(xvals, means, yerr=errors, color=color,
                                 marker=marker, linestyle=style, ecolor=color)
        else:
            artist, = ax.plot(xvals, yvals, color=color, marker=marker,
                              linestyle=style, linewidth=linewidth)
        artists.append(artist)
    return artists
def plot_lines(xs, ys, title=None, xlabel=None,
               ylabel=None, xlim=None, ylim=None,
               colors=None, shapes=None, linestyles=None,
               errorbar=False, legend=None, loc=0,
               xticklabel=None, yticklabel=None,
               xlabel_rotation=None, ylabel_rotation=None,
               hlines=None, vlines=None, bbox_to_anchor=None,
               fig_pos=None, fig_size=FIG_SIZE, label_text=None,
               xdate=False, linewidth=LINEWIDTH, rc=None, despine=False,
               ticksize=None, style="white", filename=None):
    '''Plot lines for all pairs of xs and ys.
    Input:
        xs: a list of x-value lists
        ys: a list of y-value lists ([[1, 2, 3]], two layers),
            or a list of y-value lists grouped by x ([[[1, 2, 3], [1, 3]]],
            three layers),
            in the second case, errorbar is computed using standard error
        ys should be with the same length of xs
    Returns the created figure; optionally saves it to filename.
    '''
    fig, ax = start_plotting(fig_size, fig_pos, rc=rc, despine=despine,
                             style=style)
    # Default cosmetics when none supplied; markers repeat every 8 series.
    if not colors:
        colors = sns.color_palette(n_colors=len(xs))
    if not shapes:
        shapes = ['o', 's', '^', 'd', '+', 'v', '*', 'x'] * (len(xs) // 8 + 1)
    if not linestyles:
        linestyles = ['-'] * len(xs)
    if xdate:
        # Convert datetime x-values to matplotlib date numbers.
        xs = [[mdates.date2num(d) for d in l] for l in xs]
    lines = ax_plot_lines(ax, xs, ys, colors, shapes, linestyles,
                          errorbar=errorbar, linewidth=linewidth)
    if xdate:
        # this works for years and month
        # customize through xlabel and xticklabels
        years = mdates.YearLocator()  # every year
        months = mdates.MonthLocator()  # every month
        yearsFmt = mdates.DateFormatter('%Y')
        ax.xaxis.set_major_locator(years)
        ax.xaxis.set_major_formatter(yearsFmt)
        ax.xaxis.set_minor_locator(months)
        fig.autofmt_xdate()
    if hlines:
        for y in hlines:
            ax.axhline(y=y, linestyle='--', color='black')
    if vlines:
        for x in vlines:
            ax.axvline(x=x, linestyle='--', color='black')
    if legend:
        ax.legend(lines, legend, loc=loc, bbox_to_anchor=bbox_to_anchor,
                  frameon=False)
    if not xlim:
        # Pad the x-range by 2% on each side.
        diff = np.max(xs) - np.min(xs)
        xlim = (np.min(xs) - 0.02 * diff, np.max(xs) + 0.02 * diff)
    end_plotting(fig, ax, title=title, xlabel=xlabel,
                 ylabel=ylabel, xlim=xlim, ylim=ylim,
                 xticklabel=xticklabel, yticklabel=yticklabel,
                 xlabel_rotation=xlabel_rotation,
                 ylabel_rotation=ylabel_rotation, label_text=label_text)
    if ticksize is not None:
        ax.tick_params(axis='both', which='major', labelsize=ticksize)
    if filename:
        savefig(fig, filename)
    return fig
class SubsampleJointGrid(sns.JointGrid):
    """JointGrid variant that can draw the joint plot on a random
    subsample of the data."""

    def plot_sub_joint(self, func, subsample, **kwargs):
        """Draw a bivariate plot of `x` and `y`, optionally on a random
        subsample of at most *subsample* points.

        Parameters
        ----------
        func : plotting callable
            Takes two 1d data arrays as its first two positional
            arguments and plots on the "current" axes.
        subsample : int
            Number of points to draw at random (without replacement);
            values <= 0 or >= len(x) plot the full data.
        kwargs : key, value mappings
            Passed through to the plotting function.

        Returns
        -------
        self : JointGrid instance
        """
        if 0 < subsample < len(self.x):
            indexes = np.random.choice(range(len(self.x)), subsample,
                                       replace=False)
            sub_x = np.array([self.x[i] for i in indexes])
            sub_y = np.array([self.y[i] for i in indexes])
            plt.sca(self.ax_joint)
            func(sub_x, sub_y, **kwargs)
        else:
            plt.sca(self.ax_joint)
            func(self.x, self.y, **kwargs)
        return self
def joint_plot(x, y, xlabel=None,
               ylabel=None, xlim=None, ylim=None,
               loc="best", color='#0485d1',
               size=8, markersize=50, kind="kde",
               scatter_color="r"):
    """KDE joint plot of x vs y with a 1000-point scatter overlay,
    marginal histograms, and a Pearson-r annotation.

    Returns the SubsampleJointGrid.  loc/color/markersize/kind are
    accepted but not used by the current implementation.
    """
    with sns.axes_style("darkgrid"):
        if xlabel and ylabel:
            g = SubsampleJointGrid(xlabel, ylabel,
                    data=DataFrame(data={xlabel: x, ylabel: y}),
                    space=0.1, ratio=2, size=size, xlim=xlim, ylim=ylim)
        else:
            g = SubsampleJointGrid(x, y, size=size,
                    space=0.1, ratio=2, xlim=xlim, ylim=ylim)
        g.plot_joint(sns.kdeplot, shade=True, cmap="Blues")
        # Overlay at most 1000 scattered points so dense data stays readable.
        g.plot_sub_joint(plt.scatter, 1000, s=20, c=scatter_color, alpha=0.3)
        g.plot_marginals(sns.distplot, kde=False, rug=False)
        g.annotate(ss.pearsonr, fontsize=25, template="{stat} = {val:.2g}\np = {p:.2g}")
        g.ax_joint.set_yticklabels(g.ax_joint.get_yticks())
        g.ax_joint.set_xticklabels(g.ax_joint.get_xticks())
    return g
def plot_bar(value_lists, xlabel=None, fig_size=FIG_SIZE,
             fig_pos=None, ylabel=None, xticklabel=None,
             xlabel_rotation=None, width=-1, gap=1,
             yticklabel=None, legend=None,
             errorbar_list=None, color_list=sns.color_palette(n_colors=10),
             ylim=None, ncol=1,
             handlelength=None, loc=0, bbox_to_anchor=None,
             handletextpad=None, columnspacing=None,
             vlines=None, hlines=None, xlim=None,
             hatches=None, filename=None):
    """Grouped bar chart: one bar group per index, one series per list in
    value_lists.  width < 0 auto-sizes bars to 75% of the group gap.

    NOTE(review): the color_list default is evaluated once at import
    time; that is harmless here since it is never mutated.
    """
    fig, ax = start_plotting(fig_size, fig_pos)
    N = len(value_lists[0])
    if width < 0:
        width = 0.75 * gap / len(value_lists)
    ind = np.arange(N) * gap
    rects = []
    # Draw each series shifted right by one bar width per iteration.
    for i in range(len(value_lists)):
        rect = ax.bar(ind, value_lists[i], width, color=color_list[i],
                      yerr=errorbar_list[i] if errorbar_list else None,
                      error_kw={"ecolor": "black"},
                      hatch=None if not hatches else hatches[i])
        ind = ind + width
        rects.append(rect)
    xlim = (-width, max(ind) + width)
    if hlines:
        for y in hlines:
            ax.axhline(y=y, linestyle='--', color='grey')
    if vlines:
        for x in vlines:
            ax.axvline(x=x, linestyle='--', color='grey')
    if legend:
        ax.legend(rects, legend, loc=loc,
                  bbox_to_anchor=bbox_to_anchor,
                  ncol=ncol, handlelength=handlelength,
                  handletextpad=handletextpad,
                  columnspacing=columnspacing,
                  frameon=False)
    end_plotting(fig, ax, xlabel=xlabel, ylabel=ylabel,
                 xticklabel=xticklabel,
                 xlabel_rotation=xlabel_rotation,
                 xlim=xlim,
                 yticklabel=yticklabel, ylim=ylim)
    if filename:
        savefig(fig, filename)
    return fig
| 36.580071 | 88 | 0.594027 | 1,349 | 10,279 | 4.409192 | 0.24685 | 0.011769 | 0.010087 | 0.008742 | 0.183423 | 0.14694 | 0.102219 | 0.089778 | 0.073302 | 0.061197 | 0 | 0.010555 | 0.290301 | 10,279 | 280 | 89 | 36.710714 | 0.804798 | 0.092421 | 0 | 0.142857 | 0 | 0 | 0.040009 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038961 | false | 0 | 0.073593 | 0 | 0.142857 | 0.004329 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2eaaf9b464b30657bb954716ce433490091b5cd9 | 1,916 | py | Python | train/docker/src/train.py | samuelrojolopez/ml_technical_challenge | 09b3d1e20243e0329042360d10af2d1832126c4a | [
"MIT"
] | null | null | null | train/docker/src/train.py | samuelrojolopez/ml_technical_challenge | 09b3d1e20243e0329042360d10af2d1832126c4a | [
"MIT"
] | null | null | null | train/docker/src/train.py | samuelrojolopez/ml_technical_challenge | 09b3d1e20243e0329042360d10af2d1832126c4a | [
"MIT"
] | null | null | null | # coding: utf-8
import pandas as pd
from glob import glob
from joblib import dump
from imblearn.over_sampling import SMOTE
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
if __name__ == "__main__":
    # SageMaker-style container paths for input data and the model artifact.
    TRAINING_DATA_PATH = "/opt/ml/input/data/train/"
    MODEL_SAVE_DIR = "/opt/ml/model/"
    print("Reading training data...")
    # Gather every parquet shard under the training channel.
    training_data_file_paths = glob(
        TRAINING_DATA_PATH + "/**/*.parquet", recursive=True
    )
    print("Training data files:", training_data_file_paths)
    training_dfs = [
        pd.read_parquet(training_data_file_path)
        for training_data_file_path in training_data_file_paths
    ]
    training_df = pd.concat(training_dfs, ignore_index=True)
    print("All training data:")
    training_df.info(verbose=True, show_counts=True)
    del training_dfs
    print("Reading training data ✅")
    print("Fill Nans ... ")
    training_df.fillna(0, inplace=True)
    print("✅")
    print("Train Model ... ")
    # 'status' is the binary label column; everything else is a feature.
    Y = training_df['status']
    training_df.drop(['status'], axis=1, inplace=True)
    X = training_df
    # Using Synthetic Minority Over-Sampling Technique(SMOTE) to
    # overcome sample imbalance problem.
    # NOTE(review): SMOTE is applied before the train/test split, so
    # synthetic points derived from test rows can leak into training —
    # consider resampling only the training fold.
    Y = Y.astype('int')
    X_balance, Y_balance = SMOTE().fit_resample(X, Y)
    X_balance = pd.DataFrame(X_balance, columns=X.columns)
    X_train, X_test, y_train, y_test = train_test_split(X_balance, Y_balance,
                                                        stratify=Y_balance,
                                                        test_size=0.3,
                                                        random_state=123)
    model = RandomForestClassifier(n_estimators=5)
    model.fit(X_train, y_train)
    print("✅")
    print("Saving artifacts...")
    dump(model, MODEL_SAVE_DIR + 'model_risk.joblib')
    print("Artifact saved", "✅")
    print("SUCCESS")
| 26.246575 | 77 | 0.635699 | 240 | 1,916 | 4.8125 | 0.4125 | 0.114286 | 0.069264 | 0.054545 | 0.050216 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006334 | 0.258351 | 1,916 | 72 | 78 | 26.611111 | 0.803659 | 0.055846 | 0 | 0.045455 | 0 | 0 | 0.139353 | 0.013935 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.136364 | 0 | 0.136364 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2eac1b4af86faa566a0da740a08ed19d0bd17c1d | 646 | py | Python | asynchronous_qiwi/call/API/QIWIWallet/payment_api/currency_rates.py | LexLuthorReal/asynchronous_qiwi | 5847a8d4008493656e973e5283888a4e57234962 | [
"MIT"
] | 3 | 2021-05-20T02:36:30.000Z | 2021-11-28T16:00:15.000Z | asynchronous_qiwi/call/API/QIWIWallet/payment_api/currency_rates.py | LexLuthorReal/asynchronous_qiwi | 5847a8d4008493656e973e5283888a4e57234962 | [
"MIT"
] | null | null | null | asynchronous_qiwi/call/API/QIWIWallet/payment_api/currency_rates.py | LexLuthorReal/asynchronous_qiwi | 5847a8d4008493656e973e5283888a4e57234962 | [
"MIT"
] | 1 | 2021-11-28T16:00:20.000Z | 2021-11-28T16:00:20.000Z | from .....data.URL import QIWIWalletURLS
from .....data_types.connector.request_type import GET
from .....connector.aiohttp_connector import Connector
class CurrencyRatesAPI:
    """Read-only access to the QIWI wallet currency-rates endpoint."""

    @classmethod
    async def get_currency_rates(cls, wallet_api_key: str) -> dict:
        """Fetch the current currency rates, authorized by *wallet_api_key*."""
        headers = {
            "Accept": "application/json",
            "Authorization": f"Bearer {wallet_api_key}",
            "Content-Type": "application/json",
        }
        response = await Connector.request(
            url=QIWIWalletURLS.Payments.currency_rates,
            headers=headers,
            request_type=GET,
        )
        return await response.json()
| 38 | 86 | 0.684211 | 71 | 646 | 6.042254 | 0.507042 | 0.037296 | 0.055944 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.208978 | 646 | 16 | 87 | 40.375 | 0.83953 | 0 | 0 | 0 | 0 | 0 | 0.133127 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.230769 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2eac8b4e05486b52eb12dc6e758aba64c32a1220 | 5,877 | py | Python | okonomiyaki/file_formats/setuptools_egg.py | enthought/okonomiyaki | 51b8b4fa8d17255e13c097402691726545cf5b4c | [
"BSD-3-Clause"
] | 1 | 2021-06-01T16:35:00.000Z | 2021-06-01T16:35:00.000Z | okonomiyaki/file_formats/setuptools_egg.py | enthought/okonomiyaki | 51b8b4fa8d17255e13c097402691726545cf5b4c | [
"BSD-3-Clause"
] | 249 | 2015-02-24T19:06:53.000Z | 2021-07-30T09:01:53.000Z | okonomiyaki/file_formats/setuptools_egg.py | enthought/okonomiyaki | 51b8b4fa8d17255e13c097402691726545cf5b4c | [
"BSD-3-Clause"
] | 4 | 2015-02-19T21:29:12.000Z | 2016-01-14T21:02:39.000Z | import os.path
import re
import sys
try:
import sysconfig
except ImportError: # Python 2.6 support
sysconfig = None
import warnings
from okonomiyaki.errors import OkonomiyakiError
from okonomiyaki.platforms import PythonImplementation
from okonomiyaki.utils import py3compat
from ._egg_info import _guess_python_tag
from ._package_info import PackageInfo
# Setuptools egg basename pattern:
#   <name>-<version>-py<major.minor>[-<platform>].egg
# The 'platform' group is absent for cross-platform (pure python) eggs.
_R_EGG_NAME = re.compile(r"""
(?P<name>^[^.-]+)
(-(?P<version>[^-]+))
(-py(?P<pyver>(\d+\.\d+)))
(-(?P<platform>.+))?
\.egg$
""", re.VERBOSE)
def parse_filename(path):
    """
    Parse a setuptools egg filename.

    Returns
    -------
    name : str
        the egg name
    version : str
        the egg version
    python_version : str
        the python version
    platform : str or None
        the platform string, or None for platform-independent eggs.

    Raises
    ------
    OkonomiyakiError
        if the basename does not match the setuptools egg pattern.
    """
    match = _R_EGG_NAME.search(os.path.basename(path))
    if match is None:
        raise OkonomiyakiError("Invalid egg name: {0}".format(path))
    return (match.group("name"), match.group("version"),
            match.group("pyver"), match.group("platform"))
def _get_default_setuptools_abi(platform_string, pyver):
""" Try to guess the ABI for setuptools eggs from the platform_string
and pyver parts.
Parameters
----------
platform_string: str
The platform part of the setuptools egg filename as a string. If
None, understood as a cross platform egg (pure python).
pyver: str
The python version
"""
assert pyver in ("2.6", "2.7")
if platform_string is None:
return None
else:
if (platform_string.startswith("linux")
or platform_string.startswith("win")
or platform_string.startswith("macosx")):
return 'cp{0}{1}m'.format(pyver[0], pyver[2])
else:
msg = "Platform string {0!r} not supported".format(platform_string)
raise ValueError(msg)
# Sentinel distinguishing "argument not supplied" from an explicit None.
_UNSPECIFIED = object()
def _guess_abi_from_python(python):
# For legacy (aka legacy spec version info < 1.3), we know that pyver
# can only be one of "2.X" with X in (5, 6, 7).
#
# In those cases, the mapping (platform pyver) -> ABI is unambiguous,
# as we only ever used one ABI for a given python version/platform.
if python.major >= 3 and python.minor >= 8:
# Python 3.8 has removed the `m` from the abi tag
return u"cp{0}{1}".format(python.major, python.minor)
else:
return u"cp{0}{1}m".format(python.major, python.minor)
def _guess_abi_from_running_python():
if sysconfig is None:
soabi = None
else:
try:
soabi = sysconfig.get_config_var('SOABI')
except IOError as e: # pip issue #1074
warnings.warn("{0}".format(e), RuntimeWarning)
soabi = None
if soabi and soabi.startswith('cpython-'):
return 'cp' + soabi.split('-', 2)[1]
else:
return None
def _guess_abi(platform, python):
    """Guess the PEP425 abi tag for a platform/python pair.

    Returns None for cross-platform eggs; raises OkonomiyakiError when
    no guess can be made.
    """
    # Cross-platform egg: no ABI.
    if platform is None:
        return None

    # A python different from the running interpreter: derive the ABI
    # from its version directly.
    if python is not None and (python.major, python.minor) != sys.version_info[:2]:
        return _guess_abi_from_python(python)

    abi = _guess_abi_from_running_python()
    if abi is not None:
        return abi
    if python is not None:
        return _guess_abi_from_python(python)

    raise OkonomiyakiError(
        "Could not guess ABI, you need to specify the abi_tag "
        "argument to from_egg, e.g. 'cp34m' for Enthought "
        "CPython 3.4 runtimes"
    )
class SetuptoolsEggMetadata(object):
    """Metadata (name, version, PEP425 tags, PKG-INFO) of a setuptools egg."""

    @classmethod
    def from_egg(cls, path, platform=None, python=_UNSPECIFIED,
                 abi_tag=_UNSPECIFIED):
        """Create an instance from an egg file on disk.

        Parameters
        ----------
        path: str
            Path to the egg file.
        platform: EPDPlatform or None
            Target platform; must not be None for platform-specific eggs.
        python: PythonImplementation
            When unspecified, guessed from the ``pyX.Y`` filename part.
        abi_tag: str
            When unspecified, guessed from `platform` and `python`.
        """
        filename = os.path.basename(path)
        name, version, pyver, platform_string = parse_filename(filename)

        if platform is None and platform_string is not None:
            msg = ("Platform-specific egg detected: you need to specify a "
                   "platform argument that is not None to from_egg")
            raise OkonomiyakiError(msg)

        if python is _UNSPECIFIED:
            python = PythonImplementation.from_string(_guess_python_tag(pyver))

        # A caller-supplied abi_tag is used as-is (the original code had a
        # redundant `else: abi_tag = abi_tag` branch here).
        if abi_tag is _UNSPECIFIED:
            abi_tag = _guess_abi(platform, python)

        pkg_info = PackageInfo.from_egg(path)
        return cls(name, version, platform, python, abi_tag, pkg_info)

    def __init__(self, name, version, platform, python, abi_tag, pkg_info):
        """
        Parameters
        ----------
        name: str
            Package name
        version: str
            Version string
        platform: EPDPlatform
            An EPDPlatform instance, or None for cross-platform eggs
        python: PythonImplementation
            The PEP425 python tag, or None.
        abi_tag: str
            The PEP425 abi tag, or None.
        pkg_info: PackageInfo
            Instance representing the egg PKG-INFO.
        """
        self.name = name
        self.version = version
        self.platform = platform

        # Accept either a PythonImplementation or its string representation.
        if isinstance(python, py3compat.string_types):
            python = PythonImplementation.from_string(python)
        self.python = python

        self.abi_tag = abi_tag
        self._pkg_info = pkg_info

    @property
    def platform_tag(self):
        """ Platform tag following PEP425, except that no platform is
        represented as None and not 'any'."""
        if self.platform is None:
            return None
        return self.platform.pep425_tag

    @property
    def python_tag(self):
        """ Python tag following PEP425, or None. """
        if self.python is None:
            return None
        return self.python.pep425_tag

    @property
    def summary(self):
        """ Summary line from the egg's PKG-INFO. """
        return self._pkg_info.summary
| 29.681818 | 79 | 0.609495 | 734 | 5,877 | 4.741144 | 0.223433 | 0.022414 | 0.017241 | 0.018391 | 0.13046 | 0.083908 | 0.03908 | 0.021839 | 0 | 0 | 0 | 0.013311 | 0.29692 | 5,877 | 197 | 80 | 29.832487 | 0.828896 | 0.219159 | 0 | 0.245763 | 0 | 0 | 0.115872 | 0.010827 | 0 | 0 | 0 | 0 | 0.008475 | 1 | 0.084746 | false | 0 | 0.09322 | 0.008475 | 0.330508 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ead2418ac79686f81faf8e79b899ef5370e790b | 1,145 | py | Python | Python/leetcode.271.encode-and-decode-strings.py | tedye/leetcode | 975d7e3b8cb9b6be9e80e07febf4bcf6414acd46 | [
"MIT"
] | 4 | 2015-10-10T00:30:55.000Z | 2020-07-27T19:45:54.000Z | Python/leetcode.271.encode-and-decode-strings.py | tedye/leetcode | 975d7e3b8cb9b6be9e80e07febf4bcf6414acd46 | [
"MIT"
] | null | null | null | Python/leetcode.271.encode-and-decode-strings.py | tedye/leetcode | 975d7e3b8cb9b6be9e80e07febf4bcf6414acd46 | [
"MIT"
] | null | null | null | class Codec:
def encode(self, strs):
"""Encodes a list of strings to a single string.
:type strs: List[str]
:rtype: str
"""
# encode format '<total_length> <seperate length connected with -> <joined str>'
if not strs:
return ''
separate_length = '-'.join([str(len(s)) for s in strs])
total_length = str(len(separate_length))
return total_length+' '+separate_length+' '+''.join(strs)
def decode(self, s):
"""Decodes a single string to a list of strings.
:type s: str
:rtype: List[str]
"""
if not s:
return []
ind = s.index(' ')
total_length = int(s[:ind])
separate_length = [int(i) for i in s[ind+1:ind+1+total_length].split('-')]
joined_str = s[ind+2+total_length:]
result = []
cur = 0
for i in separate_length:
result += joined_str[cur:cur + i],
cur += i
return result
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.decode(codec.encode(strs)) | 30.131579 | 88 | 0.535371 | 146 | 1,145 | 4.109589 | 0.369863 | 0.11 | 0.023333 | 0.046667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005284 | 0.338865 | 1,145 | 38 | 89 | 30.131579 | 0.787318 | 0.302183 | 0 | 0 | 0 | 0 | 0.006944 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0 | 0 | 0.35 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2eaee1f5ab79283db12741ad1f4f0ff16bd8988e | 3,761 | py | Python | shipyard/rules/base/build.py | clchiou/garage | 446ff34f86cdbd114b09b643da44988cf5d027a3 | [
"MIT"
] | 3 | 2016-01-04T06:28:52.000Z | 2020-09-20T13:18:40.000Z | shipyard/rules/base/build.py | clchiou/garage | 446ff34f86cdbd114b09b643da44988cf5d027a3 | [
"MIT"
] | null | null | null | shipyard/rules/base/build.py | clchiou/garage | 446ff34f86cdbd114b09b643da44988cf5d027a3 | [
"MIT"
] | null | null | null | """Base of all build rules."""
from pathlib import Path
from garage import scripts
from foreman import define_parameter, rule, to_path
# Common path parameters shared by every build rule in this repository.

(define_parameter.path_typed('root')
 .with_doc('Path to the root directory of this repository.')
 .with_default(Path(__file__).parent.parent.parent.parent))

(define_parameter.path_typed('input')
 .with_doc('Path to the directory input data.')
 .with_default(Path.home() / 'input'))

(define_parameter.path_typed('drydock')
 .with_doc('Path to the directory of intermediate build artifacts.')
 .with_default(Path.home() / 'drydock'))

(define_parameter.path_typed('output')
 .with_doc('Path to the directory of final build artifacts.')
 .with_default(Path.home() / 'output'))

# When True, the `build` rule additionally depends on `upgrade_system`.
(define_parameter.bool_typed('release')
 .with_doc('Enable release mode for builds.')
 .with_default(False))


# Handy derived parameters

(define_parameter.path_typed('drydock/build')
 .with_doc('Path to the directory of unarchived image contents.')
 .with_derive(lambda ps: ps['drydock'] / 'build'))

(define_parameter.path_typed('drydock/manifest')
 .with_doc('Path to the image manifest.')
 .with_derive(lambda ps: ps['drydock/build'] / 'manifest'))

(define_parameter.path_typed('drydock/rootfs')
 .with_doc('Path to the image rootfs.')
 .with_derive(lambda ps: ps['drydock/build'] / 'rootfs'))
@rule
def upgrade_system(parameters):
    """Upgrade system packages."""
    # Both apt-get commands require root, hence the sudo context; the
    # index is refreshed before the full upgrade.
    with scripts.using_sudo():
        scripts.apt_get_update()
        scripts.apt_get_full_upgrade()
@rule
@rule.depend('upgrade_system', when=lambda ps: ps['release'])
def build(parameters):
    """Prepare for the build process.

    NOTE: All `build` rules should depend on this rule.
    """

    # Sanity check: `root` must point at a git checkout of this repository.
    scripts.ensure_directory(parameters['root'] / '.git')

    scripts.install_dependencies()

    # Populate drydock: one sub-directory per language toolchain, plus the
    # image build area and its rootfs.
    for subdir in ('cc', 'host', 'java', 'py'):
        scripts.mkdir(parameters['drydock'] / subdir)
    scripts.mkdir(parameters['drydock/build'])
    scripts.mkdir(parameters['drydock/rootfs'])

    # Populate output (unfortunately `output` could accidentally be
    # absent or not owned by plumber for a variety of reasons).
    scripts.mkdir(parameters['output'])
    with scripts.using_sudo():
        scripts.execute([
            'chown',
            '--recursive', 'plumber:plumber',
            parameters['output'],
        ])
@rule
@rule.depend('build')
def tapeout(parameters):
    """Tape-out the base system.

    NOTE: All `tapeout` rules should reverse depend on this rule.
    """
    # Everything below writes into the image rootfs, hence the sudo context.
    with scripts.using_sudo():
        rootfs = parameters['drydock/rootfs']
        scripts.rsync([to_path('etc')], rootfs)
        # Re-generate cache and then tapeout it.
        scripts.execute(['ldconfig'])
        scripts.rsync(['/etc/ld.so.cache'], rootfs, relative=True)
        # Dynamic loader and core C library directories.
        scripts.rsync(
            ['/lib/x86_64-linux-gnu', '/lib64'],
            rootfs,
            relative=True,
        )
        # Tapeout the entire /usr/lib/x86_64-linux-gnu might be an
        # overkill, but it's kind hard to cherry-pick just what I need.
        scripts.rsync(
            ['/usr/lib/x86_64-linux-gnu'],
            rootfs,
            relative=True,
            excludes=['/usr/lib/x86_64-linux-gnu/perl*'],
        )
        # Tapeout only shared libraries under /usr/local/lib, which
        # excludes Python modules (if you also want to tapeout /usr/lib,
        # remember to only tapeout shared libraries under it).
        scripts.rsync(
            Path('/usr/local/lib').glob('lib*.so*'),
            rootfs,
            relative=True,
        )
        # Rsync'ed files keep the invoking user's ownership: reset /etc to
        # root and strip group/other write permission.
        scripts.execute(['chown', '--recursive', 'root:root', rootfs / 'etc'])
        scripts.execute(['chmod', '--recursive', 'go-w', rootfs / 'etc'])
| 29.155039 | 78 | 0.650625 | 468 | 3,761 | 5.111111 | 0.33547 | 0.056438 | 0.055602 | 0.070234 | 0.241221 | 0.153428 | 0.073997 | 0 | 0 | 0 | 0 | 0.006034 | 0.20686 | 3,761 | 128 | 79 | 29.382813 | 0.795843 | 0.194629 | 0 | 0.202703 | 0 | 0 | 0.260403 | 0.025839 | 0 | 0 | 0 | 0 | 0 | 1 | 0.040541 | false | 0 | 0.040541 | 0 | 0.081081 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2eafb50bbce7f7a6466757554ee9222412c1f278 | 1,099 | py | Python | examples/crud/authentication.py | fossabot/vial | 0e33a68f5e0dd6a9a9877163f263114fc9e5f774 | [
"MIT"
] | 4 | 2019-02-07T14:58:21.000Z | 2021-01-28T08:43:46.000Z | examples/crud/authentication.py | fossabot/vial | 0e33a68f5e0dd6a9a9877163f263114fc9e5f774 | [
"MIT"
] | 8 | 2018-12-29T14:08:48.000Z | 2021-03-19T07:54:54.000Z | examples/crud/authentication.py | fossabot/vial | 0e33a68f5e0dd6a9a9877163f263114fc9e5f774 | [
"MIT"
] | 3 | 2019-01-07T05:50:39.000Z | 2020-10-24T10:26:54.000Z | from functools import wraps
from models import User
class AuthenticationService:
    """Token-based authentication helpers for request handlers."""

    @classmethod
    def get_profile_from_token(cls, token):
        """Return the User whose auth_token equals `token`; raise if none."""
        user = User.get(where={'auth_token': token})
        if not user:
            raise Exception("No profile for this token")
        return user

    @classmethod
    def is_maintainer(cls, user):
        """Raise unless `user` carries a truthy 'maintainer' flag."""
        if not user.get('maintainer', ''):
            raise Exception("Not a maintainer profile")

    @classmethod
    def is_authenticated(cls, func):
        """Decorator: only let maintainer-authenticated requests through.

        Expects an "Authorization: Bearer <token>" header on `request`.
        On any failure the wrapped handler is not called; a 401 payload
        is returned instead.

        (First parameter renamed from the misleading `self` to `cls` —
        this is a classmethod.)
        """
        @wraps(func)
        def wrapper(request, *args, **kwargs):
            try:
                token = request.headers.get('authorization', '').replace("Bearer", "").strip()
                if not token:
                    raise Exception("No auth_token in header")
                user = AuthenticationService.get_profile_from_token(token)
                AuthenticationService.is_maintainer(user)
            except Exception as e:
                return {"status": 401, "message": str(e), "success": False}
            return func(request, *args, **kwargs)
        return wrapper
| 28.179487 | 94 | 0.591447 | 116 | 1,099 | 5.508621 | 0.439655 | 0.065728 | 0.043818 | 0.059468 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003937 | 0.306642 | 1,099 | 38 | 95 | 28.921053 | 0.834646 | 0 | 0 | 0.111111 | 0 | 0 | 0.119199 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.148148 | false | 0 | 0.074074 | 0 | 0.407407 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2eb0484af9cce8ea841dc9edc7631fe9d4a6ea0c | 13,938 | py | Python | extractVerb.py | StatisticalProject/timetobirth | 0a7acfe51d8eb5195e9d5cbe3e7378a0b73a3842 | [
"Apache-2.0"
] | null | null | null | extractVerb.py | StatisticalProject/timetobirth | 0a7acfe51d8eb5195e9d5cbe3e7378a0b73a3842 | [
"Apache-2.0"
] | null | null | null | extractVerb.py | StatisticalProject/timetobirth | 0a7acfe51d8eb5195e9d5cbe3e7378a0b73a3842 | [
"Apache-2.0"
] | null | null | null | import urllib, json
import urllib.request,os
import socket
import random
import re
# Global network timeout so a dead proxy cannot hang a request forever.
socket.setdefaulttimeout(50)
#https://www.hide-my-ip.com/fr/proxylist.shtml
# .*(\[\{.*\]).*
# API user keys; only the KEYS list below is actually used (each request
# picks one at random) — the repeated KEY assignments are historical.
KEY="A00239D3-45F6-4A0A-810C-54A347F144C2"
KEY="720129CF233B4CA2DF97DAE48C8717E1"
KEY="35d28185-0ca1-47f0-8caf-edc457802c9d"
KEY="B36714DE794D0080A183B5A12BEAF8B4"
KEYS=["B36714DE794D0080A183B5A12BEAF8B4","35d28185-0ca1-47f0-8caf-edc457802c9d","A00239D3-45F6-4A0A-810C-54A347F144C2","720129CF233B4CA2DF97DAE48C8717E1","A00239D3-45F6-4A0A-810C-54A347F144C2"]#,"9b0610b4-cc4e-455a-b64d-e2fd01b5b086"]
# Series / season ids for the Fox Sports rugby API (IDSEASON is later
# overwritten after fetching currentseason.json).
SERIES="1"
IDSEASON="118"
# Seed proxy list, extended below by scraping hide-my-ip.com.
urlProxys=["http://104.28.26.15:80","http://195.89.201.48:80"]
# Scrape the embedded JSON proxy array out of the HTML page: the regex
# substitutions trim everything before '[{"i"' and after '}]', then the
# replace() calls strip escape noise so json.loads can parse it.
with urllib.request.urlopen('https://www.hide-my-ip.com/fr/proxylist.shtml') as response:
    print("load list")
    html = str(response.read())
    #p = re.compile('.*(\[\{"i".*\}\])\;.*')
    #line=p.match(html).group(0)
    print ("parse1")
    p = re.compile('.*\[\{"i"')
    print ("parse2")
    liner=p.sub( '[{"i"', html, count=1)
    print ("parse3")
    p = re.compile('\}\];.*')
    print ("parse4")
    line2=p.sub( '}]', liner, count=1)
    print ("parse5")
    line3=line2.replace("\\n","")
    line4=line3.replace("\\","")
    line=line4.replace("}, n {","},{").replace(" n ","")
    #line = re.sub(p, "%s", html)
    # Debug copy of the recovered JSON.
    f = open("jjj.json", 'w')
    f.write(line)
    f.close()
    json_object = json.loads(line)
    print ("mount node")
    # Each entry exposes "i" (ip) and "p" (port).
    for jsone in json_object:
        urlProxys.append("http://"+jsone["i"]+":"+jsone["p"])
# Default opener (unused once loadAndWrite picks per-request proxies).
proxy = urllib.request.FancyURLopener({"http":"http://201.208.63.214:3128"})
#proxy = urllib.request.URLopener()
def is_json(myjson):
    """Return True if `myjson` is a string containing valid JSON."""
    try:
        json.loads(myjson)
    except (ValueError, TypeError):
        # ValueError: malformed JSON; TypeError: non-string input.
        return False
    return True
def is_file_json(myjson):
    """Return True if `myjson` names an existing file containing valid JSON."""
    if not os.path.exists(myjson):
        return False
    # `with` closes the handle deterministically (the original leaked it).
    with open(myjson, 'r') as handle:
        return is_json(handle.read())
def loadAndWrite(myjson, filie):
    """Download the JSON document at URL `myjson` through a random proxy
    and write it to the file `filie`.

    Retries with a fresh proxy on failure and gives up after 16 attempts
    (the original only counted ValueError failures, so a dead proxy made
    the OSError branch retry forever).
    """
    counter = 0
    while True:
        # Use the same proxy for the request and the log line (the
        # original logged a different random pick than it used).
        proxyurl = random.choice(urlProxys)
        try:
            proxy = urllib.request.FancyURLopener({"http": proxyurl})
            with proxy.open(myjson, data=None) as url:
                response = url.read().decode('utf-8')
                json.loads(response)  # validate before writing to disk
                with open(filie, 'w') as f:
                    f.write(response)
                print("nice:" + filie)
                return
        except ValueError:
            print('error ' + str(counter) + ':' + myjson + ":" + filie + ":" + proxyurl)
        except OSError:
            print('retry:' + myjson + ":" + filie + ":" + proxyurl)
        counter = counter + 1
        if counter > 15:
            return
# ---------------------------------------------------------------------------
# Crawl the Fox Sports rugby API into a local tree of JSON files.  Every
# fetch is guarded by is_file_json(), so an interrupted crawl resumes
# where it stopped.  (The original script repeated the mkdir/fetch code
# verbatim for every endpoint and duplicated the whole player loop for
# team_A and team_B; that duplication is factored into helpers below.)
# ---------------------------------------------------------------------------

API_BASE = "http://api.stats.foxsports.com.au/3.0/api/sports/rugby"


def _ensure_tree(*parts):
    """Create each prefix of the path parts and return the joined path."""
    path = ""
    for part in parts:
        path = part if path == "" else path + "/" + part
        if not os.path.exists(path):
            os.mkdir(path)
    return path


def _fetch(url, target):
    """Download `url` into `target` unless a valid JSON copy already exists."""
    if not is_file_json(target):
        loadAndWrite(url, target)


def _read_json(path):
    """Load and parse the JSON file at `path`."""
    with open(path, 'r') as handle:
        return json.loads(handle.read())


def _fetch_player(match_id, player_id):
    """Fetch per-match stats, season stats and biography for one player."""
    base = _ensure_tree("players", match_id, player_id)
    _fetch(API_BASE + "/matches/" + match_id + "/players/" + player_id
           + "/stats.json?userkey=" + random.choice(KEYS),
           base + "/stats.json")
    base = _ensure_tree("playersAll", IDSEASON, player_id)
    _fetch(API_BASE + "/series/" + SERIES + "/seasons/" + IDSEASON
           + "/players/" + player_id + "/stats.json?userkey=" + random.choice(KEYS),
           base + "/stats.json")
    base = _ensure_tree("biography", player_id)
    _fetch(API_BASE + "/players/" + player_id + "/biography.json?userkey="
           + random.choice(KEYS),
           base + "/biography.json")


# Current season (note: this endpoint is fetched without a userkey).
season_file = _ensure_tree("currenseason", SERIES) + "/currentseason.json"
_fetch(API_BASE + "/series/" + SERIES + "/currentseason.json", season_file)
IDSEASON = str(_read_json(season_file)["current_season"]["id"])
IDSEASON = "116"  # NOTE: hard-coded override kept from the original script.

# Conferences.
_ensure_tree("conferences")
_fetch(API_BASE + "/conferences.json?userkey=" + random.choice(KEYS),
       "conferences/conferences.json")

# Season ladder; its team list drives the rest of the crawl.
ladder_file = _ensure_tree("ladder", SERIES, IDSEASON) + "/lader.json"
_fetch(API_BASE + "/series/" + SERIES + "/seasons/" + IDSEASON
       + "/ladder.json?userkey=" + random.choice(KEYS), ladder_file)
dataTeam = _read_json(ladder_file)

for team_index, team in enumerate(dataTeam['teams'], 1):
    print("TT " + str(team_index) + "/" + str(len(dataTeam['teams'])))
    TEAM = str(team['id'])

    fixtures_file = (_ensure_tree("fixturesandresultswithbyes", SERIES, IDSEASON, TEAM)
                     + "/fixturesandresultswithbyes.json")
    _fetch(API_BASE + "/series/" + SERIES + "/seasons/" + IDSEASON + "/teams/"
           + TEAM + "/fixturesandresultswithbyes.json?userkey=" + random.choice(KEYS),
           fixtures_file)

    summary_file = _ensure_tree("summary", SERIES, IDSEASON, TEAM) + "/summary.json"
    _fetch(API_BASE + "/series/" + SERIES + "/seasons/" + IDSEASON + "/teams/"
           + TEAM + "/summary.json?userkey=" + random.choice(KEYS), summary_file)

    try:
        dataMatchId = _read_json(fixtures_file)
    except Exception:
        # Missing or invalid download: skip this team's matches.
        print("err:" + fixtures_file)
        dataMatchId = []

    for match_index, match in enumerate(dataMatchId, 1):
        print("NB " + str(match_index) + "/" + str(len(dataMatchId)))
        MATCHID = match['match_id']
        if MATCHID is None:
            continue

        # Per-match documents, one directory per endpoint.
        for endpoint in ("scoreboard", "breakdown", "teamstats",
                         "commentary", "players"):
            target = _ensure_tree(endpoint, MATCHID) + "/" + endpoint + ".json"
            _fetch(API_BASE + "/matches/" + MATCHID + "/" + endpoint
                   + ".json?userkey=" + random.choice(KEYS), target)

        # Player documents for both sides of the match.
        players_file = "players/" + MATCHID + "/players.json"
        if is_file_json(players_file):
            dataPlayers = _read_json(players_file)
            for side in ('team_A', 'team_B'):
                for player in dataPlayers[side]['players']:
                    _fetch_player(MATCHID, str(player['id']))
2eb2116844f3cc0a0343be2f4899a9026e234186 | 9,739 | py | Python | cli.py | summunity/DjangoReact_CLI | 89f4b60028c04a800aa44f1476c63bab27e0b382 | [
"MIT"
] | null | null | null | cli.py | summunity/DjangoReact_CLI | 89f4b60028c04a800aa44f1476c63bab27e0b382 | [
"MIT"
] | null | null | null | cli.py | summunity/DjangoReact_CLI | 89f4b60028c04a800aa44f1476c63bab27e0b382 | [
"MIT"
] | null | null | null | """
Diamond Cronjob
=================
Event Detection based on data stored in Diamond
:Author: Nik Sumikawa
:Date: Nov 3, 2020
"""
import logging
log = logging.getLogger(__name__)
import pandas as pd
from src.apps.interface import *
from src.git.interface import *
import os
class CLI:
    """Interactive command-line loop for launching/listing/killing apps
    and running git operations, driven by a small state machine.

    States: 0 = main menu, 1 = launch app, 2 = list active apps,
    3 = kill app, 4 = git pull, 5 = git push, -1 = quit.

    (A large body of commented-out prototype methods was removed; the
    live state handlers come from src.apps.interface / src.git.interface.)
    """

    def __init__(self):
        self.state = 0
        self.active_threads = []

        # Application definitions live next to this module.
        path = os.path.dirname(os.path.realpath(__file__))
        self.config = pd.read_json('%s/config.json' % path)

        # NOTE: the constructor blocks here until the user quits.
        self.run()

    def run(self):
        """Dispatch on the current state until quit (-1) or Ctrl-C."""
        while True:
            try:
                if self.state == -1:
                    break
                if self.state == 0:
                    self.initial_state()
                if self.state == 1:
                    self.state, app = launch_app(self.state, self.config)
                    if app != None:
                        self.active_threads.append(app)
                if self.state == 2:
                    self.state = list_apps(self.state, self.active_threads)
                if self.state == 3:
                    self.state, self.active_threads = kill_app(self.state, self.active_threads)
                if self.state == 4:
                    self.state, app = git('pull', self.state, self.config)
                if self.state == 5:
                    self.state, app = git('push', self.state, self.config)
            except KeyboardInterrupt:
                break

    def initial_state(self):
        """Print the main menu and set the next state from user input."""
        command_str = """
        What do you want to do:
        1: Launch app
        2: Active apps
        3: disable app
        4: Update project (git pull)
        5: Commit project (git push)
        q: quit
        """
        command_str = self.format_cmd_prompt(command_str)
        user_input = input(command_str)

        try:
            user_input = int(user_input)
        except ValueError:  # non-numeric input: quit keyword or error
            if user_input == 'q' or user_input == 'quit':
                self.state = -1
            else:
                print('Invalid Input : %s' % user_input)
            return

        if user_input > 5:
            print('Invalid Input : %s' % user_input)
            return

        self.state = user_input

    def git_pull(self, app):
        """Run `git pull` for `app` in a background CmdThread.

        NOTE(review): run() dispatches git work through the module-level
        `git` helper; this method appears to be unused — confirm before
        removing.
        """
        from cmd_thread import CmdThread

        thread = CmdThread(app, ['git pull'])
        thread.start()
        thread.setName('Launch-%s' % app['title'])
        self.active_threads.append(thread)

    def git_push(self, app):
        """Prompt for a commit message, then add/commit/push for `app`
        in a background CmdThread."""
        from cmd_thread import CmdThread

        user_input = input("Commit Message: \n")
        cmd = [
            'git add -A',
            'git commit -m %s' % user_input,
            'git push'
        ]

        thread = CmdThread(app, cmd)
        thread.start()
        thread.setName('Launch-%s' % app['title'])
        self.active_threads.append(thread)

    def format_cmd_prompt(self, cmd):
        """Strip leading whitespace from every line of `cmd`."""
        formatted = ''
        for line in cmd.split('\n'):
            formatted += line.lstrip() + '\n'
        return formatted
if __name__ == "__main__":
    # Start the interactive CLI loop (blocks until the user quits).
    # A large region of commented-out experiments (thread prototypes,
    # Cronjob calls and an argparse skeleton) previously lived here and
    # has been removed as dead code.
    CLI()
| 26.60929 | 95 | 0.505904 | 1,058 | 9,739 | 4.531191 | 0.192817 | 0.054443 | 0.046099 | 0.022528 | 0.520442 | 0.48957 | 0.466208 | 0.466208 | 0.423237 | 0.423237 | 0 | 0.011472 | 0.364514 | 9,739 | 365 | 96 | 26.682192 | 0.763128 | 0.55478 | 0 | 0.207317 | 0 | 0 | 0.095471 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.073171 | false | 0 | 0.134146 | 0 | 0.256098 | 0.02439 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2eb55e581f300c9a4e077505d8a2d1a9bac31f8e | 1,652 | py | Python | homework.py | dieundead/studies | 7da3a15b576ec1e2953c7fdf5762d88a42dea1d3 | [
"MIT"
] | null | null | null | homework.py | dieundead/studies | 7da3a15b576ec1e2953c7fdf5762d88a42dea1d3 | [
"MIT"
] | null | null | null | homework.py | dieundead/studies | 7da3a15b576ec1e2953c7fdf5762d88a42dea1d3 | [
"MIT"
] | null | null | null | testlist_bubblesort = [40,723, 781, 610, 440, 819, 728,
280, 998, 744, 40, 303, 708, 279, 910]
# Unsorted sample data consumed by the demo prints at the end of the file.
testlist_decision = [560, 769, 349, 97, 881, 108, 827,
                     131, 301, 952, 255, 787, 625, 549, 5]
testlist_insertion = [984, 738, 859, 281, 548, 399, 894,
                      53,145,260,787,140, 472, 566, 752]
def bubbles_sort(list):
    """Bubble sort: repeatedly swap adjacent out-of-order pairs, in place.

    Returns the same (now sorted) list object.
    NOTE: the parameter name shadows the builtin ``list``; kept for
    backward compatibility with existing callers.
    """
    for i in range(0, len(list) - 1, 1):
        swapped = False
        # After i full passes the last i elements are already in their
        # final position, so the inner pass can stop early.
        for j in range(0, len(list) - 1 - i, 1):
            if list[j] > list[j + 1]:
                list[j], list[j + 1] = list[j + 1], list[j]
                swapped = True
        # A pass with no swaps means the list is sorted -- stop early.
        if not swapped:
            break
    return list
def decisions_sort(list):
    """Selection sort, in place: move the minimum of the unsorted tail
    to the front of that tail on each pass. Returns the same list.

    FIX: the original inner loop ran ``range(i, len(list)-1)`` and so
    never considered the *last* element as a minimum candidate; e.g.
    decisions_sort([2, 1]) returned [2, 1] unsorted.

    NOTE: the parameter name shadows the builtin ``list``; kept for
    backward compatibility with existing callers.
    """
    for i in range(0, len(list) - 1, 1):
        minimal_number = i
        # Scan the whole remaining tail (including the last element).
        for j in range(i + 1, len(list), 1):
            if list[j] < list[minimal_number]:
                minimal_number = j
        list[i], list[minimal_number] = list[minimal_number], list[i]
    return list
def insertions_sort(list):
    """Insertion sort, in place. Returns the same (now sorted) list.

    FIX: the original popped ``list[i+1]`` and only re-inserted it when
    some earlier element was larger; otherwise the value was silently
    *dropped* (e.g. insertions_sort([2, 1]) returned [2]). Rewritten as
    the standard shift-and-insert algorithm.

    NOTE: the parameter name shadows the builtin ``list``; kept for
    backward compatibility with existing callers.
    """
    for i in range(1, len(list)):
        catchvariable = list[i]
        j = i - 1
        # Shift larger elements one slot right, then drop the value in.
        while j >= 0 and list[j] > catchvariable:
            list[j + 1] = list[j]
            j -= 1
        list[j + 1] = catchvariable
    return list
# Demo: print each test list before and after sorting with its algorithm.
# (Labels are Russian for "First/Second/Third array before/after sorting".)
for ordinal, data, sorter in (
    ('Первый', testlist_bubblesort, bubbles_sort),
    ('Второй', testlist_decision, decisions_sort),
    ('Третий', testlist_insertion, insertions_sort),
):
    print(ordinal + ' массив до сортировки: \n', data)
    print(ordinal + ' массив после сортировки: \n', sorter(data))
2eb564479c3dbf656497d8b1066141ca9f3fba07 | 4,728 | py | Python | bookstore/src/book/dao/BookDao.py | mehulk99/Online-Book-Store | 27c64b31e6fd33ae3db0bde351f889518de41eb1 | [
"Apache-2.0"
] | null | null | null | bookstore/src/book/dao/BookDao.py | mehulk99/Online-Book-Store | 27c64b31e6fd33ae3db0bde351f889518de41eb1 | [
"Apache-2.0"
] | null | null | null | bookstore/src/book/dao/BookDao.py | mehulk99/Online-Book-Store | 27c64b31e6fd33ae3db0bde351f889518de41eb1 | [
"Apache-2.0"
] | null | null | null | '''
Created on 14-May-2019
@author: mehulk99
'''
from flask import jsonify
from bookstore.src.config import BookConfig
from bookstore.src.dao.DataAccessor import DataAccessor
from ...category.CategoryService import CategoryService
from ...models.core.business.books.BookRepository import BookRepository
from ...searchengine import SearchEngine
searchEngine = SearchEngine()
class BookDao(DataAccessor):
    '''
    Data-access object for the "books" table.

    Issues raw SQL reads through DataAccessor.read and delegates
    free-text search to the module-level SearchEngine instance.
    '''

    __collection__ = "books"

    def __init__(self):
        '''
        Constructor
        '''
        super(BookDao, self).__init__()
        #self.collection = self.database[self.__collection__]
        self.categoryService = CategoryService()
        self.bookRepo = None
        # Maps a sortParam code to a sort direction and field; used by
        # filterBooks.
        self.sortMapper = {
            0: {'reverse': False, 'key': 'isbn'},
            1: {'reverse': False, 'key': 'price'},
            2: {'reverse': True, 'key': 'yop'}
        }

    def get_popular_books(self):
        """Return books whose rating score falls inside the configured
        "popular" window; None if the query raises (error is printed)."""
        book_list = list()
        try:
            query = ("select books.isbn as isbn, title, authors, publisher, DATE_FORMAT(yop,'%Y-%m-%d') as yop, available_copies, price, format, keywords, subject,image_loc, category_id "
                     "from books"
                     " left join rating"
                     " on rating.isbn = books.isbn"
                     " where rating.score <={} and rating.score> {};").format(BookConfig.popular_max_rt, BookConfig.popular_min_rt)
            book_list = super(BookDao, self).read(query=query)
            return book_list
        except Exception as e:
            print(e, "get_popular_books")

    def get_all_books(self):
        """Load every book row and cache it in self.bookRepo."""
        try:
            query = (
                "select isbn, title, authors, publisher, DATE_FORMAT(yop,'%Y-%m-%d') as yop, "
                "available_copies, price, format, keywords, subject,image_loc "
                "from books")
            book_list = super(BookDao, self).read(query=query)
            self.bookRepo = BookRepository(book_list)
        except Exception as e:
            print(e, "get_all_books")

    def get_all_books_by_cat(self):
        """Return (title, category_id, category name) rows for all books."""
        try:
            query = (
                "select books.title ,books.category_id, category.name from books left join category on books.category_id = category.cat_id;")
            book_list = super(BookDao, self).read(query=query)
            return book_list
        except Exception as e:
            print(e, "get_all_books")

    def get_books_by_category(self, catData):
        """Return books for the categories CategoryService resolves from
        ``catData`` ((id, ...) tuple -- see caller); [] when none match."""
        categories = self.categoryService.getCategoriesById(
            catData[1], catData[0])
        print(categories)
        try:
            if len(categories) == 0:
                return []
            categoryList = str(tuple(categories))
            if len(categories) == 1:
                # str(tuple) would render "(x,)", which is invalid SQL.
                categoryList = '(' + str(categories[0]) + ')'
            query = ("select isbn, title, authors, publisher, DATE_FORMAT(yop,'%Y-%m-%d') as yop, available_copies, price, format, keywords, subject,image_loc,cc.name as childcategory "
                     "from books"
                     " inner join category cc "
                     "on cc.cat_id = books.category_id "
                     "where cc.cat_id in " + categoryList)
            book_list = super(BookDao, self).read(query=query)
            return book_list
        except Exception as e:
            print(e, "get_all_books")

    def searchBooks(self, query, filterList=[]):
        """Full-text search, optionally restricted to the given ISBNs."""
        return searchEngine.searchBooks(query, filterList=filterList)

    def getBookInfo(self, isbn):
        """Return {'book': first row, 'ratings': rating rows} for one ISBN.

        WARNING: the ISBN is string-formatted into the SQL, which is
        vulnerable to SQL injection if it can come from user input --
        prefer a parameterized query via DataAccessor.read.
        """
        query = ("select isbn, title, authors, publisher, DATE_FORMAT(yop,'%Y-%m-%d') as yop, "
                 "available_copies, price, format, keywords, subject,image_loc,description from books as b"
                 " left join category as c on b.Category_id = c.cat_id where b.ISBN='{}'").format(isbn)
        queryR = ("select * from rating where ISBN='{}'").format(isbn)
        result = super(BookDao, self).read(query=query)
        ratingResult = super(BookDao, self).read(query=queryR)
        return {'book': result[0], 'ratings': ratingResult}

    def filterBooks(self, sortParam=0, filterParam=None, searchText=None):
        """Category-filter, text-search, then sort the catalog rows."""
        bookList = []
        if filterParam is not None:
            bookList = self.get_books_by_category(
                filterParam)
        # FIX: the original used ``searchText is not ''`` -- an identity
        # comparison that relies on string interning (and is a
        # SyntaxWarning on modern Python). Use equality instead.
        if searchText is not None and searchText != '':
            bookList = self.searchBooks(
                searchText, [book.get('isbn') for book in bookList])
        if sortParam is not None:
            # NOTE(review): always sorts by 'price'; only the direction
            # comes from sortMapper -- confirm whether sortMapper['key']
            # was meant to be used here.
            bookList.sort(key=lambda x: x.get(
                'price'), reverse=self.sortMapper.get(sortParam)['reverse'])
        return bookList

    def sortList(self, booklist=[], key='isbn'):
        """Sort the given rows in place by the ``key`` field.

        FIX: list.sort requires a *callable* key; the original passed the
        string 'isbn' (TypeError at runtime) and ignored the ``key``
        argument entirely.
        """
        booklist.sort(key=lambda book: book.get(key))
| 38.129032 | 187 | 0.592851 | 528 | 4,728 | 5.172348 | 0.234848 | 0.026364 | 0.041011 | 0.04394 | 0.309044 | 0.290004 | 0.279019 | 0.279019 | 0.265104 | 0.265104 | 0 | 0.00538 | 0.292301 | 4,728 | 123 | 188 | 38.439024 | 0.810819 | 0.024535 | 0 | 0.241758 | 0 | 0.054945 | 0.26106 | 0.03548 | 0 | 0 | 0 | 0 | 0 | 1 | 0.098901 | false | 0 | 0.065934 | 0.010989 | 0.263736 | 0.054945 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2eb67aa5bb0af8a097d82e0669d563067c7b38b0 | 1,576 | py | Python | clase_9_LimpiezaDeDatos/test/test_1_limpieza.py | alanpirotta/data_science_dh | 4cb183aca70505d50d45a3f898463a47ee5eb053 | [
"MIT"
] | null | null | null | clase_9_LimpiezaDeDatos/test/test_1_limpieza.py | alanpirotta/data_science_dh | 4cb183aca70505d50d45a3f898463a47ee5eb053 | [
"MIT"
] | null | null | null | clase_9_LimpiezaDeDatos/test/test_1_limpieza.py | alanpirotta/data_science_dh | 4cb183aca70505d50d45a3f898463a47ee5eb053 | [
"MIT"
] | null | null | null | def dummy():
return 0;
def test_not_null_count(data, result, axis):
    """Check that ``result`` equals data.notnull().sum(axis=axis).

    Returns a feedback message (Spanish) for the student exercise.
    """
    is_notnull_result = data.notnull()
    data_not_null = is_notnull_result.sum(axis = axis)
    test_ok = all(data_not_null == result)
    if test_ok:
        msg = "Muy bien!!"
    else:
        # FIX: the original compared the *type object* against the string
        # "pandas.core.series.Series", which is always False, so the
        # "right type, wrong values" message could never be shown.
        # Compare the fully-qualified type name instead (avoids needing a
        # pandas import here).
        result_type = type(result)
        if result_type.__module__ + '.' + result_type.__name__ == "pandas.core.series.Series":
            msg = "Error. El tipo de datos del resultado es correcto pero los valores de los elementos son incorrectos"
        else:
            msg = "Error. Se espera un resultado de tipo pandas.core.series.Series"
    return msg
def test_not_null_column_count(data, result):
    """Column-wise non-null check: delegate with axis=0."""
    return test_not_null_count(data, result, 0)
def test_not_null_row_count(data, result):
    """Row-wise non-null check: delegate with axis=1."""
    return test_not_null_count(data, result, 1)
def test_drop_columns_umbral(data, umbral, resultado):
    """Check that ``resultado`` matches dropping from ``data`` every column
    with more than the ``umbral`` fraction of nulls.

    Returns a feedback message (Spanish) for the student exercise.
    """
    row_count = data.shape[0]
    # A column survives when it has at least this many non-null values.
    min_non_null = row_count * (1 - umbral)
    expected = data.dropna(axis=1, thresh=min_non_null)
    if resultado is None:
        return "Error. Se espera un resultado de tipo pandas.core.frame.DataFrame"
    if resultado.shape == expected.shape:
        return "Muy bien!!"
    if type(data) == type(resultado):
        return "Error. El tipo de datos del resultado es correcto pero el tam del dataframe resultado no lo es"
    return "Error. Se espera un resultado de tipo pandas.core.frame.DataFrame"
| 35.818182 | 119 | 0.647843 | 227 | 1,576 | 4.286344 | 0.290749 | 0.05036 | 0.056526 | 0.097636 | 0.514902 | 0.514902 | 0.397739 | 0.366906 | 0.366906 | 0.366906 | 0 | 0.005204 | 0.268401 | 1,576 | 44 | 120 | 35.818182 | 0.838682 | 0 | 0 | 0.432432 | 0 | 0 | 0.273304 | 0.065948 | 0 | 0 | 0 | 0 | 0 | 1 | 0.135135 | false | 0 | 0 | 0.027027 | 0.27027 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2eb6a5b3ad9075ee31087bd668337d5de2ad9d3f | 3,583 | py | Python | stickerfinder/telegram/keyboard/settings.py | annihilatorrrr/sticker-finder | 873468f8de26cc32d1de9b688140569b8086ab5b | [
"MIT"
] | 82 | 2018-11-13T05:39:44.000Z | 2022-01-18T17:08:44.000Z | stickerfinder/telegram/keyboard/settings.py | annihilatorrrr/sticker-finder | 873468f8de26cc32d1de9b688140569b8086ab5b | [
"MIT"
] | 25 | 2018-12-02T18:45:52.000Z | 2022-03-21T22:54:19.000Z | stickerfinder/telegram/keyboard/settings.py | annihilatorrrr/sticker-finder | 873468f8de26cc32d1de9b688140569b8086ab5b | [
"MIT"
] | 23 | 2019-01-22T20:04:50.000Z | 2022-02-01T14:57:28.000Z | """Maintenance related keyboards."""
from telegram import (
InlineKeyboardMarkup,
InlineKeyboardButton,
)
from stickerfinder.helper.callback import build_user_data, build_data
def get_settings_keyboard(user):
    """Build the inline keyboard for the user settings menu."""
    toggle_payloads = {
        'international': build_user_data("user_toggle_international", user),
        'deluxe': build_user_data("user_toggle_deluxe", user),
        'nsfw': build_user_data("user_toggle_nsfw", user),
        'furry': build_user_data("user_toggle_furry", user),
    }
    notification_payload = build_data("user_toggle_notifications")
    delete_history_payload = build_data("user_delete_history_confirmation")
    main_payload = build_data("main_menu")

    # Button captions flip depending on the user's current toggles.
    notification_text = (
        "📩 Disable notifications" if user.notifications else "📩 Enable notifications"
    )
    international_text = (
        "🌐 English-only sticker" if user.international else "🌐 Include non-english stickers"
    )
    deluxe_text = (
        "🌟 Include non-deluxe sticker" if user.deluxe else "🌟 Only show deluxe sticker"
    )
    nsfw_text = "❌ Hide nsfw" if user.nsfw else "💦 Include nsfw by default"
    furry_text = "Hide furry" if user.furry else "Include furry by default"

    # One button per row, in fixed menu order.
    rows = [
        (notification_text, notification_payload),
        (international_text, toggle_payloads['international']),
        (deluxe_text, toggle_payloads['deluxe']),
        (nsfw_text, toggle_payloads['nsfw']),
        (furry_text, toggle_payloads['furry']),
        ("⚠️ Delete history ⚠️", delete_history_payload),
        ("Back", main_payload),
    ]
    buttons = [
        [InlineKeyboardButton(text=text, callback_data=payload)]
        for text, payload in rows
    ]
    return InlineKeyboardMarkup(buttons)
def get_admin_settings_keyboard(user):
    """Build the inline keyboard with admin-only maintenance actions."""
    entries = [
        ("Stats", "admin_stats"),
        ("Cleanup", "admin_cleanup"),
        ("Plot", "admin_plot"),
        ("Refresh all sticker", "admin_refresh"),
        ("Refresh all sticker + OCR", "admin_refresh_ocr"),
    ]
    buttons = [
        [InlineKeyboardButton(text=text, callback_data=build_data(action))]
        for text, action in entries
    ]
    # Last row returns to the main menu.
    buttons.append(
        [InlineKeyboardButton(text="Back", callback_data=build_data("main_menu"))]
    )
    return InlineKeyboardMarkup(buttons)
def get_user_delete_history_confirmation_keyboard():
    """Confirmation keyboard shown before permanently wiping a user's history."""
    confirm_button = InlineKeyboardButton(
        text="⚠️ Permanently delete history ⚠️",
        callback_data=build_data("user_delete_history"),
    )
    back_button = InlineKeyboardButton(
        text="back", callback_data=build_data("settings_open")
    )
    return InlineKeyboardMarkup([[confirm_button], [back_button]])
| 32.279279 | 87 | 0.64192 | 354 | 3,583 | 6.248588 | 0.20339 | 0.162749 | 0.047016 | 0.066456 | 0.367993 | 0.229656 | 0.120253 | 0.120253 | 0.081374 | 0.081374 | 0 | 0 | 0.263745 | 3,583 | 110 | 88 | 32.572727 | 0.832449 | 0.046888 | 0 | 0.322222 | 0 | 0 | 0.174425 | 0.02416 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0 | 0.022222 | 0 | 0.088889 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2eba00efde07993dc1c59983d15c23b1545c645f | 1,931 | py | Python | code/zeroinsertion_aging/plot-fewinsertion.py | andim/paper-tcellimprint | e89605e51014fa3f347f96bab3d3d84c2b013a2f | [
"MIT"
] | 2 | 2020-07-28T10:47:40.000Z | 2021-11-14T20:07:21.000Z | code/zeroinsertion_aging/plot-fewinsertion.py | andim/paper-tcellimprint | e89605e51014fa3f347f96bab3d3d84c2b013a2f | [
"MIT"
] | null | null | null | code/zeroinsertion_aging/plot-fewinsertion.py | andim/paper-tcellimprint | e89605e51014fa3f347f96bab3d3d84c2b013a2f | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import palettable
import pandas as pd
import sys
sys.path.append('..')
from lib import *
plt.style.use('../custom.mplstyle')

# Insertion-count columns to analyze (column-name prefixes in the CSV).
names = ['oneInsertion', 'twoInsertion']

# Ten-year age bins; bin_ts is the bin centers used as x coordinates.
agebinsize = 10.0
agebins = np.arange(0.0, 90.0, agebinsize)
bin_ts = agebins[:-1]+agebinsize/2

# Clone-size rank bin edges; binmids are the upper edges (used as labels).
bins = np.array([1, 200, 500, 1000, 2000, 5000, 10000, 20000, 50000, 100000])
binmids = bins[1:]
print(binmids)

df_enrichments = pd.read_csv(data_directory +'emerson-enrichments.csv', index_col=0)
for name in names:
    # Pool the 200- and 500-rank columns into a 2:3 weighted-average '500' column.
    df_enrichments[name+'500'] = (2*df_enrichments[name+'200']+3*df_enrichments[name+'500'])/5
def aggregate(df, agebins, name):
    """Group df rows by age bin and return (mean, sem) arrays, one row per
    clone-size rank bin and one column per age bin.

    NOTE: reads the module-level ``bins`` rank edges for the column names
    '<name><rank>'.
    """
    by_age = df.groupby(pd.cut(df['Age'], bins=agebins))

    def stacked(stat):
        # One row per rank bin; columns follow the age-bin order.
        table = by_age.agg(stat)
        return np.array([list(table['%s%s' % (name, rank)]) for rank in bins[1:]])

    return stacked('mean'), stacked('sem')
# Two panels: one- and two-insertion clone frequencies versus age.
fig, axes = plt.subplots(figsize=(7.0, 2.8), ncols=2)
colors = np.asarray(palettable.matplotlib.Viridis_9.mpl_colors)
marker = ["o", "v", "^", "<", ">", "1", "2", "3", "4"]  # NOTE: unused in this script
nsizes = 9
for j, name in enumerate(names):
    ax = axes[j]
    meanfreq, semfreq = aggregate(df_enrichments, agebins, name)
    # One line per clone-size rank bin (the first bin is skipped);
    # the shaded band is +/- one standard error of the mean.
    for i in range(1, nsizes):
        l, = ax.plot(bin_ts, meanfreq[i, :], '-o', c=colors[i], label='%g'%bins[i+1])
        ax.fill_between(bin_ts,
                        meanfreq[i, :]-semfreq[i, :],
                        meanfreq[i, :]+semfreq[i, :], facecolor=colors[i], alpha=.5, edgecolor=None)
    ax.set_xlabel('Age in years (binned)')
    ax.set_ylim(0.0, 0.068)
axes[0].set_ylabel('One insertion clones')
axes[1].set_ylabel('Two insertions clones')
axes[0].legend(title='Clone size rank (binned)', ncol=2, loc='upper right')
fig.tight_layout()
label_axes(axes)
plt.show()
fig.savefig(figure_directory+'figure_fewinsertions.svg')
| 32.183333 | 95 | 0.663387 | 294 | 1,931 | 4.282313 | 0.455782 | 0.051628 | 0.040508 | 0.031771 | 0.036537 | 0.036537 | 0.036537 | 0.036537 | 0 | 0 | 0 | 0.055046 | 0.153288 | 1,931 | 59 | 96 | 32.728814 | 0.714985 | 0 | 0 | 0 | 0 | 0 | 0.116002 | 0.02434 | 0 | 0 | 0 | 0 | 0 | 1 | 0.021277 | false | 0 | 0.148936 | 0 | 0.191489 | 0.021277 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2eba870cf64953dbca714f2107848bfa15ca97d2 | 2,360 | py | Python | tests/test_transforms.py | sri-sensors/atmosense-abcgan | 6a0d88ce49b7de03e557eca362ab7d3399d44cfb | [
"MIT"
] | null | null | null | tests/test_transforms.py | sri-sensors/atmosense-abcgan | 6a0d88ce49b7de03e557eca362ab7d3399d44cfb | [
"MIT"
] | null | null | null | tests/test_transforms.py | sri-sensors/atmosense-abcgan | 6a0d88ce49b7de03e557eca362ab7d3399d44cfb | [
"MIT"
] | null | null | null | import unittest
import abcgan.constants as const
from abcgan import transforms as trans
import numpy as np
def fake_drivers(n):
    """Random log-normal driver matrix of shape (n, const.n_driver)."""
    gaussian = np.random.normal(size=(n, const.n_driver))
    return np.exp(gaussian)
def fake_bvs(n):
    """Random log-normal background-variable tensor (n, max_alt, n_bv)."""
    gaussian = np.random.normal(size=(n, const.max_alt, const.n_bv))
    return np.exp(gaussian)
class TestTransforms(unittest.TestCase):
    """Shape and round-trip checks for abcgan.transforms scaling helpers."""

    def test_scale_driver(self):
        # Scaling drivers yields one feature row per input sample.
        drv = fake_drivers(10)
        feats = trans.scale_driver(drv)
        self.assertEqual(feats.shape,
                         (drv.shape[0], const.n_driver_feat))

    def test_scale_bv(self):
        n_samples = 10
        bvs = fake_bvs(n_samples)
        feats, mask = trans.scale_bv(bvs)
        self.assertEqual(feats.shape,
                         (bvs.shape[0], bvs.shape[1], const.n_bv_feat))
        self.assertEqual(mask.shape, (n_samples,))

    def test_get_driver(self):
        # scale_driver followed by get_driver must reproduce the input.
        drv = fake_drivers(10)
        recovered = trans.get_driver(trans.scale_driver(drv))
        self.assertEqual(drv.shape, recovered.shape)
        self.assertTrue(np.allclose(drv, recovered))

    def test_get_driver_subset(self):
        # Same round trip, restricted to a subset of driver names.
        subset = const.driver_names[4:8]
        drv = fake_drivers(10)[:, :len(subset)]
        feats = trans.scale_driver(drv, driver_names=subset)
        recovered = trans.get_driver(feats, driver_names=subset)
        self.assertEqual(drv.shape, recovered.shape)
        self.assertTrue(np.allclose(drv, recovered))

    def test_get_bv(self):
        bvs = fake_bvs(10)
        feats, mask = trans.scale_bv(bvs)
        recovered = trans.get_bv(feats)
        self.assertEqual(recovered.shape[1], const.max_alt)
        self.assertTrue(np.allclose(bvs, recovered))

    def test_padding(self):
        # Inputs shorter than max_alt come back padded to max_alt.
        bvs = fake_bvs(10)
        bvs = bvs[:, :const.max_alt - 5, :]
        feats, mask = trans.scale_bv(bvs)
        self.assertEqual(feats.shape[1], const.max_alt)

    def test_shrink(self):
        # Inputs longer than max_alt come back truncated to max_alt.
        base = fake_bvs(10)
        doubled = np.concatenate((base, base), axis=1)
        feats, mask = trans.scale_bv(doubled)
        self.assertEqual(feats.shape[1], const.max_alt)
if __name__ == "__main__":
    # exit=False keeps the host interpreter (e.g. a notebook) alive
    # after the suite finishes.
    unittest.main(argv=['first-arg-is-ignored'], exit=False)
| 34.202899 | 78 | 0.658051 | 330 | 2,360 | 4.436364 | 0.193939 | 0.036885 | 0.037568 | 0.040984 | 0.531421 | 0.50888 | 0.460383 | 0.413934 | 0.393443 | 0.393443 | 0 | 0.013165 | 0.227542 | 2,360 | 68 | 79 | 34.705882 | 0.789907 | 0 | 0 | 0.320755 | 0 | 0 | 0.011864 | 0 | 0 | 0 | 0 | 0 | 0.207547 | 1 | 0.169811 | false | 0 | 0.075472 | 0.037736 | 0.301887 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ebce4a5e3f3f33fb24cf40e4d6d09afffd1fa71 | 4,937 | py | Python | bone-age/src/bsmu/bone_age/plugins/journal_exporter.py | IvanKosik/vision | 74603d4b727e6d993b562eb4656952e29173323e | [
"BSD-3-Clause"
] | 2 | 2019-10-15T11:34:17.000Z | 2021-02-03T10:46:07.000Z | bone-age/src/bsmu/bone_age/plugins/journal_exporter.py | IvanKosik/vision | 74603d4b727e6d993b562eb4656952e29173323e | [
"BSD-3-Clause"
] | null | null | null | bone-age/src/bsmu/bone_age/plugins/journal_exporter.py | IvanKosik/vision | 74603d4b727e6d993b562eb4656952e29173323e | [
"BSD-3-Clause"
] | null | null | null | from __future__ import annotations
import csv
from typing import TYPE_CHECKING
from PySide2.QtCore import QObject
from PySide2.QtWidgets import QFileDialog, QMessageBox
from bsmu.bone_age.plugins.main_window import TableMenu
from bsmu.vision.core.plugins.base import Plugin
if TYPE_CHECKING:
from bsmu.bone_age.plugins.main_window import BoneAgeMainWindowPlugin, BoneAgeMainWindow
from bsmu.bone_age.plugins.table_visualizer import BoneAgeTableVisualizerPlugin, BoneAgeTableVisualizer
class PatientBoneAgeJournalExporterPlugin(Plugin):
    """Plugin wiring: on enable, adds an 'Export to Excel...' entry to the
    table menu that triggers the CSV journal export."""

    _DEFAULT_DEPENDENCY_PLUGIN_FULL_NAME_BY_KEY = {
        'main_window_plugin': 'bsmu.bone_age.plugins.main_window.BoneAgeMainWindowPlugin',
        'bone_age_table_visualizer_plugin':
            'bsmu.bone_age.plugins.table_visualizer.BoneAgeTableVisualizerPlugin',
    }

    def __init__(
            self,
            main_window_plugin: BoneAgeMainWindowPlugin,
            bone_age_table_visualizer_plugin: BoneAgeTableVisualizerPlugin,
    ):
        super().__init__()
        self._main_window_plugin = main_window_plugin
        # Resolved in _enable(); None until the plugin is enabled.
        self._main_window: BoneAgeMainWindow | None = None
        self._bone_age_table_visualizer_plugin = bone_age_table_visualizer_plugin
        self._table_visualizer: BoneAgeTableVisualizer | None = None
        self._journal_exporter: PatientBoneAgeJournalExporter | None = None

    def _enable(self):
        self._main_window = self._main_window_plugin.main_window
        self._table_visualizer = self._bone_age_table_visualizer_plugin.table_visualizer
        self._journal_exporter = PatientBoneAgeJournalExporter(self._table_visualizer, self._main_window)
        # NOTE: the menu caption says Excel, but the exporter writes CSV
        # (Excel-compatible, UTF-8 with BOM).
        self._main_window.add_menu_action(TableMenu, 'Export to Excel...', self._journal_exporter.export_to_csv)

    def _disable(self):
        # Tear-down is not implemented; the plugin stays enabled for the
        # application lifetime.
        raise NotImplementedError
class PatientBoneAgeJournalExporter(QObject):
    """Exports the bone-age journal records of a BoneAgeTableVisualizer
    to a semicolon-delimited CSV file chosen via a save dialog."""

    # CSV column headers.
    IMAGE_NAME_FIELD_NAME = 'Name'
    GENDER_FIELD_NAME = 'Gender'
    BIRTHDATE_FIELD_NAME = 'Date of Birth'
    IMAGE_DATE_FIELD_NAME = 'Image Date'
    AGE_IN_IMAGE_FIELD_NAME = 'Age in Image'
    BONE_AGE_FIELD_NAME = 'Bone Age'
    # Separator passed to age_format.format for the two age columns.
    AGE_DELIMITER = '//'
    HEIGHT_FIELD_NAME = 'Height'
    MAX_HEIGHT_FIELD_NAME = 'Max Height'

    DATE_STR_FORMAT = 'dd.MM.yyyy'

    def __init__(self, table_visualizer: BoneAgeTableVisualizer, main_window: BoneAgeMainWindow):
        super().__init__()
        self._table_visualizer = table_visualizer
        self._main_window = main_window

    def export_to_csv(self):
        """Ask the user for a destination file and write all journal
        records; silently returns when the dialog is cancelled."""
        file_name, selected_filter = QFileDialog.getSaveFileName(
            parent=self._main_window, caption='Export to CSV', filter='CSV (*.csv)')
        if not file_name:
            return

        try:
            # utf-8-sig adds a BOM so Excel detects the encoding;
            # newline='' is required by the csv module.
            csv_file = open(file_name, 'w', encoding='utf-8-sig', newline='')
        except PermissionError:
            QMessageBox.warning(self._main_window, 'File Open Error',
                                'Cannot open the file due to a permission error.\n'
                                'The file may be opened in another program.')
        else:
            with csv_file:
                # Age column headers carry the current age-format unit
                # abbreviation, e.g. "Age in Image (Y)".
                age_in_image_with_format_field_name = \
                    f'{self.AGE_IN_IMAGE_FIELD_NAME} ({self._table_visualizer.age_format.ABBR})'
                bone_age_with_format_field_name = \
                    f'{self.BONE_AGE_FIELD_NAME} ({self._table_visualizer.age_format.ABBR})'
                field_names = [self.IMAGE_NAME_FIELD_NAME, self.GENDER_FIELD_NAME, self.BIRTHDATE_FIELD_NAME,
                               self.IMAGE_DATE_FIELD_NAME, age_in_image_with_format_field_name,
                               bone_age_with_format_field_name, self.HEIGHT_FIELD_NAME, self.MAX_HEIGHT_FIELD_NAME]
                writer = csv.DictWriter(csv_file, delimiter=';', fieldnames=field_names)
                writer.writeheader()
                for record in self._table_visualizer.journal.records:
                    writer.writerow({self.IMAGE_NAME_FIELD_NAME: record.image.path.stem,
                                     self.GENDER_FIELD_NAME: 'Man' if record.male else 'Woman',
                                     self.BIRTHDATE_FIELD_NAME: record.birthdate.toString(self.DATE_STR_FORMAT),
                                     self.IMAGE_DATE_FIELD_NAME: record.image_date.toString(self.DATE_STR_FORMAT),
                                     age_in_image_with_format_field_name: self._table_visualizer.age_format.format(
                                         record.age_in_image, delimiter=self.AGE_DELIMITER),
                                     bone_age_with_format_field_name: self._table_visualizer.age_format.format(
                                         record.bone_age, delimiter=self.AGE_DELIMITER),
                                     self.HEIGHT_FIELD_NAME: record.height_str,
                                     self.MAX_HEIGHT_FIELD_NAME: record.max_height_str,
})
| 46.140187 | 115 | 0.667004 | 547 | 4,937 | 5.570384 | 0.226691 | 0.082704 | 0.045947 | 0.037414 | 0.364293 | 0.255661 | 0.125041 | 0.090581 | 0.038727 | 0.038727 | 0 | 0.000824 | 0.262913 | 4,937 | 106 | 116 | 46.575472 | 0.836494 | 0 | 0 | 0.024096 | 0 | 0 | 0.114239 | 0.059955 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060241 | false | 0 | 0.108434 | 0 | 0.337349 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ebd1a88d72451941988cd64b63eaae21ba2e050 | 21,234 | py | Python | pyximc.py | EmmaKindstrom/Indentation_Discrimination | bdb68c82a36aae7f00acbf071fdd8e68bc309874 | [
"MIT"
] | null | null | null | pyximc.py | EmmaKindstrom/Indentation_Discrimination | bdb68c82a36aae7f00acbf071fdd8e68bc309874 | [
"MIT"
] | null | null | null | pyximc.py | EmmaKindstrom/Indentation_Discrimination | bdb68c82a36aae7f00acbf071fdd8e68bc309874 | [
"MIT"
] | 1 | 2022-02-14T10:52:31.000Z | 2022-02-14T10:52:31.000Z | from ctypes import *
import os
import platform
import sys
# Load library
# use cdecl on unix and stdcall on windows
def ximc_shared_lib():
    """Load the platform-specific libximc shared library.

    Returns the loaded ctypes library object, or None when the platform
    is not recognized. Uses cdecl (CDLL) on unix-likes and stdcall
    (WinDLL) on Windows.
    """
    system = platform.system()
    if system in ("Linux", "FreeBSD"):
        return CDLL("libximc.so")
    elif system == "Darwin":
        return CDLL("libximc.framework/libximc")
    elif system == "Windows":
        # FIX: the original tested sys.version_info[0] >= 8 (the *major*
        # version, always False), so the winmode branch was unreachable;
        # the minor version is index 1. It also dropped the loaded
        # handle -- the WinDLL result must be returned.
        if sys.version_info[:2] >= (3, 8):
            # Python 3.8+ changed Windows DLL search semantics; pass
            # winmode so dependent DLLs still resolve.
            return WinDLL("libximc.dll", winmode=RTLD_GLOBAL)
        else:
            return WinDLL("libximc.dll")
    else:
        return None
# Load the shared library once at import time; None when the platform is
# unsupported (callers will then fail on attribute access).
lib = ximc_shared_lib()
# Common declarations
class Result:
    # Status codes returned by libximc API wrappers.
    # NOTE: NotImplemented and ValueError intentionally mirror the C API
    # names and shadow Python builtins of the same name within this class.
    Ok = 0
    Error = -1
    NotImplemented = -2
    ValueError = -3
    NoDevice = -4
class calibration_t(LittleEndianStructure):
    # User-unit calibration record; presumably A is the steps-to-units
    # scale factor and MicrostepMode selects the microstep denominator
    # (see the libximc docs -- TODO confirm).
    _pack_ = 1  # byte-packed to match the C struct layout
    _fields_ = [
        ('A', c_double),
        ('MicrostepMode', c_uint)
    ]
class device_enumeration_t(LittleEndianStructure):
    # Opaque handle: returned as a pointer by lib.enumerate_devices and
    # only passed back through the C API, so no fields are declared here.
    pass
class device_network_information_t(LittleEndianStructure):
    # Network-enumeration record for a remote controller; fixed-length
    # char arrays and _pack_ = 1 match the C wire layout. Field semantics
    # follow libximc's struct of the same name (ipv4 is presumably a
    # packed address -- confirm byte order against the libximc docs).
    _pack_ = 1
    _fields_ = [
        ('ipv4', c_uint32),
        ('nodename', c_char * 16),
        ('axis_state', c_uint),
        ('locker_username', c_char * 16),
        ('locker_nodename', c_char * 16),
        ('locked_time', c_ulonglong),
    ]
# Clarify function types: ctypes assumes a c_int return by default, so
# declare pointer/string return types explicitly to avoid truncating
# 64-bit pointers.
lib.enumerate_devices.restype = POINTER(device_enumeration_t)
lib.get_device_name.restype = c_char_p
# ---------------------------
# BEGIN OF GENERATED code
# ---------------------------
class EnumerateFlags:
ENUMERATE_PROBE = 0x01
ENUMERATE_ALL_COM = 0x02
ENUMERATE_NETWORK = 0x04
class MoveState:
MOVE_STATE_MOVING = 0x01
MOVE_STATE_TARGET_SPEED = 0x02
MOVE_STATE_ANTIPLAY = 0x04
class ControllerFlags:
EEPROM_PRECEDENCE = 0x01
class PowerState:
PWR_STATE_UNKNOWN = 0x00
PWR_STATE_OFF = 0x01
PWR_STATE_NORM = 0x03
PWR_STATE_REDUCT = 0x04
PWR_STATE_MAX = 0x05
class StateFlags:
STATE_CONTR = 0x000003F
STATE_ERRC = 0x0000001
STATE_ERRD = 0x0000002
STATE_ERRV = 0x0000004
STATE_EEPROM_CONNECTED = 0x0000010
STATE_IS_HOMED = 0x0000020
STATE_SECUR = 0x1B3FFC0
STATE_ALARM = 0x0000040
STATE_CTP_ERROR = 0x0000080
STATE_POWER_OVERHEAT = 0x0000100
STATE_CONTROLLER_OVERHEAT = 0x0000200
STATE_OVERLOAD_POWER_VOLTAGE = 0x0000400
STATE_OVERLOAD_POWER_CURRENT = 0x0000800
STATE_OVERLOAD_USB_VOLTAGE = 0x0001000
STATE_LOW_USB_VOLTAGE = 0x0002000
STATE_OVERLOAD_USB_CURRENT = 0x0004000
STATE_BORDERS_SWAP_MISSET = 0x0008000
STATE_LOW_POWER_VOLTAGE = 0x0010000
STATE_H_BRIDGE_FAULT = 0x0020000
STATE_WINDING_RES_MISMATCH = 0x0100000
STATE_ENCODER_FAULT = 0x0200000
STATE_ENGINE_RESPONSE_ERROR = 0x0800000
STATE_EXTIO_ALARM = 0x1000000
class GPIOFlags:
STATE_DIG_SIGNAL = 0xFFFF
STATE_RIGHT_EDGE = 0x0001
STATE_LEFT_EDGE = 0x0002
STATE_BUTTON_RIGHT = 0x0004
STATE_BUTTON_LEFT = 0x0008
STATE_GPIO_PINOUT = 0x0010
STATE_GPIO_LEVEL = 0x0020
STATE_BRAKE = 0x0200
STATE_REV_SENSOR = 0x0400
STATE_SYNC_INPUT = 0x0800
STATE_SYNC_OUTPUT = 0x1000
STATE_ENC_A = 0x2000
STATE_ENC_B = 0x4000
class EncodeStatus:
ENC_STATE_ABSENT = 0x00
ENC_STATE_UNKNOWN = 0x01
ENC_STATE_MALFUNC = 0x02
ENC_STATE_REVERS = 0x03
ENC_STATE_OK = 0x04
class WindStatus:
WIND_A_STATE_ABSENT = 0x00
WIND_A_STATE_UNKNOWN = 0x01
WIND_A_STATE_MALFUNC = 0x02
WIND_A_STATE_OK = 0x03
WIND_B_STATE_ABSENT = 0x00
WIND_B_STATE_UNKNOWN = 0x10
WIND_B_STATE_MALFUNC = 0x20
WIND_B_STATE_OK = 0x30
class MvcmdStatus:
MVCMD_NAME_BITS = 0x3F
MVCMD_UKNWN = 0x00
MVCMD_MOVE = 0x01
MVCMD_MOVR = 0x02
MVCMD_LEFT = 0x03
MVCMD_RIGHT = 0x04
MVCMD_STOP = 0x05
MVCMD_HOME = 0x06
MVCMD_LOFT = 0x07
MVCMD_SSTP = 0x08
MVCMD_ERROR = 0x40
MVCMD_RUNNING = 0x80
class MoveFlags:
RPM_DIV_1000 = 0x01
class EngineFlags:
ENGINE_REVERSE = 0x01
ENGINE_CURRENT_AS_RMS = 0x02
ENGINE_MAX_SPEED = 0x04
ENGINE_ANTIPLAY = 0x08
ENGINE_ACCEL_ON = 0x10
ENGINE_LIMIT_VOLT = 0x20
ENGINE_LIMIT_CURR = 0x40
ENGINE_LIMIT_RPM = 0x80
class MicrostepMode:
MICROSTEP_MODE_FULL = 0x01
MICROSTEP_MODE_FRAC_2 = 0x02
MICROSTEP_MODE_FRAC_4 = 0x03
MICROSTEP_MODE_FRAC_8 = 0x04
MICROSTEP_MODE_FRAC_16 = 0x05
MICROSTEP_MODE_FRAC_32 = 0x06
MICROSTEP_MODE_FRAC_64 = 0x07
MICROSTEP_MODE_FRAC_128 = 0x08
MICROSTEP_MODE_FRAC_256 = 0x09
class EngineType:
ENGINE_TYPE_NONE = 0x00
ENGINE_TYPE_DC = 0x01
ENGINE_TYPE_2DC = 0x02
ENGINE_TYPE_STEP = 0x03
ENGINE_TYPE_TEST = 0x04
ENGINE_TYPE_BRUSHLESS = 0x05
class DriverType:
DRIVER_TYPE_DISCRETE_FET = 0x01
DRIVER_TYPE_INTEGRATE = 0x02
DRIVER_TYPE_EXTERNAL = 0x03
class PowerFlags:
POWER_REDUCT_ENABLED = 0x01
POWER_OFF_ENABLED = 0x02
POWER_SMOOTH_CURRENT = 0x04
class SecureFlags:
ALARM_ON_DRIVER_OVERHEATING = 0x01
LOW_UPWR_PROTECTION = 0x02
H_BRIDGE_ALERT = 0x04
ALARM_ON_BORDERS_SWAP_MISSET = 0x08
ALARM_FLAGS_STICKING = 0x10
USB_BREAK_RECONNECT = 0x20
ALARM_WINDING_MISMATCH = 0x40
ALARM_ENGINE_RESPONSE = 0x80
class PositionFlags:
SETPOS_IGNORE_POSITION = 0x01
SETPOS_IGNORE_ENCODER = 0x02
class FeedbackType:
FEEDBACK_ENCODER = 0x01
FEEDBACK_EMF = 0x04
FEEDBACK_NONE = 0x05
FEEDBACK_ENCODER_MEDIATED = 0x06
class FeedbackFlags:
FEEDBACK_ENC_REVERSE = 0x01
FEEDBACK_ENC_TYPE_BITS = 0xC0
FEEDBACK_ENC_TYPE_AUTO = 0x00
FEEDBACK_ENC_TYPE_SINGLE_ENDED = 0x40
FEEDBACK_ENC_TYPE_DIFFERENTIAL = 0x80
class SyncInFlags:
SYNCIN_ENABLED = 0x01
SYNCIN_INVERT = 0x02
SYNCIN_GOTOPOSITION = 0x04
class SyncOutFlags:
SYNCOUT_ENABLED = 0x01
SYNCOUT_STATE = 0x02
SYNCOUT_INVERT = 0x04
SYNCOUT_IN_STEPS = 0x08
SYNCOUT_ONSTART = 0x10
SYNCOUT_ONSTOP = 0x20
SYNCOUT_ONPERIOD = 0x40
class ExtioSetupFlags:
EXTIO_SETUP_OUTPUT = 0x01
EXTIO_SETUP_INVERT = 0x02
class ExtioModeFlags:
EXTIO_SETUP_MODE_IN_BITS = 0x0F
EXTIO_SETUP_MODE_IN_NOP = 0x00
EXTIO_SETUP_MODE_IN_STOP = 0x01
EXTIO_SETUP_MODE_IN_PWOF = 0x02
EXTIO_SETUP_MODE_IN_MOVR = 0x03
EXTIO_SETUP_MODE_IN_HOME = 0x04
EXTIO_SETUP_MODE_IN_ALARM = 0x05
EXTIO_SETUP_MODE_OUT_BITS = 0xF0
EXTIO_SETUP_MODE_OUT_OFF = 0x00
EXTIO_SETUP_MODE_OUT_ON = 0x10
EXTIO_SETUP_MODE_OUT_MOVING = 0x20
EXTIO_SETUP_MODE_OUT_ALARM = 0x30
EXTIO_SETUP_MODE_OUT_MOTOR_ON = 0x40
class BorderFlags:
BORDER_IS_ENCODER = 0x01
BORDER_STOP_LEFT = 0x02
BORDER_STOP_RIGHT = 0x04
BORDERS_SWAP_MISSET_DETECTION = 0x08
class EnderFlags:
ENDER_SWAP = 0x01
ENDER_SW1_ACTIVE_LOW = 0x02
ENDER_SW2_ACTIVE_LOW = 0x04
class BrakeFlags:
BRAKE_ENABLED = 0x01
BRAKE_ENG_PWROFF = 0x02
class ControlFlags:
CONTROL_MODE_BITS = 0x03
CONTROL_MODE_OFF = 0x00
CONTROL_MODE_JOY = 0x01
CONTROL_MODE_LR = 0x02
CONTROL_BTN_LEFT_PUSHED_OPEN = 0x04
CONTROL_BTN_RIGHT_PUSHED_OPEN = 0x08
class JoyFlags:
JOY_REVERSE = 0x01
class CtpFlags:
CTP_ENABLED = 0x01
CTP_BASE = 0x02
CTP_ALARM_ON_ERROR = 0x04
REV_SENS_INV = 0x08
CTP_ERROR_CORRECTION = 0x10
class HomeFlags:
HOME_DIR_FIRST = 0x001
HOME_DIR_SECOND = 0x002
HOME_MV_SEC_EN = 0x004
HOME_HALF_MV = 0x008
HOME_STOP_FIRST_BITS = 0x030
HOME_STOP_FIRST_REV = 0x010
HOME_STOP_FIRST_SYN = 0x020
HOME_STOP_FIRST_LIM = 0x030
HOME_STOP_SECOND_BITS = 0x0C0
HOME_STOP_SECOND_REV = 0x040
HOME_STOP_SECOND_SYN = 0x080
HOME_STOP_SECOND_LIM = 0x0C0
HOME_USE_FAST = 0x100
class UARTSetupFlags:
UART_PARITY_BITS = 0x03
UART_PARITY_BIT_EVEN = 0x00
UART_PARITY_BIT_ODD = 0x01
UART_PARITY_BIT_SPACE = 0x02
UART_PARITY_BIT_MARK = 0x03
UART_PARITY_BIT_USE = 0x04
UART_STOP_BIT = 0x08
class MotorTypeFlags:
MOTOR_TYPE_UNKNOWN = 0x00
MOTOR_TYPE_STEP = 0x01
MOTOR_TYPE_DC = 0x02
MOTOR_TYPE_BLDC = 0x03
class EncoderSettingsFlags:
ENCSET_DIFFERENTIAL_OUTPUT = 0x001
ENCSET_PUSHPULL_OUTPUT = 0x004
ENCSET_INDEXCHANNEL_PRESENT = 0x010
ENCSET_REVOLUTIONSENSOR_PRESENT = 0x040
ENCSET_REVOLUTIONSENSOR_ACTIVE_HIGH = 0x100
class MBSettingsFlags:
MB_AVAILABLE = 0x01
MB_POWERED_HOLD = 0x02
class TSSettingsFlags:
TS_TYPE_BITS = 0x07
TS_TYPE_UNKNOWN = 0x00
TS_TYPE_THERMOCOUPLE = 0x01
TS_TYPE_SEMICONDUCTOR = 0x02
TS_AVAILABLE = 0x08
class LSFlags:
LS_ON_SW1_AVAILABLE = 0x01
LS_ON_SW2_AVAILABLE = 0x02
LS_SW1_ACTIVE_LOW = 0x04
LS_SW2_ACTIVE_LOW = 0x08
LS_SHORTED = 0x10
class BackEMFFlags:
BACK_EMF_INDUCTANCE_AUTO = 0x01
BACK_EMF_RESISTANCE_AUTO = 0x02
BACK_EMF_KM_AUTO = 0x04
class feedback_settings_t(Structure):
    """ctypes mirror of the feedback (position sensor) settings structure."""
    _fields_ = [
        ("IPS", c_uint),
        ("FeedbackType", c_uint),
        ("FeedbackFlags", c_uint),
        ("CountsPerTurn", c_uint),
    ]


class home_settings_t(Structure):
    """Homing-procedure settings in raw device units."""
    _fields_ = [
        ("FastHome", c_uint),
        ("uFastHome", c_uint),
        ("SlowHome", c_uint),
        ("uSlowHome", c_uint),
        ("HomeDelta", c_int),
        ("uHomeDelta", c_int),
        ("HomeFlags", c_uint),
    ]


class home_settings_calb_t(Structure):
    """Homing-procedure settings in calibrated (user) units."""
    _fields_ = [
        ("FastHome", c_float),
        ("SlowHome", c_float),
        ("HomeDelta", c_float),
        ("HomeFlags", c_uint),
    ]


class move_settings_t(Structure):
    """Motion profile (speed/acceleration) settings in raw units."""
    _fields_ = [
        ("Speed", c_uint),
        ("uSpeed", c_uint),
        ("Accel", c_uint),
        ("Decel", c_uint),
        ("AntiplaySpeed", c_uint),
        ("uAntiplaySpeed", c_uint),
        ("MoveFlags", c_uint),
    ]


class move_settings_calb_t(Structure):
    """Motion profile settings in calibrated (user) units."""
    _fields_ = [
        ("Speed", c_float),
        ("Accel", c_float),
        ("Decel", c_float),
        ("AntiplaySpeed", c_float),
        ("MoveFlags", c_uint),
    ]


class engine_settings_t(Structure):
    """Motor (engine) settings in raw units."""
    _fields_ = [
        ("NomVoltage", c_uint),
        ("NomCurrent", c_uint),
        ("NomSpeed", c_uint),
        ("uNomSpeed", c_uint),
        ("EngineFlags", c_uint),
        ("Antiplay", c_int),
        ("MicrostepMode", c_uint),
        ("StepsPerRev", c_uint),
    ]


class engine_settings_calb_t(Structure):
    """Motor (engine) settings in calibrated (user) units."""
    _fields_ = [
        ("NomVoltage", c_uint),
        ("NomCurrent", c_uint),
        ("NomSpeed", c_float),
        ("EngineFlags", c_uint),
        ("Antiplay", c_float),
        ("MicrostepMode", c_uint),
        ("StepsPerRev", c_uint),
    ]


class entype_settings_t(Structure):
    """Engine/driver type selection."""
    _fields_ = [
        ("EngineType", c_uint),
        ("DriverType", c_uint),
    ]


class power_settings_t(Structure):
    """Hold-current and power-off timing settings."""
    _fields_ = [
        ("HoldCurrent", c_uint),
        ("CurrReductDelay", c_uint),
        ("PowerOffDelay", c_uint),
        ("CurrentSetTime", c_uint),
        ("PowerFlags", c_uint),
    ]


class secure_settings_t(Structure):
    """Critical voltage/current/temperature protection thresholds."""
    _fields_ = [
        ("LowUpwrOff", c_uint),
        ("CriticalIpwr", c_uint),
        ("CriticalUpwr", c_uint),
        ("CriticalT", c_uint),
        ("CriticalIusb", c_uint),
        ("CriticalUusb", c_uint),
        ("MinimumUusb", c_uint),
        ("Flags", c_uint),
    ]


class edges_settings_t(Structure):
    """Travel-range border settings in raw units."""
    _fields_ = [
        ("BorderFlags", c_uint),
        ("EnderFlags", c_uint),
        ("LeftBorder", c_int),
        ("uLeftBorder", c_int),
        ("RightBorder", c_int),
        ("uRightBorder", c_int),
    ]


class edges_settings_calb_t(Structure):
    """Travel-range border settings in calibrated (user) units."""
    _fields_ = [
        ("BorderFlags", c_uint),
        ("EnderFlags", c_uint),
        ("LeftBorder", c_float),
        ("RightBorder", c_float),
    ]
class pid_settings_t(Structure):
    """PID regulator coefficients (integer and float variants)."""
    _fields_ = [
        ("KpU", c_uint),
        ("KiU", c_uint),
        ("KdU", c_uint),
        ("Kpf", c_float),
        ("Kif", c_float),
        ("Kdf", c_float),
    ]


class sync_in_settings_t(Structure):
    """Synchronization-input settings in raw units."""
    _fields_ = [
        ("SyncInFlags", c_uint),
        ("ClutterTime", c_uint),
        ("Position", c_int),
        ("uPosition", c_int),
        ("Speed", c_uint),
        ("uSpeed", c_uint),
    ]


class sync_in_settings_calb_t(Structure):
    """Synchronization-input settings in calibrated (user) units."""
    _fields_ = [
        ("SyncInFlags", c_uint),
        ("ClutterTime", c_uint),
        ("Position", c_float),
        ("Speed", c_float),
    ]


class sync_out_settings_t(Structure):
    """Synchronization-output settings in raw units."""
    _fields_ = [
        ("SyncOutFlags", c_uint),
        ("SyncOutPulseSteps", c_uint),
        ("SyncOutPeriod", c_uint),
        ("Accuracy", c_uint),
        ("uAccuracy", c_uint),
    ]


class sync_out_settings_calb_t(Structure):
    """Synchronization-output settings in calibrated (user) units."""
    _fields_ = [
        ("SyncOutFlags", c_uint),
        ("SyncOutPulseSteps", c_uint),
        ("SyncOutPeriod", c_uint),
        ("Accuracy", c_float),
    ]


class extio_settings_t(Structure):
    """External I/O pin configuration."""
    _fields_ = [
        ("EXTIOSetupFlags", c_uint),
        ("EXTIOModeFlags", c_uint),
    ]


class brake_settings_t(Structure):
    """Brake timing (t1..t4) and behaviour flags."""
    _fields_ = [
        ("t1", c_uint),
        ("t2", c_uint),
        ("t3", c_uint),
        ("t4", c_uint),
        ("BrakeFlags", c_uint),
    ]


class control_settings_t(Structure):
    """Manual-control settings (per-click speed tables) in raw units."""
    _fields_ = [
        ("MaxSpeed", c_uint * 10),
        ("uMaxSpeed", c_uint * 10),
        ("Timeout", c_uint * 9),
        ("MaxClickTime", c_uint),
        ("Flags", c_uint),
        ("DeltaPosition", c_int),
        ("uDeltaPosition", c_int),
    ]


class control_settings_calb_t(Structure):
    """Manual-control settings in calibrated (user) units."""
    _fields_ = [
        ("MaxSpeed", c_float * 10),
        ("Timeout", c_uint * 9),
        ("MaxClickTime", c_uint),
        ("Flags", c_uint),
        ("DeltaPosition", c_float),
    ]


class joystick_settings_t(Structure):
    """Joystick calibration (end points, dead zone, response curve)."""
    _fields_ = [
        ("JoyLowEnd", c_uint),
        ("JoyCenter", c_uint),
        ("JoyHighEnd", c_uint),
        ("ExpFactor", c_uint),
        ("DeadZone", c_uint),
        ("JoyFlags", c_uint),
    ]


class ctp_settings_t(Structure):
    """Position-control (CTP) error threshold and flags."""
    _fields_ = [
        ("CTPMinError", c_uint),
        ("CTPFlags", c_uint),
    ]


class uart_settings_t(Structure):
    """UART speed and setup flags."""
    _fields_ = [
        ("Speed", c_uint),
        ("UARTSetupFlags", c_uint),
    ]


class calibration_settings_t(Structure):
    """Current-sense calibration coefficients."""
    _fields_ = [
        ("CSS1_A", c_float),
        ("CSS1_B", c_float),
        ("CSS2_A", c_float),
        ("CSS2_B", c_float),
        ("FullCurrent_A", c_float),
        ("FullCurrent_B", c_float),
    ]


class controller_name_t(Structure):
    """User-assigned controller name (16 chars + NUL) and flags."""
    _fields_ = [
        ("ControllerName", c_char * 17),
        ("CtrlFlags", c_uint),
    ]


class nonvolatile_memory_t(Structure):
    """Free-form user data stored in controller NVRAM."""
    _fields_ = [
        ("UserData", c_uint * 7),
    ]


class emf_settings_t(Structure):
    """Back-EMF motor model parameters (inductance, resistance, Km)."""
    _fields_ = [
        ("L", c_float),
        ("R", c_float),
        ("Km", c_float),
        ("BackEMFFlags", c_uint),
    ]


class engine_advansed_setup_t(Structure):
    """Closed-loop stepper gain settings (name typo preserved from the C API)."""
    _fields_ = [
        ("stepcloseloop_Kw", c_uint),
        ("stepcloseloop_Kp_low", c_uint),
        ("stepcloseloop_Kp_high", c_uint),
    ]


class extended_settings_t(Structure):
    """Reserved extended settings."""
    _fields_ = [
        ("Param1", c_uint),
    ]
class get_position_t(Structure):
    """Current position reading in raw units (plus raw encoder count)."""
    _fields_ = [
        ("Position", c_int),
        ("uPosition", c_int),
        ("EncPosition", c_longlong),
    ]


class get_position_calb_t(Structure):
    """Current position reading in calibrated (user) units."""
    _fields_ = [
        ("Position", c_float),
        ("EncPosition", c_longlong),
    ]


class set_position_t(Structure):
    """Position override command payload in raw units."""
    _fields_ = [
        ("Position", c_int),
        ("uPosition", c_int),
        ("EncPosition", c_longlong),
        ("PosFlags", c_uint),
    ]


class set_position_calb_t(Structure):
    """Position override command payload in calibrated (user) units."""
    _fields_ = [
        ("Position", c_float),
        ("EncPosition", c_longlong),
        ("PosFlags", c_uint),
    ]


class status_t(Structure):
    """Full device status snapshot in raw units."""
    _fields_ = [
        ("MoveSts", c_uint),
        ("MvCmdSts", c_uint),
        ("PWRSts", c_uint),
        ("EncSts", c_uint),
        ("WindSts", c_uint),
        ("CurPosition", c_int),
        ("uCurPosition", c_int),
        ("EncPosition", c_longlong),
        ("CurSpeed", c_int),
        ("uCurSpeed", c_int),
        ("Ipwr", c_int),
        ("Upwr", c_int),
        ("Iusb", c_int),
        ("Uusb", c_int),
        ("CurT", c_int),
        ("Flags", c_uint),
        ("GPIOFlags", c_uint),
        ("CmdBufFreeSpace", c_uint),
    ]


class status_calb_t(Structure):
    """Full device status snapshot in calibrated (user) units."""
    _fields_ = [
        ("MoveSts", c_uint),
        ("MvCmdSts", c_uint),
        ("PWRSts", c_uint),
        ("EncSts", c_uint),
        ("WindSts", c_uint),
        ("CurPosition", c_float),
        ("EncPosition", c_longlong),
        ("CurSpeed", c_float),
        ("Ipwr", c_int),
        ("Upwr", c_int),
        ("Iusb", c_int),
        ("Uusb", c_int),
        ("CurT", c_int),
        ("Flags", c_uint),
        ("GPIOFlags", c_uint),
        ("CmdBufFreeSpace", c_uint),
    ]


class measurements_t(Structure):
    """Buffered speed/error measurement arrays (25 samples each)."""
    _fields_ = [
        ("Speed", c_int * 25),
        ("Error", c_int * 25),
        ("Length", c_uint),
    ]


class chart_data_t(Structure):
    """Per-winding voltage/current chart sample."""
    _fields_ = [
        ("WindingVoltageA", c_int),
        ("WindingVoltageB", c_int),
        ("WindingVoltageC", c_int),
        ("WindingCurrentA", c_int),
        ("WindingCurrentB", c_int),
        ("WindingCurrentC", c_int),
        ("Pot", c_uint),
        ("Joy", c_uint),
        ("DutyCycle", c_int),
    ]


class device_information_t(Structure):
    """Manufacturer strings and firmware version triple."""
    _fields_ = [
        ("Manufacturer", c_char * 5),
        ("ManufacturerId", c_char * 3),
        ("ProductDescription", c_char * 9),
        ("Major", c_uint),
        ("Minor", c_uint),
        ("Release", c_uint),
    ]
class serial_number_t(Structure):
    """Serial number, write key and hardware version triple."""
    _fields_ = [
        ("SN", c_uint),
        ("Key", c_ubyte * 32),
        ("Major", c_uint),
        ("Minor", c_uint),
        ("Release", c_uint),
    ]


class analog_data_t(Structure):
    """Raw ADC readings and their converted counterparts for every channel."""
    _fields_ = [
        ("A1Voltage_ADC", c_uint),
        ("A2Voltage_ADC", c_uint),
        ("B1Voltage_ADC", c_uint),
        ("B2Voltage_ADC", c_uint),
        ("SupVoltage_ADC", c_uint),
        ("ACurrent_ADC", c_uint),
        ("BCurrent_ADC", c_uint),
        ("FullCurrent_ADC", c_uint),
        ("Temp_ADC", c_uint),
        ("Joy_ADC", c_uint),
        ("Pot_ADC", c_uint),
        ("L5_ADC", c_uint),
        ("H5_ADC", c_uint),
        ("A1Voltage", c_int),
        ("A2Voltage", c_int),
        ("B1Voltage", c_int),
        ("B2Voltage", c_int),
        ("SupVoltage", c_int),
        ("ACurrent", c_int),
        ("BCurrent", c_int),
        ("FullCurrent", c_int),
        ("Temp", c_int),
        ("Joy", c_int),
        ("Pot", c_int),
        ("L5", c_int),
        ("H5", c_int),
        ("deprecated", c_uint),
        ("R", c_int),
        ("L", c_int),
    ]


class debug_read_t(Structure):
    """128-byte debug read buffer."""
    _fields_ = [
        ("DebugData", c_ubyte * 128),
    ]


class debug_write_t(Structure):
    """128-byte debug write buffer."""
    _fields_ = [
        ("DebugData", c_ubyte * 128),
    ]


class stage_name_t(Structure):
    """User-assigned positioner name (16 chars + NUL)."""
    _fields_ = [
        ("PositionerName", c_char * 17),
    ]


class stage_information_t(Structure):
    """Stage manufacturer and part number strings."""
    _fields_ = [
        ("Manufacturer", c_char * 17),
        ("PartNumber", c_char * 25),
    ]


class stage_settings_t(Structure):
    """Mechanical and electrical ratings of the stage."""
    _fields_ = [
        ("LeadScrewPitch", c_float),
        ("Units", c_char * 9),
        ("MaxSpeed", c_float),
        ("TravelRange", c_float),
        ("SupplyVoltageMin", c_float),
        ("SupplyVoltageMax", c_float),
        ("MaxCurrentConsumption", c_float),
        ("HorizontalLoadCapacity", c_float),
        ("VerticalLoadCapacity", c_float),
    ]


class motor_information_t(Structure):
    """Motor manufacturer and part number strings."""
    _fields_ = [
        ("Manufacturer", c_char * 17),
        ("PartNumber", c_char * 25),
    ]


class motor_settings_t(Structure):
    """Nominal/maximum electrical and mechanical motor parameters."""
    _fields_ = [
        ("MotorType", c_uint),
        ("ReservedField", c_uint),
        ("Poles", c_uint),
        ("Phases", c_uint),
        ("NominalVoltage", c_float),
        ("NominalCurrent", c_float),
        ("NominalSpeed", c_float),
        ("NominalTorque", c_float),
        ("NominalPower", c_float),
        ("WindingResistance", c_float),
        ("WindingInductance", c_float),
        ("RotorInertia", c_float),
        ("StallTorque", c_float),
        ("DetentTorque", c_float),
        ("TorqueConstant", c_float),
        ("SpeedConstant", c_float),
        ("SpeedTorqueGradient", c_float),
        ("MechanicalTimeConstant", c_float),
        ("MaxSpeed", c_float),
        ("MaxCurrent", c_float),
        ("MaxCurrentTime", c_float),
        ("NoLoadCurrent", c_float),
        ("NoLoadSpeed", c_float),
    ]


class encoder_information_t(Structure):
    """Encoder manufacturer and part number strings."""
    _fields_ = [
        ("Manufacturer", c_char * 17),
        ("PartNumber", c_char * 25),
    ]


class encoder_settings_t(Structure):
    """Encoder electrical ratings, resolution and feature flags."""
    _fields_ = [
        ("MaxOperatingFrequency", c_float),
        ("SupplyVoltageMin", c_float),
        ("SupplyVoltageMax", c_float),
        ("MaxCurrentConsumption", c_float),
        ("PPR", c_uint),
        ("EncoderSettings", c_uint),
    ]


class hallsensor_information_t(Structure):
    """Hall-sensor manufacturer and part number strings."""
    _fields_ = [
        ("Manufacturer", c_char * 17),
        ("PartNumber", c_char * 25),
    ]


class hallsensor_settings_t(Structure):
    """Hall-sensor electrical ratings and resolution."""
    _fields_ = [
        ("MaxOperatingFrequency", c_float),
        ("SupplyVoltageMin", c_float),
        ("SupplyVoltageMax", c_float),
        ("MaxCurrentConsumption", c_float),
        ("PPR", c_uint),
    ]


class gear_information_t(Structure):
    """Gearbox manufacturer and part number strings."""
    _fields_ = [
        ("Manufacturer", c_char * 17),
        ("PartNumber", c_char * 25),
    ]


class gear_settings_t(Structure):
    """Gearbox ratio, ratings and efficiency."""
    _fields_ = [
        ("ReductionIn", c_float),
        ("ReductionOut", c_float),
        ("RatedInputTorque", c_float),
        ("RatedInputSpeed", c_float),
        ("MaxOutputBacklash", c_float),
        ("InputInertia", c_float),
        ("Efficiency", c_float),
    ]


class accessories_settings_t(Structure):
    """Magnetic brake, temperature sensor and limit-switch accessory data."""
    _fields_ = [
        ("MagneticBrakeInfo", c_char * 25),
        ("MBRatedVoltage", c_float),
        ("MBRatedCurrent", c_float),
        ("MBTorque", c_float),
        ("MBSettings", c_uint),
        ("TemperatureSensorInfo", c_char * 25),
        ("TSMin", c_float),
        ("TSMax", c_float),
        ("TSGrad", c_float),
        ("TSSettings", c_uint),
        ("LimitSwitchesSettings", c_uint),
    ]


class init_random_t(Structure):
    """16-byte random key used by the init/handshake command."""
    _fields_ = [
        ("key", c_ubyte * 16),
    ]


class globally_unique_identifier_t(Structure):
    """128-bit unique device identifier split into four 32-bit words."""
    _fields_ = [
        ("UniqueID0", c_uint),
        ("UniqueID1", c_uint),
        ("UniqueID2", c_uint),
        ("UniqueID3", c_uint),
    ]
# -------------------------
# END OF GENERATED code
# -------------------------
# vim: set ft=python
| 23.462983 | 65 | 0.646605 | 2,507 | 21,234 | 5.039888 | 0.244515 | 0.061733 | 0.07218 | 0.049387 | 0.231658 | 0.190503 | 0.186704 | 0.16296 | 0.156787 | 0.156787 | 0 | 0.054833 | 0.221861 | 21,234 | 904 | 66 | 23.488938 | 0.709859 | 0.012668 | 0 | 0.234615 | 0 | 0 | 0.154823 | 0.011311 | 0 | 0 | 0.04806 | 0 | 0 | 1 | 0.001282 | false | 0.001282 | 0.005128 | 0 | 0.494872 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ebd8e420a76964bfe5a3a13462a439277b542dc | 4,787 | py | Python | cgi-bin/report.py | alexander1389/IMS.WebAPI | cfc8c6c899655c337973f9a32a620e9cd6af34b9 | [
"MIT"
] | null | null | null | cgi-bin/report.py | alexander1389/IMS.WebAPI | cfc8c6c899655c337973f9a32a620e9cd6af34b9 | [
"MIT"
] | null | null | null | cgi-bin/report.py | alexander1389/IMS.WebAPI | cfc8c6c899655c337973f9a32a620e9cd6af34b9 | [
"MIT"
] | null | null | null | import sys
import os
import time
from datetime import datetime
from os.path import isfile, join, splitext, getsize, basename
from shutil import copyfileobj
from zipfile import ZipFile
from bad_request import bad_request
from utils import validate_dt
DEFAULT_DAT = 'last'
DEFAULT_EXT = 'xml'
def report(**kwargs):
    """Emit the HTTP response for the requested report(s).

    Reads the reports directory from the CGI_REPORTS_PATH environment
    variable and dispatches either to the newest report or to a
    date-filtered archive.

    :param **kwargs: Parameters of the GET request
    :type **kwargs: dict
    """
    path = os.getenv('CGI_REPORTS_PATH', None)
    if not path:
        # bad_request is expected to terminate the response here.
        bad_request(msg='Bad Path')
    date = kwargs.get('date', DEFAULT_DAT)
    ext = '.%s' % kwargs.get('ext', DEFAULT_EXT)
    if date == 'last':
        get_last_report(path, ext)
    else:
        get_reports_by_date(path, date, ext)
def get_ctime(path):
    """Return the file creation time formatted as "%y%m%d%H%M%S".

    Falls back to the last-modified time on platforms (e.g. Linux)
    where ``st_birthtime`` is not available.

    :param path: The file location path
    :type path: str
    :returns: creation (or modification) timestamp string
    :rtype: str
    """
    st_info = os.stat(path)
    try:
        t = st_info.st_birthtime
    except AttributeError:
        # st_birthtime only exists on some platforms (macOS, BSD).
        t = st_info.st_mtime
    return datetime.fromtimestamp(t).strftime("%y%m%d%H%M%S")
def get_replist(path, ext=DEFAULT_EXT):
    """Build a map of report files in *path* (non-recursively).

    :param path: The location of reports
    :type path: str
    :param ext: Report file extension, including the dot (default DEFAULT_EXT)
    :type ext: str
    :returns: mapping of absolute filename -> creation-time string
    :rtype: dict
    """
    if not os.path.exists(path):
        return {}
    return {f: get_ctime(f) for f in (join(path, f) for f in os.listdir(path))
            if isfile(f) and splitext(f)[1] == ext}
def filter_by_date(lst, date):
    """Filter the reports mapping by date prefix.

    :param lst: mapping of filename -> creation-time string
    :type lst: dict
    :param date: date (possibly with '_' separators) to filter by
    :type date: str
    :returns: filenames whose timestamp starts with *date*
    :rtype: list
    """
    date = date.replace('_', '')
    if not lst or not date:
        return []
    if not validate_dt(date):
        # bad_request is expected to terminate the response here.
        bad_request(400, 'Bad Request', 'Wrong Date')
    return [k for k, v in lst.items() if v[:len(date)] == date]
def get_reports_by_date(path, date, ext=DEFAULT_EXT):
    """Stream a zip archive of all reports matching *date* as an HTTP response.

    :param path: The location of reports
    :type path: str
    :param date: Date to get reports by
    :type date: str
    :param ext: Report file extension (default DEFAULT_EXT)
    :type ext: str
    """
    reports = filter_by_date(get_replist(path, ext), date)
    if not reports:
        bad_request(404, msg='No Reports')
    arch_name = 'rep_%s.zip' % date
    # Context manager guarantees the archive is closed even on error;
    # loop variable renamed so it no longer shadows the module-level report().
    with ZipFile(arch_name, 'w') as arch:
        for rep in reports:
            arch.write(rep, basename(rep))
    print('Cache-Control: no-cache')
    print('Cache-Control: no-store')
    print('Content-Disposition: attachment; filename="%s"' % arch_name)
    print('Content-Transfer-Encoding: binary')
    print('Content-Length: %d' % getsize(arch_name))
    print('Content-Type: application/zip')
    print(flush=True)
    with open(arch_name, 'rb') as zipfile:
        copyfileobj(zipfile, sys.stdout.buffer)
def get_last_report(path, ext=DEFAULT_EXT):
    """Stream the most recently created report as an HTTP response.

    :param path: The location of reports
    :type path: str
    :param ext: Report file extension (default DEFAULT_EXT)
    :type ext: str
    """
    lst = get_replist(path, ext)
    if not lst:
        bad_request(404, msg='No Reports')
    # Newest by (ctime, name); O(n) max instead of sorting the whole list.
    last_report = max(lst.items(), key=lambda kv: (kv[1], kv[0]))[0]
    report_name = basename(last_report)
    print('Cache-Control: no-cache')
    print('Cache-Control: no-store')
    print('Content-Disposition: attachment; filename="%s"' % report_name)
    print('Content-Transfer-Encoding: binary')
    print('Content-Length: %d' % getsize(last_report))
    print('Content-Type: application/octet-stream')
    print(flush=True)
    with open(last_report, 'rb') as file:
        copyfileobj(file, sys.stdout.buffer)
if __name__ == '__main__':
    # Ad-hoc manual test harness: report.py <path> <ext> <date>
    if len(sys.argv) < 4:
        sys.exit()
    TEST_PTH = sys.argv[1]
    TEST_EXT = sys.argv[2]
    TEST_DAT = sys.argv[3]
    print('\nget_rep_list():')
    print('---------------\n')
    reps = get_replist(TEST_PTH, TEST_EXT)
    for k, v in reps.items():
        print("%s - %s" % (v, k))
    print('---------------\n')
    print('filter_by_date():')
    print('-----------------\n')
    filtered = filter_by_date(reps, TEST_DAT)
    for f in filtered:
        print(f)
    print('-----------------\n')
| 27.354286 | 79 | 0.62628 | 668 | 4,787 | 4.371257 | 0.242515 | 0.027397 | 0.017808 | 0.026027 | 0.300342 | 0.260274 | 0.243151 | 0.212671 | 0.20137 | 0.182877 | 0 | 0.004944 | 0.239398 | 4,787 | 174 | 80 | 27.511494 | 0.797034 | 0.257155 | 0 | 0.150538 | 0 | 0 | 0.174533 | 0.022559 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064516 | false | 0 | 0.096774 | 0 | 0.215054 | 0.236559 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ec221502b1e647f94c1e89ca9e353cdcc04428f | 507 | py | Python | Python/CeV/Exercicios/ex99.py | WerickL/Learning | 5a9a488f0422454e612439b89093d5bc11242e65 | [
"MIT"
] | null | null | null | Python/CeV/Exercicios/ex99.py | WerickL/Learning | 5a9a488f0422454e612439b89093d5bc11242e65 | [
"MIT"
] | null | null | null | Python/CeV/Exercicios/ex99.py | WerickL/Learning | 5a9a488f0422454e612439b89093d5bc11242e65 | [
"MIT"
] | null | null | null | def maior(*valores):
if len(valores) >= 1:
maiorv = valores[0]
print(f'Analisando os valores...')
for c in valores:
print(c, end=' ')
if c > maiorv:
maiorv = c
print()
print(f'Foram informados {len(valores)} valores ao todo.')
print(f'O maior valor é {maiorv}!')
print('-'*50)
else:
print('Nenhum elemento foi identificado')
# Demo runs covering several values, few values, a single value and none.
maior(2, 9, 4, 5, 7, 1)
maior(4, 7, 0)
maior(1, 2)
maior(6)
maior()
| 23.045455 | 66 | 0.512821 | 69 | 507 | 3.768116 | 0.507246 | 0.069231 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.047619 | 0.337278 | 507 | 21 | 67 | 24.142857 | 0.72619 | 0 | 0 | 0 | 0 | 0 | 0.258383 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0 | 0 | 0.052632 | 0.368421 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ec2c19a187f653438f9e1dd5d0899ca5f849f6b | 2,922 | py | Python | pretraining/preprocessing.py | Artia-Inspirenet/module-2 | 7cf1d74f13d23a11ce202436d88b283d7ef1e109 | [
"Apache-2.0"
] | null | null | null | pretraining/preprocessing.py | Artia-Inspirenet/module-2 | 7cf1d74f13d23a11ce202436d88b283d7ef1e109 | [
"Apache-2.0"
] | null | null | null | pretraining/preprocessing.py | Artia-Inspirenet/module-2 | 7cf1d74f13d23a11ce202436d88b283d7ef1e109 | [
"Apache-2.0"
] | null | null | null | import sys
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
import hed
curdir = os.path.abspath(os.path.curdir)
sys.path.append(os.path.join(os.path.dirname(curdir), 'tf_pose'))
from pycocotools.coco import COCO

dataset_path = '/home/artia/prj/datasets/coco'
coco = COCO(os.path.join(dataset_path, 'annotations/person_keypoints_train2017.json'))

# Get all images containing the 'person' category.
catIds = coco.getCatIds(catNms=['person'])
imgIds = coco.getImgIds(catIds=catIds)

x = coco.loadImgs(imgIds)
print('total num :', len(x))
img_info_list = x[1590:10000]
del x  # release the full list before the processing loop

# Path to HED model parameters and the two output trees (with/without background).
model_path = 'HED_reproduced.npz'
output_path_with_bg = '/home/artia/prj/datasets/preprocessed_coco/edge_detected_with_bg'
output_path_without_bg = '/home/artia/prj/datasets/preprocessed_coco/edge_detected_without_bg'
os.makedirs(output_path_with_bg, exist_ok=True)
os.makedirs(output_path_without_bg, exist_ok=True)

for i, img_info in enumerate(img_info_list, start=1):
    print('iteration : ', i)
    filename = img_info['file_name']
    im = cv2.imread(os.path.join(dataset_path, 'train2017', filename))
    base = os.path.splitext(filename)[0]
    png_filename = base + '.png'
    # Build a mask from the COCO segmentation polygons to drop the background.
    annIds = coco.getAnnIds(imgIds=img_info['id'], catIds=catIds, iscrowd=None)
    anns = coco.loadAnns(annIds)
    contours = []
    for ann in anns:
        if 'segmentation' in ann and isinstance(ann['segmentation'], list):
            # polygon segmentation
            for seg in ann['segmentation']:
                poly = np.array(seg).reshape((-1, 1, 2)).astype(np.int32)
                contours.append(poly)
    if not contours:
        continue
    temp = im.copy()
    # Fill the person polygons with a sentinel colour, then threshold on it.
    cv2.drawContours(temp, contours, -1, (128, 125, 10), -1)
    mask = cv2.inRange(temp, (128, 125, 10), (128, 125, 10))
    # Resize to multiples of 16 (required by the HED network input).
    im = cv2.resize(im, (im.shape[1] // 16 * 16, im.shape[0] // 16 * 16))[None, :, :, :].astype('float32')
    mask = cv2.resize(mask, (mask.shape[1] // 16 * 16, mask.shape[0] // 16 * 16))
    # Holistic edge detection output, with background kept.
    edged_im = hed.run(model_path, im).astype(np.uint8)
    cv2.imwrite(os.path.join(output_path_with_bg, png_filename), edged_im)
    # Same edges with the background whited out via the person mask.
    mask_inv = cv2.bitwise_not(mask)
    res = cv2.bitwise_and(edged_im, edged_im, mask=mask)
    dst = cv2.add(res, mask_inv)
    cv2.imwrite(os.path.join(output_path_without_bg, png_filename), dst)
| 36.525 | 110 | 0.718686 | 452 | 2,922 | 4.511062 | 0.365044 | 0.032369 | 0.024522 | 0.031388 | 0.148112 | 0.105934 | 0.105934 | 0.076508 | 0.049044 | 0 | 0 | 0.035455 | 0.150582 | 2,922 | 79 | 111 | 36.987342 | 0.78606 | 0.21629 | 0 | 0 | 0 | 0 | 0.142418 | 0.089231 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.127273 | 0 | 0.127273 | 0.036364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ec5bb2ae4b37f160ab9e0941f0b6024ab25a311 | 5,727 | py | Python | dojo/finding_group/views.py | alles-klar/django-DefectDojo | 83630437f7259fdbe633ac21cf5715685ffa7eab | [
"BSD-3-Clause"
] | null | null | null | dojo/finding_group/views.py | alles-klar/django-DefectDojo | 83630437f7259fdbe633ac21cf5715685ffa7eab | [
"BSD-3-Clause"
] | 48 | 2021-04-14T13:02:43.000Z | 2021-07-06T03:08:36.000Z | dojo/finding_group/views.py | Hijerboa/django-DefectDojo | 3aea3bc3406f860c0842b0bf8800efe2c86bf81b | [
"BSD-3-Clause"
] | null | null | null | # # findings
from dojo.utils import Product_Tab
from dojo.forms import DeleteFindingGroupForm
from dojo.notifications.helper import create_notification
from django.contrib import messages
from django.contrib.admin.utils import NestedObjects
from django.db.utils import DEFAULT_DB_ALIAS
from django.http.response import HttpResponse, HttpResponseRedirect, JsonResponse
from django.shortcuts import get_object_or_404, render
from django.urls.base import reverse
from django.views.decorators.http import require_POST
from dojo.models import Finding_Group
import logging
from dojo.user.helper import user_must_be_authorized
import dojo.jira_link.helper as jira_helper
logger = logging.getLogger(__name__)
@user_must_be_authorized(Finding_Group, 'view', 'fgid')
def view_finding_group(request, fgid):
logger.debug('view finding group: %s', fgid)
return HttpResponse('Not implemented yet')
@user_must_be_authorized(Finding_Group, 'change', 'fgid')
def edit_finding_group(request, fgid):
logger.debug('edit finding group: %s', fgid)
return HttpResponse('Not implemented yet')
@user_must_be_authorized(Finding_Group, 'delete', 'fgid')
@require_POST
def delete_finding_group(request, fgid):
logger.debug('delete finding group: %s', fgid)
finding_group = get_object_or_404(Finding_Group, pk=fgid)
form = DeleteFindingGroupForm(instance=finding_group)
if request.method == 'POST':
if 'id' in request.POST and str(finding_group.id) == request.POST['id']:
form = DeleteFindingGroupForm(request.POST, instance=finding_group)
if form.is_valid():
finding_group.delete()
messages.add_message(request,
messages.SUCCESS,
'Finding Group and relationships removed.',
extra_tags='alert-success')
create_notification(event='other',
title='Deletion of %s' % finding_group.name,
description='The finding group "%s" was deleted by %s' % (finding_group.name, request.user),
url=request.build_absolute_uri(reverse('view_test', args=(finding_group.test.id,))),
icon="exclamation-triangle")
return HttpResponseRedirect(reverse('view_test', args=(finding_group.test.id,)))
collector = NestedObjects(using=DEFAULT_DB_ALIAS)
collector.collect([finding_group])
rels = collector.nested()
product_tab = Product_Tab(finding_group.test.engagement.product.id, title="Product", tab="settings")
return render(request, 'dojo/delete_finding_group.html',
{'finding_group': finding_group,
'form': form,
'product_tab': product_tab,
'rels': rels,
})
@user_must_be_authorized(Finding_Group, 'change', 'fgid')
@require_POST
def unlink_jira(request, fgid):
logger.debug('/finding_group/%s/jira/unlink', fgid)
group = get_object_or_404(Finding_Group, id=fgid)
logger.info('trying to unlink a linked jira issue from %d:%s', group.id, group.name)
if group.has_jira_issue:
try:
jira_helper.unlink_jira(request, group)
messages.add_message(
request,
messages.SUCCESS,
'Link to JIRA issue succesfully deleted',
extra_tags='alert-success')
return JsonResponse({'result': 'OK'})
except Exception as e:
logger.exception(e)
messages.add_message(
request,
messages.ERROR,
'Link to JIRA could not be deleted, see alerts for details',
extra_tags='alert-danger')
return HttpResponse(status=500)
else:
messages.add_message(
request,
messages.ERROR,
'Link to JIRA not found',
extra_tags='alert-danger')
return HttpResponse(status=400)
@user_must_be_authorized(Finding_Group, 'change', 'fgid')
@require_POST
def push_to_jira(request, fgid):
logger.debug('/finding_group/%s/jira/push', fgid)
group = get_object_or_404(Finding_Group, id=fgid)
try:
logger.info('trying to push %d:%s to JIRA to create or update JIRA issue', group.id, group.name)
logger.debug('pushing to jira from group.push_to-jira()')
# it may look like succes here, but the push_to_jira are swallowing exceptions
# but cant't change too much now without having a test suite, so leave as is for now with the addition warning message to check alerts for background errors.
if jira_helper.push_to_jira(group, sync=True):
messages.add_message(
request,
messages.SUCCESS,
message='Action queued to create or update linked JIRA issue, check alerts for background errors.',
extra_tags='alert-success')
else:
messages.add_message(
request,
messages.SUCCESS,
'Push to JIRA failed, check alerts on the top right for errors',
extra_tags='alert-danger')
return JsonResponse({'result': 'OK'})
except Exception as e:
logger.exception(e)
logger.error('Error pushing to JIRA: ', exc_info=True)
messages.add_message(
request,
messages.ERROR,
'Error pushing to JIRA',
extra_tags='alert-danger')
return HttpResponse(status=500)
# return redirect_to_return_url_or_else(request, reverse('view_finding', args=(group.id,)))
| 41.201439 | 165 | 0.640475 | 680 | 5,727 | 5.214706 | 0.263235 | 0.111675 | 0.035533 | 0.049351 | 0.390863 | 0.35674 | 0.258601 | 0.237451 | 0.187253 | 0.135928 | 0 | 0.005004 | 0.267156 | 5,727 | 138 | 166 | 41.5 | 0.839886 | 0.057796 | 0 | 0.421053 | 0 | 0 | 0.184113 | 0.015961 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04386 | false | 0 | 0.122807 | 0 | 0.245614 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ec603a6f9e4717583739386a5137cf37166725f | 899 | py | Python | yt/data_objects/tests/test_time_series.py | cevans216/yt | c19c3c615b996c8a6e418362ffea9041a616d673 | [
"BSD-3-Clause-Clear"
] | null | null | null | yt/data_objects/tests/test_time_series.py | cevans216/yt | c19c3c615b996c8a6e418362ffea9041a616d673 | [
"BSD-3-Clause-Clear"
] | null | null | null | yt/data_objects/tests/test_time_series.py | cevans216/yt | c19c3c615b996c8a6e418362ffea9041a616d673 | [
"BSD-3-Clause-Clear"
] | null | null | null | import os
import tempfile
from pathlib import Path
from yt.data_objects.time_series import get_filenames_from_glob_pattern
from yt.testing import assert_raises
def test_pattern_expansion():
file_list = ["fake_data_file_{}".format(str(i).zfill(4)) for i in range(10)]
with tempfile.TemporaryDirectory() as tmpdir:
for file in file_list:
(Path(tmpdir) / file).touch()
pattern = os.path.join(tmpdir, "fake_data_file_*")
found = get_filenames_from_glob_pattern(pattern)
assert found == [os.path.join(tmpdir, file) for file in file_list]
found2 = get_filenames_from_glob_pattern(Path(pattern))
assert found2 == found
def test_no_match_pattern():
with tempfile.TemporaryDirectory() as tmpdir:
pattern = os.path.join(tmpdir, "fake_data_file_*")
assert_raises(OSError, get_filenames_from_glob_pattern, pattern)
| 32.107143 | 80 | 0.719689 | 124 | 899 | 4.919355 | 0.354839 | 0.078689 | 0.104918 | 0.131148 | 0.495082 | 0.22623 | 0.114754 | 0.114754 | 0 | 0 | 0 | 0.00684 | 0.186874 | 899 | 27 | 81 | 33.296296 | 0.827633 | 0 | 0 | 0.210526 | 0 | 0 | 0.054505 | 0 | 0 | 0 | 0 | 0 | 0.210526 | 1 | 0.105263 | false | 0 | 0.263158 | 0 | 0.368421 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ec66dedba0a3ea331f1648a84344fc949e5d2d8 | 1,561 | py | Python | convolut/model/infer_manager.py | convolut/convolut | e7168ace62f70f9df084a652a2b8fa70a6902e55 | [
"Apache-2.0"
] | 9 | 2020-01-07T06:05:58.000Z | 2020-07-03T12:24:31.000Z | convolut/model/infer_manager.py | convolut/convolut | e7168ace62f70f9df084a652a2b8fa70a6902e55 | [
"Apache-2.0"
] | null | null | null | convolut/model/infer_manager.py | convolut/convolut | e7168ace62f70f9df084a652a2b8fa70a6902e55 | [
"Apache-2.0"
] | null | null | null | from typing import Dict, Any, Callable, Optional
import torch
from decouple import Module
from torch import nn
from .events import (
ModelForwardStartEvent, ModelForwardEndEvent, ModelInitEvent)
from ..loader import LoaderProcessBatchStartEvent
from ..runner import RunnerStartEvent
class InferManager(Module):
def __init__(
self,
model: nn.Module,
device,
input_fn: Callable[[Any], torch.Tensor] = lambda batch: batch["input"],
model_kwargs: Optional[Dict] = None
):
super().__init__()
self._model = model
self._model_kwargs = model_kwargs if model_kwargs else {}
self._device = device
self._input_fn = input_fn
(
self.sub(RunnerStartEvent, self.handle_runner_start)
.sub(LoaderProcessBatchStartEvent, self.handle_process_batch_start)
)
def handle_runner_start(self, event: RunnerStartEvent):
self.pub(ModelInitEvent(model=self._model,
runner=event.runner))
self._model.eval()
self._model.to(self._device)
def handle_process_batch_start(self, event: LoaderProcessBatchStartEvent):
inpt = self._input_fn(event.batch).to(self._device)
with torch.no_grad():
self._forward(input=inpt)
def _forward(self, input: torch.Tensor):
self.pub(ModelForwardStartEvent(input=input))
output = self._model.forward(input, **self._model_kwargs)
self.pub(ModelForwardEndEvent(output=output))
| 30.607843 | 83 | 0.659193 | 165 | 1,561 | 5.981818 | 0.30303 | 0.072948 | 0.026342 | 0.046606 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25048 | 1,561 | 50 | 84 | 31.22 | 0.84359 | 0 | 0 | 0 | 0 | 0 | 0.003203 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.184211 | 0 | 0.315789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ecb0816be05abed47d0b45283c0a0ba32a21fef | 7,908 | py | Python | line_extract/model_v3.py | hoslo/ocr | 4f78ae7013beb2cab8fb9391ba25ba5e6e78967c | [
"Apache-2.0"
] | 4 | 2019-05-27T10:23:55.000Z | 2020-01-19T10:03:14.000Z | line_extract/model_v3.py | dun933/ocr | 4f78ae7013beb2cab8fb9391ba25ba5e6e78967c | [
"Apache-2.0"
] | null | null | null | line_extract/model_v3.py | dun933/ocr | 4f78ae7013beb2cab8fb9391ba25ba5e6e78967c | [
"Apache-2.0"
] | 3 | 2019-08-16T18:24:02.000Z | 2020-05-15T06:35:45.000Z | import os
from keras.models import *
from keras.layers import *
from keras.optimizers import Adam
import tensorflow as tf
import keras
import itertools
import numpy as np
from keras.callbacks import Callback
from sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score
import tensorflow as tf
from keras.backend import tensorflow_backend as K
class Metrics(Callback):
    """Keras callback that computes F1, precision and recall on the
    validation set at the end of every epoch and accumulates them.

    NOTE(review): assumes binary labels, since f1_score/precision_score/
    recall_score are called with their default (binary) averaging.
    """

    def on_train_begin(self, logs=None):
        # Per-epoch metric histories.
        self.val_f1s = []
        self.val_recalls = []
        self.val_precisions = []

    def on_epoch_end(self, epoch, logs=None):
        # Keras exposes the validation set on the callback itself
        # (self.validation_data), not on the model object.
        val_predict = (np.asarray(self.model.predict(self.validation_data[0]))).round()
        val_targ = self.validation_data[1]
        _val_f1 = f1_score(val_targ, val_predict)
        _val_recall = recall_score(val_targ, val_predict)
        _val_precision = precision_score(val_targ, val_predict)
        self.val_f1s.append(_val_f1)
        self.val_recalls.append(_val_recall)
        self.val_precisions.append(_val_precision)
        print('--val_f1: %.4f --val_precision: %.4f --val_recall: %.4f'
              % (_val_f1, _val_precision, _val_recall))
        return
def loss(y_true, y_pred):
    """Weighted binary cross-entropy with positive-class weight 2.

    NOTE(review): arguments are passed positionally as
    (targets, logits, pos_weight), matching the TF1 signature of
    tf.nn.weighted_cross_entropy_with_logits; TF2 renamed the first
    parameter to ``labels`` — confirm against the TensorFlow version in use.
    """
    return tf.nn.weighted_cross_entropy_with_logits(y_true, y_pred, 2)
def binary_PFA(y_true, y_pred, threshold=K.variable(value=0.5)):
    """Batch-wise TN / N at the given decision threshold.

    NOTE(review): despite the name (PFA = probability of false alarm,
    i.e. FP / N), this returns TN / N — the specificity, 1 - PFA.
    Confirm which quantity is actually intended.
    """
    # Binarize the predictions at the threshold.
    y_pred = K.cast(y_pred >= threshold, 'float32')
    # N = total number of negative labels
    N = K.sum(1 - y_true)
    # TN = negatives that were also predicted negative
    TN = K.sum((1 - y_pred) * (1 - y_true))
    return TN / N
def binary_PTA(y_true, y_pred, threshold=K.variable(value=0.5)):
    """Batch-wise probability of true alert (recall) at the given threshold."""
    # Binarize the raw predictions at the decision threshold.
    predicted_positive = K.cast(y_pred >= threshold, 'float32')
    # Count of positive ground-truth labels.
    positives = K.sum(y_true)
    # Predicted positives that are actually positive.
    true_positives = K.sum(predicted_positive * y_true)
    return true_positives / positives
def f1(y_true, y_pred):
    """Batch-wise F1 score: the harmonic mean of precision and recall.

    Precision and recall share the same true-positive count, so it is
    computed once.  K.epsilon() guards every division against zero
    denominators.
    """
    # Clip into [0, 1] and round so products count hits as 1.0, misses as 0.0.
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    # Precision: fraction of selected items that are relevant.
    precision = true_positives / (predicted_positives + K.epsilon())
    # Recall: fraction of relevant items that are selected.
    recall = true_positives / (possible_positives + K.epsilon())
    return 2 * ((precision * recall) / (precision + recall + K.epsilon()))
def unet(pretrained_weights=None, input_size=(None, None, 3), num_class=2):
    """Build a U-Net-style encoder/decoder with separable (k,1)/(1,k) convs.

    :param pretrained_weights: optional path to a weights file to load.
    :param input_size: input shape; (None, None, 3) allows variable H/W.
    :param num_class: 2 selects a single sigmoid output with binary
        cross-entropy; any other value selects `num_class` relu outputs
        trained with MSE.
    :return: a compiled Keras Model.
    """
    inputs = Input(input_size)
    # --- Encoder ---------------------------------------------------------
    conv1 = Conv2D(32, (7, 1), activation='relu', padding='same', kernel_initializer='he_normal')(inputs)
    conv1 = BatchNormalization(momentum=0.9)(conv1)
    conv1 = Conv2D(32, (1, 7), activation='relu', padding='same', kernel_initializer='he_normal')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = BatchNormalization(momentum=0.9)(pool1)
    conv2 = Conv2D(64, (5, 1), activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
    conv2 = BatchNormalization(momentum=0.9)(conv2)
    conv2 = Conv2D(64, (1, 5), activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = BatchNormalization(momentum=0.9)(pool2)
    conv3 = Conv2D(256, (5, 1), activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
    conv3 = BatchNormalization(momentum=0.9)(conv3)
    conv3 = Conv2D(256, (1, 5), activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
    drop3 = Dropout(0.5)(conv3)
    pool3 = AveragePooling2D(pool_size=(2, 2))(drop3)
    # conv4 = BatchNormalization(momentum=0.9)(pool3)
    # conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)
    # conv4 = BatchNormalization(momentum=0.9)(conv4)
    # conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)
    # drop4 = Dropout(0.5)(conv4)
    # pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
    # --- Bottleneck ------------------------------------------------------
    conv5 = BatchNormalization(momentum=0.9)(pool3)
    conv5 = Conv2D(512, (5, 1), activation='relu', padding='same', kernel_initializer='he_normal')(conv5)
    conv5 = BatchNormalization(momentum=0.9)(conv5)
    conv5 = Conv2D(512, (1, 5), activation='relu', padding='same', kernel_initializer='he_normal')(conv5)
    drop5 = Dropout(0.5)(conv5)
    # --- Decoder: upsample and concatenate with encoder skip connections -
    up6 = Conv2D(256, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
        UpSampling2D(size=(2, 2))(drop5))
    merge6 = concatenate([drop3, up6], axis=3)
    conv6 = BatchNormalization(momentum=0.9)(merge6)
    conv6 = Conv2D(256, (5, 1), activation='relu', padding='same', kernel_initializer='he_normal')(conv6)
    conv6 = BatchNormalization(momentum=0.9)(conv6)
    # NOTE(review): both conv6 kernels are (5, 1); every other pair in this
    # model alternates (5, 1) then (1, 5) — possible copy-paste slip.
    conv6 = Conv2D(256, (5, 1), activation='relu', padding='same', kernel_initializer='he_normal')(conv6)
    up7 = Conv2D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
        UpSampling2D(size=(2, 2))(conv6))
    merge7 = concatenate([conv2, up7], axis=3)
    conv7 = BatchNormalization(momentum=0.9)(merge7)
    conv7 = Conv2D(64, (5, 1), activation='relu', padding='same', kernel_initializer='he_normal')(conv7)
    conv7 = BatchNormalization(momentum=0.9)(conv7)
    conv7 = Conv2D(64, (1, 5), activation='relu', padding='same', kernel_initializer='he_normal')(conv7)
    conv7 = BatchNormalization(momentum=0.9)(conv7)
    up8 = Conv2D(32, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
        UpSampling2D(size=(2, 2))(conv7))
    merge8 = concatenate([conv1, up8], axis=3)
    conv8 = BatchNormalization(momentum=0.9)(merge8)
    conv8 = Conv2D(32, (7, 1), activation='relu', padding='same', kernel_initializer='he_normal')(conv8)
    conv8 = BatchNormalization(momentum=0.9)(conv8)
    conv8 = Conv2D(32, (1, 7), activation='relu', padding='same', kernel_initializer='he_normal')(conv8)
    conv8 = BatchNormalization(momentum=0.9)(conv8)
    # up9 = Conv2D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
    #     UpSampling2D(size=(2, 2))(conv8))
    # merge9 = concatenate([conv1, up9], axis=3)
    # conv9 = BatchNormalization(momentum=0.9)(merge9)
    # conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
    # conv9 = BatchNormalization(momentum=0.9)(conv9)
    # conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
    # conv9 = BatchNormalization(momentum=0.9)(conv9)
    # conv9 = Conv2D(num_class, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
    # conv9 = BatchNormalization(momentum=0.9)(conv9)
    # --- Output head and loss selection ----------------------------------
    if num_class == 2:
        conv10 = Conv2D(1, 1, activation='sigmoid')(conv8)
        loss_function = 'binary_crossentropy'
    else:
        conv10 = Conv2D(num_class, 1, activation='relu')(conv8)
        loss_function = 'mse'
    # NOTE: 'input='/'output=' are the legacy Keras 1.x keyword names
    # (Keras 2 uses 'inputs='/'outputs=').
    model = Model(input=inputs, output=conv10)
    model.compile(optimizer=Adam(lr=1e-4), loss=loss_function, metrics=['accuracy'])
    # model.compile(optimizer=Adam(lr=1e-4), loss=loss, metrics=[binary_PTA, binary_PFA])
    model.summary()
    if (pretrained_weights):
        model.load_weights(pretrained_weights)
    return model
2ecdb4a48fe7a624c03288fdd20c3df4771fcc1e | 16,582 | py | Python | test/python/rest/test_toolkits.py | ddebrunner/streamsx.topology | 5006453e915ae3a7945585f63f2082610a824023 | [
"Apache-2.0"
] | null | null | null | test/python/rest/test_toolkits.py | ddebrunner/streamsx.topology | 5006453e915ae3a7945585f63f2082610a824023 | [
"Apache-2.0"
] | null | null | null | test/python/rest/test_toolkits.py | ddebrunner/streamsx.topology | 5006453e915ae3a7945585f63f2082610a824023 | [
"Apache-2.0"
] | null | null | null | import os
import logging
import time
import unittest
import urllib.parse
import xml.etree.ElementTree as ElementTree
from streamsx.topology.tester import Tester
from streamsx.topology.context import ConfigParams, JobConfig
from streamsx.build import BuildService
from streamsx.rest_primitives import *
logger = logging.getLogger('streamsx.test.toolkits_test')
def _get_distributed_sc():
    """Create a BuildService client for the configured CP4D endpoint.

    TLS verification is disabled because test clusters commonly use
    self-signed certificates.
    """
    return BuildService.of_endpoint(verify=False)
# REST API failures raise HTTPError instance, which, when printed, show
# the default error message for the status code. We often have useful
# messages in the response, but those are hidden from the user. We ought
# to consider making it easier for the user to see the specific error message
# from the REST API call.
# Handle HTTPError, and show the detailed error message contained
# in the response.
def _handle_http_error(err):
try:
response = err.response
text_json = json.loads(response.text)
messages = text_json['messages']
for message in messages:
print (message['message'])
logger.error(message['message'])
except:
pass
raise err
# Tests of the toolkit methods provided through the Build REST API
@unittest.skipUnless(
    "CP4D_URL" in os.environ
    and "STREAMS_INSTANCE_ID" in os.environ
    and "STREAMS_USERNAME" in os.environ
    and "STREAMS_PASSWORD" in os.environ,
    "requires Streams REST API setup",
)
class TestDistributedRestToolkitAPI(unittest.TestCase):
    """Tests of the toolkit methods provided through the Build REST API.

    Uploads small fixture toolkits (bingo/cards/games) to a live Streams
    instance, so the whole class is skipped unless the CP4D connection
    environment variables are set.
    """

    # toolkits used in these tests.
    bingo_0_path = './toolkits/bingo_tk0'  # version 1.0.0
    bingo_1_path = './toolkits/bingo_tk1'  # version 1.0.1
    bingo_2_path = './toolkits/bingo_tk2'  # version 1.0.2
    cards_path = './toolkits/cards_tk'
    games_path = './toolkits/games_tk'
    # Expected names/versions; names are re-read from the toolkit.xml files
    # in setUpClass via get_toolkit_names().
    bingo_toolkit_name = 'com.example.bingo'
    cards_toolkit_name = 'com.example.cards'
    games_toolkit_name = 'com.example.games'
    bingo_0_version = '1.0.0'
    bingo_1_version = '1.0.1'
    bingo_2_version = '1.0.2'

    @classmethod
    def setUpClass(cls):
        """
        Initialize the logger and get the SWS username, password, and REST URL.
        :return: None
        """
        if not 'CP4D_URL' in os.environ:
            return
        cls.is_v2 = None
        cls.logger = logger
        cls.get_toolkit_names()

    def setUp(self):
        """Connect to the build service and start from a clean toolkit state."""
        Tester.setup_distributed(self)
        self.sc = _get_distributed_sc()
        if self.sc.resource_url is None:
            self.skipTest("Build REST API is not available")
        else:
            self.sc.session.verify = False
        self.test_config[ConfigParams.STREAMS_CONNECTION] = self.sc
        self.delete_test_toolkits()
        # Operations such as adding or deleting toolkits sometimes are not
        # effective immediately. Even though toolkits were deleted
        # successfully, it may take some time for them to be completely
        # removed. For this reason, we sleep between tests, after deleting
        # all the toolkits known to this test suite.
        time.sleep(5)

    def tearDown(self):
        self.delete_test_toolkits()

    # Find all toolkits matching a toolkit name. Optionally a version
    # may also be specified, in which case only a toolkit matching the
    # name and version will be returned.
    def find_matching_toolkits(self, name, version=None):
        toolkits = self.sc.get_toolkits()
        return [toolkit for toolkit in toolkits if (toolkit.name == name and (version is None or toolkit.version == version))]

    # Verify that a toolkit with a given name exists. If a version is
    # specified, also verify that the version matches.
    def assert_toolkit_exists(self, name, version=None):
        matches = self.find_matching_toolkits(name, version)
        self.assertGreaterEqual(len(matches), 1)

    # Verify that a toolkit with a given name and optional version does not
    # exist.
    def assert_toolkit_not_exists(self, name, version=None):
        matches = self.find_matching_toolkits(name, version)
        self.assertEqual(len(matches), 0)

    # Sometimes toolkits seem not to be listed immediately after they are
    # posted. This waits for a toolkit to appear in the toolkit list,
    # retrying a limited number of times until it does.
    def wait_for_toolkit(self, name, version=None, retries=30):
        retry = 0
        while retry < retries:
            matches = self.find_matching_toolkits(name, version)
            if matches:
                break
            else:
                time.sleep(1)
                retry += 1
        else:
            # while/else: only reached when the retry budget is exhausted.
            self.fail('Toolkit ' + name + ' not found')

    # delete the test toolkits, in case they have been left behind by
    # a test failure.
    def delete_test_toolkits(self):
        toolkit_names = [type(self).bingo_toolkit_name,
                         type(self).cards_toolkit_name,
                         type(self).games_toolkit_name]
        toolkits = self.sc.get_toolkits()
        for toolkit in toolkits:
            if toolkit.name in toolkit_names:
                self.assertTrue(toolkit.delete())

    # Verify get_toolkits() does not fail. We don't know what toolkits
    # exist on the test host, so we can't do much more until we push our own
    # test toolkits. It is probably safe to assume that the standard spl
    # toolkit will always be present, so there will be at least one toolkit.
    def test_get_toolkits(self):
        try:
            toolkits = self.sc.get_toolkits()
            self.assertGreaterEqual(len(toolkits), 1)
            self.assertIsNotNone(toolkits[0].name)
            # for toolkit in toolkits:
            #     print(toolkit.name + ' ' + toolkit.version + ": " + toolkit.id)
        except requests.exceptions.HTTPError as err:
            _handle_http_error(err)

    # Post a test toolkit. Verify that it succeeded and returned a Toolkit
    # object, and its attributes are as expected. Also get the new list
    # of toolkits and verify that the new toolkit is there. As the testing
    # might be done in a shared environment, we can't verify that there are
    # no other changes to the list of toolkits.
    def test_post_toolkit(self):
        try:
            toolkit_path = type(self).bingo_1_path
            expected_toolkit_name = type(self).bingo_toolkit_name
            expected_toolkit_version = type(self).bingo_1_version
            bingo = self.sc.upload_toolkit(toolkit_path)
            self.assertIsNotNone(bingo)
            self.assertEqual(bingo.name, expected_toolkit_name)
            self.assertEqual(bingo.version, expected_toolkit_version)
            self.assertEqual(bingo.requiredProductVersion, '4.2')
            self.assertEqual(bingo.resourceType, 'toolkit')
            # We don't know what the values the following attributes will have,
            # but we verify that the expected attributes do at least have values.
            self.assertIsNotNone(bingo.id)
            self.assertIsNotNone(bingo.index)
            self.assertIsNotNone(bingo.path)
            self.assertIsNotNone(bingo.restid)
            self.assertIsNotNone(bingo.self)
            # verify that the toolkit is now in the list of all toolkits.
            self.wait_for_toolkit(expected_toolkit_name)
            # delete the toolkit and verify that it is no longer in the list
            self.assertTrue(bingo.delete())
            self.assert_toolkit_not_exists(expected_toolkit_name)
            # deleting it a second time should fail
            self.assertFalse(bingo.delete())
            # post it again, then find it in the list and delete it through the
            # Toolkit object in the list.
            bingo = self.sc.upload_toolkit(toolkit_path)
            self.assertIsNotNone(bingo)
            self.assertEqual(expected_toolkit_name, bingo.name)
            self.assertEqual(expected_toolkit_version, bingo.version)
            # verify that the toolkit is now in the list of all toolkits.
            self.wait_for_toolkit(expected_toolkit_name, expected_toolkit_version)
            self.assertTrue(bingo.delete())
        except requests.exceptions.HTTPError as err:
            _handle_http_error(err)

    # Test posting a toolkit, then getting its index. Do some sanity
    # checks on the index.
    def test_get_index(self):
        try:
            toolkit_path = type(self).bingo_1_path
            expected_toolkit_name = type(self).bingo_toolkit_name
            expected_toolkit_version = type(self).bingo_1_version
            bingo = self.sc.upload_toolkit(toolkit_path)
            self.assertIsNotNone(bingo)
            index = bingo.get_index()
            self.assertIsNotNone(index)
            self.assertTrue(bingo.delete())
            # The index is xml. Verify that it can be parsed and is from the
            # correct toolkit.
            root = ElementTree.fromstring(index)
            toolkit_element = root.find('{http://www.ibm.com/xmlns/prod/streams/spl/toolkit}toolkit')
            toolkit_name = toolkit_element.attrib['name']
            toolkit_version = toolkit_element.attrib['version']
            self.assertEqual(expected_toolkit_name, toolkit_name)
            self.assertEqual(expected_toolkit_version, toolkit_version)
        except requests.exceptions.HTTPError as err:
            _handle_http_error(err)

    # Test posting different versions of a toolkit. Posting a version
    # equal to one that is currently deployed should fail,
    # but posting a different version should succeed.
    def test_post_multiple_versions(self):
        try:
            toolkits = self.sc.get_toolkits()
            self.assertNotIn(type(self).bingo_toolkit_name, [toolkit.name for toolkit in toolkits])
            # first post version 1.0.1
            bingo1 = self.sc.upload_toolkit(type(self).bingo_1_path)
            self.assertIsNotNone(bingo1)
            self.wait_for_toolkit(type(self).bingo_toolkit_name, type(self).bingo_1_version)
            # post version 1.0.1 again. It should return None
            self.assertIsNone(self.sc.upload_toolkit(type(self).bingo_1_path))
            # post version 1.0.0. The version does not match any existing
            # version, so it should get posted.
            bingo0 = self.sc.upload_toolkit(type(self).bingo_0_path)
            self.assertIsNotNone(bingo0)
            # Version 1.0.0 is now in the list, and version 1.0.1 is still there
            self.wait_for_toolkit(type(self).bingo_toolkit_name, type(self).bingo_0_version)
            self.assert_toolkit_exists(type(self).bingo_toolkit_name, type(self).bingo_1_version)
            self.assert_toolkit_not_exists(type(self).bingo_toolkit_name, type(self).bingo_2_version)
            # post version 1.0.2. It does not replace version 1.0.1, but they
            # both continue to exist.
            bingo2 = self.sc.upload_toolkit(type(self).bingo_2_path)
            self.assertIsNotNone(bingo2)
            self.wait_for_toolkit(type(self).bingo_toolkit_name, type(self).bingo_2_version)
            self.assert_toolkit_exists(type(self).bingo_toolkit_name, type(self).bingo_1_version)
            # NOTE(review): the check below duplicates the one above; the
            # second was probably meant to verify bingo_0_version as well.
            self.assert_toolkit_exists(type(self).bingo_toolkit_name, type(self).bingo_1_version)
            self.assertTrue(bingo0.delete())
            self.assertTrue(bingo1.delete())
            self.assertTrue(bingo2.delete())
        except requests.exceptions.HTTPError as err:
            _handle_http_error(err)

    # Test getting the dependencies of a toolkit.
    def test_dependencies(self):
        try:
            # Games depends on both cards and bingo.
            bingo = self.sc.upload_toolkit(type(self).bingo_0_path)
            self.assertIsNotNone(bingo)
            cards = self.sc.upload_toolkit(type(self).cards_path)
            self.assertIsNotNone(cards)
            games = self.sc.upload_toolkit(type(self).games_path)
            self.assertIsNotNone(games)
            # bingo and cards have no dependencies
            self.assertEqual(len(bingo.dependencies), 0)
            self.assertEqual(len(cards.dependencies), 0)
            games_dependencies = games.dependencies
            self.assertEqual(len(games_dependencies), 2)
            self.assertEqual(games_dependencies[0].name, type(self).bingo_toolkit_name)
            self.assertEqual(games_dependencies[0].version, '[1.0.0,2.0.0)')
            self.assertEqual(games_dependencies[1].name, type(self).cards_toolkit_name)
            self.assertEqual(games_dependencies[1].version, '[1.0.0,1.1.0)')
            self.assertTrue(games.delete())
            self.assertTrue(cards.delete())
            self.assertTrue(bingo.delete())
        except requests.exceptions.HTTPError as err:
            _handle_http_error(err)

    # test posting from a bad path
    def test_post_bad_path(self):
        try:
            # path does not exist
            not_exists = 'toolkits/fleegle_tk'
            with self.assertRaises(ValueError):
                fleegle = self.sc.upload_toolkit(not_exists)
            # path is an individual file
            file_exists = 'toolkits/bingo_tk0/toolkit.xml'
            with self.assertRaises(ValueError):
                fleegle = self.sc.upload_toolkit(file_exists)
            # path is malformed garbage
            garbage_path = './toolkits/bingo_tk0\000/snork'
            with self.assertRaises(ValueError):
                fleegle = self.sc.upload_toolkit(garbage_path)
        except requests.exceptions.HTTPError as err:
            _handle_http_error(err)

    # Test getting toolkit by id.
    def test_get_toolkit(self):
        try:
            toolkit_path = type(self).bingo_1_path
            expected_toolkit_name = type(self).bingo_toolkit_name
            expected_toolkit_version = type(self).bingo_1_version
            bingo = self.sc.upload_toolkit(toolkit_path)
            self.assertIsNotNone(bingo)
            # verify that the toolkit is now in the list of all toolkits.
            self.wait_for_toolkit(expected_toolkit_name, expected_toolkit_version)
            found = self.sc.get_toolkit(bingo.id)
            self.assertIsNotNone(found)
            self.assertEqual(found.name, expected_toolkit_name)
            self.assertEqual(found.version, expected_toolkit_version)
            self.assertEqual(found.requiredProductVersion, '4.2')
            self.assertEqual(found.resourceType, 'toolkit')
            # We don't know what the values the following attributes will have,
            # but we verify that the expected attributes do at least have values.
            self.assertIsNotNone(found.path)
            # These are undocumented. Note index is the URL of the index,
            # not the contents of the index, which are exposed through
            # get_index()
            self.assertIsNotNone(found.id)
            self.assertIsNotNone(found.index)
            self.assertIsNotNone(found.restid)
            self.assertIsNotNone(found.self)
            # The ID is streams-toolkits/name-version.
            toolkit_id = 'streams-toolkits/' + expected_toolkit_name + '-' + expected_toolkit_version
            found = self.sc.get_toolkit(toolkit_id)
            # Using just the name fails
            with self.assertRaises(ValueError):
                self.sc.get_toolkit('streams-toolkits/' + expected_toolkit_name)
            self.assertTrue(bingo.delete())
        except requests.exceptions.HTTPError as err:
            _handle_http_error(err)

    @classmethod
    def get_toolkit_name(cls, toolkit_path):
        """Read the toolkit name out of the toolkit.xml under *toolkit_path*."""
        root = ElementTree.parse(toolkit_path + "/toolkit.xml")
        toolkit_element = root.find('{http://www.ibm.com/xmlns/prod/streams/spl/toolkit}toolkit')
        toolkit_name = toolkit_element.attrib['name']
        return toolkit_name

    @classmethod
    def get_toolkit_names(cls):
        """Refresh the expected toolkit names from the fixture toolkit.xml files."""
        cls.bingo_toolkit_name = cls.get_toolkit_name(cls.bingo_0_path)
        cls.cards_toolkit_name = cls.get_toolkit_name(cls.cards_path)
        cls.games_toolkit_name = cls.get_toolkit_name(cls.games_path)
| 40.842365 | 126 | 0.641298 | 2,048 | 16,582 | 5.030273 | 0.162109 | 0.05232 | 0.039119 | 0.02582 | 0.435255 | 0.379732 | 0.331198 | 0.305475 | 0.292273 | 0.2682 | 0 | 0.00982 | 0.28151 | 16,582 | 405 | 127 | 40.94321 | 0.854877 | 0.239175 | 0 | 0.308017 | 0 | 0 | 0.05301 | 0.006956 | 0 | 0 | 0 | 0 | 0.295359 | 1 | 0.080169 | false | 0.008439 | 0.042194 | 0.004219 | 0.189873 | 0.004219 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ecf85b27afc09640a82b97f465143a5ec26f976 | 1,730 | py | Python | core/rewrite.py | DCZYewen/NullDCZHFS | 974ffd8c688eb45f5a0ee76636bbfd6a86ecd3be | [
"MIT"
] | 1 | 2020-05-13T15:54:36.000Z | 2020-05-13T15:54:36.000Z | core/rewrite.py | DCZYewen/NullcatServer | 974ffd8c688eb45f5a0ee76636bbfd6a86ecd3be | [
"MIT"
] | null | null | null | core/rewrite.py | DCZYewen/NullcatServer | 974ffd8c688eb45f5a0ee76636bbfd6a86ecd3be | [
"MIT"
] | null | null | null | import asyncio
from .config import conf
from .logger import main_logger
from .web import HTTPRequest, http301, WebHandler
log = main_logger.get_logger()
target_port = conf.get("https", "port")
class Redirect_Handler(WebHandler):
    """Answer every plain-HTTP request with a 301 redirect to the HTTPS
    port configured under [https] in the server configuration."""

    async def get(self):
        # A front-end proxy may supply the original target via X-Local;
        # prefer it. Single lookup instead of the double dict access.
        local = self.request.head.get("X-Local")
        if local:
            return http301(local)
        host = self.request.head.get("Host")
        if not host:
            log.warning(f"GET {self.request.path}: Host not found(from {self.request.remote})")
            host = "127.0.0.1"
        # Strip any port from the Host header, then rebuild the URL on the
        # configured HTTPS port with the original path.
        return http301(f"https://{host.split(':')[0]}:{target_port}{self.request.path}")

    async def post(self):
        # POST requests are redirected exactly like GETs.
        return await self.get()
async def rewrite_handler(reader, writer, data):
    """Parse a raw HTTP header and send back a 301 redirect to HTTPS.

    :param data: tuple of (client ip, raw header bytes); an empty header
        means nothing was read and the connection is silently dropped.
    """
    ip, header = data
    if not header:
        return
    try:
        req = HTTPRequest(header, ip)
    except (ValueError, AttributeError):
        log.warning("Request Unpack Error(from %s)" % ip)
        log.warning(("Origin data: ", header))
        return
    # Filter proxy scanners: a legitimate origin-form request path starts
    # with "/". startswith() also copes with an empty path, where the
    # previous req.path[0] test would raise IndexError.
    if not req.path.startswith("/"):
        return
    sender = await Redirect_Handler(req, reader, writer).run()
    sender.add_header({"Connection": "close"})
    await sender.send(writer)
async def server(reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
    """Accept one plain-HTTP connection, read its request header within the
    configured timeout, and hand it to rewrite_handler for redirection.

    The connection is always closed, whatever happens while handling it.
    """
    ip, port = writer.get_extra_info("peername")[0:2]
    try:
        header = await asyncio.wait_for(reader.readuntil(b"\r\n\r\n"), conf.get("server", "request_timeout"))
        await rewrite_handler(reader, writer, (ip, header))
    except (ConnectionError, asyncio.TimeoutError, asyncio.IncompleteReadError):
        # Expected network hiccups: client went away or never sent a full
        # header. Nothing to do.
        pass
    except OSError as e:
        # Unexpected socket-level failure: route it through the module
        # logger instead of printing to stdout, consistent with the rest
        # of this module.
        log.warning("OSError while handling %s: %s" % (ip, e))
    finally:
        # finally guarantees the socket is released even if an unforeseen
        # exception escapes the handlers above.
        writer.close()
2ecfa0d95f020f2b485feb32481f32e2baf2f0f3 | 3,619 | py | Python | writing/scripts/QD-other-metrics.py | johanere/qflow | 5453cd5c3230ad7f082adf9ec1aea63ab0a4312a | [
"MIT"
] | 5 | 2019-07-24T21:46:24.000Z | 2021-06-11T18:18:24.000Z | writing/scripts/QD-other-metrics.py | johanere/qflow | 5453cd5c3230ad7f082adf9ec1aea63ab0a4312a | [
"MIT"
] | 22 | 2019-02-19T10:49:26.000Z | 2019-07-18T09:42:13.000Z | writing/scripts/QD-other-metrics.py | bsamseth/FYS4411 | 72b879e7978364498c48fc855b5df676c205f211 | [
"MIT"
] | 2 | 2020-11-04T15:17:24.000Z | 2021-11-03T16:37:38.000Z | import time
from datetime import timedelta
import numpy as np
from qflow.wavefunctions import (
JastrowPade,
SimpleGaussian,
WavefunctionProduct,
Dnn,
InputSorter,
)
from qflow.wavefunctions.nn.layers import DenseLayer
from qflow.wavefunctions.nn.activations import tanh, exponential
from qflow.hamiltonians import CoulombHarmonicOscillator
from qflow.samplers import ImportanceSampler
from qflow.statistics import compute_statistics_for_series, statistics_to_tex
from qflow.mpi import mpiprint
# Script: evaluate <r^2>, <r> and <r_12> for three trained wavefunctions
# (pure Pade-Jastrow benchmark, DNN-augmented, and input-sorted DNN) on a
# two-particle 2D quantum dot, writing LaTeX tables of the statistics.
P, D = 2, 2  # Particles, dimensions
system = np.empty((P, D))
H = CoulombHarmonicOscillator()

# Wave functions:
simple_gaussian = SimpleGaussian(alpha=0.5)
jastrow = JastrowPade(alpha=1, beta=1)
simple_and_jastrow = WavefunctionProduct(simple_gaussian, jastrow)

# Small feed-forward network multiplying the Gaussian-Jastrow ansatz.
layers = [
    DenseLayer(P * D, 32, activation=tanh, scale_factor=0.001),
    DenseLayer(32, 16, activation=tanh),
    DenseLayer(16, 1, activation=exponential),
]
dnn = Dnn()
for l in layers:
    dnn.add_layer(l)
psi = WavefunctionProduct(simple_and_jastrow, dnn)
psi_sampler = ImportanceSampler(system, psi, step_size=0.1)
# Load previously optimized parameters for the DNN variant.
psi.parameters = np.loadtxt("QD-parameters-dnn-regular.txt")

# Sorted
# Same architecture, but wrapped in InputSorter to enforce permutation
# symmetry of the inputs; uses its own trained parameter file.
simple_gaussian2 = SimpleGaussian(alpha=0.5)
jastrow2 = JastrowPade(alpha=1, beta=1)
simple_and_jastrow2 = WavefunctionProduct(simple_gaussian2, jastrow2)
layers2 = [
    DenseLayer(P * D, 32, activation=tanh, scale_factor=0.001),
    DenseLayer(32, 16, activation=tanh),
    DenseLayer(16, 1, activation=exponential),
]
dnn2 = Dnn()
for l in layers2:
    dnn2.add_layer(l)
psi_sorted_base = WavefunctionProduct(simple_and_jastrow2, dnn2)
psi_sorted = InputSorter(psi_sorted_base)
psi_sorted.parameters = np.loadtxt("QD-parameters-dnn-sorted.txt")
psi_sorted_sampler = ImportanceSampler(system, psi_sorted, step_size=0.1)

# Benchmark:
# Plain Gaussian x Pade-Jastrow with hard-coded optimized parameters.
simple_gaussian_bench = SimpleGaussian(alpha=0.5)
jastrow_bench = JastrowPade(alpha=1, beta=1)
psi_bench = WavefunctionProduct(simple_gaussian_bench, jastrow_bench)
psi_bench.parameters = [0.494_821_73, 1, 1, 0.397_401_86]
psi_bench_sampler = ImportanceSampler(system, psi_bench, step_size=0.1)

wavefuncs = [psi_bench, psi, psi_sorted]
samplers = [psi_bench_sampler, psi_sampler, psi_sorted_sampler]

# Equilibrate each Markov chain before measuring.
for s in samplers:
    s.thermalize(10000)

evaluation_points = 2 ** 24

# Time a short 500-sample run to extrapolate an ETA for the full evaluation.
t0 = time.time()
H.mean_squared_radius_array(psi_sampler, 500)
H.mean_radius_array(psi_sampler, 500)
H.mean_distance_array(psi_sampler, P * 500)
t1 = time.time() - t0
eta = timedelta(seconds=round(t1 / 500 * evaluation_points))
mpiprint(f"Calculating final energy - ETA {eta}")

# NOTE(review): four labels but only three samplers; statistics_to_tex will
# receive one more label than stats row — confirm the intended pairing.
labels = [r"$\psi_{PJ}$", r"$\psi_{DNN}$", r"$\psi_{SDNN}$", r"$\hat{\psi}_{SDNN}$"]

# <r^2> with blocking error estimates, written as a LaTeX table.
r2_stats = [
    compute_statistics_for_series(
        H.mean_squared_radius_array(s, evaluation_points), method="blocking"
    )
    for s in samplers
]
mpiprint(
    statistics_to_tex(
        r2_stats,
        labels,
        filename=__file__ + ".r2-table.tex",
        quantity_name="$\\langle r^2\\rangle$",
    )
)

# <r> table.
r_stats = [
    compute_statistics_for_series(
        H.mean_radius_array(s, evaluation_points), method="blocking"
    )
    for s in samplers
]
mpiprint(
    statistics_to_tex(
        r_stats,
        labels,
        filename=__file__ + ".r-table.tex",
        quantity_name="$\\langle r\\rangle$",
    )
)

# <r_12> (pair distance) table; P times as many points since each sample
# yields one value per particle pair.
rij_stats = [
    compute_statistics_for_series(
        H.mean_distance_array(s, evaluation_points * P), method="blocking"
    )
    for s in samplers
]
mpiprint(
    statistics_to_tex(
        rij_stats,
        labels,
        filename=__file__ + ".rij-table.tex",
        quantity_name="$\\langle r_{12}\\rangle$",
    )
)
| 28.273438 | 84 | 0.727273 | 479 | 3,619 | 5.246347 | 0.26096 | 0.028651 | 0.031834 | 0.041385 | 0.355352 | 0.316753 | 0.257461 | 0.166733 | 0.166733 | 0.166733 | 0 | 0.035082 | 0.157226 | 3,619 | 127 | 85 | 28.496063 | 0.788852 | 0.015198 | 0 | 0.194444 | 0 | 0 | 0.078112 | 0.016016 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.12037 | 0 | 0.12037 | 0.046296 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ed116d59c2b0c901b520091f0adae6d5dd2c1e7 | 2,721 | py | Python | docs/testcases/test_CON_TC28_ManyArticle.py | iiistvan/conduit | 4ee93a61371747ad5f70f51a52a80ecd57a86979 | [
"MIT"
] | null | null | null | docs/testcases/test_CON_TC28_ManyArticle.py | iiistvan/conduit | 4ee93a61371747ad5f70f51a52a80ecd57a86979 | [
"MIT"
] | null | null | null | docs/testcases/test_CON_TC28_ManyArticle.py | iiistvan/conduit | 4ee93a61371747ad5f70f51a52a80ecd57a86979 | [
"MIT"
] | null | null | null | # CON_TC28_ManyArticle: Ismételt és sorozatos adatbevitel adatforrásból
# a szükséges csomagok, modulok betöltése
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
import time
import csv
def test_CON_TC28_ManyArticle():
    """CON_TC28: repeated, sequential data entry from a CSV data source —
    publish one article per CSV row and verify each appears in the feed."""
    # webdriver configuration, open the site under test
    options = webdriver.ChromeOptions()
    options.add_argument('--headless')
    driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)
    driver.get("http://localhost:1667")

    # single, central wait used throughout the test
    def ts():
        time.sleep(3)

    # check that a created article with the given title is present
    def check_art_item(art_title):
        driver.find_element_by_xpath('//a[@href="#/"]').click()
        ts()
        # collect the pagination elements
        pages = driver.find_elements_by_class_name('page-link')
        print(len(pages))
        # check the entries on every page
        title_list = []
        for i in range(len(pages)):
            pages[i].click()
            ts()
            art_input_items = driver.find_elements_by_class_name('article-preview')
            for ai in art_input_items:
                if ai.find_element_by_tag_name('h1').text == art_title:
                    return True
        return False

    # Step 0: precondition — sign in with the built-in test user
    testdata = [['testuser1', 'testuser1@example.com', 'Abcd123$'], ]
    signin_head = driver.find_element_by_xpath('//a[@href="#/login"]')
    signin_head.click()
    input_items = driver.find_elements_by_xpath('//form//input')
    signin_btn = driver.find_element_by_xpath('//form/button')
    for e, i in enumerate(input_items):
        i.send_keys(testdata[0][e + 1])
        ts()
    signin_btn.click()
    ts()

    # Step 1: open the "New Article" page
    newArt_head = driver.find_element_by_xpath('//*[@id="app"]//li[2]/a')
    newArt_head.click()
    ts()

    # Step 2: publish posts with the data read from the CSV file
    publish_btn = driver.find_element_by_xpath('//form/button')
    with open('ManyDataInput.csv', "r", encoding='utf-8') as csvfile:
        csvreader = csv.reader(csvfile, delimiter=',')
        for row in csvreader:
            input_items = driver.find_elements_by_xpath('//form//input')
            for e, i in enumerate(input_items):
                i.send_keys(row[e])
                ts()
            driver.find_element_by_xpath('//form//textarea').send_keys(row[-1])
            publish_btn.click()
            ts()
            assert check_art_item(row[0])
            # NOTE(review): check_art_item navigated back to the home page, so
            # newArt_head may be a stale element reference here — verify.
            newArt_head.click()
            ts()
    driver.find_element_by_xpath('//a[@href="#/"]').click()

    # close the browser window, free resources
    driver.close()
    driver.quit()
| 35.337662 | 83 | 0.642411 | 327 | 2,721 | 5.122324 | 0.455657 | 0.065672 | 0.06209 | 0.079403 | 0.275224 | 0.275224 | 0.196418 | 0.179104 | 0.094328 | 0.041791 | 0 | 0.011527 | 0.23484 | 2,721 | 76 | 84 | 35.802632 | 0.792988 | 0.172731 | 0 | 0.290909 | 0 | 0 | 0.116123 | 0.019652 | 0 | 0 | 0 | 0 | 0.018182 | 1 | 0.054545 | false | 0 | 0.072727 | 0 | 0.163636 | 0.018182 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ed2120ed3b51f8f3b66f633df4e04159fd5c611 | 2,943 | py | Python | flask_discord_interactions/tests/test_subcommand.py | ccall48/flask-discord-interactions | 37bc73fdfea038051eb934590ef3b53660ce0517 | [
"MIT"
] | null | null | null | flask_discord_interactions/tests/test_subcommand.py | ccall48/flask-discord-interactions | 37bc73fdfea038051eb934590ef3b53660ce0517 | [
"MIT"
] | null | null | null | flask_discord_interactions/tests/test_subcommand.py | ccall48/flask-discord-interactions | 37bc73fdfea038051eb934590ef3b53660ce0517 | [
"MIT"
] | null | null | null | from flask_discord_interactions import CommandOptionType, Context, Member
def test_subcommand(discord, client):
    """Each subcommand of a command group dispatches to its own handler."""
    cmd_group = discord.command_group("group")

    @cmd_group.command()
    def sub_one(ctx):
        return "sub one"

    @cmd_group.command()
    def sub_two(ctx):
        return "sub two"

    response_one = client.run("group", "sub_one")
    response_two = client.run("group", "sub_two")
    assert response_one.content == "sub one"
    assert response_two.content == "sub two"
def test_subcommand_groups(discord, client):
    """Nested subcommand groups route 'group subgroup subcommand' invocations."""
    cmd_group = discord.command_group("group")
    first_subgroup = cmd_group.subgroup("sub_one")
    second_subgroup = cmd_group.subgroup("sub_two")

    @first_subgroup.command()
    def sub_one_one(ctx):
        return "one one"

    @first_subgroup.command()
    def sub_one_two(ctx):
        return "one two"

    @second_subgroup.command()
    def sub_two_one(ctx):
        return "two one"

    # Table-driven: invocation path -> expected handler output.
    for path, expected in (
        (("group", "sub_one", "sub_one_one"), "one one"),
        (("group", "sub_one", "sub_one_two"), "one two"),
        (("group", "sub_two", "sub_two_one"), "two one"),
    ):
        assert client.run(*path).content == expected
def test_oldstyle_subcommand(discord, client):
    """Old-style subcommands declared through an explicit options list still work."""

    def _engine_option(name, description):
        # Both subcommands share the same shape: one required string "query".
        return {
            "name": name,
            "description": description,
            "type": CommandOptionType.SUB_COMMAND,
            "options": [
                {
                    "name": "query",
                    "description": "Search query",
                    "type": CommandOptionType.STRING,
                    "required": True,
                }
            ],
        }

    @discord.command(
        options=[
            _engine_option("google", "Search with Google"),
            _engine_option("bing", "Search with Bing"),
        ]
    )
    def search(ctx, subcommand, *, query):
        "Search the Internet!"
        urls = {
            "google": f"https://google.com/search?q={query}",
            "bing": f"https://bing.com/search?q={query}",
        }
        # Unknown subcommands fall through to None, matching the original if-chain.
        return urls.get(subcommand)

    google_result = client.run("search", "google", query="hello")
    assert google_result.content == "https://google.com/search?q=hello"

    bing_result = client.run("search", "bing", query="hello")
    assert bing_result.content == "https://bing.com/search?q=hello"
def test_context_with_subcommand(discord, client):
    """Subcommand handlers receive the invocation Context (author info etc.)."""
    cmd_group = discord.command_group("group")

    @cmd_group.command()
    def subcommand(ctx):
        return ctx.author.display_name

    fake_context = Context(author=Member(username="Bob"))
    with client.context(fake_context):
        response = client.run("group", "subcommand")
        assert response.content == "Bob"
| 29.138614 | 77 | 0.53245 | 294 | 2,943 | 5.187075 | 0.166667 | 0.043279 | 0.078689 | 0.078689 | 0.447869 | 0.39541 | 0.302951 | 0.27541 | 0.27541 | 0.229508 | 0 | 0 | 0.331295 | 2,943 | 100 | 78 | 29.43 | 0.774898 | 0.006796 | 0 | 0.3 | 0 | 0 | 0.200815 | 0 | 0 | 0 | 0 | 0 | 0.1 | 1 | 0.1375 | false | 0 | 0.0125 | 0.075 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ed2d64e061c92079cee2868a26ed1b0890a057c | 6,963 | py | Python | Main.py | AayushK47/sudoku-solver | 27960449927806e62621d6cd136c82abcfdc0f6f | [
"MIT"
] | 1 | 2020-01-24T16:15:31.000Z | 2020-01-24T16:15:31.000Z | Main.py | AayushK47/sudoku-solver | 27960449927806e62621d6cd136c82abcfdc0f6f | [
"MIT"
] | null | null | null | Main.py | AayushK47/sudoku-solver | 27960449927806e62621d6cd136c82abcfdc0f6f | [
"MIT"
] | null | null | null | class SudokuSolver:
def __init__(self, query_string, rows = 'ABCDEFGHI', columns = '123456789'):
"""
Initializing the various variables required here
"""
self.query_string = query_string
self.rows = rows # The Rows are labeled from A to Z
self.columns = columns # The columns are labeled from 1 to 9
"""
The individual squares at the intersection of rows and columns will be called boxes. These boxes will have labels 'A1', 'A2', …, 'I9'.
The complete rows, columns, and 3x3 squares, will be called units. Thus, each unit is a set of 9 boxes, and there are 27 units in total.
For a particular box (such as 'A1'), its peers will be all other boxes that belong to a common unit
namely, those that belong to the same row, column, or 3x3 square.
"""
self.boxes = self.cross(self.rows, self.columns) # Getting all the box labels
self.row_units = [self.cross(r, self.columns) for r in self.rows] # Getting all the row units
self.column_units = [self.cross(self.rows, c) for c in self.columns] # Getting all the column units
self.square_units = [self.cross(rs, cs) for rs in ('ABC','DEF','GHI') for cs in ('123','456','789')] # Getting all the square units
self.unitlist = self.row_units + self.column_units + self.square_units # Combining all the units (Row, column and square)
self.units = dict((s, [u for u in self.unitlist if s in u]) for s in self.boxes) # Getting all units corresponding to each box
self.peers = dict((s, set(sum(self.units[s],[]))-set([s])) for s in self.boxes) # Getting all the peers corresponding to each box
def check(self):
d = self.grid_values()
# checking all the rows
for i in self.row_units:
row_elements = []
for j in i:
if d[j] != '123456789' and d[j] in row_elements:
return False
else:
row_elements.append(d[j])
# checking all the columns
for i in self.column_units:
col_elements = []
for j in i:
if d[j] != '123456789' and d[j] in col_elements:
return False
else:
col_elements.append(d[j])
# Checking all the square units
for i in self.square_units:
square_elements = []
for j in i:
if d[j] != '123456789' and d[j] in square_elements:
return False
else:
square_elements.append(d[j])
return True
def cross(self, a, b):
"""
A helper function for combining the row and column labels
"""
return [s+t for s in a for t in b]
def grid_values(self):
"""
Function to convert the input string to grid
Input: A grid in string form.
Output: A grid in dictionary form
"""
chars = []
digits = '123456789'
for c in self.query_string:
if c in digits:
chars.append(c)
if c == '.':
chars.append(digits)
assert len(chars) == 81
return dict(zip(self.boxes, chars))
def display(self, values):
"""
Display the values as a 2-D grid.
Input: The sudoku in dictionary form
Output: Prints the grid, returns None
"""
width = 1+max(len(values[s]) for s in self.boxes)
line = '+'.join(['-'*(width*3)]*3)
for r in self.rows:
print(''.join(values[r+c].center(width)+('|' if c in '36' else '')
for c in self.columns))
if r in 'CF':
print(line)
def eliminate(self, values):
"""
Go through all the boxes, and whenever there is a box with a value, eliminate this value from the values of all its peers.
Input: A sudoku in dictionary form.
Output: The resulting sudoku in dictionary form.
"""
solved_values = [box for box in values.keys() if len(values[box]) == 1]
for box in solved_values:
digit = values[box]
for peer in self.peers[box]:
values[peer] = values[peer].replace(digit,'')
return values
def only_choice(self, values):
"""
Go through all the units, and whenever there is a unit with a value that only fits in one box, assign the value to this box.
Input: A sudoku in dictionary form.
Output: The resulting sudoku in dictionary form.
"""
for unit in self.unitlist:
for digit in '123456789':
dplaces = [box for box in unit if digit in values[box]]
if len(dplaces) == 1:
values[dplaces[0]] = digit
return values
def reduce_puzzle(self, values):
"""
Iterate eliminate() and only_choice(). If at some point, there is a box with no available values, return False.
If the sudoku is solved, return the sudoku.
If after an iteration of both functions, the sudoku remains the same, return the sudoku.
Input: A sudoku in dictionary form.
Output: The resulting sudoku in dictionary form.
"""
stalled = False
while not stalled:
solved_values_before = len([box for box in values.keys() if len(values[box]) == 1])
values = self.eliminate(values)
values = self.only_choice(values)
solved_values_after = len([box for box in values.keys() if len(values[box]) == 1])
stalled = solved_values_before == solved_values_after
if len([box for box in values.keys() if len(values[box]) == 0]):
return False
return values
def search(self, values):
"Using depth-first search and propagation, try all possible values."
# First, reduce the puzzle using the previous function
values = self.reduce_puzzle(values)
if values is False:
return False ## Failed earlier
if all(len(values[s]) == 1 for s in self.boxes):
return values ## Solved!
# Choose one of the unfilled squares with the fewest possibilities
_,s = min((len(values[s]), s) for s in self.boxes if len(values[s]) > 1)
# Now use recurrence to solve each one of the resulting sudokus, and
for value in values[s]:
new_sudoku = values.copy()
new_sudoku[s] = value
attempt = self.search(new_sudoku)
if attempt:
return attempt
def toString(self, grid):
solved_string = ''
for i in self.row_units:
for j in i:
solved_string += grid[j]
return solved_string | 42.981481 | 144 | 0.564268 | 939 | 6,963 | 4.13312 | 0.209798 | 0.026282 | 0.032981 | 0.03968 | 0.253285 | 0.191703 | 0.147127 | 0.118784 | 0.118784 | 0.118784 | 0 | 0.020238 | 0.34712 | 6,963 | 162 | 145 | 42.981481 | 0.83282 | 0.251903 | 0 | 0.178218 | 0 | 0 | 0.034338 | 0 | 0 | 0 | 0 | 0 | 0.009901 | 1 | 0.09901 | false | 0 | 0 | 0 | 0.247525 | 0.019802 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ed7a7df06cc4cee499aace6ba6f51568d3566d0 | 615 | py | Python | pawnshop_management/pawnshop_management/doctype/pawn_ticket_jewelry/pawn_ticket_jewelry.py | EricMendoza412/pawnshop_management | 62d22f6435c3b8737c51923f1d28a0201fa0b9c8 | [
"MIT"
] | null | null | null | pawnshop_management/pawnshop_management/doctype/pawn_ticket_jewelry/pawn_ticket_jewelry.py | EricMendoza412/pawnshop_management | 62d22f6435c3b8737c51923f1d28a0201fa0b9c8 | [
"MIT"
] | null | null | null | pawnshop_management/pawnshop_management/doctype/pawn_ticket_jewelry/pawn_ticket_jewelry.py | EricMendoza412/pawnshop_management | 62d22f6435c3b8737c51923f1d28a0201fa0b9c8 | [
"MIT"
] | 1 | 2022-03-18T06:50:55.000Z | 2022-03-18T06:50:55.000Z | # Copyright (c) 2022, Rabie Moses Santillan and contributors
# For license information, please see license.txt
import frappe
from frappe.model.document import Document
class PawnTicketJewelry(Document):
    """DocType controller for a pawn-ticket jewelry item.

    On first save (i.e. when the document is not yet in the database) it bumps
    the global inventory counters kept in 'Pawnshop Management Settings'.
    """

    def before_save(self):
        # Only count the item the first time it is saved; `frappe.db.exists`
        # yields None for documents not yet in the database.
        # Fix: identity comparison `is None` instead of `== None` (PEP 8).
        if frappe.db.exists('Pawn Ticket Jewelry', self.name) is None:
            settings = frappe.get_doc('Pawnshop Management Settings')
            settings.jewelry_inventory_count += 1
            # NOTE(review): plain assignment (not `+= 1`) -- looks intentional
            # (a "has jewelry" flag?) but worth confirming with the author.
            settings.jewelry_count = 1
            if self.item_series == 'A':
                settings.a_series_current_count += 1
            elif self.item_series == 'B':
                settings.b_series_current_count += 1
            settings.save(ignore_permissions=True)
| 32.368421 | 64 | 0.756098 | 83 | 615 | 5.433735 | 0.60241 | 0.053215 | 0.062084 | 0.084257 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015267 | 0.147967 | 615 | 18 | 65 | 34.166667 | 0.84542 | 0.172358 | 0 | 0 | 0 | 0 | 0.09703 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.153846 | 0 | 0.307692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2edb23164b8db38139af3fd55a1813a6f7890fb5 | 4,751 | py | Python | bcs-ui/backend/iam/permissions/resources/cluster.py | ZhongmingFan/bk-bcs | e4b36b05e837e6a8468994869b0f55c972381a20 | [
"Apache-2.0"
] | null | null | null | bcs-ui/backend/iam/permissions/resources/cluster.py | ZhongmingFan/bk-bcs | e4b36b05e837e6a8468994869b0f55c972381a20 | [
"Apache-2.0"
] | null | null | null | bcs-ui/backend/iam/permissions/resources/cluster.py | ZhongmingFan/bk-bcs | e4b36b05e837e6a8468994869b0f55c972381a20 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from typing import Dict, List, Type
import attr
from backend.iam.permissions import decorators
from backend.iam.permissions.perm import PermCtx, Permission, ResCreatorAction, validate_empty
from backend.iam.permissions.request import IAMResource, ResourceRequest
from backend.packages.blue_krill.data_types.enum import EnumField, StructuredEnum
from .constants import ResourceType
from .project import ProjectPermission, related_project_perm
class ClusterAction(str, StructuredEnum):
    """IAM action identifiers for cluster resources."""

    CREATE = EnumField('cluster_create', label='cluster_create')
    VIEW = EnumField('cluster_view', label='cluster_view')
    MANAGE = EnumField('cluster_manage', label='cluster_manage')
    DELETE = EnumField('cluster_delete', label='cluster_delete')
@attr.dataclass
class ClusterCreatorAction(ResCreatorAction):
    """Creator-action payload registered with IAM when a cluster is created."""

    cluster_id: str
    name: str
    resource_type: str = ResourceType.Cluster

    def to_data(self) -> Dict:
        """Assemble the payload; base-class fields take precedence on key collisions."""
        payload = {
            'id': self.cluster_id,
            'name': self.name,
            'ancestors': [{'system': self.system, 'type': ResourceType.Project, 'id': self.project_id}],
        }
        # dict.update lets super().to_data() override, matching the original
        # {'id': ..., **data} merge semantics.
        payload.update(super().to_data())
        return payload
@attr.s
class ClusterPermCtx(PermCtx):
    """Permission-check context for cluster actions.

    project_id is required (non-empty string); cluster_id is optional and
    defaults to '' for project-scoped actions such as cluster creation.
    """

    project_id = attr.ib(validator=[attr.validators.instance_of(str), validate_empty])
    cluster_id = attr.ib(validator=attr.validators.instance_of(str), default='')

    @classmethod
    def from_dict(cls, init_data: Dict) -> 'ClusterPermCtx':
        """Build a context from a plain dict (cluster_id falls back to '')."""
        return cls(
            username=init_data['username'],
            force_raise=init_data['force_raise'],
            project_id=init_data['project_id'],
            cluster_id=init_data.get('cluster_id', ''),
        )

    @property
    def resource_id(self) -> str:
        # A cluster's IAM resource id is simply its cluster_id.
        return self.cluster_id

    def get_parent_chain(self) -> List[IAMResource]:
        """Return the ancestor resources: a cluster's parent is its project."""
        return [IAMResource(ResourceType.Project, self.project_id)]
@attr.s
class ClusterRequest(ResourceRequest):
    """IAM resource request for clusters; carries the `/project,{id}/` IAM path template."""

    project_id = attr.ib(validator=[attr.validators.instance_of(str), validate_empty])
    resource_type = attr.ib(init=False, default=ResourceType.Cluster)
    request_attrs = attr.ib(init=False, default={'_bk_iam_path_': f'/project,{{project_id}}/'})

    @classmethod
    def from_dict(cls, init_data: Dict) -> 'ClusterRequest':
        """Build the request object from a plain dict."""
        return cls(project_id=init_data['project_id'])

    def _make_attribute(self, res_id: str) -> Dict:
        # Fill the project_id placeholder into the IAM path template.
        return {'_bk_iam_path_': self.request_attrs['_bk_iam_path_'].format(project_id=self.project_id)}
class related_cluster_perm(decorators.RelatedPermission):
    """`decorators.RelatedPermission` specialized to the cluster resource type."""

    module_name: str = ResourceType.Cluster
class cluster_perm(decorators.Permission):
    """`decorators.Permission` specialized to the cluster resource type."""

    module_name: str = ResourceType.Cluster
class ClusterPermission(Permission):
    """Cluster permissions."""

    resource_type: str = ResourceType.Cluster
    resource_request_cls: Type[ResourceRequest] = ClusterRequest
    perm_ctx_cls = ClusterPermCtx
    parent_res_perm = ProjectPermission()

    @related_project_perm(method_name='can_view')
    def can_create(self, perm_ctx: ClusterPermCtx, raise_exception: bool = True) -> bool:
        """Check the cluster_create action (no concrete cluster id needed)."""
        return self.can_action(perm_ctx, ClusterAction.CREATE, raise_exception)

    @related_project_perm(method_name='can_view')
    def can_view(self, perm_ctx: ClusterPermCtx, raise_exception: bool = True) -> bool:
        """Check cluster_view for a concrete cluster (result cached)."""
        perm_ctx.validate_resource_id()
        return self.can_action(perm_ctx, ClusterAction.VIEW, raise_exception, use_cache=True)

    @related_project_perm(method_name='can_view')
    def can_manage(self, perm_ctx: ClusterPermCtx, raise_exception: bool = True) -> bool:
        """Check cluster management: both MANAGE and VIEW actions must pass."""
        perm_ctx.validate_resource_id()
        return self.can_multi_actions(perm_ctx, [ClusterAction.MANAGE, ClusterAction.VIEW], raise_exception)

    @related_project_perm(method_name='can_view')
    def can_delete(self, perm_ctx: ClusterPermCtx, raise_exception: bool = True) -> bool:
        """Check cluster deletion: both DELETE and VIEW actions must pass."""
        perm_ctx.validate_resource_id()
        return self.can_multi_actions(perm_ctx, [ClusterAction.DELETE, ClusterAction.VIEW], raise_exception)
| 38.942623 | 115 | 0.733109 | 596 | 4,751 | 5.619128 | 0.295302 | 0.032248 | 0.026874 | 0.028665 | 0.348462 | 0.301881 | 0.264258 | 0.244849 | 0.222753 | 0.169901 | 0 | 0.002771 | 0.164597 | 4,751 | 121 | 116 | 39.264463 | 0.841018 | 0.155967 | 0 | 0.220779 | 0 | 0 | 0.076904 | 0.006012 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12987 | false | 0 | 0.103896 | 0.064935 | 0.688312 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2edc3f792e182de7dfa82061e009b428d2e09ffd | 41,666 | py | Python | nlplingo/oregon/event_models/uoregon/models/pipeline/_01/modules/argument_model.py | BBN-E/nlplingo | 32ff17b1320937faa3d3ebe727032f4b3e7a353d | [
"Apache-2.0"
] | 3 | 2020-10-22T13:28:00.000Z | 2022-03-24T19:57:22.000Z | nlplingo/oregon/event_models/uoregon/models/pipeline/_01/modules/argument_model.py | BBN-E/nlplingo | 32ff17b1320937faa3d3ebe727032f4b3e7a353d | [
"Apache-2.0"
] | null | null | null | nlplingo/oregon/event_models/uoregon/models/pipeline/_01/modules/argument_model.py | BBN-E/nlplingo | 32ff17b1320937faa3d3ebe727032f4b3e7a353d | [
"Apache-2.0"
] | 1 | 2020-10-22T13:29:51.000Z | 2020-10-22T13:29:51.000Z | # -*- coding: utf-8 -*-
from nlplingo.common.utils import DEPREL_TO_ID # ==>
from nlplingo.oregon.event_models.uoregon.models.pipeline._01.local_constants import *
from fairseq.models.roberta import XLMRModel
from nlplingo.oregon.event_models.uoregon.tools.utils import *
from nlplingo.oregon.event_models.uoregon.layers import DynamicLSTM, CRF, GCN, SelfAttention, Elmo
from nlplingo.oregon.event_models.uoregon.models.pipeline._01.iterators import upos_map, deprel_map, ner_map
class ArgumentModel(nn.Module):
def __init__(self, opt, label_map):
    """Argument-extraction model.

    Combines XLM-R (or bilingual word2vec) token embeddings with UPOS,
    trigger-distance and GCN-encoded dependency-relation features, then applies
    self-attention and a per-token argument-tag classifier.

    Fix: removed the dozens of debug `print` statements that littered the
    constructor (log noise on every model instantiation).

    :param opt: dict of hyper-parameters (embedding dims, dropout, feature
        switches such as use_ner / use_biw2v, xlmr settings, ...)
    :param label_map: argument-tag vocabulary; its size fixes the output layer
    """
    super(ArgumentModel, self).__init__()
    self.opt = opt

    self.upos_embedding = nn.Embedding(
        num_embeddings=len(upos_map),
        embedding_dim=self.opt['upos_dim'],
        padding_idx=0
    )
    # Embedding of the (bucketed) word-to-trigger distance.
    self.dist_embedding = nn.Embedding(
        num_embeddings=NUM_DISTANCES,
        embedding_dim=self.opt['dist_dim'],
        padding_idx=0
    )
    self.rep_dim = self.opt['upos_dim'] + self.opt['dist_dim']

    self.deprel_embedding = nn.Embedding(
        # num_embeddings=len(deprel_map),  # <== original uoregon deprel vocabulary
        num_embeddings=len(DEPREL_TO_ID),  # ==> nlplingo deprel vocabulary
        embedding_dim=self.opt['deprel_dim'],
        padding_idx=0
    )
    self.gcn_layer = GCN(
        in_dim=self.opt['deprel_dim'],
        hidden_dim=self.opt['hidden_dim'],
        num_layers=2,
        opt=opt
    )
    # word-deprel, trigger-deprel, |difference| and product reps -> 4 x hidden_dim
    self.rep_dim += self.opt['hidden_dim'] * 4

    if self.opt['use_ner']:
        self.ner_embedding = nn.Embedding(
            num_embeddings=len(ner_map),
            embedding_dim=self.opt['ner_dim'],
            padding_idx=0
        )
        self.rep_dim += self.opt['ner_dim']
    # *********************************************
    if not self.opt['use_biw2v']:
        # XLM-R encoder; output dim scales with the number of last layers used.
        if 'base' in self.opt['xlmr_version']:
            self.xlmr_dim = 768 * self.opt['num_last_layer_xlmr']
        elif 'large' in self.opt['xlmr_version']:
            self.xlmr_dim = 1024 * self.opt['num_last_layer_xlmr']
        self.xlmr_embedding = XLMRModel.from_pretrained(
            self.opt['xlmr_model_dir'],
            checkpoint_file='model.pt')
        self.dropout = nn.Dropout(self.opt['dropout_xlmr'])
        self.word_embed_dim = self.xlmr_dim
    else:
        # Bilingual word2vec fallback encoder.
        self.biw2v_embedding = nn.Embedding(
            opt['biw2v_size'],
            embedding_dim=300,
            padding_idx=PAD_ID
        )
        self.load_pretrained_biw2v()
        if not self.opt['finetune_biw2v']:
            # Freeze the pretrained bilingual word2vec table.
            self.biw2v_embedding.weight.requires_grad = False
        self.word_embed_dim = 300

    self.rep_dim += self.word_embed_dim
    # ********************************************
    self.self_att = SelfAttention(self.rep_dim, opt)
    # Classifier input = attended reps concatenated with raw word embeddings.
    self.fc_argument = nn.Sequential(
        nn.Linear(self.rep_dim + self.word_embed_dim, self.opt['hidden_dim']),
        nn.ReLU(),
        nn.Linear(self.opt['hidden_dim'], len(label_map))
    )
    # ************ ACL idea: supervised deprel-embedding edge scorer **********
    self.fc_edge = nn.Linear(self.word_embed_dim * 2, self.opt['deprel_dim'])
    self.edge_loss_func = nn.CrossEntropyLoss(reduction='none')
def load_pretrained_biw2v(self):
embed = self.biw2v_embedding
vecs = self.opt['biw2v_vecs']
pretrained = torch.from_numpy(vecs)
embed.weight.data.copy_(pretrained)
def get_xlmr_reps(self, inputs):
print('============ argument_model.get_xlmr_reps START =============')
"""
xlmr_ids.shape= torch.Size([30, 57])
retrieve_ids.shape= torch.Size([30, 33])
type(all_hiddens)= <class 'list'>
len(all_hiddens)= 13
all_hiddens[0].shape= torch.Size([30, 57, 768])
all_hiddens[1].shape= torch.Size([30, 57, 768])
all_hiddens[-1].shape= torch.Size([30, 57, 768])
batch_size= 30
len(all_hiddens)= 12
self.opt['num_last_layer_xlmr']= 1
used_layers= [11]
retrieved_reps.shape= torch.Size([33, 768])
retrieved_reps.shape= torch.Size([33, 768])
retrieved_reps.shape= torch.Size([33, 768])
retrieved_reps.shape= torch.Size([33, 768])
retrieved_reps.shape= torch.Size([33, 768])
retrieved_reps.shape= torch.Size([33, 768])
retrieved_reps.shape= torch.Size([33, 768])
retrieved_reps.shape= torch.Size([33, 768])
retrieved_reps.shape= torch.Size([33, 768])
retrieved_reps.shape= torch.Size([33, 768])
retrieved_reps.shape= torch.Size([33, 768])
retrieved_reps.shape= torch.Size([33, 768])
retrieved_reps.shape= torch.Size([33, 768])
retrieved_reps.shape= torch.Size([33, 768])
retrieved_reps.shape= torch.Size([33, 768])
retrieved_reps.shape= torch.Size([33, 768])
retrieved_reps.shape= torch.Size([33, 768])
retrieved_reps.shape= torch.Size([33, 768])
retrieved_reps.shape= torch.Size([33, 768])
retrieved_reps.shape= torch.Size([33, 768])
retrieved_reps.shape= torch.Size([33, 768])
retrieved_reps.shape= torch.Size([33, 768])
retrieved_reps.shape= torch.Size([33, 768])
retrieved_reps.shape= torch.Size([33, 768])
retrieved_reps.shape= torch.Size([33, 768])
retrieved_reps.shape= torch.Size([33, 768])
retrieved_reps.shape= torch.Size([33, 768])
retrieved_reps.shape= torch.Size([33, 768])
retrieved_reps.shape= torch.Size([33, 768])
retrieved_reps.shape= torch.Size([33, 768])
token_reps.shape= torch.Size([30, 33, 768])
"""
xlmr_ids = inputs[0]
retrieve_ids = inputs[2]
print('xlmr_ids.shape=', xlmr_ids.shape)
print('retrieve_ids.shape=', retrieve_ids.shape)
# all_layers = xlmr.extract_features(zh_tokens, return_all_hiddens=True)
all_hiddens = self.xlmr_embedding.extract_features(xlmr_ids, return_all_hiddens=True)
print('type(all_hiddens)=', type(all_hiddens))
print('len(all_hiddens)=', len(all_hiddens))
print('all_hiddens[0].shape=', all_hiddens[0].shape)
print('all_hiddens[1].shape=', all_hiddens[1].shape)
print('all_hiddens[-1].shape=', all_hiddens[-1].shape)
all_hiddens = list(all_hiddens[1:]) # remove embedding layer
token_reps = []
batch_size, _ = xlmr_ids.shape
print('batch_size=', batch_size)
used_layers = list(range(len(all_hiddens)))[-self.opt['num_last_layer_xlmr']:]
print('len(all_hiddens)=', len(all_hiddens))
print("self.opt['num_last_layer_xlmr']=", self.opt['num_last_layer_xlmr'])
print('used_layers=', used_layers)
for example_id in range(batch_size):
retrieved_reps = torch.cat([all_hiddens[layer_id][example_id][retrieve_ids[example_id]]
for layer_id in used_layers], dim=1) # [seq len, xlmr_dim x num last layers]
print('retrieved_reps.shape=', retrieved_reps.shape)
token_reps.append(retrieved_reps)
token_reps = torch.stack(token_reps, dim=0) # [batch size, original seq len, xlmr_dim x num_layers]
print('token_reps.shape=', token_reps.shape)
print('============ argument_model.get_xlmr_reps END =============')
return token_reps
def supervise_deprel_embeds(self, h, inputs):
    """Auxiliary edge-labeling loss (the "ACL idea"): score each (child, head)
    edge representation against every dependency-relation embedding and
    supervise with the gold deprel labels.

    NOTE(review): this path still uses `deprel_map` while `__init__` sizes the
    deprel embedding with `DEPREL_TO_ID` (see the commented `# ==>` line) --
    confirm the two vocabularies agree before enabling this loss.

    :param h: token representations [batch size, seq len, rep dim]
    :param inputs: the full training batch tuple (see `forward`)
    :return: scalar edge-classification loss averaged over non-pad tokens
    """
    xlmr_ids, biw2v_ids, retrieve_ids, upos_ids, xpos_ids, head_ids, deprel_ids, ner_ids, lang_weights, triggers, entity_tags, eid, pad_masks = inputs
    batch_size, seq_len, rep_dim = h.shape
    edge_reps = get_edge_reps(h, head_ids, pad_masks,
                              self.opt[
                                  'device'])  # [batch size, seq len, xlmr dim]
    # Replicate each edge rep once per candidate dependency relation.
    edge_reps = edge_reps.repeat(1, 1, len(deprel_map)).view(batch_size, seq_len, len(deprel_map), -1)  # [batch size, seq len, num deps, xlmr dim] # <==
    #edge_reps = edge_reps.repeat(1, 1, len(DEPREL_TO_ID)).view(batch_size, seq_len, len(DEPREL_TO_ID), -1)  # [batch size, seq len, num deps, xlmr dim] # ==> TODO
    edge_reps = self.fc_edge(edge_reps)
    dep_reps = self.deprel_embedding.weight.unsqueeze(0).unsqueeze(0).repeat(batch_size, seq_len, 1,
                                                                             1)  # [batch size, seq len, num deps, dep dim]
    # ****** take dot product to compute similarity scores **********
    logits = (edge_reps * dep_reps).sum(dim=3)  # [batch size, seq len, num deps]
    logits = logits.transpose(1, 2)  # [batch size, num deps, seq len] (class-dim layout for CrossEntropyLoss)
    targets = deprel_ids  # [batch size, seq len]
    edge_loss = self.edge_loss_func(logits, targets)  # [batch size, seq len]
    input_masks = pad_masks.long().eq(0).float()  # [batch size, seq len]; 1.0 on real tokens
    num_edges = torch.sum(input_masks)
    # Average the per-token loss over real (non-pad) tokens only.
    supervise_loss = torch.sum(edge_loss * input_masks) / num_edges
    return supervise_loss
def forward(self, inputs):
    """Training forward pass: encode tokens, fuse syntactic and trigger
    features, and compute the argument-tagging loss.

    Fix: removed ~40 debug `print` statements and pasted debug-shape
    docstrings that executed on every training step (significant log noise
    and overhead); logic is otherwise unchanged.

    :param inputs: tuple of batched tensors:
        (xlmr_ids, biw2v_ids, retrieve_ids, upos_ids, xpos_ids, head_ids,
         deprel_ids, ner_ids, lang_weights, triggers, entity_tags, eid,
         pad_masks)
    :return: (loss, preds) -- scalar training loss and per-token tag predictions
    """
    xlmr_ids, biw2v_ids, retrieve_ids, upos_ids, xpos_ids, head_ids, deprel_ids, ner_ids, lang_weights, triggers, entity_tags, eid, pad_masks = inputs
    batch_size, seq_len = pad_masks.shape

    upos_reps = self.upos_embedding(upos_ids)  # [batch size, seq len, upos dim]
    dist_reps = get_dist_embeds(triggers, batch_size, seq_len, self.dist_embedding, self.opt['device'])

    word_feats = []
    if not self.opt['use_biw2v']:
        word_embeds = self.get_xlmr_reps(inputs)  # [batch size, seq len, xlmr dim]
        word_embeds = self.dropout(word_embeds)
    else:
        word_embeds = self.biw2v_embedding(biw2v_ids)

    # ********** supervised deprel-embedding loss (currently disabled) ********
    # edge_loss = self.supervise_deprel_embeds(word_embeds, inputs)
    edge_loss = 0
    # *************************************************************************
    word_feats.append(word_embeds)
    word_feats.append(upos_reps)
    word_feats.append(dist_reps)

    # Dependency-relation features passed through the GCN over the parse tree.
    word_deprel_reps = self.deprel_embedding(deprel_ids)
    adj = get_full_adj(head_ids, retrieve_ids, self.opt['device'])
    word_deprel_reps, _ = self.gcn_layer(word_deprel_reps, adj)
    trigger_deprel_reps = get_trigger_reps(word_deprel_reps, triggers).unsqueeze(1).repeat(1, seq_len, 1)  # [batch size, seq len, hidden dim]

    # Word/trigger deprel reps plus their |difference| and element-wise product.
    word_feats.append(word_deprel_reps)
    word_feats.append(trigger_deprel_reps)
    word_feats.append(torch.abs(trigger_deprel_reps - word_deprel_reps))
    word_feats.append(trigger_deprel_reps * word_deprel_reps)

    if self.opt['use_ner']:
        ner_reps = self.ner_embedding(ner_ids)
        word_feats.append(ner_reps)

    word_reps = torch.cat(word_feats, dim=2)
    # Condition every token on the trigger via element-wise gating.
    trigger_reps = get_trigger_reps(word_reps, triggers).unsqueeze(1).repeat(1, seq_len, 1)  # [batch size, seq len, rep dim]
    word_reps = trigger_reps * word_reps

    word_reps, _ = self.self_att(word_reps, pad_masks)
    # Concatenate the raw word embeddings back in before classification.
    word_reps = torch.cat(
        [word_reps, word_embeds],
        dim=2
    )
    raw_scores = self.fc_argument(word_reps)  # [batch size, seq len, num tags]
    token_masks = pad_masks.eq(0).float()
    cl_loss, probs, preds = compute_batch_loss(raw_scores, entity_tags, token_masks, instance_weights=lang_weights)
    # edge_lambda interpolates between the (disabled) edge loss and tagging loss.
    loss = self.opt['edge_lambda'] * edge_loss + (1 - self.opt['edge_lambda']) * cl_loss
    return loss, preds
def predict(self, combined_task_inputs):
    """Predict argument tags for every token of every sentence in the batch.

    Mirrors the feature pipeline of forward() up to the scoring layer, but
    instead of computing a loss it returns hard predictions and class
    probabilities.  (The original body contained huge no-op triple-quoted
    debug dumps and many commented-out prints; those dead statements were
    removed — the START/END marker prints are kept so runtime output is
    unchanged.)

    :param combined_task_inputs: tuple of
        (xlmr_ids, biw2v_ids, retrieve_ids, upos_ids, xpos_ids, head_ids,
         deprel_ids, ner_ids, triggers, eid, pad_masks)
    :return: (entity_preds, probs, token_masks) — shapes [batch, seq],
        [batch, seq, num tags], [batch, seq]
    """
    print('============= argument_model.predict START =========')
    (xlmr_ids, biw2v_ids, retrieve_ids, upos_ids, xpos_ids, head_ids,
     deprel_ids, ner_ids, triggers, eid, pad_masks) = combined_task_inputs
    batch_size, seq_len = pad_masks.shape

    # Per-token feature blocks: POS embedding and distance-to-trigger embedding.
    upos_reps = self.upos_embedding(upos_ids)  # [batch, seq, upos dim]
    dist_reps = get_dist_embeds(triggers, batch_size, seq_len,
                                self.dist_embedding, self.opt['device'])

    word_feats = []
    # Word embeddings: contextual XLM-R (with dropout) or static biw2v vectors.
    if not self.opt['use_biw2v']:
        word_embeds = self.get_xlmr_reps(combined_task_inputs)  # [batch, seq, xlmr dim]
        word_embeds = self.dropout(word_embeds)
    else:
        word_embeds = self.biw2v_embedding(biw2v_ids)
    word_feats.append(word_embeds)
    word_feats.append(upos_reps)
    word_feats.append(dist_reps)

    # Dependency-relation embeddings refined by a GCN over the parse adjacency.
    word_deprel_reps = self.deprel_embedding(deprel_ids)
    adj = get_full_adj(head_ids, retrieve_ids, self.opt['device'])
    word_deprel_reps, _ = self.gcn_layer(word_deprel_reps, adj)
    # Broadcast the trigger token's GCN vector over the sequence and add
    # interaction features (|t - w| and t * w) between trigger and tokens.
    trigger_deprel_reps = get_trigger_reps(word_deprel_reps, triggers).unsqueeze(1).repeat(1, seq_len, 1)
    word_feats.append(word_deprel_reps)
    word_feats.append(trigger_deprel_reps)
    word_feats.append(torch.abs(trigger_deprel_reps - word_deprel_reps))
    word_feats.append(trigger_deprel_reps * word_deprel_reps)
    if self.opt['use_ner']:
        ner_reps = self.ner_embedding(ner_ids)
        word_feats.append(ner_reps)

    word_reps = torch.cat(word_feats, dim=2)
    # Gate every token representation by the trigger's full representation.
    trigger_reps = get_trigger_reps(word_reps, triggers).unsqueeze(1).repeat(1, seq_len, 1)
    word_reps = trigger_reps * word_reps
    word_reps, _ = self.self_att(word_reps, pad_masks)
    # Concatenate the raw word embeddings back in before scoring.
    word_reps = torch.cat(
        [word_reps, word_embeds],
        dim=2
    )

    raw_scores = self.fc_argument(word_reps)  # [batch size, seq len, num tags]
    token_masks = pad_masks.eq(0).float()  # 1.0 for real tokens, 0.0 for padding
    # Zero out predictions on padding positions.
    entity_preds = torch.argmax(raw_scores, dim=2).long() * token_masks.long()
    probs = torch.softmax(raw_scores, dim=2)  # [batch size, seq len, num classes]
    print('============= argument_model.predict END =========')
    return entity_preds, probs, token_masks
def pool_naive_path(reps, triggers, device):
    """Pool token representations over the span between each token and the trigger.

    For token j of sentence b, the span covers positions
    min(j, trigger_b) .. max(j, trigger_b) inclusive; the mask marks every
    position OUTSIDE that span, and the actual pooling is delegated to the
    external helper `max_pooling2d`.

    :param reps: [batch size, seq len, rep dim] token representations
    :param triggers: [batch size] trigger index per sentence (tensor)
    :param device: torch device for the mask and the returned tensor
    :return: [batch size, seq len, rep dim] pooled representations
    """
    triggers = triggers.long().data.cpu().numpy()
    batch_size, seq_len, rep_dim = reps.shape
    pooled = []
    for b_id in range(batch_size):
        sentence = []
        for j in range(seq_len):
            with torch.no_grad():
                min_id = min(j, triggers[b_id])
                max_id = max(j, triggers[b_id])
                # Positions between token j and the trigger, inclusive.
                positions = set(range(min_id, max_id + 1))
                # Mask convention: 1 = position excluded from pooling, 0 = kept.
                # NOTE(review): assumes max_pooling2d treats True/1 as
                # "masked out" — confirm against its implementation.
                pool_mask = [0 if k in positions else 1 for k in range(seq_len)]
                pool_mask = torch.Tensor(pool_mask).bool().to(device)
            # Pooling happens outside no_grad so gradients can flow through reps.
            token_rep = max_pooling2d(pool_mask, reps[b_id])
            sentence.append(token_rep)
        sentence = torch.stack(sentence, dim=0)  # [seq len, rep dim]
        pooled.append(sentence)
    pooled = torch.stack(pooled, dim=0).to(device)  # [batch size, seq len, rep dim]
    return pooled
def get_dist_embeds(triggers, batch_size, seq_len, embed_layer, device):
    """Embed each token's (bucketed) signed distance to the trigger word.

    Raw distances are shifted by WINDOW_SIZE + 3 so the lowest ids stay free
    for special markers: 0 = padding, 1 = left of the context window,
    2 = right of the context window.

    :param triggers: [batch size] trigger index per sentence
    :param embed_layer: embedding layer over NUM_DISTANCES distance buckets
    :return: [batch size, seq len, dist dim] distance embeddings
    """
    padding_id = 0  # reserved id; never produced below (kept for documentation)
    out_of_left_id = 1
    out_of_right_id = 2
    with torch.no_grad():
        positions = torch.from_numpy(np.array(range(seq_len)))
        batch_position = positions.expand(batch_size, seq_len)
        batch_distance = batch_position - triggers.unsqueeze(
            1).long().cpu() + WINDOW_SIZE + 3  # 3 = 1 for padding, 1 for out of leftcontext, 1 for out of rightcontext
        # Shifted distance < 3 fell left of the window; >= NUM_DISTANCES fell right.
        out_of_left_mask = (batch_distance < 3).long()
        out_of_right_mask = (batch_distance >= NUM_DISTANCES).long()
        batch_outofleft = torch.ones_like(batch_distance).long() * out_of_left_mask * out_of_left_id
        batch_outofright = torch.ones_like(batch_distance).long() * out_of_right_mask * out_of_right_id
        # Keep the raw distance only where no single special marker applies;
        # otherwise substitute the marker id(s).
        out_of_context_mask = out_of_left_mask + out_of_right_mask
        out_of_context_mask = (out_of_context_mask != 1).long()
        batch_distance = batch_distance * out_of_context_mask + batch_outofleft + batch_outofright
        batch_distance = batch_distance.to(device)
    # NOTE(review): lookup placed outside no_grad so the distance embedding can
    # receive gradients during training — confirm against original indentation.
    dist_reps = embed_layer(batch_distance)  # [batch size, seq len, dist dim]
    return dist_reps
def get_trigger_reps(batch_reps, triggers):
    '''
    Select the representation of each sentence's trigger (anchor) token.

    :param batch_reps: [batch size, sequence length, rep dim]
    :param triggers: [batch size, ] trigger token index per sentence
    :return: reps of anchor words: [batch size, rep dim]
    '''
    # Build gather indices of shape [batch size, 1, rep dim]: for each batch
    # element, pick the full feature vector at its trigger position.
    ids = triggers.view(-1, 1).long()
    ids = ids.expand(ids.size(0), batch_reps.size(2)).unsqueeze(1)
    # torch.gather never modifies its input, so the original defensive
    # batch_reps.clone() was a needless full-tensor copy and is dropped.
    trigger_reps = torch.gather(batch_reps, 1, ids)
    return trigger_reps.squeeze(1)
| 52.875635 | 167 | 0.47672 | 6,187 | 41,666 | 3.057217 | 0.075804 | 0.073064 | 0.104679 | 0.133862 | 0.665451 | 0.611208 | 0.543167 | 0.480465 | 0.438699 | 0.409833 | 0 | 0.158257 | 0.334541 | 41,666 | 787 | 168 | 52.942821 | 0.52393 | 0.084049 | 0 | 0.311258 | 0 | 0 | 0.125872 | 0.025113 | 0 | 0 | 0 | 0.002541 | 0 | 1 | 0.029801 | false | 0 | 0.019868 | 0 | 0.076159 | 0.245033 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2edc62a2e3c3d4664ec1267a615b4c63d54b1f65 | 3,203 | py | Python | models/lstm.py | k-jun/sotuken | 7e7c1202653bda8b95ebe6627e5cf12000d5704f | [
"MIT"
] | null | null | null | models/lstm.py | k-jun/sotuken | 7e7c1202653bda8b95ebe6627e5cf12000d5704f | [
"MIT"
] | null | null | null | models/lstm.py | k-jun/sotuken | 7e7c1202653bda8b95ebe6627e5cf12000d5704f | [
"MIT"
] | null | null | null | # ref: https://www.tensorflow.org/guide/keras/rnn
# ref: https://www.tensorflow.org/tutorials/structured_data/time_series
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
import random as rn
import tensorflow.keras.backend as K
from sklearn.metrics import r2_score, mean_squared_error
import lib
def custom_loss(y_true, y_pred):
    """Training loss: plain mean absolute error between targets and predictions."""
    # An asymmetric variant (penalising one error direction) was tried before:
    # K.maximum(K.sign(y_true - y_pred), 0.01) * tf.keras.losses.mean_absolute_error(y_true, y_pred)
    mae = tf.keras.losses.mean_absolute_error
    return mae(y_true, y_pred)
def create_model(input_shape):
    """Build and compile a single-layer LSTM regressor.

    Architecture: LSTM(10) -> Dense(1) -> linear activation, compiled with
    RMSprop and this module's custom_loss.  Prints a model summary as a side
    effect before returning the compiled model.
    """
    model = tf.keras.models.Sequential()
    model.add(tf.keras.layers.LSTM(10, input_shape=input_shape))
    model.add(tf.keras.layers.Dense(1))
    model.add(tf.keras.layers.Activation("linear"))
    model.compile(loss=custom_loss, optimizer=tf.keras.optimizers.RMSprop())
    model.summary()
    return model
# ---- configuration ----------------------------------------------------------
DATA_PATH = "./input/ap-northeast-1c_from_2019-07-01_to_2019-12-01.csv"
# m3.large, m5.2xlarge, m5.large, m5.xlarge, r3.xlarge, r5d.xlarge
TARGET_TYPE = "r5d.xlarge"  # NOTE(review): re-assigned per iteration in the loop below
MULTI_STEP = 10  # NOTE(review): defined but not used in this script — confirm
BATCH_SIZE = 32
EPOCHS = 100
PAST_HISTORY = 10  # number of past samples forming the LSTM input window
TRAIN_RATIO = 0.7  # fraction of the data used for training
RANDOM_STATE = 1221
# Seed every RNG source used so runs are reproducible.
np.random.seed(RANDOM_STATE)
rn.seed(RANDOM_STATE)
tf.random.set_seed(RANDOM_STATE)
result = []  # collects one {'r2_score': ..., 'rmse': ...} dict per instance type
colors = ["red", "royalblue", "violet", "green", "cyan", "orange"]  # NOTE(review): unused here — confirm
instance_types = ["m3.large", "m5.2xlarge",
                  "m5.large", "m5.xlarge", "r3.xlarge", "r5d.xlarge"]
feature_importance = []  # NOTE(review): never filled in this script — confirm
fig = plt.figure(figsize=(16, 9))
# Train and evaluate one LSTM per EC2 instance type; each subplot shows
# y_test vs y_pred in a 2x3 grid.
for i in range(len(instance_types)):
    TARGET_TYPE = instance_types[i]
    print("=" * 10, TARGET_TYPE, "=" * 10)
    df = lib.load_data(DATA_PATH, TARGET_TYPE)
    df, mean, std = lib.normalize(df)
    (x_train, y_train), (x_test, y_test), columns = lib.train_test_split_lstm(df["price"].values, df.index,
                                                                              PAST_HISTORY, TRAIN_RATIO)
    # Define the model.
    model = create_model(x_train.shape[-2:])
    # Train the model.
    hist = model.fit(x_train, y_train, batch_size=BATCH_SIZE, epochs=EPOCHS,
                     verbose=1, validation_data=(x_test, y_test))
    y_pred = model.predict(x_test)
    a = {}
    # Undo the normalization so metrics are on the original price scale.
    y_pred = lib.denormalize(y_pred, std, mean)
    y_test = lib.denormalize(y_test, std, mean)
    a["r2_score"] = lib.r2_score(y_pred, y_test)
    a["rmse"] = np.sqrt(mean_squared_error(y_pred, y_test))
    result.append(a)
    subfig = fig.add_subplot(2, 3, i+1)
    # subfig.plot(range(len(train_loss)), train_loss, label='train_loss', color='blue')
    # subfig.plot(range(len(val_loss)), val_loss, label='val_loss', color='red')
    subfig.scatter(y_test, y_pred, c="black", label=TARGET_TYPE)
    subfig.set_xlabel('y_test')
    subfig.set_ylabel('y_pred')
    subfig.plot([-2, 4], [-2, 4])  # reference diagonal: perfect prediction line
    subfig.legend(bbox_to_anchor=(1, 0), loc='lower right',
                  borderaxespad=1, fontsize=15)
plt.legend(bbox_to_anchor=(1, 0), loc='lower right',
           borderaxespad=1, fontsize=20)
fig.tight_layout()
fig.savefig("./output/lstm.png")
# Dump the collected metrics, one dict per instance type.
for i in result:
    print(i)
| 31.097087 | 107 | 0.674056 | 482 | 3,203 | 4.253112 | 0.381743 | 0.026829 | 0.011707 | 0.019512 | 0.173659 | 0.139512 | 0.139512 | 0.139512 | 0.139512 | 0.139512 | 0 | 0.031466 | 0.186388 | 3,203 | 102 | 108 | 31.401961 | 0.75518 | 0.142054 | 0 | 0 | 0 | 0 | 0.085798 | 0.020811 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028169 | false | 0 | 0.15493 | 0.014085 | 0.211268 | 0.042254 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ede893a1fd4b8b59efb44450c5f33e55ff0017c | 436 | py | Python | tests/system/test_hello.py | bnwest/FastApiMicroService | 1768f28974fea6dda1cc4312b3beca78401f9dfb | [
"CC0-1.0"
] | 1 | 2021-06-18T16:29:49.000Z | 2021-06-18T16:29:49.000Z | tests/system/test_hello.py | bnwest/FastApiMicroService | 1768f28974fea6dda1cc4312b3beca78401f9dfb | [
"CC0-1.0"
] | null | null | null | tests/system/test_hello.py | bnwest/FastApiMicroService | 1768f28974fea6dda1cc4312b3beca78401f9dfb | [
"CC0-1.0"
] | null | null | null | import os
import requests
# export FASTAPI_ROOT="http://fastapi-micro-service:8000"
FASTAPI_ROOT = os.environ.get("FASTAPI_ROOT")
def test_hello_world():
    """Root endpoint must answer 200 with the JSON body {"Hello": "World"}."""
    response = requests.get(f"{FASTAPI_ROOT}/")
    assert response.status_code == 200
    body = response.json()
    assert isinstance(body, dict)
    assert "Hello" in body
    assert body["Hello"] == "World"
| 27.25 | 57 | 0.731651 | 57 | 436 | 5.368421 | 0.491228 | 0.196078 | 0.117647 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01897 | 0.15367 | 436 | 15 | 58 | 29.066667 | 0.810298 | 0.126147 | 0 | 0 | 0 | 0 | 0.110818 | 0 | 0 | 0 | 0 | 0 | 0.363636 | 1 | 0.090909 | false | 0 | 0.181818 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ee2e6731a1c9e2346795ca28318bd107b7a7619 | 2,963 | py | Python | RafalG/sequence_slice_e.py | Khayn/2021-12-elearning-pythonana | a54e407adc8fb8c3a5fd2522735ae09cdef6540a | [
"MIT"
] | null | null | null | RafalG/sequence_slice_e.py | Khayn/2021-12-elearning-pythonana | a54e407adc8fb8c3a5fd2522735ae09cdef6540a | [
"MIT"
] | null | null | null | RafalG/sequence_slice_e.py | Khayn/2021-12-elearning-pythonana | a54e407adc8fb8c3a5fd2522735ae09cdef6540a | [
"MIT"
] | null | null | null | """
* Assignment: Sequence Slice Train/Test
* Required: yes
* Complexity: easy
* Lines of code: 4 lines
* Time: 8 min
English:
1. Divide `data` into two lists:
a. `train`: 60% - training data
b. `test`: 40% - testing data
2. Calculate split point:
a. `data` length multiplied by percent
b. From `data` slice training data from start to split
c. From `data` slice test data from split to end
3. Run doctests - all must succeed
Polish:
1. Podziel `data` na dwie listy:
a. `train`: 60% - dane do uczenia
b. `test`: 40% - dane do testów
2. Aby to zrobić wylicz punkt podziału:
a. Długość `data` razy procent
c. Z `data` wytnij do uczenia rekordy od początku do punktu podziału
d. Z `data` zapisz do testów rekordy od punktu podziału do końca
3. Uruchom doctesty - wszystkie muszą się powieść
Tests:
>>> import sys; sys.tracebacklimit = 0
>>> assert split is not Ellipsis, \
'Assign result to variable: `split`'
>>> assert train is not Ellipsis, \
'Assign result to variable: `train`'
>>> assert test is not Ellipsis, \
'Assign result to variable: `test`'
>>> assert type(split) is int, \
'Variable `split` has invalid type, should be int'
>>> assert type(train) is list, \
'Variable `train` has invalid type, should be list'
>>> assert type(train) is list, \
'Variable `train` has invalid type, should be list'
>>> assert type(test) is list, \
'Variable `test` has invalid type, should be list'
>>> assert all(type(x) is tuple for x in train), \
'All elements in `train` should be tuple'
>>> assert all(type(x) is tuple for x in test), \
'All elements in `test` should be tuple'
>>> split
6
>>> train # doctest: +NORMALIZE_WHITESPACE
[(5.8, 2.7, 5.1, 1.9, 'virginica'),
(5.1, 3.5, 1.4, 0.2, 'setosa'),
(5.7, 2.8, 4.1, 1.3, 'versicolor'),
(6.3, 2.9, 5.6, 1.8, 'virginica'),
(6.4, 3.2, 4.5, 1.5, 'versicolor'),
(4.7, 3.2, 1.3, 0.2, 'setosa')]
>>> test # doctest: +NORMALIZE_WHITESPACE
[(7.0, 3.2, 4.7, 1.4, 'versicolor'),
(7.6, 3.0, 6.6, 2.1, 'virginica'),
(4.9, 3.0, 1.4, 0.2, 'setosa'),
(4.9, 2.5, 4.5, 1.7, 'virginica')]
"""
DATA = [
('Sepal length', 'Sepal width', 'Petal length', 'Petal width', 'Species'),
(5.8, 2.7, 5.1, 1.9, 'virginica'),
(5.1, 3.5, 1.4, 0.2, 'setosa'),
(5.7, 2.8, 4.1, 1.3, 'versicolor'),
(6.3, 2.9, 5.6, 1.8, 'virginica'),
(6.4, 3.2, 4.5, 1.5, 'versicolor'),
(4.7, 3.2, 1.3, 0.2, 'setosa'),
(7.0, 3.2, 4.7, 1.4, 'versicolor'),
(7.6, 3.0, 6.6, 2.1, 'virginica'),
(4.9, 3.0, 1.4, 0.2, 'setosa'),
(4.9, 2.5, 4.5, 1.7, 'virginica')]
header = DATA[0]
data = DATA[1:]
# int: `data` length multiplied by percent
split = int(len(data) * 60 / 100)
# list[tuple]: first 60% from data
train = data[:split]
# list[tuple]: last 40% from data
test = data[split:]
| 31.860215 | 78 | 0.577118 | 490 | 2,963 | 3.485714 | 0.25102 | 0.01171 | 0.028103 | 0.046838 | 0.423302 | 0.376464 | 0.376464 | 0.299766 | 0.299766 | 0.26815 | 0 | 0.084753 | 0.247384 | 2,963 | 92 | 79 | 32.206522 | 0.681166 | 0.791428 | 0 | 0 | 0 | 0 | 0.232203 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ee382ef7e4e52fef0695c81d210f8710fbaae22 | 26,869 | py | Python | kiosk/models.py | AndiBr/ffksk | ff4bc4ad26d4571eaa1a6ff815b2e6a876f8ba99 | [
"MIT"
] | null | null | null | kiosk/models.py | AndiBr/ffksk | ff4bc4ad26d4571eaa1a6ff815b2e6a876f8ba99 | [
"MIT"
] | 14 | 2018-09-12T06:59:55.000Z | 2020-02-26T07:17:48.000Z | kiosk/models.py | AndiBr/ffksk | ff4bc4ad26d4571eaa1a6ff815b2e6a876f8ba99 | [
"MIT"
] | null | null | null | from django.db import models
from django.utils import timezone
from dateutil import tz
import pytz
from datetime import date
from django.core.validators import MinValueValidator
from django.db import transaction
from profil.models import KioskUser
from django.db import connection
from django.conf import settings
from django.template.loader import render_to_string
from .queries import readFromDatabase
from django.db.models import Max
# Create your models here.
class Start_News(models.Model):
    """A news entry displayed on the start page."""
    heading = models.CharField(max_length=256)
    date = models.DateTimeField(default=timezone.now)
    content = models.TextField(max_length=2048, blank=True)
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    starred = models.BooleanField(default=False)
    visible = models.BooleanField(default=True)

    def __str__(self):
        return f'{self.date}: {self.heading}'
class Kontakt_Nachricht(models.Model):
    """A message submitted through the contact form."""
    name = models.CharField(max_length=40)
    email = models.EmailField('E-Mail-Adresse')
    gesendet = models.DateTimeField(auto_now_add=True)
    betreff = models.TextField(max_length=128, blank=True)
    text = models.TextField(max_length=1024)
    beantwortet = models.BooleanField(default=False)

    def __str__(self):
        return f'Von: {self.name}: {self.betreff}'
class Produktpalette(models.Model):
    """A product in the kiosk's assortment."""
    produktName = models.CharField(max_length=40)
    imVerkauf = models.BooleanField()
    inAufstockung = models.BooleanField(default=True)
    produktErstellt = models.DateTimeField(auto_now_add=True)
    produktGeaendert = models.DateTimeField(auto_now=True)
    #kommentar = models.TextField(max_length=512,blank=True)
    farbeFuerPlot = models.TextField(max_length=7, blank=True)

    def __str__(self):
        return f'ID {self.id}: {self.produktName}'
class Produktkommentar(models.Model):
    """A timestamped comment attached to a product."""
    produktpalette = models.ForeignKey(Produktpalette, on_delete=models.CASCADE)
    # NOTE(review): auto_now_add expects a boolean; timezone.now is merely a
    # truthy callable and behaves like auto_now_add=True — confirm before changing.
    erstellt = models.DateTimeField(auto_now_add=timezone.now)
    kommentar = models.TextField(max_length=512, blank=True)

    def __str__(self):
        return f'{self.produktpalette.produktName} ({self.erstellt} )'
class Kioskkapazitaet(models.Model):
    """Stocking parameters of a product: capacity, alert threshold, pack size."""
    produktpalette = models.OneToOneField(
        Produktpalette, on_delete=models.CASCADE,
        primary_key=True)
    maxKapazitaet = models.IntegerField(validators=[MinValueValidator(0)])
    schwelleMeldung = models.IntegerField(validators=[MinValueValidator(0)])
    paketgroesseInListe = models.IntegerField(validators=[MinValueValidator(0)])

    def __str__(self):
        return f'{self.produktpalette.produktName}, Kapazität: {self.maxKapazitaet}'
class ProduktVerkaufspreise(models.Model):
    """Sales-price records for a product, each valid from `gueltigAb` on.

    Prices are stored in cents: `verkaufspreis` is the sales price and
    `preisAufstockung` the surcharge granted for restocking (both divided
    by 100 for display in __str__).
    """
    produktpalette = models.ForeignKey(
        Produktpalette, on_delete=models.CASCADE)
    verkaufspreis = models.IntegerField(validators=[MinValueValidator(0)])
    preisAufstockung = models.IntegerField(validators=[MinValueValidator(0)])
    gueltigAb = models.DateTimeField(default=timezone.now)

    def __str__(self):
        price = '%.2f' % (self.verkaufspreis/100)
        aufstockung = '%.2f' % (self.preisAufstockung/100)
        return(self.produktpalette.produktName + ", " +
            str(price) + "(+"+str(aufstockung)+") "+chr(8364)+" g"+chr(252)+"ltig ab " + str(self.gueltigAb))

    # Both helpers take only `produkt_id` and are invoked on the class (see
    # ZumEinkaufVorgemerkt.einkaufAnnehmen); they previously lacked
    # @staticmethod, so calling them on an instance would have mis-bound the
    # instance as `produkt_id`.  Class-level calls are unchanged.
    @staticmethod
    def getActPrices(produkt_id):
        """Return the currently valid price row for a product (raw SQL helper)."""
        verkaufspreis = readFromDatabase('getActPrices', [produkt_id])
        return verkaufspreis[0]

    @staticmethod
    def getPreisAufstockung(produkt_id):
        """Return the current restocking-surcharge row for a product."""
        aufstockung = readFromDatabase('getPreisAufstockung', [produkt_id])
        return aufstockung[0]
class Einkaufsliste(models.Model):
    """Open purchasing demand: one row per product unit that needs restocking."""
    kiosk_ID = models.AutoField(primary_key=True)
    produktpalette = models.ForeignKey(
        Produktpalette,on_delete=models.CASCADE)
    # NOTE(review): auto_now_add expects a boolean; timezone.now is merely a
    # truthy callable and behaves like auto_now_add=True — confirm before changing.
    bedarfErstelltUm = models.DateTimeField(auto_now_add=timezone.now)

    def __str__(self):
        return("[#" + str(self.kiosk_ID) + "] " +
            self.produktpalette.produktName + ", Bedarf angemeldet um " +
            str(self.bedarfErstelltUm))

    # Class-level utility (note: defined without self/@staticmethod and
    # invoked on the class — presumably Einkaufsliste.getEinkaufsliste()).
    def getEinkaufsliste():
        # Full shopping list fetched via a raw SQL helper.
        einkaufsliste = readFromDatabase('getEinkaufsliste')
        return(einkaufsliste)

    # A whole group on the shopping list is reserved for purchase by one user.
    # Runs atomically: either every item of the group is moved to
    # ZumEinkaufVorgemerkt, or nothing changes.
    @transaction.atomic
    def einkaufGroupVormerken(ekGroupID,user):
        # Find the group's rows in EinkaufslisteGroups, then resolve their
        # IDs in Einkaufsliste.
        groupEntries = EinkaufslisteGroups.objects.filter(gruppenID=ekGroupID)
        for grEntry in groupEntries:
            grEntryID = grEntry.einkaufslistenItem_id
            ekItem = Einkaufsliste.objects.get(kiosk_ID=grEntryID)
            # Copy the item into the "reserved for purchase" table, keeping
            # its primary key and creation timestamp.
            vg = ZumEinkaufVorgemerkt(kiosk_ID=ekItem.kiosk_ID, bedarfErstelltUm=ekItem.bedarfErstelltUm,
                produktpalette_id=ekItem.produktpalette_id, einkaufsvermerkUm=timezone.now(),
                einkaeufer_id = user)
            vg.save()
            Einkaufsliste.objects.get(kiosk_ID=grEntryID).delete()
        # Drop the now-empty group mapping.
        EinkaufslisteGroups.objects.filter(gruppenID=ekGroupID).delete()
        return True

    def getCommentsOnProducts(ekGroupID):
        # Return the comments of all products in the given shopping-list group.
        comments = readFromDatabase('getCommentsOnProductsInEkList',[ekGroupID])
        return comments
class EinkaufslisteGroups(models.Model):
    """Maps a shopping-list item to the purchasing group it belongs to."""
    einkaufslistenItem = models.OneToOneField(Einkaufsliste, to_field='kiosk_ID', on_delete=models.CASCADE)
    gruppenID = models.IntegerField()

    def __str__(self):
        return f'Element: [#{self.einkaufslistenItem.kiosk_ID}] Gruppe {self.gruppenID}'
class ZumEinkaufVorgemerkt(models.Model):
    """Shopping-list items a user has reserved for purchase.

    Rows are copied over from Einkaufsliste (keeping kiosk_ID and
    bedarfErstelltUm) by Einkaufsliste.einkaufGroupVormerken.
    """
    kiosk_ID = models.AutoField(primary_key=True)
    produktpalette = models.ForeignKey(
        Produktpalette,on_delete=models.CASCADE)
    bedarfErstelltUm = models.DateTimeField()
    # NOTE(review): auto_now_add expects a boolean; timezone.now is merely a
    # truthy callable and behaves like auto_now_add=True — confirm before changing.
    einkaufsvermerkUm = models.DateTimeField(auto_now_add=timezone.now)
    einkaeufer = models.ForeignKey(
        KioskUser,on_delete=models.CASCADE)

    def __str__(self):
        return("[#" + str(self.kiosk_ID) + "] " +
            self.produktpalette.produktName + ", vorgemerkt um " +
            str(self.einkaufsvermerkUm) + ", von " + str(self.einkaeufer))

    # Class-level utility (defined without self/@staticmethod).
    def getMyZumEinkaufVorgemerkt(currentUserID):
        # Per-user reserved shopping list, fetched via a raw SQL helper.
        persEinkaufsliste = readFromDatabase('getMyZumEinkaufVorgemerkt',[currentUserID])
        return(persEinkaufsliste)
@transaction.atomic
def einkaufAnnehmen(form, currentUser):
    """Accept a reported delivery of purchased items.

    Validates the delivered amount against the buyer's earmarked list and
    the reported cost against the configured minimum margin; on success
    the items are moved into the Kiosk with their computed purchase price
    and the buyer is refunded (plus a commission) from the bank.
    Returns a result dict with error flag, message and rendered HTML.
    All prices are integer cents.
    """
    retVal = {'product_id': None, 'err': False, 'msg': None, 'html': None, 'dct': None, 'angeliefert': None}
    finanz = getattr(settings,'FINANZ')
    product_id= form['product_id']
    product = Produktpalette.objects.get(id=product_id)
    retVal['product_id'] = product_id
    prodVkPreis = ProduktVerkaufspreise.getActPrices(product_id)
    prodVkPreis = prodVkPreis.get('verkaufspreis')
    retVal['err'] = False
    userID = form['userID']
    anzahlAngeliefert = form['anzahlAngeliefert']
    gesPreis = form['gesPreis']
    # Get the maximal number of products to accept
    persEkList = ZumEinkaufVorgemerkt.getMyZumEinkaufVorgemerkt(userID)
    anzahlElemente = [x['anzahlElemente'] for x in persEkList if x['id']==product_id][0]
    # Check that no more was delivered than was on the list
    if anzahlAngeliefert > anzahlElemente:
        retVal['msg'] = "Die Menge der angelieferten Ware ist zu gro"+chr(223)+" f"+chr(252)+"r '"+product.produktName+"'"
        retVal['err'] = True
    # Check that the cost is low enough so that the configured margin
    # between purchase and sales price (e.g. 10 %) is kept.
    minProduktMarge = finanz['minProduktMarge']
    if float(gesPreis) > float(anzahlAngeliefert) * (1-float(minProduktMarge)) * float(prodVkPreis):
        retVal['msg'] = "Die Kosten f"+chr(252)+"r den Einkauf von '"+product.produktName+"' sind zu hoch. Der Einkauf kann nicht angenommen werden."
        retVal['err'] = True
    if retVal['err'] == True:
        # On an input error, return an alert message that the input is wrong.
        retVal['html'] = render_to_string('kiosk/fehler_message.html', {'message':retVal['msg']})
        return retVal
        # Ideally dissolve the <form> here and build it manually; use POST
        # like GET above — the token must be included in the data handed to
        # the JavaScript.
    else:
        # Input is valid: compute the purchase price, write it to the items
        # and move them into the kiosk. Money transfer from the bank to the
        # buyer follows below.
        # Purchase price per item (integer division, cents).
        prodEkPreis = int(gesPreis / anzahlAngeliefert)
        datum = timezone.now()
        angeliefert = ZumEinkaufVorgemerkt.objects.filter(einkaeufer__id=userID,
            produktpalette__id=product_id).order_by('kiosk_ID')[:anzahlAngeliefert]
        if len(angeliefert) != anzahlAngeliefert:
            raise ValueError
        # Write the values and store the items in the kiosk
        for an in angeliefert:
            k = Kiosk(kiosk_ID=an.kiosk_ID,bedarfErstelltUm=an.bedarfErstelltUm,
                produktpalette_id=an.produktpalette_id, einkaufsvermerkUm=an.einkaufsvermerkUm,
                einkaeufer_id = an.einkaeufer_id, geliefertUm = datum,
                verwalterEinpflegen_id = currentUser.id, einkaufspreis = prodEkPreis)
            # Take care that a two-decimal value is stored here!
            k.save()
            an.delete()
        # Compute commission and grand total
        gewinnEK = finanz['gewinnEK']
        provision = int(((float(prodVkPreis) * float(anzahlAngeliefert)) - float(gesPreis)) * float(gewinnEK))
        paidPrice = gesPreis
        gesPreis = gesPreis + provision
        # Money transfer from the bank to the buyer
        userBank = KioskUser.objects.get(username='Bank')
        userAnlieferer = KioskUser.objects.get(id=userID)
        GeldTransaktionen.doTransaction(userBank,userAnlieferer,gesPreis,datum,
            "Erstattung Einkauf " + product.produktName + " (" + str(anzahlAngeliefert) + "x)" )#" um " + str(datum.astimezone(tz.tzlocal())))
        # Take care that a two-decimal value is stored here!
        retVal['dct'] = {'gesPreis':gesPreis/100,'userAnlieferer':userAnlieferer.username, 'produktName': product.produktName,'anzahlElemente':anzahlElemente}
        retVal['angeliefert'] = angeliefert
        retVal['msg'] = "Vom Produkt '"+str(product.produktName)+"' wurden "+str(anzahlAngeliefert)+' St'+chr(252)+'ck zum Preis von '+'%.2f'%(paidPrice/100)+' '+chr(8364)+' angeliefert.'
        retVal['html'] = render_to_string('kiosk/success_message.html', {'message':retVal['msg']})
        return retVal
class Kiosk(models.Model):
    """An item currently in stock in the kiosk, ready to be sold."""
    kiosk_ID = models.AutoField(primary_key=True)
    produktpalette = models.ForeignKey(
        Produktpalette, on_delete=models.CASCADE)
    bedarfErstelltUm = models.DateTimeField()
    einkaufsvermerkUm = models.DateTimeField()
    einkaeufer = models.ForeignKey(
        KioskUser, on_delete=models.CASCADE, related_name='kiosk_einkaeufer')
    # Fix: auto_now_add expects a boolean; the previous `timezone.now` was
    # merely truthy. Behavior is unchanged — any truthy value makes Django
    # stamp the field itself on the first save (callers work around this
    # with a second save when they need to keep a historic timestamp).
    geliefertUm = models.DateTimeField(auto_now_add=True)
    verwalterEinpflegen = models.ForeignKey(
        KioskUser, on_delete=models.CASCADE, related_name='kiosk_verwalter')
    # Purchase price in cents, never negative.
    einkaufspreis = models.IntegerField(validators=[MinValueValidator(0)])

    def __str__(self):
        price = '%.2f' % (self.einkaufspreis/100)
        return("[#" + str(self.kiosk_ID) + "] " +
               self.produktpalette.produktName + ", EK: " +
               str(price) + " "+chr(8364)+", um " +
               str(self.geliefertUm) + ', von ' + str(self.einkaeufer) + ' (' + str(self.verwalterEinpflegen) + ')')
def getKioskContent():
    """Return the current kiosk stock via the canned DB query."""
    return readFromDatabase('getKioskContent')
def getKioskContentForInventory():
    """Return the kiosk stock in the aggregation used by the inventory view."""
    return readFromDatabase('getKioskContentForInventory')
# Purchase of a product on 'kauf_page'
@transaction.atomic
def buyItem(wannaBuyItem, user, gekauft_per='ubk', buyAndDonate=False):
    """Sell one unit of the product named `wannaBuyItem` to `user`.

    Looks the item up in the kiosk stock, falling back to booking one back
    from the 'Dieb' user; checks the buyer's permission and balance;
    records the sale in Gekauft, removes the item from Kiosk and transfers
    the money to the bank (plus an optional donation surcharge).
    Returns a dict with 'success', 'msg', 'product', 'price' (EUR),
    'hasDonated' and 'donation' (EUR).
    """
    retVals = {'success': False, 'msg': [], 'product': wannaBuyItem, 'price': 0, 'hasDonated': False, 'donation': 0}

    # First, look in Kiosk.
    try:
        item = Kiosk.objects.filter(produktpalette__produktName=wannaBuyItem)[:1].get()
        foundInKiosk = True
    # Only "not found" is expected here; a bare except would also swallow
    # genuine database errors.
    except Kiosk.DoesNotExist:
        msg = 'Selected item is not in Kiosk anymore. But let\'s look into the bought items of "Dieb" ...'
        print(msg)
        retVals['msg'].append(msg)
        foundInKiosk = False

    # If not available in Kiosk, do Rueckbuchung from Dieb
    if not foundInKiosk:
        try:
            itemBoughtByDieb = Gekauft.objects.filter(kaeufer__username='Dieb',produktpalette__produktName=wannaBuyItem)[:1].get()
        except Gekauft.DoesNotExist:
            msg = 'No selected item has been found in the whole Kiosk to be bought.'
            print(msg)
            retVals['msg'].append(msg)
            return retVals

        # Book back the item from Dieb
        dieb = KioskUser.objects.get(username='Dieb')
        item = Gekauft.rueckbuchenOhneForm(dieb.id, itemBoughtByDieb.produktpalette.id, 1)
        foundInKiosk = True

    # Query the current sales price and donation surcharge for the item.
    actPrices = ProduktVerkaufspreise.getActPrices(item.produktpalette.id)
    actPrices = actPrices.get('verkaufspreis')
    donation = ProduktVerkaufspreise.getPreisAufstockung(item.produktpalette.id)
    donation = donation.get('preisAufstockung')

    # Check if user is allowed to buy something and has enough money
    allowedConsumers = readFromDatabase('getUsersToConsume')
    if user.id not in [x['id'] for x in allowedConsumers] and not user.username=='Dieb':
        msg = 'Du bist nicht berechtigt, Produkte zu kaufen.'
        print(msg)
        retVals['msg'].append(msg)
        return retVals

    if not user.username=='Dieb':
        konto = Kontostand.objects.get(nutzer = user)
        if buyAndDonate:
            if konto.stand - actPrices - donation < 0:
                msg = 'Dein Kontostand ist zu niedrig, um dieses Produkt zu kaufen und eine Spende zu geben.'
                print(msg)
                retVals['msg'].append(msg)
                return retVals
        else:
            if konto.stand - actPrices < 0:
                msg = 'Dein Kontostand ist zu niedrig, um dieses Produkt zu kaufen.'
                print(msg)
                retVals['msg'].append(msg)
                return retVals

    # Record the sale in table 'Gekauft'
    g = Gekauft(kiosk_ID=item.kiosk_ID, produktpalette=item.produktpalette,
        bedarfErstelltUm=item.bedarfErstelltUm, einkaufsvermerkUm=item.einkaufsvermerkUm,
        einkaeufer=item.einkaeufer, geliefertUm=item.geliefertUm,
        verwalterEinpflegen=item.verwalterEinpflegen, einkaufspreis=item.einkaufspreis,
        gekauftUm = timezone.now(), kaeufer = user, verkaufspreis=actPrices, gekauft_per=gekauft_per)

    # Delete the product from table 'Kiosk'
    Kiosk.objects.get(kiosk_ID=item.pk).delete()

    # Automatic money transaction from the user to the bank
    userBank = KioskUser.objects.get(username='Bank')
    GeldTransaktionen.doTransaction(g.kaeufer,userBank,g.verkaufspreis,g.gekauftUm,
        "Kauf " + g.produktpalette.produktName)# + " um " + str(g.gekauftUm.astimezone(tz.tzlocal())))
    if buyAndDonate and donation>0:
        userSpendenkonto = KioskUser.objects.get(username='Spendenkonto')
        GeldTransaktionen.doTransaction(
            g.kaeufer,
            userSpendenkonto,
            donation,
            g.gekauftUm,
            "Spende durch Aufstockung von " + g.produktpalette.produktName)
    g.save()

    retVals['success'] = True
    retVals['msg'].append('OK')
    retVals['price'] = actPrices/100.0
    retVals['hasDonated'] = buyAndDonate and donation>0
    retVals['donation'] = donation/100.0
    return retVals
class Gekauft(models.Model):
    """Archive of sold items; intentionally duplicates pricing data (see below)."""
    kiosk_ID = models.AutoField(primary_key=True)
    produktpalette = models.ForeignKey(
        Produktpalette, on_delete=models.CASCADE)
    bedarfErstelltUm = models.DateTimeField()
    einkaufsvermerkUm = models.DateTimeField()
    einkaeufer = models.ForeignKey(
        KioskUser, on_delete=models.CASCADE, related_name='gekauft_einkaeufer')
    geliefertUm = models.DateTimeField()
    verwalterEinpflegen = models.ForeignKey(
        KioskUser, on_delete=models.CASCADE, related_name='gekauft_verwalter')
    einkaufspreis = models.IntegerField(validators=[MinValueValidator(0)])
    # Fix: auto_now_add expects a boolean; the previous `timezone.now` was
    # merely truthy. Behavior is unchanged (Django stamps the field itself
    # on insert either way).
    gekauftUm = models.DateTimeField(auto_now_add=True)
    kaeufer = models.ForeignKey(
        KioskUser, on_delete=models.CASCADE, related_name='gekauft_kaeufer')
    # The sales price could be derived via relations, but the bookkeeping
    # tables store this info redundantly as a safeguard against deletions
    # in other tables.
    verkaufspreis = models.IntegerField(validators=[MinValueValidator(0)])
    kaufarten = (('slack','slack'),('web','web'),('ubk','unbekannt'),('dieb','dieb'))
    gekauft_per = models.CharField(max_length=6,default='ubk',choices=kaufarten)

    def __str__(self):
        price = '%.2f' % (self.verkaufspreis/100)
        return("[#" + str(self.kiosk_ID) + "] " +
               self.produktpalette.produktName + ", VK: " +
               str(price) + " "+chr(8364)+", gekauft von " +
               str(self.kaeufer) + " um " + str(self.gekauftUm))
@transaction.atomic
def rueckbuchenOhneForm(userID, productID, anzahlZurueck):
    """Book back items without a form; return the (last) re-created Kiosk row."""
    result = doRueckbuchung(userID, productID, anzahlZurueck)
    return result['item']
@transaction.atomic
def rueckbuchen(form):
    """Book back purchases described by a validated form.

    Returns a summary dict with the user id, count, total refunded price
    (EUR) and product name of the booked-back items.
    """
    userID = form.cleaned_data['kaeufer_id']
    productID = form.cleaned_data['produkt_id']
    anzahlZurueck = form.cleaned_data['anzahl_zurueck']
    dR = doRueckbuchung(userID, productID, anzahlZurueck)
    price = dR['price']
    # The kiosk content and shopping list were previously queried here, but
    # their results were never used — the dead queries have been removed.
    product = Produktpalette.objects.get(id=productID)
    return {'userID':userID, 'anzahlZurueck': anzahlZurueck, 'price': price/100.0, 'product': product.produktName}
def doRueckbuchung(userID, productID, anzahlZurueck):
    """Move up to `anzahlZurueck` most recent purchases of `productID` by
    user `userID` from Gekauft back into the Kiosk and refund the money.

    Returns {'price': summed sales price in cents,
             'item': the last re-created Kiosk row, or None if nothing
                     was booked back}.
    """
    productsToMove = Gekauft.objects.filter(kaeufer__id=userID, produktpalette__id=productID).order_by('-gekauftUm')[:anzahlZurueck]
    price = 0
    newKioskItem = None
    for item in productsToMove:
        k = Kiosk(kiosk_ID=item.kiosk_ID, produktpalette=item.produktpalette,
            bedarfErstelltUm=item.bedarfErstelltUm, einkaufsvermerkUm=item.einkaufsvermerkUm,
            einkaeufer=item.einkaeufer, geliefertUm=item.geliefertUm,
            verwalterEinpflegen=item.verwalterEinpflegen, einkaufspreis=item.einkaufspreis)
        k.save()
        # Kiosk.geliefertUm is auto_now_add, so the first save stamped it
        # with "now"; restore the historic delivery time with a second save.
        k.geliefertUm = item.geliefertUm
        k.save()
        # Only the last item is kept as the returned Kiosk row!
        price = price + item.verkaufspreis
        newKioskItem = k
        userBank = KioskUser.objects.get(username='Bank')
        user = KioskUser.objects.get(id=userID)
        # Fix: pass a concrete timestamp — previously the `timezone.now`
        # function object itself was handed in instead of being called.
        GeldTransaktionen.doTransaction(userBank, user, item.verkaufspreis, timezone.now(),
            "R"+chr(252)+"ckbuchung Kauf von " + item.produktpalette.produktName)
        item.delete()
    return {'price':price, 'item':newKioskItem}
from .bot import slack_MsgToUserAboutNonNormalBankBalance
class GeldTransaktionen(models.Model):
    """A money transfer between two kiosk users (amounts in integer cents)."""
    AutoTrans_ID = models.AutoField(primary_key=True)
    vonnutzer = models.ForeignKey(
        KioskUser, on_delete=models.CASCADE, related_name='nutzerVon')
    zunutzer = models.ForeignKey(
        KioskUser, on_delete=models.CASCADE, related_name='nutzerZu')
    betrag = models.IntegerField(validators=[MinValueValidator(0)])
    kommentar = models.TextField(max_length=512,blank=True)
    # Fix: auto_now_add expects a boolean; the previous `timezone.now` was
    # merely truthy. Either way Django stamps `datum` itself on insert, so
    # the `datum` argument handed to the constructor is effectively ignored.
    datum = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        betr = '%.2f' % (self.betrag/100)
        return("[#" +
               str(self.AutoTrans_ID) + "] " + str(betr) +
               " "+chr(8364)+" von " + str(self.vonnutzer) + " an " +
               str(self.zunutzer))

    # Query the total number of a user's transactions.
    def getLengthOfAllTransactions(user):
        allTransactions = readFromDatabase('getLengthOfAllTransactions',[user.id, user.id])
        return(allTransactions)

    # Query a page of a user's transactions for the account-statement view.
    def getTransactions(user, page, limPP, maxIt):
        # Shrink the last page so we never request beyond `maxIt` items.
        if int(page)*int(limPP) > int(maxIt):
            limPPn = int(limPP) - (int(page)*int(limPP) - int(maxIt))
        else:
            limPPn = limPP
        allTransactions = readFromDatabase('getTransactions',
            [user.id, user.id, int(page)*int(limPP), limPPn])
        # Add TimeZone information: It is stored as UTC-Time in the SQLite-Database
        for k,v in enumerate(allTransactions):
            allTransactions[k]['datum'] = pytz.timezone('UTC').localize(v['datum'])
        return(allTransactions)

    @transaction.atomic
    def doTransaction(vonnutzer, zunutzer, betrag, datum, kommentar):
        """Book `betrag` cents from `vonnutzer` to `zunutzer` and keep the
        redundant Kontostand balances in sync (atomically)."""
        t = GeldTransaktionen(vonnutzer=vonnutzer, zunutzer=zunutzer, betrag = betrag, datum=datum, kommentar=kommentar)
        # Bargeld transactions among Bargeld-users are calculated negatively.
        # But not as soon as one "normal" user is part of the transaction.
        if t.vonnutzer.username in ('Bargeld','Bargeld_Dieb','Bargeld_im_Tresor') and t.zunutzer.username in ('Bargeld','Bargeld_Dieb','Bargeld_im_Tresor'):
            sign = -1
        else:
            sign = +1
        # Fetch the sender's balance and apply the new amount.
        vonNutzerKonto = Kontostand.objects.get(nutzer_id=t.vonnutzer)
        vonNutzerKonto.stand = vonNutzerKonto.stand - sign * t.betrag
        vonNutzerKonto.save()
        # Fetch the recipient's balance and apply the new amount.
        zuNutzerKonto = Kontostand.objects.get(nutzer_id=t.zunutzer)
        zuNutzerKonto.stand = zuNutzerKonto.stand + sign * t.betrag
        zuNutzerKonto.save()
        t.save()
        # Message to the users if their bank balance becomes too high / too low
        if getattr(settings,'ACTIVATE_SLACK_INTERACTION') == True:
            # Best effort only: a failing Slack notification must never roll
            # back the money transfer. (Narrowed from a bare `except:` so
            # that e.g. KeyboardInterrupt is not swallowed.)
            try:
                slack_MsgToUserAboutNonNormalBankBalance(t.vonnutzer.id, vonNutzerKonto.stand)
                slack_MsgToUserAboutNonNormalBankBalance(t.zunutzer.id, zuNutzerKonto.stand)
            except Exception:
                pass

    @transaction.atomic
    def makeManualTransaktion(form, currentUser):
        """Execute a manual transfer from the admin area. Form amounts are
        in EUR and converted to integer cents."""
        idFrom = int(form['idFrom'].value())
        idTo = int(form['idTo'].value())
        betrag = int(100*float(form['betrag'].value()))
        kommentar = form['kommentar'].value()
        userFrom = KioskUser.objects.get(id=idFrom)
        userTo = KioskUser.objects.get(id=idTo)
        kommentar = kommentar + ' (' + userFrom.username + ' --> ' + userTo.username + ')'
        GeldTransaktionen.doTransaction(vonnutzer=userFrom, zunutzer=userTo,
            betrag=betrag, datum=timezone.now(), kommentar=kommentar)
        return {'returnDict':{'betrag':betrag/100,'userFrom':userFrom.username,'userTo':userTo.username},
                'type':'manTransaction',
                'userFrom':userFrom,
                'userTo':userTo,
                'betrag':betrag/100,
                'user':currentUser
                }

    @transaction.atomic
    def makeEinzahlung(form, currentUser):
        """Execute a cash deposit or withdrawal (counterpart user 'Bargeld')."""
        barUser = KioskUser.objects.get(username='Bargeld')
        if form['typ'].value() == 'Einzahlung':
            idFrom = barUser.id
            idTo = int(form['idUser'].value())
            ezaz = 'eingezahlt'
        else:
            idTo = barUser.id
            idFrom = int(form['idUser'].value())
            ezaz = 'ausgezahlt'
        betrag = int(100*float(form['betrag'].value()))
        kommentar = form['kommentar'].value()
        userFrom = KioskUser.objects.get(id=idFrom)
        userTo = KioskUser.objects.get(id=idTo)
        kommentar = kommentar + ' (' + form['typ'].value() + ')'
        GeldTransaktionen.doTransaction(vonnutzer=userFrom, zunutzer=userTo,
            betrag=betrag, datum=timezone.now(), kommentar=kommentar)
        return {'type':ezaz,
                'userFrom':userFrom,
                'userTo':userTo,
                'betrag':betrag/100,
                'user':currentUser
                }
# The balance actually follows from GeldTransaktionen, but it is stored
# redundantly as a safeguard (deleted table rows, bugs, etc.): every
# transaction is applied to the running balance. No further coupling ->
# other tables can crash without corrupting the balance.
class Kontostand(models.Model):
    nutzer = models.OneToOneField(KioskUser, on_delete=models.CASCADE,
                                  primary_key = True)
    stand = models.IntegerField()

    def __str__(self):
        # Balance is stored in cents; render as a two-decimal EUR amount.
        formatted = '%.2f' % (self.stand/100)
        return "{}: {} {}".format(self.nutzer, formatted, chr(8364))
# At inventory, here the paid but not taken items are registered
class ZuVielBezahlt(models.Model):
    """One surplus item found at inventory (paid for but never taken)."""
    produkt = models.ForeignKey(
        Produktpalette, on_delete=models.CASCADE)
    datum = models.DateTimeField(auto_now_add=True)
    preis = models.IntegerField()

    def __str__(self):
        # Price is stored in cents; render as a two-decimal EUR amount.
        return "{}: {} {}".format(
            self.produkt.produktName, '%.2f' % (self.preis/100), chr(8364))
@transaction.atomic
def makeInventory(request, currentUser, inventoryList):
    """Reconcile the kiosk stock with a counted inventory.

    For every checked item the counted ("is") amount is compared with the
    recorded ("should") amount. Surpluses are first balanced against items
    the 'Dieb' user has "bought"; any remainder is pushed through the
    shopping-list pipeline into the kiosk at purchase price 0, and the
    surplus value is logged in ZuVielBezahlt. Shortfalls are "bought" by
    the 'Dieb' user. Returns a report list (one dict per processed item).
    """
    report = []
    # Go through all items in the kiosk
    for item in inventoryList:
        # Check, if the item should be considered
        if not request.POST.get(item["checkbutton_id_name"]) is None:
            # Get the should- and is- count of the item
            isVal = int(request.POST.get(item["count_id_name"]))
            shouldVal = item["anzahl"]
            # Check, if stock is higher, lower or equal
            if shouldVal == isVal:
                diff = 0
                report.append({'id': item["id"],
                               'produkt_name': item["produkt_name"],
                               'verkaufspreis_ct': item["verkaufspreis_ct"],
                               'verlust': False,
                               'anzahl': diff,
                               'message': 'OK.'})
            elif shouldVal < isVal:
                diff = isVal - shouldVal
                # Too much has been bought.
                # First try to book back items, the "Dieb" has "bought"
                userDieb = KioskUser.objects.get(username='Dieb')
                diebBoughtItems = readFromDatabase('getBoughtItemsOfUser', [userDieb.id])
                diebBought = [x for x in diebBoughtItems if x['produkt_id']==item['id']]
                if not diebBought==[]:
                    noToBuyBack = diebBought[0]['anzahl_gekauft']
                    noToBuyBack = min(noToBuyBack,diff)
                    Gekauft.rueckbuchenOhneForm(userDieb.id,item['id'],noToBuyBack)
                else:
                    noToBuyBack = 0
                diff = diff - noToBuyBack
                # If booking back is not possible, a new item is created in
                # the open shopping list and pushed into the kiosk; a notice
                # is stored in the table of overpaid items.
                datum = timezone.now()
                p = Produktpalette.objects.get(id=item["id"])
                maxGroup = EinkaufslisteGroups.objects.all().aggregate(Max('gruppenID'))
                # Fix: Max() yields None when no groups exist yet; start at 1
                # in that case instead of crashing on None + 1.
                maxGroup = (maxGroup["gruppenID__max"] or 0) + 1
                for i in range(0,diff):
                    e = Einkaufsliste(produktpalette = p)
                    e.save()
                    eg = EinkaufslisteGroups(einkaufslistenItem=e,gruppenID=maxGroup)
                    eg.save()
                    ok = Einkaufsliste.einkaufGroupVormerken(maxGroup,currentUser.id)
                    z = ZuVielBezahlt(produkt = p, datum = datum, preis = int(item["verkaufspreis_ct"]))
                    z.save()
                angeliefert = ZumEinkaufVorgemerkt.objects.filter(einkaeufer__id=currentUser.id,
                    produktpalette__id=item["id"]).order_by('kiosk_ID')[:diff]
                # Write the values and store the items in the kiosk
                for an in angeliefert:
                    k = Kiosk(kiosk_ID=an.kiosk_ID,bedarfErstelltUm=an.bedarfErstelltUm,
                        produktpalette_id=an.produktpalette_id, einkaufsvermerkUm=an.einkaufsvermerkUm,
                        einkaeufer_id = an.einkaeufer_id, geliefertUm = datum,
                        verwalterEinpflegen_id = currentUser.id, einkaufspreis = 0)
                    k.save()
                    an.delete()
                report.append({'id': item["id"],
                               'produkt_name': item["produkt_name"],
                               'verkaufspreis_ct': item["verkaufspreis_ct"],
                               'verlust': False,
                               'anzahl': diff+noToBuyBack,
                               'message': str(diff+noToBuyBack) + ' zu viel gekauft.'})
            elif shouldVal > isVal:
                # Items have not been payed. Now, the "thieve" "buys" them.
                diff = shouldVal-isVal
                user = KioskUser.objects.get(username='Dieb')
                buyItem = item["produkt_name"]
                for x in range(0,diff):
                    retVal = Kiosk.buyItem(buyItem,user,gekauft_per='dieb')
                report.append({'id': item["id"],
                               'produkt_name': item["produkt_name"],
                               'verkaufspreis_ct': item["verkaufspreis_ct"],
                               'verlust': True,
                               'anzahl': diff,
                               'message': str(diff) + ' nicht bezahlt. Nun "kauft" diese der Dieb.'})
    return(report)
2ee40da39bd2d751bdd4296c97d932bca4108462 | 910 | py | Python | ex_15/ex_15_02.py | rovelee/py4e | 32125f5d62b6c7b6a56c8e1a250c1d81c6d54006 | [
"MIT"
] | null | null | null | ex_15/ex_15_02.py | rovelee/py4e | 32125f5d62b6c7b6a56c8e1a250c1d81c6d54006 | [
"MIT"
] | null | null | null | ex_15/ex_15_02.py | rovelee/py4e | 32125f5d62b6c7b6a56c8e1a250c1d81c6d54006 | [
"MIT"
] | null | null | null | import sqlite3
import re
from sys import prefix
# Count the sender domains of 'From:' headers in a mailbox file and store
# the tallies in an SQLite table, then print the top ten.
conn = sqlite3.connect('orgdb.sqlite')
cur = conn.cursor()

cur.execute('DROP TABLE IF EXISTS Counts')
cur.execute('''
CREATE TABLE Counts (org TEXT, count INTEGER)''')

prefix='../file/'
fname = input('Enter file name: ')
if (len(fname) < 1):
    fname = 'mbox-short.txt'

# Tally messages per sender domain (text after the '@' in the From header).
dct=dict()
# Fix: the file handle was never closed; `with` releases it deterministically.
with open(prefix+fname) as fh:
    for line in fh:
        line=line.rstrip()
        lst=re.findall('^From: .+@(.+)',line)
        if len(lst)>0:
            dct[lst[0]]=dct.get(lst[0],0)+1

for k,v in dct.items():
    cur.execute('''INSERT INTO Counts (org, count)
        VALUES (?, ?)''',(k,v))

sqlstr = 'SELECT org, count FROM Counts ORDER BY count DESC LIMIT 10'
for row in cur.execute(sqlstr):
    print(str(row[0]), row[1])

org='edu'
cur.execute('SELECT count FROM Counts WHERE org = ? ', (org, ))
row = cur.fetchone()
print('row:',row,'type:',type(row))

conn.commit()
cur.close()
# Fix: also close the connection so the database file is released.
conn.close()
2ee7fb23d90df230952beef9e9b26c2d9645d6d2 | 7,111 | py | Python | scripts/test_pretrained_model.py | NamJiii/PECNet | b99f54dacc14ff079222b3a339428c10e622cb16 | [
"MIT"
] | null | null | null | scripts/test_pretrained_model.py | NamJiii/PECNet | b99f54dacc14ff079222b3a339428c10e622cb16 | [
"MIT"
] | null | null | null | scripts/test_pretrained_model.py | NamJiii/PECNet | b99f54dacc14ff079222b3a339428c10e622cb16 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import sys
from torch.utils.data import DataLoader
import argparse
import copy
sys.path.append("../utils/")
import matplotlib.pyplot as plt
import numpy as np
from models import *
from social_utils import *
import yaml
# Command-line interface for evaluating a pre-trained PECNet checkpoint.
parser = argparse.ArgumentParser(description='PECNet')
parser.add_argument('--num_workers', '-nw', type=int, default=0)
parser.add_argument('--gpu_index', '-gi', type=int, default=0)
parser.add_argument('--load_file', '-lf', default="run7.pt")
parser.add_argument('--num_trajectories', '-nt', default=20) #number of trajectories to sample
parser.add_argument('--verbose', '-v', action='store_true')
parser.add_argument('--root_path', '-rp', default="./")
args = parser.parse_args()

# All tensors are created in double precision.
dtype = torch.float64
torch.set_default_dtype(dtype)

# Prefer the requested GPU; fall back to CPU when CUDA is unavailable.
device = torch.device('cuda', index=args.gpu_index) if torch.cuda.is_available() else torch.device('cpu')
if torch.cuda.is_available():
    torch.cuda.set_device(args.gpu_index)
print(device)

# Checkpoint holds both the weights and the training hyper-parameters.
checkpoint = torch.load('../saved_models/{}'.format(args.load_file), map_location=device)
hyper_params = checkpoint["hyper_params"]
print(hyper_params)
def inference(test_dataset, model, best_of_n = 1):
    """Debug/experimental inference pass returning reconstructed destinations.

    NOTE(review): the real batch `x` is prepared but NOT fed to the model;
    a small hard-coded `sample_data` batch and an empty `init` list (in
    place of initial positions) are passed instead, and `best_of_n` is
    unused — this looks like temporary scaffolding to inspect the
    destination decoder. Confirm before relying on the returned values.
    """
    model.eval()
    assert best_of_n >= 1 and type(best_of_n) == int
    test_loss = 0

    with torch.no_grad():
        for i, (traj, mask, initial_pos) in enumerate(zip(test_dataset.trajectory_batches, test_dataset.mask_batches, test_dataset.initial_pos_batches)):
            traj, mask, initial_pos = torch.DoubleTensor(traj).to(device), torch.DoubleTensor(mask).to(device), torch.DoubleTensor(initial_pos).to(device)
            # Past part of the trajectories, e.g. 2829*20*2 => 2829*8*2.
            x = traj[:, :hyper_params["past_length"], :]
            # Two hand-crafted 8-step past trajectories of (x, y) points.
            sample_data = [[[0.0,0.0],
                [710.0,10.0],
                [620.0,20.0],
                [530.0,30.0],
                [440.0,40.0],
                [350.0,50.0],
                [260.0,60.0],
                [170.0,70.0]],
                [[0.0,0.0],
                [10.0,1.0],
                [20.0,2.0],
                [30.0,3.0],
                [40.0,4.0],
                [50.0,5.0],
                [60.0,6.0],
                [70.0,7.0]]]  # shape 2*8*2
            #print('x : ',x[:10,:,:])
            # Flatten the past steps: 2829*8*2 => 2829*16.
            x = x.contiguous().view(-1, x.shape[1]*x.shape[2])
            #print('x : ', x[:10, :])
            sample_data = torch.DoubleTensor(sample_data).to(device)
            sample_data = sample_data.contiguous().view(-1,sample_data.shape[1]*sample_data.shape[2])
            sample_data = sample_data.to(device)
            x = x.to(device)
            all_guesses = []
            #print('x.shape : ',x)  # x is a 2-D tensor, e.g. 2829*16
            #print('initial_pos.shape : ', initial_pos)  # e.g. 2829*2
            init = []
            # Only the hard-coded sample is fed to the model here;
            # the commented variant passed initial_pos[:100,:] instead of init.
            dest_recon = model.forward(sample_data, init, device=device)
            #print('dest_recon : ',dest_recon)  # e.g. 2829*2
            dest_recon = dest_recon.cpu().numpy()
            #print('initial_pos : ',initial_pos[:10,:])
            print('dest_recon : ',dest_recon)

    return dest_recon
def test(test_dataset, model, best_of_n = 1):
    """Evaluate the model on `test_dataset` with best-of-N sampling.

    Samples `best_of_n` destination guesses per trajectory, keeps the best
    one, interpolates the full future with it and reports the destination
    error (FDE-style) and the overall ADE, rescaled by
    hyper_params['data_scale'].
    """
    print('==start test==')
    model.eval()
    assert best_of_n >= 1 and type(best_of_n) == int
    test_loss = 0

    with torch.no_grad():
        for i, (traj, mask, initial_pos) in enumerate(zip(test_dataset.trajectory_batches, test_dataset.mask_batches, test_dataset.initial_pos_batches)):
            traj, mask, initial_pos = torch.DoubleTensor(traj).to(device), torch.DoubleTensor(mask).to(device), torch.DoubleTensor(initial_pos).to(device)
            # traj is the ground truth; mask encodes the social grouping.
            x = traj[:, :hyper_params["past_length"], :]
            y = traj[:, hyper_params["past_length"]:, :]
            y = y.cpu().numpy()
            # reshape the data
            #print(x.shape)
            x = x.contiguous().view(-1, x.shape[1]*x.shape[2])  # e.g. 2829*8*2 => 2829*16
            x = x.to(device)
            #print('--start with x--')
            #print(x.shape)

            future = y[:, :-1, :]
            dest = y[:, -1, :]
            all_l2_errors_dest = []
            all_guesses = []
            for index in range(best_of_n):
                print('x.shape : ',x)  # x is a 2-D tensor, e.g. 2829*16
                print('initial_pos.shape : ', initial_pos)  # e.g. 2829*2
                dest_recon = model.forward(x, initial_pos, device=device)
                print('dest_recon : ',dest_recon)  # e.g. 2829*2
                dest_recon = dest_recon.cpu().numpy()
                all_guesses.append(dest_recon)
                # dest_recon is the guess; dest is the ground truth.
                l2error_sample = np.linalg.norm(dest_recon - dest, axis = 1)
                all_l2_errors_dest.append(l2error_sample)
            # Guessing and error computation are repeated best_of_n times.
            all_l2_errors_dest = np.array(all_l2_errors_dest)
            all_guesses = np.array(all_guesses)
            # average error
            l2error_avg_dest = np.mean(all_l2_errors_dest)
            # choosing the best guess
            indices = np.argmin(all_l2_errors_dest, axis = 0)
            best_guess_dest = all_guesses[indices,np.arange(x.shape[0]), :]
            # taking the minimum error out of all guess
            l2error_dest = np.mean(np.min(all_l2_errors_dest, axis = 0))
            # back to torch land
            best_guess_dest = torch.DoubleTensor(best_guess_dest).to(device)
            # using the best guess for interpolation
            interpolated_future = model.predict(x, best_guess_dest, mask, initial_pos)
            interpolated_future = interpolated_future.cpu().numpy()
            best_guess_dest = best_guess_dest.cpu().numpy()
            # final overall prediction
            predicted_future = np.concatenate((interpolated_future, best_guess_dest), axis = 1)
            predicted_future = np.reshape(predicted_future, (-1, hyper_params["future_length"], 2))
            print('**REAL :',y,' |Predicted :',predicted_future,'**')
            # ADE error
            l2error_overall = np.mean(np.linalg.norm(y - predicted_future, axis = 2))

    # Undo the data scaling applied during preprocessing.
    l2error_overall /= hyper_params["data_scale"]
    l2error_dest /= hyper_params["data_scale"]
    l2error_avg_dest /= hyper_params["data_scale"]

    print('Test time error in destination best: {:0.3f} and mean: {:0.3f}'.format(l2error_dest, l2error_avg_dest))
    print('Test time error overall (ADE) best: {:0.3f}'.format(l2error_overall))
    return l2error_overall, l2error_dest, l2error_avg_dest
def main():
    """Load the saved PECNet model and run inference over the test set."""
    N = args.num_trajectories #number of generated trajectories
    model = PECNet(hyper_params["enc_past_size"], hyper_params["enc_dest_size"], hyper_params["enc_latent_size"], hyper_params["dec_size"], hyper_params["predictor_hidden_size"], hyper_params['non_local_theta_size'], hyper_params['non_local_phi_size'], hyper_params['non_local_g_size'], hyper_params["fdim"], hyper_params["zdim"], hyper_params["nonlocal_pools"], hyper_params['non_local_dim'], hyper_params["sigma"], hyper_params["past_length"], hyper_params["future_length"], args.verbose)
    model = model.double().to(device)
    model.load_state_dict(checkpoint["model_state_dict"])

    test_dataset = SocialDataset(set_name="test", b_size=hyper_params["test_b_size"], t_tresh=hyper_params["time_thresh"], d_tresh=hyper_params["dist_thresh"], verbose=args.verbose)
    #print(test_dataset.trajectory_batches[0][22])  # batch_num X datadict[key] X 20 (prev 8 + post 20) by ID pid fid x y
    #return 0
    for traj in test_dataset.trajectory_batches:
        #print('-----',type(traj))
        traj -= traj[:, :1, :]  # make positions relative to the first frame
        #print('=====', traj)
        traj *= hyper_params["data_scale"]  # constant scale factor (e.g. 1.86)
        #print('=-=-=', traj)

    #average ade/fde for k=20 (to account for variance in sampling)
    num_samples = 1
    average_ade, average_fde = 0, 0
    for i in range(num_samples):
        # NOTE(review): only inference() is invoked; test() and the ade/fde
        # accumulators above are currently unused.
        predicted= inference(test_dataset, model, best_of_n = N)

main()
| 39.071429 | 487 | 0.696527 | 1,099 | 7,111 | 4.276615 | 0.228389 | 0.067872 | 0.028723 | 0.02234 | 0.317447 | 0.260426 | 0.224468 | 0.191915 | 0.191915 | 0.191915 | 0 | 0.04089 | 0.140205 | 7,111 | 181 | 488 | 39.287293 | 0.727838 | 0.141049 | 0 | 0.190476 | 0 | 0 | 0.113108 | 0.003462 | 0 | 0 | 0 | 0 | 0.015873 | 1 | 0.02381 | false | 0 | 0.103175 | 0 | 0.142857 | 0.079365 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ee7fd84d8fa002e0d13f33ba310cdac5cb3d3f4 | 3,386 | py | Python | Count/src/index.py | ApperPh/aws-cloudformation-macros | 74c6b969d9780fd930b76325af5baaf1d15be572 | [
"Apache-2.0"
] | null | null | null | Count/src/index.py | ApperPh/aws-cloudformation-macros | 74c6b969d9780fd930b76325af5baaf1d15be572 | [
"Apache-2.0"
] | null | null | null | Count/src/index.py | ApperPh/aws-cloudformation-macros | 74c6b969d9780fd930b76325af5baaf1d15be572 | [
"Apache-2.0"
] | null | null | null | import copy
import json
def process_template(template, params):
new_template = copy.deepcopy(template)
status = 'success'
for name, resource in template['Resources'].items():
if 'Count' in resource:
#Get the number of times to multiply the resource
count = new_template['Resources'][name].pop('Count')
print('Count data type: {}'.format(type(count)))
if type(count) == dict:
count = int(params[count['Ref']])
print('Dict Count data type: {}'.format(type(count)))
print("Found 'Count' property with value {} in '{}' resource....multiplying!".format(count,name))
#Remove the original resource from the template but take a local copy of it
resourceToMultiply = new_template['Resources'].pop(name)
#Create a new block of the resource multiplied with names ending in the iterator and the placeholders substituted
resourcesAfterMultiplication = multiply(name, resourceToMultiply, count)
if not set(resourcesAfterMultiplication.keys()) & set(new_template['Resources'].keys()):
new_template['Resources'].update(resourcesAfterMultiplication)
else:
status = 'failed'
return status, template
else:
print("Did not find 'Count' property in '{}' resource....Nothing to do!".format(name))
return status, new_template
def update_placeholder(resource_structure, iteration):
#Convert the json into a string
resourceString = json.dumps(resource_structure)
#Count the number of times the placeholder is found in the string
placeHolderCount = resourceString.count('%d')
#If the placeholder is found then replace it
if placeHolderCount > 0:
print("Found {} occurrences of decimal placeholder in JSON, replacing with iterator value {}".format(placeHolderCount, iteration))
#Make a list of the values that we will use to replace the decimal placeholders - the values will all be the same
placeHolderReplacementValues = [iteration] * placeHolderCount
#Replace the decimal placeholders using the list - the syntax below expands the list
resourceString = resourceString % (*placeHolderReplacementValues,)
#Convert the string back to json and return it
return json.loads(resourceString)
else:
print("No occurences of decimal placeholder found in JSON, therefore nothing will be replaced")
return resource_structure
def multiply(resource_name, resource_structure, count):
    """Return a dict of *count* copies of *resource_structure*, keyed by
    ``resource_name`` suffixed with 1..count, each with its placeholders
    substituted by its index."""
    multiplied = {}
    for index in range(1, count + 1):
        print("Multiplying '{}', iteration count {}".format(resource_name, index))
        multiplied['{}{}'.format(resource_name, index)] = update_placeholder(resource_structure, index)
    return multiplied
def handler(event, context):
    """Lambda entry point for the CloudFormation macro.

    Reads the template fragment and parameter values from *event* and returns
    the macro response expected by CloudFormation (requestId, status and the
    transformed fragment).
    """
    params = event["templateParameterValues"]
    print("parameter values")
    print(params)
    status, fragment = process_template(event['fragment'], params)
    return {
        'requestId': event['requestId'],
        'status': status,
        'fragment': fragment,
    }
| 46.383562 | 138 | 0.674247 | 376 | 3,386 | 6.023936 | 0.332447 | 0.029139 | 0.03532 | 0.021192 | 0.062693 | 0.024724 | 0 | 0 | 0 | 0 | 0 | 0.001933 | 0.236267 | 3,386 | 72 | 139 | 47.027778 | 0.873937 | 0.20762 | 0 | 0.057692 | 0 | 0 | 0.20015 | 0.017583 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.038462 | 0 | 0.230769 | 0.173077 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ee971f0a382fe6fbd625d25b5631b8db3539cec | 8,745 | py | Python | dali/test/python/test_operator_readers_webdataset_corner.py | barci2/DALI | 21a51891eae9ad6f7fc8b17d7dccc65465783d2d | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | dali/test/python/test_operator_readers_webdataset_corner.py | barci2/DALI | 21a51891eae9ad6f7fc8b17d7dccc65465783d2d | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | dali/test/python/test_operator_readers_webdataset_corner.py | barci2/DALI | 21a51891eae9ad6f7fc8b17d7dccc65465783d2d | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from webdataset_base import *
def general_corner_case(
    test_batch_size=test_batch_size, dtypes=None, missing_component_behavior="", **kwargs
):
    """Compare the webdataset reader against a plain file reader on the
    MNIST devel-0 archive, forwarding ``kwargs`` to both pipelines.

    Used directly as a smoke test (lazy_init/read_ahead) and as the target
    of ``assert_raises`` in the invalid-argument tests below.  The default
    ``test_batch_size`` comes from webdataset_base.
    """
    # NOTE(review): assumed to match the sample count of devel-0.tar — confirm.
    num_samples = 1000
    tar_file_path = os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/devel-0.tar")
    index_file = generate_temp_index_file(tar_file_path)
    extract_dir = generate_temp_extract(tar_file_path)
    # Sort extracted files numerically by the sample index embedded in the
    # filename so their order matches the archive order.
    equivalent_files = sorted(
        glob(extract_dir.name + "/*"), key=lambda s: int(s[s.rfind("/") + 1 : s.rfind(".")])
    )
    compare_pipelines(
        webdataset_raw_pipeline(
            tar_file_path,
            index_file.name,
            ["jpg", "cls"],
            missing_component_behavior=missing_component_behavior,
            dtypes=dtypes,
            batch_size=test_batch_size,
            device_id=0,
            num_threads=1,
            **kwargs
        ),
        file_reader_pipeline(
            equivalent_files,
            ["jpg", "cls"],
            batch_size=test_batch_size,
            device_id=0,
            num_threads=1,
            **kwargs
        ),
        test_batch_size,
        math.ceil(num_samples / test_batch_size),
    )
def test_mmap_dtype_incompatibility():
    # A requested output dtype whose element size is incompatible with the
    # stored component size must be rejected.
    assert_raises(
        RuntimeError,
        general_corner_case,
        dtypes=[dali.types.INT8, dali.types.FLOAT64],
        glob="component size and dtype incompatible",
    )


def test_lazy_init():
    # The reader must still produce data identical to a file reader when
    # configured with lazy_init.
    general_corner_case(lazy_init=True)


def test_read_ahead():
    # Same check with read_ahead enabled.
    general_corner_case(read_ahead=True)
def _check_single_sample(tar_rel_path, **wds_kwargs):
    """Shared body for the single-sample tests below.

    Checks that a tar containing exactly one sample (a) yields the same data
    as a plain file reader over the extracted files, and (b) reports an epoch
    size of exactly one sample.

    :param tar_rel_path: archive path relative to the DALI extra data root.
    :param wds_kwargs: extra keyword args forwarded verbatim to
        ``webdataset_raw_pipeline`` (e.g. missing_component_behavior), so an
        omitted kwarg keeps the pipeline's own default.
    """
    test_batch_size = 1
    num_samples = 1
    tar_file_path = os.path.join(get_dali_extra_path(), tar_rel_path)
    index_file = generate_temp_index_file(tar_file_path)
    extract_dir = generate_temp_extract(tar_file_path)
    equivalent_files = list(sorted(glob(extract_dir.name + "/*")))
    compare_pipelines(
        webdataset_raw_pipeline(
            tar_file_path,
            index_file.name,
            ["txt"],
            batch_size=test_batch_size,
            device_id=0,
            num_threads=1,
            **wds_kwargs
        ),
        file_reader_pipeline(
            equivalent_files, ["txt"], batch_size=test_batch_size, device_id=0, num_threads=1
        ),
        test_batch_size,
        math.ceil(num_samples / test_batch_size) * 10,
    )
    # Independently verify the reported epoch size.
    wds_pipeline = webdataset_raw_pipeline(
        tar_file_path,
        index_file.name,
        ["txt"],
        batch_size=test_batch_size,
        device_id=0,
        num_threads=1,
    )
    wds_pipeline.build()
    assert_equal(list(wds_pipeline.epoch_size().values())[0], num_samples)


def test_single_sample():
    # One sample, skipping samples with missing components.
    _check_single_sample(
        "db/webdataset/sample-tar/single.tar", missing_component_behavior="skip"
    )


def test_single_sample_and_junk():
    # One sample surrounded by junk entries in the archive.
    _check_single_sample("db/webdataset/sample-tar/single_junk.tar")
def test_wide_sample():
    """A single sample with very many (1000) components loads correctly."""
    test_batch_size = 1
    num_samples = 1
    tar_file_path = os.path.join(get_dali_extra_path(), "db/webdataset/sample-tar/wide.tar")
    index_file = generate_temp_index_file(tar_file_path)
    extract_dir = generate_temp_extract(tar_file_path)
    equivalent_files = list(sorted(glob(extract_dir.name + "/*")))
    # Components are named "0".."999" inside the archive.
    num_components = 1000
    compare_pipelines(
        webdataset_raw_pipeline(
            tar_file_path,
            index_file.name,
            [str(x) for x in range(num_components)],
            batch_size=test_batch_size,
            device_id=0,
            num_threads=1,
        ),
        file_reader_pipeline(
            equivalent_files,
            [str(x) for x in range(num_components)],
            batch_size=test_batch_size,
            device_id=0,
            num_threads=1,
        ),
        test_batch_size,
        math.ceil(num_samples / test_batch_size) * 10,
    )
    # NOTE(review): the ["txt"] extension list below looks copy-pasted from
    # the single-sample tests; only the epoch size is asserted here, so it
    # probably does not matter — confirm.
    wds_pipeline = webdataset_raw_pipeline(
        tar_file_path,
        index_file.name,
        ["txt"],
        batch_size=test_batch_size,
        device_id=0,
        num_threads=1,
    )
    wds_pipeline.build()
    assert_equal(list(wds_pipeline.epoch_size().values())[0], num_samples)
def test_argument_errors():
    """Invalid reader arguments are rejected with descriptive errors."""
    def paths_index_paths_error():
        # Three archives paired with an empty index-path list.
        webdataset_pipeline = webdataset_raw_pipeline(
            [
                os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/devel-0.tar"),
                os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/devel-1.tar"),
                os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/devel-2.tar"),
            ],
            [],
            ["jpg", "cls"],
            batch_size=1,
            device_id=0,
            num_threads=1,
        )
        webdataset_pipeline.build()

    assert_raises(
        RuntimeError,
        paths_index_paths_error,
        glob="Number of webdataset archives does not match the number of index files",
    )
    assert_raises(
        RuntimeError,
        general_corner_case,
        missing_component_behavior="SomethingInvalid",
        glob="Invalid value for missing_component_behavior",
    )
    # "Skip" (capitalized) must be accepted — value matching is
    # case-insensitive, per this check.
    general_corner_case(missing_component_behavior="Skip")
    assert_raises(
        RuntimeError,
        general_corner_case,
        dtypes=[dali.types.STRING, dali.types.STRING],
        glob="Unsupported output dtype *. Supported types are",
    )
    # A single dtype for two extensions is a count mismatch.
    assert_raises(
        RuntimeError,
        general_corner_case,
        dtypes=dali.types.INT8,
        glob="Number of extensions does not match the number of provided types",
    )
def general_index_error(
    index_file_contents, tar_file_path="db/webdataset/MNIST/devel-0.tar", ext="jpg"
):
    """Run a webdataset pipeline against a hand-written (malformed) index.

    Helper for test_index_errors: callers pass raw index bytes and wrap this
    in ``assert_raises``, expecting the reader to fail while parsing or using
    the index.
    """
    index_file = tempfile.NamedTemporaryFile()
    index_file.write(index_file_contents)
    index_file.flush()
    webdataset_pipeline = webdataset_raw_pipeline(
        os.path.join(get_dali_extra_path(), tar_file_path),
        index_file.name,
        ext,
        batch_size=1,
        device_id=0,
        num_threads=1,
    )
    webdataset_pipeline.build()
    # Run twice — presumably so errors surfaced while prefetching the next
    # batch are also raised; TODO confirm.
    webdataset_pipeline.run()
    webdataset_pipeline.run()
def test_index_errors():
    """Each kind of malformed index file yields its specific parse error."""
    assert_raises(RuntimeError, general_index_error, b"", glob="no version signature found")
    assert_raises(
        RuntimeError,
        general_index_error,
        b"v0.1",
        glob="the version of the index file does not match the expected version (expected: ",
    )
    assert_raises(RuntimeError, general_index_error, b"v1.1", glob="no sample count found")
    assert_raises(
        RuntimeError, general_index_error, b"v1.1 -1", glob="sample count must be positive"
    )
    assert_raises(
        RuntimeError, general_index_error, b"v1.1 1\n", glob="no extensions provided for the sample"
    )
    assert_raises(
        RuntimeError,
        general_index_error,
        b"v1.1 1\njpg",
        glob="size or offset corresponding to the extension not found",
    )
    assert_raises(
        RuntimeError,
        general_index_error,
        b"v1.1 1\njpg 1 0",
        glob="tar offset is not a multiple of tar block size",
    )
    # Offset pointing past the end of an (empty) archive.
    assert_raises(
        RuntimeError,
        general_index_error,
        b"v1.1 1\njpg 51200 1",
        "db/webdataset/sample-tar/empty.tar",
        glob="offset is outside of the archive file"
    )
| 31.010638 | 100 | 0.641624 | 1,098 | 8,745 | 4.779599 | 0.186703 | 0.063453 | 0.059451 | 0.029726 | 0.665015 | 0.649962 | 0.616425 | 0.602706 | 0.59375 | 0.574505 | 0 | 0.014884 | 0.262436 | 8,745 | 281 | 101 | 31.120996 | 0.79876 | 0.068039 | 0 | 0.64557 | 0 | 0 | 0.126721 | 0.0397 | 0 | 0 | 0 | 0 | 0.067511 | 1 | 0.046414 | false | 0 | 0.004219 | 0 | 0.050633 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ee978484eedfa6f0ee35b6d2e588d59022d7378 | 461 | py | Python | lessons/lesson3/t210.py | kcfkwok2003/Simp_py | f75e66da01b45dc8688dda602f8b33d4258f0c31 | [
"MIT"
] | null | null | null | lessons/lesson3/t210.py | kcfkwok2003/Simp_py | f75e66da01b45dc8688dda602f8b33d4258f0c31 | [
"MIT"
] | null | null | null | lessons/lesson3/t210.py | kcfkwok2003/Simp_py | f75e66da01b45dc8688dda602f8b33d4258f0c31 | [
"MIT"
] | null | null | null | # t210.py
from machine import Pin
from simp_py import tft
count=0
def counting(v):
global count, show, p5
if p5.value()==0:
count+=1
else:
count-=1
btnA= Pin(39,Pin.IN)
a = Pin(5, Pin.IN,Pin.PULL_UP)
b = Pin(2, Pin.IN,Pin.PULL_UP)
a.irq(counting,trigger=Pin.IRQ_RISING| Pin.IRQ_FALLING)
pcount=-1
while 1:
if btnA.value()==0:
count=0
if pcount != count:
pcount=count
tft.tft.text(0,100,'count:%s ' % (count,))
time.sleep(0.1)
| 20.043478 | 55 | 0.646421 | 87 | 461 | 3.367816 | 0.45977 | 0.051195 | 0.075085 | 0.081911 | 0.095563 | 0 | 0 | 0 | 0 | 0 | 0 | 0.06117 | 0.184382 | 461 | 22 | 56 | 20.954545 | 0.718085 | 0.015184 | 0 | 0.095238 | 0 | 0 | 0.026549 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.095238 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ee9b23e78afb82a590a362f8bb474641574a4b8 | 2,876 | py | Python | server/apps/utils/timezone_utils.py | iotile/iotile_cloud | 9dc65ac86d3a730bba42108ed7d9bbb963d22ba6 | [
"MIT"
] | null | null | null | server/apps/utils/timezone_utils.py | iotile/iotile_cloud | 9dc65ac86d3a730bba42108ed7d9bbb963d22ba6 | [
"MIT"
] | null | null | null | server/apps/utils/timezone_utils.py | iotile/iotile_cloud | 9dc65ac86d3a730bba42108ed7d9bbb963d22ba6 | [
"MIT"
] | null | null | null | import datetime
import logging
import pytz
from django.utils import timezone
from django.utils.dateparse import parse_date, parse_datetime
# Module-level logger for conversion failures.
logger = logging.getLogger(__name__)

# Naive reference epoch used by nb_seconds_since_2000 (converted to UTC there).
Y2K = datetime.datetime(2000, 1, 1)
def formated_timedelta(total_seconds):
    """
    Format a duration given in seconds as H:M:S (no zero padding).
    :param total_seconds: from timedelta.total_seconds(), or a string
    :return: formatted string; strings that already look formatted (contain
        ':') or are not numeric are returned unchanged
    """
    if isinstance(total_seconds, str):
        if ':' in total_seconds:
            # Already formatted — pass it through untouched.
            return total_seconds
        try:
            # Assume an integer given as a string.
            total_seconds = int(total_seconds)
        except ValueError:
            # Unrecognized input — return as-is.
            return total_seconds
    hours = int(total_seconds // 3600)
    minutes = int((total_seconds % 3600) // 60)
    seconds = int(total_seconds % 60)
    return f'{hours}:{minutes}:{seconds}'
def convert_to_utc(dt):
    """Return *dt* (a datetime) as a timezone-aware UTC datetime.

    Aware datetimes are simply shifted to UTC.  Naive datetimes are first
    localized to Django's current timezone when one is active, otherwise
    they are treated as already being UTC.
    """
    utc = pytz.timezone('UTC')
    if dt.tzinfo:
        # Already aware: just convert.
        return dt.astimezone(utc)
    # Set the naive datetime in Django's current timezone, if there is one.
    if timezone.get_current_timezone():
        localized = pytz.timezone(timezone.get_current_timezone_name()).localize(dt)
        return localized.astimezone(utc)
    # No current timezone configured: assume the naive value is UTC.
    return utc.localize(dt)
def str_to_dt_utc(str):
    """Parse *str* into a timezone-aware datetime, or None on failure.

    Tries a full datetime first; if the string is only a date, it is
    promoted to midnight UTC of that date.
    """
    parsed = parse_datetime(str)
    if parsed:
        return parsed
    # Fall back: maybe only a date was given.
    d = parse_date(str)
    if d:
        return parse_datetime('{}T00:00:00Z'.format(d))
    return None
def force_to_utc(dt_str):
    """
    Parse *dt_str* into an aware DateTime, assuming UTC when no timezone
    marker is present.
    Strings that carry an explicit '+HH:MM'/'-HH:MM' offset are parsed as-is
    (keeping their own offset); anything else has 'Z' appended before
    parsing so naive strings become UTC.
    :param dt_str: string representing a date or datetime
    :return: aware DateTime, or None if the string cannot be parsed
    """
    # Fix: guard the [-6] probe — the original raised IndexError on strings
    # shorter than 6 characters.
    if len(dt_str) >= 6 and dt_str[-6] in ['-', '+']:
        return str_to_dt_utc(dt_str)
    if not dt_str.endswith('Z'):
        # Assume this dt is UTC even if not specified
        dt_str = dt_str + 'Z'
    utc_dt = str_to_dt_utc(dt_str)
    return utc_dt
def formatted_ts(dt):
    """Render *dt* as an ISO-8601 style UTC timestamp string ('...Z')."""
    return '{:%Y-%m-%dT%H:%M:%S}Z'.format(dt)
def display_formatted_ts(dt):
    """Render *dt* in a human-friendly 'YYYY-MM-DD HH:MM:SS' form."""
    return '{:%Y-%m-%d %H:%M:%S}'.format(dt)
def str_utc(dt):
    """Convert *dt* to UTC and return it as a 'Z'-suffixed timestamp string.

    Returns None (after logging) if the conversion yields nothing.
    """
    dt_utc = convert_to_utc(dt)
    if not dt_utc:
        logger.error("Fail to convert to datetime utc string")
        return None
    return formatted_ts(dt_utc)
def nb_seconds_since_2000(dt):
    """Return the number of seconds between *dt* and 2000-01-01 (both UTC).

    Returns None (after logging) if the conversion yields nothing.
    """
    dt_utc = convert_to_utc(dt)
    if not dt_utc:
        logger.error("Fail to convert to datetime utc string")
        return None
    return (dt_utc - convert_to_utc(Y2K)).total_seconds()
| 27.132075 | 92 | 0.638039 | 416 | 2,876 | 4.213942 | 0.257212 | 0.037079 | 0.034227 | 0.023959 | 0.187108 | 0.154022 | 0.124358 | 0.100399 | 0.065031 | 0.065031 | 0 | 0.016068 | 0.264256 | 2,876 | 105 | 93 | 27.390476 | 0.812382 | 0.221836 | 0 | 0.222222 | 0 | 0 | 0.075472 | 0.012425 | 0 | 0 | 0 | 0 | 0 | 1 | 0.126984 | false | 0 | 0.079365 | 0.031746 | 0.412698 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2eee783f48eb0a497623c6955f7e5e7628fdc3db | 1,745 | py | Python | SATguess.py | gradyap/random_quiz | 68d150731e2d3465e5789b3f5463ef4fc0da5167 | [
"MIT"
] | null | null | null | SATguess.py | gradyap/random_quiz | 68d150731e2d3465e5789b3f5463ef4fc0da5167 | [
"MIT"
] | null | null | null | SATguess.py | gradyap/random_quiz | 68d150731e2d3465e5789b3f5463ef4fc0da5167 | [
"MIT"
] | null | null | null | import random
#setting variables
ranwins = 0
conwins = 0
ties = 0
takes = 1000
for quiz in range (0, takes):
rancorrect=0
ranincorrect=0
concorrect=0
conincorrect=0
revisions=1000
possible = 4
# the consistent variable remains constant through the entire sequence
# and is compared to the same set of random answers as the random user
consistent = random.randint(1,possible)
for iteration in range (0,revisions):
question = random.randint(1,possible)
answer = random.randint(1,possible)
if question == answer:
rancorrect += 1
if consistent == answer:
concorrect += 1
#r = 100*(float(rancorrect)/float(revisions))
#c = 100*(float(concorrect)/float(revisions))
#print ("The random user is correct {:.1f} percent of the time".format(r))
#print ("The consisent user is correct {:.1f} percent of the time".format(c))
#print ("The random user's score was {}/{}".format(rancorrect,revisions))
#print ("The consistent user's score was {}/{}".format(concorrect,revisions))
if rancorrect > concorrect:
ranwins += 1
if concorrect >rancorrect:
conwins += 1
elif concorrect == rancorrect:
ties += 1
if ranwins > conwins:
print ("it is better to answer randomly {:.1f}% of the time".format(100*(float(ranwins)/float(takes - ties))))
if conwins > ranwins:
print ("it is better to answer consistently {:.1f}% of the time".format(100*(float(conwins)/float(takes - ties))))
print ("In a total of {} quizzes with {} questions each:".format(takes, revisions))
print ("Random Answer Wins: {}".format (ranwins))
print ("Consistent Answer Wins: {}".format (conwins))
print ("Ties: {}".format(ties))
| 35.612245 | 118 | 0.65788 | 227 | 1,745 | 5.057269 | 0.312775 | 0.027875 | 0.031359 | 0.052265 | 0.181185 | 0.148084 | 0.108014 | 0.06446 | 0.06446 | 0 | 0 | 0.030657 | 0.2149 | 1,745 | 48 | 119 | 36.354167 | 0.807299 | 0.309456 | 0 | 0 | 0 | 0 | 0.175585 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.029412 | 0 | 0.029412 | 0.176471 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2eee9d309fd7f5c6301b77cefb4ea94494b674f4 | 901 | py | Python | scheduler.py | kenseitrg/room-monitor | 38f6d7fbee1256bc89b1325a923dbb8a8b929488 | [
"Unlicense"
] | null | null | null | scheduler.py | kenseitrg/room-monitor | 38f6d7fbee1256bc89b1325a923dbb8a8b929488 | [
"Unlicense"
] | 1 | 2021-06-01T23:52:57.000Z | 2021-06-01T23:52:57.000Z | scheduler.py | kenseitrg/room-monitor | 38f6d7fbee1256bc89b1325a923dbb8a8b929488 | [
"Unlicense"
] | null | null | null | import threading
import time
from typing import Callable, List, Dict
class Scheduler():
    """Repeating timer that invokes *function* every *interval* seconds.

    Scheduling starts immediately on construction.  Each tick is anchored to
    ``next_call`` (the previous target plus the interval) rather than to
    "now", so the cadence does not drift with callback duration.
    """

    def __init__(self, interval: int, function: Callable, *args, **kwargs) -> None:
        self._timer = None            # pending threading.Timer, if armed
        self.is_running = False       # whether a tick is currently armed
        self.interval = interval
        self.function = function
        self.args = args
        self.kwargs = kwargs
        self.next_call = time.time()  # target time of the next tick
        self.start()

    def _run(self) -> None:
        # Re-arm first so the next tick is scheduled before the (possibly
        # slow) callback runs, then invoke the callback.
        self.is_running = False
        self.start()
        self.function(*self.args, **self.kwargs)

    def start(self) -> None:
        """Arm the timer for the next tick; no-op when already armed."""
        if self.is_running:
            return
        self.next_call += self.interval
        delay = self.next_call - time.time()
        self._timer = threading.Timer(delay, self._run)
        self._timer.start()
        self.is_running = True
def stop(self) -> None:
self._timer.cancel()
self.is_running = False | 29.064516 | 81 | 0.584906 | 107 | 901 | 4.757009 | 0.299065 | 0.058939 | 0.127701 | 0.10609 | 0.172888 | 0.094303 | 0 | 0 | 0 | 0 | 0 | 0 | 0.306326 | 901 | 31 | 82 | 29.064516 | 0.8144 | 0 | 0 | 0.185185 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.148148 | false | 0 | 0.111111 | 0 | 0.296296 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ef23bdc25e698b08ae27de75aab909449244e12 | 5,166 | py | Python | main.py | dcabezas98/DDSI | 011574ce8c084363090271cbc17510e4b443905c | [
"MIT"
] | null | null | null | main.py | dcabezas98/DDSI | 011574ce8c084363090271cbc17510e4b443905c | [
"MIT"
] | null | null | null | main.py | dcabezas98/DDSI | 011574ce8c084363090271cbc17510e4b443905c | [
"MIT"
] | 1 | 2021-01-09T18:27:23.000Z | 2021-01-09T18:27:23.000Z | import pymysql as mdb
import sys
class App(object):
    """Interfaz de texto para gestionar el videoclub.

    Todas las consultas que incorporan datos tecleados por el usuario se
    ejecutan de forma parametrizada (marcadores %s de pymysql) en lugar de
    concatenar cadenas, lo que evita inyecciones SQL.
    """

    def __init__(self, ip, user, passwd, db):
        # Menú de la interfaz de texto
        self.mainMenu = '''\n Videoclub:
    ------------------
    0: Salir.
    1: Dar de alta un socio.
    2: Dar de baja un socio.
    3: Cerrar un alquiler.
    4: Consultar DVDs para una película.
    5: Consultar turnos de un empleado.
    ------------------
    Elija una opción: '''
        # Datos para conectarse a la base de datos
        self.ip = ip
        self.user = user
        self.passwd = passwd
        self.db = db
        self.query = ''
        connection = mdb.connect(self.ip, self.user, self.passwd, self.db)
        self.cursor = connection.cursor()

    def altaSocio(self):
        """Da de alta un socio pidiendo y validando sus datos por teclado."""
        nombre = input('Inserte nombre y apellidos del socio: ')
        while len(nombre) <= 0 or len(nombre) > 60:
            nombre = input('Debe tener entre 1 y 60 caracteres. \nInserte nombre y apellidos del socio: ')
        dni = input('Inserte DNI del socio: ')
        while len(dni) != 9:
            dni = input('Debe tener 9 caracteres. \nInserte DNI del socio: ')
        telefono = input('Inserte teléfono del socio: ')
        while len(telefono) != 9:
            telefono = input('Debe tener 9 caracteres. \nInserte teléfono del socio: ')
        domicilio = input('Inserte domicilio del socio: ')
        while len(domicilio) <= 0 or len(domicilio) >= 60:
            domicilio = input('Debe tener entre 1 y 60 caracteres. Inserte el domicilio del socio: ')
        diaNacimiento = input('Inserte dia de nacimiento del socio: ')
        while int(diaNacimiento) < 1 or int(diaNacimiento) > 31:
            diaNacimiento = input('Debe estar entre 1 y 31. Inserte dia de nacimiento del socio: ')
        mesNacimiento = input('Inserte mes de nacimiento del socio: ')
        while int(mesNacimiento) < 1 or int(mesNacimiento) > 12:
            mesNacimiento = input('Debe estar entre 1 y 12. Inserte mes de nacimiento del socio: ')
        anioNacimiento = input('Inserte año de nacimiento del socio: ')
        fechaNacimiento = '-'.join([anioNacimiento, mesNacimiento, diaNacimiento])
        # Consulta parametrizada: los valores nunca se concatenan al SQL.
        self.query = 'INSERT INTO Socio VALUES (%s, %s, %s, %s, DATE(%s));'
        self.executeQuery((dni, nombre, telefono, domicilio, fechaNacimiento))
        self.cursor.execute("commit;")

    def cerrarAlquiler(self):
        """Cierra un alquiler anotando como devolución la fecha de hoy."""
        dni = input('Inserte DNI del socio: ')
        while len(dni) != 9:
            dni = input('Debe tener 9 caracteres. \nInserte DNI del socio: ')
        cod = input('Inserte el código del DVD alquilado: ')
        while len(cod) != 9:
            cod = input('Debe tener 9 caracteres. \nInserte el código del DVD alquilado: ')
        # Sólo puede haber un alquiler abierto con ese código y ese DNI.
        # Los '%%' escapan el '%' literal porque pymysql interpola con %s.
        self.query = ("UPDATE Alquiler SET fechaDevolucion="
                      "STR_TO_DATE(DATE_FORMAT(SYSDATE(),'%%Y-%%m-%%d'),'%%Y-%%m-%%d') "
                      "WHERE DNI=%s AND cod_DVD=%s AND fechaDevolucion IS NULL;")
        self.executeQuery((dni, cod))
        self.cursor.execute("commit;")

    def consultarDVDs(self):
        """Muestra los DVDs existentes de una película (título y año)."""
        titulo = input('Inserte título de la película: ')
        while len(titulo) <= 0 or len(titulo) >= 60:
            titulo = input('Debe tener entre 1 y 60 caracteres. Inserte título de la película: ')
        anio = int(input('Inserte el año de la película: '))
        self.query = "SELECT * from DVD WHERE titulo=%s AND anio=%s;"
        self.executeQuery((titulo, anio))
        rows = self.cursor.fetchall()
        print()
        for r in rows:
            print(r)

    def consultarTurnos(self):
        """Muestra los turnos (puesto y fecha) de un empleado."""
        dni = input('Inserte DNI del empleado: ')
        while len(dni) != 9:
            dni = input('Debe tener 9 caracteres. \nInserte DNI del empleado: ')
        self.query = "SELECT * from Turno WHERE dni=%s;"
        self.executeQuery((dni,))
        rows = self.cursor.fetchall()
        print()
        for r in rows:
            print(r[0], ' ', r[1].strftime("%m/%d/%Y"))

    def bajaSocio(self):
        """Da de baja (borra) un socio identificado por su DNI."""
        dni = input('Inserte DNI del socio: ')
        while len(dni) != 9:
            dni = input('Debe tener 9 caracteres. \nInserte DNI del socio: ')
        self.query = "DELETE from Socio WHERE DNI=%s;"
        self.executeQuery((dni,))
        self.cursor.execute("commit;")

    def executeQuery(self, params=None):
        """Ejecuta self.query; `params` (opcional, compatible hacia atrás)
        son los valores para los marcadores %s.  La traza muestra la
        consulta con marcadores, no con los datos interpolados."""
        print("Petición: " + self.query)
        return self.cursor.execute(self.query, params)

    def mainloop(self):
        """Bucle principal: muestra el menú y despacha la opción elegida."""
        while True:
            option = int(input(self.mainMenu))
            if option == 0:
                print('Hasta pronto.\t:D')
                exit()
            elif option == 1:
                self.altaSocio()
            elif option == 3:
                self.cerrarAlquiler()
            elif option == 4:
                self.consultarDVDs()
            elif option == 5:
                self.consultarTurnos()
            elif option == 2:
                self.bajaSocio()
            else:
                print('Opción no válida.')
# Main
if __name__ == "__main__":
    # Argumentos de línea de órdenes: <ip> <usuario> <contraseña> <bd>
    app = App(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])
    app.mainloop()
| 30.210526 | 139 | 0.578784 | 632 | 5,166 | 4.705696 | 0.246835 | 0.04573 | 0.042367 | 0.03228 | 0.370545 | 0.317081 | 0.226631 | 0.184264 | 0.173167 | 0.146268 | 0 | 0.01626 | 0.285714 | 5,166 | 170 | 140 | 30.388235 | 0.789702 | 0.026326 | 0 | 0.221239 | 0 | 0 | 0.333731 | 0.011343 | 0 | 0 | 0 | 0 | 0 | 1 | 0.070796 | false | 0.044248 | 0.017699 | 0 | 0.106195 | 0.061947 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ef25b45df51b2f3ee20981837c4f8427530bc4f | 2,533 | py | Python | pymc3/plots/energyplot.py | rsumner31/pymc3-2 | e824294ddfb45610536cad07394b8c290904c38d | [
"Apache-2.0"
] | 1 | 2019-03-01T06:05:48.000Z | 2019-03-01T06:05:48.000Z | pymc3/plots/energyplot.py | shunanzhang/pymc3 | fde52a4a69be1b0887a2f7861801fb48c941bbe6 | [
"Apache-2.0"
] | null | null | null | pymc3/plots/energyplot.py | shunanzhang/pymc3 | fde52a4a69be1b0887a2f7861801fb48c941bbe6 | [
"Apache-2.0"
] | null | null | null | import warnings
import numpy as np
try:
import matplotlib.pyplot as plt
except ImportError: # mpl is optional
pass
from .kdeplot import kdeplot
def energyplot(trace, kind='kde', figsize=None, ax=None, legend=True, shade=0.35, bw=4.5,
frame=True, kwargs_shade=None, **kwargs):
"""Plot energy transition distribution and marginal energy distribution in
order to diagnose poor exploration by HMC algorithms.
Parameters
----------
trace : result of MCMC run
kind : str
Type of plot to display (kde or histogram)
figsize : figure size tuple
If None, size is (8 x 6)
ax : axes
Matplotlib axes.
legend : bool
Flag for plotting legend (defaults to True)
shade : float
Alpha blending value for the shaded area under the curve, between 0
(no shade) and 1 (opaque). Defaults to 0.35
bw : float
Bandwidth scaling factor for the KDE. Should be larger than 0. The higher this number the
smoother the KDE will be. Defaults to 4.5 which is essentially the same as the Scott's rule
of thumb (the default rule used by SciPy). Only works if `kind='kde'`.
frame : bool
Flag for plotting frame around figure.
kwargs_shade : dicts, optional
Additional keywords passed to `fill_between` (to control the shade)
Returns
-------
ax : matplotlib axes
"""
if ax is None:
_, ax = plt.subplots(figsize=figsize)
try:
energy = trace['energy']
except KeyError:
warnings.warn('There is no energy information in the passed trace.')
return ax
series = [('Marginal energy distribution', energy - energy.mean()),
('Energy transition distribution', np.diff(energy))]
if figsize is None:
figsize = (8, 6)
if ax is None:
_, ax = plt.subplots(figsize=figsize)
if kwargs_shade is None:
kwargs_shade = {}
if kind == 'kde':
for label, value in series:
kdeplot(value, label=label, shade=shade, bw=bw, ax=ax, kwargs_shade=kwargs_shade,
**kwargs)
elif kind == 'hist':
for label, value in series:
ax.hist(value, alpha=shade, label=label, **kwargs)
else:
raise ValueError('Plot type {} not recognized.'.format(kind))
ax.set_xticks([])
ax.set_yticks([])
if not frame:
for spine in ax.spines.values():
spine.set_visible(False)
if legend:
ax.legend()
return ax
| 28.460674 | 99 | 0.620213 | 338 | 2,533 | 4.612426 | 0.426036 | 0.042335 | 0.006414 | 0.024375 | 0.074407 | 0.047466 | 0.047466 | 0.047466 | 0.047466 | 0 | 0 | 0.009439 | 0.288985 | 2,533 | 88 | 100 | 28.784091 | 0.856191 | 0.397947 | 0 | 0.243902 | 0 | 0 | 0.108511 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02439 | false | 0.04878 | 0.121951 | 0 | 0.195122 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ef3c2563763d28a137338103321c672c4fc64a8 | 6,852 | py | Python | portfolio/forms.py | DriptaSenapati/myPortfolio | 094bf3054dcbff17cd6a8bb6527addcdda780f7d | [
"MIT"
] | null | null | null | portfolio/forms.py | DriptaSenapati/myPortfolio | 094bf3054dcbff17cd6a8bb6527addcdda780f7d | [
"MIT"
] | null | null | null | portfolio/forms.py | DriptaSenapati/myPortfolio | 094bf3054dcbff17cd6a8bb6527addcdda780f7d | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileAllowed, FileRequired
from wtforms import StringField, PasswordField, SubmitField, BooleanField, FieldList, TextAreaField, IntegerField
from wtforms.validators import DataRequired, Length, Email, EqualTo, ValidationError
from wtforms.fields.html5 import DateField
from portfolio.models import User, Project, Job, Skills, Testimonial
from flask_login import current_user
class LoginForm(FlaskForm):
    """Admin login form: email + password."""
    # email = FieldList(StringField('Email', validators=[
    #     DataRequired(), Email()]), min_entries=2, max_entries=10)
    email = StringField('Email', validators=[DataRequired(), Email()])
    password = PasswordField('Password', validators=[DataRequired()])
    submit = SubmitField('Log In')
class Testimoni_form(FlaskForm):
    """Create/edit form for a Testimonial.

    Pass the record's primary key as ``id`` when editing so the uniqueness
    check ignores the record being edited.
    """
    name = StringField('Name', validators=[DataRequired()])
    desc = StringField('Description', validators=[DataRequired()])
    main = TextAreaField('Recommendation Data', validators=[DataRequired()])
    picture = FileField('Update Profile Picture', validators=[
        FileAllowed(['jpg', 'png'])])
    submit = SubmitField('Save')

    def __init__(self, id=None, *args, **kwargs):
        # id: primary key of the record being edited (None when creating).
        self.id = id
        super(Testimoni_form, self).__init__(*args, **kwargs)

    def validate_name(self, name):
        """Reject names already used by a different testimonial."""
        if self.id:
            # Editing: only complain when the name changed to one in use.
            past_name = Testimonial.query.filter_by(id=self.id).first()
            if name.data != past_name.name:
                data = Testimonial.query.filter_by(name=name.data).first()
                if data:
                    raise ValidationError('This name already exists.')
        elif not self.id:
            data = Testimonial.query.filter_by(name=name.data).first()
            if data:
                raise ValidationError('This name already exists.')
class changepictureForm(FlaskForm):
    """Profile-picture upload form (jpg/png only)."""
    picture = FileField('Update Profile Picture', validators=[
        FileAllowed(['jpg', 'png'])])
    submit_pic = SubmitField('Save Changes')

    def __init__(self, id=None, *args, **kwargs):
        # id is accepted for interface symmetry with the other forms; it is
        # stored but not used by any validator here.
        self.id = id
        super(changepictureForm, self).__init__(*args, **kwargs)
class SkillForm(FlaskForm):
    """Create/edit form for a skill (name + percentage value below 100)."""
    skillsname = StringField('Name', validators=[DataRequired()])
    # NOTE(review): DataRequired() on an IntegerField also rejects 0 —
    # confirm a zero skill value is meant to be invalid.
    skills = IntegerField('Value', validators=[DataRequired()])
    submit = SubmitField('Save')

    def __init__(self, id=None, *args, **kwargs):
        # id: primary key of the skill being edited (None when creating).
        self.id = id
        super(SkillForm, self).__init__(*args, **kwargs)

    def validate_skills(self, skills):
        # Values are percentages and must stay strictly below 100.
        if skills.data >= 100:
            raise ValidationError('Skill value must be less than 100')

    def validate_skillsname(self, skillsname):
        """Reject names already used by a different skill."""
        if self.id:
            name = Skills.query.filter_by(id=self.id).first()
            if skillsname.data != name.sk_name:
                data = Skills.query.filter_by(sk_name=skillsname.data).first()
                if data:
                    raise ValidationError('Skill name already exists')
        elif not self.id:
            data = Skills.query.filter_by(sk_name=skillsname.data).first()
            if data:
                raise ValidationError('Skill name already exists')
class ProjectForm(FlaskForm):
    """Create/edit form for a Project entry.

    Title and description must each be unique across projects; pass the
    record's primary key as ``id`` when editing so the checks ignore the
    record itself.
    """
    title = StringField('Title', validators=[DataRequired()])
    Description = TextAreaField('Description')
    URL = StringField('Project URL')
    Organization = StringField('Organization (if any)')
    Credential = StringField('Credential id')
    Certificate = StringField('Certificate Url')
    submit = SubmitField('Save')

    def __init__(self, id=None, *args, **kwargs):
        # id: primary key of the project being edited (None when creating).
        self.id = id
        super(ProjectForm, self).__init__(*args, **kwargs)

    def validate_title(self, title):
        """Reject titles already used by a different project."""
        if self.id:
            past_data = Project.query.filter_by(id=self.id).first()
            if past_data.p_name != title.data:
                data = Project.query.filter_by(p_name=title.data).first()
                if data:
                    raise ValidationError('Project name already exists')
        elif not self.id:
            data = Project.query.filter_by(p_name=title.data).first()
            if data:
                raise ValidationError('Project name already exists')

    def validate_Description(self, Description):
        """Reject descriptions already used by a different project."""
        if self.id:
            past_data = Project.query.filter_by(id=self.id).first()
            if past_data.p_description != Description.data:
                data = Project.query.filter_by(p_description=Description.data).first()
                if data:
                    raise ValidationError('Project description already exists')
        elif not self.id:
            data = Project.query.filter_by(p_description=Description.data).first()
            if data:
                raise ValidationError('Project description already exists')
class JobForm(FlaskForm):
    """Create/edit form for a Job entry of the current user.

    Pass the record's primary key as ``id`` when editing so the uniqueness
    and overlap checks ignore the record itself.
    """
    role = StringField('Job Role', validators=[DataRequired()])
    company = StringField('Company Name', validators=[DataRequired()])
    current = BooleanField('This is my Current Job')
    start = DateField('Start Date', validators=[DataRequired()], format=f'%Y-%m-%d')
    end = DateField('End Date', format=f'%Y-%m-%d')
    place = StringField('Location')
    jd = TextAreaField('Job Description', validators=[DataRequired()])
    submit = SubmitField('Save')

    def __init__(self, id=None, *args, **kwargs):
        # id: primary key of the job being edited (None when creating).
        self.id = id
        super(JobForm, self).__init__(*args, **kwargs)

    def validate_company(self, company):
        """Reject company names already used by another job."""
        if self.id:
            past_company = Job.query.filter_by(id=self.id).first()
            if company.data != past_company.company:
                data = Job.query.filter_by(company=company.data).first()
                if data:
                    raise ValidationError('Company name already exists')
        elif not self.id:
            data = Job.query.filter_by(company=company.data).first()
            if data:
                raise ValidationError('Company name already exists')

    def validate_end(self, end):
        """End date must follow start; an empty end requires 'current job'."""
        if end.data:
            if end.data < self.start.data:
                raise ValidationError('End date nust be greater than start date')
        else:
            if self.current.data == False:
                raise ValidationError('Please check current job option')

    def validate_start(self, start):
        """Start date must not fall inside another job's duration."""
        jobs = current_user.job
        for j in jobs:
            if j.id != self.id:
                # NOTE(review): jobs without an end date (current jobs) are
                # skipped here, so overlap with an open-ended job is never
                # detected — confirm whether that is intended.
                if j.end:
                    if j.start.date() <= start.data <= j.end.date():
                        raise ValidationError(f'Start date is overlapping with {j.company} duration')

    def validate_current(self, current):
        """Only one job may be marked as the current one."""
        jobs = current_user.job
        for j in jobs:
            if j.id != self.id:
                if (j.end == None) & (current.data == True):
                    raise ValidationError(f'Current job is already present for {j.company}')
| 42.559006 | 113 | 0.627116 | 764 | 6,852 | 5.503927 | 0.174084 | 0.038526 | 0.046373 | 0.035672 | 0.486326 | 0.455886 | 0.4283 | 0.426397 | 0.397146 | 0.397146 | 0 | 0.001961 | 0.255838 | 6,852 | 161 | 114 | 42.559006 | 0.82271 | 0.018535 | 0 | 0.485294 | 0 | 0 | 0.118697 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102941 | false | 0.014706 | 0.051471 | 0 | 0.404412 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ef74fe559909b6050f7a7dd49867a660fe27a5c | 14,986 | py | Python | src/HumanPose.py | burgessadrien/human-pose | a63f75f83f35f195b5195ce5518c1101f074196b | [
"MIT"
] | null | null | null | src/HumanPose.py | burgessadrien/human-pose | a63f75f83f35f195b5195ce5518c1101f074196b | [
"MIT"
] | null | null | null | src/HumanPose.py | burgessadrien/human-pose | a63f75f83f35f195b5195ce5518c1101f074196b | [
"MIT"
] | null | null | null | from imutils.object_detection import non_max_suppression
from imutils import paths, resize
import numpy as np
import argparse
import imutils
import cv2
from math import cos, sin, atan
import time
def find_body_part(mask, ratio_y, ratio_x):
    """Slide a window of size (mask_h/ratio_y, mask_w/ratio_x) over the
    thresholded mask and return the whitest window.

    Returns ``[window, [y, x]]`` where (y, x) is the bottom-right corner
    of the best window inside ``mask``.
    NOTE(review): assumes ``mask`` is a 3-channel uint8 image (white =
    [255, 255, 255]) larger than the window -- confirm with callers.
    """
    bodyPartAverage = 0
    # Window template; only its shape is used below.
    bodyPart = np.zeros(
        (int(mask.shape[0] / ratio_y), (int(mask.shape[1] / ratio_x)), 3), np.uint8)
    width = bodyPart.shape[1]
    height = bodyPart.shape[0]
    y, x = 0, 0
    # Exhaustive one-pixel-stride scan; keep the window with the highest
    # mean intensity (i.e. the most white pixels).
    for i in range(bodyPart.shape[0], mask.shape[0], 1):
        for j in range(bodyPart.shape[1], mask.shape[1], 1):
            tmp = mask[(i - height):i, (j - width):j]
            average = np.average(tmp[0:])  # tmp[x] = [255,255,255] when white
            if average > bodyPartAverage:
                bodyPartAverage = average
                y, x = i, j
    bodyPart = mask[(y - height):y, (x - width):x]
    return [bodyPart, [y, x]]
def find_angle(pt1, pt2):
    """Return the angle (radians) of the segment pt1 -> pt2 relative to
    the x-axis.  The caller must guarantee pt2[0] > pt1[0] (non-zero run,
    avoids division by zero)."""
    dx = pt2[0] - pt1[0]
    dy = pt2[1] - pt1[1]
    return atan(dy / dx)
def find_face(image):
    """Detect the first frontal face with a Haar cascade and estimate the
    head tilt from the line joining the two detected eyes.

    Returns ``(face_rect, theta)``: ``face_rect`` is the (x, y, w, h) of
    the first face found, ``theta`` the eye-line angle in radians (0 when
    fewer than two eyes are detected).  Draws the eye line onto ``image``
    as a side effect.  Raises IndexError if no face is found (``faces[0]``).
    """
    head_casc = "haarcascade_frontalface_default.xml"
    face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + head_casc)
    img_cpy = image.copy()
    gray_image = cv2.cvtColor(img_cpy, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(
        gray_image,
        scaleFactor=1.1,
        minNeighbors=5
    )
    eye_casc = "haarcascade_eye.xml"
    eye_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + eye_casc)
    (x, y, w, h) = faces[0]
    roi_gray = gray_image[y:y+h, x:x+w]
    face_image = image[y:y+h, x:x+w]
    eyes = eye_cascade.detectMultiScale(roi_gray)
    theta = 0
    if len(eyes) >= 2:
        # Use the first two detections; draw the eye line for debugging.
        eyes = eyes[0:2]
        (ex1, ey1, ew1, eh1) = eyes[0]
        (ex2, ey2, ew2, eh2) = eyes[1]
        p1 = (ex1 + ew1//2, ey1 + eh1//2)
        p2 = (ex2 + ew2//2, ey2 + eh2//2)
        cv2.line(face_image, p1, p2, (0, 0, 255), 2)
        theta = find_angle(p1, p2)
    # cv2.imshow("other image", image)
    return faces[0], theta
def verify_top_left_bottom_right(p1, p2):
    """Return (top_left, bottom_right) tuples for the rectangle spanned by
    p1 and p2, swapping coordinates where needed."""
    x1, y1 = p1[0], p1[1]
    x2, y2 = p2[0], p2[1]
    if x2 < x1:
        x1, x2 = x2, x1
    if y1 > y2:
        y1, y2 = y2, y1
    return (x1, y1), (x2, y2)
def get_rectangle_score(image, p1, p2):
    """Score the axis-aligned rectangle spanned by p1/p2 on a binary image.

    Returns ``(area, fill_ratio)`` where ``fill_ratio`` is the fraction of
    non-zero (foreground) pixels inside the rectangle.
    NOTE(review): a degenerate rectangle (zero width or height) divides by
    zero -- callers appear to always pass non-degenerate boxes; confirm.
    """
    p1, p2 = verify_top_left_bottom_right(p1, p2)
    rectangle = image[p1[1]:p2[1], p1[0]:p2[0]]
    non_zero_sum = cv2.countNonZero(rectangle)
    area = (p2[0] - p1[0])*(p2[1]-p1[1])
    return (area, non_zero_sum/area)
def fit_torso(thresh_image, torso_orig, torso_width, torso_height):
    """Fit a torso rectangle to the thresholded silhouette.

    Grid-searches scale (75%..145% in 15% steps) and rotation (-40..35 deg
    in 5 deg steps) of a ``torso_width`` x ``torso_height`` rectangle
    centred on ``torso_orig``, keeping the largest rectangle whose
    foreground fill ratio exceeds ``score_thresh``.

    Returns ``(corners, theta, rotate_orig)``: the four rotated corner
    points, the best rotation in degrees, and the rotation origin used.
    """
    scale = 0.25
    score_thresh = 0.93
    max_score = 0
    max_area = 0
    best_t1 = (0, 0)
    best_t2 = (0, 0)
    best_theta = 0
    best_img = thresh_image  # NOTE(review): never used
    best_rotate_orig = (0, 0)
    for scale_hund in range(75, 150, 15):
        for theta in range(-40, 40, 5):
            scale = scale_hund/100
            scaled_torso_height = int(scale * torso_height)
            scaled_torso_width = int(scale * torso_width)
            # t1: top left, t2: bottom right
            t1 = (torso_orig[0] - scaled_torso_width//2, torso_orig[1]-scaled_torso_height//2)
            t2 = (torso_orig[0] + scaled_torso_width//2, torso_orig[1]+scaled_torso_height//2)
            # Rotate the image (not the box) about the rectangle's
            # top-centre, then score the axis-aligned candidate box.
            rotate_orig = (t1[0] + (t2[0]-t1[0])//2, t1[1])
            rot_img = thresh_image.copy()
            rot_img = rotate_image(rot_img, rotate_orig, theta)
            (area, score) = get_rectangle_score(rot_img, t1, t2)
            if score > score_thresh and area > max_area:
                best_t1 = t1
                best_t2 = t2
                max_score = score
                max_area = area
                best_theta = theta
                best_rotate_orig = rotate_orig
    best_t = rotate_rectangle(best_t1, best_t2, best_theta, torso_orig)
    return (best_t, best_theta, best_rotate_orig)
def fit_limb(thresh_image, limb_orig, limb_width, limb_height, limb_side, theta_begin, theta_end, theta_iter):
    """Fit a limb rectangle anchored at ``limb_orig`` to the silhouette.

    Grid-searches independent width/height scales (75%..145% in 10% steps)
    and rotation (``theta_begin``..``theta_end`` stepping ``theta_iter``,
    degrees), keeping the largest rectangle whose foreground fill ratio
    exceeds ``score_thresh``.

    ``limb_side`` selects how the rectangle hangs off the anchor point:
    "left"/"right" anchor at a corner extending down/up respectively;
    any other value ("center") centres it horizontally below the anchor.

    Returns ``(corners, theta)``.
    """
    scale = 0.25
    score_thresh = 0.85
    max_score = 0
    max_area = 0
    best_t1 = (0, 0)
    best_t2 = (0, 0)
    best_theta = 0
    best_img = thresh_image  # NOTE(review): never used
    for scale_height_hund in range(75, 150, 10):
        for scale_width_hund in range(75, 150, 10):
            for theta in range(theta_begin, theta_end, theta_iter):
                scale_height = scale_height_hund/100
                scale_width = scale_width_hund/100
                scaled_limb_height = int(scale_height * limb_height)
                scaled_limb_width = int(scale_width * limb_width)
                # t1: top left, t2: bottom right
                t1 = limb_orig
                if limb_side == "left":
                    t2 = (limb_orig[0] + scaled_limb_width, limb_orig[1]+scaled_limb_height)
                elif limb_side == "right":
                    t2 = (limb_orig[0] + scaled_limb_width, limb_orig[1]-scaled_limb_height)
                else:
                    t1 = (limb_orig[0] - scaled_limb_width//2, limb_orig[1])
                    t2 = (limb_orig[0] + scaled_limb_width//2, limb_orig[1]+scaled_limb_height)
                # Rotate the image about the anchor, then score the
                # axis-aligned candidate box on the rotated mask.
                rotate_orig = limb_orig
                rot_img = thresh_image.copy()
                rot_img = rotate_image(rot_img, rotate_orig, theta)
                (area, score) = get_rectangle_score(rot_img, t1, t2)
                if score > score_thresh and area > max_area:
                    best_t1 = t1
                    best_t2 = t2
                    max_score = score
                    max_area = area
                    best_theta = theta
    thresh_color = thresh_image.copy()  # NOTE(review): never used
    best_t = rotate_rectangle(best_t1, best_t2, best_theta, limb_orig)
    return (best_t, best_theta)
def rotate_image(image, rot_orig, angle):
    """Return ``image`` rotated by ``angle`` degrees about ``rot_orig``.

    ``cv2.warpAffine`` always writes to a newly allocated output array, so
    the two defensive ``image.copy()`` calls the original made were
    redundant and are removed.
    """
    rot_mat = cv2.getRotationMatrix2D(rot_orig, angle, 1.0)
    return cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)
def rotate_point(pos, orig, angle):
    """Rotate ``pos`` about ``orig`` by ``angle`` degrees.

    Returns a tuple of ints (coordinates truncated).  A zero angle
    returns ``pos`` unchanged.
    """
    if angle == 0:
        return pos
    rad = np.radians(angle)
    c, s = cos(rad), sin(rad)
    dx = pos[0] - orig[0]
    dy = pos[1] - orig[1]
    return int(c * dx - s * dy + orig[0]), int(s * dx + c * dy + orig[1])
def rotate_rectangle(t1, t2, theta, origin):
    """Rotate the axis-aligned rectangle (t1 = top-left, t2 = bottom-right)
    by ``theta`` degrees about ``origin``.

    Returns the four rotated corners in (top-left, top-right,
    bottom-right, bottom-left) order.
    """
    (x1, y1), (x2, y2) = (t1[0], t1[1]), (t2[0], t2[1])
    corners = ((x1, y1), (x2, y1), (x2, y2), (x1, y2))
    return tuple(rotate_point(c, origin, theta) for c in corners)
def draw_rect(image, points):
    """Draw the closed quadrilateral ``points`` on ``image`` as red lines."""
    (p1, p2, p3, p4) = points
    for start, end in ((p1, p2), (p2, p3), (p3, p4), (p4, p1)):
        cv2.line(image, start, end, (0, 0, 255), thickness=2)
def remove_section(rectangle, thresh_image, theta, rotate_orig):
    """Blank (fill with 0) the convex quadrilateral ``rectangle`` in
    ``thresh_image`` and return the image (modified in place by
    ``cv2.fillConvexPoly``).

    NOTE(review): ``theta`` and ``rotate_orig`` are currently unused, as
    is the ``t1..t4`` unpack -- the corners are expected to already be
    rotated by the caller.
    """
    t1,t2,t3,t4 = rectangle
    np_rectangle = np.array(rectangle)
    image = cv2.fillConvexPoly(thresh_image, np_rectangle, 0)
    return image
def find_midway(p1, p2):
    """Return the integer midpoint of the segment p1-p2 (truncated)."""
    mid_x = int((p1[0] + p2[0]) / 2)
    mid_y = int((p1[1] + p2[1]) / 2)
    return (mid_x, mid_y)
def main():
    """Detect a person, fit body-part rectangles and display the result.

    Pipeline: train a MOG2 background model on ``images/background.jpg``,
    detect the person in ``images/adrien2.jpg`` with a HOG detector,
    threshold the foreground mask, locate the face with Haar cascades,
    then fit torso/arm/leg rectangles to the silhouette and draw them.
    Blocks on ``cv2.waitKey`` until a key is pressed.
    """
    set_width = 1500
    # both MOG and MOG2 can be used, with different parameter values
    backgroundSubtractor = cv2.createBackgroundSubtractorMOG2(detectShadows=True)
    person_image = "images/adrien2.jpg"

    # apply the algorithm for background images using learning rate > 0
    bgImageFile = "images/background.jpg"
    bg = cv2.imread(bgImageFile)
    bg = resize(bg, width=set_width)
    for i in range(1, 16):
        backgroundSubtractor.apply(bg, learningRate=0.5)

    # HOG person detector
    hog = cv2.HOGDescriptor()
    hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
    image = cv2.imread(person_image)
    image = resize(image, width=set_width)
    (rects, weights) = hog.detectMultiScale(image, winStride=(4, 4), padding=(8, 8), scale=1.05)
    rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
    pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)

    # keep the largest person detected (removed the unused max_rectangle)
    max_area = 0
    p1 = (0, 0)
    p2 = (0, 0)
    for (xA, yA, xB, yB) in pick:
        area = (xB - xA) * (yB - yA)
        if area > max_area:
            p1 = (xA, yA)
            # NOTE(review): bottom y is yA + yB, extending the HOG box
            # downward past yB -- looks intentional, but confirm.
            p2 = (xB, (yA + yB))
            max_area = area
    cv2.rectangle(image, p1, p2, (0, 255, 0), 2)

    # foreground mask via background subtraction + morphology
    fgmask = backgroundSubtractor.apply(image, learningRate=0)
    ret, fgmask = cv2.threshold(fgmask, 200, 255, cv2.THRESH_BINARY)
    kernel = np.ones((17, 17), np.uint8)
    fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
    kernel_close = np.ones((35, 35), np.uint8)
    # BUG FIX: the close step previously reused the 17x17 open kernel,
    # leaving kernel_close built but unused.
    fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_CLOSE, kernel_close)

    # find the face and remove it from the mask so it does not bias the
    # torso fit
    (x, y, w, h), face_theta = find_face(image)
    face_points = ((x, y), (x+w, y+h))
    face_h = h
    face_w = w
    remove_face = ((x, y), (x+w, y), (x+w, y+h), (x, y+h))
    face_orig = (x + w//2, y + h//2)
    fgmask = remove_section(remove_face, fgmask, face_theta, face_orig)

    (xA, yA) = p1
    (xB, yB) = p2
    # crop the thresholded image to the HOG bounding box
    human = fgmask[p1[1]:p2[1], p1[0]:p2[0]]
    # draw the face rectangle
    cv2.rectangle(image, face_points[0], face_points[1], (0, 255, 0), 2)

    # torso position/size estimated from the face height
    torso_width = int(1.8*face_h)
    torso_height = int(2.5*face_h)
    torso_orig = (x+w//2, y+h + torso_height//2 + 50)
    (tp1, tp2, tp3, tp4), torso_theta, torso_orig = fit_torso(human, (torso_orig[0]-xA, torso_orig[1]-yA), torso_width, torso_height)
    draw_rect(image, ((tp1[0]+xA, tp1[1]+yA), (tp2[0]+xA, tp2[1]+yA), (tp3[0]+xA, tp3[1]+yA), (tp4[0]+xA, tp4[1]+yA)))

    # shoulders/hips are the torso corners
    shoulder_right_pt = tp1
    shoulder_left_pt = tp2
    thigh_left_pt = tp4
    thigh_right_pt = tp3

    # upper limbs
    (uar1, uar2, uar3, uar4), uar_theta = fit_limb(human, shoulder_right_pt, int(face_h*1.1), face_h//2, "right", -90, -270, -5)
    draw_rect(image, ((uar1[0]+xA, uar1[1]+yA), (uar2[0]+xA, uar2[1]+yA), (uar3[0]+xA, uar3[1]+yA), (uar4[0]+xA, uar4[1]+yA)))
    (ual1, ual2, ual3, ual4), ual_theta = fit_limb(human, shoulder_left_pt, int(face_h*1.1), face_h//2, "left", -90, 90, 5)
    draw_rect(image, ((ual1[0]+xA, ual1[1]+yA), (ual2[0]+xA, ual2[1]+yA), (ual3[0]+xA, ual3[1]+yA), (ual4[0]+xA, ual4[1]+yA)))
    (ulr1, ulr2, ulr3, ulr4), ulr_theta = fit_limb(human, thigh_right_pt, int(face_h*1.5), int(face_h//2), "left", 0, 110, 5)
    draw_rect(image, ((ulr1[0]+xA, ulr1[1]+yA), (ulr2[0]+xA, ulr2[1]+yA), (ulr3[0]+xA, ulr3[1]+yA), (ulr4[0]+xA, ulr4[1]+yA)))
    (ull1, ull2, ull3, ull4), ull_theta = fit_limb(human, thigh_left_pt, int(face_h*1.5), int(face_h//2), "right", -180, -290, -5)
    draw_rect(image, ((ull1[0]+xA, ull1[1]+yA), (ull2[0]+xA, ull2[1]+yA), (ull3[0]+xA, ull3[1]+yA), (ull4[0]+xA, ull4[1]+yA)))

    # remove the fitted parts from the mask before fitting lower limbs
    no_torso = remove_section((tp1, tp2, tp3, tp4), human, torso_theta, torso_orig)
    upper_arm_right = remove_section((uar1, uar2, uar3, uar4), no_torso, uar_theta, shoulder_right_pt)
    upper_arm_left = remove_section((ual1, ual2, ual3, ual4), no_torso, ual_theta, shoulder_left_pt)
    upper_thigh_right = remove_section((ulr1, ulr2, ulr3, ulr4), no_torso, ulr_theta, thigh_right_pt)
    upper_thigh_left = remove_section((ull1, ull2, ull3, ull4), no_torso, ull_theta, thigh_left_pt)

    # lower limbs start at the midpoint of the upper limb's far edge
    forearm_right_pt = find_midway(uar3, uar2)
    (lar1, lar2, lar3, lar4), uar_theta = fit_limb(upper_arm_right, forearm_right_pt, int(face_h//2), face_h, "center", 180, -180, -5)
    draw_rect(image, ((lar1[0]+xA, lar1[1]+yA), (lar2[0]+xA, lar2[1]+yA), (lar3[0]+xA, lar3[1]+yA), (lar4[0]+xA, lar4[1]+yA)))
    forearm_left_pt = find_midway(ual3, ual2)
    (lal1, lal2, lal3, lal4), ual_theta = fit_limb(upper_arm_left, forearm_left_pt, int(face_h//2), face_h, "center", -180, 180, 5)
    draw_rect(image, ((lal1[0]+xA, lal1[1]+yA), (lal2[0]+xA, lal2[1]+yA), (lal3[0]+xA, lal3[1]+yA), (lal4[0]+xA, lal4[1]+yA)))
    shin_right_pt = find_midway(ulr3, ulr2)
    (llr1, llr2, llr3, llr4), ulr_theta = fit_limb(upper_thigh_right, shin_right_pt, int(face_h//2), face_h, "center", 180, -180, -5)
    draw_rect(image, ((llr1[0]+xA, llr1[1]+yA), (llr2[0]+xA, llr2[1]+yA), (llr3[0]+xA, llr3[1]+yA), (llr4[0]+xA, llr4[1]+yA)))
    shin_left_pt = find_midway(ull3, ull2)
    (lll1, lll2, lll3, lll4), ull_theta = fit_limb(upper_thigh_left, shin_left_pt, int(face_h//2), face_h, "center", -180, 180, 5)
    draw_rect(image, ((lll1[0]+xA, lll1[1]+yA), (lll2[0]+xA, lll2[1]+yA), (lll3[0]+xA, lll3[1]+yA), (lll4[0]+xA, lll4[1]+yA)))

    # BUG FIX: face_theta is in radians (from atan) while torso_theta is
    # in degrees (grid search step); convert before differencing.
    head_body_angle = abs(np.degrees(face_theta) - torso_theta)
    print(face_theta)
    print(torso_theta)
    image = resize(image, height=720)
    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(image, 'Head to body angle: ' + str(head_body_angle), (10, image.shape[0] - 20), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
    cv2.imshow("Boxes", image)
    cv2.waitKey()
    cv2.destroyAllWindows()
if __name__ == "__main__":
main()
# Body Part Approximations:
# head: width: height: 1 head
# torso: width: height: 3 heads
# upper arm: width: height: 1 heads
# lower arms: width: height: 1.5 heads
# upper leg: width: height: 2 heads
# lower leg: width: height: 2 heads
| 37.465 | 133 | 0.61117 | 2,348 | 14,986 | 3.716354 | 0.151618 | 0.013408 | 0.014898 | 0.009168 | 0.252464 | 0.206853 | 0.176828 | 0.155512 | 0.127206 | 0.12308 | 0 | 0.072638 | 0.243027 | 14,986 | 399 | 134 | 37.558897 | 0.69658 | 0.13239 | 0 | 0.127341 | 0 | 0 | 0.013673 | 0.004326 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052434 | false | 0 | 0.029963 | 0.003745 | 0.131086 | 0.007491 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ef86256c31b8117ad52ae4437715bf8e3b2b1af | 2,027 | py | Python | zekeconv/zekeconv.py | StanislavKlenin/zekeconv | a690bd9ece0151c0780dbb1780fdd4b7bcd7546c | [
"Apache-2.0"
] | null | null | null | zekeconv/zekeconv.py | StanislavKlenin/zekeconv | a690bd9ece0151c0780dbb1780fdd4b7bcd7546c | [
"Apache-2.0"
] | null | null | null | zekeconv/zekeconv.py | StanislavKlenin/zekeconv | a690bd9ece0151c0780dbb1780fdd4b7bcd7546c | [
"Apache-2.0"
] | null | null | null | #!/bin/env python
from __future__ import print_function
import argparse
import traceback
import sys
from . import io
from . import utl
def main():
    """Entry point: convert between a zeke dump file and a directory tree.

    ``--unpack`` and ``--pack`` are mutually exclusive and one is
    required.  Values for ``{{VARIABLES}}`` in the dump come from the
    process environment (``os.environ`` in the io calls).  Returns 1 on
    error (the caller passes it to ``sys.exit``), ``None`` (exit code 0)
    on success.
    """
    parser = argparse.ArgumentParser(description="convert between zeke dump files and directory structures")
    parser.add_argument("-f", "--file", metavar="DUMP", help="zeke dump (.zk) file", required=True)
    parser.add_argument("-s", "--source", metavar="SOURCE", help="original source (subroot) of zookeeper tree")
    parser.add_argument("-V", "--verbose", action='store_true', default=False, help="be more verbose")
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("-u", "--unpack", metavar="DIR", help="unpack zeke dump into directory structure")
    group.add_argument("-p", "--pack", metavar="DIR", help="pack directory structure to zeke dump")
    args = parser.parse_args()

    # global exception handler
    try:
        source = utl.preprocess_source_subpath(args.source)
        # progress messages go to stderr only in verbose mode
        log = sys.stderr if args.verbose else None
        # parser ensures these two are mutually exclusive
        if args.unpack:
            if (args.verbose):
                print("unpacking zeke dump '{1}' "
                      "into directory structure at '{0}'".format(args.unpack,
                                                                 args.file),
                      file=log)
            io.unpack(args.file, args.unpack, source, log)
        if args.pack:
            if (args.verbose):
                print("packing directory structure at '{0}' "
                      "into zeke dump '{1}'".format(args.pack, args.file),
                      file=log)
            io.pack(args.pack, args.file, source, log)
    except Exception as e:
        print("Error ({0}): {1}".format(type(e).__name__, e), file=sys.stderr)
        if args.verbose:
            print(traceback.format_exc(), file=sys.stderr)
        return 1
if __name__ == '__main__':
sys.exit(main())
| 38.245283 | 112 | 0.587074 | 238 | 2,027 | 4.87395 | 0.390756 | 0.041379 | 0.044828 | 0.046552 | 0.067241 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004848 | 0.287617 | 2,027 | 52 | 113 | 38.980769 | 0.798476 | 0.043907 | 0 | 0.105263 | 0 | 0 | 0.219638 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026316 | false | 0 | 0.157895 | 0 | 0.210526 | 0.131579 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ef8ca77af067b0f173acd5d082a652fd6a6e7d5 | 5,470 | py | Python | clin/run.py | herojan/clin | 442d909cfd4e996987801e7aa47746af6d1df049 | [
"MIT"
] | null | null | null | clin/run.py | herojan/clin | 442d909cfd4e996987801e7aa47746af6d1df049 | [
"MIT"
] | null | null | null | clin/run.py | herojan/clin | 442d909cfd4e996987801e7aa47746af6d1df049 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import logging
import os
from pathlib import Path
from typing import Optional
import click
from clin.clinfile import calculate_scope
from clin.config import ConfigurationError, load_config
from clin.nakadi import Nakadi, NakadiError
from clin.processor import Processor, ProcessingError
from clin.utils import configure_logging, pretty_yaml, pretty_json
from clin.yamlops import YamlLoader, load_manifest, load_yaml, YamlError
DEFAULT_YAML_LOADER = YamlLoader()
@click.group()
def cli():
"""Manage Nakadi resources"""
pass
@cli.command("apply")
@click.option("-e", "--env", required=True, type=str, help="Nakadi environment")
@click.option("-t", "--token", required=False, type=str, help="Bearer token")
@click.option(
"-X",
"--execute",
is_flag=True,
default=False,
help="Execute updates (default - false)",
)
@click.option(
"-v",
"--verbose",
is_flag=True,
default=False,
help="Verbose output (default - false)",
)
@click.option(
"-d", "--show-diff", is_flag=True, default=False, help="Show diff (default - false)"
)
@click.option(
"-p",
"--show-payload",
is_flag=True,
default=False,
help="Show Nakadi payload (default - false)",
)
@click.argument("file", type=click.Path(exists=True, dir_okay=False, readable=True))
def apply(
env: str,
token: Optional[str],
execute: bool,
verbose: bool,
show_diff: bool,
show_payload: bool,
file: str,
):
"""Create or update Nakadi resource from single yaml manifest file\n
Values to fill {{VARIABLES}} are taken from system environment"""
configure_logging(verbose)
try:
config = load_config()
kind, spec = load_manifest(Path(file), DEFAULT_YAML_LOADER, os.environ)
processor = Processor(config, token, execute, show_diff, show_payload)
processor.apply(env, kind, spec)
except (ProcessingError, NakadiError, ConfigurationError, YamlError) as ex:
logging.error(ex)
exit(-1)
except Exception as ex:
logging.exception(ex)
exit(-1)
@cli.command("process")
@click.option("-t", "--token", required=False, type=str, help="Bearer token")
@click.option("-X", "--execute", is_flag=True, default=False, help="Execute updates")
@click.option(
"-v",
"--verbose",
is_flag=True,
default=False,
help="Verbose output (default - false)",
)
@click.option(
"-d", "--show-diff", is_flag=True, default=False, help="Show diff (default - false)"
)
@click.option(
"-p",
"--show-payload",
is_flag=True,
default=False,
help="Show Nakadi payload (default - false)",
)
@click.argument("file", type=click.Path(exists=True, dir_okay=False, readable=True))
def process(
token: Optional[str],
execute: bool,
verbose: bool,
show_diff: bool,
show_payload: bool,
file: str,
):
"""Create or update multiple Nakadi resources from clin file"""
configure_logging(verbose)
try:
config = load_config()
processor = Processor(config, token, execute, show_diff, show_payload)
file_path: Path = Path(file)
master = load_yaml(file_path, DEFAULT_YAML_LOADER, os.environ)
scope = calculate_scope(master, file_path.parent, DEFAULT_YAML_LOADER)
event_types = [et for et in scope if et.kind == "event-type"]
subscriptions = [sub for sub in scope if sub.kind == "subscription"]
for task in event_types + subscriptions:
logging.debug(
"[%s] applying file %s to %s environment",
task.id,
task.path,
task.target,
)
processor.apply(task.target, task.kind, task.spec)
except (ProcessingError, ConfigurationError, YamlError) as ex:
logging.error(ex)
exit(-1)
except Exception as ex:
logging.exception(ex)
exit(-1)
@cli.command("dump")
@click.option("-e", "--env", required=True, type=str, help="Nakadi environment")
@click.option("-t", "--token", required=False, type=str, help="Bearer token")
@click.option(
"-o",
"--output",
default="yaml",
type=click.Choice(["yaml", "json"]),
help="Output format",
)
@click.option(
"-v",
"--verbose",
is_flag=True,
default=False,
help="Verbose output (default - false)",
)
@click.argument("event_type", type=str)
def dump(env: str, token: Optional[str], output: str, verbose: bool, event_type: str):
"""Print manifest of existing Nakadi event type"""
configure_logging(verbose)
try:
config = load_config()
if env not in config.environments:
logging.error(f"Environment not found in configuration: {env}")
exit(-1)
nakadi = Nakadi(config.environments[env].nakadi_url, token)
et = nakadi.get_event_type(event_type)
if not et:
logging.error("Event type not found in Nakadi %s: %s", env, event_type)
exit(-1)
if output.lower() == "yaml":
logging.info(pretty_yaml(et.to_spec()))
elif output.lower() == "json":
logging.info(pretty_json(et.to_spec()))
else:
logging.error("Invalid output format: %s", output)
exit(-1)
except (NakadiError, ConfigurationError) as ex:
logging.error(ex)
exit(-1)
except Exception as ex:
logging.exception(ex)
exit(-1)
if __name__ == "__main__":
cli()
| 28.195876 | 88 | 0.635466 | 678 | 5,470 | 5.028024 | 0.205015 | 0.059842 | 0.026401 | 0.044881 | 0.524787 | 0.50132 | 0.50132 | 0.464359 | 0.464359 | 0.432092 | 0 | 0.002359 | 0.225046 | 5,470 | 193 | 89 | 28.341969 | 0.80184 | 0.050457 | 0 | 0.546584 | 0 | 0 | 0.145345 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.024845 | false | 0.006211 | 0.068323 | 0 | 0.093168 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ef9edaf1fc48d11773cf0a061364a9a10b3eee7 | 13,668 | py | Python | movo_common/movo_ros/src/movo/movo_control_marker.py | zkytony/kinova-movo | 37d7454b2dc589d44133f3913f567b9cc321a66d | [
"BSD-3-Clause"
] | 1 | 2021-06-24T19:20:01.000Z | 2021-06-24T19:20:01.000Z | movo_common/movo_ros/src/movo/movo_control_marker.py | ALAN-NUS/kinova_movo | 05a0451f5c563359ae0ffe3280e1df85caec9e55 | [
"BSD-3-Clause"
] | null | null | null | movo_common/movo_ros/src/movo/movo_control_marker.py | ALAN-NUS/kinova_movo | 05a0451f5c563359ae0ffe3280e1df85caec9e55 | [
"BSD-3-Clause"
] | 1 | 2020-01-21T11:05:24.000Z | 2020-01-21T11:05:24.000Z | """--------------------------------------------------------------------
Copyright (c) 2017, Kinova Robotics inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\file movo_control_marker.py
\brief This module contains a collection of functions for controlling
the Vector platform through RVIZ with interactive markers.
\Platform: Ubuntu 16.04 LTS / ROS Kinetic
--------------------------------------------------------------------"""
import rospy
import tf
import sys
from utils import *
from system_defines import *
from movo_msgs.msg import *
from std_msgs.msg import UInt32
from geometry_msgs.msg import Twist
from interactive_markers.interactive_marker_server import *
from interactive_markers.menu_handler import *
from visualization_msgs.msg import *
from geometry_msgs.msg import Pose,Point,Quaternion,PoseStamped
import os
import threading
class MovoMarkerMenu:
    """RViz interactive-marker menu exposing MOVO waypoint and mode commands.

    A grey cube 2 m above the base carries two sub-menus:
      * WayPoints: Add/Start/Stop/Reset/Clear/Reload/Save, published as a
        bit flag on /movo/waypoint_cmd (UInt32).
      * Mode: Standby/Tractor, published as a ConfigCmd on /movo/gp_command.

    Replaces the original copy-pasted insert/setCheckState sequences with
    loops, and the python2-only ``dict.iteritems()`` (whose values were
    unused) with plain key iteration.
    """

    # Menu label -> bit flag published on /movo/waypoint_cmd.
    _WP_BITS = {"Add": 1 << 0, "Start": 1 << 1, "Stop": 1 << 2,
                "Reset": 1 << 3, "Clear": 1 << 4, "Save": 1 << 5,
                "Reload": 1 << 6}
    # Menu label -> operational-mode request sent to the base.
    _MODE_PARAMS = {"Standby": STANDBY_REQUEST, "Tractor": TRACTOR_REQUEST}

    def __init__(self, server, sim):
        """Build the menu marker on the given InteractiveMarkerServer.

        NOTE(review): ``sim`` is accepted for interface parity with
        MovoMarkerControl but is currently unused here.
        """
        self._server = server
        self.menu_handler = MenuHandler()

        # Build each sub-menu; remember handle -> label so the callbacks
        # can map a click back to its command.
        self.wp_menu_opt = {}
        wp_root = self.menu_handler.insert("WayPoints")
        for label in ("Add", "Start", "Stop", "Reset", "Clear", "Reload", "Save"):
            self.h_wp_last = self.menu_handler.insert(label, parent=wp_root, callback=self._waypointCb)
            self.menu_handler.setCheckState(self.h_wp_last, MenuHandler.UNCHECKED)
            self.wp_menu_opt[self.h_wp_last] = label

        self.mode_menu_opt = {}
        mode_root = self.menu_handler.insert("Mode")
        for label in ("Standby", "Tractor"):
            self.h_mode_last = self.menu_handler.insert(label, parent=mode_root, callback=self._modeCb)
            self.menu_handler.setCheckState(self.h_mode_last, MenuHandler.UNCHECKED)
            self.mode_menu_opt[self.h_mode_last] = label

        # Grey cube above the base that carries the menu.
        int_marker = InteractiveMarker()
        int_marker.header.frame_id = "base_link"
        int_marker.pose.position.z = 2.0
        int_marker.scale = 1
        int_marker.name = "movo_menu_marker"

        marker_box = Marker()
        marker_box.type = Marker.CUBE
        marker_box.scale.x = int_marker.scale * 0.45
        marker_box.scale.y = int_marker.scale * 0.45
        marker_box.scale.z = int_marker.scale * 0.45
        marker_box.color.r = 0.5
        marker_box.color.g = 0.5
        marker_box.color.b = 0.5
        marker_box.color.a = 1.0

        control = InteractiveMarkerControl()
        control.interaction_mode = InteractiveMarkerControl.MENU
        control.always_visible = False
        control.markers.append(marker_box)
        int_marker.controls.append(control)

        self._server.insert(int_marker, self._clicked)
        self.menu_handler.apply(self._server, "movo_menu_marker")
        self._server.applyChanges()

        self._msg_pub = rospy.Publisher('/movo/waypoint_cmd', UInt32, queue_size=10)
        self._cfg_pub = rospy.Publisher('/movo/gp_command', ConfigCmd, queue_size=10)

    def _waypointCb(self, feedback):
        """Publish the bit flag for the clicked waypoint entry and make it
        the only checked entry in the sub-menu."""
        handle = feedback.menu_entry_id
        self.menu_handler.setCheckState(handle, MenuHandler.CHECKED)
        self._msg_pub.publish(self._WP_BITS[self.wp_menu_opt[handle]])
        for key in self.wp_menu_opt:
            if key != handle:
                self.menu_handler.setCheckState(key, MenuHandler.UNCHECKED)
        self.menu_handler.reApply(self._server)
        self._server.applyChanges()

    def _modeCb(self, feedback):
        """Publish the operational-mode request for the clicked entry and
        make it the only checked entry in the sub-menu."""
        handle = feedback.menu_entry_id
        self.menu_handler.setCheckState(handle, MenuHandler.CHECKED)
        msg = ConfigCmd()
        msg.gp_cmd = 'GENERAL_PURPOSE_CMD_SET_OPERATIONAL_MODE'
        msg.gp_param = self._MODE_PARAMS[self.mode_menu_opt[handle]]
        self._cfg_pub.publish(msg)
        for key in self.mode_menu_opt:
            if key != handle:
                self.menu_handler.setCheckState(key, MenuHandler.UNCHECKED)
        self.menu_handler.reApply(self._server)
        self._server.applyChanges()

    def _clicked(self, feedback):
        # Clicking the cube itself (outside the menu) does nothing.
        pass
class MovoMarkerControl:
    """Interactive RViz marker that drives the Movo base.

    Marker displacement is converted into rate- and slew-limited Twist
    commands on /movo/int_marker/cmd_vel; the marker is recentred after each
    command, and the command is zeroed if marker feedback stops arriving.
    """

    def __init__(self, sim):
        self.lock = threading.RLock()
        # BUG FIX: timeout_check() reads this attribute but it was only ever
        # assigned inside processFeedback()'s throttled branch, so a timer
        # tick after an un-throttled first feedback raised AttributeError.
        self.stop_on_timeout = False

        # Velocity/acceleration limits: taken from the platform's active
        # configuration on real hardware, fixed defaults in simulation.
        if not sim:
            self.config_updated = False
            rospy.Subscriber("/movo/feedback/active_configuration", Configuration, self._update_configuration_limits)
            start_time = rospy.get_time()
            while ((rospy.get_time() - start_time) < 10.0) and not self.config_updated:
                rospy.sleep(0.05)
            if not self.config_updated:
                rospy.logerr("Timed out waiting for Movo feedback topics make sure the driver is running")
                sys.exit(0)  # (exit code preserved; an unreachable `return` followed here)
        else:
            self.x_vel_limit_mps = 0.5
            self.y_vel_limit_mps = 0.5
            self.yaw_rate_limit_rps = 0.5
            self.accel_lim = 1.0
            self.yaw_accel_lim = 1.0

        # Create an interactive marker server on the topic namespace.
        self._server = InteractiveMarkerServer("movo_marker_ctrl")
        self.br = tf.TransformBroadcaster()
        self.listener = tf.TransformListener()
        self.last_marker_update = rospy.get_time()
        self.last_feedback = None
        self.linear_scale = rospy.get_param('~linear_scale', 1.0)
        self.angular_scale = rospy.get_param('~angular_scale', 2.2)
        self.motion_cmd = Twist()
        self.motion_pub = rospy.Publisher('/movo/int_marker/cmd_vel', Twist, queue_size=10)

        # Marker with one control per commanded degree of freedom (x, y, yaw).
        int_marker = InteractiveMarker()
        int_marker.header.frame_id = "base_link"
        int_marker.name = "movo_twist_ctrl"
        int_marker.description = "MoVo Control Marker"
        int_marker.controls.append(self._axis_control("move_x", InteractiveMarkerControl.MOVE_AXIS, 1, 0, 0))
        int_marker.controls.append(self._axis_control("move_y", InteractiveMarkerControl.MOVE_AXIS, 0, 0, 1))
        int_marker.controls.append(self._axis_control("rotate_z", InteractiveMarkerControl.ROTATE_AXIS, 0, 1, 0))

        # Register the marker; the server calls processFeedback() on updates.
        self._server.insert(int_marker, self.processFeedback)
        self._server.applyChanges()
        MovoMarkerMenu(self._server, sim)
        rospy.Timer(rospy.Duration(0.5), self.timeout_check)

    @staticmethod
    def _axis_control(name, interaction_mode, x_axis, y_axis, z_axis):
        """Build one FIXED-orientation move/rotate control for the marker."""
        control = InteractiveMarkerControl()
        control.orientation_mode = InteractiveMarkerControl.FIXED
        control.orientation.w = 1
        control.orientation.x = x_axis
        control.orientation.y = y_axis
        control.orientation.z = z_axis
        control.name = name
        control.interaction_mode = interaction_mode
        return control

    def timeout_check(self, event):
        """Timer callback: stop the base if feedback went quiet for > 0.4 s."""
        now_time = rospy.get_time()
        dt = now_time - self.last_marker_update
        if (dt > 0.4) and (self.last_feedback is not None) and self.stop_on_timeout:
            self.motion_cmd.linear.x = 0.0
            self.motion_cmd.linear.y = 0.0
            self.motion_cmd.angular.z = 0.0
            self.motion_pub.publish(self.motion_cmd)
            self._server.setPose(self.last_feedback.marker_name, Pose())
            self._server.applyChanges()
            # One stop command per silence; re-armed by the next feedback.
            self.stop_on_timeout = False

    def _update_configuration_limits(self, config):
        """Store the platform's teleop limits from the configuration feedback."""
        self.x_vel_limit_mps = config.teleop_x_vel_limit_mps
        self.y_vel_limit_mps = config.teleop_y_vel_limit_mps
        self.yaw_rate_limit_rps = config.teleop_yaw_rate_limit_rps
        self.accel_lim = config.teleop_accel_limit_mps2
        self.yaw_accel_lim = config.teleop_yaw_accel_limit_rps2
        self.config_updated = True

    def processFeedback(self, feedback):
        """Convert marker displacement into a limited Twist and publish it."""
        with self.lock:
            p = feedback.pose.position
            o = feedback.pose.orientation
            now_time = rospy.get_time()
            dt = now_time - self.last_marker_update
            self.last_feedback = feedback
            if dt >= 0.01:  # throttle processing to <= 100 Hz
                self.last_marker_update = now_time
                (roll, pitch, yaw) = tf.transformations.euler_from_quaternion([o.x, o.y, o.z, o.w])
                vx = limit_f(p.x * self.linear_scale, self.x_vel_limit_mps)
                vy = limit_f(p.y * self.linear_scale, self.y_vel_limit_mps)
                wz = limit_f(yaw * self.angular_scale, self.yaw_rate_limit_rps)
                self.motion_cmd.linear.x = slew_limit(vx,
                                                      self.motion_cmd.linear.x,
                                                      self.accel_lim, dt)
                self.motion_cmd.linear.y = slew_limit(vy,
                                                      self.motion_cmd.linear.y,
                                                      self.accel_lim, dt)
                self.motion_cmd.angular.z = slew_limit(wz,
                                                       self.motion_cmd.angular.z,
                                                       self.yaw_accel_lim, dt)
                self.motion_pub.publish(self.motion_cmd)
                self.stop_on_timeout = True
                # Snap the marker back to the origin so displacement stays relative.
                self._server.setPose(feedback.marker_name, Pose())
                self._server.applyChanges()
| 44.090323 | 117 | 0.631694 | 1,654 | 13,668 | 4.990931 | 0.217654 | 0.038643 | 0.050878 | 0.018655 | 0.486856 | 0.418171 | 0.397698 | 0.342459 | 0.299091 | 0.299091 | 0 | 0.011165 | 0.272608 | 13,668 | 309 | 118 | 44.23301 | 0.819151 | 0.159204 | 0 | 0.275701 | 0 | 0 | 0.044236 | 0.008672 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037383 | false | 0.004673 | 0.065421 | 0 | 0.116822 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2efbe656bc169d6f0a48e304d2e05f7b7b7fd9bb | 412 | py | Python | setup.py | shaunwbell/EcoFOCIpy | 266e8e49b6ee1326aae9d8cf583a98f31d9f271d | [
"MIT"
] | null | null | null | setup.py | shaunwbell/EcoFOCIpy | 266e8e49b6ee1326aae9d8cf583a98f31d9f271d | [
"MIT"
] | 62 | 2021-03-23T18:40:32.000Z | 2022-01-18T22:34:47.000Z | setup.py | shaunwbell/EcoFOCIpy | 266e8e49b6ee1326aae9d8cf583a98f31d9f271d | [
"MIT"
] | null | null | null | # Copyright (c) 2021 EcoFOCIpy
"""Setup script for installing EcoFOCIpy."""
import sys
from setuptools import setup
if sys.version_info[0] < 3:
error = """
EcoFOCIpy requires the Python 3.8 or above, but will install on
all versions of python3.
Python {py} detected.
""".format(py='.'.join([str(v) for v in sys.version_info[:3]]))
print(error) # noqa: T001
sys.exit(1)
setup()
| 20.6 | 67 | 0.65534 | 61 | 412 | 4.393443 | 0.721311 | 0.074627 | 0.104478 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.043077 | 0.211165 | 412 | 19 | 68 | 21.684211 | 0.781538 | 0.191748 | 0 | 0 | 0 | 0 | 0.398773 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.181818 | 0 | 0.181818 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2efdf97fa86eb1016906f47bfcb322cf41ba7f62 | 288 | py | Python | opinion_mining/utils/remove_stop_words.py | ricardomilhazes/WellBeingAnalysis | f563ad2f059f9ff55e1f67a94e75ff1b5ec5f6ea | [
"MIT"
] | 2 | 2021-05-06T11:11:33.000Z | 2021-06-15T19:15:38.000Z | opinion_mining/utils/remove_stop_words.py | ricardomilhazes/WellBeingAnalysis | f563ad2f059f9ff55e1f67a94e75ff1b5ec5f6ea | [
"MIT"
] | 9 | 2021-04-07T13:47:23.000Z | 2021-06-28T16:08:52.000Z | opinion_mining/utils/remove_stop_words.py | ricardomilhazes/WellBeingAnalysis | f563ad2f059f9ff55e1f67a94e75ff1b5ec5f6ea | [
"MIT"
] | 2 | 2021-11-05T20:35:09.000Z | 2021-12-04T14:38:17.000Z | from nltk.corpus import stopwords
# Build the English stop-word set once at import time.
# NOTE(review): requires the NLTK "stopwords" corpus to already be available
# locally (nltk.download("stopwords")) -- confirm deployment provides it.
stop_words = set(stopwords.words("english"))
def remove_stop_words(sentence):
    """Return the tokens of *sentence* that are not English stop words.

    Args:
        sentence: iterable of word tokens.

    Returns:
        A list with every token of *sentence*, in order, that is not in the
        module-level ``stop_words`` set.
    """
    # Idiom: a comprehension replaces the manual append loop.
    return [word for word in sentence if word not in stop_words]
| 19.2 | 44 | 0.701389 | 38 | 288 | 5.131579 | 0.526316 | 0.184615 | 0.153846 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.225694 | 288 | 14 | 45 | 20.571429 | 0.874439 | 0.059028 | 0 | 0 | 0 | 0 | 0.026022 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c004e45ea18286ed9625f6273b5024c69f28852 | 7,374 | py | Python | ajastin.py | taloprojekti/SP2017 | c5d314169c051e924429f4c3362f3b1be244edb0 | [
"MIT"
] | null | null | null | ajastin.py | taloprojekti/SP2017 | c5d314169c051e924429f4c3362f3b1be244edb0 | [
"MIT"
] | null | null | null | ajastin.py | taloprojekti/SP2017 | c5d314169c051e924429f4c3362f3b1be244edb0 | [
"MIT"
] | 2 | 2017-02-06T15:30:38.000Z | 2019-03-05T15:01:40.000Z | def downloader(year, month, day, week):
    """Download Nordpool price data for the given date, at most once per day.

    Compares the requested date against the "downloader_time" stamp recorded
    in tasklists/tasklist.json; downloads (and updates the stamp) only when
    the date changed.  Returns 1 when a download was performed, 0 otherwise.
    """
    # downloads the data from Nordpool
    from download import download
    from jsonhandler import importJSON, writeJSON
    # Date stamp "YYYY-MM-DD".  NOTE: the local name shadows the stdlib
    # module name `time` inside this function.
    time = str("{:4d}-{:02d}-{:02d}".format(year, month,day))
    data = importJSON("tasklists/tasklist.json")
    if time != data["downloader_time"]:
        download(year, week)
        data["downloader_time"] = time
        writeJSON("tasklists/tasklist.json", data)
        return 1
    else:
        return 0
def rele(mode, PID, temp_req, temp_now, deadband_max, deadband_min, rele_pin):
    """Drive the heater relay via rele.switch() and return the state it reports."""
    import rele
    return rele.switch(mode, PID, temp_req, temp_now,
                       deadband_max, deadband_min, rele_pin)
def rele_cleanup(rele_pin):
    """Release the relay's GPIO pin (delegates to rele.cleanup) and report it."""
    import rele
    rele.cleanup(rele_pin)
    print("GPIO cleanup done")
def tempread_all():
    """Read the temperature sensors; returns tempread.read_temp()'s sequence
    (indexed elsewhere in this file as [0]=inside, [1]=outside)."""
    import tempread
    return tempread.read_temp()
def write_temp(pvm):
    """Record a telemetry sample tagged with timestamp string *pvm*
    (delegates to tempread.write_temp)."""
    import tempread
    tempread.write_temp(pvm)
def ptulkinta(day, month, year, hour):
    """Run the daily interpretation step for the given time
    (thin wrapper around tulkinta.main)."""
    import tulkinta
    tulkinta.main(day, month, year, hour)
def mode_switch(current_time):
    """Return 1 if *current_time* falls inside any configured running interval.

    *current_time* uses the format "%Y-%m-%d,%H:%M:%S"; the (start, end)
    pairs stored under "running_times" in tasklists/tasklist.json use
    "%Y-%m-%d %H:%M:%S".  Returns 0 when no interval matches.
    """
    from datetime import datetime
    from jsonhandler import importJSON

    data = importJSON("tasklists/tasklist.json")
    now = datetime.strptime(current_time, "%Y-%m-%d,%H:%M:%S")
    # BUG FIX: the old code wrapped running_times in a one-element list and
    # iterated that wrapper, so only running_times[0] was ever examined.
    # Check every configured interval.  (Unreachable legacy code after the
    # final return, which referenced undefined names, was removed.)
    for interval in data["running_times"]:
        start = datetime.strptime(interval[0], "%Y-%m-%d %H:%M:%S")
        end = datetime.strptime(interval[1], "%Y-%m-%d %H:%M:%S")
        if start < now < end:
            return 1
    return 0
def main():
    """Heating-controller main loop.

    Reads the configuration via the project `setup` module, then every 10 s:
    samples temperatures (fixed test values when main_switch == 0), runs the
    PID, drives the relay unless the price schedule says heating is off, and
    performs the daily download / interpretation housekeeping around
    midnight.  Stops and cleans up the GPIO on Ctrl-C.
    """
    import time
    import datetime
    import setup
    import PIDclass
    import checklist

    ret1 = 0
    print("Checking downloader state.")
    checklist.main()
    print("Initialising clock.")
    now = datetime.datetime.now()
    d = now.day
    m = now.month
    y = now.year
    week = datetime.date(y, m, d).isocalendar()[1]  # ISO week number
    if downloader(y, m, d, week) == 1:
        print("download complete")
    else:
        # BUG FIX: this message was a bare string expression and never printed.
        print("download data already exists")

    # Settings read from the setup file.
    data = setup.read_setup()
    main_switch = setup.main_switch(data)  # 1 = operating mode, 0 = testing mode
    rele_pin = setup.hardware_settings(data)
    Tfav, Tmin, Tmax = setup.temperatures(data)
    Pgain, Igain, Dgain, Imax, Imin = setup.pid_tuning(data)
    DBmin, DBmax = setup.relay_settings(data)
    # PID controller initialised with the gains from the setup file.
    PIDajo = PIDclass.PID(Pgain, Igain, Dgain, Imax, Imin)
    print("Setup complete:")
    print(" PID-Gains: P={:.1f}, I={:.1f}, D={:.1f}".format(Pgain,Igain,Dgain))
    print(" PID-Deadband: {:.1f} - {:.1f}".format(DBmin,DBmax))
    print(" Integrator range: {:.1f} - {:.1f}\n".format(Imin,Imax))

    flag = 0  # raised once per midnight so the daily download runs only once
    try:
        print("Entering loop")
        while ret1 == 0:
            time.sleep(10)
            now = datetime.datetime.now()
            if main_switch == 1:
                temp_all = tempread_all()
                temp_in = float(temp_all[0])
                temp_out = float(temp_all[1])
            elif main_switch == 0:
                # Fixed temperatures for testing.
                temp_in = 20.0
                temp_out = 10.0
            PID_curr = PIDajo.process(Tfav, temp_in)
            pvm = str("{:4d}-{:02d}-{:02d},{:02}:{:02d}:{:02d}".format(
                now.year, now.month, now.day, now.hour, now.minute, now.second))
            mode = mode_switch(pvm)  # 1 = inside a configured (no-heating) interval
            print("{:d}:{:d}:{:d}".format(now.hour, now.minute, now.second))
            print("{:.4f}".format(PID_curr))
            if main_switch == 1 and mode == 0:
                print("{}\n".format(rele(mode, PID_curr, 21, temp_in, DBmax, DBmin, rele_pin)))
            else:
                print("Heating is turned off due to higher price of electricity")
            # Telemetry.
            if main_switch == 1:
                write_temp(pvm)
            # Midnight housekeeping (at 00:00 or 00:01, once per night).
            if (now.minute == 0 and now.hour == 0) or (now.minute == 1 and now.hour == 0):
                print(flag)
                if flag == 0:
                    flag = 1  # run this branch only once per midnight
                    d = now.day
                    m = now.month
                    y = now.year
                    week = datetime.date(y, m, d).isocalendar()[1]
                    downloader(y, m, d, week)
                    # Legacy checklist stamp in DDMMYYYY form.
                    # BUG FIX: the old padding logic dropped the month's zero
                    # pad when d == 10 (e.g. "1052017" for 10.5.2017).
                    stringtowrite = "{:02d}{:02d}{:d}".format(d, m, y)
                    # BUG FIX: the file handle was opened without a context
                    # manager (leaked on exception).
                    with open("checklist.txt", "w") as file:
                        file.write(stringtowrite)
                    # Wait out the rest of minute 00 so the branch cannot rerun.
                    while now.minute == 0:
                        time.sleep(1)
                        # BUG FIX: was `datetime.now()` -- an AttributeError on
                        # the module object (imported as `import datetime`).
                        now = datetime.datetime.now()
                else:
                    continue
            if now.minute == 5 and now.hour == 0:
                flag = 0  # re-arm the midnight flag for the next night
            if (now.minute == 54 and now.hour == 3) or (now.minute == 55 and now.hour == 3):
                ptulkinta(now.day, now.month, now.year, now.hour)
    except KeyboardInterrupt:
        if main_switch == 1:
            rele_cleanup(rele_pin)
        print("Exiting loop\n")
        return
# Start the controller only when executed as a script, not on import
# (the unconditional call previously launched the infinite loop at import).
if __name__ == "__main__":
    main()
| 34.138889 | 157 | 0.527122 | 874 | 7,374 | 4.375286 | 0.289474 | 0.023536 | 0.005492 | 0.013598 | 0.158996 | 0.107741 | 0.088389 | 0.080544 | 0.080544 | 0.064854 | 0 | 0.0242 | 0.355574 | 7,374 | 215 | 158 | 34.297674 | 0.780513 | 0.134255 | 0 | 0.267081 | 0 | 0 | 0.099968 | 0.017029 | 0 | 0 | 0 | 0 | 0 | 1 | 0.049689 | false | 0.012422 | 0.099379 | 0 | 0.198758 | 0.10559 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c0089beb61c511d6d88942a670bc8195c3371e1 | 4,213 | py | Python | executor/tests/transpiler/test_bai_kubernetes_object_builder.py | gavinmbell/benchmark-ai-1 | a697e67d68b843fe9350e55871dad867bab5d51d | [
"Apache-2.0"
] | 6 | 2020-09-29T09:03:04.000Z | 2022-03-14T06:52:25.000Z | executor/tests/transpiler/test_bai_kubernetes_object_builder.py | gavinmbell/benchmark-ai-1 | a697e67d68b843fe9350e55871dad867bab5d51d | [
"Apache-2.0"
] | null | null | null | executor/tests/transpiler/test_bai_kubernetes_object_builder.py | gavinmbell/benchmark-ai-1 | a697e67d68b843fe9350e55871dad867bab5d51d | [
"Apache-2.0"
] | 4 | 2020-10-01T07:49:22.000Z | 2021-06-16T19:44:12.000Z | import base64
import json
import random
from unittest.mock import create_autospec
import pytest
from pytest import fixture
from bai_kafka_utils.executors.descriptor import BenchmarkDescriptor, DescriptorError
from transpiler.bai_knowledge import (
BaiKubernetesObjectBuilder,
SingleRunBenchmarkKubernetesObjectBuilder,
create_single_run_benchmark_bai_k8s_builder,
)
from transpiler.config import EnvironmentInfo
# Zone fixtures for the availability-zone selection tests.  The "whitelisted"
# values are assumed to appear in the bai_environment_info fixture's
# whitelist (defined in conftest) -- verify there if these change.
WHITELISTED_ZONE = "us-east-1a"
INVALID_ZONE = "us-east-1w"  # zone-shaped string that is not whitelisted
INVALID_ZONE_ID = "use1-az42"  # zone id with no whitelisted zone mapping
WHITELISTED_ZONE_ID = "use1-az1"  # presumably maps to WHITELISTED_ZONE
JOB_ID = "JOB_ID"  # opaque job identifier handed to the k8s object builder
def test_add_container_cmd(descriptor, bai_config, fetched_data_sources, bai_environment_info, benchmark_event):
    """Benchmark code plus args become the container command; no args field is set."""
    descriptor.ml.benchmark_code = "cmd"
    descriptor.ml.args = "arg1 arg2"

    builder = create_single_run_benchmark_bai_k8s_builder(
        descriptor, bai_config, fetched_data_sources, [], JOB_ID,
        environment_info=bai_environment_info, event=benchmark_event,
    )

    benchmark_container = builder.root.find_container("benchmark")
    assert benchmark_container.command == ["cmd", "arg1", "arg2"]
    assert "args" not in benchmark_container
def test_add_container_no_cmd(descriptor, bai_config, fetched_data_sources, bai_environment_info, benchmark_event):
    """Without benchmark code, the split args land in the container's args field."""
    descriptor.ml.benchmark_code = ""
    descriptor.ml.args = "arg1 arg2=abc"

    builder = create_single_run_benchmark_bai_k8s_builder(
        descriptor, bai_config, fetched_data_sources, [], JOB_ID,
        environment_info=bai_environment_info, event=benchmark_event,
    )

    benchmark_container = builder.root.find_container("benchmark")
    assert "command" not in benchmark_container
    assert benchmark_container.args == ["arg1", "arg2=abc"]
def test_choose_zone_passed(descriptor: BenchmarkDescriptor, bai_environment_info: EnvironmentInfo):
    """An explicitly requested whitelisted availability zone is used as-is."""
    descriptor.hardware.availability_zone = WHITELISTED_ZONE
    chosen = SingleRunBenchmarkKubernetesObjectBuilder.choose_availability_zone(
        descriptor, bai_environment_info
    )
    assert chosen == WHITELISTED_ZONE
def test_choose_zone_id(descriptor: BenchmarkDescriptor, bai_environment_info: EnvironmentInfo):
    """A whitelisted AWS zone id resolves to its corresponding zone name."""
    descriptor.hardware.aws_zone_id = WHITELISTED_ZONE_ID
    chosen = SingleRunBenchmarkKubernetesObjectBuilder.choose_availability_zone(
        descriptor, bai_environment_info
    )
    assert chosen == WHITELISTED_ZONE
def test_choose_zone_rnd(
    descriptor: BenchmarkDescriptor, bai_environment_info: EnvironmentInfo, mock_random: random.Random
):
    """With no zone constraint, a whitelisted zone is drawn via the injected RNG."""
    # NOTE(review): this clears descriptor.zone_id rather than
    # descriptor.hardware.aws_zone_id -- confirm that is intentional.
    descriptor.zone_id = None
    chosen = SingleRunBenchmarkKubernetesObjectBuilder.choose_availability_zone(
        descriptor, bai_environment_info, mock_random
    )
    assert chosen == WHITELISTED_ZONE
def test_choose_zone_invalid(
    descriptor: BenchmarkDescriptor, bai_environment_info: EnvironmentInfo, mock_random: random.Random
):
    """A non-whitelisted availability zone is rejected with DescriptorError."""
    descriptor.hardware.availability_zone = INVALID_ZONE
    with pytest.raises(DescriptorError):
        SingleRunBenchmarkKubernetesObjectBuilder.choose_availability_zone(
            descriptor, bai_environment_info, mock_random
        )
def test_choose_zone_id_invalid(
    descriptor: BenchmarkDescriptor, bai_environment_info: EnvironmentInfo, mock_random: random.Random
):
    """An unknown AWS zone id is rejected with DescriptorError."""
    descriptor.hardware.aws_zone_id = INVALID_ZONE_ID
    with pytest.raises(DescriptorError):
        SingleRunBenchmarkKubernetesObjectBuilder.choose_availability_zone(
            descriptor, bai_environment_info, mock_random
        )
def test_metrics(descriptor):
    """Metrics serialize to a two-entry JSON list with base64-encoded patterns."""
    metrics_json = BaiKubernetesObjectBuilder.get_metrics_from_descriptor(descriptor)
    assert metrics_json is not None

    metrics = json.loads(metrics_json)
    assert len(metrics) == 2

    expected = [
        ("accuracy", "accuracy=([-+]?\\d*\\.\\d+|\\d+)"),
        ("throughput", "throughput=([-+]?\\d*\\.\\d+|\\d+)"),
    ]
    for metric, (name, pattern) in zip(metrics, expected):
        assert metric["name"] == name
        assert base64.b64decode(metric["pattern"]).decode("utf-8") == pattern
@fixture
def mock_random() -> random.Random:
    """An autospecced Random whose choice() always returns the first element."""
    rng = create_autospec(random.Random)
    rng.choice.side_effect = lambda options: options[0]
    return rng
| 31.676692 | 115 | 0.751484 | 470 | 4,213 | 6.395745 | 0.210638 | 0.07984 | 0.083832 | 0.028277 | 0.642715 | 0.619428 | 0.619428 | 0.607119 | 0.527944 | 0.527944 | 0 | 0.01078 | 0.163304 | 4,213 | 132 | 116 | 31.916667 | 0.841986 | 0 | 0 | 0.363636 | 0 | 0 | 0.056017 | 0.015666 | 0 | 0 | 0 | 0 | 0.131313 | 1 | 0.10101 | false | 0.030303 | 0.090909 | 0.010101 | 0.212121 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c06caf4f37892f4a59ced3cc2b2789dc5ebd2dd | 370 | py | Python | SUL1/sample/Face_recognition/recog.py | ddddwee1/SULT | 0ff31b602d20dd8bc5cf4a6f4f5bc193d636e784 | [
"WTFPL"
] | 18 | 2017-12-19T07:55:17.000Z | 2020-03-12T11:07:45.000Z | SUL1/sample/Face_recognition/recog.py | ddddwee1/SULT | 0ff31b602d20dd8bc5cf4a6f4f5bc193d636e784 | [
"WTFPL"
] | 1 | 2019-04-18T10:31:41.000Z | 2019-05-23T06:47:24.000Z | SUL1/sample/Face_recognition/recog.py | ddddwee1/SULT | 0ff31b602d20dd8bc5cf4a6f4f5bc193d636e784 | [
"WTFPL"
] | 5 | 2018-06-06T00:51:48.000Z | 2020-03-12T11:07:46.000Z | import resnet3 as net
import numpy as np
import cv2
img = cv2.imread('9_1.jpg')
res1 = net.eval(img.reshape([-1,128,128,3]))
img = cv2.imread('10_1.jpg')
res2 = net.eval(img.reshape([-1,128,128,3]))
res1 = res1/np.linalg.norm(res1)
res2 = res2/np.linalg.norm(res2)
cosres = res1*res2
cosres = cosres.sum()
print('Sim1: ',cosres)
print('Sim2: ',np.arctan(cosres)) | 20.555556 | 44 | 0.681081 | 66 | 370 | 3.787879 | 0.409091 | 0.048 | 0.096 | 0.136 | 0.2 | 0.2 | 0.2 | 0.2 | 0 | 0 | 0 | 0.113846 | 0.121622 | 370 | 18 | 45 | 20.555556 | 0.655385 | 0 | 0 | 0 | 0 | 0 | 0.072776 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.230769 | 0 | 0.230769 | 0.153846 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c07a21caf70e36b4396fdb8399d6c1facb5f0c6 | 2,988 | py | Python | google-cloud-sdk/lib/surface/config/configurations/delete.py | bopopescu/searchparty | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | [
"Apache-2.0"
] | null | null | null | google-cloud-sdk/lib/surface/config/configurations/delete.py | bopopescu/searchparty | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | [
"Apache-2.0"
] | null | null | null | google-cloud-sdk/lib/surface/config/configurations/delete.py | bopopescu/searchparty | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | [
"Apache-2.0"
] | 3 | 2017-07-27T18:44:13.000Z | 2020-07-25T17:48:53.000Z | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to delete named configuration."""
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.config import completers
from googlecloudsdk.core import log
from googlecloudsdk.core.configurations import named_configs
from googlecloudsdk.core.console import console_io
from googlecloudsdk.core.resource import resource_printer
class Delete(base.SilentCommand):
  """Deletes a named configuration."""

  detailed_help = {
      'DESCRIPTION': """\
{description} You cannot delete a configuration that is active, even
when overridden with the --configuration flag. To delete the current
active configuration, first `gcloud config configurations activate`
another one.
See `gcloud topic configurations` for an overview of named
configurations.
""",
      'EXAMPLES': """\
To delete a named configuration, run:
$ {command} my_config
To delete more than one configuration, run:
$ {command} my_config1 my_config2
To list existing configurations, run:
$ gcloud config configurations list
""",
  }

  @staticmethod
  def Args(parser):
    """Adds args for this command."""
    parser.add_argument(
        'configuration_names',
        nargs='+',
        completer=completers.NamedConfigCompleter,
        help=('Name of the configuration to delete. '
              'Cannot be currently active configuration.'))

  def Run(self, args):
    """Deletes each named configuration after interactive confirmation."""
    # Fail the delete operation when we're attempting to delete the
    # active config.
    active_config = named_configs.ConfigurationStore.ActiveConfig()
    if active_config.name in args.configuration_names:
      raise named_configs.NamedConfigError(
          'Deleting named configuration failed because configuration '
          '[{0}] is set as active. Use `gcloud config configurations '
          'activate` to change the active configuration.'.format(
              active_config.name))
    # Show exactly what will be removed, then require confirmation
    # (PromptContinue aborts the command when the user answers "no").
    fmt = 'list[title="The following configurations will be deleted:"]'
    resource_printer.Print(args.configuration_names, fmt, out=log.status)
    console_io.PromptContinue(default=True, cancel_on_no=True)
    for configuration_name in args.configuration_names:
      named_configs.ConfigurationStore.DeleteConfig(configuration_name)
      log.DeletedResource(configuration_name)
| 36.888889 | 79 | 0.710843 | 352 | 2,988 | 5.960227 | 0.460227 | 0.028599 | 0.041945 | 0.015253 | 0.026692 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004691 | 0.215194 | 2,988 | 80 | 80 | 37.35 | 0.889979 | 0.248996 | 0 | 0.042553 | 0 | 0 | 0.452123 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042553 | false | 0 | 0.12766 | 0 | 0.212766 | 0.042553 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c07c0573397722d3a10b277c708abff836c1571 | 1,161 | py | Python | snekomatic/db.py | wgwz/snekomatic | 316f5c096aad7011953433324829e4614c283cd9 | [
"Apache-2.0",
"MIT"
] | 1 | 2021-07-07T14:36:06.000Z | 2021-07-07T14:36:06.000Z | snekomatic/db.py | wgwz/snekomatic | 316f5c096aad7011953433324829e4614c283cd9 | [
"Apache-2.0",
"MIT"
] | 1 | 2021-06-02T02:37:16.000Z | 2021-06-02T02:37:16.000Z | snekomatic/db.py | webknjaz/snekomatic | 3dbd84155aee5b485c7626ff2e471b949e971616 | [
"Apache-2.0",
"MIT"
] | null | null | null | import os
import psycopg2
class PersistentStringSet:
    """A set of strings persisted in a PostgreSQL table.

    Each named set is backed by its own table ``persistent_set_<name>``
    (created on first use) with a single text primary-key column, so
    membership tests and inserts are simple SQL queries.
    """

    def __init__(self, name):
        # SECURITY NOTE: *name* is interpolated into SQL identifiers below
        # (identifiers cannot be parameterized), so it must come from trusted
        # code, never from user input.
        self._table_name = f"persistent_set_{name}"

    def _conn(self):
        """Open a new connection and ensure the backing table exists.

        The caller owns the returned connection and must close it.
        """
        conn = psycopg2.connect(os.environ["DATABASE_URL"])
        cursor = conn.cursor()
        cursor.execute(
            f"""
            CREATE TABLE IF NOT EXISTS {self._table_name} (
                entry text PRIMARY KEY
            );
            """
        )
        conn.commit()
        return conn

    def __contains__(self, value):
        """Return True if *value* is stored in the set."""
        conn = self._conn()
        try:
            cursor = conn.cursor()
            cursor.execute(
                f"""
                SELECT exists
                (SELECT 1 FROM {self._table_name} WHERE entry = %s);
                """,
                (value,),
            )
            (exists,) = cursor.fetchone()
            return exists
        finally:
            # BUG FIX: psycopg2's `with conn` only ends the transaction; the
            # connection itself was never closed, leaking one per call.
            conn.close()

    def add(self, value):
        """Insert *value*; raises psycopg2.IntegrityError if already present."""
        conn = self._conn()
        try:
            cursor = conn.cursor()
            cursor.execute(
                f"""
                INSERT INTO {self._table_name} (entry) VALUES (%s);
                """,
                (value,),
            )
            conn.commit()
        finally:
            conn.close()  # see leak note in __contains__
| 25.8 | 71 | 0.459087 | 107 | 1,161 | 4.775701 | 0.411215 | 0.097847 | 0.101761 | 0.129159 | 0.2818 | 0.2818 | 0.223092 | 0.223092 | 0.223092 | 0.223092 | 0 | 0.004566 | 0.434109 | 1,161 | 44 | 72 | 26.386364 | 0.773212 | 0 | 0 | 0.435897 | 0 | 0 | 0.314384 | 0.018088 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102564 | false | 0 | 0.051282 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c0874ebe77cdf2687aca06582ce38fe6c2c0b1e | 2,193 | py | Python | Silver.py | aWonderWorld/bilibili-live-tools | deaf697e5fe675a4815aa05ec1997c30d6f7c737 | [
"MIT"
] | 192 | 2018-04-18T16:11:41.000Z | 2022-03-29T07:22:20.000Z | Silver.py | aWonderWorld/bilibili-live-tools | deaf697e5fe675a4815aa05ec1997c30d6f7c737 | [
"MIT"
] | 88 | 2018-03-24T17:30:27.000Z | 2019-06-01T00:20:39.000Z | Silver.py | aWonderWorld/bilibili-live-tools | deaf697e5fe675a4815aa05ec1997c30d6f7c737 | [
"MIT"
] | 43 | 2018-04-21T07:38:37.000Z | 2020-06-14T11:01:08.000Z | from online_net import OnlineNet
import utils
import printer
import asyncio
# Claim the silver treasure-chest reward
async def GetAward():
    """Claim one silver chest.

    Returns the get_silver response dict, or None once today's chests are
    exhausted (server code -10017).
    """
    chest_info = await OnlineNet().req('get_time_about_silver')
    if chest_info['code'] == -10017:
        # -10017: all of today's chests have already been collected
        printer.info(["# 今日宝箱领取完毕"])
        return None
    start = chest_info['data']['time_start']
    end = chest_info['data']['time_end']
    return await OnlineNet().req('get_silver', start, end)
async def GetAward_black():
    """Like GetAward(), but retry get_silver up to 50 times while the server
    keeps answering code 400; returns the first non-400 response, or None."""
    chest_info = await OnlineNet().req('get_time_about_silver')
    if chest_info['code'] == -10017:
        # -10017: all of today's chests have already been collected
        printer.info(["# 今日宝箱领取完毕"])
        return None
    start = chest_info['data']['time_start']
    end = chest_info['data']['time_end']
    for _ in range(50):
        response = await OnlineNet().req('get_silver', start, end)
        if response['code'] != 400:
            print('宝箱小黑屋的结果返回', response)
            return response
    return None
async def run():
    """Endless chest-claiming loop: claim when available, otherwise sleep
    until tomorrow / until the cooldown expires / for an hour on code 400."""
    while True:
        printer.info(["检查宝箱状态"], True)
        json_rsp = await GetAward()
        if json_rsp is None or json_rsp['code'] == -10017:
            # No chests left today: sleep past midnight (+5 min margin).
            sleeptime = (utils.seconds_until_tomorrow() + 300)
            await asyncio.sleep(sleeptime)
        elif not json_rsp['code']:
            # code 0: a chest was opened successfully.
            printer.info(["# 打开了宝箱"])
        elif json_rsp['code'] == 400:
            # code 400: temporarily blocked -- back off for an hour.
            print('小黑屋')
            await asyncio.sleep(3600)
            continue
            # NOTE(review): everything below is unreachable -- the brute-force
            # fan-out of 60 GetAward_black() tasks appears deliberately
            # disabled by the `continue` above; confirm before re-enabling.
            print('小黑屋, 暴力测试中')
            tasklist = []
            for i in range(60):
                task = asyncio.ensure_future(GetAward_black())
                tasklist.append(task)
            await asyncio.wait(tasklist, return_when=asyncio.FIRST_COMPLETED)
            json_rsp = await GetAward()
            sleeptime = 3 * 60 + 5
            await asyncio.sleep(sleeptime)
        else:
            printer.info(["# 继续等待宝箱冷却..."])
            # If 'surplus' (cooldown remaining, in minutes, float) disappears
            # from the API, fall back to 3 min; sleep it out +5 s margin.
            sleeptime = (json_rsp['data'].get('surplus', 3)) * 60 + 5
            await asyncio.sleep(sleeptime)
| 34.265625 | 85 | 0.564523 | 240 | 2,193 | 4.9875 | 0.333333 | 0.040936 | 0.056809 | 0.066834 | 0.442774 | 0.399332 | 0.399332 | 0.349206 | 0.349206 | 0.349206 | 0 | 0.03503 | 0.310078 | 2,193 | 63 | 86 | 34.809524 | 0.756114 | 0.063383 | 0 | 0.431373 | 0 | 0 | 0.106549 | 0.020528 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.078431 | 0 | 0.117647 | 0.176471 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |