hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
c11f7822c5bd888ca47b611e77c261bcc7260743
15,956
py
Python
tests/annotator/test_structured_incident_annotator.py
langstok/EpiTator
721fdc444382a0493702ee5976c987954753f47a
[ "Apache-2.0" ]
40
2017-05-27T03:53:22.000Z
2021-08-07T16:33:58.000Z
tests/annotator/test_structured_incident_annotator.py
langstok/EpiTator
721fdc444382a0493702ee5976c987954753f47a
[ "Apache-2.0" ]
25
2017-07-17T14:33:24.000Z
2021-04-09T10:27:56.000Z
tests/annotator/test_structured_incident_annotator.py
langstok/EpiTator
721fdc444382a0493702ee5976c987954753f47a
[ "Apache-2.0" ]
9
2017-11-15T05:13:53.000Z
2021-08-07T16:33:59.000Z
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import import unittest from epitator.annotator import AnnoDoc from epitator.structured_incident_annotator import StructuredIncidentAnnotator import datetime # import logging # from .test_utils import with_log_level def remove_empty_props(d): return { k: v for k, v in d.items() if v is not None } class TestStructuredIncidentAnnotator(unittest.TestCase): def setUp(self): self.maxDiff = None self.annotator = StructuredIncidentAnnotator() # @with_log_level(logging.getLogger('epitator.structured_incident_annotator'), logging.INFO) def test_count_table(self): doc = AnnoDoc(''' Type / New / Confirmed / Probable / Suspect / Total Cases / 3 / 293 / / 32 / 413 Deaths / 5 / 193 / 82 / 28 / 303 ''') doc.add_tier(self.annotator) metadatas = [ remove_empty_props(span.metadata) for span in doc.tiers['structured_incidents'] ] self.assertEqual(metadatas, [{ # Date/country?? # Need to include because association rules are different for tables. 
'type': 'caseCount', 'value': 3, 'attributes': [] }, { 'type': 'cumulativeCaseCount', 'value': 293, 'attributes': ['confirmed'] }, { 'type': 'cumulativeCaseCount', 'value': 32, 'attributes': ['suspected'] }, { 'type': 'cumulativeCaseCount', 'value': 413, 'attributes': [] }, { 'type': 'deathCount', 'value': 5, 'attributes': [] }, { 'type': 'cumulativeDeathCount', 'value': 193, 'attributes': ['confirmed'] }, { 'type': 'cumulativeDeathCount', 'value': 82, 'attributes': [] }, { 'type': 'cumulativeDeathCount', 'value': 28, 'attributes': ['suspected'] }, { 'type': 'cumulativeDeathCount', 'value': 303, 'attributes': [] }]) # @with_log_level(logging.getLogger('epitator.structured_incident_annotator'), logging.INFO) def test_location_count_table(self): doc = AnnoDoc(""" Distribution of reported x fever cases from 1 Jul 2017-17 Apr 2018 Federal units / Reported / Discarded / Under investigation / Confirmed / Deaths Acre (AC) / 1 / 1 / - / - / - Amapá (AP) / 8 / 2 / 6 / - / - Pará (PA) / 7 / 5 / 2 / - / - Amazonas (AM) / 42 / 31 / 11 / - / - Rondônia (RO) / 9 / 8 / 1 / - / - Roraima (RR) / 3 / 3 / - / - / - Tocantins (TO) / 17 / 15 / 2 / - / - Bahia (BA) / 62 / 35 / 27 / - / - Ceará (CE) / 4 / 3 / 1 / - / - Maranhão (MA) / 7 / 5 / 2 / - / - Paraíba (PB) / 5 / - / 5 / - / - Pernambuco (PE) / 6 / 4 / 2 / - / - Piauí (PI) / 9 / 6 / 3 / - / - Rio Grande do Norte (RN) / 3 / 2 / 1 / - / - Sergipe (SE) / 2 / 2 / - / - / - Distrito Federal (DF) / 74 / 43 / 30 / 1 / 1 Goiás (GO) / 66 / 37 / 29 / - / - Mato Grosso (MT) / 10 / 8 / 2 / - / - Mato Grosso do Sul (MS) / 13 / 10 / 3 / - / - Espírito Santo (ES) / 119 / 88 / 25 / 6 / 1 Minas Gerais (MG) / 1444 / 656 / 294 / 494 / 156 Rio de Janeiro (RJ) / 453 / 172 / 84 / 197 / 64 São Paulo (SP) / 2558 / 1655 / 444 / 459 / 120 Paraná (PR) / 110 / 102 / 8 / - / - Rio Grande do Sul (RS) / 49 / 34 / 15 / - / - Santa Catarina (SC) / 45 / 22 / 23 / - / - Total / 5131 / 2951 / 1023 / 1157 / 342 """) doc.add_tier(self.annotator) metadatas = [ 
remove_empty_props(span.metadata) for span in doc.tiers['structured_incidents'] ] incident = metadatas[0] self.assertEqual(incident['value'], 1) self.assertEqual(incident['type'], 'caseCount') self.assertEqual(incident['location']['geonameid'], '3665474') self.assertEqual( incident['dateRange'], [datetime.datetime(2017, 7, 1), datetime.datetime(2018, 4, 18)]) def test_date_count_table(self): doc = AnnoDoc(""" Cumulative case data Report date / Cases / Deaths / New cases per week 26 Jun 2017 / 190 / 10 / 8 Sep 2017 / 300 / 12 / 9 Sep 2017 / 309 / 13 / 15 Sep 2017 / 319 / 14 / 6 Oct 2017 / 376 / 14 / 13 Oct 2017 / 20 Oct 2017 / 431 / 17 / 34 27 Oct 2017 / 457 / 18 / 26 3 Nov 2017 / 486 / 19 / 29""") doc.add_tier(self.annotator) metadatas = [ remove_empty_props(span.metadata) for span in doc.tiers['structured_incidents'] ] self.assertEqual(metadatas[-1], { 'value': 29, 'type': 'caseCount', 'attributes': [], 'dateRange': [ datetime.datetime(2017, 10, 28), datetime.datetime(2017, 11, 4)] }) self.assertEqual(metadatas[-2], { 'value': 19, 'type': 'cumulativeDeathCount', 'attributes': [], 'dateRange': [ datetime.datetime(2017, 11, 3), datetime.datetime(2017, 11, 4)] }) def test_date_count_table_2(self): doc = AnnoDoc(""" | Report date | Cases | | 6 Oct 2017 | 26 | | 13 Oct 2017 | 29 | | 20 Oct 2017 | 34 |""") doc.add_tier(self.annotator) metadatas = [ remove_empty_props(span.metadata) for span in doc.tiers['structured_incidents'] ] self.assertEqual(metadatas, [{ 'value': 26, 'type': 'caseCount', 'attributes': [], 'dateRange': [ datetime.datetime(2017, 9, 30), datetime.datetime(2017, 10, 7)] }, { 'value': 29, 'type': 'caseCount', 'attributes': [], 'dateRange': [ datetime.datetime(2017, 10, 7), datetime.datetime(2017, 10, 14)] }, { 'value': 34, 'type': 'caseCount', 'attributes': [], 'dateRange': [ datetime.datetime(2017, 10, 14), datetime.datetime(2017, 10, 21)] }]) def test_non_incident_counts_and_species(self): doc = AnnoDoc(""" Species / Morbidity / Mortality / 
Susceptible / Cases / Deaths / Killed and disposed of / Slaughtered Orange Spotted Snakehead (_Channa aurantimaculata_) / 100% / 1% / 32 / 30 / 1 / 28 / 3 """) doc.add_tier(self.annotator) metadatas = [ remove_empty_props(span.metadata) for span in doc.tiers['structured_incidents'] ] self.assertEqual(metadatas, [{ 'attributes': [], 'type': 'caseCount', 'value': 30, 'species': { 'id': 'tsn:642745', 'label': 'Channa aurantimaculata'} }, { 'attributes': [], 'type': 'deathCount', 'value': 1, 'species': { 'id': 'tsn:642745', 'label': 'Channa aurantimaculata'} }]) def test_unknown_species_and_space_delimited_counts(self): doc = AnnoDoc(""" The epidemiological statistics accumulated since the start of the event are included in the following "outbreak summary": Species / Susceptible / Cases / Deaths / Killed and disposed of / Slaughtered Birds / 6 368 632 / 1 303 173 / 1 297 617 / 3 850 608 / 0 Black-crowned night-heron / not available / 1 / 1 / 0 / 0 Passeridae (unidentified) / not available / 2 / 2 / 0 / 0 Pale thrush / not available / 1 / 1 / 0 / 0 """) doc.add_tier(self.annotator) metadatas = [ remove_empty_props(span.metadata) for span in doc.tiers['structured_incidents'] ] self.assertEqual(metadatas[0], { 'attributes': [], 'type': 'caseCount', 'value': 1303173, 'species': {'id': 'tsn:174371', 'label': 'Aves'} }) self.assertEqual(metadatas[-1], { 'attributes': [], 'type': 'deathCount', 'value': 1, 'species': "Cannot parse" }) # @with_log_level(logging.getLogger('epitator.structured_incident_annotator'), logging.INFO) def test_multi_section_table(self): doc = AnnoDoc(""" Disease update -------------- Confirmed, probable, and suspect cases and deaths from Ebola virus disease in Guinea, Liberia, and Sierra Leone, as of 30 Jun 2014 Type / New* / Confirmed / Probable / Suspect / Totals by country Guinea Cases / 3 / 293 / 88 / 32 / 413 Deaths / 5 / 193 / 82 / 28 / 303 Liberia Cases / 8 / 52 / 21 / 34 / 107 Deaths / 7 / 33 / 17 / 15 / 65 Sierra Leone Cases / 11 / 199 / 31 / 
9 / 239 Deaths / 2 / 65 / 29 / 5 / 99 Totals Cases / 22 / 544 / 140 / 75 / 759 Deaths / 14 / 291 / 128 / 48 / 467 *New cases were reported between 25-29 Jun 2014 """) doc.add_tier(self.annotator) metadatas = [ remove_empty_props(span.metadata) for span in doc.tiers['structured_incidents'] ] self.assertEqual(metadatas[4]['type'], 'cumulativeCaseCount') self.assertEqual(metadatas[4]['dateRange'], [ datetime.datetime(2014, 6, 30, 0, 0), datetime.datetime(2014, 7, 1, 0, 0)]) self.assertEqual(metadatas[4]['value'], 413) self.assertEqual(metadatas[4]['location']['geonameid'], '2420477') def test_number_in_header(self): doc = AnnoDoc(""" Health Jurisdiction / Cases (percentage) / Incidence rate per 100 000 Person-Years Salt Lake county / 162 (68.9) / 14.4 Utah county / 45 (19.1) / 7.6 Bear River / 5 (2.1) / 2.8 Southeast Utah / 2 (0.9) / 5.0 """) doc.add_tier(self.annotator) metadatas = [ remove_empty_props(span.metadata) for span in doc.tiers['structured_incidents'] ] self.assertEqual(metadatas[0]['type'], 'caseCount') self.assertEqual(metadatas[0]['value'], 162) self.assertEqual(metadatas[0]['location']['geonameid'], '5781004') # @with_log_level(logging.getLogger('epitator.structured_incident_annotator'), logging.INFO) def test_unusual_format(self): doc = AnnoDoc(""" For subscribers' convenience, we hereby reproduce Israel's annual rabies statistics since 2014: Year // badger / cat / fox / jackal / wolf / dog / cattle / sheep / horse // total 2014 // 3 / 0 / 2 / 2 / 4 / 2 / 1 / 0 / 0 // 14 2015 // 12 / 1 / 1 / 3 / 0 / 1 / 7 / 0 / 1 // 20 2016 // 12 / 0 / 7 / 5 / 0 / 0 / 5 / 0 / 1 // 30 2017 // 10 / 2 / 0 / 47 / 0 / 0 / 14 / 1 / 0 // 74 2018 // 4 / 0 / 0 / 35 / 0 / 1 / 7 / 1 / 1 // 51 """) doc.add_tier(self.annotator) metadatas = [ remove_empty_props(span.metadata) for span in doc.tiers['structured_incidents'] ] # A value from row one is not used because 2014 is missed by the date # parser although other years are caught. # The index refers to the badgers in 2015. 
It is an unintuitive index # because some species are not being parsed so their values are skipped. self.assertEqual(metadatas[2]['type'], 'caseCount') self.assertEqual(metadatas[2]['value'], 12) self.assertEqual(metadatas[2]['species']['label'], 'Taxidea taxus') self.assertEqual(metadatas[2]['dateRange'], [ datetime.datetime(2015, 1, 1, 0, 0), datetime.datetime(2016, 1, 1, 0, 0)]) def test_date_association(self): doc = AnnoDoc(""" The outbreak strains of salmonella have infected a reported 961 people in 48 states [only Alaska and Delaware have not reported cases - Mod.LL] and the District of Columbia. Illnesses started on dates ranging from 4 January 2017 to 31 July 2017. State / Number of Cases Alabama / 25 Arizona / 6 Arkansas / 9 California / 54 Virginia / 56 Washington / 22 West Virginia / 17 Wisconsin / 24 Wyoming / 10""") doc.add_tier(self.annotator) metadatas = [ remove_empty_props(span.metadata) for span in doc.tiers['structured_incidents'] ] self.assertEqual(metadatas[0]['dateRange'], [ datetime.datetime(2017, 1, 4, 0, 0), datetime.datetime(2017, 8, 1, 0, 0)]) def test_fp_table_merging(self): doc = AnnoDoc(""" Non-Latin Caribbean Bahamas / week 30 [ending 25 Jul 2014] / 0 / 0 / 6 / 0 Dominica / week 28 [ending 11 Jul 2014] / 3559 / 141 / 0 / 0 Jamaica / week 29 [ending 18 Jul 2014] / 0 / 0 / 1 / 0 Turks & Caicos Islands / week 28 [ending 11 Jul 2014] / 0 / 10 / 7 / 0 US Virgin Islands / week 29 [ending 18 Jul 2014] / 0 / 2 / 7 / 0 Andean area: Bolivia / 9 / 0 / 0 / 3 / 0 Colombia / 30 / 0 / 0 / 1 / 0 Peru / 28 / 0 / 0 / 3 / 0 """) doc.add_tier(self.annotator) def test_unparsable_date_bug(self): doc = AnnoDoc(""" Cases by Country / Week updated / Probable / Conf. / Virus type / DHF severe / Deaths Hispanic Caribbean Dominican Republic / 17 [week ending 28 Apr 2017] / 315 / 0 / D? / 15 / 0 Puerto Rico / 19 [week ending 12 May 2017] / 9 / 0 / D2 / 0 / 0 English, French, Dutch Caribbean American Virgin Islands / 19 [week ending 12 May 2017] / 1 / 1 / D? 
/ 0 / 0 Andean Bolivia / 17 / [week ending 28 Apr 2017] / 4260 / 0 / D? / 34 / 0 Colombia / 20 [week ending 19 May 2017] / 12 552 / 8357 / D? / 131 / 36 Ecuador / 17 [week ending 28 Apr 2017] / 6075 / 6075 / D? / 6 / 3 Peru / 20 [week ending 19 May 2017] / 44 971 / 12 717 / D 2,3 / 137 / 54 Venezuela / 17 [week ending 28 Apr 2017] / 2722 / 309 / D? / 7 / 0 """) doc.add_tier(self.annotator) def test_non_integer_value(self): doc = AnnoDoc(""" ****** [6] India, Pune, Marharastra, fatal human case Date: Mon 4 Jul 2016, 12.57 AM IST Source: The Times of India [edited] """) doc.add_tier(self.annotator) self.assertEqual(len(doc.tiers['structured_incidents']), 0) def test_multiline_title(self): doc = AnnoDoc(""" Arizona, 3 May 2018. More text Species / Susceptible / Cases / Deaths / Killed and disposed of / Slaughtered Birds / 3000/ 1500 / 1500 / 0 / 0 Affected population: Commercial layers """) doc.add_tier(self.annotator) # TODO: 1500 in the Deaths column is parsed as a year. To resolve this # the annotator needs to use a heuristic based on the column # name when determining column types. Simply giving integer interpretations # priority in all cases doesn't work on docs like the one in test_unusual_format. 
self.assertEqual(doc.tiers['structured_incidents'][0].metadata['location']['name'], 'Arizona') # @with_log_level(logging.getLogger('epitator.structured_incident_annotator'), logging.INFO) def test_missing_count_bug(self): doc = AnnoDoc(""" State / Number of Cases Alabama / 25 Arizona / 6 Arkansas / 9 California / 54 Colorado / 18 N Dakota / 1 S Dakota / 1 Connecticut / 9 """) doc.add_tier(self.annotator) locations = [span.metadata['location'] for span in doc.tiers['structured_incidents']] geonameids = [ location['geonameid'] if isinstance(location, dict) else location for location in locations] self.assertEqual(geonameids, [ '4829764', '5551752', '4099753', '5332921', '5417618', '5690763', '5769223', '4831725']) def test_case_synonyms(self): doc = AnnoDoc(""" As of 7 Jun 2019, a total of 279 people infected with the outbreak strains of _Salmonella_ have been reported from 41 states. A list of the states and the number of cases in each is on the map of reported cases page. State / Ill people ------------------ Alabama / 7 Arkansas / 8 Arizona / 1 California / 9 Colorado / 4 Connecticut / 3 """) doc.add_tier(self.annotator) self.assertEqual(len(doc.tiers['structured_incidents']), 6)
32.169355
245
0.572324
1,947
15,956
4.619928
0.289676
0.045025
0.050695
0.024903
0.37532
0.319066
0.277043
0.252585
0.229016
0.229016
0
0.107783
0.297004
15,956
495
246
32.234343
0.694125
0.072888
0
0.406484
0
0.049875
0.492893
0
0
0
0
0.00202
0.067332
1
0.044888
false
0.002494
0.012469
0.002494
0.062344
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c1211c97bd0c2cd978848796f6323f97d81c815a
3,492
py
Python
fastseq/optimizer/fairseq/__init__.py
nttcs-ds/fastseq
f1338f1125612df318c9d1f030a8457397ed05a6
[ "MIT" ]
346
2020-11-28T14:25:21.000Z
2022-03-25T14:50:22.000Z
fastseq/optimizer/fairseq/__init__.py
nttcs-ds/fastseq
f1338f1125612df318c9d1f030a8457397ed05a6
[ "MIT" ]
22
2020-12-03T18:52:04.000Z
2022-02-26T05:19:14.000Z
fastseq/optimizer/fairseq/__init__.py
nttcs-ds/fastseq
f1338f1125612df318c9d1f030a8457397ed05a6
[ "MIT" ]
35
2020-11-30T21:37:45.000Z
2022-03-23T01:54:51.000Z
# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. """ Automatically apply the optimizations if the supported versions of FairSeq are detected. """ import logging import sys from packaging import version from fastseq.config import FASTSEQ_VERSION, MAX_FAIRSEQ_VERSION, MIN_FAIRSEQ_VERSION from fastseq.logging import get_logger from fastseq.utils.api_decorator import OPTIMIZED_CLASSES from fastseq import config logger = get_logger(__name__, logging.INFO) LATEST_VERSION = 'latest' def is_supported_fairseq(): """Check if the installed fairseq is supported. Returns: a bool value: True indicates the installed fairseq is supported. """ v = version.parse(fairseq.__version__) return version.parse( MIN_FAIRSEQ_VERSION) <= v <= version.parse(MAX_FAIRSEQ_VERSION) def apply_fairseq_optimization(): """Automaticall apply the optimization to the installed fairseq. The optimized classes and functions are replaced in runtime. """ if not is_supported_fairseq(): logger.warning( f"fairseq(v{fairseq.__version__}) is not supported by fastseq(v" f"{FASTSEQ_VERSION}) yet, please change fairseq to " f"v{MIN_FAIRSEQ_VERSION} ~ v{MAX_FAIRSEQ_VERSION}, or check other " "versions of fastseq. Currently, no optimization in fastseq has " "been applied. Please ignore this warning if you are not using " "fairseq") return import fastseq.optimizer.fairseq.beam_search_optimizer # pylint: disable=import-outside-toplevel if config.USE_EL_ATTN: import fastseq.optimizer.fairseq.el_attention_optimizer # pylint: disable=import-outside-toplevel import fastseq.optimizer.fairseq.generate # pylint: disable=import-outside-toplevel _update_fairseq_model_registration() logger.info(f"fairseq(v{fairseq.__version__}) has been optimized by " f"fastseq(v{FASTSEQ_VERSION}).") def _update_fairseq_model_registration(): """Use the optimized classes to update the registered fairseq models and arches. 
""" for model_name, model_class in MODEL_REGISTRY.items(): if model_class in OPTIMIZED_CLASSES: MODEL_REGISTRY[model_name] = OPTIMIZED_CLASSES[model_class] logger.debug( "Update the register model {} from {} to {}".format( model_name, model_class, OPTIMIZED_CLASSES[model_class])) for arch_name, model_class in ARCH_MODEL_REGISTRY.items(): if model_class in OPTIMIZED_CLASSES: ARCH_MODEL_REGISTRY[arch_name] = OPTIMIZED_CLASSES[model_class] logger.debug( "Update the register model arch {} from {} to {}".format( arch_name, model_class, OPTIMIZED_CLASSES[model_class])) is_fairseq_installed = True try: import fairseq # pylint: disable=ungrouped-imports from fairseq.models import ARCH_MODEL_REGISTRY, MODEL_REGISTRY # pylint: disable=ungrouped-imports from fairseq.sequence_generator import SequenceGenerator # pylint: disable=ungrouped-imports except ImportError as error: is_fairseq_installed = False logger.warning('fairseq can not be imported. Please ignore this warning if ' 'you are not using fairseq: {}'.format(error)) if is_fairseq_installed: try: apply_fairseq_optimization() except: logger.error("Unexpected error: {}".format(sys.exc_info()[0])) raise
37.148936
106
0.71134
427
3,492
5.590164
0.285714
0.041894
0.043988
0.043569
0.293255
0.234604
0.165061
0.131546
0.131546
0.091328
0
0.000363
0.21134
3,492
93
107
37.548387
0.866376
0.202176
0
0.103448
0
0
0.216881
0.049541
0
0
0
0
0
1
0.051724
false
0
0.258621
0
0.344828
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c1219d3a603a918463a09f2d7d78debd9182f482
192
py
Python
eula-scan/app/counts.py
davidbstein/ml-law
2db439a9b618384c57acb51ddc0d55cf864ed8be
[ "MIT" ]
null
null
null
eula-scan/app/counts.py
davidbstein/ml-law
2db439a9b618384c57acb51ddc0d55cf864ed8be
[ "MIT" ]
null
null
null
eula-scan/app/counts.py
davidbstein/ml-law
2db439a9b618384c57acb51ddc0d55cf864ed8be
[ "MIT" ]
null
null
null
import model print(dict(model._ex("select count(*) policies from tos_text").fetchone())) print(dict(model._ex("select count(*) companies from company where last_error is null").fetchone()))
38.4
100
0.75
28
192
5
0.678571
0.128571
0.2
0.228571
0.385714
0.385714
0
0
0
0
0
0
0.09375
192
4
101
48
0.804598
0
0
0
0
0
0.534031
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0.666667
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
1
0
7
c123c8a1452dd7217130353820cbbb49ad40ee13
1,340
py
Python
__main__.py
vmenezio/clippr
78d2d8e14090fcde3c43da1656afae25d7b1629e
[ "MIT" ]
1
2015-12-20T13:32:51.000Z
2015-12-20T13:32:51.000Z
__main__.py
vmenezio/clippr
78d2d8e14090fcde3c43da1656afae25d7b1629e
[ "MIT" ]
null
null
null
__main__.py
vmenezio/clippr
78d2d8e14090fcde3c43da1656afae25d7b1629e
[ "MIT" ]
null
null
null
#! python3 # -*- coding: utf-8 -*- # [ clipper ] # # # # Hey, welcome to clipper! This is a small tool I # # have been building for personal use as a means # # to take, analyze and upload screenshots quickly. # # # # I'm not sure how common this specific task is for # # anyone else, but since, personally, it'd be a # # huge time saver to have the proccess automated # # and bound to a shortcut, I'm making the source # # available to whomever else happens to find this # # useful as well. Enjoy! # # # # - Vinícius Menézio # from .img.clipImage import ClipImage from requests.exceptions import ConnectionError from imgurpython.helpers.error import ImgurClientError def main(): clippy = ClipImage() print( "dimensions:", clippy.width, "x", clippy.height, "px | colors:", len(clippy.palette) ) print("filesize: LOCAL", clippy.size/1000, "KB, ONLINE", clippy.onlineSize/1000,"KB\n") # BREAKS IF IT CAN'T UPLOAD / RETRIEVE FILESIZE print("url:",clippy.url,"\n") print(clippy.getColorTable()) if __name__ == "__main__": main()
37.222222
139
0.54403
148
1,340
4.871622
0.682432
0.005548
0
0
0
0
0
0
0
0
0
0.011614
0.357463
1,340
35
140
38.285714
0.825784
0.590299
0
0
0
0
0.129344
0
0
0
0
0
0
1
0.090909
false
0
0.272727
0
0.363636
0.363636
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c1241bde09017aa407e82068b3e6ddb900708f43
508
py
Python
old/LongestSubStringWithoutRepeatingsCharcters.py
jonathanjosef91/LeetCode
68dd91b1d0e0c607e50378ea7f5529fd47445c4b
[ "MIT" ]
null
null
null
old/LongestSubStringWithoutRepeatingsCharcters.py
jonathanjosef91/LeetCode
68dd91b1d0e0c607e50378ea7f5529fd47445c4b
[ "MIT" ]
4
2022-02-18T10:08:51.000Z
2022-02-18T13:12:09.000Z
old/LongestSubStringWithoutRepeatingsCharcters.py
jonathanjosef91/LeetCode
68dd91b1d0e0c607e50378ea7f5529fd47445c4b
[ "MIT" ]
null
null
null
class Solution: def lengthOfLongestSubstring(self, s: str) -> int: if len(s) == 0: return 0 NUM_OF_CHARS = 256 hist = [0] * NUM_OF_CHARS l = 0 r = 0 max = 0 while r < len(s): hist_i = ord(s[r]) hist[hist_i] += 1 while hist[hist_i] > 1: hist[ord(s[l])] -= 1 l += 1 if r - l + 1 > max: max = r - l + 1 r += 1 return max
23.090909
54
0.374016
67
508
2.731343
0.358209
0.043716
0.065574
0.120219
0
0
0
0
0
0
0
0.064777
0.51378
508
22
55
23.090909
0.676113
0
0
0
0
0
0
0
0
0
0
0
0
1
0.052632
false
0
0
0
0.210526
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
c124253bfbcd49e0f1812986a73b8ad635b8c1fb
936
py
Python
core/setup.py
DiegoGH117/cellare
c0c68f6f53ee8f31999c3538c327ddca34ba6e94
[ "MIT" ]
null
null
null
core/setup.py
DiegoGH117/cellare
c0c68f6f53ee8f31999c3538c327ddca34ba6e94
[ "MIT" ]
null
null
null
core/setup.py
DiegoGH117/cellare
c0c68f6f53ee8f31999c3538c327ddca34ba6e94
[ "MIT" ]
null
null
null
from setuptools import setup with open('README.md', 'r') as f: long_description = f.read() setup( name = 'CellARE', version = '0.0.2', description = 'A cellular automaton based implementation to run SIR simulations', py_modules = ['cellare'], package_dir = {'':'src'}, classifiers = [ 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent' ], long_description = long_description, long_description_content_type = 'text/markdown', install_requires=[ "numpy", "matplotlib" ], url = 'https://github.com/DiegoGH117/cellare', project_urls = { 'Documentation': 'https://cellare.readthedocs.io/en/latest/', }, )
33.428571
87
0.569444
91
936
5.747253
0.747253
0.114723
0.143403
0.14914
0
0
0
0
0
0
0
0.016692
0.29594
936
28
88
33.428571
0.776935
0
0
0.074074
0
0
0.422625
0
0
0
0
0
0
1
0
false
0
0.037037
0
0.037037
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c1248143e6872a13760d0d34115b96fb5c387e21
12,008
py
Python
deploy-tools/auction-deploy/tests/test_cli.py
d3centr0z/trustlines-blockchain
b90cba6e4ca7a5194eadc35793cc0fad63d9c761
[ "MIT" ]
9
2019-02-28T06:24:08.000Z
2021-05-29T04:43:56.000Z
deploy-tools/auction-deploy/tests/test_cli.py
d3centr0z/trustlines-blockchain
b90cba6e4ca7a5194eadc35793cc0fad63d9c761
[ "MIT" ]
425
2019-04-02T08:07:27.000Z
2021-07-01T18:29:02.000Z
deploy-tools/auction-deploy/tests/test_cli.py
d3centr0z/trustlines-blockchain
b90cba6e4ca7a5194eadc35793cc0fad63d9c761
[ "MIT" ]
10
2019-02-25T08:40:24.000Z
2022-03-08T10:22:57.000Z
import csv import re import pytest from click.testing import CliRunner from deploy_tools.cli import test_json_rpc, test_provider from eth_tester.exceptions import TransactionFailed from eth_utils import to_checksum_address import auction_deploy.core from auction_deploy.cli import AuctionState, main from auction_deploy.core import ( DeployedAuctionContracts, deploy_auction_contracts, get_deployed_auction_contracts, ) @pytest.fixture def runner(): return CliRunner() def extract_auction_address(output): """extract the auction address from 'deploy' output""" match = re.search("^Auction address: (0x[0-9a-fA-F]{40})$", output, re.M) if match: return match[1] raise ValueError(f"Could not find auction address in output: {repr(output)}") @pytest.fixture() def deployed_auction_address(auction_options, runner, use_token, token_contract): """Deploys an auction and return its address""" argument = ( f"deploy --release-timestamp 2000000000 --max-participants " f"{auction_options.maximal_number_of_participants} " f"--min-participants {auction_options.minimal_number_of_participants}" f" --start-price {auction_options.start_price} --jsonrpc test" ) if use_token: argument += f" --use-token --token-address {auction_options.token_address}" deploy_result = runner.invoke(main, args=argument) if deploy_result.exception is not None: raise RuntimeError( "Error while trying to run auction-deploy" ) from deploy_result.exception return extract_auction_address(deploy_result.output) @pytest.fixture() def whitelisted_auction_address(runner, deployed_auction_address, whitelist_file): """Whitelists all addresses in the whitelist on the deployed auction and returns its address""" runner.invoke( main, args=f"whitelist --file {whitelist_file} --address {deployed_auction_address} " + "--batch-size 100 --jsonrpc test", ) return deployed_auction_address @pytest.fixture() def whitelist_file(tmp_path, whitelist): folder = tmp_path / "subfolder" folder.mkdir() file_path = folder / "whitelist.csv" with 
file_path.open("w") as f: writer = csv.writer(f) writer.writerows([[to_checksum_address(address)] for address in whitelist]) return file_path @pytest.fixture def contracts(deployed_auction_address) -> DeployedAuctionContracts: """return the core.DeployedAuctionContracts object for the currently active auction""" return get_deployed_auction_contracts(test_json_rpc, deployed_auction_address) @pytest.fixture def contracts_not_initialized(auction_options) -> DeployedAuctionContracts: """return the three auction related contracts where locker and slasher are not initialized""" contracts = deploy_auction_contracts( web3=test_json_rpc, auction_options=auction_options ) return contracts @pytest.fixture def ensure_auction_state(contracts): """return a function that can be used to check the current auction state""" def ensure_state(expected_state): current_state = AuctionState(contracts.auction.functions.auctionState().call()) assert current_state == expected_state return ensure_state def bid(auction_contract, token_contract, sender, bid_value, use_token): if use_token: token_contract.functions.approve(auction_contract.address, bid_value).transact( {"from": sender} ) auction_contract.functions.bid().transact({"from": sender}) else: auction_contract.functions.bid().transact({"from": sender, "value": bid_value}) @pytest.fixture def deposit_pending_auction( runner, deployed_auction_address, contracts, token_contract, auction_options, use_token, ether_owning_whitelist, ensure_auction_state, ): """return the auction contract with enough bids so that the state is `DepositPending`""" contracts.auction.functions.addToWhitelist(ether_owning_whitelist).transact() contracts.auction.functions.startAuction().transact() bid_value = contracts.auction.functions.currentPrice().call() bid( contracts.auction, token_contract, ether_owning_whitelist[0], bid_value, use_token, ) bid( contracts.auction, token_contract, ether_owning_whitelist[1], bid_value, use_token, ) 
ensure_auction_state(AuctionState.DepositPending) return contracts.auction def test_cli_release_date_option(runner): deploy_result = runner.invoke( main, args="deploy --release-date '2033-05-18 03:33:21' --jsonrpc test" ) assert deploy_result.exception is None assert deploy_result.exit_code == 0 auction_address = extract_auction_address(deploy_result.output) contracts = get_deployed_auction_contracts(test_json_rpc, auction_address) release_timestamp = contracts.locker.functions.releaseTimestamp().call() # 2033-05-18 03:33:21 is timestamp 2000000001 assert release_timestamp == 2_000_000_001 def test_cli_contract_parameters_set(runner): result = runner.invoke( main, args="deploy --start-price 123 --duration 4 --max-participants 567 --min-participants 456 " "--release-timestamp 2000000000 --jsonrpc test", ) assert result.exit_code == 0 def test_cli_deploy_token_auction(runner): arbitrary_token_address = "0x" + "1234" * 10 result = runner.invoke( main, args=f"deploy --use-token --token-address {arbitrary_token_address} --release-timestamp 2000000000 --jsonrpc test", ) assert result.exit_code == 0 def test_cli_resume_deployment(runner, contracts_not_initialized): result = runner.invoke( main, args=f"deploy --start-price 123 --duration 4 --max-participants 567 --min-participants 456 " f"--release-timestamp 2000000000 --jsonrpc test --auction {contracts_not_initialized.auction.address}" f" --locker {contracts_not_initialized.locker.address}", ) assert result.exit_code == 0 assert ( extract_auction_address(result.output) == contracts_not_initialized.auction.address ) def test_cli_transaction_parameters_set(runner): result = runner.invoke( main, args="deploy --nonce 0 --gas-price 123456789 --gas 7000000 --release-timestamp 2000000000 --jsonrpc test", ) assert result.exit_code == 0 def test_cli_private_key(runner, keystore_file_path, key_password): result = runner.invoke( main, args="deploy --jsonrpc test --release-timestamp 2000000000 --keystore " + str(keystore_file_path), 
input=key_password, ) assert result.exit_code == 0 def test_cli_start_auction(runner, deployed_auction_address): result = runner.invoke( main, args="start --jsonrpc test --address " + deployed_auction_address ) assert result.exit_code == 0 def test_cli_close_auction( runner, deployed_auction_address, ensure_auction_state, contracts ): result = runner.invoke( main, args=f"start --jsonrpc test --address {deployed_auction_address}" ) assert result.exit_code == 0 auction_duration = ( contracts.auction.functions.auctionDurationInDays().call() * 24 * 3600 ) # auction is started, time travel forward test_provider.ethereum_tester.time_travel( test_json_rpc.eth.getBlock("latest").timestamp + auction_duration ) test_provider.ethereum_tester.mine_block() result = runner.invoke( main, args=f"close --jsonrpc test --address {deployed_auction_address}" ) assert result.exit_code == 0 ensure_auction_state(AuctionState.Failed) def test_cli_start_auction_with_auto_nonce( runner, deployed_auction_address, keystores, key_password ): """test the auto-nonce option. 
we only do this for the start-auction""" result = runner.invoke( main, args=f"start --auto-nonce --jsonrpc test --keystore {keystores[0]}" + f" --address {deployed_auction_address}", input=key_password, ) assert result.exit_code == 0 def test_cli_start_auction_key_not_owner( runner, deployed_auction_address, keystore_file_path, key_password ): """Test that when you attempt to start the auction with a private_key not corresponding to the owner of the auction, the command fails This shows that the command takes into account the key""" result = runner.invoke( main, args="start --jsonrpc test --address " + deployed_auction_address + " --keystore " + str(keystore_file_path), input=key_password, ) assert result.exit_code == 1 def test_cli_deposit_bids(runner, deposit_pending_auction, ensure_auction_state): result = runner.invoke( main, args=f"deposit-bids --jsonrpc test --address {deposit_pending_auction.address}", ) assert result.exit_code == 0 ensure_auction_state(AuctionState.Ended) @pytest.fixture() def replace_bad_function_call_output(): # TransactionFailed is raised by eth_tester # when BadFunctionCallOutput would be raised by web3 in `get_bid_token_address` bad_function_call = auction_deploy.core.BadFunctionCallOutput auction_deploy.core.BadFunctionCallOutput = TransactionFailed yield auction_deploy.core.BadFunctionCallOutput = bad_function_call @pytest.mark.usefixtures("replace_bad_function_call_output") def test_cli_auction_status(runner, deployed_auction_address): result = runner.invoke( main, args="status --jsonrpc test --address " + deployed_auction_address ) assert result.exit_code == 0 @pytest.mark.usefixtures("replace_bad_function_call_output") def test_cli_auction_status_locker_not_init(runner, contracts_not_initialized): result = runner.invoke( main, args="status --jsonrpc test --address " + contracts_not_initialized.auction.address, ) assert result.exit_code == 0 def test_cli_whitelist(runner, deployed_auction_address, whitelist_file, whitelist): result 
= runner.invoke( main, args=f"whitelist --file {whitelist_file} --address {deployed_auction_address} " + "--batch-size 10 --jsonrpc test", ) assert result.exit_code == 0 assert result.output == f"Number of whitelisted addresses: {len(whitelist)}\n" def test_cli_check_whitelist_not_whitelisted( runner, deployed_auction_address, whitelist_file, whitelist ): result = runner.invoke( main, args=f"check-whitelist --file {whitelist_file} --address {deployed_auction_address} " + "--jsonrpc test", ) assert result.exit_code == 0 assert ( result.output == f"{len(whitelist)} of {len(whitelist)} addresses have not been whitelisted yet\n" ) def test_cli_check_whitelist_all_whitelisted( runner, whitelisted_auction_address, whitelist_file, whitelist ): result = runner.invoke( main, args=f"check-whitelist --file {whitelist_file} --address {whitelisted_auction_address} " + "--jsonrpc test", ) assert result.exit_code == 0 assert result.output == f"All {len(whitelist)} addresses have been whitelisted\n" @pytest.mark.usefixtures("replace_bad_function_call_output") def test_cli_not_checksummed_address(runner, deployed_auction_address): address = deployed_auction_address.lower() result = runner.invoke(main, args=f"status --jsonrpc test --address {address}") assert result.exit_code == 0 def test_cli_incorrect_address_parameter_fails(runner): not_an_address = "not_an_address" result = runner.invoke( main, args=f"status --jsonrpc test --address {not_an_address}" ) assert ( f"The address parameter is not recognized to be an address: {not_an_address}" in result.output ) assert result.exit_code == 2
30.4
123
0.713358
1,451
12,008
5.650586
0.168849
0.068301
0.064398
0.051226
0.442127
0.386145
0.328821
0.297231
0.283571
0.240151
0
0.019922
0.193205
12,008
394
124
30.477157
0.826383
0.079947
0
0.32491
0
0
0.221181
0.05277
0.00361
0
0
0
0.097473
1
0.108303
false
0.021661
0.036101
0.00361
0.176895
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c1257c741492e036061014a924bb9f56f773f5b1
10,555
py
Python
core/plugins/rabbitmq.py
dnegreira/hotsos
c88375d8700bf53faed4e5de55c34bd0bdc66187
[ "Apache-2.0" ]
null
null
null
core/plugins/rabbitmq.py
dnegreira/hotsos
c88375d8700bf53faed4e5de55c34bd0bdc66187
[ "Apache-2.0" ]
null
null
null
core/plugins/rabbitmq.py
dnegreira/hotsos
c88375d8700bf53faed4e5de55c34bd0bdc66187
[ "Apache-2.0" ]
null
null
null
import os from core.log import log from core.cli_helpers import CLIHelper from core.utils import mktemp_dump, sorted_dict from core.ycheck.events import YEventCheckerBase from core.searchtools import ( SearchDef, SequenceSearchDef, FileSearcher, ) from core import ( checks, plugintools, ) RMQ_SERVICES_EXPRS = [ r"beam.smp", r"epmd", r"rabbitmq-server", ] RMQ_PACKAGES = [ r"rabbitmq-server", ] def cached_property(f): @property def _inner(inst): if f.__name__ in inst._property_cache: # log.debug("using cached value for %s", f.__name__) return inst._property_cache[f.__name__] # log.debug("using uncached value for %s", f.__name__) ret = f(inst) inst._property_cache[f.__name__] = ret return ret return _inner class RabbitMQReport(object): """ Class providing easy access to the contents of a rabbitmqctl report. First registers search definitions to execute against rabbitmqctl report then runs the search to fetch the information that is then expose through properties. NOTE: the rabbitmqctl report output differs between versions 3.6.x and 3.8.x and we try to account for either by providing optional regex expressions to match either. 
""" def __init__(self): self._property_cache = {} # save to file so we can search it later self._f_report = mktemp_dump(''.join(CLIHelper().rabbitmqctl_report())) searcher = FileSearcher() searcher.add_search_term(self.connections_searchdef, self._f_report) searcher.add_search_term(self.memory_searchdef, self._f_report) searcher.add_search_term(self.cluster_partition_handling_searchdef, self._f_report) searcher.add_search_term(self.queues_searchdef, self._f_report) self.results = searcher.search() def __del__(self): if os.path.exists(self._f_report): os.unlink(self._f_report) @cached_property def queues_searchdef(self): start = SearchDef([r"^Queues on ([^:]+):", (r"^Listing queues for vhost ([^:]+) " r"...")]) # NOTE: we don't use a list for the body here because # we need to know which expression matched so that we # can know in which order to retrieve the columns since # their order is inverted between 3.6.x and 3.8.x body = SearchDef(r"^(?:<([^.\s]+)[.0-9]+>\s+(\S+)|" r"(\S+)\s+(?:\S+\s+){4}<([^.\s]+)[.0-9]" r"+>)\s+.+") end = SearchDef(r"^$") return SequenceSearchDef(start=start, body=body, end=end, tag='queues') @cached_property def skewed_nodes(self): vhosts = self.vhosts _skewed_nodes = {} skewed_queue_nodes = {} global_total_queues = sum([vhost.total_queues for vhost in vhosts]) for vhost in self.vhosts: if not vhost.total_queues: continue total_pcent = (float(100) / global_total_queues * vhost.total_queues) for node, vhost_dist in vhost.node_queue_distributions.items(): if total_pcent >= 1 and vhost_dist['pcent'] > 75: if node not in skewed_queue_nodes: skewed_queue_nodes[node] = 0 skewed_queue_nodes[node] += 1 # Report the node with the greatest skew of queues/vhost if skewed_queue_nodes: max_node = None for node_name in skewed_queue_nodes: if max_node is None: max_node = node_name elif (skewed_queue_nodes[node_name] >= skewed_queue_nodes[max_node]): max_node = node_name if (skewed_queue_nodes[max_node] > _skewed_nodes.get(max_node, 0)): 
_skewed_nodes[max_node] = skewed_queue_nodes[max_node] return _skewed_nodes @cached_property def vhosts(self): seq_def = self.queues_searchdef vhosts = [] for section in self.results.find_sequence_sections(seq_def).values(): vhost = None # ensure we get vhost before the rest for result in section: if result.tag == seq_def.start_tag: # check both report formats vhost = RabbitMQVhost(result.get(1)) break for result in section: if result.tag == seq_def.body_tag: node_name = result.get(1) or result.get(4) # if we matched the section header, skip if node_name == "pid": continue queue = result.get(2) or result.get(3) # if we matched the section header, skip if queue == "name": continue vhost.node_inc_queue_count(node_name) log.debug(vhost.name) vhosts.append(vhost) return vhosts @cached_property def connections_searchdef(self): start = SearchDef([r"^Connections:$", r"^Listing connections ...$"]) # Again, the user and protocol columns are inverted # between 3.6.x and 3.8.x so we have to catch both and # decide. 
body = SearchDef(r"^<(rabbit[^>.]*)(?:[.][0-9]+)+>.+(?:[A-Z]+\s+{[\d,]+}\s+(\S+)|\d+\s+{[\d,]+}\s+\S+\s+(\S+)).+{\"connection_name\",\"([^:]+):\d+:.+$") # noqa end = SearchDef(r"^$") return SequenceSearchDef(start=start, body=body, end=end, tag='connections') @cached_property def memory_searchdef(self): start = SearchDef([r"^Status of node '([^']*)'$", r"^Status of node ([^']*) ...$"]) body = SearchDef(r"^\s+\[{total,([0-9]+)}.+") end = SearchDef(r"^$") return SequenceSearchDef(start=start, body=body, end=end, tag='memory') @cached_property def cluster_partition_handling_searchdef(self): return SearchDef(r"^\s*{cluster_partition_handling,([^}]*)}", tag='cluster_partition_handling') @cached_property def connections(self): _connections = {'host': {}, 'client': {}} sd = self.connections_searchdef for results in self.results.find_sequence_sections(sd).values(): for result in results: if result.tag == sd.body_tag: host = result.get(1) if host not in _connections['host']: _connections['host'][host] = 1 else: _connections['host'][host] += 1 # detect 3.6.x or 3.8.x format user = result.get(2) if user is None: user = result.get(3) client_name = result.get(4) if user not in _connections['client']: _connections['client'][user] = {} if client_name not in _connections['client'][user]: _connections['client'][user][client_name] = 1 else: _connections['client'][user][client_name] += 1 if _connections['host']: for client, users in _connections['client'].items(): sorted_users = sorted_dict(users, key=lambda e: e[1], reverse=True) _connections['client'][client] = sorted_users return _connections @cached_property def memory_used(self): sd = self.memory_searchdef _memory_used = {} for results in self.results.find_sequence_sections(sd).values(): for result in results: if result.tag == sd.start_tag: # check both report formats node_name = result.get(1) elif result.tag == sd.body_tag: total = result.get(1) mib_used = int(total) / 1024. / 1024. 
_memory_used[node_name] = "{:.3f}".format(mib_used) return _memory_used @cached_property def partition_handling(self): results = self.results.find_by_tag("cluster_partition_handling") if not results: return return results[0].get(1) class RabbitMQVhost(object): def __init__(self, name): self.name = name self._node_queues = {} def node_inc_queue_count(self, node): if node not in self._node_queues: self._node_queues[node] = 0 self._node_queues[node] += 1 @property def total_queues(self): return sum(self.node_queues.values()) @property def node_queues(self): return self._node_queues def node_queues_vhost_pcent(self, node): return float(100) / self.total_queues * self.node_queues[node] @property def node_queue_distributions(self): dists = {} for node, queues in self.node_queues.items(): if queues: vhost_pcent = self.node_queues_vhost_pcent(node) dists[node] = {'queues': queues, 'pcent': vhost_pcent} else: dists[node] = {'queues': 0, 'pcent': 0} return dists class RabbitMQBase(object): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.report = RabbitMQReport() class RabbitMQChecksBase(RabbitMQBase, plugintools.PluginPartBase): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.apt_check = checks.APTPackageChecksBase(core_pkgs=RMQ_PACKAGES) @property def plugin_runnable(self): if self.apt_check.core: return True return False class RabbitMQServiceChecksBase(RabbitMQChecksBase, checks.ServiceChecksBase): def __init__(self, *args, **kwargs): super().__init__(*args, service_exprs=RMQ_SERVICES_EXPRS, **kwargs) class RabbitMQEventChecksBase(RabbitMQChecksBase, YEventCheckerBase): @property def summary(self): # mainline all results into summary root return self.run_checks()
34.158576
168
0.559545
1,187
10,555
4.74305
0.20893
0.029307
0.028419
0.01492
0.277975
0.179396
0.142629
0.14103
0.123268
0.07833
0
0.009674
0.33406
10,555
308
169
34.269481
0.791293
0.110185
0
0.191781
0
0.004566
0.066102
0.029891
0
0
0
0
0
1
0.109589
false
0
0.031963
0.022831
0.255708
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c127d6465fa7c0671438fe8816025b96ec521c2a
31,776
py
Python
src/mercs/core/Mercs.py
MattiasDC/mercs
466962e254c4f56f4a16a31b1a3d7bd893c8e23e
[ "MIT" ]
11
2020-01-28T16:15:53.000Z
2021-05-20T08:05:42.000Z
src/mercs/core/Mercs.py
MattiasDC/mercs
466962e254c4f56f4a16a31b1a3d7bd893c8e23e
[ "MIT" ]
null
null
null
src/mercs/core/Mercs.py
MattiasDC/mercs
466962e254c4f56f4a16a31b1a3d7bd893c8e23e
[ "MIT" ]
4
2020-02-06T09:02:28.000Z
2022-02-14T09:42:04.000Z
import itertools import warnings from inspect import signature from timeit import default_timer from sklearn.preprocessing import normalize import dask import numpy as np try: import shap except: msg = "SHAP not found, therefore using SHAP-values for feature importance not available." warnings.warn(msg) shap = None from dask import delayed from networkx import NetworkXUnfeasible, find_cycle, topological_sort from sklearn.ensemble import ( ExtraTreesClassifier, ExtraTreesRegressor, RandomForestClassifier, RandomForestRegressor, ) from sklearn.impute import SimpleImputer from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor from ..algo import ( evaluation, imputation, inference, inference_v3, new_inference, new_prediction, selection, vector_prediction, ) from ..algo.induction import base_induction_algorithm, expand_induction_algorithm from ..composition import CompositeModel, NewCompositeModel, o, x from ..graph import build_diagram, compose_all, get_targ, model_to_graph from ..utils import ( DESC_ENCODING, MISS_ENCODING, TARG_ENCODING, DecoratedDecisionTreeClassifier, DecoratedDecisionTreeRegressor, DecoratedRandomForestClassifier, DecoratedRandomForestRegressor, code_to_query, get_i_o, query_to_code, ) from ..visuals import save_diagram, show_diagram try: from xgboost import XGBClassifier as XGBC from xgboost import XGBRegressor as XGBR except: XGBC, XGBR = None, None try: from lightgbm import LGBMClassifier as LGBMC from lightgbm import LGBMRegressor as LGBMR except: LGBMC, LGBMR = None, None try: from catboost import CatBoostClassifier as CBC from catboost import CatBoostRegressor as CBR except: CBC, CBR = None, None try: from wekalearn import RandomForestClassifier as WLC from wekalearn import RandomForestRegressor as WLR except: WLC, WLR = None, None class Mercs(object): delimiter = "_" selection_algorithms = dict( default=selection.base_selection_algorithm, base=selection.base_selection_algorithm, random=selection.random_selection_algorithm, ) 
# NOTE: the registries below are class-level attributes of `Mercs`. Each maps
# the short algorithm codes accepted by the constructor to the callable (or
# estimator class) that implements the algorithm. Optional back-ends (XGB,
# LGBM, CB, weka) are None when the corresponding package failed to import.

induction_algorithms = dict(
    base=base_induction_algorithm,
    default=base_induction_algorithm,
    expand=expand_induction_algorithm,
)

classifier_algorithms = dict(
    DT=DecisionTreeClassifier,
    DDT=DecoratedDecisionTreeClassifier,
    RF=RandomForestClassifier,
    DRF=DecoratedRandomForestClassifier,
    XGB=XGBC,
    xgb=XGBC,
    weka=WLC,
    LGBM=LGBMC,
    lgbm=LGBMC,
    CB=CBC,
    extra=ExtraTreesClassifier,
)

regressor_algorithms = dict(
    DT=DecisionTreeRegressor,
    DDT=DecoratedDecisionTreeRegressor,
    RF=RandomForestRegressor,
    # BUG FIX: was DecoratedDecisionTreeRegressor (copy-paste of the DDT
    # entry). "DRF" mirrors the classifier registry's decorated *random
    # forest*, and DecoratedRandomForestRegressor was imported but unused.
    DRF=DecoratedRandomForestRegressor,
    XGB=XGBR,
    xgb=XGBR,
    weka=WLR,
    LGBM=LGBMR,
    lgbm=LGBMR,
    CB=CBR,
    extra=ExtraTreesRegressor,
)

prediction_algorithms = dict(
    mi=vector_prediction.mi,
    mrai=vector_prediction.mrai,
    it=vector_prediction.it,
    rw=vector_prediction.rw,
)

inference_algorithms = dict(
    base=inference.base_inference_algorithm,
    dask=inference_v3.inference_algorithm,
    own=inference_v3.inference_algorithm,
)

imputer_algorithms = dict(
    # All spellings of nan/null select the no-op imputation...
    nan=imputation.nan_imputation,
    NAN=imputation.nan_imputation,
    NaN=imputation.nan_imputation,
    null=imputation.nan_imputation,
    NULL=imputation.nan_imputation,
    # ...everything else selects the sklearn-based imputation.
    skl=imputation.skl_imputation,
    base=imputation.skl_imputation,
    default=imputation.skl_imputation,
)

evaluation_algorithms = dict(
    base=evaluation.base_evaluation,
    default=evaluation.base_evaluation,
    dummy=evaluation.dummy_evaluation,
)

# Used in parse kwargs to identify parameters. If this identification goes
# wrong, you are sending settings somewhere you do not want them to be. So,
# this is a tricky part, and moreover hardcoded. In other words: this is
# risky terrain, and should probably be done differently in the future.
configuration_prefixes = dict( imputation={"imputation", "imp"}, induction={"induction", "ind"}, selection={"selection", "sel"}, prediction={"prediction", "pred", "prd"}, inference={"inference", "infr", "inf"}, classification={"classification", "classifier", "clf"}, regression={"regression", "regressor", "rgr"}, metadata={"metadata", "meta", "mtd"}, evaluation={"evaluation", "evl"}, ) def __init__( self, selection_algorithm="base", induction_algorithm="base", classifier_algorithm="DT", regressor_algorithm="DT", prediction_algorithm="mi", inference_algorithm="own", imputer_algorithm="default", evaluation_algorithm="default", random_state=42, **kwargs ): self.params = dict( selection_algorithm=selection_algorithm, induction_algorithm=induction_algorithm, classifier_algorithm=classifier_algorithm, regressor_algorithm=regressor_algorithm, prediction_algorithm=prediction_algorithm, inference_algorithm=inference_algorithm, imputer_algorithm=imputer_algorithm, evaluation_algorithm=evaluation_algorithm, random_state=random_state, ) self.params = {**self.params, **kwargs} self.random_state = random_state self.selection_algorithm = self.selection_algorithms[selection_algorithm] # N.b.: First try to look up the key. If the key is not found, we assume the algorithm itself was passed. self.classifier_algorithm = self.classifier_algorithms.get( classifier_algorithm, classifier_algorithm ) self.regressor_algorithm = self.regressor_algorithms.get( regressor_algorithm, regressor_algorithm ) self.prediction_algorithm = self.prediction_algorithms[prediction_algorithm] self.inference_algorithm = self.inference_algorithms[inference_algorithm] self.induction_algorithm = self.induction_algorithms[ induction_algorithm ] # For now, we only have one. 
self.imputer_algorithm = self.imputer_algorithms[imputer_algorithm] self.evaluation_algorithm = self.evaluation_algorithms[evaluation_algorithm] # Data-structures self.m_codes = np.array([]) self.m_list = [] self.c_list = [] self.g_list = [] self.i_list = [] self.m_fimps = np.array([]) self.m_score = np.array([]) self.FI = np.array([]) self.targ_ids = np.array([]) # Query-related things self.q_code = None self.q_desc_ids = None self.q_targ_ids = None self.q_diagram = None self.q_compose = None self.q_methods = [] # Configurations self.imp_cfg = self._default_config(self.imputer_algorithm) self.ind_cfg = self._default_config(self.induction_algorithm) self.sel_cfg = self._default_config(self.selection_algorithm) self.clf_cfg = self._default_config(self.classifier_algorithm) self.rgr_cfg = self._default_config(self.regressor_algorithm) self.prd_cfg = self._default_config(self.prediction_algorithm) self.inf_cfg = self._default_config(self.inference_algorithm) self.evl_cfg = self._default_config(self.evaluation_algorithm) self.configuration = dict( imputation=self.imp_cfg, induction=self.ind_cfg, selection=self.sel_cfg, classification=self.clf_cfg, regression=self.rgr_cfg, prediction=self.prd_cfg, inference=self.inf_cfg, ) # Collect all configs in one self._update_config(random_state=random_state, **kwargs) self.metadata = dict() self.model_data = dict() self._extra_checks_on_config() return def fit(self, X, y=None, m_codes=None, **kwargs): assert isinstance(X, np.ndarray) if y is not None: assert isinstance(y, np.ndarray) X = np.c_[X, y] tic = default_timer() self.metadata = self._default_metadata(X) self._update_metadata(**kwargs) self.i_list = self.imputer_algorithm(X, self.metadata.get("nominal_attributes")) # N.b.: `random state` parameter is in `self.sel_cfg` if m_codes is None: self.m_codes = self.selection_algorithm(self.metadata, **self.sel_cfg) else: self.m_codes = m_codes self.m_list = self.induction_algorithm( X, self.m_codes, self.metadata, 
self.classifier_algorithm, self.regressor_algorithm, self.clf_cfg, self.rgr_cfg, **self.ind_cfg ) self._filter_m_list_m_codes() self._consistent_datastructures() if self.imputer_algorithm == self.imputer_algorithms.get("nan"): # If you do no have imputers, you cannot use them as a baseline evaluation self.evl_cfg["consider_imputations"] = False self.m_score = self.evaluation_algorithm( X, self.m_codes, self.m_list, self.i_list, **self.evl_cfg ) toc = default_timer() self.model_data["ind_time"] = toc - tic self.metadata["n_component_models"] = len(self.m_codes) return def predict( self, X, q_code=None, inference_algorithm=None, prediction_algorithm=None, **kwargs ): # Update configuration if necessary if q_code is None: q_code = self._default_q_code() if inference_algorithm is not None: self._reconfig_inference(inference_algorithm=inference_algorithm) if prediction_algorithm is not None: self._reconfig_prediction( prediction_algorithm=prediction_algorithm, **kwargs ) # Adjust data self.q_code = q_code self.q_desc_ids, self.q_targ_ids, _ = code_to_query( self.q_code, return_list=True ) # Make query-diagram tic_prediction = default_timer() self.m_sel = self.prediction_algorithm( self.m_codes, self.m_fimps, self.m_score, q_code=self.q_code, **self.prd_cfg ) toc_prediction = default_timer() tic_diagram = default_timer() self.q_diagram = self._build_q_diagram(self.m_list, self.m_sel) toc_diagram = default_timer() tic_infalgo = default_timer() if isinstance(self.q_diagram, tuple): self.q_diagrams = self.q_diagram # for d in self.q_diagrams: # print(d.nodes) # self.c_list.append(self._build_q_model(X, d)) self.c_list = [self._build_q_model(X, d) for d in self.q_diagrams] self.c_sel = list(range(len(self.c_list))) self.c_diagram = self._build_q_diagram( self.c_list, self.c_sel, composition=True ) self.q_model = self._build_q_model(X, self.c_diagram) else: self.q_model = self._build_q_model(X, self.q_diagram) toc_infalgo = default_timer() tic_dask = default_timer() X = X[:, 
self.q_model.desc_ids] result = self.q_model.predict(X) toc_dask = default_timer() self.model_data["prd_time"] = toc_prediction - tic_prediction self.model_data["dia_time"] = toc_diagram - tic_diagram self.model_data["infalgo_time"] = toc_infalgo - tic_infalgo self.model_data["dsk_time"] = toc_dask - tic_dask self.model_data["inf_time"] = toc_dask - tic_prediction return result def get_params(self, deep=False): return self.params # Diagrams def _build_q_diagram(self, m_list, m_sel, composition=False): if isinstance(m_sel, tuple): diagrams = [ build_diagram( m_list, m_sel_instance, self.q_code, prune=True, composition=composition, ) for m_sel_instance in m_sel ] return tuple(diagrams) else: return build_diagram( m_list, m_sel, self.q_code, prune=True, composition=composition ) def show_q_diagram(self, kind="svg", fi=False, ortho=False, index=None, **kwargs): if isinstance(self.q_diagram, tuple) and index is None: return show_diagram(self.c_diagram, kind=kind, fi=fi, ortho=ortho, **kwargs) elif isinstance(self.q_diagram, tuple): return show_diagram( self.q_diagram[index], kind=kind, fi=fi, ortho=ortho, **kwargs ) else: return show_diagram(self.q_diagram, kind=kind, fi=fi, ortho=ortho, **kwargs) def save_diagram(self, fname=None, kind="svg", fi=False, ortho=False): return save_diagram(self.q_diagram, fname, kind=kind, fi=fi, ortho=ortho) # Inference def _build_q_model(self, X, diagram): try: self.inference_algorithm( diagram, self.m_list, self.i_list, self.c_list, X, self.metadata.get("nominal_attributes"), ) except NetworkXUnfeasible: cycle = find_cycle(self.q_diagram, orientation="original") msg = """ Topological sort failed, investigate diagram to debug. I will never be able to squeeze a prediction out of a diagram with a loop. 
Cycle was: {} """.format( cycle ) raise RecursionError(msg) n_component_models = self.metadata["n_component_models"] q_model = NewCompositeModel( diagram, nominal_attributes=self.metadata["nominal_attributes"], n_component_models=n_component_models, ) return q_model def _merge_q_models(self, q_models): q_diagram = build_diagram(self.c_list, self.c_sel, self.q_code, prune=True) return q_diagram def merge_models(self, q_models): types = self._get_types(self.metadata) walks = [ model_to_graph(m, types, idx=idx, composition=True) for idx, m in enumerate(q_models) ] q_diagram = compose_all(walks) filtered_nodes = self.filter_nodes(q_diagram) try: self.inference_algorithm(q_diagram, sorted_nodes=filtered_nodes) except NetworkXUnfeasible: cycle = find_cycle(q_diagram, orientation="original") msg = """ Topological sort failed, investigate diagram to debug. I will never be able to squeeze a prediction out of a diagram with a loop. Cycle was: {} """.format( cycle ) raise RecursionError(msg) q_model = CompositeModel(q_diagram) return q_diagram, q_model def _get_q_model(self, q_diagram, X): self._add_imputer_function(q_diagram) try: self.inference_algorithm(q_diagram, X=X) except NetworkXUnfeasible: cycle = find_cycle(q_diagram, orientation="original") msg = """ Topological sort failed, investigate diagram to debug. I will never be able to squeeze a prediction out of a diagram with a loop. Cycle was: {} """.format( cycle ) raise RecursionError(msg) q_model = CompositeModel(q_diagram) return q_model # Filter def _filter_m_list_m_codes(self): """Filtering out the failed models. 
This happens when TODO: EXPLAIN """ fail_m_idxs = [i for i, m in enumerate(self.m_list) if m is None] self.m_codes = np.delete(self.m_codes, fail_m_idxs, axis=0) self.m_list = [m for m in self.m_list if m is not None] return # Graphs def _consistent_datastructures(self, binary_scores=False): self._update_m_codes() self._update_m_fimps() return def _expand_m_list(self): self.m_list = list(itertools.chain.from_iterable(self.m_list)) return def _add_model(self, model, binary_scores=False): self.m_list.append(model) self._consistent_datastructures(binary_scores=binary_scores) return def _update_m_codes(self): self.m_codes = np.array( [ query_to_code( list(model.desc_ids), list(model.targ_ids), attributes=self.metadata["attributes"], ) for model in self.m_list ] ) return def _update_m_fimps(self): init = np.zeros(self.m_codes.shape) for m_idx, mod in enumerate(self.m_list): init[m_idx, list(mod.desc_ids)] = mod.feature_importances_ self.m_fimps = init return def _update_m_score(self, binary_scores=False): if binary_scores: self.m_score = (self.m_codes == TARG_ENCODING).astype(float) return # Imputer def _add_imputer_function(self, g): for n in g.nodes: if g.nodes[n]["kind"] == "imputation": idx = g.nodes[n]["idx"] f_1 = self._dummy_array # Artificial input f_2 = self.i_list[idx].transform # Actual imputation f_3 = np.ravel # Return a vector, not array g.nodes[n]["function"] = o(f_3, o(f_2, f_1)) return # Add ids @staticmethod def _add_ids(g, desc_ids, targ_ids): g.graph["desc_ids"] = set(desc_ids) g.graph["targ_ids"] = set(targ_ids) return g # Metadata def _default_metadata(self, X): if X.ndim != 2: X = X.reshape(-1, 1) n_rows, n_cols = X.shape types = [X[0, 0].dtype for _ in range(n_cols)] nominal_attributes = set( [att for att, typ in enumerate(types) if self._is_nominal(typ)] ) numeric_attributes = set( [att for att, typ in enumerate(types) if self._is_numeric(typ)] ) metadata = dict( attributes=set(range(n_cols)), n_attributes=n_cols, types=types, 
nominal_attributes=nominal_attributes, numeric_attributes=numeric_attributes, ) return metadata def _update_metadata(self, **kwargs): self._update_dictionary(self.metadata, kind="metadata", **kwargs) # Assure every attribute is `typed`: If not every attribute is here, set to numeric type (default) numeric = self.metadata["numeric_attributes"] nominal = self.metadata["nominal_attributes"] att_ids = self.metadata["attributes"] # All attributes should be accounted for and none should be double. if (len(nominal) + len(numeric) - len(att_ids)) != 0: numeric = att_ids - nominal self._update_dictionary( self.metadata, kind="metadata", numeric_attributes=numeric ) return # Configuration def _reconfig_prediction(self, prediction_algorithm="mi", **kwargs): self.prediction_algorithm = self.prediction_algorithms[prediction_algorithm] self.prd_cfg = self._default_config(self.prediction_algorithm) self.configuration["prediction"] = self.prd_cfg self._update_config(**kwargs) return def _reconfig_inference(self, inference_algorithm="own", **kwargs): self.inference_algorithm = self.inference_algorithms[inference_algorithm] self.inf_cfg = self._default_config(self.inference_algorithm) self.configuration["inference"] = self.inf_cfg self._update_config(**kwargs) return @staticmethod def _default_config(method): config = {} sgn = signature(method) for key, parameter in sgn.parameters.items(): if parameter.default is not parameter.empty: config[key] = parameter.default return config def _update_config(self, **kwargs): for kind in self.configuration: self._update_dictionary(self.configuration[kind], kind=kind, **kwargs) return def _extra_checks_on_config(self): self._check_xgb_single_target() return def _check_xgb_single_target(self): nb_targets = self.configuration["selection"]["nb_targets"] if nb_targets == 1: return None else: if ( self.classifier_algorithm is self.classifier_algorithms["XGB"] or self.regressor_algorithm is self.regressor_algorithms["XGB"] ): xgb = True else: xgb = 
False if xgb: msg = """ XGBoost cannot deal with multi-target outputs. Hence, the `nb_targets` parameter is automatically adapted to 1, so only single-target trees will be learned. Please take this into account. """ warnings.warn(msg) self.configuration["selection"]["nb_targets"] = 1 return def _parse_kwargs(self, kind="selection", **kwargs): prefixes = [e + self.delimiter for e in self.configuration_prefixes[kind]] parameter_map = { x.split(prefix)[1]: x for x in kwargs for prefix in prefixes if x.startswith(prefix) } return parameter_map def _update_dictionary(self, dictionary, kind=None, **kwargs): # Immediate matches overlap = set(dictionary).intersection(set(kwargs)) for k in overlap: dictionary[k] = kwargs[k] if kind is not None: # Parsed matches parameter_map = self._parse_kwargs(kind=kind, **kwargs) overlap = set(dictionary).intersection(set(parameter_map)) for k in overlap: dictionary[k] = kwargs[parameter_map[k]] return # Helpers def _filter_X(self, X): # Filter relevant input attributes if X.shape[1] != len(self.q_compose.desc_ids): indices = self._overlapping_indices( self.q_desc_ids, self.q_compose.desc_ids ) return X[:, indices] @staticmethod def _dummy_array(X): """ Return an array of np.nan, with the same number of rows as the input array. Parameters ---------- X: np.ndarray(), n_rows, n_cols = X.shape, We use the shape of X to deduce shape of our output. Returns ------- a: np.ndarray(), shape= (n_rows, 1) n_rows is the same as the number of rows as X. 
""" n_rows, _ = X.shape a = np.empty((n_rows, 1)) a.fill(np.nan) return a def _default_q_code(self): q_code = np.zeros(self.metadata["n_attributes"]) q_code[-1] = TARG_ENCODING return q_code @staticmethod def _is_nominal(t): condition_01 = t == np.dtype(int) return condition_01 @staticmethod def _is_numeric(t): condition_01 = t == np.dtype(float) return condition_01 @staticmethod def _get_types(metadata): nominal = {i: "nominal" for i in metadata["nominal_attributes"]} numeric = {i: "numeric" for i in metadata["numeric_attributes"]} return {**nominal, **numeric} @staticmethod def _overlapping_indices(a, b): """ Given an array a and b, return the indices (in a) of elements that occur in both a and b. Parameters ---------- a b Returns ------- Examples -------- a = [4,5,6] b = [4,6,7] overlapping_indices(a, b) = [0,2] """ return np.nonzero(np.in1d(a, b))[0] @staticmethod def filter_nodes(g): # This is not as safe as it should be sorted_nodes = list(topological_sort(g)) filtered_nodes = [] for n in reversed(sorted_nodes): if g.nodes[n]["kind"] == "model": break filtered_nodes.append(n) filtered_nodes = list(reversed(filtered_nodes)) return filtered_nodes # SYNTH def autocomplete(self, X, **kwargs): return # Legacy (delete when I am sure they can go) def predict_old( self, X, q_code=None, prediction_algorithm=None, beta=False, **kwargs ): # Update configuration if necessary if q_code is None: q_code = self._default_q_code() if prediction_algorithm is not None: reuse = False self._reconfig_prediction( prediction_algorithm=prediction_algorithm, **kwargs ) # Adjust data tic_prediction = default_timer() self.q_code = q_code self.q_desc_ids, self.q_targ_ids, _ = code_to_query( self.q_code, return_list=True ) # Make query-diagram self.q_diagram = self.prediction_algorithm( self.g_list, q_code, self.fi, self.t_codes, **self.prd_cfg ) toc_prediction = default_timer() tic_dask = default_timer() toc_dask = default_timer() tic_compute = default_timer() res = 
self.q_model.predict.compute() toc_compute = default_timer() # Diagnostics self.model_data["prd_time"] = toc_prediction - tic_prediction self.model_data["dsk_time"] = toc_dask - tic_dask self.model_data["cmp_time"] = toc_compute - tic_compute self.model_data["inf_time"] = toc_compute - tic_prediction self.model_data["ratios"] = ( self.model_data["prd_time"] / self.model_data["inf_time"], self.model_data["dsk_time"] / self.model_data["inf_time"], self.model_data["cmp_time"] / self.model_data["inf_time"], ) return res def _update_g_list(self): types = self._get_types(self.metadata) self.g_list = [ model_to_graph(m, types=types, idx=idx) for idx, m in enumerate(self.m_list) ] return def _update_t_codes(self): self.t_codes = (self.m_codes == TARG_ENCODING).astype(int) return # AVATAR-TOOLS def avatar( self, explainer_data, background_data=None, check_additivity=True, keep_abs_shaps=False, **explainer_kwargs ): assert shap is not None, "SHAP not found, so cannot do anything here." self._init_avatar() for m_idx in range(len(self.m_list)): # Extract tree and m_code tree = self.m_list[m_idx].model m_code = self.m_codes[m_idx] # Filter data attribute_filter = m_code == DESC_ENCODING X = explainer_data[:, attribute_filter] if background_data is not None: B = background_data[:, attribute_filter] else: B = background_data # Shap Calculation explainer = shap.TreeExplainer(tree, data=B, **explainer_kwargs) raw_shaps = explainer.shap_values(X, check_additivity=check_additivity) # Process Shap values abs_shaps = self._raw_to_abs_shaps(raw_shaps) nrm_shaps = self._abs_to_nrm_shaps(abs_shaps) if keep_abs_shaps: self.abs_shaps.append(abs_shaps) self.nrm_shaps.append(nrm_shaps) self._format_abs_shaps() self._format_nrm_shaps() return @staticmethod def _raw_to_abs_shaps(raw_shaps): # Process Shap values tsr_shaps = np.array(raw_shaps) # tensor abs_shaps = np.abs(tsr_shaps) # absolute if len(abs_shaps.shape) == 3: # In case of nominal target, sum shap values across target classes 
abs_shaps = np.sum(abs_shaps, axis=0) return abs_shaps @staticmethod def _abs_to_nrm_shaps(abs_shaps): avg_shaps = np.mean( abs_shaps, axis=0 ) # Avg over instances (of explainer data!) nrm_shaps = np.squeeze( normalize(avg_shaps.reshape(1, -1), norm="l1") ) # Normalize (between 0 and 1) return nrm_shaps def avatar_q_model( self, X_train, X_test, l1_reg="num_features(10)", check_additivity=False, n_samples=20, silent=True, ): assert shap is not None, "SHAP not found, so cannot do anything here." # Extract function to explain m = self.q_model f = self._extract_function_to_explain(self.q_model) # Data assert ( X_train.shape[1] == X_test.shape[1] ), "Inconsistent attribute count. Your carelessness is disappointing." if X_train.shape[1] != len(m.desc_ids): attribute_filter = m.desc_ids X_train = X_train[:, attribute_filter] X_test = X_test[:, attribute_filter] explainer = shap.KernelExplainer(f, shap.sample(X_train, n_samples)) raw_shaps = explainer.shap_values( X_test, l1_reg=l1_reg, check_additivity=check_additivity, silent=silent ) # Process Shap values abs_shaps = self._raw_to_abs_shaps(raw_shaps) nrm_shaps = self._abs_to_nrm_shaps(abs_shaps) return nrm_shaps @staticmethod def _extract_function_to_explain(m): assert m.n_outputs_ == 1 # Extract function if m.out_kind in {"nominal"}: f = lambda x: m.predict_proba(x)[0] elif m.out_kind in {"numerc"}: f = m.predict else: raise ValueError("I don't know this kind of q_model.out_kind") return f def _init_avatar(self): """Initialize avatar-datastructures that are used there. 
""" self.abs_shaps = [] self.nrm_shaps = [] return def _format_nrm_shaps(self): if isinstance(self.nrm_shaps, list) and len(self.nrm_shaps) > 0: init = np.zeros(self.m_codes.shape) for m_idx, (mod, nrm_shap) in enumerate(zip(self.m_list, self.nrm_shaps)): init[m_idx, list(mod.desc_ids)] = nrm_shap self.nrm_shaps = init else: return def _format_abs_shaps(self): if isinstance(self.abs_shaps, list) and len(self.abs_shaps) > 0: n_models, n_attributes = self.m_codes.shape n_instances = self.abs_shaps[0].shape[0] init = np.zeros((n_models, n_instances, n_attributes)) for m_idx, (mod, abs_shap) in enumerate(zip(self.m_list, self.abs_shaps)): init_abs = np.zeros((n_instances, n_attributes)) init_abs[:, list(mod.desc_ids)] = abs_shap init[m_idx, :, :] = init_abs self.abs_shaps = init else: return
30.97076
113
0.606905
3,734
31,776
4.909213
0.132833
0.012547
0.009328
0.01091
0.318641
0.252578
0.175277
0.150946
0.135126
0.114505
0
0.003078
0.304758
31,776
1,025
114
31.000976
0.826679
0.072382
0
0.21745
0
0
0.067073
0
0
0
0
0.000976
0.008054
1
0.067114
false
0
0.038926
0.004027
0.193289
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c12a3cb2ec282c49961e4c9f8bc0a286e747e3af
13,135
py
Python
pysnmp/HUAWEI-LswRSTP-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
11
2021-02-02T16:27:16.000Z
2021-08-31T06:22:49.000Z
pysnmp/HUAWEI-LswRSTP-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
75
2021-02-24T17:30:31.000Z
2021-12-08T00:01:18.000Z
pysnmp/HUAWEI-LswRSTP-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
10
2019-04-30T05:51:36.000Z
2022-02-16T03:33:41.000Z
# # PySNMP MIB module HUAWEI-LswRSTP-MIB (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HUAWEI-LswRSTP-MIB # Produced by pysmi-0.3.4 at Mon Apr 29 19:34:33 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString") NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues") ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection, ValueSizeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "ConstraintsUnion") dot1dStpPortEntry, dot1dStpPort = mibBuilder.importSymbols("BRIDGE-MIB", "dot1dStpPortEntry", "dot1dStpPort") lswCommon, = mibBuilder.importSymbols("HUAWEI-3COM-OID-MIB", "lswCommon") NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance") NotificationType, Integer32, Counter64, ObjectIdentity, Bits, ModuleIdentity, iso, TimeTicks, MibIdentifier, IpAddress, Gauge32, Unsigned32, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "Integer32", "Counter64", "ObjectIdentity", "Bits", "ModuleIdentity", "iso", "TimeTicks", "MibIdentifier", "IpAddress", "Gauge32", "Unsigned32", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn") TextualConvention, TruthValue, DisplayString, MacAddress = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "TruthValue", "DisplayString", "MacAddress") hwLswRstpMib = ModuleIdentity((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6)) hwLswRstpMib.setRevisions(('2001-06-29 00:00',)) if mibBuilder.loadTexts: hwLswRstpMib.setLastUpdated('200106290000Z') if mibBuilder.loadTexts: 
hwLswRstpMib.setOrganization('') class EnabledStatus(TextualConvention, Integer32): status = 'current' subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2)) namedValues = NamedValues(("enabled", 1), ("disabled", 2)) hwLswRstpMibObject = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1)) hwdot1dStpStatus = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 1), EnabledStatus()).setMaxAccess("readwrite") if mibBuilder.loadTexts: hwdot1dStpStatus.setStatus('current') hwdot1dStpForceVersion = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2))).clone(namedValues=NamedValues(("stp", 0), ("rstp", 2)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: hwdot1dStpForceVersion.setStatus('current') hwdot1dStpDiameter = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 7))).setMaxAccess("readwrite") if mibBuilder.loadTexts: hwdot1dStpDiameter.setStatus('current') hwdot1dStpRootBridgeAddress = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 4), MacAddress()).setMaxAccess("readonly") if mibBuilder.loadTexts: hwdot1dStpRootBridgeAddress.setStatus('current') hwDot1dStpBpduGuard = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 6), EnabledStatus()).setMaxAccess("readwrite") if mibBuilder.loadTexts: hwDot1dStpBpduGuard.setStatus('current') hwDot1dStpRootType = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("normal", 1), ("primary", 2), ("secondary", 3)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: hwDot1dStpRootType.setStatus('current') hwDot1dTimeOutFactor = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(3, 7))).setMaxAccess("readwrite") if mibBuilder.loadTexts: hwDot1dTimeOutFactor.setStatus('current') 
# --- Bridge-level scalar: which path-cost calculation standard is in use ----
hwDot1dStpPathCostStandard = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("dot1d-1998", 1), ("dot1t", 2), ("legacy", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwDot1dStpPathCostStandard.setStatus('current')
# --- Per-port extension table ----------------------------------------------
# Augments the standard BRIDGE-MIB dot1dStpPortEntry with Huawei RSTP
# per-port state and counters; rows therefore reuse dot1dStpPort as index.
hwdot1dStpPortXTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5), )
if mibBuilder.loadTexts: hwdot1dStpPortXTable.setStatus('current')
hwdot1dStpPortXEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1), )
dot1dStpPortEntry.registerAugmentions(("HUAWEI-LswRSTP-MIB", "hwdot1dStpPortXEntry"))
hwdot1dStpPortXEntry.setIndexNames(*dot1dStpPortEntry.getIndexNames())
if mibBuilder.loadTexts: hwdot1dStpPortXEntry.setStatus('current')
# Per-port configuration columns (read-write).
hwdot1dStpPortStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 1), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1dStpPortStatus.setStatus('current')
hwdot1dStpPortEdgeport = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 2), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1dStpPortEdgeport.setStatus('current')
hwdot1dStpPortPointToPoint = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("forceTrue", 1), ("forceFalse", 2), ("auto", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1dStpPortPointToPoint.setStatus('current')
hwdot1dStpMcheck = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 4), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1dStpMcheck.setStatus('current')
hwdot1dStpTransLimit = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1dStpTransLimit.setStatus('current')
# Per-port BPDU counters (read-only).
hwdot1dStpRXStpBPDU = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1dStpRXStpBPDU.setStatus('current')
hwdot1dStpTXStpBPDU = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1dStpTXStpBPDU.setStatus('current')
hwdot1dStpRXTCNBPDU = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1dStpRXTCNBPDU.setStatus('current')
hwdot1dStpTXTCNBPDU = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1dStpTXTCNBPDU.setStatus('current')
hwdot1dStpRXRSTPBPDU = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1dStpRXRSTPBPDU.setStatus('current')
hwdot1dStpTXRSTPBPDU = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1dStpTXRSTPBPDU.setStatus('current')
# Write-only style action columns (writing the single named value triggers
# the action on the agent).
hwdot1dStpClearStatistics = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("clear", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1dStpClearStatistics.setStatus('current')
hwdot1dSetStpDefaultPortCost = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("enable", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1dSetStpDefaultPortCost.setStatus('current')
hwdot1dStpRootGuard = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 14), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1dStpRootGuard.setStatus('current')
hwdot1dStpLoopGuard = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 15), EnabledStatus()).setMaxAccess("readwrite") if mibBuilder.loadTexts: hwdot1dStpLoopGuard.setStatus('current') hwdot1dStpPortBlockedReason = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("notBlock", 1), ("blockForProtocol", 2), ("blockForRootGuard", 3), ("blockForBPDUGuard", 4), ("blockForLoopGuard", 5)))).setMaxAccess("readonly") if mibBuilder.loadTexts: hwdot1dStpPortBlockedReason.setStatus('current') hwdot1dStpRXTCBPDU = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 17), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: hwdot1dStpRXTCBPDU.setStatus('current') hwdot1dStpPortSendingBPDUType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2))).clone(namedValues=NamedValues(("stp", 0), ("rstp", 2)))).setMaxAccess("readonly") if mibBuilder.loadTexts: hwdot1dStpPortSendingBPDUType.setStatus('current') hwdot1dStpOperPortPointToPoint = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("true", 1), ("false", 2)))).setMaxAccess("readonly") if mibBuilder.loadTexts: hwdot1dStpOperPortPointToPoint.setStatus('current') hwRstpEventsV2 = ObjectIdentity((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 0)) if mibBuilder.loadTexts: hwRstpEventsV2.setStatus('current') hwRstpBpduGuarded = NotificationType((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 0, 1)).setObjects(("BRIDGE-MIB", "dot1dStpPort")) if mibBuilder.loadTexts: hwRstpBpduGuarded.setStatus('current') hwRstpRootGuarded = NotificationType((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 0, 2)).setObjects(("BRIDGE-MIB", "dot1dStpPort")) if mibBuilder.loadTexts: 
hwRstpRootGuarded.setStatus('current') hwRstpBridgeLostRootPrimary = NotificationType((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 0, 3)) if mibBuilder.loadTexts: hwRstpBridgeLostRootPrimary.setStatus('current') hwRstpLoopGuarded = NotificationType((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 0, 4)).setObjects(("BRIDGE-MIB", "dot1dStpPort")) if mibBuilder.loadTexts: hwRstpLoopGuarded.setStatus('current') hwdot1dStpIgnoredVlanTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 10), ) if mibBuilder.loadTexts: hwdot1dStpIgnoredVlanTable.setStatus('current') hwdot1dStpIgnoredVlanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 10, 1), ).setIndexNames((0, "HUAWEI-LswRSTP-MIB", "hwdot1dVlan")) if mibBuilder.loadTexts: hwdot1dStpIgnoredVlanEntry.setStatus('current') hwdot1dVlan = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 10, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4094))).setMaxAccess("readonly") if mibBuilder.loadTexts: hwdot1dVlan.setStatus('current') hwdot1dStpIgnore = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 10, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: hwdot1dStpIgnore.setStatus('current') mibBuilder.exportSymbols("HUAWEI-LswRSTP-MIB", hwdot1dStpTXStpBPDU=hwdot1dStpTXStpBPDU, hwdot1dStpPortPointToPoint=hwdot1dStpPortPointToPoint, hwdot1dStpRXTCNBPDU=hwdot1dStpRXTCNBPDU, hwdot1dStpMcheck=hwdot1dStpMcheck, hwdot1dStpTXTCNBPDU=hwdot1dStpTXTCNBPDU, hwdot1dStpIgnore=hwdot1dStpIgnore, hwdot1dStpIgnoredVlanEntry=hwdot1dStpIgnoredVlanEntry, hwdot1dStpTXRSTPBPDU=hwdot1dStpTXRSTPBPDU, hwdot1dStpRXStpBPDU=hwdot1dStpRXStpBPDU, hwDot1dStpBpduGuard=hwDot1dStpBpduGuard, hwdot1dStpStatus=hwdot1dStpStatus, hwRstpLoopGuarded=hwRstpLoopGuarded, hwDot1dStpRootType=hwDot1dStpRootType, hwdot1dStpTransLimit=hwdot1dStpTransLimit, hwdot1dStpPortStatus=hwdot1dStpPortStatus, 
hwdot1dStpRXRSTPBPDU=hwdot1dStpRXRSTPBPDU, hwdot1dStpClearStatistics=hwdot1dStpClearStatistics, hwDot1dStpPathCostStandard=hwDot1dStpPathCostStandard, hwLswRstpMibObject=hwLswRstpMibObject, hwdot1dStpDiameter=hwdot1dStpDiameter, PYSNMP_MODULE_ID=hwLswRstpMib, hwdot1dStpOperPortPointToPoint=hwdot1dStpOperPortPointToPoint, hwLswRstpMib=hwLswRstpMib, hwdot1dStpPortEdgeport=hwdot1dStpPortEdgeport, hwdot1dStpPortXTable=hwdot1dStpPortXTable, hwdot1dStpRXTCBPDU=hwdot1dStpRXTCBPDU, hwdot1dStpLoopGuard=hwdot1dStpLoopGuard, hwRstpRootGuarded=hwRstpRootGuarded, EnabledStatus=EnabledStatus, hwdot1dStpRootGuard=hwdot1dStpRootGuard, hwdot1dStpIgnoredVlanTable=hwdot1dStpIgnoredVlanTable, hwRstpBridgeLostRootPrimary=hwRstpBridgeLostRootPrimary, hwdot1dStpRootBridgeAddress=hwdot1dStpRootBridgeAddress, hwRstpBpduGuarded=hwRstpBpduGuarded, hwRstpEventsV2=hwRstpEventsV2, hwdot1dSetStpDefaultPortCost=hwdot1dSetStpDefaultPortCost, hwdot1dStpForceVersion=hwdot1dStpForceVersion, hwDot1dTimeOutFactor=hwDot1dTimeOutFactor, hwdot1dStpPortXEntry=hwdot1dStpPortXEntry, hwdot1dStpPortBlockedReason=hwdot1dStpPortBlockedReason, hwdot1dVlan=hwdot1dVlan, hwdot1dStpPortSendingBPDUType=hwdot1dStpPortSendingBPDUType)
125.095238
1,870
0.767035
1,447
13,135
6.961299
0.138217
0.015686
0.012211
0.015884
0.432145
0.37536
0.269731
0.254045
0.236275
0.218009
0
0.091075
0.081309
13,135
104
1,871
126.298077
0.743681
0.025124
0
0
0
0
0.11191
0.003439
0
0
0
0
0
1
0
false
0
0.083333
0
0.125
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
c12c2ac656b7260dfdb953a62cfeab4d5b386d09
6,377
py
Python
src/ydata_quality/duplicates/engine.py
poga/ydata-quality
0cdda2774b05101c5f4f773b5e946f2a6544da09
[ "MIT" ]
242
2021-09-22T17:16:49.000Z
2022-03-30T10:26:25.000Z
src/ydata_quality/duplicates/engine.py
poga/ydata-quality
0cdda2774b05101c5f4f773b5e946f2a6544da09
[ "MIT" ]
13
2021-09-23T00:15:10.000Z
2022-02-04T16:33:42.000Z
src/ydata_quality/duplicates/engine.py
poga/ydata-quality
0cdda2774b05101c5f4f773b5e946f2a6544da09
[ "MIT" ]
21
2021-09-24T09:59:30.000Z
2022-03-16T02:48:11.000Z
""" Implementation of DuplicateChecker engine class to run duplicate records analysis. """ from typing import List, Optional, Union from pandas import DataFrame from src.ydata_quality.core.warnings import Priority from ..core import QualityEngine, QualityWarning from ..utils.auxiliary import find_duplicate_columns class DuplicateChecker(QualityEngine): "Engine for running analyis on duplicate records." def __init__(self, df: DataFrame, entities: List[Union[str, List[str]]] = None, is_close: bool = False, severity: Optional[str] = None): """ Arguments: df (DataFrame): reference DataFrame used to run the DataQuality analysis. entities (List[Union[str, List[str]]]): entities relevant for duplicate analysis. Passing lists allows composed entities of multiple columns. is_close (bool): Pass True to use numpy.isclose instead of pandas.equals in column comparison. severity (str): Sets the logger warning threshold. Valid levels are [DEBUG, INFO, WARNING, ERROR, CRITICAL].""" super().__init__(df=df, severity=severity) self._entities = [] if entities is None else entities self._tests = ["exact_duplicates", "entity_duplicates", "duplicate_columns"] self._is_close = is_close @property def entities(self): "Property that returns the entities relevant for duplicates analysis." return self._entities @entities.setter def entities(self, entities: List[Union[str, List[str]]]): if not isinstance(entities, list): raise ValueError("Property 'entities' should be a list.") entities = self.__unique_entities(entities) assert all(entity in self.df.columns if isinstance(entity, str) else [ c in self.df.columns for c in entity] for entity in entities), "Given entities should exist as \ DataFrame's columns." 
self._entities = entities @staticmethod def __unique_entities(entities: List[Union[str, List[str]]]): """Returns entities list with only unique entities""" entities = set(entity if isinstance(entity, str) else entity[0] if len( entity) == 1 else tuple(entity) for entity in entities) return [entity if isinstance(entity, str) else list(entity) for entity in entities] @staticmethod def __get_duplicates(df: DataFrame): "Returns duplicate records." return df[df.duplicated()] @staticmethod def __get_entity_duplicates(df: DataFrame, entity: Union[str, List[str]]): "Returns the duplicate records aggregated by a given entity." return df.groupby(entity).apply(DuplicateChecker.__get_duplicates).reset_index(drop=True) def exact_duplicates(self): "Returns a DataFrame filtered for exact duplicate records." dups = self.__get_duplicates(self.df) # Filter for duplicate instances if len(dups) > 0: self.store_warning( QualityWarning( test=QualityWarning.Test.EXACT_DUPLICATES, category=QualityWarning.Category.DUPLICATES, priority=Priority.P2, data=dups, description=f"Found {len(dups)} instances with exact duplicate feature values." )) else: self._logger.info("No exact duplicates were found.") dups = None return dups def __provided_entity_dups(self, entity: Optional[Union[str, List[str]]] = None) -> dict: "Find duplicates for passed entity (simple or composed)." found_dups = {} dups = self.__get_entity_duplicates(self.df, entity) if len(dups) > 0: # if we have any duplicates self.store_warning( QualityWarning( test='Entity Duplicates', category='Duplicates', priority=Priority.P2, data=dups, description=f"Found {len(dups)} duplicates after grouping by entities." 
)) if isinstance(entity, str): entity = [entity] # Makes logic the same for str or List[str] entities set_vals = set(dups[entity].apply(tuple, axis=1)) if len(entity) > 1: entity_key = tuple(entity) # Lists are not hashable, therefore cannot be dictionary keys else: # No need to store keys as tuples for single entities (single values) set_vals = [val[0] for val in set_vals] entity_key = entity[0] for val in set_vals: # iterate on each entity with duplicates found_dups.setdefault(entity_key, {})[val] = dups[(dups[entity].values == val).all(axis=1)] return found_dups def entity_duplicates(self, entity: Optional[Union[str, List[str]]] = None): """Returns a dict of {entity: {entity_value: duplicates}} of duplicate records after grouping by an entity. If entity is not specified, compute for all entities defined in the init. """ ent_dups = {} if entity is not None: # entity is specified ent_dups.update(self.__provided_entity_dups(entity)) else: # if entity is not specified if len(self.entities) == 0: self._logger.warning("There are no entities defined to run the analysis. Skipping the test.") return None for col in self.entities: ent_dups.update(self.entity_duplicates(col)) return ent_dups def duplicate_columns(self): "Returns a mapping dictionary of columns with fully duplicated feature values." dups = find_duplicate_columns(self.df, self._is_close) cols_with_dups = len(dups.keys()) if cols_with_dups > 0: self.store_warning( QualityWarning( test=QualityWarning.Test.DUPLICATE_COLUMNS, category=QualityWarning.Category.DUPLICATES, priority=Priority.P1, data=dups, description=f"Found {cols_with_dups} columns with exactly the same feature values as other columns." ) ) else: self._logger.info("No duplicate columns were found.") dups = None return dups
45.877698
120
0.631175
747
6,377
5.26506
0.243641
0.014238
0.021358
0.026697
0.237478
0.167302
0.08238
0.08238
0.063565
0.034579
0
0.003067
0.284303
6,377
138
121
46.210145
0.858677
0.233966
0
0.201923
0
0
0.162418
0
0
0
0
0
0.009615
1
0.096154
false
0.009615
0.048077
0
0.240385
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c12f3aad32b9c24ae341c2d85562fa4d677a6dca
89
py
Python
notebooks/classic/exercise/matplotlib-subplots.py
kmunve/ml-workshop
96a42e663bb656e97231eff17ef4ca21e2a14b0e
[ "MIT" ]
3
2020-02-17T13:35:56.000Z
2020-10-22T13:15:28.000Z
notebooks/classic/exercise/matplotlib-subplots.py
kmunve/ml-workshop
96a42e663bb656e97231eff17ef4ca21e2a14b0e
[ "MIT" ]
7
2020-02-09T17:52:44.000Z
2020-02-09T17:52:53.000Z
notebooks/classic/exercise/matplotlib-subplots.py
kmunve/ml-workshop
96a42e663bb656e97231eff17ef4ca21e2a14b0e
[ "MIT" ]
4
2019-07-22T17:05:52.000Z
2020-01-23T12:17:59.000Z
# Create a figure with two vertically stacked subplots sharing the x-axis,
# then write the figure to disk as "two-subplots.png".
# NOTE(review): relies on `plt` (presumably matplotlib.pyplot) being imported
# earlier in the notebook/script — confirm the import exists.
fig, ax = plt.subplots(2, sharex='all', figsize=(10, 5))  # 2 rows, shared x, 10x5 inch canvas
fig.savefig("two-subplots.png")
29.666667
56
0.674157
15
89
4
0.866667
0
0
0
0
0
0
0
0
0
0
0.049383
0.089888
89
3
57
29.666667
0.691358
0
0
0
0
0
0.211111
0
0
0
0
0
0
1
0
true
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
3
c12f7689727d68b07585dc735616888c343cb5e6
3,575
py
Python
dataio/python/pprint.py
hschwane/offline_production
e14a6493782f613b8bbe64217559765d5213dc1e
[ "MIT" ]
1
2020-12-24T22:00:01.000Z
2020-12-24T22:00:01.000Z
dataio/python/pprint.py
hschwane/offline_production
e14a6493782f613b8bbe64217559765d5213dc1e
[ "MIT" ]
null
null
null
dataio/python/pprint.py
hschwane/offline_production
e14a6493782f613b8bbe64217559765d5213dc1e
[ "MIT" ]
3
2020-07-17T09:20:29.000Z
2021-03-30T16:44:18.000Z
import collections
import re

from icecube import icetray
from icecube import dataclasses
from icecube import dataio

# COMPAT FIX: `Iterable` lives in collections.abc since Python 3.3 and was
# removed from the `collections` top level in Python 3.10; keep a fallback
# so the old spelling still works on ancient interpreters.
try:
    from collections.abc import Iterable as _Iterable
except ImportError:
    _Iterable = collections.Iterable


def format_line(frame, key, maxwidth=None, ellipsis='...'):
    '''Given an icecube frame and a key in that frame, return exactly
    one line of text describing the I3FrameObject with that key.
    Try to make the text as useful to a human reader as possible.
    If accessing the object generates an exception, catch it and
    return its description.
    Clip to an optional maximum width with a trailing ellipsis'''
    try:
        obj = frame[key]
        if (obj is None) and (key in frame):
            return '(Unreadable)'
        if hasattr(obj, "apply"):
            obj = obj.apply(frame)
        haslength = isinstance(obj, _Iterable)
    except Exception:
        obstr = '(Unreadable)'
    else:
        if haslength:
            obstr = 'Iterable with {0} items'.format(len(obj))
        else:
            try:
                # give the module and class name
                obstr = '{0}.{1} object'.format(obj.__module__, obj.__class__.__name__)
            except Exception:
                # fall back to the first line of the basic repr
                obstr = repr(obj).split('\n')[0]
    if maxwidth:
        if len(obstr) > maxwidth:
            # clip and append the ellipsis (itself clipped for tiny widths)
            obstr = obstr[:maxwidth - len(ellipsis)] + ellipsis[0:maxwidth]
    return obstr


def format_detail(frame, key):
    '''Given an icecube frame and a key in that frame, return a
    human-readable string that describes the item in detail.'''
    obj = None  # keep `obj` bound for the fallback check even if the lookup fails
    try:
        obj = frame[key]
        if hasattr(obj, "apply"):
            obj = obj.apply(frame)
        # BUGFIX: these tests are one if/elif chain now. The original used two
        # separate `if` statements, so for an I3String the `message = obj.value`
        # assignment was immediately overwritten by the generic `else` branch.
        if isinstance(obj, dataclasses.I3String):
            message = obj.value
        elif isinstance(obj, dataclasses.I3Double):
            message = str(obj.value)
        elif hasattr(obj, "items"):
            # mapping-like objects: render one "key: value" line per entry
            message = '{\n'
            for k in obj.keys():
                message += str(k) + ': ' + str(obj[k]) + '\n'
            message += '}'
        else:
            message = str(obj)
    except Exception as e:
        message = '({0})'.format(e)

    # Raw string added for the regex (the original relied on non-raw escapes).
    if re.match(r'<icecube\.[\S]*\.[\S]* object at [0-9xa-f]*>', message):
        # Standard boring repr. In some cases we might be able to do better:
        # iterables can at least have their contents listed. (This branch is
        # only reached when the lookup above succeeded, so `obj` is valid.)
        if isinstance(obj, _Iterable):
            message += ', contents:\n' + '\n'.join([str(x) for x in frame[key]])
    return message


def format_xml(frame, key):
    '''Given an icecube frame and a key in that frame, return the xml
    serialization of the item.'''
    try:
        if key in frame:
            message = frame.as_xml(key)
        else:
            message = key + ' not in frame'
    except Exception as e:
        message = '({0})'.format(e)
    return message.expandtabs(4)


def format_size(frame, key):
    '''Given an icecube frame and a key in that frame, return the size
    of the string. Default converts the string in Kilo, Mega, or GigaByte.
    Adjust conversion to different formats by supplying the list with
    given unit names.'''
    cfactor = 1024.
    sunit = False
    unit = ['K', 'M', 'G']
    if key in frame:
        size = frame.size(key)
    else:
        return str()
    # divide down until the value fits below one unit step or units run out
    while size > cfactor and bool(unit):
        size /= cfactor
        sunit = unit.pop(0)
    if bool(sunit):
        if size < 10:
            return '{0:1.1f}{1:1s}'.format(size, sunit)
        else:
            return '{0:4.0f}{1:1s}'.format(size, sunit)
    # Bytes are integer value, so show them like this
    return '{0:4d} '.format(size)
30.818966
86
0.575385
466
3,575
4.377682
0.32618
0.027451
0.027451
0.037255
0.196078
0.163725
0.163725
0.163725
0.09902
0.09902
0
0.013029
0.313007
3,575
115
87
31.086957
0.81759
0.258741
0
0.32
0
0
0.080976
0.008524
0.013333
0
0
0
0
1
0.053333
false
0
0.066667
0
0.226667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c12ff6fc86fc337cb5664cef1cd543659806c57c
600
py
Python
staris.py
aertoria/MiscCode
a2e94d0fe0890e6620972f84adcb7976ca9f1408
[ "Apache-2.0" ]
null
null
null
staris.py
aertoria/MiscCode
a2e94d0fe0890e6620972f84adcb7976ca9f1408
[ "Apache-2.0" ]
null
null
null
staris.py
aertoria/MiscCode
a2e94d0fe0890e6620972f84adcb7976ca9f1408
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python
# You are climbing a stair case. It takes n steps to reach to the top.
# Each time you can either climb 1 or 2 steps. In how many distinct ways
# can you climb to the top?  (The count is the (n+1)-th Fibonacci number.)


class Solution:
    # @param {integer} n
    # @return {integer}
    count = 0

    def climbStairs(self, n):
        """Return the number of distinct ways to climb n steps taking 1 or 2 at a time.

        FIXES vs. the original:
        - O(n) iterative Fibonacci recurrence instead of the exponential
          double recursion (climbStairs(35) needed ~30M recursive calls).
        - self.count is set to the answer for this call instead of
          accumulating across calls (repeated calls used to return the sum
          of all previous answers).
        - print() call syntax, valid on both Python 2 and 3 (the original
          `print self.count` statement is a SyntaxError on Python 3).
        """
        if n < 0:
            ways = 0  # cannot climb a negative number of steps
        else:
            # ways(0) = 1, ways(1) = 1, ways(k) = ways(k-1) + ways(k-2)
            prev, ways = 1, 1
            for _ in range(n - 1):
                prev, ways = ways, prev + ways
        self.count = ways  # preserve the observable side effect on self.count
        print(self.count)
        return self.count

    def rec_climb(self, n):
        """Original exponential recursion, kept for backward compatibility.

        Increments self.count once per complete path to the top.
        """
        if n == 0:
            # reached the top exactly: one more valid way
            self.count = self.count + 1
        elif n < 0:
            # overshot the top: not a valid way
            pass
        else:
            self.rec_climb(n - 1)
            self.rec_climb(n - 2)


solution = Solution()
solution.climbStairs(35)
18.181818
97
0.675
103
600
3.893204
0.504854
0.079801
0.089776
0.097257
0
0
0
0
0
0
0
0.021322
0.218333
600
33
98
18.181818
0.833689
0.443333
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0.0625
0
null
null
0.0625
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
1
0
0
0
0
0
2
c131857f7131f2f64a0c9cd301cbb4e69c3dcbec
9,619
py
Python
CryptoAttacks/Block/ecb.py
akbarszcz/CryptoAttacks
ae675d016b314414a3dc9b23c7d8a32da4c62457
[ "MIT" ]
54
2017-03-28T23:46:58.000Z
2022-02-23T01:53:38.000Z
CryptoAttacks/Block/ecb.py
maximmasiutin/CryptoAttacks
d1d47d3cb2ce38738a60b728bc35ce80bfe64374
[ "MIT" ]
null
null
null
CryptoAttacks/Block/ecb.py
maximmasiutin/CryptoAttacks
d1d47d3cb2ce38738a60b728bc35ce80bfe64374
[ "MIT" ]
13
2017-03-31T06:07:23.000Z
2021-11-20T19:01:30.000Z
from __future__ import absolute_import, division, print_function

import string
from builtins import bytes, range

from CryptoAttacks.Math import factors
from CryptoAttacks.Utils import (add_padding, b2h, chunks, log, print_chunks,
                                 random_bytes)


def encryption_oracle(payload):
    """Function implementing encryption oracle with ecb mode

    Args:
        payload(string): raw data to encrypt

    Returns:
        string
    """
    # Placeholder: callers of this module supply their own oracle.
    raise NotImplementedError


def is_ecb(cipher, block_size=16):
    """Check if there are repeated blocks in ciphertext

    Args:
        cipher(string)
        block_size(int)

    Returns:
        bool: True if there are repeated blocks (so it's probably ECB mode)
    """
    # ECB encrypts identical plaintext blocks to identical ciphertext blocks,
    # so any duplicate block is strong evidence of ECB.
    cipher_blocks = chunks(cipher, block_size)
    unique_blocks = set(cipher_blocks)
    if len(unique_blocks) < len(cipher_blocks):
        return True
    return False


def find_block_size(encryption_oracle, constant=True):
    """Determine block size if ecb mode

    Args:
        encryption_oracle(callable)
        constant(bool): True if prefix and suffix have constant length

    Returns:
        int
    """
    if constant:
        log.debug("constant == True")
        # Grow the payload one byte at a time; the first jump in ciphertext
        # length equals one block (a whole new padding block was added).
        payload = bytes(b'A')
        size = len(encryption_oracle(payload))
        while True:
            payload += bytes(b'A')
            new_size = len(encryption_oracle(payload))
            if new_size > size:
                log.info("block_size={}".format(new_size - size))
                return new_size - size
    else:
        log.debug("constant == False")
        # Prefix/suffix lengths vary, so the block size must divide the
        # ciphertext length; test each candidate divisor.
        payload = bytes(b'A')
        max_size = len(encryption_oracle(payload))
        possible_sizes = factors(max_size)
        possible_sizes.add(max_size)
        blocks_to_send = 5
        for block_size in sorted(possible_sizes):
            """send payload of length x, so at least x-1 blocks should be identical"""
            payload = random_bytes(1) * (blocks_to_send*block_size)
            enc_chunks = chunks(encryption_oracle(payload), block_size)
            # A run of identical ciphertext blocks confirms this candidate:
            # identical plaintext blocks only line up at the true block size.
            for x in range(len(enc_chunks)-1):
                if enc_chunks[x] == enc_chunks[x+1]:
                    log.debug("Found two identical blocks at {}: {}".format(x, print_chunks(enc_chunks)))
                    for y in range(2, blocks_to_send-1):
                        if enc_chunks[x] != enc_chunks[x+y]:
                            break
                    else:
                        # for/else: the whole run matched, block size found
                        log.info("block_size={}".format(block_size))
                        return block_size


def find_prefix_suffix_size(encryption_oracle, block_size=16):
    """Determine prefix and suffix sizes if ecb mode, sizes must be constant
    Rarely may fail (if random data that are send unhappily matches prefix/suffix)

    Args:
        encryption_oracle(callable)
        block_size(int)

    Returns:
        tuple(int,int): prefix_size, suffix_size
    """
    # Step 1: locate the first ciphertext block fully controlled by us —
    # a run of identical blocks produced by our repeated-byte payload.
    blocks_to_send = 5
    payload = random_bytes(1) * (blocks_to_send * block_size)
    enc_chunks = chunks(encryption_oracle(payload), block_size)
    log.debug("Encryption of length {}".format(blocks_to_send * block_size))
    log.debug(print_chunks(enc_chunks))

    for position_start in range(len(enc_chunks) - 1):
        if enc_chunks[position_start] == enc_chunks[position_start + 1]:
            for y in range(2, blocks_to_send - 1):
                if enc_chunks[position_start] != enc_chunks[position_start + y]:
                    break
            else:
                log.success("Controlled payload start at chunk {}".format(position_start))
                break
    else:
        log.critical_error("Position of controlled chunks not found")

    log.info('Finding prefix')
    # Step 2: flip one payload byte at a time; the first flip that changes
    # the controlled block reveals how far the prefix reaches into it.
    changed_char = bytes([(payload[0] - 1)%256])
    for aligned_bytes in range(block_size):
        payload_new = payload[:aligned_bytes] + changed_char + payload[aligned_bytes+1:]
        enc_chunks_new = chunks(encryption_oracle(payload_new), block_size)
        log.debug(print_chunks(chunks(payload_new, block_size)))
        log.debug(print_chunks(enc_chunks_new))
        if enc_chunks_new[position_start] != enc_chunks[position_start]:
            prefix_size = position_start*block_size - aligned_bytes
            log.success("Prefix size: {}".format(prefix_size))
            break
    else:
        log.critical_error("Size of prefix not found")

    log.info('Finding suffix')
    # Step 3: with prefix block-aligned, grow the payload until the padding
    # boundary is crossed; the bytes added so far tell the suffix length.
    payload = random_bytes(1) * (block_size - (prefix_size % block_size))  # align to block_size
    encrypted = encryption_oracle(payload)
    suffix_size = len(encrypted) - len(payload) - prefix_size
    while True:
        payload += random_bytes(1)
        suffix_size -= 1
        if len(encryption_oracle(payload)) > len(encrypted):
            log.success("Suffix size: {}".format(suffix_size))
            break
    else:
        log.critical_error("Size of suffix not found")

    return prefix_size, suffix_size
def decrypt(encryption_oracle, constant=True, block_size=16, prefix_size=None, secret_size=None, alphabet=None):
    """Given encryption oracle which produce ecb(prefix || our_input || secret), find secret

    Byte-at-a-time ECB decryption: shift the secret so its first unknown byte
    falls at the end of a block we can predict, then brute-force that byte by
    comparing oracle outputs.

    Args:
        encryption_oracle(callable)
        constant(bool): True if prefix have constant length (secret must have constant length)
        block_size(int/None)
        prefix_size(int/None)
        secret_size(int/None)
        alphabet(string): plaintext space

    Returns:
        secret(string)
    """
    log.debug("Start decrypt function")
    if not alphabet:
        alphabet = bytes(string.printable.encode())

    if not block_size:
        block_size = find_block_size(encryption_oracle, constant)

    if constant:
        log.debug("constant == True")
        if not prefix_size or not secret_size:
            prefix_size, secret_size = find_prefix_suffix_size(encryption_oracle, block_size)

        """Start decrypt"""
        secret = bytes(b'')
        # Padding so that the attacker-controlled region starts block-aligned
        # right after the prefix (empty if the prefix is already aligned).
        aligned_bytes = random_bytes(1) * (block_size - (prefix_size % block_size))
        if len(aligned_bytes) == block_size:
            aligned_bytes = bytes(b'')

        # Padding so that prefix + payload + secret ends block-aligned.
        aligned_bytes_suffix = random_bytes(1) * (block_size - (secret_size % block_size))
        if len(aligned_bytes_suffix) == block_size:
            aligned_bytes_suffix = bytes(b'')

        # Index (from the end) of the ciphertext block holding the byte we are
        # currently guessing; moves one block left per block of recovered secret.
        block_to_find_position = -1
        controlled_block_position = (prefix_size+len(aligned_bytes)) // block_size
        while len(secret) < secret_size:
            if (len(secret)+1) % block_size == 0:
                block_to_find_position -= 1

            # Reference encryption: unknown byte sits at the end of the target block.
            payload = aligned_bytes + aligned_bytes_suffix + random_bytes(1) + secret
            enc_chunks = chunks(encryption_oracle(payload), block_size)
            block_to_find = enc_chunks[block_to_find_position]
            log.debug("To guess at position {}:".format(block_to_find_position))
            log.debug("Plain: " + print_chunks(chunks(bytes(b'P'*prefix_size) + payload + bytes(b'S'*secret_size), block_size)))
            log.debug("Encry: " + print_chunks(enc_chunks)+"\n")

            # Brute-force the unknown byte: craft guess+known-secret at a
            # controlled, block-aligned position and compare ciphertext blocks.
            for guessed_char in range(256):
                guessed_char = bytes([guessed_char])
                payload = aligned_bytes + add_padding(guessed_char + secret, block_size)
                enc_chunks = chunks(encryption_oracle(payload), block_size)
                log.debug("Plain: " + print_chunks(chunks(bytes(b'P'*prefix_size) + payload + bytes(b'S'*secret_size), block_size)))
                log.debug("Encry: " + print_chunks(enc_chunks)+"\n")
                if block_to_find == enc_chunks[controlled_block_position]:
                    secret = guessed_char + secret
                    log.debug("Found char, secret={}".format(repr(secret)))
                    break
            else:
                # for/else: no byte value matched — wrong assumptions or alphabet
                log.critical_error("Char not found, try change alphabet. Secret so far: {}".format(repr(secret)))
        log.success("Secret(hex): {}".format(b2h(secret)))
        return secret
    else:
        # Non-constant prefix/suffix sizes are not implemented.
        log.debug("constant == False")


def known_plaintexts(pairs, ciphertext, block_size=16):
    """Given enough pairs plaintext-ciphertext, we can assign ciphertexts blocks to plaintexts blocks,
    then we can possibly decrypt ciphertext

    Args:
        pairs(list): list of dict, [{'cipher': 'aaa', 'plain': 'bbb'}, {'cipher': 'xxx', 'plain': 'pwa'}]
            plaintexts have to be correctly padded (len(cipher) == len(plain))
        ciphertext(string): ciphertext to decrypt
        block_size(int)

    Returns
        tuple: ([decrypted_ciphertext_blocks], {'ciphertext_block': 'plaintext_block', ...})
        decrypted_ciphertext_blocks may contain not-decrypted blocks from ciphertext
    """
    # Build the codebook: in ECB, each ciphertext block always maps to the
    # same plaintext block, so known pairs directly populate the mapping.
    result_mapping = {}
    for pair in pairs:
        ciphertext_blocks = chunks(pair['cipher'], block_size)
        plaintext_blocks = chunks(pair['plain'], block_size)
        if len(ciphertext_blocks) != len(plaintext_blocks):
            # length mismatch means the pair is not correctly padded
            print(pair)
            print(ciphertext_blocks, plaintext_blocks)
            print(len(ciphertext_blocks), len(plaintext_blocks))
            assert 0
        for cipher_block_no in range(len(ciphertext_blocks)):
            result_mapping[ciphertext_blocks[cipher_block_no]] = plaintext_blocks[cipher_block_no]

    # Substitute every target block that appears in the codebook; unknown
    # blocks are left as ciphertext.
    target_ciphertext_blocks = chunks(ciphertext, block_size)
    for cipher_block_no in range(len(target_ciphertext_blocks)):
        if target_ciphertext_blocks[cipher_block_no] in list(result_mapping.keys()):
            target_ciphertext_blocks[cipher_block_no] = result_mapping[target_ciphertext_blocks[cipher_block_no]]

    return target_ciphertext_blocks, result_mapping
39.584362
132
0.640607
1,179
9,619
4.977099
0.149279
0.07362
0.043115
0.020279
0.406271
0.299591
0.222222
0.205351
0.145535
0.105658
0
0.006043
0.260214
9,619
242
133
39.747934
0.818578
0.174031
0
0.256757
0
0
0.067881
0
0
0
0
0
0.006757
1
0.040541
false
0
0.033784
0
0.121622
0.094595
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c133b030e2d992d0cf7302a80fd9d38d5daf7e7c
973
py
Python
codes/convergence_elasticity_advection/bilinear.py
adRenaud/research
2f0062a1800d7a17577bbfc2393b084253d567f4
[ "MIT" ]
1
2021-06-18T14:52:03.000Z
2021-06-18T14:52:03.000Z
codes/comparison/fem/bilinear.py
adRenaud/research
2f0062a1800d7a17577bbfc2393b084253d567f4
[ "MIT" ]
1
2019-01-07T13:11:11.000Z
2019-01-07T13:11:11.000Z
codes/convergence_elasticity_advection/bilinear.py
adRenaud/research
2f0062a1800d7a17577bbfc2393b084253d567f4
[ "MIT" ]
null
null
null
#!/usr/bin/python
import numpy as np


def bilinear(x, u_n, u, EPn, Pn, E, Sigy, H):
    """Return-mapping update for 1D bilinear (linear isotropic hardening)
    elastoplasticity on a mesh of 1D elements.

    Arguments:
        x: node coordinates (len N+1)
        u_n: previous displacement field (accepted for interface
             compatibility; the update below does not need it — the
             original computed the previous strain and never used it)
        u: current displacement field (len N+1)
        EPn: previous plastic strain per element (len N)
        Pn: previous cumulated plastic strain per element (len N)
        E: Young's modulus
        Sigy: initial yield stress
        H: hardening modulus

    Returns:
        (S, P, EP, TM): stress, cumulated plastic strain, plastic strain
        and tangent modulus arrays, one value per element.
    """
    # element sizes and current strain (np.diff replaces the manual slicing)
    h = np.diff(x)
    eps = np.diff(u) / h

    n_elem = len(eps)
    S = np.zeros(n_elem)
    EP = np.zeros(n_elem)
    P = np.zeros(n_elem)
    TM = np.zeros(n_elem)

    # Loop on integration points (one per element)
    for i, defo in enumerate(eps):
        # (i) Elastic prediction (trial stress)
        Selas = E * (defo - EPn[i])
        # (ii) Yield criterion with linear hardening
        f = np.abs(Selas) - (Sigy + H * Pn[i])
        if f <= 0:
            # elastic step: state carried over unchanged
            S[i] = Selas
            EP[i] = EPn[i]
            P[i] = Pn[i]
            TM[i] = E
        else:
            # elastoplastic step: closed-form radial return for linear hardening
            dP = f / (E + H)
            P[i] = Pn[i] + dP
            EP[i] = EPn[i] + dP * np.sign(Selas)
            S[i] = E * (defo - EP[i])
            TM[i] = (E * H) / (E + H)  # elastoplastic tangent modulus
    return S, P, EP, TM
28.617647
66
0.464543
169
973
2.639053
0.337278
0.022422
0.089686
0.116592
0.053812
0.053812
0.053812
0.053812
0
0
0
0.012308
0.331963
973
33
67
29.484848
0.673846
0.174717
0
0
0
0
0
0
0
0
0
0
0
1
0.041667
false
0
0.041667
0
0.125
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c133b53053daca0e859c64e76f3f3420f8594a0a
1,827
py
Python
MR-OCP/MROCPdjango/computation/algs/max_avg_degree/max_avg_deg.py
justi/m2g
09e8b889889ee8d8fb08b9b6fcd726fb3d901644
[ "Apache-2.0" ]
12
2015-03-11T22:07:17.000Z
2016-01-29T21:24:29.000Z
MR-OCP/MROCPdjango/computation/algs/max_avg_degree/max_avg_deg.py
youngmook/m2g
09e8b889889ee8d8fb08b9b6fcd726fb3d901644
[ "Apache-2.0" ]
213
2015-01-30T16:02:57.000Z
2016-01-29T21:45:02.000Z
MR-OCP/MROCPdjango/computation/algs/max_avg_degree/max_avg_deg.py
youngmook/m2g
09e8b889889ee8d8fb08b9b6fcd726fb3d901644
[ "Apache-2.0" ]
5
2015-02-04T13:58:12.000Z
2016-01-29T21:24:46.000Z
#!/usr/bin/env python

# Copyright 2014 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# max_avg_deg.py
# Created by Disa Mhembere on 2014-08-03.
# Email: disa@jhu.edu

from computation.utils.r_utils import r_igraph_set_graph_attribute
from computation.algs.eigen.eigen import r_igraph_eigs
from computation.utils.igraph_attributes import r_igraph_get_attr
from rpy2.rinterface import NULL


def r_igraph_max_ave_degree(g):
    """
    Compute local triangle count of graph g and save as necessary
    *Global graph attributes can only be stored in the graph*

    @param g: The igraph graph loaded via Rpy2 i.e. an R object

    @return: Same graph an input but with added invariant as an attribute
    """
    mad = r_igraph_get_attr(g, "eigvals", "g")  # See if we already have computed eigenvalues for the graph
    if mad == NULL:  # Ok then compute top 1 eig ourself
        mad = r_igraph_eigs(g, 1, return_eigs=True, save_fn=None)[0]
    else:
        mad = float(mad[0].split(",")[0][1:])  # The largest eigenvalue is held at index 0

    if mad is not None:
        g = r_igraph_set_graph_attribute(g, "max_ave_degree", mad)
    else:  # More than likely ran out of memory
        # PORTABILITY FIX: print() call syntax — the original Python-2-only
        # `print "..."` statement is a SyntaxError on Python 3; the call form
        # behaves identically on both interpreters for a single argument.
        print("Failed to estimate max ave degree because eigensolver failed ...")

    return g  # return so we can use for other attributes
38.0625
104
0.749316
303
1,827
4.419142
0.541254
0.036594
0.029126
0.023898
0.035848
0
0
0
0
0
0
0.016556
0.173508
1,827
47
105
38.87234
0.870199
0.488232
0
0.133333
0
0
0.135093
0
0
0
0
0
0
0
null
null
0
0.266667
null
null
0.066667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
c13494ee29e2f123470e1509930b8cc4d4a82910
4,828
py
Python
tests/sandbox/.venv_ccf_sandbox/lib/python3.8/site-packages/adtk/_transformer_base.py
iLuSIAnn/test
10d0a20dc1a646b5c1f6c7bff2960e3f5df0510e
[ "Apache-2.0" ]
null
null
null
tests/sandbox/.venv_ccf_sandbox/lib/python3.8/site-packages/adtk/_transformer_base.py
iLuSIAnn/test
10d0a20dc1a646b5c1f6c7bff2960e3f5df0510e
[ "Apache-2.0" ]
null
null
null
tests/sandbox/.venv_ccf_sandbox/lib/python3.8/site-packages/adtk/_transformer_base.py
iLuSIAnn/test
10d0a20dc1a646b5c1f6c7bff2960e3f5df0510e
[ "Apache-2.0" ]
null
null
null
from typing import Union

import pandas as pd

from ._base import (
    _NonTrainableMultivariateModel,
    _NonTrainableUnivariateModel,
    _TrainableMultivariateModel,
    _TrainableUnivariateModel,
)


class _NonTrainableUnivariateTransformer(_NonTrainableUnivariateModel):
    def predict(
        self, ts: Union[pd.Series, pd.DataFrame]
    ) -> Union[pd.Series, pd.DataFrame]:
        """Transform time series.

        Parameters
        ----------
        ts: pandas.Series or pandas.DataFrame
            Time series to transform. A DataFrame with k columns is treated
            as k independent univariate series, and the transformer is
            applied to each of them separately.

        Returns
        -------
        pandas.Series or pandas.DataFrame
            Transformed time series.

        """
        return self._predict(ts)

    # `transform` is an alias of `predict`
    transform = predict


class _TrainableUnivariateTransformer(_TrainableUnivariateModel):
    def fit(self, ts: Union[pd.Series, pd.DataFrame]) -> None:
        """Train the transformer with the given time series.

        Parameters
        ----------
        ts: pandas.Series or pandas.DataFrame
            Training time series. A DataFrame with k columns trains k
            independent univariate transformers.

        """
        self._fit(ts)

    def predict(
        self, ts: Union[pd.Series, pd.DataFrame]
    ) -> Union[pd.Series, pd.DataFrame]:
        """Transform time series.

        Parameters
        ----------
        ts: pandas.Series or pandas.DataFrame
            Time series to transform. A DataFrame with k columns is treated
            as k independent univariate series.

            - Trained with a Series: the single transformer is applied to
              each univariate series independently.
            - Trained with a DataFrame (i.e. k transformers): each of the k
              transformers is applied to its respective series.

        Returns
        -------
        pandas.Series or pandas.DataFrame
            Transformed time series.

        """
        return self._predict(ts)

    def fit_predict(
        self, ts: Union[pd.Series, pd.DataFrame]
    ) -> Union[pd.Series, pd.DataFrame]:
        """Train the transformer and transform the training series itself.

        Parameters
        ----------
        ts: pandas.Series or pandas.DataFrame
            Series used both for training and as transformation input. A
            DataFrame with k columns trains k independent transformers,
            each applied to its own column.

        Returns
        -------
        pandas.Series or pandas.DataFrame
            Transformed time series.

        """
        self.fit(ts)
        return self.predict(ts)

    # sklearn-style aliases
    transform = predict
    fit_transform = fit_predict


class _NonTrainableMultivariateTransformer(_NonTrainableMultivariateModel):
    def predict(self, df: pd.DataFrame) -> Union[pd.Series, pd.DataFrame]:
        """Transform time series.

        Parameters
        ----------
        df: pandas.DataFrame
            Time series to transform.

        Returns
        -------
        pandas.Series or pandas.DataFrame
            Transformed time series.

        """
        return self._predict(df)

    transform = predict


class _TrainableMultivariateTransformer(_TrainableMultivariateModel):
    def fit(self, df: pd.DataFrame) -> None:
        """Train the transformer with the given time series.

        Parameters
        ----------
        df: pandas.DataFrame
            Time series used for training.

        """
        self._fit(df)

    def predict(self, df: pd.DataFrame) -> Union[pd.Series, pd.DataFrame]:
        """Transform time series.

        Parameters
        ----------
        df: pandas.DataFrame
            Time series to transform.

        Returns
        -------
        pandas.Series or pandas.DataFrame
            Transformed time series.

        """
        return self._predict(df)

    def fit_predict(self, df: pd.DataFrame) -> Union[pd.Series, pd.DataFrame]:
        """Train the transformer and transform the training series itself.

        Parameters
        ----------
        df: pandas.DataFrame
            Series used both for training and as transformation input.

        Returns
        -------
        pandas.Series or pandas.DataFrame
            Transformed time series.

        """
        self.fit(df)
        return self.predict(df)

    transform = predict
    fit_transform = fit_predict
28.233918
79
0.606462
503
4,828
5.771372
0.131213
0.086118
0.044781
0.051671
0.800207
0.800207
0.716845
0.676886
0.676886
0.676197
0
0
0.308824
4,828
170
80
28.4
0.869943
0.509321
0
0.47619
0
0
0
0
0
0
0
0
0
1
0.190476
false
0
0.071429
0
0.642857
0
0
0
0
null
0
0
0
1
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
c134bdbfe11dbc33ecbecbfb4d500d8cdb08c69d
3,511
py
Python
src/accounts/views.py
m2jobe/tafseer
8f7d4bddbcd8a73c607f39a2b1d27c78aef86a15
[ "MIT" ]
null
null
null
src/accounts/views.py
m2jobe/tafseer
8f7d4bddbcd8a73c607f39a2b1d27c78aef86a15
[ "MIT" ]
null
null
null
src/accounts/views.py
m2jobe/tafseer
8f7d4bddbcd8a73c607f39a2b1d27c78aef86a15
[ "MIT" ]
null
null
null
from django.shortcuts import get_object_or_404
from django_rest_logger import log
from knox.auth import TokenAuthentication
from knox.models import AuthToken
from rest_framework import status
from rest_framework.authentication import BasicAuthentication
from rest_framework.generics import GenericAPIView
from rest_framework.mixins import CreateModelMixin
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response

from accounts.models import User
from accounts.serializers import UserRegistrationSerializer, UserSerializer
from lib.utils import AtomicMixin
from django.utils import timezone
from lib.utils import validate_email as email_is_valid
from allauth.socialaccount.providers.facebook.views import FacebookOAuth2Adapter
from rest_auth.registration.views import SocialLoginView


class UserRegisterView(AtomicMixin, CreateModelMixin, GenericAPIView):
    """Create a new user account from posted registration data."""

    serializer_class = UserRegistrationSerializer
    authentication_classes = ()

    def post(self, request):
        """User registration view."""
        address = request.data['email']

        # Reject disallowed email providers early.
        if not email_is_valid(address):
            return Response("Please use a different email address provider.",
                            status=status.HTTP_200_OK)

        # Reject duplicate registrations.
        if User.objects.filter(email=address).exists():
            return Response("Email already in use, please use a different email address.",
                            status=status.HTTP_200_OK)

        joined_at = timezone.now()
        new_user = User(
            email=address,
            first_name=request.data['first_name'],
            last_name=request.data['last_name'],
            is_active=True,
            last_login=joined_at,
            date_joined=joined_at,
        )
        new_user.set_password(request.data['password'])
        new_user.save()
        return Response("SUCCESS", status=status.HTTP_200_OK)


class UserLoginView(GenericAPIView):
    """Exchange basic-auth credentials for a knox token."""

    serializer_class = UserSerializer
    authentication_classes = (BasicAuthentication,)
    permission_classes = (IsAuthenticated,)

    def post(self, request):
        """User login with username and password."""
        auth_token = AuthToken.objects.create(request.user)
        payload = {
            'user': self.get_serializer(request.user).data,
            'token': auth_token,
        }
        return Response(payload)


class UserConfirmEmailView(AtomicMixin, GenericAPIView):
    """Confirm a user's email address via an activation key."""

    serializer_class = None
    authentication_classes = ()

    def get(self, request, activation_key):
        """
        View for confirm email.

        Receive an activation key as parameter and confirm email.
        """
        user = get_object_or_404(User, activation_key=str(activation_key))
        if user.confirm_email():
            return Response(status=status.HTTP_200_OK)

        log.warning(message='Email confirmation key not found.',
                    details={'http_status_code': status.HTTP_404_NOT_FOUND})
        return Response(status=status.HTTP_404_NOT_FOUND)


class UserEmailConfirmationStatusView(GenericAPIView):
    """Report whether the authenticated user has confirmed their email."""

    serializer_class = None
    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated,)

    def get(self, request):
        """Retrieve user current confirmed_email status."""
        current_user = self.request.user
        return Response({'status': current_user.confirmed_email},
                        status=status.HTTP_200_OK)


class FacebookLogin(SocialLoginView):
    """Social login backed by Facebook OAuth2."""

    adapter_class = FacebookOAuth2Adapter
36.195876
117
0.710339
382
3,511
6.358639
0.311518
0.023055
0.041993
0.039111
0.162618
0.069988
0
0
0
0
0
0.010469
0.211051
3,511
96
118
36.572917
0.866426
0.076901
0
0.123077
0
0
0.065143
0
0
0
0
0
0
1
0.061538
false
0.015385
0.261538
0
0.676923
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
c1365b2df1fdc2c37aa4c5a090e8a65cce8207d8
2,985
py
Python
enigma.py
danhab99/EnigmaPY
b7526c26ac98675e911a8d0dcaf1acfe6d2659fb
[ "MIT" ]
null
null
null
enigma.py
danhab99/EnigmaPY
b7526c26ac98675e911a8d0dcaf1acfe6d2659fb
[ "MIT" ]
null
null
null
enigma.py
danhab99/EnigmaPY
b7526c26ac98675e911a8d0dcaf1acfe6d2659fb
[ "MIT" ]
null
null
null
import create
from lib import Machine
from lib import Transformer
import argparse
import pickle
from itertools import chain
from random import sample  # was `shuffle`, but the code below uses sample()

# ---------------------------------------------------------------------------
# Command-line interface
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='A simulation of the enigma encryption algorithm', prog='enigma.py')
subparsers = parser.add_subparsers(help='Which command to run', dest='subroutine')
create_parser = subparsers.add_parser('create', help='A utility to create encryption codexes')
encrypt_parser = subparsers.add_parser('encrypt', help='Encrypt a file with a codex')

parser.add_argument('--test', type=argparse.FileType('r'), help='Validate a cypher')

create_parser.add_argument('file', metavar='<File>', type=argparse.FileType('w'), help='The file to output to')
# was a single malformed option string '-r --random'; argparse needs each
# flag as its own argument
create_parser.add_argument('-r', '--random', action='store_true', help='Generates a completely random codex')

encrypt_parser.add_argument('in_file', metavar='<Input file>', type=argparse.FileType('r'), help='The file to be encrypted')
encrypt_parser.add_argument('out_file', metavar='<Out file>', type=argparse.FileType('w'), help='The destination for the results')
encrypt_mutual = encrypt_parser.add_mutually_exclusive_group(required=True)
encrypt_mutual.add_argument('--codex', type=argparse.FileType('r'), help='The codex to use')
encrypt_mutual.add_argument('--random', nargs=3, help='Create a random codex using a preset alphabet [ABC, bytes, numbers, ASCII, UTF], a minimum number of transformers, and a maximum number of transformers')

args = parser.parse_args()

if args.test:
    # Validate a pickled cypher by encrypting random data twice: the enigma
    # transform is an involution, so a double pass must reproduce the input.
    with open('cypher.pkl', mode='rb') as file:
        cypher = pickle.load(file)
    abc = cypher.getABC()
    machine = Machine(cypher)

    def gen(length):
        """Yield `length` shuffled copies of the alphabet, flattened."""
        # sample(abc, len(abc)) returns a shuffled *copy* (shuffle() would
        # mutate in place and return None)
        c = [sample(abc, len(abc))] * length
        return chain.from_iterable(c)

    def transform(d):
        """Run every symbol of d through the machine at its position."""
        return [machine.parse(value, counter) for counter, value in enumerate(d)]

    testData = list(gen(5))
    # (removed leftover pdb.set_trace() -- pdb was never imported)
    results = transform(transform(testData))
    if False not in [item[0] == item[1] for item in zip(testData, results)]:
        print("This is a valid cypher")
    else:
        print("This is NOT a valid cypher")

if args.subroutine == 'create':
    # Build a codex interactively and pickle it to the requested file.
    file = create.Create()
    with open(args.file.name, mode='wb+') as output:
        pickle.dump(file, output)

if args.subroutine == 'encrypt':
    machine = None
    if args.codex:
        # args.codex is an already-open FileType('r') object; reopen by
        # name in binary mode for pickle (was open(args.codex, 'rb'),
        # which passes a file object where a path is required)
        with open(args.codex.name, 'rb') as file:
            machine = Machine(pickle.load(file))
    if args.random:
        CYPHER = create.random(create.genPreset(args.random[0]), args.random[1], args.random[2])
        machine = Machine(abc=CYPHER[0].getABC())

    with open(args.in_file.name, 'rb') as input, open(args.out_file.name, 'wb') as output:
        clean = input.read()
        # was machine.parse(i, value): `i` was the whole (index, byte)
        # tuple and `value` was undefined -- unpack enumerate() as the
        # --test path does
        crypt = [machine.parse(value, counter) for counter, value in enumerate(clean)]
        # file.write() needs bytes, not a list; assumes parse() yields
        # ints in range(256) -- TODO confirm against lib.Machine
        output.write(bytes(crypt))
33.166667
163
0.669012
402
2,985
4.890547
0.335821
0.032045
0.043235
0.032045
0.073754
0.061038
0.032553
0
0
0
0
0.003342
0.19799
2,985
89
164
33.539326
0.817878
0.004355
0
0.071429
0
0.014286
0.213131
0
0
0
0
0
0
1
0.028571
false
0
0.1
0.014286
0.157143
0.028571
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c13aeff29bda001891c8cf3e8b1bc374fc0c165c
754
py
Python
autoimpute/analysis/__init__.py
gjdv/autoimpute
aa418102d3b64fc7e0c0dafa6839746f0b9a6545
[ "MIT" ]
191
2019-03-16T17:00:33.000Z
2022-03-11T12:14:17.000Z
autoimpute/analysis/__init__.py
MlataIbrahim/autoimpute
d22cd86db6facd4a68746c8c0fcb3fae70071dac
[ "MIT" ]
57
2019-03-09T23:59:38.000Z
2022-03-01T08:17:33.000Z
autoimpute/analysis/__init__.py
MlataIbrahim/autoimpute
d22cd86db6facd4a68746c8c0fcb3fae70071dac
[ "MIT" ]
19
2019-04-13T19:01:23.000Z
2021-05-14T08:59:27.000Z
"""Manage the analysis folder from the autoimpute package. This module handles imports from the analysis folder that should be accessible whenever someone imports autoimpute.analysis. The list below specifies the methods and classes that are available on import. This module handles `from autoimpute.analysis import *` with the __all__ variable below. This command imports the public classes and methods from autoimpute.analysis. """ from .base_regressor import MiBaseRegressor from .linear_regressor import MiLinearRegression from .logistic_regressor import MiLogisticRegression from .metrics import raw_bias, percent_bias __all__ = [ "MiBaseRegressor", "MiLinearRegression", "MiLogisticRegression", "raw_bias", "percent_bias" ]
31.416667
78
0.802387
91
754
6.483516
0.472527
0.091525
0.057627
0.061017
0
0
0
0
0
0
0
0
0.145889
754
23
79
32.782609
0.916149
0.567639
0
0
0
0
0.22884
0
0
0
0
0
0
1
0
false
0
0.363636
0
0.363636
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
c13af56264e5c19bd63a5cb098d1273308f7f27f
5,962
py
Python
tests/sdk/test_client_response.py
AitoDotAI/aito-python-tools
891d433222b04f4ff8a4eeafbb9268516fd215dc
[ "MIT" ]
6
2019-10-16T02:35:06.000Z
2021-02-03T13:39:43.000Z
tests/sdk/test_client_response.py
AitoDotAI/aito-python-tools
891d433222b04f4ff8a4eeafbb9268516fd215dc
[ "MIT" ]
23
2020-03-17T13:16:02.000Z
2021-04-23T15:09:51.000Z
tests/sdk/test_client_response.py
AitoDotAI/aito-python-tools
891d433222b04f4ff8a4eeafbb9268516fd215dc
[ "MIT" ]
null
null
null
import requests
from parameterized import parameterized, parameterized_class

import aito.client.requests as aito_requests
import aito.schema as aito_schema
from aito.client import AitoClient
from tests.cases import CompareTestCase
from tests.sdk.contexts import grocery_demo_client


def get_requests_resp_and_aito_resp(aito_client: AitoClient, request_obj: aito_requests.AitoRequest):
    """Return the raw `requests` JSON and the AitoClient response for the
    same request, so tests can compare the SDK's parsed view against the
    untouched HTTP payload."""
    raw_resp_obj = requests.request(
        method=request_obj.method,
        url=aito_client.instance_url + request_obj.endpoint,
        headers=aito_client.headers,
        json=request_obj.query
    )
    raw_resp_json = raw_resp_obj.json()
    aito_resp = aito_client.request(request_obj=request_obj)
    return raw_resp_json, aito_resp


class TestBaseHitsResponse(CompareTestCase):
    """Responses with a plain hits list (generic query)."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.client = grocery_demo_client()
        cls.request_obj = aito_requests.GenericQueryRequest(query={'from': 'users', 'limit': 3})
        # One shared round-trip for all tests in this class.
        cls.raw_resp_json, cls.aito_resp = get_requests_resp_and_aito_resp(cls.client, cls.request_obj)

    def test_attributes(self):
        # Scalar attributes mirror the raw JSON fields.
        for attr in ['offset', 'total']:
            self.assertEqual(getattr(self.aito_resp, attr), self.raw_resp_json[attr])
        # Each parsed hit wraps the corresponding raw hit dict.
        self.assertTrue(hasattr(self.aito_resp, 'hits'))
        for idx, hit in enumerate(self.aito_resp.hits):
            self.assertEqual(hit.json, self.raw_resp_json['hits'][idx])
        self.assertTrue(hasattr(self.aito_resp, 'first_hit'))
        self.assertEqual(self.aito_resp.first_hit.json, self.raw_resp_json['hits'][0])

    def test_get_field(self):
        # Mapping-style access: `in` works, missing keys raise KeyError.
        self.assertIn('offset', self.aito_resp)
        with self.assertRaises(KeyError):
            _ = self.aito_resp['some_field']

    def test_iter_fields(self):
        # Iterating the response yields the same field names as the raw JSON.
        aito_res_fields = [field for field in self.aito_resp]
        json_res_fields = list(self.raw_resp_json.keys())
        self.assertCountEqual(aito_res_fields, json_res_fields)


# Each tuple parameterizes the class with a request and the JSON field that
# carries its hit score ($p for probability-based queries, $score for
# similarity).
@parameterized_class(("test_name", "request_obj", "score_field"), [
    ("predict", aito_requests.PredictRequest({"from": "products", "predict": "tags", "limit": 3}), "$p"),
    ("recommend", aito_requests.RecommendRequest(
        {"from": "impressions", "recommend": "product", "goal": {"session.user": "veronica"}, "limit": 3}
    ), "$p"),
    ("match", aito_requests.MatchRequest(
        {"from": "impressions", "where": {"session.user": "veronica"}, "match": "product", "limit": 3}
    ), "$p"),
    ("similarity", aito_requests.SimilarityRequest({"from": "products", "similarity": {"name": "rye bread"}}), "$score")
])
class TestScoredHitsResponse(CompareTestCase):
    """Responses whose hits carry a score field."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.client = grocery_demo_client()

    def test_hit_class(self):
        raw_resp_json, aito_resp = get_requests_resp_and_aito_resp(self.client, self.request_obj)
        self.assertTrue(hasattr(aito_resp, 'first_hit'))
        self.assertEqual(aito_resp.first_hit.score, raw_resp_json['hits'][0][self.score_field])
        # Without select=['$why'] there is no explanation on the hit.
        with self.assertRaises(KeyError):
            _ = aito_resp.first_hit.explanation

    def test_hit_with_explanation(self):
        # Request $why explicitly; the hit then exposes it as .explanation.
        self.request_obj.query = {**self.request_obj.query, 'select': ['$why']}
        raw_resp_json, aito_resp = get_requests_resp_and_aito_resp(self.client, self.request_obj)
        self.assertEqual(aito_resp.first_hit.explanation, raw_resp_json['hits'][0]['$why'])


class TestRelateResponse(CompareTestCase):
    """Relate queries expose hits as relation objects."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.client = grocery_demo_client()

    def test_relate_response(self):
        raw_resp_json, aito_resp = get_requests_resp_and_aito_resp(
            self.client,
            aito_requests.RelateRequest({"from": "products", "where": {"$exists": "name"}, "relate": "tags", "limit": 2})
        )
        self.assertEqual(aito_resp.relations[0].json, raw_resp_json['hits'][0])
        self.assertEqual(aito_resp.relations[0].frequencies, raw_resp_json['hits'][0]['fs'])
        self.assertEqual(aito_resp.relations[0].probabilities, raw_resp_json['hits'][0]['ps'])


class TestEvaluateResponse(CompareTestCase):
    """Evaluate queries expose accuracy and sample counts."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.client = grocery_demo_client()

    # renamed from test_relate_response (copy-paste from the class above)
    def test_evaluate_response(self):
        raw_resp_json, aito_resp = get_requests_resp_and_aito_resp(
            self.client,
            aito_requests.EvaluateRequest({
                "test": {"$index": {"$mod": [10, 0]}},
                "evaluate": {
                    "from": "products",
                    "where": {"name": {"$get": "name"}},
                    "match": "tags"
                }
            })
        )
        self.assertEqual(aito_resp.accuracy, raw_resp_json['accuracy'])
        self.assertEqual(aito_resp.test_sample_count, raw_resp_json['testSamples'])
        self.assertEqual(aito_resp.train_sample_count, raw_resp_json['trainSamples'])


class TestGetSchemaResponse(CompareTestCase):
    """Schema endpoints deserialize into the matching aito_schema class."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.client = grocery_demo_client()

    @parameterized.expand([
        ('get_database_schema', aito_requests.GetDatabaseSchemaRequest(), aito_schema.AitoDatabaseSchema),
        ('get_table_schema', aito_requests.GetTableSchemaRequest(table_name='products'), aito_schema.AitoTableSchema),
        (
            'get_column_schema',
            aito_requests.GetColumnSchemaRequest(table_name='products', column_name='name'),
            aito_schema.AitoColumnTypeSchema
        )
    ])
    def test_get_schema_response(self, _, request_instance, schema_cls):
        raw_resp_json, aito_resp = get_requests_resp_and_aito_resp(self.client, request_instance)
        self.assertEqual(aito_resp.schema, schema_cls.from_deserialized_object(raw_resp_json))
42.283688
121
0.679638
701
5,962
5.476462
0.198288
0.070852
0.060172
0.05392
0.361552
0.318052
0.233655
0.220109
0.211253
0.211253
0
0.003532
0.192721
5,962
140
122
42.585714
0.794099
0.014089
0
0.295652
0
0
0.096901
0
0
0
0
0
0.165217
1
0.121739
false
0
0.06087
0
0.234783
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c13c4bfee1c9c7430580db61de18561beb73453a
157
py
Python
test/integration/default/testinfra/test_docker.py
joe-bowman/docker-formula
efc3cc4ecb86163b179d476ebaf96d9a2fc1d476
[ "Apache-2.0" ]
null
null
null
test/integration/default/testinfra/test_docker.py
joe-bowman/docker-formula
efc3cc4ecb86163b179d476ebaf96d9a2fc1d476
[ "Apache-2.0" ]
null
null
null
test/integration/default/testinfra/test_docker.py
joe-bowman/docker-formula
efc3cc4ecb86163b179d476ebaf96d9a2fc1d476
[ "Apache-2.0" ]
null
null
null
import testinfra


def test_service_is_running_and_enabled(Service):
    """The docker service must be active now and enabled at boot."""
    svc = Service('docker')
    assert svc.is_running
    assert svc.is_enabled
22.428571
49
0.783439
21
157
5.52381
0.52381
0.155172
0.241379
0
0
0
0
0
0
0
0
0
0.152866
157
6
50
26.166667
0.87218
0
0
0
0
0
0.038217
0
0
0
0
0
0.4
1
0.2
false
0
0.2
0
0.4
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
c13d31a85ce3ce0b7615b9c5e782008402d5a721
9,292
py
Python
lib/worker.py
GuoxiaWang/InstanceLabelTool
ece37a0dfe1467ad24d6d3472adb50b20b6abd24
[ "MIT" ]
6
2018-10-28T07:43:34.000Z
2021-04-11T15:15:14.000Z
lib/worker.py
GuoxiaWang/InstanceLabelTool
ece37a0dfe1467ad24d6d3472adb50b20b6abd24
[ "MIT" ]
2
2019-03-13T15:16:57.000Z
2019-04-15T02:35:46.000Z
lib/worker.py
GuoxiaWang/InstanceLabelTool
ece37a0dfe1467ad24d6d3472adb50b20b6abd24
[ "MIT" ]
1
2020-01-16T10:23:36.000Z
2020-01-16T10:23:36.000Z
"""
Copyright (c) 2018- Guoxia Wang
mingzilaochongtu at gmail com

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

The Software is provided "as is", without warranty of any kind.
"""
# NOTE(review): this module is Python 2 -- it uses StandardError and relies
# on integer division (len(edge) / step); porting to Python 3 would need
# `Exception` and `//`.

from PyQt4 import QtCore, QtGui
import numpy as np
import cv2
import os
import getpass

from edgelink import edgelink
from annotation import Point, Annotation, AnnBoundary


class ConvertToBoundariesWorker(QtCore.QObject):
    """
    Make a new thread instance to convert to boundaries from a segment map
    """
    # Emitted with the list of boundary polylines when conversion finishes.
    finishedSignal = QtCore.pyqtSignal(list)

    def __init__(self, objects=None, height=0, width=0):
        # objects: annotated instances, each with a .polygon list of point
        # lists; segmentMap: per-pixel instance-id raster filled lazily.
        QtCore.QObject.__init__(self)
        self.objects = objects
        self.segmentMap = np.zeros((height, width), np.uint8)

    def setObjects(self, objects):
        # Replace the instance list to convert (used when this worker is
        # reused across images by the batch worker below).
        self.objects = objects

    def setSegmentMap(self, height, width):
        # Reset the raster to a fresh all-zero map of the new image size.
        self.segmentMap = np.zeros((height, width), np.uint8)

    # Segment map convert to boundary list
    def convertToBoundaries(self):
        """Rasterize the instance polygons, extract their boundary map,
        link the edges into fragments, orient each fragment, and emit the
        resulting list of point lists via finishedSignal."""
        # First, we fill all labels to numpy ndarray
        # Each object gets a distinct integer label starting at 1
        # (0 = background).
        count = 1
        for obj in self.objects:
            for poly in obj.polygon:
                pts = []
                for pt in poly:
                    pts.append([pt.x, pt.y])
                pts = np.around(pts).astype(np.int32)
                cv2.fillPoly(self.segmentMap, [pts], count)
            count += 1
        # Second, we convert to boundary map from segment map
        edgeMap = self.segmentationMapToBoundaryMap(self.segmentMap)
        # Third, we get edge fragments
        edgelist, edgeim, etype = edgelink(edgeMap)
        polygon = []
        for edge in edgelist:
            # Discard tiny fragments (< 5 points).
            if (len(edge) < 5):
                continue
            # Auto correct occlusion boundary direction
            if (self.isNeedReverse(edge)):
                edge.reverse()
            # Convert to QPolygonF
            # edgelink returns (row, col); Point takes (x, y), hence swap.
            poly = []
            for pt in edge:
                point = Point(pt[1], pt[0])
                poly.append(point)
            polygon.append(poly)
        self.finishedSignal.emit(polygon)
        return polygon

    # Label segmentation map to boundary map
    def segmentationMapToBoundaryMap(self, segment):
        """Mark pixels where adjacent segment labels differ, working on a
        doubled (2h+1, 2w+1) grid so that edge/corner sites sit between
        the original pixels, then downsample back to (h, w)."""
        height, width = segment.shape
        boundary = np.zeros((2*height+1, 2*width+1), np.uint8)
        # Find vertical direction difference
        edgelsV = (segment[0:-1, :] != segment[1:, :]).astype(np.uint8)
        # Add a zero row
        edgelsV = np.vstack([edgelsV, np.zeros((1, width), dtype=np.uint8)])
        # Find horizontal direction difference
        edgelsH = (segment[:,0:-1] != segment[:, 1:]).astype(np.uint8)
        # Append a zero column
        edgelsH = np.hstack([edgelsH, np.zeros((height, 1), dtype=np.uint8)])
        # Assign to boundary
        boundary[2::2, 1::2] = edgelsV
        boundary[1::2, 2::2] = edgelsH
        # Get boundary
        # Corner sites are on if any of the four surrounding edge sites is.
        boundary[2:-1:2, 2:-1:2] = np.maximum(
            np.maximum(edgelsH[0:-1, 0:-1], edgelsH[1:, 0:-1]),
            np.maximum(edgelsV[0:-1, 0:-1], edgelsV[0:-1, 1:]))
        # Replicate the border rows/columns so image edges are closed.
        boundary[0, :] = boundary[1, :]
        boundary[:, 0] = boundary[:, 1]
        boundary[-1, :] = boundary[-2, :]
        boundary[:, -1] = boundary[:, -2]
        boundary = boundary[2::2, 2::2]
        return boundary

    # Check one edge occluison direction, and return true if need reverse
    def isNeedReverse(self, edge):
        """Sample the edge every `step` points, compare the segment labels
        on the two sides of each sampled segment's normal, and vote;
        return True when the positive-direction vote ratio is too low."""
        height, width = self.segmentMap.shape
        step = 3
        posDirCount = 0
        totalCount = len(edge) / step
        for i in range(totalCount):
            idx = i * step
            # edge points are (row, col); QPointF takes (x, y).
            pt1 = QtCore.QPointF(edge[idx][1], edge[idx][0])
            idx = (i + 1) * step
            if (idx >= len(edge)):
                idx = -1
            pt2 = QtCore.QPointF(edge[idx][1], edge[idx][0])
            line1 = QtCore.QLineF(pt1, pt2)
            # Normal of the sampled segment; pt3 / pt4 are probes on
            # opposite sides, clamped into the image.
            line1 = line1.normalVector()
            pt3 = line1.p2()
            pt3.setX(min(max(pt3.x(), 0), width-1))
            pt3.setY(min(max(pt3.y(), 0), height-1))
            pt4 = QtCore.QPointF(line1.x1() - line1.dx(), line1.y1() - line1.dy())
            pt4.setX(min(max(pt4.x(), 0), width-1))
            pt4.setY(min(max(pt4.y(), 0), height-1))
            if (self.segmentMap[int(pt3.y()), int(pt3.x())] >= self.segmentMap[int(pt4.y()), int(pt4.x())]):
                posDirCount += 1
        ratio = float(posDirCount) / np.ceil(float(totalCount))
        # If ratio greater than the threshold, we dont need to reverse the edge
        if (ratio > 0.3):
            return False
        else:
            return True


class BatchConvertToBoundariesWorker(QtCore.QObject):
    """
    Make a new thread instance to batch convert to occlusion boundary labels
    from instance labels
    """
    # Progress (1-based index, status text) for the progress dialog.
    updateProgress = QtCore.pyqtSignal(int, str)
    finished = QtCore.pyqtSignal()
    # (title, message) asking the GUI thread to show a dialog; the GUI
    # stores its answer in userOperationResult and wakes waitCondition.
    information = QtCore.pyqtSignal(str, str)

    # Flag indicate cancel by user
    canceled = False
    # User selected operation
    userOperationResult = -1
    # Mutex and waitcondition
    # NOTE(review): these are class-level attributes, shared by all
    # instances -- presumably only one batch worker exists at a time.
    mutex = QtCore.QMutex()
    waitCondition = QtCore.QWaitCondition()

    def __init__(self, imageList, imageDir, gtExt):
        # imageList: image file names; imageDir: directory holding the
        # label json files; gtExt: extension of the label files.
        QtCore.QObject.__init__(self)
        self.imageDir = imageDir
        self.imageList = imageList
        self.gtExt = gtExt

    def stop(self):
        # Request cancellation; checked at the top of the batch loop.
        self.canceled = True

    def batchConvertToBoundaries(self):
        """For each image: load its annotation json, convert the instance
        polygons to occlusion boundaries via ConvertToBoundariesWorker,
        and write the result back, asking the user (through the
        information signal + wait condition) on errors/overwrites."""
        overwriteAll = False
        annotation = Annotation()
        worker = ConvertToBoundariesWorker()
        # Convert each image
        for idx, filename in enumerate(self.imageList):
            if (self.canceled):
                break
            # get label json file name
            imageExt = os.path.splitext(filename)[1]
            gtfilename = filename.replace(imageExt, self.gtExt)
            filename = os.path.join(self.imageDir, gtfilename)
            filename = os.path.normpath(filename)
            # Update progress dialog
            self.updateProgress.emit(idx + 1, "Converting {0}".format(gtfilename))
            # Check if label json file exist
            if (not os.path.isfile(filename)):
                text = "{0} not exist. Continue?".format(filename)
                # Block until the GUI thread answers the dialog.
                self.mutex.lock()
                self.information.emit("IOError", text)
                self.waitCondition.wait(self.mutex)
                self.mutex.unlock()
                if (self.userOperationResult == QtGui.QMessageBox.Yes):
                    continue
                else:
                    break
            try:
                annotation = Annotation()
                annotation.fromJsonFile(filename)
            except StandardError as e:
                text = "Error parsing labels in {0}. \nContinue?".format(filename)
                self.mutex.lock()
                self.information.emit("IOError", text)
                self.waitCondition.wait(self.mutex)
                self.mutex.unlock()
                if (self.userOperationResult == QtGui.QMessageBox.Yes):
                    continue
                else:
                    break
            # Skip all image of has no instance labels
            if (not annotation.objects):
                continue
            # Check if it has occlusion boundary label
            if (not overwriteAll and annotation.boundaries):
                text = "{0} already exists occlusion boundary labels. Do you want to overwrite?".format(filename)
                self.mutex.lock()
                self.information.emit("Overwrite", text)
                self.waitCondition.wait(self.mutex)
                self.mutex.unlock()
                if (self.userOperationResult == QtGui.QMessageBox.No):
                    continue
                elif (self.userOperationResult == QtGui.QMessageBox.YesToAll):
                    overwriteAll = True
            height = annotation.imgHeight
            width = annotation.imgWidth
            worker.setObjects(annotation.objects)
            worker.setSegmentMap(height, width)
            polygon = worker.convertToBoundaries()
            # Create a new boundary object
            boundaries = AnnBoundary()
            boundaries.polygon = polygon
            boundaries.deleted = 0
            boundaries.verified = 0
            boundaries.user = getpass.getuser()
            boundaries.updateDate()
            annotation.boundaries = boundaries
            try:
                annotation.toJsonFile(filename)
            except StandardError as e:
                text = "Error writting labels to {0}. \nContinue?".format(filename)
                self.mutex.lock()
                self.information.emit("IOError", text)
                self.waitCondition.wait(self.mutex)
                self.mutex.unlock()
                if (self.userOperationResult == QtGui.QMessageBox.Yes):
                    continue
                else:
                    break
        self.finished.emit()
36.439216
113
0.567908
1,001
9,292
5.255744
0.27972
0.020528
0.026611
0.037065
0.213648
0.197301
0.188747
0.173921
0.113857
0.113857
0
0.021365
0.330069
9,292
254
114
36.582677
0.823775
0.160245
0
0.251429
0
0
0.028453
0
0
0
0
0
0
1
0.051429
false
0.011429
0.04
0
0.171429
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c13ec1a7b5e150d075490c6a5d27c1d7f874d34f
1,266
py
Python
ark_nlp/dataset/__init__.py
yubuyuabc/ark-nlp
165d35cfacd7476791c0aeba19bf43f4f8079553
[ "Apache-2.0" ]
1
2022-03-23T05:10:55.000Z
2022-03-23T05:10:55.000Z
ark_nlp/dataset/__init__.py
yubuyuabc/ark-nlp
165d35cfacd7476791c0aeba19bf43f4f8079553
[ "Apache-2.0" ]
null
null
null
ark_nlp/dataset/__init__.py
yubuyuabc/ark-nlp
165d35cfacd7476791c0aeba19bf43f4f8079553
[ "Apache-2.0" ]
null
null
null
from ark_nlp.dataset.base._dataset import * from ark_nlp.dataset.base._sentence_classification_dataset import SentenceClassificationDataset from ark_nlp.dataset.base._sentence_classification_dataset import SentenceClassificationDataset as SCDataset from ark_nlp.dataset.base._sentence_classification_dataset import PairMergeSentenceClassificationDataset from ark_nlp.dataset.base._sentence_classification_dataset import PairMergeSentenceClassificationDataset as PMSCDataset from ark_nlp.dataset.base._sentence_classification_dataset import TwinTowersSentenceClassificationDataset from ark_nlp.dataset.base._sentence_classification_dataset import TwinTowersSentenceClassificationDataset as TTSCDataset from ark_nlp.dataset.base._token_classification_dataset import TokenClassificationDataset from ark_nlp.dataset.text_classification_dataset import TCDataset from ark_nlp.dataset.text_match_dataset import TMDataset from ark_nlp.dataset.bio_named_entity_recognition_dataset import BIONERDataset from ark_nlp.dataset.biaffine_named_entity_recognition_dataset import BiaffineNERDataset from ark_nlp.dataset.span_named_entity_recognition_dataset import SpanNERDataset from ark_nlp.dataset.global_pointer_named_entity_recognition_dataset import GlobalPointerNERDataset
70.333333
120
0.919431
146
1,266
7.582192
0.226027
0.088528
0.126468
0.214995
0.69738
0.495032
0.495032
0.495032
0.495032
0.495032
0
0
0.051343
1,266
17
121
74.470588
0.921732
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
4
c142870cdc5b68b605e9ca3cb9dda2dd39df1fad
674
py
Python
fastquotes/index/csi.py
YangzhenZhao/fastquotes
1faba9f7fc7801a11359001e08cefa9cfbc41d64
[ "MIT" ]
4
2020-11-18T11:25:00.000Z
2021-04-08T01:02:49.000Z
fastquotes/index/csi.py
YangzhenZhao/fastquotes
1faba9f7fc7801a11359001e08cefa9cfbc41d64
[ "MIT" ]
null
null
null
fastquotes/index/csi.py
YangzhenZhao/fastquotes
1faba9f7fc7801a11359001e08cefa9cfbc41d64
[ "MIT" ]
1
2020-11-18T11:25:01.000Z
2020-11-18T11:25:01.000Z
import codecs
import json

import requests

from ..const import CUSTOM_HEADER


def latest_year_data(code: str, latest_year: int) -> list:
    """Fetch daily closing prices for a CSI index over a trailing window.

    latest_year: 1、3、5 (years of history accepted by the endpoint)

    Returns a list of dicts, each with a "date" (YYYY-MM-DD) and a
    "close" value, in the order returned by the CSI endpoint.
    """
    url = (
        f"http://www.csindex.com.cn/zh-CN/indices/index-detail/{code}?"
        f"earnings_performance={latest_year}%E5%B9%B4&data_type=json"
    )
    raw = requests.get(url, headers=CUSTOM_HEADER).text
    # Round-trip through utf-8-sig to strip a possible BOM before parsing.
    payload = codecs.decode(raw.encode(), "utf-8-sig")
    return [
        {
            "date": record["tradedate"][:10],
            "close": record["tclose"],
        }
        for record in json.loads(payload)
    ]
23.241379
71
0.578635
87
674
4.344828
0.655172
0.079365
0.058201
0
0
0
0
0
0
0
0
0.018405
0.274481
674
28
72
24.071429
0.754601
0.02819
0
0
0
0.05
0.236307
0.090767
0
0
0
0
0
1
0.05
false
0
0.2
0
0.3
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c144afd84ce935554287482e6c1ce7b3ab3a0525
5,596
py
Python
Python/split-array-into-fibonacci-sequence.py
RideGreg/LeetCode
b70818b1e6947bf29519a24f78816e022ebab59e
[ "MIT" ]
1
2022-01-30T06:55:28.000Z
2022-01-30T06:55:28.000Z
Python/split-array-into-fibonacci-sequence.py
RideGreg/LeetCode
b70818b1e6947bf29519a24f78816e022ebab59e
[ "MIT" ]
null
null
null
Python/split-array-into-fibonacci-sequence.py
RideGreg/LeetCode
b70818b1e6947bf29519a24f78816e022ebab59e
[ "MIT" ]
1
2021-12-31T03:56:39.000Z
2021-12-31T03:56:39.000Z
# Time: O(n^3) # Space: O(n) # 842 # Given a string S of digits, such as S = "123456579", # we can split it into a Fibonacci-like sequence [123, 456, 579]. # # Formally, a Fibonacci-like sequence is a list F of non-negative # integers such that: # # 0 <= F[i] <= 2^31 - 1, # (that is, each integer fits a 32-bit signed integer type); # F.length >= 3; # and F[i] + F[i+1] = F[i+2] for all 0 <= i < F.length - 2. # Also, note that when splitting the string into pieces, # each piece must not have extra leading zeroes, # except if the piece is the number 0 itself. # # Return any Fibonacci-like sequence split from S, # or return [] if it cannot be done. # # Example 1: # # Input: "123456579" # Output: [123,456,579] # Example 2: # # Input: "11235813" # Output: [1,1,2,3,5,8,13] # Example 3: # # Input: "112358130" # Output: [] # Explanation: The task is impossible. # Example 4: # # Input: "0123" # Output: [] # Explanation: Leading zeroes are not allowed, so "01", "2", "3" is not valid. # Example 5: # # Input: "1101111" # Output: [110, 1, 111] # Explanation: The output [11, 0, 11, 11] would also be accepted. # # Note: # - 1 <= S.length <= 200 # - S contains only digits. # Solution: Brute Force # The first two elements of the array uniquely determine the rest of the sequence. # For each of the first two elements, assuming they have no leading zero, let's iterate through the rest of the string. # At each stage, we expect a number less than or equal to 2^31 - 1 that starts with the sum of the two previous numbers. 
try:
    xrange          # Python 2
except NameError:
    xrange = range  # Python 3


class Solution(object):
    def splitIntoFibonacci(self, S):  # 20ms
        """Return a Fibonacci-like split of digit string S, or [] if impossible.

        :type S: str
        :rtype: List[int]
        """
        # The first two numbers determine the whole sequence. Each number
        # must fit a 32-bit signed int, so try at most 10 digits for each.
        for i in xrange(min(10, len(S) - 2)):
            a = S[:i + 1]
            if a.startswith('0') and a != '0':
                break  # a leading zero only gets worse for longer prefixes
            a = int(a)
            for j in xrange(i + 1, min(i + 10, len(S) - 1)):
                b = S[i + 1:j + 1]
                if b.startswith('0') and b != '0':
                    break
                b = int(b)

                fib = [a, b]
                k = j + 1
                while k < len(S):
                    nxt = fib[-1] + fib[-2]
                    nxtS = str(nxt)
                    if nxt <= 2 ** 31 - 1 and S[k:].startswith(nxtS):
                        k += len(nxtS)
                        fib.append(nxt)
                    else:
                        break
                else:
                    # Consumed the whole string without any mismatch.
                    if len(fib) >= 3:
                        return fib
        return []

    def splitIntoFibonacci_kamyu(self, S):
        """Same brute force, but parses digits incrementally (no slicing)."""
        def startswith(S, k, x):
            # Number of digits of S[k:] that spell exactly x, else 0.
            y = 0
            for i in xrange(k, len(S)):
                y = 10 * y + int(S[i])
                if y == x:
                    return i - k + 1
                elif y > x:
                    break
            return 0

        MAX_INT = 2 ** 31 - 1
        a = 0
        for i in xrange(len(S) - 2):
            a = 10 * a + int(S[i])
            b = 0
            for j in xrange(i + 1, len(S) - 1):
                b = 10 * b + int(S[j])
                fib = [a, b]
                k = j + 1
                while k < len(S):
                    if fib[-2] > MAX_INT - fib[-1]:
                        break  # next term would exceed the 32-bit limit
                    c = fib[-2] + fib[-1]
                    length = startswith(S, k, c)
                    if length == 0:
                        break
                    fib.append(c)
                    k += length
                else:
                    return fib
                if b == 0:
                    break  # '0' cannot be extended without a leading zero
            if a == 0:
                break
        return []

    # Bad time complexity 240 ms, try to determine first 3 numbers in fib sequence at once.
    # repeat partition with same first number ...
    # not good as the official solution which calculates each first number only once
    def splitIntoFibonacci_mingContest(self, S):
        def isF(ss):
            # Find a 3-way partition d1|d2|d3 of ss with d1 + d2 == d3,
            # each piece within 32-bit range and without leading zeros.
            for i in xrange(1, min(11, len(ss) - 1)):
                for j in xrange(i + 1, len(ss)):
                    d1, d2, d3 = ss[:i], ss[i:j], ss[j:]
                    fail = False
                    for d in (d1, d2, d3):
                        if len(d) > 1 and d[0] == '0' or int(d) > 2147483647:
                            fail = True
                            break
                    if fail:
                        continue
                    if int(d1) + int(d2) == int(d3):
                        # print('{} {} {}'.format(d1, d2, d3))
                        return [int(d1), int(d2), int(d3)]
            return []

        if len(S) < 3:
            return []  # fixed: previously returned False; contract is a list
        ans = []
        for i in xrange(3, min(31, len(S) + 1)):
            ans = []
            ret = isF(S[:i])
            if ret:
                ans = ret
                d2, d3 = ret[1], ret[2]
                ok = True
                while i < len(S) and ok:
                    if d2 + d3 > 2147483647:
                        ok = False
                        break
                    nd = str(d2 + d3)
                    if i + len(nd) <= len(S) and nd == S[i:i + len(nd)]:
                        i += len(nd)
                        d2, d3 = d3, d2 + d3
                        ans.append(d3)
                    else:
                        ok = False
                if i == len(S) and ok:
                    return ans
        # NOTE(review): if the last prefix seeds a walk that fails midway,
        # this returns that partial sequence rather than [] — preserved as-is.
        return ans


# Fixed: these were Python 2 `print` statements, a SyntaxError on Python 3
# even though the xrange shim above targets both versions.
print(Solution().splitIntoFibonacci('123456579'))   # [123, 456, 579]
print(Solution().splitIntoFibonacci('11235813'))    # [1, 1, 2, 3, 5, 8, 13]
print(Solution().splitIntoFibonacci('112358130'))   # []
print(Solution().splitIntoFibonacci('0123'))        # []
print(Solution().splitIntoFibonacci('1101111'))     # [11, 0, 11, 11]
32.16092
120
0.454789
746
5,596
3.406166
0.261394
0.01889
0.011806
0.023613
0.069264
0.051161
0.033845
0.014168
0.014168
0.014168
0
0.092398
0.42173
5,596
174
121
32.16092
0.692831
0.312545
0
0.259615
0
0
0.011284
0
0
0
0
0
0
0
null
null
0
0
null
null
0.048077
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
c144dd7ed4502a22ce0fcfc2f712cd5108d540e6
5,160
py
Python
substrabac/substrapp/utils.py
GuillaumeCisco/substra-backend
777ec0cfc10a1aad34cccba449e4923c20786d32
[ "Apache-2.0" ]
null
null
null
substrabac/substrapp/utils.py
GuillaumeCisco/substra-backend
777ec0cfc10a1aad34cccba449e4923c20786d32
[ "Apache-2.0" ]
null
null
null
substrabac/substrapp/utils.py
GuillaumeCisco/substra-backend
777ec0cfc10a1aad34cccba449e4923c20786d32
[ "Apache-2.0" ]
null
null
null
import io
import hashlib
import logging
import os
import tempfile
from os import path
from os.path import isfile, isdir
import shutil

import requests
import tarfile
import zipfile
import uuid

from checksumdir import dirhash
from django.conf import settings
from rest_framework import status


class JsonException(Exception):
    """Exception carrying a message intended for JSON error responses."""

    def __init__(self, msg):
        self.msg = msg
        super(JsonException, self).__init__()


def get_dir_hash(archive_object):
    """Uncompress `archive_object` into a temp dir and return its sha256 dirhash.

    The file object is rewound (seek(0)) so callers can reuse it.
    """
    with tempfile.TemporaryDirectory() as temp_dir:
        try:
            content = archive_object.read()
            archive_object.seek(0)
            uncompress_content(content, temp_dir)
        except Exception as e:
            logging.error(e)
            # fixed: bare `raise` keeps the original traceback (was `raise e`)
            raise
        else:
            return dirhash(temp_dir, 'sha256')


def store_datasamples_archive(archive_object):
    """Uncompress a datasamples archive under MEDIA_ROOT.

    Returns:
        (dir_hash, tmp_path): the sha256 dirhash of the uncompressed content
        and the temporary directory path. The caller must remove `tmp_path`.
    """
    try:
        content = archive_object.read()
        archive_object.seek(0)
    except Exception as e:
        logging.error(e)
        raise

    # Temporary directory for uncompress
    datasamples_uuid = uuid.uuid4().hex
    tmp_datasamples_path = path.join(getattr(settings, 'MEDIA_ROOT'),
                                     f'datasamples/{datasamples_uuid}')
    try:
        uncompress_content(content, tmp_datasamples_path)
    except Exception as e:
        shutil.rmtree(tmp_datasamples_path, ignore_errors=True)
        logging.error(e)
        raise
    else:
        # return the directory hash of the uncompressed file and the path of
        # the temporary directory. The removal should be handled externally.
        return dirhash(tmp_datasamples_path, 'sha256'), tmp_datasamples_path


def get_hash(file, key=None):
    """Return the sha256 of a path (file or directory) or a file-like object.

    Returns '' for None or a non-existent path. `key` is folded into the
    hash by compute_hash when provided.
    """
    if file is None:
        return ''
    if isinstance(file, (str, bytes, os.PathLike)):
        if isfile(file):
            with open(file, 'rb') as f:
                data = f.read()
        elif isdir(file):
            return dirhash(file, 'sha256')
        else:
            return ''
    else:
        # Django File-like object: open, read, rewind for reuse.
        openedfile = file.open()
        data = openedfile.read()
        openedfile.seek(0)
    return compute_hash(data, key)


def get_owner():
    """Return this node's MSP id from the LEDGER settings."""
    ledger_settings = getattr(settings, 'LEDGER')
    return ledger_settings['client']['msp_id']


def compute_hash(bytes, key=None):
    """Return the sha256 hexdigest of `bytes` (str or bytes), salted by `key`.

    NOTE: the first parameter shadows the builtin `bytes`; the name is kept
    for backward compatibility with existing callers.
    """
    sha256_hash = hashlib.sha256()
    if isinstance(bytes, str):
        bytes = bytes.encode()
    if key is not None and isinstance(key, str):
        bytes += key.encode()
    sha256_hash.update(bytes)
    return sha256_hash.hexdigest()


def create_directory(directory):
    """Create `directory` (including parents) if it does not already exist."""
    if not os.path.exists(directory):
        os.makedirs(directory)


class ZipFile(zipfile.ZipFile):
    """Override Zipfile to ensure unix file permissions are preserved.

    This is due to a python bug:
    https://bugs.python.org/issue15795

    Workaround from:
    https://stackoverflow.com/questions/39296101/python-zipfile-removes-execute-permissions-from-binaries
    """

    def extract(self, member, path=None, pwd=None):
        if not isinstance(member, zipfile.ZipInfo):
            member = self.getinfo(member)

        if path is None:
            path = os.getcwd()

        ret_val = self._extract_member(member, path, pwd)
        # Restore the unix mode bits stored in the upper 16 bits.
        attr = member.external_attr >> 16
        os.chmod(ret_val, attr)
        return ret_val


def uncompress_path(archive_path, to_directory):
    """Extract a zip or tar archive located at `archive_path`.

    Raises:
        Exception: if the file is neither a zip nor a tar archive.
    """
    if zipfile.is_zipfile(archive_path):
        with ZipFile(archive_path, 'r') as zf:
            zf.extractall(to_directory)
    elif tarfile.is_tarfile(archive_path):
        # 'r:*' accepts any tar compression, not just gzip.
        with tarfile.open(archive_path, 'r:*') as tf:
            # NOTE(review): extractall on an untrusted archive can write
            # outside to_directory (path traversal); members assumed trusted.
            tf.extractall(to_directory)
    else:
        # fixed: message said 'tar.gz' although any tar.* is accepted;
        # now consistent with uncompress_content below.
        raise Exception('Archive must be zip or tar.*')


def uncompress_content(archive_content, to_directory):
    """Extract in-memory zip or tar archive bytes into `to_directory`."""
    if zipfile.is_zipfile(io.BytesIO(archive_content)):
        with ZipFile(io.BytesIO(archive_content)) as zf:
            zf.extractall(to_directory)
    else:
        try:
            with tarfile.open(fileobj=io.BytesIO(archive_content)) as tf:
                tf.extractall(to_directory)
        except tarfile.TarError:
            raise Exception('Archive must be zip or tar.*')


class NodeError(Exception):
    pass


def get_remote_file(url, auth, **kwargs):
    """GET `url` with node auth headers; returns the raw requests response.

    Raises:
        NodeError: on connection failure or timeout.
    """
    kwargs.update({
        'headers': {'Accept': 'application/json;version=0.0'},
        'auth': auth
    })
    if settings.DEBUG:
        kwargs['verify'] = False
    try:
        response = requests.get(url, **kwargs)
    except (requests.exceptions.ConnectionError,
            requests.exceptions.Timeout) as e:
        raise NodeError(f'Failed to fetch {url}') from e
    return response


def get_remote_file_content(url, auth, content_hash, salt=None):
    """Fetch `url` and verify its body against `content_hash` (optionally salted).

    Raises:
        NodeError: on non-200 response or hash mismatch.
    """
    response = get_remote_file(url, auth)

    if response.status_code != status.HTTP_200_OK:
        logging.error(response.text)
        raise NodeError(f'Url: {url} returned status code: {response.status_code}')

    computed_hash = compute_hash(response.content, key=salt)
    if computed_hash != content_hash:
        raise NodeError(f"url {url}: hash doesn't match {content_hash} vs {computed_hash}")

    return response.content
27.593583
105
0.64845
634
5,160
5.130915
0.293375
0.023978
0.027667
0.0166
0.183523
0.134952
0.083923
0.071934
0.050415
0
0
0.011743
0.257364
5,160
186
106
27.741935
0.837161
0.081008
0
0.236641
0
0
0.069807
0.016974
0
0
0
0
0
1
0.091603
false
0.007634
0.114504
0
0.312977
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c1472dfe5defd2c2cedbd1490cbcbbe0967be5ae
67
py
Python
{{cookiecutter.git_project_name}}/core/views/__init__.py
imAsparky/django-cookiecutter
6608b46d3b3a75ef8880b18724eb06e9ed7567df
[ "BSD-3-Clause" ]
null
null
null
{{cookiecutter.git_project_name}}/core/views/__init__.py
imAsparky/django-cookiecutter
6608b46d3b3a75ef8880b18724eb06e9ed7567df
[ "BSD-3-Clause" ]
128
2021-09-28T03:17:40.000Z
2022-03-19T00:21:50.000Z
{{cookiecutter.git_project_name}}/core/views/__init__.py
imAsparky/django-cookiecutter
6608b46d3b3a75ef8880b18724eb06e9ed7567df
[ "BSD-3-Clause" ]
null
null
null
"""Initialise {{cookiecutter.git_project_name}} Core App Views."""
33.5
66
0.746269
8
67
6
1
0
0
0
0
0
0
0
0
0
0
0
0.074627
67
1
67
67
0.774194
0.895522
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
c1484f680c8d5268a7187ffd0cd5d37747e57a92
1,569
py
Python
Algorithms/Assign1/v2.py
thebishaldeb/ClassAssignments
f44c51695266da0c98d1ab3516c473c6d1008933
[ "MIT" ]
null
null
null
Algorithms/Assign1/v2.py
thebishaldeb/ClassAssignments
f44c51695266da0c98d1ab3516c473c6d1008933
[ "MIT" ]
null
null
null
Algorithms/Assign1/v2.py
thebishaldeb/ClassAssignments
f44c51695266da0c98d1ab3516c473c6d1008933
[ "MIT" ]
null
null
null
# FUNCTION
def med(arr1, arr2, length):
    """Return the lower median of two equal-length sorted lists.

    Classic divide and conquer: compare the middle elements of each list
    and recurse on the halves that can still contain the combined median.
    `length` is the (common) length of both lists, assumed >= 2.
    """
    if length == 2:
        return findMed(arr1, arr2)
    mid = int((length - 1) / 2)
    if arr1[mid] == arr2[mid]:
        # fixed: the original had no equality branch and fell through,
        # returning None. When both middles agree that value is the median.
        return arr1[mid]
    if arr1[mid] < arr2[mid]:
        return med(arr2[0:mid + 1], arr1[-mid - 1:length], len(arr2[0:mid + 1]))
    return med(arr1[0:mid + 1], arr2[-mid - 1:length], len(arr1[0:mid + 1]))


def findMed(arr1, arr2):
    """Base case: lower median of the merged (four-element) lists."""
    return sorted(arr1 + arr2)[int(len(arr1 + arr2) / 2 - 1)]


def _load_db(filename):
    """Parse 'key- title' lines of `filename` into {int(key): title}."""
    db = {}
    with open(filename, "r") as file:
        for line in file:
            x = line.split("- ")
            db[int(x[0])] = x[1][0:len(x[1]) - 1]  # drop trailing newline
    return db


def main():
    # Dictionaries to store databases from the text files
    db1 = _load_db("db1.txt")
    db2 = _load_db("db2.txt")

    print(db1)
    print(db2)

    kth = int(input('\nEnter the no of the smallest movie: '))
    print('\nThe reqd. smallest movie from first database:', db1[sorted(db1)[kth - 1]])
    print('\nThe reqd. smallest movie from second database:', db2[sorted(db2)[kth - 1]])

    # The Duration of the movie of Databases sorted and stored in the lists
    arr1 = sorted(db1)
    arr2 = sorted(db2)
    length = len(arr1)  # No. of movies in the database

    median = med(arr1, arr2, length)  # Function 'med' defined at the top.
    for i in range(length):
        if arr1[i] == median:
            print("\nThe movie with median duration, i.e.", median, "is", db1[median])
            break
        elif arr2[i] == median:
            print("\nThe movie with median duration, i.e.", median, "is", db2[median])
            break


# Guarded so the functions above can be imported without touching db files.
if __name__ == "__main__":
    main()
26.59322
82
0.586361
250
1,569
3.68
0.26
0.052174
0.021739
0.036957
0.323913
0.323913
0.208696
0.208696
0.208696
0.208696
0
0.058774
0.240918
1,569
59
83
26.59322
0.713686
0.123646
0
0.166667
0
0
0.170073
0
0
0
0
0
0
1
0.055556
false
0
0
0.027778
0.166667
0.166667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c1486548c7b778a225198dc3750c6bc512122e6c
4,025
py
Python
brain-imaging/run_tsne_brain.py
agramfort/spatio-temporal-alignements
18594cf0372dc874decccecad69e310f84142c88
[ "BSD-3-Clause" ]
28
2019-10-18T07:29:52.000Z
2022-01-27T15:12:45.000Z
brain-imaging/run_tsne_brain.py
agramfort/spatio-temporal-alignements
18594cf0372dc874decccecad69e310f84142c88
[ "BSD-3-Clause" ]
2
2021-01-16T18:34:31.000Z
2022-02-03T14:49:34.000Z
brain-imaging/run_tsne_brain.py
agramfort/spatio-temporal-alignements
18594cf0372dc874decccecad69e310f84142c88
[ "BSD-3-Clause" ]
4
2021-01-16T17:22:23.000Z
2022-01-11T03:24:24.000Z
import mne
import pickle
import numpy as np
from sta import sta_matrix, sdtw_matrix
from sklearn.manifold import TSNE

# change this if you have GPUs
# in our platform, this experiment ran on 4 GPUs in around 20 minutes
n_gpu_devices = 0


def generate_samples(n_samples, n_times, time_point, space_points, M,
                     smoothing_time=1., smoothing_space=0.01, seed=None):
    """Simulate brain signals at a time_point and in a random vertex among
    `space_points`.

    Each sample is an (n_times, n_features) array: a single impulse of
    random amplitude in [1, 3) is placed at `time_point` and at a vertex
    drawn from `space_points`, then smoothed in space and time with
    Gaussian kernels built from `M` (pairwise squared ground metric over
    vertices) and the squared time distances.
    """
    rng = np.random.RandomState(seed)
    n_features = len(M)
    # One impulse per sample: all at the same time index, random vertex.
    time_points = (np.ones(n_samples) * time_point).astype(int)
    space_points = rng.choice(space_points, size=n_samples)
    signals = np.zeros((n_samples, n_times, n_features)).astype(float)
    values = rng.rand(n_samples) * 2 + 1
    signals[np.arange(n_samples), time_points, space_points] = values

    # create temporal and spatial gaussian filters to smooth the data
    times = np.arange(n_times)
    metric = (times[:, None] - times[None, :]) ** 2
    kernel_time = np.exp(- metric / smoothing_time)
    kernel_space = np.exp(- M / smoothing_space)
    for i, signal in enumerate(signals):
        # `signal` is a view of signals[i]; the first assignment writes
        # through it, so the temporal smoothing below operates on the
        # spatially-smoothed data (intentional two-pass smoothing).
        signals[i] = kernel_space.dot(signal.T).T
        signals[i] = kernel_time.dot(signal)
    return signals


if __name__ == "__main__":
    # load brain regions
    mt = mne.read_label("data/lh.MT.label")
    v1 = mne.read_label("data/lh.V1.label")

    # load ground metric defined on the cortical triangulated mesh
    M_ = np.load("data/ground_metric.npy") ** 2
    M = M_ / np.median(M_)
    vertices = [np.arange(642), []]
    gamma = 1.
    n_features = len(M)
    # Entropic-regularization strength scaled by the number of vertices.
    epsilon = 10. / n_features
    K = np.exp(- M / epsilon)
    # Restrict region vertices to the 642-vertex source space used above.
    mt_vertices = mt.vertices[mt.vertices < 642]
    v1_vertices = v1.vertices[v1.vertices < 642]

    seed = 42
    n_samples_per_task = 50
    n_times = 20
    time0, time1 = 5, 15

    # Create the four categories of brain signals with different random seeds
    meg_v1_0 = generate_samples(n_samples_per_task, n_times, time0,
                                v1_vertices, M=M, seed=seed)
    meg_v1_1 = generate_samples(n_samples_per_task, n_times, time1,
                                v1_vertices, M=M, seed=seed + 1)
    meg_mt_0 = generate_samples(n_samples_per_task, n_times, time0,
                                mt_vertices, M=M, seed=seed + 2)
    meg_mt_1 = generate_samples(n_samples_per_task, n_times, time1,
                                mt_vertices, M=M, seed=seed + 3)

    # to avoid numerical errors with Sinkhorn, add 1e-3
    meg = np.concatenate((meg_v1_0, meg_v1_1, meg_mt_0, meg_mt_1)) + 1e-3

    # create labels for categories
    y_time = np.r_[2 * np.r_[n_samples_per_task * [0],
                             n_samples_per_task * [1]].tolist()]
    y_space = np.r_[2 * n_samples_per_task * [0],
                    2 * n_samples_per_task * [1]]

    betas = [0, 0.001, 0.01, 0.1, 0.5, 1., 2., 3., 5., 10.]
    experiment = dict(meg=meg, y_time=y_time, y_space=y_space, betas=betas)
    train_data = []
    n_samples, n_times, dimension = meg.shape
    params = dict(K=K, epsilon=epsilon, gamma=gamma, n_jobs=4,
                  n_gpu_devices=n_gpu_devices)
    # Pairwise STA distance matrices, one per beta, computed in one call.
    precomputed = sta_matrix(meg, betas, **params)
    experiment["sta"] = dict()
    for beta, train_ in zip(betas, precomputed):
        train = train_.copy()
        # shift the distance to avoid negative values with large betas
        train -= train.min()
        tsne_data = TSNE(metric="precomputed").fit_transform(train)
        experiment["sta"][beta] = tsne_data

    # Same embedding pipeline, but with soft-DTW distances per beta.
    method = "soft"
    experiment["soft"] = dict()
    for beta in betas:
        precomputed = sdtw_matrix(meg, beta, n_jobs=10)
        train = precomputed.copy()
        # shift the distance to avoid negative values with large betas
        train -= train.min()
        tsne_data = TSNE(metric="precomputed").fit_transform(train)
        experiment[method][beta] = tsne_data

    # NOTE(review): the file handle is never closed explicitly; presumably
    # relied on process exit — consider a `with` block.
    expe_file = open("data/tsne-brains.pkl", "wb")
    pickle.dump(experiment, expe_file)
37.268519
79
0.640248
592
4,025
4.119932
0.282095
0.052481
0.04059
0.055351
0.245182
0.203362
0.170562
0.170562
0.170562
0.170562
0
0.030723
0.24795
4,025
107
80
37.616822
0.775025
0.148571
0
0.08
0
0
0.035253
0.006463
0
0
0
0
0
1
0.013333
false
0
0.066667
0
0.093333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c14cdcac7d3354fc54dac9a43c5883bc3c65d04c
2,161
py
Python
cyphercat/datadefs/cifar10_dataset.py
arafin-lab/model_inversion_experiments
8e491f22ae847d8d0f2f31caed6ad0078c9b6a49
[ "Apache-2.0" ]
101
2018-10-26T14:39:19.000Z
2022-03-28T11:38:45.000Z
cyphercat/datadefs/cifar10_dataset.py
zhampel/cyphercat
4029ae8683b9056013e6424d8931afe79afa618e
[ "Apache-2.0" ]
32
2018-10-10T23:02:04.000Z
2019-11-11T21:30:57.000Z
cyphercat/datadefs/cifar10_dataset.py
zhampel/cyphercat
4029ae8683b9056013e6424d8931afe79afa618e
[ "Apache-2.0" ]
49
2018-10-05T17:54:49.000Z
2022-02-10T07:47:25.000Z
import torch
from torch.utils.data import Dataset, ConcatDataset
import torchvision
from cyphercat.definitions import DATASETS_DIR, DATASPLITS_DIR
from tqdm import tqdm
import pandas as pd
import numpy as np
import pickle
import os

from .splitter import splitter, dataset_split


def Cifar10_preload_and_split(path=None, splits=None, transform=None):
    """Index and split CIFAR10 dataset.

    Args:
        path (string): Path to location containing dataset. If left as None
            will search default location 'DATASETS_DIR' specified in
            definitions.
        splits (list): list of fractional splits. Defaults to
            [0.4, 0.1, 0.25, 0.25].
        transform (list): list whose first element is the torchvision
            transform applied to both the train and test sets; None means
            no transform.

    Returns:
        dict(Dataframes): Dictionary containing the dataframes corresponding
            to each split including metadata.

    Example:

    Todo:
        - Write Example.
        - More work on user specified splits.
    """
    if path is None:
        path = DATASETS_DIR
    # Fixed two defects in the original defaults, backward-compatibly:
    # `splits` was a shared mutable default list, and `transform=None`
    # crashed below at transform[0].
    if splits is None:
        splits = [0.4, 0.1, 0.25, 0.25]
    if transform is None:
        transform = [None]  # torchvision datasets accept transform=None

    index_file = os.path.join(path, 'cifar10.index.csv')
    indices = None
    # Reuse a previously saved permutation so splits are reproducible.
    if os.path.exists(index_file):
        index_csv = np.loadtxt(index_file)
        indices = torch.tensor(index_csv)
        print('Found predefined indexing file {}'.format(index_file))

    trainset = torchvision.datasets.CIFAR10(path, train=True,
                                            transform=transform[0],
                                            download=False)
    testset = torchvision.datasets.CIFAR10(path, train=False,
                                           transform=transform[0],
                                           download=False)
    fullset = ConcatDataset([trainset, testset])
    print('Initializing CIFAR10Dataset splits')

    # Convert fractional splits to integer sizes over the combined set.
    dset_size = fullset.cumulative_sizes[-1]
    int_splits = []
    for i in range(len(splits)):
        int_splits.append(int(dset_size * splits[i]))
    # Give any rounding remainder to the last split so sizes sum exactly.
    if sum(int_splits) < dset_size:
        rem = dset_size - sum(int_splits)
        int_splits[-1] += rem

    indices, splitsets = dataset_split(fullset, int_splits, indices=indices)
    if not os.path.exists(index_file):
        print('No predefined indexing file found, so index permutations saving to {}'.format(index_file))
        np.savetxt(index_file, indices.numpy(), fmt='%i', delimiter=',')

    print('Finished splitting data.')
    return splitsets
32.742424
105
0.690421
276
2,161
5.300725
0.42029
0.043062
0.016405
0.02324
0.120301
0
0
0
0
0
0
0.015339
0.215641
2,161
65
106
33.246154
0.847788
0.220731
0
0
0
0
0.110769
0
0
0
0
0.015385
0
1
0.027778
false
0
0.277778
0
0.333333
0.111111
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
c14dc762e4ac7e02bd632552dcbebfc3d90dd126
31
py
Python
easyTCP/SERVER/utils/__init__.py
dsal3389/easyTCP
0a11ffe4726bfd0461c24fa459e417fd2fe3cd7f
[ "MIT" ]
4
2018-12-09T13:57:59.000Z
2019-10-19T19:34:28.000Z
easyTCP/SERVER/utils/__init__.py
dsal3389/easyTCP
0a11ffe4726bfd0461c24fa459e417fd2fe3cd7f
[ "MIT" ]
null
null
null
easyTCP/SERVER/utils/__init__.py
dsal3389/easyTCP
0a11ffe4726bfd0461c24fa459e417fd2fe3cd7f
[ "MIT" ]
null
null
null
#from .BUILD_IN import BUILD_IN
31
31
0.83871
6
31
4
0.666667
0.583333
0
0
0
0
0
0
0
0
0
0
0.096774
31
1
31
31
0.857143
0.967742
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
6
c14f824a0678ada5998332bc22d1955b5b7acece
19,547
py
Python
src/muscle_synergies/vicon_data/user_data.py
elvis-sik/muscle_synergies
eff0d016f2032faa9b8fba5363249e6fdb150abf
[ "MIT" ]
6
2021-02-05T21:53:08.000Z
2022-01-20T16:50:39.000Z
src/muscle_synergies/vicon_data/user_data.py
elvis-sik/muscle_synergies
eff0d016f2032faa9b8fba5363249e6fdb150abf
[ "MIT" ]
1
2021-02-06T14:14:52.000Z
2021-03-01T03:44:23.000Z
src/muscle_synergies/vicon_data/user_data.py
elvis-sik/muscle_synergies
eff0d016f2032faa9b8fba5363249e6fdb150abf
[ "MIT" ]
null
null
null
"""Types that help building the final representation of the data. From the point of view of the internal API, the main type in this module is :py:class:`Builder`, which uses the data stored in an :py:class:`~muscle_synergies.vicon_data.aggregator.Aggregator` to build the :py:class:`ViconNexusData`. That object, in turn, simply holds references to :py:class:`DeviceData` instances corresponding to the different experimental devices, organized by their type (see :py:class:`~muscle_synergies.vicon_data.definitions.DeviceType`). Refer to the documentation for the package :py:mod:`muscle_synergies.vicon_data.__init__.py` for more on how :py:class:`Builder` fits together with the other classes used for reading the data from disk. """ import abc from collections import defaultdict from dataclasses import dataclass from functools import lru_cache from typing import Iterator, List, Mapping, Optional, Sequence, Tuple, Union import numpy as np import pandas as pd from .aggregator import Aggregator, DeviceAggregator from .definitions import DeviceType, SamplingFreq @dataclass class ViconNexusData: """The data contained in a Vicon Nexus CSV file. The initialization arguments are stored as they are under the same names. Args: forcepl: a sequence of :py:class:`DeviceData` corresponding to the different force plate devices. emg: a single :py:class:`DeviceData` that includes all columns with EMG measurements. traj: a sequence of :py:class:`DeviceData` corresponding to the different trajectory devices. """ forcepl: Sequence["DeviceData"] emg: "DeviceData" traj: Sequence["DeviceData"] def __repr__(self): return "ViconNexusData(forcepl=[...], emg=<DeviceData>, traj=[...])" def describe(self) -> str: """Represent ViconNexusData object as a Markdown list. This method is intended to help the user get a quick glance at what was loaded. 
The returned value will be a multiline string similar to this: ViconNexusData: + emg: 8 columns + forcepl (2 devices): DeviceData("Force Plate 1"), DeviceData("Force Plate 2") + traj (14 devices): DeviceData("Traj 1"), ..., DeviceData("Traj 14") In the case of force plates and trajectory markers, if there are more than 2 devices, they are occluded as in the last line of the example. """ emg_str = self._amount_str(self._num_cols(self.emg), "column") forcepl_len_str = self._amount_str(len(self.forcepl), "device") forcepl_members_str = self._stringify_list(self.forcepl) traj_len_str = self._amount_str(len(self.traj), "device") traj_members_str = self._stringify_list(self.traj) return f"""ViconNexusData: + emg: {emg_str} + forcepl ({forcepl_len_str}): {forcepl_members_str} + traj ({traj_len_str}): {traj_members_str}""" @staticmethod def _num_cols(dev: "DeviceData") -> int: """Get number of columns contained in :py:class:`DeviceData` object.""" return len(dev.df.columns) @staticmethod def _amount_str(num: int, noun: str) -> str: """Add an "s" to a noun to make it plural if needed.""" if num == 1: plural_s = "" else: plural_s = "s" return f"{num} {noun}{plural_s}" @staticmethod def _stringify_list(seq: Sequence) -> str: """Represent list as string occluding elements to make it short.""" seq = list(seq) if len(seq) > 2: seq = [seq[0]] + ["..."] + [seq[-1]] return ", ".join(map(str, seq)) class Builder: """Build a ViconNexusData using the data stored in an Aggregator.""" def __init__(self, aggregator: Optional[Aggregator] = None): self.aggregator = aggregator def build(self, aggregator: Optional[Aggregator] = None) -> ViconNexusData: """Build a ViconNexusData using the data stored in an Aggregator. Args: aggregator: if not provided, use the one given during initialization. Raises: ValueError if the number of EMG devices is not exactly 1. 
""" if aggregator is None: aggregator = self.aggregator frame_tracker = self._build_frame_tracker(aggregator) devices_by_type = defaultdict(list) for device_agg in self._devices(aggregator): device_data = self._build_device(device_agg, frame_tracker) device_type = self._device_agg_type(device_agg) devices_by_type[device_type].append(device_data) # TODO fix a typing mess: # 1. make _vicon_nexus_data get 3 parameters corresponding to device # type lists instead of a dict # 2. _simplify_emg now gets an emg_list and returns an emg_dev, # checking if the list has too many entries # done. return self._vicon_nexus_data(self._simplify_emg(devices_by_type)) def _build_device( self, device_agg: DeviceAggregator, frame_tracker: Tuple["ForcesEMGFrameTracker", "TrajFrameTracker"], ) -> "DeviceData": """Create new DeviceData from DeviceAggregator and frame trackers.""" params_dict = self._params_device_data(device_agg, frame_tracker) return self._instantiate_device(**params_dict) def _params_device_data( self, device_agg: DeviceAggregator, frame_tracker: Tuple["ForcesEMGFrameTracker", "TrajFrameTracker"], ) -> Mapping[str, Union[str, DeviceType, "_SectionFrameTracker", pd.DataFrame]]: """Build a dict with the params to create a new DeviceData instance. This method sets up a dict corresponding to the keyword arguments required by :py:meth`~Builder._instantiate_device`. 
""" return { "device_name": self._device_agg_name(device_agg), "device_type": self._device_agg_type(device_agg), "units": self._device_agg_units(device_agg), "frame_tracker": self._choose_frame_tracker(device_agg, *frame_tracker), "dataframe": self._extract_dataframe(device_agg), } def _build_frame_tracker( self, aggregator: Aggregator ) -> Tuple["ForcesEMGFrameTracker", "TrajFrameTracker"]: """Build frame trackers corresponding to Aggregator.""" sampling_freq = self._aggregator_sampling_freq(aggregator) return (ForcesEMGFrameTracker(sampling_freq), TrajFrameTracker(sampling_freq)) @staticmethod def _instantiate_device( device_name: str, device_type: DeviceType, units: List[str], frame_tracker: "_SectionFrameTracker", dataframe: pd.DataFrame, ) -> "DeviceData": """Instantiate new DeviceData object.""" return DeviceData( device_name=device_name, device_type=device_type, units=units, frame_tracker=frame_tracker, dataframe=dataframe, ) @classmethod def _extract_dataframe(cls, device_aggregator: DeviceAggregator) -> pd.DataFrame: """Create DataFrame with the data in the DeviceAggregator.""" data = cls._device_agg_data(device_aggregator) header = cls._device_agg_coords(device_aggregator) return pd.DataFrame(data, columns=header, dtype=float) def _simplify_emg( self, devices_by_type: Mapping[DeviceType, List["DeviceData"]] ) -> Mapping[DeviceType, Union["DeviceData", List["DeviceData"]]]: """Replaces list of EMG devices with the single device in dict. Args: devices_by_type: a dict which lists all devices of each type. Returns: a copy of the dict with one change. `new_devices_by_type[DeviceType.EMG]` will not be a a list of devices but rather a single one as it is assumed that all EMG data is represented as being different coordinates of a single experimental device. Raises: ValueError if the number of EMG devices is not exactly 1. 
""" new_devices_dict = dict(devices_by_type) emg_list = devices_by_type[DeviceType.EMG] if len(emg_list) != 1: raise ValueError(f"found {len(emg_list)} EMG devices - expected one") emg_dev = emg_list[0] new_devices_dict[DeviceType.EMG] = emg_dev return new_devices_dict @staticmethod def _vicon_nexus_data( devices_by_type: Mapping[DeviceType, Union["DeviceData", List["DeviceData"]]], ) -> ViconNexusData: """Instantiate new ViconNexusData object.""" return ViconNexusData( forcepl=devices_by_type[DeviceType.FORCE_PLATE], emg=devices_by_type[DeviceType.EMG], traj=devices_by_type[DeviceType.TRAJECTORY_MARKER], ) @staticmethod def _devices(aggregator: Aggregator) -> Iterator[DeviceAggregator]: """Yield all `DeviceAggregator`s stored in the Aggregator.""" yield from aggregator.get_devices() def _choose_frame_tracker( self, device_agg: DeviceAggregator, forces_emg_tracker: "ForcesEMGFrameTracker", traj_tracker: "TrajFrameTracker", ) -> "_SectionFrameTracker": """Choose the correct frame tracker for device.""" forces_emg = {DeviceType.FORCE_PLATE, DeviceType.EMG} if self._device_agg_type(device_agg) in forces_emg: return forces_emg_tracker return traj_tracker @staticmethod def _device_agg_name(device_aggregator: DeviceAggregator) -> str: """Get device name from DeviceAggregator.""" return device_aggregator.name @staticmethod def _device_agg_type(device_aggregator: DeviceAggregator) -> DeviceType: """Get device type from DeviceAggregator.""" return device_aggregator.device_type @staticmethod def _device_agg_units(device_aggregator: DeviceAggregator) -> List[str]: """Get device units from DeviceAggregator.""" return device_aggregator.units @staticmethod def _device_agg_coords(device_aggregator: DeviceAggregator) -> List[str]: """Get device coordinates from DeviceAggregator.""" return device_aggregator.coords @staticmethod def _device_agg_data(device_aggregator: DeviceAggregator) -> List[List[float]]: """Get the data rows stored in DeviceAggregator.""" return 
device_aggregator.data_rows @staticmethod def _aggregator_sampling_freq(aggregator: Aggregator) -> "SamplingFreq": """Get the sampling frequencies stored in Aggregator.""" return aggregator.get_sampling_freq() class _SectionFrameTracker(abc.ABC): """Convert array indices to/from (frame, subframe) for a section. This class is abstract, subclasses implement the conversions, which differ between the 2 sections of the CSV file. The first data row will have index 0 and correspond to frame 0 and subframe 0. The second data row will have index 1 but its frame and subframe will differ depending on the relative sampling rate of each section. See :py:class:`~muscle_synergies.vicon_data.definitions.SamplingFreq`. The 2 main methods of :py:class:`_SectionFrameTracker` are: + :py:meth:`~_SectionFrameTracker.index`: convert frame and subframe to the corresponding array index. + :py:meth:`~_SectionFrameTracker.frame_tracker`: convert an array index to the corresponding frame and subframe. """ def __init__(self, sampling_freq=SamplingFreq): self._sampling_freq = sampling_freq @property def num_frames(self) -> int: """Total number of frames.""" return self._sampling_freq.num_frames @abc.abstractproperty def sampling_frequency(self) -> int: """Sampling frequency in Hz with which the measurements were made.""" pass @abc.abstractmethod def index(self, frame: int, subframe: int) -> int: """Array index associated with frame and subframe. Raises: ValueError if the arguments are outside of the allowed range. `frame` should be between 1 and :py:attr:`~_SectionFrameTracker.num_frames`. `subframe` should be between 0 and :py:attr:`~_SectionFrameTracker.num_subframes`. """ self._validate_frame_tracker_args(frame, subframe) @abc.abstractmethod def frame_tracker(self, index: int) -> Tuple[int, int]: """Frame and subframe associated with given array index. Raises: ValueError if the argument is outside of the allowed range (from 0 to :py:attr:`~_SectionFrameTracker.final_index`). 
""" self._validate_index_arg(index) @abc.abstractproperty def final_index(self) -> int: """The highest array index.""" pass @property def num_subframes(self) -> int: """The total number of subframes.""" return self._sampling_freq.num_subframes @property def _freq_forces_emg(self) -> int: """The sampling rate of the section with force plates and EMG.""" return self._sampling_freq.freq_forces_emg @property def _freq_traj(self) -> int: """The sampling rate of the section with trajectories.""" return self._sampling_freq.freq_traj def _validate_index_arg(self, index: int): """Raise exception if index is outside of allowed range.""" if index not in range(self.final_index + 1): raise ValueError(f"index {index} out of bounds (max is self.final_index)") def _validate_frame_tracker_args(self, frame: int, subframe: int): """Raise exception if frame and subframe are not in allowed range.""" if frame not in range(1, self.num_frames + 1): raise ValueError(f"frame {frame} is out of bounds") if subframe not in range(self.num_subframes): raise ValueError(f"subframe {subframe} out of range") def time_seq(self) -> pd.Series: """Create Series with times in seconds of all measurements.""" return self._time_seq(self.sampling_frequency, self.final_index + 1) @staticmethod @lru_cache(maxsize=2) def _time_seq(sampling_frequency: int, num_measurements: int) -> pd.Series: """Memoized version of time_seq.""" period = 1 / sampling_frequency return pd.Series(period * np.arange(1, num_measurements + 1, 1)) class ForcesEMGFrameTracker(_SectionFrameTracker): @property def sampling_frequency(self) -> int: return self._freq_forces_emg def index(self, frame: int, subframe: int) -> int: super().index(frame, subframe) return (frame - 1) * self.num_subframes + subframe def frame_tracker(self, index: int) -> Tuple[int, int]: super().frame_tracker(index) frame = (index // self.num_subframes) + 1 subframe = index % self.num_subframes return frame, subframe @property def final_index(self) -> int: return 
self.num_frames * self.num_subframes - 1 class TrajFrameTracker(_SectionFrameTracker): @property def sampling_frequency(self) -> int: return self._freq_traj def index(self, frame: int, subframe: int) -> int: super().index(frame, subframe) return frame - 1 def frame_tracker(self, index: int) -> Tuple[int, int]: super().frame_tracker(index) return index + 1, 0 @property def final_index(self) -> int: return self.num_frames - 1 class DeviceData: """Data associated with a measurement device.""" name: str """the name of the device, as it occurs on the CSV file. """ dev_type: DeviceType """the data associated with the device.""" units: Tuple[str] """physical units of each column in the :py:class:`~pandas.DataFrame`.""" df: pd.DataFrame """the type of the device (can be a force plate, trajectory marker or EMG device). """ def __init__( self, device_name: str, device_type: DeviceType, units: List[str], frame_tracker: _SectionFrameTracker, dataframe: pd.DataFrame, ): self.name = device_name self.dev_type = device_type self.units = tuple(units) self.df = dataframe self._frame_tracker = frame_tracker @property def sampling_frequency(self) -> int: """Sampling rate with which measurements were made.""" return self._frame_tracker.sampling_frequency def time_seq(self) -> pd.Series: """Compute the moment in seconds in which measurements were made. Returns: a :py:class:`pandas.Series` where each entry corresponds to """ return self._frame_tracker.time_seq() def iloc(self, frame: int, subframe: int) -> pd.Series: """Index data rows by their frame and subframe. This method works similarly to :py:attr:`pandas.DataFrame.iloc`: its purpose is to help the user index the data referring to rows. Whereas the :py:class:`~pandas.DataFrame` version is used by directly indexing it (`datafr.iloc[0]` returns the first row), the :py:class:`DeviceData` version is a method. To get the i-th row of the :py:class:`~pandas.DataFrame`, use its own :py:attr:`~pandas.DataFrame.iloc`. 
This method should be used only when the goal is to get not the i-th row but the one corresponding to a given frame and subframe. Raises: KeyError: if the frame and subframe are out of bounds. """ return self.df.iloc[self._convert_key(frame, subframe)] def frame_subfr(self, index: int) -> Tuple[ int, int]: """Find (frame, subframe) pair corresponding to index.""" return self._frame_tracker.frame_tracker(index) def _key_slice_frame_subframe( self, stop: Tuple[int, int], start: Optional[Tuple[int, int]] = None, step: Optional[int] = None, ) -> slice: """Create slice with indexes corresponding to (frame, subframe) range. Raises: KeyError: if the frame and subframe are out-of-bounds. """ stop_index = self._convert_key(*stop) if start is None: return slice(stop_index) start_index = self._convert_key(*start) if step is None: return slice(start_index, stop_index) return slice(start_index, stop_index, step) def _convert_key(self, frame: int, subframe: int) -> int: """Get index corresponding to given frame and subframe. Raises: KeyError: if the frame and subframe are out-of-bounds. """ try: return self._frame_tracker_index(frame, subframe) except ValueError as err: raise KeyError from err def _frame_tracker_index(self, frame: int, subframe: int) -> int: """Call FrameTracker.index with arguments.""" return self._frame_tracker.index(frame, subframe) def __eq__(self, other) -> bool: return ( self.name == other.name and self.dev_type == other.dev_type and self.units == other.units and self.df.equals(other.df) ) def __str__(self): return f'DeviceData("{self.name}")' def __repr__(self): return f"<{str(self)}>"
37.020833
91
0.65785
2,392
19,547
5.198579
0.15301
0.031846
0.012545
0.011259
0.268999
0.190109
0.154564
0.133735
0.117973
0.095456
0
0.003207
0.250269
19,547
527
92
37.091082
0.845309
0.339234
0
0.249097
0
0
0.068773
0.013434
0
0
0
0.001898
0
1
0.194946
false
0.00722
0.032491
0.028881
0.444043
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c1530d1f98179c78b07bae3b02ff2a685a89878e
1,629
py
Python
tests/modification-check.py
luisriverag/certbot
52e207a404ab3600637fc7a24492e2c68512ce2d
[ "Apache-2.0" ]
1
2017-05-14T17:09:38.000Z
2017-05-14T17:09:38.000Z
tests/modification-check.py
luisriverag/certbot
52e207a404ab3600637fc7a24492e2c68512ce2d
[ "Apache-2.0" ]
5
2021-03-15T21:43:04.000Z
2021-07-22T20:31:43.000Z
tests/modification-check.py
luisriverag/certbot
52e207a404ab3600637fc7a24492e2c68512ce2d
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python """Ensures there have been no changes to important certbot-auto files.""" import hashlib import os # Relative to the root of the Certbot repo, these files are expected to exist # and have the SHA-256 hashes contained in this dictionary. These hashes were # taken from our v1.14.0 tag which was the last release we intended to make # changes to certbot-auto. # # Deleting letsencrypt-auto-source/letsencrypt-auto and # letsencrypt-auto-source/letsencrypt-auto.sig can be done once we're # comfortable breaking any certbot-auto scripts that haven't already updated to # the last version. See # https://opensource.eff.org/eff-open-source/pl/65geri7c4tr6iqunc1rpb3mpna for # more info. EXPECTED_FILES = { os.path.join('letsencrypt-auto-source', 'letsencrypt-auto'): 'b997e3608526650a08e36e682fc3bf0c29903c06fa5ba4cc49308c43832450c2', os.path.join('letsencrypt-auto-source', 'letsencrypt-auto.sig'): '61c036aabf75da350b0633da1b2bef0260303921ecda993455ea5e6d3af3b2fe', } def find_repo_root(): return os.path.dirname(os.path.dirname(os.path.realpath(__file__))) def sha256_hash(filename): hash_object = hashlib.sha256() with open(filename, 'rb') as f: hash_object.update(f.read()) return hash_object.hexdigest() def main(): repo_root = find_repo_root() for filename, expected_hash in EXPECTED_FILES.items(): filepath = os.path.join(repo_root, filename) assert sha256_hash(filepath) == expected_hash, f'unexpected changes to {filepath}' print('All certbot-auto files have correct hashes.') if __name__ == '__main__': main()
33.9375
90
0.745242
217
1,629
5.470046
0.511521
0.101095
0.070767
0.107835
0.170177
0.11289
0.077506
0.077506
0
0
0
0.076032
0.152241
1,629
47
91
34.659574
0.78349
0.398404
0
0
0
0
0.306334
0.180685
0
0
0
0
0.043478
1
0.130435
false
0
0.086957
0.043478
0.304348
0.043478
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c153b504e55b04acb0b49c1e4ecd7223c00968b8
560
py
Python
tests/test_load.py
michaelpeterswa/qsml
e3aeb48ac8ba7bb3eca7ec866f6d75258cfdc7c2
[ "MIT" ]
7
2020-06-28T16:28:54.000Z
2020-09-18T13:18:55.000Z
tests/test_load.py
michaelpeterswa/qsml
e3aeb48ac8ba7bb3eca7ec866f6d75258cfdc7c2
[ "MIT" ]
1
2020-06-27T08:36:02.000Z
2020-06-28T23:30:03.000Z
tests/test_load.py
michaelpeterswa/qsml
e3aeb48ac8ba7bb3eca7ec866f6d75258cfdc7c2
[ "MIT" ]
1
2020-07-30T05:03:38.000Z
2020-07-30T05:03:38.000Z
import unittest import qsml class TestLoad(unittest.TestCase): def test_load(self): file = "tests/load.qsml" returned_val = { "myportfolio": {"GOOG": 10, "AAPL": 5, "BRK.B": 1}, "test": {"SNAP": 130, "MSFT": 5, "TSLA": 100}, } self.assertEqual(qsml.load(file), returned_val, "Were not equal") def test_load_comment_error(self): file = "tests/load2.qsml" with self.assertRaises(qsml.error.QSMLError): qsml.load(file) if __name__ == "__main__": unittest.main()
25.454545
73
0.583929
67
560
4.671642
0.597015
0.044728
0.070288
0
0
0
0
0
0
0
0
0.029126
0.264286
560
21
74
26.666667
0.730583
0
0
0
0
0
0.166071
0
0
0
0
0
0.125
1
0.125
false
0
0.125
0
0.3125
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c153e1c37eaaf1da2ce812283ce1bb7f91f0f0b1
6,012
py
Python
votesim/utilities/decorators.py
johnh865/election_sim
b73b7e65f1bb22abb82cbe8442fcf02b0c20894e
[ "MIT" ]
8
2019-10-21T23:24:51.000Z
2021-09-14T03:04:59.000Z
votesim/utilities/decorators.py
johnh865/election_sim
b73b7e65f1bb22abb82cbe8442fcf02b0c20894e
[ "MIT" ]
2
2021-02-09T23:52:47.000Z
2021-02-10T04:08:35.000Z
votesim/utilities/decorators.py
johnh865/election_sim
b73b7e65f1bb22abb82cbe8442fcf02b0c20894e
[ "MIT" ]
1
2019-10-21T23:32:18.000Z
2019-10-21T23:32:18.000Z
""" Collection of utilities such as memoization, automatic property storage, etc """ from __future__ import print_function, absolute_import, division from functools import wraps, partial import logging from votesim.utilities import misc logger = logging.getLogger(__name__) class memoize: """ Decorator used to store past calls. """ def __init__(self, function): self.function = function self.memoized = {} def __call__(self, *args, **kwargs): key = (args, frozenset(kwargs.items())) try: return self.memoized[key] except KeyError: self.memoized[key] = self.function(*args, **kwargs) return self.memoized[key] class method_memoize(object): """cache the return value of a method This class is meant to be used as a decorator of methods. The return value from a given method invocation will be cached on the instance whose method was invoked. All arguments passed to a method decorated with memoize must be hashable. If a memoized method is invoked directly on its class the result will not be cached. Instead the method will be invoked like a static method: class Obj(object): @memoize def add_to(self, arg): return self + arg Obj.add_to(1) # not enough arguments Obj.add_to(1, 2) # returns 3, result is not cached """ def __init__(self, func): self.func = func def __get__(self, obj, objtype=None): if obj is None: return self.func return partial(self, obj) def __call__(self, *args, **kw): obj = args[0] try: cache = obj.__cache except AttributeError: cache = obj.__cache = {} key = (self.func, args[1:], frozenset(kw.items())) try: res = cache[key] except KeyError: res = cache[key] = self.func(*args, **kw) return res # #def lazyprop(fn): # """ # Decorator used to cache property results # # From stack overflow. 
Author Mike Boers # https://stackoverflow.com/questions/3012421/python-memoising-deferred-lookup-property-decorator # """ # # attr_name = '_lazy_' + fn.__name__ # @property # def _lazyprop(self): # if not hasattr(self, attr_name): # setattr(self, attr_name, fn(self)) # return getattr(self, attr_name) # return _lazyprop # ### Lazy Property decorator # Property name to hold all lazy data _data_holder_attr = '_cache_properties' def clean_lazy_properties(instance): '''Clean all lazy properties''' setattr(instance, _data_holder_attr, {}) def clean_some_lazy_properties(instance, names): """Clean properties in iterable names""" try: cache = getattr(instance, _data_holder_attr) except AttributeError: return if isinstance(names, str): names = [names] for name in names: try: del cache[name] except KeyError: pass setattr(instance, _data_holder_attr, cache) return def modify_lazy_property(instance, name, value, dictname=_data_holder_attr): """Modify a lazy property""" cache = getattr(instance, dictname) cache[name] = value setattr(instance, _data_holder_attr, cache) return def lazy_property(fn): """ Version of lazy_property by John Huang. Decorator used to cache property results into dictionary. The cache can be clered using clean_lazy_properties. """ cache_name = _data_holder_attr attr_name = fn.__name__ def get_cache(instance): if not hasattr(instance, cache_name): setattr(instance, cache_name, {}) return getattr(instance, cache_name) @property @wraps(fn) def get_attr(self): cache = get_cache(self) if attr_name not in cache: cache[attr_name] = fn(self) return cache[attr_name] return get_attr def lazy_property2(name=_data_holder_attr): """ Version of lazy_property by John Huang. Decorator used to cache property results into dictionary. The cache can be cleared using clean_lazy_properties. Decorator must be called as a function. 
Parameters ---------- name : str Name of cache dictionary Example --------- Set the lazy property >>> class class1(object): >>> @lazy_property2('my_cache') >>> def property(self): >>> x = 2.0 >>> return x Delete the lazy property >>> a = class1() >>> del a.my_cache """ def decorator(fn): cache_name = name attr_name = fn.__name__ def get_cache(instance): if not hasattr(instance, cache_name): setattr(instance, cache_name, {}) return getattr(instance, cache_name) @property @wraps(fn) def get_attr(self): cache = get_cache(self) if attr_name not in cache: cache[attr_name] = fn(self) return cache[attr_name] return get_attr return decorator def reuse_doc(f): """Reuse the docstring from f on the decorated function Parameters ---------- f : func or class Desired func/class whose __doc__ you want to reuse Returns ------- out : decorator Example -------- Here we decorate class B with class A's docstring >>> class A(object): >>> '''I got A docstring''' >>> def __init__(self): >>> self.x = 10 >>> @reuse_doc(A) >>> class B(A): >>> pass >>> B.__doc__ == 'I got A docstring' """ doc = f.__doc__ def decorator(fn): fn.__doc__ = doc return fn return decorator
23.669291
100
0.58849
710
6,012
4.774648
0.243662
0.028319
0.033038
0.025959
0.254867
0.228614
0.218289
0.218289
0.19292
0.19292
0
0.005119
0.317698
6,012
253
101
23.762846
0.821307
0.397705
0
0.494737
0
0
0.00526
0
0
0
0
0
0
1
0.178947
false
0.010526
0.042105
0
0.421053
0.010526
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c15402f1ab58bd4a60c7b4bb3dddbb75ea0cbef9
10,304
py
Python
portcran.py
yzgyyang/portcran
04fa6ce8cd8585ed96aab19177d030b030ff79c9
[ "BSD-2-Clause" ]
1
2021-07-15T04:35:08.000Z
2021-07-15T04:35:08.000Z
portcran.py
yzgyyang/portcran
04fa6ce8cd8585ed96aab19177d030b030ff79c9
[ "BSD-2-Clause" ]
null
null
null
portcran.py
yzgyyang/portcran
04fa6ce8cd8585ed96aab19177d030b030ff79c9
[ "BSD-2-Clause" ]
null
null
null
#!/usr/bin/env python3 from argparse import ArgumentParser, Namespace from pathlib import Path from re import search from sys import argv from typing import Callable, Iterable, List, Optional, TextIO, Tuple from urllib.request import urlopen, urlretrieve from ports import Platform, PortError, PortLicense, Ports from ports.cran import Cran, CranPort __author__ = "David Naylor <dbn@FreeBSD.org>" __license__ = "BSD (FreeBSD)" __summary__ = "Generates FreeBSD Ports from CRAN packages" __version__ = "0.1.9" ERR_GENERAL = 1 ERR_CATEGORY = 2 ERR_EXISTS = 3 class Command(object): def __init__(self, description: str) -> None: self._parser = ArgumentParser(description=description) self._subparsers = self._parser.add_subparsers(title="available sub-commands", help="sub-command help") def execute(self, args: List[str]) -> None: parsed_args = self._parser.parse_args(args) if hasattr(parsed_args, "action"): parsed_args.action(parsed_args) else: self.usage() def usage(self) -> None: self._parser.print_usage() def __call__(self, verb: str, description: str) -> Callable[[Callable[[Namespace], None]], ArgumentParser]: def decorator(action: Callable[[Namespace], None]) -> ArgumentParser: parser = self._subparsers.add_parser(verb, help=description) parser.set_defaults(action=action) return parser return decorator def make_cran_port(name: str, portdir: Optional[Path] = None, version: Optional[str] = None) -> CranPort: if not version: print("Checking for latest version...") site_page = urlopen("http://cran.r-project.org/package=%s" % name).read().decode("utf-8") version_match = search(r"<td>Version:</td>\s*<td>(.*?)</td>", str(site_page)) assert version_match is not None version = version_match.group(1) distfile = Ports.distdir / ("%s_%s.tar.gz" % (name, version)) if not distfile.exists(): # pylint: disable=no-member print("Fetching package source (%s-%s)..." 
% (name, version))
    # Tail of make_cran_port(): the signature and earlier body lie above this chunk.
    # Download the source tarball from CRAN, then build the port object from it.
    urlretrieve("https://cran.r-project.org/src/contrib/%s" % distfile.name, distfile)  # pylint: disable=no-member
    return CranPort.create(name, distfile, portdir)


def diff(left: Iterable[str], right: Iterable[str]) -> Tuple[List[str], bool, List[str]]:
    """Three-way diff of two string sequences.

    Returns ``(removed, same_order, added)`` where ``removed`` holds items only
    in ``left``, ``added`` holds items only in ``right``, and ``same_order`` is
    True when the shared items appear in the same relative order on both sides.
    """
    left = list(left)
    right = list(right)
    old = [i for i in left if i not in right]
    new = [i for i in right if i not in left]
    # Strip the exclusive items so the order comparison only sees shared ones.
    left = [i for i in left if i not in old]
    right = [i for i in right if i not in new]
    return old, left == right, new


def yies(obj: list) -> str:
    """Return the plural suffix used in "dependenc%s" ("ies" vs "y")."""
    return "ies" if len(obj) > 1 else "y"


def log_depends(log: TextIO, depend: str, difference: Tuple[List[str], bool, List[str]]) -> None:
    """Write commit-log lines describing changes to one dependency list.

    ``difference`` is the (removed, same_order, added) triple from diff().
    """
    old, common, new = difference
    if not common:
        log.write(" - order %s dependencies lexicographically on origin\n" % depend)
    if old:
        log.write(" - remove unused %s dependenc%s:\n" % (depend, yies(old)))
        for i in sorted(old):
            log.write(" - %s\n" % i)
    if new:
        log.write(" - add new %s dependenc%s:\n" % (depend, yies(new)))
        for i in sorted(new):
            log.write(" - %s\n" % i)


def log_uses(log: TextIO, difference: Tuple[List[str], bool, List[str]]) -> None:
    """Write commit-log lines describing changes to the cran USES arguments.

    Raises PortError for any argument other than "auto-plist" / "compiles".
    """
    old, common, new = difference
    if not common:
        log.write(" - sort cran uses arguments lexicographically\n")
    for arg in old:
        if arg == "auto-plist":
            log.write(" - manually generate pkg-plist\n")
        elif arg == "compiles":
            log.write(" - port no longer needs to compile\n")
        else:
            raise PortError("Log: unknown cran argument: %s" % arg)
    for arg in new:
        if arg == "auto-plist":
            log.write(" - automatically generate pkg-plist\n")
        elif arg == "compiles":
            log.write(" - mark port as needing to compile\n")
        else:
            raise PortError("Log: unknown cran argument: %s" % arg)


def log_license(log: TextIO, old: PortLicense, new: PortLicense) -> None:
    """Write commit-log lines describing license changes between two ports."""
    if list(old) != list(sorted(new)):
        log.write(" - update license to: %s\n" % " ".join(sorted(new)))
    elif old.combination != new.combination:
        if new.combination is None:
            log.write(" - remove license combination\n")
        else:
            log.write(" - update license combination\n")


def generate_update_log(old: CranPort, new: CranPort) -> None:
    """Write a ``commit.svn`` message describing the update from ``old`` to ``new``."""
    assert (old.portversion or old.distversion) != new.distversion
    with open(new.portdir / "commit.svn", "w", encoding="utf-8") as log:
        log.write("%s: updated to version %s\n\n" % (new.origin, new.distversion))
        if old.portrevision is not None:
            log.write(" - removed PORTREVISION due to version bump\n")
        if old.maintainer != new.maintainer:
            log.write(" - update maintainer\n")
        if old.comment != new.comment:
            log.write(" - updated comment to align with CRAN package\n")
        if list(sorted(old.license)) != list(sorted(new.license)) or old.license.combination != new.license.combination:
            log.write(" - updated license to align with CRAN package\n")
        if old.license.file is None and new.license.file is not None:
            log.write(" - added license file from CRAN package\n")
        elif old.license.file is not None and new.license.file is None:
            log.write(" - removed license file (no longer in CRAN package)\n")
        for depend in ("build", "lib", "run", "test"):
            old_depends = getattr(old.depends, depend)
            new_depends = getattr(new.depends, depend)
            log_depends(log, depend, diff([i.origin for i in old_depends], sorted(i.origin for i in new_depends)))
        if old.description != new.description:
            log.write(" - update description to align with CRAN package\n")
        if old.website != new.website:
            log.write(" - update website URL to align with CRAN package\n")
        if new.version in new.changelog:
            assert old.portname is not None
            # NOTE(review): re-fetches the package at the *new* version,
            # presumably to compare the cached vs fresh changelog — confirm intent.
            port = make_cran_port(new.portname, version=new.version)
            assert port.version == new.version
            if port.version in port.changelog and port.changelog[port.version] == new.changelog[new.version]:
                log.write(" - changelog not updated\n")
            else:
                log.write(" - changelog:\n")
                for line in new.changelog[new.version]:
                    log.write(" -")
                    length = 4
                    # Word-wrap each changelog line at roughly 75 columns.
                    for word in line.split(" "):
                        length += len(word) + 1
                        if length > 75:
                            log.write("\n ")
                            length = 5 + len(word)
                        log.write(" " + word)
                    log.write("\n")
        else:
            log.write(" - no changelog provided\n")
        log.write("\nGenerated by:\tportcran (%s)\n" % __version__)


def update_category(portsdir: Path, category: str, name: str) -> None:
    """Insert ``name`` into the category Makefile's SUBDIR list, keeping sort order.

    The Makefile is rewritten via a temporary file that then replaces it.
    """
    entry = " SUBDIR += %s\n" % name
    makefile = portsdir / category / "Makefile"
    tmpfile = portsdir / category / ".Makefile.portcran"
    with makefile.open() as old:
        with tmpfile.open("w") as new:
            has_subdir = False
            drain = False  # set once the entry has been written (or found already present)
            for line in old.readlines():
                if not drain:
                    if line == entry:
                        # Entry already present; copy the rest through untouched.
                        drain = True
                    if line.lstrip().startswith("SUBDIR"):
                        has_subdir = True
                        if line > entry:
                            # First SUBDIR line sorting after ours: insert before it.
                            new.write(entry)
                            drain = True
                    elif has_subdir:
                        # Just walked past the end of the SUBDIR block: append there.
                        new.write(entry)
                        drain = True
                new.write(line)
    tmpfile.rename(makefile)


def generate_create_log(cran: CranPort) -> None:
    """Write a ``commit.svn`` message for a newly created port."""
    with open(cran.portdir / ".." / ".." / "commit.svn", "w") as log:
        log.write("%s: %s\n" % (cran.origin, cran.comment))
        log.write("\nGenerated by:\tportcran (%s)\n" % __version__)


def main() -> None:
    """Entry point: register and dispatch the "update" and "create" sub-commands."""
    command = Command(__summary__)

    @command("update", "update a CRAN port")
    def update(args: Namespace) -> None:
        # Regenerate an existing port from the latest CRAN release.
        port = Ports.get_port_by_name(Cran.PKGNAMEPREFIX + args.name)
        assert isinstance(port, CranPort)
        cran = make_cran_port(args.name, portdir=port._portdir)
        cran.generate()
        generate_update_log(port, cran)
    update.add_argument("name", help="name of the CRAN package")
    update.add_argument("-o", "--output", help="output directory")

    @command("create", "create a CRAN port")
    def create(args: Namespace) -> None:
        # Create a brand-new port under the first listed category.
        if args.address is not None:
            Platform.address = args.address
        categories = args.categories.split(",")
        for category in categories:
            if category not in Ports.categories:
                print("err: %s in not a ports category" % category)
                exit(ERR_CATEGORY)
        portsdir = Ports.dir if args.portsdir is None else Path(args.portsdir)
        category = categories[0]
        name = Cran.PKGNAMEPREFIX + args.name
        portdir = portsdir / category / name
        cran = make_cran_port(args.name, portdir)
        cran.categories = categories
        cran.maintainer = Platform.address
        try:
            port = Ports.get_port_by_name(name)
            print("err: CRAN port %s already exists at %s" % (args.name, port.origin))
            exit(ERR_EXISTS)
        except PortError:
            # Not found is the expected case for a new port.
            pass
        portdir.mkdir()
        update_category(portsdir, category, name)
        cran.generate()
        generate_create_log(cran)
    create.add_argument("name", help="name of the CRAN package")
    create.add_argument("-a", "--address", help="creator's email address")
    create.add_argument("-c", "--categories", default="math", help="comma separated list of the CRAN port's categories")
    create.add_argument("-p", "--portsdir", help="output ports directory")

    command.execute(argv[1:])


if __name__ == "__main__":
    try:
        main()
    except PortError as ex:
        print("err: %s" % ex)
        exit(ERR_GENERAL)
40.566929
120
0.595885
1,294
10,304
4.657651
0.2017
0.042476
0.007964
0.004646
0.211548
0.167745
0.12444
0.105857
0.096565
0.043803
0
0.002426
0.279794
10,304
253
121
40.727273
0.809729
0.007085
0
0.146226
0
0
0.178138
0.003324
0
0
0
0
0.023585
1
0.080189
false
0.004717
0.037736
0.004717
0.146226
0.028302
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c15423c94eae9054e142abda74c8200dd59d29fa
6,484
py
Python
calendarserver/tools/validcalendardata.py
eventable/CalendarServer
384444edb1966b530bc391789afbe3fb9cd6fd3e
[ "Apache-2.0" ]
1
2017-02-18T19:22:19.000Z
2017-02-18T19:22:19.000Z
calendarserver/tools/validcalendardata.py
eventable/CalendarServer
384444edb1966b530bc391789afbe3fb9cd6fd3e
[ "Apache-2.0" ]
null
null
null
calendarserver/tools/validcalendardata.py
eventable/CalendarServer
384444edb1966b530bc391789afbe3fb9cd6fd3e
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python
##
# Copyright (c) 2012-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##

from __future__ import print_function

# NOTE: this module uses Python 2 "except Exc, e" syntax throughout and will
# not parse under Python 3.

"""
This tool takes data from stdin and validates it as iCalendar data suitable
for the server.
"""

from calendarserver.tools.cmdline import utilityMain, WorkerService
from twisted.internet.defer import succeed
from twisted.python.text import wordWrap
from twisted.python.usage import Options
from twistedcaldav.config import config
from twistedcaldav.ical import Component, InvalidICalendarDataError
from twistedcaldav.stdconfig import DEFAULT_CONFIG_FILE
import os
import sys


def usage(e=None):
    """Print usage help and exit (status 64 when called with an error)."""
    if e:
        print(e)
        print("")
    try:
        ValidOptions().opt_help()
    except SystemExit:
        pass
    if e:
        sys.exit(64)
    else:
        sys.exit(0)


# Synopsis text, wrapped to the terminal width (COLUMNS, default 80).
description = '\n'.join(
    wordWrap(
        """
Usage: validcalendardata [options] [input specifiers]\n
""",
        int(os.environ.get('COLUMNS', '80'))
    )
)


class ValidOptions(Options):
    """
    Command-line options for 'validcalendardata'
    """
    synopsis = description
    optFlags = [
        ['verbose', 'v', "Verbose logging."],
        ['debug', 'D', "Debug logging."],
        ['parse-only', 'p', "Only validate parsing of the data."],
    ]
    optParameters = [
        ['config', 'f', DEFAULT_CONFIG_FILE, "Specify caldavd.plist configuration path."],
    ]

    def __init__(self):
        super(ValidOptions, self).__init__()
        # '-' means stdout/stdin respectively.
        self.outputName = '-'
        self.inputName = '-'

    def opt_output(self, filename):
        """
        Specify output file path (default: '-', meaning stdout).
        """
        self.outputName = filename

    opt_o = opt_output

    def openOutput(self):
        """
        Open the appropriate output file based on the '--output' option.
        """
        if self.outputName == '-':
            return sys.stdout
        else:
            return open(self.outputName, "wb")

    def opt_input(self, filename):
        """
        Specify input file path (default: '-', meaning stdin).
        """
        self.inputName = filename

    opt_i = opt_input

    def openInput(self):
        """
        Open the appropriate input file based on the '--input' option.
        """
        if self.inputName == '-':
            return sys.stdin
        else:
            return open(os.path.expanduser(self.inputName), "rb")


# Prefix stripped from error messages before they are reported.
errorPrefix = "Calendar data had unfixable problems:\n "


class ValidService(WorkerService, object):
    """
    Service which runs, exports the appropriate records, then stops the reactor.
    """

    def __init__(self, store, options, output, input, reactor, config):
        super(ValidService, self).__init__(store)
        self.options = options
        self.output = output
        self.input = input
        self.reactor = reactor
        self.config = config
        self._directory = None

    def doWork(self):
        """
        Start the service.
        """
        # NOTE(review): re-invokes startService on the superclass here —
        # presumably part of WorkerService's lifecycle; confirm.
        super(ValidService, self).startService()
        if self.options["parse-only"]:
            result, message = self.parseCalendarData()
        else:
            result, message = self.validCalendarData()
        if result:
            print("Calendar data OK")
        else:
            print(message)
        return succeed(None)

    def parseCalendarData(self):
        """
        Check the calendar data for valid iCalendar data.

        Returns a (result, message) tuple: result is False when parsing failed,
        and message carries the error text (with errorPrefix stripped).
        """
        result = True
        message = ""
        try:
            component = Component.fromString(self.input.read())

            # Do underlying iCalendar library validation with data fix
            fixed, unfixed = component._pycalendar.validate(doFix=True)
            if unfixed:
                raise InvalidICalendarDataError("Calendar data had unfixable problems:\n %s" % ("\n ".join(unfixed),))
            if fixed:
                print("Calendar data had fixable problems:\n %s" % ("\n ".join(fixed),))
        except ValueError, e:
            result = False
            message = str(e)
            if message.startswith(errorPrefix):
                message = message[len(errorPrefix):]
        return (result, message,)

    def validCalendarData(self):
        """
        Check the calendar data for valid iCalendar data.

        Runs full server-level validation (CalDAV rules, scheduling); returns
        the same (result, message) tuple shape as parseCalendarData().
        """
        result = True
        message = ""
        truncated = False
        try:
            component = Component.fromString(self.input.read())
            # Optionally truncate unbounded recurrences before validating.
            if getattr(self.config, "MaxInstancesForRRULE", 0) != 0:
                truncated = component.truncateRecurrence(config.MaxInstancesForRRULE)
            component.validCalendarData(doFix=False, validateRecurrences=True)
            component.validCalendarForCalDAV(methodAllowed=True)
            component.validOrganizerForScheduling(doFix=False)
        except ValueError, e:
            result = False
            message = str(e)
            if message.startswith(errorPrefix):
                message = message[len(errorPrefix):]
        if truncated:
            message = "Calendar data RRULE truncated\n" + message
        return (result, message,)


def main(argv=sys.argv, stderr=sys.stderr, reactor=None):
    """
    Parse options, open the input/output streams and run the validation service.
    """
    if reactor is None:
        from twisted.internet import reactor

    options = ValidOptions()
    options.parseOptions(argv[1:])

    try:
        output = options.openOutput()
    except IOError, e:
        stderr.write("Unable to open output file for writing: %s\n" % (e))
        sys.exit(1)

    try:
        input = options.openInput()
    except IOError, e:
        stderr.write("Unable to open input file for reading: %s\n" % (e))
        sys.exit(1)

    def makeService(store):
        return ValidService(store, options, output, input, reactor, config)

    utilityMain(options["config"], makeService, reactor, verbose=options["debug"])

if __name__ == "__main__":
    main()
27.35865
120
0.609963
693
6,484
5.646465
0.349206
0.021467
0.006133
0.008178
0.214413
0.209047
0.168157
0.145668
0.102734
0.081268
0
0.004748
0.285318
6,484
236
121
27.474576
0.839663
0.100401
0
0.253731
0
0
0.095702
0
0
0
0
0
0
0
null
null
0.007463
0.08209
null
null
0.044776
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
c155e8b957ea1abd8dd89360b9558b67dc020499
1,243
py
Python
src/gluonts/nursery/torch_arsgls_rbpf/test/basic_tests/conv.py
richardk53/gluon-ts
5bde492198c0348b550ac6f7269f1740a699ec30
[ "Apache-2.0" ]
null
null
null
src/gluonts/nursery/torch_arsgls_rbpf/test/basic_tests/conv.py
richardk53/gluon-ts
5bde492198c0348b550ac6f7269f1740a699ec30
[ "Apache-2.0" ]
null
null
null
src/gluonts/nursery/torch_arsgls_rbpf/test/basic_tests/conv.py
richardk53/gluon-ts
5bde492198c0348b550ac6f7269f1740a699ec30
[ "Apache-2.0" ]
null
null
null
import torch
from torch import nn

from utils.utils import compute_conv_output_img_dims


def test_compute_conv_dims_out():
    """Check compute_conv_output_img_dims against a real nn.Conv2d for a grid of settings."""
    for side in (63, 64, 65, 66):
        img_shape = (side, side)
        batch = torch.randn((10, 1) + img_shape)
        for pad in (0, 1, 2):
            for dil in (1, 2, 3):
                for step in (1, 2, 3):
                    for ksize in (2, 3, 4, 5):
                        conv_kwargs = dict(
                            kernel_size=ksize,
                            stride=step,
                            padding=pad,
                            dilation=dil,
                        )
                        layer = nn.Conv2d(in_channels=1, out_channels=1, **conv_kwargs)
                        predicted = compute_conv_output_img_dims(
                            dims_img=img_shape, **conv_kwargs
                        )
                        observed = layer(batch).shape[2:]
                        assert predicted == observed
38.84375
77
0.442478
127
1,243
4.031496
0.314961
0.095703
0.078125
0.078125
0.367188
0.242188
0.242188
0.242188
0.242188
0.242188
0
0.044304
0.491553
1,243
31
78
40.096774
0.765823
0
0
0.285714
0
0
0
0
0
0
0
0
0.035714
1
0.035714
false
0
0.107143
0
0.142857
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c1561171d3885a4dc3c76906c27aa5632df77a77
589
py
Python
OOP/deep_dive_tkinter/many_widget_example.py
Amaranese/python-exercises-notes-solutions-projects
58f7677ecb97971733d9f4ff87fda75e23d7c0cb
[ "Unlicense" ]
1
2021-12-03T12:38:33.000Z
2021-12-03T12:38:33.000Z
OOP/deep_dive_tkinter/many_widget_example.py
Amaranese/python-exercises-notes-solutions-projects
58f7677ecb97971733d9f4ff87fda75e23d7c0cb
[ "Unlicense" ]
null
null
null
OOP/deep_dive_tkinter/many_widget_example.py
Amaranese/python-exercises-notes-solutions-projects
58f7677ecb97971733d9f4ff87fda75e23d7c0cb
[ "Unlicense" ]
null
null
null
import tkinter as tk

# Demo window: one instance of several common Tk widgets, stacked with pack().
root = tk.Tk()

# tk.WidgetName(parent_frame, options) — build every widget first, pack in order.
widgets = [
    tk.Entry(root, width=25),
    tk.Button(root, text="LOOKOUT!"),
    tk.Checkbutton(root, text='RememberMe', variable=tk.IntVar()),
    tk.Label(root, text="What's Your Name?"),
    tk.OptionMenu(root, tk.IntVar(), "Select Age", "15+", "25+", "40+", "60+"),
    tk.Scrollbar(root, orient=tk.VERTICAL),
    tk.Radiobutton(root, text='Democratic', variable=tk.IntVar(), value=3),
    tk.Radiobutton(root, text='Republican', variable=tk.IntVar(), value=5),
]
for widget in widgets:
    widget.pack()

root.mainloop()
32.722222
83
0.702886
85
589
4.858824
0.470588
0.101695
0.116223
0.11138
0.130751
0
0
0
0
0
0
0.022059
0.076401
589
17
84
34.647059
0.737132
0.061121
0
0
0
0
0.139746
0
0
0
0
0
0
1
0
false
0
0.090909
0
0.090909
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c156565f017d48828a6c04509f6eaa61d605a332
432
py
Python
hardhat/recipes/x11/driver/xf86-video-nouveau.py
stangelandcl/hardhat
1ad0c5dec16728c0243023acb9594f435ef18f9c
[ "MIT" ]
null
null
null
hardhat/recipes/x11/driver/xf86-video-nouveau.py
stangelandcl/hardhat
1ad0c5dec16728c0243023acb9594f435ef18f9c
[ "MIT" ]
null
null
null
hardhat/recipes/x11/driver/xf86-video-nouveau.py
stangelandcl/hardhat
1ad0c5dec16728c0243023acb9594f435ef18f9c
[ "MIT" ]
null
null
null
from ..base import X11DriverBaseRecipe


class Xf86VideoNouveauRecipe(X11DriverBaseRecipe):
    """Build recipe for the xf86-video-nouveau X11 driver."""

    def __init__(self, *args, **kwargs):
        super(Xf86VideoNouveauRecipe, self).__init__(*args, **kwargs)
        self.name = 'xf86-video-nouveau'
        self.version = '1.0.13'
        self.depends = ['xorg-server']
        # Source-tarball checksum, split to stay within the line limit.
        self.sha256 = ('6d9242ba139c3df7afefffb455573b52'
                       'f4427920b978161c00483c64a6da47cb')
33.230769
69
0.664352
36
432
7.75
0.722222
0.071685
0
0
0
0
0
0
0
0
0
0.172619
0.222222
432
12
70
36
0.657738
0
0
0
0
0
0.229167
0.148148
0
0
0
0
0
1
0.111111
false
0
0.111111
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c1588d562ae990566fc09dd0f8d1a7453c6a6f20
3,563
py
Python
fem_dsa/networks/autoencoders.py
idealab-isu/DSA
b9157eb9307c0ff06d91ff2bdcd8f70df5b896cb
[ "BSD-3-Clause" ]
3
2022-01-18T01:33:34.000Z
2022-03-22T20:46:16.000Z
DiffNet/networks/autoencoders.py
adityabalu/DiffNet
a21e024ad9948fa76fe73796e216a0a6601f2c7c
[ "MIT" ]
1
2022-03-30T10:16:47.000Z
2022-03-30T10:16:47.000Z
DiffNet/networks/autoencoders.py
adityabalu/DiffNet
a21e024ad9948fa76fe73796e216a0a6601f2c7c
[ "MIT" ]
2
2021-12-01T20:53:24.000Z
2021-12-02T06:42:39.000Z
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable


class Encoder(nn.Module):
    """Convolutional encoder: a 7x7 stem followed by ``n_downsample`` stride-2
    conv stages, with a Tanh on the output code.

    Channel width doubles with depth (factor ``dim``) and is capped once the
    stage index exceeds 3.
    """

    def __init__(self, in_channels=3, dim=64, n_downsample=3, encoder_type='convolutional'):
        super(Encoder, self).__init__()
        # Initial convolution block: ReflectionPad2d(3) + 7x7 conv preserves H and W.
        # NOTE(review): InstanceNorm2d(dim) is fed dim*2 channels; harmless since it
        # is non-affine (no per-channel parameters), but it looks like an oversight.
        layers = [
            nn.ReflectionPad2d(3),
            nn.Conv2d(in_channels, dim * 2, 7),
            nn.InstanceNorm2d(dim),
            nn.LeakyReLU(0.2, inplace=True),
        ]
        # Downsampling: each stage halves H and W.
        for i in range(n_downsample):
            if i <= 3:
                layers += [
                    nn.Conv2d(dim * 2 * (i + 1), dim * (i + 2) * 2, 4, stride=2, padding=1),
                    nn.InstanceNorm2d(dim * (i + 2) * 2),
                    nn.ReLU(inplace=True),
                ]
            else:
                # Width capped at dim*10 for deep stacks.
                layers += [
                    nn.Conv2d(dim * 2 * 5, dim * 5 * 2, 4, stride=2, padding=1),
                    nn.InstanceNorm2d(dim * 5 * 2),
                    nn.ReLU(inplace=True),
                ]
        self.model_blocks = nn.Sequential(*layers, nn.Tanh())

    def forward(self, x):
        """Return the code tensor for input batch ``x``."""
        return self.model_blocks(x)


class Decoder(nn.Module):
    """Transposed-conv decoder mirroring Encoder: ``n_upsample`` stride-2 stages
    followed by a small conv head producing ``out_channels`` maps."""

    def __init__(self, out_channels=3, dim=64, n_upsample=3, encoder_type='convolutional', activation='relu'):
        super(Decoder, self).__init__()
        layers = []
        # Upsampling: mirror of the Encoder schedule (capped width for i > 3).
        for i in reversed(range(n_upsample)):
            if i > 3:
                layers += [
                    nn.ConvTranspose2d(dim * 5 * 2, dim * 5 * 2, 4, stride=2, padding=1),
                    nn.InstanceNorm2d(dim * 5 * 2),
                    nn.LeakyReLU(0.2, inplace=True),
                ]
            else:
                layers += [
                    nn.ConvTranspose2d(dim * (i + 2) * 2, dim * (i + 1) * 2, 4, stride=2, padding=1),
                    nn.InstanceNorm2d(dim * (i + 1) * 2),
                    nn.LeakyReLU(0.2, inplace=True),
                ]
        # Output head. NOTE: relies on `i` keeping its final loop value
        # (0 whenever n_upsample > 0), matching the original behavior.
        layers += [
            nn.ReflectionPad2d(4),
            nn.Conv2d(dim * (i + 1) * 2, out_channels, 3),
            nn.Conv2d(out_channels, out_channels, 7),
        ]
        self.model_blocks = nn.Sequential(*layers)
        if activation == 'sigmoid':
            self.activation = nn.Sigmoid()
        elif activation == 'relu':
            self.activation = nn.ReLU()

    def forward(self, x):
        # self.activation is intentionally NOT applied (kept from the original).
        return self.model_blocks(x)


class AE(nn.Module):
    """Plain autoencoder pairing Encoder and Decoder with shared width/depth."""

    def __init__(self, in_channels, out_channels, dims=64, n_downsample=4):
        super(AE, self).__init__()
        self.encoder = Encoder(in_channels, dim=dims, n_downsample=n_downsample, encoder_type='regular')
        self.decoder = Decoder(out_channels, dim=dims, n_upsample=n_downsample, activation='relu')

    def forward(self, x):
        code = self.encoder(x)
        return self.decoder(code)


class VAE(nn.Module):
    """Variational autoencoder wrapper.

    NOTE(review): forward() unpacks ``mu, z`` from the encoder, but
    Encoder.forward returns a single tensor — this raises at runtime unless a
    variational encoder variant is substituted. Confirm the intended encoder.
    """

    def __init__(self, in_channels, out_channels, dims=64, n_downsample=3):
        super(VAE, self).__init__()
        self.encoder = Encoder(in_channels, dim=dims, n_downsample=n_downsample, encoder_type='variational')
        self.decoder = Decoder(out_channels, dim=dims, n_upsample=n_downsample)

    def forward(self, x):
        mu, z = self.encoder(x)
        return self.decoder(z)
33.299065
128
0.541959
435
3,563
4.28046
0.183908
0.059076
0.05102
0.019334
0.562836
0.440924
0.33029
0.305048
0.305048
0.305048
0
0.034884
0.324165
3,563
107
129
33.299065
0.738372
0.058939
0
0.302632
0
0
0.020683
0
0
0
0
0
0
1
0.105263
false
0
0.052632
0
0.263158
0.013158
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c1598a3545cd8dc90345c280e6f51a6897b9912a
2,215
py
Python
week02/03.MoreTesting/fractions/tests_collect_fractions.py
TsvetomirTsvetkov/Python-Course-101
1c5ea4631128c22effe3c4ee5a18c43f5e79d463
[ "MIT" ]
null
null
null
week02/03.MoreTesting/fractions/tests_collect_fractions.py
TsvetomirTsvetkov/Python-Course-101
1c5ea4631128c22effe3c4ee5a18c43f5e79d463
[ "MIT" ]
null
null
null
week02/03.MoreTesting/fractions/tests_collect_fractions.py
TsvetomirTsvetkov/Python-Course-101
1c5ea4631128c22effe3c4ee5a18c43f5e79d463
[ "MIT" ]
null
null
null
# tests_collect_fractions.py
import unittest

from collect_fractions import (
    validate_input_collect,
    lcm,
    collect_fractions
)


class TestValidateInputCollect(unittest.TestCase):
    """Input-validation tests for validate_input_collect()."""

    def _assert_validation_error(self, fractions, expected_message):
        # Run the validator and check both that it raised and what it said.
        caught = None
        try:
            validate_input_collect(fractions)
        except Exception as err:
            caught = err
        self.assertIsNotNone(caught)
        self.assertEqual(str(caught), expected_message)

    def test_validation_passes_with_correct_input(self):
        validate_input_collect([(1, 3), (4, 5)])

    def test_validation_raises_exception_with_empty_list(self):
        self._assert_validation_error([], 'List cannot be empty.')

    def test_validation_raises_exception_if_fractions_is_not_of_type_list(self):
        self._assert_validation_error(((1, 3), (4, 5)), 'Argument can only be of type "list".')

    def test_validation_raises_exception_if_length_of_element_is_not_two(self):
        self._assert_validation_error([(1, 2), (1, 3, 4)], 'Tuple can only contain 2 elements.')

    def test_validation_raises_exception_if_one_of_the_elements_of_the_tuples_is_not_integer(self):
        self._assert_validation_error([(1, 5), (1, 2.0)], 'Tuple can only contain integers.')

    def test_validation_raises_exception_if_one_of_the_elements_has_denominator_zero(self):
        self._assert_validation_error([(1, 2), (1, 0)], 'Cannot devide by zero.')


class TestCollectFractions(unittest.TestCase):
    """Behavior tests for collect_fractions()."""

    def test_collect_fractions_passes_with_only_one_element_in_list(self):
        self.assertEqual((1, 7), collect_fractions([(1, 7)]))

    def test_collect_fraction_passes_with_more_than_one_element_in_list(self):
        self.assertEqual((3, 4), collect_fractions([(1, 4), (1, 2)]))


if __name__ == '__main__':
    unittest.main()
25.170455
96
0.751242
308
2,215
5.068182
0.237013
0.122998
0.089686
0.111467
0.580397
0.539398
0.474055
0.435618
0.435618
0.435618
0
0.017433
0.145372
2,215
88
97
25.170455
0.807184
0.011738
0
0.484375
0
0
0.069927
0
0
0
0
0
0.1875
1
0.125
false
0.046875
0.03125
0
0.1875
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c159e41ff48b6f66e8bdd24ff1ed589656d0c172
3,278
py
Python
exporter/management/commands/exporter.py
open-contracting/data-registry
5a73e7f2334c6af5be23070493842b494b3e5357
[ "BSD-3-Clause" ]
null
null
null
exporter/management/commands/exporter.py
open-contracting/data-registry
5a73e7f2334c6af5be23070493842b494b3e5357
[ "BSD-3-Clause" ]
170
2021-02-12T12:52:37.000Z
2022-03-28T14:37:05.000Z
exporter/management/commands/exporter.py
open-contracting/data-registry
5a73e7f2334c6af5be23070493842b494b3e5357
[ "BSD-3-Clause" ]
null
null
null
import gzip
import logging
import shutil

from django.conf import settings
from django.core.management.base import BaseCommand
from django.db import connections
from yapw.methods.blocking import ack

from exporter.util import Export, create_client

logger = logging.getLogger(__name__)


class Command(BaseCommand):
    """
    Start a worker to export files from collections in Kingfisher Process.

    Data is exported as gzipped line-delimited JSON files, with one file per year and one ``full.jsonl.gz`` file.

    Multiple workers can run at the same time.
    """

    def handle(self, *args, **options):
        # Block on the "exporter_init" queue; each message is handled by callback().
        create_client().consume(callback, "exporter_init")


def callback(state, channel, method, properties, input_message):
    """Export one collection to JSONL files, then gzip them.

    ``input_message`` carries ``collection_id`` (Kingfisher Process collection)
    and ``job_id`` (registry job owning the export directory).
    """
    collection_id = input_message.get("collection_id")
    job_id = input_message.get("job_id")

    export = Export(job_id)
    dump_file = export.directory / "full.jsonl"

    try:
        export.directory.mkdir(parents=True)
    except FileExistsError:
        # Directory exists from a previous run: clear stale files.
        # NOTE(review): list comprehension used only for its side effect — a
        # plain for-loop would be clearer.
        [f.unlink() for f in export.directory.glob("*") if f.is_file()]

    export.lock()

    # NOTE(review): `id` shadows the builtin; it is the keyset-pagination cursor.
    id = 0
    page = 1
    # Map of output path -> open file handle (dump_file's entry is re-bound
    # each page; the `with` below closes the old handle first, so the final
    # close() on it is a harmless no-op).
    files = {}

    # acknowledge message processing now to avoid connection loses
    # the rest can run for hours and is irreversible anyways
    ack(state, channel, method.delivery_tag)

    # load data from kf-process and save
    while True:
        with connections["kingfisher_process"].cursor() as cursor:
            logger.debug("Processing page %s with id > %s", page, id)
            cursor.execute(
                """
                SELECT d.id, d.data, d.data->>'date'
                FROM compiled_release c
                JOIN data d ON (c.data_id = d.id)
                WHERE collection_id = %s AND d.id > %s
                ORDER BY d.id
                LIMIT %s
                """,
                [collection_id, id, settings.EXPORTER_PAGE_SIZE],
            )
            records = cursor.fetchall()

        if not records:
            break

        with open(dump_file, "a") as full:
            files[dump_file] = full
            for r in records:
                id = r[0]  # advance the pagination cursor
                full.write(r[1])
                full.write("\n")

                # annual and monthly dump
                if r[2] is not None and len(r[2]) > 9:
                    year_path = export.directory / f"{int(r[2][:4])}.jsonl"
                    if year_path not in files:
                        files[year_path] = year_path.open("a")
                    files[year_path].write(r[1])
                    files[year_path].write("\n")

                    month_path = export.directory / f"{int(r[2][:4])}_{r[2][5:7]}.jsonl"
                    if month_path not in files:
                        files[month_path] = month_path.open("a")
                    files[month_path].write(r[1])
                    files[month_path].write("\n")

        page = page + 1

        # last page
        if len(records) < settings.EXPORTER_PAGE_SIZE:
            break

    # Close every output, gzip it, and drop the uncompressed original.
    for path, file in files.items():
        file.close()
        with path.open("rb") as f_in:
            with gzip.open(f"{path}.gz", "wb") as f_out:
                shutil.copyfileobj(f_in, f_out)
        path.unlink()

    export.unlock()
29.531532
113
0.556742
413
3,278
4.307506
0.382567
0.026981
0.011804
0.019112
0.068578
0.02923
0.02923
0.02923
0
0
0
0.007852
0.339536
3,278
110
114
29.8
0.813857
0.125381
0
0.031746
0
0
0.066693
0.021437
0
0
0
0
0
1
0.031746
false
0
0.126984
0
0.174603
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c15a19cb03f23c0c130c7938dd4eba13516d4750
71
py
Python
config.py
SuShu19/TiTIC
7dd83a1527ee0e57e354eb7843c75ad2e53d69fc
[ "Apache-2.0" ]
null
null
null
config.py
SuShu19/TiTIC
7dd83a1527ee0e57e354eb7843c75ad2e53d69fc
[ "Apache-2.0" ]
null
null
null
config.py
SuShu19/TiTIC
7dd83a1527ee0e57e354eb7843c75ad2e53d69fc
[ "Apache-2.0" ]
null
null
null
# Local environment paths for the TiTIC project.

# Root of the TiTIC source checkout (Windows path on the author's machine).
code_path = "F:/发表论文/apsec 2021/TiTIC"
# Directory holding fastText data — presumably training/embedding files; confirm against usage.
data_path = "D:/fasttext_data"
17.75
38
0.71831
12
71
4
0.833333
0
0
0
0
0
0
0
0
0
0
0.063492
0.112676
71
3
39
23.666667
0.698413
0
0
0
0
0
0.571429
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
c15a975febae16e23c4a925f353cceee52893986
147
py
Python
terrascript/pingdom/d.py
jackluo923/python-terrascript
ed4b626e6d28621ea1b02fc16f7277a094d89830
[ "BSD-2-Clause" ]
4
2022-02-07T21:08:14.000Z
2022-03-03T04:41:28.000Z
terrascript/pingdom/d.py
jackluo923/python-terrascript
ed4b626e6d28621ea1b02fc16f7277a094d89830
[ "BSD-2-Clause" ]
null
null
null
terrascript/pingdom/d.py
jackluo923/python-terrascript
ed4b626e6d28621ea1b02fc16f7277a094d89830
[ "BSD-2-Clause" ]
2
2022-02-06T01:49:42.000Z
2022-02-08T14:15:00.000Z
# terrascript/pingdom/d.py import terrascript class pingdom_contact(terrascript.Data): pass class pingdom_team(terrascript.Data): pass
13.363636
40
0.768707
18
147
6.166667
0.555556
0.216216
0.342342
0
0
0
0
0
0
0
0
0
0.14966
147
10
41
14.7
0.888
0.163265
0
0.4
0
0
0
0
0
0
0
0
0
1
0
true
0.4
0.2
0
0.6
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
1
0
0
6
c15bdeecdedb2772bfcc37455ef18ef0155dfd42
154
py
Python
src/arcrest/hostedservice/__init__.py
kevinsigwart/ArcREST
2d83bd87e23f3f78d6bba2131e6d47cf8e2e07c1
[ "Apache-2.0" ]
null
null
null
src/arcrest/hostedservice/__init__.py
kevinsigwart/ArcREST
2d83bd87e23f3f78d6bba2131e6d47cf8e2e07c1
[ "Apache-2.0" ]
null
null
null
src/arcrest/hostedservice/__init__.py
kevinsigwart/ArcREST
2d83bd87e23f3f78d6bba2131e6d47cf8e2e07c1
[ "Apache-2.0" ]
null
null
null
# Package initializer for arcrest.hostedservice: re-export the admin service classes.
from __future__ import absolute_import
from .service import AdminFeatureService, AdminFeatureServiceLayer, AdminMapService, Services

# Package version string.
__version__ = "3.5.6"
51.333333
93
0.857143
16
154
7.6875
0.8125
0
0
0
0
0
0
0
0
0
0
0.021277
0.084416
154
3
94
51.333333
0.851064
0
0
0
0
0
0.032258
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
c160f505df5dab1a29a92764a36839b1cc74f021
3,357
py
Python
test_triplegan.py
AmirHosseinAmeli/Triple-GAN
127948d9e22767d315a4b3ca58fc4a56d92ff9d3
[ "MIT" ]
29
2020-09-03T08:35:47.000Z
2022-02-10T18:39:29.000Z
test_triplegan.py
AmirHosseinAmeli/Triple-GAN
127948d9e22767d315a4b3ca58fc4a56d92ff9d3
[ "MIT" ]
6
2020-12-22T14:43:14.000Z
2022-03-12T00:55:24.000Z
test_triplegan.py
AmirHosseinAmeli/Triple-GAN
127948d9e22767d315a4b3ca58fc4a56d92ff9d3
[ "MIT" ]
8
2020-10-01T04:03:40.000Z
2022-03-21T10:23:40.000Z
import copy
import os
import pickle

import torch
import torch.nn as nn
import numpy as np

from library import inputs, eval_inception_score
from Utils.checkpoints import save_context, Logger
from Utils import flags
from Utils import config
import Torture

# Evaluation script: reload a trained Triple-GAN checkpoint, sample 50k fake
# images with the generator, and pickle them for inception-score evaluation.

FLAGS = flags.FLAGS
KEY_ARGUMENTS = config.load_config(FLAGS.config_file)

# Recover the training run's config from the checkpoint's sibling "source" dir,
# then re-apply it (keeping only the current --gpu setting).
model = FLAGS.old_model
dirname = os.path.dirname(model)
basename = os.path.basename(model)
config_path = os.path.join(dirname, "..", "source", "configs_dict.pkl")
summary_path = os.path.join(dirname, "..", "summary")
with open(config_path, "rb") as f:
    new_dict = pickle.load(f)
new_dict["gpu"] = FLAGS.gpu
FLAGS.set_dict(new_dict)
FLAGS.old_model = "loaded"
text_logger, MODELS_FOLDER, SUMMARIES_FOLDER = save_context(__file__, KEY_ARGUMENTS)

# Fixed seeds for reproducible sampling.
torch.manual_seed(1234)
torch.cuda.manual_seed(1235)
np.random.seed(1236)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)

# Data iterators and the three Triple-GAN networks (netC_T is the EMA/teacher classifier).
itr = inputs.get_data_iter(batch_size=100, subset=1000)
itr_u = inputs.get_data_iter(batch_size=100)
netG, optim_G = inputs.get_generator_optimizer()
netD, optim_D = inputs.get_discriminator_optimizer()
netC, optim_c = inputs.get_classifier_optimizer()
netC_T, _ = inputs.get_classifier_optimizer()

netG, netD, netC = netG.to(device), netD.to(device), netC.to(device)
netG = nn.DataParallel(netG)
netD = nn.DataParallel(netD)
netC = nn.DataParallel(netC)
netC_T = nn.DataParallel(netC_T)

# Restore all networks and optimizers from the checkpoint file.
checkpoint_io = Torture.utils.checkpoint.CheckpointIO(checkpoint_dir=MODELS_FOLDER)
checkpoint_io.register_modules(
    netG=netG,
    netD=netD,
    netC=netC,
    netC_T=netC_T,
    optim_G=optim_G,
    optim_D=optim_D,
    optim_c=optim_c,
)
checkpoint_io.load_file(model)
logger = Logger(log_dir=SUMMARIES_FOLDER)

# --- Disabled diagnostics kept from the original -----------------------------
# with torch.no_grad():
#     netG.eval()
#     data, label = itr.__next__()
#     sample_z = torch.randn(FLAGS.bs_g, FLAGS.g_z_dim).to(device)
#     tlabel = label[: FLAGS.bs_g // 10]
#     tlabel = torch.cat([tlabel for _ in range(10)], 0)
#     x_fake = netG(sample_z, tlabel)
#     logger.add_imgs(x_fake, "imgtest", nrow=FLAGS.bs_g // 10)
# itr_test = inputs.get_data_iter(batch_size=100, train=False, infinity=False)
# netC_T.eval()
# total, correct = 0, 0
# for images, labels in itr_test:
#     images, labels = images.to(device), labels.to(device)
#     outputs = netC_T(images)
#     _, predicted = torch.max(outputs.data, 1)
#     total += labels.size(0)
#     correct += (predicted == labels).sum().item()
# print(total, correct, correct / total)
# -----------------------------------------------------------------------------

# Inception score: sample 500 batches of 100 fake images conditioned on real labels.
with torch.no_grad():
    netG.eval()
    img_list = []
    for _ in range(500):
        sample_z = torch.randn(100, FLAGS.g_z_dim).to(device)
        data, label = itr.__next__()
        x_fake = netG(sample_z.to(device), label.to(device))
        # Map generator output from [-1, 1] to [0, 1].
        img_list.append(x_fake.data.cpu().numpy() * 0.5 + 0.5)
    img_list = np.concatenate(img_list, axis=0)
    # NCHW float -> NHWC uint8 for the inception-score code.
    img_list = (np.transpose(img_list, [0, 2, 3, 1]) * 255).astype(np.uint8)
    new_img_list = []
    for i in range(50000):
        new_img_list.append(img_list[i])
    with open("image.pkl", "wb") as f:
        pickle.dump(new_img_list, f)
    # NOTE(review): exit() here makes the two prints below unreachable — the
    # script currently only dumps images, never computing the score in-process.
    exit()
    print(img_list.shape)
    print(eval_inception_score.get_inception_score(new_img_list))
32.278846
84
0.712541
514
3,357
4.412451
0.303502
0.037037
0.017637
0.022487
0.107143
0.074515
0.03836
0
0
0
0
0.021075
0.151921
3,357
103
85
32.592233
0.775553
0.231159
0
0
0
0
0.024247
0
0
0
0
0
0
1
0
false
0
0.152778
0
0.152778
0.041667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c16138a7cd5144aec3ed7335c23d116573f315f9
2,093
py
Python
pages/product_page.py
DedMazzai/stepic_test_project
f5dc9e71c9f18b48edf017cbfde86ada888482e2
[ "Apache-2.0" ]
null
null
null
pages/product_page.py
DedMazzai/stepic_test_project
f5dc9e71c9f18b48edf017cbfde86ada888482e2
[ "Apache-2.0" ]
null
null
null
pages/product_page.py
DedMazzai/stepic_test_project
f5dc9e71c9f18b48edf017cbfde86ada888482e2
[ "Apache-2.0" ]
null
null
null
from .base_page import BasePage
from .locators import ProductPageLocators


class ProductPage(BasePage):
    """Page object for a single product page: add-to-basket flow checks."""

    def should_be_button_add_to_basket(self):
        """Assert the add-to-basket button is present on the page."""
        button_found = self.is_element_present(*ProductPageLocators.BUTTON_ADD_TO_BASKET)
        assert button_found, "Button add to basket is not presented"

    def click_button_add_to_basket(self):
        """Press the add-to-basket button."""
        add_button = self.browser.find_element(*ProductPageLocators.BUTTON_ADD_TO_BASKET)
        add_button.click()

    def should_be_alert_about_adding_product_to_basket(self):
        """Assert the 'product added' alert appeared."""
        alert_found = self.is_element_present(
            *ProductPageLocators.ALERT_ABOUT_ADDING_PRODUCT_TO_BASKET)
        assert alert_found, "Alert with name of product is not presented"

    def should_be_product_name_in_alert_match_product_name(self):
        """Assert the product name shown in the alert equals the page's product name."""
        name_in_alert = self.browser.find_element(
            *ProductPageLocators.PRODUCT_NAME_IN_BASKET).text
        name_on_page = self.browser.find_element(*ProductPageLocators.PRODUCT_NAME).text
        assert name_on_page == name_in_alert, \
            "A message about adding a product to the basket does not contain information about the desired product"

    def shold_be_alert_with_cost_basket(self):
        """Assert the basket-total alert element exists.

        NOTE: method name keeps the original 'shold' typo so callers keep working.
        """
        cost_alert = self.browser.find_element(*ProductPageLocators.ALERT_WITH_COST_BASKET)
        assert cost_alert, "Alert with cost of basket is not presented"

    def should_be_cost_product_in_alert_with_cost_basket(self):
        """Assert the basket total in the alert equals the product's price."""
        basket_cost_text = self.browser.find_element(*ProductPageLocators.COST_BASKET_IN_ALERT).text
        product_cost_text = self.browser.find_element(*ProductPageLocators.COST_PRODUCT).text
        assert basket_cost_text == product_cost_text, \
            "The cost of the basket does not match the cost of the product"

    def should_not_be_success_message(self):
        """Assert no success message is shown at all."""
        assert self.is_not_element_present(*ProductPageLocators.SUCCESS_MESSAGE), \
            "Success message is presented, but should not be"

    def should_dissapear_of_success_message(self):
        """Assert the success message disappears after a short wait.

        NOTE: method name keeps the original 'dissapear' typo so callers keep working.
        """
        assert self.is_disappeared(*ProductPageLocators.SUCCESS_MESSAGE), \
            "Success message is presented, but should not be"
49.833333
117
0.763497
278
2,093
5.359712
0.176259
0.05906
0.060403
0.088591
0.634899
0.413423
0.242953
0.173154
0.173154
0.096644
0
0
0.175824
2,093
41
118
51.04878
0.863768
0
0
0.129032
0
0
0.180602
0
0
0
0
0
0.225806
1
0.258065
false
0
0.064516
0
0.354839
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
c16436e269d25cef8488abc134bc5e666d9d98ac
2,863
py
Python
ViyaCasual/CAS/CASTableBase.py
willhaley-bne/ViyaCasual
9c0ca9b05a9602e67c8d56e2f63eba01e1218e42
[ "MIT" ]
null
null
null
ViyaCasual/CAS/CASTableBase.py
willhaley-bne/ViyaCasual
9c0ca9b05a9602e67c8d56e2f63eba01e1218e42
[ "MIT" ]
null
null
null
ViyaCasual/CAS/CASTableBase.py
willhaley-bne/ViyaCasual
9c0ca9b05a9602e67c8d56e2f63eba01e1218e42
[ "MIT" ]
null
null
null
import pandas as pd


class CASTableBase(object):
    """Base class for tables that are sourced from SQL, CAS, or a decision
    module and pushed into SAS Viya CAS via a ``viya_conn`` wrapper.

    Class-level attributes act as overridable defaults for subclasses.
    """

    source_sql = None        # SQL query used to pull source rows, if any
    source_data = None       # cached source DataFrame (lazy-loaded)
    source_cas = None        # name of an existing CAS table to copy from
    source_caslib = None     # caslib holding ``source_cas``
    cas_table_name = None    # destination CAS table name
    caslib = None            # destination caslib
    decision_source = None   # name of a decision class inside the CAS package
    decision = None          # instantiated decision object, if any
    db_conn = None           # database connection wrapper (exposes ``.conn``)
    clean_up = False         # drop the CAS table when this object is GC'd

    def __init__(self, viya_conn, db_conn=None):
        self.viya_conn = viya_conn
        self.register_db_connection(db_conn)
        self.set_decision_source()

    def __del__(self):
        # Best-effort cleanup of the CAS table when requested.
        if self.clean_up:
            self.remove_from_cas()

    def register_db_connection(self, db_conn):
        """Attach (or replace) the database connection wrapper."""
        self.db_conn = db_conn

    def set_decision_source(self):
        """Instantiate ``self.decision`` from the CAS package, if configured."""
        if self.decision_source is None:
            return
        module_obj = __import__('CAS')
        if hasattr(module_obj, self.decision_source):
            decision_module = getattr(module_obj, self.decision_source)
            self.decision = decision_module(self.db_conn, self.viya_conn)

    def remove_from_cas(self):
        """Drop the destination CAS table; ignore failures (table may not exist)."""
        try:
            self.viya_conn.drop_cas_table(self.cas_table_name, self.caslib)
        except Exception:
            # Fix: was a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit; cleanup stays best-effort.
            pass

    def update_from_records(self, records):
        """Push ``records`` into the destination CAS table."""
        self.viya_conn.update_cas_table(records, self.cas_table_name, self.caslib)

    def update_from_source(self):
        """Refresh the CAS table from whatever source this table is bound to."""
        self.update_from_records(self.get_source_data())

    def get_source_data(self):
        """Load (and cache) the source data as a pandas DataFrame.

        Resolution order: existing CAS table -> decision module -> SQL query.
        """
        if self.source_data is not None:
            return self.source_data
        self.pre_process_source_data()
        if self.source_cas and self.source_caslib:
            self.source_data = self.viya_conn.get_cas_table(self.source_cas, self.source_caslib)
        elif self.decision_source:
            self.decision.exec()
            self.source_data = self.viya_conn.get_cas_table(self.cas_table_name, self.caslib)
        else:
            if self.source_sql is not None:
                self.source_data = self.read_sql(self.source_sql, True)
            # Drop a stray 'index' column if the query produced one.
            # NOTE(review): if source_sql is None this still dereferences
            # self.source_data -- presumably subclasses guarantee one of the
            # three sources is set; confirm.
            try:
                self.source_data.drop(['index'], axis=1, inplace=True)
            except KeyError:
                pass
            except IndexError:
                pass
            # Round-trip through records to normalize dtypes/index.
            self.source_data = pd.DataFrame().from_records(self.source_data.to_records())
        self.post_process_source_data()
        return self.source_data

    def pre_process_source_data(self):
        """Hook for subclasses: runs before the source is loaded."""
        pass

    def post_process_source_data(self):
        """Hook for subclasses: runs after the source is loaded."""
        pass

    def get_from_cas(self):
        """Return the destination CAS table as a DataFrame."""
        return self.viya_conn.get_cas_table(self.cas_table_name, self.caslib)

    def read_sql(self, sql, clear_index=False):
        """Run ``sql`` against the registered DB connection.

        Raises if no connection has been registered.
        """
        self.__check_db_conn()
        if clear_index:
            return pd.read_sql_query(sql, self.db_conn.conn, index_col=None)
        else:
            return pd.read_sql_query(sql, self.db_conn.conn)

    def __check_db_conn(self):
        if self.db_conn is None:
            raise Exception('Please register a valid DB connection before using this method')
28.63
96
0.653161
390
2,863
4.44359
0.194872
0.092325
0.072706
0.03693
0.25678
0.201385
0.169071
0.150606
0.130987
0.130987
0
0.000477
0.267202
2,863
99
97
28.919192
0.825548
0
0
0.150685
0
0
0.02445
0
0
0
0
0
0
1
0.178082
false
0.068493
0.027397
0.013699
0.438356
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
c164a2131f24d47c407acfa16109127338defe1a
102
py
Python
Beecrowd/Python/ex1060.py
yurifalves/Exercises
a4e84ac76b9432f6c2efdeef6e5e2a093c39882d
[ "MIT" ]
null
null
null
Beecrowd/Python/ex1060.py
yurifalves/Exercises
a4e84ac76b9432f6c2efdeef6e5e2a093c39882d
[ "MIT" ]
null
null
null
Beecrowd/Python/ex1060.py
yurifalves/Exercises
a4e84ac76b9432f6c2efdeef6e5e2a093c39882d
[ "MIT" ]
null
null
null
# Read six numbers from stdin and report how many are non-negative.
positive_count = sum(1 for _ in range(6) if float(input()) >= 0)
print(f'{positive_count} valores positivos')
17
37
0.598039
18
102
3.388889
0.833333
0
0
0
0
0
0
0
0
0
0
0.05
0.215686
102
5
38
20.4
0.7125
0
0
0
0
0
0.235294
0
0
0
0
0
0
1
0
false
0
0
0
0
0.25
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
c164aad97b718794ec2487936b78ec7212cf88c1
1,523
py
Python
Library/operations.py
marcelodaher/ArraySim
f42db96e30acff6f3ce3829dc89a79ef5473b4db
[ "MIT" ]
1
2019-12-06T16:48:10.000Z
2019-12-06T16:48:10.000Z
Library/operations.py
marcelodaher/ArraySim
f42db96e30acff6f3ce3829dc89a79ef5473b4db
[ "MIT" ]
null
null
null
Library/operations.py
marcelodaher/ArraySim
f42db96e30acff6f3ce3829dc89a79ef5473b4db
[ "MIT" ]
null
null
null
# coding=utf-8
import numpy as np


def colKRproduct(A, B):
    """Column-wise Khatri-Rao product of matrices ``A`` and ``B``.

    Column ``i`` of the result is ``np.kron(A[:, i], B[:, i])``; the output
    has shape ``(A.shape[0] * B.shape[0], A.shape[1])`` and float64 dtype.
    Raises TypeError when the column counts differ.
    """
    if A.shape[1] != B.shape[1]:
        raise TypeError("A and B must have the same number of columns")
    n_cols = A.shape[1]
    out = np.zeros([A.shape[0] * B.shape[0], n_cols])
    for col in range(n_cols):
        out[:, col] = np.kron(A[:, col], B[:, col])
    return out


def colKRproduct_conj_self(A):
    """Column-wise Khatri-Rao product of conj(A) with A itself."""
    return np.apply_along_axis(lambda col: np.kron(col.conj(), col), 0, A)


def Xi(nMicX, nMicY):
    """Return the permutation matrix Xi (placeholder: currently all zeros)."""
    result = np.zeros([nMicX * nMicY, nMicX * nMicY])
    print("XI() NOT IMPLEMENTED")
    return result


def S2Z(S, nMicX, nMicY):
    """Rearrange (nMicY x nMicX) blocks of ``S`` into columns of ``Z``.

    Each block is flattened column-major (Fortran order) into one column
    of the output, preserving the dtype of ``S``.
    NOTE(review): the inner loop range is ``nMicX`` (not ``nMicY``), exactly
    as in the original -- confirm intent when nMicX != nMicY.
    """
    Z = np.zeros([nMicX * nMicY, nMicX * nMicY], dtype=S.dtype)
    for bx in range(nMicX):
        for by in range(nMicX):
            block = S[by * nMicY:(by + 1) * nMicY, bx * nMicX:(bx + 1) * nMicX]
            Z[:, by + bx * nMicY] = np.reshape(block, newshape=[nMicX * nMicY], order="F")
    return Z


def spark(A):
    """Smallest ``k`` such that some ``k`` rows of ``A.T`` are linearly dependent.

    Returns 0 when ``A.T`` is wider than tall (spark undefined here by
    convention of the original code), and ``n + 1`` when every subset of
    rows is independent.
    """
    from itertools import combinations as comb
    from numpy import linalg
    mat = np.array(A)
    rows_mat = mat.T
    [m, n] = rows_mat.shape
    if n > m:
        return 0
    for k in range(1, n + 1):
        for subset in comb(range(m), k):
            candidate = np.array([rows_mat[r] for r in subset])
            if linalg.matrix_rank(candidate) < k:
                return k
    return n + 1
27.196429
76
0.512147
231
1,523
3.34632
0.359307
0.090556
0.03881
0.043984
0.069858
0.069858
0
0
0
0
0
0.014141
0.349967
1,523
56
77
27.196429
0.766667
0.066316
0
0
0
0
0.048616
0
0
0
0
0
0
1
0.131579
false
0
0.078947
0.026316
0.342105
0.026316
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c1653d9ca159307ad4091c89f53debf9a3453ffc
1,337
py
Python
gtsfm/runner/run_scene_optimizer_olssonloader.py
swershrimpy/gtsfm
8d301eb3ef9172345a1ac1369fd4e19764d28946
[ "Apache-2.0" ]
122
2021-02-07T23:01:58.000Z
2022-03-30T13:10:35.000Z
gtsfm/runner/run_scene_optimizer_olssonloader.py
swershrimpy/gtsfm
8d301eb3ef9172345a1ac1369fd4e19764d28946
[ "Apache-2.0" ]
273
2021-01-30T16:45:26.000Z
2022-03-16T15:02:33.000Z
gtsfm/runner/run_scene_optimizer_olssonloader.py
swershrimpy/gtsfm
8d301eb3ef9172345a1ac1369fd4e19764d28946
[ "Apache-2.0" ]
13
2021-03-12T03:01:27.000Z
2022-03-11T03:16:54.000Z
import argparse
import os
from pathlib import Path

import gtsfm.utils.logger as logger_utils
from gtsfm.loader.loader_base import LoaderBase
from gtsfm.loader.olsson_loader import OlssonLoader
from gtsfm.runner.gtsfm_runner_base import GtsfmRunnerBase

# Repo-relative test-data directory: <repo>/tests/data.
DATA_ROOT = Path(__file__).resolve().parent.parent.parent / "tests" / "data"

logger = logger_utils.get_logger()


class GtsfmRunnerOlssonLoader(GtsfmRunnerBase):
    """Runner that feeds GTSFM a dataset stored in Olsson's Lund format."""

    def __init__(self):
        super().__init__(tag="GTSFM on Dataset in Olsson's Lund format")

    def construct_argparser(self) -> argparse.ArgumentParser:
        """Extend the base parser with Olsson-loader-specific options."""
        parser = super().construct_argparser()
        parser.add_argument("--dataset_root", type=str, default=os.path.join(DATA_ROOT, "set1_lund_door"), help="")
        parser.add_argument("--image_extension", type=str, default="JPG", help="")
        return parser

    def construct_loader(self) -> LoaderBase:
        """Build the OlssonLoader from the parsed CLI arguments."""
        return OlssonLoader(
            self.parsed_args.dataset_root,
            image_extension=self.parsed_args.image_extension,
            max_frame_lookahead=self.parsed_args.max_frame_lookahead,
            max_resolution=self.parsed_args.max_resolution,
        )


if __name__ == "__main__":
    runner = GtsfmRunnerOlssonLoader()
    runner.run()
31.833333
115
0.729993
156
1,337
5.929487
0.410256
0.043243
0.060541
0.036757
0
0
0
0
0
0
0
0.000905
0.173523
1,337
41
116
32.609756
0.836199
0
0
0
0
0
0.078534
0
0
0
0
0
0
1
0.107143
false
0
0.25
0
0.464286
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c165a2d6078e507a40ef86207575d3eefb0da508
289
py
Python
test/tests/intmethods.py
kevinxucs/pyston
bdb87c1706ac74a0d15d9bc2bae53798678a5f14
[ "Apache-2.0" ]
1
2015-11-06T03:39:51.000Z
2015-11-06T03:39:51.000Z
test/tests/intmethods.py
kevinxucs/pyston
bdb87c1706ac74a0d15d9bc2bae53798678a5f14
[ "Apache-2.0" ]
null
null
null
test/tests/intmethods.py
kevinxucs/pyston
bdb87c1706ac74a0d15d9bc2bae53798678a5f14
[ "Apache-2.0" ]
null
null
null
# can't try large numbers yet due to lack of long for i in xrange(1, 100): for j in xrange(1, 100): print i.__divmod__(j) for i in xrange(1, 12): for j in xrange(1, 12): print i | j print i & j print i ^ j print 1 ** 0 print 0 ** 0 print -1 ** 0
18.0625
49
0.546713
57
289
2.701754
0.385965
0.207792
0.233766
0.233766
0.506494
0.168831
0.168831
0
0
0
0
0.10582
0.346021
289
15
50
19.266667
0.708995
0.16263
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0
null
null
0.636364
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
1
0
5
c165a6d6b497f214d3ee7b9ab319db0cb8d9588f
384
py
Python
src/reverse/setup.py
fugue/zim-example
861b197ddc1074375bb9437b3282ab3e517b9019
[ "MIT" ]
null
null
null
src/reverse/setup.py
fugue/zim-example
861b197ddc1074375bb9437b3282ab3e517b9019
[ "MIT" ]
null
null
null
src/reverse/setup.py
fugue/zim-example
861b197ddc1074375bb9437b3282ab3e517b9019
[ "MIT" ]
2
2021-03-17T03:02:52.000Z
2021-07-21T23:31:08.000Z
# Packaging script for the "reverse" package.
import os.path
from setuptools import setup, find_packages

# Read the runtime requirements that sit next to this setup script.
with open(os.path.join(os.path.dirname(__file__), "requirements.txt")) as f:
    requirements = f.read().strip()

setup(
    name="reverse",
    version="0.0.0",
    description="Reverse data",
    packages=find_packages(exclude=["tests"]),  # ship every package except tests
    package_data={"reverse": ["metadata/*"]},   # bundle the metadata files
    # NOTE(review): this passes one newline-joined string; setuptools
    # accepts it, but a list (requirements.splitlines()) is the documented
    # form -- confirm before changing.
    install_requires=requirements,
)
25.6
76
0.690104
48
384
5.354167
0.645833
0.070039
0
0
0
0
0
0
0
0
0
0.009119
0.143229
384
14
77
27.428571
0.772036
0
0
0
0
0
0.161458
0
0
0
0
0
0
1
0
false
0
0.166667
0
0.166667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c165ec6055b3d0599812a0a06fa513f8948722c9
3,204
py
Python
tutorial_metarl/tasks/CompositionalTwoArmedBandit.py
akjagadish/tutorial-metarl
8810eafa783749c70a0575e805810a098b3df0fb
[ "MIT" ]
null
null
null
tutorial_metarl/tasks/CompositionalTwoArmedBandit.py
akjagadish/tutorial-metarl
8810eafa783749c70a0575e805810a098b3df0fb
[ "MIT" ]
null
null
null
tutorial_metarl/tasks/CompositionalTwoArmedBandit.py
akjagadish/tutorial-metarl
8810eafa783749c70a0575e805810a098b3df0fb
[ "MIT" ]
null
null
null
import torch
import numpy as np
import math


class CompositionalTwoArmedBandit():
    """Two-armed contextual bandit whose arms are cued by binary context
    vectors: one pool of contexts signals the high-reward arm, the other
    pool signals the low-reward arm.

    NOTE: results depend on the exact order of np.random calls below;
    do not reorder statements.
    """

    def __init__(self, probs, ctx_dim, num_arms, num_ctx=400, max_ctx=1000):
        # probs: reward probabilities, unpacked later as (low, high)
        self.probs = np.asarray(probs)
        self.num_arms = num_arms
        self.ctx_dim = ctx_dim      # length of each binary context vector
        self.num_ctx = num_ctx      # number of unique contexts kept
        self.max_ctx = max_ctx      # candidate pool size when sampling contexts
        self.context = self.make_contexts(ctx_dim, num_ctx, max_ctx)

    def sample(self, num_episodes=1000, num_trials=100, prob=None, cxt_per_epoch=False, repeats=None):
        """Generate (X, Y): stacked per-episode inputs and binary rewards.

        NOTE(review): the ``prob`` parameter is unused -- confirm.
        """
        if cxt_per_epoch:
            # regenerate a fresh set of unique contexts for this call
            self.context = self.make_contexts(self.ctx_dim, self.num_ctx, self.max_ctx)
        # split contexts into a high-reward half and a low-reward half
        highrwd_context = self.context[:int(self.num_ctx / 2)]
        lowrwd_context = self.context[int(self.num_ctx / 2):]
        # one context index per episode for each pool; the low-pool order is
        # an independent shuffle of the same bag
        highsamples = self.make_bag_of_tasks(num_episodes, repeats=repeats)
        lowsamples = highsamples.copy()
        np.random.shuffle(lowsamples)
        # set low and high probs
        low_prob, high_prob = self.probs
        probs = self.probs.copy()
        X, Y = [], []
        ctx = torch.zeros(self.num_arms, self.ctx_dim)
        for hsample, lsample in zip(highsamples, lowsamples):
            # randomly swap which arm is the high-rewarding one
            np.random.shuffle(probs)
            # assign the pooled contexts to their respective arms
            ctx[probs == low_prob] = lowrwd_context[lsample]
            ctx[probs == high_prob] = highrwd_context[hsample]
            x, y = self._sample_one_episode(ctx.reshape(-1), probs, num_trials)
            X.append(x)
            Y.append(y)
        Y = torch.stack(Y)
        X = torch.stack(X)
        return X, Y

    def _sample_one_episode(self, x, probs, num_trials):
        """Repeat the flattened context ``x`` for ``num_trials`` trials and
        draw Bernoulli rewards per arm according to ``probs``."""
        X, Y = [], []
        low_prob, high_prob = self.probs
        for _ in range(num_trials):
            y = np.zeros(self.num_arms)
            # low arm rewarded with the reversed probability vector,
            # high arm with the original one
            y[probs == low_prob] = np.random.choice([0, 1], size=(1,), p=self.probs[::-1])
            y[probs == high_prob] = np.random.choice([0, 1], size=(1,), p=self.probs)
            Y.append(torch.as_tensor(y))
            X.append(torch.as_tensor(x).type(torch.FloatTensor))
        return torch.stack(X), torch.stack(Y)

    def make_bag_of_tasks(self, num_episodes, repeats=None):
        """Return a shuffled array of context indices covering num_episodes,
        each index repeated ``repeats`` times (derived when None)."""
        num_contexts_per_group = int(self.num_ctx / 2)
        if repeats is None:
            repeats = int(num_episodes / num_contexts_per_group)
        samples = np.repeat(np.arange(num_contexts_per_group), repeats)
        samples = samples[:num_episodes]
        np.random.shuffle(samples)
        return samples

    def make_contexts(self, ctx_dim, num_ctx, max_ctx):
        """Sample ``num_ctx`` unique binary context vectors (as float tensors)."""
        sample_contexts = np.random.randint(2, size=(max_ctx, ctx_dim))
        while len(np.unique(sample_contexts, axis=0)) < num_ctx:
            # resample until the pool contains enough unique rows
            sample_contexts = np.random.randint(2, size=(max_ctx, ctx_dim))
        sample_contexts = np.unique(sample_contexts, axis=0)[:num_ctx]
        np.random.shuffle(sample_contexts)
        return torch.tensor(sample_contexts).type(torch.FloatTensor)
41.61039
102
0.626717
441
3,204
4.335601
0.222222
0.034519
0.026151
0.020397
0.290795
0.196653
0.15272
0.15272
0.084728
0.084728
0
0.012319
0.265293
3,204
76
103
42.157895
0.799915
0.077715
0
0.103448
0
0
0
0
0
0
0
0
0
1
0.086207
false
0
0.051724
0
0.224138
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c1665cfed8ff4a38680780c2f17d5964d1d9ed35
70
py
Python
00.source-code/02.Python/writeFile.py
wardensky/blogs
e8f347d05b59684f81ea6f7ee8cdbe0d701122f5
[ "Apache-2.0" ]
2
2018-12-20T12:19:56.000Z
2019-09-24T06:25:01.000Z
00.source-code/02.Python/writeFile.py
wardensky/blogs
e8f347d05b59684f81ea6f7ee8cdbe0d701122f5
[ "Apache-2.0" ]
null
null
null
00.source-code/02.Python/writeFile.py
wardensky/blogs
e8f347d05b59684f81ea6f7ee8cdbe0d701122f5
[ "Apache-2.0" ]
3
2018-09-18T06:58:56.000Z
2019-10-27T09:35:01.000Z
#!/usr/bin/python
# Write a short greeting into x1.txt.
# Fix: the original used the Python 2 `file()` builtin (removed in Python 3)
# and a manual close(); `open()` in a `with` block works in both versions
# and guarantees the handle is closed even on error.
with open('x1.txt', 'w') as f:
    f.write('hello')
11.666667
23
0.571429
13
70
3.076923
0.846154
0
0
0
0
0
0
0
0
0
0
0.016129
0.114286
70
5
24
14
0.629032
0.228571
0
0
0
0
0.226415
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
c16896197cec1995065f5c34607ce687f11e89f6
2,916
py
Python
scripts/example.py
alexboden/nba-who-has-more
590ba8bd062b96ff866c13988eb79a8c7ff0f488
[ "MIT" ]
null
null
null
scripts/example.py
alexboden/nba-who-has-more
590ba8bd062b96ff866c13988eb79a8c7ff0f488
[ "MIT" ]
null
null
null
scripts/example.py
alexboden/nba-who-has-more
590ba8bd062b96ff866c13988eb79a8c7ff0f488
[ "MIT" ]
null
null
null
"""Pick two random all-time-great NBA players and compare how many games
each had with 30+ points, using the nba_api stats endpoints."""
from nba_api.stats.static import players
from nba_api.stats import endpoints
from nba_api.stats.library.parameters import SeasonAll
from nba_api.stats.endpoints import playercareerstats
from nba_api.stats.endpoints import commonplayerinfo
from nba_api.stats.endpoints import playergamelog
import pandas as pd
import time
from random import *
import time  # NOTE(review): duplicate import of `time` (already imported above)

start_time = time.time()

# list of all players known to the static nba_api database
player_dict = players.get_players()


def games_with_x_or_more_points(seasons, x, player_id):
    """Count the player's logged games with at least ``x`` points across
    the given seasons (one API call per season, throttled)."""
    count = 0
    for s in seasons:
        time.sleep(0.6)  # throttle requests to avoid hammering the stats API
        gamelog_player = playergamelog.PlayerGameLog(player_id=player_id, season=s)
        df_player_games = gamelog_player.get_data_frames()[0]
        box_scores_points = df_player_games.loc[:, "PTS"]
        for points in box_scores_points:
            if (points >= x):
                count += 1
    return count


def get_player_id(fullname):
    """Look up a player's numeric id by exact full name.

    Raises IndexError when the name is unknown (empty match list).
    """
    player = [player for player in player_dict if player['full_name'] == fullname][0]
    return player['id']


def get_player_seasons(player_id):
    """Return the distinct 4-character season years a player appeared in.

    The API encodes seasons as strings; characters [1:5] hold the year --
    presumably a season-type prefix occupies index 0 (verify against API docs).
    """
    player_info = commonplayerinfo.CommonPlayerInfo(player_id=player_id)
    available_seasons = player_info.available_seasons.get_dict()
    seasons = []
    for season in available_seasons["data"]:
        for s in season:
            year = s[1:5]
            if not year in seasons:
                seasons.append(year)
    return seasons


# Load the list of all-time greats; each line's first 3 characters
# (ranking prefix) are stripped off.
all_time_great_list_file = open("NBA/alltimegreats.txt", "r")
ALL_TIMERS = []
while (True):
    line = all_time_great_list_file.readline()[3:].strip()
    if not line:
        break
    ALL_TIMERS.append(line)

# Pick two distinct random players from the top 100.
player1 = ALL_TIMERS[randint(0, 99)]
player2 = ALL_TIMERS[randint(0, 99)]
print(player1)
print(player2)
while (player1 == player2):
    player2 = ALL_TIMERS[randint(0, 99)]

player1_id = get_player_id(player1)
player2_id = get_player_id(player2)
player1_seasons = get_player_seasons(player1_id)
player2_seasons = get_player_seasons(player2_id)
ready = input()  # pause until the user presses Enter before the slow queries
print(player1 + " has " + str(games_with_x_or_more_points(player1_seasons, 30, player1_id)) + " games with 30 or more points")
print(player2 + " has " + str(games_with_x_or_more_points(player2_seasons, 30, player2_id)) + " games with 30 or more points")

# --- earlier experiments, kept for reference -------------------------------
# career = playercareerstats.PlayerCareerStats(player_id=player['id'])
# career_df = career.get_data_frames()[0]
# df_player_games.to_csv(filename)
# nba_players = players.get_players()
# for p in celtics_players:
#     player_dict = [player for player in nba_players if player['full_name'] == p][0]
#     career = playercareerstats.PlayerCareerStats(player_id=player_dict['id'])
#     career_df = career.get_data_frames()[0]
#     print(career_df)
# bron = player_info.available_seasons.get_dict()
# player_info = playercareerstats.career_totals_regular_season(per_mode36='totals', player_id=2544)
# ---------------------------------------------------------------------------
print("--- %s seconds ---" % (time.time() - start_time))
25.80531
126
0.718793
410
2,916
4.834146
0.243902
0.056509
0.030272
0.045409
0.284057
0.254289
0.083754
0.058527
0
0
0
0.023304
0.175926
2,916
113
127
25.80531
0.801498
0.20439
0
0.070175
0
0
0.054569
0.009095
0
0
0
0
0
1
0.052632
false
0
0.175439
0
0.280702
0.087719
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c168f756bc02752d155d2b864b3e1da8b5fa59b8
2,653
py
Python
data/Process_MIR1k.py
carrieeeeewithfivee/tf2_Vocal_Separation_UNet
5dbb6838bee0d8fbf0f73fa83e8c3d6c1978c67c
[ "MIT" ]
null
null
null
data/Process_MIR1k.py
carrieeeeewithfivee/tf2_Vocal_Separation_UNet
5dbb6838bee0d8fbf0f73fa83e8c3d6c1978c67c
[ "MIT" ]
1
2022-01-02T06:54:27.000Z
2022-01-02T12:09:13.000Z
data/Process_MIR1k.py
carrieeeeewithfivee/tf2_Vocal_Separation_UNet
5dbb6838bee0d8fbf0f73fa83e8c3d6c1978c67c
[ "MIT" ]
null
null
null
"""Preprocess the MIR-1K dataset for vocal separation: resample the mix /
instrumental / vocal tracks to 8192 Hz, then store normalized STFT
magnitudes as compressed .npz files."""
import os
from librosa.core import load, stft, istft, magphase
from librosa.output import write_wav
from concurrent.futures import ThreadPoolExecutor
from time import time
import asyncio
import os, glob
import numpy as np
from multiprocessing import cpu_count
# Thanks to https://github.com/jnzhng/keras-unet-vocal-separation

# STFT / resampling parameters shared by the whole pipeline.
SAMPLE_RATE = 8192
WINDOW_SIZE = 1024
HOP_LENGTH = 768


def downsample(input_path, output_path):
    """Resample one wav file to SAMPLE_RATE and write it (peak-normalized)."""
    wav, _ = load(input_path, sr=SAMPLE_RATE)
    write_wav(output_path, wav, SAMPLE_RATE, norm=True)
    print(f"Saving {output_path}")


def load_as_mag(file):
    """Load a wav at its native rate and return its STFT magnitude (float32)."""
    wav, _ = load(file, sr=None)
    spectrogram = stft(wav, n_fft=WINDOW_SIZE, hop_length=HOP_LENGTH)
    mag, _ = magphase(spectrogram)
    return mag.astype(np.float32)


def save_to_npz(base, sample):
    """Compute magnitudes for one sample's mix/vocal/inst trio, normalize all
    three by the mix's peak, and save them together as one .npz file."""
    nps = {}
    mix = load_as_mag(f'{base}/{sample}/mix.wav')
    vocal = load_as_mag(f'{base}/{sample}/vocal.wav')
    inst = load_as_mag(f'{base}/{sample}/inst.wav')
    mix_max = mix.max()
    mix_norm = mix / mix_max
    vocal_norm = vocal / mix_max
    inst_norm = inst / mix_max
    # print(f"Saving {sample}")
    try:
        np.savez_compressed(f'MIR-1K_resized/{sample}.npz', mix=mix_norm, vocal=vocal_norm, inst=inst_norm)
    except Exception as e:
        print(e)


if __name__ == '__main__':
    # Source folders of the raw MIR-1K tracks.
    voise = 'MIR-1K/voise'
    bg = 'MIR-1K/bg'
    mix = 'MIR-1K/mix'
    name = 0
    resampled_data = 'MIR-1K_resampled_data'
    base = 'MIR-1K'
    # Derive sample names from the vocal wav filenames.
    foldernames = []
    for filename in sorted(glob.glob(os.path.join(voise, '*.wav'))):
        foldernames.append(os.path.split(filename)[-1].replace('.wav', ''))
    dirs = foldernames
    # Stage 1: resample every trio into its own numbered folder.
    with ThreadPoolExecutor(max_workers=cpu_count() * 2) as pool:
        for i in range(len(dirs)):
            target_dir = 'MIR-1K_resampled_data/{}_{:0>2d}/'.format(base, i + 1)
            os.makedirs(target_dir, exist_ok=True)
            pool.submit(downsample, f'{mix}/{dirs[i]}.wav', target_dir + 'mix.wav')
            pool.submit(downsample, f'{bg}/{dirs[i]}.wav', target_dir + 'inst.wav')
            pool.submit(downsample, f'{voise}/{dirs[i]}.wav', target_dir + 'vocal.wav')

    # ## Save wav files to npz
    # 1. Load wave files from `corpus_resized`.
    # 2. Apply Short-time Fourier transform (STFT) to audio trios
    # 3. Apply normalization to magnitudes and save as npz dict in `numpy/`
    dirs = sorted(list(os.walk('MIR-1K_resampled_data'))[0][1])
    print(dirs)
    # Stage 2: convert each resampled trio to a normalized-magnitude npz.
    with ThreadPoolExecutor(max_workers=cpu_count() * 2) as pool:
        # print("!!!")
        for i in range(len(dirs)):
            # print("!!!")
            pool.submit(save_to_npz, 'MIR-1K_resampled_data', dirs[i])
35.851351
107
0.656238
390
2,653
4.276923
0.328205
0.026978
0.021583
0.043165
0.196043
0.113909
0.056355
0.056355
0.056355
0
0
0.016053
0.201659
2,653
74
108
35.851351
0.771483
0.115718
0
0.070175
0
0
0.150193
0.092426
0
0
0
0
0
1
0.052632
false
0
0.157895
0
0.22807
0.052632
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c1693dff2b16a43c1fe7913423163831050a96a1
3,195
py
Python
utils/utils.py
bo-miao/anomaly_classification
08829b3cdc488c6c7867f02950b5e22b6a5d5435
[ "Apache-2.0" ]
null
null
null
utils/utils.py
bo-miao/anomaly_classification
08829b3cdc488c6c7867f02950b5e22b6a5d5435
[ "Apache-2.0" ]
null
null
null
utils/utils.py
bo-miao/anomaly_classification
08829b3cdc488c6c7867f02950b5e22b6a5d5435
[ "Apache-2.0" ]
null
null
null
"""Shared helpers for the anomaly-classification project: parameter
counting, AUC computation, ResNet evaluation, and debug visualization."""
from utils import lr_scheduler, metric, prefetch, summary
import os, sys
import time
import numpy as np
from collections import OrderedDict
import glob
import math
import copy
import tqdm
from sklearn.metrics import roc_auc_score, roc_curve, auc
import matplotlib.pyplot as plt
from torch.cuda.amp import autocast
import cv2
from PIL import Image

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
from torch.autograd import Variable
import torchvision
import torchvision.transforms as transforms

# Dedicated RNG with a fixed seed for reproducible sampling utilities.
rng = np.random.RandomState(2020)


def get_the_number_of_params(model, is_trainable=False):
    """Return the total parameter count of ``model``; only trainable
    (requires_grad) parameters when ``is_trainable`` is True."""
    if is_trainable:
        return sum(p.numel() for p in model.parameters() if p.requires_grad)
    return sum(p.numel() for p in model.parameters())


def AUC(anomal_scores, labels):
    """Frame-level ROC-AUC of ``anomal_scores`` against ``labels``.

    Returns 0 (and logs the inputs) when sklearn cannot compute the score,
    e.g. only one class present in ``labels``.
    """
    frame_auc = 0
    try:
        frame_auc = roc_auc_score(y_true=np.squeeze(labels, axis=0),
                                  y_score=np.squeeze(anomal_scores))
    except Exception:
        # Fix: was a bare ``except:`` that also caught KeyboardInterrupt /
        # SystemExit; keep the best-effort fallback but only for real errors.
        print("AUC Cal ERROR: ", labels, anomal_scores)
    return frame_auc


def evaluate_resnet(model, test_batch, args):
    """Run top-1 classification evaluation over ``test_batch`` on GPU.

    Uses autocast mixed precision for the forward pass; optionally reports
    per-batch latency when ``args.evaluate_time`` is set. Returns accuracy.
    """
    single_time = metric.AverageMeter('Time', ':6.3f')
    progress = metric.ProgressMeter(len(test_batch), single_time, prefix="Evaluation: ")
    model.eval()
    counter = 0
    tp = 0
    for k, (images, labels) in enumerate(test_batch):
        images = images.cuda(non_blocking=True)
        labels = labels.cuda(non_blocking=True)
        # NOTE(review): when args.label is falsy this sets label to None and
        # the next line raises -- presumably evaluation is only run with
        # labels enabled; confirm.
        label = labels if args.label else None
        label = label.view(-1)
        input_image = images.detach()
        a = time.time()
        with autocast():
            logit = model.forward(input_image)
        if args.evaluate_time:
            single_time.update((time.time() - a) * 1000)
            progress.print(counter)
            print("Single batch time cost {}ms".format(1000 * (time.time() - a)))
        class_vector = F.softmax(logit, 1).data.squeeze()
        assert len(class_vector) == len(label), "class number must match"
        # Top-1 prediction per sample.
        probs, idx = class_vector.sort(1, True)
        idx = idx[:, 0]
        tp += torch.sum(idx.view(-1) == label).item()
        counter += len(label)
    accuracy = tp / counter
    print("INFERENCE ACCURACY IS {}".format(accuracy))
    return accuracy


def visualize(recon, gt):
    """Dump side-by-side (reconstruction | ground truth) JPEGs to
    /data/miaobo/tmp; assumes inputs are in [-1, 1] RGB."""
    b, c, h, w = recon.size()
    for i in range(b):
        img1, img2 = recon[i], gt[i]
        img = torch.cat((img1, img2), dim=2)  # concatenate along width
        img = 255. * (img + 1.) / 2.          # [-1, 1] -> [0, 255]
        img = img.squeeze(0).byte().cpu().numpy().transpose((1, 2, 0))
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        img = cv2.resize(img, (600, 300))
        frame, name = img, str(int(time.time() * 1000))  # ms timestamp as filename
        cv2.imwrite(os.path.join("/data/miaobo/tmp", name + ".jpg"), frame)
    return True


def visualize_single(image):
    """Dump each image of a batch as a JPEG to /data/miaobo/tmp;
    assumes input is in [-1, 1] RGB."""
    b, c, h, w = image.size()
    for i in range(b):
        img = image[i]
        img = 255. * (img + 1.) / 2.
        img = img.byte().cpu().numpy().transpose((1, 2, 0))
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        frame, name = img, str(int(time.time() * 1000))
        cv2.imwrite(os.path.join("/data/miaobo/tmp", name + ".jpg"), frame)
    return True
30.428571
103
0.638498
459
3,195
4.359477
0.363834
0.01999
0.010995
0.013993
0.209895
0.209895
0.193903
0.176912
0.176912
0.14093
0
0.02806
0.23036
3,195
104
104
30.721154
0.785685
0.008451
0
0.144578
0
0
0.047438
0
0
0
0
0
0.012048
1
0.060241
false
0
0.253012
0
0.385542
0.048193
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c169f12d80ecf64a50d7329d9a77f916c0b26871
1,960
py
Python
src/kpi_WV_hs/.ipynb_checkpoints/compute_kpi_1d_v2_prun-checkpoint.py
tlechauveCLS/kpi_mpc
4dc61d210c2b97e6ac240e54a8d96c35cf9123de
[ "MIT" ]
null
null
null
src/kpi_WV_hs/.ipynb_checkpoints/compute_kpi_1d_v2_prun-checkpoint.py
tlechauveCLS/kpi_mpc
4dc61d210c2b97e6ac240e54a8d96c35cf9123de
[ "MIT" ]
null
null
null
src/kpi_WV_hs/.ipynb_checkpoints/compute_kpi_1d_v2_prun-checkpoint.py
tlechauveCLS/kpi_mpc
4dc61d210c2b97e6ac240e54a8d96c35cf9123de
[ "MIT" ]
1
2022-03-23T07:48:27.000Z
2022-03-23T07:48:27.000Z
#!/home1/datawork/agrouaze/conda_envs2/envs/py2.7_cwave/bin/python # coding: utf-8 """ """ import sys print(sys.executable) import subprocess import logging from dateutil import rrule import datetime if __name__ == '__main__': root = logging.getLogger () if root.handlers: for handler in root.handlers: root.removeHandler (handler) import argparse parser = argparse.ArgumentParser (description='start prun') parser.add_argument ('--verbose',action='store_true',default=False) args = parser.parse_args () if args.verbose: logging.basicConfig (level=logging.DEBUG,format='%(asctime)s %(levelname)-5s %(message)s', datefmt='%d/%m/%Y %H:%M:%S') else: logging.basicConfig (level=logging.INFO,format='%(asctime)s %(levelname)-5s %(message)s', datefmt='%d/%m/%Y %H:%M:%S') prunexe = '/appli/prun/bin/prun' listing = '/home1/scratch/agrouaze/list_kpi_1d_v2_prun_test.txt' # written below # call prun opts = ' --split-max-lines=3 --background -e ' listing_content = [] sta = datetime.datetime(2015,1,1) #sta = datetime.datetime(2020,6,1) # pour test 2 qui utilisent les cross assignments de partitions logging.info('start year: %s',sta) sto = datetime.datetime.today() fid = open(listing,'w') cpt = 0 for unit in ['S1A','S1B']: for wv in ['wv1','wv2']: logging.info('%s',unit) for dd in rrule.rrule(rrule.DAILY,dtstart=sta,until=sto): fid.write('%s %s %s\n'%(unit,wv,dd.strftime('%Y%m%d'))) cpt +=1 fid.close() logging.info('listing written ; %s nb lines: %s',listing,cpt) pbs = '/home1/datahome/agrouaze/git/kpi_mpc/src/kpi_WV_hs/compute_kpi_1d_v2.pbs' cmd = prunexe+opts+pbs+' '+listing logging.info('cmd to cast = %s',cmd) st = subprocess.check_call(cmd,shell=True) logging.info('status cmd = %s',st)
40
102
0.626531
269
1,960
4.468401
0.513011
0.054908
0.03827
0.049917
0.076539
0.076539
0.076539
0.076539
0.076539
0.076539
0
0.021484
0.216327
1,960
49
103
40
0.761068
0.101531
0
0.046512
0
0
0.251572
0.070898
0
0
0
0
0
1
0
false
0
0.139535
0
0.139535
0.023256
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c16a520b3532e245375dff9d61f50950a6a91c7f
20,482
py
Python
pysrc/simulserver.py
juliusbierk/simultant
9d454b58797399f60812c4d8c482a57e82b5dba7
[ "MIT" ]
null
null
null
pysrc/simulserver.py
juliusbierk/simultant
9d454b58797399f60812c4d8c482a57e82b5dba7
[ "MIT" ]
null
null
null
pysrc/simulserver.py
juliusbierk/simultant
9d454b58797399f60812c4d8c482a57e82b5dba7
[ "MIT" ]
null
null
null
import asyncio import concurrent import functools import json import numpy as np import torch from aiohttp import web from aiohttp.web_runner import GracefulExit import aiohttp_cors import logging import csv import multiprocessing import queue import pickle # Local imports: from torchfcts import function_from_code, get_default_args, check_code_get_args, get_f_expr_or_ode, get_const_bools from torchfit import torch_fit if __name__ == '__main__': import dbfcts as db # we do not need a database connection for spawned processes logging.basicConfig(level=logging.WARN) logging.root.setLevel(logging.WARN) logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) HOST = '127.0.0.1' PORT = 7555 DEFAULT_PLOTLY_COLORS = ['rgb(31, 119, 180)', 'rgb(255, 127, 14)', 'rgb(44, 160, 44)', 'rgb(214, 39, 40)', 'rgb(148, 103, 189)', 'rgb(140, 86, 75)', 'rgb(227, 119, 194)', 'rgb(127, 127, 127)', 'rgb(188, 189, 34)', 'rgb(23, 190, 207)'] sys_print = print def print(*args): sys_print(*args, flush=True) async def index(request): return web.json_response({'running': True}) async def check_code(request): data = await request.json() d = check_code_get_args(data['code'], data['name_underscore'], data['expr_mode'], data['ode_dim'], data['ode_dim_select']) return web.json_response(d) async def add_model(request): data = await request.json() if data['expr_mode'] and 'ode_dim' in data: del data['ode_dim'] del data['ode_dim_select'] f = function_from_code(data['code'], data['name_underscore']) kwargs = get_default_args(f, data['expr_mode'], data.get('ode_dim')) consts = get_const_bools(f) data['args'] = [{'name': k, 'value': v, 'const': consts[k]} for k, v in kwargs.items()] await db.create_model(data['name'], data) return web.json_response({'success': True}) async def delete_model(request): data = await request.json() await db.delete_model(data['name']) return web.json_response({'success': True}) async def delete_data(request): data = await request.json() await 
db.delete_data(data['parent']) return web.json_response({'success': True}) async def model_exist_check(request): data = await request.json() print(data['name'], await db.get_models_names()) return web.json_response({'exists': data['name'] in await db.get_models_names()}) async def model_list(request): return web.json_response(await db.get_all_models()) async def data_list(request): return web.json_response(await db.get_data_names()) async def plot_code(request): data = await request.json() if data['content']['expr_mode']: mask, res, x = plot_code_py(data) else: # ODEs can be slow to solve, so we spin up a new process to not block the async loop with concurrent.futures.ProcessPoolExecutor(max_workers=None) as executor: future = asyncio.wrap_future(executor.submit(plot_code_py, data)) mask, res, x = await future return web.json_response({'x': x[mask].numpy().tolist(), 'y': res[mask].numpy().tolist()}) def plot_code_py(data): content = data['content'] f_name = content['name_underscore'] f = function_from_code(content['code'], f_name) kwargs = get_default_args(f, content['expr_mode'], content.get('ode_dim')) f = get_f_expr_or_ode(content['code'], content['expr_mode'], f_name, content.get('ode_dim_select')) # if not content['expr_mode']: # kwargs['y0'] = torch.tensor(kwargs['y0'], dtype=torch.double) for k in kwargs: kwargs[k] = torch.tensor(kwargs[k], dtype=torch.double) if 'xlim' in data: x = torch.linspace(data['xlim'][0], data['xlim'][1], 250, dtype=torch.double) else: x = torch.linspace(0, 10, 250, dtype=torch.double) with torch.no_grad(): res = f(x, **kwargs) mask = torch.isfinite(res) return mask, res, x async def plot_data(request): data = await request.json() plot_data = [] max_n = data.get('max_n', 250) for content in data['content']: dataset = await db.get_data_content(content['id']) if len(dataset['x']) > max_n: skip = 1 + int(len(dataset['x']) / max_n) else: skip = 1 x = dataset['x'][::skip] y = dataset['y'][::skip] plot_data.append({'x': x, 'y': y, 'name': 
dataset['name'], 'mode': 'markers', 'type': 'scattergl'}) return web.json_response(plot_data) async def upload_data(request): data = await request.post() example = None filenames = [] has_header = json.loads(data['has_header']) commit_data = json.loads(data['commit_data']) multiple_x_axes = json.loads(data['multiple_x_axes']) for fname in data: if not fname.startswith('file_'): continue f = data[fname].file.read().decode('latin-1') fname = fname[5:] filenames.append(fname) if not commit_data and len(filenames) > 1: continue sniffer = csv.Sniffer() dialect = sniffer.sniff(f) if has_header is None: has_header = sniffer.has_header(f) rows = [r for r in csv.reader(f.split('\n'), dialect=dialect) if len(r) > 0] if has_header: header = rows[0] rows = rows[1:] else: header = ['x'] + [f'#{i}' for i in range(1, len(rows[0]))] if commit_data: try: num_rows = np.array([[np.nan if x.strip() == '' else np.double(x) for x in r] for r in rows]) except ValueError: return web.json_response({'success': False, 'error': 'Data contains non-numerical entries.'}) if multiple_x_axes: for i in range(0, num_rows.shape[1], 2): x = num_rows[:, i] y = num_rows[:, i + 1] mask = ~np.isnan(y) if any(np.isnan(x[mask])): return web.json_response({'success': False, 'error': 'x-axis not defined for all y-values.'}) dataset = {'parent_name': fname, 'name': header[i], 'x': list(x[mask]), 'y': list(y[mask]), 'orig_x': list(x[mask]), 'orig_y': list(y[mask])} await db.create_dataset(header[i + 1], fname, dataset) else: x = num_rows[:, 0] for i in range(1, num_rows.shape[1]): y = num_rows[:, i] mask = ~np.isnan(y) if any(np.isnan(x[mask])): return web.json_response({'success': False, 'error': 'x-axis not defined for all y-values.'}) dataset = {'parent_name': fname, 'name': header[i], 'x': list(x[mask]), 'y': list(y[mask]), 'orig_x': list(x[mask]), 'orig_y': list(y[mask])} await db.create_dataset(header[i], fname, dataset) else: cut_horizontal = False cut_vertical = False if len(rows[0]) > 7: rows = [r[:7] 
+ ['&#8943;'] for r in rows] header = header[:7] + ['&#8943;'] cut_horizontal = True if len(rows) > 7: rows = rows[:7] + [['<center>&#8942;</center>'] * len(rows[0])] cut_vertical = True if cut_horizontal and cut_vertical: rows[-1][-1] = '&#8945;' example = {'header': header, 'has_header': has_header, 'data': rows, 'fname': fname} if commit_data: return web.json_response({'success': True, 'error': None}) else: res = {'filenames': filenames, 'example': example} return web.json_response(res) async def shuwdown(request): print('Stopping python server') fit_process.terminate() raise GracefulExit async def stop_spurious_running_fits_and_empty_stop_queue(n_max=5): # stop any fits that might be running (not that any should be...) for _ in range(n_max): interrupt_queue.put(True) await asyncio.sleep(0.01) while True: try: interrupt_queue.get_nowait() except queue.Empty: break async def load_fit_models_data(fit_info): # Get model code models = {} for model_id, d in fit_info['models'].items(): m = await db.get_models_content(d['name']) models[model_id] = {'code': m['code'], 'expr_mode': m['expr_mode'], 'name_underscore': m['name_underscore'], 'ode_dim': m.get('ode_dim'), 'ode_dim_select': m.get('ode_dim_select')} # Get data data = [] for data_id, d in fit_info['data'].items(): if d['in_use']: db_data = await db.get_data_content(d['id']) data.append({'x': db_data['x'], 'y': db_data['y'], 'weight': d['weight'], 'model': d['model'], 'parameters': d['parameters']}) return fit_info, data, models async def run_fit(request): if request.method == 'POST': await stop_spurious_running_fits_and_empty_stop_queue() run_fit_queue.put(await load_fit_models_data(await request.json())) return web.json_response({'status': 'started'}) return web.json_response({'error': 'must be a POST request'}) async def interrupt_fit(request): if request.method == 'POST': interrupt_queue.put(True) return web.json_response({'status': 'interrupting'}) return web.json_response({'error': 'must be a POST request'}) 
async def fit_result(request): try: fit, r2 = result_queue.get_nowait() # Empty iteration queue: await asyncio.sleep(0.01) try: while True: status_queue.get_nowait() except queue.Empty: pass except queue.Empty: # No fit result yet, check if there is a loss update: d = None try: while True: d = status_queue.get_nowait() except queue.Empty: pass return web.json_response({'status': 'no-fit', 'info': d}) return web.json_response({'status': 'success', 'fit': fit, 'r2': r2}) class PickleableF: def __init__(self, m): self.m = m def __call__(self, *args, **kwargs): m = self.m f = get_f_expr_or_ode(m['code'], m['expr_mode'], m['name_underscore'], m.get('ode_dim_select')) return list(f(*args, **kwargs).numpy()) async def plot_fit(request): data = await request.json() plot_data, is_fitted = await make_plot(data) res = {'plots': plot_data, 'is_fitted': is_fitted} return web.json_response(res) async def make_plot(data): plot_data = [] max_n = data.get('max_n', 250) # Generate functions models = {} for model_id, d in data['models'].items(): m = await db.get_models_content(d['name']) models[model_id] = PickleableF(m) models[model_id].expr_mode = m['expr_mode'] models[model_id].ode_dim = m.get('ode_dim') # Plot data xmin = float('infinity') xmax = float('-infinity') for d_id in data['data']: d = data['data'][d_id] if d['in_use']: # dataset = await db.get_data_content(d['id']) if len(dataset['x']) > max_n: skip = 1 + int(len(dataset['x']) / max_n) else: skip = 1 x = dataset['x'][::skip] y = dataset['y'][::skip] if min(x) < xmin: xmin = min(x) if max(x) > xmax: xmax = max(x) plot_data.append({'x': x, 'y': y, 'name': dataset['name'], 'mode': 'markers', 'type': 'scattergl', 'legendgroup': d_id}) # Plot fits x = np.linspace(xmin, xmax, 250) x_list = list(x) x_torch = torch.from_numpy(x) is_fitted = False with concurrent.futures.ProcessPoolExecutor(max_workers=None) as executor: for i, d_id in enumerate(data['data']): d = data['data'][d_id] if d['in_use']: f = models[d['model']] 
is_fitted = True kwargs = {} for p in d['parameters']: p_id = d['parameters'][p] parameter = data['parameters'][p_id] if parameter['const']: kwargs[p] = parameter['value'] elif parameter.get('fit') is None: kwargs[p] = parameter['value'] is_fitted = False else: kwargs[p] = parameter['fit'] for p in kwargs: kwargs[p] = torch.tensor(kwargs[p], dtype=torch.double) if not f.expr_mode: kwargs = transform_y0_kwargs_for_ode(kwargs, f.ode_dim) # Run function evaluation in parallel, without blocking the server loop: future = asyncio.wrap_future(executor.submit(f, x_torch, **kwargs)) c = DEFAULT_PLOTLY_COLORS[i % len(DEFAULT_PLOTLY_COLORS)] plot_data.append( {'x': x_list, 'future': future, 'mode': 'lines', 'showlegend': False, 'legendgroup': d_id, 'line': {'color': c} if is_fitted else {'color': c, 'dash': 'dash'}}) for d in plot_data: if 'future' in d: d['y'] = await d['future'] del d['future'] return plot_data, is_fitted async def make_download(data): download_data = [] # Generate functions models = {} for model_id, d in data['models'].items(): m = await db.get_models_content(d['name']) models[model_id] = PickleableF(m) models[model_id].expr_mode = m['expr_mode'] models[model_id].ode_dim = m.get('ode_dim') # Get data and range datasets = {} xmin = float('infinity') xmax = float('-infinity') for d_id in data['data']: d = data['data'][d_id] if d['in_use']: dataset = await db.get_data_content(d['id']) datasets[d_id] = dataset x = dataset['x'] if min(x) < xmin: xmin = min(x) if max(x) > xmax: xmax = max(x) # Generate fits and store data x = np.linspace(xmin, xmax, 250) x_list = list(x) x_torch = torch.from_numpy(x) with concurrent.futures.ProcessPoolExecutor(max_workers=None) as executor: for i, d_id in enumerate(data['data']): d = data['data'][d_id] if d['in_use']: dataset = datasets[d_id] store = { 'name': dataset['name'], 'x_data': dataset['x'], 'y_data': dataset['y'] } f = models[d['model']] kwargs = {} list_of_parameters = [] for p in d['parameters']: p_id = 
d['parameters'][p] parameter = data['parameters'][p_id] if parameter['const']: kwargs[p] = parameter['value'] elif parameter.get('fit') is None: kwargs[p] = parameter['value'] else: kwargs[p] = parameter['fit'] info = {'name': p, 'type': parameter['type'], 'value:': kwargs[p], 'is_const': parameter['const']} if parameter['type'] == 'detached': info['detached_name'] = parameter['name'] list_of_parameters.append(info) store['parameters'] = list_of_parameters for p in kwargs: kwargs[p] = torch.tensor(kwargs[p], dtype=torch.double) if not f.expr_mode: kwargs = transform_y0_kwargs_for_ode(kwargs, f.ode_dim) # Run function evaluation in parallel, without blocking the server loop: future = asyncio.wrap_future(executor.submit(f, x_torch, **kwargs)) store['x_fit'] = x_list store['future'] = future download_data.append(store) for d in download_data: if 'future' in d: d['y_fit'] = await d['future'] del d['future'] return download_data async def download_fit(request): data = await request.json() download_data = await make_download(data) return web.json_response(download_data, dumps=functools.partial(json.dumps, indent=4)) def transform_y0_kwargs_for_ode(kwargs, dim): y0 = np.ones(dim) for i in range(dim): y0[i] = kwargs[f'y0[{i}]'] del kwargs[f'y0[{i}]'] kwargs['y0'] = torch.from_numpy(y0) return kwargs def fitter(input_queue, output_queue, status_queue, interrupt_queue): print('Fitting queue running') while True: fit_info, data, models = input_queue.get(True) logger.debug('Got fit to be run') # First get all parameters parameter_names = [] values = [] const_index = 0 for parameter_id, d in fit_info['parameters'].items(): if not d['const']: parameter_names.append(parameter_id) values.append(d['value']) const_index += 1 for parameter_id, d in fit_info['parameters'].items(): if d['const']: parameter_names.append(parameter_id) values.append(d['value']) logger.debug(f'#parameters = {len(fit_info["parameters"])}') logger.debug(f'#fit parameters = {const_index}') for d in data: 
d['parameter_indeces'] = {k: parameter_names.index(v) for k, v in d['parameters'].items()} if const_index == 0: logger.info('No parameters to be fitted') output_queue.put(None) continue # with open('cache.pkl', 'wb') as f: # pickle.dump((parameter_names, values, const_index, models, data), f) method = fit_info.get('method') fit, r2 = torch_fit(parameter_names, values, const_index, models, data, status_queue, interrupt_queue, method=method) output_queue.put((fit, r2)) if __name__ == '__main__': multiprocessing.freeze_support() # with open('cache.pkl', 'rb') as f: # torch_fit(*pickle.load(f)) # exit() # Fitter run_fit_queue = multiprocessing.Queue() result_queue = multiprocessing.Queue() status_queue = multiprocessing.Queue() interrupt_queue = multiprocessing.Queue() fit_process = multiprocessing.Process(target=fitter, args=(run_fit_queue, result_queue, status_queue, interrupt_queue)) fit_process.daemon = True fit_process.start() # Web Server app = web.Application() cors = aiohttp_cors.setup(app, defaults={ "*": aiohttp_cors.ResourceOptions( allow_credentials=True, expose_headers="*", allow_headers="*", ) }) routes = [('/', index), ('/check_code', check_code), ('/plot_code', plot_code), ('/add_model', add_model), ('/delete_model', delete_model), ('/delete_data', delete_data), ('/model_exist_check', model_exist_check), ('/model_list', model_list), ('/upload_data', upload_data), ('/data_list', data_list), ('/plot_data', plot_data), ('/run_fit', run_fit), ('/interrupt_fit', interrupt_fit), ('/plot_fit', plot_fit), ('/fit_result', fit_result), ('/download_fit', download_fit), ('/exit', shuwdown), ] methods = ['GET', 'POST', 'DELETE'] for uri, f in routes: resource = cors.add(app.router.add_resource(uri)) for m in methods: cors.add(resource.add_route(m, f)) print('Python server started') try: web.run_app(app, host=HOST, port=PORT, shutdown_timeout=0.0) finally: fit_process.terminate()
33.412724
117
0.553803
2,573
20,482
4.223475
0.139915
0.019049
0.027514
0.044446
0.434435
0.374804
0.341401
0.298702
0.272936
0.246066
0
0.013218
0.309296
20,482
612
118
33.46732
0.75493
0.043257
0
0.383772
0
0
0.113757
0.002709
0
0
0
0
0
1
0.013158
false
0.004386
0.037281
0
0.116228
0.015351
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c16b54a8fb917e5a067468f0c78cd337a4b77c6b
4,312
py
Python
streak/api_get.py
srevinsaju/streak
ff21f39b06da3010568940d335c32bd7d357ca69
[ "MIT" ]
2
2022-03-07T20:18:46.000Z
2022-03-08T12:48:04.000Z
streak/api_get.py
srevinsaju/streak
ff21f39b06da3010568940d335c32bd7d357ca69
[ "MIT" ]
null
null
null
streak/api_get.py
srevinsaju/streak
ff21f39b06da3010568940d335c32bd7d357ca69
[ "MIT" ]
null
null
null
from flask import jsonify, make_response, request from . import app from .api_post import engine, login from .core import utility_funcs from sqlalchemy.orm import sessionmaker from sqlalchemy_cockroachdb import run_transaction from .api_post import login_required @app.route("/api/v1/tasks/list") @login_required def list(): user_uuid = request.environ["user_id"] d = [] tasks = run_transaction( sessionmaker(bind=engine), lambda session: utility_funcs.get_tasks(session, user_uuid), ) for task in tasks: d.append( { "id": task.task_id, "name": task.task_name, "description": task.task_description, "schedule": str(task.schedule), "timestamp": str(task.timestamp), } ) return jsonify(d) @app.route("/api/v1/task/<task_uuid>") @login_required def meta(task_uuid): user_uuid = request.environ["user_id"] Session = sessionmaker(bind=engine) with Session() as session: task = utility_funcs.get_task(session, user_uuid, task_uuid) return { "id": task.task_id, "name": task.task_name, "description": task.task_description, "schedule": str(task.schedule), "timestamp": str(task.timestamp), } @app.route("/api/v1/task/<task_uuid>/completed") @login_required def get_completed(task_uuid): user_uuid = request.environ["user_id"] is_completed = run_transaction( sessionmaker(bind=engine), lambda session: utility_funcs.has_task_completed( session, task_id=task_uuid, user_id=user_uuid ), ) return {"completed": is_completed} @app.route("/api/v1/task/<task_uuid>/current-streak") @login_required def get_current_streak(task_uuid): user_uuid = request.environ["user_id"] streak = run_transaction( sessionmaker(bind=engine), lambda session: utility_funcs.task_streak_status( session, task_id=task_uuid, user_id=user_uuid ), ) return {"streak": streak} def _get_info_fmt(session, user_uuid): user = utility_funcs.get_user(session, user_uuid) return { "id": str(user.user_id), "username": user.username, "name": user.name, "last_seen": user.last_seen, "last_checked_events": user.last_checked_events, } 
@app.route("/api/v1/users/<user_id>") @login_required def get_info(user_uuid): return run_transaction( sessionmaker(bind=engine), lambda session: _get_info_fmt(session, user_uuid) ) @app.route("/api/v1/self") @login_required def get_self_info(): user_uuid = request.environ["user_id"] return run_transaction( sessionmaker(bind=engine), lambda session: _get_info_fmt(session, user_uuid) ) @app.route("/api/v1/users/<friend_id>/friend_status") @login_required def friend_status(friend_id): user_uuid = request.environ["user_id"] print(friend_id, user_uuid, friend_id == str(user_uuid)) if friend_id == str(user_uuid): return make_response("Cannot make friends with yourself", 403) return { "friends": run_transaction( sessionmaker(bind=engine), lambda session: utility_funcs.check_friend(session, user_uuid, friend_id), ) } @app.route("/api/v1/streaks/maximum") @login_required def max_streak(): user_uuid = request.environ["user_id"] all, month, year = run_transaction( sessionmaker(bind=engine), lambda session: utility_funcs.get_max_streak(session, user_uuid), ) return {"all_time": all, "month": month, "year": year} @app.route("/api/v1/task/<task_id>/maximum") @login_required def max_streak_task(task_id): user_uuid = request.environ["user_id"] all, month, year = run_transaction( sessionmaker(bind=engine), lambda session: utility_funcs.get_max_streak_task(session, user_uuid, task_id), ) return {"all_time": all, "month": month, "year": year} @app.route("/api/v1/events") @login_required def get_notifications(): user_uuid = request.environ["user_id"] return run_transaction( sessionmaker(bind=engine), lambda session: utility_funcs.get_notifications(session, user_uuid), )
28
87
0.661874
541
4,312
5.007394
0.147874
0.073828
0.040605
0.047988
0.598007
0.558509
0.512366
0.472868
0.433001
0.365449
0
0.003854
0.217764
4,312
153
88
28.183007
0.799288
0
0
0.419355
0
0
0.12013
0.049165
0
0
0
0
0
1
0.08871
false
0
0.056452
0.008065
0.241935
0.008065
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c16b90c3a22166a86fb8294e272716e1caa3d0d8
4,066
py
Python
BI/delta.py
aessex24/rindr
2a546b27ce1c72728bb7a63e60653929ca592cfe
[ "MIT" ]
1
2021-12-21T16:24:18.000Z
2021-12-21T16:24:18.000Z
BI/delta.py
aessex24/rindr
2a546b27ce1c72728bb7a63e60653929ca592cfe
[ "MIT" ]
1
2021-12-10T01:05:55.000Z
2021-12-10T01:05:55.000Z
BI/delta.py
aessex24/rindr
2a546b27ce1c72728bb7a63e60653929ca592cfe
[ "MIT" ]
1
2021-12-10T00:48:33.000Z
2021-12-10T00:48:33.000Z
#!/usr/bin/python3 """ Run "mypy --disallow-untyped-defs --ignore-missing-imports \ --show-error-codes --strict-equality delta.py". The holidays module lacks type hints. Next need: explicit Pytests. Notice the imports are punctuated in an isort style. """ from datetime import datetime, timedelta import sys import holidays EVENT = datetime fromisoformat = datetime.fromisoformat class TimeCalc(): BD_START = 10 BD_STOP = 17 def _get_beginning_of_day(self,event: EVENT) -> EVENT: return fromisoformat(f"{self._get_day_string(event)} {self.BD_START}:00") def _get_end_of_day(self,event: EVENT) -> EVENT: return fromisoformat(f"{self._get_day_string(event)} {self.BD_STOP}") def _get_day(self,event: EVENT) -> EVENT: return fromisoformat(self._get_day_string(event)) def _get_day_string(self,event: EVENT) -> str: return f"{event.year}-{event.month}-{event.day}" def _get_next_day(self,event: EVENT) -> EVENT: return self._get_day(event) + timedelta(days=1) def _is_work_day(self,event: EVENT) -> bool: if event.date() in holidays.US(): return False if event.weekday() >= 5: return False return True def business_lapse(self,request: EVENT, response: EVENT) -> timedelta: """ This is the entry point most clients will want to use. 
""" if request > response: raise RuntimeError(f"How can there have been a response at {response} to a *later* request at {request}?") if request != response: beginning_of_request_day = self._get_beginning_of_day(request) if request < beginning_of_request_day: return self.business_lapse(min(beginning_of_request_day, response), response) end_of_response_day = self._get_end_of_day(response) if end_of_response_day < response: return self.business_lapse(request, max(end_of_response_day, request)) request_day = self._get_day(request) new_request = self._get_next_day(request) if not self._is_work_day(request_day): return self.business_lapse(new_request, max(response, new_request)) assert self._is_work_day(request_day) if request_day != self._get_day(response): assert request_day < self._get_day(response) return (max(request, self._get_end_of_day(request)) - request) + \ self.business_lapse(new_request, max(response, new_request)) assert beginning_of_request_day <= request <= response <= end_of_response_day return response - request def main() -> None: """ Examples: ./delta.py "2021-10-27 03:45" "2021-10-29 11:08" ./delta.py "2021-10-27 03:45" "2021-10-29 11:08" ./delta.py "2021-10-27 03:45" "2021-10-29 20:08" ./delta.py "2021-10-27 13:45" "2021-10-27 14:08" ./delta.py "2021-10-27 13:45" "2021-10-28 14:08" ./delta.py "2021-10-27 03:45" "2021-10-27 05:58" ./delta.py "2021-10-27 10:00" "2021-10-28 17:00" ./delta.py "2021-10-27 10:00" "2021-10-30 10:20" ./delta.py "2021-10-27 18:00" "2021-10-27 20:20" ./delta.py "2021-11-27 03:45" "2021-11-29 20:08" ./delta.py "2021-11-27 13:45" "2021-11-27 14:08" ./delta.py "2021-11-27 13:45" "2021-11-28 14:08" ./delta.py "2021-11-27 03:45" "2021-11-27 05:58" ./delta.py "2021-11-27 10:00" "2021-11-28 17:00" ./delta.py "2021-11-27 10:00" "2021-11-30 10:20" ./delta.py "2021-11-27 18:00" "2021-11-27 20:20" """ request_time = fromisoformat(sys.argv[1]) response_time = fromisoformat(sys.argv[2]) print(f"Times are {request_time} and 
{response_time}.") print(f"Difference is {response_time - request_time}.") print(f"Business lapse is {TimeCalc().business_lapse(request_time, response_time)}.") if __name__ == "__main__": main()
40.257426
118
0.624693
594
4,066
4.079125
0.217172
0.044573
0.072637
0.048287
0.431284
0.38052
0.27858
0.257532
0.257532
0.195625
0
0.128539
0.24422
4,066
100
119
40.66
0.659941
0.274471
0
0.037736
0
0.018868
0.14134
0.049799
0
0
0
0
0.056604
1
0.150943
false
0
0.056604
0.09434
0.509434
0.056604
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
c16c66d300e2ec1188a948c8172e2c9116bd68b9
2,831
py
Python
octopus/modules/account/dao.py
tuub/magnificent-octopus
62722fbb9eecd0f6727b4d9cc0ef3b732b4702d9
[ "Apache-2.0" ]
null
null
null
octopus/modules/account/dao.py
tuub/magnificent-octopus
62722fbb9eecd0f6727b4d9cc0ef3b732b4702d9
[ "Apache-2.0" ]
null
null
null
octopus/modules/account/dao.py
tuub/magnificent-octopus
62722fbb9eecd0f6727b4d9cc0ef3b732b4702d9
[ "Apache-2.0" ]
2
2019-12-17T14:55:17.000Z
2020-02-03T12:35:24.000Z
from octopus.modules.es import dao from datetime import datetime from octopus.modules.account.exceptions import NonUniqueAccountException def query_filter(q): """Function used by the query endpoint to ensure only the relevant account data is returned""" # q is an esprit.models.Query object # this limits the query to certain fields in the source, so that things like password # hashes and activation/reset tokens are never sent to the client q.include_source(["id", "email", "created_date", "last_updated", "role"]) class BasicAccountDAO(dao.ESDAO): __type__ = 'account' @classmethod def pull_by_email(cls, email): q = AccountQuery(email=email) accs = cls.object_query(q=q.query()) if len(accs) > 1: raise NonUniqueAccountException("There is more than one user account with the email {x}".format(x=email)) elif len(accs) == 1: return accs[0] else: return None @classmethod def get_by_reset_token(cls, reset_token, not_expired=True): q = AccountQuery(reset_token=reset_token) accs = cls.object_query(q=q.query()) if len(accs) > 1: raise NonUniqueAccountException("There is more than one user account with the reset token {x}".format(x=reset_token)) elif len(accs) == 0: return None acc = accs[0] if acc.is_reset_expired() and not_expired: return None return acc @classmethod def get_by_activation_token(cls, activation_token, not_expired=True): q = AccountQuery(activation_token=activation_token) accs = cls.object_query(q=q.query()) if len(accs) > 1: raise NonUniqueAccountException("There is more than one user account with the activation token {x}".format(x=activation_token)) elif len(accs) == 0: return None acc = accs[0] if acc.is_activation_expired() and not_expired: return None return acc class AccountQuery(object): def __init__(self, email=None, reset_token=None, activation_token=None): self.email = email self.reset_token = reset_token self.activation_token = activation_token def query(self): q = { "query" : { "bool" : { "must" : [] } } } if self.email is not None: 
q["query"]["bool"]["must"].append({"term" : {"email.exact" : self.email}}) if self.reset_token is not None: q["query"]["bool"]["must"].append({"term" : {"reset_token.exact" : self.reset_token}}) if self.activation_token is not None: q["query"]["bool"]["must"].append({"term" : {"activation_token.exact" : self.activation_token}}) return q
36.766234
139
0.620276
359
2,831
4.749304
0.270195
0.070381
0.018768
0.032845
0.380059
0.380059
0.342522
0.342522
0.296774
0.277419
0
0.004373
0.273048
2,831
77
140
36.766234
0.824101
0.096079
0
0.327869
0
0
0.13127
0.008621
0
0
0
0
0
1
0.098361
false
0
0.04918
0
0.344262
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c16e57e83faead3659a5fd36f3a7c633cdd84012
330
py
Python
app/app/tests.py
aniket463/recipe-app-api
64d390bea67f840f0d056f6fdb36eb30236be64b
[ "MIT" ]
null
null
null
app/app/tests.py
aniket463/recipe-app-api
64d390bea67f840f0d056f6fdb36eb30236be64b
[ "MIT" ]
null
null
null
app/app/tests.py
aniket463/recipe-app-api
64d390bea67f840f0d056f6fdb36eb30236be64b
[ "MIT" ]
null
null
null
from django.test import TestCase from app.calc import substraction from app.calc import add class CalcTests(TestCase): def test_add_number(self): #Test that two number added togather self.assertEqual(add(3,8), 11) def test_substraction_number(self): self.assertEqual(substraction(8,3),5)
27.5
45
0.709091
46
330
5
0.5
0.06087
0.095652
0.147826
0
0
0
0
0
0
0
0.02682
0.209091
330
12
46
27.5
0.854406
0.106061
0
0
0
0
0
0
0
0
0
0
0.25
1
0.25
false
0
0.375
0
0.75
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
0
1
0
0
3
c16fb7371862914b59939c00a0c9796c961142c4
856
py
Python
buildbot/master/files/config/passwords.py
ecoal95/saltfs
4d2596794a70919c2887688d6d116f2f5bb5cf1e
[ "Apache-2.0", "MIT" ]
1
2021-01-07T18:49:38.000Z
2021-01-07T18:49:38.000Z
buildbot/master/files/config/passwords.py
ecoal95/saltfs
4d2596794a70919c2887688d6d116f2f5bb5cf1e
[ "Apache-2.0", "MIT" ]
null
null
null
buildbot/master/files/config/passwords.py
ecoal95/saltfs
4d2596794a70919c2887688d6d116f2f5bb5cf1e
[ "Apache-2.0", "MIT" ]
null
null
null
import json # Jinja will replace the inside with double-quote-using JSON, # so use single quotes to delimit the string. # Use double quotes inside to keep the expression as a single string. credentials = json.loads('{{ pillar["buildbot"]["credentials"]|json }}') # json.loads creates unicode strings but Buildbot requires bytestrings. # Python 2's Unicode situation makes me sad. credentials = {k: v.encode('utf-8') for k, v in credentials.items()} HTTP_USERNAME = credentials['http-user'] HTTP_PASSWORD = credentials['http-pass'] SLAVE_PASSWORD = credentials['slave-pass'] CHANGE_PASSWORD = credentials['change-pass'] GITHUB_DOC_TOKEN = credentials['gh-doc-token'] HOMU_BUILDBOT_SECRET = credentials['homu-secret'] S3_UPLOAD_ACCESS_KEY_ID = credentials['s3-upload-access-key-id'] S3_UPLOAD_SECRET_ACCESS_KEY = credentials['s3-upload-secret-access-key']
45.052632
72
0.775701
123
856
5.268293
0.528455
0.049383
0.04321
0.052469
0.12963
0
0
0
0
0
0
0.007833
0.10514
856
18
73
47.555556
0.83812
0.331776
0
0
0
0
0.284452
0.155477
0
0
0
0
0
1
0
false
0.272727
0.090909
0
0.090909
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
c170014093c78e48166681a428ba4bef1d6ab86d
11,819
py
Python
analytics/core/tests/unittest_core_processor.py
sadikovi/Pulsar
3267426cf5bd676a3c4c20cbee88a80b89e65b0f
[ "Apache-2.0" ]
null
null
null
analytics/core/tests/unittest_core_processor.py
sadikovi/Pulsar
3267426cf5bd676a3c4c20cbee88a80b89e65b0f
[ "Apache-2.0" ]
29
2015-02-23T07:59:13.000Z
2015-04-05T09:49:53.000Z
analytics/core/tests/unittest_core_processor.py
sadikovi/Pulsar
3267426cf5bd676a3c4c20cbee88a80b89e65b0f
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python # import libs import unittest import random import sys import warnings # import classes import analytics.utils.misc as misc import analytics.exceptions.exceptions as ex import analytics.core.processor.processor as processor from types import ListType, DictType from analytics.core.map.dataitemmap import DataItemMap from analytics.core.map.clustermap import ClusterMap from analytics.core.map.elementmap import ElementMap from analytics.core.map.pulsemap import PulseMap from analytics.core.cluster import Cluster from analytics.core.element import Element from analytics.core.pulse import DynamicPulse, StaticPulse, Pulse from analytics.core.attribute.dynamic import Dynamic from analytics.core.attribute.feature import Feature from analytics.algorithms.rank import RSYS # some general input to test general_input = [ None, True, False, sys.maxint, -sys.maxint-1, {}, [], {"1": 1, "2": 2}, [1, 2, 3, 4, 5], "abc", 0, 1, -1, 1.23, -3.34, " string ", " test test test ", "1" ] class Processor_TestSequence(unittest.TestCase): def setUp(self): self._teststr = "test string" self._iterations = 20 # object lists self._clrobj = { "id": "#1", "name": "#1", "desc": "#1", "parent": None } self._elmobj = { "id": "#1", "name": "#1", "desc": "#1", "cluster": "#1", "dir": "up" } self._plsobj = { "id": "#1", "name": "#1", "desc": "#1", "sample": 1 } # maps self._clustermap = ClusterMap() self._elementmap = ElementMap() self._pulsemap = PulseMap() def test_processor_clusterObject(self): for it in range(self._iterations): obj = random.choice(general_input) idmapper = random.choice(general_input) if type(obj) is DictType: with self.assertRaises(KeyError): processor._processClusterObject(obj, idmapper) else: with self.assertRaises(TypeError): processor._processClusterObject(obj, idmapper) # process object without errors idmapper = {} clr = processor._processClusterObject(self._clrobj, idmapper) self.assertEqual(type(clr), Cluster) self.assertEqual(clr.name(), 
self._clrobj["name"]) self.assertEqual(clr.desc(), self._clrobj["desc"]) self.assertEqual(clr.parent(), None) idmapper_obj = {self._clrobj["id"]: {"cluster": clr, "parent": None}} self.assertEqual(idmapper, idmapper_obj) def test_processor_elementObject(self): for it in range(self._iterations): obj = random.choice(general_input) idmapper = random.choice(general_input) if type(obj) is DictType: with self.assertRaises(KeyError): processor._processElementObject(obj, idmapper) else: with self.assertRaises(TypeError): processor._processElementObject(obj, idmapper) # process object without errors elm = processor._processElementObject(self._elmobj) self.assertEqual(type(elm), Element) self.assertEqual(elm.name(), self._elmobj["name"]) self.assertEqual(elm.desc(), self._elmobj["desc"]) self.assertEqual(elm.cluster(), None) self.assertEqual(elm.rank(), RSYS.UND_RANK) def test_processor_pulseObject(self): for it in range(self._iterations): obj = random.choice(general_input) idmapper = random.choice(general_input) if type(obj) is DictType: with self.assertRaises(KeyError): processor._processPulseObject(obj, idmapper) else: with self.assertRaises(TypeError): processor._processPulseObject(obj, idmapper) # process object without errors pls = processor._processPulseObject(self._plsobj) self.assertEqual(isinstance(pls, Pulse), True) self.assertEqual(pls.name(), self._plsobj["name"]) self.assertEqual(pls.desc(), self._plsobj["desc"]) self.assertEqual(pls.type(), type(self._plsobj["sample"])) self.assertEqual(pls.store(), []) self.assertEqual(pls.default(), None) def test_processor_parseClusters(self): objlist = [self._clrobj, {}] idmapper = {} with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") processor.parseClusters(objlist, self._clustermap, idmapper) self.assertEqual(len(w), 1) self.assertTrue(issubclass(w[0].category, UserWarning)) self.assertEqual(len(self._clustermap._map), 1) self.assertEqual(len(self._clustermap._root), 1) clr = 
self._clustermap._map.values()[0] key = self._clrobj["id"] self.assertEqual(key in idmapper, True) self.assertEqual(idmapper[key], {"cluster":clr, "parent": None}) def test_processor_parseElements(self): objlist = [self._elmobj, {}] with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") processor.parseElements(objlist, self._elementmap) self.assertEqual(len(w), 1) self.assertTrue(issubclass(w[0].category, UserWarning)) self.assertEqual(len(self._elementmap._map), 1) exm = self._elementmap._map.values()[0] self.assertEqual(len(exm.features()), 1) self.assertEqual(exm.features()[0].name(), "dir") def test_processor_parsePulses(self): objlist = [self._plsobj, {}] with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") processor.parsePulses(objlist, self._pulsemap) self.assertEqual(len(w), 1) self.assertTrue(issubclass(w[0].category, UserWarning)) self.assertEqual(len(self._pulsemap._map), 1) def test_processor_processBlock(self): clusters = {"map": self._clustermap, "data": [self._clrobj]} elements = {"map": self._elementmap, "data": [self._elmobj]} pulses = {"map": self._pulsemap, "data": [self._plsobj]} # fill block block = processor.ProcessBlock(clusters, elements, pulses) self.assertEqual(block._clustermap, self._clustermap) self.assertEqual(block._elementmap, self._elementmap) self.assertEqual(block._pulsemap, self._pulsemap) self.assertEqual(block._isProcessed, False) # process block block = processor.processWithBlock(block) self.assertEqual(block._isProcessed, True) self.assertEqual( len(block._clustermap._map.values()), len(clusters["data"]) ) self.assertEqual( len(block._elementmap._map.values()), len(elements["data"]) ) self.assertEqual( len(block._pulsemap._map.values()), len(pulses["data"]) ) def test_processor_processBlockUnknownCluster(self): elmobjects = [ self._elmobj, { "id": "#2", "name": "#2", "desc": "#2", "cluster": None, "dir": "up" } ] clusters = {"map": self._clustermap, "data": [self._clrobj]} 
elements = {"map": self._elementmap, "data": elmobjects} pulses = {"map": self._pulsemap, "data": [self._plsobj]} # fill block block = processor.ProcessBlock(clusters, elements, pulses) self.assertEqual(block._clustermap, self._clustermap) self.assertEqual(block._elementmap, self._elementmap) self.assertEqual(block._pulsemap, self._pulsemap) self.assertEqual(block._isProcessed, False) # process block block = processor.processWithBlock(block) self.assertEqual(block._isProcessed, True) self.assertEqual( len(block._clustermap._map.values()), len(clusters["data"])+1 ) self.assertTrue( processor.UNKNOWN_CLUSTER.id() in block._clustermap._map ) self.assertEqual( len(block._elementmap._map.values()), len(elements["data"]) ) self.assertEqual( len(block._pulsemap._map.values()), len(pulses["data"]) ) def test_processor_discoverPulses(self): clusters = {"map": self._clustermap, "data": [self._clrobj]} elements = {"map": self._elementmap, "data": [self._elmobj]} pulses = {"map": self._pulsemap, "data": [self._plsobj]} # fill block, and discover pulses block = processor.ProcessBlock(clusters, elements, pulses, True) self.assertEqual(block._clustermap, self._clustermap) self.assertEqual(block._elementmap, self._elementmap) self.assertEqual(block._pulsemap, self._pulsemap) self.assertEqual(block._isDiscovery, True) self.assertEqual(block._isProcessed, False) # process block with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") block = processor.processWithBlock(block) self.assertEqual(len(w), 1) self.assertTrue(issubclass(w[0].category, UserWarning)) self.assertEqual(block._isProcessed, True) self.assertEqual( len(block._clustermap._map.values()), len(clusters["data"]) ) self.assertEqual( len(block._elementmap._map.values()), len(elements["data"]) ) self.assertEqual(len(block._pulsemap._map.values()), 1) for pulse in block._pulsemap._map.values(): self.assertEqual(pulse.name() in self._elmobj, True) self.assertEqual(len(pulse.store()), 1) def 
test_processor_sortElements(self): elementlist = [ {"id": "#1","name": "#1","desc": "#1","cluster": None}, {"id": "#2","name": "#2","desc": "#2","cluster": None}, {"id": "#3","name": "#3","desc": "#3","cluster": None} ] ranks = [RSYS.O, RSYS.B, RSYS.A, RSYS.F, RSYS.G, RSYS.K] elementmap = ElementMap() processor.parseElements(elementlist, elementmap) for element in elementmap._map.values(): element.setRank(random.choice(ranks)) ls = processor.sortElements(elementmap._map.values()) for i in range(len(ls)-1): self.assertTrue(ls[i].rank()._value >= ls[i+1].rank()._value) def test_processor_sortElementsReversed(self): elementlist = [ {"id": "#1","name": "#1","desc": "#1","cluster": None}, {"id": "#2","name": "#2","desc": "#2","cluster": None}, {"id": "#3","name": "#3","desc": "#3","cluster": None} ] ranks = [RSYS.O, RSYS.B, RSYS.A, RSYS.F, RSYS.G, RSYS.K] elementmap = ElementMap() processor.parseElements(elementlist, elementmap) for element in elementmap._map.values(): element.setRank(random.choice(ranks)) ls = processor.sortElements(elementmap._map.values(), True) for i in range(len(ls)-1): self.assertTrue(ls[i].rank()._value <= ls[i+1].rank()._value) # Load test suites def _suites(): return [ Processor_TestSequence ] # Load tests def loadSuites(): # global test suite for this module gsuite = unittest.TestSuite() for suite in _suites(): gsuite.addTest(unittest.TestLoader().loadTestsFromTestCase(suite)) return gsuite if __name__ == '__main__': suite = loadSuites() print "" print "### Running tests ###" print "-" * 70 unittest.TextTestRunner(verbosity=2).run(suite)
40.064407
77
0.607496
1,236
11,819
5.669094
0.147249
0.117739
0.048808
0.029542
0.556872
0.550878
0.519481
0.509776
0.483659
0.483659
0
0.009123
0.258059
11,819
294
78
40.20068
0.789942
0.028767
0
0.466667
0
0
0.043015
0
0
0
0
0
0.266667
0
null
null
0
0.070588
null
null
0.011765
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
c171e9fdddb0b26706be73c55350884337b985be
19,654
py
Python
test/test_get_metadata_functions.py
bjoernmeier/sqlalchemy_exasol
d63e70096f227db5eb68e631e98777e0e68ac178
[ "BSD-2-Clause" ]
26
2015-10-13T21:43:07.000Z
2021-09-22T16:58:02.000Z
test/test_get_metadata_functions.py
bjoernmeier/sqlalchemy_exasol
d63e70096f227db5eb68e631e98777e0e68ac178
[ "BSD-2-Clause" ]
65
2015-01-22T10:05:18.000Z
2022-01-18T12:11:53.000Z
test/test_get_metadata_functions.py
bjoernmeier/sqlalchemy_exasol
d63e70096f227db5eb68e631e98777e0e68ac178
[ "BSD-2-Clause" ]
23
2015-01-21T09:27:05.000Z
2022-01-18T11:40:18.000Z
# -*- coding: UTF-8 -*- import pytest from sqlalchemy import create_engine from sqlalchemy.engine.url import URL from sqlalchemy.sql.sqltypes import INTEGER, VARCHAR from sqlalchemy.testing import fixtures, config from sqlalchemy_exasol.base import EXADialect TEST_GET_METADATA_FUNCTIONS_SCHEMA = "test_get_metadata_functions_schema" ENGINE_NONE_DATABASE = "ENGINE_NONE_DATABASE" ENGINE_SCHEMA_DATABASE = "ENGINE_SCHEMA_DATABASE" ENGINE_SCHEMA_2_DATABASE = "ENGINE_SCHEMA_2_DATABASE" class MetadataTest(fixtures.TablesTest): __backend__ = True @classmethod def define_tables(cls, metadata): cls.schema = TEST_GET_METADATA_FUNCTIONS_SCHEMA cls.schema_2 = "test_get_metadata_functions_schema_2" with config.db.begin() as c: try: c.execute("DROP SCHEMA %s CASCADE" % cls.schema) except Exception as e: print(e) pass c.execute("CREATE SCHEMA %s" % cls.schema) c.execute( "CREATE TABLE %s.t (pid1 int, pid2 int, name VARCHAR(20), age int, PRIMARY KEY (pid1,pid2))" % cls.schema) c.execute( "CREATE TABLE {schema}.s (id1 int primary key, fid1 int, fid2 int, age int, CONSTRAINT fk_test FOREIGN KEY (fid1,fid2) REFERENCES {schema}.t(pid1,pid2))".format( schema=cls.schema)) cls.view_defintion = "CREATE VIEW {schema}.v AS select * from {schema}.t".format(schema=cls.schema) c.execute(cls.view_defintion) try: c.execute("DROP SCHEMA %s CASCADE" % cls.schema_2) except Exception as e: print(e) pass c.execute("CREATE SCHEMA %s" % cls.schema_2) c.execute( "CREATE TABLE %s.t_2 (pid1 int, pid2 int, name VARCHAR(20), age int, PRIMARY KEY (pid1,pid2))" % cls.schema_2) c.execute("CREATE VIEW {schema}.v_2 AS select * from {schema}.t_2".format(schema=cls.schema_2)) c.execute("COMMIT") cls.engine_none_database = cls.create_engine_with_database_name(c, None) cls.engine_schema_database = cls.create_engine_with_database_name(c, cls.schema) cls.engine_schema_2_database = cls.create_engine_with_database_name(c, cls.schema_2) cls.engine_map = { ENGINE_NONE_DATABASE: cls.engine_none_database, 
ENGINE_SCHEMA_DATABASE: cls.engine_schema_database, ENGINE_SCHEMA_2_DATABASE: cls.engine_schema_2_database } @classmethod def generate_url_with_database_name(cls, connection, new_database_name): database_url = config.db_url new_args = database_url.translate_connect_args() new_args["database"] = new_database_name new_database_url = URL(drivername=database_url.drivername, query=database_url.query, **new_args) return new_database_url @classmethod def create_engine_with_database_name(cls, connection, new_database_name): url = cls.generate_url_with_database_name(connection, new_database_name) engine = create_engine(url) return engine @pytest.mark.parametrize("use_sql_fallback", [True, False]) @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE]) def test_get_schema_names(self, engine_name, use_sql_fallback): with self.engine_map[engine_name].begin() as c: dialect = EXADialect() schema_names = dialect.get_schema_names(connection=c, use_sql_fallback=use_sql_fallback) assert self.schema in schema_names and self.schema_2 in schema_names @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE]) def test_compare_get_schema_names_for_sql_and_odbc(self, engine_name): with self.engine_map[engine_name].begin() as c: dialect = EXADialect() schema_names_fallback = dialect.get_schema_names(connection=c, use_sql_fallback=True) schema_names_odbc = dialect.get_schema_names(connection=c) assert sorted(schema_names_fallback) == sorted(schema_names_odbc) @pytest.mark.parametrize("use_sql_fallback", [True, False]) @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE]) def test_get_table_names(self, use_sql_fallback, engine_name): with self.engine_map[engine_name].begin() as c: dialect = EXADialect() table_names = dialect.get_table_names(connection=c, schema=self.schema, use_sql_fallback=use_sql_fallback) assert "t" in 
table_names and "s" in table_names @pytest.mark.parametrize("schema", [TEST_GET_METADATA_FUNCTIONS_SCHEMA, None]) @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE]) def test_compare_get_table_names_for_sql_and_odbc(self, schema, engine_name): with self.engine_map[engine_name].begin() as c: if schema is None: c.execute("OPEN SCHEMA %s" % self.schema) dialect = EXADialect() table_names_fallback = dialect.get_table_names(connection=c, schema=schema, use_sql_fallback=True) table_names_odbc = dialect.get_table_names(connection=c, schema=schema) assert table_names_fallback == table_names_odbc @pytest.mark.parametrize("use_sql_fallback", [True, False]) @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE]) def test_has_table_table_exists(self, use_sql_fallback, engine_name): with self.engine_map[engine_name].begin() as c: dialect = EXADialect() has_table = dialect.has_table(connection=c, schema=self.schema, table_name="t", use_sql_fallback=use_sql_fallback) assert has_table, "Table %s.T was not found, but should exist" % self.schema @pytest.mark.parametrize("use_sql_fallback", [True, False]) @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE]) def test_has_table_table_exists_not(self, use_sql_fallback, engine_name): with self.engine_map[engine_name].begin() as c: dialect = EXADialect() has_table = dialect.has_table(connection=c, schema=self.schema, table_name="not_exist", use_sql_fallback=use_sql_fallback) assert not has_table, "Table %s.not_exist was found, but should not exist" % self.schema @pytest.mark.parametrize("schema", [TEST_GET_METADATA_FUNCTIONS_SCHEMA, None]) @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE]) def test_compare_has_table_for_sql_and_odbc(self, schema, engine_name): with 
self.engine_map[engine_name].begin() as c: dialect = EXADialect() has_table_fallback = dialect.has_table(connection=c, schema=schema, use_sql_fallback=True, table_name="t") has_table_odbc = dialect.has_table(connection=c, schema=schema, table_name="t") assert has_table_fallback == has_table_odbc, "Expected table %s.t with odbc and fallback" % schema @pytest.mark.parametrize("use_sql_fallback", [True, False]) @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE]) def test_get_view_names(self, use_sql_fallback,engine_name): with self.engine_map[engine_name].begin() as c: dialect = EXADialect() view_names = dialect.get_view_names(connection=c, schema=self.schema, use_sql_fallback=use_sql_fallback) assert "v" in view_names @pytest.mark.parametrize("use_sql_fallback", [True, False]) @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE]) def test_get_view_names_for_sys(self, use_sql_fallback, engine_name): with self.engine_map[engine_name].begin() as c: dialect = EXADialect() view_names = dialect.get_view_names(connection=c, schema="sys", use_sql_fallback=use_sql_fallback) assert len(view_names) == 0 @pytest.mark.parametrize("use_sql_fallback", [True, False]) @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE]) def test_get_view_definition(self, use_sql_fallback,engine_name): with self.engine_map[engine_name].begin() as c: dialect = EXADialect() view_definition = dialect.get_view_definition(connection=c, schema=self.schema, view_name="v", use_sql_fallback=use_sql_fallback) assert self.view_defintion == view_definition @pytest.mark.parametrize("use_sql_fallback", [True, False]) @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE]) def test_get_view_definition_view_name_none(self, use_sql_fallback,engine_name): with 
self.engine_map[engine_name].begin() as c: dialect = EXADialect() view_definition = dialect.get_view_definition(connection=c, schema=self.schema, view_name=None, use_sql_fallback=use_sql_fallback) assert view_definition is None @pytest.mark.parametrize("schema", [TEST_GET_METADATA_FUNCTIONS_SCHEMA, None]) @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE]) def test_compare_get_view_names_for_sql_and_odbc(self, schema,engine_name): with self.engine_map[engine_name].begin() as c: dialect = EXADialect() c.execute("OPEN SCHEMA %s" % self.schema) view_names_fallback = dialect.get_view_names(connection=c, schema=schema, use_sql_fallback=True) view_names_odbc = dialect.get_view_names(connection=c, schema=schema) assert view_names_fallback == view_names_odbc @pytest.mark.parametrize("schema", [TEST_GET_METADATA_FUNCTIONS_SCHEMA, None]) @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE]) def test_compare_get_view_definition_for_sql_and_odbc(self, schema,engine_name): with self.engine_map[engine_name].begin() as c: if schema is None: c.execute("OPEN SCHEMA %s" % self.schema) view_name = "v" dialect = EXADialect() view_definition_fallback = dialect.get_view_definition( connection=c, view_name=view_name, schema=schema, use_sql_fallback=True) view_definition_odbc = dialect.get_view_definition( connection=c, view_name=view_name, schema=schema) assert view_definition_fallback == view_definition_odbc @pytest.mark.parametrize("schema", [TEST_GET_METADATA_FUNCTIONS_SCHEMA, None]) @pytest.mark.parametrize("table", ["t", "s", "unknown"]) @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE]) def test_compare_get_columns_for_sql_and_odbc(self, schema, table, engine_name): with self.engine_map[engine_name].begin() as c: if schema is None: c.execute("OPEN SCHEMA %s" % self.schema) dialect = EXADialect() 
columns_fallback = dialect.get_columns(connection=c, table_name=table, schema=schema, use_sql_fallback=True) columns_odbc = dialect.get_columns(connection=c, table_name=table, schema=schema) assert str(columns_fallback) == str(columns_odbc) # object equality doesn't work for sqltypes @pytest.mark.parametrize("schema", [TEST_GET_METADATA_FUNCTIONS_SCHEMA, None]) @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE]) def test_compare_get_columns_none_table_for_sql_and_odbc(self, schema, engine_name): with self.engine_map[engine_name].begin() as c: if schema is None: c.execute("OPEN SCHEMA %s" % self.schema) dialect = EXADialect() table = None columns_fallback = dialect.get_columns(connection=c, table_name=table, schema=schema, use_sql_fallback=True) columns_odbc = dialect.get_columns(connection=c, table_name=table, schema=schema) assert str(columns_fallback) == str(columns_fallback) def make_columns_comparable(self, column_list): # object equality doesn't work for sqltypes return sorted([{k: str(v) for k, v in column.items()} for column in column_list], key=lambda k: k["name"]) @pytest.mark.parametrize("use_sql_fallback", [True, False]) @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE]) def test_get_columns(self, use_sql_fallback, engine_name): with self.engine_map[engine_name].begin() as c: dialect = EXADialect() columns = dialect.get_columns(connection=c, schema=self.schema, table_name="t", use_sql_fallback=use_sql_fallback) expected = [{'default': None, 'is_distribution_key': False, 'name': 'pid1', 'nullable': False, 'type': INTEGER()}, {'default': None, 'is_distribution_key': False, 'name': 'pid2', 'nullable': False, 'type': INTEGER()}, {'default': None, 'is_distribution_key': False, 'name': 'name', 'nullable': True, 'type': VARCHAR(length=20)}, {'default': None, 'is_distribution_key': False, 'name': 'age', 'nullable': True, 'type': 
INTEGER()}, ] assert self.make_columns_comparable(expected) == self.make_columns_comparable(columns) @pytest.mark.parametrize("use_sql_fallback", [True, False]) @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE]) def test_get_columns_table_name_none(self, use_sql_fallback, engine_name): with self.engine_map[engine_name].begin() as c: dialect = EXADialect() columns = dialect.get_columns(connection=c, schema=self.schema, table_name=None, use_sql_fallback=use_sql_fallback) assert columns == [] @pytest.mark.parametrize("schema", [TEST_GET_METADATA_FUNCTIONS_SCHEMA, None]) @pytest.mark.parametrize("table", ["t", "s"]) @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE]) def test_compare_get_pk_constraint_for_sql_and_odbc(self, schema, table, engine_name): with self.engine_map[engine_name].begin() as c: if schema is None: c.execute("OPEN SCHEMA %s" % self.schema) dialect = EXADialect() pk_constraint_fallback = dialect.get_pk_constraint(connection=c, table_name=table, schema=schema, use_sql_fallback=True) pk_constraint_odbc = dialect.get_pk_constraint(connection=c, table_name=table, schema=schema) assert str(pk_constraint_fallback) == str(pk_constraint_odbc) @pytest.mark.parametrize("use_sql_fallback", [True, False]) @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE]) def test_get_pk_constraint(self, use_sql_fallback, engine_name): with self.engine_map[engine_name].begin() as c: dialect = EXADialect() pk_constraint = dialect.get_pk_constraint(connection=c, schema=self.schema, table_name="t", use_sql_fallback=use_sql_fallback) assert pk_constraint["constrained_columns"] == ['pid1', 'pid2'] and \ pk_constraint["name"].startswith("sys_") @pytest.mark.parametrize("use_sql_fallback", [True, False]) @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, 
ENGINE_SCHEMA_2_DATABASE]) def test_get_pk_constraint_table_name_none(self, use_sql_fallback, engine_name): with self.engine_map[engine_name].begin() as c: dialect = EXADialect() pk_constraint = dialect.get_pk_constraint(connection=c, schema=self.schema, table_name=None, use_sql_fallback=use_sql_fallback) assert pk_constraint is None @pytest.mark.parametrize("table", ["t", "s"]) @pytest.mark.parametrize("schema", [TEST_GET_METADATA_FUNCTIONS_SCHEMA, None]) @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE]) def test_compare_get_foreign_keys_for_sql_and_odbc(self, schema, table, engine_name): with self.engine_map[engine_name].begin() as c: if schema is None: c.execute("OPEN SCHEMA %s" % self.schema_2) dialect = EXADialect() foreign_keys_fallback = dialect.get_foreign_keys(connection=c, table_name=table, schema=schema, use_sql_fallback=True) foreign_keys_odbc = dialect.get_foreign_keys(connection=c, table_name=table, schema=schema) assert str(foreign_keys_fallback) == str(foreign_keys_odbc) @pytest.mark.parametrize("use_sql_fallback", [True, False]) @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE]) def test_get_foreign_keys(self, use_sql_fallback, engine_name): with self.engine_map[engine_name].begin() as c: dialect = EXADialect() foreign_keys = dialect.get_foreign_keys(connection=c, schema=self.schema, table_name="s", use_sql_fallback=use_sql_fallback) expected = [{'name': 'fk_test', 'constrained_columns': ['fid1', 'fid2'], 'referred_schema': 'test_get_metadata_functions_schema', 'referred_table': 't', 'referred_columns': ['pid1', 'pid2']}] assert foreign_keys == expected @pytest.mark.parametrize("use_sql_fallback", [True, False]) @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE]) def test_get_foreign_keys_table_name_none(self, use_sql_fallback, engine_name): with 
self.engine_map[engine_name].begin() as c: dialect = EXADialect() foreign_keys = dialect.get_foreign_keys(connection=c, schema=self.schema, table_name=None, use_sql_fallback=use_sql_fallback) assert foreign_keys == []
59.738602
177
0.666735
2,414
19,654
5.070423
0.058409
0.056373
0.074346
0.048039
0.816585
0.796977
0.771405
0.739461
0.705147
0.673203
0
0.004736
0.237255
19,654
328
178
59.920732
0.811754
0.005342
0
0.508591
0
0.010309
0.093067
0.0088
0
0
0
0
0.079038
1
0.092784
false
0.006873
0.020619
0.003436
0.130584
0.006873
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
c1724e3fcc8f514b866413548912dc98400bc49f
2,790
py
Python
entrepreneurial_property/migrations/0015_auto_20180905_0646.py
CzechInvest/ciis
c6102598f564a717472e5e31e7eb894bba2c8104
[ "MIT" ]
1
2019-05-26T22:24:01.000Z
2019-05-26T22:24:01.000Z
entrepreneurial_property/migrations/0015_auto_20180905_0646.py
CzechInvest/ciis
c6102598f564a717472e5e31e7eb894bba2c8104
[ "MIT" ]
6
2019-01-22T14:53:43.000Z
2020-09-22T16:20:28.000Z
entrepreneurial_property/migrations/0015_auto_20180905_0646.py
CzechInvest/ciis
c6102598f564a717472e5e31e7eb894bba2c8104
[ "MIT" ]
null
null
null
# Generated by Django 2.0.5 on 2018-09-05 06:46 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('entrepreneurial_property', '0014_auto_20180905_0629'), ] operations = [ migrations.AlterModelOptions( name='brownfieldwastewater', options={'verbose_name': 'Odpadní voda', 'verbose_name_plural': 'Odpadní vody'}, ), migrations.AlterModelOptions( name='developmentparkwastewater', options={'verbose_name': 'Odpadní voda', 'verbose_name_plural': 'Odpadní vody'}, ), migrations.AlterModelOptions( name='greenfieldwastewater', options={'verbose_name': 'Odpadní voda', 'verbose_name_plural': 'Odpadní vody'}, ), migrations.AlterModelOptions( name='industrialarealwastewater', options={'verbose_name': 'Odpadní voda', 'verbose_name_plural': 'Odpadní vody'}, ), migrations.AlterModelOptions( name='officewastewater', options={'verbose_name': 'Odpadní voda', 'verbose_name_plural': 'Odpadní vody'}, ), migrations.AlterModelOptions( name='scientificparkwastewater', options={'verbose_name': 'Odpadní voda', 'verbose_name_plural': 'Odpadní vody'}, ), migrations.AddField( model_name='brownfield', name='uuid', field=models.CharField(default='6cfe1b7c-0699-466a-99c0-04523b11b3ea', max_length=36), preserve_default=False, ), migrations.AddField( model_name='developmentpark', name='uuid', field=models.CharField(default='6cfe1b7c-0699-466a-99c0-04523b11b3ea', max_length=36), preserve_default=False, ), migrations.AddField( model_name='greenfield', name='uuid', field=models.CharField(default='6cfe1b7c-0699-466a-99c0-04523b11b3ea', max_length=36), preserve_default=False, ), migrations.AddField( model_name='industrialareal', name='uuid', field=models.CharField(default='6cfe1b7c-0699-466a-99c0-04523b11b3ea', max_length=36), preserve_default=False, ), migrations.AddField( model_name='office', name='uuid', field=models.CharField(default='6cfe1b7c-0699-466a-99c0-04523b11b3ea', max_length=36), preserve_default=False, ), migrations.AddField( 
model_name='scientificpark', name='uuid', field=models.CharField(default='6cfe1b7c-0699-466a-99c0-04523b11b3ea', max_length=36), preserve_default=False, ), ]
37.702703
98
0.603584
246
2,790
6.682927
0.256098
0.080292
0.113139
0.091241
0.748175
0.748175
0.748175
0.748175
0.748175
0.748175
0
0.083829
0.277419
2,790
73
99
38.219178
0.731647
0.016129
0
0.716418
1
0
0.297849
0.122858
0
0
0
0
0
1
0
false
0
0.014925
0
0.059701
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
c173709b35ae18fdb3b908927a799df5ea385960
1,353
py
Python
backend/app/__init__.py
mushcatshiro/flask-template
27a1558e831ef2c14622c320a5e6fd6991b94bcf
[ "MIT" ]
null
null
null
backend/app/__init__.py
mushcatshiro/flask-template
27a1558e831ef2c14622c320a5e6fd6991b94bcf
[ "MIT" ]
null
null
null
backend/app/__init__.py
mushcatshiro/flask-template
27a1558e831ef2c14622c320a5e6fd6991b94bcf
[ "MIT" ]
null
null
null
from flask import Flask, jsonify from flask_marshmallow import Marshmallow from flask_sqlalchemy import SQLAlchemy from flask_track_usage import TrackUsage from backend.config import config, Config from celery import Celery db = SQLAlchemy() ma = Marshmallow() celery = Celery( __name__, broker=Config.CELERY_BROKER_URL, # backend='db+sqlite:///results.sqlite' ) track = TrackUsage() def create_app(config_name): app = Flask(__name__) print(config_name) app.config.from_object(config[config_name]) config[config_name].init_app(app) db.init_app(app) ma.init_app(app) celery.conf.update(app.config) from flask_track_usage.storage.sql import SQLStorage with app.app_context(): track.init_app(app, [SQLStorage(db=db)]) from backend.app.api.sample_api import api as api_blueprint app.register_blueprint(api_blueprint, url_prefix='/api/v1') register_error(app) return app def register_error(app): @app.errorhandler(400) def bad_request(e): print(e) # jsonify({"msg": "bad request"}) return "bad request", 400 @app.errorhandler(404) def page_not_found(e): return jsonify({"msg": "page not found"}), 404 @app.errorhandler(500) def internal_server_error(e): return jsonify({"msg": "internal server error"}), 500
24.160714
63
0.702882
182
1,353
5.010989
0.296703
0.039474
0.04386
0.041667
0
0
0
0
0
0
0
0.017304
0.18847
1,353
55
64
24.6
0.813297
0.050998
0
0
0
0
0.046058
0
0
0
0
0
0
1
0.128205
false
0
0.205128
0.051282
0.435897
0.102564
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c1743e483c31f6d867722bc46b72b5ece150db84
1,561
py
Python
pineboolib/fllegacy/tests/test_settings.py
juanjosepablos/pineboo
f6ce515aec6e0139821bb9c1d62536d9fb50dae4
[ "MIT" ]
null
null
null
pineboolib/fllegacy/tests/test_settings.py
juanjosepablos/pineboo
f6ce515aec6e0139821bb9c1d62536d9fb50dae4
[ "MIT" ]
1
2017-10-30T22:00:48.000Z
2017-11-11T19:34:32.000Z
pineboolib/fllegacy/tests/test_settings.py
juanjosepablos/pineboo
f6ce515aec6e0139821bb9c1d62536d9fb50dae4
[ "MIT" ]
1
2017-10-30T20:16:38.000Z
2017-10-30T20:16:38.000Z
"""Test_flutil module.""" import unittest from pineboolib.fllegacy import flsettings class TestSettings(unittest.TestCase): def test_settings(self) -> None: """Test read functions.""" setting = flsettings.FLSettings() setting.writeEntryList("test_uno", [""]) setting.writeEntryList("test_uno", ["test_uno"]) setting.writeEntryList("test_dos", []) setting.writeEntryList("test_dos", ["test_2_1", "test_2_2"]) setting.writeEntry("test_tres", "") setting.writeEntry("test_tres", "test_tres") setting.writeEntry("test_cuatro", False) setting.writeEntry("test_cuatro", True) setting.writeEntry("test_cinco", 0) setting.writeEntry("test_cinco", 10) setting.writeEntry("test_double", 0.00) setting.writeEntry("test_double", 23.12) self.assertEqual(setting.readListEntry("test_dos"), ["test_2_1", "test_2_2"]) self.assertEqual(setting.readListEntry("test_seis"), []) self.assertEqual(setting.readListEntry("test_uno"), ["test_uno"]) self.assertEqual(setting.readEntry("test_tres"), "test_tres") self.assertEqual(setting.readEntry("test_siete", "fallo"), "fallo") self.assertEqual(setting.readNumEntry("test_cinco", 12), 10) self.assertEqual(setting.readNumEntry("test_ocho", 14), 14) self.assertTrue(setting.readBoolEntry("test_cuatro", False)) self.assertTrue(setting.readBoolEntry("test_nueve", True)) self.assertTrue(setting.readDoubleEntry("test_double", 23.12))
43.361111
85
0.675208
171
1,561
5.953216
0.28655
0.133595
0.165029
0.103143
0.486248
0.037328
0.037328
0.037328
0
0
0
0.023256
0.173607
1,561
35
86
44.6
0.765891
0.025625
0
0
0
0
0.188742
0
0
0
0
0
0.37037
1
0.037037
false
0
0.074074
0
0.148148
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c174bae4ae48802d0df61029ad46df721459ecfb
1,109
py
Python
setup.py
nat236919/pyhighstakes
dbe7dcf81febd361b6b0084788d9f176e1008ee7
[ "MIT" ]
null
null
null
setup.py
nat236919/pyhighstakes
dbe7dcf81febd361b6b0084788d9f176e1008ee7
[ "MIT" ]
2
2020-01-29T02:03:55.000Z
2020-01-29T02:05:17.000Z
setup.py
nat236919/pyhighstakes
dbe7dcf81febd361b6b0084788d9f176e1008ee7
[ "MIT" ]
1
2020-01-28T14:14:45.000Z
2020-01-28T14:14:45.000Z
""" FILE: setup.py DESCRIPTION: Set up PyPI as a Python Library DATE: 27-Jan-2020 """ import setuptools with open('README.md') as f: README = f.read() setuptools.setup( author='Nuttaphat Arunoprayoch', author_email='nat236919@gmail.com', name='pyhighstakes', license='MIT', description='PyHighStakes is a library offering card decks and games', version='v0.0.1', long_description=README, long_description_content_type='text/markdown', url='https://github.com/nat236919/pyhighstakes', packages=setuptools.find_packages(), python_requires=">=3.5", install_requires=[], classifiers=[ # Trove classifiers # (https://pypi.python.org/pypi?%3Aaction=list_classifiers) 'Development Status :: 5 - Production/Stable', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python', 'Topic :: Software Development :: Libraries', 'Topic :: Software Development :: Libraries :: Python Modules', 'Intended Audience :: Developers', 'Intended Audience :: End Users/Desktop' ], )
30.805556
74
0.660956
121
1,109
5.983471
0.669421
0.041436
0.066298
0.09116
0
0
0
0
0
0
0
0.028377
0.205591
1,109
36
75
30.805556
0.793417
0.138864
0
0
0
0
0.493136
0
0
0
0
0
0
1
0
false
0
0.038462
0
0.038462
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c1764afdac11e0d9a60b3e199a98aa40f4ecf8a4
894
py
Python
ecommerce/accounts/migrations/0028_auto_20220127_2039.py
mhdirajabi/django-drf-e-commerce
526044a728f9f073a21386ff7f67ac570f4755c6
[ "MIT" ]
null
null
null
ecommerce/accounts/migrations/0028_auto_20220127_2039.py
mhdirajabi/django-drf-e-commerce
526044a728f9f073a21386ff7f67ac570f4755c6
[ "MIT" ]
null
null
null
ecommerce/accounts/migrations/0028_auto_20220127_2039.py
mhdirajabi/django-drf-e-commerce
526044a728f9f073a21386ff7f67ac570f4755c6
[ "MIT" ]
null
null
null
# Generated by Django 3.2.10 on 2022-01-27 17:09 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('accounts', '0027_alter_totprequest_receiver'), ] operations = [ migrations.AlterModelOptions( name='totprequest', options={'verbose_name': 'درخواست\u200cهای رمز یکبار مصرف', 'verbose_name_plural': 'درخواست\u200cهای رمز یکبار مصرف'}, ), migrations.AlterField( model_name='totprequest', name='channel', field=models.CharField(choices=[('phone_number', 'Phone')], default='phone_number', max_length=20, verbose_name='از طریق'), ), migrations.AlterField( model_name='totprequest', name='created', field=models.DateTimeField(auto_now_add=True, verbose_name='تاریخ ایجاد'), ), ]
31.928571
135
0.621924
91
894
5.945055
0.637363
0.081331
0.066543
0.085028
0.262477
0.162662
0
0
0
0
0
0.042042
0.255034
894
27
136
33.111111
0.77027
0.051454
0
0.333333
1
0
0.267139
0.036643
0
0
0
0
0
1
0
false
0
0.047619
0
0.190476
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
c1771dbe757ef772ec9d522997c65b45e5bb1019
14,552
py
Python
kernel/rnn.py
diwu1990/UnaryComputingSim
1a4746b7bcc2fcde5144a8b42fc94e6f5cd82b8e
[ "MIT" ]
1
2020-06-24T09:54:06.000Z
2020-06-24T09:54:06.000Z
kernel/rnn.py
RuokaiYin/UnarySim
343ff9abf356a63d526b1df8eb946ad528690a27
[ "MIT" ]
null
null
null
kernel/rnn.py
RuokaiYin/UnarySim
343ff9abf356a63d526b1df8eb946ad528690a27
[ "MIT" ]
null
null
null
"""Unary-computing MGU (minimal gated unit) RNN cells for UnarySim.

Four variants of the same cell:
- FSUMGUCell: fully unary (bitstream) implementation.
- HUBMGUCell: hybrid wrapper that streams binary inputs bit-by-bit
  through an FSUMGUCell and accumulates the result.
- HardMGUCell: floating-point reference implementation.
- HardMGUCellFXP: fixed-point (quantized) reference implementation.
"""
import math
import torch
import copy
from torch import Tensor
import torch.nn as nn
import torch.nn.functional as F
from UnarySim.kernel import FSUAdd
from UnarySim.kernel import FSUMul
from UnarySim.kernel import FSULinear
from torch.cuda.amp import autocast
from typing import List, Tuple, Optional, overload, Union
from UnarySim.kernel import HUBHardsigmoid, HUBHardtanh
from UnarySim.kernel import truncated_normal, Round
from UnarySim.stream import BinGen, RNG, BSGen
from UnarySim.metric import ProgError


class FSUMGUCell(torch.nn.Module):
    """
    This is a minimal gated unit with unary computing, corresponding to HardMGUCell with "hard" asserted.
    The scalehardsigmoid is scaled addition (x+1)/2, and hardtanh is direct pass.
    This module follows the uBrain implementation style to maximize hardware reuse.
    """
    def __init__(
            self,
            input_size: int,
            hidden_size: int,
            bias: bool = True,
            # Externally supplied weights/biases for the forget (f) and new (n) gates.
            weight_ext_f=None,
            bias_ext_f=None,
            weight_ext_n=None,
            bias_ext_n=None,
            # hx_buffer: value backing the static forget-gate/hidden multiply below.
            hx_buffer=None,
            # NOTE(review): mutable default dicts are shared across calls; they are
            # only read here, never mutated, but passing fresh dicts is safer.
            hwcfg={
                "width" : 8,
                "mode" : "bipolar",
                "depth" : 10,
                "depth_ismul" : 6,
                "rng" : "Sobol",
                "dimr" : 1
            },
            swcfg={
                "btype" : torch.float,
                "rtype" : torch.float,
                "stype" : torch.float
            }) -> None:
        super(FSUMGUCell, self).__init__()
        # Copy the hardware config entry-by-entry; mode/rng are normalized
        # to lowercase so later comparisons are case-insensitive.
        self.hwcfg = {}
        self.hwcfg["width"] = hwcfg["width"]
        self.hwcfg["mode"] = hwcfg["mode"].lower()
        self.hwcfg["depth"] = hwcfg["depth"]
        self.hwcfg["depth_ismul"] = hwcfg["depth_ismul"]
        self.hwcfg["rng"] = hwcfg["rng"].lower()
        self.hwcfg["dimr"] = hwcfg["dimr"]

        self.swcfg = {}
        self.swcfg["btype"] = swcfg["btype"]
        self.swcfg["rtype"] = swcfg["rtype"]
        self.swcfg["stype"] = swcfg["stype"]

        self.input_size = input_size
        self.hidden_size = hidden_size

        # Only bipolar mode is supported by this cell.
        assert self.hwcfg["mode"] in ["bipolar"], \
            "Error: the hw config 'mode' in " + str(self) + " class requires 'bipolar'."

        # Gate weights act on the concatenation (hx, input), hence the
        # (hidden_size, hidden_size + input_size) shape requirement.
        assert (weight_ext_f.size()[0], weight_ext_f.size()[1]) == (hidden_size, hidden_size + input_size), "Incorrect weight_f shape."
        assert (weight_ext_n.size()[0], weight_ext_n.size()[1]) == (hidden_size, hidden_size + input_size), "Incorrect weight_n shape."
        if bias is True:
            assert bias_ext_f.size()[0] == hidden_size, "Incorrect bias_f shape."
            assert bias_ext_n.size()[0] == hidden_size, "Incorrect bias_n shape."

        # Both gate linears share one config (scale fixed to 1).
        hwcfg_linear={
            "width" : self.hwcfg["width"],
            "mode" : self.hwcfg["mode"],
            "scale" : 1,
            "depth" : self.hwcfg["depth"],
            "rng" : self.hwcfg["rng"],
            "dimr" : self.hwcfg["dimr"]
        }
        self.fg_ug_tanh = FSULinear(hidden_size + input_size, hidden_size, bias=bias, weight_ext=weight_ext_f, bias_ext=bias_ext_f, hwcfg=hwcfg_linear, swcfg=swcfg)
        self.ng_ug_tanh = FSULinear(hidden_size + input_size, hidden_size, bias=bias, weight_ext=weight_ext_n, bias_ext=bias_ext_n, hwcfg=hwcfg_linear, swcfg=swcfg)

        # Forget-gate "sigmoid" realized as a scaled add (see class docstring).
        hwcfg_sigm={
            "mode" : self.hwcfg["mode"],
            "scale" : None,
            "dima" : 0,
            "depth" : self.hwcfg["depth"],
            "entry" : None
        }
        self.fg_sigmoid = FSUAdd(hwcfg_sigm, swcfg)

        # Static multiplier: one operand is fixed to hx_buffer.
        hwcfg_hx_mul={
            "width" : self.hwcfg["width"],
            "mode" : self.hwcfg["mode"],
            "static" : True,
            "rng" : self.hwcfg["rng"],
            "dimr" : self.hwcfg["dimr"]
        }
        self.fg_hx_mul = FSUMul(in_1_prob=hx_buffer, hwcfg=hwcfg_hx_mul, swcfg=swcfg)

        # Non-static multiplier for fg * ng; note it uses depth_ismul as width.
        hwcfg_ng_mul={
            "width" : self.hwcfg["depth_ismul"],
            "mode" : self.hwcfg["mode"],
            "static" : False,
            "rng" : self.hwcfg["rng"],
            "dimr" : self.hwcfg["dimr"]
        }
        self.fg_ng_mul = FSUMul(in_1_prob=None, hwcfg=hwcfg_ng_mul, swcfg=swcfg)

        # Final 3-entry adder combining ng, (1 - fg*ng) and fg*hx.
        hwcfg_hy={
            "mode" : self.hwcfg["mode"],
            "scale" : 1,
            "dima" : 0,
            "depth" : self.hwcfg["depth"],
            "entry" : 3
        }
        self.hy_add = FSUAdd(hwcfg_hy, swcfg)

    def check_forward_input(self, input: Tensor) -> None:
        # Validate that dim 1 of the input matches the declared input_size.
        if input.size(1) != self.input_size:
            raise RuntimeError("input has inconsistent input_size: got {}, expected {}".format(input.size(1), self.input_size))

    def check_forward_hidden(self, input: Tensor, hx: Tensor, hidden_label: str = '') -> None:
        # Validate batch (dim 0) agreement and hidden width (dim 1).
        if input.size(0) != hx.size(0):
            raise RuntimeError("Input batch size {} doesn't match hidden{} batch size {}".format(input.size(0), hidden_label, hx.size(0)))
        if hx.size(1) != self.hidden_size:
            raise RuntimeError("hidden{} has inconsistent hidden_size: got {}, expected {}".format(hidden_label, hx.size(1), self.hidden_size))

    @autocast()
    def forward(self, input: Tensor, hx: Tensor) -> Tensor:
        """One cell step; intermediate tensors are kept on self (for inspection)."""
        self.check_forward_input(input)
        self.check_forward_hidden(input, hx, '')

        # forget gate: fg = scaled-add(linear(cat(hx, input)), 1)
        self.fg_ug_in = torch.cat((hx, input), 1)
        self.fg_in = self.fg_ug_tanh(self.fg_ug_in)
        self.fg = self.fg_sigmoid(torch.stack([self.fg_in, torch.ones_like(self.fg_in)], dim=0))

        # new gate: ng = linear(cat(fg*hx, input))  (hardtanh is a direct pass)
        self.fg_hx = self.fg_hx_mul(self.fg)
        self.ng_ug_in = torch.cat((self.fg_hx, input), 1)
        self.ng = self.ng_ug_tanh(self.ng_ug_in)

        # output: hy = ng + (1 - fg*ng) + fg*hx, combined by the 3-entry adder
        self.fg_ng = self.fg_ng_mul(self.fg, self.ng)
        self.fg_ng_inv = 1 - self.fg_ng
        hy = self.hy_add(torch.stack([self.ng, self.fg_ng_inv, self.fg_hx], dim=0))
        return hy


class HUBMGUCell(torch.nn.Module):
    """
    This is a minimal gated unit with hybrid unary binary computing, corresponding to HardMGUCell with "hard" asserted.
    The scalehardsigmoid is scaled addition (x+1)/2, and hardtanh is direct pass.
    This module follows the uBrain implementation style to maximize hardware reuse.
    """
    def __init__(
            self,
            input_size: int,
            hidden_size: int,
            bias: bool = True,
            weight_ext_f=None,
            bias_ext_f=None,
            weight_ext_n=None,
            bias_ext_n=None,
            # NOTE(review): shared mutable default dict; read-only here.
            hwcfg={
                "width" : 8,
                "mode" : "bipolar",
                "depth" : 10,
                "depth_ismul" : 6,
                "rng" : "Sobol",
                "dimr" : 1
            }) -> None:
        super(HUBMGUCell, self).__init__()
        self.hwcfg = {}
        self.hwcfg["width"] = hwcfg["width"]
        self.hwcfg["mode"] = hwcfg["mode"].lower()
        self.hwcfg["depth"] = hwcfg["depth"]
        self.hwcfg["depth_ismul"] = hwcfg["depth_ismul"]
        self.hwcfg["rng"] = hwcfg["rng"].lower()
        self.hwcfg["dimr"] = hwcfg["dimr"]

        # Software config is fixed to float for this hybrid cell.
        self.swcfg = {}
        self.swcfg["btype"] = torch.float
        self.swcfg["rtype"] = torch.float
        self.swcfg["stype"] = torch.float

        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bias = bias
        self.weight_f = weight_ext_f
        self.bias_f = bias_ext_f
        self.weight_n = weight_ext_n
        self.bias_n = bias_ext_n

        # Separate config for the output ProgError monitor (scale forced to 1);
        # deep-copied so the shared hwcfg is not mutated.
        self.hwcfg_ope = copy.deepcopy(self.hwcfg)
        self.hwcfg_ope["scale"] = 1

    @autocast()
    def forward(self, input: Tensor, hx: Tensor) -> Tensor:
        """Stream the binary input/hidden through a fresh FSUMGUCell.

        A new FSUMGUCell is built per call (hx is baked in as hx_buffer);
        input and hx are converted to bitstreams, driven for 2**width
        cycles, and the accumulated ProgError value is returned.
        """
        if hx is None:
            hx = torch.zeros(input.size()[0], self.hidden_size, dtype=input.dtype, device=input.device)

        rnncell = FSUMGUCell(self.input_size, self.hidden_size, bias=self.bias, weight_ext_f=self.weight_f, bias_ext_f=self.bias_f, weight_ext_n=self.weight_n, bias_ext_n=self.bias_n, hx_buffer=hx, hwcfg=self.hwcfg, swcfg=self.swcfg).to(input.device)

        # Bitstream generators for input and hidden state.
        iSource = BinGen(input, self.hwcfg, self.swcfg)().to(input.device)
        iRNG = RNG(self.hwcfg, self.swcfg)().to(input.device)
        iBSG = BSGen(iSource, iRNG, self.swcfg).to(input.device)

        hSource = BinGen(hx, self.hwcfg, self.swcfg)().to(input.device)
        hRNG = RNG(self.hwcfg, self.swcfg)().to(input.device)
        hBSG = BSGen(hSource, hRNG, self.swcfg).to(input.device)

        # Progressive-error monitor accumulating the output value.
        oPE = ProgError(torch.zeros(input.size()[0], self.hidden_size, dtype=input.dtype, device=input.device), self.hwcfg_ope).to(input.device)

        # Drive the unary cell for one full bitstream (2**width cycles).
        for c in range(2**self.hwcfg["width"]):
            idx = torch.zeros(iSource.size(), dtype=torch.long, device=input.device)
            iBS = iBSG(idx + c)
            hdx = torch.zeros(hSource.size(), dtype=torch.long, device=input.device)
            hBS = hBSG(hdx + c)
            oBS = rnncell(iBS, hBS)
            oPE.Monitor(oBS)

        hy = oPE()[0]
        return hy


class HardMGUCell(torch.nn.Module):
    """
    This is a minimal gated unit by replacing sigmoid and tanh with hubhardsigmoid and hubhardtanh if hard is set to True.
    Refer to "Simplified Minimal Gated Unit Variations for Recurrent Neural Networks"
    and "Gate-Variants of Gated Recurrent Unit (GRU) Neural Networks" for more details.
    This module is fully unary computing aware, i.e., all intermediate data are bounded to the legal unary range.
    This module follows the uBrain implementation style to maximize hardware reuse.
    This module assigns batch to dim[0].
    This module applies floating-point data.
    """
    def __init__(self, input_size: int, hidden_size: int, bias: bool = True, hard: bool = True) -> None:
        super(HardMGUCell, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bias = bias
        self.hard = hard
        # hard=True selects the hardware-friendly activations.
        if hard == True:
            self.fg_sigmoid = HUBHardsigmoid()
            self.ng_tanh = HUBHardtanh()
        else:
            self.fg_sigmoid = nn.Sigmoid()
            self.ng_tanh = nn.Tanh()

        # Gate weights over the concatenated (hidden, input) vector.
        self.weight_f = nn.Parameter(torch.empty((hidden_size, hidden_size + input_size)))
        self.weight_n = nn.Parameter(torch.empty((hidden_size, hidden_size + input_size)))
        if bias:
            self.bias_f = nn.Parameter(torch.empty(hidden_size))
            self.bias_n = nn.Parameter(torch.empty(hidden_size))
        else:
            self.register_parameter('bias_f', None)
            self.register_parameter('bias_n', None)
        self.reset_parameters()

    def reset_parameters(self):
        # Truncated-normal init with std = 1/sqrt(hidden_size).
        stdv = 1.0 / math.sqrt(self.hidden_size)
        for weight in self.parameters():
            weight.data = truncated_normal(weight, mean=0, std=stdv)

    def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tensor:
        """One cell step; hx defaults to zeros on first call."""
        if hx is None:
            hx = torch.zeros(input.size()[0], self.hidden_size, dtype=input.dtype, device=input.device)

        # forget gate
        self.fg_ug_in = torch.cat((hx, input), 1)
        # HUBHardtanh clamps the pre-activation into the legal unary range.
        self.fg_in = HUBHardtanh()(F.linear(self.fg_ug_in, self.weight_f, self.bias_f))
        self.fg = self.fg_sigmoid(self.fg_in)

        # new gate
        self.fg_hx = self.fg * hx
        self.ng_ug_in = torch.cat((self.fg_hx, input), 1)
        self.ng = self.ng_tanh(F.linear(self.ng_ug_in, self.weight_n, self.bias_n))

        # output: hy = ng - fg*ng + fg*hx, clamped to the unary range
        self.fg_ng = self.fg * self.ng
        self.fg_ng_inv = 0 - self.fg_ng
        hy = HUBHardtanh()(self.ng + self.fg_ng_inv + self.fg_hx)
        return hy


class HardMGUCellFXP(torch.nn.Module):
    """
    This is a minimal gated unit by replacing sigmoid and tanh with hubhardsigmoid and hubhardtanh if hard is set to True.
    Refer to "Simplified Minimal Gated Unit Variations for Recurrent Neural Networks"
    and "Gate-Variants of Gated Recurrent Unit (GRU) Neural Networks" for more details.
    This module is fully unary computing aware, i.e., all intermediate data are bounded to the legal unary range.
    This module follows the uBrain implementation style to maximize hardware reuse.
    This module assigns batch to dim[0].
    This module applies fixed-point quantization using 'intwidth' and 'fracwidth'.
    """
    def __init__(self, input_size: int, hidden_size: int, bias: bool = True, hard: bool = True, intwidth=3, fracwidth=4) -> None:
        super(HardMGUCellFXP, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bias = bias
        self.hard = hard
        # Fixed-point rounding operator applied to every operand below.
        self.trunc = Round(intwidth=intwidth, fracwidth=fracwidth)
        if hard == True:
            self.fg_sigmoid = HUBHardsigmoid()
            self.ng_tanh = HUBHardtanh()
        else:
            self.fg_sigmoid = nn.Sigmoid()
            self.ng_tanh = nn.Tanh()

        self.weight_f = nn.Parameter(torch.empty((hidden_size, hidden_size + input_size)))
        self.weight_n = nn.Parameter(torch.empty((hidden_size, hidden_size + input_size)))
        if bias:
            self.bias_f = nn.Parameter(torch.empty(hidden_size))
            self.bias_n = nn.Parameter(torch.empty(hidden_size))
        else:
            self.register_parameter('bias_f', None)
            self.register_parameter('bias_n', None)
        self.reset_parameters()

    def reset_parameters(self):
        # Truncated-normal init with std = 1/sqrt(hidden_size).
        stdv = 1.0 / math.sqrt(self.hidden_size)
        for weight in self.parameters():
            weight.data = truncated_normal(weight, mean=0, std=stdv)

    def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tensor:
        """Same dataflow as HardMGUCell, but every operand is quantized."""
        if hx is None:
            hx = torch.zeros(input.size()[0], self.hidden_size, dtype=input.dtype, device=input.device)

        # forget gate
        self.fg_ug_in = torch.cat((self.trunc(hx), self.trunc(input)), 1)
        self.fg_in = HUBHardtanh()(F.linear(self.trunc(self.fg_ug_in), self.trunc(self.weight_f), self.trunc(self.bias_f)))
        self.fg = self.fg_sigmoid(self.trunc(self.fg_in))

        # new gate
        self.fg_hx = self.trunc(self.fg) * self.trunc(hx)
        self.ng_ug_in = torch.cat((self.trunc(self.fg_hx), self.trunc(input)), 1)
        self.ng = self.ng_tanh(self.trunc(F.linear(self.trunc(self.ng_ug_in), self.trunc(self.weight_n), self.trunc(self.bias_n))))

        # output
        self.fg_ng = self.trunc(self.fg) * self.trunc(self.ng)
        self.fg_ng_inv = 0 - self.trunc(self.fg_ng)
        hy = HUBHardtanh()(self.trunc(self.ng) + self.trunc(self.fg_ng_inv) + self.trunc(self.fg_hx))
        return hy
42.17971
169
0.604178
1,945
14,552
4.3491
0.117738
0.04043
0.023052
0.023643
0.72148
0.661662
0.619459
0.593924
0.556803
0.520156
0
0.005937
0.270753
14,552
344
170
42.302326
0.79118
0.12122
0
0.494253
0
0
0.065945
0
0
0
0
0
0.019157
1
0.045977
false
0
0.057471
0
0.1341
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c1786868690cfefd20cc751e90ea8b47ab54a55a
323
py
Python
app/__init__.py
uwase-diane/NewsAPI
49acb4e22f623818b5ee63f7b545f698953e2387
[ "Unlicense" ]
null
null
null
app/__init__.py
uwase-diane/NewsAPI
49acb4e22f623818b5ee63f7b545f698953e2387
[ "Unlicense" ]
null
null
null
app/__init__.py
uwase-diane/NewsAPI
49acb4e22f623818b5ee63f7b545f698953e2387
[ "Unlicense" ]
null
null
null
from flask import Flask from .config import DevConfig from flask_bootstrap import Bootstrap app = Flask(__name__,instance_relative_config= True) app.config.from_object(DevConfig) app.config.from_pyfile('config.py') # Initializing Flask Extensions bootstrap = Bootstrap(app) from app import views from app import error
20.1875
52
0.817337
45
323
5.666667
0.4
0.070588
0.101961
0
0
0
0
0
0
0
0
0
0.117647
323
16
53
20.1875
0.894737
0.089783
0
0
0
0
0.030717
0
0
0
0
0
0
1
0
false
0
0.555556
0
0.555556
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
2
c17afdf4c22417a1397dcd7e27fcec9b2af92696
3,505
py
Python
bentosign/bentosign_object.py
BentoSign/python_bentosign
f1f00b1c84af353c3945e1ed09c8460ee836d1be
[ "MIT" ]
null
null
null
bentosign/bentosign_object.py
BentoSign/python_bentosign
f1f00b1c84af353c3945e1ed09c8460ee836d1be
[ "MIT" ]
null
null
null
bentosign/bentosign_object.py
BentoSign/python_bentosign
f1f00b1c84af353c3945e1ed09c8460ee836d1be
[ "MIT" ]
null
null
null
import json

import requests

from .bentosign_errors import BentoSignError


# A BentoSignObject is a dictionary where ``object.key = value`` is a
# shortcut for ``object[key] = value``.
class BentoSignObject(dict):
    """Dict-backed REST resource with attribute-style key access.

    Subclasses override :meth:`get_class_url` to point at their endpoint;
    the classmethods below perform the HTTP calls and hydrate instances
    from the JSON payloads. Any non-200 response raises BentoSignError.
    """

    def __init__(self):
        super(BentoSignObject, self).__init__()

    # Define __getattr__, __setattr__ and __delattr__, so that
    # object.name becomes an alias to object['name'].
    # Names starting with '_' are kept as real attributes.
    def __getattr__(self, key):
        if key[0] == '_':
            raise AttributeError('No such attribute: ' + key)
        if key in self:
            return self[key]
        else:
            raise AttributeError('No such attribute: ' + key)

    def __setattr__(self, key, value):
        if key[0] == '_':
            return super(BentoSignObject, self).__setattr__(key, value)
        self[key] = value

    def __delattr__(self, key):
        if key[0] == '_':
            return super(BentoSignObject, self).__delattr__(key)
        if key in self:
            del self[key]
        else:
            raise AttributeError('No such attribute: ' + key)

    @classmethod
    def get_base_url(cls):
        """Return the API root all resource URLs hang off."""
        return 'http://localhost:5000/api/v1.0/'

    @classmethod
    def get_class_url(cls):
        """Return this resource's endpoint URL; subclasses must override."""
        raise NotImplementedError()

    @classmethod
    def get(cls, id):
        """Fetch one object by *id* via HTTP GET and return an instance."""
        url = cls.get_class_url() + '/' + id
        response = requests.get(url)
        cls._process_response_code('GET', url, response)
        # Create an instance from the JSON 'object' payload.
        obj = cls()
        payload = json.loads(response.content)
        obj.load_object_from_data(payload['object'])
        return obj

    @classmethod
    def find(cls, **params):
        """Query the collection (HTTP GET with query params); return a list."""
        url = cls.get_class_url()
        response = requests.get(url, params=params)
        cls._process_response_code('GET', url, response)
        # Create instances from the JSON 'objects' list.
        results = []
        payload = json.loads(response.content)
        for object_data in payload['objects']:
            obj = cls()
            obj.load_object_from_data(object_data)
            results.append(obj)
        return results

    @classmethod
    def create(cls, **params):
        """Create a new object via HTTP POST; return the created instance."""
        url = cls.get_class_url()
        response = requests.post(url, data=params)
        cls._process_response_code('POST', url, response)
        payload = json.loads(response.content)
        obj = cls()
        obj.load_object_from_data(payload['object'])
        return obj

    @classmethod
    def delete(cls, id):
        """Delete the object with the given *id* via HTTP DELETE."""
        url = cls.get_class_url() + '/' + id
        response = requests.delete(url)
        cls._process_response_code('DELETE', url, response)

    def load_object_from_data(self, data):
        """Copy every key/value pair of *data* into this object."""
        for key, value in data.items():
            self[key] = value

    @classmethod
    def _process_response_code(cls, method, url, response):
        """Raise BentoSignError for any non-200 response.

        Uses the server's structured error payload when one is present,
        otherwise a generic "<METHOD> <url> returned <code>" message.
        """
        if response.status_code != 200:
            payload = json.loads(response.content)
            error_message = "%s %s returned %d" % (method, url, response.status_code)
            if payload:
                error = payload.get('error', None)
                if error:
                    # BUG FIX: ``error`` is a plain dict decoded from JSON, so
                    # the original attribute access (error.code / error.message)
                    # always raised AttributeError; use item access instead.
                    error_message = "BentoSignError %d: %s" % (error['code'], error['message'])
            raise BentoSignError(error_message)
31.294643
103
0.607133
405
3,505
5.012346
0.212346
0.023645
0.027094
0.027586
0.538424
0.415764
0.344828
0.256158
0.180296
0.133005
0
0.004821
0.289872
3,505
111
104
31.576577
0.810767
0.136662
0
0.461538
0
0
0.056754
0
0
0
0
0
0
1
0.153846
false
0
0.038462
0.012821
0.294872
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c17b28f08989d2fa2b75ad5040f70d15e7571b14
2,842
py
Python
OpenThermML/backup.py
wwlorey/open-thermostat-software
a0521b0d3b65fe9f2bd23f5059971d3a8d773e54
[ "MIT" ]
1
2020-12-14T02:44:10.000Z
2020-12-14T02:44:10.000Z
OpenThermML/backup.py
wwlorey/open-thermostat-software
a0521b0d3b65fe9f2bd23f5059971d3a8d773e54
[ "MIT" ]
null
null
null
OpenThermML/backup.py
wwlorey/open-thermostat-software
a0521b0d3b65fe9f2bd23f5059971d3a8d773e54
[ "MIT" ]
1
2020-12-12T20:24:43.000Z
2020-12-12T20:24:43.000Z
# Backed-up prediction state: a 7 x 48 grid of values plus a matching
# grid of sample counts. Presumably 7 days x 48 half-hour slots — TODO
# confirm against the code that reads this module.

_N_ROWS = 7
_N_COLS = 48

# All predictions start at 0.0; rows are independent lists.
predictions = [[0.0] * _N_COLS for _ in range(_N_ROWS)]

# All counts are zero except a single cell holding 7 samples.
prediction_counts = [[0] * _N_COLS for _ in range(_N_ROWS)]
prediction_counts[0][25] = 7
149.578947
247
0.364532
1,011
2,842
1.023739
0.004946
1.940097
2.901449
3.857005
0.972947
0.972947
0.972947
0.972947
0.972947
0.972947
0
0.481836
0.263899
2,842
18
248
157.888889
0.012906
0
0
0.722222
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
1
null
1
1
1
1
1
1
1
1
1
0
1
0
0
0
0
1
1
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
14
c17bdf43121bff61f06c96f3d74874b39434a71f
267
py
Python
app1/callbacks.py
DrGFreeman/multi-dash
377788c4404a9f7b59ba0dbfb697b4db074fdbd4
[ "Unlicense" ]
null
null
null
app1/callbacks.py
DrGFreeman/multi-dash
377788c4404a9f7b59ba0dbfb697b4db074fdbd4
[ "Unlicense" ]
null
null
null
app1/callbacks.py
DrGFreeman/multi-dash
377788c4404a9f7b59ba0dbfb697b4db074fdbd4
[ "Unlicense" ]
null
null
null
from datetime import datetime from dash.dependencies import Input, Output from app1 import app @app.callback(Output('placeholder', 'children'), [Input('update_button', 'n_clicks')]) def update(n_clicks): return datetime.now().strftime('%H:%M:%S')
26.7
51
0.700375
35
267
5.257143
0.657143
0.076087
0
0
0
0
0
0
0
0
0
0.004425
0.153558
267
10
52
26.7
0.809735
0
0
0
0
0
0.179104
0
0
0
0
0
0
1
0.142857
false
0
0.428571
0.142857
0.714286
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
1
1
0
0
3
c17c5ab864e3da04534b047a476281e0e31c2fdb
108
py
Python
main.py
Aayushpatil77/Gryphon_Web_Crawler
ee52fe2b353cc040e662056d7888ea420c1a49fa
[ "MIT" ]
null
null
null
main.py
Aayushpatil77/Gryphon_Web_Crawler
ee52fe2b353cc040e662056d7888ea420c1a49fa
[ "MIT" ]
24
2021-11-12T20:33:04.000Z
2021-11-24T14:34:31.000Z
main.py
Aayushpatil77/Gryphon_Web_Crawler
ee52fe2b353cc040e662056d7888ea420c1a49fa
[ "MIT" ]
5
2021-11-10T18:04:23.000Z
2022-02-22T04:38:16.000Z
from django.http import HttpResponse def test(): return HttpResponse("Hello, world. #Homepage") test()
18
50
0.731481
13
108
6.076923
0.846154
0
0
0
0
0
0
0
0
0
0
0
0.148148
108
6
51
18
0.858696
0
0
0
0
0
0.211009
0
0
0
0
0
0
1
0.25
true
0
0.25
0.25
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
1
1
0
0
6
c17d194b756e7fa590502bf845583e715519cb01
460
py
Python
app/career/serializers/weekly_business.py
TIHLDE/Lepton
60ec0793381f1c1b222f305586e8c2d4345fb566
[ "MIT" ]
7
2021-03-04T18:49:12.000Z
2021-03-08T18:25:51.000Z
app/career/serializers/weekly_business.py
TIHLDE/Lepton
60ec0793381f1c1b222f305586e8c2d4345fb566
[ "MIT" ]
251
2021-03-04T19:19:14.000Z
2022-03-31T14:47:53.000Z
app/career/serializers/weekly_business.py
tihlde/Lepton
5cab3522c421b76373a5c25f49267cfaef7b826a
[ "MIT" ]
3
2021-10-05T19:03:04.000Z
2022-02-25T13:32:09.000Z
from app.career.models import WeeklyBusiness from app.common.serializers import BaseModelSerializer class WeeklyBusinessSerializer(BaseModelSerializer): class Meta: model = WeeklyBusiness fields = ( "id", "created_at", "updated_at", "image", "image_alt", "business_name", "body", "year", "week", ) validators = []
23
54
0.523913
34
460
6.970588
0.764706
0.059072
0
0
0
0
0
0
0
0
0
0
0.384783
460
19
55
24.210526
0.837456
0
0
0
0
0
0.132609
0
0
0
0
0
0
1
0
false
0
0.117647
0
0.235294
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c17d66b90ac3507c48eac335d807b7e734538bbd
3,401
py
Python
emovix-twitter-detectlang.py
eMOVIX/emovix-twitter-detectlang
d4ad5a9845b9b98fec70490c81481390a24a9af1
[ "Apache-2.0" ]
null
null
null
emovix-twitter-detectlang.py
eMOVIX/emovix-twitter-detectlang
d4ad5a9845b9b98fec70490c81481390a24a9af1
[ "Apache-2.0" ]
null
null
null
emovix-twitter-detectlang.py
eMOVIX/emovix-twitter-detectlang
d4ad5a9845b9b98fec70490c81481390a24a9af1
[ "Apache-2.0" ]
null
null
null
__author__ = 'Jordi Vilaplana' from pymongo import MongoClient import detectlanguage import json import logging import time logging.basicConfig( filename='emovix_twitter_detectlang.log', level=logging.WARNING, format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s', datefmt='%d-%m-%y %H:%M') # Configuration parameters detectlanguage_api_key = "" database_host = "" database_name = "" twitterStatusCol = "" client = None db = None if __name__ == '__main__': logging.debug('emovix_twitter_detectlang.py starting ...') # Load configuration with open('config.json', 'r') as f: config = json.load(f) detectlanguage_api_key = config['detectlanguage_api_key'] database_host = config['database_host'] database_name = config['database_name'] twitterStatusCol = config['source_box'] + "_twitterStatus" client = MongoClient('mongodb://' + database_host + ':27017/') db = client[database_name] detectlanguage.configuration.api_key = detectlanguage_api_key while True: try: if detectlanguage.user_status()['requests'] >= detectlanguage.user_status()['daily_requests_limit']: logging.debug("Number of requests over daily limit.") time.sleep(60) statuses = db[twitterStatusCol].find({ "language_detections.language": { "$exists": False } }) if statuses: count = 0 batch_request = [] batch_status = [] for twitterStatus in statuses: if count >= 500: logging.debug("Processing batch ...") detections = detectlanguage.detect(batch_request) if len(detections) != 500: logging.error("ABNORMAL NUMBER OF LANGUAGE DETECTIONS: " + str(len(detections))) break count = 0 for detection in detections: if len(detection) == 0: detection = {} detection['source'] = 'detectlanguage' detection['language'] = '' batch_status[count]['language_detections'] = [] batch_status[count]['language_detections'].append(detection) else: detection[0]['source'] = 'detectlanguage' batch_status[count]['language_detections'] = [] batch_status[count]['language_detections'].append(detection[0]) db[twitterStatusCol].update( { "_id": 
batch_status[count]['_id']}, batch_status[count], upsert=True) count += 1 count = 0 batch_request = [] batch_status = [] text = twitterStatus['text'].encode('utf-8') batch_request.append(text) batch_status.append(twitterStatus) count += 1 except Exception as e: # Oh well, just keep going logging.error(e.__class__) logging.error(e) continue except KeyboardInterrupt: break
36.569892
128
0.527786
292
3,401
5.931507
0.390411
0.057159
0.055427
0.055427
0.166282
0.12933
0.095843
0.095843
0.095843
0.095843
0
0.011671
0.370185
3,401
92
129
36.967391
0.796919
0.019994
0
0.180556
0
0
0.167318
0.032142
0
0
0
0
0
1
0
false
0
0.069444
0
0.069444
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c17d75beb39c3d0157047d3515d20edb8d4e90a4
1,178
py
Python
src/azure-cli/azure/cli/command_modules/storage/operations/queue.py
YuanyuanNi/azure-cli
63844964374858bfacd209bfe1b69eb456bd64ca
[ "MIT" ]
3,287
2016-07-26T17:34:33.000Z
2022-03-31T09:52:13.000Z
src/azure-cli/azure/cli/command_modules/storage/operations/queue.py
YuanyuanNi/azure-cli
63844964374858bfacd209bfe1b69eb456bd64ca
[ "MIT" ]
19,206
2016-07-26T07:04:42.000Z
2022-03-31T23:57:09.000Z
src/azure-cli/azure/cli/command_modules/storage/operations/queue.py
YuanyuanNi/azure-cli
63844964374858bfacd209bfe1b69eb456bd64ca
[ "MIT" ]
2,575
2016-07-26T06:44:40.000Z
2022-03-31T22:56:06.000Z
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- from knack.log import get_logger logger = get_logger(__name__) def list_queues(client, include_metadata=False, marker=None, num_results=None, prefix=None, show_next_marker=None, **kwargs): from ..track2_util import list_generator generator = client.list_queues(name_starts_with=prefix, include_metadata=include_metadata, results_per_page=num_results, **kwargs) pages = generator.by_page(continuation_token=marker) result = list_generator(pages=pages, num_results=num_results) if show_next_marker: next_marker = {"nextMarker": pages.continuation_token} result.append(next_marker) else: if pages.continuation_token: logger.warning('Next Marker:') logger.warning(pages.continuation_token) return result
42.071429
94
0.60017
121
1,178
5.570248
0.487603
0.074184
0.097923
0
0
0
0
0
0
0
0
0.00103
0.175722
1,178
27
95
43.62963
0.6931
0.285229
0
0
0
0
0.026284
0
0
0
0
0
0
1
0.058824
false
0
0.117647
0
0.235294
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c17dc41b3022979fe55065b808584b80b7482833
8,793
py
Python
tests/getl/blocks/load/test_load_entrypoint.py
husqvarnagroup/GETL
37251abf12bac2efed1fe463b09a288d85969141
[ "MIT" ]
8
2020-06-10T09:00:17.000Z
2021-06-07T18:02:19.000Z
tests/getl/blocks/load/test_load_entrypoint.py
husqvarnagroup/GETL
37251abf12bac2efed1fe463b09a288d85969141
[ "MIT" ]
5
2020-07-03T10:39:25.000Z
2021-08-30T14:52:47.000Z
tests/getl/blocks/load/test_load_entrypoint.py
husqvarnagroup/GETL
37251abf12bac2efed1fe463b09a288d85969141
[ "MIT" ]
1
2020-05-28T07:53:48.000Z
2020-05-28T07:53:48.000Z
"""Unit test for GETL load method.""" from os import environ from unittest.mock import Mock from pyspark.sql import types as T from getl.blocks.load.entrypoint import ( batch_csv, batch_delta, batch_json, batch_xml, resolve, ) # TODO: Need to adapt to different xml version depending on spark version environ[ "PYSPARK_SUBMIT_ARGS" ] = "--packages com.databricks:spark-xml_2.11:0.9.0 pyspark-shell" SCHEMA = T.StructType( [ T.StructField("name", T.StringType(), True), T.StructField("empid", T.IntegerType(), True), T.StructField("happy", T.BooleanType(), True), T.StructField("sad", T.BooleanType(), True), T.StructField("extra", T.BooleanType(), True), ] ) # FUNCTIONS def test_batch_json(spark_session, helpers): """batch_json should be able to load json files to a dataframe.""" # Arrange helpers.create_s3_files({"schema.json": SCHEMA.json()}) conf = helpers.create_block_conf( "", { "Path": helpers.relative_path(__file__, "./data/sample.json"), "JsonSchemaPath": "s3://tmp-bucket/schema.json", "Alias": "alias", }, ) # Act result_df = resolve(batch_json, conf) # Assert assert result_df.collect()[0][0] == "Mark Steelspitter" assert result_df.collect()[1][0] == "Mark Two" assert result_df.collect()[2][1] == 11 assert result_df.count() == 3 def test_batch_json_multiLine_options(spark_session, helpers): helpers.create_s3_files({"schema.json": SCHEMA.json()}) conf = helpers.create_block_conf( "", { "Path": helpers.relative_path(__file__, "./data/sample_multiline.json"), "JsonSchemaPath": "s3://tmp-bucket/schema.json", "Alias": "alias", "Options": {"multiLine": True}, }, ) # Act result_df = resolve(batch_json, conf) # Assert assert result_df.collect()[0][0] == "Mark Steelspitter" assert result_df.collect()[1][0] == "Mark Two" assert result_df.collect()[2][1] == 11 assert result_df.count() == 3 def test_batch_json_fileregistry(spark_session, helpers): """batch_json should be able to load json files with file registry.""" # Arrange file_path = helpers.relative_path(__file__, 
"./data/sample.json") file_registry_mock = Mock() file_registry_mock.get.return_value.load.return_value = [file_path] helpers.create_s3_files({"schema.json": SCHEMA.json()}) conf = helpers.create_block_conf( "", { "Path": "base_path", "JsonSchemaPath": "s3://tmp-bucket/schema.json", "FileRegistry": "SuperReg", }, file_registry=file_registry_mock, ) # Act result_df = resolve(batch_json, conf) # Assert assert result_df.collect()[0][0] == "Mark Steelspitter" assert result_df.count() == 3 file_registry_mock.get.assert_called_with("SuperReg") file_registry_mock.get.return_value.load.assert_called_with("base_path", ".json") def test_batch_json_no_schema(spark_session, helpers): """batch_json should be able to load json files and inferSchema.""" # Arrange conf = helpers.create_block_conf( "", { "Path": helpers.relative_path(__file__, "./data/sample.json"), "Alias": "alias", }, ) # Act result_df = resolve(batch_json, conf) # Assert assert result_df.collect()[0][0] == 9 assert result_df.collect()[1][3] == "Mark Two" assert not result_df.collect()[2][2] assert result_df.count() == 3 def test_batch_xml(spark_session, helpers): """Check if the batch_xml loader can load XML documents.""" helpers.create_s3_files({"schema.xml": SCHEMA.json()}) conf = helpers.create_block_conf( "", { "Path": helpers.relative_path(__file__, "./data/employee.xml"), "JsonSchemaPath": "s3://tmp-bucket/schema.xml", "RowTag": "employee", }, ) # Act result_df = resolve(batch_xml, conf) # Assert assert result_df.collect()[0][0] == "name1" assert result_df.count() == 3 def test_batch_xml_no_schema(spark_session, helpers): """Test batch_xml can load XML doc without a given schema.""" conf = helpers.create_block_conf( "", { "Path": helpers.relative_path(__file__, "./data/employee.xml"), "RowTag": "employee", }, ) # Act result_df = resolve(batch_xml, conf) # Assert assert result_df.collect()[0][0] == 123 assert result_df.collect()[1][2] == "name2" assert result_df.collect()[2][1] == "false" assert 
result_df.count() == 3 def test_batch_xml_batching(spark_session, helpers): """Check if the batch_xml loader can load XML documents.""" helpers.create_s3_files({"schema.xml": SCHEMA.json()}) conf = helpers.create_block_conf( "", { "Path": [ helpers.relative_path(__file__, "./data/employee.xml"), helpers.relative_path(__file__, "./data/employee_2.xml"), ], "JsonSchemaPath": "s3://tmp-bucket/schema.xml", "RowTag": "employee", }, ) # Act result_df = resolve(batch_xml, conf) # Assert assert result_df.collect()[0][0] == "name1" assert result_df.count() == 4 def test_batch_xml_batching_new_column(spark_session, helpers): """Check if the batch_xml loader can load XML documents.""" helpers.create_s3_files({"schema.xml": SCHEMA.json()}) conf = helpers.create_block_conf( "", { "Path": [ helpers.relative_path(__file__, "./data/employee.xml"), helpers.relative_path(__file__, "./data/employee_2.xml"), helpers.relative_path(__file__, "./data/employee_3.xml"), ], "JsonSchemaPath": "s3://tmp-bucket/schema.xml", "RowTag": "employee", }, ) # Act result_df = resolve(batch_xml, conf) # Assert assert result_df.collect()[4][3] is False assert result_df.count() == 5 def test_batch_xml_fileregistry(spark_session, helpers): """Check if the batch_xml loader can load XML documents with a file registry.""" file_path = helpers.relative_path(__file__, "./data/employee.xml") file_registry_mock = Mock() file_registry_mock.get.return_value.load.return_value = [file_path] helpers.create_s3_files({"schema.xml": SCHEMA.json()}) conf = helpers.create_block_conf( "", { "Path": "base_path", "JsonSchemaPath": "s3://tmp-bucket/schema.xml", "RowTag": "employee", "FileRegistry": "SuperReg", }, file_registry=file_registry_mock, ) # Act result_df = resolve(batch_xml, conf) # Assert assert result_df.collect()[0][0] == "name1" assert result_df.count() == 3 file_registry_mock.get.assert_called_with("SuperReg") file_registry_mock.get.return_value.load.assert_called_with("base_path", ".xml") def 
test_batch_csv(spark_session, helpers): conf = helpers.create_block_conf( "", { "Path": helpers.relative_path(__file__, "./data/sample.csv"), "Options": {"inferSchema": True, "header": True}, }, ) # Act result_df = resolve(batch_csv, conf) # Assert data = result_df.collect() assert data[0]["name"] == "Mark Steelspitter" assert data[0]["empid"] == 9 assert data[0]["happy"] is True assert data[2]["name"] == "Mark Second" assert data[2]["empid"] == 11 assert data[2]["happy"] is False assert result_df.count() == 3 def test_batch_delta(spark_session, helpers): conf = helpers.create_block_conf( "", { "Path": helpers.relative_path(__file__, "./data/sample-delta"), "Options": {"inferSchema": True, "header": True}, }, ) # Act result_df = resolve(batch_delta, conf) # Assert data = result_df.collect() assert data[0]["name"] == "Mark Steelspitter" assert data[0]["empid"] == 9 assert data[0]["happy"] is True assert data[2]["name"] == "Mark Second" assert data[2]["empid"] == 11 assert data[2]["happy"] is False assert result_df.count() == 3 def test_batch_delta_no_files(spark_session, helpers): conf = helpers.create_block_conf( "", { "Path": helpers.relative_path(__file__, "./data/sample-delta-nofiles"), "Options": {"inferSchema": True, "header": True}, }, ) # Act result_df = resolve(batch_delta, conf) # Assert data = result_df.collect() assert len(data) == 0
28.548701
85
0.60787
1,059
8,793
4.78187
0.123702
0.06793
0.074645
0.066351
0.842417
0.809637
0.798381
0.789889
0.770142
0.736572
0
0.014928
0.245764
8,793
307
86
28.641694
0.748643
0.085409
0
0.564593
0
0
0.164282
0.04242
0
0
0
0.003257
0.215311
1
0.057416
false
0
0.019139
0
0.076555
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
c17f0a03fa3b18a4b271f3edc88481c6be2c623b
745
py
Python
arrays/sorting/selection_sort.py
AyoubEssrifi/Data-Structures-Algorithms
d903bccba46cd8f2a35728f47cdfc836d5dfdf1a
[ "MIT" ]
null
null
null
arrays/sorting/selection_sort.py
AyoubEssrifi/Data-Structures-Algorithms
d903bccba46cd8f2a35728f47cdfc836d5dfdf1a
[ "MIT" ]
null
null
null
arrays/sorting/selection_sort.py
AyoubEssrifi/Data-Structures-Algorithms
d903bccba46cd8f2a35728f47cdfc836d5dfdf1a
[ "MIT" ]
null
null
null
import numpy as np import math def selection_sort(arr): """ Performs a selection sort algorithm - Time complexity: O(n²) Args: arr (list): List to sort Returns: (list): Sorted list """ i = 0 while i < len(arr): j = i min = math.inf while j < len(arr): if arr[j] < min: min = arr[j] min_idx = j j += 1 temp = arr[i] arr[i] = arr[min_idx] arr[min_idx] = temp i += 1 return arr # Testing for i in range(100): arr = list(np.random.randint(1,100,100)) sorted_arr = selection_sort(arr) builtin_sort = sorted(arr) if sorted_arr != builtin_sort: print("false")
20.135135
44
0.507383
102
745
3.617647
0.421569
0.105691
0.086721
0
0
0
0
0
0
0
0
0.030435
0.38255
745
36
45
20.694444
0.771739
0.186577
0
0
0
0
0.008772
0
0
0
0
0
0
1
0.043478
false
0
0.086957
0
0.173913
0.043478
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c17f1a7c03757d8295e2618ab5615740ad1e5ea5
6,819
py
Python
dumb_cup/dc.py
Ladvien/ezsp32_upython_env
5bdec9e1d2eabb117941a5ce0e74570757e87740
[ "MIT" ]
null
null
null
dumb_cup/dc.py
Ladvien/ezsp32_upython_env
5bdec9e1d2eabb117941a5ce0e74570757e87740
[ "MIT" ]
null
null
null
dumb_cup/dc.py
Ladvien/ezsp32_upython_env
5bdec9e1d2eabb117941a5ce0e74570757e87740
[ "MIT" ]
1
2020-05-27T13:18:31.000Z
2020-05-27T13:18:31.000Z
import time from machine import I2C, Pin, Timer from dumb_cup.v53l0x import VL53L0X from dumb_cup.adxl345 import ADXL345 from dumb_cup.spirit_level import SpiritLevel ############### # Constants ############### OZ_FULL = const(16) INIT_SAMPES = const(50) NUM_SAMPLES = const(15) RND_PLCS = const(1) DE_BNC_DELAY = const(250) BTN = const(26) DE_BNC_TMR = const(0) CHK_LVL_TMR = const(1) BTN_ACTION_IN_PRG = const(99) BTN_UNPRESSED = const(0) BTN_PRESSED = const(1) BTN_GOT_EMPTY = const(2) BTN_GOT_FULL = const(3) BTN_CALI_IN_PRG_3 = const(4) SCL_PIN = const(21) SDA_PIN = const(22) X_THRESH = const(300) Y_THRESH = const(300) Z_THRESH = const(300) CALI_F_NAME = "calibration.txt" SETTINGS_DIR_NAME = "/dumb_cup" ############### # Methods ############### def map_val(x, in_min, in_max, out_min, out_max): return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min def on_not_level(x: int, y: int, z: int): print("Not level") print('x:', x, 'y:', y, 'z:',z ,'uint:mg') def measure(tof: ADXL345, num_samples: int, round_num: int) -> int: global vol_cof samples = [] # if len(samples) >= NUM_SAMPLES + 1: samples.pop(0) for i in range(num_samples): samples.append(tof.read()) dist = sum(samples) / len(samples) * vol_cof dist = map_val(dist, empty_val, full_val, 0, OZ_FULL) return round(dist, round_num) def blink(led, num: int = 1, delay: int = 200): for i in range(num): led.on() time.sleep_ms(delay) led.off() time.sleep_ms(delay) ################ # Initialization ################ i2c = I2C(scl = Pin(SCL_PIN), sda = Pin(SDA_PIN), freq = 20000, timeout = 2000) # Accelerometer a345 = ADXL345(i2c) dd = SpiritLevel(a345, on_not_level, X_THRESH, Y_THRESH, Z_THRESH) # Time-of-Flight tof = VL53L0X(i2c) tof.set_Vcsel_pulse_period(tof.vcsel_period_type[0], 18) tof.set_Vcsel_pulse_period(tof.vcsel_period_type[1], 14) tof.start() # Blinker led = Pin(2, Pin.OUT) # Main button. 
btn = Pin(BTN, Pin.IN, Pin.PULL_DOWN) ############### # Calibration ############### de_bnc_tmr = Timer(DE_BNC_TMR) de_bnc_flag = False def dnc_timer_expr(timer): global de_bnc_flag de_bnc_flag = False def on_btn(pin): global tof global de_bnc_flag global cali_file global btn_state global btn global led if not de_bnc_flag: # Turn on debounce timer. de_bnc_flag = True de_bnc_tmr.init(mode=Timer.ONE_SHOT, period=DE_BNC_DELAY, callback=dnc_timer_expr) if btn_state == BTN_UNPRESSED: btn_state = BTN_ACTION_IN_PRG # Erase old file. erase_cali() # Let the user know we are getting the # depth of the cup. print("Getting measurements when empty.") blink(led, 5, 300) empty = measure(tof, NUM_SAMPLES, RND_PLCS) fs_write_val("empty", empty) btn_state = BTN_GOT_EMPTY elif btn_state == BTN_GOT_EMPTY: btn_state = BTN_ACTION_IN_PRG # Let the user know we are getting the # depth of the cup. print("Getting measurements when full.") blink(led, 5, 300) full = measure(tof, NUM_SAMPLES, RND_PLCS) fs_write_val("full", full) btn_state = BTN_UNPRESSED elif btn_state == BTN_ACTION_IN_PRG: print("Busy") def erase_cali(): import os filepath = SETTINGS_DIR_NAME + "/" + CALI_F_NAME try: with open(filepath) as f: os.remove(filepath) except: pass def fs_write_val(key: str, value: str): import os filepath = SETTINGS_DIR_NAME + "/" + CALI_F_NAME write_type = "a" try: with open(filepath, write_type) as f: f.write("") except OSError: os.mkdir(SETTINGS_DIR_NAME) write_type = "w" try: with open(filepath, write_type) as f: s = "{}={}\n".format(key, value) f.write(s) except OSError: print("FS error.") def fs_read_cali(): import os filepath = SETTINGS_DIR_NAME + "/" + CALI_F_NAME with open(filepath, "r") as f: return f.readlines() return [] def uninstall(): import os os.chdir("dumb_cup") for item in os.listdir(): print(type(item)) try: os.remove(item) except: os.rmdir(item) btn_state = BTN_UNPRESSED btn.irq(on_btn) ##################### # Volume coefficient ##################### def vol_cof(): lines = 
fs_read_cali() for value in lines: if "empty" in value: empty_val = float(value.split("=")[1][0:-1]) elif "full" in value: full_val = float(value.split("=")[1][0:-1]) return (empty_val, full_val) ##################### # Check Liquid Level ##################### def chk_liq_lvl(timer): global old_lvl global cur_lvl global consumed global tof cur_lvl = measure(tof, NUM_SAMPLES, RND_PLCS) delta = round((cur_lvl - old_lvl), RND_PLCS) consumed += round(delta, RND_PLCS) print("Current: {} Delta: {} Consumed: {}".format(cur_lvl, delta, consumed)) old_lvl = cur_lvl ############### # Calibration ############### empty_val, full_val = vol_cof() volume = (full_val - empty_val) * -1 vol_cof = OZ_FULL / volume print("Initializing liquid gauge...") cur_lvl = measure(tof, INIT_SAMPES, RND_PLCS) old_lvl = cur_lvl consumed = 0 print("Initial liquid level: {}".format(cur_lvl)) chk_liq_lvl_tmr = Timer(CHK_LVL_TMR) chk_liq_lvl_tmr.init(mode=Timer.PERIODIC, period=3000, callback=chk_liq_lvl) # To begin conversion we need a calibration sequence. # 1. Have the user empty the cup and level it on counter. # 2. Have the user fill the cup. # # We could use the empty_reading (-137) and full_reading (-63) to # calculate the linear volume of the cup (74). # # abs(empty_reading) - abs(full_reading) = linear_volume # # Then, we have two routes, we can convert linear_volume into # millimeters. This would become linear_volume_mm. # # We can then get the cup diameter in millimeters (80mm) # and multiply it by the linear_volume_mm, which should give # us total volume. # # V = πr^2h # cubic_mm = (cup_diameter / 2)^2 * π * linear_volume_mm # cubic_mm = (40)^2 * π * 74 # cubic_mm = 1600 * π * 74 # cubic_mm = 5026.54 * 74 # cubic_mm = 371964.57 # # To get ounces, multiply mm3 by 3.3814e-5. # # ounces = 12.5776184143 # ############### # Main Loop ############### while True: dd.calculate()
25.349442
90
0.592022
964
6,819
3.950207
0.267635
0.015756
0.023109
0.014706
0.200105
0.162553
0.13813
0.127101
0.110819
0.059874
0
0.033986
0.266461
6,819
268
91
25.44403
0.727309
0.171579
0
0.210191
0
0
0.046664
0
0
0
0
0
0
1
0.076433
false
0.006369
0.057325
0.006369
0.165605
0.063694
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c17f98ab5a946504638ab093524b788e3de36f90
2,042
py
Python
nsh/asteroids/views.py
kowabunga314/NSH
01dfc56d88e9b3b96e5bed61be24dd1e5080abbc
[ "MIT" ]
null
null
null
nsh/asteroids/views.py
kowabunga314/NSH
01dfc56d88e9b3b96e5bed61be24dd1e5080abbc
[ "MIT" ]
1
2021-08-31T23:10:01.000Z
2021-08-31T23:10:01.000Z
nsh/asteroids/views.py
kowabunga314/NSH
01dfc56d88e9b3b96e5bed61be24dd1e5080abbc
[ "MIT" ]
null
null
null
from requests import api from rest_framework.decorators import api_view from rest_framework.response import Response from asteroids.api import AsteroidApi from asteroids.serializers import ApproachSerializer from clients.neows import NeoWs from django.shortcuts import render @api_view(['GET']) def get_closest_approach(request): # Initialize NeoWs API asteroid_api = AsteroidApi() # Get data from NeoWs API closest_approach_data = asteroid_api.get_closest_approach() # Return closest approach ca_model = ApproachSerializer().create(closest_approach_data) return Response(data=ca_model.__dict__()) @api_view(['GET']) def get_largest_asteroid(request): # Initialize NeoWs API asteroid_api = AsteroidApi() # Get data from NeoWs API largest_approach_data = asteroid_api.get_largest_approach() # Return closest approach ca_model = ApproachSerializer().create(data=largest_approach_data) return Response(data=ca_model.__dict__()) @api_view(['GET']) def get_fastest_asteroid(request): # Initialize NeoWs API asteroid_api = AsteroidApi() # Get data from NeoWs API fastest_approach_data = asteroid_api.get_fastest_approach() # Return closest approach ca_model = ApproachSerializer().create(data=fastest_approach_data) return Response(data=ca_model.__dict__()) @api_view(['GET']) def get_notable_encounters(request): # Initialize NeoWs API asteroid_api = AsteroidApi() # Get data from NeoWs API closest_approach_data = ApproachSerializer().create(asteroid_api.get_closest_approach()) largest_approach_data = ApproachSerializer().create(asteroid_api.get_largest_approach()) fastest_approach_data = ApproachSerializer().create(asteroid_api.get_fastest_approach()) data = { 'closest_approach': closest_approach_data.__dict__(), 'largest_approach': largest_approach_data.__dict__(), 'fastest_approach': fastest_approach_data.__dict__(), } # Return closest approach return Response(data=data)
31.415385
92
0.760039
243
2,042
6
0.144033
0.106996
0.057613
0.035665
0.660494
0.558985
0.558985
0.464335
0.423182
0.335391
0
0
0.15475
2,042
64
93
31.90625
0.844728
0.134672
0
0.297297
0
0
0.034208
0
0
0
0
0
0
1
0.108108
false
0
0.189189
0
0.405405
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c1814a99cc81b34203c9bfed3dc84d50f6f96bd3
1,313
py
Python
shell_test.py
zignig/cqparts_bucket
9707b0948a9dd1ed514e03c291a3b96fddc4a22d
[ "Apache-2.0" ]
10
2018-09-18T08:09:02.000Z
2022-03-18T06:24:22.000Z
shell_test.py
zignig/cqparts-bucket
9707b0948a9dd1ed514e03c291a3b96fddc4a22d
[ "Apache-2.0" ]
1
2018-08-09T01:57:32.000Z
2018-08-09T01:57:32.000Z
shell_test.py
zignig/cqparts-bucket
9707b0948a9dd1ed514e03c291a3b96fddc4a22d
[ "Apache-2.0" ]
1
2018-12-07T20:14:04.000Z
2018-12-07T20:14:04.000Z
""" Shell test # 2019 Simon Kirkby obeygiantrobot@gmail.com """ import cadquery as cq import cqparts from cqparts.params import PositiveFloat, Int from cqparts.display import render_props from cqparts.constraint import Mate from cqparts.utils.geometry import CoordSystem from cqparts.search import register # base shaft type @register(export="misc") class Shell(cqparts.Part): length = PositiveFloat(124, doc="shaft length") diam = PositiveFloat(40, doc="shaft diameter") count = Int(5) def make(self): shft = cq.Workplane("XY").circle(self.diam / 2).extrude(self.length) inc = 360.0 / float(self.count) for i in range(self.count): b = cq.Workplane("XY").circle(self.diam / 4).extrude(self.length / 2) b = b.translate((self.diam / 2, 0, self.length / 8)) b = b.rotate((0, 0, 0), (0, 0, 1), float(i * inc)) shft = shft.union(b) c = cq.Workplane("XY").circle(self.diam / 8).extrude(self.length - 6) c = c.translate((self.diam / 2, 0, 0)) c = c.rotate((0, 0, 0), (0, 0, 1), float(i * inc)) shft = shft.union(c) shft = shft.faces(">Z").shell(-1) return shft if __name__ == "__main__": from cqparts.display import display s = Shell() display(s)
29.840909
81
0.611577
187
1,313
4.245989
0.40107
0.02267
0.02267
0.020151
0.235516
0.187657
0.085642
0.085642
0.085642
0.085642
0
0.038268
0.243717
1,313
43
82
30.534884
0.761329
0.055598
0
0
0
0
0.037338
0
0
0
0
0
0
1
0.033333
false
0
0.266667
0
0.466667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c181b677ca0d130f4ba82570047a76315f9bae27
1,266
py
Python
test/should-work/client.py
dragazo/PyBlox
52e895090708810f4af18da746b90278e4ee82f7
[ "Apache-2.0" ]
2
2021-08-16T14:25:11.000Z
2021-12-11T19:38:15.000Z
test/should-work/client.py
dragazo/PyBlox
52e895090708810f4af18da746b90278e4ee82f7
[ "Apache-2.0" ]
null
null
null
test/should-work/client.py
dragazo/PyBlox
52e895090708810f4af18da746b90278e4ee82f7
[ "Apache-2.0" ]
1
2021-09-24T15:32:49.000Z
2021-09-24T15:32:49.000Z
#!/usr/bin/env python import netsblox editor = netsblox.Editor() phoneiot = editor.phone_iot public_roles = editor.public_roles assert type(phoneiot.get_sensors()) == list assert public_roles.get_public_role_id() == editor.get_public_role_id() assert phoneiot.get_color(12, 34, 54, 34) == 571220534 assert phoneiot.get_color(12, 34, 54) == -15982026 assert phoneiot.get_color(12, 34, blue=54) == -15982026 assert phoneiot.get_color(12, blue=54, green=34) == -15982026 assert type(editor.chart.default_options()) == dict v = editor.hurricane_data.get_hurricane_data('katrina', 2005) assert type(v) == list for x in v: assert type(x) == dict assert netsblox.prep_send(12) == 12 assert netsblox.prep_send(12.5) == 12.5 assert netsblox.prep_send([1, 2, 3]) == [1, 2, 3] assert netsblox.prep_send((1, 2, 3)) == [1, 2, 3] assert netsblox.prep_send({ 'key': 'value' }) == [['key', 'value']] assert netsblox.prep_send({ 'key': { 'more': 'stuff' } }) == [[ 'key', [[ 'more', 'stuff' ]] ]] assert netsblox.prep_send([{ 'a': 1 }, { 'b': 2 }]) == [ [[ 'a', 1 ]], [[ 'b', 2 ]] ] assert netsblox.prep_send(({ 'a': 1 }, { 'b': 2 })) == [ [[ 'a', 1 ]], [[ 'b', 2 ]] ] assert netsblox.prep_send({ (1, 2, 3): 4 }) == [[ [1, 2, 3], 4 ]] assert netsblox.prep_send(None) == ''
38.363636
95
0.636651
193
1,266
4.025907
0.274611
0.18018
0.23166
0.28314
0.467181
0.369369
0.366795
0.20592
0.20592
0.20592
0
0.093173
0.14376
1,266
32
96
39.5625
0.623616
0.015798
0
0
0
0
0.044177
0
0
0
0
0
0.76
1
0
false
0
0.04
0
0.04
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
3
c183f4d6f31778737b79a4e9462050d2a9596ead
872
py
Python
carla_env/__init__.py
janwithb/Carla-Gym-Wrapper
f1ea9fe89427c5a654f5561f214a5fba139b2568
[ "Apache-2.0" ]
6
2021-04-15T09:22:44.000Z
2022-02-15T01:07:23.000Z
carla_env/__init__.py
janwithb/Carla-Gym-Wrapper
f1ea9fe89427c5a654f5561f214a5fba139b2568
[ "Apache-2.0" ]
2
2021-08-23T02:47:40.000Z
2022-01-17T02:20:47.000Z
carla_env/__init__.py
janwithb/Carla-Gym-Wrapper
f1ea9fe89427c5a654f5561f214a5fba139b2568
[ "Apache-2.0" ]
2
2021-07-12T06:32:37.000Z
2021-11-24T14:43:13.000Z
from gym.envs.registration import register register( id='CarlaEnv-state-v1', entry_point='carla_env.carla_env:CarlaEnv', max_episode_steps=500, kwargs={ 'render': True, 'carla_port': 2000, 'changing_weather_speed': 0.1, 'frame_skip': 1, 'observations_type': 'state', 'traffic': True, 'vehicle_name': 'tesla.cybertruck', 'map_name': 'Town05', 'autopilot': True } ) register( id='CarlaEnv-pixel-v1', entry_point='carla_env.carla_env:CarlaEnv', max_episode_steps=500, kwargs={ 'render': True, 'carla_port': 2000, 'changing_weather_speed': 0.1, 'frame_skip': 1, 'observations_type': 'pixel', 'traffic': True, 'vehicle_name': 'tesla.cybertruck', 'map_name': 'Town05', 'autopilot': True } )
23.567568
47
0.579128
94
872
5.117021
0.43617
0.066528
0.074844
0.070686
0.806653
0.806653
0.806653
0.806653
0.806653
0.806653
0
0.041204
0.276376
872
36
48
24.222222
0.721078
0
0
0.727273
0
0
0.396789
0.114679
0
0
0
0
0
1
0
true
0
0.030303
0
0.030303
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
8
c185bdc700271a99b44e838020cbe70d16ab1c2c
5,039
py
Python
utilities/fragtop.py
ondrejholecek/fortimonitor
79b0377f97b084f04396a323a84bb3e8a93d6f2d
[ "BSD-3-Clause" ]
9
2018-10-19T08:47:42.000Z
2020-08-19T01:58:27.000Z
utilities/fragtop.py
ondrejholecek/fortimonitor
79b0377f97b084f04396a323a84bb3e8a93d6f2d
[ "BSD-3-Clause" ]
1
2019-11-07T11:24:43.000Z
2019-11-07T11:52:51.000Z
utilities/fragtop.py
ondrejholecek/fortimonitor
79b0377f97b084f04396a323a84bb3e8a93d6f2d
[ "BSD-3-Clause" ]
3
2020-04-24T03:51:15.000Z
2021-12-19T16:08:24.000Z
#!/usr/bin/env python2.7 import os # to be able to import our modules from the directory above os.sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from parsers.CurrentTime import ParserCurrentTime from parsers.Fragmentation import ParserFragmentation from parsers.ProcessCPU import ParserProcessCPU from _common import ssh, cycle, simple_command_with_timestamp, prepend_timestamp import re import sys import time sshc, args = ssh([ { 'name':'--collect-time', 'type':int, 'default':5, 'help':'How long should each cycle take' }, { 'name':'--history', 'type':int, 'default':45, 'help':'Maximum lines to show (45 by default)' }, { 'name':'--hz', 'type':int, 'default':100, 'help':'CONFIG_HZ of device, do not change' }, { 'name':'--raw', 'default':False, 'action':'store_true', 'help':'Show raw difference (not divided by interval)' }, { 'name':'--no-cpu', 'default':False, 'action':'store_true', 'help':'Do not show CPU usage on each line' }, ], """ """, supports_script=True) def do(sshc, cache, history, hz, raw, show_cpu): frags = ParserFragmentation(sshc).get() usage = ParserProcessCPU(sshc).get([]) etime = ParserCurrentTime(sshc).get() if 'last' not in cache: cache['last'] = { 'collected_on': etime.as_timestamp(), 'frags': frags, 'cpu' : usage, } return time_difference = etime.as_timestamp() - cache['last']['collected_on'] overall_cpus = {} for tmp in ['user', 'system', 'idle', 'iowait', 'irq', 'softirq']: overall_cpus[tmp] = int(round(((usage['global'][tmp] - cache['last']['cpu']['global'][tmp])*100)/(time_difference*hz))) pdiff = {} for p in frags['frags']: if p not in frags['frags']: print >>sys.stderr, 'Error: fragmentation key %s missing in current statistics' % (p,) return elif p not in cache['last']['frags']['frags']: print >>sys.stderr, 'Error: fragmentation key %s missing in previous statistics' % (p,) return if raw: pdiff[p] = frags['frags'][p] - cache['last']['frags']['frags'][p] else: pdiff[p] = int(round((((frags['frags'][p] - 
cache['last']['frags']['frags'][p]))/(time_difference)))) if os.name == 'nt': os.system('cls') print "Packet fragmentation (written by Ondrej Holecek <oholecek@fortinet.com>)" else: print "\x1b[2J\x1b[H\033[1mPacket fragmentation (written by Ondrej Holecek <oholecek@fortinet.com>)\033[0m" filters_applied = "Applied filters: " if raw: filters_applied += "CNTS[raw] " else: filters_applied += "CNTS[diff] " filters_applied += "HIST[%i] " % (history,) print prepend_timestamp("Overall CPU utilization: %3.1f %% user, %3.1f %% system, %3.1f %% idle" % ( overall_cpus['user'], overall_cpus['system'], overall_cpus['idle'], ), etime, 'fragtop') print prepend_timestamp("Overall CPU utilization: %3.1f %% iowait, %3.1f %% irq, %3.1f %% softirq" % ( overall_cpus['iowait'], overall_cpus['irq'], overall_cpus['softirq'], ), etime, 'fragtop') print prepend_timestamp(filters_applied, etime, 'fragtop') prehdr = " | Received fragments reassembly counters | Outgoing fragmentation counters |" if show_cpu: prehdr += " Historical CPU percentage |" print prepend_timestamp(prehdr, etime, 'fragtop') hdr = " %7s | %9s | %9s | %9s | %9s | %9s | %9s | %9s |" % ("history", "fragments", "packets", "timeout", "error", "packets", "fragments", "unable",) if show_cpu: hdr += " %8s | %8s | %8s |" % ("system%", "irq%", "softirq%",) print prepend_timestamp(hdr, etime, 'fragtop') # current line current_line = " %7i " % ( 0, ) for k in ('ReasmReqds', 'ReasmOKs', 'ReasmTimeout', 'ReasmFails', 'FragOKs', 'FragCreates', 'FragFails'): current_line += "| %9i " % (pdiff[k],) current_line += "|" if show_cpu: current_line += " %8i | %8i | %8i |" % (overall_cpus['system'], overall_cpus['irq'], overall_cpus['softirq'],) print prepend_timestamp(current_line, etime, 'fragtop') # older lines for odata in cache['history']: old_line = " %7i " % ( -int(round(etime.as_timestamp()-odata[0])),) for k in ('ReasmReqds', 'ReasmOKs', 'ReasmTimeout', 'ReasmFails', 'FragOKs', 'FragCreates', 'FragFails'): old_line += "| %9i " % 
(odata[1][k],) old_line += "|" if show_cpu: old_line += " %8i | %8i | %8i |" % (odata[2], odata[3], odata[4],) print prepend_timestamp(old_line, etime, 'fragtop') cache['history'].insert(0, (etime.as_timestamp(), pdiff, overall_cpus['system'], overall_cpus['irq'], overall_cpus['softirq'],) ) if len(cache['history']) > history: cache['history'] = cache['history'][:history] cache['last']['frags'] = frags cache['last']['cpu'] = usage cache['last']['collected_on'] = etime.as_timestamp() sys.stdout.flush() return etime if __name__ == '__main__': cache = {'history':[]} try: cycle(do, { 'sshc': sshc, 'cache': cache, 'history': args.history, 'hz': args.hz, 'raw': args.raw, 'show_cpu': not args.no_cpu, }, args.collect_time, cycles_left=[args.max_cycles], debug=args.debug, interactive=args.interactive) except KeyboardInterrupt: sshc.destroy()
39.992063
150
0.653106
655
5,039
4.905344
0.290076
0.04793
0.045752
0.023654
0.280423
0.247744
0.21413
0.191721
0.1108
0.080299
0
0.014943
0.15003
5,039
125
151
40.312
0.735232
0.021036
0
0.090909
0
0.040404
0.338068
0.016437
0
0
0
0
0
0
null
null
0
0.080808
null
null
0.111111
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
c1870c6bef4d20bdc8064b24eaabb94b95f23f4a
161
py
Python
app/admin.py
kirezibana/awards
7f674430e84e84e5fd39106f3af7c42ee2f6b8ac
[ "Unlicense" ]
null
null
null
app/admin.py
kirezibana/awards
7f674430e84e84e5fd39106f3af7c42ee2f6b8ac
[ "Unlicense" ]
null
null
null
app/admin.py
kirezibana/awards
7f674430e84e84e5fd39106f3af7c42ee2f6b8ac
[ "Unlicense" ]
null
null
null
from django.contrib import admin from .models import Project, Profile, Rate admin.site.register(Project) admin.site.register(Profile) admin.site.register(Rate)
23
42
0.813665
23
161
5.695652
0.478261
0.206107
0.389313
0
0
0
0
0
0
0
0
0
0.086957
161
6
43
26.833333
0.891156
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.4
0
0.4
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
c187d975a2a0abd411797bab631c97d079d986a0
3,250
py
Python
SCOTUS/SVE/infer_sco50case.py
JeffT13/LegalUISRNN
a5efdba091746a0e04da9faccdad74b3b4cec74f
[ "Apache-2.0" ]
1
2020-11-17T02:33:28.000Z
2020-11-17T02:33:28.000Z
SCOTUS/SVE/infer_sco50case.py
JeffT13/LegalUISRNN
a5efdba091746a0e04da9faccdad74b3b4cec74f
[ "Apache-2.0" ]
null
null
null
SCOTUS/SVE/infer_sco50case.py
JeffT13/LegalUISRNN
a5efdba091746a0e04da9faccdad74b3b4cec74f
[ "Apache-2.0" ]
null
null
null
''' SCOTUS d-vec UISRNN inference''' import sys sys.path.append("./LegalUISRNN") import numpy as np import os, csv import torch import uisrnn case_path = '/scratch/jt2565/sco50/sco50wav_proc_case/' total_cases = (len(os.listdir(case_path))/2) train_cases = (total_cases//10)*9 print("# of training:", train_cases) print("# total cases:" , total_cases) trn_seq_lst = [] trn_cluster_lst = [] test_seq_lst = [] test_cluster_lst = [] verbose = False if verbose: print("\n", "="*50, "\n Processing case-embedded d-vec") #load 5 case-embedded dvecs (with directory holding raw files) for i, case in enumerate(os.listdir(case_path)): if case[-7:] == 'seq.npy': case_id = case.split('/')[-1].split('_')[0] train_sequence = np.load(case_path+case) train_clus = np.load(case_path+case_id+'_id.npy') train_cluster_id = [] #converts labels to int for inference/testing for j in range(np.shape(train_clus)[0]): if i <= train_cases: train_cluster_id.append(str(train_clus[j])) else: train_cluster_id.append(int(train_clus[j])) if j==(np.shape(train_clus)[0]-1): train_cluster_id = np.asarray(train_cluster_id) if verbose: if i > train_cases: print("-- Stored as test case --") else: print("-- Stored as train case --") print('Processed case:', case_id) print('emb shape:', np.shape(train_sequence)) print('label shape:', np.shape(train_clus)) print('flat label:', np.shape(train_cluster_id)) #add to training or testing list (for multiple cases if i <= train_cases: trn_seq_lst.append(train_sequence) trn_cluster_lst.append(train_cluster_id) else: test_seq_lst.append(train_sequence) test_cluster_lst.append(train_cluster_id) #Define UISRNN (**copy from training**) + load model model_args, training_args, inference_args = uisrnn.parse_arguments() model_args.verbosity=3 #can verbose=False for no prints except training # must match saved model model_args.observation_dim=256 #from hparam model_args.enable_cuda = True model_args.rnn_depth = 2 model_args.rnn_hidden_size = 32 model_args.rnn_dropout = .2 
model_args.crp_alpha = .8 inference_args.test_iteration = 2 inference_args.beam_search = 10 model = uisrnn.UISRNN(model_args) model.load('./sco50wav_250bs10.pth') #inference and evaluation (shrunk for running) pred = model.predict(test_seq_lst[0], inference_args) ans = [i for i in test_cluster_lst[0]] if verbose: print("-- Inference --") print(type(pred), type(pred[0])) print(type(ans), type(ans[0])) print(len(pred), len(ans)) # opening the csv file in 'w+' mode file = open('./predicted_labels.csv', 'w+', newline ='') # writing the data into the file with file: write = csv.writer(file) write.writerows([pred]) tracker=0 for i, p in enumerate(pred): if p!=0: tracker+=1 if tracker>0: print('predicted other than 0! -> ', tracker)
28.761062
74
0.626462
448
3,250
4.339286
0.330357
0.041667
0.057613
0.024691
0.092593
0.030864
0
0
0
0
0
0.020098
0.249846
3,250
113
75
28.761062
0.777276
0.134154
0
0.106667
0
0
0.114725
0.030379
0
0
0
0
0
1
0
false
0
0.066667
0
0.066667
0.186667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c1883470a70c6ef2fb0240606651d61fa3e8412b
4,143
py
Python
evaluation/evaluation_template.py
MaviccPRP/Anonymizer
3d75ed3e97e260b6ded7e188eb3d58d749844e36
[ "MIT" ]
null
null
null
evaluation/evaluation_template.py
MaviccPRP/Anonymizer
3d75ed3e97e260b6ded7e188eb3d58d749844e36
[ "MIT" ]
2
2019-06-14T19:55:39.000Z
2019-06-14T20:16:11.000Z
evaluation/evaluation_template.py
MaviccPRP/Anonymizer
3d75ed3e97e260b6ded7e188eb3d58d749844e36
[ "MIT" ]
1
2020-03-13T14:32:31.000Z
2020-03-13T14:32:31.000Z
# Evaluation script template for de-identification tool https://github.com/dieterich-lab/Anonymize from sklearn.metrics import classification_report from sklearn.metrics import confusion_matrix from sklearn.metrics import roc_curve, auc from sklearn.metrics import matthews_corrcoef from sklearn.metrics import fbeta_score from sklearn.model_selection import permutation_test_score import matplotlib.pyplot as plt import glob from mlxtend.evaluate import mcnemar import collections import numpy as np from mlxtend.evaluate import mcnemar_table # Evaluating baseline # Save predicted labels and gold standard into these variables y_true = [] y_pred = [] # Define folder of predictions and gold standard file_list = [] # ENTER the folders of gold standard and labeled data for baseline types = ('*.conll', '*.tokenized_anon') for files in types: for file in glob.glob(files): file_list.append(file) file_list = sorted(file_list) list1 = file_list[:int(len(file_list)/2)] list2 = file_list[int(len(file_list)/2):] d = 0 for i,j in zip(list2, list1): with open(i, "r", encoding="utf-8") as gold, open(j, "r", encoding="utf-8") as anno: for g, a in zip(gold, anno): g = g.replace(" ", "\t") a = a.replace(" ", "\t") a = a.split("\t") g = g.split("\t") g = [elem.replace("ORG", "O") for elem in g] a = [elem.replace("ORG", "O") for elem in a] if len(a) > 1 and len(g) > 1: # Analze binary if "ANON" in a[1]: y_pred.append(1) else: y_pred.append(0) if len(g[1].strip()) == 1: y_true.append(0) else: y_true.append(1) # Print evaluation report, confusion matrix and f2 scores print("Evaluation baseline:") print(classification_report(y_true, y_pred, labels=[1,0])) print("Confusion matrix:") print(confusion_matrix(y_true, y_pred, labels= [1,0])) print("MCC",matthews_corrcoef(y_true, y_pred)) print("F2 - None",fbeta_score(y_true, y_pred, average=None, beta=2)) print("F2 - weighted",fbeta_score(y_true, y_pred, average='weighted', beta=2)) print("F2 - micro",fbeta_score(y_true, y_pred, average='micro', 
beta=2)) print("F2 - macro",fbeta_score(y_true, y_pred, average='macro', beta=2)) # Evaluating full featured model y_true = [] y_pred2 = [] import glob file_list = [] # ENTER the folders of gold standard and labeled data for baseline types = ('*.conll', '*.tokenized_anon') for files in types: for file in glob.glob(files): file_list.append(file) file_list = sorted(file_list) list1 = file_list[:int(len(file_list)/2)] list2 = file_list[int(len(file_list)/2):] d = 0 for i,j in zip(list2, list1): with open(i, "r", encoding="utf-8") as gold, open(j, "r", encoding="utf-8") as anno: for g, a in zip(gold, anno): g = g.replace(" ", "\t") a = a.replace(" ", "\t") a = a.split("\t") g = g.split("\t") g = [elem.replace("ORG", "O") for elem in g] a = [elem.replace("ORG", "O") for elem in a] if len(a) > 1 and len(g) > 1: # Analze binary if "ANON" in a[1]: y_pred2.append(1) else: y_pred2.append(0) if len(g[1].strip()) == 1: y_true.append(0) else: y_true.append(1) # Print evaluation report, confusion matrix and f2 scores print("Evaluation full featured model:") print(classification_report(y_true, y_pred2, labels= [1,0])) print("Confusion matrix:") print(confusion_matrix(y_true, y_pred2, labels= [1,0])) print("MCC",matthews_corrcoef(y_true, y_pred2)) print("F2 - None",fbeta_score(y_true, y_pred2, average=None, beta=2)) print("F2 - weighted",fbeta_score(y_true, y_pred2, average='weighted', beta=2)) print("F2 - micro",fbeta_score(y_true, y_pred2, average='micro', beta=2)) print("F2 - macro",fbeta_score(y_true, y_pred2, average='macro', beta=2)) # McNemar test y_true = np.array(y_true) y_pred = np.array(y_pred) y_pred2 = np.array(y_pred2) tb = mcnemar_table(y_target=y_true, y_model1=y_pred, y_model2=y_pred2) print("McNemar contigency table") print(tb) chi2, p = mcnemar(ary=tb, corrected=True) print('chi-squared:', chi2) print('p-value:', p)
30.688889
99
0.668839
670
4,143
3.995522
0.2
0.042959
0.040344
0.03362
0.673889
0.649981
0.631304
0.612626
0.581248
0.581248
0
0.02329
0.18127
4,143
135
100
30.688889
0.76592
0.129858
0
0.571429
0
0
0.106678
0
0
0
0
0
0
1
0
false
0
0.132653
0
0.132653
0.22449
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c1893382322e8407dce4b1d084d353bc65babc08
880
py
Python
tests/utils.py
CrisKP/gyst
c3a86564bcf278d2b6a177b20840d50f71dd63b1
[ "MIT" ]
12
2017-09-10T01:43:42.000Z
2020-09-20T01:17:20.000Z
functional_tests/utils.py
HelloMelanieC/FiveUp
ab97d311f163b09146fe330e4360d8e75d769f95
[ "MIT" ]
22
2016-12-26T21:46:10.000Z
2022-02-10T08:01:52.000Z
tests/utils.py
CrisKP/gyst
c3a86564bcf278d2b6a177b20840d50f71dd63b1
[ "MIT" ]
4
2017-08-24T16:01:37.000Z
2019-02-14T23:50:17.000Z
from contextlib import contextmanager from django.contrib.staticfiles.testing import StaticLiveServerTestCase from selenium.webdriver.firefox.webdriver import WebDriver from selenium.webdriver.support.expected_conditions import staleness_of from selenium.webdriver.support.ui import WebDriverWait class SeleniumTestCase(StaticLiveServerTestCase): @classmethod def setUpClass(cls): super(StaticLiveServerTestCase, cls).setUpClass() cls.browser = WebDriver() cls.browser.implicitly_wait(10) @classmethod def tearDownClass(cls): cls.browser.quit() super(StaticLiveServerTestCase, cls).tearDownClass() @contextmanager def wait_for_page_load(self, timeout=10): old_page = self.browser.find_element_by_tag_name("html") yield WebDriverWait(self.browser, timeout).until(staleness_of(old_page))
32.592593
74
0.761364
93
880
7.064516
0.494624
0.054795
0.09589
0.085236
0
0
0
0
0
0
0
0.005413
0.160227
880
26
75
33.846154
0.883627
0
0
0.1
0
0
0.004545
0
0
0
0
0
0
1
0.15
false
0
0.25
0
0.45
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c18b1f9e7702a88aea07ec02c274a084bf5ac802
411
py
Python
dictsort.py
robertbyers1111/python
e87558f2432f0a4a86f17c47c6b19e1345625b83
[ "MIT" ]
null
null
null
dictsort.py
robertbyers1111/python
e87558f2432f0a4a86f17c47c6b19e1345625b83
[ "MIT" ]
null
null
null
dictsort.py
robertbyers1111/python
e87558f2432f0a4a86f17c47c6b19e1345625b83
[ "MIT" ]
null
null
null
#!/usr/bin/python3 # Iterate through a list of dictionaries, sorted by a field from the dictionaries response = [ {'a':1, 'b':2222, 'LastModified':1320, 'c':33}, {'a':11, 'LastModified':1229, 'b':222, 'c':3}, {'LastModified':1400,'a':111, 'b':2, 'c':3333}, {'a':1111, 'b':22, 'LastModified':180, 'c':333} ] response = sorted(response, key=lambda k: k['LastModified']) for x in response: print('x',x)
25.6875
81
0.627737
64
411
4.03125
0.640625
0
0
0
0
0
0
0
0
0
0
0.129577
0.136253
411
15
82
27.4
0.597183
0.23601
0
0
0
0
0.233974
0
0
0
0
0
0
1
0
false
0
0
0
0
0.111111
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c18c4a2ad72355e5338af8e668969552070e526d
617
py
Python
setup.py
urbangrammarai/graphics
69bf5976a11c783fc8a27f59ef57efefbbee6aa8
[ "BSD-3-Clause" ]
1
2021-05-30T07:41:23.000Z
2021-05-30T07:41:23.000Z
setup.py
urbangrammarai/graphics
69bf5976a11c783fc8a27f59ef57efefbbee6aa8
[ "BSD-3-Clause" ]
2
2021-02-19T09:00:03.000Z
2021-10-16T18:59:05.000Z
setup.py
urbangrammarai/graphics
69bf5976a11c783fc8a27f59ef57efefbbee6aa8
[ "BSD-3-Clause" ]
null
null
null
import setuptools with open("README.md", "r", encoding="utf8") as fh: long_description = fh.read() setuptools.setup( name="urbangrammar_graphics", version="1.2.3", author="Martin Fleischmann", author_email="martin@martinfleischmann.net", python_requires=">=3.6", install_requires=["matplotlib", "seaborn", "numpy", "contextily"], description="Visual style for Urban Grammar AI research project", url="https://github.com/urbangrammarai/graphics", long_description=long_description, long_description_content_type="text/markdown", packages=setuptools.find_packages(), )
32.473684
70
0.719611
71
617
6.098592
0.774648
0.138568
0.08776
0.138568
0
0
0
0
0
0
0
0.011278
0.137763
617
18
71
34.277778
0.802632
0
0
0
0
0
0.36953
0.079417
0
0
0
0
0
1
0
false
0
0.0625
0
0.0625
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c18d7f1c745b04cc0b216ca0de4f5614635c60a7
701
py
Python
Supervised Learning/Tilecoder.py
panchyo0/Reinforcement-Learning
b823d7900f020db154d0a61f3683e0cce29e3797
[ "MIT" ]
null
null
null
Supervised Learning/Tilecoder.py
panchyo0/Reinforcement-Learning
b823d7900f020db154d0a61f3683e0cce29e3797
[ "MIT" ]
null
null
null
Supervised Learning/Tilecoder.py
panchyo0/Reinforcement-Learning
b823d7900f020db154d0a61f3683e0cce29e3797
[ "MIT" ]
null
null
null
numTilings = 8 def tilecode(in1, in2, tileIndices): # write your tilecoder here (5 lines or so) for i in range(0,numTilings): col = (in1 + i*(0.6/numTilings)) // 0.6 row = (in2 + i*(0.6/numTilings)) // 0.6 tile = (i*121) + (11*col) + row tileIndices[i] = int(tile) return tileIndices def printTileCoderIndices(in1, in2): tileIndices = [-1] * numTilings tilecode(in1, in2, tileIndices) print('Tile indices for input (', in1, ',', in2,') are : ', tileIndices) #printTileCoderIndices(0.1, 0.1) #printTileCoderIndices(4.0, 2.0) #printTileCoderIndices(5.99, 5.99) #printTileCoderIndices(4.0, 2.1)
26.961538
77
0.584879
89
701
4.606742
0.41573
0.058537
0.12439
0.121951
0.073171
0.073171
0
0
0
0
0
0.087379
0.265335
701
25
78
28.04
0.708738
0.238231
0
0
0
0
0.065606
0
0
0
0
0
0
1
0.166667
false
0
0
0
0.25
0.166667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
c18ed66e182091851798f0c36bc54305d626d02e
868
py
Python
diary/models.py
yheiblog/base_diary
01c2914019d6581c67071c7823a677e3c3e70950
[ "MIT" ]
null
null
null
diary/models.py
yheiblog/base_diary
01c2914019d6581c67071c7823a677e3c3e70950
[ "MIT" ]
16
2021-06-07T13:23:10.000Z
2022-02-05T23:18:43.000Z
diary/models.py
yheiblog/base_diary
01c2914019d6581c67071c7823a677e3c3e70950
[ "MIT" ]
1
2021-08-18T23:40:21.000Z
2021-08-18T23:40:21.000Z
from django.db import models from django.db.models.deletion import CASCADE from django.contrib.auth.models import AbstractUser # Create your models here. class User(AbstractUser): pass class Post(models.Model): id = models.AutoField(primary_key=True) user = models.ForeignKey(User, on_delete=CASCADE) title = models.CharField(max_length=2048) body = models.TextField() created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) categories = models.ManyToManyField( 'Category', blank=True, related_name="posts", ) class Category(models.Model): name = models.CharField(max_length=1024) slug = models.CharField(max_length=1024) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) def __str__(self) -> str: return f'{self.name}'
27.125
54
0.756912
116
868
5.491379
0.465517
0.050235
0.131868
0.156986
0.342229
0.254317
0.254317
0.254317
0.254317
0.254317
0
0.016
0.135945
868
31
55
28
0.833333
0.02765
0
0.166667
0
0
0.028504
0
0
0
0
0
0
1
0.041667
false
0.041667
0.125
0.041667
0.791667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0