Dataset schema (column name: dtype):

hexsha: string
size: int64
ext: string
lang: string
max_stars_repo_path: string
max_stars_repo_name: string
max_stars_repo_head_hexsha: string
max_stars_repo_licenses: list
max_stars_count: int64
max_stars_repo_stars_event_min_datetime: string
max_stars_repo_stars_event_max_datetime: string
max_issues_repo_path: string
max_issues_repo_name: string
max_issues_repo_head_hexsha: string
max_issues_repo_licenses: list
max_issues_count: int64
max_issues_repo_issues_event_min_datetime: string
max_issues_repo_issues_event_max_datetime: string
max_forks_repo_path: string
max_forks_repo_name: string
max_forks_repo_head_hexsha: string
max_forks_repo_licenses: list
max_forks_count: int64
max_forks_repo_forks_event_min_datetime: string
max_forks_repo_forks_event_max_datetime: string
content: string
avg_line_length: float64
max_line_length: int64
alphanum_fraction: float64
qsc_code_num_words_quality_signal: int64
qsc_code_num_chars_quality_signal: float64
qsc_code_mean_word_length_quality_signal: float64
qsc_code_frac_words_unique_quality_signal: float64
qsc_code_frac_chars_top_2grams_quality_signal: float64
qsc_code_frac_chars_top_3grams_quality_signal: float64
qsc_code_frac_chars_top_4grams_quality_signal: float64
qsc_code_frac_chars_dupe_5grams_quality_signal: float64
qsc_code_frac_chars_dupe_6grams_quality_signal: float64
qsc_code_frac_chars_dupe_7grams_quality_signal: float64
qsc_code_frac_chars_dupe_8grams_quality_signal: float64
qsc_code_frac_chars_dupe_9grams_quality_signal: float64
qsc_code_frac_chars_dupe_10grams_quality_signal: float64
qsc_code_frac_chars_replacement_symbols_quality_signal: float64
qsc_code_frac_chars_digital_quality_signal: float64
qsc_code_frac_chars_whitespace_quality_signal: float64
qsc_code_size_file_byte_quality_signal: float64
qsc_code_num_lines_quality_signal: float64
qsc_code_num_chars_line_max_quality_signal: float64
qsc_code_num_chars_line_mean_quality_signal: float64
qsc_code_frac_chars_alphabet_quality_signal: float64
qsc_code_frac_chars_comments_quality_signal: float64
qsc_code_cate_xml_start_quality_signal: float64
qsc_code_frac_lines_dupe_lines_quality_signal: float64
qsc_code_cate_autogen_quality_signal: float64
qsc_code_frac_lines_long_string_quality_signal: float64
qsc_code_frac_chars_string_length_quality_signal: float64
qsc_code_frac_chars_long_word_length_quality_signal: float64
qsc_code_frac_lines_string_concat_quality_signal: float64
qsc_code_cate_encoded_data_quality_signal: float64
qsc_code_frac_chars_hex_words_quality_signal: float64
qsc_code_frac_lines_prompt_comments_quality_signal: float64
qsc_code_frac_lines_assert_quality_signal: float64
qsc_codepython_cate_ast_quality_signal: float64
qsc_codepython_frac_lines_func_ratio_quality_signal: float64
qsc_codepython_cate_var_zero_quality_signal: bool
qsc_codepython_frac_lines_pass_quality_signal: float64
qsc_codepython_frac_lines_import_quality_signal: float64
qsc_codepython_frac_lines_simplefunc_quality_signal: float64
qsc_codepython_score_lines_no_logic_quality_signal: float64
qsc_codepython_frac_lines_print_quality_signal: float64
qsc_code_num_words: int64
qsc_code_num_chars: int64
qsc_code_mean_word_length: int64
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: int64
qsc_code_frac_chars_top_3grams: int64
qsc_code_frac_chars_top_4grams: int64
qsc_code_frac_chars_dupe_5grams: int64
qsc_code_frac_chars_dupe_6grams: int64
qsc_code_frac_chars_dupe_7grams: int64
qsc_code_frac_chars_dupe_8grams: int64
qsc_code_frac_chars_dupe_9grams: int64
qsc_code_frac_chars_dupe_10grams: int64
qsc_code_frac_chars_replacement_symbols: int64
qsc_code_frac_chars_digital: int64
qsc_code_frac_chars_whitespace: int64
qsc_code_size_file_byte: int64
qsc_code_num_lines: int64
qsc_code_num_chars_line_max: int64
qsc_code_num_chars_line_mean: int64
qsc_code_frac_chars_alphabet: int64
qsc_code_frac_chars_comments: int64
qsc_code_cate_xml_start: int64
qsc_code_frac_lines_dupe_lines: int64
qsc_code_cate_autogen: int64
qsc_code_frac_lines_long_string: int64
qsc_code_frac_chars_string_length: int64
qsc_code_frac_chars_long_word_length: int64
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: int64
qsc_code_frac_chars_hex_words: int64
qsc_code_frac_lines_prompt_comments: int64
qsc_code_frac_lines_assert: int64
qsc_codepython_cate_ast: int64
qsc_codepython_frac_lines_func_ratio: int64
qsc_codepython_cate_var_zero: int64
qsc_codepython_frac_lines_pass: int64
qsc_codepython_frac_lines_import: int64
qsc_codepython_frac_lines_simplefunc: int64
qsc_codepython_score_lines_no_logic: int64
qsc_codepython_frac_lines_print: int64
effective: string
hits: int64
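Each record pairs per-file repository metadata (path, repo name, head commit, licenses, star/fork/issue event counts and timestamps) with the raw file content and a block of qsc_* quality signals computed from that content. As an illustrative sketch only (the file name records.parquet, the use of pandas, and the filter thresholds are assumptions, not part of this dump), rows with this schema could be loaded and filtered on the quality signals like so:

import pandas as pd

# Hypothetical local copy of rows with the schema above; the path is an assumption.
df = pd.read_parquet("records.parquet")

# Example filter: keep Python files that are not flagged as auto-generated or
# XML-like and that are mostly alphabetic characters; thresholds are illustrative.
mask = (
    (df["lang"] == "Python")
    & (df["qsc_code_cate_autogen_quality_signal"] == 0)
    & (df["qsc_code_cate_xml_start_quality_signal"] == 0)
    & (df["qsc_code_frac_chars_alphabet_quality_signal"] > 0.6)
)
print(df.loc[mask, ["max_stars_repo_name", "max_stars_repo_path", "size", "hits"]].head())

The lines that follow are the raw row values in schema order, one value per line, with long content strings wrapped across lines.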
8acdfbdff8ea00cb28afa15084b186e6d5355915
4,299
py
Python
tests/hwsim/test_p2p_ext.py
AMacedoP/hostapd
d0a6ec203295372342093abcf4eca7caa45acda5
[ "Unlicense" ]
4
2015-06-18T05:25:10.000Z
2019-07-09T13:57:28.000Z
tests/hwsim/test_p2p_ext.py
AMacedoP/hostapd
d0a6ec203295372342093abcf4eca7caa45acda5
[ "Unlicense" ]
null
null
null
tests/hwsim/test_p2p_ext.py
AMacedoP/hostapd
d0a6ec203295372342093abcf4eca7caa45acda5
[ "Unlicense" ]
null
null
null
# P2P vendor specific extension tests # Copyright (c) 2014, Qualcomm Atheros, Inc. import logging logger = logging.getLogger() def test_p2p_ext_discovery(dev): """P2P device discovery with vendor specific extensions""" addr0 = dev[0].p2p_dev_addr() addr1 = dev[1].p2p_dev_addr() try: if "OK" not in dev[0].request("VENDOR_ELEM_ADD 1 dd050011223344"): raise Exception("VENDOR_ELEM_ADD failed") res = dev[0].request("VENDOR_ELEM_GET 1") if res != "dd050011223344": raise Exception("Unexpected VENDOR_ELEM_GET result: " + res) if "OK" not in dev[0].request("VENDOR_ELEM_ADD 1 dd06001122335566"): raise Exception("VENDOR_ELEM_ADD failed") res = dev[0].request("VENDOR_ELEM_GET 1") if res != "dd050011223344dd06001122335566": raise Exception("Unexpected VENDOR_ELEM_GET result(2): " + res) res = dev[0].request("VENDOR_ELEM_GET 2") if res != "": raise Exception("Unexpected VENDOR_ELEM_GET result(3): " + res) if "OK" not in dev[0].request("VENDOR_ELEM_REMOVE 1 dd050011223344"): raise Exception("VENDOR_ELEM_REMOVE failed") res = dev[0].request("VENDOR_ELEM_GET 1") if res != "dd06001122335566": raise Exception("Unexpected VENDOR_ELEM_GET result(4): " + res) if "OK" not in dev[0].request("VENDOR_ELEM_REMOVE 1 dd06001122335566"): raise Exception("VENDOR_ELEM_REMOVE failed") res = dev[0].request("VENDOR_ELEM_GET 1") if res != "": raise Exception("Unexpected VENDOR_ELEM_GET result(5): " + res) if "OK" not in dev[0].request("VENDOR_ELEM_ADD 1 dd050011223344dd06001122335566"): raise Exception("VENDOR_ELEM_ADD failed(2)") if "FAIL" not in dev[0].request("VENDOR_ELEM_REMOVE 1 dd051122334455"): raise Exception("Unexpected VENDOR_ELEM_REMOVE success") if "FAIL" not in dev[0].request("VENDOR_ELEM_REMOVE 1 dd"): raise Exception("Unexpected VENDOR_ELEM_REMOVE success(2)") if "FAIL" not in dev[0].request("VENDOR_ELEM_ADD 1 ddff"): raise Exception("Unexpected VENDOR_ELEM_ADD success(3)") dev[0].p2p_listen() if not dev[1].discover_peer(addr0): raise Exception("Device discovery timed out") if not dev[0].discover_peer(addr1): raise Exception("Device discovery timed out") peer = dev[1].get_peer(addr0) if peer['vendor_elems'] != "dd050011223344dd06001122335566": raise Exception("Vendor elements not reported correctly") res = dev[0].request("VENDOR_ELEM_GET 1") if res != "dd050011223344dd06001122335566": raise Exception("Unexpected VENDOR_ELEM_GET result(6): " + res) if "OK" not in dev[0].request("VENDOR_ELEM_REMOVE 1 dd06001122335566"): raise Exception("VENDOR_ELEM_REMOVE failed") res = dev[0].request("VENDOR_ELEM_GET 1") if res != "dd050011223344": raise Exception("Unexpected VENDOR_ELEM_GET result(7): " + res) finally: dev[0].request("VENDOR_ELEM_REMOVE 1 *") def test_p2p_ext_discovery_go(dev): """P2P device discovery with vendor specific extensions for GO""" addr0 = dev[0].p2p_dev_addr() addr1 = dev[1].p2p_dev_addr() try: if "OK" not in dev[0].request("VENDOR_ELEM_ADD 2 dd050011223344dd06001122335566"): raise Exception("VENDOR_ELEM_ADD failed") if "OK" not in dev[0].request("VENDOR_ELEM_ADD 3 dd050011223344dd06001122335566"): raise Exception("VENDOR_ELEM_ADD failed") if "OK" not in dev[0].request("VENDOR_ELEM_ADD 12 dd050011223344dd06001122335566"): raise Exception("VENDOR_ELEM_ADD failed") dev[0].p2p_start_go(freq="2412") if not dev[1].discover_peer(addr0): raise Exception("Device discovery timed out") peer = dev[1].get_peer(addr0) if peer['vendor_elems'] != "dd050011223344dd06001122335566": print peer['vendor_elems'] raise Exception("Vendor elements not reported correctly") finally: dev[0].request("VENDOR_ELEM_REMOVE 2 *") 
dev[0].request("VENDOR_ELEM_REMOVE 3 *") dev[0].request("VENDOR_ELEM_REMOVE 12 *")
47.241758
91
0.650384
550
4,299
4.883636
0.136364
0.156366
0.094192
0.14557
0.905436
0.876396
0.839911
0.670514
0.634028
0.59866
0
0.127698
0.234938
4,299
90
92
47.766667
0.688963
0.018144
0
0.539474
0
0
0.398291
0.058608
0
0
0
0
0
0
null
null
0
0.013158
null
null
0.013158
0
0
0
null
0
0
0
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
4
8ad596dcfcb6e7c71addc5f7193aa5d8dcddb0d3
69
py
Python
gs/group/member/add/json/__init__.py
groupserver/gs.group.member.add.json
4d12d942c0d0d5ed8888218631102a352730be47
[ "ZPL-2.1" ]
null
null
null
gs/group/member/add/json/__init__.py
groupserver/gs.group.member.add.json
4d12d942c0d0d5ed8888218631102a352730be47
[ "ZPL-2.1" ]
null
null
null
gs/group/member/add/json/__init__.py
groupserver/gs.group.member.add.json
4d12d942c0d0d5ed8888218631102a352730be47
[ "ZPL-2.1" ]
null
null
null
# -*- coding: utf-8 -*-
# This space intentially left (almost) blank
23
44
0.652174
9
69
5
1
0
0
0
0
0
0
0
0
0
0
0.017544
0.173913
69
2
45
34.5
0.77193
0.927536
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
76e34740793af2d99cf758f4c36d62e3a7c231f2
13,327
py
Python
sdk/python/pulumi_newrelic/nrql_drop_rule.py
bob-bins/pulumi-newrelic
f8a121fb7d6e6ad979d3ccf72467b9e89769e305
[ "ECL-2.0", "Apache-2.0" ]
6
2019-09-17T20:41:26.000Z
2022-01-13T23:54:14.000Z
sdk/python/pulumi_newrelic/nrql_drop_rule.py
bob-bins/pulumi-newrelic
f8a121fb7d6e6ad979d3ccf72467b9e89769e305
[ "ECL-2.0", "Apache-2.0" ]
136
2019-04-29T21:34:57.000Z
2022-03-30T17:07:03.000Z
sdk/python/pulumi_newrelic/nrql_drop_rule.py
bob-bins/pulumi-newrelic
f8a121fb7d6e6ad979d3ccf72467b9e89769e305
[ "ECL-2.0", "Apache-2.0" ]
3
2019-10-05T10:33:59.000Z
2021-06-15T16:37:49.000Z
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from . import _utilities __all__ = ['NrqlDropRuleArgs', 'NrqlDropRule'] @pulumi.input_type class NrqlDropRuleArgs: def __init__(__self__, *, action: pulumi.Input[str], nrql: pulumi.Input[str], account_id: Optional[pulumi.Input[int]] = None, description: Optional[pulumi.Input[str]] = None): """ The set of arguments for constructing a NrqlDropRule resource. :param pulumi.Input[str] action: An action type specifying how to apply the NRQL string (either `drop_data` or `drop_attributes`). :param pulumi.Input[str] nrql: A NRQL string that specifies what data types to drop. :param pulumi.Input[int] account_id: Account where the drop rule will be put. Defaults to the account associated with the API key used. :param pulumi.Input[str] description: The description of the drop rule. """ pulumi.set(__self__, "action", action) pulumi.set(__self__, "nrql", nrql) if account_id is not None: pulumi.set(__self__, "account_id", account_id) if description is not None: pulumi.set(__self__, "description", description) @property @pulumi.getter def action(self) -> pulumi.Input[str]: """ An action type specifying how to apply the NRQL string (either `drop_data` or `drop_attributes`). """ return pulumi.get(self, "action") @action.setter def action(self, value: pulumi.Input[str]): pulumi.set(self, "action", value) @property @pulumi.getter def nrql(self) -> pulumi.Input[str]: """ A NRQL string that specifies what data types to drop. """ return pulumi.get(self, "nrql") @nrql.setter def nrql(self, value: pulumi.Input[str]): pulumi.set(self, "nrql", value) @property @pulumi.getter(name="accountId") def account_id(self) -> Optional[pulumi.Input[int]]: """ Account where the drop rule will be put. Defaults to the account associated with the API key used. """ return pulumi.get(self, "account_id") @account_id.setter def account_id(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "account_id", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ The description of the drop rule. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @pulumi.input_type class _NrqlDropRuleState: def __init__(__self__, *, account_id: Optional[pulumi.Input[int]] = None, action: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, nrql: Optional[pulumi.Input[str]] = None, rule_id: Optional[pulumi.Input[str]] = None): """ Input properties used for looking up and filtering NrqlDropRule resources. :param pulumi.Input[int] account_id: Account where the drop rule will be put. Defaults to the account associated with the API key used. :param pulumi.Input[str] action: An action type specifying how to apply the NRQL string (either `drop_data` or `drop_attributes`). :param pulumi.Input[str] description: The description of the drop rule. :param pulumi.Input[str] nrql: A NRQL string that specifies what data types to drop. :param pulumi.Input[str] rule_id: The id, uniquely identifying the rule. 
""" if account_id is not None: pulumi.set(__self__, "account_id", account_id) if action is not None: pulumi.set(__self__, "action", action) if description is not None: pulumi.set(__self__, "description", description) if nrql is not None: pulumi.set(__self__, "nrql", nrql) if rule_id is not None: pulumi.set(__self__, "rule_id", rule_id) @property @pulumi.getter(name="accountId") def account_id(self) -> Optional[pulumi.Input[int]]: """ Account where the drop rule will be put. Defaults to the account associated with the API key used. """ return pulumi.get(self, "account_id") @account_id.setter def account_id(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "account_id", value) @property @pulumi.getter def action(self) -> Optional[pulumi.Input[str]]: """ An action type specifying how to apply the NRQL string (either `drop_data` or `drop_attributes`). """ return pulumi.get(self, "action") @action.setter def action(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "action", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ The description of the drop rule. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter def nrql(self) -> Optional[pulumi.Input[str]]: """ A NRQL string that specifies what data types to drop. """ return pulumi.get(self, "nrql") @nrql.setter def nrql(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "nrql", value) @property @pulumi.getter(name="ruleId") def rule_id(self) -> Optional[pulumi.Input[str]]: """ The id, uniquely identifying the rule. """ return pulumi.get(self, "rule_id") @rule_id.setter def rule_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "rule_id", value) class NrqlDropRule(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, account_id: Optional[pulumi.Input[int]] = None, action: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, nrql: Optional[pulumi.Input[str]] = None, __props__=None): """ ## Import New Relic NRQL drop rules can be imported using a concatenated string of the format `<account_id>:<rule_id>`, e.g. bash ```sh $ pulumi import newrelic:index/nrqlDropRule:NrqlDropRule foo 12345:34567 ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[int] account_id: Account where the drop rule will be put. Defaults to the account associated with the API key used. :param pulumi.Input[str] action: An action type specifying how to apply the NRQL string (either `drop_data` or `drop_attributes`). :param pulumi.Input[str] description: The description of the drop rule. :param pulumi.Input[str] nrql: A NRQL string that specifies what data types to drop. """ ... @overload def __init__(__self__, resource_name: str, args: NrqlDropRuleArgs, opts: Optional[pulumi.ResourceOptions] = None): """ ## Import New Relic NRQL drop rules can be imported using a concatenated string of the format `<account_id>:<rule_id>`, e.g. bash ```sh $ pulumi import newrelic:index/nrqlDropRule:NrqlDropRule foo 12345:34567 ``` :param str resource_name: The name of the resource. :param NrqlDropRuleArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... 
def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(NrqlDropRuleArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, account_id: Optional[pulumi.Input[int]] = None, action: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, nrql: Optional[pulumi.Input[str]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = NrqlDropRuleArgs.__new__(NrqlDropRuleArgs) __props__.__dict__["account_id"] = account_id if action is None and not opts.urn: raise TypeError("Missing required property 'action'") __props__.__dict__["action"] = action __props__.__dict__["description"] = description if nrql is None and not opts.urn: raise TypeError("Missing required property 'nrql'") __props__.__dict__["nrql"] = nrql __props__.__dict__["rule_id"] = None super(NrqlDropRule, __self__).__init__( 'newrelic:index/nrqlDropRule:NrqlDropRule', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, account_id: Optional[pulumi.Input[int]] = None, action: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, nrql: Optional[pulumi.Input[str]] = None, rule_id: Optional[pulumi.Input[str]] = None) -> 'NrqlDropRule': """ Get an existing NrqlDropRule resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[int] account_id: Account where the drop rule will be put. Defaults to the account associated with the API key used. :param pulumi.Input[str] action: An action type specifying how to apply the NRQL string (either `drop_data` or `drop_attributes`). :param pulumi.Input[str] description: The description of the drop rule. :param pulumi.Input[str] nrql: A NRQL string that specifies what data types to drop. :param pulumi.Input[str] rule_id: The id, uniquely identifying the rule. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = _NrqlDropRuleState.__new__(_NrqlDropRuleState) __props__.__dict__["account_id"] = account_id __props__.__dict__["action"] = action __props__.__dict__["description"] = description __props__.__dict__["nrql"] = nrql __props__.__dict__["rule_id"] = rule_id return NrqlDropRule(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="accountId") def account_id(self) -> pulumi.Output[int]: """ Account where the drop rule will be put. Defaults to the account associated with the API key used. 
""" return pulumi.get(self, "account_id") @property @pulumi.getter def action(self) -> pulumi.Output[str]: """ An action type specifying how to apply the NRQL string (either `drop_data` or `drop_attributes`). """ return pulumi.get(self, "action") @property @pulumi.getter def description(self) -> pulumi.Output[Optional[str]]: """ The description of the drop rule. """ return pulumi.get(self, "description") @property @pulumi.getter def nrql(self) -> pulumi.Output[str]: """ A NRQL string that specifies what data types to drop. """ return pulumi.get(self, "nrql") @property @pulumi.getter(name="ruleId") def rule_id(self) -> pulumi.Output[str]: """ The id, uniquely identifying the rule. """ return pulumi.get(self, "rule_id")
39.78209
143
0.628649
1,595
13,327
5.047649
0.104702
0.08471
0.081729
0.068315
0.786486
0.757173
0.735064
0.690722
0.66352
0.64228
0
0.002155
0.268853
13,327
334
144
39.901198
0.824097
0.314099
0
0.630208
1
0
0.078891
0.00476
0
0
0
0
0
1
0.15625
false
0.005208
0.026042
0
0.276042
0
0
0
0
null
0
0
0
0
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
76f31e51ce9a21684ea781d4d246debbe2644f94
198
py
Python
confirm_Alice.py
To-Fujita/ChatBot_with_ParlAI
fd067731552a49d50780ee442d44794e13dc34d0
[ "MIT" ]
1
2021-08-06T11:24:10.000Z
2021-08-06T11:24:10.000Z
confirm_Alice.py
To-Fujita/ChatBot_with_ParlAI
fd067731552a49d50780ee442d44794e13dc34d0
[ "MIT" ]
null
null
null
confirm_Alice.py
To-Fujita/ChatBot_with_ParlAI
fd067731552a49d50780ee442d44794e13dc34d0
[ "MIT" ]
null
null
null
# Confirm ParlAI Agent for Alice by T. Fujita 2021/08/01

import sys
sys.path.append('./ParlAI-master/')

from parlai.scripts.interactive import Interactive

Interactive.main(model='alice')
22
57
0.742424
28
198
5.25
0.75
0
0
0
0
0
0
0
0
0
0
0.047337
0.146465
198
8
58
24.75
0.822485
0.272727
0
0
0
0
0.156716
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
0a0f871f04fbf756ae025446b46603cd70ca814e
180
py
Python
coyote_framework/config/browser_config.py
vaibhavrastogi1988/python_testing_framework
583a2286479ed0ccda309c866a403dc92fa1bb3b
[ "MIT" ]
null
null
null
coyote_framework/config/browser_config.py
vaibhavrastogi1988/python_testing_framework
583a2286479ed0ccda309c866a403dc92fa1bb3b
[ "MIT" ]
null
null
null
coyote_framework/config/browser_config.py
vaibhavrastogi1988/python_testing_framework
583a2286479ed0ccda309c866a403dc92fa1bb3b
[ "MIT" ]
null
null
null
from coyote_framework.config.abstract_config import ConfigBase


class BrowserConfig(ConfigBase):
    def __init__(self):
        super(BrowserConfig, self).__init__('browser')
18
62
0.761111
19
180
6.684211
0.736842
0
0
0
0
0
0
0
0
0
0
0
0.15
180
9
63
20
0.830065
0
0
0
0
0
0.039326
0
0
0
0
0
0
1
0.25
false
0
0.25
0
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
4
0a1c78cf7cf91b7152a06ec58f14117ab5359b93
429
py
Python
rendering_widgets/abstract_gui_base.py
JohannaLatt/SHM-Mirror
b5ce0c6a9c16761a9cc19f8b8769d1636477cc3c
[ "MIT" ]
null
null
null
rendering_widgets/abstract_gui_base.py
JohannaLatt/SHM-Mirror
b5ce0c6a9c16761a9cc19f8b8769d1636477cc3c
[ "MIT" ]
null
null
null
rendering_widgets/abstract_gui_base.py
JohannaLatt/SHM-Mirror
b5ce0c6a9c16761a9cc19f8b8769d1636477cc3c
[ "MIT" ]
null
null
null
from abc import ABC, abstractmethod


class AbstractGUIBase(ABC):

    @abstractmethod
    def render_skeleton_data(self, data_str):
        pass

    @abstractmethod
    def change_joint_or_bone_color(self, data_str):
        pass

    @abstractmethod
    def clear_skeleton(self):
        pass

    @abstractmethod
    def show_text(self, data):
        pass

    @abstractmethod
    def update_graps(self, data):
        pass
17.16
51
0.659674
48
429
5.666667
0.479167
0.3125
0.308824
0.110294
0.235294
0.235294
0
0
0
0
0
0
0.275058
429
24
52
17.875
0.874598
0
0
0.588235
0
0
0
0
0
0
0
0
0
1
0.294118
false
0.294118
0.058824
0
0.411765
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
4
0a25b8218b03ff4e0d0a5d6d6d0c97d95e4d3302
226
py
Python
django101/django101/web/admin.py
Minkov/python-web-framework-demos-
30f39de0b4344e1b7e5d4cf96f6d6bbbfe867305
[ "MIT" ]
2
2022-03-06T11:56:35.000Z
2022-03-20T09:31:45.000Z
django101/django101/web/admin.py
Minkov/python-web-framework-demos-
30f39de0b4344e1b7e5d4cf96f6d6bbbfe867305
[ "MIT" ]
null
null
null
django101/django101/web/admin.py
Minkov/python-web-framework-demos-
30f39de0b4344e1b7e5d4cf96f6d6bbbfe867305
[ "MIT" ]
4
2022-03-17T18:05:19.000Z
2022-03-22T16:38:11.000Z
from django.contrib import admin

from django101.web.models import Todo, Category


@admin.register(Todo)
class TodoAdmin(admin.ModelAdmin):
    pass


@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
    pass
16.142857
47
0.778761
28
226
6.285714
0.571429
0.147727
0.215909
0
0
0
0
0
0
0
0
0.015306
0.132743
226
13
48
17.384615
0.882653
0
0
0.25
0
0
0
0
0
0
0
0
0
1
0
true
0.25
0.25
0
0.5
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
4
0a3474cad566eef0c77e26114ed88715bf9096bc
82
py
Python
transcrypt/development/automated_tests/hello/testlet1.py
bitwiseman/Transcrypt
c77fb8064f0466a33469df9a2f38a4fb9c6eaa51
[ "Apache-2.0" ]
1
2021-08-23T21:06:32.000Z
2021-08-23T21:06:32.000Z
transcrypt/development/automated_tests/hello/testlet1.py
bitwiseman/Transcrypt
c77fb8064f0466a33469df9a2f38a4fb9c6eaa51
[ "Apache-2.0" ]
null
null
null
transcrypt/development/automated_tests/hello/testlet1.py
bitwiseman/Transcrypt
c77fb8064f0466a33469df9a2f38a4fb9c6eaa51
[ "Apache-2.0" ]
null
null
null
def run (autoTester):
    autoTester.check ('goodbye')
    autoTester.check ('moon')
20.5
30
0.695122
9
82
6.333333
0.666667
0.526316
0
0
0
0
0
0
0
0
0
0
0.146341
82
3
31
27.333333
0.814286
0
0
0
0
0
0.139241
0
0
0
0
0
0
1
0.333333
false
0
0
0
0.333333
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
4
0a45e67fb01b26c0f6e2d2f03f32d4b8042cd06b
237
py
Python
pythonproject/oddeven.py
sangampatel/pythonproject
c6c690fd5d4fa62023db15cf10bdedeeb8b44a41
[ "Apache-2.0" ]
null
null
null
pythonproject/oddeven.py
sangampatel/pythonproject
c6c690fd5d4fa62023db15cf10bdedeeb8b44a41
[ "Apache-2.0" ]
null
null
null
pythonproject/oddeven.py
sangampatel/pythonproject
c6c690fd5d4fa62023db15cf10bdedeeb8b44a41
[ "Apache-2.0" ]
null
null
null
print("Enter The Number n") n = int(input()) if (n%2)!=0: print("Weird") elif (n%2)==0: if n in range(2,5): print("Not Weird") elif n in range(6,21): print("Weird") elif n > 20: print("Not Weird")
19.75
27
0.510549
41
237
2.95122
0.463415
0.223141
0.247934
0.247934
0
0
0
0
0
0
0
0.065476
0.291139
237
11
28
21.545455
0.654762
0
0
0.363636
0
0
0.194093
0
0
0
0
0
0
1
0
false
0
0
0
0
0.454545
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
4
0a4700b8da1905b719d74e0f7a2ab55be961d857
192
py
Python
tests/unit_tests/cellular/__init__.py
fqzhou/LoadBalanceControl-RL
689eec3b3b27e121aa45d2793e411f1863f6fc0b
[ "MIT" ]
11
2018-10-29T06:50:43.000Z
2022-03-28T14:26:09.000Z
tests/unit_tests/cellular/__init__.py
fqzhou/LoadBalanceControl-RL
689eec3b3b27e121aa45d2793e411f1863f6fc0b
[ "MIT" ]
1
2022-03-01T13:46:25.000Z
2022-03-01T13:46:25.000Z
tests/unit_tests/cellular/__init__.py
fqzhou/LoadBalanceControl-RL
689eec3b3b27e121aa45d2793e411f1863f6fc0b
[ "MIT" ]
6
2019-02-05T20:01:53.000Z
2020-09-04T12:30:00.000Z
#! /usr/bin/env python3
# -*- coding: utf-8 -*-

"""
Unit test cases for cellular environment and algorithms
"""

__author__ = 'Ari Saha (arisaha@icloud.com), Mingyang Liu(liux3941@umn.edu)'
24
76
0.6875
26
192
4.923077
1
0
0
0
0
0
0
0
0
0
0
0.036585
0.145833
192
7
77
27.428571
0.743902
0.526042
0
0
0
0
0.73494
0.506024
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
0a4e7499ab7b0074c5a49e4113f85edbe58de1fc
74
py
Python
ideal/contrib/django/ideal_compat/__init__.py
maykinmedia/python-ideal
bb1ab14a49d2032f89d0816d65986c9f427aad97
[ "MIT" ]
8
2018-02-12T10:22:57.000Z
2019-11-14T16:10:28.000Z
ideal/contrib/django/ideal_compat/__init__.py
maykinmedia/python-ideal
bb1ab14a49d2032f89d0816d65986c9f427aad97
[ "MIT" ]
5
2018-02-12T09:15:46.000Z
2018-02-23T12:43:29.000Z
ideal/contrib/django/ideal_compat/__init__.py
maykinmedia/python-ideal
bb1ab14a49d2032f89d0816d65986c9f427aad97
[ "MIT" ]
1
2018-02-15T12:29:05.000Z
2018-02-15T12:29:05.000Z
default_app_config = 'ideal.contrib.django.ideal_compat.apps.IdealConfig'
37
73
0.851351
10
74
6
0.9
0
0
0
0
0
0
0
0
0
0
0
0.040541
74
1
74
74
0.84507
0
0
0
0
0
0.675676
0.675676
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
0a561f90eab84dccb02ee3fb5d5cc64a04e7cdda
26
py
Python
scripts/scale/__init__.py
atsgen/tf-test
2748fcd81491450c75dadc71849d2a1c11061029
[ "Apache-2.0" ]
5
2020-09-29T00:36:57.000Z
2022-02-16T06:51:32.000Z
scripts/scale/__init__.py
vkolli/contrail-test-perf
db04b8924a2c330baabe3059788b149d957a7d67
[ "Apache-2.0" ]
27
2019-11-02T02:18:34.000Z
2022-02-24T18:49:08.000Z
scripts/scale/__init__.py
vkolli/contrail-test-perf
db04b8924a2c330baabe3059788b149d957a7d67
[ "Apache-2.0" ]
20
2019-11-28T16:02:25.000Z
2022-01-06T05:56:58.000Z
"""scale test package."""
13
25
0.615385
3
26
5.333333
1
0
0
0
0
0
0
0
0
0
0
0
0.115385
26
1
26
26
0.695652
0.730769
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
0a58dd6b60ea5d3b8cc87ac39aa1a3f31a06d13e
65
py
Python
src/config.py
tkowalski9938/Boosted-Bot
6bfe4290e5df541106fe068753cc7864c93782ad
[ "MIT" ]
3
2021-05-25T03:05:36.000Z
2022-01-15T01:17:19.000Z
src/config.py
tkowalski9938/Boosted-Bot
6bfe4290e5df541106fe068753cc7864c93782ad
[ "MIT" ]
null
null
null
src/config.py
tkowalski9938/Boosted-Bot
6bfe4290e5df541106fe068753cc7864c93782ad
[ "MIT" ]
null
null
null
DISCORD_API_KEY = ""
RIOT_API_KEY = ""
BOOSTED_BOT_CHANNEL_ID =
16.25
25
0.753846
10
65
4.2
0.8
0.285714
0
0
0
0
0
0
0
0
0
0
0.138462
65
3
26
21.666667
0.75
0
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0
null
null
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
4
6a5c63000c6ccb9ee39ed83b4f2f5e8dc47df468
170
py
Python
rusty/http/responses.py
justanotherbyte/Rusty
0132c44a99ebc36f31c70482b19161196f41bc5e
[ "MIT" ]
1
2021-09-03T13:03:16.000Z
2021-09-03T13:03:16.000Z
rusty/http/responses.py
justanotherbyte/Rusty
0132c44a99ebc36f31c70482b19161196f41bc5e
[ "MIT" ]
null
null
null
rusty/http/responses.py
justanotherbyte/Rusty
0132c44a99ebc36f31c70482b19161196f41bc5e
[ "MIT" ]
1
2021-12-24T12:33:09.000Z
2021-12-24T12:33:09.000Z
from starlette.responses import (
    JSONResponse,
    HTMLResponse,
    Response,
    FileResponse,
    RedirectResponse,
    PlainTextResponse,
    StreamingResponse
)
18.888889
33
0.717647
11
170
11.090909
1
0
0
0
0
0
0
0
0
0
0
0
0.229412
170
9
34
18.888889
0.931298
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.111111
0
0.111111
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
6a61b759a3cb54d94695e1f7dac27837108bb966
250
py
Python
src/bt/char.py
btdevel/bt
23abdf0860484a4adcfbe2bcbe94eebca7f820fd
[ "MIT" ]
1
2017-06-30T00:35:05.000Z
2017-06-30T00:35:05.000Z
src/bt/char.py
btdevel/bt
23abdf0860484a4adcfbe2bcbe94eebca7f820fd
[ "MIT" ]
null
null
null
src/bt/char.py
btdevel/bt
23abdf0860484a4adcfbe2bcbe94eebca7f820fd
[ "MIT" ]
null
null
null
# this module contains just some ideas, nothing of which is finished yet
# i don't know yet where to put general character stuff/game specific stuff
# and bt1/2/3/ specific stuff
# so currently stuff is in bt.game

from bt.game.character import *
31.25
75
0.76
44
250
4.318182
0.772727
0.136842
0
0
0
0
0
0
0
0
0
0.014706
0.184
250
7
76
35.714286
0.916667
0.824
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
6a78eb55c0a74dd504c7e7a03d54527991378196
113
py
Python
docker_bilibili/bilibili/data_init/init_scripts/__init__.py
PyDee/Spiders
6fc0a414060032b5ba4332302285e3fcc9a6113e
[ "Apache-2.0" ]
6
2020-06-02T16:22:58.000Z
2021-09-18T03:20:16.000Z
docker_bilibili/bilibili/data_init/init_scripts/__init__.py
PyDee/Spiders
6fc0a414060032b5ba4332302285e3fcc9a6113e
[ "Apache-2.0" ]
4
2021-03-31T19:54:37.000Z
2022-03-12T00:33:41.000Z
docker_bilibili/bilibili/data_init/init_scripts/__init__.py
PyDee/Spiders
6fc0a414060032b5ba4332302285e3fcc9a6113e
[ "Apache-2.0" ]
5
2020-06-02T16:23:00.000Z
2021-09-03T02:16:15.000Z
# -*- coding: utf-8 -*-
"""
@Time : 2020/11/26 15:38
@Author : PyDee
@File : __init__.py.py
@description :
"""
14.125
24
0.566372
16
113
3.75
0.9375
0
0
0
0
0
0
0
0
0
0
0.141304
0.185841
113
7
25
16.142857
0.51087
0.902655
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
6acb4841509568b9454a968d24e6bfab0ecc0c98
87
py
Python
tests/periodicities/Day/Cycle_Day_1600_D_5.py
jmabry/pyaf
afbc15a851a2445a7824bf255af612dc429265af
[ "BSD-3-Clause" ]
null
null
null
tests/periodicities/Day/Cycle_Day_1600_D_5.py
jmabry/pyaf
afbc15a851a2445a7824bf255af612dc429265af
[ "BSD-3-Clause" ]
1
2019-11-30T23:39:38.000Z
2019-12-01T04:34:35.000Z
tests/periodicities/Day/Cycle_Day_1600_D_5.py
jmabry/pyaf
afbc15a851a2445a7824bf255af612dc429265af
[ "BSD-3-Clause" ]
null
null
null
import pyaf.tests.periodicities.period_test as per

per.buildModel((5 , 'D' , 1600));
17.4
50
0.724138
13
87
4.769231
0.923077
0
0
0
0
0
0
0
0
0
0
0.065789
0.126437
87
4
51
21.75
0.75
0
0
0
0
0
0.011628
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
6aea4e8c17ca65eaf6bc3d4071033fc0cae2a9e8
5,068
py
Python
test/test_role_api.py
gustavs408650/looker_sdk_30
8b52449f216b2cb3b84f09e2856bcea1ed4a2b0c
[ "MIT" ]
null
null
null
test/test_role_api.py
gustavs408650/looker_sdk_30
8b52449f216b2cb3b84f09e2856bcea1ed4a2b0c
[ "MIT" ]
null
null
null
test/test_role_api.py
gustavs408650/looker_sdk_30
8b52449f216b2cb3b84f09e2856bcea1ed4a2b0c
[ "MIT" ]
1
2019-11-12T10:05:51.000Z
2019-11-12T10:05:51.000Z
# coding: utf-8 """ Looker API 3.0 Reference ### Authorization The Looker API uses Looker **API3** credentials for authorization and access control. Looker admins can create API3 credentials on Looker's **Admin/Users** page. Pass API3 credentials to the **/login** endpoint to obtain a temporary access_token. Include that access_token in the Authorization header of Looker API requests. For details, see [Looker API Authorization](https://looker.com/docs/r/api/authorization) ### Client SDKs The Looker API is a RESTful system that should be usable by any programming language capable of making HTTPS requests. Client SDKs for a variety of programming languages can be generated from the Looker API's Swagger JSON metadata to streamline use of the Looker API in your applications. A client SDK for Ruby is available as an example. For more information, see [Looker API Client SDKs](https://looker.com/docs/r/api/client_sdks) ### Try It Out! The 'api-docs' page served by the Looker instance includes 'Try It Out!' buttons for each API method. After logging in with API3 credentials, you can use the \"Try It Out!\" buttons to call the API directly from the documentation page to interactively explore API features and responses. ### Versioning Future releases of Looker will expand this API release-by-release to securely expose more and more of the core power of Looker to API client applications. API endpoints marked as \"beta\" may receive breaking changes without warning. Stable (non-beta) API endpoints should not receive breaking changes in future releases. For more information, see [Looker API Versioning](https://looker.com/docs/r/api/versioning) # noqa: E501 OpenAPI spec version: 3.0.0 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import unittest import looker_client_30 from looker_client_30.api.role_api import RoleApi # noqa: E501 from looker_client_30.rest import ApiException class TestRoleApi(unittest.TestCase): """RoleApi unit test stubs""" def setUp(self): self.api = looker_client_30.api.role_api.RoleApi() # noqa: E501 def tearDown(self): pass def test_all_model_sets(self): """Test case for all_model_sets Get All Model Sets # noqa: E501 """ pass def test_all_permission_sets(self): """Test case for all_permission_sets Get All Permission Sets # noqa: E501 """ pass def test_all_permissions(self): """Test case for all_permissions Get All Permissions # noqa: E501 """ pass def test_all_roles(self): """Test case for all_roles Get All Roles # noqa: E501 """ pass def test_create_model_set(self): """Test case for create_model_set Create Model Set # noqa: E501 """ pass def test_create_permission_set(self): """Test case for create_permission_set Create Permission Set # noqa: E501 """ pass def test_create_role(self): """Test case for create_role Create Role # noqa: E501 """ pass def test_delete_model_set(self): """Test case for delete_model_set Delete Model Set # noqa: E501 """ pass def test_delete_permission_set(self): """Test case for delete_permission_set Delete Permission Set # noqa: E501 """ pass def test_delete_role(self): """Test case for delete_role Delete Role # noqa: E501 """ pass def test_model_set(self): """Test case for model_set Get Model Set # noqa: E501 """ pass def test_permission_set(self): """Test case for permission_set Get Permission Set # noqa: E501 """ pass def test_role(self): """Test case for role Get Role # noqa: E501 """ pass def test_role_groups(self): """Test case for role_groups Get Role Groups # noqa: E501 """ pass def 
test_role_users(self): """Test case for role_users Get Role Users # noqa: E501 """ pass def test_set_role_groups(self): """Test case for set_role_groups Update Role Groups # noqa: E501 """ pass def test_set_role_users(self): """Test case for set_role_users Update Role Users # noqa: E501 """ pass def test_update_model_set(self): """Test case for update_model_set Update Model Set # noqa: E501 """ pass def test_update_permission_set(self): """Test case for update_permission_set Update Permission Set # noqa: E501 """ pass def test_update_role(self): """Test case for update_role Update Role # noqa: E501 """ pass if __name__ == '__main__': unittest.main()
28.96
1,639
0.643054
679
5,068
4.646539
0.238586
0.05832
0.069731
0.095087
0.406973
0.363233
0.143265
0
0
0
0
0.023881
0.281176
5,068
174
1,640
29.126437
0.842163
0.598066
0
0.403846
0
0
0.005135
0
0
0
0
0
0
1
0.423077
false
0.403846
0.096154
0
0.538462
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
4
6aeaf6dc00bb2d82b281aacfadbd78ef75b41db2
343
py
Python
src/pymortests/basic.py
fameyer/pymorWin
b449a38754fddb719d554f1aacf9280a585f1168
[ "Unlicense" ]
null
null
null
src/pymortests/basic.py
fameyer/pymorWin
b449a38754fddb719d554f1aacf9280a585f1168
[ "Unlicense" ]
null
null
null
src/pymortests/basic.py
fameyer/pymorWin
b449a38754fddb719d554f1aacf9280a585f1168
[ "Unlicense" ]
null
null
null
# -*- coding: utf-8 -*-
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright Holders: Rene Milk, Stephan Rave, Felix Schindler
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)

from __future__ import absolute_import, division, print_function


def test_importable():
    import pymor.basic
31.181818
77
0.74344
49
343
5.061224
0.795918
0.032258
0.080645
0
0
0
0
0
0
0
0
0.010135
0.137026
343
10
78
34.3
0.827703
0.641399
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
true
0
1
0
1.333333
0.333333
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
0
1
0
0
4
0aa23c40583113d9944e36347faa0ae2c0ad43e4
6,207
py
Python
tests/unit/stats/test_measurement_map.py
CESARBR/opencensus-python
d59ee42037499b25a9307025adeb32ca0681b544
[ "Apache-2.0" ]
null
null
null
tests/unit/stats/test_measurement_map.py
CESARBR/opencensus-python
d59ee42037499b25a9307025adeb32ca0681b544
[ "Apache-2.0" ]
null
null
null
tests/unit/stats/test_measurement_map.py
CESARBR/opencensus-python
d59ee42037499b25a9307025adeb32ca0681b544
[ "Apache-2.0" ]
1
2019-09-01T06:00:13.000Z
2019-09-01T06:00:13.000Z
# Copyright 2018, OpenCensus Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import mock from opencensus.stats import measurement_map as measurement_map_module from opencensus.stats.measure_to_view_map import MeasureToViewMap from opencensus.tags import execution_context from opencensus.stats.measure import BaseMeasure from opencensus.stats.measure import MeasureInt from opencensus.stats import measure_to_view_map as measure_to_view_map_module from opencensus.stats.view import View class TestMeasurementMap(unittest.TestCase): def test_constructor_explicit(self): measure_to_view_map = {'testMeasure1': 'testVal1'} measurement_map = measurement_map_module.MeasurementMap( measure_to_view_map=measure_to_view_map) self.assertEqual(measure_to_view_map, measurement_map.measure_to_view_map) self.assertEqual({}, measurement_map.measurement_map) def test_measure_int_put(self): measure_to_view_map = mock.Mock() test_key = 'testKey' test_value = 1 measurement_map = measurement_map_module.MeasurementMap( measure_to_view_map=measure_to_view_map) measurement_map.measure_int_put(test_key, test_value) self.assertEqual({'testKey': 1}, measurement_map.measurement_map) def test_measure_float_put(self): measure_to_view_map = mock.Mock() test_key = 'testKey' test_value = 1.0 measurement_map = measurement_map_module.MeasurementMap( measure_to_view_map=measure_to_view_map) measurement_map.measure_float_put(test_key, test_value) self.assertEqual({'testKey': 1.0}, measurement_map.measurement_map) def test_put_attachment_none_key(self): measure_to_view_map = mock.Mock() test_key = None test_value = 'testValue' measurement_map = measurement_map_module.MeasurementMap( measure_to_view_map=measure_to_view_map, attachments={}) with self.assertRaisesRegexp(TypeError, 'attachment key should not be empty and should be a string'): measurement_map.measure_put_attachment(test_key, test_value) def test_put_attachment_none_value(self): measure_to_view_map = mock.Mock() test_key = 'testKey' test_value = None measurement_map = measurement_map_module.MeasurementMap( measure_to_view_map=measure_to_view_map, attachments={}) with self.assertRaisesRegexp(TypeError, 'attachment value should not be empty and should be a string'): measurement_map.measure_put_attachment(test_key, test_value) def test_put_attachment_int_key(self): measure_to_view_map = mock.Mock() test_key = 42 test_value = 'testValue' measurement_map = measurement_map_module.MeasurementMap( measure_to_view_map=measure_to_view_map, attachments={}) with self.assertRaisesRegexp(TypeError, 'attachment key should not be empty and should be a string'): measurement_map.measure_put_attachment(test_key, test_value) def test_put_attachment_int_value(self): measure_to_view_map = mock.Mock() test_key = 'testKey' test_value = 42 measurement_map = measurement_map_module.MeasurementMap( measure_to_view_map=measure_to_view_map, attachments={}) with self.assertRaisesRegexp(TypeError, 'attachment value should not be empty and should be a string'): 
measurement_map.measure_put_attachment(test_key, test_value) def test_put_attachment(self): measure_to_view_map = mock.Mock() test_key = 'testKey' test_value = 'testValue' measurement_map = measurement_map_module.MeasurementMap( measure_to_view_map=measure_to_view_map, attachments={}) measurement_map.measure_put_attachment(test_key, test_value) self.assertEqual({'testKey': 'testValue'}, measurement_map.attachments) def test_put_none_attachment(self): measure_to_view_map = mock.Mock() test_key = 'testKey' test_value = 'testValue' measurement_map = measurement_map_module.MeasurementMap( measure_to_view_map=measure_to_view_map) measurement_map.measure_put_attachment(test_key, test_value) self.assertEqual({'testKey': 'testValue'}, measurement_map.attachments) def test_put_multiple_attachment(self): measure_to_view_map = mock.Mock() test_key = 'testKey' test_value = 'testValue' test_value2 = 'testValue2' measurement_map = measurement_map_module.MeasurementMap( measure_to_view_map=measure_to_view_map, attachments={}) measurement_map.measure_put_attachment(test_key, test_value) measurement_map.measure_put_attachment(test_key, test_value2) self.assertEqual({test_key: test_value2}, measurement_map.attachments) def test_record_against_explicit_tag_map(self): measure_to_view_map = mock.Mock() measurement_map = measurement_map_module.MeasurementMap( measure_to_view_map=measure_to_view_map) tags = {'testtag1': 'testtag1val'} measurement_map.record(tag_map_tags=tags) self.assertTrue(measure_to_view_map.record.called) def test_record_against_implicit_tag_map(self): measure_to_view_map = mock.Mock() measurement_map = measurement_map_module.MeasurementMap( measure_to_view_map=measure_to_view_map) tags = {'testtag1': 'testtag1val'} execution_context.set_current_tag_map(tags) measurement_map.record() self.assertTrue(measure_to_view_map.record.called)
44.335714
111
0.730304
784
6,207
5.397959
0.155612
0.15879
0.132089
0.162571
0.760633
0.715028
0.706285
0.668478
0.637996
0.618147
0
0.00563
0.198808
6,207
139
112
44.654676
0.845365
0.089576
0
0.603774
0
0
0.078056
0
0
0
0
0
0.122642
1
0.113208
false
0
0.084906
0
0.207547
0
0
0
0
null
0
0
1
0
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
0abb2e95c5123ad8a5c95149a3f3bfaa2accfc3a
262
py
Python
vanko/scrapy/mongo/connection.py
ivandeex/scrapoku
2a315e0e8ed2228e17ef65a5647523e66a7bbf36
[ "BSD-3-Clause" ]
1
2018-05-08T21:29:18.000Z
2018-05-08T21:29:18.000Z
vanko/scrapy/mongo/connection.py
ivandeex/scrapoku
2a315e0e8ed2228e17ef65a5647523e66a7bbf36
[ "BSD-3-Clause" ]
10
2021-01-07T22:41:57.000Z
2022-03-29T23:18:11.000Z
vanko/scrapy/mongo/connection.py
ivandeex/scrapoku
2a315e0e8ed2228e17ef65a5647523e66a7bbf36
[ "BSD-3-Clause" ]
2
2020-08-01T10:14:17.000Z
2021-05-14T14:59:44.000Z
def from_settings(settings_or_url):
    from pymongo import MongoClient

    if isinstance(settings_or_url, basestring):
        url = settings_or_url
    else:
        url = settings_or_url.get('MONGODB_URL')
    return MongoClient(url).get_default_database()
26.2
50
0.725191
34
262
5.235294
0.5
0.224719
0.292135
0.179775
0
0
0
0
0
0
0
0
0.198473
262
9
51
29.111111
0.847619
0
0
0
0
0
0.042146
0
0
0
0
0
0
1
0.142857
false
0
0.142857
0
0.428571
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
0ae67037467deff4f9cfa587587c143d547d5afe
205
py
Python
newssimilarity/model/features/source_feature.py
imackerracher/NewsSimilarity
2e6a85dc9e95ef94bec2339987950f4e88f5d909
[ "Apache-2.0" ]
null
null
null
newssimilarity/model/features/source_feature.py
imackerracher/NewsSimilarity
2e6a85dc9e95ef94bec2339987950f4e88f5d909
[ "Apache-2.0" ]
null
null
null
newssimilarity/model/features/source_feature.py
imackerracher/NewsSimilarity
2e6a85dc9e95ef94bec2339987950f4e88f5d909
[ "Apache-2.0" ]
null
null
null
from newssimilarity.model.features.feature import Feature

"""
Feature for source outlet of the article
"""


class SourceFeature(Feature):

    def get_feature_name(self):
        return self.feature_name
18.636364
57
0.756098
26
205
5.846154
0.730769
0.144737
0
0
0
0
0
0
0
0
0
0
0.165854
205
10
58
20.5
0.888889
0
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0.25
0.25
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
4
0affdd24804236d1ee05bd2968d383a4d62c719f
3,902
py
Python
heat/_deprecated/paraheat.py
lukasgehrke/paraheat
551caf98fd760bebb5ca8af4a51c9acdf58da914
[ "BSD-2-Clause" ]
null
null
null
heat/_deprecated/paraheat.py
lukasgehrke/paraheat
551caf98fd760bebb5ca8af4a51c9acdf58da914
[ "BSD-2-Clause" ]
null
null
null
heat/_deprecated/paraheat.py
lukasgehrke/paraheat
551caf98fd760bebb5ca8af4a51c9acdf58da914
[ "BSD-2-Clause" ]
null
null
null
# create a heatmap out of timeseries input data and keep the data structure # can always be directly used for single subject data as well as at the end to prepare plot? otherwise is used by Analyses to prepare per participant binning # keep this very simple, stupid # might have more function for binning later on down the road and can cutout a polygon from the input data before hist binning # checks data structure whether 2D, 3D or 4D import abc import numpy as np import pandas as pd from scipy import stats # import matplotlib.image as mpimg from dataclasses import dataclass # TODO # - call binned_statistic during construction, so attribute heatmap of paraheat is always there @dataclass class ParaHeat: """Simple wrapper class built around scipy's binned_statistic functions Returns: [type]: [description] """ name: str data: pd.DataFrame heatmap: None # TODO refactoring: rename to binned_statistic def __post_init__(self): if self.data is not None: self.col_names = list(self.data.columns) @abc.abstractmethod def binned_statistic(self): pass @abc.abstractmethod def select_aoi(self): pass @dataclass class ParaHeat2D(ParaHeat): def binned_statistic(self, bins=None, agg_stats_func='count'): # provide bins and edges or the program makes an educated guess # freedman rule if bins is None: bins = freedman_bins(self.bg_image.shape) # return hist, xedges, yedges ret = stats.binned_statistic_2d(self.data[self.col_names[0]], self.data[self.col_names[1]], None, agg_stats_func, bins=bins) return ret def select_aoi(self, aoi): # cut out polynom from data and retain either the whats outside or inside pass @dataclass class ParaHeat3D(ParaHeat): def binned_statistic(self): # return hist, xedges, yedges ret = stats.binned_statistic_2d(self.data[self.col_names[0]], self.data[self.col_names[1]], self.data[self.col_names[2]], agg_stats_func, bins=bins) return ret def select_aoi(self): pass def freedman_bins(size): # provide bins and edges or the program makes an educated guess # freedman rule return int(np.log2(max(size)) * 10) # Stuff for later implementation # import scipy.spatial as sp # def transform_data_to_view_coord(self, p, resolution, pmin, pmax): # """ # Fit data to image resolution # Args: # p ([type]): [description] # resolution ([type]): [description] # pmin ([type]): [description] # pmax ([type]): [description] # Returns: # [type]: [description] # """ # dp = pmax - pmin # dv = (p - pmin) / dp * resolution # return dv # def knn2d(self, neighbours=32, dim=2): # """[summary] # Args: # x ([type]): [description] # y ([type]): [description] # resolution ([type]): [description] # neighbours (int, optional): [description]. Defaults to 32. # dim (int, optional): [description]. Defaults to 2. # Returns: # [type]: [description] # """ # # Create the tree # tree = sp.cKDTree(self.data) # # Find the closest nnmax-1 neighbors (first entry is the point itself) # # import pdb; pdb.set_trace() # grid = np.mgrid[0:self.bg_image.shape[0], 0:self.bg_image.shape[1]].T.reshape(self.bg_image.shape[0]*self.bg_image.shape[1], dim) # dists = tree.query(grid, neighbours) # # Inverse of the sum of distances to each grid point. # inv_sum_dists = 1. / dists[0].sum(1) # # Reshape # im = inv_sum_dists.reshape(self.bg_image.shape[0], self.bg_image.shape[1]) # return im
27.286713
157
0.627371
511
3,902
4.696673
0.379648
0.0625
0.032083
0.046667
0.316667
0.205833
0.198333
0.198333
0.198333
0.198333
0
0.011997
0.273706
3,902
143
158
27.286713
0.834862
0.598411
0
0.384615
0
0
0.003353
0
0
0
0
0.013986
0
1
0.205128
false
0.102564
0.128205
0.025641
0.564103
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
1
0
1
0
0
1
0
0
4
7c15165473ddb37a3d2ffe8625fe9159988b25bb
3,590
py
Python
backend/lextool/models/user.py
Lmineor/Tools
cbf58baa84524e763ba6c2bd0c796de912d3f690
[ "MIT" ]
1
2020-05-18T13:23:35.000Z
2020-05-18T13:23:35.000Z
backend/lextool/models/user.py
Lmineor/Tools
cbf58baa84524e763ba6c2bd0c796de912d3f690
[ "MIT" ]
null
null
null
backend/lextool/models/user.py
Lmineor/Tools
cbf58baa84524e763ba6c2bd0c796de912d3f690
[ "MIT" ]
null
null
null
import datetime from werkzeug.security import generate_password_hash, check_password_hash # 转换密码用到的库 from flask_security import UserMixin # 登录和角色需要继承的对象 from itsdangerous import BadSignature, SignatureExpired from itsdangerous import TimedJSONWebSignatureSerializer as Serializer from oocfg import cfg from backend.lextool.models import db class UserConfig(db.Model): __tablename__ = 'user_config' index = db.Column(db.Integer(), primary_key=True) words_book = db.Column(db.String(10), default='CET4') # 1: CET4, 2 CET6, 3: TOEFL 4: GRE words_num = db.Column(db.Integer(), default=20) role = db.Column(db.Boolean, nullable=False) # True:admin, False: common user user_id = db.Column(db.Integer(), db.ForeignKey('user_user.id')) create_at = db.Column(db.DateTime, default=datetime.datetime.now) update_at = db.Column(db.DateTime, default=datetime.datetime.now, onupdate=datetime.datetime.now) class UserMemo(db.Model): __tablename__ = 'user_memo' index = db.Column(db.Integer(), primary_key=True) memo = db.Column(db.Text(16777216), default="写下你的便签") user_id = db.Column(db.Integer(), db.ForeignKey('user_user.id')) create_at = db.Column(db.DateTime, default=datetime.datetime.now) update_at = db.Column(db.DateTime, default=datetime.datetime.now, onupdate=datetime.datetime.now) class User(db.Model, UserMixin): __tablename__ = 'user_user' id = db.Column(db.Integer(), primary_key=True) username = db.Column(db.String(80), nullable=False) email = db.Column(db.String(100), unique=True, nullable=False, index=True) password_hash = db.Column(db.String(128)) activate = db.Column(db.Boolean, default=False) create_at = db.Column(db.DateTime, default=datetime.datetime.now) update_at = db.Column(db.DateTime, default=datetime.datetime.now, onupdate=datetime.datetime.now) memo = db.relationship("UserMemo", uselist=False, backref="user_user", cascade="delete") # todo = db.relationship("TODO", uselist=False, backref="user_user", cascade="delete") config = db.relationship("UserConfig", uselist=False, backref="user_user", cascade="delete") def __repr__(self): return "<User_id:{0}>".format(self.id) @property def password(self): raise AttributeError("密码不允许读取") # 转换密码为hash存入数据库 @password.setter def password(self, password): self.password_hash = generate_password_hash(password) # 检查密码 def check_password_hash(self, password): return check_password_hash(self.password_hash, password) # 获取token def generate_auth_token(self, expiration=cfg.CONF.AUTH.token_expiration): s = Serializer(cfg.CONF.AUTH.secret_key, expires_in=expiration) return s.dumps({'id': self.id}) # 解析token,确认登录的用户身份 @staticmethod def verify_auth_token(token): s = Serializer(cfg.CONF.AUTH.secret_key) try: data = s.loads(token) except BadSignature as e: return None # invalid token except SignatureExpired as e: return None # token expire user = User.query.get(data['id']) return user @staticmethod def check_activate_token(token): s = Serializer(cfg.CONF.AUTH.secret_key) try: data = s.loads(token) except: return False u = User.query.get(data['id']) if not u: # 用户已被删除 return False if not u.activate: u.activate = True db.session.add(u) db.session.commit() return True
37.395833
101
0.681616
459
3,590
5.1939
0.276688
0.063758
0.079698
0.042785
0.413171
0.373742
0.373742
0.291527
0.261326
0.261326
0
0.009766
0.201393
3,590
95
102
37.789474
0.821765
0.069638
0
0.30137
1
0
0.041178
0
0
0
0
0.010526
0
1
0.09589
false
0.109589
0.09589
0.027397
0.684932
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
1
0
0
0
0
1
0
0
1
0
0
4
7c15d04517e5393f3a7ca33c57625eec546eb664
119
py
Python
colour/notation/datasets/__init__.py
rift-labs-developer/colour
15112dbe824aab0f21447e0db4a046a28a06f43a
[ "BSD-3-Clause" ]
1,380
2015-01-10T12:30:33.000Z
2022-03-30T10:19:57.000Z
colour/notation/datasets/__init__.py
rift-labs-developer/colour
15112dbe824aab0f21447e0db4a046a28a06f43a
[ "BSD-3-Clause" ]
638
2015-01-02T10:49:05.000Z
2022-03-29T10:16:22.000Z
colour/notation/datasets/__init__.py
rift-labs-developer/colour
15112dbe824aab0f21447e0db4a046a28a06f43a
[ "BSD-3-Clause" ]
250
2015-01-21T15:27:19.000Z
2022-03-30T10:23:58.000Z
# -*- coding: utf-8 -*-

from .munsell import *  # noqa
from . import munsell

__all__ = []
__all__ += munsell.__all__
14.875
30
0.638655
14
119
4.571429
0.571429
0.3125
0
0
0
0
0
0
0
0
0
0.010526
0.201681
119
7
31
17
0.663158
0.218487
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
7c315b065c2d5304cf98765086e7a04a8d206c39
9,475
py
Python
experimental/language_structure/psl/psl_model_multiwoz_test_util.py
y0ast/uncertainty-baselines
8d32c77ba0803ed715c1406378adf10ebd61ab74
[ "Apache-2.0" ]
794
2020-07-17T06:23:58.000Z
2022-03-31T08:31:53.000Z
experimental/language_structure/psl/psl_model_multiwoz_test_util.py
y0ast/uncertainty-baselines
8d32c77ba0803ed715c1406378adf10ebd61ab74
[ "Apache-2.0" ]
136
2020-08-04T22:42:04.000Z
2022-03-26T21:07:03.000Z
experimental/language_structure/psl/psl_model_multiwoz_test_util.py
y0ast/uncertainty-baselines
8d32c77ba0803ed715c1406378adf10ebd61ab74
[ "Apache-2.0" ]
129
2020-08-16T12:46:55.000Z
2022-03-31T23:00:10.000Z
# coding=utf-8 # Copyright 2021 The Uncertainty Baselines Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Util file for psl rules test.""" from typing import List import tensorflow as tf LOGITS = [[[0.0, 0.0, 0.4, 0.4, 0.0, 0.2, 0.0, 0.0, 0.0], [0.0, 0.0, 0.2, 0.6, 0.0, 0.2, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0], [0.0, 0.8, 0.1, 0.1, 0.2, 0.0, 0.0, 0.0, 0.2], [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0], [0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.3, 0.0, 1.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]], [[0.0, 0.8, 0.0, 0.0, 0.0, 0.2, 0.0, 0.0, 0.0], [0.0, 0.0, 0.5, 0.0, 0.0, 0.5, 0.0, 0.0, 0.0], [0.0, 0.0, 0.5, 0.4, 0.0, 0.0, 0.0, 0.1, 0.0], [0.0, 0.0, 0.8, 0.2, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.9, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]] FEATURES = [[[-1, -2, -2, -2, -1, -2, -2, -2], [-1, -2, -2, -2, -1, -2, -2, -2], [-1, -2, -2, -2, -2, -2, -2, -2], [-1, -2, -2, -2, -2, -1, -2, -1], [-1, -2, -2, -1, -1, -2, -2, -2], [-2, -1, -2, -1, -1, -2, -1, -2], [-3, -2, -2, -2, -2, -2, -2, -2], [-3, -2, -2, -2, -2, -2, -2, -2], [-3, -2, -2, -2, -2, -2, -2, -2], [-3, -2, -2, -2, -2, -2, -2, -2]], [[-1, -2, -2, -2, -2, -2, -2, -2], [-1, -2, -2, -2, -2, -2, -2, -2], [-1, -2, -2, -1, -1, -2, -2, -2], [-1, -2, -2, -2, -1, -2, -2, -1], [-1, -2, -1, -2, -1, -2, -2, -2], [-2, -2, -2, -1, -1, -2, -2, -2], [-3, -2, -2, -2, -2, -2, -2, -2], [-3, -2, -2, -2, -2, -2, -2, -2], [-3, -2, -2, -2, -2, -2, -2, -2], [-3, -2, -2, -2, -2, -2, -2, -2]]] DATA = { 'train_data': [ [[[ 1109, 1616, 41, 800, 740, 1743, 557, 981, 886, 1616, 1658, 909, 1380, 1256, 1565, 482, 1304 ], [1109, 1304]], [[1109, 1023, 38, 893, 1037, 1664, 886, 1304], [ 1109, 218, 751, 1616, 812, 1406, 1152, 981, 65, 778, 688, 886, 427, 641, 611, 742, 321, 557, 354, 1471, 161, 182, 767, 1304 ]], [[1109, 1162, 145, 557, 981, 740, 734, 776, 1037, 755, 886, 1304], [ 1109, 1616, 812, 1406, 1152, 981, 79, 886, 766, 1616, 558, 165, 1471, 161, 182, 4, 1304 ]], [[1109, 1738, 145, 893, 532, 1304], [ 1109, 1616, 1658, 218, 1616, 812, 1406, 1152, 981, 79, 886, 1023, 38, 557, 354, 182, 731, 161, 182, 1304 ]], [[1109, 1738, 145, 1215, 1047, 1274, 1304], [ 1109, 1616, 812, 1406, 1152, 981, 740, 65, 778, 688, 886, 427, 641, 611, 742, 321, 557, 354, 1017, 161, 731, 1304 ]], [[1109, 1162, 641, 631, 145, 1738, 1499, 740, 1743, 557, 981, 1304], [ 1109, 1616, 1658, 218, 145, 1162, 1499, 981, 740, 263, 173, 62, 886, 766, 1616, 558, 165, 1471, 161, 1017, 4, 1304 ]]], [[[ 1109, 1616, 1658, 1450, 1743, 800, 1430, 79, 886, 1616, 1658, 1496, 1565, 1448, 929, 1489, 742, 1662, 1565, 1662, 1304 ], [1109, 1304]]], [[[ 1109, 1616, 1658, 1276, 1450, 1743, 800, 1430, 79, 751, 1616, 1133, 
1431, 1496, 742, 1062, 1415, 1565, 818, 1304 ], [1109, 1304]]], [[[ 1109, 1616, 41, 800, 981, 886, 1616, 1077, 742, 1145, 1565, 83, 1037, 923, 1304 ], [1109, 1304]], [[1109, 1738, 145, 557, 740, 1743, 557, 981, 909, 256, 680, 187, 1304], [ 1109, 218, 1616, 812, 1406, 1152, 981, 740, 886, 1023, 38, 557, 354, 182, 767, 161, 1017, 4, 1304 ]], [[1109, 525, 641, 751, 1498, 1133, 1431, 1085, 1743, 610, 1304], [1109, 427, 641, 611, 742, 865, 641, 557, 574, 1304]], [[1109, 525, 641, 751, 1498, 1133, 1431, 1085, 886, 1304], [1109, 1185, 641, 1077, 1762, 512, 4, 1304]]], [[[ 1109, 764, 1178, 1616, 1658, 1450, 1743, 557, 981, 79, 886, 1616, 1133, 1431, 1496, 742, 821, 1565, 83, 1304 ], [1109, 1304]]] ], 'test_data': [ [[[ 1109, 1616, 1658, 1450, 1743, 891, 38, 800, 1430, 886, 1616, 1658, 909, 742, 499, 1565, 1159, 1472, 886, 1304 ], [1109, 1304]]], [[[ 1109, 1616, 427, 611, 564, 112, 801, 1412, 742, 446, 248, 800, 1001, 194, 886, 1616, 1077, 742, 1514, 1743, 142, 886, 1304 ], [1109, 1304]], [[1109, 1738, 1573, 557, 1510, 1561, 1301, 1301, 1412, 4, 1304], [ 1109, 1616, 323, 800, 1409, 1177, 886, 1573, 1738, 557, 1412, 742, 1621, 248, 800, 1001, 194, 886, 1304 ]], [[1109, 1499, 1718, 37, 1738, 1337, 1616, 1077, 886, 1304], [ 1109, 800, 1176, 72, 1506, 1738, 1374, 751, 427, 641, 611, 742, 1514, 1573, 1304 ]]], [[[ 1109, 1228, 1616, 1658, 1450, 1743, 800, 981, 886, 1616, 1077, 742, 1145, 283, 1669, 1565, 482, 1250, 551, 886, 1304 ], [1109, 1304]], [[1109, 1228, 766, 641, 1406, 1762, 742, 849, 1304], [ 1109, 1616, 812, 1406, 1152, 981, 740, 886, 427, 641, 611, 742, 321, 557, 354, 182, 731, 4, 1304 ]], [[1109, 1718, 37, 1738, 1337, 1616, 1077, 1304], [1109, 427, 641, 611, 742, 865, 641, 557, 574, 1304]], [[1109, 525, 641, 37, 1738, 1337, 1616, 1077, 886, 1304], [1109, 1738, 145, 1762, 512, 1616, 766, 814, 641, 4, 1304]]], [[[ 1109, 1228, 1616, 1658, 1450, 1743, 662, 226, 557, 981, 79, 886, 1616, 1658, 1496, 742, 1187, 1493, 1136, 1565, 1690, 886, 1304 ], [1109, 1304]]], ], 'vocab_mapping': { 'address': 53, 'thank': 525, 'sure': 631, 'yes': 758, 'hello': 764, 'pricey': 1012, 'hi': 1228, 'great': 1490, 'no': 1499, 'phone': 1596, 'thanks': 1718, }, 'train_labels': [[ 'init_request', 'second_request', 'second_request', 'second_request', 'second_request', 'insist' ], ['init_request'], ['init_request'], ['init_request', 'second_request', 'cancel', 'end'], ['init_request']], 'test_labels': [['init_request'], ['init_request', 'slot_question', 'cancel'], ['init_request', 'second_request', 'cancel', 'end'], ['init_request']] } TEST_MULTIWOZ_CONFIG = { 'default_seed': 4, 'batch_size': 128, 'max_dialog_size': 10, 'max_utterance_size': 40, 'class_map': { 'accept': 0, 'cancel': 1, 'end': 2, 'greet': 3, 'info_question': 4, 'init_request': 5, 'insist': 6, 'second_request': 7, 'slot_question': 8, }, 'accept_words': ['yes', 'great'], 'cancel_words': ['no'], 'end_words': ['thank', 'thanks'], 'greet_words': ['hello', 'hi'], 'info_question_words': ['address', 'phone'], 'insist_words': ['sure', 'no'], 'slot_question_words': ['pricey'], 'includes_word': -1, 'excludes_word': -2, 'mask_index': 0, 'accept_index': 1, 'cancel_index': 2, 'end_index': 3, 'greet_index': 4, 'info_question_index': 5, 'insist_index': 6, 'slot_question_index': 7, 'utterance_mask': -1, 'last_utterance_mask': -2, 'pad_utterance_mask': -3, 'shuffle_train': True, 'shuffle_test': False, 'train_epochs': 5, } def build_constrained_model(input_size: List[int]) -> tf.keras.Model: """Build simple neural model for class prediction.""" input_layer = 
tf.keras.layers.Input(input_size) hidden_layer_1 = tf.keras.layers.Dense(1024)(input_layer) hidden_layer_2 = tf.keras.layers.Dense( 512, activation='sigmoid')( hidden_layer_1) output = tf.keras.layers.Dense( 9, activation='softmax', kernel_regularizer=tf.keras.regularizers.l2(1.0))( hidden_layer_2) model = tf.keras.Model(input_layer, output) model.compile( optimizer=tf.keras.optimizers.Adam(learning_rate=0.001), loss='categorical_crossentropy', metrics=['accuracy']) return model
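A brief, hypothetical call of build_constrained_model above; the 40-dimensional input mirrors max_utterance_size in TEST_MULTIWOZ_CONFIG, but the exact feature shape used by the real tests is an assumption.

# hypothetical: a model over 40-dimensional inputs producing the 9-way class distribution
model = build_constrained_model([40])
model.summary()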
39.644351
80
0.463219
1,460
9,475
2.950685
0.216438
0.13974
0.19429
0.239554
0.390669
0.300604
0.285747
0.218895
0.184076
0.1474
0
0.391969
0.329815
9,475
238
81
39.810924
0.286457
0.071768
0
0.200957
0
0
0.10171
0.002737
0
0
0
0
0
1
0.004785
false
0
0.009569
0
0.019139
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
7c341949d080e8daf98a74b8ebb928e4027e2d4b
6,757
py
Python
src/sage/quadratic_forms/random_quadraticform.py
bopopescu/sagesmc
e8d1d31f6f598dba2d763baa2d2e804338f9e89e
[ "BSL-1.0" ]
5
2015-01-04T07:15:06.000Z
2022-03-04T15:15:18.000Z
src/sage/quadratic_forms/random_quadraticform.py
bopopescu/sagesmc
e8d1d31f6f598dba2d763baa2d2e804338f9e89e
[ "BSL-1.0" ]
null
null
null
src/sage/quadratic_forms/random_quadraticform.py
bopopescu/sagesmc
e8d1d31f6f598dba2d763baa2d2e804338f9e89e
[ "BSL-1.0" ]
10
2016-09-28T13:12:40.000Z
2022-02-12T09:28:34.000Z
""" Creating A Random Quadratic Form """ from sage.quadratic_forms.quadratic_form import QuadraticForm from sage.quadratic_forms.ternary_qf import TernaryQF from sage.rings.ring import is_Ring from sage.rings.all import ZZ ################################################ ## Routines to create a random quadratic form ## ################################################ def random_quadraticform(R, n, rand_arg_list=[]): """ Create a random quadratic form in `n` variables defined over the ring `R`. The last (and optional) argument ``rand_arg_list`` is a list of at most 3 elements which is passed (as at most 3 separate variables) into the method ``R.random_element()``. INPUT: - `R` -- a ring. - `n` -- an integer `\ge 0` - ``rand_arg_list`` -- a list of at most 3 arguments which can be taken by ``R.random_element()``. OUTPUT: A quadratic form over the ring `R`. EXAMPLES:: sage: random_quadraticform(ZZ, 3, [1,5]) ## RANDOM Quadratic form in 3 variables over Integer Ring with coefficients: [ 3 2 3 ] [ * 1 4 ] [ * * 3 ] :: sage: random_quadraticform(ZZ, 3, [-5,5]) ## RANDOM Quadratic form in 3 variables over Integer Ring with coefficients: [ 3 2 -5 ] [ * 2 -2 ] [ * * -5 ] :: sage: random_quadraticform(ZZ, 3, [-50,50]) ## RANDOM Quadratic form in 3 variables over Integer Ring with coefficients: [ 1 8 -23 ] [ * 0 0 ] [ * * 6 ] """ ## Sanity Checks: We have a ring and there are at most 3 parameters for randomness! if len(rand_arg_list) > 3: raise TypeError, "Oops! The list of randomness arguments can have at most 3 elements." if not is_Ring(R): raise TypeError, "Oops! The first argument must be a ring." ## Create a list of upper-triangular entries for the quadratic form L = len(rand_arg_list) nn = int(n*(n+1)/2) if L == 0: rand_list = [R.random_element() for _ in range(nn)] elif L == 1: rand_list = [R.random_element(rand_arg_list[0]) for _ in range(nn)] elif L == 2: rand_list = [R.random_element(rand_arg_list[0], rand_arg_list[1]) for _ in range(nn)] elif L == 3: rand_list = [R.random_element(rand_arg_list[0], rand_arg_list[1], rand_arg_list[2]) for _ in range(nn)] ## Return the Quadratic Form return QuadraticForm(R, n, rand_list) def random_quadraticform_with_conditions(R, n, condition_list=[], rand_arg_list=[]): """ Create a random quadratic form in `n` variables defined over the ring `R` satisfying a list of boolean (i.e. True/False) conditions. The conditions `c` appearing in the list must be boolean functions which can be called either as ``Q.c()`` or ``c(Q)``, where ``Q`` is the random quadratic form. The last (and optional) argument ``rand_arg_list`` is a list of at most 3 elements which is passed (as at most 3 separate variables) into the method ``R.random_element()``. EXAMPLES:: sage: Q = random_quadraticform_with_conditions(ZZ, 3, [QuadraticForm.is_positive_definite], [-5, 5]) sage: Q ## RANDOM Quadratic form in 3 variables over Integer Ring with coefficients: [ 3 -2 -5 ] [ * 2 2 ] [ * * 3 ] """ Q = random_quadraticform(R, n, rand_arg_list) Done_Flag = True ## Check that all conditions are satisfied while Done_Flag: Done_Flag = False for c in condition_list: ## Check if condition c is satisfied try: bool_ans = Q.c() except Exception: bool_ans = c(Q) ## Create a new quadratic form if a condition fails if (bool_ans == False): Q = random_quadraticform(R, n, rand_arg_list) Done_Flag = True break ## Return the quadratic form return Q def random_ternaryqf(rand_arg_list = []): """ Create a random ternary quadratic form. 
The last (and optional) argument ``rand_arg_list`` is a list of at most 3 elements which is passed (as at most 3 separate variables) into the method ``R.random_element()``. INPUT: - ``rand_arg_list`` -- a list of at most 3 arguments which can be taken by ``R.random_element()``. OUTPUT: A ternary quadratic form. EXAMPLES:: sage: random_ternaryqf() ##RANDOM Ternary quadratic form with integer coefficients: [1 1 4] [-1 1 -1] sage: random_ternaryqf([-1, 2]) ##RANDOM Ternary quadratic form with integer coefficients: [1 0 1] [-1 -1 -1] sage: random_ternaryqf([-10, 10, "uniform"]) ##RANDOM Ternary quadratic form with integer coefficients: [7 -8 2] [0 3 -6] """ R = ZZ n = 6 L = len(rand_arg_list) if L == 0: rand_list = [ R.random_element() for _ in range(n)] elif L == 1: rand_list = [ R.random_element(rand_arg_list[0]) for _ in range(6)] elif L == 2: rand_list = [ R.random_element(rand_arg_list[0], rand_arg_list[1]) for _ in range(6)] elif L == 3: rand_list = [ R.random_element(rand_arg_list[0], rand_arg_list[1], rand_arg_list[2]) for _ in range(6)] return TernaryQF(rand_list) def random_ternaryqf_with_conditions(condition_list=[], rand_arg_list=[]): """ Create a random ternary quadratic form satisfying a list of boolean (i.e. True/False) conditions. The conditions `c` appearing in the list must be boolean functions which can be called either as ``Q.c()`` or ``c(Q)``, where ``Q`` is the random ternary quadratic form. The last (and optional) argument ``rand_arg_list`` is a list of at most 3 elements which is passed (as at most 3 separate variables) into the method ``R.random_element()``. EXAMPLES:: sage: Q = random_ternaryqf_with_conditions([TernaryQF.is_positive_definite], [-5, 5]) sage: Q ## RANDOM Ternary quadratic form with integer coefficients: [3 4 2] [2 -2 -1] """ Q = random_ternaryqf(rand_arg_list) Done_Flag = True ## Check that all conditions are satisfied while Done_Flag: Done_Flag = False for c in condition_list: ## Check if condition c is satisfied try: bool_ans = Q.c() except Exception: bool_ans = c(Q) ## Create a new quadratic form if a condition fails if (bool_ans == False): Q = random_ternaryqf(rand_arg_list) Done_Flag = True break return Q
31.282407
112
0.598194
949
6,757
4.109589
0.13804
0.052051
0.081795
0.030769
0.791026
0.74
0.726667
0.697692
0.645385
0.617179
0
0.023923
0.28859
6,757
215
113
31.427907
0.787393
0.072221
0
0.603175
0
0
0.041746
0
0
0
0
0
0
0
null
null
0
0.063492
null
null
0
0
0
0
null
0
0
0
0
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
4
7c5839cd796214afe04042e39c50b00e4f26a81f
171
py
Python
launch.py
thedeltaflyer/OUTD_2_ACLOG
d228d92485a2f5f4a306c72795948e6c08bcfb1b
[ "MIT" ]
null
null
null
launch.py
thedeltaflyer/OUTD_2_ACLOG
d228d92485a2f5f4a306c72795948e6c08bcfb1b
[ "MIT" ]
4
2021-05-03T02:22:12.000Z
2021-05-05T03:05:54.000Z
launch.py
thedeltaflyer/OUTD_2_ACLOG
d228d92485a2f5f4a306c72795948e6c08bcfb1b
[ "MIT" ]
1
2021-05-03T00:59:20.000Z
2021-05-03T00:59:20.000Z
# This file acts as an entry point for running this as a script
# or for building using pyinstaller

from outd2aclog.gui import main

if __name__ == '__main__':
    main()
24.428571
63
0.74269
27
171
4.407407
0.814815
0
0
0
0
0
0
0
0
0
0
0.007299
0.19883
171
6
64
28.5
0.861314
0.555556
0
0
0
0
0.109589
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
7c66b3d3668995533cf0a6c8c4412e25c6166b20
141
py
Python
config/wsgi.py
kimbugp/ah-haven-space-sprinters
00e08131d1c4d7785f127f8e1beb17488c8ca0fd
[ "BSD-3-Clause" ]
4
2019-04-30T13:38:15.000Z
2021-04-22T08:56:54.000Z
config/wsgi.py
kimbugp/ah-haven-space-sprinters
00e08131d1c4d7785f127f8e1beb17488c8ca0fd
[ "BSD-3-Clause" ]
37
2019-03-19T09:33:42.000Z
2019-04-30T20:02:02.000Z
config/wsgi.py
kimbugp/ah-haven-space-sprinters
00e08131d1c4d7785f127f8e1beb17488c8ca0fd
[ "BSD-3-Clause" ]
4
2019-06-29T11:50:41.000Z
2019-12-09T02:17:05.000Z
import os

from django.core.wsgi import get_wsgi_application

os.environ.get('DJANGO_SETTINGS_MODULE')

application = get_wsgi_application()
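Note that os.environ.get only reads DJANGO_SETTINGS_MODULE and discards the result; the conventional WSGI entry point sets a default instead. A minimal sketch of that pattern, with the settings module path shown as a placeholder assumption:

# hypothetical: replace 'config.settings' with the project's actual settings module
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings')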
17.625
49
0.829787
20
141
5.55
0.55
0.126126
0.324324
0
0
0
0
0
0
0
0
0
0.092199
141
7
50
20.142857
0.867188
0
0
0
0
0
0.156028
0.156028
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
7c774c686e447a57d94ae09305875ffaa33f0ba0
22,632
py
Python
zarc/views_2017-08-13-02:36:26.py
nyimbi/caseke
ce4a0fa44cd383bc23900e42f81656f089c8fdd9
[ "MIT" ]
1
2019-06-03T16:20:35.000Z
2019-06-03T16:20:35.000Z
zarc/views_2017-08-13-02:36:26.py
nyimbi/caseke
ce4a0fa44cd383bc23900e42f81656f089c8fdd9
[ "MIT" ]
20
2020-01-28T22:02:29.000Z
2022-03-29T22:28:34.000Z
zarc/views_2017-08-13-02:36:26.py
nyimbi/caseke
ce4a0fa44cd383bc23900e42f81656f089c8fdd9
[ "MIT" ]
1
2019-06-10T17:20:48.000Z
2019-06-10T17:20:48.000Z
# coding: utf-8 # views.py AUTOGENERATED BY gen_script.sh from kp4.py # Copyright (C) Nyimbi Odero, Sun Aug 13 02:35:14 EAT 2017 import calendar from flask import redirect, flash, url_for, Markup from flask import render_template from flask_appbuilder.models.sqla.interface import SQLAInterface from flask_appbuilder.views import ModelView, BaseView, MasterDetailView, MultipleView, RestCRUDView, CompactCRUDMixin from flask_appbuilder import ModelView, CompactCRUDMixin, aggregate_count, action, expose, BaseView, has_access from flask_appbuilder.charts.views import ChartView, TimeChartView, GroupByChartView from flask_appbuilder.models.group import aggregate_count from flask_appbuilder.widgets import ListThumbnail, ListWidget from flask_appbuilder.widgets import FormVerticalWidget, FormInlineWidget, FormHorizontalWidget, ShowBlockWidget from flask_appbuilder.models.sqla.filters import FilterStartsWith, FilterEqualFunction as FA from app import appbuilder, db from .models import * # Basic Lists hide_list = ['created_by', 'changed_by', 'created_on', 'changed_on'] #To pretty Print from PersonMixin def pretty_month_year(value): return calendar.month_name[value.month] + ' ' + str(value.year) def pretty_year(value): return str(value.year) def fill_gender(): try: db.session.add(Gender(name='Male')) db.session.add(Gender(name='Female')) db.session.commit() except: db.session.rollback() class LawyerChartView(GroupByChartView): datamodel = SQLAInterface(Lawyer , db.session) chart_title = 'Grouped Lawyer by Birth' chart_3d = 'true' label_columns = LawyerView.label_columns chart_type = 'PieChart' definitions = [ { 'group' : 'age_today', "series" : [(aggregate_count,"age_today")] }, { 'group' : 'gender', "series" : [(aggregate_count,"age_today")] } ] class LawyerTimeChartView(GroupByChartView): datamodel = SQLAInterface(Lawyer , db.session) chart_title = 'Grouped Birth Lawyer' chart_type = 'AreaChart' chart_3d = 'true' label_columns = LawyerView.label_columns definitions = [ { 'group' : 'age_today', 'formatter': pretty_month_year, "series" : [(aggregate_count,"age_today")] }, { 'group': 'age_today', 'formatter': pretty_year, "series" : [(aggregate_count,"age_today")] } ] class PlaintiffChartView(GroupByChartView): datamodel = SQLAInterface(Plaintiff , db.session) chart_title = 'Grouped Plaintiff by Birth' chart_3d = 'true' label_columns = PlaintiffView.label_columns chart_type = 'PieChart' definitions = [ { 'group' : 'age_today', "series" : [(aggregate_count,"age_today")] }, { 'group' : 'gender', "series" : [(aggregate_count,"age_today")] } ] class PlaintiffTimeChartView(GroupByChartView): datamodel = SQLAInterface(Plaintiff , db.session) chart_title = 'Grouped Birth Plaintiff' chart_type = 'AreaChart' chart_3d = 'true' label_columns = PlaintiffView.label_columns definitions = [ { 'group' : 'age_today', 'formatter': pretty_month_year, "series" : [(aggregate_count,"age_today")] }, { 'group': 'age_today', 'formatter': pretty_year, "series" : [(aggregate_count,"age_today")] } ] class WitnesChartView(GroupByChartView): datamodel = SQLAInterface(Witnes , db.session) chart_title = 'Grouped Witnes by Birth' chart_3d = 'true' label_columns = WitnesView.label_columns chart_type = 'PieChart' definitions = [ { 'group' : 'age_today', "series" : [(aggregate_count,"age_today")] }, { 'group' : 'gender', "series" : [(aggregate_count,"age_today")] } ] class WitnesTimeChartView(GroupByChartView): datamodel = SQLAInterface(Witnes , db.session) chart_title = 'Grouped Birth Witnes' chart_type = 'AreaChart' chart_3d = 'true' 
label_columns = WitnesView.label_columns definitions = [ { 'group' : 'age_today', 'formatter': pretty_month_year, "series" : [(aggregate_count,"age_today")] }, { 'group': 'age_today', 'formatter': pretty_year, "series" : [(aggregate_count,"age_today")] } ] class SuretyChartView(GroupByChartView): datamodel = SQLAInterface(Surety , db.session) chart_title = 'Grouped Surety by Birth' chart_3d = 'true' label_columns = SuretyView.label_columns chart_type = 'PieChart' definitions = [ { 'group' : 'age_today', "series" : [(aggregate_count,"age_today")] }, { 'group' : 'gender', "series" : [(aggregate_count,"age_today")] } ] class SuretyTimeChartView(GroupByChartView): datamodel = SQLAInterface(Surety , db.session) chart_title = 'Grouped Birth Surety' chart_type = 'AreaChart' chart_3d = 'true' label_columns = SuretyView.label_columns definitions = [ { 'group' : 'age_today', 'formatter': pretty_month_year, "series" : [(aggregate_count,"age_today")] }, { 'group': 'age_today', 'formatter': pretty_year, "series" : [(aggregate_count,"age_today")] } ] class ProsecutorChartView(GroupByChartView): datamodel = SQLAInterface(Prosecutor , db.session) chart_title = 'Grouped Prosecutor by Birth' chart_3d = 'true' label_columns = ProsecutorView.label_columns chart_type = 'PieChart' definitions = [ { 'group' : 'age_today', "series" : [(aggregate_count,"age_today")] }, { 'group' : 'gender', "series" : [(aggregate_count,"age_today")] } ] class ProsecutorTimeChartView(GroupByChartView): datamodel = SQLAInterface(Prosecutor , db.session) chart_title = 'Grouped Birth Prosecutor' chart_type = 'AreaChart' chart_3d = 'true' label_columns = ProsecutorView.label_columns definitions = [ { 'group' : 'age_today', 'formatter': pretty_month_year, "series" : [(aggregate_count,"age_today")] }, { 'group': 'age_today', 'formatter': pretty_year, "series" : [(aggregate_count,"age_today")] } ] class PoliceofficerChartView(GroupByChartView): datamodel = SQLAInterface(Policeofficer , db.session) chart_title = 'Grouped Policeofficer by Birth' chart_3d = 'true' label_columns = PoliceofficerView.label_columns chart_type = 'PieChart' definitions = [ { 'group' : 'age_today', "series" : [(aggregate_count,"age_today")] }, { 'group' : 'gender', "series" : [(aggregate_count,"age_today")] } ] class PoliceofficerTimeChartView(GroupByChartView): datamodel = SQLAInterface(Policeofficer , db.session) chart_title = 'Grouped Birth Policeofficer' chart_type = 'AreaChart' chart_3d = 'true' label_columns = PoliceofficerView.label_columns definitions = [ { 'group' : 'age_today', 'formatter': pretty_month_year, "series" : [(aggregate_count,"age_today")] }, { 'group': 'age_today', 'formatter': pretty_year, "series" : [(aggregate_count,"age_today")] } ] class JudicialofficerChartView(GroupByChartView): datamodel = SQLAInterface(Judicialofficer , db.session) chart_title = 'Grouped Judicialofficer by Birth' chart_3d = 'true' label_columns = JudicialofficerView.label_columns chart_type = 'PieChart' definitions = [ { 'group' : 'age_today', "series" : [(aggregate_count,"age_today")] }, { 'group' : 'gender', "series" : [(aggregate_count,"age_today")] } ] class JudicialofficerTimeChartView(GroupByChartView): datamodel = SQLAInterface(Judicialofficer , db.session) chart_title = 'Grouped Birth Judicialofficer' chart_type = 'AreaChart' chart_3d = 'true' label_columns = JudicialofficerView.label_columns definitions = [ { 'group' : 'age_today', 'formatter': pretty_month_year, "series" : [(aggregate_count,"age_today")] }, { 'group': 'age_today', 'formatter': pretty_year, 
"series" : [(aggregate_count,"age_today")] } ] class DefendantChartView(GroupByChartView): datamodel = SQLAInterface(Defendant , db.session) chart_title = 'Grouped Defendant by Birth' chart_3d = 'true' label_columns = DefendantView.label_columns chart_type = 'PieChart' definitions = [ { 'group' : 'age_today', "series" : [(aggregate_count,"age_today")] }, { 'group' : 'gender', "series" : [(aggregate_count,"age_today")] } ] class DefendantTimeChartView(GroupByChartView): datamodel = SQLAInterface(Defendant , db.session) chart_title = 'Grouped Birth Defendant' chart_type = 'AreaChart' chart_3d = 'true' label_columns = DefendantView.label_columns definitions = [ { 'group' : 'age_today', 'formatter': pretty_month_year, "series" : [(aggregate_count,"age_today")] }, { 'group': 'age_today', 'formatter': pretty_year, "series" : [(aggregate_count,"age_today")] } ] class VisitorChartView(GroupByChartView): datamodel = SQLAInterface(Visitor , db.session) chart_title = 'Grouped Visitor by Birth' chart_3d = 'true' label_columns = VisitorView.label_columns chart_type = 'PieChart' definitions = [ { 'group' : 'age_today', "series" : [(aggregate_count,"age_today")] }, { 'group' : 'gender', "series" : [(aggregate_count,"age_today")] } ] class VisitorTimeChartView(GroupByChartView): datamodel = SQLAInterface(Visitor , db.session) chart_title = 'Grouped Birth Visitor' chart_type = 'AreaChart' chart_3d = 'true' label_columns = VisitorView.label_columns definitions = [ { 'group' : 'age_today', 'formatter': pretty_month_year, "series" : [(aggregate_count,"age_today")] }, { 'group': 'age_today', 'formatter': pretty_year, "series" : [(aggregate_count,"age_today")] } ] class WarderChartView(GroupByChartView): datamodel = SQLAInterface(Warder , db.session) chart_title = 'Grouped Warder by Birth' chart_3d = 'true' label_columns = WarderView.label_columns chart_type = 'PieChart' definitions = [ { 'group' : 'age_today', "series" : [(aggregate_count,"age_today")] }, { 'group' : 'gender', "series" : [(aggregate_count,"age_today")] } ] class WarderTimeChartView(GroupByChartView): datamodel = SQLAInterface(Warder , db.session) chart_title = 'Grouped Birth Warder' chart_type = 'AreaChart' chart_3d = 'true' label_columns = WarderView.label_columns definitions = [ { 'group' : 'age_today', 'formatter': pretty_month_year, "series" : [(aggregate_count,"age_today")] }, { 'group': 'age_today', 'formatter': pretty_year, "series" : [(aggregate_count,"age_today")] } ] class VisitChartView(GroupByChartView): datamodel = SQLAInterface(Visit , db.session) chart_title = 'Grouped Visit by Birth' chart_3d = 'true' label_columns = VisitView.label_columns chart_type = 'PieChart' definitions = [ { 'group' : 'age_today', "series" : [(aggregate_count,"age_today")] }, { 'group' : 'gender', "series" : [(aggregate_count,"age_today")] } ] class VisitTimeChartView(GroupByChartView): datamodel = SQLAInterface(Visit , db.session) chart_title = 'Grouped Birth Visit' chart_type = 'AreaChart' chart_3d = 'true' label_columns = VisitView.label_columns definitions = [ { 'group' : 'age_today', 'formatter': pretty_month_year, "series" : [(aggregate_count,"age_today")] }, { 'group': 'age_today', 'formatter': pretty_year, "series" : [(aggregate_count,"age_today")] } ] class DisciplineChartView(GroupByChartView): datamodel = SQLAInterface(Discipline , db.session) chart_title = 'Grouped Discipline by Birth' chart_3d = 'true' label_columns = DisciplineView.label_columns chart_type = 'PieChart' definitions = [ { 'group' : 'age_today', "series" : 
[(aggregate_count,"age_today")] }, { 'group' : 'gender', "series" : [(aggregate_count,"age_today")] } ] class DisciplineTimeChartView(GroupByChartView): datamodel = SQLAInterface(Discipline , db.session) chart_title = 'Grouped Birth Discipline' chart_type = 'AreaChart' chart_3d = 'true' label_columns = DisciplineView.label_columns definitions = [ { 'group' : 'age_today', 'formatter': pretty_month_year, "series" : [(aggregate_count,"age_today")] }, { 'group': 'age_today', 'formatter': pretty_year, "series" : [(aggregate_count,"age_today")] } ] class MedeventChartView(GroupByChartView): datamodel = SQLAInterface(Medevent , db.session) chart_title = 'Grouped Medevent by Birth' chart_3d = 'true' label_columns = MedeventView.label_columns chart_type = 'PieChart' definitions = [ { 'group' : 'age_today', "series" : [(aggregate_count,"age_today")] }, { 'group' : 'gender', "series" : [(aggregate_count,"age_today")] } ] class MedeventTimeChartView(GroupByChartView): datamodel = SQLAInterface(Medevent , db.session) chart_title = 'Grouped Birth Medevent' chart_type = 'AreaChart' chart_3d = 'true' label_columns = MedeventView.label_columns definitions = [ { 'group' : 'age_today', 'formatter': pretty_month_year, "series" : [(aggregate_count,"age_today")] }, { 'group': 'age_today', 'formatter': pretty_year, "series" : [(aggregate_count,"age_today")] } ] class HearingChartView(GroupByChartView): datamodel = SQLAInterface(Hearing , db.session) chart_title = 'Grouped Hearing by Birth' chart_3d = 'true' label_columns = HearingView.label_columns chart_type = 'PieChart' definitions = [ { 'group' : 'age_today', "series" : [(aggregate_count,"age_today")] }, { 'group' : 'gender', "series" : [(aggregate_count,"age_today")] } ] class HearingTimeChartView(GroupByChartView): datamodel = SQLAInterface(Hearing , db.session) chart_title = 'Grouped Birth Hearing' chart_type = 'AreaChart' chart_3d = 'true' label_columns = HearingView.label_columns definitions = [ { 'group' : 'age_today', 'formatter': pretty_month_year, "series" : [(aggregate_count,"age_today")] }, { 'group': 'age_today', 'formatter': pretty_year, "series" : [(aggregate_count,"age_today")] } ] class PrisoncommitalChartView(GroupByChartView): datamodel = SQLAInterface(Prisoncommital , db.session) chart_title = 'Grouped Prisoncommital by Birth' chart_3d = 'true' label_columns = PrisoncommitalView.label_columns chart_type = 'PieChart' definitions = [ { 'group' : 'age_today', "series" : [(aggregate_count,"age_today")] }, { 'group' : 'gender', "series" : [(aggregate_count,"age_today")] } ] class PrisoncommitalTimeChartView(GroupByChartView): datamodel = SQLAInterface(Prisoncommital , db.session) chart_title = 'Grouped Birth Prisoncommital' chart_type = 'AreaChart' chart_3d = 'true' label_columns = PrisoncommitalView.label_columns definitions = [ { 'group' : 'age_today', 'formatter': pretty_month_year, "series" : [(aggregate_count,"age_today")] }, { 'group': 'age_today', 'formatter': pretty_year, "series" : [(aggregate_count,"age_today")] } ] class CaseChartView(GroupByChartView): datamodel = SQLAInterface(Case , db.session) chart_title = 'Grouped Case by Birth' chart_3d = 'true' label_columns = CaseView.label_columns chart_type = 'PieChart' definitions = [ { 'group' : 'age_today', "series" : [(aggregate_count,"age_today")] }, { 'group' : 'gender', "series" : [(aggregate_count,"age_today")] } ] class CaseTimeChartView(GroupByChartView): datamodel = SQLAInterface(Case , db.session) chart_title = 'Grouped Birth Case' chart_type = 'AreaChart' chart_3d = 'true' 
label_columns = CaseView.label_columns definitions = [ { 'group' : 'age_today', 'formatter': pretty_month_year, "series" : [(aggregate_count,"age_today")] }, { 'group': 'age_today', 'formatter': pretty_year, "series" : [(aggregate_count,"age_today")] } ] # How to create a MasterDetailView #class DetailView(ModelView): # datamodel = SQLAInterface(DetailTable, db.session) #class MasterView(MasterDetailView): # datamodel = SQLAInterface(MasterTable, db.session) # related_views = [DetailView] # How to create a MultipleView #class MultipleViewsExp(MultipleView): # views = [GroupModelView, ContactModelView] #View Registration db.create_all() fill_gender() appbuilder.add_view(LawyerChartView(), 'Lawyer Age Chart', icon='fa-dashboard', category='Reports') appbuilder.add_view(LawyerTimeChartView(), 'Lawyer Time Chart', icon='fa-dashboard', category='Reports') appbuilder.add_view(PlaintiffChartView(), 'Plaintiff Age Chart', icon='fa-dashboard', category='Reports') appbuilder.add_view(PlaintiffTimeChartView(), 'Plaintiff Time Chart', icon='fa-dashboard', category='Reports') appbuilder.add_view(WitnesChartView(), 'Witnes Age Chart', icon='fa-dashboard', category='Reports') appbuilder.add_view(WitnesTimeChartView(), 'Witnes Time Chart', icon='fa-dashboard', category='Reports') appbuilder.add_view(SuretyChartView(), 'Surety Age Chart', icon='fa-dashboard', category='Reports') appbuilder.add_view(SuretyTimeChartView(), 'Surety Time Chart', icon='fa-dashboard', category='Reports') appbuilder.add_view(ProsecutorChartView(), 'Prosecutor Age Chart', icon='fa-dashboard', category='Reports') appbuilder.add_view(ProsecutorTimeChartView(), 'Prosecutor Time Chart', icon='fa-dashboard', category='Reports') appbuilder.add_view(PoliceofficerChartView(), 'Policeofficer Age Chart', icon='fa-dashboard', category='Reports') appbuilder.add_view(PoliceofficerTimeChartView(), 'Policeofficer Time Chart', icon='fa-dashboard', category='Reports') appbuilder.add_view(JudicialofficerChartView(), 'Judicialofficer Age Chart', icon='fa-dashboard', category='Reports') appbuilder.add_view(JudicialofficerTimeChartView(), 'Judicialofficer Time Chart', icon='fa-dashboard', category='Reports') appbuilder.add_view(DefendantChartView(), 'Defendant Age Chart', icon='fa-dashboard', category='Reports') appbuilder.add_view(DefendantTimeChartView(), 'Defendant Time Chart', icon='fa-dashboard', category='Reports') appbuilder.add_view(VisitorChartView(), 'Visitor Age Chart', icon='fa-dashboard', category='Reports') appbuilder.add_view(VisitorTimeChartView(), 'Visitor Time Chart', icon='fa-dashboard', category='Reports') appbuilder.add_view(WarderChartView(), 'Warder Age Chart', icon='fa-dashboard', category='Reports') appbuilder.add_view(WarderTimeChartView(), 'Warder Time Chart', icon='fa-dashboard', category='Reports') appbuilder.add_view(VisitChartView(), 'Visit Age Chart', icon='fa-dashboard', category='Reports') appbuilder.add_view(VisitTimeChartView(), 'Visit Time Chart', icon='fa-dashboard', category='Reports') appbuilder.add_view(DisciplineChartView(), 'Discipline Age Chart', icon='fa-dashboard', category='Reports') appbuilder.add_view(DisciplineTimeChartView(), 'Discipline Time Chart', icon='fa-dashboard', category='Reports') appbuilder.add_view(MedeventChartView(), 'Medevent Age Chart', icon='fa-dashboard', category='Reports') appbuilder.add_view(MedeventTimeChartView(), 'Medevent Time Chart', icon='fa-dashboard', category='Reports') appbuilder.add_view(HearingChartView(), 'Hearing Age Chart', icon='fa-dashboard', category='Reports') 
appbuilder.add_view(HearingTimeChartView(), 'Hearing Time Chart', icon='fa-dashboard', category='Reports') appbuilder.add_view(PrisoncommitalChartView(), 'Prisoncommital Age Chart', icon='fa-dashboard', category='Reports') appbuilder.add_view(PrisoncommitalTimeChartView(), 'Prisoncommital Time Chart', icon='fa-dashboard', category='Reports') appbuilder.add_view(CaseChartView(), 'Case Age Chart', icon='fa-dashboard', category='Reports') appbuilder.add_view(CaseTimeChartView(), 'Case Time Chart', icon='fa-dashboard', category='Reports') #appbuilder.add_separator("Setup") #appbuilder.add_separator("My Views") #appbuilder.add_link(name, href, icon='', label='', category='', category_icon='', category_label='', baseview=None)
29.354086
122
0.617135
2,059
22,632
6.565809
0.099563
0.066277
0.094682
0.108884
0.762852
0.750573
0.744656
0.736371
0.631186
0.455877
0
0.002751
0.261046
22,632
770
123
29.392208
0.805609
0.031725
0
0.53068
1
0
0.20676
0
0
0
0
0
0
1
0.004975
false
0
0.021559
0.003317
0.401327
0
0
0
0
null
0
0
0
0
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
7ca46b907f7ceff8442ee6ec5f1a235158c29b9f
156
py
Python
icebergsdk/resources/promotion.py
Iceberg-Marketplace/Iceberg-API-PYTHON
5f8314376ebb6e5a8099253ec79fd40ea835beac
[ "MIT" ]
null
null
null
icebergsdk/resources/promotion.py
Iceberg-Marketplace/Iceberg-API-PYTHON
5f8314376ebb6e5a8099253ec79fd40ea835beac
[ "MIT" ]
2
2015-01-26T10:22:41.000Z
2015-01-26T10:22:41.000Z
icebergsdk/resources/promotion.py
Iceberg-Marketplace/Iceberg-API-PYTHON
5f8314376ebb6e5a8099253ec79fd40ea835beac
[ "MIT" ]
3
2016-12-30T15:08:57.000Z
2019-09-24T14:13:44.000Z
# -*- coding: utf-8 -*-

from icebergsdk.resources.base import UpdateableIcebergObject


class Discount(UpdateableIcebergObject):
    endpoint = 'discount'
19.5
61
0.75641
14
156
8.428571
0.857143
0
0
0
0
0
0
0
0
0
0
0.007407
0.134615
156
8
62
19.5
0.866667
0.134615
0
0
0
0
0.059701
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
7ca7f6efb8231259fb5840ad62a4ff4a3694aca3
160
py
Python
usaspending_api/awards/v2/urls_subawards.py
g4brielvs/usaspending-api
bae7da2c204937ec1cdf75c052405b13145728d5
[ "CC0-1.0" ]
217
2016-11-03T17:09:53.000Z
2022-03-10T04:17:54.000Z
usaspending_api/awards/v2/urls_subawards.py
g4brielvs/usaspending-api
bae7da2c204937ec1cdf75c052405b13145728d5
[ "CC0-1.0" ]
622
2016-09-02T19:18:23.000Z
2022-03-29T17:11:01.000Z
usaspending_api/awards/v2/urls_subawards.py
g4brielvs/usaspending-api
bae7da2c204937ec1cdf75c052405b13145728d5
[ "CC0-1.0" ]
93
2016-09-07T20:28:57.000Z
2022-02-25T00:25:27.000Z
from django.conf.urls import url

from usaspending_api.awards.v2.views.subawards import SubawardsViewSet

urlpatterns = [url(r"^$", SubawardsViewSet.as_view())]
32
70
0.8
21
160
6
0.809524
0
0
0
0
0
0
0
0
0
0
0.006803
0.08125
160
4
71
40
0.85034
0
0
0
0
0
0.0125
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
7cce2c2b8a027632cfe0b0202831b69f4ecdaabb
90
py
Python
main.py
OrestSenkovskyi/fleet_test
ae4f82263a1793bd129ee07309a7b59efaaa97bf
[ "MIT" ]
null
null
null
main.py
OrestSenkovskyi/fleet_test
ae4f82263a1793bd129ee07309a7b59efaaa97bf
[ "MIT" ]
null
null
null
main.py
OrestSenkovskyi/fleet_test
ae4f82263a1793bd129ee07309a7b59efaaa97bf
[ "MIT" ]
null
null
null
#!/usr/bin/python3
"""
Main module for demo
"""

if __name__ == "__main__":
    pass
12.857143
27
0.577778
11
90
4
0.909091
0
0
0
0
0
0
0
0
0
0
0.014706
0.244444
90
7
28
12.857143
0.632353
0.422222
0
0
0
0
0.205128
0
0
0
0
0
0
1
0
true
0.5
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
4
7cd5ce4ca0a52aa31727f3b898735ce75ab78639
26
py
Python
homeassistant/components/cups/__init__.py
domwillcode/home-assistant
f170c80bea70c939c098b5c88320a1c789858958
[ "Apache-2.0" ]
30,023
2016-04-13T10:17:53.000Z
2020-03-02T12:56:31.000Z
homeassistant/components/cups/__init__.py
jagadeeshvenkatesh/core
1bd982668449815fee2105478569f8e4b5670add
[ "Apache-2.0" ]
31,101
2020-03-02T13:00:16.000Z
2022-03-31T23:57:36.000Z
homeassistant/components/cups/__init__.py
jagadeeshvenkatesh/core
1bd982668449815fee2105478569f8e4b5670add
[ "Apache-2.0" ]
11,956
2016-04-13T18:42:31.000Z
2020-03-02T09:32:12.000Z
"""The cups component."""
13
25
0.615385
3
26
5.333333
1
0
0
0
0
0
0
0
0
0
0
0
0.115385
26
1
26
26
0.695652
0.730769
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
6b1c0fe7ed85829d723294b5d5ac0d564b2efd17
8,498
py
Python
python-midonetclient/src/midonetclient/vendor_media_type.py
duarten/midonet
c7a5aa352a8038bdc6a463c68abc47bb411a1e7c
[ "Apache-2.0" ]
null
null
null
python-midonetclient/src/midonetclient/vendor_media_type.py
duarten/midonet
c7a5aa352a8038bdc6a463c68abc47bb411a1e7c
[ "Apache-2.0" ]
null
null
null
python-midonetclient/src/midonetclient/vendor_media_type.py
duarten/midonet
c7a5aa352a8038bdc6a463c68abc47bb411a1e7c
[ "Apache-2.0" ]
null
null
null
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2013 Midokura PTE LTD. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. APPLICATION_OCTET_STREAM = "application/octet-stream" APPLICATION_JSON_V5 = "application/vnd.org.midonet.Application-v5+json" APPLICATION_ERROR_JSON = "application/vnd.org.midonet.Error-v1+json" APPLICATION_TENANT_JSON = "application/vnd.org.midonet.Tenant-v1+json" APPLICATION_TENANT_COLLECTION_JSON = \ "application/vnd.org.midonet.collection.Tenant-v1+json" APPLICATION_ROUTER_JSON = "application/vnd.org.midonet.Router-v3+json" APPLICATION_ROUTER_COLLECTION_JSON = \ "application/vnd.org.midonet.collection.Router-v3+json" APPLICATION_BRIDGE_JSON = "application/vnd.org.midonet.Bridge-v3+json" APPLICATION_BRIDGE_COLLECTION_JSON = \ "application/vnd.org.midonet.collection.Bridge-v3+json" APPLICATION_HOST_JSON = "application/vnd.org.midonet.Host-v2+json" APPLICATION_HOST_COLLECTION_JSON = \ "application/vnd.org.midonet.collection.Host-v2+json" APPLICATION_INTERFACE_JSON = "application/vnd.org.midonet.Interface-v1+json" APPLICATION_INTERFACE_COLLECTION_JSON = \ "application/vnd.org.midonet.collection.Interface-v1+json" APPLICATION_HOST_COMMAND_JSON = \ "application/vnd.org.midonet.HostCommand-v1+json" APPLICATION_HOST_COMMAND_COLLECTION_JSON = \ "application/vnd.org.midonet.collection.HostCommand-v1+json" APPLICATION_PORT_LINK_JSON = "application/vnd.org.midonet.PortLink-v1+json" APPLICATION_ROUTE_JSON = "application/vnd.org.midonet.Route-v1+json" APPLICATION_ROUTE_COLLECTION_JSON = \ "application/vnd.org.midonet.collection.Route-v1+json" APPLICATION_PORTGROUP_JSON = "application/vnd.org.midonet.PortGroup-v1+json" APPLICATION_PORTGROUP_COLLECTION_JSON = \ "application/vnd.org.midonet.collection.PortGroup-v1+json" APPLICATION_PORTGROUP_PORT_JSON = \ "application/vnd.org.midonet.PortGroupPort-v1+json" APPLICATION_PORTGROUP_PORT_COLLECTION_JSON = \ "application/vnd.org.midonet.collection.PortGroupPort-v1+json" APPLICATION_CHAIN_JSON = "application/vnd.org.midonet.Chain-v1+json" APPLICATION_CHAIN_COLLECTION_JSON = \ "application/vnd.org.midonet.collection.Chain-v1+json" APPLICATION_RULE_JSON = "application/vnd.org.midonet.Rule-v2+json" APPLICATION_RULE_COLLECTION_JSON = \ "application/vnd.org.midonet.collection.Rule-v2+json" APPLICATION_BGP_JSON = "application/vnd.org.midonet.Bgp-v1+json" APPLICATION_BGP_COLLECTION_JSON = \ "application/vnd.org.midonet.collection.Bgp-v1+json" APPLICATION_AD_ROUTE_JSON = "application/vnd.org.midonet.AdRoute-v1+json" APPLICATION_AD_ROUTE_COLLECTION_JSON = \ "application/vnd.org.midonet.collection.AdRoute-v1+json" APPLICATION_BGP_NETWORK_JSON = "application/vnd.org.midonet.BgpNetwork-v1+json" APPLICATION_BGP_NETWORK_COLLECTION_JSON =\ "application/vnd.org.midonet.collection.BgpNetwork-v1+json" APPLICATION_BGP_PEER_JSON = "application/vnd.org.midonet.BgpPeer-v1+json" APPLICATION_BGP_PEER_COLLECTION_JSON =\ "application/vnd.org.midonet.collection.BgpPeer-v1+json" APPLICATION_VPN_JSON = "application/vnd.org.midonet.Vpn-v1+json" 
APPLICATION_VPN_COLLECTION_JSON = \ "application/vnd.org.midonet.collection.Vpn-v1+json" APPLICATION_DHCP_SUBNET_JSON = "application/vnd.org.midonet.DhcpSubnet-v2+json" APPLICATION_DHCP_SUBNET_COLLECTION_JSON = \ "application/vnd.org.midonet.collection.DhcpSubnet-v2+json" APPLICATION_DHCP_HOST_JSON = "application/vnd.org.midonet.DhcpHost-v1+json" APPLICATION_DHCP_HOST_COLLECTION_JSON = \ "application/vnd.org.midonet.collection.DhcpHost-v1+json" APPLICATION_DHCPV6_SUBNET_JSON = \ "application/vnd.org.midonet.DhcpV6Subnet-v1+json" APPLICATION_DHCPV6_SUBNET_COLLECTION_JSON = \ "application/vnd.org.midonet.collection.DhcpV6Subnet-v1+json" APPLICATION_DHCPV6_HOST_JSON = "application/vnd.org.midonet.DhcpV6Host-v1+json" APPLICATION_DHCPV6_HOST_COLLECTION_JSON = \ "application/vnd.org.midonet.collection.DhcpV6Host-v1+json" APPLICATION_MONITORING_QUERY_RESPONSE_COLLECTION_JSON = \ "application/vnd.org.midonet.collection.mgmt.MetricQueryResponse-v1+json" APPLICATION_MONITORING_QUERY_COLLECTION_JSON = \ "application/vnd.org.midonet.collection.MetricQuery-v1+json" APPLICATION_METRICS_COLLECTION_JSON = \ "application/vnd.org.midonet.collection.Metric-v1+json" APPLICATION_METRIC_TARGET_JSON = \ "application/vnd.org.midonet.MetricTarget-v1+json" APPLICATION_TUNNEL_ZONE_JSON = "application/vnd.org.midonet.TunnelZone-v1+json" APPLICATION_TUNNEL_ZONE_COLLECTION_JSON = \ "application/vnd.org.midonet.collection.TunnelZone-v1+json" APPLICATION_TUNNEL_ZONE_HOST_JSON = \ "application/vnd.org.midonet.TunnelZoneHost-v1+json" APPLICATION_TUNNEL_ZONE_HOST_COLLECTION_JSON = \ "application/vnd.org.midonet.collection.TunnelZoneHost-v1+json" APPLICATION_GRE_TUNNEL_ZONE_HOST_JSON = \ "application/vnd.org.midonet.GreTunnelZoneHost-v1+json" APPLICATION_GRE_TUNNEL_ZONE_HOST_COLLECTION_JSON = \ "application/vnd.org.midonet.collection.GreTunnelZoneHost-v1+json" APPLICATION_HOST_INTERFACE_PORT_JSON = \ "application/vnd.org.midonet.HostInterfacePort-v1+json" APPLICATION_HOST_INTERFACE_PORT_COLLECTION_JSON = \ "application/vnd.org.midonet.collection.HostInterfacePort-v1+json" APPLICATION_CONDITION_JSON = "application/vnd.org.midonet.Condition-v1+json" APPLICATION_CONDITION_COLLECTION_JSON = \ "application/vnd.org.midonet.collection.Condition-v1+json" APPLICATION_TRACE_JSON = "application/vnd.org.midonet.Trace-v1+json" APPLICATION_TRACE_COLLECTION_JSON = \ "application/vnd.org.midonet.collection.Trace-v1+json" APPLICATION_WRITE_VERSION_JSON = \ "application/vnd.org.midonet.WriteVersion-v1+json" APPLICATION_SYSTEM_STATE_JSON = \ "application/vnd.org.midonet.SystemState-v2+json" APPLICATION_HOST_VERSION_JSON = \ "application/vnd.org.midonet.HostVersion-v1+json" # Port media types APPLICATION_PORT_JSON = "application/vnd.org.midonet.Port-v2+json" APPLICATION_PORT_COLLECTION_JSON = \ "application/vnd.org.midonet.collection.Port-v2+json" APPLICATION_IP_ADDR_GROUP_JSON = \ "application/vnd.org.midonet.IpAddrGroup-v1+json" APPLICATION_IP_ADDR_GROUP_COLLECTION_JSON = \ "application/vnd.org.midonet.collection.IpAddrGroup-v1+json" APPLICATION_IP_ADDR_GROUP_ADDR_JSON = \ "application/vnd.org.midonet.IpAddrGroupAddr-v1+json" APPLICATION_IP_ADDR_GROUP_ADDR_COLLECTION_JSON = \ "application/vnd.org.midonet.collection.IpAddrGroupAddr-v1+json" # L4LB media types APPLICATION_LOAD_BALANCER_JSON = \ "application/vnd.org.midonet.LoadBalancer-v1+json" APPLICATION_LOAD_BALANCER_COLLECTION_JSON = \ "application/vnd.org.midonet.collection.LoadBalancer-v1+json" APPLICATION_VIP_JSON = "application/vnd.org.midonet.VIP-v1+json" 
APPLICATION_VIP_COLLECTION_JSON = \ "application/vnd.org.midonet.collection.VIP-v1+json" APPLICATION_POOL_JSON = "application/vnd.org.midonet.Pool-v1+json" APPLICATION_POOL_COLLECTION_JSON = \ "application/vnd.org.midonet.collection.Pool-v1+json" APPLICATION_POOL_MEMBER_JSON = "application/vnd.org.midonet.PoolMember-v1+json" APPLICATION_POOL_MEMBER_COLLECTION_JSON = \ "application/vnd.org.midonet.collection.PoolMember-v1+json" APPLICATION_HEALTH_MONITOR_JSON = \ "application/vnd.org.midonet.HealthMonitor-v1+json" APPLICATION_HEALTH_MONITOR_COLLECTION_JSON = \ "application/vnd.org.midonet.collection.HealthMonitor-v1+json" APPLICATION_POOL_STATISTIC_JSON = \ "application/vnd.org.midonet.PoolStatistic-v1+json" APPLICATION_POOL_STATISTIC_COLLECTION_JSON = \ "application/vnd.org.midonet.collection.PoolStatistic-v1+json" # VxGW APPLICATION_VTEP_JSON = "application/vnd.org.midonet.VTEP-v1+json" APPLICATION_VTEP_COLLECTION_JSON = \ "application/vnd.org.midonet.collection.VTEP-v1+json" APPLICATION_VTEP_BINDING_JSON = \ "application/vnd.org.midonet.VTEPBinding-v1+json" APPLICATION_VTEP_BINDING_COLLECTION_JSON = \ "application/vnd.org.midonet.collection.VTEPBinding-v1+json"
49.695906
79
0.814074
1,097
8,498
6.058341
0.148587
0.36789
0.214866
0.30334
0.673488
0.416642
0.335841
0.110442
0.018658
0.018658
0
0.013422
0.07943
8,498
170
80
49.988235
0.836124
0.076606
0
0
0
0
0.541518
0.541518
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
6b451c2f6be5924a1d246c05307b7164a5d127b8
5,780
py
Python
zignal/tests/test_music_scales.py
fspacheco/zignal
19ac50157a276e9640e362b0472a5e209dfe6709
[ "MIT" ]
null
null
null
zignal/tests/test_music_scales.py
fspacheco/zignal
19ac50157a276e9640e362b0472a5e209dfe6709
[ "MIT" ]
null
null
null
zignal/tests/test_music_scales.py
fspacheco/zignal
19ac50157a276e9640e362b0472a5e209dfe6709
[ "MIT" ]
null
null
null
''' Created on 24 Feb 2015 @author: Ronny Andersson (ronny@andersson.tk) @copyright: (c) 2015 Ronny Andersson @license: MIT ''' # standard library from __future__ import division, print_function import unittest # external libraries import nose # local libraries from zignal.music import scales class Test_midi_scales(unittest.TestCase): # Benson, DJ. (2006). Music: A Mathematical Offering. Cambridge University Press. # http://homepages.abdn.ac.uk/mth192/pages/html/maths-music.html def test_freq2key_quantise(self): # 70 466.164 # 69 440.00 # 68 415.305 self.assertAlmostEqual(scales.midi_freq2key(416.4, quantise=True), 68, places=7) self.assertAlmostEqual(scales.midi_freq2key(438.0, quantise=True), 69, places=7) self.assertAlmostEqual(scales.midi_freq2key(441.0, quantise=True), 69, places=7) self.assertAlmostEqual(scales.midi_freq2key(442.0, quantise=True), 69, places=7) self.assertAlmostEqual(scales.midi_freq2key(452.1, quantise=True), 69, places=7) self.assertAlmostEqual(scales.midi_freq2key(453.1, quantise=True), 70, places=7) self.assertAlmostEqual(scales.midi_freq2key(460.0, quantise=True), 70, places=7) self.assertAlmostEqual(scales.midi_freq2key(470.0, quantise=True), 70, places=7) def test_key2freq(self): self.assertAlmostEqual(scales.midi_key2freq(69), 440.0, places=7) self.assertAlmostEqual(scales.midi_key2freq(81), 880.0, places=7) self.assertAlmostEqual(scales.midi_key2freq(21), 27.5, places=7) self.assertAlmostEqual(scales.midi_key2freq(43), 97.9989, places=4) def test_freq2key(self): self.assertAlmostEqual(scales.midi_freq2key(440), 69.0, places=7) self.assertAlmostEqual(scales.midi_freq2key(880), 81.0, places=7) def test_key2freq_tuning(self): self.assertAlmostEqual(scales.midi_key2freq(69, tuning=450), 450.0, places=7) self.assertAlmostEqual(scales.midi_key2freq(81, tuning=450), 900.0, places=7) self.assertAlmostEqual(scales.midi_key2freq(21, tuning=400), 25.0, places=7) def test_freq2key_tuning(self): self.assertAlmostEqual(scales.midi_freq2key(450, tuning=450), 69.0, places=7) self.assertAlmostEqual(scales.midi_freq2key(900, tuning=450), 81.0, places=7) def test_back2back_key(self): self.assertAlmostEqual(scales.midi_key2freq(scales.midi_freq2key(1234)), 1234, places=7) self.assertAlmostEqual(scales.midi_key2freq(scales.midi_freq2key(45.67)), 45.67, places=7) def test_back2back_freq(self): self.assertAlmostEqual(scales.midi_freq2key(scales.midi_key2freq(76.543)), 76.543, places=7) self.assertAlmostEqual(scales.midi_freq2key(scales.midi_key2freq(124)), 124, places=7) class Test_piano_note_to_freq(unittest.TestCase): def test_octaves(self): self.assertAlmostEqual(scales.piano_note2freq('A2'), 110.0, places=7) self.assertAlmostEqual(scales.piano_note2freq('A3'), 220.0, places=7) self.assertAlmostEqual(scales.piano_note2freq('A4'), 440.0, places=7) self.assertAlmostEqual(scales.piano_note2freq('A5'), 880.0, places=7) self.assertAlmostEqual(scales.piano_note2freq('A6'), 1760.0, places=7) def test_values(self): self.assertAlmostEqual(scales.piano_note2freq('C6'), 1046.50, places=2) self.assertAlmostEqual(scales.piano_note2freq('D1'), 36.7081, places=4) class Test_piano_freq_to_note(unittest.TestCase): def test_values(self): self.assertEqual(scales.piano_freq2note(1046.50), 'C6') self.assertEqual(scales.piano_freq2note(36.7051), 'D1') self.assertEqual(scales.piano_freq2note(440), 'A4') def test_quantise(self): self.assertEqual(scales.piano_freq2note(435.00), 'A4') self.assertEqual(scales.piano_freq2note(439.00), 'A4') self.assertEqual(scales.piano_freq2note(440.00), 'A4') 
self.assertEqual(scales.piano_freq2note(441.00), 'A4') self.assertEqual(scales.piano_freq2note(447.00), 'A4') class Test_piano(unittest.TestCase): def test_back2back_key(self): self.assertAlmostEqual(scales.piano_key2freq(scales.piano_freq2key(100)), 100, places=7) self.assertAlmostEqual(scales.piano_key2freq(scales.piano_freq2key(32)), 32, places=7) self.assertAlmostEqual(scales.piano_key2freq(scales.piano_freq2key(997)), 997, places=7) self.assertAlmostEqual(scales.piano_key2freq(scales.piano_freq2key(12345)), 12345, places=7) self.assertAlmostEqual(scales.piano_key2freq(scales.piano_freq2key(4.563)), 4.563, places=7) def test_back2back_freq(self): self.assertAlmostEqual(scales.piano_freq2key(scales.piano_key2freq(10)), 10, places=7) self.assertAlmostEqual(scales.piano_freq2key(scales.piano_key2freq(49)), 49, places=7) self.assertAlmostEqual(scales.piano_freq2key(scales.piano_key2freq(30.3)), 30.3, places=7) def test_back2back_freq_quantised(self): self.assertAlmostEqual(scales.piano_freq2key(scales.piano_key2freq(10.2), quantise=True), 10, places=7) self.assertAlmostEqual(scales.piano_freq2key(scales.piano_key2freq(34.678), quantise=True), 35, places=7) if __name__ == "__main__": noseargs = [__name__, "--verbosity=2", "--logging-format=%(asctime)s %(levelname)-8s: %(name)-15s "+ "%(module)-15s %(funcName)-20s %(message)s", "--logging-level=DEBUG", __file__, ] nose.run(argv=noseargs)
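For reference, the 12-tone equal temperament relation these MIDI tests appear to exercise, written as a standalone sketch rather than the library's actual implementation (key 69 is A4; tuning is the A4 reference frequency):

from math import log2

def midi_key2freq_sketch(key, tuning=440.0):
    # e.g. key 69 -> 440.0 Hz, key 81 -> 880.0 Hz, key 21 -> 27.5 Hz
    return tuning * 2 ** ((key - 69) / 12)

def midi_freq2key_sketch(freq, tuning=440.0):
    # inverse mapping; quantising to the nearest integer gives the key number
    return 69 + 12 * log2(freq / tuning)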
50.26087
101
0.684256
721
5,780
5.323162
0.226075
0.218864
0.281397
0.196978
0.723033
0.67431
0.574257
0.479416
0.348619
0.272017
0
0.100107
0.189446
5,780
114
102
50.701754
0.719104
0.060208
0
0.074074
0
0
0.031567
0.009046
0
0
0
0
0.592593
1
0.17284
false
0
0.049383
0
0.271605
0.012346
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
4
861849c0f380f560406c2462086f258a1f58ba14
51
py
Python
katas/kyu_8/grasshopper_summation.py
ShRenat/codewars
c743b622122208c9ac5ba926957754e5165ea81f
[ "MIT" ]
null
null
null
katas/kyu_8/grasshopper_summation.py
ShRenat/codewars
c743b622122208c9ac5ba926957754e5165ea81f
[ "MIT" ]
null
null
null
katas/kyu_8/grasshopper_summation.py
ShRenat/codewars
c743b622122208c9ac5ba926957754e5165ea81f
[ "MIT" ]
null
null
null
def summation(num):
    return sum(range(num + 1))
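The same sum is available in constant time from Gauss's closed form n(n+1)/2; a hypothetical equivalent for comparison:

def summation_closed_form(num):
    # sum of 0..num without building a range
    return num * (num + 1) // 2

assert summation_closed_form(8) == 36  # matches sum(range(8 + 1))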
17
30
0.647059
8
51
4.125
0.875
0
0
0
0
0
0
0
0
0
0
0.02439
0.196078
51
2
31
25.5
0.780488
0
0
0
0
0
0
0
0
0
0
0
0
1
0.5
false
0
0
0.5
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
4
8619586b83c3d17ed020d9e59bba2b81e73ae4b5
52
py
Python
ovmm/commands/__init__.py
tobiasraabe/otree_virtual_machine_manager
72c2e384f599e400720cad14d0cd28d515214563
[ "MIT" ]
2
2017-03-22T05:38:10.000Z
2017-10-17T13:15:35.000Z
ovmm/commands/__init__.py
tobiasraabe/otree_virtual_machine_manager
72c2e384f599e400720cad14d0cd28d515214563
[ "MIT" ]
53
2017-01-22T09:39:03.000Z
2017-10-20T08:13:43.000Z
ovmm/commands/__init__.py
tobiasraabe/otree_virtual_machine_manager
72c2e384f599e400720cad14d0cd28d515214563
[ "MIT" ]
null
null
null
"""This module contains all command of ``ovmm``."""
26
51
0.653846
7
52
4.857143
1
0
0
0
0
0
0
0
0
0
0
0
0.134615
52
1
52
52
0.755556
0.865385
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
862dbbcdfa2b205e7f40c83b3ba887d2dfc80d0e
1,373
py
Python
tests/test_structure_decomposition.py
TariqTNO/OpenQL
8da802cae3a7b8c244edb4d3b74be9e60250528c
[ "Apache-2.0" ]
61
2019-04-24T08:25:41.000Z
2022-03-01T22:23:23.000Z
tests/test_structure_decomposition.py
TariqTNO/OpenQL
8da802cae3a7b8c244edb4d3b74be9e60250528c
[ "Apache-2.0" ]
141
2019-03-27T16:19:06.000Z
2022-03-03T10:11:47.000Z
tests/test_structure_decomposition.py
TariqTNO/OpenQL
8da802cae3a7b8c244edb4d3b74be9e60250528c
[ "Apache-2.0" ]
43
2019-03-27T13:40:45.000Z
2022-01-14T12:48:51.000Z
import os
from utils import file_compare
import unittest
from openql import openql as ql

curdir = os.path.dirname(os.path.realpath(__file__))
output_dir = os.path.join(curdir, 'test_output')


class Test_structure_decomposition(unittest.TestCase):
    def run_test_case(self, name):
        old_wd = os.getcwd()
        try:
            os.chdir(curdir)
            in_fn = 'test_' + name + '.cq'
            out_fn = 'test_output/' + name + '_out.cq'
            gold_fn = 'golden/' + name + '_out.cq'
            ql.compile(in_fn)
            self.assertTrue(file_compare(out_fn, gold_fn))
        finally:
            os.chdir(old_wd)

    def test_structure_decomposition_goto(self):
        self.run_test_case('structure_decomposition_goto')

    def test_structure_decomposition_if_else(self):
        self.run_test_case('structure_decomposition_if_else')

    def test_structure_decomposition_foreach(self):
        self.run_test_case('structure_decomposition_foreach')

    def test_structure_decomposition_for(self):
        self.run_test_case('structure_decomposition_for')

    def test_structure_decomposition_while(self):
        self.run_test_case('structure_decomposition_while')

    def test_structure_decomposition_repeat_until(self):
        self.run_test_case('structure_decomposition_repeat_until')


if __name__ == '__main__':
    unittest.main()
29.212766
66
0.701384
173
1,373
5.115607
0.289017
0.323164
0.20565
0.19661
0.277966
0.277966
0.277966
0
0
0
0
0
0.20539
1,373
46
67
29.847826
0.811182
0
0
0
0
0
0.176256
0.132556
0
0
0
0
0.03125
1
0.21875
false
0
0.125
0
0.375
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
4
86362bafd62909b880aad6da4736386ddcbe5c55
141
py
Python
documents/__init__.py
nuwainfo/treeio
f57bf9114d9774c11468a1b0e44614b04631beb1
[ "MIT" ]
242
2015-01-01T15:08:23.000Z
2022-01-19T21:14:24.000Z
documents/__init__.py
nuwainfo/treeio
f57bf9114d9774c11468a1b0e44614b04631beb1
[ "MIT" ]
52
2015-01-05T09:13:17.000Z
2018-12-26T14:52:43.000Z
documents/__init__.py
nuwainfo/treeio
f57bf9114d9774c11468a1b0e44614b04631beb1
[ "MIT" ]
99
2015-01-09T23:28:14.000Z
2021-12-30T09:19:51.000Z
# encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license

"""
Documents docstring
"""
15.666667
32
0.716312
21
141
4.809524
0.857143
0.118812
0
0
0
0
0
0
0
0
0
0.042373
0.163121
141
8
33
17.625
0.813559
0.879433
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
864dce2a6678d15acd7e82f7dc140723550b55a9
182
py
Python
lab/lab8/genmat.py
YZL24/SUSTech-CS205
c15e4055b3e260e84e94c8db46b4180448c3619f
[ "MIT" ]
null
null
null
lab/lab8/genmat.py
YZL24/SUSTech-CS205
c15e4055b3e260e84e94c8db46b4180448c3619f
[ "MIT" ]
null
null
null
lab/lab8/genmat.py
YZL24/SUSTech-CS205
c15e4055b3e260e84e94c8db46b4180448c3619f
[ "MIT" ]
null
null
null
from random import random

with open('mat2.txt', 'w+') as f:
    for _ in range(100):
        for _ in range(100):
            f.write(f'{random()*100:.2f} ')
        f.write('\n')
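A sketch of reading the generated file back, assuming NumPy is available; mat2.txt is written as 100 rows of 100 space-separated floats, so np.loadtxt should recover a (100, 100) array.

import numpy as np

mat = np.loadtxt('mat2.txt')
print(mat.shape)  # expected: (100, 100)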
20.222222
43
0.516484
28
182
3.285714
0.607143
0.108696
0.217391
0.282609
0
0
0
0
0
0
0
0.085938
0.296703
182
8
44
22.75
0.632813
0
0
0.333333
0
0
0.17033
0
0
0
0
0
0
1
0
false
0
0.166667
0
0.166667
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
86514d5a6f07a08866a536927bdc4fdf42fe3668
1,040
py
Python
software/scripts/setupLibPaths.py
slaclab/atlas-rd53-fmc-dev
1c5e50980dca3389c9b9a8eaa7c215f5c21eff87
[ "BSD-3-Clause-LBNL" ]
2
2021-08-17T17:59:19.000Z
2021-08-17T17:59:44.000Z
software/scripts/setupLibPaths.py
slaclab/atlas-rd53-fmc-dev
1c5e50980dca3389c9b9a8eaa7c215f5c21eff87
[ "BSD-3-Clause-LBNL" ]
3
2020-09-14T21:36:26.000Z
2020-11-02T17:51:41.000Z
software/scripts/setupLibPaths.py
slaclab/atlas-rd53-fmc-dev
1c5e50980dca3389c9b9a8eaa7c215f5c21eff87
[ "BSD-3-Clause-LBNL" ]
1
2020-12-12T23:14:16.000Z
2020-12-12T23:14:16.000Z
#!/usr/bin/env python3
#-----------------------------------------------------------------------------
# This file is part of the 'Camera link gateway'. It is subject to
# the license terms in the LICENSE.txt file found in the top-level directory
# of this distribution and at:
# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
# No part of the 'Camera link gateway', including this file, may be
# copied, modified, propagated, or distributed except according to the terms
# contained in the LICENSE.txt file.
#-----------------------------------------------------------------------------

import pyrogue as pr

pr.addLibraryPath('../../firmware/submodules/atlas-rd53-fw-lib/python')
pr.addLibraryPath('../../firmware/submodules/axi-pcie-core/python')
pr.addLibraryPath('../../firmware/submodules/surf/python')
pr.addLibraryPath('../../firmware/submodules/rce-gen3-fw-lib/python')
pr.addLibraryPath('../../firmware/common/fmc/python')
#pr.addLibraryPath('../../firmware/common/feb/python')
pr.addLibraryPath('../python')
52
78
0.635577
124
1,040
5.330645
0.548387
0.16944
0.217852
0.226929
0.441755
0.184569
0
0
0
0
0
0.004184
0.080769
1,040
19
79
54.736842
0.687238
0.617308
0
0
0
0
0.573643
0.550388
0
0
0
0
0
1
0
true
0
0.142857
0
0.142857
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
8658e7b50fa7ff04601eb7178c721b3fd12eee98
62
py
Python
test_file/testide.py
EXPmaster/notepad
b2d5535b424b8fbfa6786fc23af0d28e110fc2ba
[ "MIT" ]
4
2020-11-15T15:45:15.000Z
2020-11-22T08:08:08.000Z
test_file/testide.py
EXPmaster/notepad
b2d5535b424b8fbfa6786fc23af0d28e110fc2ba
[ "MIT" ]
null
null
null
test_file/testide.py
EXPmaster/notepad
b2d5535b424b8fbfa6786fc23af0d28e110fc2ba
[ "MIT" ]
null
null
null
name=input("please input your name:" print("Hello "+name)
20.666667
38
0.677419
9
62
4.666667
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.16129
62
3
39
20.666667
0.807692
0
0
0
0
0
0.47541
0
0
0
0
0
0
0
null
null
0
0
null
null
0.5
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
1
0
4
869f003fd79b34cbe91c7d48d62e338ce2f67b6f
283
py
Python
src/dropbot_monitor/__init__.py
sci-bots/dropbot-monitor
1c786d25e8d2f548311e297495592f152580d6bc
[ "BSD-3-Clause" ]
null
null
null
src/dropbot_monitor/__init__.py
sci-bots/dropbot-monitor
1c786d25e8d2f548311e297495592f152580d6bc
[ "BSD-3-Clause" ]
1
2019-11-19T15:27:51.000Z
2019-11-19T15:28:07.000Z
src/dropbot_monitor/__init__.py
sci-bots/dropbot-monitor
1c786d25e8d2f548311e297495592f152580d6bc
[ "BSD-3-Clause" ]
null
null
null
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)

from ._version import get_versions
__version__ = get_versions()['version']
del get_versions

from .blinker_mqtt import *
from .mqtt_async import *
from .mqtt_bridge import *
28.3
66
0.745583
34
283
5.676471
0.5
0.170984
0.186529
0
0
0
0
0
0
0
0
0
0.187279
283
9
67
31.444444
0.83913
0
0
0
0
0
0.024735
0
0
0
0
0
0
1
0
false
0
0.625
0
0.625
0.125
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
86d4c7f16fae56372b48195a2b576431e5fa079d
55
py
Python
metaopt/optimizer/__init__.py
cigroup-ol/metaopt
6dfd5105d3c6eaf00f96670175cae16021069514
[ "BSD-3-Clause" ]
8
2015-02-02T21:42:23.000Z
2019-06-30T18:12:43.000Z
metaopt/optimizer/__init__.py
cigroup-ol/metaopt
6dfd5105d3c6eaf00f96670175cae16021069514
[ "BSD-3-Clause" ]
4
2015-09-24T14:12:38.000Z
2021-12-08T22:42:52.000Z
metaopt/optimizer/__init__.py
cigroup-ol/metaopt
6dfd5105d3c6eaf00f96670175cae16021069514
[ "BSD-3-Clause" ]
6
2015-02-27T12:35:33.000Z
2020-10-15T21:04:02.000Z
# -*- coding: utf-8 -*- """ Package of optimizers. """
11
23
0.527273
6
55
4.833333
1
0
0
0
0
0
0
0
0
0
0
0.022222
0.181818
55
4
24
13.75
0.622222
0.818182
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
81051d7d60ba56e2618c21618074f1176cc6f40b
161
py
Python
ch03/3_5.py
shifumin/create-deep-learning-from-scratch
cdcb753654863950065c6980e4913369c40a2850
[ "MIT" ]
null
null
null
ch03/3_5.py
shifumin/create-deep-learning-from-scratch
cdcb753654863950065c6980e4913369c40a2850
[ "MIT" ]
null
null
null
ch03/3_5.py
shifumin/create-deep-learning-from-scratch
cdcb753654863950065c6980e4913369c40a2850
[ "MIT" ]
null
null
null
import numpy as np


def softmax(a):
    c = np.max(a)
    exp_a = np.exp(a - c)  # guard against overflow
    sum_exp_a = np.sum(exp_a)
    y = exp_a / sum_exp_a
    return y
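A small usage sketch, assuming the corrected softmax above is in scope: subtracting the maximum keeps np.exp from overflowing while leaving the normalised result unchanged, and the output sums to 1.

a = np.array([1010.0, 1000.0, 990.0])
y = softmax(a)
print(y)          # roughly [1.0, 4.5e-05, 2.1e-09]
print(np.sum(y))  # 1.0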
16.1
37
0.590062
32
161
2.75
0.4375
0.272727
0.238636
0
0
0
0
0
0
0
0
0
0.298137
161
9
38
17.888889
0.778761
0
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0.142857
null
null
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
4
81189d275d73853eb1cdbffbca8cd67a26a3faed
177
py
Python
examples/misc/djangotasks/todo/models.py
takipsizad/pyjs
54db0ba6747aca744f9f3c3e985a17e913dfb951
[ "ECL-2.0", "Apache-2.0" ]
739
2015-01-01T02:05:11.000Z
2022-03-30T15:26:16.000Z
examples/misc/djangotasks/todo/models.py
takipsizad/pyjs
54db0ba6747aca744f9f3c3e985a17e913dfb951
[ "ECL-2.0", "Apache-2.0" ]
33
2015-03-25T23:17:04.000Z
2021-08-19T08:25:22.000Z
examples/misc/djangotasks/todo/models.py
takipsizad/pyjs
54db0ba6747aca744f9f3c3e985a17e913dfb951
[ "ECL-2.0", "Apache-2.0" ]
167
2015-01-01T22:27:47.000Z
2022-03-17T13:29:19.000Z
from django.db import models


class Todo(models.Model):
    task = models.CharField(max_length=30)

    def __unicode__(self):
        return unicode(self.task)

# Create your models here.
17.7
39
0.757062
26
177
4.961538
0.769231
0.170543
0
0
0
0
0
0
0
0
0
0.013158
0.141243
177
9
40
19.666667
0.835526
0.135593
0
0
0
0
0
0
0
0
0
0
0
1
0.2
false
0
0.2
0.2
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
1
0
0
4
d49993acfc37171b414a426caddc20982191c4f9
3,118
py
Python
run.py
Chippers255/scheduler
ff4a0d69a2dc0e16e007583e381ba83c9e660f3f
[ "MIT" ]
1
2021-12-19T02:43:31.000Z
2021-12-19T02:43:31.000Z
run.py
Chippers255/scheduler
ff4a0d69a2dc0e16e007583e381ba83c9e660f3f
[ "MIT" ]
null
null
null
run.py
Chippers255/scheduler
ff4a0d69a2dc0e16e007583e381ba83c9e660f3f
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # run.py # # Created by Thomas Nelson <tn90ca@gmail.com> # # Created..........2015-03-12 # Modified.........2015-03-12 # Import required modules import random # Import required user made modules from scheduler import Store, Employee, Individual common_grounds = Store() employee_list = [] employee_list.append(Employee(common_grounds, 'Tom')) employee_list.append(Employee(common_grounds, 'Adam')) employee_list.append(Employee(common_grounds, 'Julie')) employee_list.append(Employee(common_grounds, 'Vini')) employee_list[0].add_hours(0, '07:30', '12:00', common_grounds) employee_list[0].add_hours(0, '14:00', '15:00', common_grounds) employee_list[0].add_hours(1, '07:30', '12:30', common_grounds) employee_list[0].add_hours(2, '07:30', '12:00', common_grounds) employee_list[0].add_hours(3, '07:30', '12:30', common_grounds) employee_list[0].add_hours(4, '07:30', '12:00', common_grounds) employee_list[1].add_hours(0, '09:00', '09:30', common_grounds) employee_list[1].add_hours(0, '11:00', '11:30', common_grounds) employee_list[1].add_hours(0, '19:00', '21:30', common_grounds) employee_list[1].add_hours(1, '07:30', '15:30', common_grounds) employee_list[1].add_hours(2, '09:00', '09:30', common_grounds) employee_list[1].add_hours(2, '11:00', '11:30', common_grounds) employee_list[1].add_hours(2, '19:00', '21:30', common_grounds) employee_list[1].add_hours(3, '07:30', '15:30', common_grounds) employee_list[1].add_hours(4, '09:00', '09:30', common_grounds) employee_list[1].add_hours(4, '11:00', '11:30', common_grounds) employee_list[2].add_hours(0, '07:30', '21:30', common_grounds) employee_list[2].add_hours(1, '07:30', '21:30', common_grounds) employee_list[2].add_hours(2, '07:30', '21:30', common_grounds) employee_list[2].add_hours(3, '16:00', '21:30', common_grounds) employee_list[2].add_hours(4, '16:00', '18:30', common_grounds) employee_list[2].add_hours(5, '10:00', '17:30', common_grounds) employee_list[3].add_hours(0, '16:00', '21:30', common_grounds) employee_list[3].add_hours(1, '16:00', '21:30', common_grounds) employee_list[3].add_hours(2, '16:00', '21:30', common_grounds) employee_list[3].add_hours(3, '07:30', '21:30', common_grounds) employee_list[3].add_hours(4, '07:30', '18:30', common_grounds) employee_list[3].add_hours(5, '10:00', '17:30', common_grounds) pop_size = 500 population = [Individual(None, employee_list, common_grounds, 0) for x in xrange(pop_size)] for x in xrange(100): best = scheduler.utils.selection(population, pop_size) if best.score <= 10.0: break print "Epoch:", x, "=", best.score new_pop = [] if (x % 5) == 0: pop_size -= 20 while len(new_pop) < pop_size: m1 = scheduler.utils.selection(population, 20) m2 = scheduler.utils.selection(population, 20) new_pop.append(utils.crossover(m1, m2, employee_list, common_grounds, 0)) population = new_pop print print best = scheduler.utils.selection(population, pop_size) print best.score for t in xrange(len(best.c)): print common_grounds.date[0][t], ":", employee_list[best.c[t]].name
36.255814
91
0.70526
518
3,118
4.032819
0.164093
0.224031
0.271422
0.323121
0.744375
0.678794
0.591192
0.549067
0.450455
0.382001
0
0.116271
0.109044
3,118
85
92
36.682353
0.635709
0.059654
0
0.070175
0
0
0.104038
0
0
0
0
0
0
0
null
null
0
0.035088
null
null
0.087719
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
4
d4cd49b38e05ac78b3d8abc05ca06f0132c85ca8
178
py
Python
api/curve/v1/plugins/__init__.py
newstartcheng/Curve
03ad63b36f4622501a609c12c2f9a866db5c6865
[ "Apache-2.0" ]
1
2018-03-10T06:39:28.000Z
2018-03-10T06:39:28.000Z
api/curve/v1/plugins/__init__.py
newstartcheng/Curve
03ad63b36f4622501a609c12c2f9a866db5c6865
[ "Apache-2.0" ]
null
null
null
api/curve/v1/plugins/__init__.py
newstartcheng/Curve
03ad63b36f4622501a609c12c2f9a866db5c6865
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- """ plugin ~~~~ plugin manager and plugin api :copyright: (c) 2017 by Baidu, Inc. :license: Apache, see LICENSE for more details. """
19.777778
51
0.573034
22
178
4.636364
0.863636
0
0
0
0
0
0
0
0
0
0
0.038168
0.264045
178
9
52
19.777778
0.740458
0.837079
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
d4e654f240b413237312c21ffa16db539aac75c2
284
py
Python
game/admin.py
BlackwoodOne/Storytelling2.0
6a75045ced8711f52e568290b4b6b0d546434d7b
[ "MIT" ]
1
2018-07-20T20:17:47.000Z
2018-07-20T20:17:47.000Z
game/admin.py
BlackwoodOne/Storytelling2.0
6a75045ced8711f52e568290b4b6b0d546434d7b
[ "MIT" ]
null
null
null
game/admin.py
BlackwoodOne/Storytelling2.0
6a75045ced8711f52e568290b4b6b0d546434d7b
[ "MIT" ]
null
null
null
from django.contrib import admin

from .models import Game, Player, Sentence, WordCard, GameState, RoundState

admin.site.register(Game)
admin.site.register(Player)
admin.site.register(Sentence)
admin.site.register(WordCard)
admin.site.register(GameState)
admin.site.register(RoundState)
31.555556
73
0.830986
38
284
6.210526
0.368421
0.228814
0.432203
0
0
0
0
0
0
0
0
0
0.059859
284
9
74
31.555556
0.883895
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.25
0
0.25
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
be12f4811c933102fe6f4ee97eb45f662b22e42e
615
py
Python
tests/unit/executors/test_splay.py
Noah-Huppert/salt
998c382f5f2c3b4cbf7d96aa6913ada6993909b3
[ "Apache-2.0" ]
19
2016-01-29T14:37:52.000Z
2022-03-30T18:08:01.000Z
tests/unit/executors/test_splay.py
Noah-Huppert/salt
998c382f5f2c3b4cbf7d96aa6913ada6993909b3
[ "Apache-2.0" ]
223
2016-03-02T16:39:41.000Z
2022-03-03T12:26:35.000Z
tests/unit/executors/test_splay.py
Noah-Huppert/salt
998c382f5f2c3b4cbf7d96aa6913ada6993909b3
[ "Apache-2.0" ]
64
2016-02-04T19:45:26.000Z
2021-12-15T02:02:31.000Z
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals

import salt.executors.splay as splay_exec

# Import Salt libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase


class SplayTestCase(TestCase, LoaderModuleMockMixin):
    def setup_loader_modules(self):
        return {splay_exec: {"__grains__": {"id": "foo"}}}

    def test__get_hash(self):
        # We just want to make sure that this function does not result in an
        # error due to passing a unicode value to bytearray()
        assert splay_exec._get_hash()
30.75
76
0.736585
82
615
5.268293
0.695122
0.0625
0.074074
0
0
0
0
0
0
0
0
0.001988
0.182114
615
19
77
32.368421
0.856859
0.255285
0
0
0
0
0.033113
0
0
0
0
0
0.111111
1
0.222222
false
0
0.444444
0.111111
0.888889
0.111111
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
1
1
0
0
4
be1da47ecf7c71ae3422903348e6b12b29f92fc5
119
py
Python
language_features/other/predicate.py
PrasadHonrao/python-samples
faa48aa3eaf2d67b8cef0114e1f6ef08e2c1300a
[ "MIT" ]
3
2018-08-20T13:00:01.000Z
2021-09-18T04:19:46.000Z
language_features/other/predicate.py
PrasadHonrao/python-samples
faa48aa3eaf2d67b8cef0114e1f6ef08e2c1300a
[ "MIT" ]
1
2021-06-25T20:25:02.000Z
2021-08-19T22:44:31.000Z
language_features/other/predicate.py
PrasadHonrao/python-samples
faa48aa3eaf2d67b8cef0114e1f6ef08e2c1300a
[ "MIT" ]
1
2021-09-18T23:51:20.000Z
2021-09-18T23:51:20.000Z
def is_even(x):
    if (x % 2 == 0):
        return True
    return False

print([x for x in range(100) if is_even(x)])
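For reference, a more compact predicate and an equivalent filter() form; this is an illustrative sketch, not part of the original sample.

def is_even_compact(x: int) -> bool:
    # x is even exactly when it leaves no remainder modulo 2
    return x % 2 == 0

assert list(filter(is_even_compact, range(10))) == [0, 2, 4, 6, 8]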
19.833333
44
0.563025
23
119
2.826087
0.652174
0.184615
0.215385
0
0
0
0
0
0
0
0
0.059524
0.294118
119
6
44
19.833333
0.714286
0
0
0
0
0
0
0
0
0
0
0
0
1
0.2
false
0
0
0
0.6
0.2
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
4
be1ff324537f9bb25d324d922fd32ec1a8f45882
6,451
py
Python
tests/test_pyswitch/test_interface_pvlan.py
mfeed/PySwitchLib
54e872bcbe77f2ae840d845dadb7c5b9c12482ed
[ "Apache-2.0" ]
6
2017-10-02T21:02:02.000Z
2018-07-04T13:56:55.000Z
tests/test_pyswitch/test_interface_pvlan.py
mfeed/PySwitchLib
54e872bcbe77f2ae840d845dadb7c5b9c12482ed
[ "Apache-2.0" ]
23
2017-10-03T18:49:11.000Z
2019-07-20T00:25:44.000Z
tests/test_pyswitch/test_interface_pvlan.py
mfeed/PySwitchLib
54e872bcbe77f2ae840d845dadb7c5b9c12482ed
[ "Apache-2.0" ]
4
2018-02-27T05:43:37.000Z
2019-06-30T13:30:25.000Z
from __future__ import absolute_import import unittest import yaml from attrdict import AttrDict from pyswitch.device import Device class InterfacePrivateVlanTestCase(unittest.TestCase): def __init__(self, *args, **kwargs): super(InterfacePrivateVlanTestCase, self).__init__(*args, **kwargs) with open('tests/test_pyswitch/config.yaml') as fileobj: cfg = AttrDict(yaml.safe_load(fileobj)) switch = cfg.InterfacePrivateVlanTestCase.switch self.switch_ip = switch.ip self.switch_username = switch.username self.switch_pasword = switch.password self.rbridge_id = str(switch.rbridge_id) self.pvlan = str(switch.pvlan) self.svlan = str(switch.svlan) self.int_name = str(switch.int_name) self.int_type = str(switch.int_type) self.conn = (self.switch_ip, '22') self.auth = (self.switch_username, self.switch_pasword) def setUp(self): with Device(conn=self.conn, auth=self.auth) as dev: dev.interface.add_vlan_int(vlan_id=self.pvlan) dev.interface.add_vlan_int(vlan_id=self.svlan) dev.interface.switchport(name=self.int_name, int_type=self.int_type) def test_private_vlan_type(self): with Device(conn=self.conn, auth=self.auth) as dev: dev.interface.private_vlan_type(name=self.pvlan, pvlan_type='isolated') op = dev.interface.private_vlan_type(get=True, name=self.pvlan) self.assertEqual(op, 'isolated') def test_vlan_pvlan_association_add(self): with Device(conn=self.conn, auth=self.auth) as dev: dev.interface.private_vlan_type(name=self.pvlan, pvlan_type='primary') dev.interface.private_vlan_type(name=self.svlan, pvlan_type='isolated') dev.interface.vlan_pvlan_association_add( name=self.pvlan, sec_vlan=self.svlan) op = dev.interface.vlan_pvlan_association_add( get=True, name=self.pvlan) self.assertEqual(op, self.svlan) def test_switchport_pvlan_mapping(self): with Device(conn=self.conn, auth=self.auth) as dev: dev.interface.private_vlan_mode(name=self.int_name, int_type=self.int_type, mode='promiscuous') dev.interface.private_vlan_type(name=self.pvlan, pvlan_type='primary') dev.interface.private_vlan_type(name=self.svlan, pvlan_type='isolated') dev.interface.vlan_pvlan_association_add( name=self.pvlan, sec_vlan=self.svlan) dev.interface.switchport_pvlan_mapping( int_type=self.int_type, name=self.int_name, pri_vlan=self.pvlan, sec_vlan=self.svlan) op = dev.interface.switchport_pvlan_mapping( int_type=self.int_type, name=self.int_name, get=True) self.assertEqual( {'pri_vlan': self.pvlan, 'sec_vlan': self.svlan}, op) dev.interface.switchport_pvlan_mapping( int_type=self.int_type, name=self.int_name, pri_vlan=self.pvlan, sec_vlan=self.svlan, delete=True) op = dev.interface.switchport_pvlan_mapping( int_type=self.int_type, name=self.int_name, get=True) self.assertNotEqual( {'pri_vlan': self.pvlan, 'sec_vlan': self.svlan}, op) def test_private_vlan_mode(self): with Device(conn=self.conn, auth=self.auth) as dev: output = dev.interface.private_vlan_mode(name=self.int_name, int_type=self.int_type, mode='trunk_promiscuous') output = dev.interface.private_vlan_mode(name=self.int_name, int_type=self.int_type, get=True) self.assertEqual(output, 'trunk_promiscuous') output = dev.interface.private_vlan_mode(name=self.int_name, int_type=self.int_type, mode='host') output = dev.interface.private_vlan_mode(name=self.int_name, int_type=self.int_type, get=True) self.assertEqual(output, 'host') def test_pvlan_host_association(self): with Device(conn=self.conn, auth=self.auth) as dev: output = dev.interface.private_vlan_type(name=self.pvlan, pvlan_type='primary') output = dev.interface.private_vlan_type(name=self.svlan, 
pvlan_type='isolated') output = dev.interface.vlan_pvlan_association_add( name=self.pvlan, sec_vlan=self.svlan) output = dev.interface.private_vlan_mode(name=self.int_name, int_type=self.int_type, mode='host') output = dev.interface.pvlan_host_association( int_type=self.int_type, name=self.int_name, pri_vlan=self.pvlan, sec_vlan=self.svlan) output = dev.interface.pvlan_host_association( int_type=self.int_type, name=self.int_name, get=True) self.assertEqual(output, (self.pvlan, self.svlan)) def tearDown(self): with Device(conn=self.conn, auth=self.auth) as dev: dev.interface.del_vlan_int(vlan_id=self.pvlan) dev.interface.del_vlan_int(vlan_id=self.svlan) dev.interface.switchport(name=self.int_name, int_type=self.int_type, enabled=False)
45.429577
78
0.539296
692
6,451
4.780347
0.104046
0.063482
0.049879
0.063482
0.753325
0.724607
0.717654
0.717654
0.669287
0.654776
0
0.000496
0.374981
6,451
141
79
45.751773
0.81994
0
0
0.563025
0
0
0.028368
0.004805
0
0
0
0
0.058824
1
0.067227
false
0.008403
0.042017
0
0.117647
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
be22f93978cf615c6732583d8b01a5ee03fc80f5
138
py
Python
terrascript/data/AdrienneCohea/nomadutility.py
mjuenema/python-terrascript
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
[ "BSD-2-Clause" ]
507
2017-07-26T02:58:38.000Z
2022-01-21T12:35:13.000Z
terrascript/data/AdrienneCohea/nomadutility.py
mjuenema/python-terrascript
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
[ "BSD-2-Clause" ]
135
2017-07-20T12:01:59.000Z
2021-10-04T22:25:40.000Z
terrascript/data/AdrienneCohea/nomadutility.py
mjuenema/python-terrascript
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
[ "BSD-2-Clause" ]
81
2018-02-20T17:55:28.000Z
2022-01-31T07:08:40.000Z
# terrascript/data/AdrienneCohea/nomadutility.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:22:45 UTC)

__all__ = []
23
73
0.768116
19
138
5.368421
0.947368
0
0
0
0
0
0
0
0
0
0
0.097561
0.108696
138
5
74
27.6
0.731707
0.855072
0
0
1
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
07746c04832a5e2c1ea27d089f9e3d41fb0d143d
252
py
Python
ambry/database/postgis.py
kball/ambry
ae865245128b92693d654fbdbb3efc9ef29e9745
[ "BSD-2-Clause" ]
1
2017-06-14T13:40:57.000Z
2017-06-14T13:40:57.000Z
ambry/database/postgis.py
kball/ambry
ae865245128b92693d654fbdbb3efc9ef29e9745
[ "BSD-2-Clause" ]
null
null
null
ambry/database/postgis.py
kball/ambry
ae865245128b92693d654fbdbb3efc9ef29e9745
[ "BSD-2-Clause" ]
null
null
null
""" Copyright (c) 2013 Clarinova. This file is licensed under the terms of the Revised BSD License, included in this distribution as LICENSE.txt """ from postgres import PostgresDatabase class PostgisDatabase(PostgresDatabase): pass
25.2
74
0.746032
31
252
6.064516
0.870968
0
0
0
0
0
0
0
0
0
0
0.019802
0.198413
252
10
75
25.2
0.910891
0.555556
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0.333333
0
0.666667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
4
07b4e778789236da8df522c4e12bf4b824e79c36
172
py
Python
src/courses/tests/conftest.py
iNerV/education-backend
787c0d090eb6e4a9338812941b0246a6e1b8e7ad
[ "MIT" ]
null
null
null
src/courses/tests/conftest.py
iNerV/education-backend
787c0d090eb6e4a9338812941b0246a6e1b8e7ad
[ "MIT" ]
null
null
null
src/courses/tests/conftest.py
iNerV/education-backend
787c0d090eb6e4a9338812941b0246a6e1b8e7ad
[ "MIT" ]
null
null
null
import pytest

pytestmark = [pytest.mark.django_db]


@pytest.fixture
def testcode(mixer):
    return mixer.blend('orders.PromoCode', name='TESTCODE', discount_percent=10)
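A hypothetical test consuming the fixture above; the 'orders.PromoCode' model and its fields are assumed from the mixer.blend() call rather than verified against the project.

def test_testcode_fields(testcode):
    # mixer.blend() pinned these two fields explicitly in the fixture
    assert testcode.name == 'TESTCODE'
    assert testcode.discount_percent == 10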
19.111111
80
0.761628
22
172
5.863636
0.818182
0
0
0
0
0
0
0
0
0
0
0.013072
0.110465
172
8
81
21.5
0.830065
0
0
0
0
0
0.139535
0
0
0
0
0
0
1
0.2
false
0
0.2
0.2
0.6
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
1
0
0
4
5800b61db37b0d2aa2dfb35888bb0b08a5a5e12f
1,678
py
Python
tests/models/MLR_test.py
mOmUcf/DeepCTR
176c61cb4e6a147fae463c143a3e1cd4c04598ed
[ "MIT" ]
1
2020-07-08T05:37:59.000Z
2020-07-08T05:37:59.000Z
tests/models/MLR_test.py
linxid/DeepCTR
1404f0d27396a6e5061c41d7ee3099f8a23ea6e8
[ "MIT" ]
null
null
null
tests/models/MLR_test.py
linxid/DeepCTR
1404f0d27396a6e5061c41d7ee3099f8a23ea6e8
[ "MIT" ]
null
null
null
import pytest
from deepctr.models import MLR
from ..utils import check_model, SAMPLE_SIZE, get_test_data


@pytest.mark.parametrize(
    'region_sparse,region_dense,base_sparse,base_dense,bias_sparse,bias_dense',
    [(0, 2, 0, 2, 0, 1), (0, 2, 0, 1, 0, 2), (0, 2, 0, 0, 1, 0),
     (0, 1, 1, 2, 1, 1,), (0, 1, 1, 1, 1, 2), (0, 1, 1, 0, 2, 0),
     (1, 0, 2, 2, 2, 1), (2, 0, 2, 1, 2, 2), (2, 0, 2, 0, 0, 0)
     ]
)
def test_MLRs(region_sparse, region_dense, base_sparse, base_dense, bias_sparse, bias_dense):
    model_name = "MLRs"

    region_x, y, region_feature_columns = get_test_data(SAMPLE_SIZE, region_sparse, region_dense, prefix='region')
    base_x, y, base_feature_columns = get_test_data(SAMPLE_SIZE, region_sparse, region_dense, prefix='base')
    bias_x, y, bias_feature_columns = get_test_data(SAMPLE_SIZE, region_sparse, region_dense, prefix='bias')

    model = MLR(region_feature_columns, base_feature_columns, bias_feature_columns=bias_feature_columns)
    model.compile('adam', 'binary_crossentropy', metrics=['binary_crossentropy'])
    print(model_name + " test pass!")


def test_MLR():
    model_name = "MLR"

    region_x, y, region_feature_columns = get_test_data(SAMPLE_SIZE, 3, 3, prefix='region')
    base_x, y, base_feature_columns = get_test_data(SAMPLE_SIZE, 3, 3, prefix='base')
    bias_x, y, bias_feature_columns = get_test_data(SAMPLE_SIZE, 3, 3, prefix='bias')

    model = MLR(region_feature_columns)
    model.compile('adam', 'binary_crossentropy', metrics=['binary_crossentropy'])
    check_model(model, model_name, region_x, y)
    print(model_name + " test pass!")


if __name__ == "__main__":
    pass
33.56
109
0.685936
263
1,678
4.038023
0.159696
0.145009
0.072505
0.118644
0.786252
0.701507
0.701507
0.637476
0.637476
0.637476
0
0.043321
0.174613
1,678
49
110
34.244898
0.723466
0
0
0.193548
0
0
0.131783
0.042934
0
0
0
0
0
1
0.064516
false
0.096774
0.096774
0
0.16129
0.064516
0
0
0
null
0
0
0
0
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
4
6afc5803888f5c0785480189b650377ad4be9838
22,234
py
Python
rest/spot_db.py
vigorIv2/spot
55e3a5a0b99fe959e95d7225a3bada857752e9fa
[ "MIT" ]
1
2021-05-03T18:35:30.000Z
2021-05-03T18:35:30.000Z
rest/spot_db.py
vigorIv2/spot
55e3a5a0b99fe959e95d7225a3bada857752e9fa
[ "MIT" ]
1
2022-02-16T00:55:15.000Z
2022-02-16T00:55:15.000Z
rest/spot_db.py
vigorIv2/spot
55e3a5a0b99fe959e95d7225a3bada857752e9fa
[ "MIT" ]
null
null
null
#!flask/bin/python import time import traceback import datetime # import json import psycopg2 from psycopg2.pool import SimpleConnectionPool import logging, logging.config, yaml logging.config.dictConfig(yaml.load(open('logging.conf'))) logfl = logging.getLogger('file') logconsole = logging.getLogger('console') logfl.debug("Debug FILE") logconsole.debug("Debug CONSOLE") def initPool(): global g_pool cs = "postgresql://huhuladb00:26257/huhula?user=huhulaman&sslcert=/home/ubuntu/spot/certs/client.huhulaman.crt&sslkey=/home/ubuntu/spot/certs/client.huhulaman.key&sslmode=require&ssl=true" g_pool = SimpleConnectionPool(1, 9, cs) # con = g_pool.getconn() # con.set_session(autocommit=True) # g_pool.putconn(con) initPool() def openConnoldStyle(): global conn global cur # conn = psycopg2.connect(database="huhula", user="root", host="huhuladb00", port=26257) # secure way : cs = "postgresql://huhuladb00:26257/huhula?user=huhulaman&sslcert=/home/ubuntu/spot/certs/client.huhulaman.crt&sslkey=/home/ubuntu/spot/certs/client.huhulaman.key&sslmode=require&ssl=true" conn = psycopg2.connect(cs) conn.set_session(autocommit=True) cur = conn.cursor() def getInformedSpots(uid,dfrom,dto) : lconn = g_pool.getconn() cur = lconn.cursor() try: cur.execute("""select sum(orig_quantity) as qty, informer_id as uid, min(inserted_at) as mnat, max(inserted_at) as mxat, count(*) as cnt from spots where informer_id = '%s' and inserted_at between '%s' and '%s' group by informer_id;""" % (uid,dfrom,dto)) row=cur.fetchone() if row: return row else: return None finally: cur.close() lconn.commit() g_pool.putconn(lconn) def getOccupiedSpots(uid,dfrom,dto) : lconn = g_pool.getconn() cur = lconn.cursor() try: cur.execute("""select count(*) as qty, taker_id as uid, min(inserted_at) as mnat, max(inserted_at) as mxat from occupy where taker_id = '%s' and inserted_at between '%s' and '%s' group by taker_id;""" % (uid,dfrom,dto)) row=cur.fetchone() if row: return row else: return None finally: cur.close() lconn.commit() g_pool.putconn(lconn) def getUserProperties(uid) : lconn = g_pool.getconn() cur = lconn.cursor() try: cur.execute("SELECT roles FROM users WHERE id = '%s'" % (uid,)) row=cur.fetchone() if row: return row else: return None finally: cur.close() lconn.commit() g_pool.putconn(lconn) def getUserBalance(user) : lconn = g_pool.getconn() cur = lconn.cursor() try: cur.execute("SELECT id,balance FROM users WHERE userhash = '%s'" % (user,)) row=cur.fetchone() if row == None: return None informer_id = row[0] balance = str(row[1]) cur.execute("select sum(informed_qty) as iqty, sum(occupied_qty) as octy, sum(gift) as gift, sum(penalty) as penalty, sum(balance) as balance from huhula.bill_payable where user_id = '%s'" % (informer_id,)) row2=cur.fetchone() if row2: return (balance,str(row2[0]),str(row2[1]),str(row2[2]),str(row2[3]),str(row2[4])) else: return None finally: cur.close() lconn.commit() g_pool.putconn(lconn) def cleanUp(users) : for u in users: informer_id=getUserID(u) if informer_id != None: logconsole.info("Cleaning up derivatives for user "+str(u)+" uid="+str(informer_id)) lconn = g_pool.getconn() lconn.set_session(autocommit=True) cur = lconn.cursor() try: cur.execute("delete FROM link WHERE referral_id = (select id from referral where sender_id = '%s')" % (informer_id,)) cur.execute("delete FROM referral where sender_id = '%s'" % (informer_id,)) cur.execute("delete FROM occupy WHERE taker_id = '%s'" % (informer_id,)) cur.execute("delete FROM parked WHERE informer_id = '%s'" % (informer_id,)) cur.execute("delete FROM bill 
WHERE user_id = '%s'" % (informer_id,)) cur.execute("delete FROM reference WHERE sender_id = '%s'" % (informer_id,)) finally: cur.close() g_pool.putconn(lconn) for u in users: informer_id=getUserID(u) if informer_id != None: logconsole.info("Cleaning up root recs for user "+str(u)+" uid="+str(informer_id)) lconn = g_pool.getconn() lconn.set_session(autocommit=True) cur = lconn.cursor() try: cur.execute("delete FROM spots WHERE informer_id = '%s'" % (informer_id,)) cur.execute("delete FROM users WHERE userhash = '%s'" % (u,)) finally: cur.close() g_pool.putconn(lconn) def getUserID(user) : lconn = g_pool.getconn() cur = lconn.cursor() cur.execute("SELECT id FROM users WHERE userhash = '%s'" % (user,)) row = cur.fetchone() try: if row: return row[0] else: return None finally: cur.close() lconn.commit() g_pool.putconn(lconn) def countReferrals(user) : sender_id=getUserID(user) if sender_id == None: return None lconn = g_pool.getconn() cur = lconn.cursor() cur.execute("select count(*) from huhula.referral r inner join huhula.link l on (l.referral_id = r.id) where r.sender_id = '%s' and not l.updated_at is null " % (sender_id,)) row = cur.fetchone() try: if row: return row[0] else: return 0 finally: cur.close() lconn.commit() g_pool.putconn(lconn) def newReferral(user,non_members) : sender_id=getUserID(user) if sender_id == None: return None lconn = g_pool.getconn() cur = lconn.cursor() try: cur.execute("INSERT INTO referral(sender_id) values(%s) RETURNING id;", (sender_id,)) reference=cur.fetchone()[0] for to_hash in non_members: cur.execute("INSERT INTO link(referral_id,to_hash) values(%s,%s);", (reference,to_hash,)) return reference except Exception as error: jts = traceback.format_exc() logconsole.error(jts) lconn.rollback() return None finally: cur.close() lconn.commit() g_pool.putconn(lconn) return None def closeReferral(referral_id,user_hash) : lconn = g_pool.getconn() cur = lconn.cursor() try: cur.execute("update link set updated_at=now() where referral_id=%s and to_hash=%s and updated_at is null ", (referral_id,user_hash,)) rc = cur.rowcount sender_id = getReferralSender(referral_id) return (rc, sender_id) except Exception as error: jts = traceback.format_exc() logconsole.error(jts) lconn.rollback() return None finally: cur.close() lconn.commit() g_pool.putconn(lconn) return None def getReferralSender(referral_id) : lconn = g_pool.getconn() cur = lconn.cursor() cur.execute("select distinct sender_id from referral where id = '%s' " % (referral_id,)) row = cur.fetchone() try: if row: return row[0] else: return None finally: cur.close() lconn.commit() g_pool.putconn(lconn) def getReferral(userhash) : lconn = g_pool.getconn() cur = lconn.cursor() cur.execute("SELECT referral_id FROM link WHERE to_hash = '%s'" % (userhash,)) row = cur.fetchone() try: if row: return row[0] else: return None finally: cur.close() lconn.commit() g_pool.putconn(lconn) def newReference(user) : sender_id=getUserID(user) if sender_id == None: return None lconn = g_pool.getconn() cur = lconn.cursor() try: cur.execute("INSERT INTO reference(sender_id) values(%s) RETURNING id;", (sender_id,)) reference=cur.fetchone()[0] return reference finally: cur.close() lconn.commit() g_pool.putconn(lconn) return None def closeReferrence(ref, receiver_id) : lconn = g_pool.getconn() cur = lconn.cursor() try: cur.execute("update reference set receiver_id=%s, updated_at=now() where receiver_id is null and id=%s", (receiver_id,ref,)) if cur.rowcount == 0: return 404 except Exception as error: jts = traceback.format_exc() 
logconsole.error(jts) lconn.rollback() return 404 finally: cur.close() lconn.commit() g_pool.putconn(lconn) return 0 def getSenderId(ref) : lconn = g_pool.getconn() cur = lconn.cursor() cur.execute("SELECT sender_id FROM reference WHERE receiver_id is null and id = '%s'" % (ref,)) row = cur.fetchone() try: if row: return row[0] else: return None finally: cur.close() lconn.commit() g_pool.putconn(lconn) def checkSameSpot(informer_id,spot,lat,lon) : selsql = """select count(*) as cnt from huhula.spots where quantity > 0 and informer_id = '%s' and array_position(direction,%s) is not null and round(longitude,4) = round(%s,4) and round(latitude,4) = round(%s,4) """ % (informer_id, spot, lon, lat,) logconsole.debug("SQL:" + selsql) lconn = g_pool.getconn() cur = lconn.cursor() try: cur.execute(selsql) row=cur.fetchone() if row: return row[0] else: return None finally: cur.close() lconn.commit() g_pool.putconn(lconn) def locateSpot(latitude0,longitude0) : selsql = """select id, spot, substring(cast(age as string),1,16) as age, sqrt(df*df + dl*dl) * 6371e3 as dist, latitude, longitude from ( select sp.id, direction[1] as spot, sp.inserted_at as age, (longitude*pi()/180 - %s*pi()/180) * cos((latitude*pi()/180 + %s*pi()/180)/2) as dl, (latitude*pi()/180 - %s*pi()/180) as df, latitude, longitude from huhula.spots as sp where quantity > 0 -- and age(sp.inserted_at) < INTERVAL '2d2h1m1s1ms1us6ns' order by age(sp.inserted_at) ) where sqrt(df*df + dl*dl) * 6371e3 < 300 order by sqrt(df*df + dl*dl) * 6371e3, age desc limit 30""" % (longitude0,latitude0,latitude0,) logconsole.debug("SQL:" + selsql) lconn = g_pool.getconn() cur = lconn.cursor() try: cur.execute(selsql) row=cur.fetchall() if row: return row else: return None finally: cur.close() lconn.commit() g_pool.putconn(lconn) def getReportedSpots(rid,hd) : if '-' in rid: informer_id=rid else: informer_id=getUserID(rid) if informer_id == None: return 404 selsql = "select longitude, latitude from huhula.spots as sp where sp.informer_id = '%s' and age(sp.inserted_at) < INTERVAL '%sh'" % (informer_id,hd,) logconsole.debug("SQL:" + selsql) lconn = g_pool.getconn() cur = lconn.cursor() try: cur.execute(selsql) rows=cur.fetchall() if rows: return rows else: return None finally: cur.close() lconn.commit() g_pool.putconn(lconn) def getParkedSpots(rid,hd) : if rid.isnumeric(): informer_id=getUserID(rid) else: informer_id=rid if informer_id == None: return 404 selsql = "select longitude, latitude from huhula.parked as sp where sp.informer_id = '%s' and age(sp.inserted_at) < INTERVAL '%sh'" % (informer_id,hd,) logconsole.debug("SQL:" + selsql) lconn = g_pool.getconn() cur = lconn.cursor() try: cur.execute(selsql) rows=cur.fetchall() if rows: return rows else: return None finally: cur.close() lconn.commit() g_pool.putconn(lconn) def getNearSpots(lt,lg,hd) : selsql = """select longitude, latitude from ( select sp.id, age(sp.inserted_at) as age, (longitude*pi()/180 - %s*pi()/180) * cos((latitude*pi()/180 + %s*pi()/180)/2) as dl, (latitude*pi()/180 - %s*pi()/180) as df, latitude, longitude from huhula.spots as sp where quantity > 0 and age(sp.inserted_at) < INTERVAL '%sh' order by age(sp.inserted_at) ) where sqrt(df*df + dl*dl) * 6371e3 < 2000 order by sqrt(df*df + dl*dl) * 6371e3, age limit 1000""" % (lg,lt,lt,hd,) logconsole.debug("SQL:" + selsql) lconn = g_pool.getconn() cur = lconn.cursor() try: cur.execute(selsql) rows=cur.fetchall() if rows: return rows else: return None finally: cur.close() lconn.commit() g_pool.putconn(lconn) def getAllNearSpots(lt,lg,hd) : 
selsql = """select longitude, latitude from ( select sp.id, age(sp.inserted_at) as age, (longitude*pi()/180 - %s*pi()/180) * cos((latitude*pi()/180 + %s*pi()/180)/2) as dl, (latitude*pi()/180 - %s*pi()/180) as df, latitude, longitude from huhula.spots as sp where age(sp.inserted_at) < INTERVAL '%sh' order by age(sp.inserted_at) ) where sqrt(df*df + dl*dl) * 6371e3 < 20000 order by sqrt(df*df + dl*dl) * 6371e3, age """ % (lg,lt,lt,hd,) logconsole.debug("SQL:" + selsql) lconn = g_pool.getconn() cur = lconn.cursor() try: cur.execute(selsql) rows=cur.fetchall() if rows: return rows else: return None finally: cur.close() lconn.commit() g_pool.putconn(lconn) def getSpotClusters(hd) : selsql = """select count(*), cast(max(s.inserted_at) as date), round(longitude,2) as lg, round(latitude,2) as lt from huhula.spots s where age(s.inserted_at) < INTERVAL '%sh' group by round(longitude,2), round(latitude,2) order by round(longitude,2) limit 5000""" % (hd,) logconsole.debug("SQL:" + selsql) lconn = g_pool.getconn() cur = lconn.cursor() try: cur.execute(selsql) rows=cur.fetchall() if rows: return rows else: return None finally: cur.close() lconn.commit() g_pool.putconn(lconn) def revokeRole(user,role) : lconn = g_pool.getconn() cur = lconn.cursor() try: cur.execute("update huhula.users set roles=array_remove(roles,%s) where userhash=%s",(role,user,)) lconn.commit() except Exception as error: jts = traceback.format_exc() logconsole.error(jts) lconn.rollback() finally: cur.close() g_pool.putconn(lconn) def newUser(user) : lconn = g_pool.getconn() cur = lconn.cursor() try: cur.execute("INSERT INTO huhula.users(userhash,roles) values(%s,array['promoter'])",(user,)) lconn.commit() except Exception as error: jts = traceback.format_exc() logconsole.error(jts) lconn.rollback() finally: cur.close() g_pool.putconn(lconn) def insertParked(informer,informed_at,azimuth,altitude,longitude,latitude,client_at) : informer_id=getUserID(informer) if ( informer_id is None ) : return 404 lconn = g_pool.getconn() cur = lconn.cursor() try: cur.execute("INSERT INTO huhula.parked(informer_id,informed_at,azimuth,altitude,longitude,latitude,client_at) values(%s,%s,%s,%s,%s,%s,%s)", (informer_id,informed_at,azimuth,altitude,longitude,latitude,client_at)) lconn.commit() except Exception as error: jts = traceback.format_exc() logconsole.error(jts) lconn.rollback() finally: cur.close() g_pool.putconn(lconn) return 0 def last_day_of_month(any_day): next_month = any_day.replace(day=28) + datetime.timedelta(days=4) # this will never fail result = next_month - datetime.timedelta(days=next_month.day) return str(result) def upsertBill(pconn, user_id, for_date, informed_qty_delta, occupied_qty_delta) : cur = pconn.cursor() try: # first attempt to update, if it yeilds zero affected rows that meants it needs to be inserted usql = "update huhula.bill set updated_at=now(), informed_qty=informed_qty+%s, occupied_qty=occupied_qty+%s where user_id='%s' and for_date=cast('%s' as date)" % (informed_qty_delta, occupied_qty_delta, user_id, for_date) logconsole.debug("update bill sql:" + usql) cur.execute(usql) if cur.rowcount == 0: isql = "INSERT INTO huhula.bill(user_id, for_date, informed_qty, occupied_qty) values('%s',cast('%s' as date),%s,%s)" % (user_id, for_date, informed_qty_delta, occupied_qty_delta) logconsole.debug("insert bill sql:" + isql) cur.execute(isql) except Exception as error: jts = traceback.format_exc() logconsole.error(jts) raise Exception('Exception while update bill', 'Bill Update Error') finally: cur.close() def 
giftBill(user_id, for_date, amount=0) : lconn = g_pool.getconn() cur = lconn.cursor() try: # first attempt to update, if it yeilds zero affected rows that meants it needs to be inserted usql = "update huhula.bill set updated_at=now(), gift=gift+%s where user_id='%s' and for_date=cast('%s' as date)" % (amount, user_id, for_date) logconsole.debug("update bill gift sql:" + usql) cur.execute(usql) if cur.rowcount == 0: isql = "INSERT INTO huhula.bill(user_id, for_date, gift) values('%s',cast('%s' as date),%s)" % (user_id, for_date, amount) logconsole.debug("insert bill sql:" + isql) cur.execute(isql) lconn.commit() except Exception as error: jts = traceback.format_exc() logconsole.error(jts) lconn.rollback() raise Exception('Exception while update bill gift ', 'Bill Update Error') finally: cur.close() g_pool.putconn(lconn) def insertSpot(informer,informed_at,azimuth,altitude,longitude,latitude,spots,client_at,mode,qty) : informer_id=getUserID(informer) if ( informer_id is None ) : return 404 # newUser(informer) # informer_id=getUserID(informer) sameSpot = checkSameSpot(informer_id,spots[0],latitude,longitude) lconn = g_pool.getconn() cur = lconn.cursor() try: if (sameSpot is None) or (sameSpot == 0) : cur.execute("INSERT INTO huhula.spots(informer_id,informed_at,azimuth,altitude,longitude,latitude,direction,quantity,orig_quantity,client_at,mode) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)", (informer_id,informed_at,azimuth,altitude,longitude,latitude,spots,qty,qty,client_at,mode)) upsertBill(lconn,informer_id, last_day_of_month(datetime.datetime.fromtimestamp(informed_at/1000.0)), qty, 0) lconn.commit() return 0 else : return 409 except Exception as error: jts = traceback.format_exc() logconsole.error(jts) lconn.rollback() finally: cur.close() g_pool.putconn(lconn) def bulkInsertSpot(informer_id,longitude,latitude,qty) : lconn = g_pool.getconn() cur = lconn.cursor() try: cur.execute("INSERT INTO huhula.spots(informer_id,informed_at,azimuth,altitude,longitude,latitude,direction,quantity,orig_quantity,client_at,mode) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)", (informer_id,int(round(time.time() * 1000)),0,0.156,longitude,latitude,[-3],qty,qty,int(round(time.time() * 1000)),2)) lconn.commit() return 0 except Exception as error: jts = traceback.format_exc() logconsole.error(jts) lconn.rollback() finally: cur.close() g_pool.putconn(lconn) def getInformerID(sid) : lconn = g_pool.getconn() cur = lconn.cursor() cur.execute("SELECT informer_id FROM spots WHERE id = '%s' and quantity > 0" % (sid,)) row = cur.fetchone() try: if row: return row[0] else: return None finally: cur.close() lconn.commit() g_pool.putconn(lconn) def occupySpot(taker,sid,taken_at,client_at) : taker_id=getUserID(taker) if ( taker_id is None ) : return 404 lconn = g_pool.getconn() cur = lconn.cursor() try: informer_id = getInformerID(sid) if informer_id != None: cur.execute("update huhula.spots set quantity=quantity-1 where id=%s and quantity > 0", (sid,)) if cur.rowcount > 0: cur.execute("INSERT INTO huhula.occupy(spot_id, taken_at, taker_id, client_at) values(%s,now(),%s,%s)", (sid, taker_id, client_at)) upsertBill(lconn,informer_id, last_day_of_month(datetime.datetime.fromtimestamp(taken_at/1000.0)), 1, 0) # transfer one token from taker to informer upsertBill(lconn,taker_id, last_day_of_month(datetime.datetime.fromtimestamp(taken_at/1000.0)), 0, 1) lconn.commit() else: return 404 else: return 404 except Exception as error: jts = traceback.format_exc() logconsole.error(jts) lconn.rollback() finally: cur.close() g_pool.putconn(lconn) 
return 0
33.996942
226
0.592246
2,822
22,234
4.561658
0.105245
0.024858
0.028898
0.039618
0.717859
0.70776
0.684378
0.666434
0.636215
0.617261
0
0.017374
0.285509
22,234
653
227
34.049005
0.792962
0.023163
0
0.73854
0
0.039049
0.290565
0.053672
0
0
0
0
0
0
null
null
0
0.010187
null
null
0
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
4
ed038430464535f1a51b180920d6b6692841c0c4
139
py
Python
code/10-12-pytest/day10.py
llamafarmer/100_days_of_code
6af973157aa4c77cd6f88bf1f0fa5e60a375339c
[ "MIT" ]
1
2018-08-04T00:41:32.000Z
2018-08-04T00:41:32.000Z
code/10-12-pytest/day10.py
llamafarmer/100_days_of_code
6af973157aa4c77cd6f88bf1f0fa5e60a375339c
[ "MIT" ]
null
null
null
code/10-12-pytest/day10.py
llamafarmer/100_days_of_code
6af973157aa4c77cd6f88bf1f0fa5e60a375339c
[ "MIT" ]
null
null
null
## https://codechalleng.es/challenges/39/
## https://github.com/llamafarmer/Pi_Weather_Station

# Going to use the repo above with pytest
23.166667
52
0.76259
20
139
5.2
0.95
0
0
0
0
0
0
0
0
0
0
0.016129
0.107914
139
5
53
27.8
0.822581
0.920863
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
ed0dd2815fba30c28bc9ab56f525f21dd6b8ec5d
98
py
Python
279/armstrong.py
alehpineda/bitesofpy
bfd319a606cd0b7b9bfb85a3e8942872a2d43c48
[ "MIT" ]
null
null
null
279/armstrong.py
alehpineda/bitesofpy
bfd319a606cd0b7b9bfb85a3e8942872a2d43c48
[ "MIT" ]
2
2020-09-24T11:25:29.000Z
2021-06-25T15:43:35.000Z
279/armstrong.py
alehpineda/bitesofpy
bfd319a606cd0b7b9bfb85a3e8942872a2d43c48
[ "MIT" ]
null
null
null
def is_armstrong(n: int) -> bool:
    return sum([pow(int(x), len(str(n))) for x in str(n)]) == n
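A quick sanity check of the predicate above on known Armstrong numbers (the sum of each digit raised to the number of digits equals the number itself); these asserts are an illustrative sketch, not part of the original bite.

assert is_armstrong(153)     # 1**3 + 5**3 + 3**3 == 153
assert is_armstrong(9474)    # 9**4 + 4**4 + 7**4 + 4**4 == 9474
assert not is_armstrong(154)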
32.666667
63
0.581633
20
98
2.8
0.7
0.142857
0
0
0
0
0
0
0
0
0
0
0.183673
98
2
64
49
0.7
0
0
0
0
0
0
0
0
0
0
0
0
1
0.5
false
0
0
0.5
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
4
ed1ee39f1affe1e15d8a6768d028b542ae534f2d
177
py
Python
new/src/15.02.2020/string4.py
VladBaryliuk/my_start_tasks
bf387543e6fa3ee303cbef04d2af48d558011ed9
[ "Apache-2.0" ]
null
null
null
new/src/15.02.2020/string4.py
VladBaryliuk/my_start_tasks
bf387543e6fa3ee303cbef04d2af48d558011ed9
[ "Apache-2.0" ]
null
null
null
new/src/15.02.2020/string4.py
VladBaryliuk/my_start_tasks
bf387543e6fa3ee303cbef04d2af48d558011ed9
[ "Apache-2.0" ]
null
null
null
string = str (input())
string_copy = string
new_string = string.replace(string[0],string[-1])
new_string = string_copy.replace(string_copy[-1],string_copy[0])
print(new_string)
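The snippet above appears intended to swap the first and last characters of the input, but str.replace substitutes every occurrence and the second assignment overwrites the first, so for many inputs the printed result is not a clean swap. A hypothetical slicing-based alternative:

def swap_ends(s: str) -> str:
    # Swap the first and last characters; strings shorter than 2 are unchanged.
    if len(s) < 2:
        return s
    return s[-1] + s[1:-1] + s[0]

print(swap_ends("hello"))  # -> "oellh"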
29.5
64
0.762712
28
177
4.571429
0.321429
0.3125
0.234375
0
0
0
0
0
0
0
0
0.02454
0.079096
177
5
65
35.4
0.760736
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0.2
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
ed28e6d1e245395c490051af9d2bade0cb88ca07
55
py
Python
code/abc120_a_03.py
KoyanagiHitoshi/AtCoder
731892543769b5df15254e1f32b756190378d292
[ "MIT" ]
3
2019-08-16T16:55:48.000Z
2021-04-11T10:21:40.000Z
code/abc120_a_03.py
KoyanagiHitoshi/AtCoder
731892543769b5df15254e1f32b756190378d292
[ "MIT" ]
null
null
null
code/abc120_a_03.py
KoyanagiHitoshi/AtCoder
731892543769b5df15254e1f32b756190378d292
[ "MIT" ]
null
null
null
A, B, C = map(int, input().split())
print(min(C, B//A))
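Assuming the usual reading of this AtCoder problem (a drink costs A yen, you hold B yen, and only C servings exist), the answer is capped both by budget and by stock; a tiny worked sketch:

def max_servings(a: int, b: int, c: int) -> int:
    # B // A servings are affordable, but never more than the C available.
    return min(c, b // a)

assert max_servings(2, 11, 4) == 4  # 11 // 2 == 5, capped at 4
assert max_servings(3, 9, 5) == 3   # 9 // 3 == 3, under the cap of 5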
27.5
35
0.545455
12
55
2.5
0.75
0
0
0
0
0
0
0
0
0
0
0
0.127273
55
2
36
27.5
0.625
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
0
0.5
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
4
ed2eacb53ae14f236d6e04dfd83126fd3f176438
270
py
Python
sample_test.py
thautwarm/reley
17e5730c1afbefaeb22103719c85f08333c65937
[ "MIT" ]
12
2018-09-13T02:32:21.000Z
2021-08-06T04:59:26.000Z
sample_test.py
thautwarm/reley
17e5730c1afbefaeb22103719c85f08333c65937
[ "MIT" ]
null
null
null
sample_test.py
thautwarm/reley
17e5730c1afbefaeb22103719c85f08333c65937
[ "MIT" ]
null
null
null
from reley.impl.pycompat import *
import os
import haskell_test.comment
import haskell_test.import_hs
import haskell_test.operator
import haskell_test.sum_n
import haskell_test.test_prelude
from haskell_test.sum_n import m_sum

lst = (5, (2, (1, ())))
print(m_sum(lst))
22.5
36
0.803704
46
270
4.456522
0.434783
0.321951
0.414634
0.146341
0.204878
0
0
0
0
0
0
0.012397
0.103704
270
11
37
24.545455
0.834711
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.8
0
0.8
0.1
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
ed4cfb8739cd8629d093496054f0718bed5b3178
43
py
Python
somedjango/stubs/__init__.py
dvaldivia/grpc-celery-fork-bug
421eca43daef9e138d53e6f095cf470b98c14f99
[ "MIT" ]
null
null
null
somedjango/stubs/__init__.py
dvaldivia/grpc-celery-fork-bug
421eca43daef9e138d53e6f095cf470b98c14f99
[ "MIT" ]
null
null
null
somedjango/stubs/__init__.py
dvaldivia/grpc-celery-fork-bug
421eca43daef9e138d53e6f095cf470b98c14f99
[ "MIT" ]
1
2019-03-14T04:09:43.000Z
2019-03-14T04:09:43.000Z
""" Copyright (C) 2017 Espressive Inc """
8.6
33
0.627907
5
43
5.4
1
0
0
0
0
0
0
0
0
0
0
0.114286
0.186047
43
4
34
10.75
0.657143
0.767442
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
ed60343c2b753a93c70bf1053df28bb98bf0ccd1
856
py
Python
src/groupspkg/rubik.py
MrCamoga/Finite-Groups-2
5e52102a423a8ff4eed0cbf59617b6fe999e7ef3
[ "MIT" ]
2
2020-05-01T21:48:46.000Z
2021-07-15T13:54:37.000Z
src/groupspkg/rubik.py
MrCamoga/Finite-Groups-2
5e52102a423a8ff4eed0cbf59617b6fe999e7ef3
[ "MIT" ]
null
null
null
src/groupspkg/rubik.py
MrCamoga/Finite-Groups-2
5e52102a423a8ff4eed0cbf59617b6fe999e7ef3
[ "MIT" ]
null
null
null
#Rubik's Cube
from groups import GeneralizedSymmetric
from functools import reduce


def Rubik222():
    """
    The elements operate in the usual order. Example: RUR'U' = G.op(R,U,R,R,R,U,U,U)

    Returns G, (R,U,F,L,D,B)
    """
    G = GeneralizedSymmetric(3,8)
    return (G, (19106868, 38578680, 133254322, 105367583, 118098, 3212037))


def Rubik333():
    """
    The elements operate in the usual order. Example: RUR'U' = G.op(R,U,R,R,R,U,U,U)

    Returns G, (R,U,F,L,D,B,M,E,S,X,Y,Z)
    """
    G = GeneralizedSymmetric(3,8)*GeneralizedSymmetric(2,12)
    return (G, ((12260357905730210868, 47577203601977420280, 307133848437835100722, 137967831317727263, 19503969848658, 1232685858040989957, 93631695786114433920, 16383398063155200, 8613117329516824320, 361990033464047119142, 47583584714230479009, 316877050992623394868)))
37.217391
272
0.69743
110
856
5.427273
0.518182
0.020101
0.060302
0.067002
0.231156
0.231156
0.231156
0.231156
0.231156
0.231156
0
0.409027
0.171729
856
22
273
38.909091
0.433004
0.275701
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0.25
0
0.75
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
4
ed8ae8093d687b5f5cfcca5a0d9849252ab9f84d
10,563
py
Python
tests/test_pathbased_similarity.py
Germanet-sfs/germanetpy
806c38084a71ffe190438af968716ad06b7c813d
[ "Apache-2.0" ]
5
2020-05-29T20:40:36.000Z
2022-01-06T13:34:42.000Z
tests/test_pathbased_similarity.py
Germanet-sfs/germanetpy
806c38084a71ffe190438af968716ad06b7c813d
[ "Apache-2.0" ]
3
2021-01-28T15:46:27.000Z
2021-06-07T15:23:49.000Z
tests/test_pathbased_similarity.py
Germanet-sfs/germanetpy
806c38084a71ffe190438af968716ad06b7c813d
[ "Apache-2.0" ]
1
2021-07-19T18:47:51.000Z
2021-07-19T18:47:51.000Z
from pathlib import Path
import sys
import logging

import pytest
import numpy as np
from lxml import etree as ET

from germanetpy.germanet import Germanet
from germanetpy.path_based_relatedness_measures import PathBasedRelatedness
from germanetpy.synset import WordCategory

logger = logging.getLogger('logging_test_semrel')
d = str(Path(__file__).parent.parent) + "/data"

try:
    germanet_data = Germanet(d)
    johannis_wurm = germanet_data.get_synset_by_id("s49774")
    leber_trans = germanet_data.get_synset_by_id("s83979")
    relatedness_nouns = PathBasedRelatedness(germanet=germanet_data, category=WordCategory.nomen, max_len=35,
                                             max_depth=20, synset_pair=(johannis_wurm, leber_trans))
    relatedness_verbs = PathBasedRelatedness(germanet=germanet_data, category=WordCategory.verben)
    relatedness_adj = PathBasedRelatedness(germanet=germanet_data, category=WordCategory.adj)
except ET.ParseError:
    message = "Unable to load GermaNet data at {0} . Aborting...".format(d)
    logger.error(message, ET.ParseError)
    sys.exit(0)
except IOError:
    message = "GermaNet data not found at {0} . Aborting...".format(d)
    logger.error(message, IOError)
    sys.exit(0)

unnormalized_path_len_nouns = [
    ('s49774', 's83979', 35), ('s49774', 's20560', 35), ('s49774', 's20561', 35),
    ('s49774', 's138670', 35), ('s9439', 's48837', 12), ('s39183', 's39496', 5)
]

unnormalized_path_len_adj = [
    ('s91', 's102579', 7), ('s5399', 's5427', 4), ('s95326', 's95987', 20),
    ('s95326', 's94396', 20), ('s94411', 's95987', 20), ('s94411', 's94396', 20)
]

unnormalized_path_len_verbs = [
    ('s58565', 's58578', 2), ('s57835', 's57328', 5), ('s106731', 's123246', 28),
    ('s106731', 's120154', 28), ('s106731', 's57534', 28), ('s106731', 's123240', 28),
    ('s119463', 's120154', 28), ('s119463', 's57534', 28), ('s119463', 's123240', 28),
    ('s119463', 's123246', 28)
]

normalized_path_len_nouns = [
    ('s46047', 's45380', 1.0, 0.88571), ('s46047', 's45380', 10.0, 8.8571),
    ('s49774', 's83979', 1.0, 0.0), ('s49774', 's49774', 1.0, 1.0),
    ('s49774', 's49774', 10.0, 10.0), ('s46683', 's46650', 10.0, 8.5714)
]

unnormalized_lch_nouns = [
    ('s46047', 's45380', 0.92428), ('s49774', 's49774', 1.62325), ('s46683', 's46650', 0.84509),
]

normalized_lch_nouns = [
    ('s46047', 's45380', 10.0, 5.50877), ('s49774', 's49774', 10.0, 10.0), ('s46683', 's46650', 10.0, 4.99999)
]

unnormalized_lch_verbs = [
    ('s57534', 's119463', 0.04275), ('s57534', 's57534', 1.50515),
]

normalized_lch_verbs = [
    ('s57534', 's119463', 10.0, 0.0), ('s57534', 's57534', 10.0, 10.0),
]

unnormalized_lch_adj = [
    ('s94396', 's94411', 0.020203), ('s94396', 's94396', 1.342423)
]

normalized_lch_adj = [
    ('s94396', 's94411', 10.0, 0.0), ('s94396', 's94396', 10.0, 10.0)
]

unnormalized_wup_nouns = [
    ('s46047', 's45380', 0.75), ('s49774', 's49774', 1.0), ('s46683', 's46650', 0.70588),
]

normalized_wup_nouns = [
    ('s46047', 's45380', 10.0, 7.5), ('s49774', 's49774', 10.0, 10.0), ('s46683', 's46650', 10.0, 7.0588)
]

unnormalized_wup_verbs = [
    ('s57534', 's119463', 0.0), ('s57534', 's57534', 1.0),
]

normalized_wup_verbs = [
    ('s57534', 's119463', 10.0, 0.0), ('s57534', 's57534', 10.0, 10.0),
]

unnormalized_wup_adj = [
    ('s94396', 's94411', 0.0), ('s94396', 's94396', 1.0)
]

normalized_wup_adj = [
    ('s94396', 's94411', 10.0, 0.0), ('s94396', 's94396', 10.0, 10.0)
]


@pytest.mark.parametrize('id1,id2,pathlength', unnormalized_path_len_nouns)
def test_pathlength_nouns(id1, id2, pathlength):
    """Tests whether the length of the shortest path between two given nouns is correct."""
    synset1 = germanet_data.get_synset_by_id(id1)
    synset2 = germanet_data.get_synset_by_id(id2)
    dist = synset1.shortest_path_distance(synset2)
    np.testing.assert_equal(dist, pathlength)


@pytest.mark.parametrize('id1,id2,pathlength', unnormalized_path_len_adj)
def test_pathlength_adj(id1, id2, pathlength):
    """Tests whether the length of the shortest path between two given adjectives is correct."""
    synset1 = germanet_data.get_synset_by_id(id1)
    synset2 = germanet_data.get_synset_by_id(id2)
    dist = synset1.shortest_path_distance(synset2)
    np.testing.assert_equal(dist, pathlength)


@pytest.mark.parametrize('id1,id2,pathlength', unnormalized_path_len_verbs)
def test_pathlength_verbs(id1, id2, pathlength):
    """Tests whether the length of the shortest path between two given verbs is correct."""
    synset1 = germanet_data.get_synset_by_id(id1)
    synset2 = germanet_data.get_synset_by_id(id2)
    dist = synset1.shortest_path_distance(synset2)
    np.testing.assert_equal(dist, pathlength)


# leacock and chodorow #

@pytest.mark.parametrize('id1,id2,similarity', unnormalized_lch_nouns)
def test_raw_lch_nouns(id1, id2, similarity):
    """Tests whether the unnormalized lch score for nouns is correct."""
    synset1 = germanet_data.get_synset_by_id(id1)
    synset2 = germanet_data.get_synset_by_id(id2)
    sim = relatedness_nouns.leacock_chodorow(synset1=synset1, synset2=synset2, normalize=False)
    np.testing.assert_almost_equal(sim, similarity, decimal=2)


@pytest.mark.parametrize('id1,id2,upper,similarity', normalized_lch_nouns)
def test_normalized_lch_nouns(id1, id2, upper, similarity):
    """Tests whether the normalized lch score for nouns is correct."""
    synset1 = germanet_data.get_synset_by_id(id1)
    synset2 = germanet_data.get_synset_by_id(id2)
    sim = relatedness_nouns.leacock_chodorow(synset1=synset1, synset2=synset2, normalize=True, normalized_max=upper)
    np.testing.assert_almost_equal(sim, similarity, decimal=2)


@pytest.mark.parametrize('id1,id2,similarity', unnormalized_lch_verbs)
def test_unnormalized_lch_verbs(id1, id2, similarity):
    """Tests whether the unnormalized lch score for verbs is correct."""
    synset1 = germanet_data.get_synset_by_id(id1)
    synset2 = germanet_data.get_synset_by_id(id2)
    sim = relatedness_verbs.leacock_chodorow(synset1=synset1, synset2=synset2, normalize=False)
    np.testing.assert_almost_equal(sim, similarity, decimal=2)


@pytest.mark.parametrize('id1,id2,upper,similarity', normalized_lch_verbs)
def test_normalized_lch_verbs(id1, id2, upper, similarity):
    """Tests whether the normalized lch score for verbs is correct."""
    synset1 = germanet_data.get_synset_by_id(id1)
    synset2 = germanet_data.get_synset_by_id(id2)
    sim = relatedness_verbs.leacock_chodorow(synset1=synset1, synset2=synset2, normalize=True, normalized_max=upper)
    np.testing.assert_almost_equal(sim, similarity, decimal=2)


@pytest.mark.parametrize('id1,id2,similarity', unnormalized_lch_adj)
def test_unnormalized_lch_adj(id1, id2, similarity):
    """Tests whether the unnormalized lch score for adjectives is correct."""
    synset1 = germanet_data.get_synset_by_id(id1)
    synset2 = germanet_data.get_synset_by_id(id2)
    sim = relatedness_adj.leacock_chodorow(synset1=synset1, synset2=synset2, normalize=False)
    np.testing.assert_almost_equal(sim, similarity, decimal=2)


@pytest.mark.parametrize('id1,id2,upper,similarity', normalized_lch_adj)
def test_normalized_lch_adj(id1, id2, upper, similarity):
    """Tests whether the normalized lch score for adjectives is correct."""
    synset1 = germanet_data.get_synset_by_id(id1)
    synset2 = germanet_data.get_synset_by_id(id2)
    sim = relatedness_adj.leacock_chodorow(synset1=synset1, synset2=synset2, normalize=True, normalized_max=upper)
    np.testing.assert_almost_equal(sim, similarity, decimal=2)


# wu and palmer #

@pytest.mark.parametrize('id1,id2,similarity', unnormalized_wup_nouns)
def test_raw_wup_nouns(id1, id2, similarity):
    """Tests whether the unnormalized wup score for nouns is correct."""
    synset1 = germanet_data.get_synset_by_id(id1)
    synset2 = germanet_data.get_synset_by_id(id2)
    sim = relatedness_nouns.wu_and_palmer(synset1=synset1, synset2=synset2, normalize=False)
    np.testing.assert_almost_equal(sim, similarity, decimal=2)


@pytest.mark.parametrize('id1,id2,upper,similarity', normalized_wup_nouns)
def test_normalized_wup_nouns(id1, id2, upper, similarity):
    """Tests whether the normalized wup score for nouns is correct."""
    synset1 = germanet_data.get_synset_by_id(id1)
    synset2 = germanet_data.get_synset_by_id(id2)
    sim = relatedness_nouns.wu_and_palmer(synset1=synset1, synset2=synset2, normalize=True, normalized_max=upper)
    np.testing.assert_almost_equal(sim, similarity, decimal=2)


@pytest.mark.parametrize('id1,id2,similarity', unnormalized_wup_verbs)
def test_unnormalized_wup_verbs(id1, id2, similarity):
    """Tests whether the unnormalized wup score for verbs is correct."""
    synset1 = germanet_data.get_synset_by_id(id1)
    synset2 = germanet_data.get_synset_by_id(id2)
    sim = relatedness_verbs.wu_and_palmer(synset1=synset1, synset2=synset2, normalize=False)
    np.testing.assert_almost_equal(sim, similarity, decimal=2)


@pytest.mark.parametrize('id1,id2,upper,similarity', normalized_wup_verbs)
def test_normalized_wup_verbs(id1, id2, upper, similarity):
    """Tests whether the normalized wup score for verbs is correct."""
    synset1 = germanet_data.get_synset_by_id(id1)
    synset2 = germanet_data.get_synset_by_id(id2)
    sim = relatedness_verbs.wu_and_palmer(synset1=synset1, synset2=synset2, normalize=True, normalized_max=upper)
    np.testing.assert_almost_equal(sim, similarity, decimal=2)


@pytest.mark.parametrize('id1,id2,similarity', unnormalized_wup_adj)
def test_unnormalized_wup_adj(id1, id2, similarity):
    """Tests whether the unnormalized wup score for adjectives is correct."""
    synset1 = germanet_data.get_synset_by_id(id1)
    synset2 = germanet_data.get_synset_by_id(id2)
    sim = relatedness_adj.wu_and_palmer(synset1=synset1, synset2=synset2, normalize=False)
    np.testing.assert_almost_equal(sim, similarity, decimal=2)


@pytest.mark.parametrize('id1,id2,upper,similarity', normalized_wup_adj)
def test_normalized_wup_adj(id1, id2, upper, similarity):
    """Tests whether the normalized wup score for adjectives is correct."""
    synset1 = germanet_data.get_synset_by_id(id1)
    synset2 = germanet_data.get_synset_by_id(id2)
    sim = relatedness_adj.wu_and_palmer(synset1=synset1, synset2=synset2, normalize=True, normalized_max=upper)
    np.testing.assert_almost_equal(sim, similarity, decimal=2)
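The test module above pins expected values for the path-length, Leacock-Chodorow, and Wu-Palmer measures. As a minimal sketch (not part of the test suite) of how the same calls look outside pytest, assuming the same GermaNet data directory layout:

# Illustrative sketch only; synset IDs and expected values are taken from the test data above.
from pathlib import Path
from germanetpy.germanet import Germanet
from germanetpy.path_based_relatedness_measures import PathBasedRelatedness
from germanetpy.synset import WordCategory

data_dir = str(Path(__file__).parent.parent) + "/data"
germanet = Germanet(data_dir)
a = germanet.get_synset_by_id("s46047")
b = germanet.get_synset_by_id("s45380")
# the tests also pass max_len / max_depth / synset_pair; defaults are assumed here
relatedness = PathBasedRelatedness(germanet=germanet, category=WordCategory.nomen)
print(a.shortest_path_distance(b))                                        # raw shortest-path length
print(relatedness.wu_and_palmer(synset1=a, synset2=b, normalize=False))   # ~0.75 per the test data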
37.996403
116
0.721954
1,423
10,563
5.126493
0.115952
0.062509
0.065798
0.092118
0.743386
0.737903
0.703907
0.703907
0.676217
0.664976
0
0.111037
0.143141
10,563
277
117
38.133574
0.694951
0.099498
0
0.296117
0
0
0.118851
0.015267
0
0
0
0
0.072816
1
0.072816
false
0
0.043689
0
0.116505
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
71e81eb36705878470b3f0b6dd48e7b3a4232b86
183
py
Python
subsclu/pipe/reducers.py
StepicOrg/submissions-clustering
d61f4cd24ff165ed9b0cdde79d9dcd1ffae47387
[ "MIT" ]
1
2017-11-20T02:28:07.000Z
2017-11-20T02:28:07.000Z
subsclu/pipe/reducers.py
StepicOrg/submissions-clustering
d61f4cd24ff165ed9b0cdde79d9dcd1ffae47387
[ "MIT" ]
6
2017-08-22T10:34:26.000Z
2017-08-25T14:29:38.000Z
subsclu/pipe/reducers.py
StepicOrg/submissions-clustering
d61f4cd24ff165ed9b0cdde79d9dcd1ffae47387
[ "MIT" ]
null
null
null
"""Module for implementing pipe ops for reducing.""" from sklearn.decomposition import PCA, TruncatedSVD from sklearn.manifold import TSNE __all__ = ["PCA", "TruncatedSVD", "TSNE"]
26.142857
52
0.759563
22
183
6.136364
0.681818
0.162963
0
0
0
0
0
0
0
0
0
0
0.125683
183
6
53
30.5
0.84375
0.251366
0
0
0
0
0.145038
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
71e9f9e3b46f579ad69b22efab8f6620450dd37e
204
py
Python
definitions.py
robsfletch/bts
c03d821b048fdc2b2735fd77bd193443d40468fc
[ "MIT" ]
null
null
null
definitions.py
robsfletch/bts
c03d821b048fdc2b2735fd77bd193443d40468fc
[ "MIT" ]
null
null
null
definitions.py
robsfletch/bts
c03d821b048fdc2b2735fd77bd193443d40468fc
[ "MIT" ]
null
null
null
import os

ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
DATA = os.path.join(ROOT_DIR, 'data')
RAW_DATA = os.path.join(ROOT_DIR, 'data/raw')
INTERIM_DATA = os.path.join(ROOT_DIR, 'data/interim')
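A short sketch of how such path constants are typically consumed elsewhere in a project (the file name is hypothetical, not from the repository):

# Sketch only: building a concrete path from the constants above.
import os
from definitions import RAW_DATA

games_file = os.path.join(RAW_DATA, 'games.csv')   # 'games.csv' is a made-up example file
print(games_file)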
25.5
53
0.735294
36
204
3.888889
0.333333
0.214286
0.214286
0.3
0.578571
0.578571
0.578571
0.4
0
0
0
0
0.093137
204
7
54
29.142857
0.756757
0
0
0
0
0
0.117647
0
0
0
0
0
0
1
0
false
0
0.2
0
0.2
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
71f2a4e7fa6ef671c3a07724183edf2e759aec5e
14,294
py
Python
core/trainers/framework/network.py
fuyinno4/PaddleRec
fd66f18a5f5c216e1e3d3fc3c7c9166ea4b6c166
[ "Apache-2.0" ]
3
2020-09-22T02:46:30.000Z
2021-06-17T06:43:37.000Z
core/trainers/framework/network.py
fuyinno4/PaddleRec
fd66f18a5f5c216e1e3d3fc3c7c9166ea4b6c166
[ "Apache-2.0" ]
null
null
null
core/trainers/framework/network.py
fuyinno4/PaddleRec
fd66f18a5f5c216e1e3d3fc3c7c9166ea4b6c166
[ "Apache-2.0" ]
1
2021-01-11T06:31:36.000Z
2021-01-11T06:31:36.000Z
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import os import warnings import paddle.fluid as fluid from paddlerec.core.utils import envs from paddlerec.core.trainers.framework.dataset import DataLoader, QueueDataset __all__ = [ "NetworkBase", "SingleNetwork", "PSNetwork", "PslibNetwork", "CollectiveNetwork" ] class NetworkBase(object): """R """ def __init__(self, context): pass def build_network(self, context): pass class SingleNetwork(NetworkBase): """R """ def __init__(self, context): print("Running SingleNetwork.") pass def build_network(self, context): context["model"] = {} for model_dict in context["phases"]: context["model"][model_dict["name"]] = {} train_program = fluid.Program() startup_program = fluid.Program() scope = fluid.Scope() dataset_name = model_dict["dataset_name"] with fluid.program_guard(train_program, startup_program): with fluid.unique_name.guard(): with fluid.scope_guard(scope): model_path = envs.os_path_adapter( envs.workspace_adapter(model_dict["model"])) model = envs.lazy_instance_by_fliename( model_path, "Model")(context["env"]) if context["is_infer"]: model._infer_data_var = model.input_data( is_infer=context["is_infer"], dataset_name=model_dict["dataset_name"]) else: model._data_var = model.input_data( dataset_name=model_dict["dataset_name"]) if envs.get_global_env("dataset." + dataset_name + ".type") == "DataLoader": model._init_dataloader( is_infer=context["is_infer"]) data_loader = DataLoader(context) data_loader.get_dataloader(context, dataset_name, model._data_loader) if context["is_infer"]: model.net(model._infer_data_var, context["is_infer"]) else: model.net(model._data_var, context["is_infer"]) optimizer = model.optimizer() optimizer.minimize(model._cost) context["model"][model_dict["name"]][ "main_program"] = train_program context["model"][model_dict["name"]][ "startup_program"] = startup_program context["model"][model_dict["name"]]["scope"] = scope context["model"][model_dict["name"]]["model"] = model context["model"][model_dict["name"]][ "default_main_program"] = train_program.clone() context["dataset"] = {} for dataset in context["env"]["dataset"]: type = envs.get_global_env("dataset." 
+ dataset["name"] + ".type") if type != "DataLoader": dataset_class = QueueDataset(context) context["dataset"][dataset[ "name"]] = dataset_class.create_dataset(dataset["name"], context) context["status"] = "startup_pass" class PSNetwork(NetworkBase): def __init__(self, context): print("Running PSNetwork.") pass def build_network(self, context): context["model"] = {} if len(context["env"]["phase"]) > 1: warnings.warn( "Cluster Train Only Support One Phase.", category=UserWarning, stacklevel=2) model_dict = context["env"]["phase"][0] context["model"][model_dict["name"]] = {} dataset_name = model_dict["dataset_name"] model_path = envs.os_path_adapter( envs.workspace_adapter(model_dict["model"])) model = envs.lazy_instance_by_fliename(model_path, "Model")(context["env"]) model._data_var = model.input_data( dataset_name=model_dict["dataset_name"]) if envs.get_global_env("dataset." + dataset_name + ".type") == "DataLoader": model._init_dataloader(is_infer=False) data_loader = DataLoader(context) data_loader.get_dataloader(context, dataset_name, model._data_loader) model.net(model._data_var, False) optimizer = model.optimizer() strategy = self._build_strategy(context) optimizer = context["fleet"].distributed_optimizer(optimizer, strategy) optimizer.minimize(model._cost) context["model"][model_dict["name"]]["main_program"] = context[ "fleet"].main_program context["model"][model_dict["name"]]["startup_program"] = context[ "fleet"].startup_program context["model"][model_dict["name"]]["scope"] = fluid.global_scope() context["model"][model_dict["name"]]["model"] = model context["model"][model_dict["name"]]["default_main_program"] = context[ "fleet"].main_program.clone() if context["fleet"].is_server(): self._server(context) else: context["fleet"].init_worker() context["dataset"] = {} for dataset in context["env"]["dataset"]: type = envs.get_global_env("dataset." + dataset["name"] + ".type") if type != "DataLoader": dataset_class = QueueDataset(context) context["dataset"][dataset[ "name"]] = dataset_class.create_dataset( dataset["name"], context) context["status"] = "startup_pass" def _build_strategy(self, context): from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import StrategyFactory mode = envs.get_runtime_environ("train.trainer.strategy") assert mode in ["async", "geo", "sync", "half_async"] strategy = None if mode == "async": strategy = StrategyFactory.create_async_strategy() elif mode == "geo": push_num = envs.get_global_env("train.strategy.mode.push_num", 100) strategy = StrategyFactory.create_geo_strategy(push_num) elif mode == "sync": strategy = StrategyFactory.create_sync_strategy() elif mode == "half_async": strategy = StrategyFactory.create_half_async_strategy() assert strategy is not None context["strategy"] = strategy return strategy def _server(self, context): init_model_path = envs.get_global_env( "runner." 
+ context["runner_name"] + ".init_model_path", default_value="") context["fleet"].init_server(init_model_path) context["fleet"].run_server() context['status'] = "terminal_pass" class PslibNetwork(NetworkBase): def __init__(self, context): print("Running PslibNetwork.") pass def build_network(self, context): context["model"] = {} if len(context["env"]["phase"]) > 1: warnings.warn( "Cluster Train Only Support One Phase.", category=UserWarning, stacklevel=2) model_dict = context["env"]["phase"][0] train_program = fluid.Program() startup_program = fluid.Program() scope = fluid.Scope() dataset_name = model_dict["dataset_name"] with fluid.program_guard(train_program, startup_program): with fluid.unique_name.guard(): with fluid.scope_guard(scope): context["model"][model_dict["name"]] = {} model_path = envs.os_path_adapter( envs.workspace_adapter(model_dict["model"])) model = envs.lazy_instance_by_fliename( model_path, "Model")(context["env"]) model._data_var = model.input_data( dataset_name=model_dict["dataset_name"]) if envs.get_global_env("dataset." + dataset_name + ".type") == "DataLoader": model._init_dataloader(is_infer=False) data_loader = DataLoader(context) data_loader.get_dataloader(context, dataset_name, model._data_loader) model.net(model._data_var, False) optimizer = model.optimizer() optimizer = context["fleet"].distributed_optimizer( optimizer) optimizer.minimize([model._cost], [fluid.global_scope()]) context["model"][model_dict["name"]][ "main_program"] = train_program context["model"][model_dict["name"]][ "startup_program"] = startup_program context["model"][model_dict["name"]]["scope"] = scope context["model"][model_dict["name"]]["model"] = model context["model"][model_dict["name"]][ "default_main_program"] = train_program.clone() if context["fleet"].is_server(): self._server(context) else: context["dataset"] = {} for dataset in context["env"]["dataset"]: type = envs.get_global_env("dataset." + dataset["name"] + ".type") if type != "DataLoader": dataset_class = QueueDataset(context) context["dataset"][dataset[ "name"]] = dataset_class.create_dataset( dataset["name"], context) context["status"] = "startup_pass" def _server(self, context): context["fleet"].run_server() context['status'] = "terminal_pass" class CollectiveNetwork(NetworkBase): def __init__(self, context): print("Running CollectiveNetwork.") pass def build_network(self, context): context["model"] = {} if len(context["env"]["phase"]) > 1: warnings.warn( "Cluster Train Only Support One Phase.", category=UserWarning, stacklevel=2) model_dict = context["env"]["phase"][0] context["model"][model_dict["name"]] = {} dataset_name = model_dict["dataset_name"] train_program = fluid.Program() startup_program = fluid.Program() scope = fluid.Scope() with fluid.program_guard(train_program, startup_program): with fluid.scope_guard(scope): model_path = envs.os_path_adapter( envs.workspace_adapter(model_dict["model"])) model = envs.lazy_instance_by_fliename(model_path, "Model")(context["env"]) model._data_var = model.input_data( dataset_name=model_dict["dataset_name"]) if envs.get_global_env("dataset." 
+ dataset_name + ".type") == "DataLoader": model._init_dataloader(is_infer=False) data_loader = DataLoader(context) data_loader.get_dataloader(context, dataset_name, model._data_loader) model.net(model._data_var, False) optimizer = model.optimizer() strategy = self._build_strategy(context) optimizer = context["fleet"].distributed_optimizer(optimizer, strategy) optimizer.minimize(model._cost) context["model"][model_dict["name"]]["main_program"] = context[ "fleet"].main_program context["model"][model_dict["name"]][ "startup_program"] = startup_program context["model"][model_dict["name"]]["scope"] = scope context["model"][model_dict["name"]]["model"] = model context["model"][model_dict["name"]][ "default_main_program"] = train_program context["dataset"] = {} for dataset in context["env"]["dataset"]: type = envs.get_global_env("dataset." + dataset["name"] + ".type") if type != "DataLoader": dataset_class = QueueDataset(context) context["dataset"][dataset[ "name"]] = dataset_class.create_dataset(dataset["name"], context) context["status"] = "startup_pass" def _build_strategy(self, context): from paddle.fluid.incubate.fleet.collective import DistributedStrategy exec_strategy = fluid.ExecutionStrategy() strategy = DistributedStrategy() strategy.exec_strategy = exec_strategy context["strategy"] = strategy return strategy
42.668657
123
0.539317
1,336
14,294
5.514222
0.135479
0.050088
0.055382
0.068413
0.770327
0.729198
0.707887
0.68834
0.673001
0.659156
0
0.002145
0.347628
14,294
334
124
42.796407
0.787882
0.041836
0
0.771218
0
0
0.119432
0.003657
0
0
0
0
0.00738
1
0.051661
false
0.04428
0.02952
0
0.107011
0.01845
0
0
0
null
0
0
0
0
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
9c3de727e1e29e139f4865b67f6d271a39250680
544
py
Python
compileman/Commands.py
danilocgsilva/CompileMan
26ea6cdd40f2bcbf3562da53da33f0dfe5e3fb92
[ "MIT" ]
null
null
null
compileman/Commands.py
danilocgsilva/CompileMan
26ea6cdd40f2bcbf3562da53da33f0dfe5e3fb92
[ "MIT" ]
null
null
null
compileman/Commands.py
danilocgsilva/CompileMan
26ea6cdd40f2bcbf3562da53da33f0dfe5e3fb92
[ "MIT" ]
null
null
null
class Commands:

    def __init__(self, arguments_commandline: list):
        commands_allowed = ['compile', 'clean']
        if len(arguments_commandline) > 1:
            command = arguments_commandline[1]
            if command not in commands_allowed:
                raise Exception("You give an invalid command")
        self.arguments_commandline = arguments_commandline

    def is_command_given(self):
        return len(self.arguments_commandline) > 1

    def get_command_given(self):
        return self.arguments_commandline[1]
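A minimal usage sketch, assuming the class is driven from sys.argv as the constructor's argument name suggests:

# Sketch only: wiring Commands up to the command line.
import sys
from compileman.Commands import Commands

commands = Commands(sys.argv)             # e.g. ['compileman', 'compile']
if commands.is_command_given():
    print(commands.get_command_given())   # 'compile' or 'clean'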
28.631579
62
0.669118
60
544
5.783333
0.45
0.403458
0.276657
0.126801
0
0
0
0
0
0
0
0.009877
0.255515
544
18
63
30.222222
0.846914
0
0
0
0
0
0.071691
0
0
0
0
0
0
1
0.25
false
0
0
0.166667
0.5
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
4
9c4a7ba2ef76b198806294a0dd887692dcd14ce6
183
py
Python
moto/transcribe/__init__.py
oakbramble/moto
6350d8ec4c59eaf12b83385b6acd386e5c2f5593
[ "Apache-2.0" ]
1
2021-12-12T04:23:06.000Z
2021-12-12T04:23:06.000Z
moto/transcribe/__init__.py
oakbramble/moto
6350d8ec4c59eaf12b83385b6acd386e5c2f5593
[ "Apache-2.0" ]
4
2017-09-30T07:52:52.000Z
2021-12-13T06:56:55.000Z
moto/transcribe/__init__.py
oakbramble/moto
6350d8ec4c59eaf12b83385b6acd386e5c2f5593
[ "Apache-2.0" ]
2
2021-11-24T08:05:43.000Z
2021-11-25T16:18:48.000Z
from __future__ import unicode_literals

from .models import transcribe_backends

transcribe_backend = transcribe_backends["us-east-1"]
mock_transcribe = transcribe_backend.decorator
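mock_transcribe is exposed as a standard moto backend decorator; a hedged sketch of a test using it (the test body is illustrative, not taken from the repository):

# Sketch only: the usual moto decorator pattern applied to the export above.
import boto3
from moto.transcribe import mock_transcribe

@mock_transcribe
def test_transcribe_is_mocked():
    client = boto3.client("transcribe", region_name="us-east-1")
    # calls on `client` now hit moto's in-memory backend instead of AWS
    assert client is not None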
26.142857
53
0.857923
22
183
6.681818
0.636364
0.244898
0
0
0
0
0
0
0
0
0
0.005988
0.087432
183
6
54
30.5
0.874252
0
0
0
0
0
0.04918
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
9c6e8f293e124a2c3cead921721f2e55b9e93034
5,715
py
Python
pygsti/modelpacks/smq1Q_XY.py
drewrisinger/pyGSTi
dd4ad669931c7f75e026456470cf33ac5b682d0d
[ "Apache-2.0" ]
1
2021-12-19T15:11:09.000Z
2021-12-19T15:11:09.000Z
pygsti/modelpacks/smq1Q_XY.py
drewrisinger/pyGSTi
dd4ad669931c7f75e026456470cf33ac5b682d0d
[ "Apache-2.0" ]
null
null
null
pygsti/modelpacks/smq1Q_XY.py
drewrisinger/pyGSTi
dd4ad669931c7f75e026456470cf33ac5b682d0d
[ "Apache-2.0" ]
null
null
null
""" Variables for working with the a model containing X(pi/2) and Y(pi/2) gates. """ #*************************************************************************************************** # Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS). # Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights # in this software. # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except # in compliance with the License. You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory. #*************************************************************************************************** from collections import OrderedDict from pygsti.construction import circuitconstruction as _strc from pygsti.construction import modelconstruction as _setc from pygsti.modelpacks._modelpack import GSTModelPack, RBModelPack class _Module(GSTModelPack, RBModelPack): description = "X(pi/2) and Y(pi/2) gates" gates = [('Gxpi2', 0), ('Gypi2', 0)] _sslbls = (0,) _germs = [(('Gxpi2', 0), ), (('Gypi2', 0), ), (('Gxpi2', 0), ('Gypi2', 0)), (('Gxpi2', 0), ('Gxpi2', 0), ('Gypi2', 0)), (('Gxpi2', 0), ('Gypi2', 0), ('Gypi2', 0)), (('Gxpi2', 0), ('Gxpi2', 0), ('Gypi2', 0), ('Gxpi2', 0), ('Gypi2', 0), ('Gypi2', 0))] _germs_lite = [(('Gxpi2', 0), ), (('Gypi2', 0), ), (('Gxpi2', 0), ('Gypi2', 0)), (('Gxpi2', 0), ('Gxpi2', 0), ('Gypi2', 0))] _fiducials = [(), (('Gxpi2', 0), ), (('Gypi2', 0), ), (('Gxpi2', 0), ('Gxpi2', 0)), (('Gxpi2', 0), ('Gxpi2', 0), ('Gxpi2', 0)), (('Gypi2', 0), ('Gypi2', 0), ('Gypi2', 0))] _prepfiducials = [(), (('Gxpi2', 0), ), (('Gypi2', 0), ), (('Gxpi2', 0), ('Gxpi2', 0)), (('Gxpi2', 0), ('Gxpi2', 0), ('Gxpi2', 0)), (('Gypi2', 0), ('Gypi2', 0), ('Gypi2', 0))] _measfiducials = [(), (('Gxpi2', 0), ), (('Gypi2', 0), ), (('Gxpi2', 0), ('Gxpi2', 0)), (('Gxpi2', 0), ('Gxpi2', 0), ('Gxpi2', 0)), (('Gypi2', 0), ('Gypi2', 0), ('Gypi2', 0))] _clifford_compilation = OrderedDict([('Gc0', []), ('Gc1', [('Gypi2', 0), ('Gxpi2', 0)]), ('Gc2', [('Gxpi2', 0), ('Gxpi2', 0), ('Gxpi2', 0), ('Gypi2', 0), ('Gypi2', 0), ('Gypi2', 0)]), ('Gc3', [('Gxpi2', 0), ('Gxpi2', 0)]), ('Gc4', [('Gypi2', 0), ('Gypi2', 0), ('Gypi2', 0), ('Gxpi2', 0), ('Gxpi2', 0), ('Gxpi2', 0)]), ('Gc5', [('Gxpi2', 0), ('Gypi2', 0), ('Gypi2', 0), ('Gypi2', 0)]), ('Gc6', [('Gypi2', 0), ('Gypi2', 0)]), ('Gc7', [('Gypi2', 0), ('Gypi2', 0), ('Gypi2', 0), ('Gxpi2', 0)]), ('Gc8', [('Gxpi2', 0), ('Gypi2', 0)]), ('Gc9', [('Gxpi2', 0), ('Gxpi2', 0), ('Gypi2', 0), ('Gypi2', 0)]), ('Gc10', [('Gypi2', 0), ('Gxpi2', 0), ('Gxpi2', 0), ('Gxpi2', 0)]), ('Gc11', [('Gxpi2', 0), ('Gxpi2', 0), ('Gxpi2', 0), ('Gypi2', 0)]), ('Gc12', [('Gypi2', 0), ('Gxpi2', 0), ('Gxpi2', 0)]), ('Gc13', [('Gxpi2', 0), ('Gxpi2', 0), ('Gxpi2', 0)]), ('Gc14', [('Gxpi2', 0), ('Gypi2', 0), ('Gypi2', 0), ('Gypi2', 0), ('Gxpi2', 0), ('Gxpi2', 0), ('Gxpi2', 0)]), ('Gc15', [('Gypi2', 0), ('Gypi2', 0), ('Gypi2', 0)]), ('Gc16', [('Gxpi2', 0)]), ('Gc17', [('Gxpi2', 0), ('Gypi2', 0), ('Gxpi2', 0)]), ('Gc18', [('Gypi2', 0), ('Gypi2', 0), ('Gypi2', 0), ('Gxpi2', 0), ('Gxpi2', 0)]), ('Gc19', [('Gxpi2', 0), ('Gypi2', 0), ('Gypi2', 0)]), ('Gc20', [('Gxpi2', 0), ('Gypi2', 0), ('Gypi2', 0), ('Gypi2', 0), ('Gxpi2', 0)]), ('Gc21', [('Gypi2', 0)]), ('Gc22', [('Gxpi2', 0), ('Gxpi2', 0), ('Gxpi2', 0), ('Gypi2', 0), ('Gypi2', 0)]), ('Gc23', [('Gxpi2', 0), ('Gypi2', 0), ('Gxpi2', 0), ('Gxpi2', 0), ('Gxpi2', 0)])]) global_fidPairs = [(0, 0), (2, 3), 
(5, 2), (5, 4)] _pergerm_fidPairsDict = { (('Gxpi2', 0), ): [(1, 1), (3, 4), (4, 2), (5, 5)], (('Gypi2', 0), ): [(0, 2), (2, 2), (2, 4), (4, 4)], (('Gxpi2', 0), ('Gypi2', 0)): [(0, 0), (0, 4), (2, 5), (5, 4)], (('Gxpi2', 0), ('Gxpi2', 0), ('Gypi2', 0)): [(1, 3), (1, 4), (3, 5), (5, 0), (5, 4), (5, 5)], (('Gxpi2', 0), ('Gypi2', 0), ('Gypi2', 0)): [(0, 3), (1, 2), (2, 5), (3, 1), (3, 3), (5, 3)], (('Gxpi2', 0), ('Gxpi2', 0), ('Gypi2', 0), ('Gxpi2', 0), ('Gypi2', 0), ('Gypi2', 0)): [(0, 0), (2, 3), (5, 2), (5, 4)] } global_fidPairs_lite = [(0, 2), (2, 4), (3, 1), (3, 3)] _pergerm_fidPairsDict_lite = { (('Gxpi2', 0), ): [(1, 1), (3, 4), (4, 2), (5, 5)], (('Gypi2', 0), ): [(0, 2), (2, 2), (2, 4), (4, 4)], (('Gxpi2', 0), ('Gypi2', 0)): [(0, 0), (0, 4), (2, 5), (5, 4)], (('Gxpi2', 0), ('Gxpi2', 0), ('Gypi2', 0)): [(1, 3), (1, 4), (3, 5), (5, 0), (5, 4), (5, 5)] } def _target_model(self, sslbls): return self._build_explicit_target_model( sslbls, [('Gxpi2', 0), ('Gypi2', 0)], ['X(pi/2,{0})', 'Y(pi/2,{0})']) import sys sys.modules[__name__] = _Module()
62.802198
143
0.377953
641
5,715
3.319813
0.202808
0.24812
0.213816
0.214286
0.479793
0.479793
0.4375
0.428571
0.349624
0.349624
0
0.126417
0.305162
5,715
90
144
63.5
0.409469
0.130184
0
0.15
0
0
0.192292
0
0
0
0
0
0
1
0.016667
false
0
0.083333
0.016667
0.35
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
9c7b146bf09f0ccf6684c48290e87c27303cffd5
6,290
py
Python
app/tests/service_container_test.py
golem4300/quattran
8ab49be9fce99d52384ada0defc94f3a0f4afff1
[ "MIT" ]
183
2016-07-23T22:29:18.000Z
2016-09-25T15:39:10.000Z
app/tests/service_container_test.py
OpenPoGo/OpenPoGo
02a6c955a36e25f7dd7d325f20ea9d6f5418a911
[ "MIT" ]
354
2016-07-23T21:43:27.000Z
2016-09-15T21:01:39.000Z
app/tests/service_container_test.py
OpenPoGo/OpenPoGo
02a6c955a36e25f7dd7d325f20ea9d6f5418a911
[ "MIT" ]
104
2016-07-23T22:28:58.000Z
2016-09-09T11:28:01.000Z
import unittest import pytest from mock import Mock from app import ServiceContainer from app.exceptions import ServiceNotFoundException, ContainerAlreadyBootedException class PluginsTest(unittest.TestCase): @staticmethod def test_register_singleton(): service_container = ServiceContainer() service = Mock() service_container.register_singleton('mock_service', service) service_container.boot() assert service_container.has('mock_service') is True assert service_container.get('mock_service') is service @staticmethod def test_register_decorator(): service_container = ServiceContainer() @service_container.register('test_service') class TestService(object): pass service_container.boot() assert service_container.has('test_service') is True assert isinstance(service_container.get('test_service'), TestService) is True @staticmethod def test_get_service_unknown(): service_container = ServiceContainer() service_container.boot() with pytest.raises(ServiceNotFoundException): service_container.get('test_service') @staticmethod def test_register_decorator_args(): service_container = ServiceContainer() another_service = Mock() service_container.register_singleton('another_service', another_service) param_service = Mock() service_container.register_singleton('param_service', param_service) service_container.set_parameter('test_param', 'hello') service_container.set_parameter('service_param', 'param_service') @service_container.register('test_service', ['@another_service', '%test_param%', '%service_param%', 'static']) class TestService(object): def __init__(self, ts_another_service, ts_test_param, ts_param_service, ts_static_val): self.another_service = ts_another_service self.test_param = ts_test_param self.param_service = ts_param_service self.static_val = ts_static_val service_container.boot() test_service = service_container.get('test_service') assert service_container.has('test_service') is True assert isinstance(test_service, TestService) is True assert test_service.another_service is another_service assert test_service.test_param is 'hello' assert test_service.param_service is param_service assert test_service.static_val is 'static' @staticmethod def test_register_decorator_kwargs(): service_container = ServiceContainer() another_service = Mock() service_container.register_singleton('another_service', another_service) param_service = Mock() service_container.register_singleton('param_service', param_service) service_container.set_parameter('test_param', 'hello') service_container.set_parameter('service_param', 'param_service') @service_container.register('test_service', keywordsargs={'ts_another_service': '@another_service', 'ts_test_param': '%test_param%', 'ts_param_service': '%service_param%', 'ts_static_val': 'static'}) class TestService(object): def __init__(self, ts_another_service=None, ts_test_param=None, ts_param_service=None, ts_static_val=None): self.another_service = ts_another_service self.test_param = ts_test_param self.param_service = ts_param_service self.static_val = ts_static_val service_container.boot() test_service = service_container.get('test_service') assert service_container.has('test_service') is True assert isinstance(test_service, TestService) is True assert test_service.another_service is another_service assert test_service.test_param is 'hello' assert test_service.param_service is param_service assert test_service.static_val is 'static' @staticmethod def test_register_tags(): service_container = ServiceContainer() another_service = Mock() 
service_container.register_singleton('another_service', another_service, tags=['tag_one', 'tag_two', 'tag_three']) @service_container.register('test_service', tags=['tag_one', 'tag_two']) # pylint: disable=unused-variable class TestService(object): def __init__(self, ts_another_service=None, ts_test_param=None, ts_param_service=None, ts_static_val=None): self.another_service = ts_another_service self.test_param = ts_test_param self.param_service = ts_param_service self.static_val = ts_static_val service_container.boot() tag_one_services = service_container.get_by_tag('tag_one') tag_two_services = service_container.get_by_tag('tag_two') tag_three_services = service_container.get_by_tag('tag_three') tag_four_services = service_container.get_by_tag('tag_four') assert len(tag_one_services) is 2 assert len(tag_two_services) is 2 assert len(tag_three_services) is 1 assert len(tag_four_services) is 0 @staticmethod def test_compiler_pass(): service_container = ServiceContainer() @service_container.register_compiler_pass() # pylint: disable=unused-variable def compiler_pass(sc): sc.set_parameter('compiler_set', 'test') service_container.boot() assert service_container.get_parameter('compiler_set') is 'test' @staticmethod def test_compiler_pass_already_booted(): service_container = ServiceContainer() service_container.boot() with pytest.raises(ContainerAlreadyBootedException): @service_container.register_compiler_pass() # pylint: disable=unused-variable def compiler_pass(sc): sc.set_parameter('compiler_set', 'test') @staticmethod def test_boot_already_booted(): service_container = ServiceContainer() service_container.boot() with pytest.raises(ContainerAlreadyBootedException): service_container.boot()
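The API these tests exercise condenses to a short usage sketch (the service names here are made up):

# Sketch only: the ServiceContainer flow covered by the tests above.
from app import ServiceContainer

container = ServiceContainer()
container.register_singleton('config', {'debug': True})   # 'config' is a hypothetical service

@container.register('greeter', ['@config'])
class Greeter(object):
    def __init__(self, config):
        self.config = config

container.boot()
assert container.has('greeter') is True
print(container.get('greeter').config)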
37.891566
207
0.69841
695
6,290
5.928058
0.097842
0.190291
0.069903
0.056796
0.826456
0.765049
0.681311
0.631553
0.631553
0.615049
0
0.000816
0.22035
6,290
165
208
38.121212
0.839315
0.015103
0
0.683333
0
0
0.099661
0
0
0
0
0
0.175
1
0.116667
false
0.058333
0.041667
0
0.2
0
0
0
0
null
0
0
0
1
1
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
4
92f5d9933f7fc527ce436b69a538a62f022b7e9f
28,261
py
Python
datahub/sql/controllers/aws/comprehend_controller.py
Chronicles-of-AI/osiris
c71b1324ed270caa3724c0a8c58c4883b28dc19c
[ "Apache-2.0" ]
3
2021-08-03T08:13:40.000Z
2022-02-23T04:27:30.000Z
datahub/sql/controllers/aws/comprehend_controller.py
Chronicles-of-AI/osiris
c71b1324ed270caa3724c0a8c58c4883b28dc19c
[ "Apache-2.0" ]
null
null
null
datahub/sql/controllers/aws/comprehend_controller.py
Chronicles-of-AI/osiris
c71b1324ed270caa3724c0a8c58c4883b28dc19c
[ "Apache-2.0" ]
null
null
null
from commons.external_call import APIInterface from sql import config, logger from sql.crud.model_crud import CRUDModel from sql.crud.deployment_crud import CRUDDeployment from sql.crud.project_flow_crud import CRUDProjectFlow from sql.crud.model_monitoring_crud import CRUDModelMonitoring from datetime import datetime logging = logger(__name__) class ComprehendController: def __init__(self): self.CRUDModel = CRUDModel() self.CRUDDeployment = CRUDDeployment() self.CRUDProjectFlow = CRUDProjectFlow() self.CRUDModelMonitoring = CRUDModelMonitoring() self.core_aws_comprehend_config = ( config.get("core_engine").get("aws").get("comprehend_router") ) def create_document_classifier_controller(self, request): """[Controller function to create a new document classifier using AWS Comprehend] Args: request ([dict]): [Create document classifier request] Raises: error: [Error raised from controller layer] Returns: [str]: [document_classifier_arn] [str]: [status] """ try: logging.info("executing create_document_classifier_controller function") uuid = str(int(datetime.now().timestamp())) create_document_classifier_request = request.dict(exclude_none=True) create_document_classifier_url = self.core_aws_comprehend_config.get( "create_document_classifier" ) response, status_code = APIInterface.post( route=create_document_classifier_url, data=create_document_classifier_request, ) if status_code == 200: crud_request = { "pipeline_id": create_document_classifier_request.get( "pipeline_id" ), "model_id": response.get("document_classifier_arn"), "dataset_id": create_document_classifier_request.get( "InputDataConfig" ), "artifacts": create_document_classifier_request.get( "OutputDataConfig" ), "alias_name": create_document_classifier_request.get( "DocumentClassifierName" ), "auto_trigger": False, "UUID": uuid, "status": "Running", "created": datetime.now(), } self.CRUDModel.create(**crud_request) project_flow_crud_request = { "pipeline_id": create_document_classifier_request.get( "pipeline_id" ), "updated_at": datetime.now(), "functional_stage_id": response.get("document_classifier_arn"), "current_stage": "TRAINING", } self.CRUDProjectFlow.update(**project_flow_crud_request) return { "document_classifier_arn": response.get("document_classifier_arn"), "status": "training started", } else: raise Exception({"status": "training failed"}) except Exception as error: logging.error( f"Error in create_document_classifier_controller function: {error}" ) raise error def create_entity_recognizer_controller(self, request): """[Controller function to create a new entity recognizer using AWS Comprehend] Args: request ([dict]): [Create entity recognizer request] Raises: error: [Error raised from controller layer] Returns: [str]: [entity_recognizer_arn] [str]: [status] """ try: logging.info("executing create_entity_recognizer_controller function") uuid = str(int(datetime.now().timestamp())) create_entity_recognizer_request = request.dict(exclude_none=True) create_entity_recognizer_url = self.core_aws_comprehend_config.get( "create_entity_recognizer" ) response, status_code = APIInterface.post( route=create_entity_recognizer_url, data=create_entity_recognizer_request, ) if status_code == 200: crud_request = { "pipeline_id": create_entity_recognizer_request.get("pipeline_id"), "model_id": response.get("entity_recognizer_arn"), "dataset_id": create_entity_recognizer_request.get( "InputDataConfig" ), "artifacts": create_entity_recognizer_request.get( "OutputDataConfig" ), "alias_name": create_entity_recognizer_request.get( 
"RecognizerName" ), "auto_trigger": False, "UUID": uuid, "status": "Running", "created": datetime.now(), } self.CRUDModel.create(**crud_request) project_flow_crud_request = { "pipeline_id": create_entity_recognizer_request.get("pipeline_id"), "updated_at": datetime.now(), "functional_stage_id": response.get("entity_recognizer_arn"), "current_stage": "TRAINING", } self.CRUDProjectFlow.update(**project_flow_crud_request) return { "entity_recognizer_arn": response.get("entity_recognizer_arn"), "status": "training started", } else: raise Exception({"status": "training failed"}) except Exception as error: logging.error( f"Error in create_entity_recognizer_controller function: {error}" ) raise error def delete_document_classifier_controller(self, request): """[Controller function to delete a document classifier using AWS Comprehend] Args: request ([dict]): [Delete document classifier request] Raises: error: [Error raised from controller layer] Returns: [str]: [status] """ try: logging.info("executing delete_document_classifier_controller function") delete_document_classifier_request = request.dict(exclude_none=True) delete_document_classifier_url = self.core_aws_comprehend_config.get( "delete_document_classifier" ) response, status_code = APIInterface.post( route=delete_document_classifier_url, data=delete_document_classifier_request, ) if status_code == 200: crud_request = { "model_id": request.DocumentClassifierArn, "status": "Deleted", "updated": datetime.now(), } self.CRUDModel.update(crud_request) return {"status": "classifier deleted"} else: raise Exception({"status": "deletion failed"}) except Exception as error: logging.error( f"Error in delete_document_classifier_controller function: {error}" ) raise error def delete_entity_recognizer_controller(self, request): """[Controller function to delete a entity_recognizer using AWS Comprehend] Args: request ([dict]): [Delete entity_recognizer request] Raises: error: [Error raised from controller layer] Returns: [str]: [status] """ try: logging.info("executing delete_entity_recognizer_controller function") delete_entity_recognizer_request = request.dict(exclude_none=True) delete_entity_recognizer_url = self.core_aws_comprehend_config.get( "delete_entity_recognizer" ) response, status_code = APIInterface.post( route=delete_entity_recognizer_url, data=delete_entity_recognizer_request, ) if status_code == 200: crud_request = { "model_id": request.DocumentClassifierArn, "status": "Deleted", "updated": datetime.now(), } self.CRUDModel.update(crud_request) return {"status": "recognizer deleted"} else: raise Exception({"status": "deletion failed"}) except Exception as error: logging.error( f"Error in delete_entity_recognizer_controller function: {error}" ) raise error def describe_document_classifier_controller(self, request): """[Controller function to describe a document classifier using AWS Comprehend] Args: request ([dict]): [Describe document classifier request] Raises: error: [Error raised from controller layer] Returns: [dict]: [Document Classifier description returned from core engine] """ try: logging.info("executing describe_document_classifier_controller function") describe_document_classifier_request = request.dict(exclude_none=True) describe_document_classifier_url = self.core_aws_comprehend_config.get( "describe_document_classifier" ) response, status_code = APIInterface.post( route=describe_document_classifier_url, data=describe_document_classifier_request, ) crud_request = { "model_id": request.DocumentClassifierArn, "status": 
response.get("Status"), "updated": datetime.now(), } self.CRUDModel.update(crud_request) evaluation_metrics = ( response.get("DocumentClassifierProperties") .get("ClassifierMetadata") .get("EvaluationMetrics") ) f1_score = evaluation_metrics.get("F1Score") precision = evaluation_metrics.get("Precision") recall = evaluation_metrics.get("Recall") status = response.get("DocumentClassifierProperties").get("Status") if status == "TRAINED": create_model_monitoring_request = { "model_uri": request.DocumentClassifierArn, "model_f1_score": f1_score, "model_recall": recall, "model_precision": precision, "model_drift_threshold": "0.8", "created_at": datetime.now(), "updated_at": datetime.now(), } if ( len( self.CRUDModelMonitoring.read( model_uri=request.DocumentClassifierArn ) ) == 0 ): project_flow_crud_request = { "pipeline_id": request.pipeline_id, "updated_at": datetime.now(), "current_stage": "TRAINED", } self.CRUDProjectFlow.update(**project_flow_crud_request) self.CRUDModelMonitoring.create(**create_model_monitoring_request) return response except Exception as error: logging.error( f"Error in describe_document_classifier_controller function: {error}" ) raise error def describe_document_classifier_status_controller(self, request): """[Controller function to describe a document classifier status using AWS Comprehend] Args: request ([dict]): [Describe document classifier request] Raises: error: [Error raised from controller layer] Returns: [dict]: [Document Classifier description returned from core engine] """ try: logging.info( "executing describe_document_classifier_status_controller function" ) describe_document_classifier_request = request.dict(exclude_none=True) describe_document_classifier_url = self.core_aws_comprehend_config.get( "describe_document_classifier" ) response, status_code = APIInterface.post( route=describe_document_classifier_url, data=describe_document_classifier_request, ) document_classifier_status = response.get( "DocumentClassifierProperties" ).get("Status") return {"model_status": document_classifier_status} except Exception as error: logging.error( f"Error in describe_document_classifier_status_controller function: {error}" ) raise error def describe_entity_recognizer_controller(self, request): """[Controller function to describe a entity recognizer using AWS Comprehend] Args: request ([dict]): [Describe entity recognizer request] Raises: error: [Error raised from controller layer] Returns: [dict]: [entity recognizer description returned from core engine] """ try: logging.info("executing describe_entity_recognizer_controller function") describe_entity_recognizer_request = request.dict(exclude_none=True) describe_entity_recognizer_url = self.core_aws_comprehend_config.get( "describe_entity_recognizer" ) response, status_code = APIInterface.post( route=describe_entity_recognizer_url, data=describe_entity_recognizer_request, ) crud_request = { "model_id": request.EntityRecognizerArn, "status": response.get("Status"), "updated": datetime.now(), } self.CRUDModel.update(crud_request) evaluation_metrics = ( response.get("EntityRecognizerProperties") .get("RecognizerMetadata") .get("EvaluationMetrics") ) f1_score = evaluation_metrics.get("F1Score") precision = evaluation_metrics.get("Precision") recall = evaluation_metrics.get("Recall") status = response.get("EntityRecognizerProperties").get("Status") if status == "TRAINED": create_model_monitoring_request = { "model_uri": request.EntityRecognizerArn, "model_f1_score": f1_score, "model_recall": recall, "model_precision": 
precision, "model_drift_threshold": "0.8", "created_at": datetime.now(), "updated_at": datetime.now(), } if ( len( self.CRUDModelMonitoring.read( model_uri=request.EntityRecognizerArn ) ) == 0 ): project_flow_crud_request = { "pipeline_id": request.pipeline_id, "updated_at": datetime.now(), "current_stage": "TRAINED", } self.CRUDProjectFlow.update(**project_flow_crud_request) self.CRUDModelMonitoring.create(**create_model_monitoring_request) return response except Exception as error: logging.error( f"Error in describe_entity_recognizer_controller function: {error}" ) raise error def describe_entity_recognizer_status_controller(self, request): """[Controller function to describe a entity recognizer status using AWS Comprehend] Args: request ([dict]): [Describe entity recognizer request] Raises: error: [Error raised from controller layer] Returns: [dict]: [entity recognizer description returned from core engine] """ try: logging.info( "executing describe_entity_recognizer_status_controller function" ) describe_entity_recognizer_request = request.dict(exclude_none=True) describe_entity_recognizer_url = self.core_aws_comprehend_config.get( "describe_entity_recognizer" ) response, status_code = APIInterface.post( route=describe_entity_recognizer_url, data=describe_entity_recognizer_request, ) model_status = response.get("EntityRecognizerProperties").get("Status") return {"model_status": model_status} except Exception as error: logging.error( f"Error in describe_entity_recognizer_status_controller function: {error}" ) raise error def stop_training_document_classifier_controller(self, request): """[Controller function to stop a training job on AWS Comprehend] Args: request ([dict]): [Stop training request] Raises: error: [Error raised from controller layer] Returns: [str]: [Status of training job] """ try: logging.info( "executing stop_training_document_classifier_controller function" ) stop_training_document_classifier_request = request.dict(exclude_none=True) stop_training_document_classifier_url = self.core_aws_comprehend_config.get( "stop_training_document_classifier" ) response, status_code = APIInterface.post( route=stop_training_document_classifier_url, data=stop_training_document_classifier_request, ) if status_code == 200: crud_request = { "model_id": request.DocumentClassifierArn, "status": "Stopped", "updated": datetime.now(), } self.CRUDModel.update(crud_request) project_flow_crud_request = { "pipeline_id": stop_training_document_classifier_request.get( "pipeline_id" ), "updated_at": datetime.now(), "functional_stage_id": request.DocumentClassifierArn, "current_stage": "TRAINING_STOPPED", } self.CRUDProjectFlow.update(**project_flow_crud_request) return {"status": "training stopped"} else: raise Exception({"status": "training failed"}) except Exception as error: logging.error( f"Error in stop_training_document_classifier_controller function: {error}" ) raise error def stop_training_entity_recognizer_controller(self, request): """[Controller function to stop a training job on AWS Comprehend] Args: request ([dict]): [Stop training request] Raises: error: [Error raised from controller layer] Returns: [str]: [Status of training job] """ try: logging.info( "executing stop_training_entity_recognizer_controller function" ) stop_training_entity_recognizer_request = request.dict(exclude_none=True) stop_training_entity_recognizer_url = self.core_aws_comprehend_config.get( "stop_training_entity_recognizer" ) response, status_code = APIInterface.post( route=stop_training_entity_recognizer_url, 
data=stop_training_entity_recognizer_request, ) if status_code == 200: crud_request = { "model_id": request.EntityRecognizerArn, "status": "Stopped", "updated": datetime.now(), } self.CRUDModel.update(crud_request) project_flow_crud_request = { "pipeline_id": stop_training_entity_recognizer_request.get( "pipeline_id" ), "updated_at": datetime.now(), "functional_stage_id": request.EntityRecognizerArn, "current_stage": "TRAINING_STOPPED", } self.CRUDProjectFlow.update(**project_flow_crud_request) return {"status": "training stopped"} else: raise Exception({"status": "training failed"}) except Exception as error: logging.error( f"Error in stop_training_entity_recognizer_controller function: {error}" ) raise error def list_document_classifier_controller(self): """[Controller function to list all the document classifiers on AWS Comprehend] Raises: error: [Error raised from controller layer] Returns: [dict]: [List of all the document classifier on AWS Comprehend] """ try: logging.info("executing list_document_classifier_controller function") list_document_classifier_url = self.core_aws_comprehend_config.get( "list_document_classifier" ) response, status_code = APIInterface.get( route=list_document_classifier_url, ) return response except Exception as error: logging.error( f"Error in list_document_classifier_controller function: {error}" ) raise error def list_entity_recognizer_controller(self): """[Controller function to list all the entity recognizers on AWS Comprehend] Raises: error: [Error raised from controller layer] Returns: [dict]: [List of all the entity recognizers on AWS Comprehend] """ try: logging.info("executing list_entity_recognizer_controller function") list_entity_recognizer_url = self.core_aws_comprehend_config.get( "list_entity_recognizer" ) response, status_code = APIInterface.get( route=list_entity_recognizer_url, ) return response except Exception as error: logging.error( f"Error in list_entity_recognizer_controller function: {error}" ) raise error def deploy_model_controller(self, request): """[Controller function to deploy a document classifier] Args: request ([dict]): [Deploy document classifier request] Raises: error: [Error raised from controller layer] Returns: [dict]: [Details of the deployed document classifier model] """ try: logging.info("executing deploy_model_controller function") uuid = str(int(datetime.now().timestamp())) deploy_model_request = request.dict(exclude_none=True) deploy_model_url = self.core_aws_comprehend_config.get("deploy_model") response, status_code = APIInterface.post( route=deploy_model_url, data=deploy_model_request, ) if status_code == 200: deployment_crud_request = { "pipeline_id": deploy_model_request.get("pipeline_id"), "UUID": uuid, "model_id": request.model_arn, "deployment_endpoint": response.get("endpoint_arn"), "created": datetime.now(), "status": "Deployed", } self.CRUDDeployment.create(**deployment_crud_request) project_flow_crud_request = { "pipeline_id": deploy_model_request.get("pipeline_id"), "updated_at": datetime.now(), "model_id": request.model_arn, "functional_stage_id": request.model_arn, "current_stage": "MODEL_DEPLOYED", } self.CRUDProjectFlow.update(**project_flow_crud_request) response.update({"status": "model deployed successfully"}) return response else: raise Exception({"status": "model deployment failed"}) except Exception as error: logging.error(f"Error in deploy_model_controller function: {error}") raise error def undeploy_model_controller(self, request): """[Controller function to undeploy a document 
classifier] Args: request ([dict]): [Un-Deploy document classifier request] Raises: error: [Error raised from controller layer] Returns: [dict]: [Details of the undeployed document classifier model] """ try: logging.info("executing undeploy_model_controller function") undeploy_model_request = request.dict(exclude_none=True) undeploy_model_url = self.core_aws_comprehend_config.get("undeploy_model") response, status_code = APIInterface.post( route=undeploy_model_url, data=undeploy_model_request, ) if status_code == 200: undeployment_crud_request = { "deployment_endpoint": request.endpoint_arn, "updated": datetime.now(), "status": "UnDeployed", } self.CRUDDeployment.update_by_endpoint( deployment_request=undeployment_crud_request ) project_flow_crud_request = { "pipeline_id": undeploy_model_request.get("pipeline_id"), "updated_at": datetime.now(), "current_stage": "MODEL_UNDEPLOYED", } self.CRUDProjectFlow.update(**project_flow_crud_request) return {"status": "model undeployed successfully"} else: raise Exception({"status": "model undeployment failed"}) except Exception as error: logging.error(f"Error in undeploy_model_controller function: {error}") raise error def get_predictions_controller(self, endpoint_arn: str, text: str): """[Controller function to get predictions from trained document classifier] Args: endpoint_arn (str): [endpoint of the trained document classifier] text (str): [text to be classified] Raises: error: [Error raised from controller layer] Returns: [dict]: [predictions from the trained model] """ try: logging.info("executing get_predictions_controller function") get_predictions_url = self.core_aws_comprehend_config.get("get_predictions") response, status_code = APIInterface.get( route=get_predictions_url, params={"endpoint_arn": endpoint_arn, "text": text}, ) return response except Exception as error: logging.error(f"Error in get_predictions_controller function: {error}") raise error
41.017417
94
0.564453
2,438
28,261
6.256358
0.064397
0.092047
0.036058
0.022028
0.856684
0.811447
0.768046
0.712909
0.585262
0.505409
0
0.002098
0.359046
28,261
688
95
41.077035
0.839958
0.134319
0
0.53937
0
0
0.186008
0.076289
0
0
0
0
0
1
0.031496
false
0
0.01378
0
0.076772
0
0
0
0
null
0
0
0
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
92ffd4ef7fef5ad7217dfd42ffbf2bf0dc75af83
224
py
Python
Library/book_inventory/serializers.py
aybarskerem/BookInventory
75aa784185940ba5e85e5181991c88edf68fa65d
[ "MIT" ]
null
null
null
Library/book_inventory/serializers.py
aybarskerem/BookInventory
75aa784185940ba5e85e5181991c88edf68fa65d
[ "MIT" ]
null
null
null
Library/book_inventory/serializers.py
aybarskerem/BookInventory
75aa784185940ba5e85e5181991c88edf68fa65d
[ "MIT" ]
null
null
null
from rest_framework import serializers

from book_inventory.models import BookInventory


class BookInventorySerializer(serializers.ModelSerializer):
    class Meta:
        model = BookInventory
        fields = '__all__'
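A hedged sketch of the serializer in use; the concrete model fields are not visible here, so only the generic DRF round-trip is shown:

# Sketch only: typical ModelSerializer usage.
from book_inventory.models import BookInventory
from book_inventory.serializers import BookInventorySerializer

book = BookInventory.objects.first()        # assumes at least one row exists
data = BookInventorySerializer(book).data   # model instance -> dict of all fields
print(data)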
32
59
0.767857
21
224
7.904762
0.761905
0
0
0
0
0
0
0
0
0
0
0
0.1875
224
7
60
32
0.912088
0
0
0
0
0
0.031111
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
1306fa7facf8b26780a1a8a8bf527890a51ca961
263
py
Python
src/commerce/commerce-vis.py
Calvibert/machine-learning-exercises
8184a8338505ea8075992f419385620be6522d14
[ "MIT" ]
null
null
null
src/commerce/commerce-vis.py
Calvibert/machine-learning-exercises
8184a8338505ea8075992f419385620be6522d14
[ "MIT" ]
null
null
null
src/commerce/commerce-vis.py
Calvibert/machine-learning-exercises
8184a8338505ea8075992f419385620be6522d14
[ "MIT" ]
null
null
null
import numpy as np
from prettytable import PrettyTable as pt

# array = np.loadtxt(open("may2018comm.csv", "rb"), dtype="str", delimiter=";", skiprows=1)
array = np.genfromtxt(open("may2018comm.csv", "rb"), dtype="str", delimiter=";", skip_header=1)
print(array)
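PrettyTable is imported but never used in the script; a sketch of how the loaded array might be displayed with it (the real column names are skipped along with the CSV header, so placeholders are used):

# Sketch only: loading the same CSV and rendering it with PrettyTable.
import numpy as np
from prettytable import PrettyTable

data = np.genfromtxt(open("may2018comm.csv", "rb"), dtype="str", delimiter=";", skip_header=1)
table = PrettyTable()
table.field_names = ["col%d" % i for i in range(data.shape[1])]   # real header names were skipped
for row in data:
    table.add_row(list(row))
print(table)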
37.571429
95
0.711027
37
263
5.027027
0.594595
0.075269
0.193548
0.215054
0.397849
0.397849
0.397849
0
0
0
0
0.042373
0.102662
263
7
96
37.571429
0.745763
0.338403
0
0
0
0
0.121387
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0.25
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
13264f86ebc8c9ec08ffbfd8453bed33b8c4bfb9
97
py
Python
telegram_handler/__main__.py
RedbanxUK/telegram-python-log
8a89064df025b5eff8b2b95d213efca1d83d3b7c
[ "MIT" ]
88
2016-04-25T09:56:45.000Z
2022-03-26T14:35:51.000Z
telegram_handler/__main__.py
RedbanxUK/telegram-python-log
8a89064df025b5eff8b2b95d213efca1d83d3b7c
[ "MIT" ]
28
2016-04-19T17:16:44.000Z
2022-01-24T18:36:49.000Z
telegram_handler/__main__.py
RedbanxUK/telegram-python-log
8a89064df025b5eff8b2b95d213efca1d83d3b7c
[ "MIT" ]
27
2016-08-12T22:06:51.000Z
2022-03-19T23:26:16.000Z
if __name__ == '__main__':  # pragma: no cover
    from telegram_handler import main

    main()
19.4
46
0.670103
12
97
4.666667
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.237113
97
4
47
24.25
0.756757
0.164948
0
0
0
0
0.101266
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
133d9eb900a49977d73f60477727d2ebba093a48
297
py
Python
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/core.py
wpk-nist-gov/cookiecutter-pypackage
e23dabd6e08a10e4b7ab6f119c4212832a3126e8
[ "BSD-3-Clause" ]
null
null
null
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/core.py
wpk-nist-gov/cookiecutter-pypackage
e23dabd6e08a10e4b7ab6f119c4212832a3126e8
[ "BSD-3-Clause" ]
null
null
null
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/core.py
wpk-nist-gov/cookiecutter-pypackage
e23dabd6e08a10e4b7ab6f119c4212832a3126e8
[ "BSD-3-Clause" ]
null
null
null
""" core functionality """ def another_func(a, b): """ just a test Parameters ---------- a : int an input b : float another input See also -------- {{ cookiecutter.project_slug }}.{{ cookiecutter.project_slug }}.a_function """ pass
12.913043
78
0.508418
30
297
4.9
0.7
0.258503
0.312925
0
0
0
0
0
0
0
0
0
0.329966
297
22
79
13.5
0.738693
0.6633
0
0
0
0
0
0
0
0
0
0
0
1
0.5
false
0.5
0
0
0.5
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
4
133efbe51fa6e6c5ed87fb5041083f8605fa7cf3
136
py
Python
narcis_api/models/operating_system.py
narcisus/narcis-api
2a73deddb858f179beb7cb862ba2f59587c99be1
[ "MIT" ]
null
null
null
narcis_api/models/operating_system.py
narcisus/narcis-api
2a73deddb858f179beb7cb862ba2f59587c99be1
[ "MIT" ]
null
null
null
narcis_api/models/operating_system.py
narcisus/narcis-api
2a73deddb858f179beb7cb862ba2f59587c99be1
[ "MIT" ]
null
null
null
from . import Base
from .named_model import NamedModel

class OperatingSystem(Base, NamedModel):
    __tablename__ = 'operating_system'
22.666667
40
0.794118
15
136
6.8
0.733333
0
0
0
0
0
0
0
0
0
0
0
0.139706
136
5
41
27.2
0.871795
0
0
0
0
0
0.117647
0
0
0
0
0
0
1
0
false
0
0.5
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
134e3517b0209490937f0658192abc5f5b677475
163
py
Python
configs/resnet50_cxr14_bs16.py
JiYuanFeng/mmclassification
b337ef1f11b85148cca4b6fb0c4da3f8cc2eede6
[ "Apache-2.0" ]
null
null
null
configs/resnet50_cxr14_bs16.py
JiYuanFeng/mmclassification
b337ef1f11b85148cca4b6fb0c4da3f8cc2eede6
[ "Apache-2.0" ]
null
null
null
configs/resnet50_cxr14_bs16.py
JiYuanFeng/mmclassification
b337ef1f11b85148cca4b6fb0c4da3f8cc2eede6
[ "Apache-2.0" ]
null
null
null
_base_ = [
    '_base_/datasets/cxr14_bs16.py',
    '_base_/models/resnet50_cxr14.py',
    '_base_/schedules/cxr14_bs16_ep20.py',
    '_base_/default_runtime.py'
]
27.166667
42
0.705521
21
163
4.761905
0.52381
0.18
0
0
0
0
0
0
0
0
0
0.1
0.141104
163
6
43
27.166667
0.614286
0
0
0
0
0
0.731707
0.731707
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
136b7e1a4d3c6c298fe235ff9b1494e86c0a1556
763
py
Python
heap/heap.py
nkzlkk/algo-implementations
637d8498a6f2e45828eb443920a5e17a5c69fbd3
[ "MIT" ]
null
null
null
heap/heap.py
nkzlkk/algo-implementations
637d8498a6f2e45828eb443920a5e17a5c69fbd3
[ "MIT" ]
null
null
null
heap/heap.py
nkzlkk/algo-implementations
637d8498a6f2e45828eb443920a5e17a5c69fbd3
[ "MIT" ]
null
null
null
""" Implementation of a binary Min Heap, represented by an array. """ class MinHeap(): def __init__(self): self.array = [] self.root = None self.size = 0 def parent(heap, i): """ Returns the index of the parent of a given node. """ if i == 0: return 0 return heap.array[(i-1)//2] def left(heap, i): """ Returns the left child of a given node. """ return heap.array[2*i + 1] def right(heap, i): """ Returns the right child of a given node. """ return heap.array[2*i + 2] def return_smallest(heap): """ Returns the smallest value from a correct MinHeap. """ return heap.root def heapify(heap): #TODO pass def insert(heap): #TODO pass
15.895833
61
0.567497
109
763
3.926606
0.376147
0.028037
0.084112
0.10514
0.158879
0.158879
0.158879
0.158879
0.158879
0.158879
0
0.017045
0.307995
763
48
62
15.895833
0.793561
0.328965
0
0.105263
0
0
0
0
0
0
0
0.020833
0
1
0.368421
false
0.105263
0
0
0.684211
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
1
0
1
0
0
1
0
0
4
13740e4d6aa05ebbe4fa7bb05ce3fe78211ef58a
75
py
Python
range-sample1.py
misizeji/python_study_notes
b0f5a224ac65f962c6255ab7cf8a1912704d1617
[ "MIT" ]
null
null
null
range-sample1.py
misizeji/python_study_notes
b0f5a224ac65f962c6255ab7cf8a1912704d1617
[ "MIT" ]
1
2018-05-23T06:35:37.000Z
2018-05-23T06:36:12.000Z
range-sample1.py
misizeji/python_study_notes
b0f5a224ac65f962c6255ab7cf8a1912704d1617
[ "MIT" ]
null
null
null
#!/usr/bin/python3

for i in range(-10,-100,-30):
    print(i)
print(i)
15
29
0.573333
14
75
3.071429
0.785714
0.27907
0
0
0
0
0
0
0
0
0
0.133333
0.2
75
5
30
15
0.583333
0.226667
0
0.666667
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0.666667
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
4
1387520c6d3dbfb302062778261d540b1d54f9a9
109
py
Python
cubi_tk/__init__.py
eudesbarbosa/cubi-tk
80c3ef9387f2399f796b2cc445b99781d541f222
[ "MIT" ]
132
2017-05-14T23:52:52.000Z
2022-03-30T14:18:31.000Z
cubi_tk/__init__.py
eudesbarbosa/cubi-tk
80c3ef9387f2399f796b2cc445b99781d541f222
[ "MIT" ]
65
2020-09-23T13:22:41.000Z
2022-03-17T11:02:42.000Z
sodar_cli/__init__.py
bihealth/sodar-cli
5fe56513565222eef125752119e634d36b3c8772
[ "MIT" ]
40
2017-01-08T22:19:49.000Z
2021-11-25T05:25:13.000Z
from ._version import get_versions  # type: ignore

__version__ = get_versions()["version"]
del get_versions
21.8
50
0.779817
14
109
5.5
0.571429
0.428571
0
0
0
0
0
0
0
0
0
0
0.12844
109
4
51
27.25
0.810526
0.110092
0
0
0
0
0.073684
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
13b28a4339e79aea898774bec1583ae6b7322f6e
161
py
Python
src/ezdxf/tracker.py
hh-wu/ezdxf
62509ba39b826ee9b36f19c0a5abad7f3518186a
[ "MIT" ]
2
2021-07-28T03:52:02.000Z
2021-07-31T05:08:11.000Z
src/ezdxf/tracker.py
hh-wu/ezdxf
62509ba39b826ee9b36f19c0a5abad7f3518186a
[ "MIT" ]
1
2020-04-28T17:52:26.000Z
2020-10-07T01:28:56.000Z
src/ezdxf/tracker.py
hh-wu/ezdxf
62509ba39b826ee9b36f19c0a5abad7f3518186a
[ "MIT" ]
1
2021-07-31T05:08:12.000Z
2021-07-31T05:08:12.000Z
# Copyright (c) 2018 Manfred Moitzi
# License: MIT License
from typing import Set


class Tracker:
    dxftypes = set()  # type: Set[str] # track used DXF types
20.125
61
0.701863
23
161
4.913043
0.869565
0
0
0
0
0
0
0
0
0
0
0.031496
0.21118
161
7
62
23
0.858268
0.565217
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
13b7943c79a8fe937ff5e2e3db1cfd06c50c1461
91
py
Python
markovdp/apps.py
5g-media/ss-cno-teleimmersive-game
70fd71e17c005feb02e661f36bf4257817f91dde
[ "Apache-2.0" ]
null
null
null
markovdp/apps.py
5g-media/ss-cno-teleimmersive-game
70fd71e17c005feb02e661f36bf4257817f91dde
[ "Apache-2.0" ]
null
null
null
markovdp/apps.py
5g-media/ss-cno-teleimmersive-game
70fd71e17c005feb02e661f36bf4257817f91dde
[ "Apache-2.0" ]
null
null
null
from django.apps import AppConfig


class MarkovdpConfig(AppConfig):
    name = 'markovdp'
15.166667
33
0.758242
10
91
6.9
0.9
0
0
0
0
0
0
0
0
0
0
0
0.164835
91
5
34
18.2
0.907895
0
0
0
0
0
0.087912
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
13e3675dbe162f27ce89ac501b8bb4fdc9c0d15d
85
py
Python
Week01/Problem01/gwang_01.py
annap84/SkillsWorkshop2018
19cc76abd9b0ea15ee2823dd36698475b08b0217
[ "BSD-3-Clause" ]
1
2020-04-18T03:30:46.000Z
2020-04-18T03:30:46.000Z
Week01/Problem01/gwang_01.py
annap84/SkillsWorkshop2018
19cc76abd9b0ea15ee2823dd36698475b08b0217
[ "BSD-3-Clause" ]
21
2018-07-12T19:12:23.000Z
2018-08-10T13:52:45.000Z
Week01/Problem01/gwang_01.py
annap84/SkillsWorkshop2018
19cc76abd9b0ea15ee2823dd36698475b08b0217
[ "BSD-3-Clause" ]
60
2018-05-08T16:59:20.000Z
2018-08-01T14:28:28.000Z
#gwang_01.py
j = 0
for i in range (1000):
    if (i % 3) == 0 or (i%5) == 0:
        j+=i
j
12.142857
39
0.470588
21
85
1.857143
0.666667
0
0
0
0
0
0
0
0
0
0
0.186441
0.305882
85
6
40
14.166667
0.474576
0.129412
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
b91d93c28564ac88f3b3c52cfab07d03b9512a82
110
py
Python
repos/system_upgrade/common/tags/thirdpartyapplications.py
sm00th/leapp-repository
1c171ec3a5f9260a3c6f84a9b15cad78a875ac61
[ "Apache-2.0" ]
21
2018-11-20T15:58:39.000Z
2022-03-15T19:57:24.000Z
repos/system_upgrade/common/tags/thirdpartyapplications.py
sm00th/leapp-repository
1c171ec3a5f9260a3c6f84a9b15cad78a875ac61
[ "Apache-2.0" ]
732
2018-11-21T18:33:26.000Z
2022-03-31T16:16:24.000Z
repos/system_upgrade/common/tags/thirdpartyapplications.py
sm00th/leapp-repository
1c171ec3a5f9260a3c6f84a9b15cad78a875ac61
[ "Apache-2.0" ]
85
2018-11-20T17:55:00.000Z
2022-03-29T09:40:31.000Z
from leapp.tags import Tag


class ThirdPartyApplicationsPhaseTag(Tag):
    name = 'third_party_applications'
18.333333
42
0.8
12
110
7.166667
0.916667
0
0
0
0
0
0
0
0
0
0
0
0.136364
110
5
43
22
0.905263
0
0
0
0
0
0.218182
0.218182
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
b924254372c6fa9788708355bf2891de60bf1ed9
142
py
Python
packages/regression_model/regression_model/processing/errors.py
avkmal/deploying-machine-learning-models
096be274350dbe667319979cf4a9e3001d5405f5
[ "BSD-3-Clause" ]
477
2019-02-14T11:24:29.000Z
2022-03-31T08:43:50.000Z
packages/regression_model/regression_model/processing/errors.py
avkmal/deploying-machine-learning-models
096be274350dbe667319979cf4a9e3001d5405f5
[ "BSD-3-Clause" ]
51
2019-05-11T11:00:48.000Z
2021-12-08T14:50:33.000Z
packages/regression_model/regression_model/processing/errors.py
avkmal/deploying-machine-learning-models
096be274350dbe667319979cf4a9e3001d5405f5
[ "BSD-3-Clause" ]
4,870
2019-01-20T11:04:50.000Z
2022-03-31T12:37:17.000Z
class BaseError(Exception):
    """Base package error."""


class InvalidModelInputError(BaseError):
    """Model input contains an error."""
20.285714
40
0.704225
14
142
7.142857
0.785714
0
0
0
0
0
0
0
0
0
0
0
0.15493
142
6
41
23.666667
0.833333
0.352113
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
1
0
0
4
b94903a7ffd07dab8d104a95fd8f99f407d09e75
136
py
Python
env/lib/python3.6/site-packages/jet/admin.py
anthowen/duplify
846d01c1b21230937fdf0281b0cf8c0b08a8c24e
[ "MIT" ]
2
2019-12-04T16:24:44.000Z
2020-04-06T21:49:34.000Z
env/lib/python3.6/site-packages/jet/admin.py
anthowen/duplify
846d01c1b21230937fdf0281b0cf8c0b08a8c24e
[ "MIT" ]
21
2021-02-04T01:37:44.000Z
2022-03-12T01:00:55.000Z
env/lib/python3.6/site-packages/jet/admin.py
anthowen/duplify
846d01c1b21230937fdf0281b0cf8c0b08a8c24e
[ "MIT" ]
null
null
null
from django.contrib import admin


class CompactInline(admin.options.InlineModelAdmin):
    template = 'admin/edit_inline/compact.html'
22.666667
52
0.801471
16
136
6.75
0.875
0
0
0
0
0
0
0
0
0
0
0
0.110294
136
5
53
27.2
0.892562
0
0
0
0
0
0.220588
0.220588
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
b95a7ac33f7ad5314903b29b4eca6e3fc22ec006
525
py
Python
apps/qolsysgw/qolsys/exceptions.py
kevin-david/qolsysgw
dbeb93aa9466b2d4f9c6c89417f2966fc9e9676d
[ "MIT" ]
11
2021-12-30T04:16:09.000Z
2022-03-19T16:26:22.000Z
apps/qolsysgw/qolsys/exceptions.py
kevin-david/qolsysgw
dbeb93aa9466b2d4f9c6c89417f2966fc9e9676d
[ "MIT" ]
19
2022-01-01T01:54:02.000Z
2022-03-29T20:33:36.000Z
apps/qolsysgw/qolsys/exceptions.py
kevin-david/qolsysgw
dbeb93aa9466b2d4f9c6c89417f2966fc9e9676d
[ "MIT" ]
1
2022-02-27T20:42:16.000Z
2022-02-27T20:42:16.000Z
class QolsysException(Exception):
    pass

class QolsysGwConfigIncomplete(QolsysException):
    pass

class QolsysGwConfigError(QolsysException):
    pass

class UnableToParseEventException(QolsysException):
    pass

class UnknownQolsysControlException(QolsysException):
    pass

class UnknownQolsysEventException(QolsysException):
    pass

class UnknownQolsysSensorException(QolsysException):
    pass

class MissingUserCodeException(QolsysException):
    pass

class InvalidUserCodeException(QolsysException):
    pass
19.444444
53
0.813333
36
525
11.861111
0.333333
0.168618
0.393443
0
0
0
0
0
0
0
0
0
0.135238
525
26
54
20.192308
0.940529
0
0
0.5
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
0
0
1
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
4
b95bca6fd7ef748c2ad3f94f620c5af793e339d4
5,606
py
Python
raam/examples/inventory/configuration.py
marekpetrik/RAAM
a8020629f98b951ee581ff3f9c3be96af74ede01
[ "MIT" ]
10
2015-09-28T14:41:12.000Z
2021-03-29T16:35:29.000Z
raam/examples/inventory/configuration.py
rlsquared/RAAM
a8020629f98b951ee581ff3f9c3be96af74ede01
[ "MIT" ]
null
null
null
raam/examples/inventory/configuration.py
rlsquared/RAAM
a8020629f98b951ee581ff3f9c3be96af74ede01
[ "MIT" ]
5
2015-09-26T20:01:49.000Z
2018-07-05T08:17:25.000Z
""" Global configuration for the problem settings """ import numpy as np from scipy import stats horizon = 300 runs = 40 DefaultConfiguration = { "price_buy" : [1.2,2.1,3.3], "price_sell" : [1,2,3], "price_probabilities" : np.array([[0.8, 0.1, 0.1],[0.1, 0.8, 0.1],[0.1, 0.1, 0.8]]), "initial_capacity" : 1, "initial_inventory" : 0.5, "degradation" : {"fun":"polynomial","charge":[0.0,0,0.01], "discharge":[0.01,-0.02,0.01] }, "capacity_cost" : 1, "change_capacity" : False # assume that the capacity does not change } def construct_martingale(prices, variance): """ Constructs a definitions with a martingale definition of transition probabilities. The change in price is modeled as a normal distribution with zero mean and the specified variance. The capacity of the battery does in fact change Parameters ---------- prices : array **Sell** prices that correspond to states in the Martingale price state process. **Buy** prices are 10% higher. variance : float Variance of the normal distribution Returns ------- out : dict Configuration that corresponds to the martingale """ states = len(prices) # defines over how many states the probability is spread over spread = min(5,states-1) if type(prices) is not np.ndarray: prices = np.array(prices) # relative transition probabilities p = stats.norm(0,variance).pdf(np.arange(-spread,spread+1)) p = p / p.sum() # add extra 0s to both ends of p p = np.concatenate((np.zeros(states-spread-1), p, np.zeros(states-spread-1))) P = [p[states-i-1:2*states-i-1] for i in range(states)] P = np.array(P) P = np.diag(1/P.sum(1)).dot(P) configuration = { "price_buy" : 1.1 * prices, "price_sell" : prices, "price_probabilities" : P, "initial_capacity" : 1, "initial_inventory" : 0.5, "degradation" : {"fun":"polynomial","charge":[0.0,0,0.01], "discharge":[0.01,0.02,0.01] }, "capacity_cost" : 1, "change_capacity" : True # assume that the capacity does not change } return configuration def construct_massdata(degrade): """ Returns a problem definition on what is described in the experimental section of the paper This uses a simple uniform quantization of energy prices Paramaters ---------- degrade : bool Whether the battery degrades """ prices = np.array([25.0, 50.0, 75.0, 100.0, 125.0, 150.0, 175.0, 200.0, 250.0, 300.0]) P = np.array([[ 8.15584416e-01, 1.76623377e-01, 5.19480519e-03, 2.59740260e-03, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], [ 4.70114171e-02, 8.72397582e-01, 7.25319006e-02, 7.38750839e-03, 0.00000000e+00, 6.71591672e-04, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], [ 1.19904077e-03, 1.31894484e-01, 7.79376499e-01, 6.95443645e-02, 1.43884892e-02, 3.59712230e-03, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], [ 0.00000000e+00, 4.24528302e-02, 2.83018868e-01, 5.14150943e-01, 1.22641509e-01, 2.35849057e-02, 9.43396226e-03, 0.00000000e+00, 0.00000000e+00, 4.71698113e-03], [ 0.00000000e+00, 2.15053763e-02, 9.67741935e-02, 2.68817204e-01, 4.30107527e-01, 1.29032258e-01, 4.30107527e-02, 1.07526882e-02, 0.00000000e+00, 0.00000000e+00], [ 0.00000000e+00, 0.00000000e+00, 3.22580645e-02, 2.58064516e-01, 3.54838710e-01, 1.93548387e-01, 9.67741935e-02, 6.45161290e-02, 0.00000000e+00, 0.00000000e+00], [ 0.00000000e+00, 7.14285714e-02, 1.42857143e-01, 0.00000000e+00, 7.14285714e-02, 2.14285714e-01, 2.85714286e-01, 1.42857143e-01, 7.14285714e-02, 0.00000000e+00], [ 0.00000000e+00, 0.00000000e+00, 1.42857143e-01, 0.00000000e+00, 2.85714286e-01, 0.00000000e+00, 0.00000000e+00, 2.85714286e-01, 
2.85714286e-01, 0.00000000e+00], [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 2.50000000e-01, 2.50000000e-01, 2.50000000e-01, 0.00000000e+00, 2.50000000e-01, 0.00000000e+00], [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00]]) if degrade: degradation = {"fun":"polynomial","charge" : [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.00142857142857143], "discharge" : [0.0, 0.00500000000000000, -0.00750000000000000, 0.00500000000000000, -0.00125000000000000] } else: degradation = {"fun":"polynomial","charge" : [0.0], "discharge" : [0.0] } configuration = { "price_buy" : 1.05 * prices, "price_sell" : 0.95 * prices, "price_probabilities" : P, "initial_capacity" : 1, "initial_inventory" : 0.5, "degradation" : degradation, "capacity_cost" : 20000, "change_capacity" : True } return configuration
36.881579
131
0.571887
748
5,606
4.255348
0.258021
0.172793
0.184731
0.224317
0.393968
0.393968
0.32705
0.296576
0.28715
0.272699
0
0.337594
0.288263
5,606
151
132
37.125828
0.46015
0.180164
0
0.329897
0
0
0.099148
0
0
0
0
0
0
1
0.020619
false
0
0.020619
0
0.061856
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
b97da341378a0e7a5adfe6054bb84f3cc67bb9e3
286
py
Python
mne/datasets/__init__.py
joewalter/mne-python
b0629bea7f5e8e94d9e2e889f45a35f9657e6dbc
[ "BSD-3-Clause" ]
null
null
null
mne/datasets/__init__.py
joewalter/mne-python
b0629bea7f5e8e94d9e2e889f45a35f9657e6dbc
[ "BSD-3-Clause" ]
null
null
null
mne/datasets/__init__.py
joewalter/mne-python
b0629bea7f5e8e94d9e2e889f45a35f9657e6dbc
[ "BSD-3-Clause" ]
null
null
null
"""Demo datasets """ from . import brainstorm from . import eegbci from . import megsim from . import misc from . import sample from . import somato from . import multimodal from . import spm_face from . import testing from . import _fake from .utils import _download_all_example_data
19.066667
45
0.772727
40
286
5.375
0.5
0.465116
0
0
0
0
0
0
0
0
0
0
0.167832
286
14
46
20.428571
0.903361
0.045455
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
4
b982aaef07d2748659ab4f8ce0b8ddebd158b1ef
28
py
Python
tests/__init__.py
ssaenger/ws66i
052fc06d97addf236d6712ebd7d6bb4842fced99
[ "MIT" ]
null
null
null
tests/__init__.py
ssaenger/ws66i
052fc06d97addf236d6712ebd7d6bb4842fced99
[ "MIT" ]
null
null
null
tests/__init__.py
ssaenger/ws66i
052fc06d97addf236d6712ebd7d6bb4842fced99
[ "MIT" ]
2
2021-11-09T09:11:22.000Z
2022-02-18T21:46:36.000Z
""" Init file for tests """
14
27
0.571429
4
28
4
1
0
0
0
0
0
0
0
0
0
0
0
0.214286
28
1
28
28
0.727273
0.678571
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
b9b23b26988e8c4b0e798b42d16118eeb608bb5b
209
py
Python
rmf_fleet_adapter_python/scripts/test_adapter.py
Capstone-S13/rmf_ros2
66721dd2ab5a458c050bad154c6a17d8e4b5c8f4
[ "Apache-2.0" ]
null
null
null
rmf_fleet_adapter_python/scripts/test_adapter.py
Capstone-S13/rmf_ros2
66721dd2ab5a458c050bad154c6a17d8e4b5c8f4
[ "Apache-2.0" ]
null
null
null
rmf_fleet_adapter_python/scripts/test_adapter.py
Capstone-S13/rmf_ros2
66721dd2ab5a458c050bad154c6a17d8e4b5c8f4
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3

import test_delivery
import test_loop
import test_interrupt


def main():
    test_delivery.main()
    test_loop.main()
    test_interrupt.main()

if __name__ == "__main__":
    main()
13.933333
26
0.703349
28
209
4.75
0.464286
0.225564
0
0
0
0
0
0
0
0
0
0.005848
0.181818
209
14
27
14.928571
0.77193
0.100478
0
0
0
0
0.042781
0
0
0
0
0
0
1
0.111111
true
0
0.333333
0
0.444444
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
b9bfeeaa911a4a90d16a45cb26658de53de8c1ec
1,573
py
Python
test/test_backups_api.py
hi-artem/twistlock-py
9888e905f5b9d3cc00f9b84244588c0992f8e4f4
[ "RSA-MD" ]
null
null
null
test/test_backups_api.py
hi-artem/twistlock-py
9888e905f5b9d3cc00f9b84244588c0992f8e4f4
[ "RSA-MD" ]
null
null
null
test/test_backups_api.py
hi-artem/twistlock-py
9888e905f5b9d3cc00f9b84244588c0992f8e4f4
[ "RSA-MD" ]
null
null
null
# coding: utf-8

"""
    Prisma Cloud Compute API

    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)  # noqa: E501

    The version of the OpenAPI document: 21.04.439
    Generated by: https://openapi-generator.tech
"""


from __future__ import absolute_import

import unittest

import openapi_client
from openapi_client.api.backups_api import BackupsApi  # noqa: E501
from openapi_client.rest import ApiException


class TestBackupsApi(unittest.TestCase):
    """BackupsApi unit test stubs"""

    def setUp(self):
        self.api = openapi_client.api.backups_api.BackupsApi()  # noqa: E501

    def tearDown(self):
        pass

    def test_api_v1_backups_get(self):
        """Test case for api_v1_backups_get

        """
        pass

    def test_api_v1_backups_id_delete(self):
        """Test case for api_v1_backups_id_delete

        """
        pass

    def test_api_v1_backups_id_get(self):
        """Test case for api_v1_backups_id_get

        """
        pass

    def test_api_v1_backups_id_patch(self):
        """Test case for api_v1_backups_id_patch

        """
        pass

    def test_api_v1_backups_id_post(self):
        """Test case for api_v1_backups_id_post

        """
        pass

    def test_api_v1_backups_id_restore_post(self):
        """Test case for api_v1_backups_id_restore_post

        """
        pass

    def test_api_v1_backups_post(self):
        """Test case for api_v1_backups_post

        """
        pass


if __name__ == '__main__':
    unittest.main()
20.697368
124
0.662428
210
1,573
4.580952
0.290476
0.072765
0.174636
0.14553
0.509356
0.432432
0.393971
0.283784
0.068607
0
0
0.026451
0.254927
1,573
75
125
20.973333
0.794369
0.403687
0
0.307692
1
0
0.009174
0
0
0
0
0
0
1
0.346154
false
0.307692
0.192308
0
0.576923
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
4
b9d5b4144671f77e74b51dfd79b049eb9f4d15fc
1,892
py
Python
sdk/python/pulumi_azure/compute/__init__.py
henriktao/pulumi-azure
f1cbcf100b42b916da36d8fe28be3a159abaf022
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure/compute/__init__.py
henriktao/pulumi-azure
f1cbcf100b42b916da36d8fe28be3a159abaf022
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure/compute/__init__.py
henriktao/pulumi-azure
f1cbcf100b42b916da36d8fe28be3a159abaf022
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***

from .. import _utilities
import typing
# Export this package's modules as members:
from .availability_set import *
from .bastion_host import *
from .configuration_policy_assignment import *
from .data_disk_attachment import *
from .dedicated_host import *
from .dedicated_host_group import *
from .disk_access import *
from .disk_encryption_set import *
from .disk_pool import *
from .disk_pool_iscsi_target import *
from .disk_pool_iscsi_target_lun import *
from .disk_pool_managed_disk_attachment import *
from .extension import *
from .get_availability_set import *
from .get_dedicated_host import *
from .get_dedicated_host_group import *
from .get_disk_access import *
from .get_disk_encryption_set import *
from .get_image import *
from .get_images import *
from .get_managed_disk import *
from .get_platform_image import *
from .get_shared_image import *
from .get_shared_image_gallery import *
from .get_shared_image_version import *
from .get_shared_image_versions import *
from .get_snapshot import *
from .get_ssh_public_key import *
from .get_virtual_machine import *
from .get_virtual_machine_scale_set import *
from .image import *
from .linux_virtual_machine import *
from .linux_virtual_machine_scale_set import *
from .managed_disk import *
from .orchestrated_virtual_machine_scale_set import *
from .scale_set import *
from .shared_image import *
from .shared_image_gallery import *
from .shared_image_version import *
from .snapshot import *
from .ssh_public_key import *
from .virtual_machine import *
from .virtual_machine_scale_set_extension import *
from .windows_virtual_machine import *
from .windows_virtual_machine_scale_set import *
from ._inputs import *
from . import outputs
34.4
87
0.811311
276
1,892
5.217391
0.278986
0.319444
0.153472
0.076389
0.466667
0.165972
0
0
0
0
0
0.000604
0.124736
1,892
54
88
35.037037
0.868961
0.115751
0
0
1
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
6a138e2055dedd4c60063a6328c8d2dd9380d07e
81
py
Python
akamodel/errors.py
jjyr/akamodel
84ff7eee4f018d2eef1987f004a1f5b0924d90d2
[ "Apache-2.0" ]
null
null
null
akamodel/errors.py
jjyr/akamodel
84ff7eee4f018d2eef1987f004a1f5b0924d90d2
[ "Apache-2.0" ]
null
null
null
akamodel/errors.py
jjyr/akamodel
84ff7eee4f018d2eef1987f004a1f5b0924d90d2
[ "Apache-2.0" ]
null
null
null
class BaseError(Exception):
    pass


class RecordNotFound(BaseError):
    pass
11.571429
32
0.728395
8
81
7.375
0.625
0
0
0
0
0
0
0
0
0
0
0
0.197531
81
6
33
13.5
0.907692
0
0
0.5
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
4
6a36b94e3480eb8019c361baca1d77d06e6bcb72
72
py
Python
genfigs/__init__.py
ehsanfar/Network_Auctioneer
802f10b27d4f6f5fd9d5434f30814f2175236479
[ "Apache-2.0" ]
2
2018-05-16T16:01:21.000Z
2019-07-27T13:31:17.000Z
genfigs/__init__.py
ehsanfar/Network_Auctioneer
802f10b27d4f6f5fd9d5434f30814f2175236479
[ "Apache-2.0" ]
null
null
null
genfigs/__init__.py
ehsanfar/Network_Auctioneer
802f10b27d4f6f5fd9d5434f30814f2175236479
[ "Apache-2.0" ]
null
null
null
""" Copyright 2018 abbas ehsanfar """ """ genfigs: generate figures """
10.285714
29
0.666667
7
72
6.857143
1
0
0
0
0
0
0
0
0
0
0
0.065574
0.152778
72
7
30
10.285714
0.721311
0.402778
0
null
1
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
6a398eb2b99bb2dd9e37344cddf2ae512992607b
912
py
Python
sped_correcao/_old/rule08.py
teocrono/scripts
2970192e3184c9e1d3dd67390e544d767b809c23
[ "MIT" ]
null
null
null
sped_correcao/_old/rule08.py
teocrono/scripts
2970192e3184c9e1d3dd67390e544d767b809c23
[ "MIT" ]
null
null
null
sped_correcao/_old/rule08.py
teocrono/scripts
2970192e3184c9e1d3dd67390e544d767b809c23
[ "MIT" ]
null
null
null
## Para operações de saída, o CST deve ser preenchido com os valores de 01 a 49 ou 99. ##
##
##
def exec(conexao):
    cursor = conexao.cursor()
    print("RULE 08 - Inicializando",end=' ')

    update = " UPDATE principal SET "
    update = update + " r2 = \"49\" "
    update = update + " WHERE 1=1 "
    update = update + " and r1 = \"C181\" "
    update = update + " and r2 in (\"00\",\"99\") "
    update = update + " and r3 in (\"5152\",\"5102\") "
    cursor.execute(update)
    conexao.commit()
    print('-',end=' ')

    update = " UPDATE principal SET "
    update = update + " r2 = \"49\" "
    update = update + " WHERE 1=1 "
    update = update + " and r1 = \"C185\" "
    update = update + " and r2 in (\"00\",\"99\") "
    update = update + " and r3 in (\"5152\",\"5102\") "
    cursor.execute(update)
    conexao.commit()
    print('-',end=' ')

    print("Finalizado")
29.419355
86
0.525219
109
912
4.394495
0.40367
0.300626
0.187891
0.100209
0.699374
0.699374
0.699374
0.699374
0.699374
0.699374
0
0.083205
0.288377
912
31
87
29.419355
0.654854
0.091009
0
0.727273
0
0
0.287454
0
0
0
0
0
0
1
0.045455
false
0
0
0
0.045455
0.181818
0
0
0
null
1
1
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
6a4c6c00d40ccdf36a5f1d308935ebdbe1193e5f
607
py
Python
models/model_3/model_3.py
tangmingsh/image_classifier
22e995e5dd23f5352a899b3bfd57fda50326fd8a
[ "MIT" ]
175
2018-11-12T03:11:35.000Z
2022-03-25T04:35:17.000Z
models/model_3/model_3.py
tangmingsh/image_classifier
22e995e5dd23f5352a899b3bfd57fda50326fd8a
[ "MIT" ]
3
2019-07-12T10:59:12.000Z
2021-06-28T05:50:56.000Z
models/model_3/model_3.py
tangmingsh/image_classifier
22e995e5dd23f5352a899b3bfd57fda50326fd8a
[ "MIT" ]
115
2018-11-25T17:33:09.000Z
2022-03-17T10:07:43.000Z
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(128, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dense(1))
model.add(Activation('sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
26.391304
55
0.672158
85
607
4.729412
0.329412
0.278607
0.223881
0.218905
0.562189
0.562189
0.487562
0.487562
0.487562
0.487562
0
0.052533
0.121911
607
23
56
26.391304
0.701689
0
0
0.388889
0
0
0.09375
0
0
0
0
0
0
0
null
null
0
0
null
null
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
4