hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2fab82aac1fbd38c5700ab2acd9c18dca5464ec7
| 154
|
py
|
Python
|
state.py
|
AndrewBeers/Scribbler
|
61ef1366f5a62ba7c033417761b9729a03af2d7c
|
[
"MIT"
] | null | null | null |
state.py
|
AndrewBeers/Scribbler
|
61ef1366f5a62ba7c033417761b9729a03af2d7c
|
[
"MIT"
] | null | null | null |
state.py
|
AndrewBeers/Scribbler
|
61ef1366f5a62ba7c033417761b9729a03af2d7c
|
[
"MIT"
] | null | null | null |
class State(object):
def __init__(self, data):
self.data = data
pass
def change_data(self, data):
self.data = data
| 11
| 32
| 0.551948
| 19
| 154
| 4.210526
| 0.473684
| 0.4
| 0.45
| 0.4
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.350649
| 154
| 13
| 33
| 11.846154
| 0.8
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.166667
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
2fb040de01ff81942ae94cf98ce243a0a4d4decf
| 47
|
py
|
Python
|
bitfinex_saf/__init__.py
|
senz/bitfinex
|
b32f36bfb40a5b922a1ea4a65fa32c8784c76647
|
[
"MIT"
] | null | null | null |
bitfinex_saf/__init__.py
|
senz/bitfinex
|
b32f36bfb40a5b922a1ea4a65fa32c8784c76647
|
[
"MIT"
] | null | null | null |
bitfinex_saf/__init__.py
|
senz/bitfinex
|
b32f36bfb40a5b922a1ea4a65fa32c8784c76647
|
[
"MIT"
] | null | null | null |
from bitfinex_saf.client import BitfinexClient
| 23.5
| 46
| 0.893617
| 6
| 47
| 6.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085106
| 47
| 1
| 47
| 47
| 0.953488
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2fe4ab0ddca7252127c8a1228c15072aa016004d
| 12,397
|
py
|
Python
|
sdk/python/pulumi_f5bigip/cm/device.py
|
pulumi/pulumi-f5bigip
|
4bce074f8bd7cb42f359ef4814ca5b437230fd1c
|
[
"ECL-2.0",
"Apache-2.0"
] | 4
|
2018-12-21T23:30:33.000Z
|
2021-10-12T16:38:27.000Z
|
sdk/python/pulumi_f5bigip/cm/device.py
|
pulumi/pulumi-f5bigip
|
4bce074f8bd7cb42f359ef4814ca5b437230fd1c
|
[
"ECL-2.0",
"Apache-2.0"
] | 61
|
2019-01-09T01:50:19.000Z
|
2022-03-31T15:27:17.000Z
|
sdk/python/pulumi_f5bigip/cm/device.py
|
pulumi/pulumi-f5bigip
|
4bce074f8bd7cb42f359ef4814ca5b437230fd1c
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2019-10-05T10:36:30.000Z
|
2019-10-05T10:36:30.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['DeviceArgs', 'Device']
@pulumi.input_type
class DeviceArgs:
def __init__(__self__, *,
configsync_ip: pulumi.Input[str],
name: pulumi.Input[str],
mirror_ip: Optional[pulumi.Input[str]] = None,
mirror_secondary_ip: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Device resource.
:param pulumi.Input[str] configsync_ip: IP address used for config sync
:param pulumi.Input[str] name: Address of the Device which needs to be Deviceensed
:param pulumi.Input[str] mirror_ip: IP address used for state mirroring
:param pulumi.Input[str] mirror_secondary_ip: Secondary IP address used for state mirroring
"""
pulumi.set(__self__, "configsync_ip", configsync_ip)
pulumi.set(__self__, "name", name)
if mirror_ip is not None:
pulumi.set(__self__, "mirror_ip", mirror_ip)
if mirror_secondary_ip is not None:
pulumi.set(__self__, "mirror_secondary_ip", mirror_secondary_ip)
@property
@pulumi.getter(name="configsyncIp")
def configsync_ip(self) -> pulumi.Input[str]:
"""
IP address used for config sync
"""
return pulumi.get(self, "configsync_ip")
@configsync_ip.setter
def configsync_ip(self, value: pulumi.Input[str]):
pulumi.set(self, "configsync_ip", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Address of the Device which needs to be Deviceensed
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="mirrorIp")
def mirror_ip(self) -> Optional[pulumi.Input[str]]:
"""
IP address used for state mirroring
"""
return pulumi.get(self, "mirror_ip")
@mirror_ip.setter
def mirror_ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "mirror_ip", value)
@property
@pulumi.getter(name="mirrorSecondaryIp")
def mirror_secondary_ip(self) -> Optional[pulumi.Input[str]]:
"""
Secondary IP address used for state mirroring
"""
return pulumi.get(self, "mirror_secondary_ip")
@mirror_secondary_ip.setter
def mirror_secondary_ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "mirror_secondary_ip", value)
@pulumi.input_type
class _DeviceState:
def __init__(__self__, *,
configsync_ip: Optional[pulumi.Input[str]] = None,
mirror_ip: Optional[pulumi.Input[str]] = None,
mirror_secondary_ip: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Device resources.
:param pulumi.Input[str] configsync_ip: IP address used for config sync
:param pulumi.Input[str] mirror_ip: IP address used for state mirroring
:param pulumi.Input[str] mirror_secondary_ip: Secondary IP address used for state mirroring
:param pulumi.Input[str] name: Address of the Device which needs to be Deviceensed
"""
if configsync_ip is not None:
pulumi.set(__self__, "configsync_ip", configsync_ip)
if mirror_ip is not None:
pulumi.set(__self__, "mirror_ip", mirror_ip)
if mirror_secondary_ip is not None:
pulumi.set(__self__, "mirror_secondary_ip", mirror_secondary_ip)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="configsyncIp")
def configsync_ip(self) -> Optional[pulumi.Input[str]]:
"""
IP address used for config sync
"""
return pulumi.get(self, "configsync_ip")
@configsync_ip.setter
def configsync_ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "configsync_ip", value)
@property
@pulumi.getter(name="mirrorIp")
def mirror_ip(self) -> Optional[pulumi.Input[str]]:
"""
IP address used for state mirroring
"""
return pulumi.get(self, "mirror_ip")
@mirror_ip.setter
def mirror_ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "mirror_ip", value)
@property
@pulumi.getter(name="mirrorSecondaryIp")
def mirror_secondary_ip(self) -> Optional[pulumi.Input[str]]:
"""
Secondary IP address used for state mirroring
"""
return pulumi.get(self, "mirror_secondary_ip")
@mirror_secondary_ip.setter
def mirror_secondary_ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "mirror_secondary_ip", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Address of the Device which needs to be Deviceensed
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
class Device(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
configsync_ip: Optional[pulumi.Input[str]] = None,
mirror_ip: Optional[pulumi.Input[str]] = None,
mirror_secondary_ip: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
`cm.Device` provides details about a specific bigip
This resource is helpful when configuring the BIG-IP device in cluster or in HA mode.
## Example Usage
```python
import pulumi
import pulumi_f5bigip as f5bigip
my_new_device = f5bigip.cm.Device("myNewDevice",
configsync_ip="2.2.2.2",
mirror_ip="10.10.10.10",
mirror_secondary_ip="11.11.11.11",
name="bigip300.f5.com")
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] configsync_ip: IP address used for config sync
:param pulumi.Input[str] mirror_ip: IP address used for state mirroring
:param pulumi.Input[str] mirror_secondary_ip: Secondary IP address used for state mirroring
:param pulumi.Input[str] name: Address of the Device which needs to be Deviceensed
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: DeviceArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
`cm.Device` provides details about a specific bigip
This resource is helpful when configuring the BIG-IP device in cluster or in HA mode.
## Example Usage
```python
import pulumi
import pulumi_f5bigip as f5bigip
my_new_device = f5bigip.cm.Device("myNewDevice",
configsync_ip="2.2.2.2",
mirror_ip="10.10.10.10",
mirror_secondary_ip="11.11.11.11",
name="bigip300.f5.com")
```
:param str resource_name: The name of the resource.
:param DeviceArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(DeviceArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
configsync_ip: Optional[pulumi.Input[str]] = None,
mirror_ip: Optional[pulumi.Input[str]] = None,
mirror_secondary_ip: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = DeviceArgs.__new__(DeviceArgs)
if configsync_ip is None and not opts.urn:
raise TypeError("Missing required property 'configsync_ip'")
__props__.__dict__["configsync_ip"] = configsync_ip
__props__.__dict__["mirror_ip"] = mirror_ip
__props__.__dict__["mirror_secondary_ip"] = mirror_secondary_ip
if name is None and not opts.urn:
raise TypeError("Missing required property 'name'")
__props__.__dict__["name"] = name
super(Device, __self__).__init__(
'f5bigip:cm/device:Device',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
configsync_ip: Optional[pulumi.Input[str]] = None,
mirror_ip: Optional[pulumi.Input[str]] = None,
mirror_secondary_ip: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None) -> 'Device':
"""
Get an existing Device resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] configsync_ip: IP address used for config sync
:param pulumi.Input[str] mirror_ip: IP address used for state mirroring
:param pulumi.Input[str] mirror_secondary_ip: Secondary IP address used for state mirroring
:param pulumi.Input[str] name: Address of the Device which needs to be Deviceensed
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _DeviceState.__new__(_DeviceState)
__props__.__dict__["configsync_ip"] = configsync_ip
__props__.__dict__["mirror_ip"] = mirror_ip
__props__.__dict__["mirror_secondary_ip"] = mirror_secondary_ip
__props__.__dict__["name"] = name
return Device(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="configsyncIp")
def configsync_ip(self) -> pulumi.Output[str]:
"""
IP address used for config sync
"""
return pulumi.get(self, "configsync_ip")
@property
@pulumi.getter(name="mirrorIp")
def mirror_ip(self) -> pulumi.Output[Optional[str]]:
"""
IP address used for state mirroring
"""
return pulumi.get(self, "mirror_ip")
@property
@pulumi.getter(name="mirrorSecondaryIp")
def mirror_secondary_ip(self) -> pulumi.Output[Optional[str]]:
"""
Secondary IP address used for state mirroring
"""
return pulumi.get(self, "mirror_secondary_ip")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Address of the Device which needs to be Deviceensed
"""
return pulumi.get(self, "name")
| 38.862069
| 134
| 0.63233
| 1,481
| 12,397
| 5.036462
| 0.109386
| 0.082585
| 0.101354
| 0.088484
| 0.793136
| 0.763105
| 0.755061
| 0.728114
| 0.710283
| 0.690843
| 0
| 0.006169
| 0.267726
| 12,397
| 318
| 135
| 38.984277
| 0.815488
| 0.283375
| 0
| 0.645714
| 1
| 0
| 0.097621
| 0.002958
| 0
| 0
| 0
| 0
| 0
| 1
| 0.154286
| false
| 0.005714
| 0.028571
| 0
| 0.274286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
641ba946c610ba1e27a453d584abc1037052f730
| 25
|
py
|
Python
|
skfda/preprocessing/dim_reduction/__init__.py
|
mdrolet01/scikit-fda
|
f16ffb3986408c12a2dfdf910688bd56ddecb188
|
[
"BSD-3-Clause"
] | 1
|
2020-06-27T22:25:49.000Z
|
2020-06-27T22:25:49.000Z
|
skfda/preprocessing/dim_reduction/__init__.py
|
KonstantinKlepikov/scikit-fda
|
93c4ad80aaba8739b4f90932a2a759d6f5960387
|
[
"BSD-3-Clause"
] | null | null | null |
skfda/preprocessing/dim_reduction/__init__.py
|
KonstantinKlepikov/scikit-fda
|
93c4ad80aaba8739b4f90932a2a759d6f5960387
|
[
"BSD-3-Clause"
] | null | null | null |
from . import projection
| 12.5
| 24
| 0.8
| 3
| 25
| 6.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 25
| 1
| 25
| 25
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
642a706433ac8b84b97fd26a00c772ddd7c6620e
| 79
|
py
|
Python
|
api/seeds/photo.py
|
flatcoke/django-structure
|
d0a7a7489d2f49c72ec4ec030f87c3942d84bb90
|
[
"MIT"
] | 6
|
2019-02-27T14:16:48.000Z
|
2021-08-12T23:47:13.000Z
|
api/seeds/photo.py
|
flatcoke/django-structure
|
d0a7a7489d2f49c72ec4ec030f87c3942d84bb90
|
[
"MIT"
] | 3
|
2020-02-11T23:47:05.000Z
|
2021-06-10T17:46:35.000Z
|
api/seeds/photo.py
|
flatcoke/django-structure
|
d0a7a7489d2f49c72ec4ec030f87c3942d84bb90
|
[
"MIT"
] | 1
|
2019-07-24T12:02:02.000Z
|
2019-07-24T12:02:02.000Z
|
from faker import Faker
def generate_data(number):
fake = Faker('ko_KR')
| 13.166667
| 26
| 0.708861
| 12
| 79
| 4.5
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.189873
| 79
| 5
| 27
| 15.8
| 0.84375
| 0
| 0
| 0
| 1
| 0
| 0.063291
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
643b0738d29c0834cab6604eabf47387d74662c1
| 285
|
py
|
Python
|
sphere/distribution/__init__.py
|
MehdiN/sphere
|
dec3b10ef31a99c01378ffd53c434c664ae43a6c
|
[
"MIT"
] | 15
|
2019-04-01T22:35:09.000Z
|
2021-11-18T20:48:38.000Z
|
sphere/distribution/__init__.py
|
MehdiN/sphere
|
dec3b10ef31a99c01378ffd53c434c664ae43a6c
|
[
"MIT"
] | 3
|
2019-05-12T21:44:58.000Z
|
2022-02-16T04:10:30.000Z
|
sphere/distribution/__init__.py
|
MehdiN/sphere
|
dec3b10ef31a99c01378ffd53c434c664ae43a6c
|
[
"MIT"
] | 6
|
2019-09-18T04:59:06.000Z
|
2022-01-05T10:43:03.000Z
|
from .distribution import fb8
from .distribution import fb82
from .distribution import fb83
from .distribution import fb84
from .distribution import FB8Distribution
from .distribution import fb8_mle
from .distribution import kent_me
from .saddle import spa
del distribution
del saddle
| 25.909091
| 41
| 0.845614
| 38
| 285
| 6.289474
| 0.368421
| 0.468619
| 0.644351
| 0.209205
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036145
| 0.126316
| 285
| 10
| 42
| 28.5
| 0.923695
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.8
| 0
| 0.8
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
92408fe25207481e3f7089f52f78f7b3b3a20d44
| 153
|
py
|
Python
|
build/lib/yyam/tomrw.py
|
include-yy/account-manager
|
ae28433909d0f6580693d1a5b65e40dfbb9d5f57
|
[
"MIT"
] | null | null | null |
build/lib/yyam/tomrw.py
|
include-yy/account-manager
|
ae28433909d0f6580693d1a5b65e40dfbb9d5f57
|
[
"MIT"
] | null | null | null |
build/lib/yyam/tomrw.py
|
include-yy/account-manager
|
ae28433909d0f6580693d1a5b65e40dfbb9d5f57
|
[
"MIT"
] | null | null | null |
from tomlkit import parse, dumps
from toml import loads
def tom_parse(in_str):
return loads(in_str)
def tom_dump(in_dic):
return dumps(in_dic)
| 17
| 32
| 0.751634
| 27
| 153
| 4.037037
| 0.518519
| 0.110092
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.176471
| 153
| 8
| 33
| 19.125
| 0.865079
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
9256711a2aa7fe46429c956c780a25392e9c4b75
| 33
|
py
|
Python
|
adform/auth/__init__.py
|
dutkiewicz/adform-api
|
5b670ea971c261565d1fe4cf7c18b2e109f8449d
|
[
"MIT"
] | null | null | null |
adform/auth/__init__.py
|
dutkiewicz/adform-api
|
5b670ea971c261565d1fe4cf7c18b2e109f8449d
|
[
"MIT"
] | 6
|
2019-11-29T04:53:15.000Z
|
2020-06-29T04:41:24.000Z
|
adform/auth/__init__.py
|
dutkiewicz/adform-api
|
5b670ea971c261565d1fe4cf7c18b2e109f8449d
|
[
"MIT"
] | null | null | null |
from .authorize import Authorize
| 16.5
| 32
| 0.848485
| 4
| 33
| 7
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 1
| 33
| 33
| 0.965517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
92b56af0ede1046e34746284e5e1bb882a40ef95
| 122
|
py
|
Python
|
run.py
|
Jo-wang/disentangle
|
6f01990829fb353a9fbda213ab6a7a493f265183
|
[
"MIT"
] | null | null | null |
run.py
|
Jo-wang/disentangle
|
6f01990829fb353a9fbda213ab6a7a493f265183
|
[
"MIT"
] | null | null | null |
run.py
|
Jo-wang/disentangle
|
6f01990829fb353a9fbda213ab6a7a493f265183
|
[
"MIT"
] | null | null | null |
import os
os.system("python main_viz.py --name VAEbase_mnist --plots reconstruct-traverse -c 5 -r 1 -t 2 --is-posterior")
| 40.666667
| 111
| 0.737705
| 22
| 122
| 4
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028037
| 0.122951
| 122
| 3
| 111
| 40.666667
| 0.794393
| 0
| 0
| 0
| 0
| 0.5
| 0.796748
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
2bf3cd53622c1c48ec43598f5b637ddbbb598702
| 262
|
py
|
Python
|
tianshou/env/utils.py
|
DZ9/tianshou
|
04208e6cce722b7a2353d9a5f4d6f0fc05797d67
|
[
"MIT"
] | 1
|
2020-04-01T04:47:39.000Z
|
2020-04-01T04:47:39.000Z
|
tianshou/env/utils.py
|
TommeyChang/tianshou
|
4f843d3f51789f488169131a5b5decba8bab2b31
|
[
"MIT"
] | null | null | null |
tianshou/env/utils.py
|
TommeyChang/tianshou
|
4f843d3f51789f488169131a5b5decba8bab2b31
|
[
"MIT"
] | 1
|
2022-01-23T10:52:48.000Z
|
2022-01-23T10:52:48.000Z
|
import cloudpickle
class CloudpickleWrapper(object):
def __init__(self, data):
self.data = data
def __getstate__(self):
return cloudpickle.dumps(self.data)
def __setstate__(self, data):
self.data = cloudpickle.loads(data)
| 20.153846
| 43
| 0.675573
| 29
| 262
| 5.689655
| 0.482759
| 0.242424
| 0.145455
| 0.193939
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.229008
| 262
| 12
| 44
| 21.833333
| 0.816832
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.375
| false
| 0
| 0.125
| 0.125
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
920185790a75cbf18b63a55a9ed1b9ce196f6508
| 26
|
py
|
Python
|
blog/tests/__init__.py
|
Namee-the-SaaS/django_blog
|
94181aea68690ddf21ff6fcedf7f88641bed271e
|
[
"MIT"
] | 18
|
2015-04-18T14:23:10.000Z
|
2020-11-03T00:40:55.000Z
|
blog/tests/__init__.py
|
Namee-the-SaaS/django_blog
|
94181aea68690ddf21ff6fcedf7f88641bed271e
|
[
"MIT"
] | 13
|
2019-12-19T18:43:33.000Z
|
2021-09-22T18:16:41.000Z
|
blog/tests/__init__.py
|
Namee-the-SaaS/django_blog
|
94181aea68690ddf21ff6fcedf7f88641bed271e
|
[
"MIT"
] | 11
|
2015-04-12T15:28:22.000Z
|
2021-06-21T20:55:45.000Z
|
from .test_views import *
| 13
| 25
| 0.769231
| 4
| 26
| 4.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 26
| 1
| 26
| 26
| 0.863636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a626d0b43db19a652247a76deb07a94c4958e729
| 4,386
|
py
|
Python
|
CiscoAXL/cspaxl_LdapSync.py
|
sanzcarlos/CiscoCollab
|
e8a62bcbf41962a80e65b1fef5953e99a54a9ae7
|
[
"MIT"
] | 1
|
2018-07-11T15:23:50.000Z
|
2018-07-11T15:23:50.000Z
|
CiscoAXL/cspaxl_LdapSync.py
|
sanzcarlos/CiscoCollab
|
e8a62bcbf41962a80e65b1fef5953e99a54a9ae7
|
[
"MIT"
] | null | null | null |
CiscoAXL/cspaxl_LdapSync.py
|
sanzcarlos/CiscoCollab
|
e8a62bcbf41962a80e65b1fef5953e99a54a9ae7
|
[
"MIT"
] | null | null | null |
# -*- coding: iso-8859-15 -*-
# *------------------------------------------------------------------
# * cspaxl_LdapSync
# *
# * Cisco AXL Python
# *
# * Copyright (C) 2015 Carlos Sanz <carlos.sanzpenas@gmail.com>
# *
# * This program is free software; you can redistribute it and/or
# * modify it under the terms of the GNU General Public License
# * as published by the Free Software Foundation; either version 2
# * of the License, or (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# *------------------------------------------------------------------
# *
# Import Modules
# Import Modules
import sys
def do_start(logger,csp_soap_client,cucm_variable_axl,cspconfigfile):
# *------------------------------------------------------------------
# * function do(logger,csp_soap_client,cucm_variable_axl)
# *
# * Copyright (C) 2016 Carlos Sanz <carlos.sanzpenas@gmail.com>
# *
# * This program is free software; you can redistribute it and/or
# * modify it under the terms of the GNU General Public License
# * as published by the Free Software Foundation; either version 2
# * of the License, or (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# *------------------------------------------------------------------
# *
# Mandatory (pattern,usage,routePartitionName)
# Realizamos la sincronizacion del LDAP
logger.debug('Ha entrado en la funcion do_start del archivo cspaxl_LdapSync.py')
logger.info('Vamos a realizar la sincronizacion del LDAP: %s' % cucm_variable_axl)
try:
csp_ldap = {'name': cucm_variable_axl}
result = csp_soap_client.service.doLdapSync(csp_ldap,sync=0)
except:
logger.debug(sys.exc_info())
logger.error(sys.exc_info()[1])
return {'Status': False, 'Detail': sys.exc_info()[1]}
else:
return {'Status':True,'Detail':result['return']}
def do_cancel(logger,csp_soap_client,cucm_variable_axl):
# *------------------------------------------------------------------
# * function do(logger,csp_soap_client,cucm_variable_axl)
# *
# * Copyright (C) 2016 Carlos Sanz <carlos.sanzpenas@gmail.com>
# *
# * This program is free software; you can redistribute it and/or
# * modify it under the terms of the GNU General Public License
# * as published by the Free Software Foundation; either version 2
# * of the License, or (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# *------------------------------------------------------------------
# *
# Mandatory (pattern,usage,routePartitionName)
# Damos de alta el Translation Pattern
try:
result = csp_soap_client.service.doLdapSync(name=cucm_variable_axl,sync='false')
except:
logger.debug(sys.exc_info())
logger.error(sys.exc_info()[1])
return {'Status': False, 'Detail': sys.exc_info()[1]}
else:
return {'Status':True,'Detail':result['return']}
| 43.86
| 88
| 0.620839
| 553
| 4,386
| 4.853526
| 0.267631
| 0.036885
| 0.043592
| 0.063711
| 0.855067
| 0.855067
| 0.828241
| 0.802906
| 0.802906
| 0.802906
| 0
| 0.016881
| 0.203146
| 4,386
| 99
| 89
| 44.30303
| 0.751073
| 0.705426
| 0
| 0.636364
| 0
| 0
| 0.14742
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.045455
| 0
| 0.318182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a6359a6ccad0be2cf0b18f48e84e4e3fe4d31157
| 43,417
|
py
|
Python
|
main.py
|
Harsh9524/education4.0
|
a7be2571a2135b32856f39b572fe9169845ad7b9
|
[
"Apache-2.0"
] | 3
|
2020-04-06T18:55:33.000Z
|
2020-04-07T14:19:14.000Z
|
main.py
|
Harsh9524/education4.0
|
a7be2571a2135b32856f39b572fe9169845ad7b9
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
Harsh9524/education4.0
|
a7be2571a2135b32856f39b572fe9169845ad7b9
|
[
"Apache-2.0"
] | 2
|
2020-04-07T13:49:03.000Z
|
2020-04-13T17:09:40.000Z
|
from flask import Flask,render_template,url_for,request,redirect
app = Flask(__name__)

# In-memory credential store mapping e-mail -> password.
# NOTE(review): passwords are stored and compared in plaintext — fine for a
# classroom demo, never for production.
# (The original `global logins` / `global qno` statements were removed:
# `global` at module scope is a no-op in Python.)
logins = {"prashantarya.juit@gmail.com": "qwerty"}

# qno: question counter — initialised here but never updated in this file.
qno = 0
@app.route("/")
def signin():
return render_template("sign-in.html")
@app.route("/login",methods=['GET','POST'])
def login():
if request.method=='POST':
email=request.form['userMail']
password=request.form['password']
print (email,password)
#return redirect(url_for('signin'))
if logins[email]==password:
#return "Hello you have been logged in"
return redirect(url_for("question1"))
else:
return "Not able to login"
@app.route("/register")
def register():
return render_template("register.html")
@app.route("/newuser",methods=["GET","POST"])
def newuser():
if request.method=="POST":
name=request.form['yourName']
email=request.form['yourMail']
class_user=request.form['userClass']
subject=request.form['userSub']
password=request.form['pass1']
conf_password=request.form['pass2']
if request.form.get('check'):
TandC_read=True
else:
TandC_read=False
print("Name= ",name)
print("email= ",email)
print("Class= ",class_user)
print("Subject= ",subject)
print("Password= ",password)
print("Confirm Passord= ",conf_password)
print("Terms and Conditions= ",TandC_read)
if email not in logins.keys() and password==conf_password:
logins[email]=password
if password != conf_password:
return "Passwords do not match"
return redirect(url_for('signin'))
@app.route("/question1")
def question1():
return render_template("Maths-q1.html")
@app.route("/quesAns1",methods=["GET","POST"])
def quesAns1():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question2"))
@app.route("/question2")
def question2():
return render_template("Maths-q2.html")
@app.route("/quesAns2",methods=["GET","POST"])
def quesAns2():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question3"))
@app.route("/question3")
def question3():
return render_template("Maths-q3.html")
@app.route("/quesAns3",methods=["GET","POST"])
def quesAns3():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question4"))
@app.route("/question4")
def question4():
return render_template("Maths-q4.html")
@app.route("/quesAns4",methods=["GET","POST"])
def quesAns4():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question5"))
@app.route("/question5")
def question5():
return render_template("Maths-q5.html")
@app.route("/quesAns5",methods=["GET","POST"])
def quesAns5():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question6"))
@app.route("/question6")
def question6():
return render_template("Maths-q6.html")
@app.route("/quesAns6",methods=["GET","POST"])
def quesAns6():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question7"))
@app.route("/question7")
def question7():
return render_template("Maths-q7.html")
@app.route("/quesAns7",methods=["GET","POST"])
def quesAns7():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question8"))
@app.route("/question8")
def question8():
return render_template("Maths-q8.html")
@app.route("/quesAns8",methods=["GET","POST"])
def quesAns8():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question9"))
@app.route("/question9")
def question9():
return render_template("Maths-q9.html")
@app.route("/quesAns9",methods=["GET","POST"])
def quesAns9():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question10"))
@app.route("/question10")
def question10():
return render_template("Maths-q10.html")
@app.route("/quesAns10",methods=["GET","POST"])
def quesAns10():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question11"))
@app.route("/question11")
def question11():
return render_template("Maths-q11.html")
@app.route("/quesAns11",methods=["GET","POST"])
def quesAns11():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question11"))
@app.route("/question12")
def question12():
return render_template("Maths-q12.html")
@app.route("/quesAns12",methods=["GET","POST"])
def quesAns12():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question13"))
@app.route("/question13")
def question13():
return render_template("Maths-q13.html")
@app.route("/quesAns13",methods=["GET","POST"])
def quesAns13():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question14"))
@app.route("/question14")
def question14():
return render_template("Maths-q14.html")
@app.route("/quesAns14",methods=["GET","POST"])
def quesAns14():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question15"))
@app.route("/question15")
def question15():
return render_template("Maths-q15.html")
@app.route("/quesAns15",methods=["GET","POST"])
def quesAns15():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question16"))
@app.route("/question16")
def question16():
return render_template("Maths-q16.html")
@app.route("/quesAns16",methods=["GET","POST"])
def quesAns16():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question17"))
@app.route("/question17")
def question17():
return render_template("Maths-q17.html")
@app.route("/quesAns17",methods=["GET","POST"])
def quesAns17():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question18"))
@app.route("/question18")
def question18():
return render_template("Maths-q18.html")
@app.route("/quesAns18",methods=["GET","POST"])
def quesAns18():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question19"))
@app.route("/question19")
def question19():
return render_template("Maths-q19.html")
@app.route("/quesAns19",methods=["GET","POST"])
def quesAns19():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question20"))
@app.route("/question20")
def question20():
return render_template("Maths-q20.html")
@app.route("/quesAns20",methods=["GET","POST"])
def quesAns20():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question21"))
@app.route("/question21")
def question21():
return render_template("Maths-q21.html")
@app.route("/quesAns21",methods=["GET","POST"])
def quesAns21():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question22"))
@app.route("/question22")
def question22():
return render_template("Maths-q22.html")
@app.route("/quesAns22",methods=["GET","POST"])
def quesAns22():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question23"))
@app.route("/question23")
def question23():
return render_template("Maths-q23.html")
@app.route("/quesAns23",methods=["GET","POST"])
def quesAns23():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question24"))
@app.route("/question24")
def question24():
return render_template("Maths-q24.html")
@app.route("/quesAns24",methods=["GET","POST"])
def quesAns24():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question25"))
@app.route("/question25")
def question25():
return render_template("Maths-q25.html")
@app.route("/quesAns25",methods=["GET","POST"])
def quesAns25():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question26"))
@app.route("/question26")
def question26():
return render_template("Maths-q26.html")
@app.route("/quesAns26",methods=["GET","POST"])
def quesAns26():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question27"))
@app.route("/question27")
def question27():
return render_template("Maths-q27.html")
@app.route("/quesAns27",methods=["GET","POST"])
def quesAns27():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question28"))
@app.route("/question28")
def question28():
return render_template("Maths-q28.html")
@app.route("/quesAns28",methods=["GET","POST"])
def quesAns28():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question29"))
@app.route("/question29")
def question29():
return render_template("Maths-q29.html")
@app.route("/quesAns29",methods=["GET","POST"])
def quesAns29():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question30"))
@app.route("/question30")
def question30():
return render_template("Maths-q30.html")
@app.route("/quesAns30",methods=["GET","POST"])
def quesAns30():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question31"))
@app.route("/question31")
def question31():
return render_template("Maths-q31.html")
@app.route("/quesAns31",methods=["GET","POST"])
def quesAns31():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question32"))
@app.route("/question32")
def question32():
return render_template("Maths-q32.html")
@app.route("/quesAns32",methods=["GET","POST"])
def quesAns32():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question33"))
@app.route("/question33")
def question33():
return render_template("Maths-q33.html")
@app.route("/quesAns33",methods=["GET","POST"])
def quesAns33():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question34"))
@app.route("/question34")
def question34():
return render_template("Maths-q34.html")
@app.route("/quesAns34",methods=["GET","POST"])
def quesAns34():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question35"))
@app.route("/question35")
def question35():
return render_template("Maths-q35.html")
@app.route("/quesAns35",methods=["GET","POST"])
def quesAns35():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question36"))
@app.route("/question36")
def question36():
return render_template("Maths-q36.html")
@app.route("/quesAns36",methods=["GET","POST"])
def quesAns36():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question37"))
@app.route("/question37")
def question37():
return render_template("Maths-q37.html")
@app.route("/quesAns37",methods=["GET","POST"])
def quesAns37():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question38"))
@app.route("/question38")
def question38():
return render_template("Maths-q38.html")
@app.route("/quesAns38",methods=["GET","POST"])
def quesAns38():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question39"))
@app.route("/question39")
def question39():
return render_template("Maths-q39.html")
@app.route("/quesAns39",methods=["GET","POST"])
def quesAns39():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question40"))
@app.route("/question40")
def question40():
return render_template("Maths-q40.html")
@app.route("/quesAns40",methods=["GET","POST"])
def quesAns40():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question41"))
@app.route("/question41")
def question41():
return render_template("Maths-q41.html")
@app.route("/quesAns41",methods=["GET","POST"])
def quesAns41():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question42"))
@app.route("/question42")
def question42():
return render_template("Maths-q42.html")
@app.route("/quesAns42",methods=["GET","POST"])
def quesAns42():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question43"))
@app.route("/question43")
def question43():
return render_template("Maths-q43.html")
@app.route("/quesAns43",methods=["GET","POST"])
def quesAns43():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question44"))
@app.route("/question44")
def question44():
return render_template("Maths-q44.html")
@app.route("/quesAns44",methods=["GET","POST"])
def quesAns44():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question45"))
@app.route("/question45")
def question45():
return render_template("Maths-q45.html")
@app.route("/quesAns45",methods=["GET","POST"])
def quesAns45():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question46"))
@app.route("/question46")
def question46():
return render_template("Maths-q46.html")
@app.route("/quesAns46",methods=["GET","POST"])
def quesAns46():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question47"))
@app.route("/question47")
def question47():
return render_template("Maths-q47.html")
@app.route("/quesAns47",methods=["GET","POST"])
def quesAns47():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question48"))
@app.route("/question48")
def question48():
return render_template("Maths-q48.html")
@app.route("/quesAns48",methods=["GET","POST"])
def quesAns48():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question49"))
@app.route("/question49")
def question49():
return render_template("Maths-q49.html")
@app.route("/quesAns49",methods=["GET","POST"])
def quesAns49():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("question50"))
@app.route("/question50")
def question50():
return render_template("Maths-q50.html")
@app.route("/quesAns50",methods=["GET","POST"])
def quesAns50():
if request.method=="POST":
option1=""
if request.form.get("defaultCheck1"):
option1=True
else:
option1=False
option2=""
if request.form.get("defaultCheck2"):
option2=True
else:
option2=False
option3=""
if request.form.get("defaultCheck3"):
option3=True
else:
option3=False
option4=""
if request.form.get("defaultCheck4"):
option4=True
else:
option4=False
print(option1,option2,option3,option4)
return redirect(url_for("signin"))
@app.route("/terms")
def terms():
return "Bhai ne bola Accept karna hai toh karna hai"
if __name__ == "__main__":
app.run(host="0.0.0.0",port=8000,debug=True)
| 27.795775
| 67
| 0.534606
| 4,078
| 43,417
| 5.660373
| 0.060078
| 0.098644
| 0.1132
| 0.139323
| 0.706581
| 0.705454
| 0.705454
| 0.703375
| 0.703375
| 0.703375
| 0
| 0.054274
| 0.339245
| 43,417
| 1,561
| 68
| 27.813581
| 0.750349
| 0.001658
| 0
| 0.761019
| 0
| 0
| 0.136997
| 0.000646
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072314
| false
| 0.007576
| 0.000689
| 0.036501
| 0.146694
| 0.039945
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a640fcb9b9ea5d4c00223258e0697f0e89bdbaf6
| 8,956
|
py
|
Python
|
Assignments/HW6-RegressionProblems2-Yan Gu/LinearRegression/question4.py
|
billgoo/Rutgers-CS536-Machine-Learning
|
944efbc6ee5ccd2d226e420ed61528767023aab7
|
[
"MIT"
] | null | null | null |
Assignments/HW6-RegressionProblems2-Yan Gu/LinearRegression/question4.py
|
billgoo/Rutgers-CS536-Machine-Learning
|
944efbc6ee5ccd2d226e420ed61528767023aab7
|
[
"MIT"
] | null | null | null |
Assignments/HW6-RegressionProblems2-Yan Gu/LinearRegression/question4.py
|
billgoo/Rutgers-CS536-Machine-Learning
|
944efbc6ee5ccd2d226e420ed61528767023aab7
|
[
"MIT"
] | null | null | null |
import pandas as pd
import math
import matplotlib.pyplot as plt
import csv
import numpy as np
from data_generator import DataGenerator
from lassoRegression import LassoRegression
def show_Picture(x_data, y_data, y_data_name, x_label, y_label, title):
    """Plot y_data against x_data, save the figure, then display it.

    The output file name is derived from the 5th character of *title*
    (the figure number), e.g. "Fig 4: ..." -> images/Figure.4.png.
    """
    plt.figure(figsize=(16, 8))
    plt.plot(x_data, y_data, marker='.', c='red', lw=0.5, label=y_data_name)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.title(title)
    out_path = 'LinearRegression/images/Figure.' + title[4] + '.png'
    plt.savefig(out_path, bbox_inches='tight')
    plt.show()
if __name__ == "__main__":
    # NOTE: the earlier exploratory sweeps over coarser lambda grids (0-800
    # in large steps, writing results.csv / results4.2.csv) were dead code
    # inside a triple-quoted string; removed. Only the final 0..34 sweep runs.

    feature_cols = ['b'] + ['X%d' % i for i in range(1, 21)]

    # Both input files are loop-invariant: read the fitted weights and the
    # one-million-sample test set ONCE instead of once per lambda (the
    # original re-parsed the million-row CSV on every iteration).
    data = pd.read_csv('LinearRegression/data/question3/results3.1.csv',
                       names=['lambd'] + feature_cols + ['train_err', 'zero_count'])
    test_data = pd.read_csv('LinearRegression/data/question1_m_1000000.csv',
                            names=feature_cols + ['y'])
    test_X = test_data[test_data.columns[:-1]].values
    test_y = test_data[['y']].values
    m = len(test_y)

    result = []
    for lambd in range(35):
        print(lambd)
        # (original had a harmless duplicated "weight = weight =" assignment)
        weight = data[data['lambd'] == lambd][data.columns[1:22]].values
        # Mean squared error over the test set, vectorized; replaces the
        # million-iteration Python loop of math.pow(np.dot(...) - y, 2).
        residual = test_X.dot(weight.T) - test_y
        test_err = float(np.sum(residual ** 2) / m)
        result.append([lambd, test_err])
        print([lambd, test_err])

    # Dump (lambda, test error) pairs so they can be re-read below.
    with open('LinearRegression/data/question4/results4.3.csv', 'w', newline='') as csvfile:
        spamwriter = csv.writer(csvfile, delimiter=',',
                                quotechar='|', quoting=csv.QUOTE_MINIMAL)
        for row in result:
            spamwriter.writerow(row)

    # Re-read the re-formatted data and draw the test-error-vs-lambda figure.
    datamap = pd.read_csv('LinearRegression/data/question4/results4.3.csv',
                          names=['lambd', 'test_error'])
    show_Picture(datamap['lambd'], datamap['test_error'], "True error",
                 "lambd", "Testing error for each lambd",
                 "Fig 4: True error as function of lambd.")
| 47.386243
| 92
| 0.508821
| 1,103
| 8,956
| 3.99456
| 0.147779
| 0.039719
| 0.026555
| 0.029505
| 0.875397
| 0.87222
| 0.871312
| 0.871312
| 0.871312
| 0.871312
| 0
| 0.091531
| 0.314426
| 8,956
| 189
| 93
| 47.386243
| 0.626059
| 0.012282
| 0
| 0.081633
| 0
| 0
| 0.197395
| 0.087098
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020408
| false
| 0
| 0.142857
| 0
| 0.163265
| 0.040816
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a641bf19133c8fcc65539b03640202dd05e54b07
| 29
|
py
|
Python
|
pysatCDF/__init__.py
|
pysat/pysatCDF
|
e57449fe412f7244021469617e98ac26d1864ad0
|
[
"BSD-3-Clause"
] | 1
|
2022-02-18T20:28:49.000Z
|
2022-02-18T20:28:49.000Z
|
pysatCDF/__init__.py
|
gregstarr/pysatCDF
|
9d987b0ca925bf369e53113577b4737a4c4278ed
|
[
"BSD-3-Clause"
] | 15
|
2019-10-09T22:24:56.000Z
|
2022-03-24T15:34:58.000Z
|
pysatCDF/__init__.py
|
gregstarr/pysatCDF
|
9d987b0ca925bf369e53113577b4737a4c4278ed
|
[
"BSD-3-Clause"
] | 2
|
2021-01-04T20:13:09.000Z
|
2021-03-12T01:04:44.000Z
|
from ._cdf import CDF as CDF
| 14.5
| 28
| 0.758621
| 6
| 29
| 3.5
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.206897
| 29
| 1
| 29
| 29
| 0.913043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a6506557e183c0dc1a4f91f0e7a4ca821383585b
| 114
|
py
|
Python
|
django_server/accounts/admin.py
|
forkcs/mycode
|
6319266cba70111cd229b15d163cccbc1918410c
|
[
"MIT"
] | null | null | null |
django_server/accounts/admin.py
|
forkcs/mycode
|
6319266cba70111cd229b15d163cccbc1918410c
|
[
"MIT"
] | null | null | null |
django_server/accounts/admin.py
|
forkcs/mycode
|
6319266cba70111cd229b15d163cccbc1918410c
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django_server.accounts.views import Account
# Expose the Account model in the Django admin site with the default ModelAdmin.
admin.site.register(Account)
| 16.285714
| 48
| 0.833333
| 16
| 114
| 5.875
| 0.6875
| 0.212766
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 114
| 6
| 49
| 19
| 0.921569
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a6b100d4161ea7cb16c85a6bfa98d4c894811116
| 4,177
|
py
|
Python
|
magnum/solvers/milp/test_milp.py
|
mvcisback/magnumSTL
|
e48d641118bc9c1fb28be2a38a55654441a78701
|
[
"BSD-3-Clause"
] | 1
|
2016-10-07T20:10:35.000Z
|
2016-10-07T20:10:35.000Z
|
magnum/solvers/milp/test_milp.py
|
mvcisback/py-blustl
|
e48d641118bc9c1fb28be2a38a55654441a78701
|
[
"BSD-3-Clause"
] | 15
|
2016-07-01T04:46:09.000Z
|
2017-01-06T22:09:20.000Z
|
magnum/solvers/milp/test_milp.py
|
mvcisback/py-blustl
|
e48d641118bc9c1fb28be2a38a55654441a78701
|
[
"BSD-3-Clause"
] | 5
|
2016-12-23T06:12:40.000Z
|
2017-01-10T01:58:27.000Z
|
import stl
import traces
import pytest
from magnum.solvers.milp import milp
def test_game_to_milp_smoke():
    """Smoke test: encoding the feasible example as a MILP must not raise."""
    from magnum.examples.feasible_example import feasible_example as g
    milp.game_to_milp(g)
def test_feasible():
    """The feasible example solves at cost 5; its inversion violates the spec."""
    from magnum.examples.feasible_example import feasible_example as g
    from stl.boolean_eval import pointwise_sat

    # Original game: the solution satisfies the (undiscretized) spec.
    outcome = milp.encode_and_run(g)
    spec = g.spec_as_stl(discretize=False)
    step = g.model.dt
    assert pointwise_sat(spec, dt=step)(outcome.solution)
    assert pytest.approx(outcome.cost) == 5

    # Inverted game: same cost, but the spec is no longer satisfied.
    outcome = milp.encode_and_run(g.invert())
    spec = g.spec_as_stl(discretize=False)
    step = g.model.dt
    assert not pointwise_sat(spec, dt=step)(outcome.solution)
    assert pytest.approx(outcome.cost) == 5
def test_one_player_rps_feasibility():
    """Both the RPS game and its inversion are feasible with cost 10."""
    from magnum.examples.rock_paper_scissors import rps as g
    from stl.boolean_eval import pointwise_sat

    for game in (g, g.invert()):
        outcome = milp.encode_and_run(game)
        spec = game.spec_as_stl(discretize=False)
        assert pointwise_sat(spec, dt=game.model.dt)(outcome.solution)
        assert pytest.approx(outcome.cost) == 10
def test_one_player_rps_robustness():
    """Encoding RPS with one counter-example trace must not raise."""
    from magnum.examples.rock_paper_scissors import rps as g
    # Adversary input 'w' fixed to 20/60 at t=0 (the "paper" response used
    # in test_rps_counter_examples below).
    ces = [{'w': traces.TimeSeries([(0, 20 / 60)])}]
    milp.encode_and_run(g, counter_examples=ces)
def test_rps_counter_examples():
    """Track feasibility/cost of RPS as counter-example traces accumulate.

    Each counter-example pins the adversary input ('w', or 'u' after
    inversion) at t=0; the solver must find a response that still satisfies
    the spec, until enough counter-examples make the game infeasible.
    """
    from magnum.examples.rock_paper_scissors import rps as g
    from stl.boolean_eval import pointwise_sat
    # Respond to Paper
    ces = [{'w': traces.TimeSeries([(0, 20 / 60)])}]
    res = milp.encode_and_run(g, counter_examples=ces)
    assert res.feasible
    assert pytest.approx(res.cost) == 10
    phi = stl.parse('X((x >= 10) & (x <= 50))')
    assert pointwise_sat(phi, dt=g.model.dt)(res.solution)
    # Respond to Scissors and Paper
    ces.append({'w': traces.TimeSeries([(0, 40 / 60)])})
    res = milp.encode_and_run(g, counter_examples=ces)
    assert res.feasible
    assert pytest.approx(res.cost) == 10
    phi = stl.parse('X((x >= 30) & (x <= 50))')
    assert pointwise_sat(phi, dt=g.model.dt)(res.solution)
    # With a third counter-example the game becomes infeasible (cost 0).
    ces.append({'w': traces.TimeSeries([(0, 0)])})
    res = milp.encode_and_run(g, counter_examples=ces)
    assert not res.feasible
    assert pytest.approx(res.cost) == 0
    phi = stl.parse('X((x = 10) | (x = 30) | (x = 50))')
    assert pointwise_sat(phi, dt=g.model.dt)(res.solution)
    # Inverted game: stays feasible under every counter-example below.
    g = g.invert()
    res = milp.encode_and_run(g)
    assert res.feasible
    assert pytest.approx(res.cost) == 10
    ces = [{'u': traces.TimeSeries([(0, 20 / 60)])}]
    res = milp.encode_and_run(g, counter_examples=ces)
    assert res.feasible
    assert pytest.approx(res.cost) == 10
    ces = [{'u': traces.TimeSeries([(0, 40 / 60)])}]
    res = milp.encode_and_run(g, counter_examples=ces)
    assert res.feasible
    assert pytest.approx(res.cost) == 10
    ces = [({'u': traces.TimeSeries([(0, 0)])})]
    res = milp.encode_and_run(g, counter_examples=ces)
    assert res.feasible
    ces = [({'u': traces.TimeSeries([(0, 1)])})]
    res = milp.encode_and_run(g, counter_examples=ces)
    assert res.feasible
def test_counter_examples():
    """Feasibility of example 2 shrinks as counter-examples accumulate."""
    from magnum.examples.feasible_example2 import feasible_example as g

    def make_ce(value):
        # One adversary trace pinning 'w' to `value` at t=0.
        return {'w': traces.TimeSeries([(0, value)])}

    assert milp.encode_and_run(g).feasible

    res = milp.encode_and_run(g, counter_examples=[make_ce(0)])
    assert res.feasible

    res = milp.encode_and_run(g, counter_examples=[make_ce(1)])
    assert not res.feasible

    res = milp.encode_and_run(g, counter_examples=[make_ce(1), make_ce(0)])
    assert not res.feasible
def test_example3():
    """Example 3 is feasible and its solution satisfies the undiscretized spec."""
    from magnum.examples.feasible_example3 import feasible_example as g
    # Note: this test uses the fast boolean evaluator, unlike the others here.
    from stl.fastboolean_eval import pointwise_sat
    res = milp.encode_and_run(g)
    phi = g.spec_as_stl(discretize=False)
    assert res.feasible
    assert pointwise_sat(phi)(res.solution, 0)
| 29.415493
| 71
| 0.672253
| 630
| 4,177
| 4.285714
| 0.106349
| 0.066667
| 0.086667
| 0.106667
| 0.878148
| 0.837037
| 0.822593
| 0.784815
| 0.762222
| 0.735185
| 0
| 0.021887
| 0.190567
| 4,177
| 141
| 72
| 29.624113
| 0.776693
| 0.011013
| 0
| 0.643564
| 0
| 0
| 0.022529
| 0
| 0
| 0
| 0
| 0
| 0.306931
| 1
| 0.069307
| false
| 0
| 0.148515
| 0
| 0.217822
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a6ca309ef0936687c0fa0dae7f8625ef7b59e8c1
| 70
|
py
|
Python
|
Python/Advanced OOP/Inheritance/Zoo/05. Mammal.py
|
teodoramilcheva/softuni-software-engineering
|
98dc9faa66f42570f6538fd7ef186d2bd1d39bff
|
[
"MIT"
] | null | null | null |
Python/Advanced OOP/Inheritance/Zoo/05. Mammal.py
|
teodoramilcheva/softuni-software-engineering
|
98dc9faa66f42570f6538fd7ef186d2bd1d39bff
|
[
"MIT"
] | null | null | null |
Python/Advanced OOP/Inheritance/Zoo/05. Mammal.py
|
teodoramilcheva/softuni-software-engineering
|
98dc9faa66f42570f6538fd7ef186d2bd1d39bff
|
[
"MIT"
] | null | null | null |
from project.animal import Animal
class Mammal(Animal):
    """A mammal; inherits all behavior from Animal without overrides."""
    pass
| 14
| 34
| 0.714286
| 9
| 70
| 5.555556
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.228571
| 70
| 5
| 35
| 14
| 0.925926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
a6d83465c713aa0175d377de870f49d4e500e34b
| 3,854
|
py
|
Python
|
Http_Server.py
|
Ronak-Texe/Control-Panel-Interface-with-Cloud
|
8cd32ef186e2076f7cba3a61971b053bc0053ac3
|
[
"MIT"
] | null | null | null |
Http_Server.py
|
Ronak-Texe/Control-Panel-Interface-with-Cloud
|
8cd32ef186e2076f7cba3a61971b053bc0053ac3
|
[
"MIT"
] | null | null | null |
Http_Server.py
|
Ronak-Texe/Control-Panel-Interface-with-Cloud
|
8cd32ef186e2076f7cba3a61971b053bc0053ac3
|
[
"MIT"
] | null | null | null |
#import http.server
#from threading import thread
#
##filepath="temp.txt"
#
#class Handler( http.server.BaseHTTPRequestHandler ):
#
# def do_GET( self ): # Reading
# if self.path=="/download":
# self.send_response(200)
# self.send_header( 'Content-type', 'text/html' )
# self.end_headers()
# message = "Hello world!"
# print("Hello world!")
# self.wfile.write(bytes(message, "utf8"))
#
# else:
# self.send_response(404)
# self.send_header( 'Content-type', 'text/html' )
# self.end_headers()
# message = "Unknown Request"
# self.wfile.write(bytes(message, "utf8"))
#
# def do_POST( self ): # Updating the file
#
# if self.path=="/upload":
# self.send_response(200)
# self.send_header( 'Content-type', 'text/html' )
# self.end_headers()
#
# #content_len = int(self.headers.getheader('content-length', 0))
# content_length = int(self.headers['Content-Length'])
# self.post_data = (self.rfile.read(content_length)).decode()
# print(self.post_data)
## self.ParseData(self.post_data)
#
# else:
# self.send_response(404)
# self.send_header( 'Content-type', 'text/html' )
# self.end_headers()
# self.wfile.write("Unknown request")
#
#
#httpd = http.server.HTTPServer( ('', 80), Handler )
#httpd.serve_forever()
import http.server
class Handler( http.server.BaseHTTPRequestHandler):
    """Minimal HTTP handler.

    GET /download returns a greeting; POST /upload and /upload2 save the
    request body to Receiver_Output.txt / Receiver_Output2.txt respectively.
    Any other path gets a 404.
    """

    def _send_headers(self, status):
        # Common response preamble shared by every branch.
        self.send_response(status)
        self.send_header('Content-type', 'text/html')
        self.end_headers()

    def _save_body(self, filename):
        # Read exactly Content-Length bytes from the request and persist them.
        content_length = int(self.headers['Content-Length'])
        self.post_data = (self.rfile.read(content_length)).decode()
        # 'with' guarantees the file is closed even if write() raises
        # (original leaked the handle on error).
        with open(filename, "w") as file_handle:
            file_handle.write(self.post_data)
        print(self.post_data)

    def do_GET(self):  # Reading
        if self.path == "/download":
            self._send_headers(200)
            message = "Hello world!"
            print("Hello world!")
            self.wfile.write(bytes(message, "utf8"))
        else:
            self._send_headers(404)
            message = "Unknown Request"
            self.wfile.write(bytes(message, "utf8"))

    def do_POST(self):  # Updating the file
        if self.path == "/upload":
            self._send_headers(200)
            self._save_body("Receiver_Output.txt")
        elif self.path == "/upload2":
            self._send_headers(200)
            self._save_body("Receiver_Output2.txt")
        else:
            self._send_headers(404)
            # Bug fix: wfile is a binary stream — the original passed a str,
            # which raises TypeError under Python 3.
            self.wfile.write(b"Unknown request")
# Bind to every interface on port 80 (a privileged port on most systems)
# and serve requests until interrupted.
httpd = http.server.HTTPServer( ('', 80), Handler )
httpd.serve_forever()
| 35.036364
| 77
| 0.535547
| 401
| 3,854
| 4.995012
| 0.177057
| 0.071892
| 0.065901
| 0.094358
| 0.948078
| 0.948078
| 0.948078
| 0.948078
| 0.948078
| 0.948078
| 0
| 0.015522
| 0.331344
| 3,854
| 110
| 78
| 35.036364
| 0.761738
| 0.433057
| 0
| 0.659091
| 0
| 0
| 0.128649
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.022727
| 0
| 0.090909
| 0.068182
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a6dabf8ab4ea9f1abe0585955cac546cb3e596df
| 16,846
|
py
|
Python
|
catkin_ws2_final/src/gp_abstract_sim/src/tag_to_odom.py
|
Michael-E-Sami/MSSR2
|
8903ac65048a87f2843818981d21a5372e40dc55
|
[
"Apache-2.0"
] | null | null | null |
catkin_ws2_final/src/gp_abstract_sim/src/tag_to_odom.py
|
Michael-E-Sami/MSSR2
|
8903ac65048a87f2843818981d21a5372e40dc55
|
[
"Apache-2.0"
] | null | null | null |
catkin_ws2_final/src/gp_abstract_sim/src/tag_to_odom.py
|
Michael-E-Sami/MSSR2
|
8903ac65048a87f2843818981d21a5372e40dc55
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -- coding: utf-8 --
import rospy
from apriltag_ros.msg import AprilTagDetection, AprilTagDetectionArray
from geometry_msgs.msg import Point, Point32, Pose, PoseStamped, Quaternion
# Mostly Useless Imports
from std_msgs.msg import Header, String, UInt16
from nav_msgs.msg import Odometry
from gazebo_msgs.srv import GetModelState
# ---------------------------------------------------------------------------------
kolio=AprilTagDetectionArray()  # scratch detection array (shadowed by a local in callback)
size = 0  # number of detections in the most recent message
yoda = Odometry()  # odometry message reused (and mutated) on every publish
flag = [False for i in range(10)]  # per-tag "publisher created" flags (see callback)
# ---------------------------------------------------------------------------------
_publishers = {}  # tag id -> cached rospy.Publisher, created on first detection


def callback(data):
    """Republish each AprilTag detection as an Odometry message on dittoN/odom.

    For tag id N (0-9) the detection pose is copied into the shared `yoda`
    message, the matching model's twist is fetched from Gazebo's
    get_model_state service, and the result is published on "dittoN/odom".

    Bug fixes vs. the original: range(0, size-1) skipped the last detection;
    `flag[n] == True` was a comparison (not an assignment), so flags never
    stuck and a fresh Publisher was created on every callback — publishers
    are now cached in _publishers; the ten copy-pasted per-tag branches are
    collapsed into one parameterized body.
    """
    global size
    global yoda
    print("ZEFT AWY")
    size = len(data.detections)
    print("size aho0000" + str(size))
    for i in range(size):
        # NOTE(review): the original read detections[i].id[i]; id[0] is
        # presumably the tag id of detection i — confirm against the
        # apriltag_ros message definition.
        tag = data.detections[i].id[0]
        print(tag)
        if not 0 <= tag <= 9:
            continue  # only tags 0-9 had branches in the original
        name = "ditto" + str(tag)
        if tag not in _publishers:
            _publishers[tag] = rospy.Publisher(name + "/odom", Odometry,
                                               queue_size=1000)
            rospy.wait_for_service('gazebo/get_model_state')
        yoda.child_frame_id = name
        pose = data.detections[i].pose.pose.pose
        yoda.pose.pose.position.x = pose.position.x
        yoda.pose.pose.position.y = pose.position.y
        yoda.pose.pose.position.z = pose.position.z
        yoda.pose.pose.orientation.x = pose.orientation.x
        yoda.pose.pose.orientation.y = pose.orientation.y
        yoda.pose.pose.orientation.z = pose.orientation.z
        yoda.pose.pose.orientation.w = pose.orientation.w
        serv = rospy.ServiceProxy('gazebo/get_model_state', GetModelState)
        resp = serv(name, 'ground_plane')
        yoda.twist.twist.linear.x = resp.twist.linear.x
        yoda.twist.twist.linear.y = resp.twist.linear.y
        yoda.twist.twist.linear.z = resp.twist.linear.z
        yoda.twist.twist.angular.x = resp.twist.angular.x
        yoda.twist.twist.angular.y = resp.twist.angular.y
        yoda.twist.twist.angular.z = resp.twist.angular.z
        _publishers[tag].publish(yoda)
# ---------------------------------------------------------------------------------
def py_accu_check():
    """Start the accuracy-check ROS node: subscribe to AprilTag detections and spin."""
    # Anonymous node so multiple instances can coexist without name clashes.
    rospy.init_node('py_accu_check', anonymous=True)
    # `callback` (defined earlier in this file) republishes per-tag odometry.
    rospy.Subscriber("tag_detections", AprilTagDetectionArray, callback)
    # Debug marker: shows in the log that the node finished wiring up.
    rospy.loginfo("7AAAAAAAAAARAAAAAAAAAAAAM")
    # Block and hand control to the ROS event loop.
    rospy.spin()
# ---------------------------------------------------------------------------------
if __name__ == '__main__':
    py_accu_check()
| 47.587571
| 95
| 0.579544
| 2,082
| 16,846
| 4.621518
| 0.057637
| 0.1746
| 0.157971
| 0.138225
| 0.862918
| 0.82249
| 0.82249
| 0.82249
| 0.82249
| 0.82249
| 0
| 0.011243
| 0.281966
| 16,846
| 353
| 96
| 47.72238
| 0.784226
| 0.023923
| 0
| 0.604839
| 0
| 0
| 0.052936
| 0.028293
| 0
| 0
| 0
| 0
| 0
| 1
| 0.008065
| false
| 0
| 0.024194
| 0
| 0.032258
| 0.012097
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a6e5f6f1be90c652c472b5f2165e92bcfec024b7
| 123
|
py
|
Python
|
agents/DQN/__init__.py
|
ksang/Voigt-Kampff
|
21f9ad172e5edf0fe50479eba816413f477b4c70
|
[
"MIT"
] | 3
|
2018-07-28T09:21:45.000Z
|
2020-04-11T15:01:12.000Z
|
agents/DQN/__init__.py
|
ksang/Voigt-Kampff
|
21f9ad172e5edf0fe50479eba816413f477b4c70
|
[
"MIT"
] | null | null | null |
agents/DQN/__init__.py
|
ksang/Voigt-Kampff
|
21f9ad172e5edf0fe50479eba816413f477b4c70
|
[
"MIT"
] | null | null | null |
from agents.DQN.replay_buffer import ReplayBuffer
from agents.DQN.config import Config
from agents.DQN.dqn import DQNAgent
| 30.75
| 49
| 0.853659
| 19
| 123
| 5.473684
| 0.473684
| 0.288462
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097561
| 123
| 3
| 50
| 41
| 0.936937
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
a6e9eaed810d6aed58c9eb8152fc5a4040606faa
| 87
|
py
|
Python
|
pyoperant/behavior/__init__.py
|
arouse01/pyoperant
|
e61de84862096720cca7dbecf517ee11c5d504d4
|
[
"BSD-3-Clause"
] | 1
|
2019-01-26T17:19:47.000Z
|
2019-01-26T17:19:47.000Z
|
pyoperant/behavior/__init__.py
|
arouse01/pyoperant
|
e61de84862096720cca7dbecf517ee11c5d504d4
|
[
"BSD-3-Clause"
] | null | null | null |
pyoperant/behavior/__init__.py
|
arouse01/pyoperant
|
e61de84862096720cca7dbecf517ee11c5d504d4
|
[
"BSD-3-Clause"
] | null | null | null |
# from two_alt_choice import *
from .go_nogo_interrupt import *
# from lights import *
| 21.75
| 32
| 0.770115
| 13
| 87
| 4.846154
| 0.692308
| 0.31746
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16092
| 87
| 3
| 33
| 29
| 0.863014
| 0.563218
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
470abd254e1830ee9c62ef69a8dd03d64054ab61
| 3,748
|
py
|
Python
|
python/test/datetime/test_next_time.py
|
takashiharano/util
|
0f730475386a77415545de3f9763e5bdeaab0e94
|
[
"MIT"
] | null | null | null |
python/test/datetime/test_next_time.py
|
takashiharano/util
|
0f730475386a77415545de3f9763e5bdeaab0e94
|
[
"MIT"
] | null | null | null |
python/test/datetime/test_next_time.py
|
takashiharano/util
|
0f730475386a77415545de3f9763e5bdeaab0e94
|
[
"MIT"
] | null | null | null |
#!python
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import util
def test_next_datetime():
    """Exercise util.next_datetime over a table of inputs and build a report.

    Each case appends the input moment, a human-readable expectation line
    ("(exp=...)"), and the actual return value of util.next_datetime so the
    two can be compared by eye.  Returns the full report string.

    The output is byte-identical to the original hand-unrolled version;
    the repeated 5-line case pattern has been folded into data tables.
    """
    arr = ['0300', '0900', '1200', '1800']

    def _dt_case(dtstr, label, *count):
        # One case keyed by a datetime string; *count optionally carries the
        # positional step-count argument (e.g. -1, -2, 2).
        dt = util.get_datetime(dtstr)
        return (dtstr + '\n' + label + '\n'
                + str(util.next_datetime(arr, *count, moment=dt)) + '\n')

    def _ts_case(ts, label):
        # One case keyed by a raw unix timestamp (int or float), passed
        # directly as the moment.
        return (str(ts) + '\n' + label + '\n'
                + str(util.next_datetime(arr, moment=ts)) + '\n')

    ret = '\n'
    ret += "['0300', '0900', '1200', '1800']\n"
    ret += '\n'
    ret += 'now\n'
    ret += str(util.next_datetime(arr)) + '\n'

    # Datetime-string inputs, default step count.
    for dtstr, label in [
        ('2019-02-28 00:00:00.0000', '(exp=03:00)'),
        ('2019-02-28 03:00:00.0000', '(exp=02-28 03:00)'),
        ('2019-02-28 03:00:00.0001', '(exp=02-28 09:00)'),
        ('2019-02-28 09:00:00.0000', '(exp=02-28 09:00)'),
        ('2019-02-28 10:00:00.0000', '(exp=02-28 12:00)'),
        ('2019-02-28 15:00:00.0000', '(exp=02-28 18:00)'),
        ('2019-02-28 19:00:00.0000', '(exp=03-01 03:00)'),
    ]:
        ret += '\n' + _dt_case(dtstr, label)

    # Unix-timestamp inputs in both float and int form.  Per the original
    # comments, 1551279600 is 2019-02-28 00:00 and 1551315600 is
    # 2019-02-28 10:00 (local time — presumably JST; confirm against util).
    for ts, label in [
        (1551279600.000, '(exp=02-28 03:00)'),
        (1551279600, '(exp=02-28 03:00)'),
        (1551315600.000, '(exp=02-28 12:00)'),
        (1551315600, '(exp=02-28 12:00)'),
    ]:
        ret += '\n' + _ts_case(ts, label)

    # Explicit step counts: -1 and -2 step backwards, 2 steps forward.
    for count, cases in [
        (-1, [('2019-02-28 00:00:00.0000', '(-1 exp=02-27 18:00)'),
              ('2019-02-28 04:00:00.0000', '(-1 exp=02-28 03:00)')]),
        (-2, [('2019-02-28 00:00:00.0000', '(-2 exp=02-27 12:00)'),
              ('2019-02-28 04:00:00.0000', '(-2 exp=02-27 18:00)')]),
        (2, [('2019-02-28 00:00:00.0000', '(2 exp=02-28 09:00)'),
             ('2019-02-28 04:00:00.0000', '(2 exp=02-28 12:00)')]),
    ]:
        ret += '----\n'
        ret += '\n'.join(_dt_case(dtstr, label, count) for dtstr, label in cases)
    return ret
def main():
    """CGI entry point: emit the test report as a text/plain response."""
    report = test_next_datetime()
    # Minimal CGI header, blank separator line, then the body.
    print('Content-Type: text/plain')
    print()
    print(report)


# Guarded so importing this module no longer runs the tests as a side
# effect (the original called main() unconditionally at import time).
if __name__ == '__main__':
    main()
| 23.872611
| 65
| 0.553095
| 645
| 3,748
| 3.153488
| 0.086822
| 0.07473
| 0.088496
| 0.123894
| 0.883481
| 0.883481
| 0.883481
| 0.870206
| 0.869223
| 0.852507
| 0
| 0.176766
| 0.203042
| 3,748
| 156
| 66
| 24.025641
| 0.504185
| 0.028549
| 0
| 0.7
| 0
| 0
| 0.233498
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016667
| false
| 0
| 0.025
| 0
| 0.05
| 0.025
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
471a594053ed63b206e73109f59ad82dc1eeba1f
| 24
|
py
|
Python
|
world/action/__init__.py
|
filesmuggler/rl-physnet
|
b6d9886c15d6619df331866cf6a98c61da8413e9
|
[
"MIT"
] | 1
|
2021-07-02T13:33:49.000Z
|
2021-07-02T13:33:49.000Z
|
world/action/__init__.py
|
mbed92/dao-perception
|
62b6e8a84a6704a50855434933a147f507f94263
|
[
"MIT"
] | 16
|
2018-01-21T20:59:28.000Z
|
2019-10-27T18:50:57.000Z
|
world/action/__init__.py
|
mbed92/dao-perception
|
62b6e8a84a6704a50855434933a147f507f94263
|
[
"MIT"
] | 2
|
2019-10-17T01:49:44.000Z
|
2019-10-25T04:14:06.000Z
|
from . import primitives
| 24
| 24
| 0.833333
| 3
| 24
| 6.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 24
| 1
| 24
| 24
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4721b25a31856959105470199d71eb3e90620809
| 5,032
|
py
|
Python
|
tests/modeling/layers/test_build.py
|
ad12/meddlr
|
dda5a4ad7855de3a34331c60599e3253f980e989
|
[
"Apache-2.0"
] | 23
|
2021-11-05T02:00:01.000Z
|
2022-03-21T15:35:38.000Z
|
tests/modeling/layers/test_build.py
|
ad12/meddlr
|
dda5a4ad7855de3a34331c60599e3253f980e989
|
[
"Apache-2.0"
] | 29
|
2021-11-04T22:18:26.000Z
|
2022-03-24T01:04:53.000Z
|
tests/modeling/layers/test_build.py
|
ad12/meddlr
|
dda5a4ad7855de3a34331c60599e3253f980e989
|
[
"Apache-2.0"
] | 1
|
2022-01-25T22:34:51.000Z
|
2022-01-25T22:34:51.000Z
|
from torch import nn
from meddlr.modeling import layers
from meddlr.modeling.layers.build import CUSTOM_LAYERS_REGISTRY, get_layer_type
def test_pt_layers_type():
    """get_layer_type resolves built-in torch.nn layers by name.

    Both spellings must be accepted: an explicit-dimension name
    (e.g. "conv2d") and a base name plus a dimension argument
    (e.g. ("conv", 2)).  Same assertions as before, table-driven.
    """
    # Layer families with distinct 1d/2d/3d classes.  Note that "dropout1d"
    # resolves to nn.Dropout (not a 1d-specific class), per the registry.
    dim_specific = {
        "conv": (nn.Conv1d, nn.Conv2d, nn.Conv3d),
        "convtranspose": (nn.ConvTranspose1d, nn.ConvTranspose2d, nn.ConvTranspose3d),
        "batchnorm": (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d),
        "instancenorm": (nn.InstanceNorm1d, nn.InstanceNorm2d, nn.InstanceNorm3d),
        "dropout": (nn.Dropout, nn.Dropout2d, nn.Dropout3d),
        "maxpool": (nn.MaxPool1d, nn.MaxPool2d, nn.MaxPool3d),
        "maxunpool": (nn.MaxUnpool1d, nn.MaxUnpool2d, nn.MaxUnpool3d),
        "avgpool": (nn.AvgPool1d, nn.AvgPool2d, nn.AvgPool3d),
    }
    for base, per_dim_types in dim_specific.items():
        for dim, expected in enumerate(per_dim_types, start=1):
            assert issubclass(get_layer_type(f"{base}{dim}d"), expected)
            assert issubclass(get_layer_type(base, dim), expected)

    # Layer families where a single class serves every dimension (also
    # resolvable with no dimension at all).
    dim_agnostic = {
        "syncbatchnorm": nn.SyncBatchNorm,
        "groupnorm": nn.GroupNorm,
        "layernorm": nn.LayerNorm,
    }
    for base, expected in dim_agnostic.items():
        assert issubclass(get_layer_type(base), expected)
        for dim in (1, 2, 3):
            assert issubclass(get_layer_type(base, dim), expected)
def test_custom_layers_type():
    """get_layer_type resolves meddlr's custom layers, case-insensitively."""
    expectations = [
        (("GaussianBlur",), layers.GaussianBlur),
        (("gaussianblur",), layers.GaussianBlur),
        (("convws", 2), layers.ConvWS2d),
        (("convws2d",), layers.ConvWS2d),
        (("convws", 3), layers.ConvWS3d),
        (("convws3d",), layers.ConvWS3d),
    ]
    for args, expected in expectations:
        assert issubclass(get_layer_type(*args), expected)
def test_custom_layer_conflicting_names():
    """Verify that lowercasing custom layers does not cause layer overlap."""
    registered = CUSTOM_LAYERS_REGISTRY._obj_map
    lowercased = {name.lower() for name in registered}
    # If two registered names differed only by case, lowercasing would
    # collapse them and the two lengths would disagree.
    assert len(lowercased) == len(registered)
| 53.531915
| 80
| 0.768084
| 635
| 5,032
| 5.837795
| 0.119685
| 0.144591
| 0.216887
| 0.4273
| 0.828163
| 0.806582
| 0.732398
| 0.161856
| 0.038845
| 0.038845
| 0
| 0.024866
| 0.112878
| 5,032
| 93
| 81
| 54.107527
| 0.805556
| 0.013315
| 0
| 0
| 0
| 0
| 0.127445
| 0
| 0
| 0
| 0
| 0
| 0.905405
| 1
| 0.040541
| false
| 0
| 0.040541
| 0
| 0.081081
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5b38390b5311b879edf89ba37fe859527ef0a738
| 31
|
py
|
Python
|
jeopardy/data/__init__.py
|
yngtodd/jeopardy
|
c8a58ae0996544f0733a7efb2a18d3e7ccdebb65
|
[
"MIT"
] | null | null | null |
jeopardy/data/__init__.py
|
yngtodd/jeopardy
|
c8a58ae0996544f0733a7efb2a18d3e7ccdebb65
|
[
"MIT"
] | null | null | null |
jeopardy/data/__init__.py
|
yngtodd/jeopardy
|
c8a58ae0996544f0733a7efb2a18d3e7ccdebb65
|
[
"MIT"
] | null | null | null |
from .data import JeopardyData
| 15.5
| 30
| 0.83871
| 4
| 31
| 6.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 31
| 1
| 31
| 31
| 0.962963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5b4d388f5d008d91434870b4622cf8ba97c79dc7
| 16,951
|
py
|
Python
|
RS_backbone.py
|
Xiaoyw1998/mmdet2140
|
75ff3b24f2e2eb2ad6ea1bfcf7e18f45a287222c
|
[
"Apache-2.0"
] | null | null | null |
RS_backbone.py
|
Xiaoyw1998/mmdet2140
|
75ff3b24f2e2eb2ad6ea1bfcf7e18f45a287222c
|
[
"Apache-2.0"
] | null | null | null |
RS_backbone.py
|
Xiaoyw1998/mmdet2140
|
75ff3b24f2e2eb2ad6ea1bfcf7e18f45a287222c
|
[
"Apache-2.0"
] | null | null | null |
from mmdet.models.backbones import DetectoRS_ResNet
def test_detectorrs_resnet_backbone():
    """Construction smoke test for a DetectoRS_ResNet (ResNet-50 + SAC).

    Instantiates the backbone from a fixed config and prints conv1's
    weight shape.  (The original had a stray no-op string
    '''Test init_weights config''' in the middle of the body — moved
    here as a real docstring and corrected, since init_weights is not
    exercised.)
    """
    detectorrs_cfg = dict(
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        conv_cfg=dict(type='ConvAWS'),
        sac=dict(type='SAC', use_deform=True),
        # SAC enabled for stages 2-4, as in the reference DetectoRS config.
        stage_with_sac=(False, True, True, True),
        output_img=True)
    model = DetectoRS_ResNet(**detectorrs_cfg)
    print(model.conv1.weight.shape)


if __name__ == '__main__':
    test_detectorrs_resnet_backbone()
"""
DetectoRS_ResNet(
(conv1): ConvAWS2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
(layer1): ResLayer(
(0): Bottleneck(
(conv1): ConvAWS2d(64, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): ConvAWS2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): ConvAWS2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(downsample): Sequential(
(0): ConvAWS2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): Bottleneck(
(conv1): ConvAWS2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): ConvAWS2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): ConvAWS2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
)
(2): Bottleneck(
(conv1): ConvAWS2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): ConvAWS2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): ConvAWS2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
)
)
(layer2): ResLayer(
(0): Bottleneck(
(conv1): ConvAWS2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): SAConv2d(
128, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False
(switch): Conv2d(128, 1, kernel_size=(1, 1), stride=(2, 2))
(pre_context): Conv2d(128, 128, kernel_size=(1, 1), stride=(1, 1))
(post_context): Conv2d(128, 128, kernel_size=(1, 1), stride=(1, 1))
(offset_s): Conv2d(128, 18, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
(offset_l): Conv2d(128, 18, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
)
(bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): ConvAWS2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(downsample): Sequential(
(0): ConvAWS2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)
(1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): Bottleneck(
(conv1): ConvAWS2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): SAConv2d(
128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False
(switch): Conv2d(128, 1, kernel_size=(1, 1), stride=(1, 1))
(pre_context): Conv2d(128, 128, kernel_size=(1, 1), stride=(1, 1))
(post_context): Conv2d(128, 128, kernel_size=(1, 1), stride=(1, 1))
(offset_s): Conv2d(128, 18, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(offset_l): Conv2d(128, 18, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
)
(bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): ConvAWS2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
)
(2): Bottleneck(
(conv1): ConvAWS2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): SAConv2d(
128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False
(switch): Conv2d(128, 1, kernel_size=(1, 1), stride=(1, 1))
(pre_context): Conv2d(128, 128, kernel_size=(1, 1), stride=(1, 1))
(post_context): Conv2d(128, 128, kernel_size=(1, 1), stride=(1, 1))
(offset_s): Conv2d(128, 18, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(offset_l): Conv2d(128, 18, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
)
(bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): ConvAWS2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
)
(3): Bottleneck(
(conv1): ConvAWS2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): SAConv2d(
128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False
(switch): Conv2d(128, 1, kernel_size=(1, 1), stride=(1, 1))
(pre_context): Conv2d(128, 128, kernel_size=(1, 1), stride=(1, 1))
(post_context): Conv2d(128, 128, kernel_size=(1, 1), stride=(1, 1))
(offset_s): Conv2d(128, 18, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(offset_l): Conv2d(128, 18, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
)
(bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): ConvAWS2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
)
)
(layer3): ResLayer(
(0): Bottleneck(
(conv1): ConvAWS2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): SAConv2d(
256, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False
(switch): Conv2d(256, 1, kernel_size=(1, 1), stride=(2, 2))
(pre_context): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1))
(post_context): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1))
(offset_s): Conv2d(256, 18, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
(offset_l): Conv2d(256, 18, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
)
(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): ConvAWS2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(downsample): Sequential(
(0): ConvAWS2d(512, 1024, kernel_size=(1, 1), stride=(2, 2), bias=False)
(1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): Bottleneck(
(conv1): ConvAWS2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): SAConv2d(
256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False
(switch): Conv2d(256, 1, kernel_size=(1, 1), stride=(1, 1))
(pre_context): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1))
(post_context): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1))
(offset_s): Conv2d(256, 18, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(offset_l): Conv2d(256, 18, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
)
(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): ConvAWS2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
)
(2): Bottleneck(
(conv1): ConvAWS2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): SAConv2d(
256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False
(switch): Conv2d(256, 1, kernel_size=(1, 1), stride=(1, 1))
(pre_context): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1))
(post_context): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1))
(offset_s): Conv2d(256, 18, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(offset_l): Conv2d(256, 18, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
)
(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): ConvAWS2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
)
(3): Bottleneck(
(conv1): ConvAWS2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): SAConv2d(
256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False
(switch): Conv2d(256, 1, kernel_size=(1, 1), stride=(1, 1))
(pre_context): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1))
(post_context): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1))
(offset_s): Conv2d(256, 18, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(offset_l): Conv2d(256, 18, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
)
(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): ConvAWS2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
)
(4): Bottleneck(
(conv1): ConvAWS2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): SAConv2d(
256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False
(switch): Conv2d(256, 1, kernel_size=(1, 1), stride=(1, 1))
(pre_context): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1))
(post_context): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1))
(offset_s): Conv2d(256, 18, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(offset_l): Conv2d(256, 18, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
)
(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): ConvAWS2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
)
(5): Bottleneck(
(conv1): ConvAWS2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): SAConv2d(
256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False
(switch): Conv2d(256, 1, kernel_size=(1, 1), stride=(1, 1))
(pre_context): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1))
(post_context): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1))
(offset_s): Conv2d(256, 18, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(offset_l): Conv2d(256, 18, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
)
(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): ConvAWS2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
)
)
(layer4): ResLayer(
(0): Bottleneck(
(conv1): ConvAWS2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): SAConv2d(
512, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False
(switch): Conv2d(512, 1, kernel_size=(1, 1), stride=(2, 2))
(pre_context): Conv2d(512, 512, kernel_size=(1, 1), stride=(1, 1))
(post_context): Conv2d(512, 512, kernel_size=(1, 1), stride=(1, 1))
(offset_s): Conv2d(512, 18, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
(offset_l): Conv2d(512, 18, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
)
(bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): ConvAWS2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(downsample): Sequential(
(0): ConvAWS2d(1024, 2048, kernel_size=(1, 1), stride=(2, 2), bias=False)
(1): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): Bottleneck(
(conv1): ConvAWS2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): SAConv2d(
512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False
(switch): Conv2d(512, 1, kernel_size=(1, 1), stride=(1, 1))
(pre_context): Conv2d(512, 512, kernel_size=(1, 1), stride=(1, 1))
(post_context): Conv2d(512, 512, kernel_size=(1, 1), stride=(1, 1))
(offset_s): Conv2d(512, 18, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(offset_l): Conv2d(512, 18, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
)
(bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): ConvAWS2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
)
(2): Bottleneck(
(conv1): ConvAWS2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): SAConv2d(
512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False
(switch): Conv2d(512, 1, kernel_size=(1, 1), stride=(1, 1))
(pre_context): Conv2d(512, 512, kernel_size=(1, 1), stride=(1, 1))
(post_context): Conv2d(512, 512, kernel_size=(1, 1), stride=(1, 1))
(offset_s): Conv2d(512, 18, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(offset_l): Conv2d(512, 18, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
)
(bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): ConvAWS2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
)
)
)
"""
| 58.05137
| 95
| 0.613297
| 2,599
| 16,951
| 3.881108
| 0.041554
| 0.043422
| 0.080896
| 0.089224
| 0.942005
| 0.92872
| 0.92763
| 0.92763
| 0.92763
| 0.92763
| 0
| 0.142462
| 0.190018
| 16,951
| 291
| 96
| 58.250859
| 0.592207
| 0.000708
| 0
| 0
| 0
| 0
| 0.041925
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.055556
| 0
| 0.111111
| 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5bbbf041f15793a3883b399c8b09c2265cda7959
| 42
|
py
|
Python
|
gazel/fixation_filters/__init__.py
|
devjeetr/pytrace
|
2d45d7edea484076d6319a68bef3cff250de035c
|
[
"MIT"
] | null | null | null |
gazel/fixation_filters/__init__.py
|
devjeetr/pytrace
|
2d45d7edea484076d6319a68bef3cff250de035c
|
[
"MIT"
] | null | null | null |
gazel/fixation_filters/__init__.py
|
devjeetr/pytrace
|
2d45d7edea484076d6319a68bef3cff250de035c
|
[
"MIT"
] | null | null | null |
from gazel.fixation_filters.core import *
| 21
| 41
| 0.833333
| 6
| 42
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 42
| 1
| 42
| 42
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5beac459c9fb7984e30576defe0dd1c00959324a
| 16,186
|
py
|
Python
|
azext_cdf/tester_test.py
|
ahelal/cdf
|
0c0e50123a42d701ca5133383dd20d5eabf43e7c
|
[
"MIT"
] | 1
|
2021-11-25T11:45:47.000Z
|
2021-11-25T11:45:47.000Z
|
azext_cdf/tester_test.py
|
ahelal/cdf
|
0c0e50123a42d701ca5133383dd20d5eabf43e7c
|
[
"MIT"
] | null | null | null |
azext_cdf/tester_test.py
|
ahelal/cdf
|
0c0e50123a42d701ca5133383dd20d5eabf43e7c
|
[
"MIT"
] | null | null | null |
''' tester test'''
import unittest
import tempfile
import copy
import os
import shutil
import random
import string
from mock import patch
from knack.util import CLIError
from azext_cdf.tester import run_test, _manage_git_upgrade
from azext_cdf.parser import ConfigParser, CONFIG_STATE_FILEPATH
from azext_cdf._supporter_test import assert_state
from azext_cdf.utils import run_command
# pylint: disable=C0111
def assert_run_count(self, run_dict):
for assert_key, assert_value in run_dict.items():
self.assertEqual(assert_key.call_count, assert_value)
class TesterNoUpgrade(unittest.TestCase):
def setUp(self):
self.config = {"name": "cdf_simple", "resource_group": "rg", "location": "loc"}
self.config["tests"] = {"default": {}, "patch": {}}
self.tmpdir = tempfile.mkdtemp()
self.config["tmp_dir"] = self.tmpdir
self.state_file = f"{self.tmpdir}/{''.join(random.sample(string.ascii_lowercase, 25))}.json"
self.config[CONFIG_STATE_FILEPATH] = f"file://{self.state_file}"
self.tests = ["default", "patch"]
self.upgrades = ["fresh"]
def tearDown(self):
shutil.rmtree(self.tmpdir)
@patch.object(ConfigParser, '_read_config')
@patch('azext_cdf.tester._run_expect_tests')
@patch('azext_cdf.tester._run_de_provision')
@patch('azext_cdf.tester._run_provision')
@patch('azext_cdf.tester._run_hook')
def test_simple_down_strategy_always(self, _run_hook, _run_provision, _run_de_provision, _run_expect_tests, _read_config):
self.config["name"] = 'test_simple_down_strategy_always'
_read_config.return_value = self.config
cobj = ConfigParser("/a/b/.cdf.yml")
results = run_test(None, cobj=cobj, config="/a/b/.cdf.yml", exit_on_error=False, test_args=["default", "patch"], working_dir=os.getcwd(),
down_strategy="always", upgrade_strategy="all")
assert_run_count(self, {_run_hook: 0, _run_provision: 2, _run_de_provision: 2, _run_expect_tests: 4})
for upgrade_path in self.upgrades:
self.assertEqual(len(results), len(self.upgrades))
self.assertIn(upgrade_path, results)
for test in self.tests:
for phase in ["provisioning", "provision expect", "de-provisioning", "de-provision expect"]:
self.assertFalse(results[upgrade_path][test][phase]["failed"])
self.assertIn(test, results[upgrade_path])
self.assertFalse(results[upgrade_path][test]["failed"])
assert_state(self, f"{self.tmpdir}/test_{upgrade_path}_{test}_state.json", {"name": f'{self.config["name"]}_{test}_test'})
assert_state(self, self.state_file, {"name": self.config["name"]})
self.assertEqual(cobj.name, self.config["name"])
self.assertEqual(cobj.tests, self.tests)
@patch.object(ConfigParser, '_read_config')
@patch('azext_cdf.tester._run_expect_tests')
@patch('azext_cdf.tester._run_de_provision')
@patch('azext_cdf.tester._run_provision')
@patch('azext_cdf.tester._run_hook')
def test_simple_down_strategy_success(self, _run_hook, _run_provision, _run_de_provision, _run_expect_tests, _read_config):
self.config["name"] = 'test_simple_down_strategy_success'
_read_config.return_value = self.config
cobj = ConfigParser("/a/b/.cdf.yml")
results = run_test(None, cobj=cobj, config="/a/b/.cdf.yml", exit_on_error=False, test_args=["default", "patch"], working_dir=os.getcwd(),
down_strategy="success", upgrade_strategy="all")
assert_run_count(self, {_run_hook: 0, _run_provision: 2, _run_de_provision: 2, _run_expect_tests: 4})
for upgrade_path in self.upgrades:
self.assertEqual(len(results), len(self.upgrades))
self.assertIn(upgrade_path, results)
for test in self.tests:
for phase in ["provisioning", "provision expect", "de-provisioning", "de-provision expect"]:
self.assertFalse(results[upgrade_path][test][phase]["failed"])
self.assertIn(test, results[upgrade_path])
self.assertFalse(results[upgrade_path][test]["failed"])
assert_state(self, f"{self.tmpdir}/test_{upgrade_path}_{test}_state.json", {"name": f'{self.config["name"]}_{test}_test'})
assert_state(self, self.state_file, {"name": self.config["name"]})
self.assertEqual(cobj.name, self.config["name"])
self.assertEqual(cobj.tests, self.tests)
@patch.object(ConfigParser, '_read_config')
@patch('azext_cdf.tester._run_expect_tests')
@patch('azext_cdf.tester._run_de_provision')
@patch('azext_cdf.tester._run_provision')
@patch('azext_cdf.tester._run_hook')
def test_simple_down_strategy_never(self, _run_hook, _run_provision, _run_de_provision, _run_expect_tests, _read_config):
self.config["name"] = 'test_simple_down_strategy_never'
_read_config.return_value = self.config
cobj = ConfigParser("/a/b/.cdf.yml")
results = run_test(None, cobj=cobj, config="/a/b/.cdf.yml", exit_on_error=False, test_args=["default", "patch"], working_dir=os.getcwd(),
down_strategy="never", upgrade_strategy="all")
assert_run_count(self, {_run_hook: 0, _run_provision: 2, _run_de_provision: 0, _run_expect_tests: 2})
for upgrade_path in self.upgrades:
self.assertEqual(len(results), len(self.upgrades))
self.assertIn(upgrade_path, results)
for test in self.tests:
for phase in ["provisioning", "provision expect"]:
self.assertFalse(results[upgrade_path][test][phase]["failed"])
for phase in ["de-provisioning", "de-provision expect"]:
self.assertFalse(results["fresh"][test].get(phase, False))
self.assertIn(test, results[upgrade_path])
self.assertFalse(results[upgrade_path][test]["failed"])
assert_state(self, f"{self.tmpdir}/test_{upgrade_path}_{test}_state.json", {"name": f'{self.config["name"]}_{test}_test'})
assert_state(self, self.state_file, {"name": self.config["name"]})
self.assertEqual(cobj.name, self.config["name"])
self.assertEqual(cobj.tests, self.tests)
@patch('azext_cdf.tester.de_provision')
@patch.object(ConfigParser, '_read_config')
@patch('azext_cdf.tester._run_expect_tests')
@patch('azext_cdf.tester._run_de_provision')
@patch('azext_cdf.tester._run_provision')
@patch('azext_cdf.tester._run_hook')
def test_simple_failed_provision(self, _run_hook, _run_provision, _run_de_provision, _run_expect_tests, _read_config, de_provision):
self.config["name"] = 'test_simple_failed_provision'
_read_config.return_value = self.config
cobj = ConfigParser("/a/b/.cdf.yml")
_run_provision.side_effect = CLIError("Nooo")
results = run_test(None, cobj=cobj, config="/a/b/.cdf.yml", exit_on_error=False, test_args=["default", "patch"], working_dir=os.getcwd(),
down_strategy="always", upgrade_strategy="all")
assert_run_count(self, {_run_hook: 0, _run_provision: 2, _run_de_provision: 0, _run_expect_tests: 0, de_provision: 2})
for upgrade_path in self.upgrades:
self.assertEqual(len(results), len(self.upgrades))
self.assertIn(upgrade_path, results)
for test in self.tests:
for phase in ["provisioning"]:
self.assertTrue(results[upgrade_path][test][phase]["failed"])
for phase in ["de-provisioning", "de-provision expect", "provision expect"]:
self.assertFalse(results["fresh"][test].get(phase, False))
self.assertIn(test, results[upgrade_path])
self.assertTrue(results[upgrade_path][test]["failed"])
assert_state(self, f"{self.tmpdir}/test_{upgrade_path}_{test}_state.json", {"name": f'{self.config["name"]}_{test}_test'})
assert_state(self, self.state_file, {"name": self.config["name"]})
self.assertEqual(cobj.name, self.config["name"])
self.assertEqual(cobj.tests, self.tests)
@patch('azext_cdf.tester.de_provision')
@patch.object(ConfigParser, '_read_config')
@patch('azext_cdf.tester._run_expect_tests')
@patch('azext_cdf.tester._run_de_provision')
@patch('azext_cdf.tester._run_provision')
@patch('azext_cdf.tester._run_hook')
def test_simple_failed_provision_exit_on_error(self, _run_hook, _run_provision, _run_de_provision, _run_expect_tests, _read_config, de_provision):
self.config["name"] = 'test_simple_failed_provision'
_read_config.return_value = self.config
cobj = ConfigParser("/a/b/.cdf.yml")
_run_provision.side_effect = CLIError("Nooo")
with self.assertRaises(CLIError) as context:
run_test(None, cobj=cobj, config="/a/b/.cdf.yml", exit_on_error=True, test_args=["default", "patch"], working_dir=os.getcwd(),
down_strategy="always", upgrade_strategy="all")
self.assertIn('default', context)
assert_run_count(self, {_run_hook: 0, _run_provision: 1, _run_de_provision: 0, _run_expect_tests: 0, de_provision: 1})
# test with down_strategy success only
_run_provision.reset_mock()
de_provision.reset_mock()
with self.assertRaises(CLIError) as context:
run_test(None, cobj=cobj, config="/a/b/.cdf.yml", exit_on_error=True, test_args=["default", "patch"], working_dir=os.getcwd(),
down_strategy="success", upgrade_strategy="all")
self.assertIn('default', context)
assert_run_count(self, {_run_hook: 0, _run_provision: 1, _run_de_provision: 0, _run_expect_tests: 0, de_provision: 0})
self.assertEqual(cobj.name, self.config["name"])
self.assertEqual(cobj.tests, self.tests)
@patch.object(ConfigParser, '_read_config')
@patch('azext_cdf.tester._run_expect_tests')
@patch('azext_cdf.tester._run_de_provision')
@patch('azext_cdf.tester._run_provision')
@patch('azext_cdf.tester._run_hook')
def test_simple_upgrade_strategy_only_upgrade(self, _run_hook, _run_provision, _run_de_provision, _run_expect_tests, _read_config):
self.config["name"] = 'test_simple_upgrade_strategy_only_upgrade'
_read_config.return_value = self.config
cobj = ConfigParser("/a/b/.cdf.yml")
results = run_test(None, cobj=cobj, config="/a/b/.cdf.yml", exit_on_error=False, test_args=["default", "patch"], working_dir=os.getcwd(),
down_strategy="always", upgrade_strategy="upgrade")
assert_run_count(self, {_run_hook: 0, _run_provision: 0, _run_de_provision: 0, _run_expect_tests: 0})
self.assertEqual(len(results), 0)
assert_state(self, self.state_file, {"name": self.config["name"]})
self.assertEqual(cobj.name, self.config["name"])
self.assertEqual(cobj.tests, self.tests)
class TestManageGitUpgrade(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
self.upgrade_config = {"name": "x1", "type": "git", "path": "/"}
self.upgrade_config["git"] = {"repo": "https://github.com/ahelal/git-example.git"}
# branch
# tag
# key
def test_reuse_manage_git_upgrade(self):
# new branch
upgrade_config = copy.deepcopy(self.upgrade_config)
upgrade_config["git"]['branch'] = "new"
gitdir_new = _manage_git_upgrade(upgrade_config, self.tmpdir, upgrade_config["name"], reuse_dir=True)
git_hash, _ = run_command("git",["show", '--pretty=format:"%H"', "--no-patch"], cwd=gitdir_new)
self.assertEqual(git_hash.replace('"',""), "1c247b950f1655ad84d2cc8fc4f594c6a6afb402")
# tag v0.0.2 branch
upgrade_config = copy.deepcopy(self.upgrade_config)
upgrade_config["git"]['tag'] = "v0.0.2"
gitdir_v0_0_2 = _manage_git_upgrade(upgrade_config, self.tmpdir, upgrade_config["name"], reuse_dir=True)
git_hash, _ = run_command("git",["show", '--pretty=format:"%H"', "--no-patch"], cwd=gitdir_v0_0_2)
self.assertEqual(git_hash.replace('"',""), "c0659f4bd2f44a917e5bc77ae41aeaa542133103")
self.assertEqual(gitdir_new, gitdir_v0_0_2)
# main branch
upgrade_config = copy.deepcopy(self.upgrade_config)
upgrade_config["git"]['branch'] = "main"
gitdir_main = _manage_git_upgrade(upgrade_config, self.tmpdir, upgrade_config["name"], reuse_dir=True)
git_hash, _ = run_command("git",["show", '--pretty=format:"%H"', "--no-patch"], cwd=gitdir_main)
self.assertEqual(git_hash.replace('"',""), "a0281435a7e1880921ae59399c98b3d04473e471")
self.assertEqual(gitdir_v0_0_2, gitdir_main)
# commit
upgrade_config = copy.deepcopy(self.upgrade_config)
upgrade_config["git"]['commit'] = "8131806c7906a252573ef329433dd5e91d708607"
gitdir_commit = _manage_git_upgrade(upgrade_config, self.tmpdir, upgrade_config["name"], reuse_dir=True)
git_hash, _ = run_command("git",["show", '--pretty=format:"%H"', "--no-patch"], cwd=gitdir_main)
self.assertEqual(git_hash.replace('"',""), "8131806c7906a252573ef329433dd5e91d708607")
self.assertEqual(gitdir_main, gitdir_commit)
def tearDown(self):
shutil.rmtree(self.tmpdir)
# class TesteUpgrade(unittest.TestCase):
# def setUp(self):
# self.config = {"name": "cdf_simple", "resource_group": "rg", "location": "loc"}
# self.config["tests"] = {"default": {}, "patch": {}}
# self.tmpdir = tempfile.mkdtemp()
# self.config["tmp_dir"] = self.tmpdir
# self.state_file = f"{self.tmpdir}/{''.join(random.sample(string.ascii_lowercase, 25))}.json"
# self.config[CONFIG_STATE_FILENAME] = os.path.basename(self.state_file)
# self.config[CONFIG_STATE_FILEPATH] = f"file://{os.path.dirname(self.state_file)}"
# self.tests = ["default", "patch"]
# self.upgrades = ["fresh"]
# def tearDown(self):
# shutil.rmtree(self.tmpdir)
# @patch.object(ConfigParser, '_read_config')
# @patch('azext_cdf.tester._run_expect_tests')
# @patch('azext_cdf.tester._run_de_provision')
# @patch('azext_cdf.tester._run_provision')
# @patch('azext_cdf.tester._run_hook')
# def test_simple_upgrade_strategy_only_upgrade(self, _run_hook, _run_provision, _run_de_provision, _run_expect_tests, _read_config):
# self.config["name"] = 'test_simple_upgrade_strategy_only_upgrade'
# _read_config.return_value = self.config
# cobj = ConfigParser("/a/b/.cdf.yml")
# results = run_test(None, cobj=cobj, config="/a/b/.cdf.yml", exit_on_error=False, test_args=["default", "patch"], working_dir=os.getcwd(),
# down_strategy="always", upgrade_strategy="upgrade")
# assert_run_count(self, {_run_hook: 0, _run_provision: 2, _run_de_provision: 2, _run_expect_tests: 4})
# for upgrade_path in self.upgrades:
# self.assertEqual(len(results), len(self.upgrades))
# self.assertIn(upgrade_path, results)
# for test in self.tests:
# for phase in ["provisioning", "provision expect", "de-provisioning", "de-provision expect"]:
# self.assertFalse(results[upgrade_path][test][phase]["failed"])
# self.assertIn(test, results[upgrade_path])
# self.assertFalse(results[upgrade_path][test]["failed"])
# assert_state(self, f"{self.tmpdir}/test_{upgrade_path}_{test}_state.json", {"name": f'{self.config["name"]}_{test}_test'})
# assert_state(self, self.state_file, {"name": self.config["name"]})
# self.assertEqual(cobj.name, self.config["name"])
# self.assertEqual(cobj.tests, self.tests)
# upgrade choices=['all', 'fresh', 'upgrade'])
# down choices=['success', 'always', 'never'])
if __name__ == '__main__':
unittest.main()
# TODO Write tests for
# _run_single_test
# _expect_cmd_exec
# _expect_assert_exec
# _phase_cordinator
| 55.621993
| 150
| 0.668911
| 2,030
| 16,186
| 5.007389
| 0.081281
| 0.040334
| 0.042696
| 0.056075
| 0.869454
| 0.855681
| 0.850172
| 0.841909
| 0.838957
| 0.838957
| 0
| 0.015724
| 0.186643
| 16,186
| 290
| 151
| 55.813793
| 0.7564
| 0.16817
| 0
| 0.642157
| 0
| 0
| 0.210891
| 0.120925
| 0
| 0
| 0
| 0.003448
| 0.323529
| 1
| 0.058824
| false
| 0
| 0.063725
| 0
| 0.132353
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
751dfa78345b1ffe6968af787957e3624495b2a4
| 532
|
py
|
Python
|
webcam_scanner/api_common/api_router.py
|
hobby1999/webcam_scanner
|
50a4473eb1e139b87f5a8ffed23d388448198b08
|
[
"Apache-2.0"
] | null | null | null |
webcam_scanner/api_common/api_router.py
|
hobby1999/webcam_scanner
|
50a4473eb1e139b87f5a8ffed23d388448198b08
|
[
"Apache-2.0"
] | null | null | null |
webcam_scanner/api_common/api_router.py
|
hobby1999/webcam_scanner
|
50a4473eb1e139b87f5a8ffed23d388448198b08
|
[
"Apache-2.0"
] | null | null | null |
from fastapi import APIRouter
'''
API路由蓝图
'''
router = APIRouter()
from api_common import api_download
from api_common import api_uploadfile
from api_common import api_delete
from plugins import api_cvelookup
from plugins import api_checkenvironment
from plugins import api_checkcompany
from plugins import api_passwdcheck
from plugins import api_checksec
from plugins import api_simplescan
from plugins import api_binwalk_all
from plugins import api_binwalk_encrpt
from plugins import api_extract
from plugins import api_filetree
| 26.6
| 40
| 0.864662
| 77
| 532
| 5.74026
| 0.298701
| 0.264706
| 0.384615
| 0.452489
| 0.271493
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.118421
| 532
| 19
| 41
| 28
| 0.942431
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.066667
| 0.933333
| 0
| 0.933333
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
7533849c157598c6f432433d5874587e93d1e422
| 109
|
py
|
Python
|
orb_simulator/orbsim_language/orbsim_ast/less_than_node.py
|
dmguezjaviersnet/IA-Sim-Comp-Project
|
8165b9546efc45f98091a3774e2dae4f45942048
|
[
"MIT"
] | 1
|
2022-01-19T22:49:09.000Z
|
2022-01-19T22:49:09.000Z
|
orb_simulator/orbsim_language/orbsim_ast/less_than_node.py
|
dmguezjaviersnet/IA-Sim-Comp-Project
|
8165b9546efc45f98091a3774e2dae4f45942048
|
[
"MIT"
] | 15
|
2021-11-10T14:25:02.000Z
|
2022-02-12T19:17:11.000Z
|
orb_simulator/orbsim_language/orbsim_ast/less_than_node.py
|
dmguezjaviersnet/IA-Sim-Comp-Project
|
8165b9546efc45f98091a3774e2dae4f45942048
|
[
"MIT"
] | null | null | null |
from orbsim_language.orbsim_ast.comp_expr_node import CompExprNode
class LessThanNode(CompExprNode):
pass
| 36.333333
| 66
| 0.862385
| 14
| 109
| 6.428571
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091743
| 109
| 3
| 67
| 36.333333
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
754a4b6a31a700c825dcd6729bf5967fd2943193
| 29,055
|
py
|
Python
|
src/hub/dataload/sources/civic/civic_upload.py
|
erikyao/myvariant.info
|
a4eaaca7ab6c069199f8942d5afae2dece908147
|
[
"Apache-2.0"
] | 39
|
2017-07-01T22:34:39.000Z
|
2022-03-15T22:25:59.000Z
|
src/hub/dataload/sources/civic/civic_upload.py
|
erikyao/myvariant.info
|
a4eaaca7ab6c069199f8942d5afae2dece908147
|
[
"Apache-2.0"
] | 105
|
2017-06-28T17:26:06.000Z
|
2022-03-17T17:49:53.000Z
|
src/hub/dataload/sources/civic/civic_upload.py
|
erikyao/myvariant.info
|
a4eaaca7ab6c069199f8942d5afae2dece908147
|
[
"Apache-2.0"
] | 14
|
2017-06-12T18:29:36.000Z
|
2021-03-18T15:51:27.000Z
|
import glob, os
import biothings.hub.dataload.uploader as uploader
from hub.dataload.uploader import SnpeffPostUpdateUploader
from hub.dataload.storage import MyVariantIgnoreDuplicatedStorage
from .civic_parser import load_data
class CivicUploader(SnpeffPostUpdateUploader):
name = "civic"
storage_class = MyVariantIgnoreDuplicatedStorage
__metadata__ = {
"mapper" : 'observed',
"assembly" : "hg19",
"src_meta" : {
"url" : "https://civicdb.org",
"license_url" : "https://creativecommons.org/publicdomain/zero/1.0/",
"license_url_short": "http://bit.ly/2FqS871",
"licence" : "CC0 1.0 Universal"
}
}
def load_data(self, data_folder):
self.logger.info("Load data from '%s'" % data_folder)
return load_data(data_folder)
@classmethod
def get_mapping(klass):
mapping = {
"civic": {
"properties": {
"entrez_name": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"entrez_id": {
"type": "integer"
},
"name": {
"type": "text"
},
"description": {
"type": "text"
},
"gene_id": {
"type": "integer"
},
"type": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"variant_types": {
"properties": {
"id": {
"type": "integer"
},
"name": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"so_id": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"url": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"display_name": {
"type": "text"
},
"description": {
"type": "text"
}
}
},
"civic_actionability_score": {
"type": "float"
},
"coordinates": {
"properties": {
"chromosome": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"start": {
"type": "integer"
},
"stop": {
"type": "integer"
},
"representative_transcript": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"chromosome2": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"start2": {
"type": "integer"
},
"stop2": {
"type": "integer"
},
"representative_transcript2": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"ensembl_version": {
"type": "integer"
},
"reference_build": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"reference_bases": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"variant_bases": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
}
}
},
"evidence_items": {
"properties": {
"id": {
"type": "integer"
},
"name": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"disease": {
"properties": {
"id": {
"type": "integer"
},
"name": {
"type": "text"
},
"display_name": {
"type": "text"
},
"doid": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"url": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
}
}
},
"drugs": {
"properties": {
"id": {
"type": "integer"
},
"name": {
"type": "text"
}
}
},
"rating": {
"type": "integer"
},
"evidence_level": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"evidence_type": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"status": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"open_change_count": {
"type": "integer"
},
"type": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"source": {
"properties": {
"id": {
"type": "integer"
},
"name": {
"type": "text"
},
"citation": {
"type": "text"
},
"source_url": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"publication_date": {
"properties": {
"year": {
"type": "integer"
},
"month": {
"type": "integer"
},
"day": {
"type": "integer"
}
}
},
"journal": {
"type": "text"
},
"full_journal_title": {
"type": "text"
},
"status": {
"type": "text"
},
"is_review": {
"type": "boolean"
},
"pubmed": {
"type": "integer"
},
"open_access": {
"type": "boolean"
},
"pmc_id": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"clinical_trials": {
"properties": {
"nct_id": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"name": {
"type": "text"
},
"description": {
"type": "text"
},
"clinical_trial_url": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
}
}
},
"asco_abstract_id": {
"type": "integer"
},
"asco": {
"type": "integer"
}
}
},
"variant_id": {
"type": "integer"
},
"drug_interaction_type": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"phenotypes": {
"properties": {
"id": {
"type": "integer"
},
"hpo_id": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"url": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"hpo_class": {
"type": "text"
}
}
},
"evidence_direction": {
"type": "text"
},
"clinical_significance": {
"type": "text"
},
"description": {
"type": "text"
},
"variant_origin": {
"type": "text"
}
}
},
"variant_aliases": {
"type": "text"
},
"sources": {
"properties": {
"id": {
"type": "integer"
},
"citation_id": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"source_type": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"source_url": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"publication_date": {
"properties": {
"year": {
"type": "integer"
},
"month": {
"type": "integer"
},
"day": {
"type": "integer"
}
}
},
"is_review": {
"type": "boolean"
},
"open_access": {
"type": "boolean"
},
"pmc_id": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"name": {
"type": "text"
},
"citation": {
"type": "text"
},
"journal": {
"type": "text"
},
"full_journal_title": {
"type": "text"
},
"status": {
"type": "text"
}
}
},
"variant_id": {
"type": "integer"
},
"assertions": {
"properties": {
"id": {
"type": "integer"
},
"type": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"name": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"gene": {
"properties": {
"name": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"id": {
"type": "integer"
}
}
},
"variant": {
"properties": {
"name": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"id": {
"type": "integer"
}
}
},
"disease": {
"properties": {
"id": {
"type": "integer"
},
"name": {
"type": "text"
},
"display_name": {
"type": "text"
},
"doid": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"url": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
}
}
},
"drugs": {
"properties": {
"id": {
"type": "integer"
},
"name": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
}
}
},
"evidence_type": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"evidence_direction": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"clinical_significance": {
"type": "text"
},
"evidence_item_count": {
"type": "integer"
},
"fda_regulatory_approval": {
"type": "boolean"
},
"status": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"open_change_count": {
"type": "integer"
},
"pending_evidence_count": {
"type": "integer"
},
"summary": {
"type": "text"
},
"description": {
"type": "text"
}
}
},
"variant_groups": {
"properties": {
"id": {
"type": "integer"
},
"variants": {
"properties": {
"id": {
"type": "integer"
},
"entrez_name": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"entrez_id": {
"type": "integer"
},
"gene_id": {
"type": "integer"
},
"type": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"variant_types": {
"properties": {
"id": {
"type": "integer"
},
"name": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"so_id": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"url": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"display_name": {
"type": "text"
},
"description": {
"type": "text"
}
}
},
"civic_actionability_score": {
"type": "float"
},
"coordinates": {
"properties": {
"chromosome": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"start": {
"type": "integer"
},
"stop": {
"type": "integer"
},
"representative_transcript": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"chromosome2": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"start2": {
"type": "integer"
},
"stop2": {
"type": "integer"
},
"representative_transcript2": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"ensembl_version": {
"type": "integer"
},
"reference_build": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"reference_bases": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"variant_bases": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
}
}
},
"name": {
"type": "text"
},
"description": {
"type": "text"
}
}
},
"type": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"name": {
"type": "text"
},
"description": {
"type": "text"
}
}
},
"clinvar_entries": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"provisional_values": {
"properties": {
"description": {
"properties": {
"value": {
"type": "text"
},
"revision_id": {
"type": "integer"
}
}
}
}
},
"hgvs_expressions": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
},
"allele_registry_id": {
"type": "keyword",
"normalizer": "keyword_lowercase_normalizer"
}
}
}
}
return mapping
| 46.78744
| 92
| 0.206952
| 889
| 29,055
| 6.52306
| 0.167604
| 0.106225
| 0.202794
| 0.270391
| 0.769615
| 0.769615
| 0.730816
| 0.667701
| 0.657872
| 0.647008
| 0
| 0.002293
| 0.714851
| 29,055
| 620
| 93
| 46.862903
| 0.697646
| 0
| 0
| 0.586601
| 0
| 0
| 0.193054
| 0.062917
| 0
| 0
| 0
| 0
| 0.001634
| 1
| 0.003268
| false
| 0
| 0.00817
| 0
| 0.021242
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f34a73637c79b0eb7e9ba480938a8f0602fc8c06
| 49
|
py
|
Python
|
LoadTesting/load_testing_je/utils/start_locust/__init__.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
LoadTesting/load_testing_je/utils/start_locust/__init__.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
LoadTesting/load_testing_je/utils/start_locust/__init__.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
from load_testing_je.utils.start_locust import *
| 24.5
| 48
| 0.857143
| 8
| 49
| 4.875
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081633
| 49
| 1
| 49
| 49
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f362c8d09b1a501ebfea5728473bfdd7bf7c9eda
| 118
|
py
|
Python
|
core/shortcuts.py
|
ikcam/django-skeleton
|
c07e5c1de41e5d1ea32ebe4e27fd4e577191893c
|
[
"BSD-3-Clause"
] | 3
|
2017-04-26T10:15:49.000Z
|
2019-10-13T14:13:44.000Z
|
core/shortcuts.py
|
ikcam/django-skeleton
|
c07e5c1de41e5d1ea32ebe4e27fd4e577191893c
|
[
"BSD-3-Clause"
] | null | null | null |
core/shortcuts.py
|
ikcam/django-skeleton
|
c07e5c1de41e5d1ea32ebe4e27fd4e577191893c
|
[
"BSD-3-Clause"
] | null | null | null |
def get_current_company(request):
from core.models import Company
return Company.objects.get_current(request)
| 29.5
| 47
| 0.79661
| 16
| 118
| 5.6875
| 0.6875
| 0.21978
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135593
| 118
| 3
| 48
| 39.333333
| 0.892157
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f369e8708a8ad1acaf92977e395188d46ac7a864
| 9,097
|
py
|
Python
|
opendatatools/aqi/aqi_agent.py
|
solider245/OpenData
|
031aa29b7b6b26a903f378e3da10520fd3a1b7ab
|
[
"Apache-2.0"
] | 1,179
|
2018-05-28T07:14:41.000Z
|
2022-03-27T16:03:51.000Z
|
opendatatools/aqi/aqi_agent.py
|
taoyeah/OpenData
|
031aa29b7b6b26a903f378e3da10520fd3a1b7ab
|
[
"Apache-2.0"
] | 42
|
2018-07-05T02:44:56.000Z
|
2022-03-29T12:12:30.000Z
|
opendatatools/aqi/aqi_agent.py
|
taoyeah/OpenData
|
031aa29b7b6b26a903f378e3da10520fd3a1b7ab
|
[
"Apache-2.0"
] | 297
|
2018-05-28T07:39:38.000Z
|
2022-03-28T02:35:59.000Z
|
# encoding: UTF-8
from opendatatools.common import get_current_day
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
from opendatatools.common import RestAgent
from opendatatools.aqi.constant import city_code_map
class AQIAgent(RestAgent):
    """Scrapes air-quality-index (AQI) reports from datacenter.mep.gov.cn.

    Each getter paginates through the site's HTML report pages, extracts rows
    from the ``report_main`` table with BeautifulSoup, and returns the
    accumulated records as a pandas DataFrame (or None on request failure /
    unknown city).
    """
    def __init__(self):
        RestAgent.__init__(self)
    def handle_visit_limit(self):
        # NOTE(review): stub — binds an empty url and does nothing else.
        url = ""
    def get_daily_aqi(self, date):
        """Fetch nationwide daily AQI records for *date*.

        Returns a DataFrame with date/city/aqi/code/level/indicator columns,
        or None when a page request fails.
        """
        url = "http://datacenter.mep.gov.cn/websjzx/report/list.vm"
        page_no = 0
        aqi_result = list()
        while True:
            page_no = page_no + 1
            # 1. Fetch the report one page at a time.
            data = {
                'pageNum': page_no,
                'V_DATE': date,
                'xmlname': 1512478367400,
                'roleType': 'CFCD2084',
            }
            rsp = self.do_request(url, data, self.proxies)
            if rsp is None:
                return None
            # 'data' is reused here as the parsed-row accumulator for this page.
            data = list()
            soup = BeautifulSoup(rsp, "html5lib")
            divs = soup.find_all('div')
            for div in divs:
                if div.has_attr('class') and 'report_main' in div['class']:
                    rows = div.table.findAll('tr')
                    for row in rows:
                        cols = row.findAll('td')
                        # Data rows have exactly 9 cells; headers are skipped.
                        if len(cols) == 9:
                            city = cols[3].text
                            aqi = cols[4].text
                            indicator = cols[5].text
                            # NOTE(review): this rebinding shadows the 'date'
                            # parameter used in later page requests — confirm
                            # the parsed cell always equals the queried date.
                            date = cols[6].text
                            code = cols[7].text
                            level = cols[8].text
                            data.append ({
                                "date" : date,
                                "city" : city,
                                "aqi" : aqi,
                                "code" : code,
                                "level" : level,
                                "indicator" : indicator,
                            })
            # An empty page means we paged past the last record.
            if len(data) == 0:
                break;
            aqi_result.extend(data)
        df = pd.DataFrame(aqi_result)
        return df
    def get_hour_aqi(self, time):
        """Fetch nationwide hourly AQI records for *time*.

        Returns a DataFrame with time/city/aqi/code/level/indicator columns,
        or None when a page request fails.
        """
        url = "http://datacenter.mep.gov.cn/websjzx/report/list.vm"
        page_no = 0
        aqi_result = list()
        while True:
            page_no = page_no + 1
            # 1. Fetch the report one page at a time.
            data = {
                'pageNum': page_no,
                'xmlname': 1512382906122,
                'roleType': 'CFCD2084',
                'V_DATE': time,
                'E_DATE' : time,
            }
            rsp = self.do_request(url, data, self.proxies)
            if rsp is None:
                return None
            # 'data' is reused here as the parsed-row accumulator for this page.
            data = list()
            soup = BeautifulSoup(rsp, "html5lib")
            divs = soup.find_all('div')
            for div in divs:
                if div.has_attr('class') and 'report_main' in div['class']:
                    rows = div.table.findAll('tr')
                    for row in rows:
                        cols = row.findAll('td')
                        # Hourly data rows have exactly 8 cells.
                        if len(cols) == 8:
                            city = cols[2].text
                            aqi = cols[3].text
                            indicator = cols[4].text
                            # NOTE(review): rebinding shadows the 'time'
                            # parameter for later page requests — confirm.
                            time = cols[5].text
                            code = cols[6].text
                            level = cols[7].text
                            data.append ({
                                "time" : time,
                                "city" : city,
                                "aqi" : aqi,
                                "code" : code,
                                "level" : level,
                                "indicator" : indicator,
                            })
            # An empty page means we paged past the last record.
            if len(data) == 0:
                break;
            aqi_result.extend(data)
        df = pd.DataFrame(aqi_result)
        return df
    def get_daily_aqi_onecity(self, city):
        """Fetch the daily AQI history (query start 2000-01-01) for one city.

        Returns a DataFrame with date/aqi/level/indicator columns, or None
        when *city* is not in city_code_map.
        """
        url = 'http://datacenter.mep.gov.cn/websjzx/report/list.vm'
        if city not in city_code_map:
            print("this city is not ready !" + city)
            return None
        city_code = city_code_map[city]
        aqi_result = list()
        page_no = 0
        while True:
            page_no = page_no + 1
            # 1. Fetch the report one page at a time.
            data = {
                'pageNum': page_no,
                'citycodes': city_code,
                'citytime': "2000-01-01",
                'xmlname': "1513844769596kqzllb"
            }
            rsp = self.do_request(url, data, self.proxies)
            # 2. Parse the response and pull the needed fields out of the HTML.
            data = list()
            soup = BeautifulSoup(rsp, "html5lib")
            divs = soup.find_all('div')
            for div in divs:
                if div.has_attr('class') and 'report_main' in div['class']:
                    rows = div.table.findAll('tr')
                    for row in rows:
                        cols = row.findAll('td')
                        # Data rows have exactly 7 cells; headers are skipped.
                        if len(cols) == 7:
                            date = cols[1].text
                            aqi = cols[3].text
                            level = cols[5].text
                            indicator = cols[4].text
                            data.append({
                                "date" : date,
                                "aqi" : aqi,
                                "level" : level,
                                "indicator" : indicator,
                            })
            # An empty page means we paged past the last record.
            if len(data) == 0:
                break;
            aqi_result.extend(data)
        df = pd.DataFrame(aqi_result)
        return df
    def get_recent_daily_aqi_onecity(self, city):
        """Fetch the most recent daily AQI page for one city (single request).

        Returns a DataFrame with date/aqi/level/indicator columns, or None
        when *city* is not in city_code_map.
        """
        url = 'http://datacenter.mep.gov.cn/websjzx/report!list.vm'
        if city not in city_code_map:
            print("this city is not ready !" + city)
            return None
        city_code = city_code_map[city]
        data = {
            'citycodes': city_code,
            'citytime': get_current_day(),
            'xmlname': "1513844769596kqzllb"
        }
        rsp = self.do_request(url, data, self.proxies)
        # 2. Parse the response and pull the needed fields out of the HTML.
        data = list()
        soup = BeautifulSoup(rsp, "html5lib")
        divs = soup.find_all('div')
        for div in divs:
            if div.has_attr('class') and 'report_main' in div['class']:
                rows = div.table.findAll('tr')
                for row in rows:
                    cols = row.findAll('td')
                    # Data rows have exactly 7 cells; headers are skipped.
                    if len(cols) == 7:
                        date = cols[1].text
                        aqi = cols[3].text
                        level = cols[5].text
                        indicator = cols[4].text
                        data.append({
                            "date" : date,
                            "aqi" : aqi,
                            "level" : level,
                            "indicator" : indicator,
                        })
        df = pd.DataFrame(data)
        return df
    def get_hour_aqi_onecity(self, city, date):
        """Fetch hourly AQI records for one city on *date*.

        Returns a DataFrame with time/aqi/city/level/indicator columns, or
        None when *city* is not in city_code_map.
        """
        url = 'http://datacenter.mep.gov.cn/websjzx/report/list.vm'
        if city not in city_code_map:
            print("this city is not ready !" + city)
            return None
        city_code = city_code_map[city]
        aqi_result = list()
        page_no = 0
        while True:
            page_no = page_no + 1
            # 1. Fetch the report one page at a time.
            data = {
                'pageNum': page_no,
                # NOTE(review): 'ctiycodes' looks like a typo of 'citycodes' —
                # confirm against the parameters the site actually expects.
                'ctiycodes': city_code,
                'citytime': date,
                'xmlname': "1511257916552",
                "queryflag": "close",
                "customquery": "false",
                "isdesignpatterns": "false",
            }
            rsp = self.do_request(url, data, self.proxies)
            # 2. Parse the response and pull the needed fields out of the HTML.
            data = list()
            soup = BeautifulSoup(rsp, "html5lib")
            divs = soup.find_all('div')
            for div in divs:
                if div.has_attr('class') and 'report_main' in div['class']:
                    rows = div.table.findAll('tr')
                    for row in rows:
                        cols = row.findAll('td')
                        # Data rows have exactly 7 cells; headers are skipped.
                        if len(cols) == 7:
                            time = cols[2].text
                            aqi = cols[4].text
                            # NOTE(review): rebinding 'city' shadows the
                            # parameter for subsequent iterations.
                            city = cols[3].text
                            level = cols[5].text
                            indicator = cols[6].text
                            data.append({
                                "time" : time,
                                "aqi" : aqi,
                                "city" : city,
                                "level" : level,
                                "indicator" : indicator,
                            })
            aqi_result.extend(data)
            # A short page (< 10 rows) is treated as the last one.
            if len(data) < 10:
                break;
        df = pd.DataFrame(aqi_result)
        return df
if __name__ == '__main__':
    # Smoke test: fetch hourly AQI for one city on a fixed date and print it.
    aqi = AQIAgent()
    result = aqi.get_hour_aqi_onecity('北京市','2018-05-26')
    print(result)
| 32.373665
| 75
| 0.404749
| 849
| 9,097
| 4.207303
| 0.154299
| 0.026876
| 0.021557
| 0.027996
| 0.756999
| 0.717245
| 0.709406
| 0.701008
| 0.701008
| 0.690929
| 0
| 0.031982
| 0.498186
| 9,097
| 281
| 76
| 32.373665
| 0.750493
| 0.01495
| 0
| 0.761261
| 0
| 0
| 0.103764
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.031532
| false
| 0
| 0.027027
| 0
| 0.108108
| 0.018018
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f37fb863b635d8d971a14f0aae6c05986ade175f
| 10,730
|
py
|
Python
|
scenes/tests/test_integration.py
|
jordifierro/pachatary-api
|
c03ad67ceb856068daa6d082091372eb1ed3d009
|
[
"MIT"
] | 3
|
2018-12-05T16:44:59.000Z
|
2020-08-01T14:12:32.000Z
|
scenes/tests/test_integration.py
|
jordifierro/pachatary-api
|
c03ad67ceb856068daa6d082091372eb1ed3d009
|
[
"MIT"
] | 6
|
2020-06-03T15:56:59.000Z
|
2022-02-10T07:23:55.000Z
|
scenes/tests/test_integration.py
|
jordifierro/pachatary-api
|
c03ad67ceb856068daa6d082091372eb1ed3d009
|
[
"MIT"
] | null | null | null |
import json
from decimal import Decimal
import urllib.parse
from django.test import TestCase, Client, tag
from django.urls import reverse
from experiences.models import ORMExperience
from scenes.models import ORMScene
from profiles.models import ORMProfile
from people.models import ORMPerson, ORMAuthToken, ORMBlock
class ExperienceDetailTestCase(TestCase):
    """Integration tests for listing the scenes of an experience."""
    def test_scenes_from_experience_returns_experience(self):
        """Both scenes of the experience come back, serialized in order."""
        person = ORMPerson.objects.create()
        token = ORMAuthToken.objects.create(person=person)
        ORMProfile.objects.create(person_id=person.id, username='usr')
        experience = ORMExperience.objects.create(title='Exp c', description='stuffs', author=person)
        first_scene = ORMScene.objects.create(title='Scene d', description='D',
                                              latitude=Decimal('1.2'), longitude=Decimal('-3.4'),
                                              experience=experience)
        second_scene = ORMScene.objects.create(title='Scene e', description='E',
                                               latitude=Decimal('5.6'), longitude=Decimal('-7.8'),
                                               experience=experience)
        headers = {'HTTP_AUTHORIZATION': 'Token {}'.format(token.access_token), }
        response = Client().get(reverse('scenes'), {'experience': str(experience.id)}, **headers)
        assert response.status_code == 200
        body = json.loads(response.content)
        assert body == [
            {
                'id': str(first_scene.id),
                'title': 'Scene d',
                'description': 'D',
                'picture': None,
                'latitude': 1.2,
                'longitude': -3.4,
                'experience_id': str(experience.id),
            },
            {
                'id': str(second_scene.id),
                'title': 'Scene e',
                'description': 'E',
                'picture': None,
                'latitude': 5.6,
                'longitude': -7.8,
                'experience_id': str(experience.id),
            },
        ]
    def test_scenes_from_blocked_user_raises_exception(self):
        """Requesting scenes authored by a blocked person returns 403."""
        person = ORMPerson.objects.create()
        token = ORMAuthToken.objects.create(person=person)
        ORMProfile.objects.create(person_id=person.id, username='usr')
        blocked_person = ORMPerson.objects.create()
        ORMProfile.objects.create(person_id=blocked_person.id, username='blocked')
        ORMBlock.objects.create(creator=person, target=blocked_person)
        experience = ORMExperience.objects.create(title='Exp c', description='stuffs', author=blocked_person)
        ORMScene.objects.create(title='Scene d', description='D',
                                latitude=Decimal('1.2'), longitude=Decimal('-3.4'),
                                experience=experience)
        ORMScene.objects.create(title='Scene e', description='E',
                                latitude=Decimal('5.6'), longitude=Decimal('-7.8'),
                                experience=experience)
        headers = {'HTTP_AUTHORIZATION': 'Token {}'.format(token.access_token), }
        response = Client().get(reverse('scenes'), {'experience': str(experience.id)}, **headers)
        assert response.status_code == 403
        body = json.loads(response.content)
        assert body == {
            'error': {
                'source': 'content',
                'code': 'blocked',
                'message': 'Content is blocked'
            }
        }
class CreateSceneTestCase(TestCase):
    """Integration tests for creating a scene via POST."""
    @tag('elasticsearch')
    def test_create_scene_creates_and_returns_scene(self):
        """A valid POST persists the scene and echoes it back."""
        person = ORMPerson.objects.create()
        token = ORMAuthToken.objects.create(person=person)
        ORMProfile.objects.create(person_id=person.id, username='usr')
        experience = ORMExperience.objects.create(title='Exp', author=person)
        headers = {'HTTP_AUTHORIZATION': 'Token {}'.format(token.access_token), }
        response = Client().post(reverse('scenes'),
                                 {'title': 'Scene title',
                                  'description': 'Some description',
                                  'latitude': 0.3,
                                  'longitude': 1.2,
                                  'experience_id': experience.id}, **headers)
        body = json.loads(response.content)
        stored_scene = ORMScene.objects.get(id=body['id'],
                                            title='Scene title',
                                            description='Some description',
                                            experience_id=experience.id)
        assert stored_scene is not None
        assert body == {
            'id': str(stored_scene.id),
            'title': 'Scene title',
            'description': 'Some description',
            'picture': None,
            'latitude': 0.3,
            'longitude': 1.2,
            'experience_id': str(experience.id),
        }
    def test_wrong_attributes_doesnt_create_and_returns_error(self):
        """An empty title is rejected and nothing is persisted."""
        person = ORMPerson.objects.create()
        token = ORMAuthToken.objects.create(person=person)
        ORMProfile.objects.create(person_id=person.id, username='usr')
        experience = ORMExperience.objects.create(title='Exp', author=person)
        headers = {'HTTP_AUTHORIZATION': 'Token {}'.format(token.access_token), }
        response = Client().post(reverse('scenes'),
                                 {'title': '',
                                  'description': 'Some description',
                                  'latitude': 0.3,
                                  'longitude': 1.2,
                                  'experience_id': experience.id}, **headers)
        assert not ORMScene.objects.filter(title='',
                                           description='Some description',
                                           latitude=0.3,
                                           longitude=1.2,
                                           experience_id=experience.id).exists()
        body = json.loads(response.content)
        assert body == {
            'error': {
                'source': 'title',
                'code': 'wrong_size',
                'message': 'Title must be between 1 and 80 chars'
            }
        }
class ModifySceneTestCase(TestCase):
    """Integration tests for modifying a scene via PATCH."""
    def test_modifies_and_returns_scene(self):
        """A valid PATCH updates the scene and echoes the new state."""
        person = ORMPerson.objects.create()
        ORMProfile.objects.create(person_id=person.id, username='usr')
        token = ORMAuthToken.objects.create(person=person)
        experience = ORMExperience.objects.create(title='Exp', author=person)
        scene = ORMScene.objects.create(title='T', description='',
                                        latitude=1, longitude=2, experience_id=experience.id)
        headers = {'HTTP_AUTHORIZATION': 'Token {}'.format(token.access_token), }
        payload = urllib.parse.urlencode({"description": "New description",
                                          "latitude": 0.3, "longitude": 1.2})
        response = Client().patch(reverse('scene', args=[scene.id]),
                                  payload,
                                  content_type='application/x-www-form-urlencoded',
                                  **headers)
        body = json.loads(response.content)
        stored_scene = ORMScene.objects.get(id=scene.id,
                                            title='T', description='New description',
                                            experience_id=experience.id)
        assert stored_scene is not None
        assert body == {
            'id': str(scene.id),
            'title': 'T',
            'description': 'New description',
            'picture': None,
            'latitude': 0.3,
            'longitude': 1.2,
            'experience_id': str(experience.id),
        }
    def test_wrong_attributes_doesnt_update_and_returns_error(self):
        """An empty title is rejected and the scene stays unchanged."""
        person = ORMPerson.objects.create()
        token = ORMAuthToken.objects.create(person=person)
        ORMProfile.objects.create(person_id=person.id, username='usr')
        experience = ORMExperience.objects.create(title='Exp', author=person)
        scene = ORMScene.objects.create(title='T', description='',
                                        latitude=1, longitude=2, experience_id=experience.id)
        headers = {'HTTP_AUTHORIZATION': 'Token {}'.format(token.access_token), }
        payload = urllib.parse.urlencode({"title": "",
                                          "description": "Some description",
                                          "latitude": 0.3,
                                          "longitude": 1.2,
                                          "experience_id": experience.id})
        response = Client().patch(reverse('scene', args=[scene.id]),
                                  payload,
                                  content_type='application/x-www-form-urlencoded',
                                  **headers)
        assert not ORMScene.objects.filter(title='',
                                           description='Some description',
                                           latitude=0.3,
                                           longitude=1.2,
                                           experience_id=experience.id).exists()
        body = json.loads(response.content)
        assert body == {
            'error': {
                'source': 'title',
                'code': 'wrong_size',
                'message': 'Title must be between 1 and 80 chars'
            }
        }
| 50.853081
| 111
| 0.490587
| 938
| 10,730
| 5.434968
| 0.128998
| 0.084151
| 0.04845
| 0.04237
| 0.83366
| 0.814829
| 0.793841
| 0.754414
| 0.729109
| 0.684778
| 0
| 0.011276
| 0.404939
| 10,730
| 210
| 112
| 51.095238
| 0.787157
| 0
| 0
| 0.574586
| 0
| 0
| 0.112395
| 0.006151
| 0
| 0
| 0
| 0
| 0.066298
| 1
| 0.033149
| false
| 0
| 0.049724
| 0
| 0.099448
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f3894dade5a593253e1baf982f13f24467f04bb5
| 159
|
py
|
Python
|
ultron8/exceptions/triggers.py
|
bossjones/ultron8
|
45db73d32542a844570d44bc83defa935e15803f
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
ultron8/exceptions/triggers.py
|
bossjones/ultron8
|
45db73d32542a844570d44bc83defa935e15803f
|
[
"Apache-2.0",
"MIT"
] | 43
|
2019-06-01T23:08:32.000Z
|
2022-02-07T22:24:53.000Z
|
ultron8/exceptions/triggers.py
|
bossjones/ultron8
|
45db73d32542a844570d44bc83defa935e15803f
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
from __future__ import absolute_import
from ultron8.exceptions import UltronBaseException
class TriggerDoesNotExistException(UltronBaseException):
    """Raised when a requested trigger cannot be found."""
| 19.875
| 56
| 0.861635
| 14
| 159
| 9.428571
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007092
| 0.113208
| 159
| 7
| 57
| 22.714286
| 0.929078
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
3408635665e30e7200d0cedd9014019a50c33b29
| 42
|
py
|
Python
|
parkloader/__init__.py
|
patientzero/parkloader
|
9c0d8f49b83a831b716c7d99f2eb674daab3f23a
|
[
"WTFPL"
] | 1
|
2021-07-29T07:02:01.000Z
|
2021-07-29T07:02:01.000Z
|
parkloader/__init__.py
|
patientzero/parkloader
|
9c0d8f49b83a831b716c7d99f2eb674daab3f23a
|
[
"WTFPL"
] | null | null | null |
parkloader/__init__.py
|
patientzero/parkloader
|
9c0d8f49b83a831b716c7d99f2eb674daab3f23a
|
[
"WTFPL"
] | null | null | null |
from ._loader import ParkLoader, ParkData
| 21
| 41
| 0.833333
| 5
| 42
| 6.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119048
| 42
| 1
| 42
| 42
| 0.918919
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
341ebff898e40ad07a0cf65f3a804fe621b88c81
| 5,469
|
py
|
Python
|
dl/test/test_run_length_encoding.py
|
brianlan/lanutils
|
364a6e2432c12168746d5de071b137b2dbbfcea3
|
[
"MIT"
] | null | null | null |
dl/test/test_run_length_encoding.py
|
brianlan/lanutils
|
364a6e2432c12168746d5de071b137b2dbbfcea3
|
[
"MIT"
] | null | null | null |
dl/test/test_run_length_encoding.py
|
brianlan/lanutils
|
364a6e2432c12168746d5de071b137b2dbbfcea3
|
[
"MIT"
] | null | null | null |
import pytest
import numpy as np
from ..run_length_encoding import RunLengthEncoder
@pytest.fixture
def segmap1():
    """Small 3x4 binary segmentation map for the basic encode/decode tests."""
    rows = [
        [1, 0, 1, 0],
        [0, 1, 0, 1],
        [0, 1, 0, 1],
    ]
    return np.array(rows)
@pytest.fixture
def segmap2():
    # 16x29 binary segmentation map with many disjoint runs ("hard case 1");
    # its expected encoding is provided by the encoded_seq2 fixture.
    return np.array(
        [
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
            [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
            [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0],
            [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0],
            [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0],
            [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0],
            [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]
    )
@pytest.fixture
def segmap3():
    # 16x29 binary segmentation map with scattered diagonal runs ("hard
    # case 2"); its expected encoding is provided by the encoded_seq3 fixture.
    return np.array(
        [
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
            [0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
            [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
        ]
    )
@pytest.fixture
def encoded_seq2():
    # Expected run-length encoding of segmap2 as flat (start, length) pairs,
    # in downward-then-rightward (column-major) traversal order.
    return [2, 2, 15, 1, 18, 6, 25, 4, 30, 1, 34, 6, 46, 1, 50, 6, 62, 1, 68, 4, 78, 1, 84, 4, 100, 4,
            116, 4, 130, 6, 146, 6, 162, 6, 198, 1, 200, 1, 202, 1, 215, 1, 217, 1, 230, 1, 232, 1,
            234, 1, 247, 1, 249, 1, 262, 1, 264, 1, 266, 1, 328, 4, 344, 1, 347, 1, 360, 1, 363, 1,
            376, 4, 433, 2, 449, 2, 463, 1]
@pytest.fixture
def encoded_seq3():
    # Expected run-length encoding of segmap3 as flat (start, length) pairs,
    # in downward-then-rightward (column-major) traversal order.
    return [16, 1, 18, 2, 22, 2, 34, 2, 38, 2, 46, 1, 52, 2, 61, 1, 68, 2, 76, 1,
            82, 2, 86, 2, 91, 1, 98, 2, 102, 2, 106, 1, 149, 2, 165, 2, 179, 6,
            197, 2, 207, 1, 213, 2, 222, 1, 234, 2, 237, 1, 252, 2, 267, 1, 270, 2,
            282, 1, 357, 1, 447, 2, 463, 1]
def test_run_length_encoder_encode_downward_then_rightward(segmap1):
    """Column-major encoding of the small map yields the known sequence."""
    encoder = RunLengthEncoder(direction="downward_then_rightward")
    assert encoder.encode(segmap1) == [1, 1, 5, 3, 11, 2]
def test_run_length_encoder_encode_rightward_then_downward(segmap1):
    """Row-major encoding of the small map yields the known sequence."""
    encoder = RunLengthEncoder(direction="rightward_then_downward")
    assert encoder.encode(segmap1) == [1, 1, 3, 1, 6, 1, 8, 1, 10, 1, 12, 1]
def test_run_length_encoder_encode_hard_case1(segmap2, encoded_seq2):
    """Encoding the many-run map matches its precomputed sequence."""
    encoder = RunLengthEncoder(direction="downward_then_rightward")
    assert encoder.encode(segmap2) == encoded_seq2
def test_run_length_encoder_encode_hard_case2(segmap3, encoded_seq3):
    """Encoding the scattered-run map matches its precomputed sequence."""
    encoder = RunLengthEncoder(direction="downward_then_rightward")
    assert encoder.encode(segmap3) == encoded_seq3
def test_run_length_encoder_decode_downward_then_rightward(segmap1):
    """Decoding the known column-major sequence reproduces segmap1."""
    decoded = RunLengthEncoder(direction="downward_then_rightward").decode([1, 1, 5, 3, 11, 2], (4, 3))
    np.testing.assert_almost_equal(decoded, segmap1)
def test_run_length_encoder_decode_rightward_then_downward(segmap1):
    """Decoding the known row-major sequence reproduces segmap1."""
    decoded = RunLengthEncoder(direction="rightward_then_downward").decode([1, 1, 3, 1, 6, 1, 8, 1, 10, 1, 12, 1], (4, 3))
    np.testing.assert_almost_equal(decoded, segmap1)
| 54.69
| 122
| 0.433534
| 1,293
| 5,469
| 1.784223
| 0.085073
| 0.616385
| 0.846554
| 1.04378
| 0.685306
| 0.625054
| 0.581708
| 0.463806
| 0.460338
| 0.425228
| 0
| 0.341977
| 0.323094
| 5,469
| 99
| 123
| 55.242424
| 0.281199
| 0
| 0
| 0.197368
| 0
| 0
| 0.025233
| 0.025233
| 0
| 0
| 0
| 0
| 0.078947
| 1
| 0.144737
| false
| 0
| 0.039474
| 0.065789
| 0.25
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3438a44c4d2673a95a020242d9d25e95059dc8c9
| 107
|
py
|
Python
|
8KYU/enough.py
|
yaznasivasai/python_codewars
|
25493591dde4649dc9c1ec3bece8191a3bed6818
|
[
"MIT"
] | 4
|
2021-07-17T22:48:03.000Z
|
2022-03-25T14:10:58.000Z
|
8KYU/enough.py
|
yaznasivasai/python_codewars
|
25493591dde4649dc9c1ec3bece8191a3bed6818
|
[
"MIT"
] | null | null | null |
8KYU/enough.py
|
yaznasivasai/python_codewars
|
25493591dde4649dc9c1ec3bece8191a3bed6818
|
[
"MIT"
] | 3
|
2021-06-14T14:18:16.000Z
|
2022-03-16T06:02:02.000Z
|
def enough(cap: int, on: int, wait: int) -> int:
    """Return how many waiting people cannot board.

    cap: bus capacity; on: passengers already on board; wait: people waiting.
    Returns 0 when everyone fits, otherwise the number left behind.
    """
    # max(0, ...) states the intent directly; the original
    # `0 if cap >= on + wait else abs(cap - (on + wait))` computed the same
    # value through a redundant branch and abs().
    return max(0, on + wait - cap)
| 53.5
| 58
| 0.598131
| 20
| 107
| 3.2
| 0.55
| 0.15625
| 0.28125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011765
| 0.205607
| 107
| 2
| 58
| 53.5
| 0.741176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
3459a84067efa2cf362845fd0fdf00505050dbde
| 39
|
py
|
Python
|
backend/backend/settings/test.py
|
shearichard/django-react-todo-demo
|
04b2222e24d02dbb37a063135311652f4ceb6710
|
[
"Apache-2.0"
] | null | null | null |
backend/backend/settings/test.py
|
shearichard/django-react-todo-demo
|
04b2222e24d02dbb37a063135311652f4ceb6710
|
[
"Apache-2.0"
] | null | null | null |
backend/backend/settings/test.py
|
shearichard/django-react-todo-demo
|
04b2222e24d02dbb37a063135311652f4ceb6710
|
[
"Apache-2.0"
] | null | null | null |
#settings/test.py
from .base import *
| 9.75
| 19
| 0.717949
| 6
| 39
| 4.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 39
| 3
| 20
| 13
| 0.848485
| 0.410256
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cadd6cccc2eef581a48cc0d479be4530d93893e1
| 12,738
|
py
|
Python
|
tests/unit/states/test_postgres.py
|
KevinJohn-GH/salt-3003.2
|
92c78e6806cbf1e80f13727dfd5a86ff26b16a9e
|
[
"Apache-2.0"
] | 2
|
2015-08-21T01:05:03.000Z
|
2015-09-02T07:30:45.000Z
|
tests/unit/states/test_postgres.py
|
KevinJohn-GH/salt-3003.2
|
92c78e6806cbf1e80f13727dfd5a86ff26b16a9e
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/states/test_postgres.py
|
KevinJohn-GH/salt-3003.2
|
92c78e6806cbf1e80f13727dfd5a86ff26b16a9e
|
[
"Apache-2.0"
] | 1
|
2021-11-30T06:51:52.000Z
|
2021-11-30T06:51:52.000Z
|
import salt.modules.postgres as postgresmod
import salt.states.postgres_extension as postgres_extension
import salt.states.postgres_schema as postgres_schema
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, Mock, patch
from tests.support.unit import TestCase
class PostgresExtensionTestCase(TestCase, LoaderModuleMockMixin):
    """Unit tests for the postgres_extension state (present/absent)."""
    def setup_loader_modules(self):
        # Pretend the pgsql binary exists so the module's path checks pass.
        patcher = patch("salt.utils.path.which", Mock(return_value="/usr/bin/pgsql"))
        patcher.start()
        self.addCleanup(patcher.stop)
        return {
            postgres_extension: {
                "__grains__": {"os_family": "Linux"},
                "__salt__": {
                    "config.option": Mock(),
                    "cmd.run_all": Mock(),
                    "file.chown": Mock(),
                    "file.remove": Mock(),
                },
                "__opts__": {"test": False},
            },
        }
    def test_present_failed(self):
        """
        present() reports failure when create_extension returns False, both
        for a fresh install and for an upgrade.
        """
        with patch.dict(
            postgres_extension.__salt__,
            {
                # First call: extension missing; second call: needs upgrade.
                "postgres.create_metadata": Mock(
                    side_effect=[
                        [postgresmod._EXTENSION_NOT_INSTALLED],
                        [
                            postgresmod._EXTENSION_TO_MOVE,
                            postgresmod._EXTENSION_INSTALLED,
                        ],
                    ]
                ),
                "postgres.create_extension": Mock(side_effect=[False, False]),
            },
        ):
            ret = postgres_extension.present("foo")
            self.assertEqual(
                ret,
                {
                    "comment": "Failed to install extension foo",
                    "changes": {},
                    "name": "foo",
                    "result": False,
                },
            )
            ret = postgres_extension.present("foo")
            self.assertEqual(
                ret,
                {
                    "comment": "Failed to upgrade extension foo",
                    "changes": {},
                    "name": "foo",
                    "result": False,
                },
            )
    def test_present(self):
        """
        present() installs, then no-ops, then upgrades, following the three
        mocked metadata states in order.
        """
        with patch.dict(
            postgres_extension.__salt__,
            {
                "postgres.create_metadata": Mock(
                    side_effect=[
                        [postgresmod._EXTENSION_NOT_INSTALLED],
                        [postgresmod._EXTENSION_INSTALLED],
                        [
                            postgresmod._EXTENSION_TO_MOVE,
                            postgresmod._EXTENSION_INSTALLED,
                        ],
                    ]
                ),
                "postgres.create_extension": Mock(side_effect=[True, True, True]),
            },
        ):
            ret = postgres_extension.present("foo")
            self.assertEqual(
                ret,
                {
                    "comment": "The extension foo has been installed",
                    "changes": {"foo": "Installed"},
                    "name": "foo",
                    "result": True,
                },
            )
            ret = postgres_extension.present("foo")
            self.assertEqual(
                ret,
                {
                    "comment": "Extension foo is already present",
                    "changes": {},
                    "name": "foo",
                    "result": True,
                },
            )
            ret = postgres_extension.present("foo")
            self.assertEqual(
                ret,
                {
                    "comment": "The extension foo has been upgraded",
                    "changes": {"foo": "Upgraded"},
                    "name": "foo",
                    "result": True,
                },
            )
    def test_presenttest(self):
        """
        With __opts__['test'] = True, present() only reports what would be
        done (result None) and records no changes.
        """
        with patch.dict(
            postgres_extension.__salt__,
            {
                "postgres.create_metadata": Mock(
                    side_effect=[
                        [postgresmod._EXTENSION_NOT_INSTALLED],
                        [postgresmod._EXTENSION_INSTALLED],
                        [
                            postgresmod._EXTENSION_TO_MOVE,
                            postgresmod._EXTENSION_INSTALLED,
                        ],
                    ]
                ),
                "postgres.create_extension": Mock(side_effect=[True, True, True]),
            },
        ):
            with patch.dict(postgres_extension.__opts__, {"test": True}):
                ret = postgres_extension.present("foo")
                self.assertEqual(
                    ret,
                    {
                        "comment": "Extension foo is set to be installed",
                        "changes": {},
                        "name": "foo",
                        "result": None,
                    },
                )
                ret = postgres_extension.present("foo")
                self.assertEqual(
                    ret,
                    {
                        "comment": "Extension foo is already present",
                        "changes": {},
                        "name": "foo",
                        "result": True,
                    },
                )
                ret = postgres_extension.present("foo")
                self.assertEqual(
                    ret,
                    {
                        "comment": "Extension foo is set to be upgraded",
                        "changes": {},
                        "name": "foo",
                        "result": None,
                    },
                )
    def test_absent(self):
        """
        absent() removes an installed extension, then succeeds as a no-op
        once the extension is already gone.
        """
        with patch.dict(
            postgres_extension.__salt__,
            {
                "postgres.is_installed_extension": Mock(side_effect=[True, False]),
                "postgres.drop_extension": Mock(side_effect=[True, True]),
            },
        ):
            ret = postgres_extension.absent("foo")
            self.assertEqual(
                ret,
                {
                    "comment": "Extension foo has been removed",
                    "changes": {"foo": "Absent"},
                    "name": "foo",
                    "result": True,
                },
            )
            ret = postgres_extension.absent("foo")
            self.assertEqual(
                ret,
                {
                    "comment": (
                        "Extension foo is not present, " "so it cannot be removed"
                    ),
                    "changes": {},
                    "name": "foo",
                    "result": True,
                },
            )
    def test_absent_failed(self):
        """
        absent() reports failure when drop_extension returns False.
        """
        with patch.dict(postgres_extension.__opts__, {"test": False}):
            with patch.dict(
                postgres_extension.__salt__,
                {
                    "postgres.is_installed_extension": Mock(side_effect=[True, True]),
                    "postgres.drop_extension": Mock(side_effect=[False, False]),
                },
            ):
                ret = postgres_extension.absent("foo")
                self.assertEqual(
                    ret,
                    {
                        "comment": "Extension foo failed to be removed",
                        "changes": {},
                        "name": "foo",
                        "result": False,
                    },
                )
    def test_absent_failedtest(self):
        """
        With __opts__['test'] = True, absent() only reports the pending
        removal (result None).
        """
        with patch.dict(
            postgres_extension.__salt__,
            {
                "postgres.is_installed_extension": Mock(side_effect=[True, True]),
                "postgres.drop_extension": Mock(side_effect=[False, False]),
            },
        ):
            with patch.dict(postgres_extension.__opts__, {"test": True}):
                ret = postgres_extension.absent("foo")
                self.assertEqual(
                    ret,
                    {
                        "comment": "Extension foo is set to be removed",
                        "changes": {},
                        "name": "foo",
                        "result": None,
                    },
                )
class PostgresSchemaTestCase(TestCase, LoaderModuleMockMixin):
    """Unit tests for the postgres_schema state (present/absent)."""
    def setup_loader_modules(self):
        # Pretend the pgsql binary exists so the module's path checks pass.
        patcher = patch("salt.utils.path.which", Mock(return_value="/usr/bin/pgsql"))
        patcher.start()
        self.addCleanup(patcher.stop)
        return {
            postgres_schema: {
                "__grains__": {"os_family": "Linux"},
                "__salt__": {
                    "config.option": Mock(),
                    "cmd.run_all": Mock(),
                    "file.chown": Mock(),
                    "file.remove": Mock(),
                },
                "__opts__": {"test": False},
            },
        }
    def test_present_creation(self):
        """present() creates a missing schema and calls schema_create once."""
        with patch.dict(
            postgres_schema.__salt__,
            {
                "postgres.schema_get": Mock(return_value=None),
                "postgres.schema_create": MagicMock(),
            },
        ):
            ret = postgres_schema.present("dbname", "foo")
            self.assertEqual(
                ret,
                {
                    "comment": "Schema foo has been created in database dbname",
                    "changes": {"foo": "Present"},
                    "dbname": "dbname",
                    "name": "foo",
                    "result": True,
                },
            )
            self.assertEqual(
                postgres_schema.__salt__["postgres.schema_create"].call_count, 1
            )
    def test_present_nocreation(self):
        """present() is a no-op when the schema already exists."""
        with patch.dict(
            postgres_schema.__salt__,
            {
                "postgres.schema_get": Mock(
                    return_value={"foo": {"acl": "", "owner": "postgres"}}
                ),
                "postgres.schema_create": MagicMock(),
            },
        ):
            ret = postgres_schema.present("dbname", "foo")
            self.assertEqual(
                ret,
                {
                    "comment": "Schema foo already exists in database dbname",
                    "changes": {},
                    "dbname": "dbname",
                    "name": "foo",
                    "result": True,
                },
            )
            self.assertEqual(
                postgres_schema.__salt__["postgres.schema_create"].call_count, 0
            )
    def test_absent_remove(self):
        """absent() drops an existing schema and calls schema_remove once."""
        with patch.dict(
            postgres_schema.__salt__,
            {
                "postgres.schema_exists": Mock(return_value=True),
                "postgres.schema_remove": MagicMock(),
            },
        ):
            ret = postgres_schema.absent("dbname", "foo")
            self.assertEqual(
                ret,
                {
                    "comment": "Schema foo has been removed from database dbname",
                    "changes": {"foo": "Absent"},
                    "dbname": "dbname",
                    "name": "foo",
                    "result": True,
                },
            )
            self.assertEqual(
                postgres_schema.__salt__["postgres.schema_remove"].call_count, 1
            )
    def test_absent_noremove(self):
        """absent() is a successful no-op when the schema is not present."""
        with patch.dict(
            postgres_schema.__salt__,
            {
                "postgres.schema_exists": Mock(return_value=False),
                "postgres.schema_remove": MagicMock(),
            },
        ):
            ret = postgres_schema.absent("dbname", "foo")
            self.assertEqual(
                ret,
                {
                    "comment": "Schema foo is not present in database dbname,"
                    " so it cannot be removed",
                    "changes": {},
                    "dbname": "dbname",
                    "name": "foo",
                    "result": True,
                },
            )
            self.assertEqual(
                postgres_schema.__salt__["postgres.schema_remove"].call_count, 0
            )
| 34.89863
| 86
| 0.427775
| 911
| 12,738
| 5.725576
| 0.127333
| 0.072469
| 0.055215
| 0.064417
| 0.855445
| 0.841641
| 0.815376
| 0.783359
| 0.783359
| 0.7824
| 0
| 0.000593
| 0.470011
| 12,738
| 364
| 87
| 34.994505
| 0.772034
| 0.035249
| 0
| 0.587879
| 0
| 0
| 0.171021
| 0.047062
| 0
| 0
| 0
| 0
| 0.060606
| 1
| 0.036364
| false
| 0
| 0.018182
| 0
| 0.066667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
cae30d465989bffe17001064f4795280b65816bf
| 6,870
|
py
|
Python
|
runs/run_server_mnist copy.py
|
aliborji/ShapeDefence
|
92da19bb195b5161d997f6ee1cc777b07a714f6f
|
[
"MIT"
] | null | null | null |
runs/run_server_mnist copy.py
|
aliborji/ShapeDefence
|
92da19bb195b5161d997f6ee1cc777b07a714f6f
|
[
"MIT"
] | 1
|
2022-03-12T00:40:21.000Z
|
2022-03-12T00:40:21.000Z
|
runs/run_server_mnist copy.py
|
aliborji/ShapeDefense
|
92da19bb195b5161d997f6ee1cc777b07a714f6f
|
[
"MIT"
] | null | null | null |
from lib import *
from config import *
from model import build_model, build_model_mnist
from utils import *
import torchattacks
from torchattacks import PGD, FGSM
import os
# edge_detect = edge_detector.detect_edge_mnist

# ---- experiment configuration --------------------------------------------
NUM_EPOCHS = 20
BATCH_SIZE = 100
# BUG FIX: `train_phase` was commented out but still referenced below
# (`if train_phase:`), which raised NameError on the first loop iteration.
# Restored with its original value.
train_phase = True
attack_type = 'FGSM'
net_type = 'grayedge'

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

data_dir = 'MNIST'
# makedirs builds both path levels in one call; exist_ok=True replaces the
# racy exists()/mkdir() pair and the doubled './' prefix of the original
# (`'././{data_dir}/...'`).
os.makedirs(f'./{data_dir}/{attack_type}', exist_ok=True)

# All accuracy figures are also appended to this report file.
fo = open(f'./{data_dir}/{attack_type}/results_{net_type}.txt', 'w+')

# --------------------------------------------------------------------------
# Train a model first
save_path = f'./{data_dir}/{attack_type}/mnist_{net_type}.pth'
net, dataloader_dict, criterior, optimizer = build_model_mnist(net_type=net_type)
net.to(device)
train_model(net, dataloader_dict, criterior, optimizer, NUM_EPOCHS, save_path)

# --------------------------------------------------------------------------
# Test the clean model on clean images
net, dataloader_dict, criterior, optimizer = build_model_mnist(net_type=net_type)
load_model(net, save_path)
net.to(device)
acc, images = test_model_clean(net, dataloader_dict)
print('Accuracy of original model on clean images: %f ' % acc)
fo.write('Accuracy of original model on clean images: %f \n' % acc)

for eps_t in [8, 32, 64]:
    print(f'eps_t={eps_t}')
    fo.write(f'eps_t={eps_t} \n')
    epsilons = [eps_t / 255]

    # Test the clean model under attack
    net, dataloader_dict, criterior, optimizer = build_model_mnist(net_type=net_type)
    load_model(net, save_path)
    net.to(device)
    acc_attack, images = test_model_attack(net, dataloader_dict, epsilons, attack_type, net_type, redetect_edge=False)
    print('Accuracy of clean model on adversarial images: %f %%' % acc_attack[0])
    fo.write('Accuracy of clean model on adversarial images: %f \n' % acc_attack[0])

    net, dataloader_dict, criterior, optimizer = build_model_mnist(net_type=net_type)
    load_model(net, save_path)
    net.to(device)
    # Edge re-detection only applies to the edge-channel network variant.
    if net_type.lower() == 'grayedge':
        acc_attack, images = test_model_attack(net, dataloader_dict, epsilons, attack_type, net_type, redetect_edge=True)
        print('Accuracy of clean model on adversarial images with redetect_edge: %f %%' % acc_attack[0])
        fo.write('Accuracy of clean model on adversarial images with redetect_edge: %f \n' % acc_attack[0])

    # ----------------------------------------------------------------------
    # Now perform adversarial training
    save_path_robust = f'./{data_dir}/{attack_type}/mnist_{net_type}_{eps_t}_robust_{eps_t}.pth'
    if train_phase:
        net_robust, dataloader_dict, criterior, optimizer = build_model_mnist(net_type)
        net_robust.to(device)
        train_robust_model(net_robust, dataloader_dict, criterior, optimizer, NUM_EPOCHS, save_path_robust, attack_type, eps=eps_t/255, net_type=net_type, redetect_edge=False)

    # ----------------------------------------------------------------------
    # Test the robust model on clean and attacks
    net_robust, dataloader_dict, criterior, optimizer = build_model_mnist(net_type)
    load_model(net_robust, save_path_robust)
    net_robust.to(device)
    acc, images = test_model_clean(net_robust, dataloader_dict)
    print('Accuracy of robust model on clean images: %f %%' % acc)
    fo.write('Accuracy of robust model on clean images: %f \n' % acc)

    net_robust, dataloader_dict, criterior, optimizer = build_model_mnist(net_type)
    load_model(net_robust, save_path_robust)
    net_robust.to(device)
    acc_attack, images = test_model_attack(net_robust, dataloader_dict, epsilons, attack_type, net_type, redetect_edge=False)
    print('Accuracy of robust model on adversarial images: %f %%' % acc_attack[0])
    fo.write('Accuracy of robust model on adversarial images: %f \n' % acc_attack[0])

    net_robust, dataloader_dict, criterior, optimizer = build_model_mnist(net_type)
    load_model(net_robust, save_path_robust)
    net_robust.to(device)
    if net_type == 'grayedge':
        acc_attack, images = test_model_attack(net_robust, dataloader_dict, epsilons, attack_type, net_type, redetect_edge=True)
        print('Accuracy of robust model on adversarial images with redetect_edge: %f %%' % acc_attack[0])
        fo.write('Accuracy of robust model on adversarial images with redetect_edge: %f \n' % acc_attack[0])

    # ----------------------------------------------------------------------
    # Now perform adversarial training with redetect (edge-input nets only)
    if not (net_type.lower() in ['grayedge', 'rgbedge']):
        continue
    save_path_robust = f'./{data_dir}/{attack_type}/mnist_{net_type}_{eps_t}_robust_{eps_t}_redetect.pth'
    net_robust, dataloader_dict, criterior, optimizer = build_model_mnist(net_type)
    net_robust.to(device)
    train_robust_model(net_robust, dataloader_dict, criterior, optimizer, NUM_EPOCHS, save_path_robust, attack_type, eps=eps_t/255, net_type=net_type, redetect_edge=True)

    net_robust, dataloader_dict, criterior, optimizer = build_model_mnist(net_type)
    load_model(net_robust, save_path_robust)
    net_robust.to(device)
    acc, images = test_model_clean(net_robust, dataloader_dict)
    print('Accuracy of robust redetect model on clean images: %f %%' % acc)
    fo.write('Accuracy of robust redetect model on clean images: %f \n' % acc)

    net_robust, dataloader_dict, criterior, optimizer = build_model_mnist(net_type)
    load_model(net_robust, save_path_robust)
    net_robust.to(device)
    acc_attack, images = test_model_attack(net_robust, dataloader_dict, epsilons, attack_type, net_type, redetect_edge=False)
    print('Accuracy of robust redetect model on adversarial images: %f %%' % acc_attack[0])
    fo.write('Accuracy of robust redetect model on adversarial images: %f \n' % acc_attack[0])

    net_robust, dataloader_dict, criterior, optimizer = build_model_mnist(net_type)
    load_model(net_robust, save_path_robust)
    net_robust.to(device)
    acc_attack, images = test_model_attack(net_robust, dataloader_dict, epsilons, attack_type, net_type, redetect_edge=True)
    print('Accuracy of robust redtect model on adversarial images with redetect_edge: %f %%' % acc_attack[0])
    fo.write('Accuracy of robust redetect model on adversarial images with redetect_edge: %f \n' % acc_attack[0])

fo.close()
| 40.175439
| 175
| 0.65968
| 917
| 6,870
| 4.655398
| 0.103599
| 0.055751
| 0.071211
| 0.086203
| 0.875615
| 0.8461
| 0.838838
| 0.837901
| 0.786835
| 0.781213
| 0
| 0.005438
| 0.143377
| 6,870
| 170
| 176
| 40.411765
| 0.719966
| 0.144978
| 0
| 0.419355
| 0
| 0
| 0.255675
| 0.051033
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.075269
| 0
| 0.075269
| 0.107527
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
caf9f48ce823919d0867300f21f52bf929fad732
| 41
|
py
|
Python
|
deep_rl/deepq/__init__.py
|
jkulhanek/deep-rl-pytorch
|
6fa7ceee8524f002d4a8d93295b231f6b9b7c29c
|
[
"MIT"
] | 7
|
2019-03-24T19:51:11.000Z
|
2022-01-27T17:20:29.000Z
|
deep_rl/deepq/__init__.py
|
jkulhanek/deep-rl-pytorch
|
6fa7ceee8524f002d4a8d93295b231f6b9b7c29c
|
[
"MIT"
] | null | null | null |
deep_rl/deepq/__init__.py
|
jkulhanek/deep-rl-pytorch
|
6fa7ceee8524f002d4a8d93295b231f6b9b7c29c
|
[
"MIT"
] | 4
|
2020-04-11T01:06:24.000Z
|
2021-07-18T01:22:36.000Z
|
from .dqn import DeepQTrainer, DeepQAgent
| 41
| 41
| 0.853659
| 5
| 41
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097561
| 41
| 1
| 41
| 41
| 0.945946
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1b14cc076a7df40c3fda8b235627962275ac4b32
| 25
|
py
|
Python
|
elang/word2vec/utils/__init__.py
|
adi-christian/elang
|
b3e0d73745c57ec060b2ecfeefa29fa3bfe4a539
|
[
"CC0-1.0"
] | 27
|
2020-01-30T01:57:08.000Z
|
2021-08-01T15:26:50.000Z
|
elang/word2vec/utils/__init__.py
|
adi-christian/elang
|
b3e0d73745c57ec060b2ecfeefa29fa3bfe4a539
|
[
"CC0-1.0"
] | 4
|
2020-01-29T09:45:46.000Z
|
2020-03-25T07:59:55.000Z
|
elang/word2vec/utils/__init__.py
|
adi-christian/elang
|
b3e0d73745c57ec060b2ecfeefa29fa3bfe4a539
|
[
"CC0-1.0"
] | 41
|
2020-01-30T01:57:17.000Z
|
2021-12-29T00:53:32.000Z
|
from .cleansing import *
| 25
| 25
| 0.76
| 3
| 25
| 6.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 25
| 1
| 25
| 25
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1b6300558338328343a8718d8eb5df6db44842d7
| 10,483
|
py
|
Python
|
flask/hello.py
|
dayoungMM/TIL
|
b844ef5621657908d4c256cdfe233462dd075e8b
|
[
"MIT"
] | null | null | null |
flask/hello.py
|
dayoungMM/TIL
|
b844ef5621657908d4c256cdfe233462dd075e8b
|
[
"MIT"
] | null | null | null |
flask/hello.py
|
dayoungMM/TIL
|
b844ef5621657908d4c256cdfe233462dd075e8b
|
[
"MIT"
] | null | null | null |
from flask import Flask, escape, request, render_template
import random
# Flask application object; every @app.route handler below registers on it.
app = Flask(__name__)
@app.route('/')
def hello():
    """Greet by the ?name= query parameter, defaulting to "World"."""
    visitor = request.args.get("name", "World")
    # escape() HTML-escapes the user-supplied value before interpolation.
    return f'Hello, {escape(visitor)}!'
@app.route('/fstring')
def fstring():
    """Return a fixed Korean self-introduction built with an f-string."""
    author = "문다영"
    return f"제이름은 {author}입니다."
@app.route('/hi')
def hi():
    """Render hi.html with a fixed Korean name."""
    person = "문다영"
    return render_template('hi.html', name=person)
@app.route('/greeting/<string:name>/')
def greeting(name):
    """Render greeting.html for the name embedded in the URL path."""
    return render_template('greeting.html', def_name=name)
@app.route('/cube/<int:num>')
def cube(num):
    """Render cube.html showing the URL number and its cube."""
    cubed = num ** 3
    return render_template('cube.html', def_num=num, result=cubed)
@app.route('/dinner')
def dinner():
    # Pick tonight's menu at random from a fixed Korean list and render it
    # with a matching picture. The first image is an inline base64 JPEG data
    # URL; the others are external image URLs.
    manu = ['삼각김밥','컵라면','스테이크','마라탕','훠궈']
    dinner = random.choice(manu)
    manu_img = {
        '삼각김밥' : "data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEAAkGBxITEhURExIWExUWGBUaFhYXFxsXGhkZGBcaFxcXFRcbICggGBonGxUYIjEhJSkrLjAuGh8zODMtNygtLysBCgoKDg0OGxAQGjElHyUrLS0tLS0tLS0vLS0tNzcvLSstLTcuLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLf/AABEIAOEA4QMBIgACEQEDEQH/xAAcAAEAAgMBAQEAAAAAAAAAAAAABQYDBAcCAQj/xABCEAABAwIDBQYDBwMCAwkAAAABAAIRAwQhMQUSQVFhcYEGEyKRobHB0fAHFDJCUuHxI2JygpKiFRYzU7LC0uLwJDRDU2Nzg5Oy0+MkZKTS4v/EABkBAQADAQEAAAAAAAAAAAAAAAABAgMEBf/EAC0RAQEAAgICAgEEAgEDBQEAAAABAhEDIRIxQVETImFxgQSRQrHB0fAUIzJS4fH/2gAMAwEAAhEDEQA/AOjIiICIiAiIgIiICIiAiIgIiICIiAiIgIiICIiAiIgIiICIiAiIgIiICIiAiIgIiICIiAiIgIiIPhKr2BxlU4+rSLyWNEhvC7WkfmVPv1CgaAazG1Xm2fs2t6xTkx7fRZ59Z9XT8PrWe5/L/uLCiItHMIiICIiAiIgIiICIiAiIgIiICIiAiIgIiICIiAiIgIiIMdU3HqqltzERj8KzmXOP8Aoc0fqrJin3HmqBvriKrcZQq0afaljRbMGgnM60k8isuLvXLvHb8H4fHfF/Tl+HQMLVmW8R+S2FU93MdiKg7WswUjMBgIdA4knnceytbXSJV8btx2afURFZAiIgIiICIiAiIgIiICIiAiIgIiICIiAiIgIiICx1qzWAFzg2SAJMSToBPFZFzj7QtsF1UYdhtTguj8Zv8AQEesoLRvHSzhgvIcXdLNcRm6SAuWbSw7AGOGYHs8xaSLd5oBtwOZ3spnY+3Hl2R5L7QJi2ouSJ+YnmofH4gkMpZIMRM83Ai0a8NVxcXw+PV66ev8H4/BLOm/+7NrbddhqFGjTe6m5wNR5Zqc5hgJ4CGFdN3X2sHUaWd8ucwOk6kEmD52XOvtCoNZhZFnF7B5taMovw8P1Uhu/TIwtJ+jiwZb8uIXTjh4dSPN4nE8eVyvd1kFFzjYe81Wk8Co81KZIDg4yWz8zT+YXRKVQOAc0ggiQRcEK8rJ7REUgiIgIiICIiAiIgIiICIiAiIgIiICIiAiIg0tsY8UKL6puQO6PxONmt9TC4riqrnOc513OJJPNxMk/VXn7Q9p99lIXDQ4x/eSACegGb1PmqG/nbiSSdPNBZtzMPTFRtQgl2SoeGUEFrRFtYJWfbwDqdKvAL2PLcx1MOcIMdWyq/sLaYq0izMKbg/7h8/MRLmE8JjXrCY+vUps7XEuMhxFOmdXO4uIHyg8eK58srbrT0OFhJq+Lp1nr/b67TO3tm1a+Be4UGueAHNBJOUWvc+KCT0Cqz9omnXaxxmmxjKYg+HIBeNJzZvP6iznegtwbqgdEME+brT9VzM4yC5jm94EgnWSLStvC4c7/FXQMRh6mXtaMVWHho5h1yuYbReQrR9nW2s2fDPGR0ktb7ZgJ/1e653u1tc96jMPbYHg5gk3HEjX+GZHBVqtGs3EWeW1BdupjUGNSWkhNIdtRY8NXa9jXtMtcAQeYIkLIrIEREBERAREQEREBERAREQEREBERAREQFixVXKwu6W8+CyqkfaJtota3DsdBd3nkcG3AHQkz7dUFR3kxvaV3FtwO40841PuSqxtKv3XMaec9YEklbeIr5Glx1IOUclEsomHVHXaGuuDq4juj3MkdEvROPWPGRzGBhnWXCIh3I9QF5x2NqVCC9xcQAJJmw0Cx0TI9T+6VWWKp/K2v/r/AHecRWfkyScp4Tb2WTeXBgOp1meGqxrh/kAA8e8H1WNj5MfzVSuGworUamHMEtMsdyOhA6H9Vpj0ZZ/NUJg8SWuZWbZ
zC2eFhof09l0kPa+C23d10sbz6GTPUrlTHOY6/AxH0IKum7+0yB2ZcNIaeJbeD6eE+cqmUqI6duDtYkOwzhGWcnoe8z01A5FXJcd2djsj21aQLcry7nPGDGtpGsRz0XW8Fim1abajfC9oI9RN1MKzoiKUCIiAiIgIiICIiAiIgIiICIiAiIg18biQxskxr7ASSuMbXxxrVX1DoTPkNGj2AV1382yG03MBu85Gj+3Vx9Yj1XL9q4rI3KDfV37INXHVXVHwBpoPLh5rxiRla2kPlBLurzr7ABvp1Unu9sus4doxjnF4inYkZhMvFvlv3uZHVSGH3DxTvG6nTkaF2Y+zbfVQmdVPoiyyv8JM8BHuNVd8JuAwGHYqR0YBw6uM3Uofs2wrhAxb5I/sP0TGW4tM8pOJtzGkLe63cBiezeCdAR7dPT8ldq/2XVGj7vFMfyD6Zb9Q535Ku7S3SxtEEmiXtB8VM5/oO8B5hTJZFM7LlbGhvZg2NPbNEtq3GlnAXnlNj7qF2Y8kZJynVh5O5eRVo2M0VqVTDVI0OWflJt6QfzVPxNB1F5YbEGPUFWsUXvdza2cdk4BlRodmETpfM3rrPQLpG4u0w5j6AJLqcEA6wQJHW836rimFxZEYin42+Ic+p8v2Ku25G2ntqtqVMt20pM3c10sk9QRfzHJU0s7GxwIBGhX1YMNxjTX1Ov7+pWdSgREQEREBERAREQEREBERAREQFG7w7Tbh6Dqh8mjiSbABSRXNd/tr9pVFBulM3P8AcRf2BA8yUFT2liy8urPMkE5Z08vILe2LuW59I4nFS1pLSGfMWkjvO5CLganpxk90dgivV7WoJo0TDWnR9TW/NrbeZjqugYulmaWniIU63CXVQ1bAAGmKDQ0MgHLAGUGwPEjUwo/Hdqx2UtGQ5u91ykgEcRY2hTrKgpjLoBpNyef8Kr+8G0mnIZLQ2oJkeIHQArKWTKW92urlLJ2fWAGmMxDHEOMuLrjugEQ4AeK1uHRe8JhA1zi6q0i4b3uJgAxPU+02so+jjadIEGplzAOBktMQRcAGR3vcKWwzO1YXMpNLXk96bTdoN2i4M+sr0vMxvS/h53l5Tnlj+Wxu3UdDw4zB5zpy91vMf3CebjHvH6LQwWEdSBaCCXW8tL/Rbz23awaNXPxbLldN+HLMebV2tsGjX75blqDSo2zh58HDoVzjf3dapT++IzTYubobWd/abXC66RoF8xVFlRppvAc1wgg8VXS+35owGJLHwdDYhTDK7qZa1lwXtNM2sSYcCTwylw1GpK2d+d2jhKxAJLHXpu5tmL/3A2PoeKjdl18w7N3/ADB4EKiY75untLtKLCTLmgNf0I59VY1xnc3appYhrOFSc/LNfvDhJkey69g6wcFCWwiIgIiICIiAiIgIi8PdwQRu3tpCjSdUPCzRzdwHuuGbw4jNmky+QXRxc+SfyHur59oG1wX9mD3KQk9XnQeg/NUHdun2mKY94zTUbaJBveVFE9uRuw74gVarcgac2R1nGBI7vAL5sbaDhj3Vg4h9XtRJAIic0X8gpXZe0S6t2dZ3/eKb3BjptUbmEsefxRIE84VZwlcjGtAAblquAA9Wxc81ph80Vz+WulMx2Ikg1NP7G9D+Tp9CvrcbiDq4EcPuweevLRYKuYvc45ROUXBbYcS2eZjqVibSJIHdIPS8wOMgk9ZXZqezj3fds08XXIJDmATHhGuWToDw/JejisTYl7bxowE8SPlubLWw094Rca6nTM02m/usjmP07oJB4G/rJ/gUWTfomW69WY4jE/8SLx4G/ijlyMp22J07bj+FvKeX8hYS10XgkZotHCYIkR5ELJldJBIgk8NAQZAvHy805e32Tz9/ufE4i47bSPkHHSLdV7q1sQJ++M3gZW8FiaHGO9EA6AcxbxR014ry1jiAcwAhvARpHEzw5Jy9vsjn7/AHZ2VK5n752kzlHOEZ2xMGu70j9wvGHoktEuA6Ra44QfovVGkbOmMxMwCZgyCTOsDXy4ym/3Sf3q+Zakx27z5HX62VU3/LxQaHOc7vNMOJMEteLK2Q4
    }
    img_url = manu_img[dinner]
    return render_template('dinner.html', dinner=dinner, img_url = img_url)
@app.route('/movies')
def movies():
    """Render movies.html with a fixed list of Korean movie titles."""
    titles = ['조커', '겨울왕국2', '터미네이터', '어벤져스']
    return render_template('movies.html', movies=titles)
# Start the development server only when run directly (not under a WSGI host).
if __name__ == "__main__":
    app.run(debug=True)
| 190.6
| 8,487
| 0.921969
| 493
| 10,483
| 19.527383
| 0.711968
| 0.005817
| 0.010387
| 0.003532
| 0.011218
| 0.007063
| 0.007063
| 0.007063
| 0.007063
| 0.007063
| 0
| 0.141978
| 0.023085
| 10,483
| 54
| 8,488
| 194.12963
| 0.798067
| 0
| 0
| 0
| 0
| 0.119048
| 0.904694
| 0.810055
| 0
| 1
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.047619
| 0
| 0.380952
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1b89f64494d0261ba77c3a40d7f0d99ef31516c6
| 31
|
py
|
Python
|
MyProgress/First Year/Semester 1/Python for Everyone/Week02/practice-peer-graded-assignment.py
|
nashhymet/ossu-progress
|
1640d1d1e23c9005b15e133b40621d07a916c681
|
[
"MIT"
] | null | null | null |
MyProgress/First Year/Semester 1/Python for Everyone/Week02/practice-peer-graded-assignment.py
|
nashhymet/ossu-progress
|
1640d1d1e23c9005b15e133b40621d07a916c681
|
[
"MIT"
] | null | null | null |
MyProgress/First Year/Semester 1/Python for Everyone/Week02/practice-peer-graded-assignment.py
|
nashhymet/ossu-progress
|
1640d1d1e23c9005b15e133b40621d07a916c681
|
[
"MIT"
] | null | null | null |
# Emit the assignment's greeting line to stdout.
message = "Hello from The Skulk!"
print(message)
| 15.5
| 30
| 0.709677
| 5
| 31
| 4.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 31
| 1
| 31
| 31
| 0.814815
| 0
| 0
| 0
| 0
| 0
| 0.677419
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
1b8eaddf47e7f0a5cb80003547b6a16aef4c6903
| 179
|
py
|
Python
|
nyuki/utils/dtutils.py
|
surycat/nyuki-legacy
|
9ab3a212f2ce34b032984c712c87eb2326bd3960
|
[
"Apache-2.0"
] | 8
|
2016-08-08T12:09:16.000Z
|
2018-08-24T02:32:06.000Z
|
nyuki/utils/dtutils.py
|
surycat/nyuki-legacy
|
9ab3a212f2ce34b032984c712c87eb2326bd3960
|
[
"Apache-2.0"
] | 16
|
2015-10-06T10:24:53.000Z
|
2018-01-23T18:35:37.000Z
|
nyuki/utils/dtutils.py
|
surycat/nyuki-legacy
|
9ab3a212f2ce34b032984c712c87eb2326bd3960
|
[
"Apache-2.0"
] | 9
|
2015-09-30T15:00:44.000Z
|
2018-04-05T21:25:48.000Z
|
from datetime import datetime, timezone
def from_isoformat(iso):
    """Parse 'YYYY-MM-DDTHH:MM:SS.ffffff' into a naive datetime.

    The fractional-seconds part is mandatory — the format string requires
    the '.%f' field, matching datetime.isoformat() output with microseconds.
    """
    iso_format = '%Y-%m-%dT%H:%M:%S.%f'
    return datetime.strptime(iso, iso_format)
def utcnow():
    """Return the current time as a timezone-aware UTC datetime."""
    return datetime.now(tz=timezone.utc)
| 17.9
| 57
| 0.692737
| 27
| 179
| 4.555556
| 0.666667
| 0.227642
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145251
| 179
| 9
| 58
| 19.888889
| 0.803922
| 0
| 0
| 0
| 0
| 0
| 0.111732
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.2
| 0.4
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
1bd0e74ee12aeb60da78b5276fbbc1e620d771e1
| 3,576
|
py
|
Python
|
tests/_async/test_flatten.py
|
christopher-henderson/PyStream
|
8c76a634448d98591aa68087bf78c6cd4da6a6b7
|
[
"MIT"
] | null | null | null |
tests/_async/test_flatten.py
|
christopher-henderson/PyStream
|
8c76a634448d98591aa68087bf78c6cd4da6a6b7
|
[
"MIT"
] | 12
|
2020-10-10T14:28:10.000Z
|
2020-10-28T05:42:34.000Z
|
tests/_async/test_flatten.py
|
christopher-henderson/PyStream
|
8c76a634448d98591aa68087bf78c6cd4da6a6b7
|
[
"MIT"
] | null | null | null |
import unittest
from collections.abc import Iterator
from pstream import AsyncStream
from tests._async.utils import Driver, Method, AI
# Method wrapper that binds AsyncStream.flatten (plus call args) for the
# Driver test harness used below.
class Flatten(Method):
    def __init__(self, args):
        super(Flatten, self).__init__(AsyncStream.flatten, args)
class TestFlatten(unittest.TestCase):
    """Exercises AsyncStream.flatten over mixes of plain and AI-wrapped
    iterables; Driver supplies got/want/exception to each test method.
    NOTE(review): the _a/_s suffixes presumably select async vs sync
    driving inside Driver — confirm against tests._async.utils."""
    @Driver(initial=[range(3), range(3, 6)], method=Flatten(args=[]), want=[0, 1, 2, 3, 4, 5])
    def test__a(self, got=None, want=None, exception=None):
        # Driver passes either a result (got) or the exception it raised.
        if exception is not None:
            raise exception
        self.assertEqual(got, want)
    @Driver(initial=[AI(range(3)), range(3, 6)], method=Flatten(args=[]), want=[0, 1, 2, 3, 4, 5])
    def test1__a(self, got=None, want=None, exception=None):
        if exception is not None:
            raise exception
        self.assertEqual(got, want)
    @Driver(initial=[range(3), AI(range(3, 6))], method=Flatten(args=[]), want=[0, 1, 2, 3, 4, 5])
    def test2__a(self, got=None, want=None, exception=None):
        if exception is not None:
            raise exception
        self.assertEqual(got, want)
    @Driver(initial=[AI(range(3)), AI(range(3, 6))], method=Flatten(args=[]), want=[0, 1, 2, 3, 4, 5])
    def test3__a(self, got=None, want=None, exception=None):
        if exception is not None:
            raise exception
        self.assertEqual(got, want)
    @Driver(initial=[range(3), range(3, 6)], method=Flatten(args=[]), want=[0, 1, 2, 3, 4, 5])
    def test__s(self, got=None, want=None, exception=None):
        if exception is not None:
            raise exception
        self.assertEqual(got, want)
    @Driver(initial=[AI(range(3)), range(3, 6)], method=Flatten(args=[]), want=[0, 1, 2, 3, 4, 5])
    def test1__s(self, got=None, want=None, exception=None):
        if exception is not None:
            raise exception
        self.assertEqual(got, want)
    @Driver(initial=[range(3), AI(range(3, 6))], method=Flatten(args=[]), want=[0, 1, 2, 3, 4, 5])
    def test2__s(self, got=None, want=None, exception=None):
        if exception is not None:
            raise exception
        self.assertEqual(got, want)
    @Driver(initial=[AI(range(3)), AI(range(3, 6))], method=Flatten(args=[]), want=[0, 1, 2, 3, 4, 5])
    def test3__s(self, got=None, want=None, exception=None):
        if exception is not None:
            raise exception
        self.assertEqual(got, want)
    # Async-iterable (but not async-iterator) inner element: __aiter__ only.
    class AIterable:
        def __init__(self, stream: AI):
            self.stream = stream
        def __aiter__(self):
            return self.stream
    @Driver(initial=[AI(range(3)), AIterable(AI(range(3, 6)))], method=Flatten(args=[]), want=[0, 1, 2, 3, 4, 5])
    def test4__s(self, got=None, want=None, exception=None):
        if exception is not None:
            raise exception
        self.assertEqual(got, want)
    # Sync-iterable (but not iterator) inner element: __iter__ only.
    class SIterable:
        def __init__(self, stream: Iterator):
            self.stream = stream
        def __iter__(self):
            return self.stream
    @Driver(initial=[AI(range(3)), SIterable(range(3, 6))], method=Flatten(args=[]), want=[0, 1, 2, 3, 4, 5])
    def test5__s(self, got=None, want=None, exception=None):
        if exception is not None:
            raise exception
        self.assertEqual(got, want)
    @Driver(initial=[AI(range(3)), 1], method=Flatten(args=[]), want=[0, 1, 2, 3, 4, 5])
    def test6__s(self, got=None, want=None, exception=None):
        # A non-iterable element (1) must fail: expect a TypeError, treat
        # success or any other exception as a test failure.
        if exception is None:
            raise Exception
        if isinstance(exception, TypeError):
            return
        raise exception
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| 34.718447
| 113
| 0.605425
| 506
| 3,576
| 4.16996
| 0.118577
| 0.059716
| 0.045498
| 0.109479
| 0.785308
| 0.785308
| 0.785308
| 0.785308
| 0.785308
| 0.746446
| 0
| 0.039513
| 0.242729
| 3,576
| 102
| 114
| 35.058824
| 0.73966
| 0
| 0
| 0.558442
| 0
| 0
| 0.002237
| 0
| 0
| 0
| 0
| 0
| 0.12987
| 1
| 0.207792
| false
| 0
| 0.051948
| 0.025974
| 0.350649
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
84b6ecbf8787f114146f15b5fd9f48af2de15164
| 38
|
py
|
Python
|
astartool/error/__init__.py
|
ASTARCHEN/astartool
|
ff5c20ef76e4961e43486b9a0bdf1f98fbfd48f2
|
[
"Apache-2.0"
] | 1
|
2020-09-16T03:27:28.000Z
|
2020-09-16T03:27:28.000Z
|
astartool/error/__init__.py
|
ASTARCHEN/astartool
|
ff5c20ef76e4961e43486b9a0bdf1f98fbfd48f2
|
[
"Apache-2.0"
] | null | null | null |
astartool/error/__init__.py
|
ASTARCHEN/astartool
|
ff5c20ef76e4961e43486b9a0bdf1f98fbfd48f2
|
[
"Apache-2.0"
] | 2
|
2020-09-07T18:01:01.000Z
|
2022-01-12T14:11:14.000Z
|
from astartool.error._error import *
| 12.666667
| 36
| 0.789474
| 5
| 38
| 5.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131579
| 38
| 2
| 37
| 19
| 0.878788
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ca9a0a69563597e4fdd1a99a86cbd35d89fa17b1
| 162
|
py
|
Python
|
tests/conf.py
|
adxl/todo.io
|
0e4cedbb225bd6c0f12ea45db5eaec0a326044ff
|
[
"MIT"
] | null | null | null |
tests/conf.py
|
adxl/todo.io
|
0e4cedbb225bd6c0f12ea45db5eaec0a326044ff
|
[
"MIT"
] | null | null | null |
tests/conf.py
|
adxl/todo.io
|
0e4cedbb225bd6c0f12ea45db5eaec0a326044ff
|
[
"MIT"
] | null | null | null |
from datetime import date, timedelta
def date_factory(age=0) -> date:
    """Return today's date shifted back by *age* years (365 days per year,
    leap days ignored — same approximation as the original fixture)."""
    offset = timedelta(age * 365)
    return date.today() - offset
| 20.25
| 46
| 0.679012
| 23
| 162
| 4.73913
| 0.652174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030769
| 0.197531
| 162
| 7
| 47
| 23.142857
| 0.807692
| 0.191358
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
047a879e5351338c7e92efb6be35eba30f466fcb
| 146
|
py
|
Python
|
meshio/gmsh/__init__.py
|
samkaksam/meshio
|
decb7a8a97e1edb2de21939a861be18e54bd3a2e
|
[
"MIT"
] | null | null | null |
meshio/gmsh/__init__.py
|
samkaksam/meshio
|
decb7a8a97e1edb2de21939a861be18e54bd3a2e
|
[
"MIT"
] | null | null | null |
meshio/gmsh/__init__.py
|
samkaksam/meshio
|
decb7a8a97e1edb2de21939a861be18e54bd3a2e
|
[
"MIT"
] | null | null | null |
from .common import _gmsh_to_meshio_type as gmsh_to_meshio_type
from .main import read, write
__all__ = ["read", "write", "gmsh_to_meshio_type"]
| 29.2
| 63
| 0.787671
| 24
| 146
| 4.208333
| 0.5
| 0.178218
| 0.356436
| 0.475248
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116438
| 146
| 4
| 64
| 36.5
| 0.782946
| 0
| 0
| 0
| 0
| 0
| 0.191781
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b6cdddd474238d5ec11a7cae517ae41361d942cf
| 1,715
|
py
|
Python
|
prog_praxis/two_integrals_201.py
|
genos/online_problems
|
324597e8b64d74ad96dbece551a8220a1b61e615
|
[
"MIT"
] | 1
|
2020-07-17T13:15:21.000Z
|
2020-07-17T13:15:21.000Z
|
prog_praxis/two_integrals_201.py
|
genos/online_problems
|
324597e8b64d74ad96dbece551a8220a1b61e615
|
[
"MIT"
] | null | null | null |
prog_praxis/two_integrals_201.py
|
genos/online_problems
|
324597e8b64d74ad96dbece551a8220a1b61e615
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from __future__ import division
import math
# Euler-Mascheroni constant, to more digits than a double can hold.
GAMMA = 0.5772156649015328606065

def exp_int(x):
    """Exponential integral Ei(x) for x > 0, summed from the power series
    Ei(x) = gamma + ln(x) + sum_{k>=1} x**k / (k * k!)."""
    result = GAMMA + math.log(x)
    term, k, factorial = x, 1, 1
    # Accumulate terms until they fall below double-precision significance.
    while term > 1e-17:
        result += term
        k += 1
        factorial *= k
        term = x ** k / (k * factorial)
    return result
def log_int(x):
    # Logarithmic integral li(x) = Ei(ln x). The series in exp_int only
    # converges for positive arguments, so this presumably expects x > 1.
    return exp_int(math.log(x))
def offset_log_int(x):
    # li(x) minus a fixed offset; the constant looks like li(2), giving the
    # Li(x) = li(x) - li(2) normalization — NOTE(review): confirm.
    return log_int(x) - 1.04516378011749278
if __name__ == "__main__":
    # Parenthesized single-argument print is a grouped print statement on
    # Python 2 and a function call on Python 3, so the script now runs
    # unchanged under either interpreter (the bare `print "..."` form was
    # a SyntaxError on Python 3). Output text is byte-identical.
    print("Li_offset(1e6) = {0:d}".format(int(round(offset_log_int(1e6)))))
    print("Li_offset(1e21) = {0:d}".format(int(round(offset_log_int(1e21)))))
# Output:
# Li_offset(1e6) = 78627
# Li_offset(1e21) = 21127269486616088576
"""
My Python solution. This blog and my free time studies are drawing me more and
more towards Scheme and Haskell, but since there are two great solutions in
those languages already I felt I should offer a solution in a different
language. I've moved towards the newer "format" instead of the older printf
style string formatting.
[sourcecode lang="python"]
#!/usr/bin/env python
from __future__ import division
import math
GAMMA = 0.5772156649015328606065
def exp_int(x):
s = GAMMA + math.log(x)
term, k, f = x, 1, 1
while term > 1e-17:
s += term
k += 1
f *= k
term = pow(x, k) / (k * f)
return s
def log_int(x):
return exp_int(math.log(x))
def offset_log_int(x):
return log_int(x) - 1.04516378011749278
if __name__ == "__main__":
print "Li_offset(1e6) = {0:d}".format(int(round(offset_log_int(1e6))))
print "Li_offset(1e21) = {0:d}".format(int(round(offset_log_int(1e21))))
# Output:
# Li_offset(1e6) = 78627
# Li_offset(1e21) = 21127269486616088576
[/sourcecode]
"""
| 21.987179
| 78
| 0.653644
| 271
| 1,715
| 3.944649
| 0.321033
| 0.056127
| 0.039289
| 0.048644
| 0.724041
| 0.724041
| 0.724041
| 0.724041
| 0.724041
| 0.724041
| 0
| 0.13244
| 0.216327
| 1,715
| 77
| 79
| 22.272727
| 0.662946
| 0.052478
| 0
| 0
| 0
| 0
| 0.091854
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.105263
| null | null | 0.105263
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8e1334d54e081c898d8bd556f4cf5916d7e01133
| 13,631
|
py
|
Python
|
tests/types/test_enum.py
|
jborean93/psrpcore
|
2c97fa7afec2ea1cab5f0c1ce189f06f2d28b83c
|
[
"MIT"
] | 4
|
2021-06-30T07:40:26.000Z
|
2022-01-13T18:42:32.000Z
|
tests/types/test_enum.py
|
jborean93/psrpcore
|
2c97fa7afec2ea1cab5f0c1ce189f06f2d28b83c
|
[
"MIT"
] | 15
|
2021-06-28T20:58:05.000Z
|
2022-03-03T11:37:33.000Z
|
tests/types/test_enum.py
|
jborean93/psrpcore
|
2c97fa7afec2ea1cab5f0c1ce189f06f2d28b83c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright: (c) 2021, Jordan Borean (@jborean93) <jborean93@gmail.com>
# MIT License (see LICENSE or https://opensource.org/licenses/MIT)
import enum
import re
import xml.etree.ElementTree as ElementTree
import pytest
import psrpcore.types._enum as ps_enum
from psrpcore.types import (
PSInt,
PSInt64,
PSNoteProperty,
PSObject,
PSString,
PSType,
PSUInt,
)
from ..conftest import COMPLEX_ENCODED_STRING, COMPLEX_STRING, deserialize, serialize
@pytest.mark.parametrize("rehydrate", [True, False])
def test_ps_enum(rehydrate):
    """Round-trip a PSEnumBase enum (default integer base) through CLIXML.

    With rehydrate=True deserialization rebuilds the enum instance; with
    rehydrate=False only the primitive PSInt value comes back.
    """
    type_name = "MyEnumRehydrated" if rehydrate else "MyEnum"
    @PSType(type_names=[f"System.{type_name}"], rehydrate=rehydrate)
    class EnumTest(ps_enum.PSEnumBase):
        none = 0
        Value1 = 1
        Value2 = 2
        Value3 = 3
    assert str(EnumTest.none) == "EnumTest.none"
    assert repr(EnumTest.none) == "<EnumTest.none: 0>"
    assert str(EnumTest.Value1) == "EnumTest.Value1"
    assert repr(EnumTest.Value1) == "<EnumTest.Value1: 1>"
    assert str(EnumTest.Value2) == "EnumTest.Value2"
    assert str(EnumTest.Value3) == "EnumTest.Value3"
    # A member is both an enum.Enum and a PSObject; its .value is the PSInt.
    val = EnumTest.Value1
    assert isinstance(val, PSObject)
    assert isinstance(val, enum.Enum)
    assert isinstance(val, ps_enum.PSEnumBase)
    assert not isinstance(val, PSInt)
    assert isinstance(val.value, PSInt)
    assert isinstance(val, int)
    # Serialization embeds the raw value, the type-name chain, and ToString.
    element = serialize(val)
    actual = ElementTree.tostring(element, encoding="utf-8").decode()
    assert (
        actual == f'<Obj RefId="0">'
        f"<I32>1</I32>"
        f'<TN RefId="0">'
        f"<T>System.{type_name}</T>"
        f"<T>System.Enum</T>"
        f"<T>System.ValueType</T>"
        f"<T>System.Object</T>"
        f"</TN>"
        f"<ToString>Value1</ToString>"
        f"</Obj>"
    )
    actual = deserialize(element)
    base_types = [f"System.{type_name}", "System.Enum", "System.ValueType", "System.Object"]
    if rehydrate:
        assert actual == val
        assert str(actual) == "EnumTest.Value1"
        assert isinstance(actual, int)
        assert isinstance(actual, PSObject)
        assert not isinstance(actual, PSInt)
        assert isinstance(actual, ps_enum.PSEnumBase)
        assert isinstance(actual, EnumTest)
        assert isinstance(actual, enum.Enum)
        assert actual.PSTypeNames == base_types
    else:
        # Without hydration we just get the primitive value back
        base_types = [f"Deserialized.{t}" for t in base_types]
        assert actual == val.value
        assert str(actual) == "Value1"
        assert isinstance(actual, int)
        assert isinstance(actual, PSObject)
        assert isinstance(actual, PSInt)
        assert not isinstance(actual, ps_enum.PSEnumBase)
        assert not isinstance(actual, EnumTest)
        assert not isinstance(actual, enum.Enum)
        assert actual.PSTypeNames == base_types
@pytest.mark.parametrize("rehydrate", [True, False])
def test_ps_enum_unsigned_type(rehydrate):
    """Same round-trip as test_ps_enum but with an explicit PSUInt base_type.

    The unsigned base changes the serialized element to <U32> and the
    primitive returned without rehydration to PSUInt.
    """
    type_name = "EnumUIntRehydrated" if rehydrate else "EnumUInt"
    @PSType(type_names=[f"System.{type_name}"], rehydrate=rehydrate)
    class EnumTest(ps_enum.PSEnumBase, base_type=PSUInt):
        none = 0
        Value1 = 1
        Value2 = 2
        Value3 = 3
    assert str(EnumTest.none) == "EnumTest.none"
    assert repr(EnumTest.none) == "<EnumTest.none: 0>"
    assert str(EnumTest.Value1) == "EnumTest.Value1"
    assert repr(EnumTest.Value1) == "<EnumTest.Value1: 1>"
    assert str(EnumTest.Value2) == "EnumTest.Value2"
    assert str(EnumTest.Value3) == "EnumTest.Value3"
    val = EnumTest.Value1
    assert isinstance(val, PSObject)
    assert isinstance(val, enum.Enum)
    assert isinstance(val, ps_enum.PSEnumBase)
    assert not isinstance(val, PSUInt)
    assert isinstance(val.value, PSUInt)
    assert isinstance(val, int)
    # Unsigned base serializes as <U32> rather than <I32>.
    element = serialize(val)
    actual = ElementTree.tostring(element, encoding="utf-8").decode()
    assert (
        actual == f'<Obj RefId="0">'
        f"<U32>1</U32>"
        f'<TN RefId="0">'
        f"<T>System.{type_name}</T>"
        f"<T>System.Enum</T>"
        f"<T>System.ValueType</T>"
        f"<T>System.Object</T>"
        f"</TN>"
        f"<ToString>Value1</ToString>"
        f"</Obj>"
    )
    actual = deserialize(element)
    base_types = [f"System.{type_name}", "System.Enum", "System.ValueType", "System.Object"]
    if rehydrate:
        assert actual == val
        assert str(actual) == "EnumTest.Value1"
        assert isinstance(actual, int)
        assert isinstance(actual, PSObject)
        assert not isinstance(actual, PSUInt)
        assert isinstance(actual, ps_enum.PSEnumBase)
        assert isinstance(actual, EnumTest)
        assert isinstance(actual, enum.Enum)
        assert actual.PSTypeNames == base_types
    else:
        # Without hydration we just get the primitive value back
        base_types = [f"Deserialized.{t}" for t in base_types]
        assert actual == val.value
        assert str(actual) == "Value1"
        assert isinstance(actual, int)
        assert isinstance(actual, PSObject)
        assert isinstance(actual, PSUInt)
        assert not isinstance(actual, ps_enum.PSEnumBase)
        assert not isinstance(actual, EnumTest)
        assert not isinstance(actual, enum.Enum)
        assert actual.PSTypeNames == base_types
@pytest.mark.parametrize("rehydrate", [True, False])
def test_ps_enum_extended_properties(rehydrate):
    """Enum members can carry extended (note) properties that serialize in <MS>.

    Uses a PSInt64 base (serialized as <I64>) and attaches a PSNoteProperty
    keyed by COMPLEX_STRING before serializing.
    """
    type_name = "EnumExtendedRehydrated" if rehydrate else "EnumExtended"
    @PSType(type_names=[f"System.{type_name}"], rehydrate=rehydrate)
    class EnumTest(ps_enum.PSEnumBase, base_type=PSInt64):
        none = 0
        Value1 = 1
        Value2 = 2
        Value3 = 3
    assert str(EnumTest.none) == "EnumTest.none"
    assert repr(EnumTest.none) == "<EnumTest.none: 0>"
    assert str(EnumTest.Value1) == "EnumTest.Value1"
    assert repr(EnumTest.Value1) == "<EnumTest.Value1: 1>"
    assert str(EnumTest.Value2) == "EnumTest.Value2"
    assert str(EnumTest.Value3) == "EnumTest.Value3"
    # Attach an extended note property directly to the enum member.
    val = EnumTest.none
    val.PSObject.extended_properties.append(PSNoteProperty(COMPLEX_STRING))
    val[COMPLEX_STRING] = COMPLEX_STRING
    assert isinstance(val, PSObject)
    assert isinstance(val, enum.Enum)
    assert isinstance(val, ps_enum.PSEnumBase)
    assert not isinstance(val, PSInt64)
    assert isinstance(val.value, PSInt64)
    assert isinstance(val.value, int)
    element = serialize(val)
    actual = ElementTree.tostring(element, encoding="utf-8").decode()
    assert (
        actual == f'<Obj RefId="0">'
        f"<I64>0</I64>"
        f'<TN RefId="0">'
        f"<T>System.{type_name}</T>"
        f"<T>System.Enum</T>"
        f"<T>System.ValueType</T>"
        f"<T>System.Object</T>"
        f"</TN>"
        f"<MS>"
        f'<S N="{COMPLEX_ENCODED_STRING}">{COMPLEX_ENCODED_STRING}</S>'
        f"</MS>"
        f"<ToString>None</ToString>"
        f"</Obj>"
    )
    actual = deserialize(element)
    base_types = [f"System.{type_name}", "System.Enum", "System.ValueType", "System.Object"]
    assert val[COMPLEX_STRING] == COMPLEX_STRING
    if rehydrate:
        assert actual == val
        assert str(actual) == "EnumTest.none"
        assert isinstance(actual, int)
        assert isinstance(actual, PSObject)
        assert not isinstance(actual, PSInt64)
        assert isinstance(actual, ps_enum.PSEnumBase)
        assert isinstance(actual, EnumTest)
        assert isinstance(actual, enum.Enum)
        assert actual.PSTypeNames == base_types
    else:
        # Without hydration we just get the primitive value back
        base_types = [f"Deserialized.{t}" for t in base_types]
        assert actual == val.value
        assert str(actual) == "None"
        assert isinstance(actual, int)
        assert isinstance(actual, PSObject)
        assert isinstance(actual, PSInt64)
        assert not isinstance(actual, ps_enum.PSEnumBase)
        assert not isinstance(actual, EnumTest)
        assert not isinstance(actual, enum.Enum)
        assert actual.PSTypeNames == base_types
@pytest.mark.parametrize("rehydrate", [True, False])
def test_ps_flags(rehydrate):
    """Round-trip a PSFlagBase flag enum through CLIXML.

    Also checks the serialized ToString of combined, zero, and single-flag
    values ("Flag1, Flag3" / "None" / "Flag2").
    """
    type_name = "FlagHydrated" if rehydrate else "Flag"
    @PSType(type_names=[f"System.{type_name}"], rehydrate=rehydrate)
    class FlagTest(ps_enum.PSFlagBase):
        none = 0
        Flag1 = 1
        Flag2 = 2
        Flag3 = 4
    assert str(FlagTest.none) == "FlagTest.none"
    assert repr(FlagTest.none) == "<FlagTest.none: 0>"
    assert str(FlagTest.Flag1) == "FlagTest.Flag1"
    assert repr(FlagTest.Flag1) == "<FlagTest.Flag1: 1>"
    assert str(FlagTest.Flag2) == "FlagTest.Flag2"
    assert str(FlagTest.Flag3) == "FlagTest.Flag3"
    assert str(FlagTest.Flag1 | FlagTest.Flag3) == "FlagTest.Flag3|Flag1"
    assert repr(FlagTest.Flag1 | FlagTest.Flag3) == "<FlagTest.Flag3|Flag1: 5>"
    # A combined value (1|4 == 5) is a PSFlagBase but not a PSEnumBase.
    val = FlagTest.Flag1 | FlagTest.Flag3
    assert isinstance(val, PSObject)
    assert isinstance(val, enum.Flag)
    assert not isinstance(val, ps_enum.PSEnumBase)
    assert isinstance(val, ps_enum.PSFlagBase)
    assert not isinstance(val, PSInt)
    assert isinstance(val.value, PSInt)
    assert isinstance(val, int)
    element = serialize(val)
    actual = ElementTree.tostring(element, encoding="utf-8").decode()
    assert (
        actual == f'<Obj RefId="0">'
        f"<I32>5</I32>"
        f'<TN RefId="0">'
        f"<T>System.{type_name}</T>"
        f"<T>System.Enum</T>"
        f"<T>System.ValueType</T>"
        f"<T>System.Object</T>"
        f"</TN>"
        f"<ToString>Flag1, Flag3</ToString>"
        f"</Obj>"
    )
    actual = deserialize(element)
    base_types = [f"System.{type_name}", "System.Enum", "System.ValueType", "System.Object"]
    if rehydrate:
        assert actual == val
        assert str(actual) == "FlagTest.Flag3|Flag1"
        assert isinstance(actual, int)
        assert isinstance(actual, PSObject)
        assert not isinstance(actual, PSInt)
        assert isinstance(actual, ps_enum.PSFlagBase)
        assert isinstance(actual, FlagTest)
        assert isinstance(actual, enum.Flag)
        assert actual.PSTypeNames == base_types
    else:
        # Without hydration we just get the primitive value back
        base_types = [f"Deserialized.{t}" for t in base_types]
        assert actual == val.value
        assert str(actual) == "Flag1, Flag3"
        assert isinstance(actual, int)
        assert isinstance(actual, PSObject)
        assert isinstance(actual, PSInt)
        assert not isinstance(actual, ps_enum.PSFlagBase)
        assert not isinstance(actual, FlagTest)
        assert not isinstance(actual, enum.Flag)
        assert actual.PSTypeNames == base_types
    # A zero flag serializes with ToString "None".
    element = serialize(FlagTest.none)
    actual = ElementTree.tostring(element, encoding="utf-8").decode()
    assert (
        actual == f'<Obj RefId="0">'
        f"<I32>0</I32>"
        f'<TN RefId="0">'
        f"<T>System.{type_name}</T>"
        f"<T>System.Enum</T>"
        f"<T>System.ValueType</T>"
        f"<T>System.Object</T>"
        f"</TN>"
        f"<ToString>None</ToString>"
        f"</Obj>"
    )
    # OR-ing with the zero member leaves a single flag.
    element = serialize(FlagTest.none | FlagTest.Flag2)
    actual = ElementTree.tostring(element, encoding="utf-8").decode()
    assert (
        actual == f'<Obj RefId="0">'
        f"<I32>2</I32>"
        f'<TN RefId="0">'
        f"<T>System.{type_name}</T>"
        f"<T>System.Enum</T>"
        f"<T>System.ValueType</T>"
        f"<T>System.Object</T>"
        f"</TN>"
        f"<ToString>Flag2</ToString>"
        f"</Obj>"
    )
def test_ps_flags_operators():
    """Exercise the bitwise operators (|, &, ^, <<, >>, ~) on a PSFlagBase.

    Each operation must return a FlagTest instance; composite values have
    name None while exact single members keep their name.
    """
    @PSType(type_names=["System.FlagTest"])
    class FlagTest(ps_enum.PSFlagBase):
        none = 0
        Flag1 = 1
        Flag2 = 2
        Flag3 = 4
        Flag4 = 8
    val = FlagTest.none
    assert val == FlagTest.none
    assert val != FlagTest.Flag1
    assert str(val) == "FlagTest.none"
    assert val.name == "none"
    assert val.value == 0
    # In-place OR produces a composite (value 3) with no single-member name.
    val |= FlagTest.Flag1 | FlagTest.Flag2
    assert isinstance(val, FlagTest)
    assert str(val) == "FlagTest.Flag2|Flag1"
    assert val.name is None
    assert val.value == 3
    val &= FlagTest.Flag1
    assert isinstance(val, FlagTest)
    assert str(val) == "FlagTest.Flag1"
    assert val.name == "Flag1"
    assert val.value == 1
    val = (FlagTest.Flag1 | FlagTest.Flag2) ^ FlagTest.Flag1
    assert isinstance(val, FlagTest)
    assert str(val) == "FlagTest.Flag2"
    assert val.value == 2
    # Shifts map between single flags (2 << 2 == 8 == Flag4).
    val = val << 2
    assert val == FlagTest.Flag4
    assert str(val) == "FlagTest.Flag4"
    assert val.name == "Flag4"
    assert val.value == 8
    val = val >> 2
    assert val == FlagTest.Flag2
    assert str(val) == "FlagTest.Flag2"
    assert val.name == "Flag2"
    assert val.value == 2
    # Inversion keeps the raw two's-complement value (-3) but renders the
    # remaining set flags in str().
    val = ~val
    assert isinstance(val, FlagTest)
    assert str(val) == "FlagTest.Flag4|Flag3|Flag1"
    assert val.name is None
    assert val.value == -3
def test_ps_enum_not_inheriting_int_base():
    """A PSEnumBase subclass must use an integer base_type; PSString is rejected."""
    expected = re.escape("PSEnumType InvalidEnum base_type must be a subclass of PSIntegerBase")
    with pytest.raises(TypeError, match=expected):
        # The error is raised at class-definition time by the type machinery.
        @PSType(type_names=["Test"])
        class InvalidEnum(ps_enum.PSEnumBase, base_type=PSString):
            none = 0
def test_ps_enum_to_ps_ps_baseint():
    """Enum members convert cleanly to primitive PS integer types."""
    @PSType(type_names=["System.EnumToInt"])
    class EnumToInt(ps_enum.PSEnumBase):
        none = 0
        Value1 = 1
    value = PSInt(EnumToInt.Value1)
    assert isinstance(value, PSInt)
    assert value == 1
    value = PSInt64(EnumToInt.Value1)
    assert isinstance(value, PSInt64)
    assert value == 1
| 32.072941
| 96
| 0.635243
| 1,649
| 13,631
| 5.183748
| 0.089751
| 0.108563
| 0.082358
| 0.018952
| 0.801123
| 0.755615
| 0.724497
| 0.701451
| 0.690103
| 0.668343
| 0
| 0.02062
| 0.235052
| 13,631
| 424
| 97
| 32.148585
| 0.799175
| 0.027584
| 0
| 0.655172
| 0
| 0
| 0.177021
| 0.041141
| 0
| 0
| 0
| 0
| 0.465517
| 1
| 0.020115
| false
| 0
| 0.020115
| 0
| 0.12931
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8e17d54efe3098d17f1aea965da46693c0cfeb49
| 244
|
py
|
Python
|
nndet/inference/detection/__init__.py
|
joeranbosma/nnDetection
|
2ebbf1cdc8a8794c73e325f06fea50632c78ae8c
|
[
"BSD-3-Clause"
] | 242
|
2021-05-17T12:31:39.000Z
|
2022-03-31T11:51:29.000Z
|
nndet/inference/detection/__init__.py
|
joeranbosma/nnDetection
|
2ebbf1cdc8a8794c73e325f06fea50632c78ae8c
|
[
"BSD-3-Clause"
] | 59
|
2021-06-02T07:32:10.000Z
|
2022-03-31T18:45:52.000Z
|
nndet/inference/detection/__init__.py
|
joeranbosma/nnDetection
|
2ebbf1cdc8a8794c73e325f06fea50632c78ae8c
|
[
"BSD-3-Clause"
] | 38
|
2021-05-31T14:01:37.000Z
|
2022-03-21T08:24:40.000Z
|
from nndet.inference.detection.wbc import batched_wbc, wbc
from nndet.inference.detection.model import batched_nms_model
from nndet.inference.detection.ensemble import batched_wbc_ensemble, batched_nms_ensemble, \
wbc_nms_no_label_ensemble
| 48.8
| 92
| 0.864754
| 35
| 244
| 5.714286
| 0.342857
| 0.135
| 0.27
| 0.405
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081967
| 244
| 4
| 93
| 61
| 0.892857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8e1e365549661e94cd304080e610dd12201d6851
| 42
|
py
|
Python
|
monthly_calendar_plot/__init__.py
|
maxipi/python-monthly-calendar-plot
|
fb7a021ab40f4d5ddd83573ce440bf52c36863ef
|
[
"MIT"
] | 2
|
2019-12-26T18:57:49.000Z
|
2020-05-06T15:38:23.000Z
|
monthly_calendar_plot/__init__.py
|
maxipi/python-monthly-calendar-plot
|
fb7a021ab40f4d5ddd83573ce440bf52c36863ef
|
[
"MIT"
] | null | null | null |
monthly_calendar_plot/__init__.py
|
maxipi/python-monthly-calendar-plot
|
fb7a021ab40f4d5ddd83573ce440bf52c36863ef
|
[
"MIT"
] | null | null | null |
from .plot import monthly_calendar_figure
| 21
| 41
| 0.880952
| 6
| 42
| 5.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 42
| 1
| 42
| 42
| 0.921053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8e2c87a8d1403f7e080692f524257d4f19861e71
| 2,189
|
py
|
Python
|
python/tests/generated/errors/parsing/test_two_or_more_templates_found.py
|
eno-lang/enolib
|
4175f7c1e8246493b6758c29bddc80d20eaf15f7
|
[
"MIT"
] | 17
|
2019-04-15T21:03:37.000Z
|
2022-01-24T11:03:34.000Z
|
python/tests/generated/errors/parsing/test_two_or_more_templates_found.py
|
eno-lang/enolib
|
4175f7c1e8246493b6758c29bddc80d20eaf15f7
|
[
"MIT"
] | 20
|
2019-03-13T23:23:40.000Z
|
2022-03-29T13:40:57.000Z
|
python/tests/generated/errors/parsing/test_two_or_more_templates_found.py
|
eno-lang/enolib
|
4175f7c1e8246493b6758c29bddc80d20eaf15f7
|
[
"MIT"
] | 4
|
2019-04-15T21:18:03.000Z
|
2019-09-21T16:18:10.000Z
|
import enolib
def test_copying_a_field_that_exists_twice_raises_the_expected_parseerror():
    """Copying a key that matches two fields raises ParseError pointing at the copy line."""
    error = None
    # Renamed from `input`, which shadowed the builtin of the same name.
    document = ("field: value\n"
                "field: value\n"
                "\n"
                "copy < field")
    try:
        enolib.parse(document)
    except enolib.ParseError as _error:
        # The except clause already guarantees _error is a ParseError; the
        # previous isinstance re-check/re-raise branch was unreachable.
        error = _error
    assert type(error) is enolib.ParseError
    text = ("There are at least two elements with the key 'field' that qualify for being copied here, it is not clear which to copy.")
    assert error.text == text
    snippet = (" Line | Content\n"
               " ? 1 | field: value\n"
               " ? 2 | field: value\n"
               " 3 | \n"
               " > 4 | copy < field")
    assert error.snippet == snippet
    # Selection is zero-indexed and covers the whole "copy < field" line.
    assert error.selection['from']['line'] == 3
    assert error.selection['from']['column'] == 0
    assert error.selection['to']['line'] == 3
    assert error.selection['to']['column'] == 12
def test_copying_a_section_that_exists_twice_raises_the_expected_parseerror():
    """Copying a key that matches two sections raises ParseError pointing at the copy line."""
    error = None
    # Renamed from `input`, which shadowed the builtin of the same name.
    document = ("# section\n"
                "\n"
                "# section\n"
                "\n"
                "# copy < section")
    try:
        enolib.parse(document)
    except enolib.ParseError as _error:
        # The except clause already guarantees _error is a ParseError; the
        # previous isinstance re-check/re-raise branch was unreachable.
        error = _error
    assert type(error) is enolib.ParseError
    text = ("There are at least two elements with the key 'section' that qualify for being copied here, it is not clear which to copy.")
    assert error.text == text
    snippet = (" Line | Content\n"
               " ? 1 | # section\n"
               " 2 | \n"
               " ? 3 | # section\n"
               " 4 | \n"
               " > 5 | # copy < section")
    assert error.snippet == snippet
    # Selection is zero-indexed and covers the whole "# copy < section" line.
    assert error.selection['from']['line'] == 4
    assert error.selection['from']['column'] == 0
    assert error.selection['to']['line'] == 4
    assert error.selection['to']['column'] == 16
| 29.986301
| 136
| 0.528552
| 245
| 2,189
| 4.608163
| 0.265306
| 0.116918
| 0.141718
| 0.085031
| 0.823738
| 0.770593
| 0.770593
| 0.770593
| 0.770593
| 0.676705
| 0
| 0.013437
| 0.354043
| 2,189
| 73
| 137
| 29.986301
| 0.785007
| 0
| 0
| 0.527273
| 0
| 0.036364
| 0.273973
| 0
| 0
| 0
| 0
| 0
| 0.254545
| 1
| 0.036364
| false
| 0
| 0.018182
| 0
| 0.054545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f3d7c3a07ea6e051561f63202e7f6eb539057292
| 49
|
py
|
Python
|
mercury/fog_model/iot_devices/ue/__init__.py
|
greenlsi/mercury_mso_framework
|
8b9639e5cb4b2c526a65861c93a9fe9db2460ea4
|
[
"Apache-2.0"
] | 1
|
2020-07-21T11:22:39.000Z
|
2020-07-21T11:22:39.000Z
|
mercury/fog_model/iot_devices/ue/__init__.py
|
greenlsi/mercury_mso_framework
|
8b9639e5cb4b2c526a65861c93a9fe9db2460ea4
|
[
"Apache-2.0"
] | 2
|
2021-08-25T16:09:58.000Z
|
2022-02-10T02:21:03.000Z
|
mercury/fog_model/iot_devices/ue/__init__.py
|
greenlsi/mercury_mso_framework
|
8b9639e5cb4b2c526a65861c93a9fe9db2460ea4
|
[
"Apache-2.0"
] | 1
|
2021-02-24T15:54:09.000Z
|
2021-02-24T15:54:09.000Z
|
from .ue import UserEquipment, UserEquipmentLite
| 24.5
| 48
| 0.857143
| 5
| 49
| 8.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102041
| 49
| 1
| 49
| 49
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f3ed528b4da59d626cf3a403b1bb84252cae8f07
| 192
|
py
|
Python
|
osTesting2.py
|
nawnaw1/PythonTesting
|
eade05cee4af0c2c0f805db6cf17ff9981f4688f
|
[
"MIT"
] | null | null | null |
osTesting2.py
|
nawnaw1/PythonTesting
|
eade05cee4af0c2c0f805db6cf17ff9981f4688f
|
[
"MIT"
] | null | null | null |
osTesting2.py
|
nawnaw1/PythonTesting
|
eade05cee4af0c2c0f805db6cf17ff9981f4688f
|
[
"MIT"
] | null | null | null |
import os

# Demonstrates shelling out via os.system with Windows commands: list the
# current directory, create a scratch directory, show it, wait for a
# keypress ('pause'), then remove it and show it again.
os.system('dir')
print('dir testdir')
print('************')
for shell_command in ('mkdir testdir',
                      'dir testdir',
                      'pause',
                      'rmdir testdir',
                      'dir testdir'):
    os.system(shell_command)
| 21.333333
| 26
| 0.666667
| 28
| 192
| 4.571429
| 0.321429
| 0.375
| 0.257813
| 0.28125
| 0.390625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072917
| 192
| 9
| 27
| 21.333333
| 0.719101
| 0
| 0
| 0.222222
| 0
| 0
| 0.409326
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.111111
| 0
| 0.111111
| 0.222222
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f3f790bee895957ac763c5496836e30bdafcf2a2
| 25
|
py
|
Python
|
tests/__init__.py
|
forieux/grasp
|
b3375a2d5aee89a408ba7ddce0c867bdb1bf1ae4
|
[
"CC0-1.0"
] | null | null | null |
tests/__init__.py
|
forieux/grasp
|
b3375a2d5aee89a408ba7ddce0c867bdb1bf1ae4
|
[
"CC0-1.0"
] | null | null | null |
tests/__init__.py
|
forieux/grasp
|
b3375a2d5aee89a408ba7ddce0c867bdb1bf1ae4
|
[
"CC0-1.0"
] | null | null | null |
from test_grasp import *
| 12.5
| 24
| 0.8
| 4
| 25
| 4.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 25
| 1
| 25
| 25
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6d0d9807058d88987e37a282ed61615e22b50d2f
| 605
|
py
|
Python
|
quadruped_spring/__init__.py
|
francescovezzi/quadruped_spring
|
23848496ac7a4508e8a0f527e961c7956fd12f95
|
[
"MIT"
] | 3
|
2022-02-21T22:30:21.000Z
|
2022-03-03T12:59:25.000Z
|
quadruped_spring/__init__.py
|
francescovezzi/quadruped_spring
|
23848496ac7a4508e8a0f527e961c7956fd12f95
|
[
"MIT"
] | 1
|
2022-03-28T09:22:50.000Z
|
2022-03-28T16:44:46.000Z
|
quadruped_spring/__init__.py
|
francescovezzi/quadruped_spring
|
23848496ac7a4508e8a0f527e961c7956fd12f95
|
[
"MIT"
] | null | null | null |
from gym.envs.registration import register
# Register the quadruped environment with gym so it can be created via
# gym.make("QuadrupedSpring-v0") with Cartesian-PD motor control.
register(
    id="QuadrupedSpring-v0",
    entry_point="quadruped_spring.env.quadruped_gym_env:QuadrupedGymEnv",
    kwargs={
        "motor_control_mode": "CARTESIAN_PD",
        "task_env": "LR_COURSE_TASK",
        "observation_space_mode": "LR_COURSE_OBS",
    },
)
# NOTE(review): commented-out alternative registration using torque control;
# consider deleting if it is no longer needed.
# register(
# id="QuadrupedSpringTorques-v0",
# entry_point="quadruped_spring.env.quadruped_gym_env:QuadrupedGymEnv",
# kwargs={
# "motor_control_mode": "TORQUE",
# "task_env": "LR_COURSE_TASK",
# "observation_space_mode": "LR_COURSE_OBS",
# },
# )
| 26.304348
| 75
| 0.664463
| 65
| 605
| 5.769231
| 0.446154
| 0.085333
| 0.064
| 0.112
| 0.704
| 0.704
| 0.704
| 0.704
| 0.704
| 0.704
| 0
| 0.004124
| 0.198347
| 605
| 22
| 76
| 27.5
| 0.769072
| 0.446281
| 0
| 0
| 0
| 0
| 0.489231
| 0.233846
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.1
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6d233f93724871e01c763d07db7c75b52de5de9e
| 33
|
py
|
Python
|
torch_kfac/__init__.py
|
deepqmc/pytorch-kfac
|
c4742297625367c4d11613970847dacb450a9f32
|
[
"Apache-2.0"
] | 9
|
2020-07-19T14:40:30.000Z
|
2022-02-09T21:02:58.000Z
|
torch_kfac/__init__.py
|
deepqmc/pytorch-kfac
|
c4742297625367c4d11613970847dacb450a9f32
|
[
"Apache-2.0"
] | 1
|
2022-01-13T12:11:15.000Z
|
2022-02-10T10:14:17.000Z
|
torch_kfac/__init__.py
|
n-gao/pytorch-kfac
|
c4742297625367c4d11613970847dacb450a9f32
|
[
"Apache-2.0"
] | 3
|
2021-03-03T15:25:44.000Z
|
2021-04-23T04:57:44.000Z
|
from .kfac_optimizer import KFAC
| 16.5
| 32
| 0.848485
| 5
| 33
| 5.4
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 1
| 33
| 33
| 0.931034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6d508e66c26e00ec0d6529e8437ded8a6c5df5ff
| 165
|
py
|
Python
|
main/apps/magazine/admin.py
|
semyonkrutolevich/bigduck
|
72cef352784e549673b2cdd7026c2fc22b488d86
|
[
"MIT"
] | 2
|
2022-01-31T03:13:51.000Z
|
2022-01-31T03:14:25.000Z
|
main/apps/magazine/admin.py
|
sultanovilvircr/django-testing-sample
|
e2bf6b6f6c78cf1877083d960ae4eb13ebfc5a3e
|
[
"MIT"
] | null | null | null |
main/apps/magazine/admin.py
|
sultanovilvircr/django-testing-sample
|
e2bf6b6f6c78cf1877083d960ae4eb13ebfc5a3e
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from main.apps.magazine.models import NewsArticle
@admin.register(NewsArticle)
class NewsArticleAdmin(admin.ModelAdmin):
    """Default Django admin configuration for the NewsArticle model."""
    pass
| 20.625
| 49
| 0.818182
| 20
| 165
| 6.75
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109091
| 165
| 7
| 50
| 23.571429
| 0.918367
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
6d5bbf2bb4beba68447fa96b4edc2450d78775c8
| 136
|
py
|
Python
|
evennia/contrib/base_systems/email_login/__init__.py
|
davidrideout/evennia
|
879eea55acdf4fe5cdc96ba8fd0ab5ccca4ae84b
|
[
"BSD-3-Clause"
] | null | null | null |
evennia/contrib/base_systems/email_login/__init__.py
|
davidrideout/evennia
|
879eea55acdf4fe5cdc96ba8fd0ab5ccca4ae84b
|
[
"BSD-3-Clause"
] | null | null | null |
evennia/contrib/base_systems/email_login/__init__.py
|
davidrideout/evennia
|
879eea55acdf4fe5cdc96ba8fd0ab5ccca4ae84b
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Email login contrib - Griatch 2012
"""
from .email_login import UnloggedinCmdSet # noqa
from . import connection_screens # noqa
| 17
| 49
| 0.742647
| 16
| 136
| 6.1875
| 0.6875
| 0.20202
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035714
| 0.176471
| 136
| 7
| 50
| 19.428571
| 0.848214
| 0.330882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
edab91c12e6b65c822fab2fe239aa21b9bd1fa7b
| 198
|
py
|
Python
|
tests/calling/kwargs5.py
|
Slater-Victoroff/pyjaco
|
89c4e3c46399c5023b0e160005d855a01241c58a
|
[
"MIT"
] | 3
|
2018-12-09T13:54:48.000Z
|
2020-02-24T17:26:24.000Z
|
tests/calling/kwargs5.py
|
dusty-phillips/pyjaco
|
066895ae38d1828498e529c1875cb88df6cbc54d
|
[
"MIT"
] | 1
|
2020-07-15T13:30:32.000Z
|
2020-07-15T13:30:32.000Z
|
tests/calling/kwargs5.py
|
Slater-Victoroff/pyjaco
|
89c4e3c46399c5023b0e160005d855a01241c58a
|
[
"MIT"
] | null | null | null |
# Exercises keyword-argument calling conventions (Python 2 syntax): default
# values, positional calls, keyword calls, out-of-order keywords, and
# **-dict unpacking.
def foo(a, b, c, d = 10):
    print a, b, c, d
foo(1, 2, 3, 4)
foo(1, 2, 3)
foo(1, 2, 3, d = 20)
foo(1, 2, c = 10, d = 20)
foo(d = 4, c = 3, b = 2, a = 1)
foo(**dict(d = 4, c = 3, b = 2, a = 1))
| 16.5
| 39
| 0.40404
| 53
| 198
| 1.509434
| 0.264151
| 0.2
| 0.25
| 0.225
| 0.2
| 0.2
| 0.2
| 0.2
| 0
| 0
| 0
| 0.208955
| 0.323232
| 198
| 11
| 40
| 18
| 0.38806
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.125
| 0
| 0
| 1
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
edf5258f6c76512e7d4b91df1f0232f60663e69c
| 23,649
|
py
|
Python
|
openstackclient/tests/unit/api/test_compute_v2.py
|
cloudification-io/python-openstackclient
|
e07324e30fbb24e89fd63d1c5a5fe485f693a45c
|
[
"Apache-2.0"
] | 262
|
2015-01-29T20:10:49.000Z
|
2022-03-23T01:59:23.000Z
|
openstackclient/tests/unit/api/test_compute_v2.py
|
adgeese/python-openstackclient
|
06263bd5852aad9cd03a76f50140fbbb2d0751ba
|
[
"Apache-2.0"
] | 5
|
2015-01-21T02:37:35.000Z
|
2021-11-23T02:26:00.000Z
|
openstackclient/tests/unit/api/test_compute_v2.py
|
adgeese/python-openstackclient
|
06263bd5852aad9cd03a76f50140fbbb2d0751ba
|
[
"Apache-2.0"
] | 194
|
2015-01-08T07:39:27.000Z
|
2022-03-30T13:51:23.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Compute v2 API Library Tests"""
from keystoneauth1 import session
from osc_lib import exceptions as osc_lib_exceptions
from requests_mock.contrib import fixture
from openstackclient.api import compute_v2 as compute
from openstackclient.tests.unit import utils
# Fixture constants shared by the test classes below.
FAKE_PROJECT = 'xyzpdq'
FAKE_URL = 'http://gopher.com/v2'
class TestComputeAPIv2(utils.TestCase):
    """Base test case wiring a compute APIv2 client to a requests-mock fixture."""
    def setUp(self):
        super(TestComputeAPIv2, self).setUp()
        sess = session.Session()
        self.api = compute.APIv2(session=sess, endpoint=FAKE_URL)
        # All HTTP calls made through self.api are intercepted by this mock.
        self.requests_mock = self.useFixture(fixture.Fixture())
class TestFloatingIP(TestComputeAPIv2):
    """Tests for the nova-net floating IP helpers on compute.APIv2."""
    # Canned responses; addresses come from the TEST-NET documentation ranges.
    FAKE_FLOATING_IP_RESP = {
        'id': 1,
        'ip': '203.0.113.11', # TEST-NET-3
        'fixed_ip': '198.51.100.11', # TEST-NET-2
        'pool': 'nova',
        'instance_id': None,
    }
    FAKE_FLOATING_IP_RESP_2 = {
        'id': 2,
        'ip': '203.0.113.12', # TEST-NET-3
        'fixed_ip': '198.51.100.12', # TEST-NET-2
        'pool': 'nova',
        'instance_id': None,
    }
    LIST_FLOATING_IP_RESP = [
        FAKE_FLOATING_IP_RESP,
        FAKE_FLOATING_IP_RESP_2,
    ]
    FAKE_SERVER_RESP_1 = {
        'id': 1,
        'name': 'server1',
    }
    def test_floating_ip_add_id(self):
        """Attach a floating IP to a server identified by id."""
        self.requests_mock.register_uri(
            'POST',
            FAKE_URL + '/servers/1/action',
            json={'server': {}},
            status_code=200,
        )
        self.requests_mock.register_uri(
            'GET',
            FAKE_URL + '/servers/1',
            json={'server': self.FAKE_SERVER_RESP_1},
            status_code=200,
        )
        ret = self.api.floating_ip_add('1', '1.0.1.0')
        self.assertEqual(200, ret.status_code)
    def test_floating_ip_add_name(self):
        """Attach a floating IP to a server identified by name."""
        self.requests_mock.register_uri(
            'POST',
            FAKE_URL + '/servers/1/action',
            json={'server': {}},
            status_code=200,
        )
        self.requests_mock.register_uri(
            'GET',
            FAKE_URL + '/servers/server1',
            json={'server': self.FAKE_SERVER_RESP_1},
            status_code=200,
        )
        ret = self.api.floating_ip_add('server1', '1.0.1.0')
        self.assertEqual(200, ret.status_code)
    def test_floating_ip_create(self):
        """Allocate a floating IP from a pool and return its dict."""
        self.requests_mock.register_uri(
            'POST',
            FAKE_URL + '/os-floating-ips',
            json={'floating_ip': self.FAKE_FLOATING_IP_RESP},
            status_code=200,
        )
        ret = self.api.floating_ip_create('nova')
        self.assertEqual(self.FAKE_FLOATING_IP_RESP, ret)
    def test_floating_ip_create_not_found(self):
        """A 404 on allocation surfaces as osc_lib NotFound."""
        self.requests_mock.register_uri(
            'POST',
            FAKE_URL + '/os-floating-ips',
            status_code=404,
        )
        self.assertRaises(
            osc_lib_exceptions.NotFound,
            self.api.floating_ip_create,
            'not-nova',
        )
    def test_floating_ip_delete(self):
        """Release a floating IP by id; 202 with an empty body."""
        self.requests_mock.register_uri(
            'DELETE',
            FAKE_URL + '/os-floating-ips/1',
            status_code=202,
        )
        ret = self.api.floating_ip_delete('1')
        self.assertEqual(202, ret.status_code)
        self.assertEqual("", ret.text)
    def test_floating_ip_delete_none(self):
        """Deleting with no id is a no-op returning None."""
        ret = self.api.floating_ip_delete()
        self.assertIsNone(ret)
    def test_floating_ip_find_id(self):
        """Find by id hits the direct-resource URL."""
        self.requests_mock.register_uri(
            'GET',
            FAKE_URL + '/os-floating-ips/1',
            json={'floating_ip': self.FAKE_FLOATING_IP_RESP},
            status_code=200,
        )
        ret = self.api.floating_ip_find('1')
        self.assertEqual(self.FAKE_FLOATING_IP_RESP, ret)
    def test_floating_ip_find_ip(self):
        """Find by address falls back to listing when the direct lookup 404s."""
        self.requests_mock.register_uri(
            'GET',
            FAKE_URL + '/os-floating-ips/' + self.FAKE_FLOATING_IP_RESP['ip'],
            status_code=404,
        )
        self.requests_mock.register_uri(
            'GET',
            FAKE_URL + '/os-floating-ips',
            json={'floating_ips': self.LIST_FLOATING_IP_RESP},
            status_code=200,
        )
        ret = self.api.floating_ip_find(self.FAKE_FLOATING_IP_RESP['ip'])
        self.assertEqual(self.FAKE_FLOATING_IP_RESP, ret)
    def test_floating_ip_find_not_found(self):
        """No direct hit and no list match raises NotFound."""
        self.requests_mock.register_uri(
            'GET',
            FAKE_URL + '/os-floating-ips/1.2.3.4',
            status_code=404,
        )
        self.requests_mock.register_uri(
            'GET',
            FAKE_URL + '/os-floating-ips',
            json={'floating_ips': self.LIST_FLOATING_IP_RESP},
            status_code=200,
        )
        self.assertRaises(
            osc_lib_exceptions.NotFound,
            self.api.floating_ip_find,
            '1.2.3.4',
        )
    def test_floating_ip_list(self):
        """List all floating IPs."""
        self.requests_mock.register_uri(
            'GET',
            FAKE_URL + '/os-floating-ips',
            json={'floating_ips': self.LIST_FLOATING_IP_RESP},
            status_code=200,
        )
        ret = self.api.floating_ip_list()
        self.assertEqual(self.LIST_FLOATING_IP_RESP, ret)
    def test_floating_ip_remove_id(self):
        """Detach a floating IP from a server identified by id."""
        self.requests_mock.register_uri(
            'POST',
            FAKE_URL + '/servers/1/action',
            status_code=200,
        )
        self.requests_mock.register_uri(
            'GET',
            FAKE_URL + '/servers/1',
            json={'server': self.FAKE_SERVER_RESP_1},
            status_code=200,
        )
        ret = self.api.floating_ip_remove('1', '1.0.1.0')
        self.assertEqual(200, ret.status_code)
    def test_floating_ip_remove_name(self):
        """Detach a floating IP from a server identified by name."""
        self.requests_mock.register_uri(
            'POST',
            FAKE_URL + '/servers/1/action',
            status_code=200,
        )
        self.requests_mock.register_uri(
            'GET',
            FAKE_URL + '/servers/server1',
            json={'server': self.FAKE_SERVER_RESP_1},
            status_code=200,
        )
        ret = self.api.floating_ip_remove('server1', '1.0.1.0')
        self.assertEqual(200, ret.status_code)
class TestFloatingIPPool(TestComputeAPIv2):
LIST_FLOATING_IP_POOL_RESP = [
{"name": "tide"},
{"name": "press"},
]
def test_floating_ip_pool_list(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-floating-ip-pools',
json={'floating_ip_pools': self.LIST_FLOATING_IP_POOL_RESP},
status_code=200,
)
ret = self.api.floating_ip_pool_list()
self.assertEqual(self.LIST_FLOATING_IP_POOL_RESP, ret)
class TestHost(TestComputeAPIv2):
FAKE_HOST_RESP_1 = {
"zone": "internal",
"host_name": "myhost",
"service": "conductor",
}
FAKE_HOST_RESP_2 = {
"zone": "internal",
"host_name": "myhost",
"service": "scheduler",
}
FAKE_HOST_RESP_3 = {
"zone": "nova",
"host_name": "myhost",
"service": "compute",
}
LIST_HOST_RESP = [
FAKE_HOST_RESP_1,
FAKE_HOST_RESP_2,
FAKE_HOST_RESP_3,
]
def test_host_list_no_options(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-hosts',
json={'hosts': self.LIST_HOST_RESP},
status_code=200,
)
ret = self.api.host_list()
self.assertEqual(self.LIST_HOST_RESP, ret)
def test_host_list_zone(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-hosts?zone=nova',
json={'hosts': [self.FAKE_HOST_RESP_3]},
status_code=200,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-hosts',
json={'hosts': [self.FAKE_HOST_RESP_3]},
status_code=200,
)
ret = self.api.host_list(zone='nova')
self.assertEqual([self.FAKE_HOST_RESP_3], ret)
def test_host_set_none(self):
ret = self.api.host_set(host='myhost')
self.assertIsNone(ret)
def test_host_set(self):
self.requests_mock.register_uri(
'PUT',
FAKE_URL + '/os-hosts/myhost',
json={},
status_code=200,
)
ret = self.api.host_set(host='myhost', status='enabled')
self.assertEqual({}, ret)
def test_host_show(self):
FAKE_RESOURCE_1 = {
"cpu": 2,
"disk_gb": 1028,
"host": "c1a7de0ac9d94e4baceae031d05caae3",
"memory_mb": 8192,
"project": "(total)",
}
FAKE_RESOURCE_2 = {
"cpu": 0,
"disk_gb": 0,
"host": "c1a7de0ac9d94e4baceae031d05caae3",
"memory_mb": 512,
"project": "(used_now)",
}
FAKE_RESOURCE_3 = {
"cpu": 0,
"disk_gb": 0,
"host": "c1a7de0ac9d94e4baceae031d05caae3",
"memory_mb": 0,
"project": "(used_max)",
}
FAKE_HOST_RESP = [
{'resource': FAKE_RESOURCE_1},
{'resource': FAKE_RESOURCE_2},
{'resource': FAKE_RESOURCE_3},
]
FAKE_HOST_LIST = [
FAKE_RESOURCE_1,
FAKE_RESOURCE_2,
FAKE_RESOURCE_3,
]
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-hosts/myhost',
json={'host': FAKE_HOST_RESP},
status_code=200,
)
ret = self.api.host_show(host='myhost')
self.assertEqual(FAKE_HOST_LIST, ret)
class TestNetwork(TestComputeAPIv2):
FAKE_NETWORK_RESP = {
'id': '1',
'label': 'label1',
'cidr': '1.2.3.0/24',
}
FAKE_NETWORK_RESP_2 = {
'id': '2',
'label': 'label2',
'cidr': '4.5.6.0/24',
}
LIST_NETWORK_RESP = [
FAKE_NETWORK_RESP,
FAKE_NETWORK_RESP_2,
]
def test_network_create_default(self):
self.requests_mock.register_uri(
'POST',
FAKE_URL + '/os-networks',
json={'network': self.FAKE_NETWORK_RESP},
status_code=200,
)
ret = self.api.network_create('label1')
self.assertEqual(self.FAKE_NETWORK_RESP, ret)
def test_network_create_options(self):
self.requests_mock.register_uri(
'POST',
FAKE_URL + '/os-networks',
json={'network': self.FAKE_NETWORK_RESP},
status_code=200,
)
ret = self.api.network_create(
name='label1',
subnet='1.2.3.0/24',
)
self.assertEqual(self.FAKE_NETWORK_RESP, ret)
def test_network_delete_id(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-networks/1',
json={'network': self.FAKE_NETWORK_RESP},
status_code=200,
)
self.requests_mock.register_uri(
'DELETE',
FAKE_URL + '/os-networks/1',
status_code=202,
)
ret = self.api.network_delete('1')
self.assertEqual(202, ret.status_code)
self.assertEqual("", ret.text)
def test_network_delete_name(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-networks/label1',
status_code=404,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-networks',
json={'networks': self.LIST_NETWORK_RESP},
status_code=200,
)
self.requests_mock.register_uri(
'DELETE',
FAKE_URL + '/os-networks/1',
status_code=202,
)
ret = self.api.network_delete('label1')
self.assertEqual(202, ret.status_code)
self.assertEqual("", ret.text)
def test_network_delete_not_found(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-networks/label3',
status_code=404,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-networks',
json={'networks': self.LIST_NETWORK_RESP},
status_code=200,
)
self.assertRaises(
osc_lib_exceptions.NotFound,
self.api.network_delete,
'label3',
)
def test_network_find_id(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-networks/1',
json={'network': self.FAKE_NETWORK_RESP},
status_code=200,
)
ret = self.api.network_find('1')
self.assertEqual(self.FAKE_NETWORK_RESP, ret)
def test_network_find_name(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-networks/label2',
status_code=404,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-networks',
json={'networks': self.LIST_NETWORK_RESP},
status_code=200,
)
ret = self.api.network_find('label2')
self.assertEqual(self.FAKE_NETWORK_RESP_2, ret)
def test_network_find_not_found(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-networks/label3',
status_code=404,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-networks',
json={'networks': self.LIST_NETWORK_RESP},
status_code=200,
)
self.assertRaises(
osc_lib_exceptions.NotFound,
self.api.network_find,
'label3',
)
def test_network_list_no_options(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-networks',
json={'networks': self.LIST_NETWORK_RESP},
status_code=200,
)
ret = self.api.network_list()
self.assertEqual(self.LIST_NETWORK_RESP, ret)
class TestSecurityGroup(TestComputeAPIv2):
FAKE_SECURITY_GROUP_RESP = {
'id': '1',
'name': 'sg1',
'description': 'test security group',
'tenant_id': '0123456789',
'rules': []
}
FAKE_SECURITY_GROUP_RESP_2 = {
'id': '2',
'name': 'sg2',
'description': 'another test security group',
'tenant_id': '0123456789',
'rules': []
}
LIST_SECURITY_GROUP_RESP = [
FAKE_SECURITY_GROUP_RESP_2,
FAKE_SECURITY_GROUP_RESP,
]
def test_security_group_create_default(self):
self.requests_mock.register_uri(
'POST',
FAKE_URL + '/os-security-groups',
json={'security_group': self.FAKE_SECURITY_GROUP_RESP},
status_code=200,
)
ret = self.api.security_group_create('sg1')
self.assertEqual(self.FAKE_SECURITY_GROUP_RESP, ret)
def test_security_group_create_options(self):
self.requests_mock.register_uri(
'POST',
FAKE_URL + '/os-security-groups',
json={'security_group': self.FAKE_SECURITY_GROUP_RESP},
status_code=200,
)
ret = self.api.security_group_create(
name='sg1',
description='desc',
)
self.assertEqual(self.FAKE_SECURITY_GROUP_RESP, ret)
def test_security_group_delete_id(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups/1',
json={'security_group': self.FAKE_SECURITY_GROUP_RESP},
status_code=200,
)
self.requests_mock.register_uri(
'DELETE',
FAKE_URL + '/os-security-groups/1',
status_code=202,
)
ret = self.api.security_group_delete('1')
self.assertEqual(202, ret.status_code)
self.assertEqual("", ret.text)
def test_security_group_delete_name(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups/sg1',
status_code=404,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups',
json={'security_groups': self.LIST_SECURITY_GROUP_RESP},
status_code=200,
)
self.requests_mock.register_uri(
'DELETE',
FAKE_URL + '/os-security-groups/1',
status_code=202,
)
ret = self.api.security_group_delete('sg1')
self.assertEqual(202, ret.status_code)
self.assertEqual("", ret.text)
def test_security_group_delete_not_found(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups/sg3',
status_code=404,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups',
json={'security_groups': self.LIST_SECURITY_GROUP_RESP},
status_code=200,
)
self.assertRaises(
osc_lib_exceptions.NotFound,
self.api.security_group_delete,
'sg3',
)
def test_security_group_find_id(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups/1',
json={'security_group': self.FAKE_SECURITY_GROUP_RESP},
status_code=200,
)
ret = self.api.security_group_find('1')
self.assertEqual(self.FAKE_SECURITY_GROUP_RESP, ret)
def test_security_group_find_name(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups/sg2',
status_code=404,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups',
json={'security_groups': self.LIST_SECURITY_GROUP_RESP},
status_code=200,
)
ret = self.api.security_group_find('sg2')
self.assertEqual(self.FAKE_SECURITY_GROUP_RESP_2, ret)
def test_security_group_find_not_found(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups/sg3',
status_code=404,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups',
json={'security_groups': self.LIST_SECURITY_GROUP_RESP},
status_code=200,
)
self.assertRaises(
osc_lib_exceptions.NotFound,
self.api.security_group_find,
'sg3',
)
def test_security_group_list_no_options(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups',
json={'security_groups': self.LIST_SECURITY_GROUP_RESP},
status_code=200,
)
ret = self.api.security_group_list()
self.assertEqual(self.LIST_SECURITY_GROUP_RESP, ret)
def test_security_group_set_options_id(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups/1',
json={'security_group': self.FAKE_SECURITY_GROUP_RESP},
status_code=200,
)
self.requests_mock.register_uri(
'PUT',
FAKE_URL + '/os-security-groups/1',
json={'security_group': self.FAKE_SECURITY_GROUP_RESP},
status_code=200,
)
ret = self.api.security_group_set(
security_group='1',
description='desc2')
self.assertEqual(self.FAKE_SECURITY_GROUP_RESP, ret)
def test_security_group_set_options_name(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups/sg2',
status_code=404,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups',
json={'security_groups': self.LIST_SECURITY_GROUP_RESP},
status_code=200,
)
self.requests_mock.register_uri(
'PUT',
FAKE_URL + '/os-security-groups/2',
json={'security_group': self.FAKE_SECURITY_GROUP_RESP_2},
status_code=200,
)
ret = self.api.security_group_set(
security_group='sg2',
description='desc2')
self.assertEqual(self.FAKE_SECURITY_GROUP_RESP_2, ret)
class TestSecurityGroupRule(TestComputeAPIv2):
FAKE_SECURITY_GROUP_RULE_RESP = {
'id': '1',
'name': 'sgr1',
'tenant_id': 'proj-1',
'ip_protocol': 'TCP',
'from_port': 1,
'to_port': 22,
'group': {},
# 'ip_range': ,
# 'cidr': ,
# 'parent_group_id': ,
}
def test_security_group_create_no_options(self):
self.requests_mock.register_uri(
'POST',
FAKE_URL + '/os-security-group-rules',
json={'security_group_rule': self.FAKE_SECURITY_GROUP_RULE_RESP},
status_code=200,
)
ret = self.api.security_group_rule_create(
security_group_id='1',
ip_protocol='tcp',
)
self.assertEqual(self.FAKE_SECURITY_GROUP_RULE_RESP, ret)
def test_security_group_create_options(self):
self.requests_mock.register_uri(
'POST',
FAKE_URL + '/os-security-group-rules',
json={'security_group_rule': self.FAKE_SECURITY_GROUP_RULE_RESP},
status_code=200,
)
ret = self.api.security_group_rule_create(
security_group_id='1',
ip_protocol='tcp',
from_port=22,
to_port=22,
remote_ip='1.2.3.4/24',
)
self.assertEqual(self.FAKE_SECURITY_GROUP_RULE_RESP, ret)
def test_security_group_create_port_errors(self):
self.requests_mock.register_uri(
'POST',
FAKE_URL + '/os-security-group-rules',
json={'security_group_rule': self.FAKE_SECURITY_GROUP_RULE_RESP},
status_code=200,
)
self.assertRaises(
compute.InvalidValue,
self.api.security_group_rule_create,
security_group_id='1',
ip_protocol='tcp',
from_port='',
to_port=22,
remote_ip='1.2.3.4/24',
)
self.assertRaises(
compute.InvalidValue,
self.api.security_group_rule_create,
security_group_id='1',
ip_protocol='tcp',
from_port=0,
to_port=[],
remote_ip='1.2.3.4/24',
)
def test_security_group_rule_delete(self):
self.requests_mock.register_uri(
'DELETE',
FAKE_URL + '/os-security-group-rules/1',
status_code=202,
)
ret = self.api.security_group_rule_delete('1')
self.assertEqual(202, ret.status_code)
self.assertEqual("", ret.text)
| 30.873368
| 78
| 0.563153
| 2,674
| 23,649
| 4.654076
| 0.082274
| 0.087746
| 0.080996
| 0.119566
| 0.816472
| 0.768823
| 0.741583
| 0.717557
| 0.700362
| 0.667176
| 0
| 0.033092
| 0.321494
| 23,649
| 765
| 79
| 30.913725
| 0.74249
| 0.027866
| 0
| 0.560831
| 0
| 0
| 0.120482
| 0.021815
| 0
| 0
| 0
| 0
| 0.0727
| 1
| 0.063798
| false
| 0
| 0.007418
| 0
| 0.105341
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b6278451a9b13472bfe78a0e5f1ad4f7fdcf3406
| 115
|
py
|
Python
|
cwave/__init__.py
|
mmsutula/hwserver
|
0d9e43faa7cd2d069cf96a9b945ac1b891419dd4
|
[
"MIT"
] | null | null | null |
cwave/__init__.py
|
mmsutula/hwserver
|
0d9e43faa7cd2d069cf96a9b945ac1b891419dd4
|
[
"MIT"
] | null | null | null |
cwave/__init__.py
|
mmsutula/hwserver
|
0d9e43faa7cd2d069cf96a9b945ac1b891419dd4
|
[
"MIT"
] | null | null | null |
from cwave import *
init(__name__, \
cwave_addr = xxx.xxx.xxx.xxx,xxxxx)
# hardware specific IP and TCP port
| 23
| 40
| 0.713043
| 18
| 115
| 4.277778
| 0.777778
| 0.233766
| 0.233766
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.191304
| 115
| 5
| 41
| 23
| 0.827957
| 0.286957
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.333333
| null | null | 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
b6513b846b8988f71a88fe38f70e7fba034a970c
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/poetry/core/_vendor/packaging/__init__.py
|
GiulianaPola/select_repeats
|
17a0d053d4f874e42cf654dd142168c2ec8fbd11
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/poetry/core/_vendor/packaging/__init__.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/poetry/core/_vendor/packaging/__init__.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/51/c0/29/90c3f25867081a06161a2e652f9bb33d8a5e97268e3d15e181d5210a2c
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.479167
| 0
| 96
| 1
| 96
| 96
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b68c0420a6709b3384b508435b94fc1c6a71dcb0
| 204
|
py
|
Python
|
tensorlayerx/model/__init__.py
|
tensorlayer/TensorLayerX
|
4e3e6f13687309dda7787f0b86e35a62bb3adbad
|
[
"Apache-2.0"
] | 34
|
2021-12-03T08:19:23.000Z
|
2022-03-13T08:34:34.000Z
|
tensorlayerx/model/__init__.py
|
tensorlayer/TensorLayerX
|
4e3e6f13687309dda7787f0b86e35a62bb3adbad
|
[
"Apache-2.0"
] | null | null | null |
tensorlayerx/model/__init__.py
|
tensorlayer/TensorLayerX
|
4e3e6f13687309dda7787f0b86e35a62bb3adbad
|
[
"Apache-2.0"
] | 3
|
2021-12-28T16:57:20.000Z
|
2022-03-18T02:23:14.000Z
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
from .core import Model
from .core import WithLoss
from .core import WithGrad
from .core import TrainOneStep
from .core import TrainOneStepWithGradientClipping
| 22.666667
| 50
| 0.769608
| 26
| 204
| 6.038462
| 0.538462
| 0.254777
| 0.44586
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005682
| 0.137255
| 204
| 8
| 51
| 25.5
| 0.886364
| 0.191176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
b6a689a01a7888c87dc63b5e346123f2fd880208
| 109
|
py
|
Python
|
src/pico_code/pico/utime.py
|
romilly/pico-code
|
57bbc14e0a5c3e874162fcfb1fcd7cca3a838cce
|
[
"MIT"
] | 15
|
2021-02-04T02:38:23.000Z
|
2022-01-20T17:55:15.000Z
|
src/pico_code/pico/utime.py
|
romilly/pico-code
|
57bbc14e0a5c3e874162fcfb1fcd7cca3a838cce
|
[
"MIT"
] | 1
|
2021-05-06T10:09:51.000Z
|
2021-05-06T10:09:51.000Z
|
src/pico_code/pico/utime.py
|
romilly/pico-code
|
57bbc14e0a5c3e874162fcfb1fcd7cca3a838cce
|
[
"MIT"
] | 2
|
2021-02-04T20:09:01.000Z
|
2021-02-18T16:16:22.000Z
|
def sleep(seconds: float):
pass
def sleep_ms(millis: int):
pass
def sleep_us(micros: int):
pass
| 13.625
| 26
| 0.66055
| 17
| 109
| 4.117647
| 0.588235
| 0.342857
| 0.342857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.229358
| 109
| 8
| 27
| 13.625
| 0.833333
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
fcbe3c8b193a3d1bbfa6d6a45370bcdce5df3f93
| 103
|
py
|
Python
|
CodeWars/Python/7 kyu/Printer Errors/main.py
|
opastushkov/codewars-solutions
|
0132a24259a4e87f926048318332dcb4d94858ca
|
[
"MIT"
] | null | null | null |
CodeWars/Python/7 kyu/Printer Errors/main.py
|
opastushkov/codewars-solutions
|
0132a24259a4e87f926048318332dcb4d94858ca
|
[
"MIT"
] | null | null | null |
CodeWars/Python/7 kyu/Printer Errors/main.py
|
opastushkov/codewars-solutions
|
0132a24259a4e87f926048318332dcb4d94858ca
|
[
"MIT"
] | null | null | null |
import re
def printer_error(s):
return "{}/{}".format(len(s) - len(re.findall('[a-m]', s)), len(s))
| 34.333333
| 71
| 0.582524
| 18
| 103
| 3.277778
| 0.666667
| 0.135593
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.126214
| 103
| 3
| 71
| 34.333333
| 0.655556
| 0
| 0
| 0
| 0
| 0
| 0.096154
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0.333333
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
fcce6440d40b5b87a5e87a9ade608c1e783d407a
| 2,548
|
py
|
Python
|
epytope/Data/pssms/smmpmbec/mat/B_54_01_10.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 7
|
2021-02-01T18:11:28.000Z
|
2022-01-31T19:14:07.000Z
|
epytope/Data/pssms/smmpmbec/mat/B_54_01_10.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 22
|
2021-01-02T15:25:23.000Z
|
2022-03-14T11:32:53.000Z
|
epytope/Data/pssms/smmpmbec/mat/B_54_01_10.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 4
|
2021-05-28T08:50:38.000Z
|
2022-03-14T11:45:32.000Z
|
B_54_01_10 = {0: {'A': 0.5, 'C': 0.02, 'E': 0.265, 'D': 0.629, 'G': 0.086, 'F': -1.087, 'I': -0.048, 'H': -0.217, 'K': 0.459, 'M': -0.792, 'L': -0.453, 'N': 0.177, 'Q': 0.38, 'P': 0.725, 'S': 0.081, 'R': 0.393, 'T': 0.158, 'W': -0.194, 'V': -0.074, 'Y': -1.009}, 1: {'A': -0.085, 'C': -0.07, 'E': -0.011, 'D': -0.159, 'G': -0.181, 'F': 0.087, 'I': 0.196, 'H': 0.139, 'K': 0.191, 'M': 0.324, 'L': 0.407, 'N': -0.005, 'Q': 0.082, 'P': -0.779, 'S': -0.148, 'R': 0.063, 'T': -0.057, 'W': -0.059, 'V': -0.013, 'Y': 0.078}, 2: {'A': 0.091, 'C': 0.023, 'E': 0.148, 'D': 0.32, 'G': 0.194, 'F': -0.316, 'I': -0.296, 'H': -0.236, 'K': -0.062, 'M': -0.432, 'L': -0.199, 'N': 0.145, 'Q': 0.051, 'P': 0.576, 'S': 0.132, 'R': -0.053, 'T': 0.106, 'W': -0.024, 'V': -0.003, 'Y': -0.165}, 3: {'A': 0.163, 'C': -0.029, 'E': 0.096, 'D': 0.125, 'G': 0.094, 'F': -0.12, 'I': -0.235, 'H': -0.013, 'K': 0.208, 'M': -0.151, 'L': -0.184, 'N': -0.157, 'Q': 0.061, 'P': 0.09, 'S': 0.057, 'R': 0.28, 'T': 0.132, 'W': -0.054, 'V': -0.1, 'Y': -0.265}, 4: {'A': 0.069, 'C': -0.038, 'E': 0.224, 'D': 0.071, 'G': 0.274, 'F': -0.36, 'I': -0.357, 'H': 0.041, 'K': 0.047, 'M': -0.181, 'L': -0.203, 'N': 0.24, 'Q': 0.274, 'P': 0.123, 'S': 0.276, 'R': 0.112, 'T': 0.056, 'W': -0.279, 'V': -0.189, 'Y': -0.202}, 5: {'A': -0.048, 'C': 0.018, 'E': 0.062, 'D': 0.139, 'G': 0.026, 'F': -0.082, 'I': -0.12, 'H': 0.009, 'K': 0.014, 'M': -0.031, 'L': -0.13, 'N': 0.05, 'Q': 0.051, 'P': 0.039, 'S': 0.102, 'R': 0.015, 'T': 0.033, 'W': 0.026, 'V': -0.124, 'Y': -0.049}, 6: {'A': 0.121, 'C': -0.004, 'E': 0.024, 'D': 0.019, 'G': 0.012, 'F': -0.062, 'I': -0.077, 'H': -0.006, 'K': 0.025, 'M': -0.064, 'L': -0.064, 'N': -0.021, 'Q': 0.022, 'P': 0.104, 'S': 0.023, 'R': 0.061, 'T': 0.038, 'W': -0.077, 'V': -0.012, 'Y': -0.059}, 7: {'A': -0.305, 'C': -0.016, 'E': 0.043, 'D': 0.016, 'G': 0.095, 'F': -0.029, 'I': -0.037, 'H': 0.008, 'K': 0.005, 'M': -0.031, 'L': 0.103, 'N': 0.185, 'Q': 0.197, 'P': 0.064, 'S': 0.016, 'R': -0.085, 'T': -0.07, 'W': 
0.015, 'V': -0.032, 'Y': -0.143}, 8: {'A': -0.084, 'C': -0.012, 'E': -0.02, 'D': 0.01, 'G': -0.048, 'F': 0.02, 'I': -0.077, 'H': 0.065, 'K': 0.051, 'M': 0.011, 'L': 0.017, 'N': 0.033, 'Q': 0.01, 'P': -0.012, 'S': -0.008, 'R': 0.076, 'T': -0.011, 'W': 0.019, 'V': -0.052, 'Y': 0.013}, 9: {'A': -1.668, 'C': -0.116, 'E': 0.158, 'D': 0.098, 'G': -0.216, 'F': 0.116, 'I': -0.208, 'H': 0.589, 'K': 0.22, 'M': 0.351, 'L': 0.32, 'N': 0.413, 'Q': 0.52, 'P': -0.481, 'S': -0.12, 'R': 0.569, 'T': -0.28, 'W': 0.426, 'V': -1.022, 'Y': 0.334}, -1: {'con': 4.3502}}
| 2,548
| 2,548
| 0.393642
| 618
| 2,548
| 1.618123
| 0.286408
| 0.02
| 0.01
| 0.012
| 0.042
| 0
| 0
| 0
| 0
| 0
| 0
| 0.373008
| 0.16248
| 2,548
| 1
| 2,548
| 2,548
| 0.095595
| 0
| 0
| 0
| 0
| 0
| 0.079639
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fcf69261c25cd0a7b02c5a1957e1a01253377630
| 138
|
py
|
Python
|
FrankNN/__init__.py
|
fpreiswerk/FrankNN
|
66441195acdd6af237f1d780975440477019dbbf
|
[
"MIT"
] | null | null | null |
FrankNN/__init__.py
|
fpreiswerk/FrankNN
|
66441195acdd6af237f1d780975440477019dbbf
|
[
"MIT"
] | null | null | null |
FrankNN/__init__.py
|
fpreiswerk/FrankNN
|
66441195acdd6af237f1d780975440477019dbbf
|
[
"MIT"
] | null | null | null |
from .layers import *
from .activations import *
from .losses import *
from .model import *
from .util import *
from .optimizers import *
| 19.714286
| 26
| 0.73913
| 18
| 138
| 5.666667
| 0.444444
| 0.490196
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 138
| 6
| 27
| 23
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1e38abe12e343351a6eec1a68d88738834022221
| 183
|
py
|
Python
|
lldb/test/API/lang/objc/objc-runtime-ivars/TestRuntimeIvars.py
|
mkinsner/llvm
|
589d48844edb12cd357b3024248b93d64b6760bf
|
[
"Apache-2.0"
] | 2,338
|
2018-06-19T17:34:51.000Z
|
2022-03-31T11:00:37.000Z
|
lldb/test/API/lang/objc/objc-runtime-ivars/TestRuntimeIvars.py
|
mkinsner/llvm
|
589d48844edb12cd357b3024248b93d64b6760bf
|
[
"Apache-2.0"
] | 3,740
|
2019-01-23T15:36:48.000Z
|
2022-03-31T22:01:13.000Z
|
lldb/test/API/lang/objc/objc-runtime-ivars/TestRuntimeIvars.py
|
mkinsner/llvm
|
589d48844edb12cd357b3024248b93d64b6760bf
|
[
"Apache-2.0"
] | 500
|
2019-01-23T07:49:22.000Z
|
2022-03-30T02:59:37.000Z
|
from lldbsuite.test import lldbinline
from lldbsuite.test import decorators
lldbinline.MakeInlineTest(
__file__, globals(), [
decorators.skipIf(archs=["i386", "i686"])])
| 26.142857
| 51
| 0.73224
| 19
| 183
| 6.842105
| 0.684211
| 0.2
| 0.261538
| 0.353846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.038462
| 0.147541
| 183
| 6
| 52
| 30.5
| 0.794872
| 0
| 0
| 0
| 0
| 0
| 0.043716
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
1e3b591d72a10e301ec3685da670d12e3b678472
| 42
|
py
|
Python
|
mlfromscratch/deep_learning/__init__.py
|
leeh8911/ML-From-Scratch
|
9b9c94e2f8fbbefa60d3481c23180f1852fae506
|
[
"MIT"
] | 22,453
|
2017-02-17T08:19:27.000Z
|
2022-03-31T17:45:01.000Z
|
mlfromscratch/deep_learning/__init__.py
|
oceanofinfinity/ML-From-Scratch
|
a2806c6732eee8d27762edd6d864e0c179d8e9e8
|
[
"MIT"
] | 75
|
2017-02-25T23:55:40.000Z
|
2022-03-28T04:15:08.000Z
|
mlfromscratch/deep_learning/__init__.py
|
oceanofinfinity/ML-From-Scratch
|
a2806c6732eee8d27762edd6d864e0c179d8e9e8
|
[
"MIT"
] | 4,496
|
2017-02-25T16:52:39.000Z
|
2022-03-31T06:42:54.000Z
|
from .neural_network import NeuralNetwork
| 21
| 41
| 0.880952
| 5
| 42
| 7.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 42
| 1
| 42
| 42
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1e3c2c2a78fc37cf25abc70e37bab84b5b7c1c63
| 123
|
py
|
Python
|
tsformer/models/transformer_xl.py
|
jianzhnie/TsFormer
|
47e362f02445ba00d5ab8db206667767e72faca7
|
[
"Apache-2.0"
] | null | null | null |
tsformer/models/transformer_xl.py
|
jianzhnie/TsFormer
|
47e362f02445ba00d5ab8db206667767e72faca7
|
[
"Apache-2.0"
] | null | null | null |
tsformer/models/transformer_xl.py
|
jianzhnie/TsFormer
|
47e362f02445ba00d5ab8db206667767e72faca7
|
[
"Apache-2.0"
] | 1
|
2022-01-10T08:17:55.000Z
|
2022-01-10T08:17:55.000Z
|
'''
Author: jianzhnie
Date: 2022-01-24 11:36:20
LastEditTime: 2022-01-24 11:36:21
LastEditors: jianzhnie
Description:
'''
| 13.666667
| 33
| 0.731707
| 19
| 123
| 4.736842
| 0.684211
| 0.133333
| 0.177778
| 0.222222
| 0.266667
| 0
| 0
| 0
| 0
| 0
| 0
| 0.256881
| 0.113821
| 123
| 8
| 34
| 15.375
| 0.568807
| 0.918699
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1e9559f7ecab4ea412180d8737dbcc3d4e05bc4d
| 101
|
py
|
Python
|
protobuf_gen/__init__.py
|
danielorbach/python-protobuf-gen
|
10b6d523d7fb06a596bd28e8eb74bc31ddd2a345
|
[
"Apache-2.0"
] | 10
|
2018-05-30T03:08:40.000Z
|
2020-05-03T06:29:21.000Z
|
protobuf_gen/__init__.py
|
danielorbach/python-protobuf-gen
|
10b6d523d7fb06a596bd28e8eb74bc31ddd2a345
|
[
"Apache-2.0"
] | 3
|
2018-03-02T22:38:11.000Z
|
2020-01-22T19:17:08.000Z
|
protobuf_gen/__init__.py
|
danielorbach/python-protobuf-gen
|
10b6d523d7fb06a596bd28e8eb74bc31ddd2a345
|
[
"Apache-2.0"
] | 5
|
2019-08-09T08:24:34.000Z
|
2021-01-27T20:38:57.000Z
|
from protobuf_gen.remap import remap
from protobuf_gen.wrap import wrap
__all__ = ['remap', 'wrap']
| 20.2
| 36
| 0.772277
| 15
| 101
| 4.8
| 0.466667
| 0.333333
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128713
| 101
| 4
| 37
| 25.25
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0.089109
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1ec075d36f8b198a6b81c697f82d4f57fe1ca6be
| 30
|
py
|
Python
|
plugins/custom/embed_customjs/__init__.py
|
mizunashi-mana/blog
|
96143f9d31a3b379a91b3dadeb865299158e25e3
|
[
"Apache-2.0"
] | 4
|
2020-02-01T16:27:39.000Z
|
2021-05-31T04:26:34.000Z
|
plugins/custom/embed_customjs/__init__.py
|
mizunashi-mana/blog
|
96143f9d31a3b379a91b3dadeb865299158e25e3
|
[
"Apache-2.0"
] | 12
|
2017-09-16T11:02:09.000Z
|
2022-01-30T11:29:49.000Z
|
plugins/custom/embed_customjs/__init__.py
|
mizunashi-mana/blog
|
96143f9d31a3b379a91b3dadeb865299158e25e3
|
[
"Apache-2.0"
] | 2
|
2017-09-10T02:20:50.000Z
|
2017-09-16T04:36:44.000Z
|
from .embed_customjs import *
| 15
| 29
| 0.8
| 4
| 30
| 5.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 30
| 1
| 30
| 30
| 0.884615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1ecf6cdabb3d1f35df3401794f608e4a94641c28
| 53
|
py
|
Python
|
day-5/boarding_pass/__init__.py
|
DallogFheir/aoc-2020
|
089bd45d5fbdf98b9729a23f3a142ca3b792567c
|
[
"MIT"
] | null | null | null |
day-5/boarding_pass/__init__.py
|
DallogFheir/aoc-2020
|
089bd45d5fbdf98b9729a23f3a142ca3b792567c
|
[
"MIT"
] | null | null | null |
day-5/boarding_pass/__init__.py
|
DallogFheir/aoc-2020
|
089bd45d5fbdf98b9729a23f3a142ca3b792567c
|
[
"MIT"
] | null | null | null |
from boarding_pass.boarding_pass import BoardingPass
| 26.5
| 52
| 0.90566
| 7
| 53
| 6.571429
| 0.714286
| 0.521739
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075472
| 53
| 1
| 53
| 53
| 0.938776
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 1
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 6
|
1edf3cccfedc2eb0e95268f309e00c535b7eac49
| 615
|
py
|
Python
|
lib/common/color.py
|
WhySoGeeky/DroidPot
|
7c3d9e975dae3835e2ccf42c425d65b26466e82a
|
[
"MIT"
] | 6
|
2016-02-18T10:00:34.000Z
|
2021-05-27T09:41:35.000Z
|
lib/common/color.py
|
WhySoGeeky/DroidPot
|
7c3d9e975dae3835e2ccf42c425d65b26466e82a
|
[
"MIT"
] | 6
|
2018-03-30T10:06:12.000Z
|
2021-06-10T17:59:44.000Z
|
lib/common/color.py
|
WhySoGeeky/DroidPot
|
7c3d9e975dae3835e2ccf42c425d65b26466e82a
|
[
"MIT"
] | null | null | null |
__author__ = 'RongShun'
import os
import sys
def color(text, color_code):
    """Wrap *text* in the ANSI escape sequence for *color_code*.

    On Windows consoles that are not xterm-compatible the text is
    returned unchanged, since those consoles do not render ANSI codes.
    """
    ansi_capable = sys.platform != "win32" or os.getenv("TERM") == "xterm"
    if ansi_capable:
        return "\x1b[%dm%s\x1b[0m" % (color_code, text)
    return text


def green(text):
    """Return *text* coloured green (ANSI code 32)."""
    return color(text, 32)


def yellow(text):
    """Return *text* coloured yellow (ANSI code 33)."""
    return color(text, 33)


def white(text):
    """Return *text* coloured white (ANSI code 37)."""
    return color(text, 37)


def bold(text):
    """Return *text* in bold (ANSI code 1)."""
    return color(text, 1)


def black(text):
    """Return *text* coloured black (ANSI code 30)."""
    return color(text, 30)


def red(text):
    """Return *text* coloured red (ANSI code 31)."""
    return color(text, 31)


def blue(text):
    """Return *text* coloured blue (ANSI code 34)."""
    return color(text, 34)


def magenta(text):
    """Return *text* coloured magenta (ANSI code 35)."""
    return color(text, 35)


def cyan(text):
    """Return *text* coloured cyan (ANSI code 36)."""
    return color(text, 36)
| 16.621622
| 64
| 0.64065
| 94
| 615
| 4.12766
| 0.414894
| 0.231959
| 0.347938
| 0.440722
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045643
| 0.21626
| 615
| 36
| 65
| 17.083333
| 0.759336
| 0
| 0
| 0
| 0
| 0
| 0.063415
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.08
| 0.36
| 0.92
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
94a8b83a75def03fd1a7ab4fcce57ce45ad131cf
| 104
|
py
|
Python
|
bluebottle/payments_mock/serializers.py
|
maykinmedia/bluebottle
|
355d4729662b5e9a03398efb4fe882e0f8cfa28d
|
[
"BSD-3-Clause"
] | null | null | null |
bluebottle/payments_mock/serializers.py
|
maykinmedia/bluebottle
|
355d4729662b5e9a03398efb4fe882e0f8cfa28d
|
[
"BSD-3-Clause"
] | null | null | null |
bluebottle/payments_mock/serializers.py
|
maykinmedia/bluebottle
|
355d4729662b5e9a03398efb4fe882e0f8cfa28d
|
[
"BSD-3-Clause"
] | null | null | null |
from rest_framework import serializers
class PaymentMockSerializer(serializers.Serializer):
    """DRF serializer for the mock payment provider.

    Intentionally declares no fields — presumably the mock payment flow
    needs no request/response payload; confirm against the payments_mock
    views before adding fields here.
    """
    pass
| 14.857143
| 52
| 0.826923
| 10
| 104
| 8.5
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134615
| 104
| 6
| 53
| 17.333333
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
94ff738f4b48af71e34e90e4853ee0118026d049
| 1,428
|
py
|
Python
|
cronjob/utils/user_agents.py
|
fucangyu/SimpleSpider
|
a2fd9289f44696c5c06ece9cec8dc5315300eecf
|
[
"MIT"
] | 4
|
2019-01-13T06:08:48.000Z
|
2019-01-14T07:12:37.000Z
|
cronjob/utils/user_agents.py
|
fucangyu/cronjob
|
9a27b0a430eab1f9e52ff51700217a7dac15c846
|
[
"MIT"
] | 2
|
2019-01-13T04:10:58.000Z
|
2019-01-13T07:08:53.000Z
|
cronjob/utils/user_agents.py
|
fucangyu/cronjob
|
9a27b0a430eab1f9e52ff51700217a7dac15c846
|
[
"MIT"
] | 2
|
2019-01-25T15:43:15.000Z
|
2019-06-15T09:42:15.000Z
|
import random
# flake8: noqa
# copy from https://github.com/fengzhizi715/user-agent-list
# Pool of realistic desktop browser User-Agent strings to rotate through.
DESKTOP_AGENTS = [
    'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2226.0 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.4; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36',
    'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2224.3 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.93 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.124 Safari/537.36'
]


def replace_user_agent(kwargs):
    """Inject a random desktop User-Agent into ``kwargs['headers']``.

    Mutates *kwargs* in place: an existing ``headers`` dict is kept (any
    other header keys survive) and only its ``user-agent`` entry is
    overwritten; a fresh dict is created when ``headers`` is absent.
    """
    headers = kwargs.setdefault('headers', {})
    headers['user-agent'] = random.choice(DESKTOP_AGENTS)
| 59.5
| 127
| 0.703081
| 256
| 1,428
| 3.886719
| 0.238281
| 0.100503
| 0.090452
| 0.211055
| 0.745729
| 0.745729
| 0.725628
| 0.668342
| 0.639196
| 0.639196
| 0
| 0.198868
| 0.133754
| 1,428
| 23
| 128
| 62.086957
| 0.605497
| 0.04902
| 0
| 0
| 0
| 0.588235
| 0.809594
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.058824
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a22015b1a72c09ccde4b057d185c69c76dbb1f24
| 3,080
|
py
|
Python
|
tests/test_fortiwlc.py
|
ArnesSI/fortiwlc_exporter
|
2369070588c4de84a2310a2de46dd423e6e7dcac
|
[
"MIT"
] | 2
|
2020-03-22T18:01:57.000Z
|
2020-03-23T20:06:16.000Z
|
tests/test_fortiwlc.py
|
ArnesSI/fortiwlc_exporter
|
2369070588c4de84a2310a2de46dd423e6e7dcac
|
[
"MIT"
] | null | null | null |
tests/test_fortiwlc.py
|
ArnesSI/fortiwlc_exporter
|
2369070588c4de84a2310a2de46dd423e6e7dcac
|
[
"MIT"
] | null | null | null |
import json
import responses
import unittest
from fortiwlc_exporter.fortiwlc import FortiWLC
class TestFortiWLC(unittest.TestCase):
    """Tests for the FortiWLC API wrapper with mocked HTTP responses.

    Each test registers a canned JSON fixture for one controller endpoint
    and verifies the wrapper performs exactly one request and returns the
    fixture's ``results`` payload unchanged.
    """

    WLC_HOST = 'wlc.ansoext.arnes.si'
    API_KEY = '123'

    def _check_endpoint(self, url, fixture_path, method_name):
        """Mock *url* with *fixture_path* and verify ``FortiWLC.<method_name>``.

        Asserts one request was made, the constructor arguments were stored,
        and the returned data equals the fixture's ``results``.
        """
        # Use a context manager so the fixture file handle is closed
        # (the original tests leaked it via a bare open()).
        with open(fixture_path) as fixture:
            response_data = json.load(fixture)
        responses.add(responses.GET, url, json=response_data, status=200)
        wlc = FortiWLC(self.WLC_HOST, self.API_KEY)
        wlc_data = getattr(wlc, method_name)()['results']
        self.assertEqual(len(responses.calls), 1)
        self.assertEqual(wlc.name, self.WLC_HOST)
        self.assertEqual(wlc.api_key, self.API_KEY)
        self.assertEqual(wlc_data, response_data['results'])

    @responses.activate
    def test_managed_ap_ok(self):
        """Test successful API call for managed APs"""
        self._check_endpoint(
            'https://wlc.ansoext.arnes.si/api/v2/monitor/wifi/managed_ap/select/?vdom=root',
            './tests/data/one_client/wlc.ansoext.arnes.si-managed_ap.json',
            'get_managed_ap',
        )

    @responses.activate
    def test_vap_group_ok(self):
        """Test successful API call for VAP groups"""
        # Docstring fixed: the original said "managed APs" (copy-paste).
        self._check_endpoint(
            'https://wlc.ansoext.arnes.si/api/v2/cmdb/wireless-controller/vap-group/?vdom=root',
            './tests/data/one_client/wlc.ansoext.arnes.si-vap_group.json',
            'get_vap_group',
        )

    @responses.activate
    def test_clients_none_ok(self):
        """Test successful API call for clients (empty client list)"""
        self._check_endpoint(
            'https://wlc.ansoext.arnes.si/api/v2/monitor/wifi/client/select/?vdom=root',
            './tests/data/no_clients/wlc.ansoext.arnes.si-clients.json',
            'get_clients',
        )

    @responses.activate
    def test_clients_1_ok(self):
        """Test successful API call for clients (one client)"""
        self._check_endpoint(
            'https://wlc.ansoext.arnes.si/api/v2/monitor/wifi/client/select/?vdom=root',
            './tests/data/one_client/wlc.ansoext.arnes.si-clients.json',
            'get_clients',
        )
| 42.777778
| 97
| 0.647403
| 396
| 3,080
| 4.909091
| 0.15404
| 0.082305
| 0.123457
| 0.139918
| 0.875514
| 0.875514
| 0.875514
| 0.875514
| 0.875514
| 0.875514
| 0
| 0.018503
| 0.21039
| 3,080
| 71
| 98
| 43.380282
| 0.780839
| 0.052597
| 0
| 0.622951
| 0
| 0.065574
| 0.268672
| 0.080567
| 0
| 0
| 0
| 0
| 0.262295
| 1
| 0.065574
| false
| 0
| 0.065574
| 0
| 0.147541
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bfaf30593a73968175d64d4145c53456d7a9f85b
| 3,427
|
py
|
Python
|
tests/auth/test_jwt.py
|
perrystallings/parrot-api-core
|
c6b5464429da00173c80ad17d9faf248cadcc33d
|
[
"MIT"
] | null | null | null |
tests/auth/test_jwt.py
|
perrystallings/parrot-api-core
|
c6b5464429da00173c80ad17d9faf248cadcc33d
|
[
"MIT"
] | null | null | null |
tests/auth/test_jwt.py
|
perrystallings/parrot-api-core
|
c6b5464429da00173c80ad17d9faf248cadcc33d
|
[
"MIT"
] | null | null | null |
import pytest
from parrot_api.core.common import generate_random_id
@pytest.fixture()
def claims(audience, issuer):
    """Machine-token claim set for one random user, valid for one hour.

    ``audience`` and ``issuer`` are fixtures supplied elsewhere
    (presumably conftest.py — not visible here). Scopes are three random
    ids so tests never depend on concrete scope names.
    """
    from parrot_api.core.auth.jwt import format_access_token
    from parrot_api.core.common import generate_random_id
    return format_access_token(
        user=generate_random_id(), machine_token=True, audiences=[audience], issuer=issuer, expiration_seconds=60 * 60,
        scopes=[generate_random_id() for i in range(3)]
    )
def _time_shifted_token(claims, signing_key, day_offset):
    """Sign a copy of *claims* with ``iat``/``exp`` moved by *day_offset* days.

    A positive offset yields a token that is not yet valid; a negative one
    yields an already-expired token. *claims* itself is left untouched
    (deep-copied) so sibling fixtures see the original timestamps.
    """
    from datetime import datetime, timedelta
    from copy import deepcopy
    from parrot_api.core.auth.jwt import sign_token
    shifted = deepcopy(claims)
    delta = timedelta(days=day_offset)
    for key in ('iat', 'exp'):
        shifted[key] = int((datetime.fromtimestamp(shifted[key]) + delta).timestamp())
    return sign_token(payload=shifted, signing_key=signing_key)


@pytest.fixture()
def future_token(claims, audience, issuer, signing_key):
    """Signed token whose iat/exp lie one day in the future."""
    return _time_shifted_token(claims, signing_key, 1)


@pytest.fixture()
def expired_token(claims, audience, issuer, signing_key):
    """Signed token whose iat/exp lie one day in the past."""
    return _time_shifted_token(claims, signing_key, -1)
@pytest.fixture()
def signed_token(claims, audience, issuer, signing_key):
    """Valid JWT built from the *claims* fixture and the test signing key."""
    from parrot_api.core.auth.jwt import sign_token
    return sign_token(payload=claims, signing_key=signing_key)
def test_decode_token(claims, public_keys, signed_token, audiences, issuers):
    """A validly signed token decodes back to the original claims."""
    from parrot_api.core.auth.jwt import decode_token
    decoded_token = decode_token(token=signed_token, audiences=audiences, issuers=issuers,
                                 auth_keys=public_keys['keys'])
    assert claims == decoded_token


def test_invalid_issuer(signed_token, public_keys, audiences):
    """Decoding rejects a token whose issuer is not in the accepted list."""
    from parrot_api.core.auth.jwt import decode_token
    from jose.exceptions import JWTError
    with pytest.raises(JWTError):
        decode_token(
            token=signed_token, audiences=audiences, issuers=[generate_random_id()],
            auth_keys=public_keys['keys']
        )


def test_invalid_audience(signed_token, public_keys, issuers):
    """Decoding rejects a token whose audience is not in the accepted list."""
    from parrot_api.core.auth.jwt import decode_token
    from jose.exceptions import JWTError
    with pytest.raises(JWTError):
        decode_token(token=signed_token, audiences=[generate_random_id()], issuers=issuers,
                     auth_keys=public_keys['keys'])


def test_expired_token(expired_token, public_keys, audiences, issuers):
    """Decoding a token past its exp raises ExpiredSignatureError."""
    from parrot_api.core.auth.jwt import decode_token
    from jose.exceptions import ExpiredSignatureError
    with pytest.raises(ExpiredSignatureError):
        decode_token(
            token=expired_token, audiences=audiences, issuers=issuers,
            auth_keys=public_keys['keys']
        )


def test_future_token(future_token, public_keys, audiences, issuers):
    """Decoding a token whose iat lies in the future raises JWTError."""
    from parrot_api.core.auth.jwt import decode_token
    from jose.exceptions import JWTError
    with pytest.raises(JWTError):
        decode_token(
            token=future_token, audiences=audiences, issuers=issuers,
            auth_keys=public_keys['keys']
        )
| 38.077778
| 119
| 0.725124
| 432
| 3,427
| 5.534722
| 0.152778
| 0.046006
| 0.059808
| 0.07821
| 0.763697
| 0.763697
| 0.763697
| 0.724383
| 0.70849
| 0.633626
| 0
| 0.00319
| 0.176831
| 3,427
| 89
| 120
| 38.505618
| 0.844381
| 0
| 0
| 0.571429
| 1
| 0
| 0.012839
| 0
| 0
| 0
| 0
| 0
| 0.014286
| 1
| 0.128571
| false
| 0
| 0.285714
| 0
| 0.471429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bfb8d0bfe1bf96a4bd671642ac246b37c5ff44b0
| 114
|
py
|
Python
|
mf2web/__init__.py
|
jlarsen-usgs/mf2web
|
57e2c65ee84d678245ca7853feca981950a2f662
|
[
"BSD-3-Clause"
] | 1
|
2019-03-28T02:22:56.000Z
|
2019-03-28T02:22:56.000Z
|
mf2web/__init__.py
|
jlarsen-usgs/mf2web
|
57e2c65ee84d678245ca7853feca981950a2f662
|
[
"BSD-3-Clause"
] | null | null | null |
mf2web/__init__.py
|
jlarsen-usgs/mf2web
|
57e2c65ee84d678245ca7853feca981950a2f662
|
[
"BSD-3-Clause"
] | null | null | null |
from .mf2web import GwWebFlow
from . import seawat
from . import mt3d
from . import utils
from . import mf88
| 19
| 30
| 0.736842
| 16
| 114
| 5.25
| 0.5
| 0.47619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.044944
| 0.219298
| 114
| 5
| 31
| 22.8
| 0.898876
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bfd8b0fb27e109e6860e98fa6dfa7ac285c38b4f
| 31
|
py
|
Python
|
libsaas/services/basecamp/__init__.py
|
MidtownFellowship/libsaas
|
541bb731b996b08ede1d91a235cb82895765c38a
|
[
"MIT"
] | 155
|
2015-01-27T15:17:59.000Z
|
2022-02-20T00:14:08.000Z
|
libsaas/services/basecamp/__init__.py
|
MidtownFellowship/libsaas
|
541bb731b996b08ede1d91a235cb82895765c38a
|
[
"MIT"
] | 14
|
2015-01-12T08:22:37.000Z
|
2021-06-16T19:49:31.000Z
|
libsaas/services/basecamp/__init__.py
|
MidtownFellowship/libsaas
|
541bb731b996b08ede1d91a235cb82895765c38a
|
[
"MIT"
] | 43
|
2015-01-28T22:41:45.000Z
|
2021-09-21T04:44:26.000Z
|
from .service import Basecamp
| 10.333333
| 29
| 0.806452
| 4
| 31
| 6.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16129
| 31
| 2
| 30
| 15.5
| 0.961538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
44aa6b54ac60f2accf9de15d2c5cd23839005de5
| 14,599
|
py
|
Python
|
models/adaptive_manfold_learning_knn.py
|
ZJUCAGD/GTS-CNN
|
a329f314b795f0dea0f46db623ac955a47619e7d
|
[
"MIT"
] | null | null | null |
models/adaptive_manfold_learning_knn.py
|
ZJUCAGD/GTS-CNN
|
a329f314b795f0dea0f46db623ac955a47619e7d
|
[
"MIT"
] | null | null | null |
models/adaptive_manfold_learning_knn.py
|
ZJUCAGD/GTS-CNN
|
a329f314b795f0dea0f46db623ac955a47619e7d
|
[
"MIT"
] | null | null | null |
import os
import sys
import numpy as np
import scipy
import scipy.sparse as sp
import matplotlib.pyplot as plt
from sklearn.neighbors import NearestNeighbors
from sklearn.decomposition import PCA
import multiprocessing
from multiprocessing.dummy import Pool as ThreadPool
# from multiprocessing import Pool as ThreadPool
from time import clock, sleep
import math
def timeit(func):
    """Decorator that prints the wall-clock duration of each call.

    Uses ``time.perf_counter`` instead of ``time.clock``: ``clock`` was
    deprecated in Python 3.3 and removed in 3.8, so the original wrapper
    crashed on modern interpreters. (The stale file-top
    ``from time import clock, sleep`` should also be cleaned up.)
    """
    import functools
    import time

    @functools.wraps(func)  # preserve the wrapped function's name/docstring
    def wrapper(*args, **kwargs):
        starting_time = time.perf_counter()
        result = func(*args, **kwargs)
        ending_time = time.perf_counter()
        print('Duration: {}'.format(ending_time - starting_time))
        return result
    return wrapper
@timeit
def hello():
    """Timed smoke test: print a small list, map a worker over it via a
    4-thread pool, and print the mapped results."""
    numbers = [value for value in range(3)]
    print(numbers)

    def worker(value):
        # The sqrt result is discarded on purpose; the call just does a
        # little work so the pool has something to execute.
        math.sqrt(value * value + 1)
        return [value]

    pool = ThreadPool(4)
    mapped = pool.map(worker, numbers)
    pool.close()
    pool.join()
    print(mapped)
@timeit
def adaptive_knn(filename=None, savename=None, d=2, k_max=16, k_min=None):
    """Build an adaptive k-NN graph per model and save it as sparse matrices.

    For every point cloud in the .npy dataset at *filename* (expects a dict
    with a ``'data'`` array of shape (n_model, n_point, 3)):
      STEP 1 shrinks each point's neighbourhood from *k_max* down toward
      *k_min* until the neighbourhood is locally flat enough (ratio of
      residual to leading singular values below ``yita``);
      STEP 2 re-adds dropped neighbours whose PCA reconstruction error is
      small (neighbourhood expansion).
    The per-model adjacency is stored as a scipy CSR matrix and the list is
    saved to *savename* (derived from *filename* when None).

    :param filename: path to the input .npy dataset; required.
    :param savename: output path; auto-derived when None.
    :param d: intrinsic dimensionality assumed for the local patches.
    :param k_max: largest neighbourhood size considered.
    :param k_min: smallest neighbourhood size; defaults to d + 4.
    """
    if (filename == None):
        print("need a file name")
        return
    modelnet10 = np.load(filename, encoding='latin1', allow_pickle=True)
    modelnet10_data = modelnet10.tolist()['data'] #(3991, 1024, 3)
    # modelnet10_label = modelnet10.tolist()['label'] #(3991,)
    # modelnet10_seg = modelnet10.tolist()['seg_label'] #(n_model, 2048, C)
    del modelnet10
    print("the dataset shape is {}".format(modelnet10_data.shape))
    n_model, n_point, _ = modelnet10_data.shape
    print("k_max={}".format(k_max))
    # start/end select a slice of the models; currently the full range.
    start = n_model // 4 * 0
    end = n_model
    print('process start={},end={}'.format(start, end))
    # modelnet10_data=modelnet10_data[start:end]
    result_knn = []
    # d=2
    # k_max=16
    if k_min is None:  # 6
        k_min = d + 4
    # yita is the flatness threshold on the singular-value ratio.
    yita = 0.32
    print('k_max={}'.format(k_max))
    print('k_min={}'.format(k_min))
    print('yita={}'.format(yita))
    for model_i in range(start, end):
        if (model_i % 100 == 0):
            print(model_i)
        X = modelnet10_data[model_i]  # i-th model, shape=(1024,3)
        nbrs = NearestNeighbors(n_neighbors=k_max + 1,
                                algorithm='ball_tree').fit(X)
        distances, indices = nbrs.kneighbors(X)
        # Drop column 0: each point is its own nearest neighbour.
        indx = indices[:, 1:]  # nearest k_max neighbors
        n, m = X.shape
        # STEP 1: adaptive neighbourhood contraction per point.
        rho = [[] for i in range(n)]
        result_indx = [[] for i in range(n)]
        for i in range(n):
            flag = 0
            tmp = indx[i]
            X_k = np.transpose(X[tmp])  # (nfeatures, npoints)
            for j in range(k_max, k_min, -1):
                x_i = np.mean(X_k, axis=1).reshape(-1, 1)  # (nfeatures, 1)
                X_i = X_k - x_i
                # compute singular value d=2 (8), k_min=d+4 , yita=0.32
                u, sigma, v = np.linalg.svd(X_i, full_matrices=False)
                sigma = sigma**2
                # Ratio of out-of-plane to in-plane energy (d=2 assumed).
                r_i = np.sqrt(np.sum(sigma[2:]) / np.sum(sigma[:2]))
                if r_i < yita:
                    result_indx[i] = indx[i][:j]
                    rho[i].append(r_i)
                    flag = 1
                    break
                rho[i].append(r_i)
                X_k = X_k[:, :-1]
            if flag == 0:
                # No size passed the flatness test: keep the flattest one seen.
                max_k = np.argmin(rho[i])
                result_indx[i] = indx[i][:k_max - max_k]
        # STEP 2: neighbourhood expansion via PCA reconstruction error.
        for i in range(n):
            X1 = X[result_indx[i]].copy()  # the neighborhood of i-th point
            x2_indx = indx[i][len(result_indx[i]):]
            X2 = X[x2_indx]  # (N_SMAPLE, N_FEATURE)
            if X2.shape[0] == 0:
                continue
            pca = PCA(n_components=2)
            pca.fit(X1)
            # pca_score = pca.explained_variance_ratio_
            V = pca.components_
            # pca_X1=pca.fit_transform(X1)
            mypca_X2 = np.dot(X2 - pca.mean_, V.T)  # (N_SAMPLE, N_FEATURE')
            recover_X2 = pca.inverse_transform(mypca_X2)
            # Re-admit candidates whose off-plane error is within yita of
            # their in-plane magnitude.
            do_select = np.linalg.norm(
                X2 - recover_X2,
                axis=1) <= yita * np.linalg.norm(mypca_X2, axis=1)
            NE = [
                x2_indx[idx] for idx, ii in enumerate(do_select) if ii == True
            ]  # Neighborhood Expansion
            if NE != []:
                result_indx[i] = np.append(result_indx[i], NE)
            # print(np.linalg.norm(X2-recover_X2,axis=1))
            # print(yita*np.linalg.norm(mypca_X2,axis=1))
            # print(do_select)
            # print(np.linalg.norm(np.dot(X1-pca.mean_,V.T)-pca_X1))
            # print(np.linalg.norm(pca.inverse_transform(pca_XX)-XX))
        result_knn.append(result_indx)
    if (len(result_knn) != end -
            start):  #n_moddel(list), n_points(list), n_neiberhood(np.array)
        raise Exception("len of result_knn!=n_model")
    # convert list to sparse matrix
    for i in range(end - start):
        data = result_knn[i]
        row_ = []
        col_ = []
        for row, cols in enumerate(data):
            row_ += [row for _ in cols]
            col_ += list(cols)
        sp_data = sp.csr_matrix(
            (np.ones(len(row_), dtype='int32'), (row_, col_)),
            shape=(n_point, n_point))
        result_knn[i] = sp_data
    # savename='./modelnet/data/modelNet40_train_16nn_GM_adaptive_knn_sparse.npy'
    if (savename == None):
        # e.g.
        # filename = './modelnet/data/modelNet10_train_16nn_GM.npy'
        # savename = ./modelnet/data/modelNet10_train_16nn_GM_adaptive_knn.npy
        savename = "".join(
            filename.split('.npy')) + "_adaptive_knn_sparse_4.npy"
    # shapenet 50
    np.save(
        savename,
        np.array({ #'data': modelnet10_data,
            'graph': result_knn
            #'seg_label': modelnet10_seg,
            #'label': modelnet10_label
        }))  #'label_dict':test_modelnet10_label_dict,
    # np.save(savename, np.array(result_knn))
    print("saved to {}".format(savename))
def do_work(result_knn, modelnet10_data, start, stop, k_max, d):
    """Worker for (commented-out) multiprocessing: adaptive k-NN on a slice.

    Runs the same STEP 1 / STEP 2 algorithm as ``adaptive_knn`` over models
    ``start:stop`` of *modelnet10_data* and writes the per-model
    neighbourhood lists back into ``result_knn[start:stop]`` — intended for
    a Manager-shared list so parent and children see the result.

    :param result_knn: pre-sized shared list receiving the slice's output.
    :param modelnet10_data: array-like of point clouds, (n_model, n_point, 3).
    :param start: first model index (inclusive).
    :param stop: last model index (exclusive).
    :param k_max: largest neighbourhood size considered.
    :param d: assumed intrinsic dimensionality (k_min = d + 4).
    """
    result_knn_ = []
    for model_i in range(start, stop):
        if (model_i % 10 == 0):
            print(model_i)
        X = modelnet10_data[model_i]  # i-th model, shape=(1024,3)
        nbrs = NearestNeighbors(n_neighbors=k_max + 1,
                                algorithm='ball_tree').fit(X)
        distances, indices = nbrs.kneighbors(X)
        # Drop column 0 (self-neighbour).
        indx = indices[:, 1:]  # nearest 16 nerighbors
        # d=2
        # k_max=16
        k_min = d + 4  # 6
        n, m = X.shape
        # STEP 1: adaptive neighbourhood contraction (see adaptive_knn).
        rho = [[] for i in range(n)]
        result_indx = [[] for i in range(n)]
        yita = 0.32
        for i in range(n):
            flag = 0
            tmp = indx[i]
            X_k = np.transpose(X[tmp])  #(nfeatures, npoints)
            for j in range(k_max, k_min, -1):
                x_i = np.mean(X_k, axis=1).reshape(-1, 1)  #(nfeatures, 1)
                X_i = X_k - x_i
                # compute singular value, d=2 (8), k_min=d+4 , yita=0.32
                u, sigma, v = np.linalg.svd(X_i, full_matrices=False)
                sigma = sigma**2
                r_i = np.sqrt(np.sum(sigma[2:]) / np.sum(sigma[:2]))
                if r_i < yita:
                    result_indx[i] = indx[i][:j]
                    rho[i].append(r_i)
                    flag = 1
                    break
                rho[i].append(r_i)
                X_k = X_k[:, :-1]
            if flag == 0:
                max_k = np.argmin(rho[i])
                result_indx[i] = indx[i][:k_max - max_k]
        # STEP 2: neighbourhood expansion via PCA reconstruction error.
        for i in range(n):
            X1 = X[result_indx[i]].copy()  # neighborhood of i-th point
            x2_indx = indx[i][len(result_indx[i]):]
            X2 = X[x2_indx]  #(N_SMAPLE, N_FEATURE)
            if X2.shape[0] == 0:
                continue
            pca = PCA(n_components=2)
            pca.fit(X1)
            # pca_score = pca.explained_variance_ratio_
            V = pca.components_
            # pca_X1=pca.fit_transform(X1)
            mypca_X2 = np.dot(X2 - pca.mean_, V.T)  #(N_SAMPLE, N_FEATURE')
            recover_X2 = pca.inverse_transform(mypca_X2)
            do_select = np.linalg.norm(
                X2 - recover_X2,
                axis=1) <= yita * np.linalg.norm(mypca_X2, axis=1)
            NE = [
                x2_indx[idx] for idx, ii in enumerate(do_select) if ii == True
            ]  # Neighborhood Expansion
            # print("i={}, orig ks={}, NE={}".format(i,X1.shape[0],NE))
            # result_indx[i]+=NE
            if NE != []:
                result_indx[i] = np.append(result_indx[i], NE)
        result_knn_.append(result_indx)
    result_knn[start:stop] = result_knn_
@timeit
def multi_threads_adaptive_knn(filename=None, savename=None, d=2, k_max=16):
    """Adaptive k-NN graph construction over the whole dataset (single-threaded).

    Despite the name, the multiprocessing fan-out below is commented out and
    all models are processed sequentially with the same STEP 1 / STEP 2
    algorithm as ``adaptive_knn``. Unlike ``adaptive_knn`` this variant also
    saves the raw data, labels, and seg labels alongside the graph.

    :param filename: path to the input .npy dataset; required.
    :param savename: output path; auto-derived from *filename* when None.
    :param d: assumed intrinsic dimensionality (k_min = d + 4).
    :param k_max: largest neighbourhood size considered.
    """
    if (filename == None):
        print("need a file name")
        return
    modelnet10 = np.load(filename, encoding='latin1')
    modelnet10_data = modelnet10.tolist()['data']  #(3991, 1024, 3)
    modelnet10_label = modelnet10.tolist()['label']  #(3991,)
    print("the dataset shape is {}".format(modelnet10_data.shape))
    n_model, n_point, _ = modelnet10_data.shape
    print("k_max={}".format(k_max))
    # n_model=40
    result_knn = []
    for model_i in range(n_model):
        if (model_i % 100 == 0):
            print(model_i)
        X = modelnet10_data[model_i]  # model_i-th model shape=(1024,3)
        nbrs = NearestNeighbors(n_neighbors=k_max + 1,
                                algorithm='ball_tree').fit(X)
        distances, indices = nbrs.kneighbors(X)
        indx = indices[:, 1:]  # nearset 16 neighbors
        # d=2
        # k_max=16
        k_min = d + 4  #6
        n, m = X.shape
        # STEP 1: adaptive neighbourhood contraction.
        rho = [[] for i in range(n)]
        result_indx = [[] for i in range(n)]
        yita = 0.32
        for i in range(n):
            flag = 0
            tmp = indx[i]
            X_k = np.transpose(X[tmp])  #(nfeatures, npoints)
            for j in range(k_max, k_min, -1):
                x_i = np.mean(X_k, axis=1).reshape(-1, 1)  #(nfeatures, 1)
                X_i = X_k - x_i
                # compute singular values; d=2 (8), k_min=d+4 , yita=0.32
                u, sigma, v = np.linalg.svd(X_i, full_matrices=False)
                sigma = sigma**2
                r_i = np.sqrt(np.sum(sigma[2:]) / np.sum(sigma[:2]))
                if r_i < yita:
                    result_indx[i] = indx[i][:j]
                    rho[i].append(r_i)
                    flag = 1
                    break
                rho[i].append(r_i)
                X_k = X_k[:, :-1]
            if flag == 0:
                max_k = np.argmin(rho[i])
                result_indx[i] = indx[i][:k_max - max_k]
        # STEP 2: neighbourhood expansion via PCA reconstruction error.
        for i in range(n):
            X1 = X[result_indx[i]].copy()  # neighborhood of the i-th point
            x2_indx = indx[i][len(result_indx[i]):]
            X2 = X[x2_indx]  #(N_SMAPLE, N_FEATURE)
            if X2.shape[0] == 0:
                continue
            pca = PCA(n_components=2)
            pca.fit(X1)
            # pca_score = pca.explained_variance_ratio_
            V = pca.components_
            # pca_X1=pca.fit_transform(X1)
            mypca_X2 = np.dot(X2 - pca.mean_, V.T)  #(N_SAMPLE, N_FEATURE')
            recover_X2 = pca.inverse_transform(mypca_X2)
            do_select = np.linalg.norm(
                X2 - recover_X2,
                axis=1) <= yita * np.linalg.norm(mypca_X2, axis=1)
            NE = [
                x2_indx[idx] for idx, ii in enumerate(do_select) if ii == True
            ]  # Neighborhood Expansion
            # print("i={}, orig ks={}, NE={}".format(i,X1.shape[0],NE))
            # result_indx[i]+=NE
            if NE != []:
                result_indx[i] = np.append(result_indx[i], NE)
        # return result_indx
        result_knn.append(result_indx)
    # pool = ThreadPool(4)
    # # result_knn = pool.map(process, range(n_model))
    # pool.close()
    # pool.join()
    # with multiprocessing.Manager() as MG:  # rename
    #     mydict=MG.dict()  # dict shared between main and child processes
    #     mydict["array"]=np.zeros((3,3)).tolist()
    #     result_knn=MG.list(result_knn)  # list shared between main and child processes
    # # mylist.append([1,2])
    # modelnet10_data=MG.list(modelnet10_data)  # list shared between main and child processes
    # # multiprocessing fan-out section
    # #result=multiprocessing.Manager().dict()
    # #result['par']=Par
    # #result['num']=xy_arrays
    # threads=[]
    # t1 =multiprocessing.Process(target=do_work,args=(result_knn,modelnet10_data,0,n_model//4,k_max,d))
    # threads.append(t1)
    # t2 =multiprocessing.Process(target=do_work,args=(result_knn,modelnet10_data,n_model//4,n_model//4*2,k_max,d))
    # threads.append(t2)
    # t3 =multiprocessing.Process(target=do_work,args=(result_knn,modelnet10_data,n_model//4*2,n_model//4*3,k_max,d))
    # threads.append(t3)
    # t4 =multiprocessing.Process(target=do_work,args=(result_knn,modelnet10_data,n_model//4*3,n_model,k_max,d))
    # threads.append(t4)
    # [t.start() for t in threads]
    # [t.join() for t in threads]
    # print(result_knn)
    if (len(result_knn) != n_model):
        raise Exception("len of result_knn!=n_model")
    # convert list to sparse matrix
    for i in range(n_model):
        data = result_knn[i]
        row_ = []
        col_ = []
        for row, cols in enumerate(data):
            row_ += [row for _ in cols]
            col_ += list(cols)
        sp_data = sp.csr_matrix(
            (np.ones(len(row_), dtype='int32'), (row_, col_)),
            shape=(n_point, n_point))
        result_knn[i] = sp_data
    # savename='./modelnet/data/modelNet40_train_16nn_GM_adaptive_knn_sparse.npy'
    if (savename == None):
        # e.g.
        # filename = './modelnet/data/modelNet10_train_16nn_GM.npy'
        # savename = ./modelnet/data/modelNet10_train_16nn_GM_adaptive_knn.npy
        savename = "".join(filename.split('.npy')) + "_adaptive_knn_sparse.npy"
    # shapenet 50
    np.save(savename,
            np.array({
                'data': modelnet10_data,
                'graph': result_knn,
                'seg_label': modelnet10.tolist()['seg_label'],
                'label': modelnet10_label
            }))  #'label_dict':test_modelnet10_label_dict,
    # np.save(savename, np.array(result_knn))
    print("saved to {}".format(savename))
if __name__ == '__main__':
    # Build the adaptive 32-NN sparse graph for the ModelNet40 test split
    # and save it next to the input with a descriptive suffix.
    filename = './modelnet/data/modelNet40_test_16nn_GM.npy'
    savename = "".join(filename.split('.npy')) + "_adaptive_32knn_sparse.npy"
    adaptive_knn(filename=filename, savename=savename, k_max=32, k_min=16)
    # abc=np.load(savename, allow_pickle=True)
| 36.959494
| 121
| 0.541064
| 1,968
| 14,599
| 3.808435
| 0.120427
| 0.021348
| 0.029353
| 0.022015
| 0.759706
| 0.731554
| 0.724216
| 0.710073
| 0.702602
| 0.702602
| 0
| 0.038699
| 0.323858
| 14,599
| 394
| 122
| 37.053299
| 0.720596
| 0.24899
| 0
| 0.731884
| 0
| 0
| 0.042812
| 0.01098
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025362
| false
| 0
| 0.043478
| 0
| 0.086957
| 0.065217
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
78364fcf61ed98804c7e0c12053d1001ec03f235
| 97
|
py
|
Python
|
backend-project/small_eod/collections/fields.py
|
WlodzimierzKorza/small_eod
|
027022bd71122a949a2787d0fb86518df80e48cd
|
[
"MIT"
] | 64
|
2019-12-30T11:24:03.000Z
|
2021-06-24T01:04:56.000Z
|
backend-project/small_eod/collections/fields.py
|
WlodzimierzKorza/small_eod
|
027022bd71122a949a2787d0fb86518df80e48cd
|
[
"MIT"
] | 465
|
2018-06-13T21:43:43.000Z
|
2022-01-04T23:33:56.000Z
|
backend-project/small_eod/collections/fields.py
|
WlodzimierzKorza/small_eod
|
027022bd71122a949a2787d0fb86518df80e48cd
|
[
"MIT"
] | 72
|
2018-12-02T19:47:03.000Z
|
2022-01-04T22:54:49.000Z
|
from rest_framework import serializers
class DurationField(serializers.IntegerField):
    """Integer-backed DRF field representing a duration.

    Behaves exactly like ``serializers.IntegerField``; the subclass exists
    only to give the field a domain-specific name. The time unit is not
    specified here — confirm against the models/serializers that use it.
    """
    pass
| 16.166667
| 46
| 0.824742
| 10
| 97
| 7.9
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134021
| 97
| 5
| 47
| 19.4
| 0.940476
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
15372fa5c61dd37b6f8f148a78bccdbb784f3c0f
| 262
|
py
|
Python
|
aioethereum/__init__.py
|
h8is2w8/aioethereum
|
eb23e28068c34cda28bbef45c3f288d16936d88e
|
[
"MIT"
] | null | null | null |
aioethereum/__init__.py
|
h8is2w8/aioethereum
|
eb23e28068c34cda28bbef45c3f288d16936d88e
|
[
"MIT"
] | null | null | null |
aioethereum/__init__.py
|
h8is2w8/aioethereum
|
eb23e28068c34cda28bbef45c3f288d16936d88e
|
[
"MIT"
] | null | null | null |
from .client import (
AsyncIOHTTPClient,
AsyncIOIPCClient,
BaseAsyncIOClient,
create_ethereum_client
)
__version__ = '0.2.1'
__all__ = [
'AsyncIOHTTPClient',
'AsyncIOIPCClient',
'BaseAsyncIOClient',
'create_ethereum_client',
]
| 15.411765
| 29
| 0.69084
| 20
| 262
| 8.45
| 0.65
| 0.390533
| 0.591716
| 0.662722
| 0.828402
| 0.828402
| 0
| 0
| 0
| 0
| 0
| 0.014493
| 0.209924
| 262
| 16
| 30
| 16.375
| 0.801932
| 0
| 0
| 0
| 0
| 0
| 0.293893
| 0.083969
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.076923
| 0
| 0.076923
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
158cfd231a52e22a890ef62c39ed04bfa8c5d998
| 23
|
py
|
Python
|
src/score_cleaner/__init__.py
|
m-alban/music_learner
|
4d4f1835f676becb8fee5824ab54b90b43de8723
|
[
"MIT"
] | null | null | null |
src/score_cleaner/__init__.py
|
m-alban/music_learner
|
4d4f1835f676becb8fee5824ab54b90b43de8723
|
[
"MIT"
] | null | null | null |
src/score_cleaner/__init__.py
|
m-alban/music_learner
|
4d4f1835f676becb8fee5824ab54b90b43de8723
|
[
"MIT"
] | null | null | null |
from .prepare import *
| 11.5
| 22
| 0.73913
| 3
| 23
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 23
| 1
| 23
| 23
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1593bc6d064e046ff17dd346a5f895cf708911f8
| 29
|
py
|
Python
|
clicktypes/types.py
|
jdidion/ClickTypes
|
e09465337f2b3bb6f47c886cee0f6d37c47c72fe
|
[
"MIT"
] | null | null | null |
clicktypes/types.py
|
jdidion/ClickTypes
|
e09465337f2b3bb6f47c886cee0f6d37c47c72fe
|
[
"MIT"
] | null | null | null |
clicktypes/types.py
|
jdidion/ClickTypes
|
e09465337f2b3bb6f47c886cee0f6d37c47c72fe
|
[
"MIT"
] | null | null | null |
from typing import NewType
| 7.25
| 26
| 0.793103
| 4
| 29
| 5.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.206897
| 29
| 3
| 27
| 9.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ec5da265ef7703ae4ac4bb0c6a904d50954022fd
| 43
|
py
|
Python
|
modules/classifier/__init__.py
|
pythonclubmtl/paperflix
|
5115b06569c2d8183857fcb3d6c9a1a9889030a1
|
[
"MIT"
] | null | null | null |
modules/classifier/__init__.py
|
pythonclubmtl/paperflix
|
5115b06569c2d8183857fcb3d6c9a1a9889030a1
|
[
"MIT"
] | 2
|
2019-10-28T17:31:10.000Z
|
2019-12-17T21:54:56.000Z
|
modules/classifier/__init__.py
|
pythonclubmtl/paperflix
|
5115b06569c2d8183857fcb3d6c9a1a9889030a1
|
[
"MIT"
] | null | null | null |
from .train import *
from .predict import *
| 21.5
| 22
| 0.744186
| 6
| 43
| 5.333333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162791
| 43
| 2
| 22
| 21.5
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ec71e87df822223576f1ddcbe584ebf9d3038db5
| 46
|
py
|
Python
|
First-homework.py
|
Kbrane-08/Create-First-Homework
|
3f4836cb4c6e42d5ca5c77a85ac3e9a9a918c8b2
|
[
"MIT"
] | null | null | null |
First-homework.py
|
Kbrane-08/Create-First-Homework
|
3f4836cb4c6e42d5ca5c77a85ac3e9a9a918c8b2
|
[
"MIT"
] | 1
|
2021-09-29T00:14:54.000Z
|
2021-09-29T00:14:54.000Z
|
First-homework.py
|
Kbrane-08/Create-First-Homework
|
3f4836cb4c6e42d5ca5c77a85ac3e9a9a918c8b2
|
[
"MIT"
] | null | null | null |
# Print a short self-introduction (first homework exercise).
# NOTE(review): "My perfer pronouns is" looks like a typo for "My preferred
# pronouns are" — left unchanged because the string is runtime output.
print("Glen Wang.My perfer pronouns is Shark")
| 46
| 46
| 0.782609
| 8
| 46
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108696
| 46
| 1
| 46
| 46
| 0.878049
| 0
| 0
| 0
| 0
| 0
| 0.787234
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
ec788321e2e8a58cfa639834e94fa2dad74d7d04
| 14,391
|
py
|
Python
|
sdk/python/pulumi_aws/connect/bot_association.py
|
chivandikwa/pulumi-aws
|
19c08bf9dcb90544450ffa4eec7bf6751058fde2
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/connect/bot_association.py
|
chivandikwa/pulumi-aws
|
19c08bf9dcb90544450ffa4eec7bf6751058fde2
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/connect/bot_association.py
|
chivandikwa/pulumi-aws
|
19c08bf9dcb90544450ffa4eec7bf6751058fde2
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['BotAssociationArgs', 'BotAssociation']
@pulumi.input_type
class BotAssociationArgs:
def __init__(__self__, *,
instance_id: pulumi.Input[str],
lex_bot: pulumi.Input['BotAssociationLexBotArgs']):
"""
The set of arguments for constructing a BotAssociation resource.
:param pulumi.Input[str] instance_id: The identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance.
:param pulumi.Input['BotAssociationLexBotArgs'] lex_bot: Configuration information of an Amazon Lex (V1) bot. Detailed below.
"""
pulumi.set(__self__, "instance_id", instance_id)
pulumi.set(__self__, "lex_bot", lex_bot)
@property
@pulumi.getter(name="instanceId")
def instance_id(self) -> pulumi.Input[str]:
"""
The identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance.
"""
return pulumi.get(self, "instance_id")
@instance_id.setter
def instance_id(self, value: pulumi.Input[str]):
pulumi.set(self, "instance_id", value)
@property
@pulumi.getter(name="lexBot")
def lex_bot(self) -> pulumi.Input['BotAssociationLexBotArgs']:
"""
Configuration information of an Amazon Lex (V1) bot. Detailed below.
"""
return pulumi.get(self, "lex_bot")
@lex_bot.setter
def lex_bot(self, value: pulumi.Input['BotAssociationLexBotArgs']):
pulumi.set(self, "lex_bot", value)
@pulumi.input_type
class _BotAssociationState:
def __init__(__self__, *,
instance_id: Optional[pulumi.Input[str]] = None,
lex_bot: Optional[pulumi.Input['BotAssociationLexBotArgs']] = None):
"""
Input properties used for looking up and filtering BotAssociation resources.
:param pulumi.Input[str] instance_id: The identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance.
:param pulumi.Input['BotAssociationLexBotArgs'] lex_bot: Configuration information of an Amazon Lex (V1) bot. Detailed below.
"""
if instance_id is not None:
pulumi.set(__self__, "instance_id", instance_id)
if lex_bot is not None:
pulumi.set(__self__, "lex_bot", lex_bot)
@property
@pulumi.getter(name="instanceId")
def instance_id(self) -> Optional[pulumi.Input[str]]:
"""
The identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance.
"""
return pulumi.get(self, "instance_id")
@instance_id.setter
def instance_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "instance_id", value)
@property
@pulumi.getter(name="lexBot")
def lex_bot(self) -> Optional[pulumi.Input['BotAssociationLexBotArgs']]:
"""
Configuration information of an Amazon Lex (V1) bot. Detailed below.
"""
return pulumi.get(self, "lex_bot")
@lex_bot.setter
def lex_bot(self, value: Optional[pulumi.Input['BotAssociationLexBotArgs']]):
pulumi.set(self, "lex_bot", value)
class BotAssociation(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
instance_id: Optional[pulumi.Input[str]] = None,
lex_bot: Optional[pulumi.Input[pulumi.InputType['BotAssociationLexBotArgs']]] = None,
__props__=None):
"""
Allows the specified Amazon Connect instance to access the specified Amazon Lex (V1) bot. For more information see
[Amazon Connect: Getting Started](https://docs.aws.amazon.com/connect/latest/adminguide/amazon-connect-get-started.html) and [Add an Amazon Lex bot](https://docs.aws.amazon.com/connect/latest/adminguide/amazon-lex.html).
> **NOTE:** This resource only currently supports Amazon Lex (V1) Associations.
## Example Usage
### Basic
```python
import pulumi
import pulumi_aws as aws
example = aws.connect.BotAssociation("example",
instance_id=aws_connect_instance["example"]["id"],
lex_bot=aws.connect.BotAssociationLexBotArgs(
lex_region="us-west-2",
name="Test",
))
```
### Including a sample Lex bot
```python
import pulumi
import pulumi_aws as aws
current = aws.get_region()
example_intent = aws.lex.Intent("exampleIntent",
create_version=True,
name="connect_lex_intent",
fulfillment_activity=aws.lex.IntentFulfillmentActivityArgs(
type="ReturnIntent",
),
sample_utterances=["I would like to pick up flowers."])
example_bot = aws.lex.Bot("exampleBot",
abort_statement=aws.lex.BotAbortStatementArgs(
messages=[aws.lex.BotAbortStatementMessageArgs(
content="Sorry, I am not able to assist at this time.",
content_type="PlainText",
)],
),
clarification_prompt=aws.lex.BotClarificationPromptArgs(
max_attempts=2,
messages=[aws.lex.BotClarificationPromptMessageArgs(
content="I didn't understand you, what would you like to do?",
content_type="PlainText",
)],
),
intents=[aws.lex.BotIntentArgs(
intent_name=example_intent.name,
intent_version="1",
)],
child_directed=False,
name="connect_lex_bot",
process_behavior="BUILD")
example_bot_association = aws.connect.BotAssociation("exampleBotAssociation",
instance_id=aws_connect_instance["example"]["id"],
lex_bot=aws.connect.BotAssociationLexBotArgs(
lex_region=current.name,
name=example_bot.name,
))
```
## Import
`aws_connect_bot_association` can be imported by using the Amazon Connect instance ID, Lex (V1) bot name, and Lex (V1) bot region separated by colons (`:`), e.g.
```sh
$ pulumi import aws:connect/botAssociation:BotAssociation example aaaaaaaa-bbbb-cccc-dddd-111111111111:Example:us-west-2
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] instance_id: The identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance.
:param pulumi.Input[pulumi.InputType['BotAssociationLexBotArgs']] lex_bot: Configuration information of an Amazon Lex (V1) bot. Detailed below.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: BotAssociationArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Allows the specified Amazon Connect instance to access the specified Amazon Lex (V1) bot. For more information see
[Amazon Connect: Getting Started](https://docs.aws.amazon.com/connect/latest/adminguide/amazon-connect-get-started.html) and [Add an Amazon Lex bot](https://docs.aws.amazon.com/connect/latest/adminguide/amazon-lex.html).
> **NOTE:** This resource only currently supports Amazon Lex (V1) Associations.
## Example Usage
### Basic
```python
import pulumi
import pulumi_aws as aws
example = aws.connect.BotAssociation("example",
instance_id=aws_connect_instance["example"]["id"],
lex_bot=aws.connect.BotAssociationLexBotArgs(
lex_region="us-west-2",
name="Test",
))
```
### Including a sample Lex bot
```python
import pulumi
import pulumi_aws as aws
current = aws.get_region()
example_intent = aws.lex.Intent("exampleIntent",
create_version=True,
name="connect_lex_intent",
fulfillment_activity=aws.lex.IntentFulfillmentActivityArgs(
type="ReturnIntent",
),
sample_utterances=["I would like to pick up flowers."])
example_bot = aws.lex.Bot("exampleBot",
abort_statement=aws.lex.BotAbortStatementArgs(
messages=[aws.lex.BotAbortStatementMessageArgs(
content="Sorry, I am not able to assist at this time.",
content_type="PlainText",
)],
),
clarification_prompt=aws.lex.BotClarificationPromptArgs(
max_attempts=2,
messages=[aws.lex.BotClarificationPromptMessageArgs(
content="I didn't understand you, what would you like to do?",
content_type="PlainText",
)],
),
intents=[aws.lex.BotIntentArgs(
intent_name=example_intent.name,
intent_version="1",
)],
child_directed=False,
name="connect_lex_bot",
process_behavior="BUILD")
example_bot_association = aws.connect.BotAssociation("exampleBotAssociation",
instance_id=aws_connect_instance["example"]["id"],
lex_bot=aws.connect.BotAssociationLexBotArgs(
lex_region=current.name,
name=example_bot.name,
))
```
## Import
`aws_connect_bot_association` can be imported by using the Amazon Connect instance ID, Lex (V1) bot name, and Lex (V1) bot region separated by colons (`:`), e.g.
```sh
$ pulumi import aws:connect/botAssociation:BotAssociation example aaaaaaaa-bbbb-cccc-dddd-111111111111:Example:us-west-2
```
:param str resource_name: The name of the resource.
:param BotAssociationArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(BotAssociationArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
instance_id: Optional[pulumi.Input[str]] = None,
lex_bot: Optional[pulumi.Input[pulumi.InputType['BotAssociationLexBotArgs']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = BotAssociationArgs.__new__(BotAssociationArgs)
if instance_id is None and not opts.urn:
raise TypeError("Missing required property 'instance_id'")
__props__.__dict__["instance_id"] = instance_id
if lex_bot is None and not opts.urn:
raise TypeError("Missing required property 'lex_bot'")
__props__.__dict__["lex_bot"] = lex_bot
super(BotAssociation, __self__).__init__(
'aws:connect/botAssociation:BotAssociation',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
instance_id: Optional[pulumi.Input[str]] = None,
lex_bot: Optional[pulumi.Input[pulumi.InputType['BotAssociationLexBotArgs']]] = None) -> 'BotAssociation':
"""
Get an existing BotAssociation resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] instance_id: The identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance.
:param pulumi.Input[pulumi.InputType['BotAssociationLexBotArgs']] lex_bot: Configuration information of an Amazon Lex (V1) bot. Detailed below.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _BotAssociationState.__new__(_BotAssociationState)
__props__.__dict__["instance_id"] = instance_id
__props__.__dict__["lex_bot"] = lex_bot
return BotAssociation(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="instanceId")
def instance_id(self) -> pulumi.Output[str]:
"""
The identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance.
"""
return pulumi.get(self, "instance_id")
@property
@pulumi.getter(name="lexBot")
def lex_bot(self) -> pulumi.Output['outputs.BotAssociationLexBot']:
"""
Configuration information of an Amazon Lex (V1) bot. Detailed below.
"""
return pulumi.get(self, "lex_bot")
| 42.958209
| 228
| 0.63227
| 1,585
| 14,391
| 5.529968
| 0.147634
| 0.03012
| 0.023959
| 0.024643
| 0.793611
| 0.772276
| 0.761323
| 0.747062
| 0.734626
| 0.721164
| 0
| 0.004579
| 0.271628
| 14,391
| 334
| 229
| 43.086826
| 0.831616
| 0.514419
| 0
| 0.487395
| 1
| 0
| 0.136613
| 0.050434
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0.008403
| 0.058824
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ec81923a0c106847642cfaf7177b7e9471db10dc
| 161
|
py
|
Python
|
office365/excel/workbook_session_info.py
|
rikeshtailor/Office365-REST-Python-Client
|
ca7bfa1b22212137bb4e984c0457632163e89a43
|
[
"MIT"
] | 544
|
2016-08-04T17:10:16.000Z
|
2022-03-31T07:17:20.000Z
|
office365/excel/workbook_session_info.py
|
rikeshtailor/Office365-REST-Python-Client
|
ca7bfa1b22212137bb4e984c0457632163e89a43
|
[
"MIT"
] | 438
|
2016-10-11T12:24:22.000Z
|
2022-03-31T19:30:35.000Z
|
office365/excel/workbook_session_info.py
|
rikeshtailor/Office365-REST-Python-Client
|
ca7bfa1b22212137bb4e984c0457632163e89a43
|
[
"MIT"
] | 202
|
2016-08-22T19:29:40.000Z
|
2022-03-30T20:26:15.000Z
|
from office365.runtime.client_value import ClientValue
class WorkbookSessionInfo(ClientValue):
"""Provides information about workbook session."""
pass
| 23
| 54
| 0.78882
| 16
| 161
| 7.875
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021583
| 0.136646
| 161
| 6
| 55
| 26.833333
| 0.884892
| 0.273292
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
ecc03dd8944d99f1748d4682a9637c3d4bc1d43c
| 26
|
py
|
Python
|
tests/test_rudaux.py
|
hsmohammed/rudaux
|
673b2bb2d6b08f9d9c34a2ed6e284d9def1a0fc7
|
[
"MIT"
] | 1
|
2020-09-10T20:36:56.000Z
|
2020-09-10T20:36:56.000Z
|
tests/test_rudaux.py
|
hsmohammed/rudaux
|
673b2bb2d6b08f9d9c34a2ed6e284d9def1a0fc7
|
[
"MIT"
] | null | null | null |
tests/test_rudaux.py
|
hsmohammed/rudaux
|
673b2bb2d6b08f9d9c34a2ed6e284d9def1a0fc7
|
[
"MIT"
] | null | null | null |
from rudaux import rudaux
| 13
| 25
| 0.846154
| 4
| 26
| 5.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 26
| 1
| 26
| 26
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ecd59d86c309878daebdbb01b07f389bf047d854
| 127
|
py
|
Python
|
src/infrastructure/errors/unable_to_write_image_exception.py
|
OzielFilho/ProjetoFinalPdi
|
c9e6fe415f1a985d6eeac204580d3ab623026665
|
[
"MIT"
] | null | null | null |
src/infrastructure/errors/unable_to_write_image_exception.py
|
OzielFilho/ProjetoFinalPdi
|
c9e6fe415f1a985d6eeac204580d3ab623026665
|
[
"MIT"
] | null | null | null |
src/infrastructure/errors/unable_to_write_image_exception.py
|
OzielFilho/ProjetoFinalPdi
|
c9e6fe415f1a985d6eeac204580d3ab623026665
|
[
"MIT"
] | null | null | null |
from infrastructure.errors.image_exception import ImageException
class UnableToWriteImageException(ImageException):
pass
| 21.166667
| 64
| 0.858268
| 11
| 127
| 9.818182
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102362
| 127
| 5
| 65
| 25.4
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
01e89d5a0ff6cb022062719af023d917facba00f
| 11,061
|
py
|
Python
|
finance_ml/importance.py
|
BTETON/finance_ml
|
a585be2d04db5a749eb6b39b7336e5aeb30d6327
|
[
"MIT"
] | 446
|
2018-09-05T18:28:51.000Z
|
2022-03-28T23:45:41.000Z
|
finance_ml/importance.py
|
BTETON/finance_ml
|
a585be2d04db5a749eb6b39b7336e5aeb30d6327
|
[
"MIT"
] | 3
|
2019-03-26T13:48:51.000Z
|
2021-10-31T11:00:14.000Z
|
finance_ml/importance.py
|
BTETON/finance_ml
|
a585be2d04db5a749eb6b39b7336e5aeb30d6327
|
[
"MIT"
] | 164
|
2018-09-12T18:37:25.000Z
|
2022-03-17T06:30:12.000Z
|
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold
from sklearn.metrics import log_loss, mean_squared_error
from .model_selection import PurgedKFold, cv_score, evaluate
def mp_feat_imp_SFI(clf, X, y, feat_names, sample_weight=None, scoring='neg_log_loss',
n_splits=3, t1=None, cv_gen=None, pct_embargo=0, purging=True):
imp = pd.DataFrame(columns=['mean', 'std'])
for feat_name in feat_names:
scores = cv_score(clf, X=X[[feat_name]], y=y,
sample_weight=sample_weight,
scoring=scoring,
cv_gen=cv_gen,
n_splits=n_splits,
t1=t1,
pct_embargo=pct_embargo,
purging=purging)
imp.loc[feat_name, 'mean'] = scores.mean()
imp.loc[feat_name, 'std'] = scores.std() * scores.shape[0] ** -0.5
return imp
def feat_imp_SFI(clf, X, y, sample_weight=None, scoring='neg_log_loss',
n_splits=5, t1=None, cv_gen=None, pct_embargo=0, purging=True, num_threads=1):
"""Calculate Single Feature Importance
Args:
clf: Classifier instance
X: pd.DataFrame, Input feature
y: pd.Series, Label
clstrs: dict[list]
Clustering labels: key is the name of cluster and value is list of belonging columns
sample_weight: pd.Series, optional
If specified, apply this to testing and training
scoring: str, default 'neg_log_loss'
The name of scoring methods. 'f1', 'accuracy' or 'neg_log_loss'
n_splits: int, default 3
The number of splits for cross validation
t1: pd.Series
Index and value correspond to the begining and end of information. It is required for purging and embargo
cv_gen: KFold instance
If not specified, use PurgedKfold
pct_embargo: float, default 0
The percentage of applying embargo
purging: bool, default True
If true, apply purging method
num_threads: int, default 1
The number of threads for purging
Returns:
pd.DataFrame: Importance means and standard deviations
- mean: Mean of importance
- std: Standard deviation of importance
"""
imp = mp_pandas_obj(mp_feat_imp_SFI, ('feat_names', X.columns),
num_threads, clf=clf, X=X, y=y, sample_weight=sample_weight,
scoring=scoring, n_splits=n_splits, t1=t1, cv_gen=cv_gen,
pct_embargo=pct_embargo, purging=purging)
return imp
def feat_imp_MDI(fit, feat_names):
"""Compute Mean Decrease Impurity
Args:
forest (Forest Classifier instance)
feat_names (list(str)): List of names of features
Returns:
pd.DataFrame: Importance means and standard deviations
- mean: Mean of importance
- std: Standard deviation of importance
"""
df0 = {i: tree.feature_importances_ for i, tree in enumerate(fit.estimators_)}
df0 = pd.DataFrame.from_dict(df0, orient='index')
df0.columns = feat_names
df0 = df0.replace(0, np.nan)
imp = pd.concat({"mean": df0.mean(), "std": df0.std() * (df0.shape[0] ** -0.5)}, axis=1)
imp /= imp["mean"].sum()
return imp
def feat_imp_MDA(clf, X, y, sample_weight=None, scoring='neg_log_loss', n_splits=5, t1=None,
cv_gen=None, pct_embargo=0, purging=True, num_threads=1):
"""Calculate Mean Decrease Accuracy
Note:
You can use any classifier to estimate importance
Args:
clf: Classifier instance
X: pd.DataFrame, Input feature
y: pd.Series, Label
sample_weight: pd.Series, optional
If specified, apply this to testing and training
scoring: str, default 'neg_log_loss'
The name of scoring methods. 'f1', 'accuracy' or 'neg_log_loss'
n_splits: int, default 3
The number of splits for cross validation
t1: pd.Series
Index and value correspond to the begining and end of information. It is required for purging and embargo
cv_gen: KFold instance
If not specified, use PurgedKfold
pct_embargo: float, default 0
The percentage of applying embargo
purging: bool, default True
If true, apply purging method
num_threads: int, default 1
The number of threads for purging
Returns:
pd.DataFrame: Importance means and standard deviations
- mean: Mean of importance
- std: Standard deviation of importance
"""
if cv_gen is None:
if t1 is not None:
cv_gen = PurgedKFold(n_splits=n_splits, t1=t1, pct_embargo=pct_embargo,
purging=purging, num_threads=num_threads)
else:
cv_gen = KFold(n_splits=n_splits)
index = np.arange(n_splits)
scores = pd.Series(index=index)
scores_perm = pd.DataFrame(index=index, columns=X.columns)
for idx, (train, test) in zip(index, cv_gen.split(X=X)):
X_train = X.iloc[train]
y_train = y.iloc[train]
if sample_weight is not None:
w_train = sample_weight.iloc[train].values
else:
w_train = None
X_test = X.iloc[test]
y_test = y.iloc[test]
if sample_weight is not None:
w_test = sample_weight.iloc[test].values
else:
w_test = None
clf_fit = clf.fit(X_train, y_train, sample_weight=w_train)
scores.loc[idx] = evaluate(clf_fit, X_test, y_test, scoring,
sample_weight=w_test)
for col in X.columns:
X_test_ = X_test.copy(deep=True)
# Randomize certain feature to make it not effective
np.random.shuffle(X_test_[col].values)
scores_perm.loc[idx, col] = evaluate(clf_fit, X_test_, y_test, scoring,
sample_weight=w_test)
# (Original score) - (premutated score)
imprv = (-scores_perm).add(scores, axis=0)
# Relative to maximum improvement
if scoring == 'neg_log_loss':
max_imprv = -scores_perm
else:
max_imprv = 1. - scores_perm
imp = imprv / max_imprv
return pd.concat({"mean": imp.mean(), "std": imp.std() * (imp.shape[0] ** -0.5)}, axis=1)
def group_mean_std(df0, clstrs):
out = pd.DataFrame(columns=['mean', 'std'])
for key, elements in clstrs.items():
df1 = df0[elements].sum(axis=1)
out.loc[f"C_{key}", 'mean'] = df1.mean()
out.loc[f"C_{key}", 'std'] = df1.std() * df1.shape[0]**-.5
return out
def feat_imp_MDI_clustered(fit, feat_names, clstrs):
"""Compute Mean Decrease Impurity
Args:
forest (Forest Classifier instance)
feat_names (list(str)): List of names of features
clstrs: dict[list]
Clustering labels: key is the name of cluster and value is list of belonging columns
Returns:
pd.DataFrame: Importance means and standard deviations
- mean: Mean of importance
- std: Standard deviation of importance
"""
df0 = {i:tree.feature_importances_ for i, tree in enumerate(fit.estimators_)}
df0 = pd.DataFrame.from_dict(df0, orient='index')
df0.columns = feat_names
df0 = df0.replace(0, np.nan) #because max_features=1
imp = group_mean_std(df0, clstrs)
imp /= imp['mean'].sum()
return imp
def feat_imp_MDA_clustered(clf, X, y, clstrs,
sample_weight=None,
scoring='neg_log_loss',
n_splits=5, t1=None,
cv_gen=None, pct_embargo=0,
purging=True, num_threads=1):
"""Calculate Clustered Mean Decrease Accuracy
Note:
You can use any classifier to estimate importance
Args:
clf: Classifier instance
X: pd.DataFrame, Input feature
y: pd.Series, Label
clstrs: dict[list]
Clustering labels: key is the name of cluster and value is list of belonging columns
sample_weight: pd.Series, optional
If specified, apply this to testing and training
scoring: str, default 'neg_log_loss'
The name of scoring methods. 'f1', 'accuracy' or 'neg_log_loss'
n_splits: int, default 3
The number of splits for cross validation
t1: pd.Series
Index and value correspond to the begining and end of information. It is required for purging and embargo
cv_gen: KFold instance
If not specified, use PurgedKfold
pct_embargo: float, default 0
The percentage of applying embargo
purging: bool, default True
If true, apply purging method
num_threads: int, default 1
The number of threads for purging
Returns:
pd.DataFrame: Importance means and standard deviations
- mean: Mean of importance
- std: Standard deviation of importance
"""
if cv_gen is None:
if t1 is not None:
cv_gen = PurgedKFold(n_splits=n_splits, t1=t1, pct_embargo=pct_embargo,
purging=purging, num_threads=num_threads)
else:
cv_gen = KFold(n_splits=n_splits)
index = np.arange(n_splits)
scores = pd.Series(index=index)
scores_perm = pd.DataFrame(index=index, columns=clstrs.keys())
for idx, (train, test) in zip(index, cv_gen.split(X=X)):
X_train = X.iloc[train]
y_train = y.iloc[train]
if sample_weight is not None:
w_train = sample_weight.iloc[train].values
else:
w_train = None
X_test = X.iloc[test]
y_test = y.iloc[test]
if sample_weight is not None:
w_test = sample_weight.iloc[test].values
else:
w_test = None
clf_fit = clf.fit(X_train, y_train, sample_weight=w_train)
scores.loc[idx] = evaluate(clf_fit, X_test, y_test, scoring,
sample_weight=w_test)
for clstr_name in clstrs.keys():
X_test_ = X_test.copy(deep=True)
for k in clstrs[clstr_name]:
np.random.shuffle(X_test_[k].values)
scores_perm.loc[idx, clstr_name] = evaluate(clf_fit, X_test_, y_test,
scoring, sample_weight=w_test)
# (Original score) - (premutated score)
imprv = (-scores_perm).add(scores, axis=0)
# Relative to maximum improvement
if scoring == 'neg_log_loss':
max_imprv = -scores_perm
else:
max_imprv = 1. - scores_perm
imp = imprv / max_imprv
imp = pd.concat({'mean': imp.mean(), 'std': imp.std() * imp.shape[0] ** -0.5}, axis=1)
imp.index = [f"C_{i}" for i in imp.index]
return imp
| 40.076087
| 117
| 0.600579
| 1,471
| 11,061
| 4.351462
| 0.125765
| 0.046868
| 0.018747
| 0.012029
| 0.876269
| 0.848305
| 0.827527
| 0.818153
| 0.805655
| 0.799406
| 0
| 0.011434
| 0.312088
| 11,061
| 276
| 118
| 40.076087
| 0.829807
| 0.384233
| 0
| 0.562963
| 0
| 0
| 0.026498
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051852
| false
| 0
| 0.051852
| 0
| 0.155556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
01faa4d24ce3765880f0f8625def4e447a58641f
| 121
|
py
|
Python
|
gibson/utils/__init__.py
|
rainprob/GibsonEnv
|
e0d0bc614713c676cb303bf9f11ca6a98713e0e0
|
[
"MIT"
] | 731
|
2018-02-26T18:35:05.000Z
|
2022-03-23T04:00:09.000Z
|
gibson/utils/__init__.py
|
Shubodh/GibsonEnv
|
38274874d7c2c2a87efdb6ee529f2b366c5219de
|
[
"MIT"
] | 111
|
2018-04-19T01:00:22.000Z
|
2022-03-18T17:43:50.000Z
|
gibson/utils/__init__.py
|
Shubodh/GibsonEnv
|
38274874d7c2c2a87efdb6ee529f2b366c5219de
|
[
"MIT"
] | 153
|
2018-02-27T04:38:40.000Z
|
2022-03-28T08:10:39.000Z
|
#from realenv.client.vnc_client import VNCClient
#from realenv.client.client_actions import client_actions, client_newloc
| 60.5
| 72
| 0.876033
| 17
| 121
| 6
| 0.470588
| 0.215686
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066116
| 121
| 2
| 72
| 60.5
| 0.902655
| 0.975207
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bf2566abd8a094d01986a14d33c7131e429133bb
| 167
|
py
|
Python
|
code/home/home_views.py
|
GGGGFan/CS564-Course-Project-A-Database-Management-System-for-Electronic-Health-Records-in-ICU
|
11ae6c67e761a87c0584c6ef7278cb93ec708748
|
[
"Apache-2.0"
] | null | null | null |
code/home/home_views.py
|
GGGGFan/CS564-Course-Project-A-Database-Management-System-for-Electronic-Health-Records-in-ICU
|
11ae6c67e761a87c0584c6ef7278cb93ec708748
|
[
"Apache-2.0"
] | null | null | null |
code/home/home_views.py
|
GGGGFan/CS564-Course-Project-A-Database-Management-System-for-Electronic-Health-Records-in-ICU
|
11ae6c67e761a87c0584c6ef7278cb93ec708748
|
[
"Apache-2.0"
] | null | null | null |
from django.shortcuts import render
# This is the function for django to guide the page to home.html.
def home(request):
return render(request, 'home/home.html')
| 27.833333
| 65
| 0.754491
| 27
| 167
| 4.666667
| 0.666667
| 0.126984
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.161677
| 167
| 5
| 66
| 33.4
| 0.9
| 0.377246
| 0
| 0
| 0
| 0
| 0.137255
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.