hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
bccc88e90fb008eb5f3ec4f235e1b178cbbd4d91
14,790
py
Python
src/storage-blob-preview/azext_storage_blob_preview/vendored_sdks/azure_mgmt_storage/v2021_01_01/models/_storage_management_client_enums.py
Mannan2812/azure-cli-extensions
e2b34efe23795f6db9c59100534a40f0813c3d95
[ "MIT" ]
2,728
2015-01-09T10:19:32.000Z
2022-03-31T14:50:33.000Z
src/storage-blob-preview/azext_storage_blob_preview/vendored_sdks/azure_mgmt_storage/v2021_01_01/models/_storage_management_client_enums.py
Mannan2812/azure-cli-extensions
e2b34efe23795f6db9c59100534a40f0813c3d95
[ "MIT" ]
17,773
2015-01-05T15:57:17.000Z
2022-03-31T23:50:25.000Z
src/storage-blob-preview/azext_storage_blob_preview/vendored_sdks/azure_mgmt_storage/v2021_01_01/models/_storage_management_client_enums.py
Mannan2812/azure-cli-extensions
e2b34efe23795f6db9c59100534a40f0813c3d95
[ "MIT" ]
1,916
2015-01-19T05:05:41.000Z
2022-03-31T19:36:44.000Z
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from enum import Enum, EnumMeta from six import with_metaclass class _CaseInsensitiveEnumMeta(EnumMeta): def __getitem__(self, name): return super().__getitem__(name.upper()) def __getattr__(cls, name): """Return the enum member matching `name` We use __getattr__ instead of descriptors or inserting into the enum class' __dict__ in order to support `name` and `value` being both properties for enum members (which live in the class' __dict__) and enum members themselves. """ try: return cls._member_map_[name.upper()] except KeyError: raise AttributeError(name) class AccessTier(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Required for storage accounts where kind = BlobStorage. The access tier used for billing. """ HOT = "Hot" COOL = "Cool" class AccountStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Gets the status indicating whether the primary location of the storage account is available or unavailable. """ AVAILABLE = "available" UNAVAILABLE = "unavailable" class BlobInventoryPolicyName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): DEFAULT = "default" class BlobRestoreProgressStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The status of blob restore progress. Possible values are: - InProgress: Indicates that blob restore is ongoing. - Complete: Indicates that blob restore has been completed successfully. - Failed: Indicates that blob restore is failed. 
""" IN_PROGRESS = "InProgress" COMPLETE = "Complete" FAILED = "Failed" class Bypass(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Specifies whether traffic is bypassed for Logging/Metrics/AzureServices. Possible values are any combination of Logging|Metrics|AzureServices (For example, "Logging, Metrics"), or None to bypass none of those traffics. """ NONE = "None" LOGGING = "Logging" METRICS = "Metrics" AZURE_SERVICES = "AzureServices" class CorsRuleAllowedMethodsItem(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): DELETE = "DELETE" GET = "GET" HEAD = "HEAD" MERGE = "MERGE" POST = "POST" OPTIONS = "OPTIONS" PUT = "PUT" class CreatedByType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The type of identity that created the resource. """ USER = "User" APPLICATION = "Application" MANAGED_IDENTITY = "ManagedIdentity" KEY = "Key" class DefaultAction(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Specifies the default action of allow or deny when no other rules match. """ ALLOW = "Allow" DENY = "Deny" class DirectoryServiceOptions(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Indicates the directory service used. """ NONE = "None" AADDS = "AADDS" AD = "AD" class EnabledProtocols(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The authentication protocol that is used for the file share. Can only be specified when creating a share. """ SMB = "SMB" NFS = "NFS" class EncryptionScopeSource(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The provider for the encryption scope. Possible values (case-insensitive): Microsoft.Storage, Microsoft.KeyVault. """ MICROSOFT_STORAGE = "Microsoft.Storage" MICROSOFT_KEY_VAULT = "Microsoft.KeyVault" class EncryptionScopeState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The state of the encryption scope. Possible values (case-insensitive): Enabled, Disabled. 
""" ENABLED = "Enabled" DISABLED = "Disabled" class ExtendedLocationTypes(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The type of extendedLocation. """ EDGE_ZONE = "EdgeZone" class GeoReplicationStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The status of the secondary location. Possible values are: - Live: Indicates that the secondary location is active and operational. - Bootstrap: Indicates initial synchronization from the primary location to the secondary location is in progress.This typically occurs when replication is first enabled. - Unavailable: Indicates that the secondary location is temporarily unavailable. """ LIVE = "Live" BOOTSTRAP = "Bootstrap" UNAVAILABLE = "Unavailable" class HttpProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The protocol permitted for a request made with the account SAS. """ HTTPS_HTTP = "https,http" HTTPS = "https" class IdentityType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The identity type. """ NONE = "None" SYSTEM_ASSIGNED = "SystemAssigned" USER_ASSIGNED = "UserAssigned" SYSTEM_ASSIGNED_USER_ASSIGNED = "SystemAssigned,UserAssigned" class ImmutabilityPolicyState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The ImmutabilityPolicy state of a blob container, possible values include: Locked and Unlocked. """ LOCKED = "Locked" UNLOCKED = "Unlocked" class ImmutabilityPolicyUpdateType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The ImmutabilityPolicy update type of a blob container, possible values include: put, lock and extend. """ PUT = "put" LOCK = "lock" EXTEND = "extend" class InventoryRuleType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The valid value is Inventory """ INVENTORY = "Inventory" class KeyPermission(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Permissions for the key -- read-only or full permissions. 
""" READ = "Read" FULL = "Full" class KeySource(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The encryption keySource (provider). Possible values (case-insensitive): Microsoft.Storage, Microsoft.Keyvault """ MICROSOFT_STORAGE = "Microsoft.Storage" MICROSOFT_KEYVAULT = "Microsoft.Keyvault" class KeyType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Encryption key type to be used for the encryption service. 'Account' key type implies that an account-scoped encryption key will be used. 'Service' key type implies that a default service key is used. """ SERVICE = "Service" ACCOUNT = "Account" class Kind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Indicates the type of storage account. """ STORAGE = "Storage" STORAGE_V2 = "StorageV2" BLOB_STORAGE = "BlobStorage" FILE_STORAGE = "FileStorage" BLOCK_BLOB_STORAGE = "BlockBlobStorage" class LargeFileSharesState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Allow large file shares if sets to Enabled. It cannot be disabled once it is enabled. """ DISABLED = "Disabled" ENABLED = "Enabled" class LeaseContainerRequestAction(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Specifies the lease action. Can be one of the available actions. """ ACQUIRE = "Acquire" RENEW = "Renew" CHANGE = "Change" RELEASE = "Release" BREAK_ENUM = "Break" class LeaseDuration(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Specifies whether the lease on a container is of infinite or fixed duration, only when the container is leased. """ INFINITE = "Infinite" FIXED = "Fixed" class LeaseState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Lease state of the container. """ AVAILABLE = "Available" LEASED = "Leased" EXPIRED = "Expired" BREAKING = "Breaking" BROKEN = "Broken" class LeaseStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The lease status of the container. 
""" LOCKED = "Locked" UNLOCKED = "Unlocked" class ListContainersInclude(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): DELETED = "deleted" class ListSharesExpand(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): DELETED = "deleted" SNAPSHOTS = "snapshots" class ManagementPolicyName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): DEFAULT = "default" class MinimumTlsVersion(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Set the minimum TLS version to be permitted on requests to storage. The default interpretation is TLS 1.0 for this property. """ TLS1_0 = "TLS1_0" TLS1_1 = "TLS1_1" TLS1_2 = "TLS1_2" class Name(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Name of the policy. The valid value is AccessTimeTracking. This field is currently read only """ ACCESS_TIME_TRACKING = "AccessTimeTracking" class Permissions(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The signed permissions for the account SAS. Possible values include: Read (r), Write (w), Delete (d), List (l), Add (a), Create (c), Update (u) and Process (p). """ R = "r" D = "d" W = "w" L = "l" A = "a" C = "c" U = "u" P = "p" class PrivateEndpointConnectionProvisioningState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The current provisioning state. """ SUCCEEDED = "Succeeded" CREATING = "Creating" DELETING = "Deleting" FAILED = "Failed" class PrivateEndpointServiceConnectionStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The private endpoint connection status. """ PENDING = "Pending" APPROVED = "Approved" REJECTED = "Rejected" class ProvisioningState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Gets the status of the storage account at the time the operation was called. """ CREATING = "Creating" RESOLVING_DNS = "ResolvingDNS" SUCCEEDED = "Succeeded" class PublicAccess(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Specifies whether data in the container may be accessed publicly and the level of access. 
""" CONTAINER = "Container" BLOB = "Blob" NONE = "None" class PutSharesExpand(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): SNAPSHOTS = "snapshots" class Reason(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Gets the reason that a storage account name could not be used. The Reason element is only returned if NameAvailable is false. """ ACCOUNT_NAME_INVALID = "AccountNameInvalid" ALREADY_EXISTS = "AlreadyExists" class ReasonCode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The reason for the restriction. As of now this can be "QuotaId" or "NotAvailableForSubscription". Quota Id is set when the SKU has requiredQuotas parameter as the subscription does not belong to that quota. The "NotAvailableForSubscription" is related to capacity at DC. """ QUOTA_ID = "QuotaId" NOT_AVAILABLE_FOR_SUBSCRIPTION = "NotAvailableForSubscription" class RootSquashType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The property is for NFS share only. The default is NoRootSquash. """ NO_ROOT_SQUASH = "NoRootSquash" ROOT_SQUASH = "RootSquash" ALL_SQUASH = "AllSquash" class RoutingChoice(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Routing Choice defines the kind of network routing opted by the user. """ MICROSOFT_ROUTING = "MicrosoftRouting" INTERNET_ROUTING = "InternetRouting" class RuleType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The valid value is Lifecycle """ LIFECYCLE = "Lifecycle" class Services(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The signed services accessible with the account SAS. Possible values include: Blob (b), Queue (q), Table (t), File (f). """ B = "b" Q = "q" T = "t" F = "f" class ShareAccessTier(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Access tier for specific share. GpV2 account can choose between TransactionOptimized (default), Hot, and Cool. FileStorage account can choose Premium. 
""" TRANSACTION_OPTIMIZED = "TransactionOptimized" HOT = "Hot" COOL = "Cool" PREMIUM = "Premium" class SignedResource(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The signed services accessible with the service SAS. Possible values include: Blob (b), Container (c), File (f), Share (s). """ B = "b" C = "c" F = "f" S = "s" class SignedResourceTypes(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The signed resource types that are accessible with the account SAS. Service (s): Access to service-level APIs; Container (c): Access to container-level APIs; Object (o): Access to object-level APIs for blobs, queue messages, table entities, and files. """ S = "s" C = "c" O = "o" class SkuName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The SKU name. Required for account creation; optional for update. Note that in older versions, SKU name was called accountType. """ STANDARD_LRS = "Standard_LRS" STANDARD_GRS = "Standard_GRS" STANDARD_RAGRS = "Standard_RAGRS" STANDARD_ZRS = "Standard_ZRS" PREMIUM_LRS = "Premium_LRS" PREMIUM_ZRS = "Premium_ZRS" STANDARD_GZRS = "Standard_GZRS" STANDARD_RAGZRS = "Standard_RAGZRS" class SkuTier(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The SKU tier. This is based on the SKU name. """ STANDARD = "Standard" PREMIUM = "Premium" class State(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Gets the state of virtual network rule. """ PROVISIONING = "provisioning" DEPROVISIONING = "deprovisioning" SUCCEEDED = "succeeded" FAILED = "failed" NETWORK_SOURCE_DELETED = "networkSourceDeleted" class StorageAccountExpand(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): GEO_REPLICATION_STATS = "geoReplicationStats" BLOB_RESTORE_STATUS = "blobRestoreStatus" class UsageUnit(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Gets the unit of measurement. """ COUNT = "Count" BYTES = "Bytes" SECONDS = "Seconds" PERCENT = "Percent" COUNTS_PER_SECOND = "CountsPerSecond" BYTES_PER_SECOND = "BytesPerSecond"
32.012987
102
0.701623
1,598
14,790
6.365457
0.284105
0.069013
0.187574
0.203205
0.334939
0.268285
0.201927
0.090838
0.046795
0.035391
0
0.00151
0.19378
14,790
461
103
32.08243
0.85156
0.372481
0
0.188073
0
0
0.138551
0.006143
0
0
0
0
0
1
0.009174
false
0.004587
0.009174
0.004587
0.986239
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
3
bcd40753c6b65b745382a68291ff0ca5e7e4fd1b
533
py
Python
tests/frontend/configuredwarning/plugins/corewarn.py
doraskayo/buildstream
1c72d4342ae7df360808de22c5e49f55dbb6bec6
[ "Apache-2.0" ]
null
null
null
tests/frontend/configuredwarning/plugins/corewarn.py
doraskayo/buildstream
1c72d4342ae7df360808de22c5e49f55dbb6bec6
[ "Apache-2.0" ]
null
null
null
tests/frontend/configuredwarning/plugins/corewarn.py
doraskayo/buildstream
1c72d4342ae7df360808de22c5e49f55dbb6bec6
[ "Apache-2.0" ]
null
null
null
from buildstream import Element from buildstream.plugin import CoreWarnings class CoreWarn(Element): BST_MIN_VERSION = "2.0" def configure(self, node): pass def preflight(self): pass def get_unique_key(self): pass def configure_sandbox(self, sandbox): pass def stage(self, sandbox): pass def assemble(self, sandbox): self.warn("Testing: CoreWarning produced during assemble", warning_token=CoreWarnings.OVERLAPS) def setup(): return CoreWarn
17.766667
103
0.667917
62
533
5.645161
0.564516
0.1
0.062857
0.102857
0
0
0
0
0
0
0
0.005025
0.253283
533
29
104
18.37931
0.874372
0
0
0.277778
0
0
0.090056
0
0
0
0
0
0
1
0.388889
false
0.277778
0.111111
0.055556
0.666667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
3
bcd9b1a107fc785772ee3c89ef74ef83e845fc7d
331
py
Python
umu-python/app/service/uaa_client.py
suomitek/cubeai
cc4c0f5f445a552d239910da63944307c1f06e37
[ "Apache-2.0" ]
null
null
null
umu-python/app/service/uaa_client.py
suomitek/cubeai
cc4c0f5f445a552d239910da63944307c1f06e37
[ "Apache-2.0" ]
null
null
null
umu-python/app/service/uaa_client.py
suomitek/cubeai
cc4c0f5f445a552d239910da63944307c1f06e37
[ "Apache-2.0" ]
null
null
null
import json from app.service.http_client import http_client service_name = 'uaa' def get_user(login, jwt): return http_client('get', service_name, '/api/users/{}'.format(login), jwt=jwt) def send_message(message, jwt): return http_client('post', service_name, '/api/messages/send', body=json.dumps(message), jwt=jwt)
23.642857
101
0.731118
50
331
4.66
0.48
0.171674
0.111588
0.16309
0
0
0
0
0
0
0
0
0.120846
331
13
102
25.461538
0.800687
0
0
0
0
0
0.123867
0
0
0
0
0
0
1
0.285714
false
0
0.285714
0.285714
0.857143
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
3
bcfa13b9692f7d88c289506d00b1ed8ee3c551be
1,403
py
Python
zjson/Utils.py
zoloypzuo/regex_engine
22365d7229d1a346ea7a730ebcad1c889c323e1d
[ "MIT" ]
null
null
null
zjson/Utils.py
zoloypzuo/regex_engine
22365d7229d1a346ea7a730ebcad1c889c323e1d
[ "MIT" ]
null
null
null
zjson/Utils.py
zoloypzuo/regex_engine
22365d7229d1a346ea7a730ebcad1c889c323e1d
[ "MIT" ]
null
null
null
import sys def compact(f): """ remove spaces from str output of f, used for test for convenience""" def _wrapper(*args, **kwargs): return f(*args, **kwargs).replace(' ', '') return _wrapper def load_class(module, name): """ loads the module and returns the class. >>> load_class('test.classes','A') <class 'test.classes.A'> """ __import__(module) mod = sys.modules[module] cls = getattr(mod, name) return cls def is_class_dict(obj): """ helper class that tests to see if the obj is a flattened object >>> is_class_dict({'__class_module__':'__builtin__', '__class_name__':'int'}) True >>> is_class_dict({'key':'value'}) False >>> is_class_dict(25) False """ if type(obj) is dict and \ obj.__contains__('__class_module__') and obj.__contains__('__class_name__'): return True return False def is_named_tuple_dict(obj): return isinstance(obj, dict) and \ obj.__contains__('__namedtuple_cls_name') and obj.__contains__('__namedtuple_dict') def is_namedtuple_instance(x): t = type(x) b = t.__bases__ if len(b) != 1 or b[0] != tuple: return False f = getattr(t, '_fields', None) if not isinstance(f, tuple): return False return all(type(n) == str for n in f) def create_function_dict(*functions): return {f.__name__: f for f in functions}
24.614035
94
0.639344
195
1,403
4.194872
0.389744
0.03423
0.05379
0.041565
0
0
0
0
0
0
0
0.003693
0.228083
1,403
56
95
25.053571
0.751616
0.272986
0
0
0
0
0.079249
0.021898
0
0
0
0
0
1
0.259259
false
0
0.074074
0.111111
0.62963
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
4c0495a22be305bb100b35bc072cd93591049bc5
3,931
py
Python
privex/helpers/asyncx.py
bt-cryptomancer/python-helpers
8e04438714145732fa33ca2c85a29d45d569db2b
[ "X11" ]
null
null
null
privex/helpers/asyncx.py
bt-cryptomancer/python-helpers
8e04438714145732fa33ca2c85a29d45d569db2b
[ "X11" ]
null
null
null
privex/helpers/asyncx.py
bt-cryptomancer/python-helpers
8e04438714145732fa33ca2c85a29d45d569db2b
[ "X11" ]
null
null
null
""" Functions and classes related to working with Python's native asyncio support To avoid issues with the ``async`` keyword, this file is named ``asyncx`` instead of ``async`` **Copyright**:: +===================================================+ | © 2019 Privex Inc. | | https://www.privex.io | +===================================================+ | | | Originally Developed by Privex Inc. | | | | Core Developer(s): | | | | (+) Chris (@someguy123) [Privex] | | (+) Kale (@kryogenic) [Privex] | | | +===================================================+ Copyright 2019 Privex Inc. ( https://www.privex.io ) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import asyncio def run_sync(func, *args, **kwargs): """ Run an async function synchronously (useful for REPL testing async functions) Usage: >>> async def my_async_func(a, b, x=None, y=None): ... 
return a, b, x, y >>> >>> run_sync(my_async_func, 1, 2, x=3, y=4) (1, 2, 3, 4,) :param callable func: An asynchronous function to run :param args: Positional arguments to pass to ``func`` :param kwargs: Keyword arguments to pass to ``func`` """ coro = asyncio.coroutine(func) future = coro(*args, **kwargs) loop = asyncio.get_event_loop() return loop.run_until_complete(future) def async_sync(f): """ Async Synchronous Decorator, borrowed from https://stackoverflow.com/a/23036785/2648583 - added this PyDoc comment and support for returning data from a synchronous function Allows a non-async function to run async functions using ``yield from`` - and can also return data Useful for unit testing, since unittest.TestCase functions are synchronous. Example async function: >>> async def my_async_func(a, b, x=None, y=None): ... return a, b, x, y ... Using the above async function with a non-async function: >>> @async_sync ... def sync_function(): ... result = yield from my_async_func(1, 2, x=3, y=4) ... return result ... >>> r = sync_function() >>> print(r) (1, 2, 3, 4,) >>> print(r[1]) 2 """ def wrapper(*args, **kwargs): coro = asyncio.coroutine(f) future = coro(*args, **kwargs) loop = asyncio.get_event_loop() return loop.run_until_complete(future) return wrapper
37.084906
118
0.552786
456
3,931
4.719298
0.414474
0.040892
0.020446
0.016729
0.170074
0.150558
0.150558
0.123606
0.123606
0.107807
0
0.016554
0.323836
3,931
105
119
37.438095
0.792701
0.846858
0
0.461538
0
0
0
0
0
0
0
0
0
1
0.230769
false
0
0.076923
0
0.538462
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
3
4c144ff6be8f1191618ed72a7a151475086cc315
186
py
Python
landlab/components/soil_moisture/__init__.py
awickert/landlab
496de56717a5877db96f354a1b1285bfabe8b56f
[ "MIT" ]
1
2019-06-01T07:39:49.000Z
2019-06-01T07:39:49.000Z
landlab/components/soil_moisture/__init__.py
awickert/landlab
496de56717a5877db96f354a1b1285bfabe8b56f
[ "MIT" ]
1
2018-04-07T08:24:56.000Z
2018-04-07T13:52:03.000Z
landlab/components/soil_moisture/__init__.py
awickert/landlab
496de56717a5877db96f354a1b1285bfabe8b56f
[ "MIT" ]
null
null
null
from .soil_moisture_dynamics import SoilMoisture from .infiltrate_soil_green_ampt import SoilInfiltrationGreenAmpt __all__ = ['SoilMoisture', 'SoilInfiltrationGreenAmpt', ]
26.571429
65
0.801075
16
186
8.75
0.6875
0
0
0
0
0
0
0
0
0
0
0
0.139785
186
6
66
31
0.875
0
0
0
0
0
0.198925
0.134409
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
4c20227203d5355c3f5670a39e8b12c214b5748e
253
py
Python
App/Module/Config/DevelopmentConfig.py
Beonwulf/Pebmyn
4f6a708e28b0ea8b176757072b68e6ede29cab69
[ "MIT" ]
1
2021-01-05T14:58:15.000Z
2021-01-05T14:58:15.000Z
App/Module/Config/DevelopmentConfig.py
Beonwulf/Pebmyn
4f6a708e28b0ea8b176757072b68e6ede29cab69
[ "MIT" ]
null
null
null
App/Module/Config/DevelopmentConfig.py
Beonwulf/Pebmyn
4f6a708e28b0ea8b176757072b68e6ede29cab69
[ "MIT" ]
null
null
null
from App.Module.Config.Config import Config from __init__ import path class DevelopmentConfig(Config): DEBUG = True SQLALCHEMY_TRACK_MODIFICATIONS = True #SQLALCHEMY_DATABASE_URI = 'sqlite:///' + str(path) + '/App/Database/mydb_dev.sqlite'
31.625
89
0.754941
31
253
5.870968
0.645161
0.153846
0
0
0
0
0
0
0
0
0
0
0.142292
253
7
90
36.142857
0.83871
0.332016
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.4
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
3
4c22c168dafd8c1c65ecdfcd33cf56b004bae2c1
292
py
Python
tools/cli/ucube.py
santayqs/AliOS-Things
34a29f2e23a90d5a55553c666702976d19e0a914
[ "Apache-2.0" ]
30
2018-05-21T18:58:03.000Z
2020-11-30T03:44:10.000Z
tools/cli/ucube.py
santayqs/AliOS-Things
34a29f2e23a90d5a55553c666702976d19e0a914
[ "Apache-2.0" ]
1
2021-02-24T05:00:16.000Z
2021-02-24T05:00:16.000Z
tools/cli/ucube.py
santayqs/AliOS-Things
34a29f2e23a90d5a55553c666702976d19e0a914
[ "Apache-2.0" ]
16
2018-05-15T08:11:12.000Z
2022-03-20T05:23:15.000Z
src = Split(''' cli.c dumpsys.c ''') component = aos_component('cli', src) component.add_component_dependencis('kernel/hal') component.add_global_macro('HAVE_NOT_ADVANCED_FORMATE') component.add_global_macro('CONFIG_AOS_CLI') component.add_global_includes('include')
20.857143
55
0.736301
37
292
5.432432
0.540541
0.238806
0.268657
0.228856
0
0
0
0
0
0
0
0
0.133562
292
13
56
22.461538
0.794466
0
0
0
0
0
0.318493
0.085616
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
4c38f18cbf57915f4b22d2424ac096f70332456a
11,393
py
Python
release/stubs.min/System/IO/__init___parts/Stream.py
tranconbv/ironpython-stubs
a601759e6c6819beff8e6b639d18a24b7e351851
[ "MIT" ]
null
null
null
release/stubs.min/System/IO/__init___parts/Stream.py
tranconbv/ironpython-stubs
a601759e6c6819beff8e6b639d18a24b7e351851
[ "MIT" ]
null
null
null
release/stubs.min/System/IO/__init___parts/Stream.py
tranconbv/ironpython-stubs
a601759e6c6819beff8e6b639d18a24b7e351851
[ "MIT" ]
null
null
null
class Stream(MarshalByRefObject): """ Provides a generic view of a sequence of bytes. """ def ZZZ(self): """hardcoded/mock instance of the class""" return Stream() instance=ZZZ() """hardcoded/returns an instance of the class""" def BeginRead(self,buffer,offset,count,callback,state): """ BeginRead(self: Stream,buffer: Array[Byte],offset: int,count: int,callback: AsyncCallback,state: object) -> IAsyncResult Begins an asynchronous read operation. buffer: The buffer to read the data into. offset: The byte offset in buffer at which to begin writing data read from the stream. count: The maximum number of bytes to read. callback: An optional asynchronous callback,to be called when the read is complete. state: A user-provided object that distinguishes this particular asynchronous read request from other requests. Returns: An System.IAsyncResult that represents the asynchronous read,which could still be pending. """ pass def BeginWrite(self,buffer,offset,count,callback,state): """ BeginWrite(self: Stream,buffer: Array[Byte],offset: int,count: int,callback: AsyncCallback,state: object) -> IAsyncResult Begins an asynchronous write operation. buffer: The buffer to write data from. offset: The byte offset in buffer from which to begin writing. count: The maximum number of bytes to write. callback: An optional asynchronous callback,to be called when the write is complete. state: A user-provided object that distinguishes this particular asynchronous write request from other requests. Returns: An IAsyncResult that represents the asynchronous write,which could still be pending. """ pass def Close(self): """ Close(self: Stream) Closes the current stream and releases any resources (such as sockets and file handles) associated with the current stream. """ pass def CopyTo(self,destination,bufferSize=None): """ CopyTo(self: Stream,destination: Stream) Reads the bytes from the current stream and writes them to the destination stream. 
destination: The stream that will contain the contents of the current stream. CopyTo(self: Stream,destination: Stream,bufferSize: int) Reads all the bytes from the current stream and writes them to a destination stream,using a specified buffer size. destination: The stream that will contain the contents of the current stream. bufferSize: The size of the buffer. This value must be greater than zero. The default size is 4096. """ pass def CopyToAsync(self,destination,bufferSize=None,cancellationToken=None): """ CopyToAsync(self: Stream,destination: Stream) -> Task CopyToAsync(self: Stream,destination: Stream,bufferSize: int) -> Task CopyToAsync(self: Stream,destination: Stream,bufferSize: int,cancellationToken: CancellationToken) -> Task """ pass def CreateWaitHandle(self,*args): """ CreateWaitHandle(self: Stream) -> WaitHandle Allocates a System.Threading.WaitHandle object. Returns: A reference to the allocated WaitHandle. """ pass def Dispose(self): """ Dispose(self: Stream) Releases all resources used by the System.IO.Stream. """ pass def EndRead(self,asyncResult): """ EndRead(self: Stream,asyncResult: IAsyncResult) -> int Waits for the pending asynchronous read to complete. asyncResult: The reference to the pending asynchronous request to finish. Returns: The number of bytes read from the stream,between zero (0) and the number of bytes you requested. Streams return zero (0) only at the end of the stream,otherwise,they should block until at least one byte is available. """ pass def EndWrite(self,asyncResult): """ EndWrite(self: Stream,asyncResult: IAsyncResult) Ends an asynchronous write operation. asyncResult: A reference to the outstanding asynchronous I/O request. """ pass def Flush(self): """ Flush(self: Stream) When overridden in a derived class,clears all buffers for this stream and causes any buffered data to be written to the underlying device. 
""" pass def FlushAsync(self,cancellationToken=None): """ FlushAsync(self: Stream) -> Task FlushAsync(self: Stream,cancellationToken: CancellationToken) -> Task """ pass def MemberwiseClone(self,*args): """ MemberwiseClone(self: MarshalByRefObject,cloneIdentity: bool) -> MarshalByRefObject Creates a shallow copy of the current System.MarshalByRefObject object. cloneIdentity: false to delete the current System.MarshalByRefObject object's identity,which will cause the object to be assigned a new identity when it is marshaled across a remoting boundary. A value of false is usually appropriate. true to copy the current System.MarshalByRefObject object's identity to its clone,which will cause remoting client calls to be routed to the remote server object. Returns: A shallow copy of the current System.MarshalByRefObject object. MemberwiseClone(self: object) -> object Creates a shallow copy of the current System.Object. Returns: A shallow copy of the current System.Object. """ pass def ObjectInvariant(self,*args): """ ObjectInvariant(self: Stream) Provides support for a System.Diagnostics.Contracts.Contract. """ pass def Read(self,buffer,offset,count): """ Read(self: Stream,offset: int,count: int) -> (int,Array[Byte]) When overridden in a derived class,reads a sequence of bytes from the current stream and advances the position within the stream by the number of bytes read. offset: The zero-based byte offset in buffer at which to begin storing the data read from the current stream. count: The maximum number of bytes to be read from the current stream. Returns: The total number of bytes read into the buffer. This can be less than the number of bytes requested if that many bytes are not currently available,or zero (0) if the end of the stream has been reached. 
""" pass def ReadAsync(self,buffer,offset,count,cancellationToken=None): """ ReadAsync(self: Stream,buffer: Array[Byte],offset: int,count: int) -> Task[int] ReadAsync(self: Stream,buffer: Array[Byte],offset: int,count: int,cancellationToken: CancellationToken) -> Task[int] """ pass def ReadByte(self): """ ReadByte(self: Stream) -> int Reads a byte from the stream and advances the position within the stream by one byte,or returns -1 if at the end of the stream. Returns: The unsigned byte cast to an Int32,or -1 if at the end of the stream. """ pass def Seek(self,offset,origin): """ Seek(self: Stream,offset: Int64,origin: SeekOrigin) -> Int64 When overridden in a derived class,sets the position within the current stream. offset: A byte offset relative to the origin parameter. origin: A value of type System.IO.SeekOrigin indicating the reference point used to obtain the new position. Returns: The new position within the current stream. """ pass def SetLength(self,value): """ SetLength(self: Stream,value: Int64) When overridden in a derived class,sets the length of the current stream. value: The desired length of the current stream in bytes. """ pass @staticmethod def Synchronized(stream): """ Synchronized(stream: Stream) -> Stream Creates a thread-safe (synchronized) wrapper around the specified System.IO.Stream object. stream: The System.IO.Stream object to synchronize. Returns: A thread-safe System.IO.Stream object. """ pass def Write(self,buffer,offset,count): """ Write(self: Stream,buffer: Array[Byte],offset: int,count: int) When overridden in a derived class,writes a sequence of bytes to the current stream and advances the current position within this stream by the number of bytes written. buffer: An array of bytes. This method copies count bytes from buffer to the current stream. offset: The zero-based byte offset in buffer at which to begin copying bytes to the current stream. count: The number of bytes to be written to the current stream. 
""" pass def WriteAsync(self,buffer,offset,count,cancellationToken=None): """ WriteAsync(self: Stream,buffer: Array[Byte],offset: int,count: int) -> Task WriteAsync(self: Stream,buffer: Array[Byte],offset: int,count: int,cancellationToken: CancellationToken) -> Task """ pass def WriteByte(self,value): """ WriteByte(self: Stream,value: Byte) Writes a byte to the current position in the stream and advances the position within the stream by one byte. value: The byte to write to the stream. """ pass def __enter__(self,*args): """ __enter__(self: IDisposable) -> object """ pass def __exit__(self,*args): """ __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """ pass def __init__(self,*args): """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass def __reduce_ex__(self,*args): pass CanRead=property(lambda self: object(),lambda self,v: None,lambda self: None) """When overridden in a derived class,gets a value indicating whether the current stream supports reading. Get: CanRead(self: Stream) -> bool """ CanSeek=property(lambda self: object(),lambda self,v: None,lambda self: None) """When overridden in a derived class,gets a value indicating whether the current stream supports seeking. Get: CanSeek(self: Stream) -> bool """ CanTimeout=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets a value that determines whether the current stream can time out. Get: CanTimeout(self: Stream) -> bool """ CanWrite=property(lambda self: object(),lambda self,v: None,lambda self: None) """When overridden in a derived class,gets a value indicating whether the current stream supports writing. 
Get: CanWrite(self: Stream) -> bool """ Length=property(lambda self: object(),lambda self,v: None,lambda self: None) """When overridden in a derived class,gets the length in bytes of the stream. Get: Length(self: Stream) -> Int64 """ Position=property(lambda self: object(),lambda self,v: None,lambda self: None) """When overridden in a derived class,gets or sets the position within the current stream. Get: Position(self: Stream) -> Int64 Set: Position(self: Stream)=value """ ReadTimeout=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets or sets a value,in miliseconds,that determines how long the stream will attempt to read before timing out. Get: ReadTimeout(self: Stream) -> int Set: ReadTimeout(self: Stream)=value """ WriteTimeout=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets or sets a value,in miliseconds,that determines how long the stream will attempt to write before timing out. Get: WriteTimeout(self: Stream) -> int Set: WriteTimeout(self: Stream)=value """ Null=None
40.257951
215
0.713772
1,545
11,393
5.222006
0.179288
0.04586
0.043629
0.021071
0.517105
0.448686
0.381631
0.349777
0.311601
0.270327
0
0.002306
0.20065
11,393
282
216
40.400709
0.883606
0.643378
0
0.393939
0
0
0
0
0
0
0
0
0
1
0.409091
false
0.393939
0
0
0.590909
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
3
4c3b2546977c8f79edf843450071d90edfcf68d3
144
py
Python
home/validators.py
jdlovins/knights381
c6a52dbcb031c6a6ac2b5b843a259687920b889b
[ "MIT" ]
null
null
null
home/validators.py
jdlovins/knights381
c6a52dbcb031c6a6ac2b5b843a259687920b889b
[ "MIT" ]
null
null
null
home/validators.py
jdlovins/knights381
c6a52dbcb031c6a6ac2b5b843a259687920b889b
[ "MIT" ]
null
null
null
from django.core.validators import RegexValidator alphanumeric = RegexValidator(r'^[0-9a-zA-Z]*$', 'Only alphanumeric characters are allowed.')
48
93
0.784722
18
144
6.277778
0.888889
0
0
0
0
0
0
0
0
0
0
0.015152
0.083333
144
3
93
48
0.840909
0
0
0
0
0
0.37931
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
4c49f6da0d182cc97f5fe6b21d77c8f8330d3c3d
432
py
Python
projects/DensePose/densepose/modeling/__init__.py
mmabrouk/detectron2
158e395acdb8ca6ed6d488b43475f9ef9d200405
[ "Apache-2.0" ]
21,274
2019-10-10T17:50:46.000Z
2022-03-31T17:58:45.000Z
projects/DensePose/densepose/modeling/__init__.py
mmabrouk/detectron2
158e395acdb8ca6ed6d488b43475f9ef9d200405
[ "Apache-2.0" ]
3,253
2019-10-10T20:39:47.000Z
2022-03-31T22:27:53.000Z
projects/DensePose/densepose/modeling/__init__.py
mmabrouk/detectron2
158e395acdb8ca6ed6d488b43475f9ef9d200405
[ "Apache-2.0" ]
6,288
2019-10-10T18:00:27.000Z
2022-03-31T21:22:58.000Z
# Copyright (c) Facebook, Inc. and its affiliates. from .confidence import DensePoseConfidenceModelConfig, DensePoseUVConfidenceType from .filter import DensePoseDataFilter from .inference import densepose_inference from .utils import initialize_module_params from .build import ( build_densepose_data_filter, build_densepose_embedder, build_densepose_head, build_densepose_losses, build_densepose_predictor, )
30.857143
81
0.826389
46
432
7.456522
0.565217
0.204082
0
0
0
0
0
0
0
0
0
0
0.12963
432
13
82
33.230769
0.912234
0.111111
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.454545
0
0.454545
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
3
4c520a88f0f58d147c02a69dbc81c85c69fde75f
736
py
Python
home/kwatters/harry/gestures/phonehome.py
rv8flyboy/pyrobotlab
4e04fb751614a5cb6044ea15dcfcf885db8be65a
[ "Apache-2.0" ]
63
2015-02-03T18:49:43.000Z
2022-03-29T03:52:24.000Z
home/kwatters/harry/gestures/phonehome.py
hirwaHenryChristian/pyrobotlab
2debb381fc2db4be1e7ea6e5252a50ae0de6f4a9
[ "Apache-2.0" ]
16
2016-01-26T19:13:29.000Z
2018-11-25T21:20:51.000Z
home/kwatters/harry/gestures/phonehome.py
hirwaHenryChristian/pyrobotlab
2debb381fc2db4be1e7ea6e5252a50ae0de6f4a9
[ "Apache-2.0" ]
151
2015-01-03T18:55:54.000Z
2022-03-04T07:04:23.000Z
def phonehome(): relax() sleep(1) i01.setHeadSpeed(1.0,1.0,1.0,1.0,1.0) i01.setArmSpeed("left",1.0,1.0,1.0,1.0) i01.setArmSpeed("right",1.0,1.0,1.0,1.0) i01.setHandSpeed("left",1.0,1.0,1.0,1.0,1.0,1.0) i01.setHandSpeed("right",1.0,1.0,1.0,1.0,1.0,1.0) i01.setTorsoSpeed(1.0,1.0,1.0) i01.moveHead(160,68) i01.moveArm("left",5,86,30,20) i01.moveArm("right",86,140,83,80) i01.moveHand("left",99,140,173,167,130,26) i01.moveHand("right",135,6,170,145,168,180) i01.moveTorso(25,80,90) sleep(2) #i01.mouth.speakBlocking("E,T phone the big home of the inmoov nation") AudioPlayer.playFile(RuningFolder+'/system/sounds/E,T phone the big home of the inmoov nation.mp3') sleep(2) rest() sleep(1) relax()
32
101
0.665761
155
736
3.16129
0.374194
0.114286
0.134694
0.179592
0.420408
0.420408
0.420408
0.402041
0.402041
0.365306
0
0.222901
0.110054
736
22
102
33.454545
0.525191
0.095109
0
0.285714
0
0
0.147368
0
0
0
0
0
0
1
0.047619
true
0
0
0
0.047619
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
3
4c533f621dcbe11020155d06d8dce3ba3c07b73e
788
py
Python
script.py
NickxFury/Unlimited-Filter-Bot
7a06854a99b285dfde1efcbd54a9811105dcd6a8
[ "MIT" ]
1
2021-03-27T16:01:29.000Z
2021-03-27T16:01:29.000Z
script.py
NickxFury/Unlimited-Filter-Bot
7a06854a99b285dfde1efcbd54a9811105dcd6a8
[ "MIT" ]
null
null
null
script.py
NickxFury/Unlimited-Filter-Bot
7a06854a99b285dfde1efcbd54a9811105dcd6a8
[ "MIT" ]
4
2021-04-06T06:39:29.000Z
2021-09-23T07:36:38.000Z
class Script(object): START_MSG = """<b>Hy {},എന്റെ പേര് Carla ◢ ◤! 🤭 എന്നെ നിർമിച്ചിരിക്കുന്നത് മൂവി ക്ലബ്‌ ഗ്രൂപ്പിലേക്ക് ആണ്. എന്തായാലും സ്റ്റാർട്ട് അടിച്ചതല്ലെ ഇനി ആ താഴെ കാണുന്ന നമ്മുടെ ഒഫീഷ്യൽ ചന്നെൽ കൂടി Subscribe ചെയ്തിട്ട് പൊക്കോ...🤣🤣</b> """ HELP_MSG = """ <b>നീ ഏതാ..... ഒന്ന് പോടെയ് അവൻ help ചോയ്ച്ച് വന്നിരിക്കുന്നു😤...I'm Different Bot U Know. </b> """ ABOUT_MSG = """<b>Bot Info</b> ⭕️<b>My Name : Carla ◢ ◤</b> ⭕️<b>Creater :</b> @NickxFury ⭕️<b>Language :</b> <code>Python3</code> ⭕️<b>Library :</b> <a href='https://docs.pyrogram.org/'>Pyrogram 1.0.7</a> <b>Note :</b> <code>നിങ്ങൾക്ക് എന്നെ വേറെ ഒരിടത്തും ഉപയോഗിക്കാൻ പറ്റൂല വേണേൽ നിങ്ങൾക്ക് Start,Help,About ഒക്കെ അടിച്ച് കളിക്കാം അത്ര മാത്രം..ഇത്രേ എനിക്ക് ചെയ്ത് തരാൻ പറ്റൂ🤭.</code> """
28.142857
181
0.508883
338
788
1.615385
0.310651
0.043956
0.038462
0.03663
0.10989
0.069597
0.069597
0.069597
0.032967
0
0
0.005988
0.152284
788
27
182
29.185185
0.58982
0
0
0.2
0
0.333333
0.879442
0.027919
0
0
0
0
0
1
0
false
0
0
0
0.266667
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
4c5c12fd80785f4eaa0b333b1dc9212f38fe8fd4
481
py
Python
src/scrapper/cbsnews_scrapper.py
pkupidura/reddit-corpus-builder
d47fdff1bef5ab0ba6f9e24118c2f7b8ae40a6df
[ "MIT" ]
1
2020-08-25T12:40:35.000Z
2020-08-25T12:40:35.000Z
src/scrapper/cbsnews_scrapper.py
pkupidura/reddit-corpus-builder
d47fdff1bef5ab0ba6f9e24118c2f7b8ae40a6df
[ "MIT" ]
null
null
null
src/scrapper/cbsnews_scrapper.py
pkupidura/reddit-corpus-builder
d47fdff1bef5ab0ba6f9e24118c2f7b8ae40a6df
[ "MIT" ]
null
null
null
from bs4 import BeautifulSoup from scrapper.article_scrapper import ArticleScrapper class CbsnewsScrapper(ArticleScrapper): @staticmethod def domain(): return "cbsnews.com" def scrap_header(self, dom: BeautifulSoup) -> str: return self.find_child_h1(dom, attrs={"class": "content__title"}).text def scrap_content(self, dom: BeautifulSoup) -> str: return self.get_paragraphs_text(self.find_section(dom, attrs={"class": "content__body"}))
32.066667
97
0.725572
57
481
5.912281
0.54386
0.047478
0.118694
0.136499
0.195846
0.195846
0
0
0
0
0
0.004975
0.164241
481
14
98
34.357143
0.833333
0
0
0
0
0
0.099792
0
0
0
0
0
0
1
0.3
false
0
0.2
0.3
0.9
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
4c699aa6115fb2a55438d725083106f4fcd31d1e
466
py
Python
casepro/profiles/migrations/0009_generate_api_tokens.py
lucianomc/casepro
04e37a5ac816b6c7207dc3eed7bd616257efe11f
[ "BSD-3-Clause" ]
null
null
null
casepro/profiles/migrations/0009_generate_api_tokens.py
lucianomc/casepro
04e37a5ac816b6c7207dc3eed7bd616257efe11f
[ "BSD-3-Clause" ]
165
2015-11-13T13:50:11.000Z
2018-10-24T07:00:38.000Z
casepro/profiles/migrations/0009_generate_api_tokens.py
lucianomc/casepro
04e37a5ac816b6c7207dc3eed7bd616257efe11f
[ "BSD-3-Clause" ]
5
2020-01-16T00:55:23.000Z
2020-02-06T17:21:12.000Z
# Generated by Django 2.0.8 on 2018-08-21 18:33 from django.db import migrations from django.contrib.auth.models import User from rest_framework.authtoken.models import Token def generate_tokens(apps, scheme_editor): for user in User.objects.all(): Token.objects.get_or_create(user=user) class Migration(migrations.Migration): dependencies = [("profiles", "0008_profile_must_use_faq")] operations = [migrations.RunPython(generate_tokens)]
25.888889
62
0.763948
66
466
5.242424
0.727273
0.057803
0
0
0
0
0
0
0
0
0
0.047264
0.137339
466
17
63
27.411765
0.813433
0.096567
0
0
1
0
0.078759
0.059666
0
0
0
0
0
1
0.111111
false
0
0.333333
0
0.777778
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
3
4c7164a14b7a7ffd5176d511831b080dab388387
169
py
Python
CursoEmVideo/ex003.py
ElivanLimaJunior/Python
57c277f3ec0da06d6c8aa125b50d01a5ab88934d
[ "MIT" ]
null
null
null
CursoEmVideo/ex003.py
ElivanLimaJunior/Python
57c277f3ec0da06d6c8aa125b50d01a5ab88934d
[ "MIT" ]
null
null
null
CursoEmVideo/ex003.py
ElivanLimaJunior/Python
57c277f3ec0da06d6c8aa125b50d01a5ab88934d
[ "MIT" ]
null
null
null
n1 = int(input('Digite um Valor: ')) n2 = int(input('Digite outro valor: ')) n = n1 + n2 print('A soma entre {} e {} resulta em: {} !'.format(n1,n2,n)) print(type(n1))
24.142857
62
0.597633
29
169
3.482759
0.62069
0.158416
0.277228
0
0
0
0
0
0
0
0
0.05
0.171598
169
6
63
28.166667
0.671429
0
0
0
0
0
0.440476
0
0
0
0
0
0
1
0
false
0
0
0
0
0.4
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
4c794c213c5467db7e88d1b0dbe4be59571118bc
1,313
py
Python
NO-LOGIN.py
CLB-09/NO-LOGIN
c2d2d34cecd1fc1e012e43630b5e2cdab8b8c6a8
[ "Apache-2.0" ]
null
null
null
NO-LOGIN.py
CLB-09/NO-LOGIN
c2d2d34cecd1fc1e012e43630b5e2cdab8b8c6a8
[ "Apache-2.0" ]
null
null
null
NO-LOGIN.py
CLB-09/NO-LOGIN
c2d2d34cecd1fc1e012e43630b5e2cdab8b8c6a8
[ "Apache-2.0" ]
null
null
null
\033[1;91m──────────────────────────────────────────────── \033[1;91m─██████████████─██████─────────██████████████─── \033[1;91m─██▒▒▒▒▒▒▒▒▒▒██─██▒▒██─────────██▒▒▒▒▒▒▒▒▒▒██─── \033[1;91m─██▒▒██████████─██▒▒██─────────██▒▒██████▒▒██─── \033[1;91m─██▒▒██─────────██▒▒██─────────██▒▒██──██▒▒██─── \033[1;91m─██▒▒██─────────██▒▒██─────────██▒▒██████▒▒████─ \033[1;91m─██▒▒██─────────██▒▒██─────────██▒▒▒▒▒▒▒▒▒▒▒▒██─ \033[1;91m─██▒▒██─────────██▒▒██─────────██▒▒████████▒▒██─ \033[1;91m─██▒▒██─────────██▒▒██─────────██▒▒██────██▒▒██─ \033[1;91m─██▒▒██████████─██▒▒██████████─██▒▒████████▒▒██─ \033[1;91m─██▒▒▒▒▒▒▒▒▒▒██─██▒▒▒▒▒▒▒▒▒▒██─██▒▒▒▒▒▒▒▒▒▒▒▒██─ \033[1;91m─██████████████─██████████████─████████████████─ \033[1;91m──────────────────────────────────────────────── \033[1;91m─────────────────────────────── \033[1;91m─██████████████─██████████████─ \033[1;91m─██▒▒▒▒▒▒▒▒▒▒██─██▒▒▒▒▒▒▒▒▒▒██─ \033[1;91m─██▒▒██████▒▒██─██▒▒██████▒▒██─ \033[1;91m─██▒▒██──██▒▒██─██▒▒██──██▒▒██─ \033[1;91m─██▒▒██──██▒▒██─██▒▒██████▒▒██─ \033[1;91m─██▒▒██──██▒▒██─██▒▒▒▒▒▒▒▒▒▒██─ \033[1;91m─██▒▒██──██▒▒██─██████████▒▒██─ \033[1;91m─██▒▒██──██▒▒██─────────██▒▒██─ \033[1;91m─██▒▒██████▒▒██─██████████▒▒██─ \033[1;91m─██▒▒▒▒▒▒▒▒▒▒██─██▒▒▒▒▒▒▒▒▒▒██─ \033[1;91m─██████████████─██████████████─ \033[1;91m───────────────────────────────
48.62963
58
0.138614
104
1,313
11.625
0.240385
0.086022
0.150538
0.090984
0.239868
0.239868
0.239868
0
0
0
0
0.121212
0.019802
1,313
26
59
50.5
0.020202
0
0
0.307692
0
0
0
0
0
0
0
0
0
0
null
null
0
0
null
null
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
3
d5bdc8662b74e93d4537ef87964eb33ced8a8711
366
py
Python
python/en/archive/dropbox/ec2-oregon/lid_core.py
aimldl/coding
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
[ "MIT" ]
null
null
null
python/en/archive/dropbox/ec2-oregon/lid_core.py
aimldl/coding
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
[ "MIT" ]
null
null
null
python/en/archive/dropbox/ec2-oregon/lid_core.py
aimldl/coding
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 """ lid_core.py """ # Custom package from utils import makedirs_if_absent from utils import save2wav def identify_language( device_id_, sampling_rate_, payload_ ): file = save2wav( device_id, SAMPLING_RATE, payload ) print('hello') # preprocess( payload, SAMPLING_RATE, WINDOW_LENGTH, WINDOW_STEP, NUM_FEATURES )
24.4
84
0.721311
46
366
5.413043
0.717391
0.144578
0.120482
0.160643
0.216867
0
0
0
0
0
0
0.010067
0.185792
366
15
84
24.4
0.825503
0.346995
0
0
0
0
0.023256
0
0
0
0
0
0
1
0.2
false
0
0.4
0
0.6
0.2
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
3
d5c6592a0008b7816bf8a04d8a5bcc9151b99344
595
py
Python
PDaS/BitVector.py
Seenivasanseeni/PyDas
cdae21c6b542254a921a692902d7c3920bd87b6a
[ "MIT" ]
null
null
null
PDaS/BitVector.py
Seenivasanseeni/PyDas
cdae21c6b542254a921a692902d7c3920bd87b6a
[ "MIT" ]
1
2017-08-02T15:24:51.000Z
2017-08-02T15:41:47.000Z
PDaS/BitVector.py
Seenivasanseeni/PyDas
cdae21c6b542254a921a692902d7c3920bd87b6a
[ "MIT" ]
1
2017-08-07T13:01:59.000Z
2017-08-07T13:01:59.000Z
class BitVector(object): """docstring for BitVector""" """infinite array of bits is present in bitvector""" def __init__(self): self.BitNum=0 self.length=0 def set(self,i): self.BitNum=self.BitNum | 1 << i self.length=self.BitNum.bit_length() def reset(self,i): resetValue=1<<i self.BitNum=self.BitNum - resetValue self.length=self.BitNum.bit_length() def at(self,i): if(i<0): raise ValueError if(i >=self.length): return 0 return int(bin(self.BitNum)[-(i+1)]) def __repr__(self): return bin(self.BitNum)[2:] def __str__(self): return bin(self.BitNum)[2:]
24.791667
53
0.684034
95
595
4.136842
0.357895
0.254453
0.099237
0.076336
0.391858
0.284987
0.16285
0
0
0
0
0.017822
0.151261
595
23
54
25.869565
0.760396
0.038655
0
0.190476
0
0
0
0
0
0
0
0
0
1
0.285714
false
0
0
0.095238
0.52381
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
3
d5d2319e0a3a67bdff4f049aa93e48fd59983b2c
214
py
Python
tests/unit/gameLogic/test_gameState.py
xmadsen/mafia-slack
3ec384173e538a99c8b419be09e0e459bd84df64
[ "MIT" ]
null
null
null
tests/unit/gameLogic/test_gameState.py
xmadsen/mafia-slack
3ec384173e538a99c8b419be09e0e459bd84df64
[ "MIT" ]
null
null
null
tests/unit/gameLogic/test_gameState.py
xmadsen/mafia-slack
3ec384173e538a99c8b419be09e0e459bd84df64
[ "MIT" ]
null
null
null
import pytest from models.gameState import Game, States def test_defaultGameState(): systemUnderTest = Game() assert systemUnderTest.state == States.MARSHALLING assert len(systemUnderTest.players) == 0
30.571429
54
0.771028
23
214
7.130435
0.73913
0
0
0
0
0
0
0
0
0
0
0.005495
0.149533
214
7
55
30.571429
0.895604
0
0
0
0
0
0
0
0
0
0
0
0.333333
1
0.166667
false
0
0.333333
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
d5eb4d0bf7e595d9c5c3f979d8eedddd957d004c
128
py
Python
venv/Lib/site-packages/branca/plugins/__init__.py
star10919/drf
77c005794087484d72ffc0d76612a6ac9845821e
[ "BSD-3-Clause" ]
82
2016-03-02T17:18:33.000Z
2022-02-23T23:01:08.000Z
venv/Lib/site-packages/branca/plugins/__init__.py
star10919/drf
77c005794087484d72ffc0d76612a6ac9845821e
[ "BSD-3-Clause" ]
74
2016-02-13T11:59:38.000Z
2022-03-01T23:21:30.000Z
venv/Lib/site-packages/branca/plugins/__init__.py
star10919/drf
77c005794087484d72ffc0d76612a6ac9845821e
[ "BSD-3-Clause" ]
68
2016-02-13T11:55:50.000Z
2021-11-10T21:46:35.000Z
# -*- coding: utf-8 -*- """ Branca plugins -------------- Add different objects/effects in a branca webpage. """ __all__ = []
12.8
50
0.5625
14
128
4.857143
0.928571
0
0
0
0
0
0
0
0
0
0
0.009434
0.171875
128
9
51
14.222222
0.632075
0.8125
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
d5f2174befa76cf1a86c0680936b0415f4aeaa10
132
py
Python
slrp/__init__.py
thomasmatecki/parsley
0c51e9c37759fbc1c723519619952248c83e4642
[ "MIT" ]
null
null
null
slrp/__init__.py
thomasmatecki/parsley
0c51e9c37759fbc1c723519619952248c83e4642
[ "MIT" ]
2
2020-03-24T18:30:15.000Z
2020-03-31T10:57:37.000Z
slrp/__init__.py
thomasmatecki/parsley
0c51e9c37759fbc1c723519619952248c83e4642
[ "MIT" ]
null
null
null
from slrp.combos import Lazy from slrp.expressions import RegExpr, StringExpr # Abbreviations L = Lazy RE = RegExpr S = StringExpr
16.5
48
0.787879
18
132
5.777778
0.666667
0.153846
0
0
0
0
0
0
0
0
0
0
0.159091
132
7
49
18.857143
0.936937
0.098485
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.4
0
0.4
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
d5fc7b20dd299417dff95c85abe7af16b66eed22
143
py
Python
medium/pro-divisible/Divisible.py
Adi142857/sololearn-challenges
67437d9c202ce6d470042bbe87f20da9fd4a077c
[ "MIT" ]
83
2020-01-07T23:02:52.000Z
2022-03-19T06:53:56.000Z
medium/pro-divisible/Divisible.py
Adi142857/sololearn-challenges
67437d9c202ce6d470042bbe87f20da9fd4a077c
[ "MIT" ]
21
2020-01-23T14:26:13.000Z
2022-03-20T06:30:45.000Z
medium/pro-divisible/Divisible.py
Adi142857/sololearn-challenges
67437d9c202ce6d470042bbe87f20da9fd4a077c
[ "MIT" ]
53
2020-02-10T13:40:33.000Z
2022-03-13T13:07:33.000Z
number = int(input()) if any(number % int(i) for i in input().split()): print('not divisible by all') else: print('divisible by all')
20.428571
49
0.629371
23
143
3.913043
0.652174
0.2
0.311111
0
0
0
0
0
0
0
0
0
0.195804
143
6
50
23.833333
0.782609
0
0
0
0
0
0.251748
0
0
0
0
0
0
1
0
false
0
0
0
0
0.4
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
d5febc6cb73b455db3864791ab286819438f583f
160
py
Python
scripts/Python/meanshift.py
marionomics/clase-analisis-multivariado
80324abfeb9e3e6dd15ccfc7a13f949284c9b3e1
[ "MIT" ]
null
null
null
scripts/Python/meanshift.py
marionomics/clase-analisis-multivariado
80324abfeb9e3e6dd15ccfc7a13f949284c9b3e1
[ "MIT" ]
null
null
null
scripts/Python/meanshift.py
marionomics/clase-analisis-multivariado
80324abfeb9e3e6dd15ccfc7a13f949284c9b3e1
[ "MIT" ]
2
2021-08-21T17:17:34.000Z
2021-09-04T16:04:28.000Z
import pandas as pd from sklearn.cluster import MeanShift if __name__ == "__main__": dataset = pd.read_csv("./data/candy.csv") print(dataset.head(5))
20
45
0.70625
23
160
4.521739
0.826087
0
0
0
0
0
0
0
0
0
0
0.007463
0.1625
160
7
46
22.857143
0.768657
0
0
0
0
0
0.15
0
0
0
0
0
0
1
0
false
0
0.4
0
0.4
0.2
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
910443552e910211725070178068c613186364f6
1,494
py
Python
build/scripts/build_java_with_error_prone.py
jochenater/catboost
de2786fbc633b0d6ea6a23b3862496c6151b95c2
[ "Apache-2.0" ]
6,989
2017-07-18T06:23:18.000Z
2022-03-31T15:58:36.000Z
build/scripts/build_java_with_error_prone.py
jochenater/catboost
de2786fbc633b0d6ea6a23b3862496c6151b95c2
[ "Apache-2.0" ]
1,978
2017-07-18T09:17:58.000Z
2022-03-31T14:28:43.000Z
build/scripts/build_java_with_error_prone.py
jochenater/catboost
de2786fbc633b0d6ea6a23b3862496c6151b95c2
[ "Apache-2.0" ]
1,228
2017-07-18T09:03:13.000Z
2022-03-29T05:57:40.000Z
import sys import os ERROR_PRONE_FLAGS = [ '-Xep:FunctionalInterfaceMethodChanged:WARN', '-Xep:ReturnValueIgnored:WARN', ] JAVA10_EXPORTS = [ '--add-exports=jdk.compiler/com.sun.tools.javac.api=ALL-UNNAMED', '--add-exports=jdk.compiler/com.sun.tools.javac.util=ALL-UNNAMED', '--add-exports=jdk.compiler/com.sun.tools.javac.tree=ALL-UNNAMED', '--add-exports=jdk.compiler/com.sun.tools.javac.main=ALL-UNNAMED', '--add-exports=jdk.compiler/com.sun.tools.javac.code=ALL-UNNAMED', '--add-exports=jdk.compiler/com.sun.tools.javac.processing=ALL-UNNAMED', '--add-exports=jdk.compiler/com.sun.tools.javac.parser=ALL-UNNAMED', '--add-exports=jdk.compiler/com.sun.tools.javac.comp=ALL-UNNAMED' ] def just_do_it(argv): java, error_prone_tool, javac_cmd = argv[0], argv[1], argv[2:] if java.endswith('javac') or java.endswith('javac.exe'): for f in javac_cmd: if f.startswith('-Xep'): ERROR_PRONE_FLAGS.append(f) for f in ERROR_PRONE_FLAGS: if f in javac_cmd: javac_cmd.remove(f) os.execv(java, [java] + JAVA10_EXPORTS + ['-processorpath', error_prone_tool, '-XDcompilePolicy=byfile'] + [(' '.join(['-Xplugin:ErrorProne'] + ERROR_PRONE_FLAGS))] + javac_cmd) else: os.execv(java, [java, '-Xbootclasspath/p:' + error_prone_tool, 'com.google.errorprone.ErrorProneCompiler'] + ERROR_PRONE_FLAGS + javac_cmd) if __name__ == '__main__': just_do_it(sys.argv[1:])
40.378378
185
0.677376
206
1,494
4.737864
0.305825
0.081967
0.106557
0.172131
0.422131
0.375
0.375
0.375
0.33709
0.33709
0
0.006339
0.155288
1,494
36
186
41.5
0.767036
0
0
0
0
0.266667
0.483266
0.431058
0
0
0
0
0
1
0.033333
false
0
0.066667
0
0.1
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
9112621b6cbb11b0294209f9b32dd958f3cce13d
186
py
Python
src/ychaos/agents/__init__.py
vanderh0ff/ychaos
5148c889912b744ee73907e4dd30c9ddb851aeb3
[ "Apache-2.0" ]
8
2021-07-21T15:37:48.000Z
2022-03-03T14:43:09.000Z
src/ychaos/agents/__init__.py
vanderh0ff/ychaos
5148c889912b744ee73907e4dd30c9ddb851aeb3
[ "Apache-2.0" ]
102
2021-07-20T16:08:29.000Z
2022-03-25T07:28:37.000Z
src/ychaos/agents/__init__.py
vanderh0ff/ychaos
5148c889912b744ee73907e4dd30c9ddb851aeb3
[ "Apache-2.0" ]
8
2021-07-20T13:37:46.000Z
2022-02-18T01:44:52.000Z
# Copyright 2021, Yahoo # Licensed under the terms of the Apache 2.0 license. See the LICENSE file in the project root for terms from .index import AgentType __all__ = ["AgentType"]
26.571429
105
0.752688
29
186
4.689655
0.793103
0
0
0
0
0
0
0
0
0
0
0.039735
0.188172
186
6
106
31
0.860927
0.672043
0
0
0
0
0.157895
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
911e335286ee2bf1e6abd64b57af7f315bf7ce8d
315
py
Python
django_ltree_field/migrations/0001_initial.py
john-parton/django-ltree-field
a4378f0eb0d6a4abb2ed459c49b081d7e2a35c4b
[ "BSD-3-Clause" ]
1
2021-11-11T20:03:12.000Z
2021-11-11T20:03:12.000Z
django_ltree_field/migrations/0001_initial.py
john-parton/django-ltree-field
a4378f0eb0d6a4abb2ed459c49b081d7e2a35c4b
[ "BSD-3-Clause" ]
null
null
null
django_ltree_field/migrations/0001_initial.py
john-parton/django-ltree-field
a4378f0eb0d6a4abb2ed459c49b081d7e2a35c4b
[ "BSD-3-Clause" ]
null
null
null
# Generated by Django 3.1.7 on 2021-03-29 01:59 from django.db import migrations import django.contrib.postgres.operations class Migration(migrations.Migration): dependencies = [ ] operations = [ django.contrib.postgres.operations.CreateExtension( name='ltree', ) ]
18.529412
59
0.663492
35
315
5.971429
0.714286
0.124402
0.200957
0.296651
0
0
0
0
0
0
0
0.062762
0.24127
315
16
60
19.6875
0.811715
0.142857
0
0
1
0
0.018657
0
0
0
0
0
0
1
0
false
0
0.2
0
0.5
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
912c6aae8516f5a0f8f5200c221a2287cc7043be
482
py
Python
src/apps/api/urls.py
63phc/lks
2d263cf528d5370a1ff480b323cfc8945765a152
[ "MIT" ]
4
2018-10-31T19:09:50.000Z
2020-09-29T13:16:28.000Z
src/apps/api/urls.py
63phc/lks
2d263cf528d5370a1ff480b323cfc8945765a152
[ "MIT" ]
70
2018-11-02T13:55:32.000Z
2022-03-18T13:06:45.000Z
src/apps/api/urls.py
63phc/lks
2d263cf528d5370a1ff480b323cfc8945765a152
[ "MIT" ]
2
2018-11-05T09:59:26.000Z
2020-08-26T19:37:41.000Z
from django.urls import include, path urlpatterns = [ path("", include("src.apps.blog.urls")), path("", include("src.apps.shop.urls")), path("", include("src.apps.menu.urls")), path("", include("src.apps.slider.urls")), path("", include("src.apps.account.urls")), path("", include("src.apps.subscribe.urls")), path("", include("src.apps.reviews.urls")), path("", include("src.apps.contacts.urls")), path("", include("src.apps.shorter.urls")), ]
34.428571
49
0.614108
61
482
4.852459
0.295082
0.334459
0.425676
0.547297
0.594595
0
0
0
0
0
0
0
0.13278
482
13
50
37.076923
0.708134
0
0
0
0
0
0.377593
0.224066
0
0
0
0
0
1
0
false
0
0.083333
0
0.083333
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
e6b3238370fb49aaf44908c334d3e8571387b7a4
206
py
Python
app.py
haudoux/chatelet
3d3d07fd7d17ef7a7d187b7f5d2b140ed1c54d04
[ "Unlicense" ]
null
null
null
app.py
haudoux/chatelet
3d3d07fd7d17ef7a7d187b7f5d2b140ed1c54d04
[ "Unlicense" ]
null
null
null
app.py
haudoux/chatelet
3d3d07fd7d17ef7a7d187b7f5d2b140ed1c54d04
[ "Unlicense" ]
null
null
null
from flask import Flask from test.chatelet.routes import chatelet from extensions import mysql app = Flask(__name__) mysql.init_app(app) app.register_blueprint(chatelet, url_prefix='/chatelet')
20.6
57
0.776699
28
206
5.464286
0.535714
0.078431
0
0
0
0
0
0
0
0
0
0
0.145631
206
9
58
22.888889
0.869318
0
0
0
0
0
0.045685
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0.166667
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
e6b84d269e210a52307ffdd24e8bfc3a111e10aa
200
py
Python
werobot/__init__.py
ninestep/WeRoBot
0ea659e8bc1c90d95249abb5610b998257c73017
[ "MIT" ]
1
2019-04-20T15:52:46.000Z
2019-04-20T15:52:46.000Z
werobot/__init__.py
ninestep/WeRoBot
0ea659e8bc1c90d95249abb5610b998257c73017
[ "MIT" ]
null
null
null
werobot/__init__.py
ninestep/WeRoBot
0ea659e8bc1c90d95249abb5610b998257c73017
[ "MIT" ]
null
null
null
__version__ = '1.9.0' __author__ = 'whtsky' __license__ = 'MIT' __all__ = ["WeRoBot"] try: from werobot.robot import WeRoBot except ImportError: # pragma: no cover pass # pragma: no cover
18.181818
39
0.685
25
200
4.84
0.8
0.132231
0.214876
0
0
0
0
0
0
0
0
0.01875
0.2
200
10
40
20
0.7375
0.165
0
0
0
0
0.128049
0
0
0
0
0
0
1
0
false
0.125
0.25
0
0.25
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
3
e6da493ff2545ad3154454b0f174c82283d06387
43
py
Python
estruturas_dados/src/exercicio/__init__.py
andersonmarques/programacao_2_ufra
94a22559eed817a429309d8da338431416608c0c
[ "MIT" ]
null
null
null
estruturas_dados/src/exercicio/__init__.py
andersonmarques/programacao_2_ufra
94a22559eed817a429309d8da338431416608c0c
[ "MIT" ]
null
null
null
estruturas_dados/src/exercicio/__init__.py
andersonmarques/programacao_2_ufra
94a22559eed817a429309d8da338431416608c0c
[ "MIT" ]
null
null
null
__all__ = ['controle', 'modelo', 'view_ui']
43
43
0.651163
5
43
4.6
1
0
0
0
0
0
0
0
0
0
0
0
0.093023
43
1
43
43
0.589744
0
0
0
0
0
0.477273
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
e6e261a8bc9ae2148820d2863151dcd865ce294b
399
py
Python
{{cookiecutter.package_name}}/{{cookiecutter.package_name}}/frontend/forms.py
cmeadows/fbone-marrow
0c69bcafbe21c48641cc9759f2a959b9b7881ce3
[ "BSD-3-Clause" ]
null
null
null
{{cookiecutter.package_name}}/{{cookiecutter.package_name}}/frontend/forms.py
cmeadows/fbone-marrow
0c69bcafbe21c48641cc9759f2a959b9b7881ce3
[ "BSD-3-Clause" ]
null
null
null
{{cookiecutter.package_name}}/{{cookiecutter.package_name}}/frontend/forms.py
cmeadows/fbone-marrow
0c69bcafbe21c48641cc9759f2a959b9b7881ce3
[ "BSD-3-Clause" ]
1
2020-04-25T14:01:26.000Z
2020-04-25T14:01:26.000Z
from flask.ext.wtf import Form from wtforms import HiddenField, BooleanField, TextField, PasswordField, SubmitField from wtforms.validators import Required class LoginForm(Form): next = HiddenField() login = TextField(u'Username or email', [Required()]) password = PasswordField('Password', [Required()]) remember = BooleanField('Remember me') submit = SubmitField('Sign in')
30.692308
84
0.736842
43
399
6.837209
0.651163
0.07483
0
0
0
0
0
0
0
0
0
0
0.155388
399
12
85
33.25
0.872404
0
0
0
0
0
0.107769
0
0
0
0
0
0
1
0
false
0.222222
0.333333
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
1
0
1
0
0
3
e6eef1cedb8b1b75e655141a891a6c8675e838d6
498
py
Python
tests/workflow/test_workflow_pubmed_article_deposit.py
elifesciences/elife-bot
d3a102c8030e4b7ec83cbd45e5f839dba4f9ffd9
[ "MIT" ]
17
2015-02-10T07:10:29.000Z
2021-05-14T22:24:45.000Z
tests/workflow/test_workflow_pubmed_article_deposit.py
elifesciences/elife-bot
d3a102c8030e4b7ec83cbd45e5f839dba4f9ffd9
[ "MIT" ]
459
2015-03-31T18:24:23.000Z
2022-03-30T19:44:40.000Z
tests/workflow/test_workflow_pubmed_article_deposit.py
elifesciences/elife-bot
d3a102c8030e4b7ec83cbd45e5f839dba4f9ffd9
[ "MIT" ]
9
2015-04-18T16:57:31.000Z
2020-10-30T11:49:13.000Z
import unittest import tests.settings_mock as settings_mock from tests.activity.classes_mock import FakeLogger from workflow.workflow_PubmedArticleDeposit import workflow_PubmedArticleDeposit class TestWorkflowPubmedArticleDeposit(unittest.TestCase): def setUp(self): self.workflow = workflow_PubmedArticleDeposit( settings_mock, FakeLogger(), None, None, None, None ) def test_init(self): self.assertEqual(self.workflow.name, "PubmedArticleDeposit")
33.2
80
0.7751
51
498
7.411765
0.45098
0.095238
0.190476
0
0
0
0
0
0
0
0
0
0.158635
498
14
81
35.571429
0.902148
0
0
0
0
0
0.040161
0
0
0
0
0
0.090909
1
0.181818
false
0
0.363636
0
0.636364
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
3
e6f851bfb5e78afa6c11a2d2726a533fd42ecbd2
437
py
Python
GenTemplate.py
venunathan12/BrainCooked
34b422e7a3ec04bf355172233545df733645911e
[ "MIT" ]
null
null
null
GenTemplate.py
venunathan12/BrainCooked
34b422e7a3ec04bf355172233545df733645911e
[ "MIT" ]
null
null
null
GenTemplate.py
venunathan12/BrainCooked
34b422e7a3ec04bf355172233545df733645911e
[ "MIT" ]
null
null
null
AddrSize = 8 Out = open("Template.txt", "w") Out.write(">->+\n[>\n" + ">" * AddrSize + "+" + "<" * AddrSize + "\n\n") def Mark(C, L): if C == L: Out.write("\t" * 0 + "[-\n") Out.write("\t" * 0 + "#\n") Out.write("\t" * 0 + "]\n") return Out.write("\t" * 0 + "[>\n") Mark(C+1, L) Out.write("\t" * 0 + "]>\n") Mark(C+1, L) Mark(0,AddrSize) Out.write("+[-<+]-\n>]") Out.close()
19
72
0.398169
64
437
2.71875
0.296875
0.321839
0.258621
0.287356
0.402299
0.402299
0.396552
0.396552
0.396552
0.189655
0
0.028571
0.279176
437
23
73
19
0.52381
0
0
0.125
0
0
0.157534
0
0
0
0
0
0
1
0.0625
false
0
0
0
0.125
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
fc1be88a0b4f47e673968ccb10cfa8b48ec129c5
23
py
Python
python/__init__.py
maestromark55/bust-radio
d3552304e9e0f551359b3a6b72f0f2bc31e863f5
[ "Apache-2.0" ]
null
null
null
python/__init__.py
maestromark55/bust-radio
d3552304e9e0f551359b3a6b72f0f2bc31e863f5
[ "Apache-2.0" ]
null
null
null
python/__init__.py
maestromark55/bust-radio
d3552304e9e0f551359b3a6b72f0f2bc31e863f5
[ "Apache-2.0" ]
null
null
null
__author__ = 'paolob'
7.666667
21
0.695652
2
23
6
1
0
0
0
0
0
0
0
0
0
0
0
0.173913
23
2
22
11.5
0.631579
0
0
0
0
0
0.272727
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
fc34dcabd4f40101cd53c78fb99504b4b215fe4e
260
py
Python
20_unit_testing/lectures/6_setup_method/test_printer.py
gdia/The-Complete-Python-Course
ed375b65242249bc749c3e292a6149f8528b9dcf
[ "MIT" ]
29
2019-09-02T21:15:59.000Z
2022-01-14T02:20:05.000Z
20_unit_testing/lectures/6_setup_method/test_printer.py
gdia/The-Complete-Python-Course
ed375b65242249bc749c3e292a6149f8528b9dcf
[ "MIT" ]
2
2020-08-20T05:48:36.000Z
2021-06-02T03:16:31.000Z
20_unit_testing/lectures/6_setup_method/test_printer.py
gdia/The-Complete-Python-Course
ed375b65242249bc749c3e292a6149f8528b9dcf
[ "MIT" ]
38
2019-10-20T14:29:12.000Z
2022-03-27T19:50:05.000Z
from printer import Printer, PrinterError from unittest import TestCase class TestPrinter(TestCase): def setUp(self): self.printer = Printer(pages_per_s=2.0, capacity=300) def test_print_within_capacity(self): self.printer.print(25)
23.636364
61
0.734615
35
260
5.314286
0.628571
0.086022
0.16129
0
0
0
0
0
0
0
0
0.032864
0.180769
260
10
62
26
0.840376
0
0
0
0
0
0
0
0
0
0
0
0
1
0.285714
false
0
0.285714
0
0.714286
0.571429
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
1
0
3
fc45fe421b6af96cd60660b86cdce572ef731d96
124
py
Python
code/abc036_c_02.py
KoyanagiHitoshi/AtCoder
731892543769b5df15254e1f32b756190378d292
[ "MIT" ]
3
2019-08-16T16:55:48.000Z
2021-04-11T10:21:40.000Z
code/abc036_c_02.py
KoyanagiHitoshi/AtCoder
731892543769b5df15254e1f32b756190378d292
[ "MIT" ]
null
null
null
code/abc036_c_02.py
KoyanagiHitoshi/AtCoder
731892543769b5df15254e1f32b756190378d292
[ "MIT" ]
null
null
null
N=int(input()) A=[int(input()) for i in range(N)] B={a:i for (i,a) in enumerate(sorted(set(A)))} for a in A: print(B[a])
24.8
46
0.58871
29
124
2.517241
0.448276
0.219178
0
0
0
0
0
0
0
0
0
0
0.153226
124
5
47
24.8
0.695238
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0.2
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
fc4841b1dfd5dee1573087354489c6c4ebc083e1
180
py
Python
pyfds/utils/__init__.py
FathiAbdelMalek/py-fds
5387ce3aab302699475794be0cfff55b29a4cf16
[ "MIT" ]
2
2020-06-16T03:38:41.000Z
2020-11-06T18:53:43.000Z
pyfds/utils/__init__.py
FathiMalek/py-fds
5387ce3aab302699475794be0cfff55b29a4cf16
[ "MIT" ]
null
null
null
pyfds/utils/__init__.py
FathiMalek/py-fds
5387ce3aab302699475794be0cfff55b29a4cf16
[ "MIT" ]
null
null
null
from .node import Node from .dnode import Node from .pair import Pair def change(node1, node2): bid = node1.data node1.data = node2.data node2.data = bid del bid
16.363636
27
0.677778
28
180
4.357143
0.428571
0.163934
0.229508
0
0
0
0
0
0
0
0
0.044118
0.244444
180
10
28
18
0.852941
0
0
0
0
0
0
0
0
0
0
0
0
1
0.125
false
0
0.375
0
0.5
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
fc560ee549139f76b4c197aeb94ac70cfef65876
1,464
py
Python
examples/order_3.py
avigad/boole
2a436c2967dbc968f6a5877c220b9757c3bc17c3
[ "Apache-2.0" ]
16
2015-01-01T18:21:35.000Z
2021-11-20T00:39:25.000Z
examples/order_3.py
avigad/boole
2a436c2967dbc968f6a5877c220b9757c3bc17c3
[ "Apache-2.0" ]
null
null
null
examples/order_3.py
avigad/boole
2a436c2967dbc968f6a5877c220b9757c3bc17c3
[ "Apache-2.0" ]
1
2021-05-14T11:12:31.000Z
2021-05-14T11:12:31.000Z
# -*- coding: utf-8 -*- ############################################################################# # # order_2.py # # description: Axiomatization of groups of order 2 and some experiments # # # Authors: # Cody Roux # # ############################################################################## from boole import * import boole.core.tactics as tac import boole.interfaces.ladr_interface as ladr import boole.core.expr as expr ## We can do the same for order 3, but the theorem isn't true! set_verbose() push_ctxt('group') # G = deftype('G') # G_mul = defconst('G_Mul', G >> (G >> G)) # G_one = defconst('G_one', G) # G_inv = defconst('G_inv', G >> G) # i = G_inv # g = G('g') # definstance('G_Mul', Mul(G, G_mul), triv()) # definstance('G_One', One(G, G_one), triv()) # defhyp('unit_l', forall(g, one() * g == g)) # defhyp('unit_r', forall(g, g * one() == g)) # g1, g2, g3 = G('g1 g2 g3') # defhyp('assoc', forall([g1, g2, g3], (g1 * g2) * g3 == g1 * (g2 * g3))) # defhyp('inv_l', forall(g, i(g) * g == one())) # defhyp('inv_r', forall(g, g * i(g) == one())) # defhyp('order_3', forall(g, g * g * g == one())) # h = G('h') # defthm('commut', forall([g,h], g * h == h * g)) # goal = current_ctxt().next_goal() # # ## Oops! # # goal.interact(ladr.prover9_tac(unfold=['*', 'one'])) # ## We can try to find a counter-example to our goal with the # # mace4 model builder # goal.interact(ladr.mace4_tac(unfold=['*', 'one'], size=27))
20.914286
78
0.525956
218
1,464
3.422018
0.389908
0.045576
0.020107
0.010724
0.042895
0.024129
0
0
0
0
0
0.02061
0.171448
1,464
69
79
21.217391
0.594394
0.706284
0
0
0
0
0.021739
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
3
fc61ca7eb4b02d2bf27413309e307e85b6e383eb
1,514
py
Python
r2python.py
vinash85/CDR2epitope
70dd52f32dfc5bd7530c6763d262a4c9cc1eafc4
[ "MIT" ]
null
null
null
r2python.py
vinash85/CDR2epitope
70dd52f32dfc5bd7530c6763d262a4c9cc1eafc4
[ "MIT" ]
null
null
null
r2python.py
vinash85/CDR2epitope
70dd52f32dfc5bd7530c6763d262a4c9cc1eafc4
[ "MIT" ]
null
null
null
# this module are python equivalent of R functions import numpy as np def intersect(lst1, lst2): return list(set(lst1) & set(lst2)) def unique(aa): return list(set(aa)) # def sample(aa): # change later to take multiple parameters # return np.random.shuffle(aa) def which(lst): return list(np.where(lst)[0]) def rowSums(aa): return np.sum(aa, axis=1) def colSums(aa): return np.sum(aa, axis=0) def rowMeans(aa): return np.mean(aa, axis=1) def colMeans(aa): return np.mean(aa, axis=0) def rbind(aa, bb): return np.concatenate((aa, bb), axis=0) def cbind(aa, bb): return np.concatenate((aa, bb), axis=1) def cbind_array_mat(array, mat): return cbind(np.tile(array, (len(mat), 1)), mat) def sample(List, size=1, replace=False, prob=None): ''' sample.int(n=n, size = s, replace = FALSE, prob = NULL) ''' if prob is not None: prob = prob / sum(prob) return np.random.choice(List, size=size, replace=replace, p=prob) def sample_int(n, size=1, replace=False, prob=None): ''' sample.int(n=n, size = s, replace = FALSE, prob = NULL) prob should be np.array ''' List = range(n) if prob is not None: prob = prob / sum(prob) return np.random.choice(List, size=size, replace=replace, p=prob) def np_which_max(a): return np.argmax(a) def which_max(a): return a.index(max(a)) def which_min(a): return a.index(min(a)) def setdiff(a, b): return list(set(a) - set(b))
17.811765
69
0.626156
249
1,514
3.779116
0.297189
0.085016
0.042508
0.02763
0.456961
0.456961
0.37407
0.37407
0.308183
0.308183
0
0.011905
0.22325
1,514
84
70
18.02381
0.788265
0.180317
0
0.157895
0
0
0
0
0
0
0
0
0
1
0.421053
false
0
0.026316
0.368421
0.868421
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
fc630f82f9916cc2f4760ed4b8d42453eeeffa97
21
py
Python
download_mp3/config.py
nis130/awesomeScripts
6ed7890188ecf47d9abcb2231230e92de8d15ca1
[ "MIT" ]
null
null
null
download_mp3/config.py
nis130/awesomeScripts
6ed7890188ecf47d9abcb2231230e92de8d15ca1
[ "MIT" ]
null
null
null
download_mp3/config.py
nis130/awesomeScripts
6ed7890188ecf47d9abcb2231230e92de8d15ca1
[ "MIT" ]
null
null
null
api_key = 'YOUR_API'
10.5
20
0.714286
4
21
3.25
0.75
0
0
0
0
0
0
0
0
0
0
0
0.142857
21
1
21
21
0.722222
0
0
0
0
0
0.380952
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
fc81b101b9fe6e963924add69b3be53e9201e6e7
959
py
Python
tests/test_dataset.py
aksarkar/scmodes
a05a81d69a1e4b2b21ee072c3cf0bcef65360f33
[ "MIT" ]
3
2020-05-04T19:50:26.000Z
2021-03-01T06:30:48.000Z
tests/test_dataset.py
aksarkar/scmodes
a05a81d69a1e4b2b21ee072c3cf0bcef65360f33
[ "MIT" ]
null
null
null
tests/test_dataset.py
aksarkar/scmodes
a05a81d69a1e4b2b21ee072c3cf0bcef65360f33
[ "MIT" ]
null
null
null
import numpy as np import scmodes def test_simulate_pois_rank1(): x, eta = scmodes.dataset.simulate_pois(n=30, p=60, rank=1) assert x.shape == (30, 60) assert eta.shape == (30, 60) assert (x >= 0).all() assert (~np.isclose(np.linalg.svd(eta, compute_uv=False, full_matrices=False), 0)).sum() == 1 def test_simulate_pois_rank2(): x, eta = scmodes.dataset.simulate_pois(n=30, p=60, rank=2) assert x.shape == (30, 60) assert eta.shape == (30, 60) assert (x >= 0).all() assert (~np.isclose(np.linalg.svd(eta, compute_uv=False, full_matrices=False), 0)).sum() == 2 def test_simulate_pois_masked(): x, eta = scmodes.dataset.simulate_pois(n=30, p=60, rank=2, holdout=.25) assert np.ma.is_masked(x) def test_simulate_pois_size(): x, mu = scmodes.dataset.simulate_pois_size(n=30, p=60, s=1000, rank=1, seed=0) assert x.shape == (30, 60) assert mu.shape == (30, 60) assert (x >= 0).all() assert np.isclose(mu.sum(axis=0), 1).all()
34.25
95
0.671533
170
959
3.658824
0.264706
0.154341
0.086817
0.144695
0.638264
0.638264
0.602894
0.602894
0.602894
0.602894
0
0.075702
0.145985
959
27
96
35.518519
0.683761
0
0
0.347826
0
0
0
0
0
0
0
0
0.565217
1
0.173913
true
0
0.086957
0
0.26087
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
1
0
0
0
0
0
0
3
fca113643b1c276d8a5d3e617aa70673bdeb9d36
907
py
Python
python_ex/20ex.py
llinmeng/PythonStudy
68c27eaa302b95aa4fb35d794f0d645f98b832dd
[ "MIT" ]
null
null
null
python_ex/20ex.py
llinmeng/PythonStudy
68c27eaa302b95aa4fb35d794f0d645f98b832dd
[ "MIT" ]
null
null
null
python_ex/20ex.py
llinmeng/PythonStudy
68c27eaa302b95aa4fb35d794f0d645f98b832dd
[ "MIT" ]
null
null
null
""" from sys import argv script, input_file = argv def print_all(f): print f.read() def rewind(f): f.seek(0) def print_a_line(line_count, f): print line_count, f.readline() print "line = %d " % line_count current_file = open(input_file) print "First let's print the whole file: " print_all(current_file) #print "line = %d " % line_count print "Now let's rewind, kind of like a tape." rewind(current_file) #print "line = %d " % line_count print "Now let's print three lines: " current_line = 1 print_a_line(current_line, current_file) #print "line = %d " % line_count current_line = current_line + 1 print_a_line(current_line, current_file) #print "line = %d " % line_count current_line = current_line + 1 print_a_line(current_line, current_file) #print "line = %d " % line_count current_line = current_line + 1 print_a_line(current_line, current_file) #print "line = %d " % line_count """
19.717391
46
0.721058
151
907
4.059603
0.231788
0.19739
0.114193
0.159869
0.644372
0.644372
0.601958
0.601958
0.601958
0.601958
0
0.006562
0.159868
907
45
47
20.155556
0.7979
1.019846
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
3
fca1a01c5bb0c961eef924c158f6d6e3deeecf47
109
py
Python
score/urls.py
kien5436/pick-color
30fd963f47bc8e7fa9852ba30652e1c8050283f8
[ "Unlicense" ]
null
null
null
score/urls.py
kien5436/pick-color
30fd963f47bc8e7fa9852ba30652e1c8050283f8
[ "Unlicense" ]
null
null
null
score/urls.py
kien5436/pick-color
30fd963f47bc8e7fa9852ba30652e1c8050283f8
[ "Unlicense" ]
null
null
null
from django.urls import path from .views import ScoreView urlpatterns = [ path('', ScoreView.as_view()), ]
18.166667
32
0.724771
14
109
5.571429
0.714286
0
0
0
0
0
0
0
0
0
0
0
0.146789
109
6
33
18.166667
0.83871
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.4
0
0.4
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
5d7c075c790fee9ea5441eeb65909439939c717b
121
py
Python
ch2-hello-world-app/pages/urls.py
balazskiss1985/djangoforbeginners
827b1b11592e851a6c4948d849ae8815f9c138c7
[ "MIT" ]
781
2017-09-20T14:18:48.000Z
2022-03-29T17:45:03.000Z
ch2-hello-world-app/pages/urls.py
balazskiss1985/djangoforbeginners
827b1b11592e851a6c4948d849ae8815f9c138c7
[ "MIT" ]
106
2018-08-26T15:02:16.000Z
2022-03-23T09:28:23.000Z
ch2-hello-world-app/pages/urls.py
balazskiss1985/djangoforbeginners
827b1b11592e851a6c4948d849ae8815f9c138c7
[ "MIT" ]
493
2017-09-21T11:51:23.000Z
2022-03-26T07:20:22.000Z
from django.urls import path from .views import homePageView urlpatterns = [ path('', homePageView, name='home'), ]
17.285714
40
0.710744
14
121
6.142857
0.714286
0
0
0
0
0
0
0
0
0
0
0
0.165289
121
6
41
20.166667
0.851485
0
0
0
0
0
0.033058
0
0
0
0
0
0
1
0
false
0
0.4
0
0.4
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
5d7efd6f9e0dade0a918e0e2273654cf5c588393
114
py
Python
src/dispatch/plugins/dispatch_core/config.py
roor0/dispatch
12c4f567096411abe62abaf61c7c124496764346
[ "Apache-2.0" ]
3,417
2020-02-23T22:54:47.000Z
2022-03-31T13:01:01.000Z
src/dispatch/plugins/dispatch_core/config.py
roor0/dispatch
12c4f567096411abe62abaf61c7c124496764346
[ "Apache-2.0" ]
607
2020-02-24T14:27:02.000Z
2022-03-30T19:15:39.000Z
src/dispatch/plugins/dispatch_core/config.py
roor0/dispatch
12c4f567096411abe62abaf61c7c124496764346
[ "Apache-2.0" ]
359
2020-02-24T19:04:43.000Z
2022-03-29T06:48:12.000Z
import logging from starlette.config import Config log = logging.getLogger(__name__) config = Config(".env")
11.4
35
0.754386
14
114
5.857143
0.642857
0
0
0
0
0
0
0
0
0
0
0
0.149123
114
9
36
12.666667
0.845361
0
0
0
0
0
0.035088
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
5d84adb90148fdad27dfb806e6725b6c47b5a09e
201
py
Python
Ekeopara_Praise/Phase 1/Python Basic 1/Day14 Tasks/Task10.py
CodedLadiesInnovateTech/-python-challenge-solutions
430cd3eb84a2905a286819eef384ee484d8eb9e7
[ "MIT" ]
6
2020-05-23T19:53:25.000Z
2021-05-08T20:21:30.000Z
Ekeopara_Praise/Phase 1/Python Basic 1/Day14 Tasks/Task10.py
CodedLadiesInnovateTech/-python-challenge-solutions
430cd3eb84a2905a286819eef384ee484d8eb9e7
[ "MIT" ]
8
2020-05-14T18:53:12.000Z
2020-07-03T00:06:20.000Z
Ekeopara_Praise/Phase 1/Python Basic 1/Day14 Tasks/Task10.py
CodedLadiesInnovateTech/-python-challenge-solutions
430cd3eb84a2905a286819eef384ee484d8eb9e7
[ "MIT" ]
39
2020-05-10T20:55:02.000Z
2020-09-12T17:40:59.000Z
'''10. Write a Python program to use double quotes to display strings.''' def double_quote_string(string): ans = f"\"{string}\"" return ans print(double_quote_string('This is working already'))
40.2
73
0.721393
30
201
4.7
0.733333
0.156028
0.241135
0
0
0
0
0
0
0
0
0.011696
0.149254
201
5
74
40.2
0.812866
0.333333
0
0
0
0
0.186047
0
0
0
0
0
0
1
0.25
false
0
0
0
0.5
0.25
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
3
5d9aa52526fce7f89c2032850d966f4b8e7caa7b
489
py
Python
tests/input/custom_serializer.py
raztud/dagger
7b394138c139e3b4fdf228e3d34359f1ae6bdd7a
[ "Apache-2.0" ]
null
null
null
tests/input/custom_serializer.py
raztud/dagger
7b394138c139e3b4fdf228e3d34359f1ae6bdd7a
[ "Apache-2.0" ]
null
null
null
tests/input/custom_serializer.py
raztud/dagger
7b394138c139e3b4fdf228e3d34359f1ae6bdd7a
[ "Apache-2.0" ]
null
null
null
# noqa class CustomSerializer: """Custom serializer implementation to test the injection of different serialization strategies to an input.""" @property def extension(self) -> str: # noqa return "ext" def serialize(self, value: str) -> bytes: # noqa return b"serialized" def deserialize(self, serialized_value: bytes) -> str: # noqa return "deserialized" def __repr__(self) -> str: # noqa return "CustomSerializerInstance"
25.736842
115
0.658487
52
489
6.096154
0.615385
0.126183
0.123028
0.107256
0
0
0
0
0
0
0
0
0.247444
489
18
116
27.166667
0.861413
0.267894
0
0
0
0
0.140805
0.068966
0
0
0
0
0
1
0.4
false
0
0
0.4
0.9
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
5dbf83b53ce4afc7fae90df19e0dcf5b4a710bab
2,517
py
Python
AM-flask/app/timezone_handler.py
leglars/ThinkingofYou
5ed3c91049e55d93a7f540b5168095188d114755
[ "MIT" ]
null
null
null
AM-flask/app/timezone_handler.py
leglars/ThinkingofYou
5ed3c91049e55d93a7f540b5168095188d114755
[ "MIT" ]
null
null
null
AM-flask/app/timezone_handler.py
leglars/ThinkingofYou
5ed3c91049e55d93a7f540b5168095188d114755
[ "MIT" ]
null
null
null
from datetime import datetime, timedelta, tzinfo class UTC10(tzinfo): def utcoffset(self, dt): return timedelta(hours=10) + self.dst(dt) def dst(self, dt): """ this function is for daylight saving time, Brisbane don't have a dst, so we can directly return 0. """ return timedelta(0) def tzname(self, dt): return "UTC +10" class UTC(tzinfo): """ Google App Engine default timezone is UTC +0:00 The default UTC timezone don't need to concern dst, so we can directly return 0 """ def utcoffset(self, dt): return timedelta(hours=0) + self.dst(dt) def dst(self, dt): return timedelta(0) def tzname(self, dt): return "UTC +0" class UTC2(tzinfo): """ Google App Engine default timezone is UTC +0:00 The default UTC timezone don't need to concern dst, so we can directly return 0 """ def utcoffset(self, dt): return timedelta(hours=2) + self.dst(dt) def dst(self, dt): return timedelta(0) def tzname(self, dt): return "UTC +2" def get_brisbane_time(): return datetime.now(UTC()).astimezone(UTC10()) # def time(): # fmt = '%Y-%m-%d %H:%M:%S %Z%z' # utc = pytz.utc # local_time = datetime.now() # # utc_dt = utc.localize(datetime.now()) # utc_dt.strftime(fmt) # # au_tz = pytz.timezone('Australia/Brisbane') # au_dt = utc_dt.astimezone(au_tz) # au_dt.strftime(fmt) # # # return local_time, utc_dt, au_dt, bri # # # def get_brisbane_time(): # fmt = '%Y-%m-%d %H:%M:%S' # utc = pytz.utc # # """ # On Google app engine, the time zone is set at UTC as default # P.S. in summer, due to day-light-saving, British local time has 1 hour faster than UTC time. # but Brisbane time needn't to worry about that because Brisbane hasn't Day-light-saving. However, Sydney does have. 
# """ # utc_dt = utc.localize(datetime.now()) # utc_dt.strftime(fmt) # # brisbane_timezone = pytz.timezone('Australia/Brisbane') # Brisbane is UTC +10:00 # brisbane_time = brisbane_timezone.normalize(utc_dt.astimezone(brisbane_timezone)) # return brisbane_time.strftime(fmt) # # # def show_time(): # t = time() # a = "" # for i in t: # a += str(i) + '\n' # return a # print(get_brisbane_time()) # 2016-06-08 18:03:54.135738 # 2016-06-08 18:03:54.135783+00:00 # 2016-06-09 04:03:54.135783+10:00 # 2016-06-09 04:03:54.135783+10:00 # 2016-06-08 18:03:54.135783+00:00
25.683673
121
0.619388
387
2,517
3.96124
0.276486
0.035225
0.062622
0.068493
0.494455
0.494455
0.485323
0.430528
0.413568
0.361383
0
0.07664
0.243147
2,517
97
122
25.948454
0.728084
0.637664
0
0.5
0
0
0.023602
0
0
0
0
0
0
1
0.416667
false
0
0.041667
0.375
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
5dda7a0a5d59d5c8f4aa444dfcf16aece28f14fc
234
py
Python
array_remove.py
realbakari/arrays-python
51cfb80f1ec610ae6aea55dd6bd171bb8c0415c0
[ "MIT" ]
null
null
null
array_remove.py
realbakari/arrays-python
51cfb80f1ec610ae6aea55dd6bd171bb8c0415c0
[ "MIT" ]
null
null
null
array_remove.py
realbakari/arrays-python
51cfb80f1ec610ae6aea55dd6bd171bb8c0415c0
[ "MIT" ]
null
null
null
import array as arr numbers = arr.array('i', [10, 11, 12, 12, 13]) numbers.remove(12) print(numbers) # Output: array('i', [10, 11, 12, 13]) print(numbers.pop(2)) # Output: 12 print(numbers) # Output: array('i', [10, 11, 13])
23.4
55
0.611111
39
234
3.666667
0.384615
0.125874
0.167832
0.20979
0.517483
0.41958
0.41958
0.41958
0
0
0
0.150259
0.175214
234
9
56
26
0.590674
0.34188
0
0.333333
0
0
0.006667
0
0
0
0
0
0
1
0
false
0
0.166667
0
0.166667
0.5
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
3
b9099fa7fdf469f4ae131bafff76e18a841710d8
78
py
Python
flask_app/__init__.py
Benton-Michael/dojo_survey_validation
3699c1dce2a25b272a9d17854de2cc777f737905
[ "MIT" ]
null
null
null
flask_app/__init__.py
Benton-Michael/dojo_survey_validation
3699c1dce2a25b272a9d17854de2cc777f737905
[ "MIT" ]
null
null
null
flask_app/__init__.py
Benton-Michael/dojo_survey_validation
3699c1dce2a25b272a9d17854de2cc777f737905
[ "MIT" ]
null
null
null
from flask import Flask, session app = Flask(__name__) app.secret_key = " "
13
32
0.717949
11
78
4.636364
0.727273
0
0
0
0
0
0
0
0
0
0
0
0.179487
78
5
33
15.6
0.796875
0
0
0
0
0
0.012821
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
f8e0ddc6a8648758bc22c5dd92137ea396f1a550
14,662
py
Python
residual_unet.py
alex-s-hong/redefining_u-net
f0e74eccaaea6a57ee84d70cfb9ca5e179f77a57
[ "MIT" ]
null
null
null
residual_unet.py
alex-s-hong/redefining_u-net
f0e74eccaaea6a57ee84d70cfb9ca5e179f77a57
[ "MIT" ]
null
null
null
residual_unet.py
alex-s-hong/redefining_u-net
f0e74eccaaea6a57ee84d70cfb9ca5e179f77a57
[ "MIT" ]
null
null
null
import torch import torch.nn as nn from torchvision import models from torch.nn.functional import sigmoid as sigmoid from torch.autograd import Variable #This file contains all of the Residual-U-Net variants, including Gaussian Dropout #Gaussian Dropout is taken from https://github.com/j-min/Dropouts/blob/master/Gaussian_Variational_Dropout.ipynb #note that alpha = p/1-p, so when alpha = 1, p = 0.5 class GaussianDropout(nn.Module): def __init__(self, alpha=1.0): super(GaussianDropout, self).__init__() self.alpha = torch.Tensor([alpha]) def forward(self, x): """ Sample noise e ~ N(1, alpha) Multiply noise h = h_ * e """ if self.train(): # N(1, alpha) epsilon = torch.randn(x.size()) * self.alpha + 1 epsilon = Variable(epsilon) if x.is_cuda: epsilon = epsilon.cuda() return x * epsilon else: return x ''' Double_conv: series of 2 3x3 conv Note: In the unet paper no padding is added. To preseve the spatial dimensions, padding = 1 is included ''' def double_conv(in_channels, out_channels): return nn.Sequential( nn.Conv2d(in_channels, out_channels, kernel_size=(3,3), padding=1), nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True), nn.Conv2d(out_channels, out_channels,kernel_size=(3,3), padding=1), nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True) ) ''' Upblock follows the upblock from U-Net: conv->conv->upconv ''' class upblock(nn.Module): def __init__(self, in_channels, out_channels, merge_type = "concat"): super(upblock, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.merge_type = merge_type self.conv = double_conv(in_channels, out_channels) if(merge_type == "concat"): # Divide out_channels by 2, will be merged with the preceding down_blocks self.upconv = nn.ConvTranspose2d(out_channels,out_channels // 2,kernel_size=2,stride=2) elif(merge_type == "add"): self.upconv = nn.ConvTranspose2d(out_channels,out_channels ,kernel_size=2,stride=2) def forward(self, x): # Two convolutions followed by upconvolution # Chosen upconvolution is 
Transpose Convolution x = self.conv(x) if(self.merge_type == "concat"): x = self.upconv(x) elif(self.merge_type == "add"): x = self.upconv(x) return x ''' lincomb is a weighted combination of feature-maps from the encoder and decoder: a(encoder) + b(decoder) ''' class lincomb(nn.Module): def __init__(self, in_channels): super(lincomb, self).__init__() self.conv1 = nn.Conv2d(in_channels,in_channels, kernel_size = (1,1)) self.conv2 = nn.Conv2d(in_channels,in_channels, kernel_size = (1,1)) self.in_channels = in_channels def forward(self, e,d): enc = self.conv1(e) dec = self.conv2(d) return enc + dec ''' lincomb_net is Residual U-net with the merge function defined as weighted combination of feature-maps ''' class lincomb_net(nn.Module): def __init__(self, num_classes = 2,in_channels = 3, dropout = 0): super(lincomb_net, self).__init__() resnet = models.resnet18(pretrained=True) self.MaxPool = nn.MaxPool2d(kernel_size=2, stride=2) self.num_classes = num_classes self.in_channels = in_channels self.dropout = dropout self.drop = nn.Dropout2d() self.preprocess = nn.Sequential(*list(resnet.children()))[:4] #64, 128, 256 self.down1 = nn.Sequential(*list(resnet.children()))[4] # 64, 128, 256 self.down2 = nn.Sequential(*list(resnet.children()))[5] #128, 64, 128 self.down3 = nn.Sequential(*list(resnet.children()))[6] # 256, 32, 64 self.up1 = upblock(in_channels = 256, out_channels = 256) #Returns out_channels / 2 , and spatial size x 2,(128x64x128) self.up2 = upblock(in_channels = 128, out_channels = 128) # 64 x 128 x 256 self.up3 = nn.Conv2d(128,64,1) # Note down1 and preprocess has the same spatial dimension self.lincomb1 = lincomb(128) self.lincomb2 = lincomb(64) self.lincomb3 = lincomb(64) self.up4 = upblock(in_channels = 64, out_channels = 256) self.up5 = upblock(in_channels = 128, out_channels= 128) self.logits = nn.Conv2d(64, num_classes, kernel_size=1) self.drop = nn.Dropout2d(0.3) def forward(self,x): base = self.preprocess(x) down1 = self.down1(base) down2 = 
self.down2(down1) down3 = self.down3(down2) up = self.lincomb1(down2,self.up1(down3)) # 128 x 64 x 128 if(self.dropout == "all"): up = self.drop(up) up = self.lincomb2(down1,self.up2(up)) #64 x 128 x 256 if(self.dropout == "all"): up = self.drop(up) up = self.lincomb3(base,up) #64x128x256 if(self.dropout == "all"): up = self.drop(up) up = self.up5(self.up4(up))#64x512x1028 logits_layer = self.logits(up) return logits_layer ''' Normal Residual U-net ''' class res_unet(nn.Module): def __init__(self, num_classes = 2,in_channels = 3, dropout = 0,drop_type = "normal"): super(res_unet, self).__init__() resnet = models.resnet18(pretrained=True) self.MaxPool = nn.MaxPool2d(kernel_size=2, stride=2) self.num_classes = num_classes self.in_channels = in_channels self.dropout = dropout self.drop = nn.Dropout2d() self.preprocess = nn.Sequential(*list(resnet.children()))[:4] #64, 128, 256 self.down1 = nn.Sequential(*list(resnet.children()))[4] # 64, 128, 256 self.down2 = nn.Sequential(*list(resnet.children()))[5] #128, 64, 128 self.down3 = nn.Sequential(*list(resnet.children()))[6] # 256, 32, 64 self.up1 = upblock(in_channels = 256, out_channels = 256) #Returns out_channels / 2 , and spatial size x 2 self.up2 = upblock(in_channels = 256, out_channels = 128) self.up3 = nn.Conv2d(128,64,1) # Note down1 and preprocess has the same spatial dimension self.up4 = upblock(in_channels = 128, out_channels = 256) self.up5 = upblock(in_channels = 128, out_channels= 128) self.logits = nn.Conv2d(64, num_classes, kernel_size=1) if(drop_type == "normal"): self.drop = nn.Dropout2d() elif(drop_type == "gaussian"): self.drop = GaussianDropout() def forward(self,x): base = self.preprocess(x) down1 = self.down1(base) down2 = self.down2(down1) down3 = self.down3(down2) up = torch.cat([self.up1(down3),down2],dim = 1) #256 x 64 x 128 if(self.dropout ==3 or self.dropout == "all"): up = self.drop(up) up = torch.cat([self.up2(up),down1],dim = 1) #128 x 128 x 256 if(self.dropout == 2 or self.dropout== 
"all"): up = self.drop(up) up = torch.cat([self.up3(up),base],dim = 1) #128 x 128 x 256 if(self.dropout == 1 or self.dropout=="all"): up = self.drop(up) up = self.up5(self.up4(up)) #64 x 512 x 1024, this is just to match the channels and spatial dimensions logits_layer = self.logits(up) return logits_layer ''' Residual Unet with Add instead of concatenation for the merge function ''' class res_unet_add(nn.Module): def __init__(self, num_classes = 2,in_channels = 3, dropout = 0): super(res_unet_add, self).__init__() resnet = models.resnet18(pretrained=True) self.MaxPool = nn.MaxPool2d(kernel_size=2, stride=2) self.num_classes = num_classes self.in_channels = in_channels self.dropout = dropout self.drop = nn.Dropout2d() self.preprocess = nn.Sequential(*list(resnet.children()))[:4] #64, 128, 256 self.down1 = nn.Sequential(*list(resnet.children()))[4] # 64, 128, 256 self.down2 = nn.Sequential(*list(resnet.children()))[5] #128, 64, 128 self.down3 = nn.Sequential(*list(resnet.children()))[6] # 256, 32, 64 self.up1 = upblock(in_channels = 256, out_channels = 128,merge_type = "add") #128x64x128 self.up2 = upblock(in_channels = 128, out_channels = 64,merge_type = "add") #64x128x256 self.up4 = upblock(in_channels = 64, out_channels = 64,merge_type="add") self.up5 = upblock(in_channels = 64, out_channels= 64,merge_type="add") self.logits = nn.Conv2d(64, num_classes, kernel_size=1) self.drop = nn.Dropout2d() def forward(self,x): base = self.preprocess(x) down1 = self.down1(base) down2 = self.down2(down1) down3 = self.down3(down2) up = self.up1(down3)+down2 #128 x 64 x 128 up = self.up2(up)+down1 #64 x 128 x 256 up = up+base #64 x 128 x 256 up = self.up5(self.up4(up)) #64 x 512 x 1024 logits_layer = self.logits(up) return logits_layer ''' Gate module to merge encoder with decoder ''' class gate(nn.Module): def __init__(self, in_channels): super(gate, self).__init__() self.conv1 = nn.Conv2d(in_channels,in_channels, kernel_size = (1,1)) self.conv2 = 
nn.Conv2d(in_channels,in_channels, kernel_size = (1,1)) self.in_channels = in_channels def forward(self, e,d): #d is the decoder at layer i #e is the encoder at layer i alpha = torch.sigmoid(self.conv1(d)) beta = torch.sigmoid(self.conv2(e)) return (1+alpha)*d + (1-alpha)*(beta*e) #Gated Residual Unet class res_unet_gate(nn.Module): def __init__(self, num_classes = 2,in_channels = 3, dropout = 0,drop_type = "gaussian"): super(res_unet_gate, self).__init__() resnet = models.resnet18(pretrained=True) self.MaxPool = nn.MaxPool2d(kernel_size=2, stride=2) self.num_classes = num_classes self.in_channels = in_channels self.dropout = dropout if(drop_type == "normal"): self.drop = nn.Dropout2d() elif(drop_type == "gaussian"): self.drop = GaussianDropout(alpha=1) self.preprocess = nn.Sequential(*list(resnet.children()))[:4] #64, 128, 256 self.down1 = nn.Sequential(*list(resnet.children()))[4] # 64, 128, 256 self.down2 = nn.Sequential(*list(resnet.children()))[5] #128, 64, 128 self.down3 = nn.Sequential(*list(resnet.children()))[6] # 256, 32, 64 self.up1 = upblock(in_channels = 256, out_channels = 128,merge_type = "add") #128x64x128 self.up2 = upblock(in_channels = 128, out_channels = 64,merge_type = "add") #64x128x256 self.up3 = upblock(in_channels = 64, out_channels = 64,merge_type="add") self.gate1 = gate(128) self.gate2 = gate(64) self.gate3 = gate(64) self.up4 = upblock(in_channels = 64, out_channels= 64,merge_type="add") self.logits = nn.Conv2d(64, num_classes, kernel_size=1) def forward(self,x): base = self.preprocess(x) down1 = self.down1(base) down2 = self.down2(down1) down3 = self.down3(down2) up = self.gate1(down2,self.up1(down3)) #128 x 64 x 128 if(self.dropout == 1 or self.dropout=="all"): up = self.drop(up) up = self.gate2(down1,self.up2(up)) #64 x 128 x 256 if(self.dropout=="all"): up = self.drop(up) up = self.gate3(base,up) #64 x 128 x 256 if(self.dropout=="all"): up=self.drop(up) up = self.up4(self.up3(up)) #64 x 512 x 1024 logits_layer = self.logits(up) 
return logits_layer #Film Paper: https://arxiv.org/abs/1709.07871 # Implementation to merge encoder and decoder using FiLM class film(nn.Module): def __init__(self, in_channels): super(film, self).__init__() self.conv1 = nn.Conv2d(in_channels,in_channels, kernel_size = (1,1)) self.conv2 = nn.Conv2d(in_channels,in_channels, kernel_size = (1,1)) self.in_channels = in_channels def forward(self, e,d): #e is the conditioning input #e and d must be of the same shape! gamma = self.conv1(e) beta = self.conv2(e) return gamma*d + beta ''' Residual U-net with merge function as FiLM ''' class film_unet(nn.Module): def __init__(self, num_classes = 2,in_channels = 3, dropout = 0): super(film_unet, self).__init__() resnet = models.resnet18(pretrained=True) self.MaxPool = nn.MaxPool2d(kernel_size=2, stride=2) self.num_classes = num_classes self.in_channels = in_channels self.dropout = dropout self.drop = nn.Dropout2d() self.preprocess = nn.Sequential(*list(resnet.children()))[:4] #64, 128, 256 self.down1 = nn.Sequential(*list(resnet.children()))[4] # 64, 128, 256 self.down2 = nn.Sequential(*list(resnet.children()))[5] #128, 64, 128 self.down3 = nn.Sequential(*list(resnet.children()))[6] # 256, 32, 64 self.up1 = upblock(in_channels = 256, out_channels = 256) #Returns out_channels / 2 , and spatial size x 2,(128x64x128) self.up2 = upblock(in_channels = 128, out_channels = 128) # 64 x 128 x 256 self.up3 = nn.Conv2d(128,64,1) # Note down1 and preprocess has the same spatial dimension self.film1 = film(128) self.film2 = film(64) self.film3 = film(64) self.up4 = upblock(in_channels = 64, out_channels = 256) self.up5 = upblock(in_channels = 128, out_channels= 128) self.logits = nn.Conv2d(64, num_classes, kernel_size=1) self.drop = nn.Dropout2d(0.3) def forward(self,x): base = self.preprocess(x) down1 = self.down1(base) down2 = self.down2(down1) down3 = self.down3(down2) up = (self.film1(down2,self.up1(down3))) # 128 x 64 x 128 if(self.dropout == 1 or self.dropout == "all"): 
up=self.drop(up) up = self.film2(down1,self.up2(up)) #64 x 128 x 256 if(self.dropout == "all"): up=self.drop(up) up = self.film3(base,up) #64x128x256 if(self.dropout == "all" or self.dropout == 3): up=self.drop(up) up = self.drop(self.up5(self.up4(up)))#64x512x1028 logits_layer = self.logits(up) return logits_layer
37.403061
127
0.611513
2,036
14,662
4.269155
0.105108
0.07133
0.036815
0.050621
0.738265
0.723079
0.703405
0.688334
0.660723
0.660492
0
0.078692
0.261561
14,662
392
128
37.403061
0.724116
0.115741
0
0.631579
0
0
0.010303
0
0
0
0
0
0
1
0.078947
false
0
0.018797
0.003759
0.180451
0
0
0
0
null
0
0
0
0
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
f8ee83d710940fee66e7e35dba149045e12ec42d
83
py
Python
loja/apps.py
sanderaugusto/cursoDjango
67b63a102680356994300f630cbf2c6810efc90e
[ "BSD-2-Clause" ]
null
null
null
loja/apps.py
sanderaugusto/cursoDjango
67b63a102680356994300f630cbf2c6810efc90e
[ "BSD-2-Clause" ]
80
2018-12-14T18:37:46.000Z
2022-03-11T23:39:57.000Z
ecommerce/loja/apps.py
lfbtessarolli/tcc_puc
3cbdebc5064599866a09e4518614908266dd2287
[ "MIT" ]
5
2018-12-14T19:35:28.000Z
2019-01-09T23:10:00.000Z
from django.apps import AppConfig class LojaConfig(AppConfig): name = 'loja'
13.833333
33
0.73494
10
83
6.1
0.9
0
0
0
0
0
0
0
0
0
0
0
0.180723
83
5
34
16.6
0.897059
0
0
0
0
0
0.048193
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
5d0f564c4c5badd014982eb031a22a19f6704fef
1,326
py
Python
brutus/tasking/AsyncThreadPool.py
MatthewZito/brutus
89cba5bf07ec579aa6987e4866db446c8457927f
[ "MIT" ]
30
2021-06-03T19:56:23.000Z
2022-03-31T08:17:07.000Z
brutus/tasking/AsyncThreadPool.py
MatthewZito/Brutus
89cba5bf07ec579aa6987e4866db446c8457927f
[ "MIT" ]
3
2020-09-16T14:35:24.000Z
2021-03-26T03:40:32.000Z
brutus/tasking/AsyncThreadPool.py
MatthewZito/brutus
89cba5bf07ec579aa6987e4866db446c8457927f
[ "MIT" ]
9
2021-05-26T17:24:45.000Z
2022-03-31T11:36:12.000Z
# from concurrent.futures import ThreadPoolExecutor, as_completed # from typing import Any, Callable, Generator, Union # class AsyncThreadPool: # """Implements a thread pool in which each threads runs # its routine asynchronously # """ # def __init__( # self, # callback: Callable, # tasks: Union[list, Generator], # arg: Any, # max_threads: int = 10, # ) -> None: # # thread routine # self.callback = callback # # argument passed to `callback`, after task # # make this an object if more than 1 argument is needed # self.args = arg # # units of work; this must be an iterable object # self.tasks = tasks # # the inner thread pool executor # self.thread_pool = ThreadPoolExecutor(max_workers=max_threads) # def run_task(self, thread_routine: Callable, tasks: list, **kwargs: Any): # futures = [ # self.thread_pool.submit(thread_routine, task, **kwargs) for task in tasks # ] # # wait(futures, return_when=ALL_COMPLETED) # results = [] # for future in as_completed(futures): # try: # yield future.result() # except Exception: # pass # TODO logger # return results
30.136364
87
0.582202
142
1,326
5.323944
0.570423
0.05291
0.037037
0
0
0
0
0
0
0
0
0.003352
0.325038
1,326
43
88
30.837209
0.841341
0.935897
0
null
0
null
0
0
null
0
0
0.023256
null
1
null
true
0
0
null
null
null
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
1
0
0
0
1
0
0
0
0
0
0
3
5d1e3bb2519377e10f8d01a401730835679de0ff
4,122
py
Python
lexicon/tests/providers/test_transip.py
HelixEducation/lexicon
9941a61a3b208c5b35602432a75a814394e34875
[ "MIT" ]
null
null
null
lexicon/tests/providers/test_transip.py
HelixEducation/lexicon
9941a61a3b208c5b35602432a75a814394e34875
[ "MIT" ]
null
null
null
lexicon/tests/providers/test_transip.py
HelixEducation/lexicon
9941a61a3b208c5b35602432a75a814394e34875
[ "MIT" ]
1
2020-07-13T21:45:08.000Z
2020-07-13T21:45:08.000Z
"""Integration tests for Transip""" import os from tempfile import mkstemp from unittest import TestCase import pytest from lexicon.tests.providers.integration_tests import IntegrationTestsV2 FAKE_KEY = b""" -----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAxV08IlJRwNq9WyyGO2xRyT0F6XIBD2R5CrwJoP7gIHVU/Mhk KeK8//+MbUZtKFoeJi9lI8Cbkqe7GVk9yab6R2/vVzV21XRh+57R79nEh+QTf/vZ dg+DjUn62U4lcgoVp3sHddIi/Zi58xz2a2lGGIdolsv1x0/PmAQPULt721IG/osp RBjTtaZ8niXrOTfjH814i8kgXu74CCGu0X6kJBIezMA2wqY1ZKZYRMpfrxkEZe0t 45pEM1CmSTCqyDMpwYou9wJaDHn0ts1KvKkKBfmO4B0nqfW9Sv9rkmpBCLTtMobj dQ8EwWv1L1g9uddkPALgRODEpR4fq7PTmq2VEQIDAQABAoIBAFf4wwEZaE9qMNUe 94YtNhdZF/WCV26g/kMGpdQZR5WwNv2l5N+2rT/+jH140tcVtDKZFZ/mDnJESWV3 Hc9wmkaVYj2hGyLyCWq61CDxFGTuCLMXc0roh17HBwUtjAtU62oHsL+XtvkKxnfT BRPDjPcKBFiS+S6qKII97QWzS/XpxL47VpXcYboVunzUncIKghC93LdvPp3ukh6x HIarqyctqkksLJtLgH5ffuABCJLChetpOIfcfspjtMoji43CXXd7Y3rGWy3EzSHA s4mNb4K6r8MOlJj3HiTn9bEgL2V2q3OHSYHYXexir67vkQeN+NsC80G0uODt6Uuo Cd1RobECgYEA+O+nZYRc22jI8oqRoQeCx6cTWJoaf4OYDXcaerRMIiE7yigHNgmX LGs9RYTVrWXzjM5KHVvPvavpm/zIBoa5fA7uqdH9BjuZVLm1COXzKxF5hevZuAxr zGQWDbdvzdsihPBvwlf0dKScA/WIRW0KCqUmC6IlS/An4Y0nI05P+KsCgYEAyvby cfUPgeanBnYE3GGou3cLiurzvK3vHuQl6vVE3DcheUj/5tKTwG5Q3/7y51MKHnfH xEc/X2IePXYVy0JwpC6NHzkyJPuJ1zYlkQGSs81TUbYOk9SKi3SL9bM+3vRzYFoL GMLJuvEqIscxLNqR0xQB5eBkg8T+AVJiA7cTITMCgYEAn5/ND2OYx3ihoiUIzOEs EyonVaE7bJjNX5UH/bavOxNka3TPau8raOg7GeDbw5ykV53QGJNO2qjp24R0Hvs0 5UAN+gcU4HJHF/UdCN+q1esWqbFaopIUbbOgEJuXrcDembAzecM8la8X+9Ht19bb oYfUpZELqW4NpKwGdLU6wpECgYAfn3hI3xjKcYiGji7Vs3WZt8OZol/VfvgpxPxP bmWLNh/GCOSuLxMMQWPicpOgDSUfeCQs5bjvAJebleFxaOmp+wLL4Zp5fqOMX4hc 3nTgBNa9fXMp/0ySy9besk3SaR3s3jqqYfcSZG7fOk/kIC3mSFC/Y0Xl7fRxekeB Mq4NVwKBgQDQ+3+cgZph5geq0PUuKMvMECDuCEnG8rrr4jTCe+sRP34y1IaxJ2w6 S6p+kvTBePSqV2wWZCls6p7mhGEto+8H9b4pWdmSqccn0vFu4kekm/OU4+IxqzWQ KPeh76yhdzsFwzh+0LBPfkFgFn3YlHp0eoywNpm57MFxWx8u3U2Hkw== -----END RSA PRIVATE KEY----- """ # The following fields were removed from the test fixtures: # getInfo: contacts, authcode, registrationDate, 
renewalDate # using: # find tests/fixtures/cassettes/transip/ # -name \*.json -exec sed -i 's/<contacts.*<\/contacts>//g' '{}' \; # find tests/fixtures/cassettes/transip/ # -name \*.json -exec sed -i 's/<authCode.*<\/authCode>//g' '{}' \; # find tests/fixtures/cassettes/transip/ # -name \*.json -exec sed -i 's/<registrationDate.*<\/registrationDate>//g' '{}' \; # find tests/fixtures/cassettes/transip/ # -name \*.json -exec sed -i 's/<renewalDate.*<\/renewalDate>//g' '{}' \; # Hook into testing framework by inheriting unittest.TestCase and reuse # the tests which *each and every* implementation of the interface must # pass, by inheritance from define_tests.TheTests class TransipProviderTests(TestCase, IntegrationTestsV2): """TestCase for Transip""" provider_name = 'transip' domain = 'hurrdurr.nl' # Disable setUp and tearDown, and set a real username and key in # provider_opts to execute real calls def _test_parameters_overrides(self): (_fake_fd, _fake_key) = mkstemp() _fake_file = os.fdopen(_fake_fd, 'wb', 1024) _fake_file.write(FAKE_KEY) _fake_file.close() self._fake_key = _fake_key # pylint: disable=attribute-defined-outside-init options = { 'auth_username': 'foo', 'auth_api_key': _fake_key } return options def tearDown(self): try: os.unlink(self._fake_key) except AttributeError: # Method _test_options may not have been executed, # in this case self._fake_key does not exist. pass def _filter_headers(self): return ['Cookie'] @pytest.mark.skip(reason="manipulating records by id is not supported") def test_provider_when_calling_delete_record_by_identifier_should_remove_record(self): return @pytest.mark.skip(reason=("adding docs.example.com as a CNAME target will result in a RFC 1035 error")) # pylint: disable=line-too-long def test_provider_when_calling_create_record_for_CNAME_with_valid_name_and_content(self): return
42.9375
140
0.785541
388
4,122
8.190722
0.587629
0.017621
0.021397
0.032725
0.080239
0.063877
0.063877
0.063877
0.063877
0.063877
0
0.069813
0.131247
4,122
95
141
43.389474
0.817649
0.268802
0
0.032787
0
0
0.619973
0.533512
0
1
0
0
0
1
0.081967
false
0.016393
0.081967
0.04918
0.278689
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
null
1
0
0
0
0
0
0
0
0
0
0
0
0
3
5d396158eb3001d3d2b980f858869aba55973fbd
144
py
Python
acceptability/generate.py
nyu-mll/CoLA-baselines
dd095d3646ed05a315280aaa8ed4ec84ba435b3e
[ "MIT" ]
54
2018-05-31T22:57:28.000Z
2022-03-17T13:25:49.000Z
acceptability/generate.py
nyu-mll/CoLA-baselines
dd095d3646ed05a315280aaa8ed4ec84ba435b3e
[ "MIT" ]
4
2018-06-06T14:15:10.000Z
2020-08-07T16:35:50.000Z
acceptability/generate.py
nyu-mll/CoLA-baselines
dd095d3646ed05a315280aaa8ed4ec84ba435b3e
[ "MIT" ]
18
2018-07-10T12:18:17.000Z
2022-03-02T22:19:22.000Z
from acceptability.modules import LMGenerator if __name__ == '__main__': trainer = LMGenerator() trainer.load() trainer.generate()
20.571429
45
0.715278
14
144
6.785714
0.785714
0
0
0
0
0
0
0
0
0
0
0
0.180556
144
6
46
24
0.805085
0
0
0
1
0
0.055556
0
0
0
0
0
0
1
0
false
0
0.2
0
0.2
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
5d3976b7e7eae4840ffd5eeb1cad803bbc3b86cf
177
py
Python
7. dicionarios/exemplo_1.py
lusmoura/Python-workshop
41a0f82e964a0a77a95d558806e7200dc3fab052
[ "MIT" ]
1
2021-11-10T18:33:48.000Z
2021-11-10T18:33:48.000Z
7. dicionarios/exemplo_1.py
lusmoura/Python-workshop
41a0f82e964a0a77a95d558806e7200dc3fab052
[ "MIT" ]
null
null
null
7. dicionarios/exemplo_1.py
lusmoura/Python-workshop
41a0f82e964a0a77a95d558806e7200dc3fab052
[ "MIT" ]
null
null
null
localizacao = {'Brasil': 'América', 'Portugal': 'Europa', 'Espanha': 'Europa'} continente_brasil = localizacao['Brasil'] print(continente_brasil)
25.285714
41
0.60452
14
177
7.5
0.571429
0.32381
0
0
0
0
0
0
0
0
0
0
0.242938
177
6
42
29.5
0.783582
0
0
0
0
0
0.259887
0
0
0
0
0
0
1
0
false
0
0
0
0
0.2
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
5d46e1b60a4d6ccaafad5f8b9b388edffc662320
824
py
Python
apps/profiles/admin.py
JimenezJC/cozy-exchange
131576e8159df8bab2ff680283ed55e66abaaa1d
[ "MIT" ]
null
null
null
apps/profiles/admin.py
JimenezJC/cozy-exchange
131576e8159df8bab2ff680283ed55e66abaaa1d
[ "MIT" ]
null
null
null
apps/profiles/admin.py
JimenezJC/cozy-exchange
131576e8159df8bab2ff680283ed55e66abaaa1d
[ "MIT" ]
null
null
null
from django.contrib import admin from django.contrib.auth.admin import UserAdmin from django.contrib.auth.models import User # from .models import Profile # # class ProfileInline(admin.StackedInline): # """ # # """ # model = Profile # can_delete = False # verbose_name_plural = 'Profile' # fk_name = 'user' # # class CustomUserAdmin(UserAdmin): # inlines = (ProfileInline, ) # # def get_inline_instances(self, request, obj=None): # if not obj: # return list() # return super(CustomUserAdmin, self).get_inline_instances(request, obj) # # # admin.site.unregister(User) # admin.site.register(User, CustomUserAdmin) # class UserStripeAdmin(admin.ModelAdmin): # class Meta: # model = UserStripe # # admin.site.register(UserStripe, UserStripeAdmin)
24.969697
80
0.675971
88
824
6.238636
0.488636
0.054645
0.092896
0.076503
0
0
0
0
0
0
0
0
0.207524
824
32
81
25.75
0.840735
0.779126
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
3
5d6a3f6248730550523f1c8a4f0efd40df6a0c1c
1,530
py
Python
BasicsPython/MulandExp.py
MiguelCF06/PythonProjects
dfa49203c3ed1081728c7f4e565f847629662d75
[ "MIT" ]
1
2020-06-23T18:52:11.000Z
2020-06-23T18:52:11.000Z
BasicsPython/MulandExp.py
MiguelCF06/PythonProjects
dfa49203c3ed1081728c7f4e565f847629662d75
[ "MIT" ]
null
null
null
BasicsPython/MulandExp.py
MiguelCF06/PythonProjects
dfa49203c3ed1081728c7f4e565f847629662d75
[ "MIT" ]
null
null
null
print("Welcome to the Multiplication/Exponent Table App") print() name = input("Hello, What is your name: ") number = float(input("What number would you like to work with: ")) name = name.strip() print("Multiplication Table For {}".format(number)) print() print("\t\t1.0 * {} = {:.2f}".format(number, number*1.0)) print("\t\t2.0 * {} = {:.2f}".format(number, number*2.0)) print("\t\t3.0 * {} = {:.2f}".format(number, number*3.0)) print("\t\t4.0 * {} = {:.2f}".format(number, number*4.0)) print("\t\t5.0 * {} = {:.2f}".format(number, number*5.0)) print("\t\t6.0 * {} = {:.2f}".format(number, number*6.0)) print("\t\t7.0 * {} = {:.2f}".format(number, number*7.0)) print("\t\t8.0 * {} = {:.2f}".format(number, number*8.0)) print("\t\t9.0 * {} = {:.2f}".format(number, number*9.0)) print() print("Exponent Table For {}".format(number)) print() print("\t\t{} ** 1 = {:.2f}".format(number, number**1)) print("\t\t{} ** 2 = {:.2f}".format(number, number**2)) print("\t\t{} ** 3 = {:.2f}".format(number, number**3)) print("\t\t{} ** 4 = {:.2f}".format(number, number**4)) print("\t\t{} ** 5 = {:.2f}".format(number, number**5)) print("\t\t{} ** 6 = {:.2f}".format(number, number**6)) print("\t\t{} ** 7 = {:.2f}".format(number, number**7)) print("\t\t{} ** 8 = {:.2f}".format(number, number**8)) print("\t\t{} ** 9 = {:.2f}".format(number, number**9)) print() message = "{} Math is cool!".format(name) print(message) print("\t{}".format(message.lower())) print("\t\t{}".format(message.title())) print("\t\t\t{}".format(message.lower()))
42.5
66
0.579739
243
1,530
3.650206
0.205761
0.142052
0.284104
0.405862
0.506201
0.069899
0.069899
0
0
0
0
0.052555
0.104575
1,530
36
67
42.5
0.594891
0
0
0.147059
0
0
0.369693
0.015023
0
0
0
0
0
1
0
false
0
0
0
0
0.882353
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
3
5d6cd2e1496c57bb7b4a171c7c2783f735003d8d
2,789
py
Python
stubs.min/Autodesk/Revit/UI/__init___parts/DockablePaneState.py
denfromufa/ironpython-stubs
4d2b405eda3ceed186e8adca55dd97c332c6f49d
[ "MIT" ]
1
2017-07-07T11:15:45.000Z
2017-07-07T11:15:45.000Z
stubs.min/Autodesk/Revit/UI/__init___parts/DockablePaneState.py
hdm-dt-fb/ironpython-stubs
4d2b405eda3ceed186e8adca55dd97c332c6f49d
[ "MIT" ]
null
null
null
stubs.min/Autodesk/Revit/UI/__init___parts/DockablePaneState.py
hdm-dt-fb/ironpython-stubs
4d2b405eda3ceed186e8adca55dd97c332c6f49d
[ "MIT" ]
null
null
null
class DockablePaneState(object,IDisposable): """ Describes where a dockable pane window should appear in the Revit user interface. DockablePaneState(other: DockablePaneState) DockablePaneState() """ def Dispose(self): """ Dispose(self: DockablePaneState) """ pass def ReleaseUnmanagedResources(self,*args): """ ReleaseUnmanagedResources(self: DockablePaneState,disposing: bool) """ pass def SetFloatingRectangle(self,rect): """ SetFloatingRectangle(self: DockablePaneState,rect: Rectangle) When %dockPosition% is Floating,sets the rectangle used to determine the size and position of the pane when %dockPosition% is Floating. Coordinates are relative to the upper-left-hand corner of the main Revit window. """ pass def __enter__(self,*args): """ __enter__(self: IDisposable) -> object """ pass def __exit__(self,*args): """ __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """ pass def __init__(self,*args): """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass @staticmethod def __new__(self,other=None): """ __new__(cls: type,other: DockablePaneState) __new__(cls: type) """ pass def __repr__(self,*args): """ __repr__(self: object) -> str """ pass DockPosition=property(lambda self: object(),lambda self,v: None,lambda self: None) """Which part of the Revit application frame the pane should dock to. Get: DockPosition(self: DockablePaneState) -> DockPosition Set: DockPosition(self: DockablePaneState)=value """ FloatingRectangle=property(lambda self: object(),lambda self,v: None,lambda self: None) """When %dockPosition% is Floating,this rectangle determines the size and position of the pane. Coordinates are relative to the upper-left-hand corner of the main Revit window. Note: the returned Rectangle is a copy. 
In order to change the pane state,you must call SetFloatingRectangle with a modified rectangle. Get: FloatingRectangle(self: DockablePaneState) -> Rectangle """ IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None) """Specifies whether the .NET object represents a valid Revit entity. Get: IsValidObject(self: DockablePaneState) -> bool """ TabBehind=property(lambda self: object(),lambda self,v: None,lambda self: None) """Ignored unless %dockPosition% is Tabbed. The new pane will appear in a tab behind the specified existing pane ID. Get: TabBehind(self: DockablePaneState) -> DockablePaneId Set: TabBehind(self: DockablePaneState)=value """
38.205479
215
0.721406
335
2,789
5.770149
0.331343
0.06208
0.037248
0.049664
0.264356
0.264356
0.264356
0.23642
0.23642
0.23642
0
0
0.171746
2,789
72
216
38.736111
0.836797
0.344927
0
0.363636
0
0
0
0
0
0
0
0
0
1
0.363636
false
0.363636
0
0
0.590909
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
3
5377130fa1def1a4a6001506ae2a2d6c0a1fbfc5
1,478
py
Python
src/consensus/pos_quality.py
kd637xx/chia-blockchain
b82f3ba8a2953de12bddf5c5d6a33e443b51bc8b
[ "Apache-2.0" ]
null
null
null
src/consensus/pos_quality.py
kd637xx/chia-blockchain
b82f3ba8a2953de12bddf5c5d6a33e443b51bc8b
[ "Apache-2.0" ]
null
null
null
src/consensus/pos_quality.py
kd637xx/chia-blockchain
b82f3ba8a2953de12bddf5c5d6a33e443b51bc8b
[ "Apache-2.0" ]
null
null
null
from src.util.ints import uint64
from src.types.sized_bytes import bytes32

# The actual on-disk space of a plot is _expected_plot_size(k) multiplied by
# UI_ACTUAL_SPACE_CONSTANT_FACTOR.  Not used in consensus -- display only.
UI_ACTUAL_SPACE_CONSTANT_FACTOR = 0.762


def _expected_plot_size(k: int) -> uint64:
    """Expected plot size in bytes (times a constant factor) for parameter k.

    k is documented to lie between 32 and 59.  The formula aims to be
    scale-agnostic, so larger plots do not earn disproportionally more reward
    per byte; the "+ 1" grants half an extra bit per entry, which is needed
    to store the entries in the plot.
    """
    # TODO(mariano): fix formula
    return (2 * k + 1) * ((1 << k) - 1)


def quality_str_to_quality(quality_str: bytes32, k: int) -> uint64:
    """Convert a 256-bit quality string into an integer quality score.

    Reading the string's big-endian value q as a fraction d = q / 2**256,
    the result is _expected_plot_size(k) / (1 - d), computed entirely with
    big integers to avoid floating point.  Scaling by the expected plot size
    gives bigger plots a proportionally higher chance to win.
    """
    two_pow_256 = 1 << 256
    remainder = two_pow_256 - int.from_bytes(quality_str, "big")
    return two_pow_256 * _expected_plot_size(k) // remainder
42.228571
105
0.709066
256
1,478
4
0.492188
0.046875
0.046875
0.049805
0
0
0
0
0
0
0
0.032958
0.219892
1,478
34
106
43.470588
0.85516
0.681326
0
0
0
0
0.007463
0
0
0
0
0.029412
0
1
0.222222
false
0
0.222222
0
0.666667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
1
0
0
0
0
1
0
0
3
5377dcc64141c984973548b2ce4c122cb3849348
553
py
Python
cloudmesh/bridge/Bridges.py
nitesh-jaswal/cloudmesh-pi-cluster
804a7f0f93fb06161bccb4c9ff0fcecc93854747
[ "Apache-2.0" ]
null
null
null
cloudmesh/bridge/Bridges.py
nitesh-jaswal/cloudmesh-pi-cluster
804a7f0f93fb06161bccb4c9ff0fcecc93854747
[ "Apache-2.0" ]
1
2020-07-15T15:05:11.000Z
2020-07-15T16:53:21.000Z
cloudmesh/bridge/Bridges.py
nitesh-jaswal/cloudmesh-pi-cluster
804a7f0f93fb06161bccb4c9ff0fcecc93854747
[ "Apache-2.0" ]
null
null
null
#
# Stub operations targeting multiple processors that may be located remotely.
#
class Bridges:
    """Namespace of not-yet-implemented bridge-management operations."""

    @staticmethod
    def create(master=None, workers=None, name=None):
        """Create a bridge (not implemented)."""
        raise NotImplementedError

    @staticmethod
    def set(master=None, workers=None, name=None):
        """Configure a bridge (not implemented)."""
        raise NotImplementedError

    @staticmethod
    def list(hosts=None):
        """List bridges on the given hosts (not implemented)."""
        raise NotImplementedError

    @staticmethod
    def check(hosts=None):
        """Check bridge status on the given hosts (not implemented)."""
        raise NotImplementedError

    @staticmethod
    def restart(host=None):
        """Restart a bridge on the given host (not implemented)."""
        raise NotImplementedError
20.481481
53
0.690778
57
553
6.701754
0.508772
0.196335
0.366492
0.418848
0.60733
0.60733
0.356021
0.356021
0.356021
0.356021
0
0
0.240506
553
26
54
21.269231
0.909524
0.139241
0
0.625
0
0
0
0
0
0
0
0
0
1
0.3125
false
0
0
0
0.375
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
3
5387f86a54268c986318cef686546aeff3eb50e1
100
py
Python
instance/config.py
josphat-mwangi/Blog-
646f4d4caeb8c2d9ae5baa001997beb2c6d2a62e
[ "MIT" ]
null
null
null
instance/config.py
josphat-mwangi/Blog-
646f4d4caeb8c2d9ae5baa001997beb2c6d2a62e
[ "MIT" ]
null
null
null
instance/config.py
josphat-mwangi/Blog-
646f4d4caeb8c2d9ae5baa001997beb2c6d2a62e
[ "MIT" ]
null
null
null
# Instance configuration (presumably for a Flask + SQLAlchemy app, given the
# setting names -- confirm against the application factory).
# NOTE(review): database credentials and the secret key are hard-coded here.
# This file must be kept out of version control, and these values should be
# moved to environment variables or a secrets manager.
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://josphat:mwangi@localhost/blog'
SECRET_KEY='127TUMI'
50
79
0.84
12
100
6.75
1
0
0
0
0
0
0
0
0
0
0
0.041237
0.03
100
2
80
50
0.793814
0
0
0
0
0
0.574257
0.504951
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
538cfbd8484b09e734c789269fd83d4abf1acdaa
592
py
Python
verbapie/greek_reg_number.py
galenus-verbatim/Verbapy
801cd8fd0eb6c63d7f950eec48343fc002fb8b6f
[ "MIT" ]
null
null
null
verbapie/greek_reg_number.py
galenus-verbatim/Verbapy
801cd8fd0eb6c63d7f950eec48343fc002fb8b6f
[ "MIT" ]
null
null
null
verbapie/greek_reg_number.py
galenus-verbatim/Verbapy
801cd8fd0eb6c63d7f950eec48343fc002fb8b6f
[ "MIT" ]
null
null
null
import pie_extended.pipeline.tokenizers.utils.chars as chars

# Roman-numeral-style alternation with Greek letter "digits" (α β γ δ) mixed
# into the first branch.
# NOTE(review): the five alternation branches were taken as-is; they appear to
# cover different well/ill-formed numeral shapes found in the corpus.  Confirm
# against the tokenizer's test cases before modifying any branch.
GreekNumbers = r"(?:M{1,4}(?:α|β|γ|δ{0,3})(?:XC|XL|L?X{0,3})" \
               r"(?:IX|IV|V?I{0,3})|M{0,4}(?:CM|C?D|D?C{1,3})" \
               r"(?:XC|XL|L?X{0,3})(?:IX|IV|V?I{0,3})|M{0,4}" \
               r"(?:CM|CD|D?C{0,3})(?:XC|X?L|L?X{1,3})" \
               r"(?:IX|IV|V?I{0,3})|M{0,4}(?:CM|CD|D?C{0,3})" \
               r"(?:XC|XL|L?X{0,3})(?:IX|I?V|V?I{1,3}))"

# Character class built from the shared chars module's dot-like punctuation,
# extended with curly single quotes.  The name suggests apostrophes themselves
# are excluded from chars.DOTS_EXCEPT_APOSTROPHES -- verify in that module.
DOTS_EXCEPT_APOSTROPHES = r"[" + chars.DOTS_EXCEPT_APOSTROPHES + "‘’]"

# An apostrophe-like character (straight, curly, or modifier letter) that
# immediately follows a word character.
ENDING_APOSTROPHE = r"(?<=\w)([\'’ʼ])"

# A single character that is neither a word character nor whitespace, with
# any surrounding whitespace captured in groups 1 and 3.
NON_WORD_NON_SPACE = r"(\s*)([^\w\s])(\s*)"
37
70
0.459459
125
592
2.104
0.336
0.068441
0.057034
0.068441
0.315589
0.315589
0.239544
0.239544
0.239544
0.114068
0
0.06639
0.185811
592
15
71
39.466667
0.479253
0
0
0
0
0.6
0.483108
0.418919
0
0
0
0
0
1
0
false
0
0.1
0
0.1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
539495b4c416e8af7272d0fe5bc67bbb8de03dce
1,427
py
Python
shibboleth_eds/views.py
hu-berlin-cms/django-shibboleth-eds
bbc41ae988208a74350df2c4d9251af098118008
[ "Apache-2.0" ]
1
2017-05-30T09:49:45.000Z
2017-05-30T09:49:45.000Z
shibboleth_eds/views.py
hu-berlin-cms/django-shibboleth-eds
bbc41ae988208a74350df2c4d9251af098118008
[ "Apache-2.0" ]
1
2019-04-11T13:29:47.000Z
2019-04-11T13:29:47.000Z
shibboleth_eds/views.py
hu-berlin-cms/django-shibboleth-eds
bbc41ae988208a74350df2c4d9251af098118008
[ "Apache-2.0" ]
1
2019-02-28T14:47:04.000Z
2019-02-28T14:47:04.000Z
from django.shortcuts import render
from django.conf import settings
from django.utils.safestring import mark_safe


def _setting(name, default):
    """Resolve *name* from Django settings, falling back to *default*."""
    return getattr(settings, name, default)


def discovery(request):
    """Render the embedded discovery service page with its non-JS fallback URL."""
    context = {
        'non_js_url': _setting(
            'SHIBBOLETH_EDS_NON_JS_URL',
            'http://federation.org/DS/DS?entityID=https%3A%2F%2FyourentityId.edu.edu%2Fshibboleth&return=https%3A%2F%2Fyourreturn.edu%2FShibboleth.sso%2FDS%3FSAMLDS%3D1%26target%3Dhttps%3A%2F%2Fyourreturn.edu%2F'),
    }
    return render(request, 'shibboleth_eds/discovery.html', context)


def config(request):
    """Render the idpselect JS configuration, built from Django settings.

    Values that may contain markup or the literal token 'null' are wrapped
    in mark_safe so the template emits them verbatim.
    """
    context = {
        'dataSource': _setting('SHIBBOLETH_EDS_DATA_SOURCE', '/Shibboleth.sso/DiscoFeed'),
        'defaultReturn': mark_safe(_setting('SHIBBOLETH_EDS_DEFAULT_RETURN', 'null')),
        'defaultLogo': _setting('SHIBBOLETH_EDS_DEFAULT_LOGO', '/media/assets/shibboleth_eds/blank.gif'),
        'helpURL': _setting('SHIBBOLETH_EDS_HELP_URL', 'https://wiki.shibboleth.net/confluence/display/DEV/EDSDetails'),
        'preferredIdP': mark_safe(_setting('SHIBBOLETH_EDS_PREFERRED_IDPS', 'null')),
        'hiddenIdPs': mark_safe(_setting('SHIBBOLETH_EDS_HIDDEN_IDPS', 'null')),
        'showListFirst': _setting('SHIBBOLETH_EDS_SHOW_LIST_FIRST', 'false'),
        'autoFollowCookie': mark_safe(_setting('SHIBBOLETH_EDS_FOLLOW_COOKIE', 'null')),
    }
    return render(request, 'shibboleth_eds/idpselect_config.js', context,
                  content_type='application/javascript')
67.952381
271
0.752628
170
1,427
6.076471
0.470588
0.151016
0.217812
0.24395
0.255566
0.214908
0
0
0
0
0
0.014914
0.107218
1,427
20
272
71.35
0.795918
0
0
0
0
0.055556
0.541696
0.274001
0
0
0
0
0
1
0.111111
false
0
0.166667
0.111111
0.388889
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
0
0
0
3
5396f9838aecc73efde438abc55a8b6b8793e1d4
274
py
Python
2021/examples-in-class-2021-10-08/three_five_fifteen.py
ati-ozgur/course-python
38237d120043c07230658b56dc3aeb01c3364933
[ "Apache-2.0" ]
1
2021-02-04T16:59:11.000Z
2021-02-04T16:59:11.000Z
2021/examples-in-class-2021-10-08/three_five_fifteen.py
ati-ozgur/course-python
38237d120043c07230658b56dc3aeb01c3364933
[ "Apache-2.0" ]
null
null
null
2021/examples-in-class-2021-10-08/three_five_fifteen.py
ati-ozgur/course-python
38237d120043c07230658b56dc3aeb01c3364933
[ "Apache-2.0" ]
1
2019-10-30T14:37:48.000Z
2019-10-30T14:37:48.000Z
# Fizz-Buzz variant over 1..100: multiples of both 3 and 5 print "fifteen",
# multiples of only 3 print "three", of only 5 print "five", and everything
# else prints the number itself.
for number in range(1, 101):
    divisible_by_three = number % 3 == 0
    divisible_by_five = number % 5 == 0
    if divisible_by_three and divisible_by_five:
        print("fifteen")
    elif divisible_by_three:
        print("three")
    elif divisible_by_five:
        print("five")
    else:
        print(number)
19.571429
42
0.463504
37
274
3.432432
0.486486
0.188976
0.204724
0.188976
0
0
0
0
0
0
0
0.087719
0.375912
274
13
43
21.076923
0.654971
0.156934
0
0
0
0
0.071749
0
0
0
0
0
0
1
0
false
0
0
0
0
0.444444
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
3
53a593761880ba2c47da80c8a7f7f5756c576d33
138
py
Python
python_crash_course/ch10/greet_user.py
tangentspire/Python_Practice
e7f22303230a2ffa4e3f5ae57854bac9c4c3bc34
[ "Apache-2.0" ]
null
null
null
python_crash_course/ch10/greet_user.py
tangentspire/Python_Practice
e7f22303230a2ffa4e3f5ae57854bac9c4c3bc34
[ "Apache-2.0" ]
3
2020-02-11T22:58:27.000Z
2021-06-10T20:30:42.000Z
python_crash_course/ch10/greet_user.py
tangentspire/Python_Practice
e7f22303230a2ffa4e3f5ae57854bac9c4c3bc34
[ "Apache-2.0" ]
null
null
null
import json

# Read back the username previously stored as JSON and greet the user.
# NOTE(review): raises FileNotFoundError when 'username.json' is absent --
# the original behaves the same way, so that is preserved here.
filename = 'username.json'
with open(filename) as stored:
    username = json.load(stored)
print("Welcome, " + username + "!")
15.333333
36
0.681159
19
138
4.842105
0.631579
0.26087
0
0
0
0
0
0
0
0
0
0
0.166667
138
8
37
17.25
0.8
0
0
0
0
0
0.167883
0
0
0
0
0
0
1
0
false
0
0.2
0
0.2
0.2
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
53c9891d21deb53123cd23b7c02f3a5eb69ec89c
1,615
py
Python
com/preprocess/vectorize.py
Krish-Mahajan/topic-classifier
a0cc67e45589bcd46ed1eafb163b957a9708a3da
[ "Apache-2.0" ]
null
null
null
com/preprocess/vectorize.py
Krish-Mahajan/topic-classifier
a0cc67e45589bcd46ed1eafb163b957a9708a3da
[ "Apache-2.0" ]
null
null
null
com/preprocess/vectorize.py
Krish-Mahajan/topic-classifier
a0cc67e45589bcd46ed1eafb163b957a9708a3da
[ "Apache-2.0" ]
null
null
null
'''Bag-of-words vectorization helpers.

Created on Oct 22, 2017
@author: krish.mahajan
'''
import numpy as np


def tokens_to_vector_train(tokens,word_index_map,label_dict):
  '''Vectorize one training token list into a count vector plus a label slot.

  The last element of *tokens* is treated as the label; all preceding tokens
  are counted into positions given by word_index_map, and the encoded label
  is stored in the final extra slot x[-1].
  '''
  x = np.zeros(len(word_index_map)+1)
  for t in tokens[:-1]:
    j = word_index_map[t]
    x[j] +=1
  # Final slot carries the integer-encoded label of this example.
  x[-1]=label_dict[tokens[-1]]
  return x


def tokens_to_vector_test(tokens,word_index_map):
  '''Vectorize one test token list into a pure count vector (no label slot).'''
  x = np.zeros(len(word_index_map))
  for t in tokens[:]:
    j = word_index_map[t]
    x[j] +=1
  return x


def vectorize_train_data(data,word_index_map,tokenized):
  """Vectorize all training documents; returns (matrix, label encoding).

  Builds label_dict mapping each unique value of data['label'] to an int,
  then stacks one row per tokenized document (skipping tokenized[0] --
  presumably a header row; confirm with the caller).
  """
  label_dict={}
  i=0
  for label in data['label'].unique():
    label_dict[label]=i
    i+=1
  N = len(tokenized)-1
  data_vector = np.zeros((N,len(word_index_map)+1))
  i=0
  for tokens in tokenized[1:]:
    xy = tokens_to_vector_train(tokens,word_index_map,label_dict)
    data_vector[i,:] = xy
    i +=1
  return data_vector,label_dict


def vectorize_test_data(data,word_index_map,tokenized):
  """Vectorize all test documents into a count matrix.

  NOTE(review): here N = len(tokenized) while the loop iterates
  tokenized[1:] (N-1 items), so the last row of data_vector stays all
  zeros -- unlike vectorize_train_data, which uses N = len(tokenized)-1.
  This looks like an off-by-one inconsistency; confirm intended behavior
  before changing, as downstream code may rely on the current shape.
  """
  N = len(tokenized)
  data_vector = np.zeros((N,len(word_index_map)))
  i=0
  for tokens in tokenized[1:]:
    xy = tokens_to_vector_test(tokens,word_index_map)
    data_vector[i,:] = xy
    i +=1
  return data_vector
23.405797
72
0.603096
234
1,615
3.940171
0.226496
0.117137
0.156182
0.078091
0.744035
0.741866
0.741866
0.741866
0.642082
0.503254
0
0.019114
0.287307
1,615
68
73
23.75
0.781929
0.19257
0
0.432432
0
0
0.004075
0
0
0
0
0
0
1
0.108108
false
0
0.027027
0
0.243243
0
0
0
0
null
0
0
0
0
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
53cc7f8b5c51d6caf5cce3b7162e0eab6d0bfb6c
2,183
py
Python
src/Microsoft.DotNet.Helix/Sdk/tools/azure-pipelines/reporter/defs.py
ulisesh/arcade
9a9422d109520d942711e07fae8c662c20e7b6e9
[ "MIT" ]
2
2021-02-07T11:18:50.000Z
2021-02-22T17:52:06.000Z
src/Microsoft.DotNet.Helix/Sdk/tools/azure-pipelines/reporter/defs.py
ulisesh/arcade
9a9422d109520d942711e07fae8c662c20e7b6e9
[ "MIT" ]
264
2020-09-22T22:35:51.000Z
2021-03-05T16:46:09.000Z
src/Microsoft.DotNet.Helix/Sdk/tools/azure-pipelines/reporter/defs.py
ulisesh/arcade
9a9422d109520d942711e07fae8c662c20e7b6e9
[ "MIT" ]
1
2021-07-15T00:32:07.000Z
2021-07-15T00:32:07.000Z
from typing import List


class TestResult:
    """Value object describing a single test's outcome.

    All state is captured at construction time and exposed through
    read-only properties.
    """

    def __init__(self, name, kind, type_name, method, duration, result,
                 exception_type, failure_message, stack_trace, skip_reason,
                 attachments):
        """
        :type name: unicode
        :type kind: unicode
        :type type_name: unicode
        :type method: unicode
        :type duration: float
        :type result: unicode
        :type exception_type: unicode
        :type failure_message: unicode
        :type stack_trace: unicode
        :type skip_reason: unicode
        :type attachments: List[TestResultAttachment]
        """
        self._name = name
        self._kind = kind
        self._type = type_name
        self._method = method
        self._duration_seconds = duration
        self._result = result
        self._exception_type = exception_type
        self._failure_message = failure_message
        self._stack_trace = stack_trace
        self._skip_reason = skip_reason
        self._attachments = attachments
        # Bug fix: the `output` property reads self._output, but nothing ever
        # assigned it, so accessing `.output` raised AttributeError.  Default
        # it to None until a producer actually supplies captured output.
        self._output = None

    @property
    def name(self):
        return self._name

    @property
    def kind(self):
        return self._kind

    @property
    def type(self):
        return self._type

    @property
    def method(self):
        return self._method

    @property
    def duration_seconds(self):
        return self._duration_seconds

    @property
    def result(self):
        return self._result

    @property
    def exception_type(self):
        return self._exception_type

    @property
    def failure_message(self):
        return self._failure_message

    @property
    def stack_trace(self):
        return self._stack_trace

    @property
    def skip_reason(self):
        return self._skip_reason

    @property
    def output(self):
        # Captured test output; None unless set after construction.
        return self._output

    @property
    def attachments(self):
        return self._attachments


class TestResultAttachment:
    """A named blob of text attached to a test result."""

    def __init__(self, name, text):
        """
        :type name: unicode
        :type text: unicode
        """
        self._name = name
        self._text = text

    @property
    def name(self):
        return self._name

    @property
    def text(self):
        return self._text
22.050505
117
0.613834
234
2,183
5.448718
0.132479
0.120784
0.153725
0.044706
0.06902
0.06902
0.06902
0.06902
0.06902
0
0
0
0.314246
2,183
98
118
22.27551
0.851703
0.151626
0
0.327869
0
0
0
0
0
0
0
0
0
1
0.262295
false
0
0.016393
0.229508
0.540984
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
53d67e936a727f8981e48ff76c997a1391a929c7
874
py
Python
deep-rl/lib/python2.7/site-packages/OpenGL/raw/EGL/EXT/create_context_robustness.py
ShujaKhalid/deep-rl
99c6ba6c3095d1bfdab81bd01395ced96bddd611
[ "MIT" ]
210
2016-04-09T14:26:00.000Z
2022-03-25T18:36:19.000Z
deep-rl/lib/python2.7/site-packages/OpenGL/raw/EGL/EXT/create_context_robustness.py
ShujaKhalid/deep-rl
99c6ba6c3095d1bfdab81bd01395ced96bddd611
[ "MIT" ]
72
2016-09-04T09:30:19.000Z
2022-03-27T17:06:53.000Z
deep-rl/lib/python2.7/site-packages/OpenGL/raw/EGL/EXT/create_context_robustness.py
ShujaKhalid/deep-rl
99c6ba6c3095d1bfdab81bd01395ced96bddd611
[ "MIT" ]
64
2016-04-09T14:26:49.000Z
2022-03-21T11:19:47.000Z
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.EGL import _types as _cs
# End users want this...
from OpenGL.raw.EGL._types import *
from OpenGL.raw.EGL import _errors
from OpenGL.constant import Constant as _C
import ctypes

# Name of the EGL extension this module binds.
_EXTENSION_NAME = 'EGL_EXT_create_context_robustness'

def _f( function ):
    # Wrap an extension entry point so it is resolved lazily on the EGL
    # platform and routed through PyOpenGL's error checker.
    return _p.createFunction( function,_p.PLATFORM.EGL,'EGL_EXT_create_context_robustness',error_checker=_errors._error_checker)

# Enum tokens defined by EGL_EXT_create_context_robustness (numeric values
# come from the extension's XML registry entry).
EGL_CONTEXT_OPENGL_RESET_NOTIFICATION_STRATEGY_EXT=_C('EGL_CONTEXT_OPENGL_RESET_NOTIFICATION_STRATEGY_EXT',0x3138)
EGL_CONTEXT_OPENGL_ROBUST_ACCESS_EXT=_C('EGL_CONTEXT_OPENGL_ROBUST_ACCESS_EXT',0x30BF)
EGL_LOSE_CONTEXT_ON_RESET_EXT=_C('EGL_LOSE_CONTEXT_ON_RESET_EXT',0x31BF)
EGL_NO_RESET_NOTIFICATION_EXT=_C('EGL_NO_RESET_NOTIFICATION_EXT',0x31BE)
46
128
0.850114
135
874
5
0.407407
0.074074
0.094815
0.071111
0.557037
0.293333
0.13037
0
0
0
0
0.017327
0.075515
874
18
129
48.555556
0.818069
0.114416
0
0
1
0
0.274151
0.274151
0
0
0.031332
0
0
1
0.076923
false
0
0.461538
0.076923
0.615385
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
3
53d760ac2207914287c835ae7b1ad1c414d581c3
145
py
Python
example/train.py
prodmodel/prodmodel
83aad9a2e3f07b182a8e90ea0d92580cb2e949fe
[ "Apache-2.0" ]
53
2019-04-28T03:50:05.000Z
2022-02-04T21:52:51.000Z
example/train.py
prodmodel/prodmodel
83aad9a2e3f07b182a8e90ea0d92580cb2e949fe
[ "Apache-2.0" ]
17
2019-04-25T01:46:46.000Z
2019-07-15T02:58:02.000Z
example/train.py
prodmodel/prodmodel
83aad9a2e3f07b182a8e90ea0d92580cb2e949fe
[ "Apache-2.0" ]
3
2019-06-20T07:47:23.000Z
2021-09-06T07:21:51.000Z
from sklearn import tree


def train(X, y):
    """Fit and return a depth-limited decision-tree classifier on (X, y).

    random_state=0 keeps the fit deterministic across runs; max_depth=10
    bounds model complexity.
    """
    model = tree.DecisionTreeClassifier(max_depth=10, random_state=0)
    # Estimator.fit returns the estimator itself, so this is the fitted model.
    return model.fit(X, y)
18.125
65
0.717241
24
145
4.25
0.75
0.039216
0
0
0
0
0
0
0
0
0
0.025
0.172414
145
7
66
20.714286
0.825
0
0
0
0
0
0
0
0
0
0
0
0
1
0.2
false
0
0.2
0
0.6
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
3
9906daf94ead75110097b280c8521236abdf5d7e
400
py
Python
Exercicios/antigos/074 - Maior e menor valores em Tupla.py
isachlopes/PythonCursoEmVideo
b3814c408b13ff404bc73c8cac1367f66cd83792
[ "MIT" ]
1
2020-04-12T20:55:21.000Z
2020-04-12T20:55:21.000Z
Exercicios/antigos/074 - Maior e menor valores em Tupla.py
isachlopes/PythonCursoEmVideo
b3814c408b13ff404bc73c8cac1367f66cd83792
[ "MIT" ]
null
null
null
Exercicios/antigos/074 - Maior e menor valores em Tupla.py
isachlopes/PythonCursoEmVideo
b3814c408b13ff404bc73c8cac1367f66cd83792
[ "MIT" ]
null
null
null
#programa que vai gerar cinco números aleatórios e colocar em uma tupla. # Depois disso, mostre a listagem de números gerados e também indique o menor e o maior valor que estão na tupla from random import randint n = (randint(1, 10), randint(1, 10), randint(1, 10), randint(1, 10), randint(1, 10)) print(f'Eu sortiei os valores: {n}') print(f'Os valores de máximo e mínimo são {max(n)} e {min(n)}.')
57.142857
112
0.7225
74
400
3.905405
0.608108
0.138408
0.17301
0.235294
0.17301
0.17301
0.17301
0.17301
0.17301
0.17301
0
0.044776
0.1625
400
6
113
66.666667
0.81791
0.455
0
0
0
0
0.37037
0
0
0
0
0
0
1
0
false
0
0.25
0
0.25
0.5
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
3
54f0c8d297d3af9b1bd08f147f275174a57cd1f4
542
py
Python
chroma-manager/tests/unit/chroma_core/lib/storage_plugin/virtual_machine_plugin.py
GarimaVishvakarma/intel-chroma
fdf68ed00b13643c62eb7480754d3216d9295e0b
[ "MIT" ]
null
null
null
chroma-manager/tests/unit/chroma_core/lib/storage_plugin/virtual_machine_plugin.py
GarimaVishvakarma/intel-chroma
fdf68ed00b13643c62eb7480754d3216d9295e0b
[ "MIT" ]
null
null
null
chroma-manager/tests/unit/chroma_core/lib/storage_plugin/virtual_machine_plugin.py
GarimaVishvakarma/intel-chroma
fdf68ed00b13643c62eb7480754d3216d9295e0b
[ "MIT" ]
null
null
null
from chroma_core.lib.storage_plugin.api.identifiers import GlobalId
from chroma_core.lib.storage_plugin.api.plugin import Plugin
from chroma_core.lib.storage_plugin.api import resources
from chroma_core.lib.storage_plugin.api import attributes

# Storage-plugin API version declared by this module.
version = 1


class Controller(resources.ScannableResource):
    # A scannable resource identified globally by its 'address' attribute.
    class Meta:
        identifier = GlobalId('address')

    address = attributes.String()


class VirtualMachine(resources.VirtualMachine):
    # Mirrors Controller's identification scheme: globally unique 'address'.
    class Meta:
        identifier = GlobalId('address')


class TestPlugin(Plugin):
    # Minimal no-op plugin; given the file's location under tests/, it is
    # presumably loaded by the unit-test harness -- confirm there.
    pass
23.565217
67
0.776753
65
542
6.353846
0.369231
0.096852
0.135593
0.164649
0.513317
0.348668
0.348668
0.188862
0
0
0
0.00216
0.145756
542
22
68
24.636364
0.889849
0
0
0.285714
0
0
0.02583
0
0
0
0
0
0
1
0
false
0.071429
0.285714
0
0.714286
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
1
0
0
3
54fc12afd54ce53b3f1f956f289a24da56d48e68
251
py
Python
graph_rl/data/__init__.py
nicoguertler/graphrl
21a1cefc53e5c457745570460de0d99e68622e57
[ "MIT" ]
1
2022-01-04T15:21:55.000Z
2022-01-04T15:21:55.000Z
graph_rl/data/__init__.py
nicoguertler/graph_rl
21a1cefc53e5c457745570460de0d99e68622e57
[ "MIT" ]
null
null
null
graph_rl/data/__init__.py
nicoguertler/graph_rl
21a1cefc53e5c457745570460de0d99e68622e57
[ "MIT" ]
null
null
null
from .env_info import EnvInfo from .sess_info import SessInfo from .parent_info import ParentInfo from .flat_transition import FlatTransition from .subtask_transition import SubtaskTransition from .segmented_replay_buffer import SegmentedReplayBuffer
35.857143
58
0.880478
31
251
6.903226
0.580645
0.140187
0
0
0
0
0
0
0
0
0
0
0.095618
251
6
59
41.833333
0.942731
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
3
0722cada7207888c55af47effd5f7925e3d421c5
306
py
Python
arrays/third_max.py
ahcode0919/python-ds-algorithms
0d617b78c50b6c18da40d9fa101438749bfc82e1
[ "MIT" ]
null
null
null
arrays/third_max.py
ahcode0919/python-ds-algorithms
0d617b78c50b6c18da40d9fa101438749bfc82e1
[ "MIT" ]
null
null
null
arrays/third_max.py
ahcode0919/python-ds-algorithms
0d617b78c50b6c18da40d9fa101438749bfc82e1
[ "MIT" ]
3
2020-10-07T20:24:45.000Z
2020-12-16T04:53:19.000Z
from typing import List


def third_max(nums: List[int]) -> int:
    """Return the third-largest distinct value of *nums*.

    Falls back to the overall maximum when fewer than three distinct
    values exist.  An empty input raises ValueError (max of an empty
    set), matching the original behavior.
    """
    distinct = set(nums)
    if len(distinct) < 3:
        return max(distinct)
    # Peel off the two largest distinct values; the remaining maximum is
    # the third-largest overall.
    for _ in (1, 2):
        distinct.remove(max(distinct))
    return max(distinct)
23.538462
49
0.627451
43
306
4.255814
0.44186
0.437158
0.284153
0.20765
0.31694
0.31694
0
0
0
0
0
0.017857
0.267974
306
12
50
25.5
0.799107
0.035948
0
0.5
0
0
0
0
0
0
0
0
0
1
0.125
false
0
0.125
0
0.5
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
072ccefbc7fbb57779e0484d3265a1ff831f5b2d
199
py
Python
python/testData/codeInsight/mlcompletion/underscoreTypeTwoStart.py
Sajaki/intellij-community
6748af2c40567839d11fd652ec77ba263c074aad
[ "Apache-2.0" ]
2
2019-04-28T07:48:50.000Z
2020-12-11T14:18:08.000Z
python/testData/codeInsight/mlcompletion/underscoreTypeTwoStart.py
Cyril-lamirand/intellij-community
60ab6c61b82fc761dd68363eca7d9d69663cfa39
[ "Apache-2.0" ]
2
2022-02-19T09:45:05.000Z
2022-02-27T20:32:55.000Z
python/testData/codeInsight/mlcompletion/underscoreTypeTwoStart.py
Cyril-lamirand/intellij-community
60ab6c61b82fc761dd68363eca7d9d69663cfa39
[ "Apache-2.0" ]
2
2020-03-15T08:57:37.000Z
2020-04-07T04:48:14.000Z
class MyClass(object): def __init__(self): self.__private_var = 42 self._private_var = 11 self.instance_var = 12 def foo(self): self.<caret> obj = MyClass()
18.090909
31
0.58794
25
199
4.28
0.6
0.149533
0.261682
0
0
0
0
0
0
0
0
0.043478
0.306533
199
11
32
18.090909
0.731884
0
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0
null
null
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
3
075179f330bc98a6a78f0dba9398cb3bc515e029
238
py
Python
customers/urls.py
LuisMlopez/crm_agile
00a2bb71a3c954f72b50f8fe6870d2d96b9d23f6
[ "MIT" ]
null
null
null
customers/urls.py
LuisMlopez/crm_agile
00a2bb71a3c954f72b50f8fe6870d2d96b9d23f6
[ "MIT" ]
null
null
null
customers/urls.py
LuisMlopez/crm_agile
00a2bb71a3c954f72b50f8fe6870d2d96b9d23f6
[ "MIT" ]
null
null
null
from customers.views import CustomerViewSet
from rest_framework.routers import DefaultRouter

# URL namespace for this app (used when reversing, e.g. 'customers:...').
app_name = 'customers'

# DefaultRouter generates the standard list/detail routes for the viewset;
# basename='customer' sets the route-name prefix (DRF convention yields
# names like 'customer-list' and 'customer-detail').
router = DefaultRouter()
router.register(r'customers', CustomerViewSet, basename='customer')

urlpatterns = router.urls
23.8
67
0.819328
26
238
7.423077
0.692308
0
0
0
0
0
0
0
0
0
0
0
0.096639
238
9
68
26.444444
0.897674
0
0
0
0
0
0.109244
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
075d3db7147f83ce9d905034d5146687fc151cc6
993
py
Python
lib/medipy/gui/image/__init__.py
bsavelev/medipy
f0da3750a6979750d5f4c96aedc89ad5ae74545f
[ "CECILL-B" ]
null
null
null
lib/medipy/gui/image/__init__.py
bsavelev/medipy
f0da3750a6979750d5f4c96aedc89ad5ae74545f
[ "CECILL-B" ]
null
null
null
lib/medipy/gui/image/__init__.py
bsavelev/medipy
f0da3750a6979750d5f4c96aedc89ad5ae74545f
[ "CECILL-B" ]
1
2022-03-04T05:47:08.000Z
2022-03-04T05:47:08.000Z
########################################################################## # MediPy - Copyright (C) Universite de Strasbourg # Distributed under the terms of the CeCILL-B license, as published by # the CEA-CNRS-INRIA. Refer to the LICENSE file or to # http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html # for details. ########################################################################## from medipy.gui.image.contour_layer import ContourLayer from crosshair import Crosshair from medipy.gui.image.image import Image, display from medipy.gui.image.image_grid import ImageGrid from medipy.gui.image.image_layer import ImageLayer from medipy.gui.image.import_raw_dialog import ImportRawDialog from medipy.gui.image.layer import Layer from medipy.gui.image.layers_panel import LayersPanel from medipy.gui.image.slice import Slice __all__ = ["ContourLayer", "Crosshair", "display", "Image", "ImageGrid", "ImageLayer", "ImportRawDialog", "Layer", "LayersPanel", "Slice"]
47.285714
86
0.669688
120
993
5.441667
0.45
0.122511
0.159265
0.220521
0.105666
0
0
0
0
0
0
0.001119
0.099698
993
20
87
49.65
0.729306
0.242699
0
0
0
0
0.147157
0
0
0
0
0
0
1
0
false
0
0.909091
0
0.909091
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
3
4acf48c3b3eb6797b4748e30f7ebccf6c46c1193
11,560
py
Python
test/model/galaxy/util/test_galaxy_util.py
AshKelly/PyAutoLens
043795966338a655339e61782253ad67cc3c14e6
[ "MIT" ]
null
null
null
test/model/galaxy/util/test_galaxy_util.py
AshKelly/PyAutoLens
043795966338a655339e61782253ad67cc3c14e6
[ "MIT" ]
null
null
null
test/model/galaxy/util/test_galaxy_util.py
AshKelly/PyAutoLens
043795966338a655339e61782253ad67cc3c14e6
[ "MIT" ]
null
null
null
import numpy as np import pytest from autolens.data.array.util import mapping_util from autolens.data.array import grids from autolens.data.array import mask from autolens.model.profiles import light_profiles as lp from autolens.model.profiles import mass_profiles as mp from autolens.model.galaxy import galaxy as g from autolens.model.galaxy.util import galaxy_util @pytest.fixture(name="grid_stack") def make_grid_stack(): ma = mask.Mask(np.array([[True, True, True, True], [True, False, False, True], [True, True, True, True]]), pixel_scale=6.0) grid_stack = grids.GridStack.grid_stack_from_mask_sub_grid_size_and_psf_shape(mask=ma, sub_grid_size=2, psf_shape=(3, 3)) # Manually overwrite a set of cooridnates to make tests of grid_stacks and defledctions straightforward grid_stack.regular[0] = np.array([1.0, 1.0]) grid_stack.regular[1] = np.array([1.0, 0.0]) grid_stack.sub[0] = np.array([1.0, 1.0]) grid_stack.sub[1] = np.array([1.0, 0.0]) grid_stack.sub[2] = np.array([1.0, 1.0]) grid_stack.sub[3] = np.array([1.0, 0.0]) grid_stack.sub[4] = np.array([-1.0, 2.0]) grid_stack.sub[5] = np.array([-1.0, 4.0]) grid_stack.sub[6] = np.array([1.0, 2.0]) grid_stack.sub[7] = np.array([1.0, 4.0]) grid_stack.blurring[0] = np.array([1.0, 0.0]) grid_stack.blurring[1] = np.array([-6.0, -3.0]) grid_stack.blurring[2] = np.array([-6.0, 3.0]) grid_stack.blurring[3] = np.array([-6.0, 9.0]) grid_stack.blurring[4] = np.array([0.0, -9.0]) grid_stack.blurring[5] = np.array([0.0, 9.0]) grid_stack.blurring[6] = np.array([6.0, -9.0]) grid_stack.blurring[7] = np.array([6.0, -3.0]) grid_stack.blurring[8] = np.array([6.0, 3.0]) grid_stack.blurring[9] = np.array([6.0, 9.0]) return grid_stack @pytest.fixture(name="padded_grid_stack") def make_padded_grid_stack(): ma = mask.Mask(np.array([[True, False]]), pixel_scale=3.0) return grids.GridStack.padded_grid_stack_from_mask_sub_grid_size_and_psf_shape(ma, 2, (3, 3)) @pytest.fixture(name='galaxy_non', scope='function') def make_galaxy_non(): return g.Galaxy() 
@pytest.fixture(name="galaxy_light") def make_galaxy_light(): return g.Galaxy(light_profile=lp.EllipticalSersic(centre=(0.1, 0.1), axis_ratio=1.0, phi=0.0, intensity=1.0, effective_radius=0.6, sersic_index=4.0)) @pytest.fixture(name="galaxy_mass") def make_galaxy_mass(): return g.Galaxy(mass_profile=mp.SphericalIsothermal(einstein_radius=1.0)) @pytest.fixture(name='galaxy_mass_x2') def make_galaxy_mass_x2(): return g.Galaxy(sis_0=mp.SphericalIsothermal(einstein_radius=1.0), sis_1=mp.SphericalIsothermal(einstein_radius=1.0)) class TestIntensitiesFromGrid: def test__no_galaxies__intensities_returned_as_0s(self, grid_stack, galaxy_non): grid_stack.regular = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]) intensities = galaxy_util.intensities_of_galaxies_from_grid(grid=grid_stack.regular, galaxies=[galaxy_non]) assert (intensities[0] == np.array([0.0, 0.0])).all() assert (intensities[1] == np.array([0.0, 0.0])).all() assert (intensities[2] == np.array([0.0, 0.0])).all() def test__galaxy_light__intensities_returned_as_correct_values(self, grid_stack, galaxy_light): grid_stack.regular = np.array([[1.0, 1.0], [1.0, 0.0], [-1.0, 0.0]]) galaxy_intensities = galaxy_light.intensities_from_grid(grid_stack.regular) util_intensities = galaxy_util.intensities_of_galaxies_from_grid(grid=grid_stack.regular, galaxies=[galaxy_light]) assert (galaxy_intensities == util_intensities).all() def test__galaxy_light_x2__intensities_double_from_above(self, grid_stack, galaxy_light): grid_stack.regular = np.array([[1.0, 1.0], [1.0, 0.0], [-1.0, 0.0]]) galaxy_intensities = galaxy_light.intensities_from_grid(grid_stack.regular) util_intensities = galaxy_util.intensities_of_galaxies_from_grid(grid=grid_stack.regular, galaxies=[galaxy_light, galaxy_light]) assert (2.0 * galaxy_intensities == util_intensities).all() def test__sub_grid_in__grid_is_mapped_to_image_grid_by_wrapper(self, grid_stack, galaxy_light): galaxy_image = galaxy_light.intensities_from_grid(grid_stack.sub) galaxy_image = 
(galaxy_image[0] + galaxy_image[1] + galaxy_image[2] + galaxy_image[3]) / 4.0 util_intensities = galaxy_util.intensities_of_galaxies_from_grid(grid=grid_stack.sub, galaxies=[galaxy_light]) assert util_intensities[0] == galaxy_image class TestSurfaceDensityFromGrid: def test__no_galaxies__surface_density_returned_as_0s(self, grid_stack, galaxy_non): grid_stack.regular = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]) surface_density = galaxy_util.surface_density_of_galaxies_from_grid(grid=grid_stack.regular, galaxies=[galaxy_non]) assert (surface_density[0] == np.array([0.0, 0.0])).all() assert (surface_density[1] == np.array([0.0, 0.0])).all() assert (surface_density[2] == np.array([0.0, 0.0])).all() def test__galaxy_mass__surface_density_returned_as_correct_values(self, grid_stack, galaxy_mass): grid_stack.regular = np.array([[1.0, 1.0], [1.0, 0.0], [-1.0, 0.0]]) galaxy_surface_density = galaxy_mass.surface_density_from_grid(grid_stack.regular) util_surface_density = galaxy_util.surface_density_of_galaxies_from_grid(grid=grid_stack.regular, galaxies=[galaxy_mass]) assert (galaxy_surface_density == util_surface_density).all() def test__galaxy_mass_x2__surface_density_double_from_above(self, grid_stack, galaxy_mass): grid_stack.regular = np.array([[1.0, 1.0], [1.0, 0.0], [-1.0, 0.0]]) galaxy_surface_density = galaxy_mass.surface_density_from_grid(grid_stack.regular) util_surface_density = galaxy_util.surface_density_of_galaxies_from_grid(grid=grid_stack.regular, galaxies=[galaxy_mass, galaxy_mass]) assert (2.0 * galaxy_surface_density == util_surface_density).all() def test__sub_grid_in__grid_is_mapped_to_image_grid_by_wrapper(self, grid_stack, galaxy_mass): galaxy_image = galaxy_mass.surface_density_from_grid(grid_stack.sub) galaxy_image = (galaxy_image[0] + galaxy_image[1] + galaxy_image[2] + galaxy_image[3]) / 4.0 util_surface_density = galaxy_util.surface_density_of_galaxies_from_grid(grid=grid_stack.sub, galaxies=[galaxy_mass]) assert util_surface_density[0] == 
galaxy_image class TestPotentialFromGrid: def test__no_galaxies__potential_returned_as_0s(self, grid_stack, galaxy_non): grid_stack.regular = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]) potential = galaxy_util.potential_of_galaxies_from_grid(grid=grid_stack.regular, galaxies=[galaxy_non]) assert (potential[0] == np.array([0.0, 0.0])).all() assert (potential[1] == np.array([0.0, 0.0])).all() assert (potential[2] == np.array([0.0, 0.0])).all() def test__galaxy_mass__potential_returned_as_correct_values(self, grid_stack, galaxy_mass): grid_stack.regular = np.array([[1.0, 1.0], [1.0, 0.0], [-1.0, 0.0]]) galaxy_potential = galaxy_mass.potential_from_grid(grid_stack.regular) util_potential = galaxy_util.potential_of_galaxies_from_grid(grid=grid_stack.regular, galaxies=[galaxy_mass]) assert (galaxy_potential == util_potential).all() def test__galaxy_mass_x2__potential_double_from_above(self, grid_stack, galaxy_mass): grid_stack.regular = np.array([[1.0, 1.0], [1.0, 0.0], [-1.0, 0.0]]) galaxy_potential = galaxy_mass.potential_from_grid(grid_stack.regular) util_potential = galaxy_util.potential_of_galaxies_from_grid(grid=grid_stack.regular, galaxies=[galaxy_mass, galaxy_mass]) assert (2.0 * galaxy_potential == util_potential).all() def test__sub_grid_in__grid_is_mapped_to_image_grid_by_wrapper(self, grid_stack, galaxy_mass): galaxy_image = galaxy_mass.potential_from_grid(grid_stack.sub) galaxy_image = (galaxy_image[0] + galaxy_image[1] + galaxy_image[2] + galaxy_image[3]) / 4.0 util_potential = galaxy_util.potential_of_galaxies_from_grid(grid=grid_stack.sub, galaxies=[galaxy_mass]) assert util_potential[0] == galaxy_image class TestDeflectionsFromGrid: def test__all_coordinates(self, grid_stack, galaxy_mass): deflections = galaxy_util.deflections_of_galaxies_from_grid_stack(grid_stack, [galaxy_mass]) assert deflections.regular[0] == pytest.approx(np.array([0.707, 0.707]), 1e-3) assert deflections.sub[0] == pytest.approx(np.array([0.707, 0.707]), 1e-3) assert 
deflections.sub[1] == pytest.approx(np.array([1.0, 0.0]), 1e-3) # assert deflection_stacks.sub.sub_grid_size == 2 assert deflections.blurring[0] == pytest.approx(np.array([1.0, 0.0]), 1e-3) def test__2_identical_lens_galaxies__deflection_angles_double(self, grid_stack, galaxy_mass): deflections = galaxy_util.deflections_of_galaxies_from_grid_stack(grid_stack, [galaxy_mass, galaxy_mass]) assert deflections.regular[0] == pytest.approx(np.array([2.0 * 0.707, 2.0 * 0.707]), 1e-3) assert deflections.sub[0] == pytest.approx(np.array([2.0 * 0.707, 2.0 * 0.707]), 1e-3) assert deflections.sub[1] == pytest.approx(np.array([2.0, 0.0]), 1e-3) # assert deflection_stacks.sub.sub_grid_size == 2 assert deflections.blurring[0] == pytest.approx(np.array([2.0, 0.0]), 1e-3) def test__1_lens_with_2_identical_mass_profiles__deflection_angles_double(self, grid_stack, galaxy_mass_x2): deflections = galaxy_util.deflections_of_galaxies_from_grid_stack(grid_stack, [galaxy_mass_x2]) assert deflections.regular[0] == pytest.approx(np.array([2.0 * 0.707, 2.0 * 0.707]), 1e-3) assert deflections.sub[0] == pytest.approx(np.array([2.0 * 0.707, 2.0 * 0.707]), 1e-3) assert deflections.sub[1] == pytest.approx(np.array([2.0, 0.0]), 1e-3) assert deflections.blurring[0] == pytest.approx(np.array([2.0, 0.0]), 1e-3)
47.966805
123
0.620675
1,603
11,560
4.16781
0.073612
0.024547
0.017961
0.029636
0.797485
0.770843
0.740757
0.712618
0.67909
0.59512
0
0.052314
0.254239
11,560
241
124
47.966805
0.722654
0.017561
0
0.341317
0
0
0.007222
0
0
0
0
0
0.179641
1
0.125749
false
0
0.053892
0.023952
0.239521
0
0
0
0
null
0
0
0
0
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
4ad1a370c25b404ded309c11155997c9c3e6faca
183
py
Python
src/platform/axis2/fingerprints/AX12.py
0x27/clusterd
0f04a4955c61aa523274e9ae35d750f4339b1e59
[ "MIT" ]
539
2015-01-08T23:59:32.000Z
2022-03-29T17:53:02.000Z
src/platform/axis2/fingerprints/AX12.py
M31MOTH/clusterd
d190b2cbaa93820e928a7ce5471c661d4559fb7c
[ "MIT" ]
21
2015-01-17T21:51:21.000Z
2019-09-20T09:23:18.000Z
src/platform/axis2/fingerprints/AX12.py
M31MOTH/clusterd
d190b2cbaa93820e928a7ce5471c661d4559fb7c
[ "MIT" ]
192
2015-01-26T20:44:14.000Z
2021-12-22T01:39:50.000Z
from src.platform.axis2.interfaces import DefaultServer class FPrint(DefaultServer): def __init__(self): super(FPrint, self).__init__() self.version = '1.2'
22.875
55
0.677596
21
183
5.52381
0.761905
0.137931
0
0
0
0
0
0
0
0
0
0.020833
0.213115
183
7
56
26.142857
0.784722
0
0
0
0
0
0.016393
0
0
0
0
0
0
1
0.2
false
0
0.2
0
0.6
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
3
4add4764e6f89b7b8ec08aa8ce427f8af47af362
134
py
Python
Proyecto/DacodesJobs/Modulos/Candidatos/urls.py
angel318/DacodesJobs
3a8bb0248ab8addf462b175e039ae935a5e34197
[ "bzip2-1.0.6" ]
null
null
null
Proyecto/DacodesJobs/Modulos/Candidatos/urls.py
angel318/DacodesJobs
3a8bb0248ab8addf462b175e039ae935a5e34197
[ "bzip2-1.0.6" ]
null
null
null
Proyecto/DacodesJobs/Modulos/Candidatos/urls.py
angel318/DacodesJobs
3a8bb0248ab8addf462b175e039ae935a5e34197
[ "bzip2-1.0.6" ]
null
null
null
from django.urls import path from .views import * urlpatterns = [ path('<int:pk>', Candidatos.as_view(), name = 'Candidatos'), ]
19.142857
64
0.671642
17
134
5.235294
0.764706
0
0
0
0
0
0
0
0
0
0
0
0.164179
134
6
65
22.333333
0.794643
0
0
0
0
0
0.134328
0
0
0
0
0
0
1
0
false
0
0.4
0
0.4
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
4aea4e5cdf183931e3a1b2c573a2298e06492b6b
139
py
Python
tools/setup_helpers/rocm.py
DavidKo3/mctorch
53ffe61763059677978b4592c8b2153b0c15428f
[ "BSD-3-Clause" ]
1
2019-07-21T02:13:22.000Z
2019-07-21T02:13:22.000Z
tools/setup_helpers/rocm.py
DavidKo3/mctorch
53ffe61763059677978b4592c8b2153b0c15428f
[ "BSD-3-Clause" ]
null
null
null
tools/setup_helpers/rocm.py
DavidKo3/mctorch
53ffe61763059677978b4592c8b2153b0c15428f
[ "BSD-3-Clause" ]
null
null
null
from .env import check_env_flag # Check if ROCM is enabled USE_ROCM = check_env_flag('USE_ROCM') ROCM_HOME = "/opt/rocm" ROCM_VERSION = ""
23.166667
37
0.755396
24
139
4.041667
0.541667
0.164948
0.247423
0
0
0
0
0
0
0
0
0
0.136691
139
5
38
27.8
0.808333
0.172662
0
0
0
0
0.150442
0
0
0
0
0
0
1
0
false
0
0.25
0
0.25
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
4af356e6ab12c33179116388c59d48ed32798001
604
py
Python
tests/settings.py
peopledoc/django-jsonfield
031ef0f9460da5ad76edf5167e1847082c66be56
[ "BSD-3-Clause" ]
null
null
null
tests/settings.py
peopledoc/django-jsonfield
031ef0f9460da5ad76edf5167e1847082c66be56
[ "BSD-3-Clause" ]
null
null
null
tests/settings.py
peopledoc/django-jsonfield
031ef0f9460da5ad76edf5167e1847082c66be56
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python import os DATABASES = { 'default': { 'ENGINE': 'django.db.backends.{engine}'.format( engine=os.environ.get('DB_ENGINE', 'sqlite3') ), 'HOST': os.environ.get('DB_HOST', ''), 'PORT': os.environ.get('DB_PORT', ''), 'NAME': os.environ.get('DB_NAME', 'jsonfield'), 'USER': os.environ.get('DB_USER', ''), 'PASSWORD': os.environ.get('DB_PASSWORD', ''), } } INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'jsonfield', ) SECRET_KEY = '334ebe58-a77d-4321-9d01-a7d2cb8d3eea'
25.166667
57
0.572848
68
604
4.970588
0.5
0.159763
0.213018
0.248521
0
0
0
0
0
0
0
0.04034
0.220199
604
23
58
26.26087
0.677282
0.033113
0
0
0
0
0.375643
0.154374
0
0
0
0
0
1
0
false
0.052632
0.052632
0
0.052632
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
3
4af623bf21f469b6527c6ecc472dca93541c6752
1,367
py
Python
books/schemas/secured/book_mutations.py
OurBooks-Team-Yuml/Books
58abbbcd761f4ec359c6ab62a36ee77da6348e43
[ "MIT" ]
null
null
null
books/schemas/secured/book_mutations.py
OurBooks-Team-Yuml/Books
58abbbcd761f4ec359c6ab62a36ee77da6348e43
[ "MIT" ]
45
2019-12-16T11:10:27.000Z
2020-05-18T07:15:15.000Z
books/schemas/secured/book_mutations.py
OurBooks-Team-Yuml/Books
58abbbcd761f4ec359c6ab62a36ee77da6348e43
[ "MIT" ]
null
null
null
from flask import request import json import graphene # type: ignore from graphene.types.datetime import Date # type: ignore from graphql import GraphQLError # type: ignore import inject # type: ignore from books.schemas.types import BookType, Upload from books.use_cases import * from books.use_cases.exceptions import * from books.use_cases.repositories import * @inject.autoparams() def get_repositories( repository: BaseBookRepository, s3: BaseS3Repository, elastic: BaseElasticRepository): return repository, s3, elastic class CreateBook(graphene.Mutation): class Arguments: id = graphene.ID() name = graphene.String(required=True) description = graphene.String(required=True) image_path = Upload() authors = graphene.List(graphene.ID, required=True) isbn = graphene.String() publishing_house = graphene.String() published_date = Date() categories = graphene.List(graphene.ID) related_book_id = graphene.ID() Output = BookType @staticmethod def mutate(root, info, **args): ### TODO Validation ### TODO Authorization if request.files.get('1'): return new_book(args, request.files.get('1', None), *get_repositories()) return new_book(args, request.files.get('image_path', None), *get_repositories())
26.803922
90
0.693489
156
1,367
5.987179
0.429487
0.042827
0.044968
0.054604
0.117773
0.068522
0.068522
0
0
0
0
0.004621
0.208486
1,367
50
91
27.34
0.858595
0.062911
0
0
0
0
0.009449
0
0
0
0
0.02
0
1
0.0625
false
0
0.3125
0.03125
0.5625
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
1
0
1
0
0
3
ab019ff1010faf737d04f56670a76e99fd53792f
132
py
Python
Backend/TuneSwitch/switch/routing.py
anandhakrishnanaji/TuneSwitch
f37e5fcfe0ce63960a72690735efb90fa614b245
[ "MIT" ]
9
2020-09-01T12:03:36.000Z
2022-03-02T18:22:10.000Z
Backend/TuneSwitch/switch/routing.py
anandhakrishnanaji/TuneSwitch
f37e5fcfe0ce63960a72690735efb90fa614b245
[ "MIT" ]
3
2021-03-30T14:18:54.000Z
2021-06-10T20:08:39.000Z
Backend/TuneSwitch/switch/routing.py
anandhakrishnanaji/TuneSwitch
f37e5fcfe0ce63960a72690735efb90fa614b245
[ "MIT" ]
5
2020-09-01T12:03:55.000Z
2022-01-08T09:10:44.000Z
from django.urls import path from . import consumers websocket_urlpatterns = [ path('ws/switch/', consumers.SwitchConsumer), ]
18.857143
49
0.75
15
132
6.533333
0.733333
0
0
0
0
0
0
0
0
0
0
0
0.143939
132
7
50
18.857143
0.867257
0
0
0
0
0
0.075188
0
0
0
0
0
0
1
0
false
0
0.4
0
0.4
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
ab0357403bc0246843fd1fb6d4aa7c4d7aad36fb
566
py
Python
generated_code_examples/python/classification/decision_tree.py
ggerrein/m2cgen
e916f555b42e3a1d46828942c6b4e5c365c6a624
[ "MIT" ]
3
2021-06-29T02:43:40.000Z
2022-03-28T07:41:59.000Z
generated_code_examples/python/classification/decision_tree.py
ggerrein/m2cgen
e916f555b42e3a1d46828942c6b4e5c365c6a624
[ "MIT" ]
null
null
null
generated_code_examples/python/classification/decision_tree.py
ggerrein/m2cgen
e916f555b42e3a1d46828942c6b4e5c365c6a624
[ "MIT" ]
3
2021-08-06T07:51:37.000Z
2022-03-28T07:41:42.000Z
import numpy as np def score(input): if (input[2]) <= (2.6): var0 = np.asarray([1.0, 0.0, 0.0]) else: if (input[2]) <= (4.8500004): if (input[3]) <= (1.6500001): var0 = np.asarray([0.0, 1.0, 0.0]) else: var0 = np.asarray([0.0, 0.3333333333333333, 0.6666666666666666]) else: if (input[3]) <= (1.75): var0 = np.asarray([0.0, 0.42857142857142855, 0.5714285714285714]) else: var0 = np.asarray([0.0, 0.0, 1.0]) return var0
33.294118
81
0.464664
78
566
3.371795
0.320513
0.106464
0.091255
0.212928
0.269962
0.212928
0.152091
0
0
0
0
0.334247
0.355124
566
16
82
35.375
0.386301
0
0
0.25
0
0
0
0
0
0
0
0
0
1
0.0625
false
0
0.0625
0
0.1875
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
ab1c6b24ce39aea8124f74ed35525ab78f437dd7
14,930
py
Python
src/smpl_perceptron.py
ccj5351/hmr_rgbd
d1dcf81d72c11e1f502f2c494cd86425f384d9cc
[ "MIT" ]
null
null
null
src/smpl_perceptron.py
ccj5351/hmr_rgbd
d1dcf81d72c11e1f502f2c494cd86425f384d9cc
[ "MIT" ]
1
2020-12-09T07:29:00.000Z
2020-12-09T07:29:00.000Z
src/smpl_perceptron.py
ccj5351/hmr_rgbd
d1dcf81d72c11e1f502f2c494cd86425f384d9cc
[ "MIT" ]
null
null
null
# !/usr/bin/env python3 # -*-coding:utf-8-*- # @file: smpl_perceptron.py # @brief: # @author: Changjiang Cai, ccai1@stevens.edu, caicj5351@gmail.com # @version: 0.0.1 # @creation date: 02-08-2019 # @last modified: Fri 02 Aug 2019 11:00:12 AM EDT from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf import json import numpy as np def _load_3_hidden_layer_saved_model_json(json_model): with open(json_model) as f: data = json.load(f) saved_model_dict = {} # data has keys: # 1) layer1: 'regressor.0.bias', 'regressor.0.weight' # 2) layer2: 'regressor.2.bias', 'regressor.2.weight' # 3) layer3: 'regressor.4.bias', 'regressor.4.weight' # 4) layer4: 'regressor.6.bias', 'regressor.6.weight' saved_keys = [ 'regressor.0.weight', 'regressor.0.bias', 'regressor.2.weight', 'regressor.2.bias', 'regressor.4.weight', 'regressor.4.bias', 'regressor.6.weight', 'regressor.6.bias', ] want_keys = [ 'weight_h1', 'b1', 'weight_h2', 'b2', 'weight_h3', 'b3', 'weight_out', 'out', ] for i in range(0, len(want_keys)): tmp = np.array(data[saved_keys[i]]).astype(np.float32) saved_model_dict[want_keys[i]] = tmp print ("{} has shape {}".format(saved_keys[i], tmp.shape)) return saved_model_dict def _load_7_hidden_layer_saved_model_json(json_model): with open(json_model) as f: data = json.load(f) print ('data.keys = {}'.format(data.keys())) for i in data.keys(): print ("{} has shape {}".format(i, np.array(data[i]).shape)) saved_model_dict = {} # data has keys: # 1) layer1: 'regressor.0.bias', 'regressor.0.weight' # 2) layer2: 'regressor.2.bias', 'regressor.2.weight' # 3) layer3: 'regressor.4.bias', 'regressor.4.weight' # 4) layer4: 'regressor.6.bias', 'regressor.6.weight' saved_keys = [ # weights and bias for convolution 'regressor.0.weight', 'regressor.0.bias', 'regressor.2.weight', 'regressor.2.bias', 'regressor.4.weight', 'regressor.4.bias', 'regressor.6.weight', 'regressor.6.bias', 'regressor.8.weight', 
'regressor.8.bias', 'regressor.10.weight', 'regressor.10.bias', 'regressor.12.weight', 'regressor.12.bias', 'regressor.14.weight', 'regressor.14.bias', # weights for PReLU 'regressor.1.weight', 'regressor.3.weight', 'regressor.5.weight', 'regressor.7.weight', 'regressor.9.weight', 'regressor.11.weight', 'regressor.13.weight', ] want_keys = [ 'weight_h1', 'b1', 'weight_h2', 'b2', 'weight_h3', 'b3', 'weight_h4', 'b4', 'weight_h5', 'b5', 'weight_h6', 'b6', 'weight_h7', 'b7', 'weight_out', 'out', 'a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'a7', ] assert (len(want_keys) == len(saved_keys)) for i in range(0, len(want_keys)): tmp = np.array(data[saved_keys[i]]).astype(np.float32) saved_model_dict[want_keys[i]] = tmp #print ("{} has shape {}".format(saved_keys[i], tmp.shape)) return saved_model_dict """ 1 hidden layer perceptron """ class One_hidden_layer_joints3d_2_SmplRegressor(object): def __init__(self, config): # Store layers weight & bias self.weights = { 'h1': tf.Variable(tf.random_normal([config.num_input, config.num_hidden_1])), #'h2': tf.Variable(tf.random_normal([config.num_hidden_1, config.num_hidden_2])), #'out': tf.Variable(tf.random_normal([config.num_hidden_2, config.num_output])) 'out': tf.Variable(tf.random_normal([config.num_hidden_1, config.num_output])) } self.biases = { 'b1': tf.Variable(tf.random_normal([config.num_hidden_1])), #'b2': tf.Variable(tf.random_normal([config.num_hidden_2])), 'out': tf.Variable(tf.random_normal([config.num_output])) } # Create model def build_model(self, x): layer = tf.add(tf.matmul(x, self.weights['h1']), self.biases['b1']) #layer = tf.add(tf.matmul(layer, self.weights['h2']), self.biases['b2']) # Output fully connected layer with a neuron for each class out_layer = tf.matmul(layer, self.weights['out']) + self.biases['out'] return out_layer """ 3 hidden layer perceptron """ class Three_hidden_layer_joints3d2LspSmplRegressor(object): def __init__(self, saved_model_json_file = 
'models/model_3_layer_perceptron/pytorchSmplRegressor145000.json'): # Store layers weight & bias self.num_input = 14*3 # 42 self.num_hidden_1 = 64 self.num_hidden_2 = 128 self.num_hidden_3 = 128 self.num_output = 24*3 + 10 # 82 self.saved_model_dict = _load_3_hidden_layer_saved_model_json(saved_model_json_file) #NOTE: """ the pretraiend weights are saved via PyTorch, but their dimension are just transposed w.r.t. the dimension defined here (i.e., Tensorflow). For example: weight_h1 : self.num_input x self.num_hidden_1 = 42 x 64 in here; but the loaded variable is in the transposed format, i.e., 64 x 42; """ keys = ['weight_h1', 'weight_h2', 'weight_h3', 'weight_out'] #sizes = [self.num_hidden_1, self.num_hidden_2, self.num_hidden_3, self.num_output] for i in range(0, len(keys)): self.saved_model_dict[keys[i]] = self.saved_model_dict[keys[i]].T # transposed #print ('{} transposed'.format(keys[i])) self.weights = { #'h1': tf.Variable(tf.random_normal([self.num_input, self.num_hidden_1])), 'h1': tf.Variable( np.reshape( self.saved_model_dict['weight_h1'], [self.num_input, self.num_hidden_1]), dtype=tf.float32), 'h2': tf.Variable( np.reshape( self.saved_model_dict['weight_h2'], [self.num_hidden_1, self.num_hidden_2]), dtype=tf.float32), 'h3': tf.Variable( np.reshape( self.saved_model_dict['weight_h3'], [self.num_hidden_2, self.num_hidden_3]), dtype=tf.float32), 'out': tf.Variable( np.reshape( self.saved_model_dict['weight_out'], [self.num_hidden_3, self.num_output]), dtype=tf.float32) } self.biases = { #'b1': tf.Variable(tf.random_normal([self.num_hidden_1])), 'b1': tf.Variable( np.reshape(self.saved_model_dict['b1'], [self.num_hidden_1]), dtype=tf.float32), 'b2': tf.Variable( np.reshape(self.saved_model_dict['b2'], [self.num_hidden_2]), dtype=tf.float32), 'b3': tf.Variable( np.reshape(self.saved_model_dict['b3'], [self.num_hidden_3]), dtype=tf.float32), 'out': tf.Variable( np.reshape(self.saved_model_dict['out'], [self.num_output] ), dtype=tf.float32), } #self.layers 
= {} # Create model #def build_model(self, x): # self.layers['layer1'] = tf.nn.relu(tf.add(tf.matmul(x, self.weights['h1']), self.biases['b1'])) # self.layers['layer2'] = tf.nn.relu(tf.add(tf.matmul(self.layers['layer1'], self.weights['h2']), self.biases['b2'])) # self.layers['layer3'] = tf.nn.relu(tf.add(tf.matmul(self.layers['layer2'], self.weights['h3']), self.biases['b3'])) # out_layer = tf.matmul(self.layers['layer3'], self.weights['out']) + self.biases['out'] # return out_layer def build_model(self, x): layer = tf.nn.relu(tf.add(tf.matmul(x, self.weights['h1']), self.biases['b1'])) layer = tf.nn.relu(tf.add(tf.matmul(layer, self.weights['h2']), self.biases['b2'])) layer = tf.nn.relu(tf.add(tf.matmul(layer, self.weights['h3']), self.biases['b3'])) out_layer = tf.matmul(layer, self.weights['out']) + self.biases['out'] return out_layer # > see How to implement PReLU activation in Tensorflow? # > at https://stackoverflow.com/questions/39975676/how-to-implement-prelu-activation-in-tensorflow def parametric_relu(_x, alphas): pos = tf.nn.relu(_x) neg = alphas * (_x - abs(_x)) * 0.5 return pos + neg """ 7 hidden layer perceptron """ class Seven_hidden_layer_joints3d2LspSmplRegressor(object): def __init__(self, saved_model_json_file = 'models/model_7_layer_perceptron/pytorchSmplRegressor15000.json' ): # Store layers weight & bias self.num_input = 14*3 # 42 self.num_hidden_1 = 64 self.num_hidden_2 = 128 self.num_hidden_3 = 256 self.num_hidden_4 = 512 self.num_hidden_5 = 512 self.num_hidden_6 = 256 self.num_hidden_7 = 128 self.num_output = 24*3 + 10 # 82 self.saved_model_dict = _load_7_hidden_layer_saved_model_json(saved_model_json_file) #NOTE: """ the pretraiend weights are saved via PyTorch, but their dimension are just transposed w.r.t. the dimension defined here (i.e., Tensorflow). 
For example: weight_h1 : self.num_input x self.num_hidden_1 = 42 x 64 in here; but the loaded variable is in the transposed format, i.e., 64 x 42; """ keys = ['weight_h1', 'weight_h2', 'weight_h3', 'weight_h4', 'weight_h5', 'weight_h6', 'weight_h7', 'weight_out'] for i in range(0, len(keys)): self.saved_model_dict[keys[i]] = self.saved_model_dict[keys[i]].T # transposed #print ('{} transposed'.format(keys[i])) self.weights = { #'h1': tf.Variable(tf.random_normal([self.num_input, self.num_hidden_1])), 'h1': tf.Variable( np.reshape( self.saved_model_dict['weight_h1'], [self.num_input, self.num_hidden_1]), dtype=tf.float32), 'h2': tf.Variable( np.reshape( self.saved_model_dict['weight_h2'], [self.num_hidden_1, self.num_hidden_2]), dtype=tf.float32), 'h3': tf.Variable( np.reshape( self.saved_model_dict['weight_h3'], [self.num_hidden_2, self.num_hidden_3]), dtype=tf.float32), 'h4': tf.Variable( np.reshape( self.saved_model_dict['weight_h4'], [self.num_hidden_3, self.num_hidden_4]), dtype=tf.float32), 'h5': tf.Variable( np.reshape( self.saved_model_dict['weight_h5'], [self.num_hidden_4, self.num_hidden_5]), dtype=tf.float32), 'h6': tf.Variable( np.reshape( self.saved_model_dict['weight_h6'], [self.num_hidden_5, self.num_hidden_6]), dtype=tf.float32), 'h7': tf.Variable( np.reshape( self.saved_model_dict['weight_h7'], [self.num_hidden_6, self.num_hidden_7]), dtype=tf.float32), 'out': tf.Variable( np.reshape( self.saved_model_dict['weight_out'], [self.num_hidden_7, self.num_output]), dtype=tf.float32), # for parametric Rectified Linear Unit (PReLU); 'a1': tf.Variable( np.reshape(self.saved_model_dict['a1'], [1]), dtype=tf.float32), 'a2': tf.Variable( np.reshape(self.saved_model_dict['a2'], [1]), dtype=tf.float32), 'a3': tf.Variable( np.reshape(self.saved_model_dict['a3'], [1]), dtype=tf.float32), 'a4': tf.Variable( np.reshape(self.saved_model_dict['a4'], [1]), dtype=tf.float32), 'a5': tf.Variable( np.reshape(self.saved_model_dict['a5'], [1]), dtype=tf.float32), 'a6': 
tf.Variable( np.reshape(self.saved_model_dict['a6'], [1]), dtype=tf.float32), 'a7': tf.Variable( np.reshape(self.saved_model_dict['a7'], [1]), dtype=tf.float32), } self.biases = { #'b1': tf.Variable(tf.random_normal([self.num_hidden_1])), 'b1': tf.Variable( np.reshape(self.saved_model_dict['b1'], [self.num_hidden_1]), dtype=tf.float32), 'b2': tf.Variable( np.reshape(self.saved_model_dict['b2'], [self.num_hidden_2]), dtype=tf.float32), 'b3': tf.Variable( np.reshape(self.saved_model_dict['b3'], [self.num_hidden_3]), dtype=tf.float32), 'b4': tf.Variable( np.reshape(self.saved_model_dict['b4'], [self.num_hidden_4]), dtype=tf.float32), 'b5': tf.Variable( np.reshape(self.saved_model_dict['b5'], [self.num_hidden_5]), dtype=tf.float32), 'b6': tf.Variable( np.reshape(self.saved_model_dict['b6'], [self.num_hidden_6]), dtype=tf.float32), 'b7': tf.Variable( np.reshape(self.saved_model_dict['b7'], [self.num_hidden_7]), dtype=tf.float32), 'out': tf.Variable( np.reshape(self.saved_model_dict['out'], [self.num_output] ), dtype=tf.float32), } #self.layers = {} # Create model def build_model(self, x): #layer = tf.nn.relu(tf.add(tf.matmul(x, self.weights['h1']), self.biases['b1'])) layer = tf.add(tf.matmul(x, self.weights['h1']), self.biases['b1']) layer = parametric_relu(layer, self.weights['a1']) layer = tf.add(tf.matmul(layer, self.weights['h2']), self.biases['b2']) layer = parametric_relu(layer, self.weights['a2']) layer = tf.add(tf.matmul(layer, self.weights['h3']), self.biases['b3']) layer = parametric_relu(layer, self.weights['a3']) layer = tf.add(tf.matmul(layer, self.weights['h4']), self.biases['b4']) layer = parametric_relu(layer, self.weights['a4']) layer = tf.add(tf.matmul(layer, self.weights['h5']), self.biases['b5']) layer = parametric_relu(layer, self.weights['a5']) layer = tf.add(tf.matmul(layer, self.weights['h6']), self.biases['b6']) layer = parametric_relu(layer, self.weights['a6']) layer = tf.add(tf.matmul(layer, self.weights['h7']), self.biases['b7']) layer = 
parametric_relu(layer, self.weights['a7']) out_layer = tf.matmul(layer, self.weights['out']) + self.biases['out'] return out_layer
43.275362
124
0.57562
1,940
14,930
4.227835
0.105155
0.054621
0.077664
0.0812
0.795294
0.792733
0.756035
0.718971
0.641063
0.594977
0
0.044522
0.271869
14,930
345
125
43.275362
0.709962
0.168118
0
0.513274
0
0
0.110159
0.010817
0
0
0
0
0.004425
1
0.039823
false
0
0.026549
0
0.106195
0.017699
0
0
0
null
0
0
0
0
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
ab48975cb122ca19a7fef1496bfe3b5d39c82774
273
py
Python
dev_tools/composectl/__main__.py
Wildertrek/gamechanger-data
d087044594c722bd373cce1a48293d1a6da5d24e
[ "MIT" ]
18
2021-04-20T20:34:01.000Z
2021-11-08T10:28:17.000Z
dev_tools/composectl/__main__.py
Wildertrek/gamechanger-data
d087044594c722bd373cce1a48293d1a6da5d24e
[ "MIT" ]
15
2021-04-20T20:31:33.000Z
2022-03-18T16:00:44.000Z
dev_tools/composectl/__main__.py
dod-advana/gamechanger-data
1cdba2a3dbc1072f5991dcfe1daea6310c8ae42b
[ "MIT" ]
8
2021-04-23T11:38:26.000Z
2021-11-17T22:42:38.000Z
from .checks import check_unique_service_names, check_env_file_exists from .cli import cli def main() -> None: try: check_unique_service_names() check_env_file_exists() except RuntimeError as e: print(e) exit(1) cli() main()
17.0625
69
0.655678
37
273
4.513514
0.594595
0.131737
0.215569
0.275449
0.491018
0.491018
0.491018
0.491018
0
0
0
0.004975
0.263736
273
15
70
18.2
0.825871
0
0
0
0
0
0
0
0
0
0
0
0
1
0.090909
true
0
0.181818
0
0.272727
0.090909
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
3
ab525d5ede87f7ae094f0154dd72a47a96a21850
57
py
Python
src/py/pe_0000.py
shanedrabing/project-euler
65add91195fdde3c99c843743205be6d0b1fe072
[ "MIT" ]
null
null
null
src/py/pe_0000.py
shanedrabing/project-euler
65add91195fdde3c99c843743205be6d0b1fe072
[ "MIT" ]
null
null
null
src/py/pe_0000.py
shanedrabing/project-euler
65add91195fdde3c99c843743205be6d0b1fe072
[ "MIT" ]
null
null
null
n = 1 for i in range(1, 10 + 1): print(n) n *= i
11.4
26
0.438596
13
57
1.923077
0.615385
0
0
0
0
0
0
0
0
0
0
0.142857
0.385965
57
4
27
14.25
0.571429
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0.25
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
ab53b4abb07be1b79d4f99f956c7a665ca7f1098
175
py
Python
problem0274.py
kmarcini/Project-Euler-Python
d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3
[ "BSD-3-Clause" ]
null
null
null
problem0274.py
kmarcini/Project-Euler-Python
d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3
[ "BSD-3-Clause" ]
null
null
null
problem0274.py
kmarcini/Project-Euler-Python
d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3
[ "BSD-3-Clause" ]
null
null
null
########################### # # #274 Divisibility Multipliers - Project Euler # https://projecteuler.net/problem=274 # # Code by Kevin Marciniak # ###########################
19.444444
47
0.502857
14
175
6.285714
0.928571
0
0
0
0
0
0
0
0
0
0
0.038462
0.108571
175
8
48
21.875
0.525641
0.6
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
3
ab547bf82406c3b240270a2b9a3ec643b8bf6b28
191
py
Python
runserver.py
HudopCz/street_book
5bf0f1b85ac1b29b3c693830c50b7c87c1f1a711
[ "MIT" ]
null
null
null
runserver.py
HudopCz/street_book
5bf0f1b85ac1b29b3c693830c50b7c87c1f1a711
[ "MIT" ]
null
null
null
runserver.py
HudopCz/street_book
5bf0f1b85ac1b29b3c693830c50b7c87c1f1a711
[ "MIT" ]
null
null
null
#!env/bin/python # Copyright (C) 2015, Availab.io(R) Ltd. All rights reserved. import sys from street_book.app import app import street_book.startup if __name__ == '__main__': app.run()
21.222222
61
0.732984
30
191
4.333333
0.8
0.153846
0
0
0
0
0
0
0
0
0
0.02454
0.146597
191
8
62
23.875
0.773006
0.39267
0
0
0
0
0.070175
0
0
0
0
0
0
1
0
true
0
0.6
0
0.6
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
3
db5b74262a3947316db780c6b8661bc557c29db2
269
py
Python
rename-fonts.py
lperezperez/font-patcher-helper
70cc65055094fabf56f070f63a94312934ceb17e
[ "MIT" ]
null
null
null
rename-fonts.py
lperezperez/font-patcher-helper
70cc65055094fabf56f070f63a94312934ceb17e
[ "MIT" ]
null
null
null
rename-fonts.py
lperezperez/font-patcher-helper
70cc65055094fabf56f070f63a94312934ceb17e
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 from helpers import fonts from sys import argv, stderr # Check arguments. if len(argv) < 2: stderr.write("Usage: ./rename-fonts.py [font_path]...") exit(1) fonts.run_in_parallel(fonts.get_font_files(argv[1:]), fonts.rename_font) # Rename fonts.
33.625
88
0.739777
44
269
4.386364
0.659091
0.11399
0
0
0
0
0
0
0
0
0
0.016598
0.104089
269
8
88
33.625
0.784232
0.193309
0
0
0
0
0.181395
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
3
db5f87691c8d73cc3164d3c601e02a55d15e4ef2
11,147
py
Python
src/test/testcases/testExecutorPutRing.py
madscientist159/sbe
63aa1d2be90d345001937953370c3f4e1536e513
[ "Apache-2.0" ]
null
null
null
src/test/testcases/testExecutorPutRing.py
madscientist159/sbe
63aa1d2be90d345001937953370c3f4e1536e513
[ "Apache-2.0" ]
null
null
null
src/test/testcases/testExecutorPutRing.py
madscientist159/sbe
63aa1d2be90d345001937953370c3f4e1536e513
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python # IBM_PROLOG_BEGIN_TAG # This is an automatically generated prolog. # # $Source: src/test/testcases/testExecutorPutRing.py $ # # OpenPOWER sbe Project # # Contributors Listed Below - COPYRIGHT 2016 # [+] International Business Machines Corp. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. See the License for the specific language governing # permissions and limitations under the License. # # IBM_PROLOG_END_TAG import testPSUUtil import testRegistry as reg import testUtil #------------------------------- # This is a Test Expected Data #------------------------------- ''' This data are the values or strings that needs to be validated for the test. 
''' ''' #------------------------------------------------------------------------------------------------------------------------------ # SBE side test data - Target - Pervasive(Core), Chiplet Id - 32, Ring ID - ec_func(224), mode - 0x0020(RING_MODE_HEADER_CHECK) #------------------------------------------------------------------------------------------------------------------------------ ''' sbe_test_data1 = ( #----------------------------------------------------------------------------------------------------- # OP Reg ValueToWrite size Test Expected Data Description #----------------------------------------------------------------------------------------------------- ["write", reg.REG_MBOX0, "0000010000F0D301", 8, "None", "Writing to MBOX0 address"], ["write", reg.REG_MBOX1, "0002002000E00020", 8, "None", "Writing to MBOX1 address"], ["write", reg.PSU_SBE_DOORBELL_REG_WO_OR, "8000000000000000", 8, "None", "Update SBE Doorbell register to interrupt SBE"], ) ''' #------------------------------------------------------------------------------------------------------------------------------ # SBE side test data - Target - Pervasive(Perv), Chiplet Id - 1, Ring ID - perv_fure(00), mode - 0x0020(RING_MODE_HEADER_CHECK) #------------------------------------------------------------------------------------------------------------------------------ ''' sbe_test_data2 = ( #-------------------------------------------------------------------------------------------------------------------------- # OP Reg ValueToWrite size Test Expected Data Description #-------------------------------------------------------------------------------------------------------------------------- ["write", reg.REG_MBOX0, "0000010000F0D301", 8, "None", "Writing to MBOX0 address"], ["write", reg.REG_MBOX1, "0002000100000020", 8, "None", "Writing to MBOX1 address"], ["write", reg.PSU_SBE_DOORBELL_REG_WO_OR, "8000000000000000", 8, "None", "Update SBE Doorbell register to interrupt SBE"], ) ''' 
#--------------------- # SBE side test data - Target - PROC CHIP, Chiplet Id - x, Ring ID - ob0_fure(118), mode - 0x0020(RING_MODE_HEADER_CHECK) #--------------------- ''' sbe_test_data3 = ( #-------------------------------------------------------------------------------------------------------------------------- # OP Reg ValueToWrite size Test Expected Data Description #-------------------------------------------------------------------------------------------------------------------------- ["write", reg.REG_MBOX0, "0000010000F0D301", 8, "None", "Writing to MBOX0 address"], ["write", reg.REG_MBOX1, "0000000600760020", 8, "None", "Writing to MBOX1 address"], ["write", reg.PSU_SBE_DOORBELL_REG_WO_OR, "8000000000000000", 8, "None", "Update SBE Doorbell register to interrupt SBE"], ) ''' #------------------------------------------------------------------------------------------------------------------------------ # SBE side test data - Target - EX, Chiplet Id - 32, Ring ID - ex_l3_fure(176), mode - 0x0020(RING_MODE_HEADER_CHECK) #------------------------------------------------------------------------------------------------------------------------------ ''' sbe_test_data4 = ( #----------------------------------------------------------------------------------------------------- # OP Reg ValueToWrite size Test Expected Data Description #----------------------------------------------------------------------------------------------------- ["write", reg.REG_MBOX0, "0000010000F0D301", 8, "None", "Writing to MBOX0 address"], ["write", reg.REG_MBOX1, "0001002000B00020", 8, "None", "Writing to MBOX1 address"], ["write", reg.PSU_SBE_DOORBELL_REG_WO_OR, "8000000000000000", 8, "None", "Update SBE Doorbell register to interrupt SBE"], ) ''' #------------------------------------------------------------------------------------------------------------------------------ # SBE side test data - Target - Invalid target 0x10, Chiplet Id - 32, Ring ID - ex_l3_refr_repr(248), mode - 
0x0020(RING_MODE_HEADER_CHECK) #------------------------------------------------------------------------------------------------------------------------------ ''' sbe_test_data5 = ( #----------------------------------------------------------------------------------------------------- # OP Reg ValueToWrite size Test Expected Data Description #----------------------------------------------------------------------------------------------------- ["write", reg.REG_MBOX0, "0000010000F0D301", 8, "None", "Writing to MBOX0 address"], ["write", reg.REG_MBOX1, "0010002000F80020", 8, "None", "Writing to MBOX1 address"], ["write", reg.PSU_SBE_DOORBELL_REG_WO_OR, "8000000000000000", 8, "None", "Update SBE Doorbell register to interrupt SBE"], ) ''' #--------------------- # Host side test data - SUCCESS #--------------------- ''' host_test_data_success = ( #---------------------------------------------------------------------------------------------------------------- # OP Reg ValueToWrite size Test Expected Data Description #---------------------------------------------------------------------------------------------------------------- ["read", reg.REG_MBOX4, "0", 8, "0000000000F0D301", "Reading Host MBOX4 data to Validate"], ) ''' #--------------------- # Host side test data - FAILURE #--------------------- ''' host_test_data_failure5 = ( #---------------------------------------------------------------------------------------------------------------- # OP Reg ValueToWrite size Test Expected Data Description #---------------------------------------------------------------------------------------------------------------- ["read", reg.REG_MBOX4, "0", 8, "0002000400F0D301", "Reading Host MBOX4 data to Validate"], ) ''' #----------------------------------------------------------------------- # Do not modify - Used to simulate interrupt on Ringing Doorbell on Host #----------------------------------------------------------------------- ''' host_polling_data = ( 
#---------------------------------------------------------------------------------------------------------------- # OP Reg ValueToWrite size Test Expected Data Description #---------------------------------------------------------------------------------------------------------------- ["read", reg.PSU_HOST_DOORBELL_REG_WO_OR, "0", 8, "8000000000000000", "Reading Host Doorbell for Interrupt"], ) #------------------------- # Main Function #------------------------- def main(): # Run Simics initially testUtil.runCycles( 10000000 ); # Intialize the class obj instances regObj = testPSUUtil.registry() # Registry obj def for operation print "\n Execute SBE Test set1 [ Put Ring ] ...\n" ''' Test Case 1 ''' # HOST->SBE data set execution regObj.ExecuteTestOp( testPSUUtil.simSbeObj, sbe_test_data1 ) print "\n Poll on Host side for INTR ...\n" #Poll on HOST DoorBell Register for interrupt regObj.pollingOn( testPSUUtil.simSbeObj, host_polling_data, 5 ) #SBE->HOST data set execution regObj.ExecuteTestOp( testPSUUtil.simSbeObj, host_test_data_success ) # Commenting out test cases for perv and proc chiplets, as there is no # way to stop cloks for these chiplets from the test framework # print "\n Execute SBE Test set2 [ Put Ring ] ...\n" # ''' # Test Case 2 # ''' # # HOST->SBE data set execution # regObj.ExecuteTestOp( testPSUUtil.simSbeObj, sbe_test_data2 ) # # print "\n Poll on Host side for INTR ...\n" # #Poll on HOST DoorBell Register for interrupt # regObj.pollingOn( testPSUUtil.simSbeObj, host_polling_data, 5 ) # # #SBE->HOST data set execution # regObj.ExecuteTestOp( testPSUUtil.simSbeObj, host_test_data_success ) # # print "\n Execute SBE Test set3 [ Put Ring ] ...\n" # ''' # Test Case 3 # ''' # # HOST->SBE data set execution # regObj.ExecuteTestOp( testPSUUtil.simSbeObj, sbe_test_data3 ) # # print "\n Poll on Host side for INTR ...\n" # #Poll on HOST DoorBell Register for interrupt # regObj.pollingOn( testPSUUtil.simSbeObj, host_polling_data, 5 ) # # #SBE->HOST data set 
execution # regObj.ExecuteTestOp( testPSUUtil.simSbeObj, host_test_data_success ) # print "\n Execute SBE Test set4 [ Put Ring ] ...\n" ''' Test Case 4 ''' # HOST->SBE data set execution regObj.ExecuteTestOp( testPSUUtil.simSbeObj, sbe_test_data4 ) print "\n Poll on Host side for INTR ...\n" #Poll on HOST DoorBell Register for interrupt regObj.pollingOn( testPSUUtil.simSbeObj, host_polling_data, 5 ) #SBE->HOST data set execution regObj.ExecuteTestOp( testPSUUtil.simSbeObj, host_test_data_success ) print "\n Execute SBE Test set5 [ Put Ring ] ...\n" ''' Test Case 5 ''' # HOST->SBE data set execution regObj.ExecuteTestOp( testPSUUtil.simSbeObj, sbe_test_data5 ) print "\n Poll on Host side for INTR ...\n" #Poll on HOST DoorBell Register for interrupt regObj.pollingOn( testPSUUtil.simSbeObj, host_polling_data, 5 ) #SBE->HOST data set execution regObj.ExecuteTestOp( testPSUUtil.simSbeObj, host_test_data_failure5 ) if __name__ == "__main__": main() if err: print ( "\nTest Suite completed with error(s)" ) #sys.exit(1) else: print ( "\nTest Suite completed with no errors" ) #sys.exit(0);
48.676856
139
0.456625
991
11,147
5.003027
0.238143
0.021178
0.022186
0.028237
0.676886
0.637959
0.617991
0.60952
0.573215
0.573215
0
0.045305
0.182201
11,147
228
140
48.890351
0.498574
0.45214
0
0.295082
1
0
0.361006
0
0
0
0
0
0
0
null
null
0
0.04918
null
null
0.131148
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
3
db65afdce59cfa91ac134d14e757a87692857110
3,079
py
Python
h1st/model/model.py
TheVinhLuong102/H1st
0c6f56d3a078817c36b208ae4f4c519cb35d5c18
[ "Apache-2.0" ]
null
null
null
h1st/model/model.py
TheVinhLuong102/H1st
0c6f56d3a078817c36b208ae4f4c519cb35d5c18
[ "Apache-2.0" ]
null
null
null
h1st/model/model.py
TheVinhLuong102/H1st
0c6f56d3a078817c36b208ae4f4c519cb35d5c18
[ "Apache-2.0" ]
null
null
null
from typing import Dict from h1st.h1flow.h1step_containable import NodeContainable from h1st.trust.trustable import Trustable from .repository.model_repository import ModelRepository from .modeler import Modelable class Model(NodeContainable, Trustable, Modelable): """ Base class for H1st Model. To create your own model, inherit `Model` class and implement `process` accordingly. Please refer to Tutorial for more details how to create a model. The framework allows you to persist and load model to the model repository. To persist the model, you can call `persist()`, and then `load_params` to retrieve the model. See `persist()` and `load_params()` document for more detail. .. code-block:: python :caption: Model Persistence and Loading Example import h1st class MyModeler(h1st.model.Modeler): def build_model(self): ... class MyModel(h1st.model.Model): my_modeler = MyModeler() my_modeler.model_class = MyModel my_model = my_modeler.build_model() # Persist the model to repo my_model.persist('1st_version') # Load the model from the repo my_model_2 = MyModel() my_model_2.load_params('1st_version') """ ## TODO: Need a better naming and the definition of the property @property def stats(self): return getattr(self, '__stats__', None) @stats.setter def stats(self, value) -> Dict: setattr(self, '__stats__', value) @property def metrics(self): if not hasattr(self, '__metrics__'): setattr(self, '__metrics__', {}) return getattr(self, '__metrics__') @metrics.setter def metrics(self, value) -> Dict: setattr(self, '__metrics__', value) def persist(self, version=None) -> None: """ Persist this model's properties to the ModelRepository. Currently, only `stats`, `metrics`, `model` properties are supported. `model` property could be single model, list or dict of models Currently, only sklearn and tensorflow-keras are supported. 
:param version: model version, leave blank for autogeneration :returns: model version """ repo = ModelRepository.get_model_repo(self) return repo.persist(model=self, version=version) def load_params(self, version: str = None) -> None: """ Load parameters from the specified `version` from the ModelRepository. Leave version blank to load latest version. """ repo = ModelRepository.get_model_repo(self) repo.load(model=self, version=version) return self def process(self, input_data: Dict) -> Dict: """ Implement logic to process data :params input_data: data to process :returns: processing result as a dictionary """ # not raise NotImplementedError so the initial model created by integrator will just work return input_data
31.418367
133
0.648912
366
3,079
5.319672
0.346995
0.020544
0.014381
0.020544
0.067797
0.043143
0.043143
0
0
0
0
0.005343
0.270542
3,079
97
134
31.742268
0.861532
0.544333
0
0.137931
0
0
0.052542
0
0
0
0
0.010309
0
1
0.241379
false
0
0.172414
0.034483
0.62069
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
1
0
0
0
0
1
0
0
3
db8b1ba49daf50072aba60071e3a277c6403aa1f
8,236
py
Python
tutorial/week6/problem1/src/main/mc/utils/AST.py
khoidohpc/ppl-course
3bcff3eeeeebc24f0fc9e3f844779f439aa97544
[ "MIT" ]
2
2020-10-21T13:04:18.000Z
2022-01-12T11:06:31.000Z
tutorial/week6/problem1/src/main/mc/utils/AST.py
khoidohpc/ppl-course
3bcff3eeeeebc24f0fc9e3f844779f439aa97544
[ "MIT" ]
null
null
null
tutorial/week6/problem1/src/main/mc/utils/AST.py
khoidohpc/ppl-course
3bcff3eeeeebc24f0fc9e3f844779f439aa97544
[ "MIT" ]
1
2022-01-12T11:06:45.000Z
2022-01-12T11:06:45.000Z
from abc import ABC, abstractmethod, ABCMeta from Visitor import Visitor class AST(ABC): def __eq__(self, other): return self.__dict__ == other.__dict__ @abstractmethod def accept(self, v, param): return v.visit(self, param) class Program(AST): #decl:list(Decl) def __init__(self, decl): self.decl = decl def __str__(self): return "Program([" + ','.join(str(i) for i in self.decl) + "])" def accept(self, v: Visitor, param): return v.visitProgram(self, param) class Decl(AST): __metaclass__ = ABCMeta pass class BlockMember(AST): __metaclass__ = ABCMeta pass class VarDecl(Decl,BlockMember): #variable:string #varType: Type def __init__(self, variable, varType): self.variable = variable self.varType = varType def __str__(self): return "VarDecl(" + str(self.variable) + "," + str(self.varType) + ")" def accept(self, v, param): return v.visitVarDecl(self, param) class FuncDecl(Decl): #name: Id #param: list(VarDecl) #returnType: Type #body: Block def __init__(self, name, param, returnType, body): self.name = name self.param = param self.returnType = returnType self.body = body def __str__(self): return "FuncDecl(" + str(self.name) + ",[" + ','.join(str(i) for i in self.param) + "]," + str(self.returnType) + "," + str(self.body) + ")" def accept(self, v, param): return v.visitFuncDecl(self, param) class Type(AST): __metaclass__ = ABCMeta pass class IntType(Type): def __str__(self): return "IntType" def accept(self, v, param): return v.visitIntType(self, param) class FloatType(Type): def __str__(self): return "FloatType" def accept(self, v, param): return v.visitFloatType(self, param) class BoolType(Type): def __str__(self): return "BoolType" def accept(self, v, param): return v.visitBoolType(self, param) class StringType(Type): def __str__(self): return "StringType" def accept(self, v, param): return v.visitStringType(self, param) class VoidType(Type): def __str__(self): return "VoidType" def accept(self, v, param): return v.visitVoidType(self, param) class ArrayType(Type): 
#dimen:int #eleType:Type def __init__(self, dimen, eleType): self.dimen = dimen self.eleType = eleType def __str__(self): return "ArrayType(" + str(self.eleType) + "," + str(self.dimen) + ")" def accept(self, v, param): return v.visitArrayType(self, param) class ArrayPointerType(Type): #eleType:Type def __init__(self, eleType): self.eleType = eleType def __str__(self): return "ArrayTypePointer(" + str(self.eleType) + ")" def accept(self, v, param): return v.visitArrayPointerType(self, param) class Stmt(BlockMember): __metaclass__ = ABCMeta pass class Expr(Stmt): __metaclass__ = ABCMeta pass class BinaryOp(Expr): #op:string #left:Expr #right:Expr def __init__(self, op, left, right): self.op = op self.left = left self.right = right def __str__(self): return "BinaryOp(" + self.op + "," + str(self.left) + "," + str(self.right) + ")" def accept(self, v, param): return v.visitBinaryOp(self, param) class UnaryOp(Expr): #op:string #body:Expr def __init__(self, op, body): self.op = op self.body = body def __str__(self): return "UnaryOp(" + self.op + "," + str(self.body) + ")" def accept(self, v, param): return v.visitUnaryOp(self, param) class CallExpr(Expr): #method:Id #param:list(Expr) def __init__(self, method, param): self.method = method self.param = param def __str__(self): return "CallExpr(" + str(self.method) + ",[" + ','.join(str(i) for i in self.param) + "])" def accept(self, v, param): return v.visitCallExpr(self, param) class LHS(Expr): __metaclass__ = ABCMeta pass class Id(LHS): #name:string def __init__(self, name): self.name = name def __str__(self): return "Id(" + self.name + ")" def accept(self, v, param): return v.visitId(self, param) class ArrayCell(LHS): #arr:Expr #idx:Expr def __init__(self, arr, idx): self.arr = arr self.idx = idx def __str__(self): return "ArrayCell(" + str(self.arr) + "," + str(self.idx) + ")" def accept(self, v, param): return v.visitArrayCell(self, param) class Block(Stmt): #decl:list(BlockMember) def __init__(self, member): 
self.member = member def __str__(self): return "Block([" + ','.join(str(i) for i in self.member) + "])" def accept(self, v, param): return v.visitBlock(self, param) class If(Stmt): #expr:Expr #thenStmt:Stmt #elseStmt:Stmt def __init__(self, expr, thenStmt, elseStmt=None): self.expr = expr self.thenStmt = thenStmt self.elseStmt = elseStmt def __str__(self): return "If(" + str(self.expr) + "," + str(self.thenStmt) + ("" if (self.elseStmt is None) else "," + str(self.elseStmt)) + ")" def accept(self, v, param): return v.visitIf(self, param) class For(Stmt): #expr1,expr2,expr3:Expr #loop:Stmt def __init__(self, expr1, expr2, expr3, loop): self.expr1 = expr1 self.expr2 = expr2 self.expr3 = expr3 self.loop = loop def __str__(self): return "For(" + str(self.expr1) + ";" + str(self.expr2) + ";" + str(self.expr3) + ";" + str(self.loop) + ")" def accept(self, v, param): return v.visitFor(self, param) class Break(Stmt): def __str__(self): return "Break()" def accept(self, v, param): return v.visitBreak(self, param) class Continue(Stmt): def __str__(self): return "Continue()" def accept(self, v, param): return v.visitContinue(self, param) class Return(Stmt): #expr:Expr def __init__(self, expr = None): self.expr = expr def __str__(self): return "Return(" + ("" if (self.expr is None) else str(self.expr)) + ")" def accept(self, v, param): return v.visitReturn(self, param) class Dowhile(Stmt): #sl:list(Stmt) #exp: Expr def __init__(self, sl, exp): self.sl = sl self.exp = exp def __str__(self): return "Dowhile([" + ','.join(str(i) for i in self.sl) + "]," + str(self.exp) + ")" def accept(self, v, param): return v.visitDowhile(self, param) class Literal(Expr): __metaclass__ = ABCMeta pass class IntLiteral(Literal): #value:int def __init__(self, value): self.value = value def __str__(self): return "IntLiteral(" + str(self.value) + ")" def accept(self, v, param): return v.visitIntLiteral(self, param) class FloatLiteral(Literal): #value:float def __init__(self, value): self.value = 
value def __str__(self): return "FloatLiteral(" + str(self.value) + ")" def accept(self, v, param): return v.visitFloatLiteral(self, param) class StringLiteral(Literal): #value:string def __init__(self, value): self.value = value def __str__(self): return "StringLiteral(" + self.value + ")" def accept(self, v, param): return v.visitStringLiteral(self, param) class BooleanLiteral(Literal): #value:boolean def __init__(self, value): self.value = value def __str__(self): return "BooleanLiteral(" + str(self.value).lower() + ")" def accept(self, v, param): return v.visitBooleanLiteral(self, param)
24.882175
150
0.57018
956
8,236
4.682008
0.121339
0.081323
0.078418
0.08445
0.342493
0.254468
0.254468
0.092717
0.08244
0.07462
0
0.002593
0.297596
8,236
330
151
24.957576
0.771132
0.047717
0
0.403756
0
0
0.037567
0
0
0
0
0
0
1
0.342723
false
0.032864
0.00939
0.253521
0.798122
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
db8d7e5f1e19e6c48d1ad560b144a3564c196e70
19,687
py
Python
tests/components/light/test_hue.py
sgrzys/AIS-home-assistant
7bfc4d6d90de75eea06702c36474d91bf38df3bf
[ "Apache-2.0" ]
2
2020-12-06T23:15:21.000Z
2021-03-20T20:21:03.000Z
tests/components/light/test_hue.py
sara0871/https-wakatime.com-android-studio
5a15b2c036b332c17d5f6a06664378e9273d684f
[ "Apache-2.0" ]
6
2021-02-08T20:25:50.000Z
2022-03-11T23:27:53.000Z
tests/components/light/test_hue.py
sara0871/https-wakatime.com-android-studio
5a15b2c036b332c17d5f6a06664378e9273d684f
[ "Apache-2.0" ]
3
2018-09-14T07:34:09.000Z
2018-09-29T12:57:10.000Z
"""Philips Hue lights platform tests.""" import asyncio from collections import deque import logging from unittest.mock import Mock import aiohue from aiohue.lights import Lights from aiohue.groups import Groups import pytest from homeassistant import config_entries from homeassistant.components import hue import homeassistant.components.light.hue as hue_light from homeassistant.util import color _LOGGER = logging.getLogger(__name__) HUE_LIGHT_NS = 'homeassistant.components.light.hue.' GROUP_RESPONSE = { "1": { "name": "Group 1", "lights": [ "1", "2" ], "type": "LightGroup", "action": { "on": True, "bri": 254, "hue": 10000, "sat": 254, "effect": "none", "xy": [ 0.5, 0.5 ], "ct": 250, "alert": "select", "colormode": "ct" }, "state": { "any_on": True, "all_on": False, } }, "2": { "name": "Group 2", "lights": [ "3", "4", "5" ], "type": "LightGroup", "action": { "on": True, "bri": 153, "hue": 4345, "sat": 254, "effect": "none", "xy": [ 0.5, 0.5 ], "ct": 250, "alert": "select", "colormode": "ct" }, "state": { "any_on": True, "all_on": False, } } } LIGHT_1_ON = { "state": { "on": True, "bri": 144, "hue": 13088, "sat": 212, "xy": [0.5128, 0.4147], "ct": 467, "alert": "none", "effect": "none", "colormode": "xy", "reachable": True }, "type": "Extended color light", "name": "Hue Lamp 1", "modelid": "LCT001", "swversion": "66009461", "manufacturername": "Philips", "uniqueid": "456", } LIGHT_1_OFF = { "state": { "on": False, "bri": 0, "hue": 0, "sat": 0, "xy": [0, 0], "ct": 0, "alert": "none", "effect": "none", "colormode": "xy", "reachable": True }, "type": "Extended color light", "name": "Hue Lamp 1", "modelid": "LCT001", "swversion": "66009461", "manufacturername": "Philips", "uniqueid": "456", } LIGHT_2_OFF = { "state": { "on": False, "bri": 0, "hue": 0, "sat": 0, "xy": [0, 0], "ct": 0, "alert": "none", "effect": "none", "colormode": "hs", "reachable": True }, "type": "Extended color light", "name": "Hue Lamp 2", "modelid": "LCT001", "swversion": "66009461", 
"manufacturername": "Philips", "uniqueid": "123", } LIGHT_2_ON = { "state": { "on": True, "bri": 100, "hue": 13088, "sat": 210, "xy": [.5, .4], "ct": 420, "alert": "none", "effect": "none", "colormode": "hs", "reachable": True }, "type": "Extended color light", "name": "Hue Lamp 2 new", "modelid": "LCT001", "swversion": "66009461", "manufacturername": "Philips", "uniqueid": "123", } LIGHT_RESPONSE = { "1": LIGHT_1_ON, "2": LIGHT_2_OFF, } @pytest.fixture def mock_bridge(hass): """Mock a Hue bridge.""" bridge = Mock( available=True, allow_unreachable=False, allow_groups=False, api=Mock(), spec=hue.HueBridge ) bridge.mock_requests = [] # We're using a deque so we can schedule multiple responses # and also means that `popleft()` will blow up if we get more updates # than expected. bridge.mock_light_responses = deque() bridge.mock_group_responses = deque() async def mock_request(method, path, **kwargs): kwargs['method'] = method kwargs['path'] = path bridge.mock_requests.append(kwargs) if path == 'lights': return bridge.mock_light_responses.popleft() if path == 'groups': return bridge.mock_group_responses.popleft() return None bridge.api.config.apiversion = '9.9.9' bridge.api.lights = Lights({}, mock_request) bridge.api.groups = Groups({}, mock_request) return bridge async def setup_bridge(hass, mock_bridge): """Load the Hue light platform with the provided bridge.""" hass.config.components.add(hue.DOMAIN) hass.data[hue.DOMAIN] = {'mock-host': mock_bridge} config_entry = config_entries.ConfigEntry(1, hue.DOMAIN, 'Mock Title', { 'host': 'mock-host' }, 'test') await hass.config_entries.async_forward_entry_setup(config_entry, 'light') # To flush out the service call to update the group await hass.async_block_till_done() async def test_not_load_groups_if_old_bridge(hass, mock_bridge): """Test that we don't try to load gorups if bridge runs old software.""" mock_bridge.api.config.apiversion = '1.12.0' mock_bridge.mock_light_responses.append({}) 
mock_bridge.mock_group_responses.append(GROUP_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 assert len(hass.states.async_all()) == 0 async def test_no_lights_or_groups(hass, mock_bridge): """Test the update_lights function when no lights are found.""" mock_bridge.allow_groups = True mock_bridge.mock_light_responses.append({}) mock_bridge.mock_group_responses.append({}) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 2 assert len(hass.states.async_all()) == 0 async def test_lights(hass, mock_bridge): """Test the update_lights function with some lights.""" mock_bridge.mock_light_responses.append(LIGHT_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 # 1 All Lights group, 2 lights assert len(hass.states.async_all()) == 3 lamp_1 = hass.states.get('light.hue_lamp_1') assert lamp_1 is not None assert lamp_1.state == 'on' assert lamp_1.attributes['brightness'] == 144 assert lamp_1.attributes['hs_color'] == (36.067, 69.804) lamp_2 = hass.states.get('light.hue_lamp_2') assert lamp_2 is not None assert lamp_2.state == 'off' async def test_lights_color_mode(hass, mock_bridge): """Test that lights only report appropriate color mode.""" mock_bridge.mock_light_responses.append(LIGHT_RESPONSE) await setup_bridge(hass, mock_bridge) lamp_1 = hass.states.get('light.hue_lamp_1') assert lamp_1 is not None assert lamp_1.state == 'on' assert lamp_1.attributes['brightness'] == 144 assert lamp_1.attributes['hs_color'] == (36.067, 69.804) assert 'color_temp' not in lamp_1.attributes new_light1_on = LIGHT_1_ON.copy() new_light1_on['state'] = new_light1_on['state'].copy() new_light1_on['state']['colormode'] = 'ct' mock_bridge.mock_light_responses.append({ "1": new_light1_on, }) mock_bridge.mock_group_responses.append({}) # Calling a service will trigger the updates to run await hass.services.async_call('light', 'turn_on', { 'entity_id': 'light.hue_lamp_2' }, blocking=True) # 
2x light update, 1 turn on request assert len(mock_bridge.mock_requests) == 3 lamp_1 = hass.states.get('light.hue_lamp_1') assert lamp_1 is not None assert lamp_1.state == 'on' assert lamp_1.attributes['brightness'] == 144 assert lamp_1.attributes['color_temp'] == 467 assert 'hs_color' not in lamp_1.attributes async def test_groups(hass, mock_bridge): """Test the update_lights function with some lights.""" mock_bridge.allow_groups = True mock_bridge.mock_light_responses.append({}) mock_bridge.mock_group_responses.append(GROUP_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 2 # 1 all lights group, 2 hue group lights assert len(hass.states.async_all()) == 3 lamp_1 = hass.states.get('light.group_1') assert lamp_1 is not None assert lamp_1.state == 'on' assert lamp_1.attributes['brightness'] == 254 assert lamp_1.attributes['color_temp'] == 250 lamp_2 = hass.states.get('light.group_2') assert lamp_2 is not None assert lamp_2.state == 'on' async def test_new_group_discovered(hass, mock_bridge): """Test if 2nd update has a new group.""" mock_bridge.allow_groups = True mock_bridge.mock_light_responses.append({}) mock_bridge.mock_group_responses.append(GROUP_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 2 assert len(hass.states.async_all()) == 3 new_group_response = dict(GROUP_RESPONSE) new_group_response['3'] = { "name": "Group 3", "lights": [ "3", "4", "5" ], "type": "LightGroup", "action": { "on": True, "bri": 153, "hue": 4345, "sat": 254, "effect": "none", "xy": [ 0.5, 0.5 ], "ct": 250, "alert": "select", "colormode": "ct" }, "state": { "any_on": True, "all_on": False, } } mock_bridge.mock_light_responses.append({}) mock_bridge.mock_group_responses.append(new_group_response) # Calling a service will trigger the updates to run await hass.services.async_call('light', 'turn_on', { 'entity_id': 'light.group_1' }, blocking=True) # 2x group update, 2x light update, 1 turn on request assert 
len(mock_bridge.mock_requests) == 5 assert len(hass.states.async_all()) == 4 new_group = hass.states.get('light.group_3') assert new_group is not None assert new_group.state == 'on' assert new_group.attributes['brightness'] == 153 assert new_group.attributes['color_temp'] == 250 async def test_new_light_discovered(hass, mock_bridge): """Test if 2nd update has a new light.""" mock_bridge.mock_light_responses.append(LIGHT_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 assert len(hass.states.async_all()) == 3 new_light_response = dict(LIGHT_RESPONSE) new_light_response['3'] = { "state": { "on": False, "bri": 0, "hue": 0, "sat": 0, "xy": [0, 0], "ct": 0, "alert": "none", "effect": "none", "colormode": "hs", "reachable": True }, "type": "Extended color light", "name": "Hue Lamp 3", "modelid": "LCT001", "swversion": "66009461", "manufacturername": "Philips", "uniqueid": "789", } mock_bridge.mock_light_responses.append(new_light_response) # Calling a service will trigger the updates to run await hass.services.async_call('light', 'turn_on', { 'entity_id': 'light.hue_lamp_1' }, blocking=True) # 2x light update, 1 turn on request assert len(mock_bridge.mock_requests) == 3 assert len(hass.states.async_all()) == 4 light = hass.states.get('light.hue_lamp_3') assert light is not None assert light.state == 'off' async def test_other_group_update(hass, mock_bridge): """Test changing one group that will impact the state of other light.""" mock_bridge.allow_groups = True mock_bridge.mock_light_responses.append({}) mock_bridge.mock_group_responses.append(GROUP_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 2 assert len(hass.states.async_all()) == 3 group_2 = hass.states.get('light.group_2') assert group_2 is not None assert group_2.name == 'Group 2' assert group_2.state == 'on' assert group_2.attributes['brightness'] == 153 assert group_2.attributes['color_temp'] == 250 updated_group_response = 
dict(GROUP_RESPONSE) updated_group_response['2'] = { "name": "Group 2 new", "lights": [ "3", "4", "5" ], "type": "LightGroup", "action": { "on": False, "bri": 0, "hue": 0, "sat": 0, "effect": "none", "xy": [ 0, 0 ], "ct": 0, "alert": "none", "colormode": "ct" }, "state": { "any_on": False, "all_on": False, } } mock_bridge.mock_light_responses.append({}) mock_bridge.mock_group_responses.append(updated_group_response) # Calling a service will trigger the updates to run await hass.services.async_call('light', 'turn_on', { 'entity_id': 'light.group_1' }, blocking=True) # 2x group update, 2x light update, 1 turn on request assert len(mock_bridge.mock_requests) == 5 assert len(hass.states.async_all()) == 3 group_2 = hass.states.get('light.group_2') assert group_2 is not None assert group_2.name == 'Group 2 new' assert group_2.state == 'off' async def test_other_light_update(hass, mock_bridge): """Test changing one light that will impact state of other light.""" mock_bridge.mock_light_responses.append(LIGHT_RESPONSE) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 1 assert len(hass.states.async_all()) == 3 lamp_2 = hass.states.get('light.hue_lamp_2') assert lamp_2 is not None assert lamp_2.name == 'Hue Lamp 2' assert lamp_2.state == 'off' updated_light_response = dict(LIGHT_RESPONSE) updated_light_response['2'] = { "state": { "on": True, "bri": 100, "hue": 13088, "sat": 210, "xy": [.5, .4], "ct": 420, "alert": "none", "effect": "none", "colormode": "hs", "reachable": True }, "type": "Extended color light", "name": "Hue Lamp 2 new", "modelid": "LCT001", "swversion": "66009461", "manufacturername": "Philips", "uniqueid": "123", } mock_bridge.mock_light_responses.append(updated_light_response) # Calling a service will trigger the updates to run await hass.services.async_call('light', 'turn_on', { 'entity_id': 'light.hue_lamp_1' }, blocking=True) # 2x light update, 1 turn on request assert len(mock_bridge.mock_requests) == 3 assert 
len(hass.states.async_all()) == 3 lamp_2 = hass.states.get('light.hue_lamp_2') assert lamp_2 is not None assert lamp_2.name == 'Hue Lamp 2 new' assert lamp_2.state == 'on' assert lamp_2.attributes['brightness'] == 100 async def test_update_timeout(hass, mock_bridge): """Test bridge marked as not available if timeout error during update.""" mock_bridge.api.lights.update = Mock(side_effect=asyncio.TimeoutError) mock_bridge.api.groups.update = Mock(side_effect=asyncio.TimeoutError) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 0 assert len(hass.states.async_all()) == 0 assert mock_bridge.available is False async def test_update_unauthorized(hass, mock_bridge): """Test bridge marked as not available if unauthorized during update.""" mock_bridge.api.lights.update = Mock(side_effect=aiohue.Unauthorized) mock_bridge.api.groups.update = Mock(side_effect=aiohue.Unauthorized) await setup_bridge(hass, mock_bridge) assert len(mock_bridge.mock_requests) == 0 assert len(hass.states.async_all()) == 0 assert mock_bridge.available is False async def test_light_turn_on_service(hass, mock_bridge): """Test calling the turn on service on a light.""" mock_bridge.mock_light_responses.append(LIGHT_RESPONSE) await setup_bridge(hass, mock_bridge) light = hass.states.get('light.hue_lamp_2') assert light is not None assert light.state == 'off' updated_light_response = dict(LIGHT_RESPONSE) updated_light_response['2'] = LIGHT_2_ON mock_bridge.mock_light_responses.append(updated_light_response) await hass.services.async_call('light', 'turn_on', { 'entity_id': 'light.hue_lamp_2', 'brightness': 100, 'color_temp': 300, }, blocking=True) # 2x light update, 1 turn on request assert len(mock_bridge.mock_requests) == 3 assert mock_bridge.mock_requests[1]['json'] == { 'bri': 100, 'on': True, 'ct': 300, 'effect': 'none', 'alert': 'none', } assert len(hass.states.async_all()) == 3 light = hass.states.get('light.hue_lamp_2') assert light is not None assert light.state == 
'on' async def test_light_turn_off_service(hass, mock_bridge): """Test calling the turn on service on a light.""" mock_bridge.mock_light_responses.append(LIGHT_RESPONSE) await setup_bridge(hass, mock_bridge) light = hass.states.get('light.hue_lamp_1') assert light is not None assert light.state == 'on' updated_light_response = dict(LIGHT_RESPONSE) updated_light_response['1'] = LIGHT_1_OFF mock_bridge.mock_light_responses.append(updated_light_response) await hass.services.async_call('light', 'turn_off', { 'entity_id': 'light.hue_lamp_1', }, blocking=True) # 2x light update, 1 turn on request assert len(mock_bridge.mock_requests) == 3 assert mock_bridge.mock_requests[1]['json'] == { 'on': False, 'alert': 'none', } assert len(hass.states.async_all()) == 3 light = hass.states.get('light.hue_lamp_1') assert light is not None assert light.state == 'off' def test_available(): """Test available property.""" light = hue_light.HueLight( light=Mock(state={'reachable': False}), request_bridge_update=None, bridge=Mock(allow_unreachable=False), is_group=False, ) assert light.available is False light = hue_light.HueLight( light=Mock(state={'reachable': False}), request_bridge_update=None, bridge=Mock(allow_unreachable=True), is_group=False, ) assert light.available is True light = hue_light.HueLight( light=Mock(state={'reachable': False}), request_bridge_update=None, bridge=Mock(allow_unreachable=False), is_group=True, ) assert light.available is True def test_hs_color(): """Test hs_color property.""" light = hue_light.HueLight( light=Mock(state={ 'colormode': 'ct', 'hue': 1234, 'sat': 123, }), request_bridge_update=None, bridge=Mock(), is_group=False, ) assert light.hs_color is None light = hue_light.HueLight( light=Mock(state={ 'colormode': 'hs', 'hue': 1234, 'sat': 123, }), request_bridge_update=None, bridge=Mock(), is_group=False, ) assert light.hs_color is None light = hue_light.HueLight( light=Mock(state={ 'colormode': 'xy', 'hue': 1234, 'sat': 123, 'xy': [0.4, 0.5] }), 
request_bridge_update=None, bridge=Mock(), is_group=False, ) assert light.hs_color == color.color_xy_to_hs(0.4, 0.5)
28.994109
78
0.598161
2,465
19,687
4.56714
0.087221
0.075502
0.05596
0.042636
0.781578
0.738586
0.707852
0.686534
0.667348
0.643276
0
0.0345
0.271194
19,687
678
79
29.036873
0.750139
0.045258
0
0.656307
0
0
0.133917
0.001952
0
0
0
0
0.171846
1
0.005484
false
0
0.021938
0
0.034735
0
0
0
0
null
0
0
0
0
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
dba4032c6e73a28c070d84e6e8301f24a8769a16
1,198
py
Python
setup.py
gmdelc66/simple-coinbase-bot
a679ab8ec4c4ff75f7a5ebdfb7315fe40844bb92
[ "MIT" ]
null
null
null
setup.py
gmdelc66/simple-coinbase-bot
a679ab8ec4c4ff75f7a5ebdfb7315fe40844bb92
[ "MIT" ]
null
null
null
setup.py
gmdelc66/simple-coinbase-bot
a679ab8ec4c4ff75f7a5ebdfb7315fe40844bb92
[ "MIT" ]
1
2021-02-15T07:37:52.000Z
2021-02-15T07:37:52.000Z
from distutils.core import setup import setuptools setup( name='SimpleCoinbaseBot', version='1.2.0', author='Matth Ingersoll', author_email='matth@mtingers.com', packages=['simplecoinbasebot',], license='BSD 2-Clause License', long_description=open('README.md').read(), url='https://github.com/mtingers/simple-coinbase-bot', install_requires=[ 'filelock>=3.0.12', 'cbpro>=1.1.4', ], entry_points={ 'console_scripts': [ 'simplebot=simplecoinbasebot.simplebot:main', 'simpletop=simplecoinbasebot.top:main', 'persistbot=simplecoinbasebot.run:main', ], }, # See https://pypi.python.org/pypi?%3Aaction=list_classifiers classifiers=[ 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', ], )
30.717949
65
0.602671
121
1,198
5.917355
0.578512
0.185754
0.244413
0.25419
0
0
0
0
0
0
0
0.027594
0.24374
1,198
38
66
31.526316
0.762693
0.049249
0
0.088235
0
0
0.556828
0.101322
0
0
0
0
0
1
0
true
0
0.058824
0
0.058824
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
3
dbb68ceac2bdebd662ab4c0bc64319dfff383044
51
py
Python
mtr/sync/__init__.py
mtrgroup/django-mtr-import-export
b8e7a6fa1cbc58b9e2126526f418306a7490cb52
[ "MIT" ]
null
null
null
mtr/sync/__init__.py
mtrgroup/django-mtr-import-export
b8e7a6fa1cbc58b9e2126526f418306a7490cb52
[ "MIT" ]
null
null
null
mtr/sync/__init__.py
mtrgroup/django-mtr-import-export
b8e7a6fa1cbc58b9e2126526f418306a7490cb52
[ "MIT" ]
null
null
null
default_app_config = 'mtr.sync.apps.MtrSyncConfig'
25.5
50
0.823529
7
51
5.714286
1
0
0
0
0
0
0
0
0
0
0
0
0.058824
51
1
51
51
0.833333
0
0
0
0
0
0.529412
0.529412
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
dbc09da959b036ed7a75ab06984c4b5d62ed3480
116
py
Python
Python/URI PROBLEMAS/1011 - Esfera.py
guimaraesalves/material-python
d56b6b24ae35a67d394b43cb1ef4420805c7bd9b
[ "MIT" ]
null
null
null
Python/URI PROBLEMAS/1011 - Esfera.py
guimaraesalves/material-python
d56b6b24ae35a67d394b43cb1ef4420805c7bd9b
[ "MIT" ]
null
null
null
Python/URI PROBLEMAS/1011 - Esfera.py
guimaraesalves/material-python
d56b6b24ae35a67d394b43cb1ef4420805c7bd9b
[ "MIT" ]
null
null
null
raio = int(input()) pi = 3.14159 volume = float(4.0 * pi * (raio* raio * raio) / 3) print("VOLUME = %0.3f" %volume)
23.2
50
0.586207
20
116
3.4
0.6
0.235294
0
0
0
0
0
0
0
0
0
0.117021
0.189655
116
4
51
29
0.606383
0
0
0
0
0
0.12069
0
0
0
0
0
0
1
0
false
0
0
0
0
0.25
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
9160262b7f3f3ba167f59b2dfa396ba24d462a21
2,854
py
Python
google/cloud/servicedirectory/v1/servicedirectory-v1-py/google/cloud/servicedirectory_v1/__init__.py
googleapis/googleapis-gen
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
[ "Apache-2.0" ]
7
2021-02-21T10:39:41.000Z
2021-12-07T07:31:28.000Z
google/cloud/servicedirectory/v1/servicedirectory-v1-py/google/cloud/servicedirectory_v1/__init__.py
googleapis/googleapis-gen
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
[ "Apache-2.0" ]
6
2021-02-02T23:46:11.000Z
2021-11-15T01:46:02.000Z
google/cloud/servicedirectory/v1beta1/servicedirectory-v1beta1-py/google/cloud/servicedirectory_v1beta1/__init__.py
googleapis/googleapis-gen
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
[ "Apache-2.0" ]
4
2021-01-28T23:25:45.000Z
2021-08-30T01:55:16.000Z
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from .services.lookup_service import LookupServiceClient from .services.lookup_service import LookupServiceAsyncClient from .services.registration_service import RegistrationServiceClient from .services.registration_service import RegistrationServiceAsyncClient from .types.endpoint import Endpoint from .types.lookup_service import ResolveServiceRequest from .types.lookup_service import ResolveServiceResponse from .types.namespace import Namespace from .types.registration_service import CreateEndpointRequest from .types.registration_service import CreateNamespaceRequest from .types.registration_service import CreateServiceRequest from .types.registration_service import DeleteEndpointRequest from .types.registration_service import DeleteNamespaceRequest from .types.registration_service import DeleteServiceRequest from .types.registration_service import GetEndpointRequest from .types.registration_service import GetNamespaceRequest from .types.registration_service import GetServiceRequest from .types.registration_service import ListEndpointsRequest from .types.registration_service import ListEndpointsResponse from .types.registration_service import ListNamespacesRequest from .types.registration_service import ListNamespacesResponse from .types.registration_service import ListServicesRequest from .types.registration_service import ListServicesResponse from .types.registration_service 
import UpdateEndpointRequest from .types.registration_service import UpdateNamespaceRequest from .types.registration_service import UpdateServiceRequest from .types.service import Service __all__ = ( 'LookupServiceAsyncClient', 'RegistrationServiceAsyncClient', 'CreateEndpointRequest', 'CreateNamespaceRequest', 'CreateServiceRequest', 'DeleteEndpointRequest', 'DeleteNamespaceRequest', 'DeleteServiceRequest', 'Endpoint', 'GetEndpointRequest', 'GetNamespaceRequest', 'GetServiceRequest', 'ListEndpointsRequest', 'ListEndpointsResponse', 'ListNamespacesRequest', 'ListNamespacesResponse', 'ListServicesRequest', 'ListServicesResponse', 'LookupServiceClient', 'Namespace', 'RegistrationServiceClient', 'ResolveServiceRequest', 'ResolveServiceResponse', 'Service', 'UpdateEndpointRequest', 'UpdateNamespaceRequest', 'UpdateServiceRequest', )
38.053333
74
0.84583
280
2,854
8.521429
0.357143
0.136211
0.209556
0.211232
0.336966
0
0
0
0
0
0
0.003463
0.089348
2,854
74
75
38.567568
0.914583
0.199369
0
0
0
0
0.234024
0.138828
0
0
0
0
0
1
0
false
0
0.482143
0
0.482143
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3