Each record has the following columns (name and dtype):

| Column | Dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |

A minimal loading example and several sample records follow.
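A quick way to work with records that follow this schema is to load them into a dataframe and screen them on a few of the quality-signal columns. The sketch below is an assumed workflow, not part of the dataset card: the parquet path and the filter thresholds are illustrative placeholders.

```python
import pandas as pd

# Hypothetical path: the actual data files are not named in this excerpt.
df = pd.read_parquet("data/sample.parquet")

# Keep Python files that are mostly unique and not dominated by repeated n-grams.
mask = (
    (df["lang"] == "Python")
    & (df["qsc_code_frac_chars_dupe_5grams_quality_signal"] < 0.3)
    & (df["qsc_code_frac_words_unique_quality_signal"] > 0.2)
    & (df["max_line_length"] < 200)
)
filtered = df[mask]

print(f"kept {len(filtered)} of {len(df)} rows")
print(filtered[["max_stars_repo_name", "max_stars_repo_path", "size"]].head())
```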
**Example 1: `pyvmu/messages.py` from `JosephRedfern/VarienseVMU`**

hexsha `be30c6f12931ff680481e45af1a532c7eab58cb2`, size 1,089 bytes, ext `py`, lang Python.

| | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | `pyvmu/messages.py` | `pyvmu/messages.py` | `pyvmu/messages.py` |
| repo_name | `JosephRedfern/VarienseVMU` | `JosephRedfern/VarienseVMU` | `JosephRedfern/VarienseVMU` |
| repo_head_hexsha | `e27c05a83124e024cd049b10f7d682f7f41a5c73` | `e27c05a83124e024cd049b10f7d682f7f41a5c73` | `e27c05a83124e024cd049b10f7d682f7f41a5c73` |
| repo_licenses | ["MIT"] | ["MIT"] | ["MIT"] |
| count | 5 | 2 | 3 |
| event_min_datetime | 2017-10-23T13:13:09.000Z | 2018-04-18T08:15:52.000Z | 2017-09-06T18:05:21.000Z |
| event_max_datetime | 2018-05-07T14:38:47.000Z | 2018-05-17T11:32:47.000Z | 2018-11-21T13:08:16.000Z |

`content`:

```python
from collections import namedtuple
Accelerometer = namedtuple('Accelerometer', ["timestamp", "x", "y", "z"])
Magnetometer = namedtuple('Magnetometer', ['timestamp', 'x', 'y', 'z'])
Gyroscope = namedtuple('Gyroscope', ['timestamp', 'x', 'y', 'z'])
Euler = namedtuple('Euler', ['timestamp', 'x', 'y', 'z'])
Quaternion = namedtuple('Quaternion', ['timestamp', 'w', 'x', 'y', 'z'])
Heading = namedtuple('Heading', ['timestamp', 'h'])
Status = namedtuple('Status', ['magnetometer_enabled',
'gyroscope_enabled',
'accelerometer_enabled',
'gyroscope_resolution',
'accelerometer_resolution',
'low_output_rate',
'heading_streaming',
'euler_streaming',
'magnetometer_streaming',
'quaternions_streaming',
'gyroscope_streaming',
'accelerometer_streaming'])
```

avg_line_length 45.375, max_line_length 73, alphanum_fraction 0.486685.

Nonzero quality signals (prefixes `qsc_code_`/`qsc_codepython_` and suffix `_quality_signal` omitted): num_words 73, num_chars 1,089, mean_word_length 7.082192, frac_words_unique 0.328767, frac_chars_top_2grams 0.019342, frac_chars_top_3grams 0.029014, frac_chars_top_4grams 0.092843, frac_chars_whitespace 0.371901, size_file_byte 1,089, num_lines 23, num_chars_line_max 74, num_chars_line_mean 47.347826, frac_chars_alphabet 0.755848, frac_chars_string_length 0.337006, frac_chars_long_word_length 0.101928, cate_ast 1, frac_lines_import 0.052632, score_lines_no_logic 0.052632; cate_var_zero is false and the remaining `*_quality_signal` columns are 0. The 41 unsuffixed `qsc_*` columns are all 0, except `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat`, which are null; `effective` is 1 and `hits` is 0.
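The `frac_chars_dupe_*grams` values shown above measure how much of a file is covered by repeated word n-grams. The exact implementation behind these columns is not given in this excerpt; the sketch below is one plausible way such a signal could be computed, for intuition only.

```python
from collections import Counter

def frac_chars_dupe_ngrams(text: str, n: int = 5) -> float:
    """Fraction of n-gram characters that belong to word n-grams seen more than once.

    Only a sketch of the general idea behind the qsc_code_frac_chars_dupe_*grams
    columns; the definition used to produce the dataset may differ.
    """
    words = text.split()
    if len(words) < n:
        return 0.0
    ngrams = [tuple(words[i:i + n]) for i in range(len(words) - n + 1)]
    counts = Counter(ngrams)
    total_chars = sum(sum(len(w) for w in gram) * c for gram, c in counts.items())
    dupe_chars = sum(sum(len(w) for w in gram) * c
                     for gram, c in counts.items() if c > 1)
    return dupe_chars / total_chars if total_chars else 0.0

# Example use (hypothetical input file):
# print(frac_chars_dupe_ngrams(open("some_file.py").read(), n=5))
```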
**Example 2: `scripts/Caesar-Cipher/CaesarCipher.py` from `Pythobit/python-projects`**

hexsha `be313f1e475a00f009ff53d9286703681a5859de`, size 2,847 bytes, ext `py`, lang Python.

| | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | `scripts/Caesar-Cipher/CaesarCipher.py` | `scripts/Caesar-Cipher/CaesarCipher.py` | `scripts/Caesar-Cipher/CaesarCipher.py` |
| repo_name | `Pythobit/python-projects` | `Pythobit/Python-Projects` | `Pythobit/Python-Projects` |
| repo_head_hexsha | `1a6ee3f0f417846626dfa021af49c999771a0199` | `1a6ee3f0f417846626dfa021af49c999771a0199` | `1a6ee3f0f417846626dfa021af49c999771a0199` |
| repo_licenses | ["MIT"] | ["MIT"] | ["MIT"] |
| count | 2 | 4 | 1 |
| event_min_datetime | 2021-10-19T06:17:33.000Z | 2021-10-19T06:04:36.000Z | 2021-10-19T06:55:26.000Z |
| event_max_datetime | 2021-10-19T06:17:37.000Z | 2021-10-19T11:42:57.000Z | 2021-10-19T06:55:26.000Z |

`content`:

```python
from __future__ import print_function
import os
import string
import argparse
try:
maketrans = string.maketrans # python2
except AttributeError:
maketrans = str.maketrans # python3
def caeser_cipher(string_: str, offset: int, decode: bool, file_: string) -> None:
"""Caeser Cipher implementation, reads file or string. Also decodes.
Default implementation is ROT13 encoding.
To decode, specify the same offset you used to encode and your ciphertext / file.
:param string_: string to encode / decode
:param offset: # of chars to rotate by
:param decode: decode instead of encode
:param file_: file to read in then encode/decode
"""
if file_ and os.path.exists(file_):
with open(file_, "r") as f:
string_ = f.read()
if decode:
offset *= -1
lower_offset_alphabet = (
string.ascii_lowercase[offset:] + string.ascii_lowercase[:offset]
)
lower_translation_table = maketrans(string.ascii_lowercase, lower_offset_alphabet)
upper_offset_alphabet = (
string.ascii_uppercase[offset:] + string.ascii_uppercase[:offset]
)
upper_translation_table = maketrans(string.ascii_uppercase, upper_offset_alphabet)
lower_converted = string_.translate(lower_translation_table)
final_converted = lower_converted.translate(upper_translation_table)
if file_:
extension = "dec" if decode else "enc"
with open("{}.{}".format(file_, extension), "w") as f:
print(final_converted, file=f)
else:
print(final_converted)
def check_offset_range(value: int) -> int:
"""Validates that value is in the allowable range.
:param value: integer to validate
:return: valid integer
:raises: argparse.ArgumentTypeError
"""
value = int(value)
if value < -25 or value > 25:
raise argparse.ArgumentTypeError("{} is an invalid offset".format(value))
return value
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Simple Caeser Cipher Encoder and Decoder"
)
parser.add_argument(
"-d",
"--decode",
action="store_true",
dest="decode",
help="decode ciphertext (offset should equal what was used to encode)",
default=False,
)
parser.add_argument(
"-o",
"--offset",
dest="offset",
default=13,
type=check_offset_range,
help="number of characters to shift",
)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-f", "--file", dest="file", help="file to encode", default=None)
group.add_argument(
"-s", "--string", dest="string", help="string to encode", default=None
)
args = parser.parse_args()
caeser_cipher(args.string, args.offset, args.decode, args.file)
```

avg_line_length 30.945652, max_line_length 88, alphanum_fraction 0.663505.

Nonzero quality signals: num_words 341, num_chars 2,847, mean_word_length 5.346041, frac_words_unique 0.375367, frac_chars_top_2grams 0.036204, frac_chars_top_3grams 0.032913, frac_chars_top_4grams 0.027427, frac_chars_dupe_5grams 0.039495, frac_chars_digital 0.005034, frac_chars_whitespace 0.232525, size_file_byte 2,847, num_lines 91, num_chars_line_max 89, num_chars_line_mean 31.285714, frac_chars_alphabet 0.829291, frac_chars_comments 0.184053, frac_lines_dupe_lines 0.032258, frac_chars_string_length 0.121854, cate_ast 1, frac_lines_func_ratio 0.032258, frac_lines_import 0.064516, score_lines_no_logic 0.112903, frac_lines_print 0.048387; cate_var_zero is false and the remaining `*_quality_signal` columns are 0. As in the record above, the unsuffixed `qsc_*` columns are 0 (or null), `effective` is 1, and `hits` is 0.
**Example 3: `onadata/libs/permissions.py` from `BuildAMovement/whistler-kobocat`**

hexsha `be31bc2fba335d1b861c92be573990bfd80133fd`, size 8,217 bytes, ext `py`, lang Python.

| | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | `onadata/libs/permissions.py` | `onadata/libs/permissions.py` | `onadata/libs/permissions.py` |
| repo_name | `BuildAMovement/whistler-kobocat` | `BuildAMovement/whistler-kobocat` | `BuildAMovement/whistler-kobocat` |
| repo_head_hexsha | `7f61dd0761bb0aa5b27c909bcff8c29453d3311d` | `7f61dd0761bb0aa5b27c909bcff8c29453d3311d` | `7f61dd0761bb0aa5b27c909bcff8c29453d3311d` |
| repo_licenses | ["BSD-2-Clause"] | ["BSD-2-Clause"] | ["BSD-2-Clause"] |
| count | 38 | 20 | 5 |
| event_min_datetime | 2017-02-28T05:39:40.000Z | 2017-04-27T09:14:27.000Z | 2017-02-22T12:25:19.000Z |
| event_max_datetime | 2019-01-16T04:39:04.000Z | 2019-01-17T06:35:52.000Z | 2019-01-15T11:16:40.000Z |

`content`:

```python
from collections import defaultdict
from django.contrib.contenttypes.models import ContentType
from guardian.shortcuts import (
assign_perm,
remove_perm,
get_perms,
get_users_with_perms)
from onadata.apps.api.models import OrganizationProfile
from onadata.apps.main.models.user_profile import UserProfile
from onadata.apps.logger.models import XForm
from onadata.apps.api.models import Project
# Userprofile Permissions
CAN_ADD_USERPROFILE = 'add_userprofile'
CAN_CHANGE_USERPROFILE = 'change_userprofile'
CAN_DELETE_USERPROFILE = 'delete_userprofile'
CAN_ADD_XFORM_TO_PROFILE = 'can_add_xform'
CAN_VIEW_PROFILE = 'view_profile'
# Organization Permissions
CAN_VIEW_ORGANIZATION_PROFILE = 'view_organizationprofile'
CAN_ADD_ORGANIZATION_PROFILE = 'add_organizationprofile'
CAN_ADD_ORGANIZATION_XFORM = 'can_add_xform'
CAN_CHANGE_ORGANIZATION_PROFILE = 'change_organizationprofile'
CAN_DELETE_ORGANIZATION_PROFILE = 'delete_organizationprofile'
IS_ORGANIZATION_OWNER = 'is_org_owner'
# Xform Permissions
CAN_CHANGE_XFORM = 'change_xform'
CAN_ADD_XFORM = 'add_xform'
CAN_DELETE_XFORM = 'delete_xform'
CAN_VIEW_XFORM = 'view_xform'
CAN_ADD_SUBMISSIONS = 'report_xform'
CAN_TRANSFER_OWNERSHIP = 'transfer_xform'
CAN_MOVE_TO_FOLDER = 'move_xform'
# Project Permissions
CAN_VIEW_PROJECT = 'view_project'
CAN_CHANGE_PROJECT = 'change_project'
CAN_TRANSFER_PROJECT_OWNERSHIP = 'transfer_project'
CAN_DELETE_PROJECT = 'delete_project'
CAN_ADD_DATADICTIONARY = 'add_datadictionary'
CAN_CHANGE_DATADICTIONARY = 'change_datadictionary'
CAN_DELETE_DATADICTIONARY = 'delete_datadictionary'
class Role(object):
class_to_permissions = None
permissions = None
name = None
@classmethod
def _remove_obj_permissions(self, user, obj):
content_type = ContentType.objects.get(
model=obj.__class__.__name__.lower(),
app_label=obj.__class__._meta.app_label
)
object_permissions = user.userobjectpermission_set.filter(
object_pk=obj.pk, content_type=content_type)
for perm in object_permissions:
remove_perm(perm.permission.codename, user, obj)
@classmethod
def add(cls, user, obj):
cls._remove_obj_permissions(user, obj)
for codename, klass in cls.permissions:
if type(obj) == klass:
assign_perm(codename, user, obj)
@classmethod
def has_role(cls, permissions, obj):
"""Check that permission correspond to this role for this object.
:param permissions: A list of permissions.
:param obj: An object to get the permissions of.
"""
perms_for_role = set(cls.class_to_permissions[type(obj)])
return perms_for_role.issubset(set(permissions))
@classmethod
def user_has_role(cls, user, obj):
"""Check that a user has this role.
:param user: A user object.
:param obj: An object to get the permissions of.
"""
return user.has_perms(cls.class_to_permissions[type(obj)], obj)
class ReadOnlyRole(Role):
name = 'readonly'
permissions = (
(CAN_VIEW_ORGANIZATION_PROFILE, OrganizationProfile),
(CAN_VIEW_XFORM, XForm),
(CAN_VIEW_PROJECT, Project),
)
class DataEntryRole(Role):
name = 'dataentry'
permissions = (
(CAN_ADD_SUBMISSIONS, XForm),
(CAN_ADD_XFORM, Project),
(CAN_VIEW_ORGANIZATION_PROFILE, OrganizationProfile),
(CAN_VIEW_PROJECT, Project),
(CAN_VIEW_XFORM, XForm),
)
class EditorRole(Role):
name = 'editor'
permissions = (
(CAN_ADD_SUBMISSIONS, XForm),
(CAN_ADD_XFORM, Project),
(CAN_CHANGE_PROJECT, Project),
(CAN_CHANGE_XFORM, XForm),
(CAN_VIEW_ORGANIZATION_PROFILE, OrganizationProfile),
(CAN_VIEW_PROJECT, Project),
(CAN_VIEW_XFORM, XForm),
)
class ManagerRole(Role):
name = 'manager'
permissions = (
(CAN_ADD_SUBMISSIONS, XForm),
(CAN_ADD_XFORM, XForm),
(CAN_ADD_XFORM_TO_PROFILE, OrganizationProfile),
(CAN_ADD_XFORM_TO_PROFILE, UserProfile),
(CAN_CHANGE_PROJECT, Project),
(CAN_CHANGE_XFORM, XForm),
(CAN_DELETE_PROJECT, Project),
(CAN_VIEW_ORGANIZATION_PROFILE, OrganizationProfile),
(CAN_VIEW_PROFILE, UserProfile),
(CAN_VIEW_PROJECT, Project),
(CAN_VIEW_XFORM, XForm),
)
class MemberRole(Role):
"""This is a role for a member of an organization.
"""
name = 'member'
class OwnerRole(Role):
"""This is a role for an owner of a dataset, organization, or project.
"""
name = 'owner'
permissions = (
(CAN_ADD_SUBMISSIONS, XForm),
(CAN_ADD_XFORM, Project),
(CAN_ADD_XFORM, XForm),
(CAN_VIEW_XFORM, XForm),
(CAN_ADD_DATADICTIONARY, XForm),
(CAN_CHANGE_DATADICTIONARY, XForm),
(CAN_DELETE_DATADICTIONARY, XForm),
(CAN_ADD_SUBMISSIONS, XForm),
(CAN_DELETE_XFORM, XForm),
(CAN_MOVE_TO_FOLDER, XForm),
(CAN_TRANSFER_OWNERSHIP, XForm),
(CAN_CHANGE_XFORM, XForm),
(CAN_ADD_XFORM_TO_PROFILE, UserProfile),
(CAN_ADD_USERPROFILE, UserProfile),
(CAN_CHANGE_USERPROFILE, UserProfile),
(CAN_DELETE_USERPROFILE, UserProfile),
(CAN_ADD_XFORM_TO_PROFILE, UserProfile),
(CAN_VIEW_PROFILE, UserProfile),
(CAN_VIEW_ORGANIZATION_PROFILE, OrganizationProfile),
(CAN_ADD_ORGANIZATION_PROFILE, OrganizationProfile),
(CAN_ADD_ORGANIZATION_XFORM, OrganizationProfile),
(CAN_CHANGE_ORGANIZATION_PROFILE, OrganizationProfile),
(CAN_DELETE_ORGANIZATION_PROFILE, OrganizationProfile),
(IS_ORGANIZATION_OWNER, OrganizationProfile),
(CAN_ADD_XFORM_TO_PROFILE, OrganizationProfile),
(CAN_VIEW_ORGANIZATION_PROFILE, OrganizationProfile),
(CAN_CHANGE_PROJECT, Project),
(CAN_CHANGE_XFORM, XForm),
(CAN_DELETE_PROJECT, Project),
(CAN_DELETE_XFORM, XForm),
(CAN_MOVE_TO_FOLDER, XForm),
(CAN_TRANSFER_OWNERSHIP, XForm),
(CAN_TRANSFER_PROJECT_OWNERSHIP, Project),
(CAN_VIEW_ORGANIZATION_PROFILE, OrganizationProfile),
(CAN_VIEW_PROFILE, UserProfile),
(CAN_VIEW_PROJECT, Project),
(CAN_VIEW_XFORM, XForm),
(CAN_ADD_DATADICTIONARY, XForm),
(CAN_CHANGE_DATADICTIONARY, XForm),
(CAN_DELETE_DATADICTIONARY, XForm),
(CAN_ADD_SUBMISSIONS, XForm),
)
ROLES_ORDERED = [ReadOnlyRole,
DataEntryRole,
EditorRole,
ManagerRole,
OwnerRole]
ROLES = {role.name: role for role in ROLES_ORDERED}
# Memoize a class to permissions dict.
for role in ROLES.values():
role.class_to_permissions = defaultdict(list)
[role.class_to_permissions[k].append(p) for p, k in role.permissions]
def is_organization(obj):
try:
obj.organizationprofile
return True
except OrganizationProfile.DoesNotExist:
return False
def get_role(permissions, obj):
for role in reversed(ROLES_ORDERED):
if role.has_role(permissions, obj):
return role.name
def get_role_in_org(user, organization):
perms = get_perms(user, organization)
if 'is_org_owner' in perms:
return OwnerRole.name
else:
return get_role(perms, organization) or MemberRole.name
def get_object_users_with_permissions(obj, exclude=None, serializable=False):
"""Returns users, roles and permissions for a object.
When called with with `serializable=True`, return usernames (strings)
instead of User objects, which cannot be serialized by REST Framework.
"""
result = []
if obj:
users_with_perms = get_users_with_perms(
obj, attach_perms=True, with_group_users=False).items()
result = [{
'user': user if not serializable else user.username,
'role': get_role(permissions, obj),
'permissions': permissions} for user, permissions in
users_with_perms if not is_organization(
UserProfile.objects.get_or_create(user=user)[0]
)
]
return result
```

avg_line_length 31.848837, max_line_length 77, alphanum_fraction 0.69271.

Nonzero quality signals: num_words 933, num_chars 8,217, mean_word_length 5.745981, frac_words_unique 0.150054, frac_chars_top_2grams 0.052229, frac_chars_top_3grams 0.028726, frac_chars_top_4grams 0.06883, frac_chars_dupe_5grams 0.415035, frac_chars_dupe_6grams 0.342846, frac_chars_dupe_7grams 0.271218, frac_chars_dupe_8grams 0.225145, frac_chars_dupe_9grams 0.216937, frac_chars_dupe_10grams 0.194926, frac_chars_digital 0.000156, frac_chars_whitespace 0.222344, size_file_byte 8,217, num_lines 257, num_chars_line_max 78, num_chars_line_mean 31.972763, frac_chars_alphabet 0.838811, frac_chars_comments 0.086285, frac_lines_dupe_lines 0.350785, frac_chars_string_length 0.06298, frac_chars_long_word_length 0.019016, cate_ast 1, frac_lines_func_ratio 0.041885, frac_lines_import 0.036649, score_lines_no_logic 0.230366; cate_var_zero is false and the remaining `*_quality_signal` columns are 0. As in the record above, the unsuffixed `qsc_*` columns are 0 (or null), `effective` is 1, and `hits` is 0.
**Example 4: `lanelines.py` from `gauborg/lane-finding-gborgaonkar`**

hexsha `be330b0c9754c05467f2b02c3762c1390226f3d3`, size 10,078 bytes, ext `py`, lang Python. The max_stars, max_issues, and max_forks groups all reference path `lanelines.py` in `gauborg/lane-finding-gborgaonkar` at head `466313a0da7c245e25f0987afa953300501d5322` with licenses ["MIT"]; their counts and event datetimes are null.

`content`:

```python
# Self-Driving Car Engineer Nanodegree
#
# ## Project: **Finding Lane Lines on the Road**
# ## Import Packages
#importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import math
import moviepy
image = mpimg.imread('test_images/solidWhiteRight.jpg')
#printing out some stats and plotting
print('This image is:', type(image), 'with dimensions:', image.shape)
plt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')
# ## Ideas for Lane Detection Pipeline
# **Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:**
#
# `cv2.inRange()` for color selection
# `cv2.fillPoly()` for regions selection
# `cv2.line()` to draw lines on an image given endpoints
# `cv2.addWeighted()` to coadd / overlay two images
# `cv2.cvtColor()` to grayscale or change color
# `cv2.imwrite()` to output images to file
# `cv2.bitwise_and()` to apply a mask to an image
#
# **Check out the OpenCV documentation to learn about these and discover even more awesome functionality!**
import math
def grayscale(img):
"""Applies the Grayscale transform
This will return an image with only one color channel
but NOTE: to see the returned image as grayscale
(assuming your grayscaled image is called 'gray')
you should call plt.imshow(gray, cmap='gray')"""
return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Or use BGR2GRAY if you read an image with cv2.imread()
# return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def canny(img, low_threshold, high_threshold):
"""Applies the Canny transform"""
return cv2.Canny(img, low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
"""Applies a Gaussian Noise kernel"""
return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def region_of_interest(img, vertices):
"""
Applies an image mask.
Only keeps the region of the image defined by the polygon
formed from `vertices`. The rest of the image is set to black.
`vertices` should be a numpy array of integer points.
"""
#defining a blank mask to start with
mask = np.zeros_like(img)
#defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
#filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, ignore_mask_color)
#returning the image only where mask pixels are nonzero
masked_image = cv2.bitwise_and(img, mask)
return masked_image
def draw_lines(img, lines, color=[255, 0, 0], thickness=5):
"""
NOTE: this is the function you might want to use as a starting point once you want to
average/extrapolate the line segments you detect to map out the full
extent of the lane (going from the result shown in raw-lines-example.mp4
to that shown in P1_example.mp4).
Think about things like separating line segments by their
slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left
line vs. the right line. Then, you can average the position of each of
the lines and extrapolate to the top and bottom of the lane.
This function draws `lines` with `color` and `thickness`.
Lines are drawn on the image inplace (mutates the image).
If you want to make the lines semi-transparent, think about combining
this function with the weighted_img() function below
"""
# lists to store the slopes of lines which match our criteria
left_slope = []
right_slope = []
# lists to store the calculate b intercepts of these lines
left_b = []
right_b = []
for line in lines:
for x1,y1,x2,y2 in line:
slope = ((y2-y1)/(x2-x1))
# only select lines with specific slope range
if(((slope < 0.8) and (slope > 0.5)) or ((slope > -0.8) and (slope < -0.5))):
# check where the endpoints lie on the image...
if (x1 < (img.shape[1]/2) and x2 < (img.shape[1]/2)):
left_slope.append(slope)
left_b.append(y1-slope*x1)
left_b.append(y2-slope*x2)
else:
right_slope.append(slope)
right_b.append(y1-slope*x1)
right_b.append(y2-slope*x2)
try:
# we calculate average slope to draw the line
avg_left_slope = sum(left_slope)/len(left_slope)
avg_right_slope = sum(right_slope)/len(right_slope)
avg_left_b = sum(left_b)/len(left_b)
avg_right_b = sum(right_b)/len(right_b)
# Y co-ordinate of the lane line will definitely be at the bottom of the image
y1 = img.shape[0]
y2 = 320
y3 = 320
y4 = img.shape[0]
# X co-ordinate can be calculated by using the eqn of the line and y co-ordinate
x1 = (y1 - avg_left_b)/avg_left_slope
x2 = (y2 - avg_left_b)/avg_left_slope
x3 = (y3 - avg_right_b)/avg_right_slope
x4 = (y4 - avg_right_b)/avg_right_slope
# draw the lines, converting values to integer for pixels
cv2.line(img, (int(x1), int(y1)), (int(x2), int(y2)), color, thickness)
cv2.line(img, (int(x3), int(y3)), (int(x4), int(y4)), color, thickness)
except ZeroDivisionError as error:
pass
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
"""
`img` should be the output of a Canny transform.
Returns an image with hough lines drawn.
"""
lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
draw_lines(line_img, lines)
return line_img
# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, α=0.8, β=1., γ=0.):
"""
`img` is the output of the hough_lines(), An image with lines drawn on it.
Should be a blank image (all black) with lines drawn on it.
`initial_img` should be the image before any processing.
The result image is computed as follows:
initial_img * α + img * β + γ
NOTE: initial_img and img must be the same shape!
"""
return cv2.addWeighted(initial_img, α, img, β, γ)
# ## Test Images
#
# Build your pipeline to work on the images in the directory "test_images"
# **You should make sure your pipeline works well on these images before you try the videos.**
import os
directory = os.listdir("test_images/")
# TODO: Build your pipeline that will draw lane lines on the test_images
# then save them to the test_images_output directory.
def lanelines(image):
# 1. Grayscaling
gray = grayscale(image)
# 2. Gaussian Blur
blur = gaussian_blur(gray, 5)
# 3. Canny Detection
canny_edges = canny(blur, 50, 150)
# 4. Region Masking
vertices = np.array([[(0,image.shape[0]),(460,320),(500,320),(image.shape[1],image.shape[0])]], dtype=np.int32)
selected_region = region_of_interest(canny_edges, vertices)
mpimg.imsave(os.path.join("test_images_output/" + "output-" + i), selected_region)
# image.save(os.path.join("test_images_output/" + i + "-canny-region-output"), format=None, dpi=(540, 960))
# Hough Transform Parameters- Identify lane lines in the masked region
# execute Hough Transform
lines_image = hough_lines(selected_region, 2, np.pi/180, 25, 20, 10)
weighted_image = weighted_img(lines_image, image)
return weighted_image
for i in directory:
image = mpimg.imread(os.path.join("test_images/", i))
weighted_image = lanelines(image)
mpimg.imsave(os.path.join("test_images_output/" + "output+" + i), weighted_image)
# ## Test on Videos
#
# You know what's cooler than drawing lanes over images? Drawing lanes over video!
#
# We can test our solution on two provided videos:
# `solidWhiteRight.mp4`
# `solidYellowLeft.mp4`
#
#
# **If you get an error that looks like this:**
# ```
# NeedDownloadError: Need ffmpeg exe.
# You can download it by calling:
# imageio.plugins.ffmpeg.download()
# Import everything needed to edit/save/watch video clips
import imageio
from moviepy.editor import VideoFileClip
def process_image(image):
# NOTE: The output you return should be a color image (3 channel) for processing video below
# TODO: put your pipeline here,
# you should return the final output (image where lines are drawn on lanes)
result = lanelines(image)
return result
white_output = 'test_videos_output/solidWhiteRight.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4")
white_clip = clip1.fl_image(process_image) # NOTE: this function expects color images!!
white_clip.write_videofile(white_output, audio=False)
yellow_output = 'test_videos_output/solidYellowLeft.mp4'
clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')
yellow_clip = clip2.fl_image(process_image)
yellow_clip.write_videofile(yellow_output, audio=False)
challenge_output = 'test_videos_output/challenge.mp4'
clip3 = VideoFileClip('test_videos/challenge.mp4')
challenge_clip = clip3.fl_image(process_image)
challenge_clip.write_videofile(challenge_output, audio=False)
```

avg_line_length 36.514493, max_line_length 137, alphanum_fraction 0.682675.

Nonzero quality signals: num_words 1,498, num_chars 10,078, mean_word_length 4.489319, frac_words_unique 0.269025, frac_chars_top_2grams 0.010409, frac_chars_top_3grams 0.006543, frac_chars_top_4grams 0.008327, frac_chars_dupe_5grams 0.099033, frac_chars_dupe_6grams 0.071078, frac_chars_dupe_7grams 0.018141, frac_chars_dupe_8grams 0.013086, frac_chars_dupe_9grams 0.013086, frac_chars_dupe_10grams 0.013086, frac_chars_digital 0.023149, frac_chars_whitespace 0.224152, size_file_byte 10,078, num_lines 275, num_chars_line_max 138, num_chars_line_mean 36.647273, frac_chars_alphabet 0.836936, frac_chars_comments 0.503671, frac_lines_dupe_lines 0.04, frac_chars_string_length 0.070414, frac_chars_long_word_length 0.047932, frac_lines_prompt_comments 0.003636, cate_ast 1, frac_lines_func_ratio 0.09, frac_lines_pass 0.01, frac_lines_import 0.1, score_lines_no_logic 0.27, frac_lines_print 0.01; cate_var_zero is false and the remaining `*_quality_signal` columns are 0. As in the record above, the unsuffixed `qsc_*` columns are 0 (or null), `effective` is 1, and `hits` is 0.
**Example 5: `network/pytorch2onnx.py` from `MRsoymilk/toy-car`**

hexsha `be368e6b255149306c28292dd49ca28ab1a75535`, size 553 bytes, ext `py`, lang Python. The max_stars, max_issues, and max_forks groups all reference path `network/pytorch2onnx.py` in `MRsoymilk/toy-car` at head `5bd51bf231781a17e1d7acb4654c3d4b6adbed41` with licenses ["MIT"]; their counts and event datetimes are null.

`content`:

```python
import Net
import configparser
import torch
from PIL import Image
config = configparser.ConfigParser()
config.read('./config.ini')
MODEL = config.get("Network", "Model")
transformations = Net.transformations
net = Net.Net()
net.eval()
net.load_state_dict(torch.load(MODEL))
image = Image.open("./html/rwby.jpg")
image = transformations(image).float()
image = torch.autograd.Variable(image[None, ...])
torch.onnx.export(
net,
image,
MODEL.split('pth')[0] + 'onnx',
export_params=True,
output_names=['toy-car']
)
print("finish")
```

avg_line_length 19.068966, max_line_length 49, alphanum_fraction 0.703436.

Nonzero quality signals: num_words 72, num_chars 553, mean_word_length 5.347222, frac_words_unique 0.541667, frac_chars_top_2grams 0.046753, frac_chars_top_3grams 0.046753, frac_chars_digital 0.002079, frac_chars_whitespace 0.130199, size_file_byte 553, num_lines 28, num_chars_line_max 50, num_chars_line_mean 19.75, frac_chars_alphabet 0.798337, frac_chars_string_length 0.106691, cate_ast 1, frac_lines_import 0.181818, score_lines_no_logic 0.181818, frac_lines_print 0.045455; cate_var_zero is false and the remaining `*_quality_signal` columns are 0. As in the record above, the unsuffixed `qsc_*` columns are 0 (or null), `effective` is 1, and `hits` is 0.
**Example 6: `var/spack/repos/builtin/packages/r-gridextra/package.py` from `player1537-forks/spack`**

hexsha `be385b749f1c26b913c643d471ca79a2fd89e72b`, size 724 bytes, ext `py`, lang Python.

| | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | `var/spack/repos/builtin/packages/r-gridextra/package.py` | `var/spack/repos/builtin/packages/r-gridextra/package.py` | `var/spack/repos/builtin/packages/r-gridextra/package.py` |
| repo_name | `player1537-forks/spack` | `player1537-forks/spack` | `player1537-forks/spack` |
| repo_head_hexsha | `822b7632222ec5a91dc7b7cda5fc0e08715bd47c` | `822b7632222ec5a91dc7b7cda5fc0e08715bd47c` | `822b7632222ec5a91dc7b7cda5fc0e08715bd47c` |
| repo_licenses | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] |
| count | 11 | 22 | 4 |
| event_min_datetime | 2015-10-04T02:17:46.000Z | 2017-08-01T22:45:10.000Z | 2016-06-10T17:57:39.000Z |
| event_max_datetime | 2018-02-07T18:23:00.000Z | 2022-03-10T07:46:31.000Z | 2018-09-11T04:59:38.000Z |

`content`:

```python
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RGridextra(RPackage):
"""Miscellaneous Functions for "Grid" Graphics.
Provides a number of user-level functions to work with "grid" graphics,
notably to arrange multiple grid-based plots on a page, and draw tables."""
cran = "gridExtras"
version('2.3', sha256='81b60ce6f237ec308555471ae0119158b115463df696d2eca9b177ded8988e3b')
version('2.2.1', sha256='44fe455a5bcdf48a4ece7a542f83e7749cf251dc1df6ae7634470240398c6818')
depends_on('r-gtable', type=('build', 'run'))
```

avg_line_length 34.47619, max_line_length 95, alphanum_fraction 0.754144.

Nonzero quality signals: num_words 85, num_chars 724, mean_word_length 6.411765, frac_words_unique 0.8, frac_chars_top_2grams 0.044037, frac_chars_digital 0.169903, frac_chars_whitespace 0.146409, size_file_byte 724, num_lines 20, num_chars_line_max 96, num_chars_line_mean 36.2, frac_chars_alphabet 0.711974, frac_chars_comments 0.524862, frac_chars_string_length 0.495413, frac_chars_long_word_length 0.391437, cate_ast 1, frac_lines_import 0.166667, score_lines_no_logic 0.5; cate_var_zero is false and the remaining `*_quality_signal` columns are 0. As in the record above, the unsuffixed `qsc_*` columns are 0 (or null), `effective` is 1, and `hits` is 0.
**Example 7: `tuframework/training/network_training/competitions_with_custom_Trainers/MMS/nnUNetTrainerV2_MMS.py` from `Magnety/tuFramework`**

hexsha `be387cca53cfcab985ce1dca7b42033320d21418`, size 2,707 bytes, ext `py`, lang Python. The max_stars, max_issues, and max_forks groups all reference path `tuframework/training/network_training/competitions_with_custom_Trainers/MMS/nnUNetTrainerV2_MMS.py` in `Magnety/tuFramework` at head `b31cb34d476ef306b52da955021f93c91c14ddf4` with licenses ["Apache-2.0"]; their counts and event datetimes are null.

`content`:

```python
import torch
from tuframework.network_architecture.generic_UNet import Generic_UNet
from tuframework.network_architecture.initialization import InitWeights_He
from tuframework.training.network_training.tuframework_variants.data_augmentation.tuframeworkTrainerV2_insaneDA import \
tuframeworkTrainerV2_insaneDA
from tuframework.utilities.nd_softmax import softmax_helper
from torch import nn
class tuframeworkTrainerV2_MMS(tuframeworkTrainerV2_insaneDA):
def setup_DA_params(self):
super().setup_DA_params()
self.data_aug_params["p_rot"] = 0.7
self.data_aug_params["p_eldef"] = 0.1
self.data_aug_params["p_scale"] = 0.3
self.data_aug_params["independent_scale_factor_for_each_axis"] = True
self.data_aug_params["p_independent_scale_per_axis"] = 0.3
self.data_aug_params["do_additive_brightness"] = True
self.data_aug_params["additive_brightness_mu"] = 0
self.data_aug_params["additive_brightness_sigma"] = 0.2
self.data_aug_params["additive_brightness_p_per_sample"] = 0.3
self.data_aug_params["additive_brightness_p_per_channel"] = 1
self.data_aug_params["elastic_deform_alpha"] = (0., 300.)
self.data_aug_params["elastic_deform_sigma"] = (9., 15.)
self.data_aug_params['gamma_range'] = (0.5, 1.6)
def initialize_network(self):
if self.threeD:
conv_op = nn.Conv3d
dropout_op = nn.Dropout3d
norm_op = nn.BatchNorm3d
else:
conv_op = nn.Conv2d
dropout_op = nn.Dropout2d
norm_op = nn.BatchNorm2d
norm_op_kwargs = {'eps': 1e-5, 'affine': True}
dropout_op_kwargs = {'p': 0, 'inplace': True}
net_nonlin = nn.LeakyReLU
net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,
len(self.net_num_pool_op_kernel_sizes),
self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op,
dropout_op_kwargs,
net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2),
self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
if torch.cuda.is_available():
self.network.cuda()
self.network.inference_apply_nonlin = softmax_helper
"""def run_training(self):
from batchviewer import view_batch
a = next(self.tr_gen)
view_batch(a['data'])
import IPython;IPython.embed()"""
```

avg_line_length 44.377049, max_line_length 120, alphanum_fraction 0.663465.

Nonzero quality signals: num_words 345, num_chars 2,707, mean_word_length 4.828986, frac_words_unique 0.33913, frac_chars_top_2grams 0.062425, frac_chars_top_3grams 0.085834, frac_chars_top_4grams 0.132653, frac_chars_dupe_5grams 0.235294, frac_chars_dupe_6grams 0.186074, frac_chars_dupe_7grams 0.084034, frac_chars_dupe_8grams 0.084034, frac_chars_digital 0.021047, frac_chars_whitespace 0.24529, size_file_byte 2,707, num_lines 60, num_chars_line_max 121, num_chars_line_mean 45.116667, frac_chars_alphabet 0.79442, frac_chars_string_length 0.121451, frac_chars_long_word_length 0.078864, cate_ast 1, frac_lines_func_ratio 0.044444, frac_lines_import 0.133333, score_lines_no_logic 0.2; cate_var_zero is false and the remaining `*_quality_signal` columns are 0. As in the record above, the unsuffixed `qsc_*` columns are 0 (or null), `effective` is 1, and `hits` is 0.
**Example 8: `ansible/playbooks/roles/repository/files/download-requirements/src/command/yum.py` from `romsok24/epiphany`**

hexsha `be3baf27f812f65c9b958afcfa252dbaf8d5e093`, size 3,088 bytes, ext `py`, lang Python. The max_stars, max_issues, and max_forks groups all reference path `ansible/playbooks/roles/repository/files/download-requirements/src/command/yum.py` in `romsok24/epiphany` at head `f058984939561fc8d51288765976118ae12e6c32` with licenses ["Apache-2.0"]; their counts and event datetimes are null.

`content`:

```python
from typing import List
from src.command.command import Command
class Yum(Command):
"""
Interface for `yum`
"""
def __init__(self, retries: int):
super().__init__('yum', retries)
def update(self, enablerepo: str,
package: str = None,
disablerepo: str = '*',
assume_yes: bool = True):
"""
Interface for `yum update`
:param enablerepo:
:param package:
:param disablerepo:
:param assume_yes: if set to True, -y flag will be used
"""
update_parameters: List[str] = ['update']
update_parameters.append('-y' if assume_yes else '')
if package is not None:
update_parameters.append(package)
update_parameters.append(f'--disablerepo={disablerepo}')
update_parameters.append(f'--enablerepo={enablerepo}')
self.run(update_parameters)
def install(self, package: str,
assume_yes: bool = True):
"""
Interface for `yum install -y`
:param package: packaged to be installed
:param assume_yes: if set to True, -y flag will be used
"""
no_ask: str = '-y' if assume_yes else ''
self.run(['install', no_ask, package])
def remove(self, package: str,
assume_yes: bool = True):
"""
Interface for `yum remove -y`
:param package: packaged to be removed
:param assume_yes: if set to True, -y flag will be used
"""
no_ask: str = '-y' if assume_yes else ''
self.run(['remove', no_ask, package])
def is_repo_enabled(self, repo: str) -> bool:
output = self.run(['repolist',
'enabled']).stdout
if repo in output:
return True
return False
def find_rhel_repo_id(self, patterns: List[str]) -> List[str]:
output = self.run(['repolist',
'all']).stdout
repos: List[str] = []
for line in output.split('\n'):
for pattern in patterns:
if pattern in line:
repos.append(pattern)
return repos
def accept_keys(self):
# to accept import of repo's GPG key (for repo_gpgcheck=1)
self.run(['-y', 'repolist'])
def is_repo_available(self, repo: str) -> bool:
retval = self.run(['-q',
'--disablerepo=*',
f'--enablerepo={repo}',
'repoinfo']).returncode
if retval == 0:
return True
return False
def makecache(self, fast: bool = True,
assume_yes: bool = True):
args: List[str] = ['makecache']
args.append('-y' if assume_yes else '')
if fast:
args.append('fast')
self.run(args)
def list_all_repo_info(self) -> List[str]:
args: List[str] = ['repolist',
'-v',
'all']
return self._run_and_filter(args)
```

avg_line_length 27.81982, max_line_length 66, alphanum_fraction 0.51943.

Nonzero quality signals: num_words 346, num_chars 3,088, mean_word_length 4.508671, frac_words_unique 0.254335, frac_chars_top_2grams 0.063462, frac_chars_top_3grams 0.038462, frac_chars_top_4grams 0.04359, frac_chars_dupe_5grams 0.291667, frac_chars_dupe_6grams 0.260897, frac_chars_dupe_7grams 0.228846, frac_chars_dupe_8grams 0.198077, frac_chars_dupe_9grams 0.175641, frac_chars_dupe_10grams 0.175641, frac_chars_digital 0.001019, frac_chars_whitespace 0.364313, size_file_byte 3,088, num_lines 110, num_chars_line_max 67, num_chars_line_mean 28.072727, frac_chars_alphabet 0.793683, frac_chars_comments 0.152526, frac_lines_dupe_lines 0.196721, frac_chars_string_length 0.077391, frac_chars_long_word_length 0.02107, cate_ast 1, frac_lines_func_ratio 0.163934, frac_lines_import 0.032787, score_lines_no_logic 0.311475; cate_var_zero is false and the remaining `*_quality_signal` columns are 0. As in the record above, the unsuffixed `qsc_*` columns are 0 (or null), `effective` is 1, and `hits` is 0.
**Example 9: `ros_tf_publisher.py` from `BrightLamp/PyLearningCodes`**

hexsha `be4037367a1afa83a7501ca75f082c616c63c62c`, size 625 bytes, ext `py`, lang Python. The max_stars, max_issues, and max_forks groups all reference path `ros_tf_publisher.py` in `BrightLamp/PyLearningCodes` at head `ed237528c41ab2a9832b88806732097ffae0a0ed` with licenses ["MIT"]; their counts and event datetimes are null.

`content`:

```python
# encoding=utf-8
import rospy
import tf
if __name__ == '__main__':
rospy.init_node('py_tf_broadcaster')
br = tf.TransformBroadcaster()
x = 0.0
y = 0.0
z = 0.0
roll = 0
pitch = 0
yaw = 1.57
rate = rospy.Rate(1)
while not rospy.is_shutdown():
yaw = yaw + 0.1
roll = roll + 0.1
br.sendTransform((x, y, z),
tf.transformations.quaternion_from_euler(roll, pitch, yaw),
rospy.Time.now(),
"base_link",
"front_caster") # 发布base_link到link1的平移和翻转
rate.sleep()
```

avg_line_length 24.038462, max_line_length 84, alphanum_fraction 0.5104.

Nonzero quality signals: num_words 76, num_chars 625, mean_word_length 3.973684, frac_words_unique 0.565789, frac_chars_top_2grams 0.019868, frac_chars_digital 0.046272, frac_chars_whitespace 0.3776, size_file_byte 625, num_lines 25, num_chars_line_max 85, num_chars_line_mean 25, frac_chars_alphabet 0.730077, frac_chars_comments 0.0608, frac_chars_string_length 0.078767, cate_ast 1, frac_lines_import 0.095238, score_lines_no_logic 0.095238; cate_var_zero is false and the remaining `*_quality_signal` columns are 0. As in the record above, the unsuffixed `qsc_*` columns are 0 (or null), `effective` is 1, and `hits` is 0.
**Example 10: `dataset_manager/technical_indicators.py` from `NightingaleV/bakalarska_prace-ann-algotrading`**

hexsha `be40e740adf7c24c5c205687723b024d4eaf9752`, size 2,674 bytes, ext `py`, lang Python. The max_stars, max_issues, and max_forks groups all reference path `dataset_manager/technical_indicators.py` in `NightingaleV/bakalarska_prace-ann-algotrading` at head `07866e092cb527a7e1d9d7050790d9ffd611dc83` with licenses ["MIT"]; their counts and event datetimes are null.

`content`:

```python
# Imports
import numpy as np
class TechnicalIndicators:
cci_constant = 0.015
def __init__(self):
self.df = None
# Exponentially-weighted moving average
def ewma(self, periods):
indicator = 'EWMA{}'.format(periods)
self.df[indicator] = self.df['close'].ewm(span=periods).mean()
return self
# Stochastic Oscillator
def stochastic_oscilator(self, k_period, d_period, smooth=1):
lows = 'l{}'.format(k_period)
highs = 'h{}'.format(k_period)
self.df = self.calc_roll_min(self.df, k_period)
self.df = self.calc_roll_max(self.df, k_period)
self.df = self.stok(self.df, k_period)
if smooth >= 1:
self.df = self.smooth_stok(self.df, smooth)
self.df = self.stod(self.df, d_period)
self.df.drop([lows, highs], axis=1, inplace=True)
return self
@staticmethod
def calc_roll_min(dataset, k_period):
lows = 'l{}'.format(k_period)
dataset[lows] = dataset['low'].rolling(window=k_period).min()
return dataset
@staticmethod
def calc_roll_max(dataset, k_period):
highs = 'h{}'.format(k_period)
dataset[highs] = dataset['high'].rolling(window=k_period).max()
return dataset
@staticmethod
def stok(dataset, k_period):
lows = 'l{}'.format(k_period)
highs = 'h{}'.format(k_period)
dataset['%k'] = ((dataset['close'] - dataset[lows]) / (
dataset[highs] - dataset[lows])) * 100
return dataset
@staticmethod
def smooth_stok(dataset, smooth):
dataset['%k'] = dataset['%k'].rolling(window=smooth).mean()
return dataset
@staticmethod
def stod(dataset, d_period):
dataset['%d'] = dataset['%k'].rolling(window=d_period).mean()
return dataset
# RSI - Relative Strength Index
def rsi_indicator(self, period):
rsi = 'rsi{}'.format(period)
# Calculate differences between prices
deltas = np.diff(self.df['close'])
# For every row calculate rsi
for i, row in self.df.iterrows():
if i < period:
self.df.loc[i, rsi] = 0
else:
self.df.loc[i, rsi] = self.calc_rsi(i, period, deltas)
return self
@staticmethod
def calc_rsi(index, period, deltas):
seed = deltas[index - period:index]
average_gain = seed[seed >= 0].sum() / period
average_loss = seed[seed < 0].sum() / period
if abs(average_loss) == 0:
rs = 0
else:
rs = average_gain / abs(average_loss)
rsi = 100. - (100. / (1 + rs))
return rsi
```

avg_line_length 31.093023, max_line_length 71, alphanum_fraction 0.5819.

Nonzero quality signals: num_words 336, num_chars 2,674, mean_word_length 4.502976, frac_words_unique 0.247024, frac_chars_top_2grams 0.071381, frac_chars_top_3grams 0.051553, frac_chars_top_4grams 0.074025, frac_chars_dupe_5grams 0.23265, frac_chars_dupe_6grams 0.153338, frac_chars_dupe_7grams 0.153338, frac_chars_dupe_8grams 0.10575, frac_chars_dupe_9grams 0.048909, frac_chars_dupe_10grams 0.048909, frac_chars_digital 0.011488, frac_chars_whitespace 0.283844, size_file_byte 2,674, num_lines 85, num_chars_line_max 72, num_chars_line_mean 31.458824, frac_chars_alphabet 0.77859, frac_chars_comments 0.060583, frac_lines_dupe_lines 0.338462, frac_chars_string_length 0.024351, cate_ast 1, frac_lines_func_ratio 0.153846, frac_lines_import 0.015385, score_lines_no_logic 0.338462; cate_var_zero is false and the remaining `*_quality_signal` columns are 0. As in the record above, the unsuffixed `qsc_*` columns are 0 (or null), `effective` is 1, and `hits` is 0.
**Example 11: `parlai/agents/drqa/config.py` from `shagunsodhani/ParlAI`**

hexsha `be44513cd298d38b88ee6e7730ed73cc8a97d105`, size 5,979 bytes, ext `py`, lang Python.

| | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | `parlai/agents/drqa/config.py` | `parlai/agents/drqa/config.py` | `parlai/agents/drqa/config.py` |
| repo_name | `shagunsodhani/ParlAI` | `shagunsodhani/ParlAI` | `shagunsodhani/ParlAI` |
| repo_head_hexsha | `5b634b844807372adfb0f6d6e5c42341ac8138f0` | `5b634b844807372adfb0f6d6e5c42341ac8138f0` | `5b634b844807372adfb0f6d6e5c42341ac8138f0` |
| repo_licenses | ["BSD-3-Clause"] | ["BSD-3-Clause"] | ["BSD-3-Clause"] |
| count | 1 | null | null |
| event_min_datetime | 2017-06-26T07:46:33.000Z | null | null |
| event_max_datetime | 2017-06-26T07:46:33.000Z | null | null |

`content`:

```python
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import os
import sys
import logging
def str2bool(v):
return v.lower() in ('yes', 'true', 't', '1', 'y')
def add_cmdline_args(parser):
# Runtime environment
agent = parser.add_argument_group('DrQA Arguments')
agent.add_argument('--no_cuda', type='bool', default=False)
agent.add_argument('--gpu', type=int, default=-1)
agent.add_argument('--random_seed', type=int, default=1013)
# Basics
agent.add_argument('--embedding_file', type=str, default=None,
help='File of space separated embeddings: w e1 ... ed')
agent.add_argument('--pretrained_model', type=str, default=None,
help='Load dict/features/weights/opts from this file')
agent.add_argument('--log_file', type=str, default=None)
# Model details
agent.add_argument('--fix_embeddings', type='bool', default=True)
agent.add_argument('--tune_partial', type=int, default=0,
help='Train the K most frequent word embeddings')
agent.add_argument('--embedding_dim', type=int, default=300,
help=('Default embedding size if '
'embedding_file is not given'))
agent.add_argument('--hidden_size', type=int, default=128,
help='Hidden size of RNN units')
agent.add_argument('--doc_layers', type=int, default=3,
help='Number of RNN layers for passage')
agent.add_argument('--question_layers', type=int, default=3,
help='Number of RNN layers for question')
agent.add_argument('--rnn_type', type=str, default='lstm',
help='RNN type: lstm (default), gru, or rnn')
# Optimization details
agent.add_argument('--valid_metric', type=str,
choices=['accuracy', 'f1'], default='f1',
help='Metric for choosing best valid model')
agent.add_argument('--max_len', type=int, default=15,
help='The max span allowed during decoding')
agent.add_argument('--rnn_padding', type='bool', default=False)
agent.add_argument('--display_iter', type=int, default=10,
help='Print train error after every \
<display_iter> epoches (default 10)')
agent.add_argument('--dropout_emb', type=float, default=0.4,
help='Dropout rate for word embeddings')
agent.add_argument('--dropout_rnn', type=float, default=0.4,
help='Dropout rate for RNN states')
agent.add_argument('--dropout_rnn_output', type='bool', default=True,
help='Whether to dropout the RNN output')
agent.add_argument('--optimizer', type=str, default='adamax',
help='Optimizer: sgd or adamax (default)')
agent.add_argument('--learning_rate', '-lr', type=float, default=0.1,
help='Learning rate for SGD (default 0.1)')
agent.add_argument('--grad_clipping', type=float, default=10,
help='Gradient clipping (default 10.0)')
agent.add_argument('--weight_decay', type=float, default=0,
help='Weight decay (default 0)')
agent.add_argument('--momentum', type=float, default=0,
help='Momentum (default 0)')
# Model-specific
agent.add_argument('--concat_rnn_layers', type='bool', default=True)
agent.add_argument('--question_merge', type=str, default='self_attn',
help='The way of computing question representation')
agent.add_argument('--use_qemb', type='bool', default=True,
help='Whether to use weighted question embeddings')
agent.add_argument('--use_in_question', type='bool', default=True,
help='Whether to use in_question features')
agent.add_argument('--use_tf', type='bool', default=True,
help='Whether to use tf features')
agent.add_argument('--use_time', type=int, default=0,
help='Time features marking how recent word was said')
def set_defaults(opt):
# Embeddings options
if opt.get('embedding_file'):
if not os.path.isfile(opt['embedding_file']):
raise IOError('No such file: %s' % args.embedding_file)
with open(opt['embedding_file']) as f:
dim = len(f.readline().strip().split(' ')) - 1
opt['embedding_dim'] = dim
elif not opt.get('embedding_dim'):
raise RuntimeError(('Either embedding_file or embedding_dim '
'needs to be specified.'))
# Make sure tune_partial and fix_embeddings are consistent
if opt['tune_partial'] > 0 and opt['fix_embeddings']:
print('Setting fix_embeddings to False as tune_partial > 0.')
opt['fix_embeddings'] = False
# Make sure fix_embeddings and embedding_file are consistent
if opt['fix_embeddings']:
if not opt.get('embedding_file') and not opt.get('pretrained_model'):
print('Setting fix_embeddings to False as embeddings are random.')
opt['fix_embeddings'] = False
def override_args(opt, override_opt):
# Major model args are reset to the values in override_opt.
# Non-architecture args (like dropout) are kept.
args = set(['embedding_file', 'embedding_dim', 'hidden_size', 'doc_layers',
'question_layers', 'rnn_type', 'optimizer', 'concat_rnn_layers',
'question_merge', 'use_qemb', 'use_in_question', 'use_tf',
'vocab_size', 'num_features', 'use_time'])
for k, v in override_opt.items():
if k in args:
opt[k] = v
```

avg_line_length 51.102564, max_line_length 80, alphanum_fraction 0.616993.

Nonzero quality signals: num_words 754, num_chars 5,979, mean_word_length 4.742706, frac_words_unique 0.29443, frac_chars_top_2grams 0.098434, frac_chars_top_3grams 0.138702, frac_chars_top_4grams 0.031879, frac_chars_dupe_5grams 0.22623, frac_chars_dupe_6grams 0.142338, frac_chars_dupe_7grams 0.142338, frac_chars_dupe_8grams 0.074664, frac_chars_dupe_9grams 0.045302, frac_chars_dupe_10grams 0.025168, frac_chars_digital 0.011261, frac_chars_whitespace 0.257401, size_file_byte 5,979, num_lines 116, num_chars_line_max 81, num_chars_line_mean 51.543103, frac_chars_alphabet 0.794144, frac_chars_comments 0.100184, frac_lines_dupe_lines 0.022222, frac_chars_string_length 0.342655, frac_chars_long_word_length 0.004847, cate_ast 1, frac_lines_func_ratio 0.044444, frac_lines_pass 0.011111, frac_lines_import 0.033333, frac_lines_simplefunc 0.011111, score_lines_no_logic 0.088889, frac_lines_print 0.022222; cate_var_zero is false and the remaining `*_quality_signal` columns are 0. As in the record above, the unsuffixed `qsc_*` columns are 0 (or null), `effective` is 1, and `hits` is 0.
**Example 12: `run.py` from `pome-ta/CodeMirror`**

hexsha `be451a5cb8b5c7262021b6003b4a6ffdd2ef5a5f`, size 424 bytes, ext `py`, lang Python. The max_stars, max_issues, and max_forks groups all reference path `run.py` in `pome-ta/CodeMirror` at head `ef39c3032ea128d988c263ed97851860db9f977c` with licenses ["MIT"]; their counts and event datetimes are null.

`content`:

```python
"""
Pythonista3 app CodeMirror
"""
import pythonista.wkwebview as wkwebview
import ui
import pathlib
uri = pathlib.Path('./main_index.html')
class View(ui.View):
def __init__(self):
self.wv = wkwebview.WKWebView(flex='WH')
self.wv.load_url(str(uri))
self.add_subview(self.wv)
def will_close(self):
self.wv.clear_cache()
_view = View()
_view.present(style='fullscreen', orientations=['portrait'])
```

avg_line_length 16.96, max_line_length 60, alphanum_fraction 0.707547.

Nonzero quality signals: num_words 58, num_chars 424, mean_word_length 4.982759, frac_words_unique 0.62069, frac_chars_top_2grams 0.083045, frac_chars_top_3grams 0.069204, frac_chars_digital 0.002755, frac_chars_whitespace 0.143868, size_file_byte 424, num_lines 24, num_chars_line_max 61, num_chars_line_mean 17.666667, frac_chars_alphabet 0.793388, frac_chars_comments 0.061321, frac_chars_string_length 0.095116, cate_ast 1, frac_lines_func_ratio 0.153846, frac_lines_import 0.230769, score_lines_no_logic 0.461538; cate_var_zero is false and the remaining `*_quality_signal` columns are 0. As in the record above, the unsuffixed `qsc_*` columns are 0 (or null), `effective` is 1, and `hits` is 0.
**Example 13: `search/tests/test_read_similarities.py` from `cotsog/pathways-backend`**

hexsha `be47dbc95464f47bb2c554b62349cf2699343260`, size 1,868 bytes, ext `py`, lang Python. The max_stars, max_issues, and max_forks groups all reference path `search/tests/test_read_similarities.py` in `cotsog/pathways-backend` at head `9231731359fc97833dbdbca33ac23eebeac4f715` with licenses ["BSD-3-Clause"]; their counts and event datetimes are null.

`content`:

```python
from django.test import TestCase
from search.read_similarities import build_manual_similarity_map
from common.testhelpers.random_test_values import a_string, a_float
class TestReadingManualTaskSimilarities(TestCase):
def test_convert_matrix_to_map_from_topic_to_array_of_services(self):
data = [
['topic1', 'topic2'],
['service1', 'service2'],
]
expected_result = {
'topic1': ['service1'],
'topic2': ['service2'],
}
result = build_manual_similarity_map(data)
self.assertEqual(result, expected_result)
def test_can_handle_multiple_services_for_a_topic(self):
data = [
['topic1', ],
['service1'],
['service2'],
['service3'],
]
expected_result = {
'topic1': ['service1', 'service2', 'service3'],
}
result = build_manual_similarity_map(data)
self.assertEqual(result, expected_result)
def test_can_handle_different_numbers_of_services_for_different_topics(self):
data = [
['topic1', 'topic2'],
['service1', 'service2'],
['service3'],
]
expected_result = {
'topic1': ['service1', 'service3'],
'topic2': ['service2'],
}
result = build_manual_similarity_map(data)
self.assertEqual(result, expected_result)
def test_can_handle_empty_entries(self):
data = [
['topic1', 'topic2'],
['service1', 'service2'],
['', 'service3'],
[None, 'service4'],
]
expected_result = {
'topic1': ['service1'],
'topic2': ['service2', 'service3', 'service4'],
}
result = build_manual_similarity_map(data)
self.assertEqual(result, expected_result)
```

avg_line_length 32.206897, max_line_length 81, alphanum_fraction 0.571734.

Nonzero quality signals: num_words 165, num_chars 1,868, mean_word_length 6.121212, frac_words_unique 0.30303, frac_chars_top_2grams 0.110891, frac_chars_top_3grams 0.10396, frac_chars_top_4grams 0.118812, frac_chars_dupe_5grams 0.619802, frac_chars_dupe_6grams 0.619802, frac_chars_dupe_7grams 0.514851, frac_chars_dupe_8grams 0.348515, frac_chars_dupe_9grams 0.348515, frac_chars_dupe_10grams 0.348515, frac_chars_digital 0.029253, frac_chars_whitespace 0.304604, size_file_byte 1,868, num_lines 57, num_chars_line_max 82, num_chars_line_mean 32.77193, frac_chars_alphabet 0.748268, frac_lines_dupe_lines 0.538462, frac_chars_string_length 0.147752, frac_lines_assert 0.076923, cate_ast 1, frac_lines_func_ratio 0.076923, frac_lines_import 0.057692, score_lines_no_logic 0.153846; cate_var_zero is false and the remaining `*_quality_signal` columns are 0. As in the record above, the unsuffixed `qsc_*` columns are 0 (or null), `effective` is 1, and `hits` is 0.
**Example 14: `fortuna/fortuna.py` from `Zabamund/HackCPH18`**

hexsha `be47eadfdaf03e7261eb7070f1efcdf27e299506`, size 7,535 bytes, ext `py`, lang Python.

| | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | `fortuna/fortuna.py` | `fortuna/fortuna.py` | `fortuna/fortuna.py` |
| repo_name | `Zabamund/HackCPH18` | `Zabamund/HackCPH18` | `Zabamund/HackCPH18` |
| repo_head_hexsha | `3855547824c6277ca6f4e7b97c3ad0b3829e266b` | `3855547824c6277ca6f4e7b97c3ad0b3829e266b` | `3855547824c6277ca6f4e7b97c3ad0b3829e266b` |
| repo_licenses | ["MIT"] | ["MIT"] | ["MIT"] |
| count | 3 | 1 | 2 |
| event_min_datetime | 2018-06-09T08:03:31.000Z | 2020-03-30T20:23:17.000Z | 2018-06-09T06:45:53.000Z |
| event_max_datetime | 2018-11-23T20:18:06.000Z | 2020-03-30T20:23:17.000Z | 2018-06-09T15:36:36.000Z |

`content`:

```python
"""
Fortuna
Python project to visualize uncertatinty in probabilistic exploration models.
Created on 09/06/2018
@authors: Natalia Shchukina, Graham Brew, Marco van Veen, Behrooz Bashokooh, Tobias Stål, Robert Leckenby
"""
# Import libraries
import numpy as np
import glob
from matplotlib import pyplot as plt
import pandas as pd
import xarray as xr
import pyproj as proj
from scipy.stats import norm
class Fortuna(object):
"""
Class to load the fortuna dataset and call different methods for visualization in a web frontend.
Args:
There are no required arguments at the moment. Input files could be defined.
"""
def __init__(self, **kwargs):
"""
Method that is called when a object of the class Fortuna is initiated, it imports the data and directly creates some important variables.
"""
# hardcode geometry
self.size_raster = (250,162)
self.X_corner = 390885
self.Y_corner = 7156947
self.dx, self.dy, self.dz = 25, 25, 100
self.top_model = 950
self.bottom_model = 1050
self.base_cube = None
self.top_cube = None
self.base_n = None
self.top_n = None
self.vol = None
# Create empty xarray dataset
self.ds = xr.Dataset()
self.xx = None
self.yy = None
self.zz = None
self.model = None
self.base_mean = None
self.base_std = None
self.top_mean = None
self.top_std = None
## Initial methods to load
self.import_data()
self.calc_xarray()
self.calc_stat()
### Methods for initiating the object
def folder2cube(self, files):
"""
Method to read a file.
"""
base_set = glob.glob(files)
cube = np.zeros(self.size_raster + (len(base_set),))
for i, model in enumerate(base_set):
cube[:, :, i] = np.loadtxt(model, skiprows=1).reshape(self.size_raster)
return cube, len(base_set)
def import_data(self):
"""
Method to load different data objects from files.
"""
self.base_cube, self.base_n = self.folder2cube('data/Hackaton/BaseSet/MapSimu__*.data')
self.top_cube, self.top_n = self.folder2cube('data/Hackaton/TopSet/MapSimu__*.data')
self.vol = pd.read_csv('data/Hackaton/VolumeDistribution/Volumes', delim_whitespace=True)
def calc_xarray (self):
self.xx = np.linspace(self.X_corner, self.X_corner + self.size_raster[0] * self.dx, self.size_raster[0])
self.yy = np.linspace(self.Y_corner, self.Y_corner + self.size_raster[1] * self.dy, self.size_raster[1])
self.zz = np.linspace(self.top_model, self.bottom_model, self.dz)
self.model = np.linspace(0, self.top_model, self.base_n)
self.ds.coords['X'] = self.xx
self.ds.coords['Y'] = self.yy
self.ds.coords['Z'] = self.zz
self.ds.coords['MODEL'] = self.model
self.ds['BASE'] = (('X', 'Y', 'MODEL'), self.base_cube)
self.ds['TOP'] = (('X', 'Y', 'MODEL'), self.top_cube)
def calc_stat (self):
self.base_mean = self.ds['BASE'].mean(dim='MODEL')
self.base_std = self.ds['BASE'].std(dim='MODEL')
self.top_mean = self.ds['TOP'].mean(dim='MODEL')
self.top_std = self.ds['TOP'].std(dim='MODEL')
## Data Management methods
def load_pickle(self, path):
return np.load(path)
## Methods to compute different uncertatinty cubes --> cubes to be displayed in the frontend
def calc_lithology(self, iterations = 2):
"""
Sample from both distributions and fill each z-stack accordingly
"""
# create empty array
block = np.zeros((iterations, self.size_raster[0], self.size_raster[1], self.zz.size), dtype='int8')
for i in range(iterations):
for j in range(self.size_raster[0]): # size_raster[0]
for k in range(self.size_raster[1]):
# sample from top and base distributions for specific x,y position
top = np.random.normal(self.top_mean[j, k], self.top_std[j, k])
base = np.random.normal(self.base_mean[j, k], self.base_std[j, k])
# iterate over vertical z-stack
for l in range(self.zz.size):
if self.zz[l] <= top:
block[i, j, k, l] = 1
elif self.zz[l] > base:
block[i, j, k, l] = 3
elif ((self.zz[l] > top) and (self.zz[l] <= base)):
block[i, j, k, l] = 2
return block
def calc_lithology_vect(self, iterations=2):
"""
Resample from z value statistics and fill each z-stack in a lithology block accordingly.
This is the new method with vectorized operations to speed up calculations.
"""
# create empty array
block = np.zeros((iterations, self.xx.size, self.yy.size, self.zz.size), dtype='int8')
for i in range(iterations):
# create meshgrids for coordinate-wise indexing
mesh_x, mesh_y, mesh_z = np.meshgrid(np.arange(self.xx.size),
np.arange(self.yy.size),
np.arange(self.zz.size))
# sample from top and base distributions for specific x,y position
top = np.zeros([self.xx.size, self.yy.size])
base = np.zeros([self.xx.size, self.yy.size])
top[mesh_x, mesh_y] = np.random.normal(self.top_mean.values[mesh_x, mesh_y],
self.top_std.values[mesh_x, mesh_y])
base[mesh_x, mesh_y] = np.random.normal(self.base_mean.values[mesh_x, mesh_y],
self.base_std.values[mesh_x, mesh_y])
# compare each cell to resampled reference values
# TODO generalize for any number of lithologies
block[i, mesh_x, mesh_y, mesh_z] = np.where(self.zz < top[mesh_x, mesh_y], 1,
np.where(self.zz < base[mesh_x, mesh_y], 2, 3))
return block
### Modified from GemPy!
def calc_probability_lithology(self, cube):
"""Blocks must be just the lith blocks!"""
lith_blocks = cube.reshape([cube.shape[0], (self.xx.size * self.yy.size * self.zz.size)])
lith_id = np.unique(lith_blocks)
# lith_count = np.zeros_like(lith_blocks[0:len(lith_id)])
lith_count = np.zeros((len(np.unique(lith_blocks)), lith_blocks.shape[1]))
for i, l_id in enumerate(lith_id):
lith_count[i] = np.sum(lith_blocks == l_id, axis=0)
lith_prob = lith_count / len(lith_blocks)
return lith_prob
### Modified from GemPy!
def calc_information_entropy(self, lith_prob):
"""Calculates information entropy for the given probability array."""
cube = np.zeros_like(lith_prob[0])
for l in lith_prob:
pm = np.ma.masked_equal(l, 0) # mask where layer prob is 0
cube -= (pm * np.ma.log2(pm)).filled(0)
return cube.reshape([self.xx.size, self.yy.size, self.zz.size])
# Try numpy.flatten and numpy.ravel
## Simple plotting methods
def plot_entropy(self, cube, slice=10):
plt.imshow(cube[slice, :, :].T, origin='upper', cmap='viridis')
plt.show()
| 34.56422
| 145
| 0.584871
| 1,034
| 7,535
| 4.141199
| 0.259188
| 0.029426
| 0.035965
| 0.023354
| 0.226997
| 0.159972
| 0.146193
| 0.131714
| 0.100887
| 0.082205
| 0
| 0.014424
| 0.30073
| 7,535
| 218
| 146
| 34.56422
| 0.798254
| 0.228666
| 0
| 0.056075
| 0
| 0
| 0.0356
| 0.020114
| 0
| 0
| 0
| 0.004587
| 0
| 1
| 0.102804
| false
| 0
| 0.084112
| 0.009346
| 0.252336
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be47eb6ac22a5716a180d2587e75ad448943ea4f
| 1,104
|
py
|
Python
|
resize.py
|
Linx3/6.867-Final-Project
|
374d7093159be0bc524b291bacad52741f6bdc95
|
[
"MIT"
] | 3
|
2019-12-27T12:18:29.000Z
|
2020-02-10T22:40:36.000Z
|
resize.py
|
Linx3/6.867-Final-Project
|
374d7093159be0bc524b291bacad52741f6bdc95
|
[
"MIT"
] | null | null | null |
resize.py
|
Linx3/6.867-Final-Project
|
374d7093159be0bc524b291bacad52741f6bdc95
|
[
"MIT"
] | 2
|
2019-12-29T02:11:29.000Z
|
2020-02-10T19:49:41.000Z
|
from PIL import Image
# open an image file (.bmp,.jpg,.png,.gif) you have in the working folder
# //imageFile = "03802.png"
import os
arr=os.listdir()
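# list everything in the working directory; only filenames containing "png" are resized below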
for imageFile in arr:
if "png" in imageFile:
im1 = Image.open(imageFile)
# adjust width and height to your needs
width = 416
height = 416
# use one of these filter options to resize the image
im2 = im1.resize((width, height), Image.NEAREST) # use nearest neighbour
# im3 = im1.resize((width, height), Image.BILINEAR) # linear interpolation in a 2x2 environment
# im4 = im1.resize((width, height), Image.BICUBIC) # cubic spline interpolation in a 4x4 environment
# im5 = im1.resize((width, height), Image.ANTIALIAS) # best down-sizing filter
ext = ".png"
# print(imageFile.split(".")[0])
num=imageFile.split(".")[0]
print(num)
print(type(num))
im2.save(imageFile)
# im2.save(imageFile+ ext)
# im3.save("BILINEAR" + ext)
# im4.save("BICUBIC" + ext)
# im5.save("ANTIALIAS" + ext)
| 36.8
| 113
| 0.600543
| 141
| 1,104
| 4.702128
| 0.475177
| 0.054299
| 0.084465
| 0.120664
| 0.15083
| 0
| 0
| 0
| 0
| 0
| 0
| 0.038799
| 0.276268
| 1,104
| 29
| 114
| 38.068966
| 0.790989
| 0.566123
| 0
| 0
| 0
| 0
| 0.017316
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be4ff442cd8f9b517de533a73d5af1571d1d4790
| 2,517
|
py
|
Python
|
src/pipeline/sentence-retrieval/run.py
|
simonepri/fever-transformers
|
3e9c57b0b4e781f318438d48589a56db709124c4
|
[
"MIT"
] | 8
|
2020-05-03T08:40:24.000Z
|
2022-01-25T18:47:44.000Z
|
src/pipeline/sentence-retrieval/run.py
|
simonepri/fever-transformers
|
3e9c57b0b4e781f318438d48589a56db709124c4
|
[
"MIT"
] | null | null | null |
src/pipeline/sentence-retrieval/run.py
|
simonepri/fever-transformers
|
3e9c57b0b4e781f318438d48589a56db709124c4
|
[
"MIT"
] | 3
|
2020-05-02T20:21:45.000Z
|
2022-01-25T18:48:28.000Z
|
#!/usr/bin/env python3
import argparse
import bisect
import csv
import json
import os
from collections import defaultdict
from functools import reduce
from tqdm import tqdm
def get_best_evidence(scores_file, max_sentences_per_claim):
weighted_claim_evidence = defaultdict(lambda: [])
with open(scores_file, "r") as f:
nlines = reduce(lambda a, b: a + b, map(lambda x: 1, f.readlines()), 0)
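# nlines is simply the line count of the scores file, used as the tqdm progress total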
f.seek(0)
lines = csv.reader(f, delimiter="\t")
for line in tqdm(lines, desc="Score", total=nlines):
claim_id, claim, page, sent_id, sent, score = line
claim_id, sent_id, score = int(claim_id), int(sent_id), float(score)
evid = (page, sent_id, sent)
bisect.insort(weighted_claim_evidence[claim_id], (-score, evid))
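# scores are stored negated so bisect keeps the best (highest-scoring) evidence at the front; the sign is flipped back below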
if len(weighted_claim_evidence[claim_id]) > max_sentences_per_claim:
weighted_claim_evidence[claim_id].pop()
for claim_id in weighted_claim_evidence:
for i, (score, evid) in enumerate(weighted_claim_evidence[claim_id]):
weighted_claim_evidence[claim_id][i] = (-score, evid)
return weighted_claim_evidence
def main(scores_file, in_file, out_file, max_sentences_per_claim=None):
path = os.getcwd()
scores_file = os.path.join(path, scores_file)
in_file = os.path.join(path, in_file)
out_file = os.path.join(path, out_file)
best_evidence = get_best_evidence(scores_file, max_sentences_per_claim)
with open(out_file, "w+") as fout:
with open(in_file, "r") as fin:
nlines = reduce(lambda a, b: a + b, map(lambda x: 1, fin.readlines()), 0)
fin.seek(0)
lines = map(json.loads, fin.readlines())
for line in tqdm(lines, desc="Claim", total=nlines):
claim_id = line["id"]
line["predicted_sentences"] = best_evidence[claim_id]
fout.write(json.dumps(line) + "\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--scores-file", type=str)
parser.add_argument("--in-file", type=str, help="input dataset")
parser.add_argument("--out-file", type=str,
help="path to save output dataset")
parser.add_argument("--max-sentences-per-claim", type=int,
help="number of top sentences to return for each claim")
args = parser.parse_args()
main(args.scores_file, args.in_file, args.out_file, max_sentences_per_claim=args.max_sentences_per_claim)
| 40.596774
| 109
| 0.65594
| 355
| 2,517
| 4.408451
| 0.276056
| 0.049201
| 0.107348
| 0.089457
| 0.31246
| 0.201917
| 0.139297
| 0.099681
| 0.099681
| 0.042173
| 0
| 0.00359
| 0.225268
| 2,517
| 61
| 110
| 41.262295
| 0.798974
| 0.008343
| 0
| 0
| 0
| 0
| 0.076954
| 0.01002
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.16
| 0
| 0.22
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be514c5db015a36e1e21cf77afc4f28e841509a0
| 4,455
|
py
|
Python
|
bot/__main__.py
|
KOTBOTS/Telegram-CloneBot
|
446d66ba46817f784e8de2b8bd2966865ee1965f
|
[
"MIT"
] | 1
|
2021-11-10T05:06:00.000Z
|
2021-11-10T05:06:00.000Z
|
bot/__main__.py
|
KOTBOTS/Telegram-CloneBot
|
446d66ba46817f784e8de2b8bd2966865ee1965f
|
[
"MIT"
] | null | null | null |
bot/__main__.py
|
KOTBOTS/Telegram-CloneBot
|
446d66ba46817f784e8de2b8bd2966865ee1965f
|
[
"MIT"
] | 1
|
2022-01-30T08:50:28.000Z
|
2022-01-30T08:50:28.000Z
|
from telegram.ext import CommandHandler, run_async
from bot.gDrive import GoogleDriveHelper
from bot.fs_utils import get_readable_file_size
from bot import LOGGER, dispatcher, updater, bot
from bot.config import BOT_TOKEN, OWNER_ID, GDRIVE_FOLDER_ID
from bot.decorators import is_authorised, is_owner
from telegram.error import TimedOut, BadRequest
from bot.clone_status import CloneStatus
from bot.msg_utils import deleteMessage, sendMessage
import time
REPO_LINK = "https://t.me/KOT_BOTS"
# Soon to be used for direct updates from within the bot.
@run_async
def start(update, context):
sendMessage("Hello! Please send me a Google Drive Shareable Link to Clone to your Drive!" \
"\nSend /help for checking all available commands.",
context.bot, update, 'Markdown')
# ;-;
@run_async
def helper(update, context):
sendMessage("Here are the available commands of the bot\n\n" \
"*Usage:* `/clone <link> [DESTINATION_ID]`\n*Example:* \n1. `/clone https://drive.google.com/drive/u/1/folders/0AO-ISIXXXXXXXXXXXX`\n2. `/clone 0AO-ISIXXXXXXXXXXXX`" \
"\n*DESTIONATION_ID* is optional. It can be either link or ID to where you wish to store a particular clone." \
"\n\nYou can also *ignore folders* from clone process by doing the following:\n" \
"`/clone <FOLDER_ID> [DESTINATION] [id1,id2,id3]`\n In this example: id1, id2 and id3 would get ignored from cloning\nDo not use <> or [] in actual message." \
"*Make sure to not put any space between commas (,).*\n" \
f"Source of this bot: [GitHub]({REPO_LINK})", context.bot, update, 'Markdown')
# TODO Cancel Clones with /cancel command.
@run_async
@is_authorised
def cloneNode(update, context):
args = update.message.text.split(" ")
if len(args) > 1:
link = args[1]
try:
ignoreList = args[-1].split(',')
except IndexError:
ignoreList = []
DESTINATION_ID = GDRIVE_FOLDER_ID
try:
DESTINATION_ID = args[2]
print(DESTINATION_ID)
except IndexError:
pass
# Usage: /clone <FolderToClone> <Destination> <IDtoIgnoreFromClone>,<IDtoIgnoreFromClone>
msg = sendMessage(f"<b>Cloning:</b> <code>{link}</code>", context.bot, update)
status_class = CloneStatus()
gd = GoogleDriveHelper(GFolder_ID=DESTINATION_ID)
sendCloneStatus(update, context, status_class, msg, link)
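# sendCloneStatus is decorated with @run_async, so it keeps editing the status message while gd.clone runs below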
result = gd.clone(link, status_class, ignoreList=ignoreList)
deleteMessage(context.bot, msg)
status_class.set_status(True)
sendMessage(result, context.bot, update)
else:
sendMessage("Please Provide a Google Drive Shared Link to Clone.", bot, update)
@run_async
def sendCloneStatus(update, context, status, msg, link):
old_text = ''
while not status.done():
sleeper(3)
try:
text=f'🔗 *Cloning:* [{status.MainFolderName}]({status.MainFolderLink})\n━━━━━━━━━━━━━━\n🗃️ *Current File:* `{status.get_name()}`\n⬆️ *Transferred*: `{status.get_size()}`\n📁 *Destination:* [{status.DestinationFolderName}]({status.DestinationFolderLink})'
if status.checkFileStatus():
text += f"\n🕒 *Checking Existing Files:* `{str(status.checkFileStatus())}`"
if not text == old_text:
msg.edit_text(text=text, parse_mode="Markdown", timeout=200)
old_text = text
except Exception as e:
LOGGER.error(e)
if str(e) == "Message to edit not found":
break
sleeper(2)
continue
return
def sleeper(value, enabled=True):
time.sleep(int(value))
return
@run_async
@is_owner
def sendLogs(update, context):
with open('log.txt', 'rb') as f:
bot.send_document(document=f, filename=f.name,
reply_to_message_id=update.message.message_id,
chat_id=update.message.chat_id)
def main():
LOGGER.info("Bot Started!")
clone_handler = CommandHandler('clone', cloneNode)
start_handler = CommandHandler('start', start)
help_handler = CommandHandler('help', helper)
log_handler = CommandHandler('logs', sendLogs)
dispatcher.add_handler(log_handler)
dispatcher.add_handler(start_handler)
dispatcher.add_handler(clone_handler)
dispatcher.add_handler(help_handler)
updater.start_polling()
main()
| 40.87156
| 265
| 0.655892
| 557
| 4,455
| 5.166966
| 0.387792
| 0.017026
| 0.022238
| 0.028145
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005811
| 0.227385
| 4,455
| 108
| 266
| 41.25
| 0.82423
| 0.0422
| 0
| 0.130435
| 0
| 0.043478
| 0.298921
| 0.062412
| 0
| 0
| 0
| 0.009259
| 0
| 1
| 0.076087
| false
| 0.01087
| 0.108696
| 0
| 0.206522
| 0.01087
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be520ba7720ed297f3538b6906896f4c66ca61d8
| 8,180
|
py
|
Python
|
src/pyfinlab/risk_models.py
|
AnaSan27/pyfinlab
|
509cc9544af5e1a5b2b642eca9ae02d383dd743c
|
[
"BSD-3-Clause"
] | 1
|
2021-10-05T19:34:34.000Z
|
2021-10-05T19:34:34.000Z
|
src/pyfinlab/risk_models.py
|
AnaSan27/pyfinlab
|
509cc9544af5e1a5b2b642eca9ae02d383dd743c
|
[
"BSD-3-Clause"
] | null | null | null |
src/pyfinlab/risk_models.py
|
AnaSan27/pyfinlab
|
509cc9544af5e1a5b2b642eca9ae02d383dd743c
|
[
"BSD-3-Clause"
] | null | null | null |
import pandas as pd
import numpy as np
from portfoliolab.utils import RiskMetrics
from portfoliolab.estimators import RiskEstimators
from pypfopt import risk_models as risk_models_
"""
Available covariance risk models in PortfolioLab library.
https://hudson-and-thames-portfoliolab-pro.readthedocs-hosted.com/en/latest/estimators/risk_estimators.html
Available covariance risk models in PyPortfolioOpt library.
https://pyportfolioopt.readthedocs.io/en/latest/RiskModels.html#
These functions bring together all covariance matrix risk models from PortfolioLab and PyPortfolioOpt into one
function for ease of use.
"""
risk_met = RiskMetrics()
risk_estimators = RiskEstimators()
risk_models = [
# PyPortfolioOpt
'sample_cov',
'semicovariance',
'exp_cov',
'ledoit_wolf_constant_variance',
'ledoit_wolf_single_factor',
'ledoit_wolf_constant_correlation',
'oracle_approximating',
# PortfolioLab
'sample_covariance',
'minimum_covariance_determinant',
'empirical_covariance',
'shrinked_covariance_basic',
'shrinked_covariance_lw',
'shrinked_covariance_oas',
'semi_covariance',
'exponential_covariance',
'constant_residual_eigenvalues_denoised',
'constant_residual_spectral_denoised',
'targeted_shrinkage_denoised',
'targeted_shrinkage_detoned',
'constant_residual_detoned',
'hierarchical_filtered_complete',
'hierarchical_filtered_single',
'hierarchical_filtered_avg'
]
def risk_model(prices, model, kde_bwidth=0.01, basic_shrinkage=0.1):
"""
Calculates the covariance matrix for a dataframe of asset prices.
:param prices: (pd.DataFrame) Dataframe where each column is a series of prices for an asset.
:param model: (str) Risk model to use. Should be one of:
PyPortfolioOpt
- 'sample_cov',
- 'semicovariance',
- 'exp_cov',
- 'ledoit_wolf_constant_variance',
- 'ledoit_wolf_single_factor'
- 'ledoit_wolf_constant_correlation',
- 'oracle_approximating'
PortfolioLab
- 'sample_covariance',
- 'minimum_covariance_determinant',
- 'empirical_covariance',
- 'shrinked_covariance_basic',
- 'shrinked_covariance_lw',
- 'shrinked_covariance_oas',
- 'semi_covariance',
- 'exponential_covariance',
- 'constant_residual_eigenvalues_denoised',
- 'constant_residual_spectral_denoised',
- 'targeted_shrinkage_denoised',
- 'targeted_shrinkage_detoned',
- 'constant_residual_detoned',
- 'hierarchical_filtered_complete',
- 'hierarchical_filtered_single',
- 'hierarchical_filtered_avg'
:param kde_bwidth: (float) Optional, bandwidth of the kernel to fit KDE. (0.01 by default)
:param basic_shrinkage: (float) Optional, between 0 and 1. Coefficient in the convex combination for basic shrinkage.
(0.1 by default)
:return: (pd.DataFrame) Estimated covariance matrix.
"""
tn_relation = prices.shape[0] / prices.shape[1]
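# tn_relation is the ratio of observations to variables, required by the PortfolioLab denoising estimators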
sample_cov = prices.pct_change().dropna().cov()
empirical_cov = pd.DataFrame(risk_estimators.empirical_covariance(prices, price_data=True),
index=sample_cov.index, columns=sample_cov.columns)
empirical_corr = pd.DataFrame(risk_estimators.cov_to_corr(empirical_cov ** 2),
index=sample_cov.index, columns=sample_cov.columns)
std = np.diag(empirical_cov) ** (1 / 2)
if model == 'sample_covariance':
return prices.pct_change().dropna().cov()
elif model == 'minimum_covariance_determinant':
covariance_matrix = risk_estimators.minimum_covariance_determinant(prices, price_data=True)
elif model == 'empirical_covariance':
covariance_matrix = risk_estimators.empirical_covariance(prices, price_data=True)
elif model == 'shrinked_covariance_basic':
covariance_matrix = risk_estimators.shrinked_covariance(
prices, price_data=True, shrinkage_type='basic', basic_shrinkage=basic_shrinkage)
elif model == 'shrinked_covariance_lw':
covariance_matrix = risk_estimators.shrinked_covariance(
prices, price_data=True, shrinkage_type='lw', basic_shrinkage=basic_shrinkage)
elif model == 'shrinked_covariance_oas':
covariance_matrix = risk_estimators.shrinked_covariance(
prices, price_data=True, shrinkage_type='oas', basic_shrinkage=basic_shrinkage)
elif model == 'semi_covariance':
covariance_matrix = risk_estimators.semi_covariance(prices, price_data=True, threshold_return=0)
elif model == 'exponential_covariance':
covariance_matrix = risk_estimators.exponential_covariance(prices, price_data=True, window_span=60)
elif model == 'constant_residual_eigenvalues_denoised':
covariance_matrix = risk_estimators.denoise_covariance(
empirical_cov, tn_relation, denoise_method='const_resid_eigen', detone=False, kde_bwidth=kde_bwidth)
elif model == 'constant_residual_spectral_denoised':
covariance_matrix = risk_estimators.denoise_covariance(empirical_cov, tn_relation, denoise_method='spectral')
elif model == 'targeted_shrinkage_denoised':
covariance_matrix = risk_estimators.denoise_covariance(
empirical_cov, tn_relation, denoise_method='target_shrink', detone=False, kde_bwidth=kde_bwidth)
elif model == 'targeted_shrinkage_detoned':
covariance_matrix = risk_estimators.denoise_covariance(
empirical_cov, tn_relation, denoise_method='target_shrink', detone=True, kde_bwidth=kde_bwidth)
elif model == 'constant_residual_detoned':
covariance_matrix = risk_estimators.denoise_covariance(
empirical_cov, tn_relation, denoise_method='const_resid_eigen', detone=True, market_component=1,
kde_bwidth=kde_bwidth)
elif model == 'hierarchical_filtered_complete':
covariance_matrix = risk_estimators.corr_to_cov(risk_estimators.filter_corr_hierarchical(
empirical_corr.to_numpy(), method='complete', draw_plot=False), std)
elif model == 'hierarchical_filtered_single':
covariance_matrix = risk_estimators.corr_to_cov(risk_estimators.filter_corr_hierarchical(
empirical_corr.to_numpy(), method='single', draw_plot=False), std)
elif model == 'hierarchical_filtered_avg':
covariance_matrix = risk_estimators.corr_to_cov(risk_estimators.filter_corr_hierarchical(
empirical_corr.to_numpy(), method='average', draw_plot=False), std)
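# the PyPortfolioOpt models below return annualised covariances, so they are divided by 252 here; the final return value re-annualises by multiplying by 252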
elif model == 'sample_cov':
covariance_matrix = risk_models_.fix_nonpositive_semidefinite(
risk_models_.sample_cov(prices)) / 252
elif model == 'semicovariance':
covariance_matrix = risk_models_.fix_nonpositive_semidefinite(
risk_models_.semicovariance(prices)) / 252
elif model == 'exp_cov':
covariance_matrix = risk_models_.fix_nonpositive_semidefinite(
risk_models_.exp_cov(prices, span=180)) / 252
elif model == 'ledoit_wolf_constant_variance':
covariance_matrix = risk_models_.fix_nonpositive_semidefinite(
risk_models_.risk_matrix(prices, model)) / 252
elif model == 'ledoit_wolf_single_factor':
covariance_matrix = risk_models_.fix_nonpositive_semidefinite(
risk_models_.risk_matrix(prices, model)) / 252
elif model == 'ledoit_wolf_constant_correlation':
covariance_matrix = risk_models_.fix_nonpositive_semidefinite(
risk_models_.risk_matrix(prices, model)) / 252
elif model == 'oracle_approximating':
covariance_matrix = risk_models_.fix_nonpositive_semidefinite(
risk_models_.risk_matrix(prices, model)) / 252
else:
raise NameError('You must input a valid risk model. Check spelling; model names are case-sensitive.')
if not isinstance(covariance_matrix, pd.DataFrame):
covariance_matrix = pd.DataFrame(covariance_matrix, index=sample_cov.index, columns=sample_cov.columns).round(6)
return covariance_matrix * 252
| 49.575758
| 121
| 0.718093
| 895
| 8,180
| 6.192179
| 0.193296
| 0.083724
| 0.083003
| 0.081198
| 0.68044
| 0.631
| 0.588777
| 0.588777
| 0.488993
| 0.477806
| 0
| 0.007446
| 0.195477
| 8,180
| 164
| 122
| 49.878049
| 0.834676
| 0.178484
| 0
| 0.211009
| 0
| 0
| 0.203486
| 0.144021
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009174
| false
| 0
| 0.045872
| 0
| 0.073395
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be55e1c8b12cbd1b4bd83120c737d0990e906ce2
| 3,223
|
py
|
Python
|
citywok_ms/employee/routes.py
|
fossabot/CityWok-Manager
|
ccd31eb684ddeec5c741c9520c779d98eb0e3cc6
|
[
"MIT"
] | null | null | null |
citywok_ms/employee/routes.py
|
fossabot/CityWok-Manager
|
ccd31eb684ddeec5c741c9520c779d98eb0e3cc6
|
[
"MIT"
] | null | null | null |
citywok_ms/employee/routes.py
|
fossabot/CityWok-Manager
|
ccd31eb684ddeec5c741c9520c779d98eb0e3cc6
|
[
"MIT"
] | null | null | null |
from citywok_ms.file.models import EmployeeFile, File
import citywok_ms.employee.messages as employee_msg
import citywok_ms.file.messages as file_msg
from citywok_ms.employee.forms import EmployeeForm
from citywok_ms.file.forms import FileForm
from flask import Blueprint, flash, redirect, render_template, url_for
from citywok_ms.employee.models import Employee
employee = Blueprint("employee", __name__, url_prefix="/employee")
@employee.route("/")
def index():
return render_template(
"employee/index.html",
title=employee_msg.INDEX_TITLE,
active_employees=Employee.get_active(),
suspended_employees=Employee.get_suspended(),
)
@employee.route("/new", methods=["GET", "POST"])
def new():
form = EmployeeForm()
if form.validate_on_submit():
employee = Employee.create_by_form(form)
flash(employee_msg.NEW_SUCCESS.format(name=employee.full_name), "success")
return redirect(url_for("employee.index"))
return render_template(
"employee/form.html", title=employee_msg.NEW_TITLE, form=form
)
@employee.route("/<int:employee_id>")
def detail(employee_id):
return render_template(
"employee/detail.html",
title=employee_msg.DETAIL_TITLE,
employee=Employee.get_or_404(employee_id),
file_form=FileForm(),
)
@employee.route("/<int:employee_id>/update", methods=["GET", "POST"])
def update(employee_id):
employee = Employee.get_or_404(employee_id)
form = EmployeeForm()
form.hide_id.data = employee_id
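# presumably stashes the current employee id so the form's validators can skip this record when checking uniqueness (assumption based on the field name)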
if form.validate_on_submit():
employee.update_by_form(form)
flash(employee_msg.UPDATE_SUCCESS.format(name=employee.full_name), "success")
return redirect(url_for("employee.detail", employee_id=employee_id))
form.process(obj=employee)
return render_template(
"employee/form.html",
employee=employee,
form=form,
title=employee_msg.UPDATE_TITLE,
)
@employee.route("/<int:employee_id>/suspend", methods=["POST"])
def suspend(employee_id):
employee = Employee.get_or_404(employee_id)
employee.suspend()
flash(employee_msg.SUSPEND_SUCCESS.format(name=employee.full_name), "success")
return redirect(url_for("employee.detail", employee_id=employee_id))
@employee.route("/<int:employee_id>/activate", methods=["POST"])
def activate(employee_id):
employee = Employee.get_or_404(employee_id)
employee.activate()
flash(employee_msg.ACTIVATE_SUCCESS.format(name=employee.full_name), "success")
return redirect(url_for("employee.detail", employee_id=employee_id))
@employee.route("/<int:employee_id>/upload", methods=["POST"])
def upload(employee_id):
form = FileForm()
file = form.file.data
if form.validate_on_submit():
db_file = EmployeeFile.create_by_form(form, Employee.get_or_404(employee_id))
flash(file_msg.UPLOAD_SUCCESS.format(name=db_file.full_name), "success")
elif file is not None:
flash(
file_msg.INVALID_FORMAT.format(format=File.split_file_format(file)),
"danger",
)
else:
flash(file_msg.NO_FILE, "danger")
return redirect(url_for("employee.detail", employee_id=employee_id))
| 33.926316
| 85
| 0.714552
| 413
| 3,223
| 5.317191
| 0.169492
| 0.10929
| 0.090164
| 0.045537
| 0.448998
| 0.386157
| 0.290528
| 0.275046
| 0.275046
| 0.255009
| 0
| 0.005547
| 0.16103
| 3,223
| 94
| 86
| 34.287234
| 0.806583
| 0
| 0
| 0.207792
| 0
| 0
| 0.113249
| 0.031958
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.090909
| 0.025974
| 0.298701
| 0.025974
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be5634f2d2873fa0b75fded2fda0cc44792517a3
| 9,041
|
py
|
Python
|
kitsune/customercare/cron.py
|
safwanrahman/Ford
|
87e91dea1cc22b1759eea81cef069359ccb5cd0b
|
[
"BSD-3-Clause"
] | 1
|
2017-07-03T12:11:03.000Z
|
2017-07-03T12:11:03.000Z
|
kitsune/customercare/cron.py
|
feer56/Kitsune1
|
0b39cbc41cb7a067699ce8401d80205dd7c5138d
|
[
"BSD-3-Clause"
] | 8
|
2020-06-05T18:42:14.000Z
|
2022-03-11T23:26:51.000Z
|
kitsune/customercare/cron.py
|
safwanrahman/Ford
|
87e91dea1cc22b1759eea81cef069359ccb5cd0b
|
[
"BSD-3-Clause"
] | null | null | null |
import calendar
from datetime import datetime, timedelta
import json
import logging
import re
import rfc822
from django.conf import settings
from django.db.utils import IntegrityError
import cronjobs
from multidb.pinning import pin_this_thread
from statsd import statsd
from twython import Twython
from kitsune.customercare.models import Tweet, TwitterAccount, Reply
from kitsune.sumo.redis_utils import redis_client, RedisError
from kitsune.sumo.utils import chunked
LINK_REGEX = re.compile('https?\:', re.IGNORECASE)
RT_REGEX = re.compile('^rt\W', re.IGNORECASE)
ALLOWED_USERS = [
{'id': 2142731, 'username': 'Firefox'},
{'id': 150793437, 'username': 'FirefoxBrasil'},
{'id': 107272435, 'username': 'firefox_es'},
]
log = logging.getLogger('k.twitter')
def get_word_blacklist_regex():
"""
Make a regex that looks kind of like r'\b(foo|bar|baz)\b'.
This is a function so that it isn't calculated at import time,
and so can be tested more easily.
This doesn't use raw strings (r'') because the "mismatched" parens
were confusing my syntax highlighter, which was confusing me.
"""
return re.compile(
'\\b(' +
'|'.join(map(re.escape, settings.CC_WORD_BLACKLIST)) +
')\\b')
@cronjobs.register
def collect_tweets():
"""Collect new tweets about Firefox."""
# Don't (ab)use the twitter API from dev and stage.
if settings.STAGE:
return
with statsd.timer('customercare.tweets.time_elapsed'):
t = Twython(settings.TWITTER_CONSUMER_KEY,
settings.TWITTER_CONSUMER_SECRET,
settings.TWITTER_ACCESS_TOKEN,
settings.TWITTER_ACCESS_TOKEN_SECRET)
search_options = {
'q': ('firefox OR #fxinput OR @firefoxbrasil OR #firefoxos '
'OR @firefox_es'),
'count': settings.CC_TWEETS_PERPAGE, # Items per page.
'result_type': 'recent', # Retrieve tweets by date.
}
# If we already have some tweets, collect nothing older than what we
# have.
try:
latest_tweet = Tweet.latest()
except Tweet.DoesNotExist:
log.debug('No existing tweets. Retrieving %d tweets from search.' %
settings.CC_TWEETS_PERPAGE)
else:
search_options['since_id'] = latest_tweet.tweet_id
log.info('Retrieving tweets with id >= %s' % latest_tweet.tweet_id)
# Retrieve Tweets
results = t.search(**search_options)
if len(results['statuses']) == 0:
# Twitter returned 0 results.
return
# Drop tweets into DB
for item in results['statuses']:
# Apply filters to tweet before saving
# Allow links in #fxinput tweets
statsd.incr('customercare.tweet.collected')
item = _filter_tweet(item,
allow_links='#fxinput' in item['text'])
if not item:
continue
created_date = datetime.utcfromtimestamp(calendar.timegm(
rfc822.parsedate(item['created_at'])))
item_lang = item['metadata'].get('iso_language_code', 'en')
tweet = Tweet(tweet_id=item['id'], raw_json=json.dumps(item),
locale=item_lang, created=created_date)
try:
tweet.save()
statsd.incr('customercare.tweet.saved')
except IntegrityError:
pass
@cronjobs.register
def purge_tweets():
"""Periodically purge old tweets for each locale.
This does a lot of DELETEs on master, so it shouldn't run too frequently.
Probably once every hour or more.
"""
# Pin to master
pin_this_thread()
# Build list of tweets to delete, by id.
for locale in settings.SUMO_LANGUAGES:
locale = settings.LOCALES[locale].iso639_1
# Some locales don't have an iso639_1 code, too bad for them.
if not locale:
continue
oldest = _get_oldest_tweet(locale, settings.CC_MAX_TWEETS)
if oldest:
log.debug('Truncating tweet list: Removing tweets older than %s, '
'for [%s].' % (oldest.created, locale))
Tweet.objects.filter(locale=locale,
created__lte=oldest.created).delete()
def _get_oldest_tweet(locale, n=0):
"""Returns the nth oldest tweet per locale, defaults to newest."""
try:
return Tweet.objects.filter(locale=locale).order_by(
'-created')[n]
except IndexError:
return None
def _filter_tweet(item, allow_links=False):
"""
Apply some filters to an incoming tweet.
May modify tweet. If None is returned, tweet will be discarded.
Used to exclude replies and such from incoming tweets.
"""
text = item['text'].lower()
# No replies, except to ALLOWED_USERS
allowed_user_ids = [u['id'] for u in ALLOWED_USERS]
to_user_id = item.get('to_user_id')
if to_user_id and to_user_id not in allowed_user_ids:
statsd.incr('customercare.tweet.rejected.reply_or_mention')
return None
# No mentions, except of ALLOWED_USERS
for user in item['entities']['user_mentions']:
if user['id'] not in allowed_user_ids:
statsd.incr('customercare.tweet.rejected.reply_or_mention')
return None
# No retweets
if RT_REGEX.search(text) or text.find('(via ') > -1:
statsd.incr('customercare.tweet.rejected.retweet')
return None
# No links
if not allow_links and LINK_REGEX.search(text):
statsd.incr('customercare.tweet.rejected.link')
return None
screen_name = item['user']['screen_name']
# Django's caching system will save us here.
IGNORED_USERS = set(
TwitterAccount.objects
.filter(ignored=True)
.values_list('username', flat=True)
)
# Exclude filtered users
if screen_name in IGNORED_USERS:
statsd.incr('customercare.tweet.rejected.user')
return None
# Exclude users with firefox in the handle
if 'firefox' in screen_name.lower():
statsd.incr('customercare.tweet.rejected.firefox_in_handle')
return None
# Exclude problem words
match = get_word_blacklist_regex().search(text)
if match:
bad_word = match.group(1)
statsd.incr('customercare.tweet.rejected.blacklist_word.' + bad_word)
return None
return item
@cronjobs.register
def get_customercare_stats():
"""
Generate customer care stats from the Replies table.
This gets cached in Redis as a sorted list of contributors, stored as JSON.
Example Top Contributor data:
[
{
'twitter_username': 'username1',
'avatar': 'http://twitter.com/path/to/the/avatar.png',
'avatar_https': 'https://twitter.com/path/to/the/avatar.png',
'all': 5211,
'1m': 230,
'1w': 33,
'1d': 3,
},
{ ... },
{ ... },
]
"""
if settings.STAGE:
return
contributor_stats = {}
now = datetime.now()
one_month_ago = now - timedelta(days=30)
one_week_ago = now - timedelta(days=7)
yesterday = now - timedelta(days=1)
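# walk the Reply table in chunks of 2500 rows to keep memory use bounded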
for chunk in chunked(Reply.objects.all(), 2500, Reply.objects.count()):
for reply in chunk:
user = reply.twitter_username
if user not in contributor_stats:
raw = json.loads(reply.raw_json)
if 'from_user' in raw: # For tweets collected using v1 API
user_data = raw
else:
user_data = raw['user']
contributor_stats[user] = {
'twitter_username': user,
'avatar': user_data['profile_image_url'],
'avatar_https': user_data['profile_image_url_https'],
'all': 0, '1m': 0, '1w': 0, '1d': 0,
}
contributor = contributor_stats[reply.twitter_username]
contributor['all'] += 1
if reply.created > one_month_ago:
contributor['1m'] += 1
if reply.created > one_week_ago:
contributor['1w'] += 1
if reply.created > yesterday:
contributor['1d'] += 1
sort_key = settings.CC_TOP_CONTRIB_SORT
limit = settings.CC_TOP_CONTRIB_LIMIT
# Sort by whatever is in settings, break ties with 'all'
contributor_stats = sorted(contributor_stats.values(),
key=lambda c: (c[sort_key], c['all']),
reverse=True)[:limit]
try:
redis = redis_client(name='default')
key = settings.CC_TOP_CONTRIB_CACHE_KEY
redis.set(key, json.dumps(contributor_stats))
except RedisError as e:
statsd.incr('redis.error')
log.error('Redis error: %s' % e)
return contributor_stats
| 32.289286
| 79
| 0.605243
| 1,086
| 9,041
| 4.892265
| 0.314917
| 0.018822
| 0.037267
| 0.045737
| 0.121024
| 0.056465
| 0.042914
| 0.032373
| 0.032373
| 0.032373
| 0
| 0.0127
| 0.294547
| 9,041
| 279
| 80
| 32.405018
| 0.82032
| 0.203075
| 0
| 0.153846
| 0
| 0
| 0.140833
| 0.054673
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035503
| false
| 0.005917
| 0.088757
| 0
| 0.213018
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be5b35007ab39510b966782ec2dccb27e2f0b068
| 2,429
|
py
|
Python
|
checkAnnotation.py
|
ZZIDZZ/pytorch-ssd
|
8d3ad092825d6f05b8a3fa7c25be7b541bf86ed9
|
[
"MIT"
] | null | null | null |
checkAnnotation.py
|
ZZIDZZ/pytorch-ssd
|
8d3ad092825d6f05b8a3fa7c25be7b541bf86ed9
|
[
"MIT"
] | null | null | null |
checkAnnotation.py
|
ZZIDZZ/pytorch-ssd
|
8d3ad092825d6f05b8a3fa7c25be7b541bf86ed9
|
[
"MIT"
] | null | null | null |
import argparse
import sys
import cv2
import os
import os.path as osp
import numpy as np
if sys.version_info[0] == 2:
import xml.etree.cElementTree as ET
else:
import xml.etree.ElementTree as ET
parser = argparse.ArgumentParser(
description='Single Shot MultiBox Detector Training With Pytorch')
train_set = parser.add_mutually_exclusive_group()
parser.add_argument('--root', help='Dataset root directory path')
args = parser.parse_args()
CLASSES = ( # always index 0
'helmet', 'vest', 'no_helmet')
annopath = osp.join('%s', 'Annotations', '%s.{}'.format("xml"))
imgpath = osp.join('%s', 'JPEGImages', '%s.{}'.format("jpg"))
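# the templates above are completed later via the % operator with (dataset root, image id)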
def vocChecker(image_id, width, height, keep_difficult = False):
target = ET.parse(annopath % image_id).getroot()
res = []
for obj in target.iter('object'):
difficult = int(obj.find('difficult').text) == 1
if not keep_difficult and difficult:
continue
name = obj.find('name').text.lower().strip()
bbox = obj.find('bndbox')
pts = ['xmin', 'ymin', 'xmax', 'ymax']
bndbox = []
for i, pt in enumerate(pts):
cur_pt = int(bbox.find(pt).text) - 1
# scale height or width
cur_pt = float(cur_pt) / width if i % 2 == 0 else float(cur_pt) / height
bndbox.append(cur_pt)
print(name)
label_idx = dict(zip(CLASSES, range(len(CLASSES))))[name]
bndbox.append(label_idx)
res += [bndbox] # [xmin, ymin, xmax, ymax, label_ind]
# img_id = target.find('filename').text[:-4]
print(res)
try :
print(np.array(res)[:,4])
print(np.array(res)[:,:4])
except IndexError:
print("\nINDEX ERROR HERE !\n")
exit(0)
return res # [[xmin, ymin, xmax, ymax, label_ind], ... ]
if __name__ == '__main__' :
i = 0
for name in sorted(os.listdir(osp.join(args.root,'Annotations'))):
# as we have only one annotations file per image
i += 1
img = cv2.imread(imgpath % (args.root,name.split('.')[0]))
height, width, channels = img.shape
print("path : {}".format(annopath % (args.root, name.split('.')[0])))
res = vocChecker((args.root, name.split('.')[0]), width, height)
print("Total of annotations : {}".format(i))
| 29.987654
| 84
| 0.588308
| 316
| 2,429
| 4.427215
| 0.43038
| 0.01787
| 0.03431
| 0.048606
| 0.15797
| 0.12223
| 0.08792
| 0.067191
| 0.067191
| 0.067191
| 0
| 0.010463
| 0.252367
| 2,429
| 81
| 85
| 29.987654
| 0.759912
| 0.084809
| 0
| 0.035714
| 0
| 0
| 0.11908
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017857
| false
| 0
| 0.142857
| 0
| 0.178571
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be5d745da0aee12618b5456e7d8cbede2e23e222
| 656
|
py
|
Python
|
venv/lib/python3.7/site-packages/convertdate/dublin.py
|
vchiapaikeo/prophet
|
e8c250ca7bfffc280baa7dabc80a2c2d1f72c6a7
|
[
"MIT"
] | null | null | null |
venv/lib/python3.7/site-packages/convertdate/dublin.py
|
vchiapaikeo/prophet
|
e8c250ca7bfffc280baa7dabc80a2c2d1f72c6a7
|
[
"MIT"
] | null | null | null |
venv/lib/python3.7/site-packages/convertdate/dublin.py
|
vchiapaikeo/prophet
|
e8c250ca7bfffc280baa7dabc80a2c2d1f72c6a7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# This file is part of convertdate.
# http://github.com/fitnr/convertdate
# Licensed under the MIT license:
# http://opensource.org/licenses/MIT
# Copyright (c) 2016, fitnr <fitnr@fakeisthenewreal>
'''Convert to and from the Dublin day count'''
from . import daycount
EPOCH = 2415020 # Julian Day Count for Dublin Count 0
_dublin = daycount.DayCount(EPOCH)
to_gregorian = _dublin.to_gregorian
from_gregorian = _dublin.from_gregorian
to_jd = _dublin.to_jd
from_jd = _dublin.from_jd
from_julian = _dublin.from_julian
to_julian = _dublin.to_julian
to_datetime = _dublin.to_datetime
from_datetime = _dublin.from_datetime
| 19.878788
| 54
| 0.762195
| 94
| 656
| 5.053191
| 0.446809
| 0.067368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023091
| 0.141768
| 656
| 32
| 55
| 20.5
| 0.820604
| 0.4375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.090909
| 0
| 0.090909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be5e4769d08439109a7dee5ae6c729de8b3ba612
| 1,232
|
py
|
Python
|
code/generate_thought_vectors.py
|
midas-research/text2facegan
|
3770333f16234fc9328d8254d1c1112fad15a16c
|
[
"MIT"
] | 23
|
2020-04-09T19:17:46.000Z
|
2021-04-13T13:46:06.000Z
|
code/generate_thought_vectors.py
|
midas-research/text2facegan
|
3770333f16234fc9328d8254d1c1112fad15a16c
|
[
"MIT"
] | 3
|
2020-02-16T16:21:38.000Z
|
2021-05-22T13:18:57.000Z
|
code/generate_thought_vectors.py
|
midas-research/text2facegan
|
3770333f16234fc9328d8254d1c1112fad15a16c
|
[
"MIT"
] | 7
|
2020-02-27T22:27:33.000Z
|
2021-03-16T06:03:32.000Z
|
import os
from os.path import join, isfile
import re
import numpy as np
import pickle
import argparse
import skipthoughts
import h5py
def main():
parser = argparse.ArgumentParser()
#parser.add_argument('--caption_file', type=str, default='Data/sample_captions.txt',
# help='caption file')
parser.add_argument('--caption_file', type=str, default='/media/ssd_working_space/osaid/Data/sample_captions.txt',
help='caption file')
#parser.add_argument('--data_dir', type=str, default='Data',
# help='Data Directory')
parser.add_argument('--data_dir', type=str, default='/media/ssd_working_space/osaid/Data',
help='Data Directory')
args = parser.parse_args()
with open( args.caption_file ) as f:
captions = f.read().split('\n')
captions = [cap for cap in captions if len(cap) > 0]
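# drop empty strings left by trailing newlines in the captions file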
print(captions)
model = skipthoughts.load_model()
caption_vectors = skipthoughts.encode(model, captions)
if os.path.isfile(join(args.data_dir, 'sample_caption_vectors.hdf5')):
os.remove(join(args.data_dir, 'sample_caption_vectors.hdf5'))
h = h5py.File(join(args.data_dir, 'sample_caption_vectors.hdf5'), 'w')
h.create_dataset('vectors', data=caption_vectors)
h.close()
if __name__ == '__main__':
main()
| 30.8
| 115
| 0.728896
| 175
| 1,232
| 4.92
| 0.36
| 0.063879
| 0.078978
| 0.052265
| 0.470383
| 0.470383
| 0.470383
| 0.470383
| 0.311266
| 0.123113
| 0
| 0.005576
| 0.126623
| 1,232
| 40
| 116
| 30.8
| 0.79461
| 0.160714
| 0
| 0
| 0
| 0
| 0.230844
| 0.165858
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.285714
| 0
| 0.321429
| 0.035714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be5f92734068facbaab6ebcd59a70aae8bdb395f
| 415
|
py
|
Python
|
venv/Lib/site-packages/mcipc/rcon/response_types/difficulty.py
|
Svesnav2/Discord-Bot-Minecraft-server-status
|
ee34948e741930567a3adb557197523f9d32ace1
|
[
"Unlicense"
] | null | null | null |
venv/Lib/site-packages/mcipc/rcon/response_types/difficulty.py
|
Svesnav2/Discord-Bot-Minecraft-server-status
|
ee34948e741930567a3adb557197523f9d32ace1
|
[
"Unlicense"
] | null | null | null |
venv/Lib/site-packages/mcipc/rcon/response_types/difficulty.py
|
Svesnav2/Discord-Bot-Minecraft-server-status
|
ee34948e741930567a3adb557197523f9d32ace1
|
[
"Unlicense"
] | null | null | null |
"""Parsing responses from the difficulty command."""
from mcipc.rcon.functions import boolmap
__all__ = ['parse']
SET = 'The difficulty has been set to (\\w+)'
UNCHANGED = 'The difficulty did not change; it is already set to (\\w+)'
def parse(text: str) -> bool:
"""Parses a boolean value from the text
returned by the difficulty command.
"""
return boolmap(text, true=SET, false=UNCHANGED)
| 21.842105
| 72
| 0.684337
| 58
| 415
| 4.827586
| 0.655172
| 0.185714
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.195181
| 415
| 18
| 73
| 23.055556
| 0.838323
| 0.286747
| 0
| 0
| 0
| 0
| 0.362319
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.166667
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be6134b8d63935100cb7803033cbd22148a4202a
| 1,558
|
py
|
Python
|
eth/beacon/aggregation.py
|
Bhargavasomu/py-evm
|
ee8f72d5a70805575a967cde0a43942e1526264e
|
[
"MIT"
] | null | null | null |
eth/beacon/aggregation.py
|
Bhargavasomu/py-evm
|
ee8f72d5a70805575a967cde0a43942e1526264e
|
[
"MIT"
] | null | null | null |
eth/beacon/aggregation.py
|
Bhargavasomu/py-evm
|
ee8f72d5a70805575a967cde0a43942e1526264e
|
[
"MIT"
] | null | null | null |
from typing import (
Iterable,
Tuple,
)
from cytoolz import (
pipe
)
from eth._utils import bls
from eth._utils.bitfield import (
set_voted,
)
from eth.beacon.enums import SignatureDomain
from eth.beacon.typing import (
BLSPubkey,
BLSSignature,
Bitfield,
CommitteeIndex,
)
def verify_votes(
message: bytes,
votes: Iterable[Tuple[CommitteeIndex, BLSSignature, BLSPubkey]],
domain: SignatureDomain
) -> Tuple[Tuple[BLSSignature, ...], Tuple[CommitteeIndex, ...]]:
"""
Verify the given votes.
vote: (committee_index, sig, public_key)
"""
sigs_with_committee_info = tuple(
(sig, committee_index)
for (committee_index, sig, public_key)
in votes
if bls.verify(message, public_key, sig, domain)
)
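# zip(*...) raises ValueError when no vote passed verification; fall back to empty tuples in that case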
try:
sigs, committee_indices = zip(*sigs_with_committee_info)
except ValueError:
sigs = tuple()
committee_indices = tuple()
return sigs, committee_indices
def aggregate_votes(
bitfield: Bitfield,
sigs: Iterable[BLSSignature],
voting_sigs: Iterable[BLSSignature],
voting_committee_indices: Iterable[CommitteeIndex]
) -> Tuple[Bitfield, BLSSignature]:
"""
Aggregate the votes.
"""
# Update the bitfield and append the signatures
sigs = tuple(sigs) + tuple(voting_sigs)
bitfield = pipe(
bitfield,
*(
set_voted(index=committee_index)
for committee_index in voting_committee_indices
)
)
return bitfield, bls.aggregate_signatures(sigs)
| 23.253731
| 68
| 0.662388
| 165
| 1,558
| 6.072727
| 0.315152
| 0.06986
| 0.023952
| 0.045908
| 0.0998
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.245186
| 1,558
| 66
| 69
| 23.606061
| 0.852041
| 0.085366
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.12
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be6143e65d151cdd084aada126448567dcd0c1d7
| 7,090
|
py
|
Python
|
src/server/bos/controllers/v1/components.py
|
Cray-HPE/bos
|
a4a7fc58c884d951b6051093e1a4e2aeaba6740f
|
[
"MIT"
] | 1
|
2022-03-15T18:17:11.000Z
|
2022-03-15T18:17:11.000Z
|
src/server/bos/controllers/v1/components.py
|
Cray-HPE/bos
|
a4a7fc58c884d951b6051093e1a4e2aeaba6740f
|
[
"MIT"
] | null | null | null |
src/server/bos/controllers/v1/components.py
|
Cray-HPE/bos
|
a4a7fc58c884d951b6051093e1a4e2aeaba6740f
|
[
"MIT"
] | 1
|
2022-03-06T12:47:06.000Z
|
2022-03-06T12:47:06.000Z
|
# Copyright 2021 Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# (MIT License)
import connexion
from datetime import datetime
import logging
from bos import redis_db_utils as dbutils
LOGGER = logging.getLogger('bos.controllers.v1.components')
DB = dbutils.get_wrapper(db='components')
@dbutils.redis_error_handler
def get_components(ids="", enabled=None):
"""Used by the GET /components API operation
Allows filtering using a comma separated list of ids.
"""
LOGGER.debug("GET /components invoked get_components")
id_list = []
if ids:
try:
id_list = ids.split(',')
except Exception as err:
return connexion.problem(
status=400, title="Error parsing the ids provided.",
detail=str(err))
response = get_components_data(id_list=id_list, enabled=enabled)
return response, 200
def get_components_data(id_list=None, enabled=None):
"""Used by the GET /components API operation
Allows filtering using a comma separated list of ids.
"""
response = []
if id_list:
for component_id in id_list:
data = DB.get(component_id)
if data:
response.append(data)
else:
# TODO: On large scale systems, this response may be too large
# and require paging to be implemented
response = DB.get_all()
if enabled is not None:
response = [r for r in response if _matches_filter(r, enabled)]
return response
def _matches_filter(data, enabled):
if enabled is not None and data.get('enabled', None) != enabled:
return False
return True
@dbutils.redis_error_handler
def put_components():
"""Used by the PUT /components API operation"""
LOGGER.debug("PUT /components invoked put_components")
try:
data = connexion.request.get_json()
components = []
for component_data in data:
component_id = component_data['id']
components.append((component_id, component_data))
except Exception as err:
return connexion.problem(
status=400, title="Error parsing the data provided.",
detail=str(err))
response = []
for component_id, component_data in components:
component_data = _set_auto_fields(component_data)
response.append(DB.put(component_id, component_data))
return response, 200
@dbutils.redis_error_handler
def patch_components():
"""Used by the PATCH /components API operation"""
LOGGER.debug("PATCH /components invoked patch_components")
try:
data = connexion.request.get_json()
components = []
for component_data in data:
component_id = component_data['id']
if component_id not in DB:
return connexion.problem(
status=404, title="Component could not found.",
detail="Component {} could not be found".format(component_id))
components.append((component_id, component_data))
except Exception as err:
return connexion.problem(
status=400, title="Error parsing the data provided.",
detail=str(err))
response = []
for component_id, component_data in components:
component_data = _set_auto_fields(component_data)
response.append(DB.patch(component_id, component_data, _update_handler))
return response, 200
@dbutils.redis_error_handler
def get_component(component_id, config_details=False, v2=False):
"""Used by the GET /components/{component_id} API operation"""
LOGGER.debug("GET /components/id invoked get_component")
if component_id not in DB:
return connexion.problem(
status=404, title="Component could not found.",
detail="Component {} could not be found".format(component_id))
component = DB.get(component_id)
return component, 200
@dbutils.redis_error_handler
def put_component(component_id):
"""Used by the PUT /components/{component_id} API operation"""
LOGGER.debug("PUT /components/id invoked put_component")
try:
data = connexion.request.get_json()
except Exception as err:
return connexion.problem(
status=400, title="Error parsing the data provided.",
detail=str(err))
data['id'] = component_id
data = _set_auto_fields(data)
return DB.put(component_id, data), 200
@dbutils.redis_error_handler
def patch_component(component_id):
"""Used by the PATCH /components/{component_id} API operation"""
LOGGER.debug("PATCH /components/id invoked patch_component")
if component_id not in DB:
return connexion.problem(
status=404, title="Component could not found.",
detail="Component {} could not be found".format(component_id))
try:
data = connexion.request.get_json()
except Exception as err:
return connexion.problem(
status=400, title="Error parsing the data provided.",
detail=str(err))
data = _set_auto_fields(data)
return DB.patch(component_id, data, _update_handler), 200
@dbutils.redis_error_handler
def delete_component(component_id):
"""Used by the DELETE /components/{component_id} API operation"""
LOGGER.debug("DELETE /components/id invoked delete_component")
if component_id not in DB:
return connexion.problem(
status=404, title="Component could not found.",
detail="Component {} could not be found".format(component_id))
return DB.delete(component_id), 204
def _set_auto_fields(data):
data = _set_last_updated(data)
return data
def _set_last_updated(data):
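# stamp each state section present in the payload with the current UTC time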
timestamp = datetime.utcnow().isoformat()
for section in ['actualState', 'desiredState', 'lastAction']:
if section in data and type(data[section]) == dict:
data[section]['lastUpdated'] = timestamp
return data
def _update_handler(data):
# Allows processing of data during common patch operation
return data
| 36.173469
| 82
| 0.687729
| 914
| 7,090
| 5.199125
| 0.221007
| 0.071759
| 0.041667
| 0.05303
| 0.529672
| 0.492635
| 0.426347
| 0.369529
| 0.35101
| 0.35101
| 0
| 0.009847
| 0.226516
| 7,090
| 195
| 83
| 36.358974
| 0.856674
| 0.248237
| 0
| 0.569231
| 0
| 0
| 0.147104
| 0.005526
| 0
| 0
| 0
| 0.005128
| 0
| 1
| 0.092308
| false
| 0
| 0.030769
| 0.007692
| 0.292308
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be644a96343b814a2cf63e0bf374f535055ecf7e
| 6,856
|
py
|
Python
|
test/mitmproxy/addons/test_proxyserver.py
|
KarlParkinson/mitmproxy
|
fd5caf40c75ca73c4b767170497abf6a5bf016a0
|
[
"MIT"
] | 24,939
|
2015-01-01T17:13:21.000Z
|
2022-03-31T17:50:04.000Z
|
test/mitmproxy/addons/test_proxyserver.py
|
KarlParkinson/mitmproxy
|
fd5caf40c75ca73c4b767170497abf6a5bf016a0
|
[
"MIT"
] | 3,655
|
2015-01-02T12:31:43.000Z
|
2022-03-31T20:24:57.000Z
|
test/mitmproxy/addons/test_proxyserver.py
|
KarlParkinson/mitmproxy
|
fd5caf40c75ca73c4b767170497abf6a5bf016a0
|
[
"MIT"
] | 3,712
|
2015-01-06T06:47:06.000Z
|
2022-03-31T10:33:27.000Z
|
import asyncio
from contextlib import asynccontextmanager
import pytest
from mitmproxy import exceptions
from mitmproxy.addons.proxyserver import Proxyserver
from mitmproxy.connection import Address
from mitmproxy.proxy import layers, server_hooks
from mitmproxy.proxy.layers.http import HTTPMode
from mitmproxy.test import taddons, tflow
from mitmproxy.test.tflow import tclient_conn, tserver_conn
class HelperAddon:
def __init__(self):
self.flows = []
self.layers = [
lambda ctx: layers.modes.HttpProxy(ctx),
lambda ctx: layers.HttpLayer(ctx, HTTPMode.regular),
lambda ctx: layers.TCPLayer(ctx),
]
def request(self, f):
self.flows.append(f)
def tcp_start(self, f):
self.flows.append(f)
def next_layer(self, nl):
nl.layer = self.layers.pop(0)(nl.context)
@asynccontextmanager
async def tcp_server(handle_conn) -> Address:
server = await asyncio.start_server(handle_conn, '127.0.0.1', 0)
await server.start_serving()
try:
yield server.sockets[0].getsockname()
finally:
server.close()
@pytest.mark.asyncio
async def test_start_stop():
async def server_handler(reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
assert await reader.readuntil(b"\r\n\r\n") == b"GET /hello HTTP/1.1\r\n\r\n"
writer.write(b"HTTP/1.1 204 No Content\r\n\r\n")
await writer.drain()
writer.close()
ps = Proxyserver()
with taddons.context(ps) as tctx:
state = HelperAddon()
tctx.master.addons.add(state)
async with tcp_server(server_handler) as addr:
tctx.configure(ps, listen_host="127.0.0.1", listen_port=0)
assert not ps.server
ps.running()
await tctx.master.await_log("Proxy server listening", level="info")
assert ps.server
proxy_addr = ps.server.sockets[0].getsockname()[:2]
reader, writer = await asyncio.open_connection(*proxy_addr)
req = f"GET http://{addr[0]}:{addr[1]}/hello HTTP/1.1\r\n\r\n"
writer.write(req.encode())
assert await reader.readuntil(b"\r\n\r\n") == b"HTTP/1.1 204 No Content\r\n\r\n"
assert repr(ps) == "ProxyServer(running, 1 active conns)"
tctx.configure(ps, server=False)
await tctx.master.await_log("Stopping server", level="info")
assert not ps.server
assert state.flows
assert state.flows[0].request.path == "/hello"
assert state.flows[0].response.status_code == 204
# Waiting here until everything is really torn down... takes some effort.
conn_handler = list(ps._connections.values())[0]
client_handler = conn_handler.transports[conn_handler.client].handler
writer.close()
await writer.wait_closed()
try:
await client_handler
except asyncio.CancelledError:
pass
for _ in range(5):
# Get all other scheduled coroutines to run.
await asyncio.sleep(0)
assert repr(ps) == "ProxyServer(stopped, 0 active conns)"
@pytest.mark.asyncio
async def test_inject() -> None:
async def server_handler(reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
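# minimal echo server for the test: upper-case each byte received, one byte at a time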
while s := await reader.read(1):
writer.write(s.upper())
ps = Proxyserver()
with taddons.context(ps) as tctx:
state = HelperAddon()
tctx.master.addons.add(state)
async with tcp_server(server_handler) as addr:
tctx.configure(ps, listen_host="127.0.0.1", listen_port=0)
ps.running()
await tctx.master.await_log("Proxy server listening", level="info")
proxy_addr = ps.server.sockets[0].getsockname()[:2]
reader, writer = await asyncio.open_connection(*proxy_addr)
req = f"CONNECT {addr[0]}:{addr[1]} HTTP/1.1\r\n\r\n"
writer.write(req.encode())
assert await reader.readuntil(b"\r\n\r\n") == b"HTTP/1.1 200 Connection established\r\n\r\n"
writer.write(b"a")
assert await reader.read(1) == b"A"
ps.inject_tcp(state.flows[0], False, b"b")
assert await reader.read(1) == b"B"
ps.inject_tcp(state.flows[0], True, b"c")
assert await reader.read(1) == b"c"
@pytest.mark.asyncio
async def test_inject_fail() -> None:
ps = Proxyserver()
with taddons.context(ps) as tctx:
ps.inject_websocket(
tflow.tflow(),
True,
b"test"
)
await tctx.master.await_log("Cannot inject WebSocket messages into non-WebSocket flows.", level="warn")
ps.inject_tcp(
tflow.tflow(),
True,
b"test"
)
await tctx.master.await_log("Cannot inject TCP messages into non-TCP flows.", level="warn")
ps.inject_websocket(
tflow.twebsocketflow(),
True,
b"test"
)
await tctx.master.await_log("Flow is not from a live connection.", level="warn")
ps.inject_websocket(
tflow.ttcpflow(),
True,
b"test"
)
await tctx.master.await_log("Flow is not from a live connection.", level="warn")
@pytest.mark.asyncio
async def test_warn_no_nextlayer():
"""
Test that we log an error if the proxy server is started without NextLayer addon.
That is a mean trap to fall into when writing end-to-end tests.
"""
ps = Proxyserver()
with taddons.context(ps) as tctx:
tctx.configure(ps, listen_host="127.0.0.1", listen_port=0)
ps.running()
await tctx.master.await_log("Proxy server listening at", level="info")
assert tctx.master.has_log("Warning: Running proxyserver without nextlayer addon!", level="warn")
await ps.shutdown_server()
def test_self_connect():
server = tserver_conn()
client = tclient_conn()
server.address = ("localhost", 8080)
ps = Proxyserver()
with taddons.context(ps) as tctx:
# not calling .running() here to avoid unnecessary socket
ps.options = tctx.options
ps.server_connect(
server_hooks.ServerConnectionHookData(server, client)
)
assert server.error == "Stopped mitmproxy from recursively connecting to itself."
def test_options():
ps = Proxyserver()
with taddons.context(ps) as tctx:
with pytest.raises(exceptions.OptionsError):
tctx.configure(ps, body_size_limit="invalid")
tctx.configure(ps, body_size_limit="1m")
with pytest.raises(exceptions.OptionsError):
tctx.configure(ps, stream_large_bodies="invalid")
tctx.configure(ps, stream_large_bodies="1m")
be64e074af6729b6171d5eed328bc46d2d983abb | 19,608 bytes | Python (py) | tensorflow_probability/python/distributions/masked.py
mederrata/probability @ bc6c411b0fbd83141f303f91a27343fe3c43a797 | ["Apache-2.0"] | 1 star (2022-03-22T11:56:31.000Z)
robot0102/probability @ 89d248c420b8ecabfd9d6de4a1aa8d3886920049 | ["Apache-2.0"]
# Copyright 2021 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The MaskedIndependent distribution class."""
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import bijector as bijector_lib
from tensorflow_probability.python.distributions import batch_broadcast
from tensorflow_probability.python.distributions import distribution as distribution_lib
from tensorflow_probability.python.distributions import kullback_leibler
from tensorflow_probability.python.distributions import log_prob_ratio
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.internal import tensor_util
def _add_event_dims_to_mask(validity_mask, *, dist=None, event_ndims=None):
validity_mask = tf.convert_to_tensor(validity_mask)
if event_ndims is None:
event_ndims = ps.rank_from_shape(dist.event_shape_tensor())
return tf.reshape(
validity_mask,
ps.concat([
ps.shape(validity_mask),
ps.ones(event_ndims, dtype=tf.int32)
], axis=0))
def _make_masked_fn(fn_name, n_event_shapes, safe_value,
make_arg0_safe=False):
"""Implements functions like mean, variance, etc.
Args:
fn_name: Name of the method called on the underlying distribution.
n_event_shapes: Number of event shape repeats in the shape of the underlying
function's output.
safe_value: The value to be placed in invalid locations. May be
`'safe_sample'` to specify we should use the "safe sample" value.
make_arg0_safe: If `True`, we will apply `self.safe_sample_fn` to ensure the
argument passed into the underlying routine is a "safe" sample.
Returns:
fn: Callable implementing the given function.
"""
def fn(self, *args, **kwargs):
if safe_value == 'safe_sample' or make_arg0_safe: # Only if needed.
safe_val = tf.stop_gradient(self.safe_sample_fn(self.distribution))
validity_mask = tf.convert_to_tensor(self.validity_mask)
if make_arg0_safe:
x = args[0]
safe_x = tf.where(
_add_event_dims_to_mask(validity_mask, dist=self), x, safe_val)
args = (safe_x,) + args[1:]
val = getattr(self.distribution, fn_name)(*args, **kwargs)
if n_event_shapes:
validity_mask = tf.reshape(
validity_mask,
ps.concat(
[ps.shape(validity_mask)] +
[ps.ones_like(self.event_shape_tensor())] * n_event_shapes,
axis=0))
if safe_value == 'safe_sample':
sentinel = tf.cast(safe_val, val.dtype)
else:
sentinel = tf.cast(safe_value, val.dtype)
return tf.where(validity_mask, val, sentinel)
fn.__name__ = f'_{fn_name}'
return fn
def _fixed_sample(d):
return d.sample(seed=samplers.zeros_seed())
class _Masked(distribution_lib.Distribution):
"""A distribution that masks invalid underlying distributions.
Sometimes we may want a way of masking out a subset of distributions. Perhaps
we have labels for only a subset of batch members and want to evaluate a
log_prob. Or we may want to encode a sparse random variable as a dense
random variable with a mask applied. In single-program/multiple-data regimes,
it can be necessary to pad Distributions and the samples thereof to a given
size in order to achieve the "single-program" desideratum.
When computing a probability density in this regime, we would like to mask out
the contributions of invalid batch members. We may also want to ensure that
the values being sampled are valid parameters for descendant distributions in
a hierarchical model, even if they are ultimately masked out. This
distribution answers those requirements. Specifically, for invalid batch
elements:
- `log_prob(x) == 0.` for all `x`, with no gradients back to `x`, nor any
gradients to the parameters of `distribution`.
- `sample() == tf.stop_gradient(safe_value_fn(distribution))`, with no
gradients back to the parameters of `distribution`.
The distribution accepts a mask specified by `validity_mask`, a boolean tensor
broadcastable with the underlying distribution's batch shape which specifies
for each batch element whether or not it is valid.
Entries in `validity_mask` which are `False` denote missing distributions,
which means that the corresponding entries in the measures (e.g. `prob`)
and statistics (e.g. `mean`) must not be treated as coming from some real
  distribution. Whenever doing a reduction across those quantities, make sure to
either mask out the invalid entries or make sure the returned value
corresponds to the identity element of the reduction. For a couple examples:
- OK: `reduce_sum(masked_dist.log_prob(x))`
- OK: `tfd.Independent(masked_dist, ...)`
- Not OK: `reduce_var(masked_dist.mean())` will underestimate the variance
because it uses too large an `N`.
- Not OK: `tf.linalg.cholesky(masked_dist.covariance())` will fail for invalid
batch elements.
The default `safe_value_fn` is to draw a fixed-seeded sample from the
underlying `distribution`. Since this may be expensive, it is suggested to
specify a computationally cheaper method. Some options might include:
- `tfd.Distribution.mode`
- `tfd.Distribution.mean`
- `lambda d: d.quantile(.5)` (median)
- `lambda _: 0.` (if zero is always in the support of d)
- `lambda d: d.experimental_default_event_space_bijector()(0.)`
Besides the output of `sample`, results from `safe_value_fn` may also appear
in (invalid batch members of) `masked.default_event_space_bijector().forward`.
#### Examples
```
# Use tf.sequence_mask for `range(n) < num_valid`.
num_valid = 3
num_entries = 4
d = tfd.Masked(
tfd.MultivariateNormalDiag(tf.zeros([2, num_entries, 5]), tf.ones([5])),
tf.sequence_mask(num_valid, num_entries))
d.batch_shape # [2, 4]
d.event_shape # [5]
d.log_prob(tf.zeros([5])) # shape [2, 4]
# => [[nonzero, nonzero, nonzero, 0.],
# [nonzero, nonzero, nonzero, 0.]]
# Explicitly denote which elements are valid, adding a new batch dim of 2.
d = tfd.Masked(tfd.MultivariateNormalDiag(tf.zeros([4, 5]), tf.ones([5])),
[[False], [True]])
d.batch_shape # [2, 4]
d.event_shape # [5]
d.log_prob(tf.zeros([5])) # shape [2, 4]
# => [[0., 0., 0., 0.],
# [nonzero, nonzero, nonzero, nonzero]]
# Use `BatchBroadcast` and `Independent` to achieve the equivalent of adding
# positional mask functionality to `tfd.Sample`.
# Suppose we wanted to achieve this:
# `tfd.Sample(tfd.Normal(tf.zeros(2), 1), [3, 4], validity_mask=mask)`
# We can write:
d = tfd.Independent(
tfd.Masked(tfd.BatchBroadcast(tfd.Normal(0, 1), [2, 3, 4]), mask),
reinterpreted_batch_ndims=2)
d.batch_shape # [2]
d.event_shape # [3, 4]
d.log_prob(tf.ones([3, 4])) # shape [2]
```
"""
def __init__(self,
distribution,
validity_mask,
safe_sample_fn=_fixed_sample,
validate_args=False,
allow_nan_stats=True,
name=None):
"""Constructs a Masked distribution.
Args:
distribution: The underlying distribution, which will be masked.
validity_mask: Boolean mask where `True` indicates an element is valid.
`validity_mask` must broadcast with the batch shape of the underlying
distribution. Invalid batch elements are masked so that sampling returns
`safe_sample_fn(dist)` in invalid positions and `log_prob(x)` returns
`0.` for invalid positions.
safe_sample_fn: A callable which takes a distribution (namely,
        the `distribution` argument) and returns a deterministic, safe sample
value. This helps to avoid `nan` gradients and allows downstream usage
of samples from a `Masked` distribution to assume a "safe" even if
invalid value. (Be careful to ensure that such downstream usages are
themselves masked!) Note that the result of this function will be
wrapped in a `tf.stop_gradient` call.
validate_args: Boolean indicating whether argument assertions should be
run. May impose performance penalties.
allow_nan_stats: Boolean indicating whether statistical functions may
return `nan`, or should instead use asserts where possible.
name: Optional name for operation scoping.
"""
parameters = dict(locals())
with tf.name_scope(name or f'Masked{distribution.name}') as name:
self._distribution = distribution
self._validity_mask = tensor_util.convert_nonref_to_tensor(
validity_mask, dtype_hint=tf.bool)
self._safe_sample_fn = safe_sample_fn
super(_Masked, self).__init__(
dtype=distribution.dtype,
reparameterization_type=distribution.reparameterization_type,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
name=name)
@classmethod
def _parameter_properties(cls, dtype, num_classes=None):
return dict(
distribution=parameter_properties.BatchedComponentProperties(),
validity_mask=parameter_properties.ParameterProperties(
shape_fn=parameter_properties.SHAPE_FN_NOT_IMPLEMENTED))
@property
def distribution(self):
return self._distribution
@property
def validity_mask(self):
return self._validity_mask
@property
def safe_sample_fn(self):
return self._safe_sample_fn
@property
def experimental_is_sharded(self):
return self.distribution.experimental_is_sharded
def _event_shape(self):
return self.distribution.event_shape
def _event_shape_tensor(self):
return self.distribution.event_shape_tensor()
def _sample_n(self, n, seed=None, **kwargs):
validity_mask = tf.convert_to_tensor(self.validity_mask)
# To avoid the shape gymnastics of drawing extra samples, we delegate
# sampling to the BatchBroadcast distribution.
bb = batch_broadcast.BatchBroadcast(self.distribution,
ps.shape(validity_mask))
samples = bb.sample(n, seed=seed, **kwargs)
safe_val = tf.stop_gradient(self.safe_sample_fn(self.distribution))
return tf.where(_add_event_dims_to_mask(validity_mask, dist=self),
samples, safe_val)
_log_prob = _make_masked_fn(
'log_prob', n_event_shapes=0, safe_value=0., make_arg0_safe=True)
_prob = _make_masked_fn(
'prob', n_event_shapes=0, safe_value=1., make_arg0_safe=True)
_log_cdf = _make_masked_fn(
'log_cdf', n_event_shapes=0, safe_value=0., make_arg0_safe=True)
_cdf = _make_masked_fn(
'cdf', n_event_shapes=0, safe_value=1., make_arg0_safe=True)
_log_survival_function = _make_masked_fn(
'log_survival_function', n_event_shapes=0, safe_value=-float('inf'),
make_arg0_safe=True)
_survival_function = _make_masked_fn(
'survival_function', n_event_shapes=0, safe_value=0.,
make_arg0_safe=True)
_entropy = _make_masked_fn(
'entropy', n_event_shapes=0, safe_value=0.)
_mode = _make_masked_fn(
'mode', n_event_shapes=1, safe_value='safe_sample')
_mean = _make_masked_fn(
'mean', n_event_shapes=1, safe_value='safe_sample')
_variance = _make_masked_fn(
'variance', n_event_shapes=1, safe_value=0.)
_stddev = _make_masked_fn(
'stddev', n_event_shapes=1, safe_value=0.)
_covariance = _make_masked_fn(
'covariance', n_event_shapes=2, safe_value=0.)
_quantile = _make_masked_fn(
'quantile', n_event_shapes=1, safe_value='safe_sample')
def _default_event_space_bijector(self, *args, **kwargs):
underlying_bijector = (
self.distribution.experimental_default_event_space_bijector())
if underlying_bijector is None:
return None
return _MaskedBijector(self, underlying_bijector)
class Masked(_Masked, distribution_lib.AutoCompositeTensorDistribution):
def __new__(cls, *args, **kwargs):
"""Maybe return a non-`CompositeTensor` `_Masked`."""
if cls is Masked:
if args:
distribution = args[0]
else:
distribution = kwargs.get('distribution')
if not isinstance(distribution, tf.__internal__.CompositeTensor):
return _Masked(*args, **kwargs)
return super(Masked, cls).__new__(cls)
Masked.__doc__ = _Masked.__doc__ + '\n' + (
'If `distribution` is a `CompositeTensor`, then the resulting `Masked` '
'instance is a `CompositeTensor` as well. Otherwise, a '
'non-`CompositeTensor` `_Masked` instance is created instead. Distribution '
'subclasses that inherit from `Masked` will also inherit from '
'`CompositeTensor`.')
@kullback_leibler.RegisterKL(_Masked, _Masked)
def _kl_masked_masked(a, b, name=None):
"""KL divergence between Masked distributions."""
with tf.name_scope(name or 'kl_masked_masked'):
a_valid = tf.convert_to_tensor(a.validity_mask)
b_valid = tf.convert_to_tensor(b.validity_mask)
underlying_kl = kullback_leibler.kl_divergence(
a.distribution, b.distribution)
# The treatment for KL is as follows:
# When both random variables are valid, the underlying KL applies.
# When neither random variable is valid, the KL is 0., i.e.
# `a log a - a log b = 0` because log a and log b are everywhere 0.
# When exactly one is valid, we (a) raise an assertion error, if either
# distribution's allow_nan_stats is set to False, or (b) return nan in
# such positions.
asserts = []
if not (a.allow_nan_stats and b.allow_nan_stats):
asserts.append(assert_util.assert_equal(
a_valid, b_valid,
message='KL is only valid for matching mask values'))
with tf.control_dependencies(asserts):
both_valid = (a_valid & b_valid)
neither_valid = (~a_valid) & (~b_valid)
dtype = underlying_kl.dtype
return tf.where(both_valid, underlying_kl,
tf.where(neither_valid,
tf.zeros([], dtype), float('nan')))
@log_prob_ratio.RegisterLogProbRatio(_Masked)
def _masked_log_prob_ratio(p, x, q, y, name=None):
"""Computes log p(x) - log q(y) for Masked p, q."""
with tf.name_scope(name or 'masked_log_prob_ratio'):
p_valid = tf.convert_to_tensor(p.validity_mask)
safe_x = tf.where(_add_event_dims_to_mask(p_valid, dist=p),
x, tf.stop_gradient(p.safe_sample_fn(p.distribution)))
q_valid = tf.convert_to_tensor(q.validity_mask)
safe_y = tf.where(_add_event_dims_to_mask(q_valid, dist=q),
y, tf.stop_gradient(q.safe_sample_fn(q.distribution)))
underlying = log_prob_ratio.log_prob_ratio(
p.distribution, safe_x, q.distribution, safe_y)
asserts = []
# As with KL, we return the underlying log_prob_ratio where both are valid,
# `0.` where neither is valid, and `nan` otherwise (or an assertion if
# either distribution does not `allow_nan_stats`).
    if not (p.allow_nan_stats and q.allow_nan_stats):
asserts.append(assert_util.assert_equal(
p_valid, q_valid,
message='Masked log_prob_ratio only valid for matching mask values'))
with tf.control_dependencies(asserts):
both_valid = (p_valid & q_valid)
neither_valid = (~p_valid) & (~q_valid)
return tf.where(both_valid, underlying,
tf.where(neither_valid,
tf.zeros([], dtype=underlying.dtype),
float('nan')))
class _NonCompositeTensorMaskedBijector(bijector_lib.Bijector):
"""Event space bijector for Masked distributions."""
def __init__(self, masked, underlying_bijector):
self._masked = masked
self._bijector = underlying_bijector
super(_NonCompositeTensorMaskedBijector, self).__init__(
validate_args=underlying_bijector.validate_args,
dtype=underlying_bijector.dtype,
forward_min_event_ndims=underlying_bijector.forward_min_event_ndims,
inverse_min_event_ndims=underlying_bijector.inverse_min_event_ndims)
def _forward_event_shape(self, x):
return self._bijector.forward_event_shape(x)
def _forward_event_shape_tensor(self, x):
return self._bijector.forward_event_shape_tensor(x)
def _inverse_event_shape(self, y):
return self._bijector.inverse_event_shape(y)
def _inverse_event_shape_tensor(self, y):
return self._bijector.inverse_event_shape_tensor(y)
def _make_safe_x(self, x, validity_mask):
bij = self._bijector
masked = self._masked
pullback_event_ndims = ps.rank_from_shape(
lambda: bij.inverse_event_shape_tensor(masked.event_shape_tensor()),
self._bijector.inverse_event_shape(masked.event_shape))
pullback_event_mask = _add_event_dims_to_mask(
validity_mask, event_ndims=pullback_event_ndims)
# We presume that 0 in unconstrained space is safe.
return tf.where(pullback_event_mask, x, 0.)
def _forward(self, x):
mask = self._masked.validity_mask
safe_x = self._make_safe_x(x, mask)
return self._make_safe_y(self._bijector.forward(safe_x), mask)
def _forward_log_det_jacobian(self, x):
validity_mask = tf.convert_to_tensor(self._masked.validity_mask)
safe_x = self._make_safe_x(x, validity_mask)
return tf.where(validity_mask,
self._bijector.forward_log_det_jacobian(safe_x),
0.)
def _make_safe_y(self, y, validity_mask):
safe_val = tf.stop_gradient(
self._masked.safe_sample_fn(self._masked.distribution))
event_mask = _add_event_dims_to_mask(validity_mask, dist=self._masked)
return tf.where(event_mask, y, safe_val)
def _inverse(self, y):
safe_y = self._make_safe_y(y, self._masked.validity_mask)
return self._bijector.inverse(safe_y)
def _inverse_log_det_jacobian(self, y):
validity_mask = tf.convert_to_tensor(self._masked.validity_mask)
safe_y = self._make_safe_y(y, validity_mask)
return tf.where(validity_mask,
self._bijector.inverse_log_det_jacobian(safe_y),
0.)
class _MaskedBijector(_NonCompositeTensorMaskedBijector,
bijector_lib.AutoCompositeTensorBijector):
"""Event space bijector for Masked distributions."""
def __new__(cls, *args, **kwargs):
"""Maybe return a `_NonCompositeTensorMaskedBijector`."""
if cls is _MaskedBijector:
if args:
masked = args[0]
else:
masked = kwargs.get('masked')
if len(args) > 1:
bijector = args[1]
else:
bijector = kwargs.get('underlying_bijector')
if not (isinstance(masked, tf.__internal__.CompositeTensor)
and isinstance(bijector, tf.__internal__.CompositeTensor)):
return _NonCompositeTensorMaskedBijector(*args, **kwargs)
return super(_MaskedBijector, cls).__new__(cls)
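# Illustrative usage sketch (not part of the original module; assumes TensorFlow
# Probability is installed and imported in the usual way): the constructor's
# `safe_sample_fn` can be swapped for a cheaper statistic than the default
# fixed-seed sample, as suggested in the class docstring above, e.g.
#
#   import tensorflow as tf
#   import tensorflow_probability as tfp
#   tfd = tfp.distributions
#
#   masked = tfd.Masked(
#       tfd.Normal(tf.zeros([4]), 1.),
#       validity_mask=[True, True, False, False],
#       safe_sample_fn=tfd.Distribution.mean)
#   masked.sample()                  # invalid entries filled with stop_gradient(mean)
#   masked.log_prob(tf.zeros([4]))   # -> [nonzero, nonzero, 0., 0.]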
be665e63998c0015bc21386a7c5b3385196a6cfb | 5,403 bytes | Python (py) | heuristic/improvement/reopt/disruption_updater.py
annalunde/master @ 2552d43713e8ebca0b0e57bc5bebd1eaeeac1875 | ["MIT"] | 1 star (2022-03-17T15:40:00.000Z)
import copy
import pandas as pd
from decouple import config
from heuristic.construction.construction import ConstructionHeuristic
from config.construction_config import *
from simulation.simulator import Simulator
from heuristic.improvement.reopt.new_request_updater import NewRequestUpdater
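# Note on the disruption_info tuples consumed below (layout inferred from the indexing
# in this file, not from upstream documentation): for 'cancel' and no-show disruptions
# it appears to be (vehicle index, pickup node index, dropoff node index); for 'delay'
# it appears to be (vehicle index, first delayed node index, delay duration); for
# 'request' it carries the new request forwarded to NewRequestUpdater.set_parameters().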
class DisruptionUpdater:
def __init__(self, new_request_updater):
self.new_request_updater = new_request_updater
def update_route_plan(self, current_route_plan, disruption_type, disruption_info, sim_clock):
# adding current position for each vehicle
vehicle_clocks, artificial_depot = self.update_vehicle_clocks(
current_route_plan, sim_clock, disruption_type, disruption_info)
updated_route_plan = copy.deepcopy(current_route_plan)
if disruption_type == 'request':
self.new_request_updater.set_parameters(disruption_info)
elif disruption_type == 'delay':
updated_route_plan = self.update_with_delay(
current_route_plan, disruption_info)
elif disruption_type == 'cancel':
# update capacities
updated_vehicle_route = self.update_capacities(
updated_route_plan[disruption_info[0]], disruption_info[1], disruption_info[2],
updated_route_plan[disruption_info[0]][disruption_info[1]][5])
updated_route_plan[disruption_info[0]] = updated_vehicle_route
if artificial_depot:
# remove dropoff node
del updated_route_plan[disruption_info[0]][disruption_info[2]]
else:
# remove dropoff node
del updated_route_plan[disruption_info[0]][disruption_info[2]]
# remove pickup node
del updated_route_plan[disruption_info[0]][disruption_info[1]]
else:
# no show
# update capacities
updated_vehicle_route = self.update_capacities(
updated_route_plan[disruption_info[0]], disruption_info[1], disruption_info[2],
updated_route_plan[disruption_info[0]][disruption_info[1]][5])
updated_route_plan[disruption_info[0]] = updated_vehicle_route
# remove dropoff node
del updated_route_plan[disruption_info[0]][disruption_info[2]]
return updated_route_plan, vehicle_clocks
def update_with_delay(self, current_route_plan, disruption_info):
delay_duration = disruption_info[2]
route_plan = copy.deepcopy(current_route_plan)
start_idx = disruption_info[1]
for node in route_plan[disruption_info[0]][disruption_info[1]:]:
t = node[1] + delay_duration
d = node[2] + delay_duration
node = (node[0], t, d, node[3], node[4], node[5])
route_plan[disruption_info[0]][start_idx] = node
start_idx += 1
return route_plan
@staticmethod
def recalibrate_solution(current_route_plan, disruption_info, still_delayed_nodes):
delay_duration = disruption_info[2]
route_plan = copy.deepcopy(current_route_plan)
for node in still_delayed_nodes:
idx = next(i for i, (node_test, *_)
in enumerate(route_plan[disruption_info[0]]) if node_test == node)
node_route = route_plan[disruption_info[0]][idx]
d = node_route[2] - delay_duration
node_route = (node_route[0], node_route[1], d,
node_route[3], node_route[4], node_route[5])
route_plan[disruption_info[0]][idx] = node_route
return route_plan
def update_vehicle_clocks(self, current_route_plan, sim_clock, disruption_type, disruption_info):
artificial_depot = False
# find index for next node after sim_clock and corresponding time of service
vehicle_clocks = []
for vehicle_route in current_route_plan:
if len(vehicle_route) > 1:
if vehicle_route[0][1] < sim_clock:
prev_idx = 0
for idx, (node, time, deviation, passenger, wheelchair, _) in enumerate(vehicle_route):
if time <= sim_clock:
prev_idx = idx
if prev_idx == len(vehicle_route) - 1:
vehicle_clocks.append(sim_clock)
else:
next_idx = prev_idx + 1
vehicle_clocks.append(vehicle_route[next_idx][1])
if disruption_type == 'cancel':
# check whether next node after sim_clock is the request that is cancelled
if current_route_plan[disruption_info[0]][disruption_info[1]] == vehicle_route[next_idx]:
artificial_depot = True
else:
vehicle_clocks.append(sim_clock)
else:
vehicle_clocks.append(sim_clock)
return vehicle_clocks, artificial_depot
def update_capacities(self, vehicle_route, start_id, dropoff_id, request):
idx = start_id
for n, t, d, p, w, _ in vehicle_route[start_id:dropoff_id]:
p -= request["Number of Passengers"]
w -= request["Wheelchair"]
vehicle_route[idx] = (n, t, d, p, w, _)
idx += 1
return vehicle_route
be6ac11cc08ea3cf2a70097fa4537b051b80fea9 | 834 bytes | Python (py) | tests/test_pyqrcodeng_issue13.py
dbajar/segno @ f7d5669537b12d3ebb914ae6d0a0a1e14f8d25f5 | ["BSD-3-Clause"] | 254 stars (2016-09-25 – 2022-03-30) | 102 issues (2016-08-04 – 2022-03-23) | 34 forks (2016-09-25 – 2022-03-30)
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 - 2020 -- Lars Heuer
# All rights reserved.
#
# License: BSD License
#
"""\
Test against issue <https://github.com/pyqrcode/pyqrcodeNG/pull/13/>.
The initial test was created by Mathieu <https://github.com/albatros69>,
see the above mentioned pull request.
Adapted for Segno to check if it suffers from the same problem.
"""
from __future__ import absolute_import, unicode_literals
import segno
def test_autodetect():
data = 'Émetteur'
qr = segno.make(data)
assert qr.mode == 'byte'
def test_encoding():
encoding = 'iso-8859-15'
data = 'Émetteur'
qr = segno.make(data.encode(encoding))
assert qr.mode == 'byte'
qr2 = segno.make(data, encoding=encoding)
assert qr2 == qr
if __name__ == '__main__':
import pytest
pytest.main([__file__])
be6f25ab250ddab2ab944a4c759bdf74b87010ce | 12,251 bytes | Python (py) | usaspending_api/download/lookups.py
lenjonemcse/usaspending-api @ cbffc4e0a0c2b1339c7a8bfe6b0d687b3731b6ce | ["CC0-1.0"] | 1 star (2022-01-28T16:08:04.000Z)
"""
This file defines a series of constants that represent the values used in
the API's "helper" tables.
Rather than define the values in the db setup scripts and then make db calls to
lookup the surrogate keys, we'll define everything here, in a file that can be
used by the db setup scripts *and* the application code.
"""
from collections import namedtuple, OrderedDict
from usaspending_api.accounts.models import AppropriationAccountBalances
from usaspending_api.accounts.v2.filters.account_download import account_download_filter
from usaspending_api.awards.models import Award, TransactionNormalized
from usaspending_api.awards.models import FinancialAccountsByAwards
from usaspending_api.download.helpers.elasticsearch_download_functions import (
AwardsElasticsearchDownload,
TransactionsElasticsearchDownload,
)
from usaspending_api.download.helpers.disaster_filter_functions import disaster_filter_function
from usaspending_api.search.models import AwardSearchView, TransactionSearch, SubawardView
from usaspending_api.awards.v2.filters.idv_filters import (
idv_order_filter,
idv_transaction_filter,
idv_treasury_account_funding_filter,
)
from usaspending_api.awards.v2.filters.award_filters import (
awards_transaction_filter,
awards_subaward_filter,
awards_treasury_account_funding_filter,
)
from usaspending_api.awards.v2.filters.search import (
universal_award_matview_filter,
transaction_search_filter,
)
from usaspending_api.awards.v2.filters.sub_award import subaward_download
from usaspending_api.financial_activities.models import FinancialAccountsByProgramActivityObjectClass
from usaspending_api.download.helpers.download_annotation_functions import (
transaction_search_annotations,
universal_award_matview_annotations,
subaward_annotations,
idv_order_annotations,
idv_transaction_annotations,
)
LookupType = namedtuple("LookupType", ["id", "name", "desc"])
JOB_STATUS = [
LookupType(1, "ready", "job is ready to be run"),
LookupType(2, "running", "job is currently in progress"),
LookupType(3, "finished", "job is complete"),
LookupType(4, "failed", "job failed to complete"),
LookupType(5, "queued", "job sent to queue for async processing"),
LookupType(6, "resumed", "job is being reprocessed after a failure"),
LookupType(7, "created", "job product has been created and stored locally"),
LookupType(8, "uploading", "job is being uploaded to public storage"),
]
JOB_STATUS_DICT = {item.name: item.id for item in JOB_STATUS}
VALUE_MAPPINGS = {
# Award Level
"awards": {
"source_type": "award",
"table": AwardSearchView,
"table_name": "award",
"type_name": "PrimeAwardSummaries",
"download_name": "{agency}{type}_PrimeAwardSummaries_{timestamp}",
"contract_data": "award__latest_transaction__contract_data",
"assistance_data": "award__latest_transaction__assistance_data",
"filter_function": universal_award_matview_filter,
"annotations_function": universal_award_matview_annotations,
},
# Elasticsearch Award Level
"elasticsearch_awards": {
"source_type": "award",
"table": AwardSearchView,
"table_name": "award",
"type_name": "PrimeAwardSummaries",
"download_name": "{agency}{type}_PrimeAwardSummaries_{timestamp}",
"contract_data": "award__latest_transaction__contract_data",
"assistance_data": "award__latest_transaction__assistance_data",
"filter_function": AwardsElasticsearchDownload.query,
"annotations_function": universal_award_matview_annotations,
},
# Transaction Level
"transactions": {
"source_type": "award",
"table": TransactionSearch,
"table_name": "transaction",
"type_name": "PrimeTransactions",
"download_name": "{agency}{type}_PrimeTransactions_{timestamp}",
"contract_data": "transaction__contract_data",
"assistance_data": "transaction__assistance_data",
"filter_function": transaction_search_filter,
"annotations_function": transaction_search_annotations,
},
# Elasticsearch Transaction Level
"elasticsearch_transactions": {
"source_type": "award",
"table": TransactionSearch,
"table_name": "transaction",
"type_name": "PrimeTransactions",
"download_name": "{agency}{type}_PrimeTransactions_{timestamp}",
"contract_data": "transaction__contract_data",
"assistance_data": "transaction__assistance_data",
"filter_function": TransactionsElasticsearchDownload.query,
"annotations_function": transaction_search_annotations,
},
# SubAward Level
"sub_awards": {
"source_type": "award",
"table": SubawardView,
"table_name": "subaward",
"type_name": "Subawards",
"download_name": "{agency}{type}_Subawards_{timestamp}",
"contract_data": "award__latest_transaction__contract_data",
"assistance_data": "award__latest_transaction__assistance_data",
"filter_function": subaward_download,
"annotations_function": subaward_annotations,
},
# Appropriations Account Data
"account_balances": {
"source_type": "account",
"table": AppropriationAccountBalances,
"table_name": "account_balances",
"download_name": "{data_quarters}_{agency}_{level}_AccountBalances_{timestamp}",
"zipfile_template": "{data_quarters}_{agency}_{level}_AccountBalances_{timestamp}",
"filter_function": account_download_filter,
},
# Object Class Program Activity Account Data
"object_class_program_activity": {
"source_type": "account",
"table": FinancialAccountsByProgramActivityObjectClass,
"table_name": "object_class_program_activity",
"download_name": "{data_quarters}_{agency}_{level}_AccountBreakdownByPA-OC_{timestamp}",
"zipfile_template": "{data_quarters}_{agency}_{level}_AccountBreakdownByPA-OC_{timestamp}",
"filter_function": account_download_filter,
},
"award_financial": {
"source_type": "account",
"table": FinancialAccountsByAwards,
"table_name": "award_financial",
"download_name": "{data_quarters}_{agency}_{level}_AccountBreakdownByAward_{timestamp}",
"zipfile_template": "{data_quarters}_{agency}_{level}_AccountBreakdownByAward_{timestamp}",
"filter_function": account_download_filter,
},
"idv_orders": {
"source_type": "award",
"table": Award,
"table_name": "idv_orders",
"download_name": "IDV_{piid}_Orders",
"contract_data": "latest_transaction__contract_data",
"filter_function": idv_order_filter,
"is_for_idv": True,
"annotations_function": idv_order_annotations,
},
"idv_federal_account_funding": {
"source_type": "account",
"table": FinancialAccountsByAwards,
"table_name": "award_financial",
"download_name": "IDV_{piid}_FederalAccountFunding",
"filter_function": idv_treasury_account_funding_filter,
"is_for_idv": True,
},
"idv_transaction_history": {
"source_type": "award",
"table": TransactionNormalized,
"table_name": "idv_transaction_history",
"download_name": "IDV_{piid}_TransactionHistory",
"contract_data": "contract_data",
"filter_function": idv_transaction_filter,
"is_for_idv": True,
"annotations_function": idv_transaction_annotations,
},
"contract_federal_account_funding": {
"source_type": "account",
"table": FinancialAccountsByAwards,
"table_name": "award_financial",
"download_name": "Contract_{piid}_FederalAccountFunding",
"filter_function": awards_treasury_account_funding_filter,
"is_for_contract": True,
},
"assistance_federal_account_funding": {
"source_type": "account",
"table": FinancialAccountsByAwards,
"table_name": "award_financial",
"download_name": "Assistance_{assistance_id}_FederalAccountFunding",
"filter_function": awards_treasury_account_funding_filter,
"is_for_assistance": True,
},
"sub_contracts": {
"source_type": "award",
"table": SubawardView,
"table_name": "subaward",
"download_name": "Contract_{piid}_Sub-Awards",
"contract_data": "award__latest_transaction__contract_data",
"filter_function": awards_subaward_filter,
"is_for_contract": True,
"annotations_function": subaward_annotations,
},
"sub_grants": {
"source_type": "award",
"table": SubawardView,
"table_name": "subaward",
"download_name": "Assistance_{assistance_id}_Sub-Awards",
"assistance_data": "award__latest_transaction__assistance_data",
"filter_function": awards_subaward_filter,
"is_for_assistance": True,
"annotations_function": subaward_annotations,
},
"contract_transactions": {
"source_type": "award",
"table": TransactionNormalized,
"table_name": "idv_transaction_history",
"download_name": "Contract_{piid}_TransactionHistory",
"contract_data": "contract_data",
"filter_function": awards_transaction_filter,
"is_for_contract": True,
"annotations_function": idv_transaction_annotations,
},
"assistance_transactions": {
"source_type": "award",
"table": TransactionNormalized,
"table_name": "assistance_transaction_history",
"download_name": "Assistance_{assistance_id}_TransactionHistory",
"assistance_data": "assistance_data",
"filter_function": awards_transaction_filter,
"is_for_assistance": True,
"annotations_function": idv_transaction_annotations,
},
"disaster_recipient": {
"source_type": "disaster",
"table": AwardSearchView,
"table_name": "recipient",
"download_name": "COVID-19_Recipients_{award_category}_{timestamp}",
"filter_function": disaster_filter_function,
"base_fields": ["recipient_name", "recipient_unique_id"],
},
}
# Bulk Download still uses "prime awards" instead of "transactions"
VALUE_MAPPINGS["prime_awards"] = VALUE_MAPPINGS["transactions"]
# List of CFO CGACS for list agencies viewset in the correct order, names included for reference
# TODO: Find a solution that marks the CFO agencies in the database AND have the correct order
CFO_CGACS_MAPPING = OrderedDict(
[
("012", "Department of Agriculture"),
("013", "Department of Commerce"),
("097", "Department of Defense"),
("091", "Department of Education"),
("089", "Department of Energy"),
("075", "Department of Health and Human Services"),
("070", "Department of Homeland Security"),
("086", "Department of Housing and Urban Development"),
("015", "Department of Justice"),
("1601", "Department of Labor"),
("019", "Department of State"),
("014", "Department of the Interior"),
("020", "Department of the Treasury"),
("069", "Department of Transportation"),
("036", "Department of Veterans Affairs"),
("068", "Environmental Protection Agency"),
("047", "General Services Administration"),
("080", "National Aeronautics and Space Administration"),
("049", "National Science Foundation"),
("031", "Nuclear Regulatory Commission"),
("024", "Office of Personnel Management"),
("073", "Small Business Administration"),
("028", "Social Security Administration"),
("072", "Agency for International Development"),
]
)
CFO_CGACS = list(CFO_CGACS_MAPPING.keys())
FILE_FORMATS = {
"csv": {"delimiter": ",", "extension": "csv", "options": "WITH CSV HEADER"},
"tsv": {"delimiter": "\t", "extension": "tsv", "options": r"WITH CSV DELIMITER E'\t' HEADER"},
"pstxt": {"delimiter": "|", "extension": "txt", "options": "WITH CSV DELIMITER '|' HEADER"},
}
VALID_ACCOUNT_SUBMISSION_TYPES = ("account_balances", "object_class_program_activity", "award_financial")
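# Illustrative usage (hypothetical callers, not part of this module): downstream code
# can resolve surrogate keys and per-download configuration without a database lookup,
# for example:
#   JOB_STATUS_DICT["finished"]                  # -> 3
#   VALUE_MAPPINGS["awards"]["download_name"]    # -> "{agency}{type}_PrimeAwardSummaries_{timestamp}"
#   VALUE_MAPPINGS["awards"]["filter_function"]  # -> universal_award_matview_filter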
be7401e08d215565703c4b9fa33b7d5e7ca05a69 | 8,827 bytes | Python (py) | src/evaluation_utils.py
philipp-hess/deep-learning-for-heavy-rainfall @ dbec03245dd8db0c5f2f53af014b8dd8d80f245c | ["MIT"]
import numpy as np
import pandas as pd
from scipy.stats import spearmanr
from sklearn.metrics import f1_score, precision_score, recall_score
from IPython.display import display, clear_output
from sklearn.metrics import confusion_matrix
import scipy.stats as st
def continuous_to_categorical_with_quantiles(data: np.ndarray, quantiles:list ) -> np.ndarray:
""" Converts continuous data into binar classes using quantiles
Args:
data: shape [n_time, n_lat, n_lon]
quantiles:
list containing quantiles
Returns:
        tmp: shape [n_quantiles, n_time, n_lat, n_lon]
binary data
"""
shape = data.shape
tmp = np.zeros((len(quantiles), shape[0], shape[1], shape[2]))
for i, quantile in enumerate(quantiles):
threshold = np.quantile(data, quantile)
binary = np.where(data > threshold, 1, 0).reshape((shape[0], shape[1], shape[2],-1))
tmp[i] = binary.squeeze()
return tmp
def global_thresholds_from_quantiles(data: np.ndarray, quantiles:list) -> list:
thresholds = [np.quantile(data, quantile) for quantile in quantiles]
return thresholds
def local_thresholds_from_percentiles(data: np.ndarray, percentile: float, data_min=0) -> np.ndarray:
n_lat = data.shape[1]
n_lon = data.shape[2]
threshold_map = np.zeros((n_lat, n_lon))
for lat in range(n_lat):
for lon in range(n_lon):
tmp = data[:, lat, lon]
threshold = st.scoreatpercentile(tmp[tmp>data_min], percentile)
if not np.isnan(threshold):
threshold_map[lat, lon] = threshold
return threshold_map
def get_threshold_mask(data: np.ndarray, percentile: float, data_min=0) -> np.ndarray:
n_lat = data.shape[1]
n_lon = data.shape[2]
mask = np.zeros((n_lat, n_lon))
for lat in range(n_lat):
for lon in range(n_lon):
tmp = data[:, lat, lon]
threshold = st.scoreatpercentile(tmp[tmp>data_min], percentile)
if np.isnan(threshold):
mask[lat, lon] = 1
return mask
def continuous_to_categorical_with_thresholds(data: np.ndarray, thresholds: list) -> np.ndarray:
""" Converts continuous data into binar classes using thresholds
Args:
data: shape [n_time, n_lat, n_lon]
        thresholds:
list containing thresholds
Returns:
        tmp: shape [n_quantiles, n_time, n_lat, n_lon]
binary data
"""
shape = data.shape
tmp = np.zeros((len(thresholds), shape[0], shape[1], shape[2]))
for i, threshold in enumerate(thresholds):
binary = np.where(data > threshold, 1, 0).reshape((shape[0], shape[1], shape[2],-1))
tmp[i] = binary.squeeze()
return tmp
def categorical_evaluation(prediction: np.ndarray, target: np.ndarray, metric_name: str, mask=None) -> pd.DataFrame:
"""
    Evaluates a regression prediction with a categorical skill metric
    (selected via `metric_name`) on quantile-based categories
Args:
prediction: shape [n_classes, X]
target: shape [n_classes, X]
X can be any other number of dimensions > 0
Returns:
scores (list):
List with an element per class
"""
n_classes = prediction.shape[0]
prediction = prediction.reshape(n_classes, -1)
target = target.reshape(n_classes, -1)
scores = []
for c in range(n_classes):
forecast_skill = ForecastSkill(prediction[c], target[c])
forecast_skill.compute_categories(mask=mask)
scores.append(getattr(forecast_skill, f'get_{metric_name}')())
return scores
def geographic_categorical_evaluation(prediction: np.ndarray, target: np.ndarray, metric_name: str) -> np.ndarray:
"""
    Evaluates a regression prediction with a categorical skill metric
    (selected via `metric_name`) on quantile-based categories
Args:
prediction: shape [n_classes, n_time, n_lat, n_lon]
target: shape [n_classes, n_time, n_lat, n_lon]
Returns:
scores: shape [n_classes, n_lat, n_lon]
"""
n_classes = prediction.shape[0]
n_lat = prediction.shape[2]
n_lon = prediction.shape[3]
scores = np.zeros((n_classes, n_lat, n_lon))
for c in range(n_classes):
for lat in range(n_lat):
for lon in range(n_lon):
grid_cell_prediction = prediction[c, :, lat, lon]
grid_cell_target = target[c, :, lat, lon]
if sum(grid_cell_prediction) == 0 and sum(grid_cell_target) == 0:
scores[c, lat, lon] = -999
else:
forecast_skill = ForecastSkill(prediction[c, :, lat, lon], target[c, :, lat, lon])
forecast_skill.compute_categories()
scores[c, lat, lon] = getattr(forecast_skill, f'get_{metric_name}')()
print(f'Progress {int((lat * lon)/(n_lat*n_lon)*100):2d}%')
clear_output(wait=True)
return scores
class ForecastSkill:
""" A collection of categorical forecast skill metrics """
def __init__(self, prediction, target):
self.prediction = prediction
self.target = target
self.true_positive = 0
self.false_positive = 0
self.false_negative = 0
self.true_negative = 0
def compute_categories(self, mask=None):
self.target = self.target.flatten().astype('int')
self.prediction = self.prediction.flatten().astype('int')
if mask is not None:
mask = mask.flatten()
indices_to_remove = np.where(mask==1)
self.target = np.delete(self.target, indices_to_remove)
self.prediction = np.delete(self.prediction, indices_to_remove)
categories = confusion_matrix(self.target, self.prediction)
self.true_negative, self.false_positive, self.false_negative, self.true_positive = categories.ravel()
def print_category_sums(self):
total = self.target.size
print(f'tp: {self.true_positive/total*100:2.3f}')
print(f'fp: {self.false_positive/total*100:2.3f}')
print(f'fn: {self.false_negative/total*100:2.3f}')
print(f'tn: {self.true_negative/total*100:2.3f}')
def get_category_sums(self):
return self.true_positive, self.false_positive, self.false_negative, self.true_negative
def get_heidke_skill_score(self) -> float:
tp = self.true_positive
fp = self.false_positive
fn = self.false_negative
tn = self.true_negative
nominator = 2*(tp*tn - fp*fn)
denominator = ((tp + fn)*(fn + tn) + (tp + fp)*(fp + tn))
if denominator > 0:
return nominator/denominator
else:
            raise ValueError('division by zero')
def get_critical_success_index(self) -> float:
hits = self.true_positive
false_alarms = self.false_positive
misses = self.false_negative
nominator = hits
denominator = hits + misses + false_alarms
if denominator > 0:
return nominator/denominator
else:
            raise ValueError('division by zero')
def get_false_alarm_ratio(self) -> float:
hits = self.true_positive
false_alarms = self.false_positive
nominator = false_alarms
denominator = hits + false_alarms
if denominator > 0:
return nominator/denominator
else:
            raise ValueError('division by zero')
def get_probability_of_detection(self) -> float:
hits = self.true_positive
misses = self.false_negative
nominator = hits
denominator = hits + misses
if denominator > 0:
return nominator/denominator
else:
            raise ValueError('division by zero')
def get_f1(self) -> float:
return f1_score(self.target, self.prediction, average='binary')
def get_recall(self) -> float:
return recall_score(self.target, self.prediction, average='binary')
def get_precision(self) -> float:
return precision_score(self.target, self.prediction, average='binary')
def rmse(output, target):
return np.sqrt(((output-target)**2).mean(axis=0))
def me(output, target):
return (output-target).mean(axis=0)
def corr(output, target):
result = np.zeros((output.shape[1], output.shape[2]))
for i in range(output.shape[1]):
for j in range(output.shape[2]):
result[i,j] = spearmanr(output[:,i,j], target[:,i,j])[0]
return result
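if __name__ == '__main__':
    # Minimal self-check on synthetic data (illustrative only, not part of the original
    # module): categorise a random field at the 0.9 quantile and score a prediction that
    # equals the target, which should yield an F1 score of 1.0 for the single class.
    rng = np.random.default_rng(0)
    field = rng.gamma(shape=2.0, scale=1.0, size=(10, 4, 5))  # [n_time, n_lat, n_lon]
    categories = continuous_to_categorical_with_quantiles(field, [0.9])
    print(categorical_evaluation(categories, categories, 'f1'))  # expected: [1.0]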
be74846aa8bb878ca4aaee267b213fd10335d381 | 1,709 bytes | Python (py) | poloniex_apis/api_models/deposit_withdrawal_history.py
xJuggl3r/anapolo @ 5ffd87594c75575c5a19b9f47bf1b6606cfcdd1b | ["MIT"]
from collections import defaultdict
from poloniex_apis.api_models.ticker_price import TickerData
class DWHistory:
def __init__(self, history):
self.withdrawals = defaultdict(float)
self.deposits = defaultdict(float)
self.history = history
def get_dw_history(self):
for deposit in self.history['deposits']:
if deposit['currency'] in self.deposits:
self.deposits[deposit['currency']] += float(deposit['amount'])
else:
self.deposits[deposit['currency']] = float(deposit['amount'])
for withdrawal in self.history['withdrawals']:
if withdrawal['currency'] in self.withdrawals:
self.withdrawals[withdrawal['currency']] += float(withdrawal['amount'])
else:
self.withdrawals[withdrawal['currency']] = float(withdrawal['amount'])
return self.deposits, self.withdrawals
def get_btc_balance(self, ticker):
balance = 0
for deposit_symbol, amount in self.deposits.items():
if deposit_symbol == u"USDT":
balance += amount * ticker.get_price("USDT_BTC")
            elif deposit_symbol != u'BTC':
balance += amount * ticker.get_price("BTC_" + deposit_symbol)
else:
balance += amount
for withdrawal_symbol, amount in self.withdrawals.items():
if withdrawal_symbol == u"USDT":
balance -= amount * ticker.get_price("USDT_BTC")
            elif withdrawal_symbol != u'BTC':
balance -= amount * ticker.get_price("BTC_" + withdrawal_symbol)
else:
balance -= amount
return balance
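if __name__ == '__main__':
    # Illustrative usage sketch (not part of the original module): any object with a
    # get_price(pair) method can stand in for TickerData here; the price is made up.
    class _StubTicker:
        def get_price(self, pair):
            return {"BTC_ETH": 0.07}.get(pair, 0.0)

    history = {
        'deposits': [{'currency': 'BTC', 'amount': '1.5'},
                     {'currency': 'ETH', 'amount': '10'}],
        'withdrawals': [{'currency': 'BTC', 'amount': '0.5'}],
    }
    dw = DWHistory(history)
    dw.get_dw_history()
    # 1.5 BTC + 10 ETH * 0.07 BTC/ETH - 0.5 BTC = 1.7 BTC
    print(dw.get_btc_balance(_StubTicker()))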
be748f98db9ba8c29d78f47f7af4dd25c01061b7 | 7,320 bytes | Python (py) | app/handler.py
vnrag/aws-pipeline-dashboard @ 679af73f8e777990840bc829a014e205f0c94ac0 | ["BSD-3-Clause"]
from datetime import datetime, timezone
import sys
import boto3
import json
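# The handlers below are invoked by CloudWatch Events rules for CodePipeline state
# changes. For orientation, the fields read from the event look roughly like this
# (abridged sketch inferred from the code below, not copied from a real event):
#   {
#     "time": "2021-01-01T00:00:00Z",
#     "detail-type": "CodePipeline Pipeline Execution State Change",
#     "detail": {"pipeline": "...", "execution-id": "...", "state": "SUCCEEDED",
#                "stage": "...", "action": "..."}
#   }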
def pipeline_event(event, context):
state = get_final_state(event)
if state is None:
return
event_time = datetime.strptime(event['time'], '%Y-%m-%dT%H:%M:%SZ').replace(tzinfo=timezone.utc)
metric_data = []
if event['detail-type'] == "CodePipeline Pipeline Execution State Change":
# Write green/red time based on last execution state
prior_execution = get_prior_execution(event['detail']['pipeline'], event['detail']['execution-id'])
if prior_execution is not None:
last_execution_state = prior_execution['status']
seconds_since_last_execution = (event_time - prior_execution['lastUpdateTime']).total_seconds()
if last_execution_state == "Succeeded":
append_metric(metric_data, "GreenTime", event, seconds=seconds_since_last_execution)
elif last_execution_state == "Failed":
append_metric(metric_data, "RedTime", event, seconds=seconds_since_last_execution)
if state == "SUCCEEDED":
append_metric(metric_data, "SuccessCount", event, count=1)
current_execution = get_execution(event['detail']['pipeline'], event['detail']['execution-id'])
if current_execution is not None:
duration = (event_time - current_execution['startTime']).total_seconds()
append_metric(metric_data, "LeadTime", event, seconds=duration)
elif state == "FAILED":
append_metric(metric_data, "FailureCount", event, count=1)
elif event['detail-type'] == "CodePipeline Stage Execution State Change":
if state == "SUCCEEDED":
append_metric(metric_data, "SuccessCount", event, count=1)
#append_metric(metric_data, "LeadTime", event, seconds=duration)
elif state == "FAILED":
append_metric(metric_data, "FailureCount", event, count=1)
elif event['detail-type'] == "CodePipeline Action Execution State Change":
if state == "SUCCEEDED":
append_metric(metric_data, "SuccessCount", event, count=1)
elif state == "FAILED":
append_metric(metric_data, "FailureCount", event, count=1)
if len(metric_data) > 0:
client = boto3.client('cloudwatch')
client.put_metric_data(
Namespace='Pipeline',
MetricData=metric_data
)
# Return the state from the event iff it's one of SUCCEEDED or FAILED
def get_final_state(event):
if 'detail' in event and 'state' in event['detail']:
if any(event['detail']['state'] in s for s in ['SUCCEEDED', 'FAILED']):
return event['detail']['state']
return None
# Return the execution summary for a given execution id
def get_execution(pipeline_name, execution_id):
client = boto3.client('codepipeline')
response = client.list_pipeline_executions(pipelineName=pipeline_name)
for e in response['pipelineExecutionSummaries']:
if e['pipelineExecutionId'] == execution_id:
return e
return None
# Return the execution summary for the most recent completed (Succeeded/Failed) execution before a given execution id
def get_prior_execution(pipeline_name, execution_id):
client = boto3.client('codepipeline')
response = client.list_pipeline_executions(pipelineName=pipeline_name)
found_current = False
for e in response['pipelineExecutionSummaries']:
if found_current and any(e['status'] in s for s in ['Succeeded', 'Failed']):
return e
elif e['pipelineExecutionId'] == execution_id:
found_current = True
return None
def append_metric(metric_list, metric_name, event, seconds=0, count=0):
data = {
'MetricName': metric_name,
'Dimensions': [],
'Timestamp': datetime.strptime(event['time'], '%Y-%m-%dT%H:%M:%SZ'),
}
resource_parts = []
if 'pipeline' in event['detail']:
data['Dimensions'].append({
'Name': 'PipelineName',
'Value': event['detail']['pipeline']
})
resource_parts.append(event['detail']['pipeline'])
if 'stage' in event['detail']:
data['Dimensions'].append({
'Name': 'StageName',
'Value': event['detail']['stage']
})
resource_parts.append(event['detail']['stage'])
if 'action' in event['detail']:
data['Dimensions'].append({
'Name': 'ActionName',
'Value': event['detail']['action']
})
resource_parts.append(event['detail']['action'])
if seconds > 0:
data['Value'] = seconds
data['Unit'] = 'Seconds'
elif count > 0:
data['Value'] = count
data['Unit'] = 'Count'
else:
# no metric to add
return
print("resource=%s metric=%s value=%s" % ('.'.join(resource_parts), metric_name, data['Value']))
metric_list.append(data)
def generate_dashboard(client):
paginator = client.get_paginator('list_metrics')
response_iterator = paginator.paginate(
Namespace='Pipeline'
)
pipeline_names = set()
for response in response_iterator:
for metric in response['Metrics']:
for dim in metric['Dimensions']:
if dim['Name'] == 'PipelineName':
pipeline_names.add(dim['Value'])
widgets = []
dashboard = {
"widgets": widgets
}
y = 0
for pipeline_name in sorted(pipeline_names):
widgets.append({
"type": "metric",
"x": 0,
"y": y,
"width": 18,
"height": 3,
"properties": {
"view": "singleValue",
"metrics": [
[ "Pipeline", "SuccessCount", "PipelineName", pipeline_name, { "stat": "Sum", "period": 2592000 } ],
[ ".", "FailureCount", ".", ".", { "stat": "Sum", "period": 2592000 } ],
[ ".", "LeadTime", ".", ".", { "period": 2592000, "color": "#9467bd" } ],
[ ".", "RedTime", ".", ".", { "stat": "Sum", "period": 2592000, "yAxis": "left", "color": "#d62728" } ],
[ ".", "GreenTime", ".", ".", { "period": 2592000, "stat": "Sum", "color": "#2ca02c" } ]
],
"region": "eu-central-1",
"title": pipeline_name,
"period": 300
}
})
y += 3
widgets.append({
"type": "text",
"x": 18,
"y": 0,
"width": 6,
"height": 6,
"properties": {
"markdown": "\nAll metrics are calculated over the past 30 days\n\n* **SuccessCount** - count of all successful pipeline executions\n* **FailureCount** - count of all failed pipeline executions\n* **LeadTime** - average pipeline time for successful executions\n* **RedTime** - sum of all time spent with a red pipeline\n* **GreenTime** - sum of all time spent with a green pipeline\n"
}
})
return dashboard
def dashboard_event(event, context):
client = boto3.client('cloudwatch')
dashboard = generate_dashboard(client)
client.put_dashboard(
DashboardName='Pipeline',
DashboardBody=json.dumps(dashboard)
)
if __name__ == '__main__':
dashboard_event(None, None)
be74f9e10e7b3e7db834044fe7d0389031a09884 | 4,507 bytes | Python (py) | cogs/commands.py
sudo-do/discord-chatbot @ 970af7d8b9275a518396648ebe5c33c291370d6a | ["MIT"] | 1 star (2021-05-14T08:01:53.000Z)
import discord
import sqlite3
from discord.ext import commands
conn = sqlite3.connect("dbs/main.db")  # module-level connection shared by all commands in this cog
class Commands(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
@commands.cooldown(1, 30, commands.BucketType.guild)
@commands.has_permissions(manage_channels=True)
async def setchannel(self, ctx, *, cbchannel: discord.TextChannel = None):
if cbchannel is None:
await ctx.send(":warning: You have to mention the channel in which you want users to talk to me. Example: `!!setchannel #channel-name`")
return
else:
try:
cur = conn.cursor()
guildID = str(ctx.guild.id)
# Parameterised query instead of string concatenation (avoids SQL injection)
r = cur.execute("SELECT channel_id FROM main WHERE guild_id = ?", (guildID,))
row = r.fetchone()
if row is not None:
await ctx.send(f":warning: The channel is already set to <#{row[0]}>. Use `!!settings channel` to change it.")
else:
channelID = str(cbchannel.id)
cur.execute("INSERT INTO main(guild_id, channel_id, toggle) VALUES(?, ?, '1')", (guildID, channelID))
conn.commit()
await ctx.send(f":tada: Start talking to me in {cbchannel.mention}!")
except discord.NotFound:
await ctx.send(":warning: I can't find that channel. Make sure it exists and that I can access it.")
return
except discord.Forbidden:
# discord.py raises Forbidden when the bot lacks permission to act on the channel
await ctx.send(":warning: I can't send messages in that channel.")
return
@commands.group(invoke_without_command=True)
async def settings(self, ctx):
em = discord.Embed(title="Discord Chat Bot Settings", description="Welcome to Discord Chat Bot Settings! Here is the list of commands you can use to set up the bot. If this is your first time with this bot, use the `!!setchannel` command first. **Arguments enclosed in `<>` are required!**")
em.add_field(name="`!!settings channel <channel_mention>`", value="Updates the chatting channel.")
em.add_field(name="`!!settings toggle <toggle>`", value="Toggles the bot chat on or off. This doesn't disable commands.")
await ctx.send(embed=em)
@settings.command()
@commands.has_permissions(manage_channels=True)
@commands.cooldown(1, 30, commands.BucketType.guild)
async def channel(self, ctx, *, cbchannel: discord.TextChannel = None):
cur = conn.cursor()
if cbchannel is None:
guildID = str(ctx.guild.id)
r = cur.execute("SELECT channel_id FROM main WHERE guild_id = ?", (guildID,))
row = r.fetchone()
if row is not None:
await ctx.send(f"I'm currently waiting for messages in <#{row[0]}>. Run `!!settings channel #channel-mention` to change this.")
else:
await ctx.send("The channel is not set up yet! Use `!!setchannel` to set a channel.")
else:
guildID = str(ctx.guild.id)
channelID = str(cbchannel.id)
r = cur.execute("SELECT channel_id FROM main WHERE guild_id = ?", (guildID,))
row = r.fetchone()
if row is None:
await ctx.send("The channel is not set up yet! Use `!!setchannel` to set a channel.")
else:
cur.execute("UPDATE main SET channel_id = ? WHERE guild_id = ?", (channelID, guildID))
conn.commit()
await ctx.send(f":tada: Channel has been updated to {cbchannel.mention}!")
@settings.command()
@commands.has_permissions(manage_channels=True)
@commands.cooldown(1, 30, commands.BucketType.guild)
async def toggle(self, ctx, *, toggle = None):
if toggle is None:
await ctx.send(":warning: Use the command again and include the toggle, i.e. `on` or `off`. For example: `!!settings toggle on` to toggle on, `!!settings toggle off` to toggle off.")
else:
if toggle.lower() == "on":
toggle = '1'
elif toggle.lower() == 'off':
toggle = '0'
else:
await ctx.send(":warning: Use the command again and specify the toggle correctly, i.e. `on` or `off`. For example: `!!settings toggle on` to toggle on, `!!settings toggle off` to toggle off.")
return
guildID = str(ctx.guild.id)
cur = conn.cursor()
r = cur.execute("SELECT toggle FROM main WHERE guild_id = ?", (guildID,))
row = r.fetchone()
if row is None:
await ctx.send("The channel is not set up yet! Use `!!setchannel` to set a channel.")
else:
cur.execute("UPDATE main SET toggle = ? WHERE guild_id = ?", (toggle, guildID))
conn.commit()
await ctx.send(":tada: Toggle updated!")
def setup(bot):
bot.add_cog(Commands(bot))
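# --- Illustrative sketch (not part of the original file) ---
# The cog assumes a `main` table already exists in dbs/main.db. A minimal bootstrap,
# with the column layout inferred from the queries above; the PRIMARY KEY constraint
# on guild_id is an assumption, not something the original code enforces.
def _ensure_schema(connection=conn):
    connection.execute(
        "CREATE TABLE IF NOT EXISTS main (guild_id TEXT PRIMARY KEY, channel_id TEXT, toggle TEXT)"
    )
    connection.commit()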
| 36.942623
| 292
| 0.676503
| 659
| 4,507
| 4.584219
| 0.2261
| 0.037074
| 0.055611
| 0.037074
| 0.533929
| 0.503807
| 0.465409
| 0.419398
| 0.419398
| 0.419398
| 0
| 0.00435
| 0.183936
| 4,507
| 121
| 293
| 37.247934
| 0.81702
| 0
| 0
| 0.55102
| 0
| 0.071429
| 0.421123
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020408
| false
| 0
| 0.030612
| 0
| 0.102041
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be75b53bc3cf75e488408e710557a7588ee69c9c
| 6,210
|
py
|
Python
|
poetry/console/commands/self/update.py
|
mgasner/poetry
|
44221689e05feb0cc93c231096334f8eefbf86fc
|
[
"MIT"
] | null | null | null |
poetry/console/commands/self/update.py
|
mgasner/poetry
|
44221689e05feb0cc93c231096334f8eefbf86fc
|
[
"MIT"
] | null | null | null |
poetry/console/commands/self/update.py
|
mgasner/poetry
|
44221689e05feb0cc93c231096334f8eefbf86fc
|
[
"MIT"
] | null | null | null |
import hashlib
import os
import shutil
import subprocess
import sys
import tarfile
from functools import cmp_to_key
from gzip import GzipFile
try:
from urllib.error import HTTPError
from urllib.request import urlopen
except ImportError:
from urllib2 import HTTPError
from urllib2 import urlopen
from cleo import argument
from cleo import option
from ..command import Command
class SelfUpdateCommand(Command):
name = "update"
description = "Updates poetry to the latest version."
arguments = [argument("version", "The version to update to.", optional=True)]
options = [option("preview", None, "Install prereleases.")]
BASE_URL = "https://github.com/sdispater/poetry/releases/download"
@property
def home(self):
from poetry.utils._compat import Path
from poetry.utils.appdirs import expanduser
home = Path(expanduser("~"))
return home / ".poetry"
@property
def lib(self):
return self.home / "lib"
@property
def lib_backup(self):
return self.home / "lib-backup"
def handle(self):
from poetry.__version__ import __version__
from poetry.repositories.pypi_repository import PyPiRepository
from poetry.semver import Version
from poetry.utils._compat import Path
current = Path(__file__)
try:
current.relative_to(self.home)
except ValueError:
raise RuntimeError(
"Poetry was not installed with the recommended installer. "
"Cannot update automatically."
)
version = self.argument("version")
if not version:
version = ">=" + __version__
repo = PyPiRepository(fallback=False)
packages = repo.find_packages(
"poetry", version, allow_prereleases=self.option("preview")
)
if not packages:
self.line("No release found for the specified version")
return
packages.sort(
key=cmp_to_key(
lambda x, y: 0
if x.version == y.version
else int(x.version < y.version or -1)
)
)
release = None
for package in packages:
if package.is_prerelease():
if self.option("preview"):
release = package
break
continue
release = package
break
if release is None:
self.line("No new release found")
return
if release.version == Version.parse(__version__):
self.line("You are using the latest version")
return
self.update(release)
def update(self, release):
version = release.version
self.line("Updating to <info>{}</info>".format(version))
if self.lib_backup.exists():
shutil.rmtree(str(self.lib_backup))
# Backup the current installation
if self.lib.exists():
shutil.copytree(str(self.lib), str(self.lib_backup))
shutil.rmtree(str(self.lib))
try:
self._update(version)
except Exception:
if not self.lib_backup.exists():
raise
shutil.copytree(str(self.lib_backup), str(self.lib))
shutil.rmtree(str(self.lib_backup))
raise
finally:
if self.lib_backup.exists():
shutil.rmtree(str(self.lib_backup))
self.line("")
self.line("")
self.line(
"<info>Poetry</info> (<comment>{}</comment>) is installed now. Great!".format(
version
)
)
def _update(self, version):
from poetry.utils.helpers import temporary_directory
platform = sys.platform
if platform == "linux2":
platform = "linux"
checksum = "poetry-{}-{}.sha256sum".format(version, platform)
try:
r = urlopen(self.BASE_URL + "/{}/{}".format(version, checksum))
except HTTPError as e:
if e.code == 404:
raise RuntimeError("Could not find {} file".format(checksum))
raise
checksum = r.read().decode()
# We get the payload from the remote host
name = "poetry-{}-{}.tar.gz".format(version, platform)
try:
r = urlopen(self.BASE_URL + "/{}/{}".format(version, name))
except HTTPError as e:
if e.code == 404:
raise RuntimeError("Could not find {} file".format(name))
raise
meta = r.info()
size = int(meta["Content-Length"])
current = 0
block_size = 8192
bar = self.progress_bar(max=size)
bar.set_format(" - Downloading <info>{}</> <comment>%percent%%</>".format(name))
bar.start()
sha = hashlib.sha256()
with temporary_directory(prefix="poetry-updater-") as dir_:
tar = os.path.join(dir_, name)
with open(tar, "wb") as f:
while True:
buffer = r.read(block_size)
if not buffer:
break
current += len(buffer)
f.write(buffer)
sha.update(buffer)
bar.set_progress(current)
bar.finish()
# Checking hashes
if checksum != sha.hexdigest():
raise RuntimeError(
"Hashes for {} do not match: {} != {}".format(
name, checksum, sha.hexdigest()
)
)
gz = GzipFile(tar, mode="rb")
try:
with tarfile.TarFile(tar, fileobj=gz, format=tarfile.PAX_FORMAT) as f:
f.extractall(str(self.lib))
finally:
gz.close()
def process(self, *args):
return subprocess.check_output(list(args), stderr=subprocess.STDOUT)
def _bin_path(self, base_path, bin):
if sys.platform == "win32":
return (base_path / "Scripts" / bin).with_suffix(".exe")
return base_path / "bin" / bin
| 27.972973
| 90
| 0.54847
| 654
| 6,210
| 5.11315
| 0.311927
| 0.027213
| 0.026914
| 0.023923
| 0.163278
| 0.129785
| 0.102871
| 0.102871
| 0.102871
| 0.102871
| 0
| 0.005939
| 0.349275
| 6,210
| 221
| 91
| 28.099548
| 0.821579
| 0.01401
| 0
| 0.230303
| 0
| 0
| 0.11832
| 0.01095
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048485
| false
| 0
| 0.139394
| 0.018182
| 0.278788
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be7730b08647563bbdf351876a21f2fa9df7d7f9
| 3,765
|
py
|
Python
|
main.py
|
rohit-k-das/crowdstrike-alerts
|
48c23357f819f90134f76cefb58f1355967363d4
|
[
"MIT"
] | 3
|
2019-07-10T17:05:56.000Z
|
2019-10-18T22:34:08.000Z
|
main.py
|
rohit-k-das/crowdstrike-alerts
|
48c23357f819f90134f76cefb58f1355967363d4
|
[
"MIT"
] | 1
|
2020-01-09T14:43:58.000Z
|
2020-02-06T11:24:04.000Z
|
main.py
|
rohit-k-das/crowdstrike-alerts
|
48c23357f819f90134f76cefb58f1355967363d4
|
[
"MIT"
] | 2
|
2019-07-10T17:05:57.000Z
|
2019-10-18T22:34:09.000Z
|
import requests
import crowdstrike_detection as crowdstrike
import logging
import click
import urllib.parse
import configparser
import os
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(name)-15s [%(levelname)-8s]: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p')
logger = logging.getLogger(__name__)
Config = configparser.ConfigParser()
Config.read(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'Crowdstrike_creds'))
# Create your own slackbot
hubot_webhook_url = Config.get('Settings', 'Slackbot_Url')
# Send slack alert via hubot for each high or critical detection in crowdstrike
def send_hubot_alert_crowdstrike(detection):
logger.info("Send hubot alert for detection %s" % detection.detection_id)
# Emoji for slack based on action taken
green_alerts = ['Kill process', 'Kill subprocess', 'Quarantine file', 'Kill parent', 'Process blocked',
'Operation blocked']
red_alerts = ['Policy disabled']
amber_alerts = []
actions = []
for behavior in detection.behavior:
actions.extend(behavior['action_taken'])
if actions:
actions = list(set(actions))
alerts = []
if actions:
if list(set(actions).intersection(red_alerts)):
alerts.append(':red-alert: Allowed')
if list(set(actions).intersection(green_alerts)):
alerts.append(':green-alert: Blocked')
else:
alerts.append(':red-alert: Allowed')
if ':green-alert: Blocked' in alerts and ':red-alert: Allowed' in alerts:
alerts = [':amber-alert: Suspicious']
message_to_send = ":crowd-strike: *%s* Alert: <%s|%s> ---> %s\n" % (
detection.severity, detection.link, detection.detection_id.split(':')[2], str(alerts).strip('[').strip(']').replace("'", ""))
message_to_send = "%sDevice: %s\n" % (message_to_send, detection.device)
for behavior in detection.behavior:
message_to_send = "%sBad Behavior: %s\n" % (message_to_send, behavior['bad_behavior'].replace('&', '%26amp;').replace('<', '%26lt;').replace('>', '%26gt;'))
message_to_send = "%sHash: %s\n" % (message_to_send, behavior['hash'])
message_to_send = "%sParent Cmd: %s\n" % (message_to_send, behavior['parent_commandline'])
message_to_send = "%sTactic-Technique: %s\n" % (message_to_send, behavior['tactic + technique'])
if behavior['action_taken']:
message_to_send = "%sAction Taken: %s" % (
message_to_send, str(behavior['action_taken']).strip('[').strip(']').replace("'", ""))
else:
message_to_send = "%sAction Taken: %s" % (message_to_send, 'None')
if len(detection.behavior) > 1:
message_to_send = "%s\n" % message_to_send
# Whom to send the alert
send_to = 'yourchannel or a user'
data = {'message': message_to_send, 'users': send_to}
data = urllib.parse.urlencode(data)
headers = {"Content-Type": "application/x-www-form-urlencoded"}
resp = requests.post(hubot_webhook_url, headers=headers, data=data)
if resp.ok:
logger.info("Sent alert to user/channel %s" % send_to)
else:
logger.critical("Unable to connect to hubot.")
logger.info("Hubot Error %d:%s" % (resp.status_code, resp.text))
@click.command()
@click.option("-d", "--duration", default=600, show_default=True, nargs=1, type=int, required=False, help="Crowdstrike detections that were last seen since 'duration' seconds")
def main(duration):
crowdstrike_detections = crowdstrike.fetch_detections(duration)
if crowdstrike_detections:
logger.info("Sending alerts")
for detection in crowdstrike_detections:
send_hubot_alert_crowdstrike(detection)
if __name__ == '__main__':
main()
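# --- Illustrative sketch (not part of the original file) ---
# A hypothetical way to drive the click command programmatically (e.g. from a test),
# assuming the Crowdstrike_creds config and the crowdstrike_detection module are present.
def _invoke_for_testing():
    from click.testing import CliRunner
    runner = CliRunner()
    # Ask for detections last seen within the past hour instead of the 600 s default.
    result = runner.invoke(main, ["--duration", "3600"])
    return result.output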
| 41.373626
| 176
| 0.661355
| 468
| 3,765
| 5.136752
| 0.358974
| 0.047421
| 0.097338
| 0.027454
| 0.183028
| 0.094842
| 0.032446
| 0.032446
| 0.032446
| 0
| 0
| 0.004913
| 0.18911
| 3,765
| 90
| 177
| 41.833333
| 0.782509
| 0.043293
| 0
| 0.130435
| 0
| 0
| 0.253545
| 0.009174
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028986
| false
| 0
| 0.101449
| 0
| 0.130435
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be775d3a62274c2c57f452dafb16e1035b3dff0c
| 4,593
|
py
|
Python
|
Test3/yandexAPI3.py
|
klepik1990/YandexTestAPI
|
ded41ff607c0b209b51efbcaa13c8008156a5e0a
|
[
"MIT"
] | null | null | null |
Test3/yandexAPI3.py
|
klepik1990/YandexTestAPI
|
ded41ff607c0b209b51efbcaa13c8008156a5e0a
|
[
"MIT"
] | null | null | null |
Test3/yandexAPI3.py
|
klepik1990/YandexTestAPI
|
ded41ff607c0b209b51efbcaa13c8008156a5e0a
|
[
"MIT"
] | null | null | null |
import requests
import json
HEADERS = {"Authorization": "OAuth AgAAAAA00Se2AAW1W1yCegavqkretMXBGkoUUQk", "Accept": "*/*"}
URL = "https://cloud-api.yandex.net:443/v1/disk/"
def get_folder_info(folder_name_1, folder_name_2, url=None, headers=None):
"""Get information about the status of folders on the disk.
Args:
folder_name_1: name of the root folder.
folder_name_2: name of the nested folder.
url: request URL.
headers: request headers containing the authorization token.
Returns:
Folder information: the path to the folders if they were created successfully, otherwise the error description.
"""
info = requests.get(url= URL + "resources?path=" + folder_name_1 + "/" + folder_name_2 + "&fields=path", headers=HEADERS)
dict_response = json.loads(info.content)
if info.status_code == 404:
return dict_response["description"]
else:
return dict_response["path"]
def get_file_info(folder_name_1, folder_name_2, file_name, url=None, headers=None):
"""Get information about a file.
Args:
folder_name_1: name of the root folder.
folder_name_2: name of the nested folder.
file_name: name of the file.
url: request URL.
headers: request headers containing the authorization token.
Returns:
Path to the file.
"""
file_info_json = requests.get(url= URL + "resources?path=" + folder_name_1 + "/" + folder_name_2 + "/" + file_name +
".jpg&fields=path", headers = HEADERS)
file_info_dict = json.loads(file_info_json.content)
if file_info_json.status_code == 404:
return file_info_dict["description"]
else:
return file_info_dict["path"]
def create_folder(folder_name_1, folder_name_2, url=None, headers=None):
"""Create folders on the disk.
Args:
folder_name_1: name of the root folder.
folder_name_2: name of the nested folder.
url: request URL.
headers: request headers containing the authorization token.
Returns:
Folder information, via a call to another function.
"""
response_code = [202, 204]
new_folder = requests.put(url= URL + "resources?path=" + folder_name_1, headers=HEADERS)
if new_folder.status_code == 409:
new_folder = requests.delete(url= URL + "resources?path=" + folder_name_1 + "&permanently=true", headers=HEADERS)
if new_folder.status_code in response_code:
requests.put(url= URL + "resources?path=" + folder_name_1, headers=HEADERS)
requests.put(url= URL + "resources?path=" + folder_name_1 + "/" + folder_name_2, headers=HEADERS)
return get_folder_info(folder_name_1, folder_name_2)
def create_file(folder_name_1, folder_name_2, file_name, url=None, headers=None):
"""Upload a file to the disk.
Args:
folder_name_1: name of the root folder.
folder_name_2: name of the nested folder.
file_name: name of the file.
url: request URL.
headers: request headers containing the authorization token.
Returns:
Information about the created file, via a call to another function.
"""
assert len(file_name) > 0, "File name was not provided"
new_file = requests.get(url= URL + "resources/upload?path=" + folder_name_1 + "/" + folder_name_2 + "/" + file_name +
".jpg&overwrite=true", headers=HEADERS)
get_link = new_file.content
link = json.loads(get_link)
requests.put(url=link["href"])
return get_file_info(folder_name_1, folder_name_2, file_name)
def move_to_bucket(folder_name, url=None, headers=None):
"""Move a folder and its contents to the trash.
Args:
folder_name: name of the root folder.
url: request URL.
headers: request headers containing the authorization token.
Returns:
A link for checking the operation status.
"""
order_response = requests.delete(url= URL + "resources?path=" + folder_name, headers=HEADERS)
return json.loads(order_response.content)["href"]
def get_status(link, headers=None):
"""Get the status of an operation from its link.
Args:
link: the link whose status is being checked.
headers: request headers containing the authorization token.
Returns:
The operation status.
"""
status_response = requests.get(url=link, headers=HEADERS)
return json.loads(status_response.content)["status"]
def clean_bucket():
"""Empty the trash.
Returns:
A link for checking the operation status.
"""
remove_folder = requests.delete(url= URL + "trash/resources", headers=HEADERS)
return json.loads(remove_folder.content)["href"]
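# --- Illustrative sketch (not part of the original file) ---
# A short, hypothetical end-to-end run of the helpers above; it assumes the OAuth token
# in HEADERS is valid for the target Yandex.Disk account. Folder and file names are examples.
if __name__ == "__main__":
    print(create_folder("reports", "2021"))          # create reports/2021
    print(create_file("reports", "2021", "photo"))   # create reports/2021/photo.jpg
    link = move_to_bucket("reports")                 # move the root folder to the trash
    print(get_status(link))                          # e.g. "success" once the move finishes
    print(clean_bucket())                            # empty the trash afterwards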
| 33.282609
| 125
| 0.674069
| 581
| 4,593
| 5.118761
| 0.216867
| 0.114324
| 0.062878
| 0.057162
| 0.589106
| 0.514459
| 0.514459
| 0.463013
| 0.434095
| 0.414257
| 0
| 0.015695
| 0.223166
| 4,593
| 137
| 126
| 33.525547
| 0.817825
| 0.35184
| 0
| 0.044444
| 0
| 0
| 0.141917
| 0.022312
| 0
| 0
| 0
| 0
| 0.022222
| 1
| 0.155556
| false
| 0
| 0.044444
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be78c46e8b283fc835a189209cd53b3fea610e40
| 3,208
|
py
|
Python
|
app/users/operator/views.py
|
trinanda/AQUR
|
2a415b05ba4c0113b05b6fa14fb454af2bad52ec
|
[
"MIT"
] | null | null | null |
app/users/operator/views.py
|
trinanda/AQUR
|
2a415b05ba4c0113b05b6fa14fb454af2bad52ec
|
[
"MIT"
] | null | null | null |
app/users/operator/views.py
|
trinanda/AQUR
|
2a415b05ba4c0113b05b6fa14fb454af2bad52ec
|
[
"MIT"
] | null | null | null |
import os
from collections import defaultdict
from flask import render_template
from flask_login import login_required
from sqlalchemy import and_
from app import db
from app.decorators import operator_required
from app.models import Student, MonthNameList, Course, PaymentStatus, Payment, Teacher, Schedule
from app.users.operator import operator
@operator.route('/')
@login_required
@operator_required
def index():
title = os.environ.get('APP_NAME')
# get all students data on schedule, except if the student tuition payment is None, PENDING, REJECTED or WARNING_3
students_courses_data = db.session.query(Schedule, Payment).join(Payment).filter(
# .isnot(None) builds a SQL "IS NOT NULL" clause; a plain "is not None" would be evaluated in Python
and_(Payment.status_of_payment.isnot(None),
Payment.status_of_payment != PaymentStatus.PENDING.name,
Payment.status_of_payment != PaymentStatus.REJECTED.name,
Payment.status_of_payment != PaymentStatus.WARNING_3.name))
# get the amount of Teachers and Students
total_students = Student.query.count()
total_teachers = Teacher.query.count()
month_name_list = []
for data in MonthNameList:
month_name_list.append(str(data))
# make a query object for "Tahsin" and "Arabic Language" course
tahsin = students_courses_data.join(Course).filter(Course.name == "Tahsin")
arabic = students_courses_data.join(Course).filter(Course.name == "Bahasa Arab")
# the total payment for the courses each month
tahsin_course_data = []
arabic_course_data = []
for data in tahsin:
for month_name in month_name_list:
tahsin_course_data.append({str(month_name): data.Payment.created_at.strftime('%B').count(month_name)})
for data in arabic:
for month_name in month_name_list:
arabic_course_data.append({str(month_name): data.Payment.created_at.strftime('%B').count(month_name)})
# merge and sum the total value from the dictionary on the same month from the _courses_data result above
total_tahsin_students_per_month = defaultdict(int)
total_arabic_students_per_month = defaultdict(int)
for d in tahsin_course_data:
for key, value in d.items():
total_tahsin_students_per_month[key] += value
for d in arabic_course_data:
for key, value in d.items():
total_arabic_students_per_month[key] += value
# store all of the month values on a list for each course
tahsin_values = []
arabic_values = []
for key, value in total_tahsin_students_per_month.items():
tahsin_values.append(value)
for key, value in total_arabic_students_per_month.items():
arabic_values.append(value)
# make a dictionary to represent course name with the matching total student that do the payment for each month
data_courses_each_month = [
{
'Tahsin': tahsin_values,
},
{
'Bahasa Arab': arabic_values
}
]
return render_template('main/operator/operator-dashboard.html', title=title, total_teachers=total_teachers,
total_students=total_students, month_name_list=month_name_list,
data_courses_each_month=data_courses_each_month)
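# --- Illustrative sketch (not part of the original file) ---
# Hypothetical values showing the shape of data_courses_each_month handed to the template:
# one summed payment count per entry of month_name_list, for each course.
_example_data_courses_each_month = [
    {"Tahsin": [3, 5, 2]},
    {"Bahasa Arab": [1, 4, 6]},
]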
| 40.1
| 118
| 0.711658
| 428
| 3,208
| 5.088785
| 0.238318
| 0.049587
| 0.035813
| 0.040404
| 0.342975
| 0.224977
| 0.162534
| 0.137741
| 0.096419
| 0.065197
| 0
| 0.00079
| 0.211035
| 3,208
| 79
| 119
| 40.607595
| 0.859739
| 0.1649
| 0
| 0.067797
| 0
| 0
| 0.031449
| 0.013852
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016949
| false
| 0
| 0.152542
| 0
| 0.186441
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be7e9dc9b18c9759a533f45fd2110a059eb361f0
| 19,192
|
py
|
Python
|
pfile/accessor.py
|
thorwhalen/ut
|
353a4629c35a2cca76ef91a4d5209afe766433b4
|
[
"MIT"
] | 4
|
2016-12-17T20:06:10.000Z
|
2021-11-19T04:45:29.000Z
|
pfile/accessor.py
|
thorwhalen/ut
|
353a4629c35a2cca76ef91a4d5209afe766433b4
|
[
"MIT"
] | 11
|
2021-01-06T05:35:11.000Z
|
2022-03-11T23:28:31.000Z
|
pfile/accessor.py
|
thorwhalen/ut
|
353a4629c35a2cca76ef91a4d5209afe766433b4
|
[
"MIT"
] | 3
|
2015-06-12T10:44:16.000Z
|
2021-07-26T18:39:47.000Z
|
"""File access utils"""
__author__ = 'thorwhalen'
# from ut.datapath import datapath
import pickle
import os
from ut.util.importing import get_environment_variable
import pandas as pd
import ut.pfile.to as file_to
import ut.pfile.name as pfile_name
import ut.pstr.to as pstr_to
from ut.serialize.local import Local
from ut.serialize.s3 import S3
from os import environ # does this load the whole array? Can we just take MS_DATA instead?
import ut.pstr.trans as pstr_trans
import shutil
try:
MS_DATA = get_environment_variable('MS_DATA')
except KeyError:
MS_DATA = ''
LOCATION_LOCAL = 'LOCAL'
LOCATION_S3 = 'S3'
####################################################################################################################
# Quick Utils
def ms_data_path(relative_root, root_folder=MS_DATA):
return os.path.join(pfile_name.ensure_slash_suffix(root_folder), relative_root)
####################################################################################################################
# FACTORIES
def for_local(relative_root='', read_only=False, extension=None, force_extension=False, root_folder=MS_DATA, **kwargs):
# if a full path (i.e. starting with "/" is entered as a relative_root, then take it as the sound_file_root_folder
if relative_root and ((relative_root[0] == '/') or (relative_root[0] == '~')):
root_folder = relative_root
relative_root = ''
elif relative_root == 'test': # if relative root is test...
relative_root = 'test'
print("you asked for a local test, so I forced the root to be %s" % relative_root)
# ensure that sound_file_root_folder ends with a "/"
file_handler = FilepathHandler(relative_root=pfile_name.ensure_slash_suffix(root_folder)+relative_root)
# take care of extensions
if extension:
extension_handler = ExtensionHandler(extension=extension, force_extension=force_extension)
file_loc_proc = lambda x: file_handler.process(extension_handler.process(x))
else:
file_loc_proc = file_handler.process
instance = Accessor(
relative_root=relative_root,
extension=extension,
force_extension=force_extension,
file_loc_proc=file_loc_proc,
location=LOCATION_LOCAL,
read_only=read_only,
**kwargs
)
instance._set_local_defaults()
return instance
def for_s3(relative_root='loc-data', read_only=False, extension=None, force_extension=False, **kwargs):
if relative_root == 'test':
relative_root = 'loc-data/test'
print("you asked for a s3 test, so I forced the root to be %s" % relative_root)
file_handler = FilepathHandler(relative_root=relative_root)
if extension:
extension_handler = ExtensionHandler(extension=extension, force_extension=force_extension)
file_loc_proc = lambda x: file_handler.process(extension_handler.process(x))
else:
file_loc_proc = file_handler.process
instance = Accessor(
relative_root=relative_root,
extension=extension,
force_extension=force_extension,
file_loc_proc=file_loc_proc,
location=LOCATION_S3,
read_only=read_only,
**kwargs
)
save_kwargs = instance.mk_save_kwargs(relative_root)
try:
bucket_name = save_kwargs['bucket_name']
base_folder = save_kwargs['key_name']
except:
print("couldn't get bucket_name and key_name for relative_root")
instance.s3 = S3(bucket_name=bucket_name, base_folder=base_folder)
instance._set_s3_defaults()
return instance
####################################################################################################################
class Accessor(object):
LOCATION_LOCAL = LOCATION_LOCAL
LOCATION_S3 = LOCATION_S3
def __init__(self,
file_loc_proc=None,
location=LOCATION_LOCAL,
mk_save_kwargs=None,
pre_save_proc=None,
save_fun=None,
mk_load_kwargs=None,
load_fun=None,
post_load_proc=None,
read_only=False,
**kwargs):
# if file_loc_proc:
# self.file_loc_proc = file_loc_proc
# else:
# self.file_loc_proc = FilepathHandler().process
self.file_loc_proc = file_loc_proc
self.location = location
self.mk_save_kwargs = mk_save_kwargs
self.pre_save_proc = pre_save_proc
self.save_fun = save_fun
self.mk_load_kwargs = mk_load_kwargs
self.load_fun = load_fun
self.post_load_proc = post_load_proc
self.read_only = read_only
for k, v in list(kwargs.items()):
self.__setattr__(k,v)
self._guess_missing_attributes()
def __call__(self, *args, **kwargs):
return self.filepath(*args, **kwargs)
####################################################################################################################
# INSTANCE METHODS
def root_folder(self):
if self.extension:
return self.file_loc_proc('')[:(-len(self.extension))]
else:
return self.file_loc_proc('')
def filepath(self, file_spec):
return self.file_loc_proc(file_spec)
def exists(self, file_spec):
return os.path.exists(self.filepath(file_spec))
def save(self, obj, file_spec, **kwargs):
if self.read_only:
raise BaseException("read_only was set to True, so you can't save anything")
else:
# make the dict specifying the input to the save_fun
file_spec = self.file_loc_proc(file_spec)
if self.pre_save_proc:
obj = self.pre_save_proc(obj)
if self.mk_save_kwargs:
file_spec_kwargs = self.mk_save_kwargs(file_spec)
self.save_fun(obj, **file_spec_kwargs)
else:
self.save_fun(obj, file_spec)
def append(self, obj, file_spec, **kwargs): # TODO: Write this code someday
"""
Intent of this function is to append data to a file's data without having to specify how to do so.
For example, if the obj is a string and the file is a text file, use file append.
If obj is a pickled dataframe, the effect (however you do it--hopefully there's a better way than loading the
data, appending, and saving the final result) should be to have a pickled version of the old and new dataframes
appended.
Etc.
"""
pass
# if isinstance(obj, basestring):
# raise ValueError("strings not implemented yet")
# elif isinstance(obj, (pd.DataFrame, pd.Series)):
# pass
def load(self, file_spec, **kwargs):
file_spec = self.file_loc_proc(file_spec)
if pfile_name.get_extension(file_spec) not in ['.xls', '.xlsx']:
if self.mk_load_kwargs:
file_spec_kwargs = self.mk_load_kwargs(file_spec)
obj = self.load_fun(**file_spec_kwargs)
else:
obj = self.load_fun(file_spec)
if self.post_load_proc:
obj = self.post_load_proc(obj)
else:
# obj = pd.read_excel(file_spec, **kwargs)
xls = pd.ExcelFile(file_spec)
kwargs = dict({'sheetname': xls.sheet_names[0]}, **kwargs) # take first sheet if sheet not specified
obj = pd.read_excel(file_spec, **kwargs)
#obj = xls.parse(**kwargs)
return obj
def copy_local_file_to(self, local_file_path, target_file_spec):
'''
Copies a file from the local computer to self.filepath(target_file_spec)
:param local_file_path:
:param target_file_spec:
:return:
'''
if self.read_only:
raise BaseException("read_only was set to True, so you can't copy anything to this location")
else:
if self.location == LOCATION_LOCAL:
if not os.path.exists(local_file_path):
local_file_path = self.filepath(local_file_path)
shutil.copyfile(local_file_path, self.filepath(target_file_spec))
elif self.location == LOCATION_S3:
# make the dict specifying the input to the save_fun
target_file_spec = self.file_loc_proc(target_file_spec)
if self.pre_save_proc:
local_file_path = self.pre_save_proc(local_file_path)
if self.mk_save_kwargs:
file_spec_kwargs = self.mk_save_kwargs(target_file_spec)
self.copy_local_file_to_fun(local_file_path, **file_spec_kwargs)
else:
raise RuntimeError("this shouldn't happen")
else:
raise ValueError("unknown location")
def copy_to(self, target_relative_root, file_spec, target_location=None):
if isinstance(target_relative_root, str):
target_relative_root, target_location = \
_make_a_file_loc_proc_and_location_from_string_specifications(target_relative_root, target_location)
# make a file accessor for the (target_location, target_relative_root)
facc = Accessor(relative_root=target_relative_root, location=target_location)
####################################################################################################################
# PARTIAL FACTORIES
def _add_extension_handler(self, extension, force_extension=False):
extension_handler = ExtensionHandler(extension=extension, force_extension=force_extension)
# Capture the current processor first; referencing self.file_loc_proc inside the
# lambda would call the new lambda itself and recurse forever.
previous_proc = self.file_loc_proc
self.file_loc_proc = lambda x: previous_proc(extension_handler.process(x))
def _guess_missing_attributes(self):
if self.file_loc_proc is None: # if no file_loc_proc is given
if self.location is not None and isinstance(self.location, str):
self.file_loc_proc = self.location
else:
self.file_loc_proc = LOCATION_LOCAL
elif isinstance(self.file_loc_proc, str): # if file_loc_proc is a string
self.file_loc_proc, self.location = \
_make_a_file_loc_proc_and_location_from_string_specifications(self.file_loc_proc, self.location)
# if self.file_loc_proc==LOCATION_LOCAL:
# self.location = LOCATION_LOCAL
# self.file_loc_proc = ''
# elif self.file_loc_proc==LOCATION_S3:
# self.location = LOCATION_S3
# self.file_loc_proc = ''
# else:
# if self.location==LOCATION_LOCAL:
# self.file_loc_proc = FilepathHandler(relative_root=os.path.join(MS_DATA,self.file_loc_proc)).process
# elif self.location==LOCATION_S3:
# self.file_loc_proc = FilepathHandler(relative_root=os.path.join('loc-data',self.file_loc_proc)).process
# set defaults for remaining missing attributes
self._set_defaults()
def _set_defaults(self):
if self.location is None:
print("setting location to LOCAL (because you didn't specify a location)")
self.location = LOCATION_LOCAL
if self.location == LOCATION_LOCAL:
self._set_local_defaults()
elif self.location == LOCATION_S3:
self._set_s3_defaults()
def _set_local_defaults(self, root_folder=MS_DATA):
# set defaults for local if attr is None
self.file_loc_proc = self.file_loc_proc or FilepathHandler(relative_root=os.path.join(root_folder)).process
self.save_fun = self.save_fun or LocalIOMethods().unicode_save
self.load_fun = self.load_fun or LocalIOMethods().unicode_load
# self.pre_save_proc = self.pre_save_proc or FilepathHandler().process
# self.post_load_proc = self.post_load_proc or FilepathHandler().process
def _set_s3_defaults(self):
# set defaults for local if attr is None
self.file_loc_proc = self.file_loc_proc or FilepathHandler(relative_root='loc-data').process
self.mk_save_kwargs = fullpath_to_s3_kargs
self.mk_load_kwargs = fullpath_to_s3_kargs
self.save_fun = self.save_fun or S3IOMethods().unicode_save
self.load_fun = self.load_fun or S3IOMethods().unicode_load
self.copy_local_file_to_fun = S3IOMethods().copy_local_file_to_fun
####################################################################################################################
# OBJECT UTILS
def local_file_loc_proc_simple(self, file_spec):
# add extension
file_spec = self.handle_extension(file_spec)
# remove leading slash if present (because the root folder already ends with /)
if file_spec.startswith('/'):
file_spec = file_spec[1:]
return file_spec
def handle_extension(self, file_spec):
if self.extension:
if self.force_extension:
file_spec = pfile_name.replace_extension(file_spec, self.extension)
else:
file_spec = pfile_name.add_extension_if_not_present(file_spec, self.extension)
return os.path.join(self.root_folder(), file_spec)
####################################################################################################################
# OTHER UTILS
def _make_a_file_loc_proc_and_location_from_string_specifications(file_loc_proc, location):
if file_loc_proc is None and isinstance(location, str):
file_loc_proc = location + "/"
location = None
elif location is None and isinstance(file_loc_proc, str):
first_folder = pfile_name.get_highest_level_folder(file_loc_proc)
if first_folder in [LOCATION_LOCAL, LOCATION_S3]:
location = first_folder # set the location to first_folder
file_loc_proc = file_loc_proc.replace(location + "/", "") # remove the first_folder (str.replace returns a new string)
else:
raise ValueError("location was not specified and couldn't be guessed from the file_loc_proc")
else:
raise ValueError("you've neither specified a file_loc_proc (as a file_loc_proc) nor a location")
# make a file accessor for the (location, target_relative_root)
file_loc_proc = FilepathHandler(relative_root=os.path.join(location,file_loc_proc)).process
return (file_loc_proc, location)
def file_loc_proc_from_full_path(fullpath):
return FilepathHandler(relative_root=fullpath).process
def fullpath_to_s3_kargs(filename):
# remove leading slash if present (because the bucket/key join below expects no leading /)
if filename.startswith('/'):
filename = filename[1:]
mother_root = pfile_name.get_highest_level_folder(filename)
rest_of_the_filepath = filename.replace(mother_root + '/','',1)
return {
'bucket_name': mother_root,
'key_name': rest_of_the_filepath
}
class ExtensionHandler(object):
def __init__(self, extension=None, force_extension=False):
self.extension = extension
self.force_extension = force_extension
def process(self, file_spec):
if self.force_extension:
return pfile_name.replace_extension(file_spec, self.extension)
else:
return pfile_name.add_extension_if_not_present(file_spec, self.extension)
class FilepathHandler(object):
def __init__(self, relative_root=''):
self.relative_root = relative_root
def process(self, filepath=''):
return os.path.join(self.relative_root, filepath)
##### LOCAL METHODS
class LocalIOMethods(object):
def __init__(self, encoding="UTF-8"):
self.encoding = encoding
def unicode_save(self, obj, filepath=None, **kwargs):
if isinstance(obj, str):
# pstr_to.file(string=pstr_trans.to_unicode_or_bust(obj), tofile=filepath, encoding=self.encoding)
# pstr_to.file(string=pstr_trans.to_utf8_or_bust_iter(obj), tofile=filepath, encoding=self.encoding)
# pstr_to.file(string=pstr_trans.str_to_utf8_or_bust(obj), tofile=filepath, encoding=self.encoding)
pstr_to.file(string=obj, tofile=filepath, encoding=self.encoding)
else:
pickle.dump(obj=obj, file=open(filepath, 'wb'))  # pickle needs a binary file handle
def simple_save(self, obj, filepath=None, **kwargs):
if isinstance(obj, str):
pstr_to.file(string=obj, tofile=filepath, encoding=self.encoding)
else:
pickle.dump(obj=obj, file=open(filepath, 'wb'))  # pickle needs a binary file handle
def unicode_load(self, filepath=None, **kwargs):
"""
try pd.from_pickle, then pickle.loading, and if it doesn't work, try file_to.string
"""
return pstr_trans.to_unicode_or_bust(self.simple_load(filepath=filepath, **kwargs))
# try:
# try: # getting it as a pandas object
# return pstr_trans.to_unicode_or_bust(pd.read_pickle(path=filepath))
# except Exception: # getting it as a pickled object
# return pstr_trans.to_unicode_or_bust(pickle.load(file=open(filepath, 'r')))
# except Exception: # getting it as a string
# return pstr_trans.to_unicode_or_bust(file_to.string(filename=filepath))
def simple_load(self, filepath=None, **kwargs):
"""
try pd.read_pickle, pickle.load, and file_to.string in that order
"""
try:
try: # getting it as a pandas object
return pd.read_pickle(path=filepath)
except Exception: # getting it as a pickled object
return pickle.load(file=open(filepath, 'rb'))
except Exception: # getting it as a string
return file_to.string(filename=filepath)
##### S3 METHODS
class S3IOMethods(object):
def __init__(self, **kwargs):
self.s3 = S3(**kwargs)
def unicode_save(self, obj, key_name, bucket_name):
if isinstance(obj, str):
self.s3.dumps(the_str=pstr_trans.to_unicode_or_bust(obj), key_name=key_name, bucket_name=bucket_name)
else:
self.s3.dumpo(obj=obj, key_name=key_name, bucket_name=bucket_name)
def simple_save(self, obj, key_name, bucket_name):
if isinstance(obj, str):
self.s3.dumps(the_str=obj, key_name=key_name, bucket_name=bucket_name)
else:
self.s3.dumpo(obj=obj, key_name=key_name, bucket_name=bucket_name)
def unicode_load(self, key_name, bucket_name):
"""
try pickle.loading, and if it doesn't work, try file_to.string
"""
try:
return self.s3.loado(key_name=key_name, bucket_name=bucket_name)
except:
return pstr_trans.to_unicode_or_bust(self.s3.loads(key_name=key_name, bucket_name=bucket_name))
def simple_load(self, key_name, bucket_name):
"""
try pickle.loading, and if it doesn't work, try file_to.string
"""
try:
return self.s3.loado(key_name=key_name, bucket_name=bucket_name)
except:
return self.s3.loads(key_name=key_name, bucket_name=bucket_name)
def copy_local_file_to_fun(self, filepath, key_name, bucket_name):
return self.s3.dumpf(f=filepath, key_name=key_name, bucket_name=bucket_name)
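# --- Illustrative sketch (not part of the original file) ---
# A small, hypothetical use of the factories above; it assumes the MS_DATA environment
# variable points at a writable directory.
if __name__ == '__main__':
    acc = for_local('test', extension='.txt', force_extension=True)
    acc.save('hello world', 'greeting')   # writes <MS_DATA>/test/greeting.txt
    print(acc.load('greeting'))           # -> 'hello world'
    print(acc.exists('greeting'))         # -> True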
| 41.90393
| 125
| 0.634275
| 2,458
| 19,192
| 4.650936
| 0.110252
| 0.036127
| 0.05677
| 0.039363
| 0.538838
| 0.460549
| 0.398268
| 0.363366
| 0.324615
| 0.278254
| 0
| 0.003295
| 0.240934
| 19,192
| 457
| 126
| 41.995624
| 0.781439
| 0.193935
| 0
| 0.289116
| 0
| 0
| 0.047029
| 0
| 0
| 0
| 0
| 0.002188
| 0
| 1
| 0.129252
| false
| 0.003401
| 0.040816
| 0.02381
| 0.278912
| 0.013605
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be7ea94dc71a3948ab59fd9c3e80bde2599bb1f1
| 4,309
|
py
|
Python
|
scripts/statistics.py
|
cstenkamp/MastersThesisText
|
d026f9c19819c83d99dfff12b594db9d061bfb31
|
[
"CC0-1.0"
] | null | null | null |
scripts/statistics.py
|
cstenkamp/MastersThesisText
|
d026f9c19819c83d99dfff12b594db9d061bfb31
|
[
"CC0-1.0"
] | null | null | null |
scripts/statistics.py
|
cstenkamp/MastersThesisText
|
d026f9c19819c83d99dfff12b594db9d061bfb31
|
[
"CC0-1.0"
] | null | null | null |
import subprocess
import git
from os.path import dirname, join, abspath
import pandas as pd
from matplotlib import pyplot as plt
import requests
import io
import zipfile
import tempfile
from datetime import timedelta
FILENAME = join(dirname(__file__), "..", "thesis.tex")
DISP_PAGESMAX = 80
DISP_WORDSMAX = 10000
def return_piped_cmd(cmd, stdin=None):
cmd = cmd.split("|")
if not stdin:
ps = subprocess.Popen(cmd[0].strip().split(" "), stdout=subprocess.PIPE)
else:
ps = subprocess.Popen(cmd[0].strip().split(" "), stdin=subprocess.PIPE, stdout=subprocess.PIPE)
ps.stdin.write(stdin.encode("UTF-8"))
ps.stdin.close()
if len(cmd) == 1:
return ps.stdout.read().decode("UTF-8")
output = subprocess.check_output(cmd[1].strip().split(" "), stdin=ps.stdout).decode("UTF-8")
ps.wait()
return output
def get_todos(fname=None, txt=None):
if fname:
with open(fname, "r") as rfile:
txt = rfile.read()
txt = txt.replace("% ", "%").lower()
return txt.count("%todo")
def get_npages(fname):
tmp = return_piped_cmd(f'pdfinfo {fname.replace(".tex", ".pdf")}')
return int([i for i in tmp.split("\n") if "Pages:" in i][0][len("Pages:"):].strip())
def github_get_npages(owner, repo, pdfname):
date_pages = {}
resp = requests.get(f"https://api.github.com/repos/{owner}/{repo}/actions/artifacts", headers=dict(Accept="application/vnd.github.v3+json"))
for i in resp.json()["artifacts"]:
art_id = i["url"][i["url"].rfind("/")+1:]
re2 = requests.get(f"https://nightly.link/{owner}/{repo}/actions/artifacts/{art_id}.zip")
if re2.status_code != 404:
# print(i["created_at"])
archive = zipfile.ZipFile(io.BytesIO(re2.content))
with tempfile.NamedTemporaryFile(suffix=".pdf") as wfile:
wfile.write(archive.read(pdfname))
n_pages = get_npages(wfile.name)
# print(f"Pages: {n_pages}")
date_pages[pd.to_datetime([i["created_at"]]).to_pydatetime()[0]] = n_pages
return pd.Series(date_pages)
def plot_df(df):
ax1 = df["Words"].plot(color="red", linestyle="-", marker="o", ylabel="Words")
ax1.set_ylim(0, max(df["Words"].max(), DISP_WORDSMAX))
ax2 = ax1.twinx()
ax2.spines['right'].set_position(('axes', 1.0))
df["Todos"].plot(ax=ax2, color="blue", linestyle="-", marker="x", ylabel="Todos")
ax3 = ax1.twinx()
df["Pages"].plot(ax=ax3, color="yellow", linestyle="", marker="s", ylabel="Pages")
for ax in [ax2, ax3]: ax.set_ylim((0, max(df["Todos"].max(), df["Pages"].max(), DISP_PAGESMAX)))
ax3.yaxis.set_ticklabels([])
lines, labels = list(zip(*[[i[0] for i in ax.get_legend_handles_labels()] for ax in [ax1, ax2, ax3]]))
plt.legend(lines, labels, loc=0)
plt.show()
def create_history_df(repo_dir, filename):
#print(abspath(repo_dir))
repo = git.Repo(repo_dir)
all_commits = {}
for commit in repo.iter_commits():
txt = (commit.tree / filename).data_stream.read().decode("UTF-8")
n_words = int(return_piped_cmd("detex | wc -w", stdin=txt).strip())
n_todos = get_todos(txt=txt)
# print(datetime.fromtimestamp(commit.committed_date))
# print(f"words: {n_words}, todos: {n_todos}")
all_commits[pd.to_datetime(commit.committed_datetime, utc=True)] = [n_words, n_todos]
df = pd.DataFrame(all_commits, index=["Words", "Todos"]).T
return df
def merge_page_df(df, date_pages):
for date in df.index:
try:
nearest_datepage_after = date_pages.index[date_pages.index.get_loc(date, method='bfill')]
except KeyError:
continue
if nearest_datepage_after-date <= timedelta(hours=2):
df.loc[date, "Pages"] = int(date_pages[nearest_datepage_after])
return df
if __name__ == "__main__":
#history
df = create_history_df(dirname(FILENAME), "thesis.tex")
date_pages = github_get_npages("cstenkamp", "MastersThesisText", "thesis.pdf")
df = merge_page_df(df, date_pages)
plot_df(df)
#current
n_words = int(return_piped_cmd(f"detex {FILENAME} | wc -w"))
n_pages = get_npages(FILENAME)
n_todos = get_todos(FILENAME)
print(f"Words: {n_words}, Pages: {n_pages}, Todos: {n_todos}")
| 38.132743
| 144
| 0.637503
| 611
| 4,309
| 4.330606
| 0.319149
| 0.034014
| 0.021164
| 0.015117
| 0.080121
| 0.057445
| 0.023432
| 0
| 0
| 0
| 0
| 0.013486
| 0.191228
| 4,309
| 113
| 145
| 38.132743
| 0.745768
| 0.042933
| 0
| 0.022472
| 0
| 0
| 0.126306
| 0.012631
| 0
| 0
| 0
| 0.00885
| 0
| 1
| 0.078652
| false
| 0
| 0.11236
| 0
| 0.269663
| 0.011236
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be7ef9e5cafc81c92530c829cae514f567ffa39a
| 1,966
|
py
|
Python
|
setup.py
|
TheFraserLab/enrich_pvalues
|
6c5065da5e6367cc39a045afbdfa1e78322857a6
|
[
"MIT"
] | 1
|
2019-03-25T17:38:47.000Z
|
2019-03-25T17:38:47.000Z
|
setup.py
|
TheFraserLab/enrich_pvalues
|
6c5065da5e6367cc39a045afbdfa1e78322857a6
|
[
"MIT"
] | null | null | null |
setup.py
|
TheFraserLab/enrich_pvalues
|
6c5065da5e6367cc39a045afbdfa1e78322857a6
|
[
"MIT"
] | null | null | null |
"""Installation instructions for enrich_pvalues."""
import os
from setuptools import setup
import enrich_pvalues # For version
VERSION=enrich_pvalues.__version__
GITHUB='https://github.com/MikeDacre/enrich_pvalues'
with open('requirements.txt') as fin:
REQUIREMENTS = [
i[0] for i in [j.split('>=') for j in fin.read().strip().split('\n')]
]
def read(fname):
"""Read the contents of a file in this dir."""
with open(os.path.join(os.path.dirname(__file__), fname)) as fin:
return fin.read()
# Actual setup instructions
setup(
name = 'enrich_pvalues',
version = VERSION,
author = 'Mike Dacre',
author_email = 'mike.dacre@gmail.com',
description = (
"Compare one dataset to another at a variety of p-value cutoffs"
),
keywords = (
"statistics p-values biology molecular-biology console"
),
long_description = read('README.rst'),
license = 'MIT',
# URLs
url = GITHUB,
download_url='{0}/archive/v{1}.tar.gz'.format(GITHUB, VERSION),
py_modules=['enrich_pvalues'],
entry_points = {
'console_scripts': [
'enrich_pvalues = enrich_pvalues:main',
],
},
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 4 - Beta',
# 'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Utilities',
],
# Requirements
requires=REQUIREMENTS,
install_requires=REQUIREMENTS
)
| 28.085714
| 77
| 0.61648
| 214
| 1,966
| 5.551402
| 0.579439
| 0.087542
| 0.063131
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005442
| 0.252289
| 1,966
| 69
| 78
| 28.492754
| 0.802721
| 0.127162
| 0
| 0.08
| 0
| 0
| 0.411176
| 0.013529
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02
| false
| 0
| 0.06
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be8016a800ed48d86a67fbff5afe5ec6d0a2e6a3
| 2,173
|
py
|
Python
|
examples/source/benchmarks/googlenet_model.py
|
ably77/dcos-tensorflow-tools
|
d434ff6c0cee6db9f62be583723dc2bee46ebbf2
|
[
"Apache-2.0"
] | 7
|
2017-11-02T18:21:37.000Z
|
2019-06-20T20:46:51.000Z
|
scripts/tf_cnn_benchmarks/googlenet_model.py
|
Aetf/tf_benchmarks
|
b473961620de1b03cb34902960c820e195bea678
|
[
"Apache-2.0"
] | 7
|
2017-10-19T20:45:25.000Z
|
2020-03-24T15:28:52.000Z
|
scripts/tf_cnn_benchmarks/googlenet_model.py
|
Aetf/tf_benchmarks
|
b473961620de1b03cb34902960c820e195bea678
|
[
"Apache-2.0"
] | 4
|
2017-10-19T09:57:17.000Z
|
2019-01-22T05:33:25.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Googlenet model configuration.
References:
Szegedy, Christian, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, and Andrew Rabinovich
Going deeper with convolutions
arXiv preprint arXiv:1409.4842 (2014)
"""
import model
class GooglenetModel(model.Model):
def __init__(self):
super(GooglenetModel, self).__init__('googlenet', 224, 32, 0.005)
def add_inference(self, cnn):
def inception_v1(cnn, k, l, m, n, p, q):
cols = [[('conv', k, 1, 1)], [('conv', l, 1, 1), ('conv', m, 3, 3)],
[('conv', n, 1, 1), ('conv', p, 5, 5)],
[('mpool', 3, 3, 1, 1, 'SAME'), ('conv', q, 1, 1)]]
cnn.inception_module('incept_v1', cols)
cnn.conv(64, 7, 7, 2, 2)
cnn.mpool(3, 3, 2, 2, mode='SAME')
cnn.conv(64, 1, 1)
cnn.conv(192, 3, 3)
cnn.mpool(3, 3, 2, 2, mode='SAME')
inception_v1(cnn, 64, 96, 128, 16, 32, 32)
inception_v1(cnn, 128, 128, 192, 32, 96, 64)
cnn.mpool(3, 3, 2, 2, mode='SAME')
inception_v1(cnn, 192, 96, 208, 16, 48, 64)
inception_v1(cnn, 160, 112, 224, 24, 64, 64)
inception_v1(cnn, 128, 128, 256, 24, 64, 64)
inception_v1(cnn, 112, 144, 288, 32, 64, 64)
inception_v1(cnn, 256, 160, 320, 32, 128, 128)
cnn.mpool(3, 3, 2, 2, mode='SAME')
inception_v1(cnn, 256, 160, 320, 32, 128, 128)
inception_v1(cnn, 384, 192, 384, 48, 128, 128)
cnn.apool(7, 7, 1, 1, mode='VALID')
cnn.reshape([-1, 1024])
| 37.465517
| 80
| 0.61942
| 339
| 2,173
| 3.908555
| 0.433628
| 0.083019
| 0.10566
| 0.030189
| 0.181132
| 0.158491
| 0.128302
| 0.128302
| 0.113208
| 0.076981
| 0
| 0.13456
| 0.199724
| 2,173
| 57
| 81
| 38.122807
| 0.627372
| 0.42752
| 0
| 0.214286
| 0
| 0
| 0.058824
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.107143
| false
| 0
| 0.035714
| 0
| 0.178571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be82ffa5bc528b97777e4e4160bb45aca2d0d6ec
| 12,669
|
py
|
Python
|
process_ops.py
|
gcosne/generative_inpainting
|
1ae50277e5815a4f0c1e339ede0dbfae8e5036d1
|
[
"MIT"
] | 11
|
2018-11-16T04:29:06.000Z
|
2019-07-25T08:11:47.000Z
|
process_ops.py
|
Yukariin/PEPSI
|
91aea1ae6f528d92ee19007ed132d3482b3a98cc
|
[
"MIT"
] | null | null | null |
process_ops.py
|
Yukariin/PEPSI
|
91aea1ae6f528d92ee19007ed132d3482b3a98cc
|
[
"MIT"
] | 1
|
2019-07-16T18:52:49.000Z
|
2019-07-16T18:52:49.000Z
|
import cv2
import numpy as np
try:
import scipy
# scipy.ndimage cannot be accessed until explicitly imported
from scipy import ndimage
except ImportError:
scipy = None
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def random_rotation(x, rg, row_axis=0, col_axis=1, channel_axis=2,
fill_mode='nearest', cval=0., interpolation_order=1):
"""Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
interpolation_order int: order of spline interpolation.
see `ndimage.interpolation.affine_transform`
# Returns
Rotated Numpy image tensor.
"""
theta = np.random.uniform(-rg, rg)
x = apply_affine_transform(x, theta=theta, channel_axis=channel_axis,
fill_mode=fill_mode, cval=cval,
order=interpolation_order)
return x
def random_shift(x, wrg, hrg, row_axis=0, col_axis=1, channel_axis=2,
fill_mode='nearest', cval=0., interpolation_order=1):
"""Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
interpolation_order int: order of spline interpolation.
see `ndimage.interpolation.affine_transform`
# Returns
Shifted Numpy image tensor.
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
x = apply_affine_transform(x, tx=tx, ty=ty, channel_axis=channel_axis,
fill_mode=fill_mode, cval=cval,
order=interpolation_order)
return x
def random_shear(x, intensity, row_axis=0, col_axis=1, channel_axis=2,
fill_mode='nearest', cval=0., interpolation_order=1):
"""Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
interpolation_order int: order of spline interpolation.
see `ndimage.interpolation.affine_transform`
# Returns
Sheared Numpy image tensor.
"""
shear = np.random.uniform(-intensity, intensity)
x = apply_affine_transform(x, shear=shear, channel_axis=channel_axis,
fill_mode=fill_mode, cval=cval,
order=interpolation_order)
return x
def random_zoom(x, zoom_range, row_axis=0, col_axis=1, channel_axis=2,
fill_mode='nearest', cval=0., interpolation_order=1):
"""Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
interpolation_order int: order of spline interpolation.
see `ndimage.interpolation.affine_transform`
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple.
"""
if len(zoom_range) != 2:
raise ValueError('`zoom_range` should be a tuple or list of two'
' floats. Received: %s' % (zoom_range,))
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
x = apply_affine_transform(x, zx=zx, zy=zy, channel_axis=channel_axis,
fill_mode=fill_mode, cval=cval,
order=interpolation_order)
return x
def random_channel_shift(x, intensity, channel_axis=0):
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [np.clip(x_channel + np.random.uniform(-intensity, intensity), min_x, max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
# For curving soybean pods. L.C.Uzal
def random_curves_transform(x, strength=0.1, range=(0., 255.)):
    """Remaps the intensities of `x` through a random piecewise-linear curve.

    A single control point is sampled inside `range` and perturbed by at most
    `strength / 2` of the value range; `x` is then interpolated through the
    resulting three-point curve.
    """
    low, high = range
    delta = (high - low) * strength / 2.
    xp = np.random.uniform(low=low + delta, high=high - delta)
    yp = np.random.uniform(low=xp - delta, high=xp + delta)
    xp = np.asarray([low, xp, high])
    yp = np.asarray([low, yp, high])
    return np.interp(x, xp, yp)
def transform_matrix_offset_center(matrix, x, y):
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
def apply_affine_transform(x, theta=0, tx=0, ty=0, shear=0, zx=1, zy=1,
row_axis=0, col_axis=1, channel_axis=2,
fill_mode='nearest', cval=0., order=1):
"""Applies an affine transformation specified by the parameters given.
# Arguments
        x: 3D Numpy array, a single image (two spatial dims plus channels).
theta: Rotation angle in degrees.
tx: Width shift.
        ty: Height shift.
shear: Shear angle in degrees.
zx: Zoom in x direction.
        zy: Zoom in y direction.
row_axis: Index of axis for rows in the input image.
col_axis: Index of axis for columns in the input image.
channel_axis: Index of axis for channels in the input image.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
        cval: Value used for points outside the boundaries
            of the input if `fill_mode='constant'`.
        order: int, order of spline interpolation.
# Returns
The transformed version of the input.
"""
if scipy is None:
raise ImportError('Image transformations require SciPy. '
'Install SciPy.')
transform_matrix = None
if theta != 0:
theta = np.deg2rad(theta)
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
if transform_matrix is None:
transform_matrix = shift_matrix
else:
transform_matrix = np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear = np.deg2rad(shear)
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
if transform_matrix is None:
transform_matrix = shear_matrix
else:
transform_matrix = np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
if transform_matrix is None:
transform_matrix = zoom_matrix
else:
transform_matrix = np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(
transform_matrix, h, w)
x = np.rollaxis(x, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndimage.interpolation.affine_transform(
x_channel,
final_affine_matrix,
final_offset,
order=order,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
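# --- Hedged usage sketch (not part of the original module) ------------------
# Minimal example of apply_affine_transform on a synthetic image, assuming the
# NumPy/SciPy imports of this module are available; the shape and angles are
# illustrative only.
def _example_apply_affine_transform():
    demo = np.random.uniform(0, 255, size=(64, 64, 3))  # H x W x C dummy image
    # Rotate by 15 degrees and zoom both axes by 10% about the image centre.
    out = apply_affine_transform(demo, theta=15, zx=1.1, zy=1.1)
    assert out.shape == demo.shape
    return out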
def random_transform(x, rotation_range=0,
width_shift_range=0.,
height_shift_range=0.,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
horizontal_flip=False,
vertical_flip=False,
random_curves_strength=0.):
# Generate params
if rotation_range:
theta = np.random.uniform(-rotation_range, rotation_range)
else:
theta = 0
h, w = x.shape[0], x.shape[1]
if height_shift_range:
tx = np.random.uniform(-height_shift_range, height_shift_range) * h
else:
tx = 0
if width_shift_range:
ty = np.random.uniform(-width_shift_range, width_shift_range) * w
else:
ty = 0
if shear_range:
shear = np.random.uniform(-shear_range, shear_range)
else:
shear = 0
if np.isscalar(zoom_range):
zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('`zoom_range` should be a float or '
'a tuple or list of two floats. '
'Received arg: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
# Apply transforms
x = apply_affine_transform(x,
theta,
tx, ty,
shear,
zx, zy)
if channel_shift_range != 0:
x = random_channel_shift(x, channel_shift_range, 2)
if horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, 1)
if vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, 0)
if random_curves_strength > 0.:
x = random_curves_transform(x, random_curves_strength)
return x
if __name__ == "__main__":
import argparse
from PIL import Image
parser = argparse.ArgumentParser()
parser.add_argument('--image', default='', type=str)
parser.add_argument('--imageOut', default='result.png', type=str)
args = parser.parse_args()
im = np.array(Image.open(args.image))
img = random_transform(im, rotation_range=10, shear_range=.5, zoom_range=.2, channel_shift_range=10., horizontal_flip=True)
Image.fromarray(np.uint8(img)).save(args.imageOut)
| 38.861963
| 127
| 0.591207
| 1,697
| 12,669
| 4.26046
| 0.120801
| 0.03361
| 0.022822
| 0.03112
| 0.575242
| 0.539972
| 0.529046
| 0.513416
| 0.477593
| 0.477593
| 0
| 0.019158
| 0.311943
| 12,669
| 325
| 128
| 38.981538
| 0.810256
| 0.324809
| 0
| 0.278947
| 0
| 0
| 0.03255
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.042105
| 0
| 0.147368
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be84323ccf5c7d5239ba7b3bf5eba0ad7152ce2f
| 2,927
|
py
|
Python
|
fasm2bels/database/connection_db_utils.py
|
mithro/symbiflow-xc-fasm2bels
|
9ed029558bedca4e726969427dc4e62ecd6d5733
|
[
"ISC"
] | null | null | null |
fasm2bels/database/connection_db_utils.py
|
mithro/symbiflow-xc-fasm2bels
|
9ed029558bedca4e726969427dc4e62ecd6d5733
|
[
"ISC"
] | null | null | null |
fasm2bels/database/connection_db_utils.py
|
mithro/symbiflow-xc-fasm2bels
|
9ed029558bedca4e726969427dc4e62ecd6d5733
|
[
"ISC"
] | null | null | null |
import functools
def create_maybe_get_wire(conn):
    """Returns a memoized lookup mapping (tile, wire) to a wire pkey, or None."""
    c = conn.cursor()
@functools.lru_cache(maxsize=None)
def get_tile_type_pkey(tile):
c.execute('SELECT pkey, tile_type_pkey FROM phy_tile WHERE name = ?',
(tile, ))
return c.fetchone()
@functools.lru_cache(maxsize=None)
def maybe_get_wire(tile, wire):
phy_tile_pkey, tile_type_pkey = get_tile_type_pkey(tile)
c.execute(
'SELECT pkey FROM wire_in_tile WHERE phy_tile_type_pkey = ? and name = ?',
(tile_type_pkey, wire))
result = c.fetchone()
if result is None:
return None
wire_in_tile_pkey = result[0]
c.execute(
'SELECT pkey FROM wire WHERE phy_tile_pkey = ? AND wire_in_tile_pkey = ?',
(phy_tile_pkey, wire_in_tile_pkey))
return c.fetchone()[0]
return maybe_get_wire
def maybe_add_pip(top, maybe_get_wire, feature):
if feature.value != 1:
return
parts = feature.feature.split('.')
assert len(parts) == 3
sink_wire = maybe_get_wire(parts[0], parts[2])
if sink_wire is None:
return
src_wire = maybe_get_wire(parts[0], parts[1])
if src_wire is None:
return
top.active_pips.add((sink_wire, src_wire))
def get_node_pkey(conn, wire_pkey):
c = conn.cursor()
c.execute("SELECT node_pkey FROM wire WHERE pkey = ?", (wire_pkey, ))
return c.fetchone()[0]
def get_wires_in_node(conn, node_pkey):
c = conn.cursor()
c.execute("SELECT pkey FROM wire WHERE node_pkey = ?", (node_pkey, ))
for row in c.fetchall():
yield row[0]
def get_wire(conn, phy_tile_pkey, wire_in_tile_pkey):
c = conn.cursor()
c.execute(
"SELECT pkey FROM wire WHERE wire_in_tile_pkey = ? AND phy_tile_pkey = ?;",
(
wire_in_tile_pkey,
phy_tile_pkey,
))
return c.fetchone()[0]
def get_tile_type(conn, tile_name):
c = conn.cursor()
c.execute(
"""
SELECT name FROM tile_type WHERE pkey = (
SELECT tile_type_pkey FROM phy_tile WHERE name = ?);""", (tile_name, ))
return c.fetchone()[0]
def get_wire_pkey(conn, tile_name, wire):
c = conn.cursor()
c.execute(
"""
WITH selected_tile(phy_tile_pkey, tile_type_pkey) AS (
SELECT
pkey,
tile_type_pkey
FROM
phy_tile
WHERE
name = ?
)
SELECT
wire.pkey
FROM
wire
WHERE
wire.phy_tile_pkey = (
SELECT
selected_tile.phy_tile_pkey
FROM
selected_tile
)
AND wire.wire_in_tile_pkey = (
SELECT
wire_in_tile.pkey
FROM
wire_in_tile
WHERE
wire_in_tile.name = ?
AND wire_in_tile.phy_tile_type_pkey = (
SELECT
tile_type_pkey
FROM
selected_tile
)
);
""", (tile_name, wire))
results = c.fetchone()
assert results is not None, (tile_name, wire)
return results[0]
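# --- Hedged usage sketch (not part of the original module) ------------------
# Shows the intended call pattern; 'channels.db' and the tile/wire names are
# placeholders, not values shipped with fasm2bels.
def _example_lookup():
    import sqlite3
    conn = sqlite3.connect('channels.db')  # hypothetical connection database
    maybe_get_wire = create_maybe_get_wire(conn)
    # Returns the wire pkey, or None when the tile has no such wire.
    return maybe_get_wire('CLBLL_L_X12Y100', 'CLBLL_L_A')  # hypothetical names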
| 21.364964
| 86
| 0.618039
| 417
| 2,927
| 4.031175
| 0.139089
| 0.080904
| 0.071386
| 0.066627
| 0.51517
| 0.468174
| 0.320048
| 0.166568
| 0.166568
| 0.1047
| 0
| 0.006173
| 0.280492
| 2,927
| 136
| 87
| 21.522059
| 0.792023
| 0
| 0
| 0.307692
| 0
| 0
| 0.155575
| 0
| 0
| 0
| 0
| 0
| 0.030769
| 1
| 0.138462
| false
| 0
| 0.015385
| 0
| 0.323077
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be84a1cf98701b670f1ef999229373bd7e2f389c
| 2,443
|
py
|
Python
|
ppr-api/src/services/payment_service.py
|
bcgov/ppr-deprecated
|
c8925b6f6b0d7fb3f4e267dfe25650a1045ef2e3
|
[
"Apache-2.0"
] | 1
|
2019-11-15T19:07:25.000Z
|
2019-11-15T19:07:25.000Z
|
ppr-api/src/services/payment_service.py
|
bryan-gilbert/ppr
|
c8925b6f6b0d7fb3f4e267dfe25650a1045ef2e3
|
[
"Apache-2.0"
] | 6
|
2021-03-03T05:18:35.000Z
|
2022-02-10T21:55:45.000Z
|
ppr-api/src/services/payment_service.py
|
bcgov/ppr-deprecated
|
c8925b6f6b0d7fb3f4e267dfe25650a1045ef2e3
|
[
"Apache-2.0"
] | null | null | null |
"""A module that provides functionality for accessing the Payments API."""
import enum
import http
import logging
import requests
from fastapi import Depends, Header, HTTPException
from fastapi.security.http import HTTPAuthorizationCredentials
import auth.authentication
import config
import schemas.payment
logger = logging.getLogger(__name__)
CORP_TYPE = 'PPR'
class FilingCode(enum.Enum):
"""An enumeration of the filing codes available to PPR."""
SEARCH = 'SERCH'
YEARLY_REGISTRATION = 'FSREG'
INFINITE_REGISTRATION = 'INFRG'
class PaymentService:
"""A service used for interacting with the Payments API."""
auth_header: HTTPAuthorizationCredentials
account_id: str
def __init__(self, auth_header: HTTPAuthorizationCredentials = Depends(auth.authentication.bearer_scheme),
account_id: str = Header(None)):
"""Initialize the repository with the Authorization and Account-Id headers provided in the request."""
self.auth_header = auth_header
self.account_id = account_id
def create_payment(self, filing_code: FilingCode):
"""Submit a payment request and provide the details to the caller."""
request = {
'businessInfo': {'corpType': CORP_TYPE},
'filingInfo': {'filingTypes': [{'filingTypeCode': filing_code.value}]}
}
pay_response = requests.post(
'{}/payment-requests'.format(config.PAY_API_URL), json=request,
headers={
'Authorization': '{} {}'.format(self.auth_header.scheme, self.auth_header.credentials),
'Account-Id': self.account_id
}
)
try:
auth.authentication.check_auth_response(pay_response)
except HTTPException as auth_ex:
logger.error('Create Payment call failed auth with status {}. Response body: {}'.format(
pay_response.status_code, pay_response.text))
raise auth_ex
if not pay_response: # status_code is unsuccessful
logger.error('Create Payment call failed unexpectedly with status {}. Response body: {}'.format(
pay_response.status_code, pay_response.text))
raise HTTPException(status_code=http.HTTPStatus.INTERNAL_SERVER_ERROR)
body = pay_response.json()
return schemas.payment.Payment(id=body['id'], status=body['statusCode'], method=body['paymentMethod'])
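# --- Hedged usage sketch (not part of the original module) ------------------
# Outside FastAPI's dependency injection the service can be constructed
# directly; the bearer token and account id below are placeholders.
def _example_create_payment():
    creds = HTTPAuthorizationCredentials(scheme='Bearer', credentials='<jwt>')
    service = PaymentService(auth_header=creds, account_id='12345')
    return service.create_payment(FilingCode.SEARCH)  # performs a live HTTP call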
| 35.405797
| 110
| 0.677855
| 269
| 2,443
| 5.981413
| 0.423792
| 0.054692
| 0.034804
| 0.039155
| 0.12803
| 0.12803
| 0.085768
| 0.085768
| 0.085768
| 0.085768
| 0
| 0
| 0.22718
| 2,443
| 68
| 111
| 35.926471
| 0.852225
| 0.149406
| 0
| 0.043478
| 0
| 0
| 0.138889
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.195652
| 0
| 0.413043
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be87bd0c5c2ff868bb6a502f0a693e022ddbbafe
| 1,049
|
py
|
Python
|
logger_decorator.py
|
jbhayback/reconciliation-manager
|
5de10a0ec89e397a4937d1764976c94cde06beee
|
[
"MIT"
] | null | null | null |
logger_decorator.py
|
jbhayback/reconciliation-manager
|
5de10a0ec89e397a4937d1764976c94cde06beee
|
[
"MIT"
] | null | null | null |
logger_decorator.py
|
jbhayback/reconciliation-manager
|
5de10a0ec89e397a4937d1764976c94cde06beee
|
[
"MIT"
] | null | null | null |
from datetime import datetime
import inspect
import logging

logger = logging.getLogger(__name__)  # fallback; log_time swaps in the decorated object's own logger
def log_time(msg=None):
def decorator(f):
nonlocal msg
if msg is None:
msg = '{} time spent: '.format(f.__name__)
def inner(*args, **kwargs):
# check if the object has a logger
global logger
if args and hasattr(args[0], 'logger'):
logger = args[0].logger
start = datetime.now()
result = f(*args, **kwargs)
logger.info(
msg + ' {} seconds'.format((datetime.now() - start).total_seconds())
)
return result
return inner
return decorator
def log_params(f):
    arg_spec = inspect.getfullargspec(f).args  # getargspec was removed in Python 3.11
has_self = arg_spec and arg_spec[0] == 'self'
def decorator(*args, **kwargs):
logger.info(
'calling {} with args: {}, and kwargs: {}'.format(
f.__name__, args if not has_self else args[1:], kwargs
)
)
return f(*args, **kwargs)
return decorator
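# --- Hedged usage sketch (not part of the original module) ------------------
# Illustrates stacking the two decorators on a method; the class below is made
# up for the example.
import logging
class _ExampleJob:
    def __init__(self):
        self.logger = logging.getLogger('example-job')
    @log_time()
    @log_params
    def run(self, n):
        return sum(range(n))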
| 25.585366
| 84
| 0.530029
| 121
| 1,049
| 4.46281
| 0.38843
| 0.074074
| 0.040741
| 0.074074
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005917
| 0.355577
| 1,049
| 40
| 85
| 26.225
| 0.792899
| 0.030505
| 0
| 0.133333
| 0
| 0
| 0.074877
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.066667
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be8a2d82d13baa6e60ff4dbca25351bcb2190394
| 1,418
|
py
|
Python
|
critical/tasks.py
|
lenarother/django-critical-css
|
15c12ea02f7ea049e59efba4d963c35f41f26d78
|
[
"MIT"
] | 2
|
2020-06-06T06:50:38.000Z
|
2022-02-03T08:54:28.000Z
|
critical/tasks.py
|
lenarother/django-critical-css
|
15c12ea02f7ea049e59efba4d963c35f41f26d78
|
[
"MIT"
] | 5
|
2018-12-17T11:12:20.000Z
|
2020-11-27T10:28:51.000Z
|
critical/tasks.py
|
lenarother/django-critical-css
|
15c12ea02f7ea049e59efba4d963c35f41f26d78
|
[
"MIT"
] | 1
|
2021-08-19T06:02:44.000Z
|
2021-08-19T06:02:44.000Z
|
import logging
from django.utils.safestring import mark_safe
from django_rq import job
from inline_static.css import transform_css_urls
logger = logging.getLogger(__name__)
@job
def calculate_critical_css(critical_id, original_path):
from .exceptions import CriticalException
from .models import Critical
from .services import calculate_critical_css as service_calculate
logger.info('Task: critical css with id {0} requested.'.format(critical_id))
critical = Critical.objects.filter(id=critical_id).first()
if not critical:
raise CriticalException('There is no Critical object with id {0}'.format(critical_id))
logger.info('Task: {0}, {1}'.format(critical.url, critical.path))
critical.is_pending = True
critical.save(update_fields=['is_pending'])
logger.info('Task: critical css with id {0} pending.'.format(critical_id))
try:
critical_css_raw = service_calculate(critical.url, critical.path)
critical_css = transform_css_urls(original_path, critical.path, critical_css_raw)
except Exception as exc:
critical.is_pending = False
critical.save(update_fields=['is_pending'])
raise CriticalException('Could not calculate critical css') from exc
critical.css = mark_safe(critical_css)
critical.is_pending = False
critical.save()
logger.info('Task: critical css with id {0} saved.'.format(critical_id))
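# --- Hedged usage sketch (not part of the original module) ------------------
# A django_rq @job function can be queued with .delay(); the id and path are
# placeholders.
def _example_enqueue():
    calculate_critical_css.delay(1, '/static/')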
| 37.315789
| 94
| 0.74189
| 189
| 1,418
| 5.359788
| 0.322751
| 0.119447
| 0.055281
| 0.065153
| 0.276407
| 0.215202
| 0.094768
| 0.094768
| 0
| 0
| 0
| 0.005059
| 0.163611
| 1,418
| 37
| 95
| 38.324324
| 0.849073
| 0
| 0
| 0.137931
| 0
| 0
| 0.156559
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0
| 0.241379
| 0
| 0.275862
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be8c87105d1db21be6f93eb2ae080ad460d99a47
| 1,837
|
py
|
Python
|
test.py
|
wei2912/bce-simulation
|
65c19051417c871bce4585481eb06c5ba986a96f
|
[
"MIT"
] | null | null | null |
test.py
|
wei2912/bce-simulation
|
65c19051417c871bce4585481eb06c5ba986a96f
|
[
"MIT"
] | 1
|
2016-11-06T11:50:45.000Z
|
2016-11-06T11:53:49.000Z
|
test.py
|
wei2912/bce-simulation
|
65c19051417c871bce4585481eb06c5ba986a96f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
"""
This script tests the simulations of the experiments.
"""
import math
from utils import coin_var, needle_var
def main():
needle_var_vals = [
(1.1, 1.0),
(1.4, 1.0),
(2.0, 1.0),
(2.9, 1.0),
(3.3, 1.0),
(5.0, 1.0)
]
print("needle_var:")
for L, D in needle_var_vals:
trials = 1000000
pred_prob = needle_var.predict_prob(length=L, gap_width=D)
pred_hits = pred_prob * trials
hits = needle_var.run_trials(length=L, gap_width=D, trials=trials)
if pred_hits == 0 or pred_hits == trials:
stat = float('nan')
else:
stat = sum([
(hits - pred_hits) ** 2 / pred_hits,
((trials - hits) - (trials - pred_hits)) ** 2 / (trials-pred_hits)
])
print("L = {}, D = {}, expected = {}, observed = {}, stat = {}".format(L, D, pred_hits, hits, stat))
print("coin_var:")
coin_var_vals = [
(1.0, 1.0),
(1.0, 1.2),
(1.0, math.sqrt(2)),
(1.0, 1.5),
(1.0, 1.8),
(1.0, 1.9),
(1.0, 2.0),
(1.0, 3.0),
(1.0, 5.0)
]
for R, D in coin_var_vals:
trials = 100000
pred_prob = coin_var.predict_prob(diameter=2*R, gap_width=D)
pred_hits = pred_prob * trials
hits = coin_var.run_trials(diameter=2*R, gap_width=D, trials=trials)
if pred_hits == 0 or pred_hits == trials:
stat = float('nan')
else:
stat = sum([
(hits - pred_hits) ** 2 / pred_hits,
((trials - hits) - (trials - pred_hits)) ** 2 / (trials-pred_hits)
])
print("R = {}, D = {}, expected = {}, observed = {}, stat = {}".format(R, D, pred_hits, hits, stat))
if __name__ == '__main__':
    main()
| 25.873239
| 108
| 0.491018
| 260
| 1,837
| 3.3
| 0.230769
| 0.037296
| 0.024476
| 0.009324
| 0.562937
| 0.435897
| 0.398601
| 0.398601
| 0.398601
| 0.317016
| 0
| 0.066998
| 0.341862
| 1,837
| 70
| 109
| 26.242857
| 0.64268
| 0.04736
| 0
| 0.313725
| 0
| 0
| 0.078116
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019608
| false
| 0
| 0.039216
| 0
| 0.058824
| 0.078431
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be8d24f272fa353fa6c9d0869d13de96b4754241
| 1,960
|
py
|
Python
|
python/530.minimum-absolute-difference-in-bst.py
|
vermouth1992/Leetcode
|
0d7dda52b12f9e01d88fc279243742cd8b4bcfd1
|
[
"MIT"
] | null | null | null |
python/530.minimum-absolute-difference-in-bst.py
|
vermouth1992/Leetcode
|
0d7dda52b12f9e01d88fc279243742cd8b4bcfd1
|
[
"MIT"
] | null | null | null |
python/530.minimum-absolute-difference-in-bst.py
|
vermouth1992/Leetcode
|
0d7dda52b12f9e01d88fc279243742cd8b4bcfd1
|
[
"MIT"
] | null | null | null |
#
# @lc app=leetcode id=530 lang=python3
#
# [530] Minimum Absolute Difference in BST
#
# https://leetcode.com/problems/minimum-absolute-difference-in-bst/description/
#
# algorithms
# Easy (55.23%)
# Total Accepted: 115.5K
# Total Submissions: 209K
# Testcase Example: '[4,2,6,1,3]'
#
# Given the root of a Binary Search Tree (BST), return the minimum absolute
# difference between the values of any two different nodes in the tree.
#
#
# Example 1:
#
#
# Input: root = [4,2,6,1,3]
# Output: 1
#
#
# Example 2:
#
#
# Input: root = [1,0,48,null,null,12,49]
# Output: 1
#
#
#
# Constraints:
#
#
# The number of nodes in the tree is in the range [2, 10^4].
# 0 <= Node.val <= 10^5
#
#
#
# Note: This question is the same as 783:
# https://leetcode.com/problems/minimum-distance-between-bst-nodes/
#
#
# Definition for a binary tree node.
from typing import List
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def getNodeValues(self, root: TreeNode) -> List[int]:
value = []
self.getNodeValuesHelper(root, value)
return value
def getNodeValuesHelper(self, root: TreeNode, value: List[int]):
if root is None:
return
value.append(root.val)
self.getNodeValuesHelper(root.left, value)
self.getNodeValuesHelper(root.right, value)
def getMinimumDifference(self, root: TreeNode) -> int:
# get all the values and put into a list O(n)
value = self.getNodeValues(root)
# sort the list O(nlogn)
value = sorted(value)
# find the minimum difference between ajacent values O(n)
min_abs_diff = abs(value[0] - value[1])
for i in range(1, len(value) - 1):
diff = abs(value[i] - value[i + 1])
if diff < min_abs_diff:
min_abs_diff = diff
return min_abs_diff
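# --- Hedged usage example (not part of the original solution) ---------------
# Builds the BST from Example 1 ([4,2,6,1,3]) by hand and checks the answer.
def _example():
    root = TreeNode(4, TreeNode(2, TreeNode(1), TreeNode(3)), TreeNode(6))
    assert Solution().getMinimumDifference(root) == 1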
| 24.5
| 79
| 0.625
| 274
| 1,960
| 4.427007
| 0.39781
| 0.019786
| 0.032976
| 0.044518
| 0.10305
| 0
| 0
| 0
| 0
| 0
| 0
| 0.038961
| 0.253571
| 1,960
| 79
| 80
| 24.810127
| 0.790157
| 0.453571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.038462
| 0
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be8d50256f2d9fce8a7ed11893b6cad92bc5a14b
| 2,769
|
py
|
Python
|
tensorflow/python/eager/remote_cloud_tpu_test.py
|
abhaikollara/tensorflow
|
4f96df3659696990cb34d0ad07dc67843c4225a9
|
[
"Apache-2.0"
] | 26
|
2019-11-10T15:33:34.000Z
|
2022-03-24T19:56:57.000Z
|
tensorflow/python/eager/remote_cloud_tpu_test.py
|
abhaikollara/tensorflow
|
4f96df3659696990cb34d0ad07dc67843c4225a9
|
[
"Apache-2.0"
] | 6
|
2022-01-15T07:17:47.000Z
|
2022-02-14T15:28:22.000Z
|
tensorflow/python/eager/remote_cloud_tpu_test.py
|
abhaikollara/tensorflow
|
4f96df3659696990cb34d0ad07dc67843c4225a9
|
[
"Apache-2.0"
] | 6
|
2020-03-29T11:10:53.000Z
|
2021-06-14T05:39:14.000Z
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test that we can connect to a real Cloud TPU."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
from absl.testing import absltest
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.eager import context
from tensorflow.python.eager import remote
from tensorflow.python.tpu import tpu_strategy_util
FLAGS = flags.FLAGS
flags.DEFINE_string('tpu', '', 'Name of TPU to connect to.')
flags.DEFINE_string('project', None, 'Name of GCP project with TPU.')
flags.DEFINE_string('zone', None, 'Name of GCP zone with TPU.')
EXPECTED_DEVICES_PRE_CONNECT = [
'/job:localhost/replica:0/task:0/device:CPU:0',
'/job:localhost/replica:0/task:0/device:XLA_CPU:0'
]
EXPECTED_DEVICES_AFTER_CONNECT = [
'/job:localhost/replica:0/task:0/device:CPU:0',
'/job:localhost/replica:0/task:0/device:XLA_CPU:0',
'/job:worker/replica:0/task:0/device:CPU:0',
'/job:worker/replica:0/task:0/device:XLA_CPU:0',
'/job:worker/replica:0/task:0/device:TPU_SYSTEM:0',
'/job:worker/replica:0/task:0/device:TPU:0',
'/job:worker/replica:0/task:0/device:TPU:1',
'/job:worker/replica:0/task:0/device:TPU:2',
'/job:worker/replica:0/task:0/device:TPU:3',
'/job:worker/replica:0/task:0/device:TPU:4',
'/job:worker/replica:0/task:0/device:TPU:5',
'/job:worker/replica:0/task:0/device:TPU:6',
'/job:worker/replica:0/task:0/device:TPU:7',
]
class RemoteCloudTPUTest(absltest.TestCase):
"""Test that we can connect to a real Cloud TPU."""
def test_connect(self):
self.assertCountEqual(
EXPECTED_DEVICES_PRE_CONNECT,
context.list_devices())
resolver = tpu_cluster_resolver.TPUClusterResolver(
tpu=FLAGS.tpu, zone=FLAGS.zone, project=FLAGS.project
)
remote.connect_to_cluster(resolver)
self.assertCountEqual(
EXPECTED_DEVICES_AFTER_CONNECT,
context.list_devices())
tpu_strategy_util.initialize_tpu_system(resolver)
if __name__ == '__main__':
absltest.main()
| 36.434211
| 80
| 0.717949
| 406
| 2,769
| 4.756158
| 0.32266
| 0.062144
| 0.093216
| 0.100984
| 0.331434
| 0.299327
| 0.299327
| 0.299327
| 0.203004
| 0.161574
| 0
| 0.02212
| 0.134706
| 2,769
| 75
| 81
| 36.92
| 0.783806
| 0.270495
| 0
| 0.12766
| 0
| 0
| 0.375251
| 0.323647
| 0
| 0
| 0
| 0
| 0.042553
| 1
| 0.021277
| false
| 0
| 0.191489
| 0
| 0.234043
| 0.021277
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be8db6395c3bc7d6f2f0df95f16ef512dceb29b7
| 1,418
|
py
|
Python
|
test/functional/bsv-blocksize-params.py
|
gbtn/bitcoin-sv-gbtn
|
8b09d1aa072da819fb3309b0be85dae0f1ac9549
|
[
"MIT"
] | 3
|
2018-12-03T03:55:08.000Z
|
2019-08-13T07:50:45.000Z
|
test/functional/bsv-blocksize-params.py
|
Chihuataneo/bitcoin-sv
|
d9b12a23dbf0d2afc5f488fa077d762b302ba873
|
[
"MIT"
] | 1
|
2020-02-09T11:35:45.000Z
|
2020-02-09T11:35:45.000Z
|
test/functional/bsv-blocksize-params.py
|
Chihuataneo/bitcoin-sv
|
d9b12a23dbf0d2afc5f488fa077d762b302ba873
|
[
"MIT"
] | 1
|
2018-11-25T03:18:52.000Z
|
2018-11-25T03:18:52.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Copyright (c) 2017 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Test that the blockmaxsize and excessiveblocksize parameters are also
settable via the bitcoin.conf file.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
from test_framework.cdefs import (ONE_MEGABYTE)
import os
class BSVBlockSizeParams(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.maxminedblocksize = 4 * ONE_MEGABYTE
self.maxblocksize = 16 * ONE_MEGABYTE
def setup_chain(self):
super().setup_chain()
with open(os.path.join(self.options.tmpdir + "/node0", "bitcoin.conf"), 'a', encoding='utf8') as f:
f.write("blockmaxsize=" + str(self.maxminedblocksize) + "\n")
f.write("excessiveblocksize=" + str(self.maxblocksize) + "\n")
def add_options(self, parser):
super().add_options(parser)
def run_test(self):
gires = self.nodes[0].getinfo()
assert_equal(gires["maxblocksize"], self.maxblocksize)
assert_equal(gires["maxminedblocksize"], self.maxminedblocksize)
if __name__ == '__main__':
BSVBlockSizeParams().main()
| 34.585366
| 107
| 0.715797
| 174
| 1,418
| 5.666667
| 0.54023
| 0.052738
| 0.051724
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01705
| 0.172779
| 1,418
| 40
| 108
| 35.45
| 0.823529
| 0.249647
| 0
| 0
| 0
| 0
| 0.091255
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 1
| 0.181818
| false
| 0
| 0.181818
| 0
| 0.409091
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be8eb4d6e0f2ba30a5412f64a491cd5cc3dcacad
| 1,750
|
py
|
Python
|
yotta/test/cli/outdated.py
|
headlessme/yotta
|
947ab074b629c8f18ca91ab84ebaa29096b011c6
|
[
"Apache-2.0"
] | null | null | null |
yotta/test/cli/outdated.py
|
headlessme/yotta
|
947ab074b629c8f18ca91ab84ebaa29096b011c6
|
[
"Apache-2.0"
] | null | null | null |
yotta/test/cli/outdated.py
|
headlessme/yotta
|
947ab074b629c8f18ca91ab84ebaa29096b011c6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
import unittest
# internal modules:
from . import util
from . import cli
Test_Outdated = {
'module.json':'''{
"name": "test-outdated",
"version": "0.0.0",
"description": "Test yotta outdated",
"author": "James Crosby <james.crosby@arm.com>",
"license": "Apache-2.0",
"dependencies":{
"test-testing-dummy": "*"
}
}''',
'source/foo.c':'''#include "stdio.h"
int foo(){
printf("foo!\\n");
return 7;
}''',
# test-testing-dummy v0.0.1 (a newer version is available from the registry,
# and will be installed by yt up)
'yotta_modules/test-testing-dummy/module.json':'''{
"name": "test-testing-dummy",
"version": "0.0.1",
"description": "Test yotta's compilation of tests.",
"author": "James Crosby <james.crosby@arm.com>",
"license": "Apache-2.0"
}
'''
}
class TestCLIOutdated(unittest.TestCase):
def test_outdated(self):
path = util.writeTestFiles(Test_Outdated, True)
stdout, stderr, statuscode = cli.run(['-t', 'x86-linux-native', 'outdated'], cwd=path)
self.assertNotEqual(statuscode, 0)
self.assertIn('test-testing-dummy', stdout + stderr)
util.rmRf(path)
def test_notOutdated(self):
path = util.writeTestFiles(Test_Outdated, True)
stdout, stderr, statuscode = cli.run(['-t', 'x86-linux-native', 'up'], cwd=path)
self.assertEqual(statuscode, 0)
stdout, stderr, statuscode = cli.run(['-t', 'x86-linux-native', 'outdated'], cwd=path)
self.assertEqual(statuscode, 0)
self.assertNotIn('test-testing-dummy', stdout + stderr)
util.rmRf(path)
| 27.777778
| 94
| 0.646286
| 223
| 1,750
| 5.044843
| 0.426009
| 0.058667
| 0.085333
| 0.066667
| 0.430222
| 0.430222
| 0.381333
| 0.381333
| 0.310222
| 0.310222
| 0
| 0.020209
| 0.18
| 1,750
| 62
| 95
| 28.225806
| 0.763763
| 0.159429
| 0
| 0.27907
| 0
| 0
| 0.4487
| 0.091655
| 0
| 0
| 0
| 0
| 0.116279
| 1
| 0.046512
| false
| 0
| 0.069767
| 0
| 0.162791
| 0.023256
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be8fca7576bb080c666d1d705dca421abd5cb1da
| 2,453
|
py
|
Python
|
A_Stocker/Stocker.py
|
Allen1218/Python_Project_Interesting
|
55d5e58e70e21d45c4bb9dc4d4c219f3a8385834
|
[
"Apache-2.0"
] | 1
|
2021-02-03T12:08:06.000Z
|
2021-02-03T12:08:06.000Z
|
A_Stocker/Stocker.py
|
Allen1218/Python_Project_Interesting
|
55d5e58e70e21d45c4bb9dc4d4c219f3a8385834
|
[
"Apache-2.0"
] | null | null | null |
A_Stocker/Stocker.py
|
Allen1218/Python_Project_Interesting
|
55d5e58e70e21d45c4bb9dc4d4c219f3a8385834
|
[
"Apache-2.0"
] | null | null | null |
import threading
import tushare as ts
import pandas as pd
import datetime
STOCK = {#'002594':[1,170.15], ## BYD / number of lots, cost price
         '601012':[11,99.9],   ## LONGi Green Energy
         '002340':[12,8.72],   ## GEM Co.
         '603259':[1,141.7],   ## WuXi AppTec
         '002346':[10,10.68],  ## Zhezhong Co.
         #'600438':[9,42.96],  ## Tongwei Co.
         #'002475':[3,59.51],  ## Luxshare Precision
         #'603308':[1,33.49],  ## Yingliu Group
         #'002415': [3, 66.40], ## Hikvision
         # '600559':[3,35.3],   ## Laobaigan Liquor
         # '601100':[1, 114.5], ## Hengli Hydraulic
         # '603466':[6, 22.40]  ## Fengyuzhu
}
TimerNum = 20.0 # s
Total = 0
# TODO
def get_all_price():
'''process all stock'''
stockCode = list(STOCK.keys())
df = ts.get_realtime_quotes(stockCode)
lp = list(STOCK.values())
stockNum = []
stockCostPrice = []
for i in range(len(lp)):
stockNum.append(lp[i][0])
stockCostPrice.append(lp[i][1])
df['num'] = stockNum
df['stockCostPrice'] = stockCostPrice
    # Post-processing
    # profit and loss ratio
plRatio = round((df['price'].astype(float) / df['stockCostPrice'] - 1)*100,2)
    # profit and loss
df['plRatio'] = plRatio
df['stockNum'] = stockNum
pl = round(df['plRatio'].astype(float) * df['stockNum'] * df['stockCostPrice'].astype(float),2)
df['pl'] = pl
    # intraday rise and fall (%)
currentRF = round((df['price'].astype(float) / df['pre_close'].astype(float) - 1)*100,2)
df['currentRF'] = currentRF
df1 = df[[ 'open', 'price', 'stockCostPrice', 'plRatio', 'num','pl', 'currentRF','name']]
pd.set_option('display.unicode.ambiguous_as_wide', True)
pd.set_option('display.unicode.east_asian_width', True)
    pd.set_option('display.width', 180)  # set display width (important)
pd.set_option('display.max_columns', 1000)
pd.set_option('display.width', 1000)
pd.set_option('display.max_colwidth', 1000)
sss = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f)")[:-4]
print('\n')
print("----------------" + sss +"------------------")
print(df1)
sum_int = round(df['pl'].sum(),2)
print("total profit and lost is " + sum_int.astype(str))
print('\n')
# df.to_csv('stock_data.csv', encoding='utf_8_sig', index=None)
global timer
timer = threading.Timer(TimerNum, get_all_price, [])
timer.start()
if __name__ == '__main__':
print(STOCK)
get_all_price()
timer = threading.Timer(TimerNum, get_all_price, [])
timer.start()
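# --- Hedged worked example of the P/L arithmetic above (illustrative) -------
def _example_pl():
    cost, price, num = 99.9, 105.0, 11              # placeholder holding
    pl_ratio = round((price / cost - 1) * 100, 2)   # ~= 5.11 (percent)
    pl = round(pl_ratio * num * cost, 2)            # ~= 5615.38, same formula as the script
    return pl_ratio, pl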
| 30.6625
| 99
| 0.565838
| 325
| 2,453
| 4.156923
| 0.486154
| 0.022206
| 0.048853
| 0.079941
| 0.222058
| 0.108068
| 0.071058
| 0.071058
| 0.071058
| 0
| 0
| 0.089474
| 0.225438
| 2,453
| 79
| 100
| 31.050633
| 0.621579
| 0.172034
| 0
| 0.115385
| 0
| 0
| 0.202015
| 0.032746
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019231
| false
| 0
| 0.076923
| 0
| 0.096154
| 0.115385
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be9026a8dcf2d835f2e8c702efdeeb3e278299c1
| 1,011
|
py
|
Python
|
tests/extractors/test_etrade.py
|
mkazin/StatementRenamer
|
ef03c71f0e627a15a4bba08e45bfa90ecacd28fc
|
[
"Apache-2.0"
] | null | null | null |
tests/extractors/test_etrade.py
|
mkazin/StatementRenamer
|
ef03c71f0e627a15a4bba08e45bfa90ecacd28fc
|
[
"Apache-2.0"
] | 15
|
2018-05-01T12:48:30.000Z
|
2021-05-14T02:52:48.000Z
|
tests/extractors/test_etrade.py
|
mkazin/StatementRenamer
|
ef03c71f0e627a15a4bba08e45bfa90ecacd28fc
|
[
"Apache-2.0"
] | 1
|
2019-07-09T22:59:50.000Z
|
2019-07-09T22:59:50.000Z
|
from datetime import datetime
from statement_renamer.extractors.etrade import ETradeDateExtractor as EXTRACTOR_UNDER_TEST
from statement_renamer.extractors.factory import ExtractorFactory
TESTDATA = (
"""
PAGE 1 OF 6 February 1, 2019 - March 31, 2019AccountNumber:####-####AccountType:ROTH IRA
PAGE 5 OF 6Account Number: ####-####Statement Period : February 1, 2019 - March 31, 2019Account Type
TolearnmoreabouttheRSDAProgram,pleasereviewyourRSDAProgramCustomerAgreement,visitwww.etrade.com,orcallusat1-800-387-2331
"""
)
def test_monthly_statement():
extractor = EXTRACTOR_UNDER_TEST()
data = extractor.extract(TESTDATA)
new_name = extractor.rename(data)
assert data.get_start_date() == datetime(2019, 2, 1)
assert data.get_end_date() == datetime(2019, 3, 31)
assert new_name == '2019-03 E-Trade Statement.pdf'
def test_factory():
extractor = ExtractorFactory.get_matching_extractor(TESTDATA)
assert isinstance(extractor, EXTRACTOR_UNDER_TEST)
| 32.612903
| 124
| 0.75272
| 118
| 1,011
| 6.288136
| 0.516949
| 0.056604
| 0.072776
| 0.080863
| 0.053908
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065192
| 0.150346
| 1,011
| 30
| 125
| 33.7
| 0.798603
| 0
| 0
| 0
| 0
| 0
| 0.043091
| 0
| 0
| 0
| 0
| 0
| 0.266667
| 1
| 0.133333
| false
| 0
| 0.2
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be986d230ef62a7e44ef6996ed58eb548aa4181b
| 4,004
|
py
|
Python
|
SciDataTool/Methods/VectorField/plot_3D_Data.py
|
BenjaminGabet/SciDataTool
|
7994441de4c54921d43750cacd8df761ba4bd421
|
[
"Apache-2.0"
] | null | null | null |
SciDataTool/Methods/VectorField/plot_3D_Data.py
|
BenjaminGabet/SciDataTool
|
7994441de4c54921d43750cacd8df761ba4bd421
|
[
"Apache-2.0"
] | null | null | null |
SciDataTool/Methods/VectorField/plot_3D_Data.py
|
BenjaminGabet/SciDataTool
|
7994441de4c54921d43750cacd8df761ba4bd421
|
[
"Apache-2.0"
] | null | null | null |
def plot_3D_Data(
self,
*arg_list,
is_norm=False,
unit="SI",
component_list=None,
save_path=None,
x_min=None,
x_max=None,
y_min=None,
y_max=None,
z_min=None,
z_max=None,
z_range=None,
is_auto_ticks=True,
is_auto_range=False,
is_2D_view=False,
is_same_size=False,
N_stem=100,
fig=None,
ax=None,
is_show_fig=None,
is_logscale_x=False,
is_logscale_y=False,
is_logscale_z=False,
thresh=0.02,
is_switch_axes=False,
colormap="RdBu_r",
win_title=None,
font_name="arial",
font_size_title=12,
font_size_label=10,
font_size_legend=8,
):
"""Plots a field as a function of time
Parameters
----------
    self : VectorField
        a VectorField object whose components will be plotted
*arg_list : list of str
arguments to specify which axes to plot
is_norm : bool
boolean indicating if the field must be normalized
unit : str
unit in which to plot the field
save_path : str
full path including folder, name and extension of the file to save if save_path is not None
x_min : float
minimum value for the x-axis
x_max : float
maximum value for the x-axis
y_min : float
minimum value for the y-axis
y_max : float
maximum value for the y-axis
z_min : float
minimum value for the z-axis
z_max : float
maximum value for the z-axis
is_auto_ticks : bool
in fft, adjust ticks to freqs (deactivate if too close)
is_auto_range : bool
in fft, display up to 1% of max
is_2D_view : bool
True to plot Data in xy plane and put z as colormap
is_same_size : bool
True to have all color blocks with same size in 2D view
N_stem : int
number of harmonics to plot (only for stem plots)
fig : Matplotlib.figure.Figure
existing figure to use if None create a new one
ax : Matplotlib.axes.Axes object
ax on which to plot the data
is_show_fig : bool
True to show figure after plot
is_logscale_x : bool
boolean indicating if the x-axis must be set in logarithmic scale
is_logscale_y : bool
boolean indicating if the y-axis must be set in logarithmic scale
is_logscale_z : bool
boolean indicating if the z-axis must be set in logarithmic scale
thresh : float
threshold for automatic fft ticks
is_switch_axes : bool
to switch x and y axes
"""
# Call the plot on each component
if component_list is None: # default: extract all components
component_list = self.components.keys()
for i, comp in enumerate(component_list):
if save_path is not None and len(component_list) > 1:
save_path_comp = (
save_path.split(".")[0] + "_" + comp + "." + save_path.split(".")[1]
)
else:
save_path_comp = save_path
self.components[comp].plot_3D_Data(
arg_list,
is_norm=is_norm,
unit=unit,
save_path=save_path_comp,
x_min=x_min,
x_max=x_max,
y_min=y_min,
y_max=y_max,
z_min=z_min,
z_max=z_max,
colormap=colormap,
is_auto_ticks=is_auto_ticks,
is_auto_range=is_auto_range,
is_2D_view=is_2D_view,
is_same_size=is_same_size,
N_stem=N_stem,
fig=fig,
ax=ax,
is_show_fig=is_show_fig,
is_logscale_x=is_logscale_x,
is_logscale_y=is_logscale_y,
is_logscale_z=is_logscale_z,
thresh=thresh,
is_switch_axes=is_switch_axes,
win_title=win_title,
font_name=font_name,
font_size_title=font_size_title,
font_size_label=font_size_label,
font_size_legend=font_size_legend,
)
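# --- Hedged usage sketch (not part of the original method) ------------------
# `vf` is assumed to be a VectorField with a "radial" component; the axis
# strings follow SciDataTool's convention but are illustrative here.
def _example_plot(vf):
    vf.plot_3D_Data("time", "angle{°}", component_list=["radial"], is_2D_view=True)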
| 29.880597
| 99
| 0.610889
| 603
| 4,004
| 3.794362
| 0.237148
| 0.052448
| 0.028846
| 0.04021
| 0.27229
| 0.134178
| 0.049388
| 0.035839
| 0.035839
| 0
| 0
| 0.008151
| 0.325924
| 4,004
| 133
| 100
| 30.105263
| 0.83957
| 0.434815
| 0
| 0
| 0
| 0
| 0.008321
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013514
| false
| 0
| 0
| 0
| 0.013514
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be9aae87c4295f41e5dad9ea47ddb818dd41be55
| 1,246
|
py
|
Python
|
mileage.py
|
vwfinley/mileage
|
eb880107c8c38d33706eac74d01a0d0516716cc7
|
[
"MIT"
] | null | null | null |
mileage.py
|
vwfinley/mileage
|
eb880107c8c38d33706eac74d01a0d0516716cc7
|
[
"MIT"
] | null | null | null |
mileage.py
|
vwfinley/mileage
|
eb880107c8c38d33706eac74d01a0d0516716cc7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Some helpful links
# https://docs.python.org/3/library/tkinter.html
# https://www.python-course.eu/tkinter_entry_widgets.php
import tkinter as tk
class Application(tk.Frame):
def __init__(self, root=None):
super().__init__(root)
self.root = root
self.root.title("Mileage")
self.root.geometry("250x125")
self.pack()
        self.miles = tk.Entry(self)
        self.gallons = tk.Entry(self)
self.mpg = tk.Label(self)
self.init_widgets()
def init_widgets(self):
self.miles.grid(row=0)
tk.Label(self, text="Miles").grid(row=0, column=1)
self.gallons.grid(row=1)
tk.Label(self, text="Gallons").grid(row=1, column=1)
self.mpg.grid(row=2)
tk.Label(self, text="MPG").grid(row=2, column=1)
tk.Button(self, text="Calculate", command = self.calculate).grid(row=3, column=1)
tk.Button(self, text="Quit", command=self.root.destroy).grid(row=4, column=1)
def calculate(self):
self.mpg['text'] = float(self.miles.get()) / float(self.gallons.get())
app = Application(root=tk.Tk())
app.mainloop()
| 28.318182
| 90
| 0.578652
| 168
| 1,246
| 4.220238
| 0.345238
| 0.078984
| 0.062059
| 0.06347
| 0.06488
| 0.06488
| 0
| 0
| 0
| 0
| 0
| 0.021763
| 0.26244
| 1,246
| 43
| 91
| 28.976744
| 0.749728
| 0.113162
| 0
| 0
| 0
| 0
| 0.043478
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12
| false
| 0
| 0.04
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be9bd5f7d840a39915f5c547fcf6ced95fe85e75
| 1,087
|
py
|
Python
|
rankings/elo.py
|
ulternate/table_tennis_league
|
1762c5b606f149b27d9c06c82e825c948c47b56f
|
[
"MIT"
] | null | null | null |
rankings/elo.py
|
ulternate/table_tennis_league
|
1762c5b606f149b27d9c06c82e825c948c47b56f
|
[
"MIT"
] | 7
|
2017-08-18T04:15:16.000Z
|
2017-08-28T00:54:25.000Z
|
rankings/elo.py
|
mohamed-yahya-zakria/table-tennis-league
|
07cc6fe46100a4d4279c8a6ae5eea26984df4664
|
[
"MIT"
] | 1
|
2017-08-18T11:24:00.000Z
|
2017-08-18T11:24:00.000Z
|
def elo(winner_rank, loser_rank, weighting):
"""
    :param winner_rank: Current rank of the Player that won the match.
    :param loser_rank: Current rank of the Player that lost the match.
    :param weighting: The weighting (K) factor to suit your comp.
    :return: (winner_new_rank, loser_new_rank) Tuple.
    This follows the Elo ranking method.
"""
winner_rank_transformed = 10 ** (winner_rank / 400)
opponent_rank_transformed = 10 ** (loser_rank / 400)
transformed_sum = winner_rank_transformed + opponent_rank_transformed
winner_score = winner_rank_transformed / transformed_sum
loser_score = opponent_rank_transformed / transformed_sum
winner_rank = winner_rank + weighting * (
1 - winner_score)
loser_rank = loser_rank - weighting * loser_score
# Set a floor of 100 for the rankings.
winner_rank = 100 if winner_rank < 100 else winner_rank
loser_rank = 100 if loser_rank < 100 else loser_rank
winner_rank = float('{result:.2f}'.format(result=winner_rank))
loser_rank = float('{result:.2f}'.format(result=loser_rank))
return winner_rank, loser_rank
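# --- Hedged worked example (not part of the original module) ----------------
# With weighting (K) = 32, a 1400-rated winner over a 1200-rated loser gains
# about 7.69 points and the loser drops by the same amount.
if __name__ == '__main__':
    print(elo(1400, 1200, 32))  # approximately (1407.69, 1192.31)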
| 37.482759
| 73
| 0.720331
| 148
| 1,087
| 5
| 0.304054
| 0.175676
| 0.087838
| 0.102703
| 0.078378
| 0.078378
| 0
| 0
| 0
| 0
| 0
| 0.032184
| 0.199632
| 1,087
| 28
| 74
| 38.821429
| 0.818391
| 0.25299
| 0
| 0
| 0
| 0
| 0.030769
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be9c9dcbecf6ee782a06508d51f148623da5f942
| 3,766
|
py
|
Python
|
src/samplics/regression/glm.py
|
samplics-org/samplics
|
b5f49d075194cc24208f567e6a00e86aa24bec26
|
[
"MIT"
] | 14
|
2021-05-03T19:59:58.000Z
|
2022-03-27T18:58:36.000Z
|
src/samplics/regression/glm.py
|
samplics-org/samplics
|
b5f49d075194cc24208f567e6a00e86aa24bec26
|
[
"MIT"
] | 8
|
2021-06-17T01:13:01.000Z
|
2022-03-27T18:31:15.000Z
|
src/samplics/regression/glm.py
|
samplics-org/samplics
|
b5f49d075194cc24208f567e6a00e86aa24bec26
|
[
"MIT"
] | 1
|
2022-03-28T06:58:55.000Z
|
2022-03-28T06:58:55.000Z
|
from __future__ import annotations
from typing import Any, Callable, Optional, Union
import numpy as np
# import pandas as pd
import statsmodels.api as sm
from samplics.estimation.expansion import TaylorEstimator
from samplics.utils.formats import dict_to_dataframe, fpc_as_dict, numpy_array, remove_nans
from samplics.utils.types import Array, Number, Series, StringNumber
class SurveyGLM:
"""General linear models under complex survey sampling"""
def __init__(self):
self.beta: np.ndarray
@staticmethod
    def _residuals(e: np.ndarray, psu: np.ndarray, nb_vars: Number) -> tuple[np.ndarray, Number]:
psus = np.unique(psu)
if psus.shape[0] == 1 and e.shape[0] == 1:
raise AssertionError("Only one observation in the stratum")
if psus.shape[0] == 1:
psu = np.arange(e.shape[0])
psus = np.unique(psu)
e_values = np.zeros((psus.shape[0], nb_vars))
for i, p in enumerate(np.unique(psus)):
e_values[i, :] += np.sum(e[psu == p, :], axis=0)
e_means = np.sum(e_values, axis=0) / psus.shape[0]
return np.transpose(e_values - e_means) @ (e_values - e_means), psus.shape[0]
def _calculate_g(
self,
samp_weight: np.ndarray,
resid: np.ndarray,
x: np.ndarray,
stratum: Optional[np.ndarray],
psu: Optional[np.ndarray],
fpc: Union[dict[StringNumber, Number], Number],
        glm_scale: Number = 1.0,
) -> np.ndarray:
e = (samp_weight * resid)[:, None] * x / glm_scale
if psu is None:
psu = np.arange(e.shape[0])
if stratum is None:
e_h, n_h = self._residuals(e=e, psu=psu, nb_vars=x.shape[1])
return fpc * (n_h / (n_h - 1)) * e_h
else:
g_h = np.zeros((x.shape[1], x.shape[1]))
for s in np.unique(stratum):
e_s = e[stratum == s, :]
psu_s = psu[stratum == s]
e_h, n_h = self._residuals(e=e_s, psu=psu_s, nb_vars=x.shape[1])
g_h += fpc[s] * (n_h / (n_h - 1)) * e_h
return g_h
def estimate(
self,
y: Array,
x: Optional[Array] = None,
samp_weight: Optional[Array] = None,
stratum: Optional[Series] = None,
psu: Optional[Series] = None,
fpc: Union[dict[StringNumber, Number], Series, Number] = 1.0,
remove_nan: bool = False,
) -> None:
y = numpy_array(y)
y_temp = y.copy()
x = numpy_array(x) if x is not None else None
psu = numpy_array(psu) if psu is not None else None
if samp_weight is None:
weight_temp = np.ones(y.shape[0])
elif isinstance(samp_weight, (float, int)):
weight_temp = samp_weight * np.ones(y_temp.shape[0])
elif isinstance(samp_weight, np.ndarray):
weight_temp = samp_weight.copy()
else:
weight_temp = np.asarray(samp_weight)
if not isinstance(fpc, dict):
self.fpc = fpc_as_dict(stratum, fpc)
else:
if list(np.unique(stratum)) != list(fpc.keys()):
raise AssertionError("fpc dictionary keys must be the same as the strata!")
else:
self.fpc = fpc
glm_model = sm.GLM(endog=y_temp, exog=x, var_weights=weight_temp)
glm_results = glm_model.fit()
g = self._calculate_g(
samp_weight=samp_weight,
resid=glm_results.resid_response,
x=x,
stratum=stratum,
psu=psu,
fpc=self.fpc,
glm_scale=glm_results.scale,
)
d = glm_results.cov_params()
self.beta = glm_results.params
self.cov_beta = (d @ g) @ d
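# --- Hedged usage sketch (not part of the original module) ------------------
# Fits a survey-weighted linear model on synthetic data; the two-strata,
# twenty-PSU design and all numbers below are assumptions for illustration.
def _example_survey_glm():
    rng = np.random.default_rng(0)
    n = 200
    x = np.column_stack([np.ones(n), rng.normal(size=n)])
    y = x @ np.array([1.0, 2.0]) + rng.normal(size=n)
    weights = rng.uniform(1, 3, size=n)
    stratum = np.repeat([1, 2], n // 2)
    psu = np.repeat(np.arange(20), n // 20)
    model = SurveyGLM()
    model.estimate(y=y, x=x, samp_weight=weights, stratum=stratum, psu=psu)
    return model.beta, model.cov_beta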
| 32.747826
| 97
| 0.573022
| 522
| 3,766
| 3.963602
| 0.245211
| 0.047849
| 0.024166
| 0.0145
| 0.142098
| 0.071532
| 0.025133
| 0.018366
| 0
| 0
| 0
| 0.008853
| 0.310143
| 3,766
| 114
| 98
| 33.035088
| 0.787529
| 0.019118
| 0
| 0.111111
| 0
| 0
| 0.023319
| 0
| 0
| 0
| 0
| 0
| 0.022222
| 1
| 0.044444
| false
| 0
| 0.077778
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be9e12d7ef9f5aeb6611304d96bd16eabcc64477
| 2,563
|
py
|
Python
|
tests/test_scopes.py
|
leg100/scopes
|
6a31908acf44b9f65f25668230197ed13229a80d
|
[
"MIT"
] | null | null | null |
tests/test_scopes.py
|
leg100/scopes
|
6a31908acf44b9f65f25668230197ed13229a80d
|
[
"MIT"
] | 1
|
2021-11-15T17:47:40.000Z
|
2021-11-15T17:47:40.000Z
|
tests/test_scopes.py
|
leg100/scopes
|
6a31908acf44b9f65f25668230197ed13229a80d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `scopes` package."""
import os
print(os.getenv('PYTHONPATH'))
import pytest
from click.testing import CliRunner
from scopes.tasks import tasks, bolt, spout, builder
from scopes.graph import G, build, topological_sort, traverse
from scopes import cli
@pytest.fixture
def response():
"""Sample pytest fixture.
See more at: http://doc.pytest.org/en/latest/fixture.html
"""
# import requests
# return requests.get('https://github.com/audreyr/cookiecutter-pypackage')
def test_content(response):
"""Sample pytest test function with the pytest fixture as an argument."""
# from bs4 import BeautifulSoup
# assert 'GitHub' in BeautifulSoup(response.content).title.string
def test_command_line_interface():
"""Test the CLI."""
runner = CliRunner()
result = runner.invoke(cli.main)
assert result.exit_code == 0
assert 'scopes.cli.main' in result.output
help_result = runner.invoke(cli.main, ['--help'])
assert help_result.exit_code == 0
assert '--help Show this message and exit.' in help_result.output
# t1---
# | |
# v v
# t2 t3
# \ / t4
# v |
# t5<----/
@pytest.fixture
def example():
tasks.clear()
G.clear()
@spout({'x': None})
def t1():
yield {'x': 'east'}
yield {'x': 'west'}
@bolt({'y': None}, lambda d: 'x' in d)
def t2(dep):
return {'y': 1, **dep}
@bolt({'z': None}, lambda d: d == {'x': None})
def t3(dep):
return {'z': 1, **dep}
@spout({'c': None})
def t4():
yield {'c': 4, 'x': 'east'}
yield {'c': 5, 'x': 'west'}
@builder({'a': 2}, lambda _: True, 'x')
def t5(obj, dep):
obj.update(dep)
def test_task_decorator(example):
assert len(tasks) == 5
assert callable(tasks[0].func)
assert tasks[0].obj == {'x': None}
def test_task_dag(example):
build(tasks)
assert len(G) == 5
assert len(G.edges) == 6
def test_task_traversal(example):
build(tasks)
nodes = topological_sort()
results = traverse(nodes)
assert results == {
't1': [{'x': 'east'}, {'x': 'west'}],
't2': [{'x': 'east', 'y': 1}, {'x': 'west', 'y': 1}],
't3': [{'x': 'east', 'z': 1}, {'x': 'west', 'z': 1}],
't4': [{'x': 'east', 'c': 4}, {'x': 'west', 'c': 5}],
't5': [
{'a': 2, 'x': 'east', 'y': 1, 'z': 1, 'c': 4},
{'a': 2, 'x': 'west', 'y': 1, 'z': 1, 'c': 5}
]
}
| 22.286957
| 78
| 0.536871
| 341
| 2,563
| 3.982405
| 0.357771
| 0.025773
| 0.017673
| 0.030928
| 0.07511
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022763
| 0.262973
| 2,563
| 114
| 79
| 22.482456
| 0.696136
| 0.18533
| 0
| 0.065574
| 0
| 0
| 0.084555
| 0
| 0
| 0
| 0
| 0
| 0.163934
| 1
| 0.196721
| false
| 0
| 0.098361
| 0.032787
| 0.327869
| 0.016393
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be9e3afec2b413ef97912bf7c25f3305c1a3ab7c
| 1,055
|
py
|
Python
|
timeparse/LunarSolarConverter/__init__.py
|
tornadoyi/timeparse
|
1e44dbc6acdb07d6c023806d55034642c7ec0de9
|
[
"Apache-2.0"
] | null | null | null |
timeparse/LunarSolarConverter/__init__.py
|
tornadoyi/timeparse
|
1e44dbc6acdb07d6c023806d55034642c7ec0de9
|
[
"Apache-2.0"
] | null | null | null |
timeparse/LunarSolarConverter/__init__.py
|
tornadoyi/timeparse
|
1e44dbc6acdb07d6c023806d55034642c7ec0de9
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
__author__ = 'isee15'
import LunarSolarConverter
converter = LunarSolarConverter.LunarSolarConverter()
def LunarToSolar(year, month, day, isleap = False):
lunar = LunarSolarConverter.Lunar(year, month, day, isleap)
solar = converter.LunarToSolar(lunar)
return (solar.solarYear, solar.solarMonth, solar.solarDay)
def SolarToLunar(year, month, day):
solar = LunarSolarConverter.Solar(year, month, day)
lunar = converter.SolarToLunar(solar)
return (lunar.lunarYear, lunar.lunarMonth, lunar.lunarDay)
def LunarMonthDays(year, month, isleap = False):
converter = LunarSolarConverter.LunarSolarConverter
days = converter.lunar_month_days[year - converter.lunar_month_days[0]]
leap = LunarSolarConverter.GetBitInt(days, 4, 13)
offset = 0
loopend = leap
if not isleap:
if month <= leap or leap == 0:
loopend = month - 1
else:
loopend = month
days = LunarSolarConverter.GetBitInt(days, 1, 12 - loopend) == 1 and 30 or 29
return days
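# --- Hedged usage example (not part of the original module) -----------------
# Round-trips an arbitrary Gregorian date through the lunar calendar.
if __name__ == '__main__':
    lunar = SolarToLunar(2015, 2, 19)
    print(lunar, LunarToSolar(*lunar))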
| 28.513514
| 81
| 0.694787
| 116
| 1,055
| 6.25
| 0.37069
| 0.062069
| 0.066207
| 0.049655
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021454
| 0.204739
| 1,055
| 36
| 82
| 29.305556
| 0.84267
| 0.019905
| 0
| 0
| 0
| 0
| 0.005814
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.041667
| 0
| 0.291667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be9f7ef00ae244d09a69281d387b6fc00e3b787b
| 4,345
|
py
|
Python
|
examples/hello-pt/custom/cifar10validator.py
|
ArnovanHilten/NVFlare
|
bb45e7d606849c6bc8f7542347459c6ba1be00c4
|
[
"Apache-2.0"
] | 155
|
2021-08-05T18:05:09.000Z
|
2022-03-27T15:32:56.000Z
|
examples/hello-pt/custom/cifar10validator.py
|
ArnovanHilten/NVFlare
|
bb45e7d606849c6bc8f7542347459c6ba1be00c4
|
[
"Apache-2.0"
] | 216
|
2021-12-01T06:07:12.000Z
|
2022-03-30T23:34:02.000Z
|
examples/hello-pt/custom/cifar10validator.py
|
ArnovanHilten/NVFlare
|
bb45e7d606849c6bc8f7542347459c6ba1be00c4
|
[
"Apache-2.0"
] | 44
|
2021-11-24T16:03:29.000Z
|
2022-03-24T23:28:39.000Z
|
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch.utils.data import DataLoader
from torchvision.datasets import CIFAR10
from torchvision.transforms import Compose, ToTensor, Normalize
from nvflare.apis.dxo import from_shareable, DataKind, DXO
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.app_constant import AppConstants
from simple_network import SimpleNetwork
class Cifar10Validator(Executor):
def __init__(self, validate_task_name=AppConstants.TASK_VALIDATION):
super(Cifar10Validator, self).__init__()
self._validate_task_name = validate_task_name
# Setup the model
self.model = SimpleNetwork()
self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
self.model.to(self.device)
# Preparing the dataset for testing.
transforms = Compose([
ToTensor(),
Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
self.test_data = CIFAR10(root='~/data', train=False, transform=transforms)
self.test_loader = DataLoader(self.test_data, batch_size=4, shuffle=False)
def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
if task_name == self._validate_task_name:
model_owner = "?"
try:
try:
dxo = from_shareable(shareable)
except:
self.log_error(fl_ctx, "Error in extracting dxo from shareable.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Ensure data_kind is weights.
if not dxo.data_kind == DataKind.WEIGHTS:
self.log_exception(fl_ctx, f"DXO is of type {dxo.data_kind} but expected type WEIGHTS.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Extract weights and ensure they are tensor.
model_owner = shareable.get_header(AppConstants.MODEL_OWNER, "?")
weights = {k: torch.as_tensor(v, device=self.device) for k, v in dxo.data.items()}
# Get validation accuracy
val_accuracy = self.do_validation(weights, abort_signal)
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.log_info(fl_ctx, f"Accuracy when validating {model_owner}'s model on"
f" {fl_ctx.get_identity_name()}"f's data: {val_accuracy}')
dxo = DXO(data_kind=DataKind.METRICS, data={'val_acc': val_accuracy})
return dxo.to_shareable()
except:
self.log_exception(fl_ctx, f"Exception in validating model from {model_owner}")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
else:
return make_reply(ReturnCode.TASK_UNKNOWN)
def do_validation(self, weights, abort_signal):
self.model.load_state_dict(weights)
self.model.eval()
correct = 0
total = 0
with torch.no_grad():
for i, (images, labels) in enumerate(self.test_loader):
if abort_signal.triggered:
return 0
images, labels = images.to(self.device), labels.to(self.device)
output = self.model(images)
_, pred_label = torch.max(output, 1)
correct += (pred_label == labels).sum().item()
total += images.size()[0]
metric = correct/float(total)
return metric
| 40.231481
| 114
| 0.643268
| 537
| 4,345
| 5.040968
| 0.35568
| 0.028445
| 0.033247
| 0.007388
| 0.104913
| 0.047285
| 0.031031
| 0.004433
| 0.004433
| 0.004433
| 0
| 0.010722
| 0.270196
| 4,345
| 107
| 115
| 40.607477
| 0.842952
| 0.163176
| 0
| 0.119403
| 0
| 0
| 0.073542
| 0.007741
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044776
| false
| 0
| 0.179104
| 0
| 0.358209
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be9f9cd98cdf38a09e9b5c7bf41b9142f3bd6c42
| 4,220
|
py
|
Python
|
lambda/enable-traffic-mirroring.py
|
wrharding/aws-infra
|
5e913f8342b3a3b3a4599648c4a914f828b5bc18
|
[
"MIT"
] | 1
|
2022-01-14T18:03:29.000Z
|
2022-01-14T18:03:29.000Z
|
lambda/enable-traffic-mirroring.py
|
wrharding/aws-infra
|
5e913f8342b3a3b3a4599648c4a914f828b5bc18
|
[
"MIT"
] | null | null | null |
lambda/enable-traffic-mirroring.py
|
wrharding/aws-infra
|
5e913f8342b3a3b3a4599648c4a914f828b5bc18
|
[
"MIT"
] | null | null | null |
# MIT License
# Copyright (c) 2020-2021 Chris Farris (https://www.chrisfarris.com)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import boto3
from botocore.exceptions import ClientError
import json
import os
import logging
logger = logging.getLogger()
logger.setLevel(getattr(logging, os.getenv('LOG_LEVEL', default='INFO')))
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
TAG_KEY=os.getenv('TAG_KEY', default='WireShark')
def handler(event, context):
logger.debug("Received event: " + json.dumps(event, sort_keys=True))
ec2_client = boto3.client('ec2')
mirror_sessions = ec2_client.describe_traffic_mirror_sessions()['TrafficMirrorSessions']
enabled_enis = []
max_session_id = 0
for s in mirror_sessions:
enabled_enis.append(s['NetworkInterfaceId'])
if s['SessionNumber'] > max_session_id:
max_session_id = s['SessionNumber']
response = ec2_client.describe_instances(
Filters=[
{'Name': 'instance-state-name', 'Values': ['running']},
],
MaxResults=1000 # I should never need to paginate.
)
for r in response['Reservations']:
for i in r['Instances']:
if not i['InstanceType'].startswith("t3"):
logger.debug(f"Instance {i['InstanceId']} is not a t3 and does not support Traffic Mirroring")
continue
for tag in i.get('Tags', []):  # instances without tags have no 'Tags' key
if tag['Key'] == TAG_KEY:
# See if a mirror session is setup
for eni in i['NetworkInterfaces']:
if eni['NetworkInterfaceId'] not in enabled_enis:
logger.info(f"ENI {eni['NetworkInterfaceId']} on Instance {i['InstanceId']} needs Mirroring Enabled")
max_session_id += 1
enable_traffic_mirroring(ec2_client, eni['NetworkInterfaceId'], i['InstanceId'], max_session_id)
else:
logger.debug(f"ENI {eni['NetworkInterfaceId']} on Instance {i['InstanceId']} is already Enabled")
def enable_traffic_mirroring(ec2_client, eni, instance_id, session_id):
response = ec2_client.create_traffic_mirror_session(
NetworkInterfaceId=eni,
TrafficMirrorTargetId=os.environ['TARGET_ID'],
TrafficMirrorFilterId=os.environ['FILTER_ID'],
SessionNumber=session_id,
Description=f"Enabled by Lambda for {instance_id}"
)
## END OF FUNCTION ##
if __name__ == '__main__':
# Logging idea stolen from: https://docs.python.org/3/howto/logging.html#configuring-logging
# create console handler and set level to debug
ch = logging.StreamHandler()
logger.setLevel(logging.DEBUG)
# create formatter
# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
formatter = logging.Formatter('%(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
try:
handler(None, None)
except KeyboardInterrupt:
exit(1)
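# --- Hypothetical invocation sketch (assumption, not part of the original file) ---
# The handler reads TARGET_ID and FILTER_ID from the environment; a local dry run
# against a real AWS account could look roughly like:
#   TAG_KEY=WireShark TARGET_ID=tms-0123456789abcdef0 FILTER_ID=tmf-0123456789abcdef0 \
#       python enable-traffic-mirroring.py
# (the resource ids above are placeholders, not values from the original file)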
| 39.074074
| 129
| 0.679147
| 525
| 4,220
| 5.361905
| 0.428571
| 0.031261
| 0.021314
| 0.020604
| 0.083837
| 0.056838
| 0.032682
| 0.032682
| 0
| 0
| 0
| 0.008831
| 0.221801
| 4,220
| 107
| 130
| 39.439252
| 0.848356
| 0.345498
| 0
| 0
| 0
| 0
| 0.221164
| 0.027462
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033898
| false
| 0
| 0.084746
| 0
| 0.118644
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
be9fea8e8fc13061760196f0e3818adcd5989d77
| 9,088
|
py
|
Python
|
src/value_function.py
|
wu6u3/async_trpo
|
b6e3dd56775464b58f7433773e8b04d88cf3fdbc
|
[
"MIT"
] | 6
|
2018-02-02T19:53:08.000Z
|
2021-12-06T19:48:19.000Z
|
src/value_function.py
|
wu6u3/async_trpo
|
b6e3dd56775464b58f7433773e8b04d88cf3fdbc
|
[
"MIT"
] | null | null | null |
src/value_function.py
|
wu6u3/async_trpo
|
b6e3dd56775464b58f7433773e8b04d88cf3fdbc
|
[
"MIT"
] | 2
|
2018-07-26T06:22:04.000Z
|
2019-03-06T10:05:18.000Z
|
"""
State-Value Function
Written by Patrick Coady (pat-coady.github.io)
Modified by Tin-Yin Lai (wu6u3) into asynchronous version
"""
import tensorflow as tf
import numpy as np
from sklearn.utils import shuffle
#import os
class NNValueFunction(object):
""" NN-based state-value function """
def __init__(self, obs_dim, hid1_mult, thread_idx, shared_nn):
"""
Args:
obs_dim: number of dimensions in observation vector (int)
hid1_mult: size of first hidden layer, multiplier of obs_dim
"""
self.replay_buffer_x = None
self.replay_buffer_y = None
self.obs_dim = obs_dim
self.hid1_mult = hid1_mult
self.epochs = 10
self.lr = None # learning rate set in _build_graph()
self._thread_idx=thread_idx # -1 for global
self._scope_name = "nn_net_"+str(self._thread_idx)
self._build_graph()
#self.sess = tf.Session(graph=self.g)
#self.sess.run(self.init)
var_refs = [v._ref() for v in self.get_vars()]
self.gradients = tf.gradients(
self.loss, var_refs,
gate_gradients=False,
aggregation_method=None,
colocate_gradients_with_ops=False)
self.apply_gradients=None
self.sync = self.sync_from(shared_nn)
#self. global_fit = self.fit_for_global(x=None, y=None, logger=None)
def _build_graph(self):
""" Construct TensorFlow graph, including loss function, init op and train op """
with tf.variable_scope(self._scope_name) as scope:
self.obs_ph = tf.placeholder(tf.float32, (None, self.obs_dim), 'obs_valfunc')
self.val_ph = tf.placeholder(tf.float32, (None,), 'val_valfunc')
# hid1 layer size is 10x obs_dim, hid3 size is 10, and hid2 is geometric mean
hid1_size = self.obs_dim * self.hid1_mult # default multipler 10 chosen empirically on 'Hopper-v1'
hid3_size = 5 # 5 chosen empirically on 'Hopper-v1'
hid2_size = int(np.sqrt(hid1_size * hid3_size))
# heuristic to set learning rate based on NN size (tuned on 'Hopper-v1')
self.lr = 1e-2 / np.sqrt(hid2_size) # 1e-3 empirically determined
print('Value Params -- h1: {}, h2: {}, h3: {}, lr: {:.3g}'
.format(hid1_size, hid2_size, hid3_size, self.lr))
# 3 hidden layers with tanh activations
out = tf.layers.dense(self.obs_ph, hid1_size, tf.tanh,
kernel_initializer=tf.random_normal_initializer(
stddev=np.sqrt(1 / self.obs_dim)), name="h1")
out = tf.layers.dense(out, hid2_size, tf.tanh,
kernel_initializer=tf.random_normal_initializer(
stddev=np.sqrt(1 / hid1_size)), name="h2")
out = tf.layers.dense(out, hid3_size, tf.tanh,
kernel_initializer=tf.random_normal_initializer(
stddev=np.sqrt(1 / hid2_size)), name="h3")
out = tf.layers.dense(out, 1,
kernel_initializer=tf.random_normal_initializer(
stddev=np.sqrt(1 / hid3_size)), name='output')
self.out = tf.squeeze(out)
self.loss = tf.reduce_mean(tf.square(self.out - self.val_ph)) # squared loss
optimizer = tf.train.AdamOptimizer(self.lr)
self.train_op = optimizer.minimize(self.loss)
#self.init = tf.global_variables_initializer()
self.h1_w, self.h1_b = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self._scope_name+'/h1')
self.h2_w, self.h2_b = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self._scope_name+'/h2')
self.h3_w, self.h3_b = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self._scope_name+'/h3')
self.output_w, self.output_b =tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self._scope_name+'/output')
scope.reuse_variables()
#self.sess = tf.Session(graph=self.g)
#self.sess.run(self.init)
def fit_for_global(self, sess, x, y, logger):  # sess added: the body below runs ops in a TF session
""" Fit model to current data batch + previous data batch
Args:
x: features
y: target
logger: logger to save training loss and % explained variance
"""
num_batches = max(x.shape[0] // 256, 1)
batch_size = x.shape[0] // num_batches
y_hat = self.predict(sess, x) # check explained variance prior to update
old_exp_var = 1 - np.var(y - y_hat)/np.var(y)
if self.replay_buffer_x is None:
x_train, y_train = x, y
else:
x_train = np.concatenate([x, self.replay_buffer_x])
y_train = np.concatenate([y, self.replay_buffer_y])
self.replay_buffer_x = x
self.replay_buffer_y = y
for e in range(self.epochs):
x_train, y_train = shuffle(x_train, y_train)
for j in range(num_batches):
start = j * batch_size
end = (j + 1) * batch_size
feed_dict = {self.obs_ph: x_train[start:end, :],
self.val_ph: y_train[start:end]}
_, l = sess.run([self.train_op, self.loss], feed_dict=feed_dict)
y_hat = self.predict(sess, x)
loss = np.mean(np.square(y_hat - y)) # explained variance after update
exp_var = 1 - np.var(y - y_hat) / np.var(y) # diagnose over-fitting of val func
logger.log({'ValFuncLoss': loss,
'ExplainedVarNew': exp_var,
'ExplainedVarOld': old_exp_var})
def fit(self, sess, x, y, logger):
""" Fit model to current data batch + previous data batch
Args:
x: features
y: target
logger: logger to save training loss and % explained variance
"""
num_batches = max(x.shape[0] // 256, 1)
batch_size = x.shape[0] // num_batches
y_hat = self.predict(sess, x) # check explained variance prior to update
old_exp_var = 1 - np.var(y - y_hat)/np.var(y)
if self.replay_buffer_x is None:
x_train, y_train = x, y
else:
x_train = np.concatenate([x, self.replay_buffer_x])
y_train = np.concatenate([y, self.replay_buffer_y])
self.replay_buffer_x = x
self.replay_buffer_y = y
for e in range(self.epochs):
x_train, y_train = shuffle(x_train, y_train)
for j in range(num_batches):
start = j * batch_size
end = (j + 1) * batch_size
feed_dict = {self.obs_ph: x_train[start:end, :],
self.val_ph: y_train[start:end]}
_, l = sess.run([self.train_op, self.loss], feed_dict=feed_dict)
y_hat = self.predict(sess, x)
loss = np.mean(np.square(y_hat - y)) # explained variance after update
exp_var = 1 - np.var(y - y_hat) / np.var(y) # diagnose over-fitting of val func
logger.log({'ValFuncLoss': loss,
'ExplainedVarNew': exp_var,
'ExplainedVarOld': old_exp_var})
def predict(self, sess, x):
""" Predict method """
feed_dict = {self.obs_ph: x}
y_hat = sess.run(self.out, feed_dict=feed_dict)
return np.squeeze(y_hat)
#def close_sess(self):
# """ Close TensorFlow session """
# sess.close()
def get_vars(self):
return [self.h1_w, self.h1_b,
self.h2_w, self.h2_b,
self.h3_w, self.h3_b,
self.output_w, self.output_b ]
# weights = []
#name = []
#for tensor in self.g.as_graph_def().node:
# name.append(tensor.name)
#print(name)
#with self.g.as_default() as g:
# with tf.variable_scope(self._scope_name) as scope:
# weights.extend(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope))
# weights.append(g.get_tensor_by_name('h1/kernel:0'))
# weights.append(g.get_tensor_by_name('h1/bias:0'))
# weights.append(g.get_tensor_by_name('h2/kernel:0'))
# weights.append(g.get_tensor_by_name('h2/bias:0'))
# weights.append(g.get_tensor_by_name('h3/kernel:0'))
# weights.append(g.get_tensor_by_name('h3/bias:0'))
# return weights
def sync_from(self, shared_nn, name=None):
if shared_nn is not None:
src_vars = shared_nn.get_vars()
dst_vars = self.get_vars()
sync_ops = []
with tf.name_scope(name, self._scope_name, []) as name:
for(src_var, dst_var) in zip(src_vars, dst_vars):
sync_op = tf.assign(dst_var, src_var)
sync_ops.append(sync_op)
return tf.group(*sync_ops, name=name)
else:
return None
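# --- Hypothetical usage sketch (assumption, not part of the original file) ---
# TF1-style driver showing the expected construct / fit / predict call pattern;
# `logger` is assumed to be any object exposing a log(dict) method.
# global_nn = NNValueFunction(obs_dim=4, hid1_mult=10, thread_idx=-1, shared_nn=None)
# worker_nn = NNValueFunction(obs_dim=4, hid1_mult=10, thread_idx=0, shared_nn=global_nn)
# sess = tf.Session()
# sess.run(tf.global_variables_initializer())
# x = np.random.randn(64, 4).astype(np.float32)
# y = np.random.randn(64).astype(np.float32)
# worker_nn.fit(sess, x, y, logger)
# print(worker_nn.predict(sess, x).shape)   # -> (64,)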
| 42.666667
| 121
| 0.575704
| 1,221
| 9,088
| 4.061425
| 0.181818
| 0.024198
| 0.038717
| 0.023997
| 0.580964
| 0.545876
| 0.505949
| 0.497076
| 0.490421
| 0.437588
| 0
| 0.016859
| 0.314701
| 9,088
| 212
| 122
| 42.867925
| 0.779383
| 0.242408
| 0
| 0.436508
| 0
| 0
| 0.028272
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.02381
| 0.007937
| 0.119048
| 0.007937
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bea186d9537f0999c2f3875648b97a7c001cd71a
| 10,439
|
py
|
Python
|
gbe/views/make_bid_view.py
|
bethlakshmi/gbe-divio-djangocms-python2.7
|
6e9b2c894162524bbbaaf73dcbe927988707231d
|
[
"Apache-2.0"
] | 1
|
2021-03-14T11:56:47.000Z
|
2021-03-14T11:56:47.000Z
|
gbe/views/make_bid_view.py
|
bethlakshmi/gbe-divio-djangocms-python2.7
|
6e9b2c894162524bbbaaf73dcbe927988707231d
|
[
"Apache-2.0"
] | 180
|
2019-09-15T19:52:46.000Z
|
2021-11-06T23:48:01.000Z
|
gbe/views/make_bid_view.py
|
bethlakshmi/gbe-divio-djangocms-python2.7
|
6e9b2c894162524bbbaaf73dcbe927988707231d
|
[
"Apache-2.0"
] | null | null | null |
from django.views.generic import View
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.views.decorators.cache import never_cache
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.shortcuts import (
get_object_or_404,
render,
)
from gbe.models import (
Conference,
UserMessage,
)
from gbe_logging import log_func
from gbe.functions import (
validate_profile,
)
from gbe.email.functions import notify_reviewers_on_bid_change
from gbetext import (
no_login_msg,
fee_instructions,
full_login_msg,
payment_needed_msg,
payment_details_error,
)
from gbe_utils.text import no_profile_msg
from gbe.ticketing_idd_interface import (
get_payment_details,
get_ticket_form,
fee_paid,
)
class MakeBidView(View):
form = None
has_draft = True
instructions = ''
payment_form = None
coordinated = False
def groundwork(self, request, args, kwargs):
self.owner = validate_profile(request, require=False)
if not self.owner or not self.owner.complete:
user_message = UserMessage.objects.get_or_create(
view=self.__class__.__name__,
code="PROFILE_INCOMPLETE",
defaults={
'summary': "Profile Incomplete",
'description': no_profile_msg})
messages.warning(request, user_message[0].description)
return '%s?next=%s' % (
reverse('profile_update', urlconf='gbe.urls'),
reverse('%s_create' % self.bid_type.lower(),
urlconf='gbe.urls'))
self.bid_object = None
if "bid_id" in kwargs:
bid_id = kwargs.get("bid_id")
self.bid_object = get_object_or_404(self.bid_class, pk=bid_id)
self.conference = self.bid_object.b_conference
else:
self.conference = Conference.objects.filter(
accepting_bids=True).first()
def make_post_forms(self, request, the_form):
if self.bid_object:
self.form = the_form(
request.POST,
instance=self.bid_object,
initial=self.get_initial(),
prefix=self.prefix)
else:
self.form = the_form(
request.POST,
initial=self.get_initial(),
prefix=self.prefix)
self.set_up_form()
def set_up_post(self, request):
the_form = None
if 'submit' in list(request.POST.keys()) or not self.has_draft:
the_form = self.submit_form
user_message = UserMessage.objects.get_or_create(
view=self.__class__.__name__,
code="SUBMIT_SUCCESS",
defaults={
'summary': "%s Submit Success" % self.bid_type,
'description': self.submit_msg})
else:
the_form = self.draft_form
user_message = UserMessage.objects.get_or_create(
view=self.__class__.__name__,
code="DRAFT_SUCCESS",
defaults={
'summary': "%s Save Draft Success" % self.bid_type,
'description': self.draft_msg})
self.make_post_forms(request, the_form)
return user_message
def make_context(self, request):
paid = fee_paid(
self.bid_type,
self.owner.user_object.username,
self.conference)
instructions = UserMessage.objects.get_or_create(
view=self.__class__.__name__,
code="BID_INSTRUCTIONS",
defaults={
'summary': "%s Bid Instructions" % self.bid_type,
'description': self.instructions})
context = {
'conference': self.conference,
'forms': [self.form],
'page_title': self.page_title,
'view_title': self.view_title,
'draft_fields': self.draft_fields,
'submit_fields': self.submit_fields,
'fee_paid': paid,
'view_header_text': instructions[0].description,
}
if not paid and not self.coordinated:
user_message = UserMessage.objects.get_or_create(
view=self.__class__.__name__,
code="FEE_MESSAGE",
defaults={
'summary': "%s Pre-submit Message" % self.bid_type,
'description': fee_instructions})
messages.info(
request,
user_message[0].description)
if self.payment_form:
context['forms'] += [self.payment_form]
else:
context['forms'] += [get_ticket_form(self.bid_class.__name__,
self.conference)]
return context
def get_create_form(self, request):
if self.bid_object:
self.form = self.submit_form(
prefix=self.prefix,
instance=self.bid_object,
initial=self.get_initial())
else:
self.form = self.submit_form(
prefix=self.prefix,
initial=self.get_initial())
self.set_up_form()
return render(
request,
'gbe/bid.tmpl',
self.make_context(request)
)
def check_validity(self, request):
return self.form.is_valid()
def set_up_form(self):
pass
def get_invalid_response(self, request):
self.set_up_form()
context = self.make_context(request)
return render(
request,
'gbe/bid.tmpl',
context)
def submit_bid(self, request):
self.bid_object.submitted = True
self.bid_object.save()
notify_reviewers_on_bid_change(
self.owner,
self.bid_object,
self.bid_type,
"Submission",
self.conference,
'%s Reviewers' % self.bid_type,
reverse('%s_review' % self.bid_type.lower(),
urlconf='gbe.urls'))
@never_cache
@log_func
def get(self, request, *args, **kwargs):
if not request.user.is_authenticated:
follow_on = '?next=%s' % reverse(
'%s_create' % self.bid_type.lower(),
urlconf='gbe.urls')
user_message = UserMessage.objects.get_or_create(
view=self.__class__.__name__,
code="USER_NOT_LOGGED_IN",
defaults={
'summary': "Need Login - %s Bid",
'description': no_login_msg})
full_msg = full_login_msg % (
user_message[0].description,
reverse('login', urlconf='gbe.urls') + follow_on)
messages.warning(request, full_msg)
return HttpResponseRedirect(
reverse('register', urlconf='gbe.urls') + follow_on)
redirect = self.groundwork(request, args, kwargs)
if redirect:
return HttpResponseRedirect(redirect)
return self.get_create_form(request)
@never_cache
@log_func
@method_decorator(login_required)
def post(self, request, *args, **kwargs):
cart_items = []
paypal_button = None
total = None
redirect = None
redirect = self.groundwork(request, args, kwargs)
if redirect:
return HttpResponseRedirect(redirect)
user_message = self.set_up_post(request)
# check bid validity
if not self.check_validity(request):
return self.get_invalid_response(request)
if not self.coordinated and not fee_paid(
self.bid_type,
self.owner.user_object.username,
self.conference) and "draft" not in list(request.POST.keys()):
self.payment_form = get_ticket_form(self.bid_class.__name__,
self.conference,
request.POST)
if not self.payment_form.is_valid():
error_message = UserMessage.objects.get_or_create(
view=self.__class__.__name__,
code="PAYMENT_CHOICE_INVALID",
defaults={
'summary': "User Made Invalid Ticket Choice",
'description': payment_details_error})
messages.error(request, error_message[0].description)
return self.get_invalid_response(request)
# save bid
if not self.bid_object:
self.bid_object = self.form.save(commit=False)
self.set_valid_form(request)
# if this isn't a draft, move forward through process, setting up
# payment review if payment is needed
if "submit" in list(request.POST.keys()):
if self.payment_form:
cart_items, paypal_button, total = get_payment_details(
request,
self.payment_form,
self.bid_type,
self.bid_object.pk,
self.owner.user_object.pk)
dynamic_message = UserMessage.objects.get_or_create(
view=self.__class__.__name__,
code="NOT_PAID_INSTRUCTIONS",
defaults={
'summary': "%s Not Paid" % self.bid_type,
'description': payment_needed_msg})
page_title = '%s Payment' % self.bid_type
return render(
request,
'gbe/confirm_pay.tmpl',
{'dynamic_message': dynamic_message[0].description,
'page_title': page_title,
'cart_items': cart_items,
'total': total,
'paypal_button': paypal_button})
else:
redirect = self.submit_bid(request)
messages.success(request, user_message[0].description)
return HttpResponseRedirect(
redirect or reverse('home', urlconf='gbe.urls'))
def dispatch(self, *args, **kwargs):
return super(MakeBidView, self).dispatch(*args, **kwargs)
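# --- Hypothetical subclass/wiring sketch (assumption, not part of the original file) ---
# MakeBidView is abstract: a concrete bid view is expected to supply bid_type,
# bid_class, submit_form, draft_form, prefix, page titles and field lists, e.g.:
# class ActBidView(MakeBidView):              # hypothetical subclass name
#     bid_type = "Act"
#     bid_class = Act
#     submit_form = ActBidForm
#     draft_form = ActBidDraftForm
# and be routed through gbe.urls with something like
# path('act/create', ActBidView.as_view(), name='act_create').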
| 36.121107
| 78
| 0.560111
| 1,082
| 10,439
| 5.11183
| 0.157116
| 0.037968
| 0.027843
| 0.033267
| 0.327427
| 0.286386
| 0.223468
| 0.197975
| 0.169047
| 0.153498
| 0
| 0.001916
| 0.350129
| 10,439
| 288
| 79
| 36.246528
| 0.813384
| 0.012166
| 0
| 0.328125
| 0
| 0
| 0.079364
| 0.004172
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046875
| false
| 0.003906
| 0.058594
| 0.007813
| 0.1875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bea1d1375a8d223083e55cf97bff2f2ce8f4f7ba
| 6,977
|
py
|
Python
|
epicteller/core/dao/character.py
|
KawashiroNitori/epicteller
|
264b11e7e6eb58beb0f67ecbbb811d268a533f7a
|
[
"MIT"
] | null | null | null |
epicteller/core/dao/character.py
|
KawashiroNitori/epicteller
|
264b11e7e6eb58beb0f67ecbbb811d268a533f7a
|
[
"MIT"
] | null | null | null |
epicteller/core/dao/character.py
|
KawashiroNitori/epicteller
|
264b11e7e6eb58beb0f67ecbbb811d268a533f7a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
from collections import defaultdict
from typing import List, Optional, Iterable, Dict
import base62
from sqlalchemy import select, and_
from sqlalchemy.dialects.mysql import insert as mysql_insert
from epicteller.core.model.character import Character
from epicteller.core.tables import table
from epicteller.core.util import ObjectDict
from epicteller.core.util.enum import ExternalType
from epicteller.core.util.seq import get_id
def _format_character(result) -> Optional[Character]:
if not result:
return
character = Character(
id=result.id,
url_token=result.url_token,
member_id=result.member_id,
name=result.name,
avatar=result.avatar,
description=result.description,
is_removed=bool(result.is_removed),
raw_data=result.data,
created=result.created,
updated=result.updated,
)
return character
class CharacterDAO:
t = table.character
select_clause = select([
t.c.id,
t.c.url_token,
t.c.name,
t.c.member_id,
t.c.avatar,
t.c.description,
t.c.is_removed,
t.c.data,
t.c.created,
t.c.updated,
])
@classmethod
async def batch_get_character_by_id(cls, character_ids: Iterable[int]) -> Dict[int, Character]:
query = cls.select_clause.where(cls.t.c.id.in_(character_ids))
result = await table.execute(query)
rows = await result.fetchall()
return {row.id: _format_character(row) for row in rows}
@classmethod
async def batch_get_character_by_url_token(cls, url_tokens: Iterable[str]) -> Dict[str, Character]:
query = cls.select_clause.where(cls.t.c.url_token.in_(url_tokens))
result = await table.execute(query)
rows = await result.fetchall()
return {row.url_token: _format_character(row) for row in rows}
@classmethod
async def get_characters_by_owner(cls, member_id: int) -> List[Character]:
query = cls.select_clause.where(cls.t.c.member_id == member_id)
results = await table.execute(query)
characters = [_format_character(room) for room in await results.fetchall()]
return characters
@classmethod
async def update_character(cls, character_id: int, **kwargs) -> None:
if 'updated' not in kwargs:
kwargs['updated'] = int(time.time())
query = cls.t.update().values(kwargs).where(cls.t.c.id == character_id)
await table.execute(query)
@classmethod
async def create_character(cls, member_id: int, name: str, avatar: str, description: str,
raw_data: dict) -> Character:
created = int(time.time())
url_token = base62.encode(get_id())
values = ObjectDict(
url_token=url_token,
member_id=member_id,
name=name,
avatar=avatar,
description=description,
is_removed=0,
data=raw_data,
created=created,
updated=created,
)
query = cls.t.insert().values(values)
result = await table.execute(query)
values.id = result.lastrowid
character = _format_character(values)
return character
class CharacterCampaignDAO:
t = table.character_campaign_index
@classmethod
async def get_character_id_by_campaign_name(cls, campaign_id: int, name: str) -> Optional[int]:
query = select([cls.t.c.character_id]).where(and_(cls.t.c.campaign_id == campaign_id,
cls.t.c.name == name))
result = await table.execute(query)
row = await result.fetchone()
if not row:
return
return int(row.character_id)
@classmethod
async def get_character_ids_by_campaign_id(cls, campaign_id: int) -> List[int]:
query = select([cls.t.c.character_id]).where(cls.t.c.campaign_id == campaign_id)
results = await table.execute(query)
character_ids = [int(row.character_id) for row in await results.fetchall()]
return character_ids
@classmethod
async def get_campaign_ids_by_character_ids(cls, character_ids: List[int]) -> Dict[int, List[int]]:
query = select([
cls.t.c.character_id,
cls.t.c.campaign_id,
]).where(cls.t.c.character_id.in_(character_ids))
results = await table.execute(query)
rows = await results.fetchall()
campaign_map = defaultdict(list)
for r in rows:
campaign_map[r.character_id].append(r.campaign_id)
return dict(campaign_map)
@classmethod
async def bind_character_to_campaign(cls, character_id: int, name: str, campaign_id: int):
query = mysql_insert(cls.t).values(
character_id=character_id,
name=name,
campaign_id=campaign_id,
).on_duplicate_key_update(
name=name,
)
await table.execute(query)
@classmethod
async def unbind_character_to_campaign(cls, character_id: int, campaign_id: int):
query = cls.t.delete().where(and_(cls.t.c.character_id == character_id, cls.t.c.campaign_id == campaign_id))
await table.execute(query)
class CharacterExternalDAO:
t = table.character_external_id
@classmethod
async def get_external_ids_by_character(cls, character_id: int) -> Dict[ExternalType, str]:
query = select([
cls.t.c.type,
cls.t.c.external_id,
]).where(cls.t.c.character_id == character_id)
result = await table.execute(query)
rows = await result.fetchall()
externals = {ExternalType(row.type): row.external_id for row in rows}
return externals
@classmethod
async def get_character_ids_by_external(cls, external_type: ExternalType, external_id: str) -> List[int]:
query = select([cls.t.c.character_id]).where(and_(cls.t.c.type == int(external_type),
cls.t.c.external_id == external_id))
result = await table.execute(query)
rows = await result.fetchall()
character_ids = [r.character_id for r in rows]
return character_ids
@classmethod
async def bind_character_external_id(cls, character_id: int, external_type: ExternalType, external_id: str):
query = mysql_insert(cls.t).values(
character_id=character_id,
type=int(external_type),
external_id=external_id,
).on_duplicate_key_update(
external_id=external_id,
)
await table.execute(query)
@classmethod
async def unbind_character_external_id(cls, character_id: int, external_type: ExternalType):
query = cls.t.delete().where(and_(cls.t.c.character_id == character_id, cls.t.c.type == int(external_type)))
await table.execute(query)
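# --- Hypothetical usage sketch (assumption, not part of the original file) ---
# Assumes the epicteller table bindings are configured against a reachable database.
import asyncio

async def _demo_fetch():
    # Fetch two characters by id and print their names.
    characters = await CharacterDAO.batch_get_character_by_id([1, 2])
    for character_id, character in characters.items():
        print(character_id, character.name if character else None)

# asyncio.run(_demo_fetch())  # uncomment to run against a configured database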
| 36.528796
| 116
| 0.642826
| 887
| 6,977
| 4.854566
| 0.124014
| 0.014863
| 0.025546
| 0.071528
| 0.465165
| 0.385044
| 0.327682
| 0.228983
| 0.218532
| 0.167441
| 0
| 0.001155
| 0.255267
| 6,977
| 190
| 117
| 36.721053
| 0.82756
| 0.00602
| 0
| 0.312883
| 0
| 0
| 0.002019
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.006135
| false
| 0
| 0.067485
| 0
| 0.184049
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bea22b520ab74130906570943260ba5b3628befe
| 4,313
|
py
|
Python
|
examples/sentence_classfication/task_sentiment_classification_roformer_v2.py
|
Tongjilibo/bert4torch
|
71d5ffb3698730b16e5a252b06644a136787711e
|
[
"MIT"
] | 49
|
2022-03-15T07:28:16.000Z
|
2022-03-31T07:16:15.000Z
|
examples/sentence_classfication/task_sentiment_classification_roformer_v2.py
|
Tongjilibo/bert4torch
|
71d5ffb3698730b16e5a252b06644a136787711e
|
[
"MIT"
] | null | null | null |
examples/sentence_classfication/task_sentiment_classification_roformer_v2.py
|
Tongjilibo/bert4torch
|
71d5ffb3698730b16e5a252b06644a136787711e
|
[
"MIT"
] | null | null | null |
#! -*- coding:utf-8 -*-
# Sentiment classification example with RoPE relative position encoding
# Official project: https://github.com/ZhuiyiTechnology/roformer-v2
# PyTorch reference project: https://github.com/JunnYu/RoFormer_pytorch
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import sequence_padding, Callback, text_segmentate, ListDataset
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
maxlen = 128
batch_size = 16
config_path = 'F:/Projects/pretrain_ckpt/roformer/[sushen_torch_base]--roformer_v2_char_base/config.json'
checkpoint_path = 'F:/Projects/pretrain_ckpt/roformer/[sushen_torch_base]--roformer_v2_char_base/pytorch_model.bin'
dict_path = 'F:/Projects/pretrain_ckpt/roformer/[sushen_torch_base]--roformer_v2_char_base/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Build the tokenizer
tokenizer = Tokenizer(dict_path, do_lower_case=True)
# Load the dataset
class MyDataset(ListDataset):
@staticmethod
def load_data(filenames):
"""加载数据,并尽量划分为不超过maxlen的句子
"""
D = []
seps, strips = u'\n。!?!?;;,, ', u';;,, '
for filename in filenames:
with open(filename, encoding='utf-8') as f:
for l in f:
text, label = l.strip().split('\t')
for t in text_segmentate(text, maxlen - 2, seps, strips):
D.append((t, int(label)))
return D
def collate_fn(batch):
batch_token_ids, batch_segment_ids, batch_labels = [], [], []
for text, label in batch:
token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
batch_labels.append([label])
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device)
return [batch_token_ids, batch_segment_ids], batch_labels.flatten()
# Load the train/valid/test datasets
train_dataloader = DataLoader(MyDataset(['E:/Github/bert4torch/examples/datasets/sentiment/sentiment.train.data']), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataloader = DataLoader(MyDataset(['E:/Github/bert4torch/examples/datasets/sentiment/sentiment.valid.data']), batch_size=batch_size, collate_fn=collate_fn)
test_dataloader = DataLoader(MyDataset(['E:/Github/bert4torch/examples/datasets/sentiment/sentiment.test.data']), batch_size=batch_size, collate_fn=collate_fn)
# Define the model structure on top of BERT
class Model(BaseModel):
def __init__(self) -> None:
super().__init__()
# Specify the model and its corresponding checkpoint path
self.bert, self.config = build_transformer_model(config_path=config_path, checkpoint_path=checkpoint_path, model='roformer_v2', return_model_config=True)
self.dropout = nn.Dropout(0.1)
self.dense = nn.Linear(self.config['hidden_size'], 2)
def forward(self, token_ids, segment_ids):
last_hidden_state = self.bert([token_ids, segment_ids])
output = self.dropout(last_hidden_state[:, 0, :])
output = self.dense(output)
return output
model = Model().to(device)
# Define the loss and optimizer to use; custom ones are supported here
model.compile(
loss=nn.CrossEntropyLoss(),
optimizer=optim.Adam(model.parameters(), lr=2e-5),  # use a sufficiently small learning rate
metrics=['accuracy']
)
# Define the evaluation function
def evaluate(data):
total, right = 0., 0.
for x_true, y_true in data:
y_pred = model.predict(x_true).argmax(axis=1)
total += len(y_true)
right += (y_true == y_pred).sum().item()
return right / total
class Evaluator(Callback):
"""评估与保存
"""
def __init__(self):
self.best_val_acc = 0.
def on_epoch_end(self, global_step, epoch, logs=None):
val_acc = evaluate(valid_dataloader)
if val_acc > self.best_val_acc:
self.best_val_acc = val_acc
# model.save_weights('best_model.pt')
print(f'val_acc: {val_acc:.5f}, best_val_acc: {self.best_val_acc:.5f}\n')
if __name__ == '__main__':
evaluator = Evaluator()
model.fit(train_dataloader, epochs=20, steps_per_epoch=500, callbacks=[evaluator])
else:
model.load_weights('best_model.pt')
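# --- Hypothetical inference sketch (assumption, not part of the original file) ---
# After loading the weights, scoring a single sentence could look roughly like:
# token_ids, segment_ids = tokenizer.encode('这个产品很好用', maxlen=maxlen)
# token_ids = torch.tensor([token_ids], dtype=torch.long, device=device)
# segment_ids = torch.tensor([segment_ids], dtype=torch.long, device=device)
# label = model.predict([token_ids, segment_ids]).argmax(axis=1).item()  # 0/1 sentiment label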
| 38.855856
| 176
| 0.703455
| 570
| 4,313
| 5.05614
| 0.331579
| 0.020819
| 0.027065
| 0.019431
| 0.298751
| 0.274115
| 0.247051
| 0.206801
| 0.179736
| 0.151978
| 0
| 0.010081
| 0.172038
| 4,313
| 110
| 177
| 39.209091
| 0.796976
| 0.074658
| 0
| 0
| 0
| 0
| 0.156754
| 0.126512
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.090909
| 0
| 0.272727
| 0.012987
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bea43752768259680c29953a0cec72ec71c5a8eb
| 1,329
|
py
|
Python
|
code_week12_713_719/is_graph_bipartite_hard.py
|
dylanlee101/leetcode
|
b059afdadb83d504e62afd1227107de0b59557af
|
[
"Apache-2.0"
] | null | null | null |
code_week12_713_719/is_graph_bipartite_hard.py
|
dylanlee101/leetcode
|
b059afdadb83d504e62afd1227107de0b59557af
|
[
"Apache-2.0"
] | null | null | null |
code_week12_713_719/is_graph_bipartite_hard.py
|
dylanlee101/leetcode
|
b059afdadb83d504e62afd1227107de0b59557af
|
[
"Apache-2.0"
] | null | null | null |
'''
Given an undirected graph `graph`, return true when the graph is bipartite.
A graph is bipartite if its node set can be split into two independent subsets A and B such that every edge in the graph connects one node from A and one node from B.
The graph is given as an adjacency list: graph[i] lists all nodes connected to node i. Each node is an integer between 0 and graph.length - 1. There are no self-loops or parallel edges: graph[i] does not contain i, and graph[i] contains no duplicate values.
Example 1:
Input: [[1,3], [0,2], [1,3], [0,2]]
Output: true
Explanation:
The undirected graph is:
0----1
| |
| |
3----2
We can split the nodes into two groups: {0, 2} and {1, 3}.
Example 2:
Input: [[1,2,3], [0,2], [0,1,3], [0,2]]
Output: false
Explanation:
The undirected graph is:
0----1
| \ |
| \ |
3----2
We cannot split the nodes into two independent subsets.
Source: LeetCode (力扣)
Link: https://leetcode-cn.com/problems/is-graph-bipartite
'''
from typing import List
class Solution:
def isBipartite(self, graph: List[List[int]]) -> bool:
n = len(graph)
uncolored, red, green = 0, 1, 2
color = [uncolored] * n
valid = True
def dfs(node, c):
nonlocal valid
color[node] = c
cNei = (green if c == red else red)
for neighbor in graph[node]:
if color[neighbor] == uncolored:
dfs(neighbor, cNei)
if not valid:
return
elif color[neighbor] != cNei:
valid = False
return
for i in range(n):
if color[i] == uncolored:
dfs(i, red)
if not valid:
break
return valid
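# --- Hypothetical usage sketch (assumption, not part of the original file) ---
# The two examples from the problem statement above.
print(Solution().isBipartite([[1, 3], [0, 2], [1, 3], [0, 2]]))          # expected: True
print(Solution().isBipartite([[1, 2, 3], [0, 2], [0, 1, 3], [0, 2]]))    # expected: False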
| 21.435484
| 117
| 0.51392
| 160
| 1,329
| 4.26875
| 0.44375
| 0.01757
| 0.01757
| 0.01757
| 0.04978
| 0.032211
| 0
| 0
| 0
| 0
| 0
| 0.042529
| 0.345372
| 1,329
| 61
| 118
| 21.786885
| 0.742529
| 0.394281
| 0
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bea493d4dc7e2d4506520e5f797ce4cb0a9a2a6e
| 1,417
|
py
|
Python
|
data_preprocessing/decision_tree_regression.py
|
Frost199/Machine_Learning
|
8cf77c6cbbae7781ac6f2ffcc9218ad79472d287
|
[
"MIT"
] | null | null | null |
data_preprocessing/decision_tree_regression.py
|
Frost199/Machine_Learning
|
8cf77c6cbbae7781ac6f2ffcc9218ad79472d287
|
[
"MIT"
] | null | null | null |
data_preprocessing/decision_tree_regression.py
|
Frost199/Machine_Learning
|
8cf77c6cbbae7781ac6f2ffcc9218ad79472d287
|
[
"MIT"
] | 1
|
2020-05-23T16:46:52.000Z
|
2020-05-23T16:46:52.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 17 06:44:47 2018
@author: Eleam Emmanuel
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# importing the dataset
dataset = pd.read_csv('Position_Salaries.csv')
# take the feature column(s): drop the first column and the last (target) column
# always make sure the independent variable is a matrix, not a vector;
# the dependent variable can be a vector
X = dataset.iloc[:, 1:-1].values
Y = dataset.iloc[:, 2].values
# splitting the dataset into a training set and a test set
# x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=0)
# feature scaling
"""sc_X = StandardScaler()
x_train = sc_X.fit_transform(x_train)
x_test = sc_X.transform(x_test)
sc_Y = StandardScaler()
x_train = sc_X.fit_transform(x_train)"""
# fitting the Decision Tree regression Model to the dataset
regressor = DecisionTreeRegressor(random_state=0)
regressor.fit(X, Y)
# predicting a new result
y_pred = regressor.predict([[6.5]])  # predict expects a 2D array of shape (n_samples, n_features)
# Visualizing the Decision tree regression result (for higher resolution and smoother curve)
X_grid = np.arange(min(X), max(X), 0.01)
X_grid = X_grid.reshape(len(X_grid), 1)
plt.scatter(X, Y, color='red')
plt.plot(X_grid, regressor.predict(X_grid), color='blue')
plt.title("Truth or Bluff (Regression Model)")
plt.xlabel("Position Level")
plt.ylabel("Salary")
plt.show()
| 30.804348
| 92
| 0.740296
| 236
| 1,417
| 4.313559
| 0.508475
| 0.02947
| 0.013752
| 0.021611
| 0.08055
| 0.08055
| 0.08055
| 0.08055
| 0.08055
| 0
| 0
| 0.022241
| 0.14326
| 1,417
| 46
| 93
| 30.804348
| 0.81631
| 0.419901
| 0
| 0
| 0
| 0
| 0.125776
| 0.032609
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.222222
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bea4e663116d7a61eb7a7e77d69904ecfbbff62c
| 1,786
|
py
|
Python
|
user_messages/apps.py
|
everaccountable/django-user-messages
|
101d539b785bdb440bf166fb16ad25eb66e4174a
|
[
"MIT"
] | null | null | null |
user_messages/apps.py
|
everaccountable/django-user-messages
|
101d539b785bdb440bf166fb16ad25eb66e4174a
|
[
"MIT"
] | null | null | null |
user_messages/apps.py
|
everaccountable/django-user-messages
|
101d539b785bdb440bf166fb16ad25eb66e4174a
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
from django.conf import settings
from django.core import checks
from django.template import engines
from django.template.backends.django import DjangoTemplates
from django.utils.text import capfirst
from django.utils.translation import gettext_lazy as _
@checks.register()
def check_context_processors(app_configs, **kwargs):
errors = []
for engine in engines.all():
if isinstance(engine, DjangoTemplates):
django_templates_instance = engine.engine
break
else:
django_templates_instance = None
if django_templates_instance:
if (
"django.contrib.messages.context_processors.messages"
not in django_templates_instance.context_processors
and "admin.E404" not in settings.SILENCED_SYSTEM_CHECKS
):
errors.append(
checks.Error(
"If using 'user_messages.context_processors.messages'"
" instead of the official messages context processor"
" you have to add 'admin.E404' to SILENCED_SYSTEM_CHECKS.",
id="user_messages.E001",
)
)
if ("admin.E406" not in settings.SILENCED_SYSTEM_CHECKS and
"django.contrib.messages" not in settings.INSTALLED_APPS):
errors.append(
checks.Error(
"If using 'user_messages' instead of django.contrib.messages"
" you have to add 'admin.E406' to SILENCED_SYSTEM_CHECKS.",
id="user_messages.E002",
)
)
return errors
class UserMessagesConfig(AppConfig):
default_auto_field = "django.db.models.AutoField"
name = "user_messages"
verbose_name = capfirst(_("user messages"))
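# --- Hypothetical settings sketch (assumption, not part of the original file) ---
# The two checks above translate into project settings roughly like:
# INSTALLED_APPS = [..., "user_messages"]                   # replaces django.contrib.messages
# SILENCED_SYSTEM_CHECKS = ["admin.E404", "admin.E406"]     # acknowledge the swapped messages app
# TEMPLATES[0]["OPTIONS"]["context_processors"] += ["user_messages.context_processors.messages"]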
| 33.698113
| 79
| 0.647816
| 195
| 1,786
| 5.758974
| 0.394872
| 0.062333
| 0.081923
| 0.058771
| 0.227961
| 0.197685
| 0.138914
| 0.0748
| 0
| 0
| 0
| 0.013964
| 0.278275
| 1,786
| 52
| 80
| 34.346154
| 0.857254
| 0
| 0
| 0.090909
| 0
| 0
| 0.255319
| 0.118701
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022727
| false
| 0
| 0.159091
| 0
| 0.295455
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bea66694bcf52b9fffd500768ba31f40d22d16ce
| 4,908
|
py
|
Python
|
server-python3/server.py
|
Aaron-Ming/websocket_terminal
|
42c24391d51c275eabf1f879fb312b9a3614f51e
|
[
"MIT"
] | 40
|
2016-11-20T09:48:27.000Z
|
2021-04-02T00:29:14.000Z
|
server-python3/server.py
|
Aaron-Ming/websocket_terminal
|
42c24391d51c275eabf1f879fb312b9a3614f51e
|
[
"MIT"
] | 6
|
2018-01-07T03:43:22.000Z
|
2022-03-21T08:43:33.000Z
|
server-python3/server.py
|
glensc/websocket_terminal
|
42c24391d51c275eabf1f879fb312b9a3614f51e
|
[
"MIT"
] | 20
|
2016-12-02T06:00:27.000Z
|
2021-08-15T11:40:34.000Z
|
import os
import urllib.parse
import eventlet
import eventlet.green.socket
# eventlet.monkey_patch()
import eventlet.websocket
import eventlet.wsgi
import wspty.pipe
from flask import Flask, request, redirect
from wspty.EchoTerminal import EchoTerminal
from wspty.EncodedTerminal import EncodedTerminal
from wspty.WebsocketBinding import WebsocketBinding
import config
def make_app():
app = Flask(__name__)
app.static_folder = get_static_folder()
print("Serving static files from: " + app.static_folder)
@app.route('/')
def index():
newurl = b'/static/index.html'
if request.query_string:
newurl = newurl + b'?' + request.query_string
return redirect(newurl)
return app
def parse_query(qstr):
return {k: v[0] for k, v in urllib.parse.parse_qs(qstr).items()}
def debug(s):
app.logger.debug(s)
class TerminalFactory:
def __init__(self, args_dict, allow_unsafe=False):
self.kind = args_dict['kind']
self.hostname = args_dict.get('hostname', 'localhost')
self.port = int(args_dict.get('port', '22'))
self.username = args_dict.get('username')
self.password = args_dict.get('password')
self.term = args_dict.get('term')
self.encoding = args_dict.get('encoding', 'utf8')
self.allow_unsafe = allow_unsafe
def create_binary(self):
if self.kind == 'ssh':
from wspty.SshTerminal import SshTerminal
return SshTerminal(
self.hostname, self.port, self.username, self.password, self.term
)
if self.kind == 'raw':
from wspty.SocketTerminal import SocketTerminal
sock = eventlet.green.socket.socket()
ip = eventlet.green.socket.gethostbyname(self.hostname)
sock.connect((ip, self.port))
return SocketTerminal(sock)
if self.kind == 'echo':
return EchoTerminal()
if self.kind == 'prompt':
if not self.allow_unsafe:
raise Exception("kind {} is disabled".format(self.kind))
from wspty import PromptTerminal
return PromptTerminal.os_terminal()
raise NotImplementedError('kind: {}'.format(self.kind))
def create(self):
return EncodedTerminal(self.create_binary(), self.encoding)
class DefaultRootApp:
def __init__(self):
self._app_handle_wssh = eventlet.websocket.WebSocketWSGI(self.handle_wssh)
self.allow_unsafe = False
def handle_wssh(self, ws):
debug('Creating terminal with remote {remote}'.format(
remote=ws.environ.get('REMOTE_ADDR'),
))
ws_binding = WebsocketBinding(ws)
query = parse_query(ws.environ.get('QUERY_STRING', ''))
terminal = None
try:
kind, terminal = self.create_terminal(query)
ws_binding.send('Connected to %s\r\n' % (kind,))
wspty.pipe.pipe(ws_binding, terminal)
except BaseException as e:
ws_binding.send_error(e)
raise
finally:
if terminal:
terminal.close()
debug('Closing terminal normally with remote {remote}'.format(
remote=ws.environ.get('REMOTE_ADDR'),
))
return ''
def create_terminal(self, obj):
factory = TerminalFactory(obj, self.allow_unsafe)
return factory.kind, factory.create()
def handler(self, env, *args):
route = env["PATH_INFO"]
if route == '/wssh':
return self._app_handle_wssh(env, *args)
else:
return app(env, *args)
def make_parser():
import argparse
parser = argparse.ArgumentParser(description='Websocket Terminal server')
parser.add_argument('-l', '--listen', default='', help='Listen on interface (default all)')
parser.add_argument('-p', '--port', default=5002, type=int, help='Listen on port')
parser.add_argument('--unsafe', action='store_true', help='Allow unauthenticated connections to local machine')
return parser
def start(interface, port, root_app_handler):
conn = (interface, port)
listener = eventlet.listen(conn)
print('listening on {0}:{1}'.format(*conn))
try:
eventlet.wsgi.server(listener, root_app_handler)
except KeyboardInterrupt:
pass
def start_default(interface, port, allow_unsafe=False, root_app_cls=DefaultRootApp):
root_app = root_app_cls()
root_app.allow_unsafe = allow_unsafe
start(interface, port, root_app.handler)
def main():
args = make_parser().parse_args()
start_default(args.listen, args.port, args.unsafe)
def get_static_folder():
path_root = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../client')
path_root = os.path.join(path_root, config.CLIENT_DIR)
return os.path.abspath(path_root)
app = make_app()
if __name__ == '__main__':
main()
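# --- Hypothetical client-side sketch (assumption, not part of the original file) ---
# The /wssh endpoint expects query parameters matching TerminalFactory, e.g. a
# websocket client connecting to:
#   ws://localhost:5002/wssh?kind=echo&encoding=utf8
#   ws://localhost:5002/wssh?kind=ssh&hostname=example.org&port=22&username=user&password=secret
# (the hostname and credentials above are placeholders, not values from the original file)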
| 31.261146
| 115
| 0.647514
| 588
| 4,908
| 5.231293
| 0.284014
| 0.032185
| 0.021456
| 0.014304
| 0.06502
| 0.053316
| 0.03251
| 0.03251
| 0.03251
| 0.03251
| 0
| 0.00267
| 0.23696
| 4,908
| 156
| 116
| 31.461538
| 0.818692
| 0.004686
| 0
| 0.04918
| 0
| 0
| 0.101372
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.131148
| false
| 0.02459
| 0.131148
| 0.016393
| 0.393443
| 0.016393
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bea71c525e82317994bbd637b8bebff771fe81eb
| 3,406
|
py
|
Python
|
tests/unit/test_roger_promote.py
|
seomoz/roger-mesos-tools
|
88b4cb3550a4b49d0187cfb5e6a22246ff6b9765
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_roger_promote.py
|
seomoz/roger-mesos-tools
|
88b4cb3550a4b49d0187cfb5e6a22246ff6b9765
|
[
"Apache-2.0"
] | 47
|
2016-05-26T22:09:56.000Z
|
2018-08-08T20:33:39.000Z
|
tests/unit/test_roger_promote.py
|
seomoz/roger-mesos-tools
|
88b4cb3550a4b49d0187cfb5e6a22246ff6b9765
|
[
"Apache-2.0"
] | 3
|
2017-09-20T22:39:03.000Z
|
2017-11-07T22:29:29.000Z
|
# -*- encoding: utf-8 -*-
"""
Unit test for roger_promote.py
"""
import tests.helper
import unittest
import os
import os.path
import pytest
import requests
from mockito import mock, Mock, when
from cli.roger_promote import RogerPromote
from cli.appconfig import AppConfig
from cli.settings import Settings
from cli.framework import Framework
from cli.frameworkUtils import FrameworkUtils
from cli.marathon import Marathon
from cli.chronos import Chronos
class TestRogerPromote(unittest.TestCase):
def setUp(self):
self.marathon = mock(Marathon)
self.settings = mock(Settings)
self.app_config = mock(AppConfig)
self.framework = self.marathon
self.framework_utils = mock(FrameworkUtils)
self.config_file = "test.yml"
self.roger_env = {}
os.environ['ROGER_CONFIG_DIR'] = '/vagrant/config'
@property
def config_dir(self):
return os.environ['ROGER_CONFIG_DIR']
def test_config_dir(self):
rp = RogerPromote()
assert rp.config_dir == '/vagrant/config'
def test_roger_env(self):
fake_config = tests.helper.fake_config()
settings = mock(Settings)
when(self.app_config).getRogerEnv(
self.config_dir
).thenReturn(fake_config)
rp = RogerPromote(app_config=self.app_config)
assert rp.roger_env == fake_config
def test_set_framework(self):
app_data = {'test_app': {'name': 'test_app'}}
when(self.app_config).getAppData(
self.config_dir, self.config_file, 'test_app'
).thenReturn(app_data)
rp = RogerPromote(app_config=self.app_config)
rp._set_framework(self.config_file, 'test_app')
assert rp._framework.getName() == 'Marathon'
def test_image_name(self):
os.environ['ROGER_USER'] = "first.last"
os.environ['ROGER_USER_PASS_DEV'] = "password"
os.environ['ROGER_USER_PASS_STAGE'] = "password"
os.environ['ROGER_USER_PASS_PROD'] = "password"
framework = mock(Marathon)
when(framework).getName().thenReturn("Marathon")
when(framework).get_app_id(
"test_path/test_app.json",
"Marathon"
).thenReturn("app_id")
when(framework).get_image_name(
'first.last',
"password",
"dev",
"app_id",
self.config_dir,
self.config_file
).thenReturn("test_image")
rp = RogerPromote(framework=framework)
assert rp._image_name(
'dev',
self.config_file,
"test_path/test_app.json") == 'test_image'
def test_config_resolver(self):
framework = mock(Framework)
settings = mock(Settings)
app_config = mock(AppConfig)
config_dir = '/vagrant/config'
fake_team_config = tests.helper.fake_team_config()
when(settings).getConfigDir().thenReturn(config_dir)
when(app_config).getConfig(
config_dir, 'roger.json'
).thenReturn(fake_team_config)
rp = RogerPromote(settings=settings, app_config=app_config)
val = rp._config_resolver('template_path', 'test_app', 'roger.json')
assert val == 'framework_template_path'
def test_roger_push_script(self):
path = RogerPromote()._roger_push_script()
assert 'roger-mesos-tools/cli/roger_push.py' in path
| 28.383333
| 76
| 0.645919
| 401
| 3,406
| 5.239402
| 0.197007
| 0.04712
| 0.039981
| 0.034269
| 0.152308
| 0.088529
| 0.034269
| 0
| 0
| 0
| 0
| 0.00039
| 0.24633
| 3,406
| 119
| 77
| 28.621849
| 0.818076
| 0.016148
| 0
| 0.046512
| 0
| 0
| 0.13465
| 0.037403
| 0
| 0
| 0
| 0
| 0.069767
| 1
| 0.093023
| false
| 0.046512
| 0.162791
| 0.011628
| 0.27907
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bea77828d8025fc0087d40bc8239898137482a39
| 7,097
|
py
|
Python
|
data/collectors.py
|
papb/COVID-19
|
2dc8e683f55c494ca894727aca56f90e53b161f3
|
[
"MIT"
] | 6
|
2020-03-24T22:03:34.000Z
|
2020-03-25T21:08:02.000Z
|
data/collectors.py
|
papb/COVID-19
|
2dc8e683f55c494ca894727aca56f90e53b161f3
|
[
"MIT"
] | null | null | null |
data/collectors.py
|
papb/COVID-19
|
2dc8e683f55c494ca894727aca56f90e53b161f3
|
[
"MIT"
] | 1
|
2020-03-27T20:25:03.000Z
|
2020-03-27T20:25:03.000Z
|
import json
import pandas as pd
import requests
def load_dump_covid_19_data():
COVID_19_BY_CITY_URL='https://raw.githubusercontent.com/wcota/covid19br/master/cases-brazil-cities-time.csv'
by_city=(pd.read_csv(COVID_19_BY_CITY_URL)
.query('country == "Brazil"')
.drop(columns=['country'])
.pipe(lambda df: df[df.state!='TOTAL'])
.assign(city=lambda df: df.city.apply(lambda x: x.split('/')[0]))
.rename(columns={'totalCases': 'cases',
'newCases': 'new_cases',
'state': 'uf'})
.sort_values(by=['city', 'date'])
)
by_uf = (by_city
.groupby(['date', 'uf'])
['new_cases', 'cases']
.sum()
.reset_index())
dfs = [by_uf, by_city]
filenames = ['by_uf', 'by_city']
for df, filename in zip(dfs, filenames):
output_path = f'data/csv/covid_19/{filename}/{filename}.csv'
df.to_csv(output_path, index=False)
print(f'{filename} data exported to {output_path}')
def load_dump_uf_pop():
IBGE_POPULATION_EXCEL_URL = 'ftp://ftp.ibge.gov.br/Estimativas_de_Populacao/Estimativas_2019/estimativa_dou_2019.xls'
def _load_uf_codes():
print('Scraping UF codes')
return (
pd.read_html(
'https://www.oobj.com.br/bc/article/'
'quais-os-c%C3%B3digos-de-cada-uf-no-brasil-465.html'
)
[0]
.replace(r'\s\(\*\)', '', regex=True)
.rename(columns={'UF': 'uf'})
[['Unidade da Federação', 'uf']]
)
def _load_uf_capitals():
print('Scraping UF capital names')
return (
pd.read_html(
'https://www.estadosecapitaisdobrasil.com/'
)
[0]
.rename(columns={'Sigla': 'uf', 'Capital': 'city'})
[['uf', 'city']]
)
# TODO: download excel file only once
def _download_ibge_excel_file(url):
pass
def _load_city_pop():
print('Scraping city population')
return (
pd.read_excel(IBGE_POPULATION_EXCEL_URL, sheet_name='Municípios', header=1)
.rename(columns={
'COD. UF': 'UF_code',
'COD. MUNIC': 'city_code',
'NOME DO MUNICÍPIO': 'city',
'POPULAÇÃO ESTIMADA': 'estimated_population'
})
.dropna(how='any')
.assign(estimated_population=lambda df: df.estimated_population
.replace(r'\.', '', regex=True)
.replace(r'\-', ' ', regex=True)
.replace(r'\(\d+\)', '', regex=True)
.astype('int')
)
.assign( UF_code=lambda df: df.UF_code.astype(int))
.assign(city_code=lambda df: df.city_code.astype(int))
.rename(columns={'UF': 'uf'})
[['uf', 'city', 'estimated_population']]
)
def _load_uf_pop():
print('Scraping UF population')
uf_codes = _load_uf_codes()
return (
pd.read_excel(IBGE_POPULATION_EXCEL_URL, header=1)
.drop(columns=['Unnamed: 1'])
.rename(columns={'POPULAÇÃO ESTIMADA': 'estimated_population'})
.dropna(how='any')
.assign(estimated_population=lambda df: df.estimated_population
.replace(r'\.', '', regex=True)
.replace(r'\-', ' ', regex=True)
.replace(r'\(\d\)', '', regex=True)
.astype('int')
)
.pipe(lambda df: pd.merge(df,
uf_codes,
left_on='BRASIL E UNIDADES DA FEDERAÇÃO',
right_on='Unidade da Federação',
how='inner'))
[['uf', 'estimated_population']]
)
uf_pop, city_pop, uf_capitals = (_load_uf_pop(),
_load_city_pop(),
_load_uf_capitals())
print('Combining uf and city data')
uf_pop = (
uf_pop
# Add capital city name
.merge(
uf_capitals,
how='left',
on='uf'
)
# Add capital population
.merge(
city_pop,
how='left',
on=['uf', 'city']
)
.rename(
columns={
'estimated_population_x': 'estimated_population',
'estimated_population_y': 'capital_estimated_population'
}
)
)
dfs = [uf_pop, city_pop]
filenames = ['by_uf', 'by_city']
for df, filename in zip(dfs, filenames):
output_path = f'data/csv/population/{filename}/{filename}.csv'
df.to_csv(output_path, index=False)
print(f'{filename} data exported to {output_path}')
def load_jh_df(csv):
'''
Loads a CSV file from JH repository and make some transforms
'''
jh_data_path = (
'https://raw.githubusercontent.com/'
'CSSEGISandData/COVID-19/master/'
'csse_covid_19_data/csse_covid_19_time_series/'
)
return (
pd.read_csv(
jh_data_path
+ csv[1]
)
.drop(['Lat', 'Long'], axis=1)
.groupby('Country/Region')
.sum()
.reset_index()
.rename(
columns={'Country/Region':'country'}
)
.melt(
id_vars=['country'],
var_name='date',
value_name=csv[0]
)
.assign(
date=lambda x: pd.to_datetime(
x['date'],
format='%m/%d/%y'
)
)
)
def load_jh_data():
'''
Loads the latest COVID-19 global data from
Johns Hopkins University repository
'''
cases_csv = ('cases', 'time_series_19-covid-Confirmed.csv')
deaths_csv = ('deaths', 'time_series_19-covid-Deaths.csv')
recovered_csv = ('recoveries', 'time_series_19-covid-Recovered.csv')
return (
pd.merge(
pd.merge(
load_jh_df(cases_csv),
load_jh_df(deaths_csv)
),
load_jh_df(recovered_csv)
)
.reindex(
columns = ['date',
'cases',
'deaths',
'recoveries',
'country']
)
)
if __name__ == '__main__':
try:
load_dump_covid_19_data()
except Exception as e:
print(f'Error when collecting COVID-19 cases data: {repr(e)}')
try:
load_dump_uf_pop()
except Exception as e:
print(f'Error when collecting population data: {repr(e)}')
| 32.856481
| 121
| 0.479217
| 710
| 7,097
| 4.546479
| 0.269014
| 0.070632
| 0.018587
| 0.012392
| 0.288724
| 0.262701
| 0.247831
| 0.247831
| 0.223668
| 0.197026
| 0
| 0.011582
| 0.391715
| 7,097
| 215
| 122
| 33.009302
| 0.736159
| 0.03114
| 0
| 0.240437
| 0
| 0.010929
| 0.233806
| 0.069162
| 0
| 0
| 0
| 0.004651
| 0
| 1
| 0.04918
| false
| 0.005464
| 0.016393
| 0
| 0.098361
| 0.04918
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
beaa8784fc43c71bc8bb5120744ac9a157c4e2a7
| 2,387
|
py
|
Python
|
PathPlanning/run.py
|
CandleStein/VAlg
|
43aecdd351954d316f132793cf069b70bf2e5cc2
|
[
"MIT"
] | null | null | null |
PathPlanning/run.py
|
CandleStein/VAlg
|
43aecdd351954d316f132793cf069b70bf2e5cc2
|
[
"MIT"
] | null | null | null |
PathPlanning/run.py
|
CandleStein/VAlg
|
43aecdd351954d316f132793cf069b70bf2e5cc2
|
[
"MIT"
] | 1
|
2020-09-25T18:31:34.000Z
|
2020-09-25T18:31:34.000Z
|
from planning_framework import path
import cv2 as cv
import numpy as np
import argparse
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser(description="Path Planning Visualisation")
parser.add_argument(
"-n",
"--n_heuristic",
default=2,
help="Heuristic for A* Algorithm (default = 2). 0 for Dijkstra's Algorithm",
)
args = parser.parse_args()
N_H = int(args.n_heuristic)
drawing = False # true if mouse is pressed
mode = "obs" # if True, draw rectangle. Press 'm' to toggle to curve
ix, iy = -1, -1
sx, sy = 0, 0
dx, dy = 50, 50
# mouse callback function
def draw(event, x, y, flags, param):
global mode, sx, sy, dx, dy, drawing
if event == cv.EVENT_LBUTTONDOWN:
drawing = True
elif event == cv.EVENT_MOUSEMOVE:
if drawing:
if mode == "obs":
cv.rectangle(img, (x - 5, y - 5), (x + 5, y + 5), (255, 255, 255), -1)
elif event == cv.EVENT_LBUTTONUP:
drawing = False
if mode == "obs":
cv.rectangle(img, (x - 5, y - 5), (x + 5, y + 5), (255, 255, 255), -1)
elif mode == "src":
cv.circle(img, (x, y), 5, (255, 0, 0), -1)
sx, sy = x, y
elif mode == "dst":
cv.circle(img, (x, y), 5, (0, 255, 0), -1)
dx, dy = x, y
img = np.zeros((512, 512, 3), np.uint8)
inv_im = np.ones(img.shape) * 255
cv.namedWindow("Draw the Occupancy Map")
cv.setMouseCallback("Draw the Occupancy Map", draw)
while 1:
cv.imshow("Draw the Occupancy Map", inv_im - img)
if cv.waitKey(20) & 0xFF == 27:
break
cv.destroyAllWindows()
mode = "src"
img_ = img
cv.namedWindow("Set the Starting Point")
cv.setMouseCallback("Set the Starting Point", draw)
while 1:
cv.imshow("Set the Starting Point", inv_im - img)
if cv.waitKey(20) & 0xFF == 27:
break
# cv.waitKey(20)
cv.destroyAllWindows()
mode = "dst"
end = "Set the End Point"
cv.namedWindow(end)
cv.setMouseCallback(end, draw)
while cv.getWindowProperty(end, 0) >= 0:
cv.imshow(end, inv_im - img)
if cv.waitKey(20) & 0xFF == 27:
break
cv.destroyAllWindows()
img = cv.resize(img_, (50, 50), interpolation=cv.INTER_AREA)
inv_img = np.ones(img.shape)
np.savetxt("map.txt", np.array(img[:, :, 0]))
plt.imshow(inv_img - img)
start = np.array([sx, sy]) * 50 // 512
end = np.array([dx, dy]) * 50 // 512
path(start, end, N_H)
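The final lines above map the clicked pixel coordinates from the 512x512 drawing canvas onto the 50x50 occupancy grid using integer floor division. A small standalone check of that scaling; the click coordinates are made up for illustration.
import numpy as np

sx, sy = 300, 140                       # hypothetical click on the 512x512 canvas
cell = np.array([sx, sy]) * 50 // 512   # same scaling as start/end above
print(cell)                             # -> [29 13]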
| 26.820225
| 86
| 0.607038
| 368
| 2,387
| 3.88587
| 0.304348
| 0.008392
| 0.008392
| 0.011189
| 0.202797
| 0.177622
| 0.158042
| 0.158042
| 0.158042
| 0.158042
| 0
| 0.055586
| 0.238793
| 2,387
| 88
| 87
| 27.125
| 0.731425
| 0.049016
| 0
| 0.239437
| 0
| 0
| 0.126711
| 0
| 0
| 0
| 0.005298
| 0
| 0
| 1
| 0.014085
| false
| 0
| 0.070423
| 0
| 0.084507
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
beaf6a34e9709a7f3a490a80d9b84b4126151d38
| 186
|
py
|
Python
|
Codeforces/problems/0136/A/136A.py
|
object-oriented-human/competitive
|
9e761020e887d8980a39a64eeaeaa39af0ecd777
|
[
"MIT"
] | 2
|
2021-07-27T10:46:47.000Z
|
2021-07-27T10:47:57.000Z
|
Codeforces/problems/0136/A/136A.py
|
foooop/competitive
|
9e761020e887d8980a39a64eeaeaa39af0ecd777
|
[
"MIT"
] | null | null | null |
Codeforces/problems/0136/A/136A.py
|
foooop/competitive
|
9e761020e887d8980a39a64eeaeaa39af0ecd777
|
[
"MIT"
] | null | null | null |
n = int(input())
line = list(map(int, input().split()))
l = {}
res = ""
for i, j in enumerate(line):
    l[j] = i + 1
for k in range(n):
    res += str(l[k + 1]) + " "
print(res.rstrip())
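The snippet above builds the inverse permutation: line[i] = j means student j drew number i+1, so l[j] answers "which student holds number j". A quick worked example with a made-up input:
line = [2, 3, 1]                                            # hypothetical input permutation
l = {j: i + 1 for i, j in enumerate(line)}
print(' '.join(str(l[k + 1]) for k in range(len(line))))    # -> 3 1 2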
| 15.5
| 38
| 0.516129
| 34
| 186
| 2.823529
| 0.588235
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013889
| 0.225806
| 186
| 12
| 39
| 15.5
| 0.652778
| 0
| 0
| 0
| 0
| 0
| 0.005348
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
beb013240bc0b9610971205973878d44dedde94f
| 323
|
py
|
Python
|
generatey.py
|
YiLisa/DSCI560-hw2
|
9cf4a40a6e4755ea1b0b68248e553fb4b6b7fdf4
|
[
"Apache-2.0"
] | null | null | null |
generatey.py
|
YiLisa/DSCI560-hw2
|
9cf4a40a6e4755ea1b0b68248e553fb4b6b7fdf4
|
[
"Apache-2.0"
] | null | null | null |
generatey.py
|
YiLisa/DSCI560-hw2
|
9cf4a40a6e4755ea1b0b68248e553fb4b6b7fdf4
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd


def main():
    input = pd.read_csv('random_x.csv', header=None)
    x = input[0].tolist()
    y = []
    for n in x:
        y.append(3 * int(n) + 6)
    df = pd.DataFrame(y)
    df.to_csv('output_y.csv', index=False, header=False)


if __name__ == '__main__':
    main()
    print('generating y = 3x+6...')
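A small companion sketch, not part of the original script, that writes a random_x.csv so the generator above can be run end to end; the values are arbitrary.
import random
import pandas as pd

pd.DataFrame([random.randint(0, 100) for _ in range(10)]).to_csv(
    'random_x.csv', index=False, header=False
)
# After main() runs, output_y.csv contains 3*x + 6 for each x, e.g. x=1 -> 9.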
| 21.533333
| 56
| 0.582043
| 53
| 323
| 3.320755
| 0.641509
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020243
| 0.235294
| 323
| 15
| 57
| 21.533333
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.083333
| 0
| 0.166667
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
beb0a9e7bb5a51ebb9a999b6f45ac4bb5d9df106
| 1,002
|
py
|
Python
|
setup.py
|
burn874/mtg
|
cef47f6ec0ca110bdcb885ec09d6f5aca517c3b2
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
burn874/mtg
|
cef47f6ec0ca110bdcb885ec09d6f5aca517c3b2
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
burn874/mtg
|
cef47f6ec0ca110bdcb885ec09d6f5aca517c3b2
|
[
"Apache-2.0"
] | null | null | null |
import re
from pkg_resources import parse_requirements
import pathlib
from setuptools import find_packages, setup

README_FILE = 'README.md'
REQUIREMENTS_FILE = 'requirements.txt'
VERSION_FILE = 'mtg/_version.py'
VERSION_REGEXP = r'^__version__ = \'(\d+\.\d+\.\d+)\''

r = re.search(VERSION_REGEXP, open(VERSION_FILE).read(), re.M)
if r is None:
    raise RuntimeError(f'Unable to find version string in {VERSION_FILE}.')
version = r.group(1)

long_description = open(README_FILE, encoding='utf-8').read()
install_requires = [str(r) for r in parse_requirements(open(REQUIREMENTS_FILE, 'rt'))]

setup(
    name='mtg',
    version=version,
    description='mtg is a collection of data science and ml projects for Magic:the Gathering',
    long_description=long_description,
    long_description_content_type='text/markdown',
    author='Ryan Saxe',
    author_email='ryancsaxe@gmail.com',
    url='https://github.com/RyanSaxe/mtg',
    packages=find_packages(),
    install_requires=install_requires,
)
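A quick standalone check of the version regex used above; the sample string is an assumption, not taken from the mtg repository.
import re

VERSION_REGEXP = r'^__version__ = \'(\d+\.\d+\.\d+)\''
sample = "__version__ = '1.2.3'\n"
match = re.search(VERSION_REGEXP, sample, re.M)
print(match.group(1))   # -> 1.2.3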
| 31.3125
| 94
| 0.739521
| 139
| 1,002
| 5.122302
| 0.539568
| 0.08427
| 0.053371
| 0.08427
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002302
| 0.132735
| 1,002
| 31
| 95
| 32.322581
| 0.817031
| 0
| 0
| 0
| 0
| 0
| 0.260479
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.153846
| 0
| 0.153846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
beb1a4b08f2fc3818a575158bc7a69b7e5f252c7
| 1,399
|
py
|
Python
|
avilla/core/resource/interface.py
|
RF-Tar-Railt/Avilla
|
0b6eff0e253d4c04a5c82f4f252b6a11b7d81e04
|
[
"MIT"
] | null | null | null |
avilla/core/resource/interface.py
|
RF-Tar-Railt/Avilla
|
0b6eff0e253d4c04a5c82f4f252b6a11b7d81e04
|
[
"MIT"
] | 1
|
2021-12-19T07:43:30.000Z
|
2021-12-19T07:43:30.000Z
|
avilla/core/resource/interface.py
|
RF-Tar-Railt/Avilla
|
0b6eff0e253d4c04a5c82f4f252b6a11b7d81e04
|
[
"MIT"
] | null | null | null |
from __future__ import annotations

from dataclasses import dataclass

from avilla.core.platform import Base
from avilla.core.resource import Resource, ResourceProvider


@dataclass(unsafe_hash=True)  # instances key the providers dict below, so they must be hashable
class ResourceMatchPrefix:
    resource_type: type[Resource]
    keypath: str | None = None
    platform: Base | None = None


class ResourceInterface:
    providers: dict[ResourceMatchPrefix, ResourceProvider]

    def __init__(self):
        self.providers = {}

    def register(
        self,
        resource_type: type[Resource],
        provider: ResourceProvider,
        *,
        mainline_keypath: str | None = None,
        platform: Base | None = None,
    ):
        self.providers[ResourceMatchPrefix(resource_type, mainline_keypath, platform)] = provider

    def get_provider(
        self,
        resource: Resource | type[Resource],
        *,
        mainline_keypath: str | None = None,
        platform: Base | None = None,
    ) -> ResourceProvider | None:
        resource_type = resource if isinstance(resource, type) else type(resource)
        for prefix in self.providers:
            if all((
                prefix.resource_type is resource_type,
                prefix.keypath == mainline_keypath if prefix.keypath is not None else True,
                prefix.platform == platform if prefix.platform is not None else True
            )):
                return self.providers[prefix]
| 29.765957
| 97
| 0.647605
| 143
| 1,399
| 6.202797
| 0.272727
| 0.10823
| 0.047351
| 0.060879
| 0.184893
| 0.146561
| 0.146561
| 0.146561
| 0.10372
| 0
| 0
| 0
| 0.277341
| 1,399
| 46
| 98
| 30.413043
| 0.877349
| 0
| 0
| 0.216216
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081081
| false
| 0
| 0.108108
| 0
| 0.378378
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
beb317bf51c8d955452bb7ade64a00caeb647030
| 8,722
|
py
|
Python
|
autotest/test_gwf_buy_lak01.py
|
scharlton2/modflow6
|
83ac72ee3b6f580aaffef6352cf15c1697d3ce66
|
[
"CC0-1.0"
] | 3
|
2019-07-10T21:16:57.000Z
|
2021-10-08T00:56:20.000Z
|
autotest/test_gwf_buy_lak01.py
|
scharlton2/modflow6
|
83ac72ee3b6f580aaffef6352cf15c1697d3ce66
|
[
"CC0-1.0"
] | null | null | null |
autotest/test_gwf_buy_lak01.py
|
scharlton2/modflow6
|
83ac72ee3b6f580aaffef6352cf15c1697d3ce66
|
[
"CC0-1.0"
] | 3
|
2019-11-28T16:26:50.000Z
|
2020-02-05T11:08:37.000Z
|
# Test the buoyancy package and the variable density flows between the lake
# and the gwf model. This model has 4 layers and a lake incised within it.
# The model is transient and has heads in the aquifer higher than the initial
# stage in the lake. As the model runs, the lake and aquifer equalize and
# should end up at the same level. The test ensures that the initial and
# final water volumes in the entire system are the same. There are three
# different cases:
# 1. No buoyancy package
# 2. Buoyancy package with lake and aquifer density = 1000.
# 3. Buoyancy package with lake and aquifer density = 1024.5
import os
import pytest
import sys
import numpy as np
try:
import flopy
except:
msg = "Error. FloPy package is not available.\n"
msg += "Try installing using the following command:\n"
msg += " pip install flopy"
raise Exception(msg)
from framework import testing_framework
from simulation import Simulation
ex = ["buy_lak_01a"] # , 'buy_lak_01b', 'buy_lak_01c']
buy_on_list = [False] # , True, True]
concbuylist = [0.0] # , 0., 35.]
exdirs = []
for s in ex:
exdirs.append(os.path.join("temp", s))
def build_model(idx, dir):
lx = 7.0
lz = 4.0
nlay = 4
nrow = 1
ncol = 7
nper = 1
delc = 1.0
delr = lx / ncol
delz = lz / nlay
top = 4.0
botm = [3.0, 2.0, 1.0, 0.0]
perlen = [10.0]
nstp = [50]
tsmult = [1.0]
Kh = 1.0
Kv = 1.0
tdis_rc = []
for i in range(nper):
tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
nouter, ninner = 700, 300
hclose, rclose, relax = 1e-8, 1e-6, 0.97
name = ex[idx]
# build MODFLOW 6 files
ws = dir
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws
)
# create tdis package
tdis = flopy.mf6.ModflowTdis(
sim, time_units="DAYS", nper=nper, perioddata=tdis_rc
)
# create gwf model
gwfname = "gwf_" + name
gwf = flopy.mf6.ModflowGwf(sim, modelname=gwfname, newtonoptions="NEWTON")
imsgwf = flopy.mf6.ModflowIms(
sim,
print_option="ALL",
outer_dvclose=hclose,
outer_maximum=nouter,
under_relaxation="NONE",
inner_maximum=ninner,
inner_dvclose=hclose,
rcloserecord=rclose,
linear_acceleration="BICGSTAB",
scaling_method="NONE",
reordering_method="NONE",
relaxation_factor=relax,
filename="{}.ims".format(gwfname),
)
idomain = np.full((nlay, nrow, ncol), 1)
idomain[0, 0, 1:6] = 0
idomain[1, 0, 2:5] = 0
idomain[2, 0, 3:4] = 0
dis = flopy.mf6.ModflowGwfdis(
gwf,
nlay=nlay,
nrow=nrow,
ncol=ncol,
delr=delr,
delc=delc,
top=top,
botm=botm,
idomain=idomain,
)
# initial conditions
strt = np.zeros((nlay, nrow, ncol), dtype=float)
strt[0, 0, :] = 3.5
strt[1, 0, :] = 3.0
strt[1, 0, 1:6] = 2.5
strt[2, 0, :] = 2.0
strt[3, 0, :] = 1.0
ic = flopy.mf6.ModflowGwfic(gwf, strt=strt)
# node property flow
npf = flopy.mf6.ModflowGwfnpf(
gwf,
xt3doptions=False,
save_flows=True,
save_specific_discharge=True,
icelltype=1,
k=Kh,
k33=Kv,
)
sto = flopy.mf6.ModflowGwfsto(gwf, sy=0.3, ss=0.0, iconvert=1)
c = concbuylist[idx]
lake_dense = 1000.0 + 0.7 * c
buy_on = buy_on_list[idx]
if buy_on:
pd = [(0, 0.7, 0.0, "none", "none")]
buy = flopy.mf6.ModflowGwfbuy(
gwf, packagedata=pd, denseref=1000.0, concentration=c
)
nlakeconn = 11 # note: number of connections for this lake
# pak_data = [lakeno, strt, nlakeconn, dense, boundname]
pak_data = [(0, 2.25, nlakeconn, lake_dense)]
connlen = delr / 2.0
connwidth = delc
bedleak = "None"
con_data = [
# con_data=(lakeno,iconn,(cellid),claktype,bedleak,belev,telev,connlen,connwidth )
(0, 0, (0, 0, 0), "HORIZONTAL", bedleak, 10, 10, connlen, connwidth),
(0, 1, (1, 0, 1), "VERTICAL", bedleak, 10, 10, connlen, connwidth),
(0, 2, (1, 0, 1), "HORIZONTAL", bedleak, 10, 10, connlen, connwidth),
(0, 3, (2, 0, 2), "VERTICAL", bedleak, 10, 10, connlen, connwidth),
(0, 4, (2, 0, 2), "HORIZONTAL", bedleak, 10, 10, connlen, connwidth),
(0, 5, (3, 0, 3), "VERTICAL", bedleak, 10, 10, connlen, connwidth),
(0, 6, (2, 0, 4), "HORIZONTAL", bedleak, 10, 10, connlen, connwidth),
(0, 7, (2, 0, 4), "VERTICAL", bedleak, 10, 10, connlen, connwidth),
(0, 8, (1, 0, 5), "HORIZONTAL", bedleak, 10, 10, connlen, connwidth),
(0, 9, (1, 0, 5), "VERTICAL", bedleak, 10, 10, connlen, connwidth),
(0, 10, (0, 0, 6), "HORIZONTAL", bedleak, 10, 10, connlen, connwidth),
]
# period data
p_data = [
(0, "STATUS", "ACTIVE"),
]
# note: for specifying lake number, use fortran indexing!
fname = "{}.lak.obs.csv".format(gwfname)
lak_obs = {
fname: [
("lakestage", "stage", 1),
("lakevolume", "volume", 1),
("lak1", "lak", 1, 1),
("lak2", "lak", 1, 2),
("lak3", "lak", 1, 3),
("lak4", "lak", 1, 4),
("lak5", "lak", 1, 5),
("lak6", "lak", 1, 6),
("lak7", "lak", 1, 7),
("lak8", "lak", 1, 8),
("lak9", "lak", 1, 9),
("lak10", "lak", 1, 10),
("lak11", "lak", 1, 11),
],
# "digits": 10,
}
lak = flopy.mf6.modflow.ModflowGwflak(
gwf,
save_flows=True,
print_input=True,
print_flows=True,
print_stage=True,
stage_filerecord="{}.lak.bin".format(gwfname),
budget_filerecord="{}.lak.bud".format(gwfname),
nlakes=len(pak_data),
ntables=0,
packagedata=pak_data,
pname="LAK-1",
connectiondata=con_data,
perioddata=p_data,
observations=lak_obs,
auxiliary=["DENSITY"],
)
# output control
oc = flopy.mf6.ModflowGwfoc(
gwf,
budget_filerecord="{}.cbc".format(gwfname),
head_filerecord="{}.hds".format(gwfname),
headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")],
saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
printrecord=[("HEAD", "LAST"), ("BUDGET", "LAST")],
)
return sim, None
def eval_results(sim):
print("evaluating results...")
# calculate volume of water and make sure it is conserved
name = ex[sim.idxsim]
gwfname = "gwf_" + name
fname = gwfname + ".lak.bin"
fname = os.path.join(sim.simpath, fname)
assert os.path.isfile(fname)
bobj = flopy.utils.HeadFile(fname, text="STAGE")
stage = bobj.get_alldata().flatten()
# print(stage)
fname = gwfname + ".hds"
fname = os.path.join(sim.simpath, fname)
assert os.path.isfile(fname)
hobj = flopy.utils.HeadFile(fname)
head = hobj.get_data()
# print(head)
# calculate initial water volume
v0 = 3.5 * 2 # outermost columns
v0 += 2.5 * 2 # next innermost columns
v0 += 2.0 * 2 # next innermost columns
v0 += 1.0 * 1 # middle column
v0 = v0 * 0.3 # specific yield
v0 = v0 + (2.25 - 2.0) * 2 + (2.25 - 1.0)
print("initial volume of water in model = {}".format(v0))
# calculate ending water volume in model
h = head[0, 0, 0]
s = stage[-1]
v = h * 4 + 2.0 * 2 + 1.0 * 1
v = v * 0.3 # specific yield
v = v + (s - 2.0) * 2 + (s - 1.0)
print("final volume of water in model = {}".format(v))
# check to make sure starting water volume same as equalized final volume
errmsg = "initial and final water volume not equal: {} {}".format(v0, v)
assert np.allclose(v0, v)
# todo: add a better check of the lake concentrations
# assert False
# - No need to change any code below
@pytest.mark.parametrize(
"idx, dir",
list(enumerate(exdirs)),
)
def test_mf6model(idx, dir):
# initialize testing framework
test = testing_framework()
# build the model
test.build_mf6_models(build_model, idx, dir)
# run the test model
test.run_mf6(Simulation(dir, exfunc=eval_results, idxsim=idx))
def main():
# initialize testing framework
test = testing_framework()
# run the test model
for idx, dir in enumerate(exdirs):
test.build_mf6_models(build_model, idx, dir)
sim = Simulation(dir, exfunc=eval_results, idxsim=idx)
test.run_mf6(sim)
if __name__ == "__main__":
# print message
print("standalone run of {}".format(os.path.basename(__file__)))
# run main routine
main()
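For reference, the water-volume bookkeeping that eval_results() relies on works out as follows; the specific yield 0.3 and the 2.25 initial lake stage come from build_model above, and this arithmetic is written out here only for illustration.
v0 = 3.5 * 2 + 2.5 * 2 + 2.0 * 2 + 1.0 * 1   # saturated thickness per column -> 17.0
v0 *= 0.3                                    # specific yield -> approximately 5.1
v0 += (2.25 - 2.0) * 2 + (2.25 - 1.0)        # open lake water -> approximately 6.85 in total
print(v0)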
| 29.073333
| 90
| 0.576015
| 1,192
| 8,722
| 4.143456
| 0.300336
| 0.006884
| 0.037862
| 0.040089
| 0.189917
| 0.178781
| 0.142134
| 0.03523
| 0.021462
| 0.021462
| 0
| 0.057552
| 0.278835
| 8,722
| 299
| 91
| 29.170569
| 0.727663
| 0.184132
| 0
| 0.073733
| 0
| 0
| 0.101019
| 0
| 0
| 0
| 0
| 0.003344
| 0.013825
| 1
| 0.018433
| false
| 0
| 0.032258
| 0
| 0.0553
| 0.046083
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
beb37d345ad255de414b430caeba23a0fa10d2d1
| 441
|
py
|
Python
|
lesson-08/roll_dice_v1.0.py
|
hemiaoio/pylearning
|
4b3885ed7177db4e6e03da80dd9ed69719c8d866
|
[
"MIT"
] | 1
|
2018-11-11T03:44:02.000Z
|
2018-11-11T03:44:02.000Z
|
lesson-08/roll_dice_v1.0.py
|
hemiaoio/learn-python
|
4b3885ed7177db4e6e03da80dd9ed69719c8d866
|
[
"MIT"
] | null | null | null |
lesson-08/roll_dice_v1.0.py
|
hemiaoio/learn-python
|
4b3885ed7177db4e6e03da80dd9ed69719c8d866
|
[
"MIT"
] | null | null | null |
"""
功能:模拟掷骰子
版本:1.0
"""
import random
def roll_dice():
roll = random.randint(1, 6)
return roll
def main():
total_times = 100000
result_list = [0] * 6
for i in range(total_times):
roll = roll_dice()
result_list[roll-1] += 1
for i, x in enumerate(result_list):
print('点数{}的次数:{},频率:{}'.format(i+1, x, x/total_times))
print(result_list)
if __name__ == '__main__':
main()
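The frequencies printed by main() should approach the fair-die probability 1/6, roughly 0.1667; a quick independent check with collections.Counter, added here only for illustration.
from collections import Counter
import random

rolls = Counter(random.randint(1, 6) for _ in range(100000))
for face in range(1, 7):
    print(face, rolls[face] / 100000)   # each frequency should be close to 0.1667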
| 15.206897
| 63
| 0.569161
| 65
| 441
| 3.6
| 0.492308
| 0.17094
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.046875
| 0.274376
| 441
| 28
| 64
| 15.75
| 0.684375
| 0.034014
| 0
| 0
| 0
| 0
| 0.058537
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.066667
| 0
| 0.266667
| 0.133333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
beb557aa11e275e2f9691dee969a012dab3f26db
| 759
|
py
|
Python
|
composer/dataflow-python3/main.py
|
gxercavins/gcp-snippets
|
a90e4e9c922370face876aa7c56db610896e1a6f
|
[
"Apache-2.0"
] | 2
|
2022-02-07T07:53:35.000Z
|
2022-02-23T18:46:03.000Z
|
composer/dataflow-python3/main.py
|
gxercavins/gcp-snippets
|
a90e4e9c922370face876aa7c56db610896e1a6f
|
[
"Apache-2.0"
] | 1
|
2019-10-26T19:03:34.000Z
|
2019-10-26T19:03:48.000Z
|
composer/dataflow-python3/main.py
|
gxercavins/gcp-snippets
|
a90e4e9c922370face876aa7c56db610896e1a6f
|
[
"Apache-2.0"
] | 6
|
2020-03-19T23:58:46.000Z
|
2022-02-07T07:53:37.000Z
|
import argparse
import logging

import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions


def run(argv=None, save_main_session=True):
    """Dummy pipeline to test Python3 operator."""
    parser = argparse.ArgumentParser()
    known_args, pipeline_args = parser.parse_known_args(argv)

    pipeline_options = PipelineOptions(pipeline_args)
    pipeline_options.view_as(SetupOptions).save_main_session = save_main_session
    p = beam.Pipeline(options=pipeline_options)

    # Just a simple test
    p | 'Create Events' >> beam.Create([1, 2, 3])

    result = p.run()
    result.wait_until_finish()


if __name__ == '__main__':
    logging.getLogger().setLevel(logging.INFO)
    run()
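A slightly extended local sketch of the same dummy pipeline, assuming the default DirectRunner; it only demonstrates chaining one extra transform and is not part of the Composer example.
import apache_beam as beam

with beam.Pipeline() as p:   # DirectRunner by default when no options are given
    (
        p
        | 'Create Events' >> beam.Create([1, 2, 3])
        | 'Double' >> beam.Map(lambda x: x * 2)
        | 'Print' >> beam.Map(print)
    )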
| 27.107143
| 78
| 0.777339
| 101
| 759
| 5.544554
| 0.475248
| 0.160714
| 0.117857
| 0.075
| 0.15
| 0.15
| 0.15
| 0
| 0
| 0
| 0
| 0.006033
| 0.126482
| 759
| 27
| 79
| 28.111111
| 0.838612
| 0.079051
| 0
| 0
| 0
| 0
| 0.030303
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.294118
| 0
| 0.352941
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
beb69b31ba90004b6f8731fea0065e0f64e36216
| 1,412
|
py
|
Python
|
backend/garpix_page/setup.py
|
griviala/garpix_page
|
55f1d9bc6d1de29d18e15369bebcbef18811b5a4
|
[
"MIT"
] | null | null | null |
backend/garpix_page/setup.py
|
griviala/garpix_page
|
55f1d9bc6d1de29d18e15369bebcbef18811b5a4
|
[
"MIT"
] | null | null | null |
backend/garpix_page/setup.py
|
griviala/garpix_page
|
55f1d9bc6d1de29d18e15369bebcbef18811b5a4
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
from os import path
here = path.join(path.abspath(path.dirname(__file__)), 'garpix_page')
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()
setup(
name='garpix_page',
version='2.23.0',
description='',
long_description=long_description,
url='https://github.com/garpixcms/garpix_page',
author='Garpix LTD',
author_email='info@garpix.com',
license='MIT',
packages=find_packages(exclude=['testproject', 'testproject.*']),
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
include_package_data=True,
zip_safe=False,
install_requires=[
'Django >= 1.11',
'django-polymorphic-tree-for-garpix-page >= 2.1.1',
'django-modeltranslation >= 0.16.2',
'django-multiurl >= 1.4.0',
'djangorestframework >= 3.12.4',
'garpix_utils >= 1.4.0',
'django-tabbed-admin >= 1.0.4',
'model-bakery >= 1.4.0'
],
)
| 32.090909
| 69
| 0.607649
| 163
| 1,412
| 5.153374
| 0.564417
| 0.047619
| 0.119048
| 0.092857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035023
| 0.231586
| 1,412
| 43
| 70
| 32.837209
| 0.739171
| 0
| 0
| 0.05
| 0
| 0
| 0.478754
| 0.043909
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.05
| 0
| 0.05
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
beb76b3debe06f273a8ef3ec32c53943cd031a3b
| 20,225
|
py
|
Python
|
.kodi/addons/plugin.video.p2p-streams/resources/core/livestreams.py
|
C6SUMMER/allinclusive-kodi-pi
|
8baf247c79526849c640c6e56ca57a708a65bd11
|
[
"Apache-2.0"
] | null | null | null |
.kodi/addons/plugin.video.p2p-streams/resources/core/livestreams.py
|
C6SUMMER/allinclusive-kodi-pi
|
8baf247c79526849c640c6e56ca57a708a65bd11
|
[
"Apache-2.0"
] | null | null | null |
.kodi/addons/plugin.video.p2p-streams/resources/core/livestreams.py
|
C6SUMMER/allinclusive-kodi-pi
|
8baf247c79526849c640c6e56ca57a708a65bd11
|
[
"Apache-2.0"
] | 2
|
2018-04-17T17:34:39.000Z
|
2020-07-26T03:43:33.000Z
|
# -*- coding: utf-8 -*-
""" p2p-streams (c) 2014 enen92 fightnight
This file contains the livestream addon engine. It is mostly based on divingmule work on livestreams addon!
Functions:
xml_lists_menu() -> main menu for the xml list category
addlista() -> add a new list. It asks whether the list is local or remote and processes the given input
remove_list(name) -> Remove a list
get_groups(url) -> First regex function to parse a given list. Sopcast type list
get_channels(name,url) -> Second regex function to parse a given list. Used for general livestreams xml type lists
getData(url,fanart) -> Get the item data such as iconimage, fanart, etc
getChannelItems(name,url,fanart) -> Function to grab the channel items
getItems(items,fanart) -> Function to grab the items from the xml
removeNonAscii(s) -> Function to remove non-ascii characters from the list
getSoup(url) -> uses beautifulsoup to parse a remote xml
addon_log(string) -> Simple log/print function
getRegexParsed(regexs, url) -> parse the regex expression
list_type(url) -> Checks if the list is xml or m3u
parse_m3u(url) -> Parses a m3u type list
"""
import urllib,urllib2,re,xbmcplugin,xbmcgui,xbmc,xbmcaddon,HTMLParser,time,datetime,os,xbmcvfs,sys
from BeautifulSoup import BeautifulStoneSoup, BeautifulSoup, BeautifulSOAP
from peertopeerutils.pluginxbmc import *
from peertopeerutils.webutils import *
from peertopeerutils.directoryhandle import *
from peertopeerutils.iofile import *
"""
Main Menu
"""
def xml_lists_menu():
if settings.getSetting('sopcast-oficial') == "true":
addDir(translate(40116),"http://sopcast.org/chlist.xml",101,addonpath + art + 'xml_list_sopcast.png',2,True)
try:
if os.path.exists(os.path.join(pastaperfil,"Lists")):
dirs, files = xbmcvfs.listdir(os.path.join(pastaperfil,"Lists"))
for file in files:
f = open(os.path.join(pastaperfil,"Lists",file), "r")
string = f.read()
if xbmcvfs.exists(os.path.join(pastaperfil,"Lists-fanart",file.replace('.txt','.jpg'))):addDir("[B][COLOR orange]" + file.replace(".txt","") + "[/B][/COLOR]",string,101,addonpath + art + 'xml_lists.png',2,True,fan_art=os.path.join(pastaperfil,"Lists-fanart",file.replace('.txt','.jpg')))
else: addDir("[B][COLOR orange]" + file.replace(".txt","") + "[/B][/COLOR]",string,101,addonpath + art + 'xml_lists.png',2,True)
except: pass
addDir(translate(40121),MainURL,107,addonpath + art + 'plus-menu.png',2,False)
#xbmc.executebuiltin("Container.SetViewMode(51)")
"""
Add a new list function
"""
def addlista():
opcao= xbmcgui.Dialog().yesno(translate(40000), translate(40123),"","",translate(40124),translate(40125))
if opcao:
dialog = xbmcgui.Dialog()
lista_xml = dialog.browse(int(1), translate(40186), 'myprograms','.xml|.m3u')
keybdois = xbmc.Keyboard("", translate(40130))
keybdois.doModal()
if (keybdois.isConfirmed()):
searchname = keybdois.getText()
if searchname=='': sys.exit(0)
encode=urllib.quote(searchname)
if xbmcvfs.exists(os.path.join(pastaperfil,"Lists")): pass
else: xbmcvfs.mkdir(os.path.join(pastaperfil,"Lists"))
txt_name = searchname + ".txt"
save(os.path.join(pastaperfil,"Lists",txt_name),lista_xml)
mensagemok(translate(40000),translate(40129))
xbmc.executebuiltin("XBMC.Container.Refresh")
else:
keyb = xbmc.Keyboard("", translate(40127))
keyb.doModal()
if (keyb.isConfirmed()):
search = keyb.getText()
if search=='': sys.exit(0)
if "dropbox" in search and not "?dl=1" in search: search = search + '?dl=1'
if "xml" not in search.split(".")[-1] and "m3u" not in search.split(".")[-1]: mensagemok(translate(40000),translate(40128)); sys.exit(0)
else:
try:
code = get_page_source(search)
except:
mensagemok(translate(40000),translate(40128))
sys.exit(0)
keybdois = xbmc.Keyboard("", translate(40130))
keybdois.doModal()
if (keybdois.isConfirmed()):
searchname = keybdois.getText()
if searchname=='': sys.exit(0)
encode=urllib.quote(searchname)
if os.path.exists(os.path.join(pastaperfil,"Lists")): pass
else: xbmcvfs.mkdir(os.path.join(pastaperfil,"Lists"))
txt_name = searchname + ".txt"
save(os.path.join(pastaperfil,"Lists",txt_name),search)
mensagemok(translate(40000),translate(40129))
xbmc.executebuiltin("XBMC.Container.Refresh")
"""
Remove a List
"""
def remove_list(name):
xbmcvfs.delete(name)
xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % (translate(40000), translate(40150), 1,addonpath+"/icon.png"))
xbmc.executebuiltin("Container.Refresh")
"""
Parsing functions
"""
def list_type(url):
ltype = url.split('.')[-1]
if 'xml' in ltype: get_groups(url)
elif 'm3u' in ltype: parse_m3u(url)
else: pass
def parse_m3u(url):
if "http" in url: content = get_page_source(url)
else: content = readfile(url)
match = re.compile('#EXTINF:.+?,(.*?)\n(.*?)(?:\r|\n)').findall(content)
for channel_name,stream_url in match:
if 'plugin://' in stream_url:
stream_url = 'XBMC.RunPlugin('+stream_url+')'
addDir(channel_name,stream_url,106,'',1,False)
elif 'sop://' in stream_url:
addDir(channel_name,stream_url,2,'',1,False)
elif ('acestream://' in stream_url) or ('.acelive' in stream_url) or ('.torrent' in stream_url):
addDir(channel_name,stream_url,1,'',1,False)
else: addLink(channel_name,stream_url,'')
def get_groups(url):
from xml.etree import ElementTree
try:
print("Sopcast xml-type list detected")
if "http" in url:
source = get_page_source(url)
save(os.path.join(pastaperfil,"working.xml"),source)
workingxml = os.path.join(pastaperfil,"working.xml")
else:
workingxml = url
groups = ElementTree.parse(workingxml).findall('.//group')
unname_group_index = 1
LANGUAGE = "en"
for group in groups:
if group.attrib[LANGUAGE] == "":
group.attrib[LANGUAGE] = str(unname_group_index)
unname_group_index = unname_group_index + 1
if re.sub('c','e',LANGUAGE) == LANGUAGE:
OTHER_LANG = re.sub('e','c',LANGUAGE)
else:
OTHER_LANG = re.sub('c','e',LANGUAGE)
if LANGUAGE == "cn":
try:
if len(group.attrib[OTHER_LANG]) > 0:
group.attrib[LANGUAGE] = group.attrib[OTHER_LANG]
unname_group_index = unname_group_index - 1
except:
pass
if (group.find('.//channel')==None): continue
group_name=group.attrib[LANGUAGE]
try:
addDir_livestreams_common(group_name,url,102,addonpath + art + 'xml_list_sopcast.png',True)
except: pass
#xbmc.executebuiltin("Container.SetViewMode(51)")
except:
print("Other type of xml list")
getData(url,"")
def get_channels(name,url):
from xml.etree import ElementTree
if url.startswith('http://'):
source = get_page_source(url)
else:
source = readfile(url)
save(os.path.join(pastaperfil,"working.xml"),source)
chlist_tree = ElementTree.parse(os.path.join(pastaperfil,"working.xml"))
LANGUAGE = "en"
groups = ElementTree.parse(os.path.join(pastaperfil,"working.xml")).findall('.//group')
for group in groups:
if group.attrib[LANGUAGE].encode('utf-8') == name:
channels = group.findall('.//channel')
for channel in channels:
try:
try:
title = channel.find('.//name').attrib['en'].encode('utf-8')
except: title = ''
if not title:
try: title = channel.find('.//name').attrib['cn'].encode('utf-8')
except: title = ''
if not title:
try: title = channel.find('.//name').text
except: title = ''
tipo = channel.find('.//stream_type').text
sop_address = channel.find('.//item').text
if not tipo: tipo = "N/A"
if not title: title = "N/A"
thumbnail = ""
try:
thumbnail = channel.find('.//thumbnail').text
except: pass
if sop_address:
if thumbnail == "": thumbnail = addonpath + art + 'sopcast_link.png'
try: addDir_livestreams_common('[B][COLOR orange]' + title + ' [/B][/COLOR](' + tipo +')',sop_address,2,thumbnail,False)
except:pass
else: pass
except: pass
else: pass
def getData(url,fanart):
soup = getSoup(url)
if len(soup('channels')) > 0:
channels = soup('channel')
for channel in channels:
name = channel('name')[0].string
thumbnail = channel('thumbnail')[0].string
if thumbnail == None:
thumbnail = ''
try:
if not channel('fanart'):
if addon.getSetting('use_thumb') == "true":
fanArt = thumbnail
else:
fanArt = fanart
else:
fanArt = channel('fanart')[0].string
if fanArt == None:
raise
except:
fanArt = fanart
try:
desc = channel('info')[0].string
if desc == None:
raise
except:
desc = ''
try:
genre = channel('genre')[0].string
if genre == None:
raise
except:
genre = ''
try:
date = channel('date')[0].string
if date == None:
raise
except:
date = ''
try:
credits = channel('credits')[0].string
if credits == None:
raise
except:
credits = ''
try:
addDir_livestreams(name.encode('utf-8', 'ignore'),url.encode('utf-8'),103,thumbnail,fanArt,desc,genre,date,credits,True)
except:
addon_log('There was a problem adding directory from getData(): '+name.encode('utf-8', 'ignore'))
else:
addon_log('No Channels: getItems')
getItems(soup('item'),fanart)
def getChannelItems(name,url,fanart):
soup = getSoup(url)
channel_list = soup.find('channel', attrs={'name' : name.decode('utf-8')})
items = channel_list('item')
try:
fanArt = channel_list('fanart')[0].string
if fanArt == None:
raise
except:
fanArt = fanart
for channel in channel_list('subchannel'):
name = channel('name')[0].string
try:
thumbnail = channel('thumbnail')[0].string
if thumbnail == None:
raise
except:
thumbnail = ''
try:
if not channel('fanart'):
if addon.getSetting('use_thumb') == "true":
fanArt = thumbnail
else:
fanArt = channel('fanart')[0].string
if fanArt == None:
raise
except:
pass
try:
desc = channel('info')[0].string
if desc == None:
raise
except:
desc = ''
try:
genre = channel('genre')[0].string
if genre == None:
raise
except:
genre = ''
try:
date = channel('date')[0].string
if date == None:
raise
except:
date = ''
try:
credits = channel('credits')[0].string
if credits == None:
raise
except:
credits = ''
try:
addDir_livestreams(name.encode('utf-8', 'ignore'),url.encode('utf-8'),3,thumbnail,fanArt,desc,genre,credits,date)
except:
addon_log('There was a problem adding directory - '+name.encode('utf-8', 'ignore'))
getItems(items,fanArt)
def getItems(items,fanart):
total = len(items)
addon_log('Total Items: %s' %total)
for item in items:
try:
name = item('title')[0].string
if name is None:
name = 'unknown?'
except:
addon_log('Name Error')
name = ''
try:
if item('epg'):
if item.epg_url:
addon_log('Get EPG Regex')
epg_url = item.epg_url.string
epg_regex = item.epg_regex.string
epg_name = get_epg(epg_url, epg_regex)
if epg_name:
name += ' - ' + epg_name
elif item('epg')[0].string > 1:
name += getepg(item('epg')[0].string)
else:
pass
except:
addon_log('EPG Error')
try:
url = []
for i in item('link'):
if not i.string == None:
url.append(i.string)
if len(url) < 1:
raise
except:
addon_log('Error <link> element, Passing:'+name.encode('utf-8', 'ignore'))
continue
try:
thumbnail = item('thumbnail')[0].string
if thumbnail == None:
raise
except:
thumbnail = ''
try:
if not item('fanart'):
if addon.getSetting('use_thumb') == "true":
fanArt = thumbnail
else:
fanArt = fanart
else:
fanArt = item('fanart')[0].string
if fanArt == None:
raise
except:
fanArt = fanart
try:
desc = item('info')[0].string
if desc == None:
raise
except:
desc = ''
try:
genre = item('genre')[0].string
if genre == None:
raise
except:
genre = ''
try:
date = item('date')[0].string
if date == None:
raise
except:
date = ''
regexs = None
if item('regex'):
try:
regexs = {}
for i in item('regex'):
regexs[i('name')[0].string] = {}
regexs[i('name')[0].string]['expre'] = i('expres')[0].string
regexs[i('name')[0].string]['page'] = i('page')[0].string
try:
regexs[i('name')[0].string]['refer'] = i('referer')[0].string
except:
addon_log("Regex: -- No Referer --")
try:
regexs[i('name')[0].string]['agent'] = i('agent')[0].string
except:
addon_log("Regex: -- No User Agent --")
regexs = urllib.quote(repr(regexs))
except:
regexs = None
addon_log('regex Error: '+name.encode('utf-8', 'ignore'))
try:
if "RunPlugin" in url[0]:
try:
addDir_livestreams(name.encode('utf-8', 'ignore'),url[0],106,thumbnail,fanArt,desc,genre,"credits",date)
except:
match = re.compile("&name=(.+?)\)").findall(url[0].replace(";",""))
if match:
try:
addDir_livestreams(name.encode('utf-8', 'ignore'),removeNonAscii(url[0]),106,thumbnail,fanArt,desc,genre,credits,date)
except:
try:
addDir_livestreams(removeNonAscii(name.encode('utf-8', 'ignore')),removeNonAscii(url[0].replace(";","")),106,thumbnail,fanArt,desc,genre,credits,date)
except:
addon_log('There was a problem adding item - '+name.encode('utf-8', 'ignore'))
else:
addon_log('There was a problem adding item - '+name.encode('utf-8', 'ignore'))
else:
if ('acestream://' in url[0]) or ('.acelive' in url[0]) or ('.torrent' in url[0]):
if 'plugin://' not in url[0]:
addDir_livestreams(name.encode('utf-8', 'ignore'),url[0],1,thumbnail,fanArt,desc,genre,"credits",date)
else:
addLink_livestreams(url[0].replace(';',''),name.encode('utf-8', 'ignore'),thumbnail,fanArt,desc,genre,date,True,None,regexs,total)
elif 'sop://' in url[0]:
if 'plugin://' not in url[0]:
addDir_livestreams(name.encode('utf-8', 'ignore'),url[0],2,thumbnail,fanArt,desc,genre,"credits",date)
else:
addLink_livestreams(url[0].replace(';',''),name.encode('utf-8', 'ignore'),thumbnail,fanArt,desc,genre,date,True,None,regexs,total)
else: addLink_livestreams(url[0].replace(';',''),name.encode('utf-8', 'ignore'),thumbnail,fanArt,desc,genre,date,True,None,regexs,total)
except:
addon_log('There was a problem adding item - '+name.encode('utf-8', 'ignore'))
def removeNonAscii(s): return "".join(filter(lambda x: ord(x)<128, s))
def getSoup(url):
if url.startswith('http://'):
data = makeRequest(url)
else:
if xbmcvfs.exists(url):
if url.startswith("smb://") or url.startswith("nfs://"):
copy = xbmcvfs.copy(url, os.path.join(profile, 'temp', 'sorce_temp.txt'))
if copy:
data = open(os.path.join(profile, 'temp', 'sorce_temp.txt'), "r").read()
xbmcvfs.delete(os.path.join(profile, 'temp', 'sorce_temp.txt'))
else:
addon_log("failed to copy from smb:")
else:
data = open(url, 'r').read()
else:
addon_log("Soup Data not found!")
return
return BeautifulSOAP(data, convertEntities=BeautifulStoneSoup.XML_ENTITIES)
def addon_log(string):
print(string)
def getRegexParsed(regexs, url):
regexs = eval(urllib.unquote(regexs))
cachedPages = {}
doRegexs = re.compile('\$doregex\[([^\]]*)\]').findall(url)
for k in doRegexs:
if k in regexs:
m = regexs[k]
if m['page'] in cachedPages:
link = cachedPages[m['page']]
else:
req = urllib2.Request(m['page'])
req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:14.0) Gecko/20100101 Firefox/14.0.1')
if 'refer' in m:
req.add_header('Referer', m['refer'])
if 'agent' in m:
req.add_header('User-agent', m['agent'])
response = urllib2.urlopen(req)
link = response.read()
response.close()
cachedPages[m['page']] = link
reg = re.compile(m['expre']).search(link)
url = url.replace("$doregex[" + k + "]", reg.group(1).strip())
item = xbmcgui.ListItem(path=url)
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, item)
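A stripped-down illustration of the $doregex[...] substitution that getRegexParsed() performs above, with the page fetch replaced by an in-memory string; every value here is made up for the example.
import re

regexs = {
    'stream': {
        'expre': r'file: "(.*?)"',
        'page_source': 'file: "http://example.com/live.m3u8"',   # stands in for the fetched page
    }
}
url = 'plugin://play?u=$doregex[stream]'
for key in re.compile(r'\$doregex\[([^\]]*)\]').findall(url):
    found = re.search(regexs[key]['expre'], regexs[key]['page_source'])
    url = url.replace('$doregex[' + key + ']', found.group(1).strip())
print(url)   # -> plugin://play?u=http://example.com/live.m3u8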
| 38.745211
| 303
| 0.514907
| 2,180
| 20,225
| 4.715138
| 0.155963
| 0.021792
| 0.021403
| 0.023154
| 0.506956
| 0.443623
| 0.419691
| 0.393715
| 0.330966
| 0.305283
| 0
| 0.021257
| 0.348727
| 20,225
| 521
| 304
| 38.819578
| 0.75911
| 0.059135
| 0
| 0.524138
| 0
| 0.002299
| 0.110133
| 0.006525
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032184
| false
| 0.032184
| 0.018391
| 0.002299
| 0.052874
| 0.006897
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
beb987a1f2b8198bf13096fe552301ac5d99117d
| 889
|
py
|
Python
|
api-reference-examples/python/te-tag-query/api-example-update.py
|
b-bold/ThreatExchange
|
6f8d0dc803faccf576c9398569bb52d54a4f9a87
|
[
"BSD-3-Clause"
] | 997
|
2015-03-13T18:04:03.000Z
|
2022-03-30T12:09:10.000Z
|
api-reference-examples/python/te-tag-query/api-example-update.py
|
b-bold/ThreatExchange
|
6f8d0dc803faccf576c9398569bb52d54a4f9a87
|
[
"BSD-3-Clause"
] | 444
|
2015-03-26T17:28:49.000Z
|
2022-03-28T19:34:05.000Z
|
api-reference-examples/python/te-tag-query/api-example-update.py
|
b-bold/ThreatExchange
|
6f8d0dc803faccf576c9398569bb52d54a4f9a87
|
[
"BSD-3-Clause"
] | 294
|
2015-03-13T22:19:43.000Z
|
2022-03-30T08:42:45.000Z
|
#!/usr/bin/env python
# ================================================================
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ================================================================
import sys
import json
import TE
TE.Net.setAppTokenFromEnvName("TX_ACCESS_TOKEN")
postParams = {
"descriptor_id": "4036655176350945", # ID of the descriptor to be updated
"reactions": "INGESTED,IN_REVIEW",
}
showURLs = False
dryRun = False
validationErrorMessage, serverSideError, responseBody = TE.Net.updateThreatDescriptor(
postParams, showURLs, dryRun
)
if validationErrorMessage != None:
sys.stderr.write(validationErrorMessage + "\n")
sys.exit(1)
if serverSideError != None:
sys.stderr.write(str(serverSideError) + "\n")
sys.stderr.write(json.dumps(responseBody) + "\n")
sys.exit(1)
print(json.dumps(responseBody))
| 26.147059
| 86
| 0.620922
| 88
| 889
| 6.227273
| 0.602273
| 0.04927
| 0.076642
| 0.065693
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023286
| 0.130484
| 889
| 33
| 87
| 26.939394
| 0.68564
| 0.285714
| 0
| 0.095238
| 0
| 0
| 0.122222
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0.047619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
beb9a541895990f03cef5c41fda543323a1a2725
| 12,362
|
py
|
Python
|
loaner/web_app/backend/api/shelf_api_test.py
|
Bottom-Feeders/GrabNGO
|
5a467362e423700a5a7276a7fa9a47040033cfcf
|
[
"Apache-2.0"
] | null | null | null |
loaner/web_app/backend/api/shelf_api_test.py
|
Bottom-Feeders/GrabNGO
|
5a467362e423700a5a7276a7fa9a47040033cfcf
|
[
"Apache-2.0"
] | null | null | null |
loaner/web_app/backend/api/shelf_api_test.py
|
Bottom-Feeders/GrabNGO
|
5a467362e423700a5a7276a7fa9a47040033cfcf
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for backend.api.shelf_api."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import mock
from protorpc import message_types
from google.appengine.api import search
import endpoints
from loaner.web_app.backend.api import root_api # pylint: disable=unused-import
from loaner.web_app.backend.api import shelf_api
from loaner.web_app.backend.api.messages import shared_messages
from loaner.web_app.backend.api.messages import shelf_messages
from loaner.web_app.backend.models import device_model
from loaner.web_app.backend.models import shelf_model # pylint: disable=unused-import
from loaner.web_app.backend.testing import loanertest
class ShelfApiTest(parameterized.TestCase, loanertest.EndpointsTestCase):
"""Test for the Shelf API."""
def setUp(self):
super(ShelfApiTest, self).setUp()
self.patcher_directory = mock.patch(
'__main__.device_model.directory.DirectoryApiClient')
self.mock_directoryclass = self.patcher_directory.start()
self.addCleanup(self.patcher_directory.stop)
self.service = shelf_api.ShelfApi()
self.login_admin_endpoints_user()
self.patcher_xsrf = mock.patch(
'__main__.shelf_api.root_api.Service.check_xsrf_token')
self.shelf = shelf_model.Shelf.enroll(
user_email=loanertest.USER_EMAIL, location='NYC', capacity=10,
friendly_name='GnG', latitude=40.6892534, longitude=-74.0466891,
altitude=1.0)
shelf1 = shelf_model.Shelf.enroll(
user_email=loanertest.USER_EMAIL, location='MTV', capacity=20)
shelf2 = shelf_model.Shelf.enroll(
user_email=loanertest.USER_EMAIL, location='SAO', capacity=10)
self.disabled_shelf = shelf_model.Shelf.enroll(
user_email=loanertest.USER_EMAIL, location='SVL', capacity=10,
friendly_name='Bay')
self.disabled_shelf.disable(loanertest.USER_EMAIL)
self.shelf_locations = [
self.shelf.location, shelf1.location, shelf2.location,
self.disabled_shelf.location]
self.device1_key = device_model.Device(
serial_number='12345',
enrolled=True,
device_model='HP Chromebook 13 G1',
current_ou='/',
chrome_device_id='unique_id_1',
damaged=False,
).put()
self.device2_key = device_model.Device(
serial_number='54321',
enrolled=True,
device_model='HP Chromebook 13 G1',
current_ou='/',
chrome_device_id='unique_id_2',
damaged=False,
).put()
self.device3_key = device_model.Device(
serial_number='67890',
enrolled=True,
shelf=self.shelf.key,
device_model='HP Chromebook 13 G1',
current_ou='/',
chrome_device_id='unique_id_3',
damaged=False,
).put()
self.device4_key = device_model.Device(
serial_number='ABC123',
enrolled=True,
shelf=self.shelf.key,
device_model='HP Chromebook 13 G1',
current_ou='/',
chrome_device_id='unique_id_4',
damaged=False,
).put()
self.device_identifiers = [
self.device1_key.get().serial_number,
self.device2_key.get().serial_number,
self.device3_key.get().serial_number]
def tearDown(self):
super(ShelfApiTest, self).tearDown()
self.service = None
@mock.patch('__main__.root_api.Service.check_xsrf_token')
@mock.patch('__main__.shelf_model.Shelf.enroll')
def test_enroll(self, mock_enroll, mock_xsrf_token):
"""Test Enroll with mock methods."""
request = shelf_messages.EnrollShelfRequest(
location='nyc', capacity=100, friendly_name='test', latitude=12.5,
longitude=12.5, altitude=2.0, responsible_for_audit='precise',
audit_interval_override=33, audit_notification_enabled=True)
response = self.service.enroll(request)
self.assertEqual(mock_xsrf_token.call_count, 1)
self.assertIsInstance(response, message_types.VoidMessage)
def test_enroll_bad_request(self):
request = shelf_messages.EnrollShelfRequest(capacity=10)
with self.assertRaisesRegexp(
shelf_api.endpoints.BadRequestException,
'Entity has uninitialized properties'):
self.service.enroll(request)
request = shelf_messages.EnrollShelfRequest(
location='nyc', capacity=10, latitude=12.5)
with self.assertRaisesRegexp(
shelf_api.endpoints.BadRequestException,
shelf_model._LAT_LONG_MSG):
self.service.enroll(request)
@mock.patch('__main__.root_api.Service.check_xsrf_token')
def test_get_by_location(self, mock_xsrf_token):
request = shelf_messages.ShelfRequest(location='NYC')
response = self.service.get(request)
self.assertEqual(mock_xsrf_token.call_count, 1)
self.assertEqual(self.shelf.location, response.location)
self.assertEqual(self.shelf.friendly_name, response.friendly_name)
def test_disable_by_location(self):
request = shelf_messages.ShelfRequest(location='NYC')
self.assertTrue(self.shelf.enabled)
response = self.service.disable(request)
self.assertFalse(self.shelf.enabled)
self.assertIsInstance(response, message_types.VoidMessage)
@mock.patch('__main__.root_api.Service.check_xsrf_token')
def test_update_using_location(self, mock_xsrf_token):
request = shelf_messages.UpdateShelfRequest(
shelf_request=shelf_messages.ShelfRequest(location='NYC'),
location='NYC-9th')
response = self.service.update(request)
self.assertEqual(mock_xsrf_token.call_count, 1)
self.assertEqual(self.shelf.location, 'NYC-9th')
shelf = shelf_model.Shelf.get(friendly_name='GnG')
self.assertEqual(shelf.location, 'NYC-9th')
self.assertIsInstance(response, message_types.VoidMessage)
@parameterized.parameters(
(shelf_messages.Shelf(capacity=10), 2,),
(shelf_messages.Shelf(enabled=False), 1,),
(shelf_messages.Shelf(
query=shared_messages.SearchRequest(
query_string='enabled:True capacity:10')), 2,),
(shelf_messages.Shelf(
query=shared_messages.SearchRequest(
query_string='enabled:False')), 1,))
@mock.patch('__main__.root_api.Service.check_xsrf_token')
def test_list_shelves(self, request, response_length, mock_xsrf_token):
response = self.service.list_shelves(request)
self.assertEqual(mock_xsrf_token.call_count, 1)
self.assertEqual(response_length, len(response.shelves))
def test_list_shelves_invalid_page_size(self):
with self.assertRaises(endpoints.BadRequestException):
request = shelf_messages.Shelf(page_size=0)
self.service.list_shelves(request)
def test_list_shelves_with_search_constraints(self):
expressions = shared_messages.SearchExpression(expression='location')
expected_response = shelf_messages.ListShelfResponse(
shelves=[shelf_messages.Shelf(
location=self.shelf.location,
shelf_request=shelf_messages.ShelfRequest(
location=self.shelf.location,
urlsafe_key=self.shelf.key.urlsafe()))],
total_results=1, total_pages=1)
request = shelf_messages.Shelf(
query=shared_messages.SearchRequest(
query_string='location:NYC',
expressions=[expressions],
returned_fields=['location']))
response = self.service.list_shelves(request)
self.assertEqual(response, expected_response)
def test_list_shelves_with_offset(self):
previouse_shelf_locations = []
request = shelf_messages.Shelf(enabled=True, page_size=1, page_number=1)
response = self.service.list_shelves(request)
self.assertEqual(len(response.shelves), 1)
previouse_shelf_locations.append(response.shelves[0].location)
# Get next page results and make sure it's not the same as last.
request = shelf_messages.Shelf(enabled=True, page_size=1, page_number=2)
response = self.service.list_shelves(request)
self.assertEqual(len(response.shelves), 1)
self.assertNotIn(response.shelves[0], previouse_shelf_locations)
previouse_shelf_locations.append(response.shelves[0].location)
# Get next page results and make sure it's not the same as last 2.
request = shelf_messages.Shelf(enabled=True, page_size=1, page_number=3)
response = self.service.list_shelves(request)
self.assertEqual(len(response.shelves), 1)
self.assertNotIn(response.shelves[0], previouse_shelf_locations)
previouse_shelf_locations.append(response.shelves[0].location)
@mock.patch('__main__.root_api.Service.check_xsrf_token')
@mock.patch('__main__.shelf_api.logging.info')
def test_audit_using_shelf_location(self, mock_logging, mock_xsrf_token):
request = shelf_messages.ShelfAuditRequest(
shelf_request=shelf_messages.ShelfRequest(location='NYC'),
device_identifiers=self.device_identifiers)
response = self.service.audit(request)
self.assertEqual(mock_xsrf_token.call_count, 1)
mock_logging.assert_called()
for identifier in self.device_identifiers:
datastore_device = device_model.Device.get(serial_number=identifier)
self.assertEqual(datastore_device.shelf.get().location, 'NYC')
self.assertFalse(self.shelf.audit_requested)
self.assertEqual(self.shelf.last_audit_by, loanertest.SUPER_ADMIN_EMAIL)
self.assertIsInstance(response, message_types.VoidMessage)
def test_audit_invalid_device(self):
request = shelf_messages.ShelfAuditRequest(
shelf_request=shelf_messages.ShelfRequest(location='NYC'),
device_identifiers=['Invalid'])
with self.assertRaisesRegexp(
endpoints.NotFoundException,
shelf_api._DEVICE_DOES_NOT_EXIST_MSG % 'Invalid'):
self.service.audit(request)
@mock.patch.object(device_model.Device, 'search')
@mock.patch.object(shelf_api, 'get_shelf', autospec=True)
def test_audit_remove_devices(
self, mock_get_shelf, mock_model_device_search):
shelf = self.device2_key.get()
shelf.shelf = self.shelf.key
shelf.put()
mock_model_device_search.return_value = (
search.SearchResults(
results=[
search.ScoredDocument(
doc_id=self.device2_key.urlsafe()),
search.ScoredDocument(
doc_id=self.device3_key.urlsafe()),
search.ScoredDocument(
doc_id=self.device4_key.urlsafe())],
number_found=3))
mock_get_shelf.return_value = self.shelf
request = shelf_messages.ShelfAuditRequest(
shelf_request=shelf_messages.ShelfRequest(location=self.shelf.location),
device_identifiers=[self.device3_key.get().serial_number])
self.service.audit(request)
self.assertEqual(self.device3_key.get().shelf, self.shelf.key)
self.assertIsNone(self.device2_key.get().shelf)
self.assertIsNone(self.device4_key.get().shelf)
def test_get_shelf_urlsafe_key(self):
"""Test getting a shelf using the urlsafe key."""
request = shelf_messages.ShelfRequest(urlsafe_key=self.shelf.key.urlsafe())
shelf = shelf_api.get_shelf(request)
self.assertEqual(shelf, self.shelf)
def test_get_shelf_using_location(self):
"""Test getting a shelf using the location."""
request = shelf_messages.ShelfRequest(location=self.shelf.location)
shelf = shelf_api.get_shelf(request)
self.assertEqual(shelf, self.shelf)
def test_get_shelf_using_location_error(self):
"""Test getting a shelf with an invalid location."""
request = shelf_messages.ShelfRequest(location='Not_Valid')
with self.assertRaisesRegexp(
endpoints.NotFoundException,
shelf_api._SHELF_DOES_NOT_EXIST_MSG % request.location):
shelf_api.get_shelf(request)
if __name__ == '__main__':
loanertest.main()
| 41.905085
| 86
| 0.729494
| 1,535
| 12,362
| 5.612378
| 0.183062
| 0.043761
| 0.051074
| 0.037145
| 0.543587
| 0.50888
| 0.437028
| 0.36123
| 0.312362
| 0.279745
| 0
| 0.013983
| 0.166963
| 12,362
| 294
| 87
| 42.047619
| 0.822587
| 0.079113
| 0
| 0.35124
| 0
| 0
| 0.065626
| 0.033166
| 0
| 0
| 0
| 0
| 0.14876
| 1
| 0.070248
| false
| 0
| 0.061983
| 0
| 0.136364
| 0.004132
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bebb042aa5530a31d011f0dddb5b720502bac701
| 11,710
|
py
|
Python
|
ahrs/filters/complementary.py
|
jaluebbe/ahrs
|
4b4a33b1006e0d455a71ac8379a2697202361758
|
[
"MIT"
] | null | null | null |
ahrs/filters/complementary.py
|
jaluebbe/ahrs
|
4b4a33b1006e0d455a71ac8379a2697202361758
|
[
"MIT"
] | null | null | null |
ahrs/filters/complementary.py
|
jaluebbe/ahrs
|
4b4a33b1006e0d455a71ac8379a2697202361758
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Complementary Filter
====================
Attitude quaternion obtained with gyroscope and accelerometer-magnetometer
measurements, via complementary filter.
First, the current orientation is estimated at time :math:`t`, from a previous
orientation at time :math:`t-1`, and a given angular velocity,
:math:`\\omega`, in rad/s.
This orientation is computed by numerically integrating the angular velocity
and adding it to the previous orientation, which is known as an **attitude
propagation**.
.. math::
\\begin{array}{rcl}
\\mathbf{q}_\\omega &=& \\Big(\\mathbf{I}_4 + \\frac{\\Delta t}{2}\\boldsymbol\\Omega_t\\Big)\\mathbf{q}_{t-1} \\\\
&=&
\\begin{bmatrix}
1 & -\\frac{\\Delta t}{2}\\omega_x & -\\frac{\\Delta t}{2}\\omega_y & -\\frac{\\Delta t}{2}\\omega_z \\\\
\\frac{\\Delta t}{2}\\omega_x & 1 & \\frac{\\Delta t}{2}\\omega_z & -\\frac{\\Delta t}{2}\\omega_y \\\\
\\frac{\\Delta t}{2}\\omega_y & -\\frac{\\Delta t}{2}\\omega_z & 1 & \\frac{\\Delta t}{2}\\omega_x \\\\
\\frac{\\Delta t}{2}\\omega_z & \\frac{\\Delta t}{2}\\omega_y & -\\frac{\\Delta t}{2}\\omega_x & 1
\\end{bmatrix}
\\begin{bmatrix}q_w \\\\ q_x \\\\ q_y \\\\ q_z \\end{bmatrix} \\\\
&=&
\\begin{bmatrix}
q_w - \\frac{\\Delta t}{2} \\omega_x q_x - \\frac{\\Delta t}{2} \\omega_y q_y - \\frac{\\Delta t}{2} \\omega_z q_z\\\\
q_x + \\frac{\\Delta t}{2} \\omega_x q_w - \\frac{\\Delta t}{2} \\omega_y q_z + \\frac{\\Delta t}{2} \\omega_z q_y\\\\
q_y + \\frac{\\Delta t}{2} \\omega_x q_z + \\frac{\\Delta t}{2} \\omega_y q_w - \\frac{\\Delta t}{2} \\omega_z q_x\\\\
q_z - \\frac{\\Delta t}{2} \\omega_x q_y + \\frac{\\Delta t}{2} \\omega_y q_x + \\frac{\\Delta t}{2} \\omega_z q_w
\\end{bmatrix}
\\end{array}
Secondly, the *tilt* is computed from the accelerometer measurements as:
.. math::
\\begin{array}{rcl}
\\theta &=& \\mathrm{arctan2}(a_y, a_z) \\\\
\\phi &=& \\mathrm{arctan2}\\big(-a_x, \\sqrt{a_y^2+a_z^2}\\big)
\\end{array}
Only the pitch, :math:`\\phi`, and roll, :math:`\\theta`, angles are computed,
leaving the yaw angle, :math:`\\psi` equal to zero.
If a magnetometer sample is available, the yaw angle can be computed. First
compensate the measurement using the *tilt*:
.. math::
\\begin{array}{rcl}
\\mathbf{b} &=&
\\begin{bmatrix}
\\cos\\theta & \\sin\\theta\\sin\\phi & \\sin\\theta\\cos\\phi \\\\
0 & \\cos\\phi & -\\sin\\phi \\\\
-\\sin\\theta & \\cos\\theta\\sin\\phi & \\cos\\theta\\cos\\phi
\\end{bmatrix}
\\begin{bmatrix}m_x \\\\ m_y \\\\ m_z\\end{bmatrix} \\\\
\\begin{bmatrix}b_x \\\\ b_y \\\\ b_z\\end{bmatrix} &=&
\\begin{bmatrix}
m_x\\cos\\theta + m_y\\sin\\theta\\sin\\phi + m_z\\sin\\theta\\cos\\phi \\\\
m_y\\cos\\phi - m_z\\sin\\phi \\\\
-m_x\\sin\\theta + m_y\\cos\\theta\\sin\\phi + m_z\\cos\\theta\\cos\\phi
\\end{bmatrix}
\\end{array}
Then, the yaw angle, :math:`\\psi`, is obtained as:
.. math::
\\begin{array}{rcl}
\\psi &=& \\mathrm{arctan2}(-b_y, b_x) \\\\
&=& \\mathrm{arctan2}\\big(m_z\\sin\\phi - m_y\\cos\\phi, \\; m_x\\cos\\theta + \\sin\\theta(m_y\\sin\\phi + m_z\\cos\\phi)\\big)
\\end{array}
We transform the roll-pitch-yaw angles to a quaternion representation:
.. math::
\\mathbf{q}_{am} =
\\begin{pmatrix}q_w\\\\q_x\\\\q_y\\\\q_z\\end{pmatrix} =
\\begin{pmatrix}
\\cos\\Big(\\frac{\\phi}{2}\\Big)\\cos\\Big(\\frac{\\theta}{2}\\Big)\\cos\\Big(\\frac{\\psi}{2}\\Big) + \\sin\\Big(\\frac{\\phi}{2}\\Big)\\sin\\Big(\\frac{\\theta}{2}\\Big)\\sin\\Big(\\frac{\\psi}{2}\\Big) \\\\
\\sin\\Big(\\frac{\\phi}{2}\\Big)\\cos\\Big(\\frac{\\theta}{2}\\Big)\\cos\\Big(\\frac{\\psi}{2}\\Big) - \\cos\\Big(\\frac{\\phi}{2}\\Big)\\sin\\Big(\\frac{\\theta}{2}\\Big)\\sin\\Big(\\frac{\\psi}{2}\\Big) \\\\
\\cos\\Big(\\frac{\\phi}{2}\\Big)\\sin\\Big(\\frac{\\theta}{2}\\Big)\\cos\\Big(\\frac{\\psi}{2}\\Big) + \\sin\\Big(\\frac{\\phi}{2}\\Big)\\cos\\Big(\\frac{\\theta}{2}\\Big)\\sin\\Big(\\frac{\\psi}{2}\\Big) \\\\
\\cos\\Big(\\frac{\\phi}{2}\\Big)\\cos\\Big(\\frac{\\theta}{2}\\Big)\\sin\\Big(\\frac{\\psi}{2}\\Big) - \\sin\\Big(\\frac{\\phi}{2}\\Big)\\sin\\Big(\\frac{\\theta}{2}\\Big)\\cos\\Big(\\frac{\\psi}{2}\\Big)
\\end{pmatrix}
Finally, after each orientation is estimated independently, they are fused with
the complementary filter.
.. math::
\\mathbf{q} = (1 - \\alpha) \\mathbf{q}_\\omega + \\alpha\\mathbf{q}_{am}
where :math:`\\mathbf{q}_\\omega` is the attitude estimated from the gyroscope,
:math:`\\mathbf{q}_{am}` is the attitude estimated from the accelerometer and
the magnetometer, and :math:`\\alpha` is the gain of the filter.
The filter gain must be a floating value within the range :math:`[0.0, 1.0]`.
It can be seen that when :math:`\\alpha=1`, the attitude is estimated entirely
with the accelerometer and the magnetometer. When :math:`\\alpha=0`, it is
estimated solely with the gyroscope. The values within the range decide how
much of each estimation is "blended" into the quaternion.
This is actually a simple implementation of `LERP
<https://en.wikipedia.org/wiki/Linear_interpolation>`_ commonly used to
linearly interpolate quaternions with small differences between them.
"""
import numpy as np
from ..common.orientation import ecompass
class Complementary:
"""
Complementary filter for attitude estimation as quaternion.
Parameters
----------
gyr : numpy.ndarray, default: None
N-by-3 array with measurements of angular velocity, in rad/s.
acc : numpy.ndarray, default: None
N-by-3 array with measurements of acceleration, in m/s^2.
mag : numpy.ndarray, default: None
N-by-3 array with measurements of magnetic field, in mT.
frequency : float, default: 100.0
Sampling frequency in Hertz.
Dt : float, default: 0.01
Sampling step in seconds. Inverse of sampling frequency. Not required
if ``frequency`` value is given.
gain : float, default: 0.1
Filter gain.
q0 : numpy.ndarray, default: None
Initial orientation, as a versor (normalized quaternion).
Raises
------
ValueError
When the dimensions of the input arrays ``acc``, ``gyr``, or ``mag`` are not equal.
"""
def __init__(self,
gyr: np.ndarray = None,
acc: np.ndarray = None,
mag: np.ndarray = None,
frequency: float = 100.0,
gain: float = 0.1,
**kwargs):
self.gyr: np.ndarray = gyr
self.acc: np.ndarray = acc
self.mag: np.ndarray = mag
self.frequency: float = frequency
self.gain: float = gain
if not(0.0 <= self.gain <= 1.0):
raise ValueError(f"Filter gain must be in the range [0, 1]. Got {self.gain}")
self.Dt: float = kwargs.get('Dt', 1.0/self.frequency)
self.q0: np.ndarray = kwargs.get('q0')
# Process of given data
if self.gyr is not None and self.acc is not None:
self.Q = self._compute_all()
def _compute_all(self) -> np.ndarray:
"""
Estimate the quaternions given all data
Attributes ``gyr``, ``acc`` and, optionally, ``mag`` must contain data.
Returns
-------
Q : numpy.ndarray
M-by-4 Array with all estimated quaternions, where M is the number
of samples.
"""
if self.acc.shape != self.gyr.shape:
raise ValueError("acc and gyr are not the same size")
num_samples = len(self.acc)
Q = np.zeros((num_samples, 4))
if self.mag is None:
self.mag = [None]*num_samples
else:
if self.mag.shape != self.gyr.shape:
raise ValueError("mag and gyr are not the same size")
Q[0] = self.am_estimation(self.acc[0], self.mag[0]) if self.q0 is None else self.q0.copy()
for t in range(1, num_samples):
Q[t] = self.update(Q[t-1], self.gyr[t], self.acc[t], self.mag[t])
return Q
def attitude_propagation(self, q: np.ndarray, omega: np.ndarray, dt: float) -> np.ndarray:
"""
Attitude propagation of the orientation.
Estimate the current orientation at time :math:`t`, from a given
orientation at time :math:`t-1` and a given angular velocity,
:math:`\\omega`, in rad/s.
It is computed by numerically integrating the angular velocity and
adding it to the previous orientation.
Parameters
----------
q : numpy.ndarray
A-priori quaternion.
omega : numpy.ndarray
Tri-axial angular velocity, in rad/s.
dt : float
Time step, in seconds, between consecutive Quaternions.
Returns
-------
q_omega : numpy.ndarray
Estimated orientation, as quaternion.
"""
w = -0.5*dt*omega
A = np.array([
[1.0, -w[0], -w[1], -w[2]],
[w[0], 1.0, w[2], -w[1]],
[w[1], -w[2], 1.0, w[0]],
[w[2], w[1], -w[0], 1.0]])
q_omega = A @ q
return q_omega / np.linalg.norm(q_omega)
def am_estimation(self, acc: np.ndarray, mag: np.ndarray = None) -> np.ndarray:
"""
Attitude estimation from an Accelerometer-Magnetometer architecture.
Parameters
----------
acc : numpy.ndarray
Tri-axial sample of the accelerometer.
mag : numpy.ndarray, default: None
Tri-axial sample of the magnetometer.
Returns
-------
q_am : numpy.ndarray
Estimated attitude.
"""
return ecompass(acc, mag, frame='NED', representation='quaternion')
def update(self, q: np.ndarray, gyr: np.ndarray, acc: np.ndarray, mag: np.ndarray = None, dt: float = None) -> np.ndarray:
"""
Attitude Estimation from given measurements and previous orientation.
The new orientation is first estimated with the angular velocity, then
another orientation is computed using the accelerometers and
magnetometers. The magnetometer is optional.
Each orientation is estimated independently and fused with a
complementary filter.
.. math::
\\mathbf{q} = (1 - \\alpha) \\mathbf{q}_\\omega + \\alpha\\mathbf{q}_{am}
Parameters
----------
q : numpy.ndarray
A-priori quaternion.
gyr : numpy.ndarray
Sample of tri-axial Gyroscope in rad/s.
acc : numpy.ndarray
Sample of tri-axial Accelerometer in m/s^2.
mag : numpy.ndarray, default: None
Sample of tri-axial Magnetometer in uT.
dt : float, default: None
Time step, in seconds, between consecutive Quaternions.
Returns
-------
q : numpy.ndarray
Estimated quaternion.
"""
dt = self.Dt if dt is None else dt
if gyr is None or not np.linalg.norm(gyr) > 0:
return q
q_omega = self.attitude_propagation(q, gyr, dt)
q_am = self.am_estimation(acc, mag)
# Complementary Estimation
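# If the two quaternions lie in opposite hemispheres (the norm of their sum
# is below sqrt(2), i.e. their dot product is negative), blend with the
# antipodal -q_am instead, to avoid cancellation between equivalent attitudes.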
if np.linalg.norm(q_omega + q_am) < np.sqrt(2):
q = (1.0 - self.gain)*q_omega - self.gain*q_am
else:
q = (1.0 - self.gain)*q_omega + self.gain*q_am
return q/np.linalg.norm(q)
| 41.232394
| 219
| 0.566695
| 1,644
| 11,710
| 3.965328
| 0.145377
| 0.034515
| 0.038349
| 0.042184
| 0.433809
| 0.353122
| 0.283019
| 0.23025
| 0.218592
| 0.192668
| 0
| 0.016172
| 0.255423
| 11,710
| 283
| 220
| 41.378092
| 0.731506
| 0.662254
| 0
| 0.035088
| 0
| 0
| 0.052731
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.087719
| false
| 0.035088
| 0.035088
| 0
| 0.22807
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bebb7eff935771339795abf6b86ab3ed10b32cc3
| 3,550
|
py
|
Python
|
tests/common/models/test_execution.py
|
angry-tony/ceph-lcm-decapod
|
535944d3ee384c3a7c4af82f74041b0a7792433f
|
[
"Apache-2.0"
] | 41
|
2016-11-03T16:40:17.000Z
|
2019-05-23T08:39:17.000Z
|
tests/common/models/test_execution.py
|
Mirantis/ceph-lcm
|
fad9bad0b94f2ef608362953583b10a54a841d24
|
[
"Apache-2.0"
] | 30
|
2016-10-14T10:54:46.000Z
|
2017-10-20T15:58:01.000Z
|
tests/common/models/test_execution.py
|
angry-tony/ceph-lcm-decapod
|
535944d3ee384c3a7c4af82f74041b0a7792433f
|
[
"Apache-2.0"
] | 28
|
2016-09-17T01:17:36.000Z
|
2019-07-05T03:32:54.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for decapod_common.models.execution."""
import pytest
from decapod_common.models import execution
def test_create(new_execution, new_pcmodel, pymongo_connection):
db_model = pymongo_connection.db.execution.find_one(
{"_id": new_execution._id}
)
assert db_model
assert new_execution.model_id == db_model["model_id"]
assert new_execution.version == db_model["version"]
assert new_execution.time_created == db_model["time_created"]
assert new_execution.time_deleted == db_model["time_deleted"]
assert new_execution.initiator_id == db_model["initiator_id"]
assert new_execution.playbook_configuration_model_id == \
db_model["pc_model_id"]
assert new_execution.playbook_configuration_version == \
db_model["pc_version"]
assert new_execution.state.name == db_model["state"]
assert new_execution.state == execution.ExecutionState.created
assert new_execution.playbook_configuration_model_id == \
new_pcmodel.model_id
assert new_execution.playbook_configuration_version == \
new_pcmodel.version
@pytest.mark.parametrize("state", execution.ExecutionState)
def test_change_state_ok(state, new_execution):
new_execution.state = state
new_execution.save()
assert new_execution.state == state
@pytest.mark.parametrize("state", (
"", "changed", "started", 0, None, -1.0, [], {}, object(), set()
))
def test_change_state_fail(state, new_execution):
with pytest.raises(ValueError):
new_execution.state = state
@pytest.mark.parametrize("state", execution.ExecutionState)
def test_api_response(state, new_pcmodel, new_execution):
new_execution.state = state
new_execution.save()
assert new_execution.make_api_structure() == {
"id": new_execution.model_id,
"initiator_id": new_execution.initiator_id,
"time_deleted": new_execution.time_deleted,
"time_updated": new_execution.time_created,
"model": execution.ExecutionModel.MODEL_NAME,
"version": 2,
"data": {
"playbook_configuration": {
"id": new_pcmodel.model_id,
"version": new_pcmodel.version,
"playbook_name": new_pcmodel.playbook_id
},
"state": state.name
}
}
def test_getting_logfile(new_execution, execution_log_storage):
new_execution.logfile
execution_log_storage.get.assert_called_once_with(new_execution.model_id)
def test_create_logfile(new_execution, execution_log_storage):
new_execution.new_logfile.write("1")
execution_log_storage.delete.assert_called_once_with(
new_execution.model_id
)
execution_log_storage.new_file.assert_called_once_with(
new_execution.model_id,
filename="{0}.log".format(new_execution.model_id),
content_type="text/plain"
)
execution_log_storage.new_file().write.assert_called_once_with("1")
| 33.809524
| 77
| 0.719155
| 452
| 3,550
| 5.338496
| 0.29646
| 0.174057
| 0.096975
| 0.047244
| 0.343556
| 0.299627
| 0.298798
| 0.260671
| 0.056361
| 0.056361
| 0
| 0.005498
| 0.180282
| 3,550
| 104
| 78
| 34.134615
| 0.823711
| 0.174366
| 0
| 0.164179
| 0
| 0
| 0.079354
| 0.007558
| 0
| 0
| 0
| 0
| 0.268657
| 1
| 0.089552
| false
| 0
| 0.029851
| 0
| 0.119403
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bebf0f2b55c9070eb2aa8dd30568a2e408a3e498
| 842
|
py
|
Python
|
Problems/Study Plans/Dynamic Programming/Dynamic Programming I/07_delete_and_earn.py
|
andor2718/LeetCode
|
59874f49085818e6da751f1cc26867b31079d35d
|
[
"BSD-3-Clause"
] | 1
|
2022-01-17T19:51:15.000Z
|
2022-01-17T19:51:15.000Z
|
Problems/Study Plans/Dynamic Programming/Dynamic Programming I/07_delete_and_earn.py
|
andor2718/LeetCode
|
59874f49085818e6da751f1cc26867b31079d35d
|
[
"BSD-3-Clause"
] | null | null | null |
Problems/Study Plans/Dynamic Programming/Dynamic Programming I/07_delete_and_earn.py
|
andor2718/LeetCode
|
59874f49085818e6da751f1cc26867b31079d35d
|
[
"BSD-3-Clause"
] | null | null | null |
# https://leetcode.com/problems/delete-and-earn/
class Solution:
def deleteAndEarn(self, nums: list[int]) -> int:
num_profits = dict()
for num in nums:
num_profits[num] = num_profits.get(num, 0) + num
sorted_nums = sorted(num_profits.keys())
second_last_profit = 0
last_profit = num_profits[sorted_nums[0]]
for idx in range(1, len(sorted_nums)):
profit_with_curr_num = num_profits[sorted_nums[idx]]
if sorted_nums[idx - 1] == sorted_nums[idx] - 1:
curr_profit = max(last_profit,
second_last_profit + profit_with_curr_num)
else:
curr_profit = last_profit + profit_with_curr_num
second_last_profit, last_profit = last_profit, curr_profit
return last_profit
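# Hypothetical driver (not part of the original solution) to sanity-check the
# DP above with the LeetCode examples: [3, 4, 2] -> 6 and [2, 2, 3, 3, 3, 4] -> 9.
if __name__ == '__main__':
    solver = Solution()
    assert solver.deleteAndEarn([3, 4, 2]) == 6
    assert solver.deleteAndEarn([2, 2, 3, 3, 3, 4]) == 9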
| 42.1
| 76
| 0.609264
| 108
| 842
| 4.416667
| 0.342593
| 0.188679
| 0.100629
| 0.106918
| 0.113208
| 0.113208
| 0
| 0
| 0
| 0
| 0
| 0.010204
| 0.301663
| 842
| 19
| 77
| 44.315789
| 0.80102
| 0.054632
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0
| 0
| 0.176471
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bec160209ec5f54fda5f5e3628b149076a57302b
| 7,019
|
py
|
Python
|
pytorch_keras_converter/API.py
|
sonibla/pytorch_keras_converter
|
21925b67b6eb3cbbfa8eb6d33f682d57dafd357d
|
[
"MIT"
] | 17
|
2019-10-01T14:14:18.000Z
|
2021-04-25T13:32:24.000Z
|
pytorch_keras_converter/API.py
|
sonibla/pytorch_keras_converter
|
21925b67b6eb3cbbfa8eb6d33f682d57dafd357d
|
[
"MIT"
] | null | null | null |
pytorch_keras_converter/API.py
|
sonibla/pytorch_keras_converter
|
21925b67b6eb3cbbfa8eb6d33f682d57dafd357d
|
[
"MIT"
] | 2
|
2019-10-01T14:02:43.000Z
|
2019-10-01T14:14:19.000Z
|
"""
Simple API to convert models between PyTorch and Keras
(Conversions from Keras to PyTorch aren't implemented)
"""
from . import utility
from . import tests
from . import io_utils as utils
import tensorflow
def convert(model,
input_shape,
weights=True,
quiet=True,
ignore_tests=False,
input_range=None,
save=None,
filename=None,
directory=None):
"""
Conversion between PyTorch and Keras
(Conversions from Keras to PyTorch aren't implemented)
Arguments:
-model:
A Keras or PyTorch model or layer to convert
-input_shape:
Input shape (list, tuple or int), without batchsize.
-weights (bool):
Also convert weights. If set to false, only convert model
architecture
-quiet (bool):
If set to False, a progress bar and some messages will appear
-ignore_tests (bool):
If tests should be ignored.
If set to True, the converted model will
still be tested as a safety check. If models are not identical, it will
only print a warning.
If set to False, and models are not identical, RuntimeWarning will
be raised
If weights is False, tests are automatically ignored
-input_range:
Optional.
A list of 2 elements containing max and min values to give as
input to the model when performing the tests. If None, models will
be tested on samples from the "standard normal" distribution.
-save:
If model should be exported to a hdf5 file.
-filename:
Filename to give to model's hdf5 file. If filename is not None and
save is not False, then save will automatically be set to True
-directory:
Where to save model's hdf5 file. If directory is not None and
save is not False, then save will automatically be set to True
Raises:
-RuntimeWarning:
If converted and original model aren't identical, and ignore_tests
is False
Returns:
If model has been exported to a file, it will return the name of the
file
Else, it returns the converted model
"""
if (filename is not None or directory is not None) and save is None:
save = True
if save is None:
save = False
if not weights:
ignore_tests = True
if not quiet:
print('\nConversion...')
# Converting:
newModel = utility.convert(model=utility.LayerRepresentation(model),
input_size=input_shape,
weights=weights,
quiet=quiet)
# Actually, newModel is a LayerRepresentation object
# Equivalents:
torchModel = newModel.equivalent['torch']
kerasModel = newModel.equivalent['keras']
if not quiet:
print('Automatically testing converted model reliability...\n')
# Checking converted model reliability
tested = False
try:
meanSquaredError = tests.comparison(model1=torchModel,
model2=kerasModel,
input_shape=input_shape,
input_range=input_range,
quiet=quiet)
tested = True
except tensorflow.errors.InvalidArgumentError:
print("Warning: tests unavailable!")
if tested and meanSquaredError > 0.0001:
if ignore_tests:
print("Warning: converted and original models aren't identical !\
(mean squared error: {})".format(meanSquaredError))
else:
raise RuntimeWarning("Original and converted model do not match !\
\nOn random input data, outputs showed a mean squared error of {} (it should \
be below 1e-10)".format(meanSquaredError))
elif not quiet and tested:
print('\n Original and converted models match !\nMean squared err\
or : {}'.format(meanSquaredError))
if save:
if not quiet:
print('Saving model...')
defaultName = 'conversion_{}'.format(newModel.name)
if filename is None:
filename = defaultName
# Formatting filename so that we don't overwrite any existing file
file = utils.formatFilename(filename,
directory)
# Freezing Keras model (trainable = False everywhere)
utils.freeze(kerasModel)
# Save the entire model
kerasModel.save(file + '.h5')
if not quiet:
print('Done !')
return file + '.h5'
if not quiet:
print('Done !')
return kerasModel
def convert_and_save(model,
input_shape,
weights=True,
quiet=True,
ignore_tests=False,
input_range=None,
filename=None,
directory=None):
"""
Conversion between PyTorch and Keras, and automatic save
(Conversions from Keras to PyTorch aren't implemented)
Arguments:
-model:
A Keras or PyTorch model or layer to convert
-input_shape:
Input shape (list, tuple or int), without batchsize.
-weights (bool):
Also convert weights. If set to false, only convert model
architecture
-quiet (bool):
If set to False, a progress bar and some messages will appear
-ignore_tests (bool):
If tests should be ignored.
If set to True, the converted model will
still be tested as a safety check. If models are not identical, it will
only print a warning.
If set to False, and models are not identical, RuntimeWarning will
be raised
If weights is False, tests are automatically ignored
-input_range:
Optional.
A list of 2 elements containing max and min values to give as
input to the model when performing the tests. If None, models will
be tested on samples from the "standard normal" distribution.
-filename:
Filename to give to model's hdf5 file. If filename is not None and
save is not False, then save will automatically be set to True
-directory:
Where to save model's hdf5 file. If directory is not None and
save is not False, then save will automatically be set to True
Returns:
Name of created hdf5 file
"""
return convert(model=model,
input_shape=input_shape,
weights=weights,
quiet=quiet,
ignore_tests=ignore_tests,
input_range=input_range,
save=True,
filename=filename,
directory=directory)
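if __name__ == '__main__':
    # Hypothetical smoke test, not part of the original module: a tiny
    # convolutional network stands in for a real model, and the input shape
    # (3, 32, 32) is an arbitrary illustrative choice. Whether every layer
    # type is supported depends on the converter itself.
    import torch.nn as nn
    demo_model = nn.Sequential(
        nn.Conv2d(3, 8, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.Flatten(),
        nn.Linear(8 * 32 * 32, 10),
    )
    demo_file = convert_and_save(demo_model,
                                 input_shape=(3, 32, 32),
                                 quiet=False,
                                 filename='demo_model_keras')
    print('Converted model written to', demo_file)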
| 33.42381
| 78
| 0.581992
| 801
| 7,019
| 5.061174
| 0.213483
| 0.027134
| 0.01036
| 0.0148
| 0.561667
| 0.556981
| 0.540207
| 0.533547
| 0.518254
| 0.518254
| 0
| 0.004466
| 0.362017
| 7,019
| 209
| 79
| 33.583732
| 0.900849
| 0.496794
| 0
| 0.349398
| 0
| 0
| 0.0475
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.024096
| false
| 0
| 0.048193
| 0
| 0.108434
| 0.096386
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bec1a22fa74c5c4f594a0551d336d70522ed93f7
| 1,734
|
py
|
Python
|
examples/enable_notifications.py
|
kjwill/bleak
|
7e0fdae6c0f6a78713e5984c2840666e0c38c3f3
|
[
"MIT"
] | null | null | null |
examples/enable_notifications.py
|
kjwill/bleak
|
7e0fdae6c0f6a78713e5984c2840666e0c38c3f3
|
[
"MIT"
] | null | null | null |
examples/enable_notifications.py
|
kjwill/bleak
|
7e0fdae6c0f6a78713e5984c2840666e0c38c3f3
|
[
"MIT"
] | 1
|
2021-09-15T18:53:58.000Z
|
2021-09-15T18:53:58.000Z
|
# -*- coding: utf-8 -*-
"""
Notifications
-------------
Example showing how to add notifications to a characteristic and handle the responses.
Updated on 2019-07-03 by hbldh <henrik.blidh@gmail.com>
"""
import sys
import logging
import asyncio
import platform
from bleak import BleakClient
from bleak import _logger as logger
CHARACTERISTIC_UUID = "f000aa65-0451-4000-b000-000000000000" # <--- Change to the characteristic you want to enable notifications from.
ADDRESS = (
"24:71:89:cc:09:05" # <--- Change to your device's address here if you are using Windows or Linux
if platform.system() != "Darwin"
else "B9EA5233-37EF-4DD6-87A8-2A875E821C46" # <--- Change to your device's address here if you are using macOS
)
if len(sys.argv) == 3:
ADDRESS = sys.argv[1]
CHARACTERISTIC_UUID = sys.argv[2]
def notification_handler(sender, data):
"""Simple notification handler which prints the data received."""
print("{0}: {1}".format(sender, data))
async def run(address, debug=False):
if debug:
import sys
l = logging.getLogger("asyncio")
l.setLevel(logging.DEBUG)
h = logging.StreamHandler(sys.stdout)
h.setLevel(logging.DEBUG)
l.addHandler(h)
logger.addHandler(h)
async with BleakClient(address) as client:
logger.info(f"Connected: {client.is_connected}")
await client.start_notify(CHARACTERISTIC_UUID, notification_handler)
await asyncio.sleep(5.0)
await client.stop_notify(CHARACTERISTIC_UUID)
if __name__ == "__main__":
import os
os.environ["PYTHONASYNCIODEBUG"] = str(1)
loop = asyncio.get_event_loop()
# loop.set_debug(True)
loop.run_until_complete(run(ADDRESS, True))
| 27.52381
| 136
| 0.689158
| 230
| 1,734
| 5.095652
| 0.543478
| 0.061433
| 0.025597
| 0.030717
| 0.073379
| 0.073379
| 0.073379
| 0.073379
| 0.073379
| 0.073379
| 0
| 0.054363
| 0.193772
| 1,734
| 62
| 137
| 27.967742
| 0.783977
| 0.282584
| 0
| 0.055556
| 0
| 0
| 0.137031
| 0.075856
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027778
| false
| 0
| 0.222222
| 0
| 0.25
| 0.027778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bec37dd307106b82f4f0bcaf14227eb2f2a4ba93
| 1,974
|
py
|
Python
|
dialogflow/history2xls.py
|
ray-hrst/temi-tools
|
8efb1e1af93a41bd98fe0ee8c1fd6fb44e788341
|
[
"MIT"
] | 1
|
2020-06-04T19:30:57.000Z
|
2020-06-04T19:30:57.000Z
|
dialogflow/history2xls.py
|
ray-hrst/temi-tools
|
8efb1e1af93a41bd98fe0ee8c1fd6fb44e788341
|
[
"MIT"
] | 1
|
2020-01-14T04:16:12.000Z
|
2020-01-14T04:16:12.000Z
|
dialogflow/history2xls.py
|
ray-hrst/temi-tools
|
8efb1e1af93a41bd98fe0ee8c1fd6fb44e788341
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Convert Dialogflow history to spreadsheet
User must manually copy the history from the browser and save this in a text file.
This reads the textfile, parses the data, and saves it to a spreadsheet.
Example training sample:
USER
サワディカ
Nov 4, 11:19 PM
AGENT
No matched intent
Nov 4, 11:19 PM
more_vert
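Usage sketch (``history.txt`` is a placeholder for the copied history text
file; the output spreadsheet name is derived from it):
    python history2xls.py history.txt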
"""
import argparse
import os
from simple_report import SimpleReport
# constants
FIELDS = ["Date", "User", "Agent"]
if __name__ == "__main__":
# collect arguments
PARSER = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
PARSER.add_argument("filename", help="History text file")
ARGS = PARSER.parse_args()
# generate report
filename, file_extension = os.path.splitext(ARGS.filename)
REPORT = SimpleReport(filename, FIELDS)
# step each line of history text file
with open(ARGS.filename, 'r') as fp:
num_lines = sum(1 for line in open(ARGS.filename))
rows = int(num_lines / 7)
print("Reading {} lines of text.".format(num_lines))
print("Writing {} rows.".format(rows))
for row in range(1, rows):
user_utterance = fp.readline().strip() # USER UTTERANCE
date = fp.readline().strip() # DATE
agent_intent = fp.readline().strip() # AGENT INTENT
date = fp.readline().strip() # DATE
_ = fp.readline().strip() # 'more_vert'
utterance = user_utterance.split("USER", 1)[1]
intent = agent_intent.split("AGENT", 1)[1]
if not intent:
intent = "Intent found"
print("[{}] {} {} {}".format(row, date, utterance, intent))
# add row to report
REPORT.add("Date", row, date, date)
REPORT.add("User", row, utterance)
REPORT.add("Agent", row, intent)
REPORT.close()
| 27.802817
| 82
| 0.609422
| 241
| 1,974
| 4.879668
| 0.448133
| 0.042517
| 0.063776
| 0.048469
| 0.056122
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01324
| 0.27305
| 1,974
| 70
| 83
| 28.2
| 0.806272
| 0.251266
| 0
| 0.0625
| 0
| 0
| 0.092593
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.09375
| 0
| 0.09375
| 0.09375
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bec393e2a78f7bd8032716c650f164a7178aab68
| 25,176
|
py
|
Python
|
recognition/views.py
|
usathe71-u/Attendance-System-Face-Recognition
|
c73f660a6089e8ca9dd5c473efcf2bc78f13a207
|
[
"Apache-2.0"
] | 3
|
2021-05-31T21:11:38.000Z
|
2021-07-22T18:29:47.000Z
|
recognition/views.py
|
usathe71-u/Attendance-System-Face-Recognition
|
c73f660a6089e8ca9dd5c473efcf2bc78f13a207
|
[
"Apache-2.0"
] | null | null | null |
recognition/views.py
|
usathe71-u/Attendance-System-Face-Recognition
|
c73f660a6089e8ca9dd5c473efcf2bc78f13a207
|
[
"Apache-2.0"
] | null | null | null |
from django.shortcuts import render,redirect
from .forms import usernameForm,DateForm,UsernameAndDateForm, DateForm_2
from django.contrib import messages
from django.contrib.auth.models import User
import cv2
import dlib
import imutils
from imutils import face_utils
from imutils.video import VideoStream
from imutils.face_utils import rect_to_bb
from imutils.face_utils import FaceAligner
import time
from attendance_system_facial_recognition.settings import BASE_DIR
import os
import face_recognition
from face_recognition.face_recognition_cli import image_files_in_folder
import pickle
from sklearn.preprocessing import LabelEncoder
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
import numpy as np
from django.contrib.auth.decorators import login_required
import matplotlib as mpl
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import datetime
from django_pandas.io import read_frame
from users.models import Present, Time
import seaborn as sns
import pandas as pd
from django.db.models import Count
#import mpld3
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
from matplotlib import rcParams
import math
mpl.use('Agg')
#utility functions:
def username_present(username):
if User.objects.filter(username=username).exists():
return True
return False
def create_dataset(username):
id = username
if(os.path.exists('face_recognition_data/training_dataset/{}/'.format(id))==False):
os.makedirs('face_recognition_data/training_dataset/{}/'.format(id))
directory='face_recognition_data/training_dataset/{}/'.format(id)
# Detect face
#Loading the HOG face detector and the shape predictor for alignment
print("[INFO] Loading the facial detector")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('face_recognition_data/shape_predictor_68_face_landmarks.dat') #Add path to the shape predictor ######CHANGE TO RELATIVE PATH LATER
fa = FaceAligner(predictor , desiredFaceWidth = 96)
#capture images from the webcam and process and detect the face
# Initialize the video stream
print("[INFO] Initializing Video stream")
vs = VideoStream(src=0).start()
#time.sleep(2.0) ####CHECK######
# Our identifier
# We will put the id here and we will store the id with a face, so that later we can identify whose face it is
# Our dataset naming counter
sampleNum = 0
# Capturing the faces one by one and detect the faces and showing it on the window
while(True):
# Capturing the image
#vs.read each frame
frame = vs.read()
#Resize each image
frame = imutils.resize(frame ,width = 800)
#the returned img is a colored image but for the classifier to work we need a greyscale image
#to convert
gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#To store the faces
#This will detect all the images in the current frame, and it will return the coordinates of the faces
#Takes in image and some other parameter for accurate result
faces = detector(gray_frame,0)
#In above 'faces' variable there can be multiple faces so we have to get each and every face and draw a rectangle around it.
for face in faces:
print("inside for loop")
(x,y,w,h) = face_utils.rect_to_bb(face)
face_aligned = fa.align(frame,gray_frame,face)
# Whenever the program captures the face, we will write it into a folder
# Before capturing the face, we need to tell the script whose face it is
# For that we will need an identifier, here we call it id
# So now we captured a face, we need to write it in a file
sampleNum = sampleNum+1
# Saving the image dataset, but only the face part, cropping the rest
if face is None:
print("face is none")
continue
cv2.imwrite(directory+'/'+str(sampleNum)+'.jpg' , face_aligned)
face_aligned = imutils.resize(face_aligned ,width = 400)
#cv2.imshow("Image Captured",face_aligned)
# @params the initial point of the rectangle will be x,y and
# @params end point will be x+width and y+height
# @params along with color of the rectangle
# @params thickness of the rectangle
cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),1)
# Before continuing to the next loop, I want to give it a little pause
# waitKey of 100 millisecond
cv2.waitKey(50)
#Showing the image in another window
#Creates a window with window name "Face" and with the image img
cv2.imshow("Add Images",frame)
#Before closing it we need to give a wait command, otherwise OpenCV won't work
# @params with the millisecond of delay 1
cv2.waitKey(1)
#To get out of the loop
if(sampleNum>300):
break
#Stopping the video stream
vs.stop()
# destroying all the windows
cv2.destroyAllWindows()
def predict(face_aligned,svc,threshold=0.7):
face_encodings=np.zeros((1,128))
try:
x_face_locations=face_recognition.face_locations(face_aligned)
faces_encodings=face_recognition.face_encodings(face_aligned,known_face_locations=x_face_locations)
if(len(faces_encodings)==0):
return ([-1],[0])
except:
return ([-1],[0])
prob=svc.predict_proba(faces_encodings)
result=np.where(prob[0]==np.amax(prob[0]))
if(prob[0][result[0]]<=threshold):
return ([-1],prob[0][result[0]])
return (result[0],prob[0][result[0]])
def vizualize_Data(embedded, targets,):
X_embedded = TSNE(n_components=2).fit_transform(embedded)
for i, t in enumerate(set(targets)):
idx = targets == t
plt.scatter(X_embedded[idx, 0], X_embedded[idx, 1], label=t)
plt.legend(bbox_to_anchor=(1, 1));
rcParams.update({'figure.autolayout': True})
plt.tight_layout()
plt.savefig('./recognition/static/recognition/img/training_visualisation.png')
plt.close()
def update_attendance_in_db_in(present):
today=datetime.date.today()
time=datetime.datetime.now()
for person in present:
user=User.objects.get(username=person)
try:
qs=Present.objects.get(user=user,date=today)
except :
qs= None
if qs is None:
if present[person]==True:
a=Present(user=user,date=today,present=True)
a.save()
else:
a=Present(user=user,date=today,present=False)
a.save()
else:
if present[person]==True:
qs.present=True
qs.save(update_fields=['present'])
if present[person]==True:
a=Time(user=user,date=today,time=time, out=False)
a.save()
def update_attendance_in_db_out(present):
today=datetime.date.today()
time=datetime.datetime.now()
for person in present:
user=User.objects.get(username=person)
if present[person]==True:
a=Time(user=user,date=today,time=time, out=True)
a.save()
def check_validity_times(times_all):
if(len(times_all)>0):
sign=times_all.first().out
else:
sign=True
times_in=times_all.filter(out=False)
times_out=times_all.filter(out=True)
if(len(times_in)!=len(times_out)):
sign=True
break_hourss=0
if(sign==True):
check=False
break_hourss=0
return (check,break_hourss)
prev=True
prev_time=times_all.first().time
for obj in times_all:
curr=obj.out
if(curr==prev):
check=False
break_hourss=0
return (check,break_hourss)
if(curr==False):
curr_time=obj.time
to=curr_time
ti=prev_time
break_time=((to-ti).total_seconds())/3600
break_hourss+=break_time
else:
prev_time=obj.time
prev=curr
return (True,break_hourss)
def convert_hours_to_hours_mins(hours):
h=int(hours)
hours-=h
m=hours*60
m=math.ceil(m)
return str(str(h)+ " hrs " + str(m) + " mins")
#used
def hours_vs_date_given_employee(present_qs,time_qs,admin=True):
register_matplotlib_converters()
df_hours=[]
df_break_hours=[]
qs=present_qs
for obj in qs:
date=obj.date
times_in=time_qs.filter(date=date).filter(out=False).order_by('time')
times_out=time_qs.filter(date=date).filter(out=True).order_by('time')
times_all=time_qs.filter(date=date).order_by('time')
obj.time_in=None
obj.time_out=None
obj.hours=0
obj.break_hours=0
if (len(times_in)>0):
obj.time_in=times_in.first().time
if (len(times_out)>0):
obj.time_out=times_out.last().time
if(obj.time_in is not None and obj.time_out is not None):
ti=obj.time_in
to=obj.time_out
hours=((to-ti).total_seconds())/3600
obj.hours=hours
else:
obj.hours=0
(check,break_hourss)= check_validity_times(times_all)
if check:
obj.break_hours=break_hourss
else:
obj.break_hours=0
df_hours.append(obj.hours)
df_break_hours.append(obj.break_hours)
obj.hours=convert_hours_to_hours_mins(obj.hours)
obj.break_hours=convert_hours_to_hours_mins(obj.break_hours)
df = read_frame(qs)
df["hours"]=df_hours
df["break_hours"]=df_break_hours
print(df)
sns.barplot(data=df,x='date',y='hours')
plt.xticks(rotation='vertical')
rcParams.update({'figure.autolayout': True})
plt.tight_layout()
if(admin):
plt.savefig('./recognition/static/recognition/img/attendance_graphs/hours_vs_date/1.png')
plt.close()
else:
plt.savefig('./recognition/static/recognition/img/attendance_graphs/employee_login/1.png')
plt.close()
return qs
#used
def hours_vs_employee_given_date(present_qs,time_qs):
register_matplotlib_converters()
df_hours=[]
df_break_hours=[]
df_username=[]
qs=present_qs
for obj in qs:
user=obj.user
times_in=time_qs.filter(user=user).filter(out=False)
times_out=time_qs.filter(user=user).filter(out=True)
times_all=time_qs.filter(user=user)
obj.time_in=None
obj.time_out=None
obj.hours=0
obj.hours=0
if (len(times_in)>0):
obj.time_in=times_in.first().time
if (len(times_out)>0):
obj.time_out=times_out.last().time
if(obj.time_in is not None and obj.time_out is not None):
ti=obj.time_in
to=obj.time_out
hours=((to-ti).total_seconds())/3600
obj.hours=hours
else:
obj.hours=0
(check,break_hourss)= check_validity_times(times_all)
if check:
obj.break_hours=break_hourss
else:
obj.break_hours=0
df_hours.append(obj.hours)
df_username.append(user.username)
df_break_hours.append(obj.break_hours)
obj.hours=convert_hours_to_hours_mins(obj.hours)
obj.break_hours=convert_hours_to_hours_mins(obj.break_hours)
df = read_frame(qs)
df['hours']=df_hours
df['username']=df_username
df["break_hours"]=df_break_hours
sns.barplot(data=df,x='username',y='hours')
plt.xticks(rotation='vertical')
rcParams.update({'figure.autolayout': True})
plt.tight_layout()
plt.savefig('./recognition/static/recognition/img/attendance_graphs/hours_vs_employee/1.png')
plt.close()
return qs
def total_number_employees():
qs=User.objects.all()
return (len(qs) -1)
# -1 to account for admin
def employees_present_today():
today=datetime.date.today()
qs=Present.objects.filter(date=today).filter(present=True)
return len(qs)
#used
def this_week_emp_count_vs_date():
today=datetime.date.today()
some_day_last_week=today-datetime.timedelta(days=7)
monday_of_last_week=some_day_last_week- datetime.timedelta(days=(some_day_last_week.isocalendar()[2] - 1))
monday_of_this_week = monday_of_last_week + datetime.timedelta(days=7)
qs=Present.objects.filter(date__gte=monday_of_this_week).filter(date__lte=today)
str_dates=[]
emp_count=[]
str_dates_all=[]
emp_cnt_all=[]
cnt=0
for obj in qs:
date=obj.date
str_dates.append(str(date))
qs=Present.objects.filter(date=date).filter(present=True)
emp_count.append(len(qs))
while(cnt<5):
date=str(monday_of_this_week+datetime.timedelta(days=cnt))
cnt+=1
str_dates_all.append(date)
if(str_dates.count(date))>0:
idx=str_dates.index(date)
emp_cnt_all.append(emp_count[idx])
else:
emp_cnt_all.append(0)
df=pd.DataFrame()
df["date"]=str_dates_all
df["Number of employees"]=emp_cnt_all
sns.lineplot(data=df,x='date',y='Number of employees')
plt.savefig('./recognition/static/recognition/img/attendance_graphs/this_week/1.png')
plt.close()
#used
def last_week_emp_count_vs_date():
today=datetime.date.today()
some_day_last_week=today-datetime.timedelta(days=7)
monday_of_last_week=some_day_last_week- datetime.timedelta(days=(some_day_last_week.isocalendar()[2] - 1))
monday_of_this_week = monday_of_last_week + datetime.timedelta(days=7)
qs=Present.objects.filter(date__gte=monday_of_last_week).filter(date__lt=monday_of_this_week)
str_dates=[]
emp_count=[]
str_dates_all=[]
emp_cnt_all=[]
cnt=0
for obj in qs:
date=obj.date
str_dates.append(str(date))
qs=Present.objects.filter(date=date).filter(present=True)
emp_count.append(len(qs))
while(cnt<5):
date=str(monday_of_last_week+datetime.timedelta(days=cnt))
cnt+=1
str_dates_all.append(date)
if(str_dates.count(date))>0:
idx=str_dates.index(date)
emp_cnt_all.append(emp_count[idx])
else:
emp_cnt_all.append(0)
df=pd.DataFrame()
df["date"]=str_dates_all
df["emp_count"]=emp_cnt_all
sns.lineplot(data=df,x='date',y='emp_count')
plt.savefig('./recognition/static/recognition/img/attendance_graphs/last_week/1.png')
plt.close()
# Create your views here.
def home(request):
return render(request, 'recognition/home.html')
@login_required
def dashboard(request):
if(request.user.username=='admin'):
print("admin")
return render(request, 'recognition/admin_dashboard.html')
else:
print("not admin")
return render(request,'recognition/employee_dashboard.html')
@login_required
def add_photos(request):
if request.user.username!='admin':
return redirect('not-authorised')
if request.method=='POST':
form=usernameForm(request.POST)
data = request.POST.copy()
username=data.get('username')
if username_present(username):
create_dataset(username)
messages.success(request, f'Dataset Created')
return redirect('add-photos')
else:
messages.warning(request, f'No such username found. Please register employee first.')
return redirect('dashboard')
else:
form=usernameForm()
return render(request,'recognition/add_photos.html', {'form' : form})
def mark_your_attendance(request):
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('face_recognition_data/shape_predictor_68_face_landmarks.dat') #Add path to the shape predictor ######CHANGE TO RELATIVE PATH LATER
svc_save_path="face_recognition_data/svc.sav"
with open(svc_save_path, 'rb') as f:
svc = pickle.load(f)
fa = FaceAligner(predictor , desiredFaceWidth = 96)
encoder=LabelEncoder()
encoder.classes_ = np.load('face_recognition_data/classes.npy')
faces_encodings = np.zeros((1,128))
no_of_faces = len(svc.predict_proba(faces_encodings)[0])
count = dict()
present = dict()
log_time = dict()
start = dict()
for i in range(no_of_faces):
count[encoder.inverse_transform([i])[0]] = 0
present[encoder.inverse_transform([i])[0]] = False
vs = VideoStream(src=0).start()
sampleNum = 0
while(True):
frame = vs.read()
frame = imutils.resize(frame ,width = 800)
gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = detector(gray_frame,0)
for face in faces:
print("INFO : inside for loop")
(x,y,w,h) = face_utils.rect_to_bb(face)
face_aligned = fa.align(frame,gray_frame,face)
cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),1)
(pred,prob)=predict(face_aligned,svc)
if(pred!=[-1]):
person_name=encoder.inverse_transform(np.ravel([pred]))[0]
pred=person_name
if count[pred] == 0:
start[pred] = time.time()
count[pred] = count.get(pred,0) + 1
if count[pred] == 4 and (time.time()-start[pred]) > 1.2:
count[pred] = 0
else:
#if count[pred] == 4 and (time.time()-start) <= 1.5:
present[pred] = True
log_time[pred] = datetime.datetime.now()
count[pred] = count.get(pred,0) + 1
print(pred, present[pred], count[pred])
cv2.putText(frame, str(person_name)+ str(prob), (x+6,y+h-6), cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,255,0),1)
else:
person_name="unknown"
cv2.putText(frame, str(person_name), (x+6,y+h-6), cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,255,0),1)
#cv2.putText()
# Before continuing to the next loop, I want to give it a little pause
# waitKey of 100 millisecond
#cv2.waitKey(50)
#Showing the image in another window
#Creates a window with window name "Face" and with the image img
cv2.imshow("Mark Attendance - In - Press q to exit",frame)
#Before closing it we need to give a wait command, otherwise OpenCV won't work
# @params with the millisecond of delay 1
#cv2.waitKey(1)
#To get out of the loop
key=cv2.waitKey(50) & 0xFF
if(key==ord("q")):
break
#Stopping the video stream
vs.stop()
# destroying all the windows
cv2.destroyAllWindows()
update_attendance_in_db_in(present)
return redirect('home')
def mark_your_attendance_out(request):
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('face_recognition_data/shape_predictor_68_face_landmarks.dat') #Add path to the shape predictor ######CHANGE TO RELATIVE PATH LATER
svc_save_path="face_recognition_data/svc.sav"
with open(svc_save_path, 'rb') as f:
svc = pickle.load(f)
fa = FaceAligner(predictor , desiredFaceWidth = 96)
encoder=LabelEncoder()
encoder.classes_ = np.load('face_recognition_data/classes.npy')
faces_encodings = np.zeros((1,128))
no_of_faces = len(svc.predict_proba(faces_encodings)[0])
count = dict()
present = dict()
log_time = dict()
start = dict()
for i in range(no_of_faces):
count[encoder.inverse_transform([i])[0]] = 0
present[encoder.inverse_transform([i])[0]] = False
vs = VideoStream(src=0).start()
sampleNum = 0
while(True):
frame = vs.read()
frame = imutils.resize(frame ,width = 800)
gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = detector(gray_frame,0)
for face in faces:
print("INFO : inside for loop")
(x,y,w,h) = face_utils.rect_to_bb(face)
face_aligned = fa.align(frame,gray_frame,face)
cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),1)
(pred,prob)=predict(face_aligned,svc)
if(pred!=[-1]):
person_name=encoder.inverse_transform(np.ravel([pred]))[0]
pred=person_name
if count[pred] == 0:
start[pred] = time.time()
count[pred] = count.get(pred,0) + 1
if count[pred] == 4 and (time.time()-start[pred]) > 1.5:
count[pred] = 0
else:
#if count[pred] == 4 and (time.time()-start) <= 1.5:
present[pred] = True
log_time[pred] = datetime.datetime.now()
count[pred] = count.get(pred,0) + 1
print(pred, present[pred], count[pred])
cv2.putText(frame, str(person_name)+ str(prob), (x+6,y+h-6), cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,255,0),1)
else:
person_name="unknown"
cv2.putText(frame, str(person_name), (x+6,y+h-6), cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,255,0),1)
#cv2.putText()
# Before continuing to the next loop, I want to give it a little pause
# waitKey of 100 millisecond
#cv2.waitKey(50)
#Showing the image in another window
#Creates a window with window name "Face" and with the image img
cv2.imshow("Mark Attendance- Out - Press q to exit",frame)
#Before closing it we need to give a wait command, otherwise OpenCV won't work
# @params with the millisecond of delay 1
#cv2.waitKey(1)
#To get out of the loop
key=cv2.waitKey(50) & 0xFF
if(key==ord("q")):
break
#Stopping the video stream
vs.stop()
# destroying all the windows
cv2.destroyAllWindows()
update_attendance_in_db_out(present)
return redirect('home')
@login_required
def train(request):
if request.user.username!='admin':
return redirect('not-authorised')
training_dir='face_recognition_data/training_dataset'
count=0
for person_name in os.listdir(training_dir):
curr_directory=os.path.join(training_dir,person_name)
if not os.path.isdir(curr_directory):
continue
for imagefile in image_files_in_folder(curr_directory):
count+=1
X=[]
y=[]
i=0
for person_name in os.listdir(training_dir):
print(str(person_name))
curr_directory=os.path.join(training_dir,person_name)
if not os.path.isdir(curr_directory):
continue
for imagefile in image_files_in_folder(curr_directory):
print(str(imagefile))
image=cv2.imread(imagefile)
try:
X.append((face_recognition.face_encodings(image)[0]).tolist())
y.append(person_name)
i+=1
except:
print("removed")
os.remove(imagefile)
targets=np.array(y)
encoder = LabelEncoder()
encoder.fit(y)
y=encoder.transform(y)
X1=np.array(X)
print("shape: "+ str(X1.shape))
np.save('face_recognition_data/classes.npy', encoder.classes_)
svc = SVC(kernel='linear',probability=True)
svc.fit(X1,y)
svc_save_path="face_recognition_data/svc.sav"
with open(svc_save_path, 'wb') as f:
pickle.dump(svc,f)
vizualize_Data(X1,targets)
messages.success(request, f'Training Complete.')
return render(request,"recognition/train.html")
@login_required
def not_authorised(request):
return render(request,'recognition/not_authorised.html')
@login_required
def view_attendance_home(request):
total_num_of_emp=total_number_employees()
emp_present_today=employees_present_today()
this_week_emp_count_vs_date()
last_week_emp_count_vs_date()
return render(request,"recognition/view_attendance_home.html", {'total_num_of_emp' : total_num_of_emp, 'emp_present_today': emp_present_today})
@login_required
def view_attendance_date(request):
if request.user.username!='admin':
return redirect('not-authorised')
qs=None
time_qs=None
present_qs=None
if request.method=='POST':
form=DateForm(request.POST)
if form.is_valid():
date=form.cleaned_data.get('date')
print("date:"+ str(date))
time_qs=Time.objects.filter(date=date)
present_qs=Present.objects.filter(date=date)
if(len(time_qs)>0 or len(present_qs)>0):
qs=hours_vs_employee_given_date(present_qs,time_qs)
return render(request,'recognition/view_attendance_date.html', {'form' : form,'qs' : qs })
else:
messages.warning(request, f'No records for selected date.')
return redirect('view-attendance-date')
else:
form=DateForm()
return render(request,'recognition/view_attendance_date.html', {'form' : form, 'qs' : qs})
@login_required
def view_attendance_employee(request):
if request.user.username!='admin':
return redirect('not-authorised')
time_qs=None
present_qs=None
qs=None
if request.method=='POST':
form=UsernameAndDateForm(request.POST)
if form.is_valid():
username=form.cleaned_data.get('username')
if username_present(username):
u=User.objects.get(username=username)
time_qs=Time.objects.filter(user=u)
present_qs=Present.objects.filter(user=u)
date_from=form.cleaned_data.get('date_from')
date_to=form.cleaned_data.get('date_to')
if date_to < date_from:
messages.warning(request, f'Invalid date selection.')
return redirect('view-attendance-employee')
else:
time_qs=time_qs.filter(date__gte=date_from).filter(date__lte=date_to).order_by('-date')
present_qs=present_qs.filter(date__gte=date_from).filter(date__lte=date_to).order_by('-date')
if (len(time_qs)>0 or len(present_qs)>0):
qs=hours_vs_date_given_employee(present_qs,time_qs,admin=True)
return render(request,'recognition/view_attendance_employee.html', {'form' : form, 'qs' :qs})
else:
#print("inside qs is None")
messages.warning(request, f'No records for selected duration.')
return redirect('view-attendance-employee')
else:
print("invalid username")
messages.warning(request, f'No such username found.')
return redirect('view-attendance-employee')
else:
form=UsernameAndDateForm()
return render(request,'recognition/view_attendance_employee.html', {'form' : form, 'qs' :qs})
@login_required
def view_my_attendance_employee_login(request):
if request.user.username=='admin':
return redirect('not-authorised')
qs=None
time_qs=None
present_qs=None
if request.method=='POST':
form=DateForm_2(request.POST)
if form.is_valid():
u=request.user
time_qs=Time.objects.filter(user=u)
present_qs=Present.objects.filter(user=u)
date_from=form.cleaned_data.get('date_from')
date_to=form.cleaned_data.get('date_to')
if date_to < date_from:
messages.warning(request, f'Invalid date selection.')
return redirect('view-my-attendance-employee-login')
else:
time_qs=time_qs.filter(date__gte=date_from).filter(date__lte=date_to).order_by('-date')
present_qs=present_qs.filter(date__gte=date_from).filter(date__lte=date_to).order_by('-date')
if (len(time_qs)>0 or len(present_qs)>0):
qs=hours_vs_date_given_employee(present_qs,time_qs,admin=False)
return render(request,'recognition/view_my_attendance_employee_login.html', {'form' : form, 'qs' :qs})
else:
messages.warning(request, f'No records for selected duration.')
return redirect('view-my-attendance-employee-login')
else:
form=DateForm_2()
return render(request,'recognition/view_my_attendance_employee_login.html', {'form' : form, 'qs' :qs})
| 24.97619
| 167
| 0.722196
| 3,879
| 25,176
| 4.508378
| 0.116009
| 0.008234
| 0.014124
| 0.022301
| 0.688872
| 0.637694
| 0.602756
| 0.575766
| 0.550377
| 0.534881
| 0
| 0.014407
| 0.150858
| 25,176
| 1,008
| 168
| 24.97619
| 0.80363
| 0.122339
| 0
| 0.619125
| 0
| 0
| 0.120715
| 0.070747
| 0
| 0
| 0.000364
| 0
| 0
| 1
| 0.040519
| false
| 0
| 0.056726
| 0.003241
| 0.165316
| 0.027553
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bec698afd2c5801e7a05fe6be1339638668af844
| 856
|
py
|
Python
|
2018/05.py
|
GillesArcas/Advent_of_Code
|
1f57eb1686875df2684b0d56916b1d20724e9fb9
|
[
"MIT"
] | null | null | null |
2018/05.py
|
GillesArcas/Advent_of_Code
|
1f57eb1686875df2684b0d56916b1d20724e9fb9
|
[
"MIT"
] | null | null | null |
2018/05.py
|
GillesArcas/Advent_of_Code
|
1f57eb1686875df2684b0d56916b1d20724e9fb9
|
[
"MIT"
] | null | null | null |
import re
import string
DATA = '05.txt'
def react(polymer):
pairs = '|'.join([a + b + '|' + b + a for a, b in zip(string.ascii_lowercase, string.ascii_uppercase)])
length = len(polymer)
while 1:
polymer = re.sub(pairs, '', polymer)
if len(polymer) == length:
return length
else:
length = len(polymer)
def code1():
with open(DATA) as f:
polymer = f.readline().strip()
print('1>', react(polymer))
def code2():
with open(DATA) as f:
polymer = f.readline().strip()
minlength = len(polymer)
for c in string.ascii_lowercase:
polymer2 = re.sub(c, '', polymer, flags=re.I)
length = react(polymer2)
if length < minlength:
minlength = length
print('2>', minlength)
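# Sanity check with the example polymer from the puzzle statement:
# "dabAcCaCBAcCcaDA" fully reacts down to "dabCBAcaDA", leaving 10 units.
assert react('dabAcCaCBAcCcaDA') == 10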
code1()
code2()
| 21.948718
| 108
| 0.53972
| 103
| 856
| 4.456311
| 0.407767
| 0.087146
| 0.087146
| 0.061002
| 0.156863
| 0.156863
| 0.156863
| 0.156863
| 0.156863
| 0
| 0
| 0.018966
| 0.32243
| 856
| 38
| 109
| 22.526316
| 0.772414
| 0
| 0
| 0.214286
| 0
| 0
| 0.01467
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.107143
| false
| 0
| 0.071429
| 0
| 0.178571
| 0.071429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bec79a34dea2c5eb7b1cdd952dbf003070a952c4
| 1,746
|
py
|
Python
|
WebServer.py
|
i3uex/CompareML
|
3d53d58117507db11ad08ca0b1c883ec0997840e
|
[
"MIT"
] | null | null | null |
WebServer.py
|
i3uex/CompareML
|
3d53d58117507db11ad08ca0b1c883ec0997840e
|
[
"MIT"
] | null | null | null |
WebServer.py
|
i3uex/CompareML
|
3d53d58117507db11ad08ca0b1c883ec0997840e
|
[
"MIT"
] | null | null | null |
import json
import cherrypy
import engine
class WebServer(object):
@cherrypy.expose
def index(self):
return open('public/index.html', encoding='utf-8')
@cherrypy.expose
class GetOptionsService(object):
@cherrypy.tools.accept(media='text/plain')
def GET(self):
return json.dumps({
'providers': engine.get_providers(),
'algorithms': engine.get_algorithms(),
'default_datasets': engine.get_all_default_datasets()
})
@cherrypy.expose
class SetOptionsService(object):
@cherrypy.tools.accept(media='text/plain')
def POST(self, options):
""" Use the options selected by the user to execute all algorithms
:param options: {
is_default_dataset: bool,
dataset: str,
providers: []
algorithms: []
target: str
}
if is_default_dataset is true, dataset will contain the name of the default_dataset"""
options_dic = json.loads(options)
try:
result = engine.execute(options_dic['is_default_dataset'], options_dic['dataset'], options_dic['providers'],
options_dic['algorithms'],
options_dic['target'])
except Exception as exception:
message = f"{str(exception)}"
raise cherrypy.HTTPError(500, message=message)
return result
@cherrypy.expose
@cherrypy.tools.json_out()
class GetDefaultDatasetHeadersService(object):
@cherrypy.tools.accept(media='text/plain')
def GET(self, default_dataset_name):
return {'headers': engine.get_default_dataset_headers(default_dataset_name)}
| 30.103448
| 120
| 0.611684
| 180
| 1,746
| 5.783333
| 0.377778
| 0.09414
| 0.054755
| 0.072046
| 0.134486
| 0.134486
| 0.134486
| 0.134486
| 0.09414
| 0.09414
| 0
| 0.00321
| 0.286369
| 1,746
| 57
| 121
| 30.631579
| 0.832263
| 0.184422
| 0
| 0.2
| 0
| 0
| 0.117561
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.114286
| false
| 0
| 0.085714
| 0.085714
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bec831a08a3c7355e5ebc6786562ec7da94bccbd
| 2,421
|
py
|
Python
|
cptk/core/fetcher.py
|
RealA10N/cptk
|
e500d948e91bb70661adc3c2539b149704c734a1
|
[
"Apache-2.0"
] | 5
|
2021-12-25T01:49:45.000Z
|
2022-03-27T10:30:14.000Z
|
cptk/core/fetcher.py
|
RealA10N/cptk
|
e500d948e91bb70661adc3c2539b149704c734a1
|
[
"Apache-2.0"
] | 39
|
2021-12-24T16:35:07.000Z
|
2022-03-18T23:15:14.000Z
|
cptk/core/fetcher.py
|
RealA10N/cptk
|
e500d948e91bb70661adc3c2539b149704c734a1
|
[
"Apache-2.0"
] | 2
|
2022-01-12T19:13:20.000Z
|
2022-01-12T19:32:05.000Z
|
from __future__ import annotations
from typing import TYPE_CHECKING
import pkg_resources
from bs4 import BeautifulSoup
from requests import session
from cptk.scrape import PageInfo
from cptk.scrape import Website
from cptk.utils import cptkException
if TYPE_CHECKING:
from cptk.scrape import Problem
class InvalidClone(cptkException):
""" Raised when the clone command is called with a 'PageInfo' instance that
doesn't describe anything that can be cloned. """
def __init__(self, info: PageInfo) -> None:
self.info = info
super().__init__(f"We don't know how to handle data from {info.url!r}")
class UnknownWebsite(cptkException):
""" Raised when trying to fetch information from a website that is not
registered and can't be handled by cptk. """
def __init__(self, domain: str) -> None:
self.domain = domain
super().__init__(f"We don't know how to handle data from {domain!r}")
class Fetcher:
def __init__(self) -> None:
self.session = session()
self._load_websites()
def _load_websites(self) -> list[type[Website]]:
self._websites = [
point.load()()
for point in pkg_resources.iter_entry_points('cptk_sites')
]
self._domain_to_website = dict()
for website in self._websites:
domain = website.domain
if isinstance(domain, str):
self._domain_to_website[domain] = website
else:
for cur in domain:
self._domain_to_website[cur] = website
def page_to_problem(self, info: PageInfo) -> Problem:
""" Recives an arbitrary page info instance and tries to match it with
a Website class that knows how to handle this specific website. If cptk
doesn't find a way to parse the given webpage, it raises the
'InvalidClone' exception. """
for website in self._websites:
if website.is_problem(info):
return website.to_problem(info)
raise InvalidClone(info)
def to_page(self, url: str) -> PageInfo:
""" Makes an get http/s request to the given URL and returns the result
as a PageInfo instance. """
if not url.startswith('http'):
url = f'http://{url}'
res = self.session.get(url)
data = BeautifulSoup(res.content, 'lxml')
return PageInfo(url, data)
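if __name__ == '__main__':
    # Minimal sketch of how the fetcher is meant to be driven (the URL below is
    # only a hypothetical example; the registered 'cptk_sites' entry points
    # decide whether a page can actually be parsed into a Problem).
    fetcher = Fetcher()
    page = fetcher.to_page('codeforces.com/problemset/problem/1/A')
    try:
        problem = fetcher.page_to_problem(page)
        print('Matched a problem page at', page.url)
    except InvalidClone:
        print('No registered website knows how to parse', page.url)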
| 31.441558
| 79
| 0.646014
| 317
| 2,421
| 4.769716
| 0.359621
| 0.033069
| 0.027778
| 0.039683
| 0.083333
| 0.051587
| 0.051587
| 0.051587
| 0.051587
| 0.051587
| 0
| 0.000566
| 0.270549
| 2,421
| 76
| 80
| 31.855263
| 0.855606
| 0.224701
| 0
| 0.043478
| 0
| 0
| 0.070679
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.130435
| false
| 0
| 0.195652
| 0
| 0.434783
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bec8b40804691cfab7d99feee2707b808f11aaed
| 15,006
|
py
|
Python
|
machine_learning/deep_reinforcement_learning_grasping/drlgrasp/drlgrasp/pybullet_envs/kuka_reach_with_visual.py
|
Hinson-A/guyueclass
|
e59129526729542dccefa6c7232378a00dc0175a
|
[
"Apache-2.0"
] | 227
|
2021-01-20T05:34:32.000Z
|
2022-03-29T12:43:05.000Z
|
machine_learning/deep_reinforcement_learning_grasping/drlgrasp/drlgrasp/pybullet_envs/kuka_reach_with_visual.py
|
passYYYY/guyueclass
|
2054ccec2f5e6c002727a5561b494a1046484504
|
[
"Apache-2.0"
] | 1
|
2021-04-22T05:56:00.000Z
|
2021-05-26T06:00:17.000Z
|
machine_learning/deep_reinforcement_learning_grasping/drlgrasp/drlgrasp/pybullet_envs/kuka_reach_with_visual.py
|
passYYYY/guyueclass
|
2054ccec2f5e6c002727a5561b494a1046484504
|
[
"Apache-2.0"
] | 239
|
2021-01-28T02:59:53.000Z
|
2022-03-29T08:02:17.000Z
|
import pybullet as p
import pybullet_data
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
from math import sqrt
import random
import time
import math
import cv2
import torch
import os
def random_crop(imgs, out):
"""
args:
imgs: shape (B,C,H,W)
out: output size (e.g. 84)
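quick shape check (illustrative sizes only):
    >>> batch = np.zeros((4, 1, 96, 96), dtype=np.float32)
    >>> random_crop(batch, out=84).shape
    (4, 1, 84, 84)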
"""
n, c, h, w = imgs.shape
crop_max = h - out + 1
w1 = np.random.randint(0, crop_max, n)
h1 = np.random.randint(0, crop_max, n)
cropped = np.empty((n, c, out, out), dtype=imgs.dtype)
for i, (img, w11, h11) in enumerate(zip(imgs, w1, h1)):
cropped[i] = img[:, h11:h11 + out, w11:w11 + out]
return cropped
class KukaReachVisualEnv(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 50
}
kMaxEpisodeSteps = 700
kImageSize = {'width': 96, 'height': 96}
kFinalImageSize = {'width': 84, 'height': 84}
def __init__(self, is_render=False, is_good_view=False):
self.is_render = is_render
self.is_good_view = is_good_view
if self.is_render:
p.connect(p.GUI)
else:
p.connect(p.DIRECT)
self.x_low_obs = 0.2
self.x_high_obs = 0.7
self.y_low_obs = -0.3
self.y_high_obs = 0.3
self.z_low_obs = 0
self.z_high_obs = 0.55
self.x_low_action = -0.4
self.x_high_action = 0.4
self.y_low_action = -0.4
self.y_high_action = 0.4
self.z_low_action = -0.6
self.z_high_action = 0.3
self.step_counter = 0
self.urdf_root_path = pybullet_data.getDataPath()
# lower limits for null space
self.lower_limits = [-.967, -2, -2.96, 0.19, -2.96, -2.09, -3.05]
# upper limits for null space
self.upper_limits = [.967, 2, 2.96, 2.29, 2.96, 2.09, 3.05]
# joint ranges for null space
self.joint_ranges = [5.8, 4, 5.8, 4, 5.8, 4, 6]
# restposes for null space
self.rest_poses = [0, 0, 0, 0.5 * math.pi, 0, -math.pi * 0.5 * 0.66, 0]
# joint damping coefficents
self.joint_damping = [
0.00001, 0.00001, 0.00001, 0.00001, 0.00001, 0.00001, 0.00001
]
self.init_joint_positions = [
0.006418, 0.413184, -0.011401, -1.589317, 0.005379, 1.137684,
-0.006539
]
self.orientation = p.getQuaternionFromEuler(
[0., -math.pi, math.pi / 2.])
self.camera_parameters = {
'width': 960.,
'height': 720,
'fov': 60,
'near': 0.1,
'far': 100.,
'eye_position': [0.59, 0, 0.8],
'target_position': [0.55, 0, 0.05],
'camera_up_vector':
[1, 0, 0], # I really do not know the parameter's effect.
'light_direction': [
0.5, 0, 1
], # the direction is from the light source position to the origin of the world frame.
}
self.view_matrix = p.computeViewMatrixFromYawPitchRoll(
cameraTargetPosition=[0.55, 0, 0.05],
distance=.7,
yaw=90,
pitch=-70,
roll=0,
upAxisIndex=2)
self.projection_matrix = p.computeProjectionMatrixFOV(
fov=self.camera_parameters['fov'],
aspect=self.camera_parameters['width'] /
self.camera_parameters['height'],
nearVal=self.camera_parameters['near'],
farVal=self.camera_parameters['far'])
p.configureDebugVisualizer(lightPosition=[5, 0, 5])
p.resetDebugVisualizerCamera(cameraDistance=1.5,
cameraYaw=0,
cameraPitch=-40,
cameraTargetPosition=[0.55, -0.35, 0.2])
self.action_space = spaces.Box(low=np.array(
[self.x_low_action, self.y_low_action, self.z_low_action]),
high=np.array([
self.x_high_action,
self.y_high_action,
self.z_high_action
]),
dtype=np.float32)
self.observation_space = spaces.Box(low=0, high=1,
shape=(1, self.kFinalImageSize['width'], self.kFinalImageSize['height']))
self.seed()
self.reset()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def reset(self):
self.step_counter = 0
p.resetSimulation()
# p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 0)
self.terminated = False
p.setGravity(0, 0, -10)
        # These are the surrounding white debug lines, used to check whether the end effector leaves the observation-space bounds.
p.addUserDebugLine(
lineFromXYZ=[self.x_low_obs, self.y_low_obs, 0],
lineToXYZ=[self.x_low_obs, self.y_low_obs, self.z_high_obs])
p.addUserDebugLine(
lineFromXYZ=[self.x_low_obs, self.y_high_obs, 0],
lineToXYZ=[self.x_low_obs, self.y_high_obs, self.z_high_obs])
p.addUserDebugLine(
lineFromXYZ=[self.x_high_obs, self.y_low_obs, 0],
lineToXYZ=[self.x_high_obs, self.y_low_obs, self.z_high_obs])
p.addUserDebugLine(
lineFromXYZ=[self.x_high_obs, self.y_high_obs, 0],
lineToXYZ=[self.x_high_obs, self.y_high_obs, self.z_high_obs])
p.addUserDebugLine(
lineFromXYZ=[self.x_low_obs, self.y_low_obs, self.z_high_obs],
lineToXYZ=[self.x_high_obs, self.y_low_obs, self.z_high_obs])
p.addUserDebugLine(
lineFromXYZ=[self.x_low_obs, self.y_high_obs, self.z_high_obs],
lineToXYZ=[self.x_high_obs, self.y_high_obs, self.z_high_obs])
p.addUserDebugLine(
lineFromXYZ=[self.x_low_obs, self.y_low_obs, self.z_high_obs],
lineToXYZ=[self.x_low_obs, self.y_high_obs, self.z_high_obs])
p.addUserDebugLine(
lineFromXYZ=[self.x_high_obs, self.y_low_obs, self.z_high_obs],
lineToXYZ=[self.x_high_obs, self.y_high_obs, self.z_high_obs])
p.loadURDF(os.path.join(self.urdf_root_path, "plane.urdf"),
basePosition=[0, 0, -0.65])
self.kuka_id = p.loadURDF(os.path.join(self.urdf_root_path,
"kuka_iiwa/model.urdf"),
useFixedBase=True)
table_uid = p.loadURDF(os.path.join(self.urdf_root_path,
"table/table.urdf"),
basePosition=[0.5, 0, -0.65])
p.changeVisualShape(table_uid, -1, rgbaColor=[1, 1, 1, 1])
self.object_id = p.loadURDF(os.path.join(self.urdf_root_path,
"random_urdfs/000/000.urdf"),
basePosition=[
random.uniform(self.x_low_obs,
self.x_high_obs),
random.uniform(self.y_low_obs,
self.y_high_obs), 0.01
])
self.num_joints = p.getNumJoints(self.kuka_id)
for i in range(self.num_joints):
p.resetJointState(
bodyUniqueId=self.kuka_id,
jointIndex=i,
targetValue=self.init_joint_positions[i],
)
self.robot_pos_obs = p.getLinkState(self.kuka_id,
self.num_joints - 1)[4]
p.stepSimulation()
(_, _, px, _,
_) = p.getCameraImage(width=960,
height=960,
viewMatrix=self.view_matrix,
projectionMatrix=self.projection_matrix,
renderer=p.ER_BULLET_HARDWARE_OPENGL)
self.images = px
p.enableJointForceTorqueSensor(bodyUniqueId=self.kuka_id,
jointIndex=self.num_joints - 1,
enableSensor=True)
self.object_pos = p.getBasePositionAndOrientation(self.object_id)[0]
        # drop the 4th (alpha) channel; we do not need it
        self.images = self.images[:, :, :3]
return self._process_image(self.images)
def _process_image(self, image):
"""Convert the RGB pic to gray pic and add a channel 1
Args:
image ([type]): [description]
"""
if image is not None:
image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
image = cv2.resize(image, (self.kImageSize['width'], self.kImageSize['height']))[None, :, :] / 255.
return image
else:
return np.zeros((1, self.kImageSize['width'], self.kImageSize['height']))
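    # Shape note (inferred from the method above, not stated in the original):
    # the input is an RGB array of shape (H, W, 3); the returned array has shape
    # (1, kImageSize['width'], kImageSize['height']) with values scaled to [0, 1].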
def step(self, action):
dv = 0.005
dx = action[0] * dv
dy = action[1] * dv
dz = action[2] * dv
self.current_pos = p.getLinkState(self.kuka_id, self.num_joints - 1)[4]
self.new_robot_pos = [
self.current_pos[0] + dx, self.current_pos[1] + dy,
self.current_pos[2] + dz
]
self.robot_joint_positions = p.calculateInverseKinematics(
bodyUniqueId=self.kuka_id,
endEffectorLinkIndex=self.num_joints - 1,
targetPosition=[
self.new_robot_pos[0], self.new_robot_pos[1],
self.new_robot_pos[2]
],
targetOrientation=self.orientation,
jointDamping=self.joint_damping,
)
for i in range(self.num_joints):
p.resetJointState(
bodyUniqueId=self.kuka_id,
jointIndex=i,
targetValue=self.robot_joint_positions[i],
)
p.stepSimulation()
        # If is_good_view was set in __init__, slow the arm down so its motion is easier to observe.
if self.is_good_view:
time.sleep(0.05)
self.step_counter += 1
return self._reward()
def _reward(self):
        # Be sure to take the element at index 4; see the pybullet manual for this function's return values.
self.robot_state = p.getLinkState(self.kuka_id, self.num_joints - 1)[4]
self.object_state = np.array(
p.getBasePositionAndOrientation(self.object_id)[0]).astype(
np.float32)
square_dx = (self.robot_state[0] - self.object_state[0]) ** 2
square_dy = (self.robot_state[1] - self.object_state[1]) ** 2
square_dz = (self.robot_state[2] - self.object_state[2]) ** 2
        # Use the distance between the end effector and the object as the basis of the reward.
self.distance = sqrt(square_dx + square_dy + square_dz)
# print(self.distance)
x = self.robot_state[0]
y = self.robot_state[1]
z = self.robot_state[2]
        # If the end effector leaves the observation space, the episode is done and a small penalty is applied.
terminated = bool(x < self.x_low_obs or x > self.x_high_obs
or y < self.y_low_obs or y > self.y_high_obs
or z < self.z_low_obs or z > self.z_high_obs)
if terminated:
reward = -0.1
self.terminated = True
        # If the arm idles and still has not reached the object after the maximum number of steps, also apply a penalty.
elif self.step_counter > self.kMaxEpisodeSteps:
reward = -0.1
self.terminated = True
elif self.distance < 0.1:
reward = 1
self.terminated = True
else:
reward = 0
self.terminated = False
        info = {'distance': self.distance}
(_, _, px, _,
_) = p.getCameraImage(width=960,
height=960,
viewMatrix=self.view_matrix,
projectionMatrix=self.projection_matrix,
renderer=p.ER_BULLET_HARDWARE_OPENGL)
self.images = px
self.processed_image = self._process_image(self.images)
# self.observation=self.robot_state
self.observation = self.object_state
return self.processed_image, reward, self.terminated, info
def close(self):
p.disconnect()
def _get_force_sensor_value(self):
force_sensor_value = p.getJointState(bodyUniqueId=self.kuka_id,
jointIndex=self.num_joints -
1)[2][2]
        # The first [2] selects jointReactionForces and the second [2] selects Fz.
        # pybullet returns plain tuples, so they cannot be indexed by name like a
        # dict; returning a dict instead would be an improvement.
return force_sensor_value
class CustomSkipFrame(gym.Wrapper):
""" Make a 4 frame skip, so the observation space will change to (4,84,84) from (1,84,84)
Args:
gym ([type]): [description]
"""
def __init__(self, env, skip=4):
super(CustomSkipFrame, self).__init__(env)
self.observation_space = spaces.Box(low=0,
high=1,
shape=(skip, self.kFinalImageSize['width'], self.kFinalImageSize['height']))
self.skip = skip
def step(self, action):
        total_reward = 0
        states = []
        state, reward, done, info = self.env.step(action)
        total_reward += reward
        for i in range(self.skip):
            if not done:
                state, reward, done, info = self.env.step(action)
                total_reward += reward
                states.append(state)
            else:
                states.append(state)
        states = np.concatenate(states, 0)[None, :, :, :]
        return random_crop(states.astype(np.float32), self.kFinalImageSize['width']), total_reward, done, info
def reset(self):
state = self.env.reset()
states = np.concatenate([state for _ in range(self.skip)],
0)[None, :, :, :]
return random_crop(states.astype(np.float32), self.kFinalImageSize['width'])
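# Hedged usage sketch (not part of the original script): wrap the environment so
# that each call to step() repeats the chosen action across several frames.
#     env = CustomSkipFrame(KukaReachVisualEnv(is_render=False))
#     obs = env.reset()
#     obs, reward, done, info = env.step(env.action_space.sample())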
if __name__ == '__main__':
    # This part is a baseline: let the arm choose random actions and see what score it can reach.
import matplotlib.pyplot as plt
env = KukaReachVisualEnv(is_render=False)
env = CustomSkipFrame(env)
print(env.observation_space.shape)
print(env.action_space.shape)
    # print(env.action_space.n)  # Box action spaces have no `.n` attribute; this line would raise AttributeError
# for _ in range(20):
# action=env.action_space.sample()
# print(action)
# env.step(action)
#
# state = env.reset()
# print(state.shape)
# img = state[0][0]
# plt.imshow(img, cmap='gray')
# plt.show()
| 36.159036
| 121
| 0.53212
| 1,757
| 15,006
| 4.349459
| 0.206033
| 0.032976
| 0.017796
| 0.021984
| 0.356582
| 0.316802
| 0.278069
| 0.255431
| 0.246009
| 0.222324
| 0
| 0.043787
| 0.362322
| 15,006
| 414
| 122
| 36.246377
| 0.754833
| 0.090097
| 0
| 0.219595
| 0
| 0
| 0.023853
| 0.003658
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040541
| false
| 0
| 0.047297
| 0
| 0.141892
| 0.010135
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bec8c0835477d8b4651705098efe6f5b0368b832
| 6,581
|
py
|
Python
|
tests/ut/cpp/python_input/gtest_input/pre_activate/ir_fusion_test.py
|
GeekHee/mindspore
|
896b8e5165dd0a900ed5a39e0fb23525524bf8b0
|
[
"Apache-2.0"
] | null | null | null |
tests/ut/cpp/python_input/gtest_input/pre_activate/ir_fusion_test.py
|
GeekHee/mindspore
|
896b8e5165dd0a900ed5a39e0fb23525524bf8b0
|
[
"Apache-2.0"
] | null | null | null |
tests/ut/cpp/python_input/gtest_input/pre_activate/ir_fusion_test.py
|
GeekHee/mindspore
|
896b8e5165dd0a900ed5a39e0fb23525524bf8b0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from mindspore.ops import Primitive
from mindspore.ops import operations as P
from mindspore.ops.operations import _grad_ops as G
from mindspore.ops import _constants as Constants
# pylint: disable=unused-variable
tuple_getitem = Primitive(Constants.kTupleGetItem)
add = P.Add()
allreduce = P.AllReduce()
allreduce.add_prim_attr('fusion', 1)
make_tuple = Primitive("make_tuple")
conv = P.Conv2D(out_channel=64, kernel_size=7, mode=1, pad_mode="valid", pad=0, stride=1, dilation=1, group=1)
bn = P.FusedBatchNorm()
relu = P.ReLU()
conv_bn1 = Primitive('ConvBN1')
bn2_add_relu = Primitive('BN2AddRelu')
bn2_relu = Primitive('BN2Relu')
fused_bn1 = Primitive('FusedBN1')
fused_bn2 = Primitive('FusedBN2')
fused_bn3 = Primitive('FusedBN3')
bn_grad = G.FusedBatchNormGrad()
bn_grad1 = Primitive('BNGrad1')
bn_grad2 = Primitive('BNGrad2')
bn_grad3 = Primitive('BNGrad3')
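# FnDict below is a small decorator-based registry: calling an instance on a
# function stores it under the function's __name__, and __getitem__ looks it up
# by that name.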
class FnDict:
def __init__(self):
self.fnDict = {}
def __call__(self, fn):
self.fnDict[fn.__name__] = fn
def __getitem__(self, name):
return self.fnDict[name]
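# Each test_* function registers a `before` graph and one or more expected
# `after` graphs in an FnDict and returns the one selected by `tag`; these are
# presumably consumed by the C++ unit tests that import this gtest_input file.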
def test_bn_split(tag):
""" test_split_bn_fusion """
fns = FnDict()
@fns
def before(x, scale, b, mean, variance):
bn_output = bn(x, scale, b, mean, variance)
item0 = tuple_getitem(bn_output, 0)
return item0
@fns
def after(x, scale, b, mean, variance):
fused_bn1_output = fused_bn1(x)
fused_bn2_input0 = tuple_getitem(fused_bn1_output, 0)
fused_bn2_input1 = tuple_getitem(fused_bn1_output, 1)
fused_bn2_output = fused_bn2(fused_bn2_input0, fused_bn2_input1, mean, variance)
fused_bn3_input1 = tuple_getitem(fused_bn2_output, 0)
fused_bn3_input2 = tuple_getitem(fused_bn2_output, 1)
fused_bn3_output = fused_bn3(x, fused_bn3_input1, fused_bn3_input2, scale, b)
output1 = tuple_getitem(fused_bn2_output, 2)
output2 = tuple_getitem(fused_bn2_output, 3)
output3 = tuple_getitem(fused_bn2_output, 0)
output4 = tuple_getitem(fused_bn2_output, 1)
output = make_tuple(fused_bn3_output, output1, output2, output3, output4)
item0 = tuple_getitem(output, 0)
return make_tuple(item0)
return fns[tag]
def test_bn_grad_split(tag):
""" test_bn_grad_split """
fns = FnDict()
@fns
def before(dy, x, scale, save_mean, save_inv_variance):
bn_grad_output = bn_grad(dy, x, scale, save_mean, save_inv_variance)
item0 = tuple_getitem(bn_grad_output, 0)
item1 = tuple_getitem(bn_grad_output, 1)
item2 = tuple_getitem(bn_grad_output, 2)
output = make_tuple(item0, item1, item2)
res = tuple_getitem(output, 0)
return res
@fns
def after(i0, i1, i2, i3, i4):
bn_grad1_output = bn_grad1(i0, i1, i3)
bn_grad1_item0 = tuple_getitem(bn_grad1_output, 0)
bn_grad1_item1 = tuple_getitem(bn_grad1_output, 1)
bn_grad1_item2 = tuple_getitem(bn_grad1_output, 2)
bn_grad2_output = bn_grad2(bn_grad1_item0, bn_grad1_item1, i4, i2)
bn_grad2_item0 = tuple_getitem(bn_grad2_output, 0)
bn_grad2_item1 = tuple_getitem(bn_grad2_output, 1)
bn_grad2_item2 = tuple_getitem(bn_grad2_output, 2)
bn_grad2_item3 = tuple_getitem(bn_grad2_output, 3)
bn_grad2_item4 = tuple_getitem(bn_grad2_output, 4)
bn_grad3_output = bn_grad3(i0, bn_grad2_item2, bn_grad2_item3, bn_grad2_item4, bn_grad1_item2)
bn_grad_make_tuple = make_tuple(bn_grad3_output, bn_grad2_item0, bn_grad2_item1)
item0 = tuple_getitem(bn_grad_make_tuple, 0)
item1 = tuple_getitem(bn_grad_make_tuple, 1)
item2 = tuple_getitem(bn_grad_make_tuple, 2)
output = make_tuple(item0, item1, item2)
return make_tuple(tuple_getitem(output, 0))
return fns[tag]
def test_all_reduce_fusion_all(tag):
""" test_all_reduce_fusion_all """
fns = FnDict()
@fns
def before(x1, x2, x3, x4, x5):
y1 = allreduce(x1)
y2 = allreduce(x2)
y3 = allreduce(x3)
y4 = allreduce(x4)
y5 = allreduce(x5)
return make_tuple(y1, y2, y3, y4, y5)
@fns
def after(x1, x2, x3, x4, x5):
ar = allreduce(x5, x4, x3, x2, x1)
y5 = tuple_getitem(ar, 0)
y4 = tuple_getitem(ar, 1)
y3 = tuple_getitem(ar, 2)
y2 = tuple_getitem(ar, 3)
y1 = tuple_getitem(ar, 4)
res = make_tuple(y1, y2, y3, y4, y5)
return make_tuple(res)
@fns
def after1(x1, x2, x3, x4, x5):
ar = allreduce(x1, x2, x3, x4, x5)
y1 = tuple_getitem(ar, 0)
y2 = tuple_getitem(ar, 1)
y3 = tuple_getitem(ar, 2)
y4 = tuple_getitem(ar, 3)
y5 = tuple_getitem(ar, 4)
res = make_tuple(y1, y2, y3, y4, y5)
return make_tuple(res)
return fns[tag]
def test_all_reduce_fusion_group(tag):
""" test_all_reduce_fusion_group """
fns = FnDict()
@fns
def before(x1, x2, x3, x4, x5):
y1 = allreduce(x1)
y2 = allreduce(x2)
y3 = allreduce(x3)
y4 = allreduce(x4)
y5 = allreduce(x5)
return make_tuple(y1, y2, y3, y4, y5)
@fns
def after1(x1, x2, x3, x4, x5):
ar1 = allreduce(x5, x4)
ar2 = allreduce(x3, x2, x1)
y4 = tuple_getitem(ar1, 1)
y5 = tuple_getitem(ar1, 0)
y1 = tuple_getitem(ar2, 2)
y2 = tuple_getitem(ar2, 1)
y3 = tuple_getitem(ar2, 0)
res = make_tuple(y1, y2, y3, y4, y5)
return make_tuple(res)
@fns
def after2(x1, x2, x3, x4, x5):
ar1 = allreduce(x1, x3, x5)
ar2 = allreduce(x2, x4)
y1 = tuple_getitem(ar1, 2)
y3 = tuple_getitem(ar1, 1)
y5 = tuple_getitem(ar1, 0)
y2 = tuple_getitem(ar2, 1)
y4 = tuple_getitem(ar2, 0)
output = make_tuple(y1, y2, y3, y4, y5)
return make_tuple(output)
return fns[tag]
| 33.576531
| 110
| 0.647166
| 953
| 6,581
| 4.198321
| 0.193075
| 0.140965
| 0.052487
| 0.013997
| 0.446138
| 0.283179
| 0.217946
| 0.185954
| 0.145464
| 0.112472
| 0
| 0.066241
| 0.238414
| 6,581
| 195
| 111
| 33.748718
| 0.732043
| 0.117155
| 0
| 0.324324
| 0
| 0
| 0.015595
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.114865
| false
| 0
| 0.027027
| 0.006757
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe1f4c025bf53ebda91717d8cd83c5c619dbfc64
| 7,044
|
py
|
Python
|
app.py
|
PolinaRomanchenko/Victorious_Secret_DSCI_532
|
e83bc19169a1736618ac55f2ade40741583089fd
|
[
"MIT"
] | null | null | null |
app.py
|
PolinaRomanchenko/Victorious_Secret_DSCI_532
|
e83bc19169a1736618ac55f2ade40741583089fd
|
[
"MIT"
] | null | null | null |
app.py
|
PolinaRomanchenko/Victorious_Secret_DSCI_532
|
e83bc19169a1736618ac55f2ade40741583089fd
|
[
"MIT"
] | null | null | null |
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import pandas as pd
import numpy as np
import altair as alt
import vega_datasets
alt.data_transformers.enable('default')
alt.data_transformers.disable_max_rows()
app = dash.Dash(__name__, assets_folder='assets', external_stylesheets=[dbc.themes.BOOTSTRAP])
# Bootstrap CSS.
app.css.append_css({'external_url': 'https://codepen.io/amyoshino/pen/jzXypZ.css'}) # noqa: E501
server = app.server
app.title = 'Dash app with pure Altair HTML'
df = pd.read_csv('data/Police_Department_Incidents_-_Previous_Year__2016_.csv')
# df = pd.read_csv("https://raw.github.ubc.ca/MDS-2019-20/DSCI_531_lab4_anas017/master/data/Police_Department_Incidents_-_Previous_Year__2016_.csv?token=AAAHQ0dLxUd74i7Zhzh1SJ_UuOaFVI3_ks5d5dT3wA%3D%3D")
df['datetime'] = pd.to_datetime(df[["Date","Time"]].apply(lambda x: x[0].split()[0] +" "+x[1], axis=1), format="%m/%d/%Y %H:%M")
df['hour'] = df['datetime'].dt.hour
df.dropna(inplace=True)
top_4_crimes = df['Category'].value_counts()[:6].index.to_list()
top_4_crimes
top_4_crimes.remove("NON-CRIMINAL")
top_4_crimes.remove("OTHER OFFENSES")
# top 4 crimes df subset
df_t4 = df[df["Category"].isin(top_4_crimes)].copy()
def make_plot_top(df_new=df_t4):
    # Bar chart of per-hour crime counts for the top 4 categories, filtered with an hour slider
# making the slider
slider = alt.binding_range(min = 0, max = 23, step = 1)
select_hour = alt.selection_single(name='select', fields = ['hour'],
bind = slider, init={'hour': 0})
#begin of my code
# typeDict = {'ASSAULT':'quantitative',
# 'VANDALISM':'quantitative',
# 'LARCENY/THEFT':'quantitative',
# 'VEHICLE THEFT':'quantitative'
# }
# end
chart = alt.Chart(df_new).mark_bar(size=30).encode(
x=alt.X('Category',type='nominal', title='Category'),
y=alt.Y('count()', title = "Count" , scale = alt.Scale(domain = (0,3300))),
tooltip='count()'
).properties(
title = "Per hour crime occurrences for the top 4 crimes",
width=500,
height = 315
).add_selection(
select_hour
).transform_filter(
select_hour
)
return chart
def make_plot_bot(data=df_t4):
chart_1 = alt.Chart(data).mark_circle(size=3, opacity = 0.8).encode(
longitude='X:Q',
latitude='Y:Q',
color = alt.Color('PdDistrict:N', legend = alt.Legend(title = "District")),
tooltip = 'PdDistrict'
).project(
type='albersUsa'
).properties(
width=450,
height=350
)
chart_2 = alt.Chart(data).mark_bar().encode(
x=alt.X('PdDistrict:N', axis=None, title="District"),
y=alt.Y('count()', title="Count of reports"),
color=alt.Color('PdDistrict:N', legend=alt.Legend(title="District")),
tooltip=['PdDistrict', 'count()']
).properties(
width=450,
height=350
)
# A dropdown filter
crimes_dropdown = alt.binding_select(options=list(data['Category'].unique()))
crimes_select = alt.selection_single(fields=['Category'], bind=crimes_dropdown,
name="Pick\ Crime")
combine_chart = (chart_2 | chart_1)
filter_crimes = combine_chart.add_selection(
crimes_select
).transform_filter(
crimes_select
)
return filter_crimes
body = dbc.Container(
[
dbc.Row(
[
dbc.Col(
[
html.H2("San Francisco Crime"),
html.P(
"""\
When looking for a place to live or visit, one important factor that people will consider
is the safety of the neighborhood. Searching that information district
by district could be time consuming and exhausting. It is even more difficult to
compare specific crime statistics across districts such as the crime rate
                        at a certain time of day. It would be useful if people could look up
                        crime-related information across districts in one application. Our app
aims to help people make decisions when considering their next trip or move to San Francisco, California
via visually exploring a dataset of crime statistics. The app provides an overview of the crime rate across
neighborhoods and allows users to focus on more specific information through
filtering of geological location, crime rate, crime type or time of the
crime.
Use the box below to choose crimes of interest.
"""
),
dcc.Dropdown(
id = 'drop_selection_crime',
options=[{'label': i, 'value': i} for i in df_t4['Category'].unique()
],
style={'height': '20px',
'width': '400px'},
value=df_t4['Category'].unique(),
multi=True)
],
md=5,
),
dbc.Col(
[
dbc.Row(
[
html.Iframe(
sandbox = "allow-scripts",
id = "plot_top",
height = "500",
width = "650",
style = {"border-width": "0px"},
srcDoc = make_plot_top().to_html()
)
]
)
]
),
]
),
dbc.Row(
html.Iframe(
sandbox='allow-scripts',
id='plot_bot',
height='500',
width='1200',
style={'border-width': '0px'},
srcDoc= make_plot_bot().to_html()
)
)
],
className="mt-4",
)
app.layout = html.Div(body)
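# Callback: whenever the crime-type dropdown changes, rebuild both Altair charts
# from the filtered dataframe and push the refreshed HTML into the two iframes.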
@app.callback([dash.dependencies.Output('plot_top', 'srcDoc'),
dash.dependencies.Output('plot_bot', 'srcDoc')],
[dash.dependencies.Input('drop_selection_crime', 'value')]
)
def update_df(chosen):
new_df = df_t4[(df_t4["Category"].isin(chosen))]
updated_plot_top = make_plot_top(new_df).to_html()
updated_plot_bottom = make_plot_bot(new_df).to_html()
return updated_plot_top, updated_plot_bottom
if __name__ == '__main__':
app.run_server(debug=False)
| 38.917127
| 203
| 0.534923
| 779
| 7,044
| 4.662388
| 0.409499
| 0.007709
| 0.019273
| 0.006057
| 0.131057
| 0.116189
| 0.105176
| 0.087004
| 0.060573
| 0.037996
| 0
| 0.025105
| 0.355338
| 7,044
| 181
| 204
| 38.917127
| 0.774719
| 0.077371
| 0
| 0.214815
| 0
| 0
| 0.141167
| 0.011255
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022222
| false
| 0
| 0.059259
| 0
| 0.103704
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe2074c1f1219a5f9d1c7d8eeb8c9be145ffb2ea
| 5,982
|
py
|
Python
|
train.py
|
hjl-yul154/autodeeplab
|
1bd8399ac830fcafd506a4207b75e05682d1e260
|
[
"MIT"
] | 1
|
2020-07-27T07:08:47.000Z
|
2020-07-27T07:08:47.000Z
|
train.py
|
hjl-yul154/autodeeplab
|
1bd8399ac830fcafd506a4207b75e05682d1e260
|
[
"MIT"
] | null | null | null |
train.py
|
hjl-yul154/autodeeplab
|
1bd8399ac830fcafd506a4207b75e05682d1e260
|
[
"MIT"
] | null | null | null |
import os
import pdb
import warnings
import numpy as np
import torch
import torch.nn as nn
import torch.utils.data
import torch.backends.cudnn
import torch.optim as optim
import dataloaders
from utils.utils import AverageMeter
from utils.loss import build_criterion
from utils.metrics import Evaluator
from utils.step_lr_scheduler import Iter_LR_Scheduler
from retrain_model.build_autodeeplab import Retrain_Autodeeplab
from config_utils.re_train_autodeeplab import obtain_retrain_autodeeplab_args
def main():
warnings.filterwarnings('ignore')
assert torch.cuda.is_available()
torch.backends.cudnn.benchmark = True
args = obtain_retrain_autodeeplab_args()
save_dir = os.path.join('./data/', args.save_path)
if not os.path.isdir(save_dir):
os.mkdir(save_dir)
model_fname = os.path.join(save_dir,
'deeplab_{0}_{1}_v3_{2}_epoch%d.pth'.format(args.backbone, args.dataset, args.exp))
record_name = os.path.join(save_dir, 'training_record.txt')
if args.dataset == 'pascal':
raise NotImplementedError
elif args.dataset == 'cityscapes':
kwargs = {'num_workers': args.workers, 'pin_memory': True, 'drop_last': True}
dataset_loader, num_classes, val_loader = dataloaders.make_data_loader(args, **kwargs)
args.num_classes = num_classes
else:
raise ValueError('Unknown dataset: {}'.format(args.dataset))
if args.backbone == 'autodeeplab':
model = Retrain_Autodeeplab(args)
else:
raise ValueError('Unknown backbone: {}'.format(args.backbone))
if args.criterion == 'Ohem':
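        # OHEM typically keeps only the hardest pixels: those whose loss exceeds
        # `thresh`, but never fewer than `n_min` per batch (an assumption about
        # build_criterion's behavior; the exact semantics live in utils.loss).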
args.thresh = 0.7
args.crop_size = [args.crop_size, args.crop_size] if isinstance(args.crop_size, int) else args.crop_size
args.n_min = int((args.batch_size / len(args.gpu) * args.crop_size[0] * args.crop_size[1]) // 16)
criterion = build_criterion(args)
model = nn.DataParallel(model).cuda()
model.train()
if args.freeze_bn:
for m in model.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
m.weight.requires_grad = False
m.bias.requires_grad = False
optimizer = optim.SGD(model.module.parameters(), lr=args.base_lr, momentum=0.9, weight_decay=0.0001)
max_iteration = len(dataset_loader) * args.epochs
scheduler = Iter_LR_Scheduler(args, max_iteration, len(dataset_loader))
start_epoch = 0
    evaluator = Evaluator(num_classes)
if args.resume:
if os.path.isfile(args.resume):
print('=> loading checkpoint {0}'.format(args.resume))
checkpoint = torch.load(args.resume)
start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print('=> loaded checkpoint {0} (epoch {1})'.format(args.resume, checkpoint['epoch']))
else:
raise ValueError('=> no checkpoint found at {0}'.format(args.resume))
for epoch in range(start_epoch, args.epochs):
losses = AverageMeter()
print('Training epoch {}'.format(epoch))
model.train()
for i, sample in enumerate(dataset_loader):
cur_iter = epoch * len(dataset_loader) + i
scheduler(optimizer, cur_iter)
inputs = sample['image'].cuda()
target = sample['label'].cuda()
outputs = model(inputs)
loss = criterion(outputs, target)
if np.isnan(loss.item()) or np.isinf(loss.item()):
pdb.set_trace()
losses.update(loss.item(), args.batch_size)
loss.backward()
optimizer.step()
optimizer.zero_grad()
if (i + 1) % 200 == 0:
print('epoch: {0}\t''iter: {1}/{2}\t''lr: {3:.6f}\t''loss: {loss.val:.4f} ({loss.ema:.4f})'.format(
epoch + 1, i + 1, len(dataset_loader), scheduler.get_lr(optimizer), loss=losses))
if epoch < args.epochs:
if (epoch+1) % 5 == 0:
torch.save({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
}, model_fname % (epoch + 1))
else:
torch.save({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
}, model_fname % (epoch + 1))
line0 = 'epoch: {0}\t''loss: {loss.val:.4f} ({loss.ema:.4f})'.format(
epoch, loss=losses)
with open(record_name, 'a') as f:
f.write(line0)
if line0[-1] != '\n':
f.write('\n')
        if epoch % 3 != 0 and epoch < args.epochs - 20:
continue
print('Validate epoch {}'.format(epoch))
model.eval()
evaluator.reset()
        test_loss = 0.0
        for i, sample in enumerate(val_loader):
inputs = sample['image'].cuda()
target = sample['label'].cuda()
with torch.no_grad():
outputs = model(inputs)
# loss = criterion(outputs, target)
# test_loss+=loss.item()
pred=outputs.data.cpu().numpy()
target=target.cpu().numpy()
pred = np.argmax(pred, axis=1)
evaluator.add_batch(target,pred)
Acc = evaluator.Pixel_Accuracy()
Acc_class = evaluator.Pixel_Accuracy_Class()
mIoU = evaluator.Mean_Intersection_over_Union()
FWIoU = evaluator.Frequency_Weighted_Intersection_over_Union()
print("epoch: {}\t Acc:{:.3f}, Acc_class:{:.3f}, mIoU:{:.3f}, fwIoU: {:.3f}".format(epoch,Acc, Acc_class, mIoU, FWIoU))
line1='epoch: {}\t''mIoU: {:.3f}'.format(epoch,mIoU)
with open(record_name, 'a') as f:
f.write(line1)
if line1[-1] != '\n':
f.write('\n')
if __name__ == "__main__":
main()
| 38.844156
| 127
| 0.596122
| 731
| 5,982
| 4.716826
| 0.27223
| 0.023492
| 0.024362
| 0.013921
| 0.190835
| 0.152552
| 0.135731
| 0.110209
| 0.085847
| 0.069606
| 0
| 0.015342
| 0.269977
| 5,982
| 153
| 128
| 39.098039
| 0.774216
| 0.009361
| 0
| 0.195489
| 0
| 0.007519
| 0.105014
| 0.00574
| 0
| 0
| 0
| 0
| 0.007519
| 1
| 0.007519
| false
| 0
| 0.120301
| 0
| 0.12782
| 0.045113
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe21c2ef055f99448891893a1c18824fdde9d61e
| 1,883
|
py
|
Python
|
test.py
|
xxaxdxcxx/miscellaneous-code
|
cdb88783f39e1b9a89fdb12f7cddfe62619e4357
|
[
"MIT"
] | null | null | null |
test.py
|
xxaxdxcxx/miscellaneous-code
|
cdb88783f39e1b9a89fdb12f7cddfe62619e4357
|
[
"MIT"
] | null | null | null |
test.py
|
xxaxdxcxx/miscellaneous-code
|
cdb88783f39e1b9a89fdb12f7cddfe62619e4357
|
[
"MIT"
] | null | null | null |
class Solution:
# dictionary keys are tuples, storing results
# structure of the tuple:
# (level, prev_sum, val_to_include)
# value is number of successful tuples
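    # Illustrative key (an assumption for exposition): (2, 0, -1) means "at level 2
    # (list C), the partial sum of the chosen A and B values is 0, and -1 is the C
    # value being considered"; the cached value is how many D entries bring the
    # total to zero from that point.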
def fourSumCount(self, A, B, C, D, prev_sum=0, level=0, sums={}):
"""
:type A: List[int]
:type B: List[int]
:type C: List[int]
:type D: List[int]
:rtype: int
"""
# handle clearing dictionary between tests
        sums = {} if level == 0 else sums  # start each top-level call with a fresh memo
# base case:
if level == 3:
total = 0
for num in D:
if prev_sum + num == 0:
print("At level 3, 0 total found using entry w/ value {0}".
format(num))
total += 1
return total
total = 0
lists = [A, B, C]
for num in lists[level]:
if level == 0:
print(str(sums))
if (level, prev_sum, num) in sums:
total += sums[(level, prev_sum, num)]
print("Used dictionary entry {0}, making total {1}".
format((level, prev_sum, num), total))
else:
print("Call from level {0} to level {1}; current sum is {2}".
format(level, level + 1, prev_sum + num))
result = self.fourSumCount(A, B, C, D, prev_sum + num,
level + 1, sums)
sums[(level, prev_sum, num)] = result
total += result
if level == 0:
sums = {}
print(sums)
return total
sol = Solution()
A = [1]
B = [-1]
C = [0]
D = [1]
result = sol.fourSumCount(A, B, C, D)
print("Test 1: {0}".format(result))
A = [1, 2]
B = [-2, -1]
C = [-1, 2]
D = [0, 2]
result = sol.fourSumCount(A, B, C, D)
print("Test 2: {0}".format(result))
| 31.383333
| 79
| 0.463622
| 241
| 1,883
| 3.576763
| 0.282158
| 0.073086
| 0.081207
| 0.018561
| 0.162413
| 0.104408
| 0.078886
| 0.078886
| 0.078886
| 0
| 0
| 0.032258
| 0.407329
| 1,883
| 59
| 80
| 31.915254
| 0.740143
| 0.148168
| 0
| 0.181818
| 0
| 0
| 0.108231
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022727
| false
| 0
| 0
| 0
| 0.090909
| 0.159091
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|